From: Liam Howlett <liam.howlett@oracle.com>
To: "maple-tree@lists.infradead.org" <maple-tree@lists.infradead.org>,
	"linux-mm@kvack.org" <linux-mm@kvack.org>,
	"linux-kernel@vger.kernel.org" <linux-kernel@vger.kernel.org>,
	Andrew Morton <akpm@linux-foundation.org>
Cc: Song Liu <songliubraving@fb.com>,
	Davidlohr Bueso <dave@stgolabs.net>,
	"Paul E . McKenney" <paulmck@kernel.org>,
	Matthew Wilcox <willy@infradead.org>,
	Laurent Dufour <ldufour@linux.ibm.com>,
	David Rientjes <rientjes@google.com>,
	Axel Rasmussen <axelrasmussen@google.com>,
	Suren Baghdasaryan <surenb@google.com>,
	Vlastimil Babka <vbabka@suse.cz>, Rik van Riel <riel@surriel.com>,
	Peter Zijlstra <peterz@infradead.org>,
	Michel Lespinasse <walken.cr@gmail.com>,
	Liam Howlett <liam.howlett@oracle.com>
Subject: [PATCH v2 60/61] mm: Remove the vma linked list
Date: Tue, 17 Aug 2021 15:47:38 +0000
Message-ID: <20210817154651.1570984-61-Liam.Howlett@oracle.com>
In-Reply-To: <20210817154651.1570984-1-Liam.Howlett@oracle.com>

From: "Liam R. Howlett" <Liam.Howlett@Oracle.com>

Replace all uses of vm_next with vma_next().
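
As a rough before/after sketch (not part of this patch; examine() is a
stand-in for whatever a caller does with each VMA):

	/* Before: chase the per-mm linked list. */
	for (vma = mm->mmap; vma; vma = vma->vm_next)
		examine(vma);

	/* After: ask the maple tree for the successor. */
	for (vma = find_vma(mm, 0); vma; vma = vma_next(mm, vma))
		examine(vma);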

Update free_pgtables(), unmap_vmas(), and zap_page_range() to use the
maple tree.
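
These functions now iterate with mt_find() and an advancing index
rather than following vm_next; the shape of the loop (as in the hunks
below) is:

	unsigned long index = vma->vm_end;

	do {
		/* ... unmap or free page tables for vma ... */
	} while ((vma = mt_find(mt, &index, end - 1)) != NULL);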

Use the new free_pgtables() and unmap_vmas() in do_mas_align_munmap().
At the same time, alter the loop to be more compact.

Now that free_pgtables() and unmap_vmas() take a maple tree as an
argument, rearrange do_mas_align_munmap() to use the new tree to hold
the lock.

Move mas_lock()/mas_unlock() out of vma_mas_store() and into
vma_mas_link().  This allows vma_mas_store() to be used in loops.
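
With the locking hoisted into the caller, a batch of stores can run
under one lock hold.  A simplified sketch of the munmap detach loop
below, which moves a run of VMAs into a second tree:

	mas_lock(&mas_detach);
	mas_for_each(mas, vma, end - 1)
		vma_mas_store(vma, &mas_detach);  /* no locking inside */
	mas_unlock(&mas_detach);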

Remove __vma_link_list() and __vma_unlink_list(), as they were used
exclusively to update the linked list.

Rework validation of the tree, as it previously depended on the linked
list.
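
The tree-only check now walks the maple state and compares each entry
against its pivots; roughly (report_and_bug() stands in for the dump
and VM_BUG_ON sequence in the hunk below):

	rcu_read_lock();
	mas_for_each(&mas, vma, ULONG_MAX) {
		if ((vma->vm_start != mas.index) ||
		    (vma->vm_end - 1 != mas.last))
			report_and_bug(vma);
	}
	rcu_read_unlock();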

Signed-off-by: Liam R. Howlett <Liam.Howlett@Oracle.com>
---
 fs/proc/task_nommu.c     |   2 +-
 include/linux/mm.h       |   5 +-
 include/linux/mm_types.h |   4 -
 kernel/fork.c            |  13 +-
 mm/debug.c               |  14 +-
 mm/internal.h            |  14 +-
 mm/memory.c              |  33 ++-
 mm/mmap.c                | 531 +++++++++++++++++++--------------------
 mm/nommu.c               |   6 +-
 mm/util.c                |  40 ---
 10 files changed, 305 insertions(+), 357 deletions(-)

diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
index 8691a1216d1c..be02e8997ddf 100644
--- a/fs/proc/task_nommu.c
+++ b/fs/proc/task_nommu.c
@@ -246,7 +246,7 @@ static void *m_next(struct seq_file *m, void *_p, loff_t *pos)
 	struct vm_area_struct *vma = _p;
 
 	*pos = vma->vm_end;
-	return vma->vm_next;
+	return vma_next(vma->vm_mm, vma);
 }
 
 static const struct seq_operations proc_pid_maps_ops = {
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 7a5eb1fbb488..96002be8585d 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1843,8 +1843,9 @@ void zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
 		  unsigned long size);
 void zap_page_range(struct vm_area_struct *vma, unsigned long address,
 		    unsigned long size);
-void unmap_vmas(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
-		unsigned long start, unsigned long end);
+void unmap_vmas(struct mmu_gather *tlb, struct maple_tree *mt,
+		struct vm_area_struct *start_vma, unsigned long start,
+		unsigned long end);
 
 struct mmu_notifier_range;
 
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 374c48169692..428a697db7de 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -403,8 +403,6 @@ struct vm_area_struct {
 	unsigned long vm_end;		/* The first byte after our end address
 					   within vm_mm. */
 
-	/* linked list of VM areas per task, sorted by address */
-	struct vm_area_struct *vm_next, *vm_prev;
 	struct mm_struct *vm_mm;	/* The address space we belong to. */
 
 	/*
@@ -470,7 +468,6 @@ struct core_state {
 struct kioctx_table;
 struct mm_struct {
 	struct {
-		struct vm_area_struct *mmap;		/* list of VMAs */
 		struct maple_tree mm_mt;
 #ifdef CONFIG_MMU
 		unsigned long (*get_unmapped_area) (struct file *filp,
@@ -485,7 +482,6 @@ struct mm_struct {
 		unsigned long mmap_compat_legacy_base;
 #endif
 		unsigned long task_size;	/* size of task vm space */
-		unsigned long highest_vm_end;	/* highest vma end address */
 		pgd_t * pgd;
 
 #ifdef CONFIG_MEMBARRIER
diff --git a/kernel/fork.c b/kernel/fork.c
index 0c35460c092e..e8471b90910b 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -364,7 +364,6 @@ struct vm_area_struct *vm_area_dup(struct vm_area_struct *orig)
 		 */
 		*new = data_race(*orig);
 		INIT_LIST_HEAD(&new->anon_vma_chain);
-		new->vm_next = new->vm_prev = NULL;
 	}
 	return new;
 }
@@ -473,7 +472,7 @@ EXPORT_SYMBOL(free_task);
 static __latent_entropy int dup_mmap(struct mm_struct *mm,
 					struct mm_struct *oldmm)
 {
-	struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
+	struct vm_area_struct *mpnt, *tmp, *prev;
 	int retval;
 	unsigned long charge = 0;
 	MA_STATE(old_mas, &oldmm->mm_mt, 0, 0);
@@ -500,7 +499,6 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm,
 	mm->exec_vm = oldmm->exec_vm;
 	mm->stack_vm = oldmm->stack_vm;
 
-	pprev = &mm->mmap;
 	retval = ksm_fork(mm, oldmm);
 	if (retval)
 		goto out;
@@ -588,14 +586,6 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm,
 		if (is_vm_hugetlb_page(tmp))
 			reset_vma_resv_huge_pages(tmp);
 
-		/*
-		 * Link in the new vma and copy the page table entries.
-		 */
-		*pprev = tmp;
-		pprev = &tmp->vm_next;
-		tmp->vm_prev = prev;
-		prev = tmp;
-
 		/* Link the vma into the MT */
 		mas_lock(&mas);
 		mas.index = tmp->vm_start;
@@ -1091,7 +1081,6 @@ static void mm_init_uprobes_state(struct mm_struct *mm)
 static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
 	struct user_namespace *user_ns)
 {
-	mm->mmap = NULL;
 	mt_init_flags(&mm->mm_mt, MAPLE_ALLOC_RANGE);
 	atomic_set(&mm->mm_users, 1);
 	atomic_set(&mm->mm_count, 1);
diff --git a/mm/debug.c b/mm/debug.c
index f84662c87b4f..5b126eb5ef1b 100644
--- a/mm/debug.c
+++ b/mm/debug.c
@@ -187,13 +187,11 @@ EXPORT_SYMBOL(dump_page);
 
 void dump_vma(const struct vm_area_struct *vma)
 {
-	pr_emerg("vma %px start %px end %px\n"
-		"next %px prev %px mm %px\n"
+	pr_emerg("vma %px start %px end %px mm %px\n"
 		"prot %lx anon_vma %px vm_ops %px\n"
 		"pgoff %lx file %px private_data %px\n"
 		"flags: %#lx(%pGv)\n",
-		vma, (void *)vma->vm_start, (void *)vma->vm_end, vma->vm_next,
-		vma->vm_prev, vma->vm_mm,
+		vma, (void *)vma->vm_start, (void *)vma->vm_end, vma->vm_mm,
 		(unsigned long)pgprot_val(vma->vm_page_prot),
 		vma->anon_vma, vma->vm_ops, vma->vm_pgoff,
 		vma->vm_file, vma->vm_private_data,
@@ -203,11 +201,11 @@ EXPORT_SYMBOL(dump_vma);
 
 void dump_mm(const struct mm_struct *mm)
 {
-	pr_emerg("mm %px mmap %px task_size %lu\n"
+	pr_emerg("mm %px task_size %lu\n"
 #ifdef CONFIG_MMU
 		"get_unmapped_area %px\n"
 #endif
-		"mmap_base %lu mmap_legacy_base %lu highest_vm_end %lu\n"
+		"mmap_base %lu mmap_legacy_base %lu\n"
 		"pgd %px mm_users %d mm_count %d pgtables_bytes %lu map_count %d\n"
 		"hiwater_rss %lx hiwater_vm %lx total_vm %lx locked_vm %lx\n"
 		"pinned_vm %llx data_vm %lx exec_vm %lx stack_vm %lx\n"
@@ -231,11 +229,11 @@ void dump_mm(const struct mm_struct *mm)
 		"tlb_flush_pending %d\n"
 		"def_flags: %#lx(%pGv)\n",
 
-		mm, mm->mmap, mm->task_size,
+		mm, mm->task_size,
 #ifdef CONFIG_MMU
 		mm->get_unmapped_area,
 #endif
-		mm->mmap_base, mm->mmap_legacy_base, mm->highest_vm_end,
+		mm->mmap_base, mm->mmap_legacy_base,
 		mm->pgd, atomic_read(&mm->mm_users),
 		atomic_read(&mm->mm_count),
 		mm_pgtables_bytes(mm),
diff --git a/mm/internal.h b/mm/internal.h
index eb883691709d..c8da57abf19b 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -45,8 +45,9 @@ vm_fault_t do_swap_page(struct vm_fault *vmf);
 void folio_rotate_reclaimable(struct folio *folio);
 bool __folio_end_writeback(struct folio *folio);
 
-void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
-		unsigned long floor, unsigned long ceiling);
+void free_pgtables(struct mmu_gather *tlb, struct maple_tree *mt,
+		   struct vm_area_struct *start_vma, unsigned long floor,
+		   unsigned long ceiling);
 
 static inline bool can_madv_lru_vma(struct vm_area_struct *vma)
 {
@@ -395,9 +396,7 @@ static inline int vma_mas_store(struct vm_area_struct *vma, struct ma_state *mas
 #endif
 	mas->index = vma->vm_start;
 	mas->last = vma->vm_end - 1;
-	mas_lock(mas);
 	ret = mas_store_gfp(mas, vma, GFP_KERNEL);
-	mas_unlock(mas);
 	return ret;
 }
 
@@ -431,17 +430,10 @@ static inline int vma_mas_remove(struct vm_area_struct *vma, struct ma_state *ma
 #endif
 	mas->index = vma->vm_start;
 	mas->last = vma->vm_end - 1;
-	mas_lock(mas);
 	ret = mas_store_gfp(mas, NULL, GFP_KERNEL);
-	mas_unlock(mas);
 	return ret;
 }
 
-/* mm/util.c */
-void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
-		struct vm_area_struct *prev);
-void __vma_unlink_list(struct mm_struct *mm, struct vm_area_struct *vma);
-
 #ifdef CONFIG_MMU
 extern long populate_vma_page_range(struct vm_area_struct *vma,
 		unsigned long start, unsigned long end, int *locked);
diff --git a/mm/memory.c b/mm/memory.c
index 39e7a1495c3c..2d0c8b1066ba 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -398,12 +398,21 @@ void free_pgd_range(struct mmu_gather *tlb,
 	} while (pgd++, addr = next, addr != end);
 }
 
-void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *vma,
-		unsigned long floor, unsigned long ceiling)
+void free_pgtables(struct mmu_gather *tlb, struct maple_tree *mt,
+		   struct vm_area_struct *vma, unsigned long floor,
+		   unsigned long ceiling)
 {
-	while (vma) {
-		struct vm_area_struct *next = vma->vm_next;
+	unsigned long index = vma->vm_end;
+
+	do {
 		unsigned long addr = vma->vm_start;
+		struct vm_area_struct *next;
+
+		/*
+		 * Note: USER_PGTABLES_CEILING may be passed as ceiling and may
+		 * be 0.  This will underflow and is okay.
+		 */
+		next = mt_find(mt, &index, ceiling - 1);
 
 		/*
 		 * Hide vma from rmap and truncate_pagecache before freeing
@@ -422,7 +431,7 @@ void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *vma,
 			while (next && next->vm_start <= vma->vm_end + PMD_SIZE
 			       && !is_vm_hugetlb_page(next)) {
 				vma = next;
-				next = vma->vm_next;
+				next = mt_find(mt, &index, ceiling - 1);
 				unlink_anon_vmas(vma);
 				unlink_file_vma(vma);
 			}
@@ -430,7 +439,7 @@ void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *vma,
 				floor, next ? next->vm_start : ceiling);
 		}
 		vma = next;
-	}
+	} while (vma);
 }
 
 int __pte_alloc(struct mm_struct *mm, pmd_t *pmd)
@@ -1616,17 +1625,19 @@ static void unmap_single_vma(struct mmu_gather *tlb,
  * ensure that any thus-far unmapped pages are flushed before unmap_vmas()
  * drops the lock and schedules.
  */
-void unmap_vmas(struct mmu_gather *tlb,
+void unmap_vmas(struct mmu_gather *tlb, struct maple_tree *mt,
 		struct vm_area_struct *vma, unsigned long start_addr,
 		unsigned long end_addr)
 {
 	struct mmu_notifier_range range;
+	unsigned long index = vma->vm_end;
 
 	mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma, vma->vm_mm,
 				start_addr, end_addr);
 	mmu_notifier_invalidate_range_start(&range);
-	for ( ; vma && vma->vm_start < end_addr; vma = vma->vm_next)
+	do {
 		unmap_single_vma(tlb, vma, start_addr, end_addr, NULL);
+	} while ((vma = mt_find(mt, &index, end_addr - 1)) != NULL);
 	mmu_notifier_invalidate_range_end(&range);
 }
 
@@ -1641,6 +1652,9 @@ void unmap_vmas(struct mmu_gather *tlb,
 void zap_page_range(struct vm_area_struct *vma, unsigned long start,
 		unsigned long size)
 {
+	struct maple_tree *mt = &vma->vm_mm->mm_mt;
+	unsigned long index = vma->vm_end;
+	unsigned long end = start + size;
 	struct mmu_notifier_range range;
 	struct mmu_gather tlb;
 
@@ -1650,8 +1664,9 @@ void zap_page_range(struct vm_area_struct *vma, unsigned long start,
 	tlb_gather_mmu(&tlb, vma->vm_mm);
 	update_hiwater_rss(vma->vm_mm);
 	mmu_notifier_invalidate_range_start(&range);
-	for ( ; vma && vma->vm_start < range.end; vma = vma->vm_next)
+	do {
 		unmap_single_vma(&tlb, vma, start, range.end, NULL);
+	} while ((vma = mt_find(mt, &index, end - 1)) != NULL);
 	mmu_notifier_invalidate_range_end(&range);
 	tlb_finish_mmu(&tlb);
 }
diff --git a/mm/mmap.c b/mm/mmap.c
index afef4af1c621..06666706d5d3 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -74,9 +74,10 @@ int mmap_rnd_compat_bits __read_mostly = CONFIG_ARCH_MMAP_RND_COMPAT_BITS;
 static bool ignore_rlimit_data;
 core_param(ignore_rlimit_data, ignore_rlimit_data, bool, 0644);
 
-static void unmap_region(struct mm_struct *mm,
+static void unmap_region(struct mm_struct *mm, struct maple_tree *mt,
 		struct vm_area_struct *vma, struct vm_area_struct *prev,
-		unsigned long start, unsigned long end);
+		struct vm_area_struct *next, unsigned long start,
+		unsigned long end);
 
 /* description of effects of mapping type and prot in current implementation.
  * this is due to the limited x86 page protection hardware.  The expected
@@ -175,10 +176,8 @@ void unlink_file_vma(struct vm_area_struct *vma)
 /*
  * Close a vm structure and free it, returning the next.
  */
-static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
+static void remove_vma(struct vm_area_struct *vma)
 {
-	struct vm_area_struct *next = vma->vm_next;
-
 	might_sleep();
 	if (vma->vm_ops && vma->vm_ops->close)
 		vma->vm_ops->close(vma);
@@ -186,13 +185,13 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
 		fput(vma->vm_file);
 	mpol_put(vma_policy(vma));
 	vm_area_free(vma);
-	return next;
 }
 
 static int do_brk_munmap(struct ma_state *mas, struct vm_area_struct *vma,
 			 unsigned long newbrk, unsigned long oldbrk,
 			 struct list_head *uf);
-static int do_brk_flags(struct ma_state *mas, struct vm_area_struct **brkvma,
+static int do_brk_flags(struct ma_state *mas, struct ma_state *ma_prev,
+			struct vm_area_struct **brkvma,
 			unsigned long addr, unsigned long request,
 			unsigned long flags);
 SYSCALL_DEFINE1(brk, unsigned long, brk)
@@ -205,6 +204,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
 	bool downgraded = false;
 	LIST_HEAD(uf);
 	MA_STATE(mas, &mm->mm_mt, 0, 0);
+	struct ma_state ma_neighbour;
 
 	if (mmap_write_lock_killable(mm))
 		return -EINTR;
@@ -263,7 +263,6 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
 		 * before calling do_brk_munmap().
 		 */
 		mm->brk = brk;
-		mas.last = oldbrk - 1;
 		ret = do_brk_munmap(&mas, brkvma, newbrk, oldbrk, &uf);
 		if (ret == 1)  {
 			downgraded = true;
@@ -274,28 +273,30 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
 		mm->brk = origbrk;
 		goto out;
 	}
+	ma_neighbour = mas;
+	mas_lock(&ma_neighbour);
+	next = mas_next(&ma_neighbour, newbrk + PAGE_SIZE + stack_guard_gap);
+	mas_unlock(&ma_neighbour);
 	/* Only check if the next VMA is within the stack_guard_gap of the
 	 * expansion area */
-	next = mas_next(&mas, newbrk + PAGE_SIZE + stack_guard_gap);
 	/* Check against existing mmap mappings. */
 	if (next && newbrk + PAGE_SIZE > vm_start_gap(next))
 		goto out;
 
-	mas_lock(&mas);
-	brkvma = mas_prev(&mas, mm->start_brk);
-	mas_unlock(&mas);
+	mas_lock(&ma_neighbour);
+	brkvma = mas_prev(&ma_neighbour, mm->start_brk);
+	mas_unlock(&ma_neighbour);
 	if (brkvma) {
-		if(brkvma->vm_start >= oldbrk)
+		if (brkvma->vm_start >= oldbrk)
 			goto out; // Trying to map over another vma.
 
-		if (brkvma->vm_end <= min_brk) {
+		if (brkvma->vm_end <= min_brk)
 			brkvma = NULL;
-			mas_reset(&mas);
-		}
 	}
 
 	/* Ok, looks good - let it rip. */
-	if (do_brk_flags(&mas, &brkvma, oldbrk, newbrk - oldbrk, 0) < 0)
+	if (do_brk_flags(&mas, &ma_neighbour, &brkvma, oldbrk,
+			 newbrk - oldbrk, 0) < 0)
 		goto out;
 
 	mm->brk = brk;
@@ -306,6 +307,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
 		mmap_read_unlock(mm);
 	else
 		mmap_write_unlock(mm);
+
 	userfaultfd_unmap_complete(mm, &uf);
 	if (populate)
 		mm_populate(oldbrk, newbrk - oldbrk);
@@ -324,45 +326,23 @@ extern void mt_dump(const struct maple_tree *mt);
 static void validate_mm_mt(struct mm_struct *mm)
 {
 	struct maple_tree *mt = &mm->mm_mt;
-	struct vm_area_struct *vma_mt, *vma = mm->mmap;
+	struct vm_area_struct *vma_mt;
 
 	MA_STATE(mas, mt, 0, 0);
+
 	rcu_read_lock();
 	mas_for_each(&mas, vma_mt, ULONG_MAX) {
-		if (xa_is_zero(vma_mt))
-			continue;
-
-		if (!vma)
-			break;
-
-		if ((vma != vma_mt) ||
-		    (vma->vm_start != vma_mt->vm_start) ||
-		    (vma->vm_end != vma_mt->vm_end) ||
-		    (vma->vm_start != mas.index) ||
-		    (vma->vm_end - 1 != mas.last)) {
+		if ((vma_mt->vm_start != mas.index) ||
+		    (vma_mt->vm_end - 1 != mas.last)) {
 			pr_emerg("issue in %s\n", current->comm);
 			dump_stack();
 #ifdef CONFIG_DEBUG_VM
 			dump_vma(vma_mt);
-			pr_emerg("and vm_next\n");
-			dump_vma(vma->vm_next);
 #endif // CONFIG_DEBUG_VM
 			pr_emerg("mt piv: %px %lu - %lu\n", vma_mt,
 				 mas.index, mas.last);
 			pr_emerg("mt vma: %px %lu - %lu\n", vma_mt,
 				 vma_mt->vm_start, vma_mt->vm_end);
-			if (vma->vm_prev) {
-				pr_emerg("ll prev: %px %lu - %lu\n",
-					 vma->vm_prev, vma->vm_prev->vm_start,
-					 vma->vm_prev->vm_end);
-			}
-			pr_emerg("ll vma: %px %lu - %lu\n", vma,
-				 vma->vm_start, vma->vm_end);
-			if (vma->vm_next) {
-				pr_emerg("ll next: %px %lu - %lu\n",
-					 vma->vm_next, vma->vm_next->vm_start,
-					 vma->vm_next->vm_end);
-			}
 
 			mt_dump(mas.tree);
 			if (vma_mt->vm_end != mas.last + 1) {
@@ -379,12 +359,7 @@ static void validate_mm_mt(struct mm_struct *mm)
 			}
 			VM_BUG_ON_MM(vma_mt->vm_start != mas.index, mm);
 		}
-		VM_BUG_ON(vma != vma_mt);
-		vma = vma->vm_next;
-
 	}
-	VM_BUG_ON(vma);
-
 	rcu_read_unlock();
 	mt_validate(&mm->mm_mt);
 }
@@ -393,35 +368,33 @@ static void validate_mm(struct mm_struct *mm)
 {
 	int bug = 0;
 	int i = 0;
-	unsigned long highest_address = 0;
-	struct vm_area_struct *vma = mm->mmap;
+	struct vm_area_struct *vma;
+	MA_STATE(mas, &mm->mm_mt, 0, 0);
 
 	validate_mm_mt(mm);
 
-	while (vma) {
+	rcu_read_lock();
+	mas_for_each(&mas, vma, ULONG_MAX) {
 #ifdef CONFIG_DEBUG_VM_RB
 		struct anon_vma *anon_vma = vma->anon_vma;
 		struct anon_vma_chain *avc;
+
+		rcu_read_unlock();
 		if (anon_vma) {
 			anon_vma_lock_read(anon_vma);
 			list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
 				anon_vma_interval_tree_verify(avc);
 			anon_vma_unlock_read(anon_vma);
 		}
+		rcu_read_lock();
 #endif
-		highest_address = vm_end_gap(vma);
-		vma = vma->vm_next;
 		i++;
 	}
+	rcu_read_unlock();
 	if (i != mm->map_count) {
 		pr_emerg("map_count %d vm_next %d\n", mm->map_count, i);
 		bug = 1;
 	}
-	if (highest_address != mm->highest_vm_end) {
-		pr_emerg("mm->highest_vm_end %lx, found %lx\n",
-			  mm->highest_vm_end, highest_address);
-		bug = 1;
-	}
 	VM_BUG_ON_MM(bug, mm);
 }
 
@@ -479,29 +452,13 @@ bool range_has_overlap(struct mm_struct *mm, unsigned long start,
 	struct vm_area_struct *existing;
 
 	MA_STATE(mas, &mm->mm_mt, start, start);
+	rcu_read_lock();
 	existing = mas_find(&mas, end - 1);
 	*pprev = mas_prev(&mas, 0);
+	rcu_read_unlock();
 	return existing ? true : false;
 }
 
-/*
- * _vma_next() - Get the next VMA or the first.
- * @mm: The mm_struct.
- * @vma: The current vma.
- *
- * If @vma is NULL, return the first vma in the mm.
- *
- * Returns: The next VMA after @vma.
- */
-static inline struct vm_area_struct *_vma_next(struct mm_struct *mm,
-					 struct vm_area_struct *vma)
-{
-	if (!vma)
-		return mm->mmap;
-
-	return vma->vm_next;
-}
-
 /*
  * count_vma_pages_range() - Count the number of pages in a range.
  * @mas: The maple state
@@ -598,7 +555,7 @@ void vma_mt_store(struct mm_struct *mm, struct vm_area_struct *vma)
  * @vma: The VMA to link in
  * @mas: The maple state
  *
- * Must hold the @mas lock.
+ * Must not hold the maple tree lock.
  */
 static void vma_mas_link(struct mm_struct *mm, struct vm_area_struct *vma,
 			 struct ma_state *mas, struct vm_area_struct *prev)
@@ -610,15 +567,15 @@ static void vma_mas_link(struct mm_struct *mm, struct vm_area_struct *vma,
 		i_mmap_lock_write(mapping);
 	}
 
+	mas_lock(mas);
 	vma_mas_store(vma, mas);
-	__vma_link_list(mm, vma, prev);
+	mas_unlock(mas);
 	__vma_link_file(vma);
 
 	if (mapping)
 		i_mmap_unlock_write(mapping);
 
 	mm->map_count++;
-	validate_mm(mm);
 }
 
 static void vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
@@ -632,7 +589,6 @@ static void vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
 	}
 
 	vma_mt_store(mm, vma);
-	__vma_link_list(mm, vma, prev);
 	__vma_link_file(vma);
 
 	if (mapping)
@@ -652,7 +608,6 @@ static void __insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
 
 	BUG_ON(range_has_overlap(mm, vma->vm_start, vma->vm_end, &prev));
 	vma_mt_store(mm, vma);
-	__vma_link_list(mm, vma, prev);
 	mm->map_count++;
 }
 
@@ -710,7 +665,9 @@ inline int vma_expand(struct ma_state *mas, struct vm_area_struct *vma,
 	vma->vm_end = end;
 	vma->vm_pgoff = pgoff;
 	/* Note: mas must be pointing to the expanding VMA */
+	mas_lock(mas);
 	vma_mas_store(vma, mas);
+	mas_unlock(mas);
 
 	if (file) {
 		vma_interval_tree_insert(vma, root);
@@ -718,15 +675,8 @@ inline int vma_expand(struct ma_state *mas, struct vm_area_struct *vma,
 	}
 
 	/* Expanding over the next vma */
-	if (remove_next) {
-		/* Remove from mm linked list - also updates highest_vm_end */
-		__vma_unlink_list(mm, next);
-
-		if (file)
-			__remove_shared_vm_struct(next, file, mapping);
-
-	} else if (!next) {
-		mm->highest_vm_end = vm_end_gap(vma);
+	if (remove_next && file) {
+		__remove_shared_vm_struct(next, file, mapping);
 	}
 
 	if (anon_vma) {
@@ -767,7 +717,7 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
 	struct vm_area_struct *expand)
 {
 	struct mm_struct *mm = vma->vm_mm;
-	struct vm_area_struct *next = vma->vm_next, *orig_vma = vma;
+	struct vm_area_struct *next = vma_next(mm, vma), *orig_vma = vma;
 	struct address_space *mapping = NULL;
 	struct rb_root_cached *root = NULL;
 	struct anon_vma *anon_vma = NULL;
@@ -808,7 +758,7 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
 				 */
 				remove_next = 1 + (end > next->vm_end);
 				VM_WARN_ON(remove_next == 2 &&
-					   end != next->vm_next->vm_end);
+					   end != vma_next(mm, next)->vm_end);
 				/* trim end to next, for case 6 first pass */
 				end = next->vm_end;
 			}
@@ -821,7 +771,7 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
 			 * next, if the vma overlaps with it.
 			 */
 			if (remove_next == 2 && !next->anon_vma)
-				exporter = next->vm_next;
+				exporter = vma_next(mm, next);
 
 		} else if (end > next->vm_start) {
 			/*
@@ -913,8 +863,6 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
 		else
 			vma_changed = true;
 		vma->vm_end = end;
-		if (!next)
-			mm->highest_vm_end = vm_end_gap(vma);
 	}
 
 	if (vma_changed)
@@ -934,10 +882,8 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
 		flush_dcache_mmap_unlock(mapping);
 	}
 
-	if (remove_next) {
-		__vma_unlink_list(mm, next);
-		if (file)
-			__remove_shared_vm_struct(next, file, mapping);
+	if (remove_next && file) {
+		__remove_shared_vm_struct(next, file, mapping);
 	} else if (insert) {
 		/*
 		 * split_vma has split insert from vma, and needs
@@ -984,7 +930,7 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
 			 * "next->vm_prev->vm_end" changed and the
 			 * "vma->vm_next" gap must be updated.
 			 */
-			next = vma->vm_next;
+			next = vma_next(mm, vma);
 		} else {
 			/*
 			 * For the scope of the comment "next" and
@@ -1002,33 +948,14 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
 			remove_next = 1;
 			end = next->vm_end;
 			goto again;
-		} else if (!next) {
-			/*
-			 * If remove_next == 2 we obviously can't
-			 * reach this path.
-			 *
-			 * If remove_next == 3 we can't reach this
-			 * path because pre-swap() next is always not
-			 * NULL. pre-swap() "next" is not being
-			 * removed and its next->vm_end is not altered
-			 * (and furthermore "end" already matches
-			 * next->vm_end in remove_next == 3).
-			 *
-			 * We reach this only in the remove_next == 1
-			 * case if the "next" vma that was removed was
-			 * the highest vma of the mm. However in such
-			 * case next->vm_end == "end" and the extended
-			 * "vma" has vma->vm_end == next->vm_end so
-			 * mm->highest_vm_end doesn't need any update
-			 * in remove_next == 1 case.
-			 */
-			VM_WARN_ON(mm->highest_vm_end != vm_end_gap(vma));
 		}
 	}
-	if (insert && file)
+	if (insert && file) {
 		uprobe_mmap(insert);
+	}
 
 	validate_mm(mm);
+
 	return 0;
 }
 
@@ -1182,10 +1109,10 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
 	if (vm_flags & VM_SPECIAL)
 		return NULL;
 
-	next = _vma_next(mm, prev);
+	next = find_vma(mm, prev ? prev->vm_end : 0);
 	area = next;
 	if (area && area->vm_end == end)		/* cases 6, 7, 8 */
-		next = next->vm_next;
+		next = vma_next(mm, next);
 
 	/* verify some invariant that must be enforced by the caller */
 	VM_WARN_ON(prev && addr <= prev->vm_start);
@@ -1321,16 +1248,20 @@ struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *vma)
 {
 	struct anon_vma *anon_vma = NULL;
 
+	struct vm_area_struct *prev, *next;
+
 	/* Try next first. */
-	if (vma->vm_next) {
-		anon_vma = reusable_anon_vma(vma->vm_next, vma, vma->vm_next);
+	next = vma_next(vma->vm_mm, vma);
+	if (next) {
+		anon_vma = reusable_anon_vma(next, vma, next);
 		if (anon_vma)
 			return anon_vma;
 	}
 
+	prev = vma_prev(vma->vm_mm, vma);
 	/* Try prev next. */
-	if (vma->vm_prev)
-		anon_vma = reusable_anon_vma(vma->vm_prev, vma->vm_prev, vma);
+	if (prev)
+		anon_vma = reusable_anon_vma(prev, prev, vma);
 
 	/*
 	 * We might reach here with anon_vma == NULL if we can't find
@@ -1986,15 +1917,10 @@ struct vm_area_struct *find_vma_intersection(struct mm_struct *mm,
 					     unsigned long start_addr,
 					     unsigned long end_addr)
 {
-	struct vm_area_struct *vma;
-	MA_STATE(mas, &mm->mm_mt, start_addr, start_addr);
+	unsigned long index = start_addr;
 
 	mmap_assert_locked(mm);
-	rcu_read_lock();
-	vma = mas_find(&mas, end_addr - 1);
-	rcu_read_unlock();
-
-	return vma;
+	return mt_find(&mm->mm_mt, &index, end_addr - 1);
 }
 EXPORT_SYMBOL(find_vma_intersection);
 
@@ -2008,8 +1934,9 @@ EXPORT_SYMBOL(find_vma_intersection);
  */
 inline struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
 {
-	// Note find_vma_intersection will decrease 0 to underflow to ULONG_MAX
-	return find_vma_intersection(mm, addr, 0);
+	unsigned long index = addr;
+
+	return mt_find(&mm->mm_mt, &index, ULONG_MAX);
 }
 EXPORT_SYMBOL(find_vma);
 
@@ -2111,7 +2038,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
 	if (gap_addr < address || gap_addr > TASK_SIZE)
 		gap_addr = TASK_SIZE;
 
-	next = vma->vm_next;
+	next = vma_next(mm, vma);
 	if (next && next->vm_start < gap_addr && vma_is_accessible(next)) {
 		if (!(next->vm_flags & VM_GROWSUP))
 			return -ENOMEM;
@@ -2157,8 +2084,6 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
 				vma->vm_end = address;
 				vma_mt_store(mm, vma);
 				anon_vma_interval_tree_post_update_vma(vma);
-				if (!vma->vm_next)
-					mm->highest_vm_end = vm_end_gap(vma);
 				spin_unlock(&mm->page_table_lock);
 
 				perf_event_mmap(vma);
@@ -2185,7 +2110,7 @@ int expand_downwards(struct vm_area_struct *vma, unsigned long address)
 		return -EPERM;
 
 	/* Enforce stack_guard_gap */
-	prev = vma->vm_prev;
+	prev = vma_prev(mm, vma);
 	/* Check that both stack segments have the same anon_vma? */
 	if (prev && !(prev->vm_flags & VM_GROWSDOWN) &&
 			vma_is_accessible(prev)) {
@@ -2320,20 +2245,22 @@ EXPORT_SYMBOL_GPL(find_extend_vma);
  *
  * Called with the mm semaphore held.
  */
-static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
+static inline void remove_mt(struct mm_struct *mm, struct maple_tree *detached)
 {
 	unsigned long nr_accounted = 0;
+	unsigned long index = 0;
+	struct vm_area_struct *vma;
 
 	/* Update high watermark before we lower total_vm */
 	update_hiwater_vm(mm);
-	do {
+	mt_for_each(detached, vma, index, ULONG_MAX) {
 		long nrpages = vma_pages(vma);
 
 		if (vma->vm_flags & VM_ACCOUNT)
 			nr_accounted += nrpages;
 		vm_stat_account(mm, vma->vm_flags, -nrpages);
-		vma = remove_vma(vma);
-	} while (vma);
+		remove_vma(vma);
+	}
 	vm_unacct_memory(nr_accounted);
 	validate_mm(mm);
 }
@@ -2343,18 +2270,18 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
  *
  * Called with the mm semaphore held.
  */
-static void unmap_region(struct mm_struct *mm,
+static void unmap_region(struct mm_struct *mm, struct maple_tree *mt,
 		struct vm_area_struct *vma, struct vm_area_struct *prev,
+		struct vm_area_struct *next,
 		unsigned long start, unsigned long end)
 {
-	struct vm_area_struct *next = _vma_next(mm, prev);
 	struct mmu_gather tlb;
 
 	lru_add_drain();
 	tlb_gather_mmu(&tlb, mm);
 	update_hiwater_rss(mm);
-	unmap_vmas(&tlb, vma, start, end);
-	free_pgtables(&tlb, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS,
+	unmap_vmas(&tlb, mt, vma, start, end);
+	free_pgtables(&tlb, mt, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS,
 				 next ? next->vm_start : USER_PGTABLES_CEILING);
 	tlb_finish_mmu(&tlb);
 }
@@ -2395,8 +2322,9 @@ int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
 	if (err)
 		goto out_free_mpol;
 
-	if (new->vm_file)
+	if (new->vm_file) {
 		get_file(new->vm_file);
+	}
 
 	if (new->vm_ops && new->vm_ops->open)
 		new->vm_ops->open(new);
@@ -2438,27 +2366,6 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
 	return __split_vma(mm, vma, addr, new_below);
 }
 
-static inline int
-unlock_range(struct vm_area_struct *start, struct vm_area_struct **tail,
-	     unsigned long limit)
-{
-	struct mm_struct *mm = start->vm_mm;
-	struct vm_area_struct *tmp = start;
-	int count = 0;
-
-	while (tmp && tmp->vm_start < limit) {
-		*tail = tmp;
-		count++;
-		if (tmp->vm_flags & VM_LOCKED) {
-			mm->locked_vm -= vma_pages(tmp);
-			munlock_vma_pages_all(tmp);
-		}
-		tmp = tmp->vm_next;
-	}
-
-	return count;
-}
-
 /*
  * do_mas_align_munmap() - munmap the aligned region from @start to @end.
  * @mas: The maple_state, ideally set up to alter the correct tree location.
@@ -2477,9 +2384,13 @@ do_mas_align_munmap(struct ma_state *mas, struct vm_area_struct *vma,
 		    struct mm_struct *mm, unsigned long start,
 		    unsigned long end, struct list_head *uf, bool downgrade)
 {
-	struct vm_area_struct *prev, *last;
-	/* we have start < vma->vm_end  */
+	struct vm_area_struct *prev, *next;
+	struct maple_tree mt_detach;
+	int count = 0;
+	MA_STATE(mas_detach, &mt_detach, start, end - 1);
+	mtree_init(&mt_detach, MAPLE_ALLOC_RANGE);
 
+	prev = next = NULL;
 	/*
 	 * If we need to split any vma, do it now to save pain later.
 	 *
@@ -2487,8 +2398,11 @@ do_mas_align_munmap(struct ma_state *mas, struct vm_area_struct *vma,
 	 * unmapped vm_area_struct will remain in use: so lower split_vma
 	 * places tmp vma above, and higher split_vma places tmp vma below.
 	 */
+
+	/* Does it split the first one? */
 	if (start > vma->vm_start) {
 		int error;
+
 		/*
 		 * Make sure that map_count on return from munmap() will
 		 * not exceed its limit; but let map_count go just above
@@ -2497,31 +2411,55 @@ do_mas_align_munmap(struct ma_state *mas, struct vm_area_struct *vma,
 		if (end < vma->vm_end && mm->map_count >= sysctl_max_map_count)
 			return -ENOMEM;
 
-		error = __split_vma(mm, vma, start, 0);
+		/*
+		 * mas_pause() is not needed since mas->index needs to be set
+		 * differently than vma->vm_end anyways.
+		 */
+		mas_unlock(mas);
+		error = __split_vma(mm, vma, start, 1);
+		mas_lock(mas);
 		if (error)
 			return error;
-		prev = vma;
-		vma = _vma_next(mm, prev);
-		mas->index = start;
-		mas_reset(mas);
+
+		mas_set(mas, start - 1);
+		prev = mas_walk(mas);
 	} else {
-		prev = vma->vm_prev;
+		prev = mas_prev(mas, 0);
+		if (!prev)
+			mas_set(mas, start);
 	}
 
-	if (vma->vm_end >= end)
-		last = vma;
-	else
-		last = find_vma_intersection(mm, end - 1, end);
+	/*
+	 * Detach a range of VMAs from the mm. Using next as a temp variable as
+	 * it is always overwritten.
+	 */
+	mas_lock(&mas_detach);
+	mas_for_each(mas, next, end - 1) {
+		/* Does it split the end? */
+		if (next->vm_end > end) {
+			int error;
 
-	/* Does it split the last one? */
-	if (last && end < last->vm_end) {
-		int error = __split_vma(mm, last, end, 1);
-		if (error)
-			return error;
-		vma = _vma_next(mm, prev);
-		mas_reset(mas);
+			mas_unlock(mas);
+			error = __split_vma(mm, next, end, 0);
+			mas_lock(mas);
+			if (error)
+				return error;
+		}
+		count++;
+#ifdef CONFIG_DEBUG_MAPLE_TREE
+		BUG_ON(next->vm_start < start);
+		BUG_ON(next->vm_start > end);
+#endif
+		vma_mas_store(next, &mas_detach);
+		if (next->vm_flags & VM_LOCKED) {
+			mm->locked_vm -= vma_pages(next);
+			munlock_vma_pages_all(next);
+		}
 	}
+	mas_unlock(&mas_detach);
 
+	mas_set(mas, end);
+	next = mas_find(mas, ULONG_MAX);
 
 	if (unlikely(uf)) {
 		/*
@@ -2539,35 +2477,37 @@ do_mas_align_munmap(struct ma_state *mas, struct vm_area_struct *vma,
 			return error;
 	}
 
-	/*
-	 * unlock any mlock()ed ranges before detaching vmas, count the number
-	 * of VMAs to be dropped, and return the tail entry of the affected
-	 * area.
-	 */
-	mm->map_count -= unlock_range(vma, &last, end);
-	/* Drop removed area from the tree */
-	mas_store_gfp(mas, NULL, GFP_KERNEL);
-
-	/* Detach vmas from the MM linked list */
-	vma->vm_prev = NULL;
-	if (prev)
-		prev->vm_next = last->vm_next;
-	else
-		mm->mmap = last->vm_next;
-
-	if (last->vm_next) {
-		last->vm_next->vm_prev = prev;
-		last->vm_next = NULL;
-	} else
-		mm->highest_vm_end = prev ? vm_end_gap(prev) : 0;
+	/* Point of no return */
+	mas_set_range(mas, start, end - 1);
+#if defined(CONFIG_DEBUG_MAPLE_TREE)
+	/* Make sure no VMAs are about to be lost. */
+	{
+		MA_STATE(test, &mt_detach, start, end - 1);
+		struct vm_area_struct *vma_mas, *vma_test;
+		int test_count = 0;
 
+		rcu_read_lock();
+		vma_test = mas_find(&test, end - 1);
+		mas_for_each(mas, vma_mas, end - 1) {
+			BUG_ON(vma_mas != vma_test);
+			test_count++;
+			vma_test = mas_next(&test, end - 1);
+		}
+		rcu_read_unlock();
+		BUG_ON(count != test_count);
+		mas_set_range(mas, start, end - 1);
+	}
+#endif
+	mas_store_gfp(mas, NULL, GFP_KERNEL);
+	mm->map_count -= count;
+	mas_unlock(mas);
 	/*
 	 * Do not downgrade mmap_lock if we are next to VM_GROWSDOWN or
 	 * VM_GROWSUP VMA. Such VMAs can change their size under
 	 * down_read(mmap_lock) and collide with the VMA we are about to unmap.
 	 */
 	if (downgrade) {
-		if (last && (last->vm_flags & VM_GROWSDOWN))
+		if (next && (next->vm_flags & VM_GROWSDOWN))
 			downgrade = false;
 		else if (prev && (prev->vm_flags & VM_GROWSUP))
 			downgrade = false;
@@ -2575,10 +2515,15 @@ do_mas_align_munmap(struct ma_state *mas, struct vm_area_struct *vma,
 			mmap_write_downgrade(mm);
 	}
 
-	unmap_region(mm, vma, prev, start, end);
+	unmap_region(mm, &mt_detach, vma, prev, next, start, end);
+
+	/* Statistics and freeing VMAs */
+	remove_mt(mm, &mt_detach);
+	mtree_destroy(&mt_detach);
+	validate_mm(mm);
 
-	/* Fix up all other VM information */
-	remove_vma_list(mm, vma);
+	if (!downgrade)
+		mas_lock(mas);
 
 	return downgrade ? 1 : 0;
 }
@@ -2635,8 +2580,13 @@ int do_mas_munmap(struct ma_state *mas, struct mm_struct *mm,
 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len,
 	      struct list_head *uf)
 {
+	int ret;
 	MA_STATE(mas, &mm->mm_mt, start, start);
-	return do_mas_munmap(&mas, mm, start, len, uf, false);
+
+	mas_lock(&mas);
+	ret = do_mas_munmap(&mas, mm, start, len, uf, false);
+	mas_unlock(&mas);
+	return ret;
 }
 
 unsigned long mmap_region(struct file *file, unsigned long addr,
@@ -2670,8 +2620,12 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
 	}
 
 	/* Unmap any existing mapping in the area */
-	if (do_mas_munmap(&mas, mm, addr, len, uf, false))
+	mas_lock(&mas);
+	if (do_mas_munmap(&mas, mm, addr, len, uf, false)) {
+		mas_unlock(&mas);
 		return -ENOMEM;
+	}
+	mas_unlock(&mas);
 
 	/*
 	 * Private writable mapping: check memory availability
@@ -2762,6 +2716,11 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
 		 * new file must not have been exposed to user-space, yet.
 		 */
 		vma->vm_file = get_file(file);
+		/*
+		 * call_mmap() may sleep, but will not alter the maple tree
+		 * since the mmap_lock is held.  This is safe and will be
+		 * changed later.
+		 */
 		error = call_mmap(file, vma);
 		file = vma->vm_file;
 		if (error)
@@ -2865,7 +2824,7 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
 	vma->vm_file = NULL;
 
 	/* Undo any partial mapping done by a device driver. */
-	unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
+	unmap_region(mm, mas.tree, vma, prev, next, vma->vm_start, vma->vm_end);
 	charged = 0;
 	if (vm_flags & VM_SHARED)
 		mapping_unmap_writable(file->f_mapping);
@@ -2890,6 +2849,7 @@ static int __vm_munmap(unsigned long start, size_t len, bool downgrade)
 	if (mmap_write_lock_killable(mm))
 		return -EINTR;
 
+	mas_lock(&mas);
 	ret = do_mas_munmap(&mas, mm, start, len, &uf, downgrade);
 	/*
 	 * Returning 1 indicates mmap_lock is downgraded.
@@ -2899,8 +2859,10 @@ static int __vm_munmap(unsigned long start, size_t len, bool downgrade)
 	if (ret == 1) {
 		mmap_read_unlock(mm);
 		ret = 0;
-	} else
+	} else {
+		mas_unlock(&mas);
 		mmap_write_unlock(mm);
+	}
 
 	userfaultfd_unmap_complete(mm, &uf);
 	return ret;
@@ -2957,11 +2919,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
 		goto out;
 
 	if (start + size > vma->vm_end) {
-		struct vm_area_struct *next;
+		struct vm_area_struct *next, *prev = vma;
 
-		for (next = vma->vm_next; next; next = next->vm_next) {
+		for (next = vma_next(mm, vma); next; next = vma_next(mm, next)) {
 			/* hole between vmas ? */
-			if (next->vm_start != next->vm_prev->vm_end)
+			if (next->vm_start != prev->vm_end)
 				goto out;
 
 			if (next->vm_file != vma->vm_file)
@@ -2972,6 +2934,7 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
 
 			if (start + size <= next->vm_end)
 				break;
+			prev = next;
 		}
 
 		if (!next)
@@ -3017,7 +2980,7 @@ static int do_brk_munmap(struct ma_state *mas, struct vm_area_struct *vma,
 			 struct list_head *uf)
 {
 	struct mm_struct *mm = vma->vm_mm;
-	struct vm_area_struct unmap;
+	struct vm_area_struct unmap, *next;
 	unsigned long unmap_pages;
 	int ret;
 
@@ -3027,17 +2990,25 @@ static int do_brk_munmap(struct ma_state *mas, struct vm_area_struct *vma,
 		   ((vma->vm_start == newbrk) && (vma->vm_end == oldbrk)))) {
 		// remove entire mapping(s)
 		mas->last = oldbrk - 1;
+		mas_lock(mas);
 		ret = do_mas_align_munmap(mas, vma, mm, newbrk, oldbrk, uf,
 					  true);
+		if (ret != 1)
+			mas_unlock(mas);
 		goto munmap_full_vma;
 	}
 
 	vma_init(&unmap, mm);
 	unmap.vm_start = newbrk;
 	unmap.vm_end = oldbrk;
+	unmap.vm_pgoff = newbrk >> PAGE_SHIFT;
+	if (vma->anon_vma)
+		vma_set_anonymous(&unmap);
+
 	ret = userfaultfd_unmap_prep(&unmap, newbrk, oldbrk, uf);
 	if (ret)
 		return ret;
+
 	ret = 1;
 
 	// Change the oldbrk of vma to the newbrk of the munmap area
@@ -3047,10 +3018,12 @@ static int do_brk_munmap(struct ma_state *mas, struct vm_area_struct *vma,
 		anon_vma_interval_tree_pre_update_vma(vma);
 	}
 
-	vma->vm_end = newbrk;
+	mas_lock(mas);
 	if (vma_mas_remove(&unmap, mas))
 		goto mas_store_fail;
+	mas_unlock(mas);
 
+	vma->vm_end = newbrk;
 	if (vma->anon_vma) {
 		anon_vma_interval_tree_post_update_vma(vma);
 		anon_vma_unlock_write(vma->anon_vma);
@@ -3062,8 +3035,11 @@ static int do_brk_munmap(struct ma_state *mas, struct vm_area_struct *vma,
 		munlock_vma_pages_range(&unmap, newbrk, oldbrk);
 	}
 
+	mas_lock(mas);
+	next = mas_next(mas, ULONG_MAX);
+	mas_unlock(mas);
 	mmap_write_downgrade(mm);
-	unmap_region(mm, &unmap, vma, newbrk, oldbrk);
+	unmap_region(mm, mas->tree, &unmap, vma, next, newbrk, oldbrk);
 	/* Statistics */
 	vm_stat_account(mm, unmap.vm_flags, -unmap_pages);
 	if (unmap.vm_flags & VM_ACCOUNT)
@@ -3074,6 +3050,7 @@ static int do_brk_munmap(struct ma_state *mas, struct vm_area_struct *vma,
 	return ret;
 
 mas_store_fail:
+	mas_unlock(mas);
 	vma->vm_end = oldbrk;
 	if (vma->anon_vma) {
 		anon_vma_interval_tree_post_update_vma(vma);
@@ -3094,15 +3071,16 @@ static int do_brk_munmap(struct ma_state *mas, struct vm_area_struct *vma,
  * do not match then create a new anonymous VMA.  Eventually we may be able to
  * do some brk-specific accounting here.
  */
-static int do_brk_flags(struct ma_state *mas, struct vm_area_struct **brkvma,
+static int do_brk_flags(struct ma_state *mas, struct ma_state *ma_prev,
+			struct vm_area_struct **brkvma,
 			unsigned long addr, unsigned long len,
 			unsigned long flags)
 {
 	struct mm_struct *mm = current->mm;
-	struct vm_area_struct *prev = NULL, *vma;
+	struct vm_area_struct *vma;
 	int error;
 	unsigned long mapped_addr;
-	validate_mm_mt(mm);
+	validate_mm(mm);
 
 	/* Until we need other flags, refuse anything except VM_EXEC. */
 	if ((flags & (~VM_EXEC)) != 0)
@@ -3127,7 +3105,6 @@ static int do_brk_flags(struct ma_state *mas, struct vm_area_struct **brkvma,
 	if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT))
 		return -ENOMEM;
 
-	mas->last = addr + len - 1;
 	if (*brkvma) {
 		vma = *brkvma;
 		/* Expand the existing vma if possible; almost never a singular
@@ -3136,7 +3113,8 @@ static int do_brk_flags(struct ma_state *mas, struct vm_area_struct **brkvma,
 		if ((!vma->anon_vma ||
 		     list_is_singular(&vma->anon_vma_chain)) &&
 		     ((vma->vm_flags & ~VM_SOFTDIRTY) == flags)){
-			mas->index = vma->vm_start;
+			ma_prev->index = vma->vm_start;
+			ma_prev->last = addr + len - 1;
 
 			vma_adjust_trans_huge(vma, addr, addr + len, 0);
 			if (vma->anon_vma) {
@@ -3145,11 +3123,28 @@ static int do_brk_flags(struct ma_state *mas, struct vm_area_struct **brkvma,
 			}
 			vma->vm_end = addr + len;
 			vma->vm_flags |= VM_SOFTDIRTY;
-			mas_lock(mas);
-			if (mas_store_gfp(mas, vma, GFP_KERNEL))
+
+			mas_lock(ma_prev);
+#if defined(CONFIG_DEBUG_MAPLE_TREE)
+			/* Make sure no VMAs are about to be lost. */
+			{
+				MA_STATE(test, ma_prev->tree, vma->vm_start,
+					 vma->vm_end - 1);
+				struct vm_area_struct *vma_mas;
+				int count = 0;
+
+				mas_for_each(&test, vma_mas, vma->vm_end - 1)
+					count++;
+
+				BUG_ON(count != 1);
+				mas_set_range(ma_prev, vma->vm_start,
+					      vma->vm_end - 1);
+			}
+#endif
+			if (mas_store_gfp(ma_prev, vma, GFP_KERNEL))
 				goto mas_mod_fail;
+			mas_unlock(ma_prev);
 
-			mas_unlock(mas);
 			if (vma->anon_vma) {
 				anon_vma_interval_tree_post_update_vma(vma);
 				anon_vma_unlock_write(vma->anon_vma);
@@ -3157,12 +3152,7 @@ static int do_brk_flags(struct ma_state *mas, struct vm_area_struct **brkvma,
 			khugepaged_enter_vma_merge(vma, flags);
 			goto out;
 		}
-		prev = vma;
 	}
-	mas_lock(mas);
-	mas->index = addr;
-	mas_walk(mas);
-	mas_unlock(mas);
 
 	/* create a vma struct for an anonymous mapping */
 	vma = vm_area_alloc(mm);
@@ -3175,16 +3165,7 @@ static int do_brk_flags(struct ma_state *mas, struct vm_area_struct **brkvma,
 	vma->vm_pgoff = addr >> PAGE_SHIFT;
 	vma->vm_flags = flags;
 	vma->vm_page_prot = vm_get_page_prot(flags);
-	mas_lock(mas);
-	if (vma_mas_store(vma, mas))
-		goto mas_store_fail;
-	mas_unlock(mas);
-
-	if (!prev)
-		prev = mas_prev(mas, 0);
-
-	__vma_link_list(mm, vma, prev);
-	mm->map_count++;
+	vma_mas_link(mm, vma, mas, NULL);
 	*brkvma = vma;
 out:
 	perf_event_mmap(vma);
@@ -3193,17 +3174,16 @@ static int do_brk_flags(struct ma_state *mas, struct vm_area_struct **brkvma,
 	if (flags & VM_LOCKED)
 		mm->locked_vm += (len >> PAGE_SHIFT);
 	vma->vm_flags |= VM_SOFTDIRTY;
-	validate_mm_mt(mm);
+	validate_mm(mm);
 	return 0;
 
-mas_store_fail:
 	vm_area_free(vma);
 vma_alloc_fail:
 	vm_unacct_memory(len >> PAGE_SHIFT);
 	return -ENOMEM;
 
 mas_mod_fail:
-	mas_unlock(mas);
+	mas_unlock(ma_prev);
 	vma->vm_end = addr;
 	if (vma->anon_vma) {
 		anon_vma_interval_tree_post_update_vma(vma);
@@ -3231,9 +3211,7 @@ int vm_brk_flags(unsigned long addr, unsigned long request, unsigned long flags)
 	if (mmap_write_lock_killable(mm))
 		return -EINTR;
 
-	// This vma left intentionally blank.
-	mas_walk(&mas);
-	ret = do_brk_flags(&mas, &vma, addr, len, flags);
+	ret = do_brk_flags(&mas, &mas, &vma, addr, len, flags);
 	populate = ((mm->def_flags & VM_LOCKED) != 0);
 	mmap_write_unlock(mm);
 	if (populate && !ret)
@@ -3254,6 +3232,9 @@ void exit_mmap(struct mm_struct *mm)
 	struct mmu_gather tlb;
 	struct vm_area_struct *vma;
 	unsigned long nr_accounted = 0;
+	unsigned long index = 0;
+	MA_STATE(mas, &mm->mm_mt, 0, 0);
+	int count = 0;
 
 	/* mm's last user has gone, and its about to be pulled down */
 	mmu_notifier_release(mm);
@@ -3282,13 +3263,24 @@ void exit_mmap(struct mm_struct *mm)
 		mmap_write_unlock(mm);
 	}
 
-	if (mm->locked_vm)
-		unlock_range(mm->mmap, &vma, ULONG_MAX);
+	if (mm->locked_vm) {
+		mas_lock(&mas);
+		mas_for_each(&mas, vma, ULONG_MAX) {
+			if (vma->vm_flags & VM_LOCKED) {
+				mm->locked_vm -= vma_pages(vma);
+				munlock_vma_pages_all(vma);
+			}
+		}
+		mas_unlock(&mas);
+		mas_set(&mas, 0);
+	}
 
 	arch_exit_mmap(mm);
 
-	vma = mm->mmap;
-	if (!vma)	/* Can happen if dup_mmap() received an OOM */
+	rcu_read_lock();
+	vma = mas_find(&mas, ULONG_MAX);
+	rcu_read_unlock();
+	if (!vma) /* Can happen if dup_mmap() received an OOM */
 		return;
 
 	lru_add_drain();
@@ -3296,21 +3288,24 @@ void exit_mmap(struct mm_struct *mm)
 	tlb_gather_mmu_fullmm(&tlb, mm);
 	/* update_hiwater_rss(mm) here? but nobody should be looking */
 	/* Use -1 here to ensure all VMAs in the mm are unmapped */
-	unmap_vmas(&tlb, vma, 0, -1);
-	free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, USER_PGTABLES_CEILING);
+	unmap_vmas(&tlb, &mm->mm_mt, vma, 0, ULONG_MAX);
+	free_pgtables(&tlb, &mm->mm_mt, vma, FIRST_USER_ADDRESS, USER_PGTABLES_CEILING);
 	tlb_finish_mmu(&tlb);
 
 	/*
 	 * Walk the list again, actually closing and freeing it,
 	 * with preemption enabled, without holding any MM locks.
 	 */
-	while (vma) {
+	mt_for_each(&mm->mm_mt, vma, index, ULONG_MAX) {
 		if (vma->vm_flags & VM_ACCOUNT)
 			nr_accounted += vma_pages(vma);
-		vma = remove_vma(vma);
+		remove_vma(vma);
+		count++;
 		cond_resched();
 	}
 
+	BUG_ON(count != mm->map_count);
+
 	trace_exit_mmap(mm);
 	mtree_destroy(&mm->mm_mt);
 	vm_unacct_memory(nr_accounted);
@@ -3719,12 +3714,13 @@ int mm_take_all_locks(struct mm_struct *mm)
 {
 	struct vm_area_struct *vma;
 	struct anon_vma_chain *avc;
+	MA_STATE(mas, &mm->mm_mt, 0, 0);
 
 	BUG_ON(mmap_read_trylock(mm));
 
 	mutex_lock(&mm_all_locks_mutex);
 
-	for (vma = mm->mmap; vma; vma = vma->vm_next) {
+	mas_for_each(&mas, vma, ULONG_MAX) {
 		if (signal_pending(current))
 			goto out_unlock;
 		if (vma->vm_file && vma->vm_file->f_mapping &&
@@ -3732,7 +3728,8 @@ int mm_take_all_locks(struct mm_struct *mm)
 			vm_lock_mapping(mm, vma->vm_file->f_mapping);
 	}
 
-	for (vma = mm->mmap; vma; vma = vma->vm_next) {
+	mas_set(&mas, 0);
+	mas_for_each(&mas, vma, ULONG_MAX) {
 		if (signal_pending(current))
 			goto out_unlock;
 		if (vma->vm_file && vma->vm_file->f_mapping &&
@@ -3740,7 +3737,8 @@ int mm_take_all_locks(struct mm_struct *mm)
 			vm_lock_mapping(mm, vma->vm_file->f_mapping);
 	}
 
-	for (vma = mm->mmap; vma; vma = vma->vm_next) {
+	mas_set(&mas, 0);
+	mas_for_each(&mas, vma, ULONG_MAX) {
 		if (signal_pending(current))
 			goto out_unlock;
 		if (vma->anon_vma)
@@ -3799,11 +3797,12 @@ void mm_drop_all_locks(struct mm_struct *mm)
 {
 	struct vm_area_struct *vma;
 	struct anon_vma_chain *avc;
+	MA_STATE(mas, &mm->mm_mt, 0, 0);
 
 	BUG_ON(mmap_read_trylock(mm));
 	BUG_ON(!mutex_is_locked(&mm_all_locks_mutex));
 
-	for (vma = mm->mmap; vma; vma = vma->vm_next) {
+	mas_for_each(&mas, vma, ULONG_MAX) {
 		if (vma->anon_vma)
 			list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
 				vm_unlock_anon_vma(avc->anon_vma);
diff --git a/mm/nommu.c b/mm/nommu.c
index 675aee78f8f2..63dd382d09d3 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -577,7 +577,6 @@ static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma)
 	mas_reset(&mas);
 	/* add the VMA to the tree */
 	vma_mas_store(vma, &mas);
-	__vma_link_list(mm, vma, prev);
 }
 
 /*
@@ -602,7 +601,6 @@ static void delete_vma_from_mm(struct vm_area_struct *vma)
 
 	/* remove from the MM's tree and list */
 	vma_mas_remove(vma, &mas);
-	__vma_unlink_list(vma->vm_mm, vma);
 }
 
 /*
@@ -1415,7 +1413,7 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len, struct list
 				return -EINVAL;
 			if (end == vma->vm_end)
 				goto erase_whole_vma;
-			vma = vma->vm_next;
+			vma = vma_next(mm, vma);
 		} while (vma);
 		return -EINVAL;
 	} else {
@@ -1472,7 +1470,7 @@ void exit_mmap(struct mm_struct *mm)
 	mm->total_vm = 0;
 
 	while ((vma = mm->mmap)) {
-		mm->mmap = vma->vm_next;
+		mm->mmap = vma_next(mm, vma);
 		delete_vma_from_mm(vma);
 		delete_vma(mm, vma);
 		cond_resched();
diff --git a/mm/util.c b/mm/util.c
index af8508e07a8e..1a420f0742f4 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -271,46 +271,6 @@ void *memdup_user_nul(const void __user *src, size_t len)
 }
 EXPORT_SYMBOL(memdup_user_nul);
 
-void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
-		struct vm_area_struct *prev)
-{
-	struct vm_area_struct *next;
-
-	vma->vm_prev = prev;
-	if (prev) {
-		next = prev->vm_next;
-		prev->vm_next = vma;
-	} else {
-		next = mm->mmap;
-		mm->mmap = vma;
-	}
-	vma->vm_next = next;
-	if (next)
-		next->vm_prev = vma;
-	else
-		mm->highest_vm_end = vm_end_gap(vma);
-}
-
-void __vma_unlink_list(struct mm_struct *mm, struct vm_area_struct *vma)
-{
-	struct vm_area_struct *prev, *next;
-
-	next = vma->vm_next;
-	prev = vma->vm_prev;
-	if (prev)
-		prev->vm_next = next;
-	else
-		mm->mmap = next;
-	if (next)
-		next->vm_prev = prev;
-	else {
-		if (prev)
-			mm->highest_vm_end = vm_end_gap(prev);
-		else
-			mm->highest_vm_end = 0;
-	}
-}
-
 /* Check if the vma is being used as a stack by this task */
 int vma_is_stack_for_current(struct vm_area_struct *vma)
 {
-- 
2.30.2
