From: Liam R. Howlett <Liam.Howlett@Oracle.com>
Date: Fri, 18 Dec 2020 01:20:43 +0000 (-0500)
Subject: mm: remove the vma linked list
X-Git-Url: https://www.infradead.org/git/?a=commitdiff_plain;h=f5ec451c5418d9003d66c41f637b4a68f295ca7c;p=users%2Fjedix%2Flinux-maple.git

mm: remove the vma linked list

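The vma linked list (vm_next/vm_prev in vm_area_struct, mm->mmap) and
mm->highest_vm_end are removed, and the remaining users are converted
to the maple tree.  The basic conversion pattern is sketched below for
some struct mm_struct *mm; visit() stands in for the per-VMA work and
is not a function in this patch:

	struct vm_area_struct *vma;
	MA_STATE(mas, &mm->mm_mt, 0, 0);

	/* Before: walk the per-mm linked list. */
	for (vma = mm->mmap; vma; vma = vma->vm_next)
		visit(vma);

	/* After: iterate the maple tree over the whole address space. */
	mas_for_each(&mas, vma, ULONG_MAX)
		visit(vma);

Page-table walks that used mm->highest_vm_end as an upper bound (the
walk_page_range() callers in clear_refs_write() and memcontrol) now
pass -1 (ULONG_MAX) instead.
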
Signed-off-by: Liam R. Howlett <Liam.Howlett@Oracle.com>
---
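A note on lock handling: a maple state caches its position in the
tree, so any iteration that drops mmap_lock must call mas_pause() on
the state before unlocking; the next mas_find()/mas_for_each() step
then re-walks from the last index.  show_smaps_rollup() and
apply_mlockall_flags() below both follow this shape:

	mas_for_each(&mas, vma, ULONG_MAX) {
		...
		if (mmap_lock_is_contended(mm)) {
			mas_pause(&mas);	/* forget cached position */
			mmap_read_unlock(mm);
			if (mmap_read_lock_killable(mm))
				goto out;
		}
	}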

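free_pgtables() and unmap_vmas() now take the first VMA from the
caller and iterate with do { } while (mas_find(...)), so the maple
state passed in must already be positioned on that VMA (via mas_find()
or mas_walk()); otherwise the first entry would be visited twice.  A
minimal caller sketch, modelled on exit_mmap() (local names are
illustrative and tlb setup is omitted):

	MA_STATE(mas, &mm->mm_mt, FIRST_USER_ADDRESS, FIRST_USER_ADDRESS);
	struct ma_state mas_pgtb;
	struct vm_area_struct *vma = mas_find(&mas, ULONG_MAX);

	if (vma) {
		mas_pgtb = mas;	/* second state: unmap_vmas() consumes mas */
		unmap_vmas(&tlb, vma, &mas, 0, -1);
		free_pgtables(&tlb, &mas_pgtb, vma, FIRST_USER_ADDRESS,
			      USER_PGTABLES_CEILING);
	}
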
diff --git a/fs/coredump.c b/fs/coredump.c
index 0699ce6f6cc1..5b950af6e793 100644
--- a/fs/coredump.c
+++ b/fs/coredump.c
@@ -1043,7 +1043,7 @@ whole:
 static struct vm_area_struct *first_vma(struct task_struct *tsk,
 					struct vm_area_struct *gate_vma)
 {
-	struct vm_area_struct *ret = tsk->mm->mmap;
+	struct vm_area_struct *ret = find_vma(tsk->mm, 0);
 
 	if (ret)
 		return ret;
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 20a0fb414c4f..a331120a1cd3 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -840,16 +840,16 @@ static int show_smaps_rollup(struct seq_file *m, void *v)
 {
 	struct proc_maps_private *priv = m->private;
 	struct mem_size_stats mss;
-	struct mm_struct *mm;
+	struct mm_struct *mm = priv->mm;
 	struct vm_area_struct *vma;
-	unsigned long last_vma_end = 0;
+	unsigned long vma_start = 0, last_vma_end = 0;
 	int ret = 0;
+	MA_STATE(mas, &mm->mm_mt, 0, 0);
 
 	priv->task = get_proc_task(priv->inode);
 	if (!priv->task)
 		return -ESRCH;
 
-	mm = priv->mm;
 	if (!mm || !mmget_not_zero(mm)) {
 		ret = -ESRCH;
 		goto out_put_task;
@@ -862,8 +862,14 @@ static int show_smaps_rollup(struct seq_file *m, void *v)
 		goto out_put_mm;
 
 	hold_task_mempolicy(priv);
+	vma = mas_find(&mas, ULONG_MAX);
+
+	if (vma)
+		vma_start = vma->vm_start;
+
+	mas_set(&mas, 0);
 
-	for (vma = priv->mm->mmap; vma;) {
+	mas_for_each(&mas, vma, -1) {
 		smap_gather_stats(vma, &mss, 0);
 		last_vma_end = vma->vm_end;
 
@@ -872,6 +878,7 @@ static int show_smaps_rollup(struct seq_file *m, void *v)
 		 * access it for write request.
 		 */
 		if (mmap_lock_is_contended(mm)) {
+			mas_pause(&mas);
 			mmap_read_unlock(mm);
 			ret = mmap_read_lock_killable(mm);
 			if (ret) {
@@ -915,7 +922,8 @@ static int show_smaps_rollup(struct seq_file *m, void *v)
 			 *    contains last_vma_end.
 			 *    Iterate VMA' from last_vma_end.
 			 */
-			vma = find_vma(mm, last_vma_end - 1);
+			mas_set(&mas, last_vma_end - 1);
+			vma = mas_find(&mas, -1);
 			/* Case 3 above */
 			if (!vma)
 				break;
@@ -929,11 +937,9 @@ static int show_smaps_rollup(struct seq_file *m, void *v)
 				smap_gather_stats(vma, &mss, last_vma_end);
 		}
 		/* Case 2 above */
-		vma = vma_next(mm, vma);
 	}
 
-	show_vma_header_prefix(m, priv->mm->mmap->vm_start,
-			       last_vma_end, 0, 0, 0, 0);
+	show_vma_header_prefix(m, vma_start, last_vma_end, 0, 0, 0, 0);
 	seq_pad(m, ' ');
 	seq_puts(m, "[rollup]\n");
 
@@ -1257,7 +1263,7 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
 						0, NULL, mm, 0, -1UL);
 			mmu_notifier_invalidate_range_start(&range);
 		}
-		walk_page_range(mm, 0, mm->highest_vm_end, &clear_refs_walk_ops,
+		walk_page_range(mm, 0, -1, &clear_refs_walk_ops,
 				&cp);
 		if (type == CLEAR_REFS_SOFT_DIRTY)
 			mmu_notifier_invalidate_range_end(&range);
diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
index 060e35f4bde9..b0eac6fb3e8e 100644
--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -792,7 +792,7 @@ int userfaultfd_unmap_prep(struct vm_area_struct *vma,
 			   unsigned long start, unsigned long end,
 			   struct list_head *unmaps)
 {
-	MA_STATE(mas, &mm->mm_mt, vma->vm_start, vma->vm_start);
+	MA_STATE(mas, &vma->vm_mm->mm_mt, vma->vm_start, vma->vm_start);
 
 	mas_for_each(&mas, vma, end) {
 		struct userfaultfd_unmap_ctx *unmap_ctx;
@@ -1335,7 +1335,7 @@ static int userfaultfd_register(struct userfaultfd_ctx *ctx,
 	found = false;
 	basic_ioctls = false;
 	mas_set(&mas, vma->vm_start);
-	mas_for_each(&mas, curr, end) {
+	mas_for_each(&mas, cur, end) {
 		cond_resched();
 
 		BUG_ON(!!cur->vm_userfaultfd_ctx.ctx ^
@@ -1451,7 +1451,7 @@ static int userfaultfd_register(struct userfaultfd_ctx *ctx,
 	skip:
 		prev = vma;
 		start = vma->vm_end;
-		vma = vma_next(vma);
+		vma = vma_next(mm, vma);
 	} while (vma && vma->vm_start < end);
 out_unlock:
 	mmap_write_unlock(mm);
@@ -1624,7 +1624,7 @@ static int userfaultfd_unregister(struct userfaultfd_ctx *ctx,
 	skip:
 		prev = vma;
 		start = vma->vm_end;
-		vma = vma_next(vma);
+		vma = vma_next(mm, vma);
 	} while (vma && vma->vm_start < end);
 out_unlock:
 	mmap_write_unlock(mm);
diff --git a/include/linux/mm.h b/include/linux/mm.h
index a9ad566f24fd..1b8900398a65 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2655,7 +2655,7 @@ extern struct vm_area_struct *find_vma_intersection(struct mm_struct *mm,
 		     unsigned long start_addr, unsigned long end_addr);
 
 static inline struct vm_area_struct *vma_next(struct mm_struct *mm,
-			struct vm_area_struct *vma)
+			const struct vm_area_struct *vma)
 {
 	MA_STATE(mas, &mm->mm_mt, 0, 0);
 
@@ -2664,7 +2664,7 @@ static inline struct vm_area_struct *vma_next(struct mm_struct *mm,
 }
 
 static inline struct vm_area_struct *vma_prev(struct mm_struct *mm,
-			struct vm_area_struct *vma)
+			const struct vm_area_struct *vma)
 {
 	MA_STATE(mas, &mm->mm_mt, 0, 0);
 
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 4e87ea66ca3c..2548f9bda064 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -303,14 +303,11 @@ struct vm_userfaultfd_ctx {};
  * library, the executable area etc).
  */
 struct vm_area_struct {
-	/* The first cache line has the info for VMA tree walking. */
-
 	unsigned long vm_start;		/* Our start address within vm_mm. */
 	unsigned long vm_end;		/* The first byte after our end address
 					   within vm_mm. */
 
-	/* linked list of VM areas per task, sorted by address */
-	struct vm_area_struct *vm_next, *vm_prev;
 	struct mm_struct *vm_mm;	/* The address space we belong to. */
 
 	/*
@@ -323,7 +319,6 @@ struct vm_area_struct {
 	/* Information about our backing store: */
 	unsigned long vm_pgoff;		/* Offset (within vm_file) in PAGE_SIZE
 					   units */
-	/* Second cache line starts here. */
 	struct file * vm_file;		/* File we map to (can be NULL). */
 	/*
 	 * For areas with an address space and backing store,
@@ -377,7 +372,6 @@ struct core_state {
 struct kioctx_table;
 struct mm_struct {
 	struct {
-		struct vm_area_struct *mmap;		/* list of VMAs */
 		struct maple_tree mm_mt;
 #ifdef CONFIG_MMU
 		unsigned long (*get_unmapped_area) (struct file *filp,
@@ -392,7 +387,6 @@ struct mm_struct {
 		unsigned long mmap_compat_legacy_base;
 #endif
 		unsigned long task_size;	/* size of task vm space */
-		unsigned long highest_vm_end;	/* highest vma end address */
 		pgd_t * pgd;
 
 #ifdef CONFIG_MEMBARRIER
diff --git a/kernel/acct.c b/kernel/acct.c
index c1cb3d68948e..bcfe2cf7c7c0 100644
--- a/kernel/acct.c
+++ b/kernel/acct.c
@@ -537,16 +537,14 @@ void acct_collect(long exitcode, int group_dead)
 	struct pacct_struct *pacct = &current->signal->pacct;
 	u64 utime, stime;
 	unsigned long vsize = 0;
 
 	if (group_dead && current->mm) {
 		struct vm_area_struct *vma;
+		MA_STATE(mas, &current->mm->mm_mt, 0, 0);
 
 		mmap_read_lock(current->mm);
-		vma = current->mm->mmap;
-		while (vma) {
+		mas_for_each(&mas, vma, -1)
 			vsize += vma->vm_end - vma->vm_start;
-			vma = vma_next(vma->vm_mm, vma);
-		}
 		mmap_read_unlock(current->mm);
 	}
 
diff --git a/kernel/fork.c b/kernel/fork.c
index b8c25c2e9587..d3c22604493f 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -363,7 +363,6 @@ struct vm_area_struct *vm_area_dup(struct vm_area_struct *orig)
 		 */
 		*new = data_race(*orig);
 		INIT_LIST_HEAD(&new->anon_vma_chain);
-		new->vm_next = new->vm_prev = NULL;
 	}
 	return new;
 }
@@ -468,7 +467,7 @@ EXPORT_SYMBOL(free_task);
 static __latent_entropy int dup_mmap(struct mm_struct *mm,
 					struct mm_struct *oldmm)
 {
-	struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
+	struct vm_area_struct *mpnt, *tmp;
 	int retval;
 	unsigned long charge = 0;
 	MA_STATE(old_mas, &oldmm->mm_mt, 0, 0);
@@ -495,7 +494,6 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm,
 	mm->exec_vm = oldmm->exec_vm;
 	mm->stack_vm = oldmm->stack_vm;
 
-	pprev = &mm->mmap;
 	retval = ksm_fork(mm, oldmm);
 	if (retval)
 		goto out;
@@ -503,8 +501,6 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm,
 	if (retval)
 		goto out;
 
-	prev = NULL;
-
 	retval = mas_entry_count(&mas, oldmm->map_count);
 	if (retval)
 		goto fail_nomem;
@@ -579,14 +575,6 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm,
 		if (is_vm_hugetlb_page(tmp))
 			reset_vma_resv_huge_pages(tmp);
 
-		/*
-		 * Link in the new vma and copy the page table entries.
-		 */
-		*pprev = tmp;
-		pprev = &tmp->vm_next;
-		tmp->vm_prev = prev;
-		prev = tmp;
-
 		/* Link the vma into the MT */
 		mas.index = tmp->vm_start;
 		mas.last = tmp->vm_end - 1;
@@ -1008,7 +996,6 @@ static void mm_init_uprobes_state(struct mm_struct *mm)
 static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
 	struct user_namespace *user_ns)
 {
-	mm->mmap = NULL;
 	mt_init_flags(&mm->mm_mt, MAPLE_ALLOC_RANGE);
 	atomic_set(&mm->mm_users, 1);
 	atomic_set(&mm->mm_count, 1);
diff --git a/mm/debug.c b/mm/debug.c
index d8ed9d738326..8dd7a2378248 100644
--- a/mm/debug.c
+++ b/mm/debug.c
@@ -203,8 +203,8 @@ void dump_vma(const struct vm_area_struct *vma)
 		"prot %lx anon_vma %px vm_ops %px\n"
 		"pgoff %lx file %px private_data %px\n"
 		"flags: %#lx(%pGv)\n",
-		vma, (void *)vma->vm_start, (void *)vma->vm_end, vma->vm_next,
-		vma->vm_prev, vma->vm_mm,
+		vma, (void *)vma->vm_start, (void *)vma->vm_end,
+		vma_next(vma->vm_mm, vma), vma_prev(vma->vm_mm, vma), vma->vm_mm,
 		(unsigned long)pgprot_val(vma->vm_page_prot),
 		vma->anon_vma, vma->vm_ops, vma->vm_pgoff,
 		vma->vm_file, vma->vm_private_data,
@@ -214,11 +214,11 @@ EXPORT_SYMBOL(dump_vma);
 
 void dump_mm(const struct mm_struct *mm)
 {
-	pr_emerg("mm %px mmap %px task_size %lu\n"
+	pr_emerg("mm %px task_size %lu\n"
 #ifdef CONFIG_MMU
 		"get_unmapped_area %px\n"
 #endif
-		"mmap_base %lu mmap_legacy_base %lu highest_vm_end %lu\n"
+		"mmap_base %lu mmap_legacy_base %lu\n"
 		"pgd %px mm_users %d mm_count %d pgtables_bytes %lu map_count %d\n"
 		"hiwater_rss %lx hiwater_vm %lx total_vm %lx locked_vm %lx\n"
 		"pinned_vm %llx data_vm %lx exec_vm %lx stack_vm %lx\n"
@@ -242,11 +242,11 @@ void dump_mm(const struct mm_struct *mm)
 		"tlb_flush_pending %d\n"
 		"def_flags: %#lx(%pGv)\n",
 
-		mm, mm->mmap, mm->task_size,
+		mm, mm->task_size,
 #ifdef CONFIG_MMU
 		mm->get_unmapped_area,
 #endif
-		mm->mmap_base, mm->mmap_legacy_base, mm->highest_vm_end,
+		mm->mmap_base, mm->mmap_legacy_base,
 		mm->pgd, atomic_read(&mm->mm_users),
 		atomic_read(&mm->mm_count),
 		mm_pgtables_bytes(mm),
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index c2627619d622..4005beb23daf 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2309,7 +2309,7 @@ void vma_adjust_trans_huge(struct vm_area_struct *vma,
 	 * contain an hugepage: check if we need to split an huge pmd.
 	 */
 	if (adjust_next > 0) {
-		struct vm_area_struct *next = vma_next(vma);
+		struct vm_area_struct *next = vma_next(vma->vm_mm, vma);
 		unsigned long nstart = next->vm_start;
 		nstart += adjust_next;
 		if (nstart & ~HPAGE_PMD_MASK &&
diff --git a/mm/internal.h b/mm/internal.h
index 39fe96678504..aaf382dbee14 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -37,7 +37,7 @@ void page_writeback_init(void);
 vm_fault_t do_swap_page(struct vm_fault *vmf);
 
 void free_pgtables(struct mmu_gather *tlb, struct ma_state *mas,
-		unsigned long floor, unsigned long ceiling);
+	struct vm_area_struct *vma, unsigned long floor, unsigned long ceiling);
 
 static inline bool can_madv_lru_vma(struct vm_area_struct *vma)
 {
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index b20228f10725..fbfcee552da6 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -2049,7 +2049,7 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
 	struct mm_struct *mm;
 	struct vm_area_struct *vma;
 	int progress = 0;
-	MA_STATE(mas, &mm->mm_mt, 0, 0);
+	MA_STATE(mas, NULL, 0, 0);
 
 	VM_BUG_ON(!pages);
 	lockdep_assert_held(&khugepaged_mm_lock);
@@ -2066,6 +2066,7 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
 	khugepaged_collapse_pte_mapped_thps(mm_slot);
 
 	mm = mm_slot->mm;
+	mas.tree = &mm->mm_mt;
 	/*
 	 * Don't wait for semaphore (to avoid long wait times).  Just move to
 	 * the next mm on the list.
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 29459a6ce1c7..7fe753d6a81f 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -5892,7 +5892,7 @@ static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm)
 	unsigned long precharge;
 
 	mmap_read_lock(mm);
-	walk_page_range(mm, 0, mm->highest_vm_end, &precharge_walk_ops, NULL);
+	walk_page_range(mm, 0, -1, &precharge_walk_ops, NULL);
 	mmap_read_unlock(mm);
 
 	precharge = mc.precharge;
@@ -6190,9 +6190,7 @@ retry:
 	 * When we have consumed all precharges and failed in doing
 	 * additional charge, the page walk just aborts.
 	 */
-	walk_page_range(mc.mm, 0, mc.mm->highest_vm_end, &charge_walk_ops,
-			NULL);
-
+	walk_page_range(mc.mm, 0, -1, &charge_walk_ops, NULL);
 	mmap_read_unlock(mc.mm);
 	atomic_dec(&mc.from->moving_account);
 }
diff --git a/mm/memory.c b/mm/memory.c
index 18246038e731..2bf7b6291f13 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -388,15 +388,14 @@ void free_pgd_range(struct mmu_gather *tlb,
 }
 
 void free_pgtables(struct mmu_gather *tlb, struct ma_state *mas,
-		      unsigned long floor, unsigned long ceiling)
+	struct vm_area_struct *vma, unsigned long floor, unsigned long ceiling)
 {
+	struct vm_area_struct *next;
 	struct ma_state ma_next = *mas;
-	struct vm_area_struct *vma;
 
-	mas_find(&ma_next, ceiling - 1);
-	mas_for_each(mas, vma, ceiling - 1) {
-		struct vm_area_struct *next = mas_find(&ma_next, ceiling - 1);
+	do {
 		unsigned long addr = vma->vm_start;
+		next = mas_find(&ma_next, ceiling - 1);
 
 		/*
 		 * Hide vma from rmap and truncate_pagecache before freeing
@@ -422,7 +421,7 @@ void free_pgtables(struct mmu_gather *tlb, struct ma_state *mas,
 			free_pgd_range(tlb, addr, vma->vm_end,
 				floor, next ? next->vm_start : ceiling);
 		}
-	}
+	} while ((vma = mas_find(mas, (ceiling - 1))) != NULL);
 }
 
 int __pte_alloc(struct mm_struct *mm, pmd_t *pmd)
@@ -1504,8 +1503,9 @@ void unmap_vmas(struct mmu_gather *tlb,
 	mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma, vma->vm_mm,
 				start_addr, end_addr);
 	mmu_notifier_invalidate_range_start(&range);
-	mas_for_each(mas, vma, end_addr - 1)
+	do {
 		unmap_single_vma(tlb, vma, start_addr, end_addr, NULL);
+	} while ((vma = mas_find(mas, end_addr - 1)) != NULL);
 	mmu_notifier_invalidate_range_end(&range);
 }
 
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index f16ff55b10c8..4757b7939920 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1077,6 +1077,7 @@ static int migrate_to_node(struct mm_struct *mm, int source, int dest,
 			   int flags)
 {
 	nodemask_t nmask;
+	struct vm_area_struct *vma;
 	LIST_HEAD(pagelist);
 	int err = 0;
 	struct migration_target_control mtc = {
@@ -1092,8 +1093,9 @@ static int migrate_to_node(struct mm_struct *mm, int source, int dest,
 	 * need migration.  Between passing in the full user address
 	 * space range and MPOL_MF_DISCONTIG_OK, this call can not fail.
 	 */
+	vma = find_vma(mm, 0);
 	VM_BUG_ON(!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)));
-	queue_pages_range(mm, mm->mmap->vm_start, mm->task_size, &nmask,
+	queue_pages_range(mm, vma->vm_start, mm->task_size, &nmask,
 			flags | MPOL_MF_DISCONTIG_OK, &pagelist);
 
 	if (!list_empty(&pagelist)) {
diff --git a/mm/mlock.c b/mm/mlock.c
index c5337fbf7139..4f6350aa982d 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -652,10 +652,6 @@ static unsigned long count_mm_mlocked_page_nr(struct mm_struct *mm,
 	if (mm == NULL)
 		mm = current->mm;
 
-	vma = mas_find(&mas, ULONG_MAX);
-	if (vma == NULL)
-		vma = mm->mmap;
-
 	mas_for_each(&mas, vma, ULONG_MAX) {
 		if (start >= vma->vm_end)
 			continue;
@@ -771,6 +767,7 @@ static int apply_mlockall_flags(int flags)
 {
 	struct vm_area_struct * vma, * prev = NULL;
 	vm_flags_t to_add = 0;
+	MA_STATE(mas, &current->mm->mm_mt, 0, 0);
 
 	current->mm->def_flags &= VM_LOCKED_CLEAR_MASK;
 	if (flags & MCL_FUTURE) {
@@ -789,7 +786,7 @@ static int apply_mlockall_flags(int flags)
 			to_add |= VM_LOCKONFAULT;
 	}
 
-	for (vma = current->mm->mmap; vma ; vma = vma_next(vma->vm_mm, prev)) {
+	mas_for_each(&mas, vma, ULONG_MAX) {
 		vm_flags_t newflags;
 
 		newflags = vma->vm_flags & VM_LOCKED_CLEAR_MASK;
@@ -797,6 +794,7 @@ static int apply_mlockall_flags(int flags)
 
 		/* Ignore errors */
 		mlock_fixup(vma, &prev, vma->vm_start, vma->vm_end, newflags);
+		mas_pause(&mas);
 		cond_resched();
 	}
 out:
diff --git a/mm/mmap.c b/mm/mmap.c
index c270aba9c263..2945140df9b8 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -177,8 +177,9 @@ static void remove_vma(struct vm_area_struct *vma)
 
 static int do_brk_munmap(struct ma_state *mas, struct vm_area_struct *vma,
 			 unsigned long newbrk, unsigned long oldbrk,
-			 struct list_head *uf, unsigned long max);
-static int do_brk_flags(struct ma_state *mas, struct vm_area_struct **brkvma,
+			 struct list_head *uf);
+static int do_brk_flags(struct ma_state *mas, struct ma_state *ma_prev,
+			struct vm_area_struct **brkvma,
 			unsigned long addr, unsigned long request,
 			unsigned long flags);
 SYSCALL_DEFINE1(brk, unsigned long, brk)
@@ -191,7 +192,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
 	bool downgraded = false;
 	LIST_HEAD(uf);
 	MA_STATE(mas, &mm->mm_mt, 0, 0);
-	struct ma_state ma_next;
+	struct ma_state ma_neighbour;
 
 	if (mmap_write_lock_killable(mm))
 		return -EINTR;
@@ -233,8 +234,6 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
 
 	mas_set(&mas, newbrk);
 	brkvma = mas_walk(&mas);
-	ma_next = mas;
-	next = mas_next(&ma_next, -1);
 	if (brkvma) { // munmap necessary, there is something at newbrk.
 		/*
 		 * Always allow shrinking brk.
@@ -250,9 +249,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
 		 * before calling do_brk_munmap().
 		 */
 		mm->brk = brk;
-		mas.last = oldbrk - 1;
-		ret = do_brk_munmap(&mas, brkvma, newbrk, oldbrk, &uf,
-			    next ? next->vm_start : USER_PGTABLES_CEILING);
+		ret = do_brk_munmap(&mas, brkvma, newbrk, oldbrk, &uf);
 		if (ret == 1)  {
 			downgraded = true;
 			goto success;
@@ -262,18 +259,21 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
 		mm->brk = origbrk;
 		goto out;
 	}
+	ma_neighbour = mas;
+	next = mas_next(&ma_neighbour, newbrk + PAGE_SIZE + stack_guard_gap);
 	/* Only check if the next VMA is within the stack_guard_gap of the
 	 * expansion area */
 	/* Check against existing mmap mappings. */
 	if (next && newbrk + PAGE_SIZE > vm_start_gap(next))
 		goto out;
 
-	brkvma = mas_prev(&mas, mm->start_brk);
+	brkvma = mas_prev(&ma_neighbour, mm->start_brk);
 	if (brkvma && (brkvma->vm_start >= oldbrk))
 		goto out; // Trying to map over another vma.
 
 	/* Ok, looks good - let it rip. */
-	if (do_brk_flags(&mas, &brkvma, oldbrk, newbrk - oldbrk, 0) < 0)
+	if (do_brk_flags(&mas, &ma_neighbour, &brkvma, oldbrk,
+			 newbrk - oldbrk, 0) < 0)
 		goto out;
 
 	mm->brk = brk;
@@ -303,7 +303,6 @@ static void validate_mm(struct mm_struct *mm)
 {
 	int bug = 0;
 	int i = 0;
-	unsigned long highest_address = 0;
 	struct vm_area_struct *vma;
 	MA_STATE(mas, &mm->mm_mt, 0, 0);
 
@@ -319,18 +318,12 @@ static void validate_mm(struct mm_struct *mm)
 			anon_vma_unlock_read(anon_vma);
 		}
 #endif
-		highest_address = vm_end_gap(vma);
 		i++;
 	}
 	if (i != mm->map_count) {
 		pr_emerg("map_count %d mas_for_each %d\n", mm->map_count, i);
 		bug = 1;
 	}
-	if (highest_address != mm->highest_vm_end) {
-		pr_emerg("mm->highest_vm_end %lx, found %lx\n",
-			  mm->highest_vm_end, highest_address);
-		bug = 1;
-	}
 	VM_BUG_ON_MM(bug, mm);
 }
 #else // !CONFIG_DEBUG_MAPLE_TREE
@@ -380,7 +373,7 @@ anon_vma_interval_tree_post_update_vma(struct vm_area_struct *vma)
  *
  * Returns: True if there is an overlapping VMA, false otherwise
  */
-static bool range_has_overlap(struct mm_struct *mm, unsigned long start,
+static inline bool range_has_overlap(struct mm_struct *mm, unsigned long start,
 			      unsigned long end, struct vm_area_struct **pprev)
 {
 	struct vm_area_struct *existing;
@@ -495,7 +488,7 @@ void vma_store(struct mm_struct *mm, struct vm_area_struct *vma)
 }
 
 static void vma_mas_link(struct mm_struct *mm, struct vm_area_struct *vma,
-			 struct ma_state *mas, struct vm_area_struct *prev)
+			 struct ma_state *mas)
 {
 	struct address_space *mapping = NULL;
 
@@ -505,7 +498,6 @@ static void vma_mas_link(struct mm_struct *mm, struct vm_area_struct *vma,
 	}
 
 	vma_mas_store(vma, mas);
-	__vma_link_list(mm, vma, prev);
 	__vma_link_file(vma);
 
 	if (mapping)
@@ -514,8 +506,7 @@ static void vma_mas_link(struct mm_struct *mm, struct vm_area_struct *vma,
 	mm->map_count++;
 	validate_mm(mm);
 }
-static void vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
-			struct vm_area_struct *prev)
+static void vma_link(struct mm_struct *mm, struct vm_area_struct *vma)
 {
 	struct address_space *mapping = NULL;
 
@@ -525,7 +516,6 @@ static void vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
 	}
 
 	vma_mt_store(mm, vma);
-	__vma_link_list(mm, vma, prev);
 	__vma_link_file(vma);
 
 	if (mapping)
@@ -539,14 +529,12 @@ static void vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
  * Helper for vma_adjust() in the split_vma insert case: insert a vma into the
- * mm's list and the mm tree.  It has already been inserted into the interval tree.
+ * mm tree.  It has already been inserted into the interval tree.
  */
-static void __insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
+static inline void __insert_vm_struct(struct mm_struct *mm,
+				      struct vm_area_struct *vma)
 {
-	struct vm_area_struct *prev;
-
-	if (range_has_overlap(mm, vma->vm_start, vma->vm_end, &prev))
+	if (find_vma_intersection(mm, vma->vm_start, vma->vm_end))
 		BUG();
 	vma_mt_store(mm, vma);
-	__vma_link_list(mm, vma, prev);
 	mm->map_count++;
 }
 
@@ -605,13 +593,8 @@ inline int vma_expand(struct ma_state *mas, struct vm_area_struct *vma,
 	/* Expanding over the next vma */
 	if (remove_next) {
-		/* Remove from mm linked list - also updates highest_vm_end */
+		/* Remove next from the file mapping, if any */
-		__vma_unlink_list(mm, next);
-
 		if (file)
 			__remove_shared_vm_struct(next, file, mapping);
-
-	} else if (!next) {
-		mm->highest_vm_end = vm_end_gap(vma);
 	}
 
 	if (anon_vma) {
@@ -797,8 +780,6 @@ again:
 		else
 			vma_changed = true;
 		vma->vm_end = end;
-		if (!next)
-			mm->highest_vm_end = vm_end_gap(vma);
 	}
 
 	if (vma_changed)
@@ -819,7 +800,6 @@ again:
 	}
 
 	if (remove_next) {
-		__vma_unlink_list(mm, next);
 		if (file)
 			__remove_shared_vm_struct(next, file, mapping);
 	} else if (insert) {
@@ -886,27 +866,6 @@ again:
 			remove_next = 1;
 			end = next->vm_end;
 			goto again;
-		} else if (!next) {
-			/*
-			 * If remove_next == 2 we obviously can't
-			 * reach this path.
-			 *
-			 * If remove_next == 3 we can't reach this
-			 * path because pre-swap() next is always not
-			 * NULL. pre-swap() "next" is not being
-			 * removed and its next->vm_end is not altered
-			 * (and furthermore "end" already matches
-			 * next->vm_end in remove_next == 3).
-			 *
-			 * We reach this only in the remove_next == 1
-			 * case if the "next" vma that was removed was
-			 * the highest vma of the mm. However in such
-			 * case next->vm_end == "end" and the extended
-			 * "vma" has vma->vm_end == next->vm_end so
-			 * mm->highest_vm_end doesn't need any update
-			 * in remove_next == 1 case.
-			 */
-			VM_WARN_ON(mm->highest_vm_end != vm_end_gap(vma));
 		}
 	}
 	if (insert && file)
@@ -1005,24 +964,6 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
 	return 0;
 }
 
-/*
- * vma_next_wrap() - Get the next VMA of the first.
- * @mm: The mm_struct.
- * @vma: The current vma.
- *
- * If @vma is NULL, return the first vma in the mm.
- *
- * Returns: The next VMA after @vma.
- */
-static inline struct vm_area_struct *vma_next_wrap(struct mm_struct *mm,
-                                        struct vm_area_struct *vma)
-{
-       if (!vma)
-               return mm->mmap;
-
-       return vma_next(mm, vma);
-}
-
 /*
  * Given a mapping request (addr,end,vm_flags,file,pgoff), figure out
  * whether that can be merged with its predecessor or its successor.
@@ -1084,7 +1025,11 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
 	if (vm_flags & VM_SPECIAL)
 		return NULL;
 
-	next = vma_next_wrap(mm, prev);
+	if (!prev)
+		next = find_vma(mm, 0);
+	else
+		next = vma_next(mm, prev);
+
 	area = next;
 	if (area && area->vm_end == end)		/* cases 6, 7, 8 */
 		next = vma_next(mm, next);
@@ -2052,8 +1997,6 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
 				vma->vm_end = address;
 				vma_mt_store(mm, vma);
 				anon_vma_interval_tree_post_update_vma(vma);
-				if (!vma_next(mm, vma))
-					mm->highest_vm_end = vm_end_gap(vma);
 				spin_unlock(&mm->page_table_lock);
 
 				perf_event_mmap(vma);
@@ -2082,7 +2025,7 @@ int expand_downwards(struct vm_area_struct *vma,
 		return -EPERM;
 
 	/* Enforce stack_guard_gap */
-	prev = vma->vm_prev;
+	prev = vma_prev(mm, vma);
 	/* Check that both stack segments have the same anon_vma? */
 	if (prev && !(prev->vm_flags & VM_GROWSDOWN) &&
 			vma_is_accessible(prev)) {
@@ -2254,7 +2197,7 @@ static void unmap_region(struct mm_struct *mm,
 	tlb_gather_mmu(&tlb, mm, start, end);
 	update_hiwater_rss(mm);
 	unmap_vmas(&tlb, vma, mas, start, end);
-	free_pgtables(&tlb, &ma_pgtb,
+	free_pgtables(&tlb, &ma_pgtb, vma,
 		      prev ? prev->vm_end : FIRST_USER_ADDRESS,
 		      max);
 	tlb_finish_mmu(&tlb, start, end);
@@ -2368,10 +2311,8 @@ static inline unsigned long detach_range(struct mm_struct *mm,
 	/* Drop removed area from the tree */
 	mas_store_gfp(src, NULL, GFP_KERNEL);
 	/* Set the upper limit */
-	if (!tmp) {
-		mm->highest_vm_end = prev ? vm_end_gap(prev) : 0;
+	if (!tmp)
 		return USER_PGTABLES_CEILING;
-	}
 
 	return tmp->vm_start;
 }
@@ -2423,7 +2364,7 @@ int do_mas_align_munmap(struct ma_state *mas, struct vm_area_struct *vma,
 		mas->index = start;
 		mas_reset(mas);
 	} else {
-		prev = vma->vm_prev;
+		prev = vma_prev(mm, vma);
 	}
 
 	if (vma->vm_end >= end)
@@ -2460,19 +2401,6 @@ int do_mas_align_munmap(struct ma_state *mas, struct vm_area_struct *vma,
 	/* Point of no return */
 	max = detach_range(mm, mas, &dst, prev, &last);
 
-#if 1
-	/* Detach vmas from the MM linked list */
-	vma->vm_prev = NULL;
-	if (prev)
-		prev->vm_next = last->vm_next;
-	else
-		mm->mmap = last->vm_next;
-
-	if (last->vm_next) {
-		last->vm_next->vm_prev = prev;
-		last->vm_next = NULL;
-	}
-#endif
 	/*
 	 * Do not downgrade mmap_lock if we are next to VM_GROWSDOWN or
 	 * VM_GROWSUP VMA. Such VMAs can change their size under
@@ -2489,6 +2417,7 @@ int do_mas_align_munmap(struct ma_state *mas, struct vm_area_struct *vma,
 
 	mas_reset(&dst);
 	mas_set(&dst, start);
+	vma = mas_walk(&dst);
 	unmap_region(mm, vma, &dst, start, end, prev, max);
 
 	/* Fix up all other VM information */
@@ -2565,6 +2494,7 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
 	unsigned long max = USER_PGTABLES_CEILING;
 	pgoff_t vm_pgoff;
 	int error;
+	struct ma_state ma_prev;
 	MA_STATE(mas, &mm->mm_mt, addr, end - 1);
 
 	/* Check against address space limit. */
@@ -2597,15 +2527,16 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
 	}
 
 
+	ma_prev = mas;
 	if (vm_flags & VM_SPECIAL) {
-		prev = mas_prev(&mas, 0);
+		prev = mas_prev(&ma_prev, 0);
 		goto cannot_expand;
 	}
 
 	/* Attempt to expand an old mapping */
 
 	/* Check next */
-	next = mas_next(&mas, ULONG_MAX);
+	next = mas_next(&ma_prev, ULONG_MAX);
 	if (next) {
 		max = next->vm_start;
 
@@ -2619,7 +2550,7 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
 	}
 
 	/* Check prev */
-	prev = mas_prev(&mas, 0);
+	prev = mas_prev(&ma_prev, 0);
 	if (prev && prev->vm_end == addr && !vma_policy(prev) &&
 	    can_vma_merge_after(prev, vm_flags, NULL, file, pgoff,
 				NULL_VM_UFFD_CTX)) {
@@ -2630,7 +2561,7 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
 
 	/* Actually expand, if possible */
 	if (vma &&
-	    !vma_expand(&mas, vma, merge_start, merge_end, vm_pgoff, next)) {
+	    !vma_expand(&ma_prev, vma, merge_start, merge_end, vm_pgoff, next)) {
 		khugepaged_enter_vma_merge(prev, vm_flags);
 		goto expanded;
 	}
@@ -2684,7 +2615,7 @@ cannot_expand:
 						 pgoff, NULL_VM_UFFD_CTX))) {
 			merge_start = prev->vm_start;
 			vm_pgoff = prev->vm_pgoff;
-			if (!vma_expand(&mas, prev, merge_start, merge_end,
+			if (!vma_expand(&ma_prev, prev, merge_start, merge_end,
 					vm_pgoff, next)) {
 				/* ->mmap() can change vma->vm_file and fput the original file. So
 				 * fput the vma->vm_file here or we would add an extra fput for file
@@ -2730,15 +2661,7 @@ cannot_expand:
 			goto free_vma;
 	}
 
-	/*
-	 * mas was called for the prev vma, and that may not be the correct
-	 * location for the vma being inserted, but is is before that location
-	 * and so the call to vma_mas_link()->vma_mas_store()->mas_store_gfp()
-	 * will detect the write as a spanning store and reset mas if necessary.
-	 */
-	mas.index = mas.last = addr;
-	mas_walk(&mas);
-	vma_mas_link(mm, vma, &mas, prev);
+	vma_mas_link(mm, vma, &mas);
 	/* Once vma denies write, undo our temporary denial count */
 	if (file) {
 unmap_writable:
@@ -2782,7 +2705,7 @@ unmap_and_free_vma:
 	fput(file);
 
 	mas.index = mas.last = addr;
-	mas_walk(&mas);
+	vma = mas_walk(&mas);
 	/* Undo any partial mapping done by a device driver. */
 	unmap_region(mm, vma, &mas, vma->vm_start, vma->vm_end, prev, max);
 	charged = 0;
@@ -2939,12 +2862,13 @@ out:
  */
 static int do_brk_munmap(struct ma_state *mas, struct vm_area_struct *vma,
 			 unsigned long newbrk, unsigned long oldbrk,
-			 struct list_head *uf, unsigned long max)
+			 struct list_head *uf)
 {
 	struct mm_struct *mm = vma->vm_mm;
-	struct vm_area_struct unmap;
+	struct vm_area_struct unmap, *next;
 	unsigned long unmap_pages;
 	int ret;
+	struct ma_state ma_next;
 
 	arch_unmap(mm, newbrk, oldbrk);
 
@@ -2956,6 +2880,7 @@ static int do_brk_munmap(struct ma_state *mas, struct vm_area_struct *vma,
 	vma_init(&unmap, mm);
 	unmap.vm_start = newbrk;
 	unmap.vm_end = oldbrk;
+	unmap.vm_pgoff = newbrk >> PAGE_SHIFT;
 	ret = userfaultfd_unmap_prep(&unmap, newbrk, oldbrk, uf);
 	if (ret)
 		return ret;
@@ -2984,7 +2909,10 @@ static int do_brk_munmap(struct ma_state *mas, struct vm_area_struct *vma,
 	}
 
 	mmap_write_downgrade(mm);
-	unmap_region(mm, &unmap, mas, newbrk, oldbrk, vma, max);
+	ma_next = *mas;
+	next = mas_next(&ma_next, -1);
+	unmap_region(mm, &unmap, mas, newbrk, oldbrk, vma,
+		     next ? next->vm_start : USER_PGTABLES_CEILING);
 	/* Statistics */
 	vm_stat_account(mm, unmap.vm_flags, -unmap_pages);
 	if (unmap.vm_flags & VM_ACCOUNT)
@@ -3012,12 +2940,13 @@ mas_store_fail:
  * do not match then create a new anonymous VMA.  Eventually we may be able to
  * do some brk-specific accounting here.
  */
-static int do_brk_flags(struct ma_state *mas, struct vm_area_struct **brkvma,
+static int do_brk_flags(struct ma_state *mas, struct ma_state *ma_prev,
+			struct vm_area_struct **brkvma,
 			unsigned long addr, unsigned long len,
 			unsigned long flags)
 {
 	struct mm_struct *mm = current->mm;
-	struct vm_area_struct *prev = NULL, *vma;
+	struct vm_area_struct *vma;
 	int error;
 	unsigned long mapped_addr;
 
@@ -3044,7 +2973,6 @@ static int do_brk_flags(struct ma_state *mas, struct vm_area_struct **brkvma,
 	if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT))
 		return -ENOMEM;
 
-	mas->last = addr + len - 1;
 	if (*brkvma) {
 		vma = *brkvma;
 		/* Expand the existing vma if possible; almost never a singular
@@ -3053,7 +2981,8 @@ static int do_brk_flags(struct ma_state *mas, struct vm_area_struct **brkvma,
 		if ((!vma->anon_vma ||
 		     list_is_singular(&vma->anon_vma_chain)) &&
 		     ((vma->vm_flags & ~VM_SOFTDIRTY) == flags)){
-			mas->index = vma->vm_start;
+			ma_prev->index = vma->vm_start;
+			ma_prev->last = addr + len - 1;
 
 			vma_adjust_trans_huge(vma, addr, addr + len, 0);
 			if (vma->anon_vma) {
@@ -3062,7 +2991,7 @@ static int do_brk_flags(struct ma_state *mas, struct vm_area_struct **brkvma,
 			}
 			vma->vm_end = addr + len;
 			vma->vm_flags |= VM_SOFTDIRTY;
-			if (mas_store_gfp(mas, vma, GFP_KERNEL))
+			if (mas_store_gfp(ma_prev, vma, GFP_KERNEL))
 				goto mas_mod_fail;
 			if (vma->anon_vma) {
 				anon_vma_interval_tree_post_update_vma(vma);
@@ -3071,11 +3000,9 @@ static int do_brk_flags(struct ma_state *mas, struct vm_area_struct **brkvma,
 			khugepaged_enter_vma_merge(vma, flags);
 			goto out;
 		}
-		prev = vma;
 	}
-	mas->index = addr;
-	mas_walk(mas);
 
+	mas->last = addr + len - 1;
 	/* create a vma struct for an anonymous mapping */
 	vma = vm_area_alloc(mm);
 	if (!vma)
@@ -3090,10 +3017,6 @@ static int do_brk_flags(struct ma_state *mas, struct vm_area_struct **brkvma,
 	if (vma_mas_store(vma, mas))
 		goto mas_store_fail;
 
-	if (!prev)
-		prev = mas_prev(mas, 0);
-
-	__vma_link_list(mm, vma, prev);
 	mm->map_count++;
 	*brkvma = vma;
 out:
@@ -3139,7 +3062,7 @@ int vm_brk_flags(unsigned long addr, unsigned long request, unsigned long flags)
 
 	// This vma left intentionally blank.
 	mas_walk(&mas);
-	ret = do_brk_flags(&mas, &vma, addr, len, flags);
+	ret = do_brk_flags(&mas, &mas, &vma, addr, len, flags);
 	mmap_write_unlock(mm);
 	populate = ((mm->def_flags & VM_LOCKED) != 0);
 	if (populate && !ret)
@@ -3160,6 +3083,7 @@ void exit_mmap(struct mm_struct *mm)
 	struct mmu_gather tlb;
 	struct vm_area_struct *vma;
 	unsigned long nr_accounted = 0;
+	struct ma_state mas2;
 	MA_STATE(mas, &mm->mm_mt, FIRST_USER_ADDRESS, FIRST_USER_ADDRESS);
 
 	/* mm's last user has gone, and its about to be pulled down */
@@ -3202,19 +3126,19 @@
 
 	arch_exit_mmap(mm);
 
-	vma = mm->mmap;
+	vma = mas_find(&mas, -1);
 	if (!vma)	/* Can happen if dup_mmap() received an OOM */
 		return;
 
+	mas2 = mas;
+
 	lru_add_drain();
 	flush_cache_mm(mm);
 	tlb_gather_mmu(&tlb, mm, 0, -1);
 	/* update_hiwater_rss(mm) here? but nobody should be looking */
 	/* Use 0 here to ensure all VMAs in the mm are unmapped */
 	unmap_vmas(&tlb, vma, &mas, 0, -1);
-	mas_reset(&mas);
-	mas_set(&mas, FIRST_USER_ADDRESS);
-	free_pgtables(&tlb, &mas, FIRST_USER_ADDRESS, USER_PGTABLES_CEILING);
+	free_pgtables(&tlb, &mas2, vma, FIRST_USER_ADDRESS, USER_PGTABLES_CEILING);
 	tlb_finish_mmu(&tlb, 0, -1);
 
 	/*
@@ -3241,9 +3165,7 @@
  */
 int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
 {
-	struct vm_area_struct *prev;
-
-	if (range_has_overlap(mm, vma->vm_start, vma->vm_end, &prev))
+	if (find_vma_intersection(mm, vma->vm_start, vma->vm_end))
 		return -ENOMEM;
 
 	if ((vma->vm_flags & VM_ACCOUNT) &&
@@ -3267,7 +3189,7 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
 		vma->vm_pgoff = vma->vm_start >> PAGE_SHIFT;
 	}
 
-	vma_link(mm, vma, prev);
+	vma_link(mm, vma);
 	return 0;
 }
 
@@ -3337,7 +3259,7 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
 			get_file(new_vma->vm_file);
 		if (new_vma->vm_ops && new_vma->vm_ops->open)
 			new_vma->vm_ops->open(new_vma);
-		vma_link(mm, new_vma, prev);
+		vma_link(mm, new_vma);
 		*need_rmap_locks = false;
 	}
 	return new_vma;
diff --git a/mm/util.c b/mm/util.c
index 48c79f2784ec..74888472a084 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -271,46 +271,6 @@ void *memdup_user_nul(const void __user *src, size_t len)
 }
 EXPORT_SYMBOL(memdup_user_nul);
 
-void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
-		struct vm_area_struct *prev)
-{
-	struct vm_area_struct *next;
-
-	vma->vm_prev = prev;
-	if (prev) {
-		next = prev->vm_next;
-		prev->vm_next = vma;
-	} else {
-		next = mm->mmap;
-		mm->mmap = vma;
-	}
-	vma->vm_next = next;
-	if (next)
-		next->vm_prev = vma;
-	else
-		mm->highest_vm_end = vm_end_gap(vma);
-}
-
-void __vma_unlink_list(struct mm_struct *mm, struct vm_area_struct *vma)
-{
-	struct vm_area_struct *prev, *next;
-
-	next = vma->vm_next;
-	prev = vma->vm_prev;
-	if (prev)
-		prev->vm_next = next;
-	else
-		mm->mmap = next;
-	if (next)
-		next->vm_prev = prev;
-	else {
-		if (prev)
-			mm->highest_vm_end = vm_end_gap(prev);
-		else
-			mm->highest_vm_end = 0;
-	}
-}
-
 /* Check if the vma is being used as a stack by this task */
 int vma_is_stack_for_current(struct vm_area_struct *vma)
 {