within vm_mm. */
 
        /* linked list of VM areas per task, sorted by address */
-       struct vm_area_struct *vm_next;
+       struct vm_area_struct *vm_next, *vm_prev;
 
        pgprot_t vm_page_prot;          /* Access permissions of this VMA. */
        unsigned long vm_flags;         /* Flags, see mm.h. */
 
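With vm_prev alongside vm_next, each VMA now sits on a doubly linked list, so code that already holds a VMA can reach its address-order predecessor without another rbtree walk. A minimal sketch of that payoff, using a hypothetical helper (gap_before() is not part of this patch):

/* Hypothetical: is there at least 'size' bytes of unmapped address
 * space between this VMA and its predecessor?  Without vm_prev this
 * would need a second find_vma()-style lookup. */
static int gap_before(struct vm_area_struct *vma, unsigned long size)
{
	unsigned long prev_end = vma->vm_prev ? vma->vm_prev->vm_end : 0;

	return vma->vm_start - prev_end >= size;
}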
 #ifdef CONFIG_MMU
 static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
 {
-       struct vm_area_struct *mpnt, *tmp, **pprev;
+       struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
        struct rb_node **rb_link, *rb_parent;
        int retval;
        unsigned long charge;
        if (retval)
                goto out;
 
+       prev = NULL;
        for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
                struct file *file;
 
                        goto fail_nomem_anon_vma_fork;
                tmp->vm_flags &= ~VM_LOCKED;
                tmp->vm_mm = mm;
-               tmp->vm_next = NULL;
+               tmp->vm_next = tmp->vm_prev = NULL;
                file = tmp->vm_file;
                if (file) {
                        struct inode *inode = file->f_path.dentry->d_inode;
                 */
                *pprev = tmp;
                pprev = &tmp->vm_next;
+               tmp->vm_prev = prev;
+               prev = tmp;
 
                __vma_link_rb(mm, tmp, rb_link, rb_parent);
                rb_link = &tmp->vm_rb.rb_right;
 
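The threading pattern above is easier to see in isolation: pprev stays aimed at the link field the next node must land in, while prev remembers the node most recently linked. A stand-alone sketch (struct node and append() are illustrative, not kernel code):

struct node {
	struct node *next, *prev;
};

/* Mirror of the dup_mmap() loop body: link n at the tail cursor,
 * maintaining both directions of the list. */
static void append(struct node ***pprev, struct node **prevp, struct node *n)
{
	n->next = NULL;
	n->prev = *prevp;	/* back link to the last node appended */
	**pprev = n;		/* forward link from predecessor (or head) */
	*pprev = &n->next;	/* the next append fills n->next */
	*prevp = n;		/* n becomes the new predecessor */
}

Starting from struct node *head = NULL, *prev = NULL; struct node **pprev = &head; repeated append() calls rebuild the list in order, exactly as the fork path copies the parent's VMAs.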
 __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
                struct vm_area_struct *prev, struct rb_node *rb_parent)
 {
+       struct vm_area_struct *next;
+
+       vma->vm_prev = prev;
        if (prev) {
-               vma->vm_next = prev->vm_next;
+               next = prev->vm_next;
                prev->vm_next = vma;
        } else {
                mm->mmap = vma;
                if (rb_parent)
-                       vma->vm_next = rb_entry(rb_parent,
+                       next = rb_entry(rb_parent,
                                        struct vm_area_struct, vm_rb);
                else
-                       vma->vm_next = NULL;
+                       next = NULL;
        }
+       vma->vm_next = next;
+       if (next)
+               next->vm_prev = vma;
 }
 
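__vma_link_list() now establishes the invariant that the forward and backward links agree. A hypothetical debug walker stating that invariant explicitly (not part of the patch):

/* Hypothetical check: each VMA's vm_prev must name the VMA whose
 * vm_next led here; the head of mm->mmap has vm_prev == NULL. */
static void check_vma_list(struct mm_struct *mm)
{
	struct vm_area_struct *vma, *prev = NULL;

	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		BUG_ON(vma->vm_prev != prev);
		prev = vma;
	}
}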
 void __vma_link_rb(struct mm_struct *mm, struct vm_area_struct *vma,
 __vma_unlink(struct mm_struct *mm, struct vm_area_struct *vma,
                struct vm_area_struct *prev)
 {
-       prev->vm_next = vma->vm_next;
+       struct vm_area_struct *next = vma->vm_next;
+
+       prev->vm_next = next;
+       if (next)
+               next->vm_prev = prev;
        rb_erase(&vma->vm_rb, &mm->mm_rb);
        if (mm->mmap_cache == vma)
                mm->mmap_cache = prev;
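__vma_unlink() is handed prev by its caller, so the list head never needs fixing up here; only the two neighbours are rewired. The same splice in stand-alone form, including the head case the kernel version avoids (struct node as in the earlier sketch, illustrative only):

/* Unlink n by pointing its neighbours at each other; when n is
 * first, the list head inherits its forward link. */
static void unlink_node(struct node **head, struct node *n)
{
	if (n->prev)
		n->prev->next = n->next;
	else
		*head = n->next;
	if (n->next)
		n->next->prev = n->prev;
}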
        unsigned long addr;
 
        insertion_point = (prev ? &prev->vm_next : &mm->mmap);
+       vma->vm_prev = NULL;
        do {
                rb_erase(&vma->vm_rb, &mm->mm_rb);
                mm->map_count--;
                tail_vma = vma;
                vma = vma->vm_next;
        } while (vma && vma->vm_start < end);
        *insertion_point = vma;
+       if (vma)
+               vma->vm_prev = prev;
        tail_vma->vm_next = NULL;
        if (mm->unmap_area == arch_unmap_area)
                addr = prev ? prev->vm_end : mm->mmap_base;
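detach_vmas_to_be_unmapped() splits the list in two: the detached run is NULL-terminated at both ends (vm_prev on its first VMA, vm_next on tail_vma), and the survivors on either side are stitched back together. The same operation stand-alone (illustrative, struct node as above):

/* Detach the run [first..last], leaving it NULL-terminated in both
 * directions, and re-join its former neighbours. */
static void detach_run(struct node **head, struct node *first,
		       struct node *last)
{
	struct node *prev = first->prev;
	struct node *rest = last->next;

	if (prev)
		prev->next = rest;
	else
		*head = rest;
	if (rest)
		rest->prev = prev;
	first->prev = NULL;
	last->next = NULL;
}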
 
  */
 static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma)
 {
-       struct vm_area_struct *pvma, **pp;
+       struct vm_area_struct *pvma, **pp, *next;
        struct address_space *mapping;
        struct rb_node **p, *parent;
 
                        break;
        }
 
-       vma->vm_next = *pp;
+       next = *pp;
        *pp = vma;
+       vma->vm_next = next;
+       if (next)
+               next->vm_prev = vma;
 }
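One caution: at least in the lines quoted here, this nommu hunk updates next->vm_prev but never assigns vma->vm_prev itself, so the new VMA's back pointer is left stale unless it is set in elided context. A complete doubly linked sorted insert tracks the predecessor during the walk; a stand-alone sketch (struct snode and its start field are hypothetical stand-ins for the VMA and vm_start):

struct snode {
	struct snode *next, *prev;
	unsigned long start;
};

/* Sorted insert keeping both directions consistent: pp is the link
 * cursor, prev the node that owns the link pp points at. */
static void insert_sorted(struct snode **head, struct snode *n)
{
	struct snode **pp = head;
	struct snode *prev = NULL, *next;

	while ((next = *pp) && next->start < n->start) {
		prev = next;
		pp = &next->next;
	}
	*pp = n;
	n->prev = prev;
	n->next = next;
	if (next)
		next->prev = n;
}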
 
 /*