* But __ksm_exit has to be careful: do the rb_erase
                 * if it's interrupting a scan, and this rmap_item was
                 * inserted by this scan rather than left from before.
-                *
-                * Because of the case in which remove_mm_from_lists
-                * increments seqnr before removing rmaps, unstable_nr
-                * may even be 2 behind seqnr, but should never be
-                * further behind.  Yes, I did have trouble with this!
                 */
                age = (unsigned char)(ksm_scan.seqnr - rmap_item->address);
-               BUG_ON(age > 2);
+               BUG_ON(age > 1);
                if (!age)
                        rb_erase(&rmap_item->node, &root_unstable_tree);
                ksm_pages_unshared--;
        return err;
 }
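
The tightened BUG_ON follows from the new lifecycle: with remove_mm_from_lists
gone, an rmap_item still in the unstable tree was inserted either by the
current scan (age 0) or by the previous one (age 1).  The age itself is plain
modular arithmetic on the low byte of rmap_item->address, where the seqnr was
stashed at insertion time.  A minimal user-space sketch of that arithmetic,
assuming SEQNR_MASK as defined in mm/ksm.c; an illustration, not the kernel
code:

#include <assert.h>

#define SEQNR_MASK	0x0ff	/* low bits of address carry the scan seqnr */

/*
 * Same cast as in remove_rmap_item_from_tree(): the high bits of both
 * operands fall away, leaving (seqnr - stored seqnr) mod 256.
 */
static unsigned char age_of(unsigned long seqnr, unsigned long address)
{
	return (unsigned char)(seqnr - address);
}

int main(void)
{
	/* a page-aligned address tagged with the seqnr at insertion, 255 */
	unsigned long address = 0x1000 | (255 & SEQNR_MASK);

	assert(age_of(255, address) == 0);	/* inserted by current scan */
	assert(age_of(256, address) == 1);	/* one scan old: byte wrapped */
	return 0;
}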
 
-static void remove_mm_from_lists(struct mm_struct *mm)
-{
-       struct mm_slot *mm_slot;
-
-       spin_lock(&ksm_mmlist_lock);
-       mm_slot = get_mm_slot(mm);
-
-       /*
-        * This mm_slot is always at the scanning cursor when we're
-        * called from scan_get_next_rmap_item; but it's a special
-        * case when we're called from __ksm_exit.
-        */
-       if (ksm_scan.mm_slot == mm_slot) {
-               ksm_scan.mm_slot = list_entry(
-                       mm_slot->mm_list.next, struct mm_slot, mm_list);
-               ksm_scan.address = 0;
-               ksm_scan.rmap_item = list_entry(
-                       &ksm_scan.mm_slot->rmap_list, struct rmap_item, link);
-               if (ksm_scan.mm_slot == &ksm_mm_head)
-                       ksm_scan.seqnr++;
-       }
-
-       hlist_del(&mm_slot->link);
-       list_del(&mm_slot->mm_list);
-       spin_unlock(&ksm_mmlist_lock);
-
-       remove_trailing_rmap_items(mm_slot, mm_slot->rmap_list.next);
-       free_mm_slot(mm_slot);
-       clear_bit(MMF_VM_MERGEABLE, &mm->flags);
-}
-
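
Note the cursor fixup this deleted function carried, which the patch recreates
in __ksm_exit below: when the mm being removed sits under the scan cursor, the
cursor advances to the next slot, and if that advance wraps around to
ksm_mm_head the interrupted scan is accounted as complete by bumping seqnr.  A
stand-alone sketch of the wrap detection on a circular list (user-space
stand-ins for the kernel list helpers, not the ksm code):

#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

int main(void)
{
	/* two mm slots on a circular list headed by "head" */
	struct list_head head, a, b;

	head.next = &a; a.next = &b; b.next = &head;
	head.prev = &b; b.prev = &a; a.prev = &head;

	struct list_head *cursor = head.next;
	unsigned long seqnr = 0;
	int step;

	for (step = 0; step < 4; step++) {
		cursor = cursor->next;
		if (cursor == &head) {	/* advanced past the head ... */
			seqnr++;	/* ... one full pass completed */
			cursor = cursor->next;
		}
	}
	printf("full passes after 4 advances over 2 slots: %lu\n", seqnr);
	return 0;
}
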
 static u32 calc_checksum(struct page *page)
 {
        u32 checksum;
                }
        }
 
-       if (!ksm_scan.address) {
-               /*
-                * We've completed a full scan of all vmas, holding mmap_sem
-                * throughout, and found no VM_MERGEABLE: so do the same as
-                * __ksm_exit does to remove this mm from all our lists now.
-                */
-               remove_mm_from_lists(mm);
-               up_read(&mm->mmap_sem);
-               slot = ksm_scan.mm_slot;
-               if (slot != &ksm_mm_head)
-                       goto next_mm;
-               return NULL;
-       }
-
        /*
         * Nuke all the rmap_items that are above this current rmap:
         * because there were no VM_MERGEABLE vmas with such addresses.
         */
        remove_trailing_rmap_items(slot, ksm_scan.rmap_item->link.next);
-       up_read(&mm->mmap_sem);
 
        spin_lock(&ksm_mmlist_lock);
-       slot = list_entry(slot->mm_list.next, struct mm_slot, mm_list);
-       ksm_scan.mm_slot = slot;
+       ksm_scan.mm_slot = list_entry(slot->mm_list.next,
+                                               struct mm_slot, mm_list);
+       if (ksm_scan.address == 0) {
+               /*
+                * We've completed a full scan of all vmas, holding mmap_sem
+                * throughout, and found no VM_MERGEABLE: so do the same as
+                * __ksm_exit does to remove this mm from all our lists now.
+                */
+               hlist_del(&slot->link);
+               list_del(&slot->mm_list);
+               free_mm_slot(slot);
+               clear_bit(MMF_VM_MERGEABLE, &mm->flags);
+       }
        spin_unlock(&ksm_mmlist_lock);
+       up_read(&mm->mmap_sem);
 
        /* Repeat until we've completed scanning the whole list */
+       slot = ksm_scan.mm_slot;
        if (slot != &ksm_mm_head)
                goto next_mm;
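
The ordering is the point of this reshuffle: ksm_scan.mm_slot is advanced to
slot->mm_list.next under ksm_mmlist_lock before the now-useless slot is
unlinked and freed, so neither ksmd nor a racing __ksm_exit can observe a
cursor into freed memory.  A minimal sketch of the advance-then-delete idiom
(user-space list stand-ins, not the kernel's):

#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

static void list_del(struct list_head *entry)
{
	entry->prev->next = entry->next;
	entry->next->prev = entry->prev;
}

int main(void)
{
	struct list_head head, a, b;

	head.next = &a; a.prev = &head;	/* head <-> a <-> b <-> head */
	a.next = &b;    b.prev = &a;
	b.next = &head; head.prev = &b;

	struct list_head *cursor = &a;

	/* advance the cursor first, then unlink the node it was on */
	cursor = cursor->next;
	list_del(&a);

	printf("cursor still valid after delete: %d\n", cursor == &b);
	return 0;
}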
 
 
 void __ksm_exit(struct mm_struct *mm)
 {
+       struct mm_slot *mm_slot;
+
        /*
         * This process is exiting: doesn't hold and doesn't need mmap_sem;
         * but we do need to exclude ksmd and other exiters while we modify
         * the various lists and trees.
         */
        mutex_lock(&ksm_thread_mutex);
-       remove_mm_from_lists(mm);
+       spin_lock(&ksm_mmlist_lock);
+       mm_slot = get_mm_slot(mm);
+       if (!list_empty(&mm_slot->rmap_list)) {
+               spin_unlock(&ksm_mmlist_lock);
+               remove_trailing_rmap_items(mm_slot, mm_slot->rmap_list.next);
+               spin_lock(&ksm_mmlist_lock);
+       }
+
+       if (ksm_scan.mm_slot == mm_slot) {
+               ksm_scan.mm_slot = list_entry(
+                       mm_slot->mm_list.next, struct mm_slot, mm_list);
+               ksm_scan.address = 0;
+               ksm_scan.rmap_item = list_entry(
+                       &ksm_scan.mm_slot->rmap_list, struct rmap_item, link);
+               if (ksm_scan.mm_slot == &ksm_mm_head)
+                       ksm_scan.seqnr++;
+       }
+
+       hlist_del(&mm_slot->link);
+       list_del(&mm_slot->mm_list);
+       spin_unlock(&ksm_mmlist_lock);
+
+       free_mm_slot(mm_slot);
+       clear_bit(MMF_VM_MERGEABLE, &mm->flags);
        mutex_unlock(&ksm_thread_mutex);
 }
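
One subtlety worth spelling out is the rmap_item assignment above: list_entry
on the address of the list head manufactures a fictitious rmap_item whose link
member overlays the head itself, so the scanner's usual cursor->link.next step
starts from the first real item.  A small stand-alone sketch of the trick (a
user-space container_of, not the kernel code):

#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

#define list_entry(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct rmap_item {
	struct list_head link;
	unsigned long address;
};

int main(void)
{
	struct list_head rmap_list = { &rmap_list, &rmap_list };
	struct rmap_item *cursor;

	/*
	 * A fictitious rmap_item whose link member overlays the list head:
	 * nothing but cursor->link may ever be dereferenced through it.
	 */
	cursor = list_entry(&rmap_list, struct rmap_item, link);
	printf("cursor->link.next is the first item (the head, when empty): %d\n",
	       cursor->link.next == &rmap_list);
	return 0;
}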