 int page_referenced_ksm(struct page *page,
                         struct mem_cgroup *memcg, unsigned long *vm_flags);
 int try_to_unmap_ksm(struct page *page, enum ttu_flags flags);
+int rmap_walk_ksm(struct page *page, int (*rmap_one)(struct page *,
+                 struct vm_area_struct *, unsigned long, void *), void *arg);
+void ksm_migrate_page(struct page *newpage, struct page *oldpage);
 
 #else  /* !CONFIG_KSM */
 
 {
        return 0;
 }
+
+static inline int rmap_walk_ksm(struct page *page, int (*rmap_one)(struct page*,
+               struct vm_area_struct *, unsigned long, void *), void *arg)
+{
+       return 0;
+}
+
+static inline void ksm_migrate_page(struct page *newpage, struct page *oldpage)
+{
+}
 #endif /* !CONFIG_KSM */
 
 #endif /* __LINUX_KSM_H */
 
 void page_unlock_anon_vma(struct anon_vma *anon_vma);
 int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma);
 
+/*
+ * Called by migrate.c to remove migration ptes, but might be used more later.
+ */
+int rmap_walk(struct page *page, int (*rmap_one)(struct page *,
+               struct vm_area_struct *, unsigned long, void *), void *arg);
+
 #else  /* !CONFIG_MMU */
 
 #define anon_vma_init()                do {} while (0)
 
        return ret;
 }
 
+#ifdef CONFIG_MIGRATION
+int rmap_walk_ksm(struct page *page, int (*rmap_one)(struct page *,
+                 struct vm_area_struct *, unsigned long, void *), void *arg)
+{
+       struct stable_node *stable_node;
+       struct hlist_node *hlist;
+       struct rmap_item *rmap_item;
+       int ret = SWAP_AGAIN;
+       int search_new_forks = 0;
+
+       VM_BUG_ON(!PageKsm(page));
+       VM_BUG_ON(!PageLocked(page));
+
+       stable_node = page_stable_node(page);
+       if (!stable_node)
+               return ret;
+again:
+       hlist_for_each_entry(rmap_item, hlist, &stable_node->hlist, hlist) {
+               struct anon_vma *anon_vma = rmap_item->anon_vma;
+               struct vm_area_struct *vma;
+
+               spin_lock(&anon_vma->lock);
+               list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
+                       if (rmap_item->address < vma->vm_start ||
+                           rmap_item->address >= vma->vm_end)
+                               continue;
+                       /*
+                        * Initially we examine only the vma which covers this
+                        * rmap_item; but later, if there is still work to do,
+                        * we examine covering vmas in other mms: in case they
+                        * were forked from the original since ksmd passed.
+                        */
+                       if ((rmap_item->mm == vma->vm_mm) == search_new_forks)
+                               continue;
+
+                       ret = rmap_one(page, vma, rmap_item->address, arg);
+                       if (ret != SWAP_AGAIN) {
+                               spin_unlock(&anon_vma->lock);
+                               goto out;
+                       }
+               }
+               spin_unlock(&anon_vma->lock);
+       }
+       if (!search_new_forks++)
+               goto again;
+out:
+       return ret;
+}
+
+void ksm_migrate_page(struct page *newpage, struct page *oldpage)
+{
+       struct stable_node *stable_node;
+
+       VM_BUG_ON(!PageLocked(oldpage));
+       VM_BUG_ON(!PageLocked(newpage));
+       VM_BUG_ON(newpage->mapping != oldpage->mapping);
+
+       stable_node = page_stable_node(newpage);
+       if (stable_node) {
+               VM_BUG_ON(stable_node->page != oldpage);
+               stable_node->page = newpage;
+       }
+}
+#endif /* CONFIG_MIGRATION */
+
 #ifdef CONFIG_SYSFS
 /*
  * This all compiles without CONFIG_SYSFS, but is a waste of space.
 
 #include <linux/mm_inline.h>
 #include <linux/nsproxy.h>
 #include <linux/pagevec.h>
+#include <linux/ksm.h>
 #include <linux/rmap.h>
 #include <linux/topology.h>
 #include <linux/cpu.h>
 /*
  * Restore a potential migration pte to a working pte entry
  */
-static void remove_migration_pte(struct vm_area_struct *vma,
-               struct page *old, struct page *new)
+static int remove_migration_pte(struct page *new, struct vm_area_struct *vma,
+                                unsigned long addr, void *old)
 {
        struct mm_struct *mm = vma->vm_mm;
        swp_entry_t entry;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *ptep, pte;
        spinlock_t *ptl;
-       unsigned long addr = page_address_in_vma(new, vma);
-
-       if (addr == -EFAULT)
-               return;
 
        pgd = pgd_offset(mm, addr);
        if (!pgd_present(*pgd))
-                return;
+               goto out;
 
        pud = pud_offset(pgd, addr);
        if (!pud_present(*pud))
-                return;
+               goto out;
 
        pmd = pmd_offset(pud, addr);
        if (!pmd_present(*pmd))
-               return;
+               goto out;
 
        ptep = pte_offset_map(pmd, addr);
 
        if (!is_swap_pte(*ptep)) {
                pte_unmap(ptep);
-               return;
+               goto out;
        }
 
        ptl = pte_lockptr(mm, pmd);
        spin_lock(ptl);
        pte = *ptep;
        if (!is_swap_pte(pte))
-               goto out;
+               goto unlock;
 
        entry = pte_to_swp_entry(pte);
 
-       if (!is_migration_entry(entry) || migration_entry_to_page(entry) != old)
-               goto out;
+       if (!is_migration_entry(entry) ||
+           migration_entry_to_page(entry) != old)
+               goto unlock;
 
        get_page(new);
        pte = pte_mkold(mk_pte(new, vma->vm_page_prot));
 
        /* No need to invalidate - it was non-present before */
        update_mmu_cache(vma, addr, pte);
-
-out:
+unlock:
        pte_unmap_unlock(ptep, ptl);
-}
-
-/*
- * Note that remove_file_migration_ptes will only work on regular mappings,
- * Nonlinear mappings do not use migration entries.
- */
-static void remove_file_migration_ptes(struct page *old, struct page *new)
-{
-       struct vm_area_struct *vma;
-       struct address_space *mapping = new->mapping;
-       struct prio_tree_iter iter;
-       pgoff_t pgoff = new->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
-
-       if (!mapping)
-               return;
-
-       spin_lock(&mapping->i_mmap_lock);
-
-       vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff)
-               remove_migration_pte(vma, old, new);
-
-       spin_unlock(&mapping->i_mmap_lock);
-}
-
-/*
- * Must hold mmap_sem lock on at least one of the vmas containing
- * the page so that the anon_vma cannot vanish.
- */
-static void remove_anon_migration_ptes(struct page *old, struct page *new)
-{
-       struct anon_vma *anon_vma;
-       struct vm_area_struct *vma;
-
-       /*
-        * We hold the mmap_sem lock. So no need to call page_lock_anon_vma.
-        */
-       anon_vma = page_anon_vma(new);
-       if (!anon_vma)
-               return;
-
-       spin_lock(&anon_vma->lock);
-
-       list_for_each_entry(vma, &anon_vma->head, anon_vma_node)
-               remove_migration_pte(vma, old, new);
-
-       spin_unlock(&anon_vma->lock);
+out:
+       return SWAP_AGAIN;
 }
 
 /*
  * Get rid of all migration entries and replace them by
  * references to the indicated page.
  */
 static void remove_migration_ptes(struct page *old, struct page *new)
 {
-       if (PageAnon(new))
-               remove_anon_migration_ptes(old, new);
-       else
-               remove_file_migration_ptes(old, new);
+       rmap_walk(new, remove_migration_pte, old);
 }
 
 /*
        }
 
        mlock_migrate_page(newpage, page);
+       ksm_migrate_page(newpage, page);
 
        ClearPageSwapCache(page);
        ClearPagePrivate(page);
        else
                rc = fallback_migrate_page(mapping, newpage, page);
 
-       if (!rc) {
+       if (!rc)
                remove_migration_ptes(page, newpage);
-       } else
+       else
                newpage->mapping = NULL;
 
        unlock_page(newpage);
 
        else
                return try_to_unmap_file(page, TTU_MUNLOCK);
 }
+
+#ifdef CONFIG_MIGRATION
+/*
+ * rmap_walk() and its helpers rmap_walk_anon() and rmap_walk_file():
+ * Called by migrate.c to remove migration ptes, but might be used more later.
+ */
+static int rmap_walk_anon(struct page *page, int (*rmap_one)(struct page *,
+               struct vm_area_struct *, unsigned long, void *), void *arg)
+{
+       struct anon_vma *anon_vma;
+       struct vm_area_struct *vma;
+       int ret = SWAP_AGAIN;
+
+       /*
+        * Note: remove_migration_ptes() cannot use page_lock_anon_vma()
+        * because that depends on page_mapped(); but not all its usages
+        * are holding mmap_sem, which also gave the necessary guarantee
+        * (that this anon_vma's slab has not already been destroyed).
+        * This needs to be reviewed later: avoiding page_lock_anon_vma()
+        * is risky, and currently limits the usefulness of rmap_walk().
+        */
+       anon_vma = page_anon_vma(page);
+       if (!anon_vma)
+               return ret;
+       spin_lock(&anon_vma->lock);
+       list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
+               unsigned long address = vma_address(page, vma);
+               if (address == -EFAULT)
+                       continue;
+               ret = rmap_one(page, vma, address, arg);
+               if (ret != SWAP_AGAIN)
+                       break;
+       }
+       spin_unlock(&anon_vma->lock);
+       return ret;
+}
+
+static int rmap_walk_file(struct page *page, int (*rmap_one)(struct page *,
+               struct vm_area_struct *, unsigned long, void *), void *arg)
+{
+       struct address_space *mapping = page->mapping;
+       pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
+       struct vm_area_struct *vma;
+       struct prio_tree_iter iter;
+       int ret = SWAP_AGAIN;
+
+       if (!mapping)
+               return ret;
+       spin_lock(&mapping->i_mmap_lock);
+       vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
+               unsigned long address = vma_address(page, vma);
+               if (address == -EFAULT)
+                       continue;
+               ret = rmap_one(page, vma, address, arg);
+               if (ret != SWAP_AGAIN)
+                       break;
+       }
+       /*
+        * No nonlinear handling: being always shared, nonlinear vmas
+        * never contain migration ptes.  Decide what to do about this
+        * limitation to linear when we need rmap_walk() on nonlinear.
+        */
+       spin_unlock(&mapping->i_mmap_lock);
+       return ret;
+}
+
+int rmap_walk(struct page *page, int (*rmap_one)(struct page *,
+               struct vm_area_struct *, unsigned long, void *), void *arg)
+{
+       VM_BUG_ON(!PageLocked(page));
+
+       if (unlikely(PageKsm(page)))
+               return rmap_walk_ksm(page, rmap_one, arg);
+       else if (PageAnon(page))
+               return rmap_walk_anon(page, rmap_one, arg);
+       else
+               return rmap_walk_file(page, rmap_one, arg);
+}
+#endif /* CONFIG_MIGRATION */
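
For reference, the contract shared by rmap_walk(), rmap_walk_anon(), rmap_walk_file() and rmap_walk_ksm() is simply: call rmap_one(page, vma, address, arg) for each mapping found, and stop the walk as soon as the callback returns anything other than SWAP_AGAIN, which is why remove_migration_pte() above now always returns SWAP_AGAIN. The sketch below is a standalone userspace model of that contract, not kernel code: fake_vma, fake_page, walk_mappings and touch_one are names invented here for illustration, and the SWAP_* values are stand-ins for the kernel's return codes.

/*
 * Userspace model of the rmap_walk() callback contract (illustration only).
 * The walker invokes rmap_one() once per mapping that covers the page and
 * stops early on any return value other than SWAP_AGAIN.
 */
#include <stdio.h>

#define SWAP_AGAIN	1
#define SWAP_FAIL	0

struct fake_vma {
	unsigned long vm_start;
	unsigned long vm_end;
};

struct fake_page {
	unsigned long address;		/* where the page is mapped */
};

/* Same shape as rmap_one: page, vma, address, opaque argument. */
typedef int (*rmap_one_t)(struct fake_page *, struct fake_vma *,
			  unsigned long, void *);

static int walk_mappings(struct fake_page *page, struct fake_vma *vmas,
			 int nr, rmap_one_t rmap_one, void *arg)
{
	int ret = SWAP_AGAIN;
	int i;

	for (i = 0; i < nr; i++) {
		if (page->address < vmas[i].vm_start ||
		    page->address >= vmas[i].vm_end)
			continue;
		ret = rmap_one(page, &vmas[i], page->address, arg);
		if (ret != SWAP_AGAIN)	/* callback asked us to stop */
			break;
	}
	return ret;
}

/* A callback in the style of remove_migration_pte(): always keep walking. */
static int touch_one(struct fake_page *page, struct fake_vma *vma,
		     unsigned long addr, void *arg)
{
	int *count = arg;

	(*count)++;
	printf("visited mapping at %#lx in vma [%#lx, %#lx)\n",
	       addr, vma->vm_start, vma->vm_end);
	return SWAP_AGAIN;
}

int main(void)
{
	struct fake_vma vmas[] = {
		{ 0x1000, 0x3000 },
		{ 0x2000, 0x4000 },	/* overlapping mapping, e.g. a fork */
	};
	struct fake_page page = { .address = 0x2000 };
	int count = 0;

	walk_mappings(&page, vmas, 2, touch_one, &count);
	printf("callback ran %d time(s)\n", count);
	return 0;
}

Keeping the stop/continue decision in the callback's return value is what lets the same walker serve both migration-pte removal here and any later users of rmap_walk().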