 void rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc);
 void ksm_migrate_page(struct page *newpage, struct page *oldpage);
+bool reuse_ksm_page(struct page *page,
+                       struct vm_area_struct *vma, unsigned long address);
 
 #else  /* !CONFIG_KSM */
 
 static inline void ksm_migrate_page(struct page *newpage, struct page *oldpage)
 {
 }
+static inline bool reuse_ksm_page(struct page *page,
+                       struct vm_area_struct *vma, unsigned long address)
+{
+       return false;
+}
 #endif /* CONFIG_MMU */
 #endif /* !CONFIG_KSM */
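
For context, not part of the patch: the !CONFIG_KSM stub above keeps callers ifdef-free, since with KSM disabled reuse_ksm_page() simply reports false and the fault path falls back to breaking COW. A minimal sketch of the intended calling convention, using a hypothetical wrapper name (the real caller is the do_wp_page() hunk further down):

    /* Hypothetical illustration only: page must be locked, and the
     * caller must be prepared to copy when false is returned. */
    static bool try_write_ksm_in_place(struct page *page,
                                       struct vm_area_struct *vma,
                                       unsigned long address)
    {
            if (!PageKsm(page))
                    return false;   /* ordinary anon: other reuse paths */
            if (PageSwapCache(page) || page_count(page) != 1)
                    return false;   /* still shared: break COW instead */
            return reuse_ksm_page(page, vma, address);
    }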
 
 
         * case this node is no longer referenced, and should be freed;
         * however, it might mean that the page is under page_ref_freeze().
         * The __remove_mapping() case is easy, again the node is now stale;
-        * but if page is swapcache in migrate_page_move_mapping(), it might
-        * still be our page, in which case it's essential to keep the node.
+        * the same applies in the reuse_ksm_page() case; but if page is
+        * swapcache in migrate_page_move_mapping(), it might still be our
+        * page, in which case it's essential to keep the node.
         */
        while (!get_page_unless_zero(page)) {
                /*
                goto again;
 }
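
For reference, a paraphrase (not part of the patch) of the primitive used in the loop above; the real definition lives in include/linux/mm.h on top of include/linux/page_ref.h:

    static inline bool get_page_unless_zero(struct page *page)
    {
            /* fails when _refcount is 0: the page is being freed, or
             * was frozen by page_ref_freeze() as in reuse_ksm_page() */
            return atomic_add_unless(&page->_refcount, 1, 0);
    }

This is why the loop must distinguish a truly stale stable-tree node from a page that is merely frozen.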
 
+bool reuse_ksm_page(struct page *page,
+                   struct vm_area_struct *vma,
+                   unsigned long address)
+{
+#ifdef CONFIG_DEBUG_VM
+       if (WARN_ON(is_zero_pfn(page_to_pfn(page))) ||
+                       WARN_ON(!page_mapped(page)) ||
+                       WARN_ON(!PageLocked(page))) {
+               dump_page(page, "reuse_ksm_page");
+               return false;
+       }
+#endif
+
+       if (PageSwapCache(page) || !page_stable_node(page))
+               return false;
+       /*
+        * Prohibit parallel get_ksm_page(): freezing the refcount to
+        * zero makes its get_page_unless_zero() fail while we rewrite
+        * the rmap below.
+        */
+       if (!page_ref_freeze(page, 1))
+               return false;
+
+       /* Take the page over: rebind its rmap to this vma as ordinary anon */
+       page_move_anon_rmap(page, vma);
+       page->index = linear_page_index(vma, address);
+       page_ref_unfreeze(page, 1);
+
+       return true;
+}
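
The synchronization above rests on the freeze/unfreeze pair; a paraphrase of their behavior (the real versions in include/linux/page_ref.h use atomic_set_release() and carry extra debug hooks):

    static inline int page_ref_freeze(struct page *page, int count)
    {
            /* succeeds only if exactly @count references exist, then
             * drops _refcount to 0 so get_page_unless_zero() fails */
            return atomic_cmpxchg(&page->_refcount, count, 0) == count;
    }

    static inline void page_ref_unfreeze(struct page *page, int count)
    {
            atomic_set(&page->_refcount, count);
    }

Between freeze and unfreeze, the page's mapping and index can be rewritten without a concurrent get_ksm_page() observing a half-updated rmap.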
 #ifdef CONFIG_MIGRATION
 void ksm_migrate_page(struct page *newpage, struct page *oldpage)
 {
 
         * Take out anonymous pages first, anonymous shared vmas are
         * not dirty accountable.
         */
-       if (PageAnon(vmf->page) && !PageKsm(vmf->page)) {
+       if (PageAnon(vmf->page)) {
                int total_map_swapcount;
+               /* in swapcache or with extra references: must copy */
+               if (PageKsm(vmf->page) && (PageSwapCache(vmf->page) ||
+                                          page_count(vmf->page) != 1))
+                       goto copy;
                if (!trylock_page(vmf->page)) {
                        get_page(vmf->page);
                        pte_unmap_unlock(vmf->pte, vmf->ptl);
                        }
                        put_page(vmf->page);
                }
+               if (PageKsm(vmf->page)) {
+                       bool reused = reuse_ksm_page(vmf->page, vmf->vma,
+                                                    vmf->address);
+                       unlock_page(vmf->page);
+                       if (!reused)
+                               goto copy;
+                       wp_page_reuse(vmf);
+                       return VM_FAULT_WRITE;
+               }
                if (reuse_swap_page(vmf->page, &total_map_swapcount)) {
                        if (total_map_swapcount == 1) {
                                /*
                                        (VM_WRITE|VM_SHARED))) {
                return wp_page_shared(vmf);
        }
-
+copy:
        /*
         * Ok, we need to copy. Oh, well..
         */
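
Taken together, the anonymous-page policy in do_wp_page() now reads roughly as follows; the helper and enum below are hypothetical, purely a restatement of the hunk above:

    enum wp_action { WP_COPY, WP_REUSE, WP_TRY_SWAP_REUSE };

    static enum wp_action wp_anon_policy(struct vm_fault *vmf)
    {
            /* swapcached or shared KSM page: cannot be taken over */
            if (PageKsm(vmf->page) &&
                (PageSwapCache(vmf->page) || page_count(vmf->page) != 1))
                    return WP_COPY;
            /* only-pte-mapped KSM page (locked by now in the real code):
             * rewrite its rmap in place if reuse_ksm_page() agrees */
            if (PageKsm(vmf->page))
                    return reuse_ksm_page(vmf->page, vmf->vma,
                                          vmf->address) ?
                            WP_REUSE : WP_COPY;
            /* ordinary anonymous page: existing reuse_swap_page() logic */
            return WP_TRY_SWAP_REUSE;
    }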