return false;
 }
 
-static void remap_page(struct folio *folio, unsigned long nr)
+static void remap_page(struct folio *folio, unsigned long nr, int flags)
 {
        int i = 0;
 
        if (!folio_test_anon(folio))
                return;
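+       /* Callers hold the anon_vma lock across the split, hence RMP_LOCKED below. */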
        for (;;) {
-               remove_migration_ptes(folio, folio, true);
+               remove_migration_ptes(folio, folio, RMP_LOCKED | flags);
                i += folio_nr_pages(folio);
                if (i >= nr)
                        break;
 
        if (nr_dropped)
                shmem_uncharge(folio->mapping->host, nr_dropped);
-       remap_page(folio, nr);
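+       /* Unused subpages are only remapped to the shared zeropage for anon memory. */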
+       remap_page(folio, nr, PageAnon(head) ? RMP_USE_SHARED_ZEROPAGE : 0);
 
        /*
         * set page to its compound_head when split to non order-0 pages, so
                if (mapping)
                        xas_unlock(&xas);
                local_irq_enable();
-               remap_page(folio, folio_nr_pages(folio));
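+               /* Split failed: restore the original ptes; no zeropage optimization here. */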
+               remap_page(folio, folio_nr_pages(folio), 0);
                ret = -EAGAIN;
        }
 
 
        return true;
 }
 
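+/*
+ * Map the pte at pvmw->address to the shared zeropage if the subpage at
+ * @idx of @folio is entirely zero-filled. Returns true on success, false
+ * if the caller should remap the subpage normally.
+ */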
+static bool try_to_map_unused_to_zeropage(struct page_vma_mapped_walk *pvmw,
+                                         struct folio *folio,
+                                         unsigned long idx)
+{
+       struct page *page = folio_page(folio, idx);
+       bool contains_data;
+       pte_t newpte;
+       void *addr;
+
+       VM_BUG_ON_PAGE(PageCompound(page), page);
+       VM_BUG_ON_PAGE(!PageAnon(page), page);
+       VM_BUG_ON_PAGE(!PageLocked(page), page);
+       VM_BUG_ON_PAGE(pte_present(*pvmw->pte), page);
+
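+       /* Skip mlocked memory and mms that forbid the shared zeropage. */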
+       if (folio_test_mlocked(folio) || (pvmw->vma->vm_flags & VM_LOCKED) ||
+           mm_forbids_zeropage(pvmw->vma->vm_mm))
+               return false;
+
+       /*
+        * The pmd entry mapping the old thp was flushed and the pte mapping
+        * this subpage is now non-present. If the subpage is only zero-filled
+        * then map it to the shared zeropage.
+        */
+       addr = kmap_local_page(page);
+       contains_data = memchr_inv(addr, 0, PAGE_SIZE);
+       kunmap_local(addr);
+
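+       /* memchr_inv() returns non-NULL if any byte in the subpage is non-zero. */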
+       if (contains_data)
+               return false;
+
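+       /* Mark the pte special so vm_normal_page() ignores the shared zeropage. */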
+       newpte = pte_mkspecial(pfn_pte(my_zero_pfn(pvmw->address),
+                                       pvmw->vma->vm_page_prot));
+       set_pte_at(pvmw->vma->vm_mm, pvmw->address, pvmw->pte, newpte);
+
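+       /* The subpage is no longer mapped here and the zeropage is not counted in RSS. */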
+       dec_mm_counter(pvmw->vma->vm_mm, mm_counter(folio));
+       return true;
+}
+
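+/* Carries the source folio and the zeropage policy through the rmap walk. */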
+struct rmap_walk_arg {
+       struct folio *folio;
+       bool map_unused_to_zeropage;
+};
+
 /*
  * Restore a potential migration pte to a working pte entry
  */
 static bool remove_migration_pte(struct folio *folio,
-               struct vm_area_struct *vma, unsigned long addr, void *old)
+               struct vm_area_struct *vma, unsigned long addr, void *arg)
 {
-       DEFINE_FOLIO_VMA_WALK(pvmw, old, vma, addr, PVMW_SYNC | PVMW_MIGRATION);
+       struct rmap_walk_arg *rmap_walk_arg = arg;
+       DEFINE_FOLIO_VMA_WALK(pvmw, rmap_walk_arg->folio, vma, addr, PVMW_SYNC | PVMW_MIGRATION);
 
        while (page_vma_mapped_walk(&pvmw)) {
                rmap_t rmap_flags = RMAP_NONE;
                        continue;
                }
 #endif
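+               /* If requested, map an all-zero subpage to the shared zeropage instead. */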
+               if (rmap_walk_arg->map_unused_to_zeropage &&
+                   try_to_map_unused_to_zeropage(&pvmw, folio, idx))
+                       continue;
 
                folio_get(folio);
                pte = mk_pte(new, READ_ONCE(vma->vm_page_prot));
  * Get rid of all migration entries and replace them by
  * references to the indicated page.
  */
-void remove_migration_ptes(struct folio *src, struct folio *dst, bool locked)
+void remove_migration_ptes(struct folio *src, struct folio *dst, int flags)
 {
+       struct rmap_walk_arg rmap_walk_arg = {
+               .folio = src,
+               .map_unused_to_zeropage = flags & RMP_USE_SHARED_ZEROPAGE,
+       };
+
        struct rmap_walk_control rwc = {
                .rmap_one = remove_migration_pte,
-               .arg = src,
+               .arg = &rmap_walk_arg,
        };
 
-       if (locked)
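+       /* RMP_USE_SHARED_ZEROPAGE is only valid when remapping a folio onto itself. */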
+       VM_BUG_ON_FOLIO((flags & RMP_USE_SHARED_ZEROPAGE) && (src != dst), src);
+
+       if (flags & RMP_LOCKED)
                rmap_walk_locked(dst, &rwc);
        else
                rmap_walk(dst, &rwc);
         * At this point we know that the migration attempt cannot
         * be successful.
         */
-       remove_migration_ptes(folio, folio, false);
+       remove_migration_ptes(folio, folio, 0);
 
        rc = mapping->a_ops->writepage(&folio->page, &wbc);
 
                                   struct list_head *ret)
 {
        if (page_was_mapped)
-               remove_migration_ptes(src, src, false);
+               remove_migration_ptes(src, src, 0);
        /* Drop an anon_vma reference if we took one */
        if (anon_vma)
                put_anon_vma(anon_vma);
                lru_add_drain();
 
        if (old_page_state & PAGE_WAS_MAPPED)
-               remove_migration_ptes(src, dst, false);
+               remove_migration_ptes(src, dst, 0);
 
 out_unlock_both:
        folio_unlock(dst);
 
        if (page_was_mapped)
                remove_migration_ptes(src,
-                       rc == MIGRATEPAGE_SUCCESS ? dst : src, false);
+                       rc == MIGRATEPAGE_SUCCESS ? dst : src, 0);
 
 unlock_put_anon:
        folio_unlock(dst);