return 0;
 }
 
+int madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
+               pmd_t *pmd, unsigned long addr, unsigned long next)
+{
+       spinlock_t *ptl;
+       pmd_t orig_pmd;
+       struct page *page;
+       struct mm_struct *mm = tlb->mm;
+       int ret = 0;
+
+       if (!pmd_trans_huge_lock(pmd, vma, &ptl))
+               goto out;
+
+       orig_pmd = *pmd;
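+       /* The huge zero page carries no user data; the range is trivially done. */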
+       if (is_huge_zero_pmd(orig_pmd)) {
+               ret = 1;
+               goto out;
+       }
+
+       page = pmd_page(orig_pmd);
+       /*
+        * If other processes are mapping this page, we can't discard it
+        * unless they all do MADV_FREE, so just skip the page.
+        */
+       if (page_mapcount(page) != 1)
+               goto out;
+
+       if (!trylock_page(page))
+               goto out;
+
+       /*
+        * If the user wants to discard only part of the THP, split it so
+        * MADV_FREE will deactivate just those pages.  Leave ret at 0 so
+        * the caller falls back to the pte-level walk, which handles the
+        * split pages (and copes with a failed split).
+        */
+       if (next - addr != HPAGE_PMD_SIZE) {
+               get_page(page);
+               spin_unlock(ptl);
+               split_huge_page(page);
+               unlock_page(page);
+               put_page(page);
+               goto out_unlocked;
+       }
+
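+       /*
+        * Clear the dirty flag so reclaim can discard the page outright
+        * instead of swapping it out; a later store re-dirties it and
+        * cancels the free.
+        */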
+       if (PageDirty(page))
+               ClearPageDirty(page);
+       unlock_page(page);
+
+       if (PageActive(page))
+               deactivate_page(page);
+
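+       /*
+        * Drop the accessed/dirty bits from the PMD and flush the TLB
+        * entry so the CPU cannot write back stale young/dirty state
+        * behind reclaim's back.
+        */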
+       if (pmd_young(orig_pmd) || pmd_dirty(orig_pmd)) {
+               orig_pmd = pmdp_huge_get_and_clear_full(mm, addr, pmd,
+                       tlb->fullmm);
+               orig_pmd = pmd_mkold(orig_pmd);
+               orig_pmd = pmd_mkclean(orig_pmd);
+
+               set_pmd_at(mm, addr, pmd, orig_pmd);
+               tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
+       }
+       ret = 1;
+out:
+       spin_unlock(ptl);
+out_unlocked:
+       return ret;
+}
+
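
For context, the call site this patch assumes in mm/madvise.c tries the huge path first and only walks ptes when the PMD range was not fully handled. A minimal sketch under that assumption (the surrounding walker and the next: label are illustrative, not part of this diff):

        if (pmd_trans_huge(*pmd)) {
                /*
                 * Nonzero: the whole PMD range was freed (or it was the
                 * huge zero page), so the pte walk can be skipped.
                 * Zero: fall through, e.g. after the THP was split for
                 * a partial range.
                 */
                if (madvise_free_huge_pmd(tlb, vma, pmd, addr, next))
                        goto next;
        }
        /* ... pte-level MADV_FREE walk over [addr, next) ... */
next:
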
 int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
                 pmd_t *pmd, unsigned long addr)
 {
        struct page *page;
        pgtable_t pgtable;
        pmd_t _pmd;
-       bool young, write;
+       bool young, write, dirty;
        int i;
 
        VM_BUG_ON(haddr & ~HPAGE_PMD_MASK);
        atomic_add(HPAGE_PMD_NR - 1, &page->_count);
        write = pmd_write(*pmd);
        young = pmd_young(*pmd);
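+       /* Capture the PMD dirty state so it can be propagated to each subpage. */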
+       dirty = pmd_dirty(*pmd);
 
        pgtable = pgtable_trans_huge_withdraw(mm, pmd);
        pmd_populate(mm, &_pmd, pgtable);
                        entry = swp_entry_to_pte(swp_entry);
                } else {
                        entry = mk_pte(page + i, vma->vm_page_prot);
-                       entry = maybe_mkwrite(pte_mkdirty(entry), vma);
+                       entry = maybe_mkwrite(entry, vma);
                        if (!write)
                                entry = pte_wrprotect(entry);
                        if (!young)
                                entry = pte_mkold(entry);
                }
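+               /* Preserve the PMD-level dirty state at the page level. */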
+               if (dirty)
+                       SetPageDirty(page + i);
                pte = pte_offset_map(&_pmd, haddr);
                BUG_ON(!pte_none(*pte));
                set_pte_at(mm, haddr, pte, entry);
                        continue;
                flush_cache_page(vma, address, page_to_pfn(page));
                entry = ptep_clear_flush(vma, address, pte + i);
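+               /* Save the hardware dirty bit before the pte is replaced
+                * by a migration entry. */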
+               if (pte_dirty(entry))
+                       SetPageDirty(page);
                swp_entry = make_migration_entry(page, pte_write(entry));
                swp_pte = swp_entry_to_pte(swp_entry);
                if (pte_soft_dirty(entry))
                page_add_anon_rmap(page, vma, address, false);
 
                entry = pte_mkold(mk_pte(page, vma->vm_page_prot));
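+               /* Re-dirty the pte only if the page itself is dirty, so a
+                * MADV_FREEd (clean) page stays clean across migration. */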
-               entry = pte_mkdirty(entry);
+               if (PageDirty(page))
+                       entry = pte_mkdirty(entry);
                if (is_write_migration_entry(swp_entry))
                        entry = maybe_mkwrite(entry, vma);
 
                         (1L << PG_uptodate) |
                         (1L << PG_active) |
                         (1L << PG_locked) |
-                        (1L << PG_unevictable)));
-       page_tail->flags |= (1L << PG_dirty);
+                        (1L << PG_unevictable) |
+                        (1L << PG_dirty)));
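+       /* PG_dirty is now inherited from the head page instead of being
+        * set unconditionally on every tail. */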
 
        /*
         * After clearing PageTail the gup refcount can be released.