}
 __setup("transparent_hugepage=", setup_transparent_hugepage);
 
-pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma, bool dirty)
+pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
 {
-       if (likely(vma->vm_flags & VM_WRITE)) {
+       if (likely(vma->vm_flags & VM_WRITE))
                pmd = pmd_mkwrite(pmd);
-               if (dirty)
-                       pmd = pmd_mkdirty(pmd);
-       }
        return pmd;
 }
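
For context, the patched helper handles only writability; marking the entry dirty moves to the call sites. A minimal userspace sketch of that split, with pmd_t modeled as a plain bitmask (the VM_WRITE, PMD_WRITE, and PMD_DIRTY values below are illustrative stand-ins for the arch-specific kernel definitions, not real kernel code):

	#include <stdint.h>
	#include <stdio.h>

	#define VM_WRITE  0x1UL	/* stand-in flag values */
	#define PMD_WRITE 0x2UL
	#define PMD_DIRTY 0x4UL

	typedef uint64_t pmd_t;
	struct vm_area_struct { unsigned long vm_flags; };

	static pmd_t pmd_mkwrite(pmd_t pmd) { return pmd | PMD_WRITE; }
	static pmd_t pmd_mkdirty(pmd_t pmd) { return pmd | PMD_DIRTY; }

	/* After the change: writability only, no dirty handling. */
	static pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
	{
		if (vma->vm_flags & VM_WRITE)
			pmd = pmd_mkwrite(pmd);
		return pmd;
	}

	int main(void)
	{
		struct vm_area_struct vma = { .vm_flags = VM_WRITE };
		pmd_t entry = 0;

		/* Callers creating a written-to entry dirty it first,
		 * as the patched fault paths do. */
		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), &vma);
		printf("write=%d dirty=%d\n",
		       !!(entry & PMD_WRITE), !!(entry & PMD_DIRTY));
		return 0;
	}
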
 
                }
 
                entry = mk_huge_pmd(page, vma->vm_page_prot);
-               entry = maybe_pmd_mkwrite(entry, vma, true);
+               entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
                page_add_new_anon_rmap(page, vma, haddr, true);
                mem_cgroup_commit_charge(page, memcg, false, true);
                lru_cache_add_active_or_unevictable(page, vma);
        if (pfn_t_devmap(pfn))
                entry = pmd_mkdevmap(entry);
        if (write) {
-               entry = pmd_mkyoung(entry);
-               entry = maybe_pmd_mkwrite(entry, vma, true);
+               entry = pmd_mkyoung(pmd_mkdirty(entry));
+               entry = maybe_pmd_mkwrite(entry, vma);
        }
 
        if (pgtable) {
 EXPORT_SYMBOL_GPL(vmf_insert_pfn_pmd);
 
 #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
-static pud_t maybe_pud_mkwrite(pud_t pud, struct vm_area_struct *vma,
-               bool dirty)
+static pud_t maybe_pud_mkwrite(pud_t pud, struct vm_area_struct *vma)
 {
-       if (likely(vma->vm_flags & VM_WRITE)) {
+       if (likely(vma->vm_flags & VM_WRITE))
                pud = pud_mkwrite(pud);
-               if (dirty)
-                       pud = pud_mkdirty(pud);
-       }
        return pud;
 }
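
The PUD side mirrors this: maybe_pud_mkwrite() likewise keeps only the VM_WRITE check, and the devmap write path below applies pud_mkdirty() before calling it, keeping the PMD and PUD helpers symmetric.
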
 
        if (pfn_t_devmap(pfn))
                entry = pud_mkdevmap(entry);
        if (write) {
-               entry = pud_mkyoung(entry);
-               entry = maybe_pud_mkwrite(entry, vma, true);
+               entry = pud_mkyoung(pud_mkdirty(entry));
+               entry = maybe_pud_mkwrite(entry, vma);
        }
        set_pud_at(mm, addr, pud, entry);
        update_mmu_cache_pud(vma, addr, pud);
        if (reuse_swap_page(page, NULL)) {
                pmd_t entry;
                entry = pmd_mkyoung(orig_pmd);
-               entry = maybe_pmd_mkwrite(entry, vma, true);
+               entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
                if (pmdp_set_access_flags(vma, haddr, vmf->pmd, entry, 1))
                        update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
                ret |= VM_FAULT_WRITE;
        } else {
                pmd_t entry;
                entry = mk_huge_pmd(new_page, vma->vm_page_prot);
-               entry = maybe_pmd_mkwrite(entry, vma, true);
+               entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
                pmdp_huge_clear_flush_notify(vma, haddr, vmf->pmd);
                page_add_new_anon_rmap(new_page, vma, haddr, true);
                mem_cgroup_commit_charge(new_page, memcg, false, true);
        if (pmd_swp_soft_dirty(*pvmw->pmd))
                pmde = pmd_mksoft_dirty(pmde);
        if (is_write_migration_entry(entry))
-               pmde = maybe_pmd_mkwrite(pmde, vma, false);
+               pmde = maybe_pmd_mkwrite(pmde, vma);
 
        flush_cache_range(vma, mmun_start, mmun_start + HPAGE_PMD_SIZE);
        page_add_anon_rmap(new, vma, mmun_start, true);
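
Note the one call site not paired with a dirty operation: when a write migration entry is restored, the PMD regains write permission via maybe_pmd_mkwrite() alone. This corresponds to the old dirty=false argument; the dirty bit was already propagated to the page when the mapping was torn down for migration, so remaking the new PMD dirty here would be spurious.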