pgtable_t pgtable = NULL;
        int ret = -ENOMEM;
 
+       pmd = pmdp_get_lockless(src_pmd);
+       if (unlikely(pmd_special(pmd))) {
+               dst_ptl = pmd_lock(dst_mm, dst_pmd);
+               src_ptl = pmd_lockptr(src_mm, src_pmd);
+               spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
+               /*
+                * No need to recheck the pmd, it can't change with write
+                * mmap lock held here.
+                *
+                * Meanwhile, make sure it's not a CoW VMA with a writable
+                * mapping; otherwise it means either the anon page wrongly
+                * had the special bit applied, or we wrongly made the
+                * PRIVATE mapping able to write to the backend MMIO.
+                */
+               VM_WARN_ON_ONCE(is_cow_mapping(src_vma->vm_flags) && pmd_write(pmd));
+               goto set_pmd;
+       }
+
        /* Skip if can be re-fill on fault */
        if (!vma_is_anonymous(dst_vma))
                return 0;
        pmdp_set_wrprotect(src_mm, addr, src_pmd);
        if (!userfaultfd_wp(dst_vma))
                pmd = pmd_clear_uffd_wp(pmd);
-       pmd = pmd_mkold(pmd_wrprotect(pmd));
+       pmd = pmd_wrprotect(pmd);
+set_pmd:
+       pmd = pmd_mkold(pmd);
        set_pmd_at(dst_mm, addr, dst_pmd, pmd);
 
        ret = 0;
         * TODO: once we support anonymous pages, use
         * folio_try_dup_anon_rmap_*() and split if duplicating fails.
         */
-       pudp_set_wrprotect(src_mm, addr, src_pud);
-       pud = pud_mkold(pud_wrprotect(pud));
+       if (is_cow_mapping(vma->vm_flags) && pud_write(pud)) {
+               pudp_set_wrprotect(src_mm, addr, src_pud);
+               pud = pud_wrprotect(pud);
+       }
+       pud = pud_mkold(pud);
        set_pud_at(dst_mm, addr, dst_pud, pud);
 
        ret = 0;