www.infradead.org Git - users/willy/pagecache.git/commitdiff
mm: Set the pte dirty if the folio is already dirty
Author: Matthew Wilcox (Oracle) <willy@infradead.org>
Thu, 30 Jan 2025 23:15:15 +0000 (18:15 -0500)
Committer: Matthew Wilcox (Oracle) <willy@infradead.org>
Tue, 25 Feb 2025 19:47:20 +0000 (14:47 -0500)
If the first access to a folio is a read that is then followed by a
write, we can save a page fault.  s390 implemented this in their
mk_pte() in commit abf09bed3cce ("s390/mm: implement software dirty
bits"), but other architectures can also benefit from this.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
arch/s390/include/asm/pgtable.h
mm/memory.c

index 3ca5af4cfe432e24d2484f7cb48b0393697a84bb..3ee495b5171e95068eed3e50ca296c0b9d9e2295 100644 (file)
@@ -1451,12 +1451,7 @@ static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
 
 static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
 {
-       unsigned long physpage = page_to_phys(page);
-       pte_t __pte = mk_pte_phys(physpage, pgprot);
-
-       if (pte_write(__pte) && PageDirty(page))
-               __pte = pte_mkdirty(__pte);
-       return __pte;
+       return mk_pte_phys(page_to_phys(page), pgprot);
 }
 
 #define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
index b4d3d4893267c8c9007a2b6e5419136ece85b59a..7650e1793a6c6fd79d9bf383b9e6f7d0e1d687b7 100644 (file)
@@ -5135,6 +5135,8 @@ void set_pte_range(struct vm_fault *vmf, struct folio *folio,
 
        if (write)
                entry = maybe_mkwrite(pte_mkdirty(entry), vma);
+       else if (pte_write(entry) && folio_test_dirty(folio))
+               entry = pte_mkdirty(entry);
        if (unlikely(vmf_orig_pte_uffd_wp(vmf)))
                entry = pte_mkuffd_wp(entry);
        /* copy-on-write page */