From: Matthew Wilcox (Oracle)
Date: Thu, 30 Jan 2025 23:15:15 +0000 (-0500)
Subject: mm: Set the pte dirty if the folio is already dirty
X-Git-Url: https://www.infradead.org/git/?a=commitdiff_plain;h=3bf8886ea29494a6165b5f395f8723cfe0f58691;p=users%2Fwilly%2Fpagecache.git

mm: Set the pte dirty if the folio is already dirty

If the first access to a folio is a read that is then followed by a
write, we can save a page fault.  s390 implemented this in their
mk_pte() in commit abf09bed3cce ("s390/mm: implement software dirty
bits"), but other architectures can also benefit from this.

Signed-off-by: Matthew Wilcox (Oracle)
---

diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 3ca5af4cfe432..3ee495b5171e9 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -1451,12 +1451,7 @@ static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
 
 static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
 {
-	unsigned long physpage = page_to_phys(page);
-	pte_t __pte = mk_pte_phys(physpage, pgprot);
-
-	if (pte_write(__pte) && PageDirty(page))
-		__pte = pte_mkdirty(__pte);
-	return __pte;
+	return mk_pte_phys(page_to_phys(page), pgprot);
 }
 
 #define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
diff --git a/mm/memory.c b/mm/memory.c
index b4d3d4893267c..7650e1793a6c6 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -5135,6 +5135,8 @@ void set_pte_range(struct vm_fault *vmf, struct folio *folio,
 
 	if (write)
 		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
+	else if (pte_write(entry) && folio_test_dirty(folio))
+		entry = pte_mkdirty(entry);
 	if (unlikely(vmf_orig_pte_uffd_wp(vmf)))
 		entry = pte_mkuffd_wp(entry);
 	/* copy-on-write page */
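
For illustration only, not part of the patch: a minimal userspace sketch
of the read-then-write pattern this change targets.  It assumes a
tmpfs-backed MAP_SHARED mapping (where read faults install writable
PTEs) and an architecture with software-managed dirty bits; the path
/dev/shm/demo, the 4096-byte size, and the getrusage()-based fault
counting are arbitrary choices for the demonstration, and error
handling is omitted.

/*
 * Illustrative sketch only -- not part of the patch.  Demonstrates the
 * read-then-write access pattern the change above optimizes.  Without
 * the patch, on an architecture with software-managed dirty bits, the
 * second write below takes an extra fault just to set the PTE dirty
 * bit; with it, the read fault installs an already-dirty PTE because
 * the folio is still dirty in the page cache.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <sys/resource.h>
#include <unistd.h>

static long minor_faults(void)
{
	struct rusage ru;

	getrusage(RUSAGE_SELF, &ru);
	return ru.ru_minflt;
}

int main(void)
{
	volatile char *p;
	long before, mid, after;
	int fd = open("/dev/shm/demo", O_RDWR | O_CREAT, 0600);

	ftruncate(fd, 4096);

	/* Dirty the folio through a first mapping, then tear it down. */
	p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	p[0] = 1;
	munmap((void *)p, 4096);

	/* Map it again: the folio is still dirty in the page cache. */
	p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);

	before = minor_faults();
	(void)p[0];	/* read fault installs the PTE (now pre-dirtied) */
	mid = minor_faults();
	p[0] = 2;	/* no further fault if the PTE was installed dirty */
	after = minor_faults();

	printf("minor faults: read %ld, write %ld\n",
	       mid - before, after - mid);

	munmap((void *)p, 4096);
	close(fd);
	return 0;
}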