From 659ffeb059dc1ccd54abbcc5f783f4ad516c9835 Mon Sep 17 00:00:00 2001
From: Lance Yang <lance.yang@linux.dev>
Date: Tue, 30 Sep 2025 16:10:40 +0800
Subject: [PATCH] mm/rmap: fix soft-dirty and uffd-wp bit loss when remapping
 zero-filled mTHP subpage to shared zeropage

When splitting an mTHP and replacing a zero-filled subpage with the
shared zeropage, try_to_map_unused_to_zeropage() currently drops
several important PTE bits.

For userspace tools like CRIU, which rely on the soft-dirty mechanism
for incremental snapshots, losing the soft-dirty bit means modified
pages are missed, leading to inconsistent memory state after restore.

As pointed out by David, the more critical uffd-wp bit is also
dropped. This breaks the userfaultfd write-protection mechanism,
causing writes to be silently missed by monitoring applications,
which can lead to data corruption.

Preserve both the soft-dirty and uffd-wp bits from the old PTE when
creating the new zeropage mapping to ensure they are correctly
tracked.

Link: https://lkml.kernel.org/r/20250930081040.80926-1-lance.yang@linux.dev
Fixes: b1f202060afe ("mm: remap unused subpages to shared zeropage when splitting isolated thp")
Signed-off-by: Lance Yang <lance.yang@linux.dev>
Suggested-by: David Hildenbrand
Suggested-by: Dev Jain
Acked-by: David Hildenbrand
Reviewed-by: Dev Jain
Acked-by: Zi Yan
Cc: Alistair Popple
Cc: Baolin Wang
Cc: Barry Song
Cc: Byungchul Park
Cc: Gregory Price
Cc: "Huang, Ying"
Cc: Jann Horn
Cc: Joshua Hahn
Cc: Liam Howlett
Cc: Lorenzo Stoakes
Cc: Mariano Pache
Cc: Matthew Brost
Cc: Peter Xu
Cc: Rakie Kim
Cc: Rik van Riel
Cc: Ryan Roberts
Cc: Usama Arif
Cc: Vlastimil Babka
Cc: Yu Zhao
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton
---
 mm/migrate.c | 15 ++++++++++-----
 1 file changed, 10 insertions(+), 5 deletions(-)

diff --git a/mm/migrate.c b/mm/migrate.c
index 2ce0e08ceb1f..4ff6eea0ef7e 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -297,8 +297,7 @@ bool isolate_folio_to_list(struct folio *folio, struct list_head *list)
 }
 
 static bool try_to_map_unused_to_zeropage(struct page_vma_mapped_walk *pvmw,
-					  struct folio *folio,
-					  unsigned long idx)
+		struct folio *folio, pte_t old_pte, unsigned long idx)
 {
 	struct page *page = folio_page(folio, idx);
 	pte_t newpte;
@@ -307,7 +306,7 @@ static bool try_to_map_unused_to_zeropage(struct page_vma_mapped_walk *pvmw,
 		return false;
 	VM_BUG_ON_PAGE(!PageAnon(page), page);
 	VM_BUG_ON_PAGE(!PageLocked(page), page);
-	VM_BUG_ON_PAGE(pte_present(ptep_get(pvmw->pte)), page);
+	VM_BUG_ON_PAGE(pte_present(old_pte), page);
 
 	if (folio_test_mlocked(folio) || (pvmw->vma->vm_flags & VM_LOCKED) ||
 	    mm_forbids_zeropage(pvmw->vma->vm_mm))
@@ -323,6 +322,12 @@ static bool try_to_map_unused_to_zeropage(struct page_vma_mapped_walk *pvmw,
 
 	newpte = pte_mkspecial(pfn_pte(my_zero_pfn(pvmw->address),
 					pvmw->vma->vm_page_prot));
+
+	if (pte_swp_soft_dirty(old_pte))
+		newpte = pte_mksoft_dirty(newpte);
+	if (pte_swp_uffd_wp(old_pte))
+		newpte = pte_mkuffd_wp(newpte);
+
 	set_pte_at(pvmw->vma->vm_mm, pvmw->address, pvmw->pte, newpte);
 
 	dec_mm_counter(pvmw->vma->vm_mm, mm_counter(folio));
@@ -365,13 +370,13 @@ static bool remove_migration_pte(struct folio *folio,
 			continue;
 		}
 #endif
+		old_pte = ptep_get(pvmw.pte);
 		if (rmap_walk_arg->map_unused_to_zeropage &&
-		    try_to_map_unused_to_zeropage(&pvmw, folio, idx))
+		    try_to_map_unused_to_zeropage(&pvmw, folio, old_pte, idx))
 			continue;
 
 		folio_get(folio);
 		pte = mk_pte(new, READ_ONCE(vma->vm_page_prot));
-		old_pte = ptep_get(pvmw.pte);
 
 		entry = pte_to_swp_entry(old_pte);
 		if (!is_migration_entry_young(entry))
-- 
2.51.0
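
[Editor's note] Background for readers outside mm: the soft-dirty bit this
patch preserves is what snapshot tools actually consume from userspace, via
bit 55 of each 64-bit entry in /proc/pid/pagemap (documented in
Documentation/admin-guide/mm/soft-dirty.rst). Below is a minimal sketch of
that consumer side; it is illustrative only, not part of the patch, and the
helper name is made up. If the kernel silently clears the bit while remapping
a zero-filled subpage to the shared zeropage, a check like this reports the
page as clean and an incremental dump skips it.

/*
 * Illustrative sketch, not from the patch: read the soft-dirty bit
 * (bit 55 of a pagemap entry) for one virtual address in a target
 * process, the way a CRIU-style tool would between snapshots.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/types.h>
#include <unistd.h>

static int page_is_soft_dirty(pid_t pid, unsigned long vaddr)
{
	char path[64];
	uint64_t entry;
	long page_size = sysconf(_SC_PAGESIZE);
	int fd;

	snprintf(path, sizeof(path), "/proc/%d/pagemap", (int)pid);
	fd = open(path, O_RDONLY);
	if (fd < 0)
		return -1;

	/* One 8-byte entry per page, indexed by virtual page number. */
	if (pread(fd, &entry, sizeof(entry),
		  (off_t)(vaddr / page_size) * sizeof(entry)) != sizeof(entry)) {
		close(fd);
		return -1;
	}
	close(fd);

	return (int)((entry >> 55) & 1);	/* bit 55: pte is soft-dirty */
}

The uffd-wp case is worse than a missed dump, as the commit message notes: if
the bit is lost, a later write to the page simply proceeds instead of faulting
into the userfaultfd monitor, so the monitoring application never learns the
page changed.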