From: Balbir Singh
Date: Mon, 8 Sep 2025 04:57:23 +0000 (+1000)
Subject: mm/migrate_device: handle partially mapped folios during collection
X-Git-Url: https://www.infradead.org/git/?a=commitdiff_plain;h=52668fffc04377f9bcbc5f4b2d0b6544a885f796;p=users%2Fjedix%2Flinux-maple.git

mm/migrate_device: handle partially mapped folios during collection

Extend migrate_vma_collect_pmd() to handle partially mapped large folios
that require splitting before migration can proceed.  During the PTE walk
in the collection phase, if a large folio is only partially mapped in the
migration range, it must be split to ensure the folio is correctly
migrated.

Link: https://lkml.kernel.org/r/8e1ef390-ab28-4294-8528-c57453e3acc1@nvidia.com
Signed-off-by: Balbir Singh
Cc: David Hildenbrand
Cc: Zi Yan
Cc: Joshua Hahn
Cc: Rakie Kim
Cc: Byungchul Park
Cc: Gregory Price
Cc: Ying Huang
Cc: Alistair Popple
Cc: Oscar Salvador
Cc: Lorenzo Stoakes
Cc: Baolin Wang
Cc: "Liam R. Howlett"
Cc: Nico Pache
Cc: Ryan Roberts
Cc: Dev Jain
Cc: Barry Song
Cc: Lyude Paul
Cc: Danilo Krummrich
Cc: David Airlie
Cc: Simona Vetter
Cc: Ralph Campbell
Cc: Mika Penttilä
Cc: Matthew Brost
Cc: Francois Dugast
Signed-off-by: Andrew Morton
---

diff --git a/mm/migrate_device.c b/mm/migrate_device.c
index 833ce5eafa40..2069971d513c 100644
--- a/mm/migrate_device.c
+++ b/mm/migrate_device.c
@@ -54,6 +54,53 @@ static int migrate_vma_collect_hole(unsigned long start,
 	return 0;
 }
 
+/**
+ * migrate_vma_split_folio() - Helper function to split a THP folio
+ * @folio: the folio to split
+ * @fault_page: struct page associated with the fault if any
+ *
+ * Returns 0 on success
+ */
+static int migrate_vma_split_folio(struct folio *folio,
+				   struct page *fault_page)
+{
+	int ret;
+	struct folio *fault_folio = fault_page ? page_folio(fault_page) : NULL;
+	struct folio *new_fault_folio = NULL;
+
+	if (folio != fault_folio) {
+		folio_get(folio);
+		folio_lock(folio);
+	}
+
+	ret = split_folio(folio);
+	if (ret) {
+		if (folio != fault_folio) {
+			folio_unlock(folio);
+			folio_put(folio);
+		}
+		return ret;
+	}
+
+	new_fault_folio = fault_page ? page_folio(fault_page) : NULL;
+
+	/*
+	 * Ensure the lock is held on the correct
+	 * folio after the split
+	 */
+	if (!new_fault_folio) {
+		folio_unlock(folio);
+		folio_put(folio);
+	} else if (folio != new_fault_folio) {
+		folio_get(new_fault_folio);
+		folio_lock(new_fault_folio);
+		folio_unlock(folio);
+		folio_put(folio);
+	}
+
+	return 0;
+}
+
 static int migrate_vma_collect_pmd(pmd_t *pmdp,
 				   unsigned long start,
 				   unsigned long end,
@@ -137,6 +184,8 @@ again:
 		 * page table entry. Other special swap entries are not
 		 * migratable, and we ignore regular swapped page.
 		 */
+		struct folio *folio;
+
 		entry = pte_to_swp_entry(pte);
 		if (!is_device_private_entry(entry))
 			goto next;
@@ -148,6 +197,23 @@ again:
 		    pgmap->owner != migrate->pgmap_owner)
 			goto next;
 
+		folio = page_folio(page);
+		if (folio_test_large(folio)) {
+			int ret;
+
+			pte_unmap_unlock(ptep, ptl);
+			ret = migrate_vma_split_folio(folio,
+						      migrate->fault_page);
+
+			if (ret) {
+				ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl);
+				goto next;
+			}
+
+			addr = start;
+			goto again;
+		}
+
 		mpfn = migrate_pfn(page_to_pfn(page)) |
 				MIGRATE_PFN_MIGRATE;
 		if (is_writable_device_private_entry(entry))
@@ -172,6 +238,22 @@ again:
 			    pgmap->owner != migrate->pgmap_owner)
 				goto next;
 		}
+		folio = page_folio(page);
+		if (folio_test_large(folio)) {
+			int ret;
+
+			pte_unmap_unlock(ptep, ptl);
+			ret = migrate_vma_split_folio(folio,
+						      migrate->fault_page);
+
+			if (ret) {
+				ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl);
+				goto next;
+			}
+
+			addr = start;
+			goto again;
+		}
 		mpfn = migrate_pfn(pfn) | MIGRATE_PFN_MIGRATE;
 		mpfn |= pte_write(pte) ? MIGRATE_PFN_WRITE : 0;
 	}
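
A rough driver-side sketch of the situation the patch addresses, not part of
the change itself: a migrate_vma collection range that covers only part of a
large device-private folio, which the walk in migrate_vma_collect_pmd() now
splits so the covered pages can still be collected.  The function name
demo_collect_partial_thp(), the owner cookie and the 2-page range are made up
for illustration; error handling is elided; addr is assumed page aligned and
the caller is assumed to hold the mmap lock, as migrate_vma_setup() requires.

/*
 * Illustrative sketch only: collect a 2-page range that lands inside a
 * large device-private folio.  With this change the collection walk
 * splits the folio instead of leaving the range uncollectable.
 */
#include <linux/migrate.h>
#include <linux/mm.h>

static void demo_collect_partial_thp(struct vm_area_struct *vma,
				     unsigned long addr, void *owner)
{
	unsigned long src[2] = { 0 }, dst[2] = { 0 };
	struct migrate_vma args = {
		.vma		= vma,
		.start		= addr,			/* inside a large folio */
		.end		= addr + 2 * PAGE_SIZE,	/* partial coverage */
		.src		= src,
		.dst		= dst,
		.pgmap_owner	= owner,
		.flags		= MIGRATE_VMA_SELECT_DEVICE_PRIVATE,
	};

	if (migrate_vma_setup(&args))
		return;

	/*
	 * args.cpages counts the entries collected for migration; after
	 * the split the covered pages are collected individually.  A real
	 * driver would now allocate destination pages, fill args.dst,
	 * copy the data and call migrate_vma_pages().  Leaving args.dst
	 * empty simply aborts the migration at finalize time.
	 */
	migrate_vma_finalize(&args);
}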