mm/migrate_device: handle partially mapped folios during collection
author Balbir Singh <balbirs@nvidia.com>
Mon, 8 Sep 2025 04:57:23 +0000 (14:57 +1000)
committer Andrew Morton <akpm@linux-foundation.org>
Fri, 12 Sep 2025 00:26:05 +0000 (17:26 -0700)
Extend migrate_vma_collect_pmd() to handle partially mapped large folios
that require splitting before migration can proceed.

During the PTE walk in the collection phase, if a large folio is only
partially mapped within the migration range, it must be split into
order-0 folios so that each page can be collected and migrated
individually; the walk then restarts over the affected range.
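
For context, a hedged sketch of the driver-side sequence that reaches this
code: a migrate_vma_setup() call whose range covers only part of a
device-private THP forces the collection walk to split the folio. The helper
name, VMA, and owner below are hypothetical; migrate_vma_setup(),
migrate_vma_pages(), migrate_vma_finalize() and the struct migrate_vma fields
are the real API from include/linux/migrate.h.

#include <linux/migrate.h>
#include <linux/mm.h>

/*
 * Hypothetical driver helper: migrate one page of a device-private THP
 * back to system memory. Caller holds mmap_read_lock(vma->vm_mm).
 * Because [addr, addr + PAGE_SIZE) covers only part of the large folio,
 * migrate_vma_collect_pmd() takes the split path added below.
 */
static int demo_migrate_one_page(struct vm_area_struct *vma,
				 unsigned long addr, void *owner)
{
	unsigned long src = 0, dst = 0;
	struct migrate_vma args = {
		.vma		= vma,
		.start		= addr,
		.end		= addr + PAGE_SIZE,
		.src		= &src,
		.dst		= &dst,
		.pgmap_owner	= owner,	/* must match pgmap->owner */
		.flags		= MIGRATE_VMA_SELECT_DEVICE_PRIVATE,
	};
	int ret;

	ret = migrate_vma_setup(&args);	/* collection phase runs here */
	if (ret)
		return ret;
	if (!args.cpages)
		return 0;	/* nothing collected, e.g. the split failed */

	/* ... allocate a destination page and fill dst here ... */

	migrate_vma_pages(&args);
	migrate_vma_finalize(&args);
	return 0;
}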

Link: https://lkml.kernel.org/r/8e1ef390-ab28-4294-8528-c57453e3acc1@nvidia.com
Signed-off-by: Balbir Singh <balbirs@nvidia.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Zi Yan <ziy@nvidia.com>
Cc: Joshua Hahn <joshua.hahnjy@gmail.com>
Cc: Rakie Kim <rakie.kim@sk.com>
Cc: Byungchul Park <byungchul@sk.com>
Cc: Gregory Price <gourry@gourry.net>
Cc: Ying Huang <ying.huang@linux.alibaba.com>
Cc: Alistair Popple <apopple@nvidia.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: "Liam R. Howlett" <Liam.Howlett@oracle.com>
Cc: Nico Pache <npache@redhat.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: Dev Jain <dev.jain@arm.com>
Cc: Barry Song <baohua@kernel.org>
Cc: Lyude Paul <lyude@redhat.com>
Cc: Danilo Krummrich <dakr@kernel.org>
Cc: David Airlie <airlied@gmail.com>
Cc: Simona Vetter <simona@ffwll.ch>
Cc: Ralph Campbell <rcampbell@nvidia.com>
Cc: Mika Penttilä <mpenttil@redhat.com>
Cc: Matthew Brost <matthew.brost@intel.com>
Cc: Francois Dugast <francois.dugast@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/migrate_device.c

index 833ce5eafa4063b963e89c4f04f96032ffd05771..2069971d513c592c3980f9fa7df9a8e444874783 100644
@@ -54,6 +54,53 @@ static int migrate_vma_collect_hole(unsigned long start,
        return 0;
 }
 
+/**
+ * migrate_vma_split_folio() - Helper function to split a THP folio
+ * @folio: the folio to split
+ * @fault_page: struct page associated with the fault if any
+ *
+ * Returns 0 on success
+ */
+static int migrate_vma_split_folio(struct folio *folio,
+                                  struct page *fault_page)
+{
+       int ret;
+       struct folio *fault_folio = fault_page ? page_folio(fault_page) : NULL;
+       struct folio *new_fault_folio = NULL;
+
+       if (folio != fault_folio) {
+               folio_get(folio);
+               folio_lock(folio);
+       }
+
+       ret = split_folio(folio);
+       if (ret) {
+               if (folio != fault_folio) {
+                       folio_unlock(folio);
+                       folio_put(folio);
+               }
+               return ret;
+       }
+
+       new_fault_folio = fault_page ? page_folio(fault_page) : NULL;
+
+       /*
+        * Ensure the lock is held on the correct
+        * folio after the split
+        */
+       if (!new_fault_folio) {
+               folio_unlock(folio);
+               folio_put(folio);
+       } else if (folio != new_fault_folio) {
+               folio_get(new_fault_folio);
+               folio_lock(new_fault_folio);
+               folio_unlock(folio);
+               folio_put(folio);
+       }
+
+       return 0;
+}
+
 static int migrate_vma_collect_pmd(pmd_t *pmdp,
                                   unsigned long start,
                                   unsigned long end,
@@ -137,6 +184,8 @@ again:
                         * page table entry. Other special swap entries are not
                         * migratable, and we ignore regular swapped page.
                         */
+                       struct folio *folio;
+
                        entry = pte_to_swp_entry(pte);
                        if (!is_device_private_entry(entry))
                                goto next;
@@ -148,6 +197,23 @@ again:
                            pgmap->owner != migrate->pgmap_owner)
                                goto next;
 
+                       folio = page_folio(page);
+                       if (folio_test_large(folio)) {
+                               int ret;
+
+                               pte_unmap_unlock(ptep, ptl);
+                               ret = migrate_vma_split_folio(folio,
+                                                         migrate->fault_page);
+
+                               if (ret) {
+                                       ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl);
+                                       goto next;
+                               }
+
+                               addr = start;
+                               goto again;
+                       }
+
                        mpfn = migrate_pfn(page_to_pfn(page)) |
                                        MIGRATE_PFN_MIGRATE;
                        if (is_writable_device_private_entry(entry))
@@ -172,6 +238,22 @@ again:
                                        pgmap->owner != migrate->pgmap_owner)
                                        goto next;
                        }
+                       folio = page_folio(page);
+                       if (folio_test_large(folio)) {
+                               int ret;
+
+                               pte_unmap_unlock(ptep, ptl);
+                               ret = migrate_vma_split_folio(folio,
+                                                         migrate->fault_page);
+
+                               if (ret) {
+                                       ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl);
+                                       goto next;
+                               }
+
+                               addr = start;
+                               goto again;
+                       }
                        mpfn = migrate_pfn(pfn) | MIGRATE_PFN_MIGRATE;
                        mpfn |= pte_write(pte) ? MIGRATE_PFN_WRITE : 0;
                }
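
A note on the error path above: when migrate_vma_split_folio() fails, the
walker re-takes the PTL and "goto next" records the entry without
MIGRATE_PFN_MIGRATE set, so that page is simply not selected for migration.
A minimal sketch of how a caller observes this after collection; the function
name is hypothetical, while migrate_pfn_to_page() and MIGRATE_PFN_MIGRATE are
the real accessors from include/linux/migrate.h.

#include <linux/migrate.h>

/*
 * Illustration only: scan the entries filled in by migrate_vma_setup().
 * After this patch, every entry carrying MIGRATE_PFN_MIGRATE refers to
 * an order-0 page; folios that could not be split are left unselected
 * and are skipped here.
 */
static void demo_scan_collected(struct migrate_vma *args)
{
	unsigned long i;

	for (i = 0; i < args->npages; i++) {
		struct page *page;

		if (!(args->src[i] & MIGRATE_PFN_MIGRATE))
			continue;	/* e.g. migrate_vma_split_folio() failed */

		page = migrate_pfn_to_page(args->src[i]);
		/* ... pair 'page' with a newly allocated args->dst[i] ... */
	}
}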