lib/test_hmm: add large page allocation failure testing
author		Balbir Singh <balbirs@nvidia.com>
		Wed, 1 Oct 2025 06:57:03 +0000 (16:57 +1000)
committer	Andrew Morton <akpm@linux-foundation.org>
		Wed, 15 Oct 2025 04:28:39 +0000 (21:28 -0700)
Add HMM_DMIRROR_FLAG_FAIL_ALLOC flag to simulate large page allocation
failures, enabling testing of split migration code paths.

This test flag allows validation of the fallback behavior when the
destination device cannot allocate compound pages.  This is useful for
testing the split migration functionality.
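
As a usage sketch (not part of this patch): a selftest can arm the flag
through the new HMM_DMIRROR_FLAGS ioctl before triggering a migration.
The ioctl number, flag, and struct hmm_dmirror_cmd layout come from
lib/test_hmm_uapi.h as modified below; the /dev/hmm_dmirror0 device node
follows the existing HMM selftests and, like the standalone program
framing, is an assumption here.

	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include "test_hmm_uapi.h"

	/* Arm a single simulated large-page allocation failure. */
	static int dmirror_fail_next_alloc(int fd)
	{
		struct hmm_dmirror_cmd cmd;

		memset(&cmd, 0, sizeof(cmd));
		/* The driver copies cmd.npages into dmirror->flags. */
		cmd.npages = HMM_DMIRROR_FLAG_FAIL_ALLOC;
		return ioctl(fd, HMM_DMIRROR_FLAGS, &cmd);
	}

	int main(void)
	{
		int fd = open("/dev/hmm_dmirror0", O_RDWR); /* assumed node */

		if (fd < 0) {
			perror("open");
			return 1;
		}
		if (dmirror_fail_next_alloc(fd)) {
			perror("HMM_DMIRROR_FLAGS");
			return 1;
		}
		/*
		 * The next THP-backed migration through this device should
		 * see its compound allocation fail once and take the split
		 * (base-page) fallback path.
		 */
		return 0;
	}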

Link: https://lkml.kernel.org/r/20251001065707.920170-13-balbirs@nvidia.com
Signed-off-by: Balbir Singh <balbirs@nvidia.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Zi Yan <ziy@nvidia.com>
Cc: Joshua Hahn <joshua.hahnjy@gmail.com>
Cc: Rakie Kim <rakie.kim@sk.com>
Cc: Byungchul Park <byungchul@sk.com>
Cc: Gregory Price <gourry@gourry.net>
Cc: Ying Huang <ying.huang@linux.alibaba.com>
Cc: Alistair Popple <apopple@nvidia.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: "Liam R. Howlett" <Liam.Howlett@oracle.com>
Cc: Nico Pache <npache@redhat.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: Dev Jain <dev.jain@arm.com>
Cc: Barry Song <baohua@kernel.org>
Cc: Lyude Paul <lyude@redhat.com>
Cc: Danilo Krummrich <dakr@kernel.org>
Cc: David Airlie <airlied@gmail.com>
Cc: Simona Vetter <simona@ffwll.ch>
Cc: Ralph Campbell <rcampbell@nvidia.com>
Cc: Mika Penttilä <mpenttil@redhat.com>
Cc: Matthew Brost <matthew.brost@intel.com>
Cc: Francois Dugast <francois.dugast@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
lib/test_hmm.c
lib/test_hmm_uapi.h

diff --git a/lib/test_hmm.c b/lib/test_hmm.c
index df429670633e41a322af39fdc950544d9102e5da..72a8b2f38d8a98bd802f05038060ef894f1c9745 100644
--- a/lib/test_hmm.c
+++ b/lib/test_hmm.c
@@ -92,6 +92,7 @@ struct dmirror {
        struct xarray                   pt;
        struct mmu_interval_notifier    notifier;
        struct mutex                    mutex;
+       __u64                   flags;
 };
 
 /*
@@ -699,7 +700,12 @@ static void dmirror_migrate_alloc_and_copy(struct migrate_vma *args,
                     page_to_pfn(spage)))
                        goto next;
 
-               dpage = dmirror_devmem_alloc_page(dmirror, is_large);
+               if (dmirror->flags & HMM_DMIRROR_FLAG_FAIL_ALLOC) {
+                       dmirror->flags &= ~HMM_DMIRROR_FLAG_FAIL_ALLOC;
+                       dpage = NULL;
+               } else
+                       dpage = dmirror_devmem_alloc_page(dmirror, is_large);
+
                if (!dpage) {
                        struct folio *folio;
                        unsigned long i;
@@ -959,44 +965,55 @@ static vm_fault_t dmirror_devmem_fault_alloc_and_copy(struct migrate_vma *args,
 
                spage = BACKING_PAGE(spage);
                order = folio_order(page_folio(spage));
-
                if (order)
+                       *dst = MIGRATE_PFN_COMPOUND;
+               if (*src & MIGRATE_PFN_WRITE)
+                       *dst |= MIGRATE_PFN_WRITE;
+
+               if (dmirror->flags & HMM_DMIRROR_FLAG_FAIL_ALLOC) {
+                       dmirror->flags &= ~HMM_DMIRROR_FLAG_FAIL_ALLOC;
+                       *dst &= ~MIGRATE_PFN_COMPOUND;
+                       dpage = NULL;
+               } else if (order) {
                        dpage = folio_page(vma_alloc_folio(GFP_HIGHUSER_MOVABLE,
                                                order, args->vma, addr), 0);
-               else
-                       dpage = alloc_page_vma(GFP_HIGHUSER_MOVABLE, args->vma, addr);
-
-               /* Try with smaller pages if large allocation fails */
-               if (!dpage && order) {
+               } else {
                        dpage = alloc_page_vma(GFP_HIGHUSER_MOVABLE, args->vma, addr);
-                       if (!dpage)
-                               return VM_FAULT_OOM;
-                       order = 0;
                }
 
+               if (!dpage && !order)
+                       return VM_FAULT_OOM;
+
                pr_debug("migrating from sys to dev pfn src: 0x%lx pfn dst: 0x%lx\n",
                                page_to_pfn(spage), page_to_pfn(dpage));
-               lock_page(dpage);
-               xa_erase(&dmirror->pt, addr >> PAGE_SHIFT);
-               copy_highpage(dpage, spage);
-               *dst = migrate_pfn(page_to_pfn(dpage));
-               if (*src & MIGRATE_PFN_WRITE)
-                       *dst |= MIGRATE_PFN_WRITE;
-               if (order)
-                       *dst |= MIGRATE_PFN_COMPOUND;
+
+               if (dpage) {
+                       lock_page(dpage);
+                       *dst |= migrate_pfn(page_to_pfn(dpage));
+               }
 
                for (i = 0; i < (1 << order); i++) {
                        struct page *src_page;
                        struct page *dst_page;
 
+                       /* Try with smaller pages if large allocation fails */
+                       if (!dpage && order) {
+                               dpage = alloc_page_vma(GFP_HIGHUSER_MOVABLE, args->vma, addr);
+                               lock_page(dpage);
+                               dst[i] = migrate_pfn(page_to_pfn(dpage));
+                               dst_page = pfn_to_page(page_to_pfn(dpage));
+                               dpage = NULL; /* For the next iteration */
+                       } else {
+                               dst_page = pfn_to_page(page_to_pfn(dpage) + i);
+                       }
+
                        src_page = pfn_to_page(page_to_pfn(spage) + i);
-                       dst_page = pfn_to_page(page_to_pfn(dpage) + i);
 
                        xa_erase(&dmirror->pt, addr >> PAGE_SHIFT);
+                       addr += PAGE_SIZE;
                        copy_highpage(dst_page, src_page);
                }
 next:
-               addr += PAGE_SIZE << order;
                src += 1 << order;
                dst += 1 << order;
        }
@@ -1514,6 +1531,10 @@ static long dmirror_fops_unlocked_ioctl(struct file *filp,
                dmirror_device_remove_chunks(dmirror->mdevice);
                ret = 0;
                break;
+       case HMM_DMIRROR_FLAGS:
+               dmirror->flags = cmd.npages;
+               ret = 0;
+               break;
 
        default:
                return -EINVAL;
diff --git a/lib/test_hmm_uapi.h b/lib/test_hmm_uapi.h
index 8c818a2cf4f69a93317842bfa4aa11bb45626805..f94c6d457338283708ac2132b19179a44d421dc5 100644
--- a/lib/test_hmm_uapi.h
+++ b/lib/test_hmm_uapi.h
@@ -37,6 +37,9 @@ struct hmm_dmirror_cmd {
 #define HMM_DMIRROR_EXCLUSIVE          _IOWR('H', 0x05, struct hmm_dmirror_cmd)
 #define HMM_DMIRROR_CHECK_EXCLUSIVE    _IOWR('H', 0x06, struct hmm_dmirror_cmd)
 #define HMM_DMIRROR_RELEASE            _IOWR('H', 0x07, struct hmm_dmirror_cmd)
+#define HMM_DMIRROR_FLAGS              _IOWR('H', 0x08, struct hmm_dmirror_cmd)
+
+#define HMM_DMIRROR_FLAG_FAIL_ALLOC    (1ULL << 0)
 
 /*
  * Values returned in hmm_dmirror_cmd.ptr for HMM_DMIRROR_SNAPSHOT.
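
Note on the interface, as the lib/test_hmm.c hunks above show: the flag is
one-shot.  Both consumers clear it (dmirror->flags &=
~HMM_DMIRROR_FLAG_FAIL_ALLOC) as soon as they simulate one failed
allocation, so a test must re-issue HMM_DMIRROR_FLAGS before each
migration it wants to perturb.  The value is passed in the existing
hmm_dmirror_cmd.npages field, which avoids adding a new UAPI structure
for a debug-only knob.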