www.infradead.org Git - users/jedix/linux-maple.git/commitdiff
lib/test_hmm: make dmirror_atomic_map() consume a single page
author: David Hildenbrand <david@redhat.com>
Wed, 26 Feb 2025 13:22:53 +0000 (14:22 +0100)
committer: Andrew Morton <akpm@linux-foundation.org>
Tue, 4 Mar 2025 05:50:35 +0000 (21:50 -0800)
Patch series "mm: cleanups for device-exclusive entries (hmm)", v2.

Some smaller device-exclusive cleanups I have lying around.

This patch (of 5):

The caller now always passes a single page; let's simplify, and return "0"
on success.

Link: https://lkml.kernel.org/r/20250226132257.2826043-1-david@redhat.com
Link: https://lkml.kernel.org/r/20250226132257.2826043-2-david@redhat.com
Signed-off-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Alistair Popple <apopple@nvidia.com>
Cc: Jason Gunthorpe <jgg@nvidia.com>
Cc: Jérôme Glisse <jglisse@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
lib/test_hmm.c

index 155b18cd9f2afba75aa506858bafafe701e63428..5b144bc5c4ec74abb673062a8ff07a0f9156c633 100644 (file)
@@ -707,34 +707,23 @@ static int dmirror_check_atomic(struct dmirror *dmirror, unsigned long start,
        return 0;
 }
 
-static int dmirror_atomic_map(unsigned long start, unsigned long end,
-                             struct page **pages, struct dmirror *dmirror)
+static int dmirror_atomic_map(unsigned long addr, struct page *page,
+               struct dmirror *dmirror)
 {
-       unsigned long pfn, mapped = 0;
-       int i;
+       void *entry;
 
        /* Map the migrated pages into the device's page tables. */
        mutex_lock(&dmirror->mutex);
 
-       for (i = 0, pfn = start >> PAGE_SHIFT; pfn < (end >> PAGE_SHIFT); pfn++, i++) {
-               void *entry;
-
-               if (!pages[i])
-                       continue;
-
-               entry = pages[i];
-               entry = xa_tag_pointer(entry, DPT_XA_TAG_ATOMIC);
-               entry = xa_store(&dmirror->pt, pfn, entry, GFP_ATOMIC);
-               if (xa_is_err(entry)) {
-                       mutex_unlock(&dmirror->mutex);
-                       return xa_err(entry);
-               }
-
-               mapped++;
+       entry = xa_tag_pointer(page, DPT_XA_TAG_ATOMIC);
+       entry = xa_store(&dmirror->pt, addr >> PAGE_SHIFT, entry, GFP_ATOMIC);
+       if (xa_is_err(entry)) {
+               mutex_unlock(&dmirror->mutex);
+               return xa_err(entry);
        }
 
        mutex_unlock(&dmirror->mutex);
-       return mapped;
+       return 0;
 }
 
 static int dmirror_migrate_finalize_and_map(struct migrate_vma *args,
@@ -804,8 +793,7 @@ static int dmirror_exclusive(struct dmirror *dmirror,
                        break;
                }
 
-               ret = dmirror_atomic_map(addr, addr + PAGE_SIZE, &page, dmirror);
-               ret = ret == 1 ? 0 : -EBUSY;
+               ret = dmirror_atomic_map(addr, page, dmirror);
                folio_unlock(folio);
                folio_put(folio);
        }