www.infradead.org Git - users/jedix/linux-maple.git/commitdiff
mm: remove the vma check in migrate_vma_setup()
author: Alistair Popple <apopple@nvidia.com>
Tue, 31 May 2022 20:00:32 +0000 (15:00 -0500)
committer: Liam R. Howlett <Liam.Howlett@oracle.com>
Wed, 20 Jul 2022 00:15:01 +0000 (20:15 -0400)
migrate_vma_setup() checks that a valid vma is passed so that the page
tables can be walked to find the pfns associated with a given address
range.  However in some cases the pfns are already known, such as when
migrating device coherent pages during pin_user_pages() meaning a valid
vma isn't required.

Link: https://lkml.kernel.org/r/20220531200041.24904-5-alex.sierra@amd.com
Signed-off-by: Alistair Popple <apopple@nvidia.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Felix Kuehling <Felix.Kuehling@amd.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Jason Gunthorpe <jgg@nvidia.com>
Cc: Jerome Glisse <jglisse@redhat.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Ralph Campbell <rcampbell@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/migrate_device.c

index 18bc6483f63a2663652209d6c7fd1f57b43831a8..cf9668376c5a8f08e89db530c77dd2517e0b11cb 100644 (file)
@@ -486,24 +486,24 @@ int migrate_vma_setup(struct migrate_vma *args)
 
        args->start &= PAGE_MASK;
        args->end &= PAGE_MASK;
-       if (!args->vma || is_vm_hugetlb_page(args->vma) ||
-           (args->vma->vm_flags & VM_SPECIAL) || vma_is_dax(args->vma))
-               return -EINVAL;
-       if (nr_pages <= 0)
-               return -EINVAL;
-       if (args->start < args->vma->vm_start ||
-           args->start >= args->vma->vm_end)
-               return -EINVAL;
-       if (args->end <= args->vma->vm_start || args->end > args->vma->vm_end)
-               return -EINVAL;
        if (!args->src || !args->dst)
                return -EINVAL;
-
-       memset(args->src, 0, sizeof(*args->src) * nr_pages);
-       args->cpages = 0;
-       args->npages = 0;
-
-       migrate_vma_collect(args);
+       if (args->vma) {
+               if (is_vm_hugetlb_page(args->vma) ||
+                   (args->vma->vm_flags & VM_SPECIAL) || vma_is_dax(args->vma))
+                       return -EINVAL;
+               if (args->start < args->vma->vm_start ||
+                   args->start >= args->vma->vm_end)
+                       return -EINVAL;
+               if (args->end <= args->vma->vm_start ||
+                   args->end > args->vma->vm_end)
+                       return -EINVAL;
+               memset(args->src, 0, sizeof(*args->src) * nr_pages);
+               args->cpages = 0;
+               args->npages = 0;
+
+               migrate_vma_collect(args);
+       }
 
        if (args->cpages)
                migrate_vma_unmap(args);
@@ -685,7 +685,7 @@ void migrate_vma_pages(struct migrate_vma *migrate)
                        continue;
                }
 
-               if (!page) {
+               if (!page && migrate->vma) {
                        if (!(migrate->src[i] & MIGRATE_PFN_MIGRATE))
                                continue;
                        if (!notified) {