vdpa: try to fix the potential crash due to misusing __GFP_NOFAIL
Author:     Barry Song <v-songbaohua@oppo.com>
AuthorDate: Wed, 31 Jul 2024 00:01:52 +0000 (12:01 +1200)
Commit:     Andrew Morton <akpm@linux-foundation.org>
CommitDate: Sat, 17 Aug 2024 00:52:43 +0000 (17:52 -0700)
Patch series "mm: clarify nofail memory allocation", v2.

__GFP_NOFAIL carries the semantics of never failing, so its callers
do not check the return value:
  %__GFP_NOFAIL: The VM implementation _must_ retry infinitely: the caller
  cannot handle allocation failures. The allocation could block
  indefinitely but will never return with failure. Testing for
  failure is pointless.

However, __GFP_NOFAIL can sometimes fail if the request exceeds size limits
or is used with GFP_ATOMIC/GFP_NOWAIT in a non-sleepable context.  This can
expose security vulnerabilities due to potential NULL dereferences.
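For illustration, a minimal hypothetical caller showing the bug class
(struct foo and its field are made up; the unchecked dereference is the
point):

        /* Caller trusts __GFP_NOFAIL and never checks for NULL ... */
        struct foo *f = kmalloc(sizeof(*f), GFP_ATOMIC | __GFP_NOFAIL);

        /* ... but in a non-sleepable context the allocator may still
         * return NULL, so this dereference can crash. */
        f->bar = 1;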

Since __GFP_NOFAIL does not support non-blocking allocation, we introduce
GFP_NOFAIL, which includes blocking semantics, and encourage using
GFP_NOFAIL as a replacement for __GFP_NOFAIL in non-mm code.

If we must still fail a nofail allocation, we should trigger a BUG rather
than exposing NULL dereferences to callers who do not check the return
value.
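In outline (a sketch of this series' direction, not the final mm code), the
nofail failure path would become something like:

        if (unlikely(!page && (gfp_mask & __GFP_NOFAIL)))
                BUG();  /* crash loudly instead of handing NULL to a
                         * caller that will blindly dereference it */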

* The discussion started from this topic:
 [PATCH RFC] mm: warn potential return NULL for kmalloc_array and
             kvmalloc_array with __GFP_NOFAIL

 https://lore.kernel.org/linux-mm/20240717230025.77361-1-21cnbao@gmail.com/

This patch (of 4):

mm doesn't support non-blockable __GFP_NOFAIL allocation, because
__GFP_NOFAIL without direct reclaim may just result in a busy loop within
non-sleepable contexts; the allocator therefore warns and fails such
requests:

static inline struct page *
__alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
                                                struct alloc_context *ac)
{
        ...
        /*
         * Make sure that __GFP_NOFAIL request doesn't leak out and make sure
         * we always retry
         */
        if (gfp_mask & __GFP_NOFAIL) {
                /*
                 * All existing users of the __GFP_NOFAIL are blockable, so warn
                 * of any new users that actually require GFP_NOWAIT
                 */
                if (WARN_ON_ONCE_GFP(!can_direct_reclaim, gfp_mask))
                        goto fail;
                ...
        }
        ...
fail:
        warn_alloc(gfp_mask, ac->nodemask,
                        "page allocation failure: order:%u", order);
got_pg:
        return page;
}

Let's move the memory allocation out of the atomic context and use the
normal sleepable context to get pages.
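In outline, the reworked flow (simplified from the vduse_domain_release()
hunk in the diff below) preallocates the replacement pages in sleepable
context, then only copies data while the lock is held:

        struct page **pages;

        /* Sleepable context: GFP_KERNEL | __GFP_NOFAIL may block, won't fail. */
        pages = vduse_domain_alloc_pages_to_remove_bounce(domain);

        spin_lock(&domain->iotlb_lock);
        /* Atomic context: pages are only consumed here, no allocation. */
        vduse_domain_remove_user_bounce_pages(domain, pages);
        spin_unlock(&domain->iotlb_lock);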

Link: https://lkml.kernel.org/r/20240731000155.109583-1-21cnbao@gmail.com
Link: https://lkml.kernel.org/r/20240731000155.109583-2-21cnbao@gmail.com
Signed-off-by: Barry Song <v-songbaohua@oppo.com>
Cc: "Michael S. Tsirkin" <mst@redhat.com>
Cc: Jason Wang <jasowang@redhat.com>
Cc: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
Cc: "Eugenio Pérez" <eperezma@redhat.com>
Cc: Maxime Coquelin <maxime.coquelin@redhat.com>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: Christoph Lameter <cl@linux.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Hailong.Liu <hailong.liu@oppo.com>
Cc: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Lorenzo Stoakes <lstoakes@gmail.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: Roman Gushchin <roman.gushchin@linux.dev>
Cc: Uladzislau Rezki (Sony) <urezki@gmail.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Kees Cook <kees@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
drivers/vdpa/vdpa_user/iova_domain.c
drivers/vdpa/vdpa_user/iova_domain.h
drivers/vdpa/vdpa_user/vduse_dev.c

diff --git a/drivers/vdpa/vdpa_user/iova_domain.c b/drivers/vdpa/vdpa_user/iova_domain.c
index 791d38d6284c562b21f1eec662a95bdf2b4c2d12..06875a1df385f0bc74b0eb913f8935066f5e8d55 100644
--- a/drivers/vdpa/vdpa_user/iova_domain.c
+++ b/drivers/vdpa/vdpa_user/iova_domain.c
@@ -283,7 +283,23 @@ out:
        return ret;
 }
 
-void vduse_domain_remove_user_bounce_pages(struct vduse_iova_domain *domain)
+struct page **vduse_domain_alloc_pages_to_remove_bounce(struct vduse_iova_domain *domain)
+{
+       struct page **pages;
+       unsigned long count, i;
+
+       if (!domain->user_bounce_pages)
+               return NULL;
+
+       count = domain->bounce_size >> PAGE_SHIFT;
+       pages = kmalloc_array(count, sizeof(*pages), GFP_KERNEL | __GFP_NOFAIL);
+       for (i = 0; i < count; i++)
+               pages[i] = alloc_page(GFP_KERNEL | __GFP_NOFAIL);
+
+       return pages;
+}
+
+void vduse_domain_remove_user_bounce_pages(struct vduse_iova_domain *domain, struct page **pages)
 {
        struct vduse_bounce_map *map;
        unsigned long i, count;
@@ -294,15 +310,16 @@ void vduse_domain_remove_user_bounce_pages(struct vduse_iova_domain *domain)
 
        count = domain->bounce_size >> PAGE_SHIFT;
        for (i = 0; i < count; i++) {
-               struct page *page = NULL;
+               struct page *page = pages[i];
 
                map = &domain->bounce_maps[i];
-               if (WARN_ON(!map->bounce_page))
+               if (WARN_ON(!map->bounce_page)) {
+                       put_page(page);
                        continue;
+               }
 
                /* Copy user page to kernel page if it's in use */
                if (map->orig_phys != INVALID_PHYS_ADDR) {
-                       page = alloc_page(GFP_ATOMIC | __GFP_NOFAIL);
                        memcpy_from_page(page_address(page),
                                         map->bounce_page, 0, PAGE_SIZE);
                }
@@ -310,6 +327,7 @@ void vduse_domain_remove_user_bounce_pages(struct vduse_iova_domain *domain)
                map->bounce_page = page;
        }
        domain->user_bounce_pages = false;
+       kfree(pages);
 out:
        write_unlock(&domain->bounce_lock);
 }
@@ -543,10 +561,13 @@ static int vduse_domain_mmap(struct file *file, struct vm_area_struct *vma)
 static int vduse_domain_release(struct inode *inode, struct file *file)
 {
        struct vduse_iova_domain *domain = file->private_data;
+       struct page **pages;
+
+       pages = vduse_domain_alloc_pages_to_remove_bounce(domain);
 
        spin_lock(&domain->iotlb_lock);
        vduse_iotlb_del_range(domain, 0, ULLONG_MAX);
-       vduse_domain_remove_user_bounce_pages(domain);
+       vduse_domain_remove_user_bounce_pages(domain, pages);
        vduse_domain_free_kernel_bounce_pages(domain);
        spin_unlock(&domain->iotlb_lock);
        put_iova_domain(&domain->stream_iovad);
diff --git a/drivers/vdpa/vdpa_user/iova_domain.h b/drivers/vdpa/vdpa_user/iova_domain.h
index f92f22a7267d70f46d008723ecb40f36c841c1f2..716a6de4d4129357ab513093ee0b35fa6a0c8670 100644
--- a/drivers/vdpa/vdpa_user/iova_domain.h
+++ b/drivers/vdpa/vdpa_user/iova_domain.h
@@ -74,7 +74,10 @@ void vduse_domain_reset_bounce_map(struct vduse_iova_domain *domain);
 int vduse_domain_add_user_bounce_pages(struct vduse_iova_domain *domain,
                                       struct page **pages, int count);
 
-void vduse_domain_remove_user_bounce_pages(struct vduse_iova_domain *domain);
+void vduse_domain_remove_user_bounce_pages(struct vduse_iova_domain *domain,
+                                          struct page **pages);
+
+struct page **vduse_domain_alloc_pages_to_remove_bounce(struct vduse_iova_domain *domain);
 
 void vduse_domain_destroy(struct vduse_iova_domain *domain);
 
diff --git a/drivers/vdpa/vdpa_user/vduse_dev.c b/drivers/vdpa/vdpa_user/vduse_dev.c
index 7ae99691efdf9250a2d812e8a7984dcad0ffc7aa..5d8d5810df570e62a238a4cb12f00fd586fcccce 100644
--- a/drivers/vdpa/vdpa_user/vduse_dev.c
+++ b/drivers/vdpa/vdpa_user/vduse_dev.c
@@ -1030,6 +1030,7 @@ unlock:
 static int vduse_dev_dereg_umem(struct vduse_dev *dev,
                                u64 iova, u64 size)
 {
+       struct page **pages;
        int ret;
 
        mutex_lock(&dev->mem_lock);
@@ -1044,7 +1045,8 @@ static int vduse_dev_dereg_umem(struct vduse_dev *dev,
        if (dev->umem->iova != iova || size != dev->domain->bounce_size)
                goto unlock;
 
-       vduse_domain_remove_user_bounce_pages(dev->domain);
+       pages = vduse_domain_alloc_pages_to_remove_bounce(dev->domain);
+       vduse_domain_remove_user_bounce_pages(dev->domain, pages);
        unpin_user_pages_dirty_lock(dev->umem->pages,
                                    dev->umem->npages, true);
        atomic64_sub(dev->umem->npages, &dev->umem->mm->pinned_vm);