From: Barry Song
Date: Wed, 31 Jul 2024 00:01:52 +0000 (+1200)
Subject: vdpa: try to fix the potential crash due to misusing __GFP_NOFAIL
X-Git-Url: https://www.infradead.org/git/?a=commitdiff_plain;h=da04c5689e88a86c3d6fbc3a6ad65d1ca36a7043;p=users%2Fjedix%2Flinux-maple.git

vdpa: try to fix the potential crash due to misusing __GFP_NOFAIL

Patch series "mm: clarify nofail memory allocation", v2.

__GFP_NOFAIL carries the semantics of never failing, so its callers do not
check the return value:

  %__GFP_NOFAIL: The VM implementation _must_ retry infinitely: the caller
  cannot handle allocation failures.  The allocation could block
  indefinitely but will never return with failure.  Testing for failure
  is pointless.

However, __GFP_NOFAIL can sometimes fail if it exceeds size limits or is
used with GFP_ATOMIC/GFP_NOWAIT in a non-sleepable context.  This can
expose security vulnerabilities due to potential NULL dereferences.
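For illustration only (this snippet is not part of the patch, and the names
"broken_nofail_copy", "lock" and "src" are made up), the misuse being
described looks roughly like this: a nofail allocation is requested from
atomic context and the result is used without a NULL check because
__GFP_NOFAIL is assumed to never fail:

	/* hypothetical sketch of the problematic pattern */
	static void broken_nofail_copy(spinlock_t *lock, void *src)
	{
		struct page *page;

		spin_lock(lock);	/* non-sleepable context */
		page = alloc_page(GFP_ATOMIC | __GFP_NOFAIL);
		/*
		 * Direct reclaim is impossible here, so the allocation can
		 * still fail and return NULL despite __GFP_NOFAIL ...
		 */
		memcpy(page_address(page), src, PAGE_SIZE);	/* ... making this a NULL dereference */
		spin_unlock(lock);
	}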
Since __GFP_NOFAIL does not support non-blocking allocation, we introduce
GFP_NOFAIL with inclusive blocking semantics and encourage using
GFP_NOFAIL as a replacement for __GFP_NOFAIL in non-mm code.

If we must still fail a nofail allocation, we should trigger a BUG rather
than exposing NULL dereferences to callers who do not check the return
value.

* The discussion started from this topic:
  [PATCH RFC] mm: warn potential return NULL for kmalloc_array and
  kvmalloc_array with __GFP_NOFAIL
  https://lore.kernel.org/linux-mm/20240717230025.77361-1-21cnbao@gmail.com/

This patch (of 4):

mm doesn't support non-blockable __GFP_NOFAIL allocation: __GFP_NOFAIL
without direct reclamation may just result in a busy loop within
non-sleepable contexts.

	static inline struct page *
	__alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
						struct alloc_context *ac)
	{
		...
		/*
		 * Make sure that __GFP_NOFAIL request doesn't leak out and make sure
		 * we always retry
		 */
		if (gfp_mask & __GFP_NOFAIL) {
			/*
			 * All existing users of the __GFP_NOFAIL are blockable, so warn
			 * of any new users that actually require GFP_NOWAIT
			 */
			if (WARN_ON_ONCE_GFP(!can_direct_reclaim, gfp_mask))
				goto fail;
			...
		}
		...
	fail:
		warn_alloc(gfp_mask, ac->nodemask,
			"page allocation failure: order:%u", order);
	got_pg:
		return page;
	}

Let's move the memory allocation out of the atomic context and use the
normal sleepable context to get pages.
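Condensed from the vduse_domain_release() hunk in the diff below, the
resulting calling pattern is: allocate the replacement pages first, in a
sleepable context, and only then enter the atomic section:

	struct page **pages;

	/* sleepable context: GFP_KERNEL | __GFP_NOFAIL can block and reclaim */
	pages = vduse_domain_alloc_pages_to_remove_bounce(domain);

	spin_lock(&domain->iotlb_lock);		/* atomic from here on */
	vduse_iotlb_del_range(domain, 0, ULLONG_MAX);
	vduse_domain_remove_user_bounce_pages(domain, pages);
	vduse_domain_free_kernel_bounce_pages(domain);
	spin_unlock(&domain->iotlb_lock);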
Link: https://lkml.kernel.org/r/20240731000155.109583-1-21cnbao@gmail.com
Link: https://lkml.kernel.org/r/20240731000155.109583-2-21cnbao@gmail.com
Signed-off-by: Barry Song
Cc: "Michael S. Tsirkin"
Cc: Jason Wang
Cc: Xuan Zhuo
Cc: "Eugenio Pérez"
Cc: Maxime Coquelin
Cc: Christoph Hellwig
Cc: Christoph Lameter
Cc: David Rientjes
Cc: Hailong.Liu
Cc: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Cc: Joonsoo Kim
Cc: Linus Torvalds
Cc: Lorenzo Stoakes
Cc: Michal Hocko
Cc: Pekka Enberg
Cc: Roman Gushchin
Cc: Uladzislau Rezki (Sony)
Cc: Vlastimil Babka
Cc: Kees Cook
Signed-off-by: Andrew Morton
---

diff --git a/drivers/vdpa/vdpa_user/iova_domain.c b/drivers/vdpa/vdpa_user/iova_domain.c
index 791d38d6284c..06875a1df385 100644
--- a/drivers/vdpa/vdpa_user/iova_domain.c
+++ b/drivers/vdpa/vdpa_user/iova_domain.c
@@ -283,7 +283,23 @@ out:
 	return ret;
 }
 
-void vduse_domain_remove_user_bounce_pages(struct vduse_iova_domain *domain)
+struct page **vduse_domain_alloc_pages_to_remove_bounce(struct vduse_iova_domain *domain)
+{
+	struct page **pages;
+	unsigned long count, i;
+
+	if (!domain->user_bounce_pages)
+		return NULL;
+
+	count = domain->bounce_size >> PAGE_SHIFT;
+	pages = kmalloc_array(count, sizeof(*pages), GFP_KERNEL | __GFP_NOFAIL);
+	for (i = 0; i < count; i++)
+		pages[i] = alloc_page(GFP_KERNEL | __GFP_NOFAIL);
+
+	return pages;
+}
+
+void vduse_domain_remove_user_bounce_pages(struct vduse_iova_domain *domain, struct page **pages)
 {
 	struct vduse_bounce_map *map;
 	unsigned long i, count;
@@ -294,15 +310,16 @@ void vduse_domain_remove_user_bounce_pages(struct vduse_iova_domain *domain)
 
 	count = domain->bounce_size >> PAGE_SHIFT;
 	for (i = 0; i < count; i++) {
-		struct page *page = NULL;
+		struct page *page = pages[i];
 
 		map = &domain->bounce_maps[i];
-		if (WARN_ON(!map->bounce_page))
+		if (WARN_ON(!map->bounce_page)) {
+			put_page(page);
 			continue;
+		}
 
 		/* Copy user page to kernel page if it's in use */
 		if (map->orig_phys != INVALID_PHYS_ADDR) {
-			page = alloc_page(GFP_ATOMIC | __GFP_NOFAIL);
 			memcpy_from_page(page_address(page),
 					 map->bounce_page, 0, PAGE_SIZE);
 		}
@@ -310,6 +327,7 @@ void vduse_domain_remove_user_bounce_pages(struct vduse_iova_domain *domain)
 		map->bounce_page = page;
 	}
 	domain->user_bounce_pages = false;
+	kfree(pages);
 out:
 	write_unlock(&domain->bounce_lock);
 }
@@ -543,10 +561,13 @@ static int vduse_domain_mmap(struct file *file, struct vm_area_struct *vma)
 static int vduse_domain_release(struct inode *inode, struct file *file)
 {
 	struct vduse_iova_domain *domain = file->private_data;
+	struct page **pages;
+
+	pages = vduse_domain_alloc_pages_to_remove_bounce(domain);
 
 	spin_lock(&domain->iotlb_lock);
 	vduse_iotlb_del_range(domain, 0, ULLONG_MAX);
-	vduse_domain_remove_user_bounce_pages(domain);
+	vduse_domain_remove_user_bounce_pages(domain, pages);
 	vduse_domain_free_kernel_bounce_pages(domain);
 	spin_unlock(&domain->iotlb_lock);
 	put_iova_domain(&domain->stream_iovad);
diff --git a/drivers/vdpa/vdpa_user/iova_domain.h b/drivers/vdpa/vdpa_user/iova_domain.h
index f92f22a7267d..716a6de4d412 100644
--- a/drivers/vdpa/vdpa_user/iova_domain.h
+++ b/drivers/vdpa/vdpa_user/iova_domain.h
@@ -74,7 +74,10 @@ void vduse_domain_reset_bounce_map(struct vduse_iova_domain *domain);
 int vduse_domain_add_user_bounce_pages(struct vduse_iova_domain *domain,
 				       struct page **pages, int count);
 
-void vduse_domain_remove_user_bounce_pages(struct vduse_iova_domain *domain);
+void vduse_domain_remove_user_bounce_pages(struct vduse_iova_domain *domain,
+					   struct page **pages);
+
+struct page **vduse_domain_alloc_pages_to_remove_bounce(struct vduse_iova_domain *domain);
 
 void vduse_domain_destroy(struct vduse_iova_domain *domain);
 
diff --git a/drivers/vdpa/vdpa_user/vduse_dev.c b/drivers/vdpa/vdpa_user/vduse_dev.c
index 7ae99691efdf..5d8d5810df57 100644
--- a/drivers/vdpa/vdpa_user/vduse_dev.c
+++ b/drivers/vdpa/vdpa_user/vduse_dev.c
@@ -1030,6 +1030,7 @@ unlock:
 static int vduse_dev_dereg_umem(struct vduse_dev *dev,
 				u64 iova, u64 size)
 {
+	struct page **pages;
 	int ret;
 
 	mutex_lock(&dev->mem_lock);
@@ -1044,7 +1045,8 @@ static int vduse_dev_dereg_umem(struct vduse_dev *dev,
 	if (dev->umem->iova != iova || size != dev->domain->bounce_size)
 		goto unlock;
 
-	vduse_domain_remove_user_bounce_pages(dev->domain);
+	pages = vduse_domain_alloc_pages_to_remove_bounce(dev->domain);
+	vduse_domain_remove_user_bounce_pages(dev->domain, pages);
 	unpin_user_pages_dirty_lock(dev->umem->pages,
 				    dev->umem->npages, true);
 	atomic64_sub(dev->umem->npages, &dev->umem->mm->pinned_vm);