Patch series "mm: clarify nofail memory allocation", v2.
__GFP_NOFAIL carries the semantics of never failing, so its callers
do not check the return value:
  %__GFP_NOFAIL: The VM implementation _must_ retry infinitely: the caller
  cannot handle allocation failures. The allocation could block
  indefinitely but will never return with failure. Testing for
  failure is pointless.
However, __GFP_NOFAIL can still fail in practice, either because the
requested size exceeds the allocator's limits or because it is combined
with GFP_ATOMIC/GFP_NOWAIT in a non-sleepable context. Because callers
never check the return value, such failures can turn into NULL pointer
dereferences and expose security vulnerabilities.
Since __GFP_NOFAIL does not support non-blocking allocation, we introduce
GFP_NOFAIL with inclusive blocking semantics and encourage using
GFP_NOFAIL as a replacement for __GFP_NOFAIL in non-mm code.
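As an illustration only (the actual definition belongs to a later patch in
this series), such a flag could simply bake blocking semantics into the
mask so it can never be combined with GFP_ATOMIC/GFP_NOWAIT by accident:

	/* Illustrative sketch, not the final definition */
	#define GFP_NOFAIL	(GFP_KERNEL | __GFP_NOFAIL)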
If we must still fail a nofail allocation, we should trigger a BUG rather
than exposing NULL dereferences to callers who do not check the return
value.
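For example, a typical (hypothetical) nofail caller relies on the contract
and dereferences the result immediately; if the allocator quietly returned
NULL for a misused GFP_ATOMIC | __GFP_NOFAIL request, this would crash:

	/* hypothetical caller: no NULL check, as the nofail contract allows */
	struct foo *f = kmalloc(sizeof(*f), GFP_KERNEL | __GFP_NOFAIL);
	f->refcount = 1;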
* The discussion started from this topic:
[PATCH RFC] mm: warn potential return NULL for kmalloc_array and
kvmalloc_array with __GFP_NOFAIL
  https://lore.kernel.org/linux-mm/20240717230025.77361-1-21cnbao@gmail.com/
This patch (of 4):
mm doesn't support non-blockable __GFP_NOFAIL allocation, because
__GFP_NOFAIL without direct reclamation may just result in a busy loop
within non-sleepable contexts:
static inline struct page *
__alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
						struct alloc_context *ac)
{
	...
	/*
	 * Make sure that __GFP_NOFAIL request doesn't leak out and make sure
	 * we always retry
	 */
	if (gfp_mask & __GFP_NOFAIL) {
		/*
		 * All existing users of the __GFP_NOFAIL are blockable, so warn
		 * of any new users that actually require GFP_NOWAIT
		 */
		if (WARN_ON_ONCE_GFP(!can_direct_reclaim, gfp_mask))
			goto fail;
		...
	}
	...
fail:
	warn_alloc(gfp_mask, ac->nodemask,
			"page allocation failure: order:%u", order);
got_pg:
	return page;
}
Let's move the memory allocation out of the atomic context and use the
normal sleepable context to get pages.
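In simplified form (hypothetical helper names, mirroring the vduse change
below), the pattern is to pre-allocate everything in sleepable context and
only consume the pre-allocated pages under the lock:

	pages = prealloc_bounce_pages(count);	/* GFP_KERNEL | __GFP_NOFAIL, may sleep */
	spin_lock(&lock);			/* atomic context starts here */
	consume_prealloced_pages(pages);	/* no allocation under the lock */
	spin_unlock(&lock);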
Link: https://lkml.kernel.org/r/20240731000155.109583-1-21cnbao@gmail.com
Link: https://lkml.kernel.org/r/20240731000155.109583-2-21cnbao@gmail.com
Signed-off-by: Barry Song <v-songbaohua@oppo.com>
Cc: "Michael S. Tsirkin" <mst@redhat.com>
Cc: Jason Wang <jasowang@redhat.com>
Cc: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
Cc: "Eugenio Pérez" <eperezma@redhat.com>
Cc: Maxime Coquelin <maxime.coquelin@redhat.com>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: Christoph Lameter <cl@linux.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Hailong.Liu <hailong.liu@oppo.com>
Cc: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Lorenzo Stoakes <lstoakes@gmail.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: Roman Gushchin <roman.gushchin@linux.dev>
Cc: Uladzislau Rezki (Sony) <urezki@gmail.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Kees Cook <kees@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
 	return ret;
 }
-void vduse_domain_remove_user_bounce_pages(struct vduse_iova_domain *domain)
+struct page **vduse_domain_alloc_pages_to_remove_bounce(struct vduse_iova_domain *domain)
+{
+	struct page **pages;
+	unsigned long count, i;
+
+	if (!domain->user_bounce_pages)
+		return NULL;
+
+	count = domain->bounce_size >> PAGE_SHIFT;
+	pages = kmalloc_array(count, sizeof(*pages), GFP_KERNEL | __GFP_NOFAIL);
+	for (i = 0; i < count; i++)
+		pages[i] = alloc_page(GFP_KERNEL | __GFP_NOFAIL);
+
+	return pages;
+}
+
+void vduse_domain_remove_user_bounce_pages(struct vduse_iova_domain *domain, struct page **pages)
 {
 	struct vduse_bounce_map *map;
 	unsigned long i, count;
 	count = domain->bounce_size >> PAGE_SHIFT;
 	for (i = 0; i < count; i++) {
-		struct page *page = NULL;
+		struct page *page = pages[i];
 		map = &domain->bounce_maps[i];
-		if (WARN_ON(!map->bounce_page))
+		if (WARN_ON(!map->bounce_page)) {
+			put_page(page);
 			continue;
+		}
 		/* Copy user page to kernel page if it's in use */
 		if (map->orig_phys != INVALID_PHYS_ADDR) {
-			page = alloc_page(GFP_ATOMIC | __GFP_NOFAIL);
 			memcpy_from_page(page_address(page),
 					 map->bounce_page, 0, PAGE_SIZE);
 		}
 		map->bounce_page = page;
 	}
 	domain->user_bounce_pages = false;
+	kfree(pages);
 out:
 	write_unlock(&domain->bounce_lock);
 }
 static int vduse_domain_release(struct inode *inode, struct file *file)
 {
 	struct vduse_iova_domain *domain = file->private_data;
+	struct page **pages;
+
+	pages = vduse_domain_alloc_pages_to_remove_bounce(domain);
 	spin_lock(&domain->iotlb_lock);
 	vduse_iotlb_del_range(domain, 0, ULLONG_MAX);
-	vduse_domain_remove_user_bounce_pages(domain);
+	vduse_domain_remove_user_bounce_pages(domain, pages);
 	vduse_domain_free_kernel_bounce_pages(domain);
 	spin_unlock(&domain->iotlb_lock);
 	put_iova_domain(&domain->stream_iovad);
 int vduse_domain_add_user_bounce_pages(struct vduse_iova_domain *domain,
 				       struct page **pages, int count);
-void vduse_domain_remove_user_bounce_pages(struct vduse_iova_domain *domain);
+void vduse_domain_remove_user_bounce_pages(struct vduse_iova_domain *domain,
+					   struct page **pages);
+
+struct page **vduse_domain_alloc_pages_to_remove_bounce(struct vduse_iova_domain *domain);
 void vduse_domain_destroy(struct vduse_iova_domain *domain);
 static int vduse_dev_dereg_umem(struct vduse_dev *dev,
 				u64 iova, u64 size)
 {
+	struct page **pages;
 	int ret;
 	mutex_lock(&dev->mem_lock);
 	if (dev->umem->iova != iova || size != dev->domain->bounce_size)
 		goto unlock;
-	vduse_domain_remove_user_bounce_pages(dev->domain);
+	pages = vduse_domain_alloc_pages_to_remove_bounce(dev->domain);
+	vduse_domain_remove_user_bounce_pages(dev->domain, pages);
 	unpin_user_pages_dirty_lock(dev->umem->pages,
 				    dev->umem->npages, true);
 	atomic64_sub(dev->umem->npages, &dev->umem->mm->pinned_vm);