mm: shmem: fix too little space for tmpfs only fallback 4KB
author     Vernon Yang <yanglincheng@kylinos.cn>
           Mon, 8 Sep 2025 12:31:28 +0000 (20:31 +0800)
committer  Andrew Morton <akpm@linux-foundation.org>
           Fri, 12 Sep 2025 00:26:00 +0000 (17:26 -0700)
When system memory is sufficient, allocating memory always succeeds, but
when the tmpfs size is small (e.g. 1MB), the allocation falls back directly
from 2MB to 4KB and the intermediate granularities (8KB ~ 1024KB) are never
tried.

Therefore, add a check of whether the remaining tmpfs space is sufficient
for the allocation.  If there is too little space left, try a smaller large
folio.
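
For illustration only, below is a minimal standalone sketch (hypothetical
userspace C, not the kernel code in the patch itself) of the fallback this
check gives: with a 1MB tmpfs (256 blocks of 4KB) and a 2MB (order-9)
request, the loop skips order 9 and settles on the largest order that still
fits.  MAX_BLOCKS and the orders bitmap are made-up example values.

	/*
	 * Hypothetical userspace sketch of the order fallback; not the
	 * patch itself.
	 */
	#include <stdio.h>

	#define MAX_BLOCKS 256UL   /* tmpfs mounted with size=1M -> 256 x 4KB blocks */

	int main(void)
	{
		unsigned long used_blocks = 0;      /* nothing charged yet */
		unsigned long orders = (1UL << 9) | (1UL << 8) | (1UL << 4); /* candidate folio orders */
		int order = 9;                      /* start at 2MB */

		while (orders) {
			unsigned long pages = 1UL << order;

			if (used_blocks + pages > MAX_BLOCKS) {
				/* too little space left: drop this order, try the next smaller one */
				orders &= ~(1UL << order);
				order = 63 - __builtin_clzl(orders | 1);
				continue;
			}
			printf("order %d (%lu KB) fits\n", order, pages * 4);
			break;
		}
		return 0;
	}

Here the order-9 (512-page) request exceeds the 256-block limit, so the loop
drops to order 8 (1MB), which fits exactly.  In the kernel, the same walk is
done with next_order() and percpu_counter_read() of the used blocks, as in
the diff below; percpu_counter_read() is a cheap, approximate read, so the
check acts as a best-effort filter on folio order rather than a hard limit.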

Link: https://lkml.kernel.org/r/20250908123128.900254-1-vernon2gm@gmail.com
Fixes: acd7ccb284b8 ("mm: shmem: add large folio support for tmpfs")
Signed-off-by: Vernon Yang <yanglincheng@kylinos.cn>
Cc: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: Daniel Gomez <da.gomez@samsung.com>
Cc: Hugh Dickins <hughd@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/shmem.c

index 8147a99a4b075bf3f114f0e5838f1914a9e46ccf..c750bcae0ee361aba98368260ff58c6b42b1f41e 100644 (file)
@@ -1820,6 +1820,7 @@ static unsigned long shmem_suitable_orders(struct inode *inode, struct vm_fault
                                           unsigned long orders)
 {
        struct vm_area_struct *vma = vmf ? vmf->vma : NULL;
+       struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
        pgoff_t aligned_index;
        unsigned long pages;
        int order;
@@ -1835,6 +1836,18 @@ static unsigned long shmem_suitable_orders(struct inode *inode, struct vm_fault
        while (orders) {
                pages = 1UL << order;
                aligned_index = round_down(index, pages);
+
+               /*
+                * Check whether the remaining space of tmpfs is sufficient for
+                * allocation. If there is too little space left, try smaller
+                * large folio.
+                */
+               if (sbinfo->max_blocks && percpu_counter_read(&sbinfo->used_blocks)
+                                               + pages > sbinfo->max_blocks) {
+                       order = next_order(&orders, order);
+                       continue;
+               }
+
                /*
                 * Check for conflict before waiting on a huge allocation.
                 * Conflict might be that a huge page has just been allocated