mm: shmem: simplify the suitable huge orders validation for tmpfs
author		Baolin Wang <baolin.wang@linux.alibaba.com>
		Mon, 22 Jul 2024 05:43:17 +0000 (13:43 +0800)
committer	Andrew Morton <akpm@linux-foundation.org>
		Mon, 2 Sep 2024 03:25:44 +0000 (20:25 -0700)
Patch series "Some cleanups for shmem", v3.

This series does some cleanups to reuse code, rename functions, and
simplify the logic to make the code clearer.  No functional changes are
expected.

This patch (of 3):

Move the suitable huge order validation for tmpfs into
shmem_suitable_orders(), which allows reusing the existing code and
simplifies the logic.

In addition, there is no special handling of the -E2BIG error code when
checking for conflicts with PMD-sized THP in the page cache for tmpfs;
the caller simply falls back to order-0 allocation, which is exactly
what this patch does, so this simplification adds no functional change.
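
For context, a condensed sketch of the caller's fallback pattern in
shmem_get_folio_gfp() (simplified here, not verbatim): only -EEXIST
triggers a retry, so -E2BIG, like any other error, falls through to the
order-0 attempt.

	if (orders > 0) {
		folio = shmem_alloc_and_add_folio(vmf, gfp, inode, index,
						  fault_mm, orders);
		if (!IS_ERR(folio))
			goto alloced;
		if (PTR_ERR(folio) == -EEXIST)	/* raced with another thread */
			goto repeat;
		/* any other error, -E2BIG included, falls back below */
	}
	folio = shmem_alloc_and_add_folio(NULL, gfp, inode, index,
					  fault_mm, 0);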

Link: https://lkml.kernel.org/r/cover.1721626645.git.baolin.wang@linux.alibaba.com
Link: https://lkml.kernel.org/r/965985dd6d322929d78a0beee0dafa1c2a1b81e2.1721626645.git.baolin.wang@linux.alibaba.com
Signed-off-by: Baolin Wang <baolin.wang@linux.alibaba.com>
Reviewed-by: Ryan Roberts <ryan.roberts@arm.com>
Acked-by: David Hildenbrand <david@redhat.com>
Cc: Barry Song <21cnbao@gmail.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Lance Yang <ioworker0@gmail.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Zi Yan <ziy@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/shmem.c

index 5a77acf6ac6a621dc7b5e7b46402b2b714b45bea..7889b499d33fa09c1692eb01bb9deb95bdb48e54 100644
@@ -1680,20 +1680,30 @@ static unsigned long shmem_suitable_orders(struct inode *inode, struct vm_fault
                                           struct address_space *mapping, pgoff_t index,
                                           unsigned long orders)
 {
-       struct vm_area_struct *vma = vmf->vma;
+       struct vm_area_struct *vma = vmf ? vmf->vma : NULL;
        pgoff_t aligned_index;
        unsigned long pages;
        int order;
 
-       orders = thp_vma_suitable_orders(vma, vmf->address, orders);
-       if (!orders)
-               return 0;
+       if (vma) {
+               orders = thp_vma_suitable_orders(vma, vmf->address, orders);
+               if (!orders)
+                       return 0;
+       }
 
        /* Find the highest order that can add into the page cache */
        order = highest_order(orders);
        while (orders) {
                pages = 1UL << order;
                aligned_index = round_down(index, pages);
+               /*
+                * Check for conflict before waiting on a huge allocation.
+                * Conflict might be that a huge page has just been allocated
+                * and added to page cache by a racing thread, or that there
+                * is already at least one small page in the huge extent.
+                * Be careful to retry when appropriate, but not forever!
+                * Elsewhere -EEXIST would be the right code, but not here.
+                */
                if (!xa_find(&mapping->i_pages, &aligned_index,
                             aligned_index + pages - 1, XA_PRESENT))
                        break;
@@ -1731,7 +1741,6 @@ static struct folio *shmem_alloc_and_add_folio(struct vm_fault *vmf,
 {
        struct address_space *mapping = inode->i_mapping;
        struct shmem_inode_info *info = SHMEM_I(inode);
-       struct vm_area_struct *vma = vmf ? vmf->vma : NULL;
        unsigned long suitable_orders = 0;
        struct folio *folio = NULL;
        long pages;
@@ -1741,26 +1750,8 @@ static struct folio *shmem_alloc_and_add_folio(struct vm_fault *vmf,
                orders = 0;
 
        if (orders > 0) {
-               if (vma && vma_is_anon_shmem(vma)) {
-                       suitable_orders = shmem_suitable_orders(inode, vmf,
+               suitable_orders = shmem_suitable_orders(inode, vmf,
                                                        mapping, index, orders);
-               } else if (orders & BIT(HPAGE_PMD_ORDER)) {
-                       pages = HPAGE_PMD_NR;
-                       suitable_orders = BIT(HPAGE_PMD_ORDER);
-                       index = round_down(index, HPAGE_PMD_NR);
-
-                       /*
-                        * Check for conflict before waiting on a huge allocation.
-                        * Conflict might be that a huge page has just been allocated
-                        * and added to page cache by a racing thread, or that there
-                        * is already at least one small page in the huge extent.
-                        * Be careful to retry when appropriate, but not forever!
-                        * Elsewhere -EEXIST would be the right code, but not here.
-                        */
-                       if (xa_find(&mapping->i_pages, &index,
-                                   index + HPAGE_PMD_NR - 1, XA_PRESENT))
-                               return ERR_PTR(-E2BIG);
-               }
 
                order = highest_order(suitable_orders);
                while (suitable_orders) {
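
For illustration outside the kernel, a minimal userspace analogue of the
step-down loop above (hypothetical names; a plain occupancy array stands
in for the xa_find() check on mapping->i_pages):

	#include <stdbool.h>
	#include <stdio.h>

	#define PAGES	64	/* toy page cache size, in pages */

	static bool range_occupied(const bool cache[], unsigned long start,
				   unsigned long n)
	{
		for (unsigned long i = start; i < start + n; i++)
			if (cache[i])
				return true;	/* at least one page present */
		return false;
	}

	/* Try the highest order whose naturally aligned extent is empty. */
	static int highest_fitting_order(const bool cache[], unsigned long index,
					 unsigned long orders)
	{
		while (orders) {
			/* highest set bit (assumes 64-bit unsigned long) */
			int order = 63 - __builtin_clzl(orders);
			unsigned long pages = 1UL << order;
			unsigned long aligned = index & ~(pages - 1);

			if (!range_occupied(cache, aligned, pages))
				return order;		/* no conflict */
			orders &= ~(1UL << order);	/* step down an order */
		}
		return -1;	/* caller would fall back to order 0 */
	}

	int main(void)
	{
		bool cache[PAGES] = { false };

		cache[3] = true;	/* one small page in the huge extent */
		/* order 4 conflicts with page 3; order 2 (pages 4..7) does not */
		printf("picked order %d\n",
		       highest_fitting_order(cache, 5, (1UL << 4) | (1UL << 2)));
		return 0;
	}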