]> www.infradead.org Git - users/jedix/linux-maple.git/commitdiff
filemap: optimize folio refcount update in filemap_map_pages
authorJinjiang Tu <tujinjiang@huawei.com>
Thu, 4 Sep 2025 13:27:37 +0000 (21:27 +0800)
committerAndrew Morton <akpm@linux-foundation.org>
Fri, 12 Sep 2025 00:25:51 +0000 (17:25 -0700)
There are two meaningless folio refcount updates for order0 folio in
filemap_map_pages().  First, filemap_map_order0_folio() adds folio
refcount after the folio is mapped to pte.  And then, filemap_map_pages()
drops a refcount grabbed by next_uptodate_folio().  We could leave the
refcount unchanged in this case.

As Matthew mentioned in [1], it is safe to call folio_unlock() before
calling folio_put() here, because the folio is in page cache with refcount
held, and truncation will wait for the unlock.

Optimize filemap_map_folio_range() with the same method too.

With this patch, we can get 8% performance gain for lmbench testcase
'lat_pagefault -P 1 file' in order0 folio case, the size of file is 512M.

Link: https://lkml.kernel.org/r/20250904132737.1250368-1-tujinjiang@huawei.com
Link: https://lore.kernel.org/all/aKcU-fzxeW3xT5Wv@casper.infradead.org/
Signed-off-by: Jinjiang Tu <tujinjiang@huawei.com>
Reviewed-by: David Hildenbrand <david@redhat.com>
Cc: Kefeng Wang <wangkefeng.wang@huawei.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/filemap.c

index 344ab106c21c530beb0d42e7a657ce7ea688604d..8d078aa2738acf32ffd7d039fff3373b7a641579 100644 (file)
@@ -3665,6 +3665,7 @@ static vm_fault_t filemap_map_folio_range(struct vm_fault *vmf,
                        unsigned long addr, unsigned int nr_pages,
                        unsigned long *rss, unsigned short *mmap_miss)
 {
+       unsigned int ref_from_caller = 1;
        vm_fault_t ret = 0;
        struct page *page = folio_page(folio, start);
        unsigned int count = 0;
@@ -3698,7 +3699,8 @@ skip:
                if (count) {
                        set_pte_range(vmf, folio, page, count, addr);
                        *rss += count;
-                       folio_ref_add(folio, count);
+                       folio_ref_add(folio, count - ref_from_caller);
+                       ref_from_caller = 0;
                        if (in_range(vmf->address, addr, count * PAGE_SIZE))
                                ret = VM_FAULT_NOPAGE;
                }
@@ -3713,12 +3715,16 @@ skip:
        if (count) {
                set_pte_range(vmf, folio, page, count, addr);
                *rss += count;
-               folio_ref_add(folio, count);
+               folio_ref_add(folio, count - ref_from_caller);
+               ref_from_caller = 0;
                if (in_range(vmf->address, addr, count * PAGE_SIZE))
                        ret = VM_FAULT_NOPAGE;
        }
 
        vmf->pte = old_ptep;
+       if (ref_from_caller)
+               /* Locked folios cannot get truncated. */
+               folio_ref_dec(folio);
 
        return ret;
 }
@@ -3731,7 +3737,7 @@ static vm_fault_t filemap_map_order0_folio(struct vm_fault *vmf,
        struct page *page = &folio->page;
 
        if (PageHWPoison(page))
-               return ret;
+               goto out;
 
        /* See comment of filemap_map_folio_range() */
        if (!folio_test_workingset(folio))
@@ -3743,15 +3749,18 @@ static vm_fault_t filemap_map_order0_folio(struct vm_fault *vmf,
         * the fault-around logic.
         */
        if (!pte_none(ptep_get(vmf->pte)))
-               return ret;
+               goto out;
 
        if (vmf->address == addr)
                ret = VM_FAULT_NOPAGE;
 
        set_pte_range(vmf, folio, page, 1, addr);
        (*rss)++;
-       folio_ref_inc(folio);
+       return ret;
 
+out:
+       /* Locked folios cannot get truncated. */
+       folio_ref_dec(folio);
        return ret;
 }
 
@@ -3811,7 +3820,6 @@ vm_fault_t filemap_map_pages(struct vm_fault *vmf,
                                        nr_pages, &rss, &mmap_miss);
 
                folio_unlock(folio);
-               folio_put(folio);
        } while ((folio = next_uptodate_folio(&xas, mapping, end_pgoff)) != NULL);
        add_mm_counter(vma->vm_mm, folio_type, rss);
        pte_unmap_unlock(vmf->pte, vmf->ptl);