www.infradead.org Git - users/jedix/linux-maple.git/commitdiff
mm/filemap: clean up hugetlb exclusion code
author: Kairui Song <kasong@tencent.com>
Mon, 15 Apr 2024 17:18:54 +0000 (01:18 +0800)
committer: Andrew Morton <akpm@linux-foundation.org>
Fri, 26 Apr 2024 03:56:09 +0000 (20:56 -0700)
__filemap_add_folio only has two callers, one never passes hugetlb folio
and one always passes in hugetlb folio.  So move the hugetlb related
cgroup charging out of it to make the code cleaner.

Link: https://lkml.kernel.org/r/20240415171857.19244-3-ryncsn@gmail.com
Signed-off-by: Kairui Song <kasong@tencent.com>
Acked-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/filemap.c

index 12089c24abfbd585bfda246db9d69f6898bc5f7b..17a66ea544e7153f9363b081c4099f76e9bd4a15 100644 (file)
@@ -853,20 +853,12 @@ noinline int __filemap_add_folio(struct address_space *mapping,
 {
        XA_STATE(xas, &mapping->i_pages, index);
        bool huge = folio_test_hugetlb(folio);
-       bool charged = false;
-       long nr = 1;
+       long nr;
 
        VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
        VM_BUG_ON_FOLIO(folio_test_swapbacked(folio), folio);
        mapping_set_update(&xas, mapping);
 
-       if (!huge) {
-               int error = mem_cgroup_charge(folio, NULL, gfp);
-               if (error)
-                       return error;
-               charged = true;
-       }
-
        VM_BUG_ON_FOLIO(index & (folio_nr_pages(folio) - 1), folio);
        xas_set_order(&xas, index, folio_order(folio));
        nr = folio_nr_pages(folio);
@@ -931,8 +923,6 @@ unlock:
        trace_mm_filemap_add_to_page_cache(folio);
        return 0;
 error:
-       if (charged)
-               mem_cgroup_uncharge(folio);
        folio->mapping = NULL;
        /* Leave page->index set: truncation relies upon it */
        folio_put_refs(folio, nr);
@@ -946,11 +936,16 @@ int filemap_add_folio(struct address_space *mapping, struct folio *folio,
        void *shadow = NULL;
        int ret;
 
+       ret = mem_cgroup_charge(folio, NULL, gfp);
+       if (ret)
+               return ret;
+
        __folio_set_locked(folio);
        ret = __filemap_add_folio(mapping, folio, index, gfp, &shadow);
-       if (unlikely(ret))
+       if (unlikely(ret)) {
+               mem_cgroup_uncharge(folio);
                __folio_clear_locked(folio);
-       else {
+       } else {
                /*
                 * The folio might have been evicted from cache only
                 * recently, in which case it should be activated like