www.infradead.org Git - users/jedix/linux-maple.git/commitdiff
mm/memcg: Convert mem_cgroup_uncharge() to take a folio
author: Matthew Wilcox (Oracle) <willy@infradead.org>
Sun, 2 May 2021 00:42:23 +0000 (20:42 -0400)
committer: Matthew Wilcox (Oracle) <willy@infradead.org>
Sat, 14 Aug 2021 01:34:59 +0000 (21:34 -0400)
Convert all the callers to call page_folio().  Most of them were already
using a head page, but a few of them I can't prove were, so this may
actually fix a bug.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Acked-by: Mike Rapoport <rppt@linux.ibm.com>
Reviewed-by: David Howells <dhowells@redhat.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
include/linux/memcontrol.h
mm/filemap.c
mm/khugepaged.c
mm/memcontrol.c
mm/memory-failure.c
mm/memremap.c
mm/page_alloc.c
mm/swap.c

index a81da2422a928f49b24a54d99eb7abd83a78b844..3e5311045d5f40bbfe3e26afdb1c71ad4661395c 100644 (file)
@@ -708,7 +708,7 @@ int mem_cgroup_swapin_charge_page(struct page *page, struct mm_struct *mm,
                                  gfp_t gfp, swp_entry_t entry);
 void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry);
 
-void mem_cgroup_uncharge(struct page *page);
+void mem_cgroup_uncharge(struct folio *folio);
 void mem_cgroup_uncharge_list(struct list_head *page_list);
 
 void mem_cgroup_migrate(struct page *oldpage, struct page *newpage);
@@ -1205,7 +1205,7 @@ static inline void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry)
 {
 }
 
-static inline void mem_cgroup_uncharge(struct page *page)
+static inline void mem_cgroup_uncharge(struct folio *folio)
 {
 }
 
index 65abbef9012765dc3ae8b43ea8554ac8a4943851..1d8cf186c0e13adec3657df349661a04023583ce 100644 (file)
@@ -923,7 +923,7 @@ unlock:
        if (xas_error(&xas)) {
                error = xas_error(&xas);
                if (charged)
-                       mem_cgroup_uncharge(page);
+                       mem_cgroup_uncharge(page_folio(page));
                goto error;
        }
 
index 8f6d7fdea9f4bf8623c9a2e698192509a76c8914..6b9c98ddcd09ffbcc834aff0acdc5e18a88edeea 100644 (file)
@@ -1211,7 +1211,7 @@ out_up_write:
        mmap_write_unlock(mm);
 out_nolock:
        if (!IS_ERR_OR_NULL(*hpage))
-               mem_cgroup_uncharge(*hpage);
+               mem_cgroup_uncharge(page_folio(*hpage));
        trace_mm_collapse_huge_page(mm, isolated, result);
        return;
 }
@@ -1975,7 +1975,7 @@ xa_unlocked:
 out:
        VM_BUG_ON(!list_empty(&pagelist));
        if (!IS_ERR_OR_NULL(*hpage))
-               mem_cgroup_uncharge(*hpage);
+               mem_cgroup_uncharge(page_folio(*hpage));
        /* TODO: tracepoints */
 }
 
index 917fe2644d68e81ebb99263c9d7eacb5b7d23081..db26a83d1535c8e19051e65db65e4cb283ab5078 100644 (file)
@@ -6897,24 +6897,24 @@ static void uncharge_folio(struct folio *folio, struct uncharge_gather *ug)
 }
 
 /**
- * mem_cgroup_uncharge - uncharge a page
- * @page: page to uncharge
+ * mem_cgroup_uncharge - Uncharge a folio.
+ * @folio: Folio to uncharge.
  *
- * Uncharge a page previously charged with mem_cgroup_charge().
+ * Uncharge a folio previously charged with mem_cgroup_charge().
  */
-void mem_cgroup_uncharge(struct page *page)
+void mem_cgroup_uncharge(struct folio *folio)
 {
        struct uncharge_gather ug;
 
        if (mem_cgroup_disabled())
                return;
 
-       /* Don't touch page->lru of any random page, pre-check: */
-       if (!page_memcg(page))
+       /* Don't touch folio->lru of any random page, pre-check: */
+       if (!folio_memcg(folio))
                return;
 
        uncharge_gather_clear(&ug);
-       uncharge_folio(page_folio(page), &ug);
+       uncharge_folio(folio, &ug);
        uncharge_batch(&ug);
 }
 
index eefd823deb679ad0b9d84b5b374614aace672851..9ae7a57a4cc02f2fceb9487f8c3eb407f75216b0 100644 (file)
@@ -763,7 +763,7 @@ static int delete_from_lru_cache(struct page *p)
                 * Poisoned page might never drop its ref count to 0 so we have
                 * to uncharge it manually from its memcg.
                 */
-               mem_cgroup_uncharge(p);
+               mem_cgroup_uncharge(page_folio(p));
 
                /*
                 * drop the page count elevated by isolate_lru_page()
index 15a074ffb8d73d5d71705d4e47f11c83ca98f77b..6eac40f9f62a763e3384dd3196f8c16f95ce65c4 100644 (file)
@@ -508,7 +508,7 @@ void free_devmap_managed_page(struct page *page)
 
        __ClearPageWaiters(page);
 
-       mem_cgroup_uncharge(page);
+       mem_cgroup_uncharge(page_folio(page));
 
        /*
         * When a device_private page is freed, the page->mapping field
index 3e97e68aef7a899a3441e3c3832cdbdacafc2b7f..dbf25555c9b8bc9ac7d52f3bc0251c1c2a29b552 100644 (file)
@@ -726,7 +726,7 @@ static inline void free_the_page(struct page *page, unsigned int order)
 
 void free_compound_page(struct page *page)
 {
-       mem_cgroup_uncharge(page);
+       mem_cgroup_uncharge(page_folio(page));
        free_the_page(page, compound_order(page));
 }
 
index 095a5ec6f986c15a4c68d15f791032b749ac4349..11ff40104a2cd32bc2a23fe6b3f59990cb587f27 100644 (file)
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -94,7 +94,7 @@ static void __page_cache_release(struct page *page)
 static void __put_single_page(struct page *page)
 {
        __page_cache_release(page);
-       mem_cgroup_uncharge(page);
+       mem_cgroup_uncharge(page_folio(page));
        free_unref_page(page, 0);
 }