www.infradead.org Git - users/willy/pagecache.git/commitdiff
mm/memcg: Convert uncharge_page() to uncharge_pageset()
author Matthew Wilcox (Oracle) <willy@infradead.org>
Wed, 30 Jun 2021 01:47:12 +0000 (21:47 -0400)
committer Matthew Wilcox (Oracle) <willy@infradead.org>
Sat, 28 Aug 2021 02:52:23 +0000 (22:52 -0400)
Use a pageset rather than a page to ensure that we're only operating on
base or head pages, and not tail pages.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: David Howells <dhowells@redhat.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
mm/memcontrol.c

index 7a01111f8c6591678f87feacebd02d9ad46b54b3..689431284e80e41ae919530fa6973d9485f830ca 100644 (file)
@@ -6832,24 +6832,23 @@ static void uncharge_batch(const struct uncharge_gather *ug)
        memcg_check_events(ug->memcg, ug->nid);
        local_irq_restore(flags);
 
-       /* drop reference from uncharge_page */
+       /* drop reference from uncharge_pageset */
        css_put(&ug->memcg->css);
 }
 
-static void uncharge_page(struct page *page, struct uncharge_gather *ug)
+static void uncharge_pageset(struct pageset *pageset, struct uncharge_gather *ug)
 {
-       struct pageset *pageset = page_pageset(page);
-       unsigned long nr_pages;
+       long nr_pages;
        struct mem_cgroup *memcg;
        struct obj_cgroup *objcg;
-       bool use_objcg = PageMemcgKmem(page);
+       bool use_objcg = pageset_memcg_kmem(pageset);
 
-       VM_BUG_ON_PAGE(PageLRU(page), page);
+       VM_BUG_ON_PAGESET(pageset_test_lru(pageset), pageset);
 
        /*
         * Nobody should be changing or seriously looking at
-        * page memcg or objcg at this point, we have fully
-        * exclusive access to the page.
+        * pageset memcg or objcg at this point, we have fully
+        * exclusive access to the pageset.
         */
        if (use_objcg) {
                objcg = __pageset_objcg(pageset);
@@ -6871,19 +6870,19 @@ static void uncharge_page(struct page *page, struct uncharge_gather *ug)
                        uncharge_gather_clear(ug);
                }
                ug->memcg = memcg;
-               ug->nid = page_to_nid(page);
+               ug->nid = pageset_nid(pageset);
 
                /* pairs with css_put in uncharge_batch */
                css_get(&memcg->css);
        }
 
-       nr_pages = compound_nr(page);
+       nr_pages = pageset_nr_pages(pageset);
 
        if (use_objcg) {
                ug->nr_memory += nr_pages;
                ug->nr_kmem += nr_pages;
 
-               page->memcg_data = 0;
+               pageset->memcg_data = 0;
                obj_cgroup_put(objcg);
        } else {
                /* LRU pages aren't accounted at the root level */
@@ -6891,7 +6890,7 @@ static void uncharge_page(struct page *page, struct uncharge_gather *ug)
                        ug->nr_memory += nr_pages;
                ug->pgpgout++;
 
-               page->memcg_data = 0;
+               pageset->memcg_data = 0;
        }
 
        css_put(&memcg->css);
@@ -6915,7 +6914,7 @@ void mem_cgroup_uncharge(struct page *page)
                return;
 
        uncharge_gather_clear(&ug);
-       uncharge_page(page, &ug);
+       uncharge_pageset(page_pageset(page), &ug);
        uncharge_batch(&ug);
 }
 
@@ -6929,14 +6928,14 @@ void mem_cgroup_uncharge(struct page *page)
 void mem_cgroup_uncharge_list(struct list_head *page_list)
 {
        struct uncharge_gather ug;
-       struct page *page;
+       struct pageset *pageset;
 
        if (mem_cgroup_disabled())
                return;
 
        uncharge_gather_clear(&ug);
-       list_for_each_entry(page, page_list, lru)
-               uncharge_page(page, &ug);
+       list_for_each_entry(pageset, page_list, lru)
+               uncharge_pageset(pageset, &ug);
        if (ug.memcg)
                uncharge_batch(&ug);
 }