secretmem: add memcg accounting
author Mike Rapoport <rppt@linux.ibm.com>
Thu, 31 Dec 2020 22:05:04 +0000 (22:05 +0000)
committer Johannes Weiner <hannes@cmpxchg.org>
Thu, 31 Dec 2020 22:05:04 +0000 (22:05 +0000)
Account memory consumed by secretmem to memcg. The accounting is updated
when the memory is actually allocated and freed.
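
As a rough illustration of where the charge becomes visible, the sketch below creates a secretmem descriptor, maps it, and faults its pages in, so the kmem charge made when the backing pool chunk is allocated lands in the caller's memory cgroup. It assumes the memfd_secret() syscall added earlier in this series is wired up and exposed as __NR_memfd_secret, and that the CMA pool backing secretmem has been reserved at boot; the flag value 0 and the 2 MB length are only examples.

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
        /* memfd_secret() from this series; __NR_memfd_secret is assumed */
        int fd = syscall(__NR_memfd_secret, 0);
        size_t len = 2UL * 1024 * 1024;         /* one PMD-sized pool chunk */
        char *p;

        if (fd < 0 || ftruncate(fd, len) < 0) {
                perror("memfd_secret/ftruncate");
                return 1;
        }

        p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        if (p == MAP_FAILED) {
                perror("mmap");
                return 1;
        }

        /* fault everything in: the backing pages are now charged to memcg */
        memset(p, 0xa5, len);
        pause();        /* inspect the owning cgroup's stats while this runs */
        return 0;
}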

Link: https://lkml.kernel.org/r/20201203062949.5484-8-rppt@kernel.org
Signed-off-by: Mike Rapoport <rppt@linux.ibm.com>
Acked-by: Roman Gushchin <guro@fb.com>
Reviewed-by: Shakeel Butt <shakeelb@google.com>
Cc: Alexander Viro <viro@zeniv.linux.org.uk>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Christopher Lameter <cl@linux.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Elena Reshetova <elena.reshetova@intel.com>
Cc: Hagen Paul Pfeifer <hagen@jauu.net>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: James Bottomley <jejb@linux.ibm.com>
Cc: "Kirill A. Shutemov" <kirill@shutemov.name>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Michael Kerrisk <mtk.manpages@gmail.com>
Cc: Palmer Dabbelt <palmer@dabbelt.com>
Cc: Palmer Dabbelt <palmerdabbelt@google.com>
Cc: Paul Walmsley <paul.walmsley@sifive.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rick Edgecombe <rick.p.edgecombe@intel.com>
Cc: Shuah Khan <shuah@kernel.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Tycho Andersen <tycho@tycho.ws>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/filemap.c
mm/secretmem.c

index 994bc0c2b4ed6c963b9f59e4e38043e61e21d8be..5bc672d3014348b3d9cc5425922579392cff22c5 100644 (file)
@@ -42,6 +42,7 @@
 #include <linux/psi.h>
 #include <linux/ramfs.h>
 #include <linux/page_idle.h>
+#include <linux/secretmem.h>
 #include "internal.h"
 
 #define CREATE_TRACE_POINTS
@@ -844,7 +845,7 @@ noinline int __add_to_page_cache_locked(struct page *page,
        page->mapping = mapping;
        page->index = offset;
 
-       if (!huge) {
+       if (!huge && !page_is_secretmem(page)) {
                error = mem_cgroup_charge(page, current->mm, gfp);
                if (error)
                        goto error;
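
The page cache hunk above keeps secretmem pages away from mem_cgroup_charge(): they are already charged as kernel memory when the backing pool chunk is allocated (see secretmem_account_pages() below), so charging them again on insertion into the page cache would count them twice. Conceptually, the page_is_secretmem() test comes down to checking whether the page's mapping uses the secretmem address_space operations; the sketch below is an approximation rather than the helper from <linux/secretmem.h>, with secretmem_aops standing in for the operations table defined in mm/secretmem.c.

/*
 * Approximate sketch of the page_is_secretmem() check used above.
 * The real helper lives in <linux/secretmem.h>; secretmem_aops here
 * stands in for the address_space_operations of mm/secretmem.c.
 */
static inline bool page_is_secretmem_sketch(struct page *page)
{
        struct address_space *mapping = page_mapping(page);

        /* secretmem pages are identified by their address_space ops */
        return mapping && mapping->a_ops == &secretmem_aops;
}
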
index 52a900a135a5d0409ef8deb64f87deff37df3d1e..2390901d3ff77df1360301c769d8779a04686a8b 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/memblock.h>
 #include <linux/pseudo_fs.h>
 #include <linux/secretmem.h>
+#include <linux/memcontrol.h>
 #include <linux/set_memory.h>
 #include <linux/sched/signal.h>
 
@@ -44,6 +45,32 @@ struct secretmem_ctx {
 
 static struct cma *secretmem_cma;
 
+static int secretmem_account_pages(struct page *page, gfp_t gfp, int order)
+{
+       int err;
+
+       err = memcg_kmem_charge_page(page, gfp, order);
+       if (err)
+               return err;
+
+       /*
+        * secretmem caches are unreclaimable kernel allocations, so treat
+        * them as unreclaimable slab memory for VM statistics purposes
+        */
+       mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE_B,
+                             PAGE_SIZE << order);
+
+       return 0;
+}
+
+static void secretmem_unaccount_pages(struct page *page, int order)
+{
+
+       mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE_B,
+                             -PAGE_SIZE << order);
+       memcg_kmem_uncharge_page(page, order);
+}
+
 static int secretmem_pool_increase(struct secretmem_ctx *ctx, gfp_t gfp)
 {
        unsigned long nr_pages = (1 << PMD_PAGE_ORDER);
@@ -56,10 +83,14 @@ static int secretmem_pool_increase(struct secretmem_ctx *ctx, gfp_t gfp)
        if (!page)
                return -ENOMEM;
 
-       err = set_direct_map_invalid_noflush(page, nr_pages);
+       err = secretmem_account_pages(page, gfp, PMD_PAGE_ORDER);
        if (err)
                goto err_cma_release;
 
+       err = set_direct_map_invalid_noflush(page, nr_pages);
+       if (err)
+               goto err_memcg_uncharge;
+
        addr = (unsigned long)page_address(page);
        err = gen_pool_add(pool, addr, PMD_SIZE, NUMA_NO_NODE);
        if (err)
@@ -76,6 +107,8 @@ err_set_direct_map:
         * won't fail
         */
        set_direct_map_default_noflush(page, nr_pages);
+err_memcg_uncharge:
+       secretmem_unaccount_pages(page, PMD_PAGE_ORDER);
 err_cma_release:
        cma_release(secretmem_cma, page, nr_pages);
        return err;
@@ -302,6 +335,7 @@ static void secretmem_cleanup_chunk(struct gen_pool *pool,
        int i;
 
        set_direct_map_default_noflush(page, nr_pages);
+       secretmem_unaccount_pages(page, PMD_PAGE_ORDER);
 
        for (i = 0; i < nr_pages; i++)
                clear_highpage(page + i);
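
Since the pages are folded into NR_SLAB_UNRECLAIMABLE_B, the charge shows up through the owning cgroup's slab_unreclaimable counter in memory.stat (and in the global unreclaimable slab figure). The small reader below is only illustrative; the default cgroup v2 path is an example and should point at the cgroup of the process holding the secretmem mapping.

#include <stdio.h>
#include <string.h>

int main(int argc, char **argv)
{
        const char *path = argc > 1 ? argv[1]
                                    : "/sys/fs/cgroup/user.slice/memory.stat";
        char line[256];
        FILE *f = fopen(path, "r");

        if (!f) {
                perror("fopen");
                return 1;
        }

        /* print the unreclaimable-slab counter, which now includes secretmem */
        while (fgets(line, sizeof(line), f)) {
                if (!strncmp(line, "slab_unreclaimable ", 19))
                        fputs(line, stdout);
        }

        fclose(f);
        return 0;
}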