mm: memcg: rename memcg_oom_recover()
author Roman Gushchin <roman.gushchin@linux.dev>
Tue, 25 Jun 2024 00:59:01 +0000 (17:59 -0700)
committer Andrew Morton <akpm@linux-foundation.org>
Fri, 5 Jul 2024 01:05:53 +0000 (18:05 -0700)
Rename memcg_oom_recover() into memcg1_oom_recover() for consistency with
other memory cgroup v1-related functions.

Move the declaration in mm/memcontrol-v1.h so that it sits alongside the other
memcg v1 oom handling functions.

Link: https://lkml.kernel.org/r/20240625005906.106920-10-roman.gushchin@linux.dev
Signed-off-by: Roman Gushchin <roman.gushchin@linux.dev>
Acked-by: Michal Hocko <mhocko@suse.com>
Acked-by: Shakeel Butt <shakeel.butt@linux.dev>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Muchun Song <muchun.song@linux.dev>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
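
To illustrate the intent of the rename and the moved declaration, the memcg v1
oom entry points in mm/memcontrol-v1.h end up grouped under the common memcg1_
prefix (a sketch of the resulting header section, as implied by the hunks
below; the surrounding declarations are assumed otherwise unchanged):

	/* memcg v1 oom handling, implemented in mm/memcontrol-v1.c */
	bool memcg1_oom_prepare(struct mem_cgroup *memcg, bool *locked);
	void memcg1_oom_finish(struct mem_cgroup *memcg, bool locked);
	void memcg1_oom_recover(struct mem_cgroup *memcg);
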
mm/memcontrol-v1.c
mm/memcontrol-v1.h
mm/memcontrol.c

diff --git a/mm/memcontrol-v1.c b/mm/memcontrol-v1.c
index 253d49d5fb12cb4ef4ae71dbcdf2ff5ad893ca45..1d5608ee1606ab879f96cb2c6a0045d8897dd9a4 100644
--- a/mm/memcontrol-v1.c
+++ b/mm/memcontrol-v1.c
@@ -1090,8 +1090,8 @@ static void __mem_cgroup_clear_mc(void)
 
                mc.moved_swap = 0;
        }
-       memcg_oom_recover(from);
-       memcg_oom_recover(to);
+       memcg1_oom_recover(from);
+       memcg1_oom_recover(to);
        wake_up_all(&mc.waitq);
 }
 
@@ -2067,7 +2067,7 @@ static int memcg_oom_wake_function(wait_queue_entry_t *wait,
        return autoremove_wake_function(wait, mode, sync, arg);
 }
 
-void memcg_oom_recover(struct mem_cgroup *memcg)
+void memcg1_oom_recover(struct mem_cgroup *memcg)
 {
        /*
         * For the following lockless ->under_oom test, the only required
diff --git a/mm/memcontrol-v1.h b/mm/memcontrol-v1.h
index 3de956b2422f1bac1f44c3b5ebbb3d586bee72c8..972c493a8ae32d25d4f83eef833b5ef7ef9b07fd 100644
--- a/mm/memcontrol-v1.h
+++ b/mm/memcontrol-v1.h
@@ -13,7 +13,6 @@ static inline void memcg1_soft_limit_reset(struct mem_cgroup *memcg)
 
 void mem_cgroup_charge_statistics(struct mem_cgroup *memcg, int nr_pages);
 void memcg1_check_events(struct mem_cgroup *memcg, int nid);
-void memcg_oom_recover(struct mem_cgroup *memcg);
 int try_charge_memcg(struct mem_cgroup *memcg, gfp_t gfp_mask,
                     unsigned int nr_pages);
 
@@ -92,5 +91,6 @@ ssize_t memcg_write_event_control(struct kernfs_open_file *of,
 
 bool memcg1_oom_prepare(struct mem_cgroup *memcg, bool *locked);
 void memcg1_oom_finish(struct mem_cgroup *memcg, bool locked);
+void memcg1_oom_recover(struct mem_cgroup *memcg);
 
 #endif /* __MM_MEMCONTROL_V1_H */
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 4c37e7003f774361244f1a538bb2bc44dec4ea3f..4f40f06e2317fa626037c61490bed707f6fc8673 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -3165,7 +3165,7 @@ static int mem_cgroup_resize_max(struct mem_cgroup *memcg,
        } while (true);
 
        if (!ret && enlarge)
-               memcg_oom_recover(memcg);
+               memcg1_oom_recover(memcg);
 
        return ret;
 }
@@ -3750,7 +3750,7 @@ static int mem_cgroup_oom_control_write(struct cgroup_subsys_state *css,
 
        WRITE_ONCE(memcg->oom_kill_disable, val);
        if (!val)
-               memcg_oom_recover(memcg);
+               memcg1_oom_recover(memcg);
 
        return 0;
 }
@@ -5444,7 +5444,7 @@ static void uncharge_batch(const struct uncharge_gather *ug)
                        page_counter_uncharge(&ug->memcg->memsw, ug->nr_memory);
                if (ug->nr_kmem)
                        memcg_account_kmem(ug->memcg, -ug->nr_kmem);
-               memcg_oom_recover(ug->memcg);
+               memcg1_oom_recover(ug->memcg);
        }
 
        local_irq_save(flags);