From 9e619cd4fefd19cdce16e169d5827bc64ae01aa1 Mon Sep 17 00:00:00 2001
From: Shakeel Butt
Date: Tue, 6 May 2025 15:55:33 -0700
Subject: [PATCH] memcg: no irq disable for memcg stock lock

There is no need to disable irqs to use the memcg per-cpu stock, so
let's just not do that. One consequence of this change is that if a
task holds the memcg stock lock and its CPU gets interrupted, memcg
charges made on that CPU in irq context will take the slow path of
memcg charging. However, that should be very rare and is fine in
general.

Link: https://lkml.kernel.org/r/20250506225533.2580386-5-shakeel.butt@linux.dev
Signed-off-by: Shakeel Butt
Acked-by: Vlastimil Babka
Cc: Alexei Starovoitov
Cc: Eric Dumazet
Cc: Jakub Kicinski
Cc: Johannes Weiner
Cc: Michal Hocko
Cc: Muchun Song
Cc: Roman Gushchin
Cc: Sebastian Andrzej Siewior
Signed-off-by: Andrew Morton
---
 mm/memcontrol.c | 15 ++++++---------
 1 file changed, 6 insertions(+), 9 deletions(-)

diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 1f8611e9a8ff..2b6768f0e916 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1829,12 +1829,11 @@ static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
 {
 	struct memcg_stock_pcp *stock;
 	uint8_t stock_pages;
-	unsigned long flags;
 	bool ret = false;
 	int i;
 
 	if (nr_pages > MEMCG_CHARGE_BATCH ||
-	    !local_trylock_irqsave(&memcg_stock.lock, flags))
+	    !local_trylock(&memcg_stock.lock))
 		return ret;
 
 	stock = this_cpu_ptr(&memcg_stock);
@@ -1851,7 +1850,7 @@ static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
 		break;
 	}
 
-	local_unlock_irqrestore(&memcg_stock.lock, flags);
+	local_unlock(&memcg_stock.lock);
 
 	return ret;
 }
@@ -1895,18 +1894,17 @@ static void drain_stock_fully(struct memcg_stock_pcp *stock)
 static void drain_local_memcg_stock(struct work_struct *dummy)
 {
 	struct memcg_stock_pcp *stock;
-	unsigned long flags;
 
 	if (WARN_ONCE(!in_task(), "drain in non-task context"))
 		return;
 
-	local_lock_irqsave(&memcg_stock.lock, flags);
+	local_lock(&memcg_stock.lock);
 
 	stock = this_cpu_ptr(&memcg_stock);
 	drain_stock_fully(stock);
 	clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
 
-	local_unlock_irqrestore(&memcg_stock.lock, flags);
+	local_unlock(&memcg_stock.lock);
 }
 
 static void drain_local_obj_stock(struct work_struct *dummy)
@@ -1931,7 +1929,6 @@ static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
 	struct memcg_stock_pcp *stock;
 	struct mem_cgroup *cached;
 	uint8_t stock_pages;
-	unsigned long flags;
 	bool success = false;
 	int empty_slot = -1;
 	int i;
@@ -1946,7 +1943,7 @@ static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
 	VM_WARN_ON_ONCE(mem_cgroup_is_root(memcg));
 
 	if (nr_pages > MEMCG_CHARGE_BATCH ||
-	    !local_trylock_irqsave(&memcg_stock.lock, flags)) {
+	    !local_trylock(&memcg_stock.lock)) {
 		/*
 		 * In case of larger than batch refill or unlikely failure to
 		 * lock the percpu memcg_stock.lock, uncharge memcg directly.
@@ -1981,7 +1978,7 @@ static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
 		WRITE_ONCE(stock->nr_pages[i], nr_pages);
 	}
 
-	local_unlock_irqrestore(&memcg_stock.lock, flags);
+	local_unlock(&memcg_stock.lock);
 }
 
 static bool is_memcg_drain_needed(struct memcg_stock_pcp *stock,
-- 
2.50.1
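
For readers less familiar with the local_lock API, below is a minimal
userspace sketch (plain C with pthreads, not kernel code) of the
trylock-or-slow-path pattern this patch relies on: the fast path only
trylocks the per-CPU stock lock, and any context that loses the race,
such as an interrupt arriving while a task holds the lock, falls back
to direct charging rather than spinning or disabling irqs. The names
try_consume_stock and charge_slow_path are hypothetical stand-ins, and
pthread_mutex_trylock stands in for local_trylock.

/*
 * Userspace sketch of the fast-path/slow-path split used by the memcg
 * per-cpu stock.  Hypothetical names; pthread mutex stands in for the
 * per-CPU local_lock_t.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t stock_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int stock_pages = 64;	/* cached pre-charged pages */

/* Fast path: succeed only if the lock is uncontended and stock suffices. */
static bool try_consume_stock(unsigned int nr_pages)
{
	bool ret = false;

	if (pthread_mutex_trylock(&stock_lock) != 0)
		return false;	/* contended: caller takes the slow path */

	if (stock_pages >= nr_pages) {
		stock_pages -= nr_pages;
		ret = true;
	}
	pthread_mutex_unlock(&stock_lock);
	return ret;
}

/* Slow path: charge directly against the shared counters (elided). */
static void charge_slow_path(unsigned int nr_pages)
{
	printf("slow-path charge of %u pages\n", nr_pages);
}

int main(void)
{
	unsigned int nr_pages = 8;

	if (!try_consume_stock(nr_pages))
		charge_slow_path(nr_pages);
	return 0;
}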