memcg-v1: no need for memcg locking for MGLRU
author    Shakeel Butt <shakeel.butt@linux.dev>
          Fri, 25 Oct 2024 01:23:02 +0000 (18:23 -0700)
committer Andrew Morton <akpm@linux-foundation.org>
          Fri, 1 Nov 2024 04:29:25 +0000 (21:29 -0700)
While updating the generation of the folios, MGLRU requires that the
folio's memcg association remains stable.  With the charge migration
deprecated, there is no need for MGLRU to acquire locks to keep the folio
and memcg association stable.
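
For context, a minimal before/after sketch of the locking pattern this
patch removes, abbreviated from the walk_mm() hunk below (the retry
loop, batching, and error handling around it are elided):

	/* Before: pin the folio/memcg association so that
	 * folio_update_gen() sees a stable folio_memcg() */
	if (!mem_cgroup_trylock_pages(memcg))
		break;

	if (mmap_read_trylock(mm)) {
		err = walk_page_range(mm, walk->next_addr, ULONG_MAX,
				      &mm_walk_ops, walk);
		mmap_read_unlock(mm);
	}

	mem_cgroup_unlock_pages();

	/* After: with memcg-v1 charge migration deprecated, a folio's
	 * memcg cannot change under us, so the walk runs unlocked */
	if (mmap_read_trylock(mm)) {
		err = walk_page_range(mm, walk->next_addr, ULONG_MAX,
				      &mm_walk_ops, walk);
		mmap_read_unlock(mm);
	}

The same simplification applies to the rmap side in
lru_gen_look_around(), which drops the identical trylock/unlock pair
around its PTE scan.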

Link: https://lkml.kernel.org/r/20241025012304.2473312-6-shakeel.butt@linux.dev
Signed-off-by: Shakeel Butt <shakeel.butt@linux.dev>
Reviewed-by: Roman Gushchin <roman.gushchin@linux.dev>
Cc: Hugh Dickins <hughd@google.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Yosry Ahmed <yosryahmed@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/vmscan.c

index 8d1301c0f22a19a5ff3ed16d330b8cbbee70b22b..a1b64ef1d8b1aa7ad469572f4ac8f103db09c86d 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -3665,10 +3665,6 @@ static void walk_mm(struct mm_struct *mm, struct lru_gen_mm_walk *walk)
                if (walk->seq != max_seq)
                        break;
 
-               /* folio_update_gen() requires stable folio_memcg() */
-               if (!mem_cgroup_trylock_pages(memcg))
-                       break;
-
                /* the caller might be holding the lock for write */
                if (mmap_read_trylock(mm)) {
                        err = walk_page_range(mm, walk->next_addr, ULONG_MAX, &mm_walk_ops, walk);
@@ -3676,8 +3672,6 @@ static void walk_mm(struct mm_struct *mm, struct lru_gen_mm_walk *walk)
                        mmap_read_unlock(mm);
                }
 
-               mem_cgroup_unlock_pages();
-
                if (walk->batched) {
                        spin_lock_irq(&lruvec->lru_lock);
                        reset_batch_size(walk);
@@ -4099,10 +4093,6 @@ bool lru_gen_look_around(struct page_vma_mapped_walk *pvmw)
                }
        }
 
-       /* folio_update_gen() requires stable folio_memcg() */
-       if (!mem_cgroup_trylock_pages(memcg))
-               return true;
-
        arch_enter_lazy_mmu_mode();
 
        pte -= (addr - start) / PAGE_SIZE;
@@ -4147,7 +4137,6 @@ bool lru_gen_look_around(struct page_vma_mapped_walk *pvmw)
        }
 
        arch_leave_lazy_mmu_mode();
-       mem_cgroup_unlock_pages();
 
        /* feedback from rmap walkers to page table walkers */
        if (mm_state && suitable_to_scan(i, young))