From 3faa3ebc89c1d95605cb43174240f97510ed0e52 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Mon, 10 Feb 2025 20:37:55 +0100 Subject: [PATCH 01/16] mm/page_idle: handle device-exclusive entries correctly in page_idle_clear_pte_refs_one() Ever since commit b756a3b5e7ea ("mm: device exclusive memory access") we can return with a device-exclusive entry from page_vma_mapped_walk(). page_idle_clear_pte_refs_one() is not prepared for that, so let's teach it what to do with these PFN swap PTEs. Note that device-private entries are so far not applicable on that path, as page_idle_get_folio() filters out non-lru folios. Should we just skip PFN swap PTEs completely? Possible, but it seems straight forward to just handle them correctly. Note that we could currently only run into this case with device-exclusive entries on THPs. We still adjust the mapcount on conversion to device-exclusive; this makes the rmap walk abort early for small folios, because we'll always have !folio_mapped() with a single device-exclusive entry. We'll adjust the mapcount logic once all page_vma_mapped_walk() users can properly handle device-exclusive entries. Link: https://lkml.kernel.org/r/20250210193801.781278-14-david@redhat.com Fixes: b756a3b5e7ea ("mm: device exclusive memory access") Signed-off-by: David Hildenbrand Tested-by: Alistair Popple Cc: Alex Shi Cc: Danilo Krummrich Cc: Dave Airlie Cc: Jann Horn Cc: Jason Gunthorpe Cc: Jerome Glisse Cc: John Hubbard Cc: Jonathan Corbet Cc: Karol Herbst Cc: Liam Howlett Cc: Lorenzo Stoakes Cc: Lyude Cc: "Masami Hiramatsu (Google)" Cc: Oleg Nesterov Cc: Pasha Tatashin Cc: Peter Xu Cc: Peter Zijlstra (Intel) Cc: SeongJae Park Cc: Simona Vetter Cc: Vlastimil Babka Cc: Yanteng Si Cc: Barry Song Signed-off-by: Andrew Morton --- mm/page_idle.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/mm/page_idle.c b/mm/page_idle.c index 947c7c7a3728..408aaf29a3ea 100644 --- a/mm/page_idle.c +++ b/mm/page_idle.c @@ -62,9 +62,14 @@ static bool page_idle_clear_pte_refs_one(struct folio *folio, /* * For PTE-mapped THP, one sub page is referenced, * the whole THP is referenced. + * + * PFN swap PTEs, such as device-exclusive ones, that + * actually map pages are "old" from a CPU perspective. + * The MMU notifier takes care of any device aspects. */ - if (ptep_clear_young_notify(vma, addr, pvmw.pte)) - referenced = true; + if (likely(pte_present(ptep_get(pvmw.pte)))) + referenced |= ptep_test_and_clear_young(vma, addr, pvmw.pte); + referenced |= mmu_notifier_clear_young(vma->vm_mm, addr, addr + PAGE_SIZE); } else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) { if (pmdp_clear_young_notify(vma, addr, pvmw.pmd)) referenced = true; -- 2.50.1 From 39628c39ba3b9efbbb4ba9df4a20254f0c137cd7 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Mon, 10 Feb 2025 20:37:56 +0100 Subject: [PATCH 02/16] mm/damon: handle device-exclusive entries correctly in damon_folio_young_one() Ever since commit b756a3b5e7ea ("mm: device exclusive memory access") we can return with a device-exclusive entry from page_vma_mapped_walk(). damon_folio_young_one() is not prepared for that, so teach it about these PFN swap PTEs. Note that device-private entries are so far not applicable on that path, as we expect ZONE_DEVICE pages so far only in migration code when it comes to the RMAP. The impact is rather small: we'd be calling pte_young() on a non-present PTE, which is not really defined to have semantic. 
Note that we could currently only run into this case with device-exclusive entries on THPs. We still adjust the mapcount on conversion to device-exclusive; this makes the rmap walk abort early for small folios, because we'll always have !folio_mapped() with a single device-exclusive entry. We'll adjust the mapcount logic once all page_vma_mapped_walk() users can properly handle device-exclusive entries. Link: https://lkml.kernel.org/r/20250210193801.781278-15-david@redhat.com Fixes: b756a3b5e7ea ("mm: device exclusive memory access") Signed-off-by: David Hildenbrand Reviewed-by: SeongJae Park Tested-by: Alistair Popple Cc: Alex Shi Cc: Danilo Krummrich Cc: Dave Airlie Cc: Jann Horn Cc: Jason Gunthorpe Cc: Jerome Glisse Cc: John Hubbard Cc: Jonathan Corbet Cc: Karol Herbst Cc: Liam Howlett Cc: Lorenzo Stoakes Cc: Lyude Cc: "Masami Hiramatsu (Google)" Cc: Oleg Nesterov Cc: Pasha Tatashin Cc: Peter Xu Cc: Peter Zijlstra (Intel) Cc: Simona Vetter Cc: Vlastimil Babka Cc: Yanteng Si Cc: Barry Song Signed-off-by: Andrew Morton --- mm/damon/paddr.c | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/mm/damon/paddr.c b/mm/damon/paddr.c index c834aa217835..1a89920efce9 100644 --- a/mm/damon/paddr.c +++ b/mm/damon/paddr.c @@ -92,12 +92,20 @@ static bool damon_folio_young_one(struct folio *folio, { bool *accessed = arg; DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, addr, 0); + pte_t pte; *accessed = false; while (page_vma_mapped_walk(&pvmw)) { addr = pvmw.address; if (pvmw.pte) { - *accessed = pte_young(ptep_get(pvmw.pte)) || + pte = ptep_get(pvmw.pte); + + /* + * PFN swap PTEs, such as device-exclusive ones, that + * actually map pages are "old" from a CPU perspective. + * The MMU notifier takes care of any device aspects. + */ + *accessed = (pte_present(pte) && pte_young(pte)) || !folio_test_idle(folio) || mmu_notifier_test_young(vma->vm_mm, addr); } else { -- 2.50.1 From 1f3ac4c577bb993539561b47bf2fe9d1beaaca2e Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Mon, 10 Feb 2025 20:37:57 +0100 Subject: [PATCH 03/16] mm/damon: handle device-exclusive entries correctly in damon_folio_mkold_one() Ever since commit b756a3b5e7ea ("mm: device exclusive memory access") we can return with a device-exclusive entry from page_vma_mapped_walk(). damon_folio_mkold_one() is not prepared for that and calls damon_ptep_mkold() with PFN swap PTEs. Teach damon_ptep_mkold() to deal with these PFN swap PTEs. Note that device-private entries are so far not applicable on that path, as damon_get_folio() filters out non-lru folios. Should we just skip PFN swap PTEs completely? Possible, but it seems straight forward to just handle it correctly. Note that we could currently only run into this case with device-exclusive entries on THPs. We still adjust the mapcount on conversion to device-exclusive; this makes the rmap walk abort early for small folios, because we'll always have !folio_mapped() with a single device-exclusive entry. We'll adjust the mapcount logic once all page_vma_mapped_walk() users can properly handle device-exclusive entries. 
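For illustration only (this helper is hypothetical and merely restates the pattern applied here and in patch 01), the rule for such young-clearing rmap walkers becomes: test and clear the CPU young bit only for present PTEs, and rely on the MMU notifier to cover device mappings such as device-exclusive entries:

#include <linux/mm.h>
#include <linux/mmu_notifier.h>

/* Hypothetical helper, not added by this patch; sketch only. */
static bool sketch_clear_young(struct vm_area_struct *vma, unsigned long addr,
			       pte_t *ptep)
{
	pte_t pteval = ptep_get(ptep);
	bool young = false;

	/* Only present PTEs carry a meaningful CPU young bit. */
	if (pte_present(pteval))
		young |= ptep_test_and_clear_young(vma, addr, ptep);
	/* Device-exclusive and similar mappings are covered here instead. */
	young |= mmu_notifier_clear_young(vma->vm_mm, addr, addr + PAGE_SIZE);
	return young;
}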
Link: https://lkml.kernel.org/r/20250210193801.781278-16-david@redhat.com Signed-off-by: David Hildenbrand Reviewed-by: SeongJae Park Tested-by: Alistair Popple Cc: Alex Shi Cc: Danilo Krummrich Cc: Dave Airlie Cc: Jann Horn Cc: Jason Gunthorpe Cc: Jerome Glisse Cc: John Hubbard Cc: Jonathan Corbet Cc: Karol Herbst Cc: Liam Howlett Cc: Lorenzo Stoakes Cc: Lyude Cc: "Masami Hiramatsu (Google)" Cc: Oleg Nesterov Cc: Pasha Tatashin Cc: Peter Xu Cc: Peter Zijlstra (Intel) Cc: Simona Vetter Cc: Vlastimil Babka Cc: Yanteng Si Cc: Barry Song Signed-off-by: Andrew Morton --- mm/damon/ops-common.c | 23 +++++++++++++++++++++-- 1 file changed, 21 insertions(+), 2 deletions(-) diff --git a/mm/damon/ops-common.c b/mm/damon/ops-common.c index d25d99cb5f2b..86a50e8fbc80 100644 --- a/mm/damon/ops-common.c +++ b/mm/damon/ops-common.c @@ -9,6 +9,8 @@ #include #include #include +#include +#include #include "ops-common.h" @@ -39,12 +41,29 @@ struct folio *damon_get_folio(unsigned long pfn) void damon_ptep_mkold(pte_t *pte, struct vm_area_struct *vma, unsigned long addr) { - struct folio *folio = damon_get_folio(pte_pfn(ptep_get(pte))); + pte_t pteval = ptep_get(pte); + struct folio *folio; + bool young = false; + unsigned long pfn; + + if (likely(pte_present(pteval))) + pfn = pte_pfn(pteval); + else + pfn = swp_offset_pfn(pte_to_swp_entry(pteval)); + folio = damon_get_folio(pfn); if (!folio) return; - if (ptep_clear_young_notify(vma, addr, pte)) + /* + * PFN swap PTEs, such as device-exclusive ones, that actually map pages + * are "old" from a CPU perspective. The MMU notifier takes care of any + * device aspects. + */ + if (likely(pte_present(pteval))) + young |= ptep_test_and_clear_young(vma, addr, pte); + young |= mmu_notifier_clear_young(vma->vm_mm, addr, addr + PAGE_SIZE); + if (young) folio_set_young(folio); folio_set_idle(folio); -- 2.50.1 From f495bd7e0d9bd00c9a76c49f792b532bbb0efd0a Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Mon, 10 Feb 2025 20:37:58 +0100 Subject: [PATCH 04/16] mm/rmap: keep mapcount untouched for device-exclusive entries Now that conversion to device-exclusive does no longer perform an rmap walk and all page_vma_mapped_walk() users were taught to properly handle device-exclusive entries, let's treat device-exclusive entries just as if they would be present, similar to how we handle device-private entries already. This fixes swapout/migration/split/hwpoison of folios with device-exclusive entries. We only had to take care of page_vma_mapped_walk() users, because these traditionally assume pte_present(). Other page table walkers already have to handle !pte_present(), and some of them might simply skip them (e.g., MADV_PAGEOUT) if they are not specialized on them. This change doesn't modify the latter. Note that while folios with device-exclusive PTEs can now get migrated, khugepaged will not collapse a THP if there is device-exclusive PTE. Doing so might also not be desired if the device frequently performs atomics to the same page. Similarly, KSM will never merge order-0 folios that are device-exclusive. 
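To spell out the resulting model (an illustrative sketch only, not code added by this patch): among non-present PFN swap PTEs, device-exclusive and device-private entries still map their page and, with this change, are both reflected in the mapcount, whereas genuine swap and migration entries are not:

#include <linux/mm.h>
#include <linux/swapops.h>

/* Hypothetical helper, not part of this patch; sketch of the rule above. */
static bool sketch_pte_maps_page(pte_t pteval)
{
	swp_entry_t entry;

	if (pte_present(pteval))
		return true;
	if (!is_swap_pte(pteval))
		return false;	/* pte_none() and friends */

	entry = pte_to_swp_entry(pteval);
	/* These PFN swap PTEs still map a page and count as mappings. */
	return is_device_exclusive_entry(entry) ||
	       is_device_private_entry(entry);
}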
Link: https://lkml.kernel.org/r/20250210193801.781278-17-david@redhat.com Fixes: b756a3b5e7ea ("mm: device exclusive memory access") Signed-off-by: David Hildenbrand Tested-by: Alistair Popple Cc: Alex Shi Cc: Danilo Krummrich Cc: Dave Airlie Cc: Jann Horn Cc: Jason Gunthorpe Cc: Jerome Glisse Cc: John Hubbard Cc: Jonathan Corbet Cc: Karol Herbst Cc: Liam Howlett Cc: Lorenzo Stoakes Cc: Lyude Cc: "Masami Hiramatsu (Google)" Cc: Oleg Nesterov Cc: Pasha Tatashin Cc: Peter Xu Cc: Peter Zijlstra (Intel) Cc: SeongJae Park Cc: Simona Vetter Cc: Vlastimil Babka Cc: Yanteng Si Cc: Barry Song Signed-off-by: Andrew Morton --- mm/memory.c | 17 +---------------- mm/rmap.c | 7 ------- 2 files changed, 1 insertion(+), 23 deletions(-) diff --git a/mm/memory.c b/mm/memory.c index 568b3afde8d2..94feb51a7983 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -741,20 +741,6 @@ static void restore_exclusive_pte(struct vm_area_struct *vma, VM_BUG_ON_FOLIO(pte_write(pte) && (!folio_test_anon(folio) && PageAnonExclusive(page)), folio); - - /* - * No need to take a page reference as one was already - * created when the swap entry was made. - */ - if (folio_test_anon(folio)) - folio_add_anon_rmap_pte(folio, page, vma, address, RMAP_NONE); - else - /* - * Currently device exclusive access only supports anonymous - * memory so the entry shouldn't point to a filebacked page. - */ - WARN_ON_ONCE(1); - set_pte_at(vma->vm_mm, address, ptep, pte); /* @@ -1626,8 +1612,7 @@ static inline int zap_nonpresent_ptes(struct mmu_gather *tlb, */ WARN_ON_ONCE(!vma_is_anonymous(vma)); rss[mm_counter(folio)]--; - if (is_device_private_entry(entry)) - folio_remove_rmap_pte(folio, page, vma); + folio_remove_rmap_pte(folio, page, vma); folio_put(folio); } else if (!non_swap_entry(entry)) { /* Genuine swap entries, hence a private anon pages */ diff --git a/mm/rmap.c b/mm/rmap.c index 7b737f0f68fb..e2a543f639ce 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -2511,13 +2511,6 @@ struct page *make_device_exclusive(struct mm_struct *mm, unsigned long addr, /* The pte is writable, uffd-wp does not apply. */ set_pte_at(mm, addr, fw.ptep, swp_pte); - /* - * TODO: The device-exclusive PFN swap PTE holds a folio reference but - * does not count as a mapping (mapcount), which is wrong and must be - * fixed, otherwise RMAP walks don't behave as expected. - */ - folio_remove_rmap_pte(folio, page, vma); - folio_walk_end(&fw, vma); mmu_notifier_invalidate_range_end(&range); *foliop = folio; -- 2.50.1 From b8932ca8b9244839b263786c69d57ed05c0d96ec Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Mon, 10 Feb 2025 20:37:59 +0100 Subject: [PATCH 05/16] mm/rmap: avoid -EBUSY from make_device_exclusive() Failing to obtain the folio lock, for example because the folio is concurrently getting migrated or swapped out, can easily make the callers fail: for example, the hmm selftest can sometimes be observed to fail because of this. Instead of forcing the caller to retry, let's simply retry in this to-be-expected case. Similarly, avoid spurious failures simply because we raced with someone (e.g., swapout) modifying the page table such that our folio_walk fails. Simply unconditionally lock the folio, and retry GUP if our folio_walk fails. Note that the folio_walk repeatedly failing is not something we expect. Note that we might want to avoid grabbing the folio lock at some point; for now, keep that as is and only unconditionally lock the folio. With this change, the hmm selftests don't fail simply because the folio is already locked. 
While this fixes the selftests in some cases, it's likely not something that deserves a "Fixes:". Link: https://lkml.kernel.org/r/20250210193801.781278-18-david@redhat.com Signed-off-by: David Hildenbrand Tested-by: Alistair Popple Cc: Alex Shi Cc: Danilo Krummrich Cc: Dave Airlie Cc: Jann Horn Cc: Jason Gunthorpe Cc: Jerome Glisse Cc: John Hubbard Cc: Jonathan Corbet Cc: Karol Herbst Cc: Liam Howlett Cc: Lorenzo Stoakes Cc: Lyude Cc: "Masami Hiramatsu (Google)" Cc: Oleg Nesterov Cc: Pasha Tatashin Cc: Peter Xu Cc: Peter Zijlstra (Intel) Cc: SeongJae Park Cc: Simona Vetter Cc: Vlastimil Babka Cc: Yanteng Si Cc: Barry Song Signed-off-by: Andrew Morton --- mm/rmap.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/mm/rmap.c b/mm/rmap.c index e2a543f639ce..0f760b93fc0a 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -2435,6 +2435,7 @@ struct page *make_device_exclusive(struct mm_struct *mm, unsigned long addr, struct page *page; swp_entry_t entry; pte_t swp_pte; + int ret; mmap_assert_locked(mm); addr = PAGE_ALIGN_DOWN(addr); @@ -2448,6 +2449,7 @@ struct page *make_device_exclusive(struct mm_struct *mm, unsigned long addr, * fault will trigger a conversion to an ordinary * (non-device-exclusive) PTE and issue a MMU_NOTIFY_EXCLUSIVE. */ +retry: page = get_user_page_vma_remote(mm, addr, FOLL_GET | FOLL_WRITE | FOLL_SPLIT_PMD, &vma); @@ -2460,9 +2462,10 @@ struct page *make_device_exclusive(struct mm_struct *mm, unsigned long addr, return ERR_PTR(-EOPNOTSUPP); } - if (!folio_trylock(folio)) { + ret = folio_lock_killable(folio); + if (ret) { folio_put(folio); - return ERR_PTR(-EBUSY); + return ERR_PTR(ret); } /* @@ -2488,7 +2491,7 @@ struct page *make_device_exclusive(struct mm_struct *mm, unsigned long addr, mmu_notifier_invalidate_range_end(&range); folio_unlock(folio); folio_put(folio); - return ERR_PTR(-EBUSY); + goto retry; } /* Nuke the page table entry so we get the uptodate dirty bit. */ -- 2.50.1 From 350dce38eb6e221cca16940f3bd8d9947364f1ca Mon Sep 17 00:00:00 2001 From: Hao Zhang Date: Wed, 15 Jan 2025 09:58:29 +0800 Subject: [PATCH 06/16] mm/vmscan: extract calculated pressure balance as a function Extract pressure balance calculation into a function.This doesn't change current behaviour. [akpm@linux-foundation.org: 80-col wrapping] Link: https://lkml.kernel.org/r/tencent_735DB36A2306C08B8568049E4C0B99716C07@qq.com Signed-off-by: Hao Zhang Signed-off-by: Andrew Morton --- mm/vmscan.c | 68 ++++++++++++++++++++++++++++++----------------------- 1 file changed, 38 insertions(+), 30 deletions(-) diff --git a/mm/vmscan.c b/mm/vmscan.c index fc4951d23b97..bc1826020159 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -2400,6 +2400,43 @@ static void prepare_scan_control(pg_data_t *pgdat, struct scan_control *sc) } } +static inline void calculate_pressure_balance(struct scan_control *sc, + int swappiness, u64 *fraction, u64 *denominator) +{ + unsigned long anon_cost, file_cost, total_cost; + unsigned long ap, fp; + + /* + * Calculate the pressure balance between anon and file pages. + * + * The amount of pressure we put on each LRU is inversely + * proportional to the cost of reclaiming each list, as + * determined by the share of pages that are refaulting, times + * the relative IO cost of bringing back a swapped out + * anonymous page vs reloading a filesystem page (swappiness). + * + * Although we limit that influence to ensure no list gets + * left behind completely: at least a third of the pressure is + * applied, before swappiness. 
+ * + * With swappiness at 100, anon and file have equal IO cost. + */ + total_cost = sc->anon_cost + sc->file_cost; + anon_cost = total_cost + sc->anon_cost; + file_cost = total_cost + sc->file_cost; + total_cost = anon_cost + file_cost; + + ap = swappiness * (total_cost + 1); + ap /= anon_cost + 1; + + fp = (MAX_SWAPPINESS - swappiness) * (total_cost + 1); + fp /= file_cost + 1; + + fraction[WORKINGSET_ANON] = ap; + fraction[WORKINGSET_FILE] = fp; + *denominator = ap + fp; +} + /* * Determine how aggressively the anon and file LRU lists should be * scanned. @@ -2412,12 +2449,10 @@ static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc, { struct pglist_data *pgdat = lruvec_pgdat(lruvec); struct mem_cgroup *memcg = lruvec_memcg(lruvec); - unsigned long anon_cost, file_cost, total_cost; int swappiness = sc_swappiness(sc, memcg); u64 fraction[ANON_AND_FILE]; u64 denominator = 0; /* gcc */ enum scan_balance scan_balance; - unsigned long ap, fp; enum lru_list lru; /* If we have no swap space, do not bother scanning anon folios. */ @@ -2466,35 +2501,8 @@ static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc, } scan_balance = SCAN_FRACT; - /* - * Calculate the pressure balance between anon and file pages. - * - * The amount of pressure we put on each LRU is inversely - * proportional to the cost of reclaiming each list, as - * determined by the share of pages that are refaulting, times - * the relative IO cost of bringing back a swapped out - * anonymous page vs reloading a filesystem page (swappiness). - * - * Although we limit that influence to ensure no list gets - * left behind completely: at least a third of the pressure is - * applied, before swappiness. - * - * With swappiness at 100, anon and file have equal IO cost. - */ - total_cost = sc->anon_cost + sc->file_cost; - anon_cost = total_cost + sc->anon_cost; - file_cost = total_cost + sc->file_cost; - total_cost = anon_cost + file_cost; - - ap = swappiness * (total_cost + 1); - ap /= anon_cost + 1; - - fp = (MAX_SWAPPINESS - swappiness) * (total_cost + 1); - fp /= file_cost + 1; + calculate_pressure_balance(sc, swappiness, fraction, &denominator); - fraction[0] = ap; - fraction[1] = fp; - denominator = ap + fp; out: for_each_evictable_lru(lru) { bool file = is_file_lru(lru); -- 2.50.1 From 58ba73e521b3d3a7a7612fc200beba1544a5100c Mon Sep 17 00:00:00 2001 From: Yosry Ahmed Date: Wed, 29 Jan 2025 18:06:31 +0000 Subject: [PATCH 07/16] mm: z3fold: remove z3fold Patch series "mm: zswap: remove z3fold and zbud", v2. After 2 cycles of deprecating z3fold, remove it as well as zbud (rationale in specific patches). This patch (of 2): Z3fold has been marked as deprecated for 2 cycles and no one complained, as expected. As there are no known users, remove the code now. 
Link: https://lkml.kernel.org/r/20250129180633.3501650-1-yosry.ahmed@linux.dev Link: https://lkml.kernel.org/r/20250129180633.3501650-2-yosry.ahmed@linux.dev Signed-off-by: Yosry Ahmed Acked-by: Johannes Weiner Reviewed-by: Shakeel Butt Acked-by: Nhat Pham Cc: Alexander Gordeev Cc: Chengming Zhou Cc: Christian Borntraeger Cc: Dan Streetman Cc: Heiko Carstens Cc: Huacai Chen Cc: Miaohe Lin Cc: Seth Jennings Cc: Sven Schnelle Cc: Vasily Gorbik Cc: Vitaly Wool Cc: Vlastimil Babka Cc: WANG Xuerui Signed-off-by: Andrew Morton --- CREDITS | 1 + Documentation/mm/index.rst | 1 - Documentation/mm/z3fold.rst | 28 - Documentation/translations/zh_CN/mm/index.rst | 1 - .../translations/zh_CN/mm/z3fold.rst | 31 - MAINTAINERS | 7 - mm/Kconfig | 29 - mm/Makefile | 1 - mm/z3fold.c | 1447 ----------------- 9 files changed, 1 insertion(+), 1545 deletions(-) delete mode 100644 Documentation/mm/z3fold.rst delete mode 100644 Documentation/translations/zh_CN/mm/z3fold.rst delete mode 100644 mm/z3fold.c diff --git a/CREDITS b/CREDITS index 53d11a46fd69..aeccd79f9778 100644 --- a/CREDITS +++ b/CREDITS @@ -4311,6 +4311,7 @@ S: England N: Vitaly Wool E: vitaly.wool@konsulko.com D: Maintenance and development of zswap +D: Maintenance and development of z3fold N: Chris Wright E: chrisw@sous-sol.org diff --git a/Documentation/mm/index.rst b/Documentation/mm/index.rst index 0be1c7503a01..d3ada3e45e10 100644 --- a/Documentation/mm/index.rst +++ b/Documentation/mm/index.rst @@ -62,5 +62,4 @@ documentation, or deleted if it has served its purpose. unevictable-lru vmalloced-kernel-stacks vmemmap_dedup - z3fold zsmalloc diff --git a/Documentation/mm/z3fold.rst b/Documentation/mm/z3fold.rst deleted file mode 100644 index 25b5935d06c7..000000000000 --- a/Documentation/mm/z3fold.rst +++ /dev/null @@ -1,28 +0,0 @@ -====== -z3fold -====== - -z3fold is a special purpose allocator for storing compressed pages. -It is designed to store up to three compressed pages per physical page. -It is a zbud derivative which allows for higher compression -ratio keeping the simplicity and determinism of its predecessor. - -The main differences between z3fold and zbud are: - -* unlike zbud, z3fold allows for up to PAGE_SIZE allocations -* z3fold can hold up to 3 compressed pages in its page -* z3fold doesn't export any API itself and is thus intended to be used - via the zpool API. - -To keep the determinism and simplicity, z3fold, just like zbud, always -stores an integral number of compressed pages per page, but it can store -up to 3 pages unlike zbud which can store at most 2. Therefore the -compression ratio goes to around 2.7x while zbud's one is around 1.7x. - -Unlike zbud (but like zsmalloc for that matter) z3fold_alloc() does not -return a dereferenceable pointer. Instead, it returns an unsigned long -handle which encodes actual location of the allocated object. - -Keeping effective compression ratio close to zsmalloc's, z3fold doesn't -depend on MMU enabled and provides more predictable reclaim behavior -which makes it a better fit for small and response-critical systems. 
diff --git a/Documentation/translations/zh_CN/mm/index.rst b/Documentation/translations/zh_CN/mm/index.rst index c8726bce8f74..a71116be058f 100644 --- a/Documentation/translations/zh_CN/mm/index.rst +++ b/Documentation/translations/zh_CN/mm/index.rst @@ -58,7 +58,6 @@ Linux内存管理文档 remap_file_pages split_page_table_lock vmalloced-kernel-stacks - z3fold zsmalloc TODOLIST: diff --git a/Documentation/translations/zh_CN/mm/z3fold.rst b/Documentation/translations/zh_CN/mm/z3fold.rst deleted file mode 100644 index 9569a6d88270..000000000000 --- a/Documentation/translations/zh_CN/mm/z3fold.rst +++ /dev/null @@ -1,31 +0,0 @@ -:Original: Documentation/mm/z3fold.rst - -:翻译: - - 司延腾 Yanteng Si - -:校译: - - -====== -z3fold -====== - -z3fold是一个专门用于存储压缩页的分配器。它被设计为每个物理页最多可以存储三个压缩页。 -它是zbud的衍生物,允许更高的压缩率,保持其前辈的简单性和确定性。 - -z3fold和zbud的主要区别是: - -* 与zbud不同的是,z3fold允许最大的PAGE_SIZE分配。 -* z3fold在其页面中最多可以容纳3个压缩页面 -* z3fold本身没有输出任何API,因此打算通过zpool的API来使用 - -为了保持确定性和简单性,z3fold,就像zbud一样,总是在每页存储一个整数的压缩页,但是 -它最多可以存储3页,不像zbud最多可以存储2页。因此压缩率达到2.7倍左右,而zbud的压缩 -率是1.7倍左右。 - -不像zbud(但也像zsmalloc),z3fold_alloc()那样不返回一个可重复引用的指针。相反,它 -返回一个无符号长句柄,它编码了被分配对象的实际位置。 - -保持有效的压缩率接近于zsmalloc,z3fold不依赖于MMU的启用,并提供更可预测的回收行 -为,这使得它更适合于小型和反应迅速的系统。 diff --git a/MAINTAINERS b/MAINTAINERS index ed7aa6867674..a65a73227e3d 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -26181,13 +26181,6 @@ S: Maintained F: Documentation/input/devices/yealink.rst F: drivers/input/misc/yealink.* -Z3FOLD COMPRESSED PAGE ALLOCATOR -M: Vitaly Wool -R: Miaohe Lin -L: linux-mm@kvack.org -S: Maintained -F: mm/z3fold.c - Z8530 DRIVER FOR AX.25 M: Joerg Reuter L: linux-hams@vger.kernel.org diff --git a/mm/Kconfig b/mm/Kconfig index 1b501db06417..6fa19022c09b 100644 --- a/mm/Kconfig +++ b/mm/Kconfig @@ -146,15 +146,6 @@ config ZSWAP_ZPOOL_DEFAULT_ZBUD help Use the zbud allocator as the default allocator. -config ZSWAP_ZPOOL_DEFAULT_Z3FOLD_DEPRECATED - bool "z3foldi (DEPRECATED)" - select Z3FOLD_DEPRECATED - help - Use the z3fold allocator as the default allocator. - - Deprecated and scheduled for removal in a few cycles, - see CONFIG_Z3FOLD_DEPRECATED. - config ZSWAP_ZPOOL_DEFAULT_ZSMALLOC bool "zsmalloc" select ZSMALLOC @@ -166,7 +157,6 @@ config ZSWAP_ZPOOL_DEFAULT string depends on ZSWAP default "zbud" if ZSWAP_ZPOOL_DEFAULT_ZBUD - default "z3fold" if ZSWAP_ZPOOL_DEFAULT_Z3FOLD_DEPRECATED default "zsmalloc" if ZSWAP_ZPOOL_DEFAULT_ZSMALLOC default "" @@ -180,25 +170,6 @@ config ZBUD deterministic reclaim properties that make it preferable to a higher density approach when reclaim will be used. -config Z3FOLD_DEPRECATED - tristate "3:1 compression allocator (z3fold) (DEPRECATED)" - depends on ZSWAP - help - Deprecated and scheduled for removal in a few cycles. If you have - a good reason for using Z3FOLD over ZSMALLOC, please contact - linux-mm@kvack.org and the zswap maintainers. - - A special purpose allocator for storing compressed pages. - It is designed to store up to three compressed pages per physical - page. It is a ZBUD derivative so the simplicity and determinism are - still there. 
- -config Z3FOLD - tristate - default y if Z3FOLD_DEPRECATED=y - default m if Z3FOLD_DEPRECATED=m - depends on Z3FOLD_DEPRECATED - config ZSMALLOC tristate prompt "N:1 compression allocator (zsmalloc)" if (ZSWAP || ZRAM) diff --git a/mm/Makefile b/mm/Makefile index 850386a67b3e..e4c03da3c084 100644 --- a/mm/Makefile +++ b/mm/Makefile @@ -115,7 +115,6 @@ obj-$(CONFIG_MEMORY_ISOLATION) += page_isolation.o obj-$(CONFIG_ZPOOL) += zpool.o obj-$(CONFIG_ZBUD) += zbud.o obj-$(CONFIG_ZSMALLOC) += zsmalloc.o -obj-$(CONFIG_Z3FOLD) += z3fold.o obj-$(CONFIG_GENERIC_EARLY_IOREMAP) += early_ioremap.o obj-$(CONFIG_CMA) += cma.o obj-$(CONFIG_NUMA) += numa.o diff --git a/mm/z3fold.c b/mm/z3fold.c deleted file mode 100644 index 379d24b4fef9..000000000000 --- a/mm/z3fold.c +++ /dev/null @@ -1,1447 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * z3fold.c - * - * Author: Vitaly Wool - * Copyright (C) 2016, Sony Mobile Communications Inc. - * - * This implementation is based on zbud written by Seth Jennings. - * - * z3fold is an special purpose allocator for storing compressed pages. It - * can store up to three compressed pages per page which improves the - * compression ratio of zbud while retaining its main concepts (e. g. always - * storing an integral number of objects per page) and simplicity. - * It still has simple and deterministic reclaim properties that make it - * preferable to a higher density approach (with no requirement on integral - * number of object per page) when reclaim is used. - * - * As in zbud, pages are divided into "chunks". The size of the chunks is - * fixed at compile time and is determined by NCHUNKS_ORDER below. - * - * z3fold doesn't export any API and is meant to be used via zpool API. - */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -/* - * NCHUNKS_ORDER determines the internal allocation granularity, effectively - * adjusting internal fragmentation. It also determines the number of - * freelists maintained in each pool. NCHUNKS_ORDER of 6 means that the - * allocation granularity will be in chunks of size PAGE_SIZE/64. Some chunks - * in the beginning of an allocated page are occupied by z3fold header, so - * NCHUNKS will be calculated to 63 (or 62 in case CONFIG_DEBUG_SPINLOCK=y), - * which shows the max number of free chunks in z3fold page, also there will - * be 63, or 62, respectively, freelists per pool. - */ -#define NCHUNKS_ORDER 6 - -#define CHUNK_SHIFT (PAGE_SHIFT - NCHUNKS_ORDER) -#define CHUNK_SIZE (1 << CHUNK_SHIFT) -#define ZHDR_SIZE_ALIGNED round_up(sizeof(struct z3fold_header), CHUNK_SIZE) -#define ZHDR_CHUNKS (ZHDR_SIZE_ALIGNED >> CHUNK_SHIFT) -#define TOTAL_CHUNKS (PAGE_SIZE >> CHUNK_SHIFT) -#define NCHUNKS (TOTAL_CHUNKS - ZHDR_CHUNKS) - -#define BUDDY_MASK (0x3) -#define BUDDY_SHIFT 2 -#define SLOTS_ALIGN (0x40) - -/***************** - * Structures -*****************/ -struct z3fold_pool; - -enum buddy { - HEADLESS = 0, - FIRST, - MIDDLE, - LAST, - BUDDIES_MAX = LAST -}; - -struct z3fold_buddy_slots { - /* - * we are using BUDDY_MASK in handle_to_buddy etc. 
so there should - * be enough slots to hold all possible variants - */ - unsigned long slot[BUDDY_MASK + 1]; - unsigned long pool; /* back link */ - rwlock_t lock; -}; -#define HANDLE_FLAG_MASK (0x03) - -/* - * struct z3fold_header - z3fold page metadata occupying first chunks of each - * z3fold page, except for HEADLESS pages - * @buddy: links the z3fold page into the relevant list in the - * pool - * @page_lock: per-page lock - * @refcount: reference count for the z3fold page - * @work: work_struct for page layout optimization - * @slots: pointer to the structure holding buddy slots - * @pool: pointer to the containing pool - * @cpu: CPU which this page "belongs" to - * @first_chunks: the size of the first buddy in chunks, 0 if free - * @middle_chunks: the size of the middle buddy in chunks, 0 if free - * @last_chunks: the size of the last buddy in chunks, 0 if free - * @first_num: the starting number (for the first handle) - * @mapped_count: the number of objects currently mapped - */ -struct z3fold_header { - struct list_head buddy; - spinlock_t page_lock; - struct kref refcount; - struct work_struct work; - struct z3fold_buddy_slots *slots; - struct z3fold_pool *pool; - short cpu; - unsigned short first_chunks; - unsigned short middle_chunks; - unsigned short last_chunks; - unsigned short start_middle; - unsigned short first_num:2; - unsigned short mapped_count:2; - unsigned short foreign_handles:2; -}; - -/** - * struct z3fold_pool - stores metadata for each z3fold pool - * @name: pool name - * @lock: protects pool unbuddied lists - * @stale_lock: protects pool stale page list - * @unbuddied: per-cpu array of lists tracking z3fold pages that contain 2- - * buddies; the list each z3fold page is added to depends on - * the size of its free region. - * @stale: list of pages marked for freeing - * @pages_nr: number of z3fold pages in the pool. - * @c_handle: cache for z3fold_buddy_slots allocation - * @compact_wq: workqueue for page layout background optimization - * @release_wq: workqueue for safe page release - * @work: work_struct for safe page release - * - * This structure is allocated at pool creation time and maintains metadata - * pertaining to a particular z3fold pool. 
- */ -struct z3fold_pool { - const char *name; - spinlock_t lock; - spinlock_t stale_lock; - struct list_head __percpu *unbuddied; - struct list_head stale; - atomic64_t pages_nr; - struct kmem_cache *c_handle; - struct workqueue_struct *compact_wq; - struct workqueue_struct *release_wq; - struct work_struct work; -}; - -/* - * Internal z3fold page flags - */ -enum z3fold_page_flags { - PAGE_HEADLESS = 0, - MIDDLE_CHUNK_MAPPED, - NEEDS_COMPACTING, - PAGE_STALE, - PAGE_CLAIMED, /* by either reclaim or free */ - PAGE_MIGRATED, /* page is migrated and soon to be released */ -}; - -/* - * handle flags, go under HANDLE_FLAG_MASK - */ -enum z3fold_handle_flags { - HANDLES_NOFREE = 0, -}; - -/* - * Forward declarations - */ -static struct z3fold_header *__z3fold_alloc(struct z3fold_pool *, size_t, bool); -static void compact_page_work(struct work_struct *w); - -/***************** - * Helpers -*****************/ - -/* Converts an allocation size in bytes to size in z3fold chunks */ -static int size_to_chunks(size_t size) -{ - return (size + CHUNK_SIZE - 1) >> CHUNK_SHIFT; -} - -#define for_each_unbuddied_list(_iter, _begin) \ - for ((_iter) = (_begin); (_iter) < NCHUNKS; (_iter)++) - -static inline struct z3fold_buddy_slots *alloc_slots(struct z3fold_pool *pool, - gfp_t gfp) -{ - struct z3fold_buddy_slots *slots = kmem_cache_zalloc(pool->c_handle, - gfp); - - if (slots) { - /* It will be freed separately in free_handle(). */ - kmemleak_not_leak(slots); - slots->pool = (unsigned long)pool; - rwlock_init(&slots->lock); - } - - return slots; -} - -static inline struct z3fold_pool *slots_to_pool(struct z3fold_buddy_slots *s) -{ - return (struct z3fold_pool *)(s->pool & ~HANDLE_FLAG_MASK); -} - -static inline struct z3fold_buddy_slots *handle_to_slots(unsigned long handle) -{ - return (struct z3fold_buddy_slots *)(handle & ~(SLOTS_ALIGN - 1)); -} - -/* Lock a z3fold page */ -static inline void z3fold_page_lock(struct z3fold_header *zhdr) -{ - spin_lock(&zhdr->page_lock); -} - -/* Try to lock a z3fold page */ -static inline int z3fold_page_trylock(struct z3fold_header *zhdr) -{ - return spin_trylock(&zhdr->page_lock); -} - -/* Unlock a z3fold page */ -static inline void z3fold_page_unlock(struct z3fold_header *zhdr) -{ - spin_unlock(&zhdr->page_lock); -} - -/* return locked z3fold page if it's not headless */ -static inline struct z3fold_header *get_z3fold_header(unsigned long handle) -{ - struct z3fold_buddy_slots *slots; - struct z3fold_header *zhdr; - int locked = 0; - - if (!(handle & (1 << PAGE_HEADLESS))) { - slots = handle_to_slots(handle); - do { - unsigned long addr; - - read_lock(&slots->lock); - addr = *(unsigned long *)handle; - zhdr = (struct z3fold_header *)(addr & PAGE_MASK); - locked = z3fold_page_trylock(zhdr); - read_unlock(&slots->lock); - if (locked) { - struct page *page = virt_to_page(zhdr); - - if (!test_bit(PAGE_MIGRATED, &page->private)) - break; - z3fold_page_unlock(zhdr); - } - cpu_relax(); - } while (true); - } else { - zhdr = (struct z3fold_header *)(handle & PAGE_MASK); - } - - return zhdr; -} - -static inline void put_z3fold_header(struct z3fold_header *zhdr) -{ - struct page *page = virt_to_page(zhdr); - - if (!test_bit(PAGE_HEADLESS, &page->private)) - z3fold_page_unlock(zhdr); -} - -static inline void free_handle(unsigned long handle, struct z3fold_header *zhdr) -{ - struct z3fold_buddy_slots *slots; - int i; - bool is_free; - - if (WARN_ON(*(unsigned long *)handle == 0)) - return; - - slots = handle_to_slots(handle); - write_lock(&slots->lock); - *(unsigned long 
*)handle = 0; - - if (test_bit(HANDLES_NOFREE, &slots->pool)) { - write_unlock(&slots->lock); - return; /* simple case, nothing else to do */ - } - - if (zhdr->slots != slots) - zhdr->foreign_handles--; - - is_free = true; - for (i = 0; i <= BUDDY_MASK; i++) { - if (slots->slot[i]) { - is_free = false; - break; - } - } - write_unlock(&slots->lock); - - if (is_free) { - struct z3fold_pool *pool = slots_to_pool(slots); - - if (zhdr->slots == slots) - zhdr->slots = NULL; - kmem_cache_free(pool->c_handle, slots); - } -} - -/* Initializes the z3fold header of a newly allocated z3fold page */ -static struct z3fold_header *init_z3fold_page(struct page *page, bool headless, - struct z3fold_pool *pool, gfp_t gfp) -{ - struct z3fold_header *zhdr = page_address(page); - struct z3fold_buddy_slots *slots; - - clear_bit(PAGE_HEADLESS, &page->private); - clear_bit(MIDDLE_CHUNK_MAPPED, &page->private); - clear_bit(NEEDS_COMPACTING, &page->private); - clear_bit(PAGE_STALE, &page->private); - clear_bit(PAGE_CLAIMED, &page->private); - clear_bit(PAGE_MIGRATED, &page->private); - if (headless) - return zhdr; - - slots = alloc_slots(pool, gfp); - if (!slots) - return NULL; - - memset(zhdr, 0, sizeof(*zhdr)); - spin_lock_init(&zhdr->page_lock); - kref_init(&zhdr->refcount); - zhdr->cpu = -1; - zhdr->slots = slots; - zhdr->pool = pool; - INIT_LIST_HEAD(&zhdr->buddy); - INIT_WORK(&zhdr->work, compact_page_work); - return zhdr; -} - -/* Resets the struct page fields and frees the page */ -static void free_z3fold_page(struct page *page, bool headless) -{ - if (!headless) { - lock_page(page); - __ClearPageMovable(page); - unlock_page(page); - } - __free_page(page); -} - -/* Helper function to build the index */ -static inline int __idx(struct z3fold_header *zhdr, enum buddy bud) -{ - return (bud + zhdr->first_num) & BUDDY_MASK; -} - -/* - * Encodes the handle of a particular buddy within a z3fold page. - * Zhdr->page_lock should be held as this function accesses first_num - * if bud != HEADLESS. - */ -static unsigned long __encode_handle(struct z3fold_header *zhdr, - struct z3fold_buddy_slots *slots, - enum buddy bud) -{ - unsigned long h = (unsigned long)zhdr; - int idx = 0; - - /* - * For a headless page, its handle is its pointer with the extra - * PAGE_HEADLESS bit set - */ - if (bud == HEADLESS) - return h | (1 << PAGE_HEADLESS); - - /* otherwise, return pointer to encoded handle */ - idx = __idx(zhdr, bud); - h += idx; - if (bud == LAST) - h |= (zhdr->last_chunks << BUDDY_SHIFT); - - write_lock(&slots->lock); - slots->slot[idx] = h; - write_unlock(&slots->lock); - return (unsigned long)&slots->slot[idx]; -} - -static unsigned long encode_handle(struct z3fold_header *zhdr, enum buddy bud) -{ - return __encode_handle(zhdr, zhdr->slots, bud); -} - -/* only for LAST bud, returns zero otherwise */ -static unsigned short handle_to_chunks(unsigned long handle) -{ - struct z3fold_buddy_slots *slots = handle_to_slots(handle); - unsigned long addr; - - read_lock(&slots->lock); - addr = *(unsigned long *)handle; - read_unlock(&slots->lock); - return (addr & ~PAGE_MASK) >> BUDDY_SHIFT; -} - -/* - * (handle & BUDDY_MASK) < zhdr->first_num is possible in encode_handle - * but that doesn't matter. because the masking will result in the - * correct buddy number. 
- */ -static enum buddy handle_to_buddy(unsigned long handle) -{ - struct z3fold_header *zhdr; - struct z3fold_buddy_slots *slots = handle_to_slots(handle); - unsigned long addr; - - read_lock(&slots->lock); - WARN_ON(handle & (1 << PAGE_HEADLESS)); - addr = *(unsigned long *)handle; - read_unlock(&slots->lock); - zhdr = (struct z3fold_header *)(addr & PAGE_MASK); - return (addr - zhdr->first_num) & BUDDY_MASK; -} - -static inline struct z3fold_pool *zhdr_to_pool(struct z3fold_header *zhdr) -{ - return zhdr->pool; -} - -static void __release_z3fold_page(struct z3fold_header *zhdr, bool locked) -{ - struct page *page = virt_to_page(zhdr); - struct z3fold_pool *pool = zhdr_to_pool(zhdr); - - WARN_ON(!list_empty(&zhdr->buddy)); - set_bit(PAGE_STALE, &page->private); - clear_bit(NEEDS_COMPACTING, &page->private); - spin_lock(&pool->lock); - spin_unlock(&pool->lock); - - if (locked) - z3fold_page_unlock(zhdr); - - spin_lock(&pool->stale_lock); - list_add(&zhdr->buddy, &pool->stale); - queue_work(pool->release_wq, &pool->work); - spin_unlock(&pool->stale_lock); - - atomic64_dec(&pool->pages_nr); -} - -static void release_z3fold_page_locked(struct kref *ref) -{ - struct z3fold_header *zhdr = container_of(ref, struct z3fold_header, - refcount); - WARN_ON(z3fold_page_trylock(zhdr)); - __release_z3fold_page(zhdr, true); -} - -static void release_z3fold_page_locked_list(struct kref *ref) -{ - struct z3fold_header *zhdr = container_of(ref, struct z3fold_header, - refcount); - struct z3fold_pool *pool = zhdr_to_pool(zhdr); - - spin_lock(&pool->lock); - list_del_init(&zhdr->buddy); - spin_unlock(&pool->lock); - - WARN_ON(z3fold_page_trylock(zhdr)); - __release_z3fold_page(zhdr, true); -} - -static inline int put_z3fold_locked(struct z3fold_header *zhdr) -{ - return kref_put(&zhdr->refcount, release_z3fold_page_locked); -} - -static inline int put_z3fold_locked_list(struct z3fold_header *zhdr) -{ - return kref_put(&zhdr->refcount, release_z3fold_page_locked_list); -} - -static void free_pages_work(struct work_struct *w) -{ - struct z3fold_pool *pool = container_of(w, struct z3fold_pool, work); - - spin_lock(&pool->stale_lock); - while (!list_empty(&pool->stale)) { - struct z3fold_header *zhdr = list_first_entry(&pool->stale, - struct z3fold_header, buddy); - struct page *page = virt_to_page(zhdr); - - list_del(&zhdr->buddy); - if (WARN_ON(!test_bit(PAGE_STALE, &page->private))) - continue; - spin_unlock(&pool->stale_lock); - cancel_work_sync(&zhdr->work); - free_z3fold_page(page, false); - cond_resched(); - spin_lock(&pool->stale_lock); - } - spin_unlock(&pool->stale_lock); -} - -/* - * Returns the number of free chunks in a z3fold page. - * NB: can't be used with HEADLESS pages. - */ -static int num_free_chunks(struct z3fold_header *zhdr) -{ - int nfree; - /* - * If there is a middle object, pick up the bigger free space - * either before or after it. Otherwise just subtract the number - * of chunks occupied by the first and the last objects. - */ - if (zhdr->middle_chunks != 0) { - int nfree_before = zhdr->first_chunks ? - 0 : zhdr->start_middle - ZHDR_CHUNKS; - int nfree_after = zhdr->last_chunks ? 
- 0 : TOTAL_CHUNKS - - (zhdr->start_middle + zhdr->middle_chunks); - nfree = max(nfree_before, nfree_after); - } else - nfree = NCHUNKS - zhdr->first_chunks - zhdr->last_chunks; - return nfree; -} - -/* Add to the appropriate unbuddied list */ -static inline void add_to_unbuddied(struct z3fold_pool *pool, - struct z3fold_header *zhdr) -{ - if (zhdr->first_chunks == 0 || zhdr->last_chunks == 0 || - zhdr->middle_chunks == 0) { - struct list_head *unbuddied; - int freechunks = num_free_chunks(zhdr); - - migrate_disable(); - unbuddied = this_cpu_ptr(pool->unbuddied); - spin_lock(&pool->lock); - list_add(&zhdr->buddy, &unbuddied[freechunks]); - spin_unlock(&pool->lock); - zhdr->cpu = smp_processor_id(); - migrate_enable(); - } -} - -static inline enum buddy get_free_buddy(struct z3fold_header *zhdr, int chunks) -{ - enum buddy bud = HEADLESS; - - if (zhdr->middle_chunks) { - if (!zhdr->first_chunks && - chunks <= zhdr->start_middle - ZHDR_CHUNKS) - bud = FIRST; - else if (!zhdr->last_chunks) - bud = LAST; - } else { - if (!zhdr->first_chunks) - bud = FIRST; - else if (!zhdr->last_chunks) - bud = LAST; - else - bud = MIDDLE; - } - - return bud; -} - -static inline void *mchunk_memmove(struct z3fold_header *zhdr, - unsigned short dst_chunk) -{ - void *beg = zhdr; - return memmove(beg + (dst_chunk << CHUNK_SHIFT), - beg + (zhdr->start_middle << CHUNK_SHIFT), - zhdr->middle_chunks << CHUNK_SHIFT); -} - -static inline bool buddy_single(struct z3fold_header *zhdr) -{ - return !((zhdr->first_chunks && zhdr->middle_chunks) || - (zhdr->first_chunks && zhdr->last_chunks) || - (zhdr->middle_chunks && zhdr->last_chunks)); -} - -static struct z3fold_header *compact_single_buddy(struct z3fold_header *zhdr) -{ - struct z3fold_pool *pool = zhdr_to_pool(zhdr); - void *p = zhdr; - unsigned long old_handle = 0; - size_t sz = 0; - struct z3fold_header *new_zhdr = NULL; - int first_idx = __idx(zhdr, FIRST); - int middle_idx = __idx(zhdr, MIDDLE); - int last_idx = __idx(zhdr, LAST); - unsigned short *moved_chunks = NULL; - - /* - * No need to protect slots here -- all the slots are "local" and - * the page lock is already taken - */ - if (zhdr->first_chunks && zhdr->slots->slot[first_idx]) { - p += ZHDR_SIZE_ALIGNED; - sz = zhdr->first_chunks << CHUNK_SHIFT; - old_handle = (unsigned long)&zhdr->slots->slot[first_idx]; - moved_chunks = &zhdr->first_chunks; - } else if (zhdr->middle_chunks && zhdr->slots->slot[middle_idx]) { - p += zhdr->start_middle << CHUNK_SHIFT; - sz = zhdr->middle_chunks << CHUNK_SHIFT; - old_handle = (unsigned long)&zhdr->slots->slot[middle_idx]; - moved_chunks = &zhdr->middle_chunks; - } else if (zhdr->last_chunks && zhdr->slots->slot[last_idx]) { - p += PAGE_SIZE - (zhdr->last_chunks << CHUNK_SHIFT); - sz = zhdr->last_chunks << CHUNK_SHIFT; - old_handle = (unsigned long)&zhdr->slots->slot[last_idx]; - moved_chunks = &zhdr->last_chunks; - } - - if (sz > 0) { - enum buddy new_bud = HEADLESS; - short chunks = size_to_chunks(sz); - void *q; - - new_zhdr = __z3fold_alloc(pool, sz, false); - if (!new_zhdr) - return NULL; - - if (WARN_ON(new_zhdr == zhdr)) - goto out_fail; - - new_bud = get_free_buddy(new_zhdr, chunks); - q = new_zhdr; - switch (new_bud) { - case FIRST: - new_zhdr->first_chunks = chunks; - q += ZHDR_SIZE_ALIGNED; - break; - case MIDDLE: - new_zhdr->middle_chunks = chunks; - new_zhdr->start_middle = - new_zhdr->first_chunks + ZHDR_CHUNKS; - q += new_zhdr->start_middle << CHUNK_SHIFT; - break; - case LAST: - new_zhdr->last_chunks = chunks; - q += PAGE_SIZE - (new_zhdr->last_chunks << 
CHUNK_SHIFT); - break; - default: - goto out_fail; - } - new_zhdr->foreign_handles++; - memcpy(q, p, sz); - write_lock(&zhdr->slots->lock); - *(unsigned long *)old_handle = (unsigned long)new_zhdr + - __idx(new_zhdr, new_bud); - if (new_bud == LAST) - *(unsigned long *)old_handle |= - (new_zhdr->last_chunks << BUDDY_SHIFT); - write_unlock(&zhdr->slots->lock); - add_to_unbuddied(pool, new_zhdr); - z3fold_page_unlock(new_zhdr); - - *moved_chunks = 0; - } - - return new_zhdr; - -out_fail: - if (new_zhdr && !put_z3fold_locked(new_zhdr)) { - add_to_unbuddied(pool, new_zhdr); - z3fold_page_unlock(new_zhdr); - } - return NULL; - -} - -#define BIG_CHUNK_GAP 3 -/* Has to be called with lock held */ -static int z3fold_compact_page(struct z3fold_header *zhdr) -{ - struct page *page = virt_to_page(zhdr); - - if (test_bit(MIDDLE_CHUNK_MAPPED, &page->private)) - return 0; /* can't move middle chunk, it's used */ - - if (unlikely(PageIsolated(page))) - return 0; - - if (zhdr->middle_chunks == 0) - return 0; /* nothing to compact */ - - if (zhdr->first_chunks == 0 && zhdr->last_chunks == 0) { - /* move to the beginning */ - mchunk_memmove(zhdr, ZHDR_CHUNKS); - zhdr->first_chunks = zhdr->middle_chunks; - zhdr->middle_chunks = 0; - zhdr->start_middle = 0; - zhdr->first_num++; - return 1; - } - - /* - * moving data is expensive, so let's only do that if - * there's substantial gain (at least BIG_CHUNK_GAP chunks) - */ - if (zhdr->first_chunks != 0 && zhdr->last_chunks == 0 && - zhdr->start_middle - (zhdr->first_chunks + ZHDR_CHUNKS) >= - BIG_CHUNK_GAP) { - mchunk_memmove(zhdr, zhdr->first_chunks + ZHDR_CHUNKS); - zhdr->start_middle = zhdr->first_chunks + ZHDR_CHUNKS; - return 1; - } else if (zhdr->last_chunks != 0 && zhdr->first_chunks == 0 && - TOTAL_CHUNKS - (zhdr->last_chunks + zhdr->start_middle - + zhdr->middle_chunks) >= - BIG_CHUNK_GAP) { - unsigned short new_start = TOTAL_CHUNKS - zhdr->last_chunks - - zhdr->middle_chunks; - mchunk_memmove(zhdr, new_start); - zhdr->start_middle = new_start; - return 1; - } - - return 0; -} - -static void do_compact_page(struct z3fold_header *zhdr, bool locked) -{ - struct z3fold_pool *pool = zhdr_to_pool(zhdr); - struct page *page; - - page = virt_to_page(zhdr); - if (locked) - WARN_ON(z3fold_page_trylock(zhdr)); - else - z3fold_page_lock(zhdr); - if (WARN_ON(!test_and_clear_bit(NEEDS_COMPACTING, &page->private))) { - z3fold_page_unlock(zhdr); - return; - } - spin_lock(&pool->lock); - list_del_init(&zhdr->buddy); - spin_unlock(&pool->lock); - - if (put_z3fold_locked(zhdr)) - return; - - if (test_bit(PAGE_STALE, &page->private) || - test_and_set_bit(PAGE_CLAIMED, &page->private)) { - z3fold_page_unlock(zhdr); - return; - } - - if (!zhdr->foreign_handles && buddy_single(zhdr) && - zhdr->mapped_count == 0 && compact_single_buddy(zhdr)) { - if (!put_z3fold_locked(zhdr)) { - clear_bit(PAGE_CLAIMED, &page->private); - z3fold_page_unlock(zhdr); - } - return; - } - - z3fold_compact_page(zhdr); - add_to_unbuddied(pool, zhdr); - clear_bit(PAGE_CLAIMED, &page->private); - z3fold_page_unlock(zhdr); -} - -static void compact_page_work(struct work_struct *w) -{ - struct z3fold_header *zhdr = container_of(w, struct z3fold_header, - work); - - do_compact_page(zhdr, false); -} - -/* returns _locked_ z3fold page header or NULL */ -static inline struct z3fold_header *__z3fold_alloc(struct z3fold_pool *pool, - size_t size, bool can_sleep) -{ - struct z3fold_header *zhdr = NULL; - struct page *page; - struct list_head *unbuddied; - int chunks = size_to_chunks(size), i; - -lookup: - 
migrate_disable(); - /* First, try to find an unbuddied z3fold page. */ - unbuddied = this_cpu_ptr(pool->unbuddied); - for_each_unbuddied_list(i, chunks) { - struct list_head *l = &unbuddied[i]; - - zhdr = list_first_entry_or_null(READ_ONCE(l), - struct z3fold_header, buddy); - - if (!zhdr) - continue; - - /* Re-check under lock. */ - spin_lock(&pool->lock); - if (unlikely(zhdr != list_first_entry(READ_ONCE(l), - struct z3fold_header, buddy)) || - !z3fold_page_trylock(zhdr)) { - spin_unlock(&pool->lock); - zhdr = NULL; - migrate_enable(); - if (can_sleep) - cond_resched(); - goto lookup; - } - list_del_init(&zhdr->buddy); - zhdr->cpu = -1; - spin_unlock(&pool->lock); - - page = virt_to_page(zhdr); - if (test_bit(NEEDS_COMPACTING, &page->private) || - test_bit(PAGE_CLAIMED, &page->private)) { - z3fold_page_unlock(zhdr); - zhdr = NULL; - migrate_enable(); - if (can_sleep) - cond_resched(); - goto lookup; - } - - /* - * this page could not be removed from its unbuddied - * list while pool lock was held, and then we've taken - * page lock so kref_put could not be called before - * we got here, so it's safe to just call kref_get() - */ - kref_get(&zhdr->refcount); - break; - } - migrate_enable(); - - if (!zhdr) { - int cpu; - - /* look for _exact_ match on other cpus' lists */ - for_each_online_cpu(cpu) { - struct list_head *l; - - unbuddied = per_cpu_ptr(pool->unbuddied, cpu); - spin_lock(&pool->lock); - l = &unbuddied[chunks]; - - zhdr = list_first_entry_or_null(READ_ONCE(l), - struct z3fold_header, buddy); - - if (!zhdr || !z3fold_page_trylock(zhdr)) { - spin_unlock(&pool->lock); - zhdr = NULL; - continue; - } - list_del_init(&zhdr->buddy); - zhdr->cpu = -1; - spin_unlock(&pool->lock); - - page = virt_to_page(zhdr); - if (test_bit(NEEDS_COMPACTING, &page->private) || - test_bit(PAGE_CLAIMED, &page->private)) { - z3fold_page_unlock(zhdr); - zhdr = NULL; - if (can_sleep) - cond_resched(); - continue; - } - kref_get(&zhdr->refcount); - break; - } - } - - if (zhdr && !zhdr->slots) { - zhdr->slots = alloc_slots(pool, GFP_ATOMIC); - if (!zhdr->slots) - goto out_fail; - } - return zhdr; - -out_fail: - if (!put_z3fold_locked(zhdr)) { - add_to_unbuddied(pool, zhdr); - z3fold_page_unlock(zhdr); - } - return NULL; -} - -/* - * API Functions - */ - -/** - * z3fold_create_pool() - create a new z3fold pool - * @name: pool name - * @gfp: gfp flags when allocating the z3fold pool structure - * - * Return: pointer to the new z3fold pool or NULL if the metadata allocation - * failed. 
- */ -static struct z3fold_pool *z3fold_create_pool(const char *name, gfp_t gfp) -{ - struct z3fold_pool *pool = NULL; - int i, cpu; - - pool = kzalloc(sizeof(struct z3fold_pool), gfp); - if (!pool) - goto out; - pool->c_handle = kmem_cache_create("z3fold_handle", - sizeof(struct z3fold_buddy_slots), - SLOTS_ALIGN, 0, NULL); - if (!pool->c_handle) - goto out_c; - spin_lock_init(&pool->lock); - spin_lock_init(&pool->stale_lock); - pool->unbuddied = __alloc_percpu(sizeof(struct list_head) * NCHUNKS, - __alignof__(struct list_head)); - if (!pool->unbuddied) - goto out_pool; - for_each_possible_cpu(cpu) { - struct list_head *unbuddied = - per_cpu_ptr(pool->unbuddied, cpu); - for_each_unbuddied_list(i, 0) - INIT_LIST_HEAD(&unbuddied[i]); - } - INIT_LIST_HEAD(&pool->stale); - atomic64_set(&pool->pages_nr, 0); - pool->name = name; - pool->compact_wq = create_singlethread_workqueue(pool->name); - if (!pool->compact_wq) - goto out_unbuddied; - pool->release_wq = create_singlethread_workqueue(pool->name); - if (!pool->release_wq) - goto out_wq; - INIT_WORK(&pool->work, free_pages_work); - return pool; - -out_wq: - destroy_workqueue(pool->compact_wq); -out_unbuddied: - free_percpu(pool->unbuddied); -out_pool: - kmem_cache_destroy(pool->c_handle); -out_c: - kfree(pool); -out: - return NULL; -} - -/** - * z3fold_destroy_pool() - destroys an existing z3fold pool - * @pool: the z3fold pool to be destroyed - * - * The pool should be emptied before this function is called. - */ -static void z3fold_destroy_pool(struct z3fold_pool *pool) -{ - kmem_cache_destroy(pool->c_handle); - - /* - * We need to destroy pool->compact_wq before pool->release_wq, - * as any pending work on pool->compact_wq will call - * queue_work(pool->release_wq, &pool->work). - * - * There are still outstanding pages until both workqueues are drained, - * so we cannot unregister migration until then. - */ - - destroy_workqueue(pool->compact_wq); - destroy_workqueue(pool->release_wq); - free_percpu(pool->unbuddied); - kfree(pool); -} - -static const struct movable_operations z3fold_mops; - -/** - * z3fold_alloc() - allocates a region of a given size - * @pool: z3fold pool from which to allocate - * @size: size in bytes of the desired allocation - * @gfp: gfp flags used if the pool needs to grow - * @handle: handle of the new allocation - * - * This function will attempt to find a free region in the pool large enough to - * satisfy the allocation request. A search of the unbuddied lists is - * performed first. If no suitable free region is found, then a new page is - * allocated and added to the pool to satisfy the request. - * - * Return: 0 if success and handle is set, otherwise -EINVAL if the size or - * gfp arguments are invalid or -ENOMEM if the pool was unable to allocate - * a new page. 
- */ -static int z3fold_alloc(struct z3fold_pool *pool, size_t size, gfp_t gfp, - unsigned long *handle) -{ - int chunks = size_to_chunks(size); - struct z3fold_header *zhdr = NULL; - struct page *page = NULL; - enum buddy bud; - bool can_sleep = gfpflags_allow_blocking(gfp); - - if (!size || (gfp & __GFP_HIGHMEM)) - return -EINVAL; - - if (size > PAGE_SIZE) - return -ENOSPC; - - if (size > PAGE_SIZE - ZHDR_SIZE_ALIGNED - CHUNK_SIZE) - bud = HEADLESS; - else { -retry: - zhdr = __z3fold_alloc(pool, size, can_sleep); - if (zhdr) { - bud = get_free_buddy(zhdr, chunks); - if (bud == HEADLESS) { - if (!put_z3fold_locked(zhdr)) - z3fold_page_unlock(zhdr); - pr_err("No free chunks in unbuddied\n"); - WARN_ON(1); - goto retry; - } - page = virt_to_page(zhdr); - goto found; - } - bud = FIRST; - } - - page = alloc_page(gfp); - if (!page) - return -ENOMEM; - - zhdr = init_z3fold_page(page, bud == HEADLESS, pool, gfp); - if (!zhdr) { - __free_page(page); - return -ENOMEM; - } - atomic64_inc(&pool->pages_nr); - - if (bud == HEADLESS) { - set_bit(PAGE_HEADLESS, &page->private); - goto headless; - } - if (can_sleep) { - lock_page(page); - __SetPageMovable(page, &z3fold_mops); - unlock_page(page); - } else { - WARN_ON(!trylock_page(page)); - __SetPageMovable(page, &z3fold_mops); - unlock_page(page); - } - z3fold_page_lock(zhdr); - -found: - if (bud == FIRST) - zhdr->first_chunks = chunks; - else if (bud == LAST) - zhdr->last_chunks = chunks; - else { - zhdr->middle_chunks = chunks; - zhdr->start_middle = zhdr->first_chunks + ZHDR_CHUNKS; - } - add_to_unbuddied(pool, zhdr); - -headless: - spin_lock(&pool->lock); - *handle = encode_handle(zhdr, bud); - spin_unlock(&pool->lock); - if (bud != HEADLESS) - z3fold_page_unlock(zhdr); - - return 0; -} - -/** - * z3fold_free() - frees the allocation associated with the given handle - * @pool: pool in which the allocation resided - * @handle: handle associated with the allocation returned by z3fold_alloc() - * - * In the case that the z3fold page in which the allocation resides is under - * reclaim, as indicated by the PAGE_CLAIMED flag being set, this function - * only sets the first|middle|last_chunks to 0. The page is actually freed - * once all buddies are evicted (see z3fold_reclaim_page() below). - */ -static void z3fold_free(struct z3fold_pool *pool, unsigned long handle) -{ - struct z3fold_header *zhdr; - struct page *page; - enum buddy bud; - bool page_claimed; - - zhdr = get_z3fold_header(handle); - page = virt_to_page(zhdr); - page_claimed = test_and_set_bit(PAGE_CLAIMED, &page->private); - - if (test_bit(PAGE_HEADLESS, &page->private)) { - /* if a headless page is under reclaim, just leave. - * NB: we use test_and_set_bit for a reason: if the bit - * has not been set before, we release this page - * immediately so we don't care about its value any more. 
- */ - if (!page_claimed) { - put_z3fold_header(zhdr); - free_z3fold_page(page, true); - atomic64_dec(&pool->pages_nr); - } - return; - } - - /* Non-headless case */ - bud = handle_to_buddy(handle); - - switch (bud) { - case FIRST: - zhdr->first_chunks = 0; - break; - case MIDDLE: - zhdr->middle_chunks = 0; - break; - case LAST: - zhdr->last_chunks = 0; - break; - default: - pr_err("%s: unknown bud %d\n", __func__, bud); - WARN_ON(1); - put_z3fold_header(zhdr); - return; - } - - if (!page_claimed) - free_handle(handle, zhdr); - if (put_z3fold_locked_list(zhdr)) - return; - if (page_claimed) { - /* the page has not been claimed by us */ - put_z3fold_header(zhdr); - return; - } - if (test_and_set_bit(NEEDS_COMPACTING, &page->private)) { - clear_bit(PAGE_CLAIMED, &page->private); - put_z3fold_header(zhdr); - return; - } - if (zhdr->cpu < 0 || !cpu_online(zhdr->cpu)) { - zhdr->cpu = -1; - kref_get(&zhdr->refcount); - clear_bit(PAGE_CLAIMED, &page->private); - do_compact_page(zhdr, true); - return; - } - kref_get(&zhdr->refcount); - clear_bit(PAGE_CLAIMED, &page->private); - queue_work_on(zhdr->cpu, pool->compact_wq, &zhdr->work); - put_z3fold_header(zhdr); -} - -/** - * z3fold_map() - maps the allocation associated with the given handle - * @pool: pool in which the allocation resides - * @handle: handle associated with the allocation to be mapped - * - * Extracts the buddy number from handle and constructs the pointer to the - * correct starting chunk within the page. - * - * Returns: a pointer to the mapped allocation - */ -static void *z3fold_map(struct z3fold_pool *pool, unsigned long handle) -{ - struct z3fold_header *zhdr; - struct page *page; - void *addr; - enum buddy buddy; - - zhdr = get_z3fold_header(handle); - addr = zhdr; - page = virt_to_page(zhdr); - - if (test_bit(PAGE_HEADLESS, &page->private)) - goto out; - - buddy = handle_to_buddy(handle); - switch (buddy) { - case FIRST: - addr += ZHDR_SIZE_ALIGNED; - break; - case MIDDLE: - addr += zhdr->start_middle << CHUNK_SHIFT; - set_bit(MIDDLE_CHUNK_MAPPED, &page->private); - break; - case LAST: - addr += PAGE_SIZE - (handle_to_chunks(handle) << CHUNK_SHIFT); - break; - default: - pr_err("unknown buddy id %d\n", buddy); - WARN_ON(1); - addr = NULL; - break; - } - - if (addr) - zhdr->mapped_count++; -out: - put_z3fold_header(zhdr); - return addr; -} - -/** - * z3fold_unmap() - unmaps the allocation associated with the given handle - * @pool: pool in which the allocation resides - * @handle: handle associated with the allocation to be unmapped - */ -static void z3fold_unmap(struct z3fold_pool *pool, unsigned long handle) -{ - struct z3fold_header *zhdr; - struct page *page; - enum buddy buddy; - - zhdr = get_z3fold_header(handle); - page = virt_to_page(zhdr); - - if (test_bit(PAGE_HEADLESS, &page->private)) - return; - - buddy = handle_to_buddy(handle); - if (buddy == MIDDLE) - clear_bit(MIDDLE_CHUNK_MAPPED, &page->private); - zhdr->mapped_count--; - put_z3fold_header(zhdr); -} - -/** - * z3fold_get_pool_pages() - gets the z3fold pool size in pages - * @pool: pool whose size is being queried - * - * Returns: size in pages of the given pool. 
- */ -static u64 z3fold_get_pool_pages(struct z3fold_pool *pool) -{ - return atomic64_read(&pool->pages_nr); -} - -static bool z3fold_page_isolate(struct page *page, isolate_mode_t mode) -{ - struct z3fold_header *zhdr; - struct z3fold_pool *pool; - - VM_BUG_ON_PAGE(PageIsolated(page), page); - - if (test_bit(PAGE_HEADLESS, &page->private)) - return false; - - zhdr = page_address(page); - z3fold_page_lock(zhdr); - if (test_bit(NEEDS_COMPACTING, &page->private) || - test_bit(PAGE_STALE, &page->private)) - goto out; - - if (zhdr->mapped_count != 0 || zhdr->foreign_handles != 0) - goto out; - - if (test_and_set_bit(PAGE_CLAIMED, &page->private)) - goto out; - pool = zhdr_to_pool(zhdr); - spin_lock(&pool->lock); - if (!list_empty(&zhdr->buddy)) - list_del_init(&zhdr->buddy); - spin_unlock(&pool->lock); - - kref_get(&zhdr->refcount); - z3fold_page_unlock(zhdr); - return true; - -out: - z3fold_page_unlock(zhdr); - return false; -} - -static int z3fold_page_migrate(struct page *newpage, struct page *page, - enum migrate_mode mode) -{ - struct z3fold_header *zhdr, *new_zhdr; - struct z3fold_pool *pool; - - VM_BUG_ON_PAGE(!PageIsolated(page), page); - VM_BUG_ON_PAGE(!test_bit(PAGE_CLAIMED, &page->private), page); - VM_BUG_ON_PAGE(!PageLocked(newpage), newpage); - - zhdr = page_address(page); - pool = zhdr_to_pool(zhdr); - - if (!z3fold_page_trylock(zhdr)) - return -EAGAIN; - if (zhdr->mapped_count != 0 || zhdr->foreign_handles != 0) { - clear_bit(PAGE_CLAIMED, &page->private); - z3fold_page_unlock(zhdr); - return -EBUSY; - } - if (work_pending(&zhdr->work)) { - z3fold_page_unlock(zhdr); - return -EAGAIN; - } - new_zhdr = page_address(newpage); - memcpy(new_zhdr, zhdr, PAGE_SIZE); - newpage->private = page->private; - set_bit(PAGE_MIGRATED, &page->private); - z3fold_page_unlock(zhdr); - spin_lock_init(&new_zhdr->page_lock); - INIT_WORK(&new_zhdr->work, compact_page_work); - /* - * z3fold_page_isolate() ensures that new_zhdr->buddy is empty, - * so we only have to reinitialize it. - */ - INIT_LIST_HEAD(&new_zhdr->buddy); - __ClearPageMovable(page); - - get_page(newpage); - z3fold_page_lock(new_zhdr); - if (new_zhdr->first_chunks) - encode_handle(new_zhdr, FIRST); - if (new_zhdr->last_chunks) - encode_handle(new_zhdr, LAST); - if (new_zhdr->middle_chunks) - encode_handle(new_zhdr, MIDDLE); - set_bit(NEEDS_COMPACTING, &newpage->private); - new_zhdr->cpu = smp_processor_id(); - __SetPageMovable(newpage, &z3fold_mops); - z3fold_page_unlock(new_zhdr); - - queue_work_on(new_zhdr->cpu, pool->compact_wq, &new_zhdr->work); - - /* PAGE_CLAIMED and PAGE_MIGRATED are cleared now. 
*/
- page->private = 0;
- put_page(page);
- return 0;
-}
-
-static void z3fold_page_putback(struct page *page)
-{
- struct z3fold_header *zhdr;
- struct z3fold_pool *pool;
-
- zhdr = page_address(page);
- pool = zhdr_to_pool(zhdr);
-
- z3fold_page_lock(zhdr);
- if (!list_empty(&zhdr->buddy))
- list_del_init(&zhdr->buddy);
- INIT_LIST_HEAD(&page->lru);
- if (put_z3fold_locked(zhdr))
- return;
- if (list_empty(&zhdr->buddy))
- add_to_unbuddied(pool, zhdr);
- clear_bit(PAGE_CLAIMED, &page->private);
- z3fold_page_unlock(zhdr);
-}
-
-static const struct movable_operations z3fold_mops = {
- .isolate_page = z3fold_page_isolate,
- .migrate_page = z3fold_page_migrate,
- .putback_page = z3fold_page_putback,
-};
-
-/*****************
- * zpool
- ****************/
-
-static void *z3fold_zpool_create(const char *name, gfp_t gfp)
-{
- return z3fold_create_pool(name, gfp);
-}
-
-static void z3fold_zpool_destroy(void *pool)
-{
- z3fold_destroy_pool(pool);
-}
-
-static int z3fold_zpool_malloc(void *pool, size_t size, gfp_t gfp,
- unsigned long *handle)
-{
- return z3fold_alloc(pool, size, gfp, handle);
-}
-static void z3fold_zpool_free(void *pool, unsigned long handle)
-{
- z3fold_free(pool, handle);
-}
-
-static void *z3fold_zpool_map(void *pool, unsigned long handle,
- enum zpool_mapmode mm)
-{
- return z3fold_map(pool, handle);
-}
-static void z3fold_zpool_unmap(void *pool, unsigned long handle)
-{
- z3fold_unmap(pool, handle);
-}
-
-static u64 z3fold_zpool_total_pages(void *pool)
-{
- return z3fold_get_pool_pages(pool);
-}
-
-static struct zpool_driver z3fold_zpool_driver = {
- .type = "z3fold",
- .sleep_mapped = true,
- .owner = THIS_MODULE,
- .create = z3fold_zpool_create,
- .destroy = z3fold_zpool_destroy,
- .malloc = z3fold_zpool_malloc,
- .free = z3fold_zpool_free,
- .map = z3fold_zpool_map,
- .unmap = z3fold_zpool_unmap,
- .total_pages = z3fold_zpool_total_pages,
-};
-
-MODULE_ALIAS("zpool-z3fold");
-
-static int __init init_z3fold(void)
-{
- /*
- * Make sure the z3fold header is not larger than the page size and
- * there has remaining spaces for its buddy.
- */
- BUILD_BUG_ON(ZHDR_SIZE_ALIGNED > PAGE_SIZE - CHUNK_SIZE);
- zpool_register_driver(&z3fold_zpool_driver);
-
- return 0;
-}
-
-static void __exit exit_z3fold(void)
-{
- zpool_unregister_driver(&z3fold_zpool_driver);
-}
-
-module_init(init_z3fold);
-module_exit(exit_z3fold);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Vitaly Wool ");
-MODULE_DESCRIPTION("3-Fold Allocator for Compressed Pages");
-- 2.50.1

From 6df8bae8e851eacf2acf2237860213e002aba74f Mon Sep 17 00:00:00 2001
From: Yosry Ahmed
Date: Wed, 29 Jan 2025 18:06:32 +0000
Subject: [PATCH 08/16] mm: zbud: remove zbud

The zbud compressed pages allocator is rarely used, most users use zsmalloc.
zbud consumes much more memory (only stores 1 or 2 compressed pages per
physical page). The only advantage of zbud is a marginal performance
improvement that by no means justifies the memory overhead.

Historically, zsmalloc had significantly worse latency than zbud and z3fold
but offered better memory savings. This is no longer the case as shown by a
simple recent analysis [1]. In a kernel build test on tmpfs in a limited
cgroup, zbud took 2-3% less time than zsmalloc, but at the cost of using
~32% more memory (1.5G vs 1.13G). The tradeoff does not make sense for zbud
in any practical scenario.

The only alleged advantage of zbud is not having the dependency on
CONFIG_MMU, but CONFIG_SWAP already depends on CONFIG_MMU anyway, and zbud
is only used by zswap.
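As a rough back-of-the-envelope check of the tradeoff described above, here is an editorial sketch using only the figures quoted in this message; it is standalone userspace code, not code from the patch or from the cited analysis:

/*
 * Illustrative only: zbud can never place more than two compressed
 * objects in one physical page, capping its compression ratio at 2:1,
 * and the quoted kernel-build test puts its memory use about a third
 * above zsmalloc's.
 */
#include <stdio.h>

int main(void)
{
	const double zbud_gb = 1.5;		/* memory used with zbud, from the test above */
	const double zsmalloc_gb = 1.13;	/* memory used with zsmalloc */

	printf("zbud best-case compression ratio: 2:1\n");
	printf("zbud memory overhead vs zsmalloc: %.0f%%\n",
	       (zbud_gb / zsmalloc_gb - 1.0) * 100.0);	/* ~33, matching the ~32% quoted above */
	return 0;
}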
Remove zbud after z3fold's removal, leaving zsmalloc as the one and only zpool allocator. Leave the removal of the zpool API (and its associated config options) to a followup cleanup after no more allocators show up. Deprecating zbud for a few cycles before removing it was initially proposed [2], like z3fold was marked as deprecated for 2 cycles [3]. However, Johannes rightfully pointed out that the 2 cycles is too short for most downstream consumers, and z3fold was deprecated first only as a courtesy anyway. [1]https://lore.kernel.org/lkml/CAJD7tkbRF6od-2x_L8-A1QL3=2Ww13sCj4S3i4bNndqF+3+_Vg@mail.gmail.com/ [2]https://lore.kernel.org/lkml/Z5gdnSX5Lv-nfjQL@google.com/ [3]https://lore.kernel.org/lkml/20240904233343.933462-1-yosryahmed@google.com/ Link: https://lkml.kernel.org/r/20250129180633.3501650-3-yosry.ahmed@linux.dev Signed-off-by: Yosry Ahmed Reviewed-by: Shakeel Butt Acked-by: Johannes Weiner Acked-by: Nhat Pham Cc: Alexander Gordeev Cc: Chengming Zhou Cc: Christian Borntraeger Cc: Dan Streetman Cc: Heiko Carstens Cc: Huacai Chen Cc: Miaohe Lin Cc: Seth Jennings Cc: Sven Schnelle Cc: Vasily Gorbik Cc: Vitaly Wool Cc: Vlastimil Babka Cc: WANG Xuerui Signed-off-by: Andrew Morton --- CREDITS | 2 + Documentation/admin-guide/mm/zswap.rst | 10 +- MAINTAINERS | 7 - arch/loongarch/configs/loongson3_defconfig | 3 +- arch/s390/configs/debug_defconfig | 2 +- arch/s390/configs/defconfig | 2 +- include/linux/zpool.h | 5 +- mm/Kconfig | 18 - mm/Makefile | 1 - mm/zbud.c | 455 --------------------- mm/zpool.c | 4 +- 11 files changed, 13 insertions(+), 496 deletions(-) delete mode 100644 mm/zbud.c diff --git a/CREDITS b/CREDITS index aeccd79f9778..5e65bf8553ba 100644 --- a/CREDITS +++ b/CREDITS @@ -1895,6 +1895,7 @@ S: Czech Republic N: Seth Jennings E: sjenning@redhat.com D: Creation and maintenance of zswap +D: Creation and maintenace of the zbud allocator N: Jeremy Kerr D: Maintainer of SPU File System @@ -3788,6 +3789,7 @@ N: Dan Streetman E: ddstreet@ieee.org D: Maintenance and development of zswap D: Creation and maintenance of the zpool API +D: Maintenace of the zbud allocator N: Drew Sullivan E: drew@ss.org diff --git a/Documentation/admin-guide/mm/zswap.rst b/Documentation/admin-guide/mm/zswap.rst index 3598dcd7dbe7..fd3370aa43fe 100644 --- a/Documentation/admin-guide/mm/zswap.rst +++ b/Documentation/admin-guide/mm/zswap.rst @@ -60,15 +60,13 @@ accessed. The compressed memory pool grows on demand and shrinks as compressed pages are freed. The pool is not preallocated. By default, a zpool of type selected in ``CONFIG_ZSWAP_ZPOOL_DEFAULT`` Kconfig option is created, but it can be overridden at boot time by setting the ``zpool`` attribute, -e.g. ``zswap.zpool=zbud``. It can also be changed at runtime using the sysfs +e.g. ``zswap.zpool=zsmalloc``. It can also be changed at runtime using the sysfs ``zpool`` attribute, e.g.:: - echo zbud > /sys/module/zswap/parameters/zpool + echo zsmalloc > /sys/module/zswap/parameters/zpool -The zbud type zpool allocates exactly 1 page to store 2 compressed pages, which -means the compression ratio will always be 2:1 or worse (because of half-full -zbud pages). The zsmalloc type zpool has a more complex compressed page -storage method, and it can achieve greater storage densities. +The zsmalloc type zpool has a complex compressed page storage method, and it +can achieve great storage densities. 
When a swap page is passed from swapout to zswap, zswap maintains a mapping of the swap entry, a combination of the swap type and swap offset, to the zpool diff --git a/MAINTAINERS b/MAINTAINERS index a65a73227e3d..60421d3e48a8 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -26191,13 +26191,6 @@ F: Documentation/networking/device_drivers/hamradio/z8530drv.rst F: drivers/net/hamradio/*scc.c F: drivers/net/hamradio/z8530.h -ZBUD COMPRESSED PAGE ALLOCATOR -M: Seth Jennings -M: Dan Streetman -L: linux-mm@kvack.org -S: Maintained -F: mm/zbud.c - ZD1211RW WIRELESS DRIVER L: linux-wireless@vger.kernel.org S: Orphan diff --git a/arch/loongarch/configs/loongson3_defconfig b/arch/loongarch/configs/loongson3_defconfig index 73c77500ac46..7ce5beb3cbf3 100644 --- a/arch/loongarch/configs/loongson3_defconfig +++ b/arch/loongarch/configs/loongson3_defconfig @@ -109,8 +109,7 @@ CONFIG_BINFMT_MISC=m CONFIG_ZPOOL=y CONFIG_ZSWAP=y CONFIG_ZSWAP_COMPRESSOR_DEFAULT_ZSTD=y -CONFIG_ZBUD=y -CONFIG_ZSMALLOC=m +CONFIG_ZSMALLOC=y # CONFIG_COMPAT_BRK is not set CONFIG_MEMORY_HOTPLUG=y # CONFIG_MHP_DEFAULT_ONLINE_TYPE_OFFLINE is not set diff --git a/arch/s390/configs/debug_defconfig b/arch/s390/configs/debug_defconfig index 80bdfbae6e5b..074df6328376 100644 --- a/arch/s390/configs/debug_defconfig +++ b/arch/s390/configs/debug_defconfig @@ -92,7 +92,7 @@ CONFIG_UNIXWARE_DISKLABEL=y CONFIG_IOSCHED_BFQ=y CONFIG_BINFMT_MISC=m CONFIG_ZSWAP=y -CONFIG_ZSWAP_ZPOOL_DEFAULT_ZBUD=y +CONFIG_ZSMALLOC=y CONFIG_ZSMALLOC_STAT=y CONFIG_SLAB_BUCKETS=y CONFIG_SLUB_STATS=y diff --git a/arch/s390/configs/defconfig b/arch/s390/configs/defconfig index 449a0e996b96..ac68e663dd4d 100644 --- a/arch/s390/configs/defconfig +++ b/arch/s390/configs/defconfig @@ -86,7 +86,7 @@ CONFIG_UNIXWARE_DISKLABEL=y CONFIG_IOSCHED_BFQ=y CONFIG_BINFMT_MISC=m CONFIG_ZSWAP=y -CONFIG_ZSWAP_ZPOOL_DEFAULT_ZBUD=y +CONFIG_ZSMALLOC=y CONFIG_ZSMALLOC_STAT=y CONFIG_SLAB_BUCKETS=y # CONFIG_COMPAT_BRK is not set diff --git a/include/linux/zpool.h b/include/linux/zpool.h index a67d62b79698..5e6dc46b8cc4 100644 --- a/include/linux/zpool.h +++ b/include/linux/zpool.h @@ -4,9 +4,8 @@ * * Copyright (C) 2014 Dan Streetman * - * This is a common frontend for the zbud and zsmalloc memory - * storage pool implementations. Typically, this is used to - * store compressed memory. + * This is a common frontend for the zswap compressed memory storage + * implementations. */ #ifndef _ZPOOL_H_ diff --git a/mm/Kconfig b/mm/Kconfig index 6fa19022c09b..fba9757e5814 100644 --- a/mm/Kconfig +++ b/mm/Kconfig @@ -129,7 +129,6 @@ choice prompt "Default allocator" depends on ZSWAP default ZSWAP_ZPOOL_DEFAULT_ZSMALLOC if MMU - default ZSWAP_ZPOOL_DEFAULT_ZBUD help Selects the default allocator for the compressed cache for swap pages. @@ -140,12 +139,6 @@ choice The selection made here can be overridden by using the kernel command line 'zswap.zpool=' option. -config ZSWAP_ZPOOL_DEFAULT_ZBUD - bool "zbud" - select ZBUD - help - Use the zbud allocator as the default allocator. - config ZSWAP_ZPOOL_DEFAULT_ZSMALLOC bool "zsmalloc" select ZSMALLOC @@ -156,20 +149,9 @@ endchoice config ZSWAP_ZPOOL_DEFAULT string depends on ZSWAP - default "zbud" if ZSWAP_ZPOOL_DEFAULT_ZBUD default "zsmalloc" if ZSWAP_ZPOOL_DEFAULT_ZSMALLOC default "" -config ZBUD - tristate "2:1 compression allocator (zbud)" - depends on ZSWAP - help - A special purpose allocator for storing compressed pages. - It is designed to store up to two compressed pages per physical - page. 
While this design limits storage density, it has simple and - deterministic reclaim properties that make it preferable to a higher - density approach when reclaim will be used. - config ZSMALLOC tristate prompt "N:1 compression allocator (zsmalloc)" if (ZSWAP || ZRAM) diff --git a/mm/Makefile b/mm/Makefile index e4c03da3c084..53392d2af3a5 100644 --- a/mm/Makefile +++ b/mm/Makefile @@ -113,7 +113,6 @@ obj-$(CONFIG_DEBUG_VM_PGTABLE) += debug_vm_pgtable.o obj-$(CONFIG_PAGE_OWNER) += page_owner.o obj-$(CONFIG_MEMORY_ISOLATION) += page_isolation.o obj-$(CONFIG_ZPOOL) += zpool.o -obj-$(CONFIG_ZBUD) += zbud.o obj-$(CONFIG_ZSMALLOC) += zsmalloc.o obj-$(CONFIG_GENERIC_EARLY_IOREMAP) += early_ioremap.o obj-$(CONFIG_CMA) += cma.o diff --git a/mm/zbud.c b/mm/zbud.c deleted file mode 100644 index e9836fff9438..000000000000 --- a/mm/zbud.c +++ /dev/null @@ -1,455 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * zbud.c - * - * Copyright (C) 2013, Seth Jennings, IBM - * - * Concepts based on zcache internal zbud allocator by Dan Magenheimer. - * - * zbud is an special purpose allocator for storing compressed pages. Contrary - * to what its name may suggest, zbud is not a buddy allocator, but rather an - * allocator that "buddies" two compressed pages together in a single memory - * page. - * - * While this design limits storage density, it has simple and deterministic - * reclaim properties that make it preferable to a higher density approach when - * reclaim will be used. - * - * zbud works by storing compressed pages, or "zpages", together in pairs in a - * single memory page called a "zbud page". The first buddy is "left - * justified" at the beginning of the zbud page, and the last buddy is "right - * justified" at the end of the zbud page. The benefit is that if either - * buddy is freed, the freed buddy space, coalesced with whatever slack space - * that existed between the buddies, results in the largest possible free region - * within the zbud page. - * - * zbud also provides an attractive lower bound on density. The ratio of zpages - * to zbud pages can not be less than 1. This ensures that zbud can never "do - * harm" by using more pages to store zpages than the uncompressed zpages would - * have used on their own. - * - * zbud pages are divided into "chunks". The size of the chunks is fixed at - * compile time and determined by NCHUNKS_ORDER below. Dividing zbud pages - * into chunks allows organizing unbuddied zbud pages into a manageable number - * of unbuddied lists according to the number of free chunks available in the - * zbud page. - * - * The zbud API differs from that of conventional allocators in that the - * allocation function, zbud_alloc(), returns an opaque handle to the user, - * not a dereferenceable pointer. The user must map the handle using - * zbud_map() in order to get a usable pointer by which to access the - * allocation data and unmap the handle with zbud_unmap() when operations - * on the allocation data are complete. - */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -#include -#include -#include -#include -#include -#include -#include -#include - -/***************** - * Structures -*****************/ -/* - * NCHUNKS_ORDER determines the internal allocation granularity, effectively - * adjusting internal fragmentation. It also determines the number of - * freelists maintained in each pool. NCHUNKS_ORDER of 6 means that the - * allocation granularity will be in chunks of size PAGE_SIZE/64. 
As one chunk - * in allocated page is occupied by zbud header, NCHUNKS will be calculated to - * 63 which shows the max number of free chunks in zbud page, also there will be - * 63 freelists per pool. - */ -#define NCHUNKS_ORDER 6 - -#define CHUNK_SHIFT (PAGE_SHIFT - NCHUNKS_ORDER) -#define CHUNK_SIZE (1 << CHUNK_SHIFT) -#define ZHDR_SIZE_ALIGNED CHUNK_SIZE -#define NCHUNKS ((PAGE_SIZE - ZHDR_SIZE_ALIGNED) >> CHUNK_SHIFT) - -struct zbud_pool; - -/** - * struct zbud_pool - stores metadata for each zbud pool - * @lock: protects all pool fields and first|last_chunk fields of any - * zbud page in the pool - * @unbuddied: array of lists tracking zbud pages that only contain one buddy; - * the lists each zbud page is added to depends on the size of - * its free region. - * @buddied: list tracking the zbud pages that contain two buddies; - * these zbud pages are full - * @pages_nr: number of zbud pages in the pool. - * - * This structure is allocated at pool creation time and maintains metadata - * pertaining to a particular zbud pool. - */ -struct zbud_pool { - spinlock_t lock; - union { - /* - * Reuse unbuddied[0] as buddied on the ground that - * unbuddied[0] is unused. - */ - struct list_head buddied; - struct list_head unbuddied[NCHUNKS]; - }; - u64 pages_nr; -}; - -/* - * struct zbud_header - zbud page metadata occupying the first chunk of each - * zbud page. - * @buddy: links the zbud page into the unbuddied/buddied lists in the pool - * @first_chunks: the size of the first buddy in chunks, 0 if free - * @last_chunks: the size of the last buddy in chunks, 0 if free - */ -struct zbud_header { - struct list_head buddy; - unsigned int first_chunks; - unsigned int last_chunks; -}; - -/***************** - * Helpers -*****************/ -/* Just to make the code easier to read */ -enum buddy { - FIRST, - LAST -}; - -/* Converts an allocation size in bytes to size in zbud chunks */ -static int size_to_chunks(size_t size) -{ - return (size + CHUNK_SIZE - 1) >> CHUNK_SHIFT; -} - -#define for_each_unbuddied_list(_iter, _begin) \ - for ((_iter) = (_begin); (_iter) < NCHUNKS; (_iter)++) - -/* Initializes the zbud header of a newly allocated zbud page */ -static struct zbud_header *init_zbud_page(struct page *page) -{ - struct zbud_header *zhdr = page_address(page); - zhdr->first_chunks = 0; - zhdr->last_chunks = 0; - INIT_LIST_HEAD(&zhdr->buddy); - return zhdr; -} - -/* Resets the struct page fields and frees the page */ -static void free_zbud_page(struct zbud_header *zhdr) -{ - __free_page(virt_to_page(zhdr)); -} - -/* - * Encodes the handle of a particular buddy within a zbud page - * Pool lock should be held as this function accesses first|last_chunks - */ -static unsigned long encode_handle(struct zbud_header *zhdr, enum buddy bud) -{ - unsigned long handle; - - /* - * For now, the encoded handle is actually just the pointer to the data - * but this might not always be the case. A little information hiding. - * Add CHUNK_SIZE to the handle if it is the first allocation to jump - * over the zbud header in the first chunk. 
- */ - handle = (unsigned long)zhdr; - if (bud == FIRST) - /* skip over zbud header */ - handle += ZHDR_SIZE_ALIGNED; - else /* bud == LAST */ - handle += PAGE_SIZE - (zhdr->last_chunks << CHUNK_SHIFT); - return handle; -} - -/* Returns the zbud page where a given handle is stored */ -static struct zbud_header *handle_to_zbud_header(unsigned long handle) -{ - return (struct zbud_header *)(handle & PAGE_MASK); -} - -/* Returns the number of free chunks in a zbud page */ -static int num_free_chunks(struct zbud_header *zhdr) -{ - /* - * Rather than branch for different situations, just use the fact that - * free buddies have a length of zero to simplify everything. - */ - return NCHUNKS - zhdr->first_chunks - zhdr->last_chunks; -} - -/***************** - * API Functions -*****************/ -/** - * zbud_create_pool() - create a new zbud pool - * @gfp: gfp flags when allocating the zbud pool structure - * - * Return: pointer to the new zbud pool or NULL if the metadata allocation - * failed. - */ -static struct zbud_pool *zbud_create_pool(gfp_t gfp) -{ - struct zbud_pool *pool; - int i; - - pool = kzalloc(sizeof(struct zbud_pool), gfp); - if (!pool) - return NULL; - spin_lock_init(&pool->lock); - for_each_unbuddied_list(i, 0) - INIT_LIST_HEAD(&pool->unbuddied[i]); - INIT_LIST_HEAD(&pool->buddied); - pool->pages_nr = 0; - return pool; -} - -/** - * zbud_destroy_pool() - destroys an existing zbud pool - * @pool: the zbud pool to be destroyed - * - * The pool should be emptied before this function is called. - */ -static void zbud_destroy_pool(struct zbud_pool *pool) -{ - kfree(pool); -} - -/** - * zbud_alloc() - allocates a region of a given size - * @pool: zbud pool from which to allocate - * @size: size in bytes of the desired allocation - * @gfp: gfp flags used if the pool needs to grow - * @handle: handle of the new allocation - * - * This function will attempt to find a free region in the pool large enough to - * satisfy the allocation request. A search of the unbuddied lists is - * performed first. If no suitable free region is found, then a new page is - * allocated and added to the pool to satisfy the request. - * - * gfp should not set __GFP_HIGHMEM as highmem pages cannot be used - * as zbud pool pages. - * - * Return: 0 if success and handle is set, otherwise -EINVAL if the size or - * gfp arguments are invalid or -ENOMEM if the pool was unable to allocate - * a new page. - */ -static int zbud_alloc(struct zbud_pool *pool, size_t size, gfp_t gfp, - unsigned long *handle) -{ - int chunks, i, freechunks; - struct zbud_header *zhdr = NULL; - enum buddy bud; - struct page *page; - - if (!size || (gfp & __GFP_HIGHMEM)) - return -EINVAL; - if (size > PAGE_SIZE - ZHDR_SIZE_ALIGNED - CHUNK_SIZE) - return -ENOSPC; - chunks = size_to_chunks(size); - spin_lock(&pool->lock); - - /* First, try to find an unbuddied zbud page. 
*/ - for_each_unbuddied_list(i, chunks) { - if (!list_empty(&pool->unbuddied[i])) { - zhdr = list_first_entry(&pool->unbuddied[i], - struct zbud_header, buddy); - list_del(&zhdr->buddy); - if (zhdr->first_chunks == 0) - bud = FIRST; - else - bud = LAST; - goto found; - } - } - - /* Couldn't find unbuddied zbud page, create new one */ - spin_unlock(&pool->lock); - page = alloc_page(gfp); - if (!page) - return -ENOMEM; - spin_lock(&pool->lock); - pool->pages_nr++; - zhdr = init_zbud_page(page); - bud = FIRST; - -found: - if (bud == FIRST) - zhdr->first_chunks = chunks; - else - zhdr->last_chunks = chunks; - - if (zhdr->first_chunks == 0 || zhdr->last_chunks == 0) { - /* Add to unbuddied list */ - freechunks = num_free_chunks(zhdr); - list_add(&zhdr->buddy, &pool->unbuddied[freechunks]); - } else { - /* Add to buddied list */ - list_add(&zhdr->buddy, &pool->buddied); - } - - *handle = encode_handle(zhdr, bud); - spin_unlock(&pool->lock); - - return 0; -} - -/** - * zbud_free() - frees the allocation associated with the given handle - * @pool: pool in which the allocation resided - * @handle: handle associated with the allocation returned by zbud_alloc() - */ -static void zbud_free(struct zbud_pool *pool, unsigned long handle) -{ - struct zbud_header *zhdr; - int freechunks; - - spin_lock(&pool->lock); - zhdr = handle_to_zbud_header(handle); - - /* If first buddy, handle will be page aligned */ - if ((handle - ZHDR_SIZE_ALIGNED) & ~PAGE_MASK) - zhdr->last_chunks = 0; - else - zhdr->first_chunks = 0; - - /* Remove from existing buddy list */ - list_del(&zhdr->buddy); - - if (zhdr->first_chunks == 0 && zhdr->last_chunks == 0) { - /* zbud page is empty, free */ - free_zbud_page(zhdr); - pool->pages_nr--; - } else { - /* Add to unbuddied list */ - freechunks = num_free_chunks(zhdr); - list_add(&zhdr->buddy, &pool->unbuddied[freechunks]); - } - - spin_unlock(&pool->lock); -} - -/** - * zbud_map() - maps the allocation associated with the given handle - * @pool: pool in which the allocation resides - * @handle: handle associated with the allocation to be mapped - * - * While trivial for zbud, the mapping functions for others allocators - * implementing this allocation API could have more complex information encoded - * in the handle and could create temporary mappings to make the data - * accessible to the user. - * - * Returns: a pointer to the mapped allocation - */ -static void *zbud_map(struct zbud_pool *pool, unsigned long handle) -{ - return (void *)(handle); -} - -/** - * zbud_unmap() - maps the allocation associated with the given handle - * @pool: pool in which the allocation resides - * @handle: handle associated with the allocation to be unmapped - */ -static void zbud_unmap(struct zbud_pool *pool, unsigned long handle) -{ -} - -/** - * zbud_get_pool_pages() - gets the zbud pool size in pages - * @pool: pool whose size is being queried - * - * Returns: size in pages of the given pool. The pool lock need not be - * taken to access pages_nr. 
- */ -static u64 zbud_get_pool_pages(struct zbud_pool *pool) -{ - return pool->pages_nr; -} - -/***************** - * zpool - ****************/ - -static void *zbud_zpool_create(const char *name, gfp_t gfp) -{ - return zbud_create_pool(gfp); -} - -static void zbud_zpool_destroy(void *pool) -{ - zbud_destroy_pool(pool); -} - -static int zbud_zpool_malloc(void *pool, size_t size, gfp_t gfp, - unsigned long *handle) -{ - return zbud_alloc(pool, size, gfp, handle); -} -static void zbud_zpool_free(void *pool, unsigned long handle) -{ - zbud_free(pool, handle); -} - -static void *zbud_zpool_map(void *pool, unsigned long handle, - enum zpool_mapmode mm) -{ - return zbud_map(pool, handle); -} -static void zbud_zpool_unmap(void *pool, unsigned long handle) -{ - zbud_unmap(pool, handle); -} - -static u64 zbud_zpool_total_pages(void *pool) -{ - return zbud_get_pool_pages(pool); -} - -static struct zpool_driver zbud_zpool_driver = { - .type = "zbud", - .sleep_mapped = true, - .owner = THIS_MODULE, - .create = zbud_zpool_create, - .destroy = zbud_zpool_destroy, - .malloc = zbud_zpool_malloc, - .free = zbud_zpool_free, - .map = zbud_zpool_map, - .unmap = zbud_zpool_unmap, - .total_pages = zbud_zpool_total_pages, -}; - -MODULE_ALIAS("zpool-zbud"); - -static int __init init_zbud(void) -{ - /* Make sure the zbud header will fit in one chunk */ - BUILD_BUG_ON(sizeof(struct zbud_header) > ZHDR_SIZE_ALIGNED); - pr_info("loaded\n"); - - zpool_register_driver(&zbud_zpool_driver); - - return 0; -} - -static void __exit exit_zbud(void) -{ - zpool_unregister_driver(&zbud_zpool_driver); - pr_info("unloaded\n"); -} - -module_init(init_zbud); -module_exit(exit_zbud); - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Seth Jennings "); -MODULE_DESCRIPTION("Buddy Allocator for Compressed Pages"); diff --git a/mm/zpool.c b/mm/zpool.c index b9fda1fa857d..4bbd12d4b659 100644 --- a/mm/zpool.c +++ b/mm/zpool.c @@ -95,7 +95,7 @@ static void zpool_put_driver(struct zpool_driver *driver) /** * zpool_has_pool() - Check if the pool driver is available - * @type: The type of the zpool to check (e.g. zbud, zsmalloc) + * @type: The type of the zpool to check (e.g. zsmalloc) * * This checks if the @type pool driver is available. This will try to load * the requested module, if needed, but there is no guarantee the module will @@ -130,7 +130,7 @@ EXPORT_SYMBOL(zpool_has_pool); /** * zpool_create_pool() - Create a new zpool - * @type: The type of the zpool to create (e.g. zbud, zsmalloc) + * @type: The type of the zpool to create (e.g. zsmalloc) * @name: The name of the zpool (e.g. zram0, zswap) * @gfp: The GFP flags to use when allocating the pool. * -- 2.50.1 From 3a75ccba047b11a4e8c437e8347b2d1746f1d808 Mon Sep 17 00:00:00 2001 From: Lorenzo Stoakes Date: Fri, 31 Jan 2025 12:31:49 +0000 Subject: [PATCH 09/16] mm: simplify vma merge structure and expand comments Patch series "mm: further simplify VMA merge operation", v3. While significant efforts have been made to improve the VMA merge operation, there remains remnants of the bad (or rather confusing) old days, which make the code difficult to understand, more bug prone and thus harder to modify. This series attempts to significantly improve matters in a number of respects - with a focus on simplifying the commit_merge() function which actually actions the merge operation - and importantly, adjusting the two most confusing merge cases - those in which we 'adjust' the VMA immediately adjacent to the one being merged. 
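Concretely, 'adjusting' the adjacent VMA amounts to moving its start (and page offset) by some delta while the merged VMA absorbs the difference. A minimal sketch of that step, distilled from the commit_merge() changes later in the series; the helper name is invented for illustration, and locking, maple tree updates and huge page handling are omitted:

/*
 * Illustrative helper, not from the patches: the adjacent VMA survives
 * the merge, but its start moves by adj_start bytes and its page offset
 * is kept in sync. A positive adj_start shrinks the front of 'middle';
 * a negative one lets 'next' grow backwards over the merged range.
 */
static void sketch_adjust_adjacent_vma(struct vm_area_struct *adjust, long adj_start)
{
	adjust->vm_start += adj_start;
	adjust->vm_pgoff += PHYS_PFN(adj_start);
}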
One source of confusion is the VMAs being threaded through the operation themselves - vmg->prev, vmg->vma and vmg->next.

At the start of the operation, vmg->vma is either NULL if a new VMA is proposed to be added, or if not then a pointer to an existing VMA being modified, and prev/next are (perhaps not present) VMAs sat immediately before and after the range specified in vmg->start, end, respectively.

However, during the VMA merge operation, we change vmg->start, end and pgoff to span the newly merged range and vmg->vma to either be:

a. The ultimately returned VMA (in most cases) or
b. A VMA which we will manipulate, but ultimately instead return vmg->next.

Case b. especially here is confusing for somebody reading this code, but the fact we update this state, along with vmg->start, end, pgoff only makes matters worse.

We simplify things by replacing vmg->vma with vmg->middle and never changing it - this is always either NULL (for a new VMA) or the VMA being modified between vmg->prev and vmg->next.

We further simplify by placing the merged VMA in a new vmg->target field - whether case b. above is the case or not. The reader of the code can now simply rely on vmg->middle being the middle VMA and vmg->target being the ultimately merged VMA.

We additionally tackle the confusing cases where we 'adjust' VMAs other than the one we ultimately return as the merged VMA (this includes case b. above). These are:

(1) merge

    <----------->
    |------||--------|        |------------|---|
    | prev || middle |   ->   |   target   | m |
    |------||--------|        |------------|---|

In which case middle must be adjusted so middle->vm_start is increased as well as performing the merge.

(2) (equivalent to case b. above)

         <------------->
    |---------||------|        |---|-------------|
    | middle  || next |   ->   | m |   target    |
    |---------||------|        |---|-------------|

In which case next must be adjusted so next->vm_start is decreased as well as performing the merge.

These cases have previously been handled by calculating and passing around a dubious and confusing 'adj_start' parameter alongside a pointer to an 'adjust' VMA indicating which VMA requires additional adjustment (middle in case 1 and next in case 2).

With the VMG structure in place we are able to avoid this by simply setting a merge flag to describe each case:

(1) Sets the vmg->__adjust_middle_start flag
(2) Sets the vmg->__adjust_next_start flag

By doing so it turns out we can vastly simplify the logic and calculate what is required to perform the operation.

Taken together the refactorings make it far easier to understand what is being done even in these more confusing cases, make the code far more maintainable, debuggable, and testable, providing more internal state indicating what is happening in the merge operation.

The changes have no functional net impact on the merge operation and everything should still behave as it did before.

This patch (of 5):

The merge code, while much improved, still has a number of points of confusion. As part of a broader series cleaning this up to make this more maintainable, we start by addressing some confusion around vma_merge_struct fields.

So far, the caller either provides no vmg->vma (a new VMA) or supplies the existing VMA which is being altered, setting vmg->start,end,pgoff to the proposed VMA dimensions. vmg->vma is then updated, as are vmg->start,end,pgoff as the merge process proceeds and the appropriate merge strategy is determined.
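For a sense of how this reads at a call site, here is a simplified sketch of the pre-series convention, modelled on the userspace tests in tools/testing/vma (vmi, vma_prev and vma are assumed to be set up by the test harness, merge_existing() is a helper in those tests, and real callers also set vmg->mm, flags and file state):

/*
 * Simplified sketch of the old convention: the same .vma field is both
 * the input "VMA being modified" and - together with start/end/pgoff -
 * gets rewritten as the merge proceeds, so afterwards it usually, but
 * not always, names the merged VMA.
 */
struct vma_merge_struct vmg = {
	.vmi   = &vmi,
	.prev  = vma_prev,
	.vma   = vma,		/* existing VMA being altered; NULL for a new VMA */
	.start = 0x3000,
	.end   = 0x5000,
	.pgoff = 3,
};
struct vm_area_struct *merged = merge_existing(&vmg);	/* merged VMA, or NULL if no merge */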
This is rather confusing, as vmg->vma starts off as the 'middle' VMA between vmg->prev,next, but becomes the 'target' VMA, except in one specific edge case (merge next, shrink middle).

In this patch we introduce vmg->middle to describe the VMA that is between vmg->prev and vmg->next, and does NOT change during the merge operation.

We replace vmg->vma with vmg->target, and use this only during the merge operation itself. Aside from the merge right, shrink middle case, this becomes the VMA that forms the basis of the VMA that is returned. This edge case can be addressed in a future commit.

We also add a number of comments to explain what is going on.

Finally, we adjust the ASCII diagrams showing each merge case in vma_merge_existing_range() to be clearer - the arrow range previously showed the vmg->start, end spanned area, but it is clearer to change this to show the final merged VMA.

This patch has no change in functional behaviour.

Link: https://lkml.kernel.org/r/cover.1738326519.git.lorenzo.stoakes@oracle.com
Link: https://lkml.kernel.org/r/4dfe60f1419d55e5d0516f56349695d73a57184c.1738326519.git.lorenzo.stoakes@oracle.com
Signed-off-by: Lorenzo Stoakes
Reviewed-by: Vlastimil Babka
Cc: Jann Horn
Cc: Liam Howlett
Signed-off-by: Andrew Morton
---
 mm/debug.c | 18 ++---
 mm/mmap.c | 2 +-
 mm/vma.c | 166 +++++++++++++++++++++-------------------
 mm/vma.h | 42 ++++++++--
 tools/testing/vma/vma.c | 52 ++++++-------
 5 files changed, 159 insertions(+), 121 deletions(-)

diff --git a/mm/debug.c b/mm/debug.c
index 8d2acf432385..c9e07651677b 100644
--- a/mm/debug.c
+++ b/mm/debug.c
@@ -261,7 +261,7 @@ void dump_vmg(const struct vma_merge_struct *vmg, const char *reason) pr_warn("vmg %px state: mm %px pgoff %lx\n" "vmi %px [%lx,%lx)\n" - "prev %px next %px vma %px\n" + "prev %px middle %px next %px target %px\n" "start %lx end %lx flags %lx\n" "file %px anon_vma %px policy %px\n" "uffd_ctx %px\n"
@@ -270,7 +270,7 @@ void dump_vmg(const struct vma_merge_struct *vmg, const char *reason) vmg, vmg->mm, vmg->pgoff, vmg->vmi, vmg->vmi ? vma_iter_addr(vmg->vmi) : 0, vmg->vmi ?
vma_iter_end(vmg->vmi) : 0, - vmg->prev, vmg->next, vmg->vma, + vmg->prev, vmg->middle, vmg->next, vmg->target, vmg->start, vmg->end, vmg->flags, vmg->file, vmg->anon_vma, vmg->policy, #ifdef CONFIG_USERFAULTFD @@ -288,13 +288,6 @@ void dump_vmg(const struct vma_merge_struct *vmg, const char *reason) pr_warn("vmg %px mm: (NULL)\n", vmg); } - if (vmg->vma) { - pr_warn("vmg %px vma:\n", vmg); - dump_vma(vmg->vma); - } else { - pr_warn("vmg %px vma: (NULL)\n", vmg); - } - if (vmg->prev) { pr_warn("vmg %px prev:\n", vmg); dump_vma(vmg->prev); @@ -302,6 +295,13 @@ void dump_vmg(const struct vma_merge_struct *vmg, const char *reason) pr_warn("vmg %px prev: (NULL)\n", vmg); } + if (vmg->middle) { + pr_warn("vmg %px middle:\n", vmg); + dump_vma(vmg->middle); + } else { + pr_warn("vmg %px middle: (NULL)\n", vmg); + } + if (vmg->next) { pr_warn("vmg %px next:\n", vmg); dump_vma(vmg->next); diff --git a/mm/mmap.c b/mm/mmap.c index cda01071c7b1..6401a1d73f4a 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -1707,7 +1707,7 @@ int relocate_vma_down(struct vm_area_struct *vma, unsigned long shift) /* * cover the whole range: [new_start, old_end) */ - vmg.vma = vma; + vmg.middle = vma; if (vma_expand(&vmg)) return -ENOMEM; diff --git a/mm/vma.c b/mm/vma.c index 71ca012c616c..7fca21dee7b3 100644 --- a/mm/vma.c +++ b/mm/vma.c @@ -52,7 +52,7 @@ struct mmap_state { .pgoff = (map_)->pgoff, \ .file = (map_)->file, \ .prev = (map_)->prev, \ - .vma = vma_, \ + .middle = vma_, \ .next = (vma_) ? NULL : (map_)->next, \ .state = VMA_MERGE_START, \ .merge_flags = VMG_FLAG_DEFAULT, \ @@ -639,7 +639,7 @@ static int commit_merge(struct vma_merge_struct *vmg, { struct vma_prepare vp; - init_multi_vma_prep(&vp, vmg->vma, adjust, remove, remove2); + init_multi_vma_prep(&vp, vmg->target, adjust, remove, remove2); VM_WARN_ON(vp.anon_vma && adjust && adjust->anon_vma && vp.anon_vma != adjust->anon_vma); @@ -652,15 +652,15 @@ static int commit_merge(struct vma_merge_struct *vmg, adjust->vm_end); } - if (vma_iter_prealloc(vmg->vmi, vmg->vma)) + if (vma_iter_prealloc(vmg->vmi, vmg->target)) return -ENOMEM; vma_prepare(&vp); - vma_adjust_trans_huge(vmg->vma, vmg->start, vmg->end, adj_start); - vma_set_range(vmg->vma, vmg->start, vmg->end, vmg->pgoff); + vma_adjust_trans_huge(vmg->target, vmg->start, vmg->end, adj_start); + vma_set_range(vmg->target, vmg->start, vmg->end, vmg->pgoff); if (expanded) - vma_iter_store(vmg->vmi, vmg->vma); + vma_iter_store(vmg->vmi, vmg->target); if (adj_start) { adjust->vm_start += adj_start; @@ -671,7 +671,7 @@ static int commit_merge(struct vma_merge_struct *vmg, } } - vma_complete(&vp, vmg->vmi, vmg->vma->vm_mm); + vma_complete(&vp, vmg->vmi, vmg->target->vm_mm); return 0; } @@ -694,8 +694,9 @@ static bool can_merge_remove_vma(struct vm_area_struct *vma) * identical properties. * * This function checks for the existence of any such mergeable VMAs and updates - * the maple tree describing the @vmg->vma->vm_mm address space to account for - * this, as well as any VMAs shrunk/expanded/deleted as a result of this merge. + * the maple tree describing the @vmg->middle->vm_mm address space to account + * for this, as well as any VMAs shrunk/expanded/deleted as a result of this + * merge. * * As part of this operation, if a merge occurs, the @vmg object will have its * vma, start, end, and pgoff fields modified to execute the merge. Subsequent @@ -704,45 +705,47 @@ static bool can_merge_remove_vma(struct vm_area_struct *vma) * Returns: The merged VMA if merge succeeds, or NULL otherwise. 
* * ASSUMPTIONS: - * - The caller must assign the VMA to be modifed to @vmg->vma. + * - The caller must assign the VMA to be modifed to @vmg->middle. * - The caller must have set @vmg->prev to the previous VMA, if there is one. * - The caller must not set @vmg->next, as we determine this. * - The caller must hold a WRITE lock on the mm_struct->mmap_lock. - * - vmi must be positioned within [@vmg->vma->vm_start, @vmg->vma->vm_end). + * - vmi must be positioned within [@vmg->middle->vm_start, @vmg->middle->vm_end). */ static __must_check struct vm_area_struct *vma_merge_existing_range( struct vma_merge_struct *vmg) { - struct vm_area_struct *vma = vmg->vma; + struct vm_area_struct *middle = vmg->middle; struct vm_area_struct *prev = vmg->prev; struct vm_area_struct *next, *res; struct vm_area_struct *anon_dup = NULL; struct vm_area_struct *adjust = NULL; unsigned long start = vmg->start; unsigned long end = vmg->end; - bool left_side = vma && start == vma->vm_start; - bool right_side = vma && end == vma->vm_end; + bool left_side = middle && start == middle->vm_start; + bool right_side = middle && end == middle->vm_end; int err = 0; long adj_start = 0; - bool merge_will_delete_vma, merge_will_delete_next; + bool merge_will_delete_middle, merge_will_delete_next; bool merge_left, merge_right, merge_both; bool expanded; mmap_assert_write_locked(vmg->mm); - VM_WARN_ON_VMG(!vma, vmg); /* We are modifying a VMA, so caller must specify. */ + VM_WARN_ON_VMG(!middle, vmg); /* We are modifying a VMA, so caller must specify. */ VM_WARN_ON_VMG(vmg->next, vmg); /* We set this. */ VM_WARN_ON_VMG(prev && start <= prev->vm_start, vmg); VM_WARN_ON_VMG(start >= end, vmg); /* - * If vma == prev, then we are offset into a VMA. Otherwise, if we are + * If middle == prev, then we are offset into a VMA. Otherwise, if we are * not, we must span a portion of the VMA. */ - VM_WARN_ON_VMG(vma && ((vma != prev && vmg->start != vma->vm_start) || - vmg->end > vma->vm_end), vmg); - /* The vmi must be positioned within vmg->vma. */ - VM_WARN_ON_VMG(vma && !(vma_iter_addr(vmg->vmi) >= vma->vm_start && - vma_iter_addr(vmg->vmi) < vma->vm_end), vmg); + VM_WARN_ON_VMG(middle && + ((middle != prev && vmg->start != middle->vm_start) || + vmg->end > middle->vm_end), vmg); + /* The vmi must be positioned within vmg->middle. */ + VM_WARN_ON_VMG(middle && + !(vma_iter_addr(vmg->vmi) >= middle->vm_start && + vma_iter_addr(vmg->vmi) < middle->vm_end), vmg); vmg->state = VMA_MERGE_NOMERGE; @@ -776,13 +779,13 @@ static __must_check struct vm_area_struct *vma_merge_existing_range( merge_both = merge_left && merge_right; /* If we span the entire VMA, a merge implies it will be deleted. */ - merge_will_delete_vma = left_side && right_side; + merge_will_delete_middle = left_side && right_side; /* - * If we need to remove vma in its entirety but are unable to do so, + * If we need to remove middle in its entirety but are unable to do so, * we have no sensible recourse but to abort the merge. */ - if (merge_will_delete_vma && !can_merge_remove_vma(vma)) + if (merge_will_delete_middle && !can_merge_remove_vma(middle)) return NULL; /* @@ -793,7 +796,7 @@ static __must_check struct vm_area_struct *vma_merge_existing_range( /* * If we cannot delete next, then we can reduce the operation to merging - * prev and vma (thereby deleting vma). + * prev and middle (thereby deleting middle). 
*/ if (merge_will_delete_next && !can_merge_remove_vma(next)) { merge_will_delete_next = false; @@ -801,8 +804,8 @@ static __must_check struct vm_area_struct *vma_merge_existing_range( merge_both = false; } - /* No matter what happens, we will be adjusting vma. */ - vma_start_write(vma); + /* No matter what happens, we will be adjusting middle. */ + vma_start_write(middle); if (merge_left) vma_start_write(prev); @@ -812,13 +815,13 @@ static __must_check struct vm_area_struct *vma_merge_existing_range( if (merge_both) { /* - * |<----->| - * |-------*********-------| - * prev vma next - * extend delete delete + * |<-------------------->| + * |-------********-------| + * prev middle next + * extend delete delete */ - vmg->vma = prev; + vmg->target = prev; vmg->start = prev->vm_start; vmg->end = next->vm_end; vmg->pgoff = prev->vm_pgoff; @@ -826,78 +829,79 @@ static __must_check struct vm_area_struct *vma_merge_existing_range( /* * We already ensured anon_vma compatibility above, so now it's * simply a case of, if prev has no anon_vma object, which of - * next or vma contains the anon_vma we must duplicate. + * next or middle contains the anon_vma we must duplicate. */ - err = dup_anon_vma(prev, next->anon_vma ? next : vma, &anon_dup); + err = dup_anon_vma(prev, next->anon_vma ? next : middle, + &anon_dup); } else if (merge_left) { /* - * |<----->| OR - * |<--------->| + * |<------------>| OR + * |<----------------->| * |-------************* - * prev vma + * prev middle * extend shrink/delete */ - vmg->vma = prev; + vmg->target = prev; vmg->start = prev->vm_start; vmg->pgoff = prev->vm_pgoff; - if (!merge_will_delete_vma) { - adjust = vma; - adj_start = vmg->end - vma->vm_start; + if (!merge_will_delete_middle) { + adjust = middle; + adj_start = vmg->end - middle->vm_start; } - err = dup_anon_vma(prev, vma, &anon_dup); + err = dup_anon_vma(prev, middle, &anon_dup); } else { /* merge_right */ /* - * |<----->| OR - * |<--------->| + * |<------------->| OR + * |<----------------->| * *************-------| - * vma next + * middle next * shrink/delete extend */ pgoff_t pglen = PHYS_PFN(vmg->end - vmg->start); VM_WARN_ON_VMG(!merge_right, vmg); - /* If we are offset into a VMA, then prev must be vma. */ - VM_WARN_ON_VMG(vmg->start > vma->vm_start && prev && vma != prev, vmg); + /* If we are offset into a VMA, then prev must be middle. */ + VM_WARN_ON_VMG(vmg->start > middle->vm_start && prev && middle != prev, vmg); - if (merge_will_delete_vma) { - vmg->vma = next; + if (merge_will_delete_middle) { + vmg->target = next; vmg->end = next->vm_end; vmg->pgoff = next->vm_pgoff - pglen; } else { /* - * We shrink vma and expand next. + * We shrink middle and expand next. * * IMPORTANT: This is the ONLY case where the final - * merged VMA is NOT vmg->vma, but rather vmg->next. + * merged VMA is NOT vmg->target, but rather vmg->next. */ - - vmg->start = vma->vm_start; + vmg->target = middle; + vmg->start = middle->vm_start; vmg->end = start; - vmg->pgoff = vma->vm_pgoff; + vmg->pgoff = middle->vm_pgoff; adjust = next; - adj_start = -(vma->vm_end - start); + adj_start = -(middle->vm_end - start); } - err = dup_anon_vma(next, vma, &anon_dup); + err = dup_anon_vma(next, middle, &anon_dup); } if (err) goto abort; /* - * In nearly all cases, we expand vmg->vma. There is one exception - + * In nearly all cases, we expand vmg->middle. There is one exception - * merge_right where we partially span the VMA. In this case we shrink - * the end of vmg->vma and adjust the start of vmg->next accordingly. 
+ * the end of vmg->middle and adjust the start of vmg->next accordingly. */ - expanded = !merge_right || merge_will_delete_vma; + expanded = !merge_right || merge_will_delete_middle; if (commit_merge(vmg, adjust, - merge_will_delete_vma ? vma : NULL, + merge_will_delete_middle ? middle : NULL, merge_will_delete_next ? next : NULL, adj_start, expanded)) { if (anon_dup) @@ -973,7 +977,7 @@ struct vm_area_struct *vma_merge_new_range(struct vma_merge_struct *vmg) bool just_expand = vmg->merge_flags & VMG_FLAG_JUST_EXPAND; mmap_assert_write_locked(vmg->mm); - VM_WARN_ON_VMG(vmg->vma, vmg); + VM_WARN_ON_VMG(vmg->middle, vmg); /* vmi must point at or before the gap. */ VM_WARN_ON_VMG(vma_iter_addr(vmg->vmi) > end, vmg); @@ -989,13 +993,13 @@ struct vm_area_struct *vma_merge_new_range(struct vma_merge_struct *vmg) /* If we can merge with the next VMA, adjust vmg accordingly. */ if (can_merge_right) { vmg->end = next->vm_end; - vmg->vma = next; + vmg->middle = next; } /* If we can merge with the previous VMA, adjust vmg accordingly. */ if (can_merge_left) { vmg->start = prev->vm_start; - vmg->vma = prev; + vmg->middle = prev; vmg->pgoff = prev->vm_pgoff; /* @@ -1017,10 +1021,10 @@ struct vm_area_struct *vma_merge_new_range(struct vma_merge_struct *vmg) * Now try to expand adjacent VMA(s). This takes care of removing the * following VMA if we have VMAs on both sides. */ - if (vmg->vma && !vma_expand(vmg)) { - khugepaged_enter_vma(vmg->vma, vmg->flags); + if (vmg->middle && !vma_expand(vmg)) { + khugepaged_enter_vma(vmg->middle, vmg->flags); vmg->state = VMA_MERGE_SUCCESS; - return vmg->vma; + return vmg->middle; } return NULL; @@ -1032,44 +1036,46 @@ struct vm_area_struct *vma_merge_new_range(struct vma_merge_struct *vmg) * @vmg: Describes a VMA expansion operation. * * Expand @vma to vmg->start and vmg->end. Can expand off the start and end. - * Will expand over vmg->next if it's different from vmg->vma and vmg->end == - * vmg->next->vm_end. Checking if the vmg->vma can expand and merge with + * Will expand over vmg->next if it's different from vmg->middle and vmg->end == + * vmg->next->vm_end. Checking if the vmg->middle can expand and merge with * vmg->next needs to be handled by the caller. * * Returns: 0 on success. * * ASSUMPTIONS: - * - The caller must hold a WRITE lock on vmg->vma->mm->mmap_lock. - * - The caller must have set @vmg->vma and @vmg->next. + * - The caller must hold a WRITE lock on vmg->middle->mm->mmap_lock. + * - The caller must have set @vmg->middle and @vmg->next. */ int vma_expand(struct vma_merge_struct *vmg) { struct vm_area_struct *anon_dup = NULL; bool remove_next = false; - struct vm_area_struct *vma = vmg->vma; + struct vm_area_struct *middle = vmg->middle; struct vm_area_struct *next = vmg->next; mmap_assert_write_locked(vmg->mm); - vma_start_write(vma); - if (next && (vma != next) && (vmg->end == next->vm_end)) { + vma_start_write(middle); + if (next && (middle != next) && (vmg->end == next->vm_end)) { int ret; remove_next = true; /* This should already have been checked by this point. */ VM_WARN_ON_VMG(!can_merge_remove_vma(next), vmg); vma_start_write(next); - ret = dup_anon_vma(vma, next, &anon_dup); + ret = dup_anon_vma(middle, next, &anon_dup); if (ret) return ret; } /* Not merging but overwriting any part of next is not handled. 
*/ VM_WARN_ON_VMG(next && !remove_next && - next != vma && vmg->end > next->vm_start, vmg); + next != middle && vmg->end > next->vm_start, vmg); /* Only handles expanding */ - VM_WARN_ON_VMG(vma->vm_start < vmg->start || vma->vm_end > vmg->end, vmg); + VM_WARN_ON_VMG(middle->vm_start < vmg->start || + middle->vm_end > vmg->end, vmg); + vmg->target = middle; if (commit_merge(vmg, NULL, remove_next ? next : NULL, NULL, 0, true)) goto nomem; @@ -1508,7 +1514,7 @@ int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm, */ static struct vm_area_struct *vma_modify(struct vma_merge_struct *vmg) { - struct vm_area_struct *vma = vmg->vma; + struct vm_area_struct *vma = vmg->middle; unsigned long start = vmg->start; unsigned long end = vmg->end; struct vm_area_struct *merged; @@ -1609,7 +1615,7 @@ struct vm_area_struct *vma_merge_extend(struct vma_iterator *vmi, VMG_VMA_STATE(vmg, vmi, vma, vma, vma->vm_end, vma->vm_end + delta); vmg.next = vma_iter_next_rewind(vmi, NULL); - vmg.vma = NULL; /* We use the VMA to populate VMG fields only. */ + vmg.middle = NULL; /* We use the VMA to populate VMG fields only. */ return vma_merge_new_range(&vmg); } @@ -1730,7 +1736,7 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap, if (new_vma && new_vma->vm_start < addr + len) return NULL; /* should never get here */ - vmg.vma = NULL; /* New VMA range. */ + vmg.middle = NULL; /* New VMA range. */ vmg.pgoff = pgoff; vmg.next = vma_iter_next_rewind(&vmi, NULL); new_vma = vma_merge_new_range(&vmg); diff --git a/mm/vma.h b/mm/vma.h index a2e8710b8c47..5b5dd07e478c 100644 --- a/mm/vma.h +++ b/mm/vma.h @@ -69,16 +69,48 @@ enum vma_merge_flags { VMG_FLAG_JUST_EXPAND = 1 << 0, }; -/* Represents a VMA merge operation. */ +/* + * Describes a VMA merge operation and is threaded throughout it. + * + * Any of the fields may be mutated by the merge operation, so no guarantees are + * made to the contents of this structure after a merge operation has completed. + */ struct vma_merge_struct { struct mm_struct *mm; struct vma_iterator *vmi; - pgoff_t pgoff; + /* + * Adjacent VMAs, any of which may be NULL if not present: + * + * |------|--------|------| + * | prev | middle | next | + * |------|--------|------| + * + * middle may not yet exist in the case of a proposed new VMA being + * merged, or it may be an existing VMA. + * + * next may be assigned by the caller. + */ struct vm_area_struct *prev; - struct vm_area_struct *next; /* Modified by vma_merge(). */ - struct vm_area_struct *vma; /* Either a new VMA or the one being modified. */ + struct vm_area_struct *middle; + struct vm_area_struct *next; + /* + * This is the VMA we ultimately target to become the merged VMA, except + * for the one exception of merge right, shrink next (for details of + * this scenario see vma_merge_existing_range()). + */ + struct vm_area_struct *target; + /* + * Initially, the start, end, pgoff fields are provided by the caller + * and describe the proposed new VMA range, whether modifying an + * existing VMA (which will be 'middle'), or adding a new one. + * + * During the merge process these fields are updated to describe the new + * range _including those VMAs which will be merged_. 
+ */ unsigned long start; unsigned long end; + pgoff_t pgoff; + unsigned long flags; struct file *file; struct anon_vma *anon_vma; @@ -118,8 +150,8 @@ static inline pgoff_t vma_pgoff_offset(struct vm_area_struct *vma, .mm = vma_->vm_mm, \ .vmi = vmi_, \ .prev = prev_, \ + .middle = vma_, \ .next = NULL, \ - .vma = vma_, \ .start = start_, \ .end = end_, \ .flags = vma_->vm_flags, \ diff --git a/tools/testing/vma/vma.c b/tools/testing/vma/vma.c index 04ab45e27fb8..3c0572120e94 100644 --- a/tools/testing/vma/vma.c +++ b/tools/testing/vma/vma.c @@ -147,8 +147,8 @@ static void vmg_set_range(struct vma_merge_struct *vmg, unsigned long start, vma_iter_set(vmg->vmi, start); vmg->prev = NULL; + vmg->middle = NULL; vmg->next = NULL; - vmg->vma = NULL; vmg->start = start; vmg->end = end; @@ -338,7 +338,7 @@ static bool test_simple_expand(void) VMA_ITERATOR(vmi, &mm, 0); struct vma_merge_struct vmg = { .vmi = &vmi, - .vma = vma, + .middle = vma, .start = 0, .end = 0x3000, .pgoff = 0, @@ -631,7 +631,7 @@ static bool test_vma_merge_special_flags(void) */ vma = alloc_and_link_vma(&mm, 0x3000, 0x4000, 3, flags); ASSERT_NE(vma, NULL); - vmg.vma = vma; + vmg.middle = vma; for (i = 0; i < ARRAY_SIZE(special_flags); i++) { vm_flags_t special_flag = special_flags[i]; @@ -760,7 +760,7 @@ static bool test_vma_merge_with_close(void) vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags); vmg.prev = vma_prev; - vmg.vma = vma; + vmg.middle = vma; /* * The VMA being modified in a way that would otherwise merge should @@ -787,7 +787,7 @@ static bool test_vma_merge_with_close(void) vma->vm_ops = &vm_ops; vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags); - vmg.vma = vma; + vmg.middle = vma; ASSERT_EQ(merge_existing(&vmg), NULL); /* * Initially this is misapprehended as an out of memory report, as the @@ -817,7 +817,7 @@ static bool test_vma_merge_with_close(void) vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags); vmg.prev = vma_prev; - vmg.vma = vma; + vmg.middle = vma; ASSERT_EQ(merge_existing(&vmg), NULL); ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE); @@ -843,7 +843,7 @@ static bool test_vma_merge_with_close(void) vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags); vmg.prev = vma_prev; - vmg.vma = vma; + vmg.middle = vma; ASSERT_EQ(merge_existing(&vmg), vma_prev); ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS); @@ -940,7 +940,7 @@ static bool test_merge_existing(void) vma_next = alloc_and_link_vma(&mm, 0x6000, 0x9000, 6, flags); vma_next->vm_ops = &vm_ops; /* This should have no impact. */ vmg_set_range(&vmg, 0x3000, 0x6000, 3, flags); - vmg.vma = vma; + vmg.middle = vma; vmg.prev = vma; vma->anon_vma = &dummy_anon_vma; ASSERT_EQ(merge_existing(&vmg), vma_next); @@ -973,7 +973,7 @@ static bool test_merge_existing(void) vma_next = alloc_and_link_vma(&mm, 0x6000, 0x9000, 6, flags); vma_next->vm_ops = &vm_ops; /* This should have no impact. */ vmg_set_range(&vmg, 0x2000, 0x6000, 2, flags); - vmg.vma = vma; + vmg.middle = vma; vma->anon_vma = &dummy_anon_vma; ASSERT_EQ(merge_existing(&vmg), vma_next); ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS); @@ -1003,7 +1003,7 @@ static bool test_merge_existing(void) vma->vm_ops = &vm_ops; /* This should have no impact. 
*/ vmg_set_range(&vmg, 0x3000, 0x6000, 3, flags); vmg.prev = vma_prev; - vmg.vma = vma; + vmg.middle = vma; vma->anon_vma = &dummy_anon_vma; ASSERT_EQ(merge_existing(&vmg), vma_prev); @@ -1037,7 +1037,7 @@ static bool test_merge_existing(void) vma = alloc_and_link_vma(&mm, 0x3000, 0x7000, 3, flags); vmg_set_range(&vmg, 0x3000, 0x7000, 3, flags); vmg.prev = vma_prev; - vmg.vma = vma; + vmg.middle = vma; vma->anon_vma = &dummy_anon_vma; ASSERT_EQ(merge_existing(&vmg), vma_prev); ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS); @@ -1067,7 +1067,7 @@ static bool test_merge_existing(void) vma_next = alloc_and_link_vma(&mm, 0x7000, 0x9000, 7, flags); vmg_set_range(&vmg, 0x3000, 0x7000, 3, flags); vmg.prev = vma_prev; - vmg.vma = vma; + vmg.middle = vma; vma->anon_vma = &dummy_anon_vma; ASSERT_EQ(merge_existing(&vmg), vma_prev); ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS); @@ -1102,37 +1102,37 @@ static bool test_merge_existing(void) vmg_set_range(&vmg, 0x4000, 0x5000, 4, flags); vmg.prev = vma; - vmg.vma = vma; + vmg.middle = vma; ASSERT_EQ(merge_existing(&vmg), NULL); ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE); vmg_set_range(&vmg, 0x5000, 0x6000, 5, flags); vmg.prev = vma; - vmg.vma = vma; + vmg.middle = vma; ASSERT_EQ(merge_existing(&vmg), NULL); ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE); vmg_set_range(&vmg, 0x6000, 0x7000, 6, flags); vmg.prev = vma; - vmg.vma = vma; + vmg.middle = vma; ASSERT_EQ(merge_existing(&vmg), NULL); ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE); vmg_set_range(&vmg, 0x4000, 0x7000, 4, flags); vmg.prev = vma; - vmg.vma = vma; + vmg.middle = vma; ASSERT_EQ(merge_existing(&vmg), NULL); ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE); vmg_set_range(&vmg, 0x4000, 0x6000, 4, flags); vmg.prev = vma; - vmg.vma = vma; + vmg.middle = vma; ASSERT_EQ(merge_existing(&vmg), NULL); ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE); vmg_set_range(&vmg, 0x5000, 0x6000, 5, flags); vmg.prev = vma; - vmg.vma = vma; + vmg.middle = vma; ASSERT_EQ(merge_existing(&vmg), NULL); ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE); @@ -1197,7 +1197,7 @@ static bool test_anon_vma_non_mergeable(void) vmg_set_range(&vmg, 0x3000, 0x7000, 3, flags); vmg.prev = vma_prev; - vmg.vma = vma; + vmg.middle = vma; ASSERT_EQ(merge_existing(&vmg), vma_prev); ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS); @@ -1277,7 +1277,7 @@ static bool test_dup_anon_vma(void) vma_next->anon_vma = &dummy_anon_vma; vmg_set_range(&vmg, 0, 0x5000, 0, flags); - vmg.vma = vma_prev; + vmg.middle = vma_prev; vmg.next = vma_next; ASSERT_EQ(expand_existing(&vmg), 0); @@ -1309,7 +1309,7 @@ static bool test_dup_anon_vma(void) vma_next->anon_vma = &dummy_anon_vma; vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags); vmg.prev = vma_prev; - vmg.vma = vma; + vmg.middle = vma; ASSERT_EQ(merge_existing(&vmg), vma_prev); ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS); @@ -1338,7 +1338,7 @@ static bool test_dup_anon_vma(void) vma->anon_vma = &dummy_anon_vma; vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags); vmg.prev = vma_prev; - vmg.vma = vma; + vmg.middle = vma; ASSERT_EQ(merge_existing(&vmg), vma_prev); ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS); @@ -1366,7 +1366,7 @@ static bool test_dup_anon_vma(void) vma->anon_vma = &dummy_anon_vma; vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags); vmg.prev = vma_prev; - vmg.vma = vma; + vmg.middle = vma; ASSERT_EQ(merge_existing(&vmg), vma_prev); ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS); @@ -1394,7 +1394,7 @@ static bool test_dup_anon_vma(void) vma->anon_vma = &dummy_anon_vma; vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags); vmg.prev = vma; - vmg.vma = vma; + vmg.middle = vma; 
ASSERT_EQ(merge_existing(&vmg), vma_next); ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS); @@ -1432,7 +1432,7 @@ static bool test_vmi_prealloc_fail(void) vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags); vmg.prev = vma_prev; - vmg.vma = vma; + vmg.middle = vma; fail_prealloc = true; @@ -1458,7 +1458,7 @@ static bool test_vmi_prealloc_fail(void) vma->anon_vma = &dummy_anon_vma; vmg_set_range(&vmg, 0, 0x5000, 3, flags); - vmg.vma = vma_prev; + vmg.middle = vma_prev; vmg.next = vma; fail_prealloc = true; -- 2.50.1 From 6ab2d9c7c680c0a041477126722cebe7dc57f713 Mon Sep 17 00:00:00 2001 From: Lorenzo Stoakes Date: Fri, 31 Jan 2025 12:31:50 +0000 Subject: [PATCH 10/16] mm: further refactor commit_merge() The current VMA merge mechanism contains a number of confusing mechanisms around removal of VMAs on merge and the shrinking of the VMA adjacent to vma->target in the case of merges which result in a partial merge with that adjacent VMA. Since we now have a STABLE set of VMAs - prev, middle, next - we are now able to have the caller of commit_merge() explicitly tell us which VMAs need deleting, using newly introduced internal VMA merge flags. Doing so allows us to embed this state within the VMG and remove the confusing remove, remove2 parameters from commit_merge(). We additionally are able to eliminate the highly confusing and misleading 'expanded' parameter - a parameter that in reality refers to whether or not the return VMA is the target one or the one immediately adjacent. We can infer which is the case from whether or not the adj_start parameter is negative. This also allows us to simplify further logic around iterator configuration and VMA iterator stores. Doing so means we can also eliminate the adjust parameter, as we are able to infer which VMA ought to be adjusted from adj_start - a positive value implies we adjust the start of 'middle', a negative one implies we adjust the start of 'next'. We are then able to have commit_merge() explicitly return the target VMA, or NULL on inability to pre-allocate memory. Errors were previously filtered so behaviour does not change. We additionally move from the slightly odd use of a bitwise-flag enum vmg->merge_flags field to vmg bitfields. This patch has no change in functional behaviour. Link: https://lkml.kernel.org/r/7bf2ed24af68aac18672b7acebbd9102f48c5b03.1738326519.git.lorenzo.stoakes@oracle.com Signed-off-by: Lorenzo Stoakes Reviewed-by: Vlastimil Babka Cc: Jann Horn Cc: Liam Howlett Signed-off-by: Andrew Morton --- mm/debug.c | 6 ++- mm/vma.c | 101 +++++++++++++++++++++------------------- mm/vma.h | 38 +++++++++------ tools/testing/vma/vma.c | 9 +++- 4 files changed, 87 insertions(+), 67 deletions(-) diff --git a/mm/debug.c b/mm/debug.c index c9e07651677b..60c6f1134383 100644 --- a/mm/debug.c +++ b/mm/debug.c @@ -266,7 +266,8 @@ void dump_vmg(const struct vma_merge_struct *vmg, const char *reason) "file %px anon_vma %px policy %px\n" "uffd_ctx %px\n" "anon_name %px\n" - "merge_flags %x state %x\n", + "state %x\n" + "just_expand %d __remove_middle %d __remove_next %d\n", vmg, vmg->mm, vmg->pgoff, vmg->vmi, vmg->vmi ? vma_iter_addr(vmg->vmi) : 0, vmg->vmi ? 
vma_iter_end(vmg->vmi) : 0, @@ -279,7 +280,8 @@ void dump_vmg(const struct vma_merge_struct *vmg, const char *reason) (void *)0, #endif vmg->anon_name, - (int)vmg->merge_flags, (int)vmg->state); + (int)vmg->state, + vmg->just_expand, vmg->__remove_middle, vmg->__remove_next); if (vmg->mm) { pr_warn("vmg %px mm:\n", vmg); diff --git a/mm/vma.c b/mm/vma.c index 7fca21dee7b3..dd9fdf5c9429 100644 --- a/mm/vma.c +++ b/mm/vma.c @@ -55,7 +55,6 @@ struct mmap_state { .middle = vma_, \ .next = (vma_) ? NULL : (map_)->next, \ .state = VMA_MERGE_START, \ - .merge_flags = VMG_FLAG_DEFAULT, \ } static inline bool is_mergeable_vma(struct vma_merge_struct *vmg, bool merge_next) @@ -120,8 +119,8 @@ static void init_multi_vma_prep(struct vma_prepare *vp, memset(vp, 0, sizeof(struct vma_prepare)); vp->vma = vma; vp->anon_vma = vma->anon_vma; - vp->remove = remove; - vp->remove2 = remove2; + vp->remove = remove ? remove : remove2; + vp->remove2 = remove ? remove2 : NULL; vp->adj_next = next; if (!vp->anon_vma && next) vp->anon_vma = next->anon_vma; @@ -129,7 +128,6 @@ static void init_multi_vma_prep(struct vma_prepare *vp, vp->file = vma->vm_file; if (vp->file) vp->mapping = vma->vm_file->f_mapping; - } /* @@ -629,22 +627,40 @@ void validate_mm(struct mm_struct *mm) } #endif /* CONFIG_DEBUG_VM_MAPLE_TREE */ -/* Actually perform the VMA merge operation. */ -static int commit_merge(struct vma_merge_struct *vmg, - struct vm_area_struct *adjust, - struct vm_area_struct *remove, - struct vm_area_struct *remove2, - long adj_start, - bool expanded) +/* + * Actually perform the VMA merge operation. + * + * On success, returns the merged VMA. Otherwise returns NULL. + */ +static struct vm_area_struct *commit_merge(struct vma_merge_struct *vmg, + long adj_start) { struct vma_prepare vp; + struct vm_area_struct *remove = NULL; + struct vm_area_struct *remove2 = NULL; + struct vm_area_struct *adjust = NULL; + /* + * In all cases but that of merge right, shrink next, we write + * vmg->target to the maple tree and return this as the merged VMA. + */ + bool merge_target = adj_start >= 0; + + if (vmg->__remove_middle) + remove = vmg->middle; + if (vmg->__remove_next) + remove2 = vmg->next; + + if (adj_start > 0) + adjust = vmg->middle; + else if (adj_start < 0) + adjust = vmg->next; init_multi_vma_prep(&vp, vmg->target, adjust, remove, remove2); VM_WARN_ON(vp.anon_vma && adjust && adjust->anon_vma && vp.anon_vma != adjust->anon_vma); - if (expanded) { + if (merge_target) { /* Note: vma iterator must be pointing to 'start'. */ vma_iter_config(vmg->vmi, vmg->start, vmg->end); } else { @@ -653,27 +669,26 @@ static int commit_merge(struct vma_merge_struct *vmg, } if (vma_iter_prealloc(vmg->vmi, vmg->target)) - return -ENOMEM; + return NULL; vma_prepare(&vp); vma_adjust_trans_huge(vmg->target, vmg->start, vmg->end, adj_start); vma_set_range(vmg->target, vmg->start, vmg->end, vmg->pgoff); - if (expanded) + if (merge_target) vma_iter_store(vmg->vmi, vmg->target); if (adj_start) { adjust->vm_start += adj_start; adjust->vm_pgoff += PHYS_PFN(adj_start); - if (adj_start < 0) { - WARN_ON(expanded); + + if (!merge_target) vma_iter_store(vmg->vmi, adjust); - } } vma_complete(&vp, vmg->vmi, vmg->target->vm_mm); - return 0; + return merge_target ? vmg->target : vmg->next; } /* We can only remove VMAs when merging if they do not have a close hook. 
*/ @@ -718,16 +733,13 @@ static __must_check struct vm_area_struct *vma_merge_existing_range( struct vm_area_struct *prev = vmg->prev; struct vm_area_struct *next, *res; struct vm_area_struct *anon_dup = NULL; - struct vm_area_struct *adjust = NULL; unsigned long start = vmg->start; unsigned long end = vmg->end; bool left_side = middle && start == middle->vm_start; bool right_side = middle && end == middle->vm_end; int err = 0; long adj_start = 0; - bool merge_will_delete_middle, merge_will_delete_next; bool merge_left, merge_right, merge_both; - bool expanded; mmap_assert_write_locked(vmg->mm); VM_WARN_ON_VMG(!middle, vmg); /* We are modifying a VMA, so caller must specify. */ @@ -779,27 +791,27 @@ static __must_check struct vm_area_struct *vma_merge_existing_range( merge_both = merge_left && merge_right; /* If we span the entire VMA, a merge implies it will be deleted. */ - merge_will_delete_middle = left_side && right_side; + vmg->__remove_middle = left_side && right_side; /* * If we need to remove middle in its entirety but are unable to do so, * we have no sensible recourse but to abort the merge. */ - if (merge_will_delete_middle && !can_merge_remove_vma(middle)) + if (vmg->__remove_middle && !can_merge_remove_vma(middle)) return NULL; /* * If we merge both VMAs, then next is also deleted. This implies * merge_will_delete_vma also. */ - merge_will_delete_next = merge_both; + vmg->__remove_next = merge_both; /* * If we cannot delete next, then we can reduce the operation to merging * prev and middle (thereby deleting middle). */ - if (merge_will_delete_next && !can_merge_remove_vma(next)) { - merge_will_delete_next = false; + if (vmg->__remove_next && !can_merge_remove_vma(next)) { + vmg->__remove_next = false; merge_right = false; merge_both = false; } @@ -846,10 +858,11 @@ static __must_check struct vm_area_struct *vma_merge_existing_range( vmg->start = prev->vm_start; vmg->pgoff = prev->vm_pgoff; - if (!merge_will_delete_middle) { - adjust = middle; + /* + * We both expand prev and shrink middle. + */ + if (!vmg->__remove_middle) adj_start = vmg->end - middle->vm_start; - } err = dup_anon_vma(prev, middle, &anon_dup); } else { /* merge_right */ @@ -867,7 +880,7 @@ static __must_check struct vm_area_struct *vma_merge_existing_range( /* If we are offset into a VMA, then prev must be middle. */ VM_WARN_ON_VMG(vmg->start > middle->vm_start && prev && middle != prev, vmg); - if (merge_will_delete_middle) { + if (vmg->__remove_middle) { vmg->target = next; vmg->end = next->vm_end; vmg->pgoff = next->vm_pgoff - pglen; @@ -883,7 +896,6 @@ static __must_check struct vm_area_struct *vma_merge_existing_range( vmg->end = start; vmg->pgoff = middle->vm_pgoff; - adjust = next; adj_start = -(middle->vm_end - start); } @@ -893,17 +905,8 @@ static __must_check struct vm_area_struct *vma_merge_existing_range( if (err) goto abort; - /* - * In nearly all cases, we expand vmg->middle. There is one exception - - * merge_right where we partially span the VMA. In this case we shrink - * the end of vmg->middle and adjust the start of vmg->next accordingly. - */ - expanded = !merge_right || merge_will_delete_middle; - - if (commit_merge(vmg, adjust, - merge_will_delete_middle ? middle : NULL, - merge_will_delete_next ? next : NULL, - adj_start, expanded)) { + res = commit_merge(vmg, adj_start); + if (!res) { if (anon_dup) unlink_anon_vmas(anon_dup); @@ -911,9 +914,7 @@ static __must_check struct vm_area_struct *vma_merge_existing_range( return NULL; } - res = merge_left ? 
prev : next; khugepaged_enter_vma(res, vmg->flags); - vmg->state = VMA_MERGE_SUCCESS; return res; @@ -974,7 +975,6 @@ struct vm_area_struct *vma_merge_new_range(struct vma_merge_struct *vmg) struct vm_area_struct *next = vmg->next; unsigned long end = vmg->end; bool can_merge_left, can_merge_right; - bool just_expand = vmg->merge_flags & VMG_FLAG_JUST_EXPAND; mmap_assert_write_locked(vmg->mm); VM_WARN_ON_VMG(vmg->middle, vmg); @@ -988,7 +988,7 @@ struct vm_area_struct *vma_merge_new_range(struct vma_merge_struct *vmg) return NULL; can_merge_left = can_vma_merge_left(vmg); - can_merge_right = !just_expand && can_vma_merge_right(vmg, can_merge_left); + can_merge_right = !vmg->just_expand && can_vma_merge_right(vmg, can_merge_left); /* If we can merge with the next VMA, adjust vmg accordingly. */ if (can_merge_right) { @@ -1011,7 +1011,7 @@ struct vm_area_struct *vma_merge_new_range(struct vma_merge_struct *vmg) vmg->end = end; /* In expand-only case we are already positioned at prev. */ - if (!just_expand) { + if (!vmg->just_expand) { /* Equivalent to going to the previous range. */ vma_prev(vmg->vmi); } @@ -1076,7 +1076,10 @@ int vma_expand(struct vma_merge_struct *vmg) middle->vm_end > vmg->end, vmg); vmg->target = middle; - if (commit_merge(vmg, NULL, remove_next ? next : NULL, NULL, 0, true)) + if (remove_next) + vmg->__remove_next = true; + + if (!commit_merge(vmg, 0)) goto nomem; return 0; @@ -2593,7 +2596,7 @@ int do_brk_flags(struct vma_iterator *vmi, struct vm_area_struct *vma, vmg.prev = vma; /* vmi is positioned at prev, which this mode expects. */ - vmg.merge_flags = VMG_FLAG_JUST_EXPAND; + vmg.just_expand = true; if (vma_merge_new_range(&vmg)) goto out; diff --git a/mm/vma.h b/mm/vma.h index 5b5dd07e478c..7935681a2db8 100644 --- a/mm/vma.h +++ b/mm/vma.h @@ -58,17 +58,6 @@ enum vma_merge_state { VMA_MERGE_SUCCESS, }; -enum vma_merge_flags { - VMG_FLAG_DEFAULT = 0, - /* - * If we can expand, simply do so. We know there is nothing to merge to - * the right. Does not reset state upon failure to merge. The VMA - * iterator is assumed to be positioned at the previous VMA, rather than - * at the gap. - */ - VMG_FLAG_JUST_EXPAND = 1 << 0, -}; - /* * Describes a VMA merge operation and is threaded throughout it. * @@ -117,8 +106,31 @@ struct vma_merge_struct { struct mempolicy *policy; struct vm_userfaultfd_ctx uffd_ctx; struct anon_vma_name *anon_name; - enum vma_merge_flags merge_flags; enum vma_merge_state state; + + /* Flags which callers can use to modify merge behaviour: */ + + /* + * If we can expand, simply do so. We know there is nothing to merge to + * the right. Does not reset state upon failure to merge. The VMA + * iterator is assumed to be positioned at the previous VMA, rather than + * at the gap. + */ + bool just_expand :1; + + /* Internal flags set during merge process: */ + + /* + * Internal flag used during the merge operation to indicate we will + * remove vmg->middle. + */ + bool __remove_middle :1; + /* + * Internal flag used during the merge operationr to indicate we will + * remove vmg->next. 
+ */ + bool __remove_next :1; + }; static inline bool vmg_nomem(struct vma_merge_struct *vmg) @@ -142,7 +154,6 @@ static inline pgoff_t vma_pgoff_offset(struct vm_area_struct *vma, .flags = flags_, \ .pgoff = pgoff_, \ .state = VMA_MERGE_START, \ - .merge_flags = VMG_FLAG_DEFAULT, \ } #define VMG_VMA_STATE(name, vmi_, prev_, vma_, start_, end_) \ @@ -162,7 +173,6 @@ static inline pgoff_t vma_pgoff_offset(struct vm_area_struct *vma, .uffd_ctx = vma_->vm_userfaultfd_ctx, \ .anon_name = anon_vma_name(vma_), \ .state = VMA_MERGE_START, \ - .merge_flags = VMG_FLAG_DEFAULT, \ } #ifdef CONFIG_DEBUG_VM_MAPLE_TREE diff --git a/tools/testing/vma/vma.c b/tools/testing/vma/vma.c index 3c0572120e94..7728498b2f7e 100644 --- a/tools/testing/vma/vma.c +++ b/tools/testing/vma/vma.c @@ -149,11 +149,16 @@ static void vmg_set_range(struct vma_merge_struct *vmg, unsigned long start, vmg->prev = NULL; vmg->middle = NULL; vmg->next = NULL; + vmg->target = NULL; vmg->start = start; vmg->end = end; vmg->pgoff = pgoff; vmg->flags = flags; + + vmg->just_expand = false; + vmg->__remove_middle = false; + vmg->__remove_next = false; } /* @@ -1546,7 +1551,7 @@ static bool test_expand_only_mode(void) /* * Place a VMA prior to the one we're expanding so we assert that we do * not erroneously try to traverse to the previous VMA even though we - * have, through the use of VMG_FLAG_JUST_EXPAND, indicated we do not + * have, through the use of the just_expand flag, indicated we do not * need to do so. */ alloc_and_link_vma(&mm, 0, 0x2000, 0, flags); @@ -1558,7 +1563,7 @@ static bool test_expand_only_mode(void) vma_iter_set(&vmi, 0x3000); vma_prev = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags); vmg.prev = vma_prev; - vmg.merge_flags = VMG_FLAG_JUST_EXPAND; + vmg.just_expand = true; vma = vma_merge_new_range(&vmg); ASSERT_NE(vma, NULL); -- 2.50.1 From fe3e9cf0d7a28d333523189d1405770d980b07d6 Mon Sep 17 00:00:00 2001 From: Lorenzo Stoakes Date: Fri, 31 Jan 2025 12:31:51 +0000 Subject: [PATCH 11/16] mm: eliminate adj_start parameter from commit_merge() Introduce internal vmg->__adjust_middle_start and vmg->__adjust_next_start merge flags, enabling us to indicate to commit_merge() that we are performing a merge which either spans only part of vmg->middle, or part of vmg->next respectively. In the former instance, we change the start of vmg->middle to match the attributes of vmg->prev, without spanning all of vmg->middle. This implies that vmg->prev->vm_end and vmg->middle->vm_start are both increased to form the new merged VMA (vmg->prev) and the new subsequent VMA (vmg->middle). In the latter case, we change the end of vmg->middle to match the attributes of vmg->next, without spanning all of vmg->next. This implies that vmg->middle->vm_end and vmg->next->vm_start are both decreased to form the new merged VMA (vmg->next) and the new prior VMA (vmg->middle). Since we now have a stable set of prev, middle, next VMAs threaded through vmg and with these flags set know what is happening, we can perform the calculation in commit_merge() instead. This allows us to drop the confusing adj_start parameter and instead pass semantic information to commit_merge(). In the latter case the -(middle->vm_end - start) calculation becomes -(middle->vm-end - vmg->end), however this is correct as vmg->end is set to the start parameter. This is because in this case (rather confusingly), we manipulate vmg->middle, but ultimately return vmg->next, whose range will be correctly specified. 
At this point vmg->start, end is the new range for the prior VMA rather than the merged one. This patch has no change in functional behaviour. Link: https://lkml.kernel.org/r/bcec0cd980b373a5eb02236cb033034ce1effe42.1738326519.git.lorenzo.stoakes@oracle.com Signed-off-by: Lorenzo Stoakes Reviewed-by: Vlastimil Babka Cc: Jann Horn Cc: Liam Howlett Signed-off-by: Andrew Morton --- mm/debug.c | 8 +++++-- mm/vma.c | 50 ++++++++++++++++++++++++----------------- mm/vma.h | 10 +++++++++ tools/testing/vma/vma.c | 2 ++ 4 files changed, 48 insertions(+), 22 deletions(-) diff --git a/mm/debug.c b/mm/debug.c index 60c6f1134383..e1282b85a877 100644 --- a/mm/debug.c +++ b/mm/debug.c @@ -267,7 +267,9 @@ void dump_vmg(const struct vma_merge_struct *vmg, const char *reason) "uffd_ctx %px\n" "anon_name %px\n" "state %x\n" - "just_expand %d __remove_middle %d __remove_next %d\n", + "just_expand %d\n" + "__adjust_middle_start %d __adjust_next_start %d\n" + "__remove_middle %d __remove_next %d\n", vmg, vmg->mm, vmg->pgoff, vmg->vmi, vmg->vmi ? vma_iter_addr(vmg->vmi) : 0, vmg->vmi ? vma_iter_end(vmg->vmi) : 0, @@ -281,7 +283,9 @@ void dump_vmg(const struct vma_merge_struct *vmg, const char *reason) #endif vmg->anon_name, (int)vmg->state, - vmg->just_expand, vmg->__remove_middle, vmg->__remove_next); + vmg->just_expand, + vmg->__adjust_middle_start, vmg->__adjust_next_start, + vmg->__remove_middle, vmg->__remove_next); if (vmg->mm) { pr_warn("vmg %px mm:\n", vmg); diff --git a/mm/vma.c b/mm/vma.c index dd9fdf5c9429..bac0e72ccb62 100644 --- a/mm/vma.c +++ b/mm/vma.c @@ -632,29 +632,44 @@ void validate_mm(struct mm_struct *mm) * * On success, returns the merged VMA. Otherwise returns NULL. */ -static struct vm_area_struct *commit_merge(struct vma_merge_struct *vmg, - long adj_start) +static struct vm_area_struct *commit_merge(struct vma_merge_struct *vmg) { - struct vma_prepare vp; struct vm_area_struct *remove = NULL; struct vm_area_struct *remove2 = NULL; + struct vma_prepare vp; struct vm_area_struct *adjust = NULL; + long adj_start; + bool merge_target; + /* - * In all cases but that of merge right, shrink next, we write - * vmg->target to the maple tree and return this as the merged VMA. + * If modifying an existing VMA and we don't remove vmg->middle, then we + * shrink the adjacent VMA. */ - bool merge_target = adj_start >= 0; + if (vmg->__adjust_middle_start) { + adjust = vmg->middle; + /* The POSITIVE value by which we offset vmg->middle->vm_start. */ + adj_start = vmg->end - vmg->middle->vm_start; + merge_target = true; + } else if (vmg->__adjust_next_start) { + adjust = vmg->next; + /* The NEGATIVE value by which we offset vmg->next->vm_start. */ + adj_start = -(vmg->middle->vm_end - vmg->end); + /* + * In all cases but this - merge right, shrink next - we write + * vmg->target to the maple tree and return this as the merged VMA. 
+ */ + merge_target = false; + } else { + adjust = NULL; + adj_start = 0; + merge_target = true; + } if (vmg->__remove_middle) remove = vmg->middle; if (vmg->__remove_next) remove2 = vmg->next; - if (adj_start > 0) - adjust = vmg->middle; - else if (adj_start < 0) - adjust = vmg->next; - init_multi_vma_prep(&vp, vmg->target, adjust, remove, remove2); VM_WARN_ON(vp.anon_vma && adjust && adjust->anon_vma && @@ -738,7 +753,6 @@ static __must_check struct vm_area_struct *vma_merge_existing_range( bool left_side = middle && start == middle->vm_start; bool right_side = middle && end == middle->vm_end; int err = 0; - long adj_start = 0; bool merge_left, merge_right, merge_both; mmap_assert_write_locked(vmg->mm); @@ -858,11 +872,8 @@ static __must_check struct vm_area_struct *vma_merge_existing_range( vmg->start = prev->vm_start; vmg->pgoff = prev->vm_pgoff; - /* - * We both expand prev and shrink middle. - */ if (!vmg->__remove_middle) - adj_start = vmg->end - middle->vm_start; + vmg->__adjust_middle_start = true; err = dup_anon_vma(prev, middle, &anon_dup); } else { /* merge_right */ @@ -891,12 +902,11 @@ static __must_check struct vm_area_struct *vma_merge_existing_range( * IMPORTANT: This is the ONLY case where the final * merged VMA is NOT vmg->target, but rather vmg->next. */ + vmg->__adjust_next_start = true; vmg->target = middle; vmg->start = middle->vm_start; vmg->end = start; vmg->pgoff = middle->vm_pgoff; - - adj_start = -(middle->vm_end - start); } err = dup_anon_vma(next, middle, &anon_dup); @@ -905,7 +915,7 @@ static __must_check struct vm_area_struct *vma_merge_existing_range( if (err) goto abort; - res = commit_merge(vmg, adj_start); + res = commit_merge(vmg); if (!res) { if (anon_dup) unlink_anon_vmas(anon_dup); @@ -1079,7 +1089,7 @@ int vma_expand(struct vma_merge_struct *vmg) if (remove_next) vmg->__remove_next = true; - if (!commit_merge(vmg, 0)) + if (!commit_merge(vmg)) goto nomem; return 0; diff --git a/mm/vma.h b/mm/vma.h index 7935681a2db8..e18487797fa4 100644 --- a/mm/vma.h +++ b/mm/vma.h @@ -120,6 +120,16 @@ struct vma_merge_struct { /* Internal flags set during merge process: */ + /* + * Internal flag indicating the merge increases vmg->middle->vm_start + * (and thereby, vmg->prev->vm_end). + */ + bool __adjust_middle_start :1; + /* + * Internal flag indicating the merge decreases vmg->next->vm_start + * (and thereby, vmg->middle->vm_end). + */ + bool __adjust_next_start :1; /* * Internal flag used during the merge operation to indicate we will * remove vmg->middle. diff --git a/tools/testing/vma/vma.c b/tools/testing/vma/vma.c index 7728498b2f7e..c7ffa71841ca 100644 --- a/tools/testing/vma/vma.c +++ b/tools/testing/vma/vma.c @@ -159,6 +159,8 @@ static void vmg_set_range(struct vma_merge_struct *vmg, unsigned long start, vmg->just_expand = false; vmg->__remove_middle = false; vmg->__remove_next = false; + vmg->__adjust_middle_start = false; + vmg->__adjust_next_start = false; } /* -- 2.50.1 From 0e5ffe9b2bd6d9ab7bf45f512c016e4710bf6d5d Mon Sep 17 00:00:00 2001 From: Lorenzo Stoakes Date: Fri, 31 Jan 2025 12:31:52 +0000 Subject: [PATCH 12/16] mm: make vmg->target consistent and further simplify commit_merge() It is confusing for vmg->target to sometimes be the target merged VMA and in one case not. Fix this by having commit_merge() use its awareness of the vmg->_adjust_next_start case to know that it is manipulating a separate vma, abstracted in the 'vma' local variable. 
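To keep that distinction in view, here is a minimal user-space sketch of the rule (the toy_* names are simplified stand-ins, not the kernel definitions): the VMA whose range commit_merge() rewrites is vmg->middle only in the adjust-next case, while the VMA handed back to the caller is always vmg->target.

    #include <stdbool.h>

    /* Simplified stand-ins for the kernel structures; not the real definitions. */
    struct toy_vma { unsigned long vm_start, vm_end; };

    struct toy_vmg {
            struct toy_vma *middle, *next, *target;
            unsigned long start, end;
            bool adjust_next_start;         /* models vmg->__adjust_next_start */
    };

    /*
     * The rule this patch establishes: the VMA whose range we rewrite is middle
     * only in the adjust-next case; the merged VMA returned is always target.
     */
    static struct toy_vma *toy_commit_merge(struct toy_vmg *vmg)
    {
            struct toy_vma *vma = vmg->adjust_next_start ? vmg->middle : vmg->target;

            vma->vm_start = vmg->start;
            vma->vm_end = vmg->end;
            /* ...maple tree, rmap and anon_vma bookkeeping elided... */
            return vmg->target;
    }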
Place removal and adjust VMA determination logic into init_multi_vma_prep(), as the flags give us enough information to do so, and since this is the function that sets up the vma_prepare struct it makes sense to do so here. Doing this significantly simplifies commit_merge(), allowing us to eliminate the 'merge_target' handling, initialise the VMA iterator in a more sensible place and simply return vmg->target consistently. This also allows us to simplify setting vmg->target in vma_merge_existing_range() since we are then left only with two cases - merge left (or both) where the target is vmg->prev or merge right in which the target is vmg->next. This makes it easy for somebody reading the code to know what VMA will actually be the one returned and merged into and removes a great deal of the confusing 'adjust' nonsense. This patch has no change in functional behaviour. Link: https://lkml.kernel.org/r/50f96e31ab1980eaaf1006e34a4f6e6dad9320b8.1738326519.git.lorenzo.stoakes@oracle.com Signed-off-by: Lorenzo Stoakes Reviewed-by: Vlastimil Babka Cc: Jann Horn Cc: Liam Howlett Signed-off-by: Andrew Morton --- mm/vma.c | 119 ++++++++++++++++++++++++++++--------------------------- mm/vma.h | 6 +-- 2 files changed, 62 insertions(+), 63 deletions(-) diff --git a/mm/vma.c b/mm/vma.c index bac0e72ccb62..b10625e8fc99 100644 --- a/mm/vma.c +++ b/mm/vma.c @@ -106,24 +106,40 @@ static inline bool are_anon_vmas_compatible(struct vm_area_struct *vma1, * init_multi_vma_prep() - Initializer for struct vma_prepare * @vp: The vma_prepare struct * @vma: The vma that will be altered once locked - * @next: The next vma if it is to be adjusted - * @remove: The first vma to be removed - * @remove2: The second vma to be removed + * @vmg: The merge state that will be used to determine adjustment and VMA + * removal. */ static void init_multi_vma_prep(struct vma_prepare *vp, struct vm_area_struct *vma, - struct vm_area_struct *next, - struct vm_area_struct *remove, - struct vm_area_struct *remove2) + struct vma_merge_struct *vmg) { + struct vm_area_struct *adjust; + struct vm_area_struct **remove = &vp->remove; + memset(vp, 0, sizeof(struct vma_prepare)); vp->vma = vma; vp->anon_vma = vma->anon_vma; - vp->remove = remove ? remove : remove2; - vp->remove2 = remove ? 
remove2 : NULL; - vp->adj_next = next; - if (!vp->anon_vma && next) - vp->anon_vma = next->anon_vma; + + if (vmg && vmg->__remove_middle) { + *remove = vmg->middle; + remove = &vp->remove2; + } + if (vmg && vmg->__remove_next) + *remove = vmg->next; + + if (vmg && vmg->__adjust_middle_start) + adjust = vmg->middle; + else if (vmg && vmg->__adjust_next_start) + adjust = vmg->next; + else + adjust = NULL; + + vp->adj_next = adjust; + if (!vp->anon_vma && adjust) + vp->anon_vma = adjust->anon_vma; + + VM_WARN_ON(vp->anon_vma && adjust && adjust->anon_vma && + vp->anon_vma != adjust->anon_vma); vp->file = vma->vm_file; if (vp->file) @@ -360,7 +376,7 @@ again: */ static void init_vma_prep(struct vma_prepare *vp, struct vm_area_struct *vma) { - init_multi_vma_prep(vp, vma, NULL, NULL, NULL); + init_multi_vma_prep(vp, vma, NULL); } /* @@ -634,76 +650,63 @@ void validate_mm(struct mm_struct *mm) */ static struct vm_area_struct *commit_merge(struct vma_merge_struct *vmg) { - struct vm_area_struct *remove = NULL; - struct vm_area_struct *remove2 = NULL; + struct vm_area_struct *vma; struct vma_prepare vp; - struct vm_area_struct *adjust = NULL; + struct vm_area_struct *adjust; long adj_start; - bool merge_target; /* * If modifying an existing VMA and we don't remove vmg->middle, then we * shrink the adjacent VMA. */ if (vmg->__adjust_middle_start) { + vma = vmg->target; adjust = vmg->middle; /* The POSITIVE value by which we offset vmg->middle->vm_start. */ adj_start = vmg->end - vmg->middle->vm_start; - merge_target = true; + + /* Note: vma iterator must be pointing to 'start'. */ + vma_iter_config(vmg->vmi, vmg->start, vmg->end); } else if (vmg->__adjust_next_start) { + /* + * In this case alone, the VMA we manipulate is vmg->middle, but + * we ultimately return vmg->next. + */ + vma = vmg->middle; adjust = vmg->next; /* The NEGATIVE value by which we offset vmg->next->vm_start. */ adj_start = -(vmg->middle->vm_end - vmg->end); - /* - * In all cases but this - merge right, shrink next - we write - * vmg->target to the maple tree and return this as the merged VMA. - */ - merge_target = false; + + vma_iter_config(vmg->vmi, vmg->next->vm_start + adj_start, + vmg->next->vm_end); } else { + vma = vmg->target; adjust = NULL; adj_start = 0; - merge_target = true; - } - - if (vmg->__remove_middle) - remove = vmg->middle; - if (vmg->__remove_next) - remove2 = vmg->next; - - init_multi_vma_prep(&vp, vmg->target, adjust, remove, remove2); - VM_WARN_ON(vp.anon_vma && adjust && adjust->anon_vma && - vp.anon_vma != adjust->anon_vma); - - if (merge_target) { - /* Note: vma iterator must be pointing to 'start'. */ + /* Note: vma iterator must be pointing to 'start'. 
*/ vma_iter_config(vmg->vmi, vmg->start, vmg->end); - } else { - vma_iter_config(vmg->vmi, adjust->vm_start + adj_start, - adjust->vm_end); } - if (vma_iter_prealloc(vmg->vmi, vmg->target)) + init_multi_vma_prep(&vp, vma, vmg); + + if (vma_iter_prealloc(vmg->vmi, vma)) return NULL; vma_prepare(&vp); - vma_adjust_trans_huge(vmg->target, vmg->start, vmg->end, adj_start); - vma_set_range(vmg->target, vmg->start, vmg->end, vmg->pgoff); - - if (merge_target) - vma_iter_store(vmg->vmi, vmg->target); + vma_adjust_trans_huge(vma, vmg->start, vmg->end, adj_start); + vma_set_range(vma, vmg->start, vmg->end, vmg->pgoff); if (adj_start) { adjust->vm_start += adj_start; adjust->vm_pgoff += PHYS_PFN(adj_start); - - if (!merge_target) - vma_iter_store(vmg->vmi, adjust); } - vma_complete(&vp, vmg->vmi, vmg->target->vm_mm); + vma_iter_store(vmg->vmi, vmg->target); + + vma_complete(&vp, vmg->vmi, vma->vm_mm); - return merge_target ? vmg->target : vmg->next; + return vmg->target; } /* We can only remove VMAs when merging if they do not have a close hook. */ @@ -833,11 +836,15 @@ static __must_check struct vm_area_struct *vma_merge_existing_range( /* No matter what happens, we will be adjusting middle. */ vma_start_write(middle); - if (merge_left) - vma_start_write(prev); - - if (merge_right) + if (merge_right) { vma_start_write(next); + vmg->target = next; + } + + if (merge_left) { + vma_start_write(prev); + vmg->target = prev; + } if (merge_both) { /* @@ -847,7 +854,6 @@ static __must_check struct vm_area_struct *vma_merge_existing_range( * extend delete delete */ - vmg->target = prev; vmg->start = prev->vm_start; vmg->end = next->vm_end; vmg->pgoff = prev->vm_pgoff; @@ -868,7 +874,6 @@ static __must_check struct vm_area_struct *vma_merge_existing_range( * extend shrink/delete */ - vmg->target = prev; vmg->start = prev->vm_start; vmg->pgoff = prev->vm_pgoff; @@ -892,7 +897,6 @@ static __must_check struct vm_area_struct *vma_merge_existing_range( VM_WARN_ON_VMG(vmg->start > middle->vm_start && prev && middle != prev, vmg); if (vmg->__remove_middle) { - vmg->target = next; vmg->end = next->vm_end; vmg->pgoff = next->vm_pgoff - pglen; } else { @@ -903,7 +907,6 @@ static __must_check struct vm_area_struct *vma_merge_existing_range( * merged VMA is NOT vmg->target, but rather vmg->next. */ vmg->__adjust_next_start = true; - vmg->target = middle; vmg->start = middle->vm_start; vmg->end = start; vmg->pgoff = middle->vm_pgoff; diff --git a/mm/vma.h b/mm/vma.h index e18487797fa4..e55e68abfbe3 100644 --- a/mm/vma.h +++ b/mm/vma.h @@ -82,11 +82,7 @@ struct vma_merge_struct { struct vm_area_struct *prev; struct vm_area_struct *middle; struct vm_area_struct *next; - /* - * This is the VMA we ultimately target to become the merged VMA, except - * for the one exception of merge right, shrink next (for details of - * this scenario see vma_merge_existing_range()). - */ + /* This is the VMA we ultimately target to become the merged VMA. */ struct vm_area_struct *target; /* * Initially, the start, end, pgoff fields are provided by the caller -- 2.50.1 From c372473a545edff2fdbc002fc67c181e17c7557b Mon Sep 17 00:00:00 2001 From: Lorenzo Stoakes Date: Fri, 31 Jan 2025 12:31:53 +0000 Subject: [PATCH 13/16] mm: completely abstract unnecessary adj_start calculation The adj_start calculation has been a constant source of confusion in the VMA merge code. There are two cases to consider, one where we adjust the start of the vmg->middle VMA (i.e. 
the vmg->__adjust_middle_start merge flag is set), in which case adj_start is calculated as: (1) adj_start = vmg->end - vmg->middle->vm_start And the case where we adjust the start of the vmg->next VMA (i.e. the vmg->__adjust_next_start merge flag is set), in which case adj_start is calculated as: (2) adj_start = -(vmg->middle->vm_end - vmg->end) We apply (1) thusly: vmg->middle->vm_start = vmg->middle->vm_start + vmg->end - vmg->middle->vm_start Which simplifies to: vmg->middle->vm_start = vmg->end Similarly, we apply (2) as: vmg->next->vm_start = vmg->next->vm_start + -(vmg->middle->vm_end - vmg->end) Noting that for these VMAs to be mergeable vmg->middle->vm_end == vmg->next->vm_start and so this simplifies to: vmg->next->vm_start = vmg->next->vm_start + -(vmg->next->vm_start - vmg->end) Which simplifies to: vmg->next->vm_start = vmg->end Therefore in each case, we simply need to adjust the start of the VMA to vmg->end (!) and can do away with this adj_start calculation. The only caveat is that we must ensure we update the vm_pgoff field correctly. We therefore abstract this entire calculation to a new function vmg_adjust_set_range() which performs this calculation and sets the adjusted VMA's new range using the general vma_set_range() function. We also must update vma_adjust_trans_huge() which expects the now-abstracted adj_start parameter. It turns out this is wholly unnecessary. In vma_adjust_trans_huge() the relevant code is: if (adjust_next > 0) { struct vm_area_struct *next = find_vma(vma->vm_mm, vma->vm_end); unsigned long nstart = next->vm_start; nstart += adjust_next; split_huge_pmd_if_needed(next, nstart); } The only case where this is relevant is when vmg->__adjust_middle_start is specified (in which case adj_next would have been positive), i.e. the one in which the vma specified is vmg->prev and this the sought 'next' VMA would be vmg->middle. We can therefore eliminate the find_vma() invocation altogether and simply provide the vmg->middle VMA in this instance, or NULL otherwise. Again we have an adj_next offset calculation: next->vm_start + vmg->end - vmg->middle->vm_start Where next == vmg->middle this simplifies to vmg->end as previously demonstrated. Therefore nstart is equal to vmg->end, which is already passed to vma_adjust_trans_huge() via the 'end' parameter and so this code (rather delightfully) simplifies to: if (next) split_huge_pmd_if_needed(next, end); With these changes in place, it becomes silly for commit_merge() to return vmg->target, as it is always the same and threaded through vmg, so we finally change commit_merge() to return an error value once again. This patch has no change in functional behaviour. 
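For readers who prefer numbers to algebra, a small self-contained sketch of the resulting rule (user-space C with hypothetical toy_* names, assuming 4KiB pages) checks the vm_start/vm_pgoff updates for both cases:

    #include <assert.h>
    #include <stdbool.h>

    #define TOY_PAGE_SHIFT  12                      /* assume 4KiB pages */
    #define TOY_PHYS_PFN(x) ((unsigned long)(x) >> TOY_PAGE_SHIFT)

    struct toy_vma { unsigned long vm_start, vm_end, vm_pgoff; };

    /* Whichever VMA we adjust, its new start is simply vmg->end. */
    static void toy_adjust_set_range(struct toy_vma *adjust, unsigned long vmg_end,
                                     bool adjust_middle)
    {
            if (adjust_middle)      /* prev grew: middle's start moves up to vmg->end */
                    adjust->vm_pgoff += TOY_PHYS_PFN(vmg_end - adjust->vm_start);
            else                    /* middle shrank: next's start moves down to vmg->end */
                    adjust->vm_pgoff -= TOY_PHYS_PFN(adjust->vm_start - vmg_end);
            adjust->vm_start = vmg_end;
    }

    int main(void)
    {
            struct toy_vma middle = { 0x3000, 0x7000, 3 };  /* case (1) */
            struct toy_vma next   = { 0x7000, 0x9000, 7 };  /* case (2) */

            toy_adjust_set_range(&middle, 0x5000, true);
            assert(middle.vm_start == 0x5000 && middle.vm_pgoff == 5);

            toy_adjust_set_range(&next, 0x5000, false);
            assert(next.vm_start == 0x5000 && next.vm_pgoff == 5);
            return 0;
    }

Both asserts hold, matching the algebraic simplification above: in either case the adjusted VMA ends up starting at vmg->end, with vm_pgoff moved by the same number of pages.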
Link: https://lkml.kernel.org/r/7bce2cd4b5afb56211822835d145471280c3dccc.1738326519.git.lorenzo.stoakes@oracle.com Signed-off-by: Lorenzo Stoakes Reviewed-by: Vlastimil Babka Cc: Jann Horn Cc: Liam Howlett Signed-off-by: Andrew Morton --- include/linux/huge_mm.h | 4 +- mm/huge_memory.c | 19 ++---- mm/vma.c | 99 +++++++++++++++----------------- tools/testing/vma/vma_internal.h | 4 +- 4 files changed, 57 insertions(+), 69 deletions(-) diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index 93e509b6c00e..e1bea54820ff 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h @@ -404,7 +404,7 @@ int madvise_collapse(struct vm_area_struct *vma, struct vm_area_struct **prev, unsigned long start, unsigned long end); void vma_adjust_trans_huge(struct vm_area_struct *vma, unsigned long start, - unsigned long end, long adjust_next); + unsigned long end, struct vm_area_struct *next); spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma); spinlock_t *__pud_trans_huge_lock(pud_t *pud, struct vm_area_struct *vma); @@ -571,7 +571,7 @@ static inline int madvise_collapse(struct vm_area_struct *vma, static inline void vma_adjust_trans_huge(struct vm_area_struct *vma, unsigned long start, unsigned long end, - long adjust_next) + struct vm_area_struct *next) { } static inline int is_swap_pmd(pmd_t pmd) diff --git a/mm/huge_memory.c b/mm/huge_memory.c index e33da765c428..e7ac4f0dc21d 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -3017,9 +3017,9 @@ static inline void split_huge_pmd_if_needed(struct vm_area_struct *vma, unsigned } void vma_adjust_trans_huge(struct vm_area_struct *vma, - unsigned long start, - unsigned long end, - long adjust_next) + unsigned long start, + unsigned long end, + struct vm_area_struct *next) { /* Check if we need to split start first. */ split_huge_pmd_if_needed(vma, start); @@ -3027,16 +3027,9 @@ void vma_adjust_trans_huge(struct vm_area_struct *vma, /* Check if we need to split end next. */ split_huge_pmd_if_needed(vma, end); - /* - * If we're also updating the next vma vm_start, - * check if we need to split it. - */ - if (adjust_next > 0) { - struct vm_area_struct *next = find_vma(vma->vm_mm, vma->vm_end); - unsigned long nstart = next->vm_start; - nstart += adjust_next; - split_huge_pmd_if_needed(next, nstart); - } + /* If we're incrementing next->vm_start, we might need to split it. */ + if (next) + split_huge_pmd_if_needed(next, end); } static void unmap_folio(struct folio *folio) diff --git a/mm/vma.c b/mm/vma.c index b10625e8fc99..603e538a093f 100644 --- a/mm/vma.c +++ b/mm/vma.c @@ -513,7 +513,7 @@ __split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma, init_vma_prep(&vp, vma); vp.insert = new; vma_prepare(&vp); - vma_adjust_trans_huge(vma, vma->vm_start, addr, 0); + vma_adjust_trans_huge(vma, vma->vm_start, addr, NULL); if (new_below) { vma->vm_start = addr; @@ -644,46 +644,44 @@ void validate_mm(struct mm_struct *mm) #endif /* CONFIG_DEBUG_VM_MAPLE_TREE */ /* - * Actually perform the VMA merge operation. - * - * On success, returns the merged VMA. Otherwise returns NULL. + * Based on the vmg flag indicating whether we need to adjust the vm_start field + * for the middle or next VMA, we calculate what the range of the newly adjusted + * VMA ought to be, and set the VMA's range accordingly. 
*/ -static struct vm_area_struct *commit_merge(struct vma_merge_struct *vmg) +static void vmg_adjust_set_range(struct vma_merge_struct *vmg) { - struct vm_area_struct *vma; - struct vma_prepare vp; struct vm_area_struct *adjust; - long adj_start; + pgoff_t pgoff; - /* - * If modifying an existing VMA and we don't remove vmg->middle, then we - * shrink the adjacent VMA. - */ if (vmg->__adjust_middle_start) { - vma = vmg->target; adjust = vmg->middle; - /* The POSITIVE value by which we offset vmg->middle->vm_start. */ - adj_start = vmg->end - vmg->middle->vm_start; - - /* Note: vma iterator must be pointing to 'start'. */ - vma_iter_config(vmg->vmi, vmg->start, vmg->end); + pgoff = adjust->vm_pgoff + PHYS_PFN(vmg->end - adjust->vm_start); } else if (vmg->__adjust_next_start) { - /* - * In this case alone, the VMA we manipulate is vmg->middle, but - * we ultimately return vmg->next. - */ - vma = vmg->middle; adjust = vmg->next; - /* The NEGATIVE value by which we offset vmg->next->vm_start. */ - adj_start = -(vmg->middle->vm_end - vmg->end); + pgoff = adjust->vm_pgoff - PHYS_PFN(adjust->vm_start - vmg->end); + } else { + return; + } + + vma_set_range(adjust, vmg->end, adjust->vm_end, pgoff); +} + +/* + * Actually perform the VMA merge operation. + * + * Returns 0 on success, or an error value on failure. + */ +static int commit_merge(struct vma_merge_struct *vmg) +{ + struct vm_area_struct *vma; + struct vma_prepare vp; - vma_iter_config(vmg->vmi, vmg->next->vm_start + adj_start, - vmg->next->vm_end); + if (vmg->__adjust_next_start) { + /* We manipulate middle and adjust next, which is the target. */ + vma = vmg->middle; + vma_iter_config(vmg->vmi, vmg->end, vmg->next->vm_end); } else { vma = vmg->target; - adjust = NULL; - adj_start = 0; - /* Note: vma iterator must be pointing to 'start'. */ vma_iter_config(vmg->vmi, vmg->start, vmg->end); } @@ -691,22 +689,22 @@ static struct vm_area_struct *commit_merge(struct vma_merge_struct *vmg) init_multi_vma_prep(&vp, vma, vmg); if (vma_iter_prealloc(vmg->vmi, vma)) - return NULL; + return -ENOMEM; vma_prepare(&vp); - vma_adjust_trans_huge(vma, vmg->start, vmg->end, adj_start); + /* + * THP pages may need to do additional splits if we increase + * middle->vm_start. + */ + vma_adjust_trans_huge(vma, vmg->start, vmg->end, + vmg->__adjust_middle_start ? vmg->middle : NULL); vma_set_range(vma, vmg->start, vmg->end, vmg->pgoff); - - if (adj_start) { - adjust->vm_start += adj_start; - adjust->vm_pgoff += PHYS_PFN(adj_start); - } - + vmg_adjust_set_range(vmg); vma_iter_store(vmg->vmi, vmg->target); vma_complete(&vp, vmg->vmi, vma->vm_mm); - return vmg->target; + return 0; } /* We can only remove VMAs when merging if they do not have a close hook. */ @@ -749,7 +747,7 @@ static __must_check struct vm_area_struct *vma_merge_existing_range( { struct vm_area_struct *middle = vmg->middle; struct vm_area_struct *prev = vmg->prev; - struct vm_area_struct *next, *res; + struct vm_area_struct *next; struct vm_area_struct *anon_dup = NULL; unsigned long start = vmg->start; unsigned long end = vmg->end; @@ -900,12 +898,7 @@ static __must_check struct vm_area_struct *vma_merge_existing_range( vmg->end = next->vm_end; vmg->pgoff = next->vm_pgoff - pglen; } else { - /* - * We shrink middle and expand next. - * - * IMPORTANT: This is the ONLY case where the final - * merged VMA is NOT vmg->target, but rather vmg->next. - */ + /* We shrink middle and expand next. 
*/ vmg->__adjust_next_start = true; vmg->start = middle->vm_start; vmg->end = start; @@ -918,8 +911,10 @@ static __must_check struct vm_area_struct *vma_merge_existing_range( if (err) goto abort; - res = commit_merge(vmg); - if (!res) { + err = commit_merge(vmg); + if (err) { + VM_WARN_ON(err != -ENOMEM); + if (anon_dup) unlink_anon_vmas(anon_dup); @@ -927,9 +922,9 @@ static __must_check struct vm_area_struct *vma_merge_existing_range( return NULL; } - khugepaged_enter_vma(res, vmg->flags); + khugepaged_enter_vma(vmg->target, vmg->flags); vmg->state = VMA_MERGE_SUCCESS; - return res; + return vmg->target; abort: vma_iter_set(vmg->vmi, start); @@ -1092,7 +1087,7 @@ int vma_expand(struct vma_merge_struct *vmg) if (remove_next) vmg->__remove_next = true; - if (!commit_merge(vmg)) + if (commit_merge(vmg)) goto nomem; return 0; @@ -1132,7 +1127,7 @@ int vma_shrink(struct vma_iterator *vmi, struct vm_area_struct *vma, init_vma_prep(&vp, vma); vma_prepare(&vp); - vma_adjust_trans_huge(vma, start, end, 0); + vma_adjust_trans_huge(vma, start, end, NULL); vma_iter_clear(vmi); vma_set_range(vma, start, end, pgoff); diff --git a/tools/testing/vma/vma_internal.h b/tools/testing/vma/vma_internal.h index 1eae23039854..bb273927af0f 100644 --- a/tools/testing/vma/vma_internal.h +++ b/tools/testing/vma/vma_internal.h @@ -796,12 +796,12 @@ static inline void vma_start_write(struct vm_area_struct *vma) static inline void vma_adjust_trans_huge(struct vm_area_struct *vma, unsigned long start, unsigned long end, - long adjust_next) + struct vm_area_struct *next) { (void)vma; (void)start; (void)end; - (void)adjust_next; + (void)next; } static inline void vma_iter_free(struct vma_iterator *vmi) -- 2.50.1 From 51ff4d7486f0c0b4110a6da4af805b179dd7b11e Mon Sep 17 00:00:00 2001 From: Suren Baghdasaryan Date: Sat, 1 Feb 2025 15:18:00 -0800 Subject: [PATCH 14/16] mm: avoid extra mem_alloc_profiling_enabled() checks Refactor code to avoid extra mem_alloc_profiling_enabled() checks inside pgalloc_tag_get() function which is often called after that check was already done. 
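The shape of the change is the usual "caller already checked" split: the lookup helper gains a double-underscore name plus a comment stating its precondition, and the remaining wrappers test the predicate exactly once. A minimal sketch of that pattern in plain C (profiling_enabled(), toy_tag_sub() and friends are hypothetical stand-ins, not the kernel API):

    #include <stdbool.h>
    #include <stddef.h>

    static bool profiling_enabled_flag;     /* stands in for the static key */

    static inline bool profiling_enabled(void)
    {
            return profiling_enabled_flag;
    }

    struct toy_tag { long bytes; };

    /* Should be called only if profiling_enabled() -- no re-check inside. */
    static struct toy_tag *__toy_tag_get(const void *obj)
    {
            (void)obj;
            return NULL;                    /* actual tag lookup elided */
    }

    static void toy_tag_sub(const void *obj, size_t bytes)
    {
            struct toy_tag *tag;

            if (!profiling_enabled())       /* the one and only check on this path */
                    return;

            tag = __toy_tag_get(obj);
            if (tag)
                    tag->bytes -= (long)bytes;
    }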
Link: https://lkml.kernel.org/r/20250201231803.2661189-1-surenb@google.com Signed-off-by: Suren Baghdasaryan Reviewed-by: Shakeel Butt Cc: David Wang <00107082@163.com> Cc: Steven Rostedt Cc: Kent Overstreet Cc: Minchan Kim Cc: Pasha Tatashin Cc: Peter Zijlstra (Intel) Cc: Sourav Panda Cc: Vlastimil Babka Cc: Yu Zhao Cc: Zhenhua Huang Signed-off-by: Andrew Morton --- include/linux/pgalloc_tag.h | 35 +++++++++++++++++++---------------- lib/alloc_tag.c | 6 +++--- mm/page_alloc.c | 3 +-- 3 files changed, 23 insertions(+), 21 deletions(-) diff --git a/include/linux/pgalloc_tag.h b/include/linux/pgalloc_tag.h index 3469c4b20105..4a82b6b4820e 100644 --- a/include/linux/pgalloc_tag.h +++ b/include/linux/pgalloc_tag.h @@ -205,28 +205,32 @@ static inline void pgalloc_tag_sub(struct page *page, unsigned int nr) } } -static inline struct alloc_tag *pgalloc_tag_get(struct page *page) +/* Should be called only if mem_alloc_profiling_enabled() */ +static inline struct alloc_tag *__pgalloc_tag_get(struct page *page) { struct alloc_tag *tag = NULL; - - if (mem_alloc_profiling_enabled()) { - union pgtag_ref_handle handle; - union codetag_ref ref; - - if (get_page_tag_ref(page, &ref, &handle)) { - alloc_tag_sub_check(&ref); - if (ref.ct) - tag = ct_to_alloc_tag(ref.ct); - put_page_tag_ref(handle); - } + union pgtag_ref_handle handle; + union codetag_ref ref; + + if (get_page_tag_ref(page, &ref, &handle)) { + alloc_tag_sub_check(&ref); + if (ref.ct) + tag = ct_to_alloc_tag(ref.ct); + put_page_tag_ref(handle); } return tag; } -static inline void pgalloc_tag_sub_pages(struct alloc_tag *tag, unsigned int nr) +static inline void pgalloc_tag_sub_pages(struct page *page, unsigned int nr) { - if (mem_alloc_profiling_enabled() && tag) + struct alloc_tag *tag; + + if (!mem_alloc_profiling_enabled()) + return; + + tag = __pgalloc_tag_get(page); + if (tag) this_cpu_sub(tag->counters->bytes, PAGE_SIZE * nr); } @@ -241,8 +245,7 @@ static inline void clear_page_tag_ref(struct page *page) {} static inline void pgalloc_tag_add(struct page *page, struct task_struct *task, unsigned int nr) {} static inline void pgalloc_tag_sub(struct page *page, unsigned int nr) {} -static inline struct alloc_tag *pgalloc_tag_get(struct page *page) { return NULL; } -static inline void pgalloc_tag_sub_pages(struct alloc_tag *tag, unsigned int nr) {} +static inline void pgalloc_tag_sub_pages(struct page *page, unsigned int nr) {} static inline void alloc_tag_sec_init(void) {} static inline void pgalloc_tag_split(struct folio *folio, int old_order, int new_order) {} static inline void pgalloc_tag_swap(struct folio *new, struct folio *old) {} diff --git a/lib/alloc_tag.c b/lib/alloc_tag.c index 19b45617bdcf..1d893e313614 100644 --- a/lib/alloc_tag.c +++ b/lib/alloc_tag.c @@ -174,7 +174,7 @@ void pgalloc_tag_split(struct folio *folio, int old_order, int new_order) if (!mem_alloc_profiling_enabled()) return; - tag = pgalloc_tag_get(&folio->page); + tag = __pgalloc_tag_get(&folio->page); if (!tag) return; @@ -200,10 +200,10 @@ void pgalloc_tag_swap(struct folio *new, struct folio *old) if (!mem_alloc_profiling_enabled()) return; - tag_old = pgalloc_tag_get(&old->page); + tag_old = __pgalloc_tag_get(&old->page); if (!tag_old) return; - tag_new = pgalloc_tag_get(&new->page); + tag_new = __pgalloc_tag_get(&new->page); if (!tag_new) return; diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 542d25f77be8..38c2b2d20b1d 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -4833,12 +4833,11 @@ void __free_pages(struct page *page, unsigned int order) { /* 
get PageHead before we drop reference */ int head = PageHead(page); - struct alloc_tag *tag = pgalloc_tag_get(page); if (put_page_testzero(page)) free_frozen_pages(page, order); else if (!head) { - pgalloc_tag_sub_pages(tag, (1 << order) - 1); + pgalloc_tag_sub_pages(page, (1 << order) - 1); while (order-- > 0) free_frozen_pages(page + (1 << order), order); } -- 2.50.1 From a642b27b991fd663de1676bf91583d1e2397d93d Mon Sep 17 00:00:00 2001 From: Suren Baghdasaryan Date: Sat, 1 Feb 2025 15:18:01 -0800 Subject: [PATCH 15/16] alloc_tag: uninline code gated by mem_alloc_profiling_key in slab allocator When a sizable code section is protected by a disabled static key, that code gets into the instruction cache even though it's not executed and consumes the cache, increasing cache misses. This can be remedied by moving such code into a separate uninlined function. On a Pixel6 phone, slab allocation profiling overhead measured with CONFIG_MEM_ALLOC_PROFILING=y and profiling disabled is: baseline modified Big core 3.31% 0.17% Medium core 3.79% 0.57% Little core 6.68% 1.28% This improvement comes at the expense of the configuration when profiling gets enabled, since there is now an additional function call. The overhead from this additional call on Pixel6 is: Big core 0.66% Middle core 1.23% Little core 2.42% However this is negligible when compared with the overall overhead of the memory allocation profiling when it is enabled. On x86 this patch does not make noticeable difference because the overhead with mem_alloc_profiling_key disabled is much lower (under 1%) to start with, so any improvement is less visible and hard to distinguish from the noise. The overhead from additional call when profiling is enabled is also within noise levels. Link: https://lkml.kernel.org/r/20250201231803.2661189-2-surenb@google.com Signed-off-by: Suren Baghdasaryan Acked-by: Vlastimil Babka Reviewed-by: Shakeel Butt Cc: David Wang <00107082@163.com> Cc: Kent Overstreet Cc: Minchan Kim Cc: Pasha Tatashin Cc: Peter Zijlstra (Intel) Cc: Sourav Panda Cc: Steven Rostedt Cc: Yu Zhao Cc: Zhenhua Huang Signed-off-by: Andrew Morton --- mm/slub.c | 51 ++++++++++++++++++++++++++++++++------------------- 1 file changed, 32 insertions(+), 19 deletions(-) diff --git a/mm/slub.c b/mm/slub.c index 1f50129dcfb3..184fd2b14758 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -2000,7 +2000,8 @@ int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s, return 0; } -static inline void free_slab_obj_exts(struct slab *slab) +/* Should be called only if mem_alloc_profiling_enabled() */ +static noinline void free_slab_obj_exts(struct slab *slab) { struct slabobj_ext *obj_exts; @@ -2077,33 +2078,37 @@ prepare_slab_obj_exts_hook(struct kmem_cache *s, gfp_t flags, void *p) return slab_obj_exts(slab) + obj_to_index(s, slab, p); } -static inline void -alloc_tagging_slab_alloc_hook(struct kmem_cache *s, void *object, gfp_t flags) +/* Should be called only if mem_alloc_profiling_enabled() */ +static noinline void +__alloc_tagging_slab_alloc_hook(struct kmem_cache *s, void *object, gfp_t flags) { - if (need_slab_obj_ext()) { - struct slabobj_ext *obj_exts; + struct slabobj_ext *obj_exts; - obj_exts = prepare_slab_obj_exts_hook(s, flags, object); - /* - * Currently obj_exts is used only for allocation profiling. - * If other users appear then mem_alloc_profiling_enabled() - * check should be added before alloc_tag_add(). 
- */ - if (likely(obj_exts)) - alloc_tag_add(&obj_exts->ref, current->alloc_tag, s->size); - } + obj_exts = prepare_slab_obj_exts_hook(s, flags, object); + /* + * Currently obj_exts is used only for allocation profiling. + * If other users appear then mem_alloc_profiling_enabled() + * check should be added before alloc_tag_add(). + */ + if (likely(obj_exts)) + alloc_tag_add(&obj_exts->ref, current->alloc_tag, s->size); } static inline void -alloc_tagging_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p, - int objects) +alloc_tagging_slab_alloc_hook(struct kmem_cache *s, void *object, gfp_t flags) +{ + if (need_slab_obj_ext()) + __alloc_tagging_slab_alloc_hook(s, object, flags); +} + +/* Should be called only if mem_alloc_profiling_enabled() */ +static noinline void +__alloc_tagging_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p, + int objects) { struct slabobj_ext *obj_exts; int i; - if (!mem_alloc_profiling_enabled()) - return; - /* slab->obj_exts might not be NULL if it was created for MEMCG accounting. */ if (s->flags & (SLAB_NO_OBJ_EXT | SLAB_NOLEAKTRACE)) return; @@ -2119,6 +2124,14 @@ alloc_tagging_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p, } } +static inline void +alloc_tagging_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p, + int objects) +{ + if (mem_alloc_profiling_enabled()) + __alloc_tagging_slab_free_hook(s, slab, p, objects); +} + #else /* CONFIG_MEM_ALLOC_PROFILING */ static inline void -- 2.50.1 From 93d5440ece3c0aa341fb02e3a44a1b7ab44304c8 Mon Sep 17 00:00:00 2001 From: Suren Baghdasaryan Date: Sat, 1 Feb 2025 15:18:02 -0800 Subject: [PATCH 16/16] alloc_tag: uninline code gated by mem_alloc_profiling_key in page allocator When a sizable code section is protected by a disabled static key, that code gets into the instruction cache even though it's not executed and consumes the cache, increasing cache misses. This can be remedied by moving such code into a separate uninlined function. On a Pixel6 phone, page allocation profiling overhead measured with CONFIG_MEM_ALLOC_PROFILING=y and profiling disabled is: baseline modified Big core 4.93% 1.53% Medium core 4.39% 1.41% Little core 1.02% 0.36% This improvement comes at the expense of the configuration when profiling gets enabled, since there is now an additional function call. The overhead from this additional call on Pixel6 is: Big core 0.24% Middle core 0.63% Little core 1.1% However this is negligible when compared with the overall overhead of the memory allocation profiling when it is enabled. On x86 this patch does not make noticeable difference because the overhead with mem_alloc_profiling_key disabled is much lower (under 1%) to start with, so any improvement is less visible and hard to distinguish from the noise. The overhead from additional call when profiling is enabled is also within noise levels. 
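As with the slab patch, the transformation itself is mechanical: only the enablement test stays inline at each call site, and the cold body moves into a noinline helper so it stops occupying instruction cache while profiling is disabled. A sketch of the shape (GCC/Clang attribute syntax; hypothetical names; a plain bool stands in for the static key):

    #include <stdbool.h>

    static bool profiling_on;       /* the kernel uses a static key here */

    /* Cold body: only reached when profiling is on, so keep it out of line. */
    static __attribute__((noinline)) void __toy_account(void *obj, unsigned long bytes)
    {
            (void)obj;
            (void)bytes;
            /* ...look up the tag for obj and update its counters... */
    }

    /* Hot wrapper: the only code inlined at every call site is the test. */
    static inline void toy_account(void *obj, unsigned long bytes)
    {
            if (profiling_on)
                    __toy_account(obj, bytes);
    }

With a real static key compiled as a jump label, the disabled case reduces to a patched branch at each site, which is what makes keeping only the test inline worthwhile.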
Link: https://lkml.kernel.org/r/20250201231803.2661189-3-surenb@google.com Signed-off-by: Suren Baghdasaryan Reviewed-by: Shakeel Butt Cc: David Wang <00107082@163.com> Cc: Kent Overstreet Cc: Minchan Kim Cc: Pasha Tatashin Cc: Peter Zijlstra (Intel) Cc: Sourav Panda Cc: Steven Rostedt Cc: Vlastimil Babka Cc: Yu Zhao Cc: Zhenhua Huang Signed-off-by: Andrew Morton --- include/linux/pgalloc_tag.h | 60 +++------------------------- mm/page_alloc.c | 78 +++++++++++++++++++++++++++++++++++++ 2 files changed, 83 insertions(+), 55 deletions(-) diff --git a/include/linux/pgalloc_tag.h b/include/linux/pgalloc_tag.h index 4a82b6b4820e..c74077977830 100644 --- a/include/linux/pgalloc_tag.h +++ b/include/linux/pgalloc_tag.h @@ -162,47 +162,13 @@ static inline void update_page_tag_ref(union pgtag_ref_handle handle, union code } } -static inline void clear_page_tag_ref(struct page *page) -{ - if (mem_alloc_profiling_enabled()) { - union pgtag_ref_handle handle; - union codetag_ref ref; - - if (get_page_tag_ref(page, &ref, &handle)) { - set_codetag_empty(&ref); - update_page_tag_ref(handle, &ref); - put_page_tag_ref(handle); - } - } -} - -static inline void pgalloc_tag_add(struct page *page, struct task_struct *task, - unsigned int nr) -{ - if (mem_alloc_profiling_enabled()) { - union pgtag_ref_handle handle; - union codetag_ref ref; - - if (get_page_tag_ref(page, &ref, &handle)) { - alloc_tag_add(&ref, task->alloc_tag, PAGE_SIZE * nr); - update_page_tag_ref(handle, &ref); - put_page_tag_ref(handle); - } - } -} +/* Should be called only if mem_alloc_profiling_enabled() */ +void __clear_page_tag_ref(struct page *page); -static inline void pgalloc_tag_sub(struct page *page, unsigned int nr) +static inline void clear_page_tag_ref(struct page *page) { - if (mem_alloc_profiling_enabled()) { - union pgtag_ref_handle handle; - union codetag_ref ref; - - if (get_page_tag_ref(page, &ref, &handle)) { - alloc_tag_sub(&ref, PAGE_SIZE * nr); - update_page_tag_ref(handle, &ref); - put_page_tag_ref(handle); - } - } + if (mem_alloc_profiling_enabled()) + __clear_page_tag_ref(page); } /* Should be called only if mem_alloc_profiling_enabled() */ @@ -222,18 +188,6 @@ static inline struct alloc_tag *__pgalloc_tag_get(struct page *page) return tag; } -static inline void pgalloc_tag_sub_pages(struct page *page, unsigned int nr) -{ - struct alloc_tag *tag; - - if (!mem_alloc_profiling_enabled()) - return; - - tag = __pgalloc_tag_get(page); - if (tag) - this_cpu_sub(tag->counters->bytes, PAGE_SIZE * nr); -} - void pgalloc_tag_split(struct folio *folio, int old_order, int new_order); void pgalloc_tag_swap(struct folio *new, struct folio *old); @@ -242,10 +196,6 @@ void __init alloc_tag_sec_init(void); #else /* CONFIG_MEM_ALLOC_PROFILING */ static inline void clear_page_tag_ref(struct page *page) {} -static inline void pgalloc_tag_add(struct page *page, struct task_struct *task, - unsigned int nr) {} -static inline void pgalloc_tag_sub(struct page *page, unsigned int nr) {} -static inline void pgalloc_tag_sub_pages(struct page *page, unsigned int nr) {} static inline void alloc_tag_sec_init(void) {} static inline void pgalloc_tag_split(struct folio *folio, int old_order, int new_order) {} static inline void pgalloc_tag_swap(struct folio *new, struct folio *old) {} diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 38c2b2d20b1d..d875f055aa53 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -1041,6 +1041,84 @@ static void kernel_init_pages(struct page *page, int numpages) kasan_enable_current(); } +#ifdef 
CONFIG_MEM_ALLOC_PROFILING + +/* Should be called only if mem_alloc_profiling_enabled() */ +void __clear_page_tag_ref(struct page *page) +{ + union pgtag_ref_handle handle; + union codetag_ref ref; + + if (get_page_tag_ref(page, &ref, &handle)) { + set_codetag_empty(&ref); + update_page_tag_ref(handle, &ref); + put_page_tag_ref(handle); + } +} + +/* Should be called only if mem_alloc_profiling_enabled() */ +static noinline +void __pgalloc_tag_add(struct page *page, struct task_struct *task, + unsigned int nr) +{ + union pgtag_ref_handle handle; + union codetag_ref ref; + + if (get_page_tag_ref(page, &ref, &handle)) { + alloc_tag_add(&ref, task->alloc_tag, PAGE_SIZE * nr); + update_page_tag_ref(handle, &ref); + put_page_tag_ref(handle); + } +} + +static inline void pgalloc_tag_add(struct page *page, struct task_struct *task, + unsigned int nr) +{ + if (mem_alloc_profiling_enabled()) + __pgalloc_tag_add(page, task, nr); +} + +/* Should be called only if mem_alloc_profiling_enabled() */ +static noinline +void __pgalloc_tag_sub(struct page *page, unsigned int nr) +{ + union pgtag_ref_handle handle; + union codetag_ref ref; + + if (get_page_tag_ref(page, &ref, &handle)) { + alloc_tag_sub(&ref, PAGE_SIZE * nr); + update_page_tag_ref(handle, &ref); + put_page_tag_ref(handle); + } +} + +static inline void pgalloc_tag_sub(struct page *page, unsigned int nr) +{ + if (mem_alloc_profiling_enabled()) + __pgalloc_tag_sub(page, nr); +} + +static inline void pgalloc_tag_sub_pages(struct page *page, unsigned int nr) +{ + struct alloc_tag *tag; + + if (!mem_alloc_profiling_enabled()) + return; + + tag = __pgalloc_tag_get(page); + if (tag) + this_cpu_sub(tag->counters->bytes, PAGE_SIZE * nr); +} + +#else /* CONFIG_MEM_ALLOC_PROFILING */ + +static inline void pgalloc_tag_add(struct page *page, struct task_struct *task, + unsigned int nr) {} +static inline void pgalloc_tag_sub(struct page *page, unsigned int nr) {} +static inline void pgalloc_tag_sub_pages(struct page *page, unsigned int nr) {} + +#endif /* CONFIG_MEM_ALLOC_PROFILING */ + __always_inline bool free_pages_prepare(struct page *page, unsigned int order) { -- 2.50.1
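One detail from the final hunk is worth noting: with the helpers now private to page_alloc.c, the CONFIG_MEM_ALLOC_PROFILING=n build is covered by empty static inline stubs in the #else branch, so the free-path call sites stay free of #ifdef clutter. A generic sketch of that stub idiom (hypothetical config symbol and function name):

    struct page;    /* opaque here; only a pointer is passed through */

    #ifdef CONFIG_TOY_PROFILING
    void toy_tag_sub_pages(struct page *page, unsigned int nr);
    #else
    /* Compiles away entirely, so callers need no #ifdef of their own. */
    static inline void toy_tag_sub_pages(struct page *page, unsigned int nr) {}
    #endif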