From 354a595a4a4d9dfc0d3e5703c6c5520e6c2f52d8 Mon Sep 17 00:00:00 2001
From: Shakeel Butt
Date: Fri, 6 Sep 2024 16:05:12 -0700
Subject: [PATCH] mm: replace xa_get_order with xas_get_order where appropriate

Tracing of invalidation and truncation operations on large files showed
that xa_get_order() is among the top functions where the kernel spends
a lot of CPU time.  xa_get_order() has to traverse the tree to reach the
node for a given index and then extract the order of the entry.  However,
in many places it is called from within a tree traversal that is already
in progress, where another walk is unnecessary.  Just use xas_get_order()
at those places.

Link: https://lkml.kernel.org/r/20240906230512.124643-1-shakeel.butt@linux.dev
Signed-off-by: Shakeel Butt
Reviewed-by: Liam R. Howlett
Cc: Baolin Wang
Cc: Hugh Dickins
Cc: Matthew Wilcox
Cc: Nhat Pham
Signed-off-by: Andrew Morton
---
 mm/filemap.c | 6 +++---
 mm/shmem.c   | 2 +-
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/mm/filemap.c b/mm/filemap.c
index 7eb4637ea199..6f14d80feb37 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -2112,7 +2112,7 @@ unsigned find_lock_entries(struct address_space *mapping, pgoff_t *start,
 			VM_BUG_ON_FOLIO(!folio_contains(folio, xas.xa_index),
 					folio);
 		} else {
-			nr = 1 << xa_get_order(&mapping->i_pages, xas.xa_index);
+			nr = 1 << xas_get_order(&xas);
 			base = xas.xa_index & ~(nr - 1);
 			/* Omit order>0 value which begins before the start */
 			if (base < *start)
@@ -3001,7 +3001,7 @@ unlock:
 static inline size_t seek_folio_size(struct xa_state *xas, struct folio *folio)
 {
 	if (xa_is_value(folio))
-		return PAGE_SIZE << xa_get_order(xas->xa, xas->xa_index);
+		return PAGE_SIZE << xas_get_order(xas);
 	return folio_size(folio);
 }
 
@@ -4297,7 +4297,7 @@ static void filemap_cachestat(struct address_space *mapping,
 		if (xas_retry(&xas, folio))
 			continue;
 
-		order = xa_get_order(xas.xa, xas.xa_index);
+		order = xas_get_order(&xas);
 		nr_pages = 1 << order;
 		folio_first_index = round_down(xas.xa_index, 1 << order);
 		folio_last_index = folio_first_index + nr_pages - 1;
diff --git a/mm/shmem.c b/mm/shmem.c
index 74f093d88c78..361affdf3990 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -890,7 +890,7 @@ unsigned long shmem_partial_swap_usage(struct address_space *mapping,
 		if (xas_retry(&xas, page))
 			continue;
 		if (xa_is_value(page))
-			swapped += 1 << xa_get_order(xas.xa, xas.xa_index);
+			swapped += 1 << xas_get_order(&xas);
 		if (xas.xa_index == max)
 			break;
 		if (need_resched()) {
-- 
2.49.0
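
Illustrative sketch (not part of the patch): every call site touched above
sits inside an existing xarray walk, where the xa_state is already
positioned on the entry, so xas_get_order() can read the order directly
instead of re-walking the tree from the root as xa_get_order() does.  The
helper below is hypothetical and only shows the pattern; the xarray calls
(XA_STATE, xas_for_each, xas_retry, xas_get_order) are the real kernel
interfaces.

#include <linux/xarray.h>
#include <linux/pagemap.h>

/* Hypothetical helper: count pages covered by entries in [start, end]. */
static unsigned long count_pages_in_range(struct address_space *mapping,
					  pgoff_t start, pgoff_t end)
{
	XA_STATE(xas, &mapping->i_pages, start);
	unsigned long nr_pages = 0;
	void *entry;

	rcu_read_lock();
	xas_for_each(&xas, entry, end) {
		if (xas_retry(&xas, entry))
			continue;
		/*
		 * xa_get_order(xas.xa, xas.xa_index) would walk the tree
		 * again from the root; xas_get_order(&xas) reads the order
		 * from the node the iteration is already positioned on.
		 */
		nr_pages += 1UL << xas_get_order(&xas);
	}
	rcu_read_unlock();

	return nr_pages;
}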