From 440770944c0f8988fd09fe0698ab14fd023c35db Mon Sep 17 00:00:00 2001
From: Matthew Wilcox
Date: Tue, 12 Mar 2019 14:52:01 -0400
Subject: [PATCH] mm: Microoptimise truncate xarray calls

By passing the xa_state to __clear_shadow_entry(), we can set up the
xa_state once per batch instead of once per shadow entry.  This only
has a tiny effect on Jan's benchmark, but it has no downside.

Signed-off-by: Matthew Wilcox
---
 mm/truncate.c | 24 ++++++++++++++----------
 1 file changed, 14 insertions(+), 10 deletions(-)

diff --git a/mm/truncate.c b/mm/truncate.c
index 8563339041f6..22705dead7e1 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -32,23 +32,23 @@
  * lock.
  */
 static inline void __clear_shadow_entry(struct address_space *mapping,
-				pgoff_t index, void *entry)
+				struct xa_state *xas, void *entry)
 {
-	XA_STATE(xas, &mapping->i_pages, index);
-
-	xas_set_update(&xas, workingset_update_node);
-	if (xas_load(&xas) != entry)
+	if (xas_load(xas) != entry)
 		return;
-	xas_store(&xas, NULL);
+	xas_store(xas, NULL);
 	mapping->nrexceptional--;
 }
 
 static void clear_shadow_entry(struct address_space *mapping, pgoff_t index,
 			       void *entry)
 {
-	xa_lock_irq(&mapping->i_pages);
-	__clear_shadow_entry(mapping, index, entry);
-	xa_unlock_irq(&mapping->i_pages);
+	XA_STATE(xas, &mapping->i_pages, index);
+	xas_set_update(&xas, workingset_update_node);
+
+	xas_lock_irq(&xas);
+	__clear_shadow_entry(mapping, &xas, entry);
+	xas_unlock_irq(&xas);
 }
 
 /*
@@ -60,9 +60,12 @@ static void truncate_exceptional_pvec_entries(struct address_space *mapping,
 				struct pagevec *pvec, pgoff_t *indices,
 				pgoff_t end)
 {
+	XA_STATE(xas, &mapping->i_pages, 0);
 	int i, j;
 	bool dax, lock;
 
+	xas_set_update(&xas, workingset_update_node);
+
 	/* Handled by shmem itself */
 	if (shmem_mapping(mapping))
 		return;
@@ -96,7 +99,8 @@ static void truncate_exceptional_pvec_entries(struct address_space *mapping,
 			continue;
 		}
 
-		__clear_shadow_entry(mapping, index, page);
+		xas_set(&xas, index);
+		__clear_shadow_entry(mapping, &xas, page);
 	}
 
 	if (lock)
-- 
2.49.0
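
For readers unfamiliar with the pattern, below is a minimal userspace C
sketch of the same restructuring. It assumes a hypothetical toy_cursor
type in place of the kernel's xa_state; none of the names in it are
kernel API. It illustrates the point the patch makes: the cursor is set
up once per batch, and per-entry work is reduced to re-aiming its index
(the analogue of xas_set()).

#include <stdio.h>

/* Hypothetical stand-in for the kernel's xa_state: it remembers the
 * structure being walked, the current index, and an update callback,
 * all of which the patch now sets up once per batch. */
struct toy_cursor {
	const char **array;
	unsigned long index;
	void (*update)(void);
};

/* Stand-in for workingset_update_node. */
static void update_hook(void)
{
}

/* Analogue of xas_set(): re-aim an already-initialised cursor at a new
 * index without repeating the per-batch setup. */
static void cursor_set(struct toy_cursor *tc, unsigned long index)
{
	tc->index = index;
}

/* Analogue of __clear_shadow_entry() after the patch: it receives the
 * cursor rather than a bare index. */
static void clear_entry(struct toy_cursor *tc, const char *entry)
{
	if (tc->array[tc->index] != entry)
		return;
	tc->array[tc->index] = NULL;
	tc->update();	/* the callback fires on each store */
	printf("cleared entry at index %lu\n", tc->index);
}

int main(void)
{
	const char *entries[] = { "a", "b", "c", "d" };
	unsigned long indices[] = { 0, 2, 3 };
	unsigned long i;
	/* Per-batch setup happens exactly once, mirroring the single
	 * XA_STATE()/xas_set_update() pair in the patched function. */
	struct toy_cursor tc = { .array = entries, .update = update_hook };

	for (i = 0; i < sizeof(indices) / sizeof(indices[0]); i++) {
		/* Per-entry work is reduced to re-aiming the index. */
		cursor_set(&tc, indices[i]);
		clear_entry(&tc, entries[indices[i]]);
	}
	return 0;
}

Built with plain cc, this prints the three cleared indices. The saving
the real patch targets is the repeated XA_STATE() initialisation and
xas_set_update() call that previously ran once per shadow entry.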