www.infradead.org Git - users/willy/xarray.git/commitdiff
mm: Microoptimise truncate xarray calls (xarray-conv)
author    Matthew Wilcox <willy@infradead.org>
          Tue, 12 Mar 2019 18:52:01 +0000 (14:52 -0400)
committer Matthew Wilcox (Oracle) <willy@infradead.org>
          Fri, 9 Aug 2019 01:38:19 +0000 (21:38 -0400)
By passing the xa_state to __clear_shadow_entry(), we can set up the
xa_state once per batch instead of once per shadow entry.  This only
has a tiny effect on Jan's benchmark, but it has no downside.

Signed-off-by: Matthew Wilcox <willy@infradead.org>
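
For illustration, a distilled sketch of the once-per-batch xa_state pattern this
patch moves to.  clear_entries_batch() and its arguments are hypothetical and
exist only for this sketch; the real code is truncate_exceptional_pvec_entries()
in the diff below.

/*
 * Sketch only: clear_entries_batch() is a made-up caller showing the
 * xa_state being initialised once per batch rather than once per entry.
 */
#include <linux/fs.h>
#include <linux/swap.h>		/* workingset_update_node() */
#include <linux/xarray.h>

static void clear_entries_batch(struct address_space *mapping,
				pgoff_t *indices, void **entries, int nr)
{
	/* One xa_state and one update-callback registration per batch. */
	XA_STATE(xas, &mapping->i_pages, 0);
	int i;

	xas_set_update(&xas, workingset_update_node);

	xas_lock_irq(&xas);
	for (i = 0; i < nr; i++) {
		/* Re-point the existing cursor instead of rebuilding it. */
		xas_set(&xas, indices[i]);
		if (xas_load(&xas) == entries[i]) {
			xas_store(&xas, NULL);
			mapping->nrexceptional--;
		}
	}
	xas_unlock_irq(&xas);
}

Compared with declaring XA_STATE() inside __clear_shadow_entry(), this avoids
reinitialising the cursor and re-registering workingset_update_node for every
shadow entry in the batch, which is the micro-optimisation the commit message
describes.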
mm/truncate.c

diff --git a/mm/truncate.c b/mm/truncate.c
index 8563339041f68c4a602ab364f913636071ccda70..22705dead7e1d03cecb8b4147c406009e15b255c 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
  * lock.
  */
 static inline void __clear_shadow_entry(struct address_space *mapping,
-                               pgoff_t index, void *entry)
+               struct xa_state *xas, void *entry)
 {
-       XA_STATE(xas, &mapping->i_pages, index);
-
-       xas_set_update(&xas, workingset_update_node);
-       if (xas_load(&xas) != entry)
+       if (xas_load(xas) != entry)
                return;
-       xas_store(&xas, NULL);
+       xas_store(xas, NULL);
        mapping->nrexceptional--;
 }
 
 static void clear_shadow_entry(struct address_space *mapping, pgoff_t index,
                               void *entry)
 {
-       xa_lock_irq(&mapping->i_pages);
-       __clear_shadow_entry(mapping, index, entry);
-       xa_unlock_irq(&mapping->i_pages);
+       XA_STATE(xas, &mapping->i_pages, index);
+       xas_set_update(&xas, workingset_update_node);
+
+       xas_lock_irq(&xas);
+       __clear_shadow_entry(mapping, &xas, entry);
+       xas_unlock_irq(&xas);
 }
 
 /*
@@ -60,9 +60,12 @@ static void truncate_exceptional_pvec_entries(struct address_space *mapping,
                                struct pagevec *pvec, pgoff_t *indices,
                                pgoff_t end)
 {
+       XA_STATE(xas, &mapping->i_pages, 0);
        int i, j;
        bool dax, lock;
 
+       xas_set_update(&xas, workingset_update_node);
+
        /* Handled by shmem itself */
        if (shmem_mapping(mapping))
                return;
@@ -96,7 +99,8 @@ static void truncate_exceptional_pvec_entries(struct address_space *mapping,
                        continue;
                }
 
-               __clear_shadow_entry(mapping, index, page);
+               xas_set(&xas, index);
+               __clear_shadow_entry(mapping, &xas, page);
        }
 
        if (lock)