fscache: Convert to XArray
author    Matthew Wilcox <willy@infradead.org>
          Wed, 26 Sep 2018 14:21:30 +0000 (10:21 -0400)
committer Matthew Wilcox (Oracle) <willy@infradead.org>
          Thu, 8 Aug 2019 14:29:40 +0000 (10:29 -0400)
Use the advanced XArray API for this conversion for efficiency. This removes
another user of radix_tree_preload().

Signed-off-by: Matthew Wilcox <willy@infradead.org>
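
As background for the conversion: the normal XArray API (xa_load(), xa_store(),
xa_erase()) takes the array's internal spinlock around each call, while the
advanced API (XA_STATE() plus the xas_* functions) lets the caller hold that
lock across a lookup, mark updates and a store, and replaces
radix_tree_preload() with xas_nomem(), which allocates outside the lock and
asks for a retry. A minimal sketch of that pattern, using illustrative names
(example_insert, XA_MARK_0) rather than anything from this patch:

    /* Sketch only: insert an entry and set a mark under one lock hold,
     * retrying the store after xas_nomem() has allocated memory outside
     * the lock.  This mirrors the pattern used below, not the exact code.
     */
    #include <linux/xarray.h>

    static int example_insert(struct xarray *xa, unsigned long index,
                              void *entry, gfp_t gfp)
    {
            XA_STATE(xas, xa, index);

            do {
                    xas_lock(&xas);
                    xas_store(&xas, entry);
                    xas_set_mark(&xas, XA_MARK_0);
                    xas_unlock(&xas);
            } while (xas_nomem(&xas, gfp));   /* allocate and retry on -ENOMEM */

            return xas_error(&xas);
    }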
fs/fscache/cookie.c
fs/fscache/internal.h
fs/fscache/object.c
fs/fscache/page.c
fs/fscache/stats.c
include/linux/fscache.h
include/trace/events/fscache.h

index 0ce39658a6200605a7cb00ad60eb86178929946b..55393c1429138ea69ae0220945bb9232b6382198 100644 (file)
@@ -175,12 +175,9 @@ struct fscache_cookie *fscache_alloc_cookie(
        cookie->flags           = (1 << FSCACHE_COOKIE_NO_DATA_YET);
        cookie->type            = def->type;
        spin_lock_init(&cookie->lock);
-       spin_lock_init(&cookie->stores_lock);
        INIT_HLIST_HEAD(&cookie->backing_objects);
 
-       /* radix tree insertion won't use the preallocation pool unless it's
-        * told it may not wait */
-       INIT_RADIX_TREE(&cookie->stores, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
+       xa_init(&cookie->stores);
        return cookie;
 
 nomem:
@@ -818,7 +815,7 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie,
        /* Clear pointers back to the netfs */
        cookie->netfs_data      = NULL;
        cookie->def             = NULL;
-       BUG_ON(!radix_tree_empty(&cookie->stores));
+       BUG_ON(!xa_empty(&cookie->stores));
 
        if (cookie->parent) {
                ASSERTCMP(atomic_read(&cookie->parent->usage), >, 0);
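
The per-cookie stores_lock can go because an XArray initialised with xa_init()
carries its own spinlock (xa_lock): the normal API takes it internally and the
advanced API takes it via xas_lock(). A small illustration under hypothetical
names (example_cookie, example_*):

    /* Illustration only: the xarray replaces the radix tree root plus its
     * external lock; allocation flags are now passed per operation rather
     * than being recorded in the tree root.
     */
    #include <linux/xarray.h>

    struct example_cookie {
            struct xarray stores;           /* was: radix tree root + stores_lock */
    };

    static void example_init(struct example_cookie *c)
    {
            xa_init(&c->stores);
    }

    static bool example_no_stores(struct example_cookie *c)
    {
            return xa_empty(&c->stores);    /* lockless check, as in the BUG_ON above */
    }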
index 9616af3768e11bd3dbbe3c4c0304ca7e405fadfe..21fcc629097ccb99a129301cfd6161ca96e5091a 100644 (file)
@@ -205,7 +205,7 @@ extern atomic_t fscache_n_stores_oom;
 extern atomic_t fscache_n_store_ops;
 extern atomic_t fscache_n_store_calls;
 extern atomic_t fscache_n_store_pages;
-extern atomic_t fscache_n_store_radix_deletes;
+extern atomic_t fscache_n_store_xarray_deletes;
 extern atomic_t fscache_n_store_pages_over_limit;
 
 extern atomic_t fscache_n_store_vmscan_not_storing;
index cfeba839a0f2f1620ea4d726b5b847e54be84910..44adc078c97716f6bd9c2e67e34be30cabc1e64b 100644 (file)
@@ -973,7 +973,7 @@ static const struct fscache_state *_fscache_invalidate_object(struct fscache_obj
         * retire the object instead.
         */
        if (!fscache_use_cookie(object)) {
-               ASSERT(radix_tree_empty(&object->cookie->stores));
+               ASSERT(xa_empty(&object->cookie->stores));
                set_bit(FSCACHE_OBJECT_RETIRED, &object->flags);
                _leave(" [no cookie]");
                return transit_to(KILL_OBJECT);
index 26af6fdf15387fa3e39442b7df2c62706a1afff0..65c89cb89716d91206bf4ca0f0bbabe37f36e810 100644 (file)
  */
 bool __fscache_check_page_write(struct fscache_cookie *cookie, struct page *page)
 {
-       void *val;
+       void *val = xa_load(&cookie->stores, page->index);
 
-       rcu_read_lock();
-       val = radix_tree_lookup(&cookie->stores, page->index);
-       rcu_read_unlock();
        trace_fscache_check_page(cookie, page, val, 0);
 
        return val != NULL;
@@ -63,8 +60,8 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
                                  struct page *page,
                                  gfp_t gfp)
 {
+       XA_STATE(xas, &cookie->stores, page->index);
        struct page *xpage;
-       void *val;
 
        _enter("%p,%p,%x", cookie, page, gfp);
 
@@ -72,8 +69,8 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
 
 try_again:
        rcu_read_lock();
-       val = radix_tree_lookup(&cookie->stores, page->index);
-       if (!val) {
+       xpage = xas_load(&xas);
+       if (!xpage) {
                rcu_read_unlock();
                fscache_stat(&fscache_n_store_vmscan_not_storing);
                __fscache_uncache_page(cookie, page);
@@ -82,32 +79,33 @@ try_again:
 
        /* see if the page is actually undergoing storage - if so we can't get
         * rid of it till the cache has finished with it */
-       if (radix_tree_tag_get(&cookie->stores, page->index,
-                              FSCACHE_COOKIE_STORING_TAG)) {
+       if (xas_get_mark(&xas, FSCACHE_COOKIE_STORING)) {
                rcu_read_unlock();
+               xas_reset(&xas);
                goto page_busy;
        }
 
        /* the page is pending storage, so we attempt to cancel the store and
         * discard the store request so that the page can be reclaimed */
-       spin_lock(&cookie->stores_lock);
+       xas_reset(&xas);
+       xas_lock(&xas);
        rcu_read_unlock();
 
-       if (radix_tree_tag_get(&cookie->stores, page->index,
-                              FSCACHE_COOKIE_STORING_TAG)) {
+       xpage = xas_load(&xas);
+       if (xas_get_mark(&xas, FSCACHE_COOKIE_STORING)) {
                /* the page started to undergo storage whilst we were looking,
                 * so now we can only wait or return */
-               spin_unlock(&cookie->stores_lock);
+               xas_unlock(&xas);
                goto page_busy;
        }
 
-       xpage = radix_tree_delete(&cookie->stores, page->index);
+       xas_store(&xas, NULL);
        trace_fscache_page(cookie, page, fscache_page_radix_delete);
-       spin_unlock(&cookie->stores_lock);
+       xas_unlock(&xas);
 
        if (xpage) {
                fscache_stat(&fscache_n_store_vmscan_cancelled);
-               fscache_stat(&fscache_n_store_radix_deletes);
+               fscache_stat(&fscache_n_store_xarray_deletes);
                ASSERTCMP(xpage, ==, page);
        } else {
                fscache_stat(&fscache_n_store_vmscan_gone);
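
The hunk above keeps the original optimistic shape: an RCU-protected xas_load()
and mark check decide whether cancelling the store is even worth trying, and
only then is xa_lock taken for a second walk and the erase. xas_reset()
discards the cached walk position so the relocked xas_load() starts from the
top again. A condensed, hedged sketch of that shape with generic names
(example_release, with XA_MARK_1 standing in for the "storing" mark):

    /* Sketch: optimistic RCU check, then a locked re-check before erasing. */
    #include <linux/rcupdate.h>
    #include <linux/xarray.h>

    static bool example_release(struct xarray *xa, unsigned long index)
    {
            XA_STATE(xas, xa, index);
            void *entry;

            rcu_read_lock();
            entry = xas_load(&xas);
            if (!entry || xas_get_mark(&xas, XA_MARK_1)) {
                    rcu_read_unlock();
                    return false;                   /* absent or busy */
            }

            xas_reset(&xas);                        /* restart the walk under the lock */
            xas_lock(&xas);
            rcu_read_unlock();

            entry = xas_load(&xas);
            if (xas_get_mark(&xas, XA_MARK_1)) {    /* became busy meanwhile */
                    xas_unlock(&xas);
                    return false;
            }
            xas_store(&xas, NULL);                  /* storing NULL erases; never fails */
            xas_unlock(&xas);
            return entry != NULL;
    }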
@@ -147,30 +145,28 @@ static void fscache_end_page_write(struct fscache_object *object,
                                   struct page *page)
 {
        struct fscache_cookie *cookie;
-       struct page *xpage = NULL, *val;
+       struct page *xpage = NULL;
 
        spin_lock(&object->lock);
        cookie = object->cookie;
        if (cookie) {
-               /* delete the page from the tree if it is now no longer
+               XA_STATE(xas, &cookie->stores, page->index);
+               /* delete the page from the store if it is now no longer
                 * pending */
-               spin_lock(&cookie->stores_lock);
-               radix_tree_tag_clear(&cookie->stores, page->index,
-                                    FSCACHE_COOKIE_STORING_TAG);
+               xas_lock(&xas);
+               xpage = xas_load(&xas);
+               xas_clear_mark(&xas, FSCACHE_COOKIE_STORING);
                trace_fscache_page(cookie, page, fscache_page_radix_clear_store);
-               if (!radix_tree_tag_get(&cookie->stores, page->index,
-                                       FSCACHE_COOKIE_PENDING_TAG)) {
-                       fscache_stat(&fscache_n_store_radix_deletes);
-                       xpage = radix_tree_delete(&cookie->stores, page->index);
+               if (xas_get_mark(&xas, FSCACHE_COOKIE_PENDING)) {
+                       xpage = NULL;
+                       trace_fscache_page(cookie, page, fscache_page_write_end_pend);
+               } else {
+                       fscache_stat(&fscache_n_store_xarray_deletes);
+                       xas_store(&xas, NULL);
                        trace_fscache_page(cookie, page, fscache_page_radix_delete);
                        trace_fscache_page(cookie, page, fscache_page_write_end);
-
-                       val = radix_tree_lookup(&cookie->stores, page->index);
-                       trace_fscache_check_page(cookie, page, val, 1);
-               } else {
-                       trace_fscache_page(cookie, page, fscache_page_write_end_pend);
                }
-               spin_unlock(&cookie->stores_lock);
+               xas_unlock(&xas);
                wake_up_bit(&cookie->flags, 0);
                trace_fscache_wake_cookie(cookie);
        } else {
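
Because mark updates and the store now sit under the same xa_lock,
fscache_end_page_write() above can clear the "storing" mark, test the "pending"
mark and erase the slot in one locked section instead of juggling stores_lock.
A hedged sketch of that shape with illustrative names:

    /* Sketch: finish a write - clear the "storing" mark, and drop the entry
     * unless it was re-queued (the "pending" mark set again) in the meantime.
     */
    static void *example_end_write(struct xarray *xa, unsigned long index)
    {
            XA_STATE(xas, xa, index);
            void *entry;

            xas_lock(&xas);
            entry = xas_load(&xas);
            xas_clear_mark(&xas, XA_MARK_1);        /* storing finished */
            if (xas_get_mark(&xas, XA_MARK_0))      /* re-queued: keep it */
                    entry = NULL;
            else
                    xas_store(&xas, NULL);          /* no longer needed */
            xas_unlock(&xas);
            return entry;                           /* caller drops its page reference */
    }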
@@ -787,13 +783,12 @@ static void fscache_release_write_op(struct fscache_operation *_op)
  */
 static void fscache_write_op(struct fscache_operation *_op)
 {
+       XA_STATE(xas, NULL, 0);
        struct fscache_storage *op =
                container_of(_op, struct fscache_storage, op);
        struct fscache_object *object = op->op.object;
        struct fscache_cookie *cookie;
        struct page *page;
-       unsigned n;
-       void *results[1];
        int ret;
 
        _enter("{OP%x,%d}", op->op.debug_id, atomic_read(&op->op.usage));
@@ -828,28 +823,26 @@ again:
                return;
        }
 
-       spin_lock(&cookie->stores_lock);
+       xas.xa = &cookie->stores;
+       xas_lock(&xas);
 
        fscache_stat(&fscache_n_store_calls);
 
        /* find a page to store */
-       results[0] = NULL;
-       page = NULL;
-       n = radix_tree_gang_lookup_tag(&cookie->stores, results, 0, 1,
-                                      FSCACHE_COOKIE_PENDING_TAG);
-       trace_fscache_gang_lookup(cookie, &op->op, results, n, op->store_limit);
-       if (n != 1)
+       page = xas_find_marked(&xas, ULONG_MAX, FSCACHE_COOKIE_PENDING);
+       if (!page)
                goto superseded;
-       page = results[0];
-       _debug("gang %d [%lx]", n, page->index);
+       _debug("found %lx", page->index);
+       if (page->index >= op->store_limit) {
+               fscache_stat(&fscache_n_store_pages_over_limit);
+               goto superseded;
+       }
 
-       radix_tree_tag_set(&cookie->stores, page->index,
-                          FSCACHE_COOKIE_STORING_TAG);
-       radix_tree_tag_clear(&cookie->stores, page->index,
-                            FSCACHE_COOKIE_PENDING_TAG);
+       xas_set_mark(&xas, FSCACHE_COOKIE_STORING);
+       xas_clear_mark(&xas, FSCACHE_COOKIE_PENDING);
+       xas_unlock(&xas);
        trace_fscache_page(cookie, page, fscache_page_radix_pend2store);
 
-       spin_unlock(&cookie->stores_lock);
        spin_unlock(&object->lock);
 
        if (page->index >= op->store_limit)
@@ -881,7 +874,7 @@ superseded:
        /* this writer is going away and there aren't any more things to
         * write */
        _debug("cease");
-       spin_unlock(&cookie->stores_lock);
+       xas_unlock(&xas);
        clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags);
        spin_unlock(&object->lock);
        fscache_op_complete(&op->op, false);
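
The single-slot gang lookup becomes xas_find_marked(), which returns the first
entry at or after the xa_state's index that carries the given mark (or NULL)
and leaves the xa_state positioned on it, so the pending-to-storing mark flip
applies to the same slot without another walk. A short hedged sketch with
illustrative marks:

    /* Sketch: pick the next "pending" entry and flip it to "storing",
     * all under one hold of the array lock.
     */
    static void *example_pick_pending(struct xarray *xa)
    {
            XA_STATE(xas, xa, 0);
            void *entry;

            xas_lock(&xas);
            entry = xas_find_marked(&xas, ULONG_MAX, XA_MARK_0);
            if (entry) {
                    xas_set_mark(&xas, XA_MARK_1);          /* now being stored */
                    xas_clear_mark(&xas, XA_MARK_0);        /* no longer pending */
            }
            xas_unlock(&xas);
            return entry;
    }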
@@ -893,34 +886,27 @@ superseded:
  */
 void fscache_invalidate_writes(struct fscache_cookie *cookie)
 {
+       XA_STATE(xas, &cookie->stores, 0);
+       unsigned int cleared = 0;
        struct page *page;
-       void *results[16];
-       int n, i;
 
        _enter("");
 
-       for (;;) {
-               spin_lock(&cookie->stores_lock);
-               n = radix_tree_gang_lookup_tag(&cookie->stores, results, 0,
-                                              ARRAY_SIZE(results),
-                                              FSCACHE_COOKIE_PENDING_TAG);
-               if (n == 0) {
-                       spin_unlock(&cookie->stores_lock);
-                       break;
-               }
-
-               for (i = n - 1; i >= 0; i--) {
-                       page = results[i];
-                       radix_tree_delete(&cookie->stores, page->index);
-                       trace_fscache_page(cookie, page, fscache_page_radix_delete);
-                       trace_fscache_page(cookie, page, fscache_page_inval);
-               }
-
-               spin_unlock(&cookie->stores_lock);
-
-               for (i = n - 1; i >= 0; i--)
-                       put_page(results[i]);
+       xas_lock(&xas);
+       xas_for_each_marked(&xas, page, ULONG_MAX, FSCACHE_COOKIE_PENDING) {
+               xas_store(&xas, NULL);
+               trace_fscache_page(cookie, page, fscache_page_radix_delete);
+               trace_fscache_page(cookie, page, fscache_page_inval);
+               put_page(page);
+               if (++cleared % XA_CHECK_SCHED)
+                       continue;
+
+               xas_pause(&xas);
+               xas_unlock(&xas);
+               cond_resched();
+               xas_lock(&xas);
        }
+       xas_unlock(&xas);
 
        wake_up_bit(&cookie->flags, 0);
        trace_fscache_wake_cookie(cookie);
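
The batched gang-lookup-and-delete loop collapses into one xas_for_each_marked()
pass. xas_pause() makes the iteration safe to restart after the lock is dropped
for cond_resched(); XA_CHECK_SCHED is assumed here to be a simple batch-size
constant (it lives in lib/xarray.c in some trees, so this sketch defines its
own):

    /* Sketch: erase every marked entry, rescheduling periodically. */
    #include <linux/sched.h>
    #include <linux/xarray.h>

    #define EXAMPLE_CHECK_SCHED 4096        /* assumed batch size between resched checks */

    static void example_erase_all_marked(struct xarray *xa)
    {
            XA_STATE(xas, xa, 0);
            unsigned int done = 0;
            void *entry;

            xas_lock(&xas);
            xas_for_each_marked(&xas, entry, ULONG_MAX, XA_MARK_0) {
                    xas_store(&xas, NULL);          /* erase under the lock */
                    if (++done % EXAMPLE_CHECK_SCHED)
                            continue;
                    xas_pause(&xas);                /* make the walk restartable */
                    xas_unlock(&xas);
                    cond_resched();
                    xas_lock(&xas);
            }
            xas_unlock(&xas);
    }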
@@ -962,9 +948,11 @@ int __fscache_write_page(struct fscache_cookie *cookie,
                         loff_t object_size,
                         gfp_t gfp)
 {
+       XA_STATE(xas, &cookie->stores, page->index);
        struct fscache_storage *op;
        struct fscache_object *object;
        bool wake_cookie = false;
+       struct page *xpage;
        int ret;
 
        _enter("%p,%x,", cookie, (u32) page->flags);
@@ -989,12 +977,9 @@ int __fscache_write_page(struct fscache_cookie *cookie,
                (1 << FSCACHE_OP_WAITING) |
                (1 << FSCACHE_OP_UNUSE_COOKIE);
 
-       ret = radix_tree_maybe_preload(gfp & ~__GFP_HIGHMEM);
-       if (ret < 0)
-               goto nomem_free;
-
        trace_fscache_page_op(cookie, page, &op->op, fscache_page_op_write_one);
 
+retry:
        ret = -ENOBUFS;
        spin_lock(&cookie->lock);
 
@@ -1008,28 +993,25 @@ int __fscache_write_page(struct fscache_cookie *cookie,
 
        trace_fscache_page(cookie, page, fscache_page_write);
 
-       /* add the page to the pending-storage radix tree on the backing
-        * object */
+       /* add the page to the pending-storage xarray on the backing object */
        spin_lock(&object->lock);
 
        if (object->store_limit_l != object_size)
                fscache_set_store_limit(object, object_size);
 
-       spin_lock(&cookie->stores_lock);
+       xas_lock(&xas);
 
        _debug("store limit %llx", (unsigned long long) object->store_limit);
 
-       ret = radix_tree_insert(&cookie->stores, page->index, page);
-       if (ret < 0) {
-               if (ret == -EEXIST)
-                       goto already_queued;
-               _debug("insert failed %d", ret);
-               goto nobufs_unlock_obj;
-       }
+       xpage = xas_load(&xas);
+       if (xpage)
+               goto already_queued;
 
+       xas_store(&xas, page);
+       if (xas_error(&xas))
+               goto nobufs_unlock_obj;
        trace_fscache_page(cookie, page, fscache_page_radix_insert);
-       radix_tree_tag_set(&cookie->stores, page->index,
-                          FSCACHE_COOKIE_PENDING_TAG);
+       xas_set_mark(&xas, FSCACHE_COOKIE_PENDING);
        trace_fscache_page(cookie, page, fscache_page_radix_set_pend);
        get_page(page);
 
@@ -1038,7 +1020,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
        if (test_and_set_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags))
                goto already_pending;
 
-       spin_unlock(&cookie->stores_lock);
+       xas_unlock(&xas);
        spin_unlock(&object->lock);
 
        op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
@@ -1049,7 +1031,6 @@ int __fscache_write_page(struct fscache_cookie *cookie,
                goto submit_failed;
 
        spin_unlock(&cookie->lock);
-       radix_tree_preload_end();
        fscache_stat(&fscache_n_store_ops);
        fscache_stat(&fscache_n_stores_ok);
 
@@ -1061,31 +1042,32 @@ int __fscache_write_page(struct fscache_cookie *cookie,
 already_queued:
        fscache_stat(&fscache_n_stores_again);
 already_pending:
-       spin_unlock(&cookie->stores_lock);
+       xas_unlock(&xas);
        spin_unlock(&object->lock);
        spin_unlock(&cookie->lock);
-       radix_tree_preload_end();
        fscache_put_operation(&op->op);
        fscache_stat(&fscache_n_stores_ok);
        _leave(" = 0");
        return 0;
 
 submit_failed:
-       spin_lock(&cookie->stores_lock);
-       radix_tree_delete(&cookie->stores, page->index);
+       xa_erase(&cookie->stores, page->index);
        trace_fscache_page(cookie, page, fscache_page_radix_delete);
-       spin_unlock(&cookie->stores_lock);
        wake_cookie = __fscache_unuse_cookie(cookie);
        put_page(page);
        ret = -ENOBUFS;
        goto nobufs;
 
 nobufs_unlock_obj:
-       spin_unlock(&cookie->stores_lock);
+       xas_unlock(&xas);
        spin_unlock(&object->lock);
+       spin_unlock(&cookie->lock);
+       if (xas_nomem(&xas, gfp))
+               goto retry;
+       goto nobufs2;
 nobufs:
        spin_unlock(&cookie->lock);
-       radix_tree_preload_end();
+nobufs2:
        fscache_put_operation(&op->op);
        if (wake_cookie)
                __fscache_wake_unused_cookie(cookie);
@@ -1093,8 +1075,6 @@ nobufs:
        _leave(" = -ENOBUFS");
        return -ENOBUFS;
 
-nomem_free:
-       fscache_put_operation(&op->op);
 nomem:
        fscache_stat(&fscache_n_stores_oom);
        _leave(" = -ENOMEM");
index a5aa93ece8c50e4cebcdc05067d801e2d200c991..54943bc659a5419b557e4a1590da37febeea6652 100644 (file)
@@ -58,7 +58,7 @@ atomic_t fscache_n_stores_oom;
 atomic_t fscache_n_store_ops;
 atomic_t fscache_n_store_calls;
 atomic_t fscache_n_store_pages;
-atomic_t fscache_n_store_radix_deletes;
+atomic_t fscache_n_store_xarray_deletes;
 atomic_t fscache_n_store_pages_over_limit;
 
 atomic_t fscache_n_store_vmscan_not_storing;
@@ -227,11 +227,11 @@ int fscache_stats_show(struct seq_file *m, void *v)
                   atomic_read(&fscache_n_stores_again),
                   atomic_read(&fscache_n_stores_nobufs),
                   atomic_read(&fscache_n_stores_oom));
-       seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
+       seq_printf(m, "Stores : ops=%u run=%u pgs=%u xar=%u olm=%u\n",
                   atomic_read(&fscache_n_store_ops),
                   atomic_read(&fscache_n_store_calls),
                   atomic_read(&fscache_n_store_pages),
-                  atomic_read(&fscache_n_store_radix_deletes),
+                  atomic_read(&fscache_n_store_xarray_deletes),
                   atomic_read(&fscache_n_store_pages_over_limit));
 
        seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u wt=%u\n",
index ad044c0cb1f3bf511db2e3b72518a15f8b3c3247..12887aa7e2c9f9d6433d3e04dec6512b34d35366 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/pagemap.h>
 #include <linux/pagevec.h>
 #include <linux/list_bl.h>
+#include <linux/xarray.h>
 
 #if defined(CONFIG_FSCACHE) || defined(CONFIG_FSCACHE_MODULE)
 #define fscache_available() (1)
@@ -135,15 +136,14 @@ struct fscache_cookie {
        atomic_t                        n_children;     /* number of children of this cookie */
        atomic_t                        n_active;       /* number of active users of netfs ptrs */
        spinlock_t                      lock;
-       spinlock_t                      stores_lock;    /* lock on page store tree */
        struct hlist_head               backing_objects; /* object(s) backing this file/index */
        const struct fscache_cookie_def *def;           /* definition */
        struct fscache_cookie           *parent;        /* parent of this entry */
        struct hlist_bl_node            hash_link;      /* Link in hash table */
        void                            *netfs_data;    /* back pointer to netfs */
-       struct radix_tree_root          stores;         /* pages to be stored on this cookie */
-#define FSCACHE_COOKIE_PENDING_TAG     0               /* pages tag: pending write to cache */
-#define FSCACHE_COOKIE_STORING_TAG     1               /* pages tag: writing to cache */
+       struct xarray                   stores;         /* pages to be stored on this cookie */
+#define FSCACHE_COOKIE_PENDING         XA_MARK_0       /* pages mark: pending write to cache */
+#define FSCACHE_COOKIE_STORING         XA_MARK_1       /* pages mark: writing to cache */
 
        unsigned long                   flags;
 #define FSCACHE_COOKIE_LOOKING_UP      0       /* T if non-index cookie being looked up still */
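
Radix tree tags were bare bit numbers; XArray marks are the typed constants
XA_MARK_0 to XA_MARK_2, and each present entry can carry any combination of
them. A hedged sketch of the same marks driven from the normal API (the patch
itself only uses the xas_* forms):

    /* Sketch: marks via the normal API; these helpers take xa_lock themselves
     * and a mark only sticks on an index that holds an entry.
     */
    #include <linux/xarray.h>

    static void example_marks(struct xarray *xa, unsigned long index, void *entry)
    {
            xa_store(xa, index, entry, GFP_KERNEL);
            xa_set_mark(xa, index, XA_MARK_0);      /* e.g. "pending write" */

            if (xa_get_mark(xa, index, XA_MARK_0))
                    xa_clear_mark(xa, index, XA_MARK_0);
    }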
index d16fe6ed78a286fc53609b51a9caeda3a9767e70..502139d70485866fc6bbedd173f14102bc3d7dce 100644 (file)
@@ -500,33 +500,6 @@ TRACE_EVENT(fscache_wrote_page,
                      __entry->cookie, __entry->page, __entry->op, __entry->ret)
            );
 
-TRACE_EVENT(fscache_gang_lookup,
-           TP_PROTO(struct fscache_cookie *cookie, struct fscache_operation *op,
-                    void **results, int n, pgoff_t store_limit),
-
-           TP_ARGS(cookie, op, results, n, store_limit),
-
-           TP_STRUCT__entry(
-                   __field(struct fscache_cookie *,    cookie          )
-                   __field(struct fscache_operation *, op              )
-                   __field(pgoff_t,                    results0        )
-                   __field(int,                        n               )
-                   __field(pgoff_t,                    store_limit     )
-                            ),
-
-           TP_fast_assign(
-                   __entry->cookie             = cookie;
-                   __entry->op                 = op;
-                   __entry->results0           = results[0] ? ((struct page *)results[0])->index : (pgoff_t)-1;
-                   __entry->n                  = n;
-                   __entry->store_limit        = store_limit;
-                          ),
-
-           TP_printk("c=%p op=%p r0=%lx n=%d sl=%lx",
-                     __entry->cookie, __entry->op, __entry->results0, __entry->n,
-                     __entry->store_limit)
-           );
-
 #endif /* _TRACE_FSCACHE_H */
 
 /* This part must be outside protection */