*/
bool __fscache_check_page_write(struct fscache_cookie *cookie, struct page *page)
{
- void *val;
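+ /* xa_load() takes and drops the RCU read lock itself, so no explicit
+ * locking is needed for a simple lookup */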
+ void *val = xa_load(&cookie->stores, page->index);
- rcu_read_lock();
- val = radix_tree_lookup(&cookie->stores, page->index);
- rcu_read_unlock();
trace_fscache_check_page(cookie, page, val, 0);
return val != NULL;
struct page *page,
gfp_t gfp)
{
+ XA_STATE(xas, &cookie->stores, page->index);
struct page *xpage;
- void *val;
_enter("%p,%p,%x", cookie, page, gfp);
try_again:
rcu_read_lock();
- val = radix_tree_lookup(&cookie->stores, page->index);
- if (!val) {
+ xpage = xas_load(&xas);
+ if (!xpage) {
rcu_read_unlock();
fscache_stat(&fscache_n_store_vmscan_not_storing);
__fscache_uncache_page(cookie, page);
/* see if the page is actually undergoing storage - if so we can't get
* rid of it till the cache has finished with it */
- if (radix_tree_tag_get(&cookie->stores, page->index,
- FSCACHE_COOKIE_STORING_TAG)) {
+ if (xas_get_mark(&xas, FSCACHE_COOKIE_STORING)) {
rcu_read_unlock();
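+ /* reset the walk state as page_busy may loop back to try_again */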
+ xas_reset(&xas);
goto page_busy;
}
/* the page is pending storage, so we attempt to cancel the store and
* discard the store request so that the page can be reclaimed */
- spin_lock(&cookie->stores_lock);
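+ /* the state cached by the lockless walk above may be stale by the
+ * time the xa_lock is acquired, so re-walk from the root under it */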
+ xas_reset(&xas);
+ xas_lock(&xas);
rcu_read_unlock();
- if (radix_tree_tag_get(&cookie->stores, page->index,
- FSCACHE_COOKIE_STORING_TAG)) {
+ xpage = xas_load(&xas);
+ if (xas_get_mark(&xas, FSCACHE_COOKIE_STORING)) {
/* the page started to undergo storage whilst we were looking,
* so now we can only wait or return */
- spin_unlock(&cookie->stores_lock);
+ xas_unlock(&xas);
goto page_busy;
}
- xpage = radix_tree_delete(&cookie->stores, page->index);
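+ /* storing NULL erases the entry; xpage, loaded under the lock above,
+ * lets the assertion below check that the right page was removed */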
+ xas_store(&xas, NULL);
trace_fscache_page(cookie, page, fscache_page_radix_delete);
- spin_unlock(&cookie->stores_lock);
+ xas_unlock(&xas);
if (xpage) {
fscache_stat(&fscache_n_store_vmscan_cancelled);
- fscache_stat(&fscache_n_store_radix_deletes);
+ fscache_stat(&fscache_n_store_xarray_deletes);
ASSERTCMP(xpage, ==, page);
} else {
fscache_stat(&fscache_n_store_vmscan_gone);
struct page *page)
{
struct fscache_cookie *cookie;
- struct page *xpage = NULL, *val;
+ struct page *xpage = NULL;
spin_lock(&object->lock);
cookie = object->cookie;
if (cookie) {
- /* delete the page from the tree if it is now no longer
+ XA_STATE(xas, &cookie->stores, page->index);
+ /* delete the page from the store if it is now no longer
* pending */
- spin_lock(&cookie->stores_lock);
- radix_tree_tag_clear(&cookie->stores, page->index,
- FSCACHE_COOKIE_STORING_TAG);
+ xas_lock(&xas);
+ xpage = xas_load(&xas);
+ xas_clear_mark(&xas, FSCACHE_COOKIE_STORING);
trace_fscache_page(cookie, page, fscache_page_radix_clear_store);
- if (!radix_tree_tag_get(&cookie->stores, page->index,
- FSCACHE_COOKIE_PENDING_TAG)) {
- fscache_stat(&fscache_n_store_radix_deletes);
- xpage = radix_tree_delete(&cookie->stores, page->index);
+ if (xas_get_mark(&xas, FSCACHE_COOKIE_PENDING)) {
+ xpage = NULL;
+ trace_fscache_page(cookie, page, fscache_page_write_end_pend);
+ } else {
+ fscache_stat(&fscache_n_store_xarray_deletes);
+ xas_store(&xas, NULL);
trace_fscache_page(cookie, page, fscache_page_radix_delete);
trace_fscache_page(cookie, page, fscache_page_write_end);
-
- val = radix_tree_lookup(&cookie->stores, page->index);
- trace_fscache_check_page(cookie, page, val, 1);
- } else {
- trace_fscache_page(cookie, page, fscache_page_write_end_pend);
}
- spin_unlock(&cookie->stores_lock);
+ xas_unlock(&xas);
wake_up_bit(&cookie->flags, 0);
trace_fscache_wake_cookie(cookie);
} else {
*/
static void fscache_write_op(struct fscache_operation *_op)
{
+ XA_STATE(xas, NULL, 0);
struct fscache_storage *op =
container_of(_op, struct fscache_storage, op);
struct fscache_object *object = op->op.object;
struct fscache_cookie *cookie;
struct page *page;
- unsigned n;
- void *results[1];
int ret;
_enter("{OP%x,%d}", op->op.debug_id, atomic_read(&op->op.usage));
return;
}
- spin_lock(&cookie->stores_lock);
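+ /* the cookie isn't known when the XA_STATE is declared, so the
+ * xarray has to be bound to the walk state here */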
+ xas.xa = &cookie->stores;
+ xas_lock(&xas);
fscache_stat(&fscache_n_store_calls);
/* find a page to store */
- results[0] = NULL;
- page = NULL;
- n = radix_tree_gang_lookup_tag(&cookie->stores, results, 0, 1,
- FSCACHE_COOKIE_PENDING_TAG);
- trace_fscache_gang_lookup(cookie, &op->op, results, n, op->store_limit);
- if (n != 1)
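+ /* a single xas_find_marked() replaces the one-entry gang lookup:
+ * find the first page marked PENDING at or after index 0 */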
+ page = xas_find_marked(&xas, ULONG_MAX, FSCACHE_COOKIE_PENDING);
+ if (!page)
goto superseded;
- page = results[0];
- _debug("gang %d [%lx]", n, page->index);
+ _debug("found %lx", page->index);
- radix_tree_tag_set(&cookie->stores, page->index,
- FSCACHE_COOKIE_STORING_TAG);
- radix_tree_tag_clear(&cookie->stores, page->index,
- FSCACHE_COOKIE_PENDING_TAG);
+ xas_set_mark(&xas, FSCACHE_COOKIE_STORING);
+ xas_clear_mark(&xas, FSCACHE_COOKIE_PENDING);
+ xas_unlock(&xas);
trace_fscache_page(cookie, page, fscache_page_radix_pend2store);
- spin_unlock(&cookie->stores_lock);
spin_unlock(&object->lock);
if (page->index >= op->store_limit)
/* this writer is going away and there aren't any more things to
* write */
_debug("cease");
- spin_unlock(&cookie->stores_lock);
+ xas_unlock(&xas);
clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags);
spin_unlock(&object->lock);
fscache_op_complete(&op->op, false);
*/
void fscache_invalidate_writes(struct fscache_cookie *cookie)
{
+ XA_STATE(xas, &cookie->stores, 0);
+ unsigned int cleared = 0;
struct page *page;
- void *results[16];
- int n, i;
_enter("");
- for (;;) {
- spin_lock(&cookie->stores_lock);
- n = radix_tree_gang_lookup_tag(&cookie->stores, results, 0,
- ARRAY_SIZE(results),
- FSCACHE_COOKIE_PENDING_TAG);
- if (n == 0) {
- spin_unlock(&cookie->stores_lock);
- break;
- }
-
- for (i = n - 1; i >= 0; i--) {
- page = results[i];
- radix_tree_delete(&cookie->stores, page->index);
- trace_fscache_page(cookie, page, fscache_page_radix_delete);
- trace_fscache_page(cookie, page, fscache_page_inval);
- }
-
- spin_unlock(&cookie->stores_lock);
-
- for (i = n - 1; i >= 0; i--)
- put_page(results[i]);
+ xas_lock(&xas);
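+ /* erase and release every pending page, dropping the lock every
+ * XA_CHECK_SCHED entries so a long queue can't stall the CPU */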
+ xas_for_each_marked(&xas, page, ULONG_MAX, FSCACHE_COOKIE_PENDING) {
+ xas_store(&xas, NULL);
+ trace_fscache_page(cookie, page, fscache_page_radix_delete);
+ trace_fscache_page(cookie, page, fscache_page_inval);
+ put_page(page);
+ if (++cleared % XA_CHECK_SCHED)
+ continue;
+
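+ /* xas_pause() makes it safe to drop the lock; the walk resumes
+ * after the current index once the lock is retaken */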
+ xas_pause(&xas);
+ xas_unlock(&xas);
+ cond_resched();
+ xas_lock(&xas);
}
+ xas_unlock(&xas);
wake_up_bit(&cookie->flags, 0);
trace_fscache_wake_cookie(cookie);
loff_t object_size,
gfp_t gfp)
{
+ XA_STATE(xas, &cookie->stores, page->index);
struct fscache_storage *op;
struct fscache_object *object;
bool wake_cookie = false;
+ struct page *xpage;
int ret;
_enter("%p,%x,", cookie, (u32) page->flags);
(1 << FSCACHE_OP_WAITING) |
(1 << FSCACHE_OP_UNUSE_COOKIE);
- ret = radix_tree_maybe_preload(gfp & ~__GFP_HIGHMEM);
- if (ret < 0)
- goto nomem_free;
-
trace_fscache_page_op(cookie, page, &op->op, fscache_page_op_write_one);
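+ /* no preloading: if xas_store() cannot allocate, the error is caught
+ * by xas_error() and xas_nomem() allocates outside the locks before
+ * jumping back here */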
+retry:
ret = -ENOBUFS;
spin_lock(&cookie->lock);
trace_fscache_page(cookie, page, fscache_page_write);
- /* add the page to the pending-storage radix tree on the backing
- * object */
+ /* add the page to the pending-storage xarray on the backing object */
spin_lock(&object->lock);
if (object->store_limit_l != object_size)
fscache_set_store_limit(object, object_size);
- spin_lock(&cookie->stores_lock);
+ xas_lock(&xas);
_debug("store limit %llx", (unsigned long long) object->store_limit);
- ret = radix_tree_insert(&cookie->stores, page->index, page);
- if (ret < 0) {
- if (ret == -EEXIST)
- goto already_queued;
- _debug("insert failed %d", ret);
- goto nobufs_unlock_obj;
- }
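+ /* loading first catches an already-queued page (the old -EEXIST
+ * case); an allocation error from xas_store() is handled below */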
+ xpage = xas_load(&xas);
+ if (xpage)
+ goto already_queued;
+ xas_store(&xas, page);
+ if (xas_error(&xas))
+ goto nobufs_unlock_obj;
trace_fscache_page(cookie, page, fscache_page_radix_insert);
- radix_tree_tag_set(&cookie->stores, page->index,
- FSCACHE_COOKIE_PENDING_TAG);
+ xas_set_mark(&xas, FSCACHE_COOKIE_PENDING);
trace_fscache_page(cookie, page, fscache_page_radix_set_pend);
get_page(page);
if (test_and_set_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags))
goto already_pending;
- spin_unlock(&cookie->stores_lock);
+ xas_unlock(&xas);
spin_unlock(&object->lock);
op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
goto submit_failed;
spin_unlock(&cookie->lock);
- radix_tree_preload_end();
fscache_stat(&fscache_n_store_ops);
fscache_stat(&fscache_n_stores_ok);
already_queued:
fscache_stat(&fscache_n_stores_again);
already_pending:
- spin_unlock(&cookie->stores_lock);
+ xas_unlock(&xas);
spin_unlock(&object->lock);
spin_unlock(&cookie->lock);
- radix_tree_preload_end();
fscache_put_operation(&op->op);
fscache_stat(&fscache_n_stores_ok);
_leave(" = 0");
return 0;
submit_failed:
- spin_lock(&cookie->stores_lock);
- radix_tree_delete(&cookie->stores, page->index);
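+ /* xa_erase() takes the xa_lock itself, replacing the old
+ * stores_lock-protected delete */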
+ xa_erase(&cookie->stores, page->index);
trace_fscache_page(cookie, page, fscache_page_radix_delete);
- spin_unlock(&cookie->stores_lock);
wake_cookie = __fscache_unuse_cookie(cookie);
put_page(page);
ret = -ENOBUFS;
goto nobufs;
nobufs_unlock_obj:
- spin_unlock(&cookie->stores_lock);
+ xas_unlock(&xas);
spin_unlock(&object->lock);
+ spin_unlock(&cookie->lock);
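+ /* if xas_store() failed for lack of memory, allocate with the
+ * caller's gfp (minus __GFP_HIGHMEM, as for the old preload) and
+ * retry */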
+ if (xas_nomem(&xas, gfp & ~__GFP_HIGHMEM))
+ goto retry;
+ goto nobufs2;
nobufs:
spin_unlock(&cookie->lock);
- radix_tree_preload_end();
+nobufs2:
fscache_put_operation(&op->op);
if (wake_cookie)
__fscache_wake_unused_cookie(cookie);
_leave(" = -ENOBUFS");
return -ENOBUFS;
-nomem_free:
- fscache_put_operation(&op->op);
nomem:
fscache_stat(&fscache_n_stores_oom);
_leave(" = -ENOMEM");