struct dmz_reclaim *reclaim;
/* For chunk work */
- struct radix_tree_root chunk_rxtree;
+ struct xarray chunk_rx;
struct workqueue_struct *chunk_wq;
- struct mutex chunk_lock;
/* For cloned BIOs to zones */
struct bio_set bio_set;
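/*
 * Note: no new field replaces chunk_lock. The XArray embeds its own
 * spinlock, taken with xa_lock()/xa_unlock(), which now serializes
 * lookups, erasure and the per-chunk BIO lists. Illustrative pattern
 * only, not part of the patch:
 *
 *        xa_lock(&dmz->chunk_rx);
 *        ... touch cw->bio_list, drop refcounts, __xa_erase() ...
 *        xa_unlock(&dmz->chunk_rx);
 */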
/*
* Decrement a chunk work reference count and
- * free it if it becomes 0.
+ * free it if it becomes 0. Called with xa_lock held.
*/
static void dmz_put_chunk_work(struct dm_chunk_work *cw)
{
if (refcount_dec_and_test(&cw->refcount)) {
WARN_ON(!bio_list_empty(&cw->bio_list));
- radix_tree_delete(&cw->target->chunk_rxtree, cw->chunk);
+ __xa_erase(&cw->target->chunk_rx, cw->chunk);
kfree(cw);
}
}
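/*
 * For reference, the matching "get" helper needs no change from this
 * conversion; in the existing driver it is simply:
 *
 *        static inline void dmz_get_chunk_work(struct dm_chunk_work *cw)
 *        {
 *                refcount_inc(&cw->refcount);
 *        }
 *
 * The put side runs under xa_lock so that dropping the last reference
 * and erasing the entry appear atomic to concurrent lookups.
 */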
static void dmz_chunk_work(struct work_struct *work)
{
struct dm_chunk_work *cw = container_of(work, struct dm_chunk_work, work);
struct dmz_target *dmz = cw->target;
struct bio *bio;
- mutex_lock(&dmz->chunk_lock);
+ xa_lock(&dmz->chunk_rx);
/* Process the chunk BIOs */
while ((bio = bio_list_pop(&cw->bio_list))) {
- mutex_unlock(&dmz->chunk_lock);
+ xa_unlock(&dmz->chunk_rx);
dmz_handle_bio(dmz, cw, bio);
- mutex_lock(&dmz->chunk_lock);
+ xa_lock(&dmz->chunk_rx);
dmz_put_chunk_work(cw);
}
/* Queueing the work incremented the work refcount */
dmz_put_chunk_work(cw);
- mutex_unlock(&dmz->chunk_lock);
+ xa_unlock(&dmz->chunk_rx);
}
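/*
 * The lock dance above is the usual way to call a sleeping function
 * from a loop whose bookkeeping is lock-protected: dmz_handle_bio()
 * does I/O and may sleep, and unlike the old chunk_lock mutex,
 * xa_lock is a spinlock, so sleeping while holding it is a bug.
 * Generic sketch with hypothetical names:
 *
 *        xa_lock(&xa);
 *        while ((item = pop_locked(&pending))) {
 *                xa_unlock(&xa);
 *                handle_may_sleep(item);    ... must not hold xa_lock ...
 *                xa_lock(&xa);
 *        }
 *        xa_unlock(&xa);
 */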
/*
 * Get a BIO chunk work and start it to process a new BIO.
 * If the BIO chunk has no work yet, create one.
 */
static void dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio)
{
unsigned int chunk = dmz_bio_chunk(dmz->dev, bio);
- struct dm_chunk_work *cw;
-
- mutex_lock(&dmz->chunk_lock);
+ struct dm_chunk_work *cw, *cw2;
/* Get the BIO chunk work. If one is not active yet, create one */
- cw = radix_tree_lookup(&dmz->chunk_rxtree, chunk);
+ cw = xa_load(&dmz->chunk_rx, chunk);
if (!cw) {
- int ret;
-
/* Create a new chunk work */
cw = kmalloc(sizeof(struct dm_chunk_work), GFP_NOIO);
if (!cw)
- goto out;
+ return;
INIT_WORK(&cw->work, dmz_chunk_work);
refcount_set(&cw->refcount, 0);
cw->target = dmz;
cw->chunk = chunk;
bio_list_init(&cw->bio_list);
- ret = radix_tree_insert(&dmz->chunk_rxtree, chunk, cw);
- if (unlikely(ret)) {
+ /* If somebody else created the same chunk, use theirs */
+ cw2 = xa_cmpxchg(&dmz->chunk_rx, chunk, NULL, cw, GFP_NOIO);
+ if (unlikely(cw2)) {
kfree(cw);
- cw = NULL;
- goto out;
+ if (xa_is_err(cw2))
+ return;
+ cw = cw2;
}
}

+ xa_lock(&dmz->chunk_rx);
bio_list_add(&cw->bio_list, bio);
dmz_get_chunk_work(cw);
+ xa_unlock(&dmz->chunk_rx);

if (queue_work(dmz->chunk_wq, &cw->work))
dmz_get_chunk_work(cw);
-out:
- mutex_unlock(&dmz->chunk_lock);
}
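/*
 * xa_cmpxchg() stands in for the old lookup-then-insert sequence under
 * the mutex: it stores the new entry only if the slot still holds the
 * expected old value (NULL here) and returns the previous contents, or
 * an xa_err()-encoded pointer if its internal allocation fails. Idiom
 * sketch with generic names, not driver code:
 *
 *        old = xa_cmpxchg(&xa, index, NULL, entry, GFP_NOIO);
 *        if (old) {
 *                kfree(entry);
 *                if (xa_is_err(old))
 *                        return xa_err(old);    ... e.g. -ENOMEM ...
 *                entry = old;                   ... lost the race, reuse winner ...
 *        }
 */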
/*
}
/* Chunk BIO work */
- mutex_init(&dmz->chunk_lock);
- INIT_RADIX_TREE(&dmz->chunk_rxtree, GFP_NOIO);
+ xa_init(&dmz->chunk_rx);
dmz->chunk_wq = alloc_workqueue("dmz_cwq_%s", WQ_MEM_RECLAIM | WQ_UNBOUND,
0, dev->name);
if (!dmz->chunk_wq) {
err_cwq:
destroy_workqueue(dmz->chunk_wq);
err_bio:
- mutex_destroy(&dmz->chunk_lock);
bioset_exit(&dmz->bio_set);
err_meta:
dmz_dtr_metadata(dmz->metadata);
dmz_put_zoned_device(ti);
- mutex_destroy(&dmz->chunk_lock);
-
kfree(dmz);
}
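/*
 * No xa_destroy() is added to the teardown paths on the assumption
 * that the chunk workqueue is flushed and destroyed first, so every
 * chunk work has dropped its last reference and erased itself, leaving
 * chunk_rx empty. If entries could survive that point, the destructor
 * would also need:
 *
 *        xa_destroy(&dmz->chunk_rx);
 *
 * which frees the XArray's internal nodes but not the entries, so any
 * remaining struct dm_chunk_work would still have to be freed by hand.
 */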