dm-zoned-target: Convert to XArray
author     Matthew Wilcox <willy@infradead.org>
           Thu, 25 Oct 2018 16:42:36 +0000 (12:42 -0400)
committer  Matthew Wilcox (Oracle) <willy@infradead.org>
           Fri, 9 Aug 2019 01:38:12 +0000 (21:38 -0400)
Switch the locking from a private mutex to the XArray's embedded
spinlock (xa_lock).

Signed-off-by: Matthew Wilcox <willy@infradead.org>
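
For readers new to the API: the XArray embeds its own spinlock, so the
private mutex can go away entirely.  A minimal sketch of the pattern the
patch adopts (the struct and function names here are invented for
illustration and are not part of the patch):

	struct example {
		struct xarray	works;	/* replaces radix tree root + mutex */
	};

	static void *example_lookup(struct example *e, unsigned long index)
	{
		/* xa_load() takes the RCU read lock itself, so no xa_lock
		 * is needed for lookups */
		return xa_load(&e->works, index);
	}

	static void example_erase(struct example *e, unsigned long index)
	{
		xa_lock(&e->works);		/* the XArray's own spinlock */
		__xa_erase(&e->works, index);	/* __xa_* variants expect xa_lock held */
		xa_unlock(&e->works);
	}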
drivers/md/dm-zoned-target.c
drivers/md/dm-zoned.h

diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c
index 51d029bbb740c4f032ea63005611b194fcc5cee8..4da24033e75e1cbbe55cdbdfe1d516a3e0799463 100644
@@ -51,9 +51,8 @@ struct dmz_target {
        struct dmz_reclaim      *reclaim;
 
        /* For chunk work */
-       struct radix_tree_root  chunk_rxtree;
+       struct xarray           chunk_rx;
        struct workqueue_struct *chunk_wq;
-       struct mutex            chunk_lock;
 
        /* For cloned BIOs to zones */
        struct bio_set          bio_set;
@@ -446,13 +445,13 @@ static inline void dmz_get_chunk_work(struct dm_chunk_work *cw)
 
 /*
  * Decrement a chunk work reference count and
- * free it if it becomes 0.
+ * free it if it becomes 0.  Called with xa_lock held.
  */
 static void dmz_put_chunk_work(struct dm_chunk_work *cw)
 {
        if (refcount_dec_and_test(&cw->refcount)) {
                WARN_ON(!bio_list_empty(&cw->bio_list));
-               radix_tree_delete(&cw->target->chunk_rxtree, cw->chunk);
+               __xa_erase(&cw->target->chunk_rx, cw->chunk);
                kfree(cw);
        }
 }
@@ -466,20 +465,20 @@ static void dmz_chunk_work(struct work_struct *work)
        struct dmz_target *dmz = cw->target;
        struct bio *bio;
 
-       mutex_lock(&dmz->chunk_lock);
+       xa_lock(&dmz->chunk_rx);
 
        /* Process the chunk BIOs */
        while ((bio = bio_list_pop(&cw->bio_list))) {
-               mutex_unlock(&dmz->chunk_lock);
+               xa_unlock(&dmz->chunk_rx);
                dmz_handle_bio(dmz, cw, bio);
-               mutex_lock(&dmz->chunk_lock);
+               xa_lock(&dmz->chunk_rx);
                dmz_put_chunk_work(cw);
        }
 
        /* Queueing the work incremented the work refcount */
        dmz_put_chunk_work(cw);
 
-       mutex_unlock(&dmz->chunk_lock);
+       xa_unlock(&dmz->chunk_rx);
 }
 
 /*
@@ -516,19 +515,15 @@ static void dmz_flush_work(struct work_struct *work)
 static void dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio)
 {
        unsigned int chunk = dmz_bio_chunk(dmz->dev, bio);
-       struct dm_chunk_work *cw;
-
-       mutex_lock(&dmz->chunk_lock);
+       struct dm_chunk_work *cw, *cw2;
 
        /* Get the BIO chunk work. If one is not active yet, create one */
-       cw = radix_tree_lookup(&dmz->chunk_rxtree, chunk);
+       cw = xa_load(&dmz->chunk_rx, chunk);
        if (!cw) {
-               int ret;
-
                /* Create a new chunk work */
                cw = kmalloc(sizeof(struct dm_chunk_work), GFP_NOIO);
                if (!cw)
-                       goto out;
+                       return;
 
                INIT_WORK(&cw->work, dmz_chunk_work);
                refcount_set(&cw->refcount, 0);
@@ -536,11 +531,13 @@ static void dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio)
                cw->chunk = chunk;
                bio_list_init(&cw->bio_list);
 
-               ret = radix_tree_insert(&dmz->chunk_rxtree, chunk, cw);
-               if (unlikely(ret)) {
+               /* If somebody else created the same chunk, use theirs */
+               cw2 = xa_cmpxchg(&dmz->chunk_rx, chunk, NULL, cw, GFP_NOIO);
+               if (unlikely(cw2)) {
                        kfree(cw);
-                       cw = NULL;
-                       goto out;
+                       if (xa_is_err(cw2))
+                               return;
+                       cw = cw2;
                }
        }
 
@@ -549,8 +546,6 @@ static void dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio)
 
        if (queue_work(dmz->chunk_wq, &cw->work))
                dmz_get_chunk_work(cw);
-out:
-       mutex_unlock(&dmz->chunk_lock);
 }
 
 /*
@@ -740,8 +735,7 @@ static int dmz_ctr(struct dm_target *ti, unsigned int argc, char **argv)
        }
 
        /* Chunk BIO work */
-       mutex_init(&dmz->chunk_lock);
-       INIT_RADIX_TREE(&dmz->chunk_rxtree, GFP_NOIO);
+       xa_init(&dmz->chunk_rx);
        dmz->chunk_wq = alloc_workqueue("dmz_cwq_%s", WQ_MEM_RECLAIM | WQ_UNBOUND,
                                        0, dev->name);
        if (!dmz->chunk_wq) {
@@ -780,7 +774,6 @@ err_fwq:
 err_cwq:
        destroy_workqueue(dmz->chunk_wq);
 err_bio:
-       mutex_destroy(&dmz->chunk_lock);
        bioset_exit(&dmz->bio_set);
 err_meta:
        dmz_dtr_metadata(dmz->metadata);
@@ -815,8 +808,6 @@ static void dmz_dtr(struct dm_target *ti)
 
        dmz_put_zoned_device(ti);
 
-       mutex_destroy(&dmz->chunk_lock);
-
        kfree(dmz);
 }
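
The pivotal call above is xa_cmpxchg(), which atomically stores the new
work only if the slot is still empty, and otherwise returns what it found:
an existing entry, or an xa_err()-encoded pointer on allocation failure.
A hypothetical helper (not from the patch) spelling out the three outcomes
that dmz_queue_chunk_work() handles inline:

	static struct dm_chunk_work *example_get_or_insert(struct xarray *xa,
			unsigned long index, struct dm_chunk_work *new)
	{
		struct dm_chunk_work *old;

		old = xa_cmpxchg(xa, index, NULL, new, GFP_NOIO);
		if (!old)
			return new;	/* slot was empty; new entry stored */
		kfree(new);		/* either way, our copy is unneeded */
		if (xa_is_err(old))
			return NULL;	/* allocation failed inside the XArray */
		return old;		/* another thread raced us; use theirs */
	}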
 
diff --git a/drivers/md/dm-zoned.h b/drivers/md/dm-zoned.h
index ed8de49c9a08263e8ca25ff2979abb2958b4cde0..94c2072202c8f74a1224de3307c903c79438a3bf 100644
@@ -17,7 +17,7 @@
 #include <linux/workqueue.h>
 #include <linux/rwsem.h>
 #include <linux/rbtree.h>
-#include <linux/radix-tree.h>
+#include <linux/xarray.h>
 #include <linux/shrinker.h>
 
 /*