blk-cgroup: Convert to XArray
author     Matthew Wilcox <willy@infradead.org>
           Wed, 17 Oct 2018 18:37:39 +0000 (14:37 -0400)
committer  Matthew Wilcox (Oracle) <willy@infradead.org>
           Thu, 8 Aug 2019 02:35:35 +0000 (22:35 -0400)
At the point of allocation, we hold not only the xarray lock but also
the queue lock, so we cannot drop the locks and retry the allocation
with GFP_KERNEL.  Instead, use xa_insert() of a NULL pointer ahead of
time to reserve the slot, ensuring the subsequent store will not need
to allocate memory.  The store can then no longer fail, so the error
checks can be removed.

Signed-off-by: Matthew Wilcox <willy@infradead.org>
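
To make the pattern concrete, here is a minimal sketch of the
reserve-then-store idiom this commit relies on.  It is not part of the
patch; example_array and example_attach are hypothetical names:

    #include <linux/xarray.h>

    static DEFINE_XARRAY_FLAGS(example_array, XA_FLAGS_LOCK_IRQ);

    int example_attach(unsigned long id, void *obj)
    {
            int err;

            /* May allocate memory; no spinlocks are held yet. */
            err = xa_insert_irq(&example_array, id, NULL, GFP_KERNEL);
            if (err)
                    return err;     /* -EBUSY if the index is already in use */

            xa_lock_irq(&example_array);
            /* GFP flags of 0: the reservation above means no allocation is needed. */
            __xa_store(&example_array, id, obj, 0);
            xa_unlock_irq(&example_array);
            return 0;
    }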
block/bfq-cgroup.c
block/blk-cgroup.c
include/linux/blk-cgroup.h

diff --git a/block/bfq-cgroup.c b/block/bfq-cgroup.c
index 0f6cd688924fbd23177438f4a2afba03331777bc..9887ba2fdff25cc4e25358ab94e6baeb98fc2568 100644
@@ -931,7 +931,7 @@ static int bfq_io_set_weight_legacy(struct cgroup_subsys_state *css,
                return ret;
 
        ret = 0;
-       spin_lock_irq(&blkcg->lock);
+       xa_lock_irq(&blkcg->blkg_array);
        bfqgd->weight = (unsigned short)val;
        hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
                struct bfq_group *bfqg = blkg_to_bfqg(blkg);
@@ -965,7 +965,7 @@ static int bfq_io_set_weight_legacy(struct cgroup_subsys_state *css,
                        bfqg->entity.prio_changed = 1;
                }
        }
-       spin_unlock_irq(&blkcg->lock);
+       xa_unlock_irq(&blkcg->blkg_array);
 
        return ret;
 }
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 55a7dc227dfbd6af883a2832d09c8abf2c16d3d0..5d76fd10bab88bfac70cf8c606bc340c79a7f923 100644
@@ -197,12 +197,12 @@ struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg,
        struct blkcg_gq *blkg;
 
        /*
-        * Hint didn't match.  Look up from the radix tree.  Note that the
+        * Hint didn't match.  Fetch from the xarray.  Note that the
         * hint can only be updated under queue_lock as otherwise @blkg
-        * could have already been removed from blkg_tree.  The caller is
+        * could have already been removed from blkg_array.  The caller is
         * responsible for grabbing queue_lock if @update_hint.
         */
-       blkg = radix_tree_lookup(&blkcg->blkg_tree, q->id);
+       blkg = xa_load(&blkcg->blkg_array, q->id);
        if (blkg && blkg->q == q) {
                if (update_hint) {
                        lockdep_assert_held(&q->queue_lock);
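
As an aside (a hedged sketch continuing the hypothetical example above,
not code from the patch): xa_load() may be called without taking the
xa_lock, since the XArray handles RCU internally; the returned entry is
only safe to use while the caller's own protection, such as
rcu_read_lock() or the queue_lock here, remains in effect.

    static void example_inspect(unsigned long id)
    {
            void *obj;

            rcu_read_lock();
            obj = xa_load(&example_array, id);
            if (obj)
                    pr_info("id %lu is populated\n", id);
            /* obj must not be used past this point. */
            rcu_read_unlock();
    }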
@@ -279,29 +279,21 @@ static struct blkcg_gq *blkg_create(struct blkcg *blkcg,
                        pol->pd_init_fn(blkg->pd[i]);
        }
 
-       /* insert */
-       spin_lock(&blkcg->lock);
-       ret = radix_tree_insert(&blkcg->blkg_tree, q->id, blkg);
-       if (likely(!ret)) {
-               hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
-               list_add(&blkg->q_node, &q->blkg_list);
+       xa_lock(&blkcg->blkg_array);
+       __xa_store(&blkcg->blkg_array, q->id, blkg, 0);
+       hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
+       list_add(&blkg->q_node, &q->blkg_list);
 
-               for (i = 0; i < BLKCG_MAX_POLS; i++) {
-                       struct blkcg_policy *pol = blkcg_policy[i];
+       for (i = 0; i < BLKCG_MAX_POLS; i++) {
+               struct blkcg_policy *pol = blkcg_policy[i];
 
-                       if (blkg->pd[i] && pol->pd_online_fn)
-                               pol->pd_online_fn(blkg->pd[i]);
-               }
+               if (blkg->pd[i] && pol->pd_online_fn)
+                       pol->pd_online_fn(blkg->pd[i]);
        }
        blkg->online = true;
-       spin_unlock(&blkcg->lock);
-
-       if (!ret)
-               return blkg;
+       xa_unlock(&blkcg->blkg_array);
 
-       /* @blkg failed fully initialized, use the usual release path */
-       blkg_put(blkg);
-       return ERR_PTR(ret);
+       return blkg;
 
 err_put_congested:
        wb_congested_put(wb_congested);
@@ -397,7 +389,7 @@ static void blkg_destroy(struct blkcg_gq *blkg)
        int i;
 
        lockdep_assert_held(&blkg->q->queue_lock);
-       lockdep_assert_held(&blkcg->lock);
+       lockdep_assert_held(&blkcg->blkg_array.xa_lock);
 
        /* Something wrong if we are trying to remove same group twice */
        WARN_ON_ONCE(list_empty(&blkg->q_node));
@@ -417,7 +409,7 @@ static void blkg_destroy(struct blkcg_gq *blkg)
 
        blkg->online = false;
 
-       radix_tree_delete(&blkcg->blkg_tree, blkg->q->id);
+       __xa_erase(&blkcg->blkg_array, blkg->q->id);
        list_del_init(&blkg->q_node);
        hlist_del_init_rcu(&blkg->blkcg_node);
 
@@ -450,9 +442,9 @@ static void blkg_destroy_all(struct request_queue *q)
        list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
                struct blkcg *blkcg = blkg->blkcg;
 
-               spin_lock(&blkcg->lock);
+               xa_lock(&blkcg->blkg_array);
                blkg_destroy(blkg);
-               spin_unlock(&blkcg->lock);
+               xa_unlock(&blkcg->blkg_array);
        }
 
        q->root_blkg = NULL;
@@ -467,7 +459,7 @@ static int blkcg_reset_stats(struct cgroup_subsys_state *css,
        int i;
 
        mutex_lock(&blkcg_pol_mutex);
-       spin_lock_irq(&blkcg->lock);
+       xa_lock_irq(&blkcg->blkg_array);
 
        /*
         * Note that stat reset is racy - it doesn't synchronize against
@@ -486,7 +478,7 @@ static int blkcg_reset_stats(struct cgroup_subsys_state *css,
                }
        }
 
-       spin_unlock_irq(&blkcg->lock);
+       xa_unlock_irq(&blkcg->blkg_array);
        mutex_unlock(&blkcg_pol_mutex);
        return 0;
 }
@@ -1049,7 +1041,7 @@ static void blkcg_css_offline(struct cgroup_subsys_state *css)
  */
 void blkcg_destroy_blkgs(struct blkcg *blkcg)
 {
-       spin_lock_irq(&blkcg->lock);
+       xa_lock_irq(&blkcg->blkg_array);
 
        while (!hlist_empty(&blkcg->blkg_list)) {
                struct blkcg_gq *blkg = hlist_entry(blkcg->blkg_list.first,
@@ -1060,13 +1052,13 @@ void blkcg_destroy_blkgs(struct blkcg *blkcg)
                        blkg_destroy(blkg);
                        spin_unlock(&q->queue_lock);
                } else {
-                       spin_unlock_irq(&blkcg->lock);
+                       xa_unlock_irq(&blkcg->blkg_array);
                        cpu_relax();
-                       spin_lock_irq(&blkcg->lock);
+                       xa_lock_irq(&blkcg->blkg_array);
                }
        }
 
-       spin_unlock_irq(&blkcg->lock);
+       xa_unlock_irq(&blkcg->blkg_array);
 }
 
 static void blkcg_css_free(struct cgroup_subsys_state *css)
@@ -1131,8 +1123,7 @@ blkcg_css_alloc(struct cgroup_subsys_state *parent_css)
                        pol->cpd_init_fn(cpd);
        }
 
-       spin_lock_init(&blkcg->lock);
-       INIT_RADIX_TREE(&blkcg->blkg_tree, GFP_NOWAIT | __GFP_NOWARN);
+       xa_init_flags(&blkcg->blkg_array, XA_FLAGS_LOCK_IRQ);
        INIT_HLIST_HEAD(&blkcg->blkg_list);
 #ifdef CONFIG_CGROUP_WRITEBACK
        INIT_LIST_HEAD(&blkcg->cgwb_list);
@@ -1168,14 +1159,16 @@ unlock:
 int blkcg_init_queue(struct request_queue *q)
 {
        struct blkcg_gq *new_blkg, *blkg;
-       bool preloaded;
        int ret;
 
        new_blkg = blkg_alloc(&blkcg_root, q, GFP_KERNEL);
        if (!new_blkg)
                return -ENOMEM;
 
-       preloaded = !radix_tree_preload(GFP_KERNEL);
+       ret = xa_insert_irq(&blkcg_root.blkg_array, q->id, NULL, GFP_KERNEL);
+       if (ret == -ENOMEM)
+               return -ENOMEM;
+       BUG_ON(ret < 0);
 
        /* Make sure the root blkg exists. */
        rcu_read_lock();
@@ -1187,9 +1180,6 @@ int blkcg_init_queue(struct request_queue *q)
        spin_unlock_irq(&q->queue_lock);
        rcu_read_unlock();
 
-       if (preloaded)
-               radix_tree_preload_end();
-
        ret = blk_iolatency_init(q);
        if (ret)
                goto err_destroy_all;
@@ -1203,10 +1193,9 @@ err_destroy_all:
        blkg_destroy_all(q);
        return ret;
 err_unlock:
+       xa_erase(&blkcg_root.blkg_array, q->id);
        spin_unlock_irq(&q->queue_lock);
        rcu_read_unlock();
-       if (preloaded)
-               radix_tree_preload_end();
        return PTR_ERR(blkg);
 }
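
One consequence of reserving the slot with a NULL entry in
blkcg_init_queue() is that the failure path must release the
reservation explicitly, which is what the xa_erase() added under
err_unlock does; otherwise the slot would stay reserved and a later
xa_insert() for the same queue id would return -EBUSY.  The matching
unwind for the hypothetical example above might look like this:

    /* Release a reservation (or a stored entry) taken by example_attach(). */
    void example_detach(unsigned long id)
    {
            xa_erase_irq(&example_array, id);
    }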
 
diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h
index 12811091fd50bcd485d28c87d4e235890bb6ac92..9fe70e0a74a4be5077d7c1ea88debe94b232021c 100644
@@ -17,7 +17,7 @@
 #include <linux/cgroup.h>
 #include <linux/percpu_counter.h>
 #include <linux/seq_file.h>
-#include <linux/radix-tree.h>
+#include <linux/xarray.h>
 #include <linux/blkdev.h>
 #include <linux/atomic.h>
 #include <linux/kthread.h>
@@ -46,9 +46,8 @@ struct blkcg_gq;
 
 struct blkcg {
        struct cgroup_subsys_state      css;
-       spinlock_t                      lock;
 
-       struct radix_tree_root          blkg_tree;
+       struct xarray                   blkg_array;
        struct blkcg_gq __rcu           *blkg_hint;
        struct hlist_head               blkg_list;