www.infradead.org Git - users/willy/xarray.git/commitdiff
blk-cgroup: Remove blkg_list hlist
authorMatthew Wilcox <willy@infradead.org>
Mon, 18 Mar 2019 15:27:07 +0000 (11:27 -0400)
committerMatthew Wilcox (Oracle) <willy@infradead.org>
Thu, 8 Aug 2019 02:35:35 +0000 (22:35 -0400)
We can iterate over all blkgs belonging to a blkcg using the XArray
iterator instead of maintaining a separate hlist.  This removes a nasty
locking inversion in blkcg_destroy_blkgs().

Signed-off-by: Matthew Wilcox <willy@infradead.org>
block/bfq-cgroup.c
block/blk-cgroup.c
include/linux/blk-cgroup.h

index 9887ba2fdff25cc4e25358ab94e6baeb98fc2568..f2346e0bea5c173ecc10494076ff8a4a0cb59609 100644 (file)
@@ -925,6 +925,7 @@ static int bfq_io_set_weight_legacy(struct cgroup_subsys_state *css,
        struct blkcg *blkcg = css_to_blkcg(css);
        struct bfq_group_data *bfqgd = blkcg_to_bfqgd(blkcg);
        struct blkcg_gq *blkg;
+       unsigned long index;
        int ret = -ERANGE;
 
        if (val < BFQ_MIN_WEIGHT || val > BFQ_MAX_WEIGHT)
@@ -933,7 +934,7 @@ static int bfq_io_set_weight_legacy(struct cgroup_subsys_state *css,
        ret = 0;
        xa_lock_irq(&blkcg->blkg_array);
        bfqgd->weight = (unsigned short)val;
-       hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
+       xa_for_each(&blkcg->blkg_array, index, blkg) {
                struct bfq_group *bfqg = blkg_to_bfqg(blkg);
 
                if (!bfqg)
index 5d76fd10bab88bfac70cf8c606bc340c79a7f923..8f58c83178caafe16247f8976ccbacc6f53da77e 100644 (file)
@@ -281,7 +281,6 @@ static struct blkcg_gq *blkg_create(struct blkcg *blkcg,
 
        xa_lock(&blkcg->blkg_array);
        __xa_store(&blkcg->blkg_array, q->id, blkg, 0);
-       hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
        list_add(&blkg->q_node, &q->blkg_list);
 
        for (i = 0; i < BLKCG_MAX_POLS; i++) {
@@ -393,7 +392,6 @@ static void blkg_destroy(struct blkcg_gq *blkg)
 
        /* Something wrong if we are trying to remove same group twice */
        WARN_ON_ONCE(list_empty(&blkg->q_node));
-       WARN_ON_ONCE(hlist_unhashed(&blkg->blkcg_node));
 
        for (i = 0; i < BLKCG_MAX_POLS; i++) {
                struct blkcg_policy *pol = blkcg_policy[i];
@@ -411,7 +409,6 @@ static void blkg_destroy(struct blkcg_gq *blkg)
 
        __xa_erase(&blkcg->blkg_array, blkg->q->id);
        list_del_init(&blkg->q_node);
-       hlist_del_init_rcu(&blkg->blkcg_node);
 
        /*
         * Both setting lookup hint to and clearing it from @blkg are done
@@ -456,6 +453,7 @@ static int blkcg_reset_stats(struct cgroup_subsys_state *css,
 {
        struct blkcg *blkcg = css_to_blkcg(css);
        struct blkcg_gq *blkg;
+       unsigned long index;
        int i;
 
        mutex_lock(&blkcg_pol_mutex);
@@ -466,7 +464,7 @@ static int blkcg_reset_stats(struct cgroup_subsys_state *css,
         * stat updates.  This is a debug feature which shouldn't exist
         * anyway.  If you get hit by a race, retry.
         */
-       hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
+       xa_for_each(&blkcg->blkg_array, index, blkg) {
                blkg_rwstat_reset(&blkg->stat_bytes);
                blkg_rwstat_reset(&blkg->stat_ios);
 
@@ -516,16 +514,15 @@ void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
                       bool show_total)
 {
        struct blkcg_gq *blkg;
+       unsigned long index;
        u64 total = 0;
 
-       rcu_read_lock();
-       hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
+       xa_for_each(&blkcg->blkg_array, index, blkg) {
                spin_lock_irq(&blkg->q->queue_lock);
                if (blkcg_policy_enabled(blkg->q, pol))
                        total += prfill(sf, blkg->pd[pol->plid], data);
                spin_unlock_irq(&blkg->q->queue_lock);
        }
-       rcu_read_unlock();
 
        if (show_total)
                seq_printf(sf, "Total %llu\n", (unsigned long long)total);
@@ -888,10 +885,10 @@ static int blkcg_print_stat(struct seq_file *sf, void *v)
 {
        struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
        struct blkcg_gq *blkg;
+       unsigned long index;
 
        rcu_read_lock();
-
-       hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
+       xa_for_each(&blkcg->blkg_array, index, blkg) {
                const char *dname;
                char *buf;
                struct blkg_rwstat_sample rwstat;
@@ -1041,24 +1038,18 @@ static void blkcg_css_offline(struct cgroup_subsys_state *css)
  */
 void blkcg_destroy_blkgs(struct blkcg *blkcg)
 {
-       xa_lock_irq(&blkcg->blkg_array);
+       struct blkcg_gq *blkg;
+       unsigned long index;
 
-       while (!hlist_empty(&blkcg->blkg_list)) {
-               struct blkcg_gq *blkg = hlist_entry(blkcg->blkg_list.first,
-                                               struct blkcg_gq, blkcg_node);
+       xa_for_each(&blkcg->blkg_array, index, blkg) {
                struct request_queue *q = blkg->q;
 
-               if (spin_trylock(&q->queue_lock)) {
-                       blkg_destroy(blkg);
-                       spin_unlock(&q->queue_lock);
-               } else {
-                       xa_unlock_irq(&blkcg->blkg_array);
-                       cpu_relax();
-                       xa_lock_irq(&blkcg->blkg_array);
-               }
+               spin_lock_irq(&q->queue_lock);
+               xa_lock(&blkcg->blkg_array);
+               blkg_destroy(blkg);
+               xa_unlock(&blkcg->blkg_array);
+               spin_unlock_irq(&q->queue_lock);
        }
-
-       xa_unlock_irq(&blkcg->blkg_array);
 }
 
 static void blkcg_css_free(struct cgroup_subsys_state *css)
@@ -1124,7 +1115,6 @@ blkcg_css_alloc(struct cgroup_subsys_state *parent_css)
        }
 
        xa_init_flags(&blkcg->blkg_array, XA_FLAGS_LOCK_IRQ);
-       INIT_HLIST_HEAD(&blkcg->blkg_list);
 #ifdef CONFIG_CGROUP_WRITEBACK
        INIT_LIST_HEAD(&blkcg->cgwb_list);
        refcount_set(&blkcg->cgwb_refcnt, 1);
index 9fe70e0a74a4be5077d7c1ea88debe94b232021c..799e40e8501a7c6d9e0865a5f6a3a8936f5961fb 100644 (file)
@@ -49,7 +49,6 @@ struct blkcg {
 
        struct xarray                   blkg_array;
        struct blkcg_gq __rcu           *blkg_hint;
-       struct hlist_head               blkg_list;
 
        struct blkcg_policy_data        *cpd[BLKCG_MAX_POLS];
 
@@ -108,7 +107,6 @@ struct blkcg_gq {
        /* Pointer to the associated request_queue */
        struct request_queue            *q;
        struct list_head                q_node;
-       struct hlist_node               blkcg_node;
        struct blkcg                    *blkcg;
 
        /*