www.infradead.org Git - nvme.git/commitdiff
block: Revert "block: Fix potential deadlock while freezing queue and acquiring sysfs_lock"
author Ming Lei <ming.lei@redhat.com>
Wed, 18 Dec 2024 10:16:14 +0000 (18:16 +0800)
committer Jens Axboe <axboe@kernel.dk>
Wed, 18 Dec 2024 14:25:37 +0000 (07:25 -0700)
This reverts commit be26ba96421ab0a8fa2055ccf7db7832a13c44d2.

Commit be26ba96421a ("block: Fix potential deadlock while freezing queue and
acquiring sysfs_lock") actually reverts commit 22465bbac53c ("blk-mq: move cpuhp
callback registering out of q->sysfs_lock") and reintroduces the original resctrl
lockdep warning.

So revert it; the issue needs to be fixed in another way.

Cc: Nilay Shroff <nilay@linux.ibm.com>
Fixes: be26ba96421a ("block: Fix potential deadlock while freezing queue and acquiring sysfs_lock")
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Link: https://lore.kernel.org/r/20241218101617.3275704-2-ming.lei@redhat.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
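
As background, the lockdep warning in question is, broadly, a lock-ordering problem: two code paths acquire the same pair of locks in inconsistent orders. The minimal userspace C sketch below illustrates only that shape; the pthread mutexes named sysfs_lock and freeze_lock are illustrative stand-ins, not the kernel's q->sysfs_lock or queue-freeze machinery.

/*
 * Userspace analogy only: two mutexes taken in opposite orders on two
 * paths.  "sysfs_lock" and "freeze_lock" are illustrative stand-ins.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t sysfs_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t freeze_lock = PTHREAD_MUTEX_INITIALIZER;

/* Path 1: take "sysfs" first, then "freeze". */
static void path_store(void)
{
        pthread_mutex_lock(&sysfs_lock);
        pthread_mutex_lock(&freeze_lock);
        puts("store path: sysfs_lock -> freeze_lock");
        pthread_mutex_unlock(&freeze_lock);
        pthread_mutex_unlock(&sysfs_lock);
}

/* Path 2: same pair of locks, opposite order. */
static void path_update(void)
{
        pthread_mutex_lock(&freeze_lock);
        pthread_mutex_lock(&sysfs_lock);
        puts("update path: freeze_lock -> sysfs_lock");
        pthread_mutex_unlock(&sysfs_lock);
        pthread_mutex_unlock(&freeze_lock);
}

int main(void)
{
        /*
         * Called sequentially so the demo terminates; if the two paths ran
         * concurrently, each could grab its first lock and wait forever on
         * the second.  Lockdep flags exactly this kind of inverted order.
         */
        path_store();
        path_update();
        return 0;
}
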
block/blk-mq-sysfs.c
block/blk-mq.c
block/blk-sysfs.c

diff --git a/block/blk-mq-sysfs.c b/block/blk-mq-sysfs.c
index cd5ea6eaa76b098b7f713fbf3d42461232dc8cc9..156e9bb07abf1a4801f35f7197674b723155a5d8 100644
--- a/block/blk-mq-sysfs.c
+++ b/block/blk-mq-sysfs.c
@@ -275,13 +275,15 @@ void blk_mq_sysfs_unregister_hctxs(struct request_queue *q)
        struct blk_mq_hw_ctx *hctx;
        unsigned long i;
 
-       lockdep_assert_held(&q->sysfs_dir_lock);
-
+       mutex_lock(&q->sysfs_dir_lock);
        if (!q->mq_sysfs_init_done)
-               return;
+               goto unlock;
 
        queue_for_each_hw_ctx(q, hctx, i)
                blk_mq_unregister_hctx(hctx);
+
+unlock:
+       mutex_unlock(&q->sysfs_dir_lock);
 }
 
 int blk_mq_sysfs_register_hctxs(struct request_queue *q)
@@ -290,10 +292,9 @@ int blk_mq_sysfs_register_hctxs(struct request_queue *q)
        unsigned long i;
        int ret = 0;
 
-       lockdep_assert_held(&q->sysfs_dir_lock);
-
+       mutex_lock(&q->sysfs_dir_lock);
        if (!q->mq_sysfs_init_done)
-               return ret;
+               goto unlock;
 
        queue_for_each_hw_ctx(q, hctx, i) {
                ret = blk_mq_register_hctx(hctx);
@@ -301,5 +302,8 @@ int blk_mq_sysfs_register_hctxs(struct request_queue *q)
                        break;
        }
 
+unlock:
+       mutex_unlock(&q->sysfs_dir_lock);
+
        return ret;
 }
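
The two hunks above restore helpers that take q->sysfs_dir_lock themselves and exit through a single unlock label, rather than asserting that the caller already holds the lock. A standalone userspace sketch of that lock-locally/goto-unlock shape, with hypothetical names (dir_lock, init_done, register_items):

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t dir_lock = PTHREAD_MUTEX_INITIALIZER;
static bool init_done;

/* Same shape as the restored helpers: lock locally, leave via "unlock". */
static int register_items(void)
{
        int ret = 0;

        pthread_mutex_lock(&dir_lock);
        if (!init_done)
                goto unlock;            /* early exit still drops the lock */

        /* ... register each item here, setting ret on failure ... */

unlock:
        pthread_mutex_unlock(&dir_lock);
        return ret;
}

int main(void)
{
        return register_items();
}
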
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 6b6111513986f225d97430fafdfbc106ff8ba0b5..92e8ddf34575d3af33a23256da9481b23747921e 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -4453,8 +4453,7 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
        unsigned long i, j;
 
        /* protect against switching io scheduler  */
-       lockdep_assert_held(&q->sysfs_lock);
-
+       mutex_lock(&q->sysfs_lock);
        for (i = 0; i < set->nr_hw_queues; i++) {
                int old_node;
                int node = blk_mq_get_hctx_node(set, i);
@@ -4487,6 +4486,7 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
 
        xa_for_each_start(&q->hctx_table, j, hctx, j)
                blk_mq_exit_hctx(q, set, hctx, j);
+       mutex_unlock(&q->sysfs_lock);
 
        /* unregister cpuhp callbacks for exited hctxs */
        blk_mq_remove_hw_queues_cpuhp(q);
@@ -4518,14 +4518,10 @@ int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
 
        xa_init(&q->hctx_table);
 
-       mutex_lock(&q->sysfs_lock);
-
        blk_mq_realloc_hw_ctxs(set, q);
        if (!q->nr_hw_queues)
                goto err_hctxs;
 
-       mutex_unlock(&q->sysfs_lock);
-
        INIT_WORK(&q->timeout_work, blk_mq_timeout_work);
        blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30 * HZ);
 
@@ -4544,7 +4540,6 @@ int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
        return 0;
 
 err_hctxs:
-       mutex_unlock(&q->sysfs_lock);
        blk_mq_release(q);
 err_exit:
        q->mq_ops = NULL;
@@ -4925,12 +4920,12 @@ static bool blk_mq_elv_switch_none(struct list_head *head,
                return false;
 
        /* q->elevator needs protection from ->sysfs_lock */
-       lockdep_assert_held(&q->sysfs_lock);
+       mutex_lock(&q->sysfs_lock);
 
        /* the check has to be done with holding sysfs_lock */
        if (!q->elevator) {
                kfree(qe);
-               goto out;
+               goto unlock;
        }
 
        INIT_LIST_HEAD(&qe->node);
@@ -4940,7 +4935,9 @@ static bool blk_mq_elv_switch_none(struct list_head *head,
        __elevator_get(qe->type);
        list_add(&qe->node, head);
        elevator_disable(q);
-out:
+unlock:
+       mutex_unlock(&q->sysfs_lock);
+
        return true;
 }
 
@@ -4969,9 +4966,11 @@ static void blk_mq_elv_switch_back(struct list_head *head,
        list_del(&qe->node);
        kfree(qe);
 
+       mutex_lock(&q->sysfs_lock);
        elevator_switch(q, t);
        /* drop the reference acquired in blk_mq_elv_switch_none */
        elevator_put(t);
+       mutex_unlock(&q->sysfs_lock);
 }
 
 static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
@@ -4991,11 +4990,8 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
        if (set->nr_maps == 1 && nr_hw_queues == set->nr_hw_queues)
                return;
 
-       list_for_each_entry(q, &set->tag_list, tag_set_list) {
-               mutex_lock(&q->sysfs_dir_lock);
-               mutex_lock(&q->sysfs_lock);
+       list_for_each_entry(q, &set->tag_list, tag_set_list)
                blk_mq_freeze_queue(q);
-       }
        /*
         * Switch IO scheduler to 'none', cleaning up the data associated
         * with the previous scheduler. We will switch back once we are done
@@ -5051,11 +5047,8 @@ switch_back:
        list_for_each_entry(q, &set->tag_list, tag_set_list)
                blk_mq_elv_switch_back(&head, q);
 
-       list_for_each_entry(q, &set->tag_list, tag_set_list) {
+       list_for_each_entry(q, &set->tag_list, tag_set_list)
                blk_mq_unfreeze_queue(q);
-               mutex_unlock(&q->sysfs_lock);
-               mutex_unlock(&q->sysfs_dir_lock);
-       }
 
        /* Free the excess tags when nr_hw_queues shrink. */
        for (i = set->nr_hw_queues; i < prev_nr_hw_queues; i++)
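
With this revert, __blk_mq_update_nr_hw_queues() no longer holds q->sysfs_lock and q->sysfs_dir_lock across the whole freeze/realloc/switch/unfreeze sequence; the helpers (blk_mq_realloc_hw_ctxs(), blk_mq_elv_switch_none(), blk_mq_elv_switch_back()) each take q->sysfs_lock only around their own critical section. A simplified userspace sketch of that narrower lock scope, with illustrative names rather than the kernel API:

#include <pthread.h>

static pthread_mutex_t sysfs_lock = PTHREAD_MUTEX_INITIALIZER;

/* The helper takes the lock only around the state it actually protects... */
static void realloc_hw_ctxs(void)
{
        pthread_mutex_lock(&sysfs_lock);        /* e.g. vs. scheduler switch */
        /* ... reallocate hardware contexts ... */
        pthread_mutex_unlock(&sysfs_lock);
}

/* ...so the outer update path freezes/unfreezes without holding it. */
static void update_nr_hw_queues(void)
{
        /* freeze_queues(); */
        realloc_hw_ctxs();
        /* unfreeze_queues(); */
}

int main(void)
{
        update_nr_hw_queues();
        return 0;
}
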
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 64f70c713d2f92ca604fbebbb95f99de11197f6a..767598e719ab0ee8b6bc7ad477ec010fea9c8f07 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -706,11 +706,11 @@ queue_attr_store(struct kobject *kobj, struct attribute *attr,
        if (entry->load_module)
                entry->load_module(disk, page, length);
 
-       mutex_lock(&q->sysfs_lock);
        blk_mq_freeze_queue(q);
+       mutex_lock(&q->sysfs_lock);
        res = entry->store(disk, page, length);
-       blk_mq_unfreeze_queue(q);
        mutex_unlock(&q->sysfs_lock);
+       blk_mq_unfreeze_queue(q);
        return res;
 }
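
The restored queue_attr_store() ordering is: freeze the queue, take q->sysfs_lock only around the ->store() call, release it, then unfreeze. A compact userspace sketch of that nesting, where freeze_queue()/unfreeze_queue() are stand-ins for blk_mq_freeze_queue()/blk_mq_unfreeze_queue():

#include <pthread.h>

static pthread_mutex_t sysfs_lock = PTHREAD_MUTEX_INITIALIZER;

static void freeze_queue(void)   { /* wait out in-flight I/O, block new I/O */ }
static void unfreeze_queue(void) { /* allow I/O again */ }

/* Restored ordering: freeze first, hold sysfs_lock only around the store. */
static int attr_store(void)
{
        int res;

        freeze_queue();
        pthread_mutex_lock(&sysfs_lock);
        res = 0;                        /* ... entry->store(...) ... */
        pthread_mutex_unlock(&sysfs_lock);
        unfreeze_queue();
        return res;
}

int main(void)
{
        return attr_store();
}
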