www.infradead.org Git - users/hch/misc.git/commitdiff
block: add two helpers for registering/un-registering sched debugfs
author Ming Lei <ming.lei@redhat.com>
Mon, 5 May 2025 14:17:43 +0000 (22:17 +0800)
committer Jens Axboe <axboe@kernel.dk>
Tue, 6 May 2025 13:43:42 +0000 (07:43 -0600)
Add blk_mq_sched_reg_debugfs()/blk_mq_sched_unreg_debugfs() to clean up
sched init/exit code a bit.

The order of registering & unregistering debugfs for sched & sched_hctx
is changed slightly, but this is safe because sched & sched_hctx are
guaranteed to be ready when they are exported via debugfs.

Reviewed-by: Yu Kuai <yukuai3@huawei.com>
Reviewed-by: Nilay Shroff <nilay@linux.ibm.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Link: https://lore.kernel.org/r/20250505141805.2751237-6-ming.lei@redhat.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
block/blk-mq-sched.c

index 9b81771774efd0d9427bd34d6e866217c6add02b..2abc5e0704e8864d9a7e01d34e3a7825a1035784 100644 (file)
@@ -434,6 +434,30 @@ static int blk_mq_init_sched_shared_tags(struct request_queue *queue)
        return 0;
 }
 
+/*
+ * Register the elevator's debugfs entries for @q: the queue-level sched
+ * directory first, then the per-hw-context sched entries, all under
+ * q->debugfs_mutex so registration is serialized against other debugfs
+ * updates on this queue.
+ */
+static void blk_mq_sched_reg_debugfs(struct request_queue *q)
+{
+       struct blk_mq_hw_ctx *hctx;
+       unsigned long i;
+
+       mutex_lock(&q->debugfs_mutex);
+       blk_mq_debugfs_register_sched(q);
+       queue_for_each_hw_ctx(q, hctx, i)
+               blk_mq_debugfs_register_sched_hctx(q, hctx);
+       mutex_unlock(&q->debugfs_mutex);
+}
+
+/*
+ * Tear down the elevator's debugfs entries for @q in the reverse order of
+ * registration: per-hw-context entries first, then the queue-level sched
+ * directory, all under q->debugfs_mutex.
+ */
+static void blk_mq_sched_unreg_debugfs(struct request_queue *q)
+{
+       struct blk_mq_hw_ctx *hctx;
+       unsigned long i;
+
+       mutex_lock(&q->debugfs_mutex);
+       queue_for_each_hw_ctx(q, hctx, i)
+               blk_mq_debugfs_unregister_sched_hctx(hctx);
+       blk_mq_debugfs_unregister_sched(q);
+       mutex_unlock(&q->debugfs_mutex);
+}
+
 /* caller must have a reference to @e, will grab another one if successful */
 int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
 {
@@ -467,10 +491,6 @@ int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
        if (ret)
                goto err_free_map_and_rqs;
 
-       mutex_lock(&q->debugfs_mutex);
-       blk_mq_debugfs_register_sched(q);
-       mutex_unlock(&q->debugfs_mutex);
-
        queue_for_each_hw_ctx(q, hctx, i) {
                if (e->ops.init_hctx) {
                        ret = e->ops.init_hctx(hctx, i);
@@ -482,11 +502,11 @@ int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
                                return ret;
                        }
                }
-               mutex_lock(&q->debugfs_mutex);
-               blk_mq_debugfs_register_sched_hctx(q, hctx);
-               mutex_unlock(&q->debugfs_mutex);
        }
 
+       /* sched is initialized, it is ready to export it via debugfs */
+       blk_mq_sched_reg_debugfs(q);
+
        return 0;
 
 err_free_map_and_rqs:
@@ -524,11 +544,10 @@ void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e)
        unsigned long i;
        unsigned int flags = 0;
 
-       queue_for_each_hw_ctx(q, hctx, i) {
-               mutex_lock(&q->debugfs_mutex);
-               blk_mq_debugfs_unregister_sched_hctx(hctx);
-               mutex_unlock(&q->debugfs_mutex);
+       /* unexport via debugfs before exiting sched */
+       blk_mq_sched_unreg_debugfs(q);
 
+       queue_for_each_hw_ctx(q, hctx, i) {
                if (e->type->ops.exit_hctx && hctx->sched_data) {
                        e->type->ops.exit_hctx(hctx, i);
                        hctx->sched_data = NULL;
@@ -536,10 +555,6 @@ void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e)
                flags = hctx->flags;
        }
 
-       mutex_lock(&q->debugfs_mutex);
-       blk_mq_debugfs_unregister_sched(q);
-       mutex_unlock(&q->debugfs_mutex);
-
        if (e->type->ops.exit_sched)
                e->type->ops.exit_sched(e);
        blk_mq_sched_tags_teardown(q, flags);