blk-mq-sched: add new parameter nr_requests in blk_mq_alloc_sched_tags()
author Yu Kuai <yukuai3@huawei.com>
Wed, 10 Sep 2025 08:04:42 +0000 (16:04 +0800)
committer Jens Axboe <axboe@kernel.dk>
Wed, 10 Sep 2025 11:25:56 +0000 (05:25 -0600)
This helper only supports allocating the default number of requests;
add a new parameter to support a specific number of requests.

This prepares for fixing a potential deadlock in the case where nr_requests grows.

Signed-off-by: Yu Kuai <yukuai3@huawei.com>
Reviewed-by: Nilay Shroff <nilay@linux.ibm.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
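
For illustration, a minimal sketch (not part of this patch) of how call sites change with the new parameter; the last caller is hypothetical, anticipating the nr_requests-grow fix this prepares for:

    /* Before this patch: the default depth was computed inside the helper. */
    et = blk_mq_alloc_sched_tags(set, set->nr_hw_queues);

    /* After this patch: callers pass the request count explicitly; existing
     * call sites keep today's behaviour via blk_mq_default_nr_requests().
     */
    et = blk_mq_alloc_sched_tags(set, set->nr_hw_queues,
                                 blk_mq_default_nr_requests(set));

    /* Hypothetical future caller, e.g. when growing q->nr_requests, passing
     * a specific count instead of the default.
     */
    et = blk_mq_alloc_sched_tags(set, set->nr_hw_queues, new_nr_requests);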
block/blk-mq-sched.c
block/blk-mq-sched.h
block/blk-mq.h
block/elevator.c

block/blk-mq-sched.c
index e2ce4a28e6c9e00267f1970b1326554425ae6aa2..d06bb137a74377ad94ca711522a1afb441293d2d 100644
@@ -454,7 +454,7 @@ void blk_mq_free_sched_tags_batch(struct xarray *et_table,
 }
 
 struct elevator_tags *blk_mq_alloc_sched_tags(struct blk_mq_tag_set *set,
-               unsigned int nr_hw_queues)
+               unsigned int nr_hw_queues, unsigned int nr_requests)
 {
        unsigned int nr_tags;
        int i;
@@ -470,13 +470,8 @@ struct elevator_tags *blk_mq_alloc_sched_tags(struct blk_mq_tag_set *set,
                        nr_tags * sizeof(struct blk_mq_tags *), gfp);
        if (!et)
                return NULL;
-       /*
-        * Default to double of smaller one between hw queue_depth and
-        * 128, since we don't split into sync/async like the old code
-        * did. Additionally, this is a per-hw queue depth.
-        */
-       et->nr_requests = 2 * min_t(unsigned int, set->queue_depth,
-                       BLKDEV_DEFAULT_RQ);
+
+       et->nr_requests = nr_requests;
        et->nr_hw_queues = nr_hw_queues;
 
        if (blk_mq_is_shared_tags(set->flags)) {
@@ -521,7 +516,8 @@ int blk_mq_alloc_sched_tags_batch(struct xarray *et_table,
                 * concurrently.
                 */
                if (q->elevator) {
-                       et = blk_mq_alloc_sched_tags(set, nr_hw_queues);
+                       et = blk_mq_alloc_sched_tags(set, nr_hw_queues,
+                                       blk_mq_default_nr_requests(set));
                        if (!et)
                                goto out_unwind;
                        if (xa_insert(et_table, q->id, et, gfp))
block/blk-mq-sched.h
index fe83187f41db4f011fa4fac12fb54ab8123bb176..8e21a6b1415d9da102b52939baaecf235eb447b0 100644
@@ -24,7 +24,7 @@ void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e);
 void blk_mq_sched_free_rqs(struct request_queue *q);
 
 struct elevator_tags *blk_mq_alloc_sched_tags(struct blk_mq_tag_set *set,
-               unsigned int nr_hw_queues);
+               unsigned int nr_hw_queues, unsigned int nr_requests);
 int blk_mq_alloc_sched_tags_batch(struct xarray *et_table,
                struct blk_mq_tag_set *set, unsigned int nr_hw_queues);
 void blk_mq_free_sched_tags(struct elevator_tags *et,
block/blk-mq.h
index 5d42c7d3a952f0c392395f134c1e36d383a19314..3a1d4c37d1bc58b2900c4f61869a2a29fbc613d7 100644
@@ -109,6 +109,17 @@ static inline struct blk_mq_hw_ctx *blk_mq_map_queue(blk_opf_t opf,
        return ctx->hctxs[blk_mq_get_hctx_type(opf)];
 }
 
+/*
+ * Default to double of smaller one between hw queue_depth and
+ * 128, since we don't split into sync/async like the old code
+ * did. Additionally, this is a per-hw queue depth.
+ */
+static inline unsigned int blk_mq_default_nr_requests(
+               struct blk_mq_tag_set *set)
+{
+       return 2 * min_t(unsigned int, set->queue_depth, BLKDEV_DEFAULT_RQ);
+}
+
 /*
  * sysfs helpers
  */
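
For reference, a small userspace sketch of the arithmetic the new helper performs, assuming BLKDEV_DEFAULT_RQ is 128 as the comment above describes (illustrative only, not kernel code):

    #include <stdio.h>

    #define BLKDEV_DEFAULT_RQ 128  /* assumption: the kernel's default request count */

    /* Mirrors blk_mq_default_nr_requests(): double the smaller of the
     * hardware queue depth and BLKDEV_DEFAULT_RQ.
     */
    static unsigned int default_nr_requests(unsigned int queue_depth)
    {
            unsigned int base = queue_depth < BLKDEV_DEFAULT_RQ ?
                                queue_depth : BLKDEV_DEFAULT_RQ;
            return 2 * base;
    }

    int main(void)
    {
            printf("%u\n", default_nr_requests(32));   /* shallow queue: 64 */
            printf("%u\n", default_nr_requests(1024)); /* deep queue, capped: 256 */
            return 0;
    }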
block/elevator.c
index fe96c6f4753ca2618a6d0e3f0ed9e8ccadca6595..e2ebfbf107b3af9f180143a0f0d3eb68bde2e5c1 100644
@@ -669,7 +669,8 @@ static int elevator_change(struct request_queue *q, struct elv_change_ctx *ctx)
        lockdep_assert_held(&set->update_nr_hwq_lock);
 
        if (strncmp(ctx->name, "none", 4)) {
-               ctx->et = blk_mq_alloc_sched_tags(set, set->nr_hw_queues);
+               ctx->et = blk_mq_alloc_sched_tags(set, set->nr_hw_queues,
+                               blk_mq_default_nr_requests(set));
                if (!ctx->et)
                        return -ENOMEM;
        }