www.infradead.org Git - users/jedix/linux-maple.git/commitdiff
Revert "block/mq-deadline: use correct way to throttling write requests"
authorBart Van Assche <bvanassche@acm.org>
Wed, 13 Mar 2024 21:42:18 +0000 (14:42 -0700)
committerJens Axboe <axboe@kernel.dk>
Wed, 13 Mar 2024 21:56:14 +0000 (15:56 -0600)
The code "max(1U, 3 * (1U << shift)  / 4)" comes from the Kyber I/O
scheduler. The Kyber I/O scheduler maintains one internal queue per hwq
and hence derives its async_depth from the number of hwq tags. Using
this approach for the mq-deadline scheduler is wrong since the
mq-deadline scheduler maintains one internal queue for all hwqs
combined. Hence this revert.

Cc: stable@vger.kernel.org
Cc: Damien Le Moal <dlemoal@kernel.org>
Cc: Harshit Mogalapalli <harshit.m.mogalapalli@oracle.com>
Cc: Zhiguo Niu <Zhiguo.Niu@unisoc.com>
Fixes: d47f9717e5cf ("block/mq-deadline: use correct way to throttling write requests")
Signed-off-by: Bart Van Assche <bvanassche@acm.org>
Link: https://lore.kernel.org/r/20240313214218.1736147-1-bvanassche@acm.org
Signed-off-by: Jens Axboe <axboe@kernel.dk>
block/mq-deadline.c

index f958e79277b8bc24e8f26b5375c29f0558ba09ad..02a916ba62ee750d4ad29127604b7d4a0cb474d7 100644 (file)
@@ -646,9 +646,8 @@ static void dd_depth_updated(struct blk_mq_hw_ctx *hctx)
        struct request_queue *q = hctx->queue;
        struct deadline_data *dd = q->elevator->elevator_data;
        struct blk_mq_tags *tags = hctx->sched_tags;
-       unsigned int shift = tags->bitmap_tags.sb.shift;
 
-       dd->async_depth = max(1U, 3 * (1U << shift)  / 4);
+       dd->async_depth = max(1UL, 3 * q->nr_requests / 4);
 
        sbitmap_queue_min_shallow_depth(&tags->bitmap_tags, dd->async_depth);
 }