]> www.infradead.org Git - users/hch/block.git/commitdiff
blk-mq: don't queue plugged passthrough requests into scheduler
authorMing Lei <ming.lei@redhat.com>
Mon, 15 May 2023 14:46:00 +0000 (22:46 +0800)
committerChristoph Hellwig <hch@lst.de>
Thu, 18 May 2023 04:33:50 +0000 (06:33 +0200)
Passthrough requests should never be queued to the I/O scheduler,
as scheduling these opaque requests doesn't make sense, and I/O
schedulers might require req->bio to be always valid.

We never let passthrough requests cross the scheduler before commit
1c2d2fff6dc0 ("block: wire-up support for passthrough plugging"),
so restore this behavior even for passthrough requests issued under
a plug.

Reported-by: Guangwu Zhang <guazhang@redhat.com>
Closes: https://lore.kernel.org/linux-block/CAGS2=YosaYaUTEMU3uaf+y=8MqSrhL7sYsJn8EwbaM=76p_4Qg@mail.gmail.com/
Investigated-by: Yu Kuai <yukuai1@huaweicloud.com>
Fixes: 1c2d2fff6dc0 ("block: wire-up support for passthrough plugging")
Signed-off-by: Ming Lei <ming.lei@redhat.com>
[hch: use blk_mq_insert_requests for passthrough requests,
      fix up the commit message and comments]
Signed-off-by: Christoph Hellwig <hch@lst.de>
block/blk-mq.c

index f6dad0886a2fa1bacac778c093717ba21e24b33e..8b7e4daaa5b70d410cb86691787c65748c75f62c 100644 (file)
@@ -2711,6 +2711,7 @@ static void blk_mq_dispatch_plug_list(struct blk_plug *plug, bool from_sched)
        struct request *requeue_list = NULL;
        struct request **requeue_lastp = &requeue_list;
        unsigned int depth = 0;
+       bool is_passthrough = false;
        LIST_HEAD(list);
 
        do {
@@ -2719,7 +2720,9 @@ static void blk_mq_dispatch_plug_list(struct blk_plug *plug, bool from_sched)
                if (!this_hctx) {
                        this_hctx = rq->mq_hctx;
                        this_ctx = rq->mq_ctx;
-               } else if (this_hctx != rq->mq_hctx || this_ctx != rq->mq_ctx) {
+                       is_passthrough = blk_rq_is_passthrough(rq);
+               } else if (this_hctx != rq->mq_hctx || this_ctx != rq->mq_ctx ||
+                          is_passthrough != blk_rq_is_passthrough(rq)) {
                        rq_list_add_tail(&requeue_lastp, rq);
                        continue;
                }
@@ -2731,7 +2734,8 @@ static void blk_mq_dispatch_plug_list(struct blk_plug *plug, bool from_sched)
        trace_block_unplug(this_hctx->queue, depth, !from_sched);
 
        percpu_ref_get(&this_hctx->queue->q_usage_counter);
-       if (this_hctx->queue->elevator) {
+       /* passthrough requests should never be issued to the I/O scheduler */
+       if (this_hctx->queue->elevator && !is_passthrough) {
                this_hctx->queue->elevator->type->ops.insert_requests(this_hctx,
                                &list, 0);
                blk_mq_run_hw_queue(this_hctx, from_sched);