virtio_blk: reverse request order in virtio_queue_rqs
author Christoph Hellwig <hch@lst.de>
Wed, 13 Nov 2024 15:20:42 +0000 (16:20 +0100)
committer Jens Axboe <axboe@kernel.dk>
Wed, 13 Nov 2024 19:04:26 +0000 (12:04 -0700)
blk_mq_flush_plug_list submits requests in the reverse order of their
submission, which leads to a rather suboptimal I/O pattern, especially
on rotational devices. Fix this by rewriting virtio_queue_rqs so that
it always pops the requests from the passed-in request list and then
adds them to the head of a local submit list. This actually simplifies
the code a bit, as it removes the complicated list splicing, at the
cost of extra updates of the rq_next pointer. As that pointer should be
cache-hot anyway, this is an easy price to pay.
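
To illustrate the idea, here is a minimal userspace sketch (plain C;
struct req, pop and push_head are illustrative stand-ins, not the
kernel's rq_list helpers): popping each entry from a list that arrives
in reverse submission order and pushing it onto the head of a local
list reverses it a second time, so the local list ends up in the
original submission order.

#include <stdio.h>

struct req {
	int id;
	struct req *next;
};

/* Stand-in for a list pop: detach and return the first entry. */
static struct req *pop(struct req **list)
{
	struct req *r = *list;

	if (r)
		*list = r->next;
	return r;
}

/* Stand-in for a head insert: link the entry in front of the list. */
static void push_head(struct req **list, struct req *r)
{
	r->next = *list;
	*list = r;
}

int main(void)
{
	/* The plug list as handed over: 3 -> 2 -> 1, i.e. the reverse
	 * of the order in which the requests were submitted. */
	struct req r1 = { 1, NULL }, r2 = { 2, &r1 }, r3 = { 3, &r2 };
	struct req *rqlist = &r3, *submit_list = NULL, *r;

	while ((r = pop(&rqlist)))
		push_head(&submit_list, r);	/* second reversal */

	for (r = submit_list; r; r = r->next)
		printf("%d ", r->id);		/* prints: 1 2 3 */
	printf("\n");
	return 0;
}

This double reversal is why the head insert marked /* reverse order */
in the new virtio_queue_rqs below ends up submitting the requests in
their original order.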

Fixes: 0e9911fa768f ("virtio-blk: support mq_ops->queue_rqs()")
Signed-off-by: Christoph Hellwig <hch@lst.de>
Link: https://lore.kernel.org/r/20241113152050.157179-3-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
drivers/block/virtio_blk.c

index 0e99a4714928478c1ba81777b8e98448eb5b992a..b25f7c06a28e6143bc5d4542547fd7cf6b95f04a 100644
@@ -471,18 +471,18 @@ static bool virtblk_prep_rq_batch(struct request *req)
        return virtblk_prep_rq(req->mq_hctx, vblk, req, vbr) == BLK_STS_OK;
 }
 
-static bool virtblk_add_req_batch(struct virtio_blk_vq *vq,
+static void virtblk_add_req_batch(struct virtio_blk_vq *vq,
                                        struct request **rqlist)
 {
+       struct request *req;
        unsigned long flags;
-       int err;
        bool kick;
 
        spin_lock_irqsave(&vq->lock, flags);
 
-       while (!rq_list_empty(*rqlist)) {
-               struct request *req = rq_list_pop(rqlist);
+       while ((req = rq_list_pop(rqlist))) {
                struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
+               int err;
 
                err = virtblk_add_req(vq->vq, vbr);
                if (err) {
@@ -495,37 +495,33 @@ static bool virtblk_add_req_batch(struct virtio_blk_vq *vq,
        kick = virtqueue_kick_prepare(vq->vq);
        spin_unlock_irqrestore(&vq->lock, flags);
 
-       return kick;
+       if (kick)
+               virtqueue_notify(vq->vq);
 }
 
 static void virtio_queue_rqs(struct request **rqlist)
 {
-       struct request *req, *next, *prev = NULL;
+       struct request *submit_list = NULL;
        struct request *requeue_list = NULL;
+       struct request **requeue_lastp = &requeue_list;
+       struct virtio_blk_vq *vq = NULL;
+       struct request *req;
 
-       rq_list_for_each_safe(rqlist, req, next) {
-               struct virtio_blk_vq *vq = get_virtio_blk_vq(req->mq_hctx);
-               bool kick;
-
-               if (!virtblk_prep_rq_batch(req)) {
-                       rq_list_move(rqlist, &requeue_list, req, prev);
-                       req = prev;
-                       if (!req)
-                               continue;
-               }
+       while ((req = rq_list_pop(rqlist))) {
+               struct virtio_blk_vq *this_vq = get_virtio_blk_vq(req->mq_hctx);
 
-               if (!next || req->mq_hctx != next->mq_hctx) {
-                       req->rq_next = NULL;
-                       kick = virtblk_add_req_batch(vq, rqlist);
-                       if (kick)
-                               virtqueue_notify(vq->vq);
+               if (vq && vq != this_vq)
+                       virtblk_add_req_batch(vq, &submit_list);
+               vq = this_vq;
 
-                       *rqlist = next;
-                       prev = NULL;
-               } else
-                       prev = req;
+               if (virtblk_prep_rq_batch(req))
+                       rq_list_add(&submit_list, req); /* reverse order */
+               else
+                       rq_list_add_tail(&requeue_lastp, req);
        }
 
+       if (vq)
+               virtblk_add_req_batch(vq, &submit_list);
        *rqlist = requeue_list;
 }