ublk: don't fail request for recovery & reissue in case of ubq->canceling
author Ming Lei <ming.lei@redhat.com>
Wed, 9 Apr 2025 01:14:42 +0000 (09:14 +0800)
committer Jens Axboe <axboe@kernel.dk>
Wed, 9 Apr 2025 13:44:49 +0000 (07:44 -0600)
ubq->canceling is set, with the request queue quiesced, when the io_uring
context is exiting. USER_RECOVERY or !RECOVERY_FAIL_IO requires such requests
to be re-queued and re-dispatched after the device is recovered.

However, commit d796cea7b9f3 ("ublk: implement ->queue_rqs()") may still fail
any request when ubq->canceling is set, which breaks USER_RECOVERY or
!RECOVERY_FAIL_IO.

Fix it by calling __ublk_abort_rq() when ubq->canceling is set.
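
Illustration (not part of the commit): the fix relies on __ublk_abort_rq()
doing the recovery-aware thing instead of hard-failing the request. A minimal
sketch of that assumed behaviour is below; __ublk_abort_rq()'s body is not in
the hunks shown here, ublk_nosrv_should_queue_io() appears in the diff, and
blk_mq_requeue_request()/blk_mq_end_request() are standard blk-mq helpers.

#include <linux/blk-mq.h>

/*
 * Sketch only: assumed behaviour of __ublk_abort_rq(), based on the
 * description above. struct ublk_queue and ublk_nosrv_should_queue_io()
 * are driver-local (see the diff below).
 */
static void __ublk_abort_rq(struct ublk_queue *ubq, struct request *rq)
{
	if (ublk_nosrv_should_queue_io(ubq))
		/* recovery enabled: keep the request around for re-dispatch */
		blk_mq_requeue_request(rq, false);
	else
		/* no recovery configured: end the request with an error */
		blk_mq_end_request(rq, BLK_STS_IOERR);
}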

Reviewed-by: Uday Shankar <ushankar@purestorage.com>
Reported-by: Uday Shankar <ushankar@purestorage.com>
Closes: https://lore.kernel.org/linux-block/Z%2FQkkTRHfRxtN%2FmB@dev-ushankar.dev.purestorage.com/
Fixes: d796cea7b9f3 ("ublk: implement ->queue_rqs()")
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Link: https://lore.kernel.org/r/20250409011444.2142010-3-ming.lei@redhat.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
drivers/block/ublk_drv.c

index 41bed67508f2600b7136d7ca71b267ad68c695fb..d6ca2f1097ad913759f1f660983c1423695c5fb2 100644
@@ -1371,7 +1371,8 @@ static enum blk_eh_timer_return ublk_timeout(struct request *rq)
        return BLK_EH_RESET_TIMER;
 }
 
-static blk_status_t ublk_prep_req(struct ublk_queue *ubq, struct request *rq)
+static blk_status_t ublk_prep_req(struct ublk_queue *ubq, struct request *rq,
+                                 bool check_cancel)
 {
        blk_status_t res;
 
@@ -1390,7 +1391,7 @@ static blk_status_t ublk_prep_req(struct ublk_queue *ubq, struct request *rq)
        if (ublk_nosrv_should_queue_io(ubq) && unlikely(ubq->force_abort))
                return BLK_STS_IOERR;
 
-       if (unlikely(ubq->canceling))
+       if (check_cancel && unlikely(ubq->canceling))
                return BLK_STS_IOERR;
 
        /* fill iod to slot in io cmd buffer */
@@ -1409,7 +1410,7 @@ static blk_status_t ublk_queue_rq(struct blk_mq_hw_ctx *hctx,
        struct request *rq = bd->rq;
        blk_status_t res;
 
-       res = ublk_prep_req(ubq, rq);
+       res = ublk_prep_req(ubq, rq, false);
        if (res != BLK_STS_OK)
                return res;
 
@@ -1441,7 +1442,7 @@ static void ublk_queue_rqs(struct rq_list *rqlist)
                        ublk_queue_cmd_list(ubq, &submit_list);
                ubq = this_q;
 
-               if (ublk_prep_req(ubq, req) == BLK_STS_OK)
+               if (ublk_prep_req(ubq, req, true) == BLK_STS_OK)
                        rq_list_add_tail(&submit_list, req);
                else
                        rq_list_add_tail(&requeue_list, req);
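
Putting the two call sites together (illustration, not part of the commit):
the batch ->queue_rqs() path keeps check_cancel == true, so a request that
hits ubq->canceling is parked on requeue_list and handed back to blk-mq,
which re-dispatches it one at a time through ->queue_rq(); that path now
passes check_cancel == false, so the request reaches the recovery-aware abort
instead of being failed. A rough sketch of the single-request path is below;
only the ublk_prep_req() line is from the hunk above, while the surrounding
context and ublk_queue_cmd() are assumptions about the driver.

/* Sketch of ublk_queue_rq() after this change (context assumed). */
static blk_status_t ublk_queue_rq(struct blk_mq_hw_ctx *hctx,
		const struct blk_mq_queue_data *bd)
{
	struct ublk_queue *ubq = hctx->driver_data;
	struct request *rq = bd->rq;
	blk_status_t res;

	res = ublk_prep_req(ubq, rq, false);	/* don't reject on ->canceling */
	if (res != BLK_STS_OK)
		return res;

	if (unlikely(ubq->canceling)) {
		/* requeue (recovery) or fail via __ublk_abort_rq() */
		__ublk_abort_rq(ubq, rq);
		return BLK_STS_OK;
	}

	ublk_queue_cmd(ubq, rq);	/* normal dispatch to the ublk server */
	return BLK_STS_OK;
}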