ublk: simplify aborting ublk request
author Ming Lei <ming.lei@redhat.com>
Wed, 16 Apr 2025 03:54:41 +0000 (11:54 +0800)
committer Jens Axboe <axboe@kernel.dk>
Thu, 17 Apr 2025 01:33:21 +0000 (19:33 -0600)
Now that ublk_abort_queue() has been moved to the ublk char device release
handler, the request queue is already "quiesced" by the time it runs: either
->canceling was set from the uring_cmd cancel function, or all IOs are
inflight and can't be completed by the ublk server. This makes things much
easier:

- all uring_cmds are done, so we no longer need to mark an io as
  UBLK_IO_FLAG_ABORTED to handle its completion from uring_cmd

- the ublk char device is closed, so no one can hold an IO request reference
  any more; we can simply complete the request, or requeue it when
  ublk_nosrv_should_reissue_outstanding() says so (see the sketch below).
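
For reference, a condensed sketch of the failure path this leaves behind
(mirroring the new __ublk_fail_req() hunk in the diff below, with explanatory
comments added; which branch is taken depends on whether the device is set up
to reissue outstanding requests once the server is gone):

    static void __ublk_fail_req(struct ublk_queue *ubq, struct ublk_io *io,
                    struct request *req)
    {
            /*
             * All uring_cmds were completed before the release handler
             * runs, so no io slot can still be marked active here.
             */
            WARN_ON_ONCE(io->flags & UBLK_IO_FLAG_ACTIVE);

            if (ublk_nosrv_should_reissue_outstanding(ubq->dev)) {
                    /*
                     * Recovery mode: hand the request back to blk-mq so a
                     * new ublk server can serve it later.
                     */
                    blk_mq_requeue_request(req, false);
            } else {
                    /* No server left to serve it: fail the request with -EIO. */
                    io->res = -EIO;
                    __ublk_complete_rq(req);
            }
    }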

Reviewed-by: Uday Shankar <ushankar@purestorage.com>
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Link: https://lore.kernel.org/r/20250416035444.99569-8-ming.lei@redhat.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
drivers/block/ublk_drv.c

index bf708e9e9a126982991618371700aef24b171dd1..2de7b2bd409ddfdc1e9bad68cd942ec0bca7af07 100644 (file)
@@ -122,15 +122,6 @@ struct ublk_uring_cmd_pdu {
  */
 #define UBLK_IO_FLAG_OWNED_BY_SRV 0x02
 
-/*
- * IO command is aborted, so this flag is set in case of
- * !UBLK_IO_FLAG_ACTIVE.
- *
- * After this flag is observed, any pending or new incoming request
- * associated with this io command will be failed immediately
- */
-#define UBLK_IO_FLAG_ABORTED 0x04
-
 /*
  * UBLK_IO_FLAG_NEED_GET_DATA is set because IO command requires
  * get data buffer address from ublksrv.
@@ -1083,12 +1074,6 @@ static inline void __ublk_complete_rq(struct request *req)
        unsigned int unmapped_bytes;
        blk_status_t res = BLK_STS_OK;
 
-       /* called from ublk_abort_queue() code path */
-       if (io->flags & UBLK_IO_FLAG_ABORTED) {
-               res = BLK_STS_IOERR;
-               goto exit;
-       }
-
        /* failed read IO if nothing is read */
        if (!io->res && req_op(req) == REQ_OP_READ)
                io->res = -EIO;
@@ -1138,47 +1123,6 @@ static void ublk_complete_rq(struct kref *ref)
        __ublk_complete_rq(req);
 }
 
-static void ublk_do_fail_rq(struct request *req)
-{
-       struct ublk_queue *ubq = req->mq_hctx->driver_data;
-
-       if (ublk_nosrv_should_reissue_outstanding(ubq->dev))
-               blk_mq_requeue_request(req, false);
-       else
-               __ublk_complete_rq(req);
-}
-
-static void ublk_fail_rq_fn(struct kref *ref)
-{
-       struct ublk_rq_data *data = container_of(ref, struct ublk_rq_data,
-                       ref);
-       struct request *req = blk_mq_rq_from_pdu(data);
-
-       ublk_do_fail_rq(req);
-}
-
-/*
- * Since ublk_rq_task_work_cb always fails requests immediately during
- * exiting, __ublk_fail_req() is only called from abort context during
- * exiting. So lock is unnecessary.
- *
- * Also aborting may not be started yet, keep in mind that one failed
- * request may be issued by block layer again.
- */
-static void __ublk_fail_req(struct ublk_queue *ubq, struct ublk_io *io,
-               struct request *req)
-{
-       WARN_ON_ONCE(io->flags & UBLK_IO_FLAG_ACTIVE);
-
-       if (ublk_need_req_ref(ubq)) {
-               struct ublk_rq_data *data = blk_mq_rq_to_pdu(req);
-
-               kref_put(&data->ref, ublk_fail_rq_fn);
-       } else {
-               ublk_do_fail_rq(req);
-       }
-}
-
 static void ubq_complete_io_cmd(struct ublk_io *io, int res,
                                unsigned issue_flags)
 {
@@ -1667,10 +1611,26 @@ static void ublk_commit_completion(struct ublk_device *ub,
                ublk_put_req_ref(ubq, req);
 }
 
+static void __ublk_fail_req(struct ublk_queue *ubq, struct ublk_io *io,
+               struct request *req)
+{
+       WARN_ON_ONCE(io->flags & UBLK_IO_FLAG_ACTIVE);
+
+       if (ublk_nosrv_should_reissue_outstanding(ubq->dev))
+               blk_mq_requeue_request(req, false);
+       else {
+               io->res = -EIO;
+               __ublk_complete_rq(req);
+       }
+}
+
 /*
- * Called from ubq_daemon context via cancel fn, meantime quiesce ublk
- * blk-mq queue, so we are called exclusively with blk-mq and ubq_daemon
- * context, so everything is serialized.
+ * Called from ublk char device release handler, when any uring_cmd is
+ * done, meantime request queue is "quiesced" since all inflight requests
+ * can't be completed because ublk server is dead.
+ *
+ * So no one can hold our request IO reference any more, simply ignore the
+ * reference, and complete the request immediately
  */
 static void ublk_abort_queue(struct ublk_device *ub, struct ublk_queue *ubq)
 {
@@ -1687,10 +1647,8 @@ static void ublk_abort_queue(struct ublk_device *ub, struct ublk_queue *ubq)
                         * will do it
                         */
                        rq = blk_mq_tag_to_rq(ub->tag_set.tags[ubq->q_id], i);
-                       if (rq && blk_mq_request_started(rq)) {
-                               io->flags |= UBLK_IO_FLAG_ABORTED;
+                       if (rq && blk_mq_request_started(rq))
                                __ublk_fail_req(ubq, io, rq);
-                       }
                }
        }
 }