        queue_delayed_work(nvme_wq, &ctrl->ka_work, delay);
 }
 
-static void nvme_keep_alive_finish(struct request *rq,
-               blk_status_t status, struct nvme_ctrl *ctrl)
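+/*
+ * Completion callback for the keep-alive command, run when the command
+ * completes instead of blocking the keep-alive worker. The owning
+ * controller is recovered from rq->end_io_data, which is set by
+ * nvme_keep_alive_work() when the request is queued.
+ */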
+static enum rq_end_io_ret nvme_keep_alive_end_io(struct request *rq,
+                                                blk_status_t status)
 {
+       struct nvme_ctrl *ctrl = rq->end_io_data;
        unsigned long rtt = jiffies - (rq->deadline - rq->timeout);
        unsigned long delay = nvme_keep_alive_work_period(ctrl);
        enum nvme_ctrl_state state = nvme_ctrl_state(ctrl);
 
        /*
         * Subtract off the keepalive RTT so nvme_keep_alive_work runs
         * at the desired frequency.
         */
        if (rtt <= delay) {
                delay -= rtt;
        } else {
                dev_warn(ctrl->device, "long keepalive RTT (%u ms)\n",
                         jiffies_to_msecs(rtt));
                delay = 0;
        }
 
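+       /*
+        * Everything needed from the request (end_io_data, deadline and
+        * timeout) has been read above, so free it now and keep every
+        * return path below leak-free.
+        */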
+       blk_mq_free_request(rq);
+
        if (status) {
                dev_err(ctrl->device,
                        "failed nvme_keep_alive_end_io error=%d\n",
                                status);
-               return;
+               return RQ_END_IO_NONE;
        }
 
        ctrl->ka_last_check_time = jiffies;
        ctrl->comp_seen = false;
        if (state == NVME_CTRL_LIVE || state == NVME_CTRL_CONNECTING)
                queue_delayed_work(nvme_wq, &ctrl->ka_work, delay);
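+       /*
+        * The request was already freed above, so tell the block layer
+        * no further completion handling is needed.
+        */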
+       return RQ_END_IO_NONE;
 }
 
 static void nvme_keep_alive_work(struct work_struct *work)
 {
        struct nvme_ctrl *ctrl = container_of(to_delayed_work(work),
                        struct nvme_ctrl, ka_work);
        bool comp_seen = ctrl->comp_seen;
        struct request *rq;
-       blk_status_t status;
 
        ctrl->ka_last_check_time = jiffies;
 
        if ((ctrl->ctratt & NVME_CTRL_ATTR_TBKAS) && comp_seen) {
                dev_dbg(ctrl->device,
                        "reschedule traffic based keep-alive timer\n");
                ctrl->comp_seen = false;
                nvme_queue_keep_alive_work(ctrl);
                return;
        }
 
        rq = blk_mq_alloc_request(ctrl->admin_q, nvme_req_op(&ctrl->ka_cmd),
                                  BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT);
        if (IS_ERR(rq)) {
                /* allocation failure, reset the controller */
                dev_err(ctrl->device, "keep-alive failed: %ld\n", PTR_ERR(rq));
                nvme_reset_ctrl(ctrl);
                return;
        }
        nvme_init_request(rq, &ctrl->ka_cmd);
 
        rq->timeout = ctrl->kato * HZ;
-       status = blk_execute_rq(rq, false);
-       nvme_keep_alive_finish(rq, status, ctrl);
-       blk_mq_free_request(rq);
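+       /*
+        * Dispatch without blocking the worker thread; completion handling,
+        * request freeing and rescheduling of the next keep-alive happen in
+        * nvme_keep_alive_end_io().
+        */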
+       rq->end_io = nvme_keep_alive_end_io;
+       rq->end_io_data = ctrl;
+       blk_execute_rq_nowait(rq, false);
 }
 
 static void nvme_start_keep_alive(struct nvme_ctrl *ctrl)