                startka = true;
        spin_unlock_irqrestore(&ctrl->lock, flags);
        if (startka)
-               queue_delayed_work(nvme_wq, &ctrl->ka_work, ctrl->kato * HZ);
+               nvme_queue_keep_alive_work(ctrl);
  }
  
-static int nvme_keep_alive(struct nvme_ctrl *ctrl)
-{
-       struct request *rq;
-
-       rq = nvme_alloc_request(ctrl->admin_q, &ctrl->ka_cmd,
-                       BLK_MQ_REQ_RESERVED);
-       if (IS_ERR(rq))
-               return PTR_ERR(rq);
-
-       rq->timeout = ctrl->kato * HZ;
-       rq->end_io_data = ctrl;
-
-       blk_execute_rq_nowait(NULL, rq, 0, nvme_keep_alive_end_io);
-
-       return 0;
-}
-
  static void nvme_keep_alive_work(struct work_struct *work)
  {
        struct nvme_ctrl *ctrl = container_of(to_delayed_work(work),
                        struct nvme_ctrl, ka_work);
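
The open-coded queue_delayed_work(nvme_wq, &ctrl->ka_work, ctrl->kato * HZ)
call is replaced by the nvme_queue_keep_alive_work() helper, and the
stand-alone nvme_keep_alive() wrapper above is removed, with its reserved
request allocation and submission folded into nvme_keep_alive_work() itself.
A minimal sketch of what the helper plausibly wraps, inferred from the call
it replaces (the real driver may compute the delay differently):

static void nvme_queue_keep_alive_work(struct nvme_ctrl *ctrl)
{
        /* re-arm the keep-alive timer one keep-alive timeout (kato) out */
        queue_delayed_work(nvme_wq, &ctrl->ka_work, ctrl->kato * HZ);
}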
 
                nvmet_tcp_prep_recv_ddgst(cmd);
                return 0;
        }
-       nvmet_tcp_unmap_pdu_iovec(cmd);
  
-       if (!(cmd->flags & NVMET_TCP_F_INIT_FAILED) &&
-           cmd->rbytes_done == cmd->req.transfer_len) {
-               cmd->req.execute(&cmd->req);
-       }
+       if (cmd->rbytes_done == cmd->req.transfer_len)
+               nvmet_tcp_execute_request(cmd);
  
        nvmet_prepare_receive_pdu(queue);
        return 0;
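
With this change the NVMET_TCP_F_INIT_FAILED test is no longer open-coded at
the call site; once all data has been received, completion funnels through
nvmet_tcp_execute_request(), so every receive path applies the same failure
handling. A hedged sketch of such a helper, assuming it centralizes the
INIT_FAILED check the call sites previously carried; the error branch shown
(queueing the response for the failed command) is an assumption, not
necessarily the driver's exact failure path:

static void nvmet_tcp_execute_request(struct nvmet_tcp_cmd *cmd)
{
        if (unlikely(cmd->flags & NVMET_TCP_F_INIT_FAILED))
                nvmet_tcp_queue_response(&cmd->req);    /* assumed error path */
        else
                cmd->req.execute(&cmd->req);
}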