nvmet_req_complete(req, 0);
 }
 
-int nvmet_parse_admin_cmd(struct nvmet_req *req)
+u16 nvmet_parse_admin_cmd(struct nvmet_req *req)
 {
        struct nvme_command *cmd = req->cmd;
+       u16 ret;
 
        req->ns = NULL;
 
-       if (unlikely(!(req->sq->ctrl->cc & NVME_CC_ENABLE))) {
-               pr_err("got admin cmd %d while CC.EN == 0\n",
-                      cmd->common.opcode);
-               return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
-       }
-       if (unlikely(!(req->sq->ctrl->csts & NVME_CSTS_RDY))) {
-               pr_err("got admin cmd %d while CSTS.RDY == 0\n",
-                      cmd->common.opcode);
-               return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
-       }
+       ret = nvmet_check_ctrl_status(req, cmd);
+       if (unlikely(ret))
+               return ret;
 
        switch (cmd->common.opcode) {
        case nvme_admin_get_log_page:
                return 0;
        }
 
-       pr_err("unhandled cmd %d\n", cmd->common.opcode);
+       pr_err("unhandled cmd %d on qid %d\n", cmd->common.opcode,
+              req->sq->qid);
        return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
 }
 
        return status;
 }
 
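+/*
+ * Common check for the admin and I/O command parsers: reject commands
+ * with NVME_SC_CMD_SEQ_ERROR while the controller is not enabled
+ * (CC.EN == 0) or not ready (CSTS.RDY == 0).
+ */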
+u16 nvmet_check_ctrl_status(struct nvmet_req *req, struct nvme_command *cmd)
+{
+       if (unlikely(!(req->sq->ctrl->cc & NVME_CC_ENABLE))) {
+               pr_err("got io cmd %d while CC.EN == 0 on qid = %d\n",
+                      cmd->common.opcode, req->sq->qid);
+               return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
+       }
+
+       if (unlikely(!(req->sq->ctrl->csts & NVME_CSTS_RDY))) {
+               pr_err("got io cmd %d while CSTS.RDY == 0 on qid = %d\n",
+                      cmd->common.opcode, req->sq->qid);
+               req->ns = NULL;
+               return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
+       }
+       return 0;
+}
+
 static bool __nvmet_host_allowed(struct nvmet_subsys *subsys,
                const char *hostnqn)
 {
 
        nvmet_req_complete(req, status);
 }
 
-int nvmet_parse_discovery_cmd(struct nvmet_req *req)
+u16 nvmet_parse_discovery_cmd(struct nvmet_req *req)
 {
        struct nvme_command *cmd = req->cmd;
 
 
        nvmet_req_complete(req, status);
 }
 
-int nvmet_parse_fabrics_cmd(struct nvmet_req *req)
+u16 nvmet_parse_fabrics_cmd(struct nvmet_req *req)
 {
        struct nvme_command *cmd = req->cmd;
 
        goto out;
 }
 
-int nvmet_parse_connect_cmd(struct nvmet_req *req)
+u16 nvmet_parse_connect_cmd(struct nvmet_req *req)
 {
        struct nvme_command *cmd = req->cmd;
 
 
        }
 }
 
-int nvmet_parse_io_cmd(struct nvmet_req *req)
+u16 nvmet_parse_io_cmd(struct nvmet_req *req)
 {
        struct nvme_command *cmd = req->cmd;
+       u16 ret;
 
-       if (unlikely(!(req->sq->ctrl->cc & NVME_CC_ENABLE))) {
-               pr_err("got io cmd %d while CC.EN == 0\n",
-                      cmd->common.opcode);
+       ret = nvmet_check_ctrl_status(req, cmd);
+       if (unlikely(ret)) {
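+               /* failed ctrl check, clear ->ns for the completion path */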
                req->ns = NULL;
-               return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
-       }
-
-       if (unlikely(!(req->sq->ctrl->csts & NVME_CSTS_RDY))) {
-               pr_err("got io cmd %d while CSTS.RDY == 0\n",
-                      cmd->common.opcode);
-               req->ns = NULL;
-               return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
+               return ret;
        }
 
        req->ns = nvmet_find_namespace(req->sq->ctrl, cmd->rw.nsid);
-       if (!req->ns)
+       if (unlikely(!req->ns))
                return NVME_SC_INVALID_NS | NVME_SC_DNR;
 
        switch (cmd->common.opcode) {
                req->execute = nvmet_execute_write_zeroes;
                return 0;
        default:
-               pr_err("unhandled cmd %d\n", cmd->common.opcode);
+               pr_err("unhandled cmd %d on qid %d\n", cmd->common.opcode,
+                      req->sq->qid);
                return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
        }
 }
 
        u8                      log_page;
 };
 
-int nvmet_parse_connect_cmd(struct nvmet_req *req);
-int nvmet_parse_io_cmd(struct nvmet_req *req);
-int nvmet_parse_admin_cmd(struct nvmet_req *req);
-int nvmet_parse_discovery_cmd(struct nvmet_req *req);
-int nvmet_parse_fabrics_cmd(struct nvmet_req *req);
+u16 nvmet_parse_connect_cmd(struct nvmet_req *req);
+u16 nvmet_parse_io_cmd(struct nvmet_req *req);
+u16 nvmet_parse_admin_cmd(struct nvmet_req *req);
+u16 nvmet_parse_discovery_cmd(struct nvmet_req *req);
+u16 nvmet_parse_fabrics_cmd(struct nvmet_req *req);
 
 bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
                struct nvmet_sq *sq, struct nvmet_fabrics_ops *ops);
 u16 nvmet_ctrl_find_get(const char *subsysnqn, const char *hostnqn, u16 cntlid,
                struct nvmet_req *req, struct nvmet_ctrl **ret);
 void nvmet_ctrl_put(struct nvmet_ctrl *ctrl);
+u16 nvmet_check_ctrl_status(struct nvmet_req *req, struct nvme_command *cmd);
 
 struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
                enum nvme_subsys_type type);