}
 }
 
+/*
+ * Handle Identify Controller for the NVM command set (CNS 06h, CSI 00h).
+ *
+ * No NVM command-set-specific controller fields are implemented here, so
+ * the full struct nvme_id_ctrl_nvm payload is zero-filled via
+ * nvmet_zero_sgl() and the request is completed with that call's status
+ * (presumably 0 on success, an NVMe status code on transfer failure —
+ * NOTE(review): confirm against nvmet_zero_sgl()'s contract).
+ */
+static void nvmet_execute_identify_ctrl_nvm(struct nvmet_req *req)
+{
+       /* Not supported: return zeroes */
+       nvmet_req_complete(req,
+                  nvmet_zero_sgl(req, 0, sizeof(struct nvme_id_ctrl_nvm)));
+}
+
 static void nvmet_execute_identify(struct nvmet_req *req)
 {
        if (!nvmet_check_transfer_len(req, NVME_IDENTIFY_DATA_SIZE))
                nvmet_execute_identify_ctrl(req);
                return;
        case NVME_ID_CNS_CS_CTRL:
-               if (IS_ENABLED(CONFIG_BLK_DEV_ZONED)) {
-                       switch (req->cmd->identify.csi) {
-                       case NVME_CSI_ZNS:
-                               return nvmet_execute_identify_cns_cs_ctrl(req);
-                       default:
-                               break;
+               switch (req->cmd->identify.csi) {
+               case NVME_CSI_NVM:
+                       nvmet_execute_identify_ctrl_nvm(req);
+                       return;
+               case NVME_CSI_ZNS:
+                       if (IS_ENABLED(CONFIG_BLK_DEV_ZONED)) {
+                               nvmet_execute_identify_ctrl_zns(req);
+                               return;
                        }
+                       break;
                }
                break;
        case NVME_ID_CNS_NS_ACTIVE_LIST:
 
 u16 blk_to_nvme_status(struct nvmet_req *req, blk_status_t blk_sts);
 
 bool nvmet_bdev_zns_enable(struct nvmet_ns *ns);
-void nvmet_execute_identify_cns_cs_ctrl(struct nvmet_req *req);
+void nvmet_execute_identify_ctrl_zns(struct nvmet_req *req);
 void nvmet_execute_identify_cns_cs_ns(struct nvmet_req *req);
 void nvmet_bdev_execute_zone_mgmt_recv(struct nvmet_req *req);
 void nvmet_bdev_execute_zone_mgmt_send(struct nvmet_req *req);
 
        return true;
 }
 
-void nvmet_execute_identify_cns_cs_ctrl(struct nvmet_req *req)
+void nvmet_execute_identify_ctrl_zns(struct nvmet_req *req)
 {
        u8 zasl = req->sq->ctrl->subsys->zasl;
        struct nvmet_ctrl *ctrl = req->sq->ctrl;