return ret;
 }
+
+/* Zero the CSQ/CRQ base address (low/high), depth and head/tail pointer
+ * registers, detaching the hardware command queues from their DMA rings.
+ * Called with both csq.lock and crq.lock held (see hclge_comm_cmd_uninit()).
+ */
+static void hclge_comm_cmd_uninit_regs(struct hclge_comm_hw *hw)
+{
+       hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CSQ_BASEADDR_L_REG, 0);
+       hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CSQ_BASEADDR_H_REG, 0);
+       hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG, 0);
+       hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CSQ_HEAD_REG, 0);
+       hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CSQ_TAIL_REG, 0);
+       hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CRQ_BASEADDR_L_REG, 0);
+       hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CRQ_BASEADDR_H_REG, 0);
+       hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CRQ_DEPTH_REG, 0);
+       hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CRQ_HEAD_REG, 0);
+       hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CRQ_TAIL_REG, 0);
+}
+
+/**
+ * hclge_comm_cmd_uninit - shut down the command queue interface
+ * @ae_dev: associated ae device, used for the compat-config command
+ * @is_pf: true when called for a PF, false for a VF
+ * @hw: hardware structure holding the command queue state
+ *
+ * Asks the firmware to turn the compatible features back off, marks the
+ * command queue disabled, drains in-flight commands, clears the CSQ/CRQ
+ * hardware registers under both ring locks, and frees the descriptor
+ * rings allocated by hclge_comm_cmd_queue_init().
+ */
+void hclge_comm_cmd_uninit(struct hnae3_ae_dev *ae_dev, bool is_pf,
+                          struct hclge_comm_hw *hw)
+{
+       struct hclge_comm_cmq *cmdq = &hw->cmq;
+
+       /* disable compat features first; new commands are then rejected */
+       hclge_comm_firmware_compat_config(ae_dev, is_pf, hw, false);
+       set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hw->comm_state);
+
+       /* wait to ensure that the firmware completes the possible left
+        * over commands.
+        */
+       msleep(HCLGE_COMM_CMDQ_CLEAR_WAIT_TIME);
+       spin_lock_bh(&cmdq->csq.lock);
+       spin_lock(&cmdq->crq.lock);
+       hclge_comm_cmd_uninit_regs(hw);
+       spin_unlock(&cmdq->crq.lock);
+       spin_unlock_bh(&cmdq->csq.lock);
+
+       hclge_comm_free_cmd_desc(&cmdq->csq);
+       hclge_comm_free_cmd_desc(&cmdq->crq);
+}
+
+/**
+ * hclge_comm_cmd_queue_init - allocate and set up the CSQ/CRQ rings
+ * @pdev: PCI device backing the descriptor allocations
+ * @hw: hardware structure holding the command queue state
+ *
+ * Initializes the ring locks, descriptor counts and Tx write-back
+ * timeout, then allocates descriptor memory for the send queue (CSQ)
+ * and receive queue (CRQ). On CRQ allocation failure the CSQ
+ * descriptors already allocated are freed again.
+ *
+ * Return: 0 on success, or the nonzero error code returned by
+ * hclge_comm_alloc_cmd_queue() on failure.
+ */
+int hclge_comm_cmd_queue_init(struct pci_dev *pdev, struct hclge_comm_hw *hw)
+{
+       struct hclge_comm_cmq *cmdq = &hw->cmq;
+       int ret;
+
+       /* Setup the lock for command queue */
+       spin_lock_init(&cmdq->csq.lock);
+       spin_lock_init(&cmdq->crq.lock);
+
+       cmdq->csq.pdev = pdev;
+       cmdq->crq.pdev = pdev;
+
+       /* Setup the queue entries for use cmd queue */
+       cmdq->csq.desc_num = HCLGE_COMM_NIC_CMQ_DESC_NUM;
+       cmdq->crq.desc_num = HCLGE_COMM_NIC_CMQ_DESC_NUM;
+
+       /* Setup Tx write back timeout */
+       cmdq->tx_timeout = HCLGE_COMM_CMDQ_TX_TIMEOUT;
+
+       /* Setup queue rings */
+       ret = hclge_comm_alloc_cmd_queue(hw, HCLGE_COMM_TYPE_CSQ);
+       if (ret) {
+               dev_err(&pdev->dev, "CSQ ring setup error %d\n", ret);
+               return ret;
+       }
+
+       ret = hclge_comm_alloc_cmd_queue(hw, HCLGE_COMM_TYPE_CRQ);
+       if (ret) {
+               dev_err(&pdev->dev, "CRQ ring setup error %d\n", ret);
+               goto err_csq;
+       }
+
+       return 0;
+err_csq:
+       hclge_comm_free_cmd_desc(&hw->cmq.csq);
+       return ret;
+}
+
+/**
+ * hclge_comm_cmd_init - (re)start the command queue and query firmware
+ * @ae_dev: associated ae device (used for dev_version and logging)
+ * @hw: hardware structure holding the command queue state
+ * @fw_version: out parameter, filled with the queried firmware version
+ * @is_pf: true when called for a PF, false for a VF
+ * @reset_pending: nonzero when a higher-level reset is already pending
+ *
+ * Resets both rings' software indices and programs the ring registers
+ * under the ring locks, re-enables command processing, then queries the
+ * firmware version and device capabilities. Enabling firmware compatible
+ * features is best-effort: a failure there is only warned about.
+ *
+ * Return: 0 on success, -EBUSY when a reset is pending, or the error
+ * returned by the version/capability query.
+ */
+int hclge_comm_cmd_init(struct hnae3_ae_dev *ae_dev, struct hclge_comm_hw *hw,
+                       u32 *fw_version, bool is_pf,
+                       unsigned long reset_pending)
+{
+       struct hclge_comm_cmq *cmdq = &hw->cmq;
+       int ret;
+
+       spin_lock_bh(&cmdq->csq.lock);
+       spin_lock(&cmdq->crq.lock);
+
+       /* both rings start empty after (re)initialization */
+       cmdq->csq.next_to_clean = 0;
+       cmdq->csq.next_to_use = 0;
+       cmdq->crq.next_to_clean = 0;
+       cmdq->crq.next_to_use = 0;
+
+       hclge_comm_cmd_init_regs(hw);
+
+       spin_unlock(&cmdq->crq.lock);
+       spin_unlock_bh(&cmdq->csq.lock);
+
+       clear_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hw->comm_state);
+
+       /* Check if there is new reset pending, because the higher level
+        * reset may happen when lower level reset is being processed.
+        */
+       if (reset_pending) {
+               ret = -EBUSY;
+               goto err_cmd_init;
+       }
+
+       /* get version and device capabilities */
+       ret = hclge_comm_cmd_query_version_and_capability(ae_dev, hw,
+                                                         fw_version, is_pf);
+       if (ret) {
+               dev_err(&ae_dev->pdev->dev,
+                       "failed to query version and capabilities, ret = %d\n",
+                       ret);
+               goto err_cmd_init;
+       }
+
+       dev_info(&ae_dev->pdev->dev,
+                "The firmware version is %lu.%lu.%lu.%lu\n",
+                hnae3_get_field(*fw_version, HNAE3_FW_VERSION_BYTE3_MASK,
+                                HNAE3_FW_VERSION_BYTE3_SHIFT),
+                hnae3_get_field(*fw_version, HNAE3_FW_VERSION_BYTE2_MASK,
+                                HNAE3_FW_VERSION_BYTE2_SHIFT),
+                hnae3_get_field(*fw_version, HNAE3_FW_VERSION_BYTE1_MASK,
+                                HNAE3_FW_VERSION_BYTE1_SHIFT),
+                hnae3_get_field(*fw_version, HNAE3_FW_VERSION_BYTE0_MASK,
+                                HNAE3_FW_VERSION_BYTE0_SHIFT));
+
+       /* VFs on devices below V3 skip the compat-config command */
+       if (!is_pf && ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3)
+               return 0;
+
+       /* ask the firmware to enable some features, driver can work without
+        * it.
+        */
+       ret = hclge_comm_firmware_compat_config(ae_dev, is_pf, hw, true);
+       if (ret)
+               dev_warn(&ae_dev->pdev->dev,
+                        "Firmware compatible features not enabled(%d).\n",
+                        ret);
+       return 0;
+
+err_cmd_init:
+       /* failed mid-init: disable command processing again */
+       set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hw->comm_state);
+
+       return ret;
+}
 
 #define HCLGE_COMM_TYPE_CRQ                    0
 #define HCLGE_COMM_TYPE_CSQ                    1
 
+/* time (ms) to wait for firmware to drain in-flight commands on uninit */
+#define HCLGE_COMM_CMDQ_CLEAR_WAIT_TIME                200
+
+/* bar registers for cmdq */
 #define HCLGE_COMM_NIC_CSQ_BASEADDR_L_REG      0x27000
 #define HCLGE_COMM_NIC_CSQ_BASEADDR_H_REG      0x27004
 #define HCLGE_COMM_NIC_CSQ_DEPTH_REG           0x27008
 #define HCLGE_COMM_NIC_CRQ_DEPTH_REG           0x27020
 #define HCLGE_COMM_NIC_CRQ_TAIL_REG            0x27024
 #define HCLGE_COMM_NIC_CRQ_HEAD_REG            0x27028
+/* Vector0 interrupt CMDQ event source register(RW) */
+#define HCLGE_COMM_VECTOR0_CMDQ_SRC_REG                0x27100
+/* Vector0 interrupt CMDQ event status register(RO) */
+#define HCLGE_COMM_VECTOR0_CMDQ_STATE_REG      0x27104
+#define HCLGE_COMM_CMDQ_INTR_EN_REG            0x27108
+#define HCLGE_COMM_CMDQ_INTR_GEN_REG           0x2710C
+#define HCLGE_COMM_CMDQ_INTR_STS_REG           0x27104
 
 /* this bit indicates that the driver is ready for hardware reset */
 #define HCLGE_COMM_NIC_SW_RST_RDY_B            16
 #define HCLGE_COMM_NIC_SW_RST_RDY              BIT(HCLGE_COMM_NIC_SW_RST_RDY_B)
 #define HCLGE_COMM_NIC_CMQ_DESC_NUM_S          3
+/* number of descriptors in each CSQ/CRQ ring */
+#define HCLGE_COMM_NIC_CMQ_DESC_NUM            1024
+/* Tx write-back timeout; units set by the cmdq send path — TODO confirm */
+#define HCLGE_COMM_CMDQ_TX_TIMEOUT             30000
 
 enum hclge_comm_cmd_return_status {
        HCLGE_COMM_CMD_EXEC_SUCCESS     = 0,
 void hclge_comm_cmd_setup_basic_desc(struct hclge_desc *desc,
                                     enum hclge_comm_opcode_type opcode,
                                     bool is_read);
+/* Tear down the command queue: disable compat features, clear the ring
+ * registers and free the descriptor rings.
+ */
+void hclge_comm_cmd_uninit(struct hnae3_ae_dev *ae_dev, bool is_pf,
+                          struct hclge_comm_hw *hw);
+/* Allocate and initialize the CSQ/CRQ descriptor rings and their locks. */
+int hclge_comm_cmd_queue_init(struct pci_dev *pdev, struct hclge_comm_hw *hw);
+/* Program the ring registers, query firmware version/capabilities and
+ * (best-effort) enable firmware compatible features.
+ */
+int hclge_comm_cmd_init(struct hnae3_ae_dev *ae_dev, struct hclge_comm_hw *hw,
+                       u32 *fw_version, bool is_pf,
+                       unsigned long reset_pending);
+
 #endif