#include <linux/etherdevice.h>
 
 #define HCLGE_CMDQ_TX_TIMEOUT          30000
+#define HCLGE_DESC_DATA_LEN            6
 
 struct hclge_dev;
 struct hclge_desc {
        __le16 flag;
        __le16 retval;
        __le16 rsv;
-       __le32 data[6];
+       __le32 data[HCLGE_DESC_DATA_LEN];
 };
 
 struct hclge_cmq_ring {
 #define HCLGE_PF_MAC_NUM_MASK  0x3
 #define HCLGE_PF_STATE_MAIN    BIT(HCLGE_PF_STATE_MAIN_B)
 #define HCLGE_PF_STATE_DONE    BIT(HCLGE_PF_STATE_DONE_B)
+#define HCLGE_VF_RST_STATUS_CMD        4
+
 struct hclge_func_status_cmd {
-       __le32  vf_rst_state[4];
+       __le32  vf_rst_state[HCLGE_VF_RST_STATUS_CMD];
        u8 pf_state;
        u8 mac_id;
        u8 rsv1;
 #define HCLGE_CFG_UMV_TBL_SPACE_S      16
 #define HCLGE_CFG_UMV_TBL_SPACE_M      GENMASK(31, 16)
 
+#define HCLGE_CFG_CMD_CNT              4
+
 struct hclge_cfg_param_cmd {
        __le32 offset;
        __le32 rsv;
-       __le32 param[4];
+       __le32 param[HCLGE_CFG_CMD_CNT];
 };
 
 #define HCLGE_MAC_MODE         0x0
        u8 rsv2[19];
 };
 
+#define HCLGE_VLAN_ID_OFFSET_STEP      160
+#define HCLGE_VLAN_BYTE_SIZE           8
+#define HCLGE_VLAN_OFFSET_BITMAP \
+       (HCLGE_VLAN_ID_OFFSET_STEP / HCLGE_VLAN_BYTE_SIZE)
+
 struct hclge_vlan_filter_pf_cfg_cmd {
        u8 vlan_offset;
        u8 vlan_cfg;
        u8 rsv[2];
-       u8 vlan_offset_bitmap[20];
+       u8 vlan_offset_bitmap[HCLGE_VLAN_OFFSET_BITMAP];
 };
 
+#define HCLGE_MAX_VF_BYTES  16
+
 struct hclge_vlan_filter_vf_cfg_cmd {
        __le16 vlan_id;
        u8  resp_code;
        u8  rsv;
        u8  vlan_cfg;
        u8  rsv1[3];
-       u8  vf_bitmap[16];
+       u8  vf_bitmap[HCLGE_MAX_VF_BYTES];
 };
 
 #define HCLGE_SWITCH_ANTI_SPOOF_B      0U
 #define HCLGE_CFG_NIC_ROCE_SEL_B       4
 #define HCLGE_ACCEPT_TAG2_B            5
 #define HCLGE_ACCEPT_UNTAG2_B          6
+#define HCLGE_VF_NUM_PER_BYTE          8
 
 struct hclge_vport_vtag_tx_cfg_cmd {
        u8 vport_vlan_cfg;
        u8 rsv1[2];
        __le16 def_vlan_tag1;
        __le16 def_vlan_tag2;
-       u8 vf_bitmap[8];
+       u8 vf_bitmap[HCLGE_VF_NUM_PER_BYTE];
        u8 rsv2[8];
 };
 
        u8 vport_vlan_cfg;
        u8 vf_offset;
        u8 rsv1[6];
-       u8 vf_bitmap[8];
+       u8 vf_bitmap[HCLGE_VF_NUM_PER_BYTE];
        u8 rsv2[8];
 };
 
        u8      flags;
        u8      resp_code;
        __le16  vlan_tag;
-       u8      mac_addr[6];
+       u8      mac_addr[ETH_ALEN];
        __le16  index;
        __le16  ethter_type;
        __le16  egress_port;
 
                                    bool is_kill, u16 vlan,
                                    __be16 proto)
 {
-#define HCLGE_MAX_VF_BYTES  16
-
        struct hclge_vport *vport = &hdev->vport[vfid];
        struct hclge_vlan_filter_vf_cfg_cmd *req0;
        struct hclge_vlan_filter_vf_cfg_cmd *req1;
 
        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
 
-       vlan_offset_160 = vlan_id / 160;
-       vlan_offset_byte = (vlan_id % 160) / 8;
-       vlan_offset_byte_val = 1 << (vlan_id % 8);
+       vlan_offset_160 = vlan_id / HCLGE_VLAN_ID_OFFSET_STEP;
+       vlan_offset_byte = (vlan_id % HCLGE_VLAN_ID_OFFSET_STEP) /
+                          HCLGE_VLAN_BYTE_SIZE;
+       vlan_offset_byte_val = 1 << (vlan_id % HCLGE_VLAN_BYTE_SIZE);
 
        req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
        req->vlan_offset = vlan_offset_160;
 
 
 /* Factor used to calculate offset and bitmap of VF num */
 #define HCLGE_VF_NUM_PER_CMD           64
-#define HCLGE_VF_NUM_PER_BYTE          8
 
 enum HLCGE_PORT_TYPE {
        HOST_PORT,
 
        qid_in_pf = hclge_covert_handle_qid_global(&vport->nic, queue_id);
        memcpy(resp_data, &qid_in_pf, sizeof(qid_in_pf));
 
-       return hclge_gen_resp_to_vf(vport, mbx_req, 0, resp_data, 2);
+       return hclge_gen_resp_to_vf(vport, mbx_req, 0, resp_data,
+                                   sizeof(resp_data));
 }
 
 static int hclge_get_rss_key(struct hclge_vport *vport,
 
        u32 reg_val;
 
        if (ring->flag == HCLGEVF_TYPE_CSQ) {
-               reg_val = (u32)ring->desc_dma_addr;
+               reg_val = lower_32_bits(ring->desc_dma_addr);
                hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_L_REG, reg_val);
-               reg_val = (u32)((ring->desc_dma_addr >> 31) >> 1);
+               reg_val = upper_32_bits(ring->desc_dma_addr);
                hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_H_REG, reg_val);
 
                reg_val = hclgevf_read_dev(hw, HCLGEVF_NIC_CSQ_DEPTH_REG);
                hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG, 0);
                hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_TAIL_REG, 0);
        } else {
-               reg_val = (u32)ring->desc_dma_addr;
+               reg_val = lower_32_bits(ring->desc_dma_addr);
                hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_L_REG, reg_val);
-               reg_val = (u32)((ring->desc_dma_addr >> 31) >> 1);
+               reg_val = upper_32_bits(ring->desc_dma_addr);
                hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_H_REG, reg_val);
 
                reg_val = (ring->desc_num >> HCLGEVF_NIC_CMQ_DESC_NUM_S);
 
 
 static void hclgevf_reset_service_task(struct work_struct *work)
 {
+#define HCLGEVF_MAX_RESET_ATTEMPTS_CNT	3
+
        struct hclgevf_dev *hdev =
                container_of(work, struct hclgevf_dev, rst_service_task);
        int ret;
                 * We cannot do much for 2. but to check first we can try reset
                 * our PCIe + stack and see if it alleviates the problem.
                 */
-               if (hdev->reset_attempts > 3) {
+               if (hdev->reset_attempts > HCLGEVF_MAX_RESET_ATTEMPTS_CNT) {
                        /* prepare for full reset of stack + pcie interface */
                        set_bit(HNAE3_VF_FULL_RESET, &hdev->reset_pending);