__le32 l_key;
 };
 
+/* Rdma doorbell data for flags update */
+struct rdma_pwm_flags_data {
+       __le16 icid; /* internal CID */
+       u8 agg_flags; /* aggregative flags */
+       u8 reserved;
+};
+
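A minimal usage sketch (not part of the patch) for the new structure: the caller fills the payload before ringing the doorbell. Here icid and flags are assumed to come from the surrounding connection context, and cpu_to_le16() is the standard kernel byte-order helper.

	struct rdma_pwm_flags_data db_data = {
		.icid      = cpu_to_le16(icid),	/* internal CID of the connection */
		.agg_flags = flags,		/* aggregative flags to update */
	};
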
 /* Rdma doorbell data for SQ and RQ */
 struct rdma_pwm_val16_data {
        __le16 icid;
 #define RDMA_PWM_VAL32_DATA_AGG_CMD_SHIFT              0
 #define RDMA_PWM_VAL32_DATA_BYPASS_EN_MASK             0x1
 #define RDMA_PWM_VAL32_DATA_BYPASS_EN_SHIFT            2
-#define RDMA_PWM_VAL32_DATA_RESERVED_MASK              0x1F
-#define RDMA_PWM_VAL32_DATA_RESERVED_SHIFT             3
+#define RDMA_PWM_VAL32_DATA_CONN_TYPE_IS_IWARP_MASK    0x1
+#define RDMA_PWM_VAL32_DATA_CONN_TYPE_IS_IWARP_SHIFT   3
+#define RDMA_PWM_VAL32_DATA_SET_16B_VAL_MASK           0x1
+#define RDMA_PWM_VAL32_DATA_SET_16B_VAL_SHIFT          4
+#define RDMA_PWM_VAL32_DATA_RESERVED_MASK              0x7
+#define RDMA_PWM_VAL32_DATA_RESERVED_SHIFT             5
        __le32 value;
 };
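The new CONN_TYPE_IS_IWARP and SET_16B_VAL bits are plain mask/shift fields, so they can be set by hand as in the sketch below (illustrative only, not part of the patch). The byte holding them is modeled here as a local u8; in the driver it would be the corresponding flags byte of rdma_pwm_val32_data.

	u8 params = 0;

	/* Mark the connection as iWARP and request a 16-bit value update */
	params |= (1 & RDMA_PWM_VAL32_DATA_CONN_TYPE_IS_IWARP_MASK) <<
		  RDMA_PWM_VAL32_DATA_CONN_TYPE_IS_IWARP_SHIFT;
	params |= (1 & RDMA_PWM_VAL32_DATA_SET_16B_VAL_MASK) <<
		  RDMA_PWM_VAL32_DATA_SET_16B_VAL_SHIFT;
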
 
 #define RDMA_SQ_FMR_WQE_DIF_VALIDATE_APP_TAG_SHIFT     5
 #define RDMA_SQ_FMR_WQE_DIF_CRC_SEED_MASK              0x1
 #define RDMA_SQ_FMR_WQE_DIF_CRC_SEED_SHIFT             6
-#define RDMA_SQ_FMR_WQE_RESERVED4_MASK                 0x1FF
-#define RDMA_SQ_FMR_WQE_RESERVED4_SHIFT                        7
+#define RDMA_SQ_FMR_WQE_DIF_RX_REF_TAG_CONST_MASK      0x1
+#define RDMA_SQ_FMR_WQE_DIF_RX_REF_TAG_CONST_SHIFT     7
+#define RDMA_SQ_FMR_WQE_RESERVED4_MASK                 0xFF
+#define RDMA_SQ_FMR_WQE_RESERVED4_SHIFT                        8
        __le32 reserved5;
 };
 
 #define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_APP_TAG_SHIFT         5
 #define RDMA_SQ_FMR_WQE_3RD_DIF_CRC_SEED_MASK                  0x1
 #define RDMA_SQ_FMR_WQE_3RD_DIF_CRC_SEED_SHIFT                 6
-#define RDMA_SQ_FMR_WQE_3RD_RESERVED4_MASK                     0x1FF
-#define RDMA_SQ_FMR_WQE_3RD_RESERVED4_SHIFT                    7
+#define RDMA_SQ_FMR_WQE_3RD_DIF_RX_REF_TAG_CONST_MASK          0x1
+#define RDMA_SQ_FMR_WQE_3RD_DIF_RX_REF_TAG_CONST_SHIFT         7
+#define RDMA_SQ_FMR_WQE_3RD_RESERVED4_MASK                     0xFF
+#define RDMA_SQ_FMR_WQE_3RD_RESERVED4_SHIFT                    8
        __le32 reserved5;
 };
 
 #define RDMA_SQ_RDMA_WQE_INLINE_FLG_SHIFT      4
 #define RDMA_SQ_RDMA_WQE_DIF_ON_HOST_FLG_MASK  0x1
 #define RDMA_SQ_RDMA_WQE_DIF_ON_HOST_FLG_SHIFT 5
-#define RDMA_SQ_RDMA_WQE_RESERVED0_MASK                0x3
-#define RDMA_SQ_RDMA_WQE_RESERVED0_SHIFT       6
+#define RDMA_SQ_RDMA_WQE_READ_INV_FLG_MASK     0x1
+#define RDMA_SQ_RDMA_WQE_READ_INV_FLG_SHIFT    6
+#define RDMA_SQ_RDMA_WQE_RESERVED0_MASK                0x1
+#define RDMA_SQ_RDMA_WQE_RESERVED0_SHIFT       7
        u8 wqe_size;
        u8 prev_wqe_size;
        struct regpair remote_va;
 
        tristate "QLogic QED 25/40/100Gb core driver"
        depends on PCI
        select ZLIB_INFLATE
+       select CRC8
        ---help---
          This enables the support for ...
 
 
        return sw_fid;
 }
 
-#define PURE_LB_TC 8
 #define PKT_LB_TC      9
+#define MAX_NUM_VOQS_E4        20
 
 int qed_configure_vport_wfq(struct qed_dev *cdev, u16 vp_id, u32 rate);
 void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev,
 
        p_blk = qed_cxt_set_blk(&p_cli->pf_blks[0]);
 
        qed_cxt_qm_iids(p_hwfn, &qm_iids);
-       total = qed_qm_pf_mem_size(p_hwfn->rel_pf_id, qm_iids.cids,
+       total = qed_qm_pf_mem_size(qm_iids.cids,
                                   qm_iids.vf_cids, qm_iids.tids,
                                   p_hwfn->qm_info.num_pqs,
                                   p_hwfn->qm_info.num_vf_pqs);
        }
 }
 
-void qed_qm_init_pf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+void qed_qm_init_pf(struct qed_hwfn *p_hwfn,
+                   struct qed_ptt *p_ptt, bool is_pf_loading)
 {
-       struct qed_qm_pf_rt_init_params params;
        struct qed_qm_info *qm_info = &p_hwfn->qm_info;
+       struct qed_qm_pf_rt_init_params params;
+       struct qed_mcp_link_state *p_link;
        struct qed_qm_iids iids;
 
        memset(&iids, 0, sizeof(iids));
        qed_cxt_qm_iids(p_hwfn, &iids);
 
+       p_link = &QED_LEADING_HWFN(p_hwfn->cdev)->mcp_info->link_output;
+
        memset(&params, 0, sizeof(params));
        params.port_id = p_hwfn->port_id;
        params.pf_id = p_hwfn->rel_pf_id;
        params.max_phys_tcs_per_port = qm_info->max_phys_tcs_per_port;
-       params.is_first_pf = p_hwfn->first_on_engine;
+       params.is_pf_loading = is_pf_loading;
        params.num_pf_cids = iids.cids;
        params.num_vf_cids = iids.vf_cids;
        params.num_tids = iids.tids;
        params.num_vports = qm_info->num_vports;
        params.pf_wfq = qm_info->pf_wfq;
        params.pf_rl = qm_info->pf_rl;
+       params.link_speed = p_link->speed;
        params.pq_params = qm_info->qm_pq_params;
        params.vport_params = qm_info->qm_vport_params;
 
 
 void qed_cxt_hw_init_pf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 {
-       qed_qm_init_pf(p_hwfn, p_ptt);
+       qed_qm_init_pf(p_hwfn, p_ptt, true);
        qed_cm_init_pf(p_hwfn);
        qed_dq_init_pf(p_hwfn);
        qed_cdu_init_pf(p_hwfn);
 
  *
  * @param p_hwfn
  * @param p_ptt
+ * @param is_pf_loading
  */
-void qed_qm_init_pf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+void qed_qm_init_pf(struct qed_hwfn *p_hwfn,
+                   struct qed_ptt *p_ptt, bool is_pf_loading);
 
 /**
  * @brief Reconfigures QM pf on the fly
 
                                   struct pf_update_ramrod_data *p_dest)
 {
        struct protocol_dcb_data *p_dcb_data;
-       bool update_flag = false;
-
-       p_dest->pf_id = p_src->pf_id;
+       u8 update_flag;
 
        update_flag = p_src->arr[DCBX_PROTOCOL_FCOE].update;
        p_dest->update_fcoe_dcb_data_mode = update_flag;
 
        MEM_GROUP_DMAE_MEM,
        MEM_GROUP_CM_MEM,
        MEM_GROUP_QM_MEM,
-       MEM_GROUP_TM_MEM,
+       MEM_GROUP_DORQ_MEM,
        MEM_GROUP_BRB_RAM,
        MEM_GROUP_BRB_MEM,
        MEM_GROUP_PRS_MEM,
-       MEM_GROUP_SDM_MEM,
        MEM_GROUP_IOR,
-       MEM_GROUP_RAM,
        MEM_GROUP_BTB_RAM,
-       MEM_GROUP_RDIF_CTX,
-       MEM_GROUP_TDIF_CTX,
-       MEM_GROUP_CFC_MEM,
        MEM_GROUP_CONN_CFC_MEM,
        MEM_GROUP_TASK_CFC_MEM,
        MEM_GROUP_CAU_PI,
        MEM_GROUP_CAU_MEM,
        MEM_GROUP_PXP_ILT,
+       MEM_GROUP_TM_MEM,
+       MEM_GROUP_SDM_MEM,
        MEM_GROUP_PBUF,
+       MEM_GROUP_RAM,
        MEM_GROUP_MULD_MEM,
        MEM_GROUP_BTB_MEM,
+       MEM_GROUP_RDIF_CTX,
+       MEM_GROUP_TDIF_CTX,
+       MEM_GROUP_CFC_MEM,
        MEM_GROUP_IGU_MEM,
        MEM_GROUP_IGU_MSIX,
        MEM_GROUP_CAU_SB,
        "DMAE_MEM",
        "CM_MEM",
        "QM_MEM",
-       "TM_MEM",
+       "DORQ_MEM",
        "BRB_RAM",
        "BRB_MEM",
        "PRS_MEM",
-       "SDM_MEM",
        "IOR",
-       "RAM",
        "BTB_RAM",
-       "RDIF_CTX",
-       "TDIF_CTX",
-       "CFC_MEM",
        "CONN_CFC_MEM",
        "TASK_CFC_MEM",
        "CAU_PI",
        "CAU_MEM",
        "PXP_ILT",
+       "TM_MEM",
+       "SDM_MEM",
        "PBUF",
+       "RAM",
        "MULD_MEM",
        "BTB_MEM",
+       "RDIF_CTX",
+       "TDIF_CTX",
+       "CFC_MEM",
        "IGU_MEM",
        "IGU_MSIX",
        "CAU_SB",
        return ((r[0] >> imm[0]) & imm[1]) != imm[2];
 }
 
-static u32 cond14(const u32 *r, const u32 *imm)
-{
-       return (r[0] != imm[0]) && (((r[1] >> imm[1]) & imm[2]) == imm[3]);
-}
-
 static u32 cond6(const u32 *r, const u32 *imm)
 {
        return (r[0] & imm[0]) != imm[1];
        cond11,
        cond12,
        cond13,
-       cond14,
 };
 
 /******************************* Data Types **********************************/
 struct platform_defs {
        const char *name;
        u32 delay_factor;
+       u32 dmae_thresh;
+       u32 log_thresh;
 };
 
 /* Storm constant definitions.
 /* Block constant definitions */
 struct block_defs {
        const char *name;
-       bool has_dbg_bus[MAX_CHIP_IDS];
+       bool exists[MAX_CHIP_IDS];
        bool associated_to_storm;
 
        /* Valid only if associated_to_storm is true */
 /* Reset register definitions */
 struct reset_reg_defs {
        u32 addr;
-       u32 unreset_val;
        bool exists[MAX_CHIP_IDS];
+       u32 unreset_val[MAX_CHIP_IDS];
 };
 
 struct grc_param_defs {
        const char *mem_name;
        const char *type_name;
        u32 addr;
+       u32 entry_width;
        u32 num_entries[MAX_CHIP_IDS];
-       u32 entry_width[MAX_CHIP_IDS];
 };
 
 struct vfc_ram_defs {
        enum dbg_grc_params grc_param;
        u32 addr_reg_addr;
        u32 data_reg_addr;
-       u32 num_of_blocks[MAX_CHIP_IDS];
+       u32 is_256b_reg_addr;
+       u32 is_256b_bit_offset[MAX_CHIP_IDS];
+       u32 ram_size[MAX_CHIP_IDS]; /* In dwords */
 };
 
 struct phy_defs {
 #define NUM_RSS_MEM_TYPES              5
 
 #define NUM_BIG_RAM_TYPES              3
-#define BIG_RAM_BLOCK_SIZE_BYTES       128
-#define BIG_RAM_BLOCK_SIZE_DWORDS \
-       BYTES_TO_DWORDS(BIG_RAM_BLOCK_SIZE_BYTES)
 
 #define NUM_PHY_TBUS_ADDRESSES         2048
 #define PHY_DUMP_SIZE_DWORDS           (NUM_PHY_TBUS_ADDRESSES / 2)
           {0, 0, 0} } },
        { "ah",
          {{MAX_NUM_PORTS_K2, MAX_NUM_PFS_K2, MAX_NUM_VFS_K2},
+          {0, 0, 0},
+          {0, 0, 0},
+          {0, 0, 0} } },
+       { "reserved",
+          {{0, 0, 0},
           {0, 0, 0},
           {0, 0, 0},
           {0, 0, 0} } }
 static struct storm_defs s_storm_defs[] = {
        /* Tstorm */
        {'T', BLOCK_TSEM,
-        {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT}, true,
+        {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT,
+         DBG_BUS_CLIENT_RBCT}, true,
         TSEM_REG_FAST_MEMORY,
         TSEM_REG_DBG_FRAME_MODE_BB_K2, TSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
         TSEM_REG_SLOW_DBG_MODE_BB_K2, TSEM_REG_DBG_MODE1_CFG_BB_K2,
 
        /* Mstorm */
        {'M', BLOCK_MSEM,
-        {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM}, false,
+        {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM,
+         DBG_BUS_CLIENT_RBCM}, false,
         MSEM_REG_FAST_MEMORY,
         MSEM_REG_DBG_FRAME_MODE_BB_K2, MSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
         MSEM_REG_SLOW_DBG_MODE_BB_K2, MSEM_REG_DBG_MODE1_CFG_BB_K2,
 
        /* Ustorm */
        {'U', BLOCK_USEM,
-        {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU}, false,
+        {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU,
+         DBG_BUS_CLIENT_RBCU}, false,
         USEM_REG_FAST_MEMORY,
         USEM_REG_DBG_FRAME_MODE_BB_K2, USEM_REG_SLOW_DBG_ACTIVE_BB_K2,
         USEM_REG_SLOW_DBG_MODE_BB_K2, USEM_REG_DBG_MODE1_CFG_BB_K2,
 
        /* Xstorm */
        {'X', BLOCK_XSEM,
-        {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX}, false,
+        {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX,
+         DBG_BUS_CLIENT_RBCX}, false,
         XSEM_REG_FAST_MEMORY,
         XSEM_REG_DBG_FRAME_MODE_BB_K2, XSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
         XSEM_REG_SLOW_DBG_MODE_BB_K2, XSEM_REG_DBG_MODE1_CFG_BB_K2,
 
        /* Ystorm */
        {'Y', BLOCK_YSEM,
-        {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY}, false,
+        {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY,
+         DBG_BUS_CLIENT_RBCY}, false,
         YSEM_REG_FAST_MEMORY,
         YSEM_REG_DBG_FRAME_MODE_BB_K2, YSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
         YSEM_REG_SLOW_DBG_MODE_BB_K2, YSEM_REG_DBG_MODE1_CFG_BB_K2,
 
        /* Pstorm */
        {'P', BLOCK_PSEM,
-        {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS}, true,
+        {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS,
+         DBG_BUS_CLIENT_RBCS}, true,
         PSEM_REG_FAST_MEMORY,
         PSEM_REG_DBG_FRAME_MODE_BB_K2, PSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
         PSEM_REG_SLOW_DBG_MODE_BB_K2, PSEM_REG_DBG_MODE1_CFG_BB_K2,
 
 static struct block_defs block_grc_defs = {
        "grc",
-       {true, true}, false, 0,
-       {DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN},
+       {true, true, true}, false, 0,
+       {DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN},
        GRC_REG_DBG_SELECT, GRC_REG_DBG_DWORD_ENABLE,
        GRC_REG_DBG_SHIFT, GRC_REG_DBG_FORCE_VALID,
        GRC_REG_DBG_FORCE_FRAME,
 };
 
 static struct block_defs block_miscs_defs = {
-       "miscs", {false, false}, false, 0,
-       {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+       "miscs", {true, true, true}, false, 0,
+       {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
        0, 0, 0, 0, 0,
        false, false, MAX_DBG_RESET_REGS, 0
 };
 
 static struct block_defs block_misc_defs = {
-       "misc", {false, false}, false, 0,
-       {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+       "misc", {true, true, true}, false, 0,
+       {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
        0, 0, 0, 0, 0,
        false, false, MAX_DBG_RESET_REGS, 0
 };
 
 static struct block_defs block_dbu_defs = {
-       "dbu", {false, false}, false, 0,
-       {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+       "dbu", {true, true, true}, false, 0,
+       {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
        0, 0, 0, 0, 0,
        false, false, MAX_DBG_RESET_REGS, 0
 };
 
 static struct block_defs block_pglue_b_defs = {
        "pglue_b",
-       {true, true}, false, 0,
-       {DBG_BUS_CLIENT_RBCH, DBG_BUS_CLIENT_RBCH},
+       {true, true, true}, false, 0,
+       {DBG_BUS_CLIENT_RBCH, DBG_BUS_CLIENT_RBCH, DBG_BUS_CLIENT_RBCH},
        PGLUE_B_REG_DBG_SELECT, PGLUE_B_REG_DBG_DWORD_ENABLE,
        PGLUE_B_REG_DBG_SHIFT, PGLUE_B_REG_DBG_FORCE_VALID,
        PGLUE_B_REG_DBG_FORCE_FRAME,
 
 static struct block_defs block_cnig_defs = {
        "cnig",
-       {false, true}, false, 0,
-       {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW},
+       {true, true, true}, false, 0,
+       {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW,
+        DBG_BUS_CLIENT_RBCW},
        CNIG_REG_DBG_SELECT_K2_E5, CNIG_REG_DBG_DWORD_ENABLE_K2_E5,
        CNIG_REG_DBG_SHIFT_K2_E5, CNIG_REG_DBG_FORCE_VALID_K2_E5,
        CNIG_REG_DBG_FORCE_FRAME_K2_E5,
 };
 
 static struct block_defs block_cpmu_defs = {
-       "cpmu", {false, false}, false, 0,
-       {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+       "cpmu", {true, true, true}, false, 0,
+       {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
        0, 0, 0, 0, 0,
        true, false, DBG_RESET_REG_MISCS_PL_HV, 8
 };
 
 static struct block_defs block_ncsi_defs = {
        "ncsi",
-       {true, true}, false, 0,
-       {DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ},
+       {true, true, true}, false, 0,
+       {DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ},
        NCSI_REG_DBG_SELECT, NCSI_REG_DBG_DWORD_ENABLE,
        NCSI_REG_DBG_SHIFT, NCSI_REG_DBG_FORCE_VALID,
        NCSI_REG_DBG_FORCE_FRAME,
 };
 
 static struct block_defs block_opte_defs = {
-       "opte", {false, false}, false, 0,
-       {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+       "opte", {true, true, false}, false, 0,
+       {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
        0, 0, 0, 0, 0,
        true, false, DBG_RESET_REG_MISCS_PL_HV, 4
 };
 
 static struct block_defs block_bmb_defs = {
        "bmb",
-       {true, true}, false, 0,
-       {DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCB},
+       {true, true, true}, false, 0,
+       {DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCB, DBG_BUS_CLIENT_RBCB},
        BMB_REG_DBG_SELECT, BMB_REG_DBG_DWORD_ENABLE,
        BMB_REG_DBG_SHIFT, BMB_REG_DBG_FORCE_VALID,
        BMB_REG_DBG_FORCE_FRAME,
 
 static struct block_defs block_pcie_defs = {
        "pcie",
-       {false, true}, false, 0,
-       {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH},
+       {true, true, true}, false, 0,
+       {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH,
+        DBG_BUS_CLIENT_RBCH},
        PCIE_REG_DBG_COMMON_SELECT_K2_E5,
        PCIE_REG_DBG_COMMON_DWORD_ENABLE_K2_E5,
        PCIE_REG_DBG_COMMON_SHIFT_K2_E5,
 };
 
 static struct block_defs block_mcp_defs = {
-       "mcp", {false, false}, false, 0,
-       {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+       "mcp", {true, true, true}, false, 0,
+       {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
        0, 0, 0, 0, 0,
        false, false, MAX_DBG_RESET_REGS, 0
 };
 
 static struct block_defs block_mcp2_defs = {
        "mcp2",
-       {true, true}, false, 0,
-       {DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ},
+       {true, true, true}, false, 0,
+       {DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ},
        MCP2_REG_DBG_SELECT, MCP2_REG_DBG_DWORD_ENABLE,
        MCP2_REG_DBG_SHIFT, MCP2_REG_DBG_FORCE_VALID,
        MCP2_REG_DBG_FORCE_FRAME,
 
 static struct block_defs block_pswhst_defs = {
        "pswhst",
-       {true, true}, false, 0,
-       {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+       {true, true, true}, false, 0,
+       {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
        PSWHST_REG_DBG_SELECT, PSWHST_REG_DBG_DWORD_ENABLE,
        PSWHST_REG_DBG_SHIFT, PSWHST_REG_DBG_FORCE_VALID,
        PSWHST_REG_DBG_FORCE_FRAME,
 
 static struct block_defs block_pswhst2_defs = {
        "pswhst2",
-       {true, true}, false, 0,
-       {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+       {true, true, true}, false, 0,
+       {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
        PSWHST2_REG_DBG_SELECT, PSWHST2_REG_DBG_DWORD_ENABLE,
        PSWHST2_REG_DBG_SHIFT, PSWHST2_REG_DBG_FORCE_VALID,
        PSWHST2_REG_DBG_FORCE_FRAME,
 
 static struct block_defs block_pswrd_defs = {
        "pswrd",
-       {true, true}, false, 0,
-       {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+       {true, true, true}, false, 0,
+       {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
        PSWRD_REG_DBG_SELECT, PSWRD_REG_DBG_DWORD_ENABLE,
        PSWRD_REG_DBG_SHIFT, PSWRD_REG_DBG_FORCE_VALID,
        PSWRD_REG_DBG_FORCE_FRAME,
 
 static struct block_defs block_pswrd2_defs = {
        "pswrd2",
-       {true, true}, false, 0,
-       {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+       {true, true, true}, false, 0,
+       {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
        PSWRD2_REG_DBG_SELECT, PSWRD2_REG_DBG_DWORD_ENABLE,
        PSWRD2_REG_DBG_SHIFT, PSWRD2_REG_DBG_FORCE_VALID,
        PSWRD2_REG_DBG_FORCE_FRAME,
 
 static struct block_defs block_pswwr_defs = {
        "pswwr",
-       {true, true}, false, 0,
-       {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+       {true, true, true}, false, 0,
+       {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
        PSWWR_REG_DBG_SELECT, PSWWR_REG_DBG_DWORD_ENABLE,
        PSWWR_REG_DBG_SHIFT, PSWWR_REG_DBG_FORCE_VALID,
        PSWWR_REG_DBG_FORCE_FRAME,
 };
 
 static struct block_defs block_pswwr2_defs = {
-       "pswwr2", {false, false}, false, 0,
-       {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+       "pswwr2", {true, true, true}, false, 0,
+       {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
        0, 0, 0, 0, 0,
        true, false, DBG_RESET_REG_MISC_PL_HV, 3
 };
 
 static struct block_defs block_pswrq_defs = {
        "pswrq",
-       {true, true}, false, 0,
-       {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+       {true, true, true}, false, 0,
+       {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
        PSWRQ_REG_DBG_SELECT, PSWRQ_REG_DBG_DWORD_ENABLE,
        PSWRQ_REG_DBG_SHIFT, PSWRQ_REG_DBG_FORCE_VALID,
        PSWRQ_REG_DBG_FORCE_FRAME,
 
 static struct block_defs block_pswrq2_defs = {
        "pswrq2",
-       {true, true}, false, 0,
-       {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+       {true, true, true}, false, 0,
+       {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
        PSWRQ2_REG_DBG_SELECT, PSWRQ2_REG_DBG_DWORD_ENABLE,
        PSWRQ2_REG_DBG_SHIFT, PSWRQ2_REG_DBG_FORCE_VALID,
        PSWRQ2_REG_DBG_FORCE_FRAME,
 
 static struct block_defs block_pglcs_defs = {
        "pglcs",
-       {false, true}, false, 0,
-       {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH},
+       {true, true, true}, false, 0,
+       {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH,
+        DBG_BUS_CLIENT_RBCH},
        PGLCS_REG_DBG_SELECT_K2_E5, PGLCS_REG_DBG_DWORD_ENABLE_K2_E5,
        PGLCS_REG_DBG_SHIFT_K2_E5, PGLCS_REG_DBG_FORCE_VALID_K2_E5,
        PGLCS_REG_DBG_FORCE_FRAME_K2_E5,
 
 static struct block_defs block_ptu_defs = {
        "ptu",
-       {true, true}, false, 0,
-       {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+       {true, true, true}, false, 0,
+       {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
        PTU_REG_DBG_SELECT, PTU_REG_DBG_DWORD_ENABLE,
        PTU_REG_DBG_SHIFT, PTU_REG_DBG_FORCE_VALID,
        PTU_REG_DBG_FORCE_FRAME,
 
 static struct block_defs block_dmae_defs = {
        "dmae",
-       {true, true}, false, 0,
-       {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+       {true, true, true}, false, 0,
+       {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
        DMAE_REG_DBG_SELECT, DMAE_REG_DBG_DWORD_ENABLE,
        DMAE_REG_DBG_SHIFT, DMAE_REG_DBG_FORCE_VALID,
        DMAE_REG_DBG_FORCE_FRAME,
 
 static struct block_defs block_tcm_defs = {
        "tcm",
-       {true, true}, true, DBG_TSTORM_ID,
-       {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
+       {true, true, true}, true, DBG_TSTORM_ID,
+       {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
        TCM_REG_DBG_SELECT, TCM_REG_DBG_DWORD_ENABLE,
        TCM_REG_DBG_SHIFT, TCM_REG_DBG_FORCE_VALID,
        TCM_REG_DBG_FORCE_FRAME,
 
 static struct block_defs block_mcm_defs = {
        "mcm",
-       {true, true}, true, DBG_MSTORM_ID,
-       {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
+       {true, true, true}, true, DBG_MSTORM_ID,
+       {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM},
        MCM_REG_DBG_SELECT, MCM_REG_DBG_DWORD_ENABLE,
        MCM_REG_DBG_SHIFT, MCM_REG_DBG_FORCE_VALID,
        MCM_REG_DBG_FORCE_FRAME,
 
 static struct block_defs block_ucm_defs = {
        "ucm",
-       {true, true}, true, DBG_USTORM_ID,
-       {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
+       {true, true, true}, true, DBG_USTORM_ID,
+       {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
        UCM_REG_DBG_SELECT, UCM_REG_DBG_DWORD_ENABLE,
        UCM_REG_DBG_SHIFT, UCM_REG_DBG_FORCE_VALID,
        UCM_REG_DBG_FORCE_FRAME,
 
 static struct block_defs block_xcm_defs = {
        "xcm",
-       {true, true}, true, DBG_XSTORM_ID,
-       {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
+       {true, true, true}, true, DBG_XSTORM_ID,
+       {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
        XCM_REG_DBG_SELECT, XCM_REG_DBG_DWORD_ENABLE,
        XCM_REG_DBG_SHIFT, XCM_REG_DBG_FORCE_VALID,
        XCM_REG_DBG_FORCE_FRAME,
 
 static struct block_defs block_ycm_defs = {
        "ycm",
-       {true, true}, true, DBG_YSTORM_ID,
-       {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
+       {true, true, true}, true, DBG_YSTORM_ID,
+       {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY, DBG_BUS_CLIENT_RBCY},
        YCM_REG_DBG_SELECT, YCM_REG_DBG_DWORD_ENABLE,
        YCM_REG_DBG_SHIFT, YCM_REG_DBG_FORCE_VALID,
        YCM_REG_DBG_FORCE_FRAME,
 
 static struct block_defs block_pcm_defs = {
        "pcm",
-       {true, true}, true, DBG_PSTORM_ID,
-       {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
+       {true, true, true}, true, DBG_PSTORM_ID,
+       {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
        PCM_REG_DBG_SELECT, PCM_REG_DBG_DWORD_ENABLE,
        PCM_REG_DBG_SHIFT, PCM_REG_DBG_FORCE_VALID,
        PCM_REG_DBG_FORCE_FRAME,
 
 static struct block_defs block_qm_defs = {
        "qm",
-       {true, true}, false, 0,
-       {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCQ},
+       {true, true, true}, false, 0,
+       {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCQ, DBG_BUS_CLIENT_RBCQ},
        QM_REG_DBG_SELECT, QM_REG_DBG_DWORD_ENABLE,
        QM_REG_DBG_SHIFT, QM_REG_DBG_FORCE_VALID,
        QM_REG_DBG_FORCE_FRAME,
 
 static struct block_defs block_tm_defs = {
        "tm",
-       {true, true}, false, 0,
-       {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
+       {true, true, true}, false, 0,
+       {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
        TM_REG_DBG_SELECT, TM_REG_DBG_DWORD_ENABLE,
        TM_REG_DBG_SHIFT, TM_REG_DBG_FORCE_VALID,
        TM_REG_DBG_FORCE_FRAME,
 
 static struct block_defs block_dorq_defs = {
        "dorq",
-       {true, true}, false, 0,
-       {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
+       {true, true, true}, false, 0,
+       {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY, DBG_BUS_CLIENT_RBCY},
        DORQ_REG_DBG_SELECT, DORQ_REG_DBG_DWORD_ENABLE,
        DORQ_REG_DBG_SHIFT, DORQ_REG_DBG_FORCE_VALID,
        DORQ_REG_DBG_FORCE_FRAME,
 
 static struct block_defs block_brb_defs = {
        "brb",
-       {true, true}, false, 0,
-       {DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR},
+       {true, true, true}, false, 0,
+       {DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR},
        BRB_REG_DBG_SELECT, BRB_REG_DBG_DWORD_ENABLE,
        BRB_REG_DBG_SHIFT, BRB_REG_DBG_FORCE_VALID,
        BRB_REG_DBG_FORCE_FRAME,
 
 static struct block_defs block_src_defs = {
        "src",
-       {true, true}, false, 0,
-       {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
+       {true, true, true}, false, 0,
+       {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
        SRC_REG_DBG_SELECT, SRC_REG_DBG_DWORD_ENABLE,
        SRC_REG_DBG_SHIFT, SRC_REG_DBG_FORCE_VALID,
        SRC_REG_DBG_FORCE_FRAME,
 
 static struct block_defs block_prs_defs = {
        "prs",
-       {true, true}, false, 0,
-       {DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR},
+       {true, true, true}, false, 0,
+       {DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR},
        PRS_REG_DBG_SELECT, PRS_REG_DBG_DWORD_ENABLE,
        PRS_REG_DBG_SHIFT, PRS_REG_DBG_FORCE_VALID,
        PRS_REG_DBG_FORCE_FRAME,
 
 static struct block_defs block_tsdm_defs = {
        "tsdm",
-       {true, true}, true, DBG_TSTORM_ID,
-       {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
+       {true, true, true}, true, DBG_TSTORM_ID,
+       {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
        TSDM_REG_DBG_SELECT, TSDM_REG_DBG_DWORD_ENABLE,
        TSDM_REG_DBG_SHIFT, TSDM_REG_DBG_FORCE_VALID,
        TSDM_REG_DBG_FORCE_FRAME,
 
 static struct block_defs block_msdm_defs = {
        "msdm",
-       {true, true}, true, DBG_MSTORM_ID,
-       {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
+       {true, true, true}, true, DBG_MSTORM_ID,
+       {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM},
        MSDM_REG_DBG_SELECT, MSDM_REG_DBG_DWORD_ENABLE,
        MSDM_REG_DBG_SHIFT, MSDM_REG_DBG_FORCE_VALID,
        MSDM_REG_DBG_FORCE_FRAME,
 
 static struct block_defs block_usdm_defs = {
        "usdm",
-       {true, true}, true, DBG_USTORM_ID,
-       {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
+       {true, true, true}, true, DBG_USTORM_ID,
+       {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
        USDM_REG_DBG_SELECT, USDM_REG_DBG_DWORD_ENABLE,
        USDM_REG_DBG_SHIFT, USDM_REG_DBG_FORCE_VALID,
        USDM_REG_DBG_FORCE_FRAME,
 
 static struct block_defs block_xsdm_defs = {
        "xsdm",
-       {true, true}, true, DBG_XSTORM_ID,
-       {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
+       {true, true, true}, true, DBG_XSTORM_ID,
+       {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
        XSDM_REG_DBG_SELECT, XSDM_REG_DBG_DWORD_ENABLE,
        XSDM_REG_DBG_SHIFT, XSDM_REG_DBG_FORCE_VALID,
        XSDM_REG_DBG_FORCE_FRAME,
 
 static struct block_defs block_ysdm_defs = {
        "ysdm",
-       {true, true}, true, DBG_YSTORM_ID,
-       {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
+       {true, true, true}, true, DBG_YSTORM_ID,
+       {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY, DBG_BUS_CLIENT_RBCY},
        YSDM_REG_DBG_SELECT, YSDM_REG_DBG_DWORD_ENABLE,
        YSDM_REG_DBG_SHIFT, YSDM_REG_DBG_FORCE_VALID,
        YSDM_REG_DBG_FORCE_FRAME,
 
 static struct block_defs block_psdm_defs = {
        "psdm",
-       {true, true}, true, DBG_PSTORM_ID,
-       {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
+       {true, true, true}, true, DBG_PSTORM_ID,
+       {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
        PSDM_REG_DBG_SELECT, PSDM_REG_DBG_DWORD_ENABLE,
        PSDM_REG_DBG_SHIFT, PSDM_REG_DBG_FORCE_VALID,
        PSDM_REG_DBG_FORCE_FRAME,
 
 static struct block_defs block_tsem_defs = {
        "tsem",
-       {true, true}, true, DBG_TSTORM_ID,
-       {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
+       {true, true, true}, true, DBG_TSTORM_ID,
+       {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
        TSEM_REG_DBG_SELECT, TSEM_REG_DBG_DWORD_ENABLE,
        TSEM_REG_DBG_SHIFT, TSEM_REG_DBG_FORCE_VALID,
        TSEM_REG_DBG_FORCE_FRAME,
 
 static struct block_defs block_msem_defs = {
        "msem",
-       {true, true}, true, DBG_MSTORM_ID,
-       {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
+       {true, true, true}, true, DBG_MSTORM_ID,
+       {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM},
        MSEM_REG_DBG_SELECT, MSEM_REG_DBG_DWORD_ENABLE,
        MSEM_REG_DBG_SHIFT, MSEM_REG_DBG_FORCE_VALID,
        MSEM_REG_DBG_FORCE_FRAME,
 
 static struct block_defs block_usem_defs = {
        "usem",
-       {true, true}, true, DBG_USTORM_ID,
-       {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
+       {true, true, true}, true, DBG_USTORM_ID,
+       {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
        USEM_REG_DBG_SELECT, USEM_REG_DBG_DWORD_ENABLE,
        USEM_REG_DBG_SHIFT, USEM_REG_DBG_FORCE_VALID,
        USEM_REG_DBG_FORCE_FRAME,
 
 static struct block_defs block_xsem_defs = {
        "xsem",
-       {true, true}, true, DBG_XSTORM_ID,
-       {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
+       {true, true, true}, true, DBG_XSTORM_ID,
+       {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
        XSEM_REG_DBG_SELECT, XSEM_REG_DBG_DWORD_ENABLE,
        XSEM_REG_DBG_SHIFT, XSEM_REG_DBG_FORCE_VALID,
        XSEM_REG_DBG_FORCE_FRAME,
 
 static struct block_defs block_ysem_defs = {
        "ysem",
-       {true, true}, true, DBG_YSTORM_ID,
-       {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
+       {true, true, true}, true, DBG_YSTORM_ID,
+       {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY, DBG_BUS_CLIENT_RBCY},
        YSEM_REG_DBG_SELECT, YSEM_REG_DBG_DWORD_ENABLE,
        YSEM_REG_DBG_SHIFT, YSEM_REG_DBG_FORCE_VALID,
        YSEM_REG_DBG_FORCE_FRAME,
 
 static struct block_defs block_psem_defs = {
        "psem",
-       {true, true}, true, DBG_PSTORM_ID,
-       {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
+       {true, true, true}, true, DBG_PSTORM_ID,
+       {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
        PSEM_REG_DBG_SELECT, PSEM_REG_DBG_DWORD_ENABLE,
        PSEM_REG_DBG_SHIFT, PSEM_REG_DBG_FORCE_VALID,
        PSEM_REG_DBG_FORCE_FRAME,
 
 static struct block_defs block_rss_defs = {
        "rss",
-       {true, true}, false, 0,
-       {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
+       {true, true, true}, false, 0,
+       {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
        RSS_REG_DBG_SELECT, RSS_REG_DBG_DWORD_ENABLE,
        RSS_REG_DBG_SHIFT, RSS_REG_DBG_FORCE_VALID,
        RSS_REG_DBG_FORCE_FRAME,
 
 static struct block_defs block_tmld_defs = {
        "tmld",
-       {true, true}, false, 0,
-       {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
+       {true, true, true}, false, 0,
+       {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM},
        TMLD_REG_DBG_SELECT, TMLD_REG_DBG_DWORD_ENABLE,
        TMLD_REG_DBG_SHIFT, TMLD_REG_DBG_FORCE_VALID,
        TMLD_REG_DBG_FORCE_FRAME,
 
 static struct block_defs block_muld_defs = {
        "muld",
-       {true, true}, false, 0,
-       {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
+       {true, true, true}, false, 0,
+       {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
        MULD_REG_DBG_SELECT, MULD_REG_DBG_DWORD_ENABLE,
        MULD_REG_DBG_SHIFT, MULD_REG_DBG_FORCE_VALID,
        MULD_REG_DBG_FORCE_FRAME,
 
 static struct block_defs block_yuld_defs = {
        "yuld",
-       {true, true}, false, 0,
-       {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
+       {true, true, false}, false, 0,
+       {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU,
+        MAX_DBG_BUS_CLIENTS},
        YULD_REG_DBG_SELECT_BB_K2, YULD_REG_DBG_DWORD_ENABLE_BB_K2,
        YULD_REG_DBG_SHIFT_BB_K2, YULD_REG_DBG_FORCE_VALID_BB_K2,
        YULD_REG_DBG_FORCE_FRAME_BB_K2,
 
 static struct block_defs block_xyld_defs = {
        "xyld",
-       {true, true}, false, 0,
-       {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
+       {true, true, true}, false, 0,
+       {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
        XYLD_REG_DBG_SELECT, XYLD_REG_DBG_DWORD_ENABLE,
        XYLD_REG_DBG_SHIFT, XYLD_REG_DBG_FORCE_VALID,
        XYLD_REG_DBG_FORCE_FRAME,
 };
 
 static struct block_defs block_ptld_defs = {
-       "ptld", {false, false}, false, 0,
-       {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
-       0, 0, 0, 0, 0,
-       false, false, MAX_DBG_RESET_REGS, 0
+       "ptld",
+       {false, false, true}, false, 0,
+       {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCT},
+       PTLD_REG_DBG_SELECT_E5, PTLD_REG_DBG_DWORD_ENABLE_E5,
+       PTLD_REG_DBG_SHIFT_E5, PTLD_REG_DBG_FORCE_VALID_E5,
+       PTLD_REG_DBG_FORCE_FRAME_E5,
+       true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2,
+       28
 };
 
 static struct block_defs block_ypld_defs = {
-       "ypld", {false, false}, false, 0,
-       {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
-       0, 0, 0, 0, 0,
-       false, false, MAX_DBG_RESET_REGS, 0
+       "ypld",
+       {false, false, true}, false, 0,
+       {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCS},
+       YPLD_REG_DBG_SELECT_E5, YPLD_REG_DBG_DWORD_ENABLE_E5,
+       YPLD_REG_DBG_SHIFT_E5, YPLD_REG_DBG_FORCE_VALID_E5,
+       YPLD_REG_DBG_FORCE_FRAME_E5,
+       true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2,
+       27
 };
 
 static struct block_defs block_prm_defs = {
        "prm",
-       {true, true}, false, 0,
-       {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
+       {true, true, true}, false, 0,
+       {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM},
        PRM_REG_DBG_SELECT, PRM_REG_DBG_DWORD_ENABLE,
        PRM_REG_DBG_SHIFT, PRM_REG_DBG_FORCE_VALID,
        PRM_REG_DBG_FORCE_FRAME,
 
 static struct block_defs block_pbf_pb1_defs = {
        "pbf_pb1",
-       {true, true}, false, 0,
-       {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV},
+       {true, true, true}, false, 0,
+       {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV, DBG_BUS_CLIENT_RBCV},
        PBF_PB1_REG_DBG_SELECT, PBF_PB1_REG_DBG_DWORD_ENABLE,
        PBF_PB1_REG_DBG_SHIFT, PBF_PB1_REG_DBG_FORCE_VALID,
        PBF_PB1_REG_DBG_FORCE_FRAME,
 
 static struct block_defs block_pbf_pb2_defs = {
        "pbf_pb2",
-       {true, true}, false, 0,
-       {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV},
+       {true, true, true}, false, 0,
+       {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV, DBG_BUS_CLIENT_RBCV},
        PBF_PB2_REG_DBG_SELECT, PBF_PB2_REG_DBG_DWORD_ENABLE,
        PBF_PB2_REG_DBG_SHIFT, PBF_PB2_REG_DBG_FORCE_VALID,
        PBF_PB2_REG_DBG_FORCE_FRAME,
 
 static struct block_defs block_rpb_defs = {
        "rpb",
-       {true, true}, false, 0,
-       {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
+       {true, true, true}, false, 0,
+       {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM},
        RPB_REG_DBG_SELECT, RPB_REG_DBG_DWORD_ENABLE,
        RPB_REG_DBG_SHIFT, RPB_REG_DBG_FORCE_VALID,
        RPB_REG_DBG_FORCE_FRAME,
 
 static struct block_defs block_btb_defs = {
        "btb",
-       {true, true}, false, 0,
-       {DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCV},
+       {true, true, true}, false, 0,
+       {DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCV, DBG_BUS_CLIENT_RBCV},
        BTB_REG_DBG_SELECT, BTB_REG_DBG_DWORD_ENABLE,
        BTB_REG_DBG_SHIFT, BTB_REG_DBG_FORCE_VALID,
        BTB_REG_DBG_FORCE_FRAME,
 
 static struct block_defs block_pbf_defs = {
        "pbf",
-       {true, true}, false, 0,
-       {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV},
+       {true, true, true}, false, 0,
+       {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV, DBG_BUS_CLIENT_RBCV},
        PBF_REG_DBG_SELECT, PBF_REG_DBG_DWORD_ENABLE,
        PBF_REG_DBG_SHIFT, PBF_REG_DBG_FORCE_VALID,
        PBF_REG_DBG_FORCE_FRAME,
 
 static struct block_defs block_rdif_defs = {
        "rdif",
-       {true, true}, false, 0,
-       {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
+       {true, true, true}, false, 0,
+       {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM},
        RDIF_REG_DBG_SELECT, RDIF_REG_DBG_DWORD_ENABLE,
        RDIF_REG_DBG_SHIFT, RDIF_REG_DBG_FORCE_VALID,
        RDIF_REG_DBG_FORCE_FRAME,
 
 static struct block_defs block_tdif_defs = {
        "tdif",
-       {true, true}, false, 0,
-       {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
+       {true, true, true}, false, 0,
+       {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
        TDIF_REG_DBG_SELECT, TDIF_REG_DBG_DWORD_ENABLE,
        TDIF_REG_DBG_SHIFT, TDIF_REG_DBG_FORCE_VALID,
        TDIF_REG_DBG_FORCE_FRAME,
 
 static struct block_defs block_cdu_defs = {
        "cdu",
-       {true, true}, false, 0,
-       {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
+       {true, true, true}, false, 0,
+       {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
        CDU_REG_DBG_SELECT, CDU_REG_DBG_DWORD_ENABLE,
        CDU_REG_DBG_SHIFT, CDU_REG_DBG_FORCE_VALID,
        CDU_REG_DBG_FORCE_FRAME,
 
 static struct block_defs block_ccfc_defs = {
        "ccfc",
-       {true, true}, false, 0,
-       {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
+       {true, true, true}, false, 0,
+       {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
        CCFC_REG_DBG_SELECT, CCFC_REG_DBG_DWORD_ENABLE,
        CCFC_REG_DBG_SHIFT, CCFC_REG_DBG_FORCE_VALID,
        CCFC_REG_DBG_FORCE_FRAME,
 
 static struct block_defs block_tcfc_defs = {
        "tcfc",
-       {true, true}, false, 0,
-       {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
+       {true, true, true}, false, 0,
+       {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
        TCFC_REG_DBG_SELECT, TCFC_REG_DBG_DWORD_ENABLE,
        TCFC_REG_DBG_SHIFT, TCFC_REG_DBG_FORCE_VALID,
        TCFC_REG_DBG_FORCE_FRAME,
 
 static struct block_defs block_igu_defs = {
        "igu",
-       {true, true}, false, 0,
-       {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+       {true, true, true}, false, 0,
+       {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
        IGU_REG_DBG_SELECT, IGU_REG_DBG_DWORD_ENABLE,
        IGU_REG_DBG_SHIFT, IGU_REG_DBG_FORCE_VALID,
        IGU_REG_DBG_FORCE_FRAME,
 
 static struct block_defs block_cau_defs = {
        "cau",
-       {true, true}, false, 0,
-       {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+       {true, true, true}, false, 0,
+       {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
        CAU_REG_DBG_SELECT, CAU_REG_DBG_DWORD_ENABLE,
        CAU_REG_DBG_SHIFT, CAU_REG_DBG_FORCE_VALID,
        CAU_REG_DBG_FORCE_FRAME,
 };
 
 static struct block_defs block_rgfs_defs = {
-       "rgfs", {false, false}, false, 0,
-       {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+       "rgfs", {false, false, true}, false, 0,
+       {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
        0, 0, 0, 0, 0,
-       false, false, MAX_DBG_RESET_REGS, 0
+       true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 29
 };
 
 static struct block_defs block_rgsrc_defs = {
-       "rgsrc", {false, false}, false, 0,
-       {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
-       0, 0, 0, 0, 0,
-       false, false, MAX_DBG_RESET_REGS, 0
+       "rgsrc",
+       {false, false, true}, false, 0,
+       {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH},
+       RGSRC_REG_DBG_SELECT_E5, RGSRC_REG_DBG_DWORD_ENABLE_E5,
+       RGSRC_REG_DBG_SHIFT_E5, RGSRC_REG_DBG_FORCE_VALID_E5,
+       RGSRC_REG_DBG_FORCE_FRAME_E5,
+       true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1,
+       30
 };
 
 static struct block_defs block_tgfs_defs = {
-       "tgfs", {false, false}, false, 0,
-       {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+       "tgfs", {false, false, true}, false, 0,
+       {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
        0, 0, 0, 0, 0,
-       false, false, MAX_DBG_RESET_REGS, 0
+       true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 30
 };
 
 static struct block_defs block_tgsrc_defs = {
-       "tgsrc", {false, false}, false, 0,
-       {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
-       0, 0, 0, 0, 0,
-       false, false, MAX_DBG_RESET_REGS, 0
+       "tgsrc",
+       {false, false, true}, false, 0,
+       {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCV},
+       TGSRC_REG_DBG_SELECT_E5, TGSRC_REG_DBG_DWORD_ENABLE_E5,
+       TGSRC_REG_DBG_SHIFT_E5, TGSRC_REG_DBG_FORCE_VALID_E5,
+       TGSRC_REG_DBG_FORCE_FRAME_E5,
+       true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1,
+       31
 };
 
 static struct block_defs block_umac_defs = {
        "umac",
-       {false, true}, false, 0,
-       {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ},
+       {true, true, true}, false, 0,
+       {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ,
+        DBG_BUS_CLIENT_RBCZ},
        UMAC_REG_DBG_SELECT_K2_E5, UMAC_REG_DBG_DWORD_ENABLE_K2_E5,
        UMAC_REG_DBG_SHIFT_K2_E5, UMAC_REG_DBG_FORCE_VALID_K2_E5,
        UMAC_REG_DBG_FORCE_FRAME_K2_E5,
 };
 
 static struct block_defs block_xmac_defs = {
-       "xmac", {false, false}, false, 0,
-       {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+       "xmac", {true, false, false}, false, 0,
+       {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
        0, 0, 0, 0, 0,
        false, false, MAX_DBG_RESET_REGS, 0
 };
 
 static struct block_defs block_dbg_defs = {
-       "dbg", {false, false}, false, 0,
-       {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+       "dbg", {true, true, true}, false, 0,
+       {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
        0, 0, 0, 0, 0,
        true, true, DBG_RESET_REG_MISC_PL_PDA_VAUX, 3
 };
 
 static struct block_defs block_nig_defs = {
        "nig",
-       {true, true}, false, 0,
-       {DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN},
+       {true, true, true}, false, 0,
+       {DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN},
        NIG_REG_DBG_SELECT, NIG_REG_DBG_DWORD_ENABLE,
        NIG_REG_DBG_SHIFT, NIG_REG_DBG_FORCE_VALID,
        NIG_REG_DBG_FORCE_FRAME,
 
 static struct block_defs block_wol_defs = {
        "wol",
-       {false, true}, false, 0,
-       {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ},
+       {false, true, true}, false, 0,
+       {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ},
        WOL_REG_DBG_SELECT_K2_E5, WOL_REG_DBG_DWORD_ENABLE_K2_E5,
        WOL_REG_DBG_SHIFT_K2_E5, WOL_REG_DBG_FORCE_VALID_K2_E5,
        WOL_REG_DBG_FORCE_FRAME_K2_E5,
 
 static struct block_defs block_bmbn_defs = {
        "bmbn",
-       {false, true}, false, 0,
-       {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCB},
+       {false, true, true}, false, 0,
+       {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCB,
+        DBG_BUS_CLIENT_RBCB},
        BMBN_REG_DBG_SELECT_K2_E5, BMBN_REG_DBG_DWORD_ENABLE_K2_E5,
        BMBN_REG_DBG_SHIFT_K2_E5, BMBN_REG_DBG_FORCE_VALID_K2_E5,
        BMBN_REG_DBG_FORCE_FRAME_K2_E5,
 };
 
 static struct block_defs block_ipc_defs = {
-       "ipc", {false, false}, false, 0,
-       {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+       "ipc", {true, true, true}, false, 0,
+       {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
        0, 0, 0, 0, 0,
        true, false, DBG_RESET_REG_MISCS_PL_UA, 8
 };
 
 static struct block_defs block_nwm_defs = {
        "nwm",
-       {false, true}, false, 0,
-       {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW},
+       {false, true, true}, false, 0,
+       {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW, DBG_BUS_CLIENT_RBCW},
        NWM_REG_DBG_SELECT_K2_E5, NWM_REG_DBG_DWORD_ENABLE_K2_E5,
        NWM_REG_DBG_SHIFT_K2_E5, NWM_REG_DBG_FORCE_VALID_K2_E5,
        NWM_REG_DBG_FORCE_FRAME_K2_E5,
 
 static struct block_defs block_nws_defs = {
        "nws",
-       {false, true}, false, 0,
-       {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW},
+       {false, true, true}, false, 0,
+       {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW, DBG_BUS_CLIENT_RBCW},
        NWS_REG_DBG_SELECT_K2_E5, NWS_REG_DBG_DWORD_ENABLE_K2_E5,
        NWS_REG_DBG_SHIFT_K2_E5, NWS_REG_DBG_FORCE_VALID_K2_E5,
        NWS_REG_DBG_FORCE_FRAME_K2_E5,
 
 static struct block_defs block_ms_defs = {
        "ms",
-       {false, true}, false, 0,
-       {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ},
+       {false, true, true}, false, 0,
+       {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ},
        MS_REG_DBG_SELECT_K2_E5, MS_REG_DBG_DWORD_ENABLE_K2_E5,
        MS_REG_DBG_SHIFT_K2_E5, MS_REG_DBG_FORCE_VALID_K2_E5,
        MS_REG_DBG_FORCE_FRAME_K2_E5,
 
 static struct block_defs block_phy_pcie_defs = {
        "phy_pcie",
-       {false, true}, false, 0,
-       {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH},
+       {false, true, true}, false, 0,
+       {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH,
+        DBG_BUS_CLIENT_RBCH},
        PCIE_REG_DBG_COMMON_SELECT_K2_E5,
        PCIE_REG_DBG_COMMON_DWORD_ENABLE_K2_E5,
        PCIE_REG_DBG_COMMON_SHIFT_K2_E5,
 };
 
 static struct block_defs block_led_defs = {
-       "led", {false, false}, false, 0,
-       {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+       "led", {false, true, true}, false, 0,
+       {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
        0, 0, 0, 0, 0,
        true, false, DBG_RESET_REG_MISCS_PL_HV, 14
 };
 
 static struct block_defs block_avs_wrap_defs = {
-       "avs_wrap", {false, false}, false, 0,
-       {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+       "avs_wrap", {false, true, false}, false, 0,
+       {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
        0, 0, 0, 0, 0,
        true, false, DBG_RESET_REG_MISCS_PL_UA, 11
 };
 
+static struct block_defs block_pxpreqbus_defs = {
+       "pxpreqbus", {false, false, false}, false, 0,
+       {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+       0, 0, 0, 0, 0,
+       false, false, MAX_DBG_RESET_REGS, 0
+};
+
 static struct block_defs block_misc_aeu_defs = {
-       "misc_aeu", {false, false}, false, 0,
-       {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+       "misc_aeu", {true, true, true}, false, 0,
+       {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
        0, 0, 0, 0, 0,
        false, false, MAX_DBG_RESET_REGS, 0
 };
 
 static struct block_defs block_bar0_map_defs = {
-       "bar0_map", {false, false}, false, 0,
-       {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+       "bar0_map", {true, true, true}, false, 0,
+       {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
        0, 0, 0, 0, 0,
        false, false, MAX_DBG_RESET_REGS, 0
 };
        &block_phy_pcie_defs,
        &block_led_defs,
        &block_avs_wrap_defs,
+       &block_pxpreqbus_defs,
        &block_misc_aeu_defs,
        &block_bar0_map_defs,
 };
 
 static struct platform_defs s_platform_defs[] = {
-       {"asic", 1},
-       {"reserved", 0},
-       {"reserved2", 0},
-       {"reserved3", 0}
+       {"asic", 1, 256, 32768},
+       {"reserved", 0, 0, 0},
+       {"reserved2", 0, 0, 0},
+       {"reserved3", 0, 0, 0}
 };
 
 static struct grc_param_defs s_grc_param_defs[] = {
        /* DBG_GRC_PARAM_DUMP_TSTORM */
-       {{1, 1}, 0, 1, false, 1, 1},
+       {{1, 1, 1}, 0, 1, false, 1, 1},
 
        /* DBG_GRC_PARAM_DUMP_MSTORM */
-       {{1, 1}, 0, 1, false, 1, 1},
+       {{1, 1, 1}, 0, 1, false, 1, 1},
 
        /* DBG_GRC_PARAM_DUMP_USTORM */
-       {{1, 1}, 0, 1, false, 1, 1},
+       {{1, 1, 1}, 0, 1, false, 1, 1},
 
        /* DBG_GRC_PARAM_DUMP_XSTORM */
-       {{1, 1}, 0, 1, false, 1, 1},
+       {{1, 1, 1}, 0, 1, false, 1, 1},
 
        /* DBG_GRC_PARAM_DUMP_YSTORM */
-       {{1, 1}, 0, 1, false, 1, 1},
+       {{1, 1, 1}, 0, 1, false, 1, 1},
 
        /* DBG_GRC_PARAM_DUMP_PSTORM */
-       {{1, 1}, 0, 1, false, 1, 1},
+       {{1, 1, 1}, 0, 1, false, 1, 1},
 
        /* DBG_GRC_PARAM_DUMP_REGS */
-       {{1, 1}, 0, 1, false, 0, 1},
+       {{1, 1, 1}, 0, 1, false, 0, 1},
 
        /* DBG_GRC_PARAM_DUMP_RAM */
-       {{1, 1}, 0, 1, false, 0, 1},
+       {{1, 1, 1}, 0, 1, false, 0, 1},
 
        /* DBG_GRC_PARAM_DUMP_PBUF */
-       {{1, 1}, 0, 1, false, 0, 1},
+       {{1, 1, 1}, 0, 1, false, 0, 1},
 
        /* DBG_GRC_PARAM_DUMP_IOR */
-       {{0, 0}, 0, 1, false, 0, 1},
+       {{0, 0, 0}, 0, 1, false, 0, 1},
 
        /* DBG_GRC_PARAM_DUMP_VFC */
-       {{0, 0}, 0, 1, false, 0, 1},
+       {{0, 0, 0}, 0, 1, false, 0, 1},
 
        /* DBG_GRC_PARAM_DUMP_CM_CTX */
-       {{1, 1}, 0, 1, false, 0, 1},
+       {{1, 1, 1}, 0, 1, false, 0, 1},
 
        /* DBG_GRC_PARAM_DUMP_ILT */
-       {{1, 1}, 0, 1, false, 0, 1},
+       {{1, 1, 1}, 0, 1, false, 0, 1},
 
        /* DBG_GRC_PARAM_DUMP_RSS */
-       {{1, 1}, 0, 1, false, 0, 1},
+       {{1, 1, 1}, 0, 1, false, 0, 1},
 
        /* DBG_GRC_PARAM_DUMP_CAU */
-       {{1, 1}, 0, 1, false, 0, 1},
+       {{1, 1, 1}, 0, 1, false, 0, 1},
 
        /* DBG_GRC_PARAM_DUMP_QM */
-       {{1, 1}, 0, 1, false, 0, 1},
+       {{1, 1, 1}, 0, 1, false, 0, 1},
 
        /* DBG_GRC_PARAM_DUMP_MCP */
-       {{1, 1}, 0, 1, false, 0, 1},
+       {{1, 1, 1}, 0, 1, false, 0, 1},
 
        /* DBG_GRC_PARAM_RESERVED */
-       {{1, 1}, 0, 1, false, 0, 1},
+       {{1, 1, 1}, 0, 1, false, 0, 1},
 
        /* DBG_GRC_PARAM_DUMP_CFC */
-       {{1, 1}, 0, 1, false, 0, 1},
+       {{1, 1, 1}, 0, 1, false, 0, 1},
 
        /* DBG_GRC_PARAM_DUMP_IGU */
-       {{1, 1}, 0, 1, false, 0, 1},
+       {{1, 1, 1}, 0, 1, false, 0, 1},
 
        /* DBG_GRC_PARAM_DUMP_BRB */
-       {{0, 0}, 0, 1, false, 0, 1},
+       {{0, 0, 0}, 0, 1, false, 0, 1},
 
        /* DBG_GRC_PARAM_DUMP_BTB */
-       {{0, 0}, 0, 1, false, 0, 1},
+       {{0, 0, 0}, 0, 1, false, 0, 1},
 
        /* DBG_GRC_PARAM_DUMP_BMB */
-       {{0, 0}, 0, 1, false, 0, 1},
+       {{0, 0, 0}, 0, 1, false, 0, 1},
 
        /* DBG_GRC_PARAM_DUMP_NIG */
-       {{1, 1}, 0, 1, false, 0, 1},
+       {{1, 1, 1}, 0, 1, false, 0, 1},
 
        /* DBG_GRC_PARAM_DUMP_MULD */
-       {{1, 1}, 0, 1, false, 0, 1},
+       {{1, 1, 1}, 0, 1, false, 0, 1},
 
        /* DBG_GRC_PARAM_DUMP_PRS */
-       {{1, 1}, 0, 1, false, 0, 1},
+       {{1, 1, 1}, 0, 1, false, 0, 1},
 
        /* DBG_GRC_PARAM_DUMP_DMAE */
-       {{1, 1}, 0, 1, false, 0, 1},
+       {{1, 1, 1}, 0, 1, false, 0, 1},
 
        /* DBG_GRC_PARAM_DUMP_TM */
-       {{1, 1}, 0, 1, false, 0, 1},
+       {{1, 1, 1}, 0, 1, false, 0, 1},
 
        /* DBG_GRC_PARAM_DUMP_SDM */
-       {{1, 1}, 0, 1, false, 0, 1},
+       {{1, 1, 1}, 0, 1, false, 0, 1},
 
        /* DBG_GRC_PARAM_DUMP_DIF */
-       {{1, 1}, 0, 1, false, 0, 1},
+       {{1, 1, 1}, 0, 1, false, 0, 1},
 
        /* DBG_GRC_PARAM_DUMP_STATIC */
-       {{1, 1}, 0, 1, false, 0, 1},
+       {{1, 1, 1}, 0, 1, false, 0, 1},
 
        /* DBG_GRC_PARAM_UNSTALL */
-       {{0, 0}, 0, 1, false, 0, 0},
+       {{0, 0, 0}, 0, 1, false, 0, 0},
 
        /* DBG_GRC_PARAM_NUM_LCIDS */
-       {{MAX_LCIDS, MAX_LCIDS}, 1, MAX_LCIDS, false, MAX_LCIDS,
+       {{MAX_LCIDS, MAX_LCIDS, MAX_LCIDS}, 1, MAX_LCIDS, false, MAX_LCIDS,
         MAX_LCIDS},
 
        /* DBG_GRC_PARAM_NUM_LTIDS */
-       {{MAX_LTIDS, MAX_LTIDS}, 1, MAX_LTIDS, false, MAX_LTIDS,
+       {{MAX_LTIDS, MAX_LTIDS, MAX_LTIDS}, 1, MAX_LTIDS, false, MAX_LTIDS,
         MAX_LTIDS},
 
        /* DBG_GRC_PARAM_EXCLUDE_ALL */
-       {{0, 0}, 0, 1, true, 0, 0},
+       {{0, 0, 0}, 0, 1, true, 0, 0},
 
        /* DBG_GRC_PARAM_CRASH */
-       {{0, 0}, 0, 1, true, 0, 0},
+       {{0, 0, 0}, 0, 1, true, 0, 0},
 
        /* DBG_GRC_PARAM_PARITY_SAFE */
-       {{0, 0}, 0, 1, false, 1, 0},
+       {{0, 0, 0}, 0, 1, false, 1, 0},
 
        /* DBG_GRC_PARAM_DUMP_CM */
-       {{1, 1}, 0, 1, false, 0, 1},
+       {{1, 1, 1}, 0, 1, false, 0, 1},
 
        /* DBG_GRC_PARAM_DUMP_PHY */
-       {{1, 1}, 0, 1, false, 0, 1},
+       {{1, 1, 1}, 0, 1, false, 0, 1},
 
        /* DBG_GRC_PARAM_NO_MCP */
-       {{0, 0}, 0, 1, false, 0, 0},
+       {{0, 0, 0}, 0, 1, false, 0, 0},
 
        /* DBG_GRC_PARAM_NO_FW_VER */
-       {{0, 0}, 0, 1, false, 0, 0}
+       {{0, 0, 0}, 0, 1, false, 0, 0}
 };
 
 static struct rss_mem_defs s_rss_mem_defs[] = {
-       { "rss_mem_cid", "rss_cid", 0,
-         {256, 320},
-         {32, 32} },
+       { "rss_mem_cid", "rss_cid", 0, 32,
+         {256, 320, 512} },
 
-       { "rss_mem_key_msb", "rss_key", 1024,
-         {128, 208},
-         {256, 256} },
+       { "rss_mem_key_msb", "rss_key", 1024, 256,
+         {128, 208, 257} },
 
-       { "rss_mem_key_lsb", "rss_key", 2048,
-         {128, 208},
-         {64, 64} },
+       { "rss_mem_key_lsb", "rss_key", 2048, 64,
+         {128, 208, 257} },
 
-       { "rss_mem_info", "rss_info", 3072,
-         {128, 208},
-         {16, 16} },
+       { "rss_mem_info", "rss_info", 3072, 16,
+         {128, 208, 256} },
 
-       { "rss_mem_ind", "rss_ind", 4096,
-         {16384, 26624},
-         {16, 16} }
+       { "rss_mem_ind", "rss_ind", 4096, 16,
+         {16384, 26624, 32768} }
 };
 
 static struct vfc_ram_defs s_vfc_ram_defs[] = {
 static struct big_ram_defs s_big_ram_defs[] = {
        { "BRB", MEM_GROUP_BRB_MEM, MEM_GROUP_BRB_RAM, DBG_GRC_PARAM_DUMP_BRB,
          BRB_REG_BIG_RAM_ADDRESS, BRB_REG_BIG_RAM_DATA,
-         {4800, 5632} },
+         MISC_REG_BLOCK_256B_EN, {0, 0, 0},
+         {153600, 180224, 282624} },
 
        { "BTB", MEM_GROUP_BTB_MEM, MEM_GROUP_BTB_RAM, DBG_GRC_PARAM_DUMP_BTB,
          BTB_REG_BIG_RAM_ADDRESS, BTB_REG_BIG_RAM_DATA,
-         {2880, 3680} },
+         MISC_REG_BLOCK_256B_EN, {0, 1, 1},
+         {92160, 117760, 168960} },
 
        { "BMB", MEM_GROUP_BMB_MEM, MEM_GROUP_BMB_RAM, DBG_GRC_PARAM_DUMP_BMB,
          BMB_REG_BIG_RAM_ADDRESS, BMB_REG_BIG_RAM_DATA,
-         {1152, 1152} }
+         MISCS_REG_BLOCK_256B_EN, {0, 0, 0},
+         {36864, 36864, 36864} }
 };
 
 static struct reset_reg_defs s_reset_regs_defs[] = {
        /* DBG_RESET_REG_MISCS_PL_UA */
-       { MISCS_REG_RESET_PL_UA, 0x0,
-         {true, true} },
+       { MISCS_REG_RESET_PL_UA,
+         {true, true, true}, {0x0, 0x0, 0x0} },
 
        /* DBG_RESET_REG_MISCS_PL_HV */
-       { MISCS_REG_RESET_PL_HV, 0x0,
-         {true, true} },
+       { MISCS_REG_RESET_PL_HV,
+         {true, true, true}, {0x0, 0x400, 0x600} },
 
        /* DBG_RESET_REG_MISCS_PL_HV_2 */
-       { MISCS_REG_RESET_PL_HV_2_K2_E5, 0x0,
-         {false, true} },
+       { MISCS_REG_RESET_PL_HV_2_K2_E5,
+         {false, true, true}, {0x0, 0x0, 0x0} },
 
        /* DBG_RESET_REG_MISC_PL_UA */
-       { MISC_REG_RESET_PL_UA, 0x0,
-         {true, true} },
+       { MISC_REG_RESET_PL_UA,
+         {true, true, true}, {0x0, 0x0, 0x0} },
 
        /* DBG_RESET_REG_MISC_PL_HV */
-       { MISC_REG_RESET_PL_HV, 0x0,
-         {true, true} },
+       { MISC_REG_RESET_PL_HV,
+         {true, true, true}, {0x0, 0x0, 0x0} },
 
        /* DBG_RESET_REG_MISC_PL_PDA_VMAIN_1 */
-       { MISC_REG_RESET_PL_PDA_VMAIN_1, 0x4404040,
-         {true, true} },
+       { MISC_REG_RESET_PL_PDA_VMAIN_1,
+         {true, true, true}, {0x4404040, 0x4404040, 0x404040} },
 
        /* DBG_RESET_REG_MISC_PL_PDA_VMAIN_2 */
-       { MISC_REG_RESET_PL_PDA_VMAIN_2, 0x7c00007,
-         {true, true} },
+       { MISC_REG_RESET_PL_PDA_VMAIN_2,
+         {true, true, true}, {0x7, 0x7c00007, 0x5c08007} },
 
        /* DBG_RESET_REG_MISC_PL_PDA_VAUX */
-       { MISC_REG_RESET_PL_PDA_VAUX, 0x2,
-         {true, true} },
+       { MISC_REG_RESET_PL_PDA_VAUX,
+         {true, true, true}, {0x2, 0x2, 0x2} },
 };
 
 static struct phy_defs s_phy_defs[] = {
        /* Initializes the GRC parameters */
        qed_dbg_grc_init_params(p_hwfn);
 
-       dev_data->initialized = true;
+       dev_data->use_dmae = true;
+       dev_data->num_regs_read = 0;
+       dev_data->initialized = 1;
 
        return DBG_STATUS_OK;
 }
 /* Writes the "last" section (including CRC) to the specified buffer at the
  * given offset. Returns the dumped size in dwords.
  */
-static u32 qed_dump_last_section(struct qed_hwfn *p_hwfn,
-                                u32 *dump_buf, u32 offset, bool dump)
+static u32 qed_dump_last_section(u32 *dump_buf, u32 offset, bool dump)
 {
        u32 start_offset = offset;
 
        case MEM_GROUP_CFC_MEM:
        case MEM_GROUP_CONN_CFC_MEM:
        case MEM_GROUP_TASK_CFC_MEM:
-               return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CFC);
+               return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CFC) ||
+                      qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CM_CTX);
        case MEM_GROUP_IGU_MEM:
        case MEM_GROUP_IGU_MSIX:
                return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IGU);
        for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
                struct block_defs *block = s_block_defs[block_id];
 
-               if (block->has_reset_bit && block->unreset)
+               if (block->exists[dev_data->chip_id] && block->has_reset_bit &&
+                   block->unreset)
                        reg_val[block->reset_reg] |=
                            BIT(block->reset_bit_offset);
        }
                if (!s_reset_regs_defs[i].exists[dev_data->chip_id])
                        continue;
 
-               reg_val[i] |= s_reset_regs_defs[i].unreset_val;
+               reg_val[i] |=
+                       s_reset_regs_defs[i].unreset_val[dev_data->chip_id];
 
                if (reg_val[i])
                        qed_wr(p_hwfn,
        return offset;
 }
 
+/* Reads the specified registers into the specified buffer.
+ * The addr and len arguments are specified in dwords.
+ */
+void qed_read_regs(struct qed_hwfn *p_hwfn,
+                  struct qed_ptt *p_ptt, u32 *buf, u32 addr, u32 len)
+{
+       u32 i;
+
+       for (i = 0; i < len; i++)
+               buf[i] = qed_rd(p_hwfn, p_ptt, DWORDS_TO_BYTES(addr + i));
+}
+
 /* Dumps the GRC registers in the specified address range.
  * Returns the dumped size in dwords.
  * The addr and len arguments are specified in dwords.
                                   u32 *dump_buf,
                                   bool dump, u32 addr, u32 len, bool wide_bus)
 {
-       u32 byte_addr = DWORDS_TO_BYTES(addr), offset = 0, i;
+       struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
 
        if (!dump)
                return len;
 
-       for (i = 0; i < len; i++, byte_addr += BYTES_IN_DWORD, offset++)
-               *(dump_buf + offset) = qed_rd(p_hwfn, p_ptt, byte_addr);
+       /* Print log if needed */
+       dev_data->num_regs_read += len;
+       if (dev_data->num_regs_read >=
+           s_platform_defs[dev_data->platform_id].log_thresh) {
+               DP_VERBOSE(p_hwfn,
+                          QED_MSG_DEBUG,
+                          "Dumping %d registers...\n",
+                          dev_data->num_regs_read);
+               dev_data->num_regs_read = 0;
+       }
 
-       return offset;
+       /* Try reading using DMAE */
+       if (dev_data->use_dmae &&
+           (len >= s_platform_defs[dev_data->platform_id].dmae_thresh ||
+            wide_bus)) {
+               if (!qed_dmae_grc2host(p_hwfn, p_ptt, DWORDS_TO_BYTES(addr),
+                                      (u64)(uintptr_t)(dump_buf), len, 0))
+                       return len;
+               dev_data->use_dmae = 0;
+               DP_VERBOSE(p_hwfn,
+                          QED_MSG_DEBUG,
+                          "Failed reading from chip using DMAE, using GRC instead\n");
+       }
+
+       /* Read registers */
+       qed_read_regs(p_hwfn, p_ptt, dump_buf, addr, len);
+
+       return len;
 }
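
For orientation, here is a minimal caller-side sketch (editor's illustration, not part of the patch) of the DMAE-first read path above; EXAMPLE_GRC_DWORD_ADDR is a hypothetical dword address and the call sits inside qed_debug.c, where the static helper is visible:

	u32 buf[64];
	u32 dumped;

	/* With dump=true this tries qed_dmae_grc2host() first (when the
	 * length crosses the platform's dmae_thresh or the bus is wide)
	 * and falls back to dword-by-dword GRC reads via qed_read_regs().
	 */
	dumped = qed_grc_dump_addr_range(p_hwfn, p_ptt, buf, true,
					 EXAMPLE_GRC_DWORD_ADDR,
					 ARRAY_SIZE(buf), false);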
 
 /* Dumps GRC registers sequence header. Returns the dumped size in dwords.
        chip = &s_chip_defs[dev_data->chip_id];
        chip_platform = &chip->per_platform[dev_data->platform_id];
 
-       if (dump)
-               DP_VERBOSE(p_hwfn, QED_MSG_DEBUG, "Dumping registers...\n");
-
        while (input_offset <
               s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].size_in_dwords) {
                const struct dbg_dump_split_hdr *split_hdr;
 
                offset += qed_dump_str_param(dump_buf + offset,
                                             dump, "name", buf);
-               if (dump)
-                       DP_VERBOSE(p_hwfn,
-                                  QED_MSG_DEBUG,
-                                  "Dumping %d registers from %s...\n",
-                                  len, buf);
        } else {
                /* Dump address */
                u32 addr_in_bytes = DWORDS_TO_BYTES(addr);
 
                offset += qed_dump_num_param(dump_buf + offset,
                                             dump, "addr", addr_in_bytes);
-               if (dump && len > 64)
-                       DP_VERBOSE(p_hwfn,
-                                  QED_MSG_DEBUG,
-                                  "Dumping %d registers from address 0x%x...\n",
-                                  len, addr_in_bytes);
        }
 
        /* Dump len */
        u8 rss_mem_id;
 
        for (rss_mem_id = 0; rss_mem_id < NUM_RSS_MEM_TYPES; rss_mem_id++) {
-               u32 rss_addr, num_entries, entry_width, total_dwords, i;
+               u32 rss_addr, num_entries, total_dwords;
                struct rss_mem_defs *rss_defs;
-               u32 addr, size;
+               u32 addr, num_dwords_to_read;
                bool packed;
 
                rss_defs = &s_rss_mem_defs[rss_mem_id];
                rss_addr = rss_defs->addr;
                num_entries = rss_defs->num_entries[dev_data->chip_id];
-               entry_width = rss_defs->entry_width[dev_data->chip_id];
-               total_dwords = (num_entries * entry_width) / 32;
-               packed = (entry_width == 16);
+               total_dwords = (num_entries * rss_defs->entry_width) / 32;
+               packed = (rss_defs->entry_width == 16);
 
                offset += qed_grc_dump_mem_hdr(p_hwfn,
                                               dump_buf + offset,
                                               rss_defs->mem_name,
                                               0,
                                               total_dwords,
-                                              entry_width,
+                                              rss_defs->entry_width,
                                               packed,
                                               rss_defs->type_name, false, 0);
 
                }
 
                addr = BYTES_TO_DWORDS(RSS_REG_RSS_RAM_DATA);
-               size = RSS_REG_RSS_RAM_DATA_SIZE;
-               for (i = 0; i < total_dwords; i += size, rss_addr++) {
+               while (total_dwords) {
+                       num_dwords_to_read = min_t(u32,
+                                                  RSS_REG_RSS_RAM_DATA_SIZE,
+                                                  total_dwords);
                        qed_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_ADDR, rss_addr);
                        offset += qed_grc_dump_addr_range(p_hwfn,
                                                          p_ptt,
                                                          dump_buf + offset,
                                                          dump,
                                                          addr,
-                                                         size,
+                                                         num_dwords_to_read,
                                                          false);
+                       total_dwords -= num_dwords_to_read;
+                       rss_addr++;
                }
        }
 
                                u32 *dump_buf, bool dump, u8 big_ram_id)
 {
        struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
-       u32 total_blocks, ram_size, offset = 0, i;
+       u32 block_size, ram_size, offset = 0, reg_val, i;
        char mem_name[12] = "???_BIG_RAM";
        char type_name[8] = "???_RAM";
        struct big_ram_defs *big_ram;
 
        big_ram = &s_big_ram_defs[big_ram_id];
-       total_blocks = big_ram->num_of_blocks[dev_data->chip_id];
-       ram_size = total_blocks * BIG_RAM_BLOCK_SIZE_DWORDS;
+       ram_size = big_ram->ram_size[dev_data->chip_id];
+
+       reg_val = qed_rd(p_hwfn, p_ptt, big_ram->is_256b_reg_addr);
+       block_size = reg_val &
+                    BIT(big_ram->is_256b_bit_offset[dev_data->chip_id]) ? 256
+                                                                        : 128;
 
        strncpy(type_name, big_ram->instance_name,
                strlen(big_ram->instance_name));
                                       mem_name,
                                       0,
                                       ram_size,
-                                      BIG_RAM_BLOCK_SIZE_BYTES * 8,
+                                      block_size * 8,
                                       false, type_name, false, 0);
 
        /* Read and dump Big RAM data */
                return offset + ram_size;
 
        /* Dump Big RAM */
-       for (i = 0; i < total_blocks / 2; i++) {
+       for (i = 0; i < DIV_ROUND_UP(ram_size, BRB_REG_BIG_RAM_DATA_SIZE);
+            i++) {
                u32 addr, len;
 
                qed_wr(p_hwfn, p_ptt, big_ram->addr_reg_addr, i);
                addr = BYTES_TO_DWORDS(big_ram->data_reg_addr);
-               len = 2 * BIG_RAM_BLOCK_SIZE_DWORDS;
+               len = BRB_REG_BIG_RAM_DATA_SIZE;
                offset += qed_grc_dump_addr_range(p_hwfn,
                                                  p_ptt,
                                                  dump_buf + offset,
                               phy_defs->tbus_data_lo_addr;
                data_hi_addr = phy_defs->base_addr +
                               phy_defs->tbus_data_hi_addr;
-               bytes_buf = (u8 *)(dump_buf + offset);
 
                if (snprintf(mem_name, sizeof(mem_name), "tbus_%s",
                             phy_defs->phy_name) < 0)
                        continue;
                }
 
+               bytes_buf = (u8 *)(dump_buf + offset);
                for (tbus_hi_offset = 0;
                     tbus_hi_offset < (NUM_PHY_TBUS_ADDRESSES >> 8);
                     tbus_hi_offset++) {
        struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
        u32 block_id, line_id, offset = 0;
 
-       /* Skip static debug if a debug bus recording is in progress */
-       if (qed_rd(p_hwfn, p_ptt, DBG_REG_DBG_BLOCK_ON))
+       /* Don't dump static debug if a debug bus recording is in progress */
+       if (dump && qed_rd(p_hwfn, p_ptt, DBG_REG_DBG_BLOCK_ON))
                return 0;
 
        if (dump) {
-               DP_VERBOSE(p_hwfn,
-                          QED_MSG_DEBUG, "Dumping static debug data...\n");
-
                /* Disable all blocks debug output */
                for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
                        struct block_defs *block = s_block_defs[block_id];
 
-                       if (block->has_dbg_bus[dev_data->chip_id])
+                       if (block->dbg_client_id[dev_data->chip_id] !=
+                           MAX_DBG_BUS_CLIENTS)
                                qed_wr(p_hwfn, p_ptt, block->dbg_enable_addr,
                                       0);
                }
                u32 block_dwords, addr, len;
                u8 dbg_client_id;
 
-               if (!block->has_dbg_bus[dev_data->chip_id])
+               if (block->dbg_client_id[dev_data->chip_id] ==
+                   MAX_DBG_BUS_CLIENTS)
                        continue;
 
-               block_desc =
-                       get_dbg_bus_block_desc(p_hwfn,
-                                              (enum block_id)block_id);
+               block_desc = get_dbg_bus_block_desc(p_hwfn,
+                                                   (enum block_id)block_id);
                block_dwords = NUM_DBG_LINES(block_desc) *
                               STATIC_DEBUG_LINE_DWORDS;
 
                                                    dump_buf + offset, dump);
 
        /* Dump last section */
-       offset += qed_dump_last_section(p_hwfn, dump_buf, offset, dump);
+       offset += qed_dump_last_section(dump_buf, offset, dump);
 
        if (dump) {
                /* Unstall storms */
                if (!check_rule && dump)
                        continue;
 
+               if (!dump) {
+                       u32 entry_dump_size =
+                               qed_idle_chk_dump_failure(p_hwfn,
+                                                         p_ptt,
+                                                         dump_buf + offset,
+                                                         false,
+                                                         rule->rule_id,
+                                                         rule,
+                                                         0,
+                                                         NULL);
+
+                       offset += num_reg_entries * entry_dump_size;
+                       (*num_failing_rules) += num_reg_entries;
+                       continue;
+               }
+
                /* Go over all register entries (number of entries is the same
                 * for all condition registers).
                 */
                for (entry_id = 0; entry_id < num_reg_entries; entry_id++) {
                        u32 next_reg_offset = 0;
 
-                       if (!dump) {
-                               offset += qed_idle_chk_dump_failure(p_hwfn,
-                                                       p_ptt,
-                                                       dump_buf + offset,
-                                                       false,
-                                                       rule->rule_id,
-                                                       rule,
-                                                       entry_id,
-                                                       NULL);
-                               (*num_failing_rules)++;
-                               break;
-                       }
-
                        /* Read current entry of all condition registers */
                        for (reg_id = 0; reg_id < rule->num_cond_regs;
                             reg_id++) {
                                const struct dbg_idle_chk_cond_reg *reg =
-                                   &cond_regs[reg_id];
+                                       &cond_regs[reg_id];
                                u32 padded_entry_size, addr;
                                bool wide_bus;
 
                                if (reg->num_entries > 1 ||
                                    reg->start_entry > 0) {
                                        padded_entry_size =
-                                           reg->entry_size > 1 ?
-                                           roundup_pow_of_two(reg->entry_size)
-                                           : 1;
+                                          reg->entry_size > 1 ?
+                                          roundup_pow_of_two(reg->entry_size) :
+                                          1;
                                        addr += (reg->start_entry + entry_id) *
                                                padded_entry_size;
                                }
                                                        entry_id,
                                                        cond_reg_values);
                                (*num_failing_rules)++;
-                               break;
                        }
                }
        }
                                   dump, "num_rules", num_failing_rules);
 
        /* Dump last section */
-       offset += qed_dump_last_section(p_hwfn, dump_buf, offset, dump);
+       offset += qed_dump_last_section(dump_buf, offset, dump);
 
        return offset;
 }
                                       (nvram_offset_bytes +
                                        read_offset) |
                                       (bytes_to_copy <<
-                                       DRV_MB_PARAM_NVM_LEN_SHIFT),
+                                       DRV_MB_PARAM_NVM_LEN_OFFSET),
                                       &ret_mcp_resp, &ret_mcp_param,
                                       &ret_read_size,
                                       (u32 *)((u8 *)ret_buf + read_offset)))
                offset += trace_meta_size_dwords;
 
        /* Dump last section */
-       offset += qed_dump_last_section(p_hwfn, dump_buf, offset, dump);
+       offset += qed_dump_last_section(dump_buf, offset, dump);
 
        *num_dumped_dwords = offset;
 
                                         u32 *dump_buf,
                                         bool dump, u32 *num_dumped_dwords)
 {
-       u32 dwords_read, size_param_offset, offset = 0;
+       u32 dwords_read, size_param_offset, offset = 0, addr, len;
        bool fifo_has_data;
 
        *num_dumped_dwords = 0;
         * buffer size since more entries could be added to the buffer as we are
         * emptying it.
         */
+       addr = BYTES_TO_DWORDS(GRC_REG_TRACE_FIFO);
+       len = REG_FIFO_ELEMENT_DWORDS;
        for (dwords_read = 0;
             fifo_has_data && dwords_read < REG_FIFO_DEPTH_DWORDS;
-            dwords_read += REG_FIFO_ELEMENT_DWORDS, offset +=
-            REG_FIFO_ELEMENT_DWORDS) {
-               if (qed_dmae_grc2host(p_hwfn, p_ptt, GRC_REG_TRACE_FIFO,
-                                     (u64)(uintptr_t)(&dump_buf[offset]),
-                                     REG_FIFO_ELEMENT_DWORDS, 0))
-                       return DBG_STATUS_DMAE_FAILED;
+            dwords_read += REG_FIFO_ELEMENT_DWORDS) {
+               offset += qed_grc_dump_addr_range(p_hwfn,
+                                                 p_ptt,
+                                                 dump_buf + offset,
+                                                 true,
+                                                 addr,
+                                                 len,
+                                                 true);
                fifo_has_data = qed_rd(p_hwfn, p_ptt,
                                       GRC_REG_TRACE_FIFO_VALID_DATA) > 0;
        }
                           dwords_read);
 out:
        /* Dump last section */
-       offset += qed_dump_last_section(p_hwfn, dump_buf, offset, dump);
+       offset += qed_dump_last_section(dump_buf, offset, dump);
 
        *num_dumped_dwords = offset;
 
                                         u32 *dump_buf,
                                         bool dump, u32 *num_dumped_dwords)
 {
-       u32 dwords_read, size_param_offset, offset = 0;
+       u32 dwords_read, size_param_offset, offset = 0, addr, len;
        bool fifo_has_data;
 
        *num_dumped_dwords = 0;
         * buffer size since more entries could be added to the buffer as we are
         * emptying it.
         */
+       addr = BYTES_TO_DWORDS(IGU_REG_ERROR_HANDLING_MEMORY);
+       len = IGU_FIFO_ELEMENT_DWORDS;
        for (dwords_read = 0;
             fifo_has_data && dwords_read < IGU_FIFO_DEPTH_DWORDS;
-            dwords_read += IGU_FIFO_ELEMENT_DWORDS, offset +=
-            IGU_FIFO_ELEMENT_DWORDS) {
-               if (qed_dmae_grc2host(p_hwfn, p_ptt,
-                                     IGU_REG_ERROR_HANDLING_MEMORY,
-                                     (u64)(uintptr_t)(&dump_buf[offset]),
-                                     IGU_FIFO_ELEMENT_DWORDS, 0))
-                       return DBG_STATUS_DMAE_FAILED;
-               fifo_has_data = qed_rd(p_hwfn, p_ptt,
+            dwords_read += IGU_FIFO_ELEMENT_DWORDS) {
+               offset += qed_grc_dump_addr_range(p_hwfn,
+                                                 p_ptt,
+                                                 dump_buf + offset,
+                                                 true,
+                                                 addr,
+                                                 len,
+                                                 true);
+               fifo_has_data = qed_rd(p_hwfn, p_ptt,
                                       IGU_REG_ERROR_HANDLING_DATA_VALID) > 0;
        }
 
                           dwords_read);
 out:
        /* Dump last section */
-       offset += qed_dump_last_section(p_hwfn, dump_buf, offset, dump);
+       offset += qed_dump_last_section(dump_buf, offset, dump);
 
        *num_dumped_dwords = offset;
 
                                                    bool dump,
                                                    u32 *num_dumped_dwords)
 {
-       u32 size_param_offset, override_window_dwords, offset = 0;
+       u32 size_param_offset, override_window_dwords, offset = 0, addr;
 
        *num_dumped_dwords = 0;
 
 
        /* Add override window info to buffer */
        override_window_dwords =
-               qed_rd(p_hwfn, p_ptt,
-                      GRC_REG_NUMBER_VALID_OVERRIDE_WINDOW) *
-                      PROTECTION_OVERRIDE_ELEMENT_DWORDS;
-       if (qed_dmae_grc2host(p_hwfn, p_ptt,
-                             GRC_REG_PROTECTION_OVERRIDE_WINDOW,
-                             (u64)(uintptr_t)(dump_buf + offset),
-                             override_window_dwords, 0))
-               return DBG_STATUS_DMAE_FAILED;
-       offset += override_window_dwords;
+               qed_rd(p_hwfn, p_ptt, GRC_REG_NUMBER_VALID_OVERRIDE_WINDOW) *
+               PROTECTION_OVERRIDE_ELEMENT_DWORDS;
+       addr = BYTES_TO_DWORDS(GRC_REG_PROTECTION_OVERRIDE_WINDOW);
+       offset += qed_grc_dump_addr_range(p_hwfn,
+                                         p_ptt,
+                                         dump_buf + offset,
+                                         true,
+                                         addr,
+                                         override_window_dwords,
+                                         true);
        qed_dump_num_param(dump_buf + size_param_offset, dump, "size",
                           override_window_dwords);
 out:
        /* Dump last section */
-       offset += qed_dump_last_section(p_hwfn, dump_buf, offset, dump);
+       offset += qed_dump_last_section(dump_buf, offset, dump);
 
        *num_dumped_dwords = offset;
 
                next_list_idx_addr = fw_asserts_section_addr +
                        DWORDS_TO_BYTES(asserts->list_next_index_dword_offset);
                next_list_idx = qed_rd(p_hwfn, p_ptt, next_list_idx_addr);
-               last_list_idx = (next_list_idx > 0
-                                ? next_list_idx
-                                : asserts->list_num_elements) - 1;
+               last_list_idx = (next_list_idx > 0 ?
+                                next_list_idx :
+                                asserts->list_num_elements) - 1;
                addr = BYTES_TO_DWORDS(fw_asserts_section_addr) +
                       asserts->list_dword_offset +
                       last_list_idx * asserts->list_element_dword_size;
        }
 
        /* Dump last section */
-       offset += qed_dump_last_section(p_hwfn, dump_buf, offset, dump);
+       offset += qed_dump_last_section(dump_buf, offset, dump);
 
        return offset;
 }
        {"phy_pcie", BLOCK_PHY_PCIE},
        {"led", BLOCK_LED},
        {"avs_wrap", BLOCK_AVS_WRAP},
+       {"pxpreqbus", BLOCK_PXPREQBUS},
        {"misc_aeu", BLOCK_MISC_AEU},
        {"bar0_map", BLOCK_BAR0_MAP}
 };
        /* DBG_STATUS_MCP_COULD_NOT_RESUME */
        "Failed to resume MCP after halt",
 
-       /* DBG_STATUS_DMAE_FAILED */
-       "DMAE transaction failed",
+       /* DBG_STATUS_RESERVED2 */
+       "Reserved debug status - shouldn't be returned",
 
        /* DBG_STATUS_SEMI_FIFO_NOT_EMPTY */
        "Failed to empty SEMI sync FIFO",
        if (*(char_buf + offset++)) {
                /* String param */
                *param_str_val = char_buf + offset;
+               *param_num_val = 0;
                offset += strlen(*param_str_val) + 1;
                if (offset & 0x3)
                        offset += (4 - (offset & 0x3));
 /* Parses the idle check rules and returns the number of characters printed.
  * In case of parsing error, returns 0.
  */
-static u32 qed_parse_idle_chk_dump_rules(struct qed_hwfn *p_hwfn,
-                                        u32 *dump_buf,
+static u32 qed_parse_idle_chk_dump_rules(u32 *dump_buf,
                                         u32 *dump_buf_end,
                                         u32 num_rules,
                                         bool print_fw_idle_chk,
  * parsed_results_bytes.
  * The parsing status is returned.
  */
-static enum dbg_status qed_parse_idle_chk_dump(struct qed_hwfn *p_hwfn,
-                                              u32 *dump_buf,
+static enum dbg_status qed_parse_idle_chk_dump(u32 *dump_buf,
                                               u32 num_dumped_dwords,
                                               char *results_buf,
                                               u32 *parsed_results_bytes,
                                            results_offset),
                            "FW_IDLE_CHECK:\n");
                rules_print_size =
-                       qed_parse_idle_chk_dump_rules(p_hwfn, dump_buf,
-                                                     dump_buf_end, num_rules,
+                       qed_parse_idle_chk_dump_rules(dump_buf,
+                                                     dump_buf_end,
+                                                     num_rules,
                                                      true,
                                                      results_buf ?
                                                      results_buf +
-                                                     results_offset : NULL,
-                                                     num_errors, num_warnings);
+                                                     results_offset :
+                                                     NULL,
+                                                     num_errors,
+                                                     num_warnings);
                results_offset += rules_print_size;
                if (!rules_print_size)
                        return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
                                            results_offset),
                            "\nLSI_IDLE_CHECK:\n");
                rules_print_size =
-                       qed_parse_idle_chk_dump_rules(p_hwfn, dump_buf,
-                                                     dump_buf_end, num_rules,
+                       qed_parse_idle_chk_dump_rules(dump_buf,
+                                                     dump_buf_end,
+                                                     num_rules,
                                                      false,
                                                      results_buf ?
                                                      results_buf +
-                                                     results_offset : NULL,
-                                                     num_errors, num_warnings);
+                                                     results_offset :
+                                                     NULL,
+                                                     num_errors,
+                                                     num_warnings);
                results_offset += rules_print_size;
                if (!rules_print_size)
                        return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
  */
 static enum dbg_status qed_parse_mcp_trace_dump(struct qed_hwfn *p_hwfn,
                                                u32 *dump_buf,
-                                               u32 num_dumped_dwords,
                                                char *results_buf,
                                                u32 *parsed_results_bytes)
 {
  * parsed_results_bytes.
  * The parsing status is returned.
  */
-static enum dbg_status qed_parse_reg_fifo_dump(struct qed_hwfn *p_hwfn,
-                                              u32 *dump_buf,
-                                              u32 num_dumped_dwords,
+static enum dbg_status qed_parse_reg_fifo_dump(u32 *dump_buf,
                                               char *results_buf,
                                               u32 *parsed_results_bytes)
 {
 static enum dbg_status qed_parse_igu_fifo_element(struct igu_fifo_element
                                                  *element, char
                                                  *results_buf,
-                                                 u32 *results_offset,
-                                                 u32 *parsed_results_bytes)
+                                                 u32 *results_offset)
 {
        const struct igu_fifo_addr_data *found_addr = NULL;
        u8 source, err_type, i, is_cleanup;
                                "cmd_type: prod/cons update, prod/cons: 0x%x, update_flag: %s, en_dis_int_for_sb : %s, segment : %s, timer_mask = %d, ",
                                prod_cons,
                                update_flag ? "update" : "nop",
-                               en_dis_int_for_sb
-                               ? (en_dis_int_for_sb == 1 ? "disable" : "nop")
-                               : "enable",
+                               en_dis_int_for_sb ?
+                               (en_dis_int_for_sb == 1 ? "disable" : "nop") :
+                               "enable",
                                segment ? "attn" : "regular",
                                timer_mask);
                }
  * parsed_results_bytes.
  * The parsing status is returned.
  */
-static enum dbg_status qed_parse_igu_fifo_dump(struct qed_hwfn *p_hwfn,
-                                              u32 *dump_buf,
-                                              u32 num_dumped_dwords,
+static enum dbg_status qed_parse_igu_fifo_dump(u32 *dump_buf,
                                               char *results_buf,
                                               u32 *parsed_results_bytes)
 {
        for (i = 0; i < num_elements; i++) {
                status = qed_parse_igu_fifo_element(&elements[i],
                                                    results_buf,
-                                                   &results_offset,
-                                                   parsed_results_bytes);
+                                                   &results_offset);
                if (status != DBG_STATUS_OK)
                        return status;
        }
 }
 
 static enum dbg_status
-qed_parse_protection_override_dump(struct qed_hwfn *p_hwfn,
-                                  u32 *dump_buf,
-                                  u32 num_dumped_dwords,
+qed_parse_protection_override_dump(u32 *dump_buf,
                                   char *results_buf,
                                   u32 *parsed_results_bytes)
 {
  * parsed_results_bytes.
  * The parsing status is returned.
  */
-static enum dbg_status qed_parse_fw_asserts_dump(struct qed_hwfn *p_hwfn,
-                                                u32 *dump_buf,
-                                                u32 num_dumped_dwords,
+static enum dbg_status qed_parse_fw_asserts_dump(u32 *dump_buf,
                                                 char *results_buf,
                                                 u32 *parsed_results_bytes)
 {
 {
        u32 num_errors, num_warnings;
 
-       return qed_parse_idle_chk_dump(p_hwfn,
-                                      dump_buf,
+       return qed_parse_idle_chk_dump(dump_buf,
                                       num_dumped_dwords,
                                       NULL,
                                       results_buf_size,
                                           u32 *dump_buf,
                                           u32 num_dumped_dwords,
                                           char *results_buf,
-                                          u32 *num_errors, u32 *num_warnings)
+                                          u32 *num_errors,
+                                          u32 *num_warnings)
 {
        u32 parsed_buf_size;
 
-       return qed_parse_idle_chk_dump(p_hwfn,
-                                      dump_buf,
+       return qed_parse_idle_chk_dump(dump_buf,
                                       num_dumped_dwords,
                                       results_buf,
                                       &parsed_buf_size,
                                                   u32 *results_buf_size)
 {
        return qed_parse_mcp_trace_dump(p_hwfn,
-                                       dump_buf,
-                                       num_dumped_dwords,
-                                       NULL, results_buf_size);
+                                       dump_buf, NULL, results_buf_size);
 }
 
 enum dbg_status qed_print_mcp_trace_results(struct qed_hwfn *p_hwfn,
 
        return qed_parse_mcp_trace_dump(p_hwfn,
                                        dump_buf,
-                                       num_dumped_dwords,
                                        results_buf, &parsed_buf_size);
 }
 
                                                  u32 num_dumped_dwords,
                                                  u32 *results_buf_size)
 {
-       return qed_parse_reg_fifo_dump(p_hwfn,
-                                      dump_buf,
-                                      num_dumped_dwords,
-                                      NULL, results_buf_size);
+       return qed_parse_reg_fifo_dump(dump_buf, NULL, results_buf_size);
 }
 
 enum dbg_status qed_print_reg_fifo_results(struct qed_hwfn *p_hwfn,
 {
        u32 parsed_buf_size;
 
-       return qed_parse_reg_fifo_dump(p_hwfn,
-                                      dump_buf,
-                                      num_dumped_dwords,
-                                      results_buf, &parsed_buf_size);
+       return qed_parse_reg_fifo_dump(dump_buf, results_buf, &parsed_buf_size);
 }
 
 enum dbg_status qed_get_igu_fifo_results_buf_size(struct qed_hwfn *p_hwfn,
                                                  u32 num_dumped_dwords,
                                                  u32 *results_buf_size)
 {
-       return qed_parse_igu_fifo_dump(p_hwfn,
-                                      dump_buf,
-                                      num_dumped_dwords,
-                                      NULL, results_buf_size);
+       return qed_parse_igu_fifo_dump(dump_buf, NULL, results_buf_size);
 }
 
 enum dbg_status qed_print_igu_fifo_results(struct qed_hwfn *p_hwfn,
 {
        u32 parsed_buf_size;
 
-       return qed_parse_igu_fifo_dump(p_hwfn,
-                                      dump_buf,
-                                      num_dumped_dwords,
-                                      results_buf, &parsed_buf_size);
+       return qed_parse_igu_fifo_dump(dump_buf, results_buf, &parsed_buf_size);
 }
 
 enum dbg_status
                                             u32 num_dumped_dwords,
                                             u32 *results_buf_size)
 {
-       return qed_parse_protection_override_dump(p_hwfn,
-                                                 dump_buf,
-                                                 num_dumped_dwords,
+       return qed_parse_protection_override_dump(dump_buf,
                                                  NULL, results_buf_size);
 }
 
 {
        u32 parsed_buf_size;
 
-       return qed_parse_protection_override_dump(p_hwfn,
-                                                 dump_buf,
-                                                 num_dumped_dwords,
+       return qed_parse_protection_override_dump(dump_buf,
                                                  results_buf,
                                                  &parsed_buf_size);
 }
                                                    u32 num_dumped_dwords,
                                                    u32 *results_buf_size)
 {
-       return qed_parse_fw_asserts_dump(p_hwfn,
-                                        dump_buf,
-                                        num_dumped_dwords,
-                                        NULL, results_buf_size);
+       return qed_parse_fw_asserts_dump(dump_buf, NULL, results_buf_size);
 }
 
 enum dbg_status qed_print_fw_asserts_results(struct qed_hwfn *p_hwfn,
 {
        u32 parsed_buf_size;
 
-       return qed_parse_fw_asserts_dump(p_hwfn,
-                                        dump_buf,
-                                        num_dumped_dwords,
+       return qed_parse_fw_asserts_dump(dump_buf,
                                         results_buf, &parsed_buf_size);
 }
 
 
        /* Go over registers with a non-zero attention status */
        for (i = 0; i < num_regs; i++) {
+               struct dbg_attn_bit_mapping *bit_mapping;
                struct dbg_attn_reg_result *reg_result;
-               struct dbg_attn_bit_mapping *mapping;
                u8 num_reg_attn, bit_idx = 0;
 
                reg_result = &results->reg_results[i];
                num_reg_attn = GET_FIELD(reg_result->data,
                                         DBG_ATTN_REG_RESULT_NUM_REG_ATTN);
                block_attn = &s_user_dbg_arrays[BIN_BUF_DBG_ATTN_INDEXES];
-               mapping = &((struct dbg_attn_bit_mapping *)
-                           block_attn->ptr)[reg_result->block_attn_offset];
+               bit_mapping = &((struct dbg_attn_bit_mapping *)
+                               block_attn->ptr)[reg_result->block_attn_offset];
 
                pstrings = &s_user_dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS];
 
                /* Go over attention status bits */
                for (j = 0; j < num_reg_attn; j++) {
-                       u16 attn_idx_val = GET_FIELD(mapping[j].data,
+                       u16 attn_idx_val = GET_FIELD(bit_mapping[j].data,
                                                     DBG_ATTN_BIT_MAPPING_VAL);
                        const char *attn_name, *attn_type_str, *masked_str;
-                       u32 name_offset, sts_addr;
+                       u32 attn_name_offset, sts_addr;
 
                        /* Check if bit mask should be advanced (due to unused
                         * bits).
                         */
-                       if (GET_FIELD(mapping[j].data,
+                       if (GET_FIELD(bit_mapping[j].data,
                                      DBG_ATTN_BIT_MAPPING_IS_UNUSED_BIT_CNT)) {
                                bit_idx += (u8)attn_idx_val;
                                continue;
                        }
 
                        /* Find attention name */
-                       name_offset = block_attn_name_offsets[attn_idx_val];
+                       attn_name_offset =
+                               block_attn_name_offsets[attn_idx_val];
                        attn_name = &((const char *)
-                                     pstrings->ptr)[name_offset];
+                                     pstrings->ptr)[attn_name_offset];
                        attn_type_str = attn_type == ATTN_TYPE_INTERRUPT ?
                                        "Interrupt" : "Parity";
                        masked_str = reg_result->mask_val & BIT(bit_idx) ?
 
        qed_init_clear_rt_data(p_hwfn);
 
        /* prepare QM portion of runtime array */
-       qed_qm_init_pf(p_hwfn, p_ptt);
+       qed_qm_init_pf(p_hwfn, p_ptt, false);
 
        /* activate init tool on runtime array */
        rc = qed_init_run(p_hwfn, p_ptt, PHASE_QM_PF, p_hwfn->rel_pf_id,
        if (rc)
                return rc;
 
+       /* Sanity check before the PF init sequence that uses DMAE */
+       rc = qed_dmae_sanity(p_hwfn, p_ptt, "pf_phase");
+       if (rc)
+               return rc;
+
        /* PF Init sequence */
        rc = qed_init_run(p_hwfn, p_ptt, PHASE_PF, rel_pf_id, hw_mode);
        if (rc)
                        /* No need for a case for QED_CMDQS_CQS since
                         * CNQ/CMDQS are the same resource.
                         */
-                       resc_max_val = NUM_OF_CMDQS_CQS;
+                       resc_max_val = NUM_OF_GLOBAL_QUEUES;
                        break;
                case QED_RDMA_STATS_QUEUE:
                        resc_max_val = b_ah ? RDMA_NUM_STATISTIC_COUNTERS_K2
        case QED_RDMA_CNQ_RAM:
        case QED_CMDQS_CQS:
                /* CNQ/CMDQS are the same resource */
-               *p_resc_num = NUM_OF_CMDQS_CQS / num_funcs;
+               *p_resc_num = NUM_OF_GLOBAL_QUEUES / num_funcs;
                break;
        case QED_RDMA_STATS_QUEUE:
                *p_resc_num = (b_ah ? RDMA_NUM_STATISTIC_COUNTERS_K2 :
 
        CORE_EVENT_RX_QUEUE_START,
        CORE_EVENT_RX_QUEUE_STOP,
        CORE_EVENT_RX_QUEUE_FLUSH,
+       CORE_EVENT_TX_QUEUE_UPDATE,
        MAX_CORE_EVENT_OPCODE
 };
 
        CORE_RAMROD_RX_QUEUE_STOP,
        CORE_RAMROD_TX_QUEUE_STOP,
        CORE_RAMROD_RX_QUEUE_FLUSH,
+       CORE_RAMROD_TX_QUEUE_UPDATE,
        MAX_CORE_RAMROD_CMD_ID
 };
 
        __le32 src_mac_addrhi;
        __le16 src_mac_addrlo;
        __le16 qp_id;
-       __le32 gid_dst[4];
+       __le32 src_qp;
+       __le32 reserved[3];
 };
 
 /* Core RX CQE for Light L2 */
        u8 complete_event_flg;
        u8 drop_ttl0_flg;
        __le16 num_of_pbl_pages;
-       u8 inner_vlan_removal_en;
+       u8 inner_vlan_stripping_en;
+       u8 report_outer_vlan;
        u8 queue_id;
        u8 main_func_queue;
        u8 mf_si_bcast_accept_all;
        u8 mf_si_mcast_accept_all;
        struct core_rx_action_on_error action_on_error;
        u8 gsi_offload_flag;
-       u8 reserved[7];
+       u8 reserved[6];
 };
 
 /* Ramrod data for rx queue stop ramrod */
 #define CORE_TX_BD_DATA_ROCE_FLAV_SHIFT                        12
 #define CORE_TX_BD_DATA_IP_LEN_MASK                    0x1
 #define CORE_TX_BD_DATA_IP_LEN_SHIFT                   13
-#define CORE_TX_BD_DATA_RESERVED0_MASK                 0x3
-#define CORE_TX_BD_DATA_RESERVED0_SHIFT                        14
+#define CORE_TX_BD_DATA_DISABLE_STAG_INSERTION_MASK    0x1
+#define CORE_TX_BD_DATA_DISABLE_STAG_INSERTION_SHIFT   14
+#define CORE_TX_BD_DATA_RESERVED0_MASK                 0x1
+#define CORE_TX_BD_DATA_RESERVED0_SHIFT                        15
 };
 
 /* Core TX BD for Light L2 */
        __le32 reserved0[2];
 };
 
+/* Ramrod data for tx queue update ramrod */
+struct core_tx_update_ramrod_data {
+       u8 update_qm_pq_id_flg;
+       u8 reserved0;
+       __le16 qm_pq_id;
+       __le32 reserved1[1];
+};
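
A rough sketch (editor's addition, values hypothetical; SPQ plumbing omitted) of how the new TX-queue-update ramrod data might be filled, where only the PQ-id update is meaningful:

	struct core_tx_update_ramrod_data data = { 0 };

	data.update_qm_pq_id_flg = 1;			/* request a PQ move */
	data.qm_pq_id = cpu_to_le16(new_pq_id);		/* new_pq_id: hypothetical */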
+
 /* Enum flag for what type of dcb data to update */
 enum dcb_dscp_update_mode {
        DONT_UPDATE_DCB_DSCP,
 
 struct e4_xstorm_core_conn_ag_ctx {
        u8 reserved0;
-       u8 core_state;
+       u8 state;
        u8 flags0;
 #define E4_XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_MASK   0x1
 #define E4_XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT  0
        u8 bytes[8];
        struct vf_pf_channel_eqe_data vf_pf_channel;
        struct iscsi_eqe_data iscsi_info;
+       struct iscsi_connect_done_results iscsi_conn_done_info;
        union rdma_eqe_data rdma_data;
        struct malicious_vf_eqe_data malicious_vf;
        struct initial_cleanup_eqe_data vf_init_cleanup;
        MAX_FW_FLOW_CTRL_MODE
 };
 
+/* GFT profile type */
+enum gft_profile_type {
+       GFT_PROFILE_TYPE_4_TUPLE,
+       GFT_PROFILE_TYPE_L4_DST_PORT,
+       GFT_PROFILE_TYPE_IP_DST_PORT,
+       MAX_GFT_PROFILE_TYPE
+};
+
 /* Major and Minor hsi Versions */
 struct hsi_fp_ver_struct {
        u8 minor_ver_arr[2];
        ETH_TUNN_IPV6_EXT_NBD_ERR,
        ETH_CONTROL_PACKET_VIOLATION,
        ETH_ANTI_SPOOFING_ERR,
+       ETH_PACKET_SIZE_TOO_LARGE,
        MAX_MALICIOUS_VF_ERROR_ID
 };
 
        struct mstorm_non_trigger_vf_zone non_trigger;
 };
 
+/* vlan header including TPID and TCI fields */
+struct vlan_header {
+       __le16 tpid;
+       __le16 tci;
+};
+
+/* outer tag configurations */
+struct outer_tag_config_struct {
+       u8 enable_stag_pri_change;
+       u8 pri_map_valid;
+       u8 reserved[2];
+       struct vlan_header outer_tag;
+       u8 inner_to_outer_pri_map[8];
+};
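
As an illustration only (editor's sketch; vlan_id is hypothetical), the outer-tag configuration that replaces the old flat pf_start fields could be populated with the standard 802.1Q TPID and an identity priority map:

	struct outer_tag_config_struct cfg = { 0 };
	u8 i;

	cfg.enable_stag_pri_change = 1;
	cfg.pri_map_valid = 1;
	cfg.outer_tag.tpid = cpu_to_le16(ETH_P_8021Q);	/* 0x8100 */
	cfg.outer_tag.tci = cpu_to_le16(vlan_id);	/* hypothetical VID */
	for (i = 0; i < 8; i++)
		cfg.inner_to_outer_pri_map[i] = i;	/* identity map */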
+
 /* personality per PF */
 enum personality_type {
        BAD_PERSONALITY_TYP,
        struct regpair event_ring_pbl_addr;
        struct regpair consolid_q_pbl_addr;
        struct pf_start_tunnel_config tunnel_config;
-       __le32 reserved;
        __le16 event_ring_sb_id;
        u8 base_vf_id;
        u8 num_vfs;
        u8 mf_mode;
        u8 integ_phase;
        u8 allow_npar_tx_switching;
-       u8 inner_to_outer_pri_map[8];
-       u8 pri_map_valid;
-       __le32 outer_tag;
+       u8 reserved0;
        struct hsi_fp_ver_struct hsi_fp_ver;
+       struct outer_tag_config_struct outer_tag_config;
 };
 
 /* Data for port update ramrod */
 struct protocol_dcb_data {
        u8 dcb_enable_flag;
-       u8 reserved_a;
+       u8 dscp_enable_flag;
        u8 dcb_priority;
        u8 dcb_tc;
-       u8 reserved_b;
+       u8 dscp_val;
        u8 reserved0;
 };
 
 
 /* Data for port update ramrod */
 struct pf_update_ramrod_data {
-       u8 pf_id;
        u8 update_eth_dcb_data_mode;
        u8 update_fcoe_dcb_data_mode;
        u8 update_iscsi_dcb_data_mode;
        u8 update_rroce_dcb_data_mode;
        u8 update_iwarp_dcb_data_mode;
        u8 update_mf_vlan_flag;
+       u8 update_enable_stag_pri_change;
        struct protocol_dcb_data eth_dcb_data;
        struct protocol_dcb_data fcoe_dcb_data;
        struct protocol_dcb_data iscsi_dcb_data;
        struct protocol_dcb_data rroce_dcb_data;
        struct protocol_dcb_data iwarp_dcb_data;
        __le16 mf_vlan;
-       __le16 reserved;
+       u8 enable_stag_pri_change;
+       u8 reserved;
        struct pf_update_tunnel_config tunnel_config;
 };
 
        struct regpair rcv_pkts;
 };
 
+/* Data for update QCN/DCQCN RL ramrod */
+struct rl_update_ramrod_data {
+       u8 qcn_update_param_flg;
+       u8 dcqcn_update_param_flg;
+       u8 rl_init_flg;
+       u8 rl_start_flg;
+       u8 rl_stop_flg;
+       u8 rl_id_first;
+       u8 rl_id_last;
+       u8 rl_dc_qcn_flg;
+       __le32 rl_bc_rate;
+       __le16 rl_max_rate;
+       __le16 rl_r_ai;
+       __le16 rl_r_hai;
+       __le16 dcqcn_g;
+       __le32 dcqcn_k_us;
+       __le32 dcqcn_timeuot_us;
+       __le32 qcn_timeuot_us;
+       __le32 reserved[2];
+};
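
A hedged sketch (editor's addition; values made up and units inferred from the field names only) of a DCQCN rate-limiter start using the new ramrod data:

	struct rl_update_ramrod_data rl = { 0 };

	rl.dcqcn_update_param_flg = 1;
	rl.rl_init_flg = 1;
	rl.rl_start_flg = 1;
	rl.rl_dc_qcn_flg = 1;
	rl.rl_id_first = 0;
	rl.rl_id_last = 3;			/* rate limiters 0..3 */
	rl.rl_bc_rate = cpu_to_le32(25000);	/* hypothetical rate value */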
+
 /* Slowpath Element (SPQE) */
 struct slow_path_element {
        struct ramrod_header hdr;
        struct regpair roce_irregular_pkt;
        struct regpair iwarp_irregular_pkt;
        struct regpair eth_irregular_pkt;
-       struct regpair reserved1;
+       struct regpair toe_irregular_pkt;
        struct regpair preroce_irregular_pkt;
        struct regpair eth_gre_tunn_filter_discard;
        struct regpair eth_vxlan_tunn_filter_discard;
        struct regpair eth_geneve_tunn_filter_discard;
+       struct regpair eth_gft_drop_pkt;
 };
 
 /* Tstorm VF zone */
        GRCBASE_MULD = 0x4e0000,
        GRCBASE_YULD = 0x4c8000,
        GRCBASE_XYLD = 0x4c0000,
-       GRCBASE_PTLD = 0x590000,
-       GRCBASE_YPLD = 0x5b0000,
+       GRCBASE_PTLD = 0x5a0000,
+       GRCBASE_YPLD = 0x5c0000,
        GRCBASE_PRM = 0x230000,
        GRCBASE_PBF_PB1 = 0xda0000,
        GRCBASE_PBF_PB2 = 0xda4000,
        GRCBASE_PHY_PCIE = 0x620000,
        GRCBASE_LED = 0x6b8000,
        GRCBASE_AVS_WRAP = 0x6b0000,
+       GRCBASE_PXPREQBUS = 0x56000,
        GRCBASE_MISC_AEU = 0x8000,
        GRCBASE_BAR0_MAP = 0x1c00000,
        MAX_BLOCK_ADDR
        BLOCK_PHY_PCIE,
        BLOCK_LED,
        BLOCK_AVS_WRAP,
+       BLOCK_PXPREQBUS,
        BLOCK_MISC_AEU,
        BLOCK_BAR0_MAP,
        MAX_BLOCK_ID
        DBG_STATUS_MCP_TRACE_NO_META,
        DBG_STATUS_MCP_COULD_NOT_HALT,
        DBG_STATUS_MCP_COULD_NOT_RESUME,
-       DBG_STATUS_DMAE_FAILED,
+       DBG_STATUS_RESERVED2,
        DBG_STATUS_SEMI_FIFO_NOT_EMPTY,
        DBG_STATUS_IGU_FIFO_BAD_DATA,
        DBG_STATUS_MCP_COULD_NOT_MASK_PRTY,
        u8 chip_id;
        u8 platform_id;
        u8 initialized;
-       u8 reserved;
+       u8 use_dmae;
+       __le32 num_regs_read;
 };
 
 /********************************/
  */
 enum dbg_status qed_dbg_set_bin_ptr(const u8 * const bin_ptr);
 
+/**
+ * @brief qed_read_regs - Reads registers into a buffer (using GRC).
+ *
+ * @param p_hwfn - HW device data
+ * @param p_ptt - Ptt window used for reading the registers.
+ * @param buf - Destination buffer.
+ * @param addr - Source GRC address in dwords.
+ * @param len - Number of registers to read.
+ */
+void qed_read_regs(struct qed_hwfn *p_hwfn,
+                  struct qed_ptt *p_ptt, u32 *buf, u32 addr, u32 len);
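
A minimal usage sketch (editor's addition; EXAMPLE_REG_ADDR is a hypothetical byte address). Note that the addr argument is in dwords, hence the conversion:

	u32 regs[8];

	qed_read_regs(p_hwfn, p_ptt, regs,
		      BYTES_TO_DWORDS(EXAMPLE_REG_ADDR), ARRAY_SIZE(regs));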
+
 /**
  * @brief qed_dbg_grc_set_params_default - Reverts all GRC parameters to their
  *     default value.
                                           u32 *num_errors,
                                           u32 *num_warnings);
 
+/**
+ * @brief qed_dbg_mcp_trace_set_meta_data - Sets a pointer to the MCP Trace
+ *     meta data.
+ *
+ * Needed in case the MCP Trace dump doesn't contain the meta data (e.g. due to
+ * no NVRAM access).
+ *
+ * @param data - pointer to MCP Trace meta data
+ * @param size - size of MCP Trace meta data in dwords
+ */
+void qed_dbg_mcp_trace_set_meta_data(u32 *data, u32 size);
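
For example (editor's sketch; meta_buf and meta_size_dwords are hypothetical, e.g. loaded from a firmware file by the caller), the meta data would be registered before parsing a dump that lacks the embedded meta section:

	qed_dbg_mcp_trace_set_meta_data(meta_buf, meta_size_dwords);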
+
 /**
  * @brief qed_get_mcp_trace_results_buf_size - Returns the required buffer size
  *     for MCP Trace results (in bytes).
        0x00000000,             /* bar0_map, bb, 0 lines */
        0x00000000,             /* bar0_map, k2, 0 lines */
        0x00000000,
+       0x00000000,             /* bar0_map, bb, 0 lines */
+       0x00000000,             /* bar0_map, k2, 0 lines */
+       0x00000000,
 };
 
 /* Win 2 */
  * Returns the required host memory size in 4KB units.
  * Must be called before all QM init HSI functions.
  *
- * @param pf_id - physical function ID
  * @param num_pf_cids - number of connections used by this PF
  * @param num_vf_cids - number of connections used by VFs of this PF
  * @param num_tids - number of tasks used by this PF
  *
  * @return The required host memory size in 4KB units.
  */
-u32 qed_qm_pf_mem_size(u8 pf_id,
-                      u32 num_pf_cids,
+u32 qed_qm_pf_mem_size(u32 num_pf_cids,
                       u32 num_vf_cids,
                       u32 num_tids, u16 num_pf_pqs, u16 num_vf_pqs);
 
        u8 port_id;
        u8 pf_id;
        u8 max_phys_tcs_per_port;
-       bool is_first_pf;
+       bool is_pf_loading;
        u32 num_pf_cids;
        u32 num_vf_cids;
        u32 num_tids;
        u8 num_vports;
        u16 pf_wfq;
        u32 pf_rl;
+       u32 link_speed;
        struct init_qm_pq_params *pq_params;
        struct init_qm_vport_params *vport_params;
 };
  * @param p_ptt - ptt window used for writing the registers
  * @param vport_id - VPORT ID
  * @param vport_rl - rate limit in Mb/sec units
+ * @param link_speed - link speed in Mbps.
  *
  * @return 0 on success, -1 on error.
  */
 int qed_init_vport_rl(struct qed_hwfn *p_hwfn,
-                     struct qed_ptt *p_ptt, u8 vport_id, u32 vport_rl);
+                     struct qed_ptt *p_ptt,
+                     u8 vport_id, u32 vport_rl, u32 link_speed);
+
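
A usage sketch for the extended signature (editor's addition, values hypothetical): cap VPORT 5 at 10000 Mb/s while the link runs at 25000 Mbps, letting the implementation normalize the limit against the link rate.

	int rc;

	rc = qed_init_vport_rl(p_hwfn, p_ptt, 5, 10000, 25000);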
 /**
  * @brief qed_send_qm_stop_cmd  Sends a stop command to the QM
  *
  * @param start_pq - first PQ ID to stop
  * @param num_pqs - Number of PQs to stop, starting from start_pq.
  *
- * @return bool, true if successful, false if timeout occured while waiting for QM command done.
+ * @return bool, true if successful, false if timeout occurred while waiting for
+ *     QM command done.
  */
 bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn,
                          struct qed_ptt *p_ptt,
 /**
  * @brief qed_set_vxlan_dest_port - initializes vxlan tunnel destination udp port
  *
+ * @param p_hwfn
  * @param p_ptt - ptt window used for writing the registers.
  * @param dest_port - vxlan destination udp port.
  */
 /**
  * @brief qed_set_vxlan_enable - enable or disable VXLAN tunnel in HW
  *
+ * @param p_hwfn
  * @param p_ptt - ptt window used for writing the registers.
  * @param vxlan_enable - vxlan enable flag.
  */
 /**
  * @brief qed_set_gre_enable - enable or disable GRE tunnel in HW
  *
+ * @param p_hwfn
  * @param p_ptt - ptt window used for writing the registers.
 * @param eth_gre_enable - eth GRE enable flag.
 * @param ip_gre_enable - IP GRE enable flag.
 /**
  * @brief qed_set_geneve_dest_port - initializes geneve tunnel destination udp port
  *
+ * @param p_hwfn
  * @param p_ptt - ptt window used for writing the registers.
  * @param dest_port - geneve destination udp port.
  */
 void qed_set_geneve_enable(struct qed_hwfn *p_hwfn,
                           struct qed_ptt *p_ptt,
                           bool eth_geneve_enable, bool ip_geneve_enable);
-void qed_set_rfs_mode_disable(struct qed_hwfn *p_hwfn,
-                             struct qed_ptt *p_ptt, u16 pf_id);
-void qed_set_rfs_mode_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
-                            u16 pf_id, bool tcp, bool udp,
-                            bool ipv4, bool ipv6);
+
+/**
+ * @brief qed_gft_disable - Disable GFT
+ *
+ * @param p_hwfn
+ * @param p_ptt - ptt window used for writing the registers.
+ * @param pf_id - pf on which to disable GFT.
+ */
+void qed_gft_disable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 pf_id);
+
+/**
+ * @brief qed_gft_config - Enable and configure HW for GFT
+ *
+ * @param p_hwfn
+ * @param p_ptt - ptt window used for writing the registers.
+ * @param pf_id - pf on which to enable GFT.
+ * @param tcp - set profile to match tcp packets.
+ * @param udp - set profile to match udp packets.
+ * @param ipv4 - set profile to match ipv4 packets.
+ * @param ipv6 - set profile to match ipv6 packets.
+ * @param profile_type - defines which packet fields to match on. Use
+ *     enum gft_profile_type.
+ */
+void qed_gft_config(struct qed_hwfn *p_hwfn,
+                   struct qed_ptt *p_ptt,
+                   u16 pf_id,
+                   bool tcp,
+                   bool udp,
+                   bool ipv4, bool ipv6, enum gft_profile_type profile_type);
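
A usage sketch (editor's addition) enabling GFT on the current PF for TCP and UDP over IPv4 with full 4-tuple matching; using rel_pf_id for the pf_id argument mirrors other call sites in the driver but treat the exact choice as an assumption:

	qed_gft_config(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
		       true, true, true, false, GFT_PROFILE_TYPE_4_TUPLE);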
+
+/**
+ * @brief qed_enable_context_validation - Enable and configure context
+ *     validation.
+ *
+ * @param p_hwfn
+ * @param p_ptt - ptt window used for writing the registers.
+ */
+void qed_enable_context_validation(struct qed_hwfn *p_hwfn,
+                                  struct qed_ptt *p_ptt);
+
+/**
+ * @brief qed_calc_session_ctx_validation - Calculate validation byte for
+ *     session context.
+ *
+ * @param p_ctx_mem - pointer to context memory.
+ * @param ctx_size - context size.
+ * @param ctx_type - context type.
+ * @param cid - context cid.
+ */
+void qed_calc_session_ctx_validation(void *p_ctx_mem,
+                                    u16 ctx_size, u8 ctx_type, u32 cid);
+
+/**
+ * @brief qed_calc_task_ctx_validation - Calculate validation byte for task
+ *     context.
+ *
+ * @param p_ctx_mem - pointer to context memory.
+ * @param ctx_size - context size.
+ * @param ctx_type - context type.
+ * @param tid - context tid.
+ */
+void qed_calc_task_ctx_validation(void *p_ctx_mem,
+                                 u16 ctx_size, u8 ctx_type, u32 tid);
+
+/**
+ * @brief qed_memset_session_ctx - Memset session context to 0 while
+ *     preserving validation bytes.
+ *
+ * @param p_ctx_mem - pointer to context memory.
+ * @param ctx_size - size to initialize.
+ * @param ctx_type - context type.
+ */
+void qed_memset_session_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type);
+
+/**
+ * @brief qed_memset_task_ctx - Memset task context to 0 while preserving
+ *     validation bytes.
+ *
+ * @param p_ctx_mem - pointer to context memory.
+ * @param ctx_size - size to initialize.
+ * @param ctx_type - context type.
+ */
+void qed_memset_task_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type);
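
Putting the context-validation helpers together (editor's sketch; p_ctx, ctx_size, ctx_type and cid are hypothetical and firmware-defined): validation is enabled once, the validation byte is computed for the CID, and the context can then be zeroed without losing it.

	qed_enable_context_validation(p_hwfn, p_ptt);
	qed_calc_session_ctx_validation(p_ctx, ctx_size, ctx_type, cid);
	qed_memset_session_ctx(p_ctx, ctx_size, ctx_type);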
 
 /* Ystorm flow control mode. Use enum fw_flow_ctrl_mode */
 #define YSTORM_FLOW_CONTROL_MODE_OFFSET                        (IRO[0].base)
        (IRO[7].base + ((queue_zone_id) * IRO[7].m1))
 #define USTORM_COMMON_QUEUE_CONS_SIZE                  (IRO[7].size)
 
+/* Xstorm Integration Test Data */
+#define XSTORM_INTEG_TEST_DATA_OFFSET                  (IRO[8].base)
+#define XSTORM_INTEG_TEST_DATA_SIZE                    (IRO[8].size)
+
+/* Ystorm Integration Test Data */
+#define YSTORM_INTEG_TEST_DATA_OFFSET                  (IRO[9].base)
+#define YSTORM_INTEG_TEST_DATA_SIZE                    (IRO[9].size)
+
+/* Pstorm Integration Test Data */
+#define PSTORM_INTEG_TEST_DATA_OFFSET                  (IRO[10].base)
+#define PSTORM_INTEG_TEST_DATA_SIZE                    (IRO[10].size)
+
+/* Tstorm Integration Test Data */
+#define TSTORM_INTEG_TEST_DATA_OFFSET                  (IRO[11].base)
+#define TSTORM_INTEG_TEST_DATA_SIZE                    (IRO[11].size)
+
+/* Mstorm Integration Test Data */
+#define MSTORM_INTEG_TEST_DATA_OFFSET                  (IRO[12].base)
+#define MSTORM_INTEG_TEST_DATA_SIZE                    (IRO[12].size)
+
+/* Ustorm Integration Test Data */
+#define USTORM_INTEG_TEST_DATA_OFFSET                  (IRO[13].base)
+#define USTORM_INTEG_TEST_DATA_SIZE                    (IRO[13].size)
+
 /* Tstorm producers */
 #define TSTORM_LL2_RX_PRODS_OFFSET(core_rx_queue_id) \
        (IRO[14].base + ((core_rx_queue_id) * IRO[14].m1))
        (IRO[30].base + ((queue_id) * IRO[30].m1))
 #define XSTORM_ETH_QUEUE_ZONE_SIZE                     (IRO[30].size)
 
+/* Ystorm cqe producer */
+#define YSTORM_TOE_CQ_PROD_OFFSET(rss_id) \
+       (IRO[31].base + ((rss_id) * IRO[31].m1))
+#define YSTORM_TOE_CQ_PROD_SIZE                                (IRO[31].size)
+
+/* Ustorm cqe producer */
+#define USTORM_TOE_CQ_PROD_OFFSET(rss_id) \
+       (IRO[32].base + ((rss_id) * IRO[32].m1))
+#define USTORM_TOE_CQ_PROD_SIZE                                (IRO[32].size)
+
+/* Ustorm grq producer */
+#define USTORM_TOE_GRQ_PROD_OFFSET(pf_id) \
+       (IRO[33].base + ((pf_id) * IRO[33].m1))
+#define USTORM_TOE_GRQ_PROD_SIZE                       (IRO[33].size)
+
 /* Tstorm cmdq-cons of given command queue-id */
 #define TSTORM_SCSI_CMDQ_CONS_OFFSET(cmdq_queue_id) \
        (IRO[34].base + ((cmdq_queue_id) * IRO[34].m1))
 /* Tstorm FCoE RX stats */
 #define TSTORM_FCOE_RX_STATS_OFFSET(pf_id) \
        (IRO[43].base + ((pf_id) * IRO[43].m1))
+#define TSTORM_FCOE_RX_STATS_SIZE                      (IRO[43].size)
 
 /* Pstorm FCoE TX stats */
 #define PSTORM_FCOE_TX_STATS_OFFSET(pf_id) \
        (IRO[44].base + ((pf_id) * IRO[44].m1))
+#define PSTORM_FCOE_TX_STATS_SIZE                      (IRO[44].size)
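
The stats offsets above are byte offsets into the storm RAM windows. A hedged sketch of reading the per-PF Tstorm FCoE RX block follows; qed_memcpy_from(), BAR0_MAP_REG_TSDM_RAM and struct fcoe_rx_stat are assumptions based on helpers and HSI types already present elsewhere in the driver, not definitions from this patch.

/* Sketch only: copy the Tstorm FCoE RX statistics for one PF out of
 * storm RAM into a local structure.
 */
static void example_read_fcoe_rx_stats(struct qed_hwfn *p_hwfn,
				       struct qed_ptt *p_ptt, u8 fcoe_pf_id)
{
	struct fcoe_rx_stat tstats;
	u32 tstats_addr;

	memset(&tstats, 0, sizeof(tstats));
	tstats_addr = BAR0_MAP_REG_TSDM_RAM +
		      TSTORM_FCOE_RX_STATS_OFFSET(fcoe_pf_id);
	qed_memcpy_from(p_hwfn, p_ptt, &tstats, tstats_addr, sizeof(tstats));
}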
 
 /* Pstorm RDMA queue statistics */
 #define PSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) \
        (IRO[46].base + ((rdma_stat_counter_id) * IRO[46].m1))
 #define TSTORM_RDMA_QUEUE_STAT_SIZE                    (IRO[46].size)
 
-static const struct iro iro_arr[49] = {
+/* Xstorm iWARP rxmit stats */
+#define XSTORM_IWARP_RXMIT_STATS_OFFSET(pf_id) \
+       (IRO[47].base + ((pf_id) * IRO[47].m1))
+#define XSTORM_IWARP_RXMIT_STATS_SIZE                  (IRO[47].size)
+
+/* Tstorm RoCE Event Statistics */
+#define TSTORM_ROCE_EVENTS_STAT_OFFSET(roce_pf_id) \
+       (IRO[48].base + ((roce_pf_id) * IRO[48].m1))
+#define TSTORM_ROCE_EVENTS_STAT_SIZE                   (IRO[48].size)
+
+/* DCQCN Received Statistics */
+#define YSTORM_ROCE_DCQCN_RECEIVED_STATS_OFFSET(roce_pf_id) \
+       (IRO[49].base + ((roce_pf_id) * IRO[49].m1))
+#define YSTORM_ROCE_DCQCN_RECEIVED_STATS_SIZE          (IRO[49].size)
+
+/* DCQCN Sent Statistics */
+#define PSTORM_ROCE_DCQCN_SENT_STATS_OFFSET(roce_pf_id) \
+       (IRO[50].base + ((roce_pf_id) * IRO[50].m1))
+#define PSTORM_ROCE_DCQCN_SENT_STATS_SIZE              (IRO[50].size)
+
+static const struct iro iro_arr[51] = {
        {0x0, 0x0, 0x0, 0x0, 0x8},
-       {0x4cb0, 0x80, 0x0, 0x0, 0x80},
-       {0x6518, 0x20, 0x0, 0x0, 0x20},
+       {0x4cb8, 0x88, 0x0, 0x0, 0x88},
+       {0x6530, 0x20, 0x0, 0x0, 0x20},
        {0xb00, 0x8, 0x0, 0x0, 0x4},
        {0xa80, 0x8, 0x0, 0x0, 0x4},
        {0x0, 0x8, 0x0, 0x0, 0x2},
        {0x80, 0x8, 0x0, 0x0, 0x4},
        {0x84, 0x8, 0x0, 0x0, 0x2},
+       {0x4c48, 0x0, 0x0, 0x0, 0x78},
+       {0x3e18, 0x0, 0x0, 0x0, 0x78},
+       {0x2b58, 0x0, 0x0, 0x0, 0x78},
        {0x4c40, 0x0, 0x0, 0x0, 0x78},
-       {0x3df0, 0x0, 0x0, 0x0, 0x78},
-       {0x29b0, 0x0, 0x0, 0x0, 0x78},
-       {0x4c38, 0x0, 0x0, 0x0, 0x78},
-       {0x4990, 0x0, 0x0, 0x0, 0x78},
-       {0x7f48, 0x0, 0x0, 0x0, 0x78},
+       {0x4998, 0x0, 0x0, 0x0, 0x78},
+       {0x7f50, 0x0, 0x0, 0x0, 0x78},
        {0xa28, 0x8, 0x0, 0x0, 0x8},
-       {0x61f8, 0x10, 0x0, 0x0, 0x10},
-       {0xbd20, 0x30, 0x0, 0x0, 0x30},
-       {0x95b8, 0x30, 0x0, 0x0, 0x30},
-       {0x4b60, 0x80, 0x0, 0x0, 0x40},
+       {0x6210, 0x10, 0x0, 0x0, 0x10},
+       {0xb820, 0x30, 0x0, 0x0, 0x30},
+       {0x96c0, 0x30, 0x0, 0x0, 0x30},
+       {0x4b68, 0x80, 0x0, 0x0, 0x40},
        {0x1f8, 0x4, 0x0, 0x0, 0x4},
-       {0x53a0, 0x80, 0x4, 0x0, 0x4},
-       {0xc7c8, 0x0, 0x0, 0x0, 0x4},
-       {0x4ba0, 0x80, 0x0, 0x0, 0x20},
-       {0x8150, 0x40, 0x0, 0x0, 0x30},
-       {0xec70, 0x60, 0x0, 0x0, 0x60},
-       {0x2b48, 0x80, 0x0, 0x0, 0x38},
-       {0xf1b0, 0x78, 0x0, 0x0, 0x78},
+       {0x53a8, 0x80, 0x4, 0x0, 0x4},
+       {0xc7d0, 0x0, 0x0, 0x0, 0x4},
+       {0x4ba8, 0x80, 0x0, 0x0, 0x20},
+       {0x8158, 0x40, 0x0, 0x0, 0x30},
+       {0xe770, 0x60, 0x0, 0x0, 0x60},
+       {0x2cf0, 0x80, 0x0, 0x0, 0x38},
+       {0xf2b8, 0x78, 0x0, 0x0, 0x78},
        {0x1f8, 0x4, 0x0, 0x0, 0x4},
-       {0xaef8, 0x0, 0x0, 0x0, 0xf0},
-       {0xafe8, 0x8, 0x0, 0x0, 0x8},
+       {0xaf20, 0x0, 0x0, 0x0, 0xf0},
+       {0xb010, 0x8, 0x0, 0x0, 0x8},
        {0x1f8, 0x8, 0x0, 0x0, 0x8},
        {0xac0, 0x8, 0x0, 0x0, 0x8},
        {0x2578, 0x8, 0x0, 0x0, 0x8},
        {0x24f8, 0x8, 0x0, 0x0, 0x8},
        {0x0, 0x8, 0x0, 0x0, 0x8},
-       {0x200, 0x10, 0x8, 0x0, 0x8},
-       {0xb78, 0x10, 0x8, 0x0, 0x2},
-       {0xd9a8, 0x38, 0x0, 0x0, 0x24},
-       {0x12988, 0x10, 0x0, 0x0, 0x8},
-       {0x11fa0, 0x38, 0x0, 0x0, 0x18},
-       {0xa580, 0x38, 0x0, 0x0, 0x10},
-       {0x86f8, 0x30, 0x0, 0x0, 0x18},
-       {0x101f8, 0x10, 0x0, 0x0, 0x10},
-       {0xde28, 0x48, 0x0, 0x0, 0x38},
-       {0x10660, 0x20, 0x0, 0x0, 0x20},
-       {0x2b80, 0x80, 0x0, 0x0, 0x10},
-       {0x5020, 0x10, 0x0, 0x0, 0x10},
-       {0xc9b0, 0x30, 0x0, 0x0, 0x10},
-       {0xeec0, 0x10, 0x0, 0x0, 0x10},
+       {0x400, 0x18, 0x8, 0x0, 0x8},
+       {0xb78, 0x18, 0x8, 0x0, 0x2},
+       {0xd898, 0x50, 0x0, 0x0, 0x3c},
+       {0x12908, 0x18, 0x0, 0x0, 0x10},
+       {0x11aa8, 0x40, 0x0, 0x0, 0x18},
+       {0xa588, 0x50, 0x0, 0x0, 0x20},
+       {0x8700, 0x40, 0x0, 0x0, 0x28},
+       {0x10300, 0x18, 0x0, 0x0, 0x10},
+       {0xde48, 0x48, 0x0, 0x0, 0x38},
+       {0x10768, 0x20, 0x0, 0x0, 0x20},
+       {0x2d28, 0x80, 0x0, 0x0, 0x10},
+       {0x5048, 0x10, 0x0, 0x0, 0x10},
+       {0xc9b8, 0x30, 0x0, 0x0, 0x10},
+       {0xeee0, 0x10, 0x0, 0x0, 0x10},
+       {0xa3a0, 0x10, 0x0, 0x0, 0x10},
+       {0x13108, 0x8, 0x0, 0x0, 0x8},
 };
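
Each iro_arr entry carries {base, m1, m2, m3, size} for one IRO, and the *_OFFSET macros above resolve to the base plus the per-index stride m1 (with m2/m3 used where a second or third index exists). A minimal sketch of that computation, assuming the struct iro field layout the driver uses:

/* Illustration, assuming struct iro { u32 base; u16 m1; u16 m2; u16 m3;
 * u16 size; }. TSTORM_LL2_RX_PRODS_OFFSET(q), for instance, is simply
 * iro_arr[14].base + q * iro_arr[14].m1.
 */
static u32 example_iro_offset(const struct iro *p_iro, u32 idx)
{
	return p_iro->base + idx * p_iro->m1;
}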
 
 /* Runtime array offsets */
-#define DORQ_REG_PF_MAX_ICID_0_RT_OFFSET       0
-#define DORQ_REG_PF_MAX_ICID_1_RT_OFFSET       1
-#define DORQ_REG_PF_MAX_ICID_2_RT_OFFSET       2
-#define DORQ_REG_PF_MAX_ICID_3_RT_OFFSET       3
-#define DORQ_REG_PF_MAX_ICID_4_RT_OFFSET       4
-#define DORQ_REG_PF_MAX_ICID_5_RT_OFFSET       5
-#define DORQ_REG_PF_MAX_ICID_6_RT_OFFSET       6
-#define DORQ_REG_PF_MAX_ICID_7_RT_OFFSET       7
-#define DORQ_REG_VF_MAX_ICID_0_RT_OFFSET       8
-#define DORQ_REG_VF_MAX_ICID_1_RT_OFFSET       9
-#define DORQ_REG_VF_MAX_ICID_2_RT_OFFSET       10
-#define DORQ_REG_VF_MAX_ICID_3_RT_OFFSET       11
-#define DORQ_REG_VF_MAX_ICID_4_RT_OFFSET       12
-#define DORQ_REG_VF_MAX_ICID_5_RT_OFFSET       13
-#define DORQ_REG_VF_MAX_ICID_6_RT_OFFSET       14
-#define DORQ_REG_VF_MAX_ICID_7_RT_OFFSET       15
-#define DORQ_REG_PF_WAKE_ALL_RT_OFFSET 16
-#define DORQ_REG_TAG1_ETHERTYPE_RT_OFFSET      17
-#define IGU_REG_PF_CONFIGURATION_RT_OFFSET     18
-#define IGU_REG_VF_CONFIGURATION_RT_OFFSET     19
-#define IGU_REG_ATTN_MSG_ADDR_L_RT_OFFSET      20
-#define IGU_REG_ATTN_MSG_ADDR_H_RT_OFFSET      21
-#define IGU_REG_LEADING_EDGE_LATCH_RT_OFFSET   22
-#define IGU_REG_TRAILING_EDGE_LATCH_RT_OFFSET  23
-#define CAU_REG_CQE_AGG_UNIT_SIZE_RT_OFFSET    24
-#define CAU_REG_SB_VAR_MEMORY_RT_OFFSET        761
-#define CAU_REG_SB_VAR_MEMORY_RT_SIZE  736
-#define CAU_REG_SB_VAR_MEMORY_RT_OFFSET        761
-#define CAU_REG_SB_VAR_MEMORY_RT_SIZE  736
-#define CAU_REG_SB_ADDR_MEMORY_RT_OFFSET       1497
-#define CAU_REG_SB_ADDR_MEMORY_RT_SIZE 736
-#define CAU_REG_PI_MEMORY_RT_OFFSET    2233
-#define CAU_REG_PI_MEMORY_RT_SIZE      4416
-#define PRS_REG_SEARCH_RESP_INITIATOR_TYPE_RT_OFFSET   6649
-#define PRS_REG_TASK_ID_MAX_INITIATOR_PF_RT_OFFSET     6650
-#define PRS_REG_TASK_ID_MAX_INITIATOR_VF_RT_OFFSET     6651
-#define PRS_REG_TASK_ID_MAX_TARGET_PF_RT_OFFSET        6652
-#define PRS_REG_TASK_ID_MAX_TARGET_VF_RT_OFFSET        6653
-#define PRS_REG_SEARCH_TCP_RT_OFFSET   6654
-#define PRS_REG_SEARCH_FCOE_RT_OFFSET  6655
-#define PRS_REG_SEARCH_ROCE_RT_OFFSET  6656
-#define PRS_REG_ROCE_DEST_QP_MAX_VF_RT_OFFSET  6657
-#define PRS_REG_ROCE_DEST_QP_MAX_PF_RT_OFFSET  6658
-#define PRS_REG_SEARCH_OPENFLOW_RT_OFFSET      6659
-#define PRS_REG_SEARCH_NON_IP_AS_OPENFLOW_RT_OFFSET    6660
-#define PRS_REG_OPENFLOW_SUPPORT_ONLY_KNOWN_OVER_IP_RT_OFFSET  6661
-#define PRS_REG_OPENFLOW_SEARCH_KEY_MASK_RT_OFFSET     6662
-#define PRS_REG_TAG_ETHERTYPE_0_RT_OFFSET      6663
-#define PRS_REG_LIGHT_L2_ETHERTYPE_EN_RT_OFFSET        6664
-#define SRC_REG_FIRSTFREE_RT_OFFSET    6665
-#define SRC_REG_FIRSTFREE_RT_SIZE      2
-#define SRC_REG_LASTFREE_RT_OFFSET     6667
-#define SRC_REG_LASTFREE_RT_SIZE       2
-#define SRC_REG_COUNTFREE_RT_OFFSET    6669
-#define SRC_REG_NUMBER_HASH_BITS_RT_OFFSET     6670
-#define PSWRQ2_REG_CDUT_P_SIZE_RT_OFFSET       6671
-#define PSWRQ2_REG_CDUC_P_SIZE_RT_OFFSET       6672
-#define PSWRQ2_REG_TM_P_SIZE_RT_OFFSET 6673
-#define PSWRQ2_REG_QM_P_SIZE_RT_OFFSET 6674
-#define PSWRQ2_REG_SRC_P_SIZE_RT_OFFSET        6675
-#define PSWRQ2_REG_TSDM_P_SIZE_RT_OFFSET       6676
-#define PSWRQ2_REG_TM_FIRST_ILT_RT_OFFSET      6677
-#define PSWRQ2_REG_TM_LAST_ILT_RT_OFFSET       6678
-#define PSWRQ2_REG_QM_FIRST_ILT_RT_OFFSET      6679
-#define PSWRQ2_REG_QM_LAST_ILT_RT_OFFSET       6680
-#define PSWRQ2_REG_SRC_FIRST_ILT_RT_OFFSET     6681
-#define PSWRQ2_REG_SRC_LAST_ILT_RT_OFFSET      6682
-#define PSWRQ2_REG_CDUC_FIRST_ILT_RT_OFFSET    6683
-#define PSWRQ2_REG_CDUC_LAST_ILT_RT_OFFSET     6684
-#define PSWRQ2_REG_CDUT_FIRST_ILT_RT_OFFSET    6685
-#define PSWRQ2_REG_CDUT_LAST_ILT_RT_OFFSET     6686
-#define PSWRQ2_REG_TSDM_FIRST_ILT_RT_OFFSET    6687
-#define PSWRQ2_REG_TSDM_LAST_ILT_RT_OFFSET     6688
-#define PSWRQ2_REG_TM_NUMBER_OF_PF_BLOCKS_RT_OFFSET    6689
-#define PSWRQ2_REG_CDUT_NUMBER_OF_PF_BLOCKS_RT_OFFSET  6690
-#define PSWRQ2_REG_CDUC_NUMBER_OF_PF_BLOCKS_RT_OFFSET  6691
-#define PSWRQ2_REG_TM_VF_BLOCKS_RT_OFFSET      6692
-#define PSWRQ2_REG_CDUT_VF_BLOCKS_RT_OFFSET    6693
-#define PSWRQ2_REG_CDUC_VF_BLOCKS_RT_OFFSET    6694
-#define PSWRQ2_REG_TM_BLOCKS_FACTOR_RT_OFFSET  6695
-#define PSWRQ2_REG_CDUT_BLOCKS_FACTOR_RT_OFFSET        6696
-#define PSWRQ2_REG_CDUC_BLOCKS_FACTOR_RT_OFFSET        6697
-#define PSWRQ2_REG_VF_BASE_RT_OFFSET   6698
-#define PSWRQ2_REG_VF_LAST_ILT_RT_OFFSET       6699
-#define PSWRQ2_REG_DRAM_ALIGN_WR_RT_OFFSET     6700
-#define PSWRQ2_REG_DRAM_ALIGN_RD_RT_OFFSET     6701
-#define PSWRQ2_REG_ILT_MEMORY_RT_OFFSET        6702
-#define PSWRQ2_REG_ILT_MEMORY_RT_SIZE  22000
-#define PGLUE_REG_B_VF_BASE_RT_OFFSET  28702
-#define PGLUE_REG_B_MSDM_OFFSET_MASK_B_RT_OFFSET       28703
-#define PGLUE_REG_B_MSDM_VF_SHIFT_B_RT_OFFSET  28704
-#define PGLUE_REG_B_CACHE_LINE_SIZE_RT_OFFSET  28705
-#define PGLUE_REG_B_PF_BAR0_SIZE_RT_OFFSET     28706
-#define PGLUE_REG_B_PF_BAR1_SIZE_RT_OFFSET     28707
-#define PGLUE_REG_B_VF_BAR1_SIZE_RT_OFFSET     28708
-#define TM_REG_VF_ENABLE_CONN_RT_OFFSET        28709
-#define TM_REG_PF_ENABLE_CONN_RT_OFFSET        28710
-#define TM_REG_PF_ENABLE_TASK_RT_OFFSET        28711
-#define TM_REG_GROUP_SIZE_RESOLUTION_CONN_RT_OFFSET    28712
-#define TM_REG_GROUP_SIZE_RESOLUTION_TASK_RT_OFFSET    28713
-#define TM_REG_CONFIG_CONN_MEM_RT_OFFSET       28714
-#define TM_REG_CONFIG_CONN_MEM_RT_SIZE 416
-#define TM_REG_CONFIG_TASK_MEM_RT_OFFSET       29130
-#define TM_REG_CONFIG_TASK_MEM_RT_SIZE 608
-#define QM_REG_MAXPQSIZE_0_RT_OFFSET   29738
-#define QM_REG_MAXPQSIZE_1_RT_OFFSET   29739
-#define QM_REG_MAXPQSIZE_2_RT_OFFSET   29740
-#define QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET      29741
-#define QM_REG_MAXPQSIZETXSEL_1_RT_OFFSET      29742
-#define QM_REG_MAXPQSIZETXSEL_2_RT_OFFSET      29743
-#define QM_REG_MAXPQSIZETXSEL_3_RT_OFFSET      29744
-#define QM_REG_MAXPQSIZETXSEL_4_RT_OFFSET      29745
-#define QM_REG_MAXPQSIZETXSEL_5_RT_OFFSET      29746
-#define QM_REG_MAXPQSIZETXSEL_6_RT_OFFSET      29747
-#define QM_REG_MAXPQSIZETXSEL_7_RT_OFFSET      29748
-#define QM_REG_MAXPQSIZETXSEL_8_RT_OFFSET      29749
-#define QM_REG_MAXPQSIZETXSEL_9_RT_OFFSET      29750
-#define QM_REG_MAXPQSIZETXSEL_10_RT_OFFSET     29751
-#define QM_REG_MAXPQSIZETXSEL_11_RT_OFFSET     29752
-#define QM_REG_MAXPQSIZETXSEL_12_RT_OFFSET     29753
-#define QM_REG_MAXPQSIZETXSEL_13_RT_OFFSET     29754
-#define QM_REG_MAXPQSIZETXSEL_14_RT_OFFSET     29755
-#define QM_REG_MAXPQSIZETXSEL_15_RT_OFFSET     29756
-#define QM_REG_MAXPQSIZETXSEL_16_RT_OFFSET     29757
-#define QM_REG_MAXPQSIZETXSEL_17_RT_OFFSET     29758
-#define QM_REG_MAXPQSIZETXSEL_18_RT_OFFSET     29759
-#define QM_REG_MAXPQSIZETXSEL_19_RT_OFFSET     29760
-#define QM_REG_MAXPQSIZETXSEL_20_RT_OFFSET     29761
-#define QM_REG_MAXPQSIZETXSEL_21_RT_OFFSET     29762
-#define QM_REG_MAXPQSIZETXSEL_22_RT_OFFSET     29763
-#define QM_REG_MAXPQSIZETXSEL_23_RT_OFFSET     29764
-#define QM_REG_MAXPQSIZETXSEL_24_RT_OFFSET     29765
-#define QM_REG_MAXPQSIZETXSEL_25_RT_OFFSET     29766
-#define QM_REG_MAXPQSIZETXSEL_26_RT_OFFSET     29767
-#define QM_REG_MAXPQSIZETXSEL_27_RT_OFFSET     29768
-#define QM_REG_MAXPQSIZETXSEL_28_RT_OFFSET     29769
-#define QM_REG_MAXPQSIZETXSEL_29_RT_OFFSET     29770
-#define QM_REG_MAXPQSIZETXSEL_30_RT_OFFSET     29771
-#define QM_REG_MAXPQSIZETXSEL_31_RT_OFFSET     29772
-#define QM_REG_MAXPQSIZETXSEL_32_RT_OFFSET     29773
-#define QM_REG_MAXPQSIZETXSEL_33_RT_OFFSET     29774
-#define QM_REG_MAXPQSIZETXSEL_34_RT_OFFSET     29775
-#define QM_REG_MAXPQSIZETXSEL_35_RT_OFFSET     29776
-#define QM_REG_MAXPQSIZETXSEL_36_RT_OFFSET     29777
-#define QM_REG_MAXPQSIZETXSEL_37_RT_OFFSET     29778
-#define QM_REG_MAXPQSIZETXSEL_38_RT_OFFSET     29779
-#define QM_REG_MAXPQSIZETXSEL_39_RT_OFFSET     29780
-#define QM_REG_MAXPQSIZETXSEL_40_RT_OFFSET     29781
-#define QM_REG_MAXPQSIZETXSEL_41_RT_OFFSET     29782
-#define QM_REG_MAXPQSIZETXSEL_42_RT_OFFSET     29783
-#define QM_REG_MAXPQSIZETXSEL_43_RT_OFFSET     29784
-#define QM_REG_MAXPQSIZETXSEL_44_RT_OFFSET     29785
-#define QM_REG_MAXPQSIZETXSEL_45_RT_OFFSET     29786
-#define QM_REG_MAXPQSIZETXSEL_46_RT_OFFSET     29787
-#define QM_REG_MAXPQSIZETXSEL_47_RT_OFFSET     29788
-#define QM_REG_MAXPQSIZETXSEL_48_RT_OFFSET     29789
-#define QM_REG_MAXPQSIZETXSEL_49_RT_OFFSET     29790
-#define QM_REG_MAXPQSIZETXSEL_50_RT_OFFSET     29791
-#define QM_REG_MAXPQSIZETXSEL_51_RT_OFFSET     29792
-#define QM_REG_MAXPQSIZETXSEL_52_RT_OFFSET     29793
-#define QM_REG_MAXPQSIZETXSEL_53_RT_OFFSET     29794
-#define QM_REG_MAXPQSIZETXSEL_54_RT_OFFSET     29795
-#define QM_REG_MAXPQSIZETXSEL_55_RT_OFFSET     29796
-#define QM_REG_MAXPQSIZETXSEL_56_RT_OFFSET     29797
-#define QM_REG_MAXPQSIZETXSEL_57_RT_OFFSET     29798
-#define QM_REG_MAXPQSIZETXSEL_58_RT_OFFSET     29799
-#define QM_REG_MAXPQSIZETXSEL_59_RT_OFFSET     29800
-#define QM_REG_MAXPQSIZETXSEL_60_RT_OFFSET     29801
-#define QM_REG_MAXPQSIZETXSEL_61_RT_OFFSET     29802
-#define QM_REG_MAXPQSIZETXSEL_62_RT_OFFSET     29803
-#define QM_REG_MAXPQSIZETXSEL_63_RT_OFFSET     29804
-#define QM_REG_BASEADDROTHERPQ_RT_OFFSET       29805
-#define QM_REG_BASEADDROTHERPQ_RT_SIZE 128
-#define QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET    29933
-#define QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET    29934
-#define QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET     29935
-#define QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET   29936
-#define QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET  29937
-#define QM_REG_WRROTHERPQGRP_0_RT_OFFSET       29938
-#define QM_REG_WRROTHERPQGRP_1_RT_OFFSET       29939
-#define QM_REG_WRROTHERPQGRP_2_RT_OFFSET       29940
-#define QM_REG_WRROTHERPQGRP_3_RT_OFFSET       29941
-#define QM_REG_WRROTHERPQGRP_4_RT_OFFSET       29942
-#define QM_REG_WRROTHERPQGRP_5_RT_OFFSET       29943
-#define QM_REG_WRROTHERPQGRP_6_RT_OFFSET       29944
-#define QM_REG_WRROTHERPQGRP_7_RT_OFFSET       29945
-#define QM_REG_WRROTHERPQGRP_8_RT_OFFSET       29946
-#define QM_REG_WRROTHERPQGRP_9_RT_OFFSET       29947
-#define QM_REG_WRROTHERPQGRP_10_RT_OFFSET      29948
-#define QM_REG_WRROTHERPQGRP_11_RT_OFFSET      29949
-#define QM_REG_WRROTHERPQGRP_12_RT_OFFSET      29950
-#define QM_REG_WRROTHERPQGRP_13_RT_OFFSET      29951
-#define QM_REG_WRROTHERPQGRP_14_RT_OFFSET      29952
-#define QM_REG_WRROTHERPQGRP_15_RT_OFFSET      29953
-#define QM_REG_WRROTHERGRPWEIGHT_0_RT_OFFSET   29954
-#define QM_REG_WRROTHERGRPWEIGHT_1_RT_OFFSET   29955
-#define QM_REG_WRROTHERGRPWEIGHT_2_RT_OFFSET   29956
-#define QM_REG_WRROTHERGRPWEIGHT_3_RT_OFFSET   29957
-#define QM_REG_WRRTXGRPWEIGHT_0_RT_OFFSET      29958
-#define QM_REG_WRRTXGRPWEIGHT_1_RT_OFFSET      29959
-#define QM_REG_PQTX2PF_0_RT_OFFSET     29960
-#define QM_REG_PQTX2PF_1_RT_OFFSET     29961
-#define QM_REG_PQTX2PF_2_RT_OFFSET     29962
-#define QM_REG_PQTX2PF_3_RT_OFFSET     29963
-#define QM_REG_PQTX2PF_4_RT_OFFSET     29964
-#define QM_REG_PQTX2PF_5_RT_OFFSET     29965
-#define QM_REG_PQTX2PF_6_RT_OFFSET     29966
-#define QM_REG_PQTX2PF_7_RT_OFFSET     29967
-#define QM_REG_PQTX2PF_8_RT_OFFSET     29968
-#define QM_REG_PQTX2PF_9_RT_OFFSET     29969
-#define QM_REG_PQTX2PF_10_RT_OFFSET    29970
-#define QM_REG_PQTX2PF_11_RT_OFFSET    29971
-#define QM_REG_PQTX2PF_12_RT_OFFSET    29972
-#define QM_REG_PQTX2PF_13_RT_OFFSET    29973
-#define QM_REG_PQTX2PF_14_RT_OFFSET    29974
-#define QM_REG_PQTX2PF_15_RT_OFFSET    29975
-#define QM_REG_PQTX2PF_16_RT_OFFSET    29976
-#define QM_REG_PQTX2PF_17_RT_OFFSET    29977
-#define QM_REG_PQTX2PF_18_RT_OFFSET    29978
-#define QM_REG_PQTX2PF_19_RT_OFFSET    29979
-#define QM_REG_PQTX2PF_20_RT_OFFSET    29980
-#define QM_REG_PQTX2PF_21_RT_OFFSET    29981
-#define QM_REG_PQTX2PF_22_RT_OFFSET    29982
-#define QM_REG_PQTX2PF_23_RT_OFFSET    29983
-#define QM_REG_PQTX2PF_24_RT_OFFSET    29984
-#define QM_REG_PQTX2PF_25_RT_OFFSET    29985
-#define QM_REG_PQTX2PF_26_RT_OFFSET    29986
-#define QM_REG_PQTX2PF_27_RT_OFFSET    29987
-#define QM_REG_PQTX2PF_28_RT_OFFSET    29988
-#define QM_REG_PQTX2PF_29_RT_OFFSET    29989
-#define QM_REG_PQTX2PF_30_RT_OFFSET    29990
-#define QM_REG_PQTX2PF_31_RT_OFFSET    29991
-#define QM_REG_PQTX2PF_32_RT_OFFSET    29992
-#define QM_REG_PQTX2PF_33_RT_OFFSET    29993
-#define QM_REG_PQTX2PF_34_RT_OFFSET    29994
-#define QM_REG_PQTX2PF_35_RT_OFFSET    29995
-#define QM_REG_PQTX2PF_36_RT_OFFSET    29996
-#define QM_REG_PQTX2PF_37_RT_OFFSET    29997
-#define QM_REG_PQTX2PF_38_RT_OFFSET    29998
-#define QM_REG_PQTX2PF_39_RT_OFFSET    29999
-#define QM_REG_PQTX2PF_40_RT_OFFSET    30000
-#define QM_REG_PQTX2PF_41_RT_OFFSET    30001
-#define QM_REG_PQTX2PF_42_RT_OFFSET    30002
-#define QM_REG_PQTX2PF_43_RT_OFFSET    30003
-#define QM_REG_PQTX2PF_44_RT_OFFSET    30004
-#define QM_REG_PQTX2PF_45_RT_OFFSET    30005
-#define QM_REG_PQTX2PF_46_RT_OFFSET    30006
-#define QM_REG_PQTX2PF_47_RT_OFFSET    30007
-#define QM_REG_PQTX2PF_48_RT_OFFSET    30008
-#define QM_REG_PQTX2PF_49_RT_OFFSET    30009
-#define QM_REG_PQTX2PF_50_RT_OFFSET    30010
-#define QM_REG_PQTX2PF_51_RT_OFFSET    30011
-#define QM_REG_PQTX2PF_52_RT_OFFSET    30012
-#define QM_REG_PQTX2PF_53_RT_OFFSET    30013
-#define QM_REG_PQTX2PF_54_RT_OFFSET    30014
-#define QM_REG_PQTX2PF_55_RT_OFFSET    30015
-#define QM_REG_PQTX2PF_56_RT_OFFSET    30016
-#define QM_REG_PQTX2PF_57_RT_OFFSET    30017
-#define QM_REG_PQTX2PF_58_RT_OFFSET    30018
-#define QM_REG_PQTX2PF_59_RT_OFFSET    30019
-#define QM_REG_PQTX2PF_60_RT_OFFSET    30020
-#define QM_REG_PQTX2PF_61_RT_OFFSET    30021
-#define QM_REG_PQTX2PF_62_RT_OFFSET    30022
-#define QM_REG_PQTX2PF_63_RT_OFFSET    30023
-#define QM_REG_PQOTHER2PF_0_RT_OFFSET  30024
-#define QM_REG_PQOTHER2PF_1_RT_OFFSET  30025
-#define QM_REG_PQOTHER2PF_2_RT_OFFSET  30026
-#define QM_REG_PQOTHER2PF_3_RT_OFFSET  30027
-#define QM_REG_PQOTHER2PF_4_RT_OFFSET  30028
-#define QM_REG_PQOTHER2PF_5_RT_OFFSET  30029
-#define QM_REG_PQOTHER2PF_6_RT_OFFSET  30030
-#define QM_REG_PQOTHER2PF_7_RT_OFFSET  30031
-#define QM_REG_PQOTHER2PF_8_RT_OFFSET  30032
-#define QM_REG_PQOTHER2PF_9_RT_OFFSET  30033
-#define QM_REG_PQOTHER2PF_10_RT_OFFSET 30034
-#define QM_REG_PQOTHER2PF_11_RT_OFFSET 30035
-#define QM_REG_PQOTHER2PF_12_RT_OFFSET 30036
-#define QM_REG_PQOTHER2PF_13_RT_OFFSET 30037
-#define QM_REG_PQOTHER2PF_14_RT_OFFSET 30038
-#define QM_REG_PQOTHER2PF_15_RT_OFFSET 30039
-#define QM_REG_RLGLBLPERIOD_0_RT_OFFSET        30040
-#define QM_REG_RLGLBLPERIOD_1_RT_OFFSET        30041
-#define QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET   30042
-#define QM_REG_RLGLBLPERIODTIMER_1_RT_OFFSET   30043
-#define QM_REG_RLGLBLPERIODSEL_0_RT_OFFSET     30044
-#define QM_REG_RLGLBLPERIODSEL_1_RT_OFFSET     30045
-#define QM_REG_RLGLBLPERIODSEL_2_RT_OFFSET     30046
-#define QM_REG_RLGLBLPERIODSEL_3_RT_OFFSET     30047
-#define QM_REG_RLGLBLPERIODSEL_4_RT_OFFSET     30048
-#define QM_REG_RLGLBLPERIODSEL_5_RT_OFFSET     30049
-#define QM_REG_RLGLBLPERIODSEL_6_RT_OFFSET     30050
-#define QM_REG_RLGLBLPERIODSEL_7_RT_OFFSET     30051
-#define QM_REG_RLGLBLINCVAL_RT_OFFSET  30052
-#define QM_REG_RLGLBLINCVAL_RT_SIZE    256
-#define QM_REG_RLGLBLUPPERBOUND_RT_OFFSET      30308
-#define QM_REG_RLGLBLUPPERBOUND_RT_SIZE        256
-#define QM_REG_RLGLBLCRD_RT_OFFSET     30564
-#define QM_REG_RLGLBLCRD_RT_SIZE       256
-#define QM_REG_RLGLBLENABLE_RT_OFFSET  30820
-#define QM_REG_RLPFPERIOD_RT_OFFSET    30821
-#define QM_REG_RLPFPERIODTIMER_RT_OFFSET       30822
-#define QM_REG_RLPFINCVAL_RT_OFFSET    30823
-#define QM_REG_RLPFINCVAL_RT_SIZE      16
-#define QM_REG_RLPFUPPERBOUND_RT_OFFSET        30839
-#define QM_REG_RLPFUPPERBOUND_RT_SIZE  16
-#define QM_REG_RLPFCRD_RT_OFFSET       30855
-#define QM_REG_RLPFCRD_RT_SIZE 16
-#define QM_REG_RLPFENABLE_RT_OFFSET    30871
-#define QM_REG_RLPFVOQENABLE_RT_OFFSET 30872
-#define QM_REG_WFQPFWEIGHT_RT_OFFSET   30873
-#define QM_REG_WFQPFWEIGHT_RT_SIZE     16
-#define QM_REG_WFQPFUPPERBOUND_RT_OFFSET       30889
-#define QM_REG_WFQPFUPPERBOUND_RT_SIZE 16
-#define QM_REG_WFQPFCRD_RT_OFFSET      30905
-#define QM_REG_WFQPFCRD_RT_SIZE        256
-#define QM_REG_WFQPFENABLE_RT_OFFSET   31161
-#define QM_REG_WFQVPENABLE_RT_OFFSET   31162
-#define QM_REG_BASEADDRTXPQ_RT_OFFSET  31163
-#define QM_REG_BASEADDRTXPQ_RT_SIZE    512
-#define QM_REG_TXPQMAP_RT_OFFSET       31675
-#define QM_REG_TXPQMAP_RT_SIZE 512
-#define QM_REG_WFQVPWEIGHT_RT_OFFSET   32187
-#define QM_REG_WFQVPWEIGHT_RT_SIZE     512
-#define QM_REG_WFQVPCRD_RT_OFFSET      32699
-#define QM_REG_WFQVPCRD_RT_SIZE        512
-#define QM_REG_WFQVPMAP_RT_OFFSET      33211
-#define QM_REG_WFQVPMAP_RT_SIZE        512
-#define QM_REG_WFQPFCRD_MSB_RT_OFFSET  33723
-#define QM_REG_WFQPFCRD_MSB_RT_SIZE    320
-#define QM_REG_VOQCRDLINE_RT_OFFSET    34043
-#define QM_REG_VOQCRDLINE_RT_SIZE      36
-#define QM_REG_VOQINITCRDLINE_RT_OFFSET        34079
-#define QM_REG_VOQINITCRDLINE_RT_SIZE  36
-#define NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET      34115
-#define NIG_REG_OUTER_TAG_VALUE_LIST0_RT_OFFSET        34116
-#define NIG_REG_OUTER_TAG_VALUE_LIST1_RT_OFFSET        34117
-#define NIG_REG_OUTER_TAG_VALUE_LIST2_RT_OFFSET        34118
-#define NIG_REG_OUTER_TAG_VALUE_LIST3_RT_OFFSET        34119
-#define NIG_REG_OUTER_TAG_VALUE_MASK_RT_OFFSET 34120
-#define NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET     34121
-#define NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET      34122
-#define NIG_REG_LLH_FUNC_TAG_EN_RT_SIZE        4
-#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_OFFSET 34126
-#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_SIZE   4
-#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET   34130
-#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_SIZE     4
-#define NIG_REG_LLH_FUNC_NO_TAG_RT_OFFSET      34134
-#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_OFFSET        34135
-#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_SIZE  32
-#define NIG_REG_LLH_FUNC_FILTER_EN_RT_OFFSET   34167
-#define NIG_REG_LLH_FUNC_FILTER_EN_RT_SIZE     16
-#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_OFFSET 34183
-#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_SIZE   16
-#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET        34199
-#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_SIZE  16
-#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET      34215
-#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_SIZE        16
-#define NIG_REG_TX_EDPM_CTRL_RT_OFFSET 34231
-#define NIG_REG_ROCE_DUPLICATE_TO_HOST_RT_OFFSET       34232
-#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET      34233
-#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET      34234
-#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET      34235
-#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET  34236
-#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET  34237
-#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET  34238
-#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET  34239
-#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET       34240
-#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET       34241
-#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET       34242
-#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET       34243
-#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET   34244
-#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET        34245
-#define PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET      34246
-#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET 34247
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET       34248
-#define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET  34249
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET   34250
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET       34251
-#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET  34252
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET   34253
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET       34254
-#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET  34255
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET   34256
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET       34257
-#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET  34258
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET   34259
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET       34260
-#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET  34261
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET   34262
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET       34263
-#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET  34264
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET   34265
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET       34266
-#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET  34267
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET   34268
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET       34269
-#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET  34270
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET   34271
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET       34272
-#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET  34273
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET   34274
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET       34275
-#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET  34276
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET   34277
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET      34278
-#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET 34279
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET  34280
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET      34281
-#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET 34282
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET  34283
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET      34284
-#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET 34285
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET  34286
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET      34287
-#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET 34288
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET  34289
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET      34290
-#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET 34291
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET  34292
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET      34293
-#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET 34294
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET  34295
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET      34296
-#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET 34297
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET  34298
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET      34299
-#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET 34300
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET  34301
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET      34302
-#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET 34303
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET  34304
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET      34305
-#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET 34306
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET  34307
-#define XCM_REG_CON_PHY_Q3_RT_OFFSET   34308
-
-#define RUNTIME_ARRAY_SIZE 34309
+#define DORQ_REG_PF_MAX_ICID_0_RT_OFFSET                       0
+#define DORQ_REG_PF_MAX_ICID_1_RT_OFFSET                       1
+#define DORQ_REG_PF_MAX_ICID_2_RT_OFFSET                       2
+#define DORQ_REG_PF_MAX_ICID_3_RT_OFFSET                       3
+#define DORQ_REG_PF_MAX_ICID_4_RT_OFFSET                       4
+#define DORQ_REG_PF_MAX_ICID_5_RT_OFFSET                       5
+#define DORQ_REG_PF_MAX_ICID_6_RT_OFFSET                       6
+#define DORQ_REG_PF_MAX_ICID_7_RT_OFFSET                       7
+#define DORQ_REG_VF_MAX_ICID_0_RT_OFFSET                       8
+#define DORQ_REG_VF_MAX_ICID_1_RT_OFFSET                       9
+#define DORQ_REG_VF_MAX_ICID_2_RT_OFFSET                       10
+#define DORQ_REG_VF_MAX_ICID_3_RT_OFFSET                       11
+#define DORQ_REG_VF_MAX_ICID_4_RT_OFFSET                       12
+#define DORQ_REG_VF_MAX_ICID_5_RT_OFFSET                       13
+#define DORQ_REG_VF_MAX_ICID_6_RT_OFFSET                       14
+#define DORQ_REG_VF_MAX_ICID_7_RT_OFFSET                       15
+#define DORQ_REG_PF_WAKE_ALL_RT_OFFSET                         16
+#define DORQ_REG_TAG1_ETHERTYPE_RT_OFFSET                      17
+#define DORQ_REG_GLB_MAX_ICID_0_RT_OFFSET                      18
+#define DORQ_REG_GLB_MAX_ICID_1_RT_OFFSET                      19
+#define DORQ_REG_GLB_RANGE2CONN_TYPE_0_RT_OFFSET               20
+#define DORQ_REG_GLB_RANGE2CONN_TYPE_1_RT_OFFSET               21
+#define DORQ_REG_PRV_PF_MAX_ICID_2_RT_OFFSET                   22
+#define DORQ_REG_PRV_PF_MAX_ICID_3_RT_OFFSET                   23
+#define DORQ_REG_PRV_PF_MAX_ICID_4_RT_OFFSET                   24
+#define DORQ_REG_PRV_PF_MAX_ICID_5_RT_OFFSET                   25
+#define DORQ_REG_PRV_VF_MAX_ICID_2_RT_OFFSET                   26
+#define DORQ_REG_PRV_VF_MAX_ICID_3_RT_OFFSET                   27
+#define DORQ_REG_PRV_VF_MAX_ICID_4_RT_OFFSET                   28
+#define DORQ_REG_PRV_VF_MAX_ICID_5_RT_OFFSET                   29
+#define DORQ_REG_PRV_PF_RANGE2CONN_TYPE_2_RT_OFFSET            30
+#define DORQ_REG_PRV_PF_RANGE2CONN_TYPE_3_RT_OFFSET            31
+#define DORQ_REG_PRV_PF_RANGE2CONN_TYPE_4_RT_OFFSET            32
+#define DORQ_REG_PRV_PF_RANGE2CONN_TYPE_5_RT_OFFSET            33
+#define DORQ_REG_PRV_VF_RANGE2CONN_TYPE_2_RT_OFFSET            34
+#define DORQ_REG_PRV_VF_RANGE2CONN_TYPE_3_RT_OFFSET            35
+#define DORQ_REG_PRV_VF_RANGE2CONN_TYPE_4_RT_OFFSET            36
+#define DORQ_REG_PRV_VF_RANGE2CONN_TYPE_5_RT_OFFSET            37
+#define IGU_REG_PF_CONFIGURATION_RT_OFFSET                     38
+#define IGU_REG_VF_CONFIGURATION_RT_OFFSET                     39
+#define IGU_REG_ATTN_MSG_ADDR_L_RT_OFFSET                      40
+#define IGU_REG_ATTN_MSG_ADDR_H_RT_OFFSET                      41
+#define IGU_REG_LEADING_EDGE_LATCH_RT_OFFSET                   42
+#define IGU_REG_TRAILING_EDGE_LATCH_RT_OFFSET                  43
+#define CAU_REG_CQE_AGG_UNIT_SIZE_RT_OFFSET                    44
+#define CAU_REG_SB_VAR_MEMORY_RT_OFFSET                                45
+#define CAU_REG_SB_VAR_MEMORY_RT_SIZE                          1024
+#define CAU_REG_SB_ADDR_MEMORY_RT_OFFSET                       1069
+#define CAU_REG_SB_ADDR_MEMORY_RT_SIZE                         1024
+#define CAU_REG_PI_MEMORY_RT_OFFSET                            2093
+#define CAU_REG_PI_MEMORY_RT_SIZE                              4416
+#define PRS_REG_SEARCH_RESP_INITIATOR_TYPE_RT_OFFSET           6509
+#define PRS_REG_TASK_ID_MAX_INITIATOR_PF_RT_OFFSET             6510
+#define PRS_REG_TASK_ID_MAX_INITIATOR_VF_RT_OFFSET             6511
+#define PRS_REG_TASK_ID_MAX_TARGET_PF_RT_OFFSET                        6512
+#define PRS_REG_TASK_ID_MAX_TARGET_VF_RT_OFFSET                        6513
+#define PRS_REG_SEARCH_TCP_RT_OFFSET                           6514
+#define PRS_REG_SEARCH_FCOE_RT_OFFSET                          6515
+#define PRS_REG_SEARCH_ROCE_RT_OFFSET                          6516
+#define PRS_REG_ROCE_DEST_QP_MAX_VF_RT_OFFSET                  6517
+#define PRS_REG_ROCE_DEST_QP_MAX_PF_RT_OFFSET                  6518
+#define PRS_REG_SEARCH_OPENFLOW_RT_OFFSET                      6519
+#define PRS_REG_SEARCH_NON_IP_AS_OPENFLOW_RT_OFFSET            6520
+#define PRS_REG_OPENFLOW_SUPPORT_ONLY_KNOWN_OVER_IP_RT_OFFSET  6521
+#define PRS_REG_OPENFLOW_SEARCH_KEY_MASK_RT_OFFSET             6522
+#define PRS_REG_TAG_ETHERTYPE_0_RT_OFFSET                      6523
+#define PRS_REG_LIGHT_L2_ETHERTYPE_EN_RT_OFFSET                        6524
+#define SRC_REG_FIRSTFREE_RT_OFFSET                            6525
+#define SRC_REG_FIRSTFREE_RT_SIZE                              2
+#define SRC_REG_LASTFREE_RT_OFFSET                             6527
+#define SRC_REG_LASTFREE_RT_SIZE                               2
+#define SRC_REG_COUNTFREE_RT_OFFSET                            6529
+#define SRC_REG_NUMBER_HASH_BITS_RT_OFFSET                     6530
+#define PSWRQ2_REG_CDUT_P_SIZE_RT_OFFSET                       6531
+#define PSWRQ2_REG_CDUC_P_SIZE_RT_OFFSET                       6532
+#define PSWRQ2_REG_TM_P_SIZE_RT_OFFSET                         6533
+#define PSWRQ2_REG_QM_P_SIZE_RT_OFFSET                         6534
+#define PSWRQ2_REG_SRC_P_SIZE_RT_OFFSET                                6535
+#define PSWRQ2_REG_TSDM_P_SIZE_RT_OFFSET                       6536
+#define PSWRQ2_REG_TM_FIRST_ILT_RT_OFFSET                      6537
+#define PSWRQ2_REG_TM_LAST_ILT_RT_OFFSET                       6538
+#define PSWRQ2_REG_QM_FIRST_ILT_RT_OFFSET                      6539
+#define PSWRQ2_REG_QM_LAST_ILT_RT_OFFSET                       6540
+#define PSWRQ2_REG_SRC_FIRST_ILT_RT_OFFSET                     6541
+#define PSWRQ2_REG_SRC_LAST_ILT_RT_OFFSET                      6542
+#define PSWRQ2_REG_CDUC_FIRST_ILT_RT_OFFSET                    6543
+#define PSWRQ2_REG_CDUC_LAST_ILT_RT_OFFSET                     6544
+#define PSWRQ2_REG_CDUT_FIRST_ILT_RT_OFFSET                    6545
+#define PSWRQ2_REG_CDUT_LAST_ILT_RT_OFFSET                     6546
+#define PSWRQ2_REG_TSDM_FIRST_ILT_RT_OFFSET                    6547
+#define PSWRQ2_REG_TSDM_LAST_ILT_RT_OFFSET                     6548
+#define PSWRQ2_REG_TM_NUMBER_OF_PF_BLOCKS_RT_OFFSET            6549
+#define PSWRQ2_REG_CDUT_NUMBER_OF_PF_BLOCKS_RT_OFFSET          6550
+#define PSWRQ2_REG_CDUC_NUMBER_OF_PF_BLOCKS_RT_OFFSET          6551
+#define PSWRQ2_REG_TM_VF_BLOCKS_RT_OFFSET                      6552
+#define PSWRQ2_REG_CDUT_VF_BLOCKS_RT_OFFSET                    6553
+#define PSWRQ2_REG_CDUC_VF_BLOCKS_RT_OFFSET                    6554
+#define PSWRQ2_REG_TM_BLOCKS_FACTOR_RT_OFFSET                  6555
+#define PSWRQ2_REG_CDUT_BLOCKS_FACTOR_RT_OFFSET                        6556
+#define PSWRQ2_REG_CDUC_BLOCKS_FACTOR_RT_OFFSET                        6557
+#define PSWRQ2_REG_VF_BASE_RT_OFFSET                           6558
+#define PSWRQ2_REG_VF_LAST_ILT_RT_OFFSET                       6559
+#define PSWRQ2_REG_DRAM_ALIGN_WR_RT_OFFSET                     6560
+#define PSWRQ2_REG_DRAM_ALIGN_RD_RT_OFFSET                     6561
+#define PSWRQ2_REG_TGSRC_FIRST_ILT_RT_OFFSET                   6562
+#define PSWRQ2_REG_RGSRC_FIRST_ILT_RT_OFFSET                   6563
+#define PSWRQ2_REG_TGSRC_LAST_ILT_RT_OFFSET                    6564
+#define PSWRQ2_REG_RGSRC_LAST_ILT_RT_OFFSET                    6565
+#define PSWRQ2_REG_ILT_MEMORY_RT_OFFSET                                6566
+#define PSWRQ2_REG_ILT_MEMORY_RT_SIZE                          26414
+#define PGLUE_REG_B_VF_BASE_RT_OFFSET                          32980
+#define PGLUE_REG_B_MSDM_OFFSET_MASK_B_RT_OFFSET               32981
+#define PGLUE_REG_B_MSDM_VF_SHIFT_B_RT_OFFSET                  32982
+#define PGLUE_REG_B_CACHE_LINE_SIZE_RT_OFFSET                  32983
+#define PGLUE_REG_B_PF_BAR0_SIZE_RT_OFFSET                     32984
+#define PGLUE_REG_B_PF_BAR1_SIZE_RT_OFFSET                     32985
+#define PGLUE_REG_B_VF_BAR1_SIZE_RT_OFFSET                     32986
+#define TM_REG_VF_ENABLE_CONN_RT_OFFSET                                32987
+#define TM_REG_PF_ENABLE_CONN_RT_OFFSET                                32988
+#define TM_REG_PF_ENABLE_TASK_RT_OFFSET                                32989
+#define TM_REG_GROUP_SIZE_RESOLUTION_CONN_RT_OFFSET            32990
+#define TM_REG_GROUP_SIZE_RESOLUTION_TASK_RT_OFFSET            32991
+#define TM_REG_CONFIG_CONN_MEM_RT_OFFSET                       32992
+#define TM_REG_CONFIG_CONN_MEM_RT_SIZE                         416
+#define TM_REG_CONFIG_TASK_MEM_RT_OFFSET                       33408
+#define TM_REG_CONFIG_TASK_MEM_RT_SIZE                         608
+#define QM_REG_MAXPQSIZE_0_RT_OFFSET                           34016
+#define QM_REG_MAXPQSIZE_1_RT_OFFSET                           34017
+#define QM_REG_MAXPQSIZE_2_RT_OFFSET                           34018
+#define QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET                      34019
+#define QM_REG_MAXPQSIZETXSEL_1_RT_OFFSET                      34020
+#define QM_REG_MAXPQSIZETXSEL_2_RT_OFFSET                      34021
+#define QM_REG_MAXPQSIZETXSEL_3_RT_OFFSET                      34022
+#define QM_REG_MAXPQSIZETXSEL_4_RT_OFFSET                      34023
+#define QM_REG_MAXPQSIZETXSEL_5_RT_OFFSET                      34024
+#define QM_REG_MAXPQSIZETXSEL_6_RT_OFFSET                      34025
+#define QM_REG_MAXPQSIZETXSEL_7_RT_OFFSET                      34026
+#define QM_REG_MAXPQSIZETXSEL_8_RT_OFFSET                      34027
+#define QM_REG_MAXPQSIZETXSEL_9_RT_OFFSET                      34028
+#define QM_REG_MAXPQSIZETXSEL_10_RT_OFFSET                     34029
+#define QM_REG_MAXPQSIZETXSEL_11_RT_OFFSET                     34030
+#define QM_REG_MAXPQSIZETXSEL_12_RT_OFFSET                     34031
+#define QM_REG_MAXPQSIZETXSEL_13_RT_OFFSET                     34032
+#define QM_REG_MAXPQSIZETXSEL_14_RT_OFFSET                     34033
+#define QM_REG_MAXPQSIZETXSEL_15_RT_OFFSET                     34034
+#define QM_REG_MAXPQSIZETXSEL_16_RT_OFFSET                     34035
+#define QM_REG_MAXPQSIZETXSEL_17_RT_OFFSET                     34036
+#define QM_REG_MAXPQSIZETXSEL_18_RT_OFFSET                     34037
+#define QM_REG_MAXPQSIZETXSEL_19_RT_OFFSET                     34038
+#define QM_REG_MAXPQSIZETXSEL_20_RT_OFFSET                     34039
+#define QM_REG_MAXPQSIZETXSEL_21_RT_OFFSET                     34040
+#define QM_REG_MAXPQSIZETXSEL_22_RT_OFFSET                     34041
+#define QM_REG_MAXPQSIZETXSEL_23_RT_OFFSET                     34042
+#define QM_REG_MAXPQSIZETXSEL_24_RT_OFFSET                     34043
+#define QM_REG_MAXPQSIZETXSEL_25_RT_OFFSET                     34044
+#define QM_REG_MAXPQSIZETXSEL_26_RT_OFFSET                     34045
+#define QM_REG_MAXPQSIZETXSEL_27_RT_OFFSET                     34046
+#define QM_REG_MAXPQSIZETXSEL_28_RT_OFFSET                     34047
+#define QM_REG_MAXPQSIZETXSEL_29_RT_OFFSET                     34048
+#define QM_REG_MAXPQSIZETXSEL_30_RT_OFFSET                     34049
+#define QM_REG_MAXPQSIZETXSEL_31_RT_OFFSET                     34050
+#define QM_REG_MAXPQSIZETXSEL_32_RT_OFFSET                     34051
+#define QM_REG_MAXPQSIZETXSEL_33_RT_OFFSET                     34052
+#define QM_REG_MAXPQSIZETXSEL_34_RT_OFFSET                     34053
+#define QM_REG_MAXPQSIZETXSEL_35_RT_OFFSET                     34054
+#define QM_REG_MAXPQSIZETXSEL_36_RT_OFFSET                     34055
+#define QM_REG_MAXPQSIZETXSEL_37_RT_OFFSET                     34056
+#define QM_REG_MAXPQSIZETXSEL_38_RT_OFFSET                     34057
+#define QM_REG_MAXPQSIZETXSEL_39_RT_OFFSET                     34058
+#define QM_REG_MAXPQSIZETXSEL_40_RT_OFFSET                     34059
+#define QM_REG_MAXPQSIZETXSEL_41_RT_OFFSET                     34060
+#define QM_REG_MAXPQSIZETXSEL_42_RT_OFFSET                     34061
+#define QM_REG_MAXPQSIZETXSEL_43_RT_OFFSET                     34062
+#define QM_REG_MAXPQSIZETXSEL_44_RT_OFFSET                     34063
+#define QM_REG_MAXPQSIZETXSEL_45_RT_OFFSET                     34064
+#define QM_REG_MAXPQSIZETXSEL_46_RT_OFFSET                     34065
+#define QM_REG_MAXPQSIZETXSEL_47_RT_OFFSET                     34066
+#define QM_REG_MAXPQSIZETXSEL_48_RT_OFFSET                     34067
+#define QM_REG_MAXPQSIZETXSEL_49_RT_OFFSET                     34068
+#define QM_REG_MAXPQSIZETXSEL_50_RT_OFFSET                     34069
+#define QM_REG_MAXPQSIZETXSEL_51_RT_OFFSET                     34070
+#define QM_REG_MAXPQSIZETXSEL_52_RT_OFFSET                     34071
+#define QM_REG_MAXPQSIZETXSEL_53_RT_OFFSET                     34072
+#define QM_REG_MAXPQSIZETXSEL_54_RT_OFFSET                     34073
+#define QM_REG_MAXPQSIZETXSEL_55_RT_OFFSET                     34074
+#define QM_REG_MAXPQSIZETXSEL_56_RT_OFFSET                     34075
+#define QM_REG_MAXPQSIZETXSEL_57_RT_OFFSET                     34076
+#define QM_REG_MAXPQSIZETXSEL_58_RT_OFFSET                     34077
+#define QM_REG_MAXPQSIZETXSEL_59_RT_OFFSET                     34078
+#define QM_REG_MAXPQSIZETXSEL_60_RT_OFFSET                     34079
+#define QM_REG_MAXPQSIZETXSEL_61_RT_OFFSET                     34080
+#define QM_REG_MAXPQSIZETXSEL_62_RT_OFFSET                     34081
+#define QM_REG_MAXPQSIZETXSEL_63_RT_OFFSET                     34082
+#define QM_REG_BASEADDROTHERPQ_RT_OFFSET                       34083
+#define QM_REG_BASEADDROTHERPQ_RT_SIZE                         128
+#define QM_REG_PTRTBLOTHER_RT_OFFSET                           34211
+#define QM_REG_PTRTBLOTHER_RT_SIZE                             256
+#define QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET                    34467
+#define QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET                    34468
+#define QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET                     34469
+#define QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET                   34470
+#define QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET                  34471
+#define QM_REG_WRROTHERPQGRP_0_RT_OFFSET                       34472
+#define QM_REG_WRROTHERPQGRP_1_RT_OFFSET                       34473
+#define QM_REG_WRROTHERPQGRP_2_RT_OFFSET                       34474
+#define QM_REG_WRROTHERPQGRP_3_RT_OFFSET                       34475
+#define QM_REG_WRROTHERPQGRP_4_RT_OFFSET                       34476
+#define QM_REG_WRROTHERPQGRP_5_RT_OFFSET                       34477
+#define QM_REG_WRROTHERPQGRP_6_RT_OFFSET                       34478
+#define QM_REG_WRROTHERPQGRP_7_RT_OFFSET                       34479
+#define QM_REG_WRROTHERPQGRP_8_RT_OFFSET                       34480
+#define QM_REG_WRROTHERPQGRP_9_RT_OFFSET                       34481
+#define QM_REG_WRROTHERPQGRP_10_RT_OFFSET                      34482
+#define QM_REG_WRROTHERPQGRP_11_RT_OFFSET                      34483
+#define QM_REG_WRROTHERPQGRP_12_RT_OFFSET                      34484
+#define QM_REG_WRROTHERPQGRP_13_RT_OFFSET                      34485
+#define QM_REG_WRROTHERPQGRP_14_RT_OFFSET                      34486
+#define QM_REG_WRROTHERPQGRP_15_RT_OFFSET                      34487
+#define QM_REG_WRROTHERGRPWEIGHT_0_RT_OFFSET                   34488
+#define QM_REG_WRROTHERGRPWEIGHT_1_RT_OFFSET                   34489
+#define QM_REG_WRROTHERGRPWEIGHT_2_RT_OFFSET                   34490
+#define QM_REG_WRROTHERGRPWEIGHT_3_RT_OFFSET                   34491
+#define QM_REG_WRRTXGRPWEIGHT_0_RT_OFFSET                      34492
+#define QM_REG_WRRTXGRPWEIGHT_1_RT_OFFSET                      34493
+#define QM_REG_PQTX2PF_0_RT_OFFSET                             34494
+#define QM_REG_PQTX2PF_1_RT_OFFSET                             34495
+#define QM_REG_PQTX2PF_2_RT_OFFSET                             34496
+#define QM_REG_PQTX2PF_3_RT_OFFSET                             34497
+#define QM_REG_PQTX2PF_4_RT_OFFSET                             34498
+#define QM_REG_PQTX2PF_5_RT_OFFSET                             34499
+#define QM_REG_PQTX2PF_6_RT_OFFSET                             34500
+#define QM_REG_PQTX2PF_7_RT_OFFSET                             34501
+#define QM_REG_PQTX2PF_8_RT_OFFSET                             34502
+#define QM_REG_PQTX2PF_9_RT_OFFSET                             34503
+#define QM_REG_PQTX2PF_10_RT_OFFSET                            34504
+#define QM_REG_PQTX2PF_11_RT_OFFSET                            34505
+#define QM_REG_PQTX2PF_12_RT_OFFSET                            34506
+#define QM_REG_PQTX2PF_13_RT_OFFSET                            34507
+#define QM_REG_PQTX2PF_14_RT_OFFSET                            34508
+#define QM_REG_PQTX2PF_15_RT_OFFSET                            34509
+#define QM_REG_PQTX2PF_16_RT_OFFSET                            34510
+#define QM_REG_PQTX2PF_17_RT_OFFSET                            34511
+#define QM_REG_PQTX2PF_18_RT_OFFSET                            34512
+#define QM_REG_PQTX2PF_19_RT_OFFSET                            34513
+#define QM_REG_PQTX2PF_20_RT_OFFSET                            34514
+#define QM_REG_PQTX2PF_21_RT_OFFSET                            34515
+#define QM_REG_PQTX2PF_22_RT_OFFSET                            34516
+#define QM_REG_PQTX2PF_23_RT_OFFSET                            34517
+#define QM_REG_PQTX2PF_24_RT_OFFSET                            34518
+#define QM_REG_PQTX2PF_25_RT_OFFSET                            34519
+#define QM_REG_PQTX2PF_26_RT_OFFSET                            34520
+#define QM_REG_PQTX2PF_27_RT_OFFSET                            34521
+#define QM_REG_PQTX2PF_28_RT_OFFSET                            34522
+#define QM_REG_PQTX2PF_29_RT_OFFSET                            34523
+#define QM_REG_PQTX2PF_30_RT_OFFSET                            34524
+#define QM_REG_PQTX2PF_31_RT_OFFSET                            34525
+#define QM_REG_PQTX2PF_32_RT_OFFSET                            34526
+#define QM_REG_PQTX2PF_33_RT_OFFSET                            34527
+#define QM_REG_PQTX2PF_34_RT_OFFSET                            34528
+#define QM_REG_PQTX2PF_35_RT_OFFSET                            34529
+#define QM_REG_PQTX2PF_36_RT_OFFSET                            34530
+#define QM_REG_PQTX2PF_37_RT_OFFSET                            34531
+#define QM_REG_PQTX2PF_38_RT_OFFSET                            34532
+#define QM_REG_PQTX2PF_39_RT_OFFSET                            34533
+#define QM_REG_PQTX2PF_40_RT_OFFSET                            34534
+#define QM_REG_PQTX2PF_41_RT_OFFSET                            34535
+#define QM_REG_PQTX2PF_42_RT_OFFSET                            34536
+#define QM_REG_PQTX2PF_43_RT_OFFSET                            34537
+#define QM_REG_PQTX2PF_44_RT_OFFSET                            34538
+#define QM_REG_PQTX2PF_45_RT_OFFSET                            34539
+#define QM_REG_PQTX2PF_46_RT_OFFSET                            34540
+#define QM_REG_PQTX2PF_47_RT_OFFSET                            34541
+#define QM_REG_PQTX2PF_48_RT_OFFSET                            34542
+#define QM_REG_PQTX2PF_49_RT_OFFSET                            34543
+#define QM_REG_PQTX2PF_50_RT_OFFSET                            34544
+#define QM_REG_PQTX2PF_51_RT_OFFSET                            34545
+#define QM_REG_PQTX2PF_52_RT_OFFSET                            34546
+#define QM_REG_PQTX2PF_53_RT_OFFSET                            34547
+#define QM_REG_PQTX2PF_54_RT_OFFSET                            34548
+#define QM_REG_PQTX2PF_55_RT_OFFSET                            34549
+#define QM_REG_PQTX2PF_56_RT_OFFSET                            34550
+#define QM_REG_PQTX2PF_57_RT_OFFSET                            34551
+#define QM_REG_PQTX2PF_58_RT_OFFSET                            34552
+#define QM_REG_PQTX2PF_59_RT_OFFSET                            34553
+#define QM_REG_PQTX2PF_60_RT_OFFSET                            34554
+#define QM_REG_PQTX2PF_61_RT_OFFSET                            34555
+#define QM_REG_PQTX2PF_62_RT_OFFSET                            34556
+#define QM_REG_PQTX2PF_63_RT_OFFSET                            34557
+#define QM_REG_PQOTHER2PF_0_RT_OFFSET                          34558
+#define QM_REG_PQOTHER2PF_1_RT_OFFSET                          34559
+#define QM_REG_PQOTHER2PF_2_RT_OFFSET                          34560
+#define QM_REG_PQOTHER2PF_3_RT_OFFSET                          34561
+#define QM_REG_PQOTHER2PF_4_RT_OFFSET                          34562
+#define QM_REG_PQOTHER2PF_5_RT_OFFSET                          34563
+#define QM_REG_PQOTHER2PF_6_RT_OFFSET                          34564
+#define QM_REG_PQOTHER2PF_7_RT_OFFSET                          34565
+#define QM_REG_PQOTHER2PF_8_RT_OFFSET                          34566
+#define QM_REG_PQOTHER2PF_9_RT_OFFSET                          34567
+#define QM_REG_PQOTHER2PF_10_RT_OFFSET                         34568
+#define QM_REG_PQOTHER2PF_11_RT_OFFSET                         34569
+#define QM_REG_PQOTHER2PF_12_RT_OFFSET                         34570
+#define QM_REG_PQOTHER2PF_13_RT_OFFSET                         34571
+#define QM_REG_PQOTHER2PF_14_RT_OFFSET                         34572
+#define QM_REG_PQOTHER2PF_15_RT_OFFSET                         34573
+#define QM_REG_RLGLBLPERIOD_0_RT_OFFSET                                34574
+#define QM_REG_RLGLBLPERIOD_1_RT_OFFSET                                34575
+#define QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET                   34576
+#define QM_REG_RLGLBLPERIODTIMER_1_RT_OFFSET                   34577
+#define QM_REG_RLGLBLPERIODSEL_0_RT_OFFSET                     34578
+#define QM_REG_RLGLBLPERIODSEL_1_RT_OFFSET                     34579
+#define QM_REG_RLGLBLPERIODSEL_2_RT_OFFSET                     34580
+#define QM_REG_RLGLBLPERIODSEL_3_RT_OFFSET                     34581
+#define QM_REG_RLGLBLPERIODSEL_4_RT_OFFSET                     34582
+#define QM_REG_RLGLBLPERIODSEL_5_RT_OFFSET                     34583
+#define QM_REG_RLGLBLPERIODSEL_6_RT_OFFSET                     34584
+#define QM_REG_RLGLBLPERIODSEL_7_RT_OFFSET                     34585
+#define QM_REG_RLGLBLINCVAL_RT_OFFSET                          34586
+#define QM_REG_RLGLBLINCVAL_RT_SIZE                            256
+#define QM_REG_RLGLBLUPPERBOUND_RT_OFFSET                      34842
+#define QM_REG_RLGLBLUPPERBOUND_RT_SIZE                                256
+#define QM_REG_RLGLBLCRD_RT_OFFSET                             35098
+#define QM_REG_RLGLBLCRD_RT_SIZE                               256
+#define QM_REG_RLGLBLENABLE_RT_OFFSET                          35354
+#define QM_REG_RLPFPERIOD_RT_OFFSET                            35355
+#define QM_REG_RLPFPERIODTIMER_RT_OFFSET                       35356
+#define QM_REG_RLPFINCVAL_RT_OFFSET                            35357
+#define QM_REG_RLPFINCVAL_RT_SIZE                              16
+#define QM_REG_RLPFUPPERBOUND_RT_OFFSET                                35373
+#define QM_REG_RLPFUPPERBOUND_RT_SIZE                          16
+#define QM_REG_RLPFCRD_RT_OFFSET                               35389
+#define QM_REG_RLPFCRD_RT_SIZE                                 16
+#define QM_REG_RLPFENABLE_RT_OFFSET                            35405
+#define QM_REG_RLPFVOQENABLE_RT_OFFSET                         35406
+#define QM_REG_WFQPFWEIGHT_RT_OFFSET                           35407
+#define QM_REG_WFQPFWEIGHT_RT_SIZE                             16
+#define QM_REG_WFQPFUPPERBOUND_RT_OFFSET                       35423
+#define QM_REG_WFQPFUPPERBOUND_RT_SIZE                         16
+#define QM_REG_WFQPFCRD_RT_OFFSET                              35439
+#define QM_REG_WFQPFCRD_RT_SIZE                                        256
+#define QM_REG_WFQPFENABLE_RT_OFFSET                           35695
+#define QM_REG_WFQVPENABLE_RT_OFFSET                           35696
+#define QM_REG_BASEADDRTXPQ_RT_OFFSET                          35697
+#define QM_REG_BASEADDRTXPQ_RT_SIZE                            512
+#define QM_REG_TXPQMAP_RT_OFFSET                               36209
+#define QM_REG_TXPQMAP_RT_SIZE                                 512
+#define QM_REG_WFQVPWEIGHT_RT_OFFSET                           36721
+#define QM_REG_WFQVPWEIGHT_RT_SIZE                             512
+#define QM_REG_WFQVPCRD_RT_OFFSET                              37233
+#define QM_REG_WFQVPCRD_RT_SIZE                                        512
+#define QM_REG_WFQVPMAP_RT_OFFSET                              37745
+#define QM_REG_WFQVPMAP_RT_SIZE                                        512
+#define QM_REG_PTRTBLTX_RT_OFFSET                              38257
+#define QM_REG_PTRTBLTX_RT_SIZE                                        1024
+#define QM_REG_WFQPFCRD_MSB_RT_OFFSET                          39281
+#define QM_REG_WFQPFCRD_MSB_RT_SIZE                            320
+#define QM_REG_VOQCRDLINE_RT_OFFSET                            39601
+#define QM_REG_VOQCRDLINE_RT_SIZE                              36
+#define QM_REG_VOQINITCRDLINE_RT_OFFSET                                39637
+#define QM_REG_VOQINITCRDLINE_RT_SIZE                          36
+#define QM_REG_RLPFVOQENABLE_MSB_RT_OFFSET                     39673
+#define NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET                      39674
+#define NIG_REG_BRB_GATE_DNTFWD_PORT_RT_OFFSET                 39675
+#define NIG_REG_OUTER_TAG_VALUE_LIST0_RT_OFFSET                        39676
+#define NIG_REG_OUTER_TAG_VALUE_LIST1_RT_OFFSET                        39677
+#define NIG_REG_OUTER_TAG_VALUE_LIST2_RT_OFFSET                        39678
+#define NIG_REG_OUTER_TAG_VALUE_LIST3_RT_OFFSET                        39679
+#define NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET             39680
+#define NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET                      39681
+#define NIG_REG_LLH_FUNC_TAG_EN_RT_SIZE                                4
+#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET                   39685
+#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_SIZE                     4
+#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_OFFSET                        39689
+#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_SIZE                  32
+#define NIG_REG_LLH_FUNC_FILTER_EN_RT_OFFSET                   39721
+#define NIG_REG_LLH_FUNC_FILTER_EN_RT_SIZE                     16
+#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_OFFSET                 39737
+#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_SIZE                   16
+#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET                39753
+#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_SIZE          16
+#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET              39769
+#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_SIZE                        16
+#define NIG_REG_TX_EDPM_CTRL_RT_OFFSET                         39785
+#define NIG_REG_ROCE_DUPLICATE_TO_HOST_RT_OFFSET               39786
+#define NIG_REG_PPF_TO_ENGINE_SEL_RT_OFFSET                    39787
+#define NIG_REG_PPF_TO_ENGINE_SEL_RT_SIZE                      8
+#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_VALUE_RT_OFFSET         39795
+#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_VALUE_RT_SIZE           1024
+#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_EN_RT_OFFSET            40819
+#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_EN_RT_SIZE              512
+#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_MODE_RT_OFFSET          41331
+#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_MODE_RT_SIZE            512
+#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 41843
+#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_PROTOCOL_TYPE_RT_SIZE   512
+#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_HDR_SEL_RT_OFFSET       42355
+#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_HDR_SEL_RT_SIZE         512
+#define NIG_REG_LLH_PF_CLS_FILTERS_MAP_RT_OFFSET               42867
+#define NIG_REG_LLH_PF_CLS_FILTERS_MAP_RT_SIZE                 32
+#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET                      42899
+#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET                      42900
+#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET                      42901
+#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET                  42902
+#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET                  42903
+#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET                  42904
+#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET                  42905
+#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET               42906
+#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET               42907
+#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET               42908
+#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET               42909
+#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET                   42910
+#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET                        42911
+#define PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET                      42912
+#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET                 42913
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET               42914
+#define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET                  42915
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET           42916
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET               42917
+#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET                  42918
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET           42919
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET               42920
+#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET                  42921
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET           42922
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET               42923
+#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET                  42924
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET           42925
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET               42926
+#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET                  42927
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET           42928
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET               42929
+#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET                  42930
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET           42931
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET               42932
+#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET                  42933
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET           42934
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET               42935
+#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET                  42936
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET           42937
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET               42938
+#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET                  42939
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET           42940
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET               42941
+#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET                  42942
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET           42943
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET              42944
+#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET                 42945
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET          42946
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET              42947
+#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET                 42948
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET          42949
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET              42950
+#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET                 42951
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET          42952
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET              42953
+#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET                 42954
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET          42955
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET              42956
+#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET                 42957
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET          42958
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET              42959
+#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET                 42960
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET          42961
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET              42962
+#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET                 42963
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET          42964
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET              42965
+#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET                 42966
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET          42967
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET              42968
+#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET                 42969
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET          42970
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET              42971
+#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET                 42972
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET          42973
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ20_RT_OFFSET              42974
+#define PBF_REG_BTB_GUARANTEED_VOQ20_RT_OFFSET                 42975
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ20_RT_OFFSET          42976
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ21_RT_OFFSET              42977
+#define PBF_REG_BTB_GUARANTEED_VOQ21_RT_OFFSET                 42978
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ21_RT_OFFSET          42979
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ22_RT_OFFSET              42980
+#define PBF_REG_BTB_GUARANTEED_VOQ22_RT_OFFSET                 42981
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ22_RT_OFFSET          42982
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ23_RT_OFFSET              42983
+#define PBF_REG_BTB_GUARANTEED_VOQ23_RT_OFFSET                 42984
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ23_RT_OFFSET          42985
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ24_RT_OFFSET              42986
+#define PBF_REG_BTB_GUARANTEED_VOQ24_RT_OFFSET                 42987
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ24_RT_OFFSET          42988
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ25_RT_OFFSET              42989
+#define PBF_REG_BTB_GUARANTEED_VOQ25_RT_OFFSET                 42990
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ25_RT_OFFSET          42991
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ26_RT_OFFSET              42992
+#define PBF_REG_BTB_GUARANTEED_VOQ26_RT_OFFSET                 42993
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ26_RT_OFFSET          42994
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ27_RT_OFFSET              42995
+#define PBF_REG_BTB_GUARANTEED_VOQ27_RT_OFFSET                 42996
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ27_RT_OFFSET          42997
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ28_RT_OFFSET              42998
+#define PBF_REG_BTB_GUARANTEED_VOQ28_RT_OFFSET                 42999
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ28_RT_OFFSET          43000
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ29_RT_OFFSET              43001
+#define PBF_REG_BTB_GUARANTEED_VOQ29_RT_OFFSET                 43002
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ29_RT_OFFSET          43003
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ30_RT_OFFSET              43004
+#define PBF_REG_BTB_GUARANTEED_VOQ30_RT_OFFSET                 43005
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ30_RT_OFFSET          43006
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ31_RT_OFFSET              43007
+#define PBF_REG_BTB_GUARANTEED_VOQ31_RT_OFFSET                 43008
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ31_RT_OFFSET          43009
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ32_RT_OFFSET              43010
+#define PBF_REG_BTB_GUARANTEED_VOQ32_RT_OFFSET                 43011
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ32_RT_OFFSET          43012
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ33_RT_OFFSET              43013
+#define PBF_REG_BTB_GUARANTEED_VOQ33_RT_OFFSET                 43014
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ33_RT_OFFSET          43015
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ34_RT_OFFSET              43016
+#define PBF_REG_BTB_GUARANTEED_VOQ34_RT_OFFSET                 43017
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ34_RT_OFFSET          43018
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ35_RT_OFFSET              43019
+#define PBF_REG_BTB_GUARANTEED_VOQ35_RT_OFFSET                 43020
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ35_RT_OFFSET          43021
+#define XCM_REG_CON_PHY_Q3_RT_OFFSET                           43022
+
+#define RUNTIME_ARRAY_SIZE     43023
+
+/* Init Callbacks */
+#define DMAE_READY_CB  0
 
 /* The eth storm context for the Tstorm */
 struct tstorm_eth_conn_st_ctx {
 
 struct e4_xstorm_eth_conn_ag_ctx {
        u8 reserved0;
-       u8 eth_state;
+       u8 state;
        u8 flags0;
 #define E4_XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_MASK    0x1
 #define E4_XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_SHIFT   0
 #define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED9_SHIFT      2
 #define E4_XSTORM_ETH_CONN_AG_CTX_BIT11_MASK           0x1
 #define E4_XSTORM_ETH_CONN_AG_CTX_BIT11_SHIFT          3
-#define E4_XSTORM_ETH_CONN_AG_CTX_BIT12_MASK           0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_BIT12_SHIFT          4
-#define E4_XSTORM_ETH_CONN_AG_CTX_BIT13_MASK           0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_BIT13_SHIFT          5
+#define E4_XSTORM_ETH_CONN_AG_CTX_E5_RESERVED2_MASK    0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_E5_RESERVED2_SHIFT   4
+#define E4_XSTORM_ETH_CONN_AG_CTX_E5_RESERVED3_MASK    0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_E5_RESERVED3_SHIFT   5
 #define E4_XSTORM_ETH_CONN_AG_CTX_TX_RULE_ACTIVE_MASK  0x1
 #define E4_XSTORM_ETH_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6
 #define E4_XSTORM_ETH_CONN_AG_CTX_DQ_CF_ACTIVE_MASK    0x1
        ETH_EVENT_RX_QUEUE_UPDATE,
        ETH_EVENT_RX_QUEUE_STOP,
        ETH_EVENT_FILTERS_UPDATE,
-       ETH_EVENT_RESERVED,
-       ETH_EVENT_RESERVED2,
-       ETH_EVENT_RESERVED3,
+       ETH_EVENT_RX_ADD_OPENFLOW_FILTER,
+       ETH_EVENT_RX_DELETE_OPENFLOW_FILTER,
+       ETH_EVENT_RX_CREATE_OPENFLOW_ACTION,
        ETH_EVENT_RX_ADD_UDP_FILTER,
        ETH_EVENT_RX_DELETE_UDP_FILTER,
-       ETH_EVENT_RESERVED4,
-       ETH_EVENT_RESERVED5,
+       ETH_EVENT_RX_CREATE_GFT_ACTION,
+       ETH_EVENT_RX_GFT_UPDATE_FILTER,
+       ETH_EVENT_TX_QUEUE_UPDATE,
        MAX_ETH_EVENT_OPCODE
 };
 
        ETH_RAMROD_RX_DELETE_UDP_FILTER,
        ETH_RAMROD_RX_CREATE_GFT_ACTION,
        ETH_RAMROD_GFT_UPDATE_FILTER,
+       ETH_RAMROD_TX_QUEUE_UPDATE,
        MAX_ETH_RAMROD_CMD_ID
 };
 
        MAX_GFT_FILTER_UPDATE_ACTION
 };
 
-enum gft_logic_filter_type {
-       GFT_FILTER_TYPE,
-       RFS_FILTER_TYPE,
-       MAX_GFT_LOGIC_FILTER_TYPE
-};
-
 /* Ramrod data for rx add openflow filter */
 struct rx_add_openflow_filter_data {
        __le16 action_icid;
 struct rx_update_gft_filter_data {
        struct regpair pkt_hdr_addr;
        __le16 pkt_hdr_length;
-       __le16 rx_qid_or_action_icid;
-       u8 vport_id;
-       u8 filter_type;
+       __le16 action_icid;
+       __le16 rx_qid;
+       __le16 flow_id;
+       __le16 vport_id;
+       u8 action_icid_valid;
+       u8 rx_qid_valid;
+       u8 flow_id_valid;
        u8 filter_action;
        u8 assert_on_error;
+       u8 reserved;
 };
 
 /* Ramrod data for rx queue start ramrod */
        __le16 reserved[4];
 };
 
+/* Ramrod data for tx queue update ramrod */
+struct tx_queue_update_ramrod_data {
+       __le16 update_qm_pq_id_flg;
+       __le16 qm_pq_id;
+       __le32 reserved0;
+       struct regpair reserved1[5];
+};
+
 /* Ramrod data for vport update ramrod */
 struct vport_filter_update_ramrod_data {
        struct eth_filter_cmd_header filter_cmd_hdr;
 
 struct e4_xstorm_eth_conn_ag_ctx_dq_ext_ldpart {
        u8 reserved0;
-       u8 eth_state;
+       u8 state;
        u8 flags0;
 #define E4XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM0_MASK      0x1
 #define E4XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM0_SHIFT     0
 #define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED9_SHIFT                2
 #define E4XSTORMETHCONNAGCTXDQEXTLDPART_BIT11_MASK             0x1
 #define E4XSTORMETHCONNAGCTXDQEXTLDPART_BIT11_SHIFT            3
-#define E4XSTORMETHCONNAGCTXDQEXTLDPART_BIT12_MASK             0x1
-#define E4XSTORMETHCONNAGCTXDQEXTLDPART_BIT12_SHIFT            4
-#define E4XSTORMETHCONNAGCTXDQEXTLDPART_BIT13_MASK             0x1
-#define E4XSTORMETHCONNAGCTXDQEXTLDPART_BIT13_SHIFT            5
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_E5_RESERVED2_MASK      0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_E5_RESERVED2_SHIFT     4
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_E5_RESERVED3_MASK      0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_E5_RESERVED3_SHIFT     5
 #define E4XSTORMETHCONNAGCTXDQEXTLDPART_TX_RULE_ACTIVE_MASK    0x1
 #define E4XSTORMETHCONNAGCTXDQEXTLDPART_TX_RULE_ACTIVE_SHIFT   6
 #define E4XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_ACTIVE_MASK      0x1
 
 struct e4_xstorm_eth_hw_conn_ag_ctx {
        u8 reserved0;
-       u8 eth_state;
+       u8 state;
        u8 flags0;
 #define E4_XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
 #define E4_XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM0_SHIFT        0
 #define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED9_SHIFT           2
 #define E4_XSTORM_ETH_HW_CONN_AG_CTX_BIT11_MASK                        0x1
 #define E4_XSTORM_ETH_HW_CONN_AG_CTX_BIT11_SHIFT               3
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_BIT12_MASK                        0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_BIT12_SHIFT               4
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_BIT13_MASK                        0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_BIT13_SHIFT               5
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_E5_RESERVED2_MASK         0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_E5_RESERVED2_SHIFT                4
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_E5_RESERVED3_MASK         0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_E5_RESERVED3_SHIFT                5
 #define E4_XSTORM_ETH_HW_CONN_AG_CTX_TX_RULE_ACTIVE_MASK       0x1
 #define E4_XSTORM_ETH_HW_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT      6
 #define E4_XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_ACTIVE_MASK         0x1
        u8 cq_ring_mode;
        u8 vf_id;
        u8 vf_valid;
-       u8 reserved[3];
+       u8 relaxed_ordering;
+       u8 reserved[2];
 };
 
 /* rdma function init ramrod data */
        __le16 dpi;
 };
 
+/* roce DCQCN received statistics */
+struct roce_dcqcn_received_stats {
+       struct regpair ecn_pkt_rcv;
+       struct regpair cnp_pkt_rcv;
+};
+
+/* roce DCQCN sent statistics */
+struct roce_dcqcn_sent_stats {
+       struct regpair cnp_pkt_sent;
+};
+
 /* RoCE destroy qp requester output params */
 struct roce_destroy_qp_req_output_params {
        __le32 num_bound_mw;
 #define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PRI_FLG_SHIFT                   9
 #define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PRI_MASK                                0x7
 #define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PRI_SHIFT                       10
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RESERVED1_MASK                  0x7
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RESERVED1_SHIFT                 13
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PHYSICAL_QUEUES_FLG_MASK                0x1
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PHYSICAL_QUEUES_FLG_SHIFT       13
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RESERVED1_MASK                  0x3
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RESERVED1_SHIFT                 14
        u8 fields;
 #define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_MASK      0xF
 #define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_SHIFT     0
        __le32 ack_timeout_val;
        __le16 mtu;
        __le16 reserved2;
-       __le32 reserved3[3];
+       __le32 reserved3[2];
+       __le16 low_latency_phy_queue;
+       __le16 regular_latency_phy_queue;
        __le32 src_gid[4];
        __le32 dst_gid[4];
 };
 #define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_FLG_SHIFT    8
 #define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_OPS_EN_FLG_MASK           0x1
 #define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_OPS_EN_FLG_SHIFT          9
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RESERVED1_MASK                 0x3F
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RESERVED1_SHIFT                        10
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PHYSICAL_QUEUES_FLG_MASK       0x1
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PHYSICAL_QUEUES_FLG_SHIFT      10
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RESERVED1_MASK                 0x1F
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RESERVED1_SHIFT                        11
        u8 fields;
 #define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PRI_MASK               0x7
 #define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PRI_SHIFT              0
        __le16 p_key;
        __le32 flow_label;
        __le16 mtu;
-       __le16 reserved2;
+       __le16 low_latency_phy_queue;
+       __le16 regular_latency_phy_queue;
+       u8 reserved2[6];
        __le32 src_gid[4];
        __le32 dst_gid[4];
 };
        u8 byte4;
        u8 byte5;
        __le16 snd_sq_cons;
-       __le16 word2;
+       __le16 conn_dpi;
        __le16 word3;
        __le32 reg9;
        __le32 reg10;
 #define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF23_SHIFT     6
        u8 byte2;
        __le16 physical_q0;
-       __le16 word1;
+       __le16 irq_prod_shadow;
+       __le16 word2;
+       __le16 irq_cons;
        __le16 irq_prod;
-       __le16 word3;
-       __le16 word4;
        __le16 e5_reserved1;
-       __le16 irq_cons;
+       __le16 conn_dpi;
        u8 rxmit_opcode;
        u8 byte4;
        u8 byte5;
 #define E4_TSTORM_IWARP_CONN_AG_CTX_CF8_MASK   0x3
 #define E4_TSTORM_IWARP_CONN_AG_CTX_CF8_SHIFT  6
        u8 flags3;
-#define E4_TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_MASK                      0x3
-#define E4_TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_SHIFT                     0
+#define E4_TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_AND_TCP_HANDSHAKE_COMPLETE_MASK 0x3
+#define E4_TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_AND_TCP_HANDSHAKE_COMPLETE_SHIFT 0
 #define E4_TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_MASK       0x3
 #define E4_TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_SHIFT      2
 #define E4_TSTORM_IWARP_CONN_AG_CTX_CF0EN_MASK                         0x1
 #define E4_TSTORM_IWARP_CONN_AG_CTX_CF7EN_SHIFT                                3
 #define E4_TSTORM_IWARP_CONN_AG_CTX_CF8EN_MASK                         0x1
 #define E4_TSTORM_IWARP_CONN_AG_CTX_CF8EN_SHIFT                                4
-#define E4_TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_EN_MASK                   0x1
-#define E4_TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT                  5
+#define E4_TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_AND_TCP_HANDSHAKE_COMPL_EN_MASK 0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_AND_TCP_HANDSHAKE_COMPL_EN_SHIFT 5
 #define E4_TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_EN_MASK    0x1
 #define E4_TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_EN_SHIFT   6
 #define E4_TSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK                       0x1
        struct regpair async_eqe_output_buf;
        struct regpair handle_for_async;
        struct regpair shared_queue_addr;
+       __le16 rcv_wnd;
        u8 stats_counter_id;
-       u8 reserved3[15];
+       u8 reserved3[13];
 };
 
 /* iWARP TCP connection offload params passed by driver to FW */
 #define PSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_SHIFT          2
 #define PSTORM_FCOE_CONN_ST_CTX_OUTER_VLAN_FLAG_MASK           0x1
 #define PSTORM_FCOE_CONN_ST_CTX_OUTER_VLAN_FLAG_SHIFT          3
-#define PSTORM_FCOE_CONN_ST_CTX_RESERVED_MASK                  0xF
-#define PSTORM_FCOE_CONN_ST_CTX_RESERVED_SHIFT                 4
+#define PSTORM_FCOE_CONN_ST_CTX_SINGLE_VLAN_FLAG_MASK          0x1
+#define PSTORM_FCOE_CONN_ST_CTX_SINGLE_VLAN_FLAG_SHIFT         4
+#define PSTORM_FCOE_CONN_ST_CTX_RESERVED_MASK                  0x7
+#define PSTORM_FCOE_CONN_ST_CTX_RESERVED_SHIFT                 5
        u8 did_2;
        u8 did_1;
        u8 did_0;
 
 struct e4_xstorm_fcoe_conn_ag_ctx {
        u8 reserved0;
-       u8 fcoe_state;
+       u8 state;
        u8 flags0;
 #define E4_XSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_MASK   0x1
 #define E4_XSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT  0
 
 struct e4_tstorm_fcoe_conn_ag_ctx {
        u8 reserved0;
-       u8 fcoe_state;
+       u8 state;
        u8 flags0;
 #define E4_TSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_MASK   0x1
 #define E4_TSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT  0
 #define TSTORM_FCOE_CONN_ST_CTX_RESERVED_MASK  0x3F
 #define TSTORM_FCOE_CONN_ST_CTX_RESERVED_SHIFT 2
        u8 cq_relative_offset;
+       u8 cmdq_relative_offset;
        u8 bdq_resource_id;
-       u8 reserved0[5];
+       u8 reserved0[4];
 };
 
 struct e4_mstorm_fcoe_conn_ag_ctx {
 /* Fast path part of the fcoe storm context of Mstorm */
 struct fcoe_mstorm_fcoe_conn_st_ctx_fp {
        __le16 xfer_prod;
-       __le16 reserved1;
+       u8 num_cqs;
+       u8 reserved1;
        u8 protection_info;
 #define FCOE_MSTORM_FCOE_CONN_ST_CTX_FP_SUPPORT_PROTECTION_MASK  0x1
 #define FCOE_MSTORM_FCOE_CONN_ST_CTX_FP_SUPPORT_PROTECTION_SHIFT 0
 
 /* The iscsi storm connection context of Ystorm */
 struct ystorm_iscsi_conn_st_ctx {
-       __le32 reserved[4];
+       __le32 reserved[8];
 };
 
 /* Combined iSCSI and TCP storm connection of Pstorm */
 
 /* The combined tcp and iscsi storm context of Xstorm */
 struct xstorm_iscsi_tcp_conn_st_ctx {
-       __le32 reserved_iscsi[40];
        __le32 reserved_tcp[4];
+       __le32 reserved_iscsi[44];
 };
 
 struct e4_xstorm_iscsi_conn_ag_ctx {
 
 /* The iscsi storm connection context of Tstorm */
 struct tstorm_iscsi_conn_st_ctx {
-       __le32 reserved[40];
+       __le32 reserved[44];
 };
 
 struct e4_mstorm_iscsi_conn_ag_ctx {
 /* Combined iSCSI and TCP storm connection of Mstorm */
 struct mstorm_iscsi_tcp_conn_st_ctx {
        __le32 reserved_tcp[20];
-       __le32 reserved_iscsi[8];
+       __le32 reserved_iscsi[12];
 };
 
 /* The iscsi storm context of Ustorm */
 /* iscsi connection context */
 struct e4_iscsi_conn_context {
        struct ystorm_iscsi_conn_st_ctx ystorm_st_context;
-       struct regpair ystorm_st_padding[2];
        struct pstorm_iscsi_tcp_conn_st_ctx pstorm_st_context;
        struct regpair pstorm_st_padding[2];
        struct pb_context xpb2_context;
 #define DRV_MB_PARAM_DCBX_NOTIFY_MASK          0x000000FF
 #define DRV_MB_PARAM_DCBX_NOTIFY_SHIFT         3
 
-#define DRV_MB_PARAM_NVM_LEN_SHIFT             24
+#define DRV_MB_PARAM_NVM_LEN_OFFSET            24
 
 #define DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_SHIFT   0
 #define DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK    0x000000FF
 
        return rc;
 }
 
+int qed_dmae_sanity(struct qed_hwfn *p_hwfn,
+                   struct qed_ptt *p_ptt, const char *phase)
+{
+       u32 size = PAGE_SIZE / 2, val;
+       struct qed_dmae_params params;
+       int rc = 0;
+       dma_addr_t p_phys;
+       void *p_virt;
+       u32 *p_tmp;
+
+       p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+                                   2 * size, &p_phys, GFP_KERNEL);
+       if (!p_virt) {
+               DP_NOTICE(p_hwfn,
+                         "DMAE sanity [%s]: failed to allocate memory\n",
+                         phase);
+               return -ENOMEM;
+       }
+
+       /* Fill the bottom half of the allocated memory with a known pattern */
+       for (p_tmp = (u32 *)p_virt;
+            p_tmp < (u32 *)((u8 *)p_virt + size); p_tmp++) {
+               /* Save the address itself as the value */
+               val = (u32)(uintptr_t)p_tmp;
+               *p_tmp = val;
+       }
+
+       /* Zero the top half of the allocated memory */
+       memset((u8 *)p_virt + size, 0, size);
+
+       DP_VERBOSE(p_hwfn,
+                  QED_MSG_SP,
+                  "DMAE sanity [%s]: src_addr={phys 0x%llx, virt %p}, dst_addr={phys 0x%llx, virt %p}, size 0x%x\n",
+                  phase,
+                  (u64)p_phys,
+                  p_virt, (u64)(p_phys + size), (u8 *)p_virt + size, size);
+
+       memset(&params, 0, sizeof(params));
+       rc = qed_dmae_host2host(p_hwfn, p_ptt, p_phys, p_phys + size,
+                               size / 4 /* size_in_dwords */, &params);
+       if (rc) {
+               DP_NOTICE(p_hwfn,
+                         "DMAE sanity [%s]: qed_dmae_host2host() failed. rc = %d.\n",
+                         phase, rc);
+               goto out;
+       }
+
+       /* Verify that the top half of the allocated memory has the pattern */
+       for (p_tmp = (u32 *)((u8 *)p_virt + size);
+            p_tmp < (u32 *)((u8 *)p_virt + (2 * size)); p_tmp++) {
+               /* The corresponding address in the bottom half */
+               val = (u32)(uintptr_t)p_tmp - size;
+
+               if (*p_tmp != val) {
+                       DP_NOTICE(p_hwfn,
+                                 "DMAE sanity [%s]: addr={phys 0x%llx, virt %p}, read_val 0x%08x, expected_val 0x%08x\n",
+                                 phase,
+                                 (u64)p_phys + ((u8 *)p_tmp - (u8 *)p_virt),
+                                 p_tmp, *p_tmp, val);
+                       rc = -EINVAL;
+                       goto out;
+               }
+       }
+
+out:
+       dma_free_coherent(&p_hwfn->cdev->pdev->dev, 2 * size, p_virt, p_phys);
+       return rc;
+}
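The sanity check above writes each dword's own (truncated) address into the bottom half of the buffer, DMAs it to the top half and verifies the copy. The same fill/verify pattern can be exercised as a minimal user-space sketch, with a plain memcpy standing in for qed_dmae_host2host() and a hypothetical buffer size:

#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>

int main(void)
{
	size_t size = 2048;		/* stands in for PAGE_SIZE / 2 */
	uint8_t *buf = malloc(2 * size);
	uint32_t *p;

	assert(buf);

	/* Fill the bottom half: each dword holds the low 32 bits of its own address */
	for (p = (uint32_t *)buf; p < (uint32_t *)(buf + size); p++)
		*p = (uint32_t)(uintptr_t)p;

	/* Zero the top half, then "DMA" it: memcpy stands in for the DMAE engine */
	memset(buf + size, 0, size);
	memcpy(buf + size, buf, size);

	/* Verify the copy, mirroring the check in qed_dmae_sanity() */
	for (p = (uint32_t *)(buf + size); p < (uint32_t *)(buf + 2 * size); p++)
		assert(*p == (uint32_t)((uintptr_t)p - size));

	free(buf);
	return 0;
}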
 
 
 int qed_init_fw_data(struct qed_dev *cdev,
                     const u8 *fw_data);
+
+int qed_dmae_sanity(struct qed_hwfn *p_hwfn,
+                   struct qed_ptt *p_ptt, const char *phase);
+
 #endif
 
  */
 
 #include <linux/types.h>
+#include <linux/crc8.h>
 #include <linux/delay.h>
 #include <linux/kernel.h>
 #include <linux/slab.h>
 #include "qed_init_ops.h"
 #include "qed_reg_addr.h"
 
+#define CDU_VALIDATION_DEFAULT_CFG     61
+
+static u16 con_region_offsets[3][NUM_OF_CONNECTION_TYPES_E4] = {
+       {400, 336, 352, 304, 304, 384, 416, 352},       /* region 3 offsets */
+       {528, 496, 416, 448, 448, 512, 544, 480},       /* region 4 offsets */
+       {608, 544, 496, 512, 576, 592, 624, 560}        /* region 5 offsets */
+};
+
+static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES_E4] = {
+       {240, 240, 112, 0, 0, 0, 0, 96} /* region 1 offsets */
+};
+
 /* General constants */
 #define QM_PQ_MEM_4KB(pq_size) (pq_size ? DIV_ROUND_UP((pq_size + 1) * \
                                                        QM_PQ_ELEMENT_SIZE, \
 #define QM_WFQ_INC_VAL(weight) ((weight) * 0x9000)
 
 /* Max WFQ increment value is 0.7 * upper bound */
-#define QM_WFQ_MAX_INC_VAL     43750000
+#define QM_WFQ_MAX_INC_VAL     ((QM_WFQ_UPPER_BOUND * 7) / 10)
 
 /* RL constants */
 
 #define QM_RL_PERIOD_CLK_25M   (25 * QM_RL_PERIOD)
 
 /* RL increment value - rate is specified in mbps */
-#define QM_RL_INC_VAL(rate)            max_t(u32,      \
-                                             (u32)(((rate ? rate : \
-                                                     1000000) *    \
-                                                    QM_RL_PERIOD * \
-                                                    101) / (8 * 100)), 1)
+#define QM_RL_INC_VAL(rate) ({ \
+       typeof(rate) __rate = (rate); \
+       max_t(u32, \
+             (u32)(((__rate ? __rate : 1000000) * QM_RL_PERIOD * 101) / \
+                   (8 * 100)), \
+             1); })
 
 /* PF RL Upper bound is set to 10 * burst size of 1ms in 50Gbps */
-#define QM_RL_UPPER_BOUND      62500000
+#define QM_PF_RL_UPPER_BOUND   62500000
 
 /* Max PF RL increment value is 0.7 * upper bound */
-#define QM_RL_MAX_INC_VAL      43750000
+#define QM_PF_RL_MAX_INC_VAL   ((QM_PF_RL_UPPER_BOUND * 7) / 10)
+
+/* Vport RL Upper bound, link speed is in Mbps */
+#define QM_VP_RL_UPPER_BOUND(speed)    ((u32)max_t(u32, \
+                                                   QM_RL_INC_VAL(speed), \
+                                                   9700 + 1000))
+
+/* Max Vport RL increment value is the Vport RL upper bound */
+#define QM_VP_RL_MAX_INC_VAL(speed)    QM_VP_RL_UPPER_BOUND(speed)
+
+/* Vport RL credit threshold in case of QM bypass */
+#define QM_VP_RL_BYPASS_THRESH_SPEED   (QM_VP_RL_UPPER_BOUND(10000) - 1)
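For intuition, QM_RL_INC_VAL turns a rate in Mbps into the number of bytes credited per rate-limiter period, with a 1% margin (the 101/100 factor) and a 1,000,000 Mbps fallback when the rate is zero; QM_VP_RL_UPPER_BOUND then keeps the per-VPORT bound at no less than 9700 + 1000 bytes. A standalone sketch of the same arithmetic, with the period passed in explicitly (QM_RL_PERIOD itself is defined earlier in this file; the 5 us below is only an assumed example value):

#include <stdint.h>
#include <stdio.h>

/* Same arithmetic as QM_RL_INC_VAL, with the RL period (usec) as a parameter */
static uint32_t rl_inc_val(uint32_t rate_mbps, uint32_t period_us)
{
	uint64_t rate = rate_mbps ? rate_mbps : 1000000;
	uint64_t val = (rate * period_us * 101) / (8 * 100);

	return val ? (uint32_t)val : 1;
}

int main(void)
{
	/* Example: 25 Gbps link, assumed 5 us period -> 15781 bytes/period */
	printf("inc_val = %u\n", rl_inc_val(25000, 5));
	return 0;
}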
 
 /* AFullOprtnstcCrdMask constants */
 #define QM_OPPOR_LINE_VOQ_DEF  1
 /* Pure LB CmdQ lines (+spare) */
 #define PBF_CMDQ_PURE_LB_LINES 150
 
+#define PBF_CMDQ_LINES_E5_RSVD_RATIO   8
+
 #define PBF_CMDQ_LINES_RT_OFFSET(ext_voq) \
        (PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET + \
         (ext_voq) * (PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET - \
        SET_FIELD(var[cmd ## _ ## field ## _OFFSET], \
                  cmd ## _ ## field, \
                  value)
-/* QM: VOQ macros */
-#define PHYS_VOQ(port, tc, max_phys_tcs_per_port) ((port) *    \
-                                                  (max_phys_tcs_per_port) + \
-                                                  (tc))
-#define LB_VOQ(port)                           ( \
-               MAX_PHYS_VOQS + (port))
-#define VOQ(port, tc, max_phy_tcs_pr_port)     \
-       ((tc) <         \
-        LB_TC ? PHYS_VOQ(port,         \
-                         tc,                    \
-                         max_phy_tcs_pr_port) \
-               : LB_VOQ(port))
+
+#define QM_INIT_TX_PQ_MAP(p_hwfn, map, chip, pq_id, rl_valid, vp_pq_id, rl_id, \
+                         ext_voq, wrr) \
+       do { \
+               typeof(map) __map; \
+               memset(&__map, 0, sizeof(__map)); \
+               SET_FIELD(__map.reg, QM_RF_PQ_MAP_ ## chip ## _PQ_VALID, 1); \
+               SET_FIELD(__map.reg, QM_RF_PQ_MAP_ ## chip ## _RL_VALID, \
+                         rl_valid); \
+               SET_FIELD(__map.reg, QM_RF_PQ_MAP_ ## chip ## _VP_PQ_ID, \
+                         vp_pq_id); \
+               SET_FIELD(__map.reg, QM_RF_PQ_MAP_ ## chip ## _RL_ID, rl_id); \
+               SET_FIELD(__map.reg, QM_RF_PQ_MAP_ ## chip ## _VOQ, ext_voq); \
+               SET_FIELD(__map.reg, \
+                         QM_RF_PQ_MAP_ ## chip ## _WRR_WEIGHT_GROUP, wrr); \
+               STORE_RT_REG(p_hwfn, QM_REG_TXPQMAP_RT_OFFSET + (pq_id), \
+                            *((u32 *)&__map)); \
+               (map) = __map; \
+       } while (0)
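QM_INIT_TX_PQ_MAP composes the PQ map in a local __map (obtained via typeof, so the caller's variable is written exactly once), stores it into the runtime array as a single dword through STORE_RT_REG, and only then copies it back out through the map argument; the do/while (0) wrapper keeps the helper usable as an ordinary statement.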
+
+#define WRITE_PQ_INFO_TO_RAM   1
+#define PQ_INFO_ELEMENT(vp, pf, tc, port, rl_valid, rl) \
+       (((vp) << 0) | ((pf) << 12) | ((tc) << 16) | ((port) << 20) | \
+       ((rl_valid) << 22) | ((rl) << 24))
+#define PQ_INFO_RAM_GRC_ADDRESS(pq_id) \
+       (XSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + 21776 + (pq_id) * 4)
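PQ_INFO_ELEMENT packs the PQ description into one dword: VPORT from bit 0, PF from bit 12, TC from bit 16, port from bit 20, rl_valid at bit 22 and the RL ID from bit 24 (the field widths below are inferred from these shifts). A quick standalone check of the layout:

#include <stdint.h>
#include <assert.h>

#define PQ_INFO_ELEMENT(vp, pf, tc, port, rl_valid, rl) \
	(((vp) << 0) | ((pf) << 12) | ((tc) << 16) | ((port) << 20) | \
	((rl_valid) << 22) | ((rl) << 24))

int main(void)
{
	/* vp=5, pf=2, tc=3, port=1, rl_valid=1, rl=7 */
	uint32_t v = PQ_INFO_ELEMENT(5, 2, 3, 1, 1, 7);

	assert((v & 0xfff) == 5);		/* VPORT */
	assert(((v >> 12) & 0xf) == 2);		/* PF */
	assert(((v >> 16) & 0xf) == 3);		/* TC */
	assert(((v >> 20) & 0x3) == 1);		/* port */
	assert(((v >> 22) & 0x1) == 1);		/* rl_valid */
	assert((v >> 24) == 7);			/* RL ID */
	return 0;
}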
+
 /******************** INTERNAL IMPLEMENTATION *********************/
 
+/* Returns the external VOQ number */
+static u8 qed_get_ext_voq(struct qed_hwfn *p_hwfn,
+                         u8 port_id, u8 tc, u8 max_phys_tcs_per_port)
+{
+       if (tc == PURE_LB_TC)
+               return NUM_OF_PHYS_TCS * MAX_NUM_PORTS_BB + port_id;
+       else
+               return port_id * max_phys_tcs_per_port + tc;
+}
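Physical TCs thus map to VOQ = port_id * max_phys_tcs_per_port + tc, while each port's pure-LB TC gets a dedicated VOQ placed after all physical VOQs. A standalone sketch of the same mapping, with the chip constants passed in as parameters (the numeric values in the asserts are illustrative only):

#include <stdint.h>
#include <assert.h>

/* Mirrors qed_get_ext_voq(); the chip constants are plain parameters here */
static uint8_t ext_voq(uint8_t port_id, uint8_t tc, uint8_t max_phys_tcs_per_port,
		       uint8_t pure_lb_tc, uint8_t num_phys_tcs, uint8_t max_ports)
{
	if (tc == pure_lb_tc)
		return num_phys_tcs * max_ports + port_id;

	return port_id * max_phys_tcs_per_port + tc;
}

int main(void)
{
	/* Example values: 4 TCs per port, pure-LB TC 8, 8 phys TCs, 2 ports */
	assert(ext_voq(1, 2, 4, 8, 8, 2) == 6);		/* physical TC */
	assert(ext_voq(1, 8, 4, 8, 8, 2) == 17);	/* pure-LB VOQ of port 1 */
	return 0;
}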
+
 /* Prepare PF RL enable/disable runtime init values */
 static void qed_enable_pf_rl(struct qed_hwfn *p_hwfn, bool pf_rl_en)
 {
        STORE_RT_REG(p_hwfn, QM_REG_RLPFENABLE_RT_OFFSET, pf_rl_en ? 1 : 0);
        if (pf_rl_en) {
+               u8 num_ext_voqs = MAX_NUM_VOQS_E4;
+               u64 voq_bit_mask = ((u64)1 << num_ext_voqs) - 1;
+
                /* Enable RLs for all VOQs */
-               STORE_RT_REG(p_hwfn, QM_REG_RLPFVOQENABLE_RT_OFFSET,
-                            (1 << MAX_NUM_VOQS_E4) - 1);
+               STORE_RT_REG(p_hwfn,
+                            QM_REG_RLPFVOQENABLE_RT_OFFSET,
+                            (u32)voq_bit_mask);
+               if (num_ext_voqs >= 32)
+                       STORE_RT_REG(p_hwfn, QM_REG_RLPFVOQENABLE_MSB_RT_OFFSET,
+                                    (u32)(voq_bit_mask >> 32));
+
                /* Write RL period */
                STORE_RT_REG(p_hwfn,
                             QM_REG_RLPFPERIOD_RT_OFFSET, QM_RL_PERIOD_CLK_25M);
                if (QM_BYPASS_EN)
                        STORE_RT_REG(p_hwfn,
                                     QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET,
-                                    QM_RL_UPPER_BOUND);
+                                    QM_PF_RL_UPPER_BOUND);
        }
 }
 
                if (QM_BYPASS_EN)
                        STORE_RT_REG(p_hwfn,
                                     QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET,
-                                    QM_RL_UPPER_BOUND);
+                                    QM_VP_RL_BYPASS_THRESH_SPEED);
        }
 }
 
  * the specified VOQ.
  */
 static void qed_cmdq_lines_voq_rt_init(struct qed_hwfn *p_hwfn,
-                                      u8 voq, u16 cmdq_lines)
+                                      u8 ext_voq, u16 cmdq_lines)
 {
-       u32 qm_line_crd;
+       u32 qm_line_crd = QM_VOQ_LINE_CRD(cmdq_lines);
 
-       qm_line_crd = QM_VOQ_LINE_CRD(cmdq_lines);
-       OVERWRITE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(voq),
+       OVERWRITE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(ext_voq),
                         (u32)cmdq_lines);
-       STORE_RT_REG(p_hwfn, QM_REG_VOQCRDLINE_RT_OFFSET + voq, qm_line_crd);
-       STORE_RT_REG(p_hwfn, QM_REG_VOQINITCRDLINE_RT_OFFSET + voq,
+       STORE_RT_REG(p_hwfn, QM_REG_VOQCRDLINE_RT_OFFSET + ext_voq,
+                    qm_line_crd);
+       STORE_RT_REG(p_hwfn, QM_REG_VOQINITCRDLINE_RT_OFFSET + ext_voq,
                     qm_line_crd);
 }
 
        u8 max_phys_tcs_per_port,
        struct init_qm_port_params port_params[MAX_NUM_PORTS])
 {
-       u8 tc, voq, port_id, num_tcs_in_port;
+       u8 tc, ext_voq, port_id, num_tcs_in_port;
+       u8 num_ext_voqs = MAX_NUM_VOQS_E4;
+
+       /* Clear PBF lines of all VOQs */
+       for (ext_voq = 0; ext_voq < num_ext_voqs; ext_voq++)
+               STORE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(ext_voq), 0);
 
-       /* Clear PBF lines for all VOQs */
-       for (voq = 0; voq < MAX_NUM_VOQS_E4; voq++)
-               STORE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(voq), 0);
        for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
-               if (port_params[port_id].active) {
-                       u16 phys_lines, phys_lines_per_tc;
-
-                       /* find #lines to divide between active phys TCs */
-                       phys_lines = port_params[port_id].num_pbf_cmd_lines -
-                                    PBF_CMDQ_PURE_LB_LINES;
-                       /* find #lines per active physical TC */
-                       num_tcs_in_port = 0;
-                       for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
-                               if (((port_params[port_id].active_phys_tcs >>
-                                     tc) & 0x1) == 1)
-                                       num_tcs_in_port++;
-                       }
+               u16 phys_lines, phys_lines_per_tc;
+
+               if (!port_params[port_id].active)
+                       continue;
 
-                       phys_lines_per_tc = phys_lines / num_tcs_in_port;
-                       /* init registers per active TC */
-                       for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
-                               if (((port_params[port_id].active_phys_tcs >>
-                                     tc) & 0x1) != 1)
-                                       continue;
+               /* Find the number of command queue lines to divide between
+                * the active physical TCs. In E5, 1/8 of the lines are
+                * reserved; the lines for the pure LB TC are subtracted.
+                */
+               phys_lines = port_params[port_id].num_pbf_cmd_lines;
+               phys_lines -= PBF_CMDQ_PURE_LB_LINES;
 
-                               voq = PHYS_VOQ(port_id, tc,
-                                              max_phys_tcs_per_port);
-                               qed_cmdq_lines_voq_rt_init(p_hwfn, voq,
-                                                          phys_lines_per_tc);
-                       }
+               /* Find #lines per active physical TC */
+               num_tcs_in_port = 0;
+               for (tc = 0; tc < max_phys_tcs_per_port; tc++)
+                       if (((port_params[port_id].active_phys_tcs >>
+                             tc) & 0x1) == 1)
+                               num_tcs_in_port++;
+               phys_lines_per_tc = phys_lines / num_tcs_in_port;
 
-                       /* init registers for pure LB TC */
-                       qed_cmdq_lines_voq_rt_init(p_hwfn, LB_VOQ(port_id),
-                                                  PBF_CMDQ_PURE_LB_LINES);
+               /* Init registers per active TC */
+               for (tc = 0; tc < max_phys_tcs_per_port; tc++) {
+                       ext_voq = qed_get_ext_voq(p_hwfn,
+                                                 port_id,
+                                                 tc, max_phys_tcs_per_port);
+                       if (((port_params[port_id].active_phys_tcs >>
+                             tc) & 0x1) == 1)
+                               qed_cmdq_lines_voq_rt_init(p_hwfn,
+                                                          ext_voq,
+                                                          phys_lines_per_tc);
                }
+
+               /* Init registers for pure LB TC */
+               ext_voq = qed_get_ext_voq(p_hwfn,
+                                         port_id,
+                                         PURE_LB_TC, max_phys_tcs_per_port);
+               qed_cmdq_lines_voq_rt_init(p_hwfn,
+                                          ext_voq, PBF_CMDQ_PURE_LB_LINES);
        }
 }
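Worked example with a hypothetical line budget: a port configured with 3350 PBF command-queue lines and four active physical TCs gets (3350 - PBF_CMDQ_PURE_LB_LINES) / 4 = (3350 - 150) / 4 = 800 lines per physical TC, while its pure-LB VOQ keeps the fixed 150 lines.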
 
        struct init_qm_port_params port_params[MAX_NUM_PORTS])
 {
        u32 usable_blocks, pure_lb_blocks, phys_blocks;
-       u8 tc, voq, port_id, num_tcs_in_port;
+       u8 tc, ext_voq, port_id, num_tcs_in_port;
 
        for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
-               u32 temp;
-
                if (!port_params[port_id].active)
                        continue;
 
                usable_blocks = port_params[port_id].num_btb_blocks -
                                BTB_HEADROOM_BLOCKS;
 
-               /* find blocks per physical TC */
+               /* Find blocks per physical TC. Use a factor to avoid
+                * floating-point arithmetic.
+                */
                num_tcs_in_port = 0;
-               for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
+               for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++)
                        if (((port_params[port_id].active_phys_tcs >>
                              tc) & 0x1) == 1)
                                num_tcs_in_port++;
-               }
 
                pure_lb_blocks = (usable_blocks * BTB_PURE_LB_FACTOR) /
                                 (num_tcs_in_port * BTB_PURE_LB_FACTOR +
                /* Init physical TCs */
                for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
                        if (((port_params[port_id].active_phys_tcs >>
-                             tc) & 0x1) != 1)
-                               continue;
-
-                       voq = PHYS_VOQ(port_id, tc,
-                                      max_phys_tcs_per_port);
-                       STORE_RT_REG(p_hwfn, PBF_BTB_GUARANTEED_RT_OFFSET(voq),
-                                    phys_blocks);
+                             tc) & 0x1) == 1) {
+                               ext_voq =
+                                       qed_get_ext_voq(p_hwfn,
+                                                       port_id,
+                                                       tc,
+                                                       max_phys_tcs_per_port);
+                               STORE_RT_REG(p_hwfn,
+                                            PBF_BTB_GUARANTEED_RT_OFFSET
+                                            (ext_voq), phys_blocks);
+                       }
                }
 
                /* Init pure LB TC */
-               temp = LB_VOQ(port_id);
-               STORE_RT_REG(p_hwfn, PBF_BTB_GUARANTEED_RT_OFFSET(temp),
+               ext_voq = qed_get_ext_voq(p_hwfn,
+                                         port_id,
+                                         PURE_LB_TC, max_phys_tcs_per_port);
+               STORE_RT_REG(p_hwfn, PBF_BTB_GUARANTEED_RT_OFFSET(ext_voq),
                             pure_lb_blocks);
        }
 }
 
 /* Prepare Tx PQ mapping runtime init values for the specified PF */
-static void qed_tx_pq_map_rt_init(
-       struct qed_hwfn *p_hwfn,
-       struct qed_ptt *p_ptt,
-       struct qed_qm_pf_rt_init_params *p_params,
-       u32 base_mem_addr_4kb)
+static void qed_tx_pq_map_rt_init(struct qed_hwfn *p_hwfn,
+                                 struct qed_ptt *p_ptt,
+                                 struct qed_qm_pf_rt_init_params *p_params,
+                                 u32 base_mem_addr_4kb)
 {
-       struct init_qm_vport_params *vport_params = p_params->vport_params;
-       u16 num_pqs = p_params->num_pf_pqs + p_params->num_vf_pqs;
-       u16 first_pq_group = p_params->start_pq / QM_PF_QUEUE_GROUP_SIZE;
-       u16 last_pq_group = (p_params->start_pq + num_pqs - 1) /
-                           QM_PF_QUEUE_GROUP_SIZE;
-       u16 i, pq_id, pq_group;
-
-       /* A bit per Tx PQ indicating if the PQ is associated with a VF */
        u32 tx_pq_vf_mask[MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE] = { 0 };
+       struct init_qm_vport_params *vport_params = p_params->vport_params;
        u32 num_tx_pq_vf_masks = MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE;
-       u32 pq_mem_4kb = QM_PQ_MEM_4KB(p_params->num_pf_cids);
-       u32 vport_pq_mem_4kb = QM_PQ_MEM_4KB(p_params->num_vf_cids);
-       u32 mem_addr_4kb = base_mem_addr_4kb;
+       u16 num_pqs, first_pq_group, last_pq_group, i, j, pq_id, pq_group;
+       struct init_qm_pq_params *pq_params = p_params->pq_params;
+       u32 pq_mem_4kb, vport_pq_mem_4kb, mem_addr_4kb;
+
+       num_pqs = p_params->num_pf_pqs + p_params->num_vf_pqs;
+
+       first_pq_group = p_params->start_pq / QM_PF_QUEUE_GROUP_SIZE;
+       last_pq_group = (p_params->start_pq + num_pqs - 1) /
+                       QM_PF_QUEUE_GROUP_SIZE;
+
+       pq_mem_4kb = QM_PQ_MEM_4KB(p_params->num_pf_cids);
+       vport_pq_mem_4kb = QM_PQ_MEM_4KB(p_params->num_vf_cids);
+       mem_addr_4kb = base_mem_addr_4kb;
 
        /* Set mapping from PQ group to PF */
        for (pq_group = first_pq_group; pq_group <= last_pq_group; pq_group++)
                STORE_RT_REG(p_hwfn, QM_REG_PQTX2PF_0_RT_OFFSET + pq_group,
                             (u32)(p_params->pf_id));
+
        /* Set PQ sizes */
        STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_0_RT_OFFSET,
                     QM_PQ_SIZE_256B(p_params->num_pf_cids));
 
        /* Go over all Tx PQs */
        for (i = 0, pq_id = p_params->start_pq; i < num_pqs; i++, pq_id++) {
-               u8 voq = VOQ(p_params->port_id, p_params->pq_params[i].tc_id,
-                            p_params->max_phys_tcs_per_port);
-               bool is_vf_pq = (i >= p_params->num_pf_pqs);
+               u8 ext_voq, vport_id_in_pf, tc_id = pq_params[i].tc_id;
+               u32 max_qm_global_rls = MAX_QM_GLOBAL_RLS;
                struct qm_rf_pq_map_e4 tx_pq_map;
+               bool is_vf_pq, rl_valid;
+               u16 *p_first_tx_pq_id;
 
-               bool rl_valid = p_params->pq_params[i].rl_valid &&
-                               (p_params->pq_params[i].vport_id <
-                                MAX_QM_GLOBAL_RLS);
+               ext_voq = qed_get_ext_voq(p_hwfn,
+                                         p_params->port_id,
+                                         tc_id,
+                                         p_params->max_phys_tcs_per_port);
+               is_vf_pq = (i >= p_params->num_pf_pqs);
+               rl_valid = pq_params[i].rl_valid &&
+                          pq_params[i].vport_id < max_qm_global_rls;
 
                /* Update first Tx PQ of VPORT/TC */
-               u8 vport_id_in_pf = p_params->pq_params[i].vport_id -
-                                   p_params->start_vport;
-               u16 *pq_ids = &vport_params[vport_id_in_pf].first_tx_pq_id[0];
-               u16 first_tx_pq_id = pq_ids[p_params->pq_params[i].tc_id];
+               vport_id_in_pf = pq_params[i].vport_id - p_params->start_vport;
+               p_first_tx_pq_id =
+                   &vport_params[vport_id_in_pf].first_tx_pq_id[tc_id];
+               if (*p_first_tx_pq_id == QM_INVALID_PQ_ID) {
+                       u32 map_val =
+                               (ext_voq << QM_WFQ_VP_PQ_VOQ_SHIFT) |
+                               (p_params->pf_id << QM_WFQ_VP_PQ_PF_E4_SHIFT);
 
-               if (first_tx_pq_id == QM_INVALID_PQ_ID) {
                        /* Create new VP PQ */
-                       pq_ids[p_params->pq_params[i].tc_id] = pq_id;
-                       first_tx_pq_id = pq_id;
+                       *p_first_tx_pq_id = pq_id;
 
                        /* Map VP PQ to VOQ and PF */
                        STORE_RT_REG(p_hwfn,
                                     QM_REG_WFQVPMAP_RT_OFFSET +
-                                    first_tx_pq_id,
-                                    (voq << QM_WFQ_VP_PQ_VOQ_SHIFT) |
-                                    (p_params->pf_id <<
-                                     QM_WFQ_VP_PQ_PF_E4_SHIFT));
+                                    *p_first_tx_pq_id,
+                                    map_val);
                }
 
-               if (p_params->pq_params[i].rl_valid && !rl_valid)
+               /* Check RL ID */
+               if (pq_params[i].rl_valid && pq_params[i].vport_id >=
+                   max_qm_global_rls)
                        DP_NOTICE(p_hwfn,
-                                 "Invalid VPORT ID for rate limiter configuration");
-               /* Fill PQ map entry */
-               memset(&tx_pq_map, 0, sizeof(tx_pq_map));
-               SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_E4_PQ_VALID, 1);
-               SET_FIELD(tx_pq_map.reg,
-                         QM_RF_PQ_MAP_E4_RL_VALID, rl_valid ? 1 : 0);
-               SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_E4_VP_PQ_ID,
-                         first_tx_pq_id);
-               SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_E4_RL_ID,
-                         rl_valid ?
-                         p_params->pq_params[i].vport_id : 0);
-               SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_E4_VOQ, voq);
-               SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_E4_WRR_WEIGHT_GROUP,
-                         p_params->pq_params[i].wrr_group);
-               /* Write PQ map entry to CAM */
-               STORE_RT_REG(p_hwfn, QM_REG_TXPQMAP_RT_OFFSET + pq_id,
-                            *((u32 *)&tx_pq_map));
-               /* Set base address */
+                                 "Invalid VPORT ID for rate limiter configuration\n");
+
+               /* Prepare PQ map entry */
+               QM_INIT_TX_PQ_MAP(p_hwfn,
+                                 tx_pq_map,
+                                 E4,
+                                 pq_id,
+                                 rl_valid ? 1 : 0,
+                                 *p_first_tx_pq_id,
+                                 rl_valid ? pq_params[i].vport_id : 0,
+                                 ext_voq, pq_params[i].wrr_group);
+
+               /* Set PQ base address */
                STORE_RT_REG(p_hwfn,
                             QM_REG_BASEADDRTXPQ_RT_OFFSET + pq_id,
                             mem_addr_4kb);
 
+               /* Clear PQ pointer table entry (64 bit) */
+               if (p_params->is_pf_loading)
+                       for (j = 0; j < 2; j++)
+                               STORE_RT_REG(p_hwfn,
+                                            QM_REG_PTRTBLTX_RT_OFFSET +
+                                            (pq_id * 2) + j, 0);
+
+               /* Write PQ info to RAM */
+               if (WRITE_PQ_INFO_TO_RAM != 0) {
+                       u32 pq_info = 0;
+
+                       pq_info = PQ_INFO_ELEMENT(*p_first_tx_pq_id,
+                                                 p_params->pf_id,
+                                                 tc_id,
+                                                 p_params->port_id,
+                                                 rl_valid ? 1 : 0,
+                                                 rl_valid ?
+                                                 pq_params[i].vport_id : 0);
+                       qed_wr(p_hwfn, p_ptt, PQ_INFO_RAM_GRC_ADDRESS(pq_id),
+                              pq_info);
+               }
+
                /* If VF PQ, add indication to PQ VF mask */
                if (is_vf_pq) {
                        tx_pq_vf_mask[pq_id /
 
 /* Prepare Other PQ mapping runtime init values for the specified PF */
 static void qed_other_pq_map_rt_init(struct qed_hwfn *p_hwfn,
-                                    u8 port_id,
                                     u8 pf_id,
+                                    bool is_pf_loading,
                                     u32 num_pf_cids,
                                     u32 num_tids, u32 base_mem_addr_4kb)
 {
        u32 pq_size, pq_mem_4kb, mem_addr_4kb;
-       u16 i, pq_id, pq_group;
+       u16 i, j, pq_id, pq_group;
 
-       /* a single other PQ group is used in each PF,
-        * where PQ group i is used in PF i.
+       /* A single other PQ group is used in each PF, where PQ group i is used
+        * in PF i.
         */
        pq_group = pf_id;
        pq_size = num_pf_cids + num_tids;
        /* Map PQ group to PF */
        STORE_RT_REG(p_hwfn, QM_REG_PQOTHER2PF_0_RT_OFFSET + pq_group,
                     (u32)(pf_id));
+
        /* Set PQ sizes */
        STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_2_RT_OFFSET,
                     QM_PQ_SIZE_256B(pq_size));
 
-       /* Set base address */
        for (i = 0, pq_id = pf_id * QM_PF_QUEUE_GROUP_SIZE;
             i < QM_OTHER_PQS_PER_PF; i++, pq_id++) {
+               /* Set PQ base address */
                STORE_RT_REG(p_hwfn,
                             QM_REG_BASEADDROTHERPQ_RT_OFFSET + pq_id,
                             mem_addr_4kb);
+
+               /* Clear PQ pointer table entry */
+               if (is_pf_loading)
+                       for (j = 0; j < 2; j++)
+                               STORE_RT_REG(p_hwfn,
+                                            QM_REG_PTRTBLOTHER_RT_OFFSET +
+                                            (pq_id * 2) + j, 0);
+
                mem_addr_4kb += pq_mem_4kb;
        }
 }
                              struct qed_qm_pf_rt_init_params *p_params)
 {
        u16 num_tx_pqs = p_params->num_pf_pqs + p_params->num_vf_pqs;
-       u32 crd_reg_offset;
-       u32 inc_val;
+       struct init_qm_pq_params *pq_params = p_params->pq_params;
+       u32 inc_val, crd_reg_offset;
+       u8 ext_voq;
        u16 i;
 
-       if (p_params->pf_id < MAX_NUM_PFS_BB)
-               crd_reg_offset = QM_REG_WFQPFCRD_RT_OFFSET;
-       else
-               crd_reg_offset = QM_REG_WFQPFCRD_MSB_RT_OFFSET;
-       crd_reg_offset += p_params->pf_id % MAX_NUM_PFS_BB;
-
        inc_val = QM_WFQ_INC_VAL(p_params->pf_wfq);
        if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
                DP_NOTICE(p_hwfn, "Invalid PF WFQ weight configuration\n");
        }
 
        for (i = 0; i < num_tx_pqs; i++) {
-               u8 voq = VOQ(p_params->port_id, p_params->pq_params[i].tc_id,
-                            p_params->max_phys_tcs_per_port);
-
+               ext_voq = qed_get_ext_voq(p_hwfn,
+                                         p_params->port_id,
+                                         pq_params[i].tc_id,
+                                         p_params->max_phys_tcs_per_port);
+               crd_reg_offset =
+                       (p_params->pf_id < MAX_NUM_PFS_BB ?
+                        QM_REG_WFQPFCRD_RT_OFFSET :
+                        QM_REG_WFQPFCRD_MSB_RT_OFFSET) +
+                       ext_voq * MAX_NUM_PFS_BB +
+                       (p_params->pf_id % MAX_NUM_PFS_BB);
                OVERWRITE_RT_REG(p_hwfn,
-                                crd_reg_offset + voq * MAX_NUM_PFS_BB,
-                                QM_WFQ_CRD_REG_SIGN_BIT);
+                                crd_reg_offset, (u32)QM_WFQ_CRD_REG_SIGN_BIT);
        }
 
        STORE_RT_REG(p_hwfn,
                     QM_REG_WFQPFUPPERBOUND_RT_OFFSET + p_params->pf_id,
-                    QM_WFQ_UPPER_BOUND | QM_WFQ_CRD_REG_SIGN_BIT);
+                    QM_WFQ_UPPER_BOUND | (u32)QM_WFQ_CRD_REG_SIGN_BIT);
        STORE_RT_REG(p_hwfn, QM_REG_WFQPFWEIGHT_RT_OFFSET + p_params->pf_id,
                     inc_val);
+
        return 0;
 }
 
 {
        u32 inc_val = QM_RL_INC_VAL(pf_rl);
 
-       if (inc_val > QM_RL_MAX_INC_VAL) {
+       if (inc_val > QM_PF_RL_MAX_INC_VAL) {
                DP_NOTICE(p_hwfn, "Invalid PF rate limit configuration\n");
                return -1;
        }
-       STORE_RT_REG(p_hwfn, QM_REG_RLPFCRD_RT_OFFSET + pf_id,
-                    QM_RL_CRD_REG_SIGN_BIT);
-       STORE_RT_REG(p_hwfn, QM_REG_RLPFUPPERBOUND_RT_OFFSET + pf_id,
-                    QM_RL_UPPER_BOUND | QM_RL_CRD_REG_SIGN_BIT);
+
+       STORE_RT_REG(p_hwfn,
+                    QM_REG_RLPFCRD_RT_OFFSET + pf_id,
+                    (u32)QM_RL_CRD_REG_SIGN_BIT);
+       STORE_RT_REG(p_hwfn,
+                    QM_REG_RLPFUPPERBOUND_RT_OFFSET + pf_id,
+                    QM_PF_RL_UPPER_BOUND | (u32)QM_RL_CRD_REG_SIGN_BIT);
        STORE_RT_REG(p_hwfn, QM_REG_RLPFINCVAL_RT_OFFSET + pf_id, inc_val);
+
        return 0;
 }
 
                              u8 num_vports,
                              struct init_qm_vport_params *vport_params)
 {
+       u16 vport_pq_id;
        u32 inc_val;
        u8 tc, i;
 
        /* Go over all PF VPORTs */
        for (i = 0; i < num_vports; i++) {
-
                if (!vport_params[i].vport_wfq)
                        continue;
 
                        return -1;
                }
 
-               /* each VPORT can have several VPORT PQ IDs for
-                * different TCs
-                */
+               /* Each VPORT can have several VPORT PQ IDs for various TCs */
                for (tc = 0; tc < NUM_OF_TCS; tc++) {
-                       u16 vport_pq_id = vport_params[i].first_tx_pq_id[tc];
-
+                       vport_pq_id = vport_params[i].first_tx_pq_id[tc];
                        if (vport_pq_id != QM_INVALID_PQ_ID) {
                                STORE_RT_REG(p_hwfn,
                                             QM_REG_WFQVPCRD_RT_OFFSET +
                                             vport_pq_id,
-                                            QM_WFQ_CRD_REG_SIGN_BIT);
+                                            (u32)QM_WFQ_CRD_REG_SIGN_BIT);
                                STORE_RT_REG(p_hwfn,
                                             QM_REG_WFQVPWEIGHT_RT_OFFSET +
                                             vport_pq_id, inc_val);
        return 0;
 }
 
+/* Prepare VPORT RL runtime init values for the specified VPORTs.
+ * Return -1 on error.
+ */
 static int qed_vport_rl_rt_init(struct qed_hwfn *p_hwfn,
                                u8 start_vport,
                                u8 num_vports,
+                               u32 link_speed,
                                struct init_qm_vport_params *vport_params)
 {
        u8 i, vport_id;
+       u32 inc_val;
 
        if (start_vport + num_vports >= MAX_QM_GLOBAL_RLS) {
                DP_NOTICE(p_hwfn,
 
        /* Go over all PF VPORTs */
        for (i = 0, vport_id = start_vport; i < num_vports; i++, vport_id++) {
-               u32 inc_val = QM_RL_INC_VAL(vport_params[i].vport_rl);
-
-               if (inc_val > QM_RL_MAX_INC_VAL) {
+               inc_val = QM_RL_INC_VAL(vport_params[i].vport_rl ?
+                         vport_params[i].vport_rl :
+                         link_speed);
+               if (inc_val > QM_VP_RL_MAX_INC_VAL(link_speed)) {
                        DP_NOTICE(p_hwfn,
                                  "Invalid VPORT rate-limit configuration\n");
                        return -1;
                }
 
-               STORE_RT_REG(p_hwfn,
-                            QM_REG_RLGLBLCRD_RT_OFFSET + vport_id,
-                            QM_RL_CRD_REG_SIGN_BIT);
+               STORE_RT_REG(p_hwfn, QM_REG_RLGLBLCRD_RT_OFFSET + vport_id,
+                            (u32)QM_RL_CRD_REG_SIGN_BIT);
                STORE_RT_REG(p_hwfn,
                             QM_REG_RLGLBLUPPERBOUND_RT_OFFSET + vport_id,
-                            QM_RL_UPPER_BOUND | QM_RL_CRD_REG_SIGN_BIT);
-               STORE_RT_REG(p_hwfn,
-                            QM_REG_RLGLBLINCVAL_RT_OFFSET + vport_id,
+                            QM_VP_RL_UPPER_BOUND(link_speed) |
+                            (u32)QM_RL_CRD_REG_SIGN_BIT);
+               STORE_RT_REG(p_hwfn, QM_REG_RLGLBLINCVAL_RT_OFFSET + vport_id,
                             inc_val);
        }
 
 {
        u32 reg_val, i;
 
-       for (i = 0, reg_val = 0; i < QM_STOP_CMD_MAX_POLL_COUNT && reg_val == 0;
+       for (i = 0, reg_val = 0; i < QM_STOP_CMD_MAX_POLL_COUNT && !reg_val;
             i++) {
                udelay(QM_STOP_CMD_POLL_PERIOD_US);
                reg_val = qed_rd(p_hwfn, p_ptt, QM_REG_SDMCMDREADY);
 }
 
 /******************** INTERFACE IMPLEMENTATION *********************/
-u32 qed_qm_pf_mem_size(u8 pf_id,
-                      u32 num_pf_cids,
+
+u32 qed_qm_pf_mem_size(u32 num_pf_cids,
                       u32 num_vf_cids,
                       u32 num_tids, u16 num_pf_pqs, u16 num_vf_pqs)
 {
               QM_PQ_MEM_4KB(num_pf_cids + num_tids) * QM_OTHER_PQS_PER_PF;
 }
 
-int qed_qm_common_rt_init(
-       struct qed_hwfn *p_hwfn,
-       struct qed_qm_common_rt_init_params *p_params)
+int qed_qm_common_rt_init(struct qed_hwfn *p_hwfn,
+                         struct qed_qm_common_rt_init_params *p_params)
 {
-       /* init AFullOprtnstcCrdMask */
+       /* Init AFullOprtnstcCrdMask */
        u32 mask = (QM_OPPOR_LINE_VOQ_DEF <<
                    QM_RF_OPPORTUNISTIC_MASK_LINEVOQ_SHIFT) |
                   (QM_BYTE_CRD_EN << QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ_SHIFT) |
                    QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY_SHIFT);
 
        STORE_RT_REG(p_hwfn, QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET, mask);
+
+       /* Enable/disable PF RL */
        qed_enable_pf_rl(p_hwfn, p_params->pf_rl_en);
+
+       /* Enable/disable PF WFQ */
        qed_enable_pf_wfq(p_hwfn, p_params->pf_wfq_en);
+
+       /* Enable/disable VPORT RL */
        qed_enable_vport_rl(p_hwfn, p_params->vport_rl_en);
+
+       /* Enable/disable VPORT WFQ */
        qed_enable_vport_wfq(p_hwfn, p_params->vport_wfq_en);
+
+       /* Init PBF CMDQ line credit */
        qed_cmdq_lines_rt_init(p_hwfn,
                               p_params->max_ports_per_engine,
                               p_params->max_phys_tcs_per_port,
                               p_params->port_params);
+
+       /* Init BTB blocks in PBF */
        qed_btb_blocks_rt_init(p_hwfn,
                               p_params->max_ports_per_engine,
                               p_params->max_phys_tcs_per_port,
                               p_params->port_params);
+
        return 0;
 }
 
                        vport_params[i].first_tx_pq_id[tc] = QM_INVALID_PQ_ID;
 
        /* Map Other PQs (if any) */
-       qed_other_pq_map_rt_init(p_hwfn, p_params->port_id, p_params->pf_id,
-                                p_params->num_pf_cids, p_params->num_tids, 0);
+       qed_other_pq_map_rt_init(p_hwfn,
+                                p_params->pf_id,
+                                p_params->is_pf_loading, p_params->num_pf_cids,
+                                p_params->num_tids, 0);
 
        /* Map Tx PQs */
        qed_tx_pq_map_rt_init(p_hwfn, p_ptt, p_params, other_mem_size_4kb);
 
+       /* Init PF WFQ */
        if (p_params->pf_wfq)
                if (qed_pf_wfq_rt_init(p_hwfn, p_params))
                        return -1;
 
+       /* Init PF RL */
        if (qed_pf_rl_rt_init(p_hwfn, p_params->pf_id, p_params->pf_rl))
                return -1;
 
+       /* Set VPORT WFQ */
        if (qed_vp_wfq_rt_init(p_hwfn, p_params->num_vports, vport_params))
                return -1;
 
+       /* Set VPORT RL */
        if (qed_vport_rl_rt_init(p_hwfn, p_params->start_vport,
-                                p_params->num_vports, vport_params))
+                                p_params->num_vports, p_params->link_speed,
+                                vport_params))
                return -1;
 
        return 0;
        }
 
        qed_wr(p_hwfn, p_ptt, QM_REG_WFQPFWEIGHT + pf_id * 4, inc_val);
+
        return 0;
 }
 
 {
        u32 inc_val = QM_RL_INC_VAL(pf_rl);
 
-       if (inc_val > QM_RL_MAX_INC_VAL) {
+       if (inc_val > QM_PF_RL_MAX_INC_VAL) {
                DP_NOTICE(p_hwfn, "Invalid PF rate limit configuration\n");
                return -1;
        }
 
-       qed_wr(p_hwfn, p_ptt,
-              QM_REG_RLPFCRD + pf_id * 4,
-              QM_RL_CRD_REG_SIGN_BIT);
+       qed_wr(p_hwfn,
+              p_ptt, QM_REG_RLPFCRD + pf_id * 4, (u32)QM_RL_CRD_REG_SIGN_BIT);
        qed_wr(p_hwfn, p_ptt, QM_REG_RLPFINCVAL + pf_id * 4, inc_val);
 
        return 0;
        for (tc = 0; tc < NUM_OF_TCS; tc++) {
                vport_pq_id = first_tx_pq_id[tc];
                if (vport_pq_id != QM_INVALID_PQ_ID)
-                       qed_wr(p_hwfn, p_ptt,
-                              QM_REG_WFQVPWEIGHT + vport_pq_id * 4,
-                              inc_val);
+                       qed_wr(p_hwfn,
+                              p_ptt,
+                              QM_REG_WFQVPWEIGHT + vport_pq_id * 4, inc_val);
        }
 
        return 0;
 }
 
 int qed_init_vport_rl(struct qed_hwfn *p_hwfn,
-                     struct qed_ptt *p_ptt, u8 vport_id, u32 vport_rl)
+                     struct qed_ptt *p_ptt,
+                     u8 vport_id, u32 vport_rl, u32 link_speed)
 {
-       u32 inc_val = QM_RL_INC_VAL(vport_rl);
+       u32 inc_val, max_qm_global_rls = MAX_QM_GLOBAL_RLS;
 
-       if (vport_id >= MAX_QM_GLOBAL_RLS) {
+       if (vport_id >= max_qm_global_rls) {
                DP_NOTICE(p_hwfn,
                          "Invalid VPORT ID for rate limiter configuration\n");
                return -1;
        }
 
-       if (inc_val > QM_RL_MAX_INC_VAL) {
+       inc_val = QM_RL_INC_VAL(vport_rl ? vport_rl : link_speed);
+       if (inc_val > QM_VP_RL_MAX_INC_VAL(link_speed)) {
                DP_NOTICE(p_hwfn, "Invalid VPORT rate-limit configuration\n");
                return -1;
        }
 
-       qed_wr(p_hwfn, p_ptt,
-              QM_REG_RLGLBLCRD + vport_id * 4,
-              QM_RL_CRD_REG_SIGN_BIT);
+       qed_wr(p_hwfn,
+              p_ptt,
+              QM_REG_RLGLBLCRD + vport_id * 4, (u32)QM_RL_CRD_REG_SIGN_BIT);
        qed_wr(p_hwfn, p_ptt, QM_REG_RLGLBLINCVAL + vport_id * 4, inc_val);
 
        return 0;
                          bool is_tx_pq, u16 start_pq, u16 num_pqs)
 {
        u32 cmd_arr[QM_CMD_STRUCT_SIZE(QM_STOP_CMD)] = { 0 };
-       u32 pq_mask = 0, last_pq = start_pq + num_pqs - 1, pq_id;
+       u32 pq_mask = 0, last_pq, pq_id;
+
+       last_pq = start_pq + num_pqs - 1;
 
        /* Set command's PQ type */
        QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD, PQ_TYPE, is_tx_pq ? 0 : 1);
 
+       /* Go over requested PQs */
        for (pq_id = start_pq; pq_id <= last_pq; pq_id++) {
                /* Set PQ bit in mask (stop command only) */
                if (!is_release_cmd)
-                       pq_mask |= (1 << (pq_id % QM_STOP_PQ_MASK_WIDTH));
+                       pq_mask |= BIT(pq_id % QM_STOP_PQ_MASK_WIDTH);
 
                /* If last PQ or end of PQ mask, write command */
                if ((pq_id == last_pq) ||
                    (pq_id % QM_STOP_PQ_MASK_WIDTH ==
                     (QM_STOP_PQ_MASK_WIDTH - 1))) {
-                       QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD,
-                                        PAUSE_MASK, pq_mask);
-                       QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD,
+                       QM_CMD_SET_FIELD(cmd_arr,
+                                        QM_STOP_CMD, PAUSE_MASK, pq_mask);
+                       QM_CMD_SET_FIELD(cmd_arr,
+                                        QM_STOP_CMD,
                                         GROUP_ID,
                                         pq_id / QM_STOP_PQ_MASK_WIDTH);
                        if (!qed_send_qm_cmd(p_hwfn, p_ptt, QM_STOP_CMD_ADDR,
        return true;
 }
 
-static void
-qed_set_tunnel_type_enable_bit(unsigned long *var, int bit, bool enable)
-{
-       if (enable)
-               set_bit(bit, var);
-       else
-               clear_bit(bit, var);
-}
 
+#define SET_TUNNEL_TYPE_ENABLE_BIT(var, offset, enable) \
+       do { \
+               typeof(var) *__p_var = &(var); \
+               typeof(offset) __offset = offset; \
+               *__p_var = (*__p_var & ~BIT(__offset)) | \
+                          ((enable) ? BIT(__offset) : 0); \
+       } while (0)
 #define PRS_ETH_TUNN_FIC_FORMAT        -188897008
 
 void qed_set_vxlan_dest_port(struct qed_hwfn *p_hwfn,
                             struct qed_ptt *p_ptt, u16 dest_port)
 {
+       /* Update PRS register */
        qed_wr(p_hwfn, p_ptt, PRS_REG_VXLAN_PORT, dest_port);
+
+       /* Update NIG register */
        qed_wr(p_hwfn, p_ptt, NIG_REG_VXLAN_CTRL, dest_port);
+
+       /* Update PBF register */
        qed_wr(p_hwfn, p_ptt, PBF_REG_VXLAN_PORT, dest_port);
 }
 
 void qed_set_vxlan_enable(struct qed_hwfn *p_hwfn,
                          struct qed_ptt *p_ptt, bool vxlan_enable)
 {
-       unsigned long reg_val = 0;
+       u32 reg_val;
        u8 shift;
 
+       /* Update PRS register */
        reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
        shift = PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_SHIFT;
-       qed_set_tunnel_type_enable_bit(&reg_val, shift, vxlan_enable);
-
+       SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, vxlan_enable);
        qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
-
        if (reg_val)
-               qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
-                      PRS_ETH_TUNN_FIC_FORMAT);
+               qed_wr(p_hwfn,
+                      p_ptt,
+                      PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
+                      (u32)PRS_ETH_TUNN_FIC_FORMAT);
 
+       /* Update NIG register */
        reg_val = qed_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE);
        shift = NIG_REG_ENC_TYPE_ENABLE_VXLAN_ENABLE_SHIFT;
-       qed_set_tunnel_type_enable_bit(&reg_val, shift, vxlan_enable);
-
+       SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, vxlan_enable);
        qed_wr(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE, reg_val);
 
-       qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_VXLAN_EN,
-              vxlan_enable ? 1 : 0);
+       /* Update DORQ register */
+       qed_wr(p_hwfn,
+              p_ptt, DORQ_REG_L2_EDPM_TUNNEL_VXLAN_EN, vxlan_enable ? 1 : 0);
 }
 
-void qed_set_gre_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+void qed_set_gre_enable(struct qed_hwfn *p_hwfn,
+                       struct qed_ptt *p_ptt,
                        bool eth_gre_enable, bool ip_gre_enable)
 {
-       unsigned long reg_val = 0;
+       u32 reg_val;
        u8 shift;
 
+       /* Update PRS register */
        reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
        shift = PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_SHIFT;
-       qed_set_tunnel_type_enable_bit(&reg_val, shift, eth_gre_enable);
-
+       SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, eth_gre_enable);
        shift = PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_SHIFT;
-       qed_set_tunnel_type_enable_bit(&reg_val, shift, ip_gre_enable);
+       SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, ip_gre_enable);
        qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
        if (reg_val)
-               qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
-                      PRS_ETH_TUNN_FIC_FORMAT);
+               qed_wr(p_hwfn,
+                      p_ptt,
+                      PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
+                      (u32)PRS_ETH_TUNN_FIC_FORMAT);
 
+       /* Update NIG register */
        reg_val = qed_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE);
        shift = NIG_REG_ENC_TYPE_ENABLE_ETH_OVER_GRE_ENABLE_SHIFT;
-       qed_set_tunnel_type_enable_bit(&reg_val, shift, eth_gre_enable);
-
+       SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, eth_gre_enable);
        shift = NIG_REG_ENC_TYPE_ENABLE_IP_OVER_GRE_ENABLE_SHIFT;
-       qed_set_tunnel_type_enable_bit(&reg_val, shift, ip_gre_enable);
+       SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, ip_gre_enable);
        qed_wr(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE, reg_val);
 
-       qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_GRE_ETH_EN,
-              eth_gre_enable ? 1 : 0);
-       qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_GRE_IP_EN,
-              ip_gre_enable ? 1 : 0);
+       /* Update DORQ registers */
+       qed_wr(p_hwfn,
+              p_ptt,
+              DORQ_REG_L2_EDPM_TUNNEL_GRE_ETH_EN, eth_gre_enable ? 1 : 0);
+       qed_wr(p_hwfn,
+              p_ptt, DORQ_REG_L2_EDPM_TUNNEL_GRE_IP_EN, ip_gre_enable ? 1 : 0);
 }
 
 void qed_set_geneve_dest_port(struct qed_hwfn *p_hwfn,
                              struct qed_ptt *p_ptt, u16 dest_port)
 {
+       /* Update PRS register */
        qed_wr(p_hwfn, p_ptt, PRS_REG_NGE_PORT, dest_port);
+
+       /* Update NIG register */
        qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_PORT, dest_port);
+
+       /* Update PBF register */
        qed_wr(p_hwfn, p_ptt, PBF_REG_NGE_PORT, dest_port);
 }
 
                           struct qed_ptt *p_ptt,
                           bool eth_geneve_enable, bool ip_geneve_enable)
 {
-       unsigned long reg_val = 0;
+       u32 reg_val;
        u8 shift;
 
+       /* Update PRS register */
        reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
        shift = PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_SHIFT;
-       qed_set_tunnel_type_enable_bit(&reg_val, shift, eth_geneve_enable);
-
+       SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, eth_geneve_enable);
        shift = PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_SHIFT;
-       qed_set_tunnel_type_enable_bit(&reg_val, shift, ip_geneve_enable);
-
+       SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, ip_geneve_enable);
        qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
        if (reg_val)
-               qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
-                      PRS_ETH_TUNN_FIC_FORMAT);
+               qed_wr(p_hwfn,
+                      p_ptt,
+                      PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
+                      (u32)PRS_ETH_TUNN_FIC_FORMAT);
 
+       /* Update NIG register */
        qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_ETH_ENABLE,
               eth_geneve_enable ? 1 : 0);
        qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_IP_ENABLE, ip_geneve_enable ? 1 : 0);
 
-       /* EDPM with geneve tunnel not supported in BB_B0 */
+       /* EDPM with geneve tunnel not supported in BB */
        if (QED_IS_BB_B0(p_hwfn->cdev))
                return;
 
-       qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN_K2_E5,
+       /* Update DORQ registers */
+       qed_wr(p_hwfn,
+              p_ptt,
+              DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN_K2_E5,
               eth_geneve_enable ? 1 : 0);
-       qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN_K2_E5,
+       qed_wr(p_hwfn,
+              p_ptt,
+              DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN_K2_E5,
               ip_geneve_enable ? 1 : 0);
 }
 
 #define RAM_LINE_SIZE sizeof(u64)
 #define REG_SIZE sizeof(u32)
 
-void qed_set_rfs_mode_disable(struct qed_hwfn *p_hwfn,
-                             struct qed_ptt *p_ptt, u16 pf_id)
+void qed_gft_disable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 pf_id)
 {
-       u32 hw_addr = PRS_REG_GFT_PROFILE_MASK_RAM +
-                     pf_id * RAM_LINE_SIZE;
-
-       /*stop using gft logic */
+       /* Disable gft search for PF */
        qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 0);
-       qed_wr(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT, 0x0);
+
+       /* Clean ram & cam for next gft session */
+
+       /* Zero camline */
        qed_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id, 0);
-       qed_wr(p_hwfn, p_ptt, hw_addr, 0);
-       qed_wr(p_hwfn, p_ptt, hw_addr + 4, 0);
+
+       /* Zero ramline */
+       qed_wr(p_hwfn,
+              p_ptt, PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id, 0);
+       qed_wr(p_hwfn,
+              p_ptt,
+              PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id + REG_SIZE,
+              0);
 }
 
-void qed_set_rfs_mode_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
-                            u16 pf_id, bool tcp, bool udp,
-                            bool ipv4, bool ipv6)
+void qed_set_gft_event_id_cm_hdr(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 {
-       union gft_cam_line_union camline;
-       struct gft_ram_line ramline;
        u32 rfs_cm_hdr_event_id;
 
+       /* Set RFS event ID to be awakened in Tstorm by PRS */
        rfs_cm_hdr_event_id = qed_rd(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT);
+       rfs_cm_hdr_event_id |= T_ETH_PACKET_ACTION_GFT_EVENTID <<
+                              PRS_REG_CM_HDR_GFT_EVENT_ID_SHIFT;
+       rfs_cm_hdr_event_id |= PARSER_ETH_CONN_GFT_ACTION_CM_HDR <<
+                              PRS_REG_CM_HDR_GFT_CM_HDR_SHIFT;
+       qed_wr(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT, rfs_cm_hdr_event_id);
+}
+
+void qed_gft_config(struct qed_hwfn *p_hwfn,
+                   struct qed_ptt *p_ptt,
+                   u16 pf_id,
+                   bool tcp,
+                   bool udp,
+                   bool ipv4, bool ipv6, enum gft_profile_type profile_type)
+{
+       u32 reg_val, cam_line, ram_line_lo, ram_line_hi;
 
        if (!ipv6 && !ipv4)
                DP_NOTICE(p_hwfn,
-                         "set_rfs_mode_enable: must accept at least on of - ipv4 or ipv6");
+                         "gft_config: must accept at least one of - ipv4 or ipv6\n");
        if (!tcp && !udp)
                DP_NOTICE(p_hwfn,
-                         "set_rfs_mode_enable: must accept at least on of - udp or tcp");
+                         "gft_config: must accept at least one of - udp or tcp\n");
+       if (profile_type >= MAX_GFT_PROFILE_TYPE)
+               DP_NOTICE(p_hwfn, "gft_config: unsupported gft_profile_type\n");
 
-       rfs_cm_hdr_event_id |= T_ETH_PACKET_MATCH_RFS_EVENTID <<
-                                       PRS_REG_CM_HDR_GFT_EVENT_ID_SHIFT;
-       rfs_cm_hdr_event_id |= PARSER_ETH_CONN_CM_HDR <<
-                                       PRS_REG_CM_HDR_GFT_CM_HDR_SHIFT;
-       qed_wr(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT, rfs_cm_hdr_event_id);
+       /* Set RFS event ID to be awakened in Tstorm by PRS */
+       reg_val = T_ETH_PACKET_MATCH_RFS_EVENTID <<
+                 PRS_REG_CM_HDR_GFT_EVENT_ID_SHIFT;
+       reg_val |= PARSER_ETH_CONN_CM_HDR << PRS_REG_CM_HDR_GFT_CM_HDR_SHIFT;
+       qed_wr(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT, reg_val);
 
-       /* Configure Registers for RFS mode */
-       qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 1);
+       /* Do not load context, only cid, in PRS on match. */
        qed_wr(p_hwfn, p_ptt, PRS_REG_LOAD_L2_FILTER, 0);
-       camline.cam_line_mapped.camline = 0;
 
-       /* Cam line is now valid!! */
-       SET_FIELD(camline.cam_line_mapped.camline,
-                 GFT_CAM_LINE_MAPPED_VALID, 1);
+       /* Do not use tenant ID exist bit for gft search */
+       qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TENANT_ID, 0);
+
+       /* Set Cam */
+       cam_line = 0;
+       SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_VALID, 1);
 
-       /* filters are per PF!! */
-       SET_FIELD(camline.cam_line_mapped.camline,
+       /* Filters are per PF!! */
+       SET_FIELD(cam_line,
                  GFT_CAM_LINE_MAPPED_PF_ID_MASK,
                  GFT_CAM_LINE_MAPPED_PF_ID_MASK_MASK);
-       SET_FIELD(camline.cam_line_mapped.camline,
-                 GFT_CAM_LINE_MAPPED_PF_ID, pf_id);
+       SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_PF_ID, pf_id);
+
        if (!(tcp && udp)) {
-               SET_FIELD(camline.cam_line_mapped.camline,
+               SET_FIELD(cam_line,
                          GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK,
                          GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK_MASK);
                if (tcp)
-                       SET_FIELD(camline.cam_line_mapped.camline,
+                       SET_FIELD(cam_line,
                                  GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE,
                                  GFT_PROFILE_TCP_PROTOCOL);
                else
-                       SET_FIELD(camline.cam_line_mapped.camline,
+                       SET_FIELD(cam_line,
                                  GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE,
                                  GFT_PROFILE_UDP_PROTOCOL);
        }
 
        if (!(ipv4 && ipv6)) {
-               SET_FIELD(camline.cam_line_mapped.camline,
-                         GFT_CAM_LINE_MAPPED_IP_VERSION_MASK, 1);
+               SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_IP_VERSION_MASK, 1);
                if (ipv4)
-                       SET_FIELD(camline.cam_line_mapped.camline,
+                       SET_FIELD(cam_line,
                                  GFT_CAM_LINE_MAPPED_IP_VERSION,
                                  GFT_PROFILE_IPV4);
                else
-                       SET_FIELD(camline.cam_line_mapped.camline,
+                       SET_FIELD(cam_line,
                                  GFT_CAM_LINE_MAPPED_IP_VERSION,
                                  GFT_PROFILE_IPV6);
        }
 
        /* Write characteristics to cam */
        qed_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id,
-              camline.cam_line_mapped.camline);
-       camline.cam_line_mapped.camline = qed_rd(p_hwfn, p_ptt,
-                                                PRS_REG_GFT_CAM +
-                                                CAM_LINE_SIZE * pf_id);
+              cam_line);
+       cam_line =
+           qed_rd(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id);
 
        /* Write line to RAM - compare to filter 4 tuple */
-       ramline.lo = 0;
-       ramline.hi = 0;
-       SET_FIELD(ramline.hi, GFT_RAM_LINE_DST_IP, 1);
-       SET_FIELD(ramline.hi, GFT_RAM_LINE_SRC_IP, 1);
-       SET_FIELD(ramline.hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1);
-       SET_FIELD(ramline.lo, GFT_RAM_LINE_ETHERTYPE, 1);
-       SET_FIELD(ramline.lo, GFT_RAM_LINE_SRC_PORT, 1);
-       SET_FIELD(ramline.lo, GFT_RAM_LINE_DST_PORT, 1);
-
-       /* Each iteration write to reg */
-       qed_wr(p_hwfn, p_ptt,
+       ram_line_lo = 0;
+       ram_line_hi = 0;
+
+       if (profile_type == GFT_PROFILE_TYPE_4_TUPLE) {
+               SET_FIELD(ram_line_hi, GFT_RAM_LINE_DST_IP, 1);
+               SET_FIELD(ram_line_hi, GFT_RAM_LINE_SRC_IP, 1);
+               SET_FIELD(ram_line_hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1);
+               SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1);
+               SET_FIELD(ram_line_lo, GFT_RAM_LINE_SRC_PORT, 1);
+               SET_FIELD(ram_line_lo, GFT_RAM_LINE_DST_PORT, 1);
+       } else if (profile_type == GFT_PROFILE_TYPE_L4_DST_PORT) {
+               SET_FIELD(ram_line_hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1);
+               SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1);
+               SET_FIELD(ram_line_lo, GFT_RAM_LINE_DST_PORT, 1);
+       } else if (profile_type == GFT_PROFILE_TYPE_IP_DST_PORT) {
+               SET_FIELD(ram_line_hi, GFT_RAM_LINE_DST_IP, 1);
+               SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1);
+       }
+
+       qed_wr(p_hwfn,
+              p_ptt,
               PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id,
-              ramline.lo);
-       qed_wr(p_hwfn, p_ptt,
-              PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id + 4,
-              ramline.hi);
+              ram_line_lo);
+       qed_wr(p_hwfn,
+              p_ptt,
+              PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id + REG_SIZE,
+              ram_line_hi);
 
        /* Set default profile so that no filter match will happen */
-       qed_wr(p_hwfn, p_ptt,
-              PRS_REG_GFT_PROFILE_MASK_RAM +
-              RAM_LINE_SIZE * PRS_GFT_CAM_LINES_NO_MATCH,
-              ramline.lo);
-       qed_wr(p_hwfn, p_ptt,
-              PRS_REG_GFT_PROFILE_MASK_RAM +
-              RAM_LINE_SIZE * PRS_GFT_CAM_LINES_NO_MATCH + 4,
-              ramline.hi);
+       qed_wr(p_hwfn,
+              p_ptt,
+              PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE *
+              PRS_GFT_CAM_LINES_NO_MATCH, 0xffffffff);
+       qed_wr(p_hwfn,
+              p_ptt,
+              PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE *
+              PRS_GFT_CAM_LINES_NO_MATCH + REG_SIZE, 0x3ff);
+
+       /* Enable gft search */
+       qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 1);
+}
+
+DECLARE_CRC8_TABLE(cdu_crc8_table);
+
+/* Calculate and return CDU validation byte per connection type/region/cid */
+static u8 qed_calc_cdu_validation_byte(u8 conn_type, u8 region, u32 cid)
+{
+       const u8 validation_cfg = CDU_VALIDATION_DEFAULT_CFG;
+       u8 crc, validation_byte = 0;
+       static u8 crc8_table_valid; /* automatically initialized to 0 */
+       u32 validation_string = 0;
+       u32 data_to_crc;
+
+       if (!crc8_table_valid) {
+               crc8_populate_msb(cdu_crc8_table, 0x07);
+               crc8_table_valid = 1;
+       }
+
+       /* The CRC is calculated on the String-to-compress:
+        * [31:8]  = {CID[31:20],CID[11:0]}
+        * [7:4]   = Region
+        * [3:0]   = Type
+        */
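+       /* Hedged worked example (illustrative values only, assuming all
+        * USE_* bits are set in the validation config): for cid 0x12345678,
+        * region 3 and type 7, the string-to-compress is
+        * (0x12345678 & 0xFFF00000) | ((0x12345678 & 0xFFF) << 8) |
+        * (3 << 4) | 7 = 0x12367837, i.e. {CID[31:20], CID[11:0]} = 0x123678
+        * in bits [31:8], region in [7:4] and type in [3:0].
+        */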
+       if ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_USE_CID) & 1)
+               validation_string |= (cid & 0xFFF00000) | ((cid & 0xFFF) << 8);
+
+       if ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_USE_REGION) & 1)
+               validation_string |= ((region & 0xF) << 4);
+
+       if ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_USE_TYPE) & 1)
+               validation_string |= (conn_type & 0xF);
+
+       /* Convert to big-endian and calculate CRC8 */
+       data_to_crc = be32_to_cpu(validation_string);
+
+       crc = crc8(cdu_crc8_table,
+                  (u8 *)&data_to_crc, sizeof(data_to_crc), CRC8_INIT_VALUE);
+
+       /* The validation byte [7:0] is composed:
+        * for type A validation
+        * [7]          = active configuration bit
+        * [6:0]        = crc[6:0]
+        *
+        * for type B validation
+        * [7]          = active configuration bit
+        * [6:3]        = connection_type[3:0]
+        * [2:0]        = crc[2:0]
+        */
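+       /* Hedged example (illustrative values only): for type B validation
+        * with the active configuration bit set, conn_type 0x2 and crc 0x6D,
+        * the byte is 0x80 | (0x2 << 3) | (0x6D & 0x7) = 0x95.
+        */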
+       validation_byte |=
+           ((validation_cfg >>
+             CDU_CONTEXT_VALIDATION_CFG_USE_ACTIVE) & 1) << 7;
+
+       if ((validation_cfg >>
+            CDU_CONTEXT_VALIDATION_CFG_VALIDATION_TYPE_SHIFT) & 1)
+               validation_byte |= ((conn_type & 0xF) << 3) | (crc & 0x7);
+       else
+               validation_byte |= crc & 0x7F;
+
+       return validation_byte;
+}
+
+/* Calculate and set validation bytes for session context */
+void qed_calc_session_ctx_validation(void *p_ctx_mem,
+                                    u16 ctx_size, u8 ctx_type, u32 cid)
+{
+       u8 *x_val_ptr, *t_val_ptr, *u_val_ptr, *p_ctx;
+
+       p_ctx = (u8 * const)p_ctx_mem;
+       x_val_ptr = &p_ctx[con_region_offsets[0][ctx_type]];
+       t_val_ptr = &p_ctx[con_region_offsets[1][ctx_type]];
+       u_val_ptr = &p_ctx[con_region_offsets[2][ctx_type]];
+
+       memset(p_ctx, 0, ctx_size);
+
+       *x_val_ptr = qed_calc_cdu_validation_byte(ctx_type, 3, cid);
+       *t_val_ptr = qed_calc_cdu_validation_byte(ctx_type, 4, cid);
+       *u_val_ptr = qed_calc_cdu_validation_byte(ctx_type, 5, cid);
+}
+
+/* Calculate and set validation bytes for task context */
+void qed_calc_task_ctx_validation(void *p_ctx_mem,
+                                 u16 ctx_size, u8 ctx_type, u32 tid)
+{
+       u8 *p_ctx, *region1_val_ptr;
+
+       p_ctx = (u8 * const)p_ctx_mem;
+       region1_val_ptr = &p_ctx[task_region_offsets[0][ctx_type]];
+
+       memset(p_ctx, 0, ctx_size);
+
+       *region1_val_ptr = qed_calc_cdu_validation_byte(ctx_type, 1, tid);
+}
+
+/* Memset session context to 0 while preserving validation bytes */
+void qed_memset_session_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type)
+{
+       u8 *x_val_ptr, *t_val_ptr, *u_val_ptr, *p_ctx;
+       u8 x_val, t_val, u_val;
+
+       p_ctx = (u8 * const)p_ctx_mem;
+       x_val_ptr = &p_ctx[con_region_offsets[0][ctx_type]];
+       t_val_ptr = &p_ctx[con_region_offsets[1][ctx_type]];
+       u_val_ptr = &p_ctx[con_region_offsets[2][ctx_type]];
+
+       x_val = *x_val_ptr;
+       t_val = *t_val_ptr;
+       u_val = *u_val_ptr;
+
+       memset(p_ctx, 0, ctx_size);
+
+       *x_val_ptr = x_val;
+       *t_val_ptr = t_val;
+       *u_val_ptr = u_val;
+}
+
+/* Memset task context to 0 while preserving validation bytes */
+void qed_memset_task_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type)
+{
+       u8 *p_ctx, *region1_val_ptr;
+       u8 region1_val;
+
+       p_ctx = (u8 * const)p_ctx_mem;
+       region1_val_ptr = &p_ctx[task_region_offsets[0][ctx_type]];
+
+       region1_val = *region1_val_ptr;
+
+       memset(p_ctx, 0, ctx_size);
+
+       *region1_val_ptr = region1_val;
+}
+
+/* Enable and configure context validation */
+void qed_enable_context_validation(struct qed_hwfn *p_hwfn,
+                                  struct qed_ptt *p_ptt)
+{
+       u32 ctx_validation;
+
+       /* Enable validation for connection region 3: CCFC_CTX_VALID0[31:24] */
+       ctx_validation = CDU_VALIDATION_DEFAULT_CFG << 24;
+       qed_wr(p_hwfn, p_ptt, CDU_REG_CCFC_CTX_VALID0, ctx_validation);
+
+       /* Enable validation for connection region 5: CCFC_CTX_VALID1[15:8] */
+       ctx_validation = CDU_VALIDATION_DEFAULT_CFG << 8;
+       qed_wr(p_hwfn, p_ptt, CDU_REG_CCFC_CTX_VALID1, ctx_validation);
+
+       /* Enable validation for connection region 1: TCFC_CTX_VALID0[15:8] */
+       ctx_validation = CDU_VALIDATION_DEFAULT_CFG << 8;
+       qed_wr(p_hwfn, p_ptt, CDU_REG_TCFC_CTX_VALID0, ctx_validation);
 }
 
 }
 
 /* init_ops callbacks entry point */
-static void qed_init_cmd_cb(struct qed_hwfn *p_hwfn,
-                           struct qed_ptt *p_ptt,
-                           struct init_callback_op *p_cmd)
+static int qed_init_cmd_cb(struct qed_hwfn *p_hwfn,
+                          struct qed_ptt *p_ptt,
+                          struct init_callback_op *p_cmd)
 {
-       DP_NOTICE(p_hwfn, "Currently init values have no need of callbacks\n");
+       int rc;
+
+       switch (p_cmd->callback_id) {
+       case DMAE_READY_CB:
+               rc = qed_dmae_sanity(p_hwfn, p_ptt, "engine_phase");
+               break;
+       default:
+               DP_NOTICE(p_hwfn, "Unexpected init op callback ID %d\n",
+                         p_cmd->callback_id);
+               return -EINVAL;
+       }
+
+       return rc;
 }
 
 static u8 qed_init_cmd_mode_match(struct qed_hwfn *p_hwfn,
                        break;
 
                case INIT_OP_CALLBACK:
-                       qed_init_cmd_cb(p_hwfn, p_ptt, &cmd->callback);
+                       rc = qed_init_cmd_cb(p_hwfn, p_ptt, &cmd->callback);
                        break;
                }
 
 
        u8 local_mac[6];
        u8 remote_mac[6];
        u16 vlan_id;
-       u8 tcp_flags;
+       u16 tcp_flags;
        u8 ip_version;
        u32 remote_ip[4];
        u32 local_ip[4];
        u32 ss_thresh;
        u16 srtt;
        u16 rtt_var;
-       u32 ts_time;
        u32 ts_recent;
        u32 ts_recent_age;
        u32 total_rt;
        u16 mss;
        u8 snd_wnd_scale;
        u8 rcv_wnd_scale;
-       u32 ts_ticks_per_second;
        u16 da_timeout_value;
        u8 ack_frequency;
 
        p_init->num_sq_pages_in_ring = p_params->num_sq_pages_in_ring;
        p_init->num_r2tq_pages_in_ring = p_params->num_r2tq_pages_in_ring;
        p_init->num_uhq_pages_in_ring = p_params->num_uhq_pages_in_ring;
-       p_init->ooo_enable = p_params->ooo_enable;
        p_init->ll2_rx_queue_id = p_hwfn->hw_info.resc_start[QED_LL2_QUEUE] +
                                  p_params->ll2_ooo_queue_id;
+
        p_init->func_params.log_page_size = p_params->log_page_size;
        val = p_params->num_tasks;
        p_init->func_params.num_tasks = cpu_to_le16(val);
 
                p_tcp->vlan_id = cpu_to_le16(p_conn->vlan_id);
 
-               p_tcp->flags = p_conn->tcp_flags;
+               p_tcp->flags = cpu_to_le16(p_conn->tcp_flags);
                p_tcp->ip_version = p_conn->ip_version;
                for (i = 0; i < 4; i++) {
                        dval = p_conn->remote_ip[i];
                p_tcp2->remote_mac_addr_lo = swab16(get_unaligned(p + 2));
 
                p_tcp2->vlan_id = cpu_to_le16(p_conn->vlan_id);
-               p_tcp2->flags = p_conn->tcp_flags;
+               p_tcp2->flags = cpu_to_le16(p_conn->tcp_flags);
 
                p_tcp2->ip_version = p_conn->ip_version;
                for (i = 0; i < 4; i++) {
                p_tcp2->syn_ip_payload_length = cpu_to_le16(wval);
                p_tcp2->syn_phy_addr_lo = DMA_LO_LE(p_conn->syn_phy_addr);
                p_tcp2->syn_phy_addr_hi = DMA_HI_LE(p_conn->syn_phy_addr);
+               p_tcp2->cwnd = cpu_to_le32(p_conn->cwnd);
+               p_tcp2->ka_max_probe_cnt = p_conn->ka_probe_cnt;
+               p_tcp2->ka_timeout = cpu_to_le32(p_conn->ka_timeout);
+               p_tcp2->max_rt_time = cpu_to_le32(p_conn->max_rt_time);
+               p_tcp2->ka_interval = cpu_to_le32(p_conn->ka_interval);
        }
 
        return qed_spq_post(p_hwfn, p_ent, NULL);
        }
 }
 
-static int qed_iscsi_setup_connection(struct qed_hwfn *p_hwfn,
-                                     struct qed_iscsi_conn *p_conn)
+static int qed_iscsi_setup_connection(struct qed_iscsi_conn *p_conn)
 {
        if (!p_conn->queue_cnts_virt_addr)
                goto nomem;
                rc = qed_iscsi_allocate_connection(p_hwfn, &p_conn);
 
        if (!rc)
-               rc = qed_iscsi_setup_connection(p_hwfn, p_conn);
+               rc = qed_iscsi_setup_connection(p_conn);
 
        if (rc) {
                spin_lock_bh(&p_hwfn->p_iscsi_info->lock);
        con->ss_thresh = conn_info->ss_thresh;
        con->srtt = conn_info->srtt;
        con->rtt_var = conn_info->rtt_var;
-       con->ts_time = conn_info->ts_time;
        con->ts_recent = conn_info->ts_recent;
        con->ts_recent_age = conn_info->ts_recent_age;
        con->total_rt = conn_info->total_rt;
        con->mss = conn_info->mss;
        con->snd_wnd_scale = conn_info->snd_wnd_scale;
        con->rcv_wnd_scale = conn_info->rcv_wnd_scale;
-       con->ts_ticks_per_second = conn_info->ts_ticks_per_second;
        con->da_timeout_value = conn_info->da_timeout_value;
        con->ack_frequency = conn_info->ack_frequency;
 
 
 
 #define QED_IWARP_INVALID_TCP_CID      0xffffffff
 #define QED_IWARP_RCV_WND_SIZE_DEF     (256 * 1024)
-#define QED_IWARP_RCV_WND_SIZE_MIN     (64 * 1024)
+#define QED_IWARP_RCV_WND_SIZE_MIN     (0xffff)
 #define TIMESTAMP_HEADER_SIZE          (12)
+#define QED_IWARP_MAX_FIN_RT_DEFAULT   (2)
 
 #define QED_IWARP_TS_EN                        BIT(0)
 #define QED_IWARP_DA_EN                        BIT(1)
 #define QED_IWARP_PARAM_CRC_NEEDED     (1)
 #define QED_IWARP_PARAM_P2P            (1)
 
+#define QED_IWARP_DEF_MAX_RT_TIME      (0)
+#define QED_IWARP_DEF_CWND_FACTOR      (4)
+#define QED_IWARP_DEF_KA_MAX_PROBE_CNT (5)
+#define QED_IWARP_DEF_KA_TIMEOUT       (1200000)       /* 20 min */
+#define QED_IWARP_DEF_KA_INTERVAL      (1000)          /* 1 sec */
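+/* The keepalive defaults above are assumed to be expressed in msec
+ * (1200000 msec = 20 min, 1000 msec = 1 sec).
+ */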
+
 static int qed_iwarp_async_event(struct qed_hwfn *p_hwfn,
                                 u8 fw_event_code, u16 echo,
                                 union event_ring_data *data,
        spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
 }
 
-void qed_iwarp_init_fw_ramrod(struct qed_hwfn *p_hwfn,
-                             struct iwarp_init_func_params *p_ramrod)
+void
+qed_iwarp_init_fw_ramrod(struct qed_hwfn *p_hwfn,
+                        struct iwarp_init_func_ramrod_data *p_ramrod)
 {
-       p_ramrod->ll2_ooo_q_index = RESC_START(p_hwfn, QED_LL2_QUEUE) +
-                                   p_hwfn->p_rdma_info->iwarp.ll2_ooo_handle;
+       p_ramrod->iwarp.ll2_ooo_q_index =
+               RESC_START(p_hwfn, QED_LL2_QUEUE) +
+               p_hwfn->p_rdma_info->iwarp.ll2_ooo_handle;
+
+       p_ramrod->tcp.max_fin_rt = QED_IWARP_MAX_FIN_RT_DEFAULT;
 }
 
 static int qed_iwarp_alloc_cid(struct qed_hwfn *p_hwfn, u32 *cid)
        tcp->ttl = 0x40;
        tcp->tos_or_tc = 0;
 
+       tcp->max_rt_time = QED_IWARP_DEF_MAX_RT_TIME;
+       tcp->cwnd = QED_IWARP_DEF_CWND_FACTOR * tcp->mss;
+       tcp->ka_max_probe_cnt = QED_IWARP_DEF_KA_MAX_PROBE_CNT;
+       tcp->ka_timeout = QED_IWARP_DEF_KA_TIMEOUT;
+       tcp->ka_interval = QED_IWARP_DEF_KA_INTERVAL;
+
        tcp->rcv_wnd_scale = (u8)p_hwfn->p_rdma_info->iwarp.rcv_wnd_scale;
        tcp->connect_mode = ep->connect_mode;
 
 qed_iwarp_mpa_offload(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
 {
        struct iwarp_mpa_offload_ramrod_data *p_mpa_ramrod;
+       struct qed_iwarp_info *iwarp_info;
        struct qed_sp_init_data init_data;
        dma_addr_t async_output_phys;
        struct qed_spq_entry *p_ent;
                p_mpa_ramrod->common.reject = 1;
        }
 
+       iwarp_info = &p_hwfn->p_rdma_info->iwarp;
+       p_mpa_ramrod->rcv_wnd = iwarp_info->rcv_wnd_size;
        p_mpa_ramrod->mode = ep->mpa_rev;
        SET_FIELD(p_mpa_ramrod->rtr_pref,
                  IWARP_MPA_OFFLOAD_RAMROD_DATA_RTR_SUPPORTED, ep->rtr_type);
        /* value 0 is used for ilog2(QED_IWARP_RCV_WND_SIZE_MIN) */
        iwarp_info->rcv_wnd_scale = ilog2(rcv_wnd_size) -
            ilog2(QED_IWARP_RCV_WND_SIZE_MIN);
+       iwarp_info->rcv_wnd_size = rcv_wnd_size >> iwarp_info->rcv_wnd_scale;
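+       /* E.g. (assuming the default 256KB window): rcv_wnd_scale =
+        * ilog2(256 * 1024) - ilog2(0xffff) = 18 - 15 = 3, so the scaled
+        * rcv_wnd_size is (256 * 1024) >> 3 = 0x8000, which fits in 16 bits.
+        */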
        iwarp_info->crc_needed = QED_IWARP_PARAM_CRC_NEEDED;
        iwarp_info->mpa_rev = MPA_NEGOTIATION_TYPE_ENHANCED;
 
 
        spinlock_t iw_lock;     /* for iwarp resources */
        spinlock_t qp_lock;     /* for teardown races */
        u32 rcv_wnd_scale;
+       u16 rcv_wnd_size;
        u16 max_mtu;
        u8 mac_addr[ETH_ALEN];
        u8 crc_needed;
                    struct qed_rdma_start_in_params *params);
 
 void qed_iwarp_init_fw_ramrod(struct qed_hwfn *p_hwfn,
-                             struct iwarp_init_func_params *p_ramrod);
+                             struct iwarp_init_func_ramrod_data *p_ramrod);
 
 int qed_iwarp_stop(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
 
 
                _qed_get_vport_stats(cdev, cdev->reset_stats);
 }
 
-static void
-qed_arfs_mode_configure(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
-                       struct qed_arfs_config_params *p_cfg_params)
-{
-       if (p_cfg_params->arfs_enable) {
-               qed_set_rfs_mode_enable(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
-                                       p_cfg_params->tcp, p_cfg_params->udp,
-                                       p_cfg_params->ipv4, p_cfg_params->ipv6);
-               DP_VERBOSE(p_hwfn, QED_MSG_SP,
-                          "tcp = %s, udp = %s, ipv4 = %s, ipv6 =%s\n",
+static enum gft_profile_type
+qed_arfs_mode_to_hsi(enum qed_filter_config_mode mode)
+{
+       if (mode == QED_FILTER_CONFIG_MODE_5_TUPLE)
+               return GFT_PROFILE_TYPE_4_TUPLE;
+       if (mode == QED_FILTER_CONFIG_MODE_IP_DEST)
+               return GFT_PROFILE_TYPE_IP_DST_PORT;
+       return GFT_PROFILE_TYPE_L4_DST_PORT;
+}
+
+void qed_arfs_mode_configure(struct qed_hwfn *p_hwfn,
+                            struct qed_ptt *p_ptt,
+                            struct qed_arfs_config_params *p_cfg_params)
+{
+       if (p_cfg_params->mode != QED_FILTER_CONFIG_MODE_DISABLE) {
+               qed_gft_config(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
+                              p_cfg_params->tcp,
+                              p_cfg_params->udp,
+                              p_cfg_params->ipv4,
+                              p_cfg_params->ipv6,
+                              qed_arfs_mode_to_hsi(p_cfg_params->mode));
+               DP_VERBOSE(p_hwfn,
+                          QED_MSG_SP,
+                          "Configured Filtering: tcp = %s, udp = %s, ipv4 = %s, ipv6 = %s, mode = %08x\n",
                           p_cfg_params->tcp ? "Enable" : "Disable",
                           p_cfg_params->udp ? "Enable" : "Disable",
                           p_cfg_params->ipv4 ? "Enable" : "Disable",
-                          p_cfg_params->ipv6 ? "Enable" : "Disable");
+                          p_cfg_params->ipv6 ? "Enable" : "Disable",
+                          (u32)p_cfg_params->mode);
        } else {
-               qed_set_rfs_mode_disable(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
+               DP_VERBOSE(p_hwfn, QED_MSG_SP, "Disabled Filtering\n");
+               qed_gft_disable(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
        }
-
-       DP_VERBOSE(p_hwfn, QED_MSG_SP, "Configured ARFS mode : %s\n",
-                  p_cfg_params->arfs_enable ? "Enable" : "Disable");
 }
 
-static int
-qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+int
+qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn,
                                struct qed_spq_comp_cb *p_cb,
-                               dma_addr_t p_addr, u16 length, u16 qid,
-                               u8 vport_id, bool b_is_add)
+                               struct qed_ntuple_filter_params *p_params)
 {
        struct rx_update_gft_filter_data *p_ramrod = NULL;
        struct qed_spq_entry *p_ent = NULL;
        u8 abs_vport_id = 0;
        int rc = -EINVAL;
 
-       rc = qed_fw_vport(p_hwfn, vport_id, &abs_vport_id);
+       rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
        if (rc)
                return rc;
 
-       rc = qed_fw_l2_queue(p_hwfn, qid, &abs_rx_q_id);
-       if (rc)
-               return rc;
+       if (p_params->qid != QED_RFS_NTUPLE_QID_RSS) {
+               rc = qed_fw_l2_queue(p_hwfn, p_params->qid, &abs_rx_q_id);
+               if (rc)
+                       return rc;
+       }
 
        /* Get SPQ entry */
        memset(&init_data, 0, sizeof(init_data));
                return rc;
 
        p_ramrod = &p_ent->ramrod.rx_update_gft;
-       DMA_REGPAIR_LE(p_ramrod->pkt_hdr_addr, p_addr);
-       p_ramrod->pkt_hdr_length = cpu_to_le16(length);
-       p_ramrod->rx_qid_or_action_icid = cpu_to_le16(abs_rx_q_id);
-       p_ramrod->vport_id = abs_vport_id;
-       p_ramrod->filter_type = RFS_FILTER_TYPE;
-       p_ramrod->filter_action = b_is_add ? GFT_ADD_FILTER : GFT_DELETE_FILTER;
+
+       DMA_REGPAIR_LE(p_ramrod->pkt_hdr_addr, p_params->addr);
+       p_ramrod->pkt_hdr_length = cpu_to_le16(p_params->length);
+
+       if (p_params->qid != QED_RFS_NTUPLE_QID_RSS) {
+               p_ramrod->rx_qid_valid = 1;
+               p_ramrod->rx_qid = cpu_to_le16(abs_rx_q_id);
+       }
+
+       p_ramrod->flow_id_valid = 0;
+       p_ramrod->flow_id = 0;
+
+       p_ramrod->vport_id = cpu_to_le16((u16)abs_vport_id);
+       p_ramrod->filter_action = p_params->b_is_add ? GFT_ADD_FILTER
+           : GFT_DELETE_FILTER;
 
        DP_VERBOSE(p_hwfn, QED_MSG_SP,
                   "V[%0x], Q[%04x] - %s filter from 0x%llx [length %04xb]\n",
                   abs_vport_id, abs_rx_q_id,
-                  b_is_add ? "Adding" : "Removing", (u64)p_addr, length);
+                  p_params->b_is_add ? "Adding" : "Removing",
+                  (u64)p_params->addr, p_params->length);
 
        return qed_spq_post(p_hwfn, p_ent, NULL);
 }
        }
 }
 
-static int qed_configure_arfs_searcher(struct qed_dev *cdev, bool en_searcher)
+static int qed_configure_arfs_searcher(struct qed_dev *cdev,
+                                      enum qed_filter_config_mode mode)
 {
        struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
        struct qed_arfs_config_params arfs_config_params;
        arfs_config_params.udp = true;
        arfs_config_params.ipv4 = true;
        arfs_config_params.ipv6 = true;
-       arfs_config_params.arfs_enable = en_searcher;
-
+       arfs_config_params.mode = mode;
        qed_arfs_mode_configure(p_hwfn, p_hwfn->p_arfs_ptt,
                                &arfs_config_params);
        return 0;
 
 static void
 qed_arfs_sp_response_handler(struct qed_hwfn *p_hwfn,
-                            void *cookie, union event_ring_data *data,
-                            u8 fw_return_code)
+                            void *cookie,
+                            union event_ring_data *data, u8 fw_return_code)
 {
        struct qed_common_cb_ops *op = p_hwfn->cdev->protocol_ops.common;
        void *dev = p_hwfn->cdev->ops_cookie;
        op->arfs_filter_op(dev, cookie, fw_return_code);
 }
 
-static int qed_ntuple_arfs_filter_config(struct qed_dev *cdev, void *cookie,
-                                        dma_addr_t mapping, u16 length,
-                                        u16 vport_id, u16 rx_queue_id,
-                                        bool add_filter)
+static int
+qed_ntuple_arfs_filter_config(struct qed_dev *cdev,
+                             void *cookie,
+                             struct qed_ntuple_filter_params *params)
 {
        struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
        struct qed_spq_comp_cb cb;
        cb.function = qed_arfs_sp_response_handler;
        cb.cookie = cookie;
 
-       rc = qed_configure_rfs_ntuple_filter(p_hwfn, p_hwfn->p_arfs_ptt,
-                                            &cb, mapping, length, rx_queue_id,
-                                            vport_id, add_filter);
+       if (params->b_is_vf) {
+               if (!qed_iov_is_valid_vfid(p_hwfn, params->vf_id, false,
+                                          false)) {
+                       DP_INFO(p_hwfn, "vfid 0x%02x is out of bounds\n",
+                               params->vf_id);
+                       return rc;
+               }
+
+               params->vport_id = params->vf_id + 1;
+               params->qid = QED_RFS_NTUPLE_QID_RSS;
+       }
+
+       rc = qed_configure_rfs_ntuple_filter(p_hwfn, &cb, params);
        if (rc)
                DP_NOTICE(p_hwfn,
                          "Failed to issue a-RFS filter configuration\n");
 
        bool udp;
        bool ipv4;
        bool ipv6;
-       bool arfs_enable;
+       enum qed_filter_config_mode mode;
 };
 
 struct qed_sp_vport_update_params {
 
 void qed_reset_vport_stats(struct qed_dev *cdev);
 
+/**
+ * @brief qed_arfs_mode_configure -
+ *
+ * Enable or disable RFS mode. It must accept at least one of tcp or udp true
+ * and at least one of ipv4 or ipv6 true to enable RFS mode.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param p_cfg_params - arfs mode configuration parameters.
+ *
+ */
+void qed_arfs_mode_configure(struct qed_hwfn *p_hwfn,
+                            struct qed_ptt *p_ptt,
+                            struct qed_arfs_config_params *p_cfg_params);
+
+/**
+ * @brief - qed_configure_rfs_ntuple_filter
+ *
+ * This ramrod should be used to add or remove an aRFS HW filter.
+ *
+ * @param p_hwfn
+ * @param p_cb - Used for QED_SPQ_MODE_CB, where the client would initialize
+ *               it with a cookie and callback function address; if not
+ *               using this mode then the client must pass NULL.
+ * @param p_params
+ */
+int
+qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn,
+                               struct qed_spq_comp_cb *p_cb,
+                               struct qed_ntuple_filter_params *p_params);
+
 #define MAX_QUEUES_PER_QZONE    (sizeof(unsigned long) * 8)
 #define QED_QUEUE_CID_SELF     (0xff)
 
 
        data->opaque_data_0 = le32_to_cpu(p_cqe->rx_cqe_gsi.src_mac_addrhi);
        data->opaque_data_1 = le16_to_cpu(p_cqe->rx_cqe_gsi.src_mac_addrlo);
        data->u.data_length_error = p_cqe->rx_cqe_gsi.data_length_error;
+       data->qp_id = le16_to_cpu(p_cqe->rx_cqe_gsi.qp_id);
+
+       data->src_qp = le32_to_cpu(p_cqe->rx_cqe_gsi.src_qp);
 }
 
 static void qed_ll2_rxq_parse_reg(struct qed_hwfn *p_hwfn,
                       qed_chain_get_pbl_phys(&p_rx->rcq_chain));
 
        p_ramrod->drop_ttl0_flg = p_ll2_conn->input.rx_drop_ttl0_flg;
-       p_ramrod->inner_vlan_removal_en = p_ll2_conn->input.rx_vlan_removal_en;
+       p_ramrod->inner_vlan_stripping_en =
+               p_ll2_conn->input.rx_vlan_removal_en;
        p_ramrod->queue_id = p_ll2_conn->queue_id;
        p_ramrod->main_func_queue = p_ll2_conn->main_func_queue ? 1 : 0;
 
 
        memcpy(&p_ll2_info->input, &data->input, sizeof(p_ll2_info->input));
 
-       p_ll2_info->tx_dest = (data->input.tx_dest == QED_LL2_TX_DEST_NW) ?
-                             CORE_TX_DEST_NW : CORE_TX_DEST_LB;
+       switch (data->input.tx_dest) {
+       case QED_LL2_TX_DEST_NW:
+               p_ll2_info->tx_dest = CORE_TX_DEST_NW;
+               break;
+       case QED_LL2_TX_DEST_LB:
+               p_ll2_info->tx_dest = CORE_TX_DEST_LB;
+               break;
+       case QED_LL2_TX_DEST_DROP:
+               p_ll2_info->tx_dest = CORE_TX_DEST_DROP;
+               break;
+       default:
+               return -EINVAL;
+       }
+
        if (data->input.conn_type == QED_LL2_TYPE_OOO ||
            data->input.secondary_queue)
                p_ll2_info->main_func_queue = false;
                goto release_terminate;
        }
 
-       if (cdev->hwfns[0].hw_info.personality == QED_PCI_ISCSI &&
-           cdev->hwfns[0].pf_params.iscsi_pf_params.ooo_enable) {
+       if (QED_LEADING_HWFN(cdev)->hw_info.personality == QED_PCI_ISCSI) {
                DP_VERBOSE(cdev, QED_MSG_STORAGE, "Starting OOO LL2 queue\n");
                rc = qed_ll2_start_ooo(cdev, params);
                if (rc) {
        qed_ptt_release(QED_LEADING_HWFN(cdev), p_ptt);
        eth_zero_addr(cdev->ll2_mac_address);
 
-       if (cdev->hwfns[0].hw_info.personality == QED_PCI_ISCSI &&
-           cdev->hwfns[0].pf_params.iscsi_pf_params.ooo_enable)
+       if (QED_LEADING_HWFN(cdev)->hw_info.personality == QED_PCI_ISCSI)
                qed_ll2_stop_ooo(cdev);
 
        rc = qed_ll2_terminate_connection(QED_LEADING_HWFN(cdev),
 
                                        DRV_MSG_CODE_NVM_READ_NVRAM,
                                        addr + offset +
                                        (bytes_to_copy <<
-                                        DRV_MB_PARAM_NVM_LEN_SHIFT),
+                                        DRV_MB_PARAM_NVM_LEN_OFFSET),
                                        &resp, &resp_param,
                                        &read_len,
                                        (u32 *)(p_buf + offset));
 
 
        if (QED_IS_IWARP_PERSONALITY(p_hwfn)) {
                qed_iwarp_init_fw_ramrod(p_hwfn,
-                                        &p_ent->ramrod.iwarp_init_func.iwarp);
+                                        &p_ent->ramrod.iwarp_init_func);
                p_ramrod = &p_ent->ramrod.iwarp_init_func.rdma;
        } else {
                p_ramrod = &p_ent->ramrod.roce_init_func.rdma;
 
        0x1f0434UL
 #define PRS_REG_SEARCH_TAG1 \
        0x1f0444UL
+#define PRS_REG_SEARCH_TENANT_ID \
+       0x1f044cUL
 #define PRS_REG_PKT_LEN_STAT_TAGS_NOT_COUNTED_FIRST \
        0x1f0a0cUL
 #define PRS_REG_SEARCH_TCP_FIRST_FRAG \
        0x2e8800UL
 #define CCFC_REG_STRONG_ENABLE_VF \
        0x2e070cUL
-#define  CDU_REG_CID_ADDR_PARAMS       \
+#define CDU_REG_CCFC_CTX_VALID0 \
+       0x580400UL
+#define CDU_REG_CCFC_CTX_VALID1 \
+       0x580404UL
+#define CDU_REG_TCFC_CTX_VALID0 \
+       0x580408UL
+#define  CDU_REG_CID_ADDR_PARAMS \
        0x580900UL
 #define  DBG_REG_CLIENT_ENABLE \
        0x010004UL
        0x0543a4UL
 #define PCIE_REG_DBG_COMMON_FORCE_FRAME_K2_E5 \
        0x0543a8UL
+#define PTLD_REG_DBG_SELECT_E5 \
+       0x5a1600UL
+#define PTLD_REG_DBG_DWORD_ENABLE_E5 \
+       0x5a1604UL
+#define PTLD_REG_DBG_SHIFT_E5 \
+       0x5a1608UL
+#define PTLD_REG_DBG_FORCE_VALID_E5 \
+       0x5a160cUL
+#define PTLD_REG_DBG_FORCE_FRAME_E5 \
+       0x5a1610UL
+#define YPLD_REG_DBG_SELECT_E5 \
+       0x5c1600UL
+#define YPLD_REG_DBG_DWORD_ENABLE_E5 \
+       0x5c1604UL
+#define YPLD_REG_DBG_SHIFT_E5 \
+       0x5c1608UL
+#define YPLD_REG_DBG_FORCE_VALID_E5 \
+       0x5c160cUL
+#define YPLD_REG_DBG_FORCE_FRAME_E5 \
+       0x5c1610UL
+#define RGSRC_REG_DBG_SELECT_E5        \
+       0x320040UL
+#define RGSRC_REG_DBG_DWORD_ENABLE_E5 \
+       0x320044UL
+#define RGSRC_REG_DBG_SHIFT_E5 \
+       0x320048UL
+#define RGSRC_REG_DBG_FORCE_VALID_E5 \
+       0x32004cUL
+#define RGSRC_REG_DBG_FORCE_FRAME_E5 \
+       0x320050UL
+#define TGSRC_REG_DBG_SELECT_E5        \
+       0x322040UL
+#define TGSRC_REG_DBG_DWORD_ENABLE_E5 \
+       0x322044UL
+#define TGSRC_REG_DBG_SHIFT_E5 \
+       0x322048UL
+#define TGSRC_REG_DBG_FORCE_VALID_E5 \
+       0x32204cUL
+#define TGSRC_REG_DBG_FORCE_FRAME_E5 \
+       0x322050UL
 #define MISC_REG_RESET_PL_UA \
        0x008050UL
 #define MISC_REG_RESET_PL_HV \
        0x340800UL
 #define BRB_REG_BIG_RAM_DATA \
        0x341500UL
+#define BRB_REG_BIG_RAM_DATA_SIZE \
+       64
 #define SEM_FAST_REG_STALL_0_BB_K2 \
        0x000488UL
 #define SEM_FAST_REG_STALLED \
 
                DP_NOTICE(p_hwfn, "Unsupported MF mode, init as DEFAULT\n");
                p_ramrod->mf_mode = MF_NPAR;
        }
-       p_ramrod->outer_tag = p_hwfn->hw_info.ovlan;
+
+       p_ramrod->outer_tag_config.outer_tag.tci =
+               cpu_to_le16(p_hwfn->hw_info.ovlan);
 
        /* Place EQ address in RAMROD */
        DMA_REGPAIR_LE(p_ramrod->event_ring_pbl_addr,
        p_ramrod->hsi_fp_ver.minor_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MINOR;
 
        DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
-                  "Setting event_ring_sb [id %04x index %02x], outer_tag [%d]\n",
-                  sb, sb_index, p_ramrod->outer_tag);
+                  "Setting event_ring_sb [id %04x index %02x], outer_tag.tci [%d]\n",
+                  sb, sb_index, p_ramrod->outer_tag_config.outer_tag.tci);
 
        rc = qed_spq_post(p_hwfn, p_ent, NULL);
 
 
        return qed_spq_post(p_hwfn, p_ent, NULL);
 }
 
-static bool qed_iov_is_valid_vfid(struct qed_hwfn *p_hwfn,
-                                 int rel_vf_id,
-                                 bool b_enabled_only, bool b_non_malicious)
+bool qed_iov_is_valid_vfid(struct qed_hwfn *p_hwfn,
+                          int rel_vf_id,
+                          bool b_enabled_only, bool b_non_malicious)
 {
        if (!p_hwfn->pf_iov_info) {
                DP_NOTICE(p_hwfn->cdev, "No iov info\n");
 static int qed_iov_configure_tx_rate(struct qed_hwfn *p_hwfn,
                                     struct qed_ptt *p_ptt, int vfid, int val)
 {
+       struct qed_mcp_link_state *p_link;
        struct qed_vf_info *vf;
        u8 abs_vp_id = 0;
        int rc;
        if (rc)
                return rc;
 
-       return qed_init_vport_rl(p_hwfn, p_ptt, abs_vp_id, (u32)val);
+       p_link = &QED_LEADING_HWFN(p_hwfn->cdev)->mcp_info->link_output;
+
+       return qed_init_vport_rl(p_hwfn, p_ptt, abs_vp_id, (u32)val,
+                                p_link->speed);
 }
 
 static int
 
 };
 
 #ifdef CONFIG_QED_SRIOV
+/**
+ * @brief Check if the given VF ID @rel_vf_id is valid
+ *        w.r.t. the @b_enabled_only value:
+ *        if b_enabled_only = true - only an enabled VF ID is valid,
+ *        else any VF ID less than max_vfs is valid.
+ *
+ * @param p_hwfn
+ * @param rel_vf_id - Relative VF ID
+ * @param b_enabled_only - consider only enabled VF
+ * @param b_non_malicious - true iff we want to validate vf isn't malicious.
+ *
+ * @return bool - true for valid VF ID
+ */
+bool qed_iov_is_valid_vfid(struct qed_hwfn *p_hwfn,
+                          int rel_vf_id,
+                          bool b_enabled_only, bool b_non_malicious);
+
 /**
  * @brief - Given a VF index, return index of next [including that] active VF.
  *
 int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled);
 void qed_inform_vf_link_state(struct qed_hwfn *hwfn);
 #else
+static inline bool
+qed_iov_is_valid_vfid(struct qed_hwfn *p_hwfn,
+                     int rel_vf_id, bool b_enabled_only, bool b_non_malicious)
+{
+       return false;
+}
+
 static inline u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn,
                                             u16 rel_vf_id)
 {
 
                                     u16 rxq_id, bool add_fltr)
 {
        const struct qed_eth_ops *op = edev->ops;
+       struct qed_ntuple_filter_params params;
 
        if (n->used)
                return;
 
+       memset(&params, 0, sizeof(params));
+
+       params.addr = n->mapping;
+       params.length = n->buf_len;
+       params.qid = rxq_id;
+       params.b_is_add = add_fltr;
+
        DP_VERBOSE(edev, NETIF_MSG_RX_STATUS,
                   "%s arfs filter flow_id=%d, sw_id=%d, src_port=%d, dst_port=%d, rxq=%d\n",
                   add_fltr ? "Adding" : "Deleting",
 
        n->used = true;
        n->filter_op = add_fltr;
-       op->ntuple_filter_config(edev->cdev, n, n->mapping, n->buf_len, 0,
-                                rxq_id, add_fltr);
+       op->ntuple_filter_config(edev->cdev, n, &params);
 }
 
 static void
        edev->arfs->filter_count++;
 
        if (edev->arfs->filter_count == 1 && !edev->arfs->enable) {
-               edev->ops->configure_arfs_searcher(edev->cdev, true);
+               enum qed_filter_config_mode mode;
+
+               mode = QED_FILTER_CONFIG_MODE_5_TUPLE;
+               edev->ops->configure_arfs_searcher(edev->cdev, mode);
                edev->arfs->enable = true;
        }
 
        edev->arfs->filter_count--;
 
        if (!edev->arfs->filter_count && edev->arfs->enable) {
+               enum qed_filter_config_mode mode;
+
+               mode = QED_FILTER_CONFIG_MODE_DISABLE;
                edev->arfs->enable = false;
-               edev->ops->configure_arfs_searcher(edev->cdev, false);
+               edev->ops->configure_arfs_searcher(edev->cdev, mode);
        }
 }
 
 
        if (!edev->arfs->filter_count) {
                if (edev->arfs->enable) {
+                       enum qed_filter_config_mode mode;
+
+                       mode = QED_FILTER_CONFIG_MODE_DISABLE;
                        edev->arfs->enable = false;
-                       edev->ops->configure_arfs_searcher(edev->cdev, false);
+                       edev->ops->configure_arfs_searcher(edev->cdev, mode);
                }
 #ifdef CONFIG_RFS_ACCEL
        } else {
 
                                u8 fcp_cmd_payload[32])
 {
        struct e4_fcoe_task_context *ctx = task_params->context;
+       const u8 val_byte = ctx->ystorm_ag_context.byte0;
        struct e4_ustorm_fcoe_task_ag_ctx *u_ag_ctx;
        struct ystorm_fcoe_task_st_ctx *y_st_ctx;
        struct tstorm_fcoe_task_st_ctx *t_st_ctx;
        bool slow_sgl;
 
        memset(ctx, 0, sizeof(*(ctx)));
+       ctx->ystorm_ag_context.byte0 = val_byte;
        slow_sgl = scsi_is_slow_sgl(sgl_task_params->num_sges,
                                    sgl_task_params->small_mid_sge);
        io_size = (task_params->task_type == FCOE_TASK_TYPE_WRITE_INITIATOR ?
        y_st_ctx = &ctx->ystorm_st_context;
        y_st_ctx->data_2_trns_rem = cpu_to_le32(io_size);
        y_st_ctx->task_rety_identifier = cpu_to_le32(task_retry_id);
-       y_st_ctx->task_type = task_params->task_type;
+       y_st_ctx->task_type = (u8)task_params->task_type;
        memcpy(&y_st_ctx->tx_info_union.fcp_cmd_payload,
               fcp_cmd_payload, sizeof(struct fcoe_fcp_cmd_payload));
 
        /* Tstorm ctx */
        t_st_ctx = &ctx->tstorm_st_context;
-       t_st_ctx->read_only.dev_type = (task_params->is_tape_device == 1 ?
-                                       FCOE_TASK_DEV_TYPE_TAPE :
-                                       FCOE_TASK_DEV_TYPE_DISK);
+       t_st_ctx->read_only.dev_type = (u8)(task_params->is_tape_device == 1 ?
+                                           FCOE_TASK_DEV_TYPE_TAPE :
+                                           FCOE_TASK_DEV_TYPE_DISK);
        t_st_ctx->read_only.cid = cpu_to_le32(task_params->conn_cid);
        val = cpu_to_le32(task_params->cq_rss_number);
        t_st_ctx->read_only.glbl_q_num = val;
        t_st_ctx->read_only.fcp_cmd_trns_size = cpu_to_le32(io_size);
-       t_st_ctx->read_only.task_type = task_params->task_type;
+       t_st_ctx->read_only.task_type = (u8)task_params->task_type;
        SET_FIELD(t_st_ctx->read_write.flags,
                  FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME, 1);
        t_st_ctx->read_write.rx_id = cpu_to_le16(FCOE_RX_ID);
                SET_FIELD(m_st_ctx->flags,
                          MSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE,
                          (slow_sgl ? SCSI_TX_SLOW_SGL : SCSI_FAST_SGL));
+               m_st_ctx->sgl_params.sgl_num_sges =
+                       cpu_to_le16(sgl_task_params->num_sges);
        } else {
                /* Tstorm ctx */
                SET_FIELD(t_st_ctx->read_write.flags,
                                      sgl_task_params);
        }
 
+       /* Init Sqe */
        init_common_sqe(task_params, SEND_FCOE_CMD);
+
        return 0;
 }
 
        u8 fw_to_place_fc_header)
 {
        struct e4_fcoe_task_context *ctx = task_params->context;
+       const u8 val_byte = ctx->ystorm_ag_context.byte0;
        struct e4_ustorm_fcoe_task_ag_ctx *u_ag_ctx;
        struct ystorm_fcoe_task_st_ctx *y_st_ctx;
        struct tstorm_fcoe_task_st_ctx *t_st_ctx;
        u32 val;
 
        memset(ctx, 0, sizeof(*(ctx)));
+       ctx->ystorm_ag_context.byte0 = val_byte;
 
        /* Init Ystorm */
        y_st_ctx = &ctx->ystorm_st_context;
        SET_FIELD(y_st_ctx->sgl_mode,
                  YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE, SCSI_FAST_SGL);
        y_st_ctx->data_2_trns_rem = cpu_to_le32(task_params->tx_io_size);
-       y_st_ctx->task_type = task_params->task_type;
+       y_st_ctx->task_type = (u8)task_params->task_type;
        memcpy(&y_st_ctx->tx_info_union.tx_params.mid_path,
               mid_path_fc_header, sizeof(struct fcoe_tx_mid_path_params));
 
        t_st_ctx->read_only.cid = cpu_to_le32(task_params->conn_cid);
        val = cpu_to_le32(task_params->cq_rss_number);
        t_st_ctx->read_only.glbl_q_num = val;
-       t_st_ctx->read_only.task_type = task_params->task_type;
+       t_st_ctx->read_only.task_type = (u8)task_params->task_type;
        SET_FIELD(t_st_ctx->read_write.flags,
                  FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME, 1);
        t_st_ctx->read_write.rx_id = cpu_to_le16(FCOE_RX_ID);
 }
 
 int init_initiator_sequence_recovery_fcoe_task(
-       struct fcoe_task_params *task_params, u32 off)
+       struct fcoe_task_params *task_params, u32 desired_offset)
 {
        init_common_sqe(task_params, FCOE_SEQUENCE_RECOVERY);
-       task_params->sqe->additional_info_union.seq_rec_updated_offset = off;
+       task_params->sqe->additional_info_union.seq_rec_updated_offset =
+                                                               desired_offset;
        return 0;
 }
 
        MAX_FCOE_CQE_TYPE
 };
 
-
-/*
- * FCoE device type
- */
-enum fcoe_device_type {
-       FCOE_TASK_DEV_TYPE_DISK,
-       FCOE_TASK_DEV_TYPE_TAPE,
-       MAX_FCOE_DEVICE_TYPE
-};
-
-
-
-
 /*
  * FCoE fast path error codes
  */
        MAX_FCOE_SP_ERROR_CODE
 };
 
-
-/*
- * FCoE SQE request type
- */
-enum fcoe_sqe_request_type {
-       SEND_FCOE_CMD,
-       SEND_FCOE_MIDPATH,
-       SEND_FCOE_ABTS_REQUEST,
-       FCOE_EXCHANGE_CLEANUP,
-       FCOE_SEQUENCE_RECOVERY,
-       SEND_FCOE_XFER_RDY,
-       SEND_FCOE_RSP,
-       SEND_FCOE_RSP_WITH_SENSE_DATA,
-       SEND_FCOE_TARGET_DATA,
-       SEND_FCOE_INITIATOR_DATA,
-       /*
-        * Xfer Continuation (==1) ready to be sent. Previous XFERs data
-        * received successfully.
-        */
-       SEND_FCOE_XFER_CONTINUATION_RDY,
-       SEND_FCOE_TARGET_ABTS_RSP,
-       MAX_FCOE_SQE_REQUEST_TYPE
-};
-
-
 /*
  * FCoE task TX state
  */
        MAX_FCOE_TASK_TX_STATE
 };
 
-
-/*
- * FCoE task type
- */
-enum fcoe_task_type {
-       FCOE_TASK_TYPE_WRITE_INITIATOR,
-       FCOE_TASK_TYPE_READ_INITIATOR,
-       FCOE_TASK_TYPE_MIDPATH,
-       FCOE_TASK_TYPE_UNSOLICITED,
-       FCOE_TASK_TYPE_ABTS,
-       FCOE_TASK_TYPE_EXCHANGE_CLEANUP,
-       FCOE_TASK_TYPE_SEQUENCE_CLEANUP,
-       FCOE_TASK_TYPE_WRITE_TARGET,
-       FCOE_TASK_TYPE_READ_TARGET,
-       FCOE_TASK_TYPE_RSP,
-       FCOE_TASK_TYPE_RSP_SENSE_DATA,
-       FCOE_TASK_TYPE_ABTS_TARGET,
-       FCOE_TASK_TYPE_ENUM_SIZE,
-       MAX_FCOE_TASK_TYPE
-};
-
-struct scsi_glbl_queue_entry {
-       /* Start physical address for the RQ (receive queue) PBL. */
-       struct regpair rq_pbl_addr;
-       /* Start physical address for the CQ (completion queue) PBL. */
-       struct regpair cq_pbl_addr;
-       /* Start physical address for the CMDQ (command queue) PBL. */
-       struct regpair cmdq_pbl_addr;
-};
-
 #endif /* __QEDF_HSI__ */
 
        struct qedf_io_work *io_work;
        u32 bdq_idx;
        void *bdq_addr;
+       struct scsi_bd *p_bd_info;
 
+       p_bd_info = &cqe->cqe_info.unsolic_info.bd_info;
        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_UNSOL,
-           "address.hi=%x address.lo=%x opaque_data.hi=%x "
-           "opaque_data.lo=%x bdq_prod_idx=%u len=%u.\n",
-           le32_to_cpu(cqe->cqe_info.unsolic_info.bd_info.address.hi),
-           le32_to_cpu(cqe->cqe_info.unsolic_info.bd_info.address.lo),
-           le32_to_cpu(cqe->cqe_info.unsolic_info.bd_info.opaque.hi),
-           le32_to_cpu(cqe->cqe_info.unsolic_info.bd_info.opaque.lo),
-           qedf->bdq_prod_idx, pktlen);
-
-       bdq_idx = le32_to_cpu(cqe->cqe_info.unsolic_info.bd_info.opaque.lo);
+                 "address.hi=%x, address.lo=%x, opaque_data.hi=%x, opaque_data.lo=%x, bdq_prod_idx=%u, len=%u\n",
+                 le32_to_cpu(p_bd_info->address.hi),
+                 le32_to_cpu(p_bd_info->address.lo),
+                 le32_to_cpu(p_bd_info->opaque.fcoe_opaque.hi),
+                 le32_to_cpu(p_bd_info->opaque.fcoe_opaque.lo),
+                 qedf->bdq_prod_idx, pktlen);
+
+       bdq_idx = le32_to_cpu(p_bd_info->opaque.fcoe_opaque.lo);
        if (bdq_idx >= QEDF_BDQ_SIZE) {
                QEDF_ERR(&(qedf->dbg_ctx), "bdq_idx is out of range %d.\n",
                    bdq_idx);
 
        for (i = 0; i < QEDF_BDQ_SIZE; i++) {
                pbl->address.hi = cpu_to_le32(U64_HI(qedf->bdq[i].buf_dma));
                pbl->address.lo = cpu_to_le32(U64_LO(qedf->bdq[i].buf_dma));
-               pbl->opaque.hi = 0;
+               pbl->opaque.fcoe_opaque.hi = 0;
                /* Opaque lo data is an index into the BDQ array */
-               pbl->opaque.lo = cpu_to_le32(i);
+               pbl->opaque.fcoe_opaque.lo = cpu_to_le32(i);
                pbl++;
        }
 
 
                  (qedi->bdq_prod_idx % qedi->rq_num_entries));
 
        /* Obtain buffer address from rqe_opaque */
-       idx = cqe->rqe_opaque.lo;
+       idx = cqe->rqe_opaque;
        if (idx > (QEDI_BDQ_NUM - 1)) {
                QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
                          "wrong idx %d returned by FW, dropping the unsolicited pkt\n",
        }
 
        QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
-                 "rqe_opaque.lo [0x%p], rqe_opaque.hi [0x%p], idx [%d]\n",
-                 cqe->rqe_opaque.lo, cqe->rqe_opaque.hi, idx);
+                 "rqe_opaque [0x%p], idx [%d]\n", cqe->rqe_opaque, idx);
 
        QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
                  "unsol_cqe_type = %d\n", cqe->unsol_cqe_type);
        struct scsi_bd *pbl;
 
        /* Obtain buffer address from rqe_opaque */
-       idx = cqe->rqe_opaque.lo;
+       idx = cqe->rqe_opaque;
        if (idx > (QEDI_BDQ_NUM - 1)) {
                QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
                          "wrong idx %d returned by FW, dropping the unsolicited pkt\n",
        QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
                  "pbl [0x%p] pbl->address hi [0x%llx] lo [0x%llx] idx [%d]\n",
                  pbl, pbl->address.hi, pbl->address.lo, idx);
-       pbl->opaque.hi = 0;
-       pbl->opaque.lo = cpu_to_le32(QEDI_U64_LO(idx));
+       pbl->opaque.iscsi_opaque.reserved_zero[0] = 0;
+       pbl->opaque.iscsi_opaque.reserved_zero[1] = 0;
+       pbl->opaque.iscsi_opaque.reserved_zero[2] = 0;
+       pbl->opaque.iscsi_opaque.opaque = cpu_to_le32(idx);
 
        /* Increment producer to let f/w know we've handled the frame */
        qedi->bdq_prod_idx += count;
 
                                    enum iscsi_task_type task_type)
 {
        struct e4_iscsi_task_context *context;
-       u16 index;
        u32 val;
+       u16 index;
+       u8 val_byte;
 
        context = task_params->context;
+       val_byte = context->mstorm_ag_context.cdu_validation;
        memset(context, 0, sizeof(*context));
+       context->mstorm_ag_context.cdu_validation = val_byte;
 
        for (index = 0; index <
             ARRAY_SIZE(context->ystorm_st_context.pdu_hdr.data.data);
 
        cxt = task_params->context;
 
-       val = cpu_to_le32(task_size);
-       cxt->ystorm_st_context.pdu_hdr.cmd.expected_transfer_length = val;
-       init_initiator_rw_cdb_ystorm_context(&cxt->ystorm_st_context,
-                                            cmd_params);
-       val = cpu_to_le32(cmd_params->sense_data_buffer_phys_addr.lo);
-       cxt->mstorm_st_context.sense_db.lo = val;
 
-       val = cpu_to_le32(cmd_params->sense_data_buffer_phys_addr.hi);
-       cxt->mstorm_st_context.sense_db.hi = val;
+       if (task_type == ISCSI_TASK_TYPE_TARGET_READ) {
+               set_local_completion_context(cxt);
+       } else if (task_type == ISCSI_TASK_TYPE_TARGET_WRITE) {
+               val = cpu_to_le32(task_size +
+                          ((struct iscsi_r2t_hdr *)pdu_header)->buffer_offset);
+               cxt->ystorm_st_context.pdu_hdr.r2t.desired_data_trns_len = val;
+               cxt->mstorm_st_context.expected_itt =
+                                                  cpu_to_le32(pdu_header->itt);
+       } else {
+               val = cpu_to_le32(task_size);
+               cxt->ystorm_st_context.pdu_hdr.cmd.expected_transfer_length =
+                                                                           val;
+               init_initiator_rw_cdb_ystorm_context(&cxt->ystorm_st_context,
+                                                    cmd_params);
+               val = cpu_to_le32(cmd_params->sense_data_buffer_phys_addr.lo);
+               cxt->mstorm_st_context.sense_db.lo = val;
+
+               val = cpu_to_le32(cmd_params->sense_data_buffer_phys_addr.hi);
+               cxt->mstorm_st_context.sense_db.hi = val;
+       }
 
        if (task_params->tx_io_size) {
                init_dif_context_flags(&cxt->ystorm_st_context.state.dif_flags,
                                       dif_task_params);
+               init_dif_context_flags(&cxt->ustorm_st_context.dif_flags,
+                                      dif_task_params);
                init_scsi_sgl_context(&cxt->ystorm_st_context.state.sgl_params,
                                      &cxt->ystorm_st_context.state.data_desc,
                                      sgl_task_params);
 
 void qedi_get_proto_itt(struct qedi_ctx *qedi, u32 tid, u32 *proto_itt);
 void qedi_get_task_tid(struct qedi_ctx *qedi, u32 itt, int16_t *tid);
 void qedi_process_iscsi_error(struct qedi_endpoint *ep,
-                             struct async_data *data);
+                             struct iscsi_eqe_data *data);
 void qedi_start_conn_recovery(struct qedi_ctx *qedi,
                              struct qedi_conn *qedi_conn);
 struct qedi_conn *qedi_get_conn_from_id(struct qedi_ctx *qedi, u32 iscsi_cid);
-void qedi_process_tcp_error(struct qedi_endpoint *ep, struct async_data *data);
+void qedi_process_tcp_error(struct qedi_endpoint *ep,
+                           struct iscsi_eqe_data *data);
 void qedi_mark_device_missing(struct iscsi_cls_session *cls_session);
 void qedi_mark_device_available(struct iscsi_cls_session *cls_session);
 void qedi_reset_host_mtu(struct qedi_ctx *qedi, u16 mtu);
 
        conn_info->ka_max_probe_cnt = DEF_KA_MAX_PROBE_COUNT;
        conn_info->dup_ack_theshold = 3;
        conn_info->rcv_wnd = 65535;
-       conn_info->cwnd = DEF_MAX_CWND;
 
        conn_info->ss_thresh = 65535;
        conn_info->srtt = 300;
                                       (qedi_ep->ip_type == TCP_IPV6),
                                       1, (qedi_ep->vlan_id != 0));
 
+       conn_info->cwnd = DEF_MAX_CWND * conn_info->mss;
        conn_info->rcv_wnd_scale = 4;
-       conn_info->ts_ticks_per_second = 1000;
        conn_info->da_timeout_value = 200;
        conn_info->ack_frequency = 2;
 
        return msg;
 }
 
-void qedi_process_iscsi_error(struct qedi_endpoint *ep, struct async_data *data)
+void qedi_process_iscsi_error(struct qedi_endpoint *ep,
+                             struct iscsi_eqe_data *data)
 {
        struct qedi_conn *qedi_conn;
        struct qedi_ctx *qedi;
                qedi_start_conn_recovery(qedi_conn->qedi, qedi_conn);
 }
 
-void qedi_process_tcp_error(struct qedi_endpoint *ep, struct async_data *data)
+void qedi_process_tcp_error(struct qedi_endpoint *ep,
+                           struct iscsi_eqe_data *data)
 {
        struct qedi_conn *qedi_conn;
 
 
 {
        struct qedi_ctx *qedi;
        struct qedi_endpoint *qedi_ep;
-       struct async_data *data;
+       struct iscsi_eqe_data *data;
        int rval = 0;
 
        if (!context || !fw_handle) {
        QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
                  "Recv Event %d fw_handle %p\n", fw_event_code, fw_handle);
 
-       data = (struct async_data *)fw_handle;
+       data = (struct iscsi_eqe_data *)fw_handle;
        QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
-                 "cid=0x%x tid=0x%x err-code=0x%x fw-dbg-param=0x%x\n",
-                  data->cid, data->itid, data->error_code,
-                  data->fw_debug_param);
+                 "icid=0x%x conn_id=0x%x err-code=0x%x error-pdu-opcode-reserved=0x%x\n",
+                  data->icid, data->conn_id, data->error_code,
+                  data->error_pdu_opcode_reserved);
 
-       qedi_ep = qedi->ep_tbl[data->cid];
+       qedi_ep = qedi->ep_tbl[data->icid];
 
        if (!qedi_ep) {
                QEDI_WARN(&qedi->dbg_ctx,
                          "Cannot process event, ep already disconnected, cid=0x%x\n",
-                          data->cid);
+                          data->icid);
                WARN_ON(1);
                return -ENODEV;
        }
 
        qedi->pf_params.iscsi_pf_params.gl_rq_pi = QEDI_PROTO_CQ_PROD_IDX;
        qedi->pf_params.iscsi_pf_params.gl_cmd_pi = 1;
-       qedi->pf_params.iscsi_pf_params.ooo_enable = 1;
 
 err_alloc_mem:
        return rval;
                QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
                          "pbl [0x%p] pbl->address hi [0x%llx] lo [0x%llx], idx [%d]\n",
                          pbl, pbl->address.hi, pbl->address.lo, i);
-               pbl->opaque.hi = 0;
-               pbl->opaque.lo = cpu_to_le32(QEDI_U64_LO(i));
+               pbl->opaque.iscsi_opaque.reserved_zero[0] = 0;
+               pbl->opaque.iscsi_opaque.reserved_zero[1] = 0;
+               pbl->opaque.iscsi_opaque.reserved_zero[2] = 0;
+               pbl->opaque.iscsi_opaque.opaque = cpu_to_le16(i);
                pbl++;
        }
 
 
 #define MAX_NUM_LL2_TX_STATS_COUNTERS  48
 
 #define FW_MAJOR_VERSION       8
-#define FW_MINOR_VERSION       20
-#define FW_REVISION_VERSION    0
+#define FW_MINOR_VERSION       33
+#define FW_REVISION_VERSION    1
 #define FW_ENGINEERING_VERSION 0
 
 /***********************/
 /* Traffic classes in network-facing blocks (PBF, BTB, NIG, BRB, PRS and QM) */
 #define NUM_PHYS_TCS_4PORT_K2  (4)
 #define NUM_OF_PHYS_TCS                (8)
-
+#define PURE_LB_TC             NUM_OF_PHYS_TCS
 #define NUM_TCS_4PORT_K2       (NUM_PHYS_TCS_4PORT_K2 + 1)
 #define NUM_OF_TCS             (NUM_OF_PHYS_TCS + 1)
 
-#define LB_TC                  (NUM_OF_PHYS_TCS)
-
-#define MAX_NUM_VOQS_K2                (NUM_TCS_4PORT_K2 * MAX_NUM_PORTS_K2)
-#define MAX_NUM_VOQS_BB                (NUM_OF_TCS * MAX_NUM_PORTS_BB)
-#define MAX_NUM_VOQS_E4                (MAX_NUM_VOQS_K2)
-#define MAX_PHYS_VOQS          (NUM_OF_PHYS_TCS * MAX_NUM_PORTS_BB)
-
 /* CIDs */
 #define NUM_OF_CONNECTION_TYPES_E4     (8)
 #define NUM_OF_LCIDS                   (320)
 #define PXP_VF_BAR0_START_SDM_ZONE_A   0x4000
 #define PXP_VF_BAR0_END_SDM_ZONE_A     0x10000
 
+#define PXP_VF_BAR0_START_IGU2         0x10000
+#define PXP_VF_BAR0_IGU2_LENGTH                0xD000
+#define PXP_VF_BAR0_END_IGU2           (PXP_VF_BAR0_START_IGU2 + \
+                                        PXP_VF_BAR0_IGU2_LENGTH - 1)
+
 #define PXP_VF_BAR0_GRC_WINDOW_LENGTH  32
 
 #define PXP_ILT_PAGE_SIZE_NUM_BITS_MIN 12
 
 #define PRS_GFT_CAM_LINES_NO_MATCH     31
 
-/* Async data KCQ CQE */
-struct async_data {
-       __le32  cid;
-       __le16  itid;
-       u8      error_code;
-       u8      fw_debug_param;
-};
-
 /* Interrupt coalescing TimeSet */
 struct coalescing_timeset {
        u8 value;
        __le16 cqe_prod;
 };
 
+struct tcp_ulp_connect_done_params {
+       __le16 mss;
+       u8 snd_wnd_scale;
+       u8 flags;
+#define TCP_ULP_CONNECT_DONE_PARAMS_TS_EN_MASK         0x1
+#define TCP_ULP_CONNECT_DONE_PARAMS_TS_EN_SHIFT                0
+#define TCP_ULP_CONNECT_DONE_PARAMS_RESERVED_MASK      0x7F
+#define TCP_ULP_CONNECT_DONE_PARAMS_RESERVED_SHIFT     1
+};
+
+struct iscsi_connect_done_results {
+       __le16 icid;
+       __le16 conn_id;
+       struct tcp_ulp_connect_done_params params;
+};
+
 struct iscsi_eqe_data {
-       __le32 cid;
+       __le16 icid;
        __le16 conn_id;
+       __le16 reserved;
        u8 error_code;
        u8 error_pdu_opcode_reserved;
 #define ISCSI_EQE_DATA_ERROR_PDU_OPCODE_MASK           0x3F
 
 /* Status block structure */
 struct cau_pi_entry {
-       u32 prod;
+       __le32 prod;
 #define CAU_PI_ENTRY_PROD_VAL_MASK     0xFFFF
 #define CAU_PI_ENTRY_PROD_VAL_SHIFT    0
 #define CAU_PI_ENTRY_PI_TIMESET_MASK   0x7F
 
 /* Status block structure */
 struct cau_sb_entry {
-       u32 data;
+       __le32 data;
 #define CAU_SB_ENTRY_SB_PROD_MASK      0xFFFFFF
 #define CAU_SB_ENTRY_SB_PROD_SHIFT     0
 #define CAU_SB_ENTRY_STATE0_MASK       0xF
 #define CAU_SB_ENTRY_STATE0_SHIFT      24
 #define CAU_SB_ENTRY_STATE1_MASK       0xF
 #define CAU_SB_ENTRY_STATE1_SHIFT      28
-       u32 params;
+       __le32 params;
 #define CAU_SB_ENTRY_SB_TIMESET0_MASK  0x7F
 #define CAU_SB_ENTRY_SB_TIMESET0_SHIFT 0
 #define CAU_SB_ENTRY_SB_TIMESET1_MASK  0x7F
 
 /* IGU producer or consumer update command */
 struct igu_prod_cons_update {
-       u32 sb_id_and_flags;
+       __le32 sb_id_and_flags;
 #define IGU_PROD_CONS_UPDATE_SB_INDEX_MASK             0xFFFFFF
 #define IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT            0
 #define IGU_PROD_CONS_UPDATE_UPDATE_FLAG_MASK          0x1
 #define IGU_PROD_CONS_UPDATE_RESERVED0_SHIFT           29
 #define IGU_PROD_CONS_UPDATE_COMMAND_TYPE_MASK         0x1
 #define IGU_PROD_CONS_UPDATE_COMMAND_TYPE_SHIFT                31
-       u32 reserved1;
+       __le32 reserved1;
 };
 
 /* Igu segments access for default status block only */
        MAX_IGU_SEG_ACCESS
 };
 
+/* Enumeration for the L3 type field of parsing_and_err_flags.
+ * L3 type: 0 - unknown (not IP), 1 - IPv4, 2 - IPv6.
+ * (This field can be filled according to the last ethertype.)
+ */
+enum l3_type {
+       e_l3_type_unknown,
+       e_l3_type_ipv4,
+       e_l3_type_ipv6,
+       MAX_L3_TYPE
+};
+
+/* Enumeration for the l4Protocol field of parsing_and_err_flags.
+ * L4 protocol: 0 - none, 1 - TCP, 2 - UDP.
+ * If the packet is an IPv4 fragment and is not the first fragment, the
+ * protocol type should be set to none.
+ */
+enum l4_protocol {
+       e_l4_protocol_none,
+       e_l4_protocol_tcp,
+       e_l4_protocol_udp,
+       MAX_L4_PROTOCOL
+};
+
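(A minimal sketch of how these enums pair with the parsing_and_err_flags
structure that follows: GET_FIELD() is the mask/shift helper already used
throughout this patch, and the matching PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT
define is assumed to exist alongside the _MASK shown below. The function name
is illustrative only.)

static bool example_pkt_is_ipv4(const struct parsing_and_err_flags *pars)
{
	/* 'flags' is little-endian on the wire; extract the 2-bit L3 type */
	u16 flags = le16_to_cpu(pars->flags);

	return GET_FIELD(flags, PARSING_AND_ERR_FLAGS_L3TYPE) == e_l3_type_ipv4;
}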
+/* Parsing and error flags field */
 struct parsing_and_err_flags {
        __le16 flags;
 #define PARSING_AND_ERR_FLAGS_L3TYPE_MASK                      0x3
 
 /* Control frame check constants */
 #define ETH_CTL_FRAME_ETH_TYPE_NUM     4
 
+/* GFS constants */
+#define ETH_GFT_TRASH_CAN_VPORT                0x1FF
+
+/* Destination port mode */
+enum dest_port_mode {
+       DEST_PORT_PHY,
+       DEST_PORT_LOOPBACK,
+       DEST_PORT_PHY_LOOPBACK,
+       DEST_PORT_DROP,
+       MAX_DEST_PORT_MODE
+};
+
+/* Ethernet address type */
+enum eth_addr_type {
+       BROADCAST_ADDRESS,
+       MULTICAST_ADDRESS,
+       UNICAST_ADDRESS,
+       UNKNOWN_ADDRESS,
+       MAX_ETH_ADDR_TYPE
+};
+
 struct eth_tx_1st_bd_flags {
        u8 bitfields;
 #define ETH_TX_1ST_BD_FLAGS_START_BD_MASK              0x1
        __le32 reserved;
 };
 
-struct eth_fast_path_cqe_fw_debug {
-       __le16 reserved2;
-};
-
 /* Tunneling parsing flags */
 struct eth_tunnel_parsing_flags {
        u8 flags;
        u8 placement_offset;
        struct eth_tunnel_parsing_flags tunnel_pars_flags;
        u8 bd_num;
-       u8 reserved[9];
-       struct eth_fast_path_cqe_fw_debug fw_debug;
-       u8 reserved1[3];
+       u8 reserved;
+       __le16 flow_id;
+       u8 reserved1[11];
        struct eth_pmd_flow_flags pmd_flags;
 };
 
        u8 tpa_agg_index;
        u8 header_len;
        __le16 ext_bd_len_list[ETH_TPA_CQE_START_LEN_LIST_SIZE];
-       struct eth_fast_path_cqe_fw_debug fw_debug;
+       __le16 flow_id;
        u8 reserved;
        struct eth_pmd_flow_flags pmd_flags;
 };
 
 #define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_REC_VALID_SHIFT                2
 #define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_VLAN_FLAG_MASK         0x1
 #define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_VLAN_FLAG_SHIFT                3
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_SINGLE_VLAN_MASK       0x1
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_SINGLE_VLAN_SHIFT      4
 #define FCOE_CONN_OFFLOAD_RAMROD_DATA_MODE_MASK                        0x3
-#define FCOE_CONN_OFFLOAD_RAMROD_DATA_MODE_SHIFT               4
-#define FCOE_CONN_OFFLOAD_RAMROD_DATA_RESERVED0_MASK           0x3
-#define FCOE_CONN_OFFLOAD_RAMROD_DATA_RESERVED0_SHIFT          6
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_MODE_SHIFT               5
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_RESERVED0_MASK           0x1
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_RESERVED0_SHIFT          7
        __le16 conn_id;
        u8 def_q_idx;
        u8 reserved[5];
        struct regpair terminate_params_addr;
 };
 
+/* FCoE device type */
+enum fcoe_device_type {
+       FCOE_TASK_DEV_TYPE_DISK,
+       FCOE_TASK_DEV_TYPE_TAPE,
+       MAX_FCOE_DEVICE_TYPE
+};
+
 /* Data sgl */
 struct fcoe_fast_sgl_ctx {
        struct regpair sgl_start_addr;
        struct scsi_init_func_queues q_params;
        __le16 mtu;
        __le16 sq_num_pages_in_pbl;
-       __le32 reserved;
+       __le32 reserved[3];
 };
 
 /* FCoE: Mode of the connection: Target or Initiator or both */
        __le32 rsrv;
 };
 
+/* FCoE SQE request type */
+enum fcoe_sqe_request_type {
+       SEND_FCOE_CMD,
+       SEND_FCOE_MIDPATH,
+       SEND_FCOE_ABTS_REQUEST,
+       FCOE_EXCHANGE_CLEANUP,
+       FCOE_SEQUENCE_RECOVERY,
+       SEND_FCOE_XFER_RDY,
+       SEND_FCOE_RSP,
+       SEND_FCOE_RSP_WITH_SENSE_DATA,
+       SEND_FCOE_TARGET_DATA,
+       SEND_FCOE_INITIATOR_DATA,
+       SEND_FCOE_XFER_CONTINUATION_RDY,
+       SEND_FCOE_TARGET_ABTS_RSP,
+       MAX_FCOE_SQE_REQUEST_TYPE
+};
+
 /* FCoe statistics request */
 struct fcoe_stat_ramrod_data {
        struct regpair stat_params_addr;
 };
 
+/* FCoE task type */
+enum fcoe_task_type {
+       FCOE_TASK_TYPE_WRITE_INITIATOR,
+       FCOE_TASK_TYPE_READ_INITIATOR,
+       FCOE_TASK_TYPE_MIDPATH,
+       FCOE_TASK_TYPE_UNSOLICITED,
+       FCOE_TASK_TYPE_ABTS,
+       FCOE_TASK_TYPE_EXCHANGE_CLEANUP,
+       FCOE_TASK_TYPE_SEQUENCE_CLEANUP,
+       FCOE_TASK_TYPE_WRITE_TARGET,
+       FCOE_TASK_TYPE_READ_TARGET,
+       FCOE_TASK_TYPE_RSP,
+       FCOE_TASK_TYPE_RSP_SENSE_DATA,
+       FCOE_TASK_TYPE_ABTS_TARGET,
+       FCOE_TASK_TYPE_ENUM_SIZE,
+       MAX_FCOE_TASK_TYPE
+};
+
 /* Per PF FCoE transmit path statistics - pStorm RAM structure */
 struct fcoe_tx_stat {
        struct regpair fcoe_tx_byte_cnt;
 
 #define CQE_ERROR_BITMAP_RCV_ON_INVALID_CONN   (0x10)
 #define CQE_ERROR_BITMAP_DATA_TRUNCATED                (0x20)
 
+/* Union of data bd_opaque / tq_tid */
+union bd_opaque_tq_union {
+       __le16 bd_opaque;
+       __le16 tq_tid;
+};
+
 /* ISCSI SGL entry */
 struct cqe_error_bitmap {
        u8 cqe_error_status_bits;
        __le32 data[12];
 };
 
+struct lun_mapper_addr_reserved {
+       struct regpair lun_mapper_addr;
+       u8 reserved0[8];
+};
+
+/* RDIF context for DIF on immediate */
+struct dif_on_immediate_params {
+       __le32 initial_ref_tag;
+       __le16 application_tag;
+       __le16 application_tag_mask;
+       __le16 flags1;
+#define DIF_ON_IMMEDIATE_PARAMS_VALIDATE_GUARD_MASK            0x1
+#define DIF_ON_IMMEDIATE_PARAMS_VALIDATE_GUARD_SHIFT           0
+#define DIF_ON_IMMEDIATE_PARAMS_VALIDATE_APP_TAG_MASK          0x1
+#define DIF_ON_IMMEDIATE_PARAMS_VALIDATE_APP_TAG_SHIFT         1
+#define DIF_ON_IMMEDIATE_PARAMS_VALIDATE_REF_TAG_MASK          0x1
+#define DIF_ON_IMMEDIATE_PARAMS_VALIDATE_REF_TAG_SHIFT         2
+#define DIF_ON_IMMEDIATE_PARAMS_FORWARD_GUARD_MASK             0x1
+#define DIF_ON_IMMEDIATE_PARAMS_FORWARD_GUARD_SHIFT            3
+#define DIF_ON_IMMEDIATE_PARAMS_FORWARD_APP_TAG_MASK           0x1
+#define DIF_ON_IMMEDIATE_PARAMS_FORWARD_APP_TAG_SHIFT          4
+#define DIF_ON_IMMEDIATE_PARAMS_FORWARD_REF_TAG_MASK           0x1
+#define DIF_ON_IMMEDIATE_PARAMS_FORWARD_REF_TAG_SHIFT          5
+#define DIF_ON_IMMEDIATE_PARAMS_INTERVAL_SIZE_MASK             0x1
+#define DIF_ON_IMMEDIATE_PARAMS_INTERVAL_SIZE_SHIFT            6
+#define DIF_ON_IMMEDIATE_PARAMS_NETWORK_INTERFACE_MASK         0x1
+#define DIF_ON_IMMEDIATE_PARAMS_NETWORK_INTERFACE_SHIFT                7
+#define DIF_ON_IMMEDIATE_PARAMS_HOST_INTERFACE_MASK            0x3
+#define DIF_ON_IMMEDIATE_PARAMS_HOST_INTERFACE_SHIFT           8
+#define DIF_ON_IMMEDIATE_PARAMS_REF_TAG_MASK_MASK              0xF
+#define DIF_ON_IMMEDIATE_PARAMS_REF_TAG_MASK_SHIFT             10
+#define DIF_ON_IMMEDIATE_PARAMS_FORWARD_APP_TAG_WITH_MASK_MASK 0x1
+#define DIF_ON_IMMEDIATE_PARAMS_FORWARD_APP_TAG_WITH_MASK_SHIFT        14
+#define DIF_ON_IMMEDIATE_PARAMS_FORWARD_REF_TAG_WITH_MASK_MASK 0x1
+#define DIF_ON_IMMEDIATE_PARAMS_FORWARD_REF_TAG_WITH_MASK_SHIFT        15
+       u8 flags0;
+#define DIF_ON_IMMEDIATE_PARAMS_RESERVED_MASK                  0x1
+#define DIF_ON_IMMEDIATE_PARAMS_RESERVED_SHIFT                 0
+#define DIF_ON_IMMEDIATE_PARAMS_IGNORE_APP_TAG_MASK            0x1
+#define DIF_ON_IMMEDIATE_PARAMS_IGNORE_APP_TAG_SHIFT           1
+#define DIF_ON_IMMEDIATE_PARAMS_INITIAL_REF_TAG_IS_VALID_MASK  0x1
+#define DIF_ON_IMMEDIATE_PARAMS_INITIAL_REF_TAG_IS_VALID_SHIFT 2
+#define DIF_ON_IMMEDIATE_PARAMS_HOST_GUARD_TYPE_MASK           0x1
+#define DIF_ON_IMMEDIATE_PARAMS_HOST_GUARD_TYPE_SHIFT          3
+#define DIF_ON_IMMEDIATE_PARAMS_PROTECTION_TYPE_MASK           0x3
+#define DIF_ON_IMMEDIATE_PARAMS_PROTECTION_TYPE_SHIFT          4
+#define DIF_ON_IMMEDIATE_PARAMS_CRC_SEED_MASK                  0x1
+#define DIF_ON_IMMEDIATE_PARAMS_CRC_SEED_SHIFT                 6
+#define DIF_ON_IMMEDIATE_PARAMS_KEEP_REF_TAG_CONST_MASK                0x1
+#define DIF_ON_IMMEDIATE_PARAMS_KEEP_REF_TAG_CONST_SHIFT       7
+       u8 reserved_zero[5];
+};
+
+/* iSCSI dif on immediate mode attributes union */
+union dif_configuration_params {
+       struct lun_mapper_addr_reserved lun_mapper_address;
+       struct dif_on_immediate_params def_dif_conf;
+};
+
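(A minimal sketch of programming the def_dif_conf leg of the union above, i.e.
DIF-on-immediate-data checking without a LUN mapper table. Field and flag
names come from the structures above; the helper name and values are
illustrative, and the connection-update ramrod that would carry this union via
dif_on_imme_params is omitted.)

static void example_fill_dif_on_imm(union dif_configuration_params *p)
{
	u16 flags1 = 0;

	memset(p, 0, sizeof(*p));
	p->def_dif_conf.initial_ref_tag = cpu_to_le32(0);

	/* Validate guard and reference tag on received immediate data */
	SET_FIELD(flags1, DIF_ON_IMMEDIATE_PARAMS_VALIDATE_GUARD, 1);
	SET_FIELD(flags1, DIF_ON_IMMEDIATE_PARAMS_VALIDATE_REF_TAG, 1);
	p->def_dif_conf.flags1 = cpu_to_le16(flags1);
}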
 /* Union of data/r2t sequence number */
 union iscsi_seq_num {
        __le16 data_sn;
 #define YSTORM_ISCSI_TASK_STATE_LOCAL_COMP_SHIFT       0
 #define YSTORM_ISCSI_TASK_STATE_SLOW_IO_MASK           0x1
 #define YSTORM_ISCSI_TASK_STATE_SLOW_IO_SHIFT          1
-#define YSTORM_ISCSI_TASK_STATE_RESERVED0_MASK         0x3F
-#define YSTORM_ISCSI_TASK_STATE_RESERVED0_SHIFT                2
+#define YSTORM_ISCSI_TASK_STATE_SET_DIF_OFFSET_MASK    0x1
+#define YSTORM_ISCSI_TASK_STATE_SET_DIF_OFFSET_SHIFT   2
+#define YSTORM_ISCSI_TASK_STATE_RESERVED0_MASK         0x1F
+#define YSTORM_ISCSI_TASK_STATE_RESERVED0_SHIFT                3
 };
 
 /* The iscsi storm task context of Ystorm */
        __le32 data_buffer_offset;
        u8 task_type;
        struct iscsi_dif_flags dif_flags;
-       u8 reserved0[2];
+       __le16 dif_task_icid;
        struct regpair sense_db;
        __le32 expected_itt;
        __le32 reserved1;
 #define ISCSI_REG1_RESERVED1_SHIFT     4
 };
 
+struct tqe_opaque {
+       __le16 opaque[2];
+};
+
 /* The iscsi storm task context of Ustorm */
 struct ustorm_iscsi_task_st_ctx {
        __le32 rem_rcv_len;
 #define USTORM_ISCSI_TASK_ST_CTX_RESERVED1_SHIFT       1
        struct iscsi_dif_flags dif_flags;
        __le16 reserved3;
-       __le32 reserved4;
+       struct tqe_opaque tqe_opaque_list;
        __le32 reserved5;
        __le32 reserved6;
        __le32 reserved7;
        __le32 stat_sn;
 };
 
+/* iSCSI connection statistics */
+struct iscsi_conn_stats_params {
+       struct regpair iscsi_tcp_tx_packets_cnt;
+       struct regpair iscsi_tcp_tx_bytes_cnt;
+       struct regpair iscsi_tcp_tx_rxmit_cnt;
+       struct regpair iscsi_tcp_rx_packets_cnt;
+       struct regpair iscsi_tcp_rx_bytes_cnt;
+       struct regpair iscsi_tcp_rx_dup_ack_cnt;
+       __le32 iscsi_tcp_rx_chksum_err_cnt;
+       __le32 reserved;
+};
+
 /* spe message header */
 struct iscsi_slow_path_hdr {
        u8 op_code;
 #define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_BLOCK_SIZE_SHIFT   4
 #define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_ON_HOST_EN_MASK    0x1
 #define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_ON_HOST_EN_SHIFT   5
-#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_RESERVED1_MASK         0x3
-#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_RESERVED1_SHIFT                6
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_ON_IMM_EN_MASK     0x1
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_ON_IMM_EN_SHIFT    6
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_LUN_MAPPER_EN_MASK     0x1
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_LUN_MAPPER_EN_SHIFT    7
        u8 reserved0[3];
        __le32 max_seq_size;
        __le32 max_send_pdu_length;
        __le32 max_recv_pdu_length;
        __le32 first_seq_length;
        __le32 exp_stat_sn;
+       union dif_configuration_params dif_on_imme_params;
 };
 
 /* iSCSI CQ element */
        u8 fw_dbg_field;
        u8 caused_conn_err;
        u8 reserved0[3];
-       __le32 reserved1[1];
+       __le32 data_truncated_bytes;
        union iscsi_task_hdr iscsi_hdr;
 };
 
        __le16 reserved0;
        u8 reserved1;
        u8 unsol_cqe_type;
-       struct regpair rqe_opaque;
+       __le16 rqe_opaque;
+       __le16 reserved2[3];
        union iscsi_task_hdr iscsi_hdr;
 };
 
 /* iscsi debug modes */
 struct iscsi_debug_modes {
        u8 flags;
-#define ISCSI_DEBUG_MODES_ASSERT_IF_RX_CONN_ERROR_MASK         0x1
-#define ISCSI_DEBUG_MODES_ASSERT_IF_RX_CONN_ERROR_SHIFT                0
-#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_RESET_MASK            0x1
-#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_RESET_SHIFT           1
-#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_FIN_MASK              0x1
-#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_FIN_SHIFT             2
-#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_CLEANUP_MASK          0x1
-#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_CLEANUP_SHIFT         3
-#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_REJECT_OR_ASYNC_MASK  0x1
-#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_REJECT_OR_ASYNC_SHIFT 4
-#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_NOP_MASK              0x1
-#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_NOP_SHIFT             5
-#define ISCSI_DEBUG_MODES_ASSERT_IF_DATA_DIGEST_ERROR_MASK     0x1
-#define ISCSI_DEBUG_MODES_ASSERT_IF_DATA_DIGEST_ERROR_SHIFT    6
-#define ISCSI_DEBUG_MODES_ASSERT_IF_DIF_ERROR_MASK             0x1
-#define ISCSI_DEBUG_MODES_ASSERT_IF_DIF_ERROR_SHIFT            7
+#define ISCSI_DEBUG_MODES_ASSERT_IF_RX_CONN_ERROR_MASK                 0x1
+#define ISCSI_DEBUG_MODES_ASSERT_IF_RX_CONN_ERROR_SHIFT                        0
+#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_RESET_MASK                    0x1
+#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_RESET_SHIFT                   1
+#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_FIN_MASK                      0x1
+#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_FIN_SHIFT                     2
+#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_CLEANUP_MASK                  0x1
+#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_CLEANUP_SHIFT                 3
+#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_REJECT_OR_ASYNC_MASK          0x1
+#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_REJECT_OR_ASYNC_SHIFT         4
+#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_NOP_MASK                      0x1
+#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_NOP_SHIFT                     5
+#define ISCSI_DEBUG_MODES_ASSERT_IF_DIF_OR_DATA_DIGEST_ERROR_MASK      0x1
+#define ISCSI_DEBUG_MODES_ASSERT_IF_DIF_OR_DATA_DIGEST_ERROR_SHIFT     6
+#define ISCSI_DEBUG_MODES_ASSERT_IF_HQ_CORRUPT_MASK                    0x1
+#define ISCSI_DEBUG_MODES_ASSERT_IF_HQ_CORRUPT_SHIFT                   7
 };
 
 /* iSCSI kernel completion queue IDs */
        ISCSI_EVENT_TYPE_CLEAR_SQ,
        ISCSI_EVENT_TYPE_TERMINATE_CONN,
        ISCSI_EVENT_TYPE_MAC_UPDATE_CONN,
+       ISCSI_EVENT_TYPE_COLLECT_STATS_CONN,
        ISCSI_EVENT_TYPE_ASYN_CONNECT_COMPLETE,
        ISCSI_EVENT_TYPE_ASYN_TERMINATE_DONE,
-       RESERVED9,
        ISCSI_EVENT_TYPE_START_OF_ERROR_TYPES = 10,
        ISCSI_EVENT_TYPE_ASYN_ABORT_RCVD,
        ISCSI_EVENT_TYPE_ASYN_CLOSE_RCVD,
        ISCSI_RAMROD_CMD_ID_TERMINATION_CONN = 5,
        ISCSI_RAMROD_CMD_ID_CLEAR_SQ = 6,
        ISCSI_RAMROD_CMD_ID_MAC_UPDATE = 7,
+       ISCSI_RAMROD_CMD_ID_CONN_STATS = 8,
        MAX_ISCSI_RAMROD_CMD_ID
 };
 
        struct tcp_offload_params_opt2 tcp;
 };
 
+/* iSCSI collect connection statistics request */
+struct iscsi_spe_conn_statistics {
+       struct iscsi_slow_path_hdr hdr;
+       __le16 conn_id;
+       __le32 fw_cid;
+       u8 reset_stats;
+       u8 reserved0[7];
+       struct regpair stats_cnts_addr;
+};
+
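(A minimal sketch of building the collect-statistics request introduced above.
The counters are expected to land in the iscsi_conn_stats_params block whose
DMA address is written into stats_cnts_addr; DMA_REGPAIR_LE() is the regpair
helper used earlier in this patch. The function name is illustrative, and the
slow-path submission plus the op-code written into 'hdr' are driver plumbing
that is omitted here.)

static void example_fill_conn_stats_spe(struct iscsi_spe_conn_statistics *spe,
					u16 conn_id, u32 fw_cid,
					dma_addr_t stats_phys, bool reset)
{
	spe->conn_id = cpu_to_le16(conn_id);
	spe->fw_cid = cpu_to_le32(fw_cid);
	/* Non-zero presumably asks the FW to also reset its counters */
	spe->reset_stats = reset ? 1 : 0;
	DMA_REGPAIR_LE(spe->stats_cnts_addr, stats_phys);
}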
 /* iSCSI connection termination request */
 struct iscsi_spe_conn_termination {
        struct iscsi_slow_path_hdr hdr;
        u8 num_r2tq_pages_in_ring;
        u8 num_uhq_pages_in_ring;
        u8 ll2_rx_queue_id;
-       u8 ooo_enable;
+       u8 flags;
+#define ISCSI_SPE_FUNC_INIT_COUNTERS_EN_MASK   0x1
+#define ISCSI_SPE_FUNC_INIT_COUNTERS_EN_SHIFT  0
+#define ISCSI_SPE_FUNC_INIT_RESERVED0_MASK     0x7F
+#define ISCSI_SPE_FUNC_INIT_RESERVED0_SHIFT    1
        struct iscsi_debug_modes debug_mode;
        __le16 reserved1;
        __le32 reserved2;
-       __le32 reserved3;
-       __le32 reserved4;
        struct scsi_init_func_params func_params;
        struct scsi_init_func_queues q_params;
 };
        ISCSI_TASK_TYPE_TARGET_READ,
        ISCSI_TASK_TYPE_TARGET_RESPONSE,
        ISCSI_TASK_TYPE_LOGIN_RESPONSE,
+       ISCSI_TASK_TYPE_TARGET_IMM_W_DIF,
        MAX_ISCSI_TASK_TYPE
 };
 
 /* Per PF iSCSI receive path statistics - mStorm RAM structure */
 struct mstorm_iscsi_stats_drv {
        struct regpair iscsi_rx_dropped_pdus_task_not_valid;
+       struct regpair iscsi_rx_dup_ack_cnt;
 };
 
 /* Per PF iSCSI transmit path statistics - pStorm RAM structure */
        struct regpair iscsi_rx_bytes_cnt;
        struct regpair iscsi_rx_packet_cnt;
        struct regpair iscsi_rx_new_ooo_isle_events_cnt;
+       struct regpair iscsi_rx_tcp_payload_bytes_cnt;
+       struct regpair iscsi_rx_tcp_pkt_cnt;
+       struct regpair iscsi_rx_pure_ack_cnt;
        __le32 iscsi_cmdq_threshold_cnt;
        __le32 iscsi_rq_threshold_cnt;
        __le32 iscsi_immq_threshold_cnt;
 struct xstorm_iscsi_stats_drv {
        struct regpair iscsi_tx_go_to_slow_start_event_cnt;
        struct regpair iscsi_tx_fast_retransmit_event_cnt;
+       struct regpair iscsi_tx_pure_ack_cnt;
+       struct regpair iscsi_tx_delayed_ack_cnt;
 };
 
 /* Per PF iSCSI transmit path statistics - yStorm RAM structure */
        struct regpair iscsi_tx_data_pdu_cnt;
        struct regpair iscsi_tx_r2t_pdu_cnt;
        struct regpair iscsi_tx_total_pdu_cnt;
+       struct regpair iscsi_tx_tcp_payload_bytes_cnt;
+       struct regpair iscsi_tx_tcp_pkt_cnt;
 };
 
 struct e4_tstorm_iscsi_task_ag_ctx {
 
        void *p_handle;
 };
 
+enum qed_filter_config_mode {
+       QED_FILTER_CONFIG_MODE_DISABLE,
+       QED_FILTER_CONFIG_MODE_5_TUPLE,
+       QED_FILTER_CONFIG_MODE_L4_PORT,
+       QED_FILTER_CONFIG_MODE_IP_DEST,
+};
+
+struct qed_ntuple_filter_params {
+       /* DMA-mapped address of the buffer holding the packet header to be
+        * used as the filter.
+        */
+       dma_addr_t addr;
+
+       /* Length of header in bytes */
+       u16 length;
+
+       /* Relative queue-id to receive classified packet */
+#define QED_RFS_NTUPLE_QID_RSS ((u16)-1)
+       u16 qid;
+
+       /* Identify the target by vport_id or, when b_is_vf is set, by vf_id */
+       bool b_is_vf;
+       u8 vport_id;
+       u8 vf_id;
+
+       /* True if this filter is to be added; false if it is to be removed */
+       bool b_is_add;
+};
+
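(A minimal sketch of the params-based n-tuple API introduced here, mirroring
the qede aRFS path earlier in this patch: a zeroed qed_ntuple_filter_params
addressed by vport (b_is_vf left false) steering to a specific Rx queue. The
function name and its arguments are illustrative; a real caller passes its own
filter object as the cookie, as qede does with 'n' above.)

static int example_add_ntuple_filter(const struct qed_eth_ops *ops,
				     struct qed_dev *cdev, void *cookie,
				     dma_addr_t hdr_dma, u16 hdr_len,
				     u16 rxq_id)
{
	struct qed_ntuple_filter_params params;

	memset(&params, 0, sizeof(params));
	params.addr = hdr_dma;		/* DMA address of the header buffer */
	params.length = hdr_len;	/* header length in bytes */
	params.qid = rxq_id;		/* or QED_RFS_NTUPLE_QID_RSS to spread via RSS */
	params.b_is_add = true;		/* false would remove the filter */

	return ops->ntuple_filter_config(cdev, cookie, &params);
}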
 struct qed_dev_eth_info {
        struct qed_dev_info common;
 
        int (*tunn_config)(struct qed_dev *cdev,
                           struct qed_tunn_params *params);
 
-       int (*ntuple_filter_config)(struct qed_dev *cdev, void *cookie,
-                                   dma_addr_t mapping, u16 length,
-                                   u16 vport_id, u16 rx_queue_id,
-                                   bool add_filter);
+       int (*ntuple_filter_config)(struct qed_dev *cdev,
+                                   void *cookie,
+                                   struct qed_ntuple_filter_params *params);
 
        int (*configure_arfs_searcher)(struct qed_dev *cdev,
-                                      bool en_searcher);
+                                      enum qed_filter_config_mode mode);
        int (*get_coalesce)(struct qed_dev *cdev, u16 *coal, void *handle);
 };
 
 
 /* Most of the parameters below are described in the FW iSCSI / TCP HSI */
 struct qed_iscsi_pf_params {
        u64 glbl_q_params_addr;
-       u64 bdq_pbl_base_addr[2];
-       u32 max_cwnd;
+       u64 bdq_pbl_base_addr[3];
        u16 cq_num_entries;
        u16 cmdq_num_entries;
        u32 two_msl_timer;
-       u16 dup_ack_threshold;
        u16 tx_sws_timer;
-       u16 min_rto;
-       u16 min_rto_rt;
-       u16 max_rto;
 
        /* The following parameters are used during HW-init
         * and these parameters need to be passed as arguments
 
        /* The following parameters are used during protocol-init */
        u16 half_way_close_timeout;
-       u16 bdq_xoff_threshold[2];
-       u16 bdq_xon_threshold[2];
+       u16 bdq_xoff_threshold[3];
+       u16 bdq_xon_threshold[3];
        u16 cmdq_xoff_threshold;
        u16 cmdq_xon_threshold;
        u16 rq_buffer_size;
        u8 gl_cmd_pi;
        u8 debug_mode;
        u8 ll2_ooo_queue_id;
-       u8 ooo_enable;
 
        u8 is_target;
-       u8 bdq_pbl_num_entries[2];
+       u8 is_soc_en;
+       u8 soc_num_of_blocks_log;
+       u8 bdq_pbl_num_entries[3];
 };
 
 struct qed_rdma_pf_params {
 
        u32 ss_thresh;
        u16 srtt;
        u16 rtt_var;
-       u32 ts_time;
        u32 ts_recent;
        u32 ts_recent_age;
        u32 total_rt;
        u16 mss;
        u8 snd_wnd_scale;
        u8 rcv_wnd_scale;
-       u32 ts_ticks_per_second;
        u16 da_timeout_value;
        u8 ack_frequency;
 };
 
        u32 opaque_data_1;
 
        /* GSI only */
-       u32 gid_dst[4];
+       u32 src_qp;
        u16 qp_id;
 
        union {
 
 /* SCSI CONSTANTS */
 /*********************/
 
-#define NUM_OF_CMDQS_CQS               (NUM_OF_GLOBAL_QUEUES / 2)
+#define SCSI_MAX_NUM_OF_CMDQS          (NUM_OF_GLOBAL_QUEUES / 2)
 #define BDQ_NUM_RESOURCES              (4)
 
 #define BDQ_ID_RQ                      (0)
 #define BDQ_ID_IMM_DATA                        (1)
-#define BDQ_NUM_IDS                    (2)
+#define BDQ_ID_TQ                      (2)
+#define BDQ_NUM_IDS                    (3)
 
 #define SCSI_NUM_SGES_SLOW_SGL_THR     8
 
 #define BDQ_MAX_EXTERNAL_RING_SIZE     BIT(15)
 
+/* SCSI op codes */
+#define SCSI_OPCODE_COMPARE_AND_WRITE  (0x89)
+#define SCSI_OPCODE_READ_10            (0x28)
+#define SCSI_OPCODE_WRITE_6            (0x0A)
+#define SCSI_OPCODE_WRITE_10           (0x2A)
+#define SCSI_OPCODE_WRITE_12           (0xAA)
+#define SCSI_OPCODE_WRITE_16           (0x8A)
+#define SCSI_OPCODE_WRITE_AND_VERIFY_10        (0x2E)
+#define SCSI_OPCODE_WRITE_AND_VERIFY_12        (0xAE)
+#define SCSI_OPCODE_WRITE_AND_VERIFY_16        (0x8E)
+
+/* iSCSI Drv opaque */
+struct iscsi_drv_opaque {
+       __le16 reserved_zero[3];
+       __le16 opaque;
+};
+
+/* Scsi 2B/8B opaque union */
+union scsi_opaque {
+       struct regpair fcoe_opaque;
+       struct iscsi_drv_opaque iscsi_opaque;
+};
+
 /* SCSI buffer descriptor */
 struct scsi_bd {
        struct regpair address;
-       struct regpair opaque;
+       union scsi_opaque opaque;
 };
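
(A minimal sketch of the two legs of the new opaque union, mirroring the qedf
and qedi BDQ PBL setup earlier in this patch: FCoE keeps a full regpair
cookie, while iSCSI stores a 16-bit index with the reserved words explicitly
zeroed. The function name is illustrative; upper_32_bits()/lower_32_bits()
stand in for the drivers' own U64_HI/U64_LO helpers.)

static void example_fill_bdq_bd(struct scsi_bd *bd, dma_addr_t buf_dma,
				u16 idx, bool is_fcoe)
{
	bd->address.hi = cpu_to_le32(upper_32_bits(buf_dma));
	bd->address.lo = cpu_to_le32(lower_32_bits(buf_dma));

	if (is_fcoe) {
		bd->opaque.fcoe_opaque.hi = 0;
		bd->opaque.fcoe_opaque.lo = cpu_to_le32(idx);
	} else {
		bd->opaque.iscsi_opaque.reserved_zero[0] = 0;
		bd->opaque.iscsi_opaque.reserved_zero[1] = 0;
		bd->opaque.iscsi_opaque.reserved_zero[2] = 0;
		bd->opaque.iscsi_opaque.opaque = cpu_to_le16(idx);
	}
}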
 
 /* Scsi Drv BDQ struct */
 #define SCSI_INIT_FUNC_QUEUES_IMM_DATA_VALID_SHIFT             1
 #define SCSI_INIT_FUNC_QUEUES_CMD_VALID_MASK                   0x1
 #define SCSI_INIT_FUNC_QUEUES_CMD_VALID_SHIFT                  2
-#define SCSI_INIT_FUNC_QUEUES_RESERVED_VALID_MASK              0x1F
-#define SCSI_INIT_FUNC_QUEUES_RESERVED_VALID_SHIFT             3
+#define SCSI_INIT_FUNC_QUEUES_TQ_VALID_MASK                    0x1
+#define SCSI_INIT_FUNC_QUEUES_TQ_VALID_SHIFT                   3
+#define SCSI_INIT_FUNC_QUEUES_SOC_EN_MASK                      0x1
+#define SCSI_INIT_FUNC_QUEUES_SOC_EN_SHIFT                     4
+#define SCSI_INIT_FUNC_QUEUES_SOC_NUM_OF_BLOCKS_LOG_MASK       0x7
+#define SCSI_INIT_FUNC_QUEUES_SOC_NUM_OF_BLOCKS_LOG_SHIFT      5
+       __le16 cq_cmdq_sb_num_arr[SCSI_MAX_NUM_OF_CMDQS];
        u8 num_queues;
        u8 queue_relative_offset;
        u8 cq_sb_pi;
        u8 cmdq_sb_pi;
-       __le16 cq_cmdq_sb_num_arr[NUM_OF_CMDQS_CQS];
-       __le16 reserved0;
        u8 bdq_pbl_num_entries[BDQ_NUM_IDS];
+       u8 reserved1;
        struct regpair bdq_pbl_base_address[BDQ_NUM_IDS];
        __le16 bdq_xoff_threshold[BDQ_NUM_IDS];
-       __le16 bdq_xon_threshold[BDQ_NUM_IDS];
        __le16 cmdq_xoff_threshold;
+       __le16 bdq_xon_threshold[BDQ_NUM_IDS];
        __le16 cmdq_xon_threshold;
-       __le32 reserved1;
 };
 
 /* Scsi Drv BDQ Data struct (2 BDQ IDs: 0 - RQ, 1 - Immediate Data) */
        u8 reserved[4];
 };
 
+/* SCSI Task Queue Element */
+struct scsi_tqe {
+       __le16 itid;
+};
+
 #endif /* __STORAGE_COMMON__ */
 
        __le16 remote_mac_addr_mid;
        __le16 remote_mac_addr_hi;
        __le16 vlan_id;
-       u8 flags;
+       __le16 flags;
 #define TCP_OFFLOAD_PARAMS_TS_EN_MASK                  0x1
 #define TCP_OFFLOAD_PARAMS_TS_EN_SHIFT                 0
 #define TCP_OFFLOAD_PARAMS_DA_EN_MASK                  0x1
 #define TCP_OFFLOAD_PARAMS_DA_EN_SHIFT                 1
 #define TCP_OFFLOAD_PARAMS_KA_EN_MASK                  0x1
 #define TCP_OFFLOAD_PARAMS_KA_EN_SHIFT                 2
+#define TCP_OFFLOAD_PARAMS_ECN_SENDER_EN_MASK          0x1
+#define TCP_OFFLOAD_PARAMS_ECN_SENDER_EN_SHIFT         3
+#define TCP_OFFLOAD_PARAMS_ECN_RECEIVER_EN_MASK                0x1
+#define TCP_OFFLOAD_PARAMS_ECN_RECEIVER_EN_SHIFT       4
 #define TCP_OFFLOAD_PARAMS_NAGLE_EN_MASK               0x1
-#define TCP_OFFLOAD_PARAMS_NAGLE_EN_SHIFT              3
+#define TCP_OFFLOAD_PARAMS_NAGLE_EN_SHIFT              5
 #define TCP_OFFLOAD_PARAMS_DA_CNT_EN_MASK              0x1
-#define TCP_OFFLOAD_PARAMS_DA_CNT_EN_SHIFT             4
+#define TCP_OFFLOAD_PARAMS_DA_CNT_EN_SHIFT             6
 #define TCP_OFFLOAD_PARAMS_FIN_SENT_MASK               0x1
-#define TCP_OFFLOAD_PARAMS_FIN_SENT_SHIFT              5
+#define TCP_OFFLOAD_PARAMS_FIN_SENT_SHIFT              7
 #define TCP_OFFLOAD_PARAMS_FIN_RECEIVED_MASK           0x1
-#define TCP_OFFLOAD_PARAMS_FIN_RECEIVED_SHIFT          6
-#define TCP_OFFLOAD_PARAMS_RESERVED0_MASK              0x1
-#define TCP_OFFLOAD_PARAMS_RESERVED0_SHIFT             7
+#define TCP_OFFLOAD_PARAMS_FIN_RECEIVED_SHIFT          8
+#define TCP_OFFLOAD_PARAMS_RESERVED_MASK               0x7F
+#define TCP_OFFLOAD_PARAMS_RESERVED_SHIFT              9
        u8 ip_version;
+       u8 reserved0[3];
        __le32 remote_ip[4];
        __le32 local_ip[4];
        __le32 flow_label;
        u8 rcv_wnd_scale;
        u8 connect_mode;
        __le16 srtt;
-       __le32 cwnd;
        __le32 ss_thresh;
-       __le16 reserved1;
+       __le32 rcv_wnd;
+       __le32 cwnd;
        u8 ka_max_probe_cnt;
        u8 dup_ack_theshold;
+       __le16 reserved1;
+       __le32 ka_timeout;
+       __le32 ka_interval;
+       __le32 max_rt_time;
+       __le32 initial_rcv_wnd;
        __le32 rcv_next;
        __le32 snd_una;
        __le32 snd_next;
        __le32 snd_max;
        __le32 snd_wnd;
-       __le32 rcv_wnd;
        __le32 snd_wl1;
        __le32 ts_recent;
        __le32 ts_recent_age;
        u8 rt_cnt;
        __le16 rtt_var;
        __le16 fw_internal;
-       __le32 ka_timeout;
-       __le32 ka_interval;
-       __le32 max_rt_time;
-       __le32 initial_rcv_wnd;
        u8 snd_wnd_scale;
        u8 ack_frequency;
        __le16 da_timeout_value;
-       __le32 reserved3[2];
+       __le32 reserved3;
 };
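
(A minimal sketch of setting the widened flags word above: with 'flags' now a
__le16, the new ECN bits are packed with SET_FIELD() the same way as the
existing ones and stored little-endian. The enclosing structure is assumed to
be struct tcp_offload_params, as the field prefixes suggest; the function name
and values are illustrative.)

static void example_fill_tcp_flags(struct tcp_offload_params *tcp,
				   bool ts_en, bool ecn_en)
{
	u16 flags = 0;

	SET_FIELD(flags, TCP_OFFLOAD_PARAMS_TS_EN, ts_en ? 1 : 0);
	SET_FIELD(flags, TCP_OFFLOAD_PARAMS_ECN_SENDER_EN, ecn_en ? 1 : 0);
	SET_FIELD(flags, TCP_OFFLOAD_PARAMS_ECN_RECEIVER_EN, ecn_en ? 1 : 0);

	tcp->flags = cpu_to_le16(flags);
}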
 
 /* tcp offload parameters */
        __le16 remote_mac_addr_mid;
        __le16 remote_mac_addr_hi;
        __le16 vlan_id;
-       u8 flags;
+       __le16 flags;
 #define TCP_OFFLOAD_PARAMS_OPT2_TS_EN_MASK     0x1
 #define TCP_OFFLOAD_PARAMS_OPT2_TS_EN_SHIFT    0
 #define TCP_OFFLOAD_PARAMS_OPT2_DA_EN_MASK     0x1
 #define TCP_OFFLOAD_PARAMS_OPT2_DA_EN_SHIFT    1
 #define TCP_OFFLOAD_PARAMS_OPT2_KA_EN_MASK     0x1
 #define TCP_OFFLOAD_PARAMS_OPT2_KA_EN_SHIFT    2
-#define TCP_OFFLOAD_PARAMS_OPT2_RESERVED0_MASK 0x1F
-#define TCP_OFFLOAD_PARAMS_OPT2_RESERVED0_SHIFT        3
+#define TCP_OFFLOAD_PARAMS_OPT2_ECN_EN_MASK    0x1
+#define TCP_OFFLOAD_PARAMS_OPT2_ECN_EN_SHIFT   3
+#define TCP_OFFLOAD_PARAMS_OPT2_RESERVED0_MASK 0xFFF
+#define TCP_OFFLOAD_PARAMS_OPT2_RESERVED0_SHIFT        4
        u8 ip_version;
+       u8 reserved1[3];
        __le32 remote_ip[4];
        __le32 local_ip[4];
        __le32 flow_label;
        __le16 syn_ip_payload_length;
        __le32 syn_phy_addr_lo;
        __le32 syn_phy_addr_hi;
-       __le32 reserved1[22];
+       __le32 cwnd;
+       u8 ka_max_probe_cnt;
+       u8 reserved2[3];
+       __le32 ka_timeout;
+       __le32 ka_interval;
+       __le32 max_rt_time;
+       __le32 reserved3[16];
 };
 
 /* tcp IPv4/IPv6 enum */