                        bp->bnapi[i] = bnapi;
                        bp->bnapi[i]->index = i;
                        bp->bnapi[i]->bp = bp;
+                       if (bp->flags & BNXT_FLAG_CHIP_P5) {
+                               struct bnxt_cp_ring_info *cpr =
+                                       &bp->bnapi[i]->cp_ring;
+
+                               cpr->cp_ring_struct.ring_mem.flags =
+                                       BNXT_RMEM_RING_PTE_FLAG;
+                       }
                }
 
                bp->rx_ring = kcalloc(bp->rx_nr_rings,
                        return -ENOMEM;
 
                for (i = 0; i < bp->rx_nr_rings; i++) {
-                       bp->rx_ring[i].bnapi = bp->bnapi[i];
+                       struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
+
+                       if (bp->flags & BNXT_FLAG_CHIP_P5) {
+                               rxr->rx_ring_struct.ring_mem.flags =
+                                       BNXT_RMEM_RING_PTE_FLAG;
+                               rxr->rx_agg_ring_struct.ring_mem.flags =
+                                       BNXT_RMEM_RING_PTE_FLAG;
+                       }
+                       rxr->bnapi = bp->bnapi[i];
                        bp->bnapi[i]->rx_ring = &bp->rx_ring[i];
                }
 
                        j = bp->rx_nr_rings;
 
                for (i = 0; i < bp->tx_nr_rings; i++, j++) {
-                       bp->tx_ring[i].bnapi = bp->bnapi[j];
-                       bp->bnapi[j]->tx_ring = &bp->tx_ring[i];
+                       struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
+
+                       if (bp->flags & BNXT_FLAG_CHIP_P5)
+                               txr->tx_ring_struct.ring_mem.flags =
+                                       BNXT_RMEM_RING_PTE_FLAG;
+                       txr->bnapi = bp->bnapi[j];
+                       bp->bnapi[j]->tx_ring = txr;
                        bp->tx_ring_map[i] = bp->tx_nr_rings_xdp + i;
                        if (i >= bp->tx_nr_rings_xdp) {
-                               bp->tx_ring[i].txq_index = i -
-                                       bp->tx_nr_rings_xdp;
+                               txr->txq_index = i - bp->tx_nr_rings_xdp;
                                bp->bnapi[j]->tx_int = bnxt_tx_int;
                        } else {
                                bp->bnapi[j]->flags |= BNXT_NAPI_FLAG_XDP;
                        goto init_err_pci_clean;
        }
 
+       if (BNXT_CHIP_P5(bp))
+               bp->flags |= BNXT_FLAG_CHIP_P5;
+
        rc = bnxt_hwrm_func_reset(bp);
        if (rc)
                goto init_err_pci_clean;
                           NETIF_F_GSO_PARTIAL | NETIF_F_RXHASH |
                           NETIF_F_RXCSUM | NETIF_F_GRO;
 
-       if (!BNXT_CHIP_TYPE_NITRO_A0(bp))
+       if (BNXT_SUPPORTS_TPA(bp))
                dev->hw_features |= NETIF_F_LRO;
 
        dev->hw_enc_features =
        dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA;
        dev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX |
                            NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX;
-       if (!BNXT_CHIP_TYPE_NITRO_A0(bp))
+       if (BNXT_SUPPORTS_TPA(bp))
                dev->hw_features |= NETIF_F_GRO_HW;
        dev->features |= dev->hw_features | NETIF_F_HIGHDMA;
        if (dev->features & NETIF_F_GRO_HW)
        init_waitqueue_head(&bp->sriov_cfg_wait);
        mutex_init(&bp->sriov_lock);
 #endif
-       bp->gro_func = bnxt_gro_func_5730x;
-       if (BNXT_CHIP_P4_PLUS(bp))
-               bp->gro_func = bnxt_gro_func_5731x;
-       else
+       if (BNXT_SUPPORTS_TPA(bp)) {
+               bp->gro_func = bnxt_gro_func_5730x;
+               if (BNXT_CHIP_P4(bp))
+                       bp->gro_func = bnxt_gro_func_5731x;
+       }
+       if (!BNXT_CHIP_P4_PLUS(bp))
                bp->flags |= BNXT_FLAG_DOUBLE_DB;
 
        rc = bnxt_hwrm_func_drv_rgtr(bp);
                           VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 |
                           VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 |
                           VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
-       if (BNXT_CHIP_P4_PLUS(bp) && bp->hwrm_spec_code >= 0x10501) {
+       if (BNXT_CHIP_P4(bp) && bp->hwrm_spec_code >= 0x10501) {
                bp->flags |= BNXT_FLAG_UDP_RSS_CAP;
                bp->rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 |
                                    VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
 
        ((rx_tpa_end_ext)->rx_tpa_end_cmp_errors_v2 &                   \
         cpu_to_le32(RX_TPA_END_CMP_ERRORS))
 
+struct nqe_cn {
+       __le16  type;
+       #define NQ_CN_TYPE_MASK           0x3fUL
+       #define NQ_CN_TYPE_SFT            0
+       #define NQ_CN_TYPE_CQ_NOTIFICATION  0x30UL
+       #define NQ_CN_TYPE_LAST            NQ_CN_TYPE_CQ_NOTIFICATION
+       __le16  reserved16;
+       __le32  cq_handle_low;
+       __le32  v;
+       #define NQ_CN_V     0x1UL
+       __le32  cq_handle_high;
+};
+
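
Aside (not part of the patch): a minimal sketch of how the masks above decode
one NQ entry; the function name is hypothetical, and a check of the NQ_CN_V
valid bit against the ring's current pass would also be needed before trusting
the entry.

        /* Illustrative only: classify an NQ entry and recover the 64-bit
         * handle of the completion queue it points at.
         */
        static bool bnxt_nqe_is_cq_notification(const struct nqe_cn *nqe,
                                                u64 *cq_handle)
        {
                u16 type = le16_to_cpu(nqe->type) & NQ_CN_TYPE_MASK;

                *cq_handle = ((u64)le32_to_cpu(nqe->cq_handle_high) << 32) |
                             le32_to_cpu(nqe->cq_handle_low);
                return type == NQ_CN_TYPE_CQ_NOTIFICATION;
        }
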
 #define DB_IDX_MASK                                            0xffffff
 #define DB_IDX_VALID                                           (0x1 << 26)
 #define DB_IRQ_DIS                                             (0x1 << 27)
 #define BNXT_MIN_ROCE_CP_RINGS 2
 #define BNXT_MIN_ROCE_STAT_CTXS        1
 
+/* 64-bit doorbell */
+#define DBR_INDEX_MASK                                 0x0000000000ffffffULL
+#define DBR_XID_MASK                                   0x000fffff00000000ULL
+#define DBR_XID_SFT                                    32
+#define DBR_PATH_L2                                    (0x1ULL << 56)
+#define DBR_TYPE_SQ                                    (0x0ULL << 60)
+#define DBR_TYPE_RQ                                    (0x1ULL << 60)
+#define DBR_TYPE_SRQ                                   (0x2ULL << 60)
+#define DBR_TYPE_SRQ_ARM                               (0x3ULL << 60)
+#define DBR_TYPE_CQ                                    (0x4ULL << 60)
+#define DBR_TYPE_CQ_ARMSE                              (0x5ULL << 60)
+#define DBR_TYPE_CQ_ARMALL                             (0x6ULL << 60)
+#define DBR_TYPE_CQ_ARMENA                             (0x7ULL << 60)
+#define DBR_TYPE_SRQ_ARMENA                            (0x8ULL << 60)
+#define DBR_TYPE_CQ_CUTOFF_ACK                         (0x9ULL << 60)
+#define DBR_TYPE_NQ                                    (0xaULL << 60)
+#define DBR_TYPE_NQ_ARM                                        (0xbULL << 60)
+#define DBR_TYPE_NULL                                  (0xfULL << 60)
+
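
Aside (not part of the patch): a worked example of how a 64-bit L2 doorbell
value is composed from the fields above; the function and parameter names are
illustrative.

        /* Sketch: build a TX (SQ) doorbell value.  "xid" is the ring's
         * doorbell ID and "idx" the producer index; DBR_TYPE_SQ is 0 but
         * is OR'ed in to show where the type nibble lives.
         */
        static u64 bnxt_mk_sq_db(u32 xid, u32 idx)
        {
                return DBR_PATH_L2 | DBR_TYPE_SQ |
                       (((u64)xid << DBR_XID_SFT) & DBR_XID_MASK) |
                       (idx & DBR_INDEX_MASK);
        }
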
 #define INVALID_HW_RING_ID     ((u16)-1)
 
 /* The hardware supports certain page sizes.  Use the supported page sizes
 
        struct net_dim          dim;
 
-       struct tx_cmp           *cp_desc_ring[MAX_CP_PAGES];
+       union {
+               struct tx_cmp   *cp_desc_ring[MAX_CP_PAGES];
+               struct nqe_cn   *nq_desc_ring[MAX_CP_PAGES];
+       };
 
        dma_addr_t              cp_desc_mapping[MAX_CP_PAGES];
 
        u64                     rx_l4_csum_errors;
 
        struct bnxt_ring_struct cp_ring_struct;
+
+       struct bnxt_cp_ring_info *cp_ring_arr[2];
 };
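
Aside (not part of the patch): with the union above, the same descriptor pages
can be read either as TX/RX completions or, on P5 chips, as NQ entries.  A
hedged one-line sketch, assuming the driver's existing CP_RING()/CP_IDX()
index helpers and an in-scope "cpr"/"cons":

        struct nqe_cn *nqe = &cpr->nq_desc_ring[CP_RING(cons)][CP_IDX(cons)];
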
 
 struct bnxt_napi {
 
 #define CHIP_NUM_5745X         0xd730
 
+#define CHIP_NUM_57500         0x1750
+
 #define CHIP_NUM_58802         0xd802
 #define CHIP_NUM_58804         0xd804
 #define CHIP_NUM_58808         0xd808
        atomic_t                intr_sem;
 
        u32                     flags;
+       #define BNXT_FLAG_CHIP_P5       0x1
        #define BNXT_FLAG_VF            0x2
        #define BNXT_FLAG_LRO           0x4
 #ifdef CONFIG_INET
 #define BNXT_SINGLE_PF(bp)     (BNXT_PF(bp) && !BNXT_NPAR(bp) && !BNXT_MH(bp))
 #define BNXT_CHIP_TYPE_NITRO_A0(bp) ((bp)->flags & BNXT_FLAG_CHIP_NITRO_A0)
 #define BNXT_RX_PAGE_MODE(bp)  ((bp)->flags & BNXT_FLAG_RX_PAGE_MODE)
+#define BNXT_SUPPORTS_TPA(bp)  (!BNXT_CHIP_TYPE_NITRO_A0(bp) &&        \
+                                !((bp)->flags & BNXT_FLAG_CHIP_P5))
 
-/* Chip class phase 4 and later */
-#define BNXT_CHIP_P4_PLUS(bp)                  \
+/* Chip class phase 5 */
+#define BNXT_CHIP_P5(bp)                       \
+       ((bp)->chip_num == CHIP_NUM_57500)
+
+/* Chip class phase 4.x */
+#define BNXT_CHIP_P4(bp)                       \
        (BNXT_CHIP_NUM_57X1X((bp)->chip_num) || \
         BNXT_CHIP_NUM_5745X((bp)->chip_num) || \
         BNXT_CHIP_NUM_588XX((bp)->chip_num) || \
         (BNXT_CHIP_NUM_58700((bp)->chip_num) &&        \
          !BNXT_CHIP_TYPE_NITRO_A0(bp)))
 
+#define BNXT_CHIP_P4_PLUS(bp)                  \
+       (BNXT_CHIP_P4(bp) || BNXT_CHIP_P5(bp))
+
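
Aside (not part of the patch): an illustrative evaluation of the reworked
chip-class macros for the new device.

        /* A 57500 device after probe (bp->chip_num == CHIP_NUM_57500 and
         * BNXT_FLAG_CHIP_P5 set) evaluates as:
         *   BNXT_CHIP_P5(bp)      -> true
         *   BNXT_CHIP_P4(bp)      -> false
         *   BNXT_CHIP_P4_PLUS(bp) -> true
         *   BNXT_SUPPORTS_TPA(bp) -> false (TPA is not enabled on P5 here)
         * Older P4-class chips keep their previous behavior.
         */
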
        struct bnxt_en_dev      *edev;
        struct bnxt_en_dev *    (*ulp_probe)(struct net_device *);