return (cfg & CMR_P2X_SEL_MASK) >> CMR_P2X_SEL_SHIFT;
 }
 
+/* Return the X2P reset bit (in CGXX_CMR_GLOBAL_CONFIG) for the NIX block
+ * this CGX feeds. Only 98xx silicon has two NIX blocks; the block a CGX is
+ * wired to is read from the P2X selection of its first active LMAC.
+ */
+static u8 cgx_get_nix_resetbit(struct cgx *cgx)
+{
+       int first_lmac;
+       u8 p2x;
+
+       /* Silicons other than 98XX support only the NIX0 block */
+       if (cgx->pdev->subsystem_device != PCI_SUBSYS_DEVID_98XX)
+               return CGX_NIX0_RESET;
+
+       first_lmac = find_first_bit(&cgx->lmac_bmap, cgx->max_lmac_per_mac);
+       p2x = cgx_lmac_get_p2x(cgx->cgx_id, first_lmac);
+
+       if (p2x == CMR_P2X_SEL_NIX1)
+               return CGX_NIX1_RESET;
+
+       return CGX_NIX0_RESET;
+}
+
 /* Ensure the required lock for event queue(where asynchronous events are
  * posted) is acquired before calling this API. Else an asynchronous event(with
  * latest link status) can reach the destination before this function returns
                lmac->lmac_type = cgx->mac_ops->get_lmac_type(cgx, lmac->lmac_id);
        }
 
+       /* Start X2P reset on given MAC block */
+       cgx->mac_ops->mac_x2p_reset(cgx, true);
        return cgx_lmac_verify_fwi_version(cgx);
 
 err_bitmap_free:
                return 0x60;
 }
 
+/* Assert/deassert X2P reset towards the NIX block fed by this CGX.
+ *
+ * enable == true: stop packet receive on every active LMAC, give in-flight
+ * traffic time to drain, then set the per-NIX reset bit and the NSCI drop
+ * bit in CGXX_CMR_GLOBAL_CONFIG.
+ * enable == false: clear those same bits again (RX re-enable is done
+ * separately by the caller).
+ */
+static void cgx_x2p_reset(void *cgxd, bool enable)
+{
+       struct cgx *cgx = cgxd;
+       int lmac_id;
+       u64 cfg;
+
+       if (enable) {
+               /* Quiesce RX on all LMACs before touching the reset bits */
+               for_each_set_bit(lmac_id, &cgx->lmac_bmap, cgx->max_lmac_per_mac)
+                       cgx->mac_ops->mac_enadis_rx(cgx, lmac_id, false);
+
+               /* Allow in-flight packets to drain */
+               usleep_range(1000, 2000);
+
+               cfg = cgx_read(cgx, 0, CGXX_CMR_GLOBAL_CONFIG);
+               cfg |= cgx_get_nix_resetbit(cgx) | CGX_NSCI_DROP;
+               cgx_write(cgx, 0, CGXX_CMR_GLOBAL_CONFIG, cfg);
+       } else {
+               cfg = cgx_read(cgx, 0, CGXX_CMR_GLOBAL_CONFIG);
+               cfg &= ~(cgx_get_nix_resetbit(cgx) | CGX_NSCI_DROP);
+               cgx_write(cgx, 0, CGXX_CMR_GLOBAL_CONFIG, cfg);
+       }
+}
+
+/* Enable or disable packet receive on one LMAC by toggling DATA_PKT_RX_EN
+ * in CGXX_CMRX_CFG (read-modify-write).
+ *
+ * Returns 0 on success, -ENODEV if @lmac_id is not a valid/active LMAC.
+ */
+static int cgx_enadis_rx(void *cgxd, int lmac_id, bool enable)
+{
+       struct cgx *cgx = cgxd;
+       u64 cfg;
+
+       if (!is_lmac_valid(cgx, lmac_id))
+               return -ENODEV;
+
+       cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_CFG);
+       if (enable)
+               cfg |= DATA_PKT_RX_EN;
+       else
+               cfg &= ~DATA_PKT_RX_EN;
+       cgx_write(cgx, lmac_id, CGXX_CMRX_CFG, cfg);
+       return 0;
+}
+
 static struct mac_ops  cgx_mac_ops    = {
        .name           =       "cgx",
        .csr_offset     =       0,
        .mac_get_pfc_frm_cfg   =        cgx_lmac_get_pfc_frm_cfg,
        .mac_reset   =                  cgx_lmac_reset,
        .mac_stats_reset       =        cgx_stats_reset,
+       .mac_x2p_reset                   =      cgx_x2p_reset,
+       .mac_enadis_rx                   =      cgx_enadis_rx,
 };
 
 static int cgx_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 
 #define CGX_LMAC_TYPE_MASK             0xF
 #define CGXX_CMRX_INT                  0x040
 #define FW_CGX_INT                     BIT_ULL(1)
+#define CGXX_CMR_GLOBAL_CONFIG          0x08
+#define CGX_NIX0_RESET                 BIT_ULL(2)
+#define CGX_NIX1_RESET                 BIT_ULL(3)
+#define CGX_NSCI_DROP                  BIT_ULL(9)
 #define CGXX_CMRX_INT_ENA_W1S          0x058
 #define CGXX_CMRX_RX_ID_MAP            0x060
 #define CGXX_CMRX_RX_STAT0             0x070
 
        int                     (*get_fec_stats)(void *cgxd, int lmac_id,
                                                 struct cgx_fec_stats_rsp *rsp);
        int                     (*mac_stats_reset)(void *cgxd, int lmac_id);
+       void                    (*mac_x2p_reset)(void *cgxd, bool enable);
+       int                     (*mac_enadis_rx)(void *cgxd, int lmac_id, bool enable);
 };
 
 struct cgx {
 
        .mac_get_pfc_frm_cfg   =        rpm_lmac_get_pfc_frm_cfg,
        .mac_reset   =                  rpm_lmac_reset,
        .mac_stats_reset                 =        rpm_stats_reset,
+       .mac_x2p_reset                   =        rpm_x2p_reset,
+       .mac_enadis_rx                   =        rpm_enadis_rx,
 };
 
 static struct mac_ops          rpm2_mac_ops   = {
        .mac_get_pfc_frm_cfg   =        rpm_lmac_get_pfc_frm_cfg,
        .mac_reset   =                  rpm_lmac_reset,
        .mac_stats_reset            =   rpm_stats_reset,
+       .mac_x2p_reset              =   rpm_x2p_reset,
+       .mac_enadis_rx              =   rpm_enadis_rx,
 };
 
 bool is_dev_rpm2(void *rpmd)
 
        return 0;
 }
+
+/* RPM counterpart of cgx_x2p_reset(): assert/deassert X2P reset towards NIX.
+ *
+ * enable == true: stop RX on all active LMACs, let in-flight traffic drain,
+ * then set the NIX0 reset bit in RPMX_CMR_GLOBAL_CFG.
+ * enable == false: clear the reset bit.
+ *
+ * NOTE(review): unlike the CGX variant, only RPM_NIX0_RESET is used here and
+ * no drop bit is set — presumably RPM blocks feed NIX0 only; confirm against
+ * the hardware manual.
+ */
+void rpm_x2p_reset(void *rpmd, bool enable)
+{
+       rpm_t *rpm = rpmd;
+       int lmac_id;
+       u64 cfg;
+
+       if (enable) {
+               /* Quiesce RX on all LMACs before asserting the reset */
+               for_each_set_bit(lmac_id, &rpm->lmac_bmap, rpm->max_lmac_per_mac)
+                       rpm->mac_ops->mac_enadis_rx(rpm, lmac_id, false);
+
+               /* Allow in-flight packets to drain */
+               usleep_range(1000, 2000);
+
+               cfg = rpm_read(rpm, 0, RPMX_CMR_GLOBAL_CFG);
+               rpm_write(rpm, 0, RPMX_CMR_GLOBAL_CFG, cfg | RPM_NIX0_RESET);
+       } else {
+               cfg = rpm_read(rpm, 0, RPMX_CMR_GLOBAL_CFG);
+               cfg &= ~RPM_NIX0_RESET;
+               rpm_write(rpm, 0, RPMX_CMR_GLOBAL_CFG, cfg);
+       }
+}
+
+/* Enable or disable packet receive on one RPM LMAC by toggling RPM_RX_EN
+ * in RPMX_MTI_MAC100X_COMMAND_CONFIG (read-modify-write).
+ *
+ * Returns 0 on success, -ENODEV if @lmac_id is not a valid/active LMAC.
+ */
+int rpm_enadis_rx(void *rpmd, int lmac_id, bool enable)
+{
+       rpm_t *rpm = rpmd;
+       u64 cfg;
+
+       if (!is_lmac_valid(rpm, lmac_id))
+               return -ENODEV;
+
+       cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
+       if (enable)
+               cfg |= RPM_RX_EN;
+       else
+               cfg &= ~RPM_RX_EN;
+       rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
+       return 0;
+}
 
 
 /* Registers */
 #define RPMX_CMRX_CFG                  0x00
+#define RPMX_CMR_GLOBAL_CFG            0x08
+#define RPM_NIX0_RESET                 BIT_ULL(3)
 #define RPMX_RX_TS_PREPEND              BIT_ULL(22)
 #define RPMX_TX_PTP_1S_SUPPORT          BIT_ULL(17)
 #define RPMX_CMRX_RX_ID_MAP            0x80
 int rpm_get_fec_stats(void *cgxd, int lmac_id, struct cgx_fec_stats_rsp *rsp);
 int rpm_lmac_reset(void *rpmd, int lmac_id, u8 pf_req_flr);
 int rpm_stats_reset(void *rpmd, int lmac_id);
+void rpm_x2p_reset(void *rpmd, bool enable);
+int rpm_enadis_rx(void *rpmd, int lmac_id, bool enable);
 #endif /* RPM_H */
 
        }
 
        rvu_program_channels(rvu);
+       cgx_start_linkup(rvu);
 
        err = rvu_mcs_init(rvu);
        if (err) {
 
 int rvu_cgx_cfg_pause_frm(struct rvu *rvu, u16 pcifunc, u8 tx_pause, u8 rx_pause);
 void rvu_mac_reset(struct rvu *rvu, u16 pcifunc);
 u32 rvu_cgx_get_lmac_fifolen(struct rvu *rvu, int cgx, int lmac);
+void cgx_start_linkup(struct rvu *rvu);
 int npc_get_nixlf_mcam_index(struct npc_mcam *mcam, u16 pcifunc, int nixlf,
                             int type);
 bool is_mcam_entry_enabled(struct rvu *rvu, struct npc_mcam *mcam, int blkaddr,
 
 
 int rvu_cgx_init(struct rvu *rvu)
 {
+       struct mac_ops *mac_ops;
        int cgx, err;
        void *cgxd;
 
        if (err)
                return err;
 
+       /* Clear X2P reset on all MAC blocks */
+       for (cgx = 0; cgx < rvu->cgx_cnt_max; cgx++) {
+               cgxd = rvu_cgx_pdata(cgx, rvu);
+               if (!cgxd)
+                       continue;
+               mac_ops = get_mac_ops(cgxd);
+               mac_ops->mac_x2p_reset(cgxd, false);
+       }
+
        /* Register for CGX events */
        err = cgx_lmac_event_handler_init(rvu);
        if (err)
 
        mutex_init(&rvu->cgx_cfg_lock);
 
-       /* Ensure event handler registration is completed, before
-        * we turn on the links
-        */
-       mb();
+       return 0;
+}
+
+/* Re-enable RX on all active LMACs of every CGX/RPM block, then start the
+ * link-up sequence on all CGX ports. Called from rvu_nix after channels are
+ * programmed (rvu_program_channels()) so traffic only flows once NIX is
+ * ready — this is the counterpart of the RX disable done in mac_x2p_reset().
+ */
+void cgx_start_linkup(struct rvu *rvu)
+{
+       unsigned long lmac_bmap;
+       struct mac_ops *mac_ops;
+       int cgx, lmac, err;
+       void *cgxd;
+
+       /* Enable receive on all LMACS */
+       /* NOTE(review): this loop bound uses '<=' while the X2P-reset-clear
+        * loop in rvu_cgx_init() uses '<'; the extra iteration appears to
+        * rely on rvu_cgx_pdata() returning NULL for out-of-range ids —
+        * confirm and consider making the bounds consistent.
+        */
+       for (cgx = 0; cgx <= rvu->cgx_cnt_max; cgx++) {
+               cgxd = rvu_cgx_pdata(cgx, rvu);
+               if (!cgxd)
+                       continue;
+               mac_ops = get_mac_ops(cgxd);
+               lmac_bmap = cgx_get_lmac_bmap(cgxd);
+               for_each_set_bit(lmac, &lmac_bmap, rvu->hw->lmac_per_cgx)
+                       mac_ops->mac_enadis_rx(cgxd, lmac, true);
+       }
 
        /* Do link up for all CGX ports */
        for (cgx = 0; cgx <= rvu->cgx_cnt_max; cgx++) {
                                "Link up process failed to start on cgx %d\n",
                                cgx);
        }
-
-       return 0;
 }
 
 int rvu_cgx_exit(struct rvu *rvu)