This patch configures the classifier with the page pool information.
Signed-off-by: Iyappan Subramanian <isubramanian@apm.com>
Signed-off-by: Quan Nguyen <qnguyen@apm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
 {
        buf[0] = SET_VAL(CLE_DROP, dbptr->drop);
        buf[4] = SET_VAL(CLE_FPSEL, dbptr->fpsel) |
+                SET_VAL(CLE_NFPSEL, dbptr->nxtfpsel) |
                 SET_VAL(CLE_DSTQIDL, dbptr->dstqid);
 
        buf[5] = SET_VAL(CLE_DSTQIDH, (u32)dbptr->dstqid >> CLE_DSTQIDL_LEN) |
                fpsel = xgene_enet_get_fpsel(pool_id);
                dstqid = xgene_enet_dst_ring_num(pdata->rx_ring[idx]);
                nfpsel = 0;
-               idt_reg = 0;
+               if (pdata->rx_ring[idx]->page_pool) {
+                       pool_id = pdata->rx_ring[idx]->page_pool->id;
+                       nfpsel = xgene_enet_get_fpsel(pool_id);
+               }
 
+               idt_reg = 0;
                xgene_cle_idt_to_hw(pdata, dstqid, fpsel, nfpsel, &idt_reg);
                ret = xgene_cle_dram_wr(&pdata->cle, &idt_reg, 1, i,
                                        RSS_IDT, CLE_CMD_WR);
 static int xgene_enet_cle_init(struct xgene_enet_pdata *pdata)
 {
        struct xgene_enet_cle *enet_cle = &pdata->cle;
+       u32 def_qid, def_fpsel, def_nxtfpsel, pool_id;
        struct xgene_cle_dbptr dbptr[DB_MAX_PTRS];
        struct xgene_cle_ptree_branch *br;
-       u32 def_qid, def_fpsel, pool_id;
        struct xgene_cle_ptree *ptree;
        struct xgene_cle_ptree_kn kn;
        int ret;
        def_qid = xgene_enet_dst_ring_num(pdata->rx_ring[0]);
        pool_id = pdata->rx_ring[0]->buf_pool->id;
        def_fpsel = xgene_enet_get_fpsel(pool_id);
+       def_nxtfpsel = 0;
+       if (pdata->rx_ring[0]->page_pool) {
+               pool_id = pdata->rx_ring[0]->page_pool->id;
+               def_nxtfpsel = xgene_enet_get_fpsel(pool_id);
+       }
 
        memset(dbptr, 0, sizeof(struct xgene_cle_dbptr) * DB_MAX_PTRS);
        dbptr[DB_RES_ACCEPT].fpsel =  def_fpsel;
+       dbptr[DB_RES_ACCEPT].nxtfpsel = def_nxtfpsel;
        dbptr[DB_RES_ACCEPT].dstqid = def_qid;
        dbptr[DB_RES_ACCEPT].cle_priority = 1;
 
        dbptr[DB_RES_DEF].fpsel = def_fpsel;
+       dbptr[DB_RES_DEF].nxtfpsel = def_nxtfpsel;
        dbptr[DB_RES_DEF].dstqid = def_qid;
        dbptr[DB_RES_DEF].cle_priority = 7;
        xgene_cle_setup_def_dbptr(pdata, enet_cle, &dbptr[DB_RES_DEF],
 
 #define CLE_DSTQIDH_LEN                5
 #define CLE_FPSEL_POS          21
 #define CLE_FPSEL_LEN          4
+#define CLE_NFPSEL_POS         17
+#define CLE_NFPSEL_LEN         4
 #define CLE_PRIORITY_POS       5
 #define CLE_PRIORITY_LEN       3
 
 
 }
 
 static void xgene_enet_cle_bypass(struct xgene_enet_pdata *pdata,
-                                 u32 dst_ring_num, u16 bufpool_id)
+                                 u32 dst_ring_num, u16 bufpool_id,
+                                 u16 nxtbufpool_id)
 {
        u32 cb;
-       u32 fpsel;
+       u32 fpsel, nxtfpsel;
 
        fpsel = xgene_enet_get_fpsel(bufpool_id);
+       nxtfpsel = xgene_enet_get_fpsel(nxtbufpool_id);
 
        xgene_enet_rd_csr(pdata, CLE_BYPASS_REG0_0_ADDR, &cb);
        cb |= CFG_CLE_BYPASS_EN0;
        xgene_enet_rd_csr(pdata, CLE_BYPASS_REG1_0_ADDR, &cb);
        CFG_CLE_DSTQID0_SET(&cb, dst_ring_num);
        CFG_CLE_FPSEL0_SET(&cb, fpsel);
+       CFG_CLE_NXTFPSEL0_SET(&cb, nxtfpsel);
        xgene_enet_wr_csr(pdata, CLE_BYPASS_REG1_0_ADDR, cb);
 }
 
 
 #define CFG_CLE_IP_PROTOCOL0_SET(dst, val)     xgene_set_bits(dst, val, 16, 2)
 #define CFG_CLE_DSTQID0_SET(dst, val)          xgene_set_bits(dst, val, 0, 12)
 #define CFG_CLE_FPSEL0_SET(dst, val)           xgene_set_bits(dst, val, 16, 4)
+#define CFG_CLE_NXTFPSEL0_SET(dst, val)                xgene_set_bits(dst, val, 20, 4)
 #define CFG_MACMODE_SET(dst, val)              xgene_set_bits(dst, val, 18, 2)
 #define CFG_WAITASYNCRD_SET(dst, val)          xgene_set_bits(dst, val, 0, 16)
-#define CFG_CLE_DSTQID0(val)           (val & GENMASK(11, 0))
-#define CFG_CLE_FPSEL0(val)            ((val << 16) & GENMASK(19, 16))
+#define CFG_CLE_DSTQID0(val)           ((val) & GENMASK(11, 0))
+#define CFG_CLE_FPSEL0(val)            (((val) << 16) & GENMASK(19, 16))
+#define CFG_CLE_NXTFPSEL0(val)         (((val) << 20) & GENMASK(23, 20))
 #define ICM_CONFIG0_REG_0_ADDR         0x0400
 #define ICM_CONFIG2_REG_0_ADDR         0x0410
 #define RX_DV_GATE_REG_0_ADDR          0x05fc
 
 static int xgene_enet_init_hw(struct xgene_enet_pdata *pdata)
 {
        struct xgene_enet_cle *enet_cle = &pdata->cle;
+       struct xgene_enet_desc_ring *page_pool;
        struct net_device *ndev = pdata->ndev;
        struct xgene_enet_desc_ring *buf_pool;
-       u16 dst_ring_num;
+       u16 dst_ring_num, ring_id;
        int i, ret;
 
        ret = pdata->port_ops->reset(pdata);
                        netdev_err(ndev, "Preclass Tree init error\n");
                        goto err;
                }
+
        } else {
-               pdata->port_ops->cle_bypass(pdata, dst_ring_num, buf_pool->id);
+               dst_ring_num = xgene_enet_dst_ring_num(pdata->rx_ring[0]);
+               buf_pool = pdata->rx_ring[0]->buf_pool;
+               page_pool = pdata->rx_ring[0]->page_pool;
+               ring_id = (page_pool) ? page_pool->id : 0;
+               pdata->port_ops->cle_bypass(pdata, dst_ring_num,
+                                           buf_pool->id, ring_id);
        }
 
        pdata->phy_speed = SPEED_UNKNOWN;
 
        enum xgene_enet_ring_cfgsize cfgsize;
        struct xgene_enet_desc_ring *cp_ring;
        struct xgene_enet_desc_ring *buf_pool;
+       struct xgene_enet_desc_ring *page_pool;
        struct napi_struct napi;
        union {
                void *desc_addr;
        void (*clear)(struct xgene_enet_pdata *pdata,
                      struct xgene_enet_desc_ring *ring);
        void (*cle_bypass)(struct xgene_enet_pdata *pdata,
-                          u32 dst_ring_num, u16 bufpool_id);
+                          u32 dst_ring_num, u16 bufpool_id, u16 nxtbufpool_id);
        void (*shutdown)(struct xgene_enet_pdata *pdata);
 };
 
 
 }
 
 static void xgene_enet_cle_bypass(struct xgene_enet_pdata *p,
-                                 u32 dst_ring_num, u16 bufpool_id)
+                                 u32 dst_ring_num, u16 bufpool_id,
+                                 u16 nxtbufpool_id)
 {
-       u32 data, fpsel;
        u32 cle_bypass_reg0, cle_bypass_reg1;
        u32 offset = p->port_id * MAC_OFFSET;
+       u32 data, fpsel, nxtfpsel;
 
        if (p->enet_id == XGENE_ENET1) {
                cle_bypass_reg0 = CLE_BYPASS_REG0_0_ADDR;
        xgene_enet_wr_csr(p, cle_bypass_reg0 + offset, data);
 
        fpsel = xgene_enet_get_fpsel(bufpool_id);
-       data = CFG_CLE_DSTQID0(dst_ring_num) | CFG_CLE_FPSEL0(fpsel);
+       nxtfpsel = xgene_enet_get_fpsel(nxtbufpool_id);
+       data = CFG_CLE_DSTQID0(dst_ring_num) | CFG_CLE_FPSEL0(fpsel) |
+              CFG_CLE_NXTFPSEL0(nxtfpsel);
        xgene_enet_wr_csr(p, cle_bypass_reg1 + offset, data);
 }
 
 
 }
 
 static void xgene_enet_xgcle_bypass(struct xgene_enet_pdata *pdata,
-                                   u32 dst_ring_num, u16 bufpool_id)
+                                   u32 dst_ring_num, u16 bufpool_id,
+                                   u16 nxtbufpool_id)
 {
-       u32 cb, fpsel;
+       u32 cb, fpsel, nxtfpsel;
 
        xgene_enet_rd_csr(pdata, XCLE_BYPASS_REG0_ADDR, &cb);
        cb |= CFG_CLE_BYPASS_EN0;
        xgene_enet_wr_csr(pdata, XCLE_BYPASS_REG0_ADDR, cb);
 
        fpsel = xgene_enet_get_fpsel(bufpool_id);
+       nxtfpsel = xgene_enet_get_fpsel(nxtbufpool_id);
        xgene_enet_rd_csr(pdata, XCLE_BYPASS_REG1_ADDR, &cb);
        CFG_CLE_DSTQID0_SET(&cb, dst_ring_num);
        CFG_CLE_FPSEL0_SET(&cb, fpsel);
+       CFG_CLE_NXTFPSEL0_SET(&cb, nxtfpsel);
        xgene_enet_wr_csr(pdata, XCLE_BYPASS_REG1_ADDR, cb);
 }