        struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
        struct cppi5_host_desc_t *desc_rx;
        struct device *dev = common->dev;
+       struct am65_cpsw_swdata *swdata;
        dma_addr_t desc_dma;
        dma_addr_t buf_dma;
-       void *swdata;
 
        desc_rx = k3_cppi_desc_pool_alloc(rx_chn->desc_pool);
        if (!desc_rx) {
        cppi5_hdesc_attach_buf(desc_rx, buf_dma, AM65_CPSW_MAX_PACKET_SIZE,
                               buf_dma, AM65_CPSW_MAX_PACKET_SIZE);
        swdata = cppi5_hdesc_get_swdata(desc_rx);
-       *((void **)swdata) = page_address(page);
+       swdata->page = page;
+       swdata->flow_id = flow_idx;
 
        return k3_udma_glue_push_rx_chn(rx_chn->rx_chn, flow_idx,
                                        desc_rx, desc_dma);
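
The software data area of the RX descriptor is now treated as a small struct instead of a bare page-address pointer, so both the page and its owning flow travel with the descriptor. The struct definition is not part of this excerpt; a minimal sketch of what the code above assumes (typically kept in am65-cpsw-nuss.h and small enough to fit the descriptor's SW data area) is:

	struct am65_cpsw_swdata {
		u32 flow_id;
		struct page *page;
	};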
 
 static inline void am65_cpsw_put_page(struct am65_cpsw_rx_flow *flow,
                                      struct page *page,
-                                     bool allow_direct,
-                                     int desc_idx)
+                                     bool allow_direct)
 {
        page_pool_put_full_page(flow->page_pool, page, allow_direct);
-       flow->pages[desc_idx] = NULL;
 }
 
 static void am65_cpsw_nuss_rx_cleanup(void *data, dma_addr_t desc_dma)
 {
-       struct am65_cpsw_rx_flow *flow = data;
+       struct am65_cpsw_rx_chn *rx_chn = data;
        struct cppi5_host_desc_t *desc_rx;
-       struct am65_cpsw_rx_chn *rx_chn;
+       struct am65_cpsw_swdata *swdata;
        dma_addr_t buf_dma;
+       struct page *page;
        u32 buf_dma_len;
-       void *page_addr;
-       void **swdata;
-       int desc_idx;
+       u32 flow_id;
 
-       rx_chn = &flow->common->rx_chns;
        desc_rx = k3_cppi_desc_pool_dma2virt(rx_chn->desc_pool, desc_dma);
        swdata = cppi5_hdesc_get_swdata(desc_rx);
-       page_addr = *swdata;
+       page = swdata->page;
+       flow_id = swdata->flow_id;
        cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
        k3_udma_glue_rx_cppi5_to_dma_addr(rx_chn->rx_chn, &buf_dma);
        dma_unmap_single(rx_chn->dma_dev, buf_dma, buf_dma_len, DMA_FROM_DEVICE);
        k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
 
-       desc_idx = am65_cpsw_nuss_desc_idx(rx_chn->desc_pool, desc_rx,
-                                          rx_chn->dsize_log2);
-       am65_cpsw_put_page(flow, virt_to_page(page_addr), false, desc_idx);
+       am65_cpsw_put_page(&rx_chn->flows[flow_id], page, false);
 }
 
 static void am65_cpsw_nuss_xmit_free(struct am65_cpsw_tx_chn *tx_chn,
                                ret = -ENOMEM;
                                goto fail_rx;
                        }
-                       flow->pages[i] = page;
 
                        ret = am65_cpsw_nuss_rx_push(common, page, flow_idx);
                        if (ret < 0) {
                                dev_err(common->dev,
                                        "cannot submit page to rx channel flow %d, error %d\n",
                                        flow_idx, ret);
-                               am65_cpsw_put_page(flow, page, false, i);
+                               am65_cpsw_put_page(flow, page, false);
                                goto fail_rx;
                        }
                }
 
 fail_rx:
        for (i = 0; i < common->rx_ch_num_flows; i++)
-               k3_udma_glue_reset_rx_chn(rx_chn->rx_chn, i, &rx_chn->flows[i],
-                                         am65_cpsw_nuss_rx_cleanup, 0);
+               k3_udma_glue_reset_rx_chn(rx_chn->rx_chn, i, rx_chn,
+                                         am65_cpsw_nuss_rx_cleanup, !!i);
 
        am65_cpsw_destroy_xdp_rxqs(common);
 
                        dev_err(common->dev, "rx teardown timeout\n");
        }
 
-       for (i = 0; i < common->rx_ch_num_flows; i++) {
+       for (i = common->rx_ch_num_flows - 1; i >= 0; i--) {
                napi_disable(&rx_chn->flows[i].napi_rx);
                hrtimer_cancel(&rx_chn->flows[i].rx_hrtimer);
-               k3_udma_glue_reset_rx_chn(rx_chn->rx_chn, i, &rx_chn->flows[i],
-                                         am65_cpsw_nuss_rx_cleanup, 0);
+               k3_udma_glue_reset_rx_chn(rx_chn->rx_chn, i, rx_chn,
+                                         am65_cpsw_nuss_rx_cleanup, !!i);
        }
 
        k3_udma_glue_disable_rx_chn(rx_chn->rx_chn);
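
Handing rx_chn (rather than a single flow) to the cleanup callback works because each descriptor's swdata now records its flow_id, so am65_cpsw_nuss_rx_cleanup() can find the right page_pool on its own. The trailing !!i corresponds to the glue layer's skip_fdq argument: with one free-descriptor queue shared by all flows, it is drained only for flow 0 and skipped for every other flow. For reference, the k3_udma_glue_reset_rx_chn() prototype is roughly:

	void k3_udma_glue_reset_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
				       u32 flow_num, void *data,
				       void (*cleanup)(void *data, dma_addr_t desc_dma),
				       bool skip_fdq);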
 static int am65_cpsw_run_xdp(struct am65_cpsw_rx_flow *flow,
                             struct am65_cpsw_port *port,
                             struct xdp_buff *xdp,
-                            int desc_idx, int cpu, int *len)
+                            int cpu, int *len)
 {
        struct am65_cpsw_common *common = flow->common;
        struct am65_cpsw_ndev_priv *ndev_priv;
        }
 
        page = virt_to_head_page(xdp->data);
-       am65_cpsw_put_page(flow, page, true, desc_idx);
+       am65_cpsw_put_page(flow, page, true);
 
 out:
        return ret;
        struct am65_cpsw_ndev_stats *stats;
        struct cppi5_host_desc_t *desc_rx;
        struct device *dev = common->dev;
+       struct am65_cpsw_swdata *swdata;
        struct page *page, *new_page;
        dma_addr_t desc_dma, buf_dma;
        struct am65_cpsw_port *port;
-       int headroom, desc_idx, ret;
        struct net_device *ndev;
        u32 flow_idx = flow->id;
        struct sk_buff *skb;
        struct xdp_buff xdp;
+       int headroom, ret;
        void *page_addr;
-       void **swdata;
        u32 *psdata;
 
        *xdp_state = AM65_CPSW_XDP_PASS;
                __func__, flow_idx, &desc_dma);
 
        swdata = cppi5_hdesc_get_swdata(desc_rx);
-       page_addr = *swdata;
-       page = virt_to_page(page_addr);
+       page = swdata->page;
+       page_addr = page_address(page);
        cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
        k3_udma_glue_rx_cppi5_to_dma_addr(rx_chn->rx_chn, &buf_dma);
        pkt_len = cppi5_hdesc_get_pktlen(desc_rx);
 
        k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
 
-       desc_idx = am65_cpsw_nuss_desc_idx(rx_chn->desc_pool, desc_rx,
-                                          rx_chn->dsize_log2);
-
        skb = am65_cpsw_build_skb(page_addr, ndev,
                                  AM65_CPSW_MAX_PACKET_SIZE);
        if (unlikely(!skb)) {
                xdp_init_buff(&xdp, PAGE_SIZE, &port->xdp_rxq[flow->id]);
                xdp_prepare_buff(&xdp, page_addr, AM65_CPSW_HEADROOM,
                                 pkt_len, false);
-               *xdp_state = am65_cpsw_run_xdp(flow, port, &xdp, desc_idx,
+               *xdp_state = am65_cpsw_run_xdp(flow, port, &xdp,
                                               cpu, &pkt_len);
                if (*xdp_state != AM65_CPSW_XDP_PASS)
                        goto allocate;
                return -ENOMEM;
        }
 
-       flow->pages[desc_idx] = new_page;
-
        if (netif_dormant(ndev)) {
-               am65_cpsw_put_page(flow, new_page, true, desc_idx);
+               am65_cpsw_put_page(flow, new_page, true);
                ndev->stats.rx_dropped++;
                return 0;
        }
 requeue:
        ret = am65_cpsw_nuss_rx_push(common, new_page, flow_idx);
        if (WARN_ON(ret < 0)) {
-               am65_cpsw_put_page(flow, new_page, true, desc_idx);
+               am65_cpsw_put_page(flow, new_page, true);
                ndev->stats.rx_errors++;
                ndev->stats.rx_dropped++;
        }
        for (i = 0; i < common->rx_ch_num_flows; i++) {
                flow = &rx_chn->flows[i];
                flow->page_pool = NULL;
-               flow->pages = devm_kcalloc(dev, AM65_CPSW_MAX_RX_DESC,
-                                          sizeof(*flow->pages), GFP_KERNEL);
-               if (!flow->pages)
-                       return -ENOMEM;
        }
 
        rx_chn->rx_chn = k3_udma_glue_request_rx_chn(dev, "rx", &rx_cfg);
 
                rx_flow_cfg.ring_rxfdq0_id = fdqring_id;
                rx_flow_cfg.rx_cfg.size = max_desc_num;
-               rx_flow_cfg.rxfdq_cfg.size = max_desc_num;
+               /* share same FDQ for all flows */
+               rx_flow_cfg.rxfdq_cfg.size = max_desc_num * rx_cfg.flow_id_num;
                rx_flow_cfg.rxfdq_cfg.mode = common->pdata.fdqring_mode;
 
                ret = k3_udma_glue_rx_flow_init(rx_chn->rx_chn,
 
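Since all flows now pull buffers from a single shared FDQ, the queue must be sized for the aggregate descriptor count rather than for one flow. For example, assuming max_desc_num were 500 and rx_cfg.flow_id_num were 8, the shared FDQ would be configured for 4000 entries while each per-flow RX ring (rx_cfg.size) stays at 500.
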
        for (i = 0; i < common->rx_ch_num_flows; i++)
                k3_udma_glue_reset_rx_chn(rx_chan->rx_chn, i,
-                                         &rx_chan->flows[i],
-                                         am65_cpsw_nuss_rx_cleanup, 0);
+                                         rx_chan,
+                                         am65_cpsw_nuss_rx_cleanup, !!i);
 
        k3_udma_glue_disable_rx_chn(rx_chan->rx_chn);