(new_rx_count == vsi->rx_rings[0]->count))
                return 0;
 
-       /* If there is a AF_XDP UMEM attached to any of Rx rings,
+       /* If there is an AF_XDP buffer pool attached to any of the Rx rings,
         * disallow changing the number of descriptors -- regardless
         * if the netdev is running or not.
         */
 
 }
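For context, the hunk above elides the check that this comment guards. A minimal sketch of what i40e_set_ringparam() presumably does at this point, reusing the i40e_xsk_any_rx_ring_enabled() helper converted later in this patch (the exact return code is an assumption, not quoted from the diff):

        /* Sketch (assumed), not part of the diff: refuse the resize while
         * any Rx ring still has an AF_XDP buffer pool attached.
         */
        if (i40e_xsk_any_rx_ring_enabled(vsi))
                return -EBUSY;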
 
 /**
- * i40e_xsk_umem - Retrieve the AF_XDP ZC if XDP and ZC is enabled
+ * i40e_xsk_pool - Retrieve the AF_XDP buffer pool if XDP and ZC is enabled
  * @ring: The Tx or Rx ring
  *
- * Returns the UMEM or NULL.
+ * Returns the AF_XDP buffer pool or NULL.
  **/
-static struct xdp_umem *i40e_xsk_umem(struct i40e_ring *ring)
+static struct xsk_buff_pool *i40e_xsk_pool(struct i40e_ring *ring)
 {
        bool xdp_on = i40e_enabled_xdp_vsi(ring->vsi);
        int qid = ring->queue_index;
        if (!xdp_on || !test_bit(qid, ring->vsi->af_xdp_zc_qps))
                return NULL;
 
-       return xdp_get_umem_from_qid(ring->vsi->netdev, qid);
+       return xdp_get_xsk_pool_from_qid(ring->vsi->netdev, qid);
 }
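A note on the access pattern used throughout the rest of this patch: at this intermediate stage of the conversion the buffer pool still carries a pointer to its backing UMEM, so drivers keep calling the existing xsk_umem_*()/xsk_buff_*() helpers through pool->umem. A minimal illustrative sketch (the helper name below is hypothetical, not part of the patch):

/* Hypothetical helper: reach the legacy UMEM-based API through the pool
 * that the ring now stores, exactly as the hunks below do inline.
 */
static u32 example_rx_frame_size(struct xsk_buff_pool *pool)
{
        return xsk_umem_get_rx_frame_size(pool->umem);
}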
 
 /**
        u32 qtx_ctl = 0;
 
        if (ring_is_xdp(ring))
-               ring->xsk_umem = i40e_xsk_umem(ring);
+               ring->xsk_pool = i40e_xsk_pool(ring);
 
        /* some ATR related tx ring init */
        if (vsi->back->flags & I40E_FLAG_FD_ATR_ENABLED) {
                xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
 
        kfree(ring->rx_bi);
-       ring->xsk_umem = i40e_xsk_umem(ring);
-       if (ring->xsk_umem) {
+       ring->xsk_pool = i40e_xsk_pool(ring);
+       if (ring->xsk_pool) {
                ret = i40e_alloc_rx_bi_zc(ring);
                if (ret)
                        return ret;
-               ring->rx_buf_len = xsk_umem_get_rx_frame_size(ring->xsk_umem);
+               ring->rx_buf_len =
+                 xsk_umem_get_rx_frame_size(ring->xsk_pool->umem);
                /* For AF_XDP ZC, we disallow packets to span on
                 * multiple buffers, thus letting us skip that
                 * handling in the fast-path.
        ring->tail = hw->hw_addr + I40E_QRX_TAIL(pf_q);
        writel(0, ring->tail);
 
-       if (ring->xsk_umem) {
-               xsk_buff_set_rxq_info(ring->xsk_umem, &ring->xdp_rxq);
+       if (ring->xsk_pool) {
+               xsk_buff_set_rxq_info(ring->xsk_pool->umem, &ring->xdp_rxq);
                ok = i40e_alloc_rx_buffers_zc(ring, I40E_DESC_UNUSED(ring));
        } else {
                ok = !i40e_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring));
                 */
                dev_info(&vsi->back->pdev->dev,
                         "Failed to allocate some buffers on %sRx ring %d (pf_q %d)\n",
-                        ring->xsk_umem ? "UMEM enabled " : "",
+                        ring->xsk_pool ? "AF_XDP ZC enabled " : "",
                         ring->queue_index, pf_q);
        }
 
         */
        if (need_reset && prog)
                for (i = 0; i < vsi->num_queue_pairs; i++)
-                       if (vsi->xdp_rings[i]->xsk_umem)
+                       if (vsi->xdp_rings[i]->xsk_pool)
                                (void)i40e_xsk_wakeup(vsi->netdev, i,
                                                      XDP_WAKEUP_RX);
 
        switch (xdp->command) {
        case XDP_SETUP_PROG:
                return i40e_xdp_setup(vsi, xdp->prog);
-       case XDP_SETUP_XSK_UMEM:
-               return i40e_xsk_umem_setup(vsi, xdp->xsk.umem,
+       case XDP_SETUP_XSK_POOL:
+               return i40e_xsk_pool_setup(vsi, xdp->xsk.pool,
                                           xdp->xsk.queue_id);
        default:
                return -EINVAL;
 
        unsigned long bi_size;
        u16 i;
 
-       if (ring_is_xdp(tx_ring) && tx_ring->xsk_umem) {
+       if (ring_is_xdp(tx_ring) && tx_ring->xsk_pool) {
                i40e_xsk_clean_tx_ring(tx_ring);
        } else {
                /* ring already cleared, nothing to do */
                rx_ring->skb = NULL;
        }
 
-       if (rx_ring->xsk_umem) {
+       if (rx_ring->xsk_pool) {
                i40e_xsk_clean_rx_ring(rx_ring);
                goto skip_free;
        }
        }
 
 skip_free:
-       if (rx_ring->xsk_umem)
+       if (rx_ring->xsk_pool)
                i40e_clear_rx_bi_zc(rx_ring);
        else
                i40e_clear_rx_bi(rx_ring);
         * budget and be more aggressive about cleaning up the Tx descriptors.
         */
        i40e_for_each_ring(ring, q_vector->tx) {
-               bool wd = ring->xsk_umem ?
+               bool wd = ring->xsk_pool ?
                          i40e_clean_xdp_tx_irq(vsi, ring) :
                          i40e_clean_tx_irq(vsi, ring, budget);
 
                budget_per_ring = budget;
 
        i40e_for_each_ring(ring, q_vector->rx) {
-               int cleaned = ring->xsk_umem ?
+               int cleaned = ring->xsk_pool ?
                              i40e_clean_rx_irq_zc(ring, budget_per_ring) :
                              i40e_clean_rx_irq(ring, budget_per_ring);
 
 
 
        struct i40e_channel *ch;
        struct xdp_rxq_info xdp_rxq;
-       struct xdp_umem *xsk_umem;
+       struct xsk_buff_pool *xsk_pool;
 } ____cacheline_internodealigned_in_smp;
 
 static inline bool ring_uses_build_skb(struct i40e_ring *ring)
 
 }
 
 /**
- * i40e_xsk_umem_enable - Enable/associate a UMEM to a certain ring/qid
+ * i40e_xsk_pool_enable - Enable/associate an AF_XDP buffer pool to a
+ * certain ring/qid
  * @vsi: Current VSI
- * @umem: UMEM
- * @qid: Rx ring to associate UMEM to
+ * @pool: buffer pool
+ * @qid: Rx ring to associate buffer pool with
  *
  * Returns 0 on success, <0 on failure
  **/
-static int i40e_xsk_umem_enable(struct i40e_vsi *vsi, struct xdp_umem *umem,
+static int i40e_xsk_pool_enable(struct i40e_vsi *vsi,
+                               struct xsk_buff_pool *pool,
                                u16 qid)
 {
        struct net_device *netdev = vsi->netdev;
            qid >= netdev->real_num_tx_queues)
                return -EINVAL;
 
-       err = xsk_buff_dma_map(umem, &vsi->back->pdev->dev, I40E_RX_DMA_ATTR);
+       err = xsk_buff_dma_map(pool->umem, &vsi->back->pdev->dev,
+                              I40E_RX_DMA_ATTR);
        if (err)
                return err;
 
 }
 
 /**
- * i40e_xsk_umem_disable - Disassociate a UMEM from a certain ring/qid
+ * i40e_xsk_pool_disable - Disassociate an AF_XDP buffer pool from a
+ * certain ring/qid
  * @vsi: Current VSI
- * @qid: Rx ring to associate UMEM to
+ * @qid: Rx ring to disassociate the buffer pool from
  *
  * Returns 0 on success, <0 on failure
  **/
-static int i40e_xsk_umem_disable(struct i40e_vsi *vsi, u16 qid)
+static int i40e_xsk_pool_disable(struct i40e_vsi *vsi, u16 qid)
 {
        struct net_device *netdev = vsi->netdev;
-       struct xdp_umem *umem;
+       struct xsk_buff_pool *pool;
        bool if_running;
        int err;
 
-       umem = xdp_get_umem_from_qid(netdev, qid);
-       if (!umem)
+       pool = xdp_get_xsk_pool_from_qid(netdev, qid);
+       if (!pool)
                return -EINVAL;
 
        if_running = netif_running(vsi->netdev) && i40e_enabled_xdp_vsi(vsi);
        }
 
        clear_bit(qid, vsi->af_xdp_zc_qps);
-       xsk_buff_dma_unmap(umem, I40E_RX_DMA_ATTR);
+       xsk_buff_dma_unmap(pool->umem, I40E_RX_DMA_ATTR);
 
        if (if_running) {
                err = i40e_queue_pair_enable(vsi, qid);
 }
 
 /**
- * i40e_xsk_umem_setup - Enable/disassociate a UMEM to/from a ring/qid
+ * i40e_xsk_pool_setup - Enable/disassociate an AF_XDP buffer pool to/from
+ * a ring/qid
  * @vsi: Current VSI
- * @umem: UMEM to enable/associate to a ring, or NULL to disable
- * @qid: Rx ring to (dis)associate UMEM (from)to
+ * @pool: Buffer pool to enable/associate to a ring, or NULL to disable
+ * @qid: Rx ring to associate the buffer pool with (or disassociate it from)
  *
- * This function enables or disables a UMEM to a certain ring.
+ * This function enables or disables a buffer pool for a certain ring.
  *
  * Returns 0 on success, <0 on failure
  **/
-int i40e_xsk_umem_setup(struct i40e_vsi *vsi, struct xdp_umem *umem,
+int i40e_xsk_pool_setup(struct i40e_vsi *vsi, struct xsk_buff_pool *pool,
                        u16 qid)
 {
-       return umem ? i40e_xsk_umem_enable(vsi, umem, qid) :
-               i40e_xsk_umem_disable(vsi, qid);
+       return pool ? i40e_xsk_pool_enable(vsi, pool, qid) :
+               i40e_xsk_pool_disable(vsi, qid);
 }
 
 /**
        rx_desc = I40E_RX_DESC(rx_ring, ntu);
        bi = i40e_rx_bi(rx_ring, ntu);
        do {
-               xdp = xsk_buff_alloc(rx_ring->xsk_umem);
+               xdp = xsk_buff_alloc(rx_ring->xsk_pool->umem);
                if (!xdp) {
                        ok = false;
                        goto no_buffers;
        i40e_finalize_xdp_rx(rx_ring, xdp_xmit);
        i40e_update_rx_stats(rx_ring, total_rx_bytes, total_rx_packets);
 
-       if (xsk_umem_uses_need_wakeup(rx_ring->xsk_umem)) {
+       if (xsk_umem_uses_need_wakeup(rx_ring->xsk_pool->umem)) {
                if (failure || rx_ring->next_to_clean == rx_ring->next_to_use)
-                       xsk_set_rx_need_wakeup(rx_ring->xsk_umem);
+                       xsk_set_rx_need_wakeup(rx_ring->xsk_pool->umem);
                else
-                       xsk_clear_rx_need_wakeup(rx_ring->xsk_umem);
+                       xsk_clear_rx_need_wakeup(rx_ring->xsk_pool->umem);
 
                return (int)total_rx_packets;
        }
        dma_addr_t dma;
 
        while (budget-- > 0) {
-               if (!xsk_umem_consume_tx(xdp_ring->xsk_umem, &desc))
+               if (!xsk_umem_consume_tx(xdp_ring->xsk_pool->umem, &desc))
                        break;
 
-               dma = xsk_buff_raw_get_dma(xdp_ring->xsk_umem, desc.addr);
-               xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_umem, dma,
+               dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool->umem,
+                                          desc.addr);
+               xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool->umem, dma,
                                                 desc.len);
 
                tx_bi = &xdp_ring->tx_bi[xdp_ring->next_to_use];
                                                 I40E_TXD_QW1_CMD_SHIFT);
                i40e_xdp_ring_update_tail(xdp_ring);
 
-               xsk_umem_consume_tx_done(xdp_ring->xsk_umem);
+               xsk_umem_consume_tx_done(xdp_ring->xsk_pool->umem);
                i40e_update_tx_stats(xdp_ring, sent_frames, total_bytes);
        }
 
  **/
 bool i40e_clean_xdp_tx_irq(struct i40e_vsi *vsi, struct i40e_ring *tx_ring)
 {
-       struct xdp_umem *umem = tx_ring->xsk_umem;
+       struct xsk_buff_pool *bp = tx_ring->xsk_pool;
        u32 i, completed_frames, xsk_frames = 0;
        u32 head_idx = i40e_get_head(tx_ring);
        struct i40e_tx_buffer *tx_bi;
                tx_ring->next_to_clean -= tx_ring->count;
 
        if (xsk_frames)
-               xsk_umem_complete_tx(umem, xsk_frames);
+               xsk_umem_complete_tx(bp->umem, xsk_frames);
 
        i40e_arm_wb(tx_ring, vsi, completed_frames);
 
 out_xmit:
-       if (xsk_umem_uses_need_wakeup(tx_ring->xsk_umem))
-               xsk_set_tx_need_wakeup(tx_ring->xsk_umem);
+       if (xsk_umem_uses_need_wakeup(tx_ring->xsk_pool->umem))
+               xsk_set_tx_need_wakeup(tx_ring->xsk_pool->umem);
 
        return i40e_xmit_zc(tx_ring, I40E_DESC_UNUSED(tx_ring));
 }
        if (queue_id >= vsi->num_queue_pairs)
                return -ENXIO;
 
-       if (!vsi->xdp_rings[queue_id]->xsk_umem)
+       if (!vsi->xdp_rings[queue_id]->xsk_pool)
                return -ENXIO;
 
        ring = vsi->xdp_rings[queue_id];
 void i40e_xsk_clean_tx_ring(struct i40e_ring *tx_ring)
 {
        u16 ntc = tx_ring->next_to_clean, ntu = tx_ring->next_to_use;
-       struct xdp_umem *umem = tx_ring->xsk_umem;
+       struct xsk_buff_pool *bp = tx_ring->xsk_pool;
        struct i40e_tx_buffer *tx_bi;
        u32 xsk_frames = 0;
 
        }
 
        if (xsk_frames)
-               xsk_umem_complete_tx(umem, xsk_frames);
+               xsk_umem_complete_tx(bp->umem, xsk_frames);
 }
 
 /**
- * i40e_xsk_any_rx_ring_enabled - Checks if Rx rings have AF_XDP UMEM attached
+ * i40e_xsk_any_rx_ring_enabled - Checks if Rx rings have an AF_XDP
+ * buffer pool attached
  * @vsi: vsi
  *
- * Returns true if any of the Rx rings has an AF_XDP UMEM attached
+ * Returns true if any of the Rx rings has an AF_XDP buffer pool attached
  **/
 bool i40e_xsk_any_rx_ring_enabled(struct i40e_vsi *vsi)
 {
        int i;
 
        for (i = 0; i < vsi->num_queue_pairs; i++) {
-               if (xdp_get_umem_from_qid(netdev, i))
+               if (xdp_get_xsk_pool_from_qid(netdev, i))
                        return true;
        }
 
 
 #define _I40E_XSK_H_
 
 struct i40e_vsi;
-struct xdp_umem;
+struct xsk_buff_pool;
 struct zero_copy_allocator;
 
 int i40e_queue_pair_disable(struct i40e_vsi *vsi, int queue_pair);
 int i40e_queue_pair_enable(struct i40e_vsi *vsi, int queue_pair);
-int i40e_xsk_umem_setup(struct i40e_vsi *vsi, struct xdp_umem *umem,
+int i40e_xsk_pool_setup(struct i40e_vsi *vsi, struct xsk_buff_pool *pool,
                        u16 qid);
 bool i40e_alloc_rx_buffers_zc(struct i40e_ring *rx_ring, u16 cleaned_count);
 int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget);
 
        struct ice_ring **xdp_rings;     /* XDP ring array */
        u16 num_xdp_txq;                 /* Used XDP queues */
        u8 xdp_mapping_mode;             /* ICE_MAP_MODE_[CONTIG|SCATTER] */
-       struct xdp_umem **xsk_umems;
-       u16 num_xsk_umems_used;
-       u16 num_xsk_umems;
+       struct xsk_buff_pool **xsk_pools;
+       u16 num_xsk_pools_used;
+       u16 num_xsk_pools;
 } ____cacheline_internodealigned_in_smp;
 
 /* struct that defines an interrupt vector */
 }
 
 /**
- * ice_xsk_umem - get XDP UMEM bound to a ring
+ * ice_xsk_pool - get XSK buffer pool bound to a ring
  * @ring - ring to use
  *
- * Returns a pointer to xdp_umem structure if there is an UMEM present,
+ * Returns a pointer to the xsk_buff_pool structure if a buffer pool is present,
  * NULL otherwise.
  */
-static inline struct xdp_umem *ice_xsk_umem(struct ice_ring *ring)
+static inline struct xsk_buff_pool *ice_xsk_pool(struct ice_ring *ring)
 {
-       struct xdp_umem **umems = ring->vsi->xsk_umems;
+       struct xsk_buff_pool **pools = ring->vsi->xsk_pools;
        u16 qid = ring->q_index;
 
        if (ice_ring_is_xdp(ring))
                qid -= ring->vsi->num_xdp_txq;
 
-       if (qid >= ring->vsi->num_xsk_umems || !umems || !umems[qid] ||
+       if (qid >= ring->vsi->num_xsk_pools || !pools || !pools[qid] ||
            !ice_is_xdp_ena_vsi(ring->vsi))
                return NULL;
 
-       return umems[qid];
+       return pools[qid];
 }
 
 /**
 
                        xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
                                         ring->q_index);
 
-               ring->xsk_umem = ice_xsk_umem(ring);
-               if (ring->xsk_umem) {
+               ring->xsk_pool = ice_xsk_pool(ring);
+               if (ring->xsk_pool) {
                        xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
 
                        ring->rx_buf_len =
-                               xsk_umem_get_rx_frame_size(ring->xsk_umem);
+                               xsk_umem_get_rx_frame_size(ring->xsk_pool->umem);
                        /* For AF_XDP ZC, we disallow packets to span on
                         * multiple buffers, thus letting us skip that
                         * handling in the fast-path.
                                                         NULL);
                        if (err)
                                return err;
-                       xsk_buff_set_rxq_info(ring->xsk_umem, &ring->xdp_rxq);
+                       xsk_buff_set_rxq_info(ring->xsk_pool->umem, &ring->xdp_rxq);
 
                        dev_info(dev, "Registered XDP mem model MEM_TYPE_XSK_BUFF_POOL on Rx ring %d\n",
                                 ring->q_index);
        ring->tail = hw->hw_addr + QRX_TAIL(pf_q);
        writel(0, ring->tail);
 
-       if (ring->xsk_umem) {
-               if (!xsk_buff_can_alloc(ring->xsk_umem, num_bufs)) {
-                       dev_warn(dev, "UMEM does not provide enough addresses to fill %d buffers on Rx ring %d\n",
+       if (ring->xsk_pool) {
+               if (!xsk_buff_can_alloc(ring->xsk_pool->umem, num_bufs)) {
+                       dev_warn(dev, "XSK buffer pool does not provide enough addresses to fill %d buffers on Rx ring %d\n",
                                 num_bufs, ring->q_index);
                        dev_warn(dev, "Change Rx ring/fill queue size to avoid performance issues\n");
 
 
                err = ice_alloc_rx_bufs_zc(ring, num_bufs);
                if (err)
-                       dev_info(dev, "Failed to allocate some buffers on UMEM enabled Rx ring %d (pf_q %d)\n",
+                       dev_info(dev, "Failed to allocate some buffers on XSK buffer pool enabled Rx ring %d (pf_q %d)\n",
                                 ring->q_index, pf_q);
                return 0;
        }
 
                return ret;
 
        for (i = 0; i < vsi->num_xdp_txq; i++)
-               vsi->xdp_rings[i]->xsk_umem = ice_xsk_umem(vsi->xdp_rings[i]);
+               vsi->xdp_rings[i]->xsk_pool = ice_xsk_pool(vsi->xdp_rings[i]);
 
        return ret;
 }
 
                if (ice_setup_tx_ring(xdp_ring))
                        goto free_xdp_rings;
                ice_set_ring_xdp(xdp_ring);
-               xdp_ring->xsk_umem = ice_xsk_umem(xdp_ring);
+               xdp_ring->xsk_pool = ice_xsk_pool(xdp_ring);
        }
 
        return 0;
        if (if_running)
                ret = ice_up(vsi);
 
-       if (!ret && prog && vsi->xsk_umems) {
+       if (!ret && prog && vsi->xsk_pools) {
                int i;
 
                ice_for_each_rxq(vsi, i) {
                        struct ice_ring *rx_ring = vsi->rx_rings[i];
 
-                       if (rx_ring->xsk_umem)
+                       if (rx_ring->xsk_pool)
                                napi_schedule(&rx_ring->q_vector->napi);
                }
        }
        switch (xdp->command) {
        case XDP_SETUP_PROG:
                return ice_xdp_setup_prog(vsi, xdp->prog, xdp->extack);
-       case XDP_SETUP_XSK_UMEM:
-               return ice_xsk_umem_setup(vsi, xdp->xsk.umem,
+       case XDP_SETUP_XSK_POOL:
+               return ice_xsk_pool_setup(vsi, xdp->xsk.pool,
                                          xdp->xsk.queue_id);
        default:
                return -EINVAL;
 
 {
        u16 i;
 
-       if (ice_ring_is_xdp(tx_ring) && tx_ring->xsk_umem) {
+       if (ice_ring_is_xdp(tx_ring) && tx_ring->xsk_pool) {
                ice_xsk_clean_xdp_ring(tx_ring);
                goto tx_skip_free;
        }
        if (!rx_ring->rx_buf)
                return;
 
-       if (rx_ring->xsk_umem) {
+       if (rx_ring->xsk_pool) {
                ice_xsk_clean_rx_ring(rx_ring);
                goto rx_skip_free;
        }
         * budget and be more aggressive about cleaning up the Tx descriptors.
         */
        ice_for_each_ring(ring, q_vector->tx) {
-               bool wd = ring->xsk_umem ?
+               bool wd = ring->xsk_pool ?
                          ice_clean_tx_irq_zc(ring, budget) :
                          ice_clean_tx_irq(ring, budget);
 
                 * comparison in the irq context instead of many inside the
                 * ice_clean_rx_irq function and makes the codebase cleaner.
                 */
-               cleaned = ring->xsk_umem ?
+               cleaned = ring->xsk_pool ?
                          ice_clean_rx_irq_zc(ring, budget_per_ring) :
                          ice_clean_rx_irq(ring, budget_per_ring);
                work_done += cleaned;
 
 
        struct rcu_head rcu;            /* to avoid race on free */
        struct bpf_prog *xdp_prog;
-       struct xdp_umem *xsk_umem;
+       struct xsk_buff_pool *xsk_pool;
        /* CL3 - 3rd cacheline starts here */
        struct xdp_rxq_info xdp_rxq;
        /* CLX - the below items are only accessed infrequently and should be
 
                if (err)
                        goto free_buf;
                ice_set_ring_xdp(xdp_ring);
-               xdp_ring->xsk_umem = ice_xsk_umem(xdp_ring);
+               xdp_ring->xsk_pool = ice_xsk_pool(xdp_ring);
        }
 
        err = ice_setup_rx_ctx(rx_ring);
 }
 
 /**
- * ice_xsk_alloc_umems - allocate a UMEM region for an XDP socket
- * @vsi: VSI to allocate the UMEM on
+ * ice_xsk_alloc_pools - allocate a buffer pool for an XDP socket
+ * @vsi: VSI to allocate the buffer pool on
  *
  * Returns 0 on success, negative on error
  */
-static int ice_xsk_alloc_umems(struct ice_vsi *vsi)
+static int ice_xsk_alloc_pools(struct ice_vsi *vsi)
 {
-       if (vsi->xsk_umems)
+       if (vsi->xsk_pools)
                return 0;
 
-       vsi->xsk_umems = kcalloc(vsi->num_xsk_umems, sizeof(*vsi->xsk_umems),
+       vsi->xsk_pools = kcalloc(vsi->num_xsk_pools, sizeof(*vsi->xsk_pools),
                                 GFP_KERNEL);
 
-       if (!vsi->xsk_umems) {
-               vsi->num_xsk_umems = 0;
+       if (!vsi->xsk_pools) {
+               vsi->num_xsk_pools = 0;
                return -ENOMEM;
        }
 
 }
 
 /**
- * ice_xsk_remove_umem - Remove an UMEM for a certain ring/qid
+ * ice_xsk_remove_pool - Remove a buffer pool for a certain ring/qid
  * @vsi: VSI from which the VSI will be removed
- * @qid: Ring/qid associated with the UMEM
+ * @qid: Ring/qid associated with the buffer pool
  */
-static void ice_xsk_remove_umem(struct ice_vsi *vsi, u16 qid)
+static void ice_xsk_remove_pool(struct ice_vsi *vsi, u16 qid)
 {
-       vsi->xsk_umems[qid] = NULL;
-       vsi->num_xsk_umems_used--;
+       vsi->xsk_pools[qid] = NULL;
+       vsi->num_xsk_pools_used--;
 
-       if (vsi->num_xsk_umems_used == 0) {
-               kfree(vsi->xsk_umems);
-               vsi->xsk_umems = NULL;
-               vsi->num_xsk_umems = 0;
+       if (vsi->num_xsk_pools_used == 0) {
+               kfree(vsi->xsk_pools);
+               vsi->xsk_pools = NULL;
+               vsi->num_xsk_pools = 0;
        }
 }
 
 /**
- * ice_xsk_umem_disable - disable a UMEM region
+ * ice_xsk_pool_disable - disable a buffer pool
  * @vsi: Current VSI
  * @qid: queue ID
  *
  * Returns 0 on success, negative on failure
  */
-static int ice_xsk_umem_disable(struct ice_vsi *vsi, u16 qid)
+static int ice_xsk_pool_disable(struct ice_vsi *vsi, u16 qid)
 {
-       if (!vsi->xsk_umems || qid >= vsi->num_xsk_umems ||
-           !vsi->xsk_umems[qid])
+       if (!vsi->xsk_pools || qid >= vsi->num_xsk_pools ||
+           !vsi->xsk_pools[qid])
                return -EINVAL;
 
-       xsk_buff_dma_unmap(vsi->xsk_umems[qid], ICE_RX_DMA_ATTR);
-       ice_xsk_remove_umem(vsi, qid);
+       xsk_buff_dma_unmap(vsi->xsk_pools[qid]->umem, ICE_RX_DMA_ATTR);
+       ice_xsk_remove_pool(vsi, qid);
 
        return 0;
 }
 
 /**
- * ice_xsk_umem_enable - enable a UMEM region
+ * ice_xsk_pool_enable - enable a buffer pool
  * @vsi: Current VSI
- * @umem: pointer to a requested UMEM region
+ * @pool: pointer to the requested buffer pool
  * @qid: queue ID
  *
  * Returns 0 on success, negative on failure
  */
 static int
-ice_xsk_umem_enable(struct ice_vsi *vsi, struct xdp_umem *umem, u16 qid)
+ice_xsk_pool_enable(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid)
 {
        int err;
 
        if (vsi->type != ICE_VSI_PF)
                return -EINVAL;
 
-       if (!vsi->num_xsk_umems)
-               vsi->num_xsk_umems = min_t(u16, vsi->num_rxq, vsi->num_txq);
-       if (qid >= vsi->num_xsk_umems)
+       if (!vsi->num_xsk_pools)
+               vsi->num_xsk_pools = min_t(u16, vsi->num_rxq, vsi->num_txq);
+       if (qid >= vsi->num_xsk_pools)
                return -EINVAL;
 
-       err = ice_xsk_alloc_umems(vsi);
+       err = ice_xsk_alloc_pools(vsi);
        if (err)
                return err;
 
-       if (vsi->xsk_umems && vsi->xsk_umems[qid])
+       if (vsi->xsk_pools && vsi->xsk_pools[qid])
                return -EBUSY;
 
-       vsi->xsk_umems[qid] = umem;
-       vsi->num_xsk_umems_used++;
+       vsi->xsk_pools[qid] = pool;
+       vsi->num_xsk_pools_used++;
 
-       err = xsk_buff_dma_map(vsi->xsk_umems[qid], ice_pf_to_dev(vsi->back),
+       err = xsk_buff_dma_map(vsi->xsk_pools[qid]->umem, ice_pf_to_dev(vsi->back),
                               ICE_RX_DMA_ATTR);
        if (err)
                return err;
 }
 
 /**
- * ice_xsk_umem_setup - enable/disable a UMEM region depending on its state
+ * ice_xsk_pool_setup - enable/disable a buffer pool depending on its state
  * @vsi: Current VSI
- * @umem: UMEM to enable/associate to a ring, NULL to disable
+ * @pool: buffer pool to enable/associate to a ring, NULL to disable
  * @qid: queue ID
  *
  * Returns 0 on success, negative on failure
  */
-int ice_xsk_umem_setup(struct ice_vsi *vsi, struct xdp_umem *umem, u16 qid)
+int ice_xsk_pool_setup(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid)
 {
-       bool if_running, umem_present = !!umem;
-       int ret = 0, umem_failure = 0;
+       bool if_running, pool_present = !!pool;
+       int ret = 0, pool_failure = 0;
 
        if_running = netif_running(vsi->netdev) && ice_is_xdp_ena_vsi(vsi);
 
                ret = ice_qp_dis(vsi, qid);
                if (ret) {
                        netdev_err(vsi->netdev, "ice_qp_dis error = %d\n", ret);
-                       goto xsk_umem_if_up;
+                       goto xsk_pool_if_up;
                }
        }
 
-       umem_failure = umem_present ? ice_xsk_umem_enable(vsi, umem, qid) :
-                                     ice_xsk_umem_disable(vsi, qid);
+       pool_failure = pool_present ? ice_xsk_pool_enable(vsi, pool, qid) :
+                                     ice_xsk_pool_disable(vsi, qid);
 
-xsk_umem_if_up:
+xsk_pool_if_up:
        if (if_running) {
                ret = ice_qp_ena(vsi, qid);
-               if (!ret && umem_present)
+               if (!ret && pool_present)
                        napi_schedule(&vsi->xdp_rings[qid]->q_vector->napi);
                else if (ret)
                        netdev_err(vsi->netdev, "ice_qp_ena error = %d\n", ret);
        }
 
-       if (umem_failure) {
-               netdev_err(vsi->netdev, "Could not %sable UMEM, error = %d\n",
-                          umem_present ? "en" : "dis", umem_failure);
-               return umem_failure;
+       if (pool_failure) {
+               netdev_err(vsi->netdev, "Could not %sable buffer pool, error = %d\n",
+                          pool_present ? "en" : "dis", pool_failure);
+               return pool_failure;
        }
 
        return ret;
        rx_buf = &rx_ring->rx_buf[ntu];
 
        do {
-               rx_buf->xdp = xsk_buff_alloc(rx_ring->xsk_umem);
+               rx_buf->xdp = xsk_buff_alloc(rx_ring->xsk_pool->umem);
                if (!rx_buf->xdp) {
                        ret = true;
                        break;
        ice_finalize_xdp_rx(rx_ring, xdp_xmit);
        ice_update_rx_ring_stats(rx_ring, total_rx_packets, total_rx_bytes);
 
-       if (xsk_umem_uses_need_wakeup(rx_ring->xsk_umem)) {
+       if (xsk_umem_uses_need_wakeup(rx_ring->xsk_pool->umem)) {
                if (failure || rx_ring->next_to_clean == rx_ring->next_to_use)
-                       xsk_set_rx_need_wakeup(rx_ring->xsk_umem);
+                       xsk_set_rx_need_wakeup(rx_ring->xsk_pool->umem);
                else
-                       xsk_clear_rx_need_wakeup(rx_ring->xsk_umem);
+                       xsk_clear_rx_need_wakeup(rx_ring->xsk_pool->umem);
 
                return (int)total_rx_packets;
        }
 
                tx_buf = &xdp_ring->tx_buf[xdp_ring->next_to_use];
 
-               if (!xsk_umem_consume_tx(xdp_ring->xsk_umem, &desc))
+               if (!xsk_umem_consume_tx(xdp_ring->xsk_pool->umem, &desc))
                        break;
 
-               dma = xsk_buff_raw_get_dma(xdp_ring->xsk_umem, desc.addr);
-               xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_umem, dma,
+               dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool->umem, desc.addr);
+               xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool->umem, dma,
                                                 desc.len);
 
                tx_buf->bytecount = desc.len;
 
        if (tx_desc) {
                ice_xdp_ring_update_tail(xdp_ring);
-               xsk_umem_consume_tx_done(xdp_ring->xsk_umem);
+               xsk_umem_consume_tx_done(xdp_ring->xsk_pool->umem);
        }
 
        return budget > 0 && work_done;
        xdp_ring->next_to_clean = ntc;
 
        if (xsk_frames)
-               xsk_umem_complete_tx(xdp_ring->xsk_umem, xsk_frames);
+               xsk_umem_complete_tx(xdp_ring->xsk_pool->umem, xsk_frames);
 
-       if (xsk_umem_uses_need_wakeup(xdp_ring->xsk_umem))
-               xsk_set_tx_need_wakeup(xdp_ring->xsk_umem);
+       if (xsk_umem_uses_need_wakeup(xdp_ring->xsk_pool->umem))
+               xsk_set_tx_need_wakeup(xdp_ring->xsk_pool->umem);
 
        ice_update_tx_ring_stats(xdp_ring, total_packets, total_bytes);
        xmit_done = ice_xmit_zc(xdp_ring, ICE_DFLT_IRQ_WORK);
        if (queue_id >= vsi->num_txq)
                return -ENXIO;
 
-       if (!vsi->xdp_rings[queue_id]->xsk_umem)
+       if (!vsi->xdp_rings[queue_id]->xsk_pool)
                return -ENXIO;
 
        ring = vsi->xdp_rings[queue_id];
 }
 
 /**
- * ice_xsk_any_rx_ring_ena - Checks if Rx rings have AF_XDP UMEM attached
+ * ice_xsk_any_rx_ring_ena - Checks if Rx rings have an AF_XDP buffer pool attached
  * @vsi: VSI to be checked
  *
- * Returns true if any of the Rx rings has an AF_XDP UMEM attached
+ * Returns true if any of the Rx rings has an AF_XDP buffer pool attached
  */
 bool ice_xsk_any_rx_ring_ena(struct ice_vsi *vsi)
 {
        int i;
 
-       if (!vsi->xsk_umems)
+       if (!vsi->xsk_pools)
                return false;
 
-       for (i = 0; i < vsi->num_xsk_umems; i++) {
-               if (vsi->xsk_umems[i])
+       for (i = 0; i < vsi->num_xsk_pools; i++) {
+               if (vsi->xsk_pools[i])
                        return true;
        }
 
 }
 
 /**
- * ice_xsk_clean_rx_ring - clean UMEM queues connected to a given Rx ring
+ * ice_xsk_clean_rx_ring - clean buffer pool queues connected to a given Rx ring
  * @rx_ring: ring to be cleaned
  */
 void ice_xsk_clean_rx_ring(struct ice_ring *rx_ring)
 }
 
 /**
- * ice_xsk_clean_xdp_ring - Clean the XDP Tx ring and its UMEM queues
+ * ice_xsk_clean_xdp_ring - Clean the XDP Tx ring and its buffer pool queues
  * @xdp_ring: XDP_Tx ring
  */
 void ice_xsk_clean_xdp_ring(struct ice_ring *xdp_ring)
        }
 
        if (xsk_frames)
-               xsk_umem_complete_tx(xdp_ring->xsk_umem, xsk_frames);
+               xsk_umem_complete_tx(xdp_ring->xsk_pool->umem, xsk_frames);
 }
 
 struct ice_vsi;
 
 #ifdef CONFIG_XDP_SOCKETS
-int ice_xsk_umem_setup(struct ice_vsi *vsi, struct xdp_umem *umem, u16 qid);
+int ice_xsk_pool_setup(struct ice_vsi *vsi, struct xsk_buff_pool *pool,
+                      u16 qid);
 int ice_clean_rx_irq_zc(struct ice_ring *rx_ring, int budget);
 bool ice_clean_tx_irq_zc(struct ice_ring *xdp_ring, int budget);
 int ice_xsk_wakeup(struct net_device *netdev, u32 queue_id, u32 flags);
 void ice_xsk_clean_xdp_ring(struct ice_ring *xdp_ring);
 #else
 static inline int
-ice_xsk_umem_setup(struct ice_vsi __always_unused *vsi,
-                  struct xdp_umem __always_unused *umem,
+ice_xsk_pool_setup(struct ice_vsi __always_unused *vsi,
+                  struct xsk_buff_pool __always_unused *pool,
                   u16 __always_unused qid)
 {
        return -EOPNOTSUPP;
 
                struct ixgbe_rx_queue_stats rx_stats;
        };
        struct xdp_rxq_info xdp_rxq;
-       struct xdp_umem *xsk_umem;
+       struct xsk_buff_pool *xsk_pool;
        u16 ring_idx;           /* {rx,tx,xdp}_ring back reference idx */
        u16 rx_buf_len;
 } ____cacheline_internodealigned_in_smp;
 
 #endif
 
        ixgbe_for_each_ring(ring, q_vector->tx) {
-               bool wd = ring->xsk_umem ?
+               bool wd = ring->xsk_pool ?
                          ixgbe_clean_xdp_tx_irq(q_vector, ring, budget) :
                          ixgbe_clean_tx_irq(q_vector, ring, budget);
 
                per_ring_budget = budget;
 
        ixgbe_for_each_ring(ring, q_vector->rx) {
-               int cleaned = ring->xsk_umem ?
+               int cleaned = ring->xsk_pool ?
                              ixgbe_clean_rx_irq_zc(q_vector, ring,
                                                    per_ring_budget) :
                              ixgbe_clean_rx_irq(q_vector, ring,
        u32 txdctl = IXGBE_TXDCTL_ENABLE;
        u8 reg_idx = ring->reg_idx;
 
-       ring->xsk_umem = NULL;
+       ring->xsk_pool = NULL;
        if (ring_is_xdp(ring))
-               ring->xsk_umem = ixgbe_xsk_umem(adapter, ring);
+               ring->xsk_pool = ixgbe_xsk_pool(adapter, ring);
 
        /* disable queue to avoid issues while updating state */
        IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), 0);
        srrctl = IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT;
 
        /* configure the packet buffer length */
-       if (rx_ring->xsk_umem) {
-               u32 xsk_buf_len = xsk_umem_get_rx_frame_size(rx_ring->xsk_umem);
+       if (rx_ring->xsk_pool) {
+               u32 xsk_buf_len = xsk_umem_get_rx_frame_size(rx_ring->xsk_pool->umem);
 
                /* If the MAC support setting RXDCTL.RLPML, the
                 * SRRCTL[n].BSIZEPKT is set to PAGE_SIZE and
        u8 reg_idx = ring->reg_idx;
 
        xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
-       ring->xsk_umem = ixgbe_xsk_umem(adapter, ring);
-       if (ring->xsk_umem) {
+       ring->xsk_pool = ixgbe_xsk_pool(adapter, ring);
+       if (ring->xsk_pool) {
                WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
                                                   MEM_TYPE_XSK_BUFF_POOL,
                                                   NULL));
-               xsk_buff_set_rxq_info(ring->xsk_umem, &ring->xdp_rxq);
+               xsk_buff_set_rxq_info(ring->xsk_pool->umem, &ring->xdp_rxq);
        } else {
                WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
                                                   MEM_TYPE_PAGE_SHARED, NULL));
 #endif
        }
 
-       if (ring->xsk_umem && hw->mac.type != ixgbe_mac_82599EB) {
-               u32 xsk_buf_len = xsk_umem_get_rx_frame_size(ring->xsk_umem);
+       if (ring->xsk_pool && hw->mac.type != ixgbe_mac_82599EB) {
+               u32 xsk_buf_len = xsk_umem_get_rx_frame_size(ring->xsk_pool->umem);
 
                rxdctl &= ~(IXGBE_RXDCTL_RLPMLMASK |
                            IXGBE_RXDCTL_RLPML_EN);
        IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
 
        ixgbe_rx_desc_queue_enable(adapter, ring);
-       if (ring->xsk_umem)
+       if (ring->xsk_pool)
                ixgbe_alloc_rx_buffers_zc(ring, ixgbe_desc_unused(ring));
        else
                ixgbe_alloc_rx_buffers(ring, ixgbe_desc_unused(ring));
        u16 i = rx_ring->next_to_clean;
        struct ixgbe_rx_buffer *rx_buffer = &rx_ring->rx_buffer_info[i];
 
-       if (rx_ring->xsk_umem) {
+       if (rx_ring->xsk_pool) {
                ixgbe_xsk_clean_rx_ring(rx_ring);
                goto skip_free;
        }
        u16 i = tx_ring->next_to_clean;
        struct ixgbe_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i];
 
-       if (tx_ring->xsk_umem) {
+       if (tx_ring->xsk_pool) {
                ixgbe_xsk_clean_tx_ring(tx_ring);
                goto out;
        }
         */
        if (need_reset && prog)
                for (i = 0; i < adapter->num_rx_queues; i++)
-                       if (adapter->xdp_ring[i]->xsk_umem)
+                       if (adapter->xdp_ring[i]->xsk_pool)
                                (void)ixgbe_xsk_wakeup(adapter->netdev, i,
                                                       XDP_WAKEUP_RX);
 
        switch (xdp->command) {
        case XDP_SETUP_PROG:
                return ixgbe_xdp_setup(dev, xdp->prog);
-       case XDP_SETUP_XSK_UMEM:
-               return ixgbe_xsk_umem_setup(adapter, xdp->xsk.umem,
+       case XDP_SETUP_XSK_POOL:
+               return ixgbe_xsk_pool_setup(adapter, xdp->xsk.pool,
                                            xdp->xsk.queue_id);
 
        default:
 
 void ixgbe_txrx_ring_disable(struct ixgbe_adapter *adapter, int ring);
 void ixgbe_txrx_ring_enable(struct ixgbe_adapter *adapter, int ring);
 
-struct xdp_umem *ixgbe_xsk_umem(struct ixgbe_adapter *adapter,
-                               struct ixgbe_ring *ring);
-int ixgbe_xsk_umem_setup(struct ixgbe_adapter *adapter, struct xdp_umem *umem,
+struct xsk_buff_pool *ixgbe_xsk_pool(struct ixgbe_adapter *adapter,
+                                    struct ixgbe_ring *ring);
+int ixgbe_xsk_pool_setup(struct ixgbe_adapter *adapter,
+                        struct xsk_buff_pool *pool,
                         u16 qid);
 
 void ixgbe_zca_free(struct zero_copy_allocator *alloc, unsigned long handle);
 
 #include "ixgbe.h"
 #include "ixgbe_txrx_common.h"
 
-struct xdp_umem *ixgbe_xsk_umem(struct ixgbe_adapter *adapter,
-                               struct ixgbe_ring *ring)
+struct xsk_buff_pool *ixgbe_xsk_pool(struct ixgbe_adapter *adapter,
+                                    struct ixgbe_ring *ring)
 {
        bool xdp_on = READ_ONCE(adapter->xdp_prog);
        int qid = ring->ring_idx;
        if (!xdp_on || !test_bit(qid, adapter->af_xdp_zc_qps))
                return NULL;
 
-       return xdp_get_umem_from_qid(adapter->netdev, qid);
+       return xdp_get_xsk_pool_from_qid(adapter->netdev, qid);
 }
 
-static int ixgbe_xsk_umem_enable(struct ixgbe_adapter *adapter,
-                                struct xdp_umem *umem,
+static int ixgbe_xsk_pool_enable(struct ixgbe_adapter *adapter,
+                                struct xsk_buff_pool *pool,
                                 u16 qid)
 {
        struct net_device *netdev = adapter->netdev;
            qid >= netdev->real_num_tx_queues)
                return -EINVAL;
 
-       err = xsk_buff_dma_map(umem, &adapter->pdev->dev, IXGBE_RX_DMA_ATTR);
+       err = xsk_buff_dma_map(pool->umem, &adapter->pdev->dev, IXGBE_RX_DMA_ATTR);
        if (err)
                return err;
 
        return 0;
 }
 
-static int ixgbe_xsk_umem_disable(struct ixgbe_adapter *adapter, u16 qid)
+static int ixgbe_xsk_pool_disable(struct ixgbe_adapter *adapter, u16 qid)
 {
-       struct xdp_umem *umem;
+       struct xsk_buff_pool *pool;
        bool if_running;
 
-       umem = xdp_get_umem_from_qid(adapter->netdev, qid);
-       if (!umem)
+       pool = xdp_get_xsk_pool_from_qid(adapter->netdev, qid);
+       if (!pool)
                return -EINVAL;
 
        if_running = netif_running(adapter->netdev) &&
                ixgbe_txrx_ring_disable(adapter, qid);
 
        clear_bit(qid, adapter->af_xdp_zc_qps);
-       xsk_buff_dma_unmap(umem, IXGBE_RX_DMA_ATTR);
+       xsk_buff_dma_unmap(pool->umem, IXGBE_RX_DMA_ATTR);
 
        if (if_running)
                ixgbe_txrx_ring_enable(adapter, qid);
        return 0;
 }
 
-int ixgbe_xsk_umem_setup(struct ixgbe_adapter *adapter, struct xdp_umem *umem,
+int ixgbe_xsk_pool_setup(struct ixgbe_adapter *adapter,
+                        struct xsk_buff_pool *pool,
                         u16 qid)
 {
-       return umem ? ixgbe_xsk_umem_enable(adapter, umem, qid) :
-               ixgbe_xsk_umem_disable(adapter, qid);
+       return pool ? ixgbe_xsk_pool_enable(adapter, pool, qid) :
+               ixgbe_xsk_pool_disable(adapter, qid);
 }
 
 static int ixgbe_run_xdp_zc(struct ixgbe_adapter *adapter,
        i -= rx_ring->count;
 
        do {
-               bi->xdp = xsk_buff_alloc(rx_ring->xsk_umem);
+               bi->xdp = xsk_buff_alloc(rx_ring->xsk_pool->umem);
                if (!bi->xdp) {
                        ok = false;
                        break;
        q_vector->rx.total_packets += total_rx_packets;
        q_vector->rx.total_bytes += total_rx_bytes;
 
-       if (xsk_umem_uses_need_wakeup(rx_ring->xsk_umem)) {
+       if (xsk_umem_uses_need_wakeup(rx_ring->xsk_pool->umem)) {
                if (failure || rx_ring->next_to_clean == rx_ring->next_to_use)
-                       xsk_set_rx_need_wakeup(rx_ring->xsk_umem);
+                       xsk_set_rx_need_wakeup(rx_ring->xsk_pool->umem);
                else
-                       xsk_clear_rx_need_wakeup(rx_ring->xsk_umem);
+                       xsk_clear_rx_need_wakeup(rx_ring->xsk_pool->umem);
 
                return (int)total_rx_packets;
        }
 
 static bool ixgbe_xmit_zc(struct ixgbe_ring *xdp_ring, unsigned int budget)
 {
+       struct xsk_buff_pool *pool = xdp_ring->xsk_pool;
        union ixgbe_adv_tx_desc *tx_desc = NULL;
        struct ixgbe_tx_buffer *tx_bi;
        bool work_done = true;
                        break;
                }
 
-               if (!xsk_umem_consume_tx(xdp_ring->xsk_umem, &desc))
+               if (!xsk_umem_consume_tx(pool->umem, &desc))
                        break;
 
-               dma = xsk_buff_raw_get_dma(xdp_ring->xsk_umem, desc.addr);
-               xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_umem, dma,
-                                                desc.len);
+               dma = xsk_buff_raw_get_dma(pool->umem, desc.addr);
+               xsk_buff_raw_dma_sync_for_device(pool->umem, dma, desc.len);
 
                tx_bi = &xdp_ring->tx_buffer_info[xdp_ring->next_to_use];
                tx_bi->bytecount = desc.len;
 
        if (tx_desc) {
                ixgbe_xdp_ring_update_tail(xdp_ring);
-               xsk_umem_consume_tx_done(xdp_ring->xsk_umem);
+               xsk_umem_consume_tx_done(pool->umem);
        }
 
        return !!budget && work_done;
 {
        u16 ntc = tx_ring->next_to_clean, ntu = tx_ring->next_to_use;
        unsigned int total_packets = 0, total_bytes = 0;
-       struct xdp_umem *umem = tx_ring->xsk_umem;
+       struct xsk_buff_pool *pool = tx_ring->xsk_pool;
        union ixgbe_adv_tx_desc *tx_desc;
        struct ixgbe_tx_buffer *tx_bi;
        u32 xsk_frames = 0;
        q_vector->tx.total_packets += total_packets;
 
        if (xsk_frames)
-               xsk_umem_complete_tx(umem, xsk_frames);
+               xsk_umem_complete_tx(pool->umem, xsk_frames);
 
-       if (xsk_umem_uses_need_wakeup(tx_ring->xsk_umem))
-               xsk_set_tx_need_wakeup(tx_ring->xsk_umem);
+       if (xsk_umem_uses_need_wakeup(pool->umem))
+               xsk_set_tx_need_wakeup(pool->umem);
 
        return ixgbe_xmit_zc(tx_ring, q_vector->tx.work_limit);
 }
        if (test_bit(__IXGBE_TX_DISABLED, &ring->state))
                return -ENETDOWN;
 
-       if (!ring->xsk_umem)
+       if (!ring->xsk_pool)
                return -ENXIO;
 
        if (!napi_if_scheduled_mark_missed(&ring->q_vector->napi)) {
 void ixgbe_xsk_clean_tx_ring(struct ixgbe_ring *tx_ring)
 {
        u16 ntc = tx_ring->next_to_clean, ntu = tx_ring->next_to_use;
-       struct xdp_umem *umem = tx_ring->xsk_umem;
+       struct xsk_buff_pool *pool = tx_ring->xsk_pool;
        struct ixgbe_tx_buffer *tx_bi;
        u32 xsk_frames = 0;
 
        }
 
        if (xsk_frames)
-               xsk_umem_complete_tx(umem, xsk_frames);
+               xsk_umem_complete_tx(pool->umem, xsk_frames);
 }
 
 mlx5_core-$(CONFIG_MLX5_CORE_EN) += en_main.o en_common.o en_fs.o en_ethtool.o \
                en_tx.o en_rx.o en_dim.o en_txrx.o en/xdp.o en_stats.o \
                en_selftest.o en/port.o en/monitor_stats.o en/health.o \
-               en/reporter_tx.o en/reporter_rx.o en/params.o en/xsk/umem.o \
+               en/reporter_tx.o en/reporter_rx.o en/params.o en/xsk/pool.o \
                en/xsk/setup.o en/xsk/rx.o en/xsk/tx.o en/devlink.o
 
 #
 
        struct mlx5e_cq            cq;
 
        /* read only */
-       struct xdp_umem           *umem;
+       struct xsk_buff_pool      *xsk_pool;
        struct mlx5_wq_cyc         wq;
        struct mlx5e_xdpsq_stats  *stats;
        mlx5e_fp_xmit_xdp_frame_check xmit_xdp_frame_check;
        struct page_pool      *page_pool;
 
        /* AF_XDP zero-copy */
-       struct xdp_umem       *umem;
+       struct xsk_buff_pool  *xsk_pool;
 
        struct work_struct     recover_work;
 
 #endif
 
 struct mlx5e_xsk {
-       /* UMEMs are stored separately from channels, because we don't want to
-        * lose them when channels are recreated. The kernel also stores UMEMs,
-        * but it doesn't distinguish between zero-copy and non-zero-copy UMEMs,
-        * so rely on our mechanism.
+       /* XSK buffer pools are stored separately from channels,
+        * because we don't want to lose them when channels are
+        * recreated. The kernel also stores buffer pools, but it doesn't
+        * distinguish between zero-copy and non-zero-copy buffer pools,
+        * so we rely on our own mechanism.
         */
-       struct xdp_umem **umems;
+       struct xsk_buff_pool **pools;
        u16 refcnt;
        bool ever_used;
 };
 struct mlx5e_rq_param;
 int mlx5e_open_rq(struct mlx5e_channel *c, struct mlx5e_params *params,
                  struct mlx5e_rq_param *param, struct mlx5e_xsk_param *xsk,
-                 struct xdp_umem *umem, struct mlx5e_rq *rq);
+                 struct xsk_buff_pool *xsk_pool, struct mlx5e_rq *rq);
 int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq, int wait_time);
 void mlx5e_deactivate_rq(struct mlx5e_rq *rq);
 void mlx5e_close_rq(struct mlx5e_rq *rq);
                     struct mlx5e_sq_param *param, struct mlx5e_icosq *sq);
 void mlx5e_close_icosq(struct mlx5e_icosq *sq);
 int mlx5e_open_xdpsq(struct mlx5e_channel *c, struct mlx5e_params *params,
-                    struct mlx5e_sq_param *param, struct xdp_umem *umem,
+                    struct mlx5e_sq_param *param, struct xsk_buff_pool *xsk_pool,
                     struct mlx5e_xdpsq *sq, bool is_redirect);
 void mlx5e_close_xdpsq(struct mlx5e_xdpsq *sq);
 
 
        } while ((++i < MLX5E_TX_CQ_POLL_BUDGET) && (cqe = mlx5_cqwq_get_cqe(&cq->wq)));
 
        if (xsk_frames)
-               xsk_umem_complete_tx(sq->umem, xsk_frames);
+               xsk_umem_complete_tx(sq->xsk_pool->umem, xsk_frames);
 
        sq->stats->cqes += i;
 
        }
 
        if (xsk_frames)
-               xsk_umem_complete_tx(sq->umem, xsk_frames);
+               xsk_umem_complete_tx(sq->xsk_pool->umem, xsk_frames);
 }
 
 int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
        sq->xmit_xdp_frame = is_mpw ?
                mlx5e_xmit_xdp_frame_mpwqe : mlx5e_xmit_xdp_frame;
 }
-
 
 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
-/* Copyright (c) 2019 Mellanox Technologies. */
+/* Copyright (c) 2019-2020, Mellanox Technologies inc. All rights reserved. */
 
 #include <net/xdp_sock_drv.h>
-#include "umem.h"
+#include "pool.h"
 #include "setup.h"
 #include "en/params.h"
 
-static int mlx5e_xsk_map_umem(struct mlx5e_priv *priv,
-                             struct xdp_umem *umem)
+static int mlx5e_xsk_map_pool(struct mlx5e_priv *priv,
+                             struct xsk_buff_pool *pool)
 {
        struct device *dev = priv->mdev->device;
 
-       return xsk_buff_dma_map(umem, dev, 0);
+       return xsk_buff_dma_map(pool->umem, dev, 0);
 }
 
-static void mlx5e_xsk_unmap_umem(struct mlx5e_priv *priv,
-                                struct xdp_umem *umem)
+static void mlx5e_xsk_unmap_pool(struct mlx5e_priv *priv,
+                                struct xsk_buff_pool *pool)
 {
-       return xsk_buff_dma_unmap(umem, 0);
+       return xsk_buff_dma_unmap(pool->umem, 0);
 }
 
-static int mlx5e_xsk_get_umems(struct mlx5e_xsk *xsk)
+static int mlx5e_xsk_get_pools(struct mlx5e_xsk *xsk)
 {
-       if (!xsk->umems) {
-               xsk->umems = kcalloc(MLX5E_MAX_NUM_CHANNELS,
-                                    sizeof(*xsk->umems), GFP_KERNEL);
-               if (unlikely(!xsk->umems))
+       if (!xsk->pools) {
+               xsk->pools = kcalloc(MLX5E_MAX_NUM_CHANNELS,
+                                    sizeof(*xsk->pools), GFP_KERNEL);
+               if (unlikely(!xsk->pools))
                        return -ENOMEM;
        }
 
        return 0;
 }
 
-static void mlx5e_xsk_put_umems(struct mlx5e_xsk *xsk)
+static void mlx5e_xsk_put_pools(struct mlx5e_xsk *xsk)
 {
        if (!--xsk->refcnt) {
-               kfree(xsk->umems);
-               xsk->umems = NULL;
+               kfree(xsk->pools);
+               xsk->pools = NULL;
        }
 }
 
-static int mlx5e_xsk_add_umem(struct mlx5e_xsk *xsk, struct xdp_umem *umem, u16 ix)
+static int mlx5e_xsk_add_pool(struct mlx5e_xsk *xsk, struct xsk_buff_pool *pool, u16 ix)
 {
        int err;
 
-       err = mlx5e_xsk_get_umems(xsk);
+       err = mlx5e_xsk_get_pools(xsk);
        if (unlikely(err))
                return err;
 
-       xsk->umems[ix] = umem;
+       xsk->pools[ix] = pool;
        return 0;
 }
 
-static void mlx5e_xsk_remove_umem(struct mlx5e_xsk *xsk, u16 ix)
+static void mlx5e_xsk_remove_pool(struct mlx5e_xsk *xsk, u16 ix)
 {
-       xsk->umems[ix] = NULL;
+       xsk->pools[ix] = NULL;
 
-       mlx5e_xsk_put_umems(xsk);
+       mlx5e_xsk_put_pools(xsk);
 }
 
-static bool mlx5e_xsk_is_umem_sane(struct xdp_umem *umem)
+static bool mlx5e_xsk_is_pool_sane(struct xsk_buff_pool *pool)
 {
-       return xsk_umem_get_headroom(umem) <= 0xffff &&
-               xsk_umem_get_chunk_size(umem) <= 0xffff;
+       return xsk_umem_get_headroom(pool->umem) <= 0xffff &&
+               xsk_umem_get_chunk_size(pool->umem) <= 0xffff;
 }
 
-void mlx5e_build_xsk_param(struct xdp_umem *umem, struct mlx5e_xsk_param *xsk)
+void mlx5e_build_xsk_param(struct xsk_buff_pool *pool, struct mlx5e_xsk_param *xsk)
 {
-       xsk->headroom = xsk_umem_get_headroom(umem);
-       xsk->chunk_size = xsk_umem_get_chunk_size(umem);
+       xsk->headroom = xsk_umem_get_headroom(pool->umem);
+       xsk->chunk_size = xsk_umem_get_chunk_size(pool->umem);
 }
 
 static int mlx5e_xsk_enable_locked(struct mlx5e_priv *priv,
-                                  struct xdp_umem *umem, u16 ix)
+                                  struct xsk_buff_pool *pool, u16 ix)
 {
        struct mlx5e_params *params = &priv->channels.params;
        struct mlx5e_xsk_param xsk;
        struct mlx5e_channel *c;
        int err;
 
-       if (unlikely(mlx5e_xsk_get_umem(&priv->channels.params, &priv->xsk, ix)))
+       if (unlikely(mlx5e_xsk_get_pool(&priv->channels.params, &priv->xsk, ix)))
                return -EBUSY;
 
-       if (unlikely(!mlx5e_xsk_is_umem_sane(umem)))
+       if (unlikely(!mlx5e_xsk_is_pool_sane(pool)))
                return -EINVAL;
 
-       err = mlx5e_xsk_map_umem(priv, umem);
+       err = mlx5e_xsk_map_pool(priv, pool);
        if (unlikely(err))
                return err;
 
-       err = mlx5e_xsk_add_umem(&priv->xsk, umem, ix);
+       err = mlx5e_xsk_add_pool(&priv->xsk, pool, ix);
        if (unlikely(err))
-               goto err_unmap_umem;
+               goto err_unmap_pool;
 
-       mlx5e_build_xsk_param(umem, &xsk);
+       mlx5e_build_xsk_param(pool, &xsk);
 
        if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
                /* XSK objects will be created on open. */
 
        c = priv->channels.c[ix];
 
-       err = mlx5e_open_xsk(priv, params, &xsk, umem, c);
+       err = mlx5e_open_xsk(priv, params, &xsk, pool, c);
        if (unlikely(err))
-               goto err_remove_umem;
+               goto err_remove_pool;
 
        mlx5e_activate_xsk(c);
 
        mlx5e_deactivate_xsk(c);
        mlx5e_close_xsk(c);
 
-err_remove_umem:
-       mlx5e_xsk_remove_umem(&priv->xsk, ix);
+err_remove_pool:
+       mlx5e_xsk_remove_pool(&priv->xsk, ix);
 
-err_unmap_umem:
-       mlx5e_xsk_unmap_umem(priv, umem);
+err_unmap_pool:
+       mlx5e_xsk_unmap_pool(priv, pool);
 
        return err;
 
         */
        if (!mlx5e_validate_xsk_param(params, &xsk, priv->mdev)) {
                err = -EINVAL;
-               goto err_remove_umem;
+               goto err_remove_pool;
        }
 
        return 0;
 
 static int mlx5e_xsk_disable_locked(struct mlx5e_priv *priv, u16 ix)
 {
-       struct xdp_umem *umem = mlx5e_xsk_get_umem(&priv->channels.params,
+       struct xsk_buff_pool *pool = mlx5e_xsk_get_pool(&priv->channels.params,
                                                   &priv->xsk, ix);
        struct mlx5e_channel *c;
 
-       if (unlikely(!umem))
+       if (unlikely(!pool))
                return -EINVAL;
 
        if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
-               goto remove_umem;
+               goto remove_pool;
 
        /* XSK RQ and SQ are only created if XDP program is set. */
        if (!priv->channels.params.xdp_prog)
-               goto remove_umem;
+               goto remove_pool;
 
        c = priv->channels.c[ix];
        mlx5e_xsk_redirect_rqt_to_drop(priv, ix);
        mlx5e_deactivate_xsk(c);
        mlx5e_close_xsk(c);
 
-remove_umem:
-       mlx5e_xsk_remove_umem(&priv->xsk, ix);
-       mlx5e_xsk_unmap_umem(priv, umem);
+remove_pool:
+       mlx5e_xsk_remove_pool(&priv->xsk, ix);
+       mlx5e_xsk_unmap_pool(priv, pool);
 
        return 0;
 }
 
-static int mlx5e_xsk_enable_umem(struct mlx5e_priv *priv, struct xdp_umem *umem,
+static int mlx5e_xsk_enable_pool(struct mlx5e_priv *priv, struct xsk_buff_pool *pool,
                                 u16 ix)
 {
        int err;
 
        mutex_lock(&priv->state_lock);
-       err = mlx5e_xsk_enable_locked(priv, umem, ix);
+       err = mlx5e_xsk_enable_locked(priv, pool, ix);
        mutex_unlock(&priv->state_lock);
 
        return err;
 }
 
-static int mlx5e_xsk_disable_umem(struct mlx5e_priv *priv, u16 ix)
+static int mlx5e_xsk_disable_pool(struct mlx5e_priv *priv, u16 ix)
 {
        int err;
 
        return err;
 }
 
-int mlx5e_xsk_setup_umem(struct net_device *dev, struct xdp_umem *umem, u16 qid)
+int mlx5e_xsk_setup_pool(struct net_device *dev, struct xsk_buff_pool *pool, u16 qid)
 {
        struct mlx5e_priv *priv = netdev_priv(dev);
        struct mlx5e_params *params = &priv->channels.params;
        if (unlikely(!mlx5e_qid_get_ch_if_in_group(params, qid, MLX5E_RQ_GROUP_XSK, &ix)))
                return -EINVAL;
 
-       return umem ? mlx5e_xsk_enable_umem(priv, umem, ix) :
-                     mlx5e_xsk_disable_umem(priv, ix);
+       return pool ? mlx5e_xsk_enable_pool(priv, pool, ix) :
+                     mlx5e_xsk_disable_pool(priv, ix);
 }
 
--- /dev/null
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2019-2020, Mellanox Technologies inc. All rights reserved. */
+
+#ifndef __MLX5_EN_XSK_POOL_H__
+#define __MLX5_EN_XSK_POOL_H__
+
+#include "en.h"
+
+static inline struct xsk_buff_pool *mlx5e_xsk_get_pool(struct mlx5e_params *params,
+                                                      struct mlx5e_xsk *xsk, u16 ix)
+{
+       if (!xsk || !xsk->pools)
+               return NULL;
+
+       if (unlikely(ix >= params->num_channels))
+               return NULL;
+
+       return xsk->pools[ix];
+}
+
+struct mlx5e_xsk_param;
+void mlx5e_build_xsk_param(struct xsk_buff_pool *pool, struct mlx5e_xsk_param *xsk);
+
+/* .ndo_bpf callback. */
+int mlx5e_xsk_setup_pool(struct net_device *dev, struct xsk_buff_pool *pool, u16 qid);
+
+#endif /* __MLX5_EN_XSK_POOL_H__ */
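A hedged usage sketch for the two helpers declared above, roughly how a caller such as mlx5e_open_channel() in en_main.c is expected to use them (the wrapper function and its name are assumptions, not part of this patch):

/* Assumed caller sketch: look up the per-channel pool and only build the
 * XSK parameters when zero-copy is enabled on that channel.
 */
static void example_channel_xsk_lookup(struct mlx5e_priv *priv, u16 ix)
{
        struct xsk_buff_pool *pool =
                mlx5e_xsk_get_pool(&priv->channels.params, &priv->xsk, ix);
        struct mlx5e_xsk_param xsk;

        if (pool)
                mlx5e_build_xsk_param(pool, &xsk);
        /* &xsk would then be passed on to mlx5e_open_xsk() */
}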
 
                                              struct mlx5e_wqe_frag_info *wi,
                                              u32 cqe_bcnt);
 
-static inline int mlx5e_xsk_page_alloc_umem(struct mlx5e_rq *rq,
+static inline int mlx5e_xsk_page_alloc_pool(struct mlx5e_rq *rq,
                                            struct mlx5e_dma_info *dma_info)
 {
-       dma_info->xsk = xsk_buff_alloc(rq->umem);
+       dma_info->xsk = xsk_buff_alloc(rq->xsk_pool->umem);
        if (!dma_info->xsk)
                return -ENOMEM;
 
 
 static inline bool mlx5e_xsk_update_rx_wakeup(struct mlx5e_rq *rq, bool alloc_err)
 {
-       if (!xsk_umem_uses_need_wakeup(rq->umem))
+       if (!xsk_umem_uses_need_wakeup(rq->xsk_pool->umem))
                return alloc_err;
 
        if (unlikely(alloc_err))
-               xsk_set_rx_need_wakeup(rq->umem);
+               xsk_set_rx_need_wakeup(rq->xsk_pool->umem);
        else
-               xsk_clear_rx_need_wakeup(rq->umem);
+               xsk_clear_rx_need_wakeup(rq->xsk_pool->umem);
 
        return false;
 }
 
 }
 
 int mlx5e_open_xsk(struct mlx5e_priv *priv, struct mlx5e_params *params,
-                  struct mlx5e_xsk_param *xsk, struct xdp_umem *umem,
+                  struct mlx5e_xsk_param *xsk, struct xsk_buff_pool *pool,
                   struct mlx5e_channel *c)
 {
        struct mlx5e_channel_param *cparam;
        if (unlikely(err))
                goto err_free_cparam;
 
-       err = mlx5e_open_rq(c, params, &cparam->rq, xsk, umem, &c->xskrq);
+       err = mlx5e_open_rq(c, params, &cparam->rq, xsk, pool, &c->xskrq);
        if (unlikely(err))
                goto err_close_rx_cq;
 
        if (unlikely(err))
                goto err_close_rq;
 
-       /* Create a separate SQ, so that when the UMEM is disabled, we could
+       /* Create a separate SQ, so that when the buff pool is disabled, we could
         * close this SQ safely and stop receiving CQEs. In other case, e.g., if
-        * the XDPSQ was used instead, we might run into trouble when the UMEM
+        * the XDPSQ was used instead, we might run into trouble when the buff pool
         * is disabled and then reenabled, but the SQ continues receiving CQEs
-        * from the old UMEM.
+        * from the old buff pool.
         */
-       err = mlx5e_open_xdpsq(c, params, &cparam->xdp_sq, umem, &c->xsksq, true);
+       err = mlx5e_open_xdpsq(c, params, &cparam->xdp_sq, pool, &c->xsksq, true);
        if (unlikely(err))
                goto err_close_tx_cq;
 
 
                              struct mlx5e_xsk_param *xsk,
                              struct mlx5_core_dev *mdev);
 int mlx5e_open_xsk(struct mlx5e_priv *priv, struct mlx5e_params *params,
-                  struct mlx5e_xsk_param *xsk, struct xdp_umem *umem,
+                  struct mlx5e_xsk_param *xsk, struct xsk_buff_pool *pool,
                   struct mlx5e_channel *c);
 void mlx5e_close_xsk(struct mlx5e_channel *c);
 void mlx5e_activate_xsk(struct mlx5e_channel *c);
 
 /* Copyright (c) 2019 Mellanox Technologies. */
 
 #include "tx.h"
-#include "umem.h"
+#include "pool.h"
 #include "en/xdp.h"
 #include "en/params.h"
 #include <net/xdp_sock_drv.h>
 
 bool mlx5e_xsk_tx(struct mlx5e_xdpsq *sq, unsigned int budget)
 {
-       struct xdp_umem *umem = sq->umem;
+       struct xsk_buff_pool *pool = sq->xsk_pool;
        struct mlx5e_xdp_info xdpi;
        struct mlx5e_xdp_xmit_data xdptxd;
        bool work_done = true;
                        break;
                }
 
-               if (!xsk_umem_consume_tx(umem, &desc)) {
+               if (!xsk_umem_consume_tx(pool->umem, &desc)) {
                        /* TX will get stuck until something wakes it up by
                         * triggering NAPI. Currently it's expected that the
                         * application calls sendto() if there are consumed, but
                        break;
                }
 
-               xdptxd.dma_addr = xsk_buff_raw_get_dma(umem, desc.addr);
-               xdptxd.data = xsk_buff_raw_get_data(umem, desc.addr);
+               xdptxd.dma_addr = xsk_buff_raw_get_dma(pool->umem, desc.addr);
+               xdptxd.data = xsk_buff_raw_get_data(pool->umem, desc.addr);
                xdptxd.len = desc.len;
 
-               xsk_buff_raw_dma_sync_for_device(umem, xdptxd.dma_addr, xdptxd.len);
+               xsk_buff_raw_dma_sync_for_device(pool->umem, xdptxd.dma_addr, xdptxd.len);
 
                ret = INDIRECT_CALL_2(sq->xmit_xdp_frame, mlx5e_xmit_xdp_frame_mpwqe,
                                      mlx5e_xmit_xdp_frame, sq, &xdptxd, &xdpi, check_result);
                        mlx5e_xdp_mpwqe_complete(sq);
                mlx5e_xmit_xdp_doorbell(sq);
 
-               xsk_umem_consume_tx_done(umem);
+               xsk_umem_consume_tx_done(pool->umem);
        }
 
        return !(budget && work_done);
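
The comment inside the loop above spells out the Tx contract: if descriptors
were consumed but not yet completed, the driver stays idle until user space
triggers NAPI. A minimal sketch of that sendto() kick, again assuming libbpf's
helpers and a hypothetical Tx-ring pointer tx; illustration only, not part of
the patch:

    #include <sys/socket.h>
    #include <bpf/xsk.h>            /* libbpf AF_XDP helpers */

    /* With need_wakeup enabled, kick Tx only when the driver requests it. */
    static void kick_tx(struct xsk_socket *xsk, struct xsk_ring_prod *tx)
    {
            if (xsk_ring_prod__needs_wakeup(tx))
                    sendto(xsk_socket__fd(xsk), NULL, 0, MSG_DONTWAIT, NULL, 0);
    }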
 
 
 static inline void mlx5e_xsk_update_tx_wakeup(struct mlx5e_xdpsq *sq)
 {
-       if (!xsk_umem_uses_need_wakeup(sq->umem))
+       if (!xsk_umem_uses_need_wakeup(sq->xsk_pool->umem))
                return;
 
        if (sq->pc != sq->cc)
-               xsk_clear_tx_need_wakeup(sq->umem);
+               xsk_clear_tx_need_wakeup(sq->xsk_pool->umem);
        else
-               xsk_set_tx_need_wakeup(sq->umem);
+               xsk_set_tx_need_wakeup(sq->xsk_pool->umem);
 }
 
 #endif /* __MLX5_EN_XSK_TX_H__ */
 
+++ /dev/null
-/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
-/* Copyright (c) 2019 Mellanox Technologies. */
-
-#ifndef __MLX5_EN_XSK_UMEM_H__
-#define __MLX5_EN_XSK_UMEM_H__
-
-#include "en.h"
-
-static inline struct xdp_umem *mlx5e_xsk_get_umem(struct mlx5e_params *params,
-                                                 struct mlx5e_xsk *xsk, u16 ix)
-{
-       if (!xsk || !xsk->umems)
-               return NULL;
-
-       if (unlikely(ix >= params->num_channels))
-               return NULL;
-
-       return xsk->umems[ix];
-}
-
-struct mlx5e_xsk_param;
-void mlx5e_build_xsk_param(struct xdp_umem *umem, struct mlx5e_xsk_param *xsk);
-
-/* .ndo_bpf callback. */
-int mlx5e_xsk_setup_umem(struct net_device *dev, struct xdp_umem *umem, u16 qid);
-
-int mlx5e_xsk_resize_reuseq(struct xdp_umem *umem, u32 nentries);
-
-#endif /* __MLX5_EN_XSK_UMEM_H__ */
 
 
 #include "en.h"
 #include "en/port.h"
-#include "en/xsk/umem.h"
+#include "en/xsk/pool.h"
 #include "lib/clock.h"
 
 void mlx5e_ethtool_get_drvinfo(struct mlx5e_priv *priv,
 
 #include <linux/mlx5/fs.h>
 #include "en.h"
 #include "en/params.h"
-#include "en/xsk/umem.h"
+#include "en/xsk/pool.h"
 
 struct mlx5e_ethtool_rule {
        struct list_head             list;
 
 #include "en/monitor_stats.h"
 #include "en/health.h"
 #include "en/params.h"
-#include "en/xsk/umem.h"
+#include "en/xsk/pool.h"
 #include "en/xsk/setup.h"
 #include "en/xsk/rx.h"
 #include "en/xsk/tx.h"
 static int mlx5e_alloc_rq(struct mlx5e_channel *c,
                          struct mlx5e_params *params,
                          struct mlx5e_xsk_param *xsk,
-                         struct xdp_umem *umem,
+                         struct xsk_buff_pool *xsk_pool,
                          struct mlx5e_rq_param *rqp,
                          struct mlx5e_rq *rq)
 {
        rq->mdev    = mdev;
        rq->hw_mtu  = MLX5E_SW2HW_MTU(params, params->sw_mtu);
        rq->xdpsq   = &c->rq_xdpsq;
-       rq->umem    = umem;
+       rq->xsk_pool = xsk_pool;
 
-       if (rq->umem)
+       if (rq->xsk_pool)
                rq->stats = &c->priv->channel_stats[c->ix].xskrq;
        else
                rq->stats = &c->priv->channel_stats[c->ix].rq;
        if (xsk) {
                err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq,
                                                 MEM_TYPE_XSK_BUFF_POOL, NULL);
-               xsk_buff_set_rxq_info(rq->umem, &rq->xdp_rxq);
+               xsk_buff_set_rxq_info(rq->xsk_pool->umem, &rq->xdp_rxq);
        } else {
                /* Create a page_pool and register it with rxq */
                pp_params.order     = 0;
 
 int mlx5e_open_rq(struct mlx5e_channel *c, struct mlx5e_params *params,
                  struct mlx5e_rq_param *param, struct mlx5e_xsk_param *xsk,
-                 struct xdp_umem *umem, struct mlx5e_rq *rq)
+                 struct xsk_buff_pool *xsk_pool, struct mlx5e_rq *rq)
 {
        int err;
 
-       err = mlx5e_alloc_rq(c, params, xsk, umem, param, rq);
+       err = mlx5e_alloc_rq(c, params, xsk, xsk_pool, param, rq);
        if (err)
                return err;
 
 
 static int mlx5e_alloc_xdpsq(struct mlx5e_channel *c,
                             struct mlx5e_params *params,
-                            struct xdp_umem *umem,
+                            struct xsk_buff_pool *xsk_pool,
                             struct mlx5e_sq_param *param,
                             struct mlx5e_xdpsq *sq,
                             bool is_redirect)
        sq->uar_map   = mdev->mlx5e_res.bfreg.map;
        sq->min_inline_mode = params->tx_min_inline_mode;
        sq->hw_mtu    = MLX5E_SW2HW_MTU(params, params->sw_mtu);
-       sq->umem      = umem;
+       sq->xsk_pool  = xsk_pool;
 
-       sq->stats = sq->umem ?
+       sq->stats = sq->xsk_pool ?
                &c->priv->channel_stats[c->ix].xsksq :
                is_redirect ?
                        &c->priv->channel_stats[c->ix].xdpsq :
 }
 
 int mlx5e_open_xdpsq(struct mlx5e_channel *c, struct mlx5e_params *params,
-                    struct mlx5e_sq_param *param, struct xdp_umem *umem,
+                    struct mlx5e_sq_param *param, struct xsk_buff_pool *xsk_pool,
                     struct mlx5e_xdpsq *sq, bool is_redirect)
 {
        struct mlx5e_create_sq_param csp = {};
        int err;
 
-       err = mlx5e_alloc_xdpsq(c, params, umem, param, sq, is_redirect);
+       err = mlx5e_alloc_xdpsq(c, params, xsk_pool, param, sq, is_redirect);
        if (err)
                return err;
 
 static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
                              struct mlx5e_params *params,
                              struct mlx5e_channel_param *cparam,
-                             struct xdp_umem *umem,
+                             struct xsk_buff_pool *xsk_pool,
                              struct mlx5e_channel **cp)
 {
        int cpu = cpumask_first(mlx5_comp_irq_get_affinity_mask(priv->mdev, ix));
        if (unlikely(err))
                goto err_napi_del;
 
-       if (umem) {
-               mlx5e_build_xsk_param(umem, &xsk);
-               err = mlx5e_open_xsk(priv, params, &xsk, umem, c);
+       if (xsk_pool) {
+               mlx5e_build_xsk_param(xsk_pool, &xsk);
+               err = mlx5e_open_xsk(priv, params, &xsk, xsk_pool, c);
                if (unlikely(err))
                        goto err_close_queues;
        }
 
        mlx5e_build_channel_param(priv, &chs->params, cparam);
        for (i = 0; i < chs->num; i++) {
-               struct xdp_umem *umem = NULL;
+               struct xsk_buff_pool *xsk_pool = NULL;
 
                if (chs->params.xdp_prog)
-                       umem = mlx5e_xsk_get_umem(&chs->params, chs->params.xsk, i);
+                       xsk_pool = mlx5e_xsk_get_pool(&chs->params, chs->params.xsk, i);
 
-               err = mlx5e_open_channel(priv, i, &chs->params, cparam, umem, &chs->c[i]);
+               err = mlx5e_open_channel(priv, i, &chs->params, cparam, xsk_pool, &chs->c[i]);
                if (err)
                        goto err_close_channels;
        }
        u16 ix;
 
        for (ix = 0; ix < chs->params.num_channels; ix++) {
-               struct xdp_umem *umem = mlx5e_xsk_get_umem(&chs->params, chs->params.xsk, ix);
+               struct xsk_buff_pool *xsk_pool =
+                       mlx5e_xsk_get_pool(&chs->params, chs->params.xsk, ix);
                struct mlx5e_xsk_param xsk;
 
-               if (!umem)
+               if (!xsk_pool)
                        continue;
 
-               mlx5e_build_xsk_param(umem, &xsk);
+               mlx5e_build_xsk_param(xsk_pool, &xsk);
 
                if (!mlx5e_validate_xsk_param(new_params, &xsk, mdev)) {
                        u32 hr = mlx5e_get_linear_rq_headroom(new_params, &xsk);
        switch (xdp->command) {
        case XDP_SETUP_PROG:
                return mlx5e_xdp_set(dev, xdp->prog);
-       case XDP_SETUP_XSK_UMEM:
-               return mlx5e_xsk_setup_umem(dev, xdp->xsk.umem,
+       case XDP_SETUP_XSK_POOL:
+               return mlx5e_xsk_setup_pool(dev, xdp->xsk.pool,
                                            xdp->xsk.queue_id);
        default:
                return -EINVAL;
 
 static inline int mlx5e_page_alloc(struct mlx5e_rq *rq,
                                   struct mlx5e_dma_info *dma_info)
 {
-       if (rq->umem)
-               return mlx5e_xsk_page_alloc_umem(rq, dma_info);
+       if (rq->xsk_pool)
+               return mlx5e_xsk_page_alloc_pool(rq, dma_info);
        else
                return mlx5e_page_alloc_pool(rq, dma_info);
 }
                                      struct mlx5e_dma_info *dma_info,
                                      bool recycle)
 {
-       if (rq->umem)
+       if (rq->xsk_pool)
                /* The `recycle` parameter is ignored, and the page is always
                 * put into the Reuse Ring, because there is no way to return
                 * the page to the userspace when the interface goes down.
        int err;
        int i;
 
-       if (rq->umem) {
+       if (rq->xsk_pool) {
                int pages_desired = wqe_bulk << rq->wqe.info.log_num_frags;
 
                /* Check in advance that we have enough frames, instead of
                 * allocating one-by-one, failing and moving frames to the
                 * Reuse Ring.
                 */
-               if (unlikely(!xsk_buff_can_alloc(rq->umem, pages_desired)))
+               if (unlikely(!xsk_buff_can_alloc(rq->xsk_pool->umem, pages_desired)))
                        return -ENOMEM;
        }
 
        /* Check in advance that we have enough frames, instead of allocating
         * one-by-one, failing and moving frames to the Reuse Ring.
         */
-       if (rq->umem &&
-           unlikely(!xsk_buff_can_alloc(rq->umem, MLX5_MPWRQ_PAGES_PER_WQE))) {
+       if (rq->xsk_pool &&
+           unlikely(!xsk_buff_can_alloc(rq->xsk_pool->umem, MLX5_MPWRQ_PAGES_PER_WQE))) {
                err = -ENOMEM;
                goto err;
        }
         * the driver when it refills the Fill Ring.
         * 2. Otherwise, busy poll by rescheduling the NAPI poll.
         */
-       if (unlikely(alloc_err == -ENOMEM && rq->umem))
+       if (unlikely(alloc_err == -ENOMEM && rq->xsk_pool))
                return true;
 
        return false;
 
        /* Subordinate device that the queue has been assigned to */
        struct net_device       *sb_dev;
 #ifdef CONFIG_XDP_SOCKETS
-       struct xdp_umem         *umem;
+       struct xsk_buff_pool    *pool;
 #endif
 /*
  * write-mostly part
        struct net_device               *dev;
        struct xdp_rxq_info             xdp_rxq;
 #ifdef CONFIG_XDP_SOCKETS
-       struct xdp_umem                 *umem;
+       struct xsk_buff_pool            *pool;
 #endif
 } ____cacheline_aligned_in_smp;
 
        /* BPF program for offload callbacks, invoked at program load time. */
        BPF_OFFLOAD_MAP_ALLOC,
        BPF_OFFLOAD_MAP_FREE,
-       XDP_SETUP_XSK_UMEM,
+       XDP_SETUP_XSK_POOL,
 };
 
 struct bpf_prog_offload_ops;
                struct {
                        struct bpf_offloaded_map *offmap;
                };
-               /* XDP_SETUP_XSK_UMEM */
+               /* XDP_SETUP_XSK_POOL */
                struct {
-                       struct xdp_umem *umem;
+                       struct xsk_buff_pool *pool;
                        u16 queue_id;
                } xsk;
        };
 
 void xsk_umem_complete_tx(struct xdp_umem *umem, u32 nb_entries);
 bool xsk_umem_consume_tx(struct xdp_umem *umem, struct xdp_desc *desc);
 void xsk_umem_consume_tx_done(struct xdp_umem *umem);
-struct xdp_umem *xdp_get_umem_from_qid(struct net_device *dev, u16 queue_id);
+struct xsk_buff_pool *xdp_get_xsk_pool_from_qid(struct net_device *dev,
+                                               u16 queue_id);
 void xsk_set_rx_need_wakeup(struct xdp_umem *umem);
 void xsk_set_tx_need_wakeup(struct xdp_umem *umem);
 void xsk_clear_rx_need_wakeup(struct xdp_umem *umem);
 {
 }
 
-static inline struct xdp_umem *xdp_get_umem_from_qid(struct net_device *dev,
-                                                    u16 queue_id)
+static inline struct xsk_buff_pool *
+xdp_get_xsk_pool_from_qid(struct net_device *dev, u16 queue_id)
 {
        return NULL;
 }
 
 struct xdp_rxq_info;
 struct xsk_queue;
 struct xdp_desc;
+struct xdp_umem;
 struct device;
 struct page;
 
        u32 frame_len;
        bool dma_need_sync;
        bool unaligned;
+       struct xdp_umem *umem;
        void *addrs;
        struct device *dev;
        struct xdp_buff_xsk *free_heads[];
 };
 
 /* AF_XDP core. */
-struct xsk_buff_pool *xp_create(struct page **pages, u32 nr_pages, u32 chunks,
+struct xsk_buff_pool *xp_create(struct xdp_umem *umem, u32 chunks,
                                u32 chunk_size, u32 headroom, u64 size,
                                bool unaligned);
 void xp_set_fq(struct xsk_buff_pool *pool, struct xsk_queue *fq);
 
        from_channel = channels.combined_count +
                       min(channels.rx_count, channels.tx_count);
        for (i = from_channel; i < old_total; i++)
-               if (xdp_get_umem_from_qid(dev, i)) {
+               if (xdp_get_xsk_pool_from_qid(dev, i)) {
                        GENL_SET_ERR_MSG(info, "requested channel counts are too low for existing zerocopy AF_XDP sockets");
                        return -EINVAL;
                }
 
                min(channels.rx_count, channels.tx_count);
        to_channel = curr.combined_count + max(curr.rx_count, curr.tx_count);
        for (i = from_channel; i < to_channel; i++)
-               if (xdp_get_umem_from_qid(dev, i))
+               if (xdp_get_xsk_pool_from_qid(dev, i))
                        return -EINVAL;
 
        ret = dev->ethtool_ops->set_channels(dev, &channels);
 
  * not know if the device has more tx queues than rx, or the opposite.
  * This might also change during run time.
  */
-static int xdp_reg_umem_at_qid(struct net_device *dev, struct xdp_umem *umem,
-                              u16 queue_id)
+static int xdp_reg_xsk_pool_at_qid(struct net_device *dev,
+                                  struct xsk_buff_pool *pool,
+                                  u16 queue_id)
 {
        if (queue_id >= max_t(unsigned int,
                              dev->real_num_rx_queues,
                return -EINVAL;
 
        if (queue_id < dev->real_num_rx_queues)
-               dev->_rx[queue_id].umem = umem;
+               dev->_rx[queue_id].pool = pool;
        if (queue_id < dev->real_num_tx_queues)
-               dev->_tx[queue_id].umem = umem;
+               dev->_tx[queue_id].pool = pool;
 
        return 0;
 }
 
-struct xdp_umem *xdp_get_umem_from_qid(struct net_device *dev,
-                                      u16 queue_id)
+struct xsk_buff_pool *xdp_get_xsk_pool_from_qid(struct net_device *dev,
+                                               u16 queue_id)
 {
        if (queue_id < dev->real_num_rx_queues)
-               return dev->_rx[queue_id].umem;
+               return dev->_rx[queue_id].pool;
        if (queue_id < dev->real_num_tx_queues)
-               return dev->_tx[queue_id].umem;
+               return dev->_tx[queue_id].pool;
 
        return NULL;
 }
-EXPORT_SYMBOL(xdp_get_umem_from_qid);
+EXPORT_SYMBOL(xdp_get_xsk_pool_from_qid);
 
-static void xdp_clear_umem_at_qid(struct net_device *dev, u16 queue_id)
+static void xdp_clear_xsk_pool_at_qid(struct net_device *dev, u16 queue_id)
 {
        if (queue_id < dev->real_num_rx_queues)
-               dev->_rx[queue_id].umem = NULL;
+               dev->_rx[queue_id].pool = NULL;
        if (queue_id < dev->real_num_tx_queues)
-               dev->_tx[queue_id].umem = NULL;
+               dev->_tx[queue_id].pool = NULL;
 }
 
 int xdp_umem_assign_dev(struct xdp_umem *umem, struct net_device *dev,
        if (force_zc && force_copy)
                return -EINVAL;
 
-       if (xdp_get_umem_from_qid(dev, queue_id))
+       if (xdp_get_xsk_pool_from_qid(dev, queue_id))
                return -EBUSY;
 
-       err = xdp_reg_umem_at_qid(dev, umem, queue_id);
+       err = xdp_reg_xsk_pool_at_qid(dev, umem->pool, queue_id);
        if (err)
                return err;
 
                goto err_unreg_umem;
        }
 
-       bpf.command = XDP_SETUP_XSK_UMEM;
-       bpf.xsk.umem = umem;
+       bpf.command = XDP_SETUP_XSK_POOL;
+       bpf.xsk.pool = umem->pool;
        bpf.xsk.queue_id = queue_id;
 
        err = dev->netdev_ops->ndo_bpf(dev, &bpf);
        if (!force_zc)
                err = 0; /* fallback to copy mode */
        if (err)
-               xdp_clear_umem_at_qid(dev, queue_id);
+               xdp_clear_xsk_pool_at_qid(dev, queue_id);
        return err;
 }
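
For reference, this core path runs when an AF_XDP socket is bound to a queue;
the XDP_ZEROCOPY bind flag is what drives the force_zc case and, via ndo_bpf,
the new XDP_SETUP_XSK_POOL command. A hedged sketch of that bind() using only
the uapi definitions from <linux/if_xdp.h>; UMEM registration and ring setup on
fd are assumed to have happened already, and error handling is omitted:

    #include <linux/if_xdp.h>
    #include <net/if.h>
    #include <string.h>
    #include <sys/socket.h>

    /* Bind an already configured AF_XDP socket to ifname/queue_id in
     * zero-copy mode with need_wakeup; returns bind()'s result.
     */
    static int xsk_bind_zc(int fd, const char *ifname, unsigned int queue_id)
    {
            struct sockaddr_xdp sxdp;

            memset(&sxdp, 0, sizeof(sxdp));
            sxdp.sxdp_family = AF_XDP;
            sxdp.sxdp_ifindex = if_nametoindex(ifname);
            sxdp.sxdp_queue_id = queue_id;
            sxdp.sxdp_flags = XDP_ZEROCOPY | XDP_USE_NEED_WAKEUP;

            return bind(fd, (struct sockaddr *)&sxdp, sizeof(sxdp));
    }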
 
                return;
 
        if (umem->zc) {
-               bpf.command = XDP_SETUP_XSK_UMEM;
-               bpf.xsk.umem = NULL;
+               bpf.command = XDP_SETUP_XSK_POOL;
+               bpf.xsk.pool = NULL;
                bpf.xsk.queue_id = umem->queue_id;
 
                err = umem->dev->netdev_ops->ndo_bpf(umem->dev, &bpf);
                        WARN(1, "failed to disable umem!\n");
        }
 
-       xdp_clear_umem_at_qid(umem->dev, umem->queue_id);
+       xdp_clear_xsk_pool_at_qid(umem->dev, umem->queue_id);
 
        dev_put(umem->dev);
        umem->dev = NULL;
        if (err)
                goto out_account;
 
-       umem->pool = xp_create(umem->pgs, umem->npgs, chunks, chunk_size,
-                              headroom, size, unaligned_chunks);
+       umem->pool = xp_create(umem, chunks, chunk_size, headroom, size,
+                              unaligned_chunks);
        if (!umem->pool) {
                err = -ENOMEM;
                goto out_pin;
 
        kvfree(pool);
 }
 
-struct xsk_buff_pool *xp_create(struct page **pages, u32 nr_pages, u32 chunks,
+struct xsk_buff_pool *xp_create(struct xdp_umem *umem, u32 chunks,
                                u32 chunk_size, u32 headroom, u64 size,
                                bool unaligned)
 {
        pool->chunk_size = chunk_size;
        pool->unaligned = unaligned;
        pool->frame_len = chunk_size - headroom - XDP_PACKET_HEADROOM;
+       pool->umem = umem;
        INIT_LIST_HEAD(&pool->free_list);
 
        for (i = 0; i < pool->free_heads_cnt; i++) {
                pool->free_heads[i] = xskb;
        }
 
-       err = xp_addr_map(pool, pages, nr_pages);
+       err = xp_addr_map(pool, umem->pgs, umem->npgs);
        if (!err)
                return pool;