  * false   - packet should be passed to the stack.
  */
 bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
-                struct xdp_buff xdp, struct page *page, u8 **data_ptr,
+                struct xdp_buff *xdp, struct page *page, u8 **data_ptr,
                 unsigned int *len, u8 *event)
 {
        struct bpf_prog *xdp_prog = READ_ONCE(rxr->xdp_prog);
 
        txr = rxr->bnapi->tx_ring[0];
        /* BNXT_RX_PAGE_MODE(bp) when XDP enabled */
-       orig_data = xdp.data;
+       orig_data = xdp->data;
 
-       act = bpf_prog_run_xdp(xdp_prog, &xdp);
+       act = bpf_prog_run_xdp(xdp_prog, xdp);
 
        tx_avail = bnxt_tx_avail(bp, txr);
        /* If the tx ring is not full, we must not update the rx producer yet
         * because we may still be transmitting on some BDs.
         */
        if (tx_avail != bp->tx_ring_size)
                *event &= ~BNXT_RX_EVENT;
 
-       *len = xdp.data_end - xdp.data;
-       if (orig_data != xdp.data) {
-               offset = xdp.data - xdp.data_hard_start;
-               *data_ptr = xdp.data_hard_start + offset;
+       *len = xdp->data_end - xdp->data;
+       if (orig_data != xdp->data) {
+               offset = xdp->data - xdp->data_hard_start;
+               *data_ptr = xdp->data_hard_start + offset;
        }
 
        switch (act) {
        case XDP_PASS:
                return false;

        case XDP_TX:
                rx_buf = &rxr->rx_buf_ring[cons];
                mapping = rx_buf->mapping - bp->rx_dma_offset;
                *event &= BNXT_TX_CMP_EVENT;
 
-               if (unlikely(xdp_buff_has_frags(&xdp))) {
-                       struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(&xdp);
+               if (unlikely(xdp_buff_has_frags(xdp))) {
+                       struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
 
                        tx_needed += sinfo->nr_frags;
                        *event = BNXT_AGG_EVENT;
                }

                if (tx_avail < tx_needed) {
                        trace_xdp_exception(bp->dev, xdp_prog, act);
-                       bnxt_xdp_buff_frags_free(rxr, &xdp);
+                       bnxt_xdp_buff_frags_free(rxr, xdp);
                        bnxt_reuse_rx_data(rxr, cons, page);
                        return true;
                }
 
                *event |= BNXT_TX_EVENT;
                __bnxt_xmit_xdp(bp, txr, mapping + offset, *len,
-                               NEXT_RX(rxr->rx_prod), &xdp);
+                               NEXT_RX(rxr->rx_prod), xdp);
                bnxt_reuse_rx_data(rxr, cons, page);
                return true;
        case XDP_REDIRECT:
                /* if we are unable to allocate a new buffer, abort and reuse */
                if (bnxt_alloc_rx_data(bp, rxr, rxr->rx_prod, GFP_ATOMIC)) {
                        trace_xdp_exception(bp->dev, xdp_prog, act);
-                       bnxt_xdp_buff_frags_free(rxr, &xdp);
+                       bnxt_xdp_buff_frags_free(rxr, xdp);
                        bnxt_reuse_rx_data(rxr, cons, page);
                        return true;
                }
 
-               if (xdp_do_redirect(bp->dev, &xdp, xdp_prog)) {
+               if (xdp_do_redirect(bp->dev, xdp, xdp_prog)) {
                        trace_xdp_exception(bp->dev, xdp_prog, act);
                        page_pool_recycle_direct(rxr->page_pool, page);
                        return true;
                }

                *event |= BNXT_REDIRECT_EVENT;
                break;
        default:
                bpf_warn_invalid_xdp_action(bp->dev, xdp_prog, act);
                fallthrough;
        case XDP_ABORTED:
                trace_xdp_exception(bp->dev, xdp_prog, act);
                fallthrough;
        case XDP_DROP:
-               bnxt_xdp_buff_frags_free(rxr, &xdp);
+               bnxt_xdp_buff_frags_free(rxr, xdp);
                bnxt_reuse_rx_data(rxr, cons, page);
                break;
        }