prog = rcu_dereference(rq->xdp_prog);
        if (prog) {
-               struct mlx5e_xdp_buff mxbuf;
+               struct mlx5e_xdp_buff *mxbuf = &rq->mxbuf;
 
                net_prefetchw(va); /* xdp_frame data area */
                mlx5e_fill_mxbuf(rq, cqe, va, rx_headroom, rq->buff.frame0_sz,
-                                cqe_bcnt, &mxbuf);
-               if (mlx5e_xdp_handle(rq, prog, &mxbuf))
+                                cqe_bcnt, mxbuf);
+               if (mlx5e_xdp_handle(rq, prog, mxbuf))
                        return NULL; /* page/packet was consumed by XDP */
 
-               rx_headroom = mxbuf.xdp.data - mxbuf.xdp.data_hard_start;
-               metasize = mxbuf.xdp.data - mxbuf.xdp.data_meta;
-               cqe_bcnt = mxbuf.xdp.data_end - mxbuf.xdp.data;
+               rx_headroom = mxbuf->xdp.data - mxbuf->xdp.data_hard_start;
+               metasize = mxbuf->xdp.data - mxbuf->xdp.data_meta;
+               cqe_bcnt = mxbuf->xdp.data_end - mxbuf->xdp.data;
        }
        frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt);
        skb = mlx5e_build_linear_skb(rq, va, frag_size, rx_headroom, cqe_bcnt, metasize);
                             struct mlx5_cqe64 *cqe, u32 cqe_bcnt)
 {
        struct mlx5e_rq_frag_info *frag_info = &rq->wqe.info.arr[0];
+       struct mlx5e_xdp_buff *mxbuf = &rq->mxbuf;
        struct mlx5e_wqe_frag_info *head_wi = wi;
        u16 rx_headroom = rq->buff.headroom;
        struct mlx5e_frag_page *frag_page;
        struct skb_shared_info *sinfo;
-       struct mlx5e_xdp_buff mxbuf;
        u32 frag_consumed_bytes;
        struct bpf_prog *prog;
        struct sk_buff *skb;
        net_prefetch(va + rx_headroom);
 
        mlx5e_fill_mxbuf(rq, cqe, va, rx_headroom, rq->buff.frame0_sz,
-                        frag_consumed_bytes, &mxbuf);
-       sinfo = xdp_get_shared_info_from_buff(&mxbuf.xdp);
+                        frag_consumed_bytes, mxbuf);
+       sinfo = xdp_get_shared_info_from_buff(&mxbuf->xdp);
        truesize = 0;
 
        cqe_bcnt -= frag_consumed_bytes;
 
                frag_consumed_bytes = min_t(u32, frag_info->frag_size, cqe_bcnt);
 
-               mlx5e_add_skb_shared_info_frag(rq, sinfo, &mxbuf.xdp, frag_page,
-                                              wi->offset, frag_consumed_bytes);
+               mlx5e_add_skb_shared_info_frag(rq, sinfo, &mxbuf->xdp,
+                                              frag_page, wi->offset,
+                                              frag_consumed_bytes);
                truesize += frag_info->frag_stride;
 
                cqe_bcnt -= frag_consumed_bytes;
        }
 
        prog = rcu_dereference(rq->xdp_prog);
-       if (prog && mlx5e_xdp_handle(rq, prog, &mxbuf)) {
+       if (prog && mlx5e_xdp_handle(rq, prog, mxbuf)) {
                if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
                        struct mlx5e_wqe_frag_info *pwi;
 
                return NULL; /* page/packet was consumed by XDP */
        }
 
-       skb = mlx5e_build_linear_skb(rq, mxbuf.xdp.data_hard_start, rq->buff.frame0_sz,
-                                    mxbuf.xdp.data - mxbuf.xdp.data_hard_start,
-                                    mxbuf.xdp.data_end - mxbuf.xdp.data,
-                                    mxbuf.xdp.data - mxbuf.xdp.data_meta);
+       skb = mlx5e_build_linear_skb(
+               rq, mxbuf->xdp.data_hard_start, rq->buff.frame0_sz,
+               mxbuf->xdp.data - mxbuf->xdp.data_hard_start,
+               mxbuf->xdp.data_end - mxbuf->xdp.data,
+               mxbuf->xdp.data - mxbuf->xdp.data_meta);
        if (unlikely(!skb))
                return NULL;
 
        skb_mark_for_recycle(skb);
        head_wi->frag_page->frags++;
 
-       if (xdp_buff_has_frags(&mxbuf.xdp)) {
+       if (xdp_buff_has_frags(&mxbuf->xdp)) {
                /* sinfo->nr_frags is reset by build_skb, calculate again. */
                xdp_update_skb_shared_info(skb, wi - head_wi - 1,
                                           sinfo->xdp_frags_size, truesize,
-                                          xdp_buff_is_frag_pfmemalloc(&mxbuf.xdp));
+                                          xdp_buff_is_frag_pfmemalloc(
+                                               &mxbuf->xdp));
 
                for (struct mlx5e_wqe_frag_info *pwi = head_wi + 1; pwi < wi; pwi++)
                        pwi->frag_page->frags++;
        struct mlx5e_frag_page *frag_page = &wi->alloc_units.frag_pages[page_idx];
        u16 headlen = min_t(u16, MLX5E_RX_MAX_HEAD, cqe_bcnt);
        struct mlx5e_frag_page *head_page = frag_page;
+       struct mlx5e_xdp_buff *mxbuf = &rq->mxbuf;
        u32 frag_offset    = head_offset;
        u32 byte_cnt       = cqe_bcnt;
        struct skb_shared_info *sinfo;
-       struct mlx5e_xdp_buff mxbuf;
        unsigned int truesize = 0;
        struct bpf_prog *prog;
        struct sk_buff *skb;
                }
        }
 
-       mlx5e_fill_mxbuf(rq, cqe, va, linear_hr, linear_frame_sz, linear_data_len, &mxbuf);
+       mlx5e_fill_mxbuf(rq, cqe, va, linear_hr, linear_frame_sz,
+                        linear_data_len, mxbuf);
 
-       sinfo = xdp_get_shared_info_from_buff(&mxbuf.xdp);
+       sinfo = xdp_get_shared_info_from_buff(&mxbuf->xdp);
 
        while (byte_cnt) {
                /* Non-linear mode, hence non-XSK, which always uses PAGE_SIZE. */
                else
                        truesize += ALIGN(pg_consumed_bytes, BIT(rq->mpwqe.log_stride_sz));
 
-               mlx5e_add_skb_shared_info_frag(rq, sinfo, &mxbuf.xdp, frag_page, frag_offset,
+               mlx5e_add_skb_shared_info_frag(rq, sinfo, &mxbuf->xdp,
+                                              frag_page, frag_offset,
                                               pg_consumed_bytes);
                byte_cnt -= pg_consumed_bytes;
                frag_offset = 0;
        }
 
        if (prog) {
-               if (mlx5e_xdp_handle(rq, prog, &mxbuf)) {
+               if (mlx5e_xdp_handle(rq, prog, mxbuf)) {
                        if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
                                struct mlx5e_frag_page *pfp;
 
                        return NULL; /* page/packet was consumed by XDP */
                }
 
-               skb = mlx5e_build_linear_skb(rq, mxbuf.xdp.data_hard_start,
-                                            linear_frame_sz,
-                                            mxbuf.xdp.data - mxbuf.xdp.data_hard_start, 0,
-                                            mxbuf.xdp.data - mxbuf.xdp.data_meta);
+               skb = mlx5e_build_linear_skb(
+                       rq, mxbuf->xdp.data_hard_start, linear_frame_sz,
+                       mxbuf->xdp.data - mxbuf->xdp.data_hard_start, 0,
+                       mxbuf->xdp.data - mxbuf->xdp.data_meta);
                if (unlikely(!skb)) {
                        mlx5e_page_release_fragmented(rq, &wi->linear_page);
                        return NULL;
                wi->linear_page.frags++;
                mlx5e_page_release_fragmented(rq, &wi->linear_page);
 
-               if (xdp_buff_has_frags(&mxbuf.xdp)) {
+               if (xdp_buff_has_frags(&mxbuf->xdp)) {
                        struct mlx5e_frag_page *pagep;
 
                        /* sinfo->nr_frags is reset by build_skb, calculate again. */
                        xdp_update_skb_shared_info(skb, frag_page - head_page,
                                                   sinfo->xdp_frags_size, truesize,
-                                                  xdp_buff_is_frag_pfmemalloc(&mxbuf.xdp));
+                                                  xdp_buff_is_frag_pfmemalloc(
+                                                       &mxbuf->xdp));
 
                        pagep = head_page;
                        do
        } else {
                dma_addr_t addr;
 
-               if (xdp_buff_has_frags(&mxbuf.xdp)) {
+               if (xdp_buff_has_frags(&mxbuf->xdp)) {
                        struct mlx5e_frag_page *pagep;
 
                        xdp_update_skb_shared_info(skb, sinfo->nr_frags,
                                                   sinfo->xdp_frags_size, truesize,
-                                                  xdp_buff_is_frag_pfmemalloc(&mxbuf.xdp));
+                                                  xdp_buff_is_frag_pfmemalloc(
+                                                       &mxbuf->xdp));
 
                        pagep = frag_page - sinfo->nr_frags;
                        do
 
        prog = rcu_dereference(rq->xdp_prog);
        if (prog) {
-               struct mlx5e_xdp_buff mxbuf;
+               struct mlx5e_xdp_buff *mxbuf = &rq->mxbuf;
 
                net_prefetchw(va); /* xdp_frame data area */
                mlx5e_fill_mxbuf(rq, cqe, va, rx_headroom, rq->buff.frame0_sz,
-                                cqe_bcnt, &mxbuf);
-               if (mlx5e_xdp_handle(rq, prog, &mxbuf)) {
+                                cqe_bcnt, mxbuf);
+               if (mlx5e_xdp_handle(rq, prog, mxbuf)) {
                        if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags))
                                frag_page->frags++;
                        return NULL; /* page/packet was consumed by XDP */
                }
 
-               rx_headroom = mxbuf.xdp.data - mxbuf.xdp.data_hard_start;
-               metasize = mxbuf.xdp.data - mxbuf.xdp.data_meta;
-               cqe_bcnt = mxbuf.xdp.data_end - mxbuf.xdp.data;
+               rx_headroom = mxbuf->xdp.data - mxbuf->xdp.data_hard_start;
+               metasize = mxbuf->xdp.data - mxbuf->xdp.data_meta;
+               cqe_bcnt = mxbuf->xdp.data_end - mxbuf->xdp.data;
        }
        frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt);
        skb = mlx5e_build_linear_skb(rq, va, frag_size, rx_headroom, cqe_bcnt, metasize);