return log_num_strides >= MLX5_MPWQE_LOG_NUM_STRIDES_BASE;
 }
 
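+/* Check that the stride size and number of strides implied by the channel
+ * and XSK parameters fit the device capabilities.
+ */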
+bool mlx5e_verify_params_rx_mpwqe_strides(struct mlx5_core_dev *mdev,
+                                         struct mlx5e_params *params,
+                                         struct mlx5e_xsk_param *xsk)
+{
+       u8 log_wqe_num_of_strides = mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk);
+       u8 log_wqe_stride_size = mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk);
+       enum mlx5e_mpwrq_umr_mode umr_mode = mlx5e_mpwrq_umr_mode(mdev, xsk);
+       u8 page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);
+
+       return mlx5e_verify_rx_mpwqe_strides(mdev, log_wqe_stride_size,
+                                            log_wqe_num_of_strides,
+                                            page_shift, umr_mode);
+}
+
 bool mlx5e_rx_mpwqe_is_linear_skb(struct mlx5_core_dev *mdev,
                                  struct mlx5e_params *params,
                                  struct mlx5e_xsk_param *xsk)
        if (mlx5e_rx_mpwqe_is_linear_skb(mdev, params, xsk))
                return order_base_2(mlx5e_rx_get_linear_stride_sz(mdev, params, xsk, true));
 
+       /* XDP in mlx5e doesn't support multiple packets per page; use page-sized strides. */
+       if (params->xdp_prog)
+               return PAGE_SHIFT;
+
        return MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(mdev);
 }
 
        if (!mlx5e_check_fragmented_striding_rq_cap(mdev, page_shift, umr_mode))
                return -EOPNOTSUPP;
 
-       if (params->xdp_prog && !mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL))
-               return -EINVAL;
-
        return 0;
 }
 
 
                pool_size = rq->mpwqe.pages_per_wqe <<
                        mlx5e_mpwqe_get_log_rq_size(mdev, params, xsk);
 
+               if (!mlx5e_rx_mpwqe_is_linear_skb(mdev, params, xsk) && params->xdp_prog)
+                       pool_size *= 2; /* additional page per packet for the XDP linear part */
+
                rq->mpwqe.log_stride_sz = mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk);
                rq->mpwqe.num_strides =
                        BIT(mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk));
 
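+       /* Multi-buffer XDP (RX_SG) is now supported on striding RQ too, so
+        * it is advertised for all RQ types.
+        */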
        val = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
              NETDEV_XDP_ACT_XSK_ZEROCOPY |
+             NETDEV_XDP_ACT_RX_SG |
              NETDEV_XDP_ACT_NDO_XMIT |
              NETDEV_XDP_ACT_NDO_XMIT_SG;
-       if (params->rq_wq_type == MLX5_WQ_TYPE_CYCLIC)
-               val |= NETDEV_XDP_ACT_RX_SG;
        xdp_set_features_flag(netdev, val);
 }
 
                mlx5e_rx_is_linear_skb(mdev, params, NULL) :
                mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL);
 
-       /* XDP affects striding RQ parameters. Block XDP if striding RQ won't be
-        * supported with the new parameters: if PAGE_SIZE is bigger than
-        * MLX5_MPWQE_LOG_STRIDE_SZ_MAX, striding RQ can't be used, even though
-        * the MTU is small enough for the linear mode, because XDP uses strides
-        * of PAGE_SIZE on regular RQs.
-        */
-       if (!is_linear && params->rq_wq_type != MLX5_WQ_TYPE_CYCLIC) {
-               netdev_warn(netdev, "XDP is not allowed with striding RQ and MTU(%d) > %d\n",
-                           params->sw_mtu,
-                           mlx5e_xdp_max_mtu(params, NULL));
-               return false;
-       }
-       if (!is_linear && !params->xdp_prog->aux->xdp_has_frags) {
-               netdev_warn(netdev, "MTU(%d) > %d, too big for an XDP program not aware of multi buffer\n",
-                           params->sw_mtu,
-                           mlx5e_xdp_max_mtu(params, NULL));
-               return false;
+       if (!is_linear) {
+               if (!params->xdp_prog->aux->xdp_has_frags) {
+                       netdev_warn(netdev, "MTU(%d) > %d, too big for an XDP program not aware of multi buffer\n",
+                                   params->sw_mtu,
+                                   mlx5e_xdp_max_mtu(params, NULL));
+                       return false;
+               }
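+               /* With striding RQ, XDP forces page-sized strides; check that
+                * the resulting stride parameters fit the device limits.
+                */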
+               if (params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ &&
+                   !mlx5e_verify_params_rx_mpwqe_strides(mdev, params, NULL)) {
+                       netdev_warn(netdev, "XDP is not allowed with striding RQ and MTU(%d) > %d\n",
+                                   params->sw_mtu,
+                                   mlx5e_xdp_max_mtu(params, NULL));
+                       return false;
+               }
        }
 
        return true;
 
        struct skb_shared_info *sinfo;
        struct mlx5e_xdp_buff mxbuf;
        unsigned int truesize = 0;
+       struct bpf_prog *prog;
        struct sk_buff *skb;
        u32 linear_frame_sz;
        u16 linear_data_len;
-       dma_addr_t addr;
        u16 linear_hr;
        void *va;
 
-       skb = napi_alloc_skb(rq->cq.napi,
-                            ALIGN(MLX5E_RX_MAX_HEAD, sizeof(long)));
-       if (unlikely(!skb)) {
-               rq->stats->buff_alloc_err++;
-               return NULL;
-       }
-
-       va = skb->head;
-       net_prefetchw(skb->data);
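+       /* With an XDP program attached, the SKB is built only after the
+        * program has run; without one, it is allocated up front.
+        */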
+       prog = rcu_dereference(rq->xdp_prog);
 
-       frag_offset += headlen;
-       byte_cnt -= headlen;
-       linear_hr = skb_headroom(skb);
-       linear_data_len = headlen;
-       linear_frame_sz = MLX5_SKB_FRAG_SZ(skb_end_offset(skb));
-       if (unlikely(frag_offset >= PAGE_SIZE)) {
-               frag_page++;
-               frag_offset -= PAGE_SIZE;
+       if (prog) {
+               /* area for bpf_xdp_[store|load]_bytes */
+               net_prefetchw(page_address(frag_page->page) + frag_offset);
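+               /* The payload stays in the WQE's pages; allocate a separate
+                * page only for the linear part of the xdp_buff.
+                */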
+               if (unlikely(mlx5e_page_alloc_fragmented(rq, &wi->linear_page))) {
+                       rq->stats->buff_alloc_err++;
+                       return NULL;
+               }
+               va = page_address(wi->linear_page.page);
+               net_prefetchw(va); /* xdp_frame data area */
+               linear_hr = XDP_PACKET_HEADROOM;
+               linear_data_len = 0;
+               linear_frame_sz = MLX5_SKB_FRAG_SZ(linear_hr + MLX5E_RX_MAX_HEAD);
+       } else {
+               skb = napi_alloc_skb(rq->cq.napi,
+                                    ALIGN(MLX5E_RX_MAX_HEAD, sizeof(long)));
+               if (unlikely(!skb)) {
+                       rq->stats->buff_alloc_err++;
+                       return NULL;
+               }
+               skb_mark_for_recycle(skb);
+               va = skb->head;
+               net_prefetchw(va); /* xdp_frame data area */
+               net_prefetchw(skb->data);
+
+               frag_offset += headlen;
+               byte_cnt -= headlen;
+               linear_hr = skb_headroom(skb);
+               linear_data_len = headlen;
+               linear_frame_sz = MLX5_SKB_FRAG_SZ(skb_end_offset(skb));
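+               /* The headers are copied into the SKB's linear part later
+                * (mlx5e_copy_skb_header), so skip headlen bytes in the frag
+                * pages, possibly crossing into the next page.
+                */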
+               if (unlikely(frag_offset >= PAGE_SIZE)) {
+                       frag_page++;
+                       frag_offset -= PAGE_SIZE;
+               }
        }
 
-       skb_mark_for_recycle(skb);
        mlx5e_fill_mxbuf(rq, cqe, va, linear_hr, linear_frame_sz, linear_data_len, &mxbuf);
-       net_prefetch(mxbuf.xdp.data);
 
        sinfo = xdp_get_shared_info_from_buff(&mxbuf.xdp);
 
                frag_offset = 0;
                frag_page++;
        }
-       if (xdp_buff_has_frags(&mxbuf.xdp)) {
-               struct mlx5e_frag_page *pagep;
 
-               xdp_update_skb_shared_info(skb, sinfo->nr_frags,
-                                          sinfo->xdp_frags_size, truesize,
-                                          xdp_buff_is_frag_pfmemalloc(&mxbuf.xdp));
+       if (prog) {
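+               /* Run the XDP program on the multi-buffer xdp_buff; a true
+                * return value means the packet was consumed (dropped,
+                * transmitted or redirected).
+                */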
+               if (mlx5e_xdp_handle(rq, prog, &mxbuf)) {
+                       if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
+                               int i;
+
+                               for (i = 0; i < sinfo->nr_frags; i++)
+                                       /* non-atomic: the RQ is serialized by its NAPI context */
+                                       __set_bit(page_idx + i, wi->skip_release_bitmap);
+                               return NULL;
+                       }
+                       mlx5e_page_release_fragmented(rq, &wi->linear_page);
+                       return NULL; /* page/packet was consumed by XDP */
+               }
+
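+               /* XDP_PASS: build the SKB around the linear page, starting
+                * with an empty linear part; headers are pulled in below.
+                */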
+               skb = mlx5e_build_linear_skb(rq, mxbuf.xdp.data_hard_start,
+                                            linear_frame_sz,
+                                            mxbuf.xdp.data - mxbuf.xdp.data_hard_start, 0,
+                                            mxbuf.xdp.data - mxbuf.xdp.data_meta);
+               if (unlikely(!skb)) {
+                       mlx5e_page_release_fragmented(rq, &wi->linear_page);
+                       return NULL;
+               }
 
-               pagep = frag_page - sinfo->nr_frags;
-               do
-                       pagep->frags++;
-               while (++pagep < frag_page);
-       }
-       /* copy header */
-       addr = page_pool_get_dma_addr(head_page->page);
-       mlx5e_copy_skb_header(rq, skb, head_page->page, addr,
-                             head_offset, head_offset, headlen);
-       /* skb linear part was allocated with headlen and aligned to long */
-       skb->tail += headlen;
-       skb->len  += headlen;
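+               /* The SKB's linear part lives in wi->linear_page: account for
+                * the SKB's reference before releasing the RQ's one, so the
+                * page survives until the SKB is consumed.
+                */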
+               skb_mark_for_recycle(skb);
+               wi->linear_page.frags++;
+               mlx5e_page_release_fragmented(rq, &wi->linear_page);
+
+               if (xdp_buff_has_frags(&mxbuf.xdp)) {
+                       struct mlx5e_frag_page *pagep;
+
+                       /* sinfo->nr_frags is reset by build_skb, calculate again. */
+                       xdp_update_skb_shared_info(skb, frag_page - head_page,
+                                                  sinfo->xdp_frags_size, truesize,
+                                                  xdp_buff_is_frag_pfmemalloc(&mxbuf.xdp));
+
+                       pagep = head_page;
+                       do
+                               pagep->frags++;
+                       while (++pagep < frag_page);
+               }
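+               /* Copy the packet headers from the first frag into the
+                * SKB's linear part.
+                */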
+               __pskb_pull_tail(skb, headlen);
+       } else {
+               dma_addr_t addr;
+
+               if (xdp_buff_has_frags(&mxbuf.xdp)) {
+                       struct mlx5e_frag_page *pagep;
+
+                       xdp_update_skb_shared_info(skb, sinfo->nr_frags,
+                                                  sinfo->xdp_frags_size, truesize,
+                                                  xdp_buff_is_frag_pfmemalloc(&mxbuf.xdp));
+
+                       pagep = frag_page - sinfo->nr_frags;
+                       do
+                               pagep->frags++;
+                       while (++pagep < frag_page);
+               }
+               /* copy header */
+               addr = page_pool_get_dma_addr(head_page->page);
+               mlx5e_copy_skb_header(rq, skb, head_page->page, addr,
+                                     head_offset, head_offset, headlen);
+               /* skb linear part was allocated with headlen and aligned to long */
+               skb->tail += headlen;
+               skb->len  += headlen;
+       }
 
        return skb;
 }