        struct mlx5_wqe_ctrl_seg      *cseg = &wqe->ctrl;
        struct mlx5_wqe_umr_ctrl_seg *ucseg = &wqe->uctrl;
        struct mlx5_wqe_data_seg      *dseg = &wqe->data;
-       struct mlx5e_mpw_info *wi = &rq->wqe_info[ix];
+       struct mlx5e_mpw_info *wi = &rq->mpwqe.info[ix];
        u8 ds_cnt = DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_DS);
        u32 umr_wqe_mtt_offset = mlx5e_get_wqe_mtt_offset(rq, ix);
 
        int mtt_alloc = mtt_sz + MLX5_UMR_ALIGN - 1;
        int i;
 
-       rq->wqe_info = kzalloc_node(wq_sz * sizeof(*rq->wqe_info),
-                                   GFP_KERNEL, cpu_to_node(c->cpu));
-       if (!rq->wqe_info)
+       rq->mpwqe.info = kzalloc_node(wq_sz * sizeof(*rq->mpwqe.info),
+                                     GFP_KERNEL, cpu_to_node(c->cpu));
+       if (!rq->mpwqe.info)
                goto err_out;
 
        /* We allocate more than mtt_sz as we will align the pointer */
-       rq->mtt_no_align = kzalloc_node(mtt_alloc * wq_sz, GFP_KERNEL,
+       rq->mpwqe.mtt_no_align = kzalloc_node(mtt_alloc * wq_sz, GFP_KERNEL,
                                        cpu_to_node(c->cpu));
-       if (unlikely(!rq->mtt_no_align))
+       if (unlikely(!rq->mpwqe.mtt_no_align))
                goto err_free_wqe_info;
 
        for (i = 0; i < wq_sz; i++) {
-               struct mlx5e_mpw_info *wi = &rq->wqe_info[i];
+               struct mlx5e_mpw_info *wi = &rq->mpwqe.info[i];
 
-               wi->umr.mtt = PTR_ALIGN(rq->mtt_no_align + i * mtt_alloc,
+               wi->umr.mtt = PTR_ALIGN(rq->mpwqe.mtt_no_align + i * mtt_alloc,
                                        MLX5_UMR_ALIGN);
                wi->umr.mtt_addr = dma_map_single(c->pdev, wi->umr.mtt, mtt_sz,
                                                  PCI_DMA_TODEVICE);
 
 err_unmap_mtts:
        while (--i >= 0) {
-               struct mlx5e_mpw_info *wi = &rq->wqe_info[i];
+               struct mlx5e_mpw_info *wi = &rq->mpwqe.info[i];
 
                dma_unmap_single(c->pdev, wi->umr.mtt_addr, mtt_sz,
                                 PCI_DMA_TODEVICE);
        }
-       kfree(rq->mtt_no_align);
+       kfree(rq->mpwqe.mtt_no_align);
 err_free_wqe_info:
-       kfree(rq->wqe_info);
+       kfree(rq->mpwqe.info);
 
 err_out:
        return -ENOMEM;
        int i;
 
        for (i = 0; i < wq_sz; i++) {
-               struct mlx5e_mpw_info *wi = &rq->wqe_info[i];
+               struct mlx5e_mpw_info *wi = &rq->mpwqe.info[i];
 
                dma_unmap_single(rq->pdev, wi->umr.mtt_addr, mtt_sz,
                                 PCI_DMA_TODEVICE);
        }
-       kfree(rq->mtt_no_align);
-       kfree(rq->wqe_info);
+       kfree(rq->mpwqe.mtt_no_align);
+       kfree(rq->mpwqe.info);
 }
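
The renames above all funnel the MPWQE bookkeeping through a single rq->mpwqe aggregate instead of three loose fields on the RQ. Below is a minimal sketch of such a grouping, inferred only from the accesses in these hunks (info, mtt_no_align, mtt_offset); the struct name and surrounding layout are illustrative and not taken from this patch:

#include <linux/types.h>

struct mlx5e_mpw_info;	/* per-MPWQE state, left opaque for this sketch */

/* Hypothetical grouping; the real mlx5e_rq in en.h carries many more fields. */
struct mlx5e_rq_sketch {
	struct {
		struct mlx5e_mpw_info *info;		/* was rq->wqe_info */
		void                  *mtt_no_align;	/* was rq->mtt_no_align */
		u32                    mtt_offset;	/* was rq->mpwqe_mtt_offset */
	} mpwqe;
};

With that grouping in place, every call site that used to dereference rq->wqe_info[ix] becomes rq->mpwqe.info[ix], which is exactly the mechanical substitution the hunks above and below perform.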
 
 static int mlx5e_create_rq(struct mlx5e_channel *c,
                rq->alloc_wqe = mlx5e_alloc_rx_mpwqe;
                rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe;
 
-               rq->mpwqe_mtt_offset = c->ix *
+               rq->mpwqe.mtt_offset = c->ix *
                        MLX5E_REQUIRED_MTTS(1, BIT(priv->params.log_rq_size));
 
                rq->mpwqe_stride_sz = BIT(priv->params.mpwqe_log_stride_sz);
 
        /* UMR WQE (if in progress) is always at wq->head */
        if (test_bit(MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, &rq->state))
-               mlx5e_free_rx_mpwqe(rq, &rq->wqe_info[wq->head]);
+               mlx5e_free_rx_mpwqe(rq, &rq->mpwqe.info[wq->head]);
 
        while (!mlx5_wq_ll_is_empty(wq)) {
                wqe_ix_be = *wq->tail_next;
 
 
 static inline void mlx5e_post_umr_wqe(struct mlx5e_rq *rq, u16 ix)
 {
-       struct mlx5e_mpw_info *wi = &rq->wqe_info[ix];
+       struct mlx5e_mpw_info *wi = &rq->mpwqe.info[ix];
        struct mlx5e_sq *sq = &rq->channel->icosq;
        struct mlx5_wq_cyc *wq = &sq->wq;
        struct mlx5e_umr_wqe *wqe;
                                    struct mlx5e_rx_wqe *wqe,
                                    u16 ix)
 {
-       struct mlx5e_mpw_info *wi = &rq->wqe_info[ix];
+       struct mlx5e_mpw_info *wi = &rq->mpwqe.info[ix];
        u64 dma_offset = (u64)mlx5e_get_wqe_mtt_offset(rq, ix) << PAGE_SHIFT;
        int pg_strides = mlx5e_mpwqe_strides_per_page(rq);
        int err;
        clear_bit(MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, &rq->state);
 
        if (unlikely(test_bit(MLX5E_RQ_STATE_FLUSH, &rq->state))) {
-               mlx5e_free_rx_mpwqe(rq, &rq->wqe_info[wq->head]);
+               mlx5e_free_rx_mpwqe(rq, &rq->mpwqe.info[wq->head]);
                return;
        }
 
 
 void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
 {
-       struct mlx5e_mpw_info *wi = &rq->wqe_info[ix];
+       struct mlx5e_mpw_info *wi = &rq->mpwqe.info[ix];
 
        mlx5e_free_rx_mpwqe(rq, wi);
 }
 {
        u16 cstrides       = mpwrq_get_cqe_consumed_strides(cqe);
        u16 wqe_id         = be16_to_cpu(cqe->wqe_id);
-       struct mlx5e_mpw_info *wi = &rq->wqe_info[wqe_id];
+       struct mlx5e_mpw_info *wi = &rq->mpwqe.info[wqe_id];
        struct mlx5e_rx_wqe  *wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_id);
        struct sk_buff *skb;
        u16 cqe_bcnt;