net/mlx5e: SHAMPO, Specialize mlx5e_fill_skb_data()
author     Dragos Tatulea <dtatulea@nvidia.com>
           Mon, 3 Jun 2024 21:22:12 +0000 (00:22 +0300)
committer  Jakub Kicinski <kuba@kernel.org>
           Thu, 6 Jun 2024 03:20:46 +0000 (20:20 -0700)
mlx5e_fill_skb_data() used to have multiple callers. But after the XDP
multibuf refactoring from commit 2cb0e27d43b4 ("net/mlx5e: RX, Prepare
non-linear striding RQ for XDP multi-buffer support") the SHAMPO code
path is the only caller.

Take advantage of this and specialize the function:
- Drop the now-redundant MLX5E_RQ_STATE_SHAMPO check: on this path the
  frag truesize is always pg_consumed_bytes.
- Assume that data_bcnt > 0. This is needed in a downstream patch
  (illustrated in the sketch below).

Rename the function as well, to make its SHAMPO-only scope clear.
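
For context, here is a minimal standalone sketch of why the caller must now
guarantee a non-zero byte count: a do/while loop runs its body at least once,
so the data_bcnt == 0 case has to be filtered out before the call. This is
illustrative only, not the driver code; fill_frags(), the local PAGE_SIZE
define, and the printf() stand-in for mlx5e_add_skb_frag() are all
hypothetical.

    #include <stdio.h>

    #define PAGE_SIZE 4096u

    /* Illustrative analogue of the specialized helper: the do/while body
     * executes at least once, so callers must pass data_bcnt > 0. */
    static void fill_frags(unsigned int data_bcnt, unsigned int data_offset)
    {
            do {
                    /* Consume up to the end of the current page. */
                    unsigned int consumed = PAGE_SIZE - data_offset;

                    if (consumed > data_bcnt)
                            consumed = data_bcnt;

                    printf("add frag: offset=%u len=%u\n", data_offset, consumed);

                    data_bcnt -= consumed;
                    data_offset = 0; /* later pages start at offset 0 */
            } while (data_bcnt);
    }

    int main(void)
    {
            unsigned int data_bcnt = 6000, data_offset = 1000;

            /* Caller-side guard, mirroring the new if (data_bcnt) check in
             * mlx5e_handle_rx_cqe_mpwrq_shampo(): skip the helper entirely
             * when there is no payload beyond the header. */
            if (data_bcnt)
                    fill_frags(data_bcnt, data_offset);

            return 0;
    }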

Signed-off-by: Dragos Tatulea <dtatulea@nvidia.com>
Suggested-by: Tariq Toukan <tariqt@nvidia.com>
Signed-off-by: Tariq Toukan <tariqt@nvidia.com>
Link: https://lore.kernel.org/r/20240603212219.1037656-8-tariqt@nvidia.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c

index bb59ee0b15673336637966d8a4da17c17ef10312..1e3a5b2afeae5f6fb6b9a59477db9ef88ea96e15 100644 (file)
@@ -1948,21 +1948,16 @@ const struct mlx5e_rx_handlers mlx5e_rx_handlers_rep = {
 #endif
 
 static void
-mlx5e_fill_skb_data(struct sk_buff *skb, struct mlx5e_rq *rq,
-                   struct mlx5e_frag_page *frag_page,
-                   u32 data_bcnt, u32 data_offset)
+mlx5e_shampo_fill_skb_data(struct sk_buff *skb, struct mlx5e_rq *rq,
+                          struct mlx5e_frag_page *frag_page,
+                          u32 data_bcnt, u32 data_offset)
 {
        net_prefetchw(skb->data);
 
-       while (data_bcnt) {
+       do {
                /* Non-linear mode, hence non-XSK, which always uses PAGE_SIZE. */
                u32 pg_consumed_bytes = min_t(u32, PAGE_SIZE - data_offset, data_bcnt);
-               unsigned int truesize;
-
-               if (test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state))
-                       truesize = pg_consumed_bytes;
-               else
-                       truesize = ALIGN(pg_consumed_bytes, BIT(rq->mpwqe.log_stride_sz));
+               unsigned int truesize = pg_consumed_bytes;
 
                frag_page->frags++;
                mlx5e_add_skb_frag(rq, skb, frag_page->page, data_offset,
@@ -1971,7 +1966,7 @@ mlx5e_fill_skb_data(struct sk_buff *skb, struct mlx5e_rq *rq,
                data_bcnt -= pg_consumed_bytes;
                data_offset = 0;
                frag_page++;
-       }
+       } while (data_bcnt);
 }
 
 static struct sk_buff *
@@ -2330,10 +2325,12 @@ static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cq
        }
 
        if (likely(head_size)) {
-               struct mlx5e_frag_page *frag_page;
+               if (data_bcnt) {
+                       struct mlx5e_frag_page *frag_page;
 
-               frag_page = &wi->alloc_units.frag_pages[page_idx];
-               mlx5e_fill_skb_data(*skb, rq, frag_page, data_bcnt, data_offset);
+                       frag_page = &wi->alloc_units.frag_pages[page_idx];
+                       mlx5e_shampo_fill_skb_data(*skb, rq, frag_page, data_bcnt, data_offset);
+               }
        }
 
        mlx5e_shampo_complete_rx_cqe(rq, cqe, cqe_bcnt, *skb);