net/mlx5e: SHAMPO, Change frag page setup order during allocation
author Dragos Tatulea <dtatulea@nvidia.com>
Thu, 7 Nov 2024 19:43:55 +0000 (21:43 +0200)
committer Jakub Kicinski <kuba@kernel.org>
Tue, 12 Nov 2024 03:28:18 +0000 (19:28 -0800)
Now that the UMR allocation has been simplified, it is no longer
possible to have a leftover page from a previous call to
mlx5e_build_shampo_hd_umr().

This patch simplifies the code by switching the order of operations:
first take the frag page and then increment the index. This is more
straightforward and also paves the way for dropping the info array.
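
For illustration only, here is a minimal, self-contained sketch of the
take-then-advance pattern in isolation (hypothetical ring structure and
names, not the driver's types; it assumes the ring size is a power of
two, as the index mask requires):

	#define RING_SIZE 8	/* must be a power of two for the index mask */

	struct page_ring {
		void *pages[RING_SIZE];
		unsigned int idx;
	};

	/* Take a pointer to the current slot first, then advance the
	 * index, mirroring the new ordering in the patched loop.
	 */
	static void **ring_take_slot(struct page_ring *r)
	{
		void **slot = &r->pages[r->idx];

		r->idx = (r->idx + 1) & (RING_SIZE - 1);
		return slot;
	}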

Signed-off-by: Dragos Tatulea <dtatulea@nvidia.com>
Signed-off-by: Tariq Toukan <tariqt@nvidia.com>
Link: https://patch.msgid.link/20241107194357.683732-11-tariqt@nvidia.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c

index 76a975667c77bba415572dc0293b90d152310e81..637069c1b988053fd12a67d5563269317bc665ca 100644
@@ -651,7 +651,7 @@ static int mlx5e_build_shampo_hd_umr(struct mlx5e_rq *rq,
        u16 pi, header_offset, err, wqe_bbs;
        u32 lkey = rq->mdev->mlx5e_res.hw_objs.mkey;
        u16 page_index = shampo->curr_page_index;
-       struct mlx5e_frag_page *frag_page;
+       struct mlx5e_frag_page *frag_page = NULL;
        struct mlx5e_dma_info *dma_info;
        struct mlx5e_umr_wqe *umr_wqe;
        int headroom, i;
@@ -663,16 +663,14 @@ static int mlx5e_build_shampo_hd_umr(struct mlx5e_rq *rq,
        umr_wqe = mlx5_wq_cyc_get_wqe(&sq->wq, pi);
        build_ksm_umr(sq, umr_wqe, shampo->key, index, ksm_entries);
 
-       frag_page = &shampo->pages[page_index];
-
        WARN_ON_ONCE(ksm_entries & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1));
        for (i = 0; i < ksm_entries; i++, index++) {
                dma_info = &shampo->info[index];
                header_offset = (index & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1)) <<
                        MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE;
                if (!(header_offset & (PAGE_SIZE - 1))) {
-                       page_index = (page_index + 1) & (shampo->pages_per_wq - 1);
                        frag_page = &shampo->pages[page_index];
+                       page_index = (page_index + 1) & (shampo->pages_per_wq - 1);
 
                        err = mlx5e_page_alloc_fragmented(rq, frag_page);
                        if (unlikely(err))
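
The wrap-around on both sides of the change relies on
shampo->pages_per_wq being a power of two, so the AND mask is a cheap
substitute for a modulo. A small standalone demo of that idiom
(hypothetical values, not driver code):

	#include <assert.h>
	#include <stdio.h>

	int main(void)
	{
		unsigned int pages_per_wq = 8;	/* assumed power of two */
		unsigned int idx = 0;

		/* A power of two has a single bit set, so n & (n - 1) == 0. */
		assert((pages_per_wq & (pages_per_wq - 1)) == 0);

		for (int i = 0; i < 10; i++) {
			printf("%u ", idx);
			idx = (idx + 1) & (pages_per_wq - 1);	/* 7 wraps to 0 */
		}
		printf("\n");	/* prints: 0 1 2 3 4 5 6 7 0 1 */
		return 0;
	}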