www.infradead.org Git - users/dwmw2/linux.git/commitdiff
xsk: Wrap duplicated code to function
author: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
Mon, 7 Oct 2024 12:24:57 +0000 (14:24 +0200)
committer: Daniel Borkmann <daniel@iogearbox.net>
Mon, 14 Oct 2024 15:23:45 +0000 (17:23 +0200)
Both allocation paths have exactly the same code responsible for getting
and initializing xskb. Pull it out to common function.

Signed-off-by: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Magnus Karlsson <magnus.karlsson@intel.com>
Link: https://lore.kernel.org/bpf/20241007122458.282590-6-maciej.fijalkowski@intel.com
net/xdp/xsk_buff_pool.c

index e946ba4a5ccf94a6c95e16d02a76874ef1544963..ae71da7d2cd6ae19a2040aa76f8b8001aefbe0ad 100644 (file)
@@ -503,6 +503,22 @@ static bool xp_check_aligned(struct xsk_buff_pool *pool, u64 *addr)
        return *addr < pool->addrs_cnt;
 }
 
+static struct xdp_buff_xsk *xp_get_xskb(struct xsk_buff_pool *pool, u64 addr)
+{
+       struct xdp_buff_xsk *xskb;
+
+       if (pool->unaligned) {
+               xskb = pool->free_heads[--pool->free_heads_cnt];
+               xp_init_xskb_addr(xskb, pool, addr);
+               if (pool->dma_pages)
+                       xp_init_xskb_dma(xskb, pool, pool->dma_pages, addr);
+       } else {
+               xskb = &pool->heads[xp_aligned_extract_idx(pool, addr)];
+       }
+
+       return xskb;
+}
+
 static struct xdp_buff_xsk *__xp_alloc(struct xsk_buff_pool *pool)
 {
        struct xdp_buff_xsk *xskb;
@@ -528,14 +544,7 @@ static struct xdp_buff_xsk *__xp_alloc(struct xsk_buff_pool *pool)
                break;
        }
 
-       if (pool->unaligned) {
-               xskb = pool->free_heads[--pool->free_heads_cnt];
-               xp_init_xskb_addr(xskb, pool, addr);
-               if (pool->dma_pages)
-                       xp_init_xskb_dma(xskb, pool, pool->dma_pages, addr);
-       } else {
-               xskb = &pool->heads[xp_aligned_extract_idx(pool, addr)];
-       }
+       xskb = xp_get_xskb(pool, addr);
 
        xskq_cons_release(pool->fq);
        return xskb;
@@ -593,14 +602,7 @@ static u32 xp_alloc_new_from_fq(struct xsk_buff_pool *pool, struct xdp_buff **xd
                        continue;
                }
 
-               if (pool->unaligned) {
-                       xskb = pool->free_heads[--pool->free_heads_cnt];
-                       xp_init_xskb_addr(xskb, pool, addr);
-                       if (pool->dma_pages)
-                               xp_init_xskb_dma(xskb, pool, pool->dma_pages, addr);
-               } else {
-                       xskb = &pool->heads[xp_aligned_extract_idx(pool, addr)];
-               }
+               xskb = xp_get_xskb(pool, addr);
 
                *xdp = &xskb->xdp;
                xdp++;