xsk: Use xsk_buff_pool directly for cq functions
author     Maciej Fijalkowski <maciej.fijalkowski@intel.com>
           Mon, 7 Oct 2024 12:24:58 +0000 (14:24 +0200)
committer  Daniel Borkmann <daniel@iogearbox.net>
           Mon, 14 Oct 2024 15:23:49 +0000 (17:23 +0200)
Currently, xsk_cq_{reserve_addr,submit,cancel}_locked() take struct xdp_sock
as an input argument, but it is used only to pull the xsk_buff_pool pointer
out of it.

Change these functions to take the pool pointer as their input argument
instead, to avoid the unnecessary dereferences.

Signed-off-by: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Magnus Karlsson <magnus.karlsson@intel.com>
Link: https://lore.kernel.org/bpf/20241007122458.282590-7-maciej.fijalkowski@intel.com
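
The refactor follows a common kernel pattern: when a helper only ever uses one
field of the structure it receives, pass that field directly and let the caller
do the single dereference. A minimal, self-contained sketch of the idea
(hypothetical names, standalone C, not the actual kernel code):

#include <stdio.h>

/* Hypothetical stand-ins for struct xsk_buff_pool and struct xdp_sock. */
struct pool {
	int cq_entries;
};

struct sock_like {
	struct pool *pool;
};

/* Before: the helper takes the socket and dereferences xs->pool itself. */
static void cq_submit_via_sock(struct sock_like *xs, int n)
{
	xs->pool->cq_entries += n;
}

/* After: the caller pulls out the pool once and passes it directly. */
static void cq_submit_via_pool(struct pool *pool, int n)
{
	pool->cq_entries += n;
}

int main(void)
{
	struct pool p = { .cq_entries = 0 };
	struct sock_like xs = { .pool = &p };

	cq_submit_via_sock(&xs, 1);       /* old shape: helper(xs, n)       */
	cq_submit_via_pool(xs.pool, 1);   /* new shape: helper(xs->pool, n) */

	printf("cq_entries = %d\n", p.cq_entries); /* prints 2 */
	return 0;
}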
net/xdp/xsk.c

index 6c31c1de16197c27251f024be6af4091f0ca6e49..7d7e37f53708cf40acc33edc53f38a764ace53ca 100644
--- a/net/xdp/xsk.c
+++ b/net/xdp/xsk.c
@@ -527,34 +527,34 @@ static int xsk_wakeup(struct xdp_sock *xs, u8 flags)
        return dev->netdev_ops->ndo_xsk_wakeup(dev, xs->queue_id, flags);
 }
 
-static int xsk_cq_reserve_addr_locked(struct xdp_sock *xs, u64 addr)
+static int xsk_cq_reserve_addr_locked(struct xsk_buff_pool *pool, u64 addr)
 {
        unsigned long flags;
        int ret;
 
-       spin_lock_irqsave(&xs->pool->cq_lock, flags);
-       ret = xskq_prod_reserve_addr(xs->pool->cq, addr);
-       spin_unlock_irqrestore(&xs->pool->cq_lock, flags);
+       spin_lock_irqsave(&pool->cq_lock, flags);
+       ret = xskq_prod_reserve_addr(pool->cq, addr);
+       spin_unlock_irqrestore(&pool->cq_lock, flags);
 
        return ret;
 }
 
-static void xsk_cq_submit_locked(struct xdp_sock *xs, u32 n)
+static void xsk_cq_submit_locked(struct xsk_buff_pool *pool, u32 n)
 {
        unsigned long flags;
 
-       spin_lock_irqsave(&xs->pool->cq_lock, flags);
-       xskq_prod_submit_n(xs->pool->cq, n);
-       spin_unlock_irqrestore(&xs->pool->cq_lock, flags);
+       spin_lock_irqsave(&pool->cq_lock, flags);
+       xskq_prod_submit_n(pool->cq, n);
+       spin_unlock_irqrestore(&pool->cq_lock, flags);
 }
 
-static void xsk_cq_cancel_locked(struct xdp_sock *xs, u32 n)
+static void xsk_cq_cancel_locked(struct xsk_buff_pool *pool, u32 n)
 {
        unsigned long flags;
 
-       spin_lock_irqsave(&xs->pool->cq_lock, flags);
-       xskq_prod_cancel_n(xs->pool->cq, n);
-       spin_unlock_irqrestore(&xs->pool->cq_lock, flags);
+       spin_lock_irqsave(&pool->cq_lock, flags);
+       xskq_prod_cancel_n(pool->cq, n);
+       spin_unlock_irqrestore(&pool->cq_lock, flags);
 }
 
 static u32 xsk_get_num_desc(struct sk_buff *skb)
@@ -571,7 +571,7 @@ static void xsk_destruct_skb(struct sk_buff *skb)
                *compl->tx_timestamp = ktime_get_tai_fast_ns();
        }
 
-       xsk_cq_submit_locked(xdp_sk(skb->sk), xsk_get_num_desc(skb));
+       xsk_cq_submit_locked(xdp_sk(skb->sk)->pool, xsk_get_num_desc(skb));
        sock_wfree(skb);
 }
 
@@ -587,7 +587,7 @@ static void xsk_consume_skb(struct sk_buff *skb)
        struct xdp_sock *xs = xdp_sk(skb->sk);
 
        skb->destructor = sock_wfree;
-       xsk_cq_cancel_locked(xs, xsk_get_num_desc(skb));
+       xsk_cq_cancel_locked(xs->pool, xsk_get_num_desc(skb));
        /* Free skb without triggering the perf drop trace */
        consume_skb(skb);
        xs->skb = NULL;
@@ -765,7 +765,7 @@ free_err:
                xskq_cons_release(xs->tx);
        } else {
                /* Let application retry */
-               xsk_cq_cancel_locked(xs, 1);
+               xsk_cq_cancel_locked(xs->pool, 1);
        }
 
        return ERR_PTR(err);
@@ -802,7 +802,7 @@ static int __xsk_generic_xmit(struct sock *sk)
                 * if there is space in it. This avoids having to implement
                 * any buffering in the Tx path.
                 */
-               if (xsk_cq_reserve_addr_locked(xs, desc.addr))
+               if (xsk_cq_reserve_addr_locked(xs->pool, desc.addr))
                        goto out;
 
                skb = xsk_build_skb(xs, &desc);
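
In every updated call site the socket (or skb->sk) is still in scope, so the
conversion is mechanical: the xs->pool dereference simply moves from inside the
helpers to their callers, and the helpers' locking on pool->cq_lock is
unchanged.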