Test for dma_need_sync earlier to increase performance.
xsk_buff_dma_sync_for_cpu() takes an xdp_buff as its parameter, and the
xsk_buff_pool reference has to be dug out of that buffer. Profiling
with perf shows that this dereference causes a lot of cache misses. But
as the buffer pool is now sent down to the driver at zero-copy
initialization time, we might as well use that pointer directly instead
of going via the xsk_buff, and we can then perform the dma_need_sync
test already in xsk_buff_dma_sync_for_cpu() instead of in
xp_dma_sync_for_cpu(). This gets rid of these cache misses.
Throughput increases by 3% for the xdpsock l2fwd sample application on
my machine.
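
For illustration, a minimal sketch of the driver-side call pattern this
results in; the ring structure and its field names are hypothetical,
only the new xsk_buff_dma_sync_for_cpu() signature is taken from this
patch:

  #include <net/xdp.h>
  #include <net/xdp_sock_drv.h>

  /* Hypothetical zero-copy Rx ring: xsk_pool is the buffer pool pointer
   * that was handed to the driver at zero-copy initialization time.
   */
  struct drv_zc_ring {
          struct xsk_buff_pool *xsk_pool;
  };

  static void drv_zc_rx_frame(struct drv_zc_ring *rx_ring,
                              struct xdp_buff *xdp, unsigned int size)
  {
          xdp->data_end = xdp->data + size;
          /* The pool comes straight from the ring, so the dma_need_sync
           * test is performed here without dereferencing the xsk_buff's
           * own pool pointer.
           */
          xsk_buff_dma_sync_for_cpu(xdp, rx_ring->xsk_pool);
          /* ... run the XDP program on xdp ... */
  }
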
Signed-off-by: Magnus Karlsson <magnus.karlsson@intel.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Björn Töpel <bjorn.topel@intel.com>
Link: https://lore.kernel.org/bpf/1598603189-32145-11-git-send-email-magnus.karlsson@intel.com
 
 
                bi = i40e_rx_bi(rx_ring, rx_ring->next_to_clean);
                (*bi)->data_end = (*bi)->data + size;
-               xsk_buff_dma_sync_for_cpu(*bi);
+               xsk_buff_dma_sync_for_cpu(*bi, rx_ring->xsk_pool);
 
                xdp_res = i40e_run_xdp_zc(rx_ring, *bi);
                if (xdp_res) {
 
 
                rx_buf = &rx_ring->rx_buf[rx_ring->next_to_clean];
                rx_buf->xdp->data_end = rx_buf->xdp->data + size;
-               xsk_buff_dma_sync_for_cpu(rx_buf->xdp);
+               xsk_buff_dma_sync_for_cpu(rx_buf->xdp, rx_ring->xsk_pool);
 
                xdp_res = ice_run_xdp_zc(rx_ring, rx_buf->xdp);
                if (xdp_res) {
 
                }
 
                bi->xdp->data_end = bi->xdp->data + size;
-               xsk_buff_dma_sync_for_cpu(bi->xdp);
+               xsk_buff_dma_sync_for_cpu(bi->xdp, rx_ring->xsk_pool);
                xdp_res = ixgbe_run_xdp_zc(adapter, rx_ring, bi->xdp);
 
                if (xdp_res) {
 
 
        xdp->data_end = xdp->data + cqe_bcnt32;
        xdp_set_data_meta_invalid(xdp);
-       xsk_buff_dma_sync_for_cpu(xdp);
+       xsk_buff_dma_sync_for_cpu(xdp, rq->xsk_pool);
        prefetch(xdp->data);
 
        rcu_read_lock();
 
        xdp->data_end = xdp->data + cqe_bcnt;
        xdp_set_data_meta_invalid(xdp);
-       xsk_buff_dma_sync_for_cpu(xdp);
+       xsk_buff_dma_sync_for_cpu(xdp, rq->xsk_pool);
        prefetch(xdp->data);
 
        if (unlikely(get_cqe_opcode(cqe) != MLX5_CQE_RESP_SEND)) {
 
        return xp_raw_get_data(pool, addr);
 }
 
-static inline void xsk_buff_dma_sync_for_cpu(struct xdp_buff *xdp)
+static inline void xsk_buff_dma_sync_for_cpu(struct xdp_buff *xdp, struct xsk_buff_pool *pool)
 {
        struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);
 
+       if (!pool->dma_need_sync)
+               return;
+
        xp_dma_sync_for_cpu(xskb);
 }
 
        return NULL;
 }
 
-static inline void xsk_buff_dma_sync_for_cpu(struct xdp_buff *xdp)
+static inline void xsk_buff_dma_sync_for_cpu(struct xdp_buff *xdp, struct xsk_buff_pool *pool)
 {
 }
 
 
 void xp_dma_sync_for_cpu_slow(struct xdp_buff_xsk *xskb);
 static inline void xp_dma_sync_for_cpu(struct xdp_buff_xsk *xskb)
 {
-       if (!xskb->pool->dma_need_sync)
-               return;
-
        xp_dma_sync_for_cpu_slow(xskb);
 }