www.infradead.org Git - users/dwmw2/linux.git/commitdiff
xsk: Carry a copy of xdp_zc_max_segs within xsk_buff_pool
authorMaciej Fijalkowski <maciej.fijalkowski@intel.com>
Mon, 7 Oct 2024 12:24:56 +0000 (14:24 +0200)
committerDaniel Borkmann <daniel@iogearbox.net>
Mon, 14 Oct 2024 15:23:30 +0000 (17:23 +0200)
This is done so we avoid dereferencing struct net_device within the hot path.

Signed-off-by: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Magnus Karlsson <magnus.karlsson@intel.com>
Link: https://lore.kernel.org/bpf/20241007122458.282590-5-maciej.fijalkowski@intel.com
include/net/xsk_buff_pool.h
net/xdp/xsk_buff_pool.c
net/xdp/xsk_queue.h

index 468a23b1b4c555123fc07155703bed450cf185f0..bb03cee716b316f22c3181240a1823a4477f7f6b 100644 (file)
@@ -76,6 +76,7 @@ struct xsk_buff_pool {
        u32 chunk_size;
        u32 chunk_shift;
        u32 frame_len;
+       u32 xdp_zc_max_segs;
        u8 tx_metadata_len; /* inherited from umem */
        u8 cached_need_wakeup;
        bool uses_need_wakeup;
index 7ecd4ccd24735da2f9af3b9665a835a4fde36fdb..e946ba4a5ccf94a6c95e16d02a76874ef1544963 100644 (file)
@@ -229,6 +229,7 @@ int xp_assign_dev(struct xsk_buff_pool *pool,
                goto err_unreg_xsk;
        }
        pool->umem->zc = true;
+       pool->xdp_zc_max_segs = netdev->xdp_zc_max_segs;
        return 0;
 
 err_unreg_xsk:
index 406b20dfee8d4783a28bba3e03b7c7553301b087..46d87e961ad6d3b5dbce81ac30bcf1956b9f2605 100644 (file)
@@ -260,7 +260,7 @@ u32 xskq_cons_read_desc_batch(struct xsk_queue *q, struct xsk_buff_pool *pool,
                        nr_frags = 0;
                } else {
                        nr_frags++;
-                       if (nr_frags == pool->netdev->xdp_zc_max_segs) {
+                       if (nr_frags == pool->xdp_zc_max_segs) {
                                nr_frags = 0;
                                break;
                        }