mlxsw: Switch to napi_gro_receive()
author Ido Schimmel <idosch@nvidia.com>
Mon, 16 Dec 2024 13:18:44 +0000 (14:18 +0100)
committer Jakub Kicinski <kuba@kernel.org>
Wed, 18 Dec 2024 03:40:11 +0000 (19:40 -0800)
Benefit from the recent conversion of the driver to NAPI and enable GRO
support through the use of napi_gro_receive(). Pass the NAPI pointer
from the bus driver (mlxsw_pci) to the switch driver (mlxsw_spectrum)
through the skb control block where various packet metadata is already
encoded.
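
In outline, the mechanism looks like the sketch below (simplified from
the diff that follows; the real definitions live in core.h, pci.c and
spectrum.c):

    /* core.h: the NAPI pointer rides in the skb control block next to
     * the other per-packet RX metadata.
     */
    struct mlxsw_rx_md_info {
            struct napi_struct *napi;
            /* ... cookie_index, latency, tx_congestion, ... */
    };

    /* pci.c (bus driver), in the RX completion handler: record which
     * NAPI context the packet was received in.
     */
    mlxsw_skb_cb(skb)->rx_md_info.napi = napi;

    /* spectrum.c / spectrum_trap.c (switch driver), in the RX listener:
     * hand the skb to GRO in that same NAPI context instead of calling
     * netif_receive_skb().
     */
    napi_gro_receive(mlxsw_skb_cb(skb)->rx_md_info.napi, skb);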

The main motivation is to improve forwarding performance through the use
of GRO fraglist [1]. In my testing, when the forwarding data path is
simple (routing between two ports), there is not much difference in
forwarding performance between GRO disabled and GRO enabled with
fraglist.
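
(GRO fraglist is off by default and has to be enabled per netdevice to
reproduce such a test. Assuming a hypothetical port named swp1:

    ethtool -K swp1 rx-gro-list on

The port name is only an example; rx-gro-list is the ethtool feature
behind NETIF_F_GRO_FRAGLIST.)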

The improvement becomes more noticeable as the data path becomes more
complex, since it is traversed fewer times with GRO enabled. For example,
with 10 ingress and 10 egress flower filters with different priorities
on the two ports between which routing is performed, there is an
improvement of about 140% in forwarded bandwidth.

[1] https://lore.kernel.org/netdev/20200125102645.4782-1-steffen.klassert@secunet.com/

Signed-off-by: Ido Schimmel <idosch@nvidia.com>
Reviewed-by: Petr Machata <petrm@nvidia.com>
Reviewed-by: Amit Cohen <amcohen@nvidia.com>
Signed-off-by: Petr Machata <petrm@nvidia.com>
Reviewed-by: Alexander Lobakin <aleksander.lobakin@intel.com>
Link: https://patch.msgid.link/21258fe55f608ccf1ee2783a5a4534220af28903.1734354812.git.petrm@nvidia.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
drivers/net/ethernet/mellanox/mlxsw/core.h
drivers/net/ethernet/mellanox/mlxsw/pci.c
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c

diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h
index 6d11225594dd13f73aa68719f7f827dfbf1caa57..24c3ff6fcf71be4ee0a33df54e52e82fd1536319 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.h
@@ -73,6 +73,7 @@ struct mlxsw_tx_info {
 };
 
 struct mlxsw_rx_md_info {
+       struct napi_struct *napi;
        u32 cookie_index;
        u32 latency;
        u32 tx_congestion;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
index d6f37456fb317d9cf7fba07bab261baaa09ba49c..0863dca2fc0be32b524b8157e7c61032a97bd897 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
@@ -737,6 +737,7 @@ static void mlxsw_pci_cqe_rdq_md_init(struct sk_buff *skb, const char *cqe)
 }
 
 static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci,
+                                    struct napi_struct *napi,
                                     struct mlxsw_pci_queue *q,
                                     u16 consumer_counter_limit,
                                     enum mlxsw_pci_cqe_v cqe_v, char *cqe)
@@ -807,6 +808,7 @@ static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci,
        }
 
        mlxsw_pci_skb_cb_ts_set(mlxsw_pci, skb, cqe_v, cqe);
+       mlxsw_skb_cb(skb)->rx_md_info.napi = napi;
 
        mlxsw_core_skb_receive(mlxsw_pci->core, skb, &rx_info);
 
@@ -869,7 +871,7 @@ static int mlxsw_pci_napi_poll_cq_rx(struct napi_struct *napi, int budget)
                        continue;
                }
 
-               mlxsw_pci_cqe_rdq_handle(mlxsw_pci, rdq,
+               mlxsw_pci_cqe_rdq_handle(mlxsw_pci, napi, rdq,
                                         wqe_counter, q->u.cq.v, cqe);
 
                if (++work_done == budget)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 3f5e5d99251b75312e2d74a4da16704fc34de366..aa71993daf282dcf17e62a243870e225c7036c97 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -2449,7 +2449,7 @@ void mlxsw_sp_rx_listener_no_mark_func(struct sk_buff *skb,
        u64_stats_update_end(&pcpu_stats->syncp);
 
        skb->protocol = eth_type_trans(skb, skb->dev);
-       netif_receive_skb(skb);
+       napi_gro_receive(mlxsw_skb_cb(skb)->rx_md_info.napi, skb);
 }
 
 static void mlxsw_sp_rx_listener_mark_func(struct sk_buff *skb, u16 local_port,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
index 899c954e0e5f8f26a5b523df7d13713cf7650992..1f9c1c86839f190ba7cad04a3dcfaee238a91ce4 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
@@ -173,7 +173,7 @@ static void mlxsw_sp_rx_no_mark_listener(struct sk_buff *skb, u16 local_port,
        if (err)
                return;
 
-       netif_receive_skb(skb);
+       napi_gro_receive(mlxsw_skb_cb(skb)->rx_md_info.napi, skb);
 }
 
 static void mlxsw_sp_rx_mark_listener(struct sk_buff *skb, u16 local_port,