From 87c8f8496a05de71dc42f5f2ed2b1ea64ea8b77d Mon Sep 17 00:00:00 2001 From: Taehee Yoo Date: Tue, 14 Jan 2025 14:28:49 +0000 Subject: [PATCH 01/16] bnxt_en: add support for tcp-data-split ethtool command NICs that use the bnxt_en driver support the tcp-data-split feature under the name HDS (header-data-split). But there is no implementation to enable HDS via ethtool. Only getting the current HDS status is implemented, and HDS is just automatically enabled when either LRO, HW-GRO, or JUMBO is enabled. The hds_threshold follows the rx-copybreak value and was unchangeable. This implements the `ethtool -G <dev> tcp-data-split <on | auto>` command option. The value can be <on> or <auto>. When the value is <auto> and one of LRO/GRO/JUMBO is enabled, HDS is automatically enabled; when all LRO/GRO/JUMBO are disabled, HDS is automatically disabled. The HDS feature relies on the aggregation ring. So, if HDS is enabled, the bnxt_en driver initializes the aggregation ring. This is the reason why BNXT_FLAG_AGG_RINGS contains the HDS condition. Acked-by: Jakub Kicinski Tested-by: Stanislav Fomichev Tested-by: Andy Gospodarek Signed-off-by: Taehee Yoo Reviewed-by: Michael Chan Link: https://patch.msgid.link/20250114142852.3364986-8-ap420073@gmail.com Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 2 +- drivers/net/ethernet/broadcom/bnxt/bnxt.h | 5 +++-- .../net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 20 +++++++++++++++++++ drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c | 4 ++++ 4 files changed, 28 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index d19c4fb588e5..f029559a581e 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -4630,7 +4630,7 @@ void bnxt_set_ring_params(struct bnxt *bp) bp->rx_agg_ring_size = 0; bp->rx_agg_nr_pages = 0; - if (bp->flags & BNXT_FLAG_TPA) + if (bp->flags & BNXT_FLAG_TPA || bp->flags & BNXT_FLAG_HDS) agg_factor = min_t(u32, 4, 65536 / BNXT_RX_PAGE_SIZE); bp->flags &= ~BNXT_FLAG_JUMBO; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index 7edb92ce5976..7dc06e07bae2 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -2244,8 +2244,6 @@ struct bnxt { #define BNXT_FLAG_TPA (BNXT_FLAG_LRO | BNXT_FLAG_GRO) #define BNXT_FLAG_JUMBO 0x10 #define BNXT_FLAG_STRIP_VLAN 0x20 - #define BNXT_FLAG_AGG_RINGS (BNXT_FLAG_JUMBO | BNXT_FLAG_GRO | \ - BNXT_FLAG_LRO) #define BNXT_FLAG_RFS 0x100 #define BNXT_FLAG_SHARED_RINGS 0x200 #define BNXT_FLAG_PORT_STATS 0x400 @@ -2266,6 +2264,9 @@ struct bnxt { #define BNXT_FLAG_ROCE_MIRROR_CAP 0x4000000 #define BNXT_FLAG_TX_COAL_CMPL 0x8000000 #define BNXT_FLAG_PORT_STATS_EXT 0x10000000 + #define BNXT_FLAG_HDS 0x20000000 + #define BNXT_FLAG_AGG_RINGS (BNXT_FLAG_JUMBO | BNXT_FLAG_GRO | \ + BNXT_FLAG_LRO | BNXT_FLAG_HDS) #define BNXT_FLAG_ALL_CONFIG_FEATS (BNXT_FLAG_TPA | \ BNXT_FLAG_RFS | \ diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index e9e63d95df17..413007190f50 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -840,16 +840,35 @@ static int bnxt_set_ringparam(struct net_device *dev, struct kernel_ethtool_ringparam *kernel_ering, struct netlink_ext_ack *extack) { + u8 tcp_data_split = kernel_ering->tcp_data_split; struct bnxt *bp = netdev_priv(dev); + u8 hds_config_mod; if ((ering->rx_pending 
> BNXT_MAX_RX_DESC_CNT) || (ering->tx_pending > BNXT_MAX_TX_DESC_CNT) || (ering->tx_pending < BNXT_MIN_TX_DESC_CNT)) return -EINVAL; + hds_config_mod = tcp_data_split != dev->ethtool->hds_config; + if (tcp_data_split == ETHTOOL_TCP_DATA_SPLIT_DISABLED && hds_config_mod) + return -EINVAL; + + if (tcp_data_split == ETHTOOL_TCP_DATA_SPLIT_ENABLED && + hds_config_mod && BNXT_RX_PAGE_MODE(bp)) { + NL_SET_ERR_MSG_MOD(extack, "tcp-data-split is disallowed when XDP is attached"); + return -EINVAL; + } + if (netif_running(dev)) bnxt_close_nic(bp, false, false); + if (hds_config_mod) { + if (tcp_data_split == ETHTOOL_TCP_DATA_SPLIT_ENABLED) + bp->flags |= BNXT_FLAG_HDS; + else if (tcp_data_split == ETHTOOL_TCP_DATA_SPLIT_UNKNOWN) + bp->flags &= ~BNXT_FLAG_HDS; + } + bp->rx_ring_size = ering->rx_pending; bp->tx_ring_size = ering->tx_pending; bnxt_set_ring_params(bp); @@ -5371,6 +5390,7 @@ const struct ethtool_ops bnxt_ethtool_ops = { ETHTOOL_COALESCE_STATS_BLOCK_USECS | ETHTOOL_COALESCE_USE_ADAPTIVE_RX | ETHTOOL_COALESCE_USE_CQE, + .supported_ring_params = ETHTOOL_RING_USE_TCP_DATA_SPLIT, .get_link_ksettings = bnxt_get_link_ksettings, .set_link_ksettings = bnxt_set_link_ksettings, .get_fec_stats = bnxt_get_fec_stats, diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c index f88b641533fc..1bfff7f29310 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c @@ -395,6 +395,10 @@ static int bnxt_xdp_set(struct bnxt *bp, struct bpf_prog *prog) bp->dev->mtu, BNXT_MAX_PAGE_MODE_MTU); return -EOPNOTSUPP; } + if (prog && bp->flags & BNXT_FLAG_HDS) { + netdev_warn(dev, "XDP is disallowed when HDS is enabled.\n"); + return -EOPNOTSUPP; + } if (!(bp->flags & BNXT_FLAG_SHARED_RINGS)) { netdev_warn(dev, "ethtool rx/tx channels must be combined to support XDP.\n"); return -EOPNOTSUPP; -- 2.51.0 From 6b43673a25c3666d42f5524e59aed8a3914924cc Mon Sep 17 00:00:00 2001 From: Taehee Yoo Date: Tue, 14 Jan 2025 14:28:50 +0000 Subject: [PATCH 02/16] bnxt_en: add support for hds-thresh ethtool command The bnxt_en driver has configured the hds_threshold value automatically when TPA is enabled based on the rx-copybreak default value. Now the hds-thresh ethtool command is added, so it adds an implementation of hds-thresh option. Configuration of the hds-thresh is applied only when the tcp-data-split is enabled. The default value of hds-thresh is 256, which is the default value of rx-copybreak, which used to be the hds_thresh value. The maximum hds-thresh is 1023. # Example: # ethtool -G enp14s0f0np0 tcp-data-split on hds-thresh 256 # ethtool -g enp14s0f0np0 Ring parameters for enp14s0f0np0: Pre-set maximums: ... HDS thresh: 1023 Current hardware settings: ... 
TCP data split: on HDS thresh: 256 Tested-by: Stanislav Fomichev Tested-by: Andy Gospodarek Signed-off-by: Taehee Yoo Reviewed-by: Michael Chan Link: https://patch.msgid.link/20250114142852.3364986-9-ap420073@gmail.com Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 4 +++- drivers/net/ethernet/broadcom/bnxt/bnxt.h | 2 ++ drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 6 +++++- 3 files changed, 10 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index f029559a581e..caddb5cbc024 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -4610,6 +4610,7 @@ void bnxt_set_tpa_flags(struct bnxt *bp) static void bnxt_init_ring_params(struct bnxt *bp) { bp->rx_copybreak = BNXT_DEFAULT_RX_COPYBREAK; + bp->dev->ethtool->hds_thresh = BNXT_DEFAULT_RX_COPYBREAK; } /* bp->rx_ring_size, bp->tx_ring_size, dev->mtu, BNXT_FLAG_{G|L}RO flags must @@ -6569,6 +6570,7 @@ static void bnxt_hwrm_update_rss_hash_cfg(struct bnxt *bp) static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, struct bnxt_vnic_info *vnic) { + u16 hds_thresh = (u16)bp->dev->ethtool->hds_thresh; struct hwrm_vnic_plcmodes_cfg_input *req; int rc; @@ -6585,7 +6587,7 @@ static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, struct bnxt_vnic_info *vnic) VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6); req->enables |= cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID); - req->hds_threshold = cpu_to_le16(bp->rx_copybreak); + req->hds_threshold = cpu_to_le16(hds_thresh); } req->vnic_id = cpu_to_le32(vnic->fw_vnic_id); return hwrm_req_send(bp, req); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index 7dc06e07bae2..8f481dd9c224 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -2779,6 +2779,8 @@ struct bnxt { #define SFF_MODULE_ID_QSFP28 0x11 #define BNXT_MAX_PHY_I2C_RESP_SIZE 64 +#define BNXT_HDS_THRESHOLD_MAX 1023 + static inline u32 bnxt_tx_avail(struct bnxt *bp, const struct bnxt_tx_ring_info *txr) { diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index 413007190f50..540c140d52dc 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -833,6 +833,9 @@ static void bnxt_get_ringparam(struct net_device *dev, ering->rx_pending = bp->rx_ring_size; ering->rx_jumbo_pending = bp->rx_agg_ring_size; ering->tx_pending = bp->tx_ring_size; + + kernel_ering->hds_thresh = dev->ethtool->hds_thresh; + kernel_ering->hds_thresh_max = BNXT_HDS_THRESHOLD_MAX; } static int bnxt_set_ringparam(struct net_device *dev, @@ -5390,7 +5393,8 @@ const struct ethtool_ops bnxt_ethtool_ops = { ETHTOOL_COALESCE_STATS_BLOCK_USECS | ETHTOOL_COALESCE_USE_ADAPTIVE_RX | ETHTOOL_COALESCE_USE_CQE, - .supported_ring_params = ETHTOOL_RING_USE_TCP_DATA_SPLIT, + .supported_ring_params = ETHTOOL_RING_USE_TCP_DATA_SPLIT | + ETHTOOL_RING_USE_HDS_THRS, .get_link_ksettings = bnxt_get_link_ksettings, .set_link_ksettings = bnxt_set_link_ksettings, .get_fec_stats = bnxt_get_fec_stats, -- 2.51.0 From f394d07b192b67a895dbed76253ce95dcbb5d17c Mon Sep 17 00:00:00 2001 From: Taehee Yoo Date: Tue, 14 Jan 2025 14:28:51 +0000 Subject: [PATCH 03/16] netdevsim: add HDS feature HDS options(tcp-data-split, hds-thresh) have dependencies between other features like XDP. Basic dependencies are checked in the core API. 
netdevsim is very useful to check basic dependencies. The default tcp-data-split mode is UNKNOWN but netdevsim driver returns ENABLED when ethtool dumps tcp-data-split mode. The default value of HDS threshold is 0 and the maximum value is 1024. ethtool shows like this. ethtool -g eni1np1 Ring parameters for eni1np1: Pre-set maximums: ... HDS thresh: 1024 Current hardware settings: ... TCP data split: on HDS thresh: 0 ethtool -G eni1np1 tcp-data-split on hds-thresh 1024 ethtool -g eni1np1 Ring parameters for eni1np1: Pre-set maximums: ... HDS thresh: 1024 Current hardware settings: ... TCP data split: on HDS thresh: 1024 Signed-off-by: Taehee Yoo Link: https://patch.msgid.link/20250114142852.3364986-10-ap420073@gmail.com Signed-off-by: Jakub Kicinski --- drivers/net/netdevsim/ethtool.c | 12 +++++++++++- drivers/net/netdevsim/netdev.c | 9 +++++++++ drivers/net/netdevsim/netdevsim.h | 3 +++ 3 files changed, 23 insertions(+), 1 deletion(-) diff --git a/drivers/net/netdevsim/ethtool.c b/drivers/net/netdevsim/ethtool.c index 5fe1eaef99b5..9e0df40c71e1 100644 --- a/drivers/net/netdevsim/ethtool.c +++ b/drivers/net/netdevsim/ethtool.c @@ -2,7 +2,6 @@ // Copyright (c) 2020 Facebook #include -#include #include #include "netdevsim.h" @@ -72,6 +71,12 @@ static void nsim_get_ringparam(struct net_device *dev, struct netdevsim *ns = netdev_priv(dev); memcpy(ring, &ns->ethtool.ring, sizeof(ns->ethtool.ring)); + kernel_ring->tcp_data_split = dev->ethtool->hds_config; + kernel_ring->hds_thresh = dev->ethtool->hds_thresh; + kernel_ring->hds_thresh_max = NSIM_HDS_THRESHOLD_MAX; + + if (kernel_ring->tcp_data_split == ETHTOOL_TCP_DATA_SPLIT_UNKNOWN) + kernel_ring->tcp_data_split = ETHTOOL_TCP_DATA_SPLIT_ENABLED; } static int nsim_set_ringparam(struct net_device *dev, @@ -161,6 +166,8 @@ static int nsim_get_ts_info(struct net_device *dev, static const struct ethtool_ops nsim_ethtool_ops = { .supported_coalesce_params = ETHTOOL_COALESCE_ALL_PARAMS, + .supported_ring_params = ETHTOOL_RING_USE_TCP_DATA_SPLIT | + ETHTOOL_RING_USE_HDS_THRS, .get_pause_stats = nsim_get_pause_stats, .get_pauseparam = nsim_get_pauseparam, .set_pauseparam = nsim_set_pauseparam, @@ -182,6 +189,9 @@ static void nsim_ethtool_ring_init(struct netdevsim *ns) ns->ethtool.ring.rx_jumbo_max_pending = 4096; ns->ethtool.ring.rx_mini_max_pending = 4096; ns->ethtool.ring.tx_max_pending = 4096; + + ns->netdev->ethtool->hds_config = ETHTOOL_TCP_DATA_SPLIT_UNKNOWN; + ns->netdev->ethtool->hds_thresh = 0; } void nsim_ethtool_init(struct netdevsim *ns) diff --git a/drivers/net/netdevsim/netdev.c b/drivers/net/netdevsim/netdev.c index d013b6498539..f92b05ccdca9 100644 --- a/drivers/net/netdevsim/netdev.c +++ b/drivers/net/netdevsim/netdev.c @@ -15,6 +15,7 @@ #include #include +#include #include #include #include @@ -54,6 +55,7 @@ static int nsim_forward_skb(struct net_device *dev, struct sk_buff *skb, static netdev_tx_t nsim_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct netdevsim *ns = netdev_priv(dev); + struct ethtool_netdev_state *ethtool; struct net_device *peer_dev; unsigned int len = skb->len; struct netdevsim *peer_ns; @@ -74,6 +76,13 @@ static netdev_tx_t nsim_start_xmit(struct sk_buff *skb, struct net_device *dev) rxq = rxq % peer_dev->num_rx_queues; rq = peer_ns->rq[rxq]; + ethtool = peer_dev->ethtool; + if (skb_is_nonlinear(skb) && + (ethtool->hds_config != ETHTOOL_TCP_DATA_SPLIT_ENABLED || + (ethtool->hds_config == ETHTOOL_TCP_DATA_SPLIT_ENABLED && + ethtool->hds_thresh > len))) + skb_linearize(skb); + skb_tx_timestamp(skb); 
if (unlikely(nsim_forward_skb(peer_dev, skb, rq) == NET_RX_DROP)) goto out_drop_cnt; diff --git a/drivers/net/netdevsim/netdevsim.h b/drivers/net/netdevsim/netdevsim.h index a70f62af4c88..dcf073bc4802 100644 --- a/drivers/net/netdevsim/netdevsim.h +++ b/drivers/net/netdevsim/netdevsim.h @@ -16,6 +16,7 @@ #include #include #include +#include #include #include #include @@ -36,6 +37,8 @@ #define NSIM_IPSEC_VALID BIT(31) #define NSIM_UDP_TUNNEL_N_PORTS 4 +#define NSIM_HDS_THRESHOLD_MAX 1024 + struct nsim_sa { struct xfrm_state *xs; __be32 ipaddr[4]; -- 2.51.0 From cfd70e3eba2b68aa230d431e3c6ca0a1566e8d2e Mon Sep 17 00:00:00 2001 From: Taehee Yoo Date: Tue, 14 Jan 2025 14:28:52 +0000 Subject: [PATCH 04/16] selftest: net-drv: hds: add test for HDS feature HDS/HDS-thresh features were updated/implemented. so add some tests for these features. HDS tests are the same with `ethtool -G eth0 tcp-data-split ` but `auto` depends on driver specification. So, it doesn't include `auto` case. HDS-thresh tests are same with `ethtool -G eth0 hds-thresh <0 - MAX>` It includes both 0 and MAX cases. It also includes exceed case, MAX + 1. Signed-off-by: Taehee Yoo Link: https://patch.msgid.link/20250114142852.3364986-11-ap420073@gmail.com Signed-off-by: Jakub Kicinski --- tools/testing/selftests/drivers/net/Makefile | 1 + tools/testing/selftests/drivers/net/hds.py | 120 +++++++++++++++++++ 2 files changed, 121 insertions(+) create mode 100755 tools/testing/selftests/drivers/net/hds.py diff --git a/tools/testing/selftests/drivers/net/Makefile b/tools/testing/selftests/drivers/net/Makefile index 469179c18935..137470bdee0c 100644 --- a/tools/testing/selftests/drivers/net/Makefile +++ b/tools/testing/selftests/drivers/net/Makefile @@ -12,6 +12,7 @@ TEST_PROGS := \ queues.py \ stats.py \ shaper.py \ + hds.py \ # end of TEST_PROGS include ../../lib.mk diff --git a/tools/testing/selftests/drivers/net/hds.py b/tools/testing/selftests/drivers/net/hds.py new file mode 100755 index 000000000000..394971b25c0b --- /dev/null +++ b/tools/testing/selftests/drivers/net/hds.py @@ -0,0 +1,120 @@ +#!/usr/bin/env python3 +# SPDX-License-Identifier: GPL-2.0 + +import errno +from lib.py import ksft_run, ksft_exit, ksft_eq, ksft_raises, KsftSkipEx +from lib.py import EthtoolFamily, NlError +from lib.py import NetDrvEnv + +def get_hds(cfg, netnl) -> None: + try: + rings = netnl.rings_get({'header': {'dev-index': cfg.ifindex}}) + except NlError as e: + raise KsftSkipEx('ring-get not supported by device') + if 'tcp-data-split' not in rings: + raise KsftSkipEx('tcp-data-split not supported by device') + +def get_hds_thresh(cfg, netnl) -> None: + try: + rings = netnl.rings_get({'header': {'dev-index': cfg.ifindex}}) + except NlError as e: + raise KsftSkipEx('ring-get not supported by device') + if 'hds-thresh' not in rings: + raise KsftSkipEx('hds-thresh not supported by device') + +def set_hds_enable(cfg, netnl) -> None: + try: + netnl.rings_set({'header': {'dev-index': cfg.ifindex}, 'tcp-data-split': 'enabled'}) + except NlError as e: + if e.error == errno.EINVAL: + raise KsftSkipEx("disabling of HDS not supported by the device") + elif e.error == errno.EOPNOTSUPP: + raise KsftSkipEx("ring-set not supported by the device") + try: + rings = netnl.rings_get({'header': {'dev-index': cfg.ifindex}}) + except NlError as e: + raise KsftSkipEx('ring-get not supported by device') + if 'tcp-data-split' not in rings: + raise KsftSkipEx('tcp-data-split not supported by device') + + ksft_eq('enabled', rings['tcp-data-split']) + +def 
set_hds_disable(cfg, netnl) -> None: + try: + netnl.rings_set({'header': {'dev-index': cfg.ifindex}, 'tcp-data-split': 'disabled'}) + except NlError as e: + if e.error == errno.EINVAL: + raise KsftSkipEx("disabling of HDS not supported by the device") + elif e.error == errno.EOPNOTSUPP: + raise KsftSkipEx("ring-set not supported by the device") + try: + rings = netnl.rings_get({'header': {'dev-index': cfg.ifindex}}) + except NlError as e: + raise KsftSkipEx('ring-get not supported by device') + if 'tcp-data-split' not in rings: + raise KsftSkipEx('tcp-data-split not supported by device') + + ksft_eq('disabled', rings['tcp-data-split']) + +def set_hds_thresh_zero(cfg, netnl) -> None: + try: + netnl.rings_set({'header': {'dev-index': cfg.ifindex}, 'hds-thresh': 0}) + except NlError as e: + if e.error == errno.EINVAL: + raise KsftSkipEx("hds-thresh-set not supported by the device") + elif e.error == errno.EOPNOTSUPP: + raise KsftSkipEx("ring-set not supported by the device") + try: + rings = netnl.rings_get({'header': {'dev-index': cfg.ifindex}}) + except NlError as e: + raise KsftSkipEx('ring-get not supported by device') + if 'hds-thresh' not in rings: + raise KsftSkipEx('hds-thresh not supported by device') + + ksft_eq(0, rings['hds-thresh']) + +def set_hds_thresh_max(cfg, netnl) -> None: + try: + rings = netnl.rings_get({'header': {'dev-index': cfg.ifindex}}) + except NlError as e: + raise KsftSkipEx('ring-get not supported by device') + if 'hds-thresh' not in rings: + raise KsftSkipEx('hds-thresh not supported by device') + try: + netnl.rings_set({'header': {'dev-index': cfg.ifindex}, 'hds-thresh': rings['hds-thresh-max']}) + except NlError as e: + if e.error == errno.EINVAL: + raise KsftSkipEx("hds-thresh-set not supported by the device") + elif e.error == errno.EOPNOTSUPP: + raise KsftSkipEx("ring-set not supported by the device") + rings = netnl.rings_get({'header': {'dev-index': cfg.ifindex}}) + ksft_eq(rings['hds-thresh'], rings['hds-thresh-max']) + +def set_hds_thresh_gt(cfg, netnl) -> None: + try: + rings = netnl.rings_get({'header': {'dev-index': cfg.ifindex}}) + except NlError as e: + raise KsftSkipEx('ring-get not supported by device') + if 'hds-thresh' not in rings: + raise KsftSkipEx('hds-thresh not supported by device') + if 'hds-thresh-max' not in rings: + raise KsftSkipEx('hds-thresh-max not defined by device') + hds_gt = rings['hds-thresh-max'] + 1 + with ksft_raises(NlError) as e: + netnl.rings_set({'header': {'dev-index': cfg.ifindex}, 'hds-thresh': hds_gt}) + ksft_eq(e.exception.nl_msg.error, -errno.EINVAL) + +def main() -> None: + with NetDrvEnv(__file__, queue_count=3) as cfg: + ksft_run([get_hds, + get_hds_thresh, + set_hds_disable, + set_hds_enable, + set_hds_thresh_zero, + set_hds_thresh_max, + set_hds_thresh_gt], + args=(cfg, EthtoolFamily())) + ksft_exit() + +if __name__ == "__main__": + main() -- 2.51.0 From 3440fa34ad99d471f1085bc2f4dedeaebc310261 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Tue, 14 Jan 2025 22:10:49 +0000 Subject: [PATCH 05/16] inet: ipmr: fix data-races Following fields of 'struct mr_mfc' can be updated concurrently (no lock protection) from ip_mr_forward() and ip6_mr_forward() - bytes - pkt - wrong_if - lastuse They also can be read from other functions. Convert bytes, pkt and wrong_if to atomic_long_t, and use READ_ONCE()/WRITE_ONCE() for lastuse. 
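For illustration only (the helper name below is hypothetical and not part of this patch), the resulting update pattern in the forwarding path looks like this:

	#include <linux/atomic.h>
	#include <linux/jiffies.h>
	#include <linux/mroute_base.h>

	/* Hypothetical helper mirroring the converted ip_mr_forward() /
	 * ip6_mr_forward() hunks: the counters are now atomic_long_t and
	 * lastuse is only touched via READ_ONCE()/WRITE_ONCE().
	 */
	static void mr_mfc_account(struct mr_mfc *c, unsigned int len)
	{
		atomic_long_inc(&c->mfc_un.res.pkt);
		atomic_long_add(len, &c->mfc_un.res.bytes);
		WRITE_ONCE(c->mfc_un.res.lastuse, jiffies);
	}
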
Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2") Signed-off-by: Eric Dumazet Reviewed-by: David Ahern Link: https://patch.msgid.link/20250114221049.1190631-1-edumazet@google.com Signed-off-by: Jakub Kicinski --- .../net/ethernet/mellanox/mlxsw/spectrum_mr.c | 8 +++--- include/linux/mroute_base.h | 6 ++-- net/ipv4/ipmr.c | 28 +++++++++---------- net/ipv4/ipmr_base.c | 6 ++-- net/ipv6/ip6mr.c | 28 +++++++++---------- 5 files changed, 38 insertions(+), 38 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c index 69cd689dbc83..5afe6b155ef0 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c @@ -1003,10 +1003,10 @@ static void mlxsw_sp_mr_route_stats_update(struct mlxsw_sp *mlxsw_sp, mr->mr_ops->route_stats(mlxsw_sp, mr_route->route_priv, &packets, &bytes); - if (mr_route->mfc->mfc_un.res.pkt != packets) - mr_route->mfc->mfc_un.res.lastuse = jiffies; - mr_route->mfc->mfc_un.res.pkt = packets; - mr_route->mfc->mfc_un.res.bytes = bytes; + if (atomic_long_read(&mr_route->mfc->mfc_un.res.pkt) != packets) + WRITE_ONCE(mr_route->mfc->mfc_un.res.lastuse, jiffies); + atomic_long_set(&mr_route->mfc->mfc_un.res.pkt, packets); + atomic_long_set(&mr_route->mfc->mfc_un.res.bytes, bytes); } static void mlxsw_sp_mr_stats_update(struct work_struct *work) diff --git a/include/linux/mroute_base.h b/include/linux/mroute_base.h index 9dd4bf157255..58a2401e4b55 100644 --- a/include/linux/mroute_base.h +++ b/include/linux/mroute_base.h @@ -146,9 +146,9 @@ struct mr_mfc { unsigned long last_assert; int minvif; int maxvif; - unsigned long bytes; - unsigned long pkt; - unsigned long wrong_if; + atomic_long_t bytes; + atomic_long_t pkt; + atomic_long_t wrong_if; unsigned long lastuse; unsigned char ttls[MAXVIFS]; refcount_t refcount; diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c index 99d8faa508e5..21ae7594a852 100644 --- a/net/ipv4/ipmr.c +++ b/net/ipv4/ipmr.c @@ -831,7 +831,7 @@ static void ipmr_update_thresholds(struct mr_table *mrt, struct mr_mfc *cache, cache->mfc_un.res.maxvif = vifi + 1; } } - cache->mfc_un.res.lastuse = jiffies; + WRITE_ONCE(cache->mfc_un.res.lastuse, jiffies); } static int vif_add(struct net *net, struct mr_table *mrt, @@ -1681,9 +1681,9 @@ int ipmr_ioctl(struct sock *sk, int cmd, void *arg) rcu_read_lock(); c = ipmr_cache_find(mrt, sr->src.s_addr, sr->grp.s_addr); if (c) { - sr->pktcnt = c->_c.mfc_un.res.pkt; - sr->bytecnt = c->_c.mfc_un.res.bytes; - sr->wrong_if = c->_c.mfc_un.res.wrong_if; + sr->pktcnt = atomic_long_read(&c->_c.mfc_un.res.pkt); + sr->bytecnt = atomic_long_read(&c->_c.mfc_un.res.bytes); + sr->wrong_if = atomic_long_read(&c->_c.mfc_un.res.wrong_if); rcu_read_unlock(); return 0; } @@ -1753,9 +1753,9 @@ int ipmr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg) rcu_read_lock(); c = ipmr_cache_find(mrt, sr.src.s_addr, sr.grp.s_addr); if (c) { - sr.pktcnt = c->_c.mfc_un.res.pkt; - sr.bytecnt = c->_c.mfc_un.res.bytes; - sr.wrong_if = c->_c.mfc_un.res.wrong_if; + sr.pktcnt = atomic_long_read(&c->_c.mfc_un.res.pkt); + sr.bytecnt = atomic_long_read(&c->_c.mfc_un.res.bytes); + sr.wrong_if = atomic_long_read(&c->_c.mfc_un.res.wrong_if); rcu_read_unlock(); if (copy_to_user(arg, &sr, sizeof(sr))) @@ -1988,9 +1988,9 @@ static void ip_mr_forward(struct net *net, struct mr_table *mrt, int vif, ct; vif = c->_c.mfc_parent; - c->_c.mfc_un.res.pkt++; - c->_c.mfc_un.res.bytes += skb->len; - c->_c.mfc_un.res.lastuse = jiffies; + 
atomic_long_inc(&c->_c.mfc_un.res.pkt); + atomic_long_add(skb->len, &c->_c.mfc_un.res.bytes); + WRITE_ONCE(c->_c.mfc_un.res.lastuse, jiffies); if (c->mfc_origin == htonl(INADDR_ANY) && true_vifi >= 0) { struct mfc_cache *cache_proxy; @@ -2021,7 +2021,7 @@ static void ip_mr_forward(struct net *net, struct mr_table *mrt, goto dont_forward; } - c->_c.mfc_un.res.wrong_if++; + atomic_long_inc(&c->_c.mfc_un.res.wrong_if); if (true_vifi >= 0 && mrt->mroute_do_assert && /* pimsm uses asserts, when switching from RPT to SPT, @@ -3029,9 +3029,9 @@ static int ipmr_mfc_seq_show(struct seq_file *seq, void *v) if (it->cache != &mrt->mfc_unres_queue) { seq_printf(seq, " %8lu %8lu %8lu", - mfc->_c.mfc_un.res.pkt, - mfc->_c.mfc_un.res.bytes, - mfc->_c.mfc_un.res.wrong_if); + atomic_long_read(&mfc->_c.mfc_un.res.pkt), + atomic_long_read(&mfc->_c.mfc_un.res.bytes), + atomic_long_read(&mfc->_c.mfc_un.res.wrong_if)); for (n = mfc->_c.mfc_un.res.minvif; n < mfc->_c.mfc_un.res.maxvif; n++) { if (VIF_EXISTS(mrt, n) && diff --git a/net/ipv4/ipmr_base.c b/net/ipv4/ipmr_base.c index f0af12a2f70b..03b6eee407a2 100644 --- a/net/ipv4/ipmr_base.c +++ b/net/ipv4/ipmr_base.c @@ -263,9 +263,9 @@ int mr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb, lastuse = READ_ONCE(c->mfc_un.res.lastuse); lastuse = time_after_eq(jiffies, lastuse) ? jiffies - lastuse : 0; - mfcs.mfcs_packets = c->mfc_un.res.pkt; - mfcs.mfcs_bytes = c->mfc_un.res.bytes; - mfcs.mfcs_wrong_if = c->mfc_un.res.wrong_if; + mfcs.mfcs_packets = atomic_long_read(&c->mfc_un.res.pkt); + mfcs.mfcs_bytes = atomic_long_read(&c->mfc_un.res.bytes); + mfcs.mfcs_wrong_if = atomic_long_read(&c->mfc_un.res.wrong_if); if (nla_put_64bit(skb, RTA_MFC_STATS, sizeof(mfcs), &mfcs, RTA_PAD) || nla_put_u64_64bit(skb, RTA_EXPIRES, jiffies_to_clock_t(lastuse), RTA_PAD)) diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c index 578ff1336afe..535e9f72514c 100644 --- a/net/ipv6/ip6mr.c +++ b/net/ipv6/ip6mr.c @@ -520,9 +520,9 @@ static int ipmr_mfc_seq_show(struct seq_file *seq, void *v) if (it->cache != &mrt->mfc_unres_queue) { seq_printf(seq, " %8lu %8lu %8lu", - mfc->_c.mfc_un.res.pkt, - mfc->_c.mfc_un.res.bytes, - mfc->_c.mfc_un.res.wrong_if); + atomic_long_read(&mfc->_c.mfc_un.res.pkt), + atomic_long_read(&mfc->_c.mfc_un.res.bytes), + atomic_long_read(&mfc->_c.mfc_un.res.wrong_if)); for (n = mfc->_c.mfc_un.res.minvif; n < mfc->_c.mfc_un.res.maxvif; n++) { if (VIF_EXISTS(mrt, n) && @@ -884,7 +884,7 @@ static void ip6mr_update_thresholds(struct mr_table *mrt, cache->mfc_un.res.maxvif = vifi + 1; } } - cache->mfc_un.res.lastuse = jiffies; + WRITE_ONCE(cache->mfc_un.res.lastuse, jiffies); } static int mif6_add(struct net *net, struct mr_table *mrt, @@ -1945,9 +1945,9 @@ int ip6mr_ioctl(struct sock *sk, int cmd, void *arg) c = ip6mr_cache_find(mrt, &sr->src.sin6_addr, &sr->grp.sin6_addr); if (c) { - sr->pktcnt = c->_c.mfc_un.res.pkt; - sr->bytecnt = c->_c.mfc_un.res.bytes; - sr->wrong_if = c->_c.mfc_un.res.wrong_if; + sr->pktcnt = atomic_long_read(&c->_c.mfc_un.res.pkt); + sr->bytecnt = atomic_long_read(&c->_c.mfc_un.res.bytes); + sr->wrong_if = atomic_long_read(&c->_c.mfc_un.res.wrong_if); rcu_read_unlock(); return 0; } @@ -2017,9 +2017,9 @@ int ip6mr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg) rcu_read_lock(); c = ip6mr_cache_find(mrt, &sr.src.sin6_addr, &sr.grp.sin6_addr); if (c) { - sr.pktcnt = c->_c.mfc_un.res.pkt; - sr.bytecnt = c->_c.mfc_un.res.bytes; - sr.wrong_if = c->_c.mfc_un.res.wrong_if; + sr.pktcnt = atomic_long_read(&c->_c.mfc_un.res.pkt); + 
sr.bytecnt = atomic_long_read(&c->_c.mfc_un.res.bytes); + sr.wrong_if = atomic_long_read(&c->_c.mfc_un.res.wrong_if); rcu_read_unlock(); if (copy_to_user(arg, &sr, sizeof(sr))) @@ -2142,9 +2142,9 @@ static void ip6_mr_forward(struct net *net, struct mr_table *mrt, int true_vifi = ip6mr_find_vif(mrt, dev); vif = c->_c.mfc_parent; - c->_c.mfc_un.res.pkt++; - c->_c.mfc_un.res.bytes += skb->len; - c->_c.mfc_un.res.lastuse = jiffies; + atomic_long_inc(&c->_c.mfc_un.res.pkt); + atomic_long_add(skb->len, &c->_c.mfc_un.res.bytes); + WRITE_ONCE(c->_c.mfc_un.res.lastuse, jiffies); if (ipv6_addr_any(&c->mf6c_origin) && true_vifi >= 0) { struct mfc6_cache *cache_proxy; @@ -2162,7 +2162,7 @@ static void ip6_mr_forward(struct net *net, struct mr_table *mrt, * Wrong interface: drop packet and (maybe) send PIM assert. */ if (rcu_access_pointer(mrt->vif_table[vif].dev) != dev) { - c->_c.mfc_un.res.wrong_if++; + atomic_long_inc(&c->_c.mfc_un.res.wrong_if); if (true_vifi >= 0 && mrt->mroute_do_assert && /* pimsm uses asserts, when switching from RPT to SPT, -- 2.51.0 From 0b6f6593aa8c3a05f155c12fd0e7ad33a5149c31 Mon Sep 17 00:00:00 2001 From: "Maciej S. Szmigiero" Date: Thu, 9 Jan 2025 00:33:50 +0100 Subject: [PATCH 06/16] net: wwan: iosm: Fix hibernation by re-binding the driver around it Currently, the driver is seriously broken with respect to the hibernation (S4): after image restore the device is back into IPC_MEM_EXEC_STAGE_BOOT (which AFAIK means bootloader stage) and needs full re-launch of the rest of its firmware, but the driver restore handler treats the device as merely sleeping and just sends it a wake-up command. This wake-up command times out but device nodes (/dev/wwan*) remain accessible. However attempting to use them causes the bootloader to crash and enter IPC_MEM_EXEC_STAGE_CD_READY stage (which apparently means "a crash dump is ready"). It seems that the device cannot be re-initialized from this crashed stage without toggling some reset pin (on my test platform that's apparently what the device _RST ACPI method does). While it would theoretically be possible to rewrite the driver to tear down the whole MUX / IPC layers on hibernation (so the bootloader does not crash from improper access) and then re-launch the device on restore this would require significant refactoring of the driver (believe me, I've tried), since there are quite a few assumptions hard-coded in the driver about the device never being partially de-initialized (like channels other than devlink cannot be closed, for example). Probably this would also need some programming guide for this hardware. Considering that the driver seems orphaned [1] and other people are hitting this issue too [2] fix it by simply unbinding the PCI driver before hibernation and re-binding it after restore, much like USB_QUIRK_RESET_RESUME does for USB devices that exhibit a similar problem. Tested on XMM7360 in HP EliteBook 855 G7 both with s2idle (which uses the existing suspend / resume handlers) and S4 (which uses the new code). [1]: https://lore.kernel.org/all/c248f0b4-2114-4c61-905f-466a786bdebb@leemhuis.info/ [2]: https://github.com/xmm7360/xmm7360-pci/issues/211#issuecomment-1804139413 Reviewed-by: Sergey Ryazanov Signed-off-by: Maciej S. 
Szmigiero Link: https://patch.msgid.link/e60287ebdb0ab54c4075071b72568a40a75d0205.1736372610.git.mail@maciej.szmigiero.name Signed-off-by: Jakub Kicinski --- drivers/net/wwan/iosm/iosm_ipc_pcie.c | 56 ++++++++++++++++++++++++++- 1 file changed, 55 insertions(+), 1 deletion(-) diff --git a/drivers/net/wwan/iosm/iosm_ipc_pcie.c b/drivers/net/wwan/iosm/iosm_ipc_pcie.c index 04517bd3325a..a066977af0be 100644 --- a/drivers/net/wwan/iosm/iosm_ipc_pcie.c +++ b/drivers/net/wwan/iosm/iosm_ipc_pcie.c @@ -6,6 +6,7 @@ #include #include #include +#include #include #include "iosm_ipc_imem.h" @@ -18,6 +19,7 @@ MODULE_LICENSE("GPL v2"); /* WWAN GUID */ static guid_t wwan_acpi_guid = GUID_INIT(0xbad01b75, 0x22a8, 0x4f48, 0x87, 0x92, 0xbd, 0xde, 0x94, 0x67, 0x74, 0x7d); +static bool pci_registered; static void ipc_pcie_resources_release(struct iosm_pcie *ipc_pcie) { @@ -448,7 +450,6 @@ static struct pci_driver iosm_ipc_driver = { }, .id_table = iosm_ipc_ids, }; -module_pci_driver(iosm_ipc_driver); int ipc_pcie_addr_map(struct iosm_pcie *ipc_pcie, unsigned char *data, size_t size, dma_addr_t *mapping, int direction) @@ -530,3 +531,56 @@ void ipc_pcie_kfree_skb(struct iosm_pcie *ipc_pcie, struct sk_buff *skb) IPC_CB(skb)->mapping = 0; dev_kfree_skb(skb); } + +static int pm_notify(struct notifier_block *nb, unsigned long mode, void *_unused) +{ + if (mode == PM_HIBERNATION_PREPARE || mode == PM_RESTORE_PREPARE) { + if (pci_registered) { + pci_unregister_driver(&iosm_ipc_driver); + pci_registered = false; + } + } else if (mode == PM_POST_HIBERNATION || mode == PM_POST_RESTORE) { + if (!pci_registered) { + int ret; + + ret = pci_register_driver(&iosm_ipc_driver); + if (ret) { + pr_err(KBUILD_MODNAME ": unable to re-register PCI driver: %d\n", + ret); + } else { + pci_registered = true; + } + } + } + + return 0; +} + +static struct notifier_block pm_notifier = { + .notifier_call = pm_notify, +}; + +static int __init iosm_ipc_driver_init(void) +{ + int ret; + + ret = pci_register_driver(&iosm_ipc_driver); + if (ret) + return ret; + + pci_registered = true; + + register_pm_notifier(&pm_notifier); + + return 0; +} +module_init(iosm_ipc_driver_init); + +static void __exit iosm_ipc_driver_exit(void) +{ + unregister_pm_notifier(&pm_notifier); + + if (pci_registered) + pci_unregister_driver(&iosm_ipc_driver); +} +module_exit(iosm_ipc_driver_exit); -- 2.51.0 From ebda2f0bbde540ff7da168d2837f8cfb14581e2e Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Tue, 14 Jan 2025 19:53:09 -0800 Subject: [PATCH 07/16] net: add netdev_lock() / netdev_unlock() helpers Add helpers for locking the netdev instance, use it in drivers and the shaper code. This will make grepping for the lock usage much easier, as we extend the lock to cover more fields. 
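As a minimal sketch (the driver function below is hypothetical, not part of this patch), converted code simply swaps the raw mutex calls for the named helpers:

	#include <linux/netdevice.h>

	/* Hypothetical example: netdev-scoped state previously guarded by
	 * mutex_lock(&dev->lock) now uses the helpers, which makes the lock
	 * usage easy to grep for.
	 */
	static void example_update_netdev_state(struct net_device *dev)
	{
		netdev_lock(dev);	/* was: mutex_lock(&dev->lock) */
		/* ... touch fields protected by dev->lock here ... */
		netdev_unlock(dev);	/* was: mutex_unlock(&dev->lock) */
	}
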
Reviewed-by: Joe Damato Reviewed-by: Eric Dumazet Reviewed-by: Kuniyuki Iwashima Reviewed-by: Przemek Kitszel Link: https://patch.msgid.link/20250115035319.559603-2-kuba@kernel.org Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/intel/iavf/iavf_main.c | 74 ++++++++++----------- drivers/net/netdevsim/ethtool.c | 4 +- include/linux/netdevice.h | 23 ++++++- net/shaper/shaper.c | 6 +- 4 files changed, 63 insertions(+), 44 deletions(-) diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c index 7740f446c73f..ab908d620285 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_main.c +++ b/drivers/net/ethernet/intel/iavf/iavf_main.c @@ -1977,7 +1977,7 @@ static void iavf_finish_config(struct work_struct *work) * The dev->lock is needed to update the queue number */ rtnl_lock(); - mutex_lock(&adapter->netdev->lock); + netdev_lock(adapter->netdev); mutex_lock(&adapter->crit_lock); if ((adapter->flags & IAVF_FLAG_SETUP_NETDEV_FEATURES) && @@ -1997,7 +1997,7 @@ static void iavf_finish_config(struct work_struct *work) netif_set_real_num_tx_queues(adapter->netdev, pairs); if (adapter->netdev->reg_state != NETREG_REGISTERED) { - mutex_unlock(&adapter->netdev->lock); + netdev_unlock(adapter->netdev); netdev_released = true; err = register_netdevice(adapter->netdev); if (err) { @@ -2027,7 +2027,7 @@ static void iavf_finish_config(struct work_struct *work) out: mutex_unlock(&adapter->crit_lock); if (!netdev_released) - mutex_unlock(&adapter->netdev->lock); + netdev_unlock(adapter->netdev); rtnl_unlock(); } @@ -2724,10 +2724,10 @@ static void iavf_watchdog_task(struct work_struct *work) struct iavf_hw *hw = &adapter->hw; u32 reg_val; - mutex_lock(&netdev->lock); + netdev_lock(netdev); if (!mutex_trylock(&adapter->crit_lock)) { if (adapter->state == __IAVF_REMOVE) { - mutex_unlock(&netdev->lock); + netdev_unlock(netdev); return; } @@ -2741,35 +2741,35 @@ static void iavf_watchdog_task(struct work_struct *work) case __IAVF_STARTUP: iavf_startup(adapter); mutex_unlock(&adapter->crit_lock); - mutex_unlock(&netdev->lock); + netdev_unlock(netdev); queue_delayed_work(adapter->wq, &adapter->watchdog_task, msecs_to_jiffies(30)); return; case __IAVF_INIT_VERSION_CHECK: iavf_init_version_check(adapter); mutex_unlock(&adapter->crit_lock); - mutex_unlock(&netdev->lock); + netdev_unlock(netdev); queue_delayed_work(adapter->wq, &adapter->watchdog_task, msecs_to_jiffies(30)); return; case __IAVF_INIT_GET_RESOURCES: iavf_init_get_resources(adapter); mutex_unlock(&adapter->crit_lock); - mutex_unlock(&netdev->lock); + netdev_unlock(netdev); queue_delayed_work(adapter->wq, &adapter->watchdog_task, msecs_to_jiffies(1)); return; case __IAVF_INIT_EXTENDED_CAPS: iavf_init_process_extended_caps(adapter); mutex_unlock(&adapter->crit_lock); - mutex_unlock(&netdev->lock); + netdev_unlock(netdev); queue_delayed_work(adapter->wq, &adapter->watchdog_task, msecs_to_jiffies(1)); return; case __IAVF_INIT_CONFIG_ADAPTER: iavf_init_config_adapter(adapter); mutex_unlock(&adapter->crit_lock); - mutex_unlock(&netdev->lock); + netdev_unlock(netdev); queue_delayed_work(adapter->wq, &adapter->watchdog_task, msecs_to_jiffies(1)); return; @@ -2781,7 +2781,7 @@ static void iavf_watchdog_task(struct work_struct *work) * as it can loop forever */ mutex_unlock(&adapter->crit_lock); - mutex_unlock(&netdev->lock); + netdev_unlock(netdev); return; } if (++adapter->aq_wait_count > IAVF_AQ_MAX_ERR) { @@ -2790,7 +2790,7 @@ static void iavf_watchdog_task(struct work_struct *work) adapter->flags |= 
IAVF_FLAG_PF_COMMS_FAILED; iavf_shutdown_adminq(hw); mutex_unlock(&adapter->crit_lock); - mutex_unlock(&netdev->lock); + netdev_unlock(netdev); queue_delayed_work(adapter->wq, &adapter->watchdog_task, (5 * HZ)); return; @@ -2798,7 +2798,7 @@ static void iavf_watchdog_task(struct work_struct *work) /* Try again from failed step*/ iavf_change_state(adapter, adapter->last_state); mutex_unlock(&adapter->crit_lock); - mutex_unlock(&netdev->lock); + netdev_unlock(netdev); queue_delayed_work(adapter->wq, &adapter->watchdog_task, HZ); return; case __IAVF_COMM_FAILED: @@ -2811,7 +2811,7 @@ static void iavf_watchdog_task(struct work_struct *work) iavf_change_state(adapter, __IAVF_INIT_FAILED); adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED; mutex_unlock(&adapter->crit_lock); - mutex_unlock(&netdev->lock); + netdev_unlock(netdev); return; } reg_val = rd32(hw, IAVF_VFGEN_RSTAT) & @@ -2831,14 +2831,14 @@ static void iavf_watchdog_task(struct work_struct *work) adapter->aq_required = 0; adapter->current_op = VIRTCHNL_OP_UNKNOWN; mutex_unlock(&adapter->crit_lock); - mutex_unlock(&netdev->lock); + netdev_unlock(netdev); queue_delayed_work(adapter->wq, &adapter->watchdog_task, msecs_to_jiffies(10)); return; case __IAVF_RESETTING: mutex_unlock(&adapter->crit_lock); - mutex_unlock(&netdev->lock); + netdev_unlock(netdev); queue_delayed_work(adapter->wq, &adapter->watchdog_task, HZ * 2); return; @@ -2869,7 +2869,7 @@ static void iavf_watchdog_task(struct work_struct *work) case __IAVF_REMOVE: default: mutex_unlock(&adapter->crit_lock); - mutex_unlock(&netdev->lock); + netdev_unlock(netdev); return; } @@ -2881,14 +2881,14 @@ static void iavf_watchdog_task(struct work_struct *work) dev_err(&adapter->pdev->dev, "Hardware reset detected\n"); iavf_schedule_reset(adapter, IAVF_FLAG_RESET_PENDING); mutex_unlock(&adapter->crit_lock); - mutex_unlock(&netdev->lock); + netdev_unlock(netdev); queue_delayed_work(adapter->wq, &adapter->watchdog_task, HZ * 2); return; } mutex_unlock(&adapter->crit_lock); - mutex_unlock(&netdev->lock); + netdev_unlock(netdev); restart_watchdog: if (adapter->state >= __IAVF_DOWN) queue_work(adapter->wq, &adapter->adminq_task); @@ -3015,12 +3015,12 @@ static void iavf_reset_task(struct work_struct *work) /* When device is being removed it doesn't make sense to run the reset * task, just return in such a case. */ - mutex_lock(&netdev->lock); + netdev_lock(netdev); if (!mutex_trylock(&adapter->crit_lock)) { if (adapter->state != __IAVF_REMOVE) queue_work(adapter->wq, &adapter->reset_task); - mutex_unlock(&netdev->lock); + netdev_unlock(netdev); return; } @@ -3068,7 +3068,7 @@ static void iavf_reset_task(struct work_struct *work) reg_val); iavf_disable_vf(adapter); mutex_unlock(&adapter->crit_lock); - mutex_unlock(&netdev->lock); + netdev_unlock(netdev); return; /* Do not attempt to reinit. It's dead, Jim. 
*/ } @@ -3209,7 +3209,7 @@ continue_reset: wake_up(&adapter->reset_waitqueue); mutex_unlock(&adapter->crit_lock); - mutex_unlock(&netdev->lock); + netdev_unlock(netdev); return; reset_err: @@ -3220,7 +3220,7 @@ reset_err: iavf_disable_vf(adapter); mutex_unlock(&adapter->crit_lock); - mutex_unlock(&netdev->lock); + netdev_unlock(netdev); dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n"); } @@ -3692,10 +3692,10 @@ exit: if (test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section)) return 0; - mutex_lock(&netdev->lock); + netdev_lock(netdev); netif_set_real_num_rx_queues(netdev, total_qps); netif_set_real_num_tx_queues(netdev, total_qps); - mutex_unlock(&netdev->lock); + netdev_unlock(netdev); return ret; } @@ -4365,7 +4365,7 @@ static int iavf_open(struct net_device *netdev) return -EIO; } - mutex_lock(&netdev->lock); + netdev_lock(netdev); while (!mutex_trylock(&adapter->crit_lock)) { /* If we are in __IAVF_INIT_CONFIG_ADAPTER state the crit_lock * is already taken and iavf_open is called from an upper @@ -4373,7 +4373,7 @@ static int iavf_open(struct net_device *netdev) * We have to leave here to avoid dead lock. */ if (adapter->state == __IAVF_INIT_CONFIG_ADAPTER) { - mutex_unlock(&netdev->lock); + netdev_unlock(netdev); return -EBUSY; } @@ -4424,7 +4424,7 @@ static int iavf_open(struct net_device *netdev) iavf_irq_enable(adapter, true); mutex_unlock(&adapter->crit_lock); - mutex_unlock(&netdev->lock); + netdev_unlock(netdev); return 0; @@ -4437,7 +4437,7 @@ err_setup_tx: iavf_free_all_tx_resources(adapter); err_unlock: mutex_unlock(&adapter->crit_lock); - mutex_unlock(&netdev->lock); + netdev_unlock(netdev); return err; } @@ -4459,12 +4459,12 @@ static int iavf_close(struct net_device *netdev) u64 aq_to_restore; int status; - mutex_lock(&netdev->lock); + netdev_lock(netdev); mutex_lock(&adapter->crit_lock); if (adapter->state <= __IAVF_DOWN_PENDING) { mutex_unlock(&adapter->crit_lock); - mutex_unlock(&netdev->lock); + netdev_unlock(netdev); return 0; } @@ -4498,7 +4498,7 @@ static int iavf_close(struct net_device *netdev) iavf_free_traffic_irqs(adapter); mutex_unlock(&adapter->crit_lock); - mutex_unlock(&netdev->lock); + netdev_unlock(netdev); /* We explicitly don't free resources here because the hardware is * still active and can DMA into memory. 
Resources are cleared in @@ -5375,7 +5375,7 @@ static int iavf_suspend(struct device *dev_d) netif_device_detach(netdev); - mutex_lock(&netdev->lock); + netdev_lock(netdev); mutex_lock(&adapter->crit_lock); if (netif_running(netdev)) { @@ -5387,7 +5387,7 @@ static int iavf_suspend(struct device *dev_d) iavf_reset_interrupt_capability(adapter); mutex_unlock(&adapter->crit_lock); - mutex_unlock(&netdev->lock); + netdev_unlock(netdev); return 0; } @@ -5486,7 +5486,7 @@ static void iavf_remove(struct pci_dev *pdev) if (netdev->reg_state == NETREG_REGISTERED) unregister_netdev(netdev); - mutex_lock(&netdev->lock); + netdev_lock(netdev); mutex_lock(&adapter->crit_lock); dev_info(&adapter->pdev->dev, "Removing device\n"); iavf_change_state(adapter, __IAVF_REMOVE); @@ -5523,7 +5523,7 @@ static void iavf_remove(struct pci_dev *pdev) mutex_destroy(&hw->aq.asq_mutex); mutex_unlock(&adapter->crit_lock); mutex_destroy(&adapter->crit_lock); - mutex_unlock(&netdev->lock); + netdev_unlock(netdev); iounmap(hw->hw_addr); pci_release_regions(pdev); diff --git a/drivers/net/netdevsim/ethtool.c b/drivers/net/netdevsim/ethtool.c index 9e0df40c71e1..12163635b759 100644 --- a/drivers/net/netdevsim/ethtool.c +++ b/drivers/net/netdevsim/ethtool.c @@ -108,10 +108,10 @@ nsim_set_channels(struct net_device *dev, struct ethtool_channels *ch) struct netdevsim *ns = netdev_priv(dev); int err; - mutex_lock(&dev->lock); + netdev_lock(dev); err = netif_set_real_num_queues(dev, ch->combined_count, ch->combined_count); - mutex_unlock(&dev->lock); + netdev_unlock(dev); if (err) return err; diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 3e6336775baf..6d440db35d5f 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -2444,8 +2444,12 @@ struct net_device { u32 napi_defer_hard_irqs; /** - * @lock: protects @net_shaper_hierarchy, feel free to use for other - * netdev-scope protection. Ordering: take after rtnl_lock. + * @lock: netdev-scope lock, protects a small selection of fields. + * Should always be taken using netdev_lock() / netdev_unlock() helpers. + * Drivers are free to use it for other protection. + * + * Protects: @net_shaper_hierarchy. + * Ordering: take after rtnl_lock. 
*/ struct mutex lock; @@ -2671,6 +2675,21 @@ void netif_queue_set_napi(struct net_device *dev, unsigned int queue_index, enum netdev_queue_type type, struct napi_struct *napi); +static inline void netdev_lock(struct net_device *dev) +{ + mutex_lock(&dev->lock); +} + +static inline void netdev_unlock(struct net_device *dev) +{ + mutex_unlock(&dev->lock); +} + +static inline void netdev_assert_locked(struct net_device *dev) +{ + lockdep_assert_held(&dev->lock); +} + static inline void netif_napi_set_irq(struct napi_struct *napi, int irq) { napi->irq = irq; diff --git a/net/shaper/shaper.c b/net/shaper/shaper.c index 15463062fe7b..7101a48bce54 100644 --- a/net/shaper/shaper.c +++ b/net/shaper/shaper.c @@ -40,7 +40,7 @@ static void net_shaper_lock(struct net_shaper_binding *binding) { switch (binding->type) { case NET_SHAPER_BINDING_TYPE_NETDEV: - mutex_lock(&binding->netdev->lock); + netdev_lock(binding->netdev); break; } } @@ -49,7 +49,7 @@ static void net_shaper_unlock(struct net_shaper_binding *binding) { switch (binding->type) { case NET_SHAPER_BINDING_TYPE_NETDEV: - mutex_unlock(&binding->netdev->lock); + netdev_unlock(binding->netdev); break; } } @@ -1398,7 +1398,7 @@ void net_shaper_set_real_num_tx_queues(struct net_device *dev, /* Only drivers implementing shapers support ensure * the lock is acquired in advance. */ - lockdep_assert_held(&dev->lock); + netdev_assert_locked(dev); /* Take action only when decreasing the tx queue number. */ for (i = txq; i < dev->real_num_tx_queues; ++i) { -- 2.51.0 From 5fda3f35349b6b7f22f5f5095a3821261d515075 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Tue, 14 Jan 2025 19:53:10 -0800 Subject: [PATCH 08/16] net: make netdev_lock() protect netdev->reg_state Protect writes to netdev->reg_state with netdev_lock(). From now on holding netdev_lock() is sufficient to prevent the net_device from getting unregistered, so code which wants to hold just a single netdev around no longer needs to hold rtnl_lock. We do not protect the NETREG_UNREGISTERED -> NETREG_RELEASED transition. We'd need to move mutex_destroy(netdev->lock) to .release, but the real reason is that trying to stop the unregistration process mid-way would be unsafe / crazy. Taking references on such devices is not safe, either. So the intended semantics are to lock REGISTERED devices. Reviewed-by: Joe Damato Reviewed-by: Eric Dumazet Reviewed-by: Kuniyuki Iwashima Link: https://patch.msgid.link/20250115035319.559603-3-kuba@kernel.org Signed-off-by: Jakub Kicinski --- include/linux/netdevice.h | 2 +- net/core/dev.c | 6 ++++++ 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 6d440db35d5f..007bcfa383c9 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -2448,7 +2448,7 @@ struct net_device { * Should always be taken using netdev_lock() / netdev_unlock() helpers. * Drivers are free to use it for other protection. * - * Protects: @net_shaper_hierarchy. + * Protects: @reg_state, @net_shaper_hierarchy. * Ordering: take after rtnl_lock. */ struct mutex lock; diff --git a/net/core/dev.c b/net/core/dev.c index 47e6b0f73cfc..bbe6fb9e32cd 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -10695,7 +10695,9 @@ int register_netdevice(struct net_device *dev) ret = netdev_register_kobject(dev); + netdev_lock(dev); WRITE_ONCE(dev->reg_state, ret ? 
NETREG_UNREGISTERED : NETREG_REGISTERED); + netdev_unlock(dev); if (ret) goto err_uninit_notify; @@ -10969,7 +10971,9 @@ void netdev_run_todo(void) continue; } + netdev_lock(dev); WRITE_ONCE(dev->reg_state, NETREG_UNREGISTERED); + netdev_unlock(dev); linkwatch_sync_dev(dev); } @@ -11575,7 +11579,9 @@ void unregister_netdevice_many_notify(struct list_head *head, list_for_each_entry(dev, head, unreg_list) { /* And unlink it from device chain. */ unlist_netdevice(dev); + netdev_lock(dev); WRITE_ONCE(dev->reg_state, NETREG_UNREGISTERING); + netdev_unlock(dev); } flush_all_backlogs(); -- 2.51.0 From 2628f4958cd4a8be2f14b611c67ef9766c5ee564 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Tue, 14 Jan 2025 19:53:11 -0800 Subject: [PATCH 09/16] net: add helpers for lookup and walking netdevs under netdev_lock() Add helpers for accessing netdevs under netdev_lock(). There's some careful handling needed to find the device and lock it safely, without it getting unregistered, and without taking rtnl_lock (the latter being the whole point of the new locking, after all). Reviewed-by: Eric Dumazet Reviewed-by: Kuniyuki Iwashima Link: https://patch.msgid.link/20250115035319.559603-4-kuba@kernel.org Signed-off-by: Jakub Kicinski --- net/core/dev.c | 110 +++++++++++++++++++++++++++++++++++++++++++++++++ net/core/dev.h | 16 +++++++ 2 files changed, 126 insertions(+) diff --git a/net/core/dev.c b/net/core/dev.c index bbe6fb9e32cd..968603cfed09 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -784,6 +784,49 @@ struct napi_struct *netdev_napi_by_id(struct net *net, unsigned int napi_id) return napi; } +/** + * netdev_napi_by_id_lock() - find a device by NAPI ID and lock it + * @net: the applicable net namespace + * @napi_id: ID of a NAPI of a target device + * + * Find a NAPI instance with @napi_id. Lock its device. + * The device must be in %NETREG_REGISTERED state for lookup to succeed. + * netdev_unlock() must be called to release it. + * + * Return: pointer to NAPI, its device with lock held, NULL if not found. + */ +struct napi_struct * +netdev_napi_by_id_lock(struct net *net, unsigned int napi_id) +{ + struct napi_struct *napi; + struct net_device *dev; + + rcu_read_lock(); + napi = netdev_napi_by_id(net, napi_id); + if (!napi || READ_ONCE(napi->dev->reg_state) != NETREG_REGISTERED) { + rcu_read_unlock(); + return NULL; + } + + dev = napi->dev; + dev_hold(dev); + rcu_read_unlock(); + + dev = __netdev_put_lock(dev); + if (!dev) + return NULL; + + rcu_read_lock(); + napi = netdev_napi_by_id(net, napi_id); + if (napi && napi->dev != dev) + napi = NULL; + rcu_read_unlock(); + + if (!napi) + netdev_unlock(dev); + return napi; +} + /** * __dev_get_by_name - find a device by its name * @net: the applicable net namespace @@ -972,6 +1015,73 @@ struct net_device *dev_get_by_napi_id(unsigned int napi_id) return napi ? napi->dev : NULL; } +/* Release the held reference on the net_device, and if the net_device + * is still registered try to lock the instance lock. If device is being + * unregistered NULL will be returned (but the reference has been released, + * either way!) + * + * This helper is intended for locking net_device after it has been looked up + * using a lockless lookup helper. Lock prevents the instance from going away. 
+ */ +struct net_device *__netdev_put_lock(struct net_device *dev) +{ + netdev_lock(dev); + if (dev->reg_state > NETREG_REGISTERED) { + netdev_unlock(dev); + dev_put(dev); + return NULL; + } + dev_put(dev); + return dev; +} + +/** + * netdev_get_by_index_lock() - find a device by its ifindex + * @net: the applicable net namespace + * @ifindex: index of device + * + * Search for an interface by index. If a valid device + * with @ifindex is found it will be returned with netdev->lock held. + * netdev_unlock() must be called to release it. + * + * Return: pointer to a device with lock held, NULL if not found. + */ +struct net_device *netdev_get_by_index_lock(struct net *net, int ifindex) +{ + struct net_device *dev; + + dev = dev_get_by_index(net, ifindex); + if (!dev) + return NULL; + + return __netdev_put_lock(dev); +} + +struct net_device * +netdev_xa_find_lock(struct net *net, struct net_device *dev, + unsigned long *index) +{ + if (dev) + netdev_unlock(dev); + + do { + rcu_read_lock(); + dev = xa_find(&net->dev_by_index, index, ULONG_MAX, XA_PRESENT); + if (!dev) { + rcu_read_unlock(); + return NULL; + } + dev_hold(dev); + rcu_read_unlock(); + + dev = __netdev_put_lock(dev); + if (dev) + return dev; + + (*index)++; + } while (true); +} + static DEFINE_SEQLOCK(netdev_rename_lock); void netdev_copy_name(struct net_device *dev, char *name) diff --git a/net/core/dev.h b/net/core/dev.h index d8966847794c..25ae732c0775 100644 --- a/net/core/dev.h +++ b/net/core/dev.h @@ -2,6 +2,7 @@ #ifndef _NET_CORE_DEV_H #define _NET_CORE_DEV_H +#include #include #include #include @@ -23,8 +24,23 @@ struct sd_flow_limit { extern int netdev_flow_limit_table_len; struct napi_struct *netdev_napi_by_id(struct net *net, unsigned int napi_id); +struct napi_struct * +netdev_napi_by_id_lock(struct net *net, unsigned int napi_id); struct net_device *dev_get_by_napi_id(unsigned int napi_id); +struct net_device *netdev_get_by_index_lock(struct net *net, int ifindex); +struct net_device *__netdev_put_lock(struct net_device *dev); +struct net_device * +netdev_xa_find_lock(struct net *net, struct net_device *dev, + unsigned long *index); + +DEFINE_FREE(netdev_unlock, struct net_device *, if (_T) netdev_unlock(_T)); + +#define for_each_netdev_lock_scoped(net, var_name, ifindex) \ + for (struct net_device *var_name __free(netdev_unlock) = NULL; \ + (var_name = netdev_xa_find_lock(net, var_name, &ifindex)); \ + ifindex++) + #ifdef CONFIG_PROC_FS int __init dev_proc_init(void); #else -- 2.51.0 From 5112457f3d8e41f987908266068af88ef9f3ab78 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Tue, 14 Jan 2025 19:53:12 -0800 Subject: [PATCH 10/16] net: add netdev->up protected by netdev_lock() Some uAPI (netdev netlink) hide net_device's sub-objects while the interface is down to ensure uniform behavior across drivers. To remove the rtnl_lock dependency from those uAPIs we need a way to safely tell if the device is down or up. Add an indication of whether device is open or closed, protected by netdev->lock. The semantics are the same as IFF_UP, but taking netdev_lock around every write to ->flags would be a lot of code churn. We don't want to blanket the entire open / close path by netdev_lock, because it will prevent us from applying it to specific structures - core helpers won't be able to take that lock from any function called by the drivers on open/close paths. So the state of the flag is "pessimistic", as in it may report false negatives, but never false positives. 
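A sketch of the intended reader pattern (function name hypothetical): code that must not take rtnl_lock checks @up under the instance lock and accepts the possible false negative:

	#include <linux/netdevice.h>

	/* Hypothetical reader: safe without rtnl_lock; may see "down" while
	 * the device is mid-open/close, but never "up" for a closed device.
	 */
	static bool example_netdev_is_up(struct net_device *dev)
	{
		bool up;

		netdev_lock(dev);
		up = dev->up;
		netdev_unlock(dev);

		return up;
	}
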
Reviewed-by: Joe Damato Reviewed-by: Eric Dumazet Reviewed-by: Kuniyuki Iwashima Link: https://patch.msgid.link/20250115035319.559603-5-kuba@kernel.org Signed-off-by: Jakub Kicinski --- include/linux/netdevice.h | 14 +++++++++++++- net/core/dev.c | 4 ++-- net/core/dev.h | 12 ++++++++++++ 3 files changed, 27 insertions(+), 3 deletions(-) diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 007bcfa383c9..cac81b0a166f 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -2443,12 +2443,24 @@ struct net_device { unsigned long gro_flush_timeout; u32 napi_defer_hard_irqs; + /** + * @up: copy of @state's IFF_UP, but safe to read with just @lock. + * May report false negatives while the device is being opened + * or closed (@lock does not protect .ndo_open, or .ndo_close). + */ + bool up; + /** * @lock: netdev-scope lock, protects a small selection of fields. * Should always be taken using netdev_lock() / netdev_unlock() helpers. * Drivers are free to use it for other protection. * - * Protects: @reg_state, @net_shaper_hierarchy. + * Protects: + * @net_shaper_hierarchy, @reg_state + * + * Partially protects (writers must hold both @lock and rtnl_lock): + * @up + * * Ordering: take after rtnl_lock. */ struct mutex lock; diff --git a/net/core/dev.c b/net/core/dev.c index 968603cfed09..65bf95593da7 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -1619,7 +1619,7 @@ static int __dev_open(struct net_device *dev, struct netlink_ext_ack *extack) if (ret) clear_bit(__LINK_STATE_START, &dev->state); else { - dev->flags |= IFF_UP; + netif_set_up(dev, true); dev_set_rx_mode(dev); dev_activate(dev); add_device_randomness(dev->dev_addr, dev->addr_len); @@ -1698,7 +1698,7 @@ static void __dev_close_many(struct list_head *head) if (ops->ndo_stop) ops->ndo_stop(dev); - dev->flags &= ~IFF_UP; + netif_set_up(dev, false); netpoll_poll_enable(dev); } } diff --git a/net/core/dev.h b/net/core/dev.h index 25ae732c0775..ef37e2dd44f4 100644 --- a/net/core/dev.h +++ b/net/core/dev.h @@ -128,6 +128,18 @@ void __dev_notify_flags(struct net_device *dev, unsigned int old_flags, void unregister_netdevice_many_notify(struct list_head *head, u32 portid, const struct nlmsghdr *nlh); +static inline void netif_set_up(struct net_device *dev, bool value) +{ + if (value) + dev->flags |= IFF_UP; + else + dev->flags &= ~IFF_UP; + + netdev_lock(dev); + dev->up = value; + netdev_unlock(dev); +} + static inline void netif_set_gso_max_size(struct net_device *dev, unsigned int size) { -- 2.51.0 From 1b23cdbd2bbc4b40e21c12ae86c2781e347ff0f8 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Tue, 14 Jan 2025 19:53:13 -0800 Subject: [PATCH 11/16] net: protect netdev->napi_list with netdev_lock() Hold netdev->lock when NAPIs are getting added or removed. This will allow safe access to NAPI instances of a net_device without rtnl_lock. Create a family of helpers which assume the lock is already taken. Switch iavf to them, as it makes extensive use of netdev->lock, already. 
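A minimal sketch of the driver-facing pattern (hypothetical function; iavf's real conversion is in the diff below): callers that already hold, or explicitly take, the instance lock use the _locked flavors:

	#include <linux/netdevice.h>

	/* Hypothetical example: add several NAPIs under one instance-lock
	 * section instead of letting netif_napi_add() take the lock each time.
	 */
	static void example_add_napis(struct net_device *dev,
				      struct napi_struct *napis, int n,
				      int (*poll)(struct napi_struct *, int))
	{
		int i;

		netdev_lock(dev);	/* protects dev->napi_list */
		for (i = 0; i < n; i++)
			netif_napi_add_locked(dev, &napis[i], poll);
		netdev_unlock(dev);
	}
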
Reviewed-by: Joe Damato Reviewed-by: Eric Dumazet Reviewed-by: Kuniyuki Iwashima Link: https://patch.msgid.link/20250115035319.559603-6-kuba@kernel.org Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/intel/iavf/iavf_main.c | 6 +-- include/linux/netdevice.h | 54 ++++++++++++++++++--- net/core/dev.c | 15 ++++-- 3 files changed, 60 insertions(+), 15 deletions(-) diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c index ab908d620285..2db97c5d9f9e 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_main.c +++ b/drivers/net/ethernet/intel/iavf/iavf_main.c @@ -1800,8 +1800,8 @@ static int iavf_alloc_q_vectors(struct iavf_adapter *adapter) q_vector->v_idx = q_idx; q_vector->reg_idx = q_idx; cpumask_copy(&q_vector->affinity_mask, cpu_possible_mask); - netif_napi_add(adapter->netdev, &q_vector->napi, - iavf_napi_poll); + netif_napi_add_locked(adapter->netdev, &q_vector->napi, + iavf_napi_poll); } return 0; @@ -1827,7 +1827,7 @@ static void iavf_free_q_vectors(struct iavf_adapter *adapter) for (q_idx = 0; q_idx < num_q_vectors; q_idx++) { struct iavf_q_vector *q_vector = &adapter->q_vectors[q_idx]; - netif_napi_del(&q_vector->napi); + netif_napi_del_locked(&q_vector->napi); } kfree(adapter->q_vectors); adapter->q_vectors = NULL; diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index cac81b0a166f..3130a8c807dd 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -2456,7 +2456,7 @@ struct net_device { * Drivers are free to use it for other protection. * * Protects: - * @net_shaper_hierarchy, @reg_state + * @napi_list, @net_shaper_hierarchy, @reg_state * * Partially protects (writers must hold both @lock and rtnl_lock): * @up @@ -2712,8 +2712,19 @@ static inline void netif_napi_set_irq(struct napi_struct *napi, int irq) */ #define NAPI_POLL_WEIGHT 64 -void netif_napi_add_weight(struct net_device *dev, struct napi_struct *napi, - int (*poll)(struct napi_struct *, int), int weight); +void netif_napi_add_weight_locked(struct net_device *dev, + struct napi_struct *napi, + int (*poll)(struct napi_struct *, int), + int weight); + +static inline void +netif_napi_add_weight(struct net_device *dev, struct napi_struct *napi, + int (*poll)(struct napi_struct *, int), int weight) +{ + netdev_lock(dev); + netif_napi_add_weight_locked(dev, napi, poll, weight); + netdev_unlock(dev); +} /** * netif_napi_add() - initialize a NAPI context @@ -2731,6 +2742,13 @@ netif_napi_add(struct net_device *dev, struct napi_struct *napi, netif_napi_add_weight(dev, napi, poll, NAPI_POLL_WEIGHT); } +static inline void +netif_napi_add_locked(struct net_device *dev, struct napi_struct *napi, + int (*poll)(struct napi_struct *, int)) +{ + netif_napi_add_weight_locked(dev, napi, poll, NAPI_POLL_WEIGHT); +} + static inline void netif_napi_add_tx_weight(struct net_device *dev, struct napi_struct *napi, @@ -2741,6 +2759,15 @@ netif_napi_add_tx_weight(struct net_device *dev, netif_napi_add_weight(dev, napi, poll, weight); } +static inline void +netif_napi_add_config_locked(struct net_device *dev, struct napi_struct *napi, + int (*poll)(struct napi_struct *, int), int index) +{ + napi->index = index; + napi->config = &dev->napi_config[index]; + netif_napi_add_weight_locked(dev, napi, poll, NAPI_POLL_WEIGHT); +} + /** * netif_napi_add_config - initialize a NAPI context with persistent config * @dev: network device @@ -2752,9 +2779,9 @@ static inline void netif_napi_add_config(struct net_device *dev, struct napi_struct *napi, int (*poll)(struct 
napi_struct *, int), int index) { - napi->index = index; - napi->config = &dev->napi_config[index]; - netif_napi_add_weight(dev, napi, poll, NAPI_POLL_WEIGHT); + netdev_lock(dev); + netif_napi_add_config_locked(dev, napi, poll, index); + netdev_unlock(dev); } /** @@ -2774,6 +2801,8 @@ static inline void netif_napi_add_tx(struct net_device *dev, netif_napi_add_tx_weight(dev, napi, poll, NAPI_POLL_WEIGHT); } +void __netif_napi_del_locked(struct napi_struct *napi); + /** * __netif_napi_del - remove a NAPI context * @napi: NAPI context @@ -2782,7 +2811,18 @@ static inline void netif_napi_add_tx(struct net_device *dev, * containing @napi. Drivers might want to call this helper to combine * all the needed RCU grace periods into a single one. */ -void __netif_napi_del(struct napi_struct *napi); +static inline void __netif_napi_del(struct napi_struct *napi) +{ + netdev_lock(napi->dev); + __netif_napi_del_locked(napi); + netdev_unlock(napi->dev); +} + +static inline void netif_napi_del_locked(struct napi_struct *napi) +{ + __netif_napi_del_locked(napi); + synchronize_net(); +} /** * netif_napi_del - remove a NAPI context diff --git a/net/core/dev.c b/net/core/dev.c index 65bf95593da7..235707c0f631 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -6910,9 +6910,12 @@ netif_napi_dev_list_add(struct net_device *dev, struct napi_struct *napi) list_add_rcu(&napi->dev_list, higher); /* adds after higher */ } -void netif_napi_add_weight(struct net_device *dev, struct napi_struct *napi, - int (*poll)(struct napi_struct *, int), int weight) +void netif_napi_add_weight_locked(struct net_device *dev, + struct napi_struct *napi, + int (*poll)(struct napi_struct *, int), + int weight) { + netdev_assert_locked(dev); if (WARN_ON(test_and_set_bit(NAPI_STATE_LISTED, &napi->state))) return; @@ -6953,7 +6956,7 @@ void netif_napi_add_weight(struct net_device *dev, struct napi_struct *napi, dev->threaded = false; netif_napi_set_irq(napi, -1); } -EXPORT_SYMBOL(netif_napi_add_weight); +EXPORT_SYMBOL(netif_napi_add_weight_locked); void napi_disable(struct napi_struct *n) { @@ -7024,8 +7027,10 @@ static void flush_gro_hash(struct napi_struct *napi) } /* Must be called in process context */ -void __netif_napi_del(struct napi_struct *napi) +void __netif_napi_del_locked(struct napi_struct *napi) { + netdev_assert_locked(napi->dev); + if (!test_and_clear_bit(NAPI_STATE_LISTED, &napi->state)) return; @@ -7045,7 +7050,7 @@ void __netif_napi_del(struct napi_struct *napi) napi->thread = NULL; } } -EXPORT_SYMBOL(__netif_napi_del); +EXPORT_SYMBOL(__netif_napi_del_locked); static int __napi_poll(struct napi_struct *n, bool *repoll) { -- 2.51.0 From 413f0271f3966e0c73d4937963f19335af19e628 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Tue, 14 Jan 2025 19:53:14 -0800 Subject: [PATCH 12/16] net: protect NAPI enablement with netdev_lock() Wrap napi_enable() / napi_disable() with netdev_lock(). Provide the "already locked" flavor of the API. iavf needs the usual adjustment. A number of drivers call napi_enable() under a spin lock, so they have to be modified to take netdev_lock() first, then spin lock then call napi_enable_locked(). Protecting napi_enable() implies that napi->napi_id is protected by netdev_lock(). 
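The ordering described above (netdev_lock() first, then the driver's spin lock) looks roughly like the sketch below for a hypothetical driver "foo"; it mirrors the via-velocity and pcnet32 hunks that follow. Note that napi_disable_locked() may sleep, so it sits outside the spin-locked region, while napi_enable_locked() does not sleep and may be called inside it.

struct foo_priv {			/* hypothetical driver private data */
	spinlock_t lock;
	struct napi_struct napi;
};

static void foo_reconfigure(struct net_device *dev, struct foo_priv *fp)
{
	unsigned long flags;

	netdev_lock(dev);			/* 1: netdev lock */
	napi_disable_locked(&fp->napi);		/* may sleep; before the spin lock */

	spin_lock_irqsave(&fp->lock, flags);	/* 2: driver spin lock */
	/* ... re-program rings / hardware here ... */
	napi_enable_locked(&fp->napi);		/* does not sleep; fine under the spin lock */
	spin_unlock_irqrestore(&fp->lock, flags);

	netdev_unlock(dev);
}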
Acked-by: Francois Romieu # via-velocity Reviewed-by: Eric Dumazet Reviewed-by: Kuniyuki Iwashima Link: https://patch.msgid.link/20250115035319.559603-7-kuba@kernel.org Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/amd/pcnet32.c | 11 +++++- drivers/net/ethernet/intel/iavf/iavf_main.c | 4 +- drivers/net/ethernet/marvell/mvneta.c | 5 ++- drivers/net/ethernet/via/via-velocity.c | 6 ++- include/linux/netdevice.h | 11 ++---- net/core/dev.c | 41 +++++++++++++++++---- 6 files changed, 56 insertions(+), 22 deletions(-) diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c index 72db9f9e7bee..c6bd803f5b0c 100644 --- a/drivers/net/ethernet/amd/pcnet32.c +++ b/drivers/net/ethernet/amd/pcnet32.c @@ -462,7 +462,7 @@ static void pcnet32_netif_start(struct net_device *dev) val = lp->a->read_csr(ioaddr, CSR3); val &= 0x00ff; lp->a->write_csr(ioaddr, CSR3, val); - napi_enable(&lp->napi); + napi_enable_locked(&lp->napi); } /* @@ -889,6 +889,7 @@ static int pcnet32_set_ringparam(struct net_device *dev, if (netif_running(dev)) pcnet32_netif_stop(dev); + netdev_lock(dev); spin_lock_irqsave(&lp->lock, flags); lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */ @@ -920,6 +921,7 @@ static int pcnet32_set_ringparam(struct net_device *dev, } spin_unlock_irqrestore(&lp->lock, flags); + netdev_unlock(dev); netif_info(lp, drv, dev, "Ring Param Settings: RX: %d, TX: %d\n", lp->rx_ring_size, lp->tx_ring_size); @@ -985,6 +987,7 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1) if (netif_running(dev)) pcnet32_netif_stop(dev); + netdev_lock(dev); spin_lock_irqsave(&lp->lock, flags); lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */ @@ -1122,6 +1125,7 @@ clean_up: lp->a->write_bcr(ioaddr, 20, 4); /* return to 16bit mode */ } spin_unlock_irqrestore(&lp->lock, flags); + netdev_unlock(dev); return rc; } /* end pcnet32_loopback_test */ @@ -2101,6 +2105,7 @@ static int pcnet32_open(struct net_device *dev) return -EAGAIN; } + netdev_lock(dev); spin_lock_irqsave(&lp->lock, flags); /* Check for a valid station address */ if (!is_valid_ether_addr(dev->dev_addr)) { @@ -2266,7 +2271,7 @@ static int pcnet32_open(struct net_device *dev) goto err_free_ring; } - napi_enable(&lp->napi); + napi_enable_locked(&lp->napi); /* Re-initialize the PCNET32, and start it when done. 
*/ lp->a->write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff)); @@ -2300,6 +2305,7 @@ static int pcnet32_open(struct net_device *dev) lp->a->read_csr(ioaddr, CSR0)); spin_unlock_irqrestore(&lp->lock, flags); + netdev_unlock(dev); return 0; /* Always succeed */ @@ -2315,6 +2321,7 @@ err_free_ring: err_free_irq: spin_unlock_irqrestore(&lp->lock, flags); + netdev_unlock(dev); free_irq(dev->irq, dev); return rc; } diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c index 2db97c5d9f9e..cbfaaa5b7d02 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_main.c +++ b/drivers/net/ethernet/intel/iavf/iavf_main.c @@ -1180,7 +1180,7 @@ static void iavf_napi_enable_all(struct iavf_adapter *adapter) q_vector = &adapter->q_vectors[q_idx]; napi = &q_vector->napi; - napi_enable(napi); + napi_enable_locked(napi); } } @@ -1196,7 +1196,7 @@ static void iavf_napi_disable_all(struct iavf_adapter *adapter) for (q_idx = 0; q_idx < q_vectors; q_idx++) { q_vector = &adapter->q_vectors[q_idx]; - napi_disable(&q_vector->napi); + napi_disable_locked(&q_vector->napi); } } diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index 9e79a60baebc..aa049cee576d 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -4392,6 +4392,7 @@ static int mvneta_cpu_online(unsigned int cpu, struct hlist_node *node) if (pp->neta_armada3700) return 0; + netdev_lock(port->napi.dev); spin_lock(&pp->lock); /* * Configuring the driver for a new CPU while the driver is @@ -4418,7 +4419,7 @@ static int mvneta_cpu_online(unsigned int cpu, struct hlist_node *node) /* Mask all ethernet port interrupts */ on_each_cpu(mvneta_percpu_mask_interrupt, pp, true); - napi_enable(&port->napi); + napi_enable_locked(&port->napi); /* * Enable per-CPU interrupts on the CPU that is @@ -4439,6 +4440,8 @@ static int mvneta_cpu_online(unsigned int cpu, struct hlist_node *node) MVNETA_CAUSE_LINK_CHANGE); netif_tx_start_all_queues(pp->dev); spin_unlock(&pp->lock); + netdev_unlock(port->napi.dev); + return 0; } diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c index dd4a07c97eee..5aa93144a4f5 100644 --- a/drivers/net/ethernet/via/via-velocity.c +++ b/drivers/net/ethernet/via/via-velocity.c @@ -2320,7 +2320,8 @@ static int velocity_change_mtu(struct net_device *dev, int new_mtu) if (ret < 0) goto out_free_tmp_vptr_1; - napi_disable(&vptr->napi); + netdev_lock(dev); + napi_disable_locked(&vptr->napi); spin_lock_irqsave(&vptr->lock, flags); @@ -2342,12 +2343,13 @@ static int velocity_change_mtu(struct net_device *dev, int new_mtu) velocity_give_many_rx_descs(vptr); - napi_enable(&vptr->napi); + napi_enable_locked(&vptr->napi); mac_enable_int(vptr->mac_regs); netif_start_queue(dev); spin_unlock_irqrestore(&vptr->lock, flags); + netdev_unlock(dev); velocity_free_rings(tmp_vptr); diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 3130a8c807dd..3941e4d0073e 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -382,7 +382,7 @@ struct napi_struct { struct sk_buff *skb; struct list_head rx_list; /* Pending GRO_NORMAL skbs */ int rx_count; /* length of rx_list */ - unsigned int napi_id; + unsigned int napi_id; /* protected by netdev_lock */ struct hrtimer timer; struct task_struct *thread; unsigned long gro_flush_timeout; @@ -570,16 +570,11 @@ static inline bool napi_complete(struct napi_struct *n) int dev_set_threaded(struct net_device *dev, bool threaded); -/** - * 
napi_disable - prevent NAPI from scheduling - * @n: NAPI context - * - * Stop NAPI from being scheduled on this context. - * Waits till any outstanding processing completes. - */ void napi_disable(struct napi_struct *n); +void napi_disable_locked(struct napi_struct *n); void napi_enable(struct napi_struct *n); +void napi_enable_locked(struct napi_struct *n); /** * napi_synchronize - wait until NAPI is not running diff --git a/net/core/dev.c b/net/core/dev.c index 235707c0f631..cfd88bc6ce5f 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -6958,11 +6958,13 @@ void netif_napi_add_weight_locked(struct net_device *dev, } EXPORT_SYMBOL(netif_napi_add_weight_locked); -void napi_disable(struct napi_struct *n) +void napi_disable_locked(struct napi_struct *n) { unsigned long val, new; might_sleep(); + netdev_assert_locked(n->dev); + set_bit(NAPI_STATE_DISABLE, &n->state); val = READ_ONCE(n->state); @@ -6985,16 +6987,25 @@ void napi_disable(struct napi_struct *n) clear_bit(NAPI_STATE_DISABLE, &n->state); } -EXPORT_SYMBOL(napi_disable); +EXPORT_SYMBOL(napi_disable_locked); /** - * napi_enable - enable NAPI scheduling - * @n: NAPI context + * napi_disable() - prevent NAPI from scheduling + * @n: NAPI context * - * Resume NAPI from being scheduled on this context. - * Must be paired with napi_disable. + * Stop NAPI from being scheduled on this context. + * Waits till any outstanding processing completes. + * Takes netdev_lock() for associated net_device. */ -void napi_enable(struct napi_struct *n) +void napi_disable(struct napi_struct *n) +{ + netdev_lock(n->dev); + napi_disable_locked(n); + netdev_unlock(n->dev); +} +EXPORT_SYMBOL(napi_disable); + +void napi_enable_locked(struct napi_struct *n) { unsigned long new, val = READ_ONCE(n->state); @@ -7011,6 +7022,22 @@ void napi_enable(struct napi_struct *n) new |= NAPIF_STATE_THREADED; } while (!try_cmpxchg(&n->state, &val, new)); } +EXPORT_SYMBOL(napi_enable_locked); + +/** + * napi_enable() - enable NAPI scheduling + * @n: NAPI context + * + * Enable scheduling of a NAPI instance. + * Must be paired with napi_disable(). + * Takes netdev_lock() for associated net_device. + */ +void napi_enable(struct napi_struct *n) +{ + netdev_lock(n->dev); + napi_enable_locked(n); + netdev_unlock(n->dev); +} EXPORT_SYMBOL(napi_enable); static void flush_gro_hash(struct napi_struct *napi) -- 2.51.0 From eeeec1d4c6930691fc59858799b8a7443d9d30ee Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Tue, 14 Jan 2025 19:53:15 -0800 Subject: [PATCH 13/16] net: make netdev netlink ops hold netdev_lock() In prep for dropping rtnl_lock, start locking netdev->lock in netlink genl ops. We need to be using netdev->up instead of flags & IFF_UP. We can remove the RCU lock protection for the NAPI since NAPI list is protected by netdev->lock already. 
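The resulting pattern in the ops (spelled out fully in the diff below) reduces to the sketch here; foo_napi_doit() and the fill step are placeholders, while netdev_napi_by_id_lock() and netdev_unlock() are the helpers added earlier in the series.

static int foo_napi_doit(struct net *net, u32 napi_id, struct sk_buff *rsp)
{
	struct napi_struct *napi;
	int err = 0;

	/* returns the NAPI with its netdev's lock held, or NULL */
	napi = netdev_napi_by_id_lock(net, napi_id);
	if (!napi)
		return -ENOENT;

	/* ... fill @rsp from @napi; napi->dev->up is stable here ... */
	netdev_unlock(napi->dev);
	return err;
}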
Reviewed-by: Eric Dumazet Reviewed-by: Kuniyuki Iwashima Link: https://patch.msgid.link/20250115035319.559603-8-kuba@kernel.org Signed-off-by: Jakub Kicinski --- net/core/dev.c | 3 ++- net/core/dev.h | 1 - net/core/netdev-genl.c | 46 +++++++++++++++++++++++------------------- 3 files changed, 27 insertions(+), 23 deletions(-) diff --git a/net/core/dev.c b/net/core/dev.c index cfd88bc6ce5f..2ef50a3ee4a1 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -768,7 +768,8 @@ static struct napi_struct *napi_by_id(unsigned int napi_id) } /* must be called under rcu_read_lock(), as we dont take a reference */ -struct napi_struct *netdev_napi_by_id(struct net *net, unsigned int napi_id) +static struct napi_struct * +netdev_napi_by_id(struct net *net, unsigned int napi_id) { struct napi_struct *napi; diff --git a/net/core/dev.h b/net/core/dev.h index ef37e2dd44f4..a5b166bbd169 100644 --- a/net/core/dev.h +++ b/net/core/dev.h @@ -23,7 +23,6 @@ struct sd_flow_limit { extern int netdev_flow_limit_table_len; -struct napi_struct *netdev_napi_by_id(struct net *net, unsigned int napi_id); struct napi_struct * netdev_napi_by_id_lock(struct net *net, unsigned int napi_id); struct net_device *dev_get_by_napi_id(unsigned int napi_id); diff --git a/net/core/netdev-genl.c b/net/core/netdev-genl.c index c59619a2ec23..810a446ab62c 100644 --- a/net/core/netdev-genl.c +++ b/net/core/netdev-genl.c @@ -167,7 +167,7 @@ netdev_nl_napi_fill_one(struct sk_buff *rsp, struct napi_struct *napi, void *hdr; pid_t pid; - if (!(napi->dev->flags & IFF_UP)) + if (!napi->dev->up) return 0; hdr = genlmsg_iput(rsp, info); @@ -230,17 +230,16 @@ int netdev_nl_napi_get_doit(struct sk_buff *skb, struct genl_info *info) return -ENOMEM; rtnl_lock(); - rcu_read_lock(); - napi = netdev_napi_by_id(genl_info_net(info), napi_id); + napi = netdev_napi_by_id_lock(genl_info_net(info), napi_id); if (napi) { err = netdev_nl_napi_fill_one(rsp, napi, info); + netdev_unlock(napi->dev); } else { NL_SET_BAD_ATTR(info->extack, info->attrs[NETDEV_A_NAPI_ID]); err = -ENOENT; } - rcu_read_unlock(); rtnl_unlock(); if (err) { @@ -266,7 +265,7 @@ netdev_nl_napi_dump_one(struct net_device *netdev, struct sk_buff *rsp, unsigned int prev_id; int err = 0; - if (!(netdev->flags & IFF_UP)) + if (!netdev->up) return err; prev_id = UINT_MAX; @@ -303,13 +302,15 @@ int netdev_nl_napi_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb) rtnl_lock(); if (ifindex) { - netdev = __dev_get_by_index(net, ifindex); - if (netdev) + netdev = netdev_get_by_index_lock(net, ifindex); + if (netdev) { err = netdev_nl_napi_dump_one(netdev, skb, info, ctx); - else + netdev_unlock(netdev); + } else { err = -ENODEV; + } } else { - for_each_netdev_dump(net, netdev, ctx->ifindex) { + for_each_netdev_lock_scoped(net, netdev, ctx->ifindex) { err = netdev_nl_napi_dump_one(netdev, skb, info, ctx); if (err < 0) break; @@ -358,17 +359,16 @@ int netdev_nl_napi_set_doit(struct sk_buff *skb, struct genl_info *info) napi_id = nla_get_u32(info->attrs[NETDEV_A_NAPI_ID]); rtnl_lock(); - rcu_read_lock(); - napi = netdev_napi_by_id(genl_info_net(info), napi_id); + napi = netdev_napi_by_id_lock(genl_info_net(info), napi_id); if (napi) { err = netdev_nl_napi_set_config(napi, info); + netdev_unlock(napi->dev); } else { NL_SET_BAD_ATTR(info->extack, info->attrs[NETDEV_A_NAPI_ID]); err = -ENOENT; } - rcu_read_unlock(); rtnl_unlock(); return err; @@ -442,7 +442,7 @@ netdev_nl_queue_fill(struct sk_buff *rsp, struct net_device *netdev, u32 q_idx, { int err; - if (!(netdev->flags & IFF_UP)) + if 
(!netdev->up) return -ENOENT; err = netdev_nl_queue_validate(netdev, q_idx, q_type); @@ -474,11 +474,13 @@ int netdev_nl_queue_get_doit(struct sk_buff *skb, struct genl_info *info) rtnl_lock(); - netdev = __dev_get_by_index(genl_info_net(info), ifindex); - if (netdev) + netdev = netdev_get_by_index_lock(genl_info_net(info), ifindex); + if (netdev) { err = netdev_nl_queue_fill(rsp, netdev, q_id, q_type, info); - else + netdev_unlock(netdev); + } else { err = -ENODEV; + } rtnl_unlock(); @@ -499,7 +501,7 @@ netdev_nl_queue_dump_one(struct net_device *netdev, struct sk_buff *rsp, { int err = 0; - if (!(netdev->flags & IFF_UP)) + if (!netdev->up) return err; for (; ctx->rxq_idx < netdev->real_num_rx_queues; ctx->rxq_idx++) { @@ -532,13 +534,15 @@ int netdev_nl_queue_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb) rtnl_lock(); if (ifindex) { - netdev = __dev_get_by_index(net, ifindex); - if (netdev) + netdev = netdev_get_by_index_lock(net, ifindex); + if (netdev) { err = netdev_nl_queue_dump_one(netdev, skb, info, ctx); - else + netdev_unlock(netdev); + } else { err = -ENODEV; + } } else { - for_each_netdev_dump(net, netdev, ctx->ifindex) { + for_each_netdev_lock_scoped(net, netdev, ctx->ifindex) { err = netdev_nl_queue_dump_one(netdev, skb, info, ctx); if (err < 0) break; -- 2.51.0 From 1bb86cf8f44b1c1a320566558250b1f5121f6fd3 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Tue, 14 Jan 2025 19:53:16 -0800 Subject: [PATCH 14/16] net: protect threaded status of NAPI with netdev_lock() Now that NAPI instances can't come and go without holding netdev->lock we can trivially switch from rtnl_lock() to netdev_lock() for setting netdev->threaded via sysfs. Note that since we do not lock netdev_lock around sysfs calls in the core we don't have to "trylock" like we do with rtnl_lock. Reviewed-by: Eric Dumazet Reviewed-by: Kuniyuki Iwashima Link: https://patch.msgid.link/20250115035319.559603-9-kuba@kernel.org Signed-off-by: Jakub Kicinski --- include/linux/netdevice.h | 13 +++++++++++-- net/core/dev.c | 2 ++ net/core/net-sysfs.c | 34 ++++++++++++++++++++++++++++++++-- 3 files changed, 45 insertions(+), 4 deletions(-) diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 3941e4d0073e..20e773bbd181 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -384,7 +384,7 @@ struct napi_struct { int rx_count; /* length of rx_list */ unsigned int napi_id; /* protected by netdev_lock */ struct hrtimer timer; - struct task_struct *thread; + struct task_struct *thread; /* protected by netdev_lock */ unsigned long gro_flush_timeout; unsigned long irq_suspend_timeout; u32 defer_hard_irqs; @@ -2451,11 +2451,13 @@ struct net_device { * Drivers are free to use it for other protection. * * Protects: - * @napi_list, @net_shaper_hierarchy, @reg_state + * @napi_list, @net_shaper_hierarchy, @reg_state, @threaded * * Partially protects (writers must hold both @lock and rtnl_lock): * @up * + * Also protects some fields in struct napi_struct. + * * Ordering: take after rtnl_lock. 
*/ struct mutex lock; @@ -2697,6 +2699,13 @@ static inline void netdev_assert_locked(struct net_device *dev) lockdep_assert_held(&dev->lock); } +static inline void netdev_assert_locked_or_invisible(struct net_device *dev) +{ + if (dev->reg_state == NETREG_REGISTERED || + dev->reg_state == NETREG_UNREGISTERING) + netdev_assert_locked(dev); +} + static inline void netif_napi_set_irq(struct napi_struct *napi, int irq) { napi->irq = irq; diff --git a/net/core/dev.c b/net/core/dev.c index 2ef50a3ee4a1..34db90f345d5 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -6785,6 +6785,8 @@ int dev_set_threaded(struct net_device *dev, bool threaded) struct napi_struct *napi; int err = 0; + netdev_assert_locked_or_invisible(dev); + if (dev->threaded == threaded) return 0; diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c index 2d9afc6e2161..9365a7185a1d 100644 --- a/net/core/net-sysfs.c +++ b/net/core/net-sysfs.c @@ -36,7 +36,7 @@ static const char fmt_uint[] = "%u\n"; static const char fmt_ulong[] = "%lu\n"; static const char fmt_u64[] = "%llu\n"; -/* Caller holds RTNL or RCU */ +/* Caller holds RTNL, netdev->lock or RCU */ static inline int dev_isalive(const struct net_device *dev) { return READ_ONCE(dev->reg_state) <= NETREG_REGISTERED; @@ -108,6 +108,36 @@ static ssize_t netdev_store(struct device *dev, struct device_attribute *attr, return ret; } +/* Same as netdev_store() but takes netdev_lock() instead of rtnl_lock() */ +static ssize_t +netdev_lock_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t len, + int (*set)(struct net_device *, unsigned long)) +{ + struct net_device *netdev = to_net_dev(dev); + struct net *net = dev_net(netdev); + unsigned long new; + int ret; + + if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) + return -EPERM; + + ret = kstrtoul(buf, 0, &new); + if (ret) + return ret; + + netdev_lock(netdev); + + if (dev_isalive(netdev)) { + ret = (*set)(netdev, new); + if (ret == 0) + ret = len; + } + netdev_unlock(netdev); + + return ret; +} + NETDEVICE_SHOW_RO(dev_id, fmt_hex); NETDEVICE_SHOW_RO(dev_port, fmt_dec); NETDEVICE_SHOW_RO(addr_assign_type, fmt_dec); @@ -638,7 +668,7 @@ static ssize_t threaded_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { - return netdev_store(dev, attr, buf, len, modify_napi_threaded); + return netdev_lock_store(dev, attr, buf, len, modify_napi_threaded); } static DEVICE_ATTR_RW(threaded); -- 2.51.0 From 53ed30800d3fd36e1e9f7ba8014b150632f714b1 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Tue, 14 Jan 2025 19:53:17 -0800 Subject: [PATCH 15/16] net: protect napi->irq with netdev_lock() Take netdev_lock() in netif_napi_set_irq(). All NAPI "control fields" are now protected by that lock (most of the other ones are set during napi add/del). The napi_hash_node is fully protected by the hash spin lock, but close enough for the kdoc... 
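For callers that are already inside a netdev_lock() section (for example right after netif_napi_add_locked()), the new _locked variant avoids taking the lock twice; a sketch, with the driver-side function name being an assumption:

static void foo_add_napi_with_irq(struct net_device *dev,
				  struct napi_struct *napi,
				  int (*poll)(struct napi_struct *, int),
				  int irq)
{
	netdev_lock(dev);
	netif_napi_add_locked(dev, napi, poll);
	netif_napi_set_irq_locked(napi, irq);	/* plain netif_napi_set_irq() would
						 * take the lock again and deadlock
						 */
	netdev_unlock(dev);
}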
Reviewed-by: Joe Damato Reviewed-by: Eric Dumazet Reviewed-by: Kuniyuki Iwashima Link: https://patch.msgid.link/20250115035319.559603-10-kuba@kernel.org Signed-off-by: Jakub Kicinski --- include/linux/netdevice.h | 10 +++++++++- net/core/dev.c | 2 +- 2 files changed, 10 insertions(+), 2 deletions(-) diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 20e773bbd181..a47ff20365f9 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -388,6 +388,7 @@ struct napi_struct { unsigned long gro_flush_timeout; unsigned long irq_suspend_timeout; u32 defer_hard_irqs; + /* all fields past this point are write-protected by netdev_lock */ /* control-path-only fields follow */ struct list_head dev_list; struct hlist_node napi_hash_node; @@ -2706,11 +2707,18 @@ static inline void netdev_assert_locked_or_invisible(struct net_device *dev) netdev_assert_locked(dev); } -static inline void netif_napi_set_irq(struct napi_struct *napi, int irq) +static inline void netif_napi_set_irq_locked(struct napi_struct *napi, int irq) { napi->irq = irq; } +static inline void netif_napi_set_irq(struct napi_struct *napi, int irq) +{ + netdev_lock(napi->dev); + netif_napi_set_irq_locked(napi, irq); + netdev_unlock(napi->dev); +} + /* Default NAPI poll() weight * Device drivers are strongly advised to not use bigger value */ diff --git a/net/core/dev.c b/net/core/dev.c index 34db90f345d5..b6722ed9767a 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -6957,7 +6957,7 @@ void netif_napi_add_weight_locked(struct net_device *dev, */ if (dev->threaded && napi_kthread_create(napi)) dev->threaded = false; - netif_napi_set_irq(napi, -1); + netif_napi_set_irq_locked(napi, -1); } EXPORT_SYMBOL(netif_napi_add_weight_locked); -- 2.51.0 From e7ed2ba757bf86a4f90ae9c4080235fc9c74d8a2 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Tue, 14 Jan 2025 19:53:18 -0800 Subject: [PATCH 16/16] net: protect NAPI config fields with netdev_lock() Protect the following members of netdev and napi by netdev_lock: - defer_hard_irqs, - gro_flush_timeout, - irq_suspend_timeout. The first two are written via sysfs (which this patch switches to new lock), and netdev genl which holds both netdev and rtnl locks. irq_suspend_timeout is only written by netdev genl. Reviewed-by: Joe Damato Reviewed-by: Eric Dumazet Reviewed-by: Kuniyuki Iwashima Link: https://patch.msgid.link/20250115035319.559603-11-kuba@kernel.org Signed-off-by: Jakub Kicinski --- include/linux/netdevice.h | 7 ++++--- net/core/net-sysfs.c | 5 +++-- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index a47ff20365f9..8308d9c75918 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -384,11 +384,11 @@ struct napi_struct { int rx_count; /* length of rx_list */ unsigned int napi_id; /* protected by netdev_lock */ struct hrtimer timer; - struct task_struct *thread; /* protected by netdev_lock */ + /* all fields past this point are write-protected by netdev_lock */ + struct task_struct *thread; unsigned long gro_flush_timeout; unsigned long irq_suspend_timeout; u32 defer_hard_irqs; - /* all fields past this point are write-protected by netdev_lock */ /* control-path-only fields follow */ struct list_head dev_list; struct hlist_node napi_hash_node; @@ -2452,7 +2452,8 @@ struct net_device { * Drivers are free to use it for other protection. 
* * Protects: - * @napi_list, @net_shaper_hierarchy, @reg_state, @threaded + * @gro_flush_timeout, @napi_defer_hard_irqs, @napi_list, + * @net_shaper_hierarchy, @reg_state, @threaded * * Partially protects (writers must hold both @lock and rtnl_lock): * @up diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c index 9365a7185a1d..07cb99b114bd 100644 --- a/net/core/net-sysfs.c +++ b/net/core/net-sysfs.c @@ -450,7 +450,7 @@ static ssize_t gro_flush_timeout_store(struct device *dev, if (!capable(CAP_NET_ADMIN)) return -EPERM; - return netdev_store(dev, attr, buf, len, change_gro_flush_timeout); + return netdev_lock_store(dev, attr, buf, len, change_gro_flush_timeout); } NETDEVICE_SHOW_RW(gro_flush_timeout, fmt_ulong); @@ -470,7 +470,8 @@ static ssize_t napi_defer_hard_irqs_store(struct device *dev, if (!capable(CAP_NET_ADMIN)) return -EPERM; - return netdev_store(dev, attr, buf, len, change_napi_defer_hard_irqs); + return netdev_lock_store(dev, attr, buf, len, + change_napi_defer_hard_irqs); } NETDEVICE_SHOW_RW(napi_defer_hard_irqs, fmt_uint); -- 2.51.0
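A closing illustration (editorial sketch, not part of any patch above): once the sysfs handlers go through netdev_lock_store(), a set-callback runs with netdev->lock already held and can update the per-device NAPI config fields directly. The helper below is hypothetical and simplified; the in-tree callbacks do more work, for instance propagating the value to per-NAPI copies.

static int foo_change_gro_flush_timeout(struct net_device *dev,
					unsigned long val)
{
	netdev_assert_locked(dev);	/* held by netdev_lock_store() */
	WRITE_ONCE(dev->gro_flush_timeout, val);
	return 0;			/* netdev_lock_store() maps 0 to @len */
}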