From d1a16dbbd84e02d2a6dcfcb8d5c4b8b2c0289f00 Mon Sep 17 00:00:00 2001 From: "Russell King (Oracle)" Date: Fri, 8 Nov 2024 16:02:00 +0000 Subject: [PATCH 01/16] net: phylink: remove switch() statement in resolve handling The switch() statement doesn't sit very well with the preceeding if() statements, so let's just convert everything to if()s. As a result of the two preceding commits, there is now only one case in the switch() statement. Remove the switch statement and reduce the code indentation. Code reformatting will be in the following commit. Signed-off-by: Russell King (Oracle) Link: https://patch.msgid.link/E1t9RQu-002Fez-AA@rmk-PC.armlinux.org.uk Signed-off-by: Jakub Kicinski --- drivers/net/phy/phylink.c | 94 +++++++++++++++++++-------------------- 1 file changed, 45 insertions(+), 49 deletions(-) diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c index 3af6368a9fbf..aaeb8b11e758 100644 --- a/drivers/net/phy/phylink.c +++ b/drivers/net/phy/phylink.c @@ -1470,60 +1470,56 @@ static void phylink_resolve(struct work_struct *w) link_state = pl->phy_state; mac_config = link_state.link; } else { - switch (pl->cur_link_an_mode) { - case MLO_AN_INBAND: - phylink_mac_pcs_get_state(pl, &link_state); - - /* The PCS may have a latching link-fail indicator. - * If the link was up, bring the link down and - * re-trigger the resolve. Otherwise, re-read the - * PCS state to get the current status of the link. + phylink_mac_pcs_get_state(pl, &link_state); + + /* The PCS may have a latching link-fail indicator. + * If the link was up, bring the link down and + * re-trigger the resolve. Otherwise, re-read the + * PCS state to get the current status of the link. + */ + if (!link_state.link) { + if (cur_link_state) + retrigger = true; + else + phylink_mac_pcs_get_state(pl, + &link_state); + } + + /* If we have a phy, the "up" state is the union of + * both the PHY and the MAC + */ + if (pl->phydev) + link_state.link &= pl->phy_state.link; + + /* Only update if the PHY link is up */ + if (pl->phydev && pl->phy_state.link) { + /* If the interface has changed, force a + * link down event if the link isn't already + * down, and re-resolve. */ - if (!link_state.link) { - if (cur_link_state) - retrigger = true; - else - phylink_mac_pcs_get_state(pl, - &link_state); + if (link_state.interface != + pl->phy_state.interface) { + retrigger = true; + link_state.link = false; } + link_state.interface = pl->phy_state.interface; - /* If we have a phy, the "up" state is the union of - * both the PHY and the MAC + /* If we are doing rate matching, then the + * link speed/duplex comes from the PHY */ - if (pl->phydev) - link_state.link &= pl->phy_state.link; - - /* Only update if the PHY link is up */ - if (pl->phydev && pl->phy_state.link) { - /* If the interface has changed, force a - * link down event if the link isn't already - * down, and re-resolve. - */ - if (link_state.interface != - pl->phy_state.interface) { - retrigger = true; - link_state.link = false; - } - link_state.interface = pl->phy_state.interface; - - /* If we are doing rate matching, then the - * link speed/duplex comes from the PHY - */ - if (pl->phy_state.rate_matching) { - link_state.rate_matching = - pl->phy_state.rate_matching; - link_state.speed = pl->phy_state.speed; - link_state.duplex = - pl->phy_state.duplex; - } - - /* If we have a PHY, we need to update with - * the PHY flow control bits. 
- */ - link_state.pause = pl->phy_state.pause; - mac_config = true; + if (pl->phy_state.rate_matching) { + link_state.rate_matching = + pl->phy_state.rate_matching; + link_state.speed = pl->phy_state.speed; + link_state.duplex = + pl->phy_state.duplex; } - break; + + /* If we have a PHY, we need to update with + * the PHY flow control bits. + */ + link_state.pause = pl->phy_state.pause; + mac_config = true; } } -- 2.51.0 From bc08ce37d99a3992e975a0f397503cb23404f25a Mon Sep 17 00:00:00 2001 From: "Russell King (Oracle)" Date: Fri, 8 Nov 2024 16:02:05 +0000 Subject: [PATCH 02/16] net: phylink: clean up phylink_resolve() Now that we have reduced the indentation level, clean up the code formatting. Signed-off-by: Russell King (Oracle) Link: https://patch.msgid.link/E1t9RQz-002Ff5-EA@rmk-PC.armlinux.org.uk Signed-off-by: Jakub Kicinski --- drivers/net/phy/phylink.c | 35 ++++++++++++++++------------------- 1 file changed, 16 insertions(+), 19 deletions(-) diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c index aaeb8b11e758..b1e828a4286d 100644 --- a/drivers/net/phy/phylink.c +++ b/drivers/net/phy/phylink.c @@ -1472,51 +1472,48 @@ static void phylink_resolve(struct work_struct *w) } else { phylink_mac_pcs_get_state(pl, &link_state); - /* The PCS may have a latching link-fail indicator. - * If the link was up, bring the link down and - * re-trigger the resolve. Otherwise, re-read the - * PCS state to get the current status of the link. + /* The PCS may have a latching link-fail indicator. If the link + * was up, bring the link down and re-trigger the resolve. + * Otherwise, re-read the PCS state to get the current status + * of the link. */ if (!link_state.link) { if (cur_link_state) retrigger = true; else - phylink_mac_pcs_get_state(pl, - &link_state); + phylink_mac_pcs_get_state(pl, &link_state); } - /* If we have a phy, the "up" state is the union of - * both the PHY and the MAC + /* If we have a phy, the "up" state is the union of both the + * PHY and the MAC */ if (pl->phydev) link_state.link &= pl->phy_state.link; /* Only update if the PHY link is up */ if (pl->phydev && pl->phy_state.link) { - /* If the interface has changed, force a - * link down event if the link isn't already - * down, and re-resolve. + /* If the interface has changed, force a link down + * event if the link isn't already down, and re-resolve. */ - if (link_state.interface != - pl->phy_state.interface) { + if (link_state.interface != pl->phy_state.interface) { retrigger = true; link_state.link = false; } + link_state.interface = pl->phy_state.interface; - /* If we are doing rate matching, then the - * link speed/duplex comes from the PHY + /* If we are doing rate matching, then the link + * speed/duplex comes from the PHY */ if (pl->phy_state.rate_matching) { link_state.rate_matching = pl->phy_state.rate_matching; link_state.speed = pl->phy_state.speed; - link_state.duplex = - pl->phy_state.duplex; + link_state.duplex = pl->phy_state.duplex; } - /* If we have a PHY, we need to update with - * the PHY flow control bits. + /* If we have a PHY, we need to update with the PHY + * flow control bits. */ link_state.pause = pl->phy_state.pause; mac_config = true; -- 2.51.0 From 43271bb5bf67e78def9c2898040505e7cb5935f3 Mon Sep 17 00:00:00 2001 From: Breno Leitao Date: Fri, 8 Nov 2024 06:59:25 -0800 Subject: [PATCH 03/16] net: netconsole: selftests: Check if netdevsim is available The netconsole selftest relies on the availability of the netdevsim module. 
To ensure the test can run correctly, we need to check if the netdevsim module is either loaded or built-in before proceeding. Update the netconsole selftest to check for the existence of the /sys/bus/netdevsim/new_device file before running the test. If the file is not found, the test is skipped with an explanation that the CONFIG_NETDEVSIM kernel config option may not be enabled. Signed-off-by: Breno Leitao Reviewed-by: Simon Horman Link: https://patch.msgid.link/20241108-netcon_selftest_deps-v1-1-1789cbf3adcd@debian.org Signed-off-by: Jakub Kicinski --- tools/testing/selftests/drivers/net/netcons_basic.sh | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/tools/testing/selftests/drivers/net/netcons_basic.sh b/tools/testing/selftests/drivers/net/netcons_basic.sh index 182eb1a97e59..b175f4d966e5 100755 --- a/tools/testing/selftests/drivers/net/netcons_basic.sh +++ b/tools/testing/selftests/drivers/net/netcons_basic.sh @@ -39,6 +39,7 @@ NAMESPACE="" # IDs for netdevsim NSIM_DEV_1_ID=$((256 + RANDOM % 256)) NSIM_DEV_2_ID=$((512 + RANDOM % 256)) +NSIM_DEV_SYS_NEW="/sys/bus/netdevsim/new_device" # Used to create and delete namespaces source "${SCRIPTDIR}"/../../net/lib.sh @@ -46,7 +47,6 @@ source "${SCRIPTDIR}"/../../net/net_helper.sh # Create netdevsim interfaces create_ifaces() { - local NSIM_DEV_SYS_NEW=/sys/bus/netdevsim/new_device echo "$NSIM_DEV_2_ID" > "$NSIM_DEV_SYS_NEW" echo "$NSIM_DEV_1_ID" > "$NSIM_DEV_SYS_NEW" @@ -212,6 +212,11 @@ function check_for_dependencies() { exit "${ksft_skip}" fi + if [ ! -f "${NSIM_DEV_SYS_NEW}" ]; then + echo "SKIP: file ${NSIM_DEV_SYS_NEW} does not exist. Check if CONFIG_NETDEVSIM is enabled" >&2 + exit "${ksft_skip}" + fi + if [ ! -d "${NETCONS_CONFIGFS}" ]; then echo "SKIP: directory ${NETCONS_CONFIGFS} does not exist. Check if NETCONSOLE_DYNAMIC is enabled" >&2 exit "${ksft_skip}" -- 2.51.0 From 7d3f3b4367f315a61fc615e3138f3d320da8c466 Mon Sep 17 00:00:00 2001 From: Vladimir Vdovin Date: Fri, 8 Nov 2024 09:34:24 +0000 Subject: [PATCH 04/16] net: ipv4: Cache pmtu for all packet paths if multipath enabled Check number of paths by fib_info_num_path(), and update_or_create_fnhe() for every path. Problem is that pmtu is cached only for the oif that has received icmp message "need to frag", other oifs will still try to use "default" iface mtu. An example topology showing the problem: | host1 +---------+ | dummy0 | 10.179.20.18/32 mtu9000 +---------+ +-----------+----------------+ +---------+ +---------+ | ens17f0 | 10.179.2.141/31 | ens17f1 | 10.179.2.13/31 +---------+ +---------+ | (all here have mtu 9000) | +------+ +------+ | ro1 | 10.179.2.140/31 | ro2 | 10.179.2.12/31 +------+ +------+ | | ---------+------------+-------------------+------ | +-----+ | ro3 | 10.10.10.10 mtu1500 +-----+ | ======================================== some networks ======================================== | +-----+ | eth0| 10.10.30.30 mtu9000 +-----+ | host2 host1 have enabled multipath and sysctl net.ipv4.fib_multipath_hash_policy = 1: default proto static src 10.179.20.18 nexthop via 10.179.2.12 dev ens17f1 weight 1 nexthop via 10.179.2.140 dev ens17f0 weight 1 When host1 tries to do pmtud from 10.179.20.18/32 to host2, host1 receives at ens17f1 iface an icmp packet from ro3 that ro3 mtu=1500. And host1 caches it in nexthop exceptions cache. Problem is that it is cached only for the iface that has received icmp, and there is no way that ro3 will send icmp msg to host1 via another path. 
Host1 now have this routes to host2: ip r g 10.10.30.30 sport 30000 dport 443 10.10.30.30 via 10.179.2.12 dev ens17f1 src 10.179.20.18 uid 0 cache expires 521sec mtu 1500 ip r g 10.10.30.30 sport 30033 dport 443 10.10.30.30 via 10.179.2.140 dev ens17f0 src 10.179.20.18 uid 0 cache So when host1 tries again to reach host2 with mtu>1500, if packet flow is lucky enough to be hashed with oif=ens17f1 its ok, if oif=ens17f0 it blackholes and still gets icmp msgs from ro3 to ens17f1, until lucky day when ro3 will send it through another flow to ens17f0. Signed-off-by: Vladimir Vdovin Reviewed-by: Ido Schimmel Link: https://patch.msgid.link/20241108093427.317942-1-deliran@verdict.gg Signed-off-by: Jakub Kicinski --- net/ipv4/route.c | 13 ++++ tools/testing/selftests/net/pmtu.sh | 112 +++++++++++++++++++++++----- 2 files changed, 108 insertions(+), 17 deletions(-) diff --git a/net/ipv4/route.c b/net/ipv4/route.c index 4c5e773002fe..ccdbe9c70132 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -1027,6 +1027,19 @@ static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu) struct fib_nh_common *nhc; fib_select_path(net, &res, fl4, NULL); +#ifdef CONFIG_IP_ROUTE_MULTIPATH + if (fib_info_num_path(res.fi) > 1) { + int nhsel; + + for (nhsel = 0; nhsel < fib_info_num_path(res.fi); nhsel++) { + nhc = fib_info_nhc(res.fi, nhsel); + update_or_create_fnhe(nhc, fl4->daddr, 0, mtu, lock, + jiffies + net->ipv4.ip_rt_mtu_expires); + } + rcu_read_unlock(); + return; + } +#endif /* CONFIG_IP_ROUTE_MULTIPATH */ nhc = FIB_RES_NHC(res); update_or_create_fnhe(nhc, fl4->daddr, 0, mtu, lock, jiffies + net->ipv4.ip_rt_mtu_expires); diff --git a/tools/testing/selftests/net/pmtu.sh b/tools/testing/selftests/net/pmtu.sh index 6c651c880fe8..66be7699c72c 100755 --- a/tools/testing/selftests/net/pmtu.sh +++ b/tools/testing/selftests/net/pmtu.sh @@ -197,6 +197,12 @@ # # - pmtu_ipv6_route_change # Same as above but with IPv6 +# +# - pmtu_ipv4_mp_exceptions +# Use the same topology as in pmtu_ipv4, but add routeable addresses +# on host A and B on lo reachable via both routers. Host A and B +# addresses have multipath routes to each other, b_r1 mtu = 1500. +# Check that PMTU exceptions are created for both paths. 
source lib.sh source net_helper.sh @@ -266,7 +272,8 @@ tests=" list_flush_ipv4_exception ipv4: list and flush cached exceptions 1 list_flush_ipv6_exception ipv6: list and flush cached exceptions 1 pmtu_ipv4_route_change ipv4: PMTU exception w/route replace 1 - pmtu_ipv6_route_change ipv6: PMTU exception w/route replace 1" + pmtu_ipv6_route_change ipv6: PMTU exception w/route replace 1 + pmtu_ipv4_mp_exceptions ipv4: PMTU multipath nh exceptions 1" # Addressing and routing for tests with routers: four network segments, with # index SEGMENT between 1 and 4, a common prefix (PREFIX4 or PREFIX6) and an @@ -343,6 +350,9 @@ tunnel6_a_addr="fd00:2::a" tunnel6_b_addr="fd00:2::b" tunnel6_mask="64" +host4_a_addr="192.168.99.99" +host4_b_addr="192.168.88.88" + dummy6_0_prefix="fc00:1000::" dummy6_1_prefix="fc00:1001::" dummy6_mask="64" @@ -984,6 +994,52 @@ setup_ovs_bridge() { run_cmd ip route add ${prefix6}:${b_r1}::1 via ${prefix6}:${a_r1}::2 } +setup_multipath_new() { + # Set up host A with multipath routes to host B host4_b_addr + run_cmd ${ns_a} ip addr add ${host4_a_addr} dev lo + run_cmd ${ns_a} ip nexthop add id 401 via ${prefix4}.${a_r1}.2 dev veth_A-R1 + run_cmd ${ns_a} ip nexthop add id 402 via ${prefix4}.${a_r2}.2 dev veth_A-R2 + run_cmd ${ns_a} ip nexthop add id 403 group 401/402 + run_cmd ${ns_a} ip route add ${host4_b_addr} src ${host4_a_addr} nhid 403 + + # Set up host B with multipath routes to host A host4_a_addr + run_cmd ${ns_b} ip addr add ${host4_b_addr} dev lo + run_cmd ${ns_b} ip nexthop add id 401 via ${prefix4}.${b_r1}.2 dev veth_B-R1 + run_cmd ${ns_b} ip nexthop add id 402 via ${prefix4}.${b_r2}.2 dev veth_B-R2 + run_cmd ${ns_b} ip nexthop add id 403 group 401/402 + run_cmd ${ns_b} ip route add ${host4_a_addr} src ${host4_b_addr} nhid 403 +} + +setup_multipath_old() { + # Set up host A with multipath routes to host B host4_b_addr + run_cmd ${ns_a} ip addr add ${host4_a_addr} dev lo + run_cmd ${ns_a} ip route add ${host4_b_addr} \ + src ${host4_a_addr} \ + nexthop via ${prefix4}.${a_r1}.2 weight 1 \ + nexthop via ${prefix4}.${a_r2}.2 weight 1 + + # Set up host B with multipath routes to host A host4_a_addr + run_cmd ${ns_b} ip addr add ${host4_b_addr} dev lo + run_cmd ${ns_b} ip route add ${host4_a_addr} \ + src ${host4_b_addr} \ + nexthop via ${prefix4}.${b_r1}.2 weight 1 \ + nexthop via ${prefix4}.${b_r2}.2 weight 1 +} + +setup_multipath() { + if [ "$USE_NH" = "yes" ]; then + setup_multipath_new + else + setup_multipath_old + fi + + # Set up routers with routes to dummies + run_cmd ${ns_r1} ip route add ${host4_a_addr} via ${prefix4}.${a_r1}.1 + run_cmd ${ns_r2} ip route add ${host4_a_addr} via ${prefix4}.${a_r2}.1 + run_cmd ${ns_r1} ip route add ${host4_b_addr} via ${prefix4}.${b_r1}.1 + run_cmd ${ns_r2} ip route add ${host4_b_addr} via ${prefix4}.${b_r2}.1 +} + setup() { [ "$(id -u)" -ne 0 ] && echo " need to run as root" && return $ksft_skip @@ -1076,23 +1132,15 @@ link_get_mtu() { } route_get_dst_exception() { - ns_cmd="${1}" - dst="${2}" - dsfield="${3}" + ns_cmd="${1}"; shift - if [ -z "${dsfield}" ]; then - dsfield=0 - fi - - ${ns_cmd} ip route get "${dst}" dsfield "${dsfield}" + ${ns_cmd} ip route get "$@" } route_get_dst_pmtu_from_exception() { - ns_cmd="${1}" - dst="${2}" - dsfield="${3}" + ns_cmd="${1}"; shift - mtu_parse "$(route_get_dst_exception "${ns_cmd}" "${dst}" "${dsfield}")" + mtu_parse "$(route_get_dst_exception "${ns_cmd}" "$@")" } check_pmtu_value() { @@ -1235,10 +1283,10 @@ test_pmtu_ipv4_dscp_icmp_exception() { run_cmd "${ns_a}" ping -q -M want -Q 
"${dsfield}" -c 1 -w 1 -s "${len}" "${dst2}" # Check that exceptions have been created with the correct PMTU - pmtu_1="$(route_get_dst_pmtu_from_exception "${ns_a}" "${dst1}" "${policy_mark}")" + pmtu_1="$(route_get_dst_pmtu_from_exception "${ns_a}" "${dst1}" dsfield "${policy_mark}")" check_pmtu_value "1400" "${pmtu_1}" "exceeding MTU" || return 1 - pmtu_2="$(route_get_dst_pmtu_from_exception "${ns_a}" "${dst2}" "${policy_mark}")" + pmtu_2="$(route_get_dst_pmtu_from_exception "${ns_a}" "${dst2}" dsfield "${policy_mark}")" check_pmtu_value "1500" "${pmtu_2}" "exceeding MTU" || return 1 } @@ -1285,9 +1333,9 @@ test_pmtu_ipv4_dscp_udp_exception() { UDP:"${dst2}":50000,tos="${dsfield}" # Check that exceptions have been created with the correct PMTU - pmtu_1="$(route_get_dst_pmtu_from_exception "${ns_a}" "${dst1}" "${policy_mark}")" + pmtu_1="$(route_get_dst_pmtu_from_exception "${ns_a}" "${dst1}" dsfield "${policy_mark}")" check_pmtu_value "1400" "${pmtu_1}" "exceeding MTU" || return 1 - pmtu_2="$(route_get_dst_pmtu_from_exception "${ns_a}" "${dst2}" "${policy_mark}")" + pmtu_2="$(route_get_dst_pmtu_from_exception "${ns_a}" "${dst2}" dsfield "${policy_mark}")" check_pmtu_value "1500" "${pmtu_2}" "exceeding MTU" || return 1 } @@ -2329,6 +2377,36 @@ test_pmtu_ipv6_route_change() { test_pmtu_ipvX_route_change 6 } +test_pmtu_ipv4_mp_exceptions() { + setup namespaces routing multipath || return $ksft_skip + + trace "${ns_a}" veth_A-R1 "${ns_r1}" veth_R1-A \ + "${ns_r1}" veth_R1-B "${ns_b}" veth_B-R1 \ + "${ns_a}" veth_A-R2 "${ns_r2}" veth_R2-A \ + "${ns_r2}" veth_R2-B "${ns_b}" veth_B-R2 + + # Set up initial MTU values + mtu "${ns_a}" veth_A-R1 2000 + mtu "${ns_r1}" veth_R1-A 2000 + mtu "${ns_r1}" veth_R1-B 1500 + mtu "${ns_b}" veth_B-R1 1500 + + mtu "${ns_a}" veth_A-R2 2000 + mtu "${ns_r2}" veth_R2-A 2000 + mtu "${ns_r2}" veth_R2-B 1500 + mtu "${ns_b}" veth_B-R2 1500 + + # Ping and expect two nexthop exceptions for two routes + run_cmd ${ns_a} ping -q -M want -i 0.1 -c 1 -s 1800 "${host4_b_addr}" + + # Check that exceptions have been created with the correct PMTU + pmtu_a_R1="$(route_get_dst_pmtu_from_exception "${ns_a}" "${host4_b_addr}" oif veth_A-R1)" + pmtu_a_R2="$(route_get_dst_pmtu_from_exception "${ns_a}" "${host4_b_addr}" oif veth_A-R2)" + + check_pmtu_value "1500" "${pmtu_a_R1}" "exceeding MTU (veth_A-R1)" || return 1 + check_pmtu_value "1500" "${pmtu_a_R2}" "exceeding MTU (veth_A-R2)" || return 1 +} + usage() { echo echo "$0 [OPTIONS] [TEST]..." -- 2.51.0 From ab85ebf437231ceaf359c2a4679bebd7e8d6bdb2 Mon Sep 17 00:00:00 2001 From: Patrisious Haddad Date: Thu, 7 Nov 2024 21:43:46 +0200 Subject: [PATCH 05/16] net/mlx5: E-switch, refactor eswitch mode change The E-switch mode was previously updated before removing and re-adding the IB device, which could cause a temporary mismatch between the E-switch mode and the IB device configuration. To prevent this discrepancy, the IB device is now removed first, then the E-switch mode is updated, and finally, the IB device is re-added. This sequence ensures consistent alignment between the E-switch mode and the IB device whenever the mode changes, regardless of the new mode value. 
Signed-off-by: Patrisious Haddad Reviewed-by: Mark Bloch Signed-off-by: Tariq Toukan Link: https://patch.msgid.link/20241107194357.683732-2-tariqt@nvidia.com Signed-off-by: Jakub Kicinski --- .../net/ethernet/mellanox/mlx5/core/eswitch.c | 1 - .../mellanox/mlx5/core/eswitch_offloads.c | 26 +++++++++++++++---- 2 files changed, 21 insertions(+), 6 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index cead41ddbc38..d0dab8f4e1a3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c @@ -1490,7 +1490,6 @@ int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int num_vfs) if (esw->mode == MLX5_ESWITCH_LEGACY) { err = esw_legacy_enable(esw); } else { - mlx5_rescan_drivers(esw->dev); err = esw_offloads_enable(esw); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index fd34f43d18d5..5f1adebd9669 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -2332,18 +2332,35 @@ out_free: return err; } +static void esw_mode_change(struct mlx5_eswitch *esw, u16 mode) +{ + mlx5_devcom_comp_lock(esw->dev->priv.hca_devcom_comp); + + if (esw->dev->priv.flags & MLX5_PRIV_FLAGS_DISABLE_IB_ADEV) { + esw->mode = mode; + mlx5_devcom_comp_unlock(esw->dev->priv.hca_devcom_comp); + return; + } + + esw->dev->priv.flags |= MLX5_PRIV_FLAGS_DISABLE_IB_ADEV; + mlx5_rescan_drivers_locked(esw->dev); + esw->mode = mode; + esw->dev->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_IB_ADEV; + mlx5_rescan_drivers_locked(esw->dev); + mlx5_devcom_comp_unlock(esw->dev->priv.hca_devcom_comp); +} + static int esw_offloads_start(struct mlx5_eswitch *esw, struct netlink_ext_ack *extack) { int err; - esw->mode = MLX5_ESWITCH_OFFLOADS; + esw_mode_change(esw, MLX5_ESWITCH_OFFLOADS); err = mlx5_eswitch_enable_locked(esw, esw->dev->priv.sriov.num_vfs); if (err) { NL_SET_ERR_MSG_MOD(extack, "Failed setting eswitch to offloads"); - esw->mode = MLX5_ESWITCH_LEGACY; - mlx5_rescan_drivers(esw->dev); + esw_mode_change(esw, MLX5_ESWITCH_LEGACY); return err; } if (esw->offloads.inline_mode == MLX5_INLINE_MODE_NONE) { @@ -3584,7 +3601,7 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw, { int err; - esw->mode = MLX5_ESWITCH_LEGACY; + esw_mode_change(esw, MLX5_ESWITCH_LEGACY); /* If changing from switchdev to legacy mode without sriov enabled, * no need to create legacy fdb. @@ -3770,7 +3787,6 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode, err = esw_offloads_start(esw, extack); } else if (mode == DEVLINK_ESWITCH_MODE_LEGACY) { err = esw_offloads_stop(esw, extack); - mlx5_rescan_drivers(esw->dev); } else { err = -EINVAL; } -- 2.51.0 From 5a731857656e3988935108f48800cd764a550005 Mon Sep 17 00:00:00 2001 From: Carolina Jubran Date: Thu, 7 Nov 2024 21:43:47 +0200 Subject: [PATCH 06/16] net/mlx5: Simplify QoS normalization by removing error handling This change updates esw_qos_normalize_min_rate to not return errors, significantly simplifying the code. Normalization failures are software bugs, and it's unnecessary to handle them with rollback mechanisms. Instead, `esw_qos_update_sched_node_bw_share` and `esw_qos_normalize_min_rate` now return void, with any errors logged as warnings to indicate potential software issues. 
This approach avoids compensating for hidden bugs and removes error handling from all places that perform normalization, streamlining future patches. Signed-off-by: Carolina Jubran Reviewed-by: Cosmin Ratiu Signed-off-by: Tariq Toukan Link: https://patch.msgid.link/20241107194357.683732-3-tariqt@nvidia.com Signed-off-by: Jakub Kicinski --- .../net/ethernet/mellanox/mlx5/core/esw/qos.c | 72 +++++-------------- 1 file changed, 17 insertions(+), 55 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c index 940e1c2d1e39..0c371f27c693 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c @@ -208,64 +208,49 @@ static u32 esw_qos_calc_bw_share(u32 min_rate, u32 divider, u32 fw_max) return min_t(u32, max_t(u32, DIV_ROUND_UP(min_rate, divider), MLX5_MIN_BW_SHARE), fw_max); } -static int esw_qos_update_sched_node_bw_share(struct mlx5_esw_sched_node *node, - u32 divider, - struct netlink_ext_ack *extack) +static void esw_qos_update_sched_node_bw_share(struct mlx5_esw_sched_node *node, + u32 divider, + struct netlink_ext_ack *extack) { u32 fw_max_bw_share = MLX5_CAP_QOS(node->esw->dev, max_tsar_bw_share); u32 bw_share; - int err; bw_share = esw_qos_calc_bw_share(node->min_rate, divider, fw_max_bw_share); if (bw_share == node->bw_share) - return 0; - - err = esw_qos_sched_elem_config(node, node->max_rate, bw_share, extack); - if (err) - return err; + return; + esw_qos_sched_elem_config(node, node->max_rate, bw_share, extack); node->bw_share = bw_share; - - return err; } -static int esw_qos_normalize_min_rate(struct mlx5_eswitch *esw, - struct mlx5_esw_sched_node *parent, - struct netlink_ext_ack *extack) +static void esw_qos_normalize_min_rate(struct mlx5_eswitch *esw, + struct mlx5_esw_sched_node *parent, + struct netlink_ext_ack *extack) { struct list_head *nodes = parent ? 
&parent->children : &esw->qos.domain->nodes; u32 divider = esw_qos_calculate_min_rate_divider(esw, parent); struct mlx5_esw_sched_node *node; list_for_each_entry(node, nodes, entry) { - int err; - if (node->esw != esw || node->ix == esw->qos.root_tsar_ix) continue; - err = esw_qos_update_sched_node_bw_share(node, divider, extack); - if (err) - return err; + esw_qos_update_sched_node_bw_share(node, divider, extack); if (list_empty(&node->children)) continue; - err = esw_qos_normalize_min_rate(node->esw, node, extack); - if (err) - return err; + esw_qos_normalize_min_rate(node->esw, node, extack); } - - return 0; } static int esw_qos_set_vport_min_rate(struct mlx5_vport *vport, u32 min_rate, struct netlink_ext_ack *extack) { struct mlx5_esw_sched_node *vport_node = vport->qos.sched_node; - u32 fw_max_bw_share, previous_min_rate; bool min_rate_supported; - int err; + u32 fw_max_bw_share; esw_assert_qos_lock_held(vport_node->esw); fw_max_bw_share = MLX5_CAP_QOS(vport->dev, max_tsar_bw_share); @@ -276,13 +261,10 @@ static int esw_qos_set_vport_min_rate(struct mlx5_vport *vport, if (min_rate == vport_node->min_rate) return 0; - previous_min_rate = vport_node->min_rate; vport_node->min_rate = min_rate; - err = esw_qos_normalize_min_rate(vport_node->parent->esw, vport_node->parent, extack); - if (err) - vport_node->min_rate = previous_min_rate; + esw_qos_normalize_min_rate(vport_node->parent->esw, vport_node->parent, extack); - return err; + return 0; } static int esw_qos_set_vport_max_rate(struct mlx5_vport *vport, @@ -316,8 +298,6 @@ static int esw_qos_set_node_min_rate(struct mlx5_esw_sched_node *node, u32 min_rate, struct netlink_ext_ack *extack) { struct mlx5_eswitch *esw = node->esw; - u32 previous_min_rate; - int err; if (!MLX5_CAP_QOS(esw->dev, esw_bw_share) || MLX5_CAP_QOS(esw->dev, max_tsar_bw_share) < MLX5_MIN_BW_SHARE) @@ -326,19 +306,10 @@ static int esw_qos_set_node_min_rate(struct mlx5_esw_sched_node *node, if (min_rate == node->min_rate) return 0; - previous_min_rate = node->min_rate; node->min_rate = min_rate; - err = esw_qos_normalize_min_rate(esw, NULL, extack); - if (err) { - NL_SET_ERR_MSG_MOD(extack, "E-Switch node min rate setting failed"); - - /* Attempt restoring previous configuration */ - node->min_rate = previous_min_rate; - if (esw_qos_normalize_min_rate(esw, NULL, extack)) - NL_SET_ERR_MSG_MOD(extack, "E-Switch BW share restore failed"); - } + esw_qos_normalize_min_rate(esw, NULL, extack); - return err; + return 0; } static int esw_qos_set_node_max_rate(struct mlx5_esw_sched_node *node, @@ -552,17 +523,11 @@ __esw_qos_create_vports_sched_node(struct mlx5_eswitch *esw, struct mlx5_esw_sch goto err_alloc_node; } - err = esw_qos_normalize_min_rate(esw, NULL, extack); - if (err) { - NL_SET_ERR_MSG_MOD(extack, "E-Switch nodes normalization failed"); - goto err_min_rate; - } + esw_qos_normalize_min_rate(esw, NULL, extack); trace_mlx5_esw_node_qos_create(esw->dev, node, node->ix); return node; -err_min_rate: - __esw_qos_free_node(node); err_alloc_node: if (mlx5_destroy_scheduling_element_cmd(esw->dev, SCHEDULING_HIERARCHY_E_SWITCH, @@ -609,10 +574,7 @@ static int __esw_qos_destroy_node(struct mlx5_esw_sched_node *node, struct netli NL_SET_ERR_MSG_MOD(extack, "E-Switch destroy TSAR_ID failed"); __esw_qos_free_node(node); - err = esw_qos_normalize_min_rate(esw, NULL, extack); - if (err) - NL_SET_ERR_MSG_MOD(extack, "E-Switch nodes normalization failed"); - + esw_qos_normalize_min_rate(esw, NULL, extack); return err; } -- 2.51.0 From ac778fefed340e019bc9c022842b4a2cc5713559 Mon 
Sep 17 00:00:00 2001 From: Carolina Jubran Date: Thu, 7 Nov 2024 21:43:48 +0200 Subject: [PATCH 07/16] net/mlx5: Generalize max_rate and min_rate setting for nodes Refactor max_rate and min_rate setting functions to operate on mlx5_esw_sched_node, allowing for generalized handling of both vports and nodes. Signed-off-by: Carolina Jubran Reviewed-by: Cosmin Ratiu Signed-off-by: Tariq Toukan Link: https://patch.msgid.link/20241107194357.683732-4-tariqt@nvidia.com Signed-off-by: Jakub Kicinski --- .../net/ethernet/mellanox/mlx5/core/esw/qos.c | 69 ++++--------------- 1 file changed, 13 insertions(+), 56 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c index 0c371f27c693..82805bb20c76 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c @@ -245,69 +245,20 @@ static void esw_qos_normalize_min_rate(struct mlx5_eswitch *esw, } } -static int esw_qos_set_vport_min_rate(struct mlx5_vport *vport, - u32 min_rate, struct netlink_ext_ack *extack) -{ - struct mlx5_esw_sched_node *vport_node = vport->qos.sched_node; - bool min_rate_supported; - u32 fw_max_bw_share; - - esw_assert_qos_lock_held(vport_node->esw); - fw_max_bw_share = MLX5_CAP_QOS(vport->dev, max_tsar_bw_share); - min_rate_supported = MLX5_CAP_QOS(vport->dev, esw_bw_share) && - fw_max_bw_share >= MLX5_MIN_BW_SHARE; - if (min_rate && !min_rate_supported) - return -EOPNOTSUPP; - if (min_rate == vport_node->min_rate) - return 0; - - vport_node->min_rate = min_rate; - esw_qos_normalize_min_rate(vport_node->parent->esw, vport_node->parent, extack); - - return 0; -} - -static int esw_qos_set_vport_max_rate(struct mlx5_vport *vport, - u32 max_rate, struct netlink_ext_ack *extack) -{ - struct mlx5_esw_sched_node *vport_node = vport->qos.sched_node; - u32 act_max_rate = max_rate; - bool max_rate_supported; - int err; - - esw_assert_qos_lock_held(vport_node->esw); - max_rate_supported = MLX5_CAP_QOS(vport->dev, esw_rate_limit); - - if (max_rate && !max_rate_supported) - return -EOPNOTSUPP; - if (max_rate == vport_node->max_rate) - return 0; - - /* Use parent node limit if new max rate is 0. */ - if (!max_rate) - act_max_rate = vport_node->parent->max_rate; - - err = esw_qos_sched_elem_config(vport_node, act_max_rate, vport_node->bw_share, extack); - if (!err) - vport_node->max_rate = max_rate; - - return err; -} - static int esw_qos_set_node_min_rate(struct mlx5_esw_sched_node *node, u32 min_rate, struct netlink_ext_ack *extack) { struct mlx5_eswitch *esw = node->esw; - if (!MLX5_CAP_QOS(esw->dev, esw_bw_share) || - MLX5_CAP_QOS(esw->dev, max_tsar_bw_share) < MLX5_MIN_BW_SHARE) + if (min_rate && (!MLX5_CAP_QOS(esw->dev, esw_bw_share) || + MLX5_CAP_QOS(esw->dev, max_tsar_bw_share) < MLX5_MIN_BW_SHARE)) return -EOPNOTSUPP; if (min_rate == node->min_rate) return 0; node->min_rate = min_rate; - esw_qos_normalize_min_rate(esw, NULL, extack); + esw_qos_normalize_min_rate(esw, node->parent, extack); return 0; } @@ -321,11 +272,17 @@ static int esw_qos_set_node_max_rate(struct mlx5_esw_sched_node *node, if (node->max_rate == max_rate) return 0; + /* Use parent node limit if new max rate is 0. 
*/ + if (!max_rate && node->parent) + max_rate = node->parent->max_rate; + err = esw_qos_sched_elem_config(node, max_rate, node->bw_share, extack); if (err) return err; node->max_rate = max_rate; + if (node->type != SCHED_NODE_TYPE_VPORTS_TSAR) + return 0; /* Any unlimited vports in the node should be set with the value of the node. */ list_for_each_entry(vport_node, &node->children, entry) { @@ -748,9 +705,9 @@ int mlx5_esw_qos_set_vport_rate(struct mlx5_vport *vport, u32 max_rate, u32 min_ if (err) goto unlock; - err = esw_qos_set_vport_min_rate(vport, min_rate, NULL); + err = esw_qos_set_node_min_rate(vport->qos.sched_node, min_rate, NULL); if (!err) - err = esw_qos_set_vport_max_rate(vport, max_rate, NULL); + err = esw_qos_set_node_max_rate(vport->qos.sched_node, max_rate, NULL); unlock: esw_qos_unlock(esw); return err; @@ -947,7 +904,7 @@ int mlx5_esw_devlink_rate_leaf_tx_share_set(struct devlink_rate *rate_leaf, void if (err) goto unlock; - err = esw_qos_set_vport_min_rate(vport, tx_share, extack); + err = esw_qos_set_node_min_rate(vport->qos.sched_node, tx_share, extack); unlock: esw_qos_unlock(esw); return err; @@ -973,7 +930,7 @@ int mlx5_esw_devlink_rate_leaf_tx_max_set(struct devlink_rate *rate_leaf, void * if (err) goto unlock; - err = esw_qos_set_vport_max_rate(vport, tx_max, extack); + err = esw_qos_set_node_max_rate(vport->qos.sched_node, tx_max, extack); unlock: esw_qos_unlock(esw); return err; -- 2.51.0 From cc4bb15ffa8412bfe1e189d37edb6ca7d9918cb4 Mon Sep 17 00:00:00 2001 From: Carolina Jubran Date: Thu, 7 Nov 2024 21:43:49 +0200 Subject: [PATCH 08/16] net/mlx5: Refactor scheduling element configuration bitmasks Refactor esw_qos_sched_elem_config to set bitmasks only when max_rate or bw_share values change, allowing the function to configure nodes with only one of these parameters. This enables more flexible usage for nodes where only one parameter requires configuration. Remove scattered assignments and checks to centralize them within this function, removing the now redundant esw_qos_set_node_max_rate entirely. With this refactor, also remove the assignment of the vport scheduling node max rate to the parent max rate for unlimited vports (where max rate is set to zero), as firmware already handles this behavior. 
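The core of the change is in esw_qos_sched_elem_config(), which now compares the requested values against the cached ones and sets a modify-bitmask bit only for a field that actually changes (condensed from the diff below; capability checks and error handling omitted):

  if (node->max_rate == max_rate && node->bw_share == bw_share)
          return 0;

  if (node->max_rate != max_rate) {
          MLX5_SET(scheduling_context, sched_ctx, max_average_bw, max_rate);
          bitmask |= MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_MAX_AVERAGE_BW;
  }
  if (node->bw_share != bw_share) {
          MLX5_SET(scheduling_context, sched_ctx, bw_share, bw_share);
          bitmask |= MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_BW_SHARE;
  }

On success, the cached node->max_rate and node->bw_share are updated, which is what lets callers configure only the parameter they care about.
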
Signed-off-by: Carolina Jubran Reviewed-by: Cosmin Ratiu Signed-off-by: Tariq Toukan Link: https://patch.msgid.link/20241107194357.683732-5-tariqt@nvidia.com Signed-off-by: Jakub Kicinski --- .../net/ethernet/mellanox/mlx5/core/esw/qos.c | 80 ++++++------------- 1 file changed, 24 insertions(+), 56 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c index 82805bb20c76..c1e7b2425ebe 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c @@ -143,10 +143,21 @@ static int esw_qos_sched_elem_config(struct mlx5_esw_sched_node *node, u32 max_r if (!MLX5_CAP_GEN(dev, qos) || !MLX5_CAP_QOS(dev, esw_scheduling)) return -EOPNOTSUPP; - MLX5_SET(scheduling_context, sched_ctx, max_average_bw, max_rate); - MLX5_SET(scheduling_context, sched_ctx, bw_share, bw_share); - bitmask |= MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_MAX_AVERAGE_BW; - bitmask |= MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_BW_SHARE; + if (bw_share && (!MLX5_CAP_QOS(dev, esw_bw_share) || + MLX5_CAP_QOS(dev, max_tsar_bw_share) < MLX5_MIN_BW_SHARE)) + return -EOPNOTSUPP; + + if (node->max_rate == max_rate && node->bw_share == bw_share) + return 0; + + if (node->max_rate != max_rate) { + MLX5_SET(scheduling_context, sched_ctx, max_average_bw, max_rate); + bitmask |= MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_MAX_AVERAGE_BW; + } + if (node->bw_share != bw_share) { + MLX5_SET(scheduling_context, sched_ctx, bw_share, bw_share); + bitmask |= MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_BW_SHARE; + } err = mlx5_modify_scheduling_element_cmd(dev, SCHEDULING_HIERARCHY_E_SWITCH, @@ -160,6 +171,8 @@ static int esw_qos_sched_elem_config(struct mlx5_esw_sched_node *node, u32 max_r return err; } + node->max_rate = max_rate; + node->bw_share = bw_share; if (node->type == SCHED_NODE_TYPE_VPORTS_TSAR) trace_mlx5_esw_node_qos_config(dev, node, node->ix, bw_share, max_rate); else if (node->type == SCHED_NODE_TYPE_VPORT) @@ -217,11 +230,7 @@ static void esw_qos_update_sched_node_bw_share(struct mlx5_esw_sched_node *node, bw_share = esw_qos_calc_bw_share(node->min_rate, divider, fw_max_bw_share); - if (bw_share == node->bw_share) - return; - esw_qos_sched_elem_config(node, node->max_rate, bw_share, extack); - node->bw_share = bw_share; } static void esw_qos_normalize_min_rate(struct mlx5_eswitch *esw, @@ -250,10 +259,6 @@ static int esw_qos_set_node_min_rate(struct mlx5_esw_sched_node *node, { struct mlx5_eswitch *esw = node->esw; - if (min_rate && (!MLX5_CAP_QOS(esw->dev, esw_bw_share) || - MLX5_CAP_QOS(esw->dev, max_tsar_bw_share) < MLX5_MIN_BW_SHARE)) - return -EOPNOTSUPP; - if (min_rate == node->min_rate) return 0; @@ -263,41 +268,6 @@ static int esw_qos_set_node_min_rate(struct mlx5_esw_sched_node *node, return 0; } -static int esw_qos_set_node_max_rate(struct mlx5_esw_sched_node *node, - u32 max_rate, struct netlink_ext_ack *extack) -{ - struct mlx5_esw_sched_node *vport_node; - int err; - - if (node->max_rate == max_rate) - return 0; - - /* Use parent node limit if new max rate is 0. */ - if (!max_rate && node->parent) - max_rate = node->parent->max_rate; - - err = esw_qos_sched_elem_config(node, max_rate, node->bw_share, extack); - if (err) - return err; - - node->max_rate = max_rate; - if (node->type != SCHED_NODE_TYPE_VPORTS_TSAR) - return 0; - - /* Any unlimited vports in the node should be set with the value of the node. 
*/ - list_for_each_entry(vport_node, &node->children, entry) { - if (vport_node->max_rate) - continue; - - err = esw_qos_sched_elem_config(vport_node, max_rate, vport_node->bw_share, extack); - if (err) - NL_SET_ERR_MSG_MOD(extack, - "E-Switch vport implicit rate limit setting failed"); - } - - return err; -} - static int esw_qos_create_node_sched_elem(struct mlx5_core_dev *dev, u32 parent_element_id, u32 *tsar_ix) { @@ -367,7 +337,6 @@ static int esw_qos_update_node_scheduling_element(struct mlx5_vport *vport, struct netlink_ext_ack *extack) { struct mlx5_esw_sched_node *vport_node = vport->qos.sched_node; - u32 max_rate; int err; err = mlx5_destroy_scheduling_element_cmd(curr_node->esw->dev, @@ -378,9 +347,7 @@ static int esw_qos_update_node_scheduling_element(struct mlx5_vport *vport, return err; } - /* Use new node max rate if vport max rate is unlimited. */ - max_rate = vport_node->max_rate ? vport_node->max_rate : new_node->max_rate; - err = esw_qos_vport_create_sched_element(vport, new_node, max_rate, + err = esw_qos_vport_create_sched_element(vport, new_node, vport_node->max_rate, vport_node->bw_share, &vport_node->ix); if (err) { @@ -393,8 +360,7 @@ static int esw_qos_update_node_scheduling_element(struct mlx5_vport *vport, return 0; err_sched: - max_rate = vport_node->max_rate ? vport_node->max_rate : curr_node->max_rate; - if (esw_qos_vport_create_sched_element(vport, curr_node, max_rate, + if (esw_qos_vport_create_sched_element(vport, curr_node, vport_node->max_rate, vport_node->bw_share, &vport_node->ix)) esw_warn(curr_node->esw->dev, "E-Switch vport node restore failed (vport=%d)\n", @@ -707,7 +673,8 @@ int mlx5_esw_qos_set_vport_rate(struct mlx5_vport *vport, u32 max_rate, u32 min_ err = esw_qos_set_node_min_rate(vport->qos.sched_node, min_rate, NULL); if (!err) - err = esw_qos_set_node_max_rate(vport->qos.sched_node, max_rate, NULL); + err = esw_qos_sched_elem_config(vport->qos.sched_node, max_rate, + vport->qos.sched_node->bw_share, NULL); unlock: esw_qos_unlock(esw); return err; @@ -930,7 +897,8 @@ int mlx5_esw_devlink_rate_leaf_tx_max_set(struct devlink_rate *rate_leaf, void * if (err) goto unlock; - err = esw_qos_set_node_max_rate(vport->qos.sched_node, tx_max, extack); + err = esw_qos_sched_elem_config(vport->qos.sched_node, tx_max, + vport->qos.sched_node->bw_share, extack); unlock: esw_qos_unlock(esw); return err; @@ -965,7 +933,7 @@ int mlx5_esw_devlink_rate_node_tx_max_set(struct devlink_rate *rate_node, void * return err; esw_qos_lock(esw); - err = esw_qos_set_node_max_rate(node, tx_max, extack); + err = esw_qos_sched_elem_config(node, tx_max, node->bw_share, extack); esw_qos_unlock(esw); return err; } -- 2.51.0 From 663bc605d0db8782ff9c2704db5ce6cf2ac7fa93 Mon Sep 17 00:00:00 2001 From: Carolina Jubran Date: Thu, 7 Nov 2024 21:43:50 +0200 Subject: [PATCH 09/16] net/mlx5: Generalize scheduling element operations Introduce helper functions to create and destroy scheduling elements, allowing flexible configuration for different scheduling element types. The new helper functions streamline the process by centralizing error handling and logging through esw_qos_sched_elem_op_warn, which now accepts the operation type (create, destroy, or modify). The changes also adjust the esw_qos_vport_enable and mlx5_esw_qos_vport_disable functions to leverage the new generalized create/destroy helpers. The destroy functions now log errors with esw_warn without returning them. 
This prevents unnecessary error handling since the node was already destroyed and no further action is required from callers. Signed-off-by: Carolina Jubran Reviewed-by: Cosmin Ratiu Signed-off-by: Tariq Toukan Link: https://patch.msgid.link/20241107194357.683732-6-tariqt@nvidia.com Signed-off-by: Jakub Kicinski --- .../net/ethernet/mellanox/mlx5/core/esw/qos.c | 157 +++++++++--------- 1 file changed, 76 insertions(+), 81 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c index c1e7b2425ebe..155400d36a1e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c @@ -118,18 +118,49 @@ mlx5_esw_qos_vport_get_parent(const struct mlx5_vport *vport) return vport->qos.sched_node->parent; } -static void esw_qos_sched_elem_config_warn(struct mlx5_esw_sched_node *node, int err) +static void esw_qos_sched_elem_warn(struct mlx5_esw_sched_node *node, int err, const char *op) { if (node->vport) { esw_warn(node->esw->dev, - "E-Switch modify %s scheduling element failed (vport=%d,err=%d)\n", - sched_node_type_str[node->type], node->vport->vport, err); + "E-Switch %s %s scheduling element failed (vport=%d,err=%d)\n", + op, sched_node_type_str[node->type], node->vport->vport, err); return; } esw_warn(node->esw->dev, - "E-Switch modify %s scheduling element failed (err=%d)\n", - sched_node_type_str[node->type], err); + "E-Switch %s %s scheduling element failed (err=%d)\n", + op, sched_node_type_str[node->type], err); +} + +static int esw_qos_node_create_sched_element(struct mlx5_esw_sched_node *node, void *ctx, + struct netlink_ext_ack *extack) +{ + int err; + + err = mlx5_create_scheduling_element_cmd(node->esw->dev, SCHEDULING_HIERARCHY_E_SWITCH, ctx, + &node->ix); + if (err) { + esw_qos_sched_elem_warn(node, err, "create"); + NL_SET_ERR_MSG_MOD(extack, "E-Switch create scheduling element failed"); + } + + return err; +} + +static int esw_qos_node_destroy_sched_element(struct mlx5_esw_sched_node *node, + struct netlink_ext_ack *extack) +{ + int err; + + err = mlx5_destroy_scheduling_element_cmd(node->esw->dev, + SCHEDULING_HIERARCHY_E_SWITCH, + node->ix); + if (err) { + esw_qos_sched_elem_warn(node, err, "destroy"); + NL_SET_ERR_MSG_MOD(extack, "E-Switch destroying scheduling element failed."); + } + + return err; } static int esw_qos_sched_elem_config(struct mlx5_esw_sched_node *node, u32 max_rate, u32 bw_share, @@ -165,7 +196,7 @@ static int esw_qos_sched_elem_config(struct mlx5_esw_sched_node *node, u32 max_r node->ix, bitmask); if (err) { - esw_qos_sched_elem_config_warn(node, err); + esw_qos_sched_elem_warn(node, err, "modify"); NL_SET_ERR_MSG_MOD(extack, "E-Switch modify scheduling element failed"); return err; @@ -295,14 +326,12 @@ static int esw_qos_create_node_sched_elem(struct mlx5_core_dev *dev, u32 parent_ tsar_ix); } -static int -esw_qos_vport_create_sched_element(struct mlx5_vport *vport, struct mlx5_esw_sched_node *parent, - u32 max_rate, u32 bw_share, u32 *sched_elem_ix) +static int esw_qos_vport_create_sched_element(struct mlx5_esw_sched_node *vport_node, u32 bw_share, + struct netlink_ext_ack *extack) { u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {}; - struct mlx5_core_dev *dev = parent->esw->dev; + struct mlx5_core_dev *dev = vport_node->esw->dev; void *attr; - int err; if (!mlx5_qos_element_type_supported(dev, SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT, @@ -312,23 +341,12 @@ esw_qos_vport_create_sched_element(struct mlx5_vport *vport, struct 
mlx5_esw_sch MLX5_SET(scheduling_context, sched_ctx, element_type, SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT); attr = MLX5_ADDR_OF(scheduling_context, sched_ctx, element_attributes); - MLX5_SET(vport_element, attr, vport_number, vport->vport); - MLX5_SET(scheduling_context, sched_ctx, parent_element_id, parent->ix); - MLX5_SET(scheduling_context, sched_ctx, max_average_bw, max_rate); + MLX5_SET(vport_element, attr, vport_number, vport_node->vport->vport); + MLX5_SET(scheduling_context, sched_ctx, parent_element_id, vport_node->parent->ix); + MLX5_SET(scheduling_context, sched_ctx, max_average_bw, vport_node->max_rate); MLX5_SET(scheduling_context, sched_ctx, bw_share, bw_share); - err = mlx5_create_scheduling_element_cmd(dev, - SCHEDULING_HIERARCHY_E_SWITCH, - sched_ctx, - sched_elem_ix); - if (err) { - esw_warn(dev, - "E-Switch create vport scheduling element failed (vport=%d,err=%d)\n", - vport->vport, err); - return err; - } - - return 0; + return esw_qos_node_create_sched_element(vport_node, sched_ctx, extack); } static int esw_qos_update_node_scheduling_element(struct mlx5_vport *vport, @@ -339,30 +357,22 @@ static int esw_qos_update_node_scheduling_element(struct mlx5_vport *vport, struct mlx5_esw_sched_node *vport_node = vport->qos.sched_node; int err; - err = mlx5_destroy_scheduling_element_cmd(curr_node->esw->dev, - SCHEDULING_HIERARCHY_E_SWITCH, - vport_node->ix); - if (err) { - NL_SET_ERR_MSG_MOD(extack, "E-Switch destroy vport scheduling element failed"); + err = esw_qos_node_destroy_sched_element(vport_node, extack); + if (err) return err; - } - err = esw_qos_vport_create_sched_element(vport, new_node, vport_node->max_rate, - vport_node->bw_share, - &vport_node->ix); + esw_qos_node_set_parent(vport_node, new_node); + err = esw_qos_vport_create_sched_element(vport_node, vport_node->bw_share, extack); if (err) { NL_SET_ERR_MSG_MOD(extack, "E-Switch vport node set failed."); goto err_sched; } - esw_qos_node_set_parent(vport->qos.sched_node, new_node); - return 0; err_sched: - if (esw_qos_vport_create_sched_element(vport, curr_node, vport_node->max_rate, - vport_node->bw_share, - &vport_node->ix)) + esw_qos_node_set_parent(vport_node, curr_node); + if (esw_qos_vport_create_sched_element(vport_node, vport_node->bw_share, NULL)) esw_warn(curr_node->esw->dev, "E-Switch vport node restore failed (vport=%d)\n", vport->vport); @@ -425,6 +435,12 @@ static void __esw_qos_free_node(struct mlx5_esw_sched_node *node) kfree(node); } +static void esw_qos_destroy_node(struct mlx5_esw_sched_node *node, struct netlink_ext_ack *extack) +{ + esw_qos_node_destroy_sched_element(node, extack); + __esw_qos_free_node(node); +} + static struct mlx5_esw_sched_node * __esw_qos_create_vports_sched_node(struct mlx5_eswitch *esw, struct mlx5_esw_sched_node *parent, struct netlink_ext_ack *extack) @@ -483,23 +499,13 @@ esw_qos_create_vports_sched_node(struct mlx5_eswitch *esw, struct netlink_ext_ac return node; } -static int __esw_qos_destroy_node(struct mlx5_esw_sched_node *node, struct netlink_ext_ack *extack) +static void __esw_qos_destroy_node(struct mlx5_esw_sched_node *node, struct netlink_ext_ack *extack) { struct mlx5_eswitch *esw = node->esw; - int err; trace_mlx5_esw_node_qos_destroy(esw->dev, node, node->ix); - - err = mlx5_destroy_scheduling_element_cmd(esw->dev, - SCHEDULING_HIERARCHY_E_SWITCH, - node->ix); - if (err) - NL_SET_ERR_MSG_MOD(extack, "E-Switch destroy TSAR_ID failed"); - __esw_qos_free_node(node); - + esw_qos_destroy_node(node, extack); esw_qos_normalize_min_rate(esw, NULL, extack); - - 
return err; } static int esw_qos_create(struct mlx5_eswitch *esw, struct netlink_ext_ack *extack) @@ -584,11 +590,11 @@ static void esw_qos_put(struct mlx5_eswitch *esw) esw_qos_destroy(esw); } -static int esw_qos_vport_enable(struct mlx5_vport *vport, - u32 max_rate, u32 bw_share, struct netlink_ext_ack *extack) +static int esw_qos_vport_enable(struct mlx5_vport *vport, u32 max_rate, u32 bw_share, + struct netlink_ext_ack *extack) { struct mlx5_eswitch *esw = vport->dev->priv.eswitch; - u32 sched_elem_ix; + struct mlx5_esw_sched_node *sched_node; int err; esw_assert_qos_lock_held(esw); @@ -599,29 +605,28 @@ static int esw_qos_vport_enable(struct mlx5_vport *vport, if (err) return err; - err = esw_qos_vport_create_sched_element(vport, esw->qos.node0, max_rate, bw_share, - &sched_elem_ix); - if (err) - goto err_out; - - vport->qos.sched_node = __esw_qos_alloc_node(esw, sched_elem_ix, SCHED_NODE_TYPE_VPORT, - esw->qos.node0); - if (!vport->qos.sched_node) { + sched_node = __esw_qos_alloc_node(esw, 0, SCHED_NODE_TYPE_VPORT, esw->qos.node0); + if (!sched_node) { err = -ENOMEM; goto err_alloc; } - vport->qos.sched_node->vport = vport; + sched_node->max_rate = max_rate; + sched_node->min_rate = 0; + sched_node->bw_share = bw_share; + sched_node->vport = vport; + err = esw_qos_vport_create_sched_element(sched_node, 0, extack); + if (err) + goto err_vport_create; trace_mlx5_esw_vport_qos_create(vport->dev, vport, bw_share, max_rate); + vport->qos.sched_node = sched_node; return 0; +err_vport_create: + __esw_qos_free_node(sched_node); err_alloc: - if (mlx5_destroy_scheduling_element_cmd(esw->dev, - SCHEDULING_HIERARCHY_E_SWITCH, sched_elem_ix)) - esw_warn(esw->dev, "E-Switch destroy vport scheduling element failed.\n"); -err_out: esw_qos_put(esw); return err; @@ -632,7 +637,6 @@ void mlx5_esw_qos_vport_disable(struct mlx5_vport *vport) struct mlx5_eswitch *esw = vport->dev->priv.eswitch; struct mlx5_esw_sched_node *vport_node; struct mlx5_core_dev *dev; - int err; lockdep_assert_held(&esw->state_lock); esw_qos_lock(esw); @@ -645,15 +649,7 @@ void mlx5_esw_qos_vport_disable(struct mlx5_vport *vport) dev = vport_node->esw->dev; trace_mlx5_esw_vport_qos_destroy(dev, vport); - err = mlx5_destroy_scheduling_element_cmd(dev, - SCHEDULING_HIERARCHY_E_SWITCH, - vport_node->ix); - if (err) - esw_warn(dev, - "E-Switch destroy vport scheduling element failed (vport=%d,err=%d)\n", - vport->vport, err); - - __esw_qos_free_node(vport_node); + esw_qos_destroy_node(vport_node, NULL); memset(&vport->qos, 0, sizeof(vport->qos)); esw_qos_put(esw); @@ -974,13 +970,12 @@ int mlx5_esw_devlink_rate_node_del(struct devlink_rate *rate_node, void *priv, { struct mlx5_esw_sched_node *node = priv; struct mlx5_eswitch *esw = node->esw; - int err; esw_qos_lock(esw); - err = __esw_qos_destroy_node(node, extack); + __esw_qos_destroy_node(node, extack); esw_qos_put(esw); esw_qos_unlock(esw); - return err; + return 0; } int mlx5_esw_qos_vport_update_node(struct mlx5_vport *vport, -- 2.51.0 From d67bfd10e668bfca717e0d94112f04f61c58dad7 Mon Sep 17 00:00:00 2001 From: Carolina Jubran Date: Thu, 7 Nov 2024 21:43:51 +0200 Subject: [PATCH 10/16] net/mlx5: Integrate esw_qos_vport_enable logic into rate operations Fold the esw_qos_vport_enable function into operations for configuring maximum and minimum rates, simplifying QoS logic. This change consolidates enabling and updating the scheduling element configuration, streamlining how vport QoS is initialized and adjusted. 
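The resulting pattern is visible in the new max-rate helper (condensed from the diff below; the min-rate helper is symmetric): if the vport has no scheduling node yet, QoS is enabled on the spot with the requested limit, otherwise the existing scheduling element is simply reconfigured:

  static int mlx5_esw_qos_set_vport_max_rate(struct mlx5_vport *vport, u32 max_rate,
                                             struct netlink_ext_ack *extack)
  {
          struct mlx5_esw_sched_node *vport_node = vport->qos.sched_node;

          esw_assert_qos_lock_held(vport->dev->priv.eswitch);

          if (!vport_node)
                  return esw_qos_vport_enable(vport, NULL, max_rate, 0, extack);

          return esw_qos_sched_elem_config(vport_node, max_rate, vport_node->bw_share,
                                           extack);
  }
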
Signed-off-by: Carolina Jubran Reviewed-by: Cosmin Ratiu Signed-off-by: Tariq Toukan Link: https://patch.msgid.link/20241107194357.683732-7-tariqt@nvidia.com Signed-off-by: Jakub Kicinski --- .../net/ethernet/mellanox/mlx5/core/esw/qos.c | 87 +++++++++---------- 1 file changed, 39 insertions(+), 48 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c index 155400d36a1e..35e493924c09 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c @@ -590,22 +590,21 @@ static void esw_qos_put(struct mlx5_eswitch *esw) esw_qos_destroy(esw); } -static int esw_qos_vport_enable(struct mlx5_vport *vport, u32 max_rate, u32 bw_share, - struct netlink_ext_ack *extack) +static int esw_qos_vport_enable(struct mlx5_vport *vport, struct mlx5_esw_sched_node *parent, + u32 max_rate, u32 bw_share, struct netlink_ext_ack *extack) { struct mlx5_eswitch *esw = vport->dev->priv.eswitch; struct mlx5_esw_sched_node *sched_node; int err; esw_assert_qos_lock_held(esw); - if (vport->qos.sched_node) - return 0; err = esw_qos_get(esw, extack); if (err) return err; - sched_node = __esw_qos_alloc_node(esw, 0, SCHED_NODE_TYPE_VPORT, esw->qos.node0); + parent = parent ?: esw->qos.node0; + sched_node = __esw_qos_alloc_node(parent->esw, 0, SCHED_NODE_TYPE_VPORT, parent); if (!sched_node) { err = -ENOMEM; goto err_alloc; @@ -657,21 +656,42 @@ unlock: esw_qos_unlock(esw); } +static int mlx5_esw_qos_set_vport_max_rate(struct mlx5_vport *vport, u32 max_rate, + struct netlink_ext_ack *extack) +{ + struct mlx5_esw_sched_node *vport_node = vport->qos.sched_node; + + esw_assert_qos_lock_held(vport->dev->priv.eswitch); + + if (!vport_node) + return esw_qos_vport_enable(vport, NULL, max_rate, 0, extack); + else + return esw_qos_sched_elem_config(vport_node, max_rate, vport_node->bw_share, + extack); +} + +static int mlx5_esw_qos_set_vport_min_rate(struct mlx5_vport *vport, u32 min_rate, + struct netlink_ext_ack *extack) +{ + struct mlx5_esw_sched_node *vport_node = vport->qos.sched_node; + + esw_assert_qos_lock_held(vport->dev->priv.eswitch); + + if (!vport_node) + return esw_qos_vport_enable(vport, NULL, 0, min_rate, extack); + else + return esw_qos_set_node_min_rate(vport_node, min_rate, extack); +} + int mlx5_esw_qos_set_vport_rate(struct mlx5_vport *vport, u32 max_rate, u32 min_rate) { struct mlx5_eswitch *esw = vport->dev->priv.eswitch; int err; esw_qos_lock(esw); - err = esw_qos_vport_enable(vport, 0, 0, NULL); - if (err) - goto unlock; - - err = esw_qos_set_node_min_rate(vport->qos.sched_node, min_rate, NULL); + err = mlx5_esw_qos_set_vport_min_rate(vport, min_rate, NULL); if (!err) - err = esw_qos_sched_elem_config(vport->qos.sched_node, max_rate, - vport->qos.sched_node->bw_share, NULL); -unlock: + err = mlx5_esw_qos_set_vport_max_rate(vport, max_rate, NULL); esw_qos_unlock(esw); return err; } @@ -757,10 +777,8 @@ static int mlx5_esw_qos_link_speed_verify(struct mlx5_core_dev *mdev, int mlx5_esw_qos_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num, u32 rate_mbps) { - u32 ctx[MLX5_ST_SZ_DW(scheduling_context)] = {}; struct mlx5_vport *vport; u32 link_speed_max; - u32 bitmask; int err; vport = mlx5_eswitch_get_vport(esw, vport_num); @@ -779,20 +797,7 @@ int mlx5_esw_qos_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num, u32 } esw_qos_lock(esw); - if (!vport->qos.sched_node) { - /* Eswitch QoS wasn't enabled yet. Enable it and vport QoS. 
*/ - err = esw_qos_vport_enable(vport, rate_mbps, 0, NULL); - } else { - struct mlx5_core_dev *dev = vport->qos.sched_node->parent->esw->dev; - - MLX5_SET(scheduling_context, ctx, max_average_bw, rate_mbps); - bitmask = MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_MAX_AVERAGE_BW; - err = mlx5_modify_scheduling_element_cmd(dev, - SCHEDULING_HIERARCHY_E_SWITCH, - ctx, - vport->qos.sched_node->ix, - bitmask); - } + err = mlx5_esw_qos_set_vport_max_rate(vport, rate_mbps, NULL); esw_qos_unlock(esw); return err; @@ -863,12 +868,7 @@ int mlx5_esw_devlink_rate_leaf_tx_share_set(struct devlink_rate *rate_leaf, void return err; esw_qos_lock(esw); - err = esw_qos_vport_enable(vport, 0, 0, extack); - if (err) - goto unlock; - - err = esw_qos_set_node_min_rate(vport->qos.sched_node, tx_share, extack); -unlock: + err = mlx5_esw_qos_set_vport_min_rate(vport, tx_share, extack); esw_qos_unlock(esw); return err; } @@ -889,13 +889,7 @@ int mlx5_esw_devlink_rate_leaf_tx_max_set(struct devlink_rate *rate_leaf, void * return err; esw_qos_lock(esw); - err = esw_qos_vport_enable(vport, 0, 0, extack); - if (err) - goto unlock; - - err = esw_qos_sched_elem_config(vport->qos.sched_node, tx_max, - vport->qos.sched_node->bw_share, extack); -unlock: + err = mlx5_esw_qos_set_vport_max_rate(vport, tx_max, extack); esw_qos_unlock(esw); return err; } @@ -991,13 +985,10 @@ int mlx5_esw_qos_vport_update_node(struct mlx5_vport *vport, } esw_qos_lock(esw); - if (!vport->qos.sched_node && !node) - goto unlock; - - err = esw_qos_vport_enable(vport, 0, 0, extack); - if (!err) + if (!vport->qos.sched_node && node) + err = esw_qos_vport_enable(vport, node, 0, 0, extack); + else if (vport->qos.sched_node) err = esw_qos_vport_update_node(vport, node, extack); -unlock: esw_qos_unlock(esw); return err; } -- 2.51.0 From be034baba83e2a80a0b2c0f24c08547b6eedc79a Mon Sep 17 00:00:00 2001 From: Carolina Jubran Date: Thu, 7 Nov 2024 21:43:52 +0200 Subject: [PATCH 11/16] net/mlx5: Make vport QoS enablement more flexible for future extensions Refactor esw_qos_vport_enable to support more generic configurations, allowing it to be reused for new vport node types in future patches. This refactor includes a new way to change the vport parent node by disabling the current setup and re-enabling it with the new parent. This change sets the foundation for adapting configuration based on the parent type in future patches. 
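After this change, attaching a vport to a scheduling parent becomes a small, reusable step (shown below as it appears in the diff), which is what allows the parent to be changed later by disabling the vport's current setup and re-enabling it under the new parent via mlx5_esw_qos_vport_update_parent():

  static int esw_qos_vport_enable(struct mlx5_vport *vport, struct mlx5_esw_sched_node *parent,
                                  struct netlink_ext_ack *extack)
  {
          int err;

          esw_assert_qos_lock_held(vport->dev->priv.eswitch);

          esw_qos_node_set_parent(vport->qos.sched_node, parent);
          err = esw_qos_vport_create_sched_element(vport->qos.sched_node, extack);
          if (err)
                  return err;

          esw_qos_normalize_min_rate(parent->esw, parent, extack);

          return 0;
  }
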
Signed-off-by: Carolina Jubran Reviewed-by: Cosmin Ratiu Signed-off-by: Tariq Toukan Link: https://patch.msgid.link/20241107194357.683732-8-tariqt@nvidia.com Signed-off-by: Jakub Kicinski --- .../mellanox/mlx5/core/esw/devlink_port.c | 2 +- .../net/ethernet/mellanox/mlx5/core/esw/qos.c | 193 ++++++++---------- .../net/ethernet/mellanox/mlx5/core/esw/qos.h | 1 + .../net/ethernet/mellanox/mlx5/core/eswitch.c | 6 +- .../net/ethernet/mellanox/mlx5/core/eswitch.h | 5 +- 5 files changed, 96 insertions(+), 111 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c index d0f38818363f..982fe3714683 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c @@ -195,7 +195,7 @@ void mlx5_esw_offloads_devlink_port_unregister(struct mlx5_vport *vport) return; dl_port = vport->dl_port; - mlx5_esw_qos_vport_update_node(vport, NULL, NULL); + mlx5_esw_qos_vport_update_parent(vport, NULL, NULL); devl_rate_leaf_destroy(&dl_port->dl_port); devl_port_unregister(&dl_port->dl_port); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c index 35e493924c09..8b7c843446e1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c @@ -101,6 +101,12 @@ esw_qos_node_set_parent(struct mlx5_esw_sched_node *node, struct mlx5_esw_sched_ node->esw = parent->esw; } +void mlx5_esw_qos_vport_qos_free(struct mlx5_vport *vport) +{ + kfree(vport->qos.sched_node); + memset(&vport->qos, 0, sizeof(vport->qos)); +} + u32 mlx5_esw_qos_vport_get_sched_elem_ix(const struct mlx5_vport *vport) { if (!vport->qos.sched_node) @@ -326,7 +332,7 @@ static int esw_qos_create_node_sched_elem(struct mlx5_core_dev *dev, u32 parent_ tsar_ix); } -static int esw_qos_vport_create_sched_element(struct mlx5_esw_sched_node *vport_node, u32 bw_share, +static int esw_qos_vport_create_sched_element(struct mlx5_esw_sched_node *vport_node, struct netlink_ext_ack *extack) { u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {}; @@ -344,69 +350,10 @@ static int esw_qos_vport_create_sched_element(struct mlx5_esw_sched_node *vport_ MLX5_SET(vport_element, attr, vport_number, vport_node->vport->vport); MLX5_SET(scheduling_context, sched_ctx, parent_element_id, vport_node->parent->ix); MLX5_SET(scheduling_context, sched_ctx, max_average_bw, vport_node->max_rate); - MLX5_SET(scheduling_context, sched_ctx, bw_share, bw_share); return esw_qos_node_create_sched_element(vport_node, sched_ctx, extack); } -static int esw_qos_update_node_scheduling_element(struct mlx5_vport *vport, - struct mlx5_esw_sched_node *curr_node, - struct mlx5_esw_sched_node *new_node, - struct netlink_ext_ack *extack) -{ - struct mlx5_esw_sched_node *vport_node = vport->qos.sched_node; - int err; - - err = esw_qos_node_destroy_sched_element(vport_node, extack); - if (err) - return err; - - esw_qos_node_set_parent(vport_node, new_node); - err = esw_qos_vport_create_sched_element(vport_node, vport_node->bw_share, extack); - if (err) { - NL_SET_ERR_MSG_MOD(extack, "E-Switch vport node set failed."); - goto err_sched; - } - - return 0; - -err_sched: - esw_qos_node_set_parent(vport_node, curr_node); - if (esw_qos_vport_create_sched_element(vport_node, vport_node->bw_share, NULL)) - esw_warn(curr_node->esw->dev, "E-Switch vport node restore failed (vport=%d)\n", - vport->vport); - - return err; -} - -static int 
esw_qos_vport_update_node(struct mlx5_vport *vport, - struct mlx5_esw_sched_node *node, - struct netlink_ext_ack *extack) -{ - struct mlx5_esw_sched_node *vport_node = vport->qos.sched_node; - struct mlx5_eswitch *esw = vport->dev->priv.eswitch; - struct mlx5_esw_sched_node *new_node, *curr_node; - int err; - - esw_assert_qos_lock_held(esw); - curr_node = vport_node->parent; - new_node = node ?: esw->qos.node0; - if (curr_node == new_node) - return 0; - - err = esw_qos_update_node_scheduling_element(vport, curr_node, new_node, extack); - if (err) - return err; - - /* Recalculate bw share weights of old and new nodes */ - if (vport_node->bw_share || new_node->bw_share) { - esw_qos_normalize_min_rate(curr_node->esw, curr_node, extack); - esw_qos_normalize_min_rate(new_node->esw, new_node, extack); - } - - return 0; -} - static struct mlx5_esw_sched_node * __esw_qos_alloc_node(struct mlx5_eswitch *esw, u32 tsar_ix, enum sched_node_type type, struct mlx5_esw_sched_node *parent) @@ -590,43 +537,62 @@ static void esw_qos_put(struct mlx5_eswitch *esw) esw_qos_destroy(esw); } +static void esw_qos_vport_disable(struct mlx5_vport *vport, struct netlink_ext_ack *extack) +{ + struct mlx5_esw_sched_node *vport_node = vport->qos.sched_node; + struct mlx5_esw_sched_node *parent = vport_node->parent; + + esw_qos_node_destroy_sched_element(vport_node, extack); + + vport_node->bw_share = 0; + list_del_init(&vport_node->entry); + esw_qos_normalize_min_rate(parent->esw, parent, extack); + + trace_mlx5_esw_vport_qos_destroy(vport_node->esw->dev, vport); +} + static int esw_qos_vport_enable(struct mlx5_vport *vport, struct mlx5_esw_sched_node *parent, - u32 max_rate, u32 bw_share, struct netlink_ext_ack *extack) + struct netlink_ext_ack *extack) +{ + int err; + + esw_assert_qos_lock_held(vport->dev->priv.eswitch); + + esw_qos_node_set_parent(vport->qos.sched_node, parent); + err = esw_qos_vport_create_sched_element(vport->qos.sched_node, extack); + if (err) + return err; + + esw_qos_normalize_min_rate(parent->esw, parent, extack); + + return 0; +} + +static int mlx5_esw_qos_vport_enable(struct mlx5_vport *vport, enum sched_node_type type, + struct mlx5_esw_sched_node *parent, u32 max_rate, + u32 min_rate, struct netlink_ext_ack *extack) { struct mlx5_eswitch *esw = vport->dev->priv.eswitch; struct mlx5_esw_sched_node *sched_node; int err; esw_assert_qos_lock_held(esw); - err = esw_qos_get(esw, extack); if (err) return err; parent = parent ?: esw->qos.node0; - sched_node = __esw_qos_alloc_node(parent->esw, 0, SCHED_NODE_TYPE_VPORT, parent); - if (!sched_node) { - err = -ENOMEM; - goto err_alloc; - } + sched_node = __esw_qos_alloc_node(parent->esw, 0, type, parent); + if (!sched_node) + return -ENOMEM; sched_node->max_rate = max_rate; - sched_node->min_rate = 0; - sched_node->bw_share = bw_share; + sched_node->min_rate = min_rate; sched_node->vport = vport; - err = esw_qos_vport_create_sched_element(sched_node, 0, extack); - if (err) - goto err_vport_create; - - trace_mlx5_esw_vport_qos_create(vport->dev, vport, bw_share, max_rate); vport->qos.sched_node = sched_node; - - return 0; - -err_vport_create: - __esw_qos_free_node(sched_node); -err_alloc: - esw_qos_put(esw); + err = esw_qos_vport_enable(vport, parent, extack); + if (err) + esw_qos_put(esw); return err; } @@ -634,23 +600,18 @@ err_alloc: void mlx5_esw_qos_vport_disable(struct mlx5_vport *vport) { struct mlx5_eswitch *esw = vport->dev->priv.eswitch; - struct mlx5_esw_sched_node *vport_node; - struct mlx5_core_dev *dev; + struct mlx5_esw_sched_node 
*parent; lockdep_assert_held(&esw->state_lock); esw_qos_lock(esw); - vport_node = vport->qos.sched_node; - if (!vport_node) + if (!vport->qos.sched_node) goto unlock; - WARN(vport_node->parent != esw->qos.node0, - "Disabling QoS on port before detaching it from node"); - - dev = vport_node->esw->dev; - trace_mlx5_esw_vport_qos_destroy(dev, vport); - esw_qos_destroy_node(vport_node, NULL); - memset(&vport->qos, 0, sizeof(vport->qos)); + parent = vport->qos.sched_node->parent; + WARN(parent != esw->qos.node0, "Disabling QoS on port before detaching it from node"); + esw_qos_vport_disable(vport, NULL); + mlx5_esw_qos_vport_qos_free(vport); esw_qos_put(esw); unlock: esw_qos_unlock(esw); @@ -664,7 +625,8 @@ static int mlx5_esw_qos_set_vport_max_rate(struct mlx5_vport *vport, u32 max_rat esw_assert_qos_lock_held(vport->dev->priv.eswitch); if (!vport_node) - return esw_qos_vport_enable(vport, NULL, max_rate, 0, extack); + return mlx5_esw_qos_vport_enable(vport, SCHED_NODE_TYPE_VPORT, NULL, max_rate, 0, + extack); else return esw_qos_sched_elem_config(vport_node, max_rate, vport_node->bw_share, extack); @@ -678,7 +640,8 @@ static int mlx5_esw_qos_set_vport_min_rate(struct mlx5_vport *vport, u32 min_rat esw_assert_qos_lock_held(vport->dev->priv.eswitch); if (!vport_node) - return esw_qos_vport_enable(vport, NULL, 0, min_rate, extack); + return mlx5_esw_qos_vport_enable(vport, SCHED_NODE_TYPE_VPORT, NULL, 0, min_rate, + extack); else return esw_qos_set_node_min_rate(vport_node, min_rate, extack); } @@ -711,6 +674,31 @@ bool mlx5_esw_qos_get_vport_rate(struct mlx5_vport *vport, u32 *max_rate, u32 *m return enabled; } +static int esw_qos_vport_update_parent(struct mlx5_vport *vport, struct mlx5_esw_sched_node *parent, + struct netlink_ext_ack *extack) +{ + struct mlx5_eswitch *esw = vport->dev->priv.eswitch; + struct mlx5_esw_sched_node *curr_parent; + int err; + + esw_assert_qos_lock_held(esw); + curr_parent = vport->qos.sched_node->parent; + parent = parent ?: esw->qos.node0; + if (curr_parent == parent) + return 0; + + esw_qos_vport_disable(vport, extack); + + err = esw_qos_vport_enable(vport, parent, extack); + if (err) { + if (esw_qos_vport_enable(vport, curr_parent, NULL)) + esw_warn(parent->esw->dev, "vport restore QoS failed (vport=%d)\n", + vport->vport); + } + + return err; +} + static u32 mlx5_esw_qos_lag_link_speed_get_locked(struct mlx5_core_dev *mdev) { struct ethtool_link_ksettings lksettings; @@ -972,23 +960,22 @@ int mlx5_esw_devlink_rate_node_del(struct devlink_rate *rate_node, void *priv, return 0; } -int mlx5_esw_qos_vport_update_node(struct mlx5_vport *vport, - struct mlx5_esw_sched_node *node, - struct netlink_ext_ack *extack) +int mlx5_esw_qos_vport_update_parent(struct mlx5_vport *vport, struct mlx5_esw_sched_node *parent, + struct netlink_ext_ack *extack) { struct mlx5_eswitch *esw = vport->dev->priv.eswitch; int err = 0; - if (node && node->esw != esw) { + if (parent && parent->esw != esw) { NL_SET_ERR_MSG_MOD(extack, "Cross E-Switch scheduling is not supported"); return -EOPNOTSUPP; } esw_qos_lock(esw); - if (!vport->qos.sched_node && node) - err = esw_qos_vport_enable(vport, node, 0, 0, extack); + if (!vport->qos.sched_node && parent) + err = mlx5_esw_qos_vport_enable(vport, SCHED_NODE_TYPE_VPORT, parent, 0, 0, extack); else if (vport->qos.sched_node) - err = esw_qos_vport_update_node(vport, node, extack); + err = esw_qos_vport_update_parent(vport, parent, extack); esw_qos_unlock(esw); return err; } @@ -1002,8 +989,8 @@ int mlx5_esw_devlink_rate_parent_set(struct 
devlink_rate *devlink_rate, struct mlx5_vport *vport = priv; if (!parent) - return mlx5_esw_qos_vport_update_node(vport, NULL, extack); + return mlx5_esw_qos_vport_update_parent(vport, NULL, extack); node = parent_priv; - return mlx5_esw_qos_vport_update_node(vport, node, extack); + return mlx5_esw_qos_vport_update_parent(vport, node, extack); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.h b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.h index 61a6fdd5c267..6eb8f6a648c8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.h @@ -13,6 +13,7 @@ int mlx5_esw_qos_set_vport_rate(struct mlx5_vport *evport, u32 max_rate, u32 min bool mlx5_esw_qos_get_vport_rate(struct mlx5_vport *vport, u32 *max_rate, u32 *min_rate); void mlx5_esw_qos_vport_disable(struct mlx5_vport *vport); +void mlx5_esw_qos_vport_qos_free(struct mlx5_vport *vport); u32 mlx5_esw_qos_vport_get_sched_elem_ix(const struct mlx5_vport *vport); struct mlx5_esw_sched_node *mlx5_esw_qos_vport_get_parent(const struct mlx5_vport *vport); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index d0dab8f4e1a3..7fb8a3381f84 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c @@ -1061,8 +1061,7 @@ static void mlx5_eswitch_clear_vf_vports_info(struct mlx5_eswitch *esw) unsigned long i; mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs) { - kfree(vport->qos.sched_node); - memset(&vport->qos, 0, sizeof(vport->qos)); + mlx5_esw_qos_vport_qos_free(vport); memset(&vport->info, 0, sizeof(vport->info)); vport->info.link_state = MLX5_VPORT_ADMIN_STATE_AUTO; } @@ -1074,8 +1073,7 @@ static void mlx5_eswitch_clear_ec_vf_vports_info(struct mlx5_eswitch *esw) unsigned long i; mlx5_esw_for_each_ec_vf_vport(esw, i, vport, esw->esw_funcs.num_ec_vfs) { - kfree(vport->qos.sched_node); - memset(&vport->qos, 0, sizeof(vport->qos)); + mlx5_esw_qos_vport_qos_free(vport); memset(&vport->info, 0, sizeof(vport->info)); vport->info.link_state = MLX5_VPORT_ADMIN_STATE_AUTO; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h index 14dd42d44e6f..a83d41121db6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h @@ -427,9 +427,8 @@ int mlx5_eswitch_set_vport_trust(struct mlx5_eswitch *esw, u16 vport_num, bool setting); int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, u16 vport, u32 max_rate, u32 min_rate); -int mlx5_esw_qos_vport_update_node(struct mlx5_vport *vport, - struct mlx5_esw_sched_node *node, - struct netlink_ext_ack *extack); +int mlx5_esw_qos_vport_update_parent(struct mlx5_vport *vport, struct mlx5_esw_sched_node *node, + struct netlink_ext_ack *extack); int mlx5_eswitch_set_vepa(struct mlx5_eswitch *esw, u8 setting); int mlx5_eswitch_get_vepa(struct mlx5_eswitch *esw, u8 *setting); int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw, -- 2.51.0 From 8a0ee54027b1fbccda3f2683dafec9b7216993a4 Mon Sep 17 00:00:00 2001 From: Dragos Tatulea Date: Thu, 7 Nov 2024 21:43:53 +0200 Subject: [PATCH 12/16] net/mlx5e: SHAMPO, Simplify UMR allocation for headers Allocating page fragments for header data split is currently more complicated than it should be. That's because the number of KSM entries allocated is not aligned to the number of headers per page. 
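For scale, assuming 4 KiB pages: MLX5E_SHAMPO_WQ_HEADER_PER_PAGE = PAGE_SIZE >> MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE = 4096 >> 9 = 8, so a KSM count that is not a multiple of 8 starts or ends in the middle of a header page.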
This leads to having leftovers in the next allocation which require additional accounting and needlessly complicated code. This patch aligns (down) the number of KSM entries in the UMR WQE to the number of headers per page by: 1) Aligning the max number of entries allocated per UMR WQE (max_ksm_entries) to MLX5E_SHAMPO_WQ_HEADER_PER_PAGE. 2) Aligning the total number of free headers to MLX5E_SHAMPO_WQ_HEADER_PER_PAGE. ... and then it drops the extra accounting code from mlx5e_build_shampo_hd_umr(). Although the number of entries allocated per UMR WQE is slightly smaller due to aligning down, no performance impact was observed. Signed-off-by: Dragos Tatulea Signed-off-by: Tariq Toukan Link: https://patch.msgid.link/20241107194357.683732-9-tariqt@nvidia.com Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/mellanox/mlx5/core/en.h | 1 - .../net/ethernet/mellanox/mlx5/core/en_rx.c | 29 ++++++++----------- 2 files changed, 12 insertions(+), 18 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 58f3df784ded..4449a57ba5b2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -633,7 +633,6 @@ struct mlx5e_shampo_hd { u16 pi; u16 ci; __be32 key; - u64 last_addr; }; struct mlx5e_hw_gro_data { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index d81083f4f316..e044e5d11f05 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -648,30 +648,26 @@ static int mlx5e_build_shampo_hd_umr(struct mlx5e_rq *rq, u16 ksm_entries, u16 index) { struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo; - u16 entries, pi, header_offset, err, wqe_bbs, new_entries; + u16 pi, header_offset, err, wqe_bbs; u32 lkey = rq->mdev->mlx5e_res.hw_objs.mkey; u16 page_index = shampo->curr_page_index; struct mlx5e_frag_page *frag_page; - u64 addr = shampo->last_addr; struct mlx5e_dma_info *dma_info; struct mlx5e_umr_wqe *umr_wqe; int headroom, i; + u64 addr = 0; headroom = rq->buff.headroom; - new_entries = ksm_entries - (shampo->pi & (MLX5_UMR_KSM_NUM_ENTRIES_ALIGNMENT - 1)); - entries = ALIGN(ksm_entries, MLX5_UMR_KSM_NUM_ENTRIES_ALIGNMENT); - wqe_bbs = MLX5E_KSM_UMR_WQEBBS(entries); + wqe_bbs = MLX5E_KSM_UMR_WQEBBS(ksm_entries); pi = mlx5e_icosq_get_next_pi(sq, wqe_bbs); umr_wqe = mlx5_wq_cyc_get_wqe(&sq->wq, pi); - build_ksm_umr(sq, umr_wqe, shampo->key, index, entries); + build_ksm_umr(sq, umr_wqe, shampo->key, index, ksm_entries); frag_page = &shampo->pages[page_index]; - for (i = 0; i < entries; i++, index++) { + WARN_ON_ONCE(ksm_entries & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1)); + for (i = 0; i < ksm_entries; i++, index++) { dma_info = &shampo->info[index]; - if (i >= ksm_entries || (index < shampo->pi && shampo->pi - index < - MLX5_UMR_KSM_NUM_ENTRIES_ALIGNMENT)) - goto update_ksm; header_offset = (index & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1)) << MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE; if (!(header_offset & (PAGE_SIZE - 1))) { @@ -691,7 +687,6 @@ static int mlx5e_build_shampo_hd_umr(struct mlx5e_rq *rq, dma_info->frag_page = frag_page; } -update_ksm: umr_wqe->inline_ksms[i] = (struct mlx5_ksm) { .key = cpu_to_be32(lkey), .va = cpu_to_be64(dma_info->addr + headroom), @@ -701,12 +696,11 @@ update_ksm: sq->db.wqe_info[pi] = (struct mlx5e_icosq_wqe_info) { .wqe_type = MLX5E_ICOSQ_WQE_SHAMPO_HD_UMR, .num_wqebbs = wqe_bbs, - .shampo.len = new_entries, + .shampo.len = 
ksm_entries, }; - shampo->pi = (shampo->pi + new_entries) & (shampo->hd_per_wq - 1); + shampo->pi = (shampo->pi + ksm_entries) & (shampo->hd_per_wq - 1); shampo->curr_page_index = page_index; - shampo->last_addr = addr; sq->pc += wqe_bbs; sq->doorbell_cseg = &umr_wqe->ctrl; @@ -731,7 +725,8 @@ static int mlx5e_alloc_rx_hd_mpwqe(struct mlx5e_rq *rq) struct mlx5e_icosq *sq = rq->icosq; int i, err, max_ksm_entries, len; - max_ksm_entries = MLX5E_MAX_KSM_PER_WQE(rq->mdev); + max_ksm_entries = ALIGN_DOWN(MLX5E_MAX_KSM_PER_WQE(rq->mdev), + MLX5E_SHAMPO_WQ_HEADER_PER_PAGE); ksm_entries = bitmap_find_window(shampo->bitmap, shampo->hd_per_wqe, shampo->hd_per_wq, shampo->pi); @@ -739,8 +734,8 @@ static int mlx5e_alloc_rx_hd_mpwqe(struct mlx5e_rq *rq) if (!ksm_entries) return 0; - ksm_entries += (shampo->pi & (MLX5_UMR_KSM_NUM_ENTRIES_ALIGNMENT - 1)); - index = ALIGN_DOWN(shampo->pi, MLX5_UMR_KSM_NUM_ENTRIES_ALIGNMENT); + /* pi is aligned to MLX5E_SHAMPO_WQ_HEADER_PER_PAGE */ + index = shampo->pi; entries_before = shampo->hd_per_wq - index; if (unlikely(entries_before < ksm_entries)) -- 2.51.0 From 1a4b5885770401b7e8de6546760686dcd2d9b784 Mon Sep 17 00:00:00 2001 From: Dragos Tatulea Date: Thu, 7 Nov 2024 21:43:54 +0200 Subject: [PATCH 13/16] net/mlx5e: SHAMPO, Fix page_index calculation inconsistency When calculating the index for the next frag page slot, the divisor is incorrect: it should be the number of pages per queue not the number of headers per queue. This is currently harmless because frag pages are not used directly, but they are intermediated through the info array. But it needs to be fixed as an upcoming patch will get rid of the info array. This patch introduces a new pages per queue variable and plugs it in the formula. Now that this variable exists, additional code can be simplified in the SHAMPO initialization code. 
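A tiny standalone illustration of why the wrap-around mask matters (the queue size and the 4 KiB page size are assumptions for the example; the driver uses the new shampo->pages_per_wq field shown in the diff below):

	#include <stdio.h>

	int main(void)
	{
		unsigned int hd_per_wq = 1024;     /* headers in the queue (illustrative) */
		unsigned int hd_per_page = 8;      /* 4096 >> MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE(9) */
		unsigned int pages_per_wq = hd_per_wq / hd_per_page;  /* 128 page slots */
		unsigned int page_index = pages_per_wq - 1;           /* last valid slot */

		/* Correct divisor: wrap within the page array -> prints 0. */
		printf("%u\n", (page_index + 1) & (pages_per_wq - 1));
		/* Wrong divisor (headers per queue): prints 128, one past the last valid slot. */
		printf("%u\n", (page_index + 1) & (hd_per_wq - 1));
		return 0;
	}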
Signed-off-by: Dragos Tatulea Signed-off-by: Tariq Toukan Link: https://patch.msgid.link/20241107194357.683732-10-tariqt@nvidia.com Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/mellanox/mlx5/core/en.h | 1 + drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 8 +++----- drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 2 +- 3 files changed, 5 insertions(+), 6 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 4449a57ba5b2..b4abb094f01a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -629,6 +629,7 @@ struct mlx5e_shampo_hd { u16 curr_page_index; u32 hd_per_wq; u16 hd_per_wqe; + u16 pages_per_wq; unsigned long *bitmap; u16 pi; u16 ci; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 59d7a0e28f24..3ca1ef1f39a5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -767,8 +767,6 @@ static int mlx5_rq_shampo_alloc(struct mlx5_core_dev *mdev, u32 *pool_size, int node) { - void *wqc = MLX5_ADDR_OF(rqc, rqp->rqc, wq); - int wq_size; int err; if (!test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state)) @@ -793,9 +791,9 @@ static int mlx5_rq_shampo_alloc(struct mlx5_core_dev *mdev, cpu_to_be32(rq->mpwqe.shampo->mkey); rq->mpwqe.shampo->hd_per_wqe = mlx5e_shampo_hd_per_wqe(mdev, params, rqp); - wq_size = BIT(MLX5_GET(wq, wqc, log_wq_sz)); - *pool_size += (rq->mpwqe.shampo->hd_per_wqe * wq_size) / - MLX5E_SHAMPO_WQ_HEADER_PER_PAGE; + rq->mpwqe.shampo->pages_per_wq = + rq->mpwqe.shampo->hd_per_wq / MLX5E_SHAMPO_WQ_HEADER_PER_PAGE; + *pool_size += rq->mpwqe.shampo->pages_per_wq; return 0; err_hw_gro_data: diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index e044e5d11f05..76a975667c77 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -671,7 +671,7 @@ static int mlx5e_build_shampo_hd_umr(struct mlx5e_rq *rq, header_offset = (index & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1)) << MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE; if (!(header_offset & (PAGE_SIZE - 1))) { - page_index = (page_index + 1) & (shampo->hd_per_wq - 1); + page_index = (page_index + 1) & (shampo->pages_per_wq - 1); frag_page = &shampo->pages[page_index]; err = mlx5e_page_alloc_fragmented(rq, frag_page); -- 2.51.0 From 4f56868b7132bb3c7e5a2c1930b6402718248a35 Mon Sep 17 00:00:00 2001 From: Dragos Tatulea Date: Thu, 7 Nov 2024 21:43:55 +0200 Subject: [PATCH 14/16] net/mlx5e: SHAMPO, Change frag page setup order during allocation Now that the UMR allocation has been simplified, it is no longer possible to have a leftover page from a previous call to mlx5e_build_shampo_hd_umr(). This patch simplifies the code by switching the order of operations: first take the frag page and then increment the index. This is more straightforward and it also paves the way for dropping the info array. 
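The reorder amounts to swapping two statements in the per-page branch, shown here out of context (the full hunk follows):

	/* before: advance the page slot, then take it */
	page_index = (page_index + 1) & (shampo->pages_per_wq - 1);
	frag_page = &shampo->pages[page_index];

	/* after: take the current page slot, then advance */
	frag_page = &shampo->pages[page_index];
	page_index = (page_index + 1) & (shampo->pages_per_wq - 1);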
Signed-off-by: Dragos Tatulea Signed-off-by: Tariq Toukan Link: https://patch.msgid.link/20241107194357.683732-11-tariqt@nvidia.com Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index 76a975667c77..637069c1b988 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -651,7 +651,7 @@ static int mlx5e_build_shampo_hd_umr(struct mlx5e_rq *rq, u16 pi, header_offset, err, wqe_bbs; u32 lkey = rq->mdev->mlx5e_res.hw_objs.mkey; u16 page_index = shampo->curr_page_index; - struct mlx5e_frag_page *frag_page; + struct mlx5e_frag_page *frag_page = NULL; struct mlx5e_dma_info *dma_info; struct mlx5e_umr_wqe *umr_wqe; int headroom, i; @@ -663,16 +663,14 @@ static int mlx5e_build_shampo_hd_umr(struct mlx5e_rq *rq, umr_wqe = mlx5_wq_cyc_get_wqe(&sq->wq, pi); build_ksm_umr(sq, umr_wqe, shampo->key, index, ksm_entries); - frag_page = &shampo->pages[page_index]; - WARN_ON_ONCE(ksm_entries & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1)); for (i = 0; i < ksm_entries; i++, index++) { dma_info = &shampo->info[index]; header_offset = (index & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1)) << MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE; if (!(header_offset & (PAGE_SIZE - 1))) { - page_index = (page_index + 1) & (shampo->pages_per_wq - 1); frag_page = &shampo->pages[page_index]; + page_index = (page_index + 1) & (shampo->pages_per_wq - 1); err = mlx5e_page_alloc_fragmented(rq, frag_page); if (unlikely(err)) -- 2.51.0 From 945ca432bfd0788b960f8f721594dae4fc3c02c1 Mon Sep 17 00:00:00 2001 From: Dragos Tatulea Date: Thu, 7 Nov 2024 21:43:56 +0200 Subject: [PATCH 15/16] net/mlx5e: SHAMPO, Drop info array The info array is used to store a pointer to the dma address of the header and to the frag page. However, this array is not really required: - The frag page can be calculated from the header index frag page index = header index / headers per page. - The dma address can be calculated through a formula: dma page address + header offset. This series gets rid of the info array and uses the above formulas instead. The current_page_index was used in conjunction with the info array to store page fragment indices. This variable is dropped as well. There was no performance regression observed. 
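The two helpers introduced below, mlx5e_shampo_hd_to_frag_page() and mlx5e_shampo_hd_offset(), implement exactly these formulas. A standalone worked example with illustrative numbers (4 KiB pages assumed, so the 512-byte header slots give 8 headers per page; the DMA address is made up):

	#include <stdio.h>

	#define LOG_HDR_ENTRY_SIZE	9	/* MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE */
	#define HDR_PER_PAGE		8	/* 4096 >> LOG_HDR_ENTRY_SIZE */
	#define LOG_HDR_PER_PAGE	3	/* PAGE_SHIFT - LOG_HDR_ENTRY_SIZE */

	int main(void)
	{
		int header_index = 21;				/* arbitrary header slot */
		unsigned long long page_dma = 0x100000ULL;	/* DMA address of that header's page */

		int page_index = header_index >> LOG_HDR_PER_PAGE;	/* 21 / 8 = 2 */
		unsigned int offset = (header_index & (HDR_PER_PAGE - 1)) << LOG_HDR_ENTRY_SIZE; /* 5 * 512 = 2560 */

		/* prints: frag page 2, header dma 0x100a00 */
		printf("frag page %d, header dma 0x%llx\n", page_index, page_dma + offset);
		return 0;
	}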
Signed-off-by: Dragos Tatulea Signed-off-by: Tariq Toukan Link: https://patch.msgid.link/20241107194357.683732-12-tariqt@nvidia.com Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/mellanox/mlx5/core/en.h | 3 +- .../net/ethernet/mellanox/mlx5/core/en_main.c | 7 +- .../net/ethernet/mellanox/mlx5/core/en_rx.c | 76 ++++++++++--------- 3 files changed, 42 insertions(+), 44 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index b4abb094f01a..979fc56205e1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -83,6 +83,7 @@ struct page_pool; #define MLX5E_SHAMPO_LOG_HEADER_ENTRY_SIZE (8) #define MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE (9) #define MLX5E_SHAMPO_WQ_HEADER_PER_PAGE (PAGE_SIZE >> MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE) +#define MLX5E_SHAMPO_LOG_WQ_HEADER_PER_PAGE (PAGE_SHIFT - MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE) #define MLX5E_SHAMPO_WQ_BASE_HEAD_ENTRY_SIZE (64) #define MLX5E_SHAMPO_WQ_RESRV_SIZE (64 * 1024) #define MLX5E_SHAMPO_WQ_BASE_RESRV_SIZE (4096) @@ -624,9 +625,7 @@ struct mlx5e_dma_info { struct mlx5e_shampo_hd { u32 mkey; - struct mlx5e_dma_info *info; struct mlx5e_frag_page *pages; - u16 curr_page_index; u32 hd_per_wq; u16 hd_per_wqe; u16 pages_per_wq; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 3ca1ef1f39a5..2e27e9d6b820 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -350,19 +350,15 @@ static int mlx5e_rq_shampo_hd_info_alloc(struct mlx5e_rq *rq, int node) shampo->bitmap = bitmap_zalloc_node(shampo->hd_per_wq, GFP_KERNEL, node); - shampo->info = kvzalloc_node(array_size(shampo->hd_per_wq, - sizeof(*shampo->info)), - GFP_KERNEL, node); shampo->pages = kvzalloc_node(array_size(shampo->hd_per_wq, sizeof(*shampo->pages)), GFP_KERNEL, node); - if (!shampo->bitmap || !shampo->info || !shampo->pages) + if (!shampo->bitmap || !shampo->pages) goto err_nomem; return 0; err_nomem: - kvfree(shampo->info); kvfree(shampo->bitmap); kvfree(shampo->pages); @@ -372,7 +368,6 @@ err_nomem: static void mlx5e_rq_shampo_hd_info_free(struct mlx5e_rq *rq) { kvfree(rq->mpwqe.shampo->bitmap); - kvfree(rq->mpwqe.shampo->info); kvfree(rq->mpwqe.shampo->pages); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index 637069c1b988..3de575875586 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -643,6 +643,21 @@ static void build_ksm_umr(struct mlx5e_icosq *sq, struct mlx5e_umr_wqe *umr_wqe, umr_wqe->uctrl.mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE); } +static struct mlx5e_frag_page *mlx5e_shampo_hd_to_frag_page(struct mlx5e_rq *rq, int header_index) +{ + BUILD_BUG_ON(MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE > PAGE_SHIFT); + + return &rq->mpwqe.shampo->pages[header_index >> MLX5E_SHAMPO_LOG_WQ_HEADER_PER_PAGE]; +} + +static u64 mlx5e_shampo_hd_offset(int header_index) +{ + return (header_index & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1)) << + MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE; +} + +static void mlx5e_free_rx_shampo_hd_entry(struct mlx5e_rq *rq, u16 header_index); + static int mlx5e_build_shampo_hd_umr(struct mlx5e_rq *rq, struct mlx5e_icosq *sq, u16 ksm_entries, u16 index) @@ -650,9 +665,6 @@ static int mlx5e_build_shampo_hd_umr(struct mlx5e_rq *rq, struct mlx5e_shampo_hd *shampo = 
rq->mpwqe.shampo; u16 pi, header_offset, err, wqe_bbs; u32 lkey = rq->mdev->mlx5e_res.hw_objs.mkey; - u16 page_index = shampo->curr_page_index; - struct mlx5e_frag_page *frag_page = NULL; - struct mlx5e_dma_info *dma_info; struct mlx5e_umr_wqe *umr_wqe; int headroom, i; u64 addr = 0; @@ -665,29 +677,20 @@ static int mlx5e_build_shampo_hd_umr(struct mlx5e_rq *rq, WARN_ON_ONCE(ksm_entries & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1)); for (i = 0; i < ksm_entries; i++, index++) { - dma_info = &shampo->info[index]; - header_offset = (index & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1)) << - MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE; - if (!(header_offset & (PAGE_SIZE - 1))) { - frag_page = &shampo->pages[page_index]; - page_index = (page_index + 1) & (shampo->pages_per_wq - 1); + header_offset = mlx5e_shampo_hd_offset(index); + if (!header_offset) { + struct mlx5e_frag_page *frag_page = mlx5e_shampo_hd_to_frag_page(rq, index); err = mlx5e_page_alloc_fragmented(rq, frag_page); if (unlikely(err)) goto err_unmap; addr = page_pool_get_dma_addr(frag_page->page); - - dma_info->addr = addr; - dma_info->frag_page = frag_page; - } else { - dma_info->addr = addr + header_offset; - dma_info->frag_page = frag_page; } umr_wqe->inline_ksms[i] = (struct mlx5_ksm) { .key = cpu_to_be32(lkey), - .va = cpu_to_be64(dma_info->addr + headroom), + .va = cpu_to_be64(addr + header_offset + headroom), }; } @@ -698,20 +701,22 @@ static int mlx5e_build_shampo_hd_umr(struct mlx5e_rq *rq, }; shampo->pi = (shampo->pi + ksm_entries) & (shampo->hd_per_wq - 1); - shampo->curr_page_index = page_index; sq->pc += wqe_bbs; sq->doorbell_cseg = &umr_wqe->ctrl; return 0; err_unmap: - while (--i >= 0) { - dma_info = &shampo->info[--index]; - if (!(i & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1))) { - dma_info->addr = ALIGN_DOWN(dma_info->addr, PAGE_SIZE); - mlx5e_page_release_fragmented(rq, dma_info->frag_page); + while (--i) { + --index; + header_offset = mlx5e_shampo_hd_offset(index); + if (!header_offset) { + struct mlx5e_frag_page *frag_page = mlx5e_shampo_hd_to_frag_page(rq, index); + + mlx5e_page_release_fragmented(rq, frag_page); } } + rq->stats->buff_alloc_err++; return err; } @@ -844,13 +849,11 @@ static void mlx5e_free_rx_shampo_hd_entry(struct mlx5e_rq *rq, u16 header_index) { struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo; - u64 addr = shampo->info[header_index].addr; if (((header_index + 1) & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1)) == 0) { - struct mlx5e_dma_info *dma_info = &shampo->info[header_index]; + struct mlx5e_frag_page *frag_page = mlx5e_shampo_hd_to_frag_page(rq, header_index); - dma_info->addr = ALIGN_DOWN(addr, PAGE_SIZE); - mlx5e_page_release_fragmented(rq, dma_info->frag_page); + mlx5e_page_release_fragmented(rq, frag_page); } clear_bit(header_index, shampo->bitmap); } @@ -1204,10 +1207,10 @@ static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe, static void *mlx5e_shampo_get_packet_hd(struct mlx5e_rq *rq, u16 header_index) { - struct mlx5e_dma_info *last_head = &rq->mpwqe.shampo->info[header_index]; - u16 head_offset = (last_head->addr & (PAGE_SIZE - 1)) + rq->buff.headroom; + struct mlx5e_frag_page *frag_page = mlx5e_shampo_hd_to_frag_page(rq, header_index); + u16 head_offset = mlx5e_shampo_hd_offset(header_index) + rq->buff.headroom; - return page_address(last_head->frag_page->page) + head_offset; + return page_address(frag_page->page) + head_offset; } static void mlx5e_shampo_update_ipv4_udp_hdr(struct mlx5e_rq *rq, struct iphdr *ipv4) @@ -2178,29 +2181,30 @@ static struct sk_buff * 
mlx5e_skb_from_cqe_shampo(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, struct mlx5_cqe64 *cqe, u16 header_index) { - struct mlx5e_dma_info *head = &rq->mpwqe.shampo->info[header_index]; - u16 head_offset = head->addr & (PAGE_SIZE - 1); + struct mlx5e_frag_page *frag_page = mlx5e_shampo_hd_to_frag_page(rq, header_index); + dma_addr_t page_dma_addr = page_pool_get_dma_addr(frag_page->page); + u16 head_offset = mlx5e_shampo_hd_offset(header_index); + dma_addr_t dma_addr = page_dma_addr + head_offset; u16 head_size = cqe->shampo.header_size; u16 rx_headroom = rq->buff.headroom; struct sk_buff *skb = NULL; void *hdr, *data; u32 frag_size; - hdr = page_address(head->frag_page->page) + head_offset; + hdr = page_address(frag_page->page) + head_offset; data = hdr + rx_headroom; frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + head_size); if (likely(frag_size <= BIT(MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE))) { /* build SKB around header */ - dma_sync_single_range_for_cpu(rq->pdev, head->addr, 0, frag_size, rq->buff.map_dir); + dma_sync_single_range_for_cpu(rq->pdev, dma_addr, 0, frag_size, rq->buff.map_dir); net_prefetchw(hdr); net_prefetch(data); skb = mlx5e_build_linear_skb(rq, hdr, frag_size, rx_headroom, head_size, 0); - if (unlikely(!skb)) return NULL; - head->frag_page->frags++; + frag_page->frags++; } else { /* allocate SKB and copy header for large header */ rq->stats->gro_large_hds++; @@ -2212,7 +2216,7 @@ mlx5e_skb_from_cqe_shampo(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, } net_prefetchw(skb->data); - mlx5e_copy_skb_header(rq, skb, head->frag_page->page, head->addr, + mlx5e_copy_skb_header(rq, skb, frag_page->page, dma_addr, head_offset + rx_headroom, rx_headroom, head_size); /* skb linear part was allocated with headlen and aligned to long */ -- 2.51.0 From ab4219db89da1d019ef45675e4cd56a6841bbc1e Mon Sep 17 00:00:00 2001 From: Dragos Tatulea Date: Thu, 7 Nov 2024 21:43:57 +0200 Subject: [PATCH 16/16] net/mlx5e: SHAMPO, Rework header allocation loop The current loop code was based on the assumption that there can be page leftovers from previous function calls. This patch changes the allocation loop to make it clearer how pages get allocated every MLX5E_SHAMPO_WQ_HEADER_PER_PAGE headers. This change has no functional implications. 
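The reworked loop (full hunk below) makes that shape explicit: allocate one page, then emit the KSM entries for every header slot on that page. A trimmed skeleton with the intermediate offset variable folded in:

	while (i < ksm_entries) {
		frag_page = mlx5e_shampo_hd_to_frag_page(rq, index);

		/* one page allocation per MLX5E_SHAMPO_WQ_HEADER_PER_PAGE headers */
		err = mlx5e_page_alloc_fragmented(rq, frag_page);
		if (unlikely(err))
			goto err_unmap;
		addr = page_pool_get_dma_addr(frag_page->page);

		/* one KSM entry per header slot on that page */
		for (int j = 0; j < MLX5E_SHAMPO_WQ_HEADER_PER_PAGE; j++) {
			umr_wqe->inline_ksms[i++] = (struct mlx5_ksm) {
				.key = cpu_to_be32(lkey),
				.va  = cpu_to_be64(addr + mlx5e_shampo_hd_offset(index++) + headroom),
			};
		}
	}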
Signed-off-by: Dragos Tatulea Signed-off-by: Tariq Toukan Link: https://patch.msgid.link/20241107194357.683732-13-tariqt@nvidia.com Signed-off-by: Jakub Kicinski --- .../net/ethernet/mellanox/mlx5/core/en_rx.c | 32 ++++++++++--------- 1 file changed, 17 insertions(+), 15 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index 3de575875586..1963bc5adb18 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -666,8 +666,7 @@ static int mlx5e_build_shampo_hd_umr(struct mlx5e_rq *rq, u16 pi, header_offset, err, wqe_bbs; u32 lkey = rq->mdev->mlx5e_res.hw_objs.mkey; struct mlx5e_umr_wqe *umr_wqe; - int headroom, i; - u64 addr = 0; + int headroom, i = 0; headroom = rq->buff.headroom; wqe_bbs = MLX5E_KSM_UMR_WQEBBS(ksm_entries); @@ -676,22 +675,25 @@ static int mlx5e_build_shampo_hd_umr(struct mlx5e_rq *rq, build_ksm_umr(sq, umr_wqe, shampo->key, index, ksm_entries); WARN_ON_ONCE(ksm_entries & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1)); - for (i = 0; i < ksm_entries; i++, index++) { - header_offset = mlx5e_shampo_hd_offset(index); - if (!header_offset) { - struct mlx5e_frag_page *frag_page = mlx5e_shampo_hd_to_frag_page(rq, index); + while (i < ksm_entries) { + struct mlx5e_frag_page *frag_page = mlx5e_shampo_hd_to_frag_page(rq, index); + u64 addr; + + err = mlx5e_page_alloc_fragmented(rq, frag_page); + if (unlikely(err)) + goto err_unmap; - err = mlx5e_page_alloc_fragmented(rq, frag_page); - if (unlikely(err)) - goto err_unmap; - addr = page_pool_get_dma_addr(frag_page->page); - } + addr = page_pool_get_dma_addr(frag_page->page); - umr_wqe->inline_ksms[i] = (struct mlx5_ksm) { - .key = cpu_to_be32(lkey), - .va = cpu_to_be64(addr + header_offset + headroom), - }; + for (int j = 0; j < MLX5E_SHAMPO_WQ_HEADER_PER_PAGE; j++) { + header_offset = mlx5e_shampo_hd_offset(index++); + + umr_wqe->inline_ksms[i++] = (struct mlx5_ksm) { + .key = cpu_to_be32(lkey), + .va = cpu_to_be64(addr + header_offset + headroom), + }; + } } sq->db.wqe_info[pi] = (struct mlx5e_icosq_wqe_info) { -- 2.51.0