From 8c40d99e5f43e0545a3f4fea9156313847e2eb79 Mon Sep 17 00:00:00 2001 From: Heiner Kallweit Date: Wed, 9 Apr 2025 21:05:37 +0200 Subject: [PATCH 01/16] r8169: add helper rtl_csi_mod for accessing extended config space Add a helper for the Realtek-specific mechanism for accessing extended config space if native access isn't possible. This avoids code duplication. Signed-off-by: Heiner Kallweit Link: https://patch.msgid.link/b368fd91-57d7-4cb5-9342-98b4d8fe9aea@gmail.com Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/realtek/r8169_main.c | 26 ++++++++++++++--------- 1 file changed, 16 insertions(+), 10 deletions(-) diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c index 4eebd9cb40a3..8e62b109518b 100644 --- a/drivers/net/ethernet/realtek/r8169_main.c +++ b/drivers/net/ethernet/realtek/r8169_main.c @@ -2852,10 +2852,23 @@ static u32 rtl_csi_read(struct rtl8169_private *tp, int addr) RTL_R32(tp, CSIDR) : ~0; } +static void rtl_csi_mod(struct rtl8169_private *tp, int addr, + u32 mask, u32 set) +{ + u32 val; + + WARN(addr % 4, "Invalid CSI address %#x\n", addr); + + netdev_notice_once(tp->dev, + "No native access to PCI extended config space, falling back to CSI\n"); + + val = rtl_csi_read(tp, addr); + rtl_csi_write(tp, addr, (val & ~mask) | set); +} + static void rtl_disable_zrxdc_timeout(struct rtl8169_private *tp) { struct pci_dev *pdev = tp->pci_dev; - u32 csi; int rc; u8 val; @@ -2872,16 +2885,12 @@ static void rtl_disable_zrxdc_timeout(struct rtl8169_private *tp) } } - netdev_notice_once(tp->dev, - "No native access to PCI extended config space, falling back to CSI\n"); - csi = rtl_csi_read(tp, RTL_GEN3_RELATED_OFF); - rtl_csi_write(tp, RTL_GEN3_RELATED_OFF, csi & ~RTL_GEN3_ZRXDC_NONCOMPL); + rtl_csi_mod(tp, RTL_GEN3_RELATED_OFF, RTL_GEN3_ZRXDC_NONCOMPL, 0); } static void rtl_set_aspm_entry_latency(struct rtl8169_private *tp, u8 val) { struct pci_dev *pdev = tp->pci_dev; - u32 csi; /* According to Realtek the value at config space address 0x070f * controls the L0s/L1 entrance latency. We try standard ECAM access @@ -2893,10 +2902,7 @@ static void rtl_set_aspm_entry_latency(struct rtl8169_private *tp, u8 val) pci_write_config_byte(pdev, 0x070f, val) == PCIBIOS_SUCCESSFUL) return; - netdev_notice_once(tp->dev, - "No native access to PCI extended config space, falling back to CSI\n"); - csi = rtl_csi_read(tp, 0x070c) & 0x00ffffff; - rtl_csi_write(tp, 0x070c, csi | val << 24); + rtl_csi_mod(tp, 0x070c, 0xff000000, val << 24); } static void rtl_set_def_aspm_entry_latency(struct rtl8169_private *tp) -- 2.51.0 From 0c49baf099ba2147a6ff3bbdc3197c6ddbee5469 Mon Sep 17 00:00:00 2001 From: Heiner Kallweit Date: Wed, 9 Apr 2025 21:14:47 +0200 Subject: [PATCH 02/16] r8169: add helper rtl8125_phy_param The integrated PHY's of RTL8125/8126 have an own mechanism to access PHY parameters, similar to what r8168g_phy_param does on earlier PHY versions. Add helper rtl8125_phy_param to simplify the code. 
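For illustration, the conversion turns each open-coded page 0x0b87 write sequence into a single call; two call sites taken from the hunks that follow (one full 16-bit write, one masked update of the high byte only):

    rtl8125_phy_param(phydev, 0x80a2, 0xffff, 0x0153);	/* write the whole word */
    rtl8125_phy_param(phydev, 0x8551, 0xff00, 0x0800);	/* update bits 15:8 only */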
Signed-off-by: Heiner Kallweit Link: https://patch.msgid.link/847b7356-12d6-441b-ade9-4b6e1539b84a@gmail.com Signed-off-by: Jakub Kicinski --- .../net/ethernet/realtek/r8169_phy_config.c | 36 +++++++++---------- 1 file changed, 16 insertions(+), 20 deletions(-) diff --git a/drivers/net/ethernet/realtek/r8169_phy_config.c b/drivers/net/ethernet/realtek/r8169_phy_config.c index cf95e579c65d..748ca8b212ba 100644 --- a/drivers/net/ethernet/realtek/r8169_phy_config.c +++ b/drivers/net/ethernet/realtek/r8169_phy_config.c @@ -50,6 +50,15 @@ static void r8168g_phy_param(struct phy_device *phydev, u16 parm, phy_restore_page(phydev, oldpage, 0); } +static void rtl8125_phy_param(struct phy_device *phydev, u16 parm, + u16 mask, u16 val) +{ + phy_lock_mdio_bus(phydev); + __phy_write_mmd(phydev, MDIO_MMD_VEND2, 0xb87c, parm); + __phy_modify_mmd(phydev, MDIO_MMD_VEND2, 0xb87e, mask, val); + phy_unlock_mdio_bus(phydev); +} + struct phy_reg { u16 reg; u16 val; @@ -1004,12 +1013,8 @@ static void rtl8125a_2_hw_phy_config(struct rtl8169_private *tp, phy_write_paged(phydev, 0xac5, 0x16, 0x01ff); phy_modify_paged(phydev, 0xac8, 0x15, 0x00f0, 0x0030); - phy_write(phydev, 0x1f, 0x0b87); - phy_write(phydev, 0x16, 0x80a2); - phy_write(phydev, 0x17, 0x0153); - phy_write(phydev, 0x16, 0x809c); - phy_write(phydev, 0x17, 0x0153); - phy_write(phydev, 0x1f, 0x0000); + rtl8125_phy_param(phydev, 0x80a2, 0xffff, 0x0153); + rtl8125_phy_param(phydev, 0x809c, 0xffff, 0x0153); phy_write(phydev, 0x1f, 0x0a43); phy_write(phydev, 0x13, 0x81B3); @@ -1061,14 +1066,9 @@ static void rtl8125b_hw_phy_config(struct rtl8169_private *tp, phy_modify_paged(phydev, 0xac4, 0x13, 0x00f0, 0x0090); phy_modify_paged(phydev, 0xad3, 0x10, 0x0003, 0x0001); - phy_write(phydev, 0x1f, 0x0b87); - phy_write(phydev, 0x16, 0x80f5); - phy_write(phydev, 0x17, 0x760e); - phy_write(phydev, 0x16, 0x8107); - phy_write(phydev, 0x17, 0x360e); - phy_write(phydev, 0x16, 0x8551); - phy_modify(phydev, 0x17, 0xff00, 0x0800); - phy_write(phydev, 0x1f, 0x0000); + rtl8125_phy_param(phydev, 0x80f5, 0xffff, 0x760e); + rtl8125_phy_param(phydev, 0x8107, 0xffff, 0x360e); + rtl8125_phy_param(phydev, 0x8551, 0xff00, 0x0800); phy_modify_paged(phydev, 0xbf0, 0x10, 0xe000, 0xa000); phy_modify_paged(phydev, 0xbf4, 0x13, 0x0f00, 0x0300); @@ -1110,12 +1110,8 @@ static void rtl8125bp_hw_phy_config(struct rtl8169_private *tp, r8168g_phy_param(phydev, 0x8010, 0x0800, 0x0000); - phy_write(phydev, 0x1f, 0x0b87); - phy_write(phydev, 0x16, 0x8088); - phy_modify(phydev, 0x17, 0xff00, 0x9000); - phy_write(phydev, 0x16, 0x808f); - phy_modify(phydev, 0x17, 0xff00, 0x9000); - phy_write(phydev, 0x1f, 0x0000); + rtl8125_phy_param(phydev, 0x8088, 0xff00, 0x9000); + rtl8125_phy_param(phydev, 0x808f, 0xff00, 0x9000); r8168g_phy_param(phydev, 0x8174, 0x2000, 0x1800); -- 2.51.0 From 61499764e5cc5918c9f63026d3b7a34c8668d4b8 Mon Sep 17 00:00:00 2001 From: "Russell King (Oracle)" Date: Mon, 7 Apr 2025 20:15:35 +0100 Subject: [PATCH 03/16] net: stmmac: stm32: simplify clock handling Some stm32 implementations need the receive clock running in suspend, as indicated by dwmac->ops->clk_rx_enable_in_suspend. The existing code achieved this in a rather complex way, by passing a flag around. However, the clk API prepare/enables are counted - which means that a clock won't be stopped as long as there are more prepare and enables than disables and unprepares, just like a reference count. 
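As a minimal illustration of that counting behaviour (generic clk API usage, not code taken from this patch):

    clk_prepare_enable(dwmac->clk_rx);	/* probe: enable count 1, clock runs */
    clk_prepare_enable(dwmac->clk_rx);	/* extra enable: count 2 */
    clk_disable_unprepare(dwmac->clk_rx);	/* suspend: count 1, clock keeps running */
    clk_disable_unprepare(dwmac->clk_rx);	/* remove: count 0, clock finally stops */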
Therefore, we can simplify this logic by calling clk_prepare_enable() an additional time in the probe function if this flag is set, and then balancing that at remove time. With this, we can avoid passing a "are we suspending" and "are we resuming" flag to various functions in the driver. Signed-off-by: Russell King (Oracle) Reviewed-by: Simon Horman Signed-off-by: David S. Miller --- .../net/ethernet/stmicro/stmmac/dwmac-stm32.c | 57 ++++++++++++------- 1 file changed, 37 insertions(+), 20 deletions(-) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c index c3d321192581..1eb16eec9c0d 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c @@ -119,7 +119,7 @@ struct stm32_ops { u32 syscfg_clr_off; }; -static int stm32_dwmac_clk_enable(struct stm32_dwmac *dwmac, bool resume) +static int stm32_dwmac_clk_enable(struct stm32_dwmac *dwmac) { int ret; @@ -127,11 +127,9 @@ static int stm32_dwmac_clk_enable(struct stm32_dwmac *dwmac, bool resume) if (ret) goto err_clk_tx; - if (!dwmac->ops->clk_rx_enable_in_suspend || !resume) { - ret = clk_prepare_enable(dwmac->clk_rx); - if (ret) - goto err_clk_rx; - } + ret = clk_prepare_enable(dwmac->clk_rx); + if (ret) + goto err_clk_rx; ret = clk_prepare_enable(dwmac->syscfg_clk); if (ret) @@ -148,15 +146,14 @@ static int stm32_dwmac_clk_enable(struct stm32_dwmac *dwmac, bool resume) err_clk_eth_ck: clk_disable_unprepare(dwmac->syscfg_clk); err_syscfg_clk: - if (!dwmac->ops->clk_rx_enable_in_suspend || !resume) - clk_disable_unprepare(dwmac->clk_rx); + clk_disable_unprepare(dwmac->clk_rx); err_clk_rx: clk_disable_unprepare(dwmac->clk_tx); err_clk_tx: return ret; } -static int stm32_dwmac_init(struct plat_stmmacenet_data *plat_dat, bool resume) +static int stm32_dwmac_init(struct plat_stmmacenet_data *plat_dat) { struct stm32_dwmac *dwmac = plat_dat->bsp_priv; int ret; @@ -167,7 +164,7 @@ static int stm32_dwmac_init(struct plat_stmmacenet_data *plat_dat, bool resume) return ret; } - return stm32_dwmac_clk_enable(dwmac, resume); + return stm32_dwmac_clk_enable(dwmac); } static int stm32mp1_select_ethck_external(struct plat_stmmacenet_data *plat_dat) @@ -382,12 +379,10 @@ static int stm32mcu_set_mode(struct plat_stmmacenet_data *plat_dat) SYSCFG_MCU_ETH_MASK, val << 23); } -static void stm32_dwmac_clk_disable(struct stm32_dwmac *dwmac, bool suspend) +static void stm32_dwmac_clk_disable(struct stm32_dwmac *dwmac) { clk_disable_unprepare(dwmac->clk_tx); - if (!dwmac->ops->clk_rx_enable_in_suspend || !suspend) - clk_disable_unprepare(dwmac->clk_rx); - + clk_disable_unprepare(dwmac->clk_rx); clk_disable_unprepare(dwmac->syscfg_clk); if (dwmac->enable_eth_ck) clk_disable_unprepare(dwmac->clk_eth_ck); @@ -541,18 +536,32 @@ static int stm32_dwmac_probe(struct platform_device *pdev) plat_dat->flags |= STMMAC_FLAG_EN_TX_LPI_CLK_PHY_CAP; plat_dat->bsp_priv = dwmac; - ret = stm32_dwmac_init(plat_dat, false); + ret = stm32_dwmac_init(plat_dat); if (ret) return ret; + /* If this platform requires the clock to be running in suspend, + * prepare and enable the receive clock an additional time to keep + * it running. 
+ */ + if (dwmac->ops->clk_rx_enable_in_suspend) { + ret = clk_prepare_enable(dwmac->clk_rx); + if (ret) + goto err_clk_disable; + } + ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); if (ret) - goto err_clk_disable; + goto err_clk_disable_suspend; return 0; +err_clk_disable_suspend: + if (dwmac->ops->clk_rx_enable_in_suspend) + clk_disable_unprepare(dwmac->clk_rx); + err_clk_disable: - stm32_dwmac_clk_disable(dwmac, false); + stm32_dwmac_clk_disable(dwmac); return ret; } @@ -565,7 +574,15 @@ static void stm32_dwmac_remove(struct platform_device *pdev) stmmac_dvr_remove(&pdev->dev); - stm32_dwmac_clk_disable(dwmac, false); + /* If this platform requires the clock to be running in suspend, + * we need to disable and unprepare the receive clock an additional + * time to balance the extra clk_prepare_enable() in the probe + * function. + */ + if (dwmac->ops->clk_rx_enable_in_suspend) + clk_disable_unprepare(dwmac->clk_rx); + + stm32_dwmac_clk_disable(dwmac); if (dwmac->irq_pwr_wakeup >= 0) { dev_pm_clear_wake_irq(&pdev->dev); @@ -596,7 +613,7 @@ static int stm32_dwmac_suspend(struct device *dev) if (ret) return ret; - stm32_dwmac_clk_disable(dwmac, true); + stm32_dwmac_clk_disable(dwmac); if (dwmac->ops->suspend) ret = dwmac->ops->suspend(dwmac); @@ -614,7 +631,7 @@ static int stm32_dwmac_resume(struct device *dev) if (dwmac->ops->resume) dwmac->ops->resume(dwmac); - ret = stm32_dwmac_init(priv->plat, true); + ret = stm32_dwmac_init(priv->plat); if (ret) return ret; -- 2.51.0 From a808691df39b52cd9db861b118e88e18b63e2299 Mon Sep 17 00:00:00 2001 From: Mateusz Pacuszka Date: Fri, 14 Feb 2025 09:50:35 +0100 Subject: [PATCH 04/16] ice: fix check for existing switch rule In case the rule already exists and another VSI wants to subscribe to it new VSI list is being created and both VSIs are moved to it. Currently, the check for already existing VSI with the same rule is done based on fdw_id.hw_vsi_id, which applies only to LOOKUP_RX flag. Change it to vsi_handle. This is software VSI ID, but it can be applied here, because vsi_map itself is also based on it. Additionally change return status in case the VSI already exists in the VSI map to "Already exists". Such case should be handled by the caller. 
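A minimal, hypothetical sketch of how a caller could consume the new status (the wrapper name is made up for illustration; the real callers are not touched by this patch):

    err = subscribe_vsi_to_rule(hw, vsi_handle);	/* hypothetical wrapper around the VSI list update */
    if (err == -EEXIST)
    	err = 0;	/* VSI already receives this rule's traffic - nothing to do */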
Signed-off-by: Mateusz Pacuszka Reviewed-by: Przemek Kitszel Reviewed-by: Michal Swiatkowski Signed-off-by: Larysa Zaremba Reviewed-by: Simon Horman Tested-by: Rafal Romanowski Signed-off-by: Tony Nguyen --- drivers/net/ethernet/intel/ice/ice_switch.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c index 4a91e0aaf0a5..9d9a7edd3618 100644 --- a/drivers/net/ethernet/intel/ice/ice_switch.c +++ b/drivers/net/ethernet/intel/ice/ice_switch.c @@ -3146,7 +3146,7 @@ ice_add_update_vsi_list(struct ice_hw *hw, u16 vsi_handle_arr[2]; /* A rule already exists with the new VSI being added */ - if (cur_fltr->fwd_id.hw_vsi_id == new_fltr->fwd_id.hw_vsi_id) + if (cur_fltr->vsi_handle == new_fltr->vsi_handle) return -EEXIST; vsi_handle_arr[0] = cur_fltr->vsi_handle; @@ -5978,7 +5978,7 @@ ice_adv_add_update_vsi_list(struct ice_hw *hw, /* A rule already exists with the new VSI being added */ if (test_bit(vsi_handle, m_entry->vsi_list_info->vsi_map)) - return 0; + return -EEXIST; /* Update the previously created VSI list set with * the new VSI ID passed in -- 2.51.0 From 4d5a1c4e6d49e1a521c97c991d1aa81702f4c3d9 Mon Sep 17 00:00:00 2001 From: Larysa Zaremba Date: Fri, 14 Feb 2025 09:50:36 +0100 Subject: [PATCH 05/16] ice: do not add LLDP-specific filter if not necessary Commit 34295a3696fb ("ice: implement new LLDP filter command") introduced the ability to use LLDP-specific filter that directs all LLDP traffic to a single VSI. However, current goal is for all trusted VFs to be able to see LLDP neighbors, which is impossible to do with the special filter. Make using the generic filter the default choice and fall back to special one only if a generic filter cannot be added. That way setups with "NVMs where an already existent LLDP filter is blocking the creation of a filter to allow LLDP packets" will still be able to configure software Rx LLDP on PF only, while all other setups would be able to forward them to VFs too. 
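Simplified, the new ordering for the Rx direction in ice_cfg_sw_lldp() looks like this (excerpted from the hunk below, informational message omitted):

    if (!test_bit(ICE_FLAG_LLDP_AQ_FLTR, pf->flags)) {
    	status = eth_fltr(vsi, ETH_P_LLDP, ICE_FLTR_RX, ICE_FWD_TO_VSI);
    	if (!status || !create)
    		goto report;	/* generic filter worked (or is being removed) */
    }
    status = ice_lldp_fltr_add_remove(&pf->hw, vsi, create);	/* AQ fallback */
    if (!status)
    	set_bit(ICE_FLAG_LLDP_AQ_FLTR, pf->flags);	/* stick with the fallback from now on */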
Reviewed-by: Michal Swiatkowski Signed-off-by: Larysa Zaremba Reviewed-by: Simon Horman Tested-by: Rafal Romanowski Signed-off-by: Tony Nguyen --- drivers/net/ethernet/intel/ice/ice.h | 1 + drivers/net/ethernet/intel/ice/ice_common.c | 14 +++++++++---- drivers/net/ethernet/intel/ice/ice_common.h | 3 +-- drivers/net/ethernet/intel/ice/ice_lib.c | 23 ++++++++++++++------- 4 files changed, 28 insertions(+), 13 deletions(-) diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h index fd083647c14a..2694951a0b1d 100644 --- a/drivers/net/ethernet/intel/ice/ice.h +++ b/drivers/net/ethernet/intel/ice/ice.h @@ -515,6 +515,7 @@ enum ice_pf_flags { ICE_FLAG_MTU_CHANGED, ICE_FLAG_GNSS, /* GNSS successfully initialized */ ICE_FLAG_DPLL, /* SyncE/PTP dplls initialized */ + ICE_FLAG_LLDP_AQ_FLTR, ICE_PF_FLAGS_NBITS /* must be last */ }; diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c index 59df31c2c83f..e725b785d093 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.c +++ b/drivers/net/ethernet/intel/ice/ice_common.c @@ -6011,15 +6011,21 @@ bool ice_fw_supports_lldp_fltr_ctrl(struct ice_hw *hw) /** * ice_lldp_fltr_add_remove - add or remove a LLDP Rx switch filter * @hw: pointer to HW struct - * @vsi_num: absolute HW index for VSI + * @vsi: VSI to add the filter to * @add: boolean for if adding or removing a filter + * + * Return: 0 on success, -EOPNOTSUPP if the operation cannot be performed + * with this HW or VSI, otherwise an error corresponding to + * the AQ transaction result. */ -int -ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add) +int ice_lldp_fltr_add_remove(struct ice_hw *hw, struct ice_vsi *vsi, bool add) { struct ice_aqc_lldp_filter_ctrl *cmd; struct ice_aq_desc desc; + if (vsi->type != ICE_VSI_PF || !ice_fw_supports_lldp_fltr_ctrl(hw)) + return -EOPNOTSUPP; + cmd = &desc.params.lldp_filter_ctrl; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_filter_ctrl); @@ -6029,7 +6035,7 @@ ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add) else cmd->cmd_flags = ICE_AQC_LLDP_FILTER_ACTION_DELETE; - cmd->vsi_num = cpu_to_le16(vsi_num); + cmd->vsi_num = cpu_to_le16(vsi->vsi_num); return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); } diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h index 9b00aa0ddf10..64c530b39191 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.h +++ b/drivers/net/ethernet/intel/ice/ice_common.h @@ -290,8 +290,7 @@ int ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size, struct ice_sq_cd *cd); bool ice_fw_supports_lldp_fltr_ctrl(struct ice_hw *hw); -int -ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add); +int ice_lldp_fltr_add_remove(struct ice_hw *hw, struct ice_vsi *vsi, bool add); int ice_lldp_execute_pending_mib(struct ice_hw *hw); int ice_aq_read_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr, diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c index 0bcf9d127ac9..542b76b5707a 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_lib.c @@ -2085,19 +2085,28 @@ void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create) status = eth_fltr(vsi, ETH_P_LLDP, ICE_FLTR_TX, ICE_DROP_PACKET); } else { - if (ice_fw_supports_lldp_fltr_ctrl(&pf->hw)) { - status = ice_lldp_fltr_add_remove(&pf->hw, vsi->vsi_num, - create); - } else { + if 
(!test_bit(ICE_FLAG_LLDP_AQ_FLTR, pf->flags)) { status = eth_fltr(vsi, ETH_P_LLDP, ICE_FLTR_RX, ICE_FWD_TO_VSI); + if (!status || !create) + goto report; + + dev_info(dev, + "Failed to add generic LLDP Rx filter on VSI %i error: %d, falling back to specialized AQ control\n", + vsi->vsi_num, status); } + + status = ice_lldp_fltr_add_remove(&pf->hw, vsi, create); + if (!status) + set_bit(ICE_FLAG_LLDP_AQ_FLTR, pf->flags); + } +report: if (status) - dev_dbg(dev, "Fail %s %s LLDP rule on VSI %i error: %d\n", - create ? "adding" : "removing", tx ? "TX" : "RX", - vsi->vsi_num, status); + dev_warn(dev, "Failed to %s %s LLDP rule on VSI %i error: %d\n", + create ? "add" : "remove", tx ? "Tx" : "Rx", + vsi->vsi_num, status); } /** -- 2.51.0 From 2296345416b02aa9a7216bfec2725948f034feff Mon Sep 17 00:00:00 2001 From: Mateusz Pacuszka Date: Fri, 14 Feb 2025 09:50:37 +0100 Subject: [PATCH 06/16] ice: receive LLDP on trusted VFs When a trusted VF tries to configure an LLDP multicast address, configure a rule that would mirror the traffic to this VF, untrusted VFs are not allowed to receive LLDP at all, so the request to add LLDP MAC address will always fail for them. Add a forwarding LLDP filter to a trusted VF when it tries to add an LLDP multicast MAC address. The MAC address has to be added after enabling trust (through restarting the LLDP service). Signed-off-by: Mateusz Pacuszka Co-developed-by: Larysa Zaremba Signed-off-by: Larysa Zaremba Reviewed-by: Michal Swiatkowski Reviewed-by: Simon Horman Tested-by: Rafal Romanowski Signed-off-by: Tony Nguyen --- drivers/net/ethernet/intel/ice/ice_dcb_lib.c | 2 +- drivers/net/ethernet/intel/ice/ice_ethtool.c | 2 +- drivers/net/ethernet/intel/ice/ice_lib.c | 48 ++++++++++++++--- drivers/net/ethernet/intel/ice/ice_lib.h | 3 +- drivers/net/ethernet/intel/ice/ice_sriov.c | 4 ++ drivers/net/ethernet/intel/ice/ice_vf_lib.c | 26 +++++++++ drivers/net/ethernet/intel/ice/ice_vf_lib.h | 8 +++ drivers/net/ethernet/intel/ice/ice_virtchnl.c | 53 +++++++++++++++++-- 8 files changed, 134 insertions(+), 12 deletions(-) diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c index a7c510832824..67988c7ab08e 100644 --- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c @@ -846,7 +846,7 @@ int ice_init_pf_dcb(struct ice_pf *pf, bool locked) goto dcb_init_err; } - ice_cfg_sw_lldp(pf_vsi, false, true); + ice_cfg_sw_rx_lldp(pf, true); pf->dcbx_cap = ice_dcb_get_mode(port_info, true); return 0; diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c index 7c2dc347e4e5..648815170477 100644 --- a/drivers/net/ethernet/intel/ice/ice_ethtool.c +++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c @@ -1818,7 +1818,7 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags) /* Remove rule to direct LLDP packets to default VSI. * The FW LLDP engine will now be consuming them. 
*/ - ice_cfg_sw_lldp(vsi, false, false); + ice_cfg_sw_rx_lldp(vsi->back, false); /* AQ command to start FW LLDP agent will return an * error if the agent is already started diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c index 542b76b5707a..03bb16191237 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_lib.c @@ -2065,12 +2065,15 @@ static void ice_vsi_set_tc_cfg(struct ice_vsi *vsi) } /** - * ice_cfg_sw_lldp - Config switch rules for LLDP packet handling + * ice_vsi_cfg_sw_lldp - Config switch rules for LLDP packet handling * @vsi: the VSI being configured * @tx: bool to determine Tx or Rx rule * @create: bool to determine create or remove Rule + * + * Adding an ethtype Tx rule to the uplink VSI results in it being applied + * to the whole port, so LLDP transmission for VFs will be blocked too. */ -void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create) +void ice_vsi_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create) { int (*eth_fltr)(struct ice_vsi *v, u16 type, u16 flag, enum ice_sw_fwd_act_type act); @@ -2109,6 +2112,37 @@ report: vsi->vsi_num, status); } +/** + * ice_cfg_sw_rx_lldp - Enable/disable software handling of LLDP + * @pf: the PF being configured + * @enable: enable or disable + * + * Configure switch rules to enable/disable LLDP handling by software + * across PF. + */ +void ice_cfg_sw_rx_lldp(struct ice_pf *pf, bool enable) +{ + struct ice_vsi *vsi; + struct ice_vf *vf; + unsigned int bkt; + + vsi = ice_get_main_vsi(pf); + ice_vsi_cfg_sw_lldp(vsi, false, enable); + + if (!test_bit(ICE_FLAG_SRIOV_ENA, pf->flags)) + return; + + ice_for_each_vf(pf, bkt, vf) { + vsi = ice_get_vf_vsi(vf); + + if (WARN_ON(!vsi)) + continue; + + if (ice_vf_is_lldp_ena(vf)) + ice_vsi_cfg_sw_lldp(vsi, false, enable); + } +} + /** * ice_set_agg_vsi - sets up scheduler aggregator node and move VSI into it * @vsi: pointer to the VSI @@ -2537,7 +2571,7 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_vsi_cfg_params *params) if (!ice_is_safe_mode(pf) && vsi->type == ICE_VSI_PF) { ice_fltr_add_eth(vsi, ETH_P_PAUSE, ICE_FLTR_TX, ICE_DROP_PACKET); - ice_cfg_sw_lldp(vsi, true, true); + ice_vsi_cfg_sw_lldp(vsi, true, true); } if (!vsi->agg_node) @@ -2834,9 +2868,11 @@ int ice_vsi_release(struct ice_vsi *vsi) /* The Rx rule will only exist to remove if the LLDP FW * engine is currently stopped */ - if (!ice_is_safe_mode(pf) && vsi->type == ICE_VSI_PF && - !test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags)) - ice_cfg_sw_lldp(vsi, false, false); + if (!ice_is_safe_mode(pf) && + !test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags) && + (vsi->type == ICE_VSI_PF || (vsi->type == ICE_VSI_VF && + ice_vf_is_lldp_ena(vsi->vf)))) + ice_vsi_cfg_sw_lldp(vsi, false, false); ice_vsi_decfg(vsi); diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h index b4c9cb28a016..654516c5fc3e 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.h +++ b/drivers/net/ethernet/intel/ice/ice_lib.h @@ -29,7 +29,8 @@ ice_vsi_stop_lan_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src, int ice_vsi_stop_xdp_tx_rings(struct ice_vsi *vsi); -void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create); +void ice_vsi_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create); +void ice_cfg_sw_rx_lldp(struct ice_pf *pf, bool enable); int ice_set_link(struct ice_vsi *vsi, bool ena); diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.c b/drivers/net/ethernet/intel/ice/ice_sriov.c index f1648cf103b7..0e4dc1a5cff0 
100644 --- a/drivers/net/ethernet/intel/ice/ice_sriov.c +++ b/drivers/net/ethernet/intel/ice/ice_sriov.c @@ -63,6 +63,7 @@ static void ice_free_vf_res(struct ice_vf *vf) if (vf->lan_vsi_idx != ICE_NO_VSI) { ice_vf_vsi_release(vf); vf->num_mac = 0; + vf->num_mac_lldp = 0; } last_vector_idx = vf->first_vector_idx + vf->num_msix - 1; @@ -1402,6 +1403,9 @@ int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted) mutex_lock(&vf->cfg_lock); + while (!trusted && vf->num_mac_lldp) + ice_vf_update_mac_lldp_num(vf, ice_get_vf_vsi(vf), false); + vf->trusted = trusted; ice_reset_vf(vf, ICE_VF_RESET_NOTIFY); dev_info(ice_pf_to_dev(pf), "VF %u is now %strusted\n", diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.c b/drivers/net/ethernet/intel/ice/ice_vf_lib.c index 815ad0bfe832..48cd533e93b7 100644 --- a/drivers/net/ethernet/intel/ice/ice_vf_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.c @@ -226,6 +226,7 @@ static void ice_vf_clear_counters(struct ice_vf *vf) vsi->num_vlan = 0; vf->num_mac = 0; + vf->num_mac_lldp = 0; memset(&vf->mdd_tx_events, 0, sizeof(vf->mdd_tx_events)); memset(&vf->mdd_rx_events, 0, sizeof(vf->mdd_rx_events)); } @@ -1401,3 +1402,28 @@ struct ice_vsi *ice_get_vf_ctrl_vsi(struct ice_pf *pf, struct ice_vsi *vsi) rcu_read_unlock(); return ctrl_vsi; } + +/** + * ice_vf_update_mac_lldp_num - update the VF's number of LLDP addresses + * @vf: a VF to add the address to + * @vsi: the corresponding VSI + * @incr: is the rule added or removed + */ +void ice_vf_update_mac_lldp_num(struct ice_vf *vf, struct ice_vsi *vsi, + bool incr) +{ + bool lldp_by_fw = test_bit(ICE_FLAG_FW_LLDP_AGENT, vsi->back->flags); + bool was_ena = ice_vf_is_lldp_ena(vf) && !lldp_by_fw; + bool is_ena; + + if (WARN_ON(!vsi)) { + vf->num_mac_lldp = 0; + return; + } + + vf->num_mac_lldp += incr ? 1 : -1; + is_ena = ice_vf_is_lldp_ena(vf) && !lldp_by_fw; + + if (was_ena != is_ena) + ice_vsi_cfg_sw_lldp(vsi, false, is_ena); +} diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.h b/drivers/net/ethernet/intel/ice/ice_vf_lib.h index 799b2c1f1184..f4c9ca1f51ce 100644 --- a/drivers/net/ethernet/intel/ice/ice_vf_lib.h +++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.h @@ -134,6 +134,7 @@ struct ice_vf { unsigned long vf_caps; /* VF's adv. capabilities */ u8 num_req_qs; /* num of queue pairs requested by VF */ u16 num_mac; + u16 num_mac_lldp; u16 num_vf_qs; /* num of queue configured per VF */ u8 vlan_strip_ena; /* Outer and Inner VLAN strip enable */ #define ICE_INNER_VLAN_STRIP_ENA BIT(0) @@ -180,6 +181,11 @@ static inline u16 ice_vf_get_port_vlan_tpid(struct ice_vf *vf) return vf->port_vlan_info.tpid; } +static inline bool ice_vf_is_lldp_ena(struct ice_vf *vf) +{ + return vf->num_mac_lldp && vf->trusted; +} + /* VF Hash Table access functions * * These functions provide abstraction for interacting with the VF hash table. 
@@ -245,6 +251,8 @@ ice_vf_clear_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m); int ice_reset_vf(struct ice_vf *vf, u32 flags); void ice_reset_all_vfs(struct ice_pf *pf); struct ice_vsi *ice_get_vf_ctrl_vsi(struct ice_pf *pf, struct ice_vsi *vsi); +void ice_vf_update_mac_lldp_num(struct ice_vf *vf, struct ice_vsi *vsi, + bool incr); #else /* CONFIG_PCI_IOV */ static inline struct ice_vf *ice_get_vf_by_id(struct ice_pf *pf, u16 vf_id) { diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.c b/drivers/net/ethernet/intel/ice/ice_virtchnl.c index 7c3006eb68dd..b147f6cf2615 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl.c +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.c @@ -2265,6 +2265,51 @@ ice_vfhw_mac_add(struct ice_vf *vf, struct virtchnl_ether_addr *vc_ether_addr) } } +/** + * ice_is_mc_lldp_eth_addr - check if the given MAC is a multicast LLDP address + * @mac: address to check + * + * Return: true if the address is one of the three possible LLDP multicast + * addresses, false otherwise. + */ +static bool ice_is_mc_lldp_eth_addr(const u8 *mac) +{ + const u8 lldp_mac_base[] = {0x01, 0x80, 0xc2, 0x00, 0x00}; + + if (memcmp(mac, lldp_mac_base, sizeof(lldp_mac_base))) + return false; + + return (mac[5] == 0x0e || mac[5] == 0x03 || mac[5] == 0x00); +} + +/** + * ice_vc_can_add_mac - check if the VF is allowed to add a given MAC + * @vf: a VF to add the address to + * @mac: address to check + * + * Return: true if the VF is allowed to add such MAC address, false otherwise. + */ +static bool ice_vc_can_add_mac(const struct ice_vf *vf, const u8 *mac) +{ + struct device *dev = ice_pf_to_dev(vf->pf); + + if (is_unicast_ether_addr(mac) && + !ice_can_vf_change_mac((struct ice_vf *)vf)) { + dev_err(dev, + "VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n"); + return false; + } + + if (!vf->trusted && ice_is_mc_lldp_eth_addr(mac)) { + dev_warn(dev, + "An untrusted VF %u is attempting to configure an LLDP multicast address\n", + vf->vf_id); + return false; + } + + return true; +} + /** * ice_vc_add_mac_addr - attempt to add the MAC address passed in * @vf: pointer to the VF info @@ -2283,10 +2328,8 @@ ice_vc_add_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi, if (ether_addr_equal(mac_addr, vf->dev_lan_addr)) return 0; - if (is_unicast_ether_addr(mac_addr) && !ice_can_vf_change_mac(vf)) { - dev_err(dev, "VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n"); + if (!ice_vc_can_add_mac(vf, mac_addr)) return -EPERM; - } ret = ice_fltr_add_mac(vsi, mac_addr, ICE_FWD_TO_VSI); if (ret == -EEXIST) { @@ -2301,6 +2344,8 @@ ice_vc_add_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi, return ret; } else { vf->num_mac++; + if (ice_is_mc_lldp_eth_addr(mac_addr)) + ice_vf_update_mac_lldp_num(vf, vsi, true); } ice_vfhw_mac_add(vf, vc_ether_addr); @@ -2395,6 +2440,8 @@ ice_vc_del_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi, ice_vfhw_mac_del(vf, vc_ether_addr); vf->num_mac--; + if (ice_is_mc_lldp_eth_addr(mac_addr)) + ice_vf_update_mac_lldp_num(vf, vsi, false); return 0; } -- 2.51.0 From 5787179c5183c99d9d2d49818186d43c49d6cc04 Mon Sep 17 00:00:00 2001 From: Larysa Zaremba Date: Fri, 14 Feb 2025 09:50:38 +0100 Subject: [PATCH 07/16] ice: remove headers argument from ice_tc_count_lkups Remove the headers argument from the ice_tc_count_lkups() function, because it is not used anywhere. 
Reviewed-by: Michal Swiatkowski Signed-off-by: Larysa Zaremba Reviewed-by: Simon Horman Tested-by: Rafal Romanowski Signed-off-by: Tony Nguyen --- drivers/net/ethernet/intel/ice/ice_tc_lib.c | 13 ++++--------- 1 file changed, 4 insertions(+), 9 deletions(-) diff --git a/drivers/net/ethernet/intel/ice/ice_tc_lib.c b/drivers/net/ethernet/intel/ice/ice_tc_lib.c index ea39b999a0d0..d8d28d74222a 100644 --- a/drivers/net/ethernet/intel/ice/ice_tc_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_tc_lib.c @@ -12,14 +12,11 @@ /** * ice_tc_count_lkups - determine lookup count for switch filter * @flags: TC-flower flags - * @headers: Pointer to TC flower filter header structure * @fltr: Pointer to outer TC filter structure * - * Determine lookup count based on TC flower input for switch filter. + * Return: lookup count based on TC flower input for a switch filter. */ -static int -ice_tc_count_lkups(u32 flags, struct ice_tc_flower_lyr_2_4_hdrs *headers, - struct ice_tc_flower_fltr *fltr) +static int ice_tc_count_lkups(u32 flags, struct ice_tc_flower_fltr *fltr) { int lkups_cnt = 1; /* 0th lookup is metadata */ @@ -770,7 +767,6 @@ static int ice_eswitch_tc_parse_action(struct net_device *filter_dev, static int ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr) { - struct ice_tc_flower_lyr_2_4_hdrs *headers = &fltr->outer_headers; struct ice_adv_rule_info rule_info = { 0 }; struct ice_rule_query_data rule_added; struct ice_hw *hw = &vsi->back->hw; @@ -785,7 +781,7 @@ ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr) return -EOPNOTSUPP; } - lkups_cnt = ice_tc_count_lkups(flags, headers, fltr); + lkups_cnt = ice_tc_count_lkups(flags, fltr); list = kcalloc(lkups_cnt, sizeof(*list), GFP_ATOMIC); if (!list) return -ENOMEM; @@ -985,7 +981,6 @@ static int ice_add_tc_flower_adv_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *tc_fltr) { - struct ice_tc_flower_lyr_2_4_hdrs *headers = &tc_fltr->outer_headers; struct ice_adv_rule_info rule_info = {0}; struct ice_rule_query_data rule_added; struct ice_adv_lkup_elem *list; @@ -1021,7 +1016,7 @@ ice_add_tc_flower_adv_fltr(struct ice_vsi *vsi, return PTR_ERR(dest_vsi); } - lkups_cnt = ice_tc_count_lkups(flags, headers, tc_fltr); + lkups_cnt = ice_tc_count_lkups(flags, tc_fltr); list = kcalloc(lkups_cnt, sizeof(*list), GFP_ATOMIC); if (!list) return -ENOMEM; -- 2.51.0 From 40f42dc1cbb6b5841eb350fc81a5c26f9aa5b420 Mon Sep 17 00:00:00 2001 From: Larysa Zaremba Date: Fri, 14 Feb 2025 09:50:39 +0100 Subject: [PATCH 08/16] ice: support egress drop rules on PF tc clsact qdisc allows us to add offloaded egress rules with commands such as the following one: tc filter add dev egress protocol lldp flower skip_sw action drop Support the egress rule drop action when added to PF, with a few caveats: * in switchdev mode, all PF traffic has to go uplink with an exception for LLDP that can be delegated to a single VSI at a time * in legacy mode, we cannot delegate LLDP functionality to another VSI, so such packets from PF should not be blocked. Also, simplify the rule direction logic, it was previously derived from actions, but actually can be inherited from the tc block (and flipped in case of port representors). 
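The direction simplification amounts to two lines at the end of ice_parse_cls_flower() (taken from the hunk below): the direction is inherited from the tc block and flipped for port representors, since ingress on a representor corresponds to an egress filter in hardware:

    ingress = ice_is_port_repr_netdev(filter_dev) ? !ingress : ingress;
    fltr->direction = ingress ? ICE_ESWITCH_FLTR_INGRESS :
    			    ICE_ESWITCH_FLTR_EGRESS;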
Reviewed-by: Michal Swiatkowski Signed-off-by: Larysa Zaremba Reviewed-by: Simon Horman Tested-by: Rafal Romanowski Signed-off-by: Tony Nguyen --- drivers/net/ethernet/intel/ice/ice_eswitch.c | 4 + drivers/net/ethernet/intel/ice/ice_main.c | 63 ++++++++++++-- drivers/net/ethernet/intel/ice/ice_repr.c | 3 +- drivers/net/ethernet/intel/ice/ice_tc_lib.c | 86 +++++++++++++++----- drivers/net/ethernet/intel/ice/ice_tc_lib.h | 9 +- drivers/net/ethernet/intel/ice/ice_txrx.c | 17 ++-- 6 files changed, 138 insertions(+), 44 deletions(-) diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.c b/drivers/net/ethernet/intel/ice/ice_eswitch.c index ed21d7f55ac1..206a7d839aef 100644 --- a/drivers/net/ethernet/intel/ice/ice_eswitch.c +++ b/drivers/net/ethernet/intel/ice/ice_eswitch.c @@ -245,6 +245,10 @@ ice_eswitch_set_target_vsi(struct sk_buff *skb, u64 cd_cmd, dst_vsi; if (!dst) { + struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb); + + if (unlikely(eth->h_proto == htons(ETH_P_LLDP))) + return; cd_cmd = ICE_TX_CTX_DESC_SWTCH_UPLINK << ICE_TXD_CTX_QW1_CMD_S; off->cd_qw1 |= (cd_cmd | ICE_TX_DESC_DTYPE_CTX); } else { diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index d390157b59fe..1fbe13ee93a8 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -8330,11 +8330,16 @@ void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue) * @np: net device to configure * @filter_dev: device on which filter is added * @cls_flower: offload data + * @ingress: if the rule is added to an ingress block + * + * Return: 0 if the flower was successfully added or deleted, + * negative error code otherwise. */ static int ice_setup_tc_cls_flower(struct ice_netdev_priv *np, struct net_device *filter_dev, - struct flow_cls_offload *cls_flower) + struct flow_cls_offload *cls_flower, + bool ingress) { struct ice_vsi *vsi = np->vsi; @@ -8343,7 +8348,7 @@ ice_setup_tc_cls_flower(struct ice_netdev_priv *np, switch (cls_flower->command) { case FLOW_CLS_REPLACE: - return ice_add_cls_flower(filter_dev, vsi, cls_flower); + return ice_add_cls_flower(filter_dev, vsi, cls_flower, ingress); case FLOW_CLS_DESTROY: return ice_del_cls_flower(vsi, cls_flower); default: @@ -8352,20 +8357,46 @@ ice_setup_tc_cls_flower(struct ice_netdev_priv *np, } /** - * ice_setup_tc_block_cb - callback handler registered for TC block + * ice_setup_tc_block_cb_ingress - callback handler for ingress TC block * @type: TC SETUP type * @type_data: TC flower offload data that contains user input * @cb_priv: netdev private data + * + * Return: 0 if the setup was successful, negative error code otherwise. */ static int -ice_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv) +ice_setup_tc_block_cb_ingress(enum tc_setup_type type, void *type_data, + void *cb_priv) { struct ice_netdev_priv *np = cb_priv; switch (type) { case TC_SETUP_CLSFLOWER: return ice_setup_tc_cls_flower(np, np->vsi->netdev, - type_data); + type_data, true); + default: + return -EOPNOTSUPP; + } +} + +/** + * ice_setup_tc_block_cb_egress - callback handler for egress TC block + * @type: TC SETUP type + * @type_data: TC flower offload data that contains user input + * @cb_priv: netdev private data + * + * Return: 0 if the setup was successful, negative error code otherwise. 
+ */ +static int +ice_setup_tc_block_cb_egress(enum tc_setup_type type, void *type_data, + void *cb_priv) +{ + struct ice_netdev_priv *np = cb_priv; + + switch (type) { + case TC_SETUP_CLSFLOWER: + return ice_setup_tc_cls_flower(np, np->vsi->netdev, + type_data, false); default: return -EOPNOTSUPP; } @@ -9310,16 +9341,32 @@ ice_setup_tc(struct net_device *netdev, enum tc_setup_type type, void *type_data) { struct ice_netdev_priv *np = netdev_priv(netdev); + enum flow_block_binder_type binder_type; struct ice_pf *pf = np->vsi->back; + flow_setup_cb_t *flower_handler; bool locked = false; int err; switch (type) { case TC_SETUP_BLOCK: + binder_type = + ((struct flow_block_offload *)type_data)->binder_type; + + switch (binder_type) { + case FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS: + flower_handler = ice_setup_tc_block_cb_ingress; + break; + case FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS: + flower_handler = ice_setup_tc_block_cb_egress; + break; + default: + return -EOPNOTSUPP; + } + return flow_block_cb_setup_simple(type_data, &ice_block_cb_list, - ice_setup_tc_block_cb, - np, np, true); + flower_handler, + np, np, false); case TC_SETUP_QDISC_MQPRIO: if (ice_is_eswitch_mode_switchdev(pf)) { netdev_err(netdev, "TC MQPRIO offload not supported, switchdev is enabled\n"); @@ -9380,7 +9427,7 @@ ice_indr_setup_block_cb(enum tc_setup_type type, void *type_data, case TC_SETUP_CLSFLOWER: return ice_setup_tc_cls_flower(np, priv->netdev, (struct flow_cls_offload *) - type_data); + type_data, false); default: return -EOPNOTSUPP; } diff --git a/drivers/net/ethernet/intel/ice/ice_repr.c b/drivers/net/ethernet/intel/ice/ice_repr.c index fb7a1b9a4313..f81bf60f8365 100644 --- a/drivers/net/ethernet/intel/ice/ice_repr.c +++ b/drivers/net/ethernet/intel/ice/ice_repr.c @@ -219,7 +219,8 @@ ice_repr_setup_tc_cls_flower(struct ice_repr *repr, { switch (flower->command) { case FLOW_CLS_REPLACE: - return ice_add_cls_flower(repr->netdev, repr->src_vsi, flower); + return ice_add_cls_flower(repr->netdev, repr->src_vsi, flower, + true); case FLOW_CLS_DESTROY: return ice_del_cls_flower(repr->src_vsi, flower); default: diff --git a/drivers/net/ethernet/intel/ice/ice_tc_lib.c b/drivers/net/ethernet/intel/ice/ice_tc_lib.c index d8d28d74222a..229cd29ff92a 100644 --- a/drivers/net/ethernet/intel/ice/ice_tc_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_tc_lib.c @@ -681,26 +681,26 @@ static int ice_tc_setup_action(struct net_device *filter_dev, fltr->action.fltr_act = action; if (ice_is_port_repr_netdev(filter_dev) && - ice_is_port_repr_netdev(target_dev)) { + ice_is_port_repr_netdev(target_dev) && + fltr->direction == ICE_ESWITCH_FLTR_EGRESS) { repr = ice_netdev_to_repr(target_dev); fltr->dest_vsi = repr->src_vsi; - fltr->direction = ICE_ESWITCH_FLTR_EGRESS; } else if (ice_is_port_repr_netdev(filter_dev) && - ice_tc_is_dev_uplink(target_dev)) { + ice_tc_is_dev_uplink(target_dev) && + fltr->direction == ICE_ESWITCH_FLTR_EGRESS) { repr = ice_netdev_to_repr(filter_dev); fltr->dest_vsi = repr->src_vsi->back->eswitch.uplink_vsi; - fltr->direction = ICE_ESWITCH_FLTR_EGRESS; } else if (ice_tc_is_dev_uplink(filter_dev) && - ice_is_port_repr_netdev(target_dev)) { + ice_is_port_repr_netdev(target_dev) && + fltr->direction == ICE_ESWITCH_FLTR_INGRESS) { repr = ice_netdev_to_repr(target_dev); fltr->dest_vsi = repr->src_vsi; - fltr->direction = ICE_ESWITCH_FLTR_INGRESS; } else { NL_SET_ERR_MSG_MOD(fltr->extack, - "Unsupported netdevice in switchdev mode"); + "The action is not supported for this netdevice"); return -EINVAL; } @@ -713,13 +713,11 @@ 
ice_tc_setup_drop_action(struct net_device *filter_dev, { fltr->action.fltr_act = ICE_DROP_PACKET; - if (ice_is_port_repr_netdev(filter_dev)) { - fltr->direction = ICE_ESWITCH_FLTR_EGRESS; - } else if (ice_tc_is_dev_uplink(filter_dev)) { - fltr->direction = ICE_ESWITCH_FLTR_INGRESS; - } else { + if (!ice_tc_is_dev_uplink(filter_dev) && + !(ice_is_port_repr_netdev(filter_dev) && + fltr->direction == ICE_ESWITCH_FLTR_INGRESS)) { NL_SET_ERR_MSG_MOD(fltr->extack, - "Unsupported netdevice in switchdev mode"); + "The action is not supported for this netdevice"); return -EINVAL; } @@ -809,6 +807,11 @@ ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr) rule_info.sw_act.flag |= ICE_FLTR_RX; rule_info.sw_act.src = hw->pf_id; rule_info.flags_info.act = ICE_SINGLE_ACT_LB_ENABLE; + } else if (fltr->direction == ICE_ESWITCH_FLTR_EGRESS && + !fltr->dest_vsi && vsi == vsi->back->eswitch.uplink_vsi) { + /* PF to Uplink */ + rule_info.sw_act.flag |= ICE_FLTR_TX; + rule_info.sw_act.src = vsi->idx; } else if (fltr->direction == ICE_ESWITCH_FLTR_EGRESS && fltr->dest_vsi == vsi->back->eswitch.uplink_vsi) { /* VF to Uplink */ @@ -1051,8 +1054,13 @@ ice_add_tc_flower_adv_fltr(struct ice_vsi *vsi, tc_fltr->action.fwd.q.hw_queue, lkups_cnt); break; case ICE_DROP_PACKET: - rule_info.sw_act.flag |= ICE_FLTR_RX; - rule_info.sw_act.src = hw->pf_id; + if (tc_fltr->direction == ICE_ESWITCH_FLTR_EGRESS) { + rule_info.sw_act.flag |= ICE_FLTR_TX; + rule_info.sw_act.src = vsi->idx; + } else { + rule_info.sw_act.flag |= ICE_FLTR_RX; + rule_info.sw_act.src = hw->pf_id; + } rule_info.priority = ICE_SWITCH_FLTR_PRIO_VSI; break; default: @@ -1458,11 +1466,16 @@ ice_parse_tunnel_attr(struct net_device *dev, struct flow_rule *rule, * @filter_dev: Pointer to device on which filter is being added * @f: Pointer to struct flow_cls_offload * @fltr: Pointer to filter structure + * @ingress: if the rule is added to an ingress block + * + * Return: 0 if the flower was parsed successfully, -EINVAL if the flower + * cannot be parsed, -EOPNOTSUPP if such filter cannot be configured + * for the given VSI. */ static int ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi, struct flow_cls_offload *f, - struct ice_tc_flower_fltr *fltr) + struct ice_tc_flower_fltr *fltr, bool ingress) { struct ice_tc_flower_lyr_2_4_hdrs *headers = &fltr->outer_headers; struct flow_rule *rule = flow_cls_offload_flow_rule(f); @@ -1546,6 +1559,20 @@ ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi, fltr->flags |= ICE_TC_FLWR_FIELD_ETH_TYPE_ID; } + if (!ingress) { + bool switchdev = + ice_is_eswitch_mode_switchdev(vsi->back); + + if (switchdev != (n_proto_key == ETH_P_LLDP)) { + NL_SET_ERR_MSG_FMT_MOD(fltr->extack, + "%sLLDP filtering is not supported on egress in %s mode", + switchdev ? "Non-" : "", + switchdev ? "switchdev" : + "legacy"); + return -EOPNOTSUPP; + } + } + headers->l2_key.n_proto = cpu_to_be16(n_proto_key); headers->l2_mask.n_proto = cpu_to_be16(n_proto_mask); headers->l3_key.ip_proto = match.key->ip_proto; @@ -1721,6 +1748,14 @@ ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi, return -EINVAL; } } + + /* Ingress filter on representor results in an egress filter in HW + * and vice versa + */ + ingress = ice_is_port_repr_netdev(filter_dev) ? !ingress : ingress; + fltr->direction = ingress ? 
ICE_ESWITCH_FLTR_INGRESS : + ICE_ESWITCH_FLTR_EGRESS; + return 0; } @@ -1970,14 +2005,18 @@ static int ice_del_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr) * @vsi: Pointer to VSI * @f: Pointer to flower offload structure * @__fltr: Pointer to struct ice_tc_flower_fltr + * @ingress: if the rule is added to an ingress block * * This function parses TC-flower input fields, parses action, * and adds a filter. + * + * Return: 0 if the filter was successfully added, + * negative error code otherwise. */ static int ice_add_tc_fltr(struct net_device *netdev, struct ice_vsi *vsi, struct flow_cls_offload *f, - struct ice_tc_flower_fltr **__fltr) + struct ice_tc_flower_fltr **__fltr, bool ingress) { struct ice_tc_flower_fltr *fltr; int err; @@ -1994,7 +2033,7 @@ ice_add_tc_fltr(struct net_device *netdev, struct ice_vsi *vsi, fltr->src_vsi = vsi; INIT_HLIST_NODE(&fltr->tc_flower_node); - err = ice_parse_cls_flower(netdev, vsi, f, fltr); + err = ice_parse_cls_flower(netdev, vsi, f, fltr, ingress); if (err < 0) goto err; @@ -2037,10 +2076,13 @@ ice_find_tc_flower_fltr(struct ice_pf *pf, unsigned long cookie) * @netdev: Pointer to filter device * @vsi: Pointer to VSI * @cls_flower: Pointer to flower offload structure + * @ingress: if the rule is added to an ingress block + * + * Return: 0 if the flower was successfully added, + * negative error code otherwise. */ -int -ice_add_cls_flower(struct net_device *netdev, struct ice_vsi *vsi, - struct flow_cls_offload *cls_flower) +int ice_add_cls_flower(struct net_device *netdev, struct ice_vsi *vsi, + struct flow_cls_offload *cls_flower, bool ingress) { struct netlink_ext_ack *extack = cls_flower->common.extack; struct net_device *vsi_netdev = vsi->netdev; @@ -2075,7 +2117,7 @@ ice_add_cls_flower(struct net_device *netdev, struct ice_vsi *vsi, } /* prep and add TC-flower filter in HW */ - err = ice_add_tc_fltr(netdev, vsi, cls_flower, &fltr); + err = ice_add_tc_fltr(netdev, vsi, cls_flower, &fltr, ingress); if (err) return err; diff --git a/drivers/net/ethernet/intel/ice/ice_tc_lib.h b/drivers/net/ethernet/intel/ice/ice_tc_lib.h index d84f153517ec..df9f90f793b9 100644 --- a/drivers/net/ethernet/intel/ice/ice_tc_lib.h +++ b/drivers/net/ethernet/intel/ice/ice_tc_lib.h @@ -211,11 +211,10 @@ static inline int ice_chnl_dmac_fltr_cnt(struct ice_pf *pf) } struct ice_vsi *ice_locate_vsi_using_queue(struct ice_vsi *vsi, int queue); -int -ice_add_cls_flower(struct net_device *netdev, struct ice_vsi *vsi, - struct flow_cls_offload *cls_flower); -int -ice_del_cls_flower(struct ice_vsi *vsi, struct flow_cls_offload *cls_flower); +int ice_add_cls_flower(struct net_device *netdev, struct ice_vsi *vsi, + struct flow_cls_offload *cls_flower, bool ingress); +int ice_del_cls_flower(struct ice_vsi *vsi, + struct flow_cls_offload *cls_flower); void ice_replay_tc_fltrs(struct ice_pf *pf); bool ice_is_tunnel_supported(struct net_device *dev); diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c index 1e4f6f6ee449..0e5107fe62ad 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx.c +++ b/drivers/net/ethernet/intel/ice/ice_txrx.c @@ -2440,19 +2440,20 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_tx_ring *tx_ring) /* allow CONTROL frames egress from main VSI if FW LLDP disabled */ eth = (struct ethhdr *)skb_mac_header(skb); - if (unlikely((skb->priority == TC_PRIO_CONTROL || - eth->h_proto == htons(ETH_P_LLDP)) && - vsi->type == ICE_VSI_PF && - vsi->port_info->qos_cfg.is_sw_lldp)) - offload.cd_qw1 |= 
(u64)(ICE_TX_DESC_DTYPE_CTX | - ICE_TX_CTX_DESC_SWTCH_UPLINK << - ICE_TXD_CTX_QW1_CMD_S); - ice_tstamp(tx_ring, skb, first, &offload); if ((ice_is_switchdev_running(vsi->back) || ice_lag_is_switchdev_running(vsi->back)) && vsi->type != ICE_VSI_SF) ice_eswitch_set_target_vsi(skb, &offload); + else if (unlikely((skb->priority == TC_PRIO_CONTROL || + eth->h_proto == htons(ETH_P_LLDP)) && + vsi->type == ICE_VSI_PF && + vsi->port_info->qos_cfg.is_sw_lldp)) + offload.cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX | + ICE_TX_CTX_DESC_SWTCH_UPLINK << + ICE_TXD_CTX_QW1_CMD_S); + + ice_tstamp(tx_ring, skb, first, &offload); if (offload.cd_qw1 & ICE_TX_DESC_DTYPE_CTX) { struct ice_tx_ctx_desc *cdesc; -- 2.51.0 From 517f7a08ca5fa82dc1d9e4c153f2dc93dc61a20f Mon Sep 17 00:00:00 2001 From: Larysa Zaremba Date: Fri, 14 Feb 2025 09:50:40 +0100 Subject: [PATCH 09/16] ice: enable LLDP TX for VFs through tc Only a single VSI can be in charge of sending LLDP frames, sometimes it is beneficial to assign this function to a VF, that is possible to do with tc capabilities in the switchdev mode. It requires first blocking the PF from sending the LLDP frames with a following command: tc filter add dev egress protocol lldp flower skip_sw action drop Then it becomes possible to configure a forward rule from a VF port representor to uplink instead. tc filter add dev ingress protocol lldp flower skip_sw action mirred egress redirect dev How LLDP exclusivity was done previously is LLDP traffic was blocked for a whole port by a single rule and PF was bypassing that. Now at least in the switchdev mode, every separate VSI has to have its own drop rule. Another complication is the fact that tc does not respect when the driver refuses to delete a rule, so returning an error results in a HW rule still present with no way to reference it through tc. This is addressed by allowing the PF rule to be deleted at any time, but making the VF forward rule "dormant" in such case, this means it is deleted from HW but stays in tc and driver's bookkeeping to be restored when drop rule is added back to the PF. Implement tc configuration handling which enables the user to transmit LLDP packets from VF instead of PF. 
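The exclusivity rules described above are captured by the precondition checks in ice_pass_vf_tx_lldp() (excerpted from the diff below): a VF may take over LLDP transmission only if the PF drop rule is in place and no other VF has already done so:

    if (!deinit && !ice_find_pf_tx_lldp_fltr(vsi->back))
    	return -EINVAL;	/* PF must be blocked from sending LLDP first */
    if (!deinit && ice_any_vf_lldp_tx_ena(pf))
    	return -EINVAL;	/* only one VF at a time may transmit LLDP */
    err = ice_rem_adv_rule_by_id(&pf->hw, &remove_entry);	/* lift this VF's drop rule */
    if (!err)
    	vsi->vf->lldp_tx_ena = true;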
Reviewed-by: Michal Swiatkowski Signed-off-by: Larysa Zaremba Reviewed-by: Simon Horman Tested-by: Rafal Romanowski Signed-off-by: Tony Nguyen --- drivers/net/ethernet/intel/ice/ice_eswitch.c | 2 + drivers/net/ethernet/intel/ice/ice_repr.c | 7 + drivers/net/ethernet/intel/ice/ice_tc_lib.c | 160 +++++++++++++++++++ drivers/net/ethernet/intel/ice/ice_tc_lib.h | 2 + drivers/net/ethernet/intel/ice/ice_vf_lib.h | 4 + 5 files changed, 175 insertions(+) diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.c b/drivers/net/ethernet/intel/ice/ice_eswitch.c index 206a7d839aef..6aae03771746 100644 --- a/drivers/net/ethernet/intel/ice/ice_eswitch.c +++ b/drivers/net/ethernet/intel/ice/ice_eswitch.c @@ -29,6 +29,7 @@ static int ice_eswitch_setup_env(struct ice_pf *pf) return -ENODEV; ice_remove_vsi_fltr(&pf->hw, uplink_vsi->idx); + ice_vsi_cfg_sw_lldp(uplink_vsi, true, false); netif_addr_lock_bh(netdev); __dev_uc_unsync(netdev, NULL); @@ -282,6 +283,7 @@ static void ice_eswitch_release_env(struct ice_pf *pf) ice_fltr_add_mac_and_broadcast(uplink_vsi, uplink_vsi->port_info->mac.perm_addr, ICE_FWD_TO_VSI); + ice_vsi_cfg_sw_lldp(uplink_vsi, true, true); } /** diff --git a/drivers/net/ethernet/intel/ice/ice_repr.c b/drivers/net/ethernet/intel/ice/ice_repr.c index f81bf60f8365..cb08746556a6 100644 --- a/drivers/net/ethernet/intel/ice/ice_repr.c +++ b/drivers/net/ethernet/intel/ice/ice_repr.c @@ -337,6 +337,7 @@ void ice_repr_destroy(struct ice_repr *repr) static void ice_repr_rem_vf(struct ice_repr *repr) { ice_eswitch_decfg_vsi(repr->src_vsi, repr->parent_mac); + ice_pass_vf_tx_lldp(repr->src_vsi, true); unregister_netdev(repr->netdev); ice_devlink_destroy_vf_port(repr->vf); ice_virtchnl_set_dflt_ops(repr->vf); @@ -418,6 +419,10 @@ static int ice_repr_add_vf(struct ice_repr *repr) if (err) goto err_netdev; + err = ice_drop_vf_tx_lldp(repr->src_vsi, true); + if (err) + goto err_drop_lldp; + err = ice_eswitch_cfg_vsi(repr->src_vsi, repr->parent_mac); if (err) goto err_cfg_vsi; @@ -430,6 +435,8 @@ static int ice_repr_add_vf(struct ice_repr *repr) return 0; err_cfg_vsi: + ice_pass_vf_tx_lldp(repr->src_vsi, true); +err_drop_lldp: unregister_netdev(repr->netdev); err_netdev: ice_devlink_destroy_vf_port(vf); diff --git a/drivers/net/ethernet/intel/ice/ice_tc_lib.c b/drivers/net/ethernet/intel/ice/ice_tc_lib.c index 229cd29ff92a..49b87071b7dc 100644 --- a/drivers/net/ethernet/intel/ice/ice_tc_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_tc_lib.c @@ -762,6 +762,154 @@ static int ice_eswitch_tc_parse_action(struct net_device *filter_dev, return 0; } +static bool ice_is_fltr_lldp(struct ice_tc_flower_fltr *fltr) +{ + return fltr->outer_headers.l2_key.n_proto == htons(ETH_P_LLDP); +} + +static bool ice_is_fltr_pf_tx_lldp(struct ice_tc_flower_fltr *fltr) +{ + struct ice_vsi *vsi = fltr->src_vsi, *uplink; + + if (!ice_is_switchdev_running(vsi->back)) + return false; + + uplink = vsi->back->eswitch.uplink_vsi; + return vsi == uplink && fltr->action.fltr_act == ICE_DROP_PACKET && + ice_is_fltr_lldp(fltr) && + fltr->direction == ICE_ESWITCH_FLTR_EGRESS && + fltr->flags == ICE_TC_FLWR_FIELD_ETH_TYPE_ID; +} + +static bool ice_is_fltr_vf_tx_lldp(struct ice_tc_flower_fltr *fltr) +{ + struct ice_vsi *vsi = fltr->src_vsi, *uplink; + + uplink = vsi->back->eswitch.uplink_vsi; + return fltr->src_vsi->type == ICE_VSI_VF && ice_is_fltr_lldp(fltr) && + fltr->direction == ICE_ESWITCH_FLTR_EGRESS && + fltr->dest_vsi == uplink; +} + +static struct ice_tc_flower_fltr * +ice_find_pf_tx_lldp_fltr(struct ice_pf *pf) +{ + struct 
ice_tc_flower_fltr *fltr; + + hlist_for_each_entry(fltr, &pf->tc_flower_fltr_list, tc_flower_node) + if (ice_is_fltr_pf_tx_lldp(fltr)) + return fltr; + + return NULL; +} + +static bool ice_any_vf_lldp_tx_ena(struct ice_pf *pf) +{ + struct ice_vf *vf; + unsigned int bkt; + + ice_for_each_vf(pf, bkt, vf) + if (vf->lldp_tx_ena) + return true; + + return false; +} + +int ice_pass_vf_tx_lldp(struct ice_vsi *vsi, bool deinit) +{ + struct ice_rule_query_data remove_entry = { + .rid = vsi->vf->lldp_recipe_id, + .rule_id = vsi->vf->lldp_rule_id, + .vsi_handle = vsi->idx, + }; + struct ice_pf *pf = vsi->back; + int err; + + if (vsi->vf->lldp_tx_ena) + return 0; + + if (!deinit && !ice_find_pf_tx_lldp_fltr(vsi->back)) + return -EINVAL; + + if (!deinit && ice_any_vf_lldp_tx_ena(pf)) + return -EINVAL; + + err = ice_rem_adv_rule_by_id(&pf->hw, &remove_entry); + if (!err) + vsi->vf->lldp_tx_ena = true; + + return err; +} + +int ice_drop_vf_tx_lldp(struct ice_vsi *vsi, bool init) +{ + struct ice_rule_query_data rule_added; + struct ice_adv_rule_info rinfo = { + .priority = 7, + .src_vsi = vsi->idx, + .sw_act = { + .src = vsi->idx, + .flag = ICE_FLTR_TX, + .fltr_act = ICE_DROP_PACKET, + .vsi_handle = vsi->idx, + }, + .flags_info.act_valid = true, + }; + struct ice_adv_lkup_elem list[3]; + struct ice_pf *pf = vsi->back; + int err; + + if (!init && !vsi->vf->lldp_tx_ena) + return 0; + + memset(list, 0, sizeof(list)); + ice_rule_add_direction_metadata(&list[0]); + ice_rule_add_src_vsi_metadata(&list[1]); + list[2].type = ICE_ETYPE_OL; + list[2].h_u.ethertype.ethtype_id = htons(ETH_P_LLDP); + list[2].m_u.ethertype.ethtype_id = htons(0xFFFF); + + err = ice_add_adv_rule(&pf->hw, list, ARRAY_SIZE(list), &rinfo, + &rule_added); + if (err) { + dev_err(&pf->pdev->dev, + "Failed to add an LLDP rule to VSI 0x%X: %d\n", + vsi->idx, err); + } else { + vsi->vf->lldp_recipe_id = rule_added.rid; + vsi->vf->lldp_rule_id = rule_added.rule_id; + vsi->vf->lldp_tx_ena = false; + } + + return err; +} + +static void ice_handle_add_pf_lldp_drop_rule(struct ice_vsi *vsi) +{ + struct ice_tc_flower_fltr *fltr; + struct ice_pf *pf = vsi->back; + + hlist_for_each_entry(fltr, &pf->tc_flower_fltr_list, tc_flower_node) { + if (!ice_is_fltr_vf_tx_lldp(fltr)) + continue; + ice_pass_vf_tx_lldp(fltr->src_vsi, true); + break; + } +} + +static void ice_handle_del_pf_lldp_drop_rule(struct ice_pf *pf) +{ + int i; + + /* Make the VF LLDP fwd to uplink rule dormant */ + ice_for_each_vsi(pf, i) { + struct ice_vsi *vf_vsi = pf->vsi[i]; + + if (vf_vsi && vf_vsi->type == ICE_VSI_VF) + ice_drop_vf_tx_lldp(vf_vsi, false); + } +} + static int ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr) { @@ -779,6 +927,9 @@ ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr) return -EOPNOTSUPP; } + if (ice_is_fltr_vf_tx_lldp(fltr)) + return ice_pass_vf_tx_lldp(vsi, false); + lkups_cnt = ice_tc_count_lkups(flags, fltr); list = kcalloc(lkups_cnt, sizeof(*list), GFP_ATOMIC); if (!list) @@ -850,6 +1001,9 @@ ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr) goto exit; } + if (ice_is_fltr_pf_tx_lldp(fltr)) + ice_handle_add_pf_lldp_drop_rule(vsi); + /* store the output params, which are needed later for removing * advanced switch filter */ @@ -1969,6 +2123,12 @@ static int ice_del_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr) struct ice_pf *pf = vsi->back; int err; + if (ice_is_fltr_pf_tx_lldp(fltr)) + ice_handle_del_pf_lldp_drop_rule(pf); + + if (ice_is_fltr_vf_tx_lldp(fltr)) 
+ return ice_drop_vf_tx_lldp(vsi, false); + rule_rem.rid = fltr->rid; rule_rem.rule_id = fltr->rule_id; rule_rem.vsi_handle = fltr->dest_vsi_handle; diff --git a/drivers/net/ethernet/intel/ice/ice_tc_lib.h b/drivers/net/ethernet/intel/ice/ice_tc_lib.h index df9f90f793b9..8a3ab2f22af9 100644 --- a/drivers/net/ethernet/intel/ice/ice_tc_lib.h +++ b/drivers/net/ethernet/intel/ice/ice_tc_lib.h @@ -217,6 +217,8 @@ int ice_del_cls_flower(struct ice_vsi *vsi, struct flow_cls_offload *cls_flower); void ice_replay_tc_fltrs(struct ice_pf *pf); bool ice_is_tunnel_supported(struct net_device *dev); +int ice_drop_vf_tx_lldp(struct ice_vsi *vsi, bool init); +int ice_pass_vf_tx_lldp(struct ice_vsi *vsi, bool deinit); static inline bool ice_is_forward_action(enum ice_sw_fwd_act_type fltr_act) { diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.h b/drivers/net/ethernet/intel/ice/ice_vf_lib.h index f4c9ca1f51ce..482f4285fd35 100644 --- a/drivers/net/ethernet/intel/ice/ice_vf_lib.h +++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.h @@ -124,6 +124,7 @@ struct ice_vf { u8 spoofchk:1; u8 link_forced:1; u8 link_up:1; /* only valid if VF link is forced */ + u8 lldp_tx_ena:1; u32 ptp_caps; @@ -150,6 +151,9 @@ struct ice_vf { /* devlink port data */ struct devlink_port devlink_port; + u16 lldp_recipe_id; + u16 lldp_rule_id; + u16 num_msix; /* num of MSI-X configured on this VF */ struct ice_vf_qs_bw qs_bw[ICE_MAX_RSS_QS_PER_VF]; }; -- 2.51.0 From 1e05c5a05d0d91085f2c0cca8a038ee5251a99cc Mon Sep 17 00:00:00 2001 From: Karol Kolacinski Date: Thu, 20 Mar 2025 14:15:36 +0100 Subject: [PATCH 10/16] ice: remove SW side band access workaround for E825 Due to the bug in FW/NVM autoload mechanism (wrong default SB_REM_DEV_CTL register settings), the access to peer PHY and CGU clients was disabled by default. As the workaround solution, the register value was overwritten by the driver at the probe or reset handling. Remove workaround as it's not needed anymore. The fix in autoload procedure has been provided with NVM 3.80 version. NOTE: at the time the fix was provided in NVM, the E825C product was not officially available on the market, so it's not expected this change will cause regression when running with older driver/kernel versions. Reviewed-by: Michal Swiatkowski Reviewed-by: Przemek Kitszel Signed-off-by: Karol Kolacinski Signed-off-by: Grzegorz Nitka Reviewed-by: Simon Horman Tested-by: Rinitha S (A Contingent worker at Intel) Signed-off-by: Tony Nguyen --- drivers/net/ethernet/intel/ice/ice_ptp_hw.c | 23 --------------------- 1 file changed, 23 deletions(-) diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c index 89bb8461284a..a5df081ffc19 100644 --- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c +++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c @@ -2630,25 +2630,6 @@ int ice_start_phy_timer_eth56g(struct ice_hw *hw, u8 port) return 0; } -/** - * ice_sb_access_ena_eth56g - Enable SB devices (PHY and others) access - * @hw: pointer to HW struct - * @enable: Enable or disable access - * - * Enable sideband devices (PHY and others) access. 
- */
-static void ice_sb_access_ena_eth56g(struct ice_hw *hw, bool enable)
-{
-	u32 val = rd32(hw, PF_SB_REM_DEV_CTL);
-
-	if (enable)
-		val |= BIT(eth56g_phy_0) | BIT(cgu) | BIT(eth56g_phy_1);
-	else
-		val &= ~(BIT(eth56g_phy_0) | BIT(cgu) | BIT(eth56g_phy_1));
-
-	wr32(hw, PF_SB_REM_DEV_CTL, val);
-}
-
 /**
  * ice_ptp_init_phc_e825 - Perform E825 specific PHC initialization
  * @hw: pointer to HW struct
@@ -2659,8 +2640,6 @@ static void ice_sb_access_ena_eth56g(struct ice_hw *hw, bool enable)
  */
 static int ice_ptp_init_phc_e825(struct ice_hw *hw)
 {
-	ice_sb_access_ena_eth56g(hw, true);
-
 	/* Initialize the Clock Generation Unit */
 	return ice_init_cgu_e82x(hw);
 }
@@ -2747,8 +2726,6 @@ static void ice_ptp_init_phy_e825(struct ice_hw *hw)
 	params->num_phys = 2;
 	ptp->ports_per_phy = 4;
 	ptp->num_lports = params->num_phys * ptp->ports_per_phy;
-
-	ice_sb_access_ena_eth56g(hw, true);
 }
 
 /* E822 family functions
-- 
2.51.0

From 1fd9c91f7e8f2e0d91043e09f3b61c6783d75e5c Mon Sep 17 00:00:00 2001
From: Karol Kolacinski
Date: Thu, 20 Mar 2025 14:15:37 +0100
Subject: [PATCH 11/16] ice: refactor ice_sbq_msg_dev enum

Rename ice_sbq_msg_dev to ice_sbq_dev_id to reflect the meaning of this
type more precisely. This enum type describes RDA (Remote Device
Access) client IDs, accessible over the SB (Side Band) interface.

Rename the enum elements to make the driver namespace cleaner and
consistent with other SB definitions.

Remove the unused 'rmn_x' entries, which are specific to the
unsupported E824 device.

Adjust the names of clients '2' and '13' (phy_0 and phy_0_peer
respectively) to be compliant with the EAS doc. According to the
specification, regardless of the complex entity (single or dual), a
PHY's own ports are always accessed as the 'phy_0' client, and ports
connected to the other complex are referred to as 'phy_0_peer'.
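To make the rename concrete, here is a minimal standalone C sketch (not
driver code) of how the new client IDs read at a call site; the sbq_msg
struct and the 0x24 register address are simplified stand-ins, not the
driver's ice_sbq_msg_input or a real CGU register.

#include <stdint.h>
#include <stdio.h>

/* Renamed RDA client IDs, mirroring enum ice_sbq_dev_id. */
enum ice_sbq_dev_id {
	ice_sbq_dev_phy_0	= 0x02,	/* was eth56g_phy_0 / rmn_0 */
	ice_sbq_dev_cgu		= 0x06,	/* was cgu */
	ice_sbq_dev_phy_0_peer	= 0x0D,	/* was eth56g_phy_1 */
};

/* Simplified stand-in for the driver's sideband message descriptor. */
struct sbq_msg {
	uint16_t msg_addr_low;
	uint8_t  dest_dev;
	uint32_t data;
};

int main(void)
{
	struct sbq_msg cgu_rd = {
		.msg_addr_low = 0x24,		/* hypothetical CGU register */
		.dest_dev = ice_sbq_dev_cgu,	/* self-describing client name */
	};

	printf("SB read: dest_dev 0x%02x, addr 0x%04x\n",
	       cgu_rd.dest_dev, cgu_rd.msg_addr_low);
	return 0;
}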
Reviewed-by: Simon Horman Reviewed-by: Przemek Kitszel Signed-off-by: Karol Kolacinski Co-developed-by: Grzegorz Nitka Signed-off-by: Grzegorz Nitka Tested-by: Rinitha S (A Contingent worker at Intel) Signed-off-by: Tony Nguyen --- drivers/net/ethernet/intel/ice/ice_common.c | 2 +- drivers/net/ethernet/intel/ice/ice_ptp_hw.c | 20 ++++++++++---------- drivers/net/ethernet/intel/ice/ice_sbq_cmd.h | 11 ++++------- 3 files changed, 15 insertions(+), 18 deletions(-) diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c index e725b785d093..bec6b3e6b4de 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.c +++ b/drivers/net/ethernet/intel/ice/ice_common.c @@ -3434,7 +3434,7 @@ int ice_aq_get_fec_stats(struct ice_hw *hw, u16 pcs_quad, u16 pcs_port, msg.msg_addr_low = lower_16_bits(reg_offset); msg.msg_addr_high = receiver_id; msg.opcode = ice_sbq_msg_rd; - msg.dest_dev = rmn_0; + msg.dest_dev = ice_sbq_dev_phy_0; err = ice_sbq_rw_reg(hw, &msg, flag); if (err) diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c index a5df081ffc19..eb1893dd8979 100644 --- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c +++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c @@ -240,7 +240,7 @@ static int ice_read_cgu_reg_e82x(struct ice_hw *hw, u32 addr, u32 *val) { struct ice_sbq_msg_input cgu_msg = { .opcode = ice_sbq_msg_rd, - .dest_dev = cgu, + .dest_dev = ice_sbq_dev_cgu, .msg_addr_low = addr }; int err; @@ -272,7 +272,7 @@ static int ice_write_cgu_reg_e82x(struct ice_hw *hw, u32 addr, u32 val) { struct ice_sbq_msg_input cgu_msg = { .opcode = ice_sbq_msg_wr, - .dest_dev = cgu, + .dest_dev = ice_sbq_dev_cgu, .msg_addr_low = addr, .data = val }; @@ -919,16 +919,16 @@ static void ice_ptp_cfg_sync_delay(const struct ice_hw *hw, u32 delay) * * Return: destination sideband queue PHY device. */ -static enum ice_sbq_msg_dev ice_ptp_get_dest_dev_e825(struct ice_hw *hw, - u8 port) +static enum ice_sbq_dev_id ice_ptp_get_dest_dev_e825(struct ice_hw *hw, + u8 port) { /* On a single complex E825, PHY 0 is always destination device phy_0 * and PHY 1 is phy_0_peer. 
 	 */
 	if (port >= hw->ptp.ports_per_phy)
-		return eth56g_phy_1;
+		return ice_sbq_dev_phy_0_peer;
 	else
-		return eth56g_phy_0;
+		return ice_sbq_dev_phy_0;
 }
 
 /**
@@ -2758,7 +2758,7 @@ static void ice_fill_phy_msg_e82x(struct ice_hw *hw,
 		msg->msg_addr_high = P_Q1_H(P_4_BASE + offset, phy_port);
 	}
 
-	msg->dest_dev = rmn_0;
+	msg->dest_dev = ice_sbq_dev_phy_0;
 }
 
 /**
@@ -3081,7 +3081,7 @@ static int ice_fill_quad_msg_e82x(struct ice_hw *hw,
 	if (quad >= ICE_GET_QUAD_NUM(hw->ptp.num_lports))
 		return -EINVAL;
 
-	msg->dest_dev = rmn_0;
+	msg->dest_dev = ice_sbq_dev_phy_0;
 
 	if (!(quad % ICE_GET_QUAD_NUM(hw->ptp.ports_per_phy)))
 		addr = Q_0_BASE + offset;
@@ -4800,7 +4800,7 @@ static int ice_read_phy_reg_e810(struct ice_hw *hw, u32 addr, u32 *val)
 	msg.msg_addr_low = lower_16_bits(addr);
 	msg.msg_addr_high = upper_16_bits(addr);
 	msg.opcode = ice_sbq_msg_rd;
-	msg.dest_dev = rmn_0;
+	msg.dest_dev = ice_sbq_dev_phy_0;
 
 	err = ice_sbq_rw_reg(hw, &msg, ICE_AQ_FLAG_RD);
 	if (err) {
@@ -4830,7 +4830,7 @@ static int ice_write_phy_reg_e810(struct ice_hw *hw, u32 addr, u32 val)
 	msg.msg_addr_low = lower_16_bits(addr);
 	msg.msg_addr_high = upper_16_bits(addr);
 	msg.opcode = ice_sbq_msg_wr;
-	msg.dest_dev = rmn_0;
+	msg.dest_dev = ice_sbq_dev_phy_0;
 	msg.data = val;
 
 	err = ice_sbq_rw_reg(hw, &msg, ICE_AQ_FLAG_RD);
diff --git a/drivers/net/ethernet/intel/ice/ice_sbq_cmd.h b/drivers/net/ethernet/intel/ice/ice_sbq_cmd.h
index 3b0054faf70c..183dd5457d6a 100644
--- a/drivers/net/ethernet/intel/ice/ice_sbq_cmd.h
+++ b/drivers/net/ethernet/intel/ice/ice_sbq_cmd.h
@@ -46,13 +46,10 @@ struct ice_sbq_evt_desc {
 	u8 data[24];
 };
 
-enum ice_sbq_msg_dev {
-	eth56g_phy_0 = 0x02,
-	rmn_0 = 0x02,
-	rmn_1 = 0x03,
-	rmn_2 = 0x04,
-	cgu = 0x06,
-	eth56g_phy_1 = 0x0D,
+enum ice_sbq_dev_id {
+	ice_sbq_dev_phy_0 = 0x02,
+	ice_sbq_dev_cgu = 0x06,
+	ice_sbq_dev_phy_0_peer = 0x0D,
 };
 
 enum ice_sbq_msg_opcode {
-- 
2.51.0

From e2193f9f9ec989d7d3e5ff1fd96f71abc4426fc5 Mon Sep 17 00:00:00 2001
From: Karol Kolacinski
Date: Thu, 20 Mar 2025 14:15:38 +0100
Subject: [PATCH 12/16] ice: enable timesync operation on 2xNAC E825 devices

According to the E825C specification, the SBQ address for ports on a
single complex is device 2 for PHY 0 and device 13 for PHY 1. For
accessing ports on a dual complex E825C (so-called 2xNAC mode), the
driver should use destination device 2 (referred to as phy_0) for the
PHY on the current complex and device 13 (referred to as phy_0_peer)
for the PHY on the peer complex.

Differentiate the SBQ destination device by checking whether the
current PF port number is on the same PHY as the target port number.

Adjust the 'ice_get_lane_number' function to provide a unique port
number for PHY 1 ports in the 'dual' mode config (by adding a fixed
offset for PHY 1 ports) and cache this value in the ice_hw struct.

Introduce the ice_get_primary_hw wrapper to get access to the timesync
registers that are not available from the second NAC.
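As a rough illustration of the destination selection described above,
the standalone model below (not the driver code) compares the
requesting port's PHY with the target port's PHY; the lane numbering
and the ports_per_phy value of 4 are assumptions taken from the E825C
description in this series.

#include <stdbool.h>
#include <stdio.h>

#define SBQ_DEV_PHY_0       0x02	/* own PHY / own complex */
#define SBQ_DEV_PHY_0_PEER  0x0D	/* peer PHY or peer complex */

/* own_lane: unique lane number of the requesting PF (PHY 1 ports are
 * offset by ports_per_phy); target: port the access is aimed at.
 */
static int sbq_dest_dev(unsigned int own_lane, unsigned int target,
			unsigned int ports_per_phy, bool dual)
{
	bool own_phy1 = own_lane >= ports_per_phy;
	bool tgt_phy1 = target >= ports_per_phy;

	if ((!dual && tgt_phy1) || (dual && tgt_phy1 != own_phy1))
		return SBQ_DEV_PHY_0_PEER;
	return SBQ_DEV_PHY_0;
}

int main(void)
{
	/* Single complex: ports 4..7 are reached via the peer PHY client. */
	printf("single, lane 0 -> port 5: 0x%02x\n", sbq_dest_dev(0, 5, 4, false));
	/* Dual complex: a PF on PHY 1 reaches its own PHY as phy_0 ... */
	printf("dual,   lane 5 -> port 6: 0x%02x\n", sbq_dest_dev(5, 6, 4, true));
	/* ... and the other complex as phy_0_peer. */
	printf("dual,   lane 5 -> port 1: 0x%02x\n", sbq_dest_dev(5, 1, 4, true));
	return 0;
}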
Reviewed-by: Simon Horman Reviewed-by: Przemek Kitszel Signed-off-by: Karol Kolacinski Co-developed-by: Grzegorz Nitka Signed-off-by: Grzegorz Nitka Tested-by: Rinitha S (A Contingent worker at Intel) Signed-off-by: Tony Nguyen --- drivers/net/ethernet/intel/ice/ice.h | 60 ++++++++++++++++++++- drivers/net/ethernet/intel/ice/ice_common.c | 6 ++- drivers/net/ethernet/intel/ice/ice_ptp.c | 49 ++++++++++++----- drivers/net/ethernet/intel/ice/ice_ptp_hw.c | 39 +++++++++++--- drivers/net/ethernet/intel/ice/ice_ptp_hw.h | 5 -- drivers/net/ethernet/intel/ice/ice_type.h | 1 + 6 files changed, 134 insertions(+), 26 deletions(-) diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h index 2694951a0b1d..e42572ae7631 100644 --- a/drivers/net/ethernet/intel/ice/ice.h +++ b/drivers/net/ethernet/intel/ice/ice.h @@ -193,8 +193,6 @@ #define ice_pf_to_dev(pf) (&((pf)->pdev->dev)) -#define ice_pf_src_tmr_owned(pf) ((pf)->hw.func_caps.ts_func_info.src_tmr_owned) - enum ice_feature { ICE_F_DSCP, ICE_F_PHY_RCLK, @@ -1046,4 +1044,62 @@ static inline void ice_clear_rdma_cap(struct ice_pf *pf) } extern const struct xdp_metadata_ops ice_xdp_md_ops; + +/** + * ice_is_dual - Check if given config is multi-NAC + * @hw: pointer to HW structure + * + * Return: true if the device is running in mutli-NAC (Network + * Acceleration Complex) configuration variant, false otherwise + * (always false for non-E825 devices). + */ +static inline bool ice_is_dual(struct ice_hw *hw) +{ + return hw->mac_type == ICE_MAC_GENERIC_3K_E825 && + (hw->dev_caps.nac_topo.mode & ICE_NAC_TOPO_DUAL_M); +} + +/** + * ice_is_primary - Check if given device belongs to the primary complex + * @hw: pointer to HW structure + * + * Check if given PF/HW is running on primary complex in multi-NAC + * configuration. + * + * Return: true if the device is dual, false otherwise (always true + * for non-E825 devices). + */ +static inline bool ice_is_primary(struct ice_hw *hw) +{ + return hw->mac_type != ICE_MAC_GENERIC_3K_E825 || + !ice_is_dual(hw) || + (hw->dev_caps.nac_topo.mode & ICE_NAC_TOPO_PRIMARY_M); +} + +/** + * ice_pf_src_tmr_owned - Check if a primary timer is owned by PF + * @pf: pointer to PF structure + * + * Return: true if PF owns primary timer, false otherwise. + */ +static inline bool ice_pf_src_tmr_owned(struct ice_pf *pf) +{ + return pf->hw.func_caps.ts_func_info.src_tmr_owned && + ice_is_primary(&pf->hw); +} + +/** + * ice_get_primary_hw - Get pointer to primary ice_hw structure + * @pf: pointer to PF structure + * + * Return: A pointer to ice_hw structure with access to timesync + * register space. 
+ */ +static inline struct ice_hw *ice_get_primary_hw(struct ice_pf *pf) +{ + if (!pf->adapter->ctrl_pf) + return &pf->hw; + else + return &pf->adapter->ctrl_pf->hw; +} #endif /* _ICE_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c index bec6b3e6b4de..4fedf0181c4e 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.c +++ b/drivers/net/ethernet/intel/ice/ice_common.c @@ -1135,6 +1135,8 @@ int ice_init_hw(struct ice_hw *hw) } } + hw->lane_num = ice_get_phy_lane_number(hw); + return 0; err_unroll_fltr_mgmt_struct: ice_cleanup_fltr_mgmt_struct(hw); @@ -4082,10 +4084,12 @@ int ice_get_phy_lane_number(struct ice_hw *hw) continue; if (hw->pf_id == lport) { + if (hw->mac_type == ICE_MAC_GENERIC_3K_E825 && + ice_is_dual(hw) && !ice_is_primary(hw)) + lane += ICE_PORTS_PER_QUAD; kfree(options); return lane; } - lport++; } diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c index 1fd1ae03eb90..0ea5e52303f2 100644 --- a/drivers/net/ethernet/intel/ice/ice_ptp.c +++ b/drivers/net/ethernet/intel/ice/ice_ptp.c @@ -305,6 +305,9 @@ u64 ice_ptp_read_src_clk_reg(struct ice_pf *pf, u32 hi, lo, lo2; u8 tmr_idx; + if (!ice_is_primary(hw)) + hw = ice_get_primary_hw(pf); + tmr_idx = ice_get_ptp_src_clock_index(hw); guard(spinlock)(&pf->adapter->ptp_gltsyn_time_lock); /* Read the system timestamp pre PHC read */ @@ -2985,6 +2988,32 @@ static void ice_ptp_periodic_work(struct kthread_work *work) msecs_to_jiffies(err ? 10 : 500)); } +/** + * ice_ptp_prepare_rebuild_sec - Prepare second NAC for PTP reset or rebuild + * @pf: Board private structure + * @rebuild: rebuild if true, prepare if false + * @reset_type: the reset type being performed + */ +static void ice_ptp_prepare_rebuild_sec(struct ice_pf *pf, bool rebuild, + enum ice_reset_req reset_type) +{ + struct list_head *entry; + + list_for_each(entry, &pf->adapter->ports.ports) { + struct ice_ptp_port *port = list_entry(entry, + struct ice_ptp_port, + list_node); + struct ice_pf *peer_pf = ptp_port_to_pf(port); + + if (!ice_is_primary(&peer_pf->hw)) { + if (rebuild) + ice_ptp_rebuild(peer_pf, reset_type); + else + ice_ptp_prepare_for_reset(peer_pf, reset_type); + } + } +} + /** * ice_ptp_prepare_for_reset - Prepare PTP for reset * @pf: Board private structure @@ -2993,6 +3022,7 @@ static void ice_ptp_periodic_work(struct kthread_work *work) void ice_ptp_prepare_for_reset(struct ice_pf *pf, enum ice_reset_req reset_type) { struct ice_ptp *ptp = &pf->ptp; + struct ice_hw *hw = &pf->hw; u8 src_tmr; if (ptp->state != ICE_PTP_READY) @@ -3008,6 +3038,9 @@ void ice_ptp_prepare_for_reset(struct ice_pf *pf, enum ice_reset_req reset_type) if (reset_type == ICE_RESET_PFR) return; + if (ice_pf_src_tmr_owned(pf) && hw->mac_type == ICE_MAC_GENERIC_3K_E825) + ice_ptp_prepare_rebuild_sec(pf, false, reset_type); + ice_ptp_release_tx_tracker(pf, &pf->ptp.port.tx); /* Disable periodic outputs */ @@ -3129,13 +3162,6 @@ err: dev_err(ice_pf_to_dev(pf), "PTP reset failed %d\n", err); } -static bool ice_is_primary(struct ice_hw *hw) -{ - return hw->mac_type == ICE_MAC_GENERIC_3K_E825 && ice_is_dual(hw) ? 
- !!(hw->dev_caps.nac_topo.mode & ICE_NAC_TOPO_PRIMARY_M) : - true; -} - static int ice_ptp_setup_adapter(struct ice_pf *pf) { if (!ice_pf_src_tmr_owned(pf) || !ice_is_primary(&pf->hw)) @@ -3355,17 +3381,16 @@ void ice_ptp_init(struct ice_pf *pf) { struct ice_ptp *ptp = &pf->ptp; struct ice_hw *hw = &pf->hw; - int lane_num, err; + int err; ptp->state = ICE_PTP_INITIALIZING; - lane_num = ice_get_phy_lane_number(hw); - if (lane_num < 0) { - err = lane_num; + if (hw->lane_num < 0) { + err = hw->lane_num; goto err_exit; } + ptp->port.port_num = hw->lane_num; - ptp->port.port_num = (u8)lane_num; ice_ptp_init_hw(hw); ice_ptp_init_tx_interrupt_mode(pf); diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c index eb1893dd8979..ccac84eb34c9 100644 --- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c +++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c @@ -874,8 +874,12 @@ static u32 ice_ptp_tmr_cmd_to_port_reg(struct ice_hw *hw, */ void ice_ptp_src_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd) { + struct ice_pf *pf = container_of(hw, struct ice_pf, hw); u32 cmd_val = ice_ptp_tmr_cmd_to_src_reg(hw, cmd); + if (!ice_is_primary(hw)) + hw = ice_get_primary_hw(pf); + wr32(hw, GLTSYN_CMD, cmd_val); } @@ -891,6 +895,9 @@ static void ice_ptp_exec_tmr_cmd(struct ice_hw *hw) { struct ice_pf *pf = container_of(hw, struct ice_pf, hw); + if (!ice_is_primary(hw)) + hw = ice_get_primary_hw(pf); + guard(spinlock)(&pf->adapter->ptp_gltsyn_time_lock); wr32(hw, GLTSYN_CMD_SYNC, SYNC_EXEC_CMD); ice_flush(hw); @@ -922,10 +929,18 @@ static void ice_ptp_cfg_sync_delay(const struct ice_hw *hw, u32 delay) static enum ice_sbq_dev_id ice_ptp_get_dest_dev_e825(struct ice_hw *hw, u8 port) { - /* On a single complex E825, PHY 0 is always destination device phy_0 + u8 curr_phy, tgt_phy; + + tgt_phy = port >= hw->ptp.ports_per_phy; + curr_phy = hw->lane_num >= hw->ptp.ports_per_phy; + /* In the driver, lanes 4..7 are in fact 0..3 on a second PHY. + * On a single complex E825C, PHY 0 is always destination device phy_0 * and PHY 1 is phy_0_peer. + * On dual complex E825C, device phy_0 points to PHY on a current + * complex and phy_0_peer to PHY on a different complex. 
*/ - if (port >= hw->ptp.ports_per_phy) + if ((!ice_is_dual(hw) && tgt_phy == 1) || + (ice_is_dual(hw) && tgt_phy != curr_phy)) return ice_sbq_dev_phy_0_peer; else return ice_sbq_dev_phy_0; @@ -2417,6 +2432,7 @@ int ice_phy_cfg_intr_eth56g(struct ice_hw *hw, u8 port, bool ena, u8 threshold) static int ice_read_phy_and_phc_time_eth56g(struct ice_hw *hw, u8 port, u64 *phy_time, u64 *phc_time) { + struct ice_pf *pf = container_of(hw, struct ice_pf, hw); u64 tx_time, rx_time; u32 zo, lo; u8 tmr_idx; @@ -2436,8 +2452,13 @@ static int ice_read_phy_and_phc_time_eth56g(struct ice_hw *hw, u8 port, ice_ptp_exec_tmr_cmd(hw); /* Read the captured PHC time from the shadow time registers */ - zo = rd32(hw, GLTSYN_SHTIME_0(tmr_idx)); - lo = rd32(hw, GLTSYN_SHTIME_L(tmr_idx)); + if (ice_is_primary(hw)) { + zo = rd32(hw, GLTSYN_SHTIME_0(tmr_idx)); + lo = rd32(hw, GLTSYN_SHTIME_L(tmr_idx)); + } else { + zo = rd32(ice_get_primary_hw(pf), GLTSYN_SHTIME_0(tmr_idx)); + lo = rd32(ice_get_primary_hw(pf), GLTSYN_SHTIME_L(tmr_idx)); + } *phc_time = (u64)lo << 32 | zo; /* Read the captured PHY time from the PHY shadow registers */ @@ -2574,6 +2595,7 @@ int ice_stop_phy_timer_eth56g(struct ice_hw *hw, u8 port, bool soft_reset) */ int ice_start_phy_timer_eth56g(struct ice_hw *hw, u8 port) { + struct ice_pf *pf = container_of(hw, struct ice_pf, hw); u32 lo, hi; u64 incval; u8 tmr_idx; @@ -2599,8 +2621,13 @@ int ice_start_phy_timer_eth56g(struct ice_hw *hw, u8 port) if (err) return err; - lo = rd32(hw, GLTSYN_INCVAL_L(tmr_idx)); - hi = rd32(hw, GLTSYN_INCVAL_H(tmr_idx)); + if (ice_is_primary(hw)) { + lo = rd32(hw, GLTSYN_INCVAL_L(tmr_idx)); + hi = rd32(hw, GLTSYN_INCVAL_H(tmr_idx)); + } else { + lo = rd32(ice_get_primary_hw(pf), GLTSYN_INCVAL_L(tmr_idx)); + hi = rd32(ice_get_primary_hw(pf), GLTSYN_INCVAL_H(tmr_idx)); + } incval = (u64)hi << 32 | lo; err = ice_write_40b_ptp_reg_eth56g(hw, port, PHY_REG_TIMETUS_L, incval); diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.h b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h index e5925ccc2613..83f20fa7ace7 100644 --- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.h +++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h @@ -444,11 +444,6 @@ static inline u64 ice_get_base_incval(struct ice_hw *hw) } } -static inline bool ice_is_dual(struct ice_hw *hw) -{ - return !!(hw->dev_caps.nac_topo.mode & ICE_NAC_TOPO_DUAL_M); -} - #define PFTSYN_SEM_BYTES 4 #define ICE_PTP_CLOCK_INDEX_0 0x00 diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h index 0aab21113cc4..ccf53cc6403e 100644 --- a/drivers/net/ethernet/intel/ice/ice_type.h +++ b/drivers/net/ethernet/intel/ice/ice_type.h @@ -970,6 +970,7 @@ struct ice_hw { u8 intrl_gran; struct ice_ptp_hw ptp; + s8 lane_num; /* Active package version (currently active) */ struct ice_pkg_ver active_pkg_ver; -- 2.51.0 From 6cb10c063d6c2b4ae659f8221f4dc70343185319 Mon Sep 17 00:00:00 2001 From: Martyna Szapar-Mudlaw Date: Fri, 14 Mar 2025 09:11:11 +0100 Subject: [PATCH 13/16] ice: improve error message for insufficient filter space When adding a rule to switch through tc, if the operation fails due to not enough free recipes (-ENOSPC), provide a clearer error message: "Unable to add filter: insufficient space available." This improves user feedback by distinguishing space limitations from other generic failures. 
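A minimal standalone sketch of the resulting reporting split (not the
driver's code path); set_err_msg() stands in for NL_SET_ERR_MSG_MOD()
and extack reporting, and report_add_result() is a hypothetical helper.

#include <errno.h>
#include <stdio.h>

/* Stand-in for NL_SET_ERR_MSG_MOD(extack, ...). */
static void set_err_msg(const char *msg)
{
	fprintf(stderr, "filter: %s\n", msg);
}

/* Map the return code of a hypothetical add-rule call to user feedback:
 * -ENOSPC now gets its own message instead of the generic failure text.
 */
static int report_add_result(int ret)
{
	if (ret == -EEXIST) {
		set_err_msg("Unable to add filter because it already exists");
		return -EINVAL;
	} else if (ret == -ENOSPC) {
		set_err_msg("Unable to add filter: insufficient space available.");
		return ret;
	} else if (ret) {
		set_err_msg("Unable to add filter due to error");
		return ret;
	}
	return 0;
}

int main(void)
{
	return report_add_result(-ENOSPC) ? 1 : 0;
}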
Reviewed-by: Michal Swiatkowski Signed-off-by: Martyna Szapar-Mudlaw Reviewed-by: Simon Horman Tested-by: Sujai Buvaneswaran Signed-off-by: Tony Nguyen --- drivers/net/ethernet/intel/ice/ice_tc_lib.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/drivers/net/ethernet/intel/ice/ice_tc_lib.c b/drivers/net/ethernet/intel/ice/ice_tc_lib.c index 49b87071b7dc..fb9ea7f8ef44 100644 --- a/drivers/net/ethernet/intel/ice/ice_tc_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_tc_lib.c @@ -996,6 +996,9 @@ ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr) NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter because it already exist"); ret = -EINVAL; goto exit; + } else if (ret == -ENOSPC) { + NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter: insufficient space available."); + goto exit; } else if (ret) { NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter due to error"); goto exit; @@ -1228,6 +1231,10 @@ ice_add_tc_flower_adv_fltr(struct ice_vsi *vsi, "Unable to add filter because it already exist"); ret = -EINVAL; goto exit; + } else if (ret == -ENOSPC) { + NL_SET_ERR_MSG_MOD(tc_fltr->extack, + "Unable to add filter: insufficient space available."); + goto exit; } else if (ret) { NL_SET_ERR_MSG_MOD(tc_fltr->extack, "Unable to add filter due to error"); -- 2.51.0 From fee4a79a12240af90283034709518296544a9039 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Mon, 17 Mar 2025 17:20:24 +0000 Subject: [PATCH 14/16] ice: make const read-only array dflt_rules static Don't populate the const read-only array dflt_rules on the stack at run time, instead make it static. Signed-off-by: Colin Ian King Tested-by: Rinitha S (A Contingent worker at Intel) Signed-off-by: Tony Nguyen --- drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c b/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c index 1d118171de37..aceec184e89b 100644 --- a/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c +++ b/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c @@ -1605,7 +1605,7 @@ void ice_fdir_replay_fltrs(struct ice_pf *pf) */ int ice_fdir_create_dflt_rules(struct ice_pf *pf) { - const enum ice_fltr_ptype dflt_rules[] = { + static const enum ice_fltr_ptype dflt_rules[] = { ICE_FLTR_PTYPE_NONF_IPV4_TCP, ICE_FLTR_PTYPE_NONF_IPV4_UDP, ICE_FLTR_PTYPE_NONF_IPV6_TCP, ICE_FLTR_PTYPE_NONF_IPV6_UDP, }; -- 2.51.0 From 015bac5daca978448f2671478c553ce1f300c21e Mon Sep 17 00:00:00 2001 From: Kyungwook Boo Date: Tue, 11 Mar 2025 14:16:02 +0900 Subject: [PATCH 15/16] i40e: fix MMIO write access to an invalid page in i40e_clear_hw When the device sends a specific input, an integer underflow can occur, leading to MMIO write access to an invalid page. Prevent the integer underflow by changing the type of related variables. 
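The arithmetic behind the fix can be demonstrated with a small
standalone program (a model of the loop bounds only, no MMIO): with an
unsigned counter a device-supplied value of 0 wraps the bound, while a
signed counter keeps the loop from running at all.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t num_u = 0;	/* counter derived from device input */
	int32_t  num_s = 0;	/* same value with the fixed, signed type */
	uint32_t iters_u = 0;
	int32_t  iters_s = 0;

	/* Unsigned: 0 - 1 wraps to 0xffffffff, so a clearing loop would
	 * walk billions of nonexistent registers (capped here for demo).
	 */
	for (uint32_t i = 0; i < num_u - 1 && iters_u < 10; i++)
		iters_u++;

	/* Signed: 0 - 1 is -1 and the loop body never executes. */
	for (int32_t i = 0; i < num_s - 1; i++)
		iters_s++;

	printf("unsigned bound ran %u times (capped), signed ran %d times\n",
	       iters_u, iters_s);
	return 0;
}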
Signed-off-by: Kyungwook Boo Link: https://lore.kernel.org/lkml/ffc91764-1142-4ba2-91b6-8c773f6f7095@gmail.com/T/ Reviewed-by: Przemek Kitszel Reviewed-by: Simon Horman Reviewed-by: Aleksandr Loktionov Tested-by: Rinitha S (A Contingent worker at Intel) Signed-off-by: Tony Nguyen --- drivers/net/ethernet/intel/i40e/i40e_common.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c index 370b4bddee44..b11c35e307ca 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_common.c +++ b/drivers/net/ethernet/intel/i40e/i40e_common.c @@ -817,10 +817,11 @@ int i40e_pf_reset(struct i40e_hw *hw) void i40e_clear_hw(struct i40e_hw *hw) { u32 num_queues, base_queue; - u32 num_pf_int; - u32 num_vf_int; + s32 num_pf_int; + s32 num_vf_int; u32 num_vfs; - u32 i, j; + s32 i; + u32 j; u32 val; u32 eol = 0x7ff; -- 2.51.0 From cdcb3804eeda24d588348bbab6766cf14fddbeaa Mon Sep 17 00:00:00 2001 From: Rand Deeb Date: Thu, 6 Mar 2025 13:12:00 +0300 Subject: [PATCH 16/16] ixgbe: Fix unreachable retry logic in combined and byte I2C write functions The current implementation of `ixgbe_write_i2c_combined_generic_int` and `ixgbe_write_i2c_byte_generic_int` sets `max_retry` to `1`, which makes the condition `retry < max_retry` always evaluate to `false`. This renders the retry mechanism ineffective, as the debug message and retry logic are never executed. This patch increases `max_retry` to `3` in both functions, aligning them with the retry logic in `ixgbe_read_i2c_combined_generic_int`. This ensures that the retry mechanism functions as intended, improving robustness in case of I2C write failures. Found by Linux Verification Center (linuxtesting.org) with SVACE. Signed-off-by: Rand Deeb Tested-by: Rinitha S (A Contingent worker at Intel) Signed-off-by: Tony Nguyen --- drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c index 0a03a8bb5f88..2d54828bdfbb 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c @@ -167,7 +167,7 @@ int ixgbe_write_i2c_combined_generic_int(struct ixgbe_hw *hw, u8 addr, u16 reg, u16 val, bool lock) { u32 swfw_mask = hw->phy.phy_semaphore_mask; - int max_retry = 1; + int max_retry = 3; int retry = 0; u8 reg_high; u8 csum; @@ -2285,7 +2285,7 @@ static int ixgbe_write_i2c_byte_generic_int(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr, u8 data, bool lock) { u32 swfw_mask = hw->phy.phy_semaphore_mask; - u32 max_retry = 1; + u32 max_retry = 3; u32 retry = 0; int status; -- 2.51.0