From 9ba8e351efd4d003a7fc22bc7455f0e95665cf44 Mon Sep 17 00:00:00 2001
From: Jakub Kicinski
Date: Fri, 9 May 2025 08:42:12 -0700
Subject: [PATCH 01/16] tools: ynl-gen: auto-indent else

We auto-indent if statements (increase the indent of the subsequent
line by 1); do the same thing for else branches without a block.
There haven't been any else branches before, but we're about to add
one.

Reviewed-by: Donald Hunter
Signed-off-by: Jakub Kicinski
Link: https://patch.msgid.link/20250509154213.1747885-3-kuba@kernel.org
Signed-off-by: Paolo Abeni
---
 tools/net/ynl/pyynl/ynl_gen_c.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/tools/net/ynl/pyynl/ynl_gen_c.py b/tools/net/ynl/pyynl/ynl_gen_c.py
index 4e2ae738c0aa..9a5c65966e9d 100755
--- a/tools/net/ynl/pyynl/ynl_gen_c.py
+++ b/tools/net/ynl/pyynl/ynl_gen_c.py
@@ -1458,6 +1458,7 @@ class CodeWriter:
         if self._silent_block:
             ind += 1
         self._silent_block = line.endswith(')') and CodeWriter._is_cond(line)
+        self._silent_block |= line.strip() == 'else'
         if line[0] == '#':
             ind = 0
         if add_ind:
--
2.51.0

From 25e37418c87249369fe546f2cb6af578c16b68b9 Mon Sep 17 00:00:00 2001
From: Jakub Kicinski
Date: Fri, 9 May 2025 08:42:13 -0700
Subject: [PATCH 02/16] tools: ynl-gen: support struct for binary attributes

Support using a struct pointer for binary attrs. The len field is
maintained because the structs may grow with newer kernel versions,
or, which matters more, be shorter if the binary is built against a
newer uAPI than the kernel on which it's executed.

Since we are storing a pointer to a struct type - always allocate at
least the amount of memory needed by the struct per current uAPI
headers (unused mem is zeroed). Technically users should check the
length field, but per modern ASAN checks storing a short object under
a pointer seems like a bad idea.

Signed-off-by: Jakub Kicinski
Reviewed-by: Donald Hunter
Link: https://patch.msgid.link/20250509154213.1747885-4-kuba@kernel.org
Signed-off-by: Paolo Abeni
---
 tools/net/ynl/pyynl/ynl_gen_c.py | 21 ++++++++++++++++++++-
 1 file changed, 20 insertions(+), 1 deletion(-)

diff --git a/tools/net/ynl/pyynl/ynl_gen_c.py b/tools/net/ynl/pyynl/ynl_gen_c.py
index 9a5c65966e9d..3b064c61a374 100755
--- a/tools/net/ynl/pyynl/ynl_gen_c.py
+++ b/tools/net/ynl/pyynl/ynl_gen_c.py
@@ -566,6 +566,23 @@ class TypeBinary(Type):
                 f'memcpy({member}, {self.c_name}, {presence});']
 
 
+class TypeBinaryStruct(TypeBinary):
+    def struct_member(self, ri):
+        ri.cw.p(f'struct {c_lower(self.get("struct"))} *{self.c_name};')
+
+    def _attr_get(self, ri, var):
+        struct_sz = 'sizeof(struct ' + c_lower(self.get("struct")) + ')'
+        len_mem = var + '->_' + self.presence_type() + '.' + self.c_name
+        return [f"{len_mem} = len;",
+                f"if (len < {struct_sz})",
+                f"{var}->{self.c_name} = calloc(1, {struct_sz});",
+                "else",
+                f"{var}->{self.c_name} = malloc(len);",
+                f"memcpy({var}->{self.c_name}, ynl_attr_data(attr), len);"], \
+               ['len = ynl_attr_data_len(attr);'], \
+               ['unsigned int len;']
+
+
 class TypeBinaryScalarArray(TypeBinary):
     def arg_member(self, ri):
         return [f'__{self.get("sub-type")} *{self.c_name}', 'size_t count']
@@ -1010,7 +1027,9 @@ class AttrSet(SpecAttrSet):
         elif elem['type'] == 'string':
             t = TypeString(self.family, self, elem, value)
         elif elem['type'] == 'binary':
-            if elem.get('sub-type') in scalars:
+            if 'struct' in elem:
+                t = TypeBinaryStruct(self.family, self, elem, value)
+            elif elem.get('sub-type') in scalars:
                 t = TypeBinaryScalarArray(self.family, self, elem, value)
             else:
                 t = TypeBinary(self.family, self, elem, value)
--
2.51.0

From 2d4407160f601876a88376b3789ce2f59df21f4b Mon Sep 17 00:00:00 2001
From: Raju Rangoju
Date: Fri, 9 May 2025 21:23:21 +0530
Subject: [PATCH 03/16] amd-xgbe: reorganize the code of XPCS access

The xgbe_{read/write}_mmd_regs_v* functions have common code that can
be moved to helper functions. Add new helper functions to calculate
the mmd_address for v1/v2 of the XPCS access.

Signed-off-by: Raju Rangoju
Reviewed-by: Simon Horman
Link: https://patch.msgid.link/20250509155325.720499-2-Raju.Rangoju@amd.com
Signed-off-by: Paolo Abeni
---
 drivers/net/ethernet/amd/xgbe/xgbe-dev.c | 65 +++++++++++-------------
 1 file changed, 29 insertions(+), 36 deletions(-)

diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
index d9c8f2af20ae..cdab1c24dd69 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
@@ -1053,18 +1053,19 @@ static int xgbe_set_gpio(struct xgbe_prv_data *pdata, unsigned int gpio)
 	return 0;
 }
 
-static int xgbe_read_mmd_regs_v2(struct xgbe_prv_data *pdata, int prtad,
-				 int mmd_reg)
+static unsigned int xgbe_get_mmd_address(struct xgbe_prv_data *pdata,
+					 int mmd_reg)
 {
-	unsigned long flags;
-	unsigned int mmd_address, index, offset;
-	int mmd_data;
-
-	if (mmd_reg & XGBE_ADDR_C45)
-		mmd_address = mmd_reg & ~XGBE_ADDR_C45;
-	else
-		mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);
+	return (mmd_reg & XGBE_ADDR_C45) ?
+		mmd_reg & ~XGBE_ADDR_C45 :
+		(pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);
+}
 
+static void xgbe_get_pcs_index_and_offset(struct xgbe_prv_data *pdata,
+					  unsigned int mmd_address,
+					  unsigned int *index,
+					  unsigned int *offset)
+{
 	/* The PCS registers are accessed using mmio. The underlying
 	 * management interface uses indirect addressing to access the MMD
 	 * register sets. This requires accessing of the PCS register in two
	 * phases, an address phase and a data phase.
	 *
	 * The mmio interface is based on 16-bit offsets and values. All
	 * register offsets must therefore be adjusted by left shifting the
	 * offset 1 bit and reading 16 bits of data.
	 */
 	mmd_address <<= 1;
-	index = mmd_address & ~pdata->xpcs_window_mask;
-	offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask);
+	*index = mmd_address & ~pdata->xpcs_window_mask;
+	*offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask);
+}
+
+static int xgbe_read_mmd_regs_v2(struct xgbe_prv_data *pdata, int prtad,
+				 int mmd_reg)
+{
+	unsigned int mmd_address, index, offset;
+	unsigned long flags;
+	int mmd_data;
+
+	mmd_address = xgbe_get_mmd_address(pdata, mmd_reg);
+
+	xgbe_get_pcs_index_and_offset(pdata, mmd_address, &index, &offset);
 
 	spin_lock_irqsave(&pdata->xpcs_lock, flags);
 	XPCS32_IOWRITE(pdata, pdata->xpcs_window_sel_reg, index);
@@ -1092,23 +1105,9 @@ static void xgbe_write_mmd_regs_v2(struct xgbe_prv_data *pdata, int prtad,
 	unsigned long flags;
 	unsigned int mmd_address, index, offset;
 
-	if (mmd_reg & XGBE_ADDR_C45)
-		mmd_address = mmd_reg & ~XGBE_ADDR_C45;
-	else
-		mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);
+	mmd_address = xgbe_get_mmd_address(pdata, mmd_reg);
 
-	/* The PCS registers are accessed using mmio. The underlying
-	 * management interface uses indirect addressing to access the MMD
-	 * register sets. This requires accessing of the PCS register in two
-	 * phases, an address phase and a data phase.
-	 *
-	 * The mmio interface is based on 16-bit offsets and values. All
-	 * register offsets must therefore be adjusted by left shifting the
-	 * offset 1 bit and writing 16 bits of data.
-	 */
-	mmd_address <<= 1;
-	index = mmd_address & ~pdata->xpcs_window_mask;
-	offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask);
+	xgbe_get_pcs_index_and_offset(pdata, mmd_address, &index, &offset);
 
 	spin_lock_irqsave(&pdata->xpcs_lock, flags);
 	XPCS32_IOWRITE(pdata, pdata->xpcs_window_sel_reg, index);
@@ -1123,10 +1122,7 @@ static int xgbe_read_mmd_regs_v1(struct xgbe_prv_data *pdata, int prtad,
 	unsigned int mmd_address;
 	int mmd_data;
 
-	if (mmd_reg & XGBE_ADDR_C45)
-		mmd_address = mmd_reg & ~XGBE_ADDR_C45;
-	else
-		mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);
+	mmd_address = xgbe_get_mmd_address(pdata, mmd_reg);
 
 	/* The PCS registers are accessed using mmio. The underlying APB3
 	 * management interface uses indirect addressing to access the MMD
@@ -1151,10 +1147,7 @@ static void xgbe_write_mmd_regs_v1(struct xgbe_prv_data *pdata, int prtad,
 	unsigned int mmd_address;
 	unsigned long flags;
 
-	if (mmd_reg & XGBE_ADDR_C45)
-		mmd_address = mmd_reg & ~XGBE_ADDR_C45;
-	else
-		mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);
+	mmd_address = xgbe_get_mmd_address(pdata, mmd_reg);
 
 	/* The PCS registers are accessed using mmio. The underlying APB3
 	 * management interface uses indirect addressing to access the MMD
--
2.51.0

From bbbd7303ea1851e24365058d424163d5cbdbb678 Mon Sep 17 00:00:00 2001
From: Raju Rangoju
Date: Fri, 9 May 2025 21:23:22 +0530
Subject: [PATCH 04/16] amd-xgbe: reorganize the xgbe_pci_probe() code path

Reorganize the xgbe_pci_probe() code path, converting the if/else
statements to a switch statement. This makes it easier to add future
code and makes the code look cleaner.
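
With the switch in place, enabling a future SoC is a matter of adding
one case. A hypothetical sketch (XGBE_NEW_PCI_DEVICE_ID and the
PCS_V2_NEW_* registers are made-up names for illustration; a later
patch in this series adds a real case for Renoir):

	case XGBE_NEW_PCI_DEVICE_ID:	/* hypothetical device id */
		pdata->xpcs_window_def_reg = PCS_V2_NEW_WINDOW_DEF;
		pdata->xpcs_window_sel_reg = PCS_V2_NEW_WINDOW_SELECT;
		break;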
Signed-off-by: Raju Rangoju
Reviewed-by: Simon Horman
Link: https://patch.msgid.link/20250509155325.720499-3-Raju.Rangoju@amd.com
Signed-off-by: Paolo Abeni
---
 drivers/net/ethernet/amd/xgbe/xgbe-pci.c | 35 ++++++++++++++----------
 drivers/net/ethernet/amd/xgbe/xgbe.h     |  4 +++
 2 files changed, 25 insertions(+), 14 deletions(-)

diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
index 3e9f31256dce..d36446e76d0a 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
@@ -165,20 +165,27 @@ static int xgbe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 
 	/* Set the PCS indirect addressing definition registers */
 	rdev = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(0, 0));
-	if (rdev &&
-	    (rdev->vendor == PCI_VENDOR_ID_AMD) && (rdev->device == 0x15d0)) {
-		pdata->xpcs_window_def_reg = PCS_V2_RV_WINDOW_DEF;
-		pdata->xpcs_window_sel_reg = PCS_V2_RV_WINDOW_SELECT;
-	} else if (rdev && (rdev->vendor == PCI_VENDOR_ID_AMD) &&
-		   (rdev->device == 0x14b5)) {
-		pdata->xpcs_window_def_reg = PCS_V2_YC_WINDOW_DEF;
-		pdata->xpcs_window_sel_reg = PCS_V2_YC_WINDOW_SELECT;
-
-		/* Yellow Carp devices do not need cdr workaround */
-		pdata->vdata->an_cdr_workaround = 0;
-
-		/* Yellow Carp devices do not need rrc */
-		pdata->vdata->enable_rrc = 0;
+	if (rdev && rdev->vendor == PCI_VENDOR_ID_AMD) {
+		switch (rdev->device) {
+		case XGBE_RV_PCI_DEVICE_ID:
+			pdata->xpcs_window_def_reg = PCS_V2_RV_WINDOW_DEF;
+			pdata->xpcs_window_sel_reg = PCS_V2_RV_WINDOW_SELECT;
+			break;
+		case XGBE_YC_PCI_DEVICE_ID:
+			pdata->xpcs_window_def_reg = PCS_V2_YC_WINDOW_DEF;
+			pdata->xpcs_window_sel_reg = PCS_V2_YC_WINDOW_SELECT;
+
+			/* Yellow Carp devices do not need cdr workaround */
+			pdata->vdata->an_cdr_workaround = 0;
+
+			/* Yellow Carp devices do not need rrc */
+			pdata->vdata->enable_rrc = 0;
+			break;
+		default:
+			pdata->xpcs_window_def_reg = PCS_V2_WINDOW_DEF;
+			pdata->xpcs_window_sel_reg = PCS_V2_WINDOW_SELECT;
+			break;
+		}
 	} else {
 		pdata->xpcs_window_def_reg = PCS_V2_WINDOW_DEF;
 		pdata->xpcs_window_sel_reg = PCS_V2_WINDOW_SELECT;
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
index 8ac0f3a22fb6..37e5f8fad6b2 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
@@ -238,6 +238,10 @@
 	 (_src)->link_modes._sname,		\
 	 __ETHTOOL_LINK_MODE_MASK_NBITS)
 
+/* XGBE PCI device id */
+#define XGBE_RV_PCI_DEVICE_ID	0x15d0
+#define XGBE_YC_PCI_DEVICE_ID	0x14b5
+
 struct xgbe_prv_data;
 
 struct xgbe_packet_data {
--
2.51.0

From e49479f30ef9b3576dbfd24c3d98f05567371dcd Mon Sep 17 00:00:00 2001
From: Raju Rangoju
Date: Fri, 9 May 2025 21:23:23 +0530
Subject: [PATCH 05/16] amd-xgbe: add support for new XPCS routines

Add the necessary support to enable the Renoir ethernet device. Since
the BAR1 address cannot be used to access the XPCS registers on
Renoir, use the SMN functions instead.

Some of the ethernet add-in cards have a dual PHY but share a single
MDIO line (between the ports). In such cases, link inconsistencies are
noticed during heavy traffic and during reboot stress tests. Using SMN
calls helps avoid such race conditions.
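
In outline, an XPCS register access over SMN follows the same
two-phase window scheme as the mmio path; a condensed sketch of the
read flow implemented below (error handling trimmed, names as in the
patch):

	/* address phase: select the window via the SMN mailbox */
	ret = amd_smn_write(0, pdata->smn_base + pdata->xpcs_window_sel_reg,
			    index);
	if (ret)
		return ret;

	/* data phase: read 32 bits through the window ... */
	ret = amd_smn_read(0, pdata->smn_base + offset, &mmd_data);
	if (ret)
		return ret;

	/* ... and keep the 16-bit half this register lives in */
	mmd_data = (offset % 4) ? FIELD_GET(XGBE_GEN_HI_MASK, mmd_data) :
				  FIELD_GET(XGBE_GEN_LO_MASK, mmd_data);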
Suggested-by: Sudheesh Mavila
Signed-off-by: Raju Rangoju
Reviewed-by: Simon Horman
Link: https://patch.msgid.link/20250509155325.720499-4-Raju.Rangoju@amd.com
Signed-off-by: Paolo Abeni
---
 drivers/net/ethernet/amd/xgbe/xgbe-dev.c | 86 ++++++++++++++++++++++++
 drivers/net/ethernet/amd/xgbe/xgbe-smn.h | 30 +++++++++
 drivers/net/ethernet/amd/xgbe/xgbe.h     |  6 ++
 3 files changed, 122 insertions(+)
 create mode 100644 drivers/net/ethernet/amd/xgbe/xgbe-smn.h

diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
index cdab1c24dd69..466b5f6e5578 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
@@ -11,9 +11,11 @@
 #include
 #include
 #include
+#include
 
 #include "xgbe.h"
 #include "xgbe-common.h"
+#include "xgbe-smn.h"
 
 static inline unsigned int xgbe_get_max_frame(struct xgbe_prv_data *pdata)
 {
@@ -1080,6 +1082,84 @@ static void xgbe_get_pcs_index_and_offset(struct xgbe_prv_data *pdata,
 	*offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask);
 }
 
+static int xgbe_read_mmd_regs_v3(struct xgbe_prv_data *pdata, int prtad,
+				 int mmd_reg)
+{
+	unsigned int mmd_address, index, offset;
+	u32 smn_address;
+	int mmd_data;
+	int ret;
+
+	mmd_address = xgbe_get_mmd_address(pdata, mmd_reg);
+
+	xgbe_get_pcs_index_and_offset(pdata, mmd_address, &index, &offset);
+
+	smn_address = pdata->smn_base + pdata->xpcs_window_sel_reg;
+	ret = amd_smn_write(0, smn_address, index);
+	if (ret)
+		return ret;
+
+	ret = amd_smn_read(0, pdata->smn_base + offset, &mmd_data);
+	if (ret)
+		return ret;
+
+	mmd_data = (offset % 4) ? FIELD_GET(XGBE_GEN_HI_MASK, mmd_data) :
+				  FIELD_GET(XGBE_GEN_LO_MASK, mmd_data);
+
+	return mmd_data;
+}
+
+static void xgbe_write_mmd_regs_v3(struct xgbe_prv_data *pdata, int prtad,
+				   int mmd_reg, int mmd_data)
+{
+	unsigned int pci_mmd_data, hi_mask, lo_mask;
+	unsigned int mmd_address, index, offset;
+	struct pci_dev *dev;
+	u32 smn_address;
+	int ret;
+
+	dev = pdata->pcidev;
+	mmd_address = xgbe_get_mmd_address(pdata, mmd_reg);
+
+	xgbe_get_pcs_index_and_offset(pdata, mmd_address, &index, &offset);
+
+	smn_address = pdata->smn_base + pdata->xpcs_window_sel_reg;
+	ret = amd_smn_write(0, smn_address, index);
+	if (ret) {
+		pci_err(dev, "Failed to write data 0x%x\n", index);
+		return;
+	}
+
+	ret = amd_smn_read(0, pdata->smn_base + offset, &pci_mmd_data);
+	if (ret) {
+		pci_err(dev, "Failed to read data\n");
+		return;
+	}
+
+	if (offset % 4) {
+		hi_mask = FIELD_PREP(XGBE_GEN_HI_MASK, mmd_data);
+		lo_mask = FIELD_GET(XGBE_GEN_LO_MASK, pci_mmd_data);
+	} else {
+		hi_mask = FIELD_PREP(XGBE_GEN_HI_MASK,
+				     FIELD_GET(XGBE_GEN_HI_MASK, pci_mmd_data));
+		lo_mask = FIELD_GET(XGBE_GEN_LO_MASK, mmd_data);
+	}
+
+	pci_mmd_data = hi_mask | lo_mask;
+
+	ret = amd_smn_write(0, smn_address, index);
+	if (ret) {
+		pci_err(dev, "Failed to write data 0x%x\n", index);
+		return;
+	}
+
+	ret = amd_smn_write(0, (pdata->smn_base + offset), pci_mmd_data);
+	if (ret) {
+		pci_err(dev, "Failed to write data 0x%x\n", pci_mmd_data);
+		return;
+	}
+}
+
 static int xgbe_read_mmd_regs_v2(struct xgbe_prv_data *pdata, int prtad,
 				 int mmd_reg)
 {
@@ -1174,6 +1254,9 @@ static int xgbe_read_mmd_regs(struct xgbe_prv_data *pdata, int prtad,
 	case XGBE_XPCS_ACCESS_V2:
 	default:
 		return xgbe_read_mmd_regs_v2(pdata, prtad, mmd_reg);
+
+	case XGBE_XPCS_ACCESS_V3:
+		return xgbe_read_mmd_regs_v3(pdata, prtad, mmd_reg);
 	}
 }
 
@@ -1184,6 +1267,9 @@ static void xgbe_write_mmd_regs(struct xgbe_prv_data *pdata, int prtad,
 	case XGBE_XPCS_ACCESS_V1:
 		return xgbe_write_mmd_regs_v1(pdata, prtad, mmd_reg, mmd_data);
 
+	case XGBE_XPCS_ACCESS_V3:
+		return xgbe_write_mmd_regs_v3(pdata, prtad, mmd_reg, mmd_data);
+
 	case XGBE_XPCS_ACCESS_V2:
 	default:
 		return xgbe_write_mmd_regs_v2(pdata, prtad, mmd_reg, mmd_data);
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-smn.h b/drivers/net/ethernet/amd/xgbe/xgbe-smn.h
new file mode 100644
index 000000000000..3fd03d39c18a
--- /dev/null
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-smn.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause) */
+/*
+ * Copyright (c) 2014-2025, Advanced Micro Devices, Inc.
+ * Copyright (c) 2014, Synopsys, Inc.
+ * All rights reserved
+ *
+ * Author: Raju Rangoju
+ */
+
+#ifndef __SMN_H__
+#define __SMN_H__
+
+#ifdef CONFIG_AMD_NB
+
+#include
+
+#else
+
+static inline int amd_smn_write(u16 node, u32 address, u32 value)
+{
+	return -ENODEV;
+}
+
+static inline int amd_smn_read(u16 node, u32 address, u32 *value)
+{
+	return -ENODEV;
+}
+
+#endif
+#endif
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
index 37e5f8fad6b2..44ba6b02cdeb 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
@@ -242,6 +242,10 @@
 #define XGBE_RV_PCI_DEVICE_ID	0x15d0
 #define XGBE_YC_PCI_DEVICE_ID	0x14b5
 
+/* Generic low and high masks */
+#define XGBE_GEN_HI_MASK	GENMASK(31, 16)
+#define XGBE_GEN_LO_MASK	GENMASK(15, 0)
+
 struct xgbe_prv_data;
 
 struct xgbe_packet_data {
@@ -460,6 +464,7 @@ enum xgbe_speed {
 enum xgbe_xpcs_access {
 	XGBE_XPCS_ACCESS_V1 = 0,
 	XGBE_XPCS_ACCESS_V2,
+	XGBE_XPCS_ACCESS_V3,
 };
 
 enum xgbe_an_mode {
@@ -955,6 +960,7 @@ struct xgbe_prv_data {
 	struct device *dev;
 	struct platform_device *phy_platdev;
 	struct device *phy_dev;
+	unsigned int smn_base;
 
 	/* Version related data */
 	struct xgbe_version_data *vdata;
--
2.51.0

From ab95bc9aa795aa987b16f6cc1192138e62036f99 Mon Sep 17 00:00:00 2001
From: Raju Rangoju
Date: Fri, 9 May 2025 21:23:24 +0530
Subject: [PATCH 06/16] amd-xgbe: Add XGBE_XPCS_ACCESS_V3 support to
 xgbe_pci_probe()

A new version of the XPCS access routines has been introduced; add
support to xgbe_pci_probe() to use these routines.
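
The per-port SMN base address is derived from the port ID property
before the window registers are read; schematically (same names as in
the hunk below):

	reg = XP_IOREAD(pdata, XP_PROP_0);
	pdata->smn_base = PCS_RN_SMN_BASE_ADDR +
			  PCS_RN_PORT_ADDR_SIZE *
			  XP_GET_BITS(reg, XP_PROP_0, PORT_ID);

i.e. each port's XPCS block occupies a 0x100000-byte region starting
at 0x11e00000.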
Signed-off-by: Raju Rangoju
Reviewed-by: Simon Horman
Link: https://patch.msgid.link/20250509155325.720499-5-Raju.Rangoju@amd.com
Signed-off-by: Paolo Abeni
---
 drivers/net/ethernet/amd/xgbe/xgbe-common.h |  5 +++
 drivers/net/ethernet/amd/xgbe/xgbe-pci.c    | 34 ++++++++++++++++-----
 drivers/net/ethernet/amd/xgbe/xgbe.h        |  1 +
 3 files changed, 33 insertions(+), 7 deletions(-)

diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
index e3d33f5b9642..e1296cbf4ff3 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
@@ -791,6 +791,11 @@
 #define PCS_V2_RV_WINDOW_SELECT		0x1064
 #define PCS_V2_YC_WINDOW_DEF		0x18060
 #define PCS_V2_YC_WINDOW_SELECT		0x18064
+#define PCS_V3_RN_WINDOW_DEF		0xf8078
+#define PCS_V3_RN_WINDOW_SELECT		0xf807c
+
+#define PCS_RN_SMN_BASE_ADDR		0x11e00000
+#define PCS_RN_PORT_ADDR_SIZE		0x100000
 
 /* PCS register entry bit positions and sizes */
 #define PCS_V2_WINDOW_DEF_OFFSET_INDEX	6
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
index d36446e76d0a..718534d30651 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
@@ -9,6 +9,7 @@
 #include
 #include
 #include
+#include "xgbe-smn.h"
 
 #include "xgbe.h"
 #include "xgbe-common.h"
@@ -98,14 +99,14 @@ out:
 static int xgbe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 {
-	struct xgbe_prv_data *pdata;
-	struct device *dev = &pdev->dev;
 	void __iomem * const *iomap_table;
-	struct pci_dev *rdev;
+	unsigned int port_addr_size, reg;
+	struct device *dev = &pdev->dev;
+	struct xgbe_prv_data *pdata;
 	unsigned int ma_lo, ma_hi;
-	unsigned int reg;
-	int bar_mask;
-	int ret;
+	struct pci_dev *rdev;
+	int bar_mask, ret;
+	u32 address;
 
 	pdata = xgbe_alloc_pdata(dev);
 	if (IS_ERR(pdata)) {
@@ -181,6 +182,10 @@ static int xgbe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 
 			/* Yellow Carp devices do not need rrc */
 			pdata->vdata->enable_rrc = 0;
 			break;
+		case XGBE_RN_PCI_DEVICE_ID:
+			pdata->xpcs_window_def_reg = PCS_V3_RN_WINDOW_DEF;
+			pdata->xpcs_window_sel_reg = PCS_V3_RN_WINDOW_SELECT;
+			break;
 		default:
 			pdata->xpcs_window_def_reg = PCS_V2_WINDOW_DEF;
 			pdata->xpcs_window_sel_reg = PCS_V2_WINDOW_SELECT;
@@ -193,7 +198,22 @@ static int xgbe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	pci_dev_put(rdev);
 
 	/* Configure the PCS indirect addressing support */
-	reg = XPCS32_IOREAD(pdata, pdata->xpcs_window_def_reg);
+	if (pdata->vdata->xpcs_access == XGBE_XPCS_ACCESS_V3) {
+		reg = XP_IOREAD(pdata, XP_PROP_0);
+		port_addr_size = PCS_RN_PORT_ADDR_SIZE *
+				 XP_GET_BITS(reg, XP_PROP_0, PORT_ID);
+		pdata->smn_base = PCS_RN_SMN_BASE_ADDR + port_addr_size;
+
+		address = pdata->smn_base + (pdata->xpcs_window_def_reg);
+		ret = amd_smn_read(0, address, &reg);
+		if (ret) {
+			pci_err(pdata->pcidev, "Failed to read data\n");
+			goto err_pci_enable;
+		}
+	} else {
+		reg = XPCS32_IOREAD(pdata, pdata->xpcs_window_def_reg);
+	}
+
 	pdata->xpcs_window = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, OFFSET);
 	pdata->xpcs_window <<= 6;
 	pdata->xpcs_window_size = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, SIZE);
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
index 44ba6b02cdeb..6359bb87dc13 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
@@ -241,6 +241,7 @@
 /* XGBE PCI device id */
 #define XGBE_RV_PCI_DEVICE_ID	0x15d0
 #define XGBE_YC_PCI_DEVICE_ID	0x14b5
+#define XGBE_RN_PCI_DEVICE_ID	0x1630
 
 /* Generic low and high masks */
 #define XGBE_GEN_HI_MASK	GENMASK(31, 16)
--
2.51.0

From 795f86ff050509d34a0e2b8bf8beb943d7e81897 Mon Sep 17 00:00:00 2001
From: Raju Rangoju
Date: Fri, 9 May 2025 21:23:25 +0530
Subject: [PATCH 07/16] amd-xgbe: add support for new pci device id 0x1641

Add support for the new PCI device id 0x1641 to register the Renoir
device with PCIe.

Signed-off-by: Raju Rangoju
Reviewed-by: Simon Horman
Link: https://patch.msgid.link/20250509155325.720499-6-Raju.Rangoju@amd.com
Signed-off-by: Paolo Abeni
---
 drivers/net/ethernet/amd/xgbe/xgbe-pci.c | 18 ++++++++++++++++++
 1 file changed, 18 insertions(+)

diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
index 718534d30651..097ec5e4f261 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
@@ -391,6 +391,22 @@ static int __maybe_unused xgbe_pci_resume(struct device *dev)
 	return ret;
 }
 
+static struct xgbe_version_data xgbe_v3 = {
+	.init_function_ptrs_phy_impl	= xgbe_init_function_ptrs_phy_v2,
+	.xpcs_access			= XGBE_XPCS_ACCESS_V3,
+	.mmc_64bit			= 1,
+	.tx_max_fifo_size		= 65536,
+	.rx_max_fifo_size		= 65536,
+	.tx_tstamp_workaround		= 1,
+	.ecc_support			= 1,
+	.i2c_support			= 1,
+	.irq_reissue_support		= 1,
+	.tx_desc_prefetch		= 5,
+	.rx_desc_prefetch		= 5,
+	.an_cdr_workaround		= 0,
+	.enable_rrc			= 0,
+};
+
 static struct xgbe_version_data xgbe_v2a = {
 	.init_function_ptrs_phy_impl	= xgbe_init_function_ptrs_phy_v2,
 	.xpcs_access			= XGBE_XPCS_ACCESS_V2,
@@ -428,6 +444,8 @@ static const struct pci_device_id xgbe_pci_table[] = {
 	  .driver_data = (kernel_ulong_t)&xgbe_v2a },
 	{ PCI_VDEVICE(AMD, 0x1459),
 	  .driver_data = (kernel_ulong_t)&xgbe_v2b },
+	{ PCI_VDEVICE(AMD, 0x1641),
+	  .driver_data = (kernel_ulong_t)&xgbe_v3 },
 	/* Last entry must be zero */
 	{ 0, }
 };
--
2.51.0

From d2338a27fcee9158d0378d759152b8e0a5933c88 Mon Sep 17 00:00:00 2001
From: Yevgeny Kliteynik
Date: Sun, 11 May 2025 22:38:01 +0300
Subject: [PATCH 08/16] net/mlx5: HWS, expose function
 mlx5hws_table_ft_set_next_ft in header

In preparation for complex matcher support, make the function
mlx5hws_table_ft_set_next_ft() non-static and expose it in the header.
Signed-off-by: Yevgeny Kliteynik
Reviewed-by: Vlad Dogaru
Reviewed-by: Mark Bloch
Signed-off-by: Tariq Toukan
Link: https://patch.msgid.link/1746992290-568936-2-git-send-email-tariqt@nvidia.com
Signed-off-by: Jakub Kicinski
---
 .../mellanox/mlx5/core/steering/hws/table.c | 16 ++++++++--------
 .../mellanox/mlx5/core/steering/hws/table.h |  5 +++++
 2 files changed, 13 insertions(+), 8 deletions(-)

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/table.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/table.c
index ab1297531232..568f691733f3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/table.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/table.c
@@ -342,10 +342,10 @@ int mlx5hws_table_ft_set_next_rtc(struct mlx5hws_context *ctx,
 	return mlx5hws_cmd_flow_table_modify(ctx->mdev, &ft_attr, ft_id);
 }
 
-static int hws_table_ft_set_next_ft(struct mlx5hws_context *ctx,
-				    u32 ft_id,
-				    u32 fw_ft_type,
-				    u32 next_ft_id)
+int mlx5hws_table_ft_set_next_ft(struct mlx5hws_context *ctx,
+				 u32 ft_id,
+				 u32 fw_ft_type,
+				 u32 next_ft_id)
 {
 	struct mlx5hws_cmd_ft_modify_attr ft_attr = {0};
 
@@ -389,10 +389,10 @@ int mlx5hws_table_connect_to_miss_table(struct mlx5hws_table *src_tbl,
 	if (dst_tbl) {
 		if (list_empty(&dst_tbl->matchers_list)) {
 			/* Connect src_tbl last_ft to dst_tbl start anchor */
-			ret = hws_table_ft_set_next_ft(src_tbl->ctx,
-						       last_ft_id,
-						       src_tbl->fw_ft_type,
-						       dst_tbl->ft_id);
+			ret = mlx5hws_table_ft_set_next_ft(src_tbl->ctx,
+							   last_ft_id,
+							   src_tbl->fw_ft_type,
+							   dst_tbl->ft_id);
 			if (ret)
 				return ret;
 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/table.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/table.h
index dd50420eec9e..0400cce0c317 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/table.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/table.h
@@ -65,4 +65,9 @@ int mlx5hws_table_ft_set_next_rtc(struct mlx5hws_context *ctx,
 				  u32 rtc_0_id,
 				  u32 rtc_1_id);
 
+int mlx5hws_table_ft_set_next_ft(struct mlx5hws_context *ctx,
+				 u32 ft_id,
+				 u32 fw_ft_type,
+				 u32 next_ft_id);
+
 #endif /* MLX5HWS_TABLE_H_ */
--
2.51.0

From fed5f4831281593a4bda2f8ef6912fdbcad6e670 Mon Sep 17 00:00:00 2001
From: Yevgeny Kliteynik
Date: Sun, 11 May 2025 22:38:02 +0300
Subject: [PATCH 09/16] net/mlx5: HWS, add definer function to get field name
 str

In preparation for complex matcher support, add a function for
converting a definer fname to a string, which will be used in the
following patches.
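
The table added below relies on the common designated-initializer-
plus-stringify idiom; a self-contained sketch of the same pattern
(toy enum, not the driver's):

	enum color { COLOR_RED, COLOR_GREEN, COLOR_MAX };

	#define COLOR_ENTRY(name)[COLOR_##name] = #name

	static const char * const color_str[] = {
		COLOR_ENTRY(RED),	/* [COLOR_RED] = "RED" */
		COLOR_ENTRY(GREEN),	/* [COLOR_GREEN] = "GREEN" */
		[COLOR_MAX] = "UNKNOWN",
	};

Out-of-range values are clamped to the last ("unknown") entry, which
is exactly what mlx5hws_definer_fname_to_str() does in the diff below.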
Signed-off-by: Yevgeny Kliteynik
Reviewed-by: Vlad Dogaru
Reviewed-by: Mark Bloch
Signed-off-by: Tariq Toukan
Link: https://patch.msgid.link/1746992290-568936-3-git-send-email-tariqt@nvidia.com
Signed-off-by: Jakub Kicinski
---
 .../mellanox/mlx5/core/steering/hws/definer.c | 212 ++++++++++++++++++
 .../mellanox/mlx5/core/steering/hws/definer.h |   2 +
 2 files changed, 214 insertions(+)

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.c
index 1061a46811ac..5cc0dc002ac1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.c
@@ -158,6 +158,218 @@ struct mlx5hws_definer_conv_data {
 	u32 match_flags;
 };
 
+#define HWS_DEFINER_ENTRY(name)[MLX5HWS_DEFINER_FNAME_##name] = #name
+
+static const char * const hws_definer_fname_to_str[] = {
+	HWS_DEFINER_ENTRY(ETH_SMAC_47_16_O),
+	HWS_DEFINER_ENTRY(ETH_SMAC_47_16_I),
+	HWS_DEFINER_ENTRY(ETH_SMAC_15_0_O),
+	HWS_DEFINER_ENTRY(ETH_SMAC_15_0_I),
+	HWS_DEFINER_ENTRY(ETH_DMAC_47_16_O),
+	HWS_DEFINER_ENTRY(ETH_DMAC_47_16_I),
+	HWS_DEFINER_ENTRY(ETH_DMAC_15_0_O),
+	HWS_DEFINER_ENTRY(ETH_DMAC_15_0_I),
+	HWS_DEFINER_ENTRY(ETH_TYPE_O),
+	HWS_DEFINER_ENTRY(ETH_TYPE_I),
+	HWS_DEFINER_ENTRY(ETH_L3_TYPE_O),
+	HWS_DEFINER_ENTRY(ETH_L3_TYPE_I),
+	HWS_DEFINER_ENTRY(VLAN_TYPE_O),
+	HWS_DEFINER_ENTRY(VLAN_TYPE_I),
+	HWS_DEFINER_ENTRY(VLAN_FIRST_PRIO_O),
+	HWS_DEFINER_ENTRY(VLAN_FIRST_PRIO_I),
+	HWS_DEFINER_ENTRY(VLAN_CFI_O),
+	HWS_DEFINER_ENTRY(VLAN_CFI_I),
+	HWS_DEFINER_ENTRY(VLAN_ID_O),
+	HWS_DEFINER_ENTRY(VLAN_ID_I),
+	HWS_DEFINER_ENTRY(VLAN_SECOND_TYPE_O),
+	HWS_DEFINER_ENTRY(VLAN_SECOND_TYPE_I),
+	HWS_DEFINER_ENTRY(VLAN_SECOND_PRIO_O),
+	HWS_DEFINER_ENTRY(VLAN_SECOND_PRIO_I),
+	HWS_DEFINER_ENTRY(VLAN_SECOND_CFI_O),
+	HWS_DEFINER_ENTRY(VLAN_SECOND_CFI_I),
+	HWS_DEFINER_ENTRY(VLAN_SECOND_ID_O),
+	HWS_DEFINER_ENTRY(VLAN_SECOND_ID_I),
+	HWS_DEFINER_ENTRY(IPV4_IHL_O),
+	HWS_DEFINER_ENTRY(IPV4_IHL_I),
+	HWS_DEFINER_ENTRY(IP_DSCP_O),
+	HWS_DEFINER_ENTRY(IP_DSCP_I),
+	HWS_DEFINER_ENTRY(IP_ECN_O),
+	HWS_DEFINER_ENTRY(IP_ECN_I),
+	HWS_DEFINER_ENTRY(IP_TTL_O),
+	HWS_DEFINER_ENTRY(IP_TTL_I),
+	HWS_DEFINER_ENTRY(IPV4_DST_O),
+	HWS_DEFINER_ENTRY(IPV4_DST_I),
+	HWS_DEFINER_ENTRY(IPV4_SRC_O),
+	HWS_DEFINER_ENTRY(IPV4_SRC_I),
+	HWS_DEFINER_ENTRY(IP_VERSION_O),
+	HWS_DEFINER_ENTRY(IP_VERSION_I),
+	HWS_DEFINER_ENTRY(IP_FRAG_O),
+	HWS_DEFINER_ENTRY(IP_FRAG_I),
+	HWS_DEFINER_ENTRY(IP_LEN_O),
+	HWS_DEFINER_ENTRY(IP_LEN_I),
+	HWS_DEFINER_ENTRY(IP_TOS_O),
+	HWS_DEFINER_ENTRY(IP_TOS_I),
+	HWS_DEFINER_ENTRY(IPV6_FLOW_LABEL_O),
+	HWS_DEFINER_ENTRY(IPV6_FLOW_LABEL_I),
+	HWS_DEFINER_ENTRY(IPV6_DST_127_96_O),
+	HWS_DEFINER_ENTRY(IPV6_DST_95_64_O),
+	HWS_DEFINER_ENTRY(IPV6_DST_63_32_O),
+	HWS_DEFINER_ENTRY(IPV6_DST_31_0_O),
+	HWS_DEFINER_ENTRY(IPV6_DST_127_96_I),
+	HWS_DEFINER_ENTRY(IPV6_DST_95_64_I),
+	HWS_DEFINER_ENTRY(IPV6_DST_63_32_I),
+	HWS_DEFINER_ENTRY(IPV6_DST_31_0_I),
+	HWS_DEFINER_ENTRY(IPV6_SRC_127_96_O),
+	HWS_DEFINER_ENTRY(IPV6_SRC_95_64_O),
+	HWS_DEFINER_ENTRY(IPV6_SRC_63_32_O),
+	HWS_DEFINER_ENTRY(IPV6_SRC_31_0_O),
+	HWS_DEFINER_ENTRY(IPV6_SRC_127_96_I),
+	HWS_DEFINER_ENTRY(IPV6_SRC_95_64_I),
+	HWS_DEFINER_ENTRY(IPV6_SRC_63_32_I),
+	HWS_DEFINER_ENTRY(IPV6_SRC_31_0_I),
+	HWS_DEFINER_ENTRY(IP_PROTOCOL_O),
+	HWS_DEFINER_ENTRY(IP_PROTOCOL_I),
+	HWS_DEFINER_ENTRY(L4_SPORT_O),
+	HWS_DEFINER_ENTRY(L4_SPORT_I),
+	HWS_DEFINER_ENTRY(L4_DPORT_O),
+	HWS_DEFINER_ENTRY(L4_DPORT_I),
+	HWS_DEFINER_ENTRY(TCP_FLAGS_I),
+	HWS_DEFINER_ENTRY(TCP_FLAGS_O),
+	HWS_DEFINER_ENTRY(TCP_SEQ_NUM),
+	HWS_DEFINER_ENTRY(TCP_ACK_NUM),
+	HWS_DEFINER_ENTRY(GTP_TEID),
+	HWS_DEFINER_ENTRY(GTP_MSG_TYPE),
+	HWS_DEFINER_ENTRY(GTP_EXT_FLAG),
+	HWS_DEFINER_ENTRY(GTP_NEXT_EXT_HDR),
+	HWS_DEFINER_ENTRY(GTP_EXT_HDR_PDU),
+	HWS_DEFINER_ENTRY(GTP_EXT_HDR_QFI),
+	HWS_DEFINER_ENTRY(GTPU_DW0),
+	HWS_DEFINER_ENTRY(GTPU_FIRST_EXT_DW0),
+	HWS_DEFINER_ENTRY(GTPU_DW2),
+	HWS_DEFINER_ENTRY(FLEX_PARSER_0),
+	HWS_DEFINER_ENTRY(FLEX_PARSER_1),
+	HWS_DEFINER_ENTRY(FLEX_PARSER_2),
+	HWS_DEFINER_ENTRY(FLEX_PARSER_3),
+	HWS_DEFINER_ENTRY(FLEX_PARSER_4),
+	HWS_DEFINER_ENTRY(FLEX_PARSER_5),
+	HWS_DEFINER_ENTRY(FLEX_PARSER_6),
+	HWS_DEFINER_ENTRY(FLEX_PARSER_7),
+	HWS_DEFINER_ENTRY(VPORT_REG_C_0),
+	HWS_DEFINER_ENTRY(VXLAN_FLAGS),
+	HWS_DEFINER_ENTRY(VXLAN_VNI),
+	HWS_DEFINER_ENTRY(VXLAN_GPE_FLAGS),
+	HWS_DEFINER_ENTRY(VXLAN_GPE_RSVD0),
+	HWS_DEFINER_ENTRY(VXLAN_GPE_PROTO),
+	HWS_DEFINER_ENTRY(VXLAN_GPE_VNI),
+	HWS_DEFINER_ENTRY(VXLAN_GPE_RSVD1),
+	HWS_DEFINER_ENTRY(GENEVE_OPT_LEN),
+	HWS_DEFINER_ENTRY(GENEVE_OAM),
+	HWS_DEFINER_ENTRY(GENEVE_PROTO),
+	HWS_DEFINER_ENTRY(GENEVE_VNI),
+	HWS_DEFINER_ENTRY(SOURCE_QP),
+	HWS_DEFINER_ENTRY(SOURCE_GVMI),
+	HWS_DEFINER_ENTRY(REG_0),
+	HWS_DEFINER_ENTRY(REG_1),
+	HWS_DEFINER_ENTRY(REG_2),
+	HWS_DEFINER_ENTRY(REG_3),
+	HWS_DEFINER_ENTRY(REG_4),
+	HWS_DEFINER_ENTRY(REG_5),
+	HWS_DEFINER_ENTRY(REG_6),
+	HWS_DEFINER_ENTRY(REG_7),
+	HWS_DEFINER_ENTRY(REG_8),
+	HWS_DEFINER_ENTRY(REG_9),
+	HWS_DEFINER_ENTRY(REG_10),
+	HWS_DEFINER_ENTRY(REG_11),
+	HWS_DEFINER_ENTRY(REG_A),
+	HWS_DEFINER_ENTRY(REG_B),
+	HWS_DEFINER_ENTRY(GRE_KEY_PRESENT),
+	HWS_DEFINER_ENTRY(GRE_C),
+	HWS_DEFINER_ENTRY(GRE_K),
+	HWS_DEFINER_ENTRY(GRE_S),
+	HWS_DEFINER_ENTRY(GRE_PROTOCOL),
+	HWS_DEFINER_ENTRY(GRE_OPT_KEY),
+	HWS_DEFINER_ENTRY(GRE_OPT_SEQ),
+	HWS_DEFINER_ENTRY(GRE_OPT_CHECKSUM),
+	HWS_DEFINER_ENTRY(INTEGRITY_O),
+	HWS_DEFINER_ENTRY(INTEGRITY_I),
+	HWS_DEFINER_ENTRY(ICMP_DW1),
+	HWS_DEFINER_ENTRY(ICMP_DW2),
+	HWS_DEFINER_ENTRY(ICMP_DW3),
+	HWS_DEFINER_ENTRY(IPSEC_SPI),
+	HWS_DEFINER_ENTRY(IPSEC_SEQUENCE_NUMBER),
+	HWS_DEFINER_ENTRY(IPSEC_SYNDROME),
+	HWS_DEFINER_ENTRY(MPLS0_O),
+	HWS_DEFINER_ENTRY(MPLS1_O),
+	HWS_DEFINER_ENTRY(MPLS2_O),
+	HWS_DEFINER_ENTRY(MPLS3_O),
+	HWS_DEFINER_ENTRY(MPLS4_O),
+	HWS_DEFINER_ENTRY(MPLS0_I),
+	HWS_DEFINER_ENTRY(MPLS1_I),
+	HWS_DEFINER_ENTRY(MPLS2_I),
+	HWS_DEFINER_ENTRY(MPLS3_I),
+	HWS_DEFINER_ENTRY(MPLS4_I),
+	HWS_DEFINER_ENTRY(FLEX_PARSER0_OK),
+	HWS_DEFINER_ENTRY(FLEX_PARSER1_OK),
+	HWS_DEFINER_ENTRY(FLEX_PARSER2_OK),
+	HWS_DEFINER_ENTRY(FLEX_PARSER3_OK),
+	HWS_DEFINER_ENTRY(FLEX_PARSER4_OK),
+	HWS_DEFINER_ENTRY(FLEX_PARSER5_OK),
+	HWS_DEFINER_ENTRY(FLEX_PARSER6_OK),
+	HWS_DEFINER_ENTRY(FLEX_PARSER7_OK),
+	HWS_DEFINER_ENTRY(OKS2_MPLS0_O),
+	HWS_DEFINER_ENTRY(OKS2_MPLS1_O),
+	HWS_DEFINER_ENTRY(OKS2_MPLS2_O),
+	HWS_DEFINER_ENTRY(OKS2_MPLS3_O),
+	HWS_DEFINER_ENTRY(OKS2_MPLS4_O),
+	HWS_DEFINER_ENTRY(OKS2_MPLS0_I),
+	HWS_DEFINER_ENTRY(OKS2_MPLS1_I),
+	HWS_DEFINER_ENTRY(OKS2_MPLS2_I),
+	HWS_DEFINER_ENTRY(OKS2_MPLS3_I),
+	HWS_DEFINER_ENTRY(OKS2_MPLS4_I),
+	HWS_DEFINER_ENTRY(GENEVE_OPT_OK_0),
+	HWS_DEFINER_ENTRY(GENEVE_OPT_OK_1),
+	HWS_DEFINER_ENTRY(GENEVE_OPT_OK_2),
+	HWS_DEFINER_ENTRY(GENEVE_OPT_OK_3),
+	HWS_DEFINER_ENTRY(GENEVE_OPT_OK_4),
+	HWS_DEFINER_ENTRY(GENEVE_OPT_OK_5),
+	HWS_DEFINER_ENTRY(GENEVE_OPT_OK_6),
+	HWS_DEFINER_ENTRY(GENEVE_OPT_OK_7),
+	HWS_DEFINER_ENTRY(GENEVE_OPT_DW_0),
+	HWS_DEFINER_ENTRY(GENEVE_OPT_DW_1),
+	HWS_DEFINER_ENTRY(GENEVE_OPT_DW_2),
+	HWS_DEFINER_ENTRY(GENEVE_OPT_DW_3),
+	HWS_DEFINER_ENTRY(GENEVE_OPT_DW_4),
+	HWS_DEFINER_ENTRY(GENEVE_OPT_DW_5),
+	HWS_DEFINER_ENTRY(GENEVE_OPT_DW_6),
+	HWS_DEFINER_ENTRY(GENEVE_OPT_DW_7),
+	HWS_DEFINER_ENTRY(IB_L4_OPCODE),
+	HWS_DEFINER_ENTRY(IB_L4_QPN),
+	HWS_DEFINER_ENTRY(IB_L4_A),
+	HWS_DEFINER_ENTRY(RANDOM_NUM),
+	HWS_DEFINER_ENTRY(PTYPE_L2_O),
+	HWS_DEFINER_ENTRY(PTYPE_L2_I),
+	HWS_DEFINER_ENTRY(PTYPE_L3_O),
+	HWS_DEFINER_ENTRY(PTYPE_L3_I),
+	HWS_DEFINER_ENTRY(PTYPE_L4_O),
+	HWS_DEFINER_ENTRY(PTYPE_L4_I),
+	HWS_DEFINER_ENTRY(PTYPE_L4_EXT_O),
+	HWS_DEFINER_ENTRY(PTYPE_L4_EXT_I),
+	HWS_DEFINER_ENTRY(PTYPE_FRAG_O),
+	HWS_DEFINER_ENTRY(PTYPE_FRAG_I),
+	HWS_DEFINER_ENTRY(TNL_HDR_0),
+	HWS_DEFINER_ENTRY(TNL_HDR_1),
+	HWS_DEFINER_ENTRY(TNL_HDR_2),
+	HWS_DEFINER_ENTRY(TNL_HDR_3),
+	[MLX5HWS_DEFINER_FNAME_MAX] = "DEFINER_FNAME_UNKNOWN",
+};
+
+const char *mlx5hws_definer_fname_to_str(enum mlx5hws_definer_fname fname)
+{
+	if (fname > MLX5HWS_DEFINER_FNAME_MAX)
+		fname = MLX5HWS_DEFINER_FNAME_MAX;
+	return hws_definer_fname_to_str[fname];
+}
+
 static void
 hws_definer_ones_set(struct mlx5hws_definer_fc *fc,
 		     void *match_param,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.h
index 5c1a2086efba..62da55389331 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.h
@@ -831,4 +831,6 @@ mlx5hws_definer_conv_match_params_to_compressed_fc(struct mlx5hws_context *ctx,
 						   u32 *match_param,
 						   int *fc_sz);
 
+const char *mlx5hws_definer_fname_to_str(enum mlx5hws_definer_fname fname);
+
 #endif /* HWS_DEFINER_H_ */
--
2.51.0

From 3c739d1624e3c3186a0a0248e91851a085f6e45b Mon Sep 17 00:00:00 2001
From: Yevgeny Kliteynik
Date: Sun, 11 May 2025 22:38:03 +0300
Subject: [PATCH 10/16] net/mlx5: HWS, expose polling function in header file

In preparation for the complex matcher, expose the function that polls
a queue for completion (mlx5hws_bwc_queue_poll) in the header file, so
that it can be used by the complex matcher code.
Signed-off-by: Yevgeny Kliteynik
Reviewed-by: Vlad Dogaru
Reviewed-by: Mark Bloch
Signed-off-by: Tariq Toukan
Link: https://patch.msgid.link/1746992290-568936-4-git-send-email-tariqt@nvidia.com
Signed-off-by: Jakub Kicinski
---
 .../mellanox/mlx5/core/steering/hws/bwc.c | 29 ++++++++++---------
 .../mellanox/mlx5/core/steering/hws/bwc.h |  5 ++++
 2 files changed, 21 insertions(+), 13 deletions(-)

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.c
index 510bfbbe5991..27b6420678d8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.c
@@ -223,10 +223,10 @@ int mlx5hws_bwc_matcher_destroy(struct mlx5hws_bwc_matcher *bwc_matcher)
 	return 0;
 }
 
-static int hws_bwc_queue_poll(struct mlx5hws_context *ctx,
-			      u16 queue_id,
-			      u32 *pending_rules,
-			      bool drain)
+int mlx5hws_bwc_queue_poll(struct mlx5hws_context *ctx,
+			   u16 queue_id,
+			   u32 *pending_rules,
+			   bool drain)
 {
 	unsigned long timeout = jiffies +
 				secs_to_jiffies(MLX5HWS_BWC_POLLING_TIMEOUT);
@@ -361,7 +361,8 @@ hws_bwc_rule_destroy_hws_sync(struct mlx5hws_bwc_rule *bwc_rule,
 	if (unlikely(ret))
 		return ret;
 
-	ret = hws_bwc_queue_poll(ctx, rule_attr->queue_id, &expected_completions, true);
+	ret = mlx5hws_bwc_queue_poll(ctx, rule_attr->queue_id,
+				     &expected_completions, true);
 	if (unlikely(ret))
 		return ret;
 
@@ -442,9 +443,8 @@ hws_bwc_rule_create_sync(struct mlx5hws_bwc_rule *bwc_rule,
 	if (unlikely(ret))
 		return ret;
 
-	ret = hws_bwc_queue_poll(ctx, rule_attr->queue_id, &expected_completions, true);
-
-	return ret;
+	return mlx5hws_bwc_queue_poll(ctx, rule_attr->queue_id,
+				      &expected_completions, true);
 }
 
 static int
@@ -465,7 +465,8 @@ hws_bwc_rule_update_sync(struct mlx5hws_bwc_rule *bwc_rule,
 	if (unlikely(ret))
 		return ret;
 
-	ret = hws_bwc_queue_poll(ctx, rule_attr->queue_id, &expected_completions, true);
+	ret = mlx5hws_bwc_queue_poll(ctx, rule_attr->queue_id,
+				     &expected_completions, true);
 	if (unlikely(ret))
 		mlx5hws_err(ctx, "Failed updating BWC rule (%d)\n", ret);
 
@@ -651,8 +652,10 @@ static int hws_bwc_matcher_move_all_simple(struct mlx5hws_bwc_matcher *bwc_match
 					    &bwc_matcher->rules[i]) ?
 				       NULL : list_next_entry(bwc_rules[i], list_node);
 
-			ret = hws_bwc_queue_poll(ctx, rule_attr.queue_id,
-						 &pending_rules[i], false);
+			ret = mlx5hws_bwc_queue_poll(ctx,
+						     rule_attr.queue_id,
+						     &pending_rules[i],
+						     false);
 			if (unlikely(ret)) {
 				mlx5hws_err(ctx,
 					    "Moving BWC rule failed during rehash (%d)\n",
@@ -669,8 +672,8 @@ static int hws_bwc_matcher_move_all_simple(struct mlx5hws_bwc_matcher *bwc_match
 		u16 queue_id = mlx5hws_bwc_get_queue_id(ctx, i);
 
 		mlx5hws_send_engine_flush_queue(&ctx->send_queue[queue_id]);
-		ret = hws_bwc_queue_poll(ctx, queue_id,
-					 &pending_rules[i], true);
+		ret = mlx5hws_bwc_queue_poll(ctx, queue_id,
+					     &pending_rules[i], true);
 		if (unlikely(ret)) {
 			mlx5hws_err(ctx,
 				    "Moving BWC rule failed during rehash (%d)\n", ret);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.h
index bb0cf4b922ce..a2aa2d5da694 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.h
@@ -64,6 +64,11 @@ void mlx5hws_bwc_rule_fill_attr(struct mlx5hws_bwc_matcher *bwc_matcher,
 				u32 flow_source,
 				struct mlx5hws_rule_attr *rule_attr);
 
+int mlx5hws_bwc_queue_poll(struct mlx5hws_context *ctx,
+			   u16 queue_id,
+			   u32 *pending_rules,
+			   bool drain);
+
 static inline u16 mlx5hws_bwc_queues(struct mlx5hws_context *ctx)
 {
 	/* Besides the control queue, half of the queues are
--
2.51.0

From b816743a182f532faaeaa9aaed147ff09513e375 Mon Sep 17 00:00:00 2001
From: Yevgeny Kliteynik
Date: Sun, 11 May 2025 22:38:04 +0300
Subject: [PATCH 11/16] net/mlx5: HWS, introduce isolated matchers

In preparation for complex matcher support, introduce the isolated
matcher.

An isolated matcher is a matcher that has its own isolated table. It
is used as the second half of a complex matcher: when a rule is split
into two parts (a complex rule), matching on the first part sends the
packet to the isolated matcher, which tries to match on the second
part. In case of a miss, the packet goes back to the matcher's end
flow table.

Signed-off-by: Yevgeny Kliteynik
Reviewed-by: Vlad Dogaru
Reviewed-by: Mark Bloch
Signed-off-by: Tariq Toukan
Link: https://patch.msgid.link/1746992290-568936-5-git-send-email-tariqt@nvidia.com
Signed-off-by: Jakub Kicinski
---
 .../mellanox/mlx5/core/steering/hws/matcher.c | 278 +++++++++++++++++-
 .../mellanox/mlx5/core/steering/hws/matcher.h |   9 +
 .../mellanox/mlx5/core/steering/hws/mlx5hws.h |   2 +
 3 files changed, 288 insertions(+), 1 deletion(-)

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.c
index 5b0c1623499b..ce28ee1c0e41 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.c
@@ -23,19 +23,199 @@ static void hws_matcher_destroy_end_ft(struct mlx5hws_matcher *matcher)
 	mlx5hws_table_destroy_default_ft(matcher->tbl, matcher->end_ft_id);
 }
 
+int mlx5hws_matcher_update_end_ft_isolated(struct mlx5hws_table *tbl,
+					   u32 miss_ft_id)
+{
+	struct mlx5hws_matcher *tmp_matcher;
+
+	if (list_empty(&tbl->matchers_list))
+		return -EINVAL;
+
+	/* Update isolated_matcher_end_ft_id attribute for all
+	 * the matchers in isolated table.
+	 */
+	list_for_each_entry(tmp_matcher, &tbl->matchers_list, list_node)
+		tmp_matcher->attr.isolated_matcher_end_ft_id = miss_ft_id;
+
+	tmp_matcher = list_last_entry(&tbl->matchers_list,
+				      struct mlx5hws_matcher,
+				      list_node);
+
+	return mlx5hws_table_ft_set_next_ft(tbl->ctx,
+					    tmp_matcher->end_ft_id,
+					    tbl->fw_ft_type,
+					    miss_ft_id);
+}
+
+static int hws_matcher_connect_end_ft_isolated(struct mlx5hws_matcher *matcher)
+{
+	struct mlx5hws_table *tbl = matcher->tbl;
+	u32 end_ft_id;
+	int ret;
+
+	/* Reset end_ft next RTCs */
+	ret = mlx5hws_table_ft_set_next_rtc(tbl->ctx,
+					    matcher->end_ft_id,
+					    matcher->tbl->fw_ft_type,
+					    0, 0);
+	if (ret) {
+		mlx5hws_err(tbl->ctx, "Isolated matcher: failed to reset FT's next RTCs\n");
+		return ret;
+	}
+
+	/* Connect isolated matcher's end_ft to the complex matcher's end FT */
+	end_ft_id = matcher->attr.isolated_matcher_end_ft_id;
+	ret = mlx5hws_table_ft_set_next_ft(tbl->ctx,
+					   matcher->end_ft_id,
+					   matcher->tbl->fw_ft_type,
+					   end_ft_id);
+
+	if (ret) {
+		mlx5hws_err(tbl->ctx, "Isolated matcher: failed to set FT's miss_ft_id\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+static int hws_matcher_create_end_ft_isolated(struct mlx5hws_matcher *matcher)
+{
+	struct mlx5hws_table *tbl = matcher->tbl;
+	int ret;
+
+	ret = mlx5hws_table_create_default_ft(tbl->ctx->mdev,
+					      tbl,
+					      &matcher->end_ft_id);
+	if (ret) {
+		mlx5hws_err(tbl->ctx, "Isolated matcher: failed to create end flow table\n");
+		return ret;
+	}
+
+	ret = hws_matcher_connect_end_ft_isolated(matcher);
+	if (ret) {
+		mlx5hws_err(tbl->ctx, "Isolated matcher: failed to connect end FT\n");
+		goto destroy_default_ft;
+	}
+
+	return 0;
+
+destroy_default_ft:
+	mlx5hws_table_destroy_default_ft(tbl, matcher->end_ft_id);
+	return ret;
+}
+
 static int hws_matcher_create_end_ft(struct mlx5hws_matcher *matcher)
 {
 	struct mlx5hws_table *tbl = matcher->tbl;
 	int ret;
 
-	ret = mlx5hws_table_create_default_ft(tbl->ctx->mdev, tbl, &matcher->end_ft_id);
+	if (mlx5hws_matcher_is_isolated(matcher))
+		ret = hws_matcher_create_end_ft_isolated(matcher);
+	else
+		ret = mlx5hws_table_create_default_ft(tbl->ctx->mdev, tbl,
+						      &matcher->end_ft_id);
+
 	if (ret) {
 		mlx5hws_err(tbl->ctx, "Failed to create matcher end flow table\n");
 		return ret;
 	}
+
+	return 0;
+}
+
+static int hws_matcher_connect_isolated_first(struct mlx5hws_matcher *matcher)
+{
+	struct mlx5hws_table *tbl = matcher->tbl;
+	struct mlx5hws_context *ctx = tbl->ctx;
+	int ret;
+
+	/* Isolated matcher's end_ft is already pointing to the end_ft
+	 * of the complex matcher - it was set at creation of end_ft,
+	 * so no need to connect it.
+	 * We still need to connect the isolated table's start FT to
+	 * this matcher's RTC.
+	 */
+	ret = mlx5hws_table_ft_set_next_rtc(ctx,
+					    tbl->ft_id,
+					    tbl->fw_ft_type,
+					    matcher->match_ste.rtc_0_id,
+					    matcher->match_ste.rtc_1_id);
+	if (ret) {
+		mlx5hws_err(ctx, "Isolated matcher: failed to connect start FT to match RTC\n");
+		return ret;
+	}
+
+	/* Reset table's FT default miss (drop refcount) */
+	ret = mlx5hws_table_ft_set_default_next_ft(tbl, tbl->ft_id);
+	if (ret) {
+		mlx5hws_err(ctx, "Isolated matcher: failed to reset table ft default miss\n");
+		return ret;
+	}
+
+	list_add(&matcher->list_node, &tbl->matchers_list);
+
+	return ret;
+}
+
+static int hws_matcher_connect_isolated_last(struct mlx5hws_matcher *matcher)
+{
+	struct mlx5hws_table *tbl = matcher->tbl;
+	struct mlx5hws_context *ctx = tbl->ctx;
+	struct mlx5hws_matcher *last;
+	int ret;
+
+	last = list_last_entry(&tbl->matchers_list,
+			       struct mlx5hws_matcher,
+			       list_node);
+
+	/* New matcher's end_ft is already pointing to the end_ft of
+	 * the complex matcher.
+	 * Connect previous matcher's end_ft to this new matcher RTC.
+	 */
+	ret = mlx5hws_table_ft_set_next_rtc(ctx,
+					    last->end_ft_id,
+					    tbl->fw_ft_type,
+					    matcher->match_ste.rtc_0_id,
+					    matcher->match_ste.rtc_1_id);
+	if (ret) {
+		mlx5hws_err(ctx,
+			    "Isolated matcher: failed to connect matcher end_ft to new match RTC\n");
+		return ret;
+	}
+
+	/* Reset prev matcher FT default miss (drop refcount) */
+	ret = mlx5hws_table_ft_set_default_next_ft(tbl, last->end_ft_id);
+	if (ret) {
+		mlx5hws_err(ctx, "Isolated matcher: failed to reset matcher ft default miss\n");
+		return ret;
+	}
+
+	/* Insert after the last matcher */
+	list_add(&matcher->list_node, &last->list_node);
+
+	return 0;
+}
+
+static int hws_matcher_connect_isolated(struct mlx5hws_matcher *matcher)
+{
+	/* Isolated matcher is expected to be the only one in its table.
+	 * However, it can have a collision matcher, and it can go through
+	 * rehash process, in which case we will temporary have both old and
+	 * new matchers in the isolated table.
+	 * Check if this is the first matcher in the isolated table.
+	 */
+	if (list_empty(&matcher->tbl->matchers_list))
+		return hws_matcher_connect_isolated_first(matcher);
+
+	/* If this wasn't the first matcher, then we have 3 possible cases:
+	 * - this is a collision matcher for the first matcher
+	 * - this is a new rehash dest matcher
+	 * - this is a collision matcher for the new rehash dest matcher
+	 * The logic to add new matcher is the same for all these cases.
+	 */
+	return hws_matcher_connect_isolated_last(matcher);
+}
+
 static int hws_matcher_connect(struct mlx5hws_matcher *matcher)
 {
 	struct mlx5hws_table *tbl = matcher->tbl;
@@ -45,6 +225,9 @@ static int hws_matcher_connect(struct mlx5hws_matcher *matcher)
 	struct mlx5hws_matcher *tmp_matcher;
 	int ret;
 
+	if (mlx5hws_matcher_is_isolated(matcher))
+		return hws_matcher_connect_isolated(matcher);
+
 	/* Find location in matcher list */
 	if (list_empty(&tbl->matchers_list)) {
 		list_add(&matcher->list_node, &tbl->matchers_list);
@@ -121,6 +304,92 @@ remove_from_list:
 	return ret;
 }
 
+static int hws_matcher_disconnect_isolated(struct mlx5hws_matcher *matcher)
+{
+	struct mlx5hws_matcher *first, *last, *prev, *next;
+	struct mlx5hws_table *tbl = matcher->tbl;
+	struct mlx5hws_context *ctx = tbl->ctx;
+	u32 end_ft_id;
+	int ret;
+
+	first = list_first_entry(&tbl->matchers_list,
+				 struct mlx5hws_matcher,
+				 list_node);
+	last = list_last_entry(&tbl->matchers_list,
+			       struct mlx5hws_matcher,
+			       list_node);
+	prev = list_prev_entry(matcher, list_node);
+	next = list_next_entry(matcher, list_node);
+
+	list_del_init(&matcher->list_node);
+
+	if (first == last) {
+		/* This was the only matcher in the list.
+		 * Reset isolated table FT next RTCs and connect it
+		 * to the whole complex matcher end FT instead.
+		 */
+		ret = mlx5hws_table_ft_set_next_rtc(ctx,
+						    tbl->ft_id,
+						    tbl->fw_ft_type,
+						    0, 0);
+		if (ret) {
+			mlx5hws_err(tbl->ctx, "Isolated matcher: failed to reset FT's next RTCs\n");
+			return ret;
+		}
+
+		end_ft_id = matcher->attr.isolated_matcher_end_ft_id;
+		ret = mlx5hws_table_ft_set_next_ft(tbl->ctx,
+						   tbl->ft_id,
+						   tbl->fw_ft_type,
+						   end_ft_id);
+		if (ret) {
+			mlx5hws_err(tbl->ctx, "Isolated matcher: failed to set FT's miss_ft_id\n");
+			return ret;
+		}
+
+		return 0;
+	}
+
+	/* At this point we know that there are more matchers in the list */
+
+	if (matcher == first) {
+		/* We've disconnected the first matcher.
+		 * Now update isolated table default FT.
+		 */
+		if (!next)
+			return -EINVAL;
+		return mlx5hws_table_ft_set_next_rtc(ctx,
+						     tbl->ft_id,
+						     tbl->fw_ft_type,
+						     next->match_ste.rtc_0_id,
+						     next->match_ste.rtc_1_id);
+	}
+
+	if (matcher == last) {
+		/* If we've disconnected the last matcher - update prev
+		 * matcher's end_ft to point to the complex matcher end_ft.
+		 */
+		if (!prev)
+			return -EINVAL;
+		return hws_matcher_connect_end_ft_isolated(prev);
+	}
+
+	/* This wasn't the first or the last matcher, which means that it has
+	 * both prev and next matchers. Note that this only happens if we're
+	 * disconnecting collision matcher of the old matcher during rehash.
+	 */
+	if (!prev || !next ||
+	    !(matcher->flags & MLX5HWS_MATCHER_FLAGS_COLLISION))
+		return -EINVAL;
+
+	/* Update prev end FT to point to next match RTC */
+	return mlx5hws_table_ft_set_next_rtc(ctx,
+					     prev->end_ft_id,
+					     tbl->fw_ft_type,
+					     next->match_ste.rtc_0_id,
+					     next->match_ste.rtc_1_id);
+}
+
 static int hws_matcher_disconnect(struct mlx5hws_matcher *matcher)
 {
 	struct mlx5hws_matcher *next = NULL, *prev = NULL;
@@ -128,6 +397,9 @@ static int hws_matcher_disconnect(struct mlx5hws_matcher *matcher)
 	u32 prev_ft_id = tbl->ft_id;
 	int ret;
 
+	if (mlx5hws_matcher_is_isolated(matcher))
+		return hws_matcher_disconnect_isolated(matcher);
+
 	if (!list_is_first(&matcher->list_node, &tbl->matchers_list)) {
 		prev = list_prev_entry(matcher, list_node);
 		prev_ft_id = prev->end_ft_id;
@@ -531,6 +803,8 @@ hws_matcher_process_attr(struct mlx5hws_cmd_query_caps *caps,
 		attr->table.sz_col_log = hws_matcher_rules_to_tbl_depth(attr->rule.num_log);
 
 	matcher->flags |= attr->resizable ? MLX5HWS_MATCHER_FLAGS_RESIZABLE : 0;
+	matcher->flags |= attr->isolated_matcher_end_ft_id ?
+			  MLX5HWS_MATCHER_FLAGS_ISOLATED : 0;
 
 	return hws_matcher_check_attr_sz(caps, matcher);
 }
@@ -617,6 +891,8 @@ hws_matcher_create_col_matcher(struct mlx5hws_matcher *matcher)
 	col_matcher->attr.table.sz_row_log -= MLX5HWS_MATCHER_ASSURED_ROW_RATIO;
 
 	col_matcher->attr.max_num_of_at_attach = matcher->attr.max_num_of_at_attach;
+	col_matcher->attr.isolated_matcher_end_ft_id =
+		matcher->attr.isolated_matcher_end_ft_id;
 
 	ret = hws_matcher_process_attr(ctx->caps, col_matcher);
 	if (ret)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.h
index 8e95158a66b5..32e83cddcd60 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.h
@@ -34,6 +34,7 @@ enum mlx5hws_matcher_offset {
 enum mlx5hws_matcher_flags {
 	MLX5HWS_MATCHER_FLAGS_COLLISION = 1 << 2,
 	MLX5HWS_MATCHER_FLAGS_RESIZABLE = 1 << 3,
+	MLX5HWS_MATCHER_FLAGS_ISOLATED = 1 << 4,
 };
 
 struct mlx5hws_match_template {
@@ -96,9 +97,17 @@ static inline bool mlx5hws_matcher_is_in_resize(struct mlx5hws_matcher *matcher)
 	return !!matcher->resize_dst;
 }
 
+static inline bool mlx5hws_matcher_is_isolated(struct mlx5hws_matcher *matcher)
+{
+	return !!(matcher->flags & MLX5HWS_MATCHER_FLAGS_ISOLATED);
+}
+
 static inline bool mlx5hws_matcher_is_insert_by_idx(struct mlx5hws_matcher *matcher)
 {
 	return matcher->attr.insert_mode == MLX5HWS_MATCHER_INSERT_BY_INDEX;
 }
 
+int mlx5hws_matcher_update_end_ft_isolated(struct mlx5hws_table *tbl,
+					   u32 miss_ft_id);
+
 #endif /* HWS_MATCHER_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws.h
index 5121951f2778..fbd63369da10 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws.h
@@ -119,6 +119,8 @@ struct mlx5hws_matcher_attr {
 	};
 	/* Optional AT attach configuration - Max number of additional AT */
 	u8 max_num_of_at_attach;
+	/* Optional end FT (miss FT ID) for match RTC (for isolated matcher) */
+	u32 isolated_matcher_end_ft_id;
 };
 
 struct mlx5hws_rule_attr {
--
2.51.0

From 17e0accac577fd6ea2090934d71a8c6f36702a26 Mon Sep 17 00:00:00 2001
From: Yevgeny Kliteynik
Date: Sun, 11 May 2025 22:38:05 +0300
Subject: [PATCH 12/16] net/mlx5: HWS, support complex matchers
MIME-Version: 1.0
Content-Type: text/plain; charset=utf8
Content-Transfer-Encoding: 8bit

This patch adds support for Complex Matchers/Rules.

Overview:
--------

A matcher can match on a certain set of match parameters. However, the
number and size of match params for a single matcher are limited: all
the parameters must fit within a single definer.

A common example of this limitation is IPv6 address matching, where
matching both source and destination IPs requires more bits than a
single definer can support.

SW Steering addresses this limitation by chaining multiple Steering
Table Entries (STEs) within the same matcher, where each STE matches
on a subset of the parameters.

In HW Steering, such chaining is not possible — the matcher's STEs are
managed in a hash table, and a single definer is used to calculate the
hash index for STEs.

To address this limitation in HW Steering, we introduce Complex
Matchers, which consist of two chained matchers. This allows matching
on twice as many parameters. Complex Matchers are filled with Complex
Rules — rules that are split into two parts and inserted into their
respective matchers.

The first half of the Complex Matcher is a regular matcher and points
to the second half, which is an Isolated Matcher. An Isolated Matcher
has its own isolated table and is accessible only by traffic coming
from the first half of the Complex Matcher.

This splitting of matchers/rules into multiple parts is transparent to
users. It is hidden under the BWC HWS API. It becomes visible only when
dumping steering debug information, where the Complex Matcher appears
as two separate matchers: one in the user-created table and another in
its isolated table.

Some implementation details:
---------------------------

All user actions are performed on the second part of the rules only.
The first part handles matching and applies two actions: modify header
(set metadata, see details below) and go-to-table (directing traffic to
the isolated table containing the isolated matcher).

Rule updates (updating rule actions) are applied to the second part of
the rule, since user-provided actions are not executed in the first
matcher.

We use the REG_C_6 metadata register to set and match on a unique
per-rule tag (see details below).

Splitting rules into two parts introduces new challenges:

1. Invalid Combinations

Consider two rules with different matching values:
 - Rule 1: A+B
 - Rule 2: C+D

Let's split the rules into two parts as follows:

  |---|        |---|
  | A |        | B |
  |---|  -->   |---|
  | C |        | D |
  |---|        |---|

Splitting these rules results in invalid combinations like A+D and
C+B. To resolve this, we assign unique tags to each rule on the first
matcher and match on these tags on the second matcher (the tag is
implemented through a modify_hdr action that sets a value to metadata
register REG_C_6):

 |----------|       |---------|
 | A        |       | B, TagA |
 | action:  |       |         |
 | set TagA |       |         |
 |----------|  -->  |---------|
 | C        |       | D, TagB |
 | action:  |       |         |
 | set TagB |       |         |
 |----------|       |---------|

2. Duplicated Entries:

Consider two rules with overlapping values:
 - Rule 1: A+B
 - Rule 2: A+D

Let's split the rules into two parts as follows:

  |---|        |---|
  | A |        | B |
  |---|  -->   |---|
  |   |        | D |
  |---|        |---|

This leads to duplicated entries on the first matcher, which HWS
doesn't allow: a subsequent delete of either of the rules would delete
the only entry in the first matcher, leaving the remaining rule
broken. To address this, we use a reference count for entries in the
first matcher and delete STEs only when their refcount reaches zero.
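
Schematically, each distinct first half is tracked by a node of
roughly this shape (a sketch only; the real structure is struct
mlx5hws_bwc_complex_rule_hash_node in the diff below, and the tag and
refcount fields here are inferred from the description above):

	struct rule_hash_node {				/* sketch */
		/* first-half match params - the rhashtable key */
		u32 match_buf[MLX5_ST_SZ_DW_MATCH_PARAM];
		u32 tag;		/* unique IDA tag, set in REG_C_6 */
		refcount_t refcount;	/* rules sharing this first half */
		struct rhash_head hash_node;
	};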
Both challenges are resolved by having a per-matcher data structure
(implemented with rhashtable) that manages refcounts for the first
part of the rules and holds unique tags (managed via IDA) for these
rules to set and to match on the second matcher.

Limitations:
-----------

We utilize metadata register REG_C_6 in this implementation, so its
usage anywhere along the steering of the flow that might include the
need for a Complex Matcher is prohibited.

The number and size of match parameters remain limited — now it is
constrained by what can be represented by two definers instead of one.
This architectural limitation arises from the structure of Complex
Matchers. If future requirements demand more parameters, Complex
Matchers can be extended beyond two matchers.

Additionally, there is an implementation limit of 32 match parameters
per rule (disregarding parameter size). This limit can be lifted if
needed.

Signed-off-by: Yevgeny Kliteynik
Reviewed-by: Vlad Dogaru
Reviewed-by: Mark Bloch
Signed-off-by: Tariq Toukan
Link: https://patch.msgid.link/1746992290-568936-6-git-send-email-tariqt@nvidia.com
Signed-off-by: Jakub Kicinski
---
 .../mellanox/mlx5/core/steering/hws/bwc.c     |   63 +-
 .../mellanox/mlx5/core/steering/hws/bwc.h     |    5 +
 .../mlx5/core/steering/hws/bwc_complex.c      | 1348 ++++++++++++++++-
 .../mlx5/core/steering/hws/bwc_complex.h      |   21 +
 4 files changed, 1410 insertions(+), 27 deletions(-)

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.c
index 27b6420678d8..d70db6948dbb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.c
@@ -46,10 +46,14 @@ static void hws_bwc_unlock_all_queues(struct mlx5hws_context *ctx)
 	}
 }
 
-static void hws_bwc_matcher_init_attr(struct mlx5hws_matcher_attr *attr,
+static void hws_bwc_matcher_init_attr(struct mlx5hws_bwc_matcher *bwc_matcher,
 				      u32 priority,
-				      u8 size_log)
+				      u8 size_log,
+				      struct mlx5hws_matcher_attr *attr)
 {
+	struct mlx5hws_bwc_matcher *first_matcher =
+		bwc_matcher->complex_first_bwc_matcher;
+
 	memset(attr, 0, sizeof(*attr));
 
 	attr->priority = priority;
@@ -61,6 +65,9 @@ static void hws_bwc_matcher_init_attr(struct mlx5hws_matcher_attr *attr,
 	attr->rule.num_log = size_log;
 	attr->resizable = true;
 	attr->max_num_of_at_attach = MLX5HWS_BWC_MATCHER_ATTACH_AT_NUM;
+
+	attr->isolated_matcher_end_ft_id =
+		first_matcher ? first_matcher->matcher->end_ft_id : 0;
 }
 
 int mlx5hws_bwc_matcher_create_simple(struct mlx5hws_bwc_matcher *bwc_matcher,
@@ -83,9 +90,10 @@ int mlx5hws_bwc_matcher_create_simple(struct mlx5hws_bwc_matcher *bwc_matcher,
 	for (i = 0; i < bwc_queues; i++)
 		INIT_LIST_HEAD(&bwc_matcher->rules[i]);
 
-	hws_bwc_matcher_init_attr(&attr,
+	hws_bwc_matcher_init_attr(bwc_matcher,
 				  priority,
-				  MLX5HWS_BWC_MATCHER_INIT_SIZE_LOG);
+				  MLX5HWS_BWC_MATCHER_INIT_SIZE_LOG,
+				  &attr);
 
 	bwc_matcher->priority = priority;
 	bwc_matcher->size_log = MLX5HWS_BWC_MATCHER_INIT_SIZE_LOG;
@@ -217,7 +225,10 @@ int mlx5hws_bwc_matcher_destroy(struct mlx5hws_bwc_matcher *bwc_matcher)
 			    "BWC matcher destroy: matcher still has %d rules\n",
 			    num_of_rules);
 
-	mlx5hws_bwc_matcher_destroy_simple(bwc_matcher);
+	if (bwc_matcher->complex)
+		mlx5hws_bwc_matcher_destroy_complex(bwc_matcher);
+	else
+		mlx5hws_bwc_matcher_destroy_simple(bwc_matcher);
 
 	kfree(bwc_matcher);
 	return 0;
@@ -401,9 +412,13 @@ int mlx5hws_bwc_rule_destroy_simple(struct mlx5hws_bwc_rule *bwc_rule)
 
 int mlx5hws_bwc_rule_destroy(struct mlx5hws_bwc_rule *bwc_rule)
 {
-	int ret;
+	bool is_complex = !!bwc_rule->bwc_matcher->complex;
+	int ret = 0;
 
-	ret = mlx5hws_bwc_rule_destroy_simple(bwc_rule);
+	if (is_complex)
+		ret = mlx5hws_bwc_rule_destroy_complex(bwc_rule);
+	else
+		ret = mlx5hws_bwc_rule_destroy_simple(bwc_rule);
 
 	mlx5hws_bwc_rule_free(bwc_rule);
 	return ret;
@@ -692,7 +707,10 @@ free_pending_rules:
 
 static int hws_bwc_matcher_move_all(struct mlx5hws_bwc_matcher *bwc_matcher)
 {
-	return hws_bwc_matcher_move_all_simple(bwc_matcher);
+	if (!bwc_matcher->complex)
+		return hws_bwc_matcher_move_all_simple(bwc_matcher);
+
+	return mlx5hws_bwc_matcher_move_all_complex(bwc_matcher);
 }
 
 static int hws_bwc_matcher_move(struct mlx5hws_bwc_matcher *bwc_matcher)
@@ -703,9 +721,10 @@ static int hws_bwc_matcher_move(struct mlx5hws_bwc_matcher *bwc_matcher)
 	struct mlx5hws_matcher *new_matcher;
 	int ret;
 
-	hws_bwc_matcher_init_attr(&matcher_attr,
+	hws_bwc_matcher_init_attr(bwc_matcher,
 				  bwc_matcher->priority,
-				  bwc_matcher->size_log);
+				  bwc_matcher->size_log,
+				  &matcher_attr);
 
 	old_matcher = bwc_matcher->matcher;
 	new_matcher = mlx5hws_matcher_create(old_matcher->tbl,
@@ -910,11 +929,18 @@ mlx5hws_bwc_rule_create(struct mlx5hws_bwc_matcher *bwc_matcher,
 
 	bwc_queue_idx = hws_bwc_gen_queue_idx(ctx);
 
-	ret = mlx5hws_bwc_rule_create_simple(bwc_rule,
-					     params->match_buf,
-					     rule_actions,
-					     flow_source,
-					     bwc_queue_idx);
+	if (bwc_matcher->complex)
+		ret = mlx5hws_bwc_rule_create_complex(bwc_rule,
+						      params,
+						      flow_source,
+						      rule_actions,
+						      bwc_queue_idx);
+	else
+		ret = mlx5hws_bwc_rule_create_simple(bwc_rule,
+						     params->match_buf,
+						     rule_actions,
+						     flow_source,
+						     bwc_queue_idx);
 	if (unlikely(ret)) {
 		mlx5hws_bwc_rule_free(bwc_rule);
 		return NULL;
@@ -996,5 +1022,10 @@ int mlx5hws_bwc_rule_action_update(struct mlx5hws_bwc_rule *bwc_rule,
 		return -EINVAL;
 	}
 
-	return hws_bwc_rule_action_update(bwc_rule, rule_actions);
+	/* For complex rule, the update should happen on the second matcher */
+	if (bwc_rule->isolated_bwc_rule)
+		return hws_bwc_rule_action_update(bwc_rule->isolated_bwc_rule,
+						  rule_actions);
+	else
+		return hws_bwc_rule_action_update(bwc_rule, rule_actions);
 }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.h
index a2aa2d5da694..cf2b65146317 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.h
@@ -18,10 +18,13 @@
 #define MLX5HWS_BWC_POLLING_TIMEOUT 60
 
+struct mlx5hws_bwc_matcher_complex_data;
 struct mlx5hws_bwc_matcher {
 	struct mlx5hws_matcher *matcher;
 	struct mlx5hws_match_template *mt;
 	struct mlx5hws_action_template **at;
+	struct mlx5hws_bwc_matcher_complex_data *complex;
+	struct mlx5hws_bwc_matcher *complex_first_bwc_matcher;
 	u8 num_of_at;
 	u8 size_of_at_array;
 	u8 size_log;
@@ -33,6 +36,8 @@ struct mlx5hws_bwc_matcher {
 struct mlx5hws_bwc_rule {
 	struct mlx5hws_bwc_matcher *bwc_matcher;
 	struct mlx5hws_rule *rule;
+	struct mlx5hws_bwc_rule *isolated_bwc_rule;
+	struct mlx5hws_bwc_complex_rule_hash_node *complex_hash_node;
 	u16 bwc_queue_idx;
 	struct list_head list_node;
 };
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc_complex.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc_complex.c
index 9fb059a6511f..5d30c5b094fc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc_complex.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc_complex.c
@@ -3,6 +3,22 @@
 
 #include "internal.h"
 
+#define HWS_CLEAR_MATCH_PARAM(mask, field) \
+	MLX5_SET(fte_match_param, (mask)->match_buf, field, 0)
+
+#define HWS_SZ_MATCH_PARAM (MLX5_ST_SZ_DW_MATCH_PARAM * 4)
+
+static const struct rhashtable_params hws_refcount_hash = {
+	.key_len = sizeof_field(struct mlx5hws_bwc_complex_rule_hash_node,
+				match_buf),
+	.key_offset = offsetof(struct mlx5hws_bwc_complex_rule_hash_node,
+			       match_buf),
+	.head_offset = offsetof(struct mlx5hws_bwc_complex_rule_hash_node,
+				hash_node),
+	.automatic_shrinking = true,
+	.min_size = 1,
+};
+
 bool mlx5hws_bwc_match_params_is_complex(struct mlx5hws_context *ctx,
 					 u8 match_criteria_enable,
 					 struct mlx5hws_match_parameters *mask)
@@ -48,20 +64,1078 @@ bool mlx5hws_bwc_match_params_is_complex(struct mlx5hws_context *ctx,
 	return is_complex;
 }
 
+static void
+hws_bwc_matcher_complex_params_clear_fld(struct mlx5hws_context *ctx,
+					 enum mlx5hws_definer_fname fname,
+					 struct mlx5hws_match_parameters *mask)
+{
+	struct mlx5hws_cmd_query_caps *caps = ctx->caps;
+
+	switch (fname) {
+	case MLX5HWS_DEFINER_FNAME_ETH_TYPE_O:
+	case MLX5HWS_DEFINER_FNAME_ETH_TYPE_I:
+	case MLX5HWS_DEFINER_FNAME_ETH_L3_TYPE_O:
+	case MLX5HWS_DEFINER_FNAME_ETH_L3_TYPE_I:
+	case MLX5HWS_DEFINER_FNAME_IP_VERSION_O:
+	case MLX5HWS_DEFINER_FNAME_IP_VERSION_I:
+		/* Because of the strict requirements for IP address matching
+		 * that require ethtype/ip_version matching as well, don't clear
+		 * these fields - have them in both parts of the complex matcher
+		 */
+		break;
+	case MLX5HWS_DEFINER_FNAME_ETH_SMAC_47_16_O:
+		HWS_CLEAR_MATCH_PARAM(mask, outer_headers.smac_47_16);
+		break;
+	case MLX5HWS_DEFINER_FNAME_ETH_SMAC_47_16_I:
+		HWS_CLEAR_MATCH_PARAM(mask, inner_headers.smac_47_16);
+		break;
+	case MLX5HWS_DEFINER_FNAME_ETH_SMAC_15_0_O:
+		HWS_CLEAR_MATCH_PARAM(mask, outer_headers.smac_15_0);
+		break;
+	case MLX5HWS_DEFINER_FNAME_ETH_SMAC_15_0_I:
+		HWS_CLEAR_MATCH_PARAM(mask, inner_headers.smac_15_0);
+		break;
+	case MLX5HWS_DEFINER_FNAME_ETH_DMAC_47_16_O:
+		HWS_CLEAR_MATCH_PARAM(mask, outer_headers.dmac_47_16);
+		break;
+	case MLX5HWS_DEFINER_FNAME_ETH_DMAC_47_16_I:
+		HWS_CLEAR_MATCH_PARAM(mask, inner_headers.dmac_47_16);
+		break;
+	case MLX5HWS_DEFINER_FNAME_ETH_DMAC_15_0_O:
+		HWS_CLEAR_MATCH_PARAM(mask, outer_headers.dmac_15_0);
+		break;
+	case MLX5HWS_DEFINER_FNAME_ETH_DMAC_15_0_I:
+		HWS_CLEAR_MATCH_PARAM(mask, inner_headers.dmac_15_0);
+		break;
+	case MLX5HWS_DEFINER_FNAME_VLAN_TYPE_O:
+		HWS_CLEAR_MATCH_PARAM(mask, outer_headers.cvlan_tag);
+		HWS_CLEAR_MATCH_PARAM(mask,
outer_headers.svlan_tag); + break; + case MLX5HWS_DEFINER_FNAME_VLAN_TYPE_I: + HWS_CLEAR_MATCH_PARAM(mask, inner_headers.cvlan_tag); + HWS_CLEAR_MATCH_PARAM(mask, inner_headers.svlan_tag); + break; + case MLX5HWS_DEFINER_FNAME_VLAN_FIRST_PRIO_O: + HWS_CLEAR_MATCH_PARAM(mask, outer_headers.first_prio); + break; + case MLX5HWS_DEFINER_FNAME_VLAN_FIRST_PRIO_I: + HWS_CLEAR_MATCH_PARAM(mask, inner_headers.first_prio); + break; + case MLX5HWS_DEFINER_FNAME_VLAN_CFI_O: + HWS_CLEAR_MATCH_PARAM(mask, outer_headers.first_cfi); + break; + case MLX5HWS_DEFINER_FNAME_VLAN_CFI_I: + HWS_CLEAR_MATCH_PARAM(mask, inner_headers.first_cfi); + break; + case MLX5HWS_DEFINER_FNAME_VLAN_ID_O: + HWS_CLEAR_MATCH_PARAM(mask, outer_headers.first_vid); + break; + case MLX5HWS_DEFINER_FNAME_VLAN_ID_I: + HWS_CLEAR_MATCH_PARAM(mask, inner_headers.first_vid); + break; + case MLX5HWS_DEFINER_FNAME_VLAN_SECOND_TYPE_O: + HWS_CLEAR_MATCH_PARAM(mask, + misc_parameters.outer_second_cvlan_tag); + HWS_CLEAR_MATCH_PARAM(mask, + misc_parameters.outer_second_svlan_tag); + break; + case MLX5HWS_DEFINER_FNAME_VLAN_SECOND_TYPE_I: + HWS_CLEAR_MATCH_PARAM(mask, + misc_parameters.inner_second_cvlan_tag); + HWS_CLEAR_MATCH_PARAM(mask, + misc_parameters.inner_second_svlan_tag); + break; + case MLX5HWS_DEFINER_FNAME_VLAN_SECOND_PRIO_O: + HWS_CLEAR_MATCH_PARAM(mask, misc_parameters.outer_second_prio); + break; + case MLX5HWS_DEFINER_FNAME_VLAN_SECOND_PRIO_I: + HWS_CLEAR_MATCH_PARAM(mask, misc_parameters.inner_second_prio); + break; + case MLX5HWS_DEFINER_FNAME_VLAN_SECOND_CFI_O: + HWS_CLEAR_MATCH_PARAM(mask, misc_parameters.outer_second_cfi); + break; + case MLX5HWS_DEFINER_FNAME_VLAN_SECOND_CFI_I: + HWS_CLEAR_MATCH_PARAM(mask, misc_parameters.inner_second_cfi); + break; + case MLX5HWS_DEFINER_FNAME_VLAN_SECOND_ID_O: + HWS_CLEAR_MATCH_PARAM(mask, misc_parameters.outer_second_vid); + break; + case MLX5HWS_DEFINER_FNAME_VLAN_SECOND_ID_I: + HWS_CLEAR_MATCH_PARAM(mask, misc_parameters.inner_second_vid); + break; + case MLX5HWS_DEFINER_FNAME_IPV4_IHL_O: + HWS_CLEAR_MATCH_PARAM(mask, outer_headers.ipv4_ihl); + break; + case MLX5HWS_DEFINER_FNAME_IPV4_IHL_I: + HWS_CLEAR_MATCH_PARAM(mask, inner_headers.ipv4_ihl); + break; + case MLX5HWS_DEFINER_FNAME_IP_DSCP_O: + HWS_CLEAR_MATCH_PARAM(mask, outer_headers.ip_dscp); + break; + case MLX5HWS_DEFINER_FNAME_IP_DSCP_I: + HWS_CLEAR_MATCH_PARAM(mask, inner_headers.ip_dscp); + break; + case MLX5HWS_DEFINER_FNAME_IP_ECN_O: + HWS_CLEAR_MATCH_PARAM(mask, outer_headers.ip_ecn); + break; + case MLX5HWS_DEFINER_FNAME_IP_ECN_I: + HWS_CLEAR_MATCH_PARAM(mask, inner_headers.ip_ecn); + break; + case MLX5HWS_DEFINER_FNAME_IP_TTL_O: + HWS_CLEAR_MATCH_PARAM(mask, outer_headers.ttl_hoplimit); + break; + case MLX5HWS_DEFINER_FNAME_IP_TTL_I: + HWS_CLEAR_MATCH_PARAM(mask, inner_headers.ttl_hoplimit); + break; + case MLX5HWS_DEFINER_FNAME_IPV4_DST_O: + HWS_CLEAR_MATCH_PARAM(mask, + outer_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_31_0); + break; + case MLX5HWS_DEFINER_FNAME_IPV4_SRC_O: + HWS_CLEAR_MATCH_PARAM(mask, + outer_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_31_0); + break; + case MLX5HWS_DEFINER_FNAME_IPV4_DST_I: + HWS_CLEAR_MATCH_PARAM(mask, + inner_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_31_0); + break; + case MLX5HWS_DEFINER_FNAME_IPV4_SRC_I: + HWS_CLEAR_MATCH_PARAM(mask, + inner_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_31_0); + break; + case MLX5HWS_DEFINER_FNAME_IP_FRAG_O: + HWS_CLEAR_MATCH_PARAM(mask, outer_headers.frag); + break; + case MLX5HWS_DEFINER_FNAME_IP_FRAG_I: + 
HWS_CLEAR_MATCH_PARAM(mask, inner_headers.frag); + break; + case MLX5HWS_DEFINER_FNAME_IPV6_FLOW_LABEL_O: + HWS_CLEAR_MATCH_PARAM(mask, + misc_parameters.outer_ipv6_flow_label); + break; + case MLX5HWS_DEFINER_FNAME_IPV6_FLOW_LABEL_I: + HWS_CLEAR_MATCH_PARAM(mask, + misc_parameters.inner_ipv6_flow_label); + break; + case MLX5HWS_DEFINER_FNAME_IPV6_DST_127_96_O: + case MLX5HWS_DEFINER_FNAME_IPV6_DST_95_64_O: + case MLX5HWS_DEFINER_FNAME_IPV6_DST_63_32_O: + case MLX5HWS_DEFINER_FNAME_IPV6_DST_31_0_O: + HWS_CLEAR_MATCH_PARAM(mask, + outer_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_127_96); + HWS_CLEAR_MATCH_PARAM(mask, + outer_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_95_64); + HWS_CLEAR_MATCH_PARAM(mask, + outer_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_63_32); + HWS_CLEAR_MATCH_PARAM(mask, + outer_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_31_0); + break; + case MLX5HWS_DEFINER_FNAME_IPV6_SRC_127_96_O: + case MLX5HWS_DEFINER_FNAME_IPV6_SRC_95_64_O: + case MLX5HWS_DEFINER_FNAME_IPV6_SRC_63_32_O: + case MLX5HWS_DEFINER_FNAME_IPV6_SRC_31_0_O: + HWS_CLEAR_MATCH_PARAM(mask, + outer_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_127_96); + HWS_CLEAR_MATCH_PARAM(mask, + outer_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_95_64); + HWS_CLEAR_MATCH_PARAM(mask, + outer_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_63_32); + HWS_CLEAR_MATCH_PARAM(mask, + outer_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_31_0); + break; + case MLX5HWS_DEFINER_FNAME_IPV6_DST_127_96_I: + case MLX5HWS_DEFINER_FNAME_IPV6_DST_95_64_I: + case MLX5HWS_DEFINER_FNAME_IPV6_DST_63_32_I: + case MLX5HWS_DEFINER_FNAME_IPV6_DST_31_0_I: + HWS_CLEAR_MATCH_PARAM(mask, + inner_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_127_96); + HWS_CLEAR_MATCH_PARAM(mask, + inner_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_95_64); + HWS_CLEAR_MATCH_PARAM(mask, + inner_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_63_32); + HWS_CLEAR_MATCH_PARAM(mask, + inner_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_31_0); + break; + case MLX5HWS_DEFINER_FNAME_IPV6_SRC_127_96_I: + case MLX5HWS_DEFINER_FNAME_IPV6_SRC_95_64_I: + case MLX5HWS_DEFINER_FNAME_IPV6_SRC_63_32_I: + case MLX5HWS_DEFINER_FNAME_IPV6_SRC_31_0_I: + HWS_CLEAR_MATCH_PARAM(mask, + inner_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_127_96); + HWS_CLEAR_MATCH_PARAM(mask, + inner_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_95_64); + HWS_CLEAR_MATCH_PARAM(mask, + inner_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_63_32); + HWS_CLEAR_MATCH_PARAM(mask, + inner_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_31_0); + break; + case MLX5HWS_DEFINER_FNAME_IP_PROTOCOL_O: + HWS_CLEAR_MATCH_PARAM(mask, outer_headers.ip_protocol); + break; + case MLX5HWS_DEFINER_FNAME_IP_PROTOCOL_I: + HWS_CLEAR_MATCH_PARAM(mask, inner_headers.ip_protocol); + break; + case MLX5HWS_DEFINER_FNAME_L4_SPORT_O: + HWS_CLEAR_MATCH_PARAM(mask, outer_headers.tcp_sport); + HWS_CLEAR_MATCH_PARAM(mask, outer_headers.udp_sport); + break; + case MLX5HWS_DEFINER_FNAME_L4_SPORT_I: + HWS_CLEAR_MATCH_PARAM(mask, inner_headers.tcp_sport); + HWS_CLEAR_MATCH_PARAM(mask, inner_headers.udp_sport); + break; + case MLX5HWS_DEFINER_FNAME_L4_DPORT_O: + HWS_CLEAR_MATCH_PARAM(mask, outer_headers.tcp_dport); + HWS_CLEAR_MATCH_PARAM(mask, outer_headers.udp_dport); + break; + case MLX5HWS_DEFINER_FNAME_L4_DPORT_I: + HWS_CLEAR_MATCH_PARAM(mask, inner_headers.tcp_dport); + HWS_CLEAR_MATCH_PARAM(mask, inner_headers.udp_dport); + break; + case MLX5HWS_DEFINER_FNAME_TCP_FLAGS_O: +
HWS_CLEAR_MATCH_PARAM(mask, outer_headers.tcp_flags); + break; + case MLX5HWS_DEFINER_FNAME_TCP_ACK_NUM: + case MLX5HWS_DEFINER_FNAME_TCP_SEQ_NUM: + HWS_CLEAR_MATCH_PARAM(mask, + misc_parameters_3.outer_tcp_seq_num); + HWS_CLEAR_MATCH_PARAM(mask, + misc_parameters_3.outer_tcp_ack_num); + HWS_CLEAR_MATCH_PARAM(mask, + misc_parameters_3.inner_tcp_seq_num); + HWS_CLEAR_MATCH_PARAM(mask, + misc_parameters_3.inner_tcp_ack_num); + break; + case MLX5HWS_DEFINER_FNAME_GTP_TEID: + HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_3.gtpu_teid); + break; + case MLX5HWS_DEFINER_FNAME_GTP_MSG_TYPE: + HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_3.gtpu_msg_type); + HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_3.gtpu_msg_flags); + break; + case MLX5HWS_DEFINER_FNAME_GTPU_FIRST_EXT_DW0: + HWS_CLEAR_MATCH_PARAM(mask, + misc_parameters_3.gtpu_first_ext_dw_0); + HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_3.gtpu_dw_0); + break; + case MLX5HWS_DEFINER_FNAME_GTPU_DW2: + HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_3.gtpu_dw_2); + break; + case MLX5HWS_DEFINER_FNAME_FLEX_PARSER_0: + case MLX5HWS_DEFINER_FNAME_FLEX_PARSER_1: + case MLX5HWS_DEFINER_FNAME_FLEX_PARSER_2: + case MLX5HWS_DEFINER_FNAME_FLEX_PARSER_3: + case MLX5HWS_DEFINER_FNAME_FLEX_PARSER_4: + case MLX5HWS_DEFINER_FNAME_FLEX_PARSER_5: + case MLX5HWS_DEFINER_FNAME_FLEX_PARSER_6: + case MLX5HWS_DEFINER_FNAME_FLEX_PARSER_7: + HWS_CLEAR_MATCH_PARAM(mask, + misc_parameters_2.outer_first_mpls_over_gre); + HWS_CLEAR_MATCH_PARAM(mask, + misc_parameters_2.outer_first_mpls_over_udp); + HWS_CLEAR_MATCH_PARAM(mask, + misc_parameters_3.geneve_tlv_option_0_data); + HWS_CLEAR_MATCH_PARAM(mask, + misc_parameters_4.prog_sample_field_id_0); + HWS_CLEAR_MATCH_PARAM(mask, + misc_parameters_4.prog_sample_field_value_0); + HWS_CLEAR_MATCH_PARAM(mask, + misc_parameters_4.prog_sample_field_value_1); + HWS_CLEAR_MATCH_PARAM(mask, + misc_parameters_4.prog_sample_field_id_2); + HWS_CLEAR_MATCH_PARAM(mask, + misc_parameters_4.prog_sample_field_value_2); + HWS_CLEAR_MATCH_PARAM(mask, + misc_parameters_4.prog_sample_field_id_3); + HWS_CLEAR_MATCH_PARAM(mask, + misc_parameters_4.prog_sample_field_value_3); + break; + case MLX5HWS_DEFINER_FNAME_VXLAN_VNI: + HWS_CLEAR_MATCH_PARAM(mask, misc_parameters.vxlan_vni); + break; + case MLX5HWS_DEFINER_FNAME_VXLAN_GPE_FLAGS: + HWS_CLEAR_MATCH_PARAM(mask, + misc_parameters_3.outer_vxlan_gpe_flags); + break; + case MLX5HWS_DEFINER_FNAME_VXLAN_GPE_RSVD0: + break; + case MLX5HWS_DEFINER_FNAME_VXLAN_GPE_PROTO: + HWS_CLEAR_MATCH_PARAM(mask, + misc_parameters_3.outer_vxlan_gpe_next_protocol); + break; + case MLX5HWS_DEFINER_FNAME_VXLAN_GPE_VNI: + HWS_CLEAR_MATCH_PARAM(mask, + misc_parameters_3.outer_vxlan_gpe_vni); + break; + case MLX5HWS_DEFINER_FNAME_GENEVE_OPT_LEN: + HWS_CLEAR_MATCH_PARAM(mask, misc_parameters.geneve_opt_len); + break; + case MLX5HWS_DEFINER_FNAME_GENEVE_OAM: + HWS_CLEAR_MATCH_PARAM(mask, misc_parameters.geneve_oam); + break; + case MLX5HWS_DEFINER_FNAME_GENEVE_PROTO: + HWS_CLEAR_MATCH_PARAM(mask, + misc_parameters.geneve_protocol_type); + break; + case MLX5HWS_DEFINER_FNAME_GENEVE_VNI: + HWS_CLEAR_MATCH_PARAM(mask, misc_parameters.geneve_vni); + break; + case MLX5HWS_DEFINER_FNAME_SOURCE_QP: + HWS_CLEAR_MATCH_PARAM(mask, misc_parameters.source_sqn); + break; + case MLX5HWS_DEFINER_FNAME_SOURCE_GVMI: + HWS_CLEAR_MATCH_PARAM(mask, misc_parameters.source_port); + HWS_CLEAR_MATCH_PARAM(mask, + misc_parameters.source_eswitch_owner_vhca_id); + break; + case MLX5HWS_DEFINER_FNAME_REG_0: + HWS_CLEAR_MATCH_PARAM(mask, 
misc_parameters_2.metadata_reg_c_0); + break; + case MLX5HWS_DEFINER_FNAME_REG_1: + HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_2.metadata_reg_c_1); + break; + case MLX5HWS_DEFINER_FNAME_REG_2: + HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_2.metadata_reg_c_2); + break; + case MLX5HWS_DEFINER_FNAME_REG_3: + HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_2.metadata_reg_c_3); + break; + case MLX5HWS_DEFINER_FNAME_REG_4: + HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_2.metadata_reg_c_4); + break; + case MLX5HWS_DEFINER_FNAME_REG_5: + HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_2.metadata_reg_c_5); + break; + case MLX5HWS_DEFINER_FNAME_REG_7: + HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_2.metadata_reg_c_7); + break; + case MLX5HWS_DEFINER_FNAME_REG_A: + HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_2.metadata_reg_a); + break; + case MLX5HWS_DEFINER_FNAME_GRE_C: + HWS_CLEAR_MATCH_PARAM(mask, misc_parameters.gre_c_present); + break; + case MLX5HWS_DEFINER_FNAME_GRE_K: + HWS_CLEAR_MATCH_PARAM(mask, misc_parameters.gre_k_present); + break; + case MLX5HWS_DEFINER_FNAME_GRE_S: + HWS_CLEAR_MATCH_PARAM(mask, misc_parameters.gre_s_present); + break; + case MLX5HWS_DEFINER_FNAME_GRE_PROTOCOL: + HWS_CLEAR_MATCH_PARAM(mask, misc_parameters.gre_protocol); + break; + case MLX5HWS_DEFINER_FNAME_GRE_OPT_KEY: + HWS_CLEAR_MATCH_PARAM(mask, misc_parameters.gre_key.key); + break; + case MLX5HWS_DEFINER_FNAME_ICMP_DW1: + HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_3.icmp_header_data); + HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_3.icmp_type); + HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_3.icmp_code); + HWS_CLEAR_MATCH_PARAM(mask, + misc_parameters_3.icmpv6_header_data); + HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_3.icmpv6_type); + HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_3.icmpv6_code); + break; + case MLX5HWS_DEFINER_FNAME_MPLS0_O: + HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_2.outer_first_mpls); + break; + case MLX5HWS_DEFINER_FNAME_MPLS0_I: + HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_2.inner_first_mpls); + break; + case MLX5HWS_DEFINER_FNAME_TNL_HDR_0: + HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_5.tunnel_header_0); + break; + case MLX5HWS_DEFINER_FNAME_TNL_HDR_1: + HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_5.tunnel_header_1); + break; + case MLX5HWS_DEFINER_FNAME_TNL_HDR_2: + HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_5.tunnel_header_2); + break; + case MLX5HWS_DEFINER_FNAME_TNL_HDR_3: + HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_5.tunnel_header_3); + break; + case MLX5HWS_DEFINER_FNAME_FLEX_PARSER0_OK: + case MLX5HWS_DEFINER_FNAME_FLEX_PARSER1_OK: + case MLX5HWS_DEFINER_FNAME_FLEX_PARSER2_OK: + case MLX5HWS_DEFINER_FNAME_FLEX_PARSER3_OK: + case MLX5HWS_DEFINER_FNAME_FLEX_PARSER4_OK: + case MLX5HWS_DEFINER_FNAME_FLEX_PARSER5_OK: + case MLX5HWS_DEFINER_FNAME_FLEX_PARSER6_OK: + case MLX5HWS_DEFINER_FNAME_FLEX_PARSER7_OK: + /* assuming this is flex parser for geneve option */ + if ((fname == MLX5HWS_DEFINER_FNAME_FLEX_PARSER0_OK && + ctx->caps->flex_parser_id_geneve_tlv_option_0 != 0) || + (fname == MLX5HWS_DEFINER_FNAME_FLEX_PARSER1_OK && + ctx->caps->flex_parser_id_geneve_tlv_option_0 != 1) || + (fname == MLX5HWS_DEFINER_FNAME_FLEX_PARSER2_OK && + ctx->caps->flex_parser_id_geneve_tlv_option_0 != 2) || + (fname == MLX5HWS_DEFINER_FNAME_FLEX_PARSER3_OK && + ctx->caps->flex_parser_id_geneve_tlv_option_0 != 3) || + (fname == MLX5HWS_DEFINER_FNAME_FLEX_PARSER4_OK && + ctx->caps->flex_parser_id_geneve_tlv_option_0 != 4) || + (fname == MLX5HWS_DEFINER_FNAME_FLEX_PARSER5_OK && + 
ctx->caps->flex_parser_id_geneve_tlv_option_0 != 5) || + (fname == MLX5HWS_DEFINER_FNAME_FLEX_PARSER6_OK && + ctx->caps->flex_parser_id_geneve_tlv_option_0 != 6) || + (fname == MLX5HWS_DEFINER_FNAME_FLEX_PARSER7_OK && + ctx->caps->flex_parser_id_geneve_tlv_option_0 != 7)) { + mlx5hws_err(ctx, + "Complex params: unsupported field %s (%d), flex parser ID for geneve is %d\n", + mlx5hws_definer_fname_to_str(fname), fname, + caps->flex_parser_id_geneve_tlv_option_0); + break; + } + HWS_CLEAR_MATCH_PARAM(mask, + misc_parameters.geneve_tlv_option_0_exist); + break; + case MLX5HWS_DEFINER_FNAME_REG_6: + default: + mlx5hws_err(ctx, "Complex params: unsupported field %s (%d)\n", + mlx5hws_definer_fname_to_str(fname), fname); + break; + } +} + +static bool +hws_bwc_matcher_complex_params_comb_is_valid(struct mlx5hws_definer_fc *fc, + int fc_sz, + u32 combination_num) +{ + bool m1[MLX5HWS_DEFINER_FNAME_MAX] = {0}; + bool m2[MLX5HWS_DEFINER_FNAME_MAX] = {0}; + bool is_first_matcher; + int i; + + for (i = 0; i < fc_sz; i++) { + is_first_matcher = !(combination_num & BIT(i)); + if (is_first_matcher) + m1[fc[i].fname] = true; + else + m2[fc[i].fname] = true; + } + + /* Not all the fields can be split into separate matchers. + * Some should be together on the same matcher. + * For example, IPv6 parts - the whole IPv6 address should be on the + * same matcher in order for us to deduce if it's IPv6 or IPv4 address. + */ + if (m1[MLX5HWS_DEFINER_FNAME_IP_FRAG_O] && + (m2[MLX5HWS_DEFINER_FNAME_ETH_SMAC_15_0_O] || + m2[MLX5HWS_DEFINER_FNAME_ETH_SMAC_47_16_O] || + m2[MLX5HWS_DEFINER_FNAME_ETH_DMAC_15_0_O] || + m2[MLX5HWS_DEFINER_FNAME_ETH_DMAC_47_16_O])) + return false; + + if (m2[MLX5HWS_DEFINER_FNAME_IP_FRAG_O] && + (m1[MLX5HWS_DEFINER_FNAME_ETH_SMAC_15_0_O] || + m1[MLX5HWS_DEFINER_FNAME_ETH_SMAC_47_16_O] || + m1[MLX5HWS_DEFINER_FNAME_ETH_DMAC_15_0_O] || + m1[MLX5HWS_DEFINER_FNAME_ETH_DMAC_47_16_O])) + return false; + + if (m1[MLX5HWS_DEFINER_FNAME_IP_FRAG_I] && + (m2[MLX5HWS_DEFINER_FNAME_ETH_SMAC_47_16_I] || + m2[MLX5HWS_DEFINER_FNAME_ETH_SMAC_15_0_I] || + m2[MLX5HWS_DEFINER_FNAME_ETH_DMAC_47_16_I] || + m2[MLX5HWS_DEFINER_FNAME_ETH_DMAC_15_0_I])) + return false; + + if (m2[MLX5HWS_DEFINER_FNAME_IP_FRAG_I] && + (m1[MLX5HWS_DEFINER_FNAME_ETH_SMAC_47_16_I] || + m1[MLX5HWS_DEFINER_FNAME_ETH_SMAC_15_0_I] || + m1[MLX5HWS_DEFINER_FNAME_ETH_DMAC_47_16_I] || + m1[MLX5HWS_DEFINER_FNAME_ETH_DMAC_15_0_I])) + return false; + + /* Don't split outer IPv6 dest address. */ + if ((m1[MLX5HWS_DEFINER_FNAME_IPV6_DST_127_96_O] || + m1[MLX5HWS_DEFINER_FNAME_IPV6_DST_95_64_O] || + m1[MLX5HWS_DEFINER_FNAME_IPV6_DST_63_32_O] || + m1[MLX5HWS_DEFINER_FNAME_IPV6_DST_31_0_O]) && + (m2[MLX5HWS_DEFINER_FNAME_IPV6_DST_127_96_O] || + m2[MLX5HWS_DEFINER_FNAME_IPV6_DST_95_64_O] || + m2[MLX5HWS_DEFINER_FNAME_IPV6_DST_63_32_O] || + m2[MLX5HWS_DEFINER_FNAME_IPV6_DST_31_0_O])) + return false; + + /* Don't split outer IPv6 source address. */ + if ((m1[MLX5HWS_DEFINER_FNAME_IPV6_SRC_127_96_O] || + m1[MLX5HWS_DEFINER_FNAME_IPV6_SRC_95_64_O] || + m1[MLX5HWS_DEFINER_FNAME_IPV6_SRC_63_32_O] || + m1[MLX5HWS_DEFINER_FNAME_IPV6_SRC_31_0_O]) && + (m2[MLX5HWS_DEFINER_FNAME_IPV6_SRC_127_96_O] || + m2[MLX5HWS_DEFINER_FNAME_IPV6_SRC_95_64_O] || + m2[MLX5HWS_DEFINER_FNAME_IPV6_SRC_63_32_O] || + m2[MLX5HWS_DEFINER_FNAME_IPV6_SRC_31_0_O])) + return false; + + /* Don't split inner IPv6 dest address. 
*/ + if ((m1[MLX5HWS_DEFINER_FNAME_IPV6_DST_127_96_I] || + m1[MLX5HWS_DEFINER_FNAME_IPV6_DST_95_64_I] || + m1[MLX5HWS_DEFINER_FNAME_IPV6_DST_63_32_I] || + m1[MLX5HWS_DEFINER_FNAME_IPV6_DST_31_0_I]) && + (m2[MLX5HWS_DEFINER_FNAME_IPV6_DST_127_96_I] || + m2[MLX5HWS_DEFINER_FNAME_IPV6_DST_95_64_I] || + m2[MLX5HWS_DEFINER_FNAME_IPV6_DST_63_32_I] || + m2[MLX5HWS_DEFINER_FNAME_IPV6_DST_31_0_I])) + return false; + + /* Don't split inner IPv6 source address. */ + if ((m1[MLX5HWS_DEFINER_FNAME_IPV6_SRC_127_96_I] || + m1[MLX5HWS_DEFINER_FNAME_IPV6_SRC_95_64_I] || + m1[MLX5HWS_DEFINER_FNAME_IPV6_SRC_63_32_I] || + m1[MLX5HWS_DEFINER_FNAME_IPV6_SRC_31_0_I]) && + (m2[MLX5HWS_DEFINER_FNAME_IPV6_SRC_127_96_I] || + m2[MLX5HWS_DEFINER_FNAME_IPV6_SRC_95_64_I] || + m2[MLX5HWS_DEFINER_FNAME_IPV6_SRC_63_32_I] || + m2[MLX5HWS_DEFINER_FNAME_IPV6_SRC_31_0_I])) + return false; + + /* Don't split GRE parameters. */ + if ((m1[MLX5HWS_DEFINER_FNAME_GRE_C] || + m1[MLX5HWS_DEFINER_FNAME_GRE_K] || + m1[MLX5HWS_DEFINER_FNAME_GRE_S] || + m1[MLX5HWS_DEFINER_FNAME_GRE_PROTOCOL]) && + (m2[MLX5HWS_DEFINER_FNAME_GRE_C] || + m2[MLX5HWS_DEFINER_FNAME_GRE_K] || + m2[MLX5HWS_DEFINER_FNAME_GRE_S] || + m2[MLX5HWS_DEFINER_FNAME_GRE_PROTOCOL])) + return false; + + /* Don't split TCP ack/seq numbers. */ + if ((m1[MLX5HWS_DEFINER_FNAME_TCP_ACK_NUM] || + m1[MLX5HWS_DEFINER_FNAME_TCP_SEQ_NUM]) && + (m2[MLX5HWS_DEFINER_FNAME_TCP_ACK_NUM] || + m2[MLX5HWS_DEFINER_FNAME_TCP_SEQ_NUM])) + return false; + + /* Don't split flex parser. */ + if ((m1[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_0] || + m1[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_1] || + m1[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_2] || + m1[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_3] || + m1[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_4] || + m1[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_5] || + m1[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_6] || + m1[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_7]) && + (m2[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_0] || + m2[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_1] || + m2[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_2] || + m2[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_3] || + m2[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_4] || + m2[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_5] || + m2[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_6] || + m2[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_7])) + return false; + + return true; +} + +static void +hws_bwc_matcher_complex_params_comb_create(struct mlx5hws_context *ctx, + struct mlx5hws_match_parameters *m, + struct mlx5hws_match_parameters *m1, + struct mlx5hws_match_parameters *m2, + struct mlx5hws_definer_fc *fc, + int fc_sz, + u32 combination_num) +{ + bool is_first_matcher; + int i; + + memcpy(m1->match_buf, m->match_buf, m->match_sz); + memcpy(m2->match_buf, m->match_buf, m->match_sz); + + for (i = 0; i < fc_sz; i++) { + is_first_matcher = !(combination_num & BIT(i)); + hws_bwc_matcher_complex_params_clear_fld(ctx, + fc[i].fname, + is_first_matcher ? 
+ m2 : m1); + } + + MLX5_SET(fte_match_param, m2->match_buf, + misc_parameters_2.metadata_reg_c_6, -1); +} + +static void +hws_bwc_matcher_complex_params_destroy(struct mlx5hws_match_parameters *mask_1, + struct mlx5hws_match_parameters *mask_2) +{ + kfree(mask_1->match_buf); + kfree(mask_2->match_buf); +} + +static int +hws_bwc_matcher_complex_params_create(struct mlx5hws_context *ctx, + u8 match_criteria, + struct mlx5hws_match_parameters *mask, + struct mlx5hws_match_parameters *mask_1, + struct mlx5hws_match_parameters *mask_2) +{ + struct mlx5hws_definer_fc *fc; + u32 num_of_combinations; + int fc_sz = 0; + int res = 0; + u32 i; + + if (MLX5_GET(fte_match_param, mask->match_buf, + misc_parameters_2.metadata_reg_c_6)) { + mlx5hws_err(ctx, "Complex matcher: REG_C_6 matching is reserved\n"); + res = -EINVAL; + goto out; + } + + mask_1->match_buf = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), + GFP_KERNEL); + mask_2->match_buf = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), + GFP_KERNEL); + if (!mask_1->match_buf || !mask_2->match_buf) { + mlx5hws_err(ctx, "Complex matcher: failed to allocate match_param\n"); + res = -ENOMEM; + goto free_params; + } + + mask_1->match_sz = mask->match_sz; + mask_2->match_sz = mask->match_sz; + + fc = mlx5hws_definer_conv_match_params_to_compressed_fc(ctx, + match_criteria, + mask->match_buf, + &fc_sz); + if (!fc) { + res = -ENOMEM; + goto free_params; + } + + if (fc_sz >= sizeof(num_of_combinations) * BITS_PER_BYTE) { + mlx5hws_err(ctx, + "Complex matcher: too many match parameters (%d)\n", + fc_sz); + res = -EINVAL; + goto free_fc; + } + + /* We have list of all the match fields from the match parameter. + * Now try all the possibilities of splitting them into two match + * buffers and look for the supported combination. + */ + num_of_combinations = 1 << fc_sz; + + /* Start from combination at index 1 - we know that 0 is unsupported */ + for (i = 1; i < num_of_combinations; i++) { + if (!hws_bwc_matcher_complex_params_comb_is_valid(fc, fc_sz, i)) + continue; + + hws_bwc_matcher_complex_params_comb_create(ctx, + mask, mask_1, mask_2, + fc, fc_sz, i); + /* We now have two separate sets of match params. + * Check if each of them can be used in its own matcher. + */ + if (!mlx5hws_bwc_match_params_is_complex(ctx, + match_criteria, + mask_1) && + !mlx5hws_bwc_match_params_is_complex(ctx, + match_criteria, + mask_2)) + break; + } + + if (i == num_of_combinations) { + /* We've scanned all the combinations, but to no avail */ + mlx5hws_err(ctx, "Complex matcher: couldn't find match params combination\n"); + res = -EINVAL; + goto free_fc; + } + + kfree(fc); + return 0; + +free_fc: + kfree(fc); +free_params: + hws_bwc_matcher_complex_params_destroy(mask_1, mask_2); +out: + return res; +} + +static int +hws_bwc_isolated_table_create(struct mlx5hws_bwc_matcher *bwc_matcher, + struct mlx5hws_table *table) +{ + struct mlx5hws_cmd_ft_modify_attr ft_attr = {0}; + struct mlx5hws_context *ctx = table->ctx; + struct mlx5hws_table_attr tbl_attr = {0}; + struct mlx5hws_table *isolated_tbl; + int ret = 0; + + tbl_attr.type = table->type; + tbl_attr.level = table->level; + + bwc_matcher->complex->isolated_tbl = + mlx5hws_table_create(ctx, &tbl_attr); + isolated_tbl = bwc_matcher->complex->isolated_tbl; + if (!isolated_tbl) + return -EINVAL; + + /* Set the default miss of the isolated table to + * point to the end anchor of the original matcher. 
+ */ + mlx5hws_cmd_set_attr_connect_miss_tbl(ctx, + isolated_tbl->fw_ft_type, + isolated_tbl->type, + &ft_attr); + ft_attr.table_miss_id = bwc_matcher->matcher->end_ft_id; + + ret = mlx5hws_cmd_flow_table_modify(ctx->mdev, + &ft_attr, + isolated_tbl->ft_id); + if (ret) { + mlx5hws_err(ctx, "Failed setting isolated tbl default miss\n"); + goto destroy_tbl; + } + + return 0; + +destroy_tbl: + mlx5hws_table_destroy(isolated_tbl); + return ret; +} + +static void hws_bwc_isolated_table_destroy(struct mlx5hws_table *isolated_tbl) +{ + /* This table is isolated - no table is pointing to it, no need to + * disconnect it from anywhere, it won't affect any other table's miss. + */ + mlx5hws_table_destroy(isolated_tbl); +} + +static int +hws_bwc_isolated_matcher_create(struct mlx5hws_bwc_matcher *bwc_matcher, + struct mlx5hws_table *table, + u8 match_criteria_enable, + struct mlx5hws_match_parameters *mask) +{ + struct mlx5hws_table *isolated_tbl = bwc_matcher->complex->isolated_tbl; + struct mlx5hws_bwc_matcher *isolated_bwc_matcher; + struct mlx5hws_context *ctx = table->ctx; + int ret; + + isolated_bwc_matcher = kzalloc(sizeof(*bwc_matcher), GFP_KERNEL); + if (!isolated_bwc_matcher) + return -ENOMEM; + + bwc_matcher->complex->isolated_bwc_matcher = isolated_bwc_matcher; + + /* Isolated BWC matcher needs access to the first BWC matcher */ + isolated_bwc_matcher->complex_first_bwc_matcher = bwc_matcher; + + /* Isolated matcher needs to match on REG_C_6, + * so make sure its criteria bit is on. + */ + match_criteria_enable |= MLX5HWS_DEFINER_MATCH_CRITERIA_MISC2; + + ret = mlx5hws_bwc_matcher_create_simple(isolated_bwc_matcher, + isolated_tbl, + 0, + match_criteria_enable, + mask, + NULL); + if (ret) { + mlx5hws_err(ctx, "Complex matcher: failed creating isolated BWC matcher\n"); + goto free_matcher; + } + + return 0; + +free_matcher: + kfree(bwc_matcher->complex->isolated_bwc_matcher); + return ret; +} + +static void +hws_bwc_isolated_matcher_destroy(struct mlx5hws_bwc_matcher *bwc_matcher) +{ + mlx5hws_bwc_matcher_destroy_simple(bwc_matcher); + kfree(bwc_matcher); +} + +static int +hws_bwc_isolated_actions_create(struct mlx5hws_bwc_matcher *bwc_matcher, + struct mlx5hws_table *table) +{ + struct mlx5hws_table *isolated_tbl = bwc_matcher->complex->isolated_tbl; + u8 modify_hdr_action[MLX5_ST_SZ_BYTES(set_action_in)] = {0}; + struct mlx5hws_context *ctx = table->ctx; + struct mlx5hws_action_mh_pattern ptrn; + int ret = 0; + + /* Create action to jump to isolated table */ + + bwc_matcher->complex->action_go_to_tbl = + mlx5hws_action_create_dest_table(ctx, + isolated_tbl, + MLX5HWS_ACTION_FLAG_HWS_FDB); + if (!bwc_matcher->complex->action_go_to_tbl) { + mlx5hws_err(ctx, "Complex matcher: failed to create go-to-tbl action\n"); + return -EINVAL; + } + + /* Create modify header action to set REG_C_6 */ + + MLX5_SET(set_action_in, modify_hdr_action, + action_type, MLX5_MODIFICATION_TYPE_SET); + MLX5_SET(set_action_in, modify_hdr_action, + field, MLX5_MODI_META_REG_C_6); + MLX5_SET(set_action_in, modify_hdr_action, + length, 0); /* zero means length of 32 */ + MLX5_SET(set_action_in, modify_hdr_action, offset, 0); + MLX5_SET(set_action_in, modify_hdr_action, data, 0); + + ptrn.data = (void *)modify_hdr_action; + ptrn.sz = MLX5HWS_ACTION_DOUBLE_SIZE; + + bwc_matcher->complex->action_metadata = + mlx5hws_action_create_modify_header(ctx, 1, &ptrn, 0, + MLX5HWS_ACTION_FLAG_HWS_FDB); + if (!bwc_matcher->complex->action_metadata) { + ret = -EINVAL; + goto destroy_action_go_to_tbl; + } + + /* Create last action */ 
+ + bwc_matcher->complex->action_last = + mlx5hws_action_create_last(ctx, MLX5HWS_ACTION_FLAG_HWS_FDB); + if (!bwc_matcher->complex->action_last) { + mlx5hws_err(ctx, "Complex matcher: failed to create last action\n"); + ret = -EINVAL; + goto destroy_action_metadata; + } + + return 0; + +destroy_action_metadata: + mlx5hws_action_destroy(bwc_matcher->complex->action_metadata); +destroy_action_go_to_tbl: + mlx5hws_action_destroy(bwc_matcher->complex->action_go_to_tbl); + return ret; +} + +static void +hws_bwc_isolated_actions_destroy(struct mlx5hws_bwc_matcher *bwc_matcher) +{ + mlx5hws_action_destroy(bwc_matcher->complex->action_last); + mlx5hws_action_destroy(bwc_matcher->complex->action_metadata); + mlx5hws_action_destroy(bwc_matcher->complex->action_go_to_tbl); +} + int mlx5hws_bwc_matcher_create_complex(struct mlx5hws_bwc_matcher *bwc_matcher, struct mlx5hws_table *table, u32 priority, u8 match_criteria_enable, struct mlx5hws_match_parameters *mask) { - mlx5hws_err(table->ctx, "Complex matcher is not supported yet\n"); - return -EOPNOTSUPP; + enum mlx5hws_action_type complex_init_action_types[3]; + struct mlx5hws_bwc_matcher *isolated_bwc_matcher; + struct mlx5hws_match_parameters mask_1 = {0}; + struct mlx5hws_match_parameters mask_2 = {0}; + struct mlx5hws_context *ctx = table->ctx; + int ret; + + ret = hws_bwc_matcher_complex_params_create(table->ctx, + match_criteria_enable, + mask, &mask_1, &mask_2); + if (ret) + goto err; + + bwc_matcher->complex = + kzalloc(sizeof(*bwc_matcher->complex), GFP_KERNEL); + if (!bwc_matcher->complex) { + ret = -ENOMEM; + goto free_masks; + } + + ret = rhashtable_init(&bwc_matcher->complex->refcount_hash, + &hws_refcount_hash); + if (ret) { + mlx5hws_err(ctx, "Complex matcher: failed to initialize rhashtable\n"); + goto free_complex; + } + + mutex_init(&bwc_matcher->complex->hash_lock); + ida_init(&bwc_matcher->complex->metadata_ida); + + /* Create initial action template for the first matcher. + * Usually the initial AT is just a dummy, but in the case of a complex + * matcher we know exactly which actions it should have.
+ */ + + complex_init_action_types[0] = MLX5HWS_ACTION_TYP_MODIFY_HDR; + complex_init_action_types[1] = MLX5HWS_ACTION_TYP_TBL; + complex_init_action_types[2] = MLX5HWS_ACTION_TYP_LAST; + + /* Create the first matcher */ + + ret = mlx5hws_bwc_matcher_create_simple(bwc_matcher, + table, + priority, + match_criteria_enable, + &mask_1, + complex_init_action_types); + if (ret) + goto destroy_ida; + + /* Create isolated table to hold the second isolated matcher */ + + ret = hws_bwc_isolated_table_create(bwc_matcher, table); + if (ret) { + mlx5hws_err(ctx, "Complex matcher: failed creating isolated table\n"); + goto destroy_first_matcher; + } + + /* Now create the second BWC matcher - the isolated one */ + + ret = hws_bwc_isolated_matcher_create(bwc_matcher, table, + match_criteria_enable, &mask_2); + if (ret) { + mlx5hws_err(ctx, "Complex matcher: failed creating isolated matcher\n"); + goto destroy_isolated_tbl; + } + + /* Create action for isolated matcher's rules */ + + ret = hws_bwc_isolated_actions_create(bwc_matcher, table); + if (ret) { + mlx5hws_err(ctx, "Complex matcher: failed creating isolated actions\n"); + goto destroy_isolated_matcher; + } + + hws_bwc_matcher_complex_params_destroy(&mask_1, &mask_2); + return 0; + +destroy_isolated_matcher: + isolated_bwc_matcher = bwc_matcher->complex->isolated_bwc_matcher; + hws_bwc_isolated_matcher_destroy(isolated_bwc_matcher); +destroy_isolated_tbl: + hws_bwc_isolated_table_destroy(bwc_matcher->complex->isolated_tbl); +destroy_first_matcher: + mlx5hws_bwc_matcher_destroy_simple(bwc_matcher); +destroy_ida: + ida_destroy(&bwc_matcher->complex->metadata_ida); + mutex_destroy(&bwc_matcher->complex->hash_lock); + rhashtable_destroy(&bwc_matcher->complex->refcount_hash); +free_complex: + kfree(bwc_matcher->complex); + bwc_matcher->complex = NULL; +free_masks: + hws_bwc_matcher_complex_params_destroy(&mask_1, &mask_2); +err: + return ret; } void mlx5hws_bwc_matcher_destroy_complex(struct mlx5hws_bwc_matcher *bwc_matcher) { - /* nothing to do here */ + struct mlx5hws_bwc_matcher *isolated_bwc_matcher = + bwc_matcher->complex->isolated_bwc_matcher; + + hws_bwc_isolated_actions_destroy(bwc_matcher); + hws_bwc_isolated_matcher_destroy(isolated_bwc_matcher); + hws_bwc_isolated_table_destroy(bwc_matcher->complex->isolated_tbl); + mlx5hws_bwc_matcher_destroy_simple(bwc_matcher); + ida_destroy(&bwc_matcher->complex->metadata_ida); + mutex_destroy(&bwc_matcher->complex->hash_lock); + rhashtable_destroy(&bwc_matcher->complex->refcount_hash); + kfree(bwc_matcher->complex); + bwc_matcher->complex = NULL; +} + +static void +hws_bwc_matcher_complex_hash_lock(struct mlx5hws_bwc_matcher *bwc_matcher) +{ + mutex_lock(&bwc_matcher->complex->hash_lock); +} + +static void +hws_bwc_matcher_complex_hash_unlock(struct mlx5hws_bwc_matcher *bwc_matcher) +{ + mutex_unlock(&bwc_matcher->complex->hash_lock); +} + +static int +hws_bwc_rule_complex_hash_node_get(struct mlx5hws_bwc_rule *bwc_rule, + struct mlx5hws_match_parameters *params) +{ + struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher; + struct mlx5hws_bwc_complex_rule_hash_node *node, *old_node; + struct rhashtable *refcount_hash; + int i; + + bwc_rule->complex_hash_node = NULL; + + node = kzalloc(sizeof(*node), GFP_KERNEL); + if (unlikely(!node)) + return -ENOMEM; + + node->tag = ida_alloc(&bwc_matcher->complex->metadata_ida, GFP_KERNEL); + refcount_set(&node->refcount, 1); + + /* Clear match buffer - turn off all the unrelated fields + * in accordance with the match params mask for the first + * matcher 
out of the two parts of the complex matcher. + * The resulting mask is the key for the hash. + */ + for (i = 0; i < MLX5_ST_SZ_DW_MATCH_PARAM; i++) + node->match_buf[i] = params->match_buf[i] & + bwc_matcher->mt->match_param[i]; + + refcount_hash = &bwc_matcher->complex->refcount_hash; + old_node = rhashtable_lookup_get_insert_fast(refcount_hash, + &node->hash_node, + hws_refcount_hash); + if (old_node) { + /* Rule with the same match params already exists - update refcount */ + refcount_inc(&old_node->refcount); + /* Let the new rule use the same tag as the existing rule. + * Note that we don't have any indication for the rule creation + * process that a rule with similar matching params already + * exists - no harm done when this rule is overwritten by + * the same STE. + * There's some performance advantage in skipping such cases, + * so this is left for future optimizations. + */ + ida_free(&bwc_matcher->complex->metadata_ida, node->tag); + kfree(node); + node = old_node; + } + + bwc_rule->complex_hash_node = node; + return 0; +} + +static void +hws_bwc_rule_complex_hash_node_put(struct mlx5hws_bwc_rule *bwc_rule, + bool *is_last_rule) +{ + struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher; + struct mlx5hws_bwc_complex_rule_hash_node *node; + + if (is_last_rule) + *is_last_rule = false; + + node = bwc_rule->complex_hash_node; + if (refcount_dec_and_test(&node->refcount)) { + rhashtable_remove_fast(&bwc_matcher->complex->refcount_hash, + &node->hash_node, + hws_refcount_hash); + ida_free(&bwc_matcher->complex->metadata_ida, node->tag); + kfree(node); + if (is_last_rule) + *is_last_rule = true; + } + + bwc_rule->complex_hash_node = NULL; } int mlx5hws_bwc_rule_create_complex(struct mlx5hws_bwc_rule *bwc_rule, @@ -70,19 +1144,271 @@ int mlx5hws_bwc_rule_create_complex(struct mlx5hws_bwc_rule *bwc_rule, struct mlx5hws_rule_action rule_actions[], u16 bwc_queue_idx) { - mlx5hws_err(bwc_rule->bwc_matcher->matcher->tbl->ctx, - "Complex rule is not supported yet\n"); - return -EOPNOTSUPP; + struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher; + struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx; + u8 modify_hdr_action[MLX5_ST_SZ_BYTES(set_action_in)] = {0}; + struct mlx5hws_rule_action rule_actions_1[3] = {0}; + struct mlx5hws_bwc_matcher *isolated_bwc_matcher; + u32 *match_buf_2; + u32 metadata_val; + int ret = 0; + + isolated_bwc_matcher = bwc_matcher->complex->isolated_bwc_matcher; + bwc_rule->isolated_bwc_rule = + mlx5hws_bwc_rule_alloc(isolated_bwc_matcher); + if (unlikely(!bwc_rule->isolated_bwc_rule)) + return -ENOMEM; + + hws_bwc_matcher_complex_hash_lock(bwc_matcher); + + /* Get a new hash node for this complex rule. + * If this is a unique set of match params for the first matcher, + * we will get a new hash node with a newly allocated IDA tag. + * Otherwise we will get an existing node with its IDA tag and an + * updated refcount. + */ + ret = hws_bwc_rule_complex_hash_node_get(bwc_rule, params); + if (unlikely(ret)) { + mlx5hws_err(ctx, "Complex rule: failed getting RHT node for this rule\n"); + goto free_isolated_rule; + } + + /* No need to clear match buffer's fields in accordance with what + * will actually be matched on first and second matchers. + * Both matchers were created with the appropriate masks + * and each of them holds the appropriate field copy array, + * so rule creation will use only the fields that will be copied + * in accordance with the setters in the field copy array.
We do, however, need to temporarily allocate a match buffer + * for the second (isolated) rule in order not to modify + * the user's match params buffer. + */ + + match_buf_2 = kmemdup(params->match_buf, + MLX5_ST_SZ_BYTES(fte_match_param), + GFP_KERNEL); + if (unlikely(!match_buf_2)) { + mlx5hws_err(ctx, "Complex rule: failed allocating match_buf\n"); + ret = -ENOMEM; + goto hash_node_put; + } + + /* On 2nd matcher, use unique 32-bit ID as a matching tag */ + metadata_val = bwc_rule->complex_hash_node->tag; + MLX5_SET(fte_match_param, match_buf_2, + misc_parameters_2.metadata_reg_c_6, metadata_val); + + /* Isolated rule's rule_actions contain all the original actions */ + ret = mlx5hws_bwc_rule_create_simple(bwc_rule->isolated_bwc_rule, + match_buf_2, + rule_actions, + flow_source, + bwc_queue_idx); + kfree(match_buf_2); + if (unlikely(ret)) { + mlx5hws_err(ctx, + "Complex rule: failed creating isolated BWC rule (%d)\n", + ret); + goto hash_node_put; + } + + /* First rule's rule_actions contain setting metadata and + * jump to isolated table that contains the second matcher. + * Set metadata value to a unique value for this rule. + */ + + MLX5_SET(set_action_in, modify_hdr_action, + action_type, MLX5_MODIFICATION_TYPE_SET); + MLX5_SET(set_action_in, modify_hdr_action, + field, MLX5_MODI_META_REG_C_6); + MLX5_SET(set_action_in, modify_hdr_action, + length, 0); /* zero means length of 32 */ + MLX5_SET(set_action_in, modify_hdr_action, + offset, 0); + MLX5_SET(set_action_in, modify_hdr_action, + data, metadata_val); + + rule_actions_1[0].action = bwc_matcher->complex->action_metadata; + rule_actions_1[0].modify_header.offset = 0; + rule_actions_1[0].modify_header.data = modify_hdr_action; + + rule_actions_1[1].action = bwc_matcher->complex->action_go_to_tbl; + rule_actions_1[2].action = bwc_matcher->complex->action_last; + + ret = mlx5hws_bwc_rule_create_simple(bwc_rule, + params->match_buf, + rule_actions_1, + flow_source, + bwc_queue_idx); + + if (unlikely(ret)) { + mlx5hws_err(ctx, + "Complex rule: failed creating first BWC rule (%d)\n", + ret); + goto destroy_isolated_rule; + } + + hws_bwc_matcher_complex_hash_unlock(bwc_matcher); + + return 0; + +destroy_isolated_rule: + mlx5hws_bwc_rule_destroy_simple(bwc_rule->isolated_bwc_rule); +hash_node_put: + hws_bwc_rule_complex_hash_node_put(bwc_rule, NULL); +free_isolated_rule: + hws_bwc_matcher_complex_hash_unlock(bwc_matcher); + mlx5hws_bwc_rule_free(bwc_rule->isolated_bwc_rule); + return ret; } int mlx5hws_bwc_rule_destroy_complex(struct mlx5hws_bwc_rule *bwc_rule) { - return 0; + struct mlx5hws_context *ctx = bwc_rule->bwc_matcher->matcher->tbl->ctx; + struct mlx5hws_bwc_rule *isolated_bwc_rule; + int ret_isolated, ret; + bool is_last_rule; + + hws_bwc_matcher_complex_hash_lock(bwc_rule->bwc_matcher); + + hws_bwc_rule_complex_hash_node_put(bwc_rule, &is_last_rule); + bwc_rule->rule->skip_delete = !is_last_rule; + + ret = mlx5hws_bwc_rule_destroy_simple(bwc_rule); + if (unlikely(ret)) + mlx5hws_err(ctx, "BWC complex rule: failed destroying first rule\n"); + + isolated_bwc_rule = bwc_rule->isolated_bwc_rule; + ret_isolated = mlx5hws_bwc_rule_destroy_simple(isolated_bwc_rule); + if (unlikely(ret_isolated)) + mlx5hws_err(ctx, "BWC complex rule: failed destroying second (isolated) rule\n"); + + hws_bwc_matcher_complex_hash_unlock(bwc_rule->bwc_matcher); + + mlx5hws_bwc_rule_free(isolated_bwc_rule); + + return ret || ret_isolated; +} + +static void +hws_bwc_matcher_clear_hash_rtcs(struct mlx5hws_bwc_matcher *bwc_matcher) +{ + struct
mlx5hws_bwc_complex_rule_hash_node *node; + struct rhashtable_iter iter; + + rhashtable_walk_enter(&bwc_matcher->complex->refcount_hash, &iter); + rhashtable_walk_start(&iter); + + while ((node = rhashtable_walk_next(&iter)) != NULL) { + if (IS_ERR(node)) + continue; + node->rtc_valid = false; + } + + rhashtable_walk_stop(&iter); + rhashtable_walk_exit(&iter); } -int mlx5hws_bwc_matcher_move_all_complex(struct mlx5hws_bwc_matcher *bwc_matcher) +int +mlx5hws_bwc_matcher_move_all_complex(struct mlx5hws_bwc_matcher *bwc_matcher) { - mlx5hws_err(bwc_matcher->matcher->tbl->ctx, - "Moving complex rule is not supported yet\n"); - return -EOPNOTSUPP; + struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx; + struct mlx5hws_matcher *matcher = bwc_matcher->matcher; + bool move_error = false, poll_error = false; + u16 bwc_queues = mlx5hws_bwc_queues(ctx); + struct mlx5hws_bwc_rule *tmp_bwc_rule; + struct mlx5hws_rule_attr rule_attr; + struct mlx5hws_table *isolated_tbl; + struct mlx5hws_rule *tmp_rule; + struct list_head *rules_list; + u32 expected_completions = 1; + u32 end_ft_id; + int i, ret; + + /* We are rehashing the matcher that is the first part of the complex + * matcher. Need to update the isolated matcher to point to the end_ft + * of this new matcher. This needs to be done before moving any rules + * to prevent possible steering loops. + */ + isolated_tbl = bwc_matcher->complex->isolated_tbl; + end_ft_id = bwc_matcher->matcher->resize_dst->end_ft_id; + ret = mlx5hws_matcher_update_end_ft_isolated(isolated_tbl, end_ft_id); + if (ret) { + mlx5hws_err(ctx, + "Failed updating end_ft of isolated matcher (%d)\n", + ret); + return ret; + } + + hws_bwc_matcher_clear_hash_rtcs(bwc_matcher); + + mlx5hws_bwc_rule_fill_attr(bwc_matcher, 0, 0, &rule_attr); + + for (i = 0; i < bwc_queues; i++) { + rules_list = &bwc_matcher->rules[i]; + if (list_empty(rules_list)) + continue; + + rule_attr.queue_id = mlx5hws_bwc_get_queue_id(ctx, i); + + list_for_each_entry(tmp_bwc_rule, rules_list, list_node) { + /* Check if a rule with similar tag has already + * been moved. + */ + if (tmp_bwc_rule->complex_hash_node->rtc_valid) { + /* This rule is a duplicate of rule with similar + * tag that has already been moved earlier. + * Just update this rule's RTCs. + */ + tmp_bwc_rule->rule->rtc_0 = + tmp_bwc_rule->complex_hash_node->rtc_0; + tmp_bwc_rule->rule->rtc_1 = + tmp_bwc_rule->complex_hash_node->rtc_1; + tmp_bwc_rule->rule->matcher = + tmp_bwc_rule->rule->matcher->resize_dst; + continue; + } + + /* First time we're moving rule with this tag. + * Move it for real. + */ + tmp_rule = tmp_bwc_rule->rule; + tmp_rule->skip_delete = false; + ret = mlx5hws_matcher_resize_rule_move(matcher, + tmp_rule, + &rule_attr); + if (unlikely(ret && !move_error)) { + mlx5hws_err(ctx, + "Moving complex BWC rule failed (%d), attempting to move rest of the rules\n", + ret); + move_error = true; + } + + expected_completions = 1; + ret = mlx5hws_bwc_queue_poll(ctx, + rule_attr.queue_id, + &expected_completions, + true); + if (unlikely(ret && !poll_error)) { + mlx5hws_err(ctx, + "Moving complex BWC rule: poll failed (%d), attempting to move rest of the rules\n", + ret); + poll_error = true; + } + + /* Done moving the rule to the new matcher, + * now update RTCs for all the duplicated rules. 
*/ + tmp_bwc_rule->complex_hash_node->rtc_0 = + tmp_bwc_rule->rule->rtc_0; + tmp_bwc_rule->complex_hash_node->rtc_1 = + tmp_bwc_rule->rule->rtc_1; + + tmp_bwc_rule->complex_hash_node->rtc_valid = true; + } + } + + if (move_error || poll_error) + ret = -EINVAL; + + return ret; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc_complex.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc_complex.h index 340f0688e394..a6887c7e39d5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc_complex.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc_complex.h @@ -4,6 +4,27 @@ #ifndef HWS_BWC_COMPLEX_H_ #define HWS_BWC_COMPLEX_H_ +struct mlx5hws_bwc_complex_rule_hash_node { + u32 match_buf[MLX5_ST_SZ_DW_MATCH_PARAM]; + u32 tag; + refcount_t refcount; + bool rtc_valid; + u32 rtc_0; + u32 rtc_1; + struct rhash_head hash_node; +}; + +struct mlx5hws_bwc_matcher_complex_data { + struct mlx5hws_table *isolated_tbl; + struct mlx5hws_bwc_matcher *isolated_bwc_matcher; + struct mlx5hws_action *action_metadata; + struct mlx5hws_action *action_go_to_tbl; + struct mlx5hws_action *action_last; + struct rhashtable refcount_hash; + struct mutex hash_lock; /* Protect the refcount rhashtable */ + struct ida metadata_ida; +}; + bool mlx5hws_bwc_match_params_is_complex(struct mlx5hws_context *ctx, u8 match_criteria_enable, struct mlx5hws_match_parameters *mask); -- 2.51.0 From 9d4024edce1063b616fa8bf7b2363290503cc322 Mon Sep 17 00:00:00 2001 From: Yevgeny Kliteynik Date: Sun, 11 May 2025 22:38:06 +0300 Subject: [PATCH 13/16] net/mlx5: HWS, force rehash when rule insertion failed Rules are inserted into the hash table in accordance with their hash index. When a certain number of rules is reached, the table is rehashed: a bigger new table is allocated and all the rules are moved there. But sometimes a new rule can't be inserted into the hash table because its index is full, even though the number of rules in the table is well below the threshold. The hash function is not perfect, so such cases are not rare. When that happens, we want to do the same rehash, in order to increase the table size and lower the probability of such cases. This patch fixes the use case where rule insertion was failing but a rehash couldn't be initiated due to the low number of rules: it adds a flag that denotes that a rehash is required, even if the number of rules in the table is below the rehash threshold. Signed-off-by: Yevgeny Kliteynik Reviewed-by: Vlad Dogaru Reviewed-by: Mark Bloch Signed-off-by: Tariq Toukan Link: https://patch.msgid.link/1746992290-568936-7-git-send-email-tariqt@nvidia.com Signed-off-by: Jakub Kicinski --- .../net/ethernet/mellanox/mlx5/core/steering/hws/bwc.c | 8 ++++++-- .../net/ethernet/mellanox/mlx5/core/steering/hws/bwc.h | 1 + 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.c index d70db6948dbb..dce2605fc99b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.c @@ -169,6 +169,7 @@ mlx5hws_bwc_matcher_create(struct mlx5hws_table *table, return NULL; atomic_set(&bwc_matcher->num_of_rules, 0); + atomic_set(&bwc_matcher->rehash_required, false); /* Check if the required match params can be all matched * in single STE, otherwise complex matcher is needed.
@@ -769,9 +770,9 @@ hws_bwc_matcher_rehash_size(struct mlx5hws_bwc_matcher *bwc_matcher) /* It is possible that other rule has already performed rehash. * Need to check again if we really need rehash. - * If the reason for rehash was size, but not any more - skip rehash. */ - if (!hws_bwc_matcher_rehash_size_needed(bwc_matcher, + if (!atomic_read(&bwc_matcher->rehash_required) && + !hws_bwc_matcher_rehash_size_needed(bwc_matcher, atomic_read(&bwc_matcher->num_of_rules))) return 0; @@ -782,6 +783,8 @@ hws_bwc_matcher_rehash_size(struct mlx5hws_bwc_matcher *bwc_matcher) * - destroy the old matcher */ + atomic_set(&bwc_matcher->rehash_required, false); + ret = hws_bwc_matcher_extend_size(bwc_matcher); if (ret) return ret; @@ -875,6 +878,7 @@ int mlx5hws_bwc_rule_create_simple(struct mlx5hws_bwc_rule *bwc_rule, * Try rehash by size and insert rule again - last chance. */ + atomic_set(&bwc_matcher->rehash_required, true); mutex_unlock(queue_lock); hws_bwc_lock_all_queues(ctx); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.h index cf2b65146317..d21fc247a510 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.h @@ -30,6 +30,7 @@ struct mlx5hws_bwc_matcher { u8 size_log; u32 priority; atomic_t num_of_rules; + atomic_t rehash_required; struct list_head *rules; }; -- 2.51.0 From 4c56b5cbc323a10ebb6595500fb78fd8a4762efd Mon Sep 17 00:00:00 2001 From: Yevgeny Kliteynik Date: Sun, 11 May 2025 22:38:07 +0300 Subject: [PATCH 14/16] net/mlx5: HWS, fix counting of rules in the matcher Currently the counter that counts the number of rules in a matcher is increased only when rule insertion is completed. In a multi-threaded use case this can lead to a scenario where many rules are in the process of insertion into the same matcher while none of them has completed the insertion, so the rule counter is not updated. This results in a rule insertion failure for many of them at the first attempt, which leads to all of them requiring a rehash and taking all the queue locks. This patch fixes the case by increasing the rule counter at the beginning of the insertion process and decreasing it in case of any failure.
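As a purely illustrative aside, the ordering change can be sketched in
a few lines of C11 (the names below are made up, not taken from the
driver): the counter is bumped before the insertion work starts, so
concurrent inserters observe each other, and it is rolled back on every
failure path.

    #include <stdatomic.h>
    #include <stdio.h>

    #define REHASH_THRESHOLD 8

    static atomic_int num_of_rules;

    static int insert_rule(int fail)
    {
            /* Count this insertion up front so that parallel inserters
             * see each other before any of them completes. */
            int n = atomic_fetch_add(&num_of_rules, 1) + 1;

            if (n > REHASH_THRESHOLD)
                    printf("rehash needed at %d rules\n", n);

            if (fail) {
                    /* Roll the counter back on any error path. */
                    atomic_fetch_sub(&num_of_rules, 1);
                    return -1;
            }
            return 0;
    }

    int main(void)
    {
            insert_rule(0);
            insert_rule(1);         /* failed insertion leaves the count at 1 */
            printf("rules: %d\n", atomic_load(&num_of_rules));
            return 0;
    }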
Signed-off-by: Vlad Dogaru Signed-off-by: Yevgeny Kliteynik Reviewed-by: Mark Bloch Signed-off-by: Tariq Toukan Link: https://patch.msgid.link/1746992290-568936-8-git-send-email-tariqt@nvidia.com Signed-off-by: Jakub Kicinski --- .../net/ethernet/mellanox/mlx5/core/steering/hws/bwc.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.c index dce2605fc99b..7d991a61eeb3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.c @@ -341,16 +341,12 @@ static void hws_bwc_rule_list_add(struct mlx5hws_bwc_rule *bwc_rule, u16 idx) { struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher; - atomic_inc(&bwc_matcher->num_of_rules); bwc_rule->bwc_queue_idx = idx; list_add(&bwc_rule->list_node, &bwc_matcher->rules[idx]); } static void hws_bwc_rule_list_remove(struct mlx5hws_bwc_rule *bwc_rule) { - struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher; - - atomic_dec(&bwc_matcher->num_of_rules); list_del_init(&bwc_rule->list_node); } @@ -404,6 +400,7 @@ int mlx5hws_bwc_rule_destroy_simple(struct mlx5hws_bwc_rule *bwc_rule) mutex_lock(queue_lock); ret = hws_bwc_rule_destroy_hws_sync(bwc_rule, &attr); + atomic_dec(&bwc_matcher->num_of_rules); hws_bwc_rule_list_remove(bwc_rule); mutex_unlock(queue_lock); @@ -840,7 +837,7 @@ int mlx5hws_bwc_rule_create_simple(struct mlx5hws_bwc_rule *bwc_rule, } /* check if number of rules require rehash */ - num_of_rules = atomic_read(&bwc_matcher->num_of_rules); + num_of_rules = atomic_inc_return(&bwc_matcher->num_of_rules); if (unlikely(hws_bwc_matcher_rehash_size_needed(bwc_matcher, num_of_rules))) { mutex_unlock(queue_lock); @@ -854,6 +851,7 @@ int mlx5hws_bwc_rule_create_simple(struct mlx5hws_bwc_rule *bwc_rule, bwc_matcher->size_log - MLX5HWS_BWC_MATCHER_SIZE_LOG_STEP, bwc_matcher->size_log, ret); + atomic_dec(&bwc_matcher->num_of_rules); return ret; } @@ -887,6 +885,7 @@ int mlx5hws_bwc_rule_create_simple(struct mlx5hws_bwc_rule *bwc_rule, if (ret) { mlx5hws_err(ctx, "BWC rule insertion: rehash failed (%d)\n", ret); + atomic_dec(&bwc_matcher->num_of_rules); return ret; } @@ -902,6 +901,7 @@ int mlx5hws_bwc_rule_create_simple(struct mlx5hws_bwc_rule *bwc_rule, if (unlikely(ret)) { mutex_unlock(queue_lock); mlx5hws_err(ctx, "BWC rule insertion failed (%d)\n", ret); + atomic_dec(&bwc_matcher->num_of_rules); return ret; } -- 2.51.0 From 041861b40f599311214a52075140db8be29fd27f Mon Sep 17 00:00:00 2001 From: Yevgeny Kliteynik Date: Sun, 11 May 2025 22:38:08 +0300 Subject: [PATCH 15/16] net/mlx5: HWS, fix redundant extension of action templates When a rule is inserted into a matcher, we search for a suitable action template. If no such template is found, the action template array is extended with a new template. However, when several threads are performing this in parallel, there is a race: we can end up extending the action template array with the same template more than once.
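To make the race and its fix concrete, here is a minimal standalone
pthread sketch of the check/lock/re-check pattern (illustrative names
only, not the driver's API):

    #include <pthread.h>
    #include <stdio.h>

    #define MAX_AT 8

    static pthread_mutex_t all_queues_lock = PTHREAD_MUTEX_INITIALIZER;
    static int at_array[MAX_AT];        /* stands in for the AT array */
    static int num_of_at;

    static int find_at(int wanted)
    {
            for (int i = 0; i < num_of_at; i++)
                    if (at_array[i] == wanted)
                            return i;
            return -1;
    }

    static int get_at_idx(int wanted)
    {
            int at_idx = find_at(wanted);

            if (at_idx >= 0)
                    return at_idx;      /* fast path, no global lock */

            pthread_mutex_lock(&all_queues_lock);
            /* Re-check under the lock: another thread may have appended
             * the same template while we were blocked. Without this
             * check, both threads would extend the array with a
             * duplicate. */
            at_idx = find_at(wanted);
            if (at_idx < 0 && num_of_at < MAX_AT) {
                    at_array[num_of_at] = wanted;
                    at_idx = num_of_at++;
            }
            pthread_mutex_unlock(&all_queues_lock);
            return at_idx;
    }

    int main(void)
    {
            /* Both lookups resolve to the same index - no duplicate. */
            printf("%d %d\n", get_at_idx(42), get_at_idx(42));
            return 0;
    }

The patch applies this same pattern to the action template array, as
summarized next.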
This patch does the following:
- refactor the code so that finding the action template index in rule
  create and in rule update shares a common auxiliary function
- after locking all the queues, check again whether the action template
  array still needs to be extended

Signed-off-by: Vlad Dogaru
Signed-off-by: Yevgeny Kliteynik
Reviewed-by: Mark Bloch
Signed-off-by: Tariq Toukan
Link: https://patch.msgid.link/1746992290-568936-9-git-send-email-tariqt@nvidia.com
Signed-off-by: Jakub Kicinski
---
 .../mellanox/mlx5/core/steering/hws/bwc.c     | 105 +++++++---------
 1 file changed, 54 insertions(+), 51 deletions(-)

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.c
index 7d991a61eeb3..456fac895f5e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.c
@@ -789,6 +789,53 @@ hws_bwc_matcher_rehash_size(struct mlx5hws_bwc_matcher *bwc_matcher)
 	return hws_bwc_matcher_move(bwc_matcher);
 }
 
+static int hws_bwc_rule_get_at_idx(struct mlx5hws_bwc_rule *bwc_rule,
+				   struct mlx5hws_rule_action rule_actions[],
+				   u16 bwc_queue_idx)
+{
+	struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher;
+	struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
+	struct mutex *queue_lock; /* Protect the queue */
+	int at_idx, ret;
+
+	/* check if rehash needed due to missing action template */
+	at_idx = hws_bwc_matcher_find_at(bwc_matcher, rule_actions);
+	if (likely(at_idx >= 0))
+		return at_idx;
+
+	/* we need to extend BWC matcher action templates array */
+	queue_lock = hws_bwc_get_queue_lock(ctx, bwc_queue_idx);
+	mutex_unlock(queue_lock);
+	hws_bwc_lock_all_queues(ctx);
+
+	/* check again - perhaps other thread already did extend_at */
+	at_idx = hws_bwc_matcher_find_at(bwc_matcher, rule_actions);
+	if (at_idx >= 0)
+		goto out;
+
+	ret = hws_bwc_matcher_extend_at(bwc_matcher, rule_actions);
+	if (unlikely(ret)) {
+		mlx5hws_err(ctx, "BWC rule: failed extending AT (%d)", ret);
+		at_idx = -EINVAL;
+		goto out;
+	}
+
+	/* action templates array was extended, we need the last idx */
+	at_idx = bwc_matcher->num_of_at - 1;
+	ret = mlx5hws_matcher_attach_at(bwc_matcher->matcher,
+					bwc_matcher->at[at_idx]);
+	if (unlikely(ret)) {
+		mlx5hws_err(ctx, "BWC rule: failed attaching new AT (%d)", ret);
+		at_idx = -EINVAL;
+		goto out;
+	}
+
+out:
+	hws_bwc_unlock_all_queues(ctx);
+	mutex_lock(queue_lock);
+	return at_idx;
+}
+
 int mlx5hws_bwc_rule_create_simple(struct mlx5hws_bwc_rule *bwc_rule,
 				   u32 *match_param,
 				   struct mlx5hws_rule_action rule_actions[],
@@ -809,31 +856,12 @@ int mlx5hws_bwc_rule_create_simple(struct mlx5hws_bwc_rule *bwc_rule,
 
 	mutex_lock(queue_lock);
 
-	/* check if rehash needed due to missing action template */
-	at_idx = hws_bwc_matcher_find_at(bwc_matcher, rule_actions);
+	at_idx = hws_bwc_rule_get_at_idx(bwc_rule, rule_actions, bwc_queue_idx);
 	if (unlikely(at_idx < 0)) {
-		/* we need to extend BWC matcher action templates array */
 		mutex_unlock(queue_lock);
-		hws_bwc_lock_all_queues(ctx);
-
-		ret = hws_bwc_matcher_extend_at(bwc_matcher, rule_actions);
-		if (unlikely(ret)) {
-			hws_bwc_unlock_all_queues(ctx);
-			return ret;
-		}
-
-		/* action templates array was extended, we need the last idx */
-		at_idx = bwc_matcher->num_of_at - 1;
-
-		ret = mlx5hws_matcher_attach_at(bwc_matcher->matcher,
-						bwc_matcher->at[at_idx]);
-		if (unlikely(ret)) {
-			hws_bwc_unlock_all_queues(ctx);
-			return ret;
-		}
-
-		hws_bwc_unlock_all_queues(ctx);
-		mutex_lock(queue_lock);
+		mlx5hws_err(ctx, "BWC rule create: failed getting AT (%d)",
+			    at_idx);
+		return -EINVAL;
 	}
 
 	/* check if number of rules require rehash */
@@ -971,36 +999,11 @@ hws_bwc_rule_action_update(struct mlx5hws_bwc_rule *bwc_rule,
 
 	mutex_lock(queue_lock);
 
-	/* check if rehash needed due to missing action template */
-	at_idx = hws_bwc_matcher_find_at(bwc_matcher, rule_actions);
+	at_idx = hws_bwc_rule_get_at_idx(bwc_rule, rule_actions, idx);
 	if (unlikely(at_idx < 0)) {
-		/* we need to extend BWC matcher action templates array */
 		mutex_unlock(queue_lock);
-		hws_bwc_lock_all_queues(ctx);
-
-		/* check again - perhaps other thread already did extend_at */
-		at_idx = hws_bwc_matcher_find_at(bwc_matcher, rule_actions);
-		if (likely(at_idx < 0)) {
-			ret = hws_bwc_matcher_extend_at(bwc_matcher, rule_actions);
-			if (unlikely(ret)) {
-				hws_bwc_unlock_all_queues(ctx);
-				mlx5hws_err(ctx, "BWC rule update: failed extending AT (%d)", ret);
-				return -EINVAL;
-			}
-
-			/* action templates array was extended, we need the last idx */
-			at_idx = bwc_matcher->num_of_at - 1;
-
-			ret = mlx5hws_matcher_attach_at(bwc_matcher->matcher,
-							bwc_matcher->at[at_idx]);
-			if (unlikely(ret)) {
-				hws_bwc_unlock_all_queues(ctx);
-				return ret;
-			}
-		}
-
-		hws_bwc_unlock_all_queues(ctx);
-		mutex_lock(queue_lock);
+		mlx5hws_err(ctx, "BWC rule update: failed getting AT\n");
+		return -EINVAL;
 	}
 
 	ret = hws_bwc_rule_update_sync(bwc_rule,
--
2.51.0

From ef94799a87415790d4297cf06075f99b70c420cd Mon Sep 17 00:00:00 2001
From: Yevgeny Kliteynik
Date: Sun, 11 May 2025 22:38:09 +0300
Subject: [PATCH 16/16] net/mlx5: HWS, rework rehash loop

Rework the rehash loop, simplifying the code and making it less
error-prone:

- Instead of doing a round-robin over all the queues with a batch of
  rules in each cycle, just go over all the queues and move all the
  rules that belong to each queue.

- If at some stage of moving a rule we get a failure (which should not
  happen), this can't be rolled back. So instead of aborting the rehash
  and leaving the matcher in a broken state, allow the loop to
  continue: attempt to move the rest of the rules and delete the old
  matcher. A rule that failed to move to the new matcher will lose its
  match STE once the rehash is completed and the old matcher is
  deleted, so the rule won't match any traffic anymore. Such a rule's
  packets will fall back to the steering pipeline without HW offload.
  The rehash procedure will return an error, which will cause the
  insertion of the rule that triggered this whole rehash to fail.
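The reworked control flow is easier to follow in a reduced model
(illustration only; move_rule() and poll_queue() are always-succeeding
stubs standing in for mlx5hws_matcher_resize_rule_move() and
mlx5hws_bwc_queue_poll(), and the per-queue rule counts are invented):

#include <stdbool.h>
#include <stdio.h>

#define NUM_QUEUES 4

static int move_rule(int queue, int rule)
{
	(void)queue; (void)rule;
	return 0;
}

static int poll_queue(int queue, unsigned int *pending, bool drain)
{
	(void)queue;
	if (drain)
		*pending = 0;	/* a real poll completes outstanding work */
	return 0;
}

static int rules_per_queue[NUM_QUEUES] = { 3, 0, 2, 1 };

static int move_all(void)
{
	bool move_error = false, poll_error = false, drain_error = false;

	for (int q = 0; q < NUM_QUEUES; q++) {
		unsigned int pending = 0;

		if (!rules_per_queue[q])
			continue;

		for (int r = 0; r < rules_per_queue[q]; r++) {
			/* Record the first failure, then keep moving */
			if (move_rule(q, r) && !move_error)
				move_error = true;

			pending++;
			if (poll_queue(q, &pending, false) && !poll_error)
				poll_error = true;
		}

		/* Drain whatever is still in flight for this queue */
		if (pending && poll_queue(q, &pending, true) && !drain_error)
			drain_error = true;
	}

	/* Fail the rehash only after every rule has been attempted */
	return (move_error || poll_error || drain_error) ? -1 : 0;
}

int main(void)
{
	printf("move_all: %d\n", move_all());
	return 0;
}

A failure only sets a sticky flag; the loop still visits every
remaining rule and drains every queue before reporting the error.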
Signed-off-by: Yevgeny Kliteynik
Reviewed-by: Vlad Dogaru
Reviewed-by: Mark Bloch
Signed-off-by: Tariq Toukan
Link: https://patch.msgid.link/1746992290-568936-10-git-send-email-tariqt@nvidia.com
Signed-off-by: Jakub Kicinski
---
 .../mellanox/mlx5/core/steering/hws/bwc.c     | 127 +++++++-----------
 1 file changed, 52 insertions(+), 75 deletions(-)

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.c
index 456fac895f5e..9e057f808ea5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.c
@@ -610,95 +610,69 @@ hws_bwc_matcher_find_at(struct mlx5hws_bwc_matcher *bwc_matcher,
 static int
 hws_bwc_matcher_move_all_simple(struct mlx5hws_bwc_matcher *bwc_matcher)
 {
+	bool move_error = false, poll_error = false, drain_error = false;
 	struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
+	struct mlx5hws_matcher *matcher = bwc_matcher->matcher;
 	u16 bwc_queues = mlx5hws_bwc_queues(ctx);
-	struct mlx5hws_bwc_rule **bwc_rules;
 	struct mlx5hws_rule_attr rule_attr;
-	u32 *pending_rules;
-	int i, j, ret = 0;
-	bool all_done;
-	u16 burst_th;
+	struct mlx5hws_bwc_rule *bwc_rule;
+	struct mlx5hws_send_engine *queue;
+	struct list_head *rules_list;
+	u32 pending_rules;
+	int i, ret = 0;
 
 	mlx5hws_bwc_rule_fill_attr(bwc_matcher, 0, 0, &rule_attr);
 
-	pending_rules = kcalloc(bwc_queues, sizeof(*pending_rules), GFP_KERNEL);
-	if (!pending_rules)
-		return -ENOMEM;
-
-	bwc_rules = kcalloc(bwc_queues, sizeof(*bwc_rules), GFP_KERNEL);
-	if (!bwc_rules) {
-		ret = -ENOMEM;
-		goto free_pending_rules;
-	}
-
 	for (i = 0; i < bwc_queues; i++) {
 		if (list_empty(&bwc_matcher->rules[i]))
-			bwc_rules[i] = NULL;
-		else
-			bwc_rules[i] = list_first_entry(&bwc_matcher->rules[i],
-							struct mlx5hws_bwc_rule,
-							list_node);
-	}
+			continue;
 
-	do {
-		all_done = true;
+		pending_rules = 0;
+		rule_attr.queue_id = mlx5hws_bwc_get_queue_id(ctx, i);
+		rules_list = &bwc_matcher->rules[i];
 
-		for (i = 0; i < bwc_queues; i++) {
-			rule_attr.queue_id = mlx5hws_bwc_get_queue_id(ctx, i);
-			burst_th = hws_bwc_get_burst_th(ctx, rule_attr.queue_id);
-
-			for (j = 0; j < burst_th && bwc_rules[i]; j++) {
-				rule_attr.burst = !!((j + 1) % burst_th);
-				ret = mlx5hws_matcher_resize_rule_move(bwc_matcher->matcher,
-								       bwc_rules[i]->rule,
-								       &rule_attr);
-				if (unlikely(ret)) {
-					mlx5hws_err(ctx,
-						    "Moving BWC rule failed during rehash (%d)\n",
-						    ret);
-					goto free_bwc_rules;
-				}
+		list_for_each_entry(bwc_rule, rules_list, list_node) {
+			ret = mlx5hws_matcher_resize_rule_move(matcher,
+							       bwc_rule->rule,
+							       &rule_attr);
+			if (unlikely(ret && !move_error)) {
+				mlx5hws_err(ctx,
+					    "Moving BWC rule: move failed (%d), attempting to move rest of the rules\n",
+					    ret);
+				move_error = true;
+			}
 
-				all_done = false;
-				pending_rules[i]++;
-				bwc_rules[i] = list_is_last(&bwc_rules[i]->list_node,
-							    &bwc_matcher->rules[i]) ?
-					       NULL : list_next_entry(bwc_rules[i], list_node);
-
-				ret = mlx5hws_bwc_queue_poll(ctx,
-							     rule_attr.queue_id,
-							     &pending_rules[i],
-							     false);
-				if (unlikely(ret)) {
-					mlx5hws_err(ctx,
-						    "Moving BWC rule failed during rehash (%d)\n",
-						    ret);
-					goto free_bwc_rules;
-				}
+			pending_rules++;
+			ret = mlx5hws_bwc_queue_poll(ctx,
+						     rule_attr.queue_id,
+						     &pending_rules,
+						     false);
+			if (unlikely(ret && !poll_error)) {
+				mlx5hws_err(ctx,
+					    "Moving BWC rule: poll failed (%d), attempting to move rest of the rules\n",
+					    ret);
+				poll_error = true;
 			}
 		}
-	} while (!all_done);
-
-	/* drain all the bwc queues */
-	for (i = 0; i < bwc_queues; i++) {
-		if (pending_rules[i]) {
-			u16 queue_id = mlx5hws_bwc_get_queue_id(ctx, i);
-
-			mlx5hws_send_engine_flush_queue(&ctx->send_queue[queue_id]);
-			ret = mlx5hws_bwc_queue_poll(ctx, queue_id,
-						     &pending_rules[i], true);
-			if (unlikely(ret)) {
+		if (pending_rules) {
+			queue = &ctx->send_queue[rule_attr.queue_id];
+			mlx5hws_send_engine_flush_queue(queue);
+			ret = mlx5hws_bwc_queue_poll(ctx,
+						     rule_attr.queue_id,
+						     &pending_rules,
+						     true);
+			if (unlikely(ret && !drain_error)) {
 				mlx5hws_err(ctx,
-					    "Moving BWC rule failed during rehash (%d)\n", ret);
-				goto free_bwc_rules;
+					    "Moving BWC rule: drain failed (%d), attempting to move rest of the rules\n",
+					    ret);
+				drain_error = true;
 			}
 		}
 	}
 
-free_bwc_rules:
-	kfree(bwc_rules);
-free_pending_rules:
-	kfree(pending_rules);
+	if (move_error || poll_error || drain_error)
+		ret = -EINVAL;
 
 	return ret;
 }
 
@@ -742,15 +716,18 @@ static int hws_bwc_matcher_move(struct mlx5hws_bwc_matcher *bwc_matcher)
 	}
 
 	ret = hws_bwc_matcher_move_all(bwc_matcher);
-	if (ret) {
-		mlx5hws_err(ctx, "Rehash error: moving rules failed\n");
-		return -ENOMEM;
-	}
+	if (ret)
+		mlx5hws_err(ctx, "Rehash error: moving rules failed, attempting to remove the old matcher\n");
+
+	/* Error during rehash can't be rolled back.
+	 * The best option here is to allow the rehash to complete and remove
+	 * the old matcher - can't leave the matcher in the 'in_resize' state.
+	 */
 
 	bwc_matcher->matcher = new_matcher;
 	mlx5hws_matcher_destroy(old_matcher);
 
-	return 0;
+	return ret;
 }
 
 static int
--
2.51.0
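To close, the commit-regardless pattern at the heart of
hws_bwc_matcher_move() can be sketched in standalone C (the types and
helpers below are illustrative stand-ins, not the mlx5 API): even when
moving the rules failed, swap in the new matcher and destroy the old
one, and only then propagate the error to the insertion that triggered
the rehash.

#include <stdio.h>
#include <stdlib.h>

struct matcher { int size_log; };

static int move_all_rules(struct matcher *from, struct matcher *to)
{
	(void)from; (void)to;
	return 0;	/* may fail in principle; cannot be rolled back */
}

static int matcher_move(struct matcher **cur)
{
	struct matcher *old_matcher = *cur;
	struct matcher *new_matcher = malloc(sizeof(*new_matcher));
	int ret;

	if (!new_matcher)
		return -1;
	new_matcher->size_log = old_matcher->size_log + 1;

	ret = move_all_rules(old_matcher, new_matcher);
	/* Even on failure, don't stay mid-resize: commit to the new
	 * matcher, free the old one, then report the error */
	*cur = new_matcher;
	free(old_matcher);
	return ret;
}

int main(void)
{
	struct matcher *m = malloc(sizeof(*m));

	if (!m)
		return 1;
	m->size_log = 10;
	printf("rehash: %d, new size_log: %d\n", matcher_move(&m), m->size_log);
	free(m);
	return 0;
}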