From 80369686737fe07c233a1152da0b84372dabdcd6 Mon Sep 17 00:00:00 2001
From: Lorenzo Bianconi
Date: Fri, 28 Feb 2025 11:54:15 +0100
Subject: [PATCH 01/16] net: airoha: Enable support for multiple net_devices

In the current codebase the airoha_eth driver supports just a single
net_device, connected to the Packet Switch Engine (PSE) LAN port (GDM1).
As shown in commit 23020f049327 ("net: airoha: Introduce ethernet support
for EN7581 SoC"), the PSE can switch packets between four GDM ports.

Enable the creation of a net_device for each GDM port of the PSE module.
Moreover, since the QDMA blocks can be shared between net_devices, do not
stop TX/RX DMA in airoha_dev_stop() if there are still active net_devices
using this QDMA block.

This is a preliminary patch to enable flowtable hw offloading for the
EN7581 SoC.

Co-developed-by: Christian Marangi
Signed-off-by: Christian Marangi
Signed-off-by: Lorenzo Bianconi
Signed-off-by: Paolo Abeni
---
 drivers/net/ethernet/airoha/airoha_eth.c | 35 ++++++++++++++----------
 drivers/net/ethernet/airoha/airoha_eth.h |  4 ++-
 2 files changed, 24 insertions(+), 15 deletions(-)

diff --git a/drivers/net/ethernet/airoha/airoha_eth.c b/drivers/net/ethernet/airoha/airoha_eth.c
index 347fc403f299..69775dfbc1c3 100644
--- a/drivers/net/ethernet/airoha/airoha_eth.c
+++ b/drivers/net/ethernet/airoha/airoha_eth.c
@@ -1562,6 +1562,7 @@ static int airoha_dev_open(struct net_device *dev)
 	airoha_qdma_set(qdma, REG_QDMA_GLOBAL_CFG,
 			GLOBAL_CFG_TX_DMA_EN_MASK |
 			GLOBAL_CFG_RX_DMA_EN_MASK);
+	atomic_inc(&qdma->users);
 
 	return 0;
 }
@@ -1577,16 +1578,20 @@ static int airoha_dev_stop(struct net_device *dev)
 	if (err)
 		return err;
 
-	airoha_qdma_clear(qdma, REG_QDMA_GLOBAL_CFG,
-			  GLOBAL_CFG_TX_DMA_EN_MASK |
-			  GLOBAL_CFG_RX_DMA_EN_MASK);
+	for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++)
+		netdev_tx_reset_subqueue(dev, i);
 
-	for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) {
-		if (!qdma->q_tx[i].ndesc)
-			continue;
+	if (atomic_dec_and_test(&qdma->users)) {
+		airoha_qdma_clear(qdma, REG_QDMA_GLOBAL_CFG,
+				  GLOBAL_CFG_TX_DMA_EN_MASK |
+				  GLOBAL_CFG_RX_DMA_EN_MASK);
 
-		airoha_qdma_cleanup_tx_queue(&qdma->q_tx[i]);
-		netdev_tx_reset_subqueue(dev, i);
+		for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) {
+			if (!qdma->q_tx[i].ndesc)
+				continue;
+
+			airoha_qdma_cleanup_tx_queue(&qdma->q_tx[i]);
+		}
 	}
 
 	return 0;
@@ -2332,13 +2337,14 @@ static void airoha_metadata_dst_free(struct airoha_gdm_port *port)
 	}
 }
 
-static int airoha_alloc_gdm_port(struct airoha_eth *eth, struct device_node *np)
+static int airoha_alloc_gdm_port(struct airoha_eth *eth,
+				 struct device_node *np, int index)
 {
 	const __be32 *id_ptr = of_get_property(np, "reg", NULL);
 	struct airoha_gdm_port *port;
 	struct airoha_qdma *qdma;
 	struct net_device *dev;
-	int err, index;
+	int err, p;
 	u32 id;
 
 	if (!id_ptr) {
@@ -2347,14 +2353,14 @@ static int airoha_alloc_gdm_port(struct airoha_eth *eth, struct device_node *np)
 	}
 
 	id = be32_to_cpup(id_ptr);
-	index = id - 1;
+	p = id - 1;
 
 	if (!id || id > ARRAY_SIZE(eth->ports)) {
 		dev_err(eth->dev, "invalid gdm port id: %d\n", id);
 		return -EINVAL;
 	}
 
-	if (eth->ports[index]) {
+	if (eth->ports[p]) {
 		dev_err(eth->dev, "duplicate gdm port id: %d\n", id);
 		return -EINVAL;
 	}
@@ -2402,7 +2408,7 @@ static int airoha_alloc_gdm_port(struct airoha_eth *eth, struct device_node *np)
 	port->qdma = qdma;
 	port->dev = dev;
 	port->id = id;
-	eth->ports[index] = port;
+	eth->ports[p] = port;
 
 	err = airoha_metadata_dst_alloc(port);
 	if (err)
@@ -2474,6 +2480,7 @@ static int airoha_probe(struct platform_device *pdev)
 	for (i = 0; i < ARRAY_SIZE(eth->qdma); i++)
 		airoha_qdma_start_napi(&eth->qdma[i]);
 
+	i = 0;
 	for_each_child_of_node(pdev->dev.of_node, np) {
 		if (!of_device_is_compatible(np, "airoha,eth-mac"))
 			continue;
@@ -2481,7 +2488,7 @@
 		if (!of_device_is_available(np))
 			continue;
 
-		err = airoha_alloc_gdm_port(eth, np);
+		err = airoha_alloc_gdm_port(eth, np, i++);
 		if (err) {
 			of_node_put(np);
 			goto error_napi_stop;
diff --git a/drivers/net/ethernet/airoha/airoha_eth.h b/drivers/net/ethernet/airoha/airoha_eth.h
index fee6c10eaedf..74bdfd9e8d2f 100644
--- a/drivers/net/ethernet/airoha/airoha_eth.h
+++ b/drivers/net/ethernet/airoha/airoha_eth.h
@@ -13,7 +13,7 @@
 #include
 #include
 
-#define AIROHA_MAX_NUM_GDM_PORTS	1
+#define AIROHA_MAX_NUM_GDM_PORTS	4
 #define AIROHA_MAX_NUM_QDMA		2
 #define AIROHA_MAX_DSA_PORTS		7
 #define AIROHA_MAX_NUM_RSTS		3
@@ -212,6 +212,8 @@ struct airoha_qdma {
 	u32 irqmask[QDMA_INT_REG_MAX];
 	int irq;
 
+	atomic_t users;
+
 	struct airoha_tx_irq_queue q_tx_irq[AIROHA_NUM_TX_IRQ];
 
 	struct airoha_queue q_tx[AIROHA_NUM_TX_RING];
-- 
2.51.0
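[Editor's note — illustration only, not part of the series] The users
refcount is the core of the multi-net_device change above: every open
(re)enables DMA unconditionally, while teardown only runs for the last
user of the shared QDMA block. A minimal sketch of that pattern in
isolation (the two stripped-down helpers are hypothetical; the register
helpers and masks are the driver's own):

/* Sketch: last-user teardown for a QDMA block shared by several
 * net_devices, condensed from airoha_dev_open()/airoha_dev_stop(). */
static void qdma_user_start(struct airoha_qdma *qdma)
{
	/* every opener (re)enables DMA and takes a reference */
	airoha_qdma_set(qdma, REG_QDMA_GLOBAL_CFG,
			GLOBAL_CFG_TX_DMA_EN_MASK |
			GLOBAL_CFG_RX_DMA_EN_MASK);
	atomic_inc(&qdma->users);
}

static void qdma_user_stop(struct airoha_qdma *qdma)
{
	/* only the last user actually stops TX/RX DMA */
	if (atomic_dec_and_test(&qdma->users))
		airoha_qdma_clear(qdma, REG_QDMA_GLOBAL_CFG,
				  GLOBAL_CFG_TX_DMA_EN_MASK |
				  GLOBAL_CFG_RX_DMA_EN_MASK);
}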
From 67fde5d58cd43d129a979e918ec9cd5d2e2fbcfb Mon Sep 17 00:00:00 2001
From: Lorenzo Bianconi
Date: Fri, 28 Feb 2025 11:54:16 +0100
Subject: [PATCH 02/16] net: airoha: Move REG_GDM_FWD_CFG() initialization to
 airoha_dev_init()

Move the REG_GDM_FWD_CFG() register initialization into the
airoha_dev_init routine. Moreover, always send traffic to the PPE module
so it can be processed by the hw accelerator.
This is a preliminary patch to enable netfilter flowtable hw offloading
on the EN7581 SoC.

Signed-off-by: Lorenzo Bianconi
Signed-off-by: Paolo Abeni
---
 drivers/net/ethernet/airoha/airoha_eth.c | 14 ++++----------
 1 file changed, 4 insertions(+), 10 deletions(-)

diff --git a/drivers/net/ethernet/airoha/airoha_eth.c b/drivers/net/ethernet/airoha/airoha_eth.c
index 69775dfbc1c3..4e74fd183991 100644
--- a/drivers/net/ethernet/airoha/airoha_eth.c
+++ b/drivers/net/ethernet/airoha/airoha_eth.c
@@ -107,25 +107,20 @@ static void airoha_set_gdm_port_fwd_cfg(struct airoha_eth *eth, u32 addr,
 
 static int airoha_set_gdm_port(struct airoha_eth *eth, int port, bool enable)
 {
-	u32 val = enable ? FE_PSE_PORT_PPE1 : FE_PSE_PORT_DROP;
-	u32 vip_port, cfg_addr;
+	u32 vip_port;
 
 	switch (port) {
 	case XSI_PCIE0_PORT:
 		vip_port = XSI_PCIE0_VIP_PORT_MASK;
-		cfg_addr = REG_GDM_FWD_CFG(3);
 		break;
 	case XSI_PCIE1_PORT:
 		vip_port = XSI_PCIE1_VIP_PORT_MASK;
-		cfg_addr = REG_GDM_FWD_CFG(3);
 		break;
 	case XSI_USB_PORT:
 		vip_port = XSI_USB_VIP_PORT_MASK;
-		cfg_addr = REG_GDM_FWD_CFG(4);
 		break;
 	case XSI_ETH_PORT:
 		vip_port = XSI_ETH_VIP_PORT_MASK;
-		cfg_addr = REG_GDM_FWD_CFG(4);
 		break;
 	default:
 		return -EINVAL;
@@ -139,8 +134,6 @@ static int airoha_set_gdm_port(struct airoha_eth *eth, int port, bool enable)
 		airoha_fe_clear(eth, REG_FE_IFC_PORT_EN, vip_port);
 	}
 
-	airoha_set_gdm_port_fwd_cfg(eth, cfg_addr, val);
-
 	return 0;
 }
 
@@ -177,8 +170,6 @@ static void airoha_fe_maccr_init(struct airoha_eth *eth)
 		airoha_fe_set(eth, REG_GDM_FWD_CFG(p),
 			      GDM_TCP_CKSUM | GDM_UDP_CKSUM | GDM_IP4_CKSUM |
 			      GDM_DROP_CRC_ERR);
-		airoha_set_gdm_port_fwd_cfg(eth, REG_GDM_FWD_CFG(p),
-					    FE_PSE_PORT_CDM1);
 		airoha_fe_rmw(eth, REG_GDM_LEN_CFG(p),
 			      GDM_SHORT_LEN_MASK | GDM_LONG_LEN_MASK,
 			      FIELD_PREP(GDM_SHORT_LEN_MASK, 60) |
@@ -1614,8 +1605,11 @@ static int airoha_dev_set_macaddr(struct net_device *dev, void *p)
 static int airoha_dev_init(struct net_device *dev)
 {
 	struct airoha_gdm_port *port = netdev_priv(dev);
+	struct airoha_eth *eth = port->qdma->eth;
 
 	airoha_set_macaddr(port, dev->dev_addr);
+	airoha_set_gdm_port_fwd_cfg(eth, REG_GDM_FWD_CFG(port->id),
+				    FE_PSE_PORT_PPE1);
 
 	return 0;
 }
-- 
2.51.0
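[Editor's note — illustration only, not part of the series] The body of
airoha_set_gdm_port_fwd_cfg() is never shown in this series; only its
closing FIELD_PREP(GDM_UCFQ_MASK, val) is visible as diff context in the
next patch. A hedged sketch of what it presumably does — steering every
traffic class of one GDM port to the PSE destination passed in val (the
OCFQ/MCFQ/BCFQ masks are an assumption, only GDM_UCFQ_MASK is confirmed
by the visible context):

/* Assumed shape: unicast/broadcast/multicast/other forward queues of
 * the GDM port at 'addr' all point at the same PSE port (e.g.
 * FE_PSE_PORT_PPE1 after this patch). */
static void set_gdm_port_fwd_cfg_sketch(struct airoha_eth *eth, u32 addr,
					u32 val)
{
	airoha_fe_rmw(eth, addr, GDM_OCFQ_MASK, FIELD_PREP(GDM_OCFQ_MASK, val));
	airoha_fe_rmw(eth, addr, GDM_MCFQ_MASK, FIELD_PREP(GDM_MCFQ_MASK, val));
	airoha_fe_rmw(eth, addr, GDM_BCFQ_MASK, FIELD_PREP(GDM_BCFQ_MASK, val));
	airoha_fe_rmw(eth, addr, GDM_UCFQ_MASK, FIELD_PREP(GDM_UCFQ_MASK, val));
}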
From c28b8375f6d02ef3b5e8c51234cc3f6d47d9fb7f Mon Sep 17 00:00:00 2001
From: Lorenzo Bianconi
Date: Fri, 28 Feb 2025 11:54:17 +0100
Subject: [PATCH 03/16] net: airoha: Rename airoha_set_gdm_port() to
 airoha_set_vip_for_gdm_port()

Rename airoha_set_gdm_port() to airoha_set_vip_for_gdm_port().
Get rid of the airoha_set_gdm_ports() routine.
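[Editor's note — illustration only, not part of the series] The rewritten
switch below keys on the GDM port id (3 and 4) and leaves two FIXMEs:
per the removed code in patch 02, GDM3 fronts both PCIe XSI functions
and GDM4 fronts both the Ethernet and USB XSI functions. A hypothetical
shape for the eventual fix, once the driver knows which XSI function is
active behind each port (names and the bool parameters are invented):

static u32 gdm_port_vip_mask_sketch(struct airoha_gdm_port *port,
				    bool pcie1, bool usb)
{
	switch (port->id) {
	case 3: /* PCIe0 today, PCIe1 once the FIXME is resolved */
		return pcie1 ? XSI_PCIE1_VIP_PORT_MASK
			     : XSI_PCIE0_VIP_PORT_MASK;
	case 4: /* Ethernet today, USB once the FIXME is resolved */
		return usb ? XSI_USB_VIP_PORT_MASK
			   : XSI_ETH_VIP_PORT_MASK;
	default:
		return 0; /* no VIP port to toggle */
	}
}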
Signed-off-by: Lorenzo Bianconi
Signed-off-by: Paolo Abeni
---
 drivers/net/ethernet/airoha/airoha_eth.c | 49 ++++++------------
 drivers/net/ethernet/airoha/airoha_eth.h |  8 ----
 2 files changed, 11 insertions(+), 46 deletions(-)

diff --git a/drivers/net/ethernet/airoha/airoha_eth.c b/drivers/net/ethernet/airoha/airoha_eth.c
index 4e74fd183991..8a5f7c080b48 100644
--- a/drivers/net/ethernet/airoha/airoha_eth.c
+++ b/drivers/net/ethernet/airoha/airoha_eth.c
@@ -105,25 +105,23 @@ static void airoha_set_gdm_port_fwd_cfg(struct airoha_eth *eth, u32 addr,
 			FIELD_PREP(GDM_UCFQ_MASK, val));
 }
 
-static int airoha_set_gdm_port(struct airoha_eth *eth, int port, bool enable)
+static int airoha_set_vip_for_gdm_port(struct airoha_gdm_port *port,
+				       bool enable)
 {
+	struct airoha_eth *eth = port->qdma->eth;
 	u32 vip_port;
 
-	switch (port) {
-	case XSI_PCIE0_PORT:
+	switch (port->id) {
+	case 3:
+		/* FIXME: handle XSI_PCIE1_PORT */
 		vip_port = XSI_PCIE0_VIP_PORT_MASK;
 		break;
-	case XSI_PCIE1_PORT:
-		vip_port = XSI_PCIE1_VIP_PORT_MASK;
-		break;
-	case XSI_USB_PORT:
-		vip_port = XSI_USB_VIP_PORT_MASK;
-		break;
-	case XSI_ETH_PORT:
+	case 4:
+		/* FIXME: handle XSI_USB_PORT */
 		vip_port = XSI_ETH_VIP_PORT_MASK;
 		break;
 	default:
-		return -EINVAL;
+		return 0;
 	}
 
 	if (enable) {
@@ -137,31 +135,6 @@ static int airoha_set_gdm_port(struct airoha_eth *eth, int port, bool enable)
 	return 0;
 }
 
-static int airoha_set_gdm_ports(struct airoha_eth *eth, bool enable)
-{
-	const int port_list[] = {
-		XSI_PCIE0_PORT,
-		XSI_PCIE1_PORT,
-		XSI_USB_PORT,
-		XSI_ETH_PORT
-	};
-	int i, err;
-
-	for (i = 0; i < ARRAY_SIZE(port_list); i++) {
-		err = airoha_set_gdm_port(eth, port_list[i], enable);
-		if (err)
-			goto error;
-	}
-
-	return 0;
-
-error:
-	for (i--; i >= 0; i--)
-		airoha_set_gdm_port(eth, port_list[i], false);
-
-	return err;
-}
-
 static void airoha_fe_maccr_init(struct airoha_eth *eth)
 {
 	int p;
@@ -1539,7 +1512,7 @@ static int airoha_dev_open(struct net_device *dev)
 	int err;
 
 	netif_tx_start_all_queues(dev);
-	err = airoha_set_gdm_ports(qdma->eth, true);
+	err = airoha_set_vip_for_gdm_port(port, true);
 	if (err)
 		return err;
@@ -1565,7 +1538,7 @@ static int airoha_dev_stop(struct net_device *dev)
 	int i, err;
 
 	netif_tx_disable(dev);
-	err = airoha_set_gdm_ports(qdma->eth, false);
+	err = airoha_set_vip_for_gdm_port(port, false);
 	if (err)
 		return err;
diff --git a/drivers/net/ethernet/airoha/airoha_eth.h b/drivers/net/ethernet/airoha/airoha_eth.h
index 74bdfd9e8d2f..44834227a589 100644
--- a/drivers/net/ethernet/airoha/airoha_eth.h
+++ b/drivers/net/ethernet/airoha/airoha_eth.h
@@ -57,14 +57,6 @@
 	QDMA_INT_REG_MAX
 };
 
-enum {
-	XSI_PCIE0_PORT,
-	XSI_PCIE1_PORT,
-	XSI_USB_PORT,
-	XSI_AE_PORT,
-	XSI_ETH_PORT,
-};
-
 enum {
 	XSI_PCIE0_VIP_PORT_MASK	= BIT(22),
 	XSI_PCIE1_VIP_PORT_MASK	= BIT(23),
-- 
2.51.0

From 266f7a0f81c059e68748d05f89258fc97f5b07f9 Mon Sep 17 00:00:00 2001
From: Lorenzo Bianconi
Date: Fri, 28 Feb 2025 11:54:18 +0100
Subject: [PATCH 04/16] dt-bindings: net: airoha: Add the NPU node for EN7581
 SoC

Add the NPU device-tree binding documentation for the EN7581 SoC.
The Airoha Network Processor Unit (NPU) provides a configuration
interface to implement wired and wireless hardware flow offloading,
programming the Packet Processor Engine (PPE) flow table.
Reviewed-by: Krzysztof Kozlowski
Signed-off-by: Lorenzo Bianconi
Signed-off-by: Paolo Abeni
---
 .../bindings/net/airoha,en7581-npu.yaml       | 84 +++++++++++++++++++
 1 file changed, 84 insertions(+)
 create mode 100644 Documentation/devicetree/bindings/net/airoha,en7581-npu.yaml

diff --git a/Documentation/devicetree/bindings/net/airoha,en7581-npu.yaml b/Documentation/devicetree/bindings/net/airoha,en7581-npu.yaml
new file mode 100644
index 000000000000..76dd97c3fb40
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/airoha,en7581-npu.yaml
@@ -0,0 +1,84 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/airoha,en7581-npu.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Airoha Network Processor Unit for EN7581 SoC
+
+maintainers:
+  - Lorenzo Bianconi
+
+description:
+  The Airoha Network Processor Unit (NPU) provides a configuration interface
+  to implement wired and wireless hardware flow offloading programming Packet
+  Processor Engine (PPE) flow table.
+
+properties:
+  compatible:
+    enum:
+      - airoha,en7581-npu
+
+  reg:
+    maxItems: 1
+
+  interrupts:
+    items:
+      - description: mbox host irq line
+      - description: watchdog0 irq line
+      - description: watchdog1 irq line
+      - description: watchdog2 irq line
+      - description: watchdog3 irq line
+      - description: watchdog4 irq line
+      - description: watchdog5 irq line
+      - description: watchdog6 irq line
+      - description: watchdog7 irq line
+      - description: wlan irq line0
+      - description: wlan irq line1
+      - description: wlan irq line2
+      - description: wlan irq line3
+      - description: wlan irq line4
+      - description: wlan irq line5
+
+  memory-region:
+    maxItems: 1
+    description:
+      Memory used to store NPU firmware binary.
+
+required:
+  - compatible
+  - reg
+  - interrupts
+  - memory-region
+
+additionalProperties: false
+
+examples:
+  - |
+    #include
+    #include
+    soc {
+      #address-cells = <2>;
+      #size-cells = <2>;
+
+      npu@1e900000 {
+        compatible = "airoha,en7581-npu";
+        reg = <0 0x1e900000 0 0x313000>;
+        interrupts = , , , , , , , , , , , , , , ;
+        memory-region = <&npu_binary>;
+      };
+    };
-- 
2.51.0

From 9b1a0b72264cafdbc1e06ef8531bc6bd693b49ca Mon Sep 17 00:00:00 2001
From: Lorenzo Bianconi
Date: Fri, 28 Feb 2025 11:54:19 +0100
Subject: [PATCH 05/16] dt-bindings: net: airoha: Add airoha,npu phandle
 property

Introduce the airoha,npu phandle property for the NPU node available on
the EN7581 SoC. The Airoha Network Processor Unit (NPU) is used to
offload network traffic forwarded between Packet Switch Engine (PSE)
ports.

Reviewed-by: Krzysztof Kozlowski
Signed-off-by: Lorenzo Bianconi
Signed-off-by: Paolo Abeni
---
 .../devicetree/bindings/net/airoha,en7581-eth.yaml | 10 ++++++++++
 1 file changed, 10 insertions(+)

diff --git a/Documentation/devicetree/bindings/net/airoha,en7581-eth.yaml b/Documentation/devicetree/bindings/net/airoha,en7581-eth.yaml
index c578637c5826..0fdd11265417 100644
--- a/Documentation/devicetree/bindings/net/airoha,en7581-eth.yaml
+++ b/Documentation/devicetree/bindings/net/airoha,en7581-eth.yaml
@@ -63,6 +63,14 @@ properties:
   "#size-cells":
     const: 0
 
+  airoha,npu:
+    $ref: /schemas/types.yaml#/definitions/phandle
+    description:
+      Phandle to the node used to configure the NPU module.
+      The Airoha Network Processor Unit (NPU) provides a configuration
+      interface to implement hardware flow offloading programming Packet
+      Processor Engine (PPE) flow table.
+
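[Editor's note — illustration only, not part of the series] How a
consumer driver is expected to resolve the new airoha,npu phandle; patch
06 implements exactly this (with module refcounting and a device link)
in airoha_npu_get(). Minimal sketch, function name invented:

static struct platform_device *resolve_npu_sketch(struct device *dev)
{
	struct device_node *np;
	struct platform_device *pdev;

	np = of_parse_phandle(dev->of_node, "airoha,npu", 0);
	if (!np)
		return NULL;

	pdev = of_find_device_by_node(np);
	of_node_put(np);

	return pdev; /* caller owns the device reference */
}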
 patternProperties:
   "^ethernet@[1-4]$":
     type: object
@@ -132,6 +140,8 @@ examples:
                      ,
                      ;
 
+      airoha,npu = <&npu>;
+
       #address-cells = <1>;
       #size-cells = <0>;
-- 
2.51.0

From 23290c7bc190def4e1ca61610992d9b7c32e33f3 Mon Sep 17 00:00:00 2001
From: Lorenzo Bianconi
Date: Fri, 28 Feb 2025 11:54:20 +0100
Subject: [PATCH 06/16] net: airoha: Introduce Airoha NPU support

The Packet Processor Engine (PPE) module available on the EN7581 SoC
populates the PPE table with 5-tuple flower rules learned from traffic
forwarded between the GDM ports connected to the Packet Switch Engine
(PSE) module. The airoha_eth driver can enable hw acceleration of
learned 5-tuple rules if the user configures them in a netfilter
flowtable (netfilter flowtable support will be added in subsequent
patches). The airoha_eth driver configures and collects data from the
PPE module via a Network Processor Unit (NPU) RISC-V module available
on the EN7581 SoC.
Introduce basic support for the Airoha NPU module.

Tested-by: Sayantan Nandy
Signed-off-by: Lorenzo Bianconi
Signed-off-by: Paolo Abeni
---
 drivers/net/ethernet/airoha/Kconfig      |   9 +
 drivers/net/ethernet/airoha/Makefile     |   1 +
 drivers/net/ethernet/airoha/airoha_eth.h |   2 +
 drivers/net/ethernet/airoha/airoha_npu.c | 520 +++++++++++++++++++++++
 drivers/net/ethernet/airoha/airoha_npu.h |  34 ++
 5 files changed, 566 insertions(+)
 create mode 100644 drivers/net/ethernet/airoha/airoha_npu.c
 create mode 100644 drivers/net/ethernet/airoha/airoha_npu.h

diff --git a/drivers/net/ethernet/airoha/Kconfig b/drivers/net/ethernet/airoha/Kconfig
index b6a131845f13..1a4cf6a259f6 100644
--- a/drivers/net/ethernet/airoha/Kconfig
+++ b/drivers/net/ethernet/airoha/Kconfig
@@ -7,9 +7,18 @@ config NET_VENDOR_AIROHA
 
 if NET_VENDOR_AIROHA
 
+config NET_AIROHA_NPU
+	tristate "Airoha NPU support"
+	select WANT_DEV_COREDUMP
+	select REGMAP_MMIO
+	help
+	  This driver supports the Airoha Network Processor (NPU) available
+	  on the Airoha SoC family.
+
 config NET_AIROHA
 	tristate "Airoha SoC Gigabit Ethernet support"
 	depends on NET_DSA || !NET_DSA
+	select NET_AIROHA_NPU
 	select PAGE_POOL
 	help
 	  This driver supports the gigabit ethernet MACs in the
diff --git a/drivers/net/ethernet/airoha/Makefile b/drivers/net/ethernet/airoha/Makefile
index 73a6f3680a4c..fcd3fc775948 100644
--- a/drivers/net/ethernet/airoha/Makefile
+++ b/drivers/net/ethernet/airoha/Makefile
@@ -4,3 +4,4 @@
 #
 
 obj-$(CONFIG_NET_AIROHA) += airoha_eth.o
+obj-$(CONFIG_NET_AIROHA_NPU) += airoha_npu.o
diff --git a/drivers/net/ethernet/airoha/airoha_eth.h b/drivers/net/ethernet/airoha/airoha_eth.h
index 44834227a589..7be932a8e8d2 100644
--- a/drivers/net/ethernet/airoha/airoha_eth.h
+++ b/drivers/net/ethernet/airoha/airoha_eth.h
@@ -240,6 +240,8 @@ struct airoha_eth {
 	unsigned long state;
 	void __iomem *fe_regs;
 
+	struct airoha_npu __rcu *npu;
+
 	struct reset_control_bulk_data rsts[AIROHA_MAX_NUM_RSTS];
 	struct reset_control_bulk_data xsi_rsts[AIROHA_MAX_NUM_XSI_RSTS];
diff --git a/drivers/net/ethernet/airoha/airoha_npu.c b/drivers/net/ethernet/airoha/airoha_npu.c
new file mode 100644
index 000000000000..7a5710f9ccf6
--- /dev/null
+++ b/drivers/net/ethernet/airoha/airoha_npu.c
@@ -0,0 +1,520 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025 AIROHA Inc
+ * Author: Lorenzo Bianconi
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include "airoha_npu.h"
+
+#define NPU_EN7581_FIRMWARE_DATA	"airoha/en7581_npu_data.bin"
+#define NPU_EN7581_FIRMWARE_RV32	"airoha/en7581_npu_rv32.bin"
+#define NPU_EN7581_FIRMWARE_RV32_MAX_SIZE	0x200000
+#define NPU_EN7581_FIRMWARE_DATA_MAX_SIZE	0x10000
+#define NPU_DUMP_SIZE	512
+
+#define REG_NPU_LOCAL_SRAM	0x0
+
+#define NPU_PC_BASE_ADDR	0x305000
+#define REG_PC_DBG(_n)	(0x305000 + ((_n) * 0x100))
+
+#define NPU_CLUSTER_BASE_ADDR	0x306000
+
+#define REG_CR_BOOT_TRIGGER	(NPU_CLUSTER_BASE_ADDR + 0x000)
+#define REG_CR_BOOT_CONFIG	(NPU_CLUSTER_BASE_ADDR + 0x004)
+#define REG_CR_BOOT_BASE(_n)	(NPU_CLUSTER_BASE_ADDR + 0x020 + ((_n) << 2))
+
+#define NPU_MBOX_BASE_ADDR	0x30c000
+
+#define REG_CR_MBOX_INT_STATUS	(NPU_MBOX_BASE_ADDR + 0x000)
+#define MBOX_INT_STATUS_MASK	BIT(8)
+
+#define REG_CR_MBOX_INT_MASK(_n)	(NPU_MBOX_BASE_ADDR + 0x004 + ((_n) << 2))
+#define REG_CR_MBQ0_CTRL(_n)	(NPU_MBOX_BASE_ADDR + 0x030 + ((_n) << 2))
+#define REG_CR_MBQ8_CTRL(_n)	(NPU_MBOX_BASE_ADDR + 0x0b0 + ((_n) << 2))
+#define REG_CR_NPU_MIB(_n)	(NPU_MBOX_BASE_ADDR + 0x140 + ((_n) << 2))
+
+#define NPU_TIMER_BASE_ADDR	0x310100
+#define REG_WDT_TIMER_CTRL(_n)	(NPU_TIMER_BASE_ADDR + ((_n) * 0x100))
+#define WDT_EN_MASK	BIT(25)
+#define WDT_INTR_MASK	BIT(21)
+
+enum {
+	NPU_OP_SET = 1,
+	NPU_OP_SET_NO_WAIT,
+	NPU_OP_GET,
+	NPU_OP_GET_NO_WAIT,
+};
+
+enum {
+	NPU_FUNC_WIFI,
+	NPU_FUNC_TUNNEL,
+	NPU_FUNC_NOTIFY,
+	NPU_FUNC_DBA,
+	NPU_FUNC_TR471,
+	NPU_FUNC_PPE,
+};
+
+enum {
+	NPU_MBOX_ERROR,
+	NPU_MBOX_SUCCESS,
+};
+
+enum {
+	PPE_FUNC_SET_WAIT,
+	PPE_FUNC_SET_WAIT_HWNAT_INIT,
+	PPE_FUNC_SET_WAIT_HWNAT_DEINIT,
+	PPE_FUNC_SET_WAIT_API,
+};
+
+enum {
+	PPE2_SRAM_SET_ENTRY,
+	PPE_SRAM_SET_ENTRY,
+	PPE_SRAM_SET_VAL,
+	PPE_SRAM_RESET_VAL,
+};
+
+enum {
+	QDMA_WAN_ETHER = 1,
+	QDMA_WAN_PON_XDSL,
+};
+
+#define MBOX_MSG_FUNC_ID	GENMASK(14, 11)
+#define MBOX_MSG_STATIC_BUF	BIT(5)
+#define MBOX_MSG_STATUS	GENMASK(4, 2)
+#define MBOX_MSG_DONE	BIT(1)
+#define MBOX_MSG_WAIT_RSP	BIT(0)
+
+#define PPE_TYPE_L2B_IPV4	2
+#define PPE_TYPE_L2B_IPV4_IPV6	3
+
+struct ppe_mbox_data {
+	u32 func_type;
+	u32 func_id;
+	union {
+		struct {
+			u8 cds;
+			u8 xpon_hal_api;
+			u8 wan_xsi;
+			u8 ct_joyme4;
+			int ppe_type;
+			int wan_mode;
+			int wan_sel;
+		} init_info;
+		struct {
+			int func_id;
+			u32 size;
+			u32 data;
+		} set_info;
+	};
+};
+
+static int airoha_npu_send_msg(struct airoha_npu *npu, int func_id,
+			       void *p, int size)
+{
+	u16 core = 0; /* FIXME */
+	u32 val, offset = core << 4;
+	dma_addr_t dma_addr;
+	void *addr;
+	int ret;
+
+	addr = kmemdup(p, size, GFP_ATOMIC);
+	if (!addr)
+		return -ENOMEM;
+
+	dma_addr = dma_map_single(npu->dev, addr, size, DMA_TO_DEVICE);
+	ret = dma_mapping_error(npu->dev, dma_addr);
+	if (ret)
+		goto out;
+
+	spin_lock_bh(&npu->cores[core].lock);
+
+	regmap_write(npu->regmap, REG_CR_MBQ0_CTRL(0) + offset, dma_addr);
+	regmap_write(npu->regmap, REG_CR_MBQ0_CTRL(1) + offset, size);
+	regmap_read(npu->regmap, REG_CR_MBQ0_CTRL(2) + offset, &val);
+	regmap_write(npu->regmap, REG_CR_MBQ0_CTRL(2) + offset, val + 1);
+	val = FIELD_PREP(MBOX_MSG_FUNC_ID, func_id) | MBOX_MSG_WAIT_RSP;
+	regmap_write(npu->regmap, REG_CR_MBQ0_CTRL(3) + offset, val);
+
+	ret = regmap_read_poll_timeout_atomic(npu->regmap,
+					      REG_CR_MBQ0_CTRL(3) + offset,
+					      val, (val & MBOX_MSG_DONE),
+					      100, 100 * MSEC_PER_SEC);
+	if (!ret && FIELD_GET(MBOX_MSG_STATUS, val) != NPU_MBOX_SUCCESS)
+		ret = -EINVAL;
+
+	spin_unlock_bh(&npu->cores[core].lock);
+
+	dma_unmap_single(npu->dev, dma_addr, size, DMA_TO_DEVICE);
+out:
+	kfree(addr);
+
+	return ret;
+}
+
+static int airoha_npu_run_firmware(struct device *dev, void __iomem *base,
+				   struct reserved_mem *rmem)
+{
+	const struct firmware *fw;
+	void __iomem *addr;
+	int ret;
+
+	ret = request_firmware(&fw, NPU_EN7581_FIRMWARE_RV32, dev);
+	if (ret)
+		return ret == -ENOENT ? -EPROBE_DEFER : ret;
+
+	if (fw->size > NPU_EN7581_FIRMWARE_RV32_MAX_SIZE) {
+		dev_err(dev, "%s: fw size too overlimit (%zu)\n",
+			NPU_EN7581_FIRMWARE_RV32, fw->size);
+		ret = -E2BIG;
+		goto out;
+	}
+
+	addr = devm_ioremap(dev, rmem->base, rmem->size);
+	if (!addr) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	memcpy_toio(addr, fw->data, fw->size);
+	release_firmware(fw);
+
+	ret = request_firmware(&fw, NPU_EN7581_FIRMWARE_DATA, dev);
+	if (ret)
+		return ret == -ENOENT ? -EPROBE_DEFER : ret;
+
+	if (fw->size > NPU_EN7581_FIRMWARE_DATA_MAX_SIZE) {
+		dev_err(dev, "%s: fw size too overlimit (%zu)\n",
+			NPU_EN7581_FIRMWARE_DATA, fw->size);
+		ret = -E2BIG;
+		goto out;
+	}
+
+	memcpy_toio(base + REG_NPU_LOCAL_SRAM, fw->data, fw->size);
+out:
+	release_firmware(fw);
+
+	return ret;
+}
+
+static irqreturn_t airoha_npu_mbox_handler(int irq, void *npu_instance)
+{
+	struct airoha_npu *npu = npu_instance;
+
+	/* clear mbox interrupt status */
+	regmap_write(npu->regmap, REG_CR_MBOX_INT_STATUS,
+		     MBOX_INT_STATUS_MASK);
+
+	/* acknowledge npu */
+	regmap_update_bits(npu->regmap, REG_CR_MBQ8_CTRL(3),
+			   MBOX_MSG_STATUS | MBOX_MSG_DONE, MBOX_MSG_DONE);
+
+	return IRQ_HANDLED;
+}
+
+static void airoha_npu_wdt_work(struct work_struct *work)
+{
+	struct airoha_npu_core *core;
+	struct airoha_npu *npu;
+	void *dump;
+	u32 val[3];
+	int c;
+
+	core = container_of(work, struct airoha_npu_core, wdt_work);
+	npu = core->npu;
+
+	dump = vzalloc(NPU_DUMP_SIZE);
+	if (!dump)
+		return;
+
+	c = core - &npu->cores[0];
+	regmap_bulk_read(npu->regmap, REG_PC_DBG(c), val, ARRAY_SIZE(val));
+	snprintf(dump, NPU_DUMP_SIZE, "PC: %08x SP: %08x LR: %08x\n",
+		 val[0], val[1], val[2]);
+
+	dev_coredumpv(npu->dev, dump, NPU_DUMP_SIZE, GFP_KERNEL);
+}
+
+static irqreturn_t airoha_npu_wdt_handler(int irq, void *core_instance)
+{
+	struct airoha_npu_core *core = core_instance;
+	struct airoha_npu *npu = core->npu;
+	int c = core - &npu->cores[0];
+	u32 val;
+
+	regmap_set_bits(npu->regmap, REG_WDT_TIMER_CTRL(c), WDT_INTR_MASK);
+	if (!regmap_read(npu->regmap, REG_WDT_TIMER_CTRL(c), &val) &&
+	    FIELD_GET(WDT_EN_MASK, val))
+		schedule_work(&core->wdt_work);
+
+	return IRQ_HANDLED;
+}
+
+static int airoha_npu_ppe_init(struct airoha_npu *npu)
+{
+	struct ppe_mbox_data ppe_data = {
+		.func_type = NPU_OP_SET,
+		.func_id = PPE_FUNC_SET_WAIT_HWNAT_INIT,
+		.init_info = {
+			.ppe_type = PPE_TYPE_L2B_IPV4_IPV6,
+			.wan_mode = QDMA_WAN_ETHER,
+		},
+	};
+
+	return airoha_npu_send_msg(npu, NPU_FUNC_PPE, &ppe_data,
+				   sizeof(struct ppe_mbox_data));
+}
+
+static int airoha_npu_ppe_deinit(struct airoha_npu *npu)
+{
+	struct ppe_mbox_data ppe_data = {
+		.func_type = NPU_OP_SET,
+		.func_id = PPE_FUNC_SET_WAIT_HWNAT_DEINIT,
+	};
+
+	return airoha_npu_send_msg(npu, NPU_FUNC_PPE, &ppe_data,
+				   sizeof(struct ppe_mbox_data));
+}
+
+static int airoha_npu_ppe_flush_sram_entries(struct airoha_npu *npu,
+					     dma_addr_t foe_addr,
+					     int sram_num_entries)
+{
+	struct ppe_mbox_data ppe_data = {
+		.func_type = NPU_OP_SET,
+		.func_id = PPE_FUNC_SET_WAIT_API,
+		.set_info = {
+			.func_id = PPE_SRAM_RESET_VAL,
+			.data = foe_addr,
+			.size = sram_num_entries,
+		},
+	};
+
+	return airoha_npu_send_msg(npu, NPU_FUNC_PPE, &ppe_data,
+				   sizeof(struct ppe_mbox_data));
+}
+
+static int airoha_npu_foe_commit_entry(struct airoha_npu *npu,
+				       dma_addr_t foe_addr,
+				       u32 entry_size, u32 hash, bool ppe2)
+{
+	struct ppe_mbox_data ppe_data = {
+		.func_type = NPU_OP_SET,
+		.func_id = PPE_FUNC_SET_WAIT_API,
+		.set_info = {
+			.data = foe_addr,
+			.size = entry_size,
+		},
+	};
+	int err;
+
+	ppe_data.set_info.func_id = ppe2 ? PPE2_SRAM_SET_ENTRY
+					 : PPE_SRAM_SET_ENTRY;
+
+	err = airoha_npu_send_msg(npu, NPU_FUNC_PPE, &ppe_data,
+				  sizeof(struct ppe_mbox_data));
+	if (err)
+		return err;
+
+	ppe_data.set_info.func_id = PPE_SRAM_SET_VAL;
+	ppe_data.set_info.data = hash;
+	ppe_data.set_info.size = sizeof(u32);
+
+	return airoha_npu_send_msg(npu, NPU_FUNC_PPE, &ppe_data,
+				   sizeof(struct ppe_mbox_data));
+}
+
+struct airoha_npu *airoha_npu_get(struct device *dev)
+{
+	struct platform_device *pdev;
+	struct device_node *np;
+	struct airoha_npu *npu;
+
+	np = of_parse_phandle(dev->of_node, "airoha,npu", 0);
+	if (!np)
+		return ERR_PTR(-ENODEV);
+
+	pdev = of_find_device_by_node(np);
+	of_node_put(np);
+
+	if (!pdev) {
+		dev_err(dev, "cannot find device node %s\n", np->name);
+		return ERR_PTR(-ENODEV);
+	}
+
+	if (!try_module_get(THIS_MODULE)) {
+		dev_err(dev, "failed to get the device driver module\n");
+		npu = ERR_PTR(-ENODEV);
+		goto error_pdev_put;
+	}
+
+	npu = platform_get_drvdata(pdev);
+	if (!npu) {
+		npu = ERR_PTR(-ENODEV);
+		goto error_module_put;
+	}
+
+	if (!device_link_add(dev, &pdev->dev, DL_FLAG_AUTOREMOVE_SUPPLIER)) {
+		dev_err(&pdev->dev,
+			"failed to create device link to consumer %s\n",
+			dev_name(dev));
+		npu = ERR_PTR(-EINVAL);
+		goto error_module_put;
+	}
+
+	return npu;
+
+error_module_put:
+	module_put(THIS_MODULE);
+error_pdev_put:
+	platform_device_put(pdev);
+
+	return npu;
+}
+EXPORT_SYMBOL_GPL(airoha_npu_get);
+
+void airoha_npu_put(struct airoha_npu *npu)
+{
+	module_put(THIS_MODULE);
+	put_device(npu->dev);
+}
+EXPORT_SYMBOL_GPL(airoha_npu_put);
+
+static const struct of_device_id of_airoha_npu_match[] = {
+	{ .compatible = "airoha,en7581-npu" },
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_airoha_npu_match);
+
+static const struct regmap_config regmap_config = {
+	.name		= "npu",
+	.reg_bits	= 32,
+	.val_bits	= 32,
+	.reg_stride	= 4,
+	.disable_locking = true,
+};
+
+static int airoha_npu_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct reserved_mem *rmem;
+	struct airoha_npu *npu;
+	struct device_node *np;
+	void __iomem *base;
+	int i, irq, err;
+
+	base = devm_platform_ioremap_resource(pdev, 0);
+	if (IS_ERR(base))
+		return PTR_ERR(base);
+
+	npu = devm_kzalloc(dev, sizeof(*npu), GFP_KERNEL);
+	if (!npu)
+		return -ENOMEM;
+
+	npu->dev = dev;
+	npu->ops.ppe_init = airoha_npu_ppe_init;
+	npu->ops.ppe_deinit = airoha_npu_ppe_deinit;
+	npu->ops.ppe_flush_sram_entries = airoha_npu_ppe_flush_sram_entries;
+	npu->ops.ppe_foe_commit_entry = airoha_npu_foe_commit_entry;
+
+	npu->regmap = devm_regmap_init_mmio(dev, base, &regmap_config);
+	if (IS_ERR(npu->regmap))
+		return PTR_ERR(npu->regmap);
+
+	np = of_parse_phandle(dev->of_node, "memory-region", 0);
+	if (!np)
+		return -ENODEV;
+
+	rmem = of_reserved_mem_lookup(np);
+	of_node_put(np);
+
+	if (!rmem)
+		return -ENODEV;
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0)
+		return irq;
+
+	err = devm_request_irq(dev, irq, airoha_npu_mbox_handler,
+			       IRQF_SHARED, "airoha-npu-mbox", npu);
+	if (err)
+		return err;
+
+	for (i = 0; i < ARRAY_SIZE(npu->cores); i++) {
+		struct airoha_npu_core *core = &npu->cores[i];
+
+		spin_lock_init(&core->lock);
+		core->npu = npu;
+
+		irq = platform_get_irq(pdev, i + 1);
+		if (irq < 0)
+			return irq;
+
+		err = devm_request_irq(dev, irq, airoha_npu_wdt_handler,
+				       IRQF_SHARED, "airoha-npu-wdt", core);
+		if (err)
+			return err;
+
+		INIT_WORK(&core->wdt_work, airoha_npu_wdt_work);
+	}
+
+	err = dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
+	if (err)
+		return err;
+
+	err = airoha_npu_run_firmware(dev, base, rmem);
+	if (err)
+		return dev_err_probe(dev, err, "failed to run npu firmware\n");
+
+	regmap_write(npu->regmap, REG_CR_NPU_MIB(10),
+		     rmem->base + NPU_EN7581_FIRMWARE_RV32_MAX_SIZE);
+	regmap_write(npu->regmap, REG_CR_NPU_MIB(11), 0x40000); /* SRAM 256K */
+	regmap_write(npu->regmap, REG_CR_NPU_MIB(12), 0);
+	regmap_write(npu->regmap, REG_CR_NPU_MIB(21), 1);
+	msleep(100);
+
+	/* setting booting address */
+	for (i = 0; i < NPU_NUM_CORES; i++)
+		regmap_write(npu->regmap, REG_CR_BOOT_BASE(i), rmem->base);
+	usleep_range(1000, 2000);
+
+	/* enable NPU cores */
+	/* do not start core3 since it is used for WiFi offloading */
+	regmap_write(npu->regmap, REG_CR_BOOT_CONFIG, 0xf7);
+	regmap_write(npu->regmap, REG_CR_BOOT_TRIGGER, 0x1);
+	msleep(100);
+
+	platform_set_drvdata(pdev, npu);
+
+	return 0;
+}
+
+static void airoha_npu_remove(struct platform_device *pdev)
+{
+	struct airoha_npu *npu = platform_get_drvdata(pdev);
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(npu->cores); i++)
+		cancel_work_sync(&npu->cores[i].wdt_work);
+}
+
+static struct platform_driver airoha_npu_driver = {
+	.probe = airoha_npu_probe,
+	.remove = airoha_npu_remove,
+	.driver = {
+		.name = "airoha-npu",
+		.of_match_table = of_airoha_npu_match,
+	},
+};
+module_platform_driver(airoha_npu_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Lorenzo Bianconi ");
+MODULE_DESCRIPTION("Airoha Network Processor Unit driver");
diff --git a/drivers/net/ethernet/airoha/airoha_npu.h b/drivers/net/ethernet/airoha/airoha_npu.h
new file mode 100644
index 000000000000..a2b8ae4d9473
--- /dev/null
+++ b/drivers/net/ethernet/airoha/airoha_npu.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2025 AIROHA Inc
+ * Author: Lorenzo Bianconi
+ */
+
+#define NPU_NUM_CORES	8
+
+struct airoha_npu {
+	struct device *dev;
+	struct regmap *regmap;
+
+	struct airoha_npu_core {
+		struct airoha_npu *npu;
+		/* protect concurrent npu memory accesses */
+		spinlock_t lock;
+		struct work_struct wdt_work;
+	} cores[NPU_NUM_CORES];
+
+	struct {
+		int (*ppe_init)(struct airoha_npu *npu);
+		int (*ppe_deinit)(struct airoha_npu *npu);
+		int (*ppe_flush_sram_entries)(struct airoha_npu *npu,
+					      dma_addr_t foe_addr,
+					      int sram_num_entries);
+		int (*ppe_foe_commit_entry)(struct airoha_npu *npu,
+					    dma_addr_t foe_addr,
+					    u32 entry_size, u32 hash,
+					    bool ppe2);
+	} ops;
+};
+
+struct airoha_npu *airoha_npu_get(struct device *dev);
+void airoha_npu_put(struct airoha_npu *npu);
-- 
2.51.0
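[Editor's note — illustration only, not part of the series] Consumers
reach the NPU through the ops table rather than direct symbols, so the
airoha_eth module only hard-depends on airoha_npu_get()/airoha_npu_put().
A hedged sketch of the attach sequence (the next patch implements the
real version in airoha_ppe.c; the function name is invented and error
handling is trimmed):

static int ppe_attach_npu_sketch(struct airoha_eth *eth)
{
	struct airoha_npu *npu = airoha_npu_get(eth->dev);
	int err;

	if (IS_ERR(npu))
		return PTR_ERR(npu);

	/* bring up the hw-NAT datapath on the NPU firmware side */
	err = npu->ops.ppe_init(npu);
	if (err) {
		airoha_npu_put(npu);
		return err;
	}

	/* publish the NPU handle for the RCU readers in the hot path */
	rcu_assign_pointer(eth->npu, npu);
	return 0;
}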
From 00a7678310fe3d3f408513e55d9a0b67f0db380f Mon Sep 17 00:00:00 2001
From: Lorenzo Bianconi
Date: Fri, 28 Feb 2025 11:54:21 +0100
Subject: [PATCH 07/16] net: airoha: Introduce flowtable offload support

Introduce netfilter flowtable integration in order to allow the
airoha_eth driver to offload 5-tuple flower rules learned by the PPE
module if the user accelerates them using an nft configuration similar
to the one reported below:

table inet filter {
	flowtable ft {
		hook ingress priority filter
		devices = { lan1, lan2, lan3, lan4, eth1 }
		flags offload;
	}
	chain forward {
		type filter hook forward priority filter; policy accept;
		meta l4proto { tcp, udp } flow add @ft
	}
}

Tested-by: Sayantan Nandy
Signed-off-by: Lorenzo Bianconi
Signed-off-by: Paolo Abeni
---
 drivers/net/ethernet/airoha/Makefile      |   3 +-
 drivers/net/ethernet/airoha/airoha_eth.c  |  60 +-
 drivers/net/ethernet/airoha/airoha_eth.h  | 250 ++++++
 drivers/net/ethernet/airoha/airoha_ppe.c  | 901 ++++++++++++++++++++++
 drivers/net/ethernet/airoha/airoha_regs.h | 107 ++-
 5 files changed, 1314 insertions(+), 7 deletions(-)
 create mode 100644 drivers/net/ethernet/airoha/airoha_ppe.c

diff --git a/drivers/net/ethernet/airoha/Makefile b/drivers/net/ethernet/airoha/Makefile
index fcd3fc775948..6deff2f16229 100644
--- a/drivers/net/ethernet/airoha/Makefile
+++ b/drivers/net/ethernet/airoha/Makefile
@@ -3,5 +3,6 @@
 # Airoha for the Mediatek SoCs built-in ethernet macs
 #
 
-obj-$(CONFIG_NET_AIROHA) += airoha_eth.o
+obj-$(CONFIG_NET_AIROHA) += airoha-eth.o
+airoha-eth-y := airoha_eth.o airoha_ppe.o
 obj-$(CONFIG_NET_AIROHA_NPU) += airoha_npu.o
diff --git a/drivers/net/ethernet/airoha/airoha_eth.c b/drivers/net/ethernet/airoha/airoha_eth.c
index 8a5f7c080b48..382ed81df808 100644
--- a/drivers/net/ethernet/airoha/airoha_eth.c
+++ b/drivers/net/ethernet/airoha/airoha_eth.c
@@ -8,7 +8,6 @@
 #include
 #include
 #include
-#include
 #include
 #include
 #include
@@ -619,6 +618,7 @@ static int airoha_qdma_rx_process(struct airoha_queue *q, int budget)
 	while (done < budget) {
 		struct airoha_queue_entry *e = &q->entry[q->tail];
 		struct airoha_qdma_desc *desc = &q->desc[q->tail];
+		u32 hash, reason, msg1 = le32_to_cpu(desc->msg1);
 		dma_addr_t dma_addr = le32_to_cpu(desc->addr);
 		u32 desc_ctrl = le32_to_cpu(desc->ctrl);
 		struct airoha_gdm_port *port;
@@ -681,6 +681,15 @@ static int airoha_qdma_rx_process(struct airoha_queue *q, int budget)
 				       &port->dsa_meta[sptag]->dst);
 		}
 
+		hash = FIELD_GET(AIROHA_RXD4_FOE_ENTRY, msg1);
+		if (hash != AIROHA_RXD4_FOE_ENTRY)
+			skb_set_hash(skb, jhash_1word(hash, 0),
+				     PKT_HASH_TYPE_L4);
+
+		reason = FIELD_GET(AIROHA_RXD4_PPE_CPU_REASON, msg1);
+		if (reason == PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED)
+			airoha_ppe_check_skb(eth->ppe, hash);
+
 		napi_gro_receive(&q->napi, skb);
 
 		done++;
@@ -1301,6 +1310,10 @@ static int airoha_hw_init(struct platform_device *pdev,
 			return err;
 	}
 
+	err = airoha_ppe_init(eth);
+	if (err)
+		return err;
+
 	set_bit(DEV_STATE_INITIALIZED, &eth->state);
 
 	return 0;
@@ -2168,6 +2181,47 @@ static int airoha_tc_htb_alloc_leaf_queue(struct airoha_gdm_port *port,
 	return 0;
 }
 
+static int airoha_dev_setup_tc_block(struct airoha_gdm_port *port,
+				     struct flow_block_offload *f)
+{
+	flow_setup_cb_t *cb = airoha_ppe_setup_tc_block_cb;
+	static LIST_HEAD(block_cb_list);
+	struct flow_block_cb *block_cb;
+
+	if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
+		return -EOPNOTSUPP;
+
+	f->driver_block_list = &block_cb_list;
+	switch (f->command) {
+	case FLOW_BLOCK_BIND:
+		block_cb = flow_block_cb_lookup(f->block, cb, port->dev);
+		if (block_cb) {
+			flow_block_cb_incref(block_cb);
+			return 0;
+		}
+		block_cb = flow_block_cb_alloc(cb, port->dev, port->dev, NULL);
+		if (IS_ERR(block_cb))
+			return PTR_ERR(block_cb);
+
+		flow_block_cb_incref(block_cb);
+		flow_block_cb_add(block_cb, f);
+		list_add_tail(&block_cb->driver_list, &block_cb_list);
+		return 0;
+	case FLOW_BLOCK_UNBIND:
+		block_cb = flow_block_cb_lookup(f->block, cb, port->dev);
+		if (!block_cb)
+			return -ENOENT;
+
+		if (!flow_block_cb_decref(block_cb)) {
+			flow_block_cb_remove(block_cb, f);
+			list_del(&block_cb->driver_list);
+		}
+		return 0;
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
 static void airoha_tc_remove_htb_queue(struct airoha_gdm_port *port, int queue)
 {
 	struct net_device *dev = port->dev;
@@ -2251,6 +2305,9 @@ static int airoha_dev_tc_setup(struct net_device *dev, enum tc_setup_type type,
 		return airoha_tc_setup_qdisc_ets(port, type_data);
 	case TC_SETUP_QDISC_HTB:
 		return airoha_tc_setup_qdisc_htb(port, type_data);
+	case TC_SETUP_BLOCK:
+	case TC_SETUP_FT:
+		return airoha_dev_setup_tc_block(port, type_data);
 	default:
 		return -EOPNOTSUPP;
 	}
@@ -2507,6 +2564,7 @@ static void airoha_remove(struct platform_device *pdev)
 	}
 	free_netdev(eth->napi_dev);
 
+	airoha_ppe_deinit(eth);
 	platform_set_drvdata(pdev, NULL);
 }
diff --git a/drivers/net/ethernet/airoha/airoha_eth.h b/drivers/net/ethernet/airoha/airoha_eth.h
index 7be932a8e8d2..db25a6d3b2cd 100644
--- a/drivers/net/ethernet/airoha/airoha_eth.h
+++ b/drivers/net/ethernet/airoha/airoha_eth.h
@@ -12,6 +12,7 @@
 #include
 #include
 #include
+#include
 
 #define AIROHA_MAX_NUM_GDM_PORTS	4
 #define AIROHA_MAX_NUM_QDMA		2
@@ -44,6 +45,15 @@
 #define QDMA_METER_IDX(_n)	((_n) & 0xff)
 #define QDMA_METER_GROUP(_n)	(((_n) >> 8) & 0x3)
 
+#define PPE_NUM				2
+#define PPE1_SRAM_NUM_ENTRIES		(8 * 1024)
+#define PPE_SRAM_NUM_ENTRIES		(2 * PPE1_SRAM_NUM_ENTRIES)
+#define PPE_DRAM_NUM_ENTRIES		(16 * 1024)
+#define PPE_NUM_ENTRIES			(PPE_SRAM_NUM_ENTRIES + PPE_DRAM_NUM_ENTRIES)
+#define PPE_HASH_MASK			(PPE_NUM_ENTRIES - 1)
+#define PPE_ENTRY_SIZE			80
+#define PPE_RAM_NUM_ENTRIES_SHIFT(_n)	(__ffs((_n) >> 10))
+
 #define MTK_HDR_LEN	4
 #define MTK_HDR_XMIT_TAGGED_TPID_8100	1
 #define MTK_HDR_XMIT_TAGGED_TPID_88A8	2
@@ -195,6 +205,224 @@ struct airoha_hw_stats {
 	u64 rx_len[7];
 };
 
+enum {
+	PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED = 0x0f,
+};
+
+enum {
+	AIROHA_FOE_STATE_INVALID,
+	AIROHA_FOE_STATE_UNBIND,
+	AIROHA_FOE_STATE_BIND,
+	AIROHA_FOE_STATE_FIN
+};
+
+enum {
+	PPE_PKT_TYPE_IPV4_HNAPT = 0,
+	PPE_PKT_TYPE_IPV4_ROUTE = 1,
+	PPE_PKT_TYPE_BRIDGE = 2,
+	PPE_PKT_TYPE_IPV4_DSLITE = 3,
+	PPE_PKT_TYPE_IPV6_ROUTE_3T = 4,
+	PPE_PKT_TYPE_IPV6_ROUTE_5T = 5,
+	PPE_PKT_TYPE_IPV6_6RD = 7,
+};
+
+#define AIROHA_FOE_MAC_SMAC_ID		GENMASK(20, 16)
+#define AIROHA_FOE_MAC_PPPOE_ID		GENMASK(15, 0)
+
+struct airoha_foe_mac_info_common {
+	u16 vlan1;
+	u16 etype;
+
+	u32 dest_mac_hi;
+
+	u16 vlan2;
+	u16 dest_mac_lo;
+
+	u32 src_mac_hi;
+};
+
+struct airoha_foe_mac_info {
+	struct airoha_foe_mac_info_common common;
+
+	u16 pppoe_id;
+	u16 src_mac_lo;
+};
+
+#define AIROHA_FOE_IB1_UNBIND_PREBIND		BIT(24)
+#define AIROHA_FOE_IB1_UNBIND_PACKETS		GENMASK(23, 8)
+#define AIROHA_FOE_IB1_UNBIND_TIMESTAMP		GENMASK(7, 0)
+
+#define AIROHA_FOE_IB1_BIND_STATIC		BIT(31)
+#define AIROHA_FOE_IB1_BIND_UDP			BIT(30)
+#define AIROHA_FOE_IB1_BIND_STATE		GENMASK(29, 28)
+#define AIROHA_FOE_IB1_BIND_PACKET_TYPE		GENMASK(27, 25)
+#define AIROHA_FOE_IB1_BIND_TTL			BIT(24)
+#define AIROHA_FOE_IB1_BIND_TUNNEL_DECAP	BIT(23)
+#define AIROHA_FOE_IB1_BIND_PPPOE		BIT(22)
+#define AIROHA_FOE_IB1_BIND_VPM			GENMASK(21, 20)
+#define AIROHA_FOE_IB1_BIND_VLAN_LAYER		GENMASK(19, 16)
+#define AIROHA_FOE_IB1_BIND_KEEPALIVE		BIT(15)
+#define AIROHA_FOE_IB1_BIND_TIMESTAMP		GENMASK(14, 0)
+
+#define AIROHA_FOE_IB2_DSCP		GENMASK(31, 24)
+#define AIROHA_FOE_IB2_PORT_AG		GENMASK(23, 13)
+#define AIROHA_FOE_IB2_PCP		BIT(12)
+#define AIROHA_FOE_IB2_MULTICAST	BIT(11)
+#define AIROHA_FOE_IB2_FAST_PATH	BIT(10)
+#define AIROHA_FOE_IB2_PSE_QOS		BIT(9)
+#define AIROHA_FOE_IB2_PSE_PORT		GENMASK(8, 5)
+#define AIROHA_FOE_IB2_NBQ		GENMASK(4, 0)
+
+#define AIROHA_FOE_ACTDP		GENMASK(31, 24)
+#define AIROHA_FOE_SHAPER_ID		GENMASK(23, 16)
+#define AIROHA_FOE_CHANNEL		GENMASK(15, 11)
+#define AIROHA_FOE_QID			GENMASK(10, 8)
+#define AIROHA_FOE_DPI			BIT(7)
+#define AIROHA_FOE_TUNNEL		BIT(6)
+#define AIROHA_FOE_TUNNEL_ID		GENMASK(5, 0)
+
+struct airoha_foe_bridge {
+	u32 dest_mac_hi;
+
+	u16 src_mac_hi;
+	u16 dest_mac_lo;
+
+	u32 src_mac_lo;
+
+	u32 ib2;
+
+	u32 rsv[5];
+
+	u32 data;
+
+	struct airoha_foe_mac_info l2;
+};
+
+struct airoha_foe_ipv4_tuple {
+	u32 src_ip;
+	u32 dest_ip;
+	union {
+		struct {
+			u16 dest_port;
+			u16 src_port;
+		};
+		struct {
+			u8 protocol;
+			u8 _pad[3]; /* fill with 0xa5a5a5 */
+		};
+		u32 ports;
+	};
+};
+
+struct airoha_foe_ipv4 {
+	struct airoha_foe_ipv4_tuple orig_tuple;
+
+	u32 ib2;
+
+	struct airoha_foe_ipv4_tuple new_tuple;
+
+	u32 rsv[2];
+
+	u32 data;
+
+	struct airoha_foe_mac_info l2;
+};
+
+struct airoha_foe_ipv4_dslite {
+	struct airoha_foe_ipv4_tuple ip4;
+
+	u32 ib2;
+
+	u8 flow_label[3];
+	u8 priority;
+
+	u32 rsv[4];
+
+	u32 data;
+
+	struct airoha_foe_mac_info l2;
+};
+
+struct airoha_foe_ipv6 {
+	u32 src_ip[4];
+	u32 dest_ip[4];
+
+	union {
+		struct {
+			u16 dest_port;
+			u16 src_port;
+		};
+		struct {
+			u8 protocol;
+			u8 pad[3];
+		};
+		u32 ports;
+	};
+
+	u32 data;
+
+	u32 ib2;
+
+	struct airoha_foe_mac_info_common l2;
+};
+
+struct airoha_foe_entry {
+	union {
+		struct {
+			u32 ib1;
+			union {
+				struct airoha_foe_bridge bridge;
+				struct airoha_foe_ipv4 ipv4;
+				struct airoha_foe_ipv4_dslite dslite;
+				struct airoha_foe_ipv6 ipv6;
+				DECLARE_FLEX_ARRAY(u32, d);
+			};
+		};
+		u8 data[PPE_ENTRY_SIZE];
+	};
+};
+
+struct airoha_flow_data {
+	struct ethhdr eth;
+
+	union {
+		struct {
+			__be32 src_addr;
+			__be32 dst_addr;
+		} v4;
+
+		struct {
+			struct in6_addr src_addr;
+			struct in6_addr dst_addr;
+		} v6;
+	};
+
+	__be16 src_port;
+	__be16 dst_port;
+
+	struct {
+		struct {
+			u16 id;
+			__be16 proto;
+		} hdr[2];
+		u8 num;
+	} vlan;
+	struct {
+		u16 sid;
+		u8 num;
+	} pppoe;
+};
+
+struct airoha_flow_table_entry {
+	struct hlist_node list;
+
+	struct airoha_foe_entry data;
+	u32 hash;
+
+	struct rhash_head node;
+	unsigned long cookie;
+};
+
 struct airoha_qdma {
 	struct airoha_eth *eth;
 	void __iomem *regs;
@@ -234,6 +462,19 @@ struct airoha_gdm_port {
 	struct metadata_dst *dsa_meta[AIROHA_MAX_DSA_PORTS];
 };
 
+#define AIROHA_RXD4_PPE_CPU_REASON	GENMASK(20, 16)
+#define AIROHA_RXD4_FOE_ENTRY		GENMASK(15, 0)
+
+struct airoha_ppe {
+	struct airoha_eth *eth;
+
+	void *foe;
+	dma_addr_t foe_dma;
+
+	struct hlist_head *foe_flow;
+	u16 foe_check_time[PPE_NUM_ENTRIES];
+};
+
 struct airoha_eth {
 	struct device *dev;
 
@@ -242,6 +483,9 @@ struct airoha_eth {
 
 	struct airoha_npu __rcu *npu;
 
+	struct airoha_ppe *ppe;
+	struct rhashtable flow_table;
+
 	struct reset_control_bulk_data rsts[AIROHA_MAX_NUM_RSTS];
 	struct reset_control_bulk_data xsi_rsts[AIROHA_MAX_NUM_XSI_RSTS];
@@ -277,4 +521,10 @@ u32 airoha_rmw(void __iomem *base, u32 offset, u32 mask, u32 val);
 #define airoha_qdma_clear(qdma, offset, val)	\
 	airoha_rmw((qdma)->regs, (offset), (val), 0)
 
+void airoha_ppe_check_skb(struct airoha_ppe *ppe, u16 hash);
+int airoha_ppe_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
+				 void *cb_priv);
+int airoha_ppe_init(struct airoha_eth *eth);
+void airoha_ppe_deinit(struct airoha_eth *eth);
+
 #endif /* AIROHA_ETH_H */
diff --git a/drivers/net/ethernet/airoha/airoha_ppe.c b/drivers/net/ethernet/airoha/airoha_ppe.c
new file mode 100644
index 000000000000..37074121224a
--- /dev/null
+++ b/drivers/net/ethernet/airoha/airoha_ppe.c
@@ -0,0 +1,901 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025 AIROHA Inc
+ * Author: Lorenzo Bianconi
+ */
+
+#include
+#include
+#include
+#include
+#include
+
+#include "airoha_npu.h"
+#include "airoha_regs.h"
+#include "airoha_eth.h"
+
+static DEFINE_MUTEX(flow_offload_mutex);
+static DEFINE_SPINLOCK(ppe_lock);
+
+static const struct rhashtable_params airoha_flow_table_params = {
+	.head_offset = offsetof(struct airoha_flow_table_entry, node),
+	.key_offset = offsetof(struct airoha_flow_table_entry, cookie),
+	.key_len = sizeof(unsigned long),
+	.automatic_shrinking = true,
+};
+
+static bool airoha_ppe2_is_enabled(struct airoha_eth *eth)
+{
+	return airoha_fe_rr(eth, REG_PPE_GLO_CFG(1)) & PPE_GLO_CFG_EN_MASK;
+}
+
+static u32 airoha_ppe_get_timestamp(struct airoha_ppe *ppe)
+{
+	u16 timestamp = airoha_fe_rr(ppe->eth, REG_FE_FOE_TS);
+
+	return FIELD_GET(AIROHA_FOE_IB1_BIND_TIMESTAMP, timestamp);
+}
+
+static void airoha_ppe_hw_init(struct airoha_ppe *ppe)
+{
+	u32 sram_tb_size, sram_num_entries, dram_num_entries;
+	struct airoha_eth *eth = ppe->eth;
+	int i;
+
+	sram_tb_size = PPE_SRAM_NUM_ENTRIES * sizeof(struct airoha_foe_entry);
+	dram_num_entries = PPE_RAM_NUM_ENTRIES_SHIFT(PPE_DRAM_NUM_ENTRIES);
+
+	for (i = 0; i < PPE_NUM; i++) {
+		int p;
+
+		airoha_fe_wr(eth, REG_PPE_TB_BASE(i),
+			     ppe->foe_dma + sram_tb_size);
+
+		airoha_fe_rmw(eth, REG_PPE_BND_AGE0(i),
+			      PPE_BIND_AGE0_DELTA_NON_L4 |
+			      PPE_BIND_AGE0_DELTA_UDP,
+			      FIELD_PREP(PPE_BIND_AGE0_DELTA_NON_L4, 1) |
+			      FIELD_PREP(PPE_BIND_AGE0_DELTA_UDP, 12));
+		airoha_fe_rmw(eth, REG_PPE_BND_AGE1(i),
+			      PPE_BIND_AGE1_DELTA_TCP_FIN |
+			      PPE_BIND_AGE1_DELTA_TCP,
+			      FIELD_PREP(PPE_BIND_AGE1_DELTA_TCP_FIN, 1) |
+			      FIELD_PREP(PPE_BIND_AGE1_DELTA_TCP, 7));
+
+		airoha_fe_rmw(eth, REG_PPE_TB_HASH_CFG(i),
+			      PPE_SRAM_TABLE_EN_MASK |
+			      PPE_SRAM_HASH1_EN_MASK |
+			      PPE_DRAM_TABLE_EN_MASK |
+			      PPE_SRAM_HASH0_MODE_MASK |
+			      PPE_SRAM_HASH1_MODE_MASK |
+			      PPE_DRAM_HASH0_MODE_MASK |
+			      PPE_DRAM_HASH1_MODE_MASK,
+			      FIELD_PREP(PPE_SRAM_TABLE_EN_MASK, 1) |
+			      FIELD_PREP(PPE_SRAM_HASH1_EN_MASK, 1) |
+			      FIELD_PREP(PPE_SRAM_HASH1_MODE_MASK, 1) |
+			      FIELD_PREP(PPE_DRAM_HASH1_MODE_MASK, 3));
+
+		airoha_fe_rmw(eth, REG_PPE_TB_CFG(i),
+			      PPE_TB_CFG_SEARCH_MISS_MASK |
+			      PPE_TB_ENTRY_SIZE_MASK,
+			      FIELD_PREP(PPE_TB_CFG_SEARCH_MISS_MASK, 3) |
+			      FIELD_PREP(PPE_TB_ENTRY_SIZE_MASK, 0));
+
+		airoha_fe_wr(eth, REG_PPE_HASH_SEED(i), PPE_HASH_SEED);
+
+		for (p = 0; p < ARRAY_SIZE(eth->ports); p++)
+			airoha_fe_rmw(eth, REG_PPE_MTU(i, p),
+				      FP0_EGRESS_MTU_MASK |
+				      FP1_EGRESS_MTU_MASK,
+				      FIELD_PREP(FP0_EGRESS_MTU_MASK,
+						 AIROHA_MAX_MTU) |
+				      FIELD_PREP(FP1_EGRESS_MTU_MASK,
+						 AIROHA_MAX_MTU));
+	}
+
+	if (airoha_ppe2_is_enabled(eth)) {
+		sram_num_entries =
+			PPE_RAM_NUM_ENTRIES_SHIFT(PPE1_SRAM_NUM_ENTRIES);
+		airoha_fe_rmw(eth, REG_PPE_TB_CFG(0),
+			      PPE_SRAM_TB_NUM_ENTRY_MASK |
+			      PPE_DRAM_TB_NUM_ENTRY_MASK,
+			      FIELD_PREP(PPE_SRAM_TB_NUM_ENTRY_MASK,
+					 sram_num_entries) |
+			      FIELD_PREP(PPE_DRAM_TB_NUM_ENTRY_MASK,
+					 dram_num_entries));
+		airoha_fe_rmw(eth, REG_PPE_TB_CFG(1),
+			      PPE_SRAM_TB_NUM_ENTRY_MASK |
+			      PPE_DRAM_TB_NUM_ENTRY_MASK,
+			      FIELD_PREP(PPE_SRAM_TB_NUM_ENTRY_MASK,
+					 sram_num_entries) |
+			      FIELD_PREP(PPE_DRAM_TB_NUM_ENTRY_MASK,
+					 dram_num_entries));
+	} else {
+		sram_num_entries =
+			PPE_RAM_NUM_ENTRIES_SHIFT(PPE_SRAM_NUM_ENTRIES);
+		airoha_fe_rmw(eth, REG_PPE_TB_CFG(0),
+			      PPE_SRAM_TB_NUM_ENTRY_MASK |
+			      PPE_DRAM_TB_NUM_ENTRY_MASK,
+			      FIELD_PREP(PPE_SRAM_TB_NUM_ENTRY_MASK,
+					 sram_num_entries) |
+			      FIELD_PREP(PPE_DRAM_TB_NUM_ENTRY_MASK,
+					 dram_num_entries));
+	}
+}
+
+static void airoha_ppe_flow_mangle_eth(const struct flow_action_entry *act, void *eth)
+{
+	void *dest = eth + act->mangle.offset;
+	const void *src = &act->mangle.val;
+
+	if (act->mangle.offset > 8)
+		return;
+
+	if (act->mangle.mask == 0xffff) {
+		src += 2;
+		dest += 2;
+	}
+
+	memcpy(dest, src, act->mangle.mask ? 2 : 4);
+}
+
+static int airoha_ppe_flow_mangle_ports(const struct flow_action_entry *act,
+					struct airoha_flow_data *data)
+{
+	u32 val = be32_to_cpu((__force __be32)act->mangle.val);
+
+	switch (act->mangle.offset) {
+	case 0:
+		if ((__force __be32)act->mangle.mask == ~cpu_to_be32(0xffff))
+			data->dst_port = cpu_to_be16(val);
+		else
+			data->src_port = cpu_to_be16(val >> 16);
+		break;
+	case 2:
+		data->dst_port = cpu_to_be16(val);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int airoha_ppe_flow_mangle_ipv4(const struct flow_action_entry *act,
+				       struct airoha_flow_data *data)
+{
+	__be32 *dest;
+
+	switch (act->mangle.offset) {
+	case offsetof(struct iphdr, saddr):
+		dest = &data->v4.src_addr;
+		break;
+	case offsetof(struct iphdr, daddr):
+		dest = &data->v4.dst_addr;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	memcpy(dest, &act->mangle.val, sizeof(u32));
+
+	return 0;
+}
+
+static int airoha_get_dsa_port(struct net_device **dev)
+{
+#if IS_ENABLED(CONFIG_NET_DSA)
+	struct dsa_port *dp = dsa_port_from_netdev(*dev);
+
+	if (IS_ERR(dp))
+		return -ENODEV;
+
+	*dev = dsa_port_to_conduit(dp);
+	return dp->index;
+#else
+	return -ENODEV;
+#endif
+}
+
+static int airoha_ppe_foe_entry_prepare(struct airoha_foe_entry *hwe,
+					struct net_device *dev, int type,
+					struct airoha_flow_data *data,
+					int l4proto)
+{
+	int dsa_port = airoha_get_dsa_port(&dev);
+	struct airoha_foe_mac_info_common *l2;
+	u32 qdata, ports_pad, val;
+
+	memset(hwe, 0, sizeof(*hwe));
+
+	val = FIELD_PREP(AIROHA_FOE_IB1_BIND_STATE, AIROHA_FOE_STATE_BIND) |
+	      FIELD_PREP(AIROHA_FOE_IB1_BIND_PACKET_TYPE, type) |
+	      FIELD_PREP(AIROHA_FOE_IB1_BIND_UDP, l4proto == IPPROTO_UDP) |
+	      FIELD_PREP(AIROHA_FOE_IB1_BIND_VLAN_LAYER, data->vlan.num) |
+	      FIELD_PREP(AIROHA_FOE_IB1_BIND_VPM, data->vlan.num) |
+	      AIROHA_FOE_IB1_BIND_TTL;
+	hwe->ib1 = val;
+
+	val = FIELD_PREP(AIROHA_FOE_IB2_PORT_AG, 0x1f);
+	if (dsa_port >= 0)
+		val |= FIELD_PREP(AIROHA_FOE_IB2_NBQ, dsa_port);
+
+	if (dev) {
+		struct airoha_gdm_port *port = netdev_priv(dev);
+		u8 pse_port;
+
+		pse_port = port->id == 4 ? FE_PSE_PORT_GDM4 : port->id;
+		val |= FIELD_PREP(AIROHA_FOE_IB2_PSE_PORT, pse_port);
+	}
+
+	/* FIXME: implement QoS support setting pse_port to 2 (loopback)
+	 * for uplink and setting qos bit in ib2
+	 */
+
+	if (is_multicast_ether_addr(data->eth.h_dest))
+		val |= AIROHA_FOE_IB2_MULTICAST;
+
+	ports_pad = 0xa5a5a500 | (l4proto & 0xff);
+	if (type == PPE_PKT_TYPE_IPV4_ROUTE)
+		hwe->ipv4.orig_tuple.ports = ports_pad;
+	if (type == PPE_PKT_TYPE_IPV6_ROUTE_3T)
+		hwe->ipv6.ports = ports_pad;
+
+	qdata = FIELD_PREP(AIROHA_FOE_SHAPER_ID, 0x7f);
+	if (type == PPE_PKT_TYPE_BRIDGE) {
+		hwe->bridge.dest_mac_hi = get_unaligned_be32(data->eth.h_dest);
+		hwe->bridge.dest_mac_lo =
+			get_unaligned_be16(data->eth.h_dest + 4);
+		hwe->bridge.src_mac_hi =
+			get_unaligned_be16(data->eth.h_source);
+		hwe->bridge.src_mac_lo =
+			get_unaligned_be32(data->eth.h_source + 2);
+		hwe->bridge.data = qdata;
+		hwe->bridge.ib2 = val;
+		l2 = &hwe->bridge.l2.common;
+	} else if (type >= PPE_PKT_TYPE_IPV6_ROUTE_3T) {
+		hwe->ipv6.data = qdata;
+		hwe->ipv6.ib2 = val;
+		l2 = &hwe->ipv6.l2;
+	} else {
+		hwe->ipv4.data = qdata;
+		hwe->ipv4.ib2 = val;
+		l2 = &hwe->ipv4.l2.common;
+	}
+
+	l2->dest_mac_hi = get_unaligned_be32(data->eth.h_dest);
+	l2->dest_mac_lo = get_unaligned_be16(data->eth.h_dest + 4);
+	if (type <= PPE_PKT_TYPE_IPV4_DSLITE) {
+		l2->src_mac_hi = get_unaligned_be32(data->eth.h_source);
+		hwe->ipv4.l2.src_mac_lo =
+			get_unaligned_be16(data->eth.h_source + 4);
+	} else {
+		l2->src_mac_hi = FIELD_PREP(AIROHA_FOE_MAC_SMAC_ID, 0xf);
+	}
+
+	if (data->vlan.num) {
+		l2->etype = dsa_port >= 0 ? BIT(dsa_port) : 0;
+		l2->vlan1 = data->vlan.hdr[0].id;
+		if (data->vlan.num == 2)
+			l2->vlan2 = data->vlan.hdr[1].id;
+	} else if (dsa_port >= 0) {
+		l2->etype = BIT(15) | BIT(dsa_port);
+	} else if (type >= PPE_PKT_TYPE_IPV6_ROUTE_3T) {
+		l2->etype = ETH_P_IPV6;
+	} else {
+		l2->etype = ETH_P_IP;
+	}
+
+	return 0;
+}
+
+static int airoha_ppe_foe_entry_set_ipv4_tuple(struct airoha_foe_entry *hwe,
+					       struct airoha_flow_data *data,
+					       bool egress)
+{
+	int type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, hwe->ib1);
+	struct airoha_foe_ipv4_tuple *t;
+
+	switch (type) {
+	case PPE_PKT_TYPE_IPV4_HNAPT:
+		if (egress) {
+			t = &hwe->ipv4.new_tuple;
+			break;
+		}
+		fallthrough;
+	case PPE_PKT_TYPE_IPV4_DSLITE:
+	case PPE_PKT_TYPE_IPV4_ROUTE:
+		t = &hwe->ipv4.orig_tuple;
+		break;
+	default:
+		WARN_ON_ONCE(1);
+		return -EINVAL;
+	}
+
+	t->src_ip = be32_to_cpu(data->v4.src_addr);
+	t->dest_ip = be32_to_cpu(data->v4.dst_addr);
+
+	if (type != PPE_PKT_TYPE_IPV4_ROUTE) {
+		t->src_port = be16_to_cpu(data->src_port);
+		t->dest_port = be16_to_cpu(data->dst_port);
+	}
+
+	return 0;
+}
+
+static int airoha_ppe_foe_entry_set_ipv6_tuple(struct airoha_foe_entry *hwe,
+					       struct airoha_flow_data *data)
+
+{
+	int type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, hwe->ib1);
+	u32 *src, *dest;
+
+	switch (type) {
+	case PPE_PKT_TYPE_IPV6_ROUTE_5T:
+	case PPE_PKT_TYPE_IPV6_6RD:
+		hwe->ipv6.src_port = be16_to_cpu(data->src_port);
+		hwe->ipv6.dest_port = be16_to_cpu(data->dst_port);
+		fallthrough;
+	case PPE_PKT_TYPE_IPV6_ROUTE_3T:
+		src = hwe->ipv6.src_ip;
+		dest = hwe->ipv6.dest_ip;
+		break;
+	default:
+		WARN_ON_ONCE(1);
+		return -EINVAL;
+	}
+
+	ipv6_addr_be32_to_cpu(src, data->v6.src_addr.s6_addr32);
+	ipv6_addr_be32_to_cpu(dest, data->v6.dst_addr.s6_addr32);
+
+	return 0;
+}
+
+static u32 airoha_ppe_foe_get_entry_hash(struct airoha_foe_entry *hwe)
+{
+	int type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, hwe->ib1);
+	u32 hash, hv1, hv2, hv3;
+
+	switch (type) {
+	case PPE_PKT_TYPE_IPV4_ROUTE:
+	case PPE_PKT_TYPE_IPV4_HNAPT:
+		hv1 = hwe->ipv4.orig_tuple.ports;
+		hv2 = hwe->ipv4.orig_tuple.dest_ip;
+		hv3 = hwe->ipv4.orig_tuple.src_ip;
+		break;
+	case PPE_PKT_TYPE_IPV6_ROUTE_3T:
+	case PPE_PKT_TYPE_IPV6_ROUTE_5T:
+		hv1 = hwe->ipv6.src_ip[3] ^ hwe->ipv6.dest_ip[3];
+		hv1 ^= hwe->ipv6.ports;
+
+		hv2 = hwe->ipv6.src_ip[2] ^ hwe->ipv6.dest_ip[2];
+		hv2 ^= hwe->ipv6.dest_ip[0];
+
+		hv3 = hwe->ipv6.src_ip[1] ^ hwe->ipv6.dest_ip[1];
+		hv3 ^= hwe->ipv6.src_ip[0];
+		break;
+	case PPE_PKT_TYPE_IPV4_DSLITE:
+	case PPE_PKT_TYPE_IPV6_6RD:
+	default:
+		WARN_ON_ONCE(1);
+		return PPE_HASH_MASK;
+	}
+
+	hash = (hv1 & hv2) | ((~hv1) & hv3);
+	hash = (hash >> 24) | ((hash & 0xffffff) << 8);
+	hash ^= hv1 ^ hv2 ^ hv3;
+	hash ^= hash >> 16;
+	hash &= PPE_NUM_ENTRIES - 1;
+
+	return hash;
+}
+
+static struct airoha_foe_entry *
+airoha_ppe_foe_get_entry(struct airoha_ppe *ppe, u32 hash)
+{
+	if (hash < PPE_SRAM_NUM_ENTRIES) {
+		u32 *hwe = ppe->foe + hash * sizeof(struct airoha_foe_entry);
+		struct airoha_eth *eth = ppe->eth;
+		bool ppe2;
+		u32 val;
+		int i;
+
+		ppe2 = airoha_ppe2_is_enabled(ppe->eth) &&
+		       hash >= PPE1_SRAM_NUM_ENTRIES;
+		airoha_fe_wr(ppe->eth, REG_PPE_RAM_CTRL(ppe2),
+			     FIELD_PREP(PPE_SRAM_CTRL_ENTRY_MASK, hash) |
+			     PPE_SRAM_CTRL_REQ_MASK);
+		if (read_poll_timeout_atomic(airoha_fe_rr, val,
+					     val & PPE_SRAM_CTRL_ACK_MASK,
+					     10, 100, false, eth,
+					     REG_PPE_RAM_CTRL(ppe2)))
+			return NULL;
+
+		for (i = 0; i < sizeof(struct airoha_foe_entry) / 4; i++)
+			hwe[i] = airoha_fe_rr(eth,
+					      REG_PPE_RAM_ENTRY(ppe2, i));
+	}
+
+	return ppe->foe + hash * sizeof(struct airoha_foe_entry);
+}
+
+static bool airoha_ppe_foe_compare_entry(struct airoha_flow_table_entry *e,
+					 struct airoha_foe_entry *hwe)
+{
+	int type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, e->data.ib1);
+	int len;
+
+	if ((hwe->ib1 ^ e->data.ib1) & AIROHA_FOE_IB1_BIND_UDP)
+		return false;
+
+	if (type > PPE_PKT_TYPE_IPV4_DSLITE)
+		len = offsetof(struct airoha_foe_entry, ipv6.data);
+	else
+		len = offsetof(struct airoha_foe_entry, ipv4.ib2);
+
+	return !memcmp(&e->data.d, &hwe->d, len - sizeof(hwe->ib1));
+}
+
+static int airoha_ppe_foe_commit_entry(struct airoha_ppe *ppe,
+				       struct airoha_foe_entry *e,
+				       u32 hash)
+{
+	struct airoha_foe_entry *hwe = ppe->foe + hash * sizeof(*hwe);
+	u32 ts = airoha_ppe_get_timestamp(ppe);
+	struct airoha_eth *eth = ppe->eth;
+
+	memcpy(&hwe->d, &e->d, sizeof(*hwe) - sizeof(hwe->ib1));
+	wmb();
+
+	e->ib1 &= ~AIROHA_FOE_IB1_BIND_TIMESTAMP;
+	e->ib1 |= FIELD_PREP(AIROHA_FOE_IB1_BIND_TIMESTAMP, ts);
+	hwe->ib1 = e->ib1;
+
+	if (hash < PPE_SRAM_NUM_ENTRIES) {
+		dma_addr_t addr = ppe->foe_dma + hash * sizeof(*hwe);
+		bool ppe2 = airoha_ppe2_is_enabled(eth) &&
+			    hash >= PPE1_SRAM_NUM_ENTRIES;
+		struct airoha_npu *npu;
+		int err = -ENODEV;
+
+		rcu_read_lock();
+		npu = rcu_dereference(eth->npu);
+		if (npu)
+			err = npu->ops.ppe_foe_commit_entry(npu, addr,
+							    sizeof(*hwe), hash,
+							    ppe2);
+		rcu_read_unlock();
+
+		return err;
+	}
+
+	return 0;
+}
+
+static void airoha_ppe_foe_insert_entry(struct airoha_ppe *ppe, u32 hash)
+{
+	struct airoha_flow_table_entry *e;
+	struct airoha_foe_entry *hwe;
+	struct hlist_node *n;
+	u32 index, state;
+
+	spin_lock_bh(&ppe_lock);
+
+	hwe = airoha_ppe_foe_get_entry(ppe, hash);
+	if (!hwe)
+		goto unlock;
+
+	state = FIELD_GET(AIROHA_FOE_IB1_BIND_STATE, hwe->ib1);
+	if (state == AIROHA_FOE_STATE_BIND)
+		goto unlock;
+
+	index = airoha_ppe_foe_get_entry_hash(hwe);
+	hlist_for_each_entry_safe(e, n, &ppe->foe_flow[index], list) {
+		if (airoha_ppe_foe_compare_entry(e, hwe)) {
+			airoha_ppe_foe_commit_entry(ppe, &e->data, hash);
+			e->hash = hash;
+			break;
+		}
+	}
+unlock:
+	spin_unlock_bh(&ppe_lock);
+}
+
+static int airoha_ppe_foe_flow_commit_entry(struct airoha_ppe *ppe,
+					    struct airoha_flow_table_entry *e)
+{
+	u32 hash = airoha_ppe_foe_get_entry_hash(&e->data);
+
+	e->hash = 0xffff;
+
+	spin_lock_bh(&ppe_lock);
+	hlist_add_head(&e->list, &ppe->foe_flow[hash]);
+	spin_unlock_bh(&ppe_lock);
+
+	return 0;
+}
+
+static void airoha_ppe_foe_flow_remove_entry(struct airoha_ppe *ppe,
+					     struct airoha_flow_table_entry *e)
+{
+	spin_lock_bh(&ppe_lock);
+
+	hlist_del_init(&e->list);
+	if (e->hash != 0xffff) {
+		e->data.ib1 &= ~AIROHA_FOE_IB1_BIND_STATE;
+		e->data.ib1 |= FIELD_PREP(AIROHA_FOE_IB1_BIND_STATE,
+					  AIROHA_FOE_STATE_INVALID);
+		airoha_ppe_foe_commit_entry(ppe, &e->data, e->hash);
+		e->hash = 0xffff;
+	}
+
+	spin_unlock_bh(&ppe_lock);
+}
+
+static int airoha_ppe_flow_offload_replace(struct airoha_gdm_port *port,
+					   struct flow_cls_offload *f)
+{
+	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
+	struct airoha_eth *eth = port->qdma->eth;
+	struct airoha_flow_table_entry *e;
+	struct airoha_flow_data data = {};
+	struct net_device *odev = NULL;
+	struct flow_action_entry *act;
+	struct airoha_foe_entry hwe;
+	int err, i, offload_type;
+	u16 addr_type = 0;
+	u8 l4proto = 0;
+
+	if (rhashtable_lookup(&eth->flow_table, &f->cookie,
+			      airoha_flow_table_params))
+		return -EEXIST;
+
+	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_META))
+		return -EOPNOTSUPP;
+
+	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
+		struct flow_match_control match;
+
+		flow_rule_match_control(rule, &match);
+		addr_type = match.key->addr_type;
+		if (flow_rule_has_control_flags(match.mask->flags,
+						f->common.extack))
+			return -EOPNOTSUPP;
+	} else {
+		return -EOPNOTSUPP;
+	}
+
+	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
+		struct flow_match_basic match;
+
+		flow_rule_match_basic(rule, &match);
+		l4proto = match.key->ip_proto;
+	} else {
+		return -EOPNOTSUPP;
+	}
+
+	switch (addr_type) {
+	case 0:
+		offload_type = PPE_PKT_TYPE_BRIDGE;
+		if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
+			struct flow_match_eth_addrs match;
+
+			flow_rule_match_eth_addrs(rule, &match);
+			memcpy(data.eth.h_dest, match.key->dst, ETH_ALEN);
+			memcpy(data.eth.h_source, match.key->src, ETH_ALEN);
+		} else {
+			return -EOPNOTSUPP;
+		}
+		break;
+	case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
+		offload_type = PPE_PKT_TYPE_IPV4_HNAPT;
+		break;
+	case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
+		offload_type = PPE_PKT_TYPE_IPV6_ROUTE_5T;
+		break;
+	default:
+		return -EOPNOTSUPP;
+	}
+
+	flow_action_for_each(i, act, &rule->action) {
+		switch (act->id) {
+		case FLOW_ACTION_MANGLE:
+			if (offload_type == PPE_PKT_TYPE_BRIDGE)
+				return -EOPNOTSUPP;
+
+			if (act->mangle.htype == FLOW_ACT_MANGLE_HDR_TYPE_ETH)
+				airoha_ppe_flow_mangle_eth(act, &data.eth);
+			break;
+		case FLOW_ACTION_REDIRECT:
+			odev = act->dev;
+			break;
+		case FLOW_ACTION_CSUM:
+			break;
+		case FLOW_ACTION_VLAN_PUSH:
+			if (data.vlan.num == 2 ||
+			    act->vlan.proto != htons(ETH_P_8021Q))
+				return -EOPNOTSUPP;
+
+			data.vlan.hdr[data.vlan.num].id = act->vlan.vid;
+			data.vlan.hdr[data.vlan.num].proto = act->vlan.proto;
+			data.vlan.num++;
+			break;
+		case FLOW_ACTION_VLAN_POP:
+			break;
+		case FLOW_ACTION_PPPOE_PUSH:
+			break;
+		default:
+			return -EOPNOTSUPP;
+		}
+	}
+
+	if (!is_valid_ether_addr(data.eth.h_source) ||
+	    !is_valid_ether_addr(data.eth.h_dest))
+		return -EINVAL;
+
+	err = airoha_ppe_foe_entry_prepare(&hwe, odev, offload_type,
airoha_ppe_foe_entry_prepare(&hwe, odev, offload_type,
+					   &data, l4proto);
+	if (err)
+		return err;
+
+	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
+		struct flow_match_ports ports;
+
+		if (offload_type == PPE_PKT_TYPE_BRIDGE)
+			return -EOPNOTSUPP;
+
+		flow_rule_match_ports(rule, &ports);
+		data.src_port = ports.key->src;
+		data.dst_port = ports.key->dst;
+	} else if (offload_type != PPE_PKT_TYPE_BRIDGE) {
+		return -EOPNOTSUPP;
+	}
+
+	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
+		struct flow_match_ipv4_addrs addrs;
+
+		flow_rule_match_ipv4_addrs(rule, &addrs);
+		data.v4.src_addr = addrs.key->src;
+		data.v4.dst_addr = addrs.key->dst;
+		airoha_ppe_foe_entry_set_ipv4_tuple(&hwe, &data, false);
+	}
+
+	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
+		struct flow_match_ipv6_addrs addrs;
+
+		flow_rule_match_ipv6_addrs(rule, &addrs);
+
+		data.v6.src_addr = addrs.key->src;
+		data.v6.dst_addr = addrs.key->dst;
+		airoha_ppe_foe_entry_set_ipv6_tuple(&hwe, &data);
+	}
+
+	flow_action_for_each(i, act, &rule->action) {
+		if (act->id != FLOW_ACTION_MANGLE)
+			continue;
+
+		if (offload_type == PPE_PKT_TYPE_BRIDGE)
+			return -EOPNOTSUPP;
+
+		switch (act->mangle.htype) {
+		case FLOW_ACT_MANGLE_HDR_TYPE_TCP:
+		case FLOW_ACT_MANGLE_HDR_TYPE_UDP:
+			err = airoha_ppe_flow_mangle_ports(act, &data);
+			break;
+		case FLOW_ACT_MANGLE_HDR_TYPE_IP4:
+			err = airoha_ppe_flow_mangle_ipv4(act, &data);
+			break;
+		case FLOW_ACT_MANGLE_HDR_TYPE_ETH:
+			/* handled earlier */
+			break;
+		default:
+			return -EOPNOTSUPP;
+		}
+
+		if (err)
+			return err;
+	}
+
+	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
+		err = airoha_ppe_foe_entry_set_ipv4_tuple(&hwe, &data, true);
+		if (err)
+			return err;
+	}
+
+	e = kzalloc(sizeof(*e), GFP_KERNEL);
+	if (!e)
+		return -ENOMEM;
+
+	e->cookie = f->cookie;
+	memcpy(&e->data, &hwe, sizeof(e->data));
+
+	err = airoha_ppe_foe_flow_commit_entry(eth->ppe, e);
+	if (err)
+		goto free_entry;
+
+	err = rhashtable_insert_fast(&eth->flow_table, &e->node,
+				     airoha_flow_table_params);
+	if (err < 0)
+		goto remove_foe_entry;
+
+	return 0;
+
+remove_foe_entry:
+	airoha_ppe_foe_flow_remove_entry(eth->ppe, e);
+free_entry:
+	kfree(e);
+
+	return err;
+}
+
+static int airoha_ppe_flow_offload_destroy(struct airoha_gdm_port *port,
+					   struct flow_cls_offload *f)
+{
+	struct airoha_eth *eth = port->qdma->eth;
+	struct airoha_flow_table_entry *e;
+
+	e = rhashtable_lookup(&eth->flow_table, &f->cookie,
+			      airoha_flow_table_params);
+	if (!e)
+		return -ENOENT;
+
+	airoha_ppe_foe_flow_remove_entry(eth->ppe, e);
+	rhashtable_remove_fast(&eth->flow_table, &e->node,
+			       airoha_flow_table_params);
+	kfree(e);
+
+	return 0;
+}
+
+static int airoha_ppe_flow_offload_cmd(struct airoha_gdm_port *port,
+				       struct flow_cls_offload *f)
+{
+	switch (f->command) {
+	case FLOW_CLS_REPLACE:
+		return airoha_ppe_flow_offload_replace(port, f);
+	case FLOW_CLS_DESTROY:
+		return airoha_ppe_flow_offload_destroy(port, f);
+	default:
+		break;
+	}
+
+	return -EOPNOTSUPP;
+}
+
+static int airoha_ppe_flush_sram_entries(struct airoha_ppe *ppe,
+					 struct airoha_npu *npu)
+{
+	int i, sram_num_entries = PPE_SRAM_NUM_ENTRIES;
+	struct airoha_foe_entry *hwe = ppe->foe;
+
+	if (airoha_ppe2_is_enabled(ppe->eth))
+		sram_num_entries = sram_num_entries / 2;
+
+	for (i = 0; i < sram_num_entries; i++)
+		memset(&hwe[i], 0, sizeof(*hwe));
+
+	return npu->ops.ppe_flush_sram_entries(npu, ppe->foe_dma,
+					       PPE_SRAM_NUM_ENTRIES);
+}
+
+static struct airoha_npu *airoha_ppe_npu_get(struct airoha_eth *eth)
+{
+	struct airoha_npu *npu = airoha_npu_get(eth->dev);
+
+	
if (IS_ERR(npu)) {
+		request_module("airoha-npu");
+		npu = airoha_npu_get(eth->dev);
+	}
+
+	return npu;
+}
+
+static int airoha_ppe_offload_setup(struct airoha_eth *eth)
+{
+	struct airoha_npu *npu = airoha_ppe_npu_get(eth);
+	int err;
+
+	if (IS_ERR(npu))
+		return PTR_ERR(npu);
+
+	err = npu->ops.ppe_init(npu);
+	if (err)
+		goto error_npu_put;
+
+	airoha_ppe_hw_init(eth->ppe);
+	err = airoha_ppe_flush_sram_entries(eth->ppe, npu);
+	if (err)
+		goto error_npu_put;
+
+	rcu_assign_pointer(eth->npu, npu);
+	synchronize_rcu();
+
+	return 0;
+
+error_npu_put:
+	airoha_npu_put(npu);
+
+	return err;
+}
+
+int airoha_ppe_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
+				 void *cb_priv)
+{
+	struct flow_cls_offload *cls = type_data;
+	struct net_device *dev = cb_priv;
+	struct airoha_gdm_port *port = netdev_priv(dev);
+	struct airoha_eth *eth = port->qdma->eth;
+	int err = 0;
+
+	if (!tc_can_offload(dev) || type != TC_SETUP_CLSFLOWER)
+		return -EOPNOTSUPP;
+
+	mutex_lock(&flow_offload_mutex);
+
+	if (!eth->npu)
+		err = airoha_ppe_offload_setup(eth);
+	if (!err)
+		err = airoha_ppe_flow_offload_cmd(port, cls);
+
+	mutex_unlock(&flow_offload_mutex);
+
+	return err;
+}
+
+void airoha_ppe_check_skb(struct airoha_ppe *ppe, u16 hash)
+{
+	u16 now, diff;
+
+	if (hash > PPE_HASH_MASK)
+		return;
+
+	now = (u16)jiffies;
+	diff = now - ppe->foe_check_time[hash];
+	if (diff < HZ / 10)
+		return;
+
+	ppe->foe_check_time[hash] = now;
+	airoha_ppe_foe_insert_entry(ppe, hash);
+}
+
+int airoha_ppe_init(struct airoha_eth *eth)
+{
+	struct airoha_ppe *ppe;
+	int foe_size;
+
+	ppe = devm_kzalloc(eth->dev, sizeof(*ppe), GFP_KERNEL);
+	if (!ppe)
+		return -ENOMEM;
+
+	foe_size = PPE_NUM_ENTRIES * sizeof(struct airoha_foe_entry);
+	ppe->foe = dmam_alloc_coherent(eth->dev, foe_size, &ppe->foe_dma,
+				       GFP_KERNEL);
+	if (!ppe->foe)
+		return -ENOMEM;
+
+	ppe->eth = eth;
+	eth->ppe = ppe;
+
+	ppe->foe_flow = devm_kzalloc(eth->dev,
+				     PPE_NUM_ENTRIES * sizeof(*ppe->foe_flow),
+				     GFP_KERNEL);
+	if (!ppe->foe_flow)
+		return -ENOMEM;
+
+	return rhashtable_init(&eth->flow_table, &airoha_flow_table_params);
+}
+
+void airoha_ppe_deinit(struct airoha_eth *eth)
+{
+	struct airoha_npu *npu;
+
+	rcu_read_lock();
+	npu = rcu_dereference(eth->npu);
+	if (npu) {
+		npu->ops.ppe_deinit(npu);
+		airoha_npu_put(npu);
+	}
+	rcu_read_unlock();
+
+	rhashtable_destroy(&eth->flow_table);
+}
diff --git a/drivers/net/ethernet/airoha/airoha_regs.h b/drivers/net/ethernet/airoha/airoha_regs.h
index e467dd81ff44..6cc64c60953a 100644
--- a/drivers/net/ethernet/airoha/airoha_regs.h
+++ b/drivers/net/ethernet/airoha/airoha_regs.h
@@ -15,6 +15,7 @@
 #define CDM1_BASE 0x0400
 #define GDM1_BASE 0x0500
 #define PPE1_BASE 0x0c00
+#define PPE2_BASE 0x1c00
 
 #define CDM2_BASE 0x1400
 #define GDM2_BASE 0x1500
@@ -36,6 +37,7 @@
 #define FE_RST_GDM3_MBI_ARB_MASK BIT(2)
 #define FE_RST_CORE_MASK BIT(0)
 
+#define REG_FE_FOE_TS 0x0010
+
 #define REG_FE_WAN_MAC_H 0x0030
 #define REG_FE_LAN_MAC_H 0x0040
@@ -192,11 +194,106 @@
 #define REG_FE_GDM_RX_ETH_L511_CNT_L(_n) (GDM_BASE(_n) + 0x198)
 #define REG_FE_GDM_RX_ETH_L1023_CNT_L(_n) (GDM_BASE(_n) + 0x19c)
 
-#define REG_PPE1_TB_HASH_CFG (PPE1_BASE + 0x250)
-#define PPE1_SRAM_TABLE_EN_MASK BIT(0)
-#define PPE1_SRAM_HASH1_EN_MASK BIT(8)
-#define PPE1_DRAM_TABLE_EN_MASK BIT(16)
-#define PPE1_DRAM_HASH1_EN_MASK BIT(24)
+#define REG_PPE_GLO_CFG(_n) (((_n) ?
PPE2_BASE : PPE1_BASE) + 0x200) +#define PPE_GLO_CFG_BUSY_MASK BIT(31) +#define PPE_GLO_CFG_FLOW_DROP_UPDATE_MASK BIT(9) +#define PPE_GLO_CFG_PSE_HASH_OFS_MASK BIT(6) +#define PPE_GLO_CFG_PPE_BSWAP_MASK BIT(5) +#define PPE_GLO_CFG_TTL_DROP_MASK BIT(4) +#define PPE_GLO_CFG_IP4_CS_DROP_MASK BIT(3) +#define PPE_GLO_CFG_IP4_L4_CS_DROP_MASK BIT(2) +#define PPE_GLO_CFG_EN_MASK BIT(0) + +#define REG_PPE_PPE_FLOW_CFG(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x204) +#define PPE_FLOW_CFG_IP6_HASH_GRE_KEY_MASK BIT(20) +#define PPE_FLOW_CFG_IP4_HASH_GRE_KEY_MASK BIT(19) +#define PPE_FLOW_CFG_IP4_HASH_FLOW_LABEL_MASK BIT(18) +#define PPE_FLOW_CFG_IP4_NAT_FRAG_MASK BIT(17) +#define PPE_FLOW_CFG_IP_PROTO_BLACKLIST_MASK BIT(16) +#define PPE_FLOW_CFG_IP4_DSLITE_MASK BIT(14) +#define PPE_FLOW_CFG_IP4_NAPT_MASK BIT(13) +#define PPE_FLOW_CFG_IP4_NAT_MASK BIT(12) +#define PPE_FLOW_CFG_IP6_6RD_MASK BIT(10) +#define PPE_FLOW_CFG_IP6_5T_ROUTE_MASK BIT(9) +#define PPE_FLOW_CFG_IP6_3T_ROUTE_MASK BIT(8) +#define PPE_FLOW_CFG_IP4_UDP_FRAG_MASK BIT(7) +#define PPE_FLOW_CFG_IP4_TCP_FRAG_MASK BIT(6) + +#define REG_PPE_IP_PROTO_CHK(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x208) +#define PPE_IP_PROTO_CHK_IPV4_MASK GENMASK(15, 0) +#define PPE_IP_PROTO_CHK_IPV6_MASK GENMASK(31, 16) + +#define REG_PPE_TB_CFG(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x21c) +#define PPE_SRAM_TB_NUM_ENTRY_MASK GENMASK(26, 24) +#define PPE_TB_CFG_KEEPALIVE_MASK GENMASK(13, 12) +#define PPE_TB_CFG_AGE_TCP_FIN_MASK BIT(11) +#define PPE_TB_CFG_AGE_UDP_MASK BIT(10) +#define PPE_TB_CFG_AGE_TCP_MASK BIT(9) +#define PPE_TB_CFG_AGE_UNBIND_MASK BIT(8) +#define PPE_TB_CFG_AGE_NON_L4_MASK BIT(7) +#define PPE_TB_CFG_AGE_PREBIND_MASK BIT(6) +#define PPE_TB_CFG_SEARCH_MISS_MASK GENMASK(5, 4) +#define PPE_TB_ENTRY_SIZE_MASK BIT(3) +#define PPE_DRAM_TB_NUM_ENTRY_MASK GENMASK(2, 0) + +#define REG_PPE_TB_BASE(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x220) + +#define REG_PPE_BIND_RATE(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x228) +#define PPE_BIND_RATE_L2B_BIND_MASK GENMASK(31, 16) +#define PPE_BIND_RATE_BIND_MASK GENMASK(15, 0) + +#define REG_PPE_BIND_LIMIT0(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x22c) +#define PPE_BIND_LIMIT0_HALF_MASK GENMASK(29, 16) +#define PPE_BIND_LIMIT0_QUARTER_MASK GENMASK(13, 0) + +#define REG_PPE_BIND_LIMIT1(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x230) +#define PPE_BIND_LIMIT1_NON_L4_MASK GENMASK(23, 16) +#define PPE_BIND_LIMIT1_FULL_MASK GENMASK(13, 0) + +#define REG_PPE_BND_AGE0(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x23c) +#define PPE_BIND_AGE0_DELTA_NON_L4 GENMASK(30, 16) +#define PPE_BIND_AGE0_DELTA_UDP GENMASK(14, 0) + +#define REG_PPE_UNBIND_AGE(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x238) +#define PPE_UNBIND_AGE_MIN_PACKETS_MASK GENMASK(31, 16) +#define PPE_UNBIND_AGE_DELTA_MASK GENMASK(7, 0) + +#define REG_PPE_BND_AGE1(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x240) +#define PPE_BIND_AGE1_DELTA_TCP_FIN GENMASK(30, 16) +#define PPE_BIND_AGE1_DELTA_TCP GENMASK(14, 0) + +#define REG_PPE_HASH_SEED(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x244) +#define PPE_HASH_SEED 0x12345678 + +#define REG_PPE_DFT_CPORT0(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x248) + +#define REG_PPE_DFT_CPORT1(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x24c) + +#define REG_PPE_TB_HASH_CFG(_n) (((_n) ? 
PPE2_BASE : PPE1_BASE) + 0x250) +#define PPE_DRAM_HASH1_MODE_MASK GENMASK(31, 28) +#define PPE_DRAM_HASH1_EN_MASK BIT(24) +#define PPE_DRAM_HASH0_MODE_MASK GENMASK(23, 20) +#define PPE_DRAM_TABLE_EN_MASK BIT(16) +#define PPE_SRAM_HASH1_MODE_MASK GENMASK(15, 12) +#define PPE_SRAM_HASH1_EN_MASK BIT(8) +#define PPE_SRAM_HASH0_MODE_MASK GENMASK(7, 4) +#define PPE_SRAM_TABLE_EN_MASK BIT(0) + +#define REG_PPE_MTU_BASE(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x304) +#define REG_PPE_MTU(_m, _n) (REG_PPE_MTU_BASE(_m) + ((_n) << 2)) +#define FP1_EGRESS_MTU_MASK GENMASK(29, 16) +#define FP0_EGRESS_MTU_MASK GENMASK(13, 0) + +#define REG_PPE_RAM_CTRL(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x31c) +#define PPE_SRAM_CTRL_ACK_MASK BIT(31) +#define PPE_SRAM_CTRL_DUAL_SUCESS_MASK BIT(30) +#define PPE_SRAM_CTRL_ENTRY_MASK GENMASK(23, 8) +#define PPE_SRAM_WR_DUAL_DIRECTION_MASK BIT(2) +#define PPE_SRAM_CTRL_WR_MASK BIT(1) +#define PPE_SRAM_CTRL_REQ_MASK BIT(0) + +#define REG_PPE_RAM_BASE(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x320) +#define REG_PPE_RAM_ENTRY(_m, _n) (REG_PPE_RAM_BASE(_m) + ((_n) << 2)) #define REG_FE_GDM_TX_OK_PKT_CNT_H(_n) (GDM_BASE(_n) + 0x280) #define REG_FE_GDM_TX_OK_BYTE_CNT_H(_n) (GDM_BASE(_n) + 0x284) -- 2.51.0 From 9cd451d414f6e29f507a216fb3b19fa68c011f8c Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Fri, 28 Feb 2025 11:54:22 +0100 Subject: [PATCH 08/16] net: airoha: Add loopback support for GDM2 Enable hw redirection for traffic received on GDM2 port to GDM{3,4}. This is required to apply Qdisc offloading (HTB or ETS) for traffic to and from GDM{3,4} port. Signed-off-by: Lorenzo Bianconi Signed-off-by: Paolo Abeni --- drivers/net/ethernet/airoha/airoha_eth.c | 71 ++++++++++++++++++++++- drivers/net/ethernet/airoha/airoha_eth.h | 7 +++ drivers/net/ethernet/airoha/airoha_ppe.c | 12 ++-- drivers/net/ethernet/airoha/airoha_regs.h | 29 +++++++++ 4 files changed, 111 insertions(+), 8 deletions(-) diff --git a/drivers/net/ethernet/airoha/airoha_eth.c b/drivers/net/ethernet/airoha/airoha_eth.c index 382ed81df808..ff837168845d 100644 --- a/drivers/net/ethernet/airoha/airoha_eth.c +++ b/drivers/net/ethernet/airoha/airoha_eth.c @@ -1588,14 +1588,81 @@ static int airoha_dev_set_macaddr(struct net_device *dev, void *p) return 0; } +static void airhoha_set_gdm2_loopback(struct airoha_gdm_port *port) +{ + u32 pse_port = port->id == 3 ? FE_PSE_PORT_GDM3 : FE_PSE_PORT_GDM4; + struct airoha_eth *eth = port->qdma->eth; + u32 chan = port->id == 3 ? 
4 : 0; + + /* Forward the traffic to the proper GDM port */ + airoha_set_gdm_port_fwd_cfg(eth, REG_GDM_FWD_CFG(2), pse_port); + airoha_fe_clear(eth, REG_GDM_FWD_CFG(2), GDM_STRIP_CRC); + + /* Enable GDM2 loopback */ + airoha_fe_wr(eth, REG_GDM_TXCHN_EN(2), 0xffffffff); + airoha_fe_wr(eth, REG_GDM_RXCHN_EN(2), 0xffff); + airoha_fe_rmw(eth, REG_GDM_LPBK_CFG(2), + LPBK_CHAN_MASK | LPBK_MODE_MASK | LPBK_EN_MASK, + FIELD_PREP(LPBK_CHAN_MASK, chan) | LPBK_EN_MASK); + airoha_fe_rmw(eth, REG_GDM_LEN_CFG(2), + GDM_SHORT_LEN_MASK | GDM_LONG_LEN_MASK, + FIELD_PREP(GDM_SHORT_LEN_MASK, 60) | + FIELD_PREP(GDM_LONG_LEN_MASK, AIROHA_MAX_MTU)); + + /* Disable VIP and IFC for GDM2 */ + airoha_fe_clear(eth, REG_FE_VIP_PORT_EN, BIT(2)); + airoha_fe_clear(eth, REG_FE_IFC_PORT_EN, BIT(2)); + + if (port->id == 3) { + /* FIXME: handle XSI_PCE1_PORT */ + airoha_fe_wr(eth, REG_PPE_DFT_CPORT0(0), 0x5500); + airoha_fe_rmw(eth, REG_FE_WAN_PORT, + WAN1_EN_MASK | WAN1_MASK | WAN0_MASK, + FIELD_PREP(WAN0_MASK, HSGMII_LAN_PCIE0_SRCPORT)); + airoha_fe_rmw(eth, + REG_SP_DFT_CPORT(HSGMII_LAN_PCIE0_SRCPORT >> 3), + SP_CPORT_PCIE0_MASK, + FIELD_PREP(SP_CPORT_PCIE0_MASK, + FE_PSE_PORT_CDM2)); + } else { + /* FIXME: handle XSI_USB_PORT */ + airoha_fe_rmw(eth, REG_SRC_PORT_FC_MAP6, + FC_ID_OF_SRC_PORT24_MASK, + FIELD_PREP(FC_ID_OF_SRC_PORT24_MASK, 2)); + airoha_fe_rmw(eth, REG_FE_WAN_PORT, + WAN1_EN_MASK | WAN1_MASK | WAN0_MASK, + FIELD_PREP(WAN0_MASK, HSGMII_LAN_ETH_SRCPORT)); + airoha_fe_rmw(eth, + REG_SP_DFT_CPORT(HSGMII_LAN_ETH_SRCPORT >> 3), + SP_CPORT_ETH_MASK, + FIELD_PREP(SP_CPORT_ETH_MASK, FE_PSE_PORT_CDM2)); + } +} + static int airoha_dev_init(struct net_device *dev) { struct airoha_gdm_port *port = netdev_priv(dev); struct airoha_eth *eth = port->qdma->eth; + u32 pse_port; airoha_set_macaddr(port, dev->dev_addr); - airoha_set_gdm_port_fwd_cfg(eth, REG_GDM_FWD_CFG(port->id), - FE_PSE_PORT_PPE1); + + switch (port->id) { + case 3: + case 4: + /* If GDM2 is active we can't enable loopback */ + if (!eth->ports[1]) + airhoha_set_gdm2_loopback(port); + fallthrough; + case 2: + pse_port = FE_PSE_PORT_PPE2; + break; + default: + pse_port = FE_PSE_PORT_PPE1; + break; + } + + airoha_set_gdm_port_fwd_cfg(eth, REG_GDM_FWD_CFG(port->id), pse_port); return 0; } diff --git a/drivers/net/ethernet/airoha/airoha_eth.h b/drivers/net/ethernet/airoha/airoha_eth.h index db25a6d3b2cd..a59ff6e41890 100644 --- a/drivers/net/ethernet/airoha/airoha_eth.h +++ b/drivers/net/ethernet/airoha/airoha_eth.h @@ -67,6 +67,13 @@ enum { QDMA_INT_REG_MAX }; +enum { + HSGMII_LAN_PCIE0_SRCPORT = 0x16, + HSGMII_LAN_PCIE1_SRCPORT, + HSGMII_LAN_ETH_SRCPORT, + HSGMII_LAN_USB_SRCPORT, +}; + enum { XSI_PCIE0_VIP_PORT_MASK = BIT(22), XSI_PCIE1_VIP_PORT_MASK = BIT(23), diff --git a/drivers/net/ethernet/airoha/airoha_ppe.c b/drivers/net/ethernet/airoha/airoha_ppe.c index 37074121224a..de291f44cf14 100644 --- a/drivers/net/ethernet/airoha/airoha_ppe.c +++ b/drivers/net/ethernet/airoha/airoha_ppe.c @@ -216,7 +216,8 @@ static int airoha_ppe_foe_entry_prepare(struct airoha_foe_entry *hwe, AIROHA_FOE_IB1_BIND_TTL; hwe->ib1 = val; - val = FIELD_PREP(AIROHA_FOE_IB2_PORT_AG, 0x1f); + val = FIELD_PREP(AIROHA_FOE_IB2_PORT_AG, 0x1f) | + AIROHA_FOE_IB2_PSE_QOS; if (dsa_port >= 0) val |= FIELD_PREP(AIROHA_FOE_IB2_NBQ, dsa_port); @@ -224,14 +225,13 @@ static int airoha_ppe_foe_entry_prepare(struct airoha_foe_entry *hwe, struct airoha_gdm_port *port = netdev_priv(dev); u8 pse_port; - pse_port = port->id == 4 ? 
FE_PSE_PORT_GDM4 : port->id;
+		if (dsa_port >= 0)
+			pse_port = port->id == 4 ? FE_PSE_PORT_GDM4 : port->id;
+		else
+			pse_port = 2; /* uplink relies on GDM2 loopback */
 		val |= FIELD_PREP(AIROHA_FOE_IB2_PSE_PORT, pse_port);
 	}
 
-	/* FIXME: implement QoS support setting pse_port to 2 (loopback)
-	 * for uplink and setting qos bit in ib2
-	 */
-
 	if (is_multicast_ether_addr(data->eth.h_dest))
 		val |= AIROHA_FOE_IB2_MULTICAST;
diff --git a/drivers/net/ethernet/airoha/airoha_regs.h b/drivers/net/ethernet/airoha/airoha_regs.h
index 6cc64c60953a..1aa06cdffe23 100644
--- a/drivers/net/ethernet/airoha/airoha_regs.h
+++ b/drivers/net/ethernet/airoha/airoha_regs.h
@@ -38,6 +38,12 @@
 #define FE_RST_CORE_MASK BIT(0)
 
 #define REG_FE_FOE_TS 0x0010
+
+#define REG_FE_WAN_PORT 0x0024
+#define WAN1_EN_MASK BIT(16)
+#define WAN1_MASK GENMASK(12, 8)
+#define WAN0_MASK GENMASK(4, 0)
+
 #define REG_FE_WAN_MAC_H 0x0030
 #define REG_FE_LAN_MAC_H 0x0040
 
@@ -126,6 +132,7 @@
 #define GDM_IP4_CKSUM BIT(22)
 #define GDM_TCP_CKSUM BIT(21)
 #define GDM_UDP_CKSUM BIT(20)
+#define GDM_STRIP_CRC BIT(16)
 #define GDM_UCFQ_MASK GENMASK(15, 12)
 #define GDM_BCFQ_MASK GENMASK(11, 8)
 #define GDM_MCFQ_MASK GENMASK(7, 4)
@@ -139,6 +146,16 @@
 #define GDM_SHORT_LEN_MASK GENMASK(13, 0)
 #define GDM_LONG_LEN_MASK GENMASK(29, 16)
 
+#define REG_GDM_LPBK_CFG(_n) (GDM_BASE(_n) + 0x1c)
+#define LPBK_GAP_MASK GENMASK(31, 24)
+#define LPBK_LEN_MASK GENMASK(23, 10)
+#define LPBK_CHAN_MASK GENMASK(8, 4)
+#define LPBK_MODE_MASK GENMASK(3, 1)
+#define LPBK_EN_MASK BIT(0)
+
+#define REG_GDM_TXCHN_EN(_n) (GDM_BASE(_n) + 0x24)
+#define REG_GDM_RXCHN_EN(_n) (GDM_BASE(_n) + 0x28)
+
 #define REG_FE_CPORT_CFG (GDM1_BASE + 0x40)
 #define FE_CPORT_PAD BIT(26)
 #define FE_CPORT_PORT_XFC_MASK BIT(25)
@@ -351,6 +368,18 @@
 
 #define REG_MC_VLAN_DATA 0x2108
 
+#define REG_SP_DFT_CPORT(_n) (0x20e0 + ((_n) << 2))
+#define SP_CPORT_PCIE1_MASK GENMASK(31, 28)
+#define SP_CPORT_PCIE0_MASK GENMASK(27, 24)
+#define SP_CPORT_USB_MASK GENMASK(7, 4)
+#define SP_CPORT_ETH_MASK GENMASK(7, 4)
+
+#define REG_SRC_PORT_FC_MAP6 0x2298
+#define FC_ID_OF_SRC_PORT27_MASK GENMASK(28, 24)
+#define FC_ID_OF_SRC_PORT26_MASK GENMASK(20, 16)
+#define FC_ID_OF_SRC_PORT25_MASK GENMASK(12, 8)
+#define FC_ID_OF_SRC_PORT24_MASK GENMASK(4, 0)
+
 #define REG_CDM5_RX_OQ1_DROP_CNT 0x29d4
 
 /* QDMA */
-- 
2.51.0

From 3fe15c640f3808c3faf235553c67c867d1389e5c Mon Sep 17 00:00:00 2001
From: Lorenzo Bianconi
Date: Fri, 28 Feb 2025 11:54:23 +0100
Subject: [PATCH 09/16] net: airoha: Introduce PPE debugfs support

Similar to the PPE support for MediaTek devices, introduce PPE debugfs
support in order to dump bound and unbound flows.
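As a usage sketch (the mount step assumes debugfs is not already
mounted at the default /sys/kernel/debug location, and the actual
output depends on the flows currently programmed in the FOE table),
the dump can then be read with:

  # mount -t debugfs none /sys/kernel/debug
  # cat /sys/kernel/debug/ppe/entries    (every non-invalid FOE entry)
  # cat /sys/kernel/debug/ppe/bind       (bound, i.e. BND, entries only)

The "ppe" directory and the "entries"/"bind" file names match the
debugfs_create_dir()/debugfs_create_file() calls added by this patch.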
Signed-off-by: Lorenzo Bianconi
Signed-off-by: Paolo Abeni
---
 drivers/net/ethernet/airoha/Makefile | 1 +
 drivers/net/ethernet/airoha/airoha_eth.h | 14 ++
 drivers/net/ethernet/airoha/airoha_ppe.c | 17 +-
 .../net/ethernet/airoha/airoha_ppe_debugfs.c | 181 ++++++++++++++++++
 4 files changed, 209 insertions(+), 4 deletions(-)
 create mode 100644 drivers/net/ethernet/airoha/airoha_ppe_debugfs.c

diff --git a/drivers/net/ethernet/airoha/Makefile b/drivers/net/ethernet/airoha/Makefile
index 6deff2f16229..94468053e34b 100644
--- a/drivers/net/ethernet/airoha/Makefile
+++ b/drivers/net/ethernet/airoha/Makefile
@@ -5,4 +5,5 @@
 obj-$(CONFIG_NET_AIROHA) += airoha-eth.o
 airoha-eth-y := airoha_eth.o airoha_ppe.o
+airoha-eth-$(CONFIG_DEBUG_FS) += airoha_ppe_debugfs.o
 obj-$(CONFIG_NET_AIROHA_NPU) += airoha_npu.o
diff --git a/drivers/net/ethernet/airoha/airoha_eth.h b/drivers/net/ethernet/airoha/airoha_eth.h
index a59ff6e41890..b7a3bd7a76b7 100644
--- a/drivers/net/ethernet/airoha/airoha_eth.h
+++ b/drivers/net/ethernet/airoha/airoha_eth.h
@@ -7,6 +7,7 @@
 #ifndef AIROHA_ETH_H
 #define AIROHA_ETH_H
 
+#include <linux/debugfs.h>
 #include
 #include
 #include
@@ -480,6 +481,8 @@ struct airoha_ppe {
 	struct hlist_head *foe_flow;
 	u16 foe_check_time[PPE_NUM_ENTRIES];
+
+	struct dentry *debugfs_dir;
 };
 
 struct airoha_eth {
@@ -533,5 +536,16 @@ int airoha_ppe_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
 				 void *cb_priv);
 int airoha_ppe_init(struct airoha_eth *eth);
 void airoha_ppe_deinit(struct airoha_eth *eth);
+struct airoha_foe_entry *airoha_ppe_foe_get_entry(struct airoha_ppe *ppe,
+						  u32 hash);
+
+#ifdef CONFIG_DEBUG_FS
+int airoha_ppe_debugfs_init(struct airoha_ppe *ppe);
+#else
+static inline int airoha_ppe_debugfs_init(struct airoha_ppe *ppe)
+{
+	return 0;
+}
+#endif
 
 #endif /* AIROHA_ETH_H */
diff --git a/drivers/net/ethernet/airoha/airoha_ppe.c b/drivers/net/ethernet/airoha/airoha_ppe.c
index de291f44cf14..8b55e871352d 100644
--- a/drivers/net/ethernet/airoha/airoha_ppe.c
+++ b/drivers/net/ethernet/airoha/airoha_ppe.c
@@ -390,8 +390,8 @@ static u32 airoha_ppe_foe_get_entry_hash(struct airoha_foe_entry *hwe)
 	return hash;
 }
 
-static struct airoha_foe_entry *
-airoha_ppe_foe_get_entry(struct airoha_ppe *ppe, u32 hash)
+struct airoha_foe_entry *airoha_ppe_foe_get_entry(struct airoha_ppe *ppe,
+						  u32 hash)
 {
 	if (hash < PPE_SRAM_NUM_ENTRIES) {
 		u32 *hwe = ppe->foe + hash * sizeof(struct airoha_foe_entry);
@@ -861,7 +861,7 @@ void airoha_ppe_check_skb(struct airoha_ppe *ppe, u16 hash)
 int airoha_ppe_init(struct airoha_eth *eth)
 {
 	struct airoha_ppe *ppe;
-	int foe_size;
+	int foe_size, err;
 
 	ppe = devm_kzalloc(eth->dev, sizeof(*ppe), GFP_KERNEL);
 	if (!ppe)
@@ -882,7 +882,15 @@ int airoha_ppe_init(struct airoha_eth *eth)
 	if (!ppe->foe_flow)
 		return -ENOMEM;
 
-	return rhashtable_init(&eth->flow_table, &airoha_flow_table_params);
+	err = rhashtable_init(&eth->flow_table, &airoha_flow_table_params);
+	if (err)
+		return err;
+
+	err = airoha_ppe_debugfs_init(ppe);
+	if (err)
+		rhashtable_destroy(&eth->flow_table);
+
+	return err;
 }
 
 void airoha_ppe_deinit(struct airoha_eth *eth)
@@ -898,4 +906,5 @@ void airoha_ppe_deinit(struct airoha_eth *eth)
 	rcu_read_unlock();
 
 	rhashtable_destroy(&eth->flow_table);
+	debugfs_remove(eth->ppe->debugfs_dir);
 }
diff --git a/drivers/net/ethernet/airoha/airoha_ppe_debugfs.c b/drivers/net/ethernet/airoha/airoha_ppe_debugfs.c
new file mode 100644
index 000000000000..3cdc6fd53fc7
--- /dev/null
+++ b/drivers/net/ethernet/airoha/airoha_ppe_debugfs.c
@@ -0,0 +1,181 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright
(c) 2025 AIROHA Inc + * Author: Lorenzo Bianconi + */ + +#include "airoha_eth.h" + +static void airoha_debugfs_ppe_print_tuple(struct seq_file *m, + void *src_addr, void *dest_addr, + u16 *src_port, u16 *dest_port, + bool ipv6) +{ + __be32 n_addr[IPV6_ADDR_WORDS]; + + if (ipv6) { + ipv6_addr_cpu_to_be32(n_addr, src_addr); + seq_printf(m, "%pI6", n_addr); + } else { + seq_printf(m, "%pI4h", src_addr); + } + if (src_port) + seq_printf(m, ":%d", *src_port); + + seq_puts(m, "->"); + + if (ipv6) { + ipv6_addr_cpu_to_be32(n_addr, dest_addr); + seq_printf(m, "%pI6", n_addr); + } else { + seq_printf(m, "%pI4h", dest_addr); + } + if (dest_port) + seq_printf(m, ":%d", *dest_port); +} + +static int airoha_ppe_debugfs_foe_show(struct seq_file *m, void *private, + bool bind) +{ + static const char *const ppe_type_str[] = { + [PPE_PKT_TYPE_IPV4_HNAPT] = "IPv4 5T", + [PPE_PKT_TYPE_IPV4_ROUTE] = "IPv4 3T", + [PPE_PKT_TYPE_BRIDGE] = "L2B", + [PPE_PKT_TYPE_IPV4_DSLITE] = "DS-LITE", + [PPE_PKT_TYPE_IPV6_ROUTE_3T] = "IPv6 3T", + [PPE_PKT_TYPE_IPV6_ROUTE_5T] = "IPv6 5T", + [PPE_PKT_TYPE_IPV6_6RD] = "6RD", + }; + static const char *const ppe_state_str[] = { + [AIROHA_FOE_STATE_INVALID] = "INV", + [AIROHA_FOE_STATE_UNBIND] = "UNB", + [AIROHA_FOE_STATE_BIND] = "BND", + [AIROHA_FOE_STATE_FIN] = "FIN", + }; + struct airoha_ppe *ppe = m->private; + int i; + + for (i = 0; i < PPE_NUM_ENTRIES; i++) { + const char *state_str, *type_str = "UNKNOWN"; + void *src_addr = NULL, *dest_addr = NULL; + u16 *src_port = NULL, *dest_port = NULL; + struct airoha_foe_mac_info_common *l2; + unsigned char h_source[ETH_ALEN] = {}; + unsigned char h_dest[ETH_ALEN]; + struct airoha_foe_entry *hwe; + u32 type, state, ib2, data; + bool ipv6 = false; + + hwe = airoha_ppe_foe_get_entry(ppe, i); + if (!hwe) + continue; + + state = FIELD_GET(AIROHA_FOE_IB1_BIND_STATE, hwe->ib1); + if (!state) + continue; + + if (bind && state != AIROHA_FOE_STATE_BIND) + continue; + + state_str = ppe_state_str[state % ARRAY_SIZE(ppe_state_str)]; + type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, hwe->ib1); + if (type < ARRAY_SIZE(ppe_type_str) && ppe_type_str[type]) + type_str = ppe_type_str[type]; + + seq_printf(m, "%05x %s %7s", i, state_str, type_str); + + switch (type) { + case PPE_PKT_TYPE_IPV4_HNAPT: + case PPE_PKT_TYPE_IPV4_DSLITE: + src_port = &hwe->ipv4.orig_tuple.src_port; + dest_port = &hwe->ipv4.orig_tuple.dest_port; + fallthrough; + case PPE_PKT_TYPE_IPV4_ROUTE: + src_addr = &hwe->ipv4.orig_tuple.src_ip; + dest_addr = &hwe->ipv4.orig_tuple.dest_ip; + break; + case PPE_PKT_TYPE_IPV6_ROUTE_5T: + src_port = &hwe->ipv6.src_port; + dest_port = &hwe->ipv6.dest_port; + fallthrough; + case PPE_PKT_TYPE_IPV6_ROUTE_3T: + case PPE_PKT_TYPE_IPV6_6RD: + src_addr = &hwe->ipv6.src_ip; + dest_addr = &hwe->ipv6.dest_ip; + ipv6 = true; + break; + default: + break; + } + + if (src_addr && dest_addr) { + seq_puts(m, " orig="); + airoha_debugfs_ppe_print_tuple(m, src_addr, dest_addr, + src_port, dest_port, ipv6); + } + + switch (type) { + case PPE_PKT_TYPE_IPV4_HNAPT: + case PPE_PKT_TYPE_IPV4_DSLITE: + src_port = &hwe->ipv4.new_tuple.src_port; + dest_port = &hwe->ipv4.new_tuple.dest_port; + fallthrough; + case PPE_PKT_TYPE_IPV4_ROUTE: + src_addr = &hwe->ipv4.new_tuple.src_ip; + dest_addr = &hwe->ipv4.new_tuple.dest_ip; + seq_puts(m, " new="); + airoha_debugfs_ppe_print_tuple(m, src_addr, dest_addr, + src_port, dest_port, + ipv6); + break; + default: + break; + } + + if (type >= PPE_PKT_TYPE_IPV6_ROUTE_3T) { + data = hwe->ipv6.data; + ib2 = hwe->ipv6.ib2; + l2 = 
&hwe->ipv6.l2;
+		} else {
+			data = hwe->ipv4.data;
+			ib2 = hwe->ipv4.ib2;
+			l2 = &hwe->ipv4.l2.common;
+			*((__be16 *)&h_source[4]) =
+				cpu_to_be16(hwe->ipv4.l2.src_mac_lo);
+		}
+
+		*((__be32 *)h_dest) = cpu_to_be32(l2->dest_mac_hi);
+		*((__be16 *)&h_dest[4]) = cpu_to_be16(l2->dest_mac_lo);
+		*((__be32 *)h_source) = cpu_to_be32(l2->src_mac_hi);
+
+		seq_printf(m, " eth=%pM->%pM etype=%04x data=%08x"
+			   " vlan=%d,%d ib1=%08x ib2=%08x\n",
+			   h_source, h_dest, l2->etype, data,
+			   l2->vlan1, l2->vlan2, hwe->ib1, ib2);
+	}
+
+	return 0;
+}
+
+static int airoha_ppe_debugfs_foe_all_show(struct seq_file *m, void *private)
+{
+	return airoha_ppe_debugfs_foe_show(m, private, false);
+}
+DEFINE_SHOW_ATTRIBUTE(airoha_ppe_debugfs_foe_all);
+
+static int airoha_ppe_debugfs_foe_bind_show(struct seq_file *m, void *private)
+{
+	return airoha_ppe_debugfs_foe_show(m, private, true);
+}
+DEFINE_SHOW_ATTRIBUTE(airoha_ppe_debugfs_foe_bind);
+
+int airoha_ppe_debugfs_init(struct airoha_ppe *ppe)
+{
+	ppe->debugfs_dir = debugfs_create_dir("ppe", NULL);
+	debugfs_create_file("entries", 0444, ppe->debugfs_dir, ppe,
+			    &airoha_ppe_debugfs_foe_all_fops);
+	debugfs_create_file("bind", 0444, ppe->debugfs_dir, ppe,
+			    &airoha_ppe_debugfs_foe_bind_fops);
+
+	return 0;
+}
-- 
2.51.0

From c0bf9bf31e79273196cbdaa045972617fdd1a498 Mon Sep 17 00:00:00 2001
From: Jijie Shao
Date: Fri, 28 Feb 2025 19:54:06 +0800
Subject: [PATCH 10/16] net: hibmcge: Add support for dump statistics

The driver supports many hw statistics. This patch supports dumping
the statistics through ethtool_ops and ndo.get_stats64().

The hw statistics registers are u32. To prevent them from overflowing,
the driver dumps the statistics every 30 seconds in a scheduled task.

Signed-off-by: Jijie Shao
Signed-off-by: Paolo Abeni
---
 .../ethernet/hisilicon/hibmcge/hbg_common.h | 111 +++++++
 .../ethernet/hisilicon/hibmcge/hbg_ethtool.c | 297 ++++++++++++++++++
 .../ethernet/hisilicon/hibmcge/hbg_ethtool.h | 5 +
 .../net/ethernet/hisilicon/hibmcge/hbg_main.c | 70 +++++
 .../net/ethernet/hisilicon/hibmcge/hbg_reg.h | 96 ++++++
 .../net/ethernet/hisilicon/hibmcge/hbg_txrx.c | 164 +++++++++-
 6 files changed, 741 insertions(+), 2 deletions(-)

diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_common.h b/drivers/net/ethernet/hisilicon/hibmcge/hbg_common.h
index b4300d8ea4ad..920514a8e29a 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_common.h
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_common.h
@@ -142,6 +142,115 @@ struct hbg_user_def {
 	struct ethtool_pauseparam pause_param;
 };
 
+struct hbg_stats {
+	u64 rx_desc_drop;
+	u64 rx_desc_l2_err_cnt;
+	u64 rx_desc_pkt_len_err_cnt;
+	u64 rx_desc_l3l4_err_cnt;
+	u64 rx_desc_l3_wrong_head_cnt;
+	u64 rx_desc_l3_csum_err_cnt;
+	u64 rx_desc_l3_len_err_cnt;
+	u64 rx_desc_l3_zero_ttl_cnt;
+	u64 rx_desc_l3_other_cnt;
+	u64 rx_desc_l4_err_cnt;
+	u64 rx_desc_l4_wrong_head_cnt;
+	u64 rx_desc_l4_len_err_cnt;
+	u64 rx_desc_l4_csum_err_cnt;
+	u64 rx_desc_l4_zero_port_num_cnt;
+	u64 rx_desc_l4_other_cnt;
+	u64 rx_desc_frag_cnt;
+	u64 rx_desc_ip_ver_err_cnt;
+	u64 rx_desc_ipv4_pkt_cnt;
+	u64 rx_desc_ipv6_pkt_cnt;
+	u64 rx_desc_no_ip_pkt_cnt;
+	u64 rx_desc_ip_pkt_cnt;
+	u64 rx_desc_tcp_pkt_cnt;
+	u64 rx_desc_udp_pkt_cnt;
+	u64 rx_desc_vlan_pkt_cnt;
+	u64 rx_desc_icmp_pkt_cnt;
+	u64 rx_desc_arp_pkt_cnt;
+	u64 rx_desc_rarp_pkt_cnt;
+	u64 rx_desc_multicast_pkt_cnt;
+	u64 rx_desc_broadcast_pkt_cnt;
+	u64 rx_desc_ipsec_pkt_cnt;
+	u64 rx_desc_ip_opt_pkt_cnt;
+	u64 rx_desc_key_not_match_cnt;
+
+	u64 rx_octets_total_ok_cnt;
+	
u64 rx_uc_pkt_cnt; + u64 rx_mc_pkt_cnt; + u64 rx_bc_pkt_cnt; + u64 rx_vlan_pkt_cnt; + u64 rx_octets_bad_cnt; + u64 rx_octets_total_filt_cnt; + u64 rx_filt_pkt_cnt; + u64 rx_trans_pkt_cnt; + u64 rx_framesize_64; + u64 rx_framesize_65_127; + u64 rx_framesize_128_255; + u64 rx_framesize_256_511; + u64 rx_framesize_512_1023; + u64 rx_framesize_1024_1518; + u64 rx_framesize_bt_1518; + u64 rx_fcs_error_cnt; + u64 rx_data_error_cnt; + u64 rx_align_error_cnt; + u64 rx_pause_macctl_frame_cnt; + u64 rx_unknown_macctl_frame_cnt; + /* crc ok, > max_frm_size, < 2max_frm_size */ + u64 rx_frame_long_err_cnt; + /* crc fail, > max_frm_size, < 2max_frm_size */ + u64 rx_jabber_err_cnt; + /* > 2max_frm_size */ + u64 rx_frame_very_long_err_cnt; + /* < 64byte, >= short_runts_thr */ + u64 rx_frame_runt_err_cnt; + /* < short_runts_thr */ + u64 rx_frame_short_err_cnt; + /* PCU: dropped when the RX FIFO is full.*/ + u64 rx_overflow_cnt; + /* GMAC: the count of overflows of the RX FIFO */ + u64 rx_overrun_cnt; + /* PCU: the count of buffer alloc errors in RX */ + u64 rx_bufrq_err_cnt; + /* PCU: the count of write descriptor errors in RX */ + u64 rx_we_err_cnt; + /* GMAC: the count of pkts that contain PAD but length is not 64 */ + u64 rx_lengthfield_err_cnt; + u64 rx_fail_comma_cnt; + + u64 rx_dma_err_cnt; + + u64 tx_octets_total_ok_cnt; + u64 tx_uc_pkt_cnt; + u64 tx_mc_pkt_cnt; + u64 tx_bc_pkt_cnt; + u64 tx_vlan_pkt_cnt; + u64 tx_octets_bad_cnt; + u64 tx_trans_pkt_cnt; + u64 tx_pause_frame_cnt; + u64 tx_framesize_64; + u64 tx_framesize_65_127; + u64 tx_framesize_128_255; + u64 tx_framesize_256_511; + u64 tx_framesize_512_1023; + u64 tx_framesize_1024_1518; + u64 tx_framesize_bt_1518; + /* GMAC: the count of times that frames fail to be transmitted + * due to internal errors. 
+ */ + u64 tx_underrun_err_cnt; + u64 tx_add_cs_fail_cnt; + /* PCU: the count of buffer free errors in TX */ + u64 tx_bufrl_err_cnt; + u64 tx_crc_err_cnt; + u64 tx_drop_cnt; + u64 tx_excessive_length_drop_cnt; + + u64 tx_timeout_cnt; + u64 tx_dma_err_cnt; +}; + struct hbg_priv { struct net_device *netdev; struct pci_dev *pdev; @@ -155,6 +264,8 @@ struct hbg_priv { struct hbg_mac_filter filter; enum hbg_reset_type reset_type; struct hbg_user_def user_def; + struct hbg_stats stats; + struct delayed_work service_task; }; #endif diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.c index 00364a438ec2..f5be8d0ef611 100644 --- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.c @@ -9,6 +9,135 @@ #include "hbg_ethtool.h" #include "hbg_hw.h" +struct hbg_ethtool_stats { + char name[ETH_GSTRING_LEN]; + unsigned long offset; + u32 reg; /* set to 0 if stats is not updated via dump reg */ +}; + +#define HBG_STATS_I(stats) { #stats, HBG_STATS_FIELD_OFF(stats), 0} +#define HBG_STATS_REG_I(stats, reg) { #stats, HBG_STATS_FIELD_OFF(stats), reg} + +static const struct hbg_ethtool_stats hbg_ethtool_stats_info[] = { + HBG_STATS_I(rx_desc_l2_err_cnt), + HBG_STATS_I(rx_desc_pkt_len_err_cnt), + HBG_STATS_I(rx_desc_l3_wrong_head_cnt), + HBG_STATS_I(rx_desc_l3_csum_err_cnt), + HBG_STATS_I(rx_desc_l3_len_err_cnt), + HBG_STATS_I(rx_desc_l3_zero_ttl_cnt), + HBG_STATS_I(rx_desc_l3_other_cnt), + HBG_STATS_I(rx_desc_l4_wrong_head_cnt), + HBG_STATS_I(rx_desc_l4_len_err_cnt), + HBG_STATS_I(rx_desc_l4_csum_err_cnt), + HBG_STATS_I(rx_desc_l4_zero_port_num_cnt), + HBG_STATS_I(rx_desc_l4_other_cnt), + HBG_STATS_I(rx_desc_ip_ver_err_cnt), + HBG_STATS_I(rx_desc_ipv4_pkt_cnt), + HBG_STATS_I(rx_desc_ipv6_pkt_cnt), + HBG_STATS_I(rx_desc_no_ip_pkt_cnt), + HBG_STATS_I(rx_desc_ip_pkt_cnt), + HBG_STATS_I(rx_desc_tcp_pkt_cnt), + HBG_STATS_I(rx_desc_udp_pkt_cnt), + HBG_STATS_I(rx_desc_vlan_pkt_cnt), + HBG_STATS_I(rx_desc_icmp_pkt_cnt), + HBG_STATS_I(rx_desc_arp_pkt_cnt), + HBG_STATS_I(rx_desc_rarp_pkt_cnt), + HBG_STATS_I(rx_desc_multicast_pkt_cnt), + HBG_STATS_I(rx_desc_broadcast_pkt_cnt), + HBG_STATS_I(rx_desc_ipsec_pkt_cnt), + HBG_STATS_I(rx_desc_ip_opt_pkt_cnt), + HBG_STATS_I(rx_desc_key_not_match_cnt), + + HBG_STATS_REG_I(rx_octets_bad_cnt, HBG_REG_RX_OCTETS_BAD_ADDR), + HBG_STATS_REG_I(rx_octets_total_filt_cnt, + HBG_REG_RX_OCTETS_TOTAL_FILT_ADDR), + HBG_STATS_REG_I(rx_uc_pkt_cnt, HBG_REG_RX_UC_PKTS_ADDR), + HBG_STATS_REG_I(rx_vlan_pkt_cnt, HBG_REG_RX_TAGGED_ADDR), + HBG_STATS_REG_I(rx_filt_pkt_cnt, HBG_REG_RX_FILT_PKT_CNT_ADDR), + HBG_STATS_REG_I(rx_data_error_cnt, HBG_REG_RX_DATA_ERR_ADDR), + HBG_STATS_REG_I(rx_frame_long_err_cnt, HBG_REG_RX_LONG_ERRORS_ADDR), + HBG_STATS_REG_I(rx_jabber_err_cnt, HBG_REG_RX_JABBER_ERRORS_ADDR), + HBG_STATS_REG_I(rx_frame_very_long_err_cnt, + HBG_REG_RX_VERY_LONG_ERR_CNT_ADDR), + HBG_STATS_REG_I(rx_frame_runt_err_cnt, HBG_REG_RX_RUNT_ERR_CNT_ADDR), + HBG_STATS_REG_I(rx_frame_short_err_cnt, HBG_REG_RX_SHORT_ERR_CNT_ADDR), + HBG_STATS_REG_I(rx_overflow_cnt, HBG_REG_RX_OVER_FLOW_CNT_ADDR), + HBG_STATS_REG_I(rx_bufrq_err_cnt, HBG_REG_RX_BUFRQ_ERR_CNT_ADDR), + HBG_STATS_REG_I(rx_we_err_cnt, HBG_REG_RX_WE_ERR_CNT_ADDR), + HBG_STATS_REG_I(rx_overrun_cnt, HBG_REG_RX_OVERRUN_CNT_ADDR), + HBG_STATS_REG_I(rx_lengthfield_err_cnt, + HBG_REG_RX_LENGTHFIELD_ERR_CNT_ADDR), + HBG_STATS_REG_I(rx_fail_comma_cnt, HBG_REG_RX_FAIL_COMMA_CNT_ADDR), + HBG_STATS_I(rx_dma_err_cnt), + + 
HBG_STATS_REG_I(tx_uc_pkt_cnt, HBG_REG_TX_UC_PKTS_ADDR), + HBG_STATS_REG_I(tx_vlan_pkt_cnt, HBG_REG_TX_TAGGED_ADDR), + HBG_STATS_REG_I(tx_octets_bad_cnt, HBG_REG_OCTETS_TRANSMITTED_BAD_ADDR), + + HBG_STATS_REG_I(tx_underrun_err_cnt, HBG_REG_TX_UNDERRUN_ADDR), + HBG_STATS_REG_I(tx_add_cs_fail_cnt, HBG_REG_TX_CS_FAIL_CNT_ADDR), + HBG_STATS_REG_I(tx_bufrl_err_cnt, HBG_REG_TX_BUFRL_ERR_CNT_ADDR), + HBG_STATS_REG_I(tx_crc_err_cnt, HBG_REG_TX_CRC_ERROR_ADDR), + HBG_STATS_REG_I(tx_drop_cnt, HBG_REG_TX_DROP_CNT_ADDR), + HBG_STATS_REG_I(tx_excessive_length_drop_cnt, + HBG_REG_TX_EXCESSIVE_LENGTH_DROP_ADDR), + HBG_STATS_I(tx_dma_err_cnt), + HBG_STATS_I(tx_timeout_cnt), +}; + +static const struct hbg_ethtool_stats hbg_ethtool_rmon_stats_info[] = { + HBG_STATS_I(rx_desc_frag_cnt), + HBG_STATS_REG_I(rx_framesize_64, HBG_REG_RX_PKTS_64OCTETS_ADDR), + HBG_STATS_REG_I(rx_framesize_65_127, + HBG_REG_RX_PKTS_65TO127OCTETS_ADDR), + HBG_STATS_REG_I(rx_framesize_128_255, + HBG_REG_RX_PKTS_128TO255OCTETS_ADDR), + HBG_STATS_REG_I(rx_framesize_256_511, + HBG_REG_RX_PKTS_256TO511OCTETS_ADDR), + HBG_STATS_REG_I(rx_framesize_512_1023, + HBG_REG_RX_PKTS_512TO1023OCTETS_ADDR), + HBG_STATS_REG_I(rx_framesize_1024_1518, + HBG_REG_RX_PKTS_1024TO1518OCTETS_ADDR), + HBG_STATS_REG_I(rx_framesize_bt_1518, + HBG_REG_RX_PKTS_1519TOMAXOCTETS_ADDR), + HBG_STATS_REG_I(tx_framesize_64, HBG_REG_TX_PKTS_64OCTETS_ADDR), + HBG_STATS_REG_I(tx_framesize_65_127, + HBG_REG_TX_PKTS_65TO127OCTETS_ADDR), + HBG_STATS_REG_I(tx_framesize_128_255, + HBG_REG_TX_PKTS_128TO255OCTETS_ADDR), + HBG_STATS_REG_I(tx_framesize_256_511, + HBG_REG_TX_PKTS_256TO511OCTETS_ADDR), + HBG_STATS_REG_I(tx_framesize_512_1023, + HBG_REG_TX_PKTS_512TO1023OCTETS_ADDR), + HBG_STATS_REG_I(tx_framesize_1024_1518, + HBG_REG_TX_PKTS_1024TO1518OCTETS_ADDR), + HBG_STATS_REG_I(tx_framesize_bt_1518, + HBG_REG_TX_PKTS_1519TOMAXOCTETS_ADDR), +}; + +static const struct hbg_ethtool_stats hbg_ethtool_mac_stats_info[] = { + HBG_STATS_REG_I(rx_mc_pkt_cnt, HBG_REG_RX_MC_PKTS_ADDR), + HBG_STATS_REG_I(rx_bc_pkt_cnt, HBG_REG_RX_BC_PKTS_ADDR), + HBG_STATS_REG_I(rx_align_error_cnt, HBG_REG_RX_ALIGN_ERRORS_ADDR), + HBG_STATS_REG_I(rx_octets_total_ok_cnt, + HBG_REG_RX_OCTETS_TOTAL_OK_ADDR), + HBG_STATS_REG_I(rx_trans_pkt_cnt, HBG_REG_RX_TRANS_PKG_CNT_ADDR), + HBG_STATS_REG_I(rx_fcs_error_cnt, HBG_REG_RX_FCS_ERRORS_ADDR), + HBG_STATS_REG_I(tx_mc_pkt_cnt, HBG_REG_TX_MC_PKTS_ADDR), + HBG_STATS_REG_I(tx_bc_pkt_cnt, HBG_REG_TX_BC_PKTS_ADDR), + HBG_STATS_REG_I(tx_octets_total_ok_cnt, + HBG_REG_OCTETS_TRANSMITTED_OK_ADDR), + HBG_STATS_REG_I(tx_trans_pkt_cnt, HBG_REG_TX_TRANS_PKG_CNT_ADDR), +}; + +static const struct hbg_ethtool_stats hbg_ethtool_ctrl_stats_info[] = { + HBG_STATS_REG_I(rx_pause_macctl_frame_cnt, + HBG_REG_RX_PAUSE_MACCTL_FRAMCOUNTER_ADDR), + HBG_STATS_REG_I(tx_pause_frame_cnt, HBG_REG_TX_PAUSE_FRAMES_ADDR), + HBG_STATS_REG_I(rx_unknown_macctl_frame_cnt, + HBG_REG_RX_UNKNOWN_MACCTL_FRAMCOUNTER_ADDR), +}; + enum hbg_reg_dump_type { HBG_DUMP_REG_TYPE_SPEC = 0, HBG_DUMP_REG_TYPE_MDIO, @@ -180,6 +309,167 @@ static int hbg_ethtool_reset(struct net_device *netdev, u32 *flags) return hbg_reset(priv); } +static void hbg_update_stats_by_info(struct hbg_priv *priv, + const struct hbg_ethtool_stats *info, + u32 info_len) +{ + const struct hbg_ethtool_stats *stats; + u32 i; + + for (i = 0; i < info_len; i++) { + stats = &info[i]; + if (!stats->reg) + continue; + + HBG_STATS_U(&priv->stats, stats->offset, + hbg_reg_read(priv, stats->reg)); + } +} + +void hbg_update_stats(struct hbg_priv *priv) +{ 
+ hbg_update_stats_by_info(priv, hbg_ethtool_stats_info, + ARRAY_SIZE(hbg_ethtool_stats_info)); + hbg_update_stats_by_info(priv, hbg_ethtool_rmon_stats_info, + ARRAY_SIZE(hbg_ethtool_rmon_stats_info)); + hbg_update_stats_by_info(priv, hbg_ethtool_mac_stats_info, + ARRAY_SIZE(hbg_ethtool_mac_stats_info)); + hbg_update_stats_by_info(priv, hbg_ethtool_ctrl_stats_info, + ARRAY_SIZE(hbg_ethtool_ctrl_stats_info)); +} + +static int hbg_ethtool_get_sset_count(struct net_device *netdev, int stringset) +{ + if (stringset != ETH_SS_STATS) + return -EOPNOTSUPP; + + return ARRAY_SIZE(hbg_ethtool_stats_info); +} + +static void hbg_ethtool_get_strings(struct net_device *netdev, + u32 stringset, u8 *data) +{ + u32 i; + + if (stringset != ETH_SS_STATS) + return; + + for (i = 0; i < ARRAY_SIZE(hbg_ethtool_stats_info); i++) + ethtool_puts(&data, hbg_ethtool_stats_info[i].name); +} + +static void hbg_ethtool_get_stats(struct net_device *netdev, + struct ethtool_stats *stats, u64 *data) +{ + struct hbg_priv *priv = netdev_priv(netdev); + u32 i; + + hbg_update_stats(priv); + for (i = 0; i < ARRAY_SIZE(hbg_ethtool_stats_info); i++) + *data++ = HBG_STATS_R(&priv->stats, + hbg_ethtool_stats_info[i].offset); +} + +static void hbg_ethtool_get_pause_stats(struct net_device *netdev, + struct ethtool_pause_stats *epstats) +{ + struct hbg_priv *priv = netdev_priv(netdev); + struct hbg_stats *stats = &priv->stats; + + hbg_update_stats(priv); + epstats->rx_pause_frames = stats->rx_pause_macctl_frame_cnt; + epstats->tx_pause_frames = stats->tx_pause_frame_cnt; +} + +static void hbg_ethtool_get_eth_mac_stats(struct net_device *netdev, + struct ethtool_eth_mac_stats *emstats) +{ + struct hbg_priv *priv = netdev_priv(netdev); + struct hbg_stats *stats = &priv->stats; + + hbg_update_stats(priv); + emstats->FramesTransmittedOK = stats->tx_trans_pkt_cnt; + emstats->FramesReceivedOK = stats->rx_trans_pkt_cnt; + emstats->FrameCheckSequenceErrors = stats->rx_fcs_error_cnt; + emstats->AlignmentErrors = stats->rx_align_error_cnt; + emstats->OctetsTransmittedOK = stats->tx_octets_total_ok_cnt; + emstats->OctetsReceivedOK = stats->rx_octets_total_ok_cnt; + + emstats->MulticastFramesXmittedOK = stats->tx_mc_pkt_cnt; + emstats->BroadcastFramesXmittedOK = stats->tx_bc_pkt_cnt; + emstats->MulticastFramesReceivedOK = stats->rx_mc_pkt_cnt; + emstats->BroadcastFramesReceivedOK = stats->rx_bc_pkt_cnt; + emstats->InRangeLengthErrors = stats->rx_fcs_error_cnt + + stats->rx_jabber_err_cnt + + stats->rx_unknown_macctl_frame_cnt + + stats->rx_bufrq_err_cnt + + stats->rx_we_err_cnt; + emstats->OutOfRangeLengthField = stats->rx_frame_short_err_cnt + + stats->rx_frame_runt_err_cnt + + stats->rx_lengthfield_err_cnt + + stats->rx_frame_long_err_cnt + + stats->rx_frame_very_long_err_cnt; + emstats->FrameTooLongErrors = stats->rx_frame_long_err_cnt + + stats->rx_frame_very_long_err_cnt; +} + +static void +hbg_ethtool_get_eth_ctrl_stats(struct net_device *netdev, + struct ethtool_eth_ctrl_stats *ecstats) +{ + struct hbg_priv *priv = netdev_priv(netdev); + struct hbg_stats *s = &priv->stats; + + hbg_update_stats(priv); + ecstats->MACControlFramesTransmitted = s->tx_pause_frame_cnt; + ecstats->MACControlFramesReceived = s->rx_pause_macctl_frame_cnt; + ecstats->UnsupportedOpcodesReceived = s->rx_unknown_macctl_frame_cnt; +} + +static const struct ethtool_rmon_hist_range hbg_rmon_ranges[] = { + { 0, 64 }, + { 65, 127 }, + { 128, 255 }, + { 256, 511 }, + { 512, 1023 }, + { 1024, 1518 }, + { 1519, 4095 }, +}; + +static void +hbg_ethtool_get_rmon_stats(struct 
net_device *netdev, + struct ethtool_rmon_stats *rmon_stats, + const struct ethtool_rmon_hist_range **ranges) +{ + struct hbg_priv *priv = netdev_priv(netdev); + struct hbg_stats *stats = &priv->stats; + + hbg_update_stats(priv); + rmon_stats->undersize_pkts = stats->rx_frame_short_err_cnt + + stats->rx_frame_runt_err_cnt + + stats->rx_lengthfield_err_cnt; + rmon_stats->oversize_pkts = stats->rx_frame_long_err_cnt + + stats->rx_frame_very_long_err_cnt; + rmon_stats->fragments = stats->rx_desc_frag_cnt; + rmon_stats->hist[0] = stats->rx_framesize_64; + rmon_stats->hist[1] = stats->rx_framesize_65_127; + rmon_stats->hist[2] = stats->rx_framesize_128_255; + rmon_stats->hist[3] = stats->rx_framesize_256_511; + rmon_stats->hist[4] = stats->rx_framesize_512_1023; + rmon_stats->hist[5] = stats->rx_framesize_1024_1518; + rmon_stats->hist[6] = stats->rx_framesize_bt_1518; + + rmon_stats->hist_tx[0] = stats->tx_framesize_64; + rmon_stats->hist_tx[1] = stats->tx_framesize_65_127; + rmon_stats->hist_tx[2] = stats->tx_framesize_128_255; + rmon_stats->hist_tx[3] = stats->tx_framesize_256_511; + rmon_stats->hist_tx[4] = stats->tx_framesize_512_1023; + rmon_stats->hist_tx[5] = stats->tx_framesize_1024_1518; + rmon_stats->hist_tx[6] = stats->tx_framesize_bt_1518; + + *ranges = hbg_rmon_ranges; +} + static const struct ethtool_ops hbg_ethtool_ops = { .get_link = ethtool_op_get_link, .get_link_ksettings = phy_ethtool_get_link_ksettings, @@ -190,6 +480,13 @@ static const struct ethtool_ops hbg_ethtool_ops = { .set_pauseparam = hbg_ethtool_set_pauseparam, .reset = hbg_ethtool_reset, .nway_reset = phy_ethtool_nway_reset, + .get_sset_count = hbg_ethtool_get_sset_count, + .get_strings = hbg_ethtool_get_strings, + .get_ethtool_stats = hbg_ethtool_get_stats, + .get_pause_stats = hbg_ethtool_get_pause_stats, + .get_eth_mac_stats = hbg_ethtool_get_eth_mac_stats, + .get_eth_ctrl_stats = hbg_ethtool_get_eth_ctrl_stats, + .get_rmon_stats = hbg_ethtool_get_rmon_stats, }; void hbg_ethtool_set_ops(struct net_device *netdev) diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.h b/drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.h index 628707ec2686..e173155b146a 100644 --- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.h +++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.h @@ -6,6 +6,11 @@ #include +#define HBG_STATS_FIELD_OFF(f) (offsetof(struct hbg_stats, f)) +#define HBG_STATS_R(p, offset) (*(u64 *)((u8 *)(p) + (offset))) +#define HBG_STATS_U(p, offset, val) (HBG_STATS_R(p, offset) += (val)) + void hbg_ethtool_set_ops(struct net_device *netdev); +void hbg_update_stats(struct hbg_priv *priv); #endif diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_main.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_main.c index bb0f25ac9760..969df6020942 100644 --- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_main.c +++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_main.c @@ -214,6 +214,10 @@ static void hbg_net_tx_timeout(struct net_device *netdev, unsigned int txqueue) char *buf = ring->tout_log_buf; u32 pos = 0; + priv->stats.tx_timeout_cnt++; + + pos += scnprintf(buf + pos, HBG_TX_TIMEOUT_BUF_LEN - pos, + "tx_timeout cnt: %llu\n", priv->stats.tx_timeout_cnt); pos += scnprintf(buf + pos, HBG_TX_TIMEOUT_BUF_LEN - pos, "ring used num: %u, fifo used num: %u\n", hbg_get_queue_used_num(ring), @@ -226,6 +230,39 @@ static void hbg_net_tx_timeout(struct net_device *netdev, unsigned int txqueue) netdev_info(netdev, "%s", buf); } +static void hbg_net_get_stats(struct net_device *netdev, + struct rtnl_link_stats64 
*stats) +{ + struct hbg_priv *priv = netdev_priv(netdev); + struct hbg_stats *h_stats = &priv->stats; + + hbg_update_stats(priv); + dev_get_tstats64(netdev, stats); + + /* fifo empty */ + stats->tx_fifo_errors += h_stats->tx_drop_cnt; + + stats->tx_dropped += h_stats->tx_excessive_length_drop_cnt + + h_stats->tx_drop_cnt; + stats->tx_errors += h_stats->tx_add_cs_fail_cnt + + h_stats->tx_bufrl_err_cnt + + h_stats->tx_underrun_err_cnt + + h_stats->tx_crc_err_cnt; + stats->rx_errors += h_stats->rx_data_error_cnt; + stats->multicast += h_stats->rx_mc_pkt_cnt; + stats->rx_dropped += h_stats->rx_desc_drop; + stats->rx_length_errors += h_stats->rx_frame_very_long_err_cnt + + h_stats->rx_frame_long_err_cnt + + h_stats->rx_frame_runt_err_cnt + + h_stats->rx_frame_short_err_cnt + + h_stats->rx_lengthfield_err_cnt; + stats->rx_frame_errors += h_stats->rx_desc_l2_err_cnt + + h_stats->rx_desc_l3l4_err_cnt; + stats->rx_fifo_errors += h_stats->rx_overflow_cnt + + h_stats->rx_overrun_cnt; + stats->rx_crc_errors += h_stats->rx_fcs_error_cnt; +} + static const struct net_device_ops hbg_netdev_ops = { .ndo_open = hbg_net_open, .ndo_stop = hbg_net_stop, @@ -235,8 +272,37 @@ static const struct net_device_ops hbg_netdev_ops = { .ndo_change_mtu = hbg_net_change_mtu, .ndo_tx_timeout = hbg_net_tx_timeout, .ndo_set_rx_mode = hbg_net_set_rx_mode, + .ndo_get_stats64 = hbg_net_get_stats, }; +static void hbg_service_task(struct work_struct *work) +{ + struct hbg_priv *priv = container_of(work, struct hbg_priv, + service_task.work); + + /* The type of statistics register is u32, + * To prevent the statistics register from overflowing, + * the driver dumps the statistics every 30 seconds. + */ + hbg_update_stats(priv); + schedule_delayed_work(&priv->service_task, + msecs_to_jiffies(30 * MSEC_PER_SEC)); +} + +static void hbg_cancel_delayed_work_sync(void *data) +{ + cancel_delayed_work_sync(data); +} + +static int hbg_delaywork_init(struct hbg_priv *priv) +{ + INIT_DELAYED_WORK(&priv->service_task, hbg_service_task); + schedule_delayed_work(&priv->service_task, 0); + return devm_add_action_or_reset(&priv->pdev->dev, + hbg_cancel_delayed_work_sync, + &priv->service_task); +} + static int hbg_mac_filter_init(struct hbg_priv *priv) { struct hbg_dev_specs *dev_specs = &priv->dev_specs; @@ -291,6 +357,10 @@ static int hbg_init(struct hbg_priv *priv) if (ret) return ret; + ret = hbg_delaywork_init(priv); + if (ret) + return ret; + hbg_debugfs_init(priv); hbg_init_user_def(priv); return 0; diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_reg.h b/drivers/net/ethernet/hisilicon/hibmcge/hbg_reg.h index f12efc12f3c5..106d0e0408ba 100644 --- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_reg.h +++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_reg.h @@ -60,6 +60,48 @@ #define HBG_REG_TRANSMIT_CTRL_AN_EN_B BIT(5) #define HBG_REG_REC_FILT_CTRL_ADDR (HBG_REG_SGMII_BASE + 0x0064) #define HBG_REG_REC_FILT_CTRL_UC_MATCH_EN_B BIT(0) +#define HBG_REG_RX_OCTETS_TOTAL_OK_ADDR (HBG_REG_SGMII_BASE + 0x0080) +#define HBG_REG_RX_OCTETS_BAD_ADDR (HBG_REG_SGMII_BASE + 0x0084) +#define HBG_REG_RX_UC_PKTS_ADDR (HBG_REG_SGMII_BASE + 0x0088) +#define HBG_REG_RX_MC_PKTS_ADDR (HBG_REG_SGMII_BASE + 0x008C) +#define HBG_REG_RX_BC_PKTS_ADDR (HBG_REG_SGMII_BASE + 0x0090) +#define HBG_REG_RX_PKTS_64OCTETS_ADDR (HBG_REG_SGMII_BASE + 0x0094) +#define HBG_REG_RX_PKTS_65TO127OCTETS_ADDR (HBG_REG_SGMII_BASE + 0x0098) +#define HBG_REG_RX_PKTS_128TO255OCTETS_ADDR (HBG_REG_SGMII_BASE + 0x009C) +#define HBG_REG_RX_PKTS_256TO511OCTETS_ADDR (HBG_REG_SGMII_BASE + 
0x00A0) +#define HBG_REG_RX_PKTS_512TO1023OCTETS_ADDR (HBG_REG_SGMII_BASE + 0x00A4) +#define HBG_REG_RX_PKTS_1024TO1518OCTETS_ADDR (HBG_REG_SGMII_BASE + 0x00A8) +#define HBG_REG_RX_PKTS_1519TOMAXOCTETS_ADDR (HBG_REG_SGMII_BASE + 0x00AC) +#define HBG_REG_RX_FCS_ERRORS_ADDR (HBG_REG_SGMII_BASE + 0x00B0) +#define HBG_REG_RX_TAGGED_ADDR (HBG_REG_SGMII_BASE + 0x00B4) +#define HBG_REG_RX_DATA_ERR_ADDR (HBG_REG_SGMII_BASE + 0x00B8) +#define HBG_REG_RX_ALIGN_ERRORS_ADDR (HBG_REG_SGMII_BASE + 0x00BC) +#define HBG_REG_RX_LONG_ERRORS_ADDR (HBG_REG_SGMII_BASE + 0x00C0) +#define HBG_REG_RX_JABBER_ERRORS_ADDR (HBG_REG_SGMII_BASE + 0x00C4) +#define HBG_REG_RX_PAUSE_MACCTL_FRAMCOUNTER_ADDR (HBG_REG_SGMII_BASE + 0x00C8) +#define HBG_REG_RX_UNKNOWN_MACCTL_FRAMCOUNTER_ADDR (HBG_REG_SGMII_BASE + 0x00CC) +#define HBG_REG_RX_VERY_LONG_ERR_CNT_ADDR (HBG_REG_SGMII_BASE + 0x00D0) +#define HBG_REG_RX_RUNT_ERR_CNT_ADDR (HBG_REG_SGMII_BASE + 0x00D4) +#define HBG_REG_RX_SHORT_ERR_CNT_ADDR (HBG_REG_SGMII_BASE + 0x00D8) +#define HBG_REG_RX_FILT_PKT_CNT_ADDR (HBG_REG_SGMII_BASE + 0x00E8) +#define HBG_REG_RX_OCTETS_TOTAL_FILT_ADDR (HBG_REG_SGMII_BASE + 0x00EC) +#define HBG_REG_OCTETS_TRANSMITTED_OK_ADDR (HBG_REG_SGMII_BASE + 0x0100) +#define HBG_REG_OCTETS_TRANSMITTED_BAD_ADDR (HBG_REG_SGMII_BASE + 0x0104) +#define HBG_REG_TX_UC_PKTS_ADDR (HBG_REG_SGMII_BASE + 0x0108) +#define HBG_REG_TX_MC_PKTS_ADDR (HBG_REG_SGMII_BASE + 0x010C) +#define HBG_REG_TX_BC_PKTS_ADDR (HBG_REG_SGMII_BASE + 0x0110) +#define HBG_REG_TX_PKTS_64OCTETS_ADDR (HBG_REG_SGMII_BASE + 0x0114) +#define HBG_REG_TX_PKTS_65TO127OCTETS_ADDR (HBG_REG_SGMII_BASE + 0x0118) +#define HBG_REG_TX_PKTS_128TO255OCTETS_ADDR (HBG_REG_SGMII_BASE + 0x011C) +#define HBG_REG_TX_PKTS_256TO511OCTETS_ADDR (HBG_REG_SGMII_BASE + 0x0120) +#define HBG_REG_TX_PKTS_512TO1023OCTETS_ADDR (HBG_REG_SGMII_BASE + 0x0124) +#define HBG_REG_TX_PKTS_1024TO1518OCTETS_ADDR (HBG_REG_SGMII_BASE + 0x0128) +#define HBG_REG_TX_PKTS_1519TOMAXOCTETS_ADDR (HBG_REG_SGMII_BASE + 0x012C) +#define HBG_REG_TX_EXCESSIVE_LENGTH_DROP_ADDR (HBG_REG_SGMII_BASE + 0x014C) +#define HBG_REG_TX_UNDERRUN_ADDR (HBG_REG_SGMII_BASE + 0x0150) +#define HBG_REG_TX_TAGGED_ADDR (HBG_REG_SGMII_BASE + 0x0154) +#define HBG_REG_TX_CRC_ERROR_ADDR (HBG_REG_SGMII_BASE + 0x0158) +#define HBG_REG_TX_PAUSE_FRAMES_ADDR (HBG_REG_SGMII_BASE + 0x015C) #define HBG_REG_LINE_LOOP_BACK_ADDR (HBG_REG_SGMII_BASE + 0x01A8) #define HBG_REG_CF_CRC_STRIP_ADDR (HBG_REG_SGMII_BASE + 0x01B0) #define HBG_REG_CF_CRC_STRIP_B BIT(0) @@ -69,6 +111,9 @@ #define HBG_REG_RECV_CTRL_ADDR (HBG_REG_SGMII_BASE + 0x01E0) #define HBG_REG_RECV_CTRL_STRIP_PAD_EN_B BIT(3) #define HBG_REG_VLAN_CODE_ADDR (HBG_REG_SGMII_BASE + 0x01E8) +#define HBG_REG_RX_OVERRUN_CNT_ADDR (HBG_REG_SGMII_BASE + 0x01EC) +#define HBG_REG_RX_LENGTHFIELD_ERR_CNT_ADDR (HBG_REG_SGMII_BASE + 0x01F4) +#define HBG_REG_RX_FAIL_COMMA_CNT_ADDR (HBG_REG_SGMII_BASE + 0x01F8) #define HBG_REG_STATION_ADDR_LOW_0_ADDR (HBG_REG_SGMII_BASE + 0x0200) #define HBG_REG_STATION_ADDR_HIGH_0_ADDR (HBG_REG_SGMII_BASE + 0x0204) #define HBG_REG_STATION_ADDR_LOW_1_ADDR (HBG_REG_SGMII_BASE + 0x0208) @@ -111,12 +156,17 @@ #define HBG_REG_RX_BUS_ERR_ADDR_ADDR (HBG_REG_SGMII_BASE + 0x0440) #define HBG_REG_MAX_FRAME_LEN_ADDR (HBG_REG_SGMII_BASE + 0x0444) #define HBG_REG_MAX_FRAME_LEN_M GENMASK(15, 0) +#define HBG_REG_TX_DROP_CNT_ADDR (HBG_REG_SGMII_BASE + 0x0448) +#define HBG_REG_RX_OVER_FLOW_CNT_ADDR (HBG_REG_SGMII_BASE + 0x044C) #define HBG_REG_DEBUG_ST_MCH_ADDR (HBG_REG_SGMII_BASE + 0x0450) #define 
HBG_REG_FIFO_CURR_STATUS_ADDR (HBG_REG_SGMII_BASE + 0x0454) #define HBG_REG_FIFO_HIST_STATUS_ADDR (HBG_REG_SGMII_BASE + 0x0458) #define HBG_REG_CF_CFF_DATA_NUM_ADDR (HBG_REG_SGMII_BASE + 0x045C) #define HBG_REG_CF_CFF_DATA_NUM_ADDR_TX_M GENMASK(8, 0) #define HBG_REG_CF_CFF_DATA_NUM_ADDR_RX_M GENMASK(24, 16) +#define HBG_REG_TX_CS_FAIL_CNT_ADDR (HBG_REG_SGMII_BASE + 0x0460) +#define HBG_REG_RX_TRANS_PKG_CNT_ADDR (HBG_REG_SGMII_BASE + 0x0464) +#define HBG_REG_TX_TRANS_PKG_CNT_ADDR (HBG_REG_SGMII_BASE + 0x0468) #define HBG_REG_CF_TX_PAUSE_ADDR (HBG_REG_SGMII_BASE + 0x0470) #define HBG_REG_TX_CFF_ADDR_0_ADDR (HBG_REG_SGMII_BASE + 0x0488) #define HBG_REG_TX_CFF_ADDR_1_ADDR (HBG_REG_SGMII_BASE + 0x048C) @@ -136,6 +186,9 @@ #define HBG_REG_RX_CTRL_RXBUF_1ST_SKIP_SIZE2_M GENMASK(3, 0) #define HBG_REG_RX_PKT_MODE_ADDR (HBG_REG_SGMII_BASE + 0x04F4) #define HBG_REG_RX_PKT_MODE_PARSE_MODE_M GENMASK(22, 21) +#define HBG_REG_RX_BUFRQ_ERR_CNT_ADDR (HBG_REG_SGMII_BASE + 0x058C) +#define HBG_REG_TX_BUFRL_ERR_CNT_ADDR (HBG_REG_SGMII_BASE + 0x0590) +#define HBG_REG_RX_WE_ERR_CNT_ADDR (HBG_REG_SGMII_BASE + 0x0594) #define HBG_REG_DBG_ST0_ADDR (HBG_REG_SGMII_BASE + 0x05E4) #define HBG_REG_DBG_ST1_ADDR (HBG_REG_SGMII_BASE + 0x05E8) #define HBG_REG_DBG_ST2_ADDR (HBG_REG_SGMII_BASE + 0x05EC) @@ -178,5 +231,48 @@ struct hbg_rx_desc { }; #define HBG_RX_DESC_W2_PKT_LEN_M GENMASK(31, 16) +#define HBG_RX_DESC_W2_PORT_NUM_M GENMASK(15, 12) +#define HBG_RX_DESC_W4_IP_TCP_UDP_M GENMASK(31, 30) +#define HBG_RX_DESC_W4_IPSEC_B BIT(29) +#define HBG_RX_DESC_W4_IP_VERSION_B BIT(28) +#define HBG_RX_DESC_W4_L4_ERR_CODE_M GENMASK(26, 23) +#define HBG_RX_DESC_W4_FRAG_B BIT(22) +#define HBG_RX_DESC_W4_OPT_B BIT(21) +#define HBG_RX_DESC_W4_IP_VERSION_ERR_B BIT(20) +#define HBG_RX_DESC_W4_BRD_CST_B BIT(19) +#define HBG_RX_DESC_W4_MUL_CST_B BIT(18) +#define HBG_RX_DESC_W4_ARP_B BIT(17) +#define HBG_RX_DESC_W4_RARP_B BIT(16) +#define HBG_RX_DESC_W4_ICMP_B BIT(15) +#define HBG_RX_DESC_W4_VLAN_FLAG_B BIT(14) +#define HBG_RX_DESC_W4_DROP_B BIT(13) +#define HBG_RX_DESC_W4_L3_ERR_CODE_M GENMASK(12, 9) +#define HBG_RX_DESC_W4_L2_ERR_B BIT(8) +#define HBG_RX_DESC_W4_IDX_MATCH_B BIT(7) + +enum hbg_l3_err_code { + HBG_L3_OK = 0, + HBG_L3_WRONG_HEAD, + HBG_L3_CSUM_ERR, + HBG_L3_LEN_ERR, + HBG_L3_ZERO_TTL, + HBG_L3_RSVD, +}; + +enum hbg_l4_err_code { + HBG_L4_OK = 0, + HBG_L4_WRONG_HEAD, + HBG_L4_LEN_ERR, + HBG_L4_CSUM_ERR, + HBG_L4_ZERO_PORT_NUM, + HBG_L4_RSVD, +}; + +enum hbg_pkt_type_code { + HBG_NO_IP_PKT = 0, + HBG_IP_PKT, + HBG_TCP_PKT, + HBG_UDP_PKT, +}; #endif diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_txrx.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_txrx.c index f4f256a0dfea..35dd3512d00e 100644 --- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_txrx.c +++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_txrx.c @@ -38,8 +38,14 @@ static int hbg_dma_map(struct hbg_buffer *buffer) buffer->skb_dma = dma_map_single(&priv->pdev->dev, buffer->skb->data, buffer->skb_len, buffer_to_dma_dir(buffer)); - if (unlikely(dma_mapping_error(&priv->pdev->dev, buffer->skb_dma))) + if (unlikely(dma_mapping_error(&priv->pdev->dev, buffer->skb_dma))) { + if (buffer->dir == HBG_DIR_RX) + priv->stats.rx_dma_err_cnt++; + else + priv->stats.tx_dma_err_cnt++; + return -ENOMEM; + } return 0; } @@ -195,6 +201,156 @@ static int hbg_napi_tx_recycle(struct napi_struct *napi, int budget) return packet_done; } +static bool hbg_rx_check_l3l4_error(struct hbg_priv *priv, + struct hbg_rx_desc *desc) +{ + if (likely(!FIELD_GET(HBG_RX_DESC_W4_L3_ERR_CODE_M, 
desc->word4) && + !FIELD_GET(HBG_RX_DESC_W4_L4_ERR_CODE_M, desc->word4))) + return true; + + switch (FIELD_GET(HBG_RX_DESC_W4_L3_ERR_CODE_M, desc->word4)) { + case HBG_L3_OK: + break; + case HBG_L3_WRONG_HEAD: + priv->stats.rx_desc_l3_wrong_head_cnt++; + return false; + case HBG_L3_CSUM_ERR: + priv->stats.rx_desc_l3_csum_err_cnt++; + return false; + case HBG_L3_LEN_ERR: + priv->stats.rx_desc_l3_len_err_cnt++; + return false; + case HBG_L3_ZERO_TTL: + priv->stats.rx_desc_l3_zero_ttl_cnt++; + return false; + default: + priv->stats.rx_desc_l3_other_cnt++; + return false; + } + + switch (FIELD_GET(HBG_RX_DESC_W4_L4_ERR_CODE_M, desc->word4)) { + case HBG_L4_OK: + break; + case HBG_L4_WRONG_HEAD: + priv->stats.rx_desc_l4_wrong_head_cnt++; + return false; + case HBG_L4_LEN_ERR: + priv->stats.rx_desc_l4_len_err_cnt++; + return false; + case HBG_L4_CSUM_ERR: + priv->stats.rx_desc_l4_csum_err_cnt++; + return false; + case HBG_L4_ZERO_PORT_NUM: + priv->stats.rx_desc_l4_zero_port_num_cnt++; + return false; + default: + priv->stats.rx_desc_l4_other_cnt++; + return false; + } + + return true; +} + +static void hbg_update_rx_ip_protocol_stats(struct hbg_priv *priv, + struct hbg_rx_desc *desc) +{ + if (unlikely(!FIELD_GET(HBG_RX_DESC_W4_IP_TCP_UDP_M, desc->word4))) { + priv->stats.rx_desc_no_ip_pkt_cnt++; + return; + } + + if (unlikely(FIELD_GET(HBG_RX_DESC_W4_IP_VERSION_ERR_B, desc->word4))) { + priv->stats.rx_desc_ip_ver_err_cnt++; + return; + } + + /* 0:ipv4, 1:ipv6 */ + if (FIELD_GET(HBG_RX_DESC_W4_IP_VERSION_B, desc->word4)) + priv->stats.rx_desc_ipv6_pkt_cnt++; + else + priv->stats.rx_desc_ipv4_pkt_cnt++; + + switch (FIELD_GET(HBG_RX_DESC_W4_IP_TCP_UDP_M, desc->word4)) { + case HBG_IP_PKT: + priv->stats.rx_desc_ip_pkt_cnt++; + if (FIELD_GET(HBG_RX_DESC_W4_OPT_B, desc->word4)) + priv->stats.rx_desc_ip_opt_pkt_cnt++; + if (FIELD_GET(HBG_RX_DESC_W4_FRAG_B, desc->word4)) + priv->stats.rx_desc_frag_cnt++; + + if (FIELD_GET(HBG_RX_DESC_W4_ICMP_B, desc->word4)) + priv->stats.rx_desc_icmp_pkt_cnt++; + else if (FIELD_GET(HBG_RX_DESC_W4_IPSEC_B, desc->word4)) + priv->stats.rx_desc_ipsec_pkt_cnt++; + break; + case HBG_TCP_PKT: + priv->stats.rx_desc_tcp_pkt_cnt++; + break; + case HBG_UDP_PKT: + priv->stats.rx_desc_udp_pkt_cnt++; + break; + default: + priv->stats.rx_desc_no_ip_pkt_cnt++; + break; + } +} + +static void hbg_update_rx_protocol_stats(struct hbg_priv *priv, + struct hbg_rx_desc *desc) +{ + if (unlikely(!FIELD_GET(HBG_RX_DESC_W4_IDX_MATCH_B, desc->word4))) { + priv->stats.rx_desc_key_not_match_cnt++; + return; + } + + if (FIELD_GET(HBG_RX_DESC_W4_BRD_CST_B, desc->word4)) + priv->stats.rx_desc_broadcast_pkt_cnt++; + else if (FIELD_GET(HBG_RX_DESC_W4_MUL_CST_B, desc->word4)) + priv->stats.rx_desc_multicast_pkt_cnt++; + + if (FIELD_GET(HBG_RX_DESC_W4_VLAN_FLAG_B, desc->word4)) + priv->stats.rx_desc_vlan_pkt_cnt++; + + if (FIELD_GET(HBG_RX_DESC_W4_ARP_B, desc->word4)) { + priv->stats.rx_desc_arp_pkt_cnt++; + return; + } else if (FIELD_GET(HBG_RX_DESC_W4_RARP_B, desc->word4)) { + priv->stats.rx_desc_rarp_pkt_cnt++; + return; + } + + hbg_update_rx_ip_protocol_stats(priv, desc); +} + +static bool hbg_rx_pkt_check(struct hbg_priv *priv, struct hbg_rx_desc *desc) +{ + if (unlikely(FIELD_GET(HBG_RX_DESC_W2_PKT_LEN_M, desc->word2) > + priv->dev_specs.max_frame_len)) { + priv->stats.rx_desc_pkt_len_err_cnt++; + return false; + } + + if (unlikely(FIELD_GET(HBG_RX_DESC_W2_PORT_NUM_M, desc->word2) != + priv->dev_specs.mac_id || + FIELD_GET(HBG_RX_DESC_W4_DROP_B, desc->word4))) { + priv->stats.rx_desc_drop++; + 
return false; + } + + if (unlikely(FIELD_GET(HBG_RX_DESC_W4_L2_ERR_B, desc->word4))) { + priv->stats.rx_desc_l2_err_cnt++; + return false; + } + + if (unlikely(!hbg_rx_check_l3l4_error(priv, desc))) { + priv->stats.rx_desc_l3l4_err_cnt++; + return false; + } + + hbg_update_rx_protocol_stats(priv, desc); + return true; +} + static int hbg_rx_fill_one_buffer(struct hbg_priv *priv) { struct hbg_ring *ring = &priv->rx_ring; @@ -257,8 +413,12 @@ static int hbg_napi_rx_poll(struct napi_struct *napi, int budget) rx_desc = (struct hbg_rx_desc *)buffer->skb->data; pkt_len = FIELD_GET(HBG_RX_DESC_W2_PKT_LEN_M, rx_desc->word2); - hbg_dma_unmap(buffer); + if (unlikely(!hbg_rx_pkt_check(priv, rx_desc))) { + hbg_buffer_free(buffer); + goto next_buffer; + } + + hbg_dma_unmap(buffer); skb_reserve(buffer->skb, HBG_PACKET_HEAD_SIZE + NET_IP_ALIGN); skb_put(buffer->skb, pkt_len); buffer->skb->protocol = eth_type_trans(buffer->skb, -- 2.51.0 From 833b65a3b54d715965d5c444ad1015321633b3f8 Mon Sep 17 00:00:00 2001 From: Jijie Shao Date: Fri, 28 Feb 2025 19:54:07 +0800 Subject: [PATCH 11/16] net: hibmcge: Add support for checksum offload This patch implements the rx checksum offload feature. The tx checksum offload processing in .ndo_start_xmit() is already in place. This patch also advertises the tx checksum features NETIF_F_IP_CSUM and NETIF_F_IPV6_CSUM. Signed-off-by: Jijie Shao Signed-off-by: Paolo Abeni --- .../net/ethernet/hisilicon/hibmcge/hbg_main.c | 6 ++++ .../net/ethernet/hisilicon/hibmcge/hbg_txrx.c | 29 +++++++++++++++---- 2 files changed, 29 insertions(+), 6 deletions(-) diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_main.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_main.c index 969df6020942..688f408de84c 100644 --- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_main.c +++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_main.c @@ -14,6 +14,9 @@ #include "hbg_txrx.h" #include "hbg_debugfs.h" +#define HBG_SUPPORT_FEATURES (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | \ + NETIF_F_RXCSUM) + static void hbg_all_irq_enable(struct hbg_priv *priv, bool enabled) { struct hbg_irq_info *info; @@ -419,6 +422,9 @@ static int hbg_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (ret) return ret; + /* set default features */ + netdev->features |= HBG_SUPPORT_FEATURES; + netdev->hw_features |= HBG_SUPPORT_FEATURES; netdev->priv_flags |= IFF_UNICAST_FLT; netdev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS; diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_txrx.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_txrx.c index 35dd3512d00e..8d814c8f19ea 100644 --- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_txrx.c +++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_txrx.c @@ -202,8 +202,14 @@ static int hbg_napi_tx_recycle(struct napi_struct *napi, int budget) } static bool hbg_rx_check_l3l4_error(struct hbg_priv *priv, - struct hbg_rx_desc *desc) + struct hbg_rx_desc *desc, + struct sk_buff *skb) { + bool rx_checksum_offload = !!(priv->netdev->features & NETIF_F_RXCSUM); + + skb->ip_summed = rx_checksum_offload ?
+ CHECKSUM_UNNECESSARY : CHECKSUM_NONE; + if (likely(!FIELD_GET(HBG_RX_DESC_W4_L3_ERR_CODE_M, desc->word4) && !FIELD_GET(HBG_RX_DESC_W4_L4_ERR_CODE_M, desc->word4))) return true; @@ -215,8 +221,13 @@ static bool hbg_rx_check_l3l4_error(struct hbg_priv *priv, priv->stats.rx_desc_l3_wrong_head_cnt++; return false; case HBG_L3_CSUM_ERR: + skb->ip_summed = CHECKSUM_NONE; priv->stats.rx_desc_l3_csum_err_cnt++; - return false; + + /* Don't drop packets on csum validation failure, + * suggested by Jakub + */ + break; case HBG_L3_LEN_ERR: priv->stats.rx_desc_l3_len_err_cnt++; return false; @@ -238,8 +249,13 @@ static bool hbg_rx_check_l3l4_error(struct hbg_priv *priv, priv->stats.rx_desc_l4_len_err_cnt++; return false; case HBG_L4_CSUM_ERR: + skb->ip_summed = CHECKSUM_NONE; priv->stats.rx_desc_l4_csum_err_cnt++; - return false; + + /* Don't drop packets on csum validation failure, + * suggested by Jakub + */ + break; case HBG_L4_ZERO_PORT_NUM: priv->stats.rx_desc_l4_zero_port_num_cnt++; return false; @@ -322,7 +338,8 @@ static void hbg_update_rx_protocol_stats(struct hbg_priv *priv, hbg_update_rx_ip_protocol_stats(priv, desc); } -static bool hbg_rx_pkt_check(struct hbg_priv *priv, struct hbg_rx_desc *desc) +static bool hbg_rx_pkt_check(struct hbg_priv *priv, struct hbg_rx_desc *desc, + struct sk_buff *skb) { if (unlikely(FIELD_GET(HBG_RX_DESC_W2_PKT_LEN_M, desc->word2) > priv->dev_specs.max_frame_len)) { @@ -342,7 +359,7 @@ static bool hbg_rx_pkt_check(struct hbg_priv *priv, struct hbg_rx_desc *desc) return false; } - if (unlikely(!hbg_rx_check_l3l4_error(priv, desc))) { + if (unlikely(!hbg_rx_check_l3l4_error(priv, desc, skb))) { priv->stats.rx_desc_l3l4_err_cnt++; return false; } @@ -413,7 +430,7 @@ static int hbg_napi_rx_poll(struct napi_struct *napi, int budget) rx_desc = (struct hbg_rx_desc *)buffer->skb->data; pkt_len = FIELD_GET(HBG_RX_DESC_W2_PKT_LEN_M, rx_desc->word2); - if (unlikely(!hbg_rx_pkt_check(priv, rx_desc))) { + if (unlikely(!hbg_rx_pkt_check(priv, rx_desc, buffer->skb))) { hbg_buffer_free(buffer); goto next_buffer; } -- 2.51.0 From fd394a334b1c29caeae45f15d8b99b77c881bc63 Mon Sep 17 00:00:00 2001 From: Jijie Shao Date: Fri, 28 Feb 2025 19:54:08 +0800 Subject: [PATCH 12/16] net: hibmcge: Add support for abnormal irq handling feature Hardware errors are reported by interrupt and need to be fixed by doing a function reset. However, the whole reset flow takes a long time and should not run in the irq handler, so it is done in a scheduled task.
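In short: the irq handler only marks that a reset is needed and kicks the service task; the slow reset itself runs in task context, where sleeping and taking rtnl_lock are allowed. A condensed sketch of the handoff (names as in the diff below):

	/* irq context: cheap and non-blocking */
	if (irq_info->need_reset)
		hbg_err_reset_task_schedule(priv);	/* set_bit() + schedule_delayed_work() */

	/* task context: may sleep, so the full function reset is safe here */
	if (test_and_clear_bit(HBG_NIC_STATE_NEED_RESET, &priv->state))
		hbg_err_reset(priv);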
Signed-off-by: Jijie Shao Signed-off-by: Paolo Abeni --- .../ethernet/hisilicon/hibmcge/hbg_common.h | 5 ++ .../ethernet/hisilicon/hibmcge/hbg_debugfs.c | 5 +- .../net/ethernet/hisilicon/hibmcge/hbg_err.c | 58 +++++++++++++++++++ .../net/ethernet/hisilicon/hibmcge/hbg_err.h | 1 + .../ethernet/hisilicon/hibmcge/hbg_ethtool.c | 1 + .../net/ethernet/hisilicon/hibmcge/hbg_irq.c | 55 +++++++++++------- .../net/ethernet/hisilicon/hibmcge/hbg_main.c | 9 +++ .../net/ethernet/hisilicon/hibmcge/hbg_reg.h | 1 + 8 files changed, 112 insertions(+), 23 deletions(-) diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_common.h b/drivers/net/ethernet/hisilicon/hibmcge/hbg_common.h index 920514a8e29a..4e4d33d2832a 100644 --- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_common.h +++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_common.h @@ -36,6 +36,7 @@ enum hbg_nic_state { HBG_NIC_STATE_EVENT_HANDLING = 0, HBG_NIC_STATE_RESETTING, HBG_NIC_STATE_RESET_FAIL, + HBG_NIC_STATE_NEED_RESET, /* trigger a reset in scheduled task */ }; enum hbg_reset_type { @@ -104,6 +105,7 @@ struct hbg_irq_info { u32 mask; bool re_enable; bool need_print; + bool need_reset; u64 count; void (*irq_handle)(struct hbg_priv *priv, struct hbg_irq_info *info); @@ -220,6 +222,7 @@ struct hbg_stats { u64 rx_fail_comma_cnt; u64 rx_dma_err_cnt; + u64 rx_fifo_less_empty_thrsld_cnt; u64 tx_octets_total_ok_cnt; u64 tx_uc_pkt_cnt; @@ -268,4 +271,6 @@ struct hbg_priv { struct delayed_work service_task; }; +void hbg_err_reset_task_schedule(struct hbg_priv *priv); + #endif diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_debugfs.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_debugfs.c index 8473c43d171a..55ce90b4319a 100644 --- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_debugfs.c +++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_debugfs.c @@ -67,10 +67,11 @@ static int hbg_dbg_irq_info(struct seq_file *s, void *unused) for (i = 0; i < priv->vectors.info_array_len; i++) { info = &priv->vectors.info_array[i]; seq_printf(s, - "%-20s: enabled: %-5s, logged: %-5s, count: %llu\n", + "%-20s: enabled: %-5s, reset: %-5s, logged: %-5s, count: %llu\n", info->name, str_true_false(hbg_hw_irq_is_enabled(priv, info->mask)), + str_true_false(info->need_reset), str_true_false(info->need_print), info->count); } @@ -114,6 +115,8 @@ static int hbg_dbg_nic_state(struct seq_file *s, void *unused) state_str_true_false(priv, HBG_NIC_STATE_RESET_FAIL)); seq_printf(s, "last reset type: %s\n", reset_type_str[priv->reset_type]); + seq_printf(s, "need reset state: %s\n", + state_str_true_false(priv, HBG_NIC_STATE_NEED_RESET)); return 0; } diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.c index 4d1f4a33391a..4e8cb66f601c 100644 --- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.c +++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.c @@ -105,6 +105,62 @@ int hbg_reset(struct hbg_priv *priv) return hbg_reset_done(priv, HBG_RESET_TYPE_FUNCTION); } +void hbg_err_reset(struct hbg_priv *priv) +{ + bool running; + + rtnl_lock(); + running = netif_running(priv->netdev); + if (running) + dev_close(priv->netdev); + + hbg_reset(priv); + + /* in hbg_pci_err_detected(), we will detach first, + * so we need to attach before open + */ + if (!netif_device_present(priv->netdev)) + netif_device_attach(priv->netdev); + + if (running) + dev_open(priv->netdev, NULL); + rtnl_unlock(); +} + +static pci_ers_result_t hbg_pci_err_detected(struct pci_dev *pdev, + pci_channel_state_t state) +{ + struct net_device *netdev = 
pci_get_drvdata(pdev); + + netif_device_detach(netdev); + + if (state == pci_channel_io_perm_failure) + return PCI_ERS_RESULT_DISCONNECT; + + pci_disable_device(pdev); + return PCI_ERS_RESULT_NEED_RESET; +} + +static pci_ers_result_t hbg_pci_err_slot_reset(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct hbg_priv *priv = netdev_priv(netdev); + + if (pci_enable_device(pdev)) { + dev_err(&pdev->dev, + "failed to re-enable PCI device after reset\n"); + return PCI_ERS_RESULT_DISCONNECT; + } + + pci_set_master(pdev); + pci_restore_state(pdev); + pci_save_state(pdev); + + hbg_err_reset(priv); + netif_device_attach(netdev); + return PCI_ERS_RESULT_RECOVERED; +} + static void hbg_pci_err_reset_prepare(struct pci_dev *pdev) { struct net_device *netdev = pci_get_drvdata(pdev); @@ -124,6 +180,8 @@ static void hbg_pci_err_reset_done(struct pci_dev *pdev) } static const struct pci_error_handlers hbg_pci_err_handler = { + .error_detected = hbg_pci_err_detected, + .slot_reset = hbg_pci_err_slot_reset, .reset_prepare = hbg_pci_err_reset_prepare, .reset_done = hbg_pci_err_reset_done, }; diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.h b/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.h index d7828e446308..fb9fbe7004e8 100644 --- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.h +++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.h @@ -9,5 +9,6 @@ void hbg_set_pci_err_handler(struct pci_driver *pdrv); int hbg_reset(struct hbg_priv *priv); int hbg_rebuild(struct hbg_priv *priv); +void hbg_err_reset(struct hbg_priv *priv); #endif diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.c index f5be8d0ef611..8f1107b85fbb 100644 --- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.c @@ -69,6 +69,7 @@ static const struct hbg_ethtool_stats hbg_ethtool_stats_info[] = { HBG_REG_RX_LENGTHFIELD_ERR_CNT_ADDR), HBG_STATS_REG_I(rx_fail_comma_cnt, HBG_REG_RX_FAIL_COMMA_CNT_ADDR), HBG_STATS_I(rx_dma_err_cnt), + HBG_STATS_I(rx_fifo_less_empty_thrsld_cnt), HBG_STATS_REG_I(tx_uc_pkt_cnt, HBG_REG_TX_UC_PKTS_ADDR), HBG_STATS_REG_I(tx_vlan_pkt_cnt, HBG_REG_TX_TAGGED_ADDR), diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_irq.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_irq.c index 25dd25f096fe..e79e9ab3e530 100644 --- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_irq.c +++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_irq.c @@ -11,6 +11,9 @@ static void hbg_irq_handle_err(struct hbg_priv *priv, if (irq_info->need_print) dev_err(&priv->pdev->dev, "receive error interrupt: %s\n", irq_info->name); + + if (irq_info->need_reset) + hbg_err_reset_task_schedule(priv); } static void hbg_irq_handle_tx(struct hbg_priv *priv, @@ -25,30 +28,38 @@ static void hbg_irq_handle_rx(struct hbg_priv *priv, napi_schedule(&priv->rx_ring.napi); } -#define HBG_TXRX_IRQ_I(name, handle) \ - {#name, HBG_INT_MSK_##name##_B, false, false, 0, handle} -#define HBG_ERR_IRQ_I(name, need_print) \ - {#name, HBG_INT_MSK_##name##_B, true, need_print, 0, hbg_irq_handle_err} +static void hbg_irq_handle_rx_buf_val(struct hbg_priv *priv, + struct hbg_irq_info *irq_info) +{ + priv->stats.rx_fifo_less_empty_thrsld_cnt++; +} + +#define HBG_IRQ_I(name, handle) \ + {#name, HBG_INT_MSK_##name##_B, false, false, false, 0, handle} +#define HBG_ERR_IRQ_I(name, need_print, need_reset) \ + {#name, HBG_INT_MSK_##name##_B, true, need_print, \ + need_reset, 0, hbg_irq_handle_err} static struct
hbg_irq_info hbg_irqs[] = { - HBG_TXRX_IRQ_I(RX, hbg_irq_handle_rx), - HBG_TXRX_IRQ_I(TX, hbg_irq_handle_tx), - HBG_ERR_IRQ_I(MAC_MII_FIFO_ERR, true), - HBG_ERR_IRQ_I(MAC_PCS_RX_FIFO_ERR, true), - HBG_ERR_IRQ_I(MAC_PCS_TX_FIFO_ERR, true), - HBG_ERR_IRQ_I(MAC_APP_RX_FIFO_ERR, true), - HBG_ERR_IRQ_I(MAC_APP_TX_FIFO_ERR, true), - HBG_ERR_IRQ_I(SRAM_PARITY_ERR, true), - HBG_ERR_IRQ_I(TX_AHB_ERR, true), - HBG_ERR_IRQ_I(RX_BUF_AVL, false), - HBG_ERR_IRQ_I(REL_BUF_ERR, true), - HBG_ERR_IRQ_I(TXCFG_AVL, false), - HBG_ERR_IRQ_I(TX_DROP, false), - HBG_ERR_IRQ_I(RX_DROP, false), - HBG_ERR_IRQ_I(RX_AHB_ERR, true), - HBG_ERR_IRQ_I(MAC_FIFO_ERR, false), - HBG_ERR_IRQ_I(RBREQ_ERR, false), - HBG_ERR_IRQ_I(WE_ERR, false), + HBG_IRQ_I(RX, hbg_irq_handle_rx), + HBG_IRQ_I(TX, hbg_irq_handle_tx), + HBG_ERR_IRQ_I(TX_PKT_CPL, true, true), + HBG_ERR_IRQ_I(MAC_MII_FIFO_ERR, true, true), + HBG_ERR_IRQ_I(MAC_PCS_RX_FIFO_ERR, true, true), + HBG_ERR_IRQ_I(MAC_PCS_TX_FIFO_ERR, true, true), + HBG_ERR_IRQ_I(MAC_APP_RX_FIFO_ERR, true, true), + HBG_ERR_IRQ_I(MAC_APP_TX_FIFO_ERR, true, true), + HBG_ERR_IRQ_I(SRAM_PARITY_ERR, true, false), + HBG_ERR_IRQ_I(TX_AHB_ERR, true, true), + HBG_IRQ_I(RX_BUF_AVL, hbg_irq_handle_rx_buf_val), + HBG_ERR_IRQ_I(REL_BUF_ERR, true, false), + HBG_ERR_IRQ_I(TXCFG_AVL, false, false), + HBG_ERR_IRQ_I(TX_DROP, false, false), + HBG_ERR_IRQ_I(RX_DROP, false, false), + HBG_ERR_IRQ_I(RX_AHB_ERR, true, false), + HBG_ERR_IRQ_I(MAC_FIFO_ERR, true, true), + HBG_ERR_IRQ_I(RBREQ_ERR, true, true), + HBG_ERR_IRQ_I(WE_ERR, true, true), }; static irqreturn_t hbg_irq_handle(int irq_num, void *p) diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_main.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_main.c index 688f408de84c..40f62db33fed 100644 --- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_main.c +++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_main.c @@ -283,6 +283,9 @@ static void hbg_service_task(struct work_struct *work) struct hbg_priv *priv = container_of(work, struct hbg_priv, service_task.work); + if (test_and_clear_bit(HBG_NIC_STATE_NEED_RESET, &priv->state)) + hbg_err_reset(priv); + /* The type of statistics register is u32, * To prevent the statistics register from overflowing, * the driver dumps the statistics every 30 seconds. 
@@ -292,6 +295,12 @@ static void hbg_service_task(struct work_struct *work) msecs_to_jiffies(30 * MSEC_PER_SEC)); } +void hbg_err_reset_task_schedule(struct hbg_priv *priv) +{ + set_bit(HBG_NIC_STATE_NEED_RESET, &priv->state); + schedule_delayed_work(&priv->service_task, 0); +} + static void hbg_cancel_delayed_work_sync(void *data) { cancel_delayed_work_sync(data); diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_reg.h b/drivers/net/ethernet/hisilicon/hibmcge/hbg_reg.h index 106d0e0408ba..c45450ab608c 100644 --- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_reg.h +++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_reg.h @@ -148,6 +148,7 @@ #define HBG_INT_MSK_MAC_PCS_TX_FIFO_ERR_B BIT(17) #define HBG_INT_MSK_MAC_PCS_RX_FIFO_ERR_B BIT(16) #define HBG_INT_MSK_MAC_MII_FIFO_ERR_B BIT(15) +#define HBG_INT_MSK_TX_PKT_CPL_B BIT(14) #define HBG_INT_MSK_TX_B BIT(1) /* just used in driver */ #define HBG_INT_MSK_RX_B BIT(0) /* just used in driver */ #define HBG_REG_CF_INTRPT_STAT_ADDR (HBG_REG_SGMII_BASE + 0x0434) -- 2.51.0 From e0306637e85da84a0695452f42dc29a552051402 Mon Sep 17 00:00:00 2001 From: Jijie Shao Date: Fri, 28 Feb 2025 19:54:09 +0800 Subject: [PATCH 13/16] net: hibmcge: Add support for mac link exception handling feature If the rate changes frequently, the PHY link is ok but the MAC link may fail. As a result, the network port is unavailable. According to the chip documents, a core_reset needs to be done to fix the fault. In hbg_hw_adjust_link(), a core_reset is added to try to ensure that the MAC link status is normal. In addition, MAC link failure detection is added. If the MAC link still fails after core_reset, the driver invokes phy_stop() and phy_start() to re-link. Due to phydev->lock, the re-link cannot be triggered in adjust_link(). Therefore, this operation is invoked in a scheduled task.
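The resulting hbg_hw_adjust_link() sequence, in outline (matching the diff below):

	hbg_hw_mac_enable(priv, HBG_STATUS_DISABLE);	/* quiesce the MAC */

	/* program the new speed and duplex */
	hbg_reg_write_field(priv, HBG_REG_PORT_MODE_ADDR,
			    HBG_REG_PORT_MODE_M, speed);
	hbg_reg_write_field(priv, HBG_REG_DUPLEX_TYPE_ADDR,
			    HBG_REG_DUPLEX_B, duplex);

	hbg_hw_event_notify(priv, HBG_HW_EVENT_CORE_RESET);	/* per the chip documents */
	hbg_hw_mac_enable(priv, HBG_STATUS_ENABLE);

	/* if the MAC link is still down, defer phy_stop()/phy_start() to the
	 * service task, since phydev->lock is held in this path
	 */
	if (!hbg_reg_read_field(priv, HBG_REG_AN_NEG_STATE_ADDR,
				HBG_REG_AN_NEG_STATE_NP_LINK_OK_B))
		hbg_np_link_fail_task_schedule(priv);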
Signed-off-by: Jijie Shao Signed-off-by: Paolo Abeni --- .../ethernet/hisilicon/hibmcge/hbg_common.h | 5 +++++ .../ethernet/hisilicon/hibmcge/hbg_debugfs.c | 2 ++ .../net/ethernet/hisilicon/hibmcge/hbg_hw.c | 10 +++++++++ .../net/ethernet/hisilicon/hibmcge/hbg_main.c | 9 ++++++++ .../net/ethernet/hisilicon/hibmcge/hbg_mdio.c | 22 +++++++++++++++++++ .../net/ethernet/hisilicon/hibmcge/hbg_mdio.h | 2 ++ .../net/ethernet/hisilicon/hibmcge/hbg_reg.h | 1 + 7 files changed, 51 insertions(+) diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_common.h b/drivers/net/ethernet/hisilicon/hibmcge/hbg_common.h index 4e4d33d2832a..8b943912184b 100644 --- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_common.h +++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_common.h @@ -37,6 +37,7 @@ enum hbg_nic_state { HBG_NIC_STATE_RESETTING, HBG_NIC_STATE_RESET_FAIL, HBG_NIC_STATE_NEED_RESET, /* trigger a reset in scheduled task */ + HBG_NIC_STATE_NP_LINK_FAIL, }; enum hbg_reset_type { @@ -82,6 +83,7 @@ enum hbg_hw_event_type { HBG_HW_EVENT_NONE = 0, HBG_HW_EVENT_INIT, /* driver is loading */ HBG_HW_EVENT_RESET, + HBG_HW_EVENT_CORE_RESET, }; struct hbg_dev_specs { @@ -252,6 +254,8 @@ struct hbg_stats { u64 tx_timeout_cnt; u64 tx_dma_err_cnt; + + u64 np_link_fail_cnt; }; struct hbg_priv { @@ -272,5 +276,6 @@ struct hbg_priv { }; void hbg_err_reset_task_schedule(struct hbg_priv *priv); +void hbg_np_link_fail_task_schedule(struct hbg_priv *priv); #endif diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_debugfs.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_debugfs.c index 55ce90b4319a..5e0ba4d5b08d 100644 --- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_debugfs.c +++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_debugfs.c @@ -117,6 +117,8 @@ static int hbg_dbg_nic_state(struct seq_file *s, void *unused) reset_type_str[priv->reset_type]); seq_printf(s, "need reset state: %s\n", state_str_true_false(priv, HBG_NIC_STATE_NEED_RESET)); + seq_printf(s, "np_link fail state: %s\n", + state_str_true_false(priv, HBG_NIC_STATE_NP_LINK_FAIL)); return 0; } diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.c index e7798f213645..74a18033b444 100644 --- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.c +++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.c @@ -213,10 +213,20 @@ void hbg_hw_fill_buffer(struct hbg_priv *priv, u32 buffer_dma_addr) void hbg_hw_adjust_link(struct hbg_priv *priv, u32 speed, u32 duplex) { + hbg_hw_mac_enable(priv, HBG_STATUS_DISABLE); + hbg_reg_write_field(priv, HBG_REG_PORT_MODE_ADDR, HBG_REG_PORT_MODE_M, speed); hbg_reg_write_field(priv, HBG_REG_DUPLEX_TYPE_ADDR, HBG_REG_DUPLEX_B, duplex); + + hbg_hw_event_notify(priv, HBG_HW_EVENT_CORE_RESET); + + hbg_hw_mac_enable(priv, HBG_STATUS_ENABLE); + + if (!hbg_reg_read_field(priv, HBG_REG_AN_NEG_STATE_ADDR, + HBG_REG_AN_NEG_STATE_NP_LINK_OK_B)) + hbg_np_link_fail_task_schedule(priv); } /* only support uc filter */ diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_main.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_main.c index 40f62db33fed..3b7955f57b4d 100644 --- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_main.c +++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_main.c @@ -286,6 +286,9 @@ static void hbg_service_task(struct work_struct *work) if (test_and_clear_bit(HBG_NIC_STATE_NEED_RESET, &priv->state)) hbg_err_reset(priv); + if (test_and_clear_bit(HBG_NIC_STATE_NP_LINK_FAIL, &priv->state)) + hbg_fix_np_link_fail(priv); + /* The type of statistics register is u32, * To prevent the 
statistics register from overflowing, * the driver dumps the statistics every 30 seconds. @@ -301,6 +304,12 @@ void hbg_err_reset_task_schedule(struct hbg_priv *priv) schedule_delayed_work(&priv->service_task, 0); } +void hbg_np_link_fail_task_schedule(struct hbg_priv *priv) +{ + set_bit(HBG_NIC_STATE_NP_LINK_FAIL, &priv->state); + schedule_delayed_work(&priv->service_task, 0); +} + static void hbg_cancel_delayed_work_sync(void *data) { cancel_delayed_work_sync(data); diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.c index db6bc4cfb971..f29a937ad087 100644 --- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.c +++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.c @@ -17,6 +17,8 @@ #define HBG_MDIO_OP_TIMEOUT_US (1 * 1000 * 1000) #define HBG_MDIO_OP_INTERVAL_US (5 * 1000) +#define HBG_NP_LINK_FAIL_RETRY_TIMES 5 + static void hbg_mdio_set_command(struct hbg_mac *mac, u32 cmd) { hbg_reg_write(HBG_MAC_GET_PRIV(mac), HBG_REG_MDIO_COMMAND_ADDR, cmd); @@ -127,6 +129,26 @@ static void hbg_flowctrl_cfg(struct hbg_priv *priv) hbg_hw_set_pause_enable(priv, tx_pause, rx_pause); } +void hbg_fix_np_link_fail(struct hbg_priv *priv) +{ + struct device *dev = &priv->pdev->dev; + + if (priv->stats.np_link_fail_cnt >= HBG_NP_LINK_FAIL_RETRY_TIMES) { + dev_err(dev, "failed to fix the MAC link status\n"); + priv->stats.np_link_fail_cnt = 0; + return; + } + + priv->stats.np_link_fail_cnt++; + dev_err(dev, "failed to link between MAC and PHY, try to fix...\n"); + + /* Replace phy_reset() with phy_stop() and phy_start(), + * as suggested by Andrew. + */ + hbg_phy_stop(priv); + hbg_phy_start(priv); +} + static void hbg_phy_adjust_link(struct net_device *netdev) { struct hbg_priv *priv = netdev_priv(netdev); diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.h b/drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.h index febd02a309c7..f3771c1bbd34 100644 --- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.h +++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.h @@ -9,4 +9,6 @@ int hbg_mdio_init(struct hbg_priv *priv); void hbg_phy_start(struct hbg_priv *priv); void hbg_phy_stop(struct hbg_priv *priv); +void hbg_fix_np_link_fail(struct hbg_priv *priv); + #endif diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_reg.h b/drivers/net/ethernet/hisilicon/hibmcge/hbg_reg.h index c45450ab608c..23c55a24b284 100644 --- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_reg.h +++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_reg.h @@ -54,6 +54,7 @@ #define HBG_REG_PAUSE_ENABLE_RX_B BIT(0) #define HBG_REG_PAUSE_ENABLE_TX_B BIT(1) #define HBG_REG_AN_NEG_STATE_ADDR (HBG_REG_SGMII_BASE + 0x0058) +#define HBG_REG_AN_NEG_STATE_NP_LINK_OK_B BIT(15) #define HBG_REG_TRANSMIT_CTRL_ADDR (HBG_REG_SGMII_BASE + 0x0060) #define HBG_REG_TRANSMIT_CTRL_PAD_EN_B BIT(7) #define HBG_REG_TRANSMIT_CTRL_CRC_ADD_B BIT(6) -- 2.51.0 From 7a5d60dcf9981a5cc7b676e2d7472e10e0c1b681 Mon Sep 17 00:00:00 2001 From: Jijie Shao Date: Fri, 28 Feb 2025 19:54:10 +0800 Subject: [PATCH 14/16] net: hibmcge: Add support for BMC diagnose feature The MAC hardware is on the BMC side, and the driver is on the host side. When the driver is abnormal, the BMC cannot directly detect the exception cause. Therefore, this patch implements the BMC diagnosis feature. When users query driver diagnosis information on the BMC, the driver detects the query request in the scheduled task and reports driver statistics and link status to the BMC through the bar space. 
The BMC collects logs to analyze exception causes. Currently, the scheduled task is executed every 30 seconds. To quickly respond to user query requests, this patch changes the scheduled task to run once every second. Signed-off-by: Jijie Shao Signed-off-by: Paolo Abeni --- .../net/ethernet/hisilicon/hibmcge/Makefile | 2 +- .../ethernet/hisilicon/hibmcge/hbg_common.h | 1 + .../ethernet/hisilicon/hibmcge/hbg_diagnose.c | 348 ++++++++++++++++++ .../ethernet/hisilicon/hibmcge/hbg_diagnose.h | 11 + .../net/ethernet/hisilicon/hibmcge/hbg_main.c | 11 +- .../net/ethernet/hisilicon/hibmcge/hbg_reg.h | 7 + 6 files changed, 377 insertions(+), 3 deletions(-) create mode 100644 drivers/net/ethernet/hisilicon/hibmcge/hbg_diagnose.c create mode 100644 drivers/net/ethernet/hisilicon/hibmcge/hbg_diagnose.h diff --git a/drivers/net/ethernet/hisilicon/hibmcge/Makefile b/drivers/net/ethernet/hisilicon/hibmcge/Makefile index 7ea15f9ef849..1a9da564b306 100644 --- a/drivers/net/ethernet/hisilicon/hibmcge/Makefile +++ b/drivers/net/ethernet/hisilicon/hibmcge/Makefile @@ -6,4 +6,4 @@ obj-$(CONFIG_HIBMCGE) += hibmcge.o hibmcge-objs = hbg_main.o hbg_hw.o hbg_mdio.o hbg_irq.o hbg_txrx.o hbg_ethtool.o \ - hbg_debugfs.o hbg_err.o + hbg_debugfs.o hbg_err.o hbg_diagnose.o diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_common.h b/drivers/net/ethernet/hisilicon/hibmcge/hbg_common.h index 8b943912184b..f8cdab62bf85 100644 --- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_common.h +++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_common.h @@ -272,6 +272,7 @@ struct hbg_priv { enum hbg_reset_type reset_type; struct hbg_user_def user_def; struct hbg_stats stats; + unsigned long last_update_stats_time; struct delayed_work service_task; }; diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_diagnose.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_diagnose.c new file mode 100644 index 000000000000..d61c03f34ff0 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_diagnose.c @@ -0,0 +1,348 @@ +// SPDX-License-Identifier: GPL-2.0+ +// Copyright (c) 2025 Hisilicon Limited. + +#include +#include +#include "hbg_common.h" +#include "hbg_ethtool.h" +#include "hbg_hw.h" +#include "hbg_diagnose.h" + +#define HBG_MSG_DATA_MAX_NUM 64 + +struct hbg_diagnose_message { + u32 opcode; + u32 status; + u32 data_num; + struct hbg_priv *priv; + + u32 data[HBG_MSG_DATA_MAX_NUM]; +}; + +#define HBG_HW_PUSH_WAIT_TIMEOUT_US (2 * 1000 * 1000) +#define HBG_HW_PUSH_WAIT_INTERVAL_US (1 * 1000) + +enum hbg_push_cmd { + HBG_PUSH_CMD_IRQ = 0, + HBG_PUSH_CMD_STATS, + HBG_PUSH_CMD_LINK, +}; + +struct hbg_push_stats_info { + /* id is used to match the name of the current stats item, + * and is used for pretty print on BMC + */ + u32 id; + u64 offset; +}; + +struct hbg_push_irq_info { + /* id is used to match the name of the current irq,
+ * and is used for pretty print on BMC + */ + u32 id; + u32 mask; +}; + +#define HBG_PUSH_IRQ_I(name, id) {id, HBG_INT_MSK_##name##_B} +static const struct hbg_push_irq_info hbg_push_irq_list[] = { + HBG_PUSH_IRQ_I(RX, 0), + HBG_PUSH_IRQ_I(TX, 1), + HBG_PUSH_IRQ_I(TX_PKT_CPL, 2), + HBG_PUSH_IRQ_I(MAC_MII_FIFO_ERR, 3), + HBG_PUSH_IRQ_I(MAC_PCS_RX_FIFO_ERR, 4), + HBG_PUSH_IRQ_I(MAC_PCS_TX_FIFO_ERR, 5), + HBG_PUSH_IRQ_I(MAC_APP_RX_FIFO_ERR, 6), + HBG_PUSH_IRQ_I(MAC_APP_TX_FIFO_ERR, 7), + HBG_PUSH_IRQ_I(SRAM_PARITY_ERR, 8), + HBG_PUSH_IRQ_I(TX_AHB_ERR, 9), + HBG_PUSH_IRQ_I(RX_BUF_AVL, 10), + HBG_PUSH_IRQ_I(REL_BUF_ERR, 11), + HBG_PUSH_IRQ_I(TXCFG_AVL, 12), + HBG_PUSH_IRQ_I(TX_DROP, 13), + HBG_PUSH_IRQ_I(RX_DROP, 14), + HBG_PUSH_IRQ_I(RX_AHB_ERR, 15), + HBG_PUSH_IRQ_I(MAC_FIFO_ERR, 16), + HBG_PUSH_IRQ_I(RBREQ_ERR, 17), + HBG_PUSH_IRQ_I(WE_ERR, 18), +}; + +#define HBG_PUSH_STATS_I(name, id) {id, HBG_STATS_FIELD_OFF(name)} +static const struct hbg_push_stats_info hbg_push_stats_list[] = { + HBG_PUSH_STATS_I(rx_desc_drop, 0), + HBG_PUSH_STATS_I(rx_desc_l2_err_cnt, 1), + HBG_PUSH_STATS_I(rx_desc_pkt_len_err_cnt, 2), + HBG_PUSH_STATS_I(rx_desc_l3_wrong_head_cnt, 3), + HBG_PUSH_STATS_I(rx_desc_l3_csum_err_cnt, 4), + HBG_PUSH_STATS_I(rx_desc_l3_len_err_cnt, 5), + HBG_PUSH_STATS_I(rx_desc_l3_zero_ttl_cnt, 6), + HBG_PUSH_STATS_I(rx_desc_l3_other_cnt, 7), + HBG_PUSH_STATS_I(rx_desc_l4_err_cnt, 8), + HBG_PUSH_STATS_I(rx_desc_l4_wrong_head_cnt, 9), + HBG_PUSH_STATS_I(rx_desc_l4_len_err_cnt, 10), + HBG_PUSH_STATS_I(rx_desc_l4_csum_err_cnt, 11), + HBG_PUSH_STATS_I(rx_desc_l4_zero_port_num_cnt, 12), + HBG_PUSH_STATS_I(rx_desc_l4_other_cnt, 13), + HBG_PUSH_STATS_I(rx_desc_frag_cnt, 14), + HBG_PUSH_STATS_I(rx_desc_ip_ver_err_cnt, 15), + HBG_PUSH_STATS_I(rx_desc_ipv4_pkt_cnt, 16), + HBG_PUSH_STATS_I(rx_desc_ipv6_pkt_cnt, 17), + HBG_PUSH_STATS_I(rx_desc_no_ip_pkt_cnt, 18), + HBG_PUSH_STATS_I(rx_desc_ip_pkt_cnt, 19), + HBG_PUSH_STATS_I(rx_desc_tcp_pkt_cnt, 20), + HBG_PUSH_STATS_I(rx_desc_udp_pkt_cnt, 21), + HBG_PUSH_STATS_I(rx_desc_vlan_pkt_cnt, 22), + HBG_PUSH_STATS_I(rx_desc_icmp_pkt_cnt, 23), + HBG_PUSH_STATS_I(rx_desc_arp_pkt_cnt, 24), + HBG_PUSH_STATS_I(rx_desc_rarp_pkt_cnt, 25), + HBG_PUSH_STATS_I(rx_desc_multicast_pkt_cnt, 26), + HBG_PUSH_STATS_I(rx_desc_broadcast_pkt_cnt, 27), + HBG_PUSH_STATS_I(rx_desc_ipsec_pkt_cnt, 28), + HBG_PUSH_STATS_I(rx_desc_ip_opt_pkt_cnt, 29), + HBG_PUSH_STATS_I(rx_desc_key_not_match_cnt, 30), + HBG_PUSH_STATS_I(rx_octets_total_ok_cnt, 31), + HBG_PUSH_STATS_I(rx_uc_pkt_cnt, 32), + HBG_PUSH_STATS_I(rx_mc_pkt_cnt, 33), + HBG_PUSH_STATS_I(rx_bc_pkt_cnt, 34), + HBG_PUSH_STATS_I(rx_vlan_pkt_cnt, 35), + HBG_PUSH_STATS_I(rx_octets_bad_cnt, 36), + HBG_PUSH_STATS_I(rx_octets_total_filt_cnt, 37), + HBG_PUSH_STATS_I(rx_filt_pkt_cnt, 38), + HBG_PUSH_STATS_I(rx_trans_pkt_cnt, 39), + HBG_PUSH_STATS_I(rx_framesize_64, 40), + HBG_PUSH_STATS_I(rx_framesize_65_127, 41), + HBG_PUSH_STATS_I(rx_framesize_128_255, 42), + HBG_PUSH_STATS_I(rx_framesize_256_511, 43), + HBG_PUSH_STATS_I(rx_framesize_512_1023, 44), + HBG_PUSH_STATS_I(rx_framesize_1024_1518, 45), + HBG_PUSH_STATS_I(rx_framesize_bt_1518, 46), + HBG_PUSH_STATS_I(rx_fcs_error_cnt, 47), + HBG_PUSH_STATS_I(rx_data_error_cnt, 48), + HBG_PUSH_STATS_I(rx_align_error_cnt, 49), + HBG_PUSH_STATS_I(rx_frame_long_err_cnt, 50), + HBG_PUSH_STATS_I(rx_jabber_err_cnt, 51), + HBG_PUSH_STATS_I(rx_pause_macctl_frame_cnt, 52), + HBG_PUSH_STATS_I(rx_unknown_macctl_frame_cnt, 53), + HBG_PUSH_STATS_I(rx_frame_very_long_err_cnt, 54), + 
HBG_PUSH_STATS_I(rx_frame_runt_err_cnt, 55), + HBG_PUSH_STATS_I(rx_frame_short_err_cnt, 56), + HBG_PUSH_STATS_I(rx_overflow_cnt, 57), + HBG_PUSH_STATS_I(rx_bufrq_err_cnt, 58), + HBG_PUSH_STATS_I(rx_we_err_cnt, 59), + HBG_PUSH_STATS_I(rx_overrun_cnt, 60), + HBG_PUSH_STATS_I(rx_lengthfield_err_cnt, 61), + HBG_PUSH_STATS_I(rx_fail_comma_cnt, 62), + HBG_PUSH_STATS_I(rx_dma_err_cnt, 63), + HBG_PUSH_STATS_I(rx_fifo_less_empty_thrsld_cnt, 64), + HBG_PUSH_STATS_I(tx_octets_total_ok_cnt, 65), + HBG_PUSH_STATS_I(tx_uc_pkt_cnt, 66), + HBG_PUSH_STATS_I(tx_mc_pkt_cnt, 67), + HBG_PUSH_STATS_I(tx_bc_pkt_cnt, 68), + HBG_PUSH_STATS_I(tx_vlan_pkt_cnt, 69), + HBG_PUSH_STATS_I(tx_octets_bad_cnt, 70), + HBG_PUSH_STATS_I(tx_trans_pkt_cnt, 71), + HBG_PUSH_STATS_I(tx_pause_frame_cnt, 72), + HBG_PUSH_STATS_I(tx_framesize_64, 73), + HBG_PUSH_STATS_I(tx_framesize_65_127, 74), + HBG_PUSH_STATS_I(tx_framesize_128_255, 75), + HBG_PUSH_STATS_I(tx_framesize_256_511, 76), + HBG_PUSH_STATS_I(tx_framesize_512_1023, 77), + HBG_PUSH_STATS_I(tx_framesize_1024_1518, 78), + HBG_PUSH_STATS_I(tx_framesize_bt_1518, 79), + HBG_PUSH_STATS_I(tx_underrun_err_cnt, 80), + HBG_PUSH_STATS_I(tx_add_cs_fail_cnt, 81), + HBG_PUSH_STATS_I(tx_bufrl_err_cnt, 82), + HBG_PUSH_STATS_I(tx_crc_err_cnt, 83), + HBG_PUSH_STATS_I(tx_drop_cnt, 84), + HBG_PUSH_STATS_I(tx_excessive_length_drop_cnt, 85), + HBG_PUSH_STATS_I(tx_dma_err_cnt, 86), +}; + +static int hbg_push_msg_send(struct hbg_priv *priv, + struct hbg_diagnose_message *msg) +{ + u32 header = 0; + u32 i; + + if (msg->data_num == 0) + return 0; + + for (i = 0; i < msg->data_num && i < HBG_MSG_DATA_MAX_NUM; i++) + hbg_reg_write(priv, + HBG_REG_MSG_DATA_BASE_ADDR + i * sizeof(u32), + msg->data[i]); + + hbg_field_modify(header, HBG_REG_MSG_HEADER_OPCODE_M, msg->opcode); + hbg_field_modify(header, HBG_REG_MSG_HEADER_DATA_NUM_M, msg->data_num); + hbg_field_modify(header, HBG_REG_MSG_HEADER_RESP_CODE_M, ETIMEDOUT); + + /* start status */ + hbg_field_modify(header, HBG_REG_MSG_HEADER_STATUS_M, 1); + + /* write header msg to start push */ + hbg_reg_write(priv, HBG_REG_MSG_HEADER_ADDR, header); + + /* wait done */ + readl_poll_timeout(priv->io_base + HBG_REG_MSG_HEADER_ADDR, header, + !FIELD_GET(HBG_REG_MSG_HEADER_STATUS_M, header), + HBG_HW_PUSH_WAIT_INTERVAL_US, + HBG_HW_PUSH_WAIT_TIMEOUT_US); + + msg->status = FIELD_GET(HBG_REG_MSG_HEADER_STATUS_M, header); + return -(int)FIELD_GET(HBG_REG_MSG_HEADER_RESP_CODE_M, header); +} + +static int hbg_push_data(struct hbg_priv *priv, + u32 opcode, u32 *data, u32 data_num) +{ + struct hbg_diagnose_message msg = {0}; + u32 data_left_num; + u32 i, j; + int ret; + + msg.priv = priv; + msg.opcode = opcode; + for (i = 0; i < data_num / HBG_MSG_DATA_MAX_NUM + 1; i++) { + if (i * HBG_MSG_DATA_MAX_NUM >= data_num) + break; + + data_left_num = data_num - i * HBG_MSG_DATA_MAX_NUM; + for (j = 0; j < data_left_num && j < HBG_MSG_DATA_MAX_NUM; j++) + msg.data[j] = data[i * HBG_MSG_DATA_MAX_NUM + j]; + + msg.data_num = j; + ret = hbg_push_msg_send(priv, &msg); + if (ret) + return ret; + } + + return 0; +} + +static int hbg_push_data_u64(struct hbg_priv *priv, u32 opcode, + u64 *data, u32 data_num) +{ + /* The length of u64 is twice that of u32, + * the data_num must be multiplied by 2. 
+ */ + return hbg_push_data(priv, opcode, (u32 *)data, data_num * 2); +} + +static u64 hbg_get_irq_stats(struct hbg_vector *vectors, u32 mask) +{ + u32 i = 0; + + for (i = 0; i < vectors->info_array_len; i++) + if (vectors->info_array[i].mask == mask) + return vectors->info_array[i].count; + + return 0; +} + +static int hbg_push_irq_cnt(struct hbg_priv *priv) +{ + /* An id needs to be added for each data. + * Therefore, the data_num must be multiplied by 2. + */ + u32 data_num = ARRAY_SIZE(hbg_push_irq_list) * 2; + struct hbg_vector *vectors = &priv->vectors; + const struct hbg_push_irq_info *info; + u32 i, j = 0; + u64 *data; + int ret; + + data = kcalloc(data_num, sizeof(u64), GFP_KERNEL); + if (!data) + return -ENOMEM; + + /* An id needs to be added for each data. + * So i + 2 for each loop. + */ + for (i = 0; i < data_num; i += 2) { + info = &hbg_push_irq_list[j++]; + data[i] = info->id; + data[i + 1] = hbg_get_irq_stats(vectors, info->mask); + } + + ret = hbg_push_data_u64(priv, HBG_PUSH_CMD_IRQ, data, data_num); + kfree(data); + return ret; +} + +static int hbg_push_link_status(struct hbg_priv *priv) +{ + u32 link_status[2]; + + /* phy link status */ + link_status[0] = priv->mac.phydev->link; + /* mac link status */ + link_status[1] = hbg_reg_read_field(priv, HBG_REG_AN_NEG_STATE_ADDR, + HBG_REG_AN_NEG_STATE_NP_LINK_OK_B); + + return hbg_push_data(priv, HBG_PUSH_CMD_LINK, + link_status, ARRAY_SIZE(link_status)); +} + +static int hbg_push_stats(struct hbg_priv *priv) +{ + /* An id needs to be added for each data. + * Therefore, the data_num must be multiplied by 2. + */ + u64 data_num = ARRAY_SIZE(hbg_push_stats_list) * 2; + struct hbg_stats *stats = &priv->stats; + const struct hbg_push_stats_info *info; + u32 i, j = 0; + u64 *data; + int ret; + + data = kcalloc(data_num, sizeof(u64), GFP_KERNEL); + if (!data) + return -ENOMEM; + + /* An id needs to be added for each data. + * So i + 2 for each loop. + */ + for (i = 0; i < data_num; i += 2) { + info = &hbg_push_stats_list[j++]; + data[i] = info->id; + data[i + 1] = HBG_STATS_R(stats, info->offset); + } + + ret = hbg_push_data_u64(priv, HBG_PUSH_CMD_STATS, data, data_num); + kfree(data); + return ret; +} + +void hbg_diagnose_message_push(struct hbg_priv *priv) +{ + int ret; + + if (test_bit(HBG_NIC_STATE_RESETTING, &priv->state)) + return; + + /* only 1 is the right value */ + if (hbg_reg_read(priv, HBG_REG_PUSH_REQ_ADDR) != 1) + return; + + ret = hbg_push_irq_cnt(priv); + if (ret) { + dev_err(&priv->pdev->dev, + "failed to push irq cnt, ret = %d\n", ret); + goto push_done; + } + + ret = hbg_push_link_status(priv); + if (ret) { + dev_err(&priv->pdev->dev, + "failed to push link status, ret = %d\n", ret); + goto push_done; + } + + ret = hbg_push_stats(priv); + if (ret) + dev_err(&priv->pdev->dev, + "failed to push stats, ret = %d\n", ret); + +push_done: + hbg_reg_write(priv, HBG_REG_PUSH_REQ_ADDR, 0); +} diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_diagnose.h b/drivers/net/ethernet/hisilicon/hibmcge/hbg_diagnose.h new file mode 100644 index 000000000000..ba04c6d8c03d --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_diagnose.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (c) 2025 Hisilicon Limited. 
*/ + +#ifndef __HBG_DIAGNOSE_H +#define __HBG_DIAGNOSE_H + +#include "hbg_common.h" + +void hbg_diagnose_message_push(struct hbg_priv *priv); + +#endif diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_main.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_main.c index 3b7955f57b4d..c6a955e640fc 100644 --- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_main.c +++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_main.c @@ -6,6 +6,7 @@ #include #include #include "hbg_common.h" +#include "hbg_diagnose.h" #include "hbg_err.h" #include "hbg_ethtool.h" #include "hbg_hw.h" @@ -289,13 +290,19 @@ static void hbg_service_task(struct work_struct *work) if (test_and_clear_bit(HBG_NIC_STATE_NP_LINK_FAIL, &priv->state)) hbg_fix_np_link_fail(priv); + hbg_diagnose_message_push(priv); + /* The type of statistics register is u32, * To prevent the statistics register from overflowing, * the driver dumps the statistics every 30 seconds. */ - hbg_update_stats(priv); + if (time_after(jiffies, priv->last_update_stats_time + 30 * HZ)) { + hbg_update_stats(priv); + priv->last_update_stats_time = jiffies; + } + schedule_delayed_work(&priv->service_task, - msecs_to_jiffies(30 * MSEC_PER_SEC)); + msecs_to_jiffies(MSEC_PER_SEC)); } void hbg_err_reset_task_schedule(struct hbg_priv *priv) diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_reg.h b/drivers/net/ethernet/hisilicon/hibmcge/hbg_reg.h index 23c55a24b284..cc2cc612770d 100644 --- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_reg.h +++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_reg.h @@ -18,6 +18,13 @@ #define HBG_REG_TX_FIFO_NUM_ADDR 0x0030 #define HBG_REG_RX_FIFO_NUM_ADDR 0x0034 #define HBG_REG_VLAN_LAYERS_ADDR 0x0038 +#define HBG_REG_PUSH_REQ_ADDR 0x00F0 +#define HBG_REG_MSG_HEADER_ADDR 0x00F4 +#define HBG_REG_MSG_HEADER_OPCODE_M GENMASK(7, 0) +#define HBG_REG_MSG_HEADER_STATUS_M GENMASK(11, 8) +#define HBG_REG_MSG_HEADER_DATA_NUM_M GENMASK(19, 12) +#define HBG_REG_MSG_HEADER_RESP_CODE_M GENMASK(27, 20) +#define HBG_REG_MSG_DATA_BASE_ADDR 0x0100 /* MDIO */ #define HBG_REG_MDIO_BASE 0x8000 -- 2.51.0 From 615552c601ed3f7c8b88cdb9745e2ace769c264f Mon Sep 17 00:00:00 2001 From: Jijie Shao Date: Fri, 28 Feb 2025 19:54:11 +0800 Subject: [PATCH 15/16] net: hibmcge: Add support for ioctl This patch implements the .ndo_eth_ioctl() to read and write the PHY register. Signed-off-by: Jijie Shao Reviewed-by: Andrew Lunn Signed-off-by: Paolo Abeni --- drivers/net/ethernet/hisilicon/hibmcge/hbg_main.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_main.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_main.c index c6a955e640fc..2ac5454338e4 100644 --- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_main.c +++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_main.c @@ -5,6 +5,7 @@ #include #include #include +#include #include "hbg_common.h" #include "hbg_diagnose.h" #include "hbg_err.h" @@ -277,6 +278,7 @@ static const struct net_device_ops hbg_netdev_ops = { .ndo_tx_timeout = hbg_net_tx_timeout, .ndo_set_rx_mode = hbg_net_set_rx_mode, .ndo_get_stats64 = hbg_net_get_stats, + .ndo_eth_ioctl = phy_do_ioctl_running, }; static void hbg_service_task(struct work_struct *work) -- 2.51.0 From 8a683295c2264e66cd8522ab6a15edfb52aa20bc Mon Sep 17 00:00:00 2001 From: Breno Leitao Date: Fri, 28 Feb 2025 04:50:17 -0800 Subject: [PATCH 16/16] netconsole: prefix CPU_NR sysdata feature with SYSDATA_ Rename the CPU_NR enum value to SYSDATA_CPU_NR to establish a consistent naming convention for sysdata features. 
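For example, a follow-up feature bit would slot in next to the existing one under the same prefix (the second entry below is purely illustrative and not part of this change):

	enum sysdata_feature {
		/* Populate the CPU that sends the message */
		SYSDATA_CPU_NR = BIT(0),
		/* hypothetical future field, shown only to illustrate the grouping */
		SYSDATA_TASKNAME = BIT(1),
	};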
This change prepares for upcoming additions to the sysdata feature set by clearly grouping related features under the SYSDATA prefix. This change is purely cosmetic and does not modify any functionality. Signed-off-by: Breno Leitao Reviewed-by: Simon Horman Signed-off-by: Paolo Abeni --- drivers/net/netconsole.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c index f77eddf22185..c086e2fe51f8 100644 --- a/drivers/net/netconsole.c +++ b/drivers/net/netconsole.c @@ -103,7 +103,7 @@ struct netconsole_target_stats { */ enum sysdata_feature { /* Populate the CPU that sends the message */ - CPU_NR = BIT(0), + SYSDATA_CPU_NR = BIT(0), }; /** @@ -418,7 +418,7 @@ static ssize_t sysdata_cpu_nr_enabled_show(struct config_item *item, char *buf) bool cpu_nr_enabled; mutex_lock(&dynamic_netconsole_mutex); - cpu_nr_enabled = !!(nt->sysdata_fields & CPU_NR); + cpu_nr_enabled = !!(nt->sysdata_fields & SYSDATA_CPU_NR); mutex_unlock(&dynamic_netconsole_mutex); return sysfs_emit(buf, "%d\n", cpu_nr_enabled); @@ -699,7 +699,7 @@ static size_t count_extradata_entries(struct netconsole_target *nt) /* Userdata entries */ entries = list_count_nodes(&nt->userdata_group.cg_children); /* Plus sysdata entries */ - if (nt->sysdata_fields & CPU_NR) + if (nt->sysdata_fields & SYSDATA_CPU_NR) entries += 1; return entries; @@ -850,7 +850,7 @@ static ssize_t sysdata_cpu_nr_enabled_store(struct config_item *item, return ret; mutex_lock(&dynamic_netconsole_mutex); - curr = nt->sysdata_fields & CPU_NR; + curr = nt->sysdata_fields & SYSDATA_CPU_NR; if (cpu_nr_enabled == curr) /* no change requested */ goto unlock_ok; @@ -865,13 +865,13 @@ static ssize_t sysdata_cpu_nr_enabled_store(struct config_item *item, } if (cpu_nr_enabled) - nt->sysdata_fields |= CPU_NR; + nt->sysdata_fields |= SYSDATA_CPU_NR; else /* This is special because extradata_complete might have * remaining data from previous sysdata, and it needs to be * cleaned. */ - disable_sysdata_feature(nt, CPU_NR); + disable_sysdata_feature(nt, SYSDATA_CPU_NR); unlock_ok: ret = strnlen(buf, count); @@ -1130,7 +1130,7 @@ static int prepare_extradata(struct netconsole_target *nt) */ extradata_len = nt->userdata_length; - if (!(nt->sysdata_fields & CPU_NR)) + if (!(nt->sysdata_fields & SYSDATA_CPU_NR)) goto out; /* Append cpu=%d at extradata_complete after userdata str */ -- 2.51.0