From f66ef469a72d19764f943067307a570f83b00dca Mon Sep 17 00:00:00 2001 From: Michal Luczaj Date: Thu, 19 Dec 2024 10:49:32 +0100 Subject: [PATCH 01/16] vsock/test: Add test for accept_queue memory leak Attempt to enqueue a child after the queue was flushed, but before SOCK_DONE flag has been set. Test tries to produce a memory leak, kmemleak should be employed. Dealing with a race condition, test by its very nature may lead to a false negative. Fixed by commit d7b0ff5a8667 ("virtio/vsock: Fix accept_queue memory leak"). Reviewed-by: Stefano Garzarella Signed-off-by: Michal Luczaj Link: https://patch.msgid.link/20241219-test-vsock-leaks-v4-5-a416e554d9d7@rbox.co Signed-off-by: Jakub Kicinski --- tools/testing/vsock/vsock_test.c | 52 ++++++++++++++++++++++++++++++++ 1 file changed, 52 insertions(+) diff --git a/tools/testing/vsock/vsock_test.c b/tools/testing/vsock/vsock_test.c index 8bb2ab41c55f..2a8fcb062d9d 100644 --- a/tools/testing/vsock/vsock_test.c +++ b/tools/testing/vsock/vsock_test.c @@ -29,6 +29,10 @@ #include "control.h" #include "util.h" +/* Basic messages for control_writeulong(), control_readulong() */ +#define CONTROL_CONTINUE 1 +#define CONTROL_DONE 0 + static void test_stream_connection_reset(const struct test_opts *opts) { union { @@ -1474,6 +1478,49 @@ static void test_stream_cred_upd_on_set_rcvlowat(const struct test_opts *opts) test_stream_credit_update_test(opts, false); } +/* The goal of test leak_acceptq is to stress the race between connect() and + * close(listener). Implementation of client/server loops boils down to: + * + * client server + * ------ ------ + * write(CONTINUE) + * expect(CONTINUE) + * listen() + * write(LISTENING) + * expect(LISTENING) + * connect() close() + */ +#define ACCEPTQ_LEAK_RACE_TIMEOUT 2 /* seconds */ + +static void test_stream_leak_acceptq_client(const struct test_opts *opts) +{ + time_t tout; + int fd; + + tout = current_nsec() + ACCEPTQ_LEAK_RACE_TIMEOUT * NSEC_PER_SEC; + do { + control_writeulong(CONTROL_CONTINUE); + + fd = vsock_stream_connect(opts->peer_cid, opts->peer_port); + if (fd >= 0) + close(fd); + } while (current_nsec() < tout); + + control_writeulong(CONTROL_DONE); +} + +/* Test for a memory leak. User is expected to run kmemleak scan, see README. */ +static void test_stream_leak_acceptq_server(const struct test_opts *opts) +{ + int fd; + + while (control_readulong() == CONTROL_CONTINUE) { + fd = vsock_stream_listen(VMADDR_CID_ANY, opts->peer_port); + control_writeln("LISTENING"); + close(fd); + } +} + static struct test_case test_cases[] = { { .name = "SOCK_STREAM connection reset", @@ -1604,6 +1651,11 @@ static struct test_case test_cases[] = { .run_client = test_seqpacket_unsent_bytes_client, .run_server = test_seqpacket_unsent_bytes_server, }, + { + .name = "SOCK_STREAM leak accept queue", + .run_client = test_stream_leak_acceptq_client, + .run_server = test_stream_leak_acceptq_server, + }, {}, }; -- 2.51.0 From ec50efee8cf814035d82f3b42dad916144d98b38 Mon Sep 17 00:00:00 2001 From: Michal Luczaj Date: Thu, 19 Dec 2024 10:49:33 +0100 Subject: [PATCH 02/16] vsock/test: Add test for sk_error_queue memory leak Ask for MSG_ZEROCOPY completion notification, but do not recv() it. Test attempts to create a memory leak, kmemleak should be employed. Fixed by commit fbf7085b3ad1 ("vsock: Fix sk_error_queue memory leak"). 
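For reference (not part of this patch), the step the test deliberately skips is reading the completion notification back from the error queue. A minimal userspace sketch, assuming the generic sock_extended_err / SO_EE_ORIGIN_ZEROCOPY reporting described in Documentation/networking/msg_zerocopy.rst (the exact cmsg level/type is transport specific):

  #include <errno.h>
  #include <sys/socket.h>
  #include <linux/errqueue.h>

  /* Read one MSG_ZEROCOPY completion so its notification skb is
   * removed from sk_error_queue before close(). Returns 0 on success.
   */
  static int drain_zerocopy_completion(int fd)
  {
  	struct sock_extended_err *serr;
  	struct msghdr msg = { 0 };
  	char control[128];
  	struct cmsghdr *cm;

  	msg.msg_control = control;
  	msg.msg_controllen = sizeof(control);

  	if (recvmsg(fd, &msg, MSG_ERRQUEUE) < 0)
  		return -errno;

  	for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm)) {
  		serr = (struct sock_extended_err *)CMSG_DATA(cm);
  		if (serr->ee_origin == SO_EE_ORIGIN_ZEROCOPY &&
  		    serr->ee_errno == 0)
  			return 0; /* completed range: ee_info..ee_data */
  	}

  	return -EINVAL;
  }

Skipping this drain leaves the notification skb queued on sk_error_queue when the socket is closed, which is the object the referenced fix stops leaking.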
Reviewed-by: Stefano Garzarella Signed-off-by: Michal Luczaj Link: https://patch.msgid.link/20241219-test-vsock-leaks-v4-6-a416e554d9d7@rbox.co Signed-off-by: Jakub Kicinski --- tools/testing/vsock/vsock_test.c | 45 ++++++++++++++++++++++++++++++++ 1 file changed, 45 insertions(+) diff --git a/tools/testing/vsock/vsock_test.c b/tools/testing/vsock/vsock_test.c index 2a8fcb062d9d..2dec6290b075 100644 --- a/tools/testing/vsock/vsock_test.c +++ b/tools/testing/vsock/vsock_test.c @@ -1521,6 +1521,46 @@ static void test_stream_leak_acceptq_server(const struct test_opts *opts) } } +/* Test for a memory leak. User is expected to run kmemleak scan, see README. */ +static void test_stream_msgzcopy_leak_errq_client(const struct test_opts *opts) +{ + struct pollfd fds = { 0 }; + int fd; + + fd = vsock_stream_connect(opts->peer_cid, opts->peer_port); + if (fd < 0) { + perror("connect"); + exit(EXIT_FAILURE); + } + + enable_so_zerocopy_check(fd); + send_byte(fd, 1, MSG_ZEROCOPY); + + fds.fd = fd; + fds.events = 0; + if (poll(&fds, 1, -1) < 0) { + perror("poll"); + exit(EXIT_FAILURE); + } + + close(fd); +} + +static void test_stream_msgzcopy_leak_errq_server(const struct test_opts *opts) +{ + int fd; + + fd = vsock_stream_accept(VMADDR_CID_ANY, opts->peer_port, NULL); + if (fd < 0) { + perror("accept"); + exit(EXIT_FAILURE); + } + + recv_byte(fd, 1, 0); + vsock_wait_remote_close(fd); + close(fd); +} + static struct test_case test_cases[] = { { .name = "SOCK_STREAM connection reset", @@ -1656,6 +1696,11 @@ static struct test_case test_cases[] = { .run_client = test_stream_leak_acceptq_client, .run_server = test_stream_leak_acceptq_server, }, + { + .name = "SOCK_STREAM MSG_ZEROCOPY leak MSG_ERRQUEUE", + .run_client = test_stream_msgzcopy_leak_errq_client, + .run_server = test_stream_msgzcopy_leak_errq_server, + }, {}, }; -- 2.51.0 From d127ac8b1d4d3524d292b597100fef96dd909c9b Mon Sep 17 00:00:00 2001 From: Michal Luczaj Date: Thu, 19 Dec 2024 10:49:34 +0100 Subject: [PATCH 03/16] vsock/test: Add test for MSG_ZEROCOPY completion memory leak Exercise the ENOMEM error path by attempting to hit net.core.optmem_max limit on send(). Test aims to create a memory leak, kmemleak should be employed. Fixed by commit 60cf6206a1f5 ("virtio/vsock: Improve MSG_ZEROCOPY error handling"). Reviewed-by: Stefano Garzarella Signed-off-by: Michal Luczaj Link: https://patch.msgid.link/20241219-test-vsock-leaks-v4-7-a416e554d9d7@rbox.co Signed-off-by: Jakub Kicinski --- tools/testing/vsock/vsock_test.c | 152 +++++++++++++++++++++++++++++++ 1 file changed, 152 insertions(+) diff --git a/tools/testing/vsock/vsock_test.c b/tools/testing/vsock/vsock_test.c index 2dec6290b075..1eebbc0d5f61 100644 --- a/tools/testing/vsock/vsock_test.c +++ b/tools/testing/vsock/vsock_test.c @@ -1561,6 +1561,153 @@ static void test_stream_msgzcopy_leak_errq_server(const struct test_opts *opts) close(fd); } +/* Test msgzcopy_leak_zcskb is meant to exercise sendmsg() error handling path, + * that might leak an skb. The idea is to fail virtio_transport_init_zcopy_skb() + * by hitting net.core.optmem_max limit in sock_omalloc(), specifically + * + * vsock_connectible_sendmsg + * virtio_transport_stream_enqueue + * virtio_transport_send_pkt_info + * virtio_transport_init_zcopy_skb + * . msg_zerocopy_realloc + * . msg_zerocopy_alloc + * . sock_omalloc + * . sk_omem_alloc + size > sysctl_optmem_max + * return -ENOMEM + * + * We abuse the implementation detail of net/socket.c:____sys_sendmsg(). 
+ * sk_omem_alloc can be precisely bumped by sock_kmalloc(), as it is used to + * fetch user-provided control data. + * + * While this approach works for now, it relies on assumptions regarding the + * implementation and configuration (for example, order of net.core.optmem_max + * can not exceed MAX_PAGE_ORDER), which may not hold in the future. A more + * resilient testing could be implemented by leveraging the Fault injection + * framework (CONFIG_FAULT_INJECTION), e.g. + * + * client# echo N > /sys/kernel/debug/failslab/ignore-gfp-wait + * client# echo 0 > /sys/kernel/debug/failslab/verbose + * + * void client(const struct test_opts *opts) + * { + * char buf[16]; + * int f, s, i; + * + * f = open("/proc/self/fail-nth", O_WRONLY); + * + * for (i = 1; i < 32; i++) { + * control_writeulong(CONTROL_CONTINUE); + * + * s = vsock_stream_connect(opts->peer_cid, opts->peer_port); + * enable_so_zerocopy_check(s); + * + * sprintf(buf, "%d", i); + * write(f, buf, strlen(buf)); + * + * send(s, &(char){ 0 }, 1, MSG_ZEROCOPY); + * + * write(f, "0", 1); + * close(s); + * } + * + * control_writeulong(CONTROL_DONE); + * close(f); + * } + * + * void server(const struct test_opts *opts) + * { + * int fd; + * + * while (control_readulong() == CONTROL_CONTINUE) { + * fd = vsock_stream_accept(VMADDR_CID_ANY, opts->peer_port, NULL); + * vsock_wait_remote_close(fd); + * close(fd); + * } + * } + * + * Refer to Documentation/fault-injection/fault-injection.rst. + */ +#define MAX_PAGE_ORDER 10 /* usually */ +#define PAGE_SIZE 4096 + +/* Test for a memory leak. User is expected to run kmemleak scan, see README. */ +static void test_stream_msgzcopy_leak_zcskb_client(const struct test_opts *opts) +{ + size_t optmem_max, ctl_len, chunk_size; + struct msghdr msg = { 0 }; + struct iovec iov; + char *chunk; + int fd, res; + FILE *f; + + f = fopen("/proc/sys/net/core/optmem_max", "r"); + if (!f) { + perror("fopen(optmem_max)"); + exit(EXIT_FAILURE); + } + + if (fscanf(f, "%zu", &optmem_max) != 1) { + fprintf(stderr, "fscanf(optmem_max) failed\n"); + exit(EXIT_FAILURE); + } + + fclose(f); + + fd = vsock_stream_connect(opts->peer_cid, opts->peer_port); + if (fd < 0) { + perror("connect"); + exit(EXIT_FAILURE); + } + + enable_so_zerocopy_check(fd); + + ctl_len = optmem_max - 1; + if (ctl_len > PAGE_SIZE << MAX_PAGE_ORDER) { + fprintf(stderr, "Try with net.core.optmem_max = 100000\n"); + exit(EXIT_FAILURE); + } + + chunk_size = CMSG_SPACE(ctl_len); + chunk = malloc(chunk_size); + if (!chunk) { + perror("malloc"); + exit(EXIT_FAILURE); + } + memset(chunk, 0, chunk_size); + + iov.iov_base = &(char){ 0 }; + iov.iov_len = 1; + + msg.msg_iov = &iov; + msg.msg_iovlen = 1; + msg.msg_control = chunk; + msg.msg_controllen = ctl_len; + + errno = 0; + res = sendmsg(fd, &msg, MSG_ZEROCOPY); + if (res >= 0 || errno != ENOMEM) { + fprintf(stderr, "Expected ENOMEM, got errno=%d res=%d\n", + errno, res); + exit(EXIT_FAILURE); + } + + close(fd); +} + +static void test_stream_msgzcopy_leak_zcskb_server(const struct test_opts *opts) +{ + int fd; + + fd = vsock_stream_accept(VMADDR_CID_ANY, opts->peer_port, NULL); + if (fd < 0) { + perror("accept"); + exit(EXIT_FAILURE); + } + + vsock_wait_remote_close(fd); + close(fd); +} + static struct test_case test_cases[] = { { .name = "SOCK_STREAM connection reset", @@ -1701,6 +1848,11 @@ static struct test_case test_cases[] = { .run_client = test_stream_msgzcopy_leak_errq_client, .run_server = test_stream_msgzcopy_leak_errq_server, }, + { + .name = "SOCK_STREAM MSG_ZEROCOPY leak completion skb", + 
.run_client = test_stream_msgzcopy_leak_zcskb_client, + .run_server = test_stream_msgzcopy_leak_zcskb_server, + }, {}, }; -- 2.51.0 From d46ef4ee381f0f73b13714f319662f48f0c8b471 Mon Sep 17 00:00:00 2001 From: Divya Koppera Date: Thu, 19 Dec 2024 18:03:07 +0530 Subject: [PATCH 04/16] net: phy: microchip_rds_ptp: Add header file for Microchip rds ptp library This rds ptp header file will cover ptp macros for future phys in Microchip where addresses will be same but base offset and mmd address may changes. Reviewed-by: Andrew Lunn Reviewed-by: Vadim Fedorenko Signed-off-by: Divya Koppera Link: https://patch.msgid.link/20241219123311.30213-2-divya.koppera@microchip.com Signed-off-by: Jakub Kicinski --- drivers/net/phy/microchip_rds_ptp.h | 223 ++++++++++++++++++++++++++++ 1 file changed, 223 insertions(+) create mode 100644 drivers/net/phy/microchip_rds_ptp.h diff --git a/drivers/net/phy/microchip_rds_ptp.h b/drivers/net/phy/microchip_rds_ptp.h new file mode 100644 index 000000000000..e95c065728b5 --- /dev/null +++ b/drivers/net/phy/microchip_rds_ptp.h @@ -0,0 +1,223 @@ +/* SPDX-License-Identifier: GPL-2.0 + * Copyright (C) 2024 Microchip Technology + */ + +#ifndef _MICROCHIP_RDS_PTP_H +#define _MICROCHIP_RDS_PTP_H + +#include +#include +#include +#include +#include +#include + +#define MCHP_RDS_PTP_CMD_CTL 0x0 +#define MCHP_RDS_PTP_CMD_CTL_LTC_STEP_NSEC BIT(6) +#define MCHP_RDS_PTP_CMD_CTL_LTC_STEP_SEC BIT(5) +#define MCHP_RDS_PTP_CMD_CTL_CLOCK_LOAD BIT(4) +#define MCHP_RDS_PTP_CMD_CTL_CLOCK_READ BIT(3) +#define MCHP_RDS_PTP_CMD_CTL_EN BIT(1) +#define MCHP_RDS_PTP_CMD_CTL_DIS BIT(0) + +#define MCHP_RDS_PTP_REF_CLK_CFG 0x2 +#define MCHP_RDS_PTP_REF_CLK_SRC_250MHZ 0x0 +#define MCHP_RDS_PTP_REF_CLK_PERIOD_OVERRIDE BIT(9) +#define MCHP_RDS_PTP_REF_CLK_PERIOD 4 +#define MCHP_RDS_PTP_REF_CLK_CFG_SET (MCHP_RDS_PTP_REF_CLK_SRC_250MHZ |\ + MCHP_RDS_PTP_REF_CLK_PERIOD_OVERRIDE |\ + MCHP_RDS_PTP_REF_CLK_PERIOD) + +#define MCHP_RDS_PTP_LTC_SEC_HI 0x5 +#define MCHP_RDS_PTP_LTC_SEC_MID 0x6 +#define MCHP_RDS_PTP_LTC_SEC_LO 0x7 +#define MCHP_RDS_PTP_LTC_NS_HI 0x8 +#define MCHP_RDS_PTP_LTC_NS_LO 0x9 +#define MCHP_RDS_PTP_LTC_RATE_ADJ_HI 0xc +#define MCHP_RDS_PTP_LTC_RATE_ADJ_HI_DIR BIT(15) +#define MCHP_RDS_PTP_LTC_RATE_ADJ_LO 0xd +#define MCHP_RDS_PTP_STEP_ADJ_HI 0x12 +#define MCHP_RDS_PTP_STEP_ADJ_HI_DIR BIT(15) +#define MCHP_RDS_PTP_STEP_ADJ_LO 0x13 +#define MCHP_RDS_PTP_LTC_READ_SEC_HI 0x29 +#define MCHP_RDS_PTP_LTC_READ_SEC_MID 0x2a +#define MCHP_RDS_PTP_LTC_READ_SEC_LO 0x2b +#define MCHP_RDS_PTP_LTC_READ_NS_HI 0x2c +#define MCHP_RDS_PTP_LTC_READ_NS_LO 0x2d +#define MCHP_RDS_PTP_OP_MODE 0x41 +#define MCHP_RDS_PTP_OP_MODE_DIS 0 +#define MCHP_RDS_PTP_OP_MODE_STANDALONE 1 +#define MCHP_RDS_PTP_LATENCY_CORRECTION_CTL 0x44 +#define MCHP_RDS_PTP_PREDICTOR_EN BIT(6) +#define MCHP_RDS_PTP_TX_PRED_DIS BIT(1) +#define MCHP_RDS_PTP_RX_PRED_DIS BIT(0) +#define MCHP_RDS_PTP_LATENCY_SETTING (MCHP_RDS_PTP_PREDICTOR_EN | \ + MCHP_RDS_PTP_TX_PRED_DIS | \ + MCHP_RDS_PTP_RX_PRED_DIS) + +#define MCHP_RDS_PTP_INT_EN 0x0 +#define MCHP_RDS_PTP_INT_STS 0x01 +#define MCHP_RDS_PTP_INT_TX_TS_OVRFL_EN BIT(3) +#define MCHP_RDS_PTP_INT_TX_TS_EN BIT(2) +#define MCHP_RDS_PTP_INT_RX_TS_OVRFL_EN BIT(1) +#define MCHP_RDS_PTP_INT_RX_TS_EN BIT(0) +#define MCHP_RDS_PTP_INT_ALL_MSK (MCHP_RDS_PTP_INT_TX_TS_OVRFL_EN | \ + MCHP_RDS_PTP_INT_TX_TS_EN | \ + MCHP_RDS_PTP_INT_RX_TS_OVRFL_EN |\ + MCHP_RDS_PTP_INT_RX_TS_EN) + +#define MCHP_RDS_PTP_CAP_INFO 0x2e +#define MCHP_RDS_PTP_TX_TS_CNT(v) (((v) & GENMASK(11, 8)) >> 8) +#define 
MCHP_RDS_PTP_RX_TS_CNT(v) ((v) & GENMASK(3, 0)) + +#define MCHP_RDS_PTP_RX_PARSE_CONFIG 0x42 +#define MCHP_RDS_PTP_RX_PARSE_L2_ADDR_EN 0x44 +#define MCHP_RDS_PTP_RX_PARSE_IPV4_ADDR_EN 0x45 + +#define MCHP_RDS_PTP_RX_TIMESTAMP_CONFIG 0x4e +#define MCHP_RDS_PTP_RX_TIMESTAMP_CONFIG_PTP_FCS_DIS BIT(0) + +#define MCHP_RDS_PTP_RX_VERSION 0x48 +#define MCHP_RDS_PTP_RX_TIMESTAMP_EN 0x4d + +#define MCHP_RDS_PTP_RX_INGRESS_NS_HI 0x54 +#define MCHP_RDS_PTP_RX_INGRESS_NS_HI_TS_VALID BIT(15) + +#define MCHP_RDS_PTP_RX_INGRESS_NS_LO 0x55 +#define MCHP_RDS_PTP_RX_INGRESS_SEC_HI 0x56 +#define MCHP_RDS_PTP_RX_INGRESS_SEC_LO 0x57 +#define MCHP_RDS_PTP_RX_MSG_HDR2 0x59 + +#define MCHP_RDS_PTP_TX_PARSE_CONFIG 0x82 +#define MCHP_RDS_PTP_PARSE_CONFIG_LAYER2_EN BIT(0) +#define MCHP_RDS_PTP_PARSE_CONFIG_IPV4_EN BIT(1) +#define MCHP_RDS_PTP_PARSE_CONFIG_IPV6_EN BIT(2) + +#define MCHP_RDS_PTP_TX_PARSE_L2_ADDR_EN 0x84 +#define MCHP_RDS_PTP_TX_PARSE_IPV4_ADDR_EN 0x85 + +#define MCHP_RDS_PTP_TX_VERSION 0x88 +#define MCHP_RDS_PTP_MAX_VERSION(x) (((x) & GENMASK(7, 0)) << 8) +#define MCHP_RDS_PTP_MIN_VERSION(x) ((x) & GENMASK(7, 0)) + +#define MCHP_RDS_PTP_TX_TIMESTAMP_EN 0x8d +#define MCHP_RDS_PTP_TIMESTAMP_EN_SYNC BIT(0) +#define MCHP_RDS_PTP_TIMESTAMP_EN_DREQ BIT(1) +#define MCHP_RDS_PTP_TIMESTAMP_EN_PDREQ BIT(2) +#define MCHP_RDS_PTP_TIMESTAMP_EN_PDRES BIT(3) +#define MCHP_RDS_PTP_TIMESTAMP_EN_ALL (MCHP_RDS_PTP_TIMESTAMP_EN_SYNC |\ + MCHP_RDS_PTP_TIMESTAMP_EN_DREQ |\ + MCHP_RDS_PTP_TIMESTAMP_EN_PDREQ |\ + MCHP_RDS_PTP_TIMESTAMP_EN_PDRES) + +#define MCHP_RDS_PTP_TX_TIMESTAMP_CONFIG 0x8e +#define MCHP_RDS_PTP_TX_TIMESTAMP_CONFIG_PTP_FCS_DIS BIT(0) + +#define MCHP_RDS_PTP_TX_MOD 0x8f +#define MCHP_RDS_TX_MOD_PTP_SYNC_TS_INSERT BIT(12) + +#define MCHP_RDS_PTP_TX_EGRESS_NS_HI 0x94 +#define MCHP_RDS_PTP_TX_EGRESS_NS_HI_TS_VALID BIT(15) + +#define MCHP_RDS_PTP_TX_EGRESS_NS_LO 0x95 +#define MCHP_RDS_PTP_TX_EGRESS_SEC_HI 0x96 +#define MCHP_RDS_PTP_TX_EGRESS_SEC_LO 0x97 +#define MCHP_RDS_PTP_TX_MSG_HDR2 0x99 + +#define MCHP_RDS_PTP_TSU_GEN_CONFIG 0xc0 +#define MCHP_RDS_PTP_TSU_GEN_CFG_TSU_EN BIT(0) + +#define MCHP_RDS_PTP_TSU_HARD_RESET 0xc1 +#define MCHP_RDS_PTP_TSU_HARDRESET BIT(0) + +/* Represents 1ppm adjustment in 2^32 format with + * each nsec contains 4 clock cycles in 250MHz. 
+ * The value is calculated as following: (1/1000000)/((2^-32)/4) + */ +#define MCHP_RDS_PTP_1PPM_FORMAT 17179 +#define MCHP_RDS_PTP_FIFO_SIZE 8 +#define MCHP_RDS_PTP_MAX_ADJ 31249999 + +#define BASE_CLK(p) ((p)->clk_base_addr) +#define BASE_PORT(p) ((p)->port_base_addr) +#define PTP_MMD(p) ((p)->mmd) + +enum mchp_rds_ptp_base { + MCHP_RDS_PTP_PORT, + MCHP_RDS_PTP_CLOCK +}; + +enum mchp_rds_ptp_fifo_dir { + MCHP_RDS_PTP_INGRESS_FIFO, + MCHP_RDS_PTP_EGRESS_FIFO +}; + +struct mchp_rds_ptp_clock { + struct mii_timestamper mii_ts; + struct phy_device *phydev; + struct ptp_clock *ptp_clock; + + struct sk_buff_head tx_queue; + struct sk_buff_head rx_queue; + struct list_head rx_ts_list; + + struct ptp_clock_info caps; + + /* Lock for Rx ts fifo */ + spinlock_t rx_ts_lock; + int hwts_tx_type; + + enum hwtstamp_rx_filters rx_filter; + int layer; + int version; + u16 port_base_addr; + u16 clk_base_addr; + + /* Lock for phc */ + struct mutex ptp_lock; + u8 mmd; +}; + +struct mchp_rds_ptp_rx_ts { + struct list_head list; + u32 seconds; + u32 nsec; + u16 seq_id; +}; + +#if IS_ENABLED(CONFIG_MICROCHIP_PHY_RDS_PTP) + +struct mchp_rds_ptp_clock *mchp_rds_ptp_probe(struct phy_device *phydev, u8 mmd, + u16 clk_base, u16 port_base); + +int mchp_rds_ptp_top_config_intr(struct mchp_rds_ptp_clock *clock, + u16 reg, u16 val, bool enable); + +irqreturn_t mchp_rds_ptp_handle_interrupt(struct mchp_rds_ptp_clock *clock); + +#else + +static inline struct mchp_rds_ptp_clock *mchp_rds_ptp_probe(struct phy_device + *phydev, u8 mmd, + u16 clk_base, + u16 port_base) +{ + return NULL; +} + +static inline int mchp_rds_ptp_top_config_intr(struct mchp_rds_ptp_clock *clock, + u16 reg, u16 val, bool enable) +{ + return 0; +} + +static inline irqreturn_t mchp_rds_ptp_handle_interrupt(struct + mchp_rds_ptp_clock + * clock) +{ + return IRQ_NONE; +} + +#endif //CONFIG_MICROCHIP_PHY_RDS_PTP + +#endif //_MICROCHIP_RDS_PTP_H -- 2.51.0 From fa51199c5f34172fc7fd248ca9105e4e0ca6d80a Mon Sep 17 00:00:00 2001 From: Divya Koppera Date: Thu, 19 Dec 2024 18:03:08 +0530 Subject: [PATCH 05/16] net: phy: microchip_rds_ptp : Add rds ptp library for Microchip phys Add rds ptp library for Microchip phys 1-step and 2-step modes are supported, over Ethernet and UDP(ipv4, ipv6) Reviewed-by: Vadim Fedorenko Signed-off-by: Divya Koppera Link: https://patch.msgid.link/20241219123311.30213-3-divya.koppera@microchip.com Signed-off-by: Jakub Kicinski --- drivers/net/phy/microchip_rds_ptp.c | 1039 +++++++++++++++++++++++++++ 1 file changed, 1039 insertions(+) create mode 100644 drivers/net/phy/microchip_rds_ptp.c diff --git a/drivers/net/phy/microchip_rds_ptp.c b/drivers/net/phy/microchip_rds_ptp.c new file mode 100644 index 000000000000..2936e46531cf --- /dev/null +++ b/drivers/net/phy/microchip_rds_ptp.c @@ -0,0 +1,1039 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2024 Microchip Technology + +#include "microchip_rds_ptp.h" + +static int mchp_rds_phy_read_mmd(struct mchp_rds_ptp_clock *clock, + u32 offset, enum mchp_rds_ptp_base base) +{ + struct phy_device *phydev = clock->phydev; + u32 addr; + + addr = (offset + ((base == MCHP_RDS_PTP_PORT) ? BASE_PORT(clock) : + BASE_CLK(clock))); + + return phy_read_mmd(phydev, PTP_MMD(clock), addr); +} + +static int mchp_rds_phy_write_mmd(struct mchp_rds_ptp_clock *clock, + u32 offset, enum mchp_rds_ptp_base base, + u16 val) +{ + struct phy_device *phydev = clock->phydev; + u32 addr; + + addr = (offset + ((base == MCHP_RDS_PTP_PORT) ? 
BASE_PORT(clock) : + BASE_CLK(clock))); + + return phy_write_mmd(phydev, PTP_MMD(clock), addr, val); +} + +static int mchp_rds_phy_modify_mmd(struct mchp_rds_ptp_clock *clock, + u32 offset, enum mchp_rds_ptp_base base, + u16 mask, u16 val) +{ + struct phy_device *phydev = clock->phydev; + u32 addr; + + addr = (offset + ((base == MCHP_RDS_PTP_PORT) ? BASE_PORT(clock) : + BASE_CLK(clock))); + + return phy_modify_mmd(phydev, PTP_MMD(clock), addr, mask, val); +} + +static int mchp_rds_phy_set_bits_mmd(struct mchp_rds_ptp_clock *clock, + u32 offset, enum mchp_rds_ptp_base base, + u16 val) +{ + struct phy_device *phydev = clock->phydev; + u32 addr; + + addr = (offset + ((base == MCHP_RDS_PTP_PORT) ? BASE_PORT(clock) : + BASE_CLK(clock))); + + return phy_set_bits_mmd(phydev, PTP_MMD(clock), addr, val); +} + +static int mchp_rds_ptp_flush_fifo(struct mchp_rds_ptp_clock *clock, + enum mchp_rds_ptp_fifo_dir dir) +{ + int rc; + + if (dir == MCHP_RDS_PTP_EGRESS_FIFO) + skb_queue_purge(&clock->tx_queue); + else + skb_queue_purge(&clock->rx_queue); + + for (int i = 0; i < MCHP_RDS_PTP_FIFO_SIZE; ++i) { + rc = mchp_rds_phy_read_mmd(clock, + dir == MCHP_RDS_PTP_EGRESS_FIFO ? + MCHP_RDS_PTP_TX_MSG_HDR2 : + MCHP_RDS_PTP_RX_MSG_HDR2, + MCHP_RDS_PTP_PORT); + if (rc < 0) + return rc; + } + return mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_INT_STS, + MCHP_RDS_PTP_PORT); +} + +static int mchp_rds_ptp_config_intr(struct mchp_rds_ptp_clock *clock, + bool enable) +{ + /* Enable or disable ptp interrupts */ + return mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_INT_EN, + MCHP_RDS_PTP_PORT, + enable ? MCHP_RDS_PTP_INT_ALL_MSK : 0); +} + +static void mchp_rds_ptp_txtstamp(struct mii_timestamper *mii_ts, + struct sk_buff *skb, int type) +{ + struct mchp_rds_ptp_clock *clock = container_of(mii_ts, + struct mchp_rds_ptp_clock, + mii_ts); + + switch (clock->hwts_tx_type) { + case HWTSTAMP_TX_ONESTEP_SYNC: + if (ptp_msg_is_sync(skb, type)) { + kfree_skb(skb); + return; + } + fallthrough; + case HWTSTAMP_TX_ON: + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; + skb_queue_tail(&clock->tx_queue, skb); + break; + case HWTSTAMP_TX_OFF: + default: + kfree_skb(skb); + break; + } +} + +static bool mchp_rds_ptp_get_sig_rx(struct sk_buff *skb, u16 *sig) +{ + struct ptp_header *ptp_header; + int type; + + skb_push(skb, ETH_HLEN); + type = ptp_classify_raw(skb); + if (type == PTP_CLASS_NONE) + return false; + + ptp_header = ptp_parse_header(skb, type); + if (!ptp_header) + return false; + + skb_pull_inline(skb, ETH_HLEN); + + *sig = (__force u16)(ntohs(ptp_header->sequence_id)); + + return true; +} + +static bool mchp_rds_ptp_match_skb(struct mchp_rds_ptp_clock *clock, + struct mchp_rds_ptp_rx_ts *rx_ts) +{ + struct skb_shared_hwtstamps *shhwtstamps; + struct sk_buff *skb, *skb_tmp; + unsigned long flags; + bool rc = false; + u16 skb_sig; + + spin_lock_irqsave(&clock->rx_queue.lock, flags); + skb_queue_walk_safe(&clock->rx_queue, skb, skb_tmp) { + if (!mchp_rds_ptp_get_sig_rx(skb, &skb_sig)) + continue; + + if (skb_sig != rx_ts->seq_id) + continue; + + __skb_unlink(skb, &clock->rx_queue); + + rc = true; + break; + } + spin_unlock_irqrestore(&clock->rx_queue.lock, flags); + + if (rc) { + shhwtstamps = skb_hwtstamps(skb); + shhwtstamps->hwtstamp = ktime_set(rx_ts->seconds, rx_ts->nsec); + netif_rx(skb); + } + + return rc; +} + +static void mchp_rds_ptp_match_rx_ts(struct mchp_rds_ptp_clock *clock, + struct mchp_rds_ptp_rx_ts *rx_ts) +{ + unsigned long flags; + + /* If we failed to match the skb add it to the queue for when + * the frame 
will come + */ + if (!mchp_rds_ptp_match_skb(clock, rx_ts)) { + spin_lock_irqsave(&clock->rx_ts_lock, flags); + list_add(&rx_ts->list, &clock->rx_ts_list); + spin_unlock_irqrestore(&clock->rx_ts_lock, flags); + } else { + kfree(rx_ts); + } +} + +static void mchp_rds_ptp_match_rx_skb(struct mchp_rds_ptp_clock *clock, + struct sk_buff *skb) +{ + struct mchp_rds_ptp_rx_ts *rx_ts, *tmp, *rx_ts_var = NULL; + struct skb_shared_hwtstamps *shhwtstamps; + unsigned long flags; + u16 skb_sig; + + if (!mchp_rds_ptp_get_sig_rx(skb, &skb_sig)) + return; + + /* Iterate over all RX timestamps and match it with the received skbs */ + spin_lock_irqsave(&clock->rx_ts_lock, flags); + list_for_each_entry_safe(rx_ts, tmp, &clock->rx_ts_list, list) { + /* Check if we found the signature we were looking for. */ + if (skb_sig != rx_ts->seq_id) + continue; + + shhwtstamps = skb_hwtstamps(skb); + shhwtstamps->hwtstamp = ktime_set(rx_ts->seconds, rx_ts->nsec); + netif_rx(skb); + + rx_ts_var = rx_ts; + + break; + } + spin_unlock_irqrestore(&clock->rx_ts_lock, flags); + + if (rx_ts_var) { + list_del(&rx_ts_var->list); + kfree(rx_ts_var); + } else { + skb_queue_tail(&clock->rx_queue, skb); + } +} + +static bool mchp_rds_ptp_rxtstamp(struct mii_timestamper *mii_ts, + struct sk_buff *skb, int type) +{ + struct mchp_rds_ptp_clock *clock = container_of(mii_ts, + struct mchp_rds_ptp_clock, + mii_ts); + + if (clock->rx_filter == HWTSTAMP_FILTER_NONE || + type == PTP_CLASS_NONE) + return false; + + if ((type & clock->version) == 0 || (type & clock->layer) == 0) + return false; + + /* Here if match occurs skb is sent to application, If not skb is added + * to queue and sending skb to application will get handled when + * interrupt occurs i.e., it get handles in interrupt handler. By + * any means skb will reach the application so we should not return + * false here if skb doesn't matches. 
+ */ + mchp_rds_ptp_match_rx_skb(clock, skb); + + return true; +} + +static int mchp_rds_ptp_hwtstamp(struct mii_timestamper *mii_ts, + struct kernel_hwtstamp_config *config, + struct netlink_ext_ack *extack) +{ + struct mchp_rds_ptp_clock *clock = + container_of(mii_ts, struct mchp_rds_ptp_clock, + mii_ts); + struct mchp_rds_ptp_rx_ts *rx_ts, *tmp; + int txcfg = 0, rxcfg = 0; + unsigned long flags; + int rc; + + clock->hwts_tx_type = config->tx_type; + clock->rx_filter = config->rx_filter; + + switch (config->rx_filter) { + case HWTSTAMP_FILTER_NONE: + clock->layer = 0; + clock->version = 0; + break; + case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: + clock->layer = PTP_CLASS_L4; + clock->version = PTP_CLASS_V2; + break; + case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: + clock->layer = PTP_CLASS_L2; + clock->version = PTP_CLASS_V2; + break; + case HWTSTAMP_FILTER_PTP_V2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: + clock->layer = PTP_CLASS_L4 | PTP_CLASS_L2; + clock->version = PTP_CLASS_V2; + break; + default: + return -ERANGE; + } + + /* Setup parsing of the frames and enable the timestamping for ptp + * frames + */ + if (clock->layer & PTP_CLASS_L2) { + rxcfg = MCHP_RDS_PTP_PARSE_CONFIG_LAYER2_EN; + txcfg = MCHP_RDS_PTP_PARSE_CONFIG_LAYER2_EN; + } + if (clock->layer & PTP_CLASS_L4) { + rxcfg |= MCHP_RDS_PTP_PARSE_CONFIG_IPV4_EN | + MCHP_RDS_PTP_PARSE_CONFIG_IPV6_EN; + txcfg |= MCHP_RDS_PTP_PARSE_CONFIG_IPV4_EN | + MCHP_RDS_PTP_PARSE_CONFIG_IPV6_EN; + } + rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_RX_PARSE_CONFIG, + MCHP_RDS_PTP_PORT, rxcfg); + if (rc < 0) + return rc; + + rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_TX_PARSE_CONFIG, + MCHP_RDS_PTP_PORT, txcfg); + if (rc < 0) + return rc; + + rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_RX_TIMESTAMP_EN, + MCHP_RDS_PTP_PORT, + MCHP_RDS_PTP_TIMESTAMP_EN_ALL); + if (rc < 0) + return rc; + + rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_TX_TIMESTAMP_EN, + MCHP_RDS_PTP_PORT, + MCHP_RDS_PTP_TIMESTAMP_EN_ALL); + if (rc < 0) + return rc; + + if (clock->hwts_tx_type == HWTSTAMP_TX_ONESTEP_SYNC) + /* Enable / disable of the TX timestamp in the SYNC frames */ + rc = mchp_rds_phy_modify_mmd(clock, MCHP_RDS_PTP_TX_MOD, + MCHP_RDS_PTP_PORT, + MCHP_RDS_TX_MOD_PTP_SYNC_TS_INSERT, + MCHP_RDS_TX_MOD_PTP_SYNC_TS_INSERT); + else + rc = mchp_rds_phy_modify_mmd(clock, MCHP_RDS_PTP_TX_MOD, + MCHP_RDS_PTP_PORT, + MCHP_RDS_TX_MOD_PTP_SYNC_TS_INSERT, + (u16)~MCHP_RDS_TX_MOD_PTP_SYNC_TS_INSERT); + + if (rc < 0) + return rc; + + /* In case of multiple starts and stops, these needs to be cleared */ + spin_lock_irqsave(&clock->rx_ts_lock, flags); + list_for_each_entry_safe(rx_ts, tmp, &clock->rx_ts_list, list) { + list_del(&rx_ts->list); + kfree(rx_ts); + } + spin_unlock_irqrestore(&clock->rx_ts_lock, flags); + + rc = mchp_rds_ptp_flush_fifo(clock, MCHP_RDS_PTP_INGRESS_FIFO); + if (rc < 0) + return rc; + + rc = mchp_rds_ptp_flush_fifo(clock, MCHP_RDS_PTP_EGRESS_FIFO); + if (rc < 0) + return rc; + + /* Now enable the timestamping interrupts */ + rc = mchp_rds_ptp_config_intr(clock, + config->rx_filter != HWTSTAMP_FILTER_NONE); + + return rc < 0 ? 
rc : 0; +} + +static int mchp_rds_ptp_ts_info(struct mii_timestamper *mii_ts, + struct kernel_ethtool_ts_info *info) +{ + struct mchp_rds_ptp_clock *clock = container_of(mii_ts, + struct mchp_rds_ptp_clock, + mii_ts); + + info->phc_index = ptp_clock_index(clock->ptp_clock); + + info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE | + SOF_TIMESTAMPING_RX_HARDWARE | + SOF_TIMESTAMPING_RAW_HARDWARE; + + info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON) | + BIT(HWTSTAMP_TX_ONESTEP_SYNC); + + info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | + BIT(HWTSTAMP_FILTER_PTP_V2_L4_EVENT) | + BIT(HWTSTAMP_FILTER_PTP_V2_L2_EVENT) | + BIT(HWTSTAMP_FILTER_PTP_V2_EVENT); + + return 0; +} + +static int mchp_rds_ptp_ltc_adjtime(struct ptp_clock_info *info, s64 delta) +{ + struct mchp_rds_ptp_clock *clock = container_of(info, + struct mchp_rds_ptp_clock, + caps); + struct timespec64 ts; + bool add = true; + int rc = 0; + u32 nsec; + s32 sec; + + /* The HW allows up to 15 sec to adjust the time, but here we limit to + * 10 sec the adjustment. The reason is, in case the adjustment is 14 + * sec and 999999999 nsec, then we add 8ns to compensate the actual + * increment so the value can be bigger than 15 sec. Therefore limit the + * possible adjustments so we will not have these corner cases + */ + if (delta > 10000000000LL || delta < -10000000000LL) { + /* The timeadjustment is too big, so fall back using set time */ + u64 now; + + info->gettime64(info, &ts); + + now = ktime_to_ns(timespec64_to_ktime(ts)); + ts = ns_to_timespec64(now + delta); + + info->settime64(info, &ts); + return 0; + } + sec = div_u64_rem(abs(delta), NSEC_PER_SEC, &nsec); + if (delta < 0 && nsec != 0) { + /* It is not allowed to adjust low the nsec part, therefore + * subtract more from second part and add to nanosecond such + * that would roll over, so the second part will increase + */ + sec--; + nsec = NSEC_PER_SEC - nsec; + } + + /* Calculate the adjustments and the direction */ + if (delta < 0) + add = false; + + if (nsec > 0) { + /* add 8 ns to cover the likely normal increment */ + nsec += 8; + + if (nsec >= NSEC_PER_SEC) { + /* carry into seconds */ + sec++; + nsec -= NSEC_PER_SEC; + } + } + + mutex_lock(&clock->ptp_lock); + if (sec) { + sec = abs(sec); + + rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_STEP_ADJ_LO, + MCHP_RDS_PTP_CLOCK, sec); + if (rc < 0) + goto out_unlock; + + rc = mchp_rds_phy_set_bits_mmd(clock, MCHP_RDS_PTP_STEP_ADJ_HI, + MCHP_RDS_PTP_CLOCK, + ((add ? 
+ MCHP_RDS_PTP_STEP_ADJ_HI_DIR : + 0) | ((sec >> 16) & + GENMASK(13, 0)))); + if (rc < 0) + goto out_unlock; + + rc = mchp_rds_phy_set_bits_mmd(clock, MCHP_RDS_PTP_CMD_CTL, + MCHP_RDS_PTP_CLOCK, + MCHP_RDS_PTP_CMD_CTL_LTC_STEP_SEC); + if (rc < 0) + goto out_unlock; + } + + if (nsec) { + rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_STEP_ADJ_LO, + MCHP_RDS_PTP_CLOCK, + nsec & GENMASK(15, 0)); + if (rc < 0) + goto out_unlock; + + rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_STEP_ADJ_HI, + MCHP_RDS_PTP_CLOCK, + (nsec >> 16) & GENMASK(13, 0)); + if (rc < 0) + goto out_unlock; + + rc = mchp_rds_phy_set_bits_mmd(clock, MCHP_RDS_PTP_CMD_CTL, + MCHP_RDS_PTP_CLOCK, + MCHP_RDS_PTP_CMD_CTL_LTC_STEP_NSEC); + } + +out_unlock: + mutex_unlock(&clock->ptp_lock); + + return rc; +} + +static int mchp_rds_ptp_ltc_adjfine(struct ptp_clock_info *info, + long scaled_ppm) +{ + struct mchp_rds_ptp_clock *clock = container_of(info, + struct mchp_rds_ptp_clock, + caps); + u16 rate_lo, rate_hi; + bool faster = true; + u32 rate; + int rc; + + if (!scaled_ppm) + return 0; + + if (scaled_ppm < 0) { + scaled_ppm = -scaled_ppm; + faster = false; + } + + rate = MCHP_RDS_PTP_1PPM_FORMAT * (upper_16_bits(scaled_ppm)); + rate += (MCHP_RDS_PTP_1PPM_FORMAT * (lower_16_bits(scaled_ppm))) >> 16; + + rate_lo = rate & GENMASK(15, 0); + rate_hi = (rate >> 16) & GENMASK(13, 0); + + if (faster) + rate_hi |= MCHP_RDS_PTP_LTC_RATE_ADJ_HI_DIR; + + mutex_lock(&clock->ptp_lock); + rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_LTC_RATE_ADJ_HI, + MCHP_RDS_PTP_CLOCK, rate_hi); + if (rc < 0) + goto error; + + rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_LTC_RATE_ADJ_LO, + MCHP_RDS_PTP_CLOCK, rate_lo); + if (rc > 0) + rc = 0; +error: + mutex_unlock(&clock->ptp_lock); + + return rc; +} + +static int mchp_rds_ptp_ltc_gettime64(struct ptp_clock_info *info, + struct timespec64 *ts) +{ + struct mchp_rds_ptp_clock *clock = container_of(info, + struct mchp_rds_ptp_clock, + caps); + time64_t secs; + int rc = 0; + s64 nsecs; + + mutex_lock(&clock->ptp_lock); + /* Set read bit to 1 to save current values of 1588 local time counter + * into PTP LTC seconds and nanoseconds registers. 
+ */ + rc = mchp_rds_phy_set_bits_mmd(clock, MCHP_RDS_PTP_CMD_CTL, + MCHP_RDS_PTP_CLOCK, + MCHP_RDS_PTP_CMD_CTL_CLOCK_READ); + if (rc < 0) + goto out_unlock; + + /* Get LTC clock values */ + rc = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_LTC_READ_SEC_HI, + MCHP_RDS_PTP_CLOCK); + if (rc < 0) + goto out_unlock; + secs = rc << 16; + + rc = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_LTC_READ_SEC_MID, + MCHP_RDS_PTP_CLOCK); + if (rc < 0) + goto out_unlock; + secs |= rc; + secs <<= 16; + + rc = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_LTC_READ_SEC_LO, + MCHP_RDS_PTP_CLOCK); + if (rc < 0) + goto out_unlock; + secs |= rc; + + rc = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_LTC_READ_NS_HI, + MCHP_RDS_PTP_CLOCK); + if (rc < 0) + goto out_unlock; + nsecs = (rc & GENMASK(13, 0)); + nsecs <<= 16; + + rc = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_LTC_READ_NS_LO, + MCHP_RDS_PTP_CLOCK); + if (rc < 0) + goto out_unlock; + nsecs |= rc; + + set_normalized_timespec64(ts, secs, nsecs); + + if (rc > 0) + rc = 0; +out_unlock: + mutex_unlock(&clock->ptp_lock); + + return rc; +} + +static int mchp_rds_ptp_ltc_settime64(struct ptp_clock_info *info, + const struct timespec64 *ts) +{ + struct mchp_rds_ptp_clock *clock = container_of(info, + struct mchp_rds_ptp_clock, + caps); + int rc; + + mutex_lock(&clock->ptp_lock); + rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_LTC_SEC_LO, + MCHP_RDS_PTP_CLOCK, + lower_16_bits(ts->tv_sec)); + if (rc < 0) + goto out_unlock; + + rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_LTC_SEC_MID, + MCHP_RDS_PTP_CLOCK, + upper_16_bits(ts->tv_sec)); + if (rc < 0) + goto out_unlock; + + rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_LTC_SEC_HI, + MCHP_RDS_PTP_CLOCK, + upper_32_bits(ts->tv_sec) & GENMASK(15, 0)); + if (rc < 0) + goto out_unlock; + + rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_LTC_NS_LO, + MCHP_RDS_PTP_CLOCK, + lower_16_bits(ts->tv_nsec)); + if (rc < 0) + goto out_unlock; + + rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_LTC_NS_HI, + MCHP_RDS_PTP_CLOCK, + upper_16_bits(ts->tv_nsec) & GENMASK(13, 0)); + if (rc < 0) + goto out_unlock; + + /* Set load bit to 1 to write PTP LTC seconds and nanoseconds + * registers to 1588 local time counter. 
+ */ + rc = mchp_rds_phy_set_bits_mmd(clock, MCHP_RDS_PTP_CMD_CTL, + MCHP_RDS_PTP_CLOCK, + MCHP_RDS_PTP_CMD_CTL_CLOCK_LOAD); + if (rc > 0) + rc = 0; +out_unlock: + mutex_unlock(&clock->ptp_lock); + + return rc; +} + +static bool mchp_rds_ptp_get_sig_tx(struct sk_buff *skb, u16 *sig) +{ + struct ptp_header *ptp_header; + int type; + + type = ptp_classify_raw(skb); + if (type == PTP_CLASS_NONE) + return false; + + ptp_header = ptp_parse_header(skb, type); + if (!ptp_header) + return false; + + *sig = (__force u16)(ntohs(ptp_header->sequence_id)); + + return true; +} + +static void mchp_rds_ptp_match_tx_skb(struct mchp_rds_ptp_clock *clock, + u32 seconds, u32 nsec, u16 seq_id) +{ + struct skb_shared_hwtstamps shhwtstamps; + struct sk_buff *skb, *skb_tmp; + unsigned long flags; + bool rc = false; + u16 skb_sig; + + spin_lock_irqsave(&clock->tx_queue.lock, flags); + skb_queue_walk_safe(&clock->tx_queue, skb, skb_tmp) { + if (!mchp_rds_ptp_get_sig_tx(skb, &skb_sig)) + continue; + + if (skb_sig != seq_id) + continue; + + __skb_unlink(skb, &clock->tx_queue); + rc = true; + break; + } + spin_unlock_irqrestore(&clock->tx_queue.lock, flags); + + if (rc) { + shhwtstamps.hwtstamp = ktime_set(seconds, nsec); + skb_complete_tx_timestamp(skb, &shhwtstamps); + } +} + +static struct mchp_rds_ptp_rx_ts + *mchp_rds_ptp_get_rx_ts(struct mchp_rds_ptp_clock *clock) +{ + struct phy_device *phydev = clock->phydev; + struct mchp_rds_ptp_rx_ts *rx_ts = NULL; + u32 sec, nsec; + int rc; + + rc = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_RX_INGRESS_NS_HI, + MCHP_RDS_PTP_PORT); + if (rc < 0) + goto error; + if (!(rc & MCHP_RDS_PTP_RX_INGRESS_NS_HI_TS_VALID)) { + phydev_err(phydev, "RX Timestamp is not valid!\n"); + goto error; + } + nsec = (rc & GENMASK(13, 0)) << 16; + + rc = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_RX_INGRESS_NS_LO, + MCHP_RDS_PTP_PORT); + if (rc < 0) + goto error; + nsec |= rc; + + rc = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_RX_INGRESS_SEC_HI, + MCHP_RDS_PTP_PORT); + if (rc < 0) + goto error; + sec = rc << 16; + + rc = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_RX_INGRESS_SEC_LO, + MCHP_RDS_PTP_PORT); + if (rc < 0) + goto error; + sec |= rc; + + rc = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_RX_MSG_HDR2, + MCHP_RDS_PTP_PORT); + if (rc < 0) + goto error; + + rx_ts = kmalloc(sizeof(*rx_ts), GFP_KERNEL); + if (!rx_ts) + return NULL; + + rx_ts->seconds = sec; + rx_ts->nsec = nsec; + rx_ts->seq_id = rc; + +error: + return rx_ts; +} + +static void mchp_rds_ptp_process_rx_ts(struct mchp_rds_ptp_clock *clock) +{ + int caps; + + do { + struct mchp_rds_ptp_rx_ts *rx_ts; + + rx_ts = mchp_rds_ptp_get_rx_ts(clock); + if (rx_ts) + mchp_rds_ptp_match_rx_ts(clock, rx_ts); + + caps = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_CAP_INFO, + MCHP_RDS_PTP_PORT); + if (caps < 0) + return; + } while (MCHP_RDS_PTP_RX_TS_CNT(caps) > 0); +} + +static bool mchp_rds_ptp_get_tx_ts(struct mchp_rds_ptp_clock *clock, + u32 *sec, u32 *nsec, u16 *seq) +{ + int rc; + + rc = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_TX_EGRESS_NS_HI, + MCHP_RDS_PTP_PORT); + if (rc < 0) + return false; + if (!(rc & MCHP_RDS_PTP_TX_EGRESS_NS_HI_TS_VALID)) + return false; + *nsec = (rc & GENMASK(13, 0)) << 16; + + rc = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_TX_EGRESS_NS_LO, + MCHP_RDS_PTP_PORT); + if (rc < 0) + return false; + *nsec = *nsec | rc; + + rc = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_TX_EGRESS_SEC_HI, + MCHP_RDS_PTP_PORT); + if (rc < 0) + return false; + *sec = rc << 16; + + rc = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_TX_EGRESS_SEC_LO, 
+ MCHP_RDS_PTP_PORT); + if (rc < 0) + return false; + *sec = *sec | rc; + + rc = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_TX_MSG_HDR2, + MCHP_RDS_PTP_PORT); + if (rc < 0) + return false; + + *seq = rc; + + return true; +} + +static void mchp_rds_ptp_process_tx_ts(struct mchp_rds_ptp_clock *clock) +{ + int caps; + + do { + u32 sec, nsec; + u16 seq; + + if (mchp_rds_ptp_get_tx_ts(clock, &sec, &nsec, &seq)) + mchp_rds_ptp_match_tx_skb(clock, sec, nsec, seq); + + caps = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_CAP_INFO, + MCHP_RDS_PTP_PORT); + if (caps < 0) + return; + } while (MCHP_RDS_PTP_TX_TS_CNT(caps) > 0); +} + +int mchp_rds_ptp_top_config_intr(struct mchp_rds_ptp_clock *clock, + u16 reg, u16 val, bool clear) +{ + if (clear) + return phy_clear_bits_mmd(clock->phydev, PTP_MMD(clock), reg, + val); + else + return phy_set_bits_mmd(clock->phydev, PTP_MMD(clock), reg, + val); +} +EXPORT_SYMBOL_GPL(mchp_rds_ptp_top_config_intr); + +irqreturn_t mchp_rds_ptp_handle_interrupt(struct mchp_rds_ptp_clock *clock) +{ + int irq_sts; + + /* To handle rogue interrupt scenarios */ + if (!clock) + return IRQ_NONE; + + do { + irq_sts = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_INT_STS, + MCHP_RDS_PTP_PORT); + if (irq_sts < 0) + return IRQ_NONE; + + if (irq_sts & MCHP_RDS_PTP_INT_RX_TS_EN) + mchp_rds_ptp_process_rx_ts(clock); + + if (irq_sts & MCHP_RDS_PTP_INT_TX_TS_EN) + mchp_rds_ptp_process_tx_ts(clock); + + if (irq_sts & MCHP_RDS_PTP_INT_TX_TS_OVRFL_EN) + mchp_rds_ptp_flush_fifo(clock, + MCHP_RDS_PTP_EGRESS_FIFO); + + if (irq_sts & MCHP_RDS_PTP_INT_RX_TS_OVRFL_EN) + mchp_rds_ptp_flush_fifo(clock, + MCHP_RDS_PTP_INGRESS_FIFO); + } while (irq_sts & (MCHP_RDS_PTP_INT_RX_TS_EN | + MCHP_RDS_PTP_INT_TX_TS_EN | + MCHP_RDS_PTP_INT_TX_TS_OVRFL_EN | + MCHP_RDS_PTP_INT_RX_TS_OVRFL_EN)); + + return IRQ_HANDLED; +} +EXPORT_SYMBOL_GPL(mchp_rds_ptp_handle_interrupt); + +static int mchp_rds_ptp_init(struct mchp_rds_ptp_clock *clock) +{ + int rc; + + /* Disable PTP */ + rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_CMD_CTL, + MCHP_RDS_PTP_CLOCK, + MCHP_RDS_PTP_CMD_CTL_DIS); + if (rc < 0) + return rc; + + /* Disable TSU */ + rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_TSU_GEN_CONFIG, + MCHP_RDS_PTP_PORT, 0); + if (rc < 0) + return rc; + + /* Clear PTP interrupt status registers */ + rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_TSU_HARD_RESET, + MCHP_RDS_PTP_PORT, + MCHP_RDS_PTP_TSU_HARDRESET); + if (rc < 0) + return rc; + + /* Predictor enable */ + rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_LATENCY_CORRECTION_CTL, + MCHP_RDS_PTP_CLOCK, + MCHP_RDS_PTP_LATENCY_SETTING); + if (rc < 0) + return rc; + + /* Configure PTP operational mode */ + rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_OP_MODE, + MCHP_RDS_PTP_CLOCK, + MCHP_RDS_PTP_OP_MODE_STANDALONE); + if (rc < 0) + return rc; + + /* Reference clock configuration */ + rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_REF_CLK_CFG, + MCHP_RDS_PTP_CLOCK, + MCHP_RDS_PTP_REF_CLK_CFG_SET); + if (rc < 0) + return rc; + + /* Classifier configurations */ + rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_RX_PARSE_CONFIG, + MCHP_RDS_PTP_PORT, 0); + if (rc < 0) + return rc; + + rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_TX_PARSE_CONFIG, + MCHP_RDS_PTP_PORT, 0); + if (rc < 0) + return rc; + + rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_TX_PARSE_L2_ADDR_EN, + MCHP_RDS_PTP_PORT, 0); + if (rc < 0) + return rc; + + rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_RX_PARSE_L2_ADDR_EN, + MCHP_RDS_PTP_PORT, 0); + if (rc < 0) + return rc; + + rc = 
mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_RX_PARSE_IPV4_ADDR_EN, + MCHP_RDS_PTP_PORT, 0); + if (rc < 0) + return rc; + + rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_TX_PARSE_IPV4_ADDR_EN, + MCHP_RDS_PTP_PORT, 0); + if (rc < 0) + return rc; + + rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_RX_VERSION, + MCHP_RDS_PTP_PORT, + MCHP_RDS_PTP_MAX_VERSION(0xff) | + MCHP_RDS_PTP_MIN_VERSION(0x0)); + if (rc < 0) + return rc; + + rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_TX_VERSION, + MCHP_RDS_PTP_PORT, + MCHP_RDS_PTP_MAX_VERSION(0xff) | + MCHP_RDS_PTP_MIN_VERSION(0x0)); + if (rc < 0) + return rc; + + /* Enable TSU */ + rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_TSU_GEN_CONFIG, + MCHP_RDS_PTP_PORT, + MCHP_RDS_PTP_TSU_GEN_CFG_TSU_EN); + if (rc < 0) + return rc; + + /* Enable PTP */ + return mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_CMD_CTL, + MCHP_RDS_PTP_CLOCK, + MCHP_RDS_PTP_CMD_CTL_EN); +} + +struct mchp_rds_ptp_clock *mchp_rds_ptp_probe(struct phy_device *phydev, u8 mmd, + u16 clk_base_addr, + u16 port_base_addr) +{ + struct mchp_rds_ptp_clock *clock; + int rc; + + clock = devm_kzalloc(&phydev->mdio.dev, sizeof(*clock), GFP_KERNEL); + if (!clock) + return ERR_PTR(-ENOMEM); + + clock->port_base_addr = port_base_addr; + clock->clk_base_addr = clk_base_addr; + clock->mmd = mmd; + + mutex_init(&clock->ptp_lock); + /* Register PTP clock */ + clock->caps.owner = THIS_MODULE; + snprintf(clock->caps.name, 30, "%s", phydev->drv->name); + clock->caps.max_adj = MCHP_RDS_PTP_MAX_ADJ; + clock->caps.n_ext_ts = 0; + clock->caps.pps = 0; + clock->caps.adjfine = mchp_rds_ptp_ltc_adjfine; + clock->caps.adjtime = mchp_rds_ptp_ltc_adjtime; + clock->caps.gettime64 = mchp_rds_ptp_ltc_gettime64; + clock->caps.settime64 = mchp_rds_ptp_ltc_settime64; + clock->ptp_clock = ptp_clock_register(&clock->caps, + &phydev->mdio.dev); + if (IS_ERR(clock->ptp_clock)) + return ERR_PTR(-EINVAL); + + /* Check if PHC support is missing at the configuration level */ + if (!clock->ptp_clock) + return NULL; + + /* Initialize the SW */ + skb_queue_head_init(&clock->tx_queue); + skb_queue_head_init(&clock->rx_queue); + INIT_LIST_HEAD(&clock->rx_ts_list); + spin_lock_init(&clock->rx_ts_lock); + + clock->mii_ts.rxtstamp = mchp_rds_ptp_rxtstamp; + clock->mii_ts.txtstamp = mchp_rds_ptp_txtstamp; + clock->mii_ts.hwtstamp = mchp_rds_ptp_hwtstamp; + clock->mii_ts.ts_info = mchp_rds_ptp_ts_info; + + phydev->mii_ts = &clock->mii_ts; + + /* Timestamp selected by default to keep legacy API */ + phydev->default_timestamp = true; + + clock->phydev = phydev; + + rc = mchp_rds_ptp_init(clock); + if (rc < 0) + return ERR_PTR(rc); + + return clock; +} +EXPORT_SYMBOL_GPL(mchp_rds_ptp_probe); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("MICROCHIP PHY RDS PTP driver"); +MODULE_AUTHOR("Divya Koppera"); -- 2.51.0 From 2550afc61ef54274ac6f9355df9d33dad2910d3f Mon Sep 17 00:00:00 2001 From: Divya Koppera Date: Thu, 19 Dec 2024 18:03:09 +0530 Subject: [PATCH 06/16] net: phy: Kconfig: Add rds ptp library support and 1588 optional flag in Microchip phys Add ptp library support in Kconfig As some of Microchip T1 phys support ptp, add dependency of 1588 optional flag in Kconfig Reviewed-by: Andrew Lunn Reviewed-by: Vadim Fedorenko Signed-off-by: Divya Koppera Link: https://patch.msgid.link/20241219123311.30213-4-divya.koppera@microchip.com Signed-off-by: Jakub Kicinski --- drivers/net/phy/Kconfig | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig index 
15828f4710a9..dc625f2b3ae4 100644 --- a/drivers/net/phy/Kconfig +++ b/drivers/net/phy/Kconfig @@ -287,8 +287,15 @@ config MICROCHIP_PHY config MICROCHIP_T1_PHY tristate "Microchip T1 PHYs" + select MICROCHIP_PHY_RDS_PTP if NETWORK_PHY_TIMESTAMPING && \ + PTP_1588_CLOCK_OPTIONAL help - Supports the LAN87XX PHYs. + Supports the LAN8XXX PHYs. + +config MICROCHIP_PHY_RDS_PTP + tristate + help + Currently supports LAN887X T1 PHY config MICROSEMI_PHY tristate "Microsemi PHYs" -- 2.51.0 From 85b39f7593e1383b235f1e9b3d943cc2e91b6b10 Mon Sep 17 00:00:00 2001 From: Divya Koppera Date: Thu, 19 Dec 2024 18:03:10 +0530 Subject: [PATCH 07/16] net: phy: Makefile: Add makefile support for rds ptp in Microchip phys Add makefile support for rds ptp library. Reviewed-by: Andrew Lunn Reviewed-by: Vadim Fedorenko Signed-off-by: Divya Koppera Link: https://patch.msgid.link/20241219123311.30213-5-divya.koppera@microchip.com Signed-off-by: Jakub Kicinski --- drivers/net/phy/Makefile | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile index e6145153e837..39b72b464287 100644 --- a/drivers/net/phy/Makefile +++ b/drivers/net/phy/Makefile @@ -79,6 +79,7 @@ obj-$(CONFIG_MESON_GXL_PHY) += meson-gxl.o obj-$(CONFIG_MICREL_KS8995MA) += spi_ks8995.o obj-$(CONFIG_MICREL_PHY) += micrel.o obj-$(CONFIG_MICROCHIP_PHY) += microchip.o +obj-$(CONFIG_MICROCHIP_PHY_RDS_PTP) += microchip_rds_ptp.o obj-$(CONFIG_MICROCHIP_T1_PHY) += microchip_t1.o obj-$(CONFIG_MICROCHIP_T1S_PHY) += microchip_t1s.o obj-$(CONFIG_MICROSEMI_PHY) += mscc/ -- 2.51.0 From 9fc3d6fe802923b026ecac16e59c0acdd6744d5d Mon Sep 17 00:00:00 2001 From: Divya Koppera Date: Thu, 19 Dec 2024 18:03:11 +0530 Subject: [PATCH 08/16] net: phy: microchip_t1 : Add initialization of ptp for lan887x Add initialization of ptp for lan887x. 
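For orientation, the RDS PTP library introduced in the earlier patches is consumed by a PHY driver through the three entry points declared in microchip_rds_ptp.h. A rough sketch of the call pattern, with error handling trimmed; the lan887x wiring in the diff below is the authoritative version:

  /* one-time setup, once interrupts are known to be usable */
  priv->clock = mchp_rds_ptp_probe(phydev, MDIO_MMD_VEND1,
  				   MCHP_RDS_PTP_LTC_BASE_ADDR,
  				   MCHP_RDS_PTP_PORT_BASE_ADDR);

  /* toggle the 1588 interrupt source from .config_intr() */
  mchp_rds_ptp_top_config_intr(priv->clock, LAN887X_INT_MSK,
  			       LAN887X_INT_MSK_P1588_MOD_INT_MSK,
  			       phydev->interrupts == PHY_INTERRUPT_ENABLED);

  /* from the PHY's IRQ handler, when the 1588 status bit is set */
  rc = mchp_rds_ptp_handle_interrupt(priv->clock);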
Reviewed-by: Andrew Lunn Signed-off-by: Divya Koppera Link: https://patch.msgid.link/20241219123311.30213-6-divya.koppera@microchip.com Signed-off-by: Jakub Kicinski --- drivers/net/phy/microchip_t1.c | 41 +++++++++++++++++++++++++++++++--- 1 file changed, 38 insertions(+), 3 deletions(-) diff --git a/drivers/net/phy/microchip_t1.c b/drivers/net/phy/microchip_t1.c index b17bf6708003..73f28463bc35 100644 --- a/drivers/net/phy/microchip_t1.c +++ b/drivers/net/phy/microchip_t1.c @@ -10,11 +10,15 @@ #include #include #include +#include "microchip_rds_ptp.h" #define PHY_ID_LAN87XX 0x0007c150 #define PHY_ID_LAN937X 0x0007c180 #define PHY_ID_LAN887X 0x0007c1f0 +#define MCHP_RDS_PTP_LTC_BASE_ADDR 0xe000 +#define MCHP_RDS_PTP_PORT_BASE_ADDR (MCHP_RDS_PTP_LTC_BASE_ADDR + 0x800) + /* External Register Control Register */ #define LAN87XX_EXT_REG_CTL (0x14) #define LAN87XX_EXT_REG_CTL_RD_CTL (0x1000) @@ -229,6 +233,7 @@ #define LAN887X_INT_STS 0xf000 #define LAN887X_INT_MSK 0xf001 +#define LAN887X_INT_MSK_P1588_MOD_INT_MSK BIT(3) #define LAN887X_INT_MSK_T1_PHY_INT_MSK BIT(2) #define LAN887X_INT_MSK_LINK_UP_MSK BIT(1) #define LAN887X_INT_MSK_LINK_DOWN_MSK BIT(0) @@ -319,6 +324,8 @@ struct lan887x_regwr_map { struct lan887x_priv { u64 stats[ARRAY_SIZE(lan887x_hw_stats)]; + struct mchp_rds_ptp_clock *clock; + bool init_done; }; static int lan937x_dsp_workaround(struct phy_device *phydev, u16 ereg, u8 bank) @@ -1269,8 +1276,19 @@ static int lan887x_get_features(struct phy_device *phydev) static int lan887x_phy_init(struct phy_device *phydev) { + struct lan887x_priv *priv = phydev->priv; int ret; + if (!priv->init_done && phy_interrupt_is_valid(phydev)) { + priv->clock = mchp_rds_ptp_probe(phydev, MDIO_MMD_VEND1, + MCHP_RDS_PTP_LTC_BASE_ADDR, + MCHP_RDS_PTP_PORT_BASE_ADDR); + if (IS_ERR(priv->clock)) + return PTR_ERR(priv->clock); + + priv->init_done = true; + } + /* Clear loopback */ ret = phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, LAN887X_MIS_CFG_REG2, @@ -1470,6 +1488,7 @@ static int lan887x_probe(struct phy_device *phydev) if (!priv) return -ENOMEM; + priv->init_done = false; phydev->priv = priv; return lan887x_phy_setup(phydev); @@ -1518,6 +1537,7 @@ static void lan887x_get_strings(struct phy_device *phydev, u8 *data) static int lan887x_config_intr(struct phy_device *phydev) { + struct lan887x_priv *priv = phydev->priv; int rc; if (phydev->interrupts == PHY_INTERRUPT_ENABLED) { @@ -1537,12 +1557,24 @@ static int lan887x_config_intr(struct phy_device *phydev) rc = phy_read_mmd(phydev, MDIO_MMD_VEND1, LAN887X_INT_STS); } + if (rc < 0) + return rc; - return rc < 0 ? 
rc : 0; + if (phy_is_default_hwtstamp(phydev)) { + return mchp_rds_ptp_top_config_intr(priv->clock, + LAN887X_INT_MSK, + LAN887X_INT_MSK_P1588_MOD_INT_MSK, + (phydev->interrupts == + PHY_INTERRUPT_ENABLED)); + } + + return 0; } static irqreturn_t lan887x_handle_interrupt(struct phy_device *phydev) { + struct lan887x_priv *priv = phydev->priv; + int rc = IRQ_NONE; int irq_status; irq_status = phy_read_mmd(phydev, MDIO_MMD_VEND1, LAN887X_INT_STS); @@ -1553,10 +1585,13 @@ static irqreturn_t lan887x_handle_interrupt(struct phy_device *phydev) if (irq_status & LAN887X_MX_CHIP_TOP_LINK_MSK) { phy_trigger_machine(phydev); - return IRQ_HANDLED; + rc = IRQ_HANDLED; } - return IRQ_NONE; + if (irq_status & LAN887X_INT_MSK_P1588_MOD_INT_MSK) + rc = mchp_rds_ptp_handle_interrupt(priv->clock); + + return rc; } static int lan887x_cd_reset(struct phy_device *phydev, -- 2.51.0 From ddbb5ddc43ad000a984149db5af1133433938404 Mon Sep 17 00:00:00 2001 From: Rongwei Liu Date: Thu, 19 Dec 2024 19:58:31 +0200 Subject: [PATCH 09/16] net/mlx5: LAG, Refactor lag logic Wrap the lag pf access into two new macros: 1. ldev_for_each() 2. ldev_for_each_reverse() The maximum number of lag ports and the index to `natvie_port_num` mapping will be handled by the two new macros. Users shouldn't use the for loop anymore. Signed-off-by: Rongwei Liu Reviewed-by: Saeed Mahameed Reviewed-by: Mark Bloch Signed-off-by: Tariq Toukan Link: https://patch.msgid.link/20241219175841.1094544-2-tariqt@nvidia.com Signed-off-by: Jakub Kicinski --- .../ethernet/mellanox/mlx5/core/lag/debugfs.c | 13 +- .../net/ethernet/mellanox/mlx5/core/lag/lag.c | 181 +++++++++--------- .../net/ethernet/mellanox/mlx5/core/lag/lag.h | 14 +- .../net/ethernet/mellanox/mlx5/core/lag/mp.c | 24 ++- .../ethernet/mellanox/mlx5/core/lag/mpesw.c | 10 +- .../mellanox/mlx5/core/lag/port_sel.c | 16 +- 6 files changed, 137 insertions(+), 121 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/debugfs.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/debugfs.c index f4b777d4e108..62b6faa4276a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag/debugfs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/debugfs.c @@ -105,20 +105,20 @@ static int mapping_show(struct seq_file *file, void *priv) struct mlx5_lag *ldev; bool hash = false; bool lag_active; + int i, idx = 0; int num_ports; - int i; ldev = mlx5_lag_dev(dev); mutex_lock(&ldev->lock); lag_active = __mlx5_lag_is_active(ldev); if (lag_active) { if (test_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, &ldev->mode_flags)) { - mlx5_infer_tx_enabled(&ldev->tracker, ldev->ports, ports, + mlx5_infer_tx_enabled(&ldev->tracker, ldev, ports, &num_ports); hash = true; } else { - for (i = 0; i < ldev->ports; i++) - ports[i] = ldev->v2p_map[i]; + mlx5_ldev_for_each(i, 0, ldev) + ports[idx++] = ldev->v2p_map[i]; num_ports = ldev->ports; } } @@ -144,11 +144,8 @@ static int members_show(struct seq_file *file, void *priv) ldev = mlx5_lag_dev(dev); mutex_lock(&ldev->lock); - for (i = 0; i < ldev->ports; i++) { - if (!ldev->pf[i].dev) - continue; + mlx5_ldev_for_each(i, 0, ldev) seq_printf(file, "%s\n", dev_name(ldev->pf[i].dev->device)); - } mutex_unlock(&ldev->lock); return 0; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c index 7f68468c2e75..ed539ac4fef1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c @@ -43,10 +43,6 @@ #include "mp.h" #include "mpesw.h" -enum { - MLX5_LAG_EGRESS_PORT_1 = 1, 
- MLX5_LAG_EGRESS_PORT_2, -}; /* General purpose, use for short periods of time. * Beware of lock dependencies (preferably, no locks should be acquired @@ -72,7 +68,7 @@ static u8 lag_active_port_bits(struct mlx5_lag *ldev) int num_enabled; int idx; - mlx5_infer_tx_enabled(&ldev->tracker, ldev->ports, enabled_ports, + mlx5_infer_tx_enabled(&ldev->tracker, ldev, enabled_ports, &num_enabled); for (idx = 0; idx < num_enabled; idx++) active_port |= BIT_MASK(enabled_ports[idx]); @@ -113,7 +109,7 @@ static int mlx5_cmd_create_lag(struct mlx5_core_dev *dev, u8 *ports, int mode, return mlx5_cmd_exec_in(dev, create_lag, in); } -static int mlx5_cmd_modify_lag(struct mlx5_core_dev *dev, u8 num_ports, +static int mlx5_cmd_modify_lag(struct mlx5_core_dev *dev, struct mlx5_lag *ldev, u8 *ports) { u32 in[MLX5_ST_SZ_DW(modify_lag_in)] = {}; @@ -148,33 +144,31 @@ int mlx5_cmd_destroy_vport_lag(struct mlx5_core_dev *dev) } EXPORT_SYMBOL(mlx5_cmd_destroy_vport_lag); -static void mlx5_infer_tx_disabled(struct lag_tracker *tracker, u8 num_ports, +static void mlx5_infer_tx_disabled(struct lag_tracker *tracker, struct mlx5_lag *ldev, u8 *ports, int *num_disabled) { int i; *num_disabled = 0; - for (i = 0; i < num_ports; i++) { + mlx5_ldev_for_each(i, 0, ldev) if (!tracker->netdev_state[i].tx_enabled || !tracker->netdev_state[i].link_up) ports[(*num_disabled)++] = i; - } } -void mlx5_infer_tx_enabled(struct lag_tracker *tracker, u8 num_ports, +void mlx5_infer_tx_enabled(struct lag_tracker *tracker, struct mlx5_lag *ldev, u8 *ports, int *num_enabled) { int i; *num_enabled = 0; - for (i = 0; i < num_ports; i++) { + mlx5_ldev_for_each(i, 0, ldev) if (tracker->netdev_state[i].tx_enabled && tracker->netdev_state[i].link_up) ports[(*num_enabled)++] = i; - } if (*num_enabled == 0) - mlx5_infer_tx_disabled(tracker, num_ports, ports, num_enabled); + mlx5_infer_tx_disabled(tracker, ldev, ports, num_enabled); } static void mlx5_lag_print_mapping(struct mlx5_core_dev *dev, @@ -192,7 +186,7 @@ static void mlx5_lag_print_mapping(struct mlx5_core_dev *dev, int j; if (test_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, &flags)) { - mlx5_infer_tx_enabled(tracker, ldev->ports, enabled_ports, + mlx5_infer_tx_enabled(tracker, ldev, enabled_ports, &num_enabled); for (i = 0; i < num_enabled; i++) { err = scnprintf(buf + written, 4, "%d, ", enabled_ports[i] + 1); @@ -203,7 +197,7 @@ static void mlx5_lag_print_mapping(struct mlx5_core_dev *dev, buf[written - 2] = 0; mlx5_core_info(dev, "lag map active ports: %s\n", buf); } else { - for (i = 0; i < ldev->ports; i++) { + mlx5_ldev_for_each(i, 0, ldev) { for (j = 0; j < ldev->buckets; j++) { idx = i * ldev->buckets + j; err = scnprintf(buf + written, 10, @@ -286,7 +280,7 @@ int mlx5_lag_dev_get_netdev_idx(struct mlx5_lag *ldev, { int i; - for (i = 0; i < ldev->ports; i++) + mlx5_ldev_for_each(i, 0, ldev) if (ldev->pf[i].netdev == ndev) return i; @@ -310,7 +304,7 @@ static bool __mlx5_lag_is_sriov(struct mlx5_lag *ldev) * with mapping that points to active ports. 
*/ static void mlx5_infer_tx_affinity_mapping(struct lag_tracker *tracker, - u8 num_ports, + struct mlx5_lag *ldev, u8 buckets, u8 *ports) { @@ -323,7 +317,7 @@ static void mlx5_infer_tx_affinity_mapping(struct lag_tracker *tracker, int i; int j; - for (i = 0; i < num_ports; i++) { + mlx5_ldev_for_each(i, 0, ldev) { if (tracker->netdev_state[i].tx_enabled && tracker->netdev_state[i].link_up) enabled[enabled_ports_num++] = i; @@ -334,15 +328,16 @@ static void mlx5_infer_tx_affinity_mapping(struct lag_tracker *tracker, /* Use native mapping by default where each port's buckets * point the native port: 1 1 1 .. 1 2 2 2 ... 2 3 3 3 ... 3 etc */ - for (i = 0; i < num_ports; i++) + mlx5_ldev_for_each(i, 0, ldev) { for (j = 0; j < buckets; j++) { idx = i * buckets + j; - ports[idx] = MLX5_LAG_EGRESS_PORT_1 + i; + ports[idx] = i + 1; } + } /* If all ports are disabled/enabled keep native mapping */ - if (enabled_ports_num == num_ports || - disabled_ports_num == num_ports) + if (enabled_ports_num == ldev->ports || + disabled_ports_num == ldev->ports) return; /* Go over the disabled ports and for each assign a random active port */ @@ -358,7 +353,7 @@ static bool mlx5_lag_has_drop_rule(struct mlx5_lag *ldev) { int i; - for (i = 0; i < ldev->ports; i++) + mlx5_ldev_for_each(i, 0, ldev) if (ldev->pf[i].has_drop) return true; return false; @@ -368,7 +363,7 @@ static void mlx5_lag_drop_rule_cleanup(struct mlx5_lag *ldev) { int i; - for (i = 0; i < ldev->ports; i++) { + mlx5_ldev_for_each(i, 0, ldev) { if (!ldev->pf[i].has_drop) continue; @@ -396,7 +391,7 @@ static void mlx5_lag_drop_rule_setup(struct mlx5_lag *ldev, if (!ldev->tracker.has_inactive) return; - mlx5_infer_tx_disabled(tracker, ldev->ports, disabled_ports, &num_disabled); + mlx5_infer_tx_disabled(tracker, ldev, disabled_ports, &num_disabled); for (i = 0; i < num_disabled; i++) { disabled_index = disabled_ports[i]; @@ -442,7 +437,7 @@ static int _mlx5_modify_lag(struct mlx5_lag *ldev, u8 *ports) return mlx5_cmd_modify_active_port(dev0, active_ports); } - return mlx5_cmd_modify_lag(dev0, ldev->ports, ports); + return mlx5_cmd_modify_lag(dev0, ldev, ports); } static struct net_device *mlx5_lag_active_backup_get_netdev(struct mlx5_core_dev *dev) @@ -458,7 +453,7 @@ static struct net_device *mlx5_lag_active_backup_get_netdev(struct mlx5_core_dev if (!ldev) goto unlock; - for (i = 0; i < ldev->ports; i++) + mlx5_ldev_for_each(i, 0, ldev) if (ldev->tracker.netdev_state[i].tx_enabled) ndev = ldev->pf[i].netdev; if (!ndev) @@ -483,9 +478,9 @@ void mlx5_modify_lag(struct mlx5_lag *ldev, int i; int j; - mlx5_infer_tx_affinity_mapping(tracker, ldev->ports, ldev->buckets, ports); + mlx5_infer_tx_affinity_mapping(tracker, ldev, ldev->buckets, ports); - for (i = 0; i < ldev->ports; i++) { + mlx5_ldev_for_each(i, 0, ldev) { for (j = 0; j < ldev->buckets; j++) { idx = i * ldev->buckets + j; if (ports[idx] == ldev->v2p_map[idx]) @@ -596,9 +591,9 @@ static int mlx5_lag_create_single_fdb(struct mlx5_lag *ldev) struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev; struct mlx5_eswitch *master_esw = dev0->priv.eswitch; int err; - int i; + int i, j; - for (i = MLX5_LAG_P1 + 1; i < ldev->ports; i++) { + mlx5_ldev_for_each(i, 1, ldev) { struct mlx5_eswitch *slave_esw = ldev->pf[i].dev->priv.eswitch; err = mlx5_eswitch_offloads_single_fdb_add_one(master_esw, @@ -608,9 +603,9 @@ static int mlx5_lag_create_single_fdb(struct mlx5_lag *ldev) } return 0; err: - for (; i > MLX5_LAG_P1; i--) + mlx5_ldev_for_each_reverse(j, i, 1, ldev) 
mlx5_eswitch_offloads_single_fdb_del_one(master_esw, - ldev->pf[i].dev->priv.eswitch); + ldev->pf[j].dev->priv.eswitch); return err; } @@ -671,7 +666,7 @@ int mlx5_activate_lag(struct mlx5_lag *ldev, return err; if (mode != MLX5_LAG_MODE_MPESW) { - mlx5_infer_tx_affinity_mapping(tracker, ldev->ports, ldev->buckets, ldev->v2p_map); + mlx5_infer_tx_affinity_mapping(tracker, ldev, ldev->buckets, ldev->v2p_map); if (test_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, &flags)) { err = mlx5_lag_port_sel_create(ldev, tracker->hash_type, ldev->v2p_map); @@ -722,7 +717,7 @@ int mlx5_deactivate_lag(struct mlx5_lag *ldev) mlx5_lag_mp_reset(ldev); if (test_bit(MLX5_LAG_MODE_FLAG_SHARED_FDB, &flags)) { - for (i = MLX5_LAG_P1 + 1; i < ldev->ports; i++) + mlx5_ldev_for_each(i, 1, ldev) mlx5_eswitch_offloads_single_fdb_del_one(master_esw, ldev->pf[i].dev->priv.eswitch); clear_bit(MLX5_LAG_MODE_FLAG_SHARED_FDB, &flags); @@ -766,7 +761,7 @@ bool mlx5_lag_check_prereq(struct mlx5_lag *ldev) return false; #ifdef CONFIG_MLX5_ESWITCH - for (i = 0; i < ldev->ports; i++) { + mlx5_ldev_for_each(i, 0, ldev) { dev = ldev->pf[i].dev; if (mlx5_eswitch_num_vfs(dev->priv.eswitch) && !is_mdev_switchdev_mode(dev)) return false; @@ -774,17 +769,17 @@ bool mlx5_lag_check_prereq(struct mlx5_lag *ldev) dev = ldev->pf[MLX5_LAG_P1].dev; mode = mlx5_eswitch_mode(dev); - for (i = 0; i < ldev->ports; i++) + mlx5_ldev_for_each(i, 0, ldev) if (mlx5_eswitch_mode(ldev->pf[i].dev) != mode) return false; #else - for (i = 0; i < ldev->ports; i++) + mlx5_ldev_for_each(i, 0, ldev) if (mlx5_sriov_is_enabled(ldev->pf[i].dev)) return false; #endif roce_support = mlx5_get_roce_state(ldev->pf[MLX5_LAG_P1].dev); - for (i = 1; i < ldev->ports; i++) + mlx5_ldev_for_each(i, MLX5_LAG_P2, ldev) if (mlx5_get_roce_state(ldev->pf[i].dev) != roce_support) return false; @@ -795,10 +790,7 @@ void mlx5_lag_add_devices(struct mlx5_lag *ldev) { int i; - for (i = 0; i < ldev->ports; i++) { - if (!ldev->pf[i].dev) - continue; - + mlx5_ldev_for_each(i, 0, ldev) { if (ldev->pf[i].dev->priv.flags & MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV) continue; @@ -812,10 +804,7 @@ void mlx5_lag_remove_devices(struct mlx5_lag *ldev) { int i; - for (i = 0; i < ldev->ports; i++) { - if (!ldev->pf[i].dev) - continue; - + mlx5_ldev_for_each(i, 0, ldev) { if (ldev->pf[i].dev->priv.flags & MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV) continue; @@ -842,7 +831,7 @@ void mlx5_disable_lag(struct mlx5_lag *ldev) dev0->priv.flags |= MLX5_PRIV_FLAGS_DISABLE_IB_ADEV; mlx5_rescan_drivers_locked(dev0); } - for (i = 1; i < ldev->ports; i++) + mlx5_ldev_for_each(i, MLX5_LAG_P2, ldev) mlx5_nic_vport_disable_roce(ldev->pf[i].dev); } @@ -854,7 +843,7 @@ void mlx5_disable_lag(struct mlx5_lag *ldev) mlx5_lag_add_devices(ldev); if (shared_fdb) - for (i = 0; i < ldev->ports; i++) + mlx5_ldev_for_each(i, 0, ldev) if (!(ldev->pf[i].dev->priv.flags & MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV)) mlx5_eswitch_reload_ib_reps(ldev->pf[i].dev->priv.eswitch); } @@ -864,7 +853,7 @@ static bool mlx5_shared_fdb_supported(struct mlx5_lag *ldev) struct mlx5_core_dev *dev; int i; - for (i = MLX5_LAG_P1 + 1; i < ldev->ports; i++) { + mlx5_ldev_for_each(i, MLX5_LAG_P1 + 1, ldev) { dev = ldev->pf[i].dev; if (is_mdev_switchdev_mode(dev) && mlx5_eswitch_vport_match_metadata_enabled(dev->priv.eswitch) && @@ -892,11 +881,11 @@ static bool mlx5_lag_is_roce_lag(struct mlx5_lag *ldev) bool roce_lag = true; int i; - for (i = 0; i < ldev->ports; i++) + mlx5_ldev_for_each(i, 0, ldev) roce_lag = roce_lag && !mlx5_sriov_is_enabled(ldev->pf[i].dev); #ifdef 
CONFIG_MLX5_ESWITCH - for (i = 0; i < ldev->ports; i++) + mlx5_ldev_for_each(i, 0, ldev) roce_lag = roce_lag && is_mdev_legacy_mode(ldev->pf[i].dev); #endif @@ -956,7 +945,7 @@ static void mlx5_do_bond(struct mlx5_lag *ldev) } else if (roce_lag) { dev0->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_IB_ADEV; mlx5_rescan_drivers_locked(dev0); - for (i = 1; i < ldev->ports; i++) { + mlx5_ldev_for_each(i, MLX5_LAG_P2, ldev) { if (mlx5_get_roce_state(ldev->pf[i].dev)) mlx5_nic_vport_enable_roce(ldev->pf[i].dev); } @@ -966,7 +955,7 @@ static void mlx5_do_bond(struct mlx5_lag *ldev) dev0->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_IB_ADEV; mlx5_rescan_drivers_locked(dev0); - for (i = 0; i < ldev->ports; i++) { + mlx5_ldev_for_each(i, 0, ldev) { err = mlx5_eswitch_reload_ib_reps(ldev->pf[i].dev->priv.eswitch); if (err) break; @@ -977,7 +966,7 @@ static void mlx5_do_bond(struct mlx5_lag *ldev) mlx5_rescan_drivers_locked(dev0); mlx5_deactivate_lag(ldev); mlx5_lag_add_devices(ldev); - for (i = 0; i < ldev->ports; i++) + mlx5_ldev_for_each(i, 0, ldev) mlx5_eswitch_reload_ib_reps(ldev->pf[i].dev->priv.eswitch); mlx5_core_err(dev0, "Failed to enable lag\n"); return; @@ -1010,12 +999,9 @@ struct mlx5_devcom_comp_dev *mlx5_lag_get_devcom_comp(struct mlx5_lag *ldev) int i; mutex_lock(&ldev->lock); - for (i = 0; i < ldev->ports; i++) { - if (ldev->pf[i].dev) { - devcom = ldev->pf[i].dev->priv.hca_devcom_comp; - break; - } - } + i = mlx5_get_next_ldev_func(ldev, 0); + if (i < MLX5_MAX_PORTS) + devcom = ldev->pf[i].dev->priv.hca_devcom_comp; mutex_unlock(&ldev->lock); return devcom; } @@ -1068,7 +1054,7 @@ static int mlx5_handle_changeupper_event(struct mlx5_lag *ldev, u8 bond_status = 0; int num_slaves = 0; int changed = 0; - int idx; + int i, idx = -1; if (!netif_is_lag_master(upper)) return 0; @@ -1083,8 +1069,13 @@ static int mlx5_handle_changeupper_event(struct mlx5_lag *ldev, */ rcu_read_lock(); for_each_netdev_in_bond_rcu(upper, ndev_tmp) { - idx = mlx5_lag_dev_get_netdev_idx(ldev, ndev_tmp); - if (idx >= 0) { + mlx5_ldev_for_each(i, 0, ldev) { + if (ldev->pf[i].netdev == ndev_tmp) { + idx++; + break; + } + } + if (i < MLX5_MAX_PORTS) { slave = bond_slave_get_rcu(ndev_tmp); if (slave) has_inactive |= bond_is_slave_inactive(slave); @@ -1234,15 +1225,12 @@ static int mlx5_lag_netdev_event(struct notifier_block *this, } static void mlx5_ldev_add_netdev(struct mlx5_lag *ldev, - struct mlx5_core_dev *dev, - struct net_device *netdev) + struct mlx5_core_dev *dev, + struct net_device *netdev) { unsigned int fn = mlx5_get_dev_index(dev); unsigned long flags; - if (fn >= ldev->ports) - return; - spin_lock_irqsave(&lag_lock, flags); ldev->pf[fn].netdev = netdev; ldev->tracker.netdev_state[fn].link_up = 0; @@ -1257,7 +1245,7 @@ static void mlx5_ldev_remove_netdev(struct mlx5_lag *ldev, int i; spin_lock_irqsave(&lag_lock, flags); - for (i = 0; i < ldev->ports; i++) { + mlx5_ldev_for_each(i, 0, ldev) { if (ldev->pf[i].netdev == netdev) { ldev->pf[i].netdev = NULL; break; @@ -1267,13 +1255,10 @@ static void mlx5_ldev_remove_netdev(struct mlx5_lag *ldev, } static void mlx5_ldev_add_mdev(struct mlx5_lag *ldev, - struct mlx5_core_dev *dev) + struct mlx5_core_dev *dev) { unsigned int fn = mlx5_get_dev_index(dev); - if (fn >= ldev->ports) - return; - ldev->pf[fn].dev = dev; dev->priv.lag = ldev; } @@ -1281,16 +1266,13 @@ static void mlx5_ldev_add_mdev(struct mlx5_lag *ldev, static void mlx5_ldev_remove_mdev(struct mlx5_lag *ldev, struct mlx5_core_dev *dev) { - int i; + int fn; - for (i = 0; i < ldev->ports; i++) - if 
(ldev->pf[i].dev == dev) - break; - - if (i == ldev->ports) + fn = mlx5_get_dev_index(dev); + if (ldev->pf[fn].dev != dev) return; - ldev->pf[i].dev = NULL; + ldev->pf[fn].dev = NULL; dev->priv.lag = NULL; } @@ -1406,7 +1388,6 @@ void mlx5_lag_add_netdev(struct mlx5_core_dev *dev, mutex_lock(&ldev->lock); mlx5_ldev_add_netdev(ldev, dev, netdev); - for (i = 0; i < ldev->ports; i++) if (!ldev->pf[i].netdev) break; @@ -1417,6 +1398,26 @@ void mlx5_lag_add_netdev(struct mlx5_core_dev *dev, mlx5_queue_bond_work(ldev, 0); } +int mlx5_get_pre_ldev_func(struct mlx5_lag *ldev, int start_idx, int end_idx) +{ + int i; + + for (i = start_idx; i >= end_idx; i--) + if (ldev->pf[i].dev) + return i; + return -1; +} + +int mlx5_get_next_ldev_func(struct mlx5_lag *ldev, int start_idx) +{ + int i; + + for (i = start_idx; i < MLX5_MAX_PORTS; i++) + if (ldev->pf[i].dev) + return i; + return MLX5_MAX_PORTS; +} + bool mlx5_lag_is_roce(struct mlx5_core_dev *dev) { struct mlx5_lag *ldev; @@ -1467,7 +1468,7 @@ bool mlx5_lag_is_master(struct mlx5_core_dev *dev) { struct mlx5_lag *ldev; unsigned long flags; - bool res; + bool res = false; spin_lock_irqsave(&lag_lock, flags); ldev = mlx5_lag_dev(dev); @@ -1555,7 +1556,7 @@ u8 mlx5_lag_get_slave_port(struct mlx5_core_dev *dev, if (!(ldev && __mlx5_lag_is_roce(ldev))) goto unlock; - for (i = 0; i < ldev->ports; i++) { + mlx5_ldev_for_each(i, 0, ldev) { if (ldev->pf[i].netdev == slave) { port = i; break; @@ -1594,13 +1595,13 @@ struct mlx5_core_dev *mlx5_lag_get_next_peer_mdev(struct mlx5_core_dev *dev, int if (!ldev) goto unlock; - if (*i == ldev->ports) + if (*i == MLX5_MAX_PORTS) goto unlock; - for (idx = *i; idx < ldev->ports; idx++) + mlx5_ldev_for_each(idx, *i, ldev) if (ldev->pf[idx].dev != dev) break; - if (idx == ldev->ports) { + if (idx == MLX5_MAX_PORTS) { *i = idx; goto unlock; } @@ -1621,10 +1622,10 @@ int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev, { int outlen = MLX5_ST_SZ_BYTES(query_cong_statistics_out); struct mlx5_core_dev **mdev; + int ret = 0, i, j, idx = 0; struct mlx5_lag *ldev; unsigned long flags; int num_ports; - int ret, i, j; void *out; out = kvzalloc(outlen, GFP_KERNEL); @@ -1643,8 +1644,8 @@ int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev, ldev = mlx5_lag_dev(dev); if (ldev && __mlx5_lag_is_active(ldev)) { num_ports = ldev->ports; - for (i = 0; i < ldev->ports; i++) - mdev[i] = ldev->pf[i].dev; + mlx5_ldev_for_each(i, 0, ldev) + mdev[idx++] = ldev->pf[i].dev; } else { num_ports = 1; mdev[MLX5_LAG_P1] = dev; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h index 50fcb1eee574..1dada791815e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h @@ -103,7 +103,7 @@ int mlx5_lag_dev_get_netdev_idx(struct mlx5_lag *ldev, struct net_device *ndev); char *mlx5_get_str_port_sel_mode(enum mlx5_lag_mode mode, unsigned long flags); -void mlx5_infer_tx_enabled(struct lag_tracker *tracker, u8 num_ports, +void mlx5_infer_tx_enabled(struct lag_tracker *tracker, struct mlx5_lag *ldev, u8 *ports, int *num_enabled); void mlx5_ldev_add_debugfs(struct mlx5_core_dev *dev); @@ -119,9 +119,21 @@ static inline bool mlx5_lag_is_supported(struct mlx5_core_dev *dev) if (!MLX5_CAP_GEN(dev, vport_group_manager) || !MLX5_CAP_GEN(dev, lag_master) || MLX5_CAP_GEN(dev, num_lag_ports) < 2 || + mlx5_get_dev_index(dev) >= MLX5_MAX_PORTS || MLX5_CAP_GEN(dev, num_lag_ports) > MLX5_MAX_PORTS) return false; return true; } +#define 
mlx5_ldev_for_each(i, start_index, ldev) \ + for (int tmp = start_index; tmp = mlx5_get_next_ldev_func(ldev, tmp), \ + i = tmp, tmp < MLX5_MAX_PORTS; tmp++) + +#define mlx5_ldev_for_each_reverse(i, start_index, end_index, ldev) \ + for (int tmp = start_index, tmp1 = end_index; \ + tmp = mlx5_get_pre_ldev_func(ldev, tmp, tmp1), \ + i = tmp, tmp >= tmp1; tmp--) + +int mlx5_get_pre_ldev_func(struct mlx5_lag *ldev, int start_idx, int end_idx); +int mlx5_get_next_ldev_func(struct mlx5_lag *ldev, int start_idx); #endif /* __MLX5_LAG_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c index b1aa494c76ba..40406d04adc9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c @@ -153,6 +153,7 @@ static void mlx5_lag_fib_route_event(struct mlx5_lag *ldev, unsigned long event, struct net_device *nh_dev0, *nh_dev1; struct fib_info *fi = fen_info->fi; struct lag_mp *mp = &ldev->lag_mp; + int i, dev_idx = 0; /* Handle delete event */ if (event == FIB_EVENT_ENTRY_DEL) { @@ -186,10 +187,12 @@ static void mlx5_lag_fib_route_event(struct mlx5_lag *ldev, unsigned long event, if (!nh_dev1) { if (__mlx5_lag_is_active(ldev)) { - int i = mlx5_lag_dev_get_netdev_idx(ldev, nh_dev0); - - i++; - mlx5_lag_set_port_affinity(ldev, i); + mlx5_ldev_for_each(i, 0, ldev) { + dev_idx++; + if (ldev->pf[i].netdev == nh_dev0) + break; + } + mlx5_lag_set_port_affinity(ldev, dev_idx); mlx5_lag_fib_set(mp, fi, fen_info->dst, fen_info->dst_len); } @@ -214,6 +217,7 @@ static void mlx5_lag_fib_nexthop_event(struct mlx5_lag *ldev, struct fib_info *fi) { struct lag_mp *mp = &ldev->lag_mp; + int i, dev_idx = 0; /* Check the nh event is related to the route */ if (!mp->fib.mfi || mp->fib.mfi != fi) @@ -221,11 +225,15 @@ static void mlx5_lag_fib_nexthop_event(struct mlx5_lag *ldev, /* nh added/removed */ if (event == FIB_EVENT_NH_DEL) { - int i = mlx5_lag_dev_get_netdev_idx(ldev, fib_nh->fib_nh_dev); + mlx5_ldev_for_each(i, 0, ldev) { + if (ldev->pf[i].netdev == fib_nh->fib_nh_dev) + break; + dev_idx++; + } - if (i >= 0) { - i = (i + 1) % 2 + 1; /* peer port */ - mlx5_lag_set_port_affinity(ldev, i); + if (dev_idx >= 0) { + dev_idx = (dev_idx + 1) % 2 + 1; /* peer port */ + mlx5_lag_set_port_affinity(ldev, dev_idx); } } else if (event == FIB_EVENT_NH_ADD && fib_info_num_path(fi) == 2) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c index 571ea26edd0c..1123c8afcf9e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c @@ -15,7 +15,7 @@ static void mlx5_mpesw_metadata_cleanup(struct mlx5_lag *ldev) u32 pf_metadata; int i; - for (i = 0; i < ldev->ports; i++) { + mlx5_ldev_for_each(i, 0, ldev) { dev = ldev->pf[i].dev; esw = dev->priv.eswitch; pf_metadata = ldev->lag_mpesw.pf_metadata[i]; @@ -36,7 +36,7 @@ static int mlx5_mpesw_metadata_set(struct mlx5_lag *ldev) u32 pf_metadata; int i, err; - for (i = 0; i < ldev->ports; i++) { + mlx5_ldev_for_each(i, 0, ldev) { dev = ldev->pf[i].dev; esw = dev->priv.eswitch; pf_metadata = mlx5_esw_match_metadata_alloc(esw); @@ -52,7 +52,7 @@ static int mlx5_mpesw_metadata_set(struct mlx5_lag *ldev) goto err_metadata; } - for (i = 0; i < ldev->ports; i++) { + mlx5_ldev_for_each(i, 0, ldev) { dev = ldev->pf[i].dev; mlx5_notifier_call_chain(dev->priv.events, MLX5_DEV_EVENT_MULTIPORT_ESW, (void *)0); @@ -98,7 +98,7 @@ static int enable_mpesw(struct 
mlx5_lag *ldev) dev0->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_IB_ADEV; mlx5_rescan_drivers_locked(dev0); - for (i = 0; i < ldev->ports; i++) { + mlx5_ldev_for_each(i, 0, ldev) { err = mlx5_eswitch_reload_ib_reps(ldev->pf[i].dev->priv.eswitch); if (err) goto err_rescan_drivers; @@ -112,7 +112,7 @@ err_rescan_drivers: mlx5_deactivate_lag(ldev); err_add_devices: mlx5_lag_add_devices(ldev); - for (i = 0; i < ldev->ports; i++) + mlx5_ldev_for_each(i, 0, ldev) mlx5_eswitch_reload_ib_reps(ldev->pf[i].dev->priv.eswitch); mlx5_mpesw_metadata_cleanup(ldev); return err; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c index ab2717012b79..f98f0735fce0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c @@ -44,9 +44,7 @@ static int mlx5_lag_create_port_sel_table(struct mlx5_lag *ldev, struct mlx5_flow_destination dest = {}; MLX5_DECLARE_FLOW_ACT(flow_act); struct mlx5_flow_namespace *ns; - int err, i; - int idx; - int j; + int err, i, j, k, idx; ft_attr.max_fte = ldev->ports * ldev->buckets; ft_attr.level = MLX5_LAG_FT_LEVEL_DEFINER; @@ -74,7 +72,7 @@ static int mlx5_lag_create_port_sel_table(struct mlx5_lag *ldev, dest.type = MLX5_FLOW_DESTINATION_TYPE_UPLINK; dest.vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID; flow_act.flags |= FLOW_ACT_NO_APPEND; - for (i = 0; i < ldev->ports; i++) { + mlx5_ldev_for_each(i, 0, ldev) { for (j = 0; j < ldev->buckets; j++) { u8 affinity; @@ -88,13 +86,13 @@ static int mlx5_lag_create_port_sel_table(struct mlx5_lag *ldev, &dest, 1); if (IS_ERR(lag_definer->rules[idx])) { err = PTR_ERR(lag_definer->rules[idx]); - do { + mlx5_ldev_for_each_reverse(k, i, 0, ldev) { while (j--) { - idx = i * ldev->buckets + j; + idx = k * ldev->buckets + j; mlx5_del_flow_rules(lag_definer->rules[idx]); } j = ldev->buckets; - } while (i--); + }; goto destroy_fg; } } @@ -346,7 +344,7 @@ static void mlx5_lag_destroy_definer(struct mlx5_lag *ldev, int i; int j; - for (i = 0; i < ldev->ports; i++) { + mlx5_ldev_for_each(i, 0, ldev) { for (j = 0; j < ldev->buckets; j++) { idx = i * ldev->buckets + j; mlx5_del_flow_rules(lag_definer->rules[idx]); @@ -565,7 +563,7 @@ static int __mlx5_lag_modify_definers_destinations(struct mlx5_lag *ldev, dest.type = MLX5_FLOW_DESTINATION_TYPE_UPLINK; dest.vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID; - for (i = 0; i < ldev->ports; i++) { + mlx5_ldev_for_each(i, 0, ldev) { for (j = 0; j < ldev->buckets; j++) { idx = i * ldev->buckets + j; if (ldev->v2p_map[idx] == ports[idx]) -- 2.51.0 From 60d01cc468fdb0fbd6b878d66ef86f7e946b0669 Mon Sep 17 00:00:00 2001 From: Rongwei Liu Date: Thu, 19 Dec 2024 19:58:32 +0200 Subject: [PATCH 10/16] net/mlx5: LAG, Support LAG over Multi-Host NICs New multi-host NICs provide each host with partial ports, allowing each host to maintain its unique LAG configuration. On these multi-host NICs, the 'native_port_num' capability is no longer continuous on each host and can exceed the 'num_lag_ports' capability. Therefore, it is necessary to skip the PFs with ldev->pf[i].dev == NULL when querying/modifying the lag devices' information. There is no need to check dev.native_port_num against ldev->ports. 
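As an aside for reviewers (illustration only, not part of the change): the
sparse-slot iteration this series depends on can be sketched in a few lines of
plain C. The struct and helper names below are simplified stand-ins for
mlx5_lag, mlx5_get_next_ldev_func(), mlx5_ldev_for_each() and
mlx5_lag_get_dev_index_by_seq() added further down in lag.c/lag.h; only the
"skip the NULL pf slots" idea is shown.

#include <stdio.h>

#define MAX_PORTS 4

struct fake_dev {
	int native_port_num;
};

struct fake_lag {
	struct fake_dev *pf[MAX_PORTS];	/* sparse: unused slots stay NULL */
};

/* Simplified analogue of mlx5_get_next_ldev_func(): first populated slot >= idx. */
static int next_populated(const struct fake_lag *ldev, int idx)
{
	for (; idx < MAX_PORTS; idx++)
		if (ldev->pf[idx])
			return idx;
	return MAX_PORTS;
}

/* Simplified analogue of mlx5_ldev_for_each(): visit only populated slots. */
#define for_each_populated(i, ldev) \
	for ((i) = next_populated((ldev), 0); (i) < MAX_PORTS; \
	     (i) = next_populated((ldev), (i) + 1))

/* Simplified analogue of mlx5_lag_get_dev_index_by_seq(): seq-th populated slot or -1. */
static int index_by_seq(const struct fake_lag *ldev, int seq)
{
	int i, num = 0;

	for_each_populated(i, ldev) {
		if (num == seq)
			return i;
		num++;
	}
	return -1;
}

int main(void)
{
	struct fake_dev p1 = { .native_port_num = 1 };
	struct fake_dev p3 = { .native_port_num = 3 };
	/* This host owns two ports only; slot 1 belongs to another host. */
	struct fake_lag lag = { .pf = { [0] = &p1, [2] = &p3 } };
	int i;

	for_each_populated(i, &lag)
		printf("slot %d -> native port %d\n", i, lag.pf[i]->native_port_num);
	printf("first device (MLX5_LAG_P1 equivalent) is slot %d\n",
	       index_by_seq(&lag, 0));
	return 0;
}

Running the sketch prints slots 0 and 2 and skips the unused slot 1, which is
the situation a multi-host NIC creates when a host only owns a subset of the
lag ports; the real helpers in the diff below apply the same lookup wherever
the code previously indexed ldev->pf[] by a contiguous 0..ports-1 range.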
Signed-off-by: Rongwei Liu Reviewed-by: Saeed Mahameed Signed-off-by: Tariq Toukan Link: https://patch.msgid.link/20241219175841.1094544-3-tariqt@nvidia.com Signed-off-by: Jakub Kicinski --- .../net/ethernet/mellanox/mlx5/core/lag/lag.c | 200 ++++++++++++++---- .../net/ethernet/mellanox/mlx5/core/lag/lag.h | 3 + .../net/ethernet/mellanox/mlx5/core/lag/mp.c | 53 +++-- .../ethernet/mellanox/mlx5/core/lag/mpesw.c | 6 +- .../mellanox/mlx5/core/lag/port_sel.c | 41 +++- .../net/ethernet/mellanox/mlx5/core/main.c | 4 + 6 files changed, 229 insertions(+), 78 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c index ed539ac4fef1..cea5aa314f6c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c @@ -76,23 +76,30 @@ static u8 lag_active_port_bits(struct mlx5_lag *ldev) return active_port; } -static int mlx5_cmd_create_lag(struct mlx5_core_dev *dev, u8 *ports, int mode, - unsigned long flags) +static int mlx5_cmd_create_lag(struct mlx5_core_dev *dev, struct mlx5_lag *ldev, + int mode, unsigned long flags) { bool fdb_sel_mode = test_bit(MLX5_LAG_MODE_FLAG_FDB_SEL_MODE_NATIVE, &flags); int port_sel_mode = get_port_sel_mode(mode, flags); u32 in[MLX5_ST_SZ_DW(create_lag_in)] = {}; + u8 *ports = ldev->v2p_map; + int idx0, idx1; void *lag_ctx; lag_ctx = MLX5_ADDR_OF(create_lag_in, in, ctx); MLX5_SET(create_lag_in, in, opcode, MLX5_CMD_OP_CREATE_LAG); MLX5_SET(lagc, lag_ctx, fdb_selection_mode, fdb_sel_mode); + idx0 = mlx5_lag_get_dev_index_by_seq(ldev, 0); + idx1 = mlx5_lag_get_dev_index_by_seq(ldev, 1); + + if (idx0 < 0 || idx1 < 0) + return -EINVAL; switch (port_sel_mode) { case MLX5_LAG_PORT_SELECT_MODE_QUEUE_AFFINITY: - MLX5_SET(lagc, lag_ctx, tx_remap_affinity_1, ports[0]); - MLX5_SET(lagc, lag_ctx, tx_remap_affinity_2, ports[1]); + MLX5_SET(lagc, lag_ctx, tx_remap_affinity_1, ports[idx0]); + MLX5_SET(lagc, lag_ctx, tx_remap_affinity_2, ports[idx1]); break; case MLX5_LAG_PORT_SELECT_MODE_PORT_SELECT_FT: if (!MLX5_CAP_PORT_SELECTION(dev, port_select_flow_table_bypass)) @@ -114,12 +121,18 @@ static int mlx5_cmd_modify_lag(struct mlx5_core_dev *dev, struct mlx5_lag *ldev, { u32 in[MLX5_ST_SZ_DW(modify_lag_in)] = {}; void *lag_ctx = MLX5_ADDR_OF(modify_lag_in, in, ctx); + int idx0, idx1; + + idx0 = mlx5_lag_get_dev_index_by_seq(ldev, 0); + idx1 = mlx5_lag_get_dev_index_by_seq(ldev, 1); + if (idx0 < 0 || idx1 < 0) + return -EINVAL; MLX5_SET(modify_lag_in, in, opcode, MLX5_CMD_OP_MODIFY_LAG); MLX5_SET(modify_lag_in, in, field_select, 0x1); - MLX5_SET(lagc, lag_ctx, tx_remap_affinity_1, ports[0]); - MLX5_SET(lagc, lag_ctx, tx_remap_affinity_2, ports[1]); + MLX5_SET(lagc, lag_ctx, tx_remap_affinity_1, ports[idx0]); + MLX5_SET(lagc, lag_ctx, tx_remap_affinity_2, ports[idx1]); return mlx5_cmd_exec_in(dev, modify_lag, in); } @@ -287,6 +300,48 @@ int mlx5_lag_dev_get_netdev_idx(struct mlx5_lag *ldev, return -ENOENT; } +int mlx5_lag_get_dev_index_by_seq(struct mlx5_lag *ldev, int seq) +{ + int i, num = 0; + + if (!ldev) + return -ENOENT; + + mlx5_ldev_for_each(i, 0, ldev) { + if (num == seq) + return i; + num++; + } + return -ENOENT; +} + +int mlx5_lag_num_devs(struct mlx5_lag *ldev) +{ + int i, num = 0; + + if (!ldev) + return 0; + + mlx5_ldev_for_each(i, 0, ldev) { + (void)i; + num++; + } + return num; +} + +int mlx5_lag_num_netdevs(struct mlx5_lag *ldev) +{ + int i, num = 0; + + if (!ldev) + return 0; + + mlx5_ldev_for_each(i, 0, ldev) + if (ldev->pf[i].netdev) + num++; + 
return num; +} + static bool __mlx5_lag_is_roce(struct mlx5_lag *ldev) { return ldev->mode == MLX5_LAG_MODE_ROCE; @@ -423,10 +478,15 @@ static int mlx5_cmd_modify_active_port(struct mlx5_core_dev *dev, u8 ports) static int _mlx5_modify_lag(struct mlx5_lag *ldev, u8 *ports) { - struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev; + int idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1); + struct mlx5_core_dev *dev0; u8 active_ports; int ret; + if (idx < 0) + return -EINVAL; + + dev0 = ldev->pf[idx].dev; if (test_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, &ldev->mode_flags)) { ret = mlx5_lag_port_sel_modify(ldev, ports); if (ret || @@ -445,7 +505,7 @@ static struct net_device *mlx5_lag_active_backup_get_netdev(struct mlx5_core_dev struct net_device *ndev = NULL; struct mlx5_lag *ldev; unsigned long flags; - int i; + int i, last_idx; spin_lock_irqsave(&lag_lock, flags); ldev = mlx5_lag_dev(dev); @@ -456,8 +516,12 @@ static struct net_device *mlx5_lag_active_backup_get_netdev(struct mlx5_core_dev mlx5_ldev_for_each(i, 0, ldev) if (ldev->tracker.netdev_state[i].tx_enabled) ndev = ldev->pf[i].netdev; - if (!ndev) - ndev = ldev->pf[ldev->ports - 1].netdev; + if (!ndev) { + last_idx = mlx5_lag_get_dev_index_by_seq(ldev, ldev->ports - 1); + if (last_idx < 0) + goto unlock; + ndev = ldev->pf[last_idx].netdev; + } if (ndev) dev_hold(ndev); @@ -471,13 +535,18 @@ unlock: void mlx5_modify_lag(struct mlx5_lag *ldev, struct lag_tracker *tracker) { + int first_idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1); u8 ports[MLX5_MAX_PORTS * MLX5_LAG_MAX_HASH_BUCKETS] = {}; - struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev; + struct mlx5_core_dev *dev0; int idx; int err; int i; int j; + if (first_idx < 0) + return; + + dev0 = ldev->pf[first_idx].dev; mlx5_infer_tx_affinity_mapping(tracker, ldev, ldev->buckets, ports); mlx5_ldev_for_each(i, 0, ldev) { @@ -518,8 +587,13 @@ void mlx5_modify_lag(struct mlx5_lag *ldev, static int mlx5_lag_set_port_sel_mode_roce(struct mlx5_lag *ldev, unsigned long *flags) { - struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev; + int first_idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1); + struct mlx5_core_dev *dev0; + if (first_idx < 0) + return -EINVAL; + + dev0 = ldev->pf[first_idx].dev; if (!MLX5_CAP_PORT_SELECTION(dev0, port_select_flow_table)) { if (ldev->ports > 2) return -EINVAL; @@ -539,11 +613,13 @@ static void mlx5_lag_set_port_sel_mode_offloads(struct mlx5_lag *ldev, enum mlx5_lag_mode mode, unsigned long *flags) { - struct lag_func *dev0 = &ldev->pf[MLX5_LAG_P1]; + int first_idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1); + struct lag_func *dev0; - if (mode == MLX5_LAG_MODE_MPESW) + if (first_idx < 0 || mode == MLX5_LAG_MODE_MPESW) return; + dev0 = &ldev->pf[first_idx]; if (MLX5_CAP_PORT_SELECTION(dev0->dev, port_select_flow_table) && tracker->tx_type == NETDEV_LAG_TX_TYPE_HASH) { if (ldev->ports > 2) @@ -588,12 +664,18 @@ char *mlx5_get_str_port_sel_mode(enum mlx5_lag_mode mode, unsigned long flags) static int mlx5_lag_create_single_fdb(struct mlx5_lag *ldev) { - struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev; - struct mlx5_eswitch *master_esw = dev0->priv.eswitch; - int err; + int first_idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1); + struct mlx5_eswitch *master_esw; + struct mlx5_core_dev *dev0; int i, j; + int err; - mlx5_ldev_for_each(i, 1, ldev) { + if (first_idx < 0) + return -EINVAL; + + dev0 = ldev->pf[first_idx].dev; + master_esw = dev0->priv.eswitch; + mlx5_ldev_for_each(i, first_idx + 1, ldev) { struct 
mlx5_eswitch *slave_esw = ldev->pf[i].dev->priv.eswitch; err = mlx5_eswitch_offloads_single_fdb_add_one(master_esw, @@ -603,7 +685,7 @@ static int mlx5_lag_create_single_fdb(struct mlx5_lag *ldev) } return 0; err: - mlx5_ldev_for_each_reverse(j, i, 1, ldev) + mlx5_ldev_for_each_reverse(j, i, first_idx + 1, ldev) mlx5_eswitch_offloads_single_fdb_del_one(master_esw, ldev->pf[j].dev->priv.eswitch); return err; @@ -615,16 +697,21 @@ static int mlx5_create_lag(struct mlx5_lag *ldev, unsigned long flags) { bool shared_fdb = test_bit(MLX5_LAG_MODE_FLAG_SHARED_FDB, &flags); - struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev; + int first_idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1); u32 in[MLX5_ST_SZ_DW(destroy_lag_in)] = {}; + struct mlx5_core_dev *dev0; int err; + if (first_idx < 0) + return -EINVAL; + + dev0 = ldev->pf[first_idx].dev; if (tracker) mlx5_lag_print_mapping(dev0, ldev, tracker, flags); mlx5_core_info(dev0, "shared_fdb:%d mode:%s\n", shared_fdb, mlx5_get_str_port_sel_mode(mode, flags)); - err = mlx5_cmd_create_lag(dev0, ldev->v2p_map, mode, flags); + err = mlx5_cmd_create_lag(dev0, ldev, mode, flags); if (err) { mlx5_core_err(dev0, "Failed to create LAG (%d)\n", @@ -656,11 +743,16 @@ int mlx5_activate_lag(struct mlx5_lag *ldev, enum mlx5_lag_mode mode, bool shared_fdb) { + int first_idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1); bool roce_lag = mode == MLX5_LAG_MODE_ROCE; - struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev; + struct mlx5_core_dev *dev0; unsigned long flags = 0; int err; + if (first_idx < 0) + return -EINVAL; + + dev0 = ldev->pf[first_idx].dev; err = mlx5_lag_set_flags(ldev, mode, tracker, shared_fdb, &flags); if (err) return err; @@ -704,20 +796,26 @@ int mlx5_activate_lag(struct mlx5_lag *ldev, int mlx5_deactivate_lag(struct mlx5_lag *ldev) { - struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev; - struct mlx5_eswitch *master_esw = dev0->priv.eswitch; + int first_idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1); u32 in[MLX5_ST_SZ_DW(destroy_lag_in)] = {}; bool roce_lag = __mlx5_lag_is_roce(ldev); unsigned long flags = ldev->mode_flags; + struct mlx5_eswitch *master_esw; + struct mlx5_core_dev *dev0; int err; int i; + if (first_idx < 0) + return -EINVAL; + + dev0 = ldev->pf[first_idx].dev; + master_esw = dev0->priv.eswitch; ldev->mode = MLX5_LAG_MODE_NONE; ldev->mode_flags = 0; mlx5_lag_mp_reset(ldev); if (test_bit(MLX5_LAG_MODE_FLAG_SHARED_FDB, &flags)) { - mlx5_ldev_for_each(i, 1, ldev) + mlx5_ldev_for_each(i, first_idx + 1, ldev) mlx5_eswitch_offloads_single_fdb_del_one(master_esw, ldev->pf[i].dev->priv.eswitch); clear_bit(MLX5_LAG_MODE_FLAG_SHARED_FDB, &flags); @@ -749,6 +847,7 @@ int mlx5_deactivate_lag(struct mlx5_lag *ldev) bool mlx5_lag_check_prereq(struct mlx5_lag *ldev) { + int first_idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1); #ifdef CONFIG_MLX5_ESWITCH struct mlx5_core_dev *dev; u8 mode; @@ -756,9 +855,8 @@ bool mlx5_lag_check_prereq(struct mlx5_lag *ldev) bool roce_support; int i; - for (i = 0; i < ldev->ports; i++) - if (!ldev->pf[i].dev) - return false; + if (first_idx < 0 || mlx5_lag_num_devs(ldev) != ldev->ports) + return false; #ifdef CONFIG_MLX5_ESWITCH mlx5_ldev_for_each(i, 0, ldev) { @@ -767,7 +865,7 @@ bool mlx5_lag_check_prereq(struct mlx5_lag *ldev) return false; } - dev = ldev->pf[MLX5_LAG_P1].dev; + dev = ldev->pf[first_idx].dev; mode = mlx5_eswitch_mode(dev); mlx5_ldev_for_each(i, 0, ldev) if (mlx5_eswitch_mode(ldev->pf[i].dev) != mode) @@ -778,8 +876,8 @@ bool mlx5_lag_check_prereq(struct 
mlx5_lag *ldev) if (mlx5_sriov_is_enabled(ldev->pf[i].dev)) return false; #endif - roce_support = mlx5_get_roce_state(ldev->pf[MLX5_LAG_P1].dev); - mlx5_ldev_for_each(i, MLX5_LAG_P2, ldev) + roce_support = mlx5_get_roce_state(ldev->pf[first_idx].dev); + mlx5_ldev_for_each(i, first_idx + 1, ldev) if (mlx5_get_roce_state(ldev->pf[i].dev) != roce_support) return false; @@ -817,11 +915,16 @@ void mlx5_lag_remove_devices(struct mlx5_lag *ldev) void mlx5_disable_lag(struct mlx5_lag *ldev) { bool shared_fdb = test_bit(MLX5_LAG_MODE_FLAG_SHARED_FDB, &ldev->mode_flags); - struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev; + int idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1); + struct mlx5_core_dev *dev0; bool roce_lag; int err; int i; + if (idx < 0) + return; + + dev0 = ldev->pf[idx].dev; roce_lag = __mlx5_lag_is_roce(ldev); if (shared_fdb) { @@ -831,7 +934,7 @@ void mlx5_disable_lag(struct mlx5_lag *ldev) dev0->priv.flags |= MLX5_PRIV_FLAGS_DISABLE_IB_ADEV; mlx5_rescan_drivers_locked(dev0); } - mlx5_ldev_for_each(i, MLX5_LAG_P2, ldev) + mlx5_ldev_for_each(i, idx + 1, ldev) mlx5_nic_vport_disable_roce(ldev->pf[i].dev); } @@ -850,10 +953,14 @@ void mlx5_disable_lag(struct mlx5_lag *ldev) static bool mlx5_shared_fdb_supported(struct mlx5_lag *ldev) { + int idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1); struct mlx5_core_dev *dev; int i; - mlx5_ldev_for_each(i, MLX5_LAG_P1 + 1, ldev) { + if (idx < 0) + return false; + + mlx5_ldev_for_each(i, idx + 1, ldev) { dev = ldev->pf[i].dev; if (is_mdev_switchdev_mode(dev) && mlx5_eswitch_vport_match_metadata_enabled(dev->priv.eswitch) && @@ -865,7 +972,7 @@ static bool mlx5_shared_fdb_supported(struct mlx5_lag *ldev) return false; } - dev = ldev->pf[MLX5_LAG_P1].dev; + dev = ldev->pf[idx].dev; if (is_mdev_switchdev_mode(dev) && mlx5_eswitch_vport_match_metadata_enabled(dev->priv.eswitch) && mlx5_esw_offloads_devcom_is_ready(dev->priv.eswitch) && @@ -906,13 +1013,18 @@ static bool mlx5_lag_should_disable_lag(struct mlx5_lag *ldev, bool do_bond) static void mlx5_do_bond(struct mlx5_lag *ldev) { - struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev; + int idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1); struct lag_tracker tracker = { }; + struct mlx5_core_dev *dev0; struct net_device *ndev; bool do_bond, roce_lag; int err; int i; + if (idx < 0) + return; + + dev0 = ldev->pf[idx].dev; if (!mlx5_lag_is_ready(ldev)) { do_bond = false; } else { @@ -945,7 +1057,7 @@ static void mlx5_do_bond(struct mlx5_lag *ldev) } else if (roce_lag) { dev0->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_IB_ADEV; mlx5_rescan_drivers_locked(dev0); - mlx5_ldev_for_each(i, MLX5_LAG_P2, ldev) { + mlx5_ldev_for_each(i, idx + 1, ldev) { if (mlx5_get_roce_state(ldev->pf[i].dev)) mlx5_nic_vport_enable_roce(ldev->pf[i].dev); } @@ -1380,7 +1492,7 @@ void mlx5_lag_add_netdev(struct mlx5_core_dev *dev, struct net_device *netdev) { struct mlx5_lag *ldev; - int i; + int num = 0; ldev = mlx5_lag_dev(dev); if (!ldev) @@ -1388,11 +1500,8 @@ void mlx5_lag_add_netdev(struct mlx5_core_dev *dev, mutex_lock(&ldev->lock); mlx5_ldev_add_netdev(ldev, dev, netdev); - for (i = 0; i < ldev->ports; i++) - if (!ldev->pf[i].netdev) - break; - - if (i >= ldev->ports) + num = mlx5_lag_num_netdevs(ldev); + if (num >= ldev->ports) set_bit(MLX5_LAG_FLAG_NDEVS_READY, &ldev->state_flags); mutex_unlock(&ldev->lock); mlx5_queue_bond_work(ldev, 0); @@ -1469,11 +1578,12 @@ bool mlx5_lag_is_master(struct mlx5_core_dev *dev) struct mlx5_lag *ldev; unsigned long flags; bool res = false; + int idx; 
spin_lock_irqsave(&lag_lock, flags); ldev = mlx5_lag_dev(dev); - res = ldev && __mlx5_lag_is_active(ldev) && - dev == ldev->pf[MLX5_LAG_P1].dev; + idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1); + res = ldev && __mlx5_lag_is_active(ldev) && idx >= 0 && dev == ldev->pf[idx].dev; spin_unlock_irqrestore(&lag_lock, flags); return res; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h index 1dada791815e..01cf72366947 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h @@ -136,4 +136,7 @@ static inline bool mlx5_lag_is_supported(struct mlx5_core_dev *dev) int mlx5_get_pre_ldev_func(struct mlx5_lag *ldev, int start_idx, int end_idx); int mlx5_get_next_ldev_func(struct mlx5_lag *ldev, int start_idx); +int mlx5_lag_get_dev_index_by_seq(struct mlx5_lag *ldev, int seq); +int mlx5_lag_num_devs(struct mlx5_lag *ldev); +int mlx5_lag_num_netdevs(struct mlx5_lag *ldev); #endif /* __MLX5_LAG_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c index 40406d04adc9..aee17fcf3b36 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c @@ -17,7 +17,10 @@ static bool __mlx5_lag_is_multipath(struct mlx5_lag *ldev) #define MLX5_LAG_MULTIPATH_OFFLOADS_SUPPORTED_PORTS 2 static bool mlx5_lag_multipath_check_prereq(struct mlx5_lag *ldev) { - if (!mlx5_lag_is_ready(ldev)) + int idx0 = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1); + int idx1 = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P2); + + if (idx0 < 0 || idx1 < 0 || !mlx5_lag_is_ready(ldev)) return false; if (__mlx5_lag_is_active(ldev) && !__mlx5_lag_is_multipath(ldev)) @@ -26,8 +29,8 @@ static bool mlx5_lag_multipath_check_prereq(struct mlx5_lag *ldev) if (ldev->ports > MLX5_LAG_MULTIPATH_OFFLOADS_SUPPORTED_PORTS) return false; - return mlx5_esw_multipath_prereq(ldev->pf[MLX5_LAG_P1].dev, - ldev->pf[MLX5_LAG_P2].dev); + return mlx5_esw_multipath_prereq(ldev->pf[idx0].dev, + ldev->pf[idx1].dev); } bool mlx5_lag_is_multipath(struct mlx5_core_dev *dev) @@ -50,43 +53,45 @@ bool mlx5_lag_is_multipath(struct mlx5_core_dev *dev) static void mlx5_lag_set_port_affinity(struct mlx5_lag *ldev, enum mlx5_lag_port_affinity port) { + int idx0 = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1); + int idx1 = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P2); struct lag_tracker tracker = {}; - if (!__mlx5_lag_is_multipath(ldev)) + if (idx0 < 0 || idx1 < 0 || !__mlx5_lag_is_multipath(ldev)) return; switch (port) { case MLX5_LAG_NORMAL_AFFINITY: - tracker.netdev_state[MLX5_LAG_P1].tx_enabled = true; - tracker.netdev_state[MLX5_LAG_P2].tx_enabled = true; - tracker.netdev_state[MLX5_LAG_P1].link_up = true; - tracker.netdev_state[MLX5_LAG_P2].link_up = true; + tracker.netdev_state[idx0].tx_enabled = true; + tracker.netdev_state[idx1].tx_enabled = true; + tracker.netdev_state[idx0].link_up = true; + tracker.netdev_state[idx1].link_up = true; break; case MLX5_LAG_P1_AFFINITY: - tracker.netdev_state[MLX5_LAG_P1].tx_enabled = true; - tracker.netdev_state[MLX5_LAG_P1].link_up = true; - tracker.netdev_state[MLX5_LAG_P2].tx_enabled = false; - tracker.netdev_state[MLX5_LAG_P2].link_up = false; + tracker.netdev_state[idx0].tx_enabled = true; + tracker.netdev_state[idx0].link_up = true; + tracker.netdev_state[idx1].tx_enabled = false; + tracker.netdev_state[idx1].link_up = false; break; case MLX5_LAG_P2_AFFINITY: - 
tracker.netdev_state[MLX5_LAG_P1].tx_enabled = false; - tracker.netdev_state[MLX5_LAG_P1].link_up = false; - tracker.netdev_state[MLX5_LAG_P2].tx_enabled = true; - tracker.netdev_state[MLX5_LAG_P2].link_up = true; + tracker.netdev_state[idx0].tx_enabled = false; + tracker.netdev_state[idx0].link_up = false; + tracker.netdev_state[idx1].tx_enabled = true; + tracker.netdev_state[idx1].link_up = true; break; default: - mlx5_core_warn(ldev->pf[MLX5_LAG_P1].dev, + mlx5_core_warn(ldev->pf[idx0].dev, "Invalid affinity port %d", port); return; } - if (tracker.netdev_state[MLX5_LAG_P1].tx_enabled) - mlx5_notifier_call_chain(ldev->pf[MLX5_LAG_P1].dev->priv.events, + if (tracker.netdev_state[idx0].tx_enabled) + mlx5_notifier_call_chain(ldev->pf[idx0].dev->priv.events, MLX5_DEV_EVENT_PORT_AFFINITY, (void *)0); - if (tracker.netdev_state[MLX5_LAG_P2].tx_enabled) - mlx5_notifier_call_chain(ldev->pf[MLX5_LAG_P2].dev->priv.events, + if (tracker.netdev_state[idx1].tx_enabled) + mlx5_notifier_call_chain(ldev->pf[idx1].dev->priv.events, MLX5_DEV_EVENT_PORT_AFFINITY, (void *)0); @@ -150,11 +155,15 @@ mlx5_lag_get_next_fib_dev(struct mlx5_lag *ldev, static void mlx5_lag_fib_route_event(struct mlx5_lag *ldev, unsigned long event, struct fib_entry_notifier_info *fen_info) { + int idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1); struct net_device *nh_dev0, *nh_dev1; struct fib_info *fi = fen_info->fi; struct lag_mp *mp = &ldev->lag_mp; int i, dev_idx = 0; + if (idx < 0) + return; + /* Handle delete event */ if (event == FIB_EVENT_ENTRY_DEL) { /* stop track */ @@ -180,7 +189,7 @@ static void mlx5_lag_fib_route_event(struct mlx5_lag *ldev, unsigned long event, } if (nh_dev0 == nh_dev1) { - mlx5_core_warn(ldev->pf[MLX5_LAG_P1].dev, + mlx5_core_warn(ldev->pf[idx].dev, "Multipath offload doesn't support routes with multiple nexthops of the same device"); return; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c index 1123c8afcf9e..ffac0bd6c895 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c @@ -68,13 +68,15 @@ err_metadata: #define MLX5_LAG_MPESW_OFFLOADS_SUPPORTED_PORTS 4 static int enable_mpesw(struct mlx5_lag *ldev) { - struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev; + int idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1); + struct mlx5_core_dev *dev0; int err; int i; - if (ldev->mode != MLX5_LAG_MODE_NONE) + if (idx < 0 || ldev->mode != MLX5_LAG_MODE_NONE) return -EINVAL; + dev0 = ldev->pf[idx].dev; if (ldev->ports > MLX5_LAG_MPESW_OFFLOADS_SUPPORTED_PORTS) return -EOPNOTSUPP; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c index f98f0735fce0..22241f52716c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c @@ -39,13 +39,18 @@ static int mlx5_lag_create_port_sel_table(struct mlx5_lag *ldev, struct mlx5_lag_definer *lag_definer, u8 *ports) { - struct mlx5_core_dev *dev = ldev->pf[MLX5_LAG_P1].dev; + int first_idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1); struct mlx5_flow_table_attr ft_attr = {}; struct mlx5_flow_destination dest = {}; MLX5_DECLARE_FLOW_ACT(flow_act); struct mlx5_flow_namespace *ns; + struct mlx5_core_dev *dev; int err, i, j, k, idx; + if (first_idx < 0) + return -EINVAL; + + dev = ldev->pf[first_idx].dev; ft_attr.max_fte = ldev->ports * ldev->buckets; ft_attr.level = 
MLX5_LAG_FT_LEVEL_DEFINER; @@ -293,11 +298,16 @@ static struct mlx5_lag_definer * mlx5_lag_create_definer(struct mlx5_lag *ldev, enum netdev_lag_hash hash, enum mlx5_traffic_types tt, bool tunnel, u8 *ports) { - struct mlx5_core_dev *dev = ldev->pf[MLX5_LAG_P1].dev; + int first_idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1); struct mlx5_lag_definer *lag_definer; + struct mlx5_core_dev *dev; u32 *match_definer_mask; int format_id, err; + if (first_idx < 0) + return ERR_PTR(-EINVAL); + + dev = ldev->pf[first_idx].dev; lag_definer = kzalloc(sizeof(*lag_definer), GFP_KERNEL); if (!lag_definer) return ERR_PTR(-ENOMEM); @@ -339,12 +349,15 @@ free_lag_definer: static void mlx5_lag_destroy_definer(struct mlx5_lag *ldev, struct mlx5_lag_definer *lag_definer) { - struct mlx5_core_dev *dev = ldev->pf[MLX5_LAG_P1].dev; - int idx; - int i; - int j; + int first_idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1); + struct mlx5_core_dev *dev; + int idx, i, j; - mlx5_ldev_for_each(i, 0, ldev) { + if (first_idx < 0) + return; + + dev = ldev->pf[first_idx].dev; + mlx5_ldev_for_each(i, first_idx, ldev) { for (j = 0; j < ldev->buckets; j++) { idx = i * ldev->buckets + j; mlx5_del_flow_rules(lag_definer->rules[idx]); @@ -499,10 +512,15 @@ static void mlx5_lag_set_outer_ttc_params(struct mlx5_lag *ldev, static int mlx5_lag_create_ttc_table(struct mlx5_lag *ldev) { - struct mlx5_core_dev *dev = ldev->pf[MLX5_LAG_P1].dev; + int first_idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1); struct mlx5_lag_port_sel *port_sel = &ldev->port_sel; struct ttc_params ttc_params = {}; + struct mlx5_core_dev *dev; + if (first_idx < 0) + return -EINVAL; + + dev = ldev->pf[first_idx].dev; mlx5_lag_set_outer_ttc_params(ldev, &ttc_params); port_sel->outer.ttc = mlx5_create_ttc_table(dev, &ttc_params); return PTR_ERR_OR_ZERO(port_sel->outer.ttc); @@ -510,10 +528,15 @@ static int mlx5_lag_create_ttc_table(struct mlx5_lag *ldev) static int mlx5_lag_create_inner_ttc_table(struct mlx5_lag *ldev) { - struct mlx5_core_dev *dev = ldev->pf[MLX5_LAG_P1].dev; + int first_idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1); struct mlx5_lag_port_sel *port_sel = &ldev->port_sel; struct ttc_params ttc_params = {}; + struct mlx5_core_dev *dev; + + if (first_idx < 0) + return -EINVAL; + dev = ldev->pf[first_idx].dev; mlx5_lag_set_inner_ttc_params(ldev, &ttc_params); port_sel->inner.ttc = mlx5_create_inner_ttc_table(dev, &ttc_params); return PTR_ERR_OR_ZERO(port_sel->inner.ttc); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index 220a9ac75c8b..869bfecdd8ff 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -664,6 +664,10 @@ static int handle_hca_cap(struct mlx5_core_dev *dev, void *set_ctx) MLX5_SET(cmd_hca_cap, set_hca_cap, log_max_current_uc_list, ilog2(max_uc_list)); + /* enable absolute native port num */ + if (MLX5_CAP_GEN_MAX(dev, abs_native_port_num)) + MLX5_SET(cmd_hca_cap, set_hca_cap, abs_native_port_num, 1); + return set_caps(dev, set_ctx, MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE); } -- 2.51.0 From 95f68e06b41b9e88291796efa3969409d13fdd4c Mon Sep 17 00:00:00 2001 From: Moshe Shemesh Date: Thu, 19 Dec 2024 19:58:33 +0200 Subject: [PATCH 11/16] net/mlx5: fs, add counter object to flow destination Currently mlx5_flow_destination includes counter_id which is assigned in case we use flow counter on the flow steering rule. However, counter_id is not enough data in case of using HW Steering. 
Thus, have mlx5_fc object as part of mlx5_flow_destination instead of counter_id and assign it where needed. In case counter_id is received from user space, create a local counter object to represent it. Signed-off-by: Moshe Shemesh Reviewed-by: Yevgeny Kliteynik Reviewed-by: Mark Bloch Signed-off-by: Tariq Toukan Link: https://patch.msgid.link/20241219175841.1094544-4-tariqt@nvidia.com Signed-off-by: Jakub Kicinski --- drivers/infiniband/hw/mlx5/fs.c | 37 +++++++++---- .../mellanox/mlx5/core/diag/fs_tracepoint.h | 2 +- .../mellanox/mlx5/core/en_accel/ipsec_fs.c | 20 +++---- .../net/ethernet/mellanox/mlx5/core/en_tc.c | 2 +- .../mellanox/mlx5/core/esw/acl/egress_lgcy.c | 2 +- .../mellanox/mlx5/core/esw/acl/ingress_lgcy.c | 2 +- .../ethernet/mellanox/mlx5/core/esw/bridge.c | 20 +++---- .../mellanox/mlx5/core/eswitch_offloads.c | 2 +- .../net/ethernet/mellanox/mlx5/core/fs_cmd.c | 2 +- .../net/ethernet/mellanox/mlx5/core/fs_core.c | 1 + .../ethernet/mellanox/mlx5/core/fs_counters.c | 53 +++++++++++++++++++ .../mellanox/mlx5/core/lib/macsec_fs.c | 8 +-- .../mellanox/mlx5/core/steering/sws/fs_dr.c | 2 +- drivers/vdpa/mlx5/net/mlx5_vnet.c | 4 +- include/linux/mlx5/fs.h | 4 +- 15 files changed, 117 insertions(+), 44 deletions(-) diff --git a/drivers/infiniband/hw/mlx5/fs.c b/drivers/infiniband/hw/mlx5/fs.c index 520034acf73a..162814ae8cb4 100644 --- a/drivers/infiniband/hw/mlx5/fs.c +++ b/drivers/infiniband/hw/mlx5/fs.c @@ -943,7 +943,7 @@ int mlx5_ib_fs_add_op_fc(struct mlx5_ib_dev *dev, u32 port_num, } dst.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER; - dst.counter_id = mlx5_fc_id(opfc->fc); + dst.counter = opfc->fc; flow_act.action = MLX5_FLOW_CONTEXT_ACTION_COUNT | MLX5_FLOW_CONTEXT_ACTION_ALLOW; @@ -1113,8 +1113,8 @@ static struct mlx5_ib_flow_handler *_create_flow_rule(struct mlx5_ib_dev *dev, handler->ibcounters = flow_act.counters; dest_arr[dest_num].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER; - dest_arr[dest_num].counter_id = - mlx5_fc_id(mcounters->hw_cntrs_hndl); + dest_arr[dest_num].counter = + mcounters->hw_cntrs_hndl; dest_num++; } @@ -1603,7 +1603,7 @@ static bool raw_fs_is_multicast(struct mlx5_ib_flow_matcher *fs_matcher, static struct mlx5_ib_flow_handler *raw_fs_rule_add( struct mlx5_ib_dev *dev, struct mlx5_ib_flow_matcher *fs_matcher, struct mlx5_flow_context *flow_context, struct mlx5_flow_act *flow_act, - u32 counter_id, void *cmd_in, int inlen, int dest_id, int dest_type) + struct mlx5_fc *counter, void *cmd_in, int inlen, int dest_id, int dest_type) { struct mlx5_flow_destination *dst; struct mlx5_ib_flow_prio *ft_prio; @@ -1652,8 +1652,12 @@ static struct mlx5_ib_flow_handler *raw_fs_rule_add( } if (flow_act->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) { + if (WARN_ON(!counter)) { + err = -EINVAL; + goto unlock; + } dst[dst_num].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER; - dst[dst_num].counter_id = counter_id; + dst[dst_num].counter = counter; dst_num++; } @@ -1878,7 +1882,8 @@ static int get_dests(struct uverbs_attr_bundle *attrs, return 0; } -static bool is_flow_counter(void *obj, u32 offset, u32 *counter_id) +static bool +is_flow_counter(void *obj, u32 offset, u32 *counter_id, u32 *fc_bulk_size) { struct devx_obj *devx_obj = obj; u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, devx_obj->dinbox, opcode); @@ -1888,6 +1893,7 @@ static bool is_flow_counter(void *obj, u32 offset, u32 *counter_id) if (offset && offset >= devx_obj->flow_counter_bulk_size) return false; + *fc_bulk_size = devx_obj->flow_counter_bulk_size; *counter_id = MLX5_GET(dealloc_flow_counter_in, 
devx_obj->dinbox, flow_counter_id); @@ -1904,13 +1910,13 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_CREATE_FLOW)( { struct mlx5_flow_context flow_context = {.flow_tag = MLX5_FS_DEFAULT_FLOW_TAG}; - u32 *offset_attr, offset = 0, counter_id = 0; int dest_id, dest_type = -1, inlen, len, ret, i; struct mlx5_ib_flow_handler *flow_handler; struct mlx5_ib_flow_matcher *fs_matcher; struct ib_uobject **arr_flow_actions; struct ib_uflow_resources *uflow_res; struct mlx5_flow_act flow_act = {}; + struct mlx5_fc *counter = NULL; struct ib_qp *qp = NULL; void *devx_obj, *cmd_in; struct ib_uobject *uobj; @@ -1937,6 +1943,7 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_CREATE_FLOW)( len = uverbs_attr_get_uobjs_arr(attrs, MLX5_IB_ATTR_CREATE_FLOW_ARR_COUNTERS_DEVX, &arr_flow_actions); if (len) { + u32 *offset_attr, fc_bulk_size, offset = 0, counter_id = 0; devx_obj = arr_flow_actions[0]->object; if (uverbs_attr_is_valid(attrs, @@ -1956,8 +1963,11 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_CREATE_FLOW)( offset = *offset_attr; } - if (!is_flow_counter(devx_obj, offset, &counter_id)) + if (!is_flow_counter(devx_obj, offset, &counter_id, &fc_bulk_size)) return -EINVAL; + counter = mlx5_fc_local_create(counter_id, offset, fc_bulk_size); + if (IS_ERR(counter)) + return PTR_ERR(counter); flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_COUNT; } @@ -1968,8 +1978,10 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_CREATE_FLOW)( MLX5_IB_ATTR_CREATE_FLOW_MATCH_VALUE); uflow_res = flow_resources_alloc(MLX5_IB_CREATE_FLOW_MAX_FLOW_ACTIONS); - if (!uflow_res) - return -ENOMEM; + if (!uflow_res) { + ret = -ENOMEM; + goto destroy_counter; + } len = uverbs_attr_get_uobjs_arr(attrs, MLX5_IB_ATTR_CREATE_FLOW_ARR_FLOW_ACTIONS, &arr_flow_actions); @@ -1996,7 +2008,7 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_CREATE_FLOW)( flow_handler = raw_fs_rule_add(dev, fs_matcher, &flow_context, &flow_act, - counter_id, cmd_in, inlen, dest_id, dest_type); + counter, cmd_in, inlen, dest_id, dest_type); if (IS_ERR(flow_handler)) { ret = PTR_ERR(flow_handler); goto err_out; @@ -2007,6 +2019,9 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_CREATE_FLOW)( return 0; err_out: ib_uverbs_flow_resources_free(uflow_res); +destroy_counter: + if (counter) + mlx5_fc_local_destroy(counter); return ret; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h index 9aed29fa4900..d6e736c1fb24 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h @@ -292,7 +292,7 @@ TRACE_EVENT(mlx5_fs_add_rule, if (rule->dest_attr.type & MLX5_FLOW_DESTINATION_TYPE_COUNTER) __entry->counter_id = - rule->dest_attr.counter_id; + mlx5_fc_id(rule->dest_attr.counter); ), TP_printk("rule=%p fte=%p index=%u sw_action=<%s> [dst] %s\n", __entry->rule, __entry->fte, __entry->index, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c index e51b03d4c717..687bd95d2c3e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c @@ -194,7 +194,7 @@ static int rx_add_rule_drop_auth_trailer(struct mlx5e_ipsec_sa_entry *sa_entry, flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT; flow_act.flags = FLOW_ACT_NO_APPEND; dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER; - dest.counter_id = mlx5_fc_id(flow_counter); + dest.counter = flow_counter; if (rx 
== ipsec->rx_esw) spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK; @@ -223,7 +223,7 @@ static int rx_add_rule_drop_auth_trailer(struct mlx5e_ipsec_sa_entry *sa_entry, } sa_entry->ipsec_rule.trailer.fc = flow_counter; - dest.counter_id = mlx5_fc_id(flow_counter); + dest.counter = flow_counter; MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.ipsec_syndrome, 2); rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1); if (IS_ERR(rule)) { @@ -275,7 +275,7 @@ static int rx_add_rule_drop_replay(struct mlx5e_ipsec_sa_entry *sa_entry, struct flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT; flow_act.flags = FLOW_ACT_NO_APPEND; dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER; - dest.counter_id = mlx5_fc_id(flow_counter); + dest.counter = flow_counter; if (rx == ipsec->rx_esw) spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK; @@ -348,7 +348,7 @@ static int ipsec_rx_status_drop_all_create(struct mlx5e_ipsec *ipsec, flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT; dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER; - dest.counter_id = mlx5_fc_id(flow_counter); + dest.counter = flow_counter; if (rx == ipsec->rx_esw) spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK; rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1); @@ -686,7 +686,7 @@ static int rx_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec, rx->ft.status = ft; dest[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER; - dest[1].counter_id = mlx5_fc_id(rx->fc->cnt); + dest[1].counter = rx->fc->cnt; err = mlx5_ipsec_rx_status_create(ipsec, rx, dest); if (err) goto err_add; @@ -873,7 +873,7 @@ static int ipsec_counter_rule_tx(struct mlx5_core_dev *mdev, struct mlx5e_ipsec_ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW | MLX5_FLOW_CONTEXT_ACTION_COUNT; dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER; - dest.counter_id = mlx5_fc_id(tx->fc->cnt); + dest.counter = tx->fc->cnt; fte = mlx5_add_flow_rules(tx->ft.status, spec, &flow_act, &dest, 1); if (IS_ERR(fte)) { err = PTR_ERR(fte); @@ -1649,7 +1649,7 @@ static int rx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry) dest[0].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; dest[0].ft = rx->ft.status; dest[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER; - dest[1].counter_id = mlx5_fc_id(counter); + dest[1].counter = counter; rule = mlx5_add_flow_rules(rx->ft.sa, spec, &flow_act, dest, 2); if (IS_ERR(rule)) { err = PTR_ERR(rule); @@ -1762,7 +1762,7 @@ static int tx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry) dest[0].ft = tx->ft.status; dest[0].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; dest[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER; - dest[1].counter_id = mlx5_fc_id(counter); + dest[1].counter = counter; rule = mlx5_add_flow_rules(tx->ft.sa, spec, &flow_act, dest, 2); if (IS_ERR(rule)) { err = PTR_ERR(rule); @@ -1835,7 +1835,7 @@ static int tx_add_policy(struct mlx5e_ipsec_pol_entry *pol_entry) flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT; dest[dstn].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER; - dest[dstn].counter_id = mlx5_fc_id(tx->fc->drop); + dest[dstn].counter = tx->fc->drop; dstn++; break; default: @@ -1913,7 +1913,7 @@ static int rx_add_policy(struct mlx5e_ipsec_pol_entry *pol_entry) case XFRM_POLICY_BLOCK: flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT; dest[dstn].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER; - dest[dstn].counter_id = mlx5_fc_id(rx->fc->drop); + 
dest[dstn].counter = rx->fc->drop; dstn++; break; default: diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 6b3b1afe8312..9ba99609999f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -1282,7 +1282,7 @@ mlx5e_add_offloaded_nic_rule(struct mlx5e_priv *priv, if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) { dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER; - dest[dest_ix].counter_id = mlx5_fc_id(attr->counter); + dest[dest_ix].counter = attr->counter; dest_ix++; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c index 6b4c9ffad95b..7dd1dc3f77c7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c @@ -135,7 +135,7 @@ int esw_acl_egress_lgcy_setup(struct mlx5_eswitch *esw, if (drop_counter) { flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_COUNT; drop_ctr_dst.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER; - drop_ctr_dst.counter_id = mlx5_fc_id(drop_counter); + drop_ctr_dst.counter = drop_counter; dst = &drop_ctr_dst; dest_num++; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c index 093ed86a0acd..1c37098e09ea 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c @@ -260,7 +260,7 @@ int esw_acl_ingress_lgcy_setup(struct mlx5_eswitch *esw, if (counter) { flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_COUNT; drop_ctr_dst.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER; - drop_ctr_dst.counter_id = mlx5_fc_id(counter); + drop_ctr_dst.counter = counter; dst = &drop_ctr_dst; dest_num++; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c index c5ea1d1d2b03..5f647358a05c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c @@ -570,7 +570,8 @@ mlx5_esw_bridge_egress_table_cleanup(struct mlx5_esw_bridge *bridge) static struct mlx5_flow_handle * mlx5_esw_bridge_ingress_flow_with_esw_create(u16 vport_num, const unsigned char *addr, - struct mlx5_esw_bridge_vlan *vlan, u32 counter_id, + struct mlx5_esw_bridge_vlan *vlan, + struct mlx5_fc *counter, struct mlx5_esw_bridge *bridge, struct mlx5_eswitch *esw) { @@ -628,7 +629,7 @@ mlx5_esw_bridge_ingress_flow_with_esw_create(u16 vport_num, const unsigned char dests[0].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; dests[0].ft = bridge->egress_ft; dests[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER; - dests[1].counter_id = counter_id; + dests[1].counter = counter; handle = mlx5_add_flow_rules(br_offloads->ingress_ft, rule_spec, &flow_act, dests, ARRAY_SIZE(dests)); @@ -639,17 +640,19 @@ mlx5_esw_bridge_ingress_flow_with_esw_create(u16 vport_num, const unsigned char static struct mlx5_flow_handle * mlx5_esw_bridge_ingress_flow_create(u16 vport_num, const unsigned char *addr, - struct mlx5_esw_bridge_vlan *vlan, u32 counter_id, + struct mlx5_esw_bridge_vlan *vlan, + struct mlx5_fc *counter, struct mlx5_esw_bridge *bridge) { - return mlx5_esw_bridge_ingress_flow_with_esw_create(vport_num, addr, vlan, counter_id, + return mlx5_esw_bridge_ingress_flow_with_esw_create(vport_num, addr, vlan, counter, bridge, bridge->br_offloads->esw); } 
static struct mlx5_flow_handle * mlx5_esw_bridge_ingress_flow_peer_create(u16 vport_num, u16 esw_owner_vhca_id, const unsigned char *addr, - struct mlx5_esw_bridge_vlan *vlan, u32 counter_id, + struct mlx5_esw_bridge_vlan *vlan, + struct mlx5_fc *counter, struct mlx5_esw_bridge *bridge) { struct mlx5_devcom_comp_dev *devcom = bridge->br_offloads->esw->devcom, *pos; @@ -671,7 +674,7 @@ mlx5_esw_bridge_ingress_flow_peer_create(u16 vport_num, u16 esw_owner_vhca_id, goto out; } - handle = mlx5_esw_bridge_ingress_flow_with_esw_create(vport_num, addr, vlan, counter_id, + handle = mlx5_esw_bridge_ingress_flow_with_esw_create(vport_num, addr, vlan, counter, bridge, peer_esw); out: @@ -1385,10 +1388,9 @@ mlx5_esw_bridge_fdb_entry_init(struct net_device *dev, u16 vport_num, u16 esw_ow handle = peer ? mlx5_esw_bridge_ingress_flow_peer_create(vport_num, esw_owner_vhca_id, - addr, vlan, mlx5_fc_id(counter), - bridge) : + addr, vlan, counter, bridge) : mlx5_esw_bridge_ingress_flow_create(vport_num, addr, vlan, - mlx5_fc_id(counter), bridge); + counter, bridge); if (IS_ERR(handle)) { err = PTR_ERR(handle); esw_warn(esw->dev, "Failed to create ingress flow(vport=%u,err=%d,peer=%d)\n", diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index d5b42b3a19fd..8636f0485800 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -724,7 +724,7 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw, if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) { dest[i].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER; - dest[i].counter_id = mlx5_fc_id(attr->counter); + dest[i].counter = attr->counter; i++; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c index 676005854dad..6bf0aade69d7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c @@ -718,7 +718,7 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev, continue; MLX5_SET(flow_counter_list, in_dests, flow_counter_id, - dst->dest_attr.counter_id); + mlx5_fc_id(dst->dest_attr.counter)); in_dests += dst_cnt_size; list_size++; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index 2eabfcc247c6..f781f8f169b9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -658,6 +658,7 @@ static void del_sw_hw_rule(struct fs_node *node) BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION) | BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS); fte->act_dests.action.action &= ~MLX5_FLOW_CONTEXT_ACTION_COUNT; + mlx5_fc_local_destroy(rule->dest_attr.counter); goto out; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c index 62d0c689796b..7d56deaa4609 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c @@ -43,6 +43,11 @@ #define MLX5_FC_POOL_MAX_THRESHOLD BIT(18) #define MLX5_FC_POOL_USED_BUFF_RATIO 10 +enum mlx5_fc_type { + MLX5_FC_TYPE_ACQUIRED = 0, + MLX5_FC_TYPE_LOCAL, +}; + struct mlx5_fc_cache { u64 packets; u64 bytes; @@ -52,6 +57,7 @@ struct mlx5_fc_cache { struct mlx5_fc { u32 id; bool aging; + enum mlx5_fc_type type; struct mlx5_fc_bulk *bulk; struct mlx5_fc_cache cache; /* last{packets,bytes} are used for 
calculating deltas since last reading. */ @@ -186,6 +192,9 @@ static void mlx5_fc_release(struct mlx5_core_dev *dev, struct mlx5_fc *counter) { struct mlx5_fc_stats *fc_stats = dev->priv.fc_stats; + if (WARN_ON(counter->type == MLX5_FC_TYPE_LOCAL)) + return; + if (counter->bulk) mlx5_fc_pool_release_counter(&fc_stats->fc_pool, counter); else @@ -536,6 +545,50 @@ static int mlx5_fc_bulk_release_fc(struct mlx5_fc_bulk *bulk, struct mlx5_fc *fc return 0; } +/** + * mlx5_fc_local_create - Allocate mlx5_fc struct for a counter which + * was already acquired using its counter id and bulk data. + * + * @counter_id: counter acquired counter id + * @offset: counter offset from bulk base + * @bulk_size: counter's bulk size as was allocated + * + * Return: Pointer to mlx5_fc on success, ERR_PTR otherwise. + */ +struct mlx5_fc * +mlx5_fc_local_create(u32 counter_id, u32 offset, u32 bulk_size) +{ + struct mlx5_fc_bulk *fc_bulk; + struct mlx5_fc *counter; + + counter = kzalloc(sizeof(*counter), GFP_KERNEL); + if (!counter) + return ERR_PTR(-ENOMEM); + fc_bulk = kzalloc(sizeof(*fc_bulk), GFP_KERNEL); + if (!fc_bulk) { + kfree(counter); + return ERR_PTR(-ENOMEM); + } + + counter->type = MLX5_FC_TYPE_LOCAL; + counter->id = counter_id; + fc_bulk->base_id = counter_id - offset; + fc_bulk->bulk_len = bulk_size; + counter->bulk = fc_bulk; + return counter; +} +EXPORT_SYMBOL(mlx5_fc_local_create); + +void mlx5_fc_local_destroy(struct mlx5_fc *counter) +{ + if (!counter || counter->type != MLX5_FC_TYPE_LOCAL) + return; + + kfree(counter->bulk); + kfree(counter); +} +EXPORT_SYMBOL(mlx5_fc_local_destroy); + /* Flow counters pool API */ static void mlx5_fc_pool_init(struct mlx5_fc_pool *fc_pool, struct mlx5_core_dev *dev) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/macsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/macsec_fs.c index 4a078113e292..762d55ba9e51 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/macsec_fs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/macsec_fs.c @@ -497,7 +497,7 @@ static int macsec_fs_tx_create(struct mlx5_macsec_fs *macsec_fs) memset(&dest, 0, sizeof(struct mlx5_flow_destination)); memset(&flow_act, 0, sizeof(flow_act)); dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER; - dest.counter_id = mlx5_fc_id(tx_tables->check_miss_rule_counter); + dest.counter = tx_tables->check_miss_rule_counter; flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT; rule = mlx5_add_flow_rules(tx_tables->ft_check, NULL, &flow_act, &dest, 1); if (IS_ERR(rule)) { @@ -519,7 +519,7 @@ static int macsec_fs_tx_create(struct mlx5_macsec_fs *macsec_fs) flow_act.flags = FLOW_ACT_NO_APPEND; flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW | MLX5_FLOW_CONTEXT_ACTION_COUNT; dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER; - dest.counter_id = mlx5_fc_id(tx_tables->check_rule_counter); + dest.counter = tx_tables->check_rule_counter; rule = mlx5_add_flow_rules(tx_tables->ft_check, spec, &flow_act, &dest, 1); if (IS_ERR(rule)) { err = PTR_ERR(rule); @@ -1200,7 +1200,7 @@ static int macsec_fs_rx_create_check_decap_rule(struct mlx5_macsec_fs *macsec_fs flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT | MLX5_FLOW_CONTEXT_ACTION_COUNT; roce_dest[dstn].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER; - roce_dest[dstn].counter_id = mlx5_fc_id(rx_tables->check_rule_counter); + roce_dest[dstn].counter = rx_tables->check_rule_counter; rule = mlx5_add_flow_rules(rx_tables->ft_check, spec, flow_act, roce_dest, dstn + 1); if (IS_ERR(rule)) { @@ -1592,7 +1592,7 @@ static int 
macsec_fs_rx_create(struct mlx5_macsec_fs *macsec_fs) memset(&flow_act, 0, sizeof(flow_act)); dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER; - dest.counter_id = mlx5_fc_id(rx_tables->check_miss_rule_counter); + dest.counter = rx_tables->check_miss_rule_counter; flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT; rule = mlx5_add_flow_rules(rx_tables->ft_check, NULL, &flow_act, &dest, 1); if (IS_ERR(rule)) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/fs_dr.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/fs_dr.c index 4b349d4005e4..8007d3f523c9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/fs_dr.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/fs_dr.c @@ -521,7 +521,7 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns, goto free_actions; } - id = dst->dest_attr.counter_id; + id = mlx5_fc_id(dst->dest_attr.counter); tmp_action = mlx5dr_action_create_flow_counter(id); if (!tmp_action) { diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c index 5f581e71e201..36099047560d 100644 --- a/drivers/vdpa/mlx5/net/mlx5_vnet.c +++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c @@ -1952,7 +1952,7 @@ static int mlx5_vdpa_add_mac_vlan_rules(struct mlx5_vdpa_net *ndev, u8 *mac, goto out_free; #if defined(CONFIG_MLX5_VDPA_STEERING_DEBUG) - dests[1].counter_id = mlx5_fc_id(node->ucast_counter.counter); + dests[1].counter = node->ucast_counter.counter; #endif node->ucast_rule = mlx5_add_flow_rules(ndev->rxft, spec, &flow_act, dests, NUM_DESTS); if (IS_ERR(node->ucast_rule)) { @@ -1961,7 +1961,7 @@ static int mlx5_vdpa_add_mac_vlan_rules(struct mlx5_vdpa_net *ndev, u8 *mac, } #if defined(CONFIG_MLX5_VDPA_STEERING_DEBUG) - dests[1].counter_id = mlx5_fc_id(node->mcast_counter.counter); + dests[1].counter = node->mcast_counter.counter; #endif memset(dmac_c, 0, ETH_ALEN); diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h index 438db888bde0..2a69d9d71276 100644 --- a/include/linux/mlx5/fs.h +++ b/include/linux/mlx5/fs.h @@ -163,7 +163,7 @@ struct mlx5_flow_destination { u32 tir_num; u32 ft_num; struct mlx5_flow_table *ft; - u32 counter_id; + struct mlx5_fc *counter; struct { u16 num; u16 vhca_id; @@ -299,6 +299,8 @@ int mlx5_modify_rule_destination(struct mlx5_flow_handle *handler, struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging); void mlx5_fc_destroy(struct mlx5_core_dev *dev, struct mlx5_fc *counter); +struct mlx5_fc *mlx5_fc_local_create(u32 counter_id, u32 offset, u32 bulk_size); +void mlx5_fc_local_destroy(struct mlx5_fc *counter); u64 mlx5_fc_query_lastuse(struct mlx5_fc *counter); void mlx5_fc_query_cached(struct mlx5_fc *counter, u64 *bytes, u64 *packets, u64 *lastuse); -- 2.51.0 From 31d1356b8fdcdb7fe845874b598cce552a151c64 Mon Sep 17 00:00:00 2001 From: Moshe Shemesh Date: Thu, 19 Dec 2024 19:58:34 +0200 Subject: [PATCH 12/16] net/mlx5: fs, add mlx5_fs_pool API Refactor fc_pool API to create generic fs_pool API, as HW steering has more flow steering elements which can take advantage of the same pool of bulks API. Change fs_counters code to use the fs_pool API. Note, removed __counted_by from struct mlx5_fc_bulk as bulk_len is now inner struct member. It will be added back once __counted_by can support inner struct members. 
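As an illustrative sketch only (not part of this patch): a prospective user of the generic
pool embeds struct mlx5_fs_bulk in its own bulk type and supplies the three callbacks.
The my_obj_* names and the bulk size / threshold policy below are hypothetical placeholders;
the mlx5_fs_* calls are the API added by this patch:

    struct my_obj_bulk {
        struct mlx5_fs_bulk fs_bulk;    /* generic part, must be embedded */
        /* per-bulk HW resources of the user would go here */
    };

    static struct mlx5_fs_bulk *my_obj_bulk_create(struct mlx5_core_dev *dev)
    {
        struct my_obj_bulk *bulk = kvzalloc(sizeof(*bulk), GFP_KERNEL);

        if (!bulk)
            return NULL;
        /* 64 units per bulk is an arbitrary example value */
        if (mlx5_fs_bulk_init(dev, &bulk->fs_bulk, 64)) {
            kvfree(bulk);
            return NULL;
        }
        return &bulk->fs_bulk;
    }

    static int my_obj_bulk_destroy(struct mlx5_core_dev *dev,
                                   struct mlx5_fs_bulk *fs_bulk)
    {
        struct my_obj_bulk *bulk = container_of(fs_bulk, struct my_obj_bulk, fs_bulk);

        mlx5_fs_bulk_cleanup(fs_bulk);
        kvfree(bulk);
        return 0;
    }

    static void my_obj_pool_update_threshold(struct mlx5_fs_pool *pool)
    {
        pool->threshold = pool->used_units / 10;    /* example policy */
    }

    static const struct mlx5_fs_pool_ops my_obj_pool_ops = {
        .bulk_create = my_obj_bulk_create,
        .bulk_destroy = my_obj_bulk_destroy,
        .update_threshold = my_obj_pool_update_threshold,
    };

Units are then obtained and returned through the pool rather than the bulk directly:

    struct mlx5_fs_pool_index idx = {};

    mlx5_fs_pool_init(&pool, dev, &my_obj_pool_ops);
    err = mlx5_fs_pool_acquire_index(&pool, &idx);
    /* use idx.fs_bulk + idx.index to locate the unit */
    mlx5_fs_pool_release_index(&pool, &idx);
    mlx5_fs_pool_cleanup(&pool);
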
Signed-off-by: Moshe Shemesh Reviewed-by: Yevgeny Kliteynik Reviewed-by: Mark Bloch Signed-off-by: Tariq Toukan Link: https://patch.msgid.link/20241219175841.1094544-5-tariqt@nvidia.com Signed-off-by: Jakub Kicinski --- .../net/ethernet/mellanox/mlx5/core/Makefile | 2 +- .../ethernet/mellanox/mlx5/core/fs_counters.c | 286 +++++------------- .../net/ethernet/mellanox/mlx5/core/fs_pool.c | 194 ++++++++++++ .../net/ethernet/mellanox/mlx5/core/fs_pool.h | 54 ++++ 4 files changed, 327 insertions(+), 209 deletions(-) create mode 100644 drivers/net/ethernet/mellanox/mlx5/core/fs_pool.c create mode 100644 drivers/net/ethernet/mellanox/mlx5/core/fs_pool.h diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile index be3d0876c521..79fe09de0a9f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile +++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile @@ -17,7 +17,7 @@ mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \ fs_counters.o fs_ft_pool.o rl.o lag/debugfs.o lag/lag.o dev.o events.o wq.o lib/gid.o \ lib/devcom.o lib/pci_vsc.o lib/dm.o lib/fs_ttc.o diag/fs_tracepoint.o \ diag/fw_tracer.o diag/crdump.o devlink.o diag/rsc_dump.o diag/reporter_vnic.o \ - fw_reset.o qos.o lib/tout.o lib/aso.o wc.o + fw_reset.o qos.o lib/tout.o lib/aso.o wc.o fs_pool.o # # Netdev basic diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c index 7d56deaa4609..d8e1c4ebd364 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c @@ -34,6 +34,7 @@ #include #include "mlx5_core.h" #include "fs_core.h" +#include "fs_pool.h" #include "fs_cmd.h" #define MLX5_FC_STATS_PERIOD msecs_to_jiffies(1000) @@ -65,17 +66,6 @@ struct mlx5_fc { u64 lastbytes; }; -struct mlx5_fc_pool { - struct mlx5_core_dev *dev; - struct mutex pool_lock; /* protects pool lists */ - struct list_head fully_used; - struct list_head partially_used; - struct list_head unused; - int available_fcs; - int used_fcs; - int threshold; -}; - struct mlx5_fc_stats { struct xarray counters; @@ -86,13 +76,13 @@ struct mlx5_fc_stats { int bulk_query_len; bool bulk_query_alloc_failed; unsigned long next_bulk_query_alloc; - struct mlx5_fc_pool fc_pool; + struct mlx5_fs_pool fc_pool; }; -static void mlx5_fc_pool_init(struct mlx5_fc_pool *fc_pool, struct mlx5_core_dev *dev); -static void mlx5_fc_pool_cleanup(struct mlx5_fc_pool *fc_pool); -static struct mlx5_fc *mlx5_fc_pool_acquire_counter(struct mlx5_fc_pool *fc_pool); -static void mlx5_fc_pool_release_counter(struct mlx5_fc_pool *fc_pool, struct mlx5_fc *fc); +static void mlx5_fc_pool_init(struct mlx5_fs_pool *fc_pool, struct mlx5_core_dev *dev); +static void mlx5_fc_pool_cleanup(struct mlx5_fs_pool *fc_pool); +static struct mlx5_fc *mlx5_fc_pool_acquire_counter(struct mlx5_fs_pool *fc_pool); +static void mlx5_fc_pool_release_counter(struct mlx5_fs_pool *fc_pool, struct mlx5_fc *fc); static int get_init_bulk_query_len(struct mlx5_core_dev *dev) { @@ -447,11 +437,9 @@ void mlx5_fc_update_sampling_interval(struct mlx5_core_dev *dev, /* Flow counter bluks */ struct mlx5_fc_bulk { - struct list_head pool_list; + struct mlx5_fs_bulk fs_bulk; u32 base_id; - int bulk_len; - unsigned long *bitmask; - struct mlx5_fc fcs[] __counted_by(bulk_len); + struct mlx5_fc fcs[]; }; static void mlx5_fc_init(struct mlx5_fc *counter, struct mlx5_fc_bulk *bulk, @@ -461,16 +449,10 @@ static void mlx5_fc_init(struct mlx5_fc 
*counter, struct mlx5_fc_bulk *bulk, counter->id = id; } -static int mlx5_fc_bulk_get_free_fcs_amount(struct mlx5_fc_bulk *bulk) -{ - return bitmap_weight(bulk->bitmask, bulk->bulk_len); -} - -static struct mlx5_fc_bulk *mlx5_fc_bulk_create(struct mlx5_core_dev *dev) +static struct mlx5_fs_bulk *mlx5_fc_bulk_create(struct mlx5_core_dev *dev) { enum mlx5_fc_bulk_alloc_bitmask alloc_bitmask; - struct mlx5_fc_bulk *bulk; - int err = -ENOMEM; + struct mlx5_fc_bulk *fc_bulk; int bulk_len; u32 base_id; int i; @@ -478,71 +460,97 @@ static struct mlx5_fc_bulk *mlx5_fc_bulk_create(struct mlx5_core_dev *dev) alloc_bitmask = MLX5_CAP_GEN(dev, flow_counter_bulk_alloc); bulk_len = alloc_bitmask > 0 ? MLX5_FC_BULK_NUM_FCS(alloc_bitmask) : 1; - bulk = kvzalloc(struct_size(bulk, fcs, bulk_len), GFP_KERNEL); - if (!bulk) - goto err_alloc_bulk; + fc_bulk = kvzalloc(struct_size(fc_bulk, fcs, bulk_len), GFP_KERNEL); + if (!fc_bulk) + return NULL; - bulk->bitmask = kvcalloc(BITS_TO_LONGS(bulk_len), sizeof(unsigned long), - GFP_KERNEL); - if (!bulk->bitmask) - goto err_alloc_bitmask; + if (mlx5_fs_bulk_init(dev, &fc_bulk->fs_bulk, bulk_len)) + goto fc_bulk_free; - err = mlx5_cmd_fc_bulk_alloc(dev, alloc_bitmask, &base_id); - if (err) - goto err_mlx5_cmd_bulk_alloc; - - bulk->base_id = base_id; - bulk->bulk_len = bulk_len; - for (i = 0; i < bulk_len; i++) { - mlx5_fc_init(&bulk->fcs[i], bulk, base_id + i); - set_bit(i, bulk->bitmask); - } + if (mlx5_cmd_fc_bulk_alloc(dev, alloc_bitmask, &base_id)) + goto fs_bulk_cleanup; + fc_bulk->base_id = base_id; + for (i = 0; i < bulk_len; i++) + mlx5_fc_init(&fc_bulk->fcs[i], fc_bulk, base_id + i); - return bulk; + return &fc_bulk->fs_bulk; -err_mlx5_cmd_bulk_alloc: - kvfree(bulk->bitmask); -err_alloc_bitmask: - kvfree(bulk); -err_alloc_bulk: - return ERR_PTR(err); +fs_bulk_cleanup: + mlx5_fs_bulk_cleanup(&fc_bulk->fs_bulk); +fc_bulk_free: + kvfree(fc_bulk); + return NULL; } static int -mlx5_fc_bulk_destroy(struct mlx5_core_dev *dev, struct mlx5_fc_bulk *bulk) +mlx5_fc_bulk_destroy(struct mlx5_core_dev *dev, struct mlx5_fs_bulk *fs_bulk) { - if (mlx5_fc_bulk_get_free_fcs_amount(bulk) < bulk->bulk_len) { + struct mlx5_fc_bulk *fc_bulk = container_of(fs_bulk, + struct mlx5_fc_bulk, + fs_bulk); + + if (mlx5_fs_bulk_get_free_amount(fs_bulk) < fs_bulk->bulk_len) { mlx5_core_err(dev, "Freeing bulk before all counters were released\n"); return -EBUSY; } - mlx5_cmd_fc_free(dev, bulk->base_id); - kvfree(bulk->bitmask); - kvfree(bulk); + mlx5_cmd_fc_free(dev, fc_bulk->base_id); + mlx5_fs_bulk_cleanup(fs_bulk); + kvfree(fc_bulk); return 0; } -static struct mlx5_fc *mlx5_fc_bulk_acquire_fc(struct mlx5_fc_bulk *bulk) +static void mlx5_fc_pool_update_threshold(struct mlx5_fs_pool *fc_pool) { - int free_fc_index = find_first_bit(bulk->bitmask, bulk->bulk_len); + fc_pool->threshold = min_t(int, MLX5_FC_POOL_MAX_THRESHOLD, + fc_pool->used_units / MLX5_FC_POOL_USED_BUFF_RATIO); +} - if (free_fc_index >= bulk->bulk_len) - return ERR_PTR(-ENOSPC); +/* Flow counters pool API */ - clear_bit(free_fc_index, bulk->bitmask); - return &bulk->fcs[free_fc_index]; +static const struct mlx5_fs_pool_ops mlx5_fc_pool_ops = { + .bulk_destroy = mlx5_fc_bulk_destroy, + .bulk_create = mlx5_fc_bulk_create, + .update_threshold = mlx5_fc_pool_update_threshold, +}; + +static void +mlx5_fc_pool_init(struct mlx5_fs_pool *fc_pool, struct mlx5_core_dev *dev) +{ + mlx5_fs_pool_init(fc_pool, dev, &mlx5_fc_pool_ops); } -static int mlx5_fc_bulk_release_fc(struct mlx5_fc_bulk *bulk, struct mlx5_fc *fc) +static void 
mlx5_fc_pool_cleanup(struct mlx5_fs_pool *fc_pool) { - int fc_index = fc->id - bulk->base_id; + mlx5_fs_pool_cleanup(fc_pool); +} - if (test_bit(fc_index, bulk->bitmask)) - return -EINVAL; +static struct mlx5_fc * +mlx5_fc_pool_acquire_counter(struct mlx5_fs_pool *fc_pool) +{ + struct mlx5_fs_pool_index pool_index = {}; + struct mlx5_fc_bulk *fc_bulk; + int err; - set_bit(fc_index, bulk->bitmask); - return 0; + err = mlx5_fs_pool_acquire_index(fc_pool, &pool_index); + if (err) + return ERR_PTR(err); + fc_bulk = container_of(pool_index.fs_bulk, struct mlx5_fc_bulk, fs_bulk); + return &fc_bulk->fcs[pool_index.index]; +} + +static void +mlx5_fc_pool_release_counter(struct mlx5_fs_pool *fc_pool, struct mlx5_fc *fc) +{ + struct mlx5_fs_bulk *fs_bulk = &fc->bulk->fs_bulk; + struct mlx5_fs_pool_index pool_index = {}; + struct mlx5_core_dev *dev = fc_pool->dev; + + pool_index.fs_bulk = fs_bulk; + pool_index.index = fc->id - fc->bulk->base_id; + if (mlx5_fs_pool_release_index(fc_pool, &pool_index)) + mlx5_core_warn(dev, "Attempted to release a counter which is not acquired\n"); } /** @@ -573,7 +581,7 @@ mlx5_fc_local_create(u32 counter_id, u32 offset, u32 bulk_size) counter->type = MLX5_FC_TYPE_LOCAL; counter->id = counter_id; fc_bulk->base_id = counter_id - offset; - fc_bulk->bulk_len = bulk_size; + fc_bulk->fs_bulk.bulk_len = bulk_size; counter->bulk = fc_bulk; return counter; } @@ -588,141 +596,3 @@ void mlx5_fc_local_destroy(struct mlx5_fc *counter) kfree(counter); } EXPORT_SYMBOL(mlx5_fc_local_destroy); - -/* Flow counters pool API */ - -static void mlx5_fc_pool_init(struct mlx5_fc_pool *fc_pool, struct mlx5_core_dev *dev) -{ - fc_pool->dev = dev; - mutex_init(&fc_pool->pool_lock); - INIT_LIST_HEAD(&fc_pool->fully_used); - INIT_LIST_HEAD(&fc_pool->partially_used); - INIT_LIST_HEAD(&fc_pool->unused); - fc_pool->available_fcs = 0; - fc_pool->used_fcs = 0; - fc_pool->threshold = 0; -} - -static void mlx5_fc_pool_cleanup(struct mlx5_fc_pool *fc_pool) -{ - struct mlx5_core_dev *dev = fc_pool->dev; - struct mlx5_fc_bulk *bulk; - struct mlx5_fc_bulk *tmp; - - list_for_each_entry_safe(bulk, tmp, &fc_pool->fully_used, pool_list) - mlx5_fc_bulk_destroy(dev, bulk); - list_for_each_entry_safe(bulk, tmp, &fc_pool->partially_used, pool_list) - mlx5_fc_bulk_destroy(dev, bulk); - list_for_each_entry_safe(bulk, tmp, &fc_pool->unused, pool_list) - mlx5_fc_bulk_destroy(dev, bulk); -} - -static void mlx5_fc_pool_update_threshold(struct mlx5_fc_pool *fc_pool) -{ - fc_pool->threshold = min_t(int, MLX5_FC_POOL_MAX_THRESHOLD, - fc_pool->used_fcs / MLX5_FC_POOL_USED_BUFF_RATIO); -} - -static struct mlx5_fc_bulk * -mlx5_fc_pool_alloc_new_bulk(struct mlx5_fc_pool *fc_pool) -{ - struct mlx5_core_dev *dev = fc_pool->dev; - struct mlx5_fc_bulk *new_bulk; - - new_bulk = mlx5_fc_bulk_create(dev); - if (!IS_ERR(new_bulk)) - fc_pool->available_fcs += new_bulk->bulk_len; - mlx5_fc_pool_update_threshold(fc_pool); - return new_bulk; -} - -static void -mlx5_fc_pool_free_bulk(struct mlx5_fc_pool *fc_pool, struct mlx5_fc_bulk *bulk) -{ - struct mlx5_core_dev *dev = fc_pool->dev; - - fc_pool->available_fcs -= bulk->bulk_len; - mlx5_fc_bulk_destroy(dev, bulk); - mlx5_fc_pool_update_threshold(fc_pool); -} - -static struct mlx5_fc * -mlx5_fc_pool_acquire_from_list(struct list_head *src_list, - struct list_head *next_list, - bool move_non_full_bulk) -{ - struct mlx5_fc_bulk *bulk; - struct mlx5_fc *fc; - - if (list_empty(src_list)) - return ERR_PTR(-ENODATA); - - bulk = list_first_entry(src_list, struct mlx5_fc_bulk, pool_list); - fc = 
mlx5_fc_bulk_acquire_fc(bulk); - if (move_non_full_bulk || mlx5_fc_bulk_get_free_fcs_amount(bulk) == 0) - list_move(&bulk->pool_list, next_list); - return fc; -} - -static struct mlx5_fc * -mlx5_fc_pool_acquire_counter(struct mlx5_fc_pool *fc_pool) -{ - struct mlx5_fc_bulk *new_bulk; - struct mlx5_fc *fc; - - mutex_lock(&fc_pool->pool_lock); - - fc = mlx5_fc_pool_acquire_from_list(&fc_pool->partially_used, - &fc_pool->fully_used, false); - if (IS_ERR(fc)) - fc = mlx5_fc_pool_acquire_from_list(&fc_pool->unused, - &fc_pool->partially_used, - true); - if (IS_ERR(fc)) { - new_bulk = mlx5_fc_pool_alloc_new_bulk(fc_pool); - if (IS_ERR(new_bulk)) { - fc = ERR_CAST(new_bulk); - goto out; - } - fc = mlx5_fc_bulk_acquire_fc(new_bulk); - list_add(&new_bulk->pool_list, &fc_pool->partially_used); - } - fc_pool->available_fcs--; - fc_pool->used_fcs++; - -out: - mutex_unlock(&fc_pool->pool_lock); - return fc; -} - -static void -mlx5_fc_pool_release_counter(struct mlx5_fc_pool *fc_pool, struct mlx5_fc *fc) -{ - struct mlx5_core_dev *dev = fc_pool->dev; - struct mlx5_fc_bulk *bulk = fc->bulk; - int bulk_free_fcs_amount; - - mutex_lock(&fc_pool->pool_lock); - - if (mlx5_fc_bulk_release_fc(bulk, fc)) { - mlx5_core_warn(dev, "Attempted to release a counter which is not acquired\n"); - goto unlock; - } - - fc_pool->available_fcs++; - fc_pool->used_fcs--; - - bulk_free_fcs_amount = mlx5_fc_bulk_get_free_fcs_amount(bulk); - if (bulk_free_fcs_amount == 1) - list_move_tail(&bulk->pool_list, &fc_pool->partially_used); - if (bulk_free_fcs_amount == bulk->bulk_len) { - list_del(&bulk->pool_list); - if (fc_pool->available_fcs > fc_pool->threshold) - mlx5_fc_pool_free_bulk(fc_pool, bulk); - else - list_add(&bulk->pool_list, &fc_pool->unused); - } - -unlock: - mutex_unlock(&fc_pool->pool_lock); -} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_pool.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_pool.c new file mode 100644 index 000000000000..b891d7b9e3e0 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_pool.c @@ -0,0 +1,194 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */ + +#include +#include "fs_pool.h" + +int mlx5_fs_bulk_init(struct mlx5_core_dev *dev, struct mlx5_fs_bulk *fs_bulk, + int bulk_len) +{ + int i; + + fs_bulk->bitmask = kvcalloc(BITS_TO_LONGS(bulk_len), sizeof(unsigned long), + GFP_KERNEL); + if (!fs_bulk->bitmask) + return -ENOMEM; + + fs_bulk->bulk_len = bulk_len; + for (i = 0; i < bulk_len; i++) + set_bit(i, fs_bulk->bitmask); + + return 0; +} + +void mlx5_fs_bulk_cleanup(struct mlx5_fs_bulk *fs_bulk) +{ + kvfree(fs_bulk->bitmask); +} + +int mlx5_fs_bulk_get_free_amount(struct mlx5_fs_bulk *bulk) +{ + return bitmap_weight(bulk->bitmask, bulk->bulk_len); +} + +static int mlx5_fs_bulk_acquire_index(struct mlx5_fs_bulk *fs_bulk, + struct mlx5_fs_pool_index *pool_index) +{ + int free_index = find_first_bit(fs_bulk->bitmask, fs_bulk->bulk_len); + + WARN_ON_ONCE(!pool_index || !fs_bulk); + if (free_index >= fs_bulk->bulk_len) + return -ENOSPC; + + clear_bit(free_index, fs_bulk->bitmask); + pool_index->fs_bulk = fs_bulk; + pool_index->index = free_index; + return 0; +} + +static int mlx5_fs_bulk_release_index(struct mlx5_fs_bulk *fs_bulk, int index) +{ + if (test_bit(index, fs_bulk->bitmask)) + return -EINVAL; + + set_bit(index, fs_bulk->bitmask); + return 0; +} + +void mlx5_fs_pool_init(struct mlx5_fs_pool *pool, struct mlx5_core_dev *dev, + const struct mlx5_fs_pool_ops *ops) +{ + WARN_ON_ONCE(!ops || !ops->bulk_destroy 
|| !ops->bulk_create || + !ops->update_threshold); + pool->dev = dev; + mutex_init(&pool->pool_lock); + INIT_LIST_HEAD(&pool->fully_used); + INIT_LIST_HEAD(&pool->partially_used); + INIT_LIST_HEAD(&pool->unused); + pool->available_units = 0; + pool->used_units = 0; + pool->threshold = 0; + pool->ops = ops; +} + +void mlx5_fs_pool_cleanup(struct mlx5_fs_pool *pool) +{ + struct mlx5_core_dev *dev = pool->dev; + struct mlx5_fs_bulk *bulk; + struct mlx5_fs_bulk *tmp; + + list_for_each_entry_safe(bulk, tmp, &pool->fully_used, pool_list) + pool->ops->bulk_destroy(dev, bulk); + list_for_each_entry_safe(bulk, tmp, &pool->partially_used, pool_list) + pool->ops->bulk_destroy(dev, bulk); + list_for_each_entry_safe(bulk, tmp, &pool->unused, pool_list) + pool->ops->bulk_destroy(dev, bulk); +} + +static struct mlx5_fs_bulk * +mlx5_fs_pool_alloc_new_bulk(struct mlx5_fs_pool *fs_pool) +{ + struct mlx5_core_dev *dev = fs_pool->dev; + struct mlx5_fs_bulk *new_bulk; + + new_bulk = fs_pool->ops->bulk_create(dev); + if (new_bulk) + fs_pool->available_units += new_bulk->bulk_len; + fs_pool->ops->update_threshold(fs_pool); + return new_bulk; +} + +static void +mlx5_fs_pool_free_bulk(struct mlx5_fs_pool *fs_pool, struct mlx5_fs_bulk *bulk) +{ + struct mlx5_core_dev *dev = fs_pool->dev; + + fs_pool->available_units -= bulk->bulk_len; + fs_pool->ops->bulk_destroy(dev, bulk); + fs_pool->ops->update_threshold(fs_pool); +} + +static int +mlx5_fs_pool_acquire_from_list(struct list_head *src_list, + struct list_head *next_list, + bool move_non_full_bulk, + struct mlx5_fs_pool_index *pool_index) +{ + struct mlx5_fs_bulk *fs_bulk; + int err; + + if (list_empty(src_list)) + return -ENODATA; + + fs_bulk = list_first_entry(src_list, struct mlx5_fs_bulk, pool_list); + err = mlx5_fs_bulk_acquire_index(fs_bulk, pool_index); + if (move_non_full_bulk || mlx5_fs_bulk_get_free_amount(fs_bulk) == 0) + list_move(&fs_bulk->pool_list, next_list); + return err; +} + +int mlx5_fs_pool_acquire_index(struct mlx5_fs_pool *fs_pool, + struct mlx5_fs_pool_index *pool_index) +{ + struct mlx5_fs_bulk *new_bulk; + int err; + + mutex_lock(&fs_pool->pool_lock); + + err = mlx5_fs_pool_acquire_from_list(&fs_pool->partially_used, + &fs_pool->fully_used, false, + pool_index); + if (err) + err = mlx5_fs_pool_acquire_from_list(&fs_pool->unused, + &fs_pool->partially_used, + true, pool_index); + if (err) { + new_bulk = mlx5_fs_pool_alloc_new_bulk(fs_pool); + if (!new_bulk) { + err = -ENOENT; + goto out; + } + err = mlx5_fs_bulk_acquire_index(new_bulk, pool_index); + WARN_ON_ONCE(err); + list_add(&new_bulk->pool_list, &fs_pool->partially_used); + } + fs_pool->available_units--; + fs_pool->used_units++; + +out: + mutex_unlock(&fs_pool->pool_lock); + return err; +} + +int mlx5_fs_pool_release_index(struct mlx5_fs_pool *fs_pool, + struct mlx5_fs_pool_index *pool_index) +{ + struct mlx5_fs_bulk *bulk = pool_index->fs_bulk; + int bulk_free_amount; + int err; + + mutex_lock(&fs_pool->pool_lock); + + /* TBD would rather return void if there was no warn here in original code */ + err = mlx5_fs_bulk_release_index(bulk, pool_index->index); + if (err) + goto unlock; + + fs_pool->available_units++; + fs_pool->used_units--; + + bulk_free_amount = mlx5_fs_bulk_get_free_amount(bulk); + if (bulk_free_amount == 1) + list_move_tail(&bulk->pool_list, &fs_pool->partially_used); + if (bulk_free_amount == bulk->bulk_len) { + list_del(&bulk->pool_list); + if (fs_pool->available_units > fs_pool->threshold) + mlx5_fs_pool_free_bulk(fs_pool, bulk); + else + 
list_add(&bulk->pool_list, &fs_pool->unused); + } + +unlock: + mutex_unlock(&fs_pool->pool_lock); + return err; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_pool.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_pool.h new file mode 100644 index 000000000000..3b149863260c --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_pool.h @@ -0,0 +1,54 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */ + +#ifndef __MLX5_FS_POOL_H__ +#define __MLX5_FS_POOL_H__ + +#include + +struct mlx5_fs_bulk { + struct list_head pool_list; + int bulk_len; + unsigned long *bitmask; +}; + +struct mlx5_fs_pool_index { + struct mlx5_fs_bulk *fs_bulk; + int index; +}; + +struct mlx5_fs_pool; + +struct mlx5_fs_pool_ops { + int (*bulk_destroy)(struct mlx5_core_dev *dev, struct mlx5_fs_bulk *bulk); + struct mlx5_fs_bulk * (*bulk_create)(struct mlx5_core_dev *dev); + void (*update_threshold)(struct mlx5_fs_pool *pool); +}; + +struct mlx5_fs_pool { + struct mlx5_core_dev *dev; + void *pool_ctx; + const struct mlx5_fs_pool_ops *ops; + struct mutex pool_lock; /* protects pool lists */ + struct list_head fully_used; + struct list_head partially_used; + struct list_head unused; + int available_units; + int used_units; + int threshold; +}; + +int mlx5_fs_bulk_init(struct mlx5_core_dev *dev, struct mlx5_fs_bulk *fs_bulk, + int bulk_len); +void mlx5_fs_bulk_cleanup(struct mlx5_fs_bulk *fs_bulk); +int mlx5_fs_bulk_get_free_amount(struct mlx5_fs_bulk *bulk); + +void mlx5_fs_pool_init(struct mlx5_fs_pool *pool, struct mlx5_core_dev *dev, + const struct mlx5_fs_pool_ops *ops); +void mlx5_fs_pool_cleanup(struct mlx5_fs_pool *pool); +int mlx5_fs_pool_acquire_index(struct mlx5_fs_pool *fs_pool, + struct mlx5_fs_pool_index *pool_index); +int mlx5_fs_pool_release_index(struct mlx5_fs_pool *fs_pool, + struct mlx5_fs_pool_index *pool_index); + +#endif /* __MLX5_FS_POOL_H__ */ -- 2.51.0 From 586face88106481e8c527675a837da8a3ab6677d Mon Sep 17 00:00:00 2001 From: Mark Bloch Date: Thu, 19 Dec 2024 19:58:35 +0200 Subject: [PATCH 13/16] net/mlx5: fs, retry insertion to hash table on EBUSY When inserting into an rhashtable faster than it can grow, an -EBUSY error may be encountered. Modify the insertion logic to retry on -EBUSY until either a successful insertion or a genuine error is returned. 
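For illustration only (the patch below implements this with a retry label rather than a
loop), the intended behaviour is equivalent to:

    do {
        ret = rhashtable_insert_fast(&fg->ftes_hash, &fte->hash,
                                     rhash_fte);
        if (ret == -EBUSY)
            cond_resched();    /* give the rhashtable a chance to grow */
    } while (ret == -EBUSY);
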
Signed-off-by: Mark Bloch Signed-off-by: Tariq Toukan Reviewed-by: Przemek Kitszel Link: https://patch.msgid.link/20241219175841.1094544-6-tariqt@nvidia.com Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index f781f8f169b9..ae1a5705b26d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -821,11 +821,17 @@ static int insert_fte(struct mlx5_flow_group *fg, struct fs_fte *fte) return index; fte->index = index + fg->start_index; +retry_insert: ret = rhashtable_insert_fast(&fg->ftes_hash, &fte->hash, rhash_fte); - if (ret) + if (ret) { + if (ret == -EBUSY) { + cond_resched(); + goto retry_insert; + } goto err_ida_remove; + } tree_add_node(&fte->node, &fg->node); list_add_tail(&fte->node.list, &fg->node.children); -- 2.51.0 From 9a0155a709fadaab468a24abca7996c5fdf0507b Mon Sep 17 00:00:00 2001 From: Yevgeny Kliteynik Date: Thu, 19 Dec 2024 19:58:36 +0200 Subject: [PATCH 14/16] net/mlx5: HWS, no need to expose mlx5hws_send_queues_open/close No need to have mlx5hws_send_queues_open/close in header. Make them static and remove from header. Signed-off-by: Yevgeny Kliteynik Reviewed-by: Itamar Gozlan Reviewed-by: Mark Bloch Signed-off-by: Tariq Toukan Link: https://patch.msgid.link/20241219175841.1094544-7-tariqt@nvidia.com Signed-off-by: Jakub Kicinski --- .../net/ethernet/mellanox/mlx5/core/steering/hws/send.c | 8 ++++---- .../net/ethernet/mellanox/mlx5/core/steering/hws/send.h | 6 ------ 2 files changed, 4 insertions(+), 10 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.c index 883b4ed30892..b68b0c368771 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.c @@ -896,15 +896,15 @@ close_cq: return err; } -void mlx5hws_send_queue_close(struct mlx5hws_send_engine *queue) +static void mlx5hws_send_queue_close(struct mlx5hws_send_engine *queue) { hws_send_ring_close(queue); kfree(queue->completed.entries); } -int mlx5hws_send_queue_open(struct mlx5hws_context *ctx, - struct mlx5hws_send_engine *queue, - u16 queue_size) +static int mlx5hws_send_queue_open(struct mlx5hws_context *ctx, + struct mlx5hws_send_engine *queue, + u16 queue_size) { int err; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.h index b50825d6dc53..f833092235c1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.h @@ -189,12 +189,6 @@ void mlx5hws_send_abort_new_dep_wqe(struct mlx5hws_send_engine *queue); void mlx5hws_send_all_dep_wqe(struct mlx5hws_send_engine *queue); -void mlx5hws_send_queue_close(struct mlx5hws_send_engine *queue); - -int mlx5hws_send_queue_open(struct mlx5hws_context *ctx, - struct mlx5hws_send_engine *queue, - u16 queue_size); - void mlx5hws_send_queues_close(struct mlx5hws_context *ctx); int mlx5hws_send_queues_open(struct mlx5hws_context *ctx, -- 2.51.0 From 429776b6019bbdcf04dcd49706fe7de6a280078b Mon Sep 17 00:00:00 2001 From: Yevgeny Kliteynik Date: Thu, 19 Dec 2024 19:58:37 +0200 Subject: [PATCH 15/16] net/mlx5: HWS, do not initialize native API queues HWS has two types of APIs: - 
Native: fastest and slimmest, async API. The user of this API is required to manage rule handles memory, and to poll for completion for each rule. - BWC: backward compatible API, similar semantics to SWS API. This layer is implemented above native API and it does all the work for the user, so that it is easy to switch between SWS and HWS. Right now the existing users of HWS require only BWC API. Therefore, in order to not waste resources, this patch disables send queues allocation for native API. If in the future support for faster HWS rule insertion will be required (such as for Connection Tracking), native queues can be enabled. Signed-off-by: Yevgeny Kliteynik Reviewed-by: Itamar Gozlan Reviewed-by: Mark Bloch Signed-off-by: Tariq Toukan Link: https://patch.msgid.link/20241219175841.1094544-8-tariqt@nvidia.com Signed-off-by: Jakub Kicinski --- .../ethernet/mellanox/mlx5/core/steering/hws/bwc.h | 6 ++++-- .../mellanox/mlx5/core/steering/hws/context.c | 6 ++++-- .../mellanox/mlx5/core/steering/hws/context.h | 6 ++++++ .../mellanox/mlx5/core/steering/hws/mlx5hws.h | 1 - .../ethernet/mellanox/mlx5/core/steering/hws/send.c | 13 +++++++++++-- 5 files changed, 25 insertions(+), 7 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.h index 0b745968e21e..3d4965213b01 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.h @@ -60,9 +60,11 @@ void mlx5hws_bwc_rule_fill_attr(struct mlx5hws_bwc_matcher *bwc_matcher, static inline u16 mlx5hws_bwc_queues(struct mlx5hws_context *ctx) { /* Besides the control queue, half of the queues are - * reguler HWS queues, and the other half are BWC queues. + * regular HWS queues, and the other half are BWC queues. */ - return (ctx->queues - 1) / 2; + if (mlx5hws_context_bwc_supported(ctx)) + return (ctx->queues - 1) / 2; + return 0; } static inline u16 mlx5hws_bwc_get_queue_id(struct mlx5hws_context *ctx, u16 idx) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/context.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/context.c index fd48b05e91e0..4a8928f33bb9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/context.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/context.c @@ -161,8 +161,10 @@ static int hws_context_init_hws(struct mlx5hws_context *ctx, if (ret) goto uninit_pd; - if (attr->bwc) - ctx->flags |= MLX5HWS_CONTEXT_FLAG_BWC_SUPPORT; + /* Context has support for backward compatible API, + * and does not have support for native HWS API. 
+ */ + ctx->flags |= MLX5HWS_CONTEXT_FLAG_BWC_SUPPORT; ret = mlx5hws_send_queues_open(ctx, attr->queues, attr->queue_size); if (ret) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/context.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/context.h index 47f5cc8de73f..1c9cc4fba083 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/context.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/context.h @@ -8,6 +8,7 @@ enum mlx5hws_context_flags { MLX5HWS_CONTEXT_FLAG_HWS_SUPPORT = 1 << 0, MLX5HWS_CONTEXT_FLAG_PRIVATE_PD = 1 << 1, MLX5HWS_CONTEXT_FLAG_BWC_SUPPORT = 1 << 2, + MLX5HWS_CONTEXT_FLAG_NATIVE_SUPPORT = 1 << 3, }; enum mlx5hws_context_shared_stc_type { @@ -58,6 +59,11 @@ static inline bool mlx5hws_context_bwc_supported(struct mlx5hws_context *ctx) return ctx->flags & MLX5HWS_CONTEXT_FLAG_BWC_SUPPORT; } +static inline bool mlx5hws_context_native_supported(struct mlx5hws_context *ctx) +{ + return ctx->flags & MLX5HWS_CONTEXT_FLAG_NATIVE_SUPPORT; +} + bool mlx5hws_context_cap_dynamic_reparse(struct mlx5hws_context *ctx); u8 mlx5hws_context_get_reparse_mode(struct mlx5hws_context *ctx); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws.h index f39d636ff39a..5121951f2778 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws.h @@ -70,7 +70,6 @@ enum mlx5hws_send_queue_actions { struct mlx5hws_context_attr { u16 queues; u16 queue_size; - bool bwc; /* add support for backward compatible API*/ }; struct mlx5hws_table_attr { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.c index b68b0c368771..20fe126ffd22 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.c @@ -898,6 +898,9 @@ close_cq: static void mlx5hws_send_queue_close(struct mlx5hws_send_engine *queue) { + if (!queue->num_entries) + return; /* this queue wasn't initialized */ + hws_send_ring_close(queue); kfree(queue->completed.entries); } @@ -1005,7 +1008,7 @@ int mlx5hws_send_queues_open(struct mlx5hws_context *ctx, u16 queue_size) { int err = 0; - u32 i; + int i = 0; /* Open one extra queue for control path */ ctx->queues = queues + 1; @@ -1021,7 +1024,13 @@ int mlx5hws_send_queues_open(struct mlx5hws_context *ctx, goto free_bwc_locks; } - for (i = 0; i < ctx->queues; i++) { + /* If native API isn't supported, skip the unused native queues: + * initialize BWC queues and control queue only. + */ + if (!mlx5hws_context_native_supported(ctx)) + i = mlx5hws_bwc_get_queue_id(ctx, 0); + + for (; i < ctx->queues; i++) { err = mlx5hws_send_queue_open(ctx, &ctx->send_queue[i], queue_size); if (err) goto close_send_queues; -- 2.51.0 From aa90a30804a563763eb78f00f56f759b72b91cb0 Mon Sep 17 00:00:00 2001 From: Itamar Gozlan Date: Thu, 19 Dec 2024 19:58:38 +0200 Subject: [PATCH 16/16] net/mlx5: DR, expand SWS STE callbacks and consolidate common structs Expand SWS STE callbacks to support ConnectX-8 hardware. Move common enums and structures to a shared header file. 
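To illustrate the intent (sketch only, not part of this patch): with the action-bit-set
helpers exposed through struct mlx5dr_ste_ctx, a newer STE format can reuse the v1 helpers
and override only the callbacks that differ. The ste_ctx_vX / dr_ste_vX_* names below are
hypothetical placeholders:

    static struct mlx5dr_ste_ctx ste_ctx_vX = {
        /* ... existing callbacks ... */
        /* Actions bit set */
        .set_encap      = &dr_ste_v1_set_encap,
        .set_push_vlan  = &dr_ste_vX_set_push_vlan,    /* per-HW override */
        .set_pop_vlan   = &dr_ste_v1_set_pop_vlan,
        .set_rx_decap   = &dr_ste_v1_set_rx_decap,
        .set_encap_l3   = &dr_ste_v1_set_encap_l3,
    };

The shared dr_ste_v1_set_actions_tx()/_rx() flows now take the ste_ctx and call
ste_ctx->set_*() rather than the v1 helpers directly, so such an override is picked up
without duplicating the action-building logic.
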
Signed-off-by: Itamar Gozlan Signed-off-by: Yevgeny Kliteynik Signed-off-by: Tariq Toukan Link: https://patch.msgid.link/20241219175841.1094544-9-tariqt@nvidia.com Signed-off-by: Jakub Kicinski --- .../mellanox/mlx5/core/steering/sws/dr_ste.c | 4 +- .../mellanox/mlx5/core/steering/sws/dr_ste.h | 18 +- .../mlx5/core/steering/sws/dr_ste_v0.c | 6 +- .../mlx5/core/steering/sws/dr_ste_v1.c | 207 ++++-------------- .../mlx5/core/steering/sws/dr_ste_v1.h | 147 ++++++++++++- .../mlx5/core/steering/sws/dr_ste_v2.c | 169 +------------- .../mlx5/core/steering/sws/dr_ste_v2.h | 168 ++++++++++++++ 7 files changed, 377 insertions(+), 342 deletions(-) create mode 100644 drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v2.h diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste.c index e94fbb015efa..01ba8eae2983 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste.c @@ -555,7 +555,7 @@ void mlx5dr_ste_set_actions_tx(struct mlx5dr_ste_ctx *ste_ctx, struct mlx5dr_ste_actions_attr *attr, u32 *added_stes) { - ste_ctx->set_actions_tx(dmn, action_type_set, ste_ctx->actions_caps, + ste_ctx->set_actions_tx(ste_ctx, dmn, action_type_set, ste_ctx->actions_caps, hw_ste_arr, attr, added_stes); } @@ -566,7 +566,7 @@ void mlx5dr_ste_set_actions_rx(struct mlx5dr_ste_ctx *ste_ctx, struct mlx5dr_ste_actions_attr *attr, u32 *added_stes) { - ste_ctx->set_actions_rx(dmn, action_type_set, ste_ctx->actions_caps, + ste_ctx->set_actions_rx(ste_ctx, dmn, action_type_set, ste_ctx->actions_caps, hw_ste_arr, attr, added_stes); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste.h index 54a6619c3ecb..b6ec8d30d990 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste.h @@ -160,13 +160,15 @@ struct mlx5dr_ste_ctx { /* Actions */ u32 actions_caps; - void (*set_actions_rx)(struct mlx5dr_domain *dmn, + void (*set_actions_rx)(struct mlx5dr_ste_ctx *ste_ctx, + struct mlx5dr_domain *dmn, u8 *action_type_set, u32 actions_caps, u8 *hw_ste_arr, struct mlx5dr_ste_actions_attr *attr, u32 *added_stes); - void (*set_actions_tx)(struct mlx5dr_domain *dmn, + void (*set_actions_tx)(struct mlx5dr_ste_ctx *ste_ctx, + struct mlx5dr_domain *dmn, u8 *action_type_set, u32 actions_caps, u8 *hw_ste_arr, @@ -197,7 +199,17 @@ struct mlx5dr_ste_ctx { u16 *used_hw_action_num); int (*alloc_modify_hdr_chunk)(struct mlx5dr_action *action); void (*dealloc_modify_hdr_chunk)(struct mlx5dr_action *action); - + /* Actions bit set */ + void (*set_encap)(u8 *hw_ste_p, u8 *d_action, + u32 reformat_id, int size); + void (*set_push_vlan)(u8 *ste, u8 *d_action, + u32 vlan_hdr); + void (*set_pop_vlan)(u8 *hw_ste_p, u8 *s_action, + u8 vlans_num); + void (*set_rx_decap)(u8 *hw_ste_p, u8 *s_action); + void (*set_encap_l3)(u8 *hw_ste_p, u8 *frst_s_action, + u8 *scnd_d_action, u32 reformat_id, + int size); /* Send */ void (*prepare_for_postsend)(u8 *hw_ste_p, u32 ste_size); }; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v0.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v0.c index e9f6c7ed7a7b..42536bee55e2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v0.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v0.c @@ -406,7 +406,8 @@ static void 
dr_ste_v0_arr_init_next(u8 **last_ste, } static void -dr_ste_v0_set_actions_tx(struct mlx5dr_domain *dmn, +dr_ste_v0_set_actions_tx(struct mlx5dr_ste_ctx *ste_ctx, + struct mlx5dr_domain *dmn, u8 *action_type_set, u32 actions_caps, u8 *last_ste, @@ -476,7 +477,8 @@ dr_ste_v0_set_actions_tx(struct mlx5dr_domain *dmn, } static void -dr_ste_v0_set_actions_rx(struct mlx5dr_domain *dmn, +dr_ste_v0_set_actions_rx(struct mlx5dr_ste_ctx *ste_ctx, + struct mlx5dr_domain *dmn, u8 *action_type_set, u32 actions_caps, u8 *last_ste, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v1.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v1.c index 1d49704b9542..7f83d77c43ef 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v1.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v1.c @@ -5,136 +5,6 @@ #include "mlx5_ifc_dr_ste_v1.h" #include "dr_ste_v1.h" -#define DR_STE_CALC_DFNR_TYPE(lookup_type, inner) \ - ((inner) ? DR_STE_V1_LU_TYPE_##lookup_type##_I : \ - DR_STE_V1_LU_TYPE_##lookup_type##_O) - -enum dr_ste_v1_entry_format { - DR_STE_V1_TYPE_BWC_BYTE = 0x0, - DR_STE_V1_TYPE_BWC_DW = 0x1, - DR_STE_V1_TYPE_MATCH = 0x2, - DR_STE_V1_TYPE_MATCH_RANGES = 0x7, -}; - -/* Lookup type is built from 2B: [ Definer mode 1B ][ Definer index 1B ] */ -enum { - DR_STE_V1_LU_TYPE_NOP = 0x0000, - DR_STE_V1_LU_TYPE_ETHL2_TNL = 0x0002, - DR_STE_V1_LU_TYPE_IBL3_EXT = 0x0102, - DR_STE_V1_LU_TYPE_ETHL2_O = 0x0003, - DR_STE_V1_LU_TYPE_IBL4 = 0x0103, - DR_STE_V1_LU_TYPE_ETHL2_I = 0x0004, - DR_STE_V1_LU_TYPE_SRC_QP_GVMI = 0x0104, - DR_STE_V1_LU_TYPE_ETHL2_SRC_O = 0x0005, - DR_STE_V1_LU_TYPE_ETHL2_HEADERS_O = 0x0105, - DR_STE_V1_LU_TYPE_ETHL2_SRC_I = 0x0006, - DR_STE_V1_LU_TYPE_ETHL2_HEADERS_I = 0x0106, - DR_STE_V1_LU_TYPE_ETHL3_IPV4_5_TUPLE_O = 0x0007, - DR_STE_V1_LU_TYPE_IPV6_DES_O = 0x0107, - DR_STE_V1_LU_TYPE_ETHL3_IPV4_5_TUPLE_I = 0x0008, - DR_STE_V1_LU_TYPE_IPV6_DES_I = 0x0108, - DR_STE_V1_LU_TYPE_ETHL4_O = 0x0009, - DR_STE_V1_LU_TYPE_IPV6_SRC_O = 0x0109, - DR_STE_V1_LU_TYPE_ETHL4_I = 0x000a, - DR_STE_V1_LU_TYPE_IPV6_SRC_I = 0x010a, - DR_STE_V1_LU_TYPE_ETHL2_SRC_DST_O = 0x000b, - DR_STE_V1_LU_TYPE_MPLS_O = 0x010b, - DR_STE_V1_LU_TYPE_ETHL2_SRC_DST_I = 0x000c, - DR_STE_V1_LU_TYPE_MPLS_I = 0x010c, - DR_STE_V1_LU_TYPE_ETHL3_IPV4_MISC_O = 0x000d, - DR_STE_V1_LU_TYPE_GRE = 0x010d, - DR_STE_V1_LU_TYPE_FLEX_PARSER_TNL_HEADER = 0x000e, - DR_STE_V1_LU_TYPE_GENERAL_PURPOSE = 0x010e, - DR_STE_V1_LU_TYPE_ETHL3_IPV4_MISC_I = 0x000f, - DR_STE_V1_LU_TYPE_STEERING_REGISTERS_0 = 0x010f, - DR_STE_V1_LU_TYPE_STEERING_REGISTERS_1 = 0x0110, - DR_STE_V1_LU_TYPE_FLEX_PARSER_OK = 0x0011, - DR_STE_V1_LU_TYPE_FLEX_PARSER_0 = 0x0111, - DR_STE_V1_LU_TYPE_FLEX_PARSER_1 = 0x0112, - DR_STE_V1_LU_TYPE_ETHL4_MISC_O = 0x0113, - DR_STE_V1_LU_TYPE_ETHL4_MISC_I = 0x0114, - DR_STE_V1_LU_TYPE_INVALID = 0x00ff, - DR_STE_V1_LU_TYPE_DONT_CARE = MLX5DR_STE_LU_TYPE_DONT_CARE, -}; - -enum dr_ste_v1_header_anchors { - DR_STE_HEADER_ANCHOR_START_OUTER = 0x00, - DR_STE_HEADER_ANCHOR_1ST_VLAN = 0x02, - DR_STE_HEADER_ANCHOR_IPV6_IPV4 = 0x07, - DR_STE_HEADER_ANCHOR_INNER_MAC = 0x13, - DR_STE_HEADER_ANCHOR_INNER_IPV6_IPV4 = 0x19, -}; - -enum dr_ste_v1_action_size { - DR_STE_ACTION_SINGLE_SZ = 4, - DR_STE_ACTION_DOUBLE_SZ = 8, - DR_STE_ACTION_TRIPLE_SZ = 12, -}; - -enum dr_ste_v1_action_insert_ptr_attr { - DR_STE_V1_ACTION_INSERT_PTR_ATTR_NONE = 0, /* Regular push header (e.g. 
push vlan) */ - DR_STE_V1_ACTION_INSERT_PTR_ATTR_ENCAP = 1, /* Encapsulation / Tunneling */ - DR_STE_V1_ACTION_INSERT_PTR_ATTR_ESP = 2, /* IPsec */ -}; - -enum dr_ste_v1_action_id { - DR_STE_V1_ACTION_ID_NOP = 0x00, - DR_STE_V1_ACTION_ID_COPY = 0x05, - DR_STE_V1_ACTION_ID_SET = 0x06, - DR_STE_V1_ACTION_ID_ADD = 0x07, - DR_STE_V1_ACTION_ID_REMOVE_BY_SIZE = 0x08, - DR_STE_V1_ACTION_ID_REMOVE_HEADER_TO_HEADER = 0x09, - DR_STE_V1_ACTION_ID_INSERT_INLINE = 0x0a, - DR_STE_V1_ACTION_ID_INSERT_POINTER = 0x0b, - DR_STE_V1_ACTION_ID_FLOW_TAG = 0x0c, - DR_STE_V1_ACTION_ID_QUEUE_ID_SEL = 0x0d, - DR_STE_V1_ACTION_ID_ACCELERATED_LIST = 0x0e, - DR_STE_V1_ACTION_ID_MODIFY_LIST = 0x0f, - DR_STE_V1_ACTION_ID_ASO = 0x12, - DR_STE_V1_ACTION_ID_TRAILER = 0x13, - DR_STE_V1_ACTION_ID_COUNTER_ID = 0x14, - DR_STE_V1_ACTION_ID_MAX = 0x21, - /* use for special cases */ - DR_STE_V1_ACTION_ID_SPECIAL_ENCAP_L3 = 0x22, -}; - -enum { - DR_STE_V1_ACTION_MDFY_FLD_L2_OUT_0 = 0x00, - DR_STE_V1_ACTION_MDFY_FLD_L2_OUT_1 = 0x01, - DR_STE_V1_ACTION_MDFY_FLD_L2_OUT_2 = 0x02, - DR_STE_V1_ACTION_MDFY_FLD_SRC_L2_OUT_0 = 0x08, - DR_STE_V1_ACTION_MDFY_FLD_SRC_L2_OUT_1 = 0x09, - DR_STE_V1_ACTION_MDFY_FLD_L3_OUT_0 = 0x0e, - DR_STE_V1_ACTION_MDFY_FLD_L4_OUT_0 = 0x18, - DR_STE_V1_ACTION_MDFY_FLD_L4_OUT_1 = 0x19, - DR_STE_V1_ACTION_MDFY_FLD_IPV4_OUT_0 = 0x40, - DR_STE_V1_ACTION_MDFY_FLD_IPV4_OUT_1 = 0x41, - DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_0 = 0x44, - DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_1 = 0x45, - DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_2 = 0x46, - DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_3 = 0x47, - DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_0 = 0x4c, - DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_1 = 0x4d, - DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_2 = 0x4e, - DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_3 = 0x4f, - DR_STE_V1_ACTION_MDFY_FLD_TCP_MISC_0 = 0x5e, - DR_STE_V1_ACTION_MDFY_FLD_TCP_MISC_1 = 0x5f, - DR_STE_V1_ACTION_MDFY_FLD_CFG_HDR_0_0 = 0x6f, - DR_STE_V1_ACTION_MDFY_FLD_CFG_HDR_0_1 = 0x70, - DR_STE_V1_ACTION_MDFY_FLD_METADATA_2_CQE = 0x7b, - DR_STE_V1_ACTION_MDFY_FLD_GNRL_PURPOSE = 0x7c, - DR_STE_V1_ACTION_MDFY_FLD_REGISTER_2_0 = 0x8c, - DR_STE_V1_ACTION_MDFY_FLD_REGISTER_2_1 = 0x8d, - DR_STE_V1_ACTION_MDFY_FLD_REGISTER_1_0 = 0x8e, - DR_STE_V1_ACTION_MDFY_FLD_REGISTER_1_1 = 0x8f, - DR_STE_V1_ACTION_MDFY_FLD_REGISTER_0_0 = 0x90, - DR_STE_V1_ACTION_MDFY_FLD_REGISTER_0_1 = 0x91, -}; - -enum dr_ste_v1_aso_ctx_type { - DR_STE_V1_ASO_CTX_TYPE_POLICERS = 0x2, -}; - static const struct mlx5dr_ste_action_modify_field dr_ste_v1_action_modify_field_arr[] = { [MLX5_ACTION_IN_FIELD_OUT_SMAC_47_16] = { .hw_field = DR_STE_V1_ACTION_MDFY_FLD_SRC_L2_OUT_0, .start = 0, .end = 31, @@ -379,13 +249,12 @@ static void dr_ste_v1_set_counter_id(u8 *hw_ste_p, u32 ctr_id) MLX5_SET(ste_match_bwc_v1, hw_ste_p, counter_id, ctr_id); } -static void dr_ste_v1_set_reparse(u8 *hw_ste_p) +void dr_ste_v1_set_reparse(u8 *hw_ste_p) { MLX5_SET(ste_match_bwc_v1, hw_ste_p, reparse, 1); } -static void dr_ste_v1_set_encap(u8 *hw_ste_p, u8 *d_action, - u32 reformat_id, int size) +void dr_ste_v1_set_encap(u8 *hw_ste_p, u8 *d_action, u32 reformat_id, int size) { MLX5_SET(ste_double_action_insert_with_ptr_v1, d_action, action_id, DR_STE_V1_ACTION_ID_INSERT_POINTER); @@ -432,8 +301,7 @@ static void dr_ste_v1_set_remove_hdr(u8 *hw_ste_p, u8 *s_action, dr_ste_v1_set_reparse(hw_ste_p); } -static void dr_ste_v1_set_push_vlan(u8 *hw_ste_p, u8 *d_action, - u32 vlan_hdr) +void dr_ste_v1_set_push_vlan(u8 *hw_ste_p, u8 *d_action, u32 vlan_hdr) { MLX5_SET(ste_double_action_insert_with_inline_v1, 
d_action, action_id, DR_STE_V1_ACTION_ID_INSERT_INLINE); @@ -446,7 +314,7 @@ static void dr_ste_v1_set_push_vlan(u8 *hw_ste_p, u8 *d_action, dr_ste_v1_set_reparse(hw_ste_p); } -static void dr_ste_v1_set_pop_vlan(u8 *hw_ste_p, u8 *s_action, u8 vlans_num) +void dr_ste_v1_set_pop_vlan(u8 *hw_ste_p, u8 *s_action, u8 vlans_num) { MLX5_SET(ste_single_action_remove_header_size_v1, s_action, action_id, DR_STE_V1_ACTION_ID_REMOVE_BY_SIZE); @@ -459,11 +327,8 @@ static void dr_ste_v1_set_pop_vlan(u8 *hw_ste_p, u8 *s_action, u8 vlans_num) dr_ste_v1_set_reparse(hw_ste_p); } -static void dr_ste_v1_set_encap_l3(u8 *hw_ste_p, - u8 *frst_s_action, - u8 *scnd_d_action, - u32 reformat_id, - int size) +void dr_ste_v1_set_encap_l3(u8 *hw_ste_p, u8 *frst_s_action, u8 *scnd_d_action, + u32 reformat_id, int size) { /* Remove L2 headers */ MLX5_SET(ste_single_action_remove_header_v1, frst_s_action, action_id, @@ -483,7 +348,7 @@ static void dr_ste_v1_set_encap_l3(u8 *hw_ste_p, dr_ste_v1_set_reparse(hw_ste_p); } -static void dr_ste_v1_set_rx_decap(u8 *hw_ste_p, u8 *s_action) +void dr_ste_v1_set_rx_decap(u8 *hw_ste_p, u8 *s_action) { MLX5_SET(ste_single_action_remove_header_v1, s_action, action_id, DR_STE_V1_ACTION_ID_REMOVE_HEADER_TO_HEADER); @@ -620,7 +485,8 @@ static void dr_ste_v1_arr_init_next_match_range(u8 **last_ste, dr_ste_v1_set_entry_type(*last_ste, DR_STE_V1_TYPE_MATCH_RANGES); } -void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn, +void dr_ste_v1_set_actions_tx(struct mlx5dr_ste_ctx *ste_ctx, + struct mlx5dr_domain *dmn, u8 *action_type_set, u32 actions_caps, u8 *last_ste, @@ -640,7 +506,7 @@ void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn, last_ste, action); action_sz = DR_STE_ACTION_TRIPLE_SZ; } - dr_ste_v1_set_pop_vlan(last_ste, action, attr->vlans.count); + ste_ctx->set_pop_vlan(last_ste, action, attr->vlans.count); action_sz -= DR_STE_ACTION_SINGLE_SZ; action += DR_STE_ACTION_SINGLE_SZ; @@ -677,8 +543,8 @@ void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn, action_sz = DR_STE_ACTION_TRIPLE_SZ; allow_encap = true; } - dr_ste_v1_set_push_vlan(last_ste, action, - attr->vlans.headers[i]); + ste_ctx->set_push_vlan(last_ste, action, + attr->vlans.headers[i]); action_sz -= DR_STE_ACTION_DOUBLE_SZ; action += DR_STE_ACTION_DOUBLE_SZ; } @@ -691,9 +557,9 @@ void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn, action_sz = DR_STE_ACTION_TRIPLE_SZ; allow_encap = true; } - dr_ste_v1_set_encap(last_ste, action, - attr->reformat.id, - attr->reformat.size); + ste_ctx->set_encap(last_ste, action, + attr->reformat.id, + attr->reformat.size); action_sz -= DR_STE_ACTION_DOUBLE_SZ; action += DR_STE_ACTION_DOUBLE_SZ; } else if (action_type_set[DR_ACTION_TYP_L2_TO_TNL_L3]) { @@ -706,10 +572,10 @@ void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn, } d_action = action + DR_STE_ACTION_SINGLE_SZ; - dr_ste_v1_set_encap_l3(last_ste, - action, d_action, - attr->reformat.id, - attr->reformat.size); + ste_ctx->set_encap_l3(last_ste, + action, d_action, + attr->reformat.id, + attr->reformat.size); action_sz -= DR_STE_ACTION_TRIPLE_SZ; action += DR_STE_ACTION_TRIPLE_SZ; } else if (action_type_set[DR_ACTION_TYP_INSERT_HDR]) { @@ -776,7 +642,8 @@ void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn, dr_ste_v1_set_hit_addr(last_ste, attr->final_icm_addr, 1); } -void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn, +void dr_ste_v1_set_actions_rx(struct mlx5dr_ste_ctx *ste_ctx, + struct mlx5dr_domain *dmn, u8 *action_type_set, u32 actions_caps, u8 *last_ste, @@ -799,7 +666,7 @@ void 
dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn, allow_modify_hdr = false; allow_ctr = false; } else if (action_type_set[DR_ACTION_TYP_TNL_L2_TO_L2]) { - dr_ste_v1_set_rx_decap(last_ste, action); + ste_ctx->set_rx_decap(last_ste, action); action_sz -= DR_STE_ACTION_SINGLE_SZ; action += DR_STE_ACTION_SINGLE_SZ; allow_modify_hdr = false; @@ -827,7 +694,7 @@ void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn, action_sz = DR_STE_ACTION_TRIPLE_SZ; } - dr_ste_v1_set_pop_vlan(last_ste, action, attr->vlans.count); + ste_ctx->set_pop_vlan(last_ste, action, attr->vlans.count); action_sz -= DR_STE_ACTION_SINGLE_SZ; action += DR_STE_ACTION_SINGLE_SZ; allow_ctr = false; @@ -868,8 +735,8 @@ void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn, last_ste, action); action_sz = DR_STE_ACTION_TRIPLE_SZ; } - dr_ste_v1_set_push_vlan(last_ste, action, - attr->vlans.headers[i]); + ste_ctx->set_push_vlan(last_ste, action, + attr->vlans.headers[i]); action_sz -= DR_STE_ACTION_DOUBLE_SZ; action += DR_STE_ACTION_DOUBLE_SZ; } @@ -895,9 +762,9 @@ void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn, action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action); action_sz = DR_STE_ACTION_TRIPLE_SZ; } - dr_ste_v1_set_encap(last_ste, action, - attr->reformat.id, - attr->reformat.size); + ste_ctx->set_encap(last_ste, action, + attr->reformat.id, + attr->reformat.size); action_sz -= DR_STE_ACTION_DOUBLE_SZ; action += DR_STE_ACTION_DOUBLE_SZ; allow_modify_hdr = false; @@ -912,10 +779,10 @@ void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn, d_action = action + DR_STE_ACTION_SINGLE_SZ; - dr_ste_v1_set_encap_l3(last_ste, - action, d_action, - attr->reformat.id, - attr->reformat.size); + ste_ctx->set_encap_l3(last_ste, + action, d_action, + attr->reformat.id, + attr->reformat.size); action_sz -= DR_STE_ACTION_TRIPLE_SZ; allow_modify_hdr = false; } else if (action_type_set[DR_ACTION_TYP_INSERT_HDR]) { @@ -1027,9 +894,6 @@ void dr_ste_v1_set_action_copy(u8 *d_action, MLX5_SET(ste_double_action_copy_v1, d_action, source_right_shifter, src_shifter); } -#define DR_STE_DECAP_L3_ACTION_NUM 8 -#define DR_STE_L2_HDR_MAX_SZ 20 - int dr_ste_v1_set_action_decap_l3_list(void *data, u32 data_sz, u8 *hw_action, @@ -2330,7 +2194,12 @@ static struct mlx5dr_ste_ctx ste_ctx_v1 = { .set_action_decap_l3_list = &dr_ste_v1_set_action_decap_l3_list, .alloc_modify_hdr_chunk = &dr_ste_v1_alloc_modify_hdr_ptrn_arg, .dealloc_modify_hdr_chunk = &dr_ste_v1_free_modify_hdr_ptrn_arg, - + /* Actions bit set */ + .set_encap = &dr_ste_v1_set_encap, + .set_push_vlan = &dr_ste_v1_set_push_vlan, + .set_pop_vlan = &dr_ste_v1_set_pop_vlan, + .set_rx_decap = &dr_ste_v1_set_rx_decap, + .set_encap_l3 = &dr_ste_v1_set_encap_l3, /* Send */ .prepare_for_postsend = &dr_ste_v1_prepare_for_postsend, }; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v1.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v1.h index e2fc69867088..a8d9e308d339 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v1.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v1.h @@ -7,6 +7,138 @@ #include "dr_types.h" #include "dr_ste.h" +#define DR_STE_DECAP_L3_ACTION_NUM 8 +#define DR_STE_L2_HDR_MAX_SZ 20 +#define DR_STE_CALC_DFNR_TYPE(lookup_type, inner) \ + ((inner) ? 
DR_STE_V1_LU_TYPE_##lookup_type##_I : \ + DR_STE_V1_LU_TYPE_##lookup_type##_O) + +enum dr_ste_v1_entry_format { + DR_STE_V1_TYPE_BWC_BYTE = 0x0, + DR_STE_V1_TYPE_BWC_DW = 0x1, + DR_STE_V1_TYPE_MATCH = 0x2, + DR_STE_V1_TYPE_MATCH_RANGES = 0x7, +}; + +/* Lookup type is built from 2B: [ Definer mode 1B ][ Definer index 1B ] */ +enum { + DR_STE_V1_LU_TYPE_NOP = 0x0000, + DR_STE_V1_LU_TYPE_ETHL2_TNL = 0x0002, + DR_STE_V1_LU_TYPE_IBL3_EXT = 0x0102, + DR_STE_V1_LU_TYPE_ETHL2_O = 0x0003, + DR_STE_V1_LU_TYPE_IBL4 = 0x0103, + DR_STE_V1_LU_TYPE_ETHL2_I = 0x0004, + DR_STE_V1_LU_TYPE_SRC_QP_GVMI = 0x0104, + DR_STE_V1_LU_TYPE_ETHL2_SRC_O = 0x0005, + DR_STE_V1_LU_TYPE_ETHL2_HEADERS_O = 0x0105, + DR_STE_V1_LU_TYPE_ETHL2_SRC_I = 0x0006, + DR_STE_V1_LU_TYPE_ETHL2_HEADERS_I = 0x0106, + DR_STE_V1_LU_TYPE_ETHL3_IPV4_5_TUPLE_O = 0x0007, + DR_STE_V1_LU_TYPE_IPV6_DES_O = 0x0107, + DR_STE_V1_LU_TYPE_ETHL3_IPV4_5_TUPLE_I = 0x0008, + DR_STE_V1_LU_TYPE_IPV6_DES_I = 0x0108, + DR_STE_V1_LU_TYPE_ETHL4_O = 0x0009, + DR_STE_V1_LU_TYPE_IPV6_SRC_O = 0x0109, + DR_STE_V1_LU_TYPE_ETHL4_I = 0x000a, + DR_STE_V1_LU_TYPE_IPV6_SRC_I = 0x010a, + DR_STE_V1_LU_TYPE_ETHL2_SRC_DST_O = 0x000b, + DR_STE_V1_LU_TYPE_MPLS_O = 0x010b, + DR_STE_V1_LU_TYPE_ETHL2_SRC_DST_I = 0x000c, + DR_STE_V1_LU_TYPE_MPLS_I = 0x010c, + DR_STE_V1_LU_TYPE_ETHL3_IPV4_MISC_O = 0x000d, + DR_STE_V1_LU_TYPE_GRE = 0x010d, + DR_STE_V1_LU_TYPE_FLEX_PARSER_TNL_HEADER = 0x000e, + DR_STE_V1_LU_TYPE_GENERAL_PURPOSE = 0x010e, + DR_STE_V1_LU_TYPE_ETHL3_IPV4_MISC_I = 0x000f, + DR_STE_V1_LU_TYPE_STEERING_REGISTERS_0 = 0x010f, + DR_STE_V1_LU_TYPE_STEERING_REGISTERS_1 = 0x0110, + DR_STE_V1_LU_TYPE_FLEX_PARSER_OK = 0x0011, + DR_STE_V1_LU_TYPE_FLEX_PARSER_0 = 0x0111, + DR_STE_V1_LU_TYPE_FLEX_PARSER_1 = 0x0112, + DR_STE_V1_LU_TYPE_ETHL4_MISC_O = 0x0113, + DR_STE_V1_LU_TYPE_ETHL4_MISC_I = 0x0114, + DR_STE_V1_LU_TYPE_INVALID = 0x00ff, + DR_STE_V1_LU_TYPE_DONT_CARE = MLX5DR_STE_LU_TYPE_DONT_CARE, +}; + +enum dr_ste_v1_header_anchors { + DR_STE_HEADER_ANCHOR_START_OUTER = 0x00, + DR_STE_HEADER_ANCHOR_1ST_VLAN = 0x02, + DR_STE_HEADER_ANCHOR_IPV6_IPV4 = 0x07, + DR_STE_HEADER_ANCHOR_INNER_MAC = 0x13, + DR_STE_HEADER_ANCHOR_INNER_IPV6_IPV4 = 0x19, +}; + +enum dr_ste_v1_action_size { + DR_STE_ACTION_SINGLE_SZ = 4, + DR_STE_ACTION_DOUBLE_SZ = 8, + DR_STE_ACTION_TRIPLE_SZ = 12, +}; + +enum dr_ste_v1_action_insert_ptr_attr { + DR_STE_V1_ACTION_INSERT_PTR_ATTR_NONE = 0, /* Regular push header (e.g. 
push vlan) */ + DR_STE_V1_ACTION_INSERT_PTR_ATTR_ENCAP = 1, /* Encapsulation / Tunneling */ + DR_STE_V1_ACTION_INSERT_PTR_ATTR_ESP = 2, /* IPsec */ +}; + +enum dr_ste_v1_action_id { + DR_STE_V1_ACTION_ID_NOP = 0x00, + DR_STE_V1_ACTION_ID_COPY = 0x05, + DR_STE_V1_ACTION_ID_SET = 0x06, + DR_STE_V1_ACTION_ID_ADD = 0x07, + DR_STE_V1_ACTION_ID_REMOVE_BY_SIZE = 0x08, + DR_STE_V1_ACTION_ID_REMOVE_HEADER_TO_HEADER = 0x09, + DR_STE_V1_ACTION_ID_INSERT_INLINE = 0x0a, + DR_STE_V1_ACTION_ID_INSERT_POINTER = 0x0b, + DR_STE_V1_ACTION_ID_FLOW_TAG = 0x0c, + DR_STE_V1_ACTION_ID_QUEUE_ID_SEL = 0x0d, + DR_STE_V1_ACTION_ID_ACCELERATED_LIST = 0x0e, + DR_STE_V1_ACTION_ID_MODIFY_LIST = 0x0f, + DR_STE_V1_ACTION_ID_ASO = 0x12, + DR_STE_V1_ACTION_ID_TRAILER = 0x13, + DR_STE_V1_ACTION_ID_COUNTER_ID = 0x14, + DR_STE_V1_ACTION_ID_MAX = 0x21, + /* use for special cases */ + DR_STE_V1_ACTION_ID_SPECIAL_ENCAP_L3 = 0x22, +}; + +enum { + DR_STE_V1_ACTION_MDFY_FLD_L2_OUT_0 = 0x00, + DR_STE_V1_ACTION_MDFY_FLD_L2_OUT_1 = 0x01, + DR_STE_V1_ACTION_MDFY_FLD_L2_OUT_2 = 0x02, + DR_STE_V1_ACTION_MDFY_FLD_SRC_L2_OUT_0 = 0x08, + DR_STE_V1_ACTION_MDFY_FLD_SRC_L2_OUT_1 = 0x09, + DR_STE_V1_ACTION_MDFY_FLD_L3_OUT_0 = 0x0e, + DR_STE_V1_ACTION_MDFY_FLD_L4_OUT_0 = 0x18, + DR_STE_V1_ACTION_MDFY_FLD_L4_OUT_1 = 0x19, + DR_STE_V1_ACTION_MDFY_FLD_IPV4_OUT_0 = 0x40, + DR_STE_V1_ACTION_MDFY_FLD_IPV4_OUT_1 = 0x41, + DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_0 = 0x44, + DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_1 = 0x45, + DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_2 = 0x46, + DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_3 = 0x47, + DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_0 = 0x4c, + DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_1 = 0x4d, + DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_2 = 0x4e, + DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_3 = 0x4f, + DR_STE_V1_ACTION_MDFY_FLD_TCP_MISC_0 = 0x5e, + DR_STE_V1_ACTION_MDFY_FLD_TCP_MISC_1 = 0x5f, + DR_STE_V1_ACTION_MDFY_FLD_CFG_HDR_0_0 = 0x6f, + DR_STE_V1_ACTION_MDFY_FLD_CFG_HDR_0_1 = 0x70, + DR_STE_V1_ACTION_MDFY_FLD_METADATA_2_CQE = 0x7b, + DR_STE_V1_ACTION_MDFY_FLD_GNRL_PURPOSE = 0x7c, + DR_STE_V1_ACTION_MDFY_FLD_REGISTER_2_0 = 0x8c, + DR_STE_V1_ACTION_MDFY_FLD_REGISTER_2_1 = 0x8d, + DR_STE_V1_ACTION_MDFY_FLD_REGISTER_1_0 = 0x8e, + DR_STE_V1_ACTION_MDFY_FLD_REGISTER_1_1 = 0x8f, + DR_STE_V1_ACTION_MDFY_FLD_REGISTER_0_0 = 0x90, + DR_STE_V1_ACTION_MDFY_FLD_REGISTER_0_1 = 0x91, +}; + +enum dr_ste_v1_aso_ctx_type { + DR_STE_V1_ASO_CTX_TYPE_POLICERS = 0x2, +}; + bool dr_ste_v1_is_miss_addr_set(u8 *hw_ste_p); void dr_ste_v1_set_miss_addr(u8 *hw_ste_p, u64 miss_addr); u64 dr_ste_v1_get_miss_addr(u8 *hw_ste_p); @@ -17,11 +149,18 @@ u16 dr_ste_v1_get_next_lu_type(u8 *hw_ste_p); void dr_ste_v1_set_hit_addr(u8 *hw_ste_p, u64 icm_addr, u32 ht_size); void dr_ste_v1_init(u8 *hw_ste_p, u16 lu_type, bool is_rx, u16 gvmi); void dr_ste_v1_prepare_for_postsend(u8 *hw_ste_p, u32 ste_size); -void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn, u8 *action_type_set, - u32 actions_caps, u8 *last_ste, +void dr_ste_v1_set_reparse(u8 *hw_ste_p); +void dr_ste_v1_set_encap(u8 *hw_ste_p, u8 *d_action, u32 reformat_id, int size); +void dr_ste_v1_set_push_vlan(u8 *hw_ste_p, u8 *d_action, u32 vlan_hdr); +void dr_ste_v1_set_pop_vlan(u8 *hw_ste_p, u8 *s_action, u8 vlans_num); +void dr_ste_v1_set_encap_l3(u8 *hw_ste_p, u8 *frst_s_action, u8 *scnd_d_action, + u32 reformat_id, int size); +void dr_ste_v1_set_rx_decap(u8 *hw_ste_p, u8 *s_action); +void dr_ste_v1_set_actions_tx(struct mlx5dr_ste_ctx *ste_ctx, struct mlx5dr_domain *dmn, + u8 *action_type_set, u32 
actions_caps, u8 *last_ste, struct mlx5dr_ste_actions_attr *attr, u32 *added_stes); -void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn, u8 *action_type_set, - u32 actions_caps, u8 *last_ste, +void dr_ste_v1_set_actions_rx(struct mlx5dr_ste_ctx *ste_ctx, struct mlx5dr_domain *dmn, + u8 *action_type_set, u32 actions_caps, u8 *last_ste, struct mlx5dr_ste_actions_attr *attr, u32 *added_stes); void dr_ste_v1_set_action_set(u8 *d_action, u8 hw_field, u8 shifter, u8 length, u32 data); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v2.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v2.c index 808b013cf48c..0882dba0f64b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v2.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v2.c @@ -2,167 +2,7 @@ /* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */ #include "dr_ste_v1.h" - -enum { - DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_0 = 0x00, - DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_1 = 0x01, - DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_2 = 0x02, - DR_STE_V2_ACTION_MDFY_FLD_SRC_L2_OUT_0 = 0x08, - DR_STE_V2_ACTION_MDFY_FLD_SRC_L2_OUT_1 = 0x09, - DR_STE_V2_ACTION_MDFY_FLD_L3_OUT_0 = 0x0e, - DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_0 = 0x18, - DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_1 = 0x19, - DR_STE_V2_ACTION_MDFY_FLD_IPV4_OUT_0 = 0x40, - DR_STE_V2_ACTION_MDFY_FLD_IPV4_OUT_1 = 0x41, - DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_0 = 0x44, - DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_1 = 0x45, - DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_2 = 0x46, - DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_3 = 0x47, - DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_0 = 0x4c, - DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_1 = 0x4d, - DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_2 = 0x4e, - DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_3 = 0x4f, - DR_STE_V2_ACTION_MDFY_FLD_TCP_MISC_0 = 0x5e, - DR_STE_V2_ACTION_MDFY_FLD_TCP_MISC_1 = 0x5f, - DR_STE_V2_ACTION_MDFY_FLD_CFG_HDR_0_0 = 0x6f, - DR_STE_V2_ACTION_MDFY_FLD_CFG_HDR_0_1 = 0x70, - DR_STE_V2_ACTION_MDFY_FLD_METADATA_2_CQE = 0x7b, - DR_STE_V2_ACTION_MDFY_FLD_GNRL_PURPOSE = 0x7c, - DR_STE_V2_ACTION_MDFY_FLD_REGISTER_2_0 = 0x90, - DR_STE_V2_ACTION_MDFY_FLD_REGISTER_2_1 = 0x91, - DR_STE_V2_ACTION_MDFY_FLD_REGISTER_1_0 = 0x92, - DR_STE_V2_ACTION_MDFY_FLD_REGISTER_1_1 = 0x93, - DR_STE_V2_ACTION_MDFY_FLD_REGISTER_0_0 = 0x94, - DR_STE_V2_ACTION_MDFY_FLD_REGISTER_0_1 = 0x95, -}; - -static const struct mlx5dr_ste_action_modify_field dr_ste_v2_action_modify_field_arr[] = { - [MLX5_ACTION_IN_FIELD_OUT_SMAC_47_16] = { - .hw_field = DR_STE_V2_ACTION_MDFY_FLD_SRC_L2_OUT_0, .start = 0, .end = 31, - }, - [MLX5_ACTION_IN_FIELD_OUT_SMAC_15_0] = { - .hw_field = DR_STE_V2_ACTION_MDFY_FLD_SRC_L2_OUT_1, .start = 16, .end = 31, - }, - [MLX5_ACTION_IN_FIELD_OUT_ETHERTYPE] = { - .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_1, .start = 0, .end = 15, - }, - [MLX5_ACTION_IN_FIELD_OUT_DMAC_47_16] = { - .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_0, .start = 0, .end = 31, - }, - [MLX5_ACTION_IN_FIELD_OUT_DMAC_15_0] = { - .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_1, .start = 16, .end = 31, - }, - [MLX5_ACTION_IN_FIELD_OUT_IP_DSCP] = { - .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L3_OUT_0, .start = 18, .end = 23, - }, - [MLX5_ACTION_IN_FIELD_OUT_TCP_FLAGS] = { - .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_1, .start = 16, .end = 24, - .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_TCP, - }, - [MLX5_ACTION_IN_FIELD_OUT_TCP_SPORT] = { - .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_0, .start = 16, .end = 31, - .l4_type = 
DR_STE_ACTION_MDFY_TYPE_L4_TCP, - }, - [MLX5_ACTION_IN_FIELD_OUT_TCP_DPORT] = { - .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_0, .start = 0, .end = 15, - .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_TCP, - }, - [MLX5_ACTION_IN_FIELD_OUT_IP_TTL] = { - .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L3_OUT_0, .start = 8, .end = 15, - .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV4, - }, - [MLX5_ACTION_IN_FIELD_OUT_IPV6_HOPLIMIT] = { - .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L3_OUT_0, .start = 8, .end = 15, - .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6, - }, - [MLX5_ACTION_IN_FIELD_OUT_UDP_SPORT] = { - .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_0, .start = 16, .end = 31, - .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_UDP, - }, - [MLX5_ACTION_IN_FIELD_OUT_UDP_DPORT] = { - .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_0, .start = 0, .end = 15, - .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_UDP, - }, - [MLX5_ACTION_IN_FIELD_OUT_SIPV6_127_96] = { - .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_0, .start = 0, .end = 31, - .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6, - }, - [MLX5_ACTION_IN_FIELD_OUT_SIPV6_95_64] = { - .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_1, .start = 0, .end = 31, - .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6, - }, - [MLX5_ACTION_IN_FIELD_OUT_SIPV6_63_32] = { - .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_2, .start = 0, .end = 31, - .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6, - }, - [MLX5_ACTION_IN_FIELD_OUT_SIPV6_31_0] = { - .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_3, .start = 0, .end = 31, - .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6, - }, - [MLX5_ACTION_IN_FIELD_OUT_DIPV6_127_96] = { - .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_0, .start = 0, .end = 31, - .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6, - }, - [MLX5_ACTION_IN_FIELD_OUT_DIPV6_95_64] = { - .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_1, .start = 0, .end = 31, - .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6, - }, - [MLX5_ACTION_IN_FIELD_OUT_DIPV6_63_32] = { - .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_2, .start = 0, .end = 31, - .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6, - }, - [MLX5_ACTION_IN_FIELD_OUT_DIPV6_31_0] = { - .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_3, .start = 0, .end = 31, - .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6, - }, - [MLX5_ACTION_IN_FIELD_OUT_SIPV4] = { - .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV4_OUT_0, .start = 0, .end = 31, - .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV4, - }, - [MLX5_ACTION_IN_FIELD_OUT_DIPV4] = { - .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV4_OUT_1, .start = 0, .end = 31, - .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV4, - }, - [MLX5_ACTION_IN_FIELD_METADATA_REG_A] = { - .hw_field = DR_STE_V2_ACTION_MDFY_FLD_GNRL_PURPOSE, .start = 0, .end = 31, - }, - [MLX5_ACTION_IN_FIELD_METADATA_REG_B] = { - .hw_field = DR_STE_V2_ACTION_MDFY_FLD_METADATA_2_CQE, .start = 0, .end = 31, - }, - [MLX5_ACTION_IN_FIELD_METADATA_REG_C_0] = { - .hw_field = DR_STE_V2_ACTION_MDFY_FLD_REGISTER_0_0, .start = 0, .end = 31, - }, - [MLX5_ACTION_IN_FIELD_METADATA_REG_C_1] = { - .hw_field = DR_STE_V2_ACTION_MDFY_FLD_REGISTER_0_1, .start = 0, .end = 31, - }, - [MLX5_ACTION_IN_FIELD_METADATA_REG_C_2] = { - .hw_field = DR_STE_V2_ACTION_MDFY_FLD_REGISTER_1_0, .start = 0, .end = 31, - }, - [MLX5_ACTION_IN_FIELD_METADATA_REG_C_3] = { - .hw_field = DR_STE_V2_ACTION_MDFY_FLD_REGISTER_1_1, .start = 0, .end = 31, - }, - [MLX5_ACTION_IN_FIELD_METADATA_REG_C_4] = { - .hw_field = DR_STE_V2_ACTION_MDFY_FLD_REGISTER_2_0, .start = 0, .end = 31, - }, - [MLX5_ACTION_IN_FIELD_METADATA_REG_C_5] = { - .hw_field = 
DR_STE_V2_ACTION_MDFY_FLD_REGISTER_2_1, .start = 0, .end = 31, - }, - [MLX5_ACTION_IN_FIELD_OUT_TCP_SEQ_NUM] = { - .hw_field = DR_STE_V2_ACTION_MDFY_FLD_TCP_MISC_0, .start = 0, .end = 31, - }, - [MLX5_ACTION_IN_FIELD_OUT_TCP_ACK_NUM] = { - .hw_field = DR_STE_V2_ACTION_MDFY_FLD_TCP_MISC_1, .start = 0, .end = 31, - }, - [MLX5_ACTION_IN_FIELD_OUT_FIRST_VID] = { - .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_2, .start = 0, .end = 15, - }, - [MLX5_ACTION_IN_FIELD_OUT_EMD_31_0] = { - .hw_field = DR_STE_V2_ACTION_MDFY_FLD_CFG_HDR_0_1, .start = 0, .end = 31, - }, - [MLX5_ACTION_IN_FIELD_OUT_EMD_47_32] = { - .hw_field = DR_STE_V2_ACTION_MDFY_FLD_CFG_HDR_0_0, .start = 0, .end = 15, - }, -}; +#include "dr_ste_v2.h" static struct mlx5dr_ste_ctx ste_ctx_v2 = { /* Builders */ @@ -223,7 +63,12 @@ static struct mlx5dr_ste_ctx ste_ctx_v2 = { .set_action_decap_l3_list = &dr_ste_v1_set_action_decap_l3_list, .alloc_modify_hdr_chunk = &dr_ste_v1_alloc_modify_hdr_ptrn_arg, .dealloc_modify_hdr_chunk = &dr_ste_v1_free_modify_hdr_ptrn_arg, - + /* Actions bit set */ + .set_encap = &dr_ste_v1_set_encap, + .set_push_vlan = &dr_ste_v1_set_push_vlan, + .set_pop_vlan = &dr_ste_v1_set_pop_vlan, + .set_rx_decap = &dr_ste_v1_set_rx_decap, + .set_encap_l3 = &dr_ste_v1_set_encap_l3, /* Send */ .prepare_for_postsend = &dr_ste_v1_prepare_for_postsend, }; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v2.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v2.h new file mode 100644 index 000000000000..d853fde49cfc --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v2.h @@ -0,0 +1,168 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) 2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */ + +#ifndef _DR_STE_V2_ +#define _DR_STE_V2_ + +enum { + DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_0 = 0x00, + DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_1 = 0x01, + DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_2 = 0x02, + DR_STE_V2_ACTION_MDFY_FLD_SRC_L2_OUT_0 = 0x08, + DR_STE_V2_ACTION_MDFY_FLD_SRC_L2_OUT_1 = 0x09, + DR_STE_V2_ACTION_MDFY_FLD_L3_OUT_0 = 0x0e, + DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_0 = 0x18, + DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_1 = 0x19, + DR_STE_V2_ACTION_MDFY_FLD_IPV4_OUT_0 = 0x40, + DR_STE_V2_ACTION_MDFY_FLD_IPV4_OUT_1 = 0x41, + DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_0 = 0x44, + DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_1 = 0x45, + DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_2 = 0x46, + DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_3 = 0x47, + DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_0 = 0x4c, + DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_1 = 0x4d, + DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_2 = 0x4e, + DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_3 = 0x4f, + DR_STE_V2_ACTION_MDFY_FLD_TCP_MISC_0 = 0x5e, + DR_STE_V2_ACTION_MDFY_FLD_TCP_MISC_1 = 0x5f, + DR_STE_V2_ACTION_MDFY_FLD_CFG_HDR_0_0 = 0x6f, + DR_STE_V2_ACTION_MDFY_FLD_CFG_HDR_0_1 = 0x70, + DR_STE_V2_ACTION_MDFY_FLD_METADATA_2_CQE = 0x7b, + DR_STE_V2_ACTION_MDFY_FLD_GNRL_PURPOSE = 0x7c, + DR_STE_V2_ACTION_MDFY_FLD_REGISTER_2_0 = 0x90, + DR_STE_V2_ACTION_MDFY_FLD_REGISTER_2_1 = 0x91, + DR_STE_V2_ACTION_MDFY_FLD_REGISTER_1_0 = 0x92, + DR_STE_V2_ACTION_MDFY_FLD_REGISTER_1_1 = 0x93, + DR_STE_V2_ACTION_MDFY_FLD_REGISTER_0_0 = 0x94, + DR_STE_V2_ACTION_MDFY_FLD_REGISTER_0_1 = 0x95, +}; + +static const struct mlx5dr_ste_action_modify_field dr_ste_v2_action_modify_field_arr[] = { + [MLX5_ACTION_IN_FIELD_OUT_SMAC_47_16] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_SRC_L2_OUT_0, .start = 0, .end = 31, + }, + [MLX5_ACTION_IN_FIELD_OUT_SMAC_15_0] = { + 
.hw_field = DR_STE_V2_ACTION_MDFY_FLD_SRC_L2_OUT_1, .start = 16, .end = 31, + }, + [MLX5_ACTION_IN_FIELD_OUT_ETHERTYPE] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_1, .start = 0, .end = 15, + }, + [MLX5_ACTION_IN_FIELD_OUT_DMAC_47_16] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_0, .start = 0, .end = 31, + }, + [MLX5_ACTION_IN_FIELD_OUT_DMAC_15_0] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_1, .start = 16, .end = 31, + }, + [MLX5_ACTION_IN_FIELD_OUT_IP_DSCP] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L3_OUT_0, .start = 18, .end = 23, + }, + [MLX5_ACTION_IN_FIELD_OUT_TCP_FLAGS] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_1, .start = 16, .end = 24, + .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_TCP, + }, + [MLX5_ACTION_IN_FIELD_OUT_TCP_SPORT] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_0, .start = 16, .end = 31, + .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_TCP, + }, + [MLX5_ACTION_IN_FIELD_OUT_TCP_DPORT] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_0, .start = 0, .end = 15, + .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_TCP, + }, + [MLX5_ACTION_IN_FIELD_OUT_IP_TTL] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L3_OUT_0, .start = 8, .end = 15, + .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV4, + }, + [MLX5_ACTION_IN_FIELD_OUT_IPV6_HOPLIMIT] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L3_OUT_0, .start = 8, .end = 15, + .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6, + }, + [MLX5_ACTION_IN_FIELD_OUT_UDP_SPORT] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_0, .start = 16, .end = 31, + .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_UDP, + }, + [MLX5_ACTION_IN_FIELD_OUT_UDP_DPORT] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_0, .start = 0, .end = 15, + .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_UDP, + }, + [MLX5_ACTION_IN_FIELD_OUT_SIPV6_127_96] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_0, .start = 0, .end = 31, + .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6, + }, + [MLX5_ACTION_IN_FIELD_OUT_SIPV6_95_64] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_1, .start = 0, .end = 31, + .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6, + }, + [MLX5_ACTION_IN_FIELD_OUT_SIPV6_63_32] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_2, .start = 0, .end = 31, + .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6, + }, + [MLX5_ACTION_IN_FIELD_OUT_SIPV6_31_0] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_3, .start = 0, .end = 31, + .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6, + }, + [MLX5_ACTION_IN_FIELD_OUT_DIPV6_127_96] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_0, .start = 0, .end = 31, + .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6, + }, + [MLX5_ACTION_IN_FIELD_OUT_DIPV6_95_64] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_1, .start = 0, .end = 31, + .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6, + }, + [MLX5_ACTION_IN_FIELD_OUT_DIPV6_63_32] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_2, .start = 0, .end = 31, + .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6, + }, + [MLX5_ACTION_IN_FIELD_OUT_DIPV6_31_0] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_3, .start = 0, .end = 31, + .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6, + }, + [MLX5_ACTION_IN_FIELD_OUT_SIPV4] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV4_OUT_0, .start = 0, .end = 31, + .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV4, + }, + [MLX5_ACTION_IN_FIELD_OUT_DIPV4] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV4_OUT_1, .start = 0, .end = 31, + .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV4, + }, + [MLX5_ACTION_IN_FIELD_METADATA_REG_A] = { + .hw_field = 
DR_STE_V2_ACTION_MDFY_FLD_GNRL_PURPOSE, .start = 0, .end = 31, + }, + [MLX5_ACTION_IN_FIELD_METADATA_REG_B] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_METADATA_2_CQE, .start = 0, .end = 31, + }, + [MLX5_ACTION_IN_FIELD_METADATA_REG_C_0] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_REGISTER_0_0, .start = 0, .end = 31, + }, + [MLX5_ACTION_IN_FIELD_METADATA_REG_C_1] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_REGISTER_0_1, .start = 0, .end = 31, + }, + [MLX5_ACTION_IN_FIELD_METADATA_REG_C_2] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_REGISTER_1_0, .start = 0, .end = 31, + }, + [MLX5_ACTION_IN_FIELD_METADATA_REG_C_3] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_REGISTER_1_1, .start = 0, .end = 31, + }, + [MLX5_ACTION_IN_FIELD_METADATA_REG_C_4] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_REGISTER_2_0, .start = 0, .end = 31, + }, + [MLX5_ACTION_IN_FIELD_METADATA_REG_C_5] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_REGISTER_2_1, .start = 0, .end = 31, + }, + [MLX5_ACTION_IN_FIELD_OUT_TCP_SEQ_NUM] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_TCP_MISC_0, .start = 0, .end = 31, + }, + [MLX5_ACTION_IN_FIELD_OUT_TCP_ACK_NUM] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_TCP_MISC_1, .start = 0, .end = 31, + }, + [MLX5_ACTION_IN_FIELD_OUT_FIRST_VID] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_2, .start = 0, .end = 15, + }, + [MLX5_ACTION_IN_FIELD_OUT_EMD_31_0] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_CFG_HDR_0_1, .start = 0, .end = 31, + }, + [MLX5_ACTION_IN_FIELD_OUT_EMD_47_32] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_CFG_HDR_0_0, .start = 0, .end = 15, + }, +}; + +#endif /* _DR_STE_V2_ */ -- 2.51.0
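The hunks above move the v1 action setters (encap, encap_l3, push/pop VLAN, RX decap) behind function pointers in struct mlx5dr_ste_ctx, and dr_ste_v1_set_actions_tx()/dr_ste_v1_set_actions_rx() now take the context and dispatch through ste_ctx->set_pop_vlan() and friends instead of calling the v1 helpers directly, so the v2 context can reuse the common flow while overriding individual setters. The standalone sketch below is not kernel code; the struct layout, names, and printf bodies are simplified stand-ins used only to illustrate that dispatch pattern.

```c
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for the per-version STE context: only two of the
 * action-setter callbacks added by this patch are modeled. */
struct ste_ctx {
	void (*set_pop_vlan)(uint8_t *hw_ste, uint8_t *s_action, uint8_t vlans_num);
	void (*set_push_vlan)(uint8_t *hw_ste, uint8_t *d_action, uint32_t vlan_hdr);
};

/* "v1" implementations; a later STE version would install its own. */
static void v1_set_pop_vlan(uint8_t *hw_ste, uint8_t *s_action, uint8_t vlans_num)
{
	(void)hw_ste; (void)s_action;
	printf("v1 pop_vlan: remove %u VLAN header(s)\n", vlans_num);
}

static void v1_set_push_vlan(uint8_t *hw_ste, uint8_t *d_action, uint32_t vlan_hdr)
{
	(void)hw_ste; (void)d_action;
	printf("v1 push_vlan: insert VLAN header 0x%08x\n", vlan_hdr);
}

/* Shared action-building flow: dispatches through the context rather than
 * calling the version-specific helpers directly, mirroring how the patched
 * dr_ste_v1_set_actions_tx() uses ste_ctx->set_pop_vlan()/set_push_vlan(). */
static void build_tx_actions(const struct ste_ctx *ctx, uint8_t *hw_ste,
			     uint8_t *action, uint8_t pop_count, uint32_t push_hdr)
{
	if (pop_count)
		ctx->set_pop_vlan(hw_ste, action, pop_count);
	if (push_hdr)
		ctx->set_push_vlan(hw_ste, action, push_hdr);
}

int main(void)
{
	static const struct ste_ctx ste_ctx_v1 = {
		.set_pop_vlan = v1_set_pop_vlan,
		.set_push_vlan = v1_set_push_vlan,
	};
	uint8_t hw_ste[64] = { 0 };
	uint8_t action[12] = { 0 };

	/* Pop one VLAN, then push a hypothetical 802.1Q header value. */
	build_tx_actions(&ste_ctx_v1, hw_ste, action, 1, 0x81000064);
	return 0;
}
```

Under this reading, ste_ctx_v2 keeps reusing the v1 setters today (as the struct initializer hunks show), but a future STE version only has to swap the callbacks it needs instead of duplicating the whole TX/RX action-building path.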