From ac778fefed340e019bc9c022842b4a2cc5713559 Mon Sep 17 00:00:00 2001 From: Carolina Jubran Date: Thu, 7 Nov 2024 21:43:48 +0200 Subject: [PATCH 01/16] net/mlx5: Generalize max_rate and min_rate setting for nodes Refactor max_rate and min_rate setting functions to operate on mlx5_esw_sched_node, allowing for generalized handling of both vports and nodes. Signed-off-by: Carolina Jubran Reviewed-by: Cosmin Ratiu Signed-off-by: Tariq Toukan Link: https://patch.msgid.link/20241107194357.683732-4-tariqt@nvidia.com Signed-off-by: Jakub Kicinski --- .../net/ethernet/mellanox/mlx5/core/esw/qos.c | 69 ++++--------------- 1 file changed, 13 insertions(+), 56 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c index 0c371f27c693..82805bb20c76 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c @@ -245,69 +245,20 @@ static void esw_qos_normalize_min_rate(struct mlx5_eswitch *esw, } } -static int esw_qos_set_vport_min_rate(struct mlx5_vport *vport, - u32 min_rate, struct netlink_ext_ack *extack) -{ - struct mlx5_esw_sched_node *vport_node = vport->qos.sched_node; - bool min_rate_supported; - u32 fw_max_bw_share; - - esw_assert_qos_lock_held(vport_node->esw); - fw_max_bw_share = MLX5_CAP_QOS(vport->dev, max_tsar_bw_share); - min_rate_supported = MLX5_CAP_QOS(vport->dev, esw_bw_share) && - fw_max_bw_share >= MLX5_MIN_BW_SHARE; - if (min_rate && !min_rate_supported) - return -EOPNOTSUPP; - if (min_rate == vport_node->min_rate) - return 0; - - vport_node->min_rate = min_rate; - esw_qos_normalize_min_rate(vport_node->parent->esw, vport_node->parent, extack); - - return 0; -} - -static int esw_qos_set_vport_max_rate(struct mlx5_vport *vport, - u32 max_rate, struct netlink_ext_ack *extack) -{ - struct mlx5_esw_sched_node *vport_node = vport->qos.sched_node; - u32 act_max_rate = max_rate; - bool max_rate_supported; - int err; - - esw_assert_qos_lock_held(vport_node->esw); - max_rate_supported = MLX5_CAP_QOS(vport->dev, esw_rate_limit); - - if (max_rate && !max_rate_supported) - return -EOPNOTSUPP; - if (max_rate == vport_node->max_rate) - return 0; - - /* Use parent node limit if new max rate is 0. */ - if (!max_rate) - act_max_rate = vport_node->parent->max_rate; - - err = esw_qos_sched_elem_config(vport_node, act_max_rate, vport_node->bw_share, extack); - if (!err) - vport_node->max_rate = max_rate; - - return err; -} - static int esw_qos_set_node_min_rate(struct mlx5_esw_sched_node *node, u32 min_rate, struct netlink_ext_ack *extack) { struct mlx5_eswitch *esw = node->esw; - if (!MLX5_CAP_QOS(esw->dev, esw_bw_share) || - MLX5_CAP_QOS(esw->dev, max_tsar_bw_share) < MLX5_MIN_BW_SHARE) + if (min_rate && (!MLX5_CAP_QOS(esw->dev, esw_bw_share) || + MLX5_CAP_QOS(esw->dev, max_tsar_bw_share) < MLX5_MIN_BW_SHARE)) return -EOPNOTSUPP; if (min_rate == node->min_rate) return 0; node->min_rate = min_rate; - esw_qos_normalize_min_rate(esw, NULL, extack); + esw_qos_normalize_min_rate(esw, node->parent, extack); return 0; } @@ -321,11 +272,17 @@ static int esw_qos_set_node_max_rate(struct mlx5_esw_sched_node *node, if (node->max_rate == max_rate) return 0; + /* Use parent node limit if new max rate is 0. 
*/ + if (!max_rate && node->parent) + max_rate = node->parent->max_rate; + err = esw_qos_sched_elem_config(node, max_rate, node->bw_share, extack); if (err) return err; node->max_rate = max_rate; + if (node->type != SCHED_NODE_TYPE_VPORTS_TSAR) + return 0; /* Any unlimited vports in the node should be set with the value of the node. */ list_for_each_entry(vport_node, &node->children, entry) { @@ -748,9 +705,9 @@ int mlx5_esw_qos_set_vport_rate(struct mlx5_vport *vport, u32 max_rate, u32 min_ if (err) goto unlock; - err = esw_qos_set_vport_min_rate(vport, min_rate, NULL); + err = esw_qos_set_node_min_rate(vport->qos.sched_node, min_rate, NULL); if (!err) - err = esw_qos_set_vport_max_rate(vport, max_rate, NULL); + err = esw_qos_set_node_max_rate(vport->qos.sched_node, max_rate, NULL); unlock: esw_qos_unlock(esw); return err; @@ -947,7 +904,7 @@ int mlx5_esw_devlink_rate_leaf_tx_share_set(struct devlink_rate *rate_leaf, void if (err) goto unlock; - err = esw_qos_set_vport_min_rate(vport, tx_share, extack); + err = esw_qos_set_node_min_rate(vport->qos.sched_node, tx_share, extack); unlock: esw_qos_unlock(esw); return err; @@ -973,7 +930,7 @@ int mlx5_esw_devlink_rate_leaf_tx_max_set(struct devlink_rate *rate_leaf, void * if (err) goto unlock; - err = esw_qos_set_vport_max_rate(vport, tx_max, extack); + err = esw_qos_set_node_max_rate(vport->qos.sched_node, tx_max, extack); unlock: esw_qos_unlock(esw); return err; -- 2.51.0 From cc4bb15ffa8412bfe1e189d37edb6ca7d9918cb4 Mon Sep 17 00:00:00 2001 From: Carolina Jubran Date: Thu, 7 Nov 2024 21:43:49 +0200 Subject: [PATCH 02/16] net/mlx5: Refactor scheduling element configuration bitmasks Refactor esw_qos_sched_elem_config to set bitmasks only when max_rate or bw_share values change, allowing the function to configure nodes with only one of these parameters. This enables more flexible usage for nodes where only one parameter requires configuration. Remove scattered assignments and checks to centralize them within this function, removing the now redundant esw_qos_set_node_max_rate entirely. With this refactor, also remove the assignment of the vport scheduling node max rate to the parent max rate for unlimited vports (where max rate is set to zero), as firmware already handles this behavior. 
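The core idea of the refactor can be reduced to a few lines: only fields whose value actually changes get a bit in the modify bitmask, so a caller touching a single parameter produces a single-bit mask and an unchanged element produces no firmware command at all. The following is a self-contained sketch of that "flag only what changed" pattern, with made-up names and bit values rather than the mlx5 firmware interface:

	#include <stdio.h>

	/* Hypothetical modify-bitmask bits, for illustration only. */
	#define MODIFY_MAX_AVERAGE_BW  (1u << 0)
	#define MODIFY_BW_SHARE        (1u << 1)

	struct sched_elem {
		unsigned int max_rate;
		unsigned int bw_share;
	};

	static unsigned int build_modify_bitmask(const struct sched_elem *cur,
						 unsigned int max_rate,
						 unsigned int bw_share)
	{
		unsigned int bitmask = 0;

		if (cur->max_rate != max_rate)
			bitmask |= MODIFY_MAX_AVERAGE_BW;
		if (cur->bw_share != bw_share)
			bitmask |= MODIFY_BW_SHARE;
		return bitmask;	/* 0 means there is nothing to tell the firmware */
	}

	int main(void)
	{
		struct sched_elem node = { .max_rate = 1000, .bw_share = 2 };

		/* Only bw_share differs, so only its bit ends up in the mask. */
		printf("%#x\n", build_modify_bitmask(&node, 1000, 4));
		return 0;
	}
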
Signed-off-by: Carolina Jubran Reviewed-by: Cosmin Ratiu Signed-off-by: Tariq Toukan Link: https://patch.msgid.link/20241107194357.683732-5-tariqt@nvidia.com Signed-off-by: Jakub Kicinski --- .../net/ethernet/mellanox/mlx5/core/esw/qos.c | 80 ++++++------------- 1 file changed, 24 insertions(+), 56 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c index 82805bb20c76..c1e7b2425ebe 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c @@ -143,10 +143,21 @@ static int esw_qos_sched_elem_config(struct mlx5_esw_sched_node *node, u32 max_r if (!MLX5_CAP_GEN(dev, qos) || !MLX5_CAP_QOS(dev, esw_scheduling)) return -EOPNOTSUPP; - MLX5_SET(scheduling_context, sched_ctx, max_average_bw, max_rate); - MLX5_SET(scheduling_context, sched_ctx, bw_share, bw_share); - bitmask |= MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_MAX_AVERAGE_BW; - bitmask |= MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_BW_SHARE; + if (bw_share && (!MLX5_CAP_QOS(dev, esw_bw_share) || + MLX5_CAP_QOS(dev, max_tsar_bw_share) < MLX5_MIN_BW_SHARE)) + return -EOPNOTSUPP; + + if (node->max_rate == max_rate && node->bw_share == bw_share) + return 0; + + if (node->max_rate != max_rate) { + MLX5_SET(scheduling_context, sched_ctx, max_average_bw, max_rate); + bitmask |= MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_MAX_AVERAGE_BW; + } + if (node->bw_share != bw_share) { + MLX5_SET(scheduling_context, sched_ctx, bw_share, bw_share); + bitmask |= MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_BW_SHARE; + } err = mlx5_modify_scheduling_element_cmd(dev, SCHEDULING_HIERARCHY_E_SWITCH, @@ -160,6 +171,8 @@ static int esw_qos_sched_elem_config(struct mlx5_esw_sched_node *node, u32 max_r return err; } + node->max_rate = max_rate; + node->bw_share = bw_share; if (node->type == SCHED_NODE_TYPE_VPORTS_TSAR) trace_mlx5_esw_node_qos_config(dev, node, node->ix, bw_share, max_rate); else if (node->type == SCHED_NODE_TYPE_VPORT) @@ -217,11 +230,7 @@ static void esw_qos_update_sched_node_bw_share(struct mlx5_esw_sched_node *node, bw_share = esw_qos_calc_bw_share(node->min_rate, divider, fw_max_bw_share); - if (bw_share == node->bw_share) - return; - esw_qos_sched_elem_config(node, node->max_rate, bw_share, extack); - node->bw_share = bw_share; } static void esw_qos_normalize_min_rate(struct mlx5_eswitch *esw, @@ -250,10 +259,6 @@ static int esw_qos_set_node_min_rate(struct mlx5_esw_sched_node *node, { struct mlx5_eswitch *esw = node->esw; - if (min_rate && (!MLX5_CAP_QOS(esw->dev, esw_bw_share) || - MLX5_CAP_QOS(esw->dev, max_tsar_bw_share) < MLX5_MIN_BW_SHARE)) - return -EOPNOTSUPP; - if (min_rate == node->min_rate) return 0; @@ -263,41 +268,6 @@ static int esw_qos_set_node_min_rate(struct mlx5_esw_sched_node *node, return 0; } -static int esw_qos_set_node_max_rate(struct mlx5_esw_sched_node *node, - u32 max_rate, struct netlink_ext_ack *extack) -{ - struct mlx5_esw_sched_node *vport_node; - int err; - - if (node->max_rate == max_rate) - return 0; - - /* Use parent node limit if new max rate is 0. */ - if (!max_rate && node->parent) - max_rate = node->parent->max_rate; - - err = esw_qos_sched_elem_config(node, max_rate, node->bw_share, extack); - if (err) - return err; - - node->max_rate = max_rate; - if (node->type != SCHED_NODE_TYPE_VPORTS_TSAR) - return 0; - - /* Any unlimited vports in the node should be set with the value of the node. 
*/ - list_for_each_entry(vport_node, &node->children, entry) { - if (vport_node->max_rate) - continue; - - err = esw_qos_sched_elem_config(vport_node, max_rate, vport_node->bw_share, extack); - if (err) - NL_SET_ERR_MSG_MOD(extack, - "E-Switch vport implicit rate limit setting failed"); - } - - return err; -} - static int esw_qos_create_node_sched_elem(struct mlx5_core_dev *dev, u32 parent_element_id, u32 *tsar_ix) { @@ -367,7 +337,6 @@ static int esw_qos_update_node_scheduling_element(struct mlx5_vport *vport, struct netlink_ext_ack *extack) { struct mlx5_esw_sched_node *vport_node = vport->qos.sched_node; - u32 max_rate; int err; err = mlx5_destroy_scheduling_element_cmd(curr_node->esw->dev, @@ -378,9 +347,7 @@ static int esw_qos_update_node_scheduling_element(struct mlx5_vport *vport, return err; } - /* Use new node max rate if vport max rate is unlimited. */ - max_rate = vport_node->max_rate ? vport_node->max_rate : new_node->max_rate; - err = esw_qos_vport_create_sched_element(vport, new_node, max_rate, + err = esw_qos_vport_create_sched_element(vport, new_node, vport_node->max_rate, vport_node->bw_share, &vport_node->ix); if (err) { @@ -393,8 +360,7 @@ static int esw_qos_update_node_scheduling_element(struct mlx5_vport *vport, return 0; err_sched: - max_rate = vport_node->max_rate ? vport_node->max_rate : curr_node->max_rate; - if (esw_qos_vport_create_sched_element(vport, curr_node, max_rate, + if (esw_qos_vport_create_sched_element(vport, curr_node, vport_node->max_rate, vport_node->bw_share, &vport_node->ix)) esw_warn(curr_node->esw->dev, "E-Switch vport node restore failed (vport=%d)\n", @@ -707,7 +673,8 @@ int mlx5_esw_qos_set_vport_rate(struct mlx5_vport *vport, u32 max_rate, u32 min_ err = esw_qos_set_node_min_rate(vport->qos.sched_node, min_rate, NULL); if (!err) - err = esw_qos_set_node_max_rate(vport->qos.sched_node, max_rate, NULL); + err = esw_qos_sched_elem_config(vport->qos.sched_node, max_rate, + vport->qos.sched_node->bw_share, NULL); unlock: esw_qos_unlock(esw); return err; @@ -930,7 +897,8 @@ int mlx5_esw_devlink_rate_leaf_tx_max_set(struct devlink_rate *rate_leaf, void * if (err) goto unlock; - err = esw_qos_set_node_max_rate(vport->qos.sched_node, tx_max, extack); + err = esw_qos_sched_elem_config(vport->qos.sched_node, tx_max, + vport->qos.sched_node->bw_share, extack); unlock: esw_qos_unlock(esw); return err; @@ -965,7 +933,7 @@ int mlx5_esw_devlink_rate_node_tx_max_set(struct devlink_rate *rate_node, void * return err; esw_qos_lock(esw); - err = esw_qos_set_node_max_rate(node, tx_max, extack); + err = esw_qos_sched_elem_config(node, tx_max, node->bw_share, extack); esw_qos_unlock(esw); return err; } -- 2.51.0 From 663bc605d0db8782ff9c2704db5ce6cf2ac7fa93 Mon Sep 17 00:00:00 2001 From: Carolina Jubran Date: Thu, 7 Nov 2024 21:43:50 +0200 Subject: [PATCH 03/16] net/mlx5: Generalize scheduling element operations Introduce helper functions to create and destroy scheduling elements, allowing flexible configuration for different scheduling element types. The new helper functions streamline the process by centralizing error handling and logging through esw_qos_sched_elem_op_warn, which now accepts the operation type (create, destroy, or modify). The changes also adjust the esw_qos_vport_enable and mlx5_esw_qos_vport_disable functions to leverage the new generalized create/destroy helpers. The destroy functions now log errors with esw_warn without returning them. 
This prevents unnecessary error handling since the node was already destroyed and no further action is required from callers. Signed-off-by: Carolina Jubran Reviewed-by: Cosmin Ratiu Signed-off-by: Tariq Toukan Link: https://patch.msgid.link/20241107194357.683732-6-tariqt@nvidia.com Signed-off-by: Jakub Kicinski --- .../net/ethernet/mellanox/mlx5/core/esw/qos.c | 157 +++++++++--------- 1 file changed, 76 insertions(+), 81 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c index c1e7b2425ebe..155400d36a1e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c @@ -118,18 +118,49 @@ mlx5_esw_qos_vport_get_parent(const struct mlx5_vport *vport) return vport->qos.sched_node->parent; } -static void esw_qos_sched_elem_config_warn(struct mlx5_esw_sched_node *node, int err) +static void esw_qos_sched_elem_warn(struct mlx5_esw_sched_node *node, int err, const char *op) { if (node->vport) { esw_warn(node->esw->dev, - "E-Switch modify %s scheduling element failed (vport=%d,err=%d)\n", - sched_node_type_str[node->type], node->vport->vport, err); + "E-Switch %s %s scheduling element failed (vport=%d,err=%d)\n", + op, sched_node_type_str[node->type], node->vport->vport, err); return; } esw_warn(node->esw->dev, - "E-Switch modify %s scheduling element failed (err=%d)\n", - sched_node_type_str[node->type], err); + "E-Switch %s %s scheduling element failed (err=%d)\n", + op, sched_node_type_str[node->type], err); +} + +static int esw_qos_node_create_sched_element(struct mlx5_esw_sched_node *node, void *ctx, + struct netlink_ext_ack *extack) +{ + int err; + + err = mlx5_create_scheduling_element_cmd(node->esw->dev, SCHEDULING_HIERARCHY_E_SWITCH, ctx, + &node->ix); + if (err) { + esw_qos_sched_elem_warn(node, err, "create"); + NL_SET_ERR_MSG_MOD(extack, "E-Switch create scheduling element failed"); + } + + return err; +} + +static int esw_qos_node_destroy_sched_element(struct mlx5_esw_sched_node *node, + struct netlink_ext_ack *extack) +{ + int err; + + err = mlx5_destroy_scheduling_element_cmd(node->esw->dev, + SCHEDULING_HIERARCHY_E_SWITCH, + node->ix); + if (err) { + esw_qos_sched_elem_warn(node, err, "destroy"); + NL_SET_ERR_MSG_MOD(extack, "E-Switch destroying scheduling element failed."); + } + + return err; } static int esw_qos_sched_elem_config(struct mlx5_esw_sched_node *node, u32 max_rate, u32 bw_share, @@ -165,7 +196,7 @@ static int esw_qos_sched_elem_config(struct mlx5_esw_sched_node *node, u32 max_r node->ix, bitmask); if (err) { - esw_qos_sched_elem_config_warn(node, err); + esw_qos_sched_elem_warn(node, err, "modify"); NL_SET_ERR_MSG_MOD(extack, "E-Switch modify scheduling element failed"); return err; @@ -295,14 +326,12 @@ static int esw_qos_create_node_sched_elem(struct mlx5_core_dev *dev, u32 parent_ tsar_ix); } -static int -esw_qos_vport_create_sched_element(struct mlx5_vport *vport, struct mlx5_esw_sched_node *parent, - u32 max_rate, u32 bw_share, u32 *sched_elem_ix) +static int esw_qos_vport_create_sched_element(struct mlx5_esw_sched_node *vport_node, u32 bw_share, + struct netlink_ext_ack *extack) { u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {}; - struct mlx5_core_dev *dev = parent->esw->dev; + struct mlx5_core_dev *dev = vport_node->esw->dev; void *attr; - int err; if (!mlx5_qos_element_type_supported(dev, SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT, @@ -312,23 +341,12 @@ esw_qos_vport_create_sched_element(struct mlx5_vport *vport, struct 
mlx5_esw_sch MLX5_SET(scheduling_context, sched_ctx, element_type, SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT); attr = MLX5_ADDR_OF(scheduling_context, sched_ctx, element_attributes); - MLX5_SET(vport_element, attr, vport_number, vport->vport); - MLX5_SET(scheduling_context, sched_ctx, parent_element_id, parent->ix); - MLX5_SET(scheduling_context, sched_ctx, max_average_bw, max_rate); + MLX5_SET(vport_element, attr, vport_number, vport_node->vport->vport); + MLX5_SET(scheduling_context, sched_ctx, parent_element_id, vport_node->parent->ix); + MLX5_SET(scheduling_context, sched_ctx, max_average_bw, vport_node->max_rate); MLX5_SET(scheduling_context, sched_ctx, bw_share, bw_share); - err = mlx5_create_scheduling_element_cmd(dev, - SCHEDULING_HIERARCHY_E_SWITCH, - sched_ctx, - sched_elem_ix); - if (err) { - esw_warn(dev, - "E-Switch create vport scheduling element failed (vport=%d,err=%d)\n", - vport->vport, err); - return err; - } - - return 0; + return esw_qos_node_create_sched_element(vport_node, sched_ctx, extack); } static int esw_qos_update_node_scheduling_element(struct mlx5_vport *vport, @@ -339,30 +357,22 @@ static int esw_qos_update_node_scheduling_element(struct mlx5_vport *vport, struct mlx5_esw_sched_node *vport_node = vport->qos.sched_node; int err; - err = mlx5_destroy_scheduling_element_cmd(curr_node->esw->dev, - SCHEDULING_HIERARCHY_E_SWITCH, - vport_node->ix); - if (err) { - NL_SET_ERR_MSG_MOD(extack, "E-Switch destroy vport scheduling element failed"); + err = esw_qos_node_destroy_sched_element(vport_node, extack); + if (err) return err; - } - err = esw_qos_vport_create_sched_element(vport, new_node, vport_node->max_rate, - vport_node->bw_share, - &vport_node->ix); + esw_qos_node_set_parent(vport_node, new_node); + err = esw_qos_vport_create_sched_element(vport_node, vport_node->bw_share, extack); if (err) { NL_SET_ERR_MSG_MOD(extack, "E-Switch vport node set failed."); goto err_sched; } - esw_qos_node_set_parent(vport->qos.sched_node, new_node); - return 0; err_sched: - if (esw_qos_vport_create_sched_element(vport, curr_node, vport_node->max_rate, - vport_node->bw_share, - &vport_node->ix)) + esw_qos_node_set_parent(vport_node, curr_node); + if (esw_qos_vport_create_sched_element(vport_node, vport_node->bw_share, NULL)) esw_warn(curr_node->esw->dev, "E-Switch vport node restore failed (vport=%d)\n", vport->vport); @@ -425,6 +435,12 @@ static void __esw_qos_free_node(struct mlx5_esw_sched_node *node) kfree(node); } +static void esw_qos_destroy_node(struct mlx5_esw_sched_node *node, struct netlink_ext_ack *extack) +{ + esw_qos_node_destroy_sched_element(node, extack); + __esw_qos_free_node(node); +} + static struct mlx5_esw_sched_node * __esw_qos_create_vports_sched_node(struct mlx5_eswitch *esw, struct mlx5_esw_sched_node *parent, struct netlink_ext_ack *extack) @@ -483,23 +499,13 @@ esw_qos_create_vports_sched_node(struct mlx5_eswitch *esw, struct netlink_ext_ac return node; } -static int __esw_qos_destroy_node(struct mlx5_esw_sched_node *node, struct netlink_ext_ack *extack) +static void __esw_qos_destroy_node(struct mlx5_esw_sched_node *node, struct netlink_ext_ack *extack) { struct mlx5_eswitch *esw = node->esw; - int err; trace_mlx5_esw_node_qos_destroy(esw->dev, node, node->ix); - - err = mlx5_destroy_scheduling_element_cmd(esw->dev, - SCHEDULING_HIERARCHY_E_SWITCH, - node->ix); - if (err) - NL_SET_ERR_MSG_MOD(extack, "E-Switch destroy TSAR_ID failed"); - __esw_qos_free_node(node); - + esw_qos_destroy_node(node, extack); esw_qos_normalize_min_rate(esw, NULL, extack); - - 
return err; } static int esw_qos_create(struct mlx5_eswitch *esw, struct netlink_ext_ack *extack) @@ -584,11 +590,11 @@ static void esw_qos_put(struct mlx5_eswitch *esw) esw_qos_destroy(esw); } -static int esw_qos_vport_enable(struct mlx5_vport *vport, - u32 max_rate, u32 bw_share, struct netlink_ext_ack *extack) +static int esw_qos_vport_enable(struct mlx5_vport *vport, u32 max_rate, u32 bw_share, + struct netlink_ext_ack *extack) { struct mlx5_eswitch *esw = vport->dev->priv.eswitch; - u32 sched_elem_ix; + struct mlx5_esw_sched_node *sched_node; int err; esw_assert_qos_lock_held(esw); @@ -599,29 +605,28 @@ static int esw_qos_vport_enable(struct mlx5_vport *vport, if (err) return err; - err = esw_qos_vport_create_sched_element(vport, esw->qos.node0, max_rate, bw_share, - &sched_elem_ix); - if (err) - goto err_out; - - vport->qos.sched_node = __esw_qos_alloc_node(esw, sched_elem_ix, SCHED_NODE_TYPE_VPORT, - esw->qos.node0); - if (!vport->qos.sched_node) { + sched_node = __esw_qos_alloc_node(esw, 0, SCHED_NODE_TYPE_VPORT, esw->qos.node0); + if (!sched_node) { err = -ENOMEM; goto err_alloc; } - vport->qos.sched_node->vport = vport; + sched_node->max_rate = max_rate; + sched_node->min_rate = 0; + sched_node->bw_share = bw_share; + sched_node->vport = vport; + err = esw_qos_vport_create_sched_element(sched_node, 0, extack); + if (err) + goto err_vport_create; trace_mlx5_esw_vport_qos_create(vport->dev, vport, bw_share, max_rate); + vport->qos.sched_node = sched_node; return 0; +err_vport_create: + __esw_qos_free_node(sched_node); err_alloc: - if (mlx5_destroy_scheduling_element_cmd(esw->dev, - SCHEDULING_HIERARCHY_E_SWITCH, sched_elem_ix)) - esw_warn(esw->dev, "E-Switch destroy vport scheduling element failed.\n"); -err_out: esw_qos_put(esw); return err; @@ -632,7 +637,6 @@ void mlx5_esw_qos_vport_disable(struct mlx5_vport *vport) struct mlx5_eswitch *esw = vport->dev->priv.eswitch; struct mlx5_esw_sched_node *vport_node; struct mlx5_core_dev *dev; - int err; lockdep_assert_held(&esw->state_lock); esw_qos_lock(esw); @@ -645,15 +649,7 @@ void mlx5_esw_qos_vport_disable(struct mlx5_vport *vport) dev = vport_node->esw->dev; trace_mlx5_esw_vport_qos_destroy(dev, vport); - err = mlx5_destroy_scheduling_element_cmd(dev, - SCHEDULING_HIERARCHY_E_SWITCH, - vport_node->ix); - if (err) - esw_warn(dev, - "E-Switch destroy vport scheduling element failed (vport=%d,err=%d)\n", - vport->vport, err); - - __esw_qos_free_node(vport_node); + esw_qos_destroy_node(vport_node, NULL); memset(&vport->qos, 0, sizeof(vport->qos)); esw_qos_put(esw); @@ -974,13 +970,12 @@ int mlx5_esw_devlink_rate_node_del(struct devlink_rate *rate_node, void *priv, { struct mlx5_esw_sched_node *node = priv; struct mlx5_eswitch *esw = node->esw; - int err; esw_qos_lock(esw); - err = __esw_qos_destroy_node(node, extack); + __esw_qos_destroy_node(node, extack); esw_qos_put(esw); esw_qos_unlock(esw); - return err; + return 0; } int mlx5_esw_qos_vport_update_node(struct mlx5_vport *vport, -- 2.51.0 From d67bfd10e668bfca717e0d94112f04f61c58dad7 Mon Sep 17 00:00:00 2001 From: Carolina Jubran Date: Thu, 7 Nov 2024 21:43:51 +0200 Subject: [PATCH 04/16] net/mlx5: Integrate esw_qos_vport_enable logic into rate operations Fold the esw_qos_vport_enable function into operations for configuring maximum and minimum rates, simplifying QoS logic. This change consolidates enabling and updating the scheduling element configuration, streamlining how vport QoS is initialized and adjusted. 
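The resulting flow amounts to lazy enablement: the first rate request implicitly enables vport QoS, while later requests only reconfigure the existing scheduling element. A tiny standalone sketch of that shape, with made-up types rather than the driver code:

	struct vport_qos {
		int enabled;			/* stands in for vport->qos.sched_node != NULL */
		unsigned int max_rate;
	};

	static int qos_set_max_rate(struct vport_qos *q, unsigned int max_rate)
	{
		if (!q->enabled) {
			q->enabled = 1;		/* first use: enable QoS, create the element */
			q->max_rate = max_rate;
			return 0;
		}
		q->max_rate = max_rate;		/* later uses: modify the existing element */
		return 0;
	}

	int main(void)
	{
		struct vport_qos q = { 0 };

		qos_set_max_rate(&q, 500);	/* enables QoS implicitly */
		qos_set_max_rate(&q, 800);	/* plain update */
		return q.enabled ? 0 : 1;
	}
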
Signed-off-by: Carolina Jubran Reviewed-by: Cosmin Ratiu Signed-off-by: Tariq Toukan Link: https://patch.msgid.link/20241107194357.683732-7-tariqt@nvidia.com Signed-off-by: Jakub Kicinski --- .../net/ethernet/mellanox/mlx5/core/esw/qos.c | 87 +++++++++---------- 1 file changed, 39 insertions(+), 48 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c index 155400d36a1e..35e493924c09 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c @@ -590,22 +590,21 @@ static void esw_qos_put(struct mlx5_eswitch *esw) esw_qos_destroy(esw); } -static int esw_qos_vport_enable(struct mlx5_vport *vport, u32 max_rate, u32 bw_share, - struct netlink_ext_ack *extack) +static int esw_qos_vport_enable(struct mlx5_vport *vport, struct mlx5_esw_sched_node *parent, + u32 max_rate, u32 bw_share, struct netlink_ext_ack *extack) { struct mlx5_eswitch *esw = vport->dev->priv.eswitch; struct mlx5_esw_sched_node *sched_node; int err; esw_assert_qos_lock_held(esw); - if (vport->qos.sched_node) - return 0; err = esw_qos_get(esw, extack); if (err) return err; - sched_node = __esw_qos_alloc_node(esw, 0, SCHED_NODE_TYPE_VPORT, esw->qos.node0); + parent = parent ?: esw->qos.node0; + sched_node = __esw_qos_alloc_node(parent->esw, 0, SCHED_NODE_TYPE_VPORT, parent); if (!sched_node) { err = -ENOMEM; goto err_alloc; @@ -657,21 +656,42 @@ unlock: esw_qos_unlock(esw); } +static int mlx5_esw_qos_set_vport_max_rate(struct mlx5_vport *vport, u32 max_rate, + struct netlink_ext_ack *extack) +{ + struct mlx5_esw_sched_node *vport_node = vport->qos.sched_node; + + esw_assert_qos_lock_held(vport->dev->priv.eswitch); + + if (!vport_node) + return esw_qos_vport_enable(vport, NULL, max_rate, 0, extack); + else + return esw_qos_sched_elem_config(vport_node, max_rate, vport_node->bw_share, + extack); +} + +static int mlx5_esw_qos_set_vport_min_rate(struct mlx5_vport *vport, u32 min_rate, + struct netlink_ext_ack *extack) +{ + struct mlx5_esw_sched_node *vport_node = vport->qos.sched_node; + + esw_assert_qos_lock_held(vport->dev->priv.eswitch); + + if (!vport_node) + return esw_qos_vport_enable(vport, NULL, 0, min_rate, extack); + else + return esw_qos_set_node_min_rate(vport_node, min_rate, extack); +} + int mlx5_esw_qos_set_vport_rate(struct mlx5_vport *vport, u32 max_rate, u32 min_rate) { struct mlx5_eswitch *esw = vport->dev->priv.eswitch; int err; esw_qos_lock(esw); - err = esw_qos_vport_enable(vport, 0, 0, NULL); - if (err) - goto unlock; - - err = esw_qos_set_node_min_rate(vport->qos.sched_node, min_rate, NULL); + err = mlx5_esw_qos_set_vport_min_rate(vport, min_rate, NULL); if (!err) - err = esw_qos_sched_elem_config(vport->qos.sched_node, max_rate, - vport->qos.sched_node->bw_share, NULL); -unlock: + err = mlx5_esw_qos_set_vport_max_rate(vport, max_rate, NULL); esw_qos_unlock(esw); return err; } @@ -757,10 +777,8 @@ static int mlx5_esw_qos_link_speed_verify(struct mlx5_core_dev *mdev, int mlx5_esw_qos_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num, u32 rate_mbps) { - u32 ctx[MLX5_ST_SZ_DW(scheduling_context)] = {}; struct mlx5_vport *vport; u32 link_speed_max; - u32 bitmask; int err; vport = mlx5_eswitch_get_vport(esw, vport_num); @@ -779,20 +797,7 @@ int mlx5_esw_qos_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num, u32 } esw_qos_lock(esw); - if (!vport->qos.sched_node) { - /* Eswitch QoS wasn't enabled yet. Enable it and vport QoS. 
*/ - err = esw_qos_vport_enable(vport, rate_mbps, 0, NULL); - } else { - struct mlx5_core_dev *dev = vport->qos.sched_node->parent->esw->dev; - - MLX5_SET(scheduling_context, ctx, max_average_bw, rate_mbps); - bitmask = MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_MAX_AVERAGE_BW; - err = mlx5_modify_scheduling_element_cmd(dev, - SCHEDULING_HIERARCHY_E_SWITCH, - ctx, - vport->qos.sched_node->ix, - bitmask); - } + err = mlx5_esw_qos_set_vport_max_rate(vport, rate_mbps, NULL); esw_qos_unlock(esw); return err; @@ -863,12 +868,7 @@ int mlx5_esw_devlink_rate_leaf_tx_share_set(struct devlink_rate *rate_leaf, void return err; esw_qos_lock(esw); - err = esw_qos_vport_enable(vport, 0, 0, extack); - if (err) - goto unlock; - - err = esw_qos_set_node_min_rate(vport->qos.sched_node, tx_share, extack); -unlock: + err = mlx5_esw_qos_set_vport_min_rate(vport, tx_share, extack); esw_qos_unlock(esw); return err; } @@ -889,13 +889,7 @@ int mlx5_esw_devlink_rate_leaf_tx_max_set(struct devlink_rate *rate_leaf, void * return err; esw_qos_lock(esw); - err = esw_qos_vport_enable(vport, 0, 0, extack); - if (err) - goto unlock; - - err = esw_qos_sched_elem_config(vport->qos.sched_node, tx_max, - vport->qos.sched_node->bw_share, extack); -unlock: + err = mlx5_esw_qos_set_vport_max_rate(vport, tx_max, extack); esw_qos_unlock(esw); return err; } @@ -991,13 +985,10 @@ int mlx5_esw_qos_vport_update_node(struct mlx5_vport *vport, } esw_qos_lock(esw); - if (!vport->qos.sched_node && !node) - goto unlock; - - err = esw_qos_vport_enable(vport, 0, 0, extack); - if (!err) + if (!vport->qos.sched_node && node) + err = esw_qos_vport_enable(vport, node, 0, 0, extack); + else if (vport->qos.sched_node) err = esw_qos_vport_update_node(vport, node, extack); -unlock: esw_qos_unlock(esw); return err; } -- 2.51.0 From be034baba83e2a80a0b2c0f24c08547b6eedc79a Mon Sep 17 00:00:00 2001 From: Carolina Jubran Date: Thu, 7 Nov 2024 21:43:52 +0200 Subject: [PATCH 05/16] net/mlx5: Make vport QoS enablement more flexible for future extensions Refactor esw_qos_vport_enable to support more generic configurations, allowing it to be reused for new vport node types in future patches. This refactor includes a new way to change the vport parent node by disabling the current setup and re-enabling it with the new parent. This change sets the foundation for adapting configuration based on the parent type in future patches. 
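The parent change boils down to a disable/re-enable sequence with a best-effort rollback. A minimal standalone sketch of that pattern, using hypothetical names rather than the driver's API:

	#include <stdio.h>

	struct node {
		struct node *parent;
		int active;
	};

	static void node_disable(struct node *n)
	{
		n->active = 0;		/* stands in for destroying the HW scheduling element */
	}

	static int node_enable(struct node *n, struct node *parent)
	{
		n->parent = parent;	/* stands in for recreating the element under 'parent' */
		n->active = 1;
		return 0;
	}

	/* Change the parent by disabling the node and re-enabling it under the
	 * new parent, restoring the previous parent if the re-enable fails. */
	static int node_update_parent(struct node *n, struct node *new_parent)
	{
		struct node *curr_parent = n->parent;
		int err;

		if (curr_parent == new_parent)
			return 0;

		node_disable(n);
		err = node_enable(n, new_parent);
		if (err && node_enable(n, curr_parent))
			fprintf(stderr, "restore failed\n");
		return err;
	}

	int main(void)
	{
		struct node p1 = { 0 }, p2 = { 0 };
		struct node n = { .parent = &p1, .active = 1 };

		return node_update_parent(&n, &p2);
	}
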
Signed-off-by: Carolina Jubran Reviewed-by: Cosmin Ratiu Signed-off-by: Tariq Toukan Link: https://patch.msgid.link/20241107194357.683732-8-tariqt@nvidia.com Signed-off-by: Jakub Kicinski --- .../mellanox/mlx5/core/esw/devlink_port.c | 2 +- .../net/ethernet/mellanox/mlx5/core/esw/qos.c | 193 ++++++++---------- .../net/ethernet/mellanox/mlx5/core/esw/qos.h | 1 + .../net/ethernet/mellanox/mlx5/core/eswitch.c | 6 +- .../net/ethernet/mellanox/mlx5/core/eswitch.h | 5 +- 5 files changed, 96 insertions(+), 111 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c index d0f38818363f..982fe3714683 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c @@ -195,7 +195,7 @@ void mlx5_esw_offloads_devlink_port_unregister(struct mlx5_vport *vport) return; dl_port = vport->dl_port; - mlx5_esw_qos_vport_update_node(vport, NULL, NULL); + mlx5_esw_qos_vport_update_parent(vport, NULL, NULL); devl_rate_leaf_destroy(&dl_port->dl_port); devl_port_unregister(&dl_port->dl_port); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c index 35e493924c09..8b7c843446e1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c @@ -101,6 +101,12 @@ esw_qos_node_set_parent(struct mlx5_esw_sched_node *node, struct mlx5_esw_sched_ node->esw = parent->esw; } +void mlx5_esw_qos_vport_qos_free(struct mlx5_vport *vport) +{ + kfree(vport->qos.sched_node); + memset(&vport->qos, 0, sizeof(vport->qos)); +} + u32 mlx5_esw_qos_vport_get_sched_elem_ix(const struct mlx5_vport *vport) { if (!vport->qos.sched_node) @@ -326,7 +332,7 @@ static int esw_qos_create_node_sched_elem(struct mlx5_core_dev *dev, u32 parent_ tsar_ix); } -static int esw_qos_vport_create_sched_element(struct mlx5_esw_sched_node *vport_node, u32 bw_share, +static int esw_qos_vport_create_sched_element(struct mlx5_esw_sched_node *vport_node, struct netlink_ext_ack *extack) { u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {}; @@ -344,69 +350,10 @@ static int esw_qos_vport_create_sched_element(struct mlx5_esw_sched_node *vport_ MLX5_SET(vport_element, attr, vport_number, vport_node->vport->vport); MLX5_SET(scheduling_context, sched_ctx, parent_element_id, vport_node->parent->ix); MLX5_SET(scheduling_context, sched_ctx, max_average_bw, vport_node->max_rate); - MLX5_SET(scheduling_context, sched_ctx, bw_share, bw_share); return esw_qos_node_create_sched_element(vport_node, sched_ctx, extack); } -static int esw_qos_update_node_scheduling_element(struct mlx5_vport *vport, - struct mlx5_esw_sched_node *curr_node, - struct mlx5_esw_sched_node *new_node, - struct netlink_ext_ack *extack) -{ - struct mlx5_esw_sched_node *vport_node = vport->qos.sched_node; - int err; - - err = esw_qos_node_destroy_sched_element(vport_node, extack); - if (err) - return err; - - esw_qos_node_set_parent(vport_node, new_node); - err = esw_qos_vport_create_sched_element(vport_node, vport_node->bw_share, extack); - if (err) { - NL_SET_ERR_MSG_MOD(extack, "E-Switch vport node set failed."); - goto err_sched; - } - - return 0; - -err_sched: - esw_qos_node_set_parent(vport_node, curr_node); - if (esw_qos_vport_create_sched_element(vport_node, vport_node->bw_share, NULL)) - esw_warn(curr_node->esw->dev, "E-Switch vport node restore failed (vport=%d)\n", - vport->vport); - - return err; -} - -static int 
esw_qos_vport_update_node(struct mlx5_vport *vport, - struct mlx5_esw_sched_node *node, - struct netlink_ext_ack *extack) -{ - struct mlx5_esw_sched_node *vport_node = vport->qos.sched_node; - struct mlx5_eswitch *esw = vport->dev->priv.eswitch; - struct mlx5_esw_sched_node *new_node, *curr_node; - int err; - - esw_assert_qos_lock_held(esw); - curr_node = vport_node->parent; - new_node = node ?: esw->qos.node0; - if (curr_node == new_node) - return 0; - - err = esw_qos_update_node_scheduling_element(vport, curr_node, new_node, extack); - if (err) - return err; - - /* Recalculate bw share weights of old and new nodes */ - if (vport_node->bw_share || new_node->bw_share) { - esw_qos_normalize_min_rate(curr_node->esw, curr_node, extack); - esw_qos_normalize_min_rate(new_node->esw, new_node, extack); - } - - return 0; -} - static struct mlx5_esw_sched_node * __esw_qos_alloc_node(struct mlx5_eswitch *esw, u32 tsar_ix, enum sched_node_type type, struct mlx5_esw_sched_node *parent) @@ -590,43 +537,62 @@ static void esw_qos_put(struct mlx5_eswitch *esw) esw_qos_destroy(esw); } +static void esw_qos_vport_disable(struct mlx5_vport *vport, struct netlink_ext_ack *extack) +{ + struct mlx5_esw_sched_node *vport_node = vport->qos.sched_node; + struct mlx5_esw_sched_node *parent = vport_node->parent; + + esw_qos_node_destroy_sched_element(vport_node, extack); + + vport_node->bw_share = 0; + list_del_init(&vport_node->entry); + esw_qos_normalize_min_rate(parent->esw, parent, extack); + + trace_mlx5_esw_vport_qos_destroy(vport_node->esw->dev, vport); +} + static int esw_qos_vport_enable(struct mlx5_vport *vport, struct mlx5_esw_sched_node *parent, - u32 max_rate, u32 bw_share, struct netlink_ext_ack *extack) + struct netlink_ext_ack *extack) +{ + int err; + + esw_assert_qos_lock_held(vport->dev->priv.eswitch); + + esw_qos_node_set_parent(vport->qos.sched_node, parent); + err = esw_qos_vport_create_sched_element(vport->qos.sched_node, extack); + if (err) + return err; + + esw_qos_normalize_min_rate(parent->esw, parent, extack); + + return 0; +} + +static int mlx5_esw_qos_vport_enable(struct mlx5_vport *vport, enum sched_node_type type, + struct mlx5_esw_sched_node *parent, u32 max_rate, + u32 min_rate, struct netlink_ext_ack *extack) { struct mlx5_eswitch *esw = vport->dev->priv.eswitch; struct mlx5_esw_sched_node *sched_node; int err; esw_assert_qos_lock_held(esw); - err = esw_qos_get(esw, extack); if (err) return err; parent = parent ?: esw->qos.node0; - sched_node = __esw_qos_alloc_node(parent->esw, 0, SCHED_NODE_TYPE_VPORT, parent); - if (!sched_node) { - err = -ENOMEM; - goto err_alloc; - } + sched_node = __esw_qos_alloc_node(parent->esw, 0, type, parent); + if (!sched_node) + return -ENOMEM; sched_node->max_rate = max_rate; - sched_node->min_rate = 0; - sched_node->bw_share = bw_share; + sched_node->min_rate = min_rate; sched_node->vport = vport; - err = esw_qos_vport_create_sched_element(sched_node, 0, extack); - if (err) - goto err_vport_create; - - trace_mlx5_esw_vport_qos_create(vport->dev, vport, bw_share, max_rate); vport->qos.sched_node = sched_node; - - return 0; - -err_vport_create: - __esw_qos_free_node(sched_node); -err_alloc: - esw_qos_put(esw); + err = esw_qos_vport_enable(vport, parent, extack); + if (err) + esw_qos_put(esw); return err; } @@ -634,23 +600,18 @@ err_alloc: void mlx5_esw_qos_vport_disable(struct mlx5_vport *vport) { struct mlx5_eswitch *esw = vport->dev->priv.eswitch; - struct mlx5_esw_sched_node *vport_node; - struct mlx5_core_dev *dev; + struct mlx5_esw_sched_node 
*parent; lockdep_assert_held(&esw->state_lock); esw_qos_lock(esw); - vport_node = vport->qos.sched_node; - if (!vport_node) + if (!vport->qos.sched_node) goto unlock; - WARN(vport_node->parent != esw->qos.node0, - "Disabling QoS on port before detaching it from node"); - - dev = vport_node->esw->dev; - trace_mlx5_esw_vport_qos_destroy(dev, vport); - esw_qos_destroy_node(vport_node, NULL); - memset(&vport->qos, 0, sizeof(vport->qos)); + parent = vport->qos.sched_node->parent; + WARN(parent != esw->qos.node0, "Disabling QoS on port before detaching it from node"); + esw_qos_vport_disable(vport, NULL); + mlx5_esw_qos_vport_qos_free(vport); esw_qos_put(esw); unlock: esw_qos_unlock(esw); @@ -664,7 +625,8 @@ static int mlx5_esw_qos_set_vport_max_rate(struct mlx5_vport *vport, u32 max_rat esw_assert_qos_lock_held(vport->dev->priv.eswitch); if (!vport_node) - return esw_qos_vport_enable(vport, NULL, max_rate, 0, extack); + return mlx5_esw_qos_vport_enable(vport, SCHED_NODE_TYPE_VPORT, NULL, max_rate, 0, + extack); else return esw_qos_sched_elem_config(vport_node, max_rate, vport_node->bw_share, extack); @@ -678,7 +640,8 @@ static int mlx5_esw_qos_set_vport_min_rate(struct mlx5_vport *vport, u32 min_rat esw_assert_qos_lock_held(vport->dev->priv.eswitch); if (!vport_node) - return esw_qos_vport_enable(vport, NULL, 0, min_rate, extack); + return mlx5_esw_qos_vport_enable(vport, SCHED_NODE_TYPE_VPORT, NULL, 0, min_rate, + extack); else return esw_qos_set_node_min_rate(vport_node, min_rate, extack); } @@ -711,6 +674,31 @@ bool mlx5_esw_qos_get_vport_rate(struct mlx5_vport *vport, u32 *max_rate, u32 *m return enabled; } +static int esw_qos_vport_update_parent(struct mlx5_vport *vport, struct mlx5_esw_sched_node *parent, + struct netlink_ext_ack *extack) +{ + struct mlx5_eswitch *esw = vport->dev->priv.eswitch; + struct mlx5_esw_sched_node *curr_parent; + int err; + + esw_assert_qos_lock_held(esw); + curr_parent = vport->qos.sched_node->parent; + parent = parent ?: esw->qos.node0; + if (curr_parent == parent) + return 0; + + esw_qos_vport_disable(vport, extack); + + err = esw_qos_vport_enable(vport, parent, extack); + if (err) { + if (esw_qos_vport_enable(vport, curr_parent, NULL)) + esw_warn(parent->esw->dev, "vport restore QoS failed (vport=%d)\n", + vport->vport); + } + + return err; +} + static u32 mlx5_esw_qos_lag_link_speed_get_locked(struct mlx5_core_dev *mdev) { struct ethtool_link_ksettings lksettings; @@ -972,23 +960,22 @@ int mlx5_esw_devlink_rate_node_del(struct devlink_rate *rate_node, void *priv, return 0; } -int mlx5_esw_qos_vport_update_node(struct mlx5_vport *vport, - struct mlx5_esw_sched_node *node, - struct netlink_ext_ack *extack) +int mlx5_esw_qos_vport_update_parent(struct mlx5_vport *vport, struct mlx5_esw_sched_node *parent, + struct netlink_ext_ack *extack) { struct mlx5_eswitch *esw = vport->dev->priv.eswitch; int err = 0; - if (node && node->esw != esw) { + if (parent && parent->esw != esw) { NL_SET_ERR_MSG_MOD(extack, "Cross E-Switch scheduling is not supported"); return -EOPNOTSUPP; } esw_qos_lock(esw); - if (!vport->qos.sched_node && node) - err = esw_qos_vport_enable(vport, node, 0, 0, extack); + if (!vport->qos.sched_node && parent) + err = mlx5_esw_qos_vport_enable(vport, SCHED_NODE_TYPE_VPORT, parent, 0, 0, extack); else if (vport->qos.sched_node) - err = esw_qos_vport_update_node(vport, node, extack); + err = esw_qos_vport_update_parent(vport, parent, extack); esw_qos_unlock(esw); return err; } @@ -1002,8 +989,8 @@ int mlx5_esw_devlink_rate_parent_set(struct 
devlink_rate *devlink_rate, struct mlx5_vport *vport = priv; if (!parent) - return mlx5_esw_qos_vport_update_node(vport, NULL, extack); + return mlx5_esw_qos_vport_update_parent(vport, NULL, extack); node = parent_priv; - return mlx5_esw_qos_vport_update_node(vport, node, extack); + return mlx5_esw_qos_vport_update_parent(vport, node, extack); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.h b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.h index 61a6fdd5c267..6eb8f6a648c8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.h @@ -13,6 +13,7 @@ int mlx5_esw_qos_set_vport_rate(struct mlx5_vport *evport, u32 max_rate, u32 min bool mlx5_esw_qos_get_vport_rate(struct mlx5_vport *vport, u32 *max_rate, u32 *min_rate); void mlx5_esw_qos_vport_disable(struct mlx5_vport *vport); +void mlx5_esw_qos_vport_qos_free(struct mlx5_vport *vport); u32 mlx5_esw_qos_vport_get_sched_elem_ix(const struct mlx5_vport *vport); struct mlx5_esw_sched_node *mlx5_esw_qos_vport_get_parent(const struct mlx5_vport *vport); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index d0dab8f4e1a3..7fb8a3381f84 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c @@ -1061,8 +1061,7 @@ static void mlx5_eswitch_clear_vf_vports_info(struct mlx5_eswitch *esw) unsigned long i; mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs) { - kfree(vport->qos.sched_node); - memset(&vport->qos, 0, sizeof(vport->qos)); + mlx5_esw_qos_vport_qos_free(vport); memset(&vport->info, 0, sizeof(vport->info)); vport->info.link_state = MLX5_VPORT_ADMIN_STATE_AUTO; } @@ -1074,8 +1073,7 @@ static void mlx5_eswitch_clear_ec_vf_vports_info(struct mlx5_eswitch *esw) unsigned long i; mlx5_esw_for_each_ec_vf_vport(esw, i, vport, esw->esw_funcs.num_ec_vfs) { - kfree(vport->qos.sched_node); - memset(&vport->qos, 0, sizeof(vport->qos)); + mlx5_esw_qos_vport_qos_free(vport); memset(&vport->info, 0, sizeof(vport->info)); vport->info.link_state = MLX5_VPORT_ADMIN_STATE_AUTO; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h index 14dd42d44e6f..a83d41121db6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h @@ -427,9 +427,8 @@ int mlx5_eswitch_set_vport_trust(struct mlx5_eswitch *esw, u16 vport_num, bool setting); int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, u16 vport, u32 max_rate, u32 min_rate); -int mlx5_esw_qos_vport_update_node(struct mlx5_vport *vport, - struct mlx5_esw_sched_node *node, - struct netlink_ext_ack *extack); +int mlx5_esw_qos_vport_update_parent(struct mlx5_vport *vport, struct mlx5_esw_sched_node *node, + struct netlink_ext_ack *extack); int mlx5_eswitch_set_vepa(struct mlx5_eswitch *esw, u8 setting); int mlx5_eswitch_get_vepa(struct mlx5_eswitch *esw, u8 *setting); int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw, -- 2.51.0 From 8a0ee54027b1fbccda3f2683dafec9b7216993a4 Mon Sep 17 00:00:00 2001 From: Dragos Tatulea Date: Thu, 7 Nov 2024 21:43:53 +0200 Subject: [PATCH 06/16] net/mlx5e: SHAMPO, Simplify UMR allocation for headers Allocating page fragments for header data split is currently more complicated than it should be. That's because the number of KSM entries allocated is not aligned to the number of headers per page. 
This leads to having leftovers in the next allocation which require additional accounting and needlessly complicated code. This patch aligns (down) the number of KSM entries in the UMR WQE to the number of headers per page by: 1) Aligning the max number of entries allocated per UMR WQE (max_ksm_entries) to MLX5E_SHAMPO_WQ_HEADER_PER_PAGE. 2) Aligning the total number of free headers to MLX5E_SHAMPO_WQ_HEADER_PER_PAGE. ... and then it drops the extra accounting code from mlx5e_build_shampo_hd_umr(). Although the number of entries allocated per UMR WQE is slightly smaller due to aligning down, no performance impact was observed. Signed-off-by: Dragos Tatulea Signed-off-by: Tariq Toukan Link: https://patch.msgid.link/20241107194357.683732-9-tariqt@nvidia.com Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/mellanox/mlx5/core/en.h | 1 - .../net/ethernet/mellanox/mlx5/core/en_rx.c | 29 ++++++++----------- 2 files changed, 12 insertions(+), 18 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 58f3df784ded..4449a57ba5b2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -633,7 +633,6 @@ struct mlx5e_shampo_hd { u16 pi; u16 ci; __be32 key; - u64 last_addr; }; struct mlx5e_hw_gro_data { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index d81083f4f316..e044e5d11f05 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -648,30 +648,26 @@ static int mlx5e_build_shampo_hd_umr(struct mlx5e_rq *rq, u16 ksm_entries, u16 index) { struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo; - u16 entries, pi, header_offset, err, wqe_bbs, new_entries; + u16 pi, header_offset, err, wqe_bbs; u32 lkey = rq->mdev->mlx5e_res.hw_objs.mkey; u16 page_index = shampo->curr_page_index; struct mlx5e_frag_page *frag_page; - u64 addr = shampo->last_addr; struct mlx5e_dma_info *dma_info; struct mlx5e_umr_wqe *umr_wqe; int headroom, i; + u64 addr = 0; headroom = rq->buff.headroom; - new_entries = ksm_entries - (shampo->pi & (MLX5_UMR_KSM_NUM_ENTRIES_ALIGNMENT - 1)); - entries = ALIGN(ksm_entries, MLX5_UMR_KSM_NUM_ENTRIES_ALIGNMENT); - wqe_bbs = MLX5E_KSM_UMR_WQEBBS(entries); + wqe_bbs = MLX5E_KSM_UMR_WQEBBS(ksm_entries); pi = mlx5e_icosq_get_next_pi(sq, wqe_bbs); umr_wqe = mlx5_wq_cyc_get_wqe(&sq->wq, pi); - build_ksm_umr(sq, umr_wqe, shampo->key, index, entries); + build_ksm_umr(sq, umr_wqe, shampo->key, index, ksm_entries); frag_page = &shampo->pages[page_index]; - for (i = 0; i < entries; i++, index++) { + WARN_ON_ONCE(ksm_entries & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1)); + for (i = 0; i < ksm_entries; i++, index++) { dma_info = &shampo->info[index]; - if (i >= ksm_entries || (index < shampo->pi && shampo->pi - index < - MLX5_UMR_KSM_NUM_ENTRIES_ALIGNMENT)) - goto update_ksm; header_offset = (index & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1)) << MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE; if (!(header_offset & (PAGE_SIZE - 1))) { @@ -691,7 +687,6 @@ static int mlx5e_build_shampo_hd_umr(struct mlx5e_rq *rq, dma_info->frag_page = frag_page; } -update_ksm: umr_wqe->inline_ksms[i] = (struct mlx5_ksm) { .key = cpu_to_be32(lkey), .va = cpu_to_be64(dma_info->addr + headroom), @@ -701,12 +696,11 @@ update_ksm: sq->db.wqe_info[pi] = (struct mlx5e_icosq_wqe_info) { .wqe_type = MLX5E_ICOSQ_WQE_SHAMPO_HD_UMR, .num_wqebbs = wqe_bbs, - .shampo.len = new_entries, + .shampo.len = 
ksm_entries, }; - shampo->pi = (shampo->pi + new_entries) & (shampo->hd_per_wq - 1); + shampo->pi = (shampo->pi + ksm_entries) & (shampo->hd_per_wq - 1); shampo->curr_page_index = page_index; - shampo->last_addr = addr; sq->pc += wqe_bbs; sq->doorbell_cseg = &umr_wqe->ctrl; @@ -731,7 +725,8 @@ static int mlx5e_alloc_rx_hd_mpwqe(struct mlx5e_rq *rq) struct mlx5e_icosq *sq = rq->icosq; int i, err, max_ksm_entries, len; - max_ksm_entries = MLX5E_MAX_KSM_PER_WQE(rq->mdev); + max_ksm_entries = ALIGN_DOWN(MLX5E_MAX_KSM_PER_WQE(rq->mdev), + MLX5E_SHAMPO_WQ_HEADER_PER_PAGE); ksm_entries = bitmap_find_window(shampo->bitmap, shampo->hd_per_wqe, shampo->hd_per_wq, shampo->pi); @@ -739,8 +734,8 @@ static int mlx5e_alloc_rx_hd_mpwqe(struct mlx5e_rq *rq) if (!ksm_entries) return 0; - ksm_entries += (shampo->pi & (MLX5_UMR_KSM_NUM_ENTRIES_ALIGNMENT - 1)); - index = ALIGN_DOWN(shampo->pi, MLX5_UMR_KSM_NUM_ENTRIES_ALIGNMENT); + /* pi is aligned to MLX5E_SHAMPO_WQ_HEADER_PER_PAGE */ + index = shampo->pi; entries_before = shampo->hd_per_wq - index; if (unlikely(entries_before < ksm_entries)) -- 2.51.0 From 1a4b5885770401b7e8de6546760686dcd2d9b784 Mon Sep 17 00:00:00 2001 From: Dragos Tatulea Date: Thu, 7 Nov 2024 21:43:54 +0200 Subject: [PATCH 07/16] net/mlx5e: SHAMPO, Fix page_index calculation inconsistency When calculating the index for the next frag page slot, the divisor is incorrect: it should be the number of pages per queue not the number of headers per queue. This is currently harmless because frag pages are not used directly, but they are intermediated through the info array. But it needs to be fixed as an upcoming patch will get rid of the info array. This patch introduces a new pages per queue variable and plugs it in the formula. Now that this variable exists, additional code can be simplified in the SHAMPO initialization code. 
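A quick numeric illustration of why the divisor matters, using example sizes (1024 headers per queue and 8 headers per page, both powers of two as the driver assumes):

	#include <assert.h>

	int main(void)
	{
		unsigned int hd_per_wq = 1024;			/* headers per queue (example) */
		unsigned int hdrs_per_page = 8;			/* headers per page (example) */
		unsigned int pages_per_wq = hd_per_wq / hdrs_per_page;	/* 128 page slots */
		unsigned int page_index = pages_per_wq - 1;	/* last valid page slot */

		/* Correct wrap: back to slot 0 once the last page slot is used. */
		assert(((page_index + 1) & (pages_per_wq - 1)) == 0);

		/* Wrapping with the header count instead points at slot 128,
		 * outside the 0..127 range of allocated page slots. */
		assert(((page_index + 1) & (hd_per_wq - 1)) == 128);
		return 0;
	}
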
Signed-off-by: Dragos Tatulea Signed-off-by: Tariq Toukan Link: https://patch.msgid.link/20241107194357.683732-10-tariqt@nvidia.com Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/mellanox/mlx5/core/en.h | 1 + drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 8 +++----- drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 2 +- 3 files changed, 5 insertions(+), 6 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 4449a57ba5b2..b4abb094f01a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -629,6 +629,7 @@ struct mlx5e_shampo_hd { u16 curr_page_index; u32 hd_per_wq; u16 hd_per_wqe; + u16 pages_per_wq; unsigned long *bitmap; u16 pi; u16 ci; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 59d7a0e28f24..3ca1ef1f39a5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -767,8 +767,6 @@ static int mlx5_rq_shampo_alloc(struct mlx5_core_dev *mdev, u32 *pool_size, int node) { - void *wqc = MLX5_ADDR_OF(rqc, rqp->rqc, wq); - int wq_size; int err; if (!test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state)) @@ -793,9 +791,9 @@ static int mlx5_rq_shampo_alloc(struct mlx5_core_dev *mdev, cpu_to_be32(rq->mpwqe.shampo->mkey); rq->mpwqe.shampo->hd_per_wqe = mlx5e_shampo_hd_per_wqe(mdev, params, rqp); - wq_size = BIT(MLX5_GET(wq, wqc, log_wq_sz)); - *pool_size += (rq->mpwqe.shampo->hd_per_wqe * wq_size) / - MLX5E_SHAMPO_WQ_HEADER_PER_PAGE; + rq->mpwqe.shampo->pages_per_wq = + rq->mpwqe.shampo->hd_per_wq / MLX5E_SHAMPO_WQ_HEADER_PER_PAGE; + *pool_size += rq->mpwqe.shampo->pages_per_wq; return 0; err_hw_gro_data: diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index e044e5d11f05..76a975667c77 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -671,7 +671,7 @@ static int mlx5e_build_shampo_hd_umr(struct mlx5e_rq *rq, header_offset = (index & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1)) << MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE; if (!(header_offset & (PAGE_SIZE - 1))) { - page_index = (page_index + 1) & (shampo->hd_per_wq - 1); + page_index = (page_index + 1) & (shampo->pages_per_wq - 1); frag_page = &shampo->pages[page_index]; err = mlx5e_page_alloc_fragmented(rq, frag_page); -- 2.51.0 From 4f56868b7132bb3c7e5a2c1930b6402718248a35 Mon Sep 17 00:00:00 2001 From: Dragos Tatulea Date: Thu, 7 Nov 2024 21:43:55 +0200 Subject: [PATCH 08/16] net/mlx5e: SHAMPO, Change frag page setup order during allocation Now that the UMR allocation has been simplified, it is no longer possible to have a leftover page from a previous call to mlx5e_build_shampo_hd_umr(). This patch simplifies the code by switching the order of operations: first take the frag page and then increment the index. This is more straightforward and it also paves the way for dropping the info array. 
Signed-off-by: Dragos Tatulea Signed-off-by: Tariq Toukan Link: https://patch.msgid.link/20241107194357.683732-11-tariqt@nvidia.com Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index 76a975667c77..637069c1b988 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -651,7 +651,7 @@ static int mlx5e_build_shampo_hd_umr(struct mlx5e_rq *rq, u16 pi, header_offset, err, wqe_bbs; u32 lkey = rq->mdev->mlx5e_res.hw_objs.mkey; u16 page_index = shampo->curr_page_index; - struct mlx5e_frag_page *frag_page; + struct mlx5e_frag_page *frag_page = NULL; struct mlx5e_dma_info *dma_info; struct mlx5e_umr_wqe *umr_wqe; int headroom, i; @@ -663,16 +663,14 @@ static int mlx5e_build_shampo_hd_umr(struct mlx5e_rq *rq, umr_wqe = mlx5_wq_cyc_get_wqe(&sq->wq, pi); build_ksm_umr(sq, umr_wqe, shampo->key, index, ksm_entries); - frag_page = &shampo->pages[page_index]; - WARN_ON_ONCE(ksm_entries & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1)); for (i = 0; i < ksm_entries; i++, index++) { dma_info = &shampo->info[index]; header_offset = (index & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1)) << MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE; if (!(header_offset & (PAGE_SIZE - 1))) { - page_index = (page_index + 1) & (shampo->pages_per_wq - 1); frag_page = &shampo->pages[page_index]; + page_index = (page_index + 1) & (shampo->pages_per_wq - 1); err = mlx5e_page_alloc_fragmented(rq, frag_page); if (unlikely(err)) -- 2.51.0 From 945ca432bfd0788b960f8f721594dae4fc3c02c1 Mon Sep 17 00:00:00 2001 From: Dragos Tatulea Date: Thu, 7 Nov 2024 21:43:56 +0200 Subject: [PATCH 09/16] net/mlx5e: SHAMPO, Drop info array The info array is used to store a pointer to the dma address of the header and to the frag page. However, this array is not really required: - The frag page can be calculated from the header index frag page index = header index / headers per page. - The dma address can be calculated through a formula: dma page address + header offset. This series gets rid of the info array and uses the above formulas instead. The current_page_index was used in conjunction with the info array to store page fragment indices. This variable is dropped as well. There was no performance regression observed. 
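Both formulas reduce to shift/mask arithmetic. A standalone sketch assuming 8 headers of 512 bytes per 4 KiB page (illustrative constants, not the driver code):

	#include <assert.h>

	#define LOG_HDRS_PER_PAGE  3	/* 8 headers per page (assumed) */
	#define LOG_HDR_SIZE       9	/* 512-byte header entries (assumed) */

	/* frag page index = header index / headers per page */
	static unsigned int hd_to_page_index(unsigned int header_index)
	{
		return header_index >> LOG_HDRS_PER_PAGE;
	}

	/* offset within the page = (header index % headers per page) * header size;
	 * the dma address is then: page dma address + this offset */
	static unsigned int hd_offset(unsigned int header_index)
	{
		return (header_index & ((1u << LOG_HDRS_PER_PAGE) - 1)) << LOG_HDR_SIZE;
	}

	int main(void)
	{
		assert(hd_to_page_index(9) == 1);	/* header 9 lives on page 1 ... */
		assert(hd_offset(9) == 512);		/* ... one header entry into it */
		return 0;
	}
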
Signed-off-by: Dragos Tatulea Signed-off-by: Tariq Toukan Link: https://patch.msgid.link/20241107194357.683732-12-tariqt@nvidia.com Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/mellanox/mlx5/core/en.h | 3 +- .../net/ethernet/mellanox/mlx5/core/en_main.c | 7 +- .../net/ethernet/mellanox/mlx5/core/en_rx.c | 76 ++++++++++--------- 3 files changed, 42 insertions(+), 44 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index b4abb094f01a..979fc56205e1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -83,6 +83,7 @@ struct page_pool; #define MLX5E_SHAMPO_LOG_HEADER_ENTRY_SIZE (8) #define MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE (9) #define MLX5E_SHAMPO_WQ_HEADER_PER_PAGE (PAGE_SIZE >> MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE) +#define MLX5E_SHAMPO_LOG_WQ_HEADER_PER_PAGE (PAGE_SHIFT - MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE) #define MLX5E_SHAMPO_WQ_BASE_HEAD_ENTRY_SIZE (64) #define MLX5E_SHAMPO_WQ_RESRV_SIZE (64 * 1024) #define MLX5E_SHAMPO_WQ_BASE_RESRV_SIZE (4096) @@ -624,9 +625,7 @@ struct mlx5e_dma_info { struct mlx5e_shampo_hd { u32 mkey; - struct mlx5e_dma_info *info; struct mlx5e_frag_page *pages; - u16 curr_page_index; u32 hd_per_wq; u16 hd_per_wqe; u16 pages_per_wq; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 3ca1ef1f39a5..2e27e9d6b820 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -350,19 +350,15 @@ static int mlx5e_rq_shampo_hd_info_alloc(struct mlx5e_rq *rq, int node) shampo->bitmap = bitmap_zalloc_node(shampo->hd_per_wq, GFP_KERNEL, node); - shampo->info = kvzalloc_node(array_size(shampo->hd_per_wq, - sizeof(*shampo->info)), - GFP_KERNEL, node); shampo->pages = kvzalloc_node(array_size(shampo->hd_per_wq, sizeof(*shampo->pages)), GFP_KERNEL, node); - if (!shampo->bitmap || !shampo->info || !shampo->pages) + if (!shampo->bitmap || !shampo->pages) goto err_nomem; return 0; err_nomem: - kvfree(shampo->info); kvfree(shampo->bitmap); kvfree(shampo->pages); @@ -372,7 +368,6 @@ err_nomem: static void mlx5e_rq_shampo_hd_info_free(struct mlx5e_rq *rq) { kvfree(rq->mpwqe.shampo->bitmap); - kvfree(rq->mpwqe.shampo->info); kvfree(rq->mpwqe.shampo->pages); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index 637069c1b988..3de575875586 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -643,6 +643,21 @@ static void build_ksm_umr(struct mlx5e_icosq *sq, struct mlx5e_umr_wqe *umr_wqe, umr_wqe->uctrl.mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE); } +static struct mlx5e_frag_page *mlx5e_shampo_hd_to_frag_page(struct mlx5e_rq *rq, int header_index) +{ + BUILD_BUG_ON(MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE > PAGE_SHIFT); + + return &rq->mpwqe.shampo->pages[header_index >> MLX5E_SHAMPO_LOG_WQ_HEADER_PER_PAGE]; +} + +static u64 mlx5e_shampo_hd_offset(int header_index) +{ + return (header_index & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1)) << + MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE; +} + +static void mlx5e_free_rx_shampo_hd_entry(struct mlx5e_rq *rq, u16 header_index); + static int mlx5e_build_shampo_hd_umr(struct mlx5e_rq *rq, struct mlx5e_icosq *sq, u16 ksm_entries, u16 index) @@ -650,9 +665,6 @@ static int mlx5e_build_shampo_hd_umr(struct mlx5e_rq *rq, struct mlx5e_shampo_hd *shampo = 
rq->mpwqe.shampo; u16 pi, header_offset, err, wqe_bbs; u32 lkey = rq->mdev->mlx5e_res.hw_objs.mkey; - u16 page_index = shampo->curr_page_index; - struct mlx5e_frag_page *frag_page = NULL; - struct mlx5e_dma_info *dma_info; struct mlx5e_umr_wqe *umr_wqe; int headroom, i; u64 addr = 0; @@ -665,29 +677,20 @@ static int mlx5e_build_shampo_hd_umr(struct mlx5e_rq *rq, WARN_ON_ONCE(ksm_entries & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1)); for (i = 0; i < ksm_entries; i++, index++) { - dma_info = &shampo->info[index]; - header_offset = (index & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1)) << - MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE; - if (!(header_offset & (PAGE_SIZE - 1))) { - frag_page = &shampo->pages[page_index]; - page_index = (page_index + 1) & (shampo->pages_per_wq - 1); + header_offset = mlx5e_shampo_hd_offset(index); + if (!header_offset) { + struct mlx5e_frag_page *frag_page = mlx5e_shampo_hd_to_frag_page(rq, index); err = mlx5e_page_alloc_fragmented(rq, frag_page); if (unlikely(err)) goto err_unmap; addr = page_pool_get_dma_addr(frag_page->page); - - dma_info->addr = addr; - dma_info->frag_page = frag_page; - } else { - dma_info->addr = addr + header_offset; - dma_info->frag_page = frag_page; } umr_wqe->inline_ksms[i] = (struct mlx5_ksm) { .key = cpu_to_be32(lkey), - .va = cpu_to_be64(dma_info->addr + headroom), + .va = cpu_to_be64(addr + header_offset + headroom), }; } @@ -698,20 +701,22 @@ static int mlx5e_build_shampo_hd_umr(struct mlx5e_rq *rq, }; shampo->pi = (shampo->pi + ksm_entries) & (shampo->hd_per_wq - 1); - shampo->curr_page_index = page_index; sq->pc += wqe_bbs; sq->doorbell_cseg = &umr_wqe->ctrl; return 0; err_unmap: - while (--i >= 0) { - dma_info = &shampo->info[--index]; - if (!(i & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1))) { - dma_info->addr = ALIGN_DOWN(dma_info->addr, PAGE_SIZE); - mlx5e_page_release_fragmented(rq, dma_info->frag_page); + while (--i) { + --index; + header_offset = mlx5e_shampo_hd_offset(index); + if (!header_offset) { + struct mlx5e_frag_page *frag_page = mlx5e_shampo_hd_to_frag_page(rq, index); + + mlx5e_page_release_fragmented(rq, frag_page); } } + rq->stats->buff_alloc_err++; return err; } @@ -844,13 +849,11 @@ static void mlx5e_free_rx_shampo_hd_entry(struct mlx5e_rq *rq, u16 header_index) { struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo; - u64 addr = shampo->info[header_index].addr; if (((header_index + 1) & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1)) == 0) { - struct mlx5e_dma_info *dma_info = &shampo->info[header_index]; + struct mlx5e_frag_page *frag_page = mlx5e_shampo_hd_to_frag_page(rq, header_index); - dma_info->addr = ALIGN_DOWN(addr, PAGE_SIZE); - mlx5e_page_release_fragmented(rq, dma_info->frag_page); + mlx5e_page_release_fragmented(rq, frag_page); } clear_bit(header_index, shampo->bitmap); } @@ -1204,10 +1207,10 @@ static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe, static void *mlx5e_shampo_get_packet_hd(struct mlx5e_rq *rq, u16 header_index) { - struct mlx5e_dma_info *last_head = &rq->mpwqe.shampo->info[header_index]; - u16 head_offset = (last_head->addr & (PAGE_SIZE - 1)) + rq->buff.headroom; + struct mlx5e_frag_page *frag_page = mlx5e_shampo_hd_to_frag_page(rq, header_index); + u16 head_offset = mlx5e_shampo_hd_offset(header_index) + rq->buff.headroom; - return page_address(last_head->frag_page->page) + head_offset; + return page_address(frag_page->page) + head_offset; } static void mlx5e_shampo_update_ipv4_udp_hdr(struct mlx5e_rq *rq, struct iphdr *ipv4) @@ -2178,29 +2181,30 @@ static struct sk_buff * 
mlx5e_skb_from_cqe_shampo(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, struct mlx5_cqe64 *cqe, u16 header_index) { - struct mlx5e_dma_info *head = &rq->mpwqe.shampo->info[header_index]; - u16 head_offset = head->addr & (PAGE_SIZE - 1); + struct mlx5e_frag_page *frag_page = mlx5e_shampo_hd_to_frag_page(rq, header_index); + dma_addr_t page_dma_addr = page_pool_get_dma_addr(frag_page->page); + u16 head_offset = mlx5e_shampo_hd_offset(header_index); + dma_addr_t dma_addr = page_dma_addr + head_offset; u16 head_size = cqe->shampo.header_size; u16 rx_headroom = rq->buff.headroom; struct sk_buff *skb = NULL; void *hdr, *data; u32 frag_size; - hdr = page_address(head->frag_page->page) + head_offset; + hdr = page_address(frag_page->page) + head_offset; data = hdr + rx_headroom; frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + head_size); if (likely(frag_size <= BIT(MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE))) { /* build SKB around header */ - dma_sync_single_range_for_cpu(rq->pdev, head->addr, 0, frag_size, rq->buff.map_dir); + dma_sync_single_range_for_cpu(rq->pdev, dma_addr, 0, frag_size, rq->buff.map_dir); net_prefetchw(hdr); net_prefetch(data); skb = mlx5e_build_linear_skb(rq, hdr, frag_size, rx_headroom, head_size, 0); - if (unlikely(!skb)) return NULL; - head->frag_page->frags++; + frag_page->frags++; } else { /* allocate SKB and copy header for large header */ rq->stats->gro_large_hds++; @@ -2212,7 +2216,7 @@ mlx5e_skb_from_cqe_shampo(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, } net_prefetchw(skb->data); - mlx5e_copy_skb_header(rq, skb, head->frag_page->page, head->addr, + mlx5e_copy_skb_header(rq, skb, frag_page->page, dma_addr, head_offset + rx_headroom, rx_headroom, head_size); /* skb linear part was allocated with headlen and aligned to long */ -- 2.51.0 From ab4219db89da1d019ef45675e4cd56a6841bbc1e Mon Sep 17 00:00:00 2001 From: Dragos Tatulea Date: Thu, 7 Nov 2024 21:43:57 +0200 Subject: [PATCH 10/16] net/mlx5e: SHAMPO, Rework header allocation loop The current loop code was based on the assumption that there can be page leftovers from previous function calls. This patch changes the allocation loop to make it clearer how pages get allocated every MLX5E_SHAMPO_WQ_HEADER_PER_PAGE headers. This change has no functional implications. 
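As a rough standalone illustration of the reworked loop shape in the diff below (a sketch, not the driver code: the page allocation is replaced by a counter, the KSM write by a printf, and HEADERS_PER_PAGE stands in for MLX5E_SHAMPO_WQ_HEADER_PER_PAGE with 4 KiB pages), exactly one page is obtained per group of headers:

    #include <stdio.h>

    #define HEADERS_PER_PAGE 8  /* MLX5E_SHAMPO_WQ_HEADER_PER_PAGE, assuming 4 KiB pages */

    int main(void)
    {
        unsigned int ksm_entries = 24;  /* the driver warns if this is not a multiple of HEADERS_PER_PAGE */
        unsigned int index = 0, i = 0, pages = 0;

        while (i < ksm_entries) {
            pages++;    /* one page allocation per group of headers */

            for (int j = 0; j < HEADERS_PER_PAGE; j++) {
                printf("ksm %2u -> page %u, header index %u\n", i, pages - 1, index);
                i++;
                index++;
            }
        }
        printf("allocated %u pages for %u headers\n", pages, ksm_entries);
        return 0;
    }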
Signed-off-by: Dragos Tatulea Signed-off-by: Tariq Toukan Link: https://patch.msgid.link/20241107194357.683732-13-tariqt@nvidia.com Signed-off-by: Jakub Kicinski --- .../net/ethernet/mellanox/mlx5/core/en_rx.c | 32 ++++++++++--------- 1 file changed, 17 insertions(+), 15 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index 3de575875586..1963bc5adb18 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -666,8 +666,7 @@ static int mlx5e_build_shampo_hd_umr(struct mlx5e_rq *rq, u16 pi, header_offset, err, wqe_bbs; u32 lkey = rq->mdev->mlx5e_res.hw_objs.mkey; struct mlx5e_umr_wqe *umr_wqe; - int headroom, i; - u64 addr = 0; + int headroom, i = 0; headroom = rq->buff.headroom; wqe_bbs = MLX5E_KSM_UMR_WQEBBS(ksm_entries); @@ -676,22 +675,25 @@ static int mlx5e_build_shampo_hd_umr(struct mlx5e_rq *rq, build_ksm_umr(sq, umr_wqe, shampo->key, index, ksm_entries); WARN_ON_ONCE(ksm_entries & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1)); - for (i = 0; i < ksm_entries; i++, index++) { - header_offset = mlx5e_shampo_hd_offset(index); - if (!header_offset) { - struct mlx5e_frag_page *frag_page = mlx5e_shampo_hd_to_frag_page(rq, index); + while (i < ksm_entries) { + struct mlx5e_frag_page *frag_page = mlx5e_shampo_hd_to_frag_page(rq, index); + u64 addr; + + err = mlx5e_page_alloc_fragmented(rq, frag_page); + if (unlikely(err)) + goto err_unmap; - err = mlx5e_page_alloc_fragmented(rq, frag_page); - if (unlikely(err)) - goto err_unmap; - addr = page_pool_get_dma_addr(frag_page->page); - } + addr = page_pool_get_dma_addr(frag_page->page); - umr_wqe->inline_ksms[i] = (struct mlx5_ksm) { - .key = cpu_to_be32(lkey), - .va = cpu_to_be64(addr + header_offset + headroom), - }; + for (int j = 0; j < MLX5E_SHAMPO_WQ_HEADER_PER_PAGE; j++) { + header_offset = mlx5e_shampo_hd_offset(index++); + + umr_wqe->inline_ksms[i++] = (struct mlx5_ksm) { + .key = cpu_to_be32(lkey), + .va = cpu_to_be64(addr + header_offset + headroom), + }; + } } sq->db.wqe_info[pi] = (struct mlx5e_icosq_wqe_info) { -- 2.51.0 From 3f5495962824fbef3b9a577ccd9b02f967452c11 Mon Sep 17 00:00:00 2001 From: Donald Hunter Date: Fri, 1 Nov 2024 14:32:07 +0000 Subject: [PATCH 11/16] netfilter: nfnetlink: Report extack policy errors for batched ops The nftables batch processing does not currently populate extack with policy errors. Fix this by passing extack when parsing batch messages. Signed-off-by: Donald Hunter Reviewed-by: Simon Horman Signed-off-by: Pablo Neira Ayuso --- net/netfilter/nfnetlink.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c index 7784ec094097..e598a2a252b0 100644 --- a/net/netfilter/nfnetlink.c +++ b/net/netfilter/nfnetlink.c @@ -517,7 +517,7 @@ replay_abort: err = nla_parse_deprecated(cda, ss->cb[cb_id].attr_count, attr, attrlen, - ss->cb[cb_id].policy, NULL); + ss->cb[cb_id].policy, &extack); if (err < 0) goto ack; -- 2.51.0 From 8340b0056ac723d04918573761b5d8f979d15a75 Mon Sep 17 00:00:00 2001 From: Simon Horman Date: Mon, 11 Nov 2024 14:47:51 +0000 Subject: [PATCH 12/16] netfilter: bpf: Pass string literal as format argument of request_module() MIME-Version: 1.0 Content-Type: text/plain; charset=utf8 Content-Transfer-Encoding: 8bit Both gcc-14 and clang-18 report that passing a non-string literal as the format argument of request_module() is potentially insecure. E.g. 
clang-18 says: .../nf_bpf_link.c:46:24: warning: format string is not a string literal (potentially insecure) [-Wformat-security] 46 | err = request_module(mod); | ^~~ .../kmod.h:25:55: note: expanded from macro 'request_module' 25 | #define request_module(mod...) __request_module(true, mod) | ^~~ .../nf_bpf_link.c:46:24: note: treat the string as an argument to avoid this 46 | err = request_module(mod); | ^ | "%s", .../kmod.h:25:55: note: expanded from macro 'request_module' 25 | #define request_module(mod...) __request_module(true, mod) | ^ It is always the case where the contents of mod is safe to pass as the format argument. That is, in my understanding, it never contains any format escape sequences. But, it seems better to be safe than sorry. And, as a bonus, compiler output becomes less verbose by addressing this issue as suggested by clang-18. Compile tested only. Signed-off-by: Simon Horman Reviewed-by: Toke Høiland-Jørgensen Signed-off-by: Pablo Neira Ayuso --- net/netfilter/nf_bpf_link.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/netfilter/nf_bpf_link.c b/net/netfilter/nf_bpf_link.c index 3d64a4511fcf..06b084844700 100644 --- a/net/netfilter/nf_bpf_link.c +++ b/net/netfilter/nf_bpf_link.c @@ -43,7 +43,7 @@ get_proto_defrag_hook(struct bpf_nf_link *link, hook = rcu_dereference(*ptr_global_hook); if (!hook) { rcu_read_unlock(); - err = request_module(mod); + err = request_module("%s", mod); if (err) return ERR_PTR(err < 0 ? err : -EINVAL); -- 2.51.0 From 4ee29181216d2acb7be210126324ec3bc0e3bd01 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Wed, 13 Nov 2024 16:35:49 +0100 Subject: [PATCH 13/16] netfilter: nf_tables: add nft_trans_commit_list_add_elem helper Add and use a wrapper to append trans_elem structures to the transaction log. Unlike the existing helper, pass a gfp_t to indicate if sleeping is allowed. This will be used by a followup patch to realloc nft_trans_elem structures after they gain a flexible array member to reduce number of such container structures on the transaction list. 
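As a hedged, userspace-only sketch of why the wrapper takes a gfp_t (MY_GFP_* and atomic_ctx are invented stand-ins, not kernel APIs): a caller that cannot sleep, like the set flush walk that later passes GFP_ATOMIC, must not request a sleeping allocation, which is the condition might_alloc() documents in the real helper:

    #include <assert.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    enum my_gfp { MY_GFP_KERNEL, MY_GFP_ATOMIC };  /* stand-ins for the real gfp_t flags */

    static void *queue_elem(bool atomic_ctx, enum my_gfp gfp, size_t sz)
    {
        /* analogue of might_alloc(): a sleeping allocation in atomic context is a bug */
        assert(!(atomic_ctx && gfp == MY_GFP_KERNEL));
        return malloc(sz);
    }

    int main(void)
    {
        void *a = queue_elem(false, MY_GFP_KERNEL, 96); /* ordinary process context */
        void *b = queue_elem(true,  MY_GFP_ATOMIC, 96); /* e.g. while walking a set under RCU */

        printf("queued %p and %p\n", a, b);
        free(a);
        free(b);
        return 0;
    }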
Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- net/netfilter/nf_tables_api.c | 21 ++++++++++++++++----- 1 file changed, 16 insertions(+), 5 deletions(-) diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index 6090ba9f1bb2..75c84b17ab99 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -421,6 +421,17 @@ static void nft_trans_commit_list_add_tail(struct net *net, struct nft_trans *tr } } +static void nft_trans_commit_list_add_elem(struct net *net, struct nft_trans *trans, + gfp_t gfp) +{ + WARN_ON_ONCE(trans->msg_type != NFT_MSG_NEWSETELEM && + trans->msg_type != NFT_MSG_DELSETELEM); + + might_alloc(gfp); + + nft_trans_commit_list_add_tail(net, trans); +} + static int nft_trans_table_add(struct nft_ctx *ctx, int msg_type) { struct nft_trans *trans; @@ -7205,7 +7216,7 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set, if (update_flags) { nft_trans_elem_priv(trans) = elem_priv; nft_trans_elem_update_flags(trans) = update_flags; - nft_trans_commit_list_add_tail(ctx->net, trans); + nft_trans_commit_list_add_elem(ctx->net, trans, GFP_KERNEL); goto err_elem_free; } } @@ -7229,7 +7240,7 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set, } nft_trans_elem_priv(trans) = elem.priv; - nft_trans_commit_list_add_tail(ctx->net, trans); + nft_trans_commit_list_add_elem(ctx->net, trans, GFP_KERNEL); return 0; err_set_full: @@ -7446,7 +7457,7 @@ static int nft_del_setelem(struct nft_ctx *ctx, struct nft_set *set, nft_setelem_data_deactivate(ctx->net, set, elem.priv); nft_trans_elem_priv(trans) = elem.priv; - nft_trans_commit_list_add_tail(ctx->net, trans); + nft_trans_commit_list_add_elem(ctx->net, trans, GFP_KERNEL); return 0; fail_ops: @@ -7482,7 +7493,7 @@ static int nft_setelem_flush(const struct nft_ctx *ctx, nft_setelem_data_deactivate(ctx->net, set, elem_priv); nft_trans_elem_set(trans) = set; nft_trans_elem_priv(trans) = elem_priv; - nft_trans_commit_list_add_tail(ctx->net, trans); + nft_trans_commit_list_add_elem(ctx->net, trans, GFP_ATOMIC); return 0; } @@ -7499,7 +7510,7 @@ static int __nft_set_catchall_flush(const struct nft_ctx *ctx, nft_setelem_data_deactivate(ctx->net, set, elem_priv); nft_trans_elem_priv(trans) = elem_priv; - nft_trans_commit_list_add_tail(ctx->net, trans); + nft_trans_commit_list_add_elem(ctx->net, trans, GFP_KERNEL); return 0; } -- 2.51.0 From a8ee6b900c147d3bedced6c52ba6cb603226aaa3 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Wed, 13 Nov 2024 16:35:50 +0100 Subject: [PATCH 14/16] netfilter: nf_tables: prepare for multiple elements in nft_trans_elem structure Add helpers to release the individual elements contained in the trans_elem container structure. No functional change intended. Followup patch will add 'nelems' member and will turn 'priv' into a flexible array. These helpers can then loop over all elements. Care needs to be taken to handle a mix of new elements and existing elements that are being updated (e.g. timeout refresh). Before this patch, NEWSETELEM transaction with update is released early so nft_trans_set_elem_destroy() won't get called, so we need to skip elements marked as update. 
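A toy, compilable sketch of the layout this prepares for (the struct names loosely mirror nft_trans_elem and nft_trans_one_elem, but the fields are simplified stand-ins): a fixed part plus a flexible array of per-element entries, and an abort-style destroy loop that skips entries which only carried an update:

    #include <stdio.h>
    #include <stdlib.h>

    struct one_elem {
        void *priv;
        unsigned char update_flags; /* non-zero: existing element, only refreshed */
    };

    struct trans_elem {
        unsigned int nelems;
        struct one_elem elems[];    /* flexible array, sized by nelems */
    };

    /* abort-path style destroy: new elements are freed, update-only entries are skipped */
    static void trans_set_elem_destroy(struct trans_elem *te)
    {
        for (unsigned int i = 0; i < te->nelems; i++) {
            if (te->elems[i].update_flags)
                continue;
            free(te->elems[i].priv);
        }
    }

    int main(void)
    {
        struct trans_elem *te = malloc(sizeof(*te) + 3 * sizeof(struct one_elem));

        te->nelems = 3;
        te->elems[0] = (struct one_elem){ .priv = malloc(16), .update_flags = 0 };
        te->elems[1] = (struct one_elem){ .priv = NULL,       .update_flags = 1 }; /* timeout refresh only */
        te->elems[2] = (struct one_elem){ .priv = malloc(16), .update_flags = 0 };

        trans_set_elem_destroy(te);
        free(te);
        printf("freed the new elements, skipped the update-only entry\n");
        return 0;
    }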
Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- include/net/netfilter/nf_tables.h | 21 ++- net/netfilter/nf_tables_api.c | 228 +++++++++++++++++++++--------- 2 files changed, 173 insertions(+), 76 deletions(-) diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h index f24278767bfd..37af0b174c39 100644 --- a/include/net/netfilter/nf_tables.h +++ b/include/net/netfilter/nf_tables.h @@ -1759,28 +1759,25 @@ enum nft_trans_elem_flags { NFT_TRANS_UPD_EXPIRATION = (1 << 1), }; -struct nft_trans_elem { - struct nft_trans nft_trans; - struct nft_set *set; - struct nft_elem_priv *elem_priv; +struct nft_trans_one_elem { + struct nft_elem_priv *priv; u64 timeout; u64 expiration; u8 update_flags; +}; + +struct nft_trans_elem { + struct nft_trans nft_trans; + struct nft_set *set; bool bound; + unsigned int nelems; + struct nft_trans_one_elem elems[] __counted_by(nelems); }; #define nft_trans_container_elem(t) \ container_of(t, struct nft_trans_elem, nft_trans) #define nft_trans_elem_set(trans) \ nft_trans_container_elem(trans)->set -#define nft_trans_elem_priv(trans) \ - nft_trans_container_elem(trans)->elem_priv -#define nft_trans_elem_update_flags(trans) \ - nft_trans_container_elem(trans)->update_flags -#define nft_trans_elem_timeout(trans) \ - nft_trans_container_elem(trans)->timeout -#define nft_trans_elem_expiration(trans) \ - nft_trans_container_elem(trans)->expiration #define nft_trans_elem_set_bound(trans) \ nft_trans_container_elem(trans)->bound diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index 75c84b17ab99..0882f78c2204 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -6446,13 +6446,17 @@ static struct nft_trans *nft_trans_elem_alloc(const struct nft_ctx *ctx, int msg_type, struct nft_set *set) { + struct nft_trans_elem *te; struct nft_trans *trans; - trans = nft_trans_alloc(ctx, msg_type, sizeof(struct nft_trans_elem)); + trans = nft_trans_alloc(ctx, msg_type, struct_size(te, elems, 1)); if (trans == NULL) return NULL; - nft_trans_elem_set(trans) = set; + te = nft_trans_container_elem(trans); + te->nelems = 1; + te->set = set; + return trans; } @@ -6574,28 +6578,51 @@ static void nft_set_elem_expr_destroy(const struct nft_ctx *ctx, } /* Drop references and destroy. Called from gc, dynset and abort path. */ -void nft_set_elem_destroy(const struct nft_set *set, - const struct nft_elem_priv *elem_priv, - bool destroy_expr) +static void __nft_set_elem_destroy(const struct nft_ctx *ctx, + const struct nft_set *set, + const struct nft_elem_priv *elem_priv, + bool destroy_expr) { struct nft_set_ext *ext = nft_set_elem_ext(set, elem_priv); - struct nft_ctx ctx = { - .net = read_pnet(&set->net), - .family = set->table->family, - }; nft_data_release(nft_set_ext_key(ext), NFT_DATA_VALUE); if (nft_set_ext_exists(ext, NFT_SET_EXT_DATA)) nft_data_release(nft_set_ext_data(ext), set->dtype); if (destroy_expr && nft_set_ext_exists(ext, NFT_SET_EXT_EXPRESSIONS)) - nft_set_elem_expr_destroy(&ctx, nft_set_ext_expr(ext)); + nft_set_elem_expr_destroy(ctx, nft_set_ext_expr(ext)); if (nft_set_ext_exists(ext, NFT_SET_EXT_OBJREF)) nft_use_dec(&(*nft_set_ext_obj(ext))->use); kfree(elem_priv); } + +/* Drop references and destroy. Called from gc and dynset. 
*/ +void nft_set_elem_destroy(const struct nft_set *set, + const struct nft_elem_priv *elem_priv, + bool destroy_expr) +{ + struct nft_ctx ctx = { + .net = read_pnet(&set->net), + .family = set->table->family, + }; + + __nft_set_elem_destroy(&ctx, set, elem_priv, destroy_expr); +} EXPORT_SYMBOL_GPL(nft_set_elem_destroy); +/* Drop references and destroy. Called from abort path. */ +static void nft_trans_set_elem_destroy(const struct nft_ctx *ctx, struct nft_trans_elem *te) +{ + int i; + + for (i = 0; i < te->nelems; i++) { + if (te->elems[i].update_flags) + continue; + + __nft_set_elem_destroy(ctx, te->set, te->elems[i].priv, true); + } +} + /* Destroy element. References have been already dropped in the preparation * path via nft_setelem_data_deactivate(). */ @@ -6611,6 +6638,15 @@ void nf_tables_set_elem_destroy(const struct nft_ctx *ctx, kfree(elem_priv); } +static void nft_trans_elems_destroy(const struct nft_ctx *ctx, + const struct nft_trans_elem *te) +{ + int i; + + for (i = 0; i < te->nelems; i++) + nf_tables_set_elem_destroy(ctx, te->set, te->elems[i].priv); +} + int nft_set_elem_expr_clone(const struct nft_ctx *ctx, struct nft_set *set, struct nft_expr *expr_array[]) { @@ -6767,6 +6803,36 @@ static void nft_setelem_activate(struct net *net, struct nft_set *set, } } +static void nft_trans_elem_update(const struct nft_set *set, + const struct nft_trans_one_elem *elem) +{ + const struct nft_set_ext *ext = nft_set_elem_ext(set, elem->priv); + + if (elem->update_flags & NFT_TRANS_UPD_TIMEOUT) + WRITE_ONCE(nft_set_ext_timeout(ext)->timeout, elem->timeout); + + if (elem->update_flags & NFT_TRANS_UPD_EXPIRATION) + WRITE_ONCE(nft_set_ext_timeout(ext)->expiration, get_jiffies_64() + elem->expiration); +} + +static void nft_trans_elems_add(const struct nft_ctx *ctx, + struct nft_trans_elem *te) +{ + int i; + + for (i = 0; i < te->nelems; i++) { + const struct nft_trans_one_elem *elem = &te->elems[i]; + + if (elem->update_flags) + nft_trans_elem_update(te->set, elem); + else + nft_setelem_activate(ctx->net, te->set, elem->priv); + + nf_tables_setelem_notify(ctx, te->set, elem->priv, + NFT_MSG_NEWSETELEM); + } +} + static int nft_setelem_catchall_deactivate(const struct net *net, struct nft_set *set, struct nft_set_elem *elem) @@ -6849,6 +6915,24 @@ static void nft_setelem_remove(const struct net *net, set->ops->remove(net, set, elem_priv); } +static void nft_trans_elems_remove(const struct nft_ctx *ctx, + const struct nft_trans_elem *te) +{ + int i; + + for (i = 0; i < te->nelems; i++) { + nf_tables_setelem_notify(ctx, te->set, + te->elems[i].priv, + te->nft_trans.msg_type); + + nft_setelem_remove(ctx->net, te->set, te->elems[i].priv); + if (!nft_setelem_is_catchall(te->set, te->elems[i].priv)) { + atomic_dec(&te->set->nelems); + te->set->ndeact--; + } + } +} + static bool nft_setelem_valid_key_end(const struct nft_set *set, struct nlattr **nla, u32 flags) { @@ -7200,22 +7284,26 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set, else if (!(nlmsg_flags & NLM_F_EXCL)) { err = 0; if (nft_set_ext_exists(ext2, NFT_SET_EXT_TIMEOUT)) { + struct nft_trans_one_elem *update; + + update = &nft_trans_container_elem(trans)->elems[0]; + update_flags = 0; if (timeout != nft_set_ext_timeout(ext2)->timeout) { - nft_trans_elem_timeout(trans) = timeout; + update->timeout = timeout; if (expiration == 0) expiration = timeout; update_flags |= NFT_TRANS_UPD_TIMEOUT; } if (expiration) { - nft_trans_elem_expiration(trans) = expiration; + update->expiration = expiration; update_flags |= 
NFT_TRANS_UPD_EXPIRATION; } if (update_flags) { - nft_trans_elem_priv(trans) = elem_priv; - nft_trans_elem_update_flags(trans) = update_flags; + update->priv = elem_priv; + update->update_flags = update_flags; nft_trans_commit_list_add_elem(ctx->net, trans, GFP_KERNEL); goto err_elem_free; } @@ -7239,7 +7327,7 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set, } } - nft_trans_elem_priv(trans) = elem.priv; + nft_trans_container_elem(trans)->elems[0].priv = elem.priv; nft_trans_commit_list_add_elem(ctx->net, trans, GFP_KERNEL); return 0; @@ -7377,6 +7465,50 @@ void nft_setelem_data_deactivate(const struct net *net, nft_use_dec(&(*nft_set_ext_obj(ext))->use); } +/* similar to nft_trans_elems_remove, but called from abort path to undo newsetelem. + * No notifications and no ndeact changes. + * + * Returns true if set had been added to (i.e., elements need to be removed again). + */ +static bool nft_trans_elems_new_abort(const struct nft_ctx *ctx, + const struct nft_trans_elem *te) +{ + bool removed = false; + int i; + + for (i = 0; i < te->nelems; i++) { + if (te->elems[i].update_flags) + continue; + + if (!te->set->ops->abort || nft_setelem_is_catchall(te->set, te->elems[i].priv)) + nft_setelem_remove(ctx->net, te->set, te->elems[i].priv); + + if (!nft_setelem_is_catchall(te->set, te->elems[i].priv)) + atomic_dec(&te->set->nelems); + + removed = true; + } + + return removed; +} + +/* Called from abort path to undo DELSETELEM/DESTROYSETELEM. */ +static void nft_trans_elems_destroy_abort(const struct nft_ctx *ctx, + const struct nft_trans_elem *te) +{ + int i; + + for (i = 0; i < te->nelems; i++) { + if (!nft_setelem_active_next(ctx->net, te->set, te->elems[i].priv)) { + nft_setelem_data_activate(ctx->net, te->set, te->elems[i].priv); + nft_setelem_activate(ctx->net, te->set, te->elems[i].priv); + } + + if (!nft_setelem_is_catchall(te->set, te->elems[i].priv)) + te->set->ndeact--; + } +} + static int nft_del_setelem(struct nft_ctx *ctx, struct nft_set *set, const struct nlattr *attr) { @@ -7456,7 +7588,7 @@ static int nft_del_setelem(struct nft_ctx *ctx, struct nft_set *set, nft_setelem_data_deactivate(ctx->net, set, elem.priv); - nft_trans_elem_priv(trans) = elem.priv; + nft_trans_container_elem(trans)->elems[0].priv = elem.priv; nft_trans_commit_list_add_elem(ctx->net, trans, GFP_KERNEL); return 0; @@ -7483,7 +7615,8 @@ static int nft_setelem_flush(const struct nft_ctx *ctx, return 0; trans = nft_trans_alloc_gfp(ctx, NFT_MSG_DELSETELEM, - sizeof(struct nft_trans_elem), GFP_ATOMIC); + struct_size_t(struct nft_trans_elem, elems, 1), + GFP_ATOMIC); if (!trans) return -ENOMEM; @@ -7492,7 +7625,8 @@ static int nft_setelem_flush(const struct nft_ctx *ctx, nft_setelem_data_deactivate(ctx->net, set, elem_priv); nft_trans_elem_set(trans) = set; - nft_trans_elem_priv(trans) = elem_priv; + nft_trans_container_elem(trans)->nelems = 1; + nft_trans_container_elem(trans)->elems[0].priv = elem_priv; nft_trans_commit_list_add_elem(ctx->net, trans, GFP_ATOMIC); return 0; @@ -7509,7 +7643,7 @@ static int __nft_set_catchall_flush(const struct nft_ctx *ctx, return -ENOMEM; nft_setelem_data_deactivate(ctx->net, set, elem_priv); - nft_trans_elem_priv(trans) = elem_priv; + nft_trans_container_elem(trans)->elems[0].priv = elem_priv; nft_trans_commit_list_add_elem(ctx->net, trans, GFP_KERNEL); return 0; @@ -9691,9 +9825,7 @@ static void nft_commit_release(struct nft_trans *trans) break; case NFT_MSG_DELSETELEM: case NFT_MSG_DESTROYSETELEM: - nf_tables_set_elem_destroy(&ctx, - 
nft_trans_elem_set(trans), - nft_trans_elem_priv(trans)); + nft_trans_elems_destroy(&ctx, nft_trans_container_elem(trans)); break; case NFT_MSG_DELOBJ: case NFT_MSG_DESTROYOBJ: @@ -10546,25 +10678,8 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb) case NFT_MSG_NEWSETELEM: te = nft_trans_container_elem(trans); - if (te->update_flags) { - const struct nft_set_ext *ext = - nft_set_elem_ext(te->set, te->elem_priv); + nft_trans_elems_add(&ctx, te); - if (te->update_flags & NFT_TRANS_UPD_TIMEOUT) { - WRITE_ONCE(nft_set_ext_timeout(ext)->timeout, - te->timeout); - } - if (te->update_flags & NFT_TRANS_UPD_EXPIRATION) { - WRITE_ONCE(nft_set_ext_timeout(ext)->expiration, - get_jiffies_64() + te->expiration); - } - } else { - nft_setelem_activate(net, te->set, te->elem_priv); - } - - nf_tables_setelem_notify(&ctx, te->set, - te->elem_priv, - NFT_MSG_NEWSETELEM); if (te->set->ops->commit && list_empty(&te->set->pending_update)) { list_add_tail(&te->set->pending_update, @@ -10576,14 +10691,8 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb) case NFT_MSG_DESTROYSETELEM: te = nft_trans_container_elem(trans); - nf_tables_setelem_notify(&ctx, te->set, - te->elem_priv, - trans->msg_type); - nft_setelem_remove(net, te->set, te->elem_priv); - if (!nft_setelem_is_catchall(te->set, te->elem_priv)) { - atomic_dec(&te->set->nelems); - te->set->ndeact--; - } + nft_trans_elems_remove(&ctx, te); + if (te->set->ops->commit && list_empty(&te->set->pending_update)) { list_add_tail(&te->set->pending_update, @@ -10703,8 +10812,7 @@ static void nf_tables_abort_release(struct nft_trans *trans) nft_set_destroy(&ctx, nft_trans_set(trans)); break; case NFT_MSG_NEWSETELEM: - nft_set_elem_destroy(nft_trans_elem_set(trans), - nft_trans_elem_priv(trans), true); + nft_trans_set_elem_destroy(&ctx, nft_trans_container_elem(trans)); break; case NFT_MSG_NEWOBJ: nft_obj_destroy(&ctx, nft_trans_obj(trans)); @@ -10861,18 +10969,15 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action) nft_trans_destroy(trans); break; case NFT_MSG_NEWSETELEM: - if (nft_trans_elem_update_flags(trans) || - nft_trans_elem_set_bound(trans)) { + if (nft_trans_elem_set_bound(trans)) { nft_trans_destroy(trans); break; } te = nft_trans_container_elem(trans); - if (!te->set->ops->abort || - nft_setelem_is_catchall(te->set, te->elem_priv)) - nft_setelem_remove(net, te->set, te->elem_priv); - - if (!nft_setelem_is_catchall(te->set, te->elem_priv)) - atomic_dec(&te->set->nelems); + if (!nft_trans_elems_new_abort(&ctx, te)) { + nft_trans_destroy(trans); + break; + } if (te->set->ops->abort && list_empty(&te->set->pending_update)) { @@ -10884,12 +10989,7 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action) case NFT_MSG_DESTROYSETELEM: te = nft_trans_container_elem(trans); - if (!nft_setelem_active_next(net, te->set, te->elem_priv)) { - nft_setelem_data_activate(net, te->set, te->elem_priv); - nft_setelem_activate(net, te->set, te->elem_priv); - } - if (!nft_setelem_is_catchall(te->set, te->elem_priv)) - te->set->ndeact--; + nft_trans_elems_destroy_abort(&ctx, te); if (te->set->ops->abort && list_empty(&te->set->pending_update)) { -- 2.51.0 From 466c9b3b2a92602360e9fa25943b8aa191122dfc Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Wed, 13 Nov 2024 16:35:51 +0100 Subject: [PATCH 15/16] netfilter: nf_tables: prepare nft audit for set element compaction nftables audit log format emits the number of added/deleted rules, sets, set elements and so on, to userspace: table=t1 
family=2 entries=4 op=nft_register_set ~~~~~~~~~ At this time, the 'entries' key is the number of transactions that will be applied. The upcoming set element compression will coalesce subsequent adds/deletes to the same set requests in the same transaction request to conseve memory. Without this patch, we'd under-report the number of altered elements. Increment the audit counter by the number of elements to keep the reported entries value the same. Without this, nft_audit.sh selftest fails because the recorded (expected) entries key is smaller than the expected one. Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- net/netfilter/nf_tables_api.c | 21 ++++++++++++++++++--- 1 file changed, 18 insertions(+), 3 deletions(-) diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index 0882f78c2204..5b5178841553 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -10398,9 +10398,24 @@ static void nf_tables_commit_audit_free(struct list_head *adl) } } +/* nft audit emits the number of elements that get added/removed/updated, + * so NEW/DELSETELEM needs to increment based on the total elem count. + */ +static unsigned int nf_tables_commit_audit_entrycount(const struct nft_trans *trans) +{ + switch (trans->msg_type) { + case NFT_MSG_NEWSETELEM: + case NFT_MSG_DELSETELEM: + return nft_trans_container_elem(trans)->nelems; + } + + return 1; +} + static void nf_tables_commit_audit_collect(struct list_head *adl, - struct nft_table *table, u32 op) + const struct nft_trans *trans, u32 op) { + const struct nft_table *table = trans->table; struct nft_audit_data *adp; list_for_each_entry(adp, adl, list) { @@ -10410,7 +10425,7 @@ static void nf_tables_commit_audit_collect(struct list_head *adl, WARN_ONCE(1, "table=%s not expected in commit list", table->name); return; found: - adp->entries++; + adp->entries += nf_tables_commit_audit_entrycount(trans); if (!adp->op || adp->op > op) adp->op = op; } @@ -10569,7 +10584,7 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb) nft_ctx_update(&ctx, trans); - nf_tables_commit_audit_collect(&adl, table, trans->msg_type); + nf_tables_commit_audit_collect(&adl, trans, trans->msg_type); switch (trans->msg_type) { case NFT_MSG_NEWTABLE: if (nft_trans_table_update(trans)) { -- 2.51.0 From b0c49466043a4878d8ef1263a4c9020698958a4c Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Wed, 13 Nov 2024 16:35:52 +0100 Subject: [PATCH 16/16] netfilter: nf_tables: switch trans_elem to real flex array When queueing a set element add or removal operation to the transaction log, check if the previous operation already asks for a the identical operation on the same set. If so, store the element reference in the preceding operation. This significantlty reduces memory consumption when many set add/delete operations appear in a single transaction. Example: 10k elements require 937kb of memory (10k allocations from kmalloc-96 slab). Assuming we can compact 4 elements in the same set, 468 kbytes are needed (64 bytes for base struct, nft_trans_elemn, 32 bytes for nft_trans_one_elem structure, so 2500 allocations from kmalloc-192 slab). For large batch updates we can compact up to 62 elements into one single nft_trans_elem structure (~65% mem reduction): (64 bytes for base struct, nft_trans_elem, 32 byte for nft_trans_one_elem struct). 
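For reference, the arithmetic behind these figures can be reproduced in a few lines (a sketch: the 96-byte, 64-byte and 32-byte sizes are taken from the numbers quoted above, not measured):

    #include <stdio.h>

    #define BASE_SZ     64      /* quoted size of the nft_trans_elem base part */
    #define PER_ELEM_SZ 32      /* quoted size of struct nft_trans_one_elem */
    #define NELEMS      10000

    int main(void)
    {
        /* before: one kmalloc-96 object per element */
        printf("uncompacted:      %d KiB\n", NELEMS * 96 / 1024);

        /* 4 elements per container: 2500 objects of 64 + 4 * 32 = 192 bytes */
        printf("4 per container:  %d KiB\n",
               (NELEMS / 4) * (BASE_SZ + 4 * PER_ELEM_SZ) / 1024);

        /* 62 elements fill a 2k slab object: 64 + 62 * 32 = 2048 bytes */
        printf("62 per container: %d bytes/object, ~%d bytes/element\n",
               BASE_SZ + 62 * PER_ELEM_SZ,
               (BASE_SZ + 62 * PER_ELEM_SZ) / 62);
        return 0;
    }

This prints 937 KiB, 468 KiB and roughly 33 bytes per element instead of 96, which is where the ~65% figure comes from.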
We can halve size of nft_trans_one_elem struct by moving timeout/expire/update_flags into a dynamically allocated structure, this allows to store 124 elements in a 2k slab nft_trans_elem struct. This is done in a followup patch. Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- net/netfilter/nf_tables_api.c | 90 +++++++++++++++++++++++++++++++++++ 1 file changed, 90 insertions(+) diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index 5b5178841553..679312d71bbe 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -26,6 +26,9 @@ #define NFT_MODULE_AUTOLOAD_LIMIT (MODULE_NAME_LEN - sizeof("nft-expr-255-")) #define NFT_SET_MAX_ANONLEN 16 +/* limit compaction to avoid huge kmalloc/krealloc sizes. */ +#define NFT_MAX_SET_NELEMS ((2048 - sizeof(struct nft_trans_elem)) / sizeof(struct nft_trans_one_elem)) + unsigned int nf_tables_net_id __read_mostly; static LIST_HEAD(nf_tables_expressions); @@ -391,6 +394,86 @@ static void nf_tables_unregister_hook(struct net *net, return __nf_tables_unregister_hook(net, table, chain, false); } +static bool nft_trans_collapse_set_elem_allowed(const struct nft_trans_elem *a, const struct nft_trans_elem *b) +{ + /* NB: the ->bound equality check is defensive, at this time we only merge + * a new nft_trans_elem transaction request with the transaction tail + * element, but a->bound != b->bound would imply a NEWRULE transaction + * is queued in-between. + * + * The set check is mandatory, the NFT_MAX_SET_NELEMS check prevents + * huge krealloc() requests. + */ + return a->set == b->set && a->bound == b->bound && a->nelems < NFT_MAX_SET_NELEMS; +} + +static bool nft_trans_collapse_set_elem(struct nftables_pernet *nft_net, + struct nft_trans_elem *tail, + struct nft_trans_elem *trans, + gfp_t gfp) +{ + unsigned int nelems, old_nelems = tail->nelems; + struct nft_trans_elem *new_trans; + + if (!nft_trans_collapse_set_elem_allowed(tail, trans)) + return false; + + /* "cannot happen", at this time userspace element add + * requests always allocate a new transaction element. + * + * This serves as a reminder to adjust the list_add_tail + * logic below in case this ever changes. + */ + if (WARN_ON_ONCE(trans->nelems != 1)) + return false; + + if (check_add_overflow(old_nelems, trans->nelems, &nelems)) + return false; + + /* krealloc might free tail which invalidates list pointers */ + list_del_init(&tail->nft_trans.list); + + new_trans = krealloc(tail, struct_size(tail, elems, nelems), gfp); + if (!new_trans) { + list_add_tail(&tail->nft_trans.list, &nft_net->commit_list); + return false; + } + + /* + * new_trans->nft_trans.list contains garbage, but + * list_add_tail() doesn't care. 
+ */ + new_trans->nelems = nelems; + new_trans->elems[old_nelems] = trans->elems[0]; + list_add_tail(&new_trans->nft_trans.list, &nft_net->commit_list); + + return true; +} + +static bool nft_trans_try_collapse(struct nftables_pernet *nft_net, + struct nft_trans *trans, gfp_t gfp) +{ + struct nft_trans *tail; + + if (list_empty(&nft_net->commit_list)) + return false; + + tail = list_last_entry(&nft_net->commit_list, struct nft_trans, list); + + if (tail->msg_type != trans->msg_type) + return false; + + switch (trans->msg_type) { + case NFT_MSG_NEWSETELEM: + case NFT_MSG_DELSETELEM: + return nft_trans_collapse_set_elem(nft_net, + nft_trans_container_elem(tail), + nft_trans_container_elem(trans), gfp); + } + + return false; +} + static void nft_trans_commit_list_add_tail(struct net *net, struct nft_trans *trans) { struct nftables_pernet *nft_net = nft_pernet(net); @@ -424,11 +507,18 @@ static void nft_trans_commit_list_add_tail(struct net *net, struct nft_trans *tr static void nft_trans_commit_list_add_elem(struct net *net, struct nft_trans *trans, gfp_t gfp) { + struct nftables_pernet *nft_net = nft_pernet(net); + WARN_ON_ONCE(trans->msg_type != NFT_MSG_NEWSETELEM && trans->msg_type != NFT_MSG_DELSETELEM); might_alloc(gfp); + if (nft_trans_try_collapse(nft_net, trans, gfp)) { + kfree(trans); + return; + } + nft_trans_commit_list_add_tail(net, trans); } -- 2.51.0
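For completeness, a userspace sketch of the collapse pattern implemented above: grow the flexible array of the tail transaction with realloc() (standing in for krealloc()) and cap the object at 2 KiB. The toy structs are smaller than the kernel ones, so the computed cap is not 62 here; the flow, not the numbers, is the point.

    #include <stdio.h>
    #include <stdlib.h>

    struct one_elem { void *priv; };

    struct trans_elem {
        unsigned int nelems;
        struct one_elem elems[];
    };

    /* cap the container to a 2 KiB object, in the spirit of NFT_MAX_SET_NELEMS */
    #define MAX_NELEMS ((2048 - sizeof(struct trans_elem)) / sizeof(struct one_elem))

    /* append priv to the tail container if allowed, growing the flexible array by one */
    static struct trans_elem *try_collapse(struct trans_elem *tail, void *priv)
    {
        struct trans_elem *grown;

        if (tail->nelems >= MAX_NELEMS)
            return NULL;    /* caller queues a separate transaction instead */

        grown = realloc(tail, sizeof(*grown) + (tail->nelems + 1) * sizeof(struct one_elem));
        if (!grown)
            return NULL;    /* tail is still valid and unchanged */

        grown->elems[grown->nelems++].priv = priv;
        return grown;
    }

    int main(void)
    {
        struct trans_elem *te = malloc(sizeof(*te) + sizeof(struct one_elem));
        int dummy[4];

        te->nelems = 1;
        te->elems[0].priv = &dummy[0];

        for (int i = 1; i < 4; i++) {
            struct trans_elem *grown = try_collapse(te, &dummy[i]);

            if (grown)
                te = grown;
        }
        printf("container now holds %u elements (cap here: %zu)\n", te->nelems, MAX_NELEMS);
        free(te);
        return 0;
    }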