From 5a731857656e3988935108f48800cd764a550005 Mon Sep 17 00:00:00 2001 From: Carolina Jubran Date: Thu, 7 Nov 2024 21:43:47 +0200 Subject: [PATCH 01/16] net/mlx5: Simplify QoS normalization by removing error handling This change updates esw_qos_normalize_min_rate to not return errors, significantly simplifying the code. Normalization failures are software bugs, and it's unnecessary to handle them with rollback mechanisms. Instead, `esw_qos_update_sched_node_bw_share` and `esw_qos_normalize_min_rate` now return void, with any errors logged as warnings to indicate potential software issues. This approach avoids compensating for hidden bugs and removes error handling from all places that perform normalization, streamlining future patches. Signed-off-by: Carolina Jubran Reviewed-by: Cosmin Ratiu Signed-off-by: Tariq Toukan Link: https://patch.msgid.link/20241107194357.683732-3-tariqt@nvidia.com Signed-off-by: Jakub Kicinski --- .../net/ethernet/mellanox/mlx5/core/esw/qos.c | 72 +++++-------------- 1 file changed, 17 insertions(+), 55 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c index 940e1c2d1e39..0c371f27c693 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c @@ -208,64 +208,49 @@ static u32 esw_qos_calc_bw_share(u32 min_rate, u32 divider, u32 fw_max) return min_t(u32, max_t(u32, DIV_ROUND_UP(min_rate, divider), MLX5_MIN_BW_SHARE), fw_max); } -static int esw_qos_update_sched_node_bw_share(struct mlx5_esw_sched_node *node, - u32 divider, - struct netlink_ext_ack *extack) +static void esw_qos_update_sched_node_bw_share(struct mlx5_esw_sched_node *node, + u32 divider, + struct netlink_ext_ack *extack) { u32 fw_max_bw_share = MLX5_CAP_QOS(node->esw->dev, max_tsar_bw_share); u32 bw_share; - int err; bw_share = esw_qos_calc_bw_share(node->min_rate, divider, fw_max_bw_share); if (bw_share == node->bw_share) - return 0; - - err = esw_qos_sched_elem_config(node, node->max_rate, bw_share, extack); - if (err) - return err; + return; + esw_qos_sched_elem_config(node, node->max_rate, bw_share, extack); node->bw_share = bw_share; - - return err; } -static int esw_qos_normalize_min_rate(struct mlx5_eswitch *esw, - struct mlx5_esw_sched_node *parent, - struct netlink_ext_ack *extack) +static void esw_qos_normalize_min_rate(struct mlx5_eswitch *esw, + struct mlx5_esw_sched_node *parent, + struct netlink_ext_ack *extack) { struct list_head *nodes = parent ? 
&parent->children : &esw->qos.domain->nodes; u32 divider = esw_qos_calculate_min_rate_divider(esw, parent); struct mlx5_esw_sched_node *node; list_for_each_entry(node, nodes, entry) { - int err; - if (node->esw != esw || node->ix == esw->qos.root_tsar_ix) continue; - err = esw_qos_update_sched_node_bw_share(node, divider, extack); - if (err) - return err; + esw_qos_update_sched_node_bw_share(node, divider, extack); if (list_empty(&node->children)) continue; - err = esw_qos_normalize_min_rate(node->esw, node, extack); - if (err) - return err; + esw_qos_normalize_min_rate(node->esw, node, extack); } - - return 0; } static int esw_qos_set_vport_min_rate(struct mlx5_vport *vport, u32 min_rate, struct netlink_ext_ack *extack) { struct mlx5_esw_sched_node *vport_node = vport->qos.sched_node; - u32 fw_max_bw_share, previous_min_rate; bool min_rate_supported; - int err; + u32 fw_max_bw_share; esw_assert_qos_lock_held(vport_node->esw); fw_max_bw_share = MLX5_CAP_QOS(vport->dev, max_tsar_bw_share); @@ -276,13 +261,10 @@ static int esw_qos_set_vport_min_rate(struct mlx5_vport *vport, if (min_rate == vport_node->min_rate) return 0; - previous_min_rate = vport_node->min_rate; vport_node->min_rate = min_rate; - err = esw_qos_normalize_min_rate(vport_node->parent->esw, vport_node->parent, extack); - if (err) - vport_node->min_rate = previous_min_rate; + esw_qos_normalize_min_rate(vport_node->parent->esw, vport_node->parent, extack); - return err; + return 0; } static int esw_qos_set_vport_max_rate(struct mlx5_vport *vport, @@ -316,8 +298,6 @@ static int esw_qos_set_node_min_rate(struct mlx5_esw_sched_node *node, u32 min_rate, struct netlink_ext_ack *extack) { struct mlx5_eswitch *esw = node->esw; - u32 previous_min_rate; - int err; if (!MLX5_CAP_QOS(esw->dev, esw_bw_share) || MLX5_CAP_QOS(esw->dev, max_tsar_bw_share) < MLX5_MIN_BW_SHARE) @@ -326,19 +306,10 @@ static int esw_qos_set_node_min_rate(struct mlx5_esw_sched_node *node, if (min_rate == node->min_rate) return 0; - previous_min_rate = node->min_rate; node->min_rate = min_rate; - err = esw_qos_normalize_min_rate(esw, NULL, extack); - if (err) { - NL_SET_ERR_MSG_MOD(extack, "E-Switch node min rate setting failed"); - - /* Attempt restoring previous configuration */ - node->min_rate = previous_min_rate; - if (esw_qos_normalize_min_rate(esw, NULL, extack)) - NL_SET_ERR_MSG_MOD(extack, "E-Switch BW share restore failed"); - } + esw_qos_normalize_min_rate(esw, NULL, extack); - return err; + return 0; } static int esw_qos_set_node_max_rate(struct mlx5_esw_sched_node *node, @@ -552,17 +523,11 @@ __esw_qos_create_vports_sched_node(struct mlx5_eswitch *esw, struct mlx5_esw_sch goto err_alloc_node; } - err = esw_qos_normalize_min_rate(esw, NULL, extack); - if (err) { - NL_SET_ERR_MSG_MOD(extack, "E-Switch nodes normalization failed"); - goto err_min_rate; - } + esw_qos_normalize_min_rate(esw, NULL, extack); trace_mlx5_esw_node_qos_create(esw->dev, node, node->ix); return node; -err_min_rate: - __esw_qos_free_node(node); err_alloc_node: if (mlx5_destroy_scheduling_element_cmd(esw->dev, SCHEDULING_HIERARCHY_E_SWITCH, @@ -609,10 +574,7 @@ static int __esw_qos_destroy_node(struct mlx5_esw_sched_node *node, struct netli NL_SET_ERR_MSG_MOD(extack, "E-Switch destroy TSAR_ID failed"); __esw_qos_free_node(node); - err = esw_qos_normalize_min_rate(esw, NULL, extack); - if (err) - NL_SET_ERR_MSG_MOD(extack, "E-Switch nodes normalization failed"); - + esw_qos_normalize_min_rate(esw, NULL, extack); return err; } -- 2.50.1 From ac778fefed340e019bc9c022842b4a2cc5713559 Mon 
Sep 17 00:00:00 2001 From: Carolina Jubran Date: Thu, 7 Nov 2024 21:43:48 +0200 Subject: [PATCH 02/16] net/mlx5: Generalize max_rate and min_rate setting for nodes Refactor max_rate and min_rate setting functions to operate on mlx5_esw_sched_node, allowing for generalized handling of both vports and nodes. Signed-off-by: Carolina Jubran Reviewed-by: Cosmin Ratiu Signed-off-by: Tariq Toukan Link: https://patch.msgid.link/20241107194357.683732-4-tariqt@nvidia.com Signed-off-by: Jakub Kicinski --- .../net/ethernet/mellanox/mlx5/core/esw/qos.c | 69 ++++--------------- 1 file changed, 13 insertions(+), 56 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c index 0c371f27c693..82805bb20c76 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c @@ -245,69 +245,20 @@ static void esw_qos_normalize_min_rate(struct mlx5_eswitch *esw, } } -static int esw_qos_set_vport_min_rate(struct mlx5_vport *vport, - u32 min_rate, struct netlink_ext_ack *extack) -{ - struct mlx5_esw_sched_node *vport_node = vport->qos.sched_node; - bool min_rate_supported; - u32 fw_max_bw_share; - - esw_assert_qos_lock_held(vport_node->esw); - fw_max_bw_share = MLX5_CAP_QOS(vport->dev, max_tsar_bw_share); - min_rate_supported = MLX5_CAP_QOS(vport->dev, esw_bw_share) && - fw_max_bw_share >= MLX5_MIN_BW_SHARE; - if (min_rate && !min_rate_supported) - return -EOPNOTSUPP; - if (min_rate == vport_node->min_rate) - return 0; - - vport_node->min_rate = min_rate; - esw_qos_normalize_min_rate(vport_node->parent->esw, vport_node->parent, extack); - - return 0; -} - -static int esw_qos_set_vport_max_rate(struct mlx5_vport *vport, - u32 max_rate, struct netlink_ext_ack *extack) -{ - struct mlx5_esw_sched_node *vport_node = vport->qos.sched_node; - u32 act_max_rate = max_rate; - bool max_rate_supported; - int err; - - esw_assert_qos_lock_held(vport_node->esw); - max_rate_supported = MLX5_CAP_QOS(vport->dev, esw_rate_limit); - - if (max_rate && !max_rate_supported) - return -EOPNOTSUPP; - if (max_rate == vport_node->max_rate) - return 0; - - /* Use parent node limit if new max rate is 0. */ - if (!max_rate) - act_max_rate = vport_node->parent->max_rate; - - err = esw_qos_sched_elem_config(vport_node, act_max_rate, vport_node->bw_share, extack); - if (!err) - vport_node->max_rate = max_rate; - - return err; -} - static int esw_qos_set_node_min_rate(struct mlx5_esw_sched_node *node, u32 min_rate, struct netlink_ext_ack *extack) { struct mlx5_eswitch *esw = node->esw; - if (!MLX5_CAP_QOS(esw->dev, esw_bw_share) || - MLX5_CAP_QOS(esw->dev, max_tsar_bw_share) < MLX5_MIN_BW_SHARE) + if (min_rate && (!MLX5_CAP_QOS(esw->dev, esw_bw_share) || + MLX5_CAP_QOS(esw->dev, max_tsar_bw_share) < MLX5_MIN_BW_SHARE)) return -EOPNOTSUPP; if (min_rate == node->min_rate) return 0; node->min_rate = min_rate; - esw_qos_normalize_min_rate(esw, NULL, extack); + esw_qos_normalize_min_rate(esw, node->parent, extack); return 0; } @@ -321,11 +272,17 @@ static int esw_qos_set_node_max_rate(struct mlx5_esw_sched_node *node, if (node->max_rate == max_rate) return 0; + /* Use parent node limit if new max rate is 0. 
*/ + if (!max_rate && node->parent) + max_rate = node->parent->max_rate; + err = esw_qos_sched_elem_config(node, max_rate, node->bw_share, extack); if (err) return err; node->max_rate = max_rate; + if (node->type != SCHED_NODE_TYPE_VPORTS_TSAR) + return 0; /* Any unlimited vports in the node should be set with the value of the node. */ list_for_each_entry(vport_node, &node->children, entry) { @@ -748,9 +705,9 @@ int mlx5_esw_qos_set_vport_rate(struct mlx5_vport *vport, u32 max_rate, u32 min_ if (err) goto unlock; - err = esw_qos_set_vport_min_rate(vport, min_rate, NULL); + err = esw_qos_set_node_min_rate(vport->qos.sched_node, min_rate, NULL); if (!err) - err = esw_qos_set_vport_max_rate(vport, max_rate, NULL); + err = esw_qos_set_node_max_rate(vport->qos.sched_node, max_rate, NULL); unlock: esw_qos_unlock(esw); return err; @@ -947,7 +904,7 @@ int mlx5_esw_devlink_rate_leaf_tx_share_set(struct devlink_rate *rate_leaf, void if (err) goto unlock; - err = esw_qos_set_vport_min_rate(vport, tx_share, extack); + err = esw_qos_set_node_min_rate(vport->qos.sched_node, tx_share, extack); unlock: esw_qos_unlock(esw); return err; @@ -973,7 +930,7 @@ int mlx5_esw_devlink_rate_leaf_tx_max_set(struct devlink_rate *rate_leaf, void * if (err) goto unlock; - err = esw_qos_set_vport_max_rate(vport, tx_max, extack); + err = esw_qos_set_node_max_rate(vport->qos.sched_node, tx_max, extack); unlock: esw_qos_unlock(esw); return err; -- 2.50.1 From cc4bb15ffa8412bfe1e189d37edb6ca7d9918cb4 Mon Sep 17 00:00:00 2001 From: Carolina Jubran Date: Thu, 7 Nov 2024 21:43:49 +0200 Subject: [PATCH 03/16] net/mlx5: Refactor scheduling element configuration bitmasks Refactor esw_qos_sched_elem_config to set bitmasks only when max_rate or bw_share values change, allowing the function to configure nodes with only one of these parameters. This enables more flexible usage for nodes where only one parameter requires configuration. Remove scattered assignments and checks to centralize them within this function, removing the now redundant esw_qos_set_node_max_rate entirely. With this refactor, also remove the assignment of the vport scheduling node max rate to the parent max rate for unlimited vports (where max rate is set to zero), as firmware already handles this behavior. 
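
To illustrate the change-detection idea described above, here is a minimal standalone sketch (hypothetical names and bit positions, not the driver's MLX5_SET/bitmask machinery): the modify bitmask only carries the fields whose cached value differs from the requested one, and an all-zero bitmask means the firmware command can be skipped.

#include <stdint.h>
#include <stdio.h>

#define MODIFY_MAX_AVERAGE_BW	(1u << 0)	/* illustrative bit positions */
#define MODIFY_BW_SHARE		(1u << 1)

struct sched_elem {
	uint32_t max_rate;	/* last value pushed to hardware */
	uint32_t bw_share;
};

/* Build the modify bitmask from the fields that actually changed. */
static uint32_t sched_elem_changed_fields(const struct sched_elem *e,
					  uint32_t max_rate, uint32_t bw_share)
{
	uint32_t bitmask = 0;

	if (e->max_rate != max_rate)
		bitmask |= MODIFY_MAX_AVERAGE_BW;
	if (e->bw_share != bw_share)
		bitmask |= MODIFY_BW_SHARE;
	return bitmask;
}

int main(void)
{
	struct sched_elem e = { .max_rate = 1000, .bw_share = 4 };

	/* Only bw_share differs, so only that bit ends up set (0x2). */
	printf("bitmask=%#x\n", sched_elem_changed_fields(&e, 1000, 8));
	return 0;
}
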
Signed-off-by: Carolina Jubran Reviewed-by: Cosmin Ratiu Signed-off-by: Tariq Toukan Link: https://patch.msgid.link/20241107194357.683732-5-tariqt@nvidia.com Signed-off-by: Jakub Kicinski --- .../net/ethernet/mellanox/mlx5/core/esw/qos.c | 80 ++++++------------- 1 file changed, 24 insertions(+), 56 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c index 82805bb20c76..c1e7b2425ebe 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c @@ -143,10 +143,21 @@ static int esw_qos_sched_elem_config(struct mlx5_esw_sched_node *node, u32 max_r if (!MLX5_CAP_GEN(dev, qos) || !MLX5_CAP_QOS(dev, esw_scheduling)) return -EOPNOTSUPP; - MLX5_SET(scheduling_context, sched_ctx, max_average_bw, max_rate); - MLX5_SET(scheduling_context, sched_ctx, bw_share, bw_share); - bitmask |= MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_MAX_AVERAGE_BW; - bitmask |= MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_BW_SHARE; + if (bw_share && (!MLX5_CAP_QOS(dev, esw_bw_share) || + MLX5_CAP_QOS(dev, max_tsar_bw_share) < MLX5_MIN_BW_SHARE)) + return -EOPNOTSUPP; + + if (node->max_rate == max_rate && node->bw_share == bw_share) + return 0; + + if (node->max_rate != max_rate) { + MLX5_SET(scheduling_context, sched_ctx, max_average_bw, max_rate); + bitmask |= MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_MAX_AVERAGE_BW; + } + if (node->bw_share != bw_share) { + MLX5_SET(scheduling_context, sched_ctx, bw_share, bw_share); + bitmask |= MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_BW_SHARE; + } err = mlx5_modify_scheduling_element_cmd(dev, SCHEDULING_HIERARCHY_E_SWITCH, @@ -160,6 +171,8 @@ static int esw_qos_sched_elem_config(struct mlx5_esw_sched_node *node, u32 max_r return err; } + node->max_rate = max_rate; + node->bw_share = bw_share; if (node->type == SCHED_NODE_TYPE_VPORTS_TSAR) trace_mlx5_esw_node_qos_config(dev, node, node->ix, bw_share, max_rate); else if (node->type == SCHED_NODE_TYPE_VPORT) @@ -217,11 +230,7 @@ static void esw_qos_update_sched_node_bw_share(struct mlx5_esw_sched_node *node, bw_share = esw_qos_calc_bw_share(node->min_rate, divider, fw_max_bw_share); - if (bw_share == node->bw_share) - return; - esw_qos_sched_elem_config(node, node->max_rate, bw_share, extack); - node->bw_share = bw_share; } static void esw_qos_normalize_min_rate(struct mlx5_eswitch *esw, @@ -250,10 +259,6 @@ static int esw_qos_set_node_min_rate(struct mlx5_esw_sched_node *node, { struct mlx5_eswitch *esw = node->esw; - if (min_rate && (!MLX5_CAP_QOS(esw->dev, esw_bw_share) || - MLX5_CAP_QOS(esw->dev, max_tsar_bw_share) < MLX5_MIN_BW_SHARE)) - return -EOPNOTSUPP; - if (min_rate == node->min_rate) return 0; @@ -263,41 +268,6 @@ static int esw_qos_set_node_min_rate(struct mlx5_esw_sched_node *node, return 0; } -static int esw_qos_set_node_max_rate(struct mlx5_esw_sched_node *node, - u32 max_rate, struct netlink_ext_ack *extack) -{ - struct mlx5_esw_sched_node *vport_node; - int err; - - if (node->max_rate == max_rate) - return 0; - - /* Use parent node limit if new max rate is 0. */ - if (!max_rate && node->parent) - max_rate = node->parent->max_rate; - - err = esw_qos_sched_elem_config(node, max_rate, node->bw_share, extack); - if (err) - return err; - - node->max_rate = max_rate; - if (node->type != SCHED_NODE_TYPE_VPORTS_TSAR) - return 0; - - /* Any unlimited vports in the node should be set with the value of the node. 
*/ - list_for_each_entry(vport_node, &node->children, entry) { - if (vport_node->max_rate) - continue; - - err = esw_qos_sched_elem_config(vport_node, max_rate, vport_node->bw_share, extack); - if (err) - NL_SET_ERR_MSG_MOD(extack, - "E-Switch vport implicit rate limit setting failed"); - } - - return err; -} - static int esw_qos_create_node_sched_elem(struct mlx5_core_dev *dev, u32 parent_element_id, u32 *tsar_ix) { @@ -367,7 +337,6 @@ static int esw_qos_update_node_scheduling_element(struct mlx5_vport *vport, struct netlink_ext_ack *extack) { struct mlx5_esw_sched_node *vport_node = vport->qos.sched_node; - u32 max_rate; int err; err = mlx5_destroy_scheduling_element_cmd(curr_node->esw->dev, @@ -378,9 +347,7 @@ static int esw_qos_update_node_scheduling_element(struct mlx5_vport *vport, return err; } - /* Use new node max rate if vport max rate is unlimited. */ - max_rate = vport_node->max_rate ? vport_node->max_rate : new_node->max_rate; - err = esw_qos_vport_create_sched_element(vport, new_node, max_rate, + err = esw_qos_vport_create_sched_element(vport, new_node, vport_node->max_rate, vport_node->bw_share, &vport_node->ix); if (err) { @@ -393,8 +360,7 @@ static int esw_qos_update_node_scheduling_element(struct mlx5_vport *vport, return 0; err_sched: - max_rate = vport_node->max_rate ? vport_node->max_rate : curr_node->max_rate; - if (esw_qos_vport_create_sched_element(vport, curr_node, max_rate, + if (esw_qos_vport_create_sched_element(vport, curr_node, vport_node->max_rate, vport_node->bw_share, &vport_node->ix)) esw_warn(curr_node->esw->dev, "E-Switch vport node restore failed (vport=%d)\n", @@ -707,7 +673,8 @@ int mlx5_esw_qos_set_vport_rate(struct mlx5_vport *vport, u32 max_rate, u32 min_ err = esw_qos_set_node_min_rate(vport->qos.sched_node, min_rate, NULL); if (!err) - err = esw_qos_set_node_max_rate(vport->qos.sched_node, max_rate, NULL); + err = esw_qos_sched_elem_config(vport->qos.sched_node, max_rate, + vport->qos.sched_node->bw_share, NULL); unlock: esw_qos_unlock(esw); return err; @@ -930,7 +897,8 @@ int mlx5_esw_devlink_rate_leaf_tx_max_set(struct devlink_rate *rate_leaf, void * if (err) goto unlock; - err = esw_qos_set_node_max_rate(vport->qos.sched_node, tx_max, extack); + err = esw_qos_sched_elem_config(vport->qos.sched_node, tx_max, + vport->qos.sched_node->bw_share, extack); unlock: esw_qos_unlock(esw); return err; @@ -965,7 +933,7 @@ int mlx5_esw_devlink_rate_node_tx_max_set(struct devlink_rate *rate_node, void * return err; esw_qos_lock(esw); - err = esw_qos_set_node_max_rate(node, tx_max, extack); + err = esw_qos_sched_elem_config(node, tx_max, node->bw_share, extack); esw_qos_unlock(esw); return err; } -- 2.50.1 From 663bc605d0db8782ff9c2704db5ce6cf2ac7fa93 Mon Sep 17 00:00:00 2001 From: Carolina Jubran Date: Thu, 7 Nov 2024 21:43:50 +0200 Subject: [PATCH 04/16] net/mlx5: Generalize scheduling element operations Introduce helper functions to create and destroy scheduling elements, allowing flexible configuration for different scheduling element types. The new helper functions streamline the process by centralizing error handling and logging through esw_qos_sched_elem_op_warn, which now accepts the operation type (create, destroy, or modify). The changes also adjust the esw_qos_vport_enable and mlx5_esw_qos_vport_disable functions to leverage the new generalized create/destroy helpers. The destroy functions now log errors with esw_warn without returning them. 
This prevents unnecessary error handling since the node was already destroyed and no further action is required from callers. Signed-off-by: Carolina Jubran Reviewed-by: Cosmin Ratiu Signed-off-by: Tariq Toukan Link: https://patch.msgid.link/20241107194357.683732-6-tariqt@nvidia.com Signed-off-by: Jakub Kicinski --- .../net/ethernet/mellanox/mlx5/core/esw/qos.c | 157 +++++++++--------- 1 file changed, 76 insertions(+), 81 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c index c1e7b2425ebe..155400d36a1e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c @@ -118,18 +118,49 @@ mlx5_esw_qos_vport_get_parent(const struct mlx5_vport *vport) return vport->qos.sched_node->parent; } -static void esw_qos_sched_elem_config_warn(struct mlx5_esw_sched_node *node, int err) +static void esw_qos_sched_elem_warn(struct mlx5_esw_sched_node *node, int err, const char *op) { if (node->vport) { esw_warn(node->esw->dev, - "E-Switch modify %s scheduling element failed (vport=%d,err=%d)\n", - sched_node_type_str[node->type], node->vport->vport, err); + "E-Switch %s %s scheduling element failed (vport=%d,err=%d)\n", + op, sched_node_type_str[node->type], node->vport->vport, err); return; } esw_warn(node->esw->dev, - "E-Switch modify %s scheduling element failed (err=%d)\n", - sched_node_type_str[node->type], err); + "E-Switch %s %s scheduling element failed (err=%d)\n", + op, sched_node_type_str[node->type], err); +} + +static int esw_qos_node_create_sched_element(struct mlx5_esw_sched_node *node, void *ctx, + struct netlink_ext_ack *extack) +{ + int err; + + err = mlx5_create_scheduling_element_cmd(node->esw->dev, SCHEDULING_HIERARCHY_E_SWITCH, ctx, + &node->ix); + if (err) { + esw_qos_sched_elem_warn(node, err, "create"); + NL_SET_ERR_MSG_MOD(extack, "E-Switch create scheduling element failed"); + } + + return err; +} + +static int esw_qos_node_destroy_sched_element(struct mlx5_esw_sched_node *node, + struct netlink_ext_ack *extack) +{ + int err; + + err = mlx5_destroy_scheduling_element_cmd(node->esw->dev, + SCHEDULING_HIERARCHY_E_SWITCH, + node->ix); + if (err) { + esw_qos_sched_elem_warn(node, err, "destroy"); + NL_SET_ERR_MSG_MOD(extack, "E-Switch destroying scheduling element failed."); + } + + return err; } static int esw_qos_sched_elem_config(struct mlx5_esw_sched_node *node, u32 max_rate, u32 bw_share, @@ -165,7 +196,7 @@ static int esw_qos_sched_elem_config(struct mlx5_esw_sched_node *node, u32 max_r node->ix, bitmask); if (err) { - esw_qos_sched_elem_config_warn(node, err); + esw_qos_sched_elem_warn(node, err, "modify"); NL_SET_ERR_MSG_MOD(extack, "E-Switch modify scheduling element failed"); return err; @@ -295,14 +326,12 @@ static int esw_qos_create_node_sched_elem(struct mlx5_core_dev *dev, u32 parent_ tsar_ix); } -static int -esw_qos_vport_create_sched_element(struct mlx5_vport *vport, struct mlx5_esw_sched_node *parent, - u32 max_rate, u32 bw_share, u32 *sched_elem_ix) +static int esw_qos_vport_create_sched_element(struct mlx5_esw_sched_node *vport_node, u32 bw_share, + struct netlink_ext_ack *extack) { u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {}; - struct mlx5_core_dev *dev = parent->esw->dev; + struct mlx5_core_dev *dev = vport_node->esw->dev; void *attr; - int err; if (!mlx5_qos_element_type_supported(dev, SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT, @@ -312,23 +341,12 @@ esw_qos_vport_create_sched_element(struct mlx5_vport *vport, struct 
mlx5_esw_sch MLX5_SET(scheduling_context, sched_ctx, element_type, SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT); attr = MLX5_ADDR_OF(scheduling_context, sched_ctx, element_attributes); - MLX5_SET(vport_element, attr, vport_number, vport->vport); - MLX5_SET(scheduling_context, sched_ctx, parent_element_id, parent->ix); - MLX5_SET(scheduling_context, sched_ctx, max_average_bw, max_rate); + MLX5_SET(vport_element, attr, vport_number, vport_node->vport->vport); + MLX5_SET(scheduling_context, sched_ctx, parent_element_id, vport_node->parent->ix); + MLX5_SET(scheduling_context, sched_ctx, max_average_bw, vport_node->max_rate); MLX5_SET(scheduling_context, sched_ctx, bw_share, bw_share); - err = mlx5_create_scheduling_element_cmd(dev, - SCHEDULING_HIERARCHY_E_SWITCH, - sched_ctx, - sched_elem_ix); - if (err) { - esw_warn(dev, - "E-Switch create vport scheduling element failed (vport=%d,err=%d)\n", - vport->vport, err); - return err; - } - - return 0; + return esw_qos_node_create_sched_element(vport_node, sched_ctx, extack); } static int esw_qos_update_node_scheduling_element(struct mlx5_vport *vport, @@ -339,30 +357,22 @@ static int esw_qos_update_node_scheduling_element(struct mlx5_vport *vport, struct mlx5_esw_sched_node *vport_node = vport->qos.sched_node; int err; - err = mlx5_destroy_scheduling_element_cmd(curr_node->esw->dev, - SCHEDULING_HIERARCHY_E_SWITCH, - vport_node->ix); - if (err) { - NL_SET_ERR_MSG_MOD(extack, "E-Switch destroy vport scheduling element failed"); + err = esw_qos_node_destroy_sched_element(vport_node, extack); + if (err) return err; - } - err = esw_qos_vport_create_sched_element(vport, new_node, vport_node->max_rate, - vport_node->bw_share, - &vport_node->ix); + esw_qos_node_set_parent(vport_node, new_node); + err = esw_qos_vport_create_sched_element(vport_node, vport_node->bw_share, extack); if (err) { NL_SET_ERR_MSG_MOD(extack, "E-Switch vport node set failed."); goto err_sched; } - esw_qos_node_set_parent(vport->qos.sched_node, new_node); - return 0; err_sched: - if (esw_qos_vport_create_sched_element(vport, curr_node, vport_node->max_rate, - vport_node->bw_share, - &vport_node->ix)) + esw_qos_node_set_parent(vport_node, curr_node); + if (esw_qos_vport_create_sched_element(vport_node, vport_node->bw_share, NULL)) esw_warn(curr_node->esw->dev, "E-Switch vport node restore failed (vport=%d)\n", vport->vport); @@ -425,6 +435,12 @@ static void __esw_qos_free_node(struct mlx5_esw_sched_node *node) kfree(node); } +static void esw_qos_destroy_node(struct mlx5_esw_sched_node *node, struct netlink_ext_ack *extack) +{ + esw_qos_node_destroy_sched_element(node, extack); + __esw_qos_free_node(node); +} + static struct mlx5_esw_sched_node * __esw_qos_create_vports_sched_node(struct mlx5_eswitch *esw, struct mlx5_esw_sched_node *parent, struct netlink_ext_ack *extack) @@ -483,23 +499,13 @@ esw_qos_create_vports_sched_node(struct mlx5_eswitch *esw, struct netlink_ext_ac return node; } -static int __esw_qos_destroy_node(struct mlx5_esw_sched_node *node, struct netlink_ext_ack *extack) +static void __esw_qos_destroy_node(struct mlx5_esw_sched_node *node, struct netlink_ext_ack *extack) { struct mlx5_eswitch *esw = node->esw; - int err; trace_mlx5_esw_node_qos_destroy(esw->dev, node, node->ix); - - err = mlx5_destroy_scheduling_element_cmd(esw->dev, - SCHEDULING_HIERARCHY_E_SWITCH, - node->ix); - if (err) - NL_SET_ERR_MSG_MOD(extack, "E-Switch destroy TSAR_ID failed"); - __esw_qos_free_node(node); - + esw_qos_destroy_node(node, extack); esw_qos_normalize_min_rate(esw, NULL, extack); - - 
return err; } static int esw_qos_create(struct mlx5_eswitch *esw, struct netlink_ext_ack *extack) @@ -584,11 +590,11 @@ static void esw_qos_put(struct mlx5_eswitch *esw) esw_qos_destroy(esw); } -static int esw_qos_vport_enable(struct mlx5_vport *vport, - u32 max_rate, u32 bw_share, struct netlink_ext_ack *extack) +static int esw_qos_vport_enable(struct mlx5_vport *vport, u32 max_rate, u32 bw_share, + struct netlink_ext_ack *extack) { struct mlx5_eswitch *esw = vport->dev->priv.eswitch; - u32 sched_elem_ix; + struct mlx5_esw_sched_node *sched_node; int err; esw_assert_qos_lock_held(esw); @@ -599,29 +605,28 @@ static int esw_qos_vport_enable(struct mlx5_vport *vport, if (err) return err; - err = esw_qos_vport_create_sched_element(vport, esw->qos.node0, max_rate, bw_share, - &sched_elem_ix); - if (err) - goto err_out; - - vport->qos.sched_node = __esw_qos_alloc_node(esw, sched_elem_ix, SCHED_NODE_TYPE_VPORT, - esw->qos.node0); - if (!vport->qos.sched_node) { + sched_node = __esw_qos_alloc_node(esw, 0, SCHED_NODE_TYPE_VPORT, esw->qos.node0); + if (!sched_node) { err = -ENOMEM; goto err_alloc; } - vport->qos.sched_node->vport = vport; + sched_node->max_rate = max_rate; + sched_node->min_rate = 0; + sched_node->bw_share = bw_share; + sched_node->vport = vport; + err = esw_qos_vport_create_sched_element(sched_node, 0, extack); + if (err) + goto err_vport_create; trace_mlx5_esw_vport_qos_create(vport->dev, vport, bw_share, max_rate); + vport->qos.sched_node = sched_node; return 0; +err_vport_create: + __esw_qos_free_node(sched_node); err_alloc: - if (mlx5_destroy_scheduling_element_cmd(esw->dev, - SCHEDULING_HIERARCHY_E_SWITCH, sched_elem_ix)) - esw_warn(esw->dev, "E-Switch destroy vport scheduling element failed.\n"); -err_out: esw_qos_put(esw); return err; @@ -632,7 +637,6 @@ void mlx5_esw_qos_vport_disable(struct mlx5_vport *vport) struct mlx5_eswitch *esw = vport->dev->priv.eswitch; struct mlx5_esw_sched_node *vport_node; struct mlx5_core_dev *dev; - int err; lockdep_assert_held(&esw->state_lock); esw_qos_lock(esw); @@ -645,15 +649,7 @@ void mlx5_esw_qos_vport_disable(struct mlx5_vport *vport) dev = vport_node->esw->dev; trace_mlx5_esw_vport_qos_destroy(dev, vport); - err = mlx5_destroy_scheduling_element_cmd(dev, - SCHEDULING_HIERARCHY_E_SWITCH, - vport_node->ix); - if (err) - esw_warn(dev, - "E-Switch destroy vport scheduling element failed (vport=%d,err=%d)\n", - vport->vport, err); - - __esw_qos_free_node(vport_node); + esw_qos_destroy_node(vport_node, NULL); memset(&vport->qos, 0, sizeof(vport->qos)); esw_qos_put(esw); @@ -974,13 +970,12 @@ int mlx5_esw_devlink_rate_node_del(struct devlink_rate *rate_node, void *priv, { struct mlx5_esw_sched_node *node = priv; struct mlx5_eswitch *esw = node->esw; - int err; esw_qos_lock(esw); - err = __esw_qos_destroy_node(node, extack); + __esw_qos_destroy_node(node, extack); esw_qos_put(esw); esw_qos_unlock(esw); - return err; + return 0; } int mlx5_esw_qos_vport_update_node(struct mlx5_vport *vport, -- 2.50.1 From d67bfd10e668bfca717e0d94112f04f61c58dad7 Mon Sep 17 00:00:00 2001 From: Carolina Jubran Date: Thu, 7 Nov 2024 21:43:51 +0200 Subject: [PATCH 05/16] net/mlx5: Integrate esw_qos_vport_enable logic into rate operations Fold the esw_qos_vport_enable function into operations for configuring maximum and minimum rates, simplifying QoS logic. This change consolidates enabling and updating the scheduling element configuration, streamlining how vport QoS is initialized and adjusted. 
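
The shape of the resulting flow, reduced to a standalone sketch with hypothetical names (not the driver API): the first rate request enables QoS for the vport by allocating its scheduling node, and every later request only reconfigures the existing node.

#include <stdlib.h>

struct sched_node {
	unsigned int max_rate;
	unsigned int bw_share;
};

struct vport {
	struct sched_node *sched_node;	/* NULL until QoS is enabled */
};

/* Stand-in for enabling vport QoS: allocate the node with the initial rate. */
static int vport_qos_enable(struct vport *vport, unsigned int max_rate)
{
	vport->sched_node = calloc(1, sizeof(*vport->sched_node));
	if (!vport->sched_node)
		return -1;
	vport->sched_node->max_rate = max_rate;
	return 0;
}

/* Callers no longer need to enable QoS explicitly before setting a rate. */
static int vport_set_max_rate(struct vport *vport, unsigned int max_rate)
{
	if (!vport->sched_node)
		return vport_qos_enable(vport, max_rate);

	vport->sched_node->max_rate = max_rate;	/* reconfigure existing node */
	return 0;
}

int main(void)
{
	struct vport v = { .sched_node = NULL };

	vport_set_max_rate(&v, 500);	/* enables QoS and sets the rate */
	vport_set_max_rate(&v, 800);	/* only updates the existing node */
	free(v.sched_node);
	return 0;
}
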
Signed-off-by: Carolina Jubran Reviewed-by: Cosmin Ratiu Signed-off-by: Tariq Toukan Link: https://patch.msgid.link/20241107194357.683732-7-tariqt@nvidia.com Signed-off-by: Jakub Kicinski --- .../net/ethernet/mellanox/mlx5/core/esw/qos.c | 87 +++++++++---------- 1 file changed, 39 insertions(+), 48 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c index 155400d36a1e..35e493924c09 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c @@ -590,22 +590,21 @@ static void esw_qos_put(struct mlx5_eswitch *esw) esw_qos_destroy(esw); } -static int esw_qos_vport_enable(struct mlx5_vport *vport, u32 max_rate, u32 bw_share, - struct netlink_ext_ack *extack) +static int esw_qos_vport_enable(struct mlx5_vport *vport, struct mlx5_esw_sched_node *parent, + u32 max_rate, u32 bw_share, struct netlink_ext_ack *extack) { struct mlx5_eswitch *esw = vport->dev->priv.eswitch; struct mlx5_esw_sched_node *sched_node; int err; esw_assert_qos_lock_held(esw); - if (vport->qos.sched_node) - return 0; err = esw_qos_get(esw, extack); if (err) return err; - sched_node = __esw_qos_alloc_node(esw, 0, SCHED_NODE_TYPE_VPORT, esw->qos.node0); + parent = parent ?: esw->qos.node0; + sched_node = __esw_qos_alloc_node(parent->esw, 0, SCHED_NODE_TYPE_VPORT, parent); if (!sched_node) { err = -ENOMEM; goto err_alloc; @@ -657,21 +656,42 @@ unlock: esw_qos_unlock(esw); } +static int mlx5_esw_qos_set_vport_max_rate(struct mlx5_vport *vport, u32 max_rate, + struct netlink_ext_ack *extack) +{ + struct mlx5_esw_sched_node *vport_node = vport->qos.sched_node; + + esw_assert_qos_lock_held(vport->dev->priv.eswitch); + + if (!vport_node) + return esw_qos_vport_enable(vport, NULL, max_rate, 0, extack); + else + return esw_qos_sched_elem_config(vport_node, max_rate, vport_node->bw_share, + extack); +} + +static int mlx5_esw_qos_set_vport_min_rate(struct mlx5_vport *vport, u32 min_rate, + struct netlink_ext_ack *extack) +{ + struct mlx5_esw_sched_node *vport_node = vport->qos.sched_node; + + esw_assert_qos_lock_held(vport->dev->priv.eswitch); + + if (!vport_node) + return esw_qos_vport_enable(vport, NULL, 0, min_rate, extack); + else + return esw_qos_set_node_min_rate(vport_node, min_rate, extack); +} + int mlx5_esw_qos_set_vport_rate(struct mlx5_vport *vport, u32 max_rate, u32 min_rate) { struct mlx5_eswitch *esw = vport->dev->priv.eswitch; int err; esw_qos_lock(esw); - err = esw_qos_vport_enable(vport, 0, 0, NULL); - if (err) - goto unlock; - - err = esw_qos_set_node_min_rate(vport->qos.sched_node, min_rate, NULL); + err = mlx5_esw_qos_set_vport_min_rate(vport, min_rate, NULL); if (!err) - err = esw_qos_sched_elem_config(vport->qos.sched_node, max_rate, - vport->qos.sched_node->bw_share, NULL); -unlock: + err = mlx5_esw_qos_set_vport_max_rate(vport, max_rate, NULL); esw_qos_unlock(esw); return err; } @@ -757,10 +777,8 @@ static int mlx5_esw_qos_link_speed_verify(struct mlx5_core_dev *mdev, int mlx5_esw_qos_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num, u32 rate_mbps) { - u32 ctx[MLX5_ST_SZ_DW(scheduling_context)] = {}; struct mlx5_vport *vport; u32 link_speed_max; - u32 bitmask; int err; vport = mlx5_eswitch_get_vport(esw, vport_num); @@ -779,20 +797,7 @@ int mlx5_esw_qos_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num, u32 } esw_qos_lock(esw); - if (!vport->qos.sched_node) { - /* Eswitch QoS wasn't enabled yet. Enable it and vport QoS. 
*/ - err = esw_qos_vport_enable(vport, rate_mbps, 0, NULL); - } else { - struct mlx5_core_dev *dev = vport->qos.sched_node->parent->esw->dev; - - MLX5_SET(scheduling_context, ctx, max_average_bw, rate_mbps); - bitmask = MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_MAX_AVERAGE_BW; - err = mlx5_modify_scheduling_element_cmd(dev, - SCHEDULING_HIERARCHY_E_SWITCH, - ctx, - vport->qos.sched_node->ix, - bitmask); - } + err = mlx5_esw_qos_set_vport_max_rate(vport, rate_mbps, NULL); esw_qos_unlock(esw); return err; @@ -863,12 +868,7 @@ int mlx5_esw_devlink_rate_leaf_tx_share_set(struct devlink_rate *rate_leaf, void return err; esw_qos_lock(esw); - err = esw_qos_vport_enable(vport, 0, 0, extack); - if (err) - goto unlock; - - err = esw_qos_set_node_min_rate(vport->qos.sched_node, tx_share, extack); -unlock: + err = mlx5_esw_qos_set_vport_min_rate(vport, tx_share, extack); esw_qos_unlock(esw); return err; } @@ -889,13 +889,7 @@ int mlx5_esw_devlink_rate_leaf_tx_max_set(struct devlink_rate *rate_leaf, void * return err; esw_qos_lock(esw); - err = esw_qos_vport_enable(vport, 0, 0, extack); - if (err) - goto unlock; - - err = esw_qos_sched_elem_config(vport->qos.sched_node, tx_max, - vport->qos.sched_node->bw_share, extack); -unlock: + err = mlx5_esw_qos_set_vport_max_rate(vport, tx_max, extack); esw_qos_unlock(esw); return err; } @@ -991,13 +985,10 @@ int mlx5_esw_qos_vport_update_node(struct mlx5_vport *vport, } esw_qos_lock(esw); - if (!vport->qos.sched_node && !node) - goto unlock; - - err = esw_qos_vport_enable(vport, 0, 0, extack); - if (!err) + if (!vport->qos.sched_node && node) + err = esw_qos_vport_enable(vport, node, 0, 0, extack); + else if (vport->qos.sched_node) err = esw_qos_vport_update_node(vport, node, extack); -unlock: esw_qos_unlock(esw); return err; } -- 2.50.1 From be034baba83e2a80a0b2c0f24c08547b6eedc79a Mon Sep 17 00:00:00 2001 From: Carolina Jubran Date: Thu, 7 Nov 2024 21:43:52 +0200 Subject: [PATCH 06/16] net/mlx5: Make vport QoS enablement more flexible for future extensions Refactor esw_qos_vport_enable to support more generic configurations, allowing it to be reused for new vport node types in future patches. This refactor includes a new way to change the vport parent node by disabling the current setup and re-enabling it with the new parent. This change sets the foundation for adapting configuration based on the parent type in future patches. 
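
The new reparenting path boils down to a disable/re-enable sequence with best-effort rollback; a minimal standalone sketch under that assumption (hypothetical names, not the driver code):

#include <stdio.h>

struct parent { int id; };

struct node {
	struct parent *parent;
	int enabled;
};

/* Stand-in for destroying the scheduling element under the current parent. */
static void node_disable(struct node *n)
{
	n->enabled = 0;
}

/* Stand-in for creating the scheduling element under parent p. */
static int node_enable(struct node *n, struct parent *p)
{
	n->parent = p;
	n->enabled = 1;
	return 0;	/* a real implementation can fail here */
}

/* Reparent by disabling under the current parent and re-enabling under the
 * new one; on failure, try to restore the previous parent and only warn. */
static int node_update_parent(struct node *n, struct parent *new_parent)
{
	struct parent *curr = n->parent;
	int err;

	if (curr == new_parent)
		return 0;

	node_disable(n);
	err = node_enable(n, new_parent);
	if (err && node_enable(n, curr))
		fprintf(stderr, "restore to previous parent failed\n");
	return err;
}

int main(void)
{
	struct parent a = { .id = 0 }, b = { .id = 1 };
	struct node n = { .parent = &a, .enabled = 1 };

	return node_update_parent(&n, &b);
}
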
Signed-off-by: Carolina Jubran Reviewed-by: Cosmin Ratiu Signed-off-by: Tariq Toukan Link: https://patch.msgid.link/20241107194357.683732-8-tariqt@nvidia.com Signed-off-by: Jakub Kicinski --- .../mellanox/mlx5/core/esw/devlink_port.c | 2 +- .../net/ethernet/mellanox/mlx5/core/esw/qos.c | 193 ++++++++---------- .../net/ethernet/mellanox/mlx5/core/esw/qos.h | 1 + .../net/ethernet/mellanox/mlx5/core/eswitch.c | 6 +- .../net/ethernet/mellanox/mlx5/core/eswitch.h | 5 +- 5 files changed, 96 insertions(+), 111 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c index d0f38818363f..982fe3714683 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c @@ -195,7 +195,7 @@ void mlx5_esw_offloads_devlink_port_unregister(struct mlx5_vport *vport) return; dl_port = vport->dl_port; - mlx5_esw_qos_vport_update_node(vport, NULL, NULL); + mlx5_esw_qos_vport_update_parent(vport, NULL, NULL); devl_rate_leaf_destroy(&dl_port->dl_port); devl_port_unregister(&dl_port->dl_port); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c index 35e493924c09..8b7c843446e1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c @@ -101,6 +101,12 @@ esw_qos_node_set_parent(struct mlx5_esw_sched_node *node, struct mlx5_esw_sched_ node->esw = parent->esw; } +void mlx5_esw_qos_vport_qos_free(struct mlx5_vport *vport) +{ + kfree(vport->qos.sched_node); + memset(&vport->qos, 0, sizeof(vport->qos)); +} + u32 mlx5_esw_qos_vport_get_sched_elem_ix(const struct mlx5_vport *vport) { if (!vport->qos.sched_node) @@ -326,7 +332,7 @@ static int esw_qos_create_node_sched_elem(struct mlx5_core_dev *dev, u32 parent_ tsar_ix); } -static int esw_qos_vport_create_sched_element(struct mlx5_esw_sched_node *vport_node, u32 bw_share, +static int esw_qos_vport_create_sched_element(struct mlx5_esw_sched_node *vport_node, struct netlink_ext_ack *extack) { u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {}; @@ -344,69 +350,10 @@ static int esw_qos_vport_create_sched_element(struct mlx5_esw_sched_node *vport_ MLX5_SET(vport_element, attr, vport_number, vport_node->vport->vport); MLX5_SET(scheduling_context, sched_ctx, parent_element_id, vport_node->parent->ix); MLX5_SET(scheduling_context, sched_ctx, max_average_bw, vport_node->max_rate); - MLX5_SET(scheduling_context, sched_ctx, bw_share, bw_share); return esw_qos_node_create_sched_element(vport_node, sched_ctx, extack); } -static int esw_qos_update_node_scheduling_element(struct mlx5_vport *vport, - struct mlx5_esw_sched_node *curr_node, - struct mlx5_esw_sched_node *new_node, - struct netlink_ext_ack *extack) -{ - struct mlx5_esw_sched_node *vport_node = vport->qos.sched_node; - int err; - - err = esw_qos_node_destroy_sched_element(vport_node, extack); - if (err) - return err; - - esw_qos_node_set_parent(vport_node, new_node); - err = esw_qos_vport_create_sched_element(vport_node, vport_node->bw_share, extack); - if (err) { - NL_SET_ERR_MSG_MOD(extack, "E-Switch vport node set failed."); - goto err_sched; - } - - return 0; - -err_sched: - esw_qos_node_set_parent(vport_node, curr_node); - if (esw_qos_vport_create_sched_element(vport_node, vport_node->bw_share, NULL)) - esw_warn(curr_node->esw->dev, "E-Switch vport node restore failed (vport=%d)\n", - vport->vport); - - return err; -} - -static int 
esw_qos_vport_update_node(struct mlx5_vport *vport, - struct mlx5_esw_sched_node *node, - struct netlink_ext_ack *extack) -{ - struct mlx5_esw_sched_node *vport_node = vport->qos.sched_node; - struct mlx5_eswitch *esw = vport->dev->priv.eswitch; - struct mlx5_esw_sched_node *new_node, *curr_node; - int err; - - esw_assert_qos_lock_held(esw); - curr_node = vport_node->parent; - new_node = node ?: esw->qos.node0; - if (curr_node == new_node) - return 0; - - err = esw_qos_update_node_scheduling_element(vport, curr_node, new_node, extack); - if (err) - return err; - - /* Recalculate bw share weights of old and new nodes */ - if (vport_node->bw_share || new_node->bw_share) { - esw_qos_normalize_min_rate(curr_node->esw, curr_node, extack); - esw_qos_normalize_min_rate(new_node->esw, new_node, extack); - } - - return 0; -} - static struct mlx5_esw_sched_node * __esw_qos_alloc_node(struct mlx5_eswitch *esw, u32 tsar_ix, enum sched_node_type type, struct mlx5_esw_sched_node *parent) @@ -590,43 +537,62 @@ static void esw_qos_put(struct mlx5_eswitch *esw) esw_qos_destroy(esw); } +static void esw_qos_vport_disable(struct mlx5_vport *vport, struct netlink_ext_ack *extack) +{ + struct mlx5_esw_sched_node *vport_node = vport->qos.sched_node; + struct mlx5_esw_sched_node *parent = vport_node->parent; + + esw_qos_node_destroy_sched_element(vport_node, extack); + + vport_node->bw_share = 0; + list_del_init(&vport_node->entry); + esw_qos_normalize_min_rate(parent->esw, parent, extack); + + trace_mlx5_esw_vport_qos_destroy(vport_node->esw->dev, vport); +} + static int esw_qos_vport_enable(struct mlx5_vport *vport, struct mlx5_esw_sched_node *parent, - u32 max_rate, u32 bw_share, struct netlink_ext_ack *extack) + struct netlink_ext_ack *extack) +{ + int err; + + esw_assert_qos_lock_held(vport->dev->priv.eswitch); + + esw_qos_node_set_parent(vport->qos.sched_node, parent); + err = esw_qos_vport_create_sched_element(vport->qos.sched_node, extack); + if (err) + return err; + + esw_qos_normalize_min_rate(parent->esw, parent, extack); + + return 0; +} + +static int mlx5_esw_qos_vport_enable(struct mlx5_vport *vport, enum sched_node_type type, + struct mlx5_esw_sched_node *parent, u32 max_rate, + u32 min_rate, struct netlink_ext_ack *extack) { struct mlx5_eswitch *esw = vport->dev->priv.eswitch; struct mlx5_esw_sched_node *sched_node; int err; esw_assert_qos_lock_held(esw); - err = esw_qos_get(esw, extack); if (err) return err; parent = parent ?: esw->qos.node0; - sched_node = __esw_qos_alloc_node(parent->esw, 0, SCHED_NODE_TYPE_VPORT, parent); - if (!sched_node) { - err = -ENOMEM; - goto err_alloc; - } + sched_node = __esw_qos_alloc_node(parent->esw, 0, type, parent); + if (!sched_node) + return -ENOMEM; sched_node->max_rate = max_rate; - sched_node->min_rate = 0; - sched_node->bw_share = bw_share; + sched_node->min_rate = min_rate; sched_node->vport = vport; - err = esw_qos_vport_create_sched_element(sched_node, 0, extack); - if (err) - goto err_vport_create; - - trace_mlx5_esw_vport_qos_create(vport->dev, vport, bw_share, max_rate); vport->qos.sched_node = sched_node; - - return 0; - -err_vport_create: - __esw_qos_free_node(sched_node); -err_alloc: - esw_qos_put(esw); + err = esw_qos_vport_enable(vport, parent, extack); + if (err) + esw_qos_put(esw); return err; } @@ -634,23 +600,18 @@ err_alloc: void mlx5_esw_qos_vport_disable(struct mlx5_vport *vport) { struct mlx5_eswitch *esw = vport->dev->priv.eswitch; - struct mlx5_esw_sched_node *vport_node; - struct mlx5_core_dev *dev; + struct mlx5_esw_sched_node 
*parent; lockdep_assert_held(&esw->state_lock); esw_qos_lock(esw); - vport_node = vport->qos.sched_node; - if (!vport_node) + if (!vport->qos.sched_node) goto unlock; - WARN(vport_node->parent != esw->qos.node0, - "Disabling QoS on port before detaching it from node"); - - dev = vport_node->esw->dev; - trace_mlx5_esw_vport_qos_destroy(dev, vport); - esw_qos_destroy_node(vport_node, NULL); - memset(&vport->qos, 0, sizeof(vport->qos)); + parent = vport->qos.sched_node->parent; + WARN(parent != esw->qos.node0, "Disabling QoS on port before detaching it from node"); + esw_qos_vport_disable(vport, NULL); + mlx5_esw_qos_vport_qos_free(vport); esw_qos_put(esw); unlock: esw_qos_unlock(esw); @@ -664,7 +625,8 @@ static int mlx5_esw_qos_set_vport_max_rate(struct mlx5_vport *vport, u32 max_rat esw_assert_qos_lock_held(vport->dev->priv.eswitch); if (!vport_node) - return esw_qos_vport_enable(vport, NULL, max_rate, 0, extack); + return mlx5_esw_qos_vport_enable(vport, SCHED_NODE_TYPE_VPORT, NULL, max_rate, 0, + extack); else return esw_qos_sched_elem_config(vport_node, max_rate, vport_node->bw_share, extack); @@ -678,7 +640,8 @@ static int mlx5_esw_qos_set_vport_min_rate(struct mlx5_vport *vport, u32 min_rat esw_assert_qos_lock_held(vport->dev->priv.eswitch); if (!vport_node) - return esw_qos_vport_enable(vport, NULL, 0, min_rate, extack); + return mlx5_esw_qos_vport_enable(vport, SCHED_NODE_TYPE_VPORT, NULL, 0, min_rate, + extack); else return esw_qos_set_node_min_rate(vport_node, min_rate, extack); } @@ -711,6 +674,31 @@ bool mlx5_esw_qos_get_vport_rate(struct mlx5_vport *vport, u32 *max_rate, u32 *m return enabled; } +static int esw_qos_vport_update_parent(struct mlx5_vport *vport, struct mlx5_esw_sched_node *parent, + struct netlink_ext_ack *extack) +{ + struct mlx5_eswitch *esw = vport->dev->priv.eswitch; + struct mlx5_esw_sched_node *curr_parent; + int err; + + esw_assert_qos_lock_held(esw); + curr_parent = vport->qos.sched_node->parent; + parent = parent ?: esw->qos.node0; + if (curr_parent == parent) + return 0; + + esw_qos_vport_disable(vport, extack); + + err = esw_qos_vport_enable(vport, parent, extack); + if (err) { + if (esw_qos_vport_enable(vport, curr_parent, NULL)) + esw_warn(parent->esw->dev, "vport restore QoS failed (vport=%d)\n", + vport->vport); + } + + return err; +} + static u32 mlx5_esw_qos_lag_link_speed_get_locked(struct mlx5_core_dev *mdev) { struct ethtool_link_ksettings lksettings; @@ -972,23 +960,22 @@ int mlx5_esw_devlink_rate_node_del(struct devlink_rate *rate_node, void *priv, return 0; } -int mlx5_esw_qos_vport_update_node(struct mlx5_vport *vport, - struct mlx5_esw_sched_node *node, - struct netlink_ext_ack *extack) +int mlx5_esw_qos_vport_update_parent(struct mlx5_vport *vport, struct mlx5_esw_sched_node *parent, + struct netlink_ext_ack *extack) { struct mlx5_eswitch *esw = vport->dev->priv.eswitch; int err = 0; - if (node && node->esw != esw) { + if (parent && parent->esw != esw) { NL_SET_ERR_MSG_MOD(extack, "Cross E-Switch scheduling is not supported"); return -EOPNOTSUPP; } esw_qos_lock(esw); - if (!vport->qos.sched_node && node) - err = esw_qos_vport_enable(vport, node, 0, 0, extack); + if (!vport->qos.sched_node && parent) + err = mlx5_esw_qos_vport_enable(vport, SCHED_NODE_TYPE_VPORT, parent, 0, 0, extack); else if (vport->qos.sched_node) - err = esw_qos_vport_update_node(vport, node, extack); + err = esw_qos_vport_update_parent(vport, parent, extack); esw_qos_unlock(esw); return err; } @@ -1002,8 +989,8 @@ int mlx5_esw_devlink_rate_parent_set(struct 
devlink_rate *devlink_rate, struct mlx5_vport *vport = priv; if (!parent) - return mlx5_esw_qos_vport_update_node(vport, NULL, extack); + return mlx5_esw_qos_vport_update_parent(vport, NULL, extack); node = parent_priv; - return mlx5_esw_qos_vport_update_node(vport, node, extack); + return mlx5_esw_qos_vport_update_parent(vport, node, extack); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.h b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.h index 61a6fdd5c267..6eb8f6a648c8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.h @@ -13,6 +13,7 @@ int mlx5_esw_qos_set_vport_rate(struct mlx5_vport *evport, u32 max_rate, u32 min bool mlx5_esw_qos_get_vport_rate(struct mlx5_vport *vport, u32 *max_rate, u32 *min_rate); void mlx5_esw_qos_vport_disable(struct mlx5_vport *vport); +void mlx5_esw_qos_vport_qos_free(struct mlx5_vport *vport); u32 mlx5_esw_qos_vport_get_sched_elem_ix(const struct mlx5_vport *vport); struct mlx5_esw_sched_node *mlx5_esw_qos_vport_get_parent(const struct mlx5_vport *vport); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index d0dab8f4e1a3..7fb8a3381f84 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c @@ -1061,8 +1061,7 @@ static void mlx5_eswitch_clear_vf_vports_info(struct mlx5_eswitch *esw) unsigned long i; mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs) { - kfree(vport->qos.sched_node); - memset(&vport->qos, 0, sizeof(vport->qos)); + mlx5_esw_qos_vport_qos_free(vport); memset(&vport->info, 0, sizeof(vport->info)); vport->info.link_state = MLX5_VPORT_ADMIN_STATE_AUTO; } @@ -1074,8 +1073,7 @@ static void mlx5_eswitch_clear_ec_vf_vports_info(struct mlx5_eswitch *esw) unsigned long i; mlx5_esw_for_each_ec_vf_vport(esw, i, vport, esw->esw_funcs.num_ec_vfs) { - kfree(vport->qos.sched_node); - memset(&vport->qos, 0, sizeof(vport->qos)); + mlx5_esw_qos_vport_qos_free(vport); memset(&vport->info, 0, sizeof(vport->info)); vport->info.link_state = MLX5_VPORT_ADMIN_STATE_AUTO; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h index 14dd42d44e6f..a83d41121db6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h @@ -427,9 +427,8 @@ int mlx5_eswitch_set_vport_trust(struct mlx5_eswitch *esw, u16 vport_num, bool setting); int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, u16 vport, u32 max_rate, u32 min_rate); -int mlx5_esw_qos_vport_update_node(struct mlx5_vport *vport, - struct mlx5_esw_sched_node *node, - struct netlink_ext_ack *extack); +int mlx5_esw_qos_vport_update_parent(struct mlx5_vport *vport, struct mlx5_esw_sched_node *node, + struct netlink_ext_ack *extack); int mlx5_eswitch_set_vepa(struct mlx5_eswitch *esw, u8 setting); int mlx5_eswitch_get_vepa(struct mlx5_eswitch *esw, u8 *setting); int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw, -- 2.50.1 From 8a0ee54027b1fbccda3f2683dafec9b7216993a4 Mon Sep 17 00:00:00 2001 From: Dragos Tatulea Date: Thu, 7 Nov 2024 21:43:53 +0200 Subject: [PATCH 07/16] net/mlx5e: SHAMPO, Simplify UMR allocation for headers Allocating page fragments for header data split is currently more complicated than it should be. That's because the number of KSM entries allocated is not aligned to the number of headers per page. 
This leads to having leftovers in the next allocation which require additional accounting and needlessly complicated code. This patch aligns (down) the number of KSM entries in the UMR WQE to the number of headers per page by: 1) Aligning the max number of entries allocated per UMR WQE (max_ksm_entries) to MLX5E_SHAMPO_WQ_HEADER_PER_PAGE. 2) Aligning the total number of free headers to MLX5E_SHAMPO_WQ_HEADER_PER_PAGE. ... and then it drops the extra accounting code from mlx5e_build_shampo_hd_umr(). Although the number of entries allocated per UMR WQE is slightly smaller due to aligning down, no performance impact was observed. Signed-off-by: Dragos Tatulea Signed-off-by: Tariq Toukan Link: https://patch.msgid.link/20241107194357.683732-9-tariqt@nvidia.com Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/mellanox/mlx5/core/en.h | 1 - .../net/ethernet/mellanox/mlx5/core/en_rx.c | 29 ++++++++----------- 2 files changed, 12 insertions(+), 18 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 58f3df784ded..4449a57ba5b2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -633,7 +633,6 @@ struct mlx5e_shampo_hd { u16 pi; u16 ci; __be32 key; - u64 last_addr; }; struct mlx5e_hw_gro_data { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index d81083f4f316..e044e5d11f05 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -648,30 +648,26 @@ static int mlx5e_build_shampo_hd_umr(struct mlx5e_rq *rq, u16 ksm_entries, u16 index) { struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo; - u16 entries, pi, header_offset, err, wqe_bbs, new_entries; + u16 pi, header_offset, err, wqe_bbs; u32 lkey = rq->mdev->mlx5e_res.hw_objs.mkey; u16 page_index = shampo->curr_page_index; struct mlx5e_frag_page *frag_page; - u64 addr = shampo->last_addr; struct mlx5e_dma_info *dma_info; struct mlx5e_umr_wqe *umr_wqe; int headroom, i; + u64 addr = 0; headroom = rq->buff.headroom; - new_entries = ksm_entries - (shampo->pi & (MLX5_UMR_KSM_NUM_ENTRIES_ALIGNMENT - 1)); - entries = ALIGN(ksm_entries, MLX5_UMR_KSM_NUM_ENTRIES_ALIGNMENT); - wqe_bbs = MLX5E_KSM_UMR_WQEBBS(entries); + wqe_bbs = MLX5E_KSM_UMR_WQEBBS(ksm_entries); pi = mlx5e_icosq_get_next_pi(sq, wqe_bbs); umr_wqe = mlx5_wq_cyc_get_wqe(&sq->wq, pi); - build_ksm_umr(sq, umr_wqe, shampo->key, index, entries); + build_ksm_umr(sq, umr_wqe, shampo->key, index, ksm_entries); frag_page = &shampo->pages[page_index]; - for (i = 0; i < entries; i++, index++) { + WARN_ON_ONCE(ksm_entries & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1)); + for (i = 0; i < ksm_entries; i++, index++) { dma_info = &shampo->info[index]; - if (i >= ksm_entries || (index < shampo->pi && shampo->pi - index < - MLX5_UMR_KSM_NUM_ENTRIES_ALIGNMENT)) - goto update_ksm; header_offset = (index & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1)) << MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE; if (!(header_offset & (PAGE_SIZE - 1))) { @@ -691,7 +687,6 @@ static int mlx5e_build_shampo_hd_umr(struct mlx5e_rq *rq, dma_info->frag_page = frag_page; } -update_ksm: umr_wqe->inline_ksms[i] = (struct mlx5_ksm) { .key = cpu_to_be32(lkey), .va = cpu_to_be64(dma_info->addr + headroom), @@ -701,12 +696,11 @@ update_ksm: sq->db.wqe_info[pi] = (struct mlx5e_icosq_wqe_info) { .wqe_type = MLX5E_ICOSQ_WQE_SHAMPO_HD_UMR, .num_wqebbs = wqe_bbs, - .shampo.len = new_entries, + .shampo.len = 
ksm_entries, }; - shampo->pi = (shampo->pi + new_entries) & (shampo->hd_per_wq - 1); + shampo->pi = (shampo->pi + ksm_entries) & (shampo->hd_per_wq - 1); shampo->curr_page_index = page_index; - shampo->last_addr = addr; sq->pc += wqe_bbs; sq->doorbell_cseg = &umr_wqe->ctrl; @@ -731,7 +725,8 @@ static int mlx5e_alloc_rx_hd_mpwqe(struct mlx5e_rq *rq) struct mlx5e_icosq *sq = rq->icosq; int i, err, max_ksm_entries, len; - max_ksm_entries = MLX5E_MAX_KSM_PER_WQE(rq->mdev); + max_ksm_entries = ALIGN_DOWN(MLX5E_MAX_KSM_PER_WQE(rq->mdev), + MLX5E_SHAMPO_WQ_HEADER_PER_PAGE); ksm_entries = bitmap_find_window(shampo->bitmap, shampo->hd_per_wqe, shampo->hd_per_wq, shampo->pi); @@ -739,8 +734,8 @@ static int mlx5e_alloc_rx_hd_mpwqe(struct mlx5e_rq *rq) if (!ksm_entries) return 0; - ksm_entries += (shampo->pi & (MLX5_UMR_KSM_NUM_ENTRIES_ALIGNMENT - 1)); - index = ALIGN_DOWN(shampo->pi, MLX5_UMR_KSM_NUM_ENTRIES_ALIGNMENT); + /* pi is aligned to MLX5E_SHAMPO_WQ_HEADER_PER_PAGE */ + index = shampo->pi; entries_before = shampo->hd_per_wq - index; if (unlikely(entries_before < ksm_entries)) -- 2.50.1 From 1a4b5885770401b7e8de6546760686dcd2d9b784 Mon Sep 17 00:00:00 2001 From: Dragos Tatulea Date: Thu, 7 Nov 2024 21:43:54 +0200 Subject: [PATCH 08/16] net/mlx5e: SHAMPO, Fix page_index calculation inconsistency When calculating the index for the next frag page slot, the divisor is incorrect: it should be the number of pages per queue not the number of headers per queue. This is currently harmless because frag pages are not used directly, but they are intermediated through the info array. But it needs to be fixed as an upcoming patch will get rid of the info array. This patch introduces a new pages per queue variable and plugs it in the formula. Now that this variable exists, additional code can be simplified in the SHAMPO initialization code. 
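
A quick numeric sketch of the divisor issue (illustrative values only; with 4K pages and the 512-byte header slots implied by MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE, a page holds 8 headers):

#include <stdio.h>

int main(void)
{
	unsigned int headers_per_page = 4096 / 512;			/* 8 */
	unsigned int hd_per_wq = 1024;					/* headers per queue */
	unsigned int pages_per_wq = hd_per_wq / headers_per_page;	/* 128 */
	unsigned int last_page = pages_per_wq - 1;			/* 127 */

	/* Correct divisor: stepping past the last page wraps back to 0. */
	printf("new mask: next page = %u\n", (last_page + 1) & (pages_per_wq - 1));
	/* Old divisor: the mask is far too wide, so the index keeps growing
	 * past the pages that actually back headers instead of wrapping. */
	printf("old mask: next page = %u\n", (last_page + 1) & (hd_per_wq - 1));
	return 0;
}
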
Signed-off-by: Dragos Tatulea Signed-off-by: Tariq Toukan Link: https://patch.msgid.link/20241107194357.683732-10-tariqt@nvidia.com Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/mellanox/mlx5/core/en.h | 1 + drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 8 +++----- drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 2 +- 3 files changed, 5 insertions(+), 6 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 4449a57ba5b2..b4abb094f01a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -629,6 +629,7 @@ struct mlx5e_shampo_hd { u16 curr_page_index; u32 hd_per_wq; u16 hd_per_wqe; + u16 pages_per_wq; unsigned long *bitmap; u16 pi; u16 ci; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 59d7a0e28f24..3ca1ef1f39a5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -767,8 +767,6 @@ static int mlx5_rq_shampo_alloc(struct mlx5_core_dev *mdev, u32 *pool_size, int node) { - void *wqc = MLX5_ADDR_OF(rqc, rqp->rqc, wq); - int wq_size; int err; if (!test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state)) @@ -793,9 +791,9 @@ static int mlx5_rq_shampo_alloc(struct mlx5_core_dev *mdev, cpu_to_be32(rq->mpwqe.shampo->mkey); rq->mpwqe.shampo->hd_per_wqe = mlx5e_shampo_hd_per_wqe(mdev, params, rqp); - wq_size = BIT(MLX5_GET(wq, wqc, log_wq_sz)); - *pool_size += (rq->mpwqe.shampo->hd_per_wqe * wq_size) / - MLX5E_SHAMPO_WQ_HEADER_PER_PAGE; + rq->mpwqe.shampo->pages_per_wq = + rq->mpwqe.shampo->hd_per_wq / MLX5E_SHAMPO_WQ_HEADER_PER_PAGE; + *pool_size += rq->mpwqe.shampo->pages_per_wq; return 0; err_hw_gro_data: diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index e044e5d11f05..76a975667c77 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -671,7 +671,7 @@ static int mlx5e_build_shampo_hd_umr(struct mlx5e_rq *rq, header_offset = (index & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1)) << MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE; if (!(header_offset & (PAGE_SIZE - 1))) { - page_index = (page_index + 1) & (shampo->hd_per_wq - 1); + page_index = (page_index + 1) & (shampo->pages_per_wq - 1); frag_page = &shampo->pages[page_index]; err = mlx5e_page_alloc_fragmented(rq, frag_page); -- 2.50.1 From 4f56868b7132bb3c7e5a2c1930b6402718248a35 Mon Sep 17 00:00:00 2001 From: Dragos Tatulea Date: Thu, 7 Nov 2024 21:43:55 +0200 Subject: [PATCH 09/16] net/mlx5e: SHAMPO, Change frag page setup order during allocation Now that the UMR allocation has been simplified, it is no longer possible to have a leftover page from a previous call to mlx5e_build_shampo_hd_umr(). This patch simplifies the code by switching the order of operations: first take the frag page and then increment the index. This is more straightforward and it also paves the way for dropping the info array. 
Signed-off-by: Dragos Tatulea Signed-off-by: Tariq Toukan Link: https://patch.msgid.link/20241107194357.683732-11-tariqt@nvidia.com Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index 76a975667c77..637069c1b988 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -651,7 +651,7 @@ static int mlx5e_build_shampo_hd_umr(struct mlx5e_rq *rq, u16 pi, header_offset, err, wqe_bbs; u32 lkey = rq->mdev->mlx5e_res.hw_objs.mkey; u16 page_index = shampo->curr_page_index; - struct mlx5e_frag_page *frag_page; + struct mlx5e_frag_page *frag_page = NULL; struct mlx5e_dma_info *dma_info; struct mlx5e_umr_wqe *umr_wqe; int headroom, i; @@ -663,16 +663,14 @@ static int mlx5e_build_shampo_hd_umr(struct mlx5e_rq *rq, umr_wqe = mlx5_wq_cyc_get_wqe(&sq->wq, pi); build_ksm_umr(sq, umr_wqe, shampo->key, index, ksm_entries); - frag_page = &shampo->pages[page_index]; - WARN_ON_ONCE(ksm_entries & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1)); for (i = 0; i < ksm_entries; i++, index++) { dma_info = &shampo->info[index]; header_offset = (index & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1)) << MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE; if (!(header_offset & (PAGE_SIZE - 1))) { - page_index = (page_index + 1) & (shampo->pages_per_wq - 1); frag_page = &shampo->pages[page_index]; + page_index = (page_index + 1) & (shampo->pages_per_wq - 1); err = mlx5e_page_alloc_fragmented(rq, frag_page); if (unlikely(err)) -- 2.50.1 From 945ca432bfd0788b960f8f721594dae4fc3c02c1 Mon Sep 17 00:00:00 2001 From: Dragos Tatulea Date: Thu, 7 Nov 2024 21:43:56 +0200 Subject: [PATCH 10/16] net/mlx5e: SHAMPO, Drop info array The info array is used to store a pointer to the dma address of the header and to the frag page. However, this array is not really required: - The frag page can be calculated from the header index frag page index = header index / headers per page. - The dma address can be calculated through a formula: dma page address + header offset. This series gets rid of the info array and uses the above formulas instead. The current_page_index was used in conjunction with the info array to store page fragment indices. This variable is dropped as well. There was no performance regression observed. 
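As a worked example (assuming 4K pages, i.e. 512-byte header entries and MLX5E_SHAMPO_WQ_HEADER_PER_PAGE == 8), header index 19 resolves to:

	frag page index = 19 / 8         = 2     (19 >> MLX5E_SHAMPO_LOG_WQ_HEADER_PER_PAGE)
	header offset   = (19 % 8) * 512 = 1536  ((19 & 7) << MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE)
	header dma addr = page_pool_get_dma_addr(pages[2].page) + 1536

so both values the info array used to cache can be recomputed on demand from the header index alone.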
Signed-off-by: Dragos Tatulea Signed-off-by: Tariq Toukan Link: https://patch.msgid.link/20241107194357.683732-12-tariqt@nvidia.com Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/mellanox/mlx5/core/en.h | 3 +- .../net/ethernet/mellanox/mlx5/core/en_main.c | 7 +- .../net/ethernet/mellanox/mlx5/core/en_rx.c | 76 ++++++++++--------- 3 files changed, 42 insertions(+), 44 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index b4abb094f01a..979fc56205e1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -83,6 +83,7 @@ struct page_pool; #define MLX5E_SHAMPO_LOG_HEADER_ENTRY_SIZE (8) #define MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE (9) #define MLX5E_SHAMPO_WQ_HEADER_PER_PAGE (PAGE_SIZE >> MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE) +#define MLX5E_SHAMPO_LOG_WQ_HEADER_PER_PAGE (PAGE_SHIFT - MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE) #define MLX5E_SHAMPO_WQ_BASE_HEAD_ENTRY_SIZE (64) #define MLX5E_SHAMPO_WQ_RESRV_SIZE (64 * 1024) #define MLX5E_SHAMPO_WQ_BASE_RESRV_SIZE (4096) @@ -624,9 +625,7 @@ struct mlx5e_dma_info { struct mlx5e_shampo_hd { u32 mkey; - struct mlx5e_dma_info *info; struct mlx5e_frag_page *pages; - u16 curr_page_index; u32 hd_per_wq; u16 hd_per_wqe; u16 pages_per_wq; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 3ca1ef1f39a5..2e27e9d6b820 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -350,19 +350,15 @@ static int mlx5e_rq_shampo_hd_info_alloc(struct mlx5e_rq *rq, int node) shampo->bitmap = bitmap_zalloc_node(shampo->hd_per_wq, GFP_KERNEL, node); - shampo->info = kvzalloc_node(array_size(shampo->hd_per_wq, - sizeof(*shampo->info)), - GFP_KERNEL, node); shampo->pages = kvzalloc_node(array_size(shampo->hd_per_wq, sizeof(*shampo->pages)), GFP_KERNEL, node); - if (!shampo->bitmap || !shampo->info || !shampo->pages) + if (!shampo->bitmap || !shampo->pages) goto err_nomem; return 0; err_nomem: - kvfree(shampo->info); kvfree(shampo->bitmap); kvfree(shampo->pages); @@ -372,7 +368,6 @@ err_nomem: static void mlx5e_rq_shampo_hd_info_free(struct mlx5e_rq *rq) { kvfree(rq->mpwqe.shampo->bitmap); - kvfree(rq->mpwqe.shampo->info); kvfree(rq->mpwqe.shampo->pages); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index 637069c1b988..3de575875586 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -643,6 +643,21 @@ static void build_ksm_umr(struct mlx5e_icosq *sq, struct mlx5e_umr_wqe *umr_wqe, umr_wqe->uctrl.mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE); } +static struct mlx5e_frag_page *mlx5e_shampo_hd_to_frag_page(struct mlx5e_rq *rq, int header_index) +{ + BUILD_BUG_ON(MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE > PAGE_SHIFT); + + return &rq->mpwqe.shampo->pages[header_index >> MLX5E_SHAMPO_LOG_WQ_HEADER_PER_PAGE]; +} + +static u64 mlx5e_shampo_hd_offset(int header_index) +{ + return (header_index & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1)) << + MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE; +} + +static void mlx5e_free_rx_shampo_hd_entry(struct mlx5e_rq *rq, u16 header_index); + static int mlx5e_build_shampo_hd_umr(struct mlx5e_rq *rq, struct mlx5e_icosq *sq, u16 ksm_entries, u16 index) @@ -650,9 +665,6 @@ static int mlx5e_build_shampo_hd_umr(struct mlx5e_rq *rq, struct mlx5e_shampo_hd *shampo = 
rq->mpwqe.shampo; u16 pi, header_offset, err, wqe_bbs; u32 lkey = rq->mdev->mlx5e_res.hw_objs.mkey; - u16 page_index = shampo->curr_page_index; - struct mlx5e_frag_page *frag_page = NULL; - struct mlx5e_dma_info *dma_info; struct mlx5e_umr_wqe *umr_wqe; int headroom, i; u64 addr = 0; @@ -665,29 +677,20 @@ static int mlx5e_build_shampo_hd_umr(struct mlx5e_rq *rq, WARN_ON_ONCE(ksm_entries & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1)); for (i = 0; i < ksm_entries; i++, index++) { - dma_info = &shampo->info[index]; - header_offset = (index & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1)) << - MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE; - if (!(header_offset & (PAGE_SIZE - 1))) { - frag_page = &shampo->pages[page_index]; - page_index = (page_index + 1) & (shampo->pages_per_wq - 1); + header_offset = mlx5e_shampo_hd_offset(index); + if (!header_offset) { + struct mlx5e_frag_page *frag_page = mlx5e_shampo_hd_to_frag_page(rq, index); err = mlx5e_page_alloc_fragmented(rq, frag_page); if (unlikely(err)) goto err_unmap; addr = page_pool_get_dma_addr(frag_page->page); - - dma_info->addr = addr; - dma_info->frag_page = frag_page; - } else { - dma_info->addr = addr + header_offset; - dma_info->frag_page = frag_page; } umr_wqe->inline_ksms[i] = (struct mlx5_ksm) { .key = cpu_to_be32(lkey), - .va = cpu_to_be64(dma_info->addr + headroom), + .va = cpu_to_be64(addr + header_offset + headroom), }; } @@ -698,20 +701,22 @@ static int mlx5e_build_shampo_hd_umr(struct mlx5e_rq *rq, }; shampo->pi = (shampo->pi + ksm_entries) & (shampo->hd_per_wq - 1); - shampo->curr_page_index = page_index; sq->pc += wqe_bbs; sq->doorbell_cseg = &umr_wqe->ctrl; return 0; err_unmap: - while (--i >= 0) { - dma_info = &shampo->info[--index]; - if (!(i & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1))) { - dma_info->addr = ALIGN_DOWN(dma_info->addr, PAGE_SIZE); - mlx5e_page_release_fragmented(rq, dma_info->frag_page); + while (--i) { + --index; + header_offset = mlx5e_shampo_hd_offset(index); + if (!header_offset) { + struct mlx5e_frag_page *frag_page = mlx5e_shampo_hd_to_frag_page(rq, index); + + mlx5e_page_release_fragmented(rq, frag_page); } } + rq->stats->buff_alloc_err++; return err; } @@ -844,13 +849,11 @@ static void mlx5e_free_rx_shampo_hd_entry(struct mlx5e_rq *rq, u16 header_index) { struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo; - u64 addr = shampo->info[header_index].addr; if (((header_index + 1) & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1)) == 0) { - struct mlx5e_dma_info *dma_info = &shampo->info[header_index]; + struct mlx5e_frag_page *frag_page = mlx5e_shampo_hd_to_frag_page(rq, header_index); - dma_info->addr = ALIGN_DOWN(addr, PAGE_SIZE); - mlx5e_page_release_fragmented(rq, dma_info->frag_page); + mlx5e_page_release_fragmented(rq, frag_page); } clear_bit(header_index, shampo->bitmap); } @@ -1204,10 +1207,10 @@ static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe, static void *mlx5e_shampo_get_packet_hd(struct mlx5e_rq *rq, u16 header_index) { - struct mlx5e_dma_info *last_head = &rq->mpwqe.shampo->info[header_index]; - u16 head_offset = (last_head->addr & (PAGE_SIZE - 1)) + rq->buff.headroom; + struct mlx5e_frag_page *frag_page = mlx5e_shampo_hd_to_frag_page(rq, header_index); + u16 head_offset = mlx5e_shampo_hd_offset(header_index) + rq->buff.headroom; - return page_address(last_head->frag_page->page) + head_offset; + return page_address(frag_page->page) + head_offset; } static void mlx5e_shampo_update_ipv4_udp_hdr(struct mlx5e_rq *rq, struct iphdr *ipv4) @@ -2178,29 +2181,30 @@ static struct sk_buff * 
mlx5e_skb_from_cqe_shampo(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, struct mlx5_cqe64 *cqe, u16 header_index) { - struct mlx5e_dma_info *head = &rq->mpwqe.shampo->info[header_index]; - u16 head_offset = head->addr & (PAGE_SIZE - 1); + struct mlx5e_frag_page *frag_page = mlx5e_shampo_hd_to_frag_page(rq, header_index); + dma_addr_t page_dma_addr = page_pool_get_dma_addr(frag_page->page); + u16 head_offset = mlx5e_shampo_hd_offset(header_index); + dma_addr_t dma_addr = page_dma_addr + head_offset; u16 head_size = cqe->shampo.header_size; u16 rx_headroom = rq->buff.headroom; struct sk_buff *skb = NULL; void *hdr, *data; u32 frag_size; - hdr = page_address(head->frag_page->page) + head_offset; + hdr = page_address(frag_page->page) + head_offset; data = hdr + rx_headroom; frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + head_size); if (likely(frag_size <= BIT(MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE))) { /* build SKB around header */ - dma_sync_single_range_for_cpu(rq->pdev, head->addr, 0, frag_size, rq->buff.map_dir); + dma_sync_single_range_for_cpu(rq->pdev, dma_addr, 0, frag_size, rq->buff.map_dir); net_prefetchw(hdr); net_prefetch(data); skb = mlx5e_build_linear_skb(rq, hdr, frag_size, rx_headroom, head_size, 0); - if (unlikely(!skb)) return NULL; - head->frag_page->frags++; + frag_page->frags++; } else { /* allocate SKB and copy header for large header */ rq->stats->gro_large_hds++; @@ -2212,7 +2216,7 @@ mlx5e_skb_from_cqe_shampo(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, } net_prefetchw(skb->data); - mlx5e_copy_skb_header(rq, skb, head->frag_page->page, head->addr, + mlx5e_copy_skb_header(rq, skb, frag_page->page, dma_addr, head_offset + rx_headroom, rx_headroom, head_size); /* skb linear part was allocated with headlen and aligned to long */ -- 2.50.1 From ab4219db89da1d019ef45675e4cd56a6841bbc1e Mon Sep 17 00:00:00 2001 From: Dragos Tatulea Date: Thu, 7 Nov 2024 21:43:57 +0200 Subject: [PATCH 11/16] net/mlx5e: SHAMPO, Rework header allocation loop The current loop code was based on the assumption that there can be page leftovers from previous function calls. This patch changes the allocation loop to make it clearer how pages get allocated every MLX5E_SHAMPO_WQ_HEADER_PER_PAGE headers. This change has no functional implications. 
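For example (again with MLX5E_SHAMPO_WQ_HEADER_PER_PAGE == 8 on 4K pages), a call with ksm_entries == 32 now runs the outer loop four times; each pass allocates one fragmented page and the inner loop fills eight KSM entries from that page's DMA address:

	32 entries / 8 headers per page = 4 page allocations,
	each covering offsets 0, 512, ..., 3584 within its page

which is also why the WARN_ON_ONCE() above the loop insists that ksm_entries stays a multiple of the headers-per-page constant.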
Signed-off-by: Dragos Tatulea Signed-off-by: Tariq Toukan Link: https://patch.msgid.link/20241107194357.683732-13-tariqt@nvidia.com Signed-off-by: Jakub Kicinski --- .../net/ethernet/mellanox/mlx5/core/en_rx.c | 32 ++++++++++--------- 1 file changed, 17 insertions(+), 15 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index 3de575875586..1963bc5adb18 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -666,8 +666,7 @@ static int mlx5e_build_shampo_hd_umr(struct mlx5e_rq *rq, u16 pi, header_offset, err, wqe_bbs; u32 lkey = rq->mdev->mlx5e_res.hw_objs.mkey; struct mlx5e_umr_wqe *umr_wqe; - int headroom, i; - u64 addr = 0; + int headroom, i = 0; headroom = rq->buff.headroom; wqe_bbs = MLX5E_KSM_UMR_WQEBBS(ksm_entries); @@ -676,22 +675,25 @@ static int mlx5e_build_shampo_hd_umr(struct mlx5e_rq *rq, build_ksm_umr(sq, umr_wqe, shampo->key, index, ksm_entries); WARN_ON_ONCE(ksm_entries & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1)); - for (i = 0; i < ksm_entries; i++, index++) { - header_offset = mlx5e_shampo_hd_offset(index); - if (!header_offset) { - struct mlx5e_frag_page *frag_page = mlx5e_shampo_hd_to_frag_page(rq, index); + while (i < ksm_entries) { + struct mlx5e_frag_page *frag_page = mlx5e_shampo_hd_to_frag_page(rq, index); + u64 addr; + + err = mlx5e_page_alloc_fragmented(rq, frag_page); + if (unlikely(err)) + goto err_unmap; - err = mlx5e_page_alloc_fragmented(rq, frag_page); - if (unlikely(err)) - goto err_unmap; - addr = page_pool_get_dma_addr(frag_page->page); - } + addr = page_pool_get_dma_addr(frag_page->page); - umr_wqe->inline_ksms[i] = (struct mlx5_ksm) { - .key = cpu_to_be32(lkey), - .va = cpu_to_be64(addr + header_offset + headroom), - }; + for (int j = 0; j < MLX5E_SHAMPO_WQ_HEADER_PER_PAGE; j++) { + header_offset = mlx5e_shampo_hd_offset(index++); + + umr_wqe->inline_ksms[i++] = (struct mlx5_ksm) { + .key = cpu_to_be32(lkey), + .va = cpu_to_be64(addr + header_offset + headroom), + }; + } } sq->db.wqe_info[pi] = (struct mlx5e_icosq_wqe_info) { -- 2.50.1 From 37653a0b8a6f5d6ab23daa8e585c5ed24a0fc500 Mon Sep 17 00:00:00 2001 From: Menglong Dong Date: Thu, 7 Nov 2024 20:55:53 +0800 Subject: [PATCH 12/16] net: ip: make fib_validate_source() support drop reasons In this commit, we make fib_validate_source() and __fib_validate_source() return -reason instead of errno on error. The return value of fib_validate_source can be -errno, 0, and 1. It's hard to make fib_validate_source() return drop reasons directly. The fib_validate_source() will return 1 if the scope of the source(revert) route is HOST. And the __mkroute_input() will mark the skb with IPSKB_DOREDIRECT in this case (combine with some other conditions). And then, a REDIRECT ICMP will be sent in ip_forward() if this flag exists. We can't pass this information to __mkroute_input if we make fib_validate_source() return drop reasons. Therefore, we introduce the wrapper fib_validate_source_reason() for fib_validate_source(), which will return the drop reasons on error. In the origin logic, LINUX_MIB_IPRPFILTER will be counted if fib_validate_source() return -EXDEV. And now, we need to adjust it by checking "reason == SKB_DROP_REASON_IP_RPFILTER". However, this will take effect only after the patch "net: ip: make ip_route_input_noref() return drop reasons", as we can't pass the drop reasons from fib_validate_source() to ip_rcv_finish_core() in this patch. 
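For instance, an rp_filter failure now travels out of __fib_validate_source() as -SKB_DROP_REASON_IP_RPFILTER; fib_validate_source() keeps returning that negative value (plus the existing 0 and 1 cases), and the new wrapper simply flips the sign back into the enum. A caller that only needs the drop reason can then do (sketch based on the hunks below):

	reason = fib_validate_source_reason(skb, saddr, daddr, dscp, 0, dev, in_dev, &itag);
	if (reason)	/* SKB_NOT_DROPPED_YET == 0, anything else is a drop reason */
		goto martian_source;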
Following new drop reasons are added in this patch: SKB_DROP_REASON_IP_LOCAL_SOURCE SKB_DROP_REASON_IP_INVALID_SOURCE Signed-off-by: Menglong Dong Signed-off-by: Paolo Abeni --- include/net/dropreason-core.h | 10 ++++++++++ include/net/ip_fib.h | 12 ++++++++++++ net/ipv4/fib_frontend.c | 17 ++++++++++++----- net/ipv4/ip_input.c | 4 +--- net/ipv4/route.c | 33 +++++++++++++++++++-------------- 5 files changed, 54 insertions(+), 22 deletions(-) diff --git a/include/net/dropreason-core.h b/include/net/dropreason-core.h index d59bb96c5a02..62a60be1db84 100644 --- a/include/net/dropreason-core.h +++ b/include/net/dropreason-core.h @@ -76,6 +76,8 @@ FN(INVALID_PROTO) \ FN(IP_INADDRERRORS) \ FN(IP_INNOROUTES) \ + FN(IP_LOCAL_SOURCE) \ + FN(IP_INVALID_SOURCE) \ FN(PKT_TOO_BIG) \ FN(DUP_FRAG) \ FN(FRAG_REASM_TIMEOUT) \ @@ -373,6 +375,14 @@ enum skb_drop_reason { * IPSTATS_MIB_INADDRERRORS */ SKB_DROP_REASON_IP_INNOROUTES, + /** @SKB_DROP_REASON_IP_LOCAL_SOURCE: the source ip is local */ + SKB_DROP_REASON_IP_LOCAL_SOURCE, + /** + * @SKB_DROP_REASON_IP_INVALID_SOURCE: the source ip is invalid: + * 1) source ip is multicast or limited broadcast + * 2) source ip is zero and not IGMP + */ + SKB_DROP_REASON_IP_INVALID_SOURCE, /** * @SKB_DROP_REASON_PKT_TOO_BIG: packet size is too big (maybe exceed the * MTU) diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h index b6e44f4eaa4c..a113c11ab56b 100644 --- a/include/net/ip_fib.h +++ b/include/net/ip_fib.h @@ -452,6 +452,18 @@ int fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst, dscp_t dscp, int oif, struct net_device *dev, struct in_device *idev, u32 *itag); +static inline enum skb_drop_reason +fib_validate_source_reason(struct sk_buff *skb, __be32 src, __be32 dst, + dscp_t dscp, int oif, struct net_device *dev, + struct in_device *idev, u32 *itag) +{ + int err = fib_validate_source(skb, src, dst, dscp, oif, dev, idev, + itag); + if (err < 0) + return -err; + return SKB_NOT_DROPPED_YET; +} + #ifdef CONFIG_IP_ROUTE_CLASSID static inline int fib_num_tclassid_users(struct net *net) { diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c index 0c9ce934b490..87bb36a5bdec 100644 --- a/net/ipv4/fib_frontend.c +++ b/net/ipv4/fib_frontend.c @@ -346,6 +346,7 @@ static int __fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst, int rpf, struct in_device *idev, u32 *itag) { struct net *net = dev_net(dev); + enum skb_drop_reason reason; struct flow_keys flkeys; int ret, no_addr; struct fib_result res; @@ -377,9 +378,15 @@ static int __fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst, if (fib_lookup(net, &fl4, &res, 0)) goto last_resort; - if (res.type != RTN_UNICAST && - (res.type != RTN_LOCAL || !IN_DEV_ACCEPT_LOCAL(idev))) - goto e_inval; + if (res.type != RTN_UNICAST) { + if (res.type != RTN_LOCAL) { + reason = SKB_DROP_REASON_IP_INVALID_SOURCE; + goto e_inval; + } else if (!IN_DEV_ACCEPT_LOCAL(idev)) { + reason = SKB_DROP_REASON_IP_LOCAL_SOURCE; + goto e_inval; + } + } fib_combine_itag(itag, &res); dev_match = fib_info_nh_uses_dev(res.fi, dev); @@ -412,9 +419,9 @@ last_resort: return 0; e_inval: - return -EINVAL; + return -reason; e_rpf: - return -EXDEV; + return -SKB_DROP_REASON_IP_RPFILTER; } /* Ignore rp_filter for packets protected by IPsec. 
*/ diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c index 89bb63da6852..c40a26972884 100644 --- a/net/ipv4/ip_input.c +++ b/net/ipv4/ip_input.c @@ -425,10 +425,8 @@ drop: return NET_RX_DROP; drop_error: - if (err == -EXDEV) { - drop_reason = SKB_DROP_REASON_IP_RPFILTER; + if (drop_reason == SKB_DROP_REASON_IP_RPFILTER) __NET_INC_STATS(net, LINUX_MIB_IPRPFILTER); - } goto drop; } diff --git a/net/ipv4/route.c b/net/ipv4/route.c index ccdbe9c70132..c38e95d9da9e 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -1682,7 +1682,7 @@ int ip_mc_validate_source(struct sk_buff *skb, __be32 daddr, __be32 saddr, dscp_t dscp, struct net_device *dev, struct in_device *in_dev, u32 *itag) { - int err; + enum skb_drop_reason reason; /* Primary sanity checks. */ if (!in_dev) @@ -1700,10 +1700,10 @@ int ip_mc_validate_source(struct sk_buff *skb, __be32 daddr, __be32 saddr, ip_hdr(skb)->protocol != IPPROTO_IGMP) return -EINVAL; } else { - err = fib_validate_source(skb, saddr, 0, dscp, 0, dev, in_dev, - itag); - if (err < 0) - return err; + reason = fib_validate_source_reason(skb, saddr, 0, dscp, 0, + dev, in_dev, itag); + if (reason) + return -EINVAL; } return 0; } @@ -1801,6 +1801,7 @@ static int __mkroute_input(struct sk_buff *skb, const struct fib_result *res, err = fib_validate_source(skb, saddr, daddr, dscp, FIB_RES_OIF(*res), in_dev->dev, in_dev, &itag); if (err < 0) { + err = -EINVAL; ip_handle_martian_source(in_dev->dev, in_dev, skb, daddr, saddr); @@ -2153,6 +2154,7 @@ int ip_route_use_hint(struct sk_buff *skb, __be32 daddr, __be32 saddr, struct in_device *in_dev = __in_dev_get_rcu(dev); struct rtable *rt = skb_rtable(hint); struct net *net = dev_net(dev); + enum skb_drop_reason reason; int err = -EINVAL; u32 tag = 0; @@ -2171,9 +2173,9 @@ int ip_route_use_hint(struct sk_buff *skb, __be32 daddr, __be32 saddr, if (rt->rt_type != RTN_LOCAL) goto skip_validate_source; - err = fib_validate_source(skb, saddr, daddr, dscp, 0, dev, in_dev, - &tag); - if (err < 0) + reason = fib_validate_source_reason(skb, saddr, daddr, dscp, 0, dev, + in_dev, &tag); + if (reason) goto martian_source; skip_validate_source: @@ -2215,6 +2217,7 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr, dscp_t dscp, struct net_device *dev, struct fib_result *res) { + enum skb_drop_reason reason = SKB_DROP_REASON_NOT_SPECIFIED; struct in_device *in_dev = __in_dev_get_rcu(dev); struct flow_keys *flkeys = NULL, _flkeys; struct net *net = dev_net(dev); @@ -2309,10 +2312,11 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr, goto brd_input; } + err = -EINVAL; if (res->type == RTN_LOCAL) { - err = fib_validate_source(skb, saddr, daddr, dscp, 0, dev, - in_dev, &itag); - if (err < 0) + reason = fib_validate_source_reason(skb, saddr, daddr, dscp, + 0, dev, in_dev, &itag); + if (reason) goto martian_source; goto local_input; } @@ -2333,9 +2337,10 @@ brd_input: goto e_inval; if (!ipv4_is_zeronet(saddr)) { - err = fib_validate_source(skb, saddr, 0, dscp, 0, dev, in_dev, - &itag); - if (err < 0) + err = -EINVAL; + reason = fib_validate_source_reason(skb, saddr, 0, dscp, 0, + dev, in_dev, &itag); + if (reason) goto martian_source; } flags |= RTCF_BROADCAST; -- 2.50.1 From c6c670784b862878deba7e16210ca4b2a2966ca0 Mon Sep 17 00:00:00 2001 From: Menglong Dong Date: Thu, 7 Nov 2024 20:55:54 +0800 Subject: [PATCH 13/16] net: ip: make ip_route_input_mc() return drop reason Make ip_route_input_mc() return drop reason, and adjust the call of it in ip_route_input_rcu(). 
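A minimal sketch of the conversion at the caller boundary (mirroring the hunk below): because SKB_NOT_DROPPED_YET is 0, ip_route_input_rcu() can keep handing an errno to its own callers with a single ternary:

	reason = ip_route_input_mc(skb, daddr, saddr, dscp, dev, our);
	return reason ? -EINVAL : 0;

Later patches in the series propagate the drop reason itself further up instead of collapsing it to -EINVAL.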
Signed-off-by: Menglong Dong Signed-off-by: Paolo Abeni --- net/ipv4/route.c | 21 +++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/net/ipv4/route.c b/net/ipv4/route.c index c38e95d9da9e..b20316789baa 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -1709,8 +1709,9 @@ int ip_mc_validate_source(struct sk_buff *skb, __be32 daddr, __be32 saddr, } /* called in rcu_read_lock() section */ -static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr, - dscp_t dscp, struct net_device *dev, int our) +static enum skb_drop_reason +ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr, + dscp_t dscp, struct net_device *dev, int our) { struct in_device *in_dev = __in_dev_get_rcu(dev); unsigned int flags = RTCF_MULTICAST; @@ -1721,7 +1722,7 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr, err = ip_mc_validate_source(skb, daddr, saddr, dscp, dev, in_dev, &itag); if (err) - return err; + return SKB_DROP_REASON_NOT_SPECIFIED; if (our) flags |= RTCF_LOCAL; @@ -1732,7 +1733,7 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr, rth = rt_dst_alloc(dev_net(dev)->loopback_dev, flags, RTN_MULTICAST, false); if (!rth) - return -ENOBUFS; + return SKB_DROP_REASON_NOMEM; #ifdef CONFIG_IP_ROUTE_CLASSID rth->dst.tclassid = itag; @@ -1748,7 +1749,7 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr, skb_dst_drop(skb); skb_dst_set(skb, &rth->dst); - return 0; + return SKB_NOT_DROPPED_YET; } @@ -2446,12 +2447,12 @@ static int ip_route_input_rcu(struct sk_buff *skb, __be32 daddr, __be32 saddr, * route cache entry is created eventually. */ if (ipv4_is_multicast(daddr)) { + enum skb_drop_reason reason = SKB_DROP_REASON_NOT_SPECIFIED; struct in_device *in_dev = __in_dev_get_rcu(dev); int our = 0; - int err = -EINVAL; if (!in_dev) - return err; + return -EINVAL; our = ip_check_mc_rcu(in_dev, daddr, saddr, ip_hdr(skb)->protocol); @@ -2472,10 +2473,10 @@ static int ip_route_input_rcu(struct sk_buff *skb, __be32 daddr, __be32 saddr, IN_DEV_MFORWARD(in_dev)) #endif ) { - err = ip_route_input_mc(skb, daddr, saddr, dscp, dev, - our); + reason = ip_route_input_mc(skb, daddr, saddr, dscp, + dev, our); } - return err; + return reason ? -EINVAL : 0; } return ip_route_input_slow(skb, daddr, saddr, dscp, dev, res); -- 2.50.1 From d46f827016d891dbc234cb05c406180f77fb3b2d Mon Sep 17 00:00:00 2001 From: Menglong Dong Date: Thu, 7 Nov 2024 20:55:55 +0800 Subject: [PATCH 14/16] net: ip: make ip_mc_validate_source() return drop reason Make ip_mc_validate_source() return drop reason, and adjust the call of it in ip_route_input_mc(). Another caller of it is ip_rcv_finish_core->udp_v4_early_demux, and the errno is not checked in detail, so we don't do more adjustment for it. The drop reason "SKB_DROP_REASON_IP_LOCALNET" is added in this commit. 
Signed-off-by: Menglong Dong Signed-off-by: Paolo Abeni --- include/net/dropreason-core.h | 3 +++ include/net/route.h | 7 ++++--- net/ipv4/route.c | 35 +++++++++++++++++++---------------- 3 files changed, 26 insertions(+), 19 deletions(-) diff --git a/include/net/dropreason-core.h b/include/net/dropreason-core.h index 62a60be1db84..a2a1fb90e0e5 100644 --- a/include/net/dropreason-core.h +++ b/include/net/dropreason-core.h @@ -78,6 +78,7 @@ FN(IP_INNOROUTES) \ FN(IP_LOCAL_SOURCE) \ FN(IP_INVALID_SOURCE) \ + FN(IP_LOCALNET) \ FN(PKT_TOO_BIG) \ FN(DUP_FRAG) \ FN(FRAG_REASM_TIMEOUT) \ @@ -383,6 +384,8 @@ enum skb_drop_reason { * 2) source ip is zero and not IGMP */ SKB_DROP_REASON_IP_INVALID_SOURCE, + /** @SKB_DROP_REASON_IP_LOCALNET: source or dest ip is local net */ + SKB_DROP_REASON_IP_LOCALNET, /** * @SKB_DROP_REASON_PKT_TOO_BIG: packet size is too big (maybe exceed the * MTU) diff --git a/include/net/route.h b/include/net/route.h index 0a690adfdff5..e2e1922c58fb 100644 --- a/include/net/route.h +++ b/include/net/route.h @@ -199,9 +199,10 @@ static inline struct rtable *ip_route_output_gre(struct net *net, struct flowi4 return ip_route_output_key(net, fl4); } -int ip_mc_validate_source(struct sk_buff *skb, __be32 daddr, __be32 saddr, - dscp_t dscp, struct net_device *dev, - struct in_device *in_dev, u32 *itag); +enum skb_drop_reason +ip_mc_validate_source(struct sk_buff *skb, __be32 daddr, __be32 saddr, + dscp_t dscp, struct net_device *dev, + struct in_device *in_dev, u32 *itag); int ip_route_input_noref(struct sk_buff *skb, __be32 daddr, __be32 saddr, dscp_t dscp, struct net_device *dev); int ip_route_use_hint(struct sk_buff *skb, __be32 daddr, __be32 saddr, diff --git a/net/ipv4/route.c b/net/ipv4/route.c index b20316789baa..ef0b5ffda843 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -1678,34 +1678,37 @@ struct rtable *rt_dst_clone(struct net_device *dev, struct rtable *rt) EXPORT_SYMBOL(rt_dst_clone); /* called in rcu_read_lock() section */ -int ip_mc_validate_source(struct sk_buff *skb, __be32 daddr, __be32 saddr, - dscp_t dscp, struct net_device *dev, - struct in_device *in_dev, u32 *itag) +enum skb_drop_reason +ip_mc_validate_source(struct sk_buff *skb, __be32 daddr, __be32 saddr, + dscp_t dscp, struct net_device *dev, + struct in_device *in_dev, u32 *itag) { enum skb_drop_reason reason; /* Primary sanity checks. 
*/ if (!in_dev) - return -EINVAL; + return SKB_DROP_REASON_NOT_SPECIFIED; - if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) || - skb->protocol != htons(ETH_P_IP)) - return -EINVAL; + if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr)) + return SKB_DROP_REASON_IP_INVALID_SOURCE; + + if (skb->protocol != htons(ETH_P_IP)) + return SKB_DROP_REASON_INVALID_PROTO; if (ipv4_is_loopback(saddr) && !IN_DEV_ROUTE_LOCALNET(in_dev)) - return -EINVAL; + return SKB_DROP_REASON_IP_LOCALNET; if (ipv4_is_zeronet(saddr)) { if (!ipv4_is_local_multicast(daddr) && ip_hdr(skb)->protocol != IPPROTO_IGMP) - return -EINVAL; + return SKB_DROP_REASON_IP_INVALID_SOURCE; } else { reason = fib_validate_source_reason(skb, saddr, 0, dscp, 0, dev, in_dev, itag); if (reason) - return -EINVAL; + return reason; } - return 0; + return SKB_NOT_DROPPED_YET; } /* called in rcu_read_lock() section */ @@ -1715,14 +1718,14 @@ ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr, { struct in_device *in_dev = __in_dev_get_rcu(dev); unsigned int flags = RTCF_MULTICAST; + enum skb_drop_reason reason; struct rtable *rth; u32 itag = 0; - int err; - err = ip_mc_validate_source(skb, daddr, saddr, dscp, dev, in_dev, - &itag); - if (err) - return SKB_DROP_REASON_NOT_SPECIFIED; + reason = ip_mc_validate_source(skb, daddr, saddr, dscp, dev, in_dev, + &itag); + if (reason) + return reason; if (our) flags |= RTCF_LOCAL; -- 2.50.1 From 5b92112acd8e2ed84a4df653fc20575f4da6fa49 Mon Sep 17 00:00:00 2001 From: Menglong Dong Date: Thu, 7 Nov 2024 20:55:56 +0800 Subject: [PATCH 15/16] net: ip: make ip_route_input_slow() return drop reasons In this commit, we make ip_route_input_slow() return skb drop reasons, and following new skb drop reasons are added: SKB_DROP_REASON_IP_INVALID_DEST The only caller of ip_route_input_slow() is ip_route_input_rcu(), and we adjust it by making it return -EINVAL on error. 
Signed-off-by: Menglong Dong Signed-off-by: Paolo Abeni --- include/net/dropreason-core.h | 6 ++++ net/ipv4/route.c | 56 ++++++++++++++++++++++------------- 2 files changed, 41 insertions(+), 21 deletions(-) diff --git a/include/net/dropreason-core.h b/include/net/dropreason-core.h index a2a1fb90e0e5..74624d369d48 100644 --- a/include/net/dropreason-core.h +++ b/include/net/dropreason-core.h @@ -79,6 +79,7 @@ FN(IP_LOCAL_SOURCE) \ FN(IP_INVALID_SOURCE) \ FN(IP_LOCALNET) \ + FN(IP_INVALID_DEST) \ FN(PKT_TOO_BIG) \ FN(DUP_FRAG) \ FN(FRAG_REASM_TIMEOUT) \ @@ -386,6 +387,11 @@ enum skb_drop_reason { SKB_DROP_REASON_IP_INVALID_SOURCE, /** @SKB_DROP_REASON_IP_LOCALNET: source or dest ip is local net */ SKB_DROP_REASON_IP_LOCALNET, + /** + * @SKB_DROP_REASON_IP_INVALID_DEST: the dest ip is invalid: + * 1) dest ip is 0 + */ + SKB_DROP_REASON_IP_INVALID_DEST, /** * @SKB_DROP_REASON_PKT_TOO_BIG: packet size is too big (maybe exceed the * MTU) diff --git a/net/ipv4/route.c b/net/ipv4/route.c index ef0b5ffda843..b73f0355cc38 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -2217,9 +2217,10 @@ static struct net_device *ip_rt_get_dev(struct net *net, * called with rcu_read_lock() */ -static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr, - dscp_t dscp, struct net_device *dev, - struct fib_result *res) +static enum skb_drop_reason +ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr, + dscp_t dscp, struct net_device *dev, + struct fib_result *res) { enum skb_drop_reason reason = SKB_DROP_REASON_NOT_SPECIFIED; struct in_device *in_dev = __in_dev_get_rcu(dev); @@ -2249,8 +2250,10 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr, fl4.flowi4_tun_key.tun_id = 0; skb_dst_drop(skb); - if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr)) + if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr)) { + reason = SKB_DROP_REASON_IP_INVALID_SOURCE; goto martian_source; + } res->fi = NULL; res->table = NULL; @@ -2260,21 +2263,29 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr, /* Accept zero addresses only to limited broadcast; * I even do not know to fix it or not. 
Waiting for complains :-) */ - if (ipv4_is_zeronet(saddr)) + if (ipv4_is_zeronet(saddr)) { + reason = SKB_DROP_REASON_IP_INVALID_SOURCE; goto martian_source; + } - if (ipv4_is_zeronet(daddr)) + if (ipv4_is_zeronet(daddr)) { + reason = SKB_DROP_REASON_IP_INVALID_DEST; goto martian_destination; + } /* Following code try to avoid calling IN_DEV_NET_ROUTE_LOCALNET(), * and call it once if daddr or/and saddr are loopback addresses */ if (ipv4_is_loopback(daddr)) { - if (!IN_DEV_NET_ROUTE_LOCALNET(in_dev, net)) + if (!IN_DEV_NET_ROUTE_LOCALNET(in_dev, net)) { + reason = SKB_DROP_REASON_IP_LOCALNET; goto martian_destination; + } } else if (ipv4_is_loopback(saddr)) { - if (!IN_DEV_NET_ROUTE_LOCALNET(in_dev, net)) + if (!IN_DEV_NET_ROUTE_LOCALNET(in_dev, net)) { + reason = SKB_DROP_REASON_IP_LOCALNET; goto martian_source; + } } /* @@ -2329,19 +2340,26 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr, err = -EHOSTUNREACH; goto no_route; } - if (res->type != RTN_UNICAST) + if (res->type != RTN_UNICAST) { + reason = SKB_DROP_REASON_IP_INVALID_DEST; goto martian_destination; + } make_route: err = ip_mkroute_input(skb, res, in_dev, daddr, saddr, dscp, flkeys); -out: return err; + if (!err) + reason = SKB_NOT_DROPPED_YET; + +out: + return reason; brd_input: - if (skb->protocol != htons(ETH_P_IP)) - goto e_inval; + if (skb->protocol != htons(ETH_P_IP)) { + reason = SKB_DROP_REASON_INVALID_PROTO; + goto out; + } if (!ipv4_is_zeronet(saddr)) { - err = -EINVAL; reason = fib_validate_source_reason(skb, saddr, 0, dscp, 0, dev, in_dev, &itag); if (reason) @@ -2362,7 +2380,7 @@ local_input: rth = rcu_dereference(nhc->nhc_rth_input); if (rt_cache_valid(rth)) { skb_dst_set_noref(skb, &rth->dst); - err = 0; + reason = SKB_NOT_DROPPED_YET; goto out; } } @@ -2399,7 +2417,7 @@ local_input: rt_add_uncached_list(rth); } skb_dst_set(skb, &rth->dst); - err = 0; + reason = SKB_NOT_DROPPED_YET; goto out; no_route: @@ -2420,12 +2438,8 @@ martian_destination: &daddr, &saddr, dev->name); #endif -e_inval: - err = -EINVAL; - goto out; - e_nobufs: - err = -ENOBUFS; + reason = SKB_DROP_REASON_NOMEM; goto out; martian_source: @@ -2482,7 +2496,7 @@ static int ip_route_input_rcu(struct sk_buff *skb, __be32 daddr, __be32 saddr, return reason ? -EINVAL : 0; } - return ip_route_input_slow(skb, daddr, saddr, dscp, dev, res); + return ip_route_input_slow(skb, daddr, saddr, dscp, dev, res) ? -EINVAL : 0; } int ip_route_input_noref(struct sk_buff *skb, __be32 daddr, __be32 saddr, -- 2.50.1 From 61b95c70f3449c1c0bd1415c8ef37e2959cf1c41 Mon Sep 17 00:00:00 2001 From: Menglong Dong Date: Thu, 7 Nov 2024 20:55:57 +0800 Subject: [PATCH 16/16] net: ip: make ip_route_input_rcu() return drop reasons In this commit, we make ip_route_input_rcu() return drop reasons, which come from ip_route_input_mc() and ip_route_input_slow(). The only caller of ip_route_input_rcu() is ip_route_input_noref(). We adjust it by making it return -EINVAL on error and ignore the reasons that ip_route_input_rcu() returns. In the following patch, we will make ip_route_input_noref() returns the drop reasons. 
Signed-off-by: Menglong Dong Signed-off-by: Paolo Abeni --- net/ipv4/route.c | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/net/ipv4/route.c b/net/ipv4/route.c index b73f0355cc38..270bc8c96619 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -2448,9 +2448,10 @@ martian_source: } /* called with rcu_read_lock held */ -static int ip_route_input_rcu(struct sk_buff *skb, __be32 daddr, __be32 saddr, - dscp_t dscp, struct net_device *dev, - struct fib_result *res) +static enum skb_drop_reason +ip_route_input_rcu(struct sk_buff *skb, __be32 daddr, __be32 saddr, + dscp_t dscp, struct net_device *dev, + struct fib_result *res) { /* Multicast recognition logic is moved from route cache to here. * The problem was that too many Ethernet cards have broken/missing @@ -2493,23 +2494,23 @@ static int ip_route_input_rcu(struct sk_buff *skb, __be32 daddr, __be32 saddr, reason = ip_route_input_mc(skb, daddr, saddr, dscp, dev, our); } - return reason ? -EINVAL : 0; + return reason; } - return ip_route_input_slow(skb, daddr, saddr, dscp, dev, res) ? -EINVAL : 0; + return ip_route_input_slow(skb, daddr, saddr, dscp, dev, res); } int ip_route_input_noref(struct sk_buff *skb, __be32 daddr, __be32 saddr, dscp_t dscp, struct net_device *dev) { + enum skb_drop_reason reason; struct fib_result res; - int err; rcu_read_lock(); - err = ip_route_input_rcu(skb, daddr, saddr, dscp, dev, &res); + reason = ip_route_input_rcu(skb, daddr, saddr, dscp, dev, &res); rcu_read_unlock(); - return err; + return reason ? -EINVAL : 0; } EXPORT_SYMBOL(ip_route_input_noref); @@ -3321,7 +3322,7 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, skb->mark = mark; err = ip_route_input_rcu(skb, dst, src, inet_dsfield_to_dscp(rtm->rtm_tos), - dev, &res); + dev, &res) ? -EINVAL : 0; rt = skb_rtable(skb); if (err == 0 && rt->dst.error) -- 2.50.1