net/mlx5: E-switch, Create ingress ACL when needed
author    Chris Mi <cmi@nvidia.com>
Thu, 27 Jun 2024 18:02:37 +0000 (21:02 +0300)
committer David S. Miller <davem@davemloft.net>
Fri, 28 Jun 2024 11:58:11 +0000 (12:58 +0100)
Currently, the ingress ACL is used for three features, but it is created
only when vport metadata match or prio tag is enabled. Active-backup lag
mode also uses it, yet that mode is independent of both vport metadata
match and prio tag, and vport metadata match can be disabled using the
following devlink command:

 # devlink dev param set pci/0000:08:00.0 name esw_port_metadata \
   value false cmode runtime

If the ingress ACL has not been created, the driver panics when creating
the drop rule for active-backup lag mode. Creating it unconditionally
causes about a 5% performance degradation.

Fix it by creating the ingress ACL only when needed. If esw_port_metadata
is true, the ingress ACL already exists, so create the drop rule using it.
If esw_port_metadata is false, create the ingress ACL first and then
create the drop rule.
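
For reference, a condensed sketch of the resulting drop-rule path,
reconstructed from the hunks below (it assumes the driver's existing mlx5
types and helpers; the actual patch is authoritative):

/*
 * Sketch of esw_acl_ingress_src_port_drop_create() after this change;
 * reconstructed from the diff, not a literal copy.
 */
static int esw_acl_ingress_src_port_drop_create(struct mlx5_eswitch *esw,
						struct mlx5_vport *vport)
{
	struct mlx5_flow_act flow_act = {};
	struct mlx5_flow_handle *flow_rule;
	bool created = false;
	int err;

	if (!vport->ingress.acl) {
		/* esw_port_metadata is false: no ingress ACL yet, set it up. */
		err = acl_ingress_ofld_setup(esw, vport);
		if (err)
			return err;
		created = true;
	}

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
	flow_act.fg = vport->ingress.offloads.drop_grp;
	flow_rule = mlx5_add_flow_rules(vport->ingress.acl, NULL, &flow_act, NULL, 0);
	if (IS_ERR(flow_rule)) {
		/* Only tear down an ingress ACL this function created itself. */
		if (created)
			esw_acl_ingress_ofld_cleanup(esw, vport);
		return PTR_ERR(flow_rule);
	}

	vport->ingress.offloads.drop_rule = flow_rule;
	return 0;
}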

Fixes: 1749c4c51c16 ("net/mlx5: E-switch, add drop rule support to ingress ACL")
Signed-off-by: Chris Mi <cmi@nvidia.com>
Signed-off-by: Tariq Toukan <tariqt@nvidia.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_ofld.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_ofld.c
index 50d2ea32397982884bb6618d1b1da53bf7ca53d5..a436ce895e45a694c4cdba0ea7b4a11d31c27044 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_ofld.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_ofld.c
@@ -6,6 +6,9 @@
 #include "helper.h"
 #include "ofld.h"
 
+static int
+acl_ingress_ofld_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport);
+
 static bool
 esw_acl_ingress_prio_tag_enabled(struct mlx5_eswitch *esw,
                                 const struct mlx5_vport *vport)
@@ -123,18 +126,31 @@ static int esw_acl_ingress_src_port_drop_create(struct mlx5_eswitch *esw,
 {
        struct mlx5_flow_act flow_act = {};
        struct mlx5_flow_handle *flow_rule;
+       bool created = false;
        int err = 0;
 
+       if (!vport->ingress.acl) {
+               err = acl_ingress_ofld_setup(esw, vport);
+               if (err)
+                       return err;
+               created = true;
+       }
+
        flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
        flow_act.fg = vport->ingress.offloads.drop_grp;
        flow_rule = mlx5_add_flow_rules(vport->ingress.acl, NULL, &flow_act, NULL, 0);
        if (IS_ERR(flow_rule)) {
                err = PTR_ERR(flow_rule);
-               goto out;
+               goto err_out;
        }
 
        vport->ingress.offloads.drop_rule = flow_rule;
-out:
+
+       return 0;
+err_out:
+       /* Only destroy ingress acl created in this function. */
+       if (created)
+               esw_acl_ingress_ofld_cleanup(esw, vport);
        return err;
 }
 
@@ -299,16 +315,12 @@ static void esw_acl_ingress_ofld_groups_destroy(struct mlx5_vport *vport)
        }
 }
 
-int esw_acl_ingress_ofld_setup(struct mlx5_eswitch *esw,
-                              struct mlx5_vport *vport)
+static int
+acl_ingress_ofld_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
 {
        int num_ftes = 0;
        int err;
 
-       if (!mlx5_eswitch_vport_match_metadata_enabled(esw) &&
-           !esw_acl_ingress_prio_tag_enabled(esw, vport))
-               return 0;
-
        esw_acl_ingress_allow_rule_destroy(vport);
 
        if (mlx5_eswitch_vport_match_metadata_enabled(esw))
@@ -347,6 +359,15 @@ group_err:
        return err;
 }
 
+int esw_acl_ingress_ofld_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
+{
+       if (!mlx5_eswitch_vport_match_metadata_enabled(esw) &&
+           !esw_acl_ingress_prio_tag_enabled(esw, vport))
+               return 0;
+
+       return acl_ingress_ofld_setup(esw, vport);
+}
+
 void esw_acl_ingress_ofld_cleanup(struct mlx5_eswitch *esw,
                                  struct mlx5_vport *vport)
 {