net/mlx5e: TC, Restore tunnel info for sample offload
author    Chris Mi <cmi@nvidia.com>
          Fri, 30 Apr 2021 07:17:33 +0000 (10:17 +0300)
committer Saeed Mahameed <saeedm@nvidia.com>
          Fri, 20 Aug 2021 04:50:38 +0000 (21:50 -0700)
Currently the sample offload actions send the encapsulated packet
to software. sFlow expects tunneled packets to be decapsulated while
having the tunnel properties on the skb metadata fields.

Reuse the functions used by connection tracking to map the outer
header properties to a unique id. The next patch will use that id
to restore the tunnel information of decapsulated packets onto the
skb.

Signed-off-by: Chris Mi <cmi@nvidia.com>
Reviewed-by: Oz Shlomo <ozsh@nvidia.com>
Reviewed-by: Roi Dayan <roid@nvidia.com>
Reviewed-by: Mark Bloch <mbloch@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.c
drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.h
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
drivers/net/ethernet/mellanox/mlx5/core/eswitch.h

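For context on the mechanism the diff relies on: at offload time,
mlx5e_tc_sample_offload() packs the sample metadata (now including the
tunnel_id) into a struct mlx5_mapped_obj and registers it with
mapping_add(), which returns a compact id the hardware carries in
reg_c0; on the receive side, mlx5e_rep_tc_update_skb() recovers the
object with mapping_find() and hands it to mlx5e_restore_skb_sample().
The standalone userspace sketch below models only that id round trip;
the array-backed pool and the obj_pool_*() helpers are illustrative
stand-ins for the driver's mapping context, not kernel API.

/* Sketch: store an object under a small id at "offload" time, then
 * recover it from that id at "receive" time, as the reg_c0 object
 * pool does in the patch below. Illustrative only; not kernel code.
 */
#include <stdint.h>
#include <stdio.h>

struct sample_obj {
	uint32_t group_id;
	uint32_t rate;
	uint32_t trunc_size;
	uint32_t tunnel_id;	/* the field this patch adds */
};

#define MAX_OBJS 16
static struct sample_obj pool[MAX_OBJS];
static unsigned int pool_used;

/* offload path: analogue of mapping_add(); hands back an id for reg_c0 */
static int obj_pool_add(const struct sample_obj *obj, uint32_t *id)
{
	if (pool_used >= MAX_OBJS)
		return -1;
	pool[pool_used] = *obj;
	*id = pool_used++;
	return 0;
}

/* receive path: analogue of mapping_find(); recovers the object */
static int obj_pool_find(uint32_t id, struct sample_obj *obj)
{
	if (id >= pool_used)
		return -1;
	*obj = pool[id];
	return 0;
}

int main(void)
{
	struct sample_obj offload = {
		.group_id = 7, .rate = 100, .trunc_size = 128, .tunnel_id = 3,
	};
	struct sample_obj restored;
	uint32_t id;

	if (obj_pool_add(&offload, &id))	/* offload: object -> id */
		return 1;
	if (obj_pool_find(id, &restored))	/* receive: id -> object */
		return 1;
	printf("id %u -> tunnel_id %u\n",
	       (unsigned)id, (unsigned)restored.tunnel_id);
	return 0;
}
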
index 756b85349a9513a2f81cbbb115be47aa092f8dc0..51a4d80f7fa34033d8229d1c9eb27bdc8e3c7669 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
@@ -608,8 +608,8 @@ static bool mlx5e_restore_tunnel(struct mlx5e_priv *priv, struct sk_buff *skb,
        return true;
 }
 
-static bool mlx5e_restore_skb(struct sk_buff *skb, u32 chain, u32 reg_c1,
-                             struct mlx5e_tc_update_priv *tc_priv)
+static bool mlx5e_restore_skb_chain(struct sk_buff *skb, u32 chain, u32 reg_c1,
+                                   struct mlx5e_tc_update_priv *tc_priv)
 {
        struct mlx5e_priv *priv = netdev_priv(skb->dev);
        u32 tunnel_id = (reg_c1 >> ESW_TUN_OFFSET) & TUNNEL_ID_MASK;
@@ -641,6 +641,21 @@ static bool mlx5e_restore_skb(struct sk_buff *skb, u32 chain, u32 reg_c1,
        return mlx5e_restore_tunnel(priv, skb, tc_priv, tunnel_id);
 }
 
+static void mlx5e_restore_skb_sample(struct mlx5e_priv *priv, struct sk_buff *skb,
+                                    struct mlx5_mapped_obj *mapped_obj,
+                                    struct mlx5e_tc_update_priv *tc_priv)
+{
+       if (!mlx5e_restore_tunnel(priv, skb, tc_priv, mapped_obj->sample.tunnel_id)) {
+               netdev_dbg(priv->netdev,
+                          "Failed to restore tunnel info for sampled packet\n");
+               return;
+       }
+#if IS_ENABLED(CONFIG_MLX5_TC_SAMPLE)
+       mlx5e_tc_sample_skb(skb, mapped_obj);
+#endif /* CONFIG_MLX5_TC_SAMPLE */
+       mlx5_rep_tc_post_napi_receive(tc_priv);
+}
+
 bool mlx5e_rep_tc_update_skb(struct mlx5_cqe64 *cqe,
                             struct sk_buff *skb,
                             struct mlx5e_tc_update_priv *tc_priv)
@@ -648,7 +663,7 @@ bool mlx5e_rep_tc_update_skb(struct mlx5_cqe64 *cqe,
        struct mlx5_mapped_obj mapped_obj;
        struct mlx5_eswitch *esw;
        struct mlx5e_priv *priv;
-       u32 reg_c0, reg_c1;
+       u32 reg_c0;
        int err;
 
        reg_c0 = (be32_to_cpu(cqe->sop_drop_qpn) & MLX5E_TC_FLOW_ID_MASK);
@@ -660,8 +675,6 @@ bool mlx5e_rep_tc_update_skb(struct mlx5_cqe64 *cqe,
         */
        skb->mark = 0;
 
-       reg_c1 = be32_to_cpu(cqe->ft_metadata);
-
        priv = netdev_priv(skb->dev);
        esw = priv->mdev->priv.eswitch;
        err = mapping_find(esw->offloads.reg_c0_obj_pool, reg_c0, &mapped_obj);
@@ -673,12 +686,12 @@ bool mlx5e_rep_tc_update_skb(struct mlx5_cqe64 *cqe,
        }
 
        if (mapped_obj.type == MLX5_MAPPED_OBJ_CHAIN) {
-               return mlx5e_restore_skb(skb, mapped_obj.chain, reg_c1, tc_priv);
-#if IS_ENABLED(CONFIG_MLX5_TC_SAMPLE)
+               u32 reg_c1 = be32_to_cpu(cqe->ft_metadata);
+
+               return mlx5e_restore_skb_chain(skb, mapped_obj.chain, reg_c1, tc_priv);
        } else if (mapped_obj.type == MLX5_MAPPED_OBJ_SAMPLE) {
-               mlx5e_tc_sample_skb(skb, &mapped_obj);
+               mlx5e_restore_skb_sample(priv, skb, &mapped_obj, tc_priv);
                return false;
-#endif /* CONFIG_MLX5_TC_SAMPLE */
        } else {
                netdev_dbg(priv->netdev, "Invalid mapped object type: %d\n", mapped_obj.type);
                return false;
index a6e19946e80f5f95006dacda91c7a263f59d0f54..739292d52aca1fc96c22d3aabd70765c5f3a567d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.c
@@ -364,7 +364,8 @@ void mlx5e_tc_sample_skb(struct sk_buff *skb, struct mlx5_mapped_obj *mapped_obj
 struct mlx5_flow_handle *
 mlx5e_tc_sample_offload(struct mlx5e_tc_psample *tc_psample,
                        struct mlx5_flow_spec *spec,
-                       struct mlx5_flow_attr *attr)
+                       struct mlx5_flow_attr *attr,
+                       u32 tunnel_id)
 {
        struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
        struct mlx5_vport_tbl_attr per_vport_tbl_attr;
@@ -438,6 +439,7 @@ mlx5e_tc_sample_offload(struct mlx5e_tc_psample *tc_psample,
        restore_obj.sample.group_id = sample_attr->group_num;
        restore_obj.sample.rate = sample_attr->rate;
        restore_obj.sample.trunc_size = sample_attr->trunc_size;
+       restore_obj.sample.tunnel_id = tunnel_id;
        err = mapping_add(esw->offloads.reg_c0_obj_pool, &restore_obj, &obj_id);
        if (err)
                goto err_obj_id;
index c8aa42ee00753cff2116d2964944c27fceed9386..1bcf4d399ccd3b58bbc894656bcf5f33d289c8bb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.h
@@ -24,7 +24,8 @@ void mlx5e_tc_sample_skb(struct sk_buff *skb, struct mlx5_mapped_obj *mapped_obj
 struct mlx5_flow_handle *
 mlx5e_tc_sample_offload(struct mlx5e_tc_psample *sample_priv,
                        struct mlx5_flow_spec *spec,
-                       struct mlx5_flow_attr *attr);
+                       struct mlx5_flow_attr *attr,
+                       u32 tunnel_id);
 
 void
 mlx5e_tc_sample_unoffload(struct mlx5e_tc_psample *sample_priv,
index 8049c4ca898959df33ec83ca74a7a03a831e462f..38cf5bdfbd4bb37e84bea3c11c677a9da48f2a68 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -1148,7 +1148,8 @@ mlx5e_tc_offload_fdb_rules(struct mlx5_eswitch *esw,
                                               mod_hdr_acts);
 #if IS_ENABLED(CONFIG_MLX5_TC_SAMPLE)
        } else if (flow_flag_test(flow, SAMPLE)) {
-               rule = mlx5e_tc_sample_offload(get_sample_priv(flow->priv), spec, attr);
+               rule = mlx5e_tc_sample_offload(get_sample_priv(flow->priv), spec, attr,
+                                              mlx5e_tc_get_flow_tun_id(flow));
 #endif
        } else {
                rule = mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
@@ -1625,17 +1626,22 @@ static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
        }
 }
 
-static int flow_has_tc_fwd_action(struct flow_cls_offload *f)
+static bool flow_requires_tunnel_mapping(u32 chain, struct flow_cls_offload *f)
 {
        struct flow_rule *rule = flow_cls_offload_flow_rule(f);
        struct flow_action *flow_action = &rule->action;
        const struct flow_action_entry *act;
        int i;
 
+       if (chain)
+               return false;
+
        flow_action_for_each(i, act, flow_action) {
                switch (act->id) {
                case FLOW_ACTION_GOTO:
                        return true;
+               case FLOW_ACTION_SAMPLE:
+                       return true;
                default:
                        continue;
                }
@@ -1876,7 +1882,7 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv,
                return -EOPNOTSUPP;
 
        needs_mapping = !!flow->attr->chain;
-       sets_mapping = !flow->attr->chain && flow_has_tc_fwd_action(f);
+       sets_mapping = flow_requires_tunnel_mapping(flow->attr->chain, f);
        *match_inner = !needs_mapping;
 
        if ((needs_mapping || sets_mapping) &&
index 3aae1152184b948ab9d7d0ec17f6539e9b4a80b7..3be34b24e737b747fd70c691319f567c4664f673 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -61,6 +61,7 @@ struct mlx5_mapped_obj {
                        u32 group_id;
                        u32 rate;
                        u32 trunc_size;
+                       u32 tunnel_id;
                } sample;
        };
 };