u32 *match_c, u32 *match_v,
                                                u32 action, u32 flow_tag)
 {
-       struct mlx5_flow_destination dest = {
-               .type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE,
-               {.ft = priv->fs.vlan.ft.t},
-       };
+       struct mlx5_core_dev *dev = priv->mdev;
+       struct mlx5_flow_destination dest = { 0 };
+       struct mlx5_fc *counter = NULL;
        struct mlx5_flow_rule *rule;
        bool table_created = false;
 
+       /*
+        * Forwarding rules steer into the vlan flow table; any other
+        * action (drop) gets a dedicated flow counter as its
+        * destination so the rule's hits can still be reported.
+        */
+       if (action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
+               dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+               dest.ft = priv->fs.vlan.ft.t;
+       } else {
+               counter = mlx5_fc_create(dev, true);
+               if (IS_ERR(counter))
+                       return ERR_CAST(counter);
+
+               dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+               dest.counter = counter;
+       }
+
        if (IS_ERR_OR_NULL(priv->fs.tc.t)) {
                priv->fs.tc.t =
                        mlx5_create_auto_grouped_flow_table(priv->fs.ns,
                if (IS_ERR(priv->fs.tc.t)) {
                        netdev_err(priv->netdev,
                                   "Failed to create tc offload table\n");
+                       /* Table creation failed — release the counter (if any). */
+                       rule = ERR_CAST(priv->fs.tc.t);
+                       goto err_create_ft;
                }
 
                table_created = true;
        rule = mlx5_add_flow_rule(priv->fs.tc.t, MLX5_MATCH_OUTER_HEADERS,
                                  match_c, match_v,
                                  action, flow_tag,
-                                 action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST ? &dest : NULL);
+                                 &dest);
+
+       if (IS_ERR(rule))
+               goto err_add_rule;
+
+       return rule;
 
-       if (IS_ERR(rule) && table_created) {
+err_add_rule:
+       /* Destroy the tc table only if this very call created it. */
+       if (table_created) {
                mlx5_destroy_flow_table(priv->fs.tc.t);
                priv->fs.tc.t = NULL;
        }
+err_create_ft:
+       /* counter is NULL on the FWD path — assumes mlx5_fc_destroy()
+        * tolerates a NULL counter; TODO confirm against its definition. */
+       mlx5_fc_destroy(dev, counter);
 
        return rule;
 }
 static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
                              struct mlx5_flow_rule *rule)
 {
+       struct mlx5_fc *counter = NULL;
+
+       /* Grab the rule's counter (may be absent) before the rule is freed. */
+       counter = mlx5_flow_rule_counter(rule);
+
        mlx5_del_flow_rule(rule);
 
+       /* Release the counter only after the rule no longer references it. */
+       mlx5_fc_destroy(priv->mdev, counter);
+
        if (!mlx5e_tc_num_filters(priv)) {
                mlx5_destroy_flow_table(priv->fs.tc.t);
                priv->fs.tc.t = NULL;
 
                if (is_tcf_gact_shot(a)) {
                        *action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
+                       /* Also count drops when the HW advertises flow counters. */
+                       if (MLX5_CAP_FLOWTABLE(priv->mdev,
+                                              flow_table_properties_nic_receive.flow_counter))
+                               *action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
                        continue;
                }
 
        return 0;
 }
 
+/*
+ * mlx5e_stats_flower - report HW stats for an offloaded flower filter.
+ *
+ * Looks up the offloaded flow by the filter cookie, reads the
+ * driver-cached byte/packet/lastuse values of its flow counter and
+ * pushes them into every TC action attached to the filter.
+ *
+ * Returns 0 on success (including counter-less rules, which have no
+ * stats to report) or -EINVAL if no flow matches the cookie.
+ */
+int mlx5e_stats_flower(struct mlx5e_priv *priv,
+                      struct tc_cls_flower_offload *f)
+{
+       struct mlx5e_tc_table *tc = &priv->fs.tc;
+       struct mlx5e_tc_flow *flow;
+       struct tc_action *a;
+       struct mlx5_fc *counter;
+       u64 bytes;
+       u64 packets;
+       u64 lastuse;
+
+       flow = rhashtable_lookup_fast(&tc->ht, &f->cookie,
+                                     tc->ht_params);
+       if (!flow)
+               return -EINVAL;
+
+       /* Rules without a counter attached have nothing to report. */
+       counter = mlx5_flow_rule_counter(flow->rule);
+       if (!counter)
+               return 0;
+
+       /* Values come from the driver's cache, not a fresh FW query. */
+       mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);
+
+       tc_for_each_action(a, f->exts)
+               tcf_action_stats_update(a, bytes, packets, lastuse);
+
+       return 0;
+}
+
 static const struct rhashtable_params mlx5e_tc_flow_ht_params = {
        .head_offset = offsetof(struct mlx5e_tc_flow, node),
        .key_offset = offsetof(struct mlx5e_tc_flow, cookie),