en/tc/act/ct.o en/tc/act/sample.o en/tc/act/ptype.o \
                                        en/tc/act/redirect_ingress.o
 
-mlx5_core-$(CONFIG_MLX5_TC_CT)      += en/tc_ct.o
+mlx5_core-$(CONFIG_MLX5_TC_CT)      += en/tc_ct.o en/tc/ct_fs_dmfs.o
 mlx5_core-$(CONFIG_MLX5_TC_SAMPLE)   += en/tc/sample.o
 
 #
 
--- /dev/null
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. */
+
+#ifndef __MLX5_EN_TC_CT_FS_H__
+#define __MLX5_EN_TC_CT_FS_H__
+
+/* Common context for a connection-tracking (CT) flow-steering backend.
+ * Backend implementations receive this with every ct_rule_add/del call.
+ */
+struct mlx5_ct_fs {
+	/* Used to reach the mlx5e priv (netdev_priv()) and for netdev_dbg() logging. */
+	const struct net_device *netdev;
+	struct mlx5_core_dev *dev;
+};
+
+/* Opaque per-rule handle. Backends embed this in their own rule struct and
+ * recover it via container_of() in ct_rule_del().
+ */
+struct mlx5_ct_fs_rule {
+};
+
+/* Backend operations for offloading CT entry rules.
+ * ct_rule_add() returns a valid handle or an ERR_PTR() on failure;
+ * ct_rule_del() releases a handle previously returned by ct_rule_add().
+ */
+struct mlx5_ct_fs_ops {
+	struct mlx5_ct_fs_rule * (*ct_rule_add)(struct mlx5_ct_fs *fs,
+						struct mlx5_flow_spec *spec,
+						struct mlx5_flow_attr *attr);
+	void (*ct_rule_del)(struct mlx5_ct_fs *fs, struct mlx5_ct_fs_rule *fs_rule);
+};
+
+/* Returns the ops table of the dmfs backend (static, never fails). */
+struct mlx5_ct_fs_ops *mlx5_ct_fs_dmfs_ops_get(void);
+
+#endif /* __MLX5_EN_TC_CT_FS_H__ */
 
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. */
+
+#include "en_tc.h"
+#include "en/tc_ct.h"
+#include "en/tc/ct_fs.h"
+
+/* Debug helper; expects a local variable named 'fs' (struct mlx5_ct_fs *)
+ * to be in scope at every call site.
+ */
+#define ct_dbg(fmt, args...)\
+	netdev_dbg(fs->netdev, "ct_fs_dmfs debug: " fmt "\n", ##args)
+
+/* dmfs backend rule: wraps the generic fs_rule handle around the inserted
+ * flow handle and the attr it was inserted with (needed again at delete).
+ */
+struct mlx5_ct_fs_dmfs_rule {
+	struct mlx5_ct_fs_rule fs_rule;
+	struct mlx5_flow_handle *rule;
+	struct mlx5_flow_attr *attr;
+};
+
+/* ct_rule_add() implementation for the dmfs backend: inserts the rule via
+ * the generic mlx5_tc_rule_insert() path.
+ *
+ * Returns the embedded fs_rule handle on success, ERR_PTR() on failure.
+ * Note: 'attr' is stored by reference and used again at delete time, so it
+ * must remain valid until mlx5_ct_fs_dmfs_ct_rule_del() is called.
+ */
+static struct mlx5_ct_fs_rule *
+mlx5_ct_fs_dmfs_ct_rule_add(struct mlx5_ct_fs *fs, struct mlx5_flow_spec *spec,
+			    struct mlx5_flow_attr *attr)
+{
+	struct mlx5e_priv *priv = netdev_priv(fs->netdev);
+	struct mlx5_ct_fs_dmfs_rule *dmfs_rule;
+	int err;
+
+	dmfs_rule = kzalloc(sizeof(*dmfs_rule), GFP_KERNEL);
+	if (!dmfs_rule)
+		return ERR_PTR(-ENOMEM);
+
+	dmfs_rule->rule = mlx5_tc_rule_insert(priv, spec, attr);
+	if (IS_ERR(dmfs_rule->rule)) {
+		err = PTR_ERR(dmfs_rule->rule);
+		ct_dbg("Failed to add ct entry fs rule");
+		goto err_insert;
+	}
+
+	dmfs_rule->attr = attr;
+
+	return &dmfs_rule->fs_rule;
+
+err_insert:
+	kfree(dmfs_rule);
+	return ERR_PTR(err);
+}
+
+/* ct_rule_del() implementation for the dmfs backend: recovers the wrapper
+ * via container_of(), removes the flow rule with the attr saved at add time,
+ * and frees the wrapper.
+ */
+static void
+mlx5_ct_fs_dmfs_ct_rule_del(struct mlx5_ct_fs *fs, struct mlx5_ct_fs_rule *fs_rule)
+{
+	struct mlx5_ct_fs_dmfs_rule *dmfs_rule = container_of(fs_rule,
+							      struct mlx5_ct_fs_dmfs_rule,
+							      fs_rule);
+
+	mlx5_tc_rule_delete(netdev_priv(fs->netdev), dmfs_rule->rule, dmfs_rule->attr);
+	kfree(dmfs_rule);
+}
+
+/* Ops table for the dmfs backend; returned by reference, never freed. */
+static struct mlx5_ct_fs_ops dmfs_ops = {
+	.ct_rule_add = mlx5_ct_fs_dmfs_ct_rule_add,
+	.ct_rule_del = mlx5_ct_fs_dmfs_ct_rule_del,
+};
+
+struct mlx5_ct_fs_ops *mlx5_ct_fs_dmfs_ops_get(void)
+{
+	return &dmfs_ops;
+}
 
 
 #include "lib/fs_chains.h"
 #include "en/tc_ct.h"
+#include "en/tc/ct_fs.h"
 #include "en/tc_priv.h"
 #include "en/mod_hdr.h"
 #include "en/mapping.h"
        struct mapping_ctx *labels_mapping;
        enum mlx5_flow_namespace_type ns_type;
        struct mlx5_fs_chains *chains;
+       struct mlx5_ct_fs *fs;
+       struct mlx5_ct_fs_ops *fs_ops;
        spinlock_t ht_lock; /* protects ft entries */
 };
 
 };
 
 struct mlx5_ct_zone_rule {
-       struct mlx5_flow_handle *rule;
+       struct mlx5_ct_fs_rule *rule;
        struct mlx5e_mod_hdr_handle *mh;
        struct mlx5_flow_attr *attr;
        bool nat;
 
        ct_dbg("Deleting ct entry rule in zone %d", entry->tuple.zone);
 
-       mlx5_tc_rule_delete(netdev_priv(ct_priv->netdev), zone_rule->rule, attr);
+       ct_priv->fs_ops->ct_rule_del(ct_priv->fs, zone_rule->rule);
        mlx5_tc_ct_entry_destroy_mod_hdr(ct_priv, zone_rule->attr, zone_rule->mh);
        mlx5_put_label_mapping(ct_priv, attr->ct_attr.ct_labels_id);
        kfree(attr);
        mlx5_tc_ct_set_tuple_match(ct_priv, spec, flow_rule);
        mlx5e_tc_match_to_reg_match(spec, ZONE_TO_REG, entry->tuple.zone, MLX5_CT_ZONE_MASK);
 
-       zone_rule->rule = mlx5_tc_rule_insert(priv, spec, attr);
+       zone_rule->rule = ct_priv->fs_ops->ct_rule_add(ct_priv->fs, spec, attr);
        if (IS_ERR(zone_rule->rule)) {
                err = PTR_ERR(zone_rule->rule);
                ct_dbg("Failed to add ct entry rule, nat: %d", nat);
        mutex_unlock(&priv->control_lock);
 }
 
+/* Allocate the CT flow-steering context and bind the dmfs backend ops.
+ * Returns 0 on success, -ENOMEM on allocation failure.
+ * The context is freed in the cleanup path (kfree(ct_priv->fs)).
+ */
+static int
+mlx5_tc_ct_fs_init(struct mlx5_tc_ct_priv *ct_priv)
+{
+	struct mlx5_ct_fs_ops *fs_ops = mlx5_ct_fs_dmfs_ops_get();
+
+	ct_priv->fs = kzalloc(sizeof(*ct_priv->fs), GFP_KERNEL);
+	if (!ct_priv->fs)
+		return -ENOMEM;
+
+	ct_priv->fs->netdev = ct_priv->netdev;
+	ct_priv->fs->dev = ct_priv->dev;
+	ct_priv->fs_ops = fs_ops;
+
+	return 0;
+}
+
 static int
 mlx5_tc_ct_init_check_esw_support(struct mlx5_eswitch *esw,
                                  const char **err_msg)
        if (rhashtable_init(&ct_priv->ct_tuples_nat_ht, &tuples_nat_ht_params))
                goto err_ct_tuples_nat_ht;
 
+       err = mlx5_tc_ct_fs_init(ct_priv);
+       if (err)
+               goto err_init_fs;
+
        return ct_priv;
 
+err_init_fs:
+       rhashtable_destroy(&ct_priv->ct_tuples_nat_ht);
 err_ct_tuples_nat_ht:
        rhashtable_destroy(&ct_priv->ct_tuples_ht);
 err_ct_tuples_ht:
 
        chains = ct_priv->chains;
 
+       kfree(ct_priv->fs);
+
        mlx5_chains_destroy_global_table(chains, ct_priv->ct_nat);
        mlx5_chains_destroy_global_table(chains, ct_priv->ct);
        mapping_destroy(ct_priv->zone_mapping);