blocking_notifier_call_chain(&esw->n_head, 0, &info);
 }
 
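+/* Create the per-vport egress and ingress ACL flow steering namespaces for
+ * this eswitch. Missing FW support for either direction only triggers a
+ * warning and is not treated as an error.
+ */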
+static int mlx5_esw_acls_ns_init(struct mlx5_eswitch *esw)
+{
+       struct mlx5_core_dev *dev = esw->dev;
+       int total_vports;
+       int err;
+
+       total_vports = mlx5_eswitch_get_total_vports(dev);
+
+       if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support)) {
+               err = mlx5_fs_egress_acls_init(dev, total_vports);
+               if (err)
+                       return err;
+       } else {
+               esw_warn(dev, "egress ACL is not supported by FW\n");
+       }
+
+       if (MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support)) {
+               err = mlx5_fs_ingress_acls_init(dev, total_vports);
+               if (err)
+                       goto err;
+       } else {
+               esw_warn(dev, "ingress ACL is not supported by FW\n");
+       }
+       return 0;
+
+err:
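+       /* ingress ACL init failed; release the egress namespaces if created */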
+       if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support))
+               mlx5_fs_egress_acls_cleanup(dev);
+       return err;
+}
+
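+/* Release the ACL namespaces created by mlx5_esw_acls_ns_init() */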
+static void mlx5_esw_acls_ns_cleanup(struct mlx5_eswitch *esw)
+{
+       struct mlx5_core_dev *dev = esw->dev;
+
+       if (MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support))
+               mlx5_fs_ingress_acls_cleanup(dev);
+       if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support))
+               mlx5_fs_egress_acls_cleanup(dev);
+}
+
 /**
  * mlx5_eswitch_enable_locked - Enable eswitch
  * @esw:       Pointer to eswitch
                return -EOPNOTSUPP;
        }
 
-       if (!MLX5_CAP_ESW_INGRESS_ACL(esw->dev, ft_support))
-               esw_warn(esw->dev, "ingress ACL is not supported by FW\n");
-
-       if (!MLX5_CAP_ESW_EGRESS_ACL(esw->dev, ft_support))
-               esw_warn(esw->dev, "engress ACL is not supported by FW\n");
-
        mlx5_eswitch_get_devlink_param(esw);
 
+       err = mlx5_esw_acls_ns_init(esw);
+       if (err)
+               return err;
+
        mlx5_eswitch_update_num_of_vfs(esw, num_vfs);
 
        esw_create_tsar(esw);
                mlx5_rescan_drivers(esw->dev);
 
        esw_destroy_tsar(esw);
+       mlx5_esw_acls_ns_cleanup(esw);
        return err;
 }
 
                mlx5_rescan_drivers(esw->dev);
 
        esw_destroy_tsar(esw);
+       mlx5_esw_acls_ns_cleanup(esw);
 
        if (clear_vf)
                mlx5_eswitch_clear_vf_vports_info(esw);
 
 {
        struct mlx5_flow_steering *steering = dev->priv.steering;
 
-       if (!steering || vport >= mlx5_eswitch_get_total_vports(dev))
+       if (!steering)
                return NULL;
 
        switch (type) {
        case MLX5_FLOW_NAMESPACE_ESW_EGRESS:
+               if (vport >= steering->esw_egress_acl_vports)
+                       return NULL;
                if (steering->esw_egress_root_ns &&
                    steering->esw_egress_root_ns[vport])
                        return &steering->esw_egress_root_ns[vport]->ns;
                else
                        return NULL;
        case MLX5_FLOW_NAMESPACE_ESW_INGRESS:
+               if (vport >= steering->esw_ingress_acl_vports)
+                       return NULL;
                if (steering->esw_ingress_root_ns &&
                    steering->esw_ingress_root_ns[vport])
                        return &steering->esw_ingress_root_ns[vport]->ns;
        clean_tree(&root_ns->ns.node);
 }
 
-static void cleanup_egress_acls_root_ns(struct mlx5_core_dev *dev)
-{
-       struct mlx5_flow_steering *steering = dev->priv.steering;
-       int i;
-
-       if (!steering->esw_egress_root_ns)
-               return;
-
-       for (i = 0; i < mlx5_eswitch_get_total_vports(dev); i++)
-               cleanup_root_ns(steering->esw_egress_root_ns[i]);
-
-       kfree(steering->esw_egress_root_ns);
-       steering->esw_egress_root_ns = NULL;
-}
-
-static void cleanup_ingress_acls_root_ns(struct mlx5_core_dev *dev)
-{
-       struct mlx5_flow_steering *steering = dev->priv.steering;
-       int i;
-
-       if (!steering->esw_ingress_root_ns)
-               return;
-
-       for (i = 0; i < mlx5_eswitch_get_total_vports(dev); i++)
-               cleanup_root_ns(steering->esw_ingress_root_ns[i]);
-
-       kfree(steering->esw_ingress_root_ns);
-       steering->esw_ingress_root_ns = NULL;
-}
-
 void mlx5_cleanup_fs(struct mlx5_core_dev *dev)
 {
        struct mlx5_flow_steering *steering = dev->priv.steering;
 
        cleanup_root_ns(steering->root_ns);
-       cleanup_egress_acls_root_ns(dev);
-       cleanup_ingress_acls_root_ns(dev);
        cleanup_root_ns(steering->fdb_root_ns);
        steering->fdb_root_ns = NULL;
        kfree(steering->fdb_sub_ns);
        return PTR_ERR_OR_ZERO(prio);
 }
 
-static int init_egress_acls_root_ns(struct mlx5_core_dev *dev)
+int mlx5_fs_egress_acls_init(struct mlx5_core_dev *dev, int total_vports)
 {
        struct mlx5_flow_steering *steering = dev->priv.steering;
-       int total_vports = mlx5_eswitch_get_total_vports(dev);
        int err;
        int i;
 
                if (err)
                        goto cleanup_root_ns;
        }
-
+       steering->esw_egress_acl_vports = total_vports;
        return 0;
 
 cleanup_root_ns:
        return err;
 }
 
-static int init_ingress_acls_root_ns(struct mlx5_core_dev *dev)
+void mlx5_fs_egress_acls_cleanup(struct mlx5_core_dev *dev)
+{
+       struct mlx5_flow_steering *steering = dev->priv.steering;
+       int i;
+
+       if (!steering->esw_egress_root_ns)
+               return;
+
+       for (i = 0; i < steering->esw_egress_acl_vports; i++)
+               cleanup_root_ns(steering->esw_egress_root_ns[i]);
+
+       kfree(steering->esw_egress_root_ns);
+       steering->esw_egress_root_ns = NULL;
+}
+
+int mlx5_fs_ingress_acls_init(struct mlx5_core_dev *dev, int total_vports)
 {
        struct mlx5_flow_steering *steering = dev->priv.steering;
-       int total_vports = mlx5_eswitch_get_total_vports(dev);
        int err;
        int i;
 
                if (err)
                        goto cleanup_root_ns;
        }
-
+       steering->esw_ingress_acl_vports = total_vports;
        return 0;
 
 cleanup_root_ns:
        return err;
 }
 
+void mlx5_fs_ingress_acls_cleanup(struct mlx5_core_dev *dev)
+{
+       struct mlx5_flow_steering *steering = dev->priv.steering;
+       int i;
+
+       if (!steering->esw_ingress_root_ns)
+               return;
+
+       for (i = 0; i < steering->esw_ingress_acl_vports; i++)
+               cleanup_root_ns(steering->esw_ingress_root_ns[i]);
+
+       kfree(steering->esw_ingress_root_ns);
+       steering->esw_ingress_root_ns = NULL;
+}
+
 static int init_egress_root_ns(struct mlx5_flow_steering *steering)
 {
        int err;
                        if (err)
                                goto err;
                }
-               if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support)) {
-                       err = init_egress_acls_root_ns(dev);
-                       if (err)
-                               goto err;
-               }
-               if (MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support)) {
-                       err = init_ingress_acls_root_ns(dev);
-                       if (err)
-                               goto err;
-               }
        }
 
        if (MLX5_CAP_FLOWTABLE_SNIFFER_RX(dev, ft_support)) {
 
        struct mlx5_flow_root_namespace *rdma_rx_root_ns;
        struct mlx5_flow_root_namespace *rdma_tx_root_ns;
        struct mlx5_flow_root_namespace *egress_root_ns;
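+       /* Number of vports for which ACL namespaces have been allocated */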
+       int esw_egress_acl_vports;
+       int esw_ingress_acl_vports;
 };
 
 struct fs_node {
 int mlx5_init_fs(struct mlx5_core_dev *dev);
 void mlx5_cleanup_fs(struct mlx5_core_dev *dev);
 
+int mlx5_fs_egress_acls_init(struct mlx5_core_dev *dev, int total_vports);
+void mlx5_fs_egress_acls_cleanup(struct mlx5_core_dev *dev);
+int mlx5_fs_ingress_acls_init(struct mlx5_core_dev *dev, int total_vports);
+void mlx5_fs_ingress_acls_cleanup(struct mlx5_core_dev *dev);
+
 #define fs_get_obj(v, _node)  {v = container_of((_node), typeof(*v), node); }
 
 #define fs_list_for_each_entry(pos, root)              \