www.infradead.org Git - users/dwmw2/linux.git/commitdiff
net/mlx5: Lag, move E-Switch prerequisite check into lag code
author Mark Bloch <mbloch@nvidia.com>
Tue, 1 Mar 2022 16:45:41 +0000 (16:45 +0000)
committer Saeed Mahameed <saeedm@nvidia.com>
Tue, 10 May 2022 05:54:01 +0000 (22:54 -0700)
There is no need to expose E-Switch function for something that can be
checked with already present API inside lag code.

Signed-off-by: Mark Bloch <mbloch@nvidia.com>
Reviewed-by: Maor Gottlieb <maorg@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c

index 25f2d2717aaa8440b69536b16dbc4077417e2760..8ef22893e5e67bba1d01c25f8e6c9991b2a33244 100644 (file)
@@ -1893,17 +1893,6 @@ mlx5_eswitch_get_encap_mode(const struct mlx5_core_dev *dev)
 }
 EXPORT_SYMBOL(mlx5_eswitch_get_encap_mode);
 
-bool mlx5_esw_lag_prereq(struct mlx5_core_dev *dev0, struct mlx5_core_dev *dev1)
-{
-       if ((dev0->priv.eswitch->mode == MLX5_ESWITCH_NONE &&
-            dev1->priv.eswitch->mode == MLX5_ESWITCH_NONE) ||
-           (dev0->priv.eswitch->mode == MLX5_ESWITCH_OFFLOADS &&
-            dev1->priv.eswitch->mode == MLX5_ESWITCH_OFFLOADS))
-               return true;
-
-       return false;
-}
-
 bool mlx5_esw_multipath_prereq(struct mlx5_core_dev *dev0,
                               struct mlx5_core_dev *dev1)
 {
index bac5160837c5101b28b74b8756703a6f7a794730..a5ae5df4d6f104e8e3348f26179c393bdbb5efab 100644 (file)
@@ -518,8 +518,6 @@ static inline bool mlx5_eswitch_vlan_actions_supported(struct mlx5_core_dev *dev
                MLX5_CAP_ESW_FLOWTABLE_FDB(dev, push_vlan_2);
 }
 
-bool mlx5_esw_lag_prereq(struct mlx5_core_dev *dev0,
-                        struct mlx5_core_dev *dev1);
 bool mlx5_esw_multipath_prereq(struct mlx5_core_dev *dev0,
                               struct mlx5_core_dev *dev1);
 
@@ -724,7 +722,6 @@ static inline int  mlx5_eswitch_init(struct mlx5_core_dev *dev) { return 0; }
 static inline void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw) {}
 static inline int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs) { return 0; }
 static inline void mlx5_eswitch_disable(struct mlx5_eswitch *esw, bool clear_vf) {}
-static inline bool mlx5_esw_lag_prereq(struct mlx5_core_dev *dev0, struct mlx5_core_dev *dev1) { return true; }
 static inline bool mlx5_eswitch_is_funcs_handler(struct mlx5_core_dev *dev) { return false; }
 static inline
 int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw, u16 vport, int link_state) { return 0; }
index fe34cce77d0764172fae68a0fc50c6f9ff33dc96..1de843d2f248a2e5f04f9682c386d39a43f72900 100644 (file)
@@ -457,12 +457,19 @@ static int mlx5_deactivate_lag(struct mlx5_lag *ldev)
 
 static bool mlx5_lag_check_prereq(struct mlx5_lag *ldev)
 {
+#ifdef CONFIG_MLX5_ESWITCH
+       u8 mode;
+#endif
+
        if (!ldev->pf[MLX5_LAG_P1].dev || !ldev->pf[MLX5_LAG_P2].dev)
                return false;
 
 #ifdef CONFIG_MLX5_ESWITCH
-       return mlx5_esw_lag_prereq(ldev->pf[MLX5_LAG_P1].dev,
-                                  ldev->pf[MLX5_LAG_P2].dev);
+       mode = mlx5_eswitch_mode(ldev->pf[MLX5_LAG_P1].dev);
+
+       return (mode == MLX5_ESWITCH_NONE || mode == MLX5_ESWITCH_OFFLOADS) &&
+               (mlx5_eswitch_mode(ldev->pf[MLX5_LAG_P1].dev) ==
+                mlx5_eswitch_mode(ldev->pf[MLX5_LAG_P2].dev));
 #else
        return (!mlx5_sriov_is_enabled(ldev->pf[MLX5_LAG_P1].dev) &&
                !mlx5_sriov_is_enabled(ldev->pf[MLX5_LAG_P2].dev));