When creating IPsec flow tables with tunnel mode enabled, the driver
uses mlx5_eswitch_block_encap() to prevent tunnel encapsulation
conflicts across different domains (NIC_RX/NIC_TX and FDB), since the
firmware does not allow tunnel offload in both domains at the same
time.
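
For reference, the reserve/release pattern around table creation looks
roughly as follows (a condensed sketch of the existing RX path, built
from the hunks below; error handling and the surrounding function
bodies are omitted):

    /* Reserve tunnel mode before creating the flow table. */
    if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_TUNNEL)
        rx->allow_tunnel_mode = mlx5_eswitch_block_encap(mdev);
    if (rx->allow_tunnel_mode)
        flags = MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;

    /* Release the reservation again on teardown or failure. */
    if (rx->allow_tunnel_mode)
        mlx5_eswitch_unblock_encap(mdev);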
Currently, the driver attempts to reserve tunnel mode unconditionally
for both NIC and FDB IPsec tables. This can lead to conflicting tunnel
mode setups: for example, a flow table is created in the FDB domain
with tunnel offload enabled, and the driver later tries to create
another one in the NIC domain, or vice versa.
To resolve this, adjust the blocking logic so that tunnel mode is
reserved only by NIC flows. This ensures that tunnel offload is used
exclusively by either the NIC or the FDB, and avoids unintended
offload conflicts.
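
With this change, the decision inside mlx5_eswitch_block_encap() boils
down to the following (a condensed restatement of the hunk below; the
mode_lock handling and num_block_encap accounting are omitted):

    /*
     * Legacy e-switch mode always allows tunnel mode. Otherwise, only
     * NIC callers (!from_fdb) may reserve it, and only while devlink
     * encap is disabled.
     */
    if (esw->mode == MLX5_ESWITCH_LEGACY ||
        (encap == DEVLINK_ESWITCH_ENCAP_MODE_NONE && !from_fdb))
        allow_tunnel = true;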
Fixes: 1762f132d542 ("net/mlx5e: Support IPsec packet offload for RX in switchdev mode")
Fixes: c6c2bf5db4ea ("net/mlx5e: Support IPsec packet offload for TX in switchdev mode")
Signed-off-by: Carolina Jubran <cjubran@nvidia.com>
Reviewed-by: Jianbo Liu <jianbol@nvidia.com>
Reviewed-by: Leon Romanovsky <leonro@nvidia.com>
Signed-off-by: Tariq Toukan <tariqt@nvidia.com>
Link: https://patch.msgid.link/1759652999-858513-2-git-send-email-tariqt@nvidia.com
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
 	/* Create FT */
 	if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_TUNNEL)
-		rx->allow_tunnel_mode = mlx5_eswitch_block_encap(mdev);
+		rx->allow_tunnel_mode =
+			mlx5_eswitch_block_encap(mdev, rx == ipsec->rx_esw);
+
 	if (rx->allow_tunnel_mode)
 		flags = MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
 	ft = ipsec_ft_create(attr.ns, attr.sa_level, attr.prio, 1, 2, flags);
 		goto err_status_rule;
 
 	if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_TUNNEL)
-		tx->allow_tunnel_mode = mlx5_eswitch_block_encap(mdev);
+		tx->allow_tunnel_mode =
+			mlx5_eswitch_block_encap(mdev, tx == ipsec->tx_esw);
+
 	if (tx->allow_tunnel_mode)
 		flags = MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
 	ft = ipsec_ft_create(tx->ns, attr.sa_level, attr.prio, 1, 4, flags);
 					struct mlx5_eswitch *slave_esw);
 int mlx5_eswitch_reload_ib_reps(struct mlx5_eswitch *esw);
-bool mlx5_eswitch_block_encap(struct mlx5_core_dev *dev);
+bool mlx5_eswitch_block_encap(struct mlx5_core_dev *dev, bool from_fdb);
 void mlx5_eswitch_unblock_encap(struct mlx5_core_dev *dev);
 int mlx5_eswitch_block_mode(struct mlx5_core_dev *dev);
 	return 0;
 }
 
-static inline bool mlx5_eswitch_block_encap(struct mlx5_core_dev *dev)
+static inline bool
+mlx5_eswitch_block_encap(struct mlx5_core_dev *dev, bool from_fdb)
 {
 	return true;
 }
 	return esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode);
 }
 
-bool mlx5_eswitch_block_encap(struct mlx5_core_dev *dev)
+bool mlx5_eswitch_block_encap(struct mlx5_core_dev *dev, bool from_fdb)
 {
 	struct mlx5_eswitch *esw = dev->priv.eswitch;
+	enum devlink_eswitch_encap_mode encap;
+	bool allow_tunnel = false;
 
 	if (!mlx5_esw_allowed(esw))
 		return true;
 
 	down_write(&esw->mode_lock);
-	if (esw->mode != MLX5_ESWITCH_LEGACY &&
-	    esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE) {
-		up_write(&esw->mode_lock);
-		return false;
+	encap = esw->offloads.encap;
+	if (esw->mode == MLX5_ESWITCH_LEGACY ||
+	    (encap == DEVLINK_ESWITCH_ENCAP_MODE_NONE && !from_fdb)) {
+		allow_tunnel = true;
+		esw->offloads.num_block_encap++;
 	}
-
-	esw->offloads.num_block_encap++;
 	up_write(&esw->mode_lock);
-	return true;
+
+	return allow_tunnel;
 }
 
 void mlx5_eswitch_unblock_encap(struct mlx5_core_dev *dev)