}
 }
 
+#ifdef CONFIG_MLX5_ESWITCH
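+/* Taking esw->mode_lock (when an eswitch is present) keeps the
+ * num_block_ipsec check and the num_block_tc update atomic with
+ * respect to concurrent TC offload requests, which take the same
+ * lock via mlx5_esw_hold() while installing flowers.
+ */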
+static int mlx5e_ipsec_block_tc_offload(struct mlx5_core_dev *mdev)
+{
+       struct mlx5_eswitch *esw = mdev->priv.eswitch;
+       int err = 0;
+
+       if (esw)
+               down_write(&esw->mode_lock);
+
+       if (mdev->num_block_ipsec) {
+               err = -EBUSY;
+               goto unlock;
+       }
+
+       mdev->num_block_tc++;
+
+unlock:
+       if (esw)
+               up_write(&esw->mode_lock);
+
+       return err;
+}
+#else
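+/* Without CONFIG_MLX5_ESWITCH there is no eswitch lock to take;
+ * only the plain check-and-count remains.
+ */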
+static int mlx5e_ipsec_block_tc_offload(struct mlx5_core_dev *mdev)
+{
+       if (mdev->num_block_ipsec)
+               return -EBUSY;
+
+       mdev->num_block_tc++;
+       return 0;
+}
+#endif
+
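+/* Drop one TC-block reference taken by mlx5e_ipsec_block_tc_offload(). */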
+static void mlx5e_ipsec_unblock_tc_offload(struct mlx5_core_dev *mdev)
+{
+       mdev->num_block_tc--;
+}
+
 int mlx5e_accel_ipsec_fs_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
 {
+       int err;
+
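+       /* IPsec packet offload and TC offload are mutually exclusive,
+        * so hold a TC block for the lifetime of a packet offload rule.
+        */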
+       if (sa_entry->attrs.type == XFRM_DEV_OFFLOAD_PACKET) {
+               err = mlx5e_ipsec_block_tc_offload(sa_entry->ipsec->mdev);
+               if (err)
+                       return err;
+       }
+
        if (sa_entry->attrs.dir == XFRM_DEV_OFFLOAD_OUT)
-               return tx_add_rule(sa_entry);
+               err = tx_add_rule(sa_entry);
+       else
+               err = rx_add_rule(sa_entry);
+
+       if (err)
+               goto err_out;
+
+       return 0;
 
-       return rx_add_rule(sa_entry);
+err_out:
+       if (sa_entry->attrs.type == XFRM_DEV_OFFLOAD_PACKET)
+               mlx5e_ipsec_unblock_tc_offload(sa_entry->ipsec->mdev);
+       return err;
 }
 
 void mlx5e_accel_ipsec_fs_del_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
        if (ipsec_rule->pkt_reformat)
                mlx5_packet_reformat_dealloc(mdev, ipsec_rule->pkt_reformat);
 
+       if (sa_entry->attrs.type == XFRM_DEV_OFFLOAD_PACKET)
+               mlx5e_ipsec_unblock_tc_offload(mdev);
+
        if (sa_entry->attrs.dir == XFRM_DEV_OFFLOAD_OUT) {
                tx_ft_put(sa_entry->ipsec, sa_entry->attrs.type);
                return;
 
 int mlx5e_accel_ipsec_fs_add_pol(struct mlx5e_ipsec_pol_entry *pol_entry)
 {
+       int err;
+
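+       /* Policies are installed only in IPsec packet offload mode, so
+        * the TC block is taken unconditionally.
+        */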
+       err = mlx5e_ipsec_block_tc_offload(pol_entry->ipsec->mdev);
+       if (err)
+               return err;
+
        if (pol_entry->attrs.dir == XFRM_DEV_OFFLOAD_OUT)
-               return tx_add_policy(pol_entry);
+               err = tx_add_policy(pol_entry);
+       else
+               err = rx_add_policy(pol_entry);
+
+       if (err)
+               goto err_out;
+
+       return 0;
 
-       return rx_add_policy(pol_entry);
+err_out:
+       mlx5e_ipsec_unblock_tc_offload(pol_entry->ipsec->mdev);
+       return err;
 }
 
 void mlx5e_accel_ipsec_fs_del_pol(struct mlx5e_ipsec_pol_entry *pol_entry)
 
        mlx5_del_flow_rules(ipsec_rule->rule);
 
+       mlx5e_ipsec_unblock_tc_offload(pol_entry->ipsec->mdev);
+
        if (pol_entry->attrs.dir == XFRM_DEV_OFFLOAD_IN) {
                rx_ft_put_policy(pol_entry->ipsec, pol_entry->attrs.family,
                                 pol_entry->attrs.prio, pol_entry->attrs.type);
 
        return netif_is_lag_port(dev) && rpriv && rpriv->rep->vport != MLX5_VPORT_UPLINK;
 }
 
+/* As the IPsec and TC processing order is not aligned between software and
+ * hardware offload, either IPsec offload or TC offload, not both, is allowed
+ * on a specific interface.
+ */
+static bool is_tc_ipsec_order_check_needed(struct net_device *filter, struct mlx5e_priv *priv)
+{
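+       /* The mutual-exclusion check is needed only where IPsec offload
+        * can be configured: IPsec support is compiled in, the filter is
+        * on this netdev itself, and the netdev is not a VF representor.
+        */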
+       if (!IS_ENABLED(CONFIG_MLX5_EN_IPSEC))
+               return false;
+
+       if (filter != priv->netdev)
+               return false;
+
+       if (mlx5e_eswitch_vf_rep(priv->netdev))
+               return false;
+
+       return true;
+}
+
+static int mlx5e_tc_block_ipsec_offload(struct net_device *filter, struct mlx5e_priv *priv)
+{
+       struct mlx5_core_dev *mdev = priv->mdev;
+
+       if (!is_tc_ipsec_order_check_needed(filter, priv))
+               return 0;
+
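+       /* An installed IPsec packet offload rule or policy already
+        * blocks TC on this device.
+        */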
+       if (mdev->num_block_tc)
+               return -EBUSY;
+
+       mdev->num_block_ipsec++;
+
+       return 0;
+}
+
+static void mlx5e_tc_unblock_ipsec_offload(struct net_device *filter, struct mlx5e_priv *priv)
+{
+       if (!is_tc_ipsec_order_check_needed(filter, priv))
+               return;
+
+       priv->mdev->num_block_ipsec--;
+}
+
 int mlx5e_configure_flower(struct net_device *dev, struct mlx5e_priv *priv,
                           struct flow_cls_offload *f, unsigned long flags)
 {
        if (!mlx5_esw_hold(priv->mdev))
                return -EBUSY;
 
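+       /* Refuse the flower rule if IPsec packet offload is active on
+        * this netdev; otherwise block IPsec for the rule's lifetime.
+        */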
+       err = mlx5e_tc_block_ipsec_offload(dev, priv);
+       if (err)
+               goto esw_release;
+
        mlx5_esw_get(priv->mdev);
 
        rcu_read_lock();
 err_free:
        mlx5e_flow_put(priv, flow);
 out:
+       mlx5e_tc_unblock_ipsec_offload(dev, priv);
        mlx5_esw_put(priv->mdev);
+esw_release:
        mlx5_esw_release(priv->mdev);
        return err;
 }
        trace_mlx5e_delete_flower(f);
        mlx5e_flow_put(priv, flow);
 
+       mlx5e_tc_unblock_ipsec_offload(dev, priv);
        mlx5_esw_put(priv->mdev);
        return 0;