return macvlan->mode == MACVLAN_MODE_PASSTHRU;
 }
 
-static int
-mlx5e_rep_indr_setup_block(struct net_device *netdev, struct Qdisc *sch,
-                          struct mlx5e_rep_priv *rpriv,
-                          struct flow_block_offload *f,
-                          flow_setup_cb_t *setup_cb,
-                          void *data,
-                          void (*cleanup)(struct flow_block_cb *block_cb))
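+/* Indirect block offload is supported only for clsact ingress/egress
+ * binds, and only when the target device is a tunnel device, a VLAN or
+ * passthru macvlan stacked on this representor, or an OVS internal port
+ * (egress only, when the eswitch supports internal ports).
+ */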
+static bool
+mlx5e_rep_check_indr_block_supported(struct mlx5e_rep_priv *rpriv,
+                                    struct net_device *netdev,
+                                    struct flow_block_offload *f)
 {
        struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
-       bool is_ovs_int_port = netif_is_ovs_master(netdev);
-       struct mlx5e_rep_indr_block_priv *indr_priv;
-       struct flow_block_cb *block_cb;
 
-       if (!mlx5e_tc_tun_device_to_offload(priv, netdev) &&
-           !(is_vlan_dev(netdev) && vlan_dev_real_dev(netdev) == rpriv->netdev) &&
-           !is_ovs_int_port) {
-               if (!(netif_is_macvlan(netdev) && macvlan_dev_real_dev(netdev) == rpriv->netdev))
-                       return -EOPNOTSUPP;
+       if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS &&
+           f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
+               return false;
+
+       if (mlx5e_tc_tun_device_to_offload(priv, netdev))
+               return true;
+
+       if (is_vlan_dev(netdev) && vlan_dev_real_dev(netdev) == rpriv->netdev)
+               return true;
+
+       if (netif_is_macvlan(netdev)) {
                if (!mlx5e_rep_macvlan_mode_supported(netdev)) {
                        netdev_warn(netdev, "Offloading ingress filter is supported only with macvlan passthru mode");
-                       return -EOPNOTSUPP;
+                       return false;
                }
+
+               if (macvlan_dev_real_dev(netdev) == rpriv->netdev)
+                       return true;
        }
 
-       if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS &&
-           f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
-               return -EOPNOTSUPP;
+       if (netif_is_ovs_master(netdev) && f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS &&
+           mlx5e_tc_int_port_supported(esw))
+               return true;
 
-       if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS && !is_ovs_int_port)
-               return -EOPNOTSUPP;
+       return false;
+}
+
+static int
+mlx5e_rep_indr_setup_block(struct net_device *netdev, struct Qdisc *sch,
+                          struct mlx5e_rep_priv *rpriv,
+                          struct flow_block_offload *f,
+                          flow_setup_cb_t *setup_cb,
+                          void *data,
+                          void (*cleanup)(struct flow_block_cb *block_cb))
+{
+       struct mlx5e_rep_indr_block_priv *indr_priv;
+       struct flow_block_cb *block_cb;
 
-       if (is_ovs_int_port && !mlx5e_tc_int_port_supported(esw))
+       if (!mlx5e_rep_check_indr_block_supported(rpriv, netdev, f))
                return -EOPNOTSUPP;
 
        f->unlocked_driver_cb = true;