return err;
 }
 
+/*
+ * mlx4_ib_add_dont_trap_rule - resolve the mlx4 sniffer steering mode(s)
+ * for a "don't trap" (continue-match) flow rule.
+ *
+ * @dev:       mlx4 device whose capabilities gate the feature
+ * @flow_attr: IB flow attribute; at most one spec, priority must be 0
+ * @type:      output array (at least two entries; caller zeroes it first)
+ *
+ * Returns 0 on success, -EOPNOTSUPP when the device cannot support such
+ * a rule, or -EINVAL when the single spec is not an Ethernet spec or its
+ * dst_mac mask is unsupported.
+ */
+static int mlx4_ib_add_dont_trap_rule(struct mlx4_dev *dev,
+                                     struct ib_flow_attr *flow_attr,
+                                     enum mlx4_net_trans_promisc_mode *type)
+{
+       int err = 0;
+
+       /* Needs the DMFS UC/MC sniffer capability, must not be in A0
+        * static steering mode, and only a single spec at priority 0 is
+        * supported.
+        */
+       if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DMFS_UC_MC_SNIFFER) ||
+           (dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC) ||
+           (flow_attr->num_of_specs > 1) || (flow_attr->priority != 0)) {
+               return -EOPNOTSUPP;
+       }
+
+       /* No spec at all: sniff both multicast and unicast traffic. */
+       if (flow_attr->num_of_specs == 0) {
+               type[0] = MLX4_FS_MC_SNIFFER;
+               type[1] = MLX4_FS_UC_SNIFFER;
+       } else {
+               union ib_flow_spec *ib_spec;
+
+               /* The single spec immediately follows the attr header. */
+               ib_spec = (union ib_flow_spec *)(flow_attr + 1);
+               if (ib_spec->type !=  IB_FLOW_SPEC_ETH)
+                       return -EINVAL;
+
+               /* if all is zero then MC and UC */
+               if (is_zero_ether_addr(ib_spec->eth.mask.dst_mac)) {
+                       type[0] = MLX4_FS_MC_SNIFFER;
+                       type[1] = MLX4_FS_UC_SNIFFER;
+               } else {
+                       /* Copy the mask with the multicast bit (bit 0 of
+                        * octet 0) flipped.
+                        */
+                       u8 mac[ETH_ALEN] = {ib_spec->eth.mask.dst_mac[0] ^ 0x01,
+                                           ib_spec->eth.mask.dst_mac[1],
+                                           ib_spec->eth.mask.dst_mac[2],
+                                           ib_spec->eth.mask.dst_mac[3],
+                                           ib_spec->eth.mask.dst_mac[4],
+                                           ib_spec->eth.mask.dst_mac[5]};
+
+                       /* Above xor was only on the MC bit; a non-empty mask
+                        * is valid only if this bit is set and rest are zero.
+                        */
+                       if (!is_zero_ether_addr(&mac[0]))
+                               return -EINVAL;
+
+                       /* MC bit of the value selects which side to sniff. */
+                       if (is_multicast_ether_addr(ib_spec->eth.val.dst_mac))
+                               type[0] = MLX4_FS_MC_SNIFFER;
+                       else
+                               type[0] = MLX4_FS_UC_SNIFFER;
+               }
+       }
+
+       return err;
+}
+
 static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp,
                                    struct ib_flow_attr *flow_attr,
                                    int domain)
        struct mlx4_dev *dev = (to_mdev(qp->device))->dev;
        int is_bonded = mlx4_is_bonded(dev);
 
-       if (flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP)
+       if ((flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP) &&
+           (flow_attr->type != IB_FLOW_ATTR_NORMAL))
                return ERR_PTR(-EOPNOTSUPP);
 
        memset(type, 0, sizeof(type));
 
        switch (flow_attr->type) {
        case IB_FLOW_ATTR_NORMAL:
-               type[0] = MLX4_FS_REGULAR;
+               /* If the dont-trap flag (continue match) is set, then under
+                * specific conditions traffic will be replicated to the given
+                * qp without stealing it.
+                */
+               if (unlikely(flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP)) {
+                       err = mlx4_ib_add_dont_trap_rule(dev,
+                                                        flow_attr,
+                                                        type);
+                       if (err)
+                               goto err_free;
+               } else {
+                       type[0] = MLX4_FS_REGULAR;
+               }
                break;
 
        case IB_FLOW_ATTR_ALL_DEFAULT:
                break;
 
        case IB_FLOW_ATTR_SNIFFER:
-               type[0] = MLX4_FS_UC_SNIFFER;
-               type[1] = MLX4_FS_MC_SNIFFER;
+               type[0] = MLX4_FS_MIRROR_RX_PORT;
+               type[1] = MLX4_FS_MIRROR_SX_PORT;
                break;
 
        default:
 
                [29] = "802.1ad offload support",
                [31] = "Modifying loopback source checks using UPDATE_QP support",
                [32] = "Loopback source checks support",
-               [33] = "RoCEv2 support"
+               [33] = "RoCEv2 support",
+               [34] = "DMFS Sniffer support (UC & MC)"
        };
        int i;
 
        if (field & 0x80)
                dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_FS_EN;
        dev_cap->fs_log_max_ucast_qp_range_size = field & 0x1f;
+       if (field & 0x20)
+               dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_DMFS_UC_MC_SNIFFER;
        MLX4_GET(field, outbox, QUERY_DEV_CAP_PORT_BEACON_OFFSET);
        if (field & 0x80)
                dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_PORT_BEACON;
 
        [MLX4_FS_REGULAR]   = 0x0,
        [MLX4_FS_ALL_DEFAULT] = 0x1,
        [MLX4_FS_MC_DEFAULT] = 0x3,
-       [MLX4_FS_UC_SNIFFER] = 0x4,
-       [MLX4_FS_MC_SNIFFER] = 0x5,
+       [MLX4_FS_MIRROR_RX_PORT] = 0x4,
+       [MLX4_FS_MIRROR_SX_PORT] = 0x5,
+       [MLX4_FS_UC_SNIFFER] = 0x6,
+       [MLX4_FS_MC_SNIFFER] = 0x7,
 };
 
 int mlx4_map_sw_to_hw_steering_mode(struct mlx4_dev *dev,