return mlx5_eswitch_vport_rep(esw, vport);
 }
 
-int create_flow_rule_vport_sq(struct mlx5_ib_dev *dev,
-                             struct mlx5_ib_sq *sq)
+struct mlx5_flow_handle *create_flow_rule_vport_sq(struct mlx5_ib_dev *dev,
+                                                  struct mlx5_ib_sq *sq,
+                                                  u16 port)
 {
-       struct mlx5_flow_handle *flow_rule;
        struct mlx5_eswitch *esw = dev->mdev->priv.eswitch;
+       struct mlx5_eswitch_rep *rep;
 
-       if (!dev->is_rep)
-               return 0;
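+       /* Nothing to do for non-representor devices or without a port. */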
+       if (!dev->is_rep || !port)
+               return NULL;
 
-       flow_rule =
-               mlx5_eswitch_add_send_to_vport_rule(esw,
-                                                   dev->port[0].rep->vport,
-                                                   sq->base.mqp.qpn);
-       if (IS_ERR(flow_rule))
-               return PTR_ERR(flow_rule);
-       sq->flow_rule = flow_rule;
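+       /* Port numbers are 1-based; bail out if the port has no rep. */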
+       if (!dev->port[port - 1].rep)
+               return ERR_PTR(-EINVAL);
 
-       return 0;
+       rep = dev->port[port - 1].rep;
+
+       return mlx5_eswitch_add_send_to_vport_rule(esw, rep->vport,
+                                                  sq->base.mqp.qpn);
 }
 
                                           int vport_index);
 void mlx5_ib_register_vport_reps(struct mlx5_core_dev *mdev);
 void mlx5_ib_unregister_vport_reps(struct mlx5_core_dev *mdev);
-int create_flow_rule_vport_sq(struct mlx5_ib_dev *dev,
-                             struct mlx5_ib_sq *sq);
+struct mlx5_flow_handle *create_flow_rule_vport_sq(struct mlx5_ib_dev *dev,
+                                                  struct mlx5_ib_sq *sq,
+                                                  u16 port);
 struct net_device *mlx5_ib_get_rep_netdev(struct mlx5_eswitch *esw,
                                          int vport_index);
 #else /* CONFIG_MLX5_ESWITCH */
 
 static inline void mlx5_ib_register_vport_reps(struct mlx5_core_dev *mdev) {}
 static inline void mlx5_ib_unregister_vport_reps(struct mlx5_core_dev *mdev) {}
-static inline int create_flow_rule_vport_sq(struct mlx5_ib_dev *dev,
-                                           struct mlx5_ib_sq *sq)
+static inline
+struct mlx5_flow_handle *create_flow_rule_vport_sq(struct mlx5_ib_dev *dev,
+                                                  struct mlx5_ib_sq *sq,
+                                                  u16 port)
 {
-       return 0;
+       return NULL;
 }
 
 static inline
 
        struct mlx5_rate_limit rl;
 
        u8 rq_q_ctr_id;
+       u16 port;
 };
 
 static void get_cqs(enum ib_qp_type qp_type,
        mlx5_cmd_destroy_tis(dev->mdev, sq->tisn, to_mpd(pd)->uid);
 }
 
-static void destroy_flow_rule_vport_sq(struct mlx5_ib_dev *dev,
-                                      struct mlx5_ib_sq *sq)
+static void destroy_flow_rule_vport_sq(struct mlx5_ib_sq *sq)
 {
        if (sq->flow_rule)
                mlx5_del_flow_rules(sq->flow_rule);
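+       /* Clear the handle so a later modify can install a fresh rule. */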
+       sq->flow_rule = NULL;
 }
 
 static int create_raw_packet_qp_sq(struct mlx5_ib_dev *dev,
        if (err)
                goto err_umem;
 
-       err = create_flow_rule_vport_sq(dev, sq);
-       if (err)
-               goto err_flow;
-
        return 0;
 
-err_flow:
-       mlx5_core_destroy_sq_tracked(dev->mdev, &sq->base.mqp);
-
 err_umem:
        ib_umem_release(sq->ubuffer.umem);
        sq->ubuffer.umem = NULL;
 static void destroy_raw_packet_qp_sq(struct mlx5_ib_dev *dev,
                                     struct mlx5_ib_sq *sq)
 {
-       destroy_flow_rule_vport_sq(dev, sq);
+       destroy_flow_rule_vport_sq(sq);
        mlx5_core_destroy_sq_tracked(dev->mdev, &sq->base.mqp);
        ib_umem_release(sq->ubuffer.umem);
 }
        }
 
        if (modify_sq) {
+               struct mlx5_flow_handle *flow_rule;
+
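+               /*
+                * The steering rule for a representor SQ is installed at
+                * modify time, once the target port is known.
+                */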
                if (tx_affinity) {
                        err = modify_raw_packet_tx_affinity(dev->mdev, sq,
                                                            tx_affinity,
                                return err;
                }
 
-               return modify_raw_packet_qp_sq(dev->mdev, sq, sq_state,
-                                              raw_qp_param, qp->ibqp.pd);
+               flow_rule = create_flow_rule_vport_sq(dev, sq,
+                                                     raw_qp_param->port);
+               if (IS_ERR(flow_rule))
+                       return PTR_ERR(flow_rule);
+
+               err = modify_raw_packet_qp_sq(dev->mdev, sq, sq_state,
+                                             raw_qp_param, qp->ibqp.pd);
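+               /* On modify failure, drop the rule we just created. */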
+               if (err) {
+                       if (flow_rule)
+                               mlx5_del_flow_rules(flow_rule);
+                       return err;
+               }
+
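+               /* Modify succeeded: swap in the new steering rule. */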
+               if (flow_rule) {
+                       destroy_flow_rule_vport_sq(sq);
+                       sq->flow_rule = flow_rule;
+               }
+
+               return err;
        }
 
        return 0;
                        raw_qp_param.set_mask |= MLX5_RAW_QP_MOD_SET_RQ_Q_CTR_ID;
                }
 
+               if (attr_mask & IB_QP_PORT)
+                       raw_qp_param.port = attr->port_num;
+
                if (attr_mask & IB_QP_RATE_LIMIT) {
                        raw_qp_param.rl.rate = attr->rate_limit;