return err;
 }
 
-static int mlx5e_modify_sq(struct mlx5e_sq *sq, int curr_state, int next_state)
+static int mlx5e_modify_sq(struct mlx5e_sq *sq, int curr_state,
+                          int next_state, bool update_rl, int rl_index)
 {
        struct mlx5e_channel *c = sq->channel;
        struct mlx5e_priv *priv = c->priv;
 
        MLX5_SET(modify_sq_in, in, sq_state, curr_state);
        MLX5_SET(sqc, sqc, state, next_state);
+       if (update_rl && next_state == MLX5_SQC_STATE_RDY) {
+               MLX5_SET64(modify_sq_in, in, modify_bitmask, 1);
+               MLX5_SET(sqc,  sqc, packet_pacing_rate_limit_index, rl_index);
+       }
 
        err = mlx5_core_modify_sq(mdev, sq->sqn, in, inlen);
 
        struct mlx5_core_dev *mdev = priv->mdev;
 
        mlx5_core_destroy_sq(mdev, sq->sqn);
+       if (sq->rate_limit)
+               mlx5_rl_remove_rate(mdev, sq->rate_limit);
 }
 
 static int mlx5e_open_sq(struct mlx5e_channel *c,
        if (err)
                goto err_destroy_sq;
 
-       err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RST, MLX5_SQC_STATE_RDY);
+       err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RST, MLX5_SQC_STATE_RDY,
+                             false, 0);
        if (err)
                goto err_disable_sq;
 
                if (mlx5e_sq_has_room_for(sq, 1))
                        mlx5e_send_nop(sq, true);
 
-               mlx5e_modify_sq(sq, MLX5_SQC_STATE_RDY, MLX5_SQC_STATE_ERR);
+               mlx5e_modify_sq(sq, MLX5_SQC_STATE_RDY, MLX5_SQC_STATE_ERR,
+                               false, 0);
        }
 
        while (sq->cc != sq->pc) /* wait till sq is empty */
                        ix + i * priv->params.num_channels;
 }
 
+/* Apply a HW rate limit to a single send queue.
+ *
+ * @dev:  netdev owning the SQ (used for netdev_priv() and error logging)
+ * @sq:   send queue to (re)configure
+ * @rate: requested limit in Kb/sec (caller converts); 0 means unlimited
+ *
+ * Drops any rate-limit table entry the SQ currently holds, adds a new
+ * entry for @rate (skipped when @rate == 0, leaving rl_index at 0 -
+ * presumably the "no limit" index; confirm against mlx5_rl_* docs),
+ * then points the SQ at that index with a RDY->RDY MODIFY_SQ.
+ * On any failure sq->rate_limit stays 0 and the freshly added table
+ * entry is released, so the table refcounting stays balanced.
+ *
+ * Returns 0 on success or a negative errno from the FW commands.
+ */
+static int mlx5e_set_sq_maxrate(struct net_device *dev,
+                               struct mlx5e_sq *sq, u32 rate)
+{
+       struct mlx5e_priv *priv = netdev_priv(dev);
+       struct mlx5_core_dev *mdev = priv->mdev;
+       u16 rl_index = 0;
+       int err;
+
+       if (rate == sq->rate_limit)
+               /* nothing to do */
+               return 0;
+
+       if (sq->rate_limit)
+               /* remove current rl index to free space to next ones */
+               mlx5_rl_remove_rate(mdev, sq->rate_limit);
+
+       /* From here on the SQ holds no table entry until we succeed. */
+       sq->rate_limit = 0;
+
+       if (rate) {
+               /* Reserve (or refcount) a rate-table slot for this rate. */
+               err = mlx5_rl_add_rate(mdev, rate, &rl_index);
+               if (err) {
+                       netdev_err(dev, "Failed configuring rate %u: %d\n",
+                                  rate, err);
+                       return err;
+               }
+       }
+
+       /* RDY->RDY transition just updates packet_pacing_rate_limit_index. */
+       err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RDY,
+                             MLX5_SQC_STATE_RDY, true, rl_index);
+       if (err) {
+               netdev_err(dev, "Failed configuring rate %u: %d\n",
+                          rate, err);
+               /* remove the rate from the table */
+               if (rate)
+                       mlx5_rl_remove_rate(mdev, rate);
+               return err;
+       }
+
+       sq->rate_limit = rate;
+       return 0;
+}
+
+/* .ndo_set_tx_maxrate callback: limit the TX rate of one txq.
+ *
+ * @dev:   the netdev
+ * @index: txq index as seen by the stack
+ * @rate:  requested limit in Mb/sec; 0 clears the limit
+ *
+ * Validates HW support and range, converts the rate to the HW unit,
+ * and programs the backing SQ when the interface is up.  The value is
+ * always cached in priv->tx_rates[index] (under state_lock) so it can
+ * be reapplied when channels are reopened.
+ *
+ * Returns 0 on success, -EINVAL if the device lacks rate limiting,
+ * -ERANGE if @rate is outside the HW-supported range, or the error
+ * from programming the SQ.
+ */
+static int mlx5e_set_tx_maxrate(struct net_device *dev, int index, u32 rate)
+{
+       struct mlx5e_priv *priv = netdev_priv(dev);
+       struct mlx5_core_dev *mdev = priv->mdev;
+       struct mlx5e_sq *sq = priv->txq_to_sq_map[index];
+       int err = 0;
+
+       if (!mlx5_rl_is_supported(mdev)) {
+               netdev_err(dev, "Rate limiting is not supported on this device\n");
+               return -EINVAL;
+       }
+
+       /* rate is given in Mb/sec, HW config is in Kb/sec */
+       /* NOTE(review): << 10 multiplies by 1024, not 1000 - looks like
+        * the FW unit is actually 1024 bits/sec; confirm against the
+        * mlx5 rate-limit table spec before treating this as exact Kb.
+        */
+       rate = rate << 10;
+
+       /* Check whether rate in valid range, 0 is always valid */
+       if (rate && !mlx5_rl_is_in_range(mdev, rate)) {
+               netdev_err(dev, "TX rate %u, is not in range\n", rate);
+               return -ERANGE;
+       }
+
+       /* state_lock serializes against channel open/close; only touch
+        * HW when the SQs actually exist.
+        */
+       mutex_lock(&priv->state_lock);
+       if (test_bit(MLX5E_STATE_OPENED, &priv->state))
+               err = mlx5e_set_sq_maxrate(dev, sq, rate);
+       if (!err)
+               priv->tx_rates[index] = rate;
+       mutex_unlock(&priv->state_lock);
+
+       return err;
+}
+
 static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
                              struct mlx5e_channel_param *cparam,
                              struct mlx5e_channel **cp)
        struct net_device *netdev = priv->netdev;
        int cpu = mlx5e_get_cpu(priv, ix);
        struct mlx5e_channel *c;
+       struct mlx5e_sq *sq;
        int err;
+       int i;
 
        c = kzalloc_node(sizeof(*c), GFP_KERNEL, cpu_to_node(cpu));
        if (!c)
        if (err)
                goto err_close_icosq;
 
+       for (i = 0; i < priv->params.num_tc; i++) {
+               u32 txq_ix = priv->channeltc_to_txq_map[ix][i];
+
+               if (priv->tx_rates[txq_ix]) {
+                       sq = priv->txq_to_sq_map[txq_ix];
+                       mlx5e_set_sq_maxrate(priv->netdev, sq,
+                                            priv->tx_rates[txq_ix]);
+               }
+       }
+
        err = mlx5e_open_rq(c, &cparam->rq, &c->rq);
        if (err)
                goto err_close_sqs;
        .ndo_set_features        = mlx5e_set_features,
        .ndo_change_mtu          = mlx5e_change_mtu,
        .ndo_do_ioctl            = mlx5e_ioctl,
+       .ndo_set_tx_maxrate      = mlx5e_set_tx_maxrate,
 #ifdef CONFIG_RFS_ACCEL
        .ndo_rx_flow_steer       = mlx5e_rx_flow_steer,
 #endif
        .ndo_do_ioctl            = mlx5e_ioctl,
        .ndo_udp_tunnel_add      = mlx5e_add_vxlan_port,
        .ndo_udp_tunnel_del      = mlx5e_del_vxlan_port,
+       .ndo_set_tx_maxrate      = mlx5e_set_tx_maxrate,
        .ndo_features_check      = mlx5e_features_check,
 #ifdef CONFIG_RFS_ACCEL
        .ndo_rx_flow_steer       = mlx5e_rx_flow_steer,