#define BYTES_IN_MBIT 125000
 
+/* Validate a user-supplied rate in Bytes/sec.  Rates below BYTES_IN_MBIT
+ * (1 Mbit/sec) cannot be expressed in whole Mbits and are rejected.
+ *
+ * Returns 0 if the rate is usable, -EINVAL (with a warning) otherwise.
+ */
+int mlx5e_qos_bytes_rate_check(struct mlx5_core_dev *mdev, u64 nbytes)
+{
+       if (nbytes >= BYTES_IN_MBIT)
+               return 0;
+
+       qos_warn(mdev, "Input rate (%llu Bytes/sec) below minimum supported (%u Bytes/sec)\n",
+                nbytes, BYTES_IN_MBIT);
+       return -EINVAL;
+}
+
+/* Convert a rate from Bytes/sec to Mbits/sec (truncating the remainder). */
+static u32 mlx5e_qos_bytes2mbits(struct mlx5_core_dev *mdev, u64 nbytes)
+{
+       u32 mbits = div_u64(nbytes, BYTES_IN_MBIT);
+
+       return mbits;
+}
+
 int mlx5e_qos_max_leaf_nodes(struct mlx5_core_dev *mdev)
 {
        return min(MLX5E_QOS_MAX_LEAF_NODES, mlx5_qos_max_leaf_nodes(mdev));
 
        return err;
 }
+
+/* Per-netdev mqprio rate-limit state: one HW root scheduling node plus one
+ * leaf node per traffic class.
+ */
+struct mlx5e_mqprio_rl {
+       struct mlx5_core_dev *mdev;     /* device owning the QoS nodes */
+       u32 root_id;                    /* HW id of the root scheduling node */
+       u32 *leaves_id;                 /* HW ids of the per-TC leaf nodes */
+       u8 num_tc;                      /* number of valid entries in leaves_id */
+};
+
+/* Allocate a zeroed rl container.  Returns NULL on allocation failure.
+ * Pair with mlx5e_mqprio_rl_free().
+ */
+struct mlx5e_mqprio_rl *mlx5e_mqprio_rl_alloc(void)
+{
+       struct mlx5e_mqprio_rl *rl;
+
+       /* sizeof(*rl) rather than sizeof(struct ...) — stays correct if the
+        * variable's type ever changes (kernel coding-style preference).
+        */
+       rl = kvzalloc(sizeof(*rl), GFP_KERNEL);
+       return rl;
+}
+
+/* Release the rl container itself.  Does NOT destroy the HW nodes —
+ * call mlx5e_mqprio_rl_cleanup() first if init succeeded.
+ */
+void mlx5e_mqprio_rl_free(struct mlx5e_mqprio_rl *rl)
+{
+       kvfree(rl);
+}
+
+/* Build the HW rate-limit hierarchy for channel-mode mqprio: one root
+ * scheduling node with one leaf node per TC.  Each leaf's max average
+ * bandwidth comes from max_rate[tc] (Bytes/sec, converted to Mbits/sec).
+ *
+ * Returns 0 or a negative errno.  On failure every node created so far is
+ * destroyed and leaves_id is freed, so the caller only frees rl itself.
+ */
+int mlx5e_mqprio_rl_init(struct mlx5e_mqprio_rl *rl, struct mlx5_core_dev *mdev, u8 num_tc,
+                        u64 max_rate[])
+{
+       int err;
+       int tc;
+
+       if (!mlx5_qos_is_supported(mdev)) {
+               qos_warn(mdev, "Missing QoS capabilities. Try disabling SRIOV or use a supported device.");
+               return -EOPNOTSUPP;
+       }
+       if (num_tc > mlx5e_qos_max_leaf_nodes(mdev))
+               return -EINVAL;
+
+       rl->mdev = mdev;
+       rl->num_tc = num_tc;
+       rl->leaves_id = kvcalloc(num_tc, sizeof(*rl->leaves_id), GFP_KERNEL);
+       if (!rl->leaves_id)
+               return -ENOMEM;
+
+       err = mlx5_qos_create_root_node(mdev, &rl->root_id);
+       if (err)
+               goto err_free_leaves;
+
+       qos_dbg(mdev, "Root created, id %#x\n", rl->root_id);
+
+       for (tc = 0; tc < num_tc; tc++) {
+               u32 max_average_bw;
+
+               /* NOTE(review): the third argument (0) presumably is the
+                * bw_share / minimum-guarantee parameter — confirm against
+                * mlx5_qos_create_leaf_node's prototype.
+                */
+               max_average_bw = mlx5e_qos_bytes2mbits(mdev, max_rate[tc]);
+               err = mlx5_qos_create_leaf_node(mdev, rl->root_id, 0, max_average_bw,
+                                               &rl->leaves_id[tc]);
+               if (err)
+                       goto err_destroy_leaves;
+
+               qos_dbg(mdev, "Leaf[%d] created, id %#x, max average bw %u Mbits/sec\n",
+                       tc, rl->leaves_id[tc], max_average_bw);
+       }
+       return 0;
+
+err_destroy_leaves:
+       /* Unwind in reverse creation order: leaves first, then the root. */
+       while (--tc >= 0)
+               mlx5_qos_destroy_node(mdev, rl->leaves_id[tc]);
+       mlx5_qos_destroy_node(mdev, rl->root_id);
+err_free_leaves:
+       kvfree(rl->leaves_id);
+       return err;
+}
+
+/* Tear down the HW hierarchy built by mlx5e_mqprio_rl_init(): every leaf
+ * node first, then the root, then the leaves_id array.
+ */
+void mlx5e_mqprio_rl_cleanup(struct mlx5e_mqprio_rl *rl)
+{
+       int i = 0;
+
+       /* Leaves must be destroyed before their root node. */
+       while (i < rl->num_tc) {
+               mlx5_qos_destroy_node(rl->mdev, rl->leaves_id[i]);
+               i++;
+       }
+       mlx5_qos_destroy_node(rl->mdev, rl->root_id);
+       kvfree(rl->leaves_id);
+}
+
+/* Look up the HW leaf-node id for a TC.  Returns 0 and stores the id in
+ * *hw_id, or -EINVAL for an out-of-range TC.
+ */
+int mlx5e_mqprio_rl_get_node_hw_id(struct mlx5e_mqprio_rl *rl, int tc, u32 *hw_id)
+{
+       /* tc is signed: reject negative values too, or a negative index
+        * would slip past the upper-bound check and read out of bounds.
+        */
+       if (tc < 0 || tc >= rl->num_tc)
+               return -EINVAL;
+
+       *hw_id = rl->leaves_id[tc];
+       return 0;
+}
 
                mlx5e_close_cq(&c->sq[tc].cq);
 }
 
+/* Map a txq index back to its TC: the TC whose [offset, offset + count)
+ * range contains txq.
+ *
+ * NOTE(review): "txq - offset < count" relies on unsigned arithmetic — a
+ * txq below the TC's offset wraps to a huge value and fails the < count
+ * test.  This assumes tc_to_txq offset/count are unsigned; confirm against
+ * struct netdev_tc_txq.
+ *
+ * Returns the TC index, or -ENOENT (with a WARN) if no TC matches.
+ */
+static int mlx5e_mqprio_txq_to_tc(struct netdev_tc_txq *tc_to_txq, unsigned int txq)
+{
+       int tc;
+
+       for (tc = 0; tc < TC_MAX_QUEUE; tc++)
+               if (txq - tc_to_txq[tc].offset < tc_to_txq[tc].count)
+                       return tc;
+
+       WARN(1, "Unexpected TCs configuration. No match found for txq %u", txq);
+       return -ENOENT;
+}
+
+/* Resolve the QoS queue-group HW id for a txq.  When channel-mode mqprio
+ * rate limiting is not active, reports hw_id 0.
+ *
+ * Returns 0 on success or a negative errno from the TC lookup.
+ */
+static int mlx5e_txq_get_qos_node_hw_id(struct mlx5e_params *params, int txq_ix,
+                                       u32 *hw_id)
+{
+       bool rl_active;
+       int tc;
+
+       rl_active = params->mqprio.mode == TC_MQPRIO_MODE_CHANNEL &&
+                   params->mqprio.channel.rl;
+       if (!rl_active) {
+               *hw_id = 0;
+               return 0;
+       }
+
+       tc = mlx5e_mqprio_txq_to_tc(params->mqprio.tc_to_txq, txq_ix);
+       if (tc < 0)
+               return tc;
+
+       return mlx5e_mqprio_rl_get_node_hw_id(params->mqprio.channel.rl, tc, hw_id);
+}
+
 static int mlx5e_open_sqs(struct mlx5e_channel *c,
                          struct mlx5e_params *params,
                          struct mlx5e_channel_param *cparam)
 
        for (tc = 0; tc < mlx5e_get_dcb_num_tc(params); tc++) {
                int txq_ix = c->ix + tc * params->num_channels;
+               u32 qos_queue_group_id;
+
+               err = mlx5e_txq_get_qos_node_hw_id(params, txq_ix, &qos_queue_group_id);
+               if (err)
+                       goto err_close_sqs;
 
                err = mlx5e_open_txqsq(c, c->priv->tisn[c->lag_port][tc], txq_ix,
-                                      params, &cparam->txq_sq, &c->sq[tc], tc, 0,
+                                      params, &cparam->txq_sq, &c->sq[tc], tc,
+                                      qos_queue_group_id,
                                       &c->priv->channel_stats[c->ix].sq[tc]);
                if (err)
                        goto err_close_sqs;
                netdev_warn(netdev, "netif_set_real_num_rx_queues failed, %d\n", err);
                goto err_txqs;
        }
+       if (priv->mqprio_rl != priv->channels.params.mqprio.channel.rl) {
+               if (priv->mqprio_rl) {
+                       mlx5e_mqprio_rl_cleanup(priv->mqprio_rl);
+                       mlx5e_mqprio_rl_free(priv->mqprio_rl);
+               }
+               priv->mqprio_rl = priv->channels.params.mqprio.channel.rl;
+       }
 
        return 0;
 
 {
        params->mqprio.mode = TC_MQPRIO_MODE_DCB;
        params->mqprio.num_tc = num_tc;
+       params->mqprio.channel.rl = NULL;
        mlx5e_mqprio_build_default_tc_to_txq(params->mqprio.tc_to_txq, num_tc,
                                             params->num_channels);
 }
 
+/* Switch params to channel-mode mqprio.  rl may be NULL when no per-TC
+ * rate limiting was requested; ownership of a non-NULL rl passes to params.
+ */
 static void mlx5e_params_mqprio_channel_set(struct mlx5e_params *params,
-                                           struct tc_mqprio_qopt *qopt)
+                                           struct tc_mqprio_qopt *qopt,
+                                           struct mlx5e_mqprio_rl *rl)
 {
        params->mqprio.mode = TC_MQPRIO_MODE_CHANNEL;
        params->mqprio.num_tc = qopt->num_tc;
+       params->mqprio.channel.rl = rl;
        mlx5e_mqprio_build_tc_to_txq(params->mqprio.tc_to_txq, qopt);
 }
 
                        netdev_err(netdev, "Min tx rate is not supported\n");
                        return -EINVAL;
                }
+
                if (mqprio->max_rate[i]) {
-                       netdev_err(netdev, "Max tx rate is not supported\n");
-                       return -EINVAL;
+                       int err;
+
+                       err = mlx5e_qos_bytes_rate_check(priv->mdev, mqprio->max_rate[i]);
+                       if (err)
+                               return err;
                }
 
                if (mqprio->qopt.offset[i] != agg_count) {
        return 0;
 }
 
+/* True when at least one TC has a non-zero max_rate configured. */
+static bool mlx5e_mqprio_rate_limit(struct tc_mqprio_qopt_offload *mqprio)
+{
+       int i;
+
+       for (i = 0; i < mqprio->qopt.num_tc; i++) {
+               if (mqprio->max_rate[i])
+                       return true;
+       }
+
+       return false;
+}
+
 static int mlx5e_setup_tc_mqprio_channel(struct mlx5e_priv *priv,
                                         struct tc_mqprio_qopt_offload *mqprio)
 {
        mlx5e_fp_preactivate preactivate;
        struct mlx5e_params new_params;
+       struct mlx5e_mqprio_rl *rl;
        bool nch_changed;
        int err;
 
        if (err)
                return err;
 
+       rl = NULL;
+       if (mlx5e_mqprio_rate_limit(mqprio)) {
+               rl = mlx5e_mqprio_rl_alloc();
+               if (!rl)
+                       return -ENOMEM;
+               err = mlx5e_mqprio_rl_init(rl, priv->mdev, mqprio->qopt.num_tc,
+                                          mqprio->max_rate);
+               if (err) {
+                       mlx5e_mqprio_rl_free(rl);
+                       return err;
+               }
+       }
+
        new_params = priv->channels.params;
-       mlx5e_params_mqprio_channel_set(&new_params, &mqprio->qopt);
+       mlx5e_params_mqprio_channel_set(&new_params, &mqprio->qopt, rl);
 
        nch_changed = mlx5e_get_dcb_num_tc(&priv->channels.params) > 1;
        preactivate = nch_changed ? mlx5e_num_channels_changed_ctx :
                mlx5e_update_netdev_queues_ctx;
-       return mlx5e_safe_switch_params(priv, &new_params, preactivate, NULL, true);
+       err = mlx5e_safe_switch_params(priv, &new_params, preactivate, NULL, true);
+       if (err && rl) {
+               mlx5e_mqprio_rl_cleanup(rl);
+               mlx5e_mqprio_rl_free(rl);
+       }
+
+       return err;
 }
 
 static int mlx5e_setup_tc_mqprio(struct mlx5e_priv *priv,
                kfree(priv->htb.qos_sq_stats[i]);
        kvfree(priv->htb.qos_sq_stats);
 
+       if (priv->mqprio_rl) {
+               mlx5e_mqprio_rl_cleanup(priv->mqprio_rl);
+               mlx5e_mqprio_rl_free(priv->mqprio_rl);
+       }
+
        memset(priv, 0, sizeof(*priv));
 }