case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
        case MLX5_CMD_OP_SET_FLOW_TABLE_ROOT:
        case MLX5_CMD_OP_DEALLOC_ENCAP_HEADER:
+       case MLX5_CMD_OP_DESTROY_SCHEDULING_ELEMENT:
+       case MLX5_CMD_OP_DESTROY_QOS_PARA_VPORT:
                return MLX5_CMD_STAT_OK;
 
        case MLX5_CMD_OP_QUERY_HCA_CAP:
        case MLX5_CMD_OP_QUERY_FLOW_TABLE:
        case MLX5_CMD_OP_CREATE_FLOW_GROUP:
        case MLX5_CMD_OP_QUERY_FLOW_GROUP:
-
        case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
        case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
        case MLX5_CMD_OP_QUERY_FLOW_COUNTER:
        case MLX5_CMD_OP_ALLOC_ENCAP_HEADER:
+       case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT:
+       case MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT:
+       case MLX5_CMD_OP_MODIFY_SCHEDULING_ELEMENT:
+       case MLX5_CMD_OP_CREATE_QOS_PARA_VPORT:
                *status = MLX5_DRIVER_STATUS_ABORTED;
                *synd = MLX5_DRIVER_SYND;
                return -EIO;
        MLX5_COMMAND_STR_CASE(MODIFY_FLOW_TABLE);
        MLX5_COMMAND_STR_CASE(ALLOC_ENCAP_HEADER);
        MLX5_COMMAND_STR_CASE(DEALLOC_ENCAP_HEADER);
+       MLX5_COMMAND_STR_CASE(CREATE_SCHEDULING_ELEMENT);
+       MLX5_COMMAND_STR_CASE(DESTROY_SCHEDULING_ELEMENT);
+       MLX5_COMMAND_STR_CASE(QUERY_SCHEDULING_ELEMENT);
+       MLX5_COMMAND_STR_CASE(MODIFY_SCHEDULING_ELEMENT);
+       MLX5_COMMAND_STR_CASE(CREATE_QOS_PARA_VPORT);
+       MLX5_COMMAND_STR_CASE(DESTROY_QOS_PARA_VPORT);
        default: return "unknown command opcode";
        }
 }
 
 #include <linux/mlx5/cmd.h>
 #include "mlx5_core.h"
 
+/* Scheduling element fw management */
+
+/* Create a scheduling element in the @hierarchy scheduling hierarchy
+ * (e.g. SCHEDULING_HIERARCHY_E_SWITCH) from the scheduling_context
+ * pointed to by @ctx; the context is copied verbatim into the command
+ * inbox.  On success returns 0 and stores the firmware-assigned element
+ * id in *@element_id; otherwise returns the error from mlx5_cmd_exec().
+ */
+int mlx5_create_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy,
+                                      void *ctx, u32 *element_id)
+{
+       u32 in[MLX5_ST_SZ_DW(create_scheduling_element_in)] = {0};
+       /* Size the outbox with the _out layout, not the _in layout. */
+       u32 out[MLX5_ST_SZ_DW(create_scheduling_element_out)] = {0};
+       void *schedc;
+       int err;
+
+       schedc = MLX5_ADDR_OF(create_scheduling_element_in, in,
+                             scheduling_context);
+       MLX5_SET(create_scheduling_element_in, in, opcode,
+                MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT);
+       MLX5_SET(create_scheduling_element_in, in, scheduling_hierarchy,
+                hierarchy);
+       memcpy(schedc, ctx, MLX5_ST_SZ_BYTES(scheduling_context));
+
+       err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+       if (err)
+               return err;
+
+       *element_id = MLX5_GET(create_scheduling_element_out, out,
+                              scheduling_element_id);
+       return 0;
+}
+
+/* Modify the scheduling element @element_id in @hierarchy.  @ctx is the
+ * full scheduling_context to send; @modify_bitmask
+ * (MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_* flags) selects which
+ * context fields are applied.  Returns 0 or the mlx5_cmd_exec() error.
+ */
+int mlx5_modify_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy,
+                                      void *ctx, u32 element_id,
+                                      u32 modify_bitmask)
+{
+       u32 in[MLX5_ST_SZ_DW(modify_scheduling_element_in)] = {0};
+       /* Size the outbox with the _out layout, not the _in layout. */
+       u32 out[MLX5_ST_SZ_DW(modify_scheduling_element_out)] = {0};
+       void *schedc;
+
+       schedc = MLX5_ADDR_OF(modify_scheduling_element_in, in,
+                             scheduling_context);
+       MLX5_SET(modify_scheduling_element_in, in, opcode,
+                MLX5_CMD_OP_MODIFY_SCHEDULING_ELEMENT);
+       MLX5_SET(modify_scheduling_element_in, in, scheduling_element_id,
+                element_id);
+       MLX5_SET(modify_scheduling_element_in, in, modify_bitmask,
+                modify_bitmask);
+       MLX5_SET(modify_scheduling_element_in, in, scheduling_hierarchy,
+                hierarchy);
+       memcpy(schedc, ctx, MLX5_ST_SZ_BYTES(scheduling_context));
+
+       return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+}
+
+/* Destroy the scheduling element @element_id in @hierarchy.
+ * Returns 0 or the mlx5_cmd_exec() error.
+ */
+int mlx5_destroy_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy,
+                                       u32 element_id)
+{
+       u32 in[MLX5_ST_SZ_DW(destroy_scheduling_element_in)] = {0};
+       /* The _in and _out layouts happen to be the same size (0x200
+        * bits), but the outbox must still be sized by the _out layout.
+        */
+       u32 out[MLX5_ST_SZ_DW(destroy_scheduling_element_out)] = {0};
+
+       MLX5_SET(destroy_scheduling_element_in, in, opcode,
+                MLX5_CMD_OP_DESTROY_SCHEDULING_ELEMENT);
+       MLX5_SET(destroy_scheduling_element_in, in, scheduling_element_id,
+                element_id);
+       MLX5_SET(destroy_scheduling_element_in, in, scheduling_hierarchy,
+                hierarchy);
+
+       return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+}
+
 /* Finds an entry where we can register the given rate
  * If the rate already exists, return the entry where it is registered,
  * otherwise return the first available entry.
 
        MLX5_CMD_OP_QUERY_Q_COUNTER               = 0x773,
        MLX5_CMD_OP_SET_RATE_LIMIT                = 0x780,
        MLX5_CMD_OP_QUERY_RATE_LIMIT              = 0x781,
+       MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT      = 0x782,
+       MLX5_CMD_OP_DESTROY_SCHEDULING_ELEMENT     = 0x783,
+       MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT       = 0x784,
+       MLX5_CMD_OP_MODIFY_SCHEDULING_ELEMENT      = 0x785,
+       MLX5_CMD_OP_CREATE_QOS_PARA_VPORT         = 0x786,
+       MLX5_CMD_OP_DESTROY_QOS_PARA_VPORT        = 0x787,
        MLX5_CMD_OP_ALLOC_PD                      = 0x800,
        MLX5_CMD_OP_DEALLOC_PD                    = 0x801,
        MLX5_CMD_OP_ALLOC_UAR                     = 0x802,
 
+/* QoS capability layout, 0x800 bits total.  This patch adds the esw_*
+ * and max_* e-switch scheduling capability fields (meanings inferred
+ * from field names -- confirm against the PRM) and renames reserved
+ * fields to the reserved_at_<offset> convention.
+ */
 struct mlx5_ifc_qos_cap_bits {
        u8         packet_pacing[0x1];
-       u8         reserved_0[0x1f];
-       u8         reserved_1[0x20];
+       u8         esw_scheduling[0x1];
+       u8         reserved_at_2[0x1e];
+
+       u8         reserved_at_20[0x20];
+
        u8         packet_pacing_max_rate[0x20];
+
        u8         packet_pacing_min_rate[0x20];
-       u8         reserved_2[0x10];
+
+       u8         reserved_at_80[0x10];
        u8         packet_pacing_rate_table_size[0x10];
-       u8         reserved_3[0x760];
+
+       u8         esw_element_type[0x10];
+       u8         esw_tsar_type[0x10];
+
+       u8         reserved_at_c0[0x10];
+       u8         max_qos_para_vport[0x10];
+
+       u8         max_tsar_bw_share[0x20];
+
+       u8         reserved_at_100[0x700];
 };
 
 struct mlx5_ifc_per_protocol_networking_offload_caps_bits {
        struct mlx5_ifc_wq_bits wq;
 };
 
+/* Values for scheduling_context.element_type. */
+enum {
+       SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR = 0x0,
+       SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT = 0x1,
+       SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT_TC = 0x2,
+       SCHEDULING_CONTEXT_ELEMENT_TYPE_PARA_VPORT_TC = 0x3,
+};
+
+/* Generic scheduling (QoS) element context, 0x200 bits.
+ * element_attributes is presumably overlaid with one of the *_element
+ * layouts (tsar/vport/vport_tc) according to element_type -- confirm
+ * against the PRM.
+ */
+struct mlx5_ifc_scheduling_context_bits {
+       u8         element_type[0x8];
+       u8         reserved_at_8[0x18];
+
+       u8         element_attributes[0x20];
+
+       u8         parent_element_id[0x20];
+
+       u8         reserved_at_60[0x40];
+
+       u8         bw_share[0x20];
+
+       u8         max_average_bw[0x20];
+
+       u8         reserved_at_e0[0x120];
+};
+
 struct mlx5_ifc_rqtc_bits {
        u8         reserved_at_0[0xa0];
 
        u8         reserved_at_20[0x60];
 };
 
+/* element_attributes layout for VPORT_TC scheduling elements (per the
+ * SCHEDULING_CONTEXT_ELEMENT_TYPE_* naming -- confirm against the PRM).
+ */
+struct mlx5_ifc_vport_tc_element_bits {
+       u8         traffic_class[0x4];
+       u8         reserved_at_4[0xc];
+       u8         vport_number[0x10];
+};
+
+/* element_attributes layout for VPORT scheduling elements (per the
+ * SCHEDULING_CONTEXT_ELEMENT_TYPE_* naming -- confirm against the PRM).
+ */
+struct mlx5_ifc_vport_element_bits {
+       u8         reserved_at_0[0x10];
+       u8         vport_number[0x10];
+};
+
+/* Values for tsar_element.tsar_type. */
+enum {
+       TSAR_ELEMENT_TSAR_TYPE_DWRR = 0x0,
+       TSAR_ELEMENT_TSAR_TYPE_ROUND_ROBIN = 0x1,
+       TSAR_ELEMENT_TSAR_TYPE_ETS = 0x2,
+};
+
+/* element_attributes layout for TSAR scheduling elements. */
+struct mlx5_ifc_tsar_element_bits {
+       u8         reserved_at_0[0x8];
+       u8         tsar_type[0x8];
+       u8         reserved_at_10[0x10];
+};
+
 struct mlx5_ifc_teardown_hca_out_bits {
        u8         status[0x8];
        u8         reserved_at_8[0x18];
        u8         reserved_at_40[0x40];
 };
 
+/* 0x400-bit outbox for QUERY_SCHEDULING_ELEMENT.
+ * NOTE(review): unlike the sibling *_out layouts in this file, this one
+ * begins with opcode/op_mod rather than status/syndrome -- verify
+ * against the PRM that this is intentional.
+ */
+struct mlx5_ifc_query_scheduling_element_out_bits {
+       u8         opcode[0x10];
+       u8         reserved_at_10[0x10];
+
+       u8         reserved_at_20[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_at_40[0xc0];
+
+       struct mlx5_ifc_scheduling_context_bits scheduling_context;
+
+       u8         reserved_at_300[0x100];
+};
+
+/* Values for the scheduling_hierarchy field of the
+ * *_scheduling_element command inboxes.
+ */
+enum {
+       SCHEDULING_HIERARCHY_E_SWITCH = 0x2,
+};
+
+/* 0x200-bit inbox for QUERY_SCHEDULING_ELEMENT. */
+struct mlx5_ifc_query_scheduling_element_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_at_10[0x10];
+
+       u8         reserved_at_20[0x10];
+       u8         op_mod[0x10];
+
+       u8         scheduling_hierarchy[0x8];
+       u8         reserved_at_48[0x18];
+
+       u8         scheduling_element_id[0x20];
+
+       u8         reserved_at_80[0x180];
+};
+
 struct mlx5_ifc_query_rqt_out_bits {
        u8         status[0x8];
        u8         reserved_at_8[0x18];
        struct mlx5_ifc_sqc_bits ctx;
 };
 
+/* 0x200-bit outbox for MODIFY_SCHEDULING_ELEMENT. */
+struct mlx5_ifc_modify_scheduling_element_out_bits {
+       u8         status[0x8];
+       u8         reserved_at_8[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_at_40[0x1c0];
+};
+
+/* Flags for modify_scheduling_element_in.modify_bitmask; the flag names
+ * match scheduling_context field names, presumably selecting which
+ * fields the firmware applies -- confirm against the PRM.
+ */
+enum {
+       MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_BW_SHARE = 0x1,
+       MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_MAX_AVERAGE_BW = 0x2,
+};
+
+/* 0x400-bit inbox for MODIFY_SCHEDULING_ELEMENT; carries the full
+ * scheduling_context plus the modify_bitmask selector.
+ */
+struct mlx5_ifc_modify_scheduling_element_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_at_10[0x10];
+
+       u8         reserved_at_20[0x10];
+       u8         op_mod[0x10];
+
+       u8         scheduling_hierarchy[0x8];
+       u8         reserved_at_48[0x18];
+
+       u8         scheduling_element_id[0x20];
+
+       u8         reserved_at_80[0x20];
+
+       u8         modify_bitmask[0x20];
+
+       u8         reserved_at_c0[0x40];
+
+       struct mlx5_ifc_scheduling_context_bits scheduling_context;
+
+       u8         reserved_at_300[0x100];
+};
+
 struct mlx5_ifc_modify_rqt_out_bits {
        u8         status[0x8];
        u8         reserved_at_8[0x18];
        u8         reserved_at_60[0x20];
 };
 
+/* 0x200-bit outbox for DESTROY_SCHEDULING_ELEMENT. */
+struct mlx5_ifc_destroy_scheduling_element_out_bits {
+       u8         status[0x8];
+       u8         reserved_at_8[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_at_40[0x1c0];
+};
+
+/* 0x200-bit inbox for DESTROY_SCHEDULING_ELEMENT. */
+struct mlx5_ifc_destroy_scheduling_element_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_at_10[0x10];
+
+       u8         reserved_at_20[0x10];
+       u8         op_mod[0x10];
+
+       u8         scheduling_hierarchy[0x8];
+       u8         reserved_at_48[0x18];
+
+       u8         scheduling_element_id[0x20];
+
+       u8         reserved_at_80[0x180];
+};
+
 struct mlx5_ifc_destroy_rqt_out_bits {
        u8         status[0x8];
        u8         reserved_at_8[0x18];
        struct mlx5_ifc_sqc_bits ctx;
 };
 
+/* 0x200-bit outbox for CREATE_SCHEDULING_ELEMENT; returns the
+ * firmware-assigned scheduling_element_id.
+ */
+struct mlx5_ifc_create_scheduling_element_out_bits {
+       u8         status[0x8];
+       u8         reserved_at_8[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_at_40[0x40];
+
+       u8         scheduling_element_id[0x20];
+
+       u8         reserved_at_a0[0x160];
+};
+
+/* 0x400-bit inbox for CREATE_SCHEDULING_ELEMENT; carries the
+ * scheduling_context of the element to create.
+ */
+struct mlx5_ifc_create_scheduling_element_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_at_10[0x10];
+
+       u8         reserved_at_20[0x10];
+       u8         op_mod[0x10];
+
+       u8         scheduling_hierarchy[0x8];
+       u8         reserved_at_48[0x18];
+
+       u8         reserved_at_60[0xa0];
+
+       struct mlx5_ifc_scheduling_context_bits scheduling_context;
+
+       u8         reserved_at_300[0x100];
+};
+
 struct mlx5_ifc_create_rqt_out_bits {
        u8         status[0x8];
        u8         reserved_at_8[0x18];