                resp.response_length += sizeof(resp.cqe_comp_caps);
        }
 
+       if (field_avail(typeof(resp), packet_pacing_caps, uhw->outlen)) {
+               if (MLX5_CAP_QOS(mdev, packet_pacing) &&
+                   MLX5_CAP_GEN(mdev, qos)) {
+                       resp.packet_pacing_caps.qp_rate_limit_max =
+                               MLX5_CAP_QOS(mdev, packet_pacing_max_rate);
+                       resp.packet_pacing_caps.qp_rate_limit_min =
+                               MLX5_CAP_QOS(mdev, packet_pacing_min_rate);
+                       resp.packet_pacing_caps.supported_qpts |=
+                               1 << IB_QPT_RAW_PACKET;
+               }
+               resp.response_length += sizeof(resp.packet_pacing_caps);
+       }
+
        if (uhw->outlen) {
                err = ib_copy_to_udata(uhw, &resp, resp.response_length);
 
 
        __u32 supported_format; /* enum mlx5_ib_cqe_comp_res_format */
 };
 
+struct mlx5_packet_pacing_caps {
+       __u32 qp_rate_limit_min;
+       __u32 qp_rate_limit_max; /* In kbps */
+
+       /* Corresponding bit will be set if the QP type from
+        * 'enum ib_qp_type' is supported, e.g.
+        * supported_qpts |= 1 << IB_QPT_RAW_PACKET
+        */
+       __u32 supported_qpts;
+       __u32 reserved;
+};
+
 struct mlx5_ib_query_device_resp {
        __u32   comp_mask;
        __u32   response_length;
        struct  mlx5_ib_tso_caps tso_caps;
        struct  mlx5_ib_rss_caps rss_caps;
        struct  mlx5_ib_cqe_comp_caps cqe_comp_caps;
+       struct  mlx5_packet_pacing_caps packet_pacing_caps;
        __u32   mlx5_ib_support_multi_pkt_send_wqes;
        __u32   reserved;
 };
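
For reference, a minimal consumer-side sketch (not part of this patch) of how the new packet_pacing_caps could be interpreted once the response has been copied out; the helper name is illustrative only, and IB_QPT_RAW_PACKET is taken from enum ib_qp_type:

/*
 * Illustrative helper, assuming a struct mlx5_ib_query_device_resp that
 * mlx5_ib_query_device() has already filled in: report whether rate
 * limiting is advertised for raw packet QPs and, if so, return the
 * supported range in kbps.
 */
static bool qp_rate_limit_range(const struct mlx5_ib_query_device_resp *resp,
                                __u32 *min_kbps, __u32 *max_kbps)
{
        const struct mlx5_packet_pacing_caps *caps = &resp->packet_pacing_caps;

        if (!(caps->supported_qpts & (1 << IB_QPT_RAW_PACKET)))
                return false;

        *min_kbps = caps->qp_rate_limit_min;
        *max_kbps = caps->qp_rate_limit_max;
        return true;
}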