        if (field_avail(typeof(resp), reserved, uhw->outlen))
                resp.response_length += sizeof(resp.reserved);
 
+       if (field_avail(typeof(resp), cqe_comp_caps, uhw->outlen)) {
+               resp.cqe_comp_caps.max_num =
+                       MLX5_CAP_GEN(dev->mdev, cqe_compression) ?
+                       MLX5_CAP_GEN(dev->mdev, cqe_compression_max_num) : 0;
+               resp.cqe_comp_caps.supported_format =
+                       MLX5_IB_CQE_RES_FORMAT_HASH |
+                       MLX5_IB_CQE_RES_FORMAT_CSUM;
+               resp.response_length += sizeof(resp.cqe_comp_caps);
+       }
+
        if (uhw->outlen) {
                err = ib_copy_to_udata(uhw, &resp, resp.response_length);
 
 
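For context, the new block follows the existing extensibility convention in mlx5_ib_query_device(): a capability struct is written out only when the caller's output buffer (uhw->outlen) already extends past it, and response_length records how much was actually filled so ib_copy_to_udata() above copies exactly that. A minimal standalone sketch of the gating, assuming field_avail() is the driver's usual offsetof()/sizeof() check (the response struct here is trimmed down, so offsets are illustrative only):

/*
 * Illustrative only: mirrors the field_avail() gating used in the hunk
 * above, with a simplified response struct.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Assumed equivalent of the driver macro: field ends within the first sz bytes. */
#define field_avail(type, fld, sz) \
	(offsetof(type, fld) + sizeof(((type *)0)->fld) <= (sz))

struct cqe_comp_caps {
	uint32_t max_num;
	uint32_t supported_format;
};

struct query_device_resp {
	uint32_t comp_mask;
	uint32_t response_length;
	struct cqe_comp_caps cqe_comp_caps;	/* new field at the tail */
};

int main(void)
{
	/* A new consumer passes a buffer covering the whole struct ... */
	size_t outlen_new = sizeof(struct query_device_resp);
	/* ... an old consumer's layout ends before cqe_comp_caps existed. */
	size_t outlen_old = offsetof(struct query_device_resp, cqe_comp_caps);

	printf("new outlen -> %d\n",
	       field_avail(struct query_device_resp, cqe_comp_caps, outlen_new));
	printf("old outlen -> %d\n",
	       field_avail(struct query_device_resp, cqe_comp_caps, outlen_old));
	return 0;
}

Only after that check passes does response_length grow by sizeof(resp.cqe_comp_caps), so an older binary never has the new bytes copied past the end of its buffer.
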
        __u8 reserved[7];
 };
 
+enum mlx5_ib_cqe_comp_res_format {
+       MLX5_IB_CQE_RES_FORMAT_HASH     = 1 << 0,
+       MLX5_IB_CQE_RES_FORMAT_CSUM     = 1 << 1,
+       MLX5_IB_CQE_RES_RESERVED        = 1 << 2,
+};
+
+struct mlx5_ib_cqe_comp_caps {
+       __u32 max_num;
+       __u32 supported_format; /* enum mlx5_ib_cqe_comp_res_format */
+};
+
 struct mlx5_ib_query_device_resp {
        __u32   comp_mask;
        __u32   response_length;
        struct  mlx5_ib_tso_caps tso_caps;
        struct  mlx5_ib_rss_caps rss_caps;
+       struct  mlx5_ib_cqe_comp_caps cqe_comp_caps;
        __u32   mlx5_ib_support_multi_pkt_send_wqes;
        __u32   reserved;
 };