        int max_rq_sg;
        int max_sq_sg;
        u64 min_page_size = 1ull << MLX5_CAP_GEN(mdev, log_pg_sz);
+       struct mlx5_ib_query_device_resp resp = {};
+       size_t resp_len;
+       u64 max_tso;
 
-       if (uhw->inlen || uhw->outlen)
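+       /* The response must always be able to carry at least the comp_mask
+        * and response_length fields; a shorter output buffer is malformed.
+        */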
+       resp_len = sizeof(resp.comp_mask) + sizeof(resp.response_length);
+       if (uhw->outlen && uhw->outlen < resp_len)
+               return -EINVAL;
+       else
+               resp.response_length = resp_len;
+
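+       /* This kernel parses no driver-specific input for query_device, so
+        * any user input provided must be all zeros.
+        */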
+       if (uhw->inlen && !ib_is_udata_cleared(uhw, 0, uhw->inlen))
                return -EINVAL;
 
        memset(props, 0, sizeof(*props));
        if (MLX5_CAP_GEN(mdev, block_lb_mc))
                props->device_cap_flags |= IB_DEVICE_BLOCK_MULTICAST_LOOPBACK;
 
-       if (MLX5_CAP_GEN(dev->mdev, eth_net_offloads) &&
-           (MLX5_CAP_ETH(dev->mdev, csum_cap)))
+       if (MLX5_CAP_GEN(dev->mdev, eth_net_offloads)) {
+               if (MLX5_CAP_ETH(mdev, csum_cap))
                        props->device_cap_flags |= IB_DEVICE_RAW_IP_CSUM;
 
+               if (field_avail(typeof(resp), tso_caps, uhw->outlen)) {
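+                       /* max_lso_cap is the log2 of the largest TSO payload
+                        * the device supports; zero means TSO is unsupported.
+                        */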
+                       max_tso = MLX5_CAP_ETH(mdev, max_lso_cap);
+                       if (max_tso) {
+                               resp.tso_caps.max_tso = 1 << max_tso;
+                               resp.tso_caps.supported_qpts |=
+                                       1 << IB_QPT_RAW_PACKET;
+                               resp.response_length += sizeof(resp.tso_caps);
+                       }
+               }
+       }
+
        if (MLX5_CAP_GEN(mdev, ipoib_basic_offloads)) {
                props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;
                props->device_cap_flags |= IB_DEVICE_UD_TSO;
        }
 
        if (!mlx5_core_is_pf(mdev))
                props->device_cap_flags |= IB_DEVICE_VIRTUAL_FUNCTION;
 
+       if (uhw->outlen) {
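+               /* response_length only accounts for fields that fit in
+                * uhw->outlen, so this copy cannot overrun the user buffer.
+                */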
+               err = ib_copy_to_udata(uhw, &resp, resp.response_length);
+
+               if (err)
+                       return err;
+       }
+
        return 0;
 }
 
        if (field_avail(typeof(resp), cqe_version, udata->outlen))
                resp.response_length += sizeof(resp.cqe_version);
 
+       if (field_avail(typeof(resp), cmds_supp_uhw, udata->outlen)) {
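+               /* Advertise which uverbs commands accept driver-specific
+                * data in their user-header (uhw) section.
+                */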
+               resp.cmds_supp_uhw |= MLX5_USER_CMDS_SUPP_UHW_QUERY_DEVICE;
+               resp.response_length += sizeof(resp.cmds_supp_uhw);
+       }
+
        /*
         * We don't want to expose information from the PCI bar that is located
         * after 4096 bytes, so if the arch only supports larger pages, let's
         * pretend we don't support reading the HCA's core clock. This is also
         * forced by mmap function.
         */
        if (PAGE_SIZE <= 4096 &&
            field_avail(typeof(resp), hca_core_clock_offset, udata->outlen)) {
                resp.comp_mask |=
                        MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_CORE_CLOCK_OFFSET;
                resp.hca_core_clock_offset =
                        offsetof(struct mlx5_init_seg, internal_timer_h) %
                        PAGE_SIZE;
                resp.response_length += sizeof(resp.hca_core_clock_offset) +
-                                       sizeof(resp.reserved2) +
-                                       sizeof(resp.reserved3);
+                                       sizeof(resp.reserved2);
        }
 
        err = ib_copy_to_udata(udata, &resp, resp.response_length);
 
        MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_CORE_CLOCK_OFFSET = 1UL << 0,
 };
 
+enum mlx5_user_cmds_supp_uhw {
+       MLX5_USER_CMDS_SUPP_UHW_QUERY_DEVICE = 1 << 0,
+};
+
 struct mlx5_ib_alloc_ucontext_resp {
        __u32   qp_tab_size;
        __u32   bf_reg_size;
        __u32   comp_mask;
        __u32   response_length;
        __u8    cqe_version;
-       __u8    reserved2;
-       __u16   reserved3;
+       __u8    cmds_supp_uhw;
+       __u16   reserved2;
        __u64   hca_core_clock_offset;
 };
 
        __u32   pdn;
 };
 
+struct mlx5_ib_tso_caps {
+       __u32 max_tso; /* Maximum TSO payload size in bytes */
+
+       /* The corresponding bit will be set if the QP type from
+        * 'enum ib_qp_type' is supported, e.g.
+        * supported_qpts |= 1 << IB_QPT_UD
+        */
+       __u32 supported_qpts;
+};
+
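+/* Driver-specific response to the QUERY_DEVICE uverbs command;
+ * response_length reports how many bytes the kernel filled in.
+ */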
+struct mlx5_ib_query_device_resp {
+       __u32   comp_mask;
+       __u32   response_length;
+       struct  mlx5_ib_tso_caps tso_caps;
+};
+
 struct mlx5_ib_create_cq {
        __u64   buf_addr;
        __u64   db_addr;