else
                dev->odp_max_size = BIT_ULL(MLX5_MAX_UMR_SHIFT + PAGE_SHIFT);
 
-       if (MLX5_CAP_ODP(dev->mdev, ud_odp_caps.send))
+       if (MLX5_CAP_ODP_SCHEME(dev->mdev, ud_odp_caps.send))
                caps->per_transport_caps.ud_odp_caps |= IB_ODP_SUPPORT_SEND;
 
-       if (MLX5_CAP_ODP(dev->mdev, ud_odp_caps.srq_receive))
+       if (MLX5_CAP_ODP_SCHEME(dev->mdev, ud_odp_caps.srq_receive))
                caps->per_transport_caps.ud_odp_caps |= IB_ODP_SUPPORT_SRQ_RECV;
 
-       if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.send))
+       if (MLX5_CAP_ODP_SCHEME(dev->mdev, rc_odp_caps.send))
                caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_SEND;
 
-       if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.receive))
+       if (MLX5_CAP_ODP_SCHEME(dev->mdev, rc_odp_caps.receive))
                caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_RECV;
 
-       if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.write))
+       if (MLX5_CAP_ODP_SCHEME(dev->mdev, rc_odp_caps.write))
                caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_WRITE;
 
-       if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.read))
+       if (MLX5_CAP_ODP_SCHEME(dev->mdev, rc_odp_caps.read))
                caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_READ;
 
-       if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.atomic))
+       if (MLX5_CAP_ODP_SCHEME(dev->mdev, rc_odp_caps.atomic))
                caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_ATOMIC;
 
-       if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.srq_receive))
+       if (MLX5_CAP_ODP_SCHEME(dev->mdev, rc_odp_caps.srq_receive))
                caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_SRQ_RECV;
 
-       if (MLX5_CAP_ODP(dev->mdev, xrc_odp_caps.send))
+       if (MLX5_CAP_ODP_SCHEME(dev->mdev, xrc_odp_caps.send))
                caps->per_transport_caps.xrc_odp_caps |= IB_ODP_SUPPORT_SEND;
 
-       if (MLX5_CAP_ODP(dev->mdev, xrc_odp_caps.receive))
+       if (MLX5_CAP_ODP_SCHEME(dev->mdev, xrc_odp_caps.receive))
                caps->per_transport_caps.xrc_odp_caps |= IB_ODP_SUPPORT_RECV;
 
-       if (MLX5_CAP_ODP(dev->mdev, xrc_odp_caps.write))
+       if (MLX5_CAP_ODP_SCHEME(dev->mdev, xrc_odp_caps.write))
                caps->per_transport_caps.xrc_odp_caps |= IB_ODP_SUPPORT_WRITE;
 
-       if (MLX5_CAP_ODP(dev->mdev, xrc_odp_caps.read))
+       if (MLX5_CAP_ODP_SCHEME(dev->mdev, xrc_odp_caps.read))
                caps->per_transport_caps.xrc_odp_caps |= IB_ODP_SUPPORT_READ;
 
-       if (MLX5_CAP_ODP(dev->mdev, xrc_odp_caps.atomic))
+       if (MLX5_CAP_ODP_SCHEME(dev->mdev, xrc_odp_caps.atomic))
                caps->per_transport_caps.xrc_odp_caps |= IB_ODP_SUPPORT_ATOMIC;
 
-       if (MLX5_CAP_ODP(dev->mdev, xrc_odp_caps.srq_receive))
+       if (MLX5_CAP_ODP_SCHEME(dev->mdev, xrc_odp_caps.srq_receive))
                caps->per_transport_caps.xrc_odp_caps |= IB_ODP_SUPPORT_SRQ_RECV;
 
        if (MLX5_CAP_GEN(dev->mdev, fixed_buffer_size) &&
        int wq_num = pfault->event_subtype == MLX5_PFAULT_SUBTYPE_WQE ?
                     pfault->wqe.wq_num : pfault->token;
        u32 in[MLX5_ST_SZ_DW(page_fault_resume_in)] = {};
+       void *info;
        int err;
 
        MLX5_SET(page_fault_resume_in, in, opcode, MLX5_CMD_OP_PAGE_FAULT_RESUME);
-       MLX5_SET(page_fault_resume_in, in, page_fault_type, pfault->type);
-       MLX5_SET(page_fault_resume_in, in, token, pfault->token);
-       MLX5_SET(page_fault_resume_in, in, wq_number, wq_num);
-       MLX5_SET(page_fault_resume_in, in, error, !!error);
+
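+       /* The resume parameters are carried in the transport page fault
+        * scheme layout of the page_fault_info union.
+        */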
+       info = MLX5_ADDR_OF(page_fault_resume_in, in,
+                           page_fault_info.trans_page_fault_info);
+       MLX5_SET(trans_page_fault_info, info, page_fault_type, pfault->type);
+       MLX5_SET(trans_page_fault_info, info, fault_token, pfault->token);
+       MLX5_SET(trans_page_fault_info, info, wq_number, wq_num);
+       MLX5_SET(trans_page_fault_info, info, error, !!error);
 
        err = mlx5_cmd_exec_in(dev->mdev, page_fault_resume, in);
        if (err)
 
                }                                                              \
        } while (0)
 
-       ODP_CAP_SET_MAX(dev, ud_odp_caps.srq_receive);
-       ODP_CAP_SET_MAX(dev, rc_odp_caps.srq_receive);
-       ODP_CAP_SET_MAX(dev, xrc_odp_caps.srq_receive);
-       ODP_CAP_SET_MAX(dev, xrc_odp_caps.send);
-       ODP_CAP_SET_MAX(dev, xrc_odp_caps.receive);
-       ODP_CAP_SET_MAX(dev, xrc_odp_caps.write);
-       ODP_CAP_SET_MAX(dev, xrc_odp_caps.read);
-       ODP_CAP_SET_MAX(dev, xrc_odp_caps.atomic);
-       ODP_CAP_SET_MAX(dev, dc_odp_caps.srq_receive);
-       ODP_CAP_SET_MAX(dev, dc_odp_caps.send);
-       ODP_CAP_SET_MAX(dev, dc_odp_caps.receive);
-       ODP_CAP_SET_MAX(dev, dc_odp_caps.write);
-       ODP_CAP_SET_MAX(dev, dc_odp_caps.read);
-       ODP_CAP_SET_MAX(dev, dc_odp_caps.atomic);
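+       /* The maximum ODP caps now sit under the transport page fault scheme
+        * section of odp_cap.
+        */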
+       ODP_CAP_SET_MAX(dev, transport_page_fault_scheme_cap.ud_odp_caps.srq_receive);
+       ODP_CAP_SET_MAX(dev, transport_page_fault_scheme_cap.rc_odp_caps.srq_receive);
+       ODP_CAP_SET_MAX(dev, transport_page_fault_scheme_cap.xrc_odp_caps.srq_receive);
+       ODP_CAP_SET_MAX(dev, transport_page_fault_scheme_cap.xrc_odp_caps.send);
+       ODP_CAP_SET_MAX(dev, transport_page_fault_scheme_cap.xrc_odp_caps.receive);
+       ODP_CAP_SET_MAX(dev, transport_page_fault_scheme_cap.xrc_odp_caps.write);
+       ODP_CAP_SET_MAX(dev, transport_page_fault_scheme_cap.xrc_odp_caps.read);
+       ODP_CAP_SET_MAX(dev, transport_page_fault_scheme_cap.xrc_odp_caps.atomic);
+       ODP_CAP_SET_MAX(dev, transport_page_fault_scheme_cap.dc_odp_caps.srq_receive);
+       ODP_CAP_SET_MAX(dev, transport_page_fault_scheme_cap.dc_odp_caps.send);
+       ODP_CAP_SET_MAX(dev, transport_page_fault_scheme_cap.dc_odp_caps.receive);
+       ODP_CAP_SET_MAX(dev, transport_page_fault_scheme_cap.dc_odp_caps.write);
+       ODP_CAP_SET_MAX(dev, transport_page_fault_scheme_cap.dc_odp_caps.read);
+       ODP_CAP_SET_MAX(dev, transport_page_fault_scheme_cap.dc_odp_caps.atomic);
 
        if (!do_set)
                return 0;
 
 #define MLX5_CAP_ODP(mdev, cap)\
        MLX5_GET(odp_cap, mdev->caps.hca[MLX5_CAP_ODP]->cur, cap)
 
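+/* Read an ODP capability field from the transport page fault scheme section
+ * of the ODP HCA capabilities.
+ */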
+#define MLX5_CAP_ODP_SCHEME(mdev, cap)                       \
+       MLX5_GET(odp_cap, mdev->caps.hca[MLX5_CAP_ODP]->cur, \
+                transport_page_fault_scheme_cap.cap)
+
 #define MLX5_CAP_ODP_MAX(mdev, cap)\
        MLX5_GET(odp_cap, mdev->caps.hca[MLX5_CAP_ODP]->max, cap)
 
 
        u8         reserved_at_e0[0x720];
 };
 
-struct mlx5_ifc_odp_cap_bits {
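+/* Capability layout shared by both ODP page fault schemes. */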
+struct mlx5_ifc_odp_scheme_cap_bits {
        u8         reserved_at_0[0x40];
 
        u8         sig[0x1];
-       u8         reserved_at_41[0x1f];
+       u8         reserved_at_41[0x4];
+       u8         page_prefetch[0x1];
+       u8         reserved_at_46[0x1a];
 
        u8         reserved_at_60[0x20];
 
 
        struct mlx5_ifc_odp_per_transport_service_cap_bits dc_odp_caps;
 
-       u8         reserved_at_120[0x6E0];
+       u8         reserved_at_120[0xe0];
+};
+
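+/* ODP caps are reported per page fault scheme (transport and memory);
+ * mem_page_fault reports device support for the memory page fault scheme.
+ */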
+struct mlx5_ifc_odp_cap_bits {
+       struct mlx5_ifc_odp_scheme_cap_bits transport_page_fault_scheme_cap;
+
+       struct mlx5_ifc_odp_scheme_cap_bits memory_page_fault_scheme_cap;
+
+       u8         reserved_at_400[0x200];
+
+       u8         mem_page_fault[0x1];
+       u8         reserved_at_601[0x1f];
+
+       u8         reserved_at_620[0x1e0];
 };
 
 struct mlx5_ifc_tls_cap_bits {
        u8         min_mkey_log_entity_size_fixed_buffer[0x5];
        u8         ec_vf_vport_base[0x10];
 
-       u8         reserved_at_3a0[0x10];
+       u8         reserved_at_3a0[0xa];
+       u8         max_mkey_log_entity_size_mtt[0x6];
        u8         max_rqt_vhca_id[0x10];
 
        u8         reserved_at_3c0[0x20];
        u8         reserved_at_60[0x20];
 };
 
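+/* Page fault info layouts for the PAGE_FAULT_RESUME command, one per
+ * page fault scheme.
+ */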
+struct mlx5_ifc_trans_page_fault_info_bits {
+       u8         error[0x1];
+       u8         reserved_at_1[0x4];
+       u8         page_fault_type[0x3];
+       u8         wq_number[0x18];
+
+       u8         reserved_at_20[0x8];
+       u8         fault_token[0x18];
+};
+
+struct mlx5_ifc_mem_page_fault_info_bits {
+       u8         error[0x1];
+       u8         reserved_at_1[0xf];
+       u8         fault_token_47_32[0x10];
+
+       u8         fault_token_31_0[0x20];
+};
+
+union mlx5_ifc_page_fault_resume_in_page_fault_info_auto_bits {
+       struct mlx5_ifc_trans_page_fault_info_bits trans_page_fault_info;
+       struct mlx5_ifc_mem_page_fault_info_bits mem_page_fault_info;
+       u8         reserved_at_0[0x40];
+};
+
 struct mlx5_ifc_page_fault_resume_out_bits {
        u8         status[0x8];
        u8         reserved_at_8[0x18];
        u8         reserved_at_20[0x10];
        u8         op_mod[0x10];
 
-       u8         error[0x1];
-       u8         reserved_at_41[0x4];
-       u8         page_fault_type[0x3];
-       u8         wq_number[0x18];
-
-       u8         reserved_at_60[0x8];
-       u8         token[0x18];
+       union mlx5_ifc_page_fault_resume_in_page_fault_info_auto_bits
+               page_fault_info;
 };
 
 struct mlx5_ifc_nop_out_bits {