KVM_X86_OP(enable_smi_window)
 #endif
 KVM_X86_OP_OPTIONAL(dev_get_attr)
-KVM_X86_OP_OPTIONAL(mem_enc_ioctl)
+KVM_X86_OP(mem_enc_ioctl)
 KVM_X86_OP_OPTIONAL(mem_enc_register_region)
 KVM_X86_OP_OPTIONAL(mem_enc_unregister_region)
 KVM_X86_OP_OPTIONAL(vm_copy_enc_context_from)
 
 #define KVM_X86_SNP_VM         4
 #define KVM_X86_TDX_VM         5
 
+/*
+ * Trust Domain eXtension sub-ioctl() commands.
+ *
+ * Placeholder: no sub-commands are defined yet.  KVM_TDX_CMD_NR_MAX is the
+ * exclusive upper bound, so for now every id is rejected as invalid.
+ */
+enum kvm_tdx_cmd_id {
+       KVM_TDX_CMD_NR_MAX,
+};
+
+/*
+ * Argument of ioctl(KVM_MEMORY_ENCRYPT_OP) for a TDX guest: selects a
+ * sub-command and carries its input; hw_error is an output written back
+ * by the kernel (userspace must pass it as zero).
+ */
+struct kvm_tdx_cmd {
+       /* enum kvm_tdx_cmd_id */
+       __u32 id;
+       /* flags for sub-command. If sub-command doesn't use this, set zero. */
+       __u32 flags;
+       /*
+        * data for each sub-command. An immediate or a pointer to the actual
+        * data in process virtual address.  If sub-command doesn't use it,
+        * set zero.
+        */
+       __u64 data;
+       /*
+        * Auxiliary error code.  The sub-command may return TDX SEAMCALL
+        * status code in addition to -Exxx.
+        */
+       __u64 hw_error;
+};
+
 #endif /* _ASM_X86_KVM_H */
 
        return 0;
 }
 
+/*
+ * VMX-side handler for KVM_MEMORY_ENCRYPT_OP.  The sub-ioctls are
+ * TDX-only, so reject non-TD VMs with -ENOTTY (matching the error a VM
+ * type without the op would have seen) before forwarding to tdx_vm_ioctl().
+ */
+static int vt_mem_enc_ioctl(struct kvm *kvm, void __user *argp)
+{
+       if (!is_td(kvm))
+               return -ENOTTY;
+
+       return tdx_vm_ioctl(kvm, argp);
+}
+
 #define VMX_REQUIRED_APICV_INHIBITS                            \
        (BIT(APICV_INHIBIT_REASON_DISABLED) |                   \
         BIT(APICV_INHIBIT_REASON_ABSENT) |                     \
        .vcpu_deliver_sipi_vector = kvm_vcpu_deliver_sipi_vector,
 
        .get_untagged_addr = vmx_get_untagged_addr,
+
+       .mem_enc_ioctl = vt_mem_enc_ioctl,
 };
 
 struct kvm_x86_init_ops vt_init_ops __initdata = {
 
 #include <asm/cpufeature.h>
 #include <asm/tdx.h>
 #include "capabilities.h"
+#include "x86_ops.h"
 #include "tdx.h"
 
 #pragma GCC poison to_vmx
        return container_of(vcpu, struct vcpu_tdx, vcpu);
 }
 
+/*
+ * Dispatch a struct kvm_tdx_cmd from userspace.
+ *
+ * Copies the command in, rejects a non-zero hw_error (that field is
+ * kernel-owned output), and dispatches on the sub-command id under
+ * kvm->lock.  After a handled sub-command the (possibly updated)
+ * command is copied back so userspace can read hw_error.  No
+ * sub-commands exist yet, so every id currently returns -EINVAL.
+ */
+int tdx_vm_ioctl(struct kvm *kvm, void __user *argp)
+{
+       struct kvm_tdx_cmd tdx_cmd;
+       int r;
+
+       if (copy_from_user(&tdx_cmd, argp, sizeof(struct kvm_tdx_cmd)))
+               return -EFAULT;
+
+       /*
+        * Userspace should never set hw_error. It is used to fill
+        * hardware-defined error by the kernel.
+        */
+       if (tdx_cmd.hw_error)
+               return -EINVAL;
+
+       /* Serialize sub-commands against other vCPU/VM-scope operations. */
+       mutex_lock(&kvm->lock);
+
+       switch (tdx_cmd.id) {
+       default:
+               /* Unknown id: skip the copy-back below. */
+               r = -EINVAL;
+               goto out;
+       }
+
+       /* Copy hw_error (and any sub-command output) back to userspace. */
+       if (copy_to_user(argp, &tdx_cmd, sizeof(struct kvm_tdx_cmd)))
+               r = -EFAULT;
+
+out:
+       mutex_unlock(&kvm->lock);
+       return r;
+}
+
 static int tdx_online_cpu(unsigned int cpu)
 {
        unsigned long flags;
 
 #endif
 void vmx_setup_mce(struct kvm_vcpu *vcpu);
 
+#ifdef CONFIG_KVM_INTEL_TDX
+int tdx_vm_ioctl(struct kvm *kvm, void __user *argp);
+#else
+/* TDX compiled out: report the TDX sub-ioctls as unsupported. */
+static inline int tdx_vm_ioctl(struct kvm *kvm, void __user *argp) { return -EOPNOTSUPP; }
+#endif
+
 #endif /* __KVM_X86_VMX_X86_OPS_H */
 
                goto out;
        }
        case KVM_MEMORY_ENCRYPT_OP: {
-               r = -ENOTTY;
-               if (!kvm_x86_ops.mem_enc_ioctl)
-                       goto out;
-
                r = kvm_x86_call(mem_enc_ioctl)(kvm, argp);
                break;
        }