-ENOENT on deassign if the conn_id isn't registered
        -EEXIST on assign if the conn_id is already registered
 
+4.114 KVM_GET_NESTED_STATE
+
+Capability: KVM_CAP_NESTED_STATE
+Architectures: x86
+Type: vcpu ioctl
+Parameters: struct kvm_nested_state (in/out)
+Returns: 0 on success, -1 on error
+Errors:
+  E2BIG:     the total state size (including the fixed-size part of struct
+             kvm_nested_state) exceeds the value of 'size' specified by
+             the user; the size required will be written into 'size'.
+
+struct kvm_nested_state {
+       __u16 flags;
+       __u16 format;
+       __u32 size;
+       union {
+               struct kvm_vmx_nested_state vmx;
+               struct kvm_svm_nested_state svm;
+               __u8 pad[120];
+       };
+       __u8 data[0];
+};
+
+#define KVM_STATE_NESTED_GUEST_MODE    0x00000001
+#define KVM_STATE_NESTED_RUN_PENDING   0x00000002
+
+#define KVM_STATE_NESTED_SMM_GUEST_MODE        0x00000001
+#define KVM_STATE_NESTED_SMM_VMXON     0x00000002
+
+struct kvm_vmx_nested_state {
+       __u64 vmxon_pa;
+       __u64 vmcs_pa;
+
+       struct {
+               __u16 flags;
+       } smm;
+};
+
+This ioctl copies the vcpu's nested virtualization state from the kernel to
+userspace.  Userspace must set 'size' to the capacity of the buffer before
+the call; on success, the kernel writes the actual size into 'size'.
+
+The maximum size of the state, including the fixed-size part of struct
+kvm_nested_state, can be retrieved by passing KVM_CAP_NESTED_STATE to
+the KVM_CHECK_EXTENSION ioctl().
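+
+For example, a VMM can size its buffer from the capability check, which
+returns the maximum possible state size (a minimal sketch; 'kvm_fd' and
+'vcpu_fd' are illustrative descriptors for /dev/kvm and the vcpu, not part
+of the API):
+
+       int max = ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_NESTED_STATE);
+       struct kvm_nested_state *state = calloc(1, max);
+
+       state->size = max;
+       if (ioctl(vcpu_fd, KVM_GET_NESTED_STATE, state) < 0)
+               err(1, "KVM_GET_NESTED_STATE");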
+
+4.115 KVM_SET_NESTED_STATE
+
+Capability: KVM_CAP_NESTED_STATE
+Architectures: x86
+Type: vcpu ioctl
+Parameters: struct kvm_nested_state (in)
+Returns: 0 on success, -1 on error
+
+This ioctl copies the vcpu's nested virtualization state from userspace to
+the kernel.  For the definition of struct kvm_nested_state, see
+KVM_GET_NESTED_STATE.
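+
+On the destination of a migration, the same buffer can be pushed back before
+resuming the vcpu (again a sketch, reusing 'vcpu_fd' and 'state' from the
+KVM_GET_NESTED_STATE example above):
+
+       if (ioctl(vcpu_fd, KVM_SET_NESTED_STATE, state) < 0)
+               err(1, "KVM_SET_NESTED_STATE");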
 
 5. The kvm_run structure
 ------------------------
 
        else
                kvm_disable_tdp();
 
+       if (!nested) {
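+               /*
+                * Without nested virtualization there is no nested state to
+                * save or restore, so hide the ioctls entirely.
+                */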
+               kvm_x86_ops->get_nested_state = NULL;
+               kvm_x86_ops->set_nested_state = NULL;
+       }
+
        /*
         * Only enable PML when hardware supports PML feature, and both EPT
         * and EPT A/D bit features are enabled -- PML depends on them to work.
 }
 
 /*
- * If exit_qual is NULL, this is being called from RSM.
- * Otherwise it's called from vmlaunch/vmresume.
+ * If exit_qual is NULL, this is being called from state restore (either RSM
+ * or KVM_SET_NESTED_STATE).  Otherwise it's called from vmlaunch/vmresume.
  */
 static int enter_vmx_non_root_mode(struct kvm_vcpu *vcpu, u32 *exit_qual)
 {
        return 0;
 }
 
+static int vmx_get_nested_state(struct kvm_vcpu *vcpu,
+                               struct kvm_nested_state __user *user_kvm_nested_state,
+                               u32 user_data_size)
+{
+       struct vcpu_vmx *vmx;
+       struct vmcs12 *vmcs12;
+       struct kvm_nested_state kvm_state = {
+               .flags = 0,
+               .format = 0,
+               .size = sizeof(kvm_state),
+               .vmx.vmxon_pa = -1ull,
+               .vmx.vmcs_pa = -1ull,
+       };
+
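+       /*
+        * A NULL vcpu means "report the maximum possible state size":
+        * the fixed-size header plus two VMCS12_SIZE blocks of payload.
+        */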
+       if (!vcpu)
+               return kvm_state.size + 2 * VMCS12_SIZE;
+
+       vmx = to_vmx(vcpu);
+       vmcs12 = get_vmcs12(vcpu);
+       if (nested_vmx_allowed(vcpu) &&
+           (vmx->nested.vmxon || vmx->nested.smm.vmxon)) {
+               kvm_state.vmx.vmxon_pa = vmx->nested.vmxon_ptr;
+               kvm_state.vmx.vmcs_pa = vmx->nested.current_vmptr;
+
+               if (vmx->nested.current_vmptr != -1ull)
+                       kvm_state.size += VMCS12_SIZE;
+
+               if (vmx->nested.smm.vmxon)
+                       kvm_state.vmx.smm.flags |= KVM_STATE_NESTED_SMM_VMXON;
+
+               if (vmx->nested.smm.guest_mode)
+                       kvm_state.vmx.smm.flags |= KVM_STATE_NESTED_SMM_GUEST_MODE;
+
+               if (is_guest_mode(vcpu)) {
+                       kvm_state.flags |= KVM_STATE_NESTED_GUEST_MODE;
+
+                       if (vmx->nested.nested_run_pending)
+                               kvm_state.flags |= KVM_STATE_NESTED_RUN_PENDING;
+               }
+       }
+
+       if (user_data_size < kvm_state.size)
+               goto out;
+
+       if (copy_to_user(user_kvm_nested_state, &kvm_state, sizeof(kvm_state)))
+               return -EFAULT;
+
+       if (vmx->nested.current_vmptr == -1ull)
+               goto out;
+
+       /*
+        * When running L2, the authoritative vmcs12 state is in the
+        * vmcs02. When running L1, the authoritative vmcs12 state is
+        * in the shadow vmcs linked to vmcs01, unless
+        * sync_shadow_vmcs is set, in which case, the authoritative
+        * vmcs12 state is in the vmcs12 already.
+        */
+       if (is_guest_mode(vcpu))
+               sync_vmcs12(vcpu, vmcs12);
+       else if (enable_shadow_vmcs && !vmx->nested.sync_shadow_vmcs)
+               copy_shadow_to_vmcs12(vmx);
+
+       if (copy_to_user(user_kvm_nested_state->data, vmcs12, sizeof(*vmcs12)))
+               return -EFAULT;
+
+out:
+       return kvm_state.size;
+}
+
+static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
+                               struct kvm_nested_state __user *user_kvm_nested_state,
+                               struct kvm_nested_state *kvm_state)
+{
+       struct vcpu_vmx *vmx = to_vmx(vcpu);
+       struct vmcs12 *vmcs12;
+       u32 exit_qual;
+       int ret;
+
+       if (kvm_state->format != 0)
+               return -EINVAL;
+
+       if (!nested_vmx_allowed(vcpu))
+               return kvm_state->vmx.vmxon_pa == -1ull ? 0 : -EINVAL;
+
+       if (kvm_state->vmx.vmxon_pa == -1ull) {
+               if (kvm_state->vmx.smm.flags)
+                       return -EINVAL;
+
+               if (kvm_state->vmx.vmcs_pa != -1ull)
+                       return -EINVAL;
+
+               vmx_leave_nested(vcpu);
+               return 0;
+       }
+
+       if (!page_address_valid(vcpu, kvm_state->vmx.vmxon_pa))
+               return -EINVAL;
+
+       if (kvm_state->size < sizeof(*kvm_state) + sizeof(*vmcs12))
+               return -EINVAL;
+
+       if (kvm_state->vmx.vmcs_pa == kvm_state->vmx.vmxon_pa ||
+           !page_address_valid(vcpu, kvm_state->vmx.vmcs_pa))
+               return -EINVAL;
+
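+       /*
+        * A vcpu in SMM has left guest mode, so SMM guest mode excludes
+        * normal guest mode, and being in guest mode under SMM requires
+        * that VMXON was set when SMM was entered.
+        */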
+       if ((kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) &&
+           (kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE))
+               return -EINVAL;
+
+       if (kvm_state->vmx.smm.flags &
+           ~(KVM_STATE_NESTED_SMM_GUEST_MODE | KVM_STATE_NESTED_SMM_VMXON))
+               return -EINVAL;
+
+       if ((kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) &&
+           !(kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_VMXON))
+               return -EINVAL;
+
+       vmx_leave_nested(vcpu);
+
+       vmx->nested.vmxon_ptr = kvm_state->vmx.vmxon_pa;
+       ret = enter_vmx_operation(vcpu);
+       if (ret)
+               return ret;
+
+       set_current_vmptr(vmx, kvm_state->vmx.vmcs_pa);
+
+       if (kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_VMXON) {
+               vmx->nested.smm.vmxon = true;
+               vmx->nested.vmxon = false;
+
+               if (kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE)
+                       vmx->nested.smm.guest_mode = true;
+       }
+
+       vmcs12 = get_vmcs12(vcpu);
+       if (copy_from_user(vmcs12, user_kvm_nested_state->data, sizeof(*vmcs12)))
+               return -EFAULT;
+
+       if (vmcs12->revision_id != VMCS12_REVISION)
+               return -EINVAL;
+
+       if (!(kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE))
+               return 0;
+
+       vmx->nested.nested_run_pending =
+               !!(kvm_state->flags & KVM_STATE_NESTED_RUN_PENDING);
+
+       if (check_vmentry_prereqs(vcpu, vmcs12) ||
+           check_vmentry_postreqs(vcpu, vmcs12, &exit_qual))
+               return -EINVAL;
+
+       vmx->nested.dirty_vmcs12 = true;
+       ret = enter_vmx_non_root_mode(vcpu, NULL);
+       if (ret)
+               return -EINVAL;
+
+       return 0;
+}
+
 static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
        .cpu_has_kvm_support = cpu_has_kvm_support,
        .disabled_by_bios = vmx_disabled_by_bios,
 
        .setup_mce = vmx_setup_mce,
 
+       .get_nested_state = vmx_get_nested_state,
+       .set_nested_state = vmx_set_nested_state,
        .get_vmcs12_pages = nested_get_vmcs12_pages,
 
        .smi_allowed = vmx_smi_allowed,
 
        case KVM_CAP_X2APIC_API:
                r = KVM_X2APIC_API_VALID_FLAGS;
                break;
+       case KVM_CAP_NESTED_STATE:
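+               /* A NULL vcpu asks the vendor hook for the maximum state size. */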
+               r = kvm_x86_ops->get_nested_state ?
+                       kvm_x86_ops->get_nested_state(NULL, NULL, 0) : 0;
+               break;
        default:
                break;
        }
                r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
                break;
        }
+       case KVM_GET_NESTED_STATE: {
+               struct kvm_nested_state __user *user_kvm_nested_state = argp;
+               u32 user_data_size;
+
+               r = -EINVAL;
+               if (!kvm_x86_ops->get_nested_state)
+                       break;
+
+               BUILD_BUG_ON(sizeof(user_data_size) !=
+                            sizeof(user_kvm_nested_state->size));
+               if (get_user(user_data_size, &user_kvm_nested_state->size))
+                       return -EFAULT;
+
+               r = kvm_x86_ops->get_nested_state(vcpu, user_kvm_nested_state,
+                                                 user_data_size);
+               if (r < 0)
+                       return r;
+
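+               /* Buffer too small: report the required size via 'size'. */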
+               if (r > user_data_size) {
+                       if (put_user(r, &user_kvm_nested_state->size))
+                               return -EFAULT;
+                       return -E2BIG;
+               }
+               r = 0;
+               break;
+       }
+       case KVM_SET_NESTED_STATE: {
+               struct kvm_nested_state __user *user_kvm_nested_state = argp;
+               struct kvm_nested_state kvm_state;
+
+               r = -EINVAL;
+               if (!kvm_x86_ops->set_nested_state)
+                       break;
+
+               if (copy_from_user(&kvm_state, user_kvm_nested_state, sizeof(kvm_state)))
+                       return -EFAULT;
+
+               if (kvm_state.size < sizeof(kvm_state))
+                       return -EINVAL;
+
+               if (kvm_state.flags &
+                   ~(KVM_STATE_NESTED_RUN_PENDING | KVM_STATE_NESTED_GUEST_MODE))
+                       return -EINVAL;
+
+               /* nested_run_pending implies guest_mode.  */
+               if (kvm_state.flags == KVM_STATE_NESTED_RUN_PENDING)
+                       return -EINVAL;
+
+               r = kvm_x86_ops->set_nested_state(vcpu, user_kvm_nested_state, &kvm_state);
+               break;
+       }
        default:
                r = -EINVAL;
        }