        u64 nested_vmx_cr4_fixed1;
        u64 nested_vmx_vmcs_enum;
        u64 nested_vmx_vmfunc_controls;
+
+       /* SMM related state */
+       struct {
+               /* in VMX operation on SMM entry? */
+               bool vmxon;
+               /* in guest mode on SMM entry? */
+               bool guest_mode;
+       } smm;
 };
 
 #define POSTED_INTR_ON  0
        leave_guest_mode(vcpu);
 
        if (likely(!vmx->fail)) {
-               prepare_vmcs12(vcpu, vmcs12, exit_reason, exit_intr_info,
-                              exit_qualification);
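+               /*
+                * exit_reason == -1 denotes a synthetic vmexit (forced on SMM
+                * entry): sync guest state into vmcs12 but leave the exit
+                * information fields untouched.
+                */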
+               if (exit_reason == -1)
+                       sync_vmcs12(vcpu, vmcs12);
+               else
+                       prepare_vmcs12(vcpu, vmcs12, exit_reason, exit_intr_info,
+                                      exit_qualification);
 
                if (nested_vmx_store_msr(vcpu, vmcs12->vm_exit_msr_store_addr,
                                         vmcs12->vm_exit_msr_store_count))
         */
        kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu);
 
-       if (enable_shadow_vmcs)
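+       /* a synthetic (exit_reason == -1) vmexit needs no shadow VMCS sync */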
+       if (enable_shadow_vmcs && exit_reason != -1)
                vmx->nested.sync_shadow_vmcs = true;
 
        /* in case we halted in L2 */
                                INTR_INFO_VALID_MASK | INTR_TYPE_EXT_INTR;
                }
 
-               trace_kvm_nested_vmexit_inject(vmcs12->vm_exit_reason,
-                                              vmcs12->exit_qualification,
-                                              vmcs12->idt_vectoring_info_field,
-                                              vmcs12->vm_exit_intr_info,
-                                              vmcs12->vm_exit_intr_error_code,
-                                              KVM_ISA_VMX);
+               if (exit_reason != -1)
+                       trace_kvm_nested_vmexit_inject(vmcs12->vm_exit_reason,
+                                                      vmcs12->exit_qualification,
+                                                      vmcs12->idt_vectoring_info_field,
+                                                      vmcs12->vm_exit_intr_info,
+                                                      vmcs12->vm_exit_intr_error_code,
+                                                      KVM_ISA_VMX);
 
                load_vmcs12_host_state(vcpu, vmcs12);
 
 
 static int vmx_smi_allowed(struct kvm_vcpu *vcpu)
 {
+       /* we need a nested vmexit to enter SMM, postpone if run is pending */
+       if (to_vmx(vcpu)->nested.nested_run_pending)
+               return 0;
        return 1;
 }
 
 static int vmx_pre_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
 {
-       /* TODO: Implement */
+       struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+       vmx->nested.smm.guest_mode = is_guest_mode(vcpu);
+       if (vmx->nested.smm.guest_mode)
+               nested_vmx_vmexit(vcpu, -1, 0, 0);
+
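+       /* temporarily leave VMX operation; vmx_pre_leave_smm() restores it */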
+       vmx->nested.smm.vmxon = vmx->nested.vmxon;
+       vmx->nested.vmxon = false;
        return 0;
 }
 
 static int vmx_pre_leave_smm(struct kvm_vcpu *vcpu, u64 smbase)
 {
-       /* TODO: Implement */
+       struct vcpu_vmx *vmx = to_vmx(vcpu);
+       int ret;
+
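+       /* return to VMX operation if the vCPU was in it when the SMI arrived */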
+       if (vmx->nested.smm.vmxon) {
+               vmx->nested.vmxon = true;
+               vmx->nested.smm.vmxon = false;
+       }
+
+       if (vmx->nested.smm.guest_mode) {
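+               /* re-enter L2 with HF_SMM_MASK temporarily cleared */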
+               vcpu->arch.hflags &= ~HF_SMM_MASK;
+               ret = enter_vmx_non_root_mode(vcpu, false);
+               vcpu->arch.hflags |= HF_SMM_MASK;
+               if (ret)
+                       return ret;
+
+               vmx->nested.smm.guest_mode = false;
+       }
        return 0;
 }