int (*cpl)(struct x86_emulate_ctxt *ctxt);
        int (*get_dr)(struct x86_emulate_ctxt *ctxt, int dr, ulong *dest);
        int (*set_dr)(struct x86_emulate_ctxt *ctxt, int dr, ulong value);
+       u64 (*get_smbase)(struct x86_emulate_ctxt *ctxt);
+       void (*set_smbase)(struct x86_emulate_ctxt *ctxt, u64 smbase);
        int (*set_msr)(struct x86_emulate_ctxt *ctxt, u32 msr_index, u64 data);
        int (*get_msr)(struct x86_emulate_ctxt *ctxt, u32 msr_index, u64 *pdata);
        int (*check_pmc)(struct x86_emulate_ctxt *ctxt, u32 pmc);
 
 /* These match some of the HF_* flags defined in kvm_host.h  */
 #define X86EMUL_GUEST_MASK           (1 << 5) /* VCPU is in guest-mode */
+#define X86EMUL_SMM_MASK             (1 << 6)
+#define X86EMUL_SMM_INSIDE_NMI_MASK  (1 << 7)
 
 struct x86_emulate_ctxt {
        const struct x86_emulate_ops *ops;
 
        return rc;
 }
 
+/*
+ * Emulate RSM (resume from System Management Mode).
+ * RSM outside SMM raises #UD; the SMM state-restore itself is not
+ * implemented here yet, so a genuine RSM in SMM is reported back as
+ * unhandleable rather than silently mis-emulated.
+ */
+static int em_rsm(struct x86_emulate_ctxt *ctxt)
+{
+       if ((ctxt->emul_flags & X86EMUL_SMM_MASK) == 0)
+               return emulate_ud(ctxt);
+
+       return X86EMUL_UNHANDLEABLE;
+}
+
 static void
 setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
                        struct desc_struct *cs, struct desc_struct *ss)
        F(DstMem | SrcReg | Src2CL | ModRM, em_shld), N, N,
        /* 0xA8 - 0xAF */
        I(Stack | Src2GS, em_push_sreg), I(Stack | Src2GS, em_pop_sreg),
-       DI(ImplicitOps, rsm),
+       II(No64 | EmulateOnUD | ImplicitOps, em_rsm, rsm),
        F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_bts),
        F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shrd),
        F(DstMem | SrcReg | Src2CL | ModRM, em_shrd),
 
        MSR_IA32_MISC_ENABLE,
        MSR_IA32_MCG_STATUS,
        MSR_IA32_MCG_CTL,
+       MSR_IA32_SMBASE,
 };
 
 static unsigned num_emulated_msrs;
        case MSR_IA32_MISC_ENABLE:
                vcpu->arch.ia32_misc_enable_msr = data;
                break;
+       case MSR_IA32_SMBASE:
+               if (!msr_info->host_initiated)
+                       return 1;
+               vcpu->arch.smbase = data;
+               break;
        case MSR_KVM_WALL_CLOCK_NEW:
        case MSR_KVM_WALL_CLOCK:
                vcpu->kvm->arch.wall_clock = data;
        case MSR_IA32_MISC_ENABLE:
                msr_info->data = vcpu->arch.ia32_misc_enable_msr;
                break;
+       case MSR_IA32_SMBASE:
+               if (!msr_info->host_initiated)
+                       return 1;
+               msr_info->data = vcpu->arch.smbase;
+               break;
        case MSR_IA32_PERF_STATUS:
                /* TSC increment by tick */
                msr_info->data = 1000ULL;
 
 static int kvm_vcpu_ioctl_smi(struct kvm_vcpu *vcpu)
 {
+       /* Latch an SMI request; it is serviced later in the vcpu run loop. */
+       kvm_make_request(KVM_REQ_SMI, vcpu);
+
        return 0;
 }
 
        return kvm_set_msr(emul_to_vcpu(ctxt), &msr);
 }
 
+/* Emulator callback: return the VCPU's current SMBASE value. */
+static u64 emulator_get_smbase(struct x86_emulate_ctxt *ctxt)
+{
+       struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
+
+       return vcpu->arch.smbase;
+}
+
+/* Emulator callback: update the VCPU's SMBASE value. */
+static void emulator_set_smbase(struct x86_emulate_ctxt *ctxt, u64 smbase)
+{
+       struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
+
+       vcpu->arch.smbase = smbase;
+}
+
 static int emulator_check_pmc(struct x86_emulate_ctxt *ctxt,
                              u32 pmc)
 {
        .cpl                 = emulator_get_cpl,
        .get_dr              = emulator_get_dr,
        .set_dr              = emulator_set_dr,
+       .get_smbase          = emulator_get_smbase,
+       .set_smbase          = emulator_set_smbase,
        .set_msr             = emulator_set_msr,
        .get_msr             = emulator_get_msr,
        .check_pmc           = emulator_check_pmc,
                     cs_db                              ? X86EMUL_MODE_PROT32 :
                                                          X86EMUL_MODE_PROT16;
        BUILD_BUG_ON(HF_GUEST_MASK != X86EMUL_GUEST_MASK);
+       BUILD_BUG_ON(HF_SMM_MASK != X86EMUL_SMM_MASK);
+       BUILD_BUG_ON(HF_SMM_INSIDE_NMI_MASK != X86EMUL_SMM_INSIDE_NMI_MASK);
        ctxt->emul_flags = vcpu->arch.hflags;
 
        init_decode_cache(ctxt);
 static int complete_emulated_mmio(struct kvm_vcpu *vcpu);
 static int complete_emulated_pio(struct kvm_vcpu *vcpu);
 
-void kvm_set_hflags(struct kvm_vcpu *vcpu, unsigned emul_flags)
+/*
+ * Called when HF_SMM_MASK has just toggled in hflags.  On SMM exit,
+ * re-raise any SMI that arrived while the VCPU was already in SMM
+ * (it was latched in smi_pending instead of being delivered).
+ */
+static void kvm_smm_changed(struct kvm_vcpu *vcpu)
 {
+       if (!(vcpu->arch.hflags & HF_SMM_MASK)) {
+               if (unlikely(vcpu->arch.smi_pending)) {
+                       kvm_make_request(KVM_REQ_SMI, vcpu);
+                       vcpu->arch.smi_pending = 0;
+               }
+       }
+}
+
+/* Install new hflags and react to an SMM entry/exit transition. */
+static void kvm_set_hflags(struct kvm_vcpu *vcpu, unsigned emul_flags)
+{
+       unsigned changed = vcpu->arch.hflags ^ emul_flags;
+
        vcpu->arch.hflags = emul_flags;
+
+       if (changed & HF_SMM_MASK)
+               kvm_smm_changed(vcpu);
 }
 
 static int kvm_vcpu_check_hw_bp(unsigned long addr, u32 type, u32 dr7,
        kvm_make_request(KVM_REQ_EVENT, vcpu);
 }
 
+/*
+ * Service a pending KVM_REQ_SMI.  Real SMM entry is not implemented in
+ * this stub: if the VCPU is already in SMM, latch the SMI so it can be
+ * re-raised on SMM exit; otherwise the SMI is dropped (logged once).
+ */
+static void process_smi(struct kvm_vcpu *vcpu)
+{
+       if (is_smm(vcpu)) {
+               vcpu->arch.smi_pending = true;
+               return;
+       }
+
+       printk_once(KERN_DEBUG "Ignoring guest SMI\n");
+}
+
 static void vcpu_scan_ioapic(struct kvm_vcpu *vcpu)
 {
        u64 eoi_exit_bitmap[4];
                }
                if (kvm_check_request(KVM_REQ_STEAL_UPDATE, vcpu))
                        record_steal_time(vcpu);
+               if (kvm_check_request(KVM_REQ_SMI, vcpu))
+                       process_smi(vcpu);
                if (kvm_check_request(KVM_REQ_NMI, vcpu))
                        process_nmi(vcpu);
                if (kvm_check_request(KVM_REQ_PMU, vcpu))
        kvm_async_pf_hash_reset(vcpu);
        vcpu->arch.apf.halted = false;
 
-       if (!init_event)
+       if (!init_event) {
                kvm_pmu_reset(vcpu);
+               vcpu->arch.smbase = 0x30000;
+       }
 
        memset(vcpu->arch.regs, 0, sizeof(vcpu->arch.regs));
        vcpu->arch.regs_avail = ~0;