From: Boris Ostrovsky
Date: Tue, 26 Mar 2019 22:46:01 +0000 (-0400)
Subject: Revert "KVM: x86: pass host_initiated to functions that read MSRs"
X-Git-Tag: v4.1.12-124.31.3~223
X-Git-Url: https://www.infradead.org/git/?a=commitdiff_plain;h=659196a868a3a90da79c54c891e75affb8334737;p=users%2Fjedix%2Flinux-maple.git

Revert "KVM: x86: pass host_initiated to functions that read MSRs"

This reverts commit fe09396e31ddc452c8ef2e0a5474c28d5a501e4e.

Revert due to performance regression.

Orabug: 29542029

Signed-off-by: Boris Ostrovsky
Reviewed-by: Mihai Carabas
Signed-off-by: Brian Maly
---

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 4bc0508591ad..1cb8405125a5 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -729,7 +729,7 @@ struct kvm_x86_ops {
 	void (*vcpu_put)(struct kvm_vcpu *vcpu);
 
 	void (*update_db_bp_intercept)(struct kvm_vcpu *vcpu);
-	int (*get_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr);
+	int (*get_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata);
 	int (*set_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr);
 	u64 (*get_segment_base)(struct kvm_vcpu *vcpu, int seg);
 	void (*get_segment)(struct kvm_vcpu *vcpu,
@@ -951,7 +951,7 @@ static inline int emulate_instruction(struct kvm_vcpu *vcpu,
 
 void kvm_enable_efer_bits(u64);
 bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer);
-int kvm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr);
+int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *data);
 int kvm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr);
 
 struct x86_emulate_ctxt;
@@ -980,7 +980,7 @@ void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw);
 void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l);
 int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr);
 
-int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr);
+int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);
 int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr);
 
 unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu);
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index b5195e02c549..11d98d3f4070 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -3112,42 +3112,42 @@ static int svm_get_msr_feature(struct kvm_msr_entry *msr)
 	return 1;
 }
 
-static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
+static int svm_get_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 *data)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
 
-	switch (msr_info->index) {
+	switch (ecx) {
 	case MSR_IA32_TSC: {
-		msr_info->data = svm->vmcb->control.tsc_offset +
+		*data = svm->vmcb->control.tsc_offset +
 			svm_scale_tsc(vcpu, native_read_tsc());
 
 		break;
 	}
 	case MSR_STAR:
-		msr_info->data = svm->vmcb->save.star;
+		*data = svm->vmcb->save.star;
 		break;
 #ifdef CONFIG_X86_64
 	case MSR_LSTAR:
-		msr_info->data = svm->vmcb->save.lstar;
+		*data = svm->vmcb->save.lstar;
 		break;
 	case MSR_CSTAR:
-		msr_info->data = svm->vmcb->save.cstar;
+		*data = svm->vmcb->save.cstar;
 		break;
 	case MSR_KERNEL_GS_BASE:
-		msr_info->data = svm->vmcb->save.kernel_gs_base;
+		*data = svm->vmcb->save.kernel_gs_base;
 		break;
 	case MSR_SYSCALL_MASK:
-		msr_info->data = svm->vmcb->save.sfmask;
+		*data = svm->vmcb->save.sfmask;
 		break;
 #endif
 	case MSR_IA32_SYSENTER_CS:
-		msr_info->data = svm->vmcb->save.sysenter_cs;
+		*data = svm->vmcb->save.sysenter_cs;
 		break;
 	case MSR_IA32_SYSENTER_EIP:
-		msr_info->data = svm->sysenter_eip;
+		*data = svm->sysenter_eip;
 		break;
 	case MSR_IA32_SYSENTER_ESP:
-		msr_info->data = svm->sysenter_esp;
+		*data = svm->sysenter_esp;
 		break;
 	/*
 	 * Nobody will change the following 5 values in the VMCB so we can
@@ -3155,37 +3155,37 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 	 * implemented.
 	 */
 	case MSR_IA32_DEBUGCTLMSR:
-		msr_info->data = svm->vmcb->save.dbgctl;
+		*data = svm->vmcb->save.dbgctl;
 		break;
 	case MSR_IA32_LASTBRANCHFROMIP:
-		msr_info->data = svm->vmcb->save.br_from;
+		*data = svm->vmcb->save.br_from;
 		break;
 	case MSR_IA32_LASTBRANCHTOIP:
-		msr_info->data = svm->vmcb->save.br_to;
+		*data = svm->vmcb->save.br_to;
 		break;
 	case MSR_IA32_LASTINTFROMIP:
-		msr_info->data = svm->vmcb->save.last_excp_from;
+		*data = svm->vmcb->save.last_excp_from;
 		break;
 	case MSR_IA32_LASTINTTOIP:
-		msr_info->data = svm->vmcb->save.last_excp_to;
+		*data = svm->vmcb->save.last_excp_to;
 		break;
 	case MSR_VM_HSAVE_PA:
-		msr_info->data = svm->nested.hsave_msr;
+		*data = svm->nested.hsave_msr;
 		break;
 	case MSR_VM_CR:
-		msr_info->data = svm->nested.vm_cr_msr;
+		*data = svm->nested.vm_cr_msr;
 		break;
 	case MSR_IA32_SPEC_CTRL:
-		msr_info->data = svm->spec_ctrl;
+		*data = svm->spec_ctrl;
 		break;
 	case MSR_AMD64_VIRT_SPEC_CTRL:
-		msr_info->data = svm->virt_spec_ctrl;
+		*data = svm->virt_spec_ctrl;
 		break;
 	case MSR_IA32_UCODE_REV:
-		msr_info->data = 0x01000065;
+		*data = 0x01000065;
 		break;
 	default:
-		return kvm_get_msr_common(vcpu, msr_info);
+		return kvm_get_msr_common(vcpu, ecx, data);
 	}
 	return 0;
 }
@@ -3193,20 +3193,16 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 static int rdmsr_interception(struct vcpu_svm *svm)
 {
 	u32 ecx = kvm_register_read(&svm->vcpu, VCPU_REGS_RCX);
-	struct msr_data msr_info;
+	u64 data;
 
-	msr_info.index = ecx;
-	msr_info.host_initiated = false;
-	if (svm_get_msr(&svm->vcpu, &msr_info)) {
+	if (svm_get_msr(&svm->vcpu, ecx, &data)) {
 		trace_kvm_msr_read_ex(ecx);
 		kvm_inject_gp(&svm->vcpu, 0);
 	} else {
-		trace_kvm_msr_read(ecx, msr_info.data);
+		trace_kvm_msr_read(ecx, data);
 
-		kvm_register_write(&svm->vcpu, VCPU_REGS_RAX,
-				   msr_info.data & 0xffffffff);
-		kvm_register_write(&svm->vcpu, VCPU_REGS_RDX,
-				   msr_info.data >> 32);
+		kvm_register_write(&svm->vcpu, VCPU_REGS_RAX, data & 0xffffffff);
+		kvm_register_write(&svm->vcpu, VCPU_REGS_RDX, data >> 32);
 		svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
 		skip_emulated_instruction(&svm->vcpu);
 	}
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 9c83350297c5..293ca2e1e98f 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2808,75 +2808,82 @@ static int vmx_get_msr_feature(struct kvm_msr_entry *msr)
  * Returns 0 on success, non-0 otherwise.
  * Assumes vcpu_load() was already called.
  */
-static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
+static int vmx_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
 {
+	u64 data;
 	struct shared_msr_entry *msr;
 
-	switch (msr_info->index) {
+	if (!pdata) {
+		printk(KERN_ERR "BUG: get_msr called with NULL pdata\n");
+		return -EINVAL;
+	}
+
+	switch (msr_index) {
 #ifdef CONFIG_X86_64
 	case MSR_FS_BASE:
-		msr_info->data = vmcs_readl(GUEST_FS_BASE);
+		data = vmcs_readl(GUEST_FS_BASE);
 		break;
 	case MSR_GS_BASE:
-		msr_info->data = vmcs_readl(GUEST_GS_BASE);
+		data = vmcs_readl(GUEST_GS_BASE);
 		break;
 	case MSR_KERNEL_GS_BASE:
 		vmx_load_host_state(to_vmx(vcpu));
-		msr_info->data = to_vmx(vcpu)->msr_guest_kernel_gs_base;
+		data = to_vmx(vcpu)->msr_guest_kernel_gs_base;
 		break;
 #endif
 	case MSR_EFER:
-		return kvm_get_msr_common(vcpu, msr_info);
+		return kvm_get_msr_common(vcpu, msr_index, pdata);
 	case MSR_IA32_TSC:
-		msr_info->data = guest_read_tsc();
+		data = guest_read_tsc();
 		break;
 	case MSR_IA32_SPEC_CTRL:
-		msr_info->data = to_vmx(vcpu)->spec_ctrl;
+		data = to_vmx(vcpu)->spec_ctrl;
 		break;
 	case MSR_IA32_ARCH_CAPABILITIES:
-		msr_info->data = to_vmx(vcpu)->arch_capabilities;
+		data = to_vmx(vcpu)->arch_capabilities;
 		break;
 	case MSR_IA32_SYSENTER_CS:
-		msr_info->data = vmcs_read32(GUEST_SYSENTER_CS);
+		data = vmcs_read32(GUEST_SYSENTER_CS);
 		break;
 	case MSR_IA32_SYSENTER_EIP:
-		msr_info->data = vmcs_readl(GUEST_SYSENTER_EIP);
+		data = vmcs_readl(GUEST_SYSENTER_EIP);
 		break;
 	case MSR_IA32_SYSENTER_ESP:
-		msr_info->data = vmcs_readl(GUEST_SYSENTER_ESP);
+		data = vmcs_readl(GUEST_SYSENTER_ESP);
 		break;
 	case MSR_IA32_BNDCFGS:
 		if (!vmx_mpx_supported())
 			return 1;
-		msr_info->data = vmcs_read64(GUEST_BNDCFGS);
+		data = vmcs_read64(GUEST_BNDCFGS);
 		break;
 	case MSR_IA32_FEATURE_CONTROL:
 		if (!nested_vmx_allowed(vcpu))
 			return 1;
-		msr_info->data = to_vmx(vcpu)->nested.msr_ia32_feature_control;
+		data = to_vmx(vcpu)->nested.msr_ia32_feature_control;
 		break;
 	case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC:
 		if (!nested_vmx_allowed(vcpu))
 			return 1;
-		return vmx_get_vmx_msr(vcpu, msr_info->index, &msr_info->data);
+		return vmx_get_vmx_msr(vcpu, msr_index, pdata);
 	case MSR_IA32_XSS:
 		if (!vmx_xsaves_supported())
 			return 1;
-		msr_info->data = vcpu->arch.ia32_xss;
+		data = vcpu->arch.ia32_xss;
 		break;
 	case MSR_TSC_AUX:
 		if (!to_vmx(vcpu)->rdtscp_enabled)
 			return 1;
 		/* Otherwise falls through */
 	default:
-		msr = find_msr_entry(to_vmx(vcpu), msr_info->index);
+		msr = find_msr_entry(to_vmx(vcpu), msr_index);
 		if (msr) {
-			msr_info->data = msr->data;
+			data = msr->data;
 			break;
 		}
-		return kvm_get_msr_common(vcpu, msr_info);
+		return kvm_get_msr_common(vcpu, msr_index, pdata);
 	}
 
+	*pdata = data;
 	return 0;
 }
 
@@ -5780,21 +5787,19 @@ static int handle_cpuid(struct kvm_vcpu *vcpu)
 static int handle_rdmsr(struct kvm_vcpu *vcpu)
 {
 	u32 ecx = vcpu->arch.regs[VCPU_REGS_RCX];
-	struct msr_data msr_info;
+	u64 data;
 
-	msr_info.index = ecx;
-	msr_info.host_initiated = false;
-	if (vmx_get_msr(vcpu, &msr_info)) {
+	if (vmx_get_msr(vcpu, ecx, &data)) {
 		trace_kvm_msr_read_ex(ecx);
 		kvm_inject_gp(vcpu, 0);
 		return 1;
 	}
 
-	trace_kvm_msr_read(ecx, msr_info.data);
+	trace_kvm_msr_read(ecx, data);
 
 	/* FIXME: handling of bits 32:63 of rax, rdx */
-	vcpu->arch.regs[VCPU_REGS_RAX] = msr_info.data & -1u;
-	vcpu->arch.regs[VCPU_REGS_RDX] = (msr_info.data >> 32) & -1u;
+	vcpu->arch.regs[VCPU_REGS_RAX] = data & -1u;
+	vcpu->arch.regs[VCPU_REGS_RDX] = (data >> 32) & -1u;
 	skip_emulated_instruction(vcpu);
 	return 1;
 }
@@ -9284,7 +9289,6 @@ static int nested_vmx_store_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
 	struct vmx_msr_entry e;
 
 	for (i = 0; i < count; i++) {
-		struct msr_data msr_info;
 		if (kvm_read_guest(vcpu->kvm,
 				   gpa + i * sizeof(e),
 				   &e, 2 * sizeof(u32))) {
@@ -9299,9 +9303,7 @@ static int nested_vmx_store_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
 				__func__, i, e.index, e.reserved);
 			return -EINVAL;
 		}
-		msr_info.host_initiated = false;
-		msr_info.index = e.index;
-		if (kvm_get_msr(vcpu, &msr_info)) {
+		if (kvm_get_msr(vcpu, e.index, &e.value)) {
 			pr_warn_ratelimited(
 				"%s cannot read MSR (%u, 0x%x)\n",
 				__func__, i, e.index);
@@ -9310,10 +9312,10 @@ static int nested_vmx_store_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
 		if (kvm_write_guest(vcpu->kvm,
 				    gpa + i * sizeof(e) +
 					offsetof(struct vmx_msr_entry, value),
-				    &msr_info.data, sizeof(msr_info.data))) {
+				    &e.value, sizeof(e.value))) {
 			pr_warn_ratelimited(
 				"%s cannot write MSR (%u, 0x%x, 0x%llx)\n",
-				__func__, i, e.index, msr_info.data);
+				__func__, i, e.index, e.value);
 			return -EINVAL;
 		}
 	}
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 76299d88381d..64835d030e7a 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1122,21 +1122,6 @@ EXPORT_SYMBOL_GPL(kvm_set_msr);
 /*
  * Adapt set_msr() to msr_io()'s calling convention
  */
-static int do_get_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
-{
-	struct msr_data msr;
-	int r;
-
-	msr.index = index;
-	msr.host_initiated = true;
-	r = kvm_get_msr(vcpu, &msr);
-	if (r)
-		return r;
-
-	*data = msr.data;
-	return 0;
-}
-
 static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
 {
 	struct msr_data msr;
@@ -2480,9 +2465,9 @@ EXPORT_SYMBOL_GPL(kvm_set_msr_common);
  * Returns 0 on success, non-0 otherwise.
  * Assumes vcpu_load() was already called.
 */
-int kvm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
+int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
 {
-	return kvm_x86_ops->get_msr(vcpu, msr);
+	return kvm_x86_ops->get_msr(vcpu, msr_index, pdata);
 }
 EXPORT_SYMBOL_GPL(kvm_get_msr);
 
@@ -2619,11 +2604,11 @@ static int get_msr_hyperv(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
 	return 0;
 }
 
-int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
+int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
 {
 	u64 data;
 
-	switch (msr_info->index) {
+	switch (msr) {
 	case MSR_IA32_PLATFORM_ID:
 	case MSR_IA32_EBL_CR_POWERON:
 	case MSR_IA32_DEBUGCTLMSR:
@@ -2646,26 +2631,26 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 	case MSR_AMD64_NB_CFG:
 	case MSR_FAM10H_MMIO_CONF_BASE:
 	case MSR_AMD64_BU_CFG2:
-		msr_info->data = 0;
+		data = 0;
 		break;
 	case MSR_P6_PERFCTR0:
 	case MSR_P6_PERFCTR1:
 	case MSR_P6_EVNTSEL0:
 	case MSR_P6_EVNTSEL1:
-		if (kvm_pmu_msr(vcpu, msr_info->index))
-			return kvm_pmu_get_msr(vcpu, msr_info->index, &msr_info->data);
-		msr_info->data = 0;
+		if (kvm_pmu_msr(vcpu, msr))
+			return kvm_pmu_get_msr(vcpu, msr, pdata);
+		data = 0;
 		break;
 	case MSR_IA32_UCODE_REV:
-		msr_info->data = 0x100000000ULL;
+		data = 0x100000000ULL;
		break;
 	case MSR_MTRRcap:
-		msr_info->data = 0x500 | KVM_NR_VAR_MTRR;
+		data = 0x500 | KVM_NR_VAR_MTRR;
 		break;
 	case 0x200 ... 0x2ff:
-		return get_msr_mtrr(vcpu, msr_info->index, &msr_info->data);
+		return get_msr_mtrr(vcpu, msr, pdata);
 	case 0xcd: /* fsb frequency */
-		msr_info->data = 3;
+		data = 3;
 		break;
 		/*
 		 * MSR_EBC_FREQUENCY_ID
@@ -2679,48 +2664,48 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		 * multiplying by zero otherwise.
 		 */
 	case MSR_EBC_FREQUENCY_ID:
-		msr_info->data = 1 << 24;
+		data = 1 << 24;
 		break;
 	case MSR_IA32_APICBASE:
-		msr_info->data = kvm_get_apic_base(vcpu);
+		data = kvm_get_apic_base(vcpu);
 		break;
 	case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff:
-		return kvm_x2apic_msr_read(vcpu, msr_info->index, &msr_info->data);
+		return kvm_x2apic_msr_read(vcpu, msr, pdata);
 		break;
 	case MSR_IA32_TSCDEADLINE:
-		msr_info->data = kvm_get_lapic_tscdeadline_msr(vcpu);
+		data = kvm_get_lapic_tscdeadline_msr(vcpu);
 		break;
 	case MSR_IA32_TSC_ADJUST:
-		msr_info->data = (u64)vcpu->arch.ia32_tsc_adjust_msr;
+		data = (u64)vcpu->arch.ia32_tsc_adjust_msr;
 		break;
 	case MSR_IA32_MISC_ENABLE:
-		msr_info->data = vcpu->arch.ia32_misc_enable_msr;
+		data = vcpu->arch.ia32_misc_enable_msr;
 		break;
 	case MSR_IA32_PERF_STATUS:
 		/* TSC increment by tick */
-		msr_info->data = 1000ULL;
+		data = 1000ULL;
 		/* CPU multiplier */
 		data |= (((uint64_t)4ULL) << 40);
 		break;
 	case MSR_EFER:
-		msr_info->data = vcpu->arch.efer;
+		data = vcpu->arch.efer;
 		break;
 	case MSR_KVM_WALL_CLOCK:
 	case MSR_KVM_WALL_CLOCK_NEW:
-		msr_info->data = vcpu->kvm->arch.wall_clock;
+		data = vcpu->kvm->arch.wall_clock;
 		break;
 	case MSR_KVM_SYSTEM_TIME:
 	case MSR_KVM_SYSTEM_TIME_NEW:
-		msr_info->data = vcpu->arch.time;
+		data = vcpu->arch.time;
 		break;
 	case MSR_KVM_ASYNC_PF_EN:
-		msr_info->data = vcpu->arch.apf.msr_val;
+		data = vcpu->arch.apf.msr_val;
 		break;
 	case MSR_KVM_STEAL_TIME:
-		msr_info->data = vcpu->arch.st.msr_val;
+		data = vcpu->arch.st.msr_val;
 		break;
 	case MSR_KVM_PV_EOI_EN:
-		msr_info->data = vcpu->arch.pv_eoi.msr_val;
+		data = vcpu->arch.pv_eoi.msr_val;
 		break;
 	case MSR_IA32_P5_MC_ADDR:
 	case MSR_IA32_P5_MC_TYPE:
@@ -2728,7 +2713,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 	case MSR_IA32_MCG_CTL:
 	case MSR_IA32_MCG_STATUS:
 	case MSR_IA32_MC0_CTL ... MSR_IA32_MCx_CTL(KVM_MAX_MCE_BANKS) - 1:
-		return get_msr_mce(vcpu, msr_info->index, &msr_info->data);
+		return get_msr_mce(vcpu, msr, pdata);
 	case MSR_K7_CLK_CTL:
 		/*
 		 * Provide expected ramp-up count for K7. All other
@@ -2739,17 +2724,17 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		 * type 6, model 8 and higher from exploding due to
 		 * the rdmsr failing.
 		 */
-		msr_info->data = 0x20000000;
+		data = 0x20000000;
 		break;
 	case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15:
-		if (kvm_hv_msr_partition_wide(msr_info->index)) {
+		if (kvm_hv_msr_partition_wide(msr)) {
 			int r;
 			mutex_lock(&vcpu->kvm->lock);
-			r = get_msr_hyperv_pw(vcpu, msr_info->index, &msr_info->data);
+			r = get_msr_hyperv_pw(vcpu, msr, pdata);
 			mutex_unlock(&vcpu->kvm->lock);
 			return r;
 		} else
-			return get_msr_hyperv(vcpu, msr_info->index, &msr_info->data);
+			return get_msr_hyperv(vcpu, msr, pdata);
 		break;
 	case MSR_IA32_BBL_CR_CTL3:
 		/* This legacy MSR exists but isn't fully documented in current
@@ -2762,31 +2747,32 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		 * L2 cache control register 3: 64GB range, 256KB size,
 		 * enabled, latency 0x1, configured
 		 */
-		msr_info->data = 0xbe702111;
+		data = 0xbe702111;
 		break;
 	case MSR_AMD64_OSVW_ID_LENGTH:
 		if (!guest_cpuid_has_osvw(vcpu))
 			return 1;
-		msr_info->data = vcpu->arch.osvw.length;
+		data = vcpu->arch.osvw.length;
 		break;
 	case MSR_AMD64_OSVW_STATUS:
 		if (!guest_cpuid_has_osvw(vcpu))
 			return 1;
-		msr_info->data = vcpu->arch.osvw.status;
+		data = vcpu->arch.osvw.status;
 		break;
 	default:
-		if (kvm_pmu_msr(vcpu, msr_info->index))
-			return kvm_pmu_get_msr(vcpu, msr_info->index, &msr_info->data);
+		if (kvm_pmu_msr(vcpu, msr))
+			return kvm_pmu_get_msr(vcpu, msr, pdata);
 		if (!ignore_msrs) {
 			vcpu_debug_ratelimited(vcpu, "unhandled rdmsr: 0x%x\n",
-					       msr_info->index);
+					       msr);
 			return 1;
 		} else {
-			vcpu_unimpl(vcpu, "ignored rdmsr: 0x%x\n", msr_info->index);
-			msr_info->data = 0;
+			vcpu_unimpl(vcpu, "ignored rdmsr: 0x%x\n", msr);
+			data = 0;
 		}
 		break;
 	}
+	*pdata = data;
 	return 0;
 }
 EXPORT_SYMBOL_GPL(kvm_get_msr_common);
@@ -3585,7 +3571,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
 	}
 	case KVM_GET_MSRS: {
 		int idx = srcu_read_lock(&vcpu->kvm->srcu);
-		r = msr_io(vcpu, argp, do_get_msr, 1);
+		r = msr_io(vcpu, argp, kvm_get_msr, 1);
 		srcu_read_unlock(&vcpu->kvm->srcu, idx);
 		break;
 	}
@@ -5101,17 +5087,7 @@ static void emulator_set_segment(struct x86_emulate_ctxt *ctxt, u16 selector,
 static int emulator_get_msr(struct x86_emulate_ctxt *ctxt,
 			    u32 msr_index, u64 *pdata)
 {
-	struct msr_data msr;
-	int r;
-
-	msr.index = msr_index;
-	msr.host_initiated = false;
-	r = kvm_get_msr(emul_to_vcpu(ctxt), &msr);
-	if (r)
-		return r;
-
-	*pdata = msr.data;
-	return 0;
+	return kvm_get_msr(emul_to_vcpu(ctxt), msr_index, pdata);
 }
 
 static int emulator_set_msr(struct x86_emulate_ctxt *ctxt,