				r |= KVM_X86_DISABLE_EXITS_MWAIT;
		}
		break;
+	case KVM_CAP_X86_USERSPACE_EXITS:
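+		/* Report which exit bits the kernel can forward to userspace. */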
+		r = KVM_X86_USERSPACE_VALID_EXITS;
+		break;
	case KVM_CAP_X86_SMM:
		if (!IS_ENABLED(CONFIG_KVM_SMM))
			break;
				kvm->arch.cstate_in_guest = true;
		}
+		r = 0;
+		break;
+	case KVM_CAP_X86_USERSPACE_EXITS:
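+		/* Only bits in KVM_X86_USERSPACE_VALID_EXITS may be enabled. */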
+		r = -EINVAL;
+		if (cap->args[0] & ~KVM_X86_USERSPACE_VALID_EXITS)
+			break;
+
+		kvm->arch.userspace_exits = cap->args[0];
		r = 0;
		break;
	case KVM_CAP_MSR_PLATFORM_INFO:
	++vcpu->stat.halt_exits;
	if (lapic_in_kernel(vcpu)) {
		vcpu->arch.mp_state = state;
-		return 1;
-	} else {
-		vcpu->run->exit_reason = reason;
-		return 0;
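+		/*
+		 * Even with an in-kernel local APIC, exit to userspace on HLT
+		 * when userspace has opted in via KVM_CAP_X86_USERSPACE_EXITS.
+		 */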
+		if (reason != KVM_EXIT_HLT ||
+		    !(vcpu->kvm->arch.userspace_exits & KVM_X86_USERSPACE_EXIT_HLT))
+			return 1;
	}
+	vcpu->run->exit_reason = reason;
+	return 0;
}
int kvm_emulate_halt_noskip(struct kvm_vcpu *vcpu)
KVM_X86_DISABLE_EXITS_PAUSE | \
KVM_X86_DISABLE_EXITS_CSTATE)
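+/* args[0] flags for KVM_CAP_X86_USERSPACE_EXITS; see KVM_X86_USERSPACE_VALID_EXITS */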
+#define KVM_X86_USERSPACE_EXIT_MWAIT (1 << 0)
+#define KVM_X86_USERSPACE_EXIT_HLT (1 << 1)
+#define KVM_X86_USERSPACE_VALID_EXITS (KVM_X86_USERSPACE_EXIT_HLT)
+
/* for KVM_ENABLE_CAP */
struct kvm_enable_cap {
/* in */
#define KVM_CAP_COUNTER_OFFSET 227
#define KVM_CAP_ARM_EAGER_SPLIT_CHUNK_SIZE 228
#define KVM_CAP_ARM_SUPPORTED_BLOCK_SIZES 229
+#define KVM_CAP_X86_USERSPACE_EXITS 230
#ifdef KVM_CAP_IRQ_ROUTING
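
Below is a minimal userspace sketch of how a VMM could opt in to HLT forwarding
with the new capability. The vm_fd handle, the helper name, and the error
handling are illustrative; KVM_CHECK_EXTENSION, KVM_ENABLE_CAP, and struct
kvm_enable_cap are existing KVM UAPI, while KVM_CAP_X86_USERSPACE_EXITS and
KVM_X86_USERSPACE_EXIT_HLT come from this patch:

#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Enable HLT forwarding on a VM fd if the kernel advertises support. */
static int enable_hlt_exits(int vm_fd)
{
	struct kvm_enable_cap cap = {
		.cap = KVM_CAP_X86_USERSPACE_EXITS,
		.args[0] = KVM_X86_USERSPACE_EXIT_HLT,
	};
	int supported;

	supported = ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_X86_USERSPACE_EXITS);
	if (supported <= 0 || !(supported & KVM_X86_USERSPACE_EXIT_HLT))
		return -1;

	return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
}

Once enabled, a guest HLT surfaces to the VMM as a KVM_RUN exit with
run->exit_reason == KVM_EXIT_HLT even when the local APIC is emulated in the
kernel.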