gpa_t wall_clock;
 
-       bool mwait_in_guest;
-       bool hlt_in_guest;
-       bool pause_in_guest;
-       bool cstate_in_guest;
+       u64 disabled_exits;
 
        unsigned long irq_sources_bitmap;
        s64 kvmclock_offset;
 
        }
 
        if (!pause_filter_count || !pause_filter_thresh)
-               kvm->arch.pause_in_guest = true;
+               kvm_disable_exits(kvm, KVM_X86_DISABLE_EXITS_PAUSE);
 
        if (enable_apicv) {
                int ret = avic_vm_init(kvm);
 
 int vmx_vm_init(struct kvm *kvm)
 {
        if (!ple_gap)
-               kvm->arch.pause_in_guest = true;
+               kvm_disable_exits(kvm, KVM_X86_DISABLE_EXITS_PAUSE);
 
        if (boot_cpu_has(X86_BUG_L1TF) && enable_ept) {
                switch (l1tf_mitigation) {
 
                    (cap->args[0] & ~KVM_X86_DISABLE_EXITS_PAUSE))
                        pr_warn_once(SMT_RSB_MSG);
 
-               if (cap->args[0] & KVM_X86_DISABLE_EXITS_PAUSE)
-                       kvm->arch.pause_in_guest = true;
-               if (cap->args[0] & KVM_X86_DISABLE_EXITS_MWAIT)
-                       kvm->arch.mwait_in_guest = true;
-               if (cap->args[0] & KVM_X86_DISABLE_EXITS_HLT)
-                       kvm->arch.hlt_in_guest = true;
-               if (cap->args[0] & KVM_X86_DISABLE_EXITS_CSTATE)
-                       kvm->arch.cstate_in_guest = true;
+               kvm_disable_exits(kvm, cap->args[0]);
                r = 0;
 disable_exits_unlock:
                mutex_unlock(&kvm->lock);
 
            __rem;                                              \
         })
 
+/*
+ * Accumulate userspace-requested exit-disable flags
+ * (KVM_X86_DISABLE_EXITS_*) into the per-VM bitmask.  Bits are only
+ * ever OR-ed in, so a disabled exit can never be re-enabled for the
+ * lifetime of the VM.  NOTE(review): @mask is stored unfiltered;
+ * callers must have already validated it against the supported
+ * KVM_X86_DISABLE_EXITS_* bits -- confirm at each call site, the
+ * validation is not visible in this hunk.
+ */
+static inline void kvm_disable_exits(struct kvm *kvm, u64 mask)
+{
+       kvm->arch.disabled_exits |= mask;
+}
+
+/* True if MWAIT/MONITOR intercepts are disabled for this VM. */
 static inline bool kvm_mwait_in_guest(struct kvm *kvm)
 {
-       return kvm->arch.mwait_in_guest;
+       return kvm->arch.disabled_exits & KVM_X86_DISABLE_EXITS_MWAIT;
 }
 
+/* True if HLT intercepts are disabled for this VM. */
 static inline bool kvm_hlt_in_guest(struct kvm *kvm)
 {
-       return kvm->arch.hlt_in_guest;
+       return kvm->arch.disabled_exits & KVM_X86_DISABLE_EXITS_HLT;
 }
 
+/*
+ * True if PAUSE intercepts are disabled for this VM (set either by
+ * userspace via KVM_CAP_X86_DISABLE_EXITS or by svm/vmx init when
+ * pause filtering / PLE is unavailable).
+ */
 static inline bool kvm_pause_in_guest(struct kvm *kvm)
 {
-       return kvm->arch.pause_in_guest;
+       return kvm->arch.disabled_exits & KVM_X86_DISABLE_EXITS_PAUSE;
 }
 
+/* True if C-state (MWAIT hint) exits are disabled for this VM. */
 static inline bool kvm_cstate_in_guest(struct kvm *kvm)
 {
-       return kvm->arch.cstate_in_guest;
+       return kvm->arch.disabled_exits & KVM_X86_DISABLE_EXITS_CSTATE;
 }
 
 static inline bool kvm_notify_vmexit_enabled(struct kvm *kvm)