struct msr_bitmap_range ranges[16];
 };
 
/*
 * Reasons for inhibiting APIC virtualization (APICv).  Each value is a
 * bit index into kvm->arch.apicv_inhibit_reasons (see BIT()/__set_bit()
 * usage in the check and update paths); APICv is active only while no
 * inhibit bit is set.  Values must stay stable: they match the previous
 * APICV_INHIBIT_REASON_* #defines (DISABLE==0 ... ABSENT==7) and appear
 * in tracepoints consumed by userspace tooling.
 */
enum kvm_apicv_inhibit {
	APICV_INHIBIT_REASON_DISABLE,
	APICV_INHIBIT_REASON_HYPERV,
	APICV_INHIBIT_REASON_NESTED,
	APICV_INHIBIT_REASON_IRQWIN,
	APICV_INHIBIT_REASON_PIT_REINJ,
	APICV_INHIBIT_REASON_X2APIC,
	APICV_INHIBIT_REASON_BLOCKIRQ,
	APICV_INHIBIT_REASON_ABSENT,
};
 
 struct kvm_arch {
        unsigned long n_used_mmu_pages;
        void (*enable_nmi_window)(struct kvm_vcpu *vcpu);
        void (*enable_irq_window)(struct kvm_vcpu *vcpu);
        void (*update_cr8_intercept)(struct kvm_vcpu *vcpu, int tpr, int irr);
-       bool (*check_apicv_inhibit_reasons)(ulong bit);
+       bool (*check_apicv_inhibit_reasons)(enum kvm_apicv_inhibit reason);
        void (*refresh_apicv_exec_ctrl)(struct kvm_vcpu *vcpu);
        void (*hwapic_irr_update)(struct kvm_vcpu *vcpu, int max_irr);
        void (*hwapic_isr_update)(struct kvm_vcpu *vcpu, int isr);
/* APICv activation state and inhibit-reason management. */
bool kvm_apicv_activated(struct kvm *kvm);
void kvm_vcpu_update_apicv(struct kvm_vcpu *vcpu);
/* Acquires kvm->arch.apicv_update_lock for write on the caller's behalf. */
void kvm_request_apicv_update(struct kvm *kvm, bool activate,
			      enum kvm_apicv_inhibit reason);
/* Caller must hold kvm->arch.apicv_update_lock for write. */
void __kvm_request_apicv_update(struct kvm *kvm, bool activate,
				enum kvm_apicv_inhibit reason);

int kvm_emulate_hypercall(struct kvm_vcpu *vcpu);
 
        return ret;
 }
 
-bool avic_check_apicv_inhibit_reasons(ulong bit)
+bool avic_check_apicv_inhibit_reasons(enum kvm_apicv_inhibit reason)
 {
        ulong supported = BIT(APICV_INHIBIT_REASON_DISABLE) |
                          BIT(APICV_INHIBIT_REASON_ABSENT) |
                          BIT(APICV_INHIBIT_REASON_X2APIC) |
                          BIT(APICV_INHIBIT_REASON_BLOCKIRQ);
 
-       return supported & BIT(bit);
+       return supported & BIT(reason);
 }
 
 
 
/* AVIC (SVM APIC virtualization) hooks wired into kvm_x86_ops. */
void avic_apicv_post_state_restore(struct kvm_vcpu *vcpu);
void avic_set_virtual_apic_mode(struct kvm_vcpu *vcpu);
void avic_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu);
bool avic_check_apicv_inhibit_reasons(enum kvm_apicv_inhibit reason);
void avic_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr);
void avic_hwapic_isr_update(struct kvm_vcpu *vcpu, int max_isr);
bool avic_dy_apicv_has_pending_interrupt(struct kvm_vcpu *vcpu);
 
 );
 
 TRACE_EVENT(kvm_apicv_update_request,
-           TP_PROTO(bool activate, unsigned long bit),
-           TP_ARGS(activate, bit),
+           TP_PROTO(bool activate, int reason),
+           TP_ARGS(activate, reason),
 
        TP_STRUCT__entry(
                __field(bool, activate)
-               __field(unsigned long, bit)
+               __field(int, reason)
        ),
 
        TP_fast_assign(
                __entry->activate = activate;
-               __entry->bit = bit;
+               __entry->reason = reason;
        ),
 
-       TP_printk("%s bit=%lu",
+       TP_printk("%s reason=%u",
                  __entry->activate ? "activate" : "deactivate",
-                 __entry->bit)
+                 __entry->reason)
 );
 
 TRACE_EVENT(kvm_apicv_accept_irq,
 
        free_kvm_area();
 }
 
-static bool vmx_check_apicv_inhibit_reasons(ulong bit)
+static bool vmx_check_apicv_inhibit_reasons(enum kvm_apicv_inhibit reason)
 {
        ulong supported = BIT(APICV_INHIBIT_REASON_DISABLE) |
                          BIT(APICV_INHIBIT_REASON_ABSENT) |
                          BIT(APICV_INHIBIT_REASON_HYPERV) |
                          BIT(APICV_INHIBIT_REASON_BLOCKIRQ);
 
-       return supported & BIT(bit);
+       return supported & BIT(reason);
 }
 
 static struct kvm_x86_ops vmx_x86_ops __initdata = {
 
 }
 EXPORT_SYMBOL_GPL(kvm_vcpu_update_apicv);
 
-void __kvm_request_apicv_update(struct kvm *kvm, bool activate, ulong bit)
+void __kvm_request_apicv_update(struct kvm *kvm, bool activate,
+                               enum kvm_apicv_inhibit reason)
 {
        unsigned long old, new;
 
        lockdep_assert_held_write(&kvm->arch.apicv_update_lock);
 
-       if (!static_call(kvm_x86_check_apicv_inhibit_reasons)(bit))
+       if (!static_call(kvm_x86_check_apicv_inhibit_reasons)(reason))
                return;
 
        old = new = kvm->arch.apicv_inhibit_reasons;
 
        if (activate)
-               __clear_bit(bit, &new);
+               __clear_bit(reason, &new);
        else
-               __set_bit(bit, &new);
+               __set_bit(reason, &new);
 
        if (!!old != !!new) {
-               trace_kvm_apicv_update_request(activate, bit);
+               trace_kvm_apicv_update_request(activate, reason);
                /*
                 * Kick all vCPUs before setting apicv_inhibit_reasons to avoid
                 * false positives in the sanity check WARN in svm_vcpu_run().
                        unsigned long gfn = gpa_to_gfn(APIC_DEFAULT_PHYS_BASE);
                        kvm_zap_gfn_range(kvm, gfn, gfn+1);
                }
-       } else
+       } else {
                kvm->arch.apicv_inhibit_reasons = new;
+       }
 }
 
-void kvm_request_apicv_update(struct kvm *kvm, bool activate, ulong bit)
+void kvm_request_apicv_update(struct kvm *kvm, bool activate,
+                             enum kvm_apicv_inhibit reason)
 {
        if (!enable_apicv)
                return;
 
        down_write(&kvm->arch.apicv_update_lock);
-       __kvm_request_apicv_update(kvm, activate, bit);
+       __kvm_request_apicv_update(kvm, activate, reason);
        up_write(&kvm->arch.apicv_update_lock);
 }
 EXPORT_SYMBOL_GPL(kvm_request_apicv_update);