/* Update OSXSAVE bit */
                if (boot_cpu_has(X86_FEATURE_XSAVE))
                        cpuid_entry_change(best, X86_FEATURE_OSXSAVE,
-                                  kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE));
+                                          kvm_is_cr4_bit_set(vcpu, X86_CR4_OSXSAVE));
 
                cpuid_entry_change(best, X86_FEATURE_APIC,
                           vcpu->arch.apic_base & MSR_IA32_APICBASE_ENABLE);
        best = cpuid_entry2_find(entries, nent, 7, 0);
        if (best && boot_cpu_has(X86_FEATURE_PKU) && best->function == 0x7)
                cpuid_entry_change(best, X86_FEATURE_OSPKE,
-                                  kvm_read_cr4_bits(vcpu, X86_CR4_PKE));
+                                  kvm_is_cr4_bit_set(vcpu, X86_CR4_PKE));
 
        best = cpuid_entry2_find(entries, nent, 0xD, 0);
        if (best)
 
        return vcpu->arch.cr0 & mask;
 }
 
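+/*
+ * Test a single CR0 bit.  BUILD_BUG_ON() rejects masks with zero or
+ * multiple bits set, for which "is the bit set?" would be ambiguous;
+ * use kvm_read_cr0_bits() for multi-bit queries.
+ */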
+static __always_inline bool kvm_is_cr0_bit_set(struct kvm_vcpu *vcpu,
+                                              unsigned long cr0_bit)
+{
+       BUILD_BUG_ON(!is_power_of_2(cr0_bit));
+
+       return !!kvm_read_cr0_bits(vcpu, cr0_bit);
+}
+
 static inline ulong kvm_read_cr0(struct kvm_vcpu *vcpu)
 {
        return kvm_read_cr0_bits(vcpu, ~0UL);
 }

        return vcpu->arch.cr4 & mask;
 }
 
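+/*
+ * Test a single CR4 bit.  As with kvm_is_cr0_bit_set(), the mask must
+ * have exactly one bit set, enforced at compile time.
+ */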
+static __always_inline bool kvm_is_cr4_bit_set(struct kvm_vcpu *vcpu,
+                                              unsigned long cr4_bit)
+{
+       BUILD_BUG_ON(!is_power_of_2(cr4_bit));
+
+       return !!kvm_read_cr4_bits(vcpu, cr4_bit);
+}
+
 static inline ulong kvm_read_cr3(struct kvm_vcpu *vcpu)
 {
        if (!kvm_register_is_available(vcpu, VCPU_EXREG_CR3))
 
 {
        BUILD_BUG_ON((X86_CR3_PCID_MASK & PAGE_MASK) != 0);
 
-       return kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE)
+       return kvm_is_cr4_bit_set(vcpu, X86_CR4_PCIDE)
               ? cr3 & X86_CR3_PCID_MASK
               : 0;
 }
 
        if (!pmc)
                return 1;
 
-       if (!(kvm_read_cr4_bits(vcpu, X86_CR4_PCE)) &&
+       if (!kvm_is_cr4_bit_set(vcpu, X86_CR4_PCE) &&
            (static_call(kvm_x86_get_cpl)(vcpu) != 0) &&
-           (kvm_read_cr0_bits(vcpu, X86_CR0_PE)))
+           kvm_is_cr0_bit_set(vcpu, X86_CR0_PE))
                return 1;
 
        *data = pmc_read_counter(pmc) & mask;
 
         * does force CR0.PE=1, but only to also force VM86 in order to emulate
         * Real Mode, and so there's no need to check CR0.PE manually.
         */
-       if (!kvm_read_cr4_bits(vcpu, X86_CR4_VMXE)) {
+       if (!kvm_is_cr4_bit_set(vcpu, X86_CR4_VMXE)) {
                kvm_queue_exception(vcpu, UD_VECTOR);
                return 1;
        }
 
        if (!boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT))
                return true;
 
-       return vmx_get_cpl(vcpu) == 3 && kvm_read_cr0_bits(vcpu, X86_CR0_AM) &&
+       return vmx_get_cpl(vcpu) == 3 && kvm_is_cr0_bit_set(vcpu, X86_CR0_AM) &&
               (kvm_get_rflags(vcpu) & X86_EFLAGS_AC);
 }
 
 bool kvm_require_dr(struct kvm_vcpu *vcpu, int dr)
 {
-       if ((dr != 4 && dr != 5) || !kvm_read_cr4_bits(vcpu, X86_CR4_DE))
+       if ((dr != 4 && dr != 5) || !kvm_is_cr4_bit_set(vcpu, X86_CR4_DE))
                return true;
 
        kvm_queue_exception(vcpu, UD_VECTOR);
                return 1;
 
        if (!(cr0 & X86_CR0_PG) &&
-           (is_64_bit_mode(vcpu) || kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE)))
+           (is_64_bit_mode(vcpu) || kvm_is_cr4_bit_set(vcpu, X86_CR4_PCIDE)))
                return 1;
 
        static_call(kvm_x86_set_cr0)(vcpu, cr0);
        if (vcpu->arch.guest_state_protected)
                return;
 
-       if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE)) {
+       if (kvm_is_cr4_bit_set(vcpu, X86_CR4_OSXSAVE)) {
 
                if (vcpu->arch.xcr0 != host_xcr0)
                        xsetbv(XCR_XFEATURE_ENABLED_MASK, vcpu->arch.xcr0);
        if (static_cpu_has(X86_FEATURE_PKU) &&
            vcpu->arch.pkru != vcpu->arch.host_pkru &&
            ((vcpu->arch.xcr0 & XFEATURE_MASK_PKRU) ||
-            kvm_read_cr4_bits(vcpu, X86_CR4_PKE)))
+            kvm_is_cr4_bit_set(vcpu, X86_CR4_PKE)))
                write_pkru(vcpu->arch.pkru);
 #endif /* CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS */
 }
 #ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
        if (static_cpu_has(X86_FEATURE_PKU) &&
            ((vcpu->arch.xcr0 & XFEATURE_MASK_PKRU) ||
-            kvm_read_cr4_bits(vcpu, X86_CR4_PKE))) {
+            kvm_is_cr4_bit_set(vcpu, X86_CR4_PKE))) {
                vcpu->arch.pkru = rdpkru();
                if (vcpu->arch.pkru != vcpu->arch.host_pkru)
                        write_pkru(vcpu->arch.host_pkru);
        }
 #endif /* CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS */
 
-       if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE)) {
+       if (kvm_is_cr4_bit_set(vcpu, X86_CR4_OSXSAVE)) {
 
                if (vcpu->arch.xcr0 != host_xcr0)
                        xsetbv(XCR_XFEATURE_ENABLED_MASK, host_xcr0);
         * PCIDs for them are also 0, because MOV to CR3 always flushes the TLB
         * with PCIDE=0.
         */
-       if (!kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE))
+       if (!kvm_is_cr4_bit_set(vcpu, X86_CR4_PCIDE))
                return;
 
        for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
        bool skip_tlb_flush = false;
        unsigned long pcid = 0;
 #ifdef CONFIG_X86_64
-       bool pcid_enabled = kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE);
-
-       if (pcid_enabled) {
+       if (kvm_is_cr4_bit_set(vcpu, X86_CR4_PCIDE)) {
                skip_tlb_flush = cr3 & X86_CR3_PCID_NOFLUSH;
                cr3 &= ~X86_CR3_PCID_NOFLUSH;
                pcid = cr3 & X86_CR3_PCID_MASK;
                return 0;
        if (mce->status & MCI_STATUS_UC) {
                if ((vcpu->arch.mcg_status & MCG_STATUS_MCIP) ||
-                   !kvm_read_cr4_bits(vcpu, X86_CR4_MCE)) {
+                   !kvm_is_cr4_bit_set(vcpu, X86_CR4_MCE)) {
                        kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
                        return 0;
                }
                return 1;
        }
 
-       pcid_enabled = kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE);
+       pcid_enabled = kvm_is_cr4_bit_set(vcpu, X86_CR4_PCIDE);
 
        switch (type) {
        case INVPCID_TYPE_INDIV_ADDR:
 
 static inline bool is_protmode(struct kvm_vcpu *vcpu)
 {
-       return kvm_read_cr0_bits(vcpu, X86_CR0_PE);
+       return kvm_is_cr0_bit_set(vcpu, X86_CR0_PE);
 }
 
 static inline int is_long_mode(struct kvm_vcpu *vcpu)
 
 static inline u8 vcpu_virt_addr_bits(struct kvm_vcpu *vcpu)
 {
-       return kvm_read_cr4_bits(vcpu, X86_CR4_LA57) ? 57 : 48;
+       return kvm_is_cr4_bit_set(vcpu, X86_CR4_LA57) ? 57 : 48;
 }
 
 static inline bool is_noncanonical_address(u64 la, struct kvm_vcpu *vcpu)