KVM: x86: Replace static_call_cond() with static_call()
author     Wei Wang <wei.w.wang@intel.com>
           Tue, 7 May 2024 13:31:01 +0000 (21:31 +0800)
committer  Paolo Bonzini <pbonzini@redhat.com>
           Tue, 16 Jul 2024 16:14:11 +0000 (12:14 -0400)
On x86, static_call_cond() is now functionally identical to
static_call(), as static_call() handles a NULL function pointer as a
NOP.  Replace static_call_cond() with static_call() to simplify the
code.
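
For context, a minimal sketch of the equivalence this relies on; the op
name (example_call), hook, and caller below are hypothetical, used only
for illustration, not actual kvm_x86_ops members.  A static call defined
with DEFINE_STATIC_CALL_NULL() starts with a NULL target, and on x86 a
NULL-target call site is patched to a NOP, so plain static_call() is
just as safe as static_call_cond() for optional, void-returning hooks:

        #include <linux/static_call.h>

        /* Hypothetical optional hook, for illustration only. */
        static void example_hook(int vec)
        {
        }

        /* Target starts out NULL; on x86 the call site becomes a NOP. */
        DEFINE_STATIC_CALL_NULL(example_call, example_hook);

        static void example_caller(int vec)
        {
                /* Old form: explicit "conditional" call for a possibly-NULL op. */
                static_call_cond(example_call)(vec);

                /* New form: equivalent on x86, still a NOP while the target is NULL. */
                static_call(example_call)(vec);
        }

A backend installs its implementation later, e.g. with
static_call_update(example_call, example_hook); until then, both call
sites above execute as a NOP instead of dereferencing a NULL pointer.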

Link: https://lore.kernel.org/all/3916caa1dcd114301a49beafa5030eca396745c1.1679456900.git.jpoimboe@kernel.org/
Suggested-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Wei Wang <wei.w.wang@intel.com>
Link: https://lore.kernel.org/r/20240507133103.15052-2-wei.w.wang@intel.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/include/asm/kvm-x86-ops.h
arch/x86/include/asm/kvm-x86-pmu-ops.h
arch/x86/include/asm/kvm_host.h
arch/x86/kvm/irq.c
arch/x86/kvm/lapic.c
arch/x86/kvm/pmu.c
arch/x86/kvm/x86.c

diff --git a/arch/x86/include/asm/kvm-x86-ops.h b/arch/x86/include/asm/kvm-x86-ops.h
index 26e73d046569fae9a8261d9c8237d4c2941cdd63..68ad4f923664e2de83d0ec9def9cfdeec6fb4fd0 100644
--- a/arch/x86/include/asm/kvm-x86-ops.h
+++ b/arch/x86/include/asm/kvm-x86-ops.h
@@ -9,8 +9,7 @@ BUILD_BUG_ON(1)
  * "static_call_update()" calls.
  *
  * KVM_X86_OP_OPTIONAL() can be used for those functions that can have
- * a NULL definition, for example if "static_call_cond()" will be used
- * at the call sites.  KVM_X86_OP_OPTIONAL_RET0() can be used likewise
+ * a NULL definition.  KVM_X86_OP_OPTIONAL_RET0() can be used likewise
  * to make a definition optional, but in this case the default will
  * be __static_call_return0.
  */
diff --git a/arch/x86/include/asm/kvm-x86-pmu-ops.h b/arch/x86/include/asm/kvm-x86-pmu-ops.h
index f852b13aeefea7a15f811c62a7035d32527b8740..9159bf1a473074a644e595e4c4c52267429f202a 100644
--- a/arch/x86/include/asm/kvm-x86-pmu-ops.h
+++ b/arch/x86/include/asm/kvm-x86-pmu-ops.h
@@ -9,8 +9,7 @@ BUILD_BUG_ON(1)
  * "static_call_update()" calls.
  *
  * KVM_X86_PMU_OP_OPTIONAL() can be used for those functions that can have
- * a NULL definition, for example if "static_call_cond()" will be used
- * at the call sites.
+ * a NULL definition.
  */
 KVM_X86_PMU_OP(rdpmc_ecx_to_pmc)
 KVM_X86_PMU_OP(msr_idx_to_pmc)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 3e4248575c1bc0f5649a8ef4bc99dedae65171bb..2b99169ad5ce7372f2e0af440ffe960d4f9a1148 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -2309,12 +2309,12 @@ static inline bool kvm_irq_is_postable(struct kvm_lapic_irq *irq)
 
 static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
 {
-       static_call_cond(kvm_x86_vcpu_blocking)(vcpu);
+       static_call(kvm_x86_vcpu_blocking)(vcpu);
 }
 
 static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
 {
-       static_call_cond(kvm_x86_vcpu_unblocking)(vcpu);
+       static_call(kvm_x86_vcpu_unblocking)(vcpu);
 }
 
 static inline int kvm_cpu_get_apicid(int mps_cpu)
diff --git a/arch/x86/kvm/irq.c b/arch/x86/kvm/irq.c
index ad9ca8a60144c773dd2ab7de10af67a436f790cb..7cf93d427484df33a91167a4138ce2e84d25e64b 100644
--- a/arch/x86/kvm/irq.c
+++ b/arch/x86/kvm/irq.c
@@ -157,7 +157,7 @@ void __kvm_migrate_timers(struct kvm_vcpu *vcpu)
 {
        __kvm_migrate_apic_timer(vcpu);
        __kvm_migrate_pit_timer(vcpu);
-       static_call_cond(kvm_x86_migrate_timers)(vcpu);
+       static_call(kvm_x86_migrate_timers)(vcpu);
 }
 
 bool kvm_arch_irqfd_allowed(struct kvm *kvm, struct kvm_irqfd *args)
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 4e44c267959ae14047fd66d42590390335acd963..b6b8065e0de39ff1487bb7ff24c90198bfcf9306 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -738,7 +738,7 @@ static inline void apic_clear_irr(int vec, struct kvm_lapic *apic)
        if (unlikely(apic->apicv_active)) {
                /* need to update RVI */
                kvm_lapic_clear_vector(vec, apic->regs + APIC_IRR);
-               static_call_cond(kvm_x86_hwapic_irr_update)(apic->vcpu,
-                                                           apic_find_highest_irr(apic));
+               static_call(kvm_x86_hwapic_irr_update)(apic->vcpu,
+                                                      apic_find_highest_irr(apic));
        } else {
                apic->irr_pending = false;
@@ -765,7 +765,7 @@ static inline void apic_set_isr(int vec, struct kvm_lapic *apic)
         * just set SVI.
         */
        if (unlikely(apic->apicv_active))
-               static_call_cond(kvm_x86_hwapic_isr_update)(vec);
+               static_call(kvm_x86_hwapic_isr_update)(vec);
        else {
                ++apic->isr_count;
                BUG_ON(apic->isr_count > MAX_APIC_VECTOR);
@@ -810,7 +810,7 @@ static inline void apic_clear_isr(int vec, struct kvm_lapic *apic)
         * and must be left alone.
         */
        if (unlikely(apic->apicv_active))
-               static_call_cond(kvm_x86_hwapic_isr_update)(apic_find_highest_isr(apic));
+               static_call(kvm_x86_hwapic_isr_update)(apic_find_highest_isr(apic));
        else {
                --apic->isr_count;
                BUG_ON(apic->isr_count < 0);
@@ -2577,7 +2577,7 @@ void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value)
 
        if ((old_value ^ value) & (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE)) {
                kvm_make_request(KVM_REQ_APICV_UPDATE, vcpu);
-               static_call_cond(kvm_x86_set_virtual_apic_mode)(vcpu);
+               static_call(kvm_x86_set_virtual_apic_mode)(vcpu);
        }
 
        apic->base_address = apic->vcpu->arch.apic_base &
@@ -2687,7 +2687,7 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event)
        u64 msr_val;
        int i;
 
-       static_call_cond(kvm_x86_apicv_pre_state_restore)(vcpu);
+       static_call(kvm_x86_apicv_pre_state_restore)(vcpu);
 
        if (!init_event) {
                msr_val = APIC_DEFAULT_PHYS_BASE | MSR_IA32_APICBASE_ENABLE;
@@ -2742,9 +2742,9 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event)
        vcpu->arch.pv_eoi.msr_val = 0;
        apic_update_ppr(apic);
        if (apic->apicv_active) {
-               static_call_cond(kvm_x86_apicv_post_state_restore)(vcpu);
-               static_call_cond(kvm_x86_hwapic_irr_update)(vcpu, -1);
-               static_call_cond(kvm_x86_hwapic_isr_update)(-1);
+               static_call(kvm_x86_apicv_post_state_restore)(vcpu);
+               static_call(kvm_x86_hwapic_irr_update)(vcpu, -1);
+               static_call(kvm_x86_hwapic_isr_update)(-1);
        }
 
        vcpu->arch.apic_arb_prio = 0;
@@ -3019,7 +3019,7 @@ int kvm_apic_set_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s)
        struct kvm_lapic *apic = vcpu->arch.apic;
        int r;
 
-       static_call_cond(kvm_x86_apicv_pre_state_restore)(vcpu);
+       static_call(kvm_x86_apicv_pre_state_restore)(vcpu);
 
        kvm_lapic_set_base(vcpu, vcpu->arch.apic_base);
        /* set SPIV separately to get count of SW disabled APICs right */
@@ -3046,9 +3046,9 @@ int kvm_apic_set_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s)
        kvm_lapic_set_reg(apic, APIC_TMCCT, 0);
        kvm_apic_update_apicv(vcpu);
        if (apic->apicv_active) {
-               static_call_cond(kvm_x86_apicv_post_state_restore)(vcpu);
-               static_call_cond(kvm_x86_hwapic_irr_update)(vcpu, apic_find_highest_irr(apic));
-               static_call_cond(kvm_x86_hwapic_isr_update)(apic_find_highest_isr(apic));
+               static_call(kvm_x86_apicv_post_state_restore)(vcpu);
+               static_call(kvm_x86_hwapic_irr_update)(vcpu, apic_find_highest_irr(apic));
+               static_call(kvm_x86_hwapic_isr_update)(apic_find_highest_isr(apic));
        }
        kvm_make_request(KVM_REQ_EVENT, vcpu);
        if (ioapic_in_kernel(vcpu->kvm))
diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
index 50d6c12852ac983ac6b0f0ba25bb5194c689c6e1..7a1395bc3f833d3cbeb369415a23e503a82cdb4b 100644
--- a/arch/x86/kvm/pmu.c
+++ b/arch/x86/kvm/pmu.c
@@ -607,7 +607,7 @@ int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned idx, u64 *data)
 void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu)
 {
        if (lapic_in_kernel(vcpu)) {
-               static_call_cond(kvm_x86_pmu_deliver_pmi)(vcpu);
+               static_call(kvm_x86_pmu_deliver_pmi)(vcpu);
                kvm_apic_local_deliver(vcpu->arch.apic, APIC_LVTPC);
        }
 }
@@ -740,7 +740,7 @@ static void kvm_pmu_reset(struct kvm_vcpu *vcpu)
 
        pmu->fixed_ctr_ctrl = pmu->global_ctrl = pmu->global_status = 0;
 
-       static_call_cond(kvm_x86_pmu_reset)(vcpu);
+       static_call(kvm_x86_pmu_reset)(vcpu);
 }
 
 
@@ -818,7 +818,7 @@ void kvm_pmu_cleanup(struct kvm_vcpu *vcpu)
                        pmc_stop_counter(pmc);
        }
 
-       static_call_cond(kvm_x86_pmu_cleanup)(vcpu);
+       static_call(kvm_x86_pmu_cleanup)(vcpu);
 
        bitmap_zero(pmu->pmc_in_use, X86_PMC_IDX_MAX);
 }
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 6c07f7ff0eff623726c8d2e168c8590edc46e95c..d2c36f05ee92991014085a8474a83e69689f7da8 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -5122,7 +5122,7 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu,
                                    struct kvm_lapic_state *s)
 {
-       static_call_cond(kvm_x86_sync_pir_to_irr)(vcpu);
+       static_call(kvm_x86_sync_pir_to_irr)(vcpu);
 
        return kvm_apic_get_state(vcpu, s);
 }
@@ -9336,7 +9336,7 @@ writeback:
                        kvm_rip_write(vcpu, ctxt->eip);
                        if (r && (ctxt->tf || (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)))
                                r = kvm_vcpu_do_singlestep(vcpu);
-                       static_call_cond(kvm_x86_update_emulated_instruction)(vcpu);
+                       static_call(kvm_x86_update_emulated_instruction)(vcpu);
                        __kvm_set_rflags(vcpu, ctxt->eflags);
                }
 
@@ -10759,7 +10759,7 @@ static void vcpu_scan_ioapic(struct kvm_vcpu *vcpu)
 
        bitmap_zero(vcpu->arch.ioapic_handled_vectors, 256);
 
-       static_call_cond(kvm_x86_sync_pir_to_irr)(vcpu);
+       static_call(kvm_x86_sync_pir_to_irr)(vcpu);
 
        if (irqchip_split(vcpu->kvm))
                kvm_scan_ioapic_routes(vcpu, vcpu->arch.ioapic_handled_vectors);
@@ -10784,17 +10784,17 @@ static void vcpu_load_eoi_exitmap(struct kvm_vcpu *vcpu)
                bitmap_or((ulong *)eoi_exit_bitmap,
                          vcpu->arch.ioapic_handled_vectors,
                          to_hv_synic(vcpu)->vec_bitmap, 256);
-               static_call_cond(kvm_x86_load_eoi_exitmap)(vcpu, eoi_exit_bitmap);
+               static_call(kvm_x86_load_eoi_exitmap)(vcpu, eoi_exit_bitmap);
                return;
        }
 #endif
-       static_call_cond(kvm_x86_load_eoi_exitmap)(
+       static_call(kvm_x86_load_eoi_exitmap)(
                vcpu, (u64 *)vcpu->arch.ioapic_handled_vectors);
 }
 
 void kvm_arch_guest_memory_reclaimed(struct kvm *kvm)
 {
-       static_call_cond(kvm_x86_guest_memory_reclaimed)(kvm);
+       static_call(kvm_x86_guest_memory_reclaimed)(kvm);
 }
 
 static void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu)
@@ -10802,7 +10802,7 @@ static void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu)
        if (!lapic_in_kernel(vcpu))
                return;
 
-       static_call_cond(kvm_x86_set_apic_access_page_addr)(vcpu);
+       static_call(kvm_x86_set_apic_access_page_addr)(vcpu);
 }
 
 /*
@@ -11050,7 +11050,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
         * i.e. they can post interrupts even if APICv is temporarily disabled.
         */
        if (kvm_lapic_enabled(vcpu))
-               static_call_cond(kvm_x86_sync_pir_to_irr)(vcpu);
+               static_call(kvm_x86_sync_pir_to_irr)(vcpu);
 
        if (kvm_vcpu_exit_request(vcpu)) {
                vcpu->mode = OUTSIDE_GUEST_MODE;
@@ -11099,7 +11099,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
                        break;
 
                if (kvm_lapic_enabled(vcpu))
-                       static_call_cond(kvm_x86_sync_pir_to_irr)(vcpu);
+                       static_call(kvm_x86_sync_pir_to_irr)(vcpu);
 
                if (unlikely(kvm_vcpu_exit_request(vcpu))) {
                        exit_fastpath = EXIT_FASTPATH_EXIT_HANDLED;
@@ -11873,7 +11873,7 @@ static int __set_sregs_common(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs,
        *mmu_reset_needed |= kvm_read_cr3(vcpu) != sregs->cr3;
        vcpu->arch.cr3 = sregs->cr3;
        kvm_register_mark_dirty(vcpu, VCPU_EXREG_CR3);
-       static_call_cond(kvm_x86_post_set_cr3)(vcpu, sregs->cr3);
+       static_call(kvm_x86_post_set_cr3)(vcpu, sregs->cr3);
 
        kvm_set_cr8(vcpu, sregs->cr8);
 
@@ -12822,7 +12822,7 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
                mutex_unlock(&kvm->slots_lock);
        }
        kvm_unload_vcpu_mmus(kvm);
-       static_call_cond(kvm_x86_vm_destroy)(kvm);
+       static_call(kvm_x86_vm_destroy)(kvm);
        kvm_free_msr_filter(srcu_dereference_check(kvm->arch.msr_filter, &kvm->srcu, 1));
        kvm_pic_destroy(kvm);
        kvm_ioapic_destroy(kvm);
@@ -13513,7 +13513,7 @@ bool kvm_arch_can_dequeue_async_page_present(struct kvm_vcpu *vcpu)
 void kvm_arch_start_assignment(struct kvm *kvm)
 {
        if (atomic_inc_return(&kvm->arch.assigned_device_count) == 1)
-               static_call_cond(kvm_x86_pi_start_assignment)(kvm);
+               static_call(kvm_x86_pi_start_assignment)(kvm);
 }
 EXPORT_SYMBOL_GPL(kvm_arch_start_assignment);
 
@@ -13650,7 +13650,7 @@ int kvm_arch_gmem_prepare(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn, int max_ord
 #ifdef CONFIG_HAVE_KVM_GMEM_INVALIDATE
 void kvm_arch_gmem_invalidate(kvm_pfn_t start, kvm_pfn_t end)
 {
-       static_call_cond(kvm_x86_gmem_invalidate)(start, end);
+       static_call(kvm_x86_gmem_invalidate)(start, end);
 }
 #endif