The following RCU lockdep warning led to adding irq_enter()/irq_exit() into
smp_reschedule_interrupt():
 RCU used illegally from idle CPU!
 rcu_scheduler_active = 1, debug_locks = 0
 RCU used illegally from extended quiescent state!
 no locks held by swapper/1/0.
  do_trace_write_msr
  native_write_msr
  native_apic_msr_eoi_write
  smp_reschedule_interrupt
  reschedule_interrupt
As Peterz pointed out:
| So now we're making a very frequent interrupt slower because of debug
| code.
|
| The thing is, many many smp_reschedule_interrupt() invocations don't
| actually execute anything much at all and are only sent to tickle the
| return to user path (which does the actual preemption).
|
| Having to do the whole irq_enter/irq_exit dance just for this unlikely
| debug case totally blows.
Use the wrmsr_notrace() variant in native_apic_msr_eoi_write(), annotate the
KVM variant (kvm_guest_apic_eoi_write()) with notrace and add a
native_eoi_write callback to the apic structure so KVM guests are covered as
well.

This allows reverting the irq_enter()/irq_exit() dance in
smp_reschedule_interrupt().
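
For reference, a minimal sketch of what the non-traced write path looks like,
assuming wrmsr_notrace() (introduced separately in arch/x86/include/asm/msr.h)
simply issues a raw WRMSR without the do_trace_write_msr() hook seen in the
backtrace above:

  /*
   * Sketch only -- not the exact in-tree implementation. The point is that
   * there is no tracepoint between the caller and the WRMSR instruction,
   * so the helper is safe to use from the idle/extended quiescent state.
   */
  static __always_inline void native_write_msr_notrace(unsigned int msr,
                                                        u32 low, u32 high)
  {
          asm volatile("wrmsr" : : "c" (msr), "a" (low), "d" (high) : "memory");
  }

  static inline void wrmsr_notrace(unsigned int msr, u32 low, u32 high)
  {
          native_write_msr_notrace(msr, low, high);
  }
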
Suggested-by: Peter Zijlstra <peterz@infradead.org>
Suggested-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Wanpeng Li <wanpeng.li@hotmail.com>
Acked-by: Paolo Bonzini <pbonzini@redhat.com>
Cc: kvm@vger.kernel.org
Cc: Mike Galbraith <efault@gmx.de>
Cc: Borislav Petkov <bp@alien8.de>
Link: http://lkml.kernel.org/r/1478488420-5982-3-git-send-email-wanpeng.li@hotmail.com
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
 
 
 static inline void native_apic_msr_eoi_write(u32 reg, u32 v)
 {
-       wrmsr(APIC_BASE_MSR + (APIC_EOI >> 4), APIC_EOI_ACK, 0);
+       wrmsr_notrace(APIC_BASE_MSR + (APIC_EOI >> 4), APIC_EOI_ACK, 0);
 }
 
 static inline u32 native_apic_msr_read(u32 reg)
         * on write for EOI.
         */
        void (*eoi_write)(u32 reg, u32 v);
+       void (*native_eoi_write)(u32 reg, u32 v);
        u64 (*icr_read)(void);
        void (*icr_write)(u32 low, u32 high);
        void (*wait_icr_idle)(void);
 
        for (drv = __apicdrivers; drv < __apicdrivers_end; drv++) {
                /* Should happen once for each apic */
                WARN_ON((*drv)->eoi_write == eoi_write);
+               (*drv)->native_eoi_write = (*drv)->eoi_write;
                (*drv)->eoi_write = eoi_write;
        }
 }
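
With the original writer recorded, a paravirtual override can be installed
without losing access to the untraced native EOI. A simplified sketch of the
guest-side registration (the real call sits in kvm_guest_cpu_init() when PV
EOI is available; the helper name below is made up for illustration):

  /* Hypothetical wrapper, modeled on the KVM PV-EOI setup path */
  static void example_enable_pv_eoi(void)
  {
          /*
           * apic_set_eoi_write() stores each driver's current eoi_write
           * (e.g. native_apic_msr_eoi_write) in native_eoi_write and then
           * installs the paravirt override.
           */
          apic_set_eoi_write(kvm_guest_apic_eoi_write);
  }

The override below can thus fall back to apic->native_eoi_write() instead of
apic_write(), which on x2APIC would end up in the traced wrmsr() path again.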
 
 
 static DEFINE_PER_CPU(unsigned long, kvm_apic_eoi) = KVM_PV_EOI_DISABLED;
 
-static void kvm_guest_apic_eoi_write(u32 reg, u32 val)
+static notrace void kvm_guest_apic_eoi_write(u32 reg, u32 val)
 {
        /**
         * This relies on __test_and_clear_bit to modify the memory
         */
        if (__test_and_clear_bit(KVM_PV_EOI_BIT, this_cpu_ptr(&kvm_apic_eoi)))
                return;
-       apic_write(APIC_EOI, APIC_EOI_ACK);
+       apic->native_eoi_write(APIC_EOI, APIC_EOI_ACK);
 }
 
 static void kvm_guest_cpu_init(void)
 
 
 __visible void smp_reschedule_interrupt(struct pt_regs *regs)
 {
-       irq_enter();
        ack_APIC_irq();
        __smp_reschedule_interrupt();
-       irq_exit();
        /*
         * KVM uses this interrupt to force a cpu out of guest mode
         */
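
Putting the pieces together, the resulting EOI paths look roughly like this
(a sketch based on the hunks above; the bare-metal chain assumes x2APIC):

  /*
   * Bare metal, no PV override:
   *
   *   smp_reschedule_interrupt()
   *     ack_APIC_irq()
   *       apic->eoi_write()           == native_apic_msr_eoi_write()
   *         wrmsr_notrace()           -- raw WRMSR, no do_trace_write_msr()
   *
   * KVM guest with PV EOI enabled:
   *
   *   smp_reschedule_interrupt()
   *     ack_APIC_irq()
   *       apic->eoi_write()           == kvm_guest_apic_eoi_write()  (notrace)
   *         apic->native_eoi_write()  -- fallback when the PV EOI bit is clear
   *
   * Neither chain contains a tracepoint, so the reschedule IPI no longer
   * needs the irq_enter()/irq_exit() pair just to keep RCU's lockdep checks
   * happy.
   */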