 #ifndef _ASM_X86_MWAIT_H
 #define _ASM_X86_MWAIT_H
 
+#include <linux/sched.h>
+
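+/*
+ * MWAIT hint layout in EAX: bits [3:0] select the sub C-state,
+ * bits [7:4] the target C-state. Setting bit 0 of ECX makes
+ * interrupts break out of MWAIT even while they are masked.
+ */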
 #define MWAIT_SUBSTATE_MASK            0xf
 #define MWAIT_CSTATE_MASK              0xf
 #define MWAIT_SUBSTATE_SIZE            4
 
 #define MWAIT_ECX_INTERRUPT_BREAK      0x1
 
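+/*
+ * MONITOR and MWAIT are emitted as raw opcode bytes so the file still
+ * assembles on toolchains whose assembler predates the mnemonics.
+ */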
+static inline void __monitor(const void *eax, unsigned long ecx,
+                            unsigned long edx)
+{
+       /* "monitor %eax, %ecx, %edx;" */
+       asm volatile(".byte 0x0f, 0x01, 0xc8;"
+                    :: "a" (eax), "c" (ecx), "d"(edx));
+}
+
+static inline void __mwait(unsigned long eax, unsigned long ecx)
+{
+       /* "mwait %eax, %ecx;" */
+       asm volatile(".byte 0x0f, 0x01, 0xc9;"
+                    :: "a" (eax), "c" (ecx));
+}
+
+/*
+ * This uses the MONITOR/MWAIT instructions, available on P4 processors
+ * with PNI, which obviate the IPI otherwise needed to trigger a check
+ * of need_resched. We execute MONITOR against need_resched and enter an
+ * optimized wait state through MWAIT. Whenever someone changes
+ * need_resched, we are woken up from MWAIT (without an IPI).
+ *
+ * Starting with Core Duo processors, MWAIT can also take hints based on
+ * CPU capability.
+ */
+static inline void mwait_idle_with_hints(unsigned long eax, unsigned long ecx)
+{
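+       /*
+        * TIF_POLLING_NRFLAG tells the scheduler that a wakeup only needs
+        * to set TIF_NEED_RESCHED, not send an IPI, while we monitor the
+        * flags word; _and_test issues a barrier and then re-checks
+        * need_resched so a concurrent wakeup cannot be lost.
+        */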
+       if (!current_set_polling_and_test()) {
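+               /*
+                * Some older CPUs have a MONITOR erratum that can miss
+                * wakeups; executing CLFLUSH on the monitored line first
+                * works around it.
+                */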
+               if (this_cpu_has(X86_FEATURE_CLFLUSH_MONITOR))
+                       clflush((void *)&current_thread_info()->flags);
+
+               __monitor((void *)&current_thread_info()->flags, 0, 0);
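+               /*
+                * Re-check after arming MONITOR: a need_resched write
+                * before the arm would not wake MWAIT, while any write
+                * after it will.
+                */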
+               if (!need_resched())
+                       __mwait(eax, ecx);
+       }
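+       /* Leaving idle: wakeups must fall back to sending an IPI. */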
+       __current_clr_polling();
+}
+
 #endif /* _ASM_X86_MWAIT_H */
 
 #endif
 }
 
-static inline void __monitor(const void *eax, unsigned long ecx,
-                            unsigned long edx)
-{
-       /* "monitor %eax, %ecx, %edx;" */
-       asm volatile(".byte 0x0f, 0x01, 0xc8;"
-                    :: "a" (eax), "c" (ecx), "d"(edx));
-}
-
-static inline void __mwait(unsigned long eax, unsigned long ecx)
-{
-       /* "mwait %eax, %ecx;" */
-       asm volatile(".byte 0x0f, 0x01, 0xc9;"
-                    :: "a" (eax), "c" (ecx));
-}
-
-static inline void __sti_mwait(unsigned long eax, unsigned long ecx)
-{
-       trace_hardirqs_on();
-       /* "mwait %eax, %ecx;" */
-       asm volatile("sti; .byte 0x0f, 0x01, 0xc9;"
-                    :: "a" (eax), "c" (ecx));
-}
-
 extern void select_idle_routine(const struct cpuinfo_x86 *c);
 extern void init_amd_e400_c1e_mask(void);
 
 
 }
 EXPORT_SYMBOL_GPL(acpi_processor_ffh_cstate_probe);
 
-/*
- * This uses new MONITOR/MWAIT instructions on P4 processors with PNI,
- * which can obviate IPI to trigger checking of need_resched.
- * We execute MONITOR against need_resched and enter optimized wait state
- * through MWAIT. Whenever someone changes need_resched, we would be woken
- * up from MWAIT (without an IPI).
- *
- * New with Core Duo processors, MWAIT can take some hints based on CPU
- * capability.
- */
-void mwait_idle_with_hints(unsigned long ax, unsigned long cx)
-{
-       if (!need_resched()) {
-               if (this_cpu_has(X86_FEATURE_CLFLUSH_MONITOR))
-                       clflush((void *)&current_thread_info()->flags);
-
-               __monitor((void *)&current_thread_info()->flags, 0, 0);
-               smp_mb();
-               if (!need_resched())
-                       __mwait(ax, cx);
-       }
-}
-
 void acpi_processor_ffh_cstate_enter(struct acpi_processor_cx *cx)
 {
        unsigned int cpu = smp_processor_id();
 
                                        CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);
                        stop_critical_timings();
 
-                       __monitor((void *)&current_thread_info()->flags, 0, 0);
-                       smp_mb();
-                       if (!need_resched())
-                               __mwait(power_saving_mwait_eax, 1);
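+                       /*
+                        * The helper sets the polling flag and re-checks
+                        * need_resched itself, replacing the open-coded
+                        * monitor/mwait sequence removed above.
+                        */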
+                       mwait_idle_with_hints(power_saving_mwait_eax, 1);
 
                        start_critical_timings();
                        if (lapic_marked_unstable)
 
        if (unlikely(!pr))
                return -EINVAL;
 
-       if (cx->entry_method == ACPI_CSTATE_FFH) {
-               if (current_set_polling_and_test())
-                       return -EINVAL;
-       }
-
        lapic_timer_state_broadcast(pr, cx, 1);
        acpi_idle_do_entry(cx);
 
        if (unlikely(!pr))
                return -EINVAL;
 
-       if (cx->entry_method == ACPI_CSTATE_FFH) {
-               if (current_set_polling_and_test())
-                       return -EINVAL;
-       }
-
        /*
         * Must be done before busmaster disable as we might need to
         * access HPET !
                }
        }
 
-       if (cx->entry_method == ACPI_CSTATE_FFH) {
-               if (current_set_polling_and_test())
-                       return -EINVAL;
-       }
-
        acpi_unlazy_tlb(smp_processor_id());
 
        /* Tell the scheduler that we are going deep-idle: */
 
        if (!(lapic_timer_reliable_states & (1 << (cstate))))
                clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);
 
-       if (!current_set_polling_and_test()) {
-
-               if (this_cpu_has(X86_FEATURE_CLFLUSH_MONITOR))
-                       clflush((void *)&current_thread_info()->flags);
-
-               __monitor((void *)&current_thread_info()->flags, 0, 0);
-               smp_mb();
-               if (!need_resched())
-                       __mwait(eax, ecx);
-       }
+       mwait_idle_with_hints(eax, ecx);
 
        if (!(lapic_timer_reliable_states & (1 << (cstate))))
                clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu);
 
                         */
                        local_touch_nmi();
                        stop_critical_timings();
-                       __monitor((void *)&current_thread_info()->flags, 0, 0);
-                       cpu_relax(); /* allow HT sibling to run */
-                       __mwait(eax, ecx);
+                       mwait_idle_with_hints(eax, ecx);
                        start_critical_timings();
                        atomic_inc(&idle_wakeup_counter);
                }