BUG_ON(Page_dcache_dirty(page));
 
-       inc_preempt_count();
+       pagefault_disable();
        idx = (addr >> PAGE_SHIFT) & (FIX_N_COLOURS - 1);
 #ifdef CONFIG_MIPS_MT_SMTC
        idx += FIX_N_COLOURS * smp_processor_id() +
        write_c0_entryhi(old_ctx);
        EXIT_CRITICAL(flags);
 #endif
-       dec_preempt_count();
-       preempt_check_resched();
+       pagefault_enable();
 }
 
 void copy_user_highpage(struct page *to, struct page *from,
 
 
 static inline void preempt_conditional_sti(struct pt_regs *regs)
 {
-       inc_preempt_count();
+       preempt_count_inc();
        if (regs->flags & X86_EFLAGS_IF)
                local_irq_enable();
 }
 static inline void preempt_conditional_cli(struct pt_regs *regs)
 {
        if (regs->flags & X86_EFLAGS_IF)
                local_irq_disable();
-       dec_preempt_count();
+       preempt_count_dec();
 }
 
 static int __kprobes
 
        return !(*preempt_count_ptr() & PREEMPT_NEED_RESCHED);
 }
 
+/*
+ * The various preempt_count add/sub methods
+ */
+
+static __always_inline void __preempt_count_add(int val)
+{
+       *preempt_count_ptr() += val;
+}
+
+static __always_inline void __preempt_count_sub(int val)
+{
+       *preempt_count_ptr() -= val;
+}
+
+static __always_inline bool __preempt_count_dec_and_test(void)
+{
+       return !--*preempt_count_ptr();
+}
+
+/*
+ * Returns true when we need to resched -- even if we can not.
+ */
+static __always_inline bool need_resched(void)
+{
+       return unlikely(test_preempt_need_resched());
+}
+
+/*
+ * Returns true when we need to resched and can (barring IRQ state).
+ */
+static __always_inline bool should_resched(void)
+{
+       return unlikely(!*preempt_count_ptr());
+}
+
 #endif /* __ASM_PREEMPT_H */
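For readers of the hunk above, a minimal userspace sketch (not part of the patch; the bit value is assumed purely for illustration) of the scheme these helpers rely on: PREEMPT_NEED_RESCHED lives in the preempt count as an inverted bit, so a count of zero means both "preemption about to be re-enabled" and "reschedule pending", and __preempt_count_dec_and_test()/should_resched() can test both with a single load.

#include <stdbool.h>
#include <stdio.h>

#define MOCK_NEED_RESCHED	0x80000000U	/* assumed bit; inverted: set means "no resched needed" */

/* starts preemptible with no reschedule pending */
static unsigned int mock_preempt_count = MOCK_NEED_RESCHED;

static bool mock_dec_and_test(void)		/* ~ __preempt_count_dec_and_test() */
{
	return !--mock_preempt_count;
}

int main(void)
{
	mock_preempt_count += 1;			/* preempt_disable() */
	mock_preempt_count &= ~MOCK_NEED_RESCHED;	/* set_preempt_need_resched() */

	/* preempt_enable(): one dec-and-test sees count == 0 *and* resched pending */
	if (mock_dec_and_test())
		printf("would call preempt_schedule()\n");
	return 0;
}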
 
 #define __irq_enter()                                  \
        do {                                            \
                account_irq_enter_time(current);        \
-               add_preempt_count(HARDIRQ_OFFSET);      \
+               preempt_count_add(HARDIRQ_OFFSET);      \
                trace_hardirq_enter();                  \
        } while (0)
 
 #define __irq_exit()                                   \
        do {                                            \
                trace_hardirq_exit();                   \
                account_irq_exit_time(current);         \
-               sub_preempt_count(HARDIRQ_OFFSET);      \
+               preempt_count_sub(HARDIRQ_OFFSET);      \
        } while (0)
 
 /*
                lockdep_off();                                  \
                ftrace_nmi_enter();                             \
                BUG_ON(in_nmi());                               \
-               add_preempt_count(NMI_OFFSET + HARDIRQ_OFFSET); \
+               preempt_count_add(NMI_OFFSET + HARDIRQ_OFFSET); \
                rcu_nmi_enter();                                \
                trace_hardirq_enter();                          \
        } while (0)
                trace_hardirq_exit();                           \
                rcu_nmi_exit();                                 \
                BUG_ON(!in_nmi());                              \
-               sub_preempt_count(NMI_OFFSET + HARDIRQ_OFFSET); \
+               preempt_count_sub(NMI_OFFSET + HARDIRQ_OFFSET); \
                ftrace_nmi_exit();                              \
                lockdep_on();                                   \
        } while (0)
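The HARDIRQ/NMI offsets added above are plain increments into separate bit fields of the same counter. Below is a standalone sketch of how nesting depth and in_nmi() fall out of that; the field widths are assumed for illustration (they mirror the usual preempt/softirq/hardirq/NMI split) rather than taken from this patch.

#include <stdio.h>

/* assumed layout for illustration: 8 preempt, 8 softirq, 4 hardirq, 1 nmi bit */
#define MOCK_PREEMPT_BITS	8
#define MOCK_SOFTIRQ_BITS	8
#define MOCK_HARDIRQ_BITS	4

#define MOCK_SOFTIRQ_SHIFT	(MOCK_PREEMPT_BITS)
#define MOCK_HARDIRQ_SHIFT	(MOCK_SOFTIRQ_SHIFT + MOCK_SOFTIRQ_BITS)
#define MOCK_NMI_SHIFT		(MOCK_HARDIRQ_SHIFT + MOCK_HARDIRQ_BITS)

#define MOCK_HARDIRQ_OFFSET	(1U << MOCK_HARDIRQ_SHIFT)
#define MOCK_NMI_OFFSET		(1U << MOCK_NMI_SHIFT)

int main(void)
{
	unsigned int count = 0;

	count += MOCK_HARDIRQ_OFFSET;			/* ~ __irq_enter() */
	count += MOCK_NMI_OFFSET + MOCK_HARDIRQ_OFFSET;	/* ~ nmi_enter() */

	printf("hardirq depth %u, in_nmi %u\n",
	       (count >> MOCK_HARDIRQ_SHIFT) & ((1U << MOCK_HARDIRQ_BITS) - 1),
	       (count >> MOCK_NMI_SHIFT) & 1U);		/* prints: hardirq depth 2, in_nmi 1 */
	return 0;
}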
 
 #include <asm/preempt.h>
 
 #if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_PREEMPT_TRACER)
-  extern void add_preempt_count(int val);
-  extern void sub_preempt_count(int val);
+extern void preempt_count_add(int val);
+extern void preempt_count_sub(int val);
+#define preempt_count_dec_and_test() ({ preempt_count_sub(1); should_resched(); })
 #else
-# define add_preempt_count(val)        do { *preempt_count_ptr() += (val); } while (0)
-# define sub_preempt_count(val)        do { *preempt_count_ptr() -= (val); } while (0)
+#define preempt_count_add(val) __preempt_count_add(val)
+#define preempt_count_sub(val) __preempt_count_sub(val)
+#define preempt_count_dec_and_test() __preempt_count_dec_and_test()
 #endif
 
-#define inc_preempt_count() add_preempt_count(1)
-#define dec_preempt_count() sub_preempt_count(1)
-
-#ifdef CONFIG_PREEMPT
-
-asmlinkage void preempt_schedule(void);
-
-#define preempt_check_resched() \
-do { \
-       if (unlikely(!*preempt_count_ptr())) \
-               preempt_schedule(); \
-} while (0)
-
-#ifdef CONFIG_CONTEXT_TRACKING
-
-void preempt_schedule_context(void);
-
-#define preempt_check_resched_context() \
-do { \
-       if (unlikely(!*preempt_count_ptr())) \
-               preempt_schedule_context(); \
-} while (0)
-#else
-
-#define preempt_check_resched_context() preempt_check_resched()
-
-#endif /* CONFIG_CONTEXT_TRACKING */
-
-#else /* !CONFIG_PREEMPT */
-
-#define preempt_check_resched()                do { } while (0)
-#define preempt_check_resched_context()        do { } while (0)
-
-#endif /* CONFIG_PREEMPT */
+#define __preempt_count_inc() __preempt_count_add(1)
+#define __preempt_count_dec() __preempt_count_sub(1)
 
+#define preempt_count_inc() preempt_count_add(1)
+#define preempt_count_dec() preempt_count_sub(1)
 
 #ifdef CONFIG_PREEMPT_COUNT
 
 #define preempt_disable() \
 do { \
-       inc_preempt_count(); \
+       preempt_count_inc(); \
        barrier(); \
 } while (0)
 
 #define sched_preempt_enable_no_resched() \
 do { \
        barrier(); \
-       dec_preempt_count(); \
+       preempt_count_dec(); \
 } while (0)
 
-#define preempt_enable_no_resched()    sched_preempt_enable_no_resched()
+#define preempt_enable_no_resched() sched_preempt_enable_no_resched()
 
+#ifdef CONFIG_PREEMPT
+asmlinkage void preempt_schedule(void);
 #define preempt_enable() \
 do { \
-       preempt_enable_no_resched(); \
-       preempt_check_resched(); \
+       barrier(); \
+       if (unlikely(preempt_count_dec_and_test())) \
+               preempt_schedule(); \
 } while (0)
 
-/* For debugging and tracer internals only! */
-#define add_preempt_count_notrace(val)                 \
-       do { *preempt_count_ptr() += (val); } while (0)
-#define sub_preempt_count_notrace(val)                 \
-       do { *preempt_count_ptr() -= (val); } while (0)
-#define inc_preempt_count_notrace() add_preempt_count_notrace(1)
-#define dec_preempt_count_notrace() sub_preempt_count_notrace(1)
+#define preempt_check_resched() \
+do { \
+       if (should_resched()) \
+               preempt_schedule(); \
+} while (0)
+
+#else
+#define preempt_enable() preempt_enable_no_resched()
+#define preempt_check_resched() do { } while (0)
+#endif
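The gain from preempt_count_dec_and_test() in preempt_enable() is that the decrement and the "dropped to zero with a resched pending" check collapse into one read-modify-write (on x86 a single decl whose zero flag feeds the branch), instead of a decrement followed by a separate reload of the count. A rough userspace sketch of the two shapes, using mock names rather than the real macros:

#include <stdio.h>

static unsigned int mock_count = 1;	/* as if preempt_disable() ran and a resched is pending */

static void mock_schedule(void)
{
	puts("preempt_schedule()");
}

static void old_style_enable(void)	/* former preempt_enable(): dec, then re-check the count */
{
	mock_count -= 1;
	if (!mock_count)
		mock_schedule();
}

static void new_style_enable(void)	/* new preempt_enable(): one dec-and-test */
{
	if (!--mock_count)
		mock_schedule();
}

int main(void)
{
	old_style_enable();
	mock_count = 1;
	new_style_enable();		/* same observable behaviour, one RMW */
	return 0;
}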
 
 #define preempt_disable_notrace() \
 do { \
-       inc_preempt_count_notrace(); \
+       __preempt_count_inc(); \
        barrier(); \
 } while (0)
 
 #define preempt_enable_no_resched_notrace() \
 do { \
        barrier(); \
-       dec_preempt_count_notrace(); \
+       __preempt_count_dec(); \
 } while (0)
 
-/* preempt_check_resched is OK to trace */
+#ifdef CONFIG_PREEMPT
+
+#ifdef CONFIG_CONTEXT_TRACKING
+asmlinkage void preempt_schedule_context(void);
+#else
+#define preempt_schedule_context() preempt_schedule()
+#endif
+
 #define preempt_enable_notrace() \
 do { \
-       preempt_enable_no_resched_notrace(); \
-       preempt_check_resched_context(); \
+       barrier(); \
+       if (unlikely(__preempt_count_dec_and_test())) \
+               preempt_schedule_context(); \
 } while (0)
+#else
+#define preempt_enable_notrace() preempt_enable_no_resched_notrace()
+#endif
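A small userspace sketch (hypothetical names, not the kernel code) of why the _notrace variants above use the raw __preempt_count_* helpers: the traced preempt_count_add()/preempt_count_sub() call back into the preempt tracer, so using them from tracer internals would recurse.

#include <stdio.h>

static unsigned int mock_count;

static void raw_count_add(int val)	/* ~ __preempt_count_add(): no hooks */
{
	mock_count += val;
}

static void traced_count_add(int val)	/* ~ preempt_count_add(): runs a trace hook */
{
	raw_count_add(val);
	/* the preempt-off tracer would run here; if it bumped the count
	 * via traced_count_add() itself, it would recurse forever */
	printf("trace: preempt count now %u\n", mock_count);
}

int main(void)
{
	traced_count_add(1);	/* normal preempt_disable() path, traced */
	raw_count_add(1);	/* preempt_disable_notrace() path, silent */
	printf("final count %u\n", mock_count);
	return 0;
}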
 
 #else /* !CONFIG_PREEMPT_COUNT */
 
  * that can cause faults and scheduling migrate into our preempt-protected
  * region.
  */
-#define preempt_disable()              barrier()
+#define preempt_disable()                      barrier()
 #define sched_preempt_enable_no_resched()      barrier()
-#define preempt_enable_no_resched()    barrier()
-#define preempt_enable()               barrier()
+#define preempt_enable_no_resched()            barrier()
+#define preempt_enable()                       barrier()
+#define preempt_check_resched()                do { } while (0)
 
 #define preempt_disable_notrace()              barrier()
 #define preempt_enable_no_resched_notrace()    barrier()
 
        return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p);
 }
 
-static inline int need_resched(void)
-{
-       return unlikely(test_preempt_need_resched());
-}
-
 /*
  * cond_resched() and cond_resched_lock(): latency reduction via
  * explicit rescheduling in places that are safe. The return
 
  */
 static inline void pagefault_disable(void)
 {
-       inc_preempt_count();
+       preempt_count_inc();
        /*
         * make sure to have issued the store before a pagefault
         * can hit.
         * the pagefault handler again.
         */
        barrier();
-       dec_preempt_count();
-       /*
-        * make sure we do..
-        */
-       barrier();
+       preempt_count_dec();
        preempt_check_resched();
 }
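For context, the caller-side pattern these helpers serve looks roughly like the userspace mock below (illustrative only, not the kernel implementation): the barrier()s keep the count update ordered against the possibly-faulting access, and a fault handler that sees a non-zero count knows it must not sleep.

#include <stdbool.h>
#include <stdio.h>

static unsigned int mock_count;

static void mock_pagefault_disable(void)
{
	mock_count += 1;
	__asm__ __volatile__("" ::: "memory");	/* ~ barrier(): increment issued before any fault */
}

static void mock_pagefault_enable(void)
{
	__asm__ __volatile__("" ::: "memory");	/* ~ barrier(): faulting access stays before the decrement */
	mock_count -= 1;
}

static bool mock_fault_handler_may_sleep(void)
{
	return mock_count == 0;	/* roughly what an in_atomic()-style check reduces to */
}

int main(void)
{
	mock_pagefault_disable();
	printf("handler may sleep: %d\n", mock_fault_handler_may_sleep());	/* 0 */
	mock_pagefault_enable();
	printf("handler may sleep: %d\n", mock_fault_handler_may_sleep());	/* 1 */
	return 0;
}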
 
 
  * instead of preempt_schedule() to exit user context if needed before
  * calling the scheduler.
  */
-void __sched notrace preempt_schedule_context(void)
+asmlinkage void __sched notrace preempt_schedule_context(void)
 {
        enum ctx_state prev_ctx;
 
 
 #if defined(CONFIG_PREEMPT) && (defined(CONFIG_DEBUG_PREEMPT) || \
                                defined(CONFIG_PREEMPT_TRACER))
 
-void __kprobes add_preempt_count(int val)
+void __kprobes preempt_count_add(int val)
 {
 #ifdef CONFIG_DEBUG_PREEMPT
        /*
        if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0)))
                return;
 #endif
-       add_preempt_count_notrace(val);
+       __preempt_count_add(val);
 #ifdef CONFIG_DEBUG_PREEMPT
        /*
         * Spinlock count overflowing soon?
        if (preempt_count() == val)
                trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
 }
-EXPORT_SYMBOL(add_preempt_count);
+EXPORT_SYMBOL(preempt_count_add);
 
-void __kprobes sub_preempt_count(int val)
+void __kprobes preempt_count_sub(int val)
 {
 #ifdef CONFIG_DEBUG_PREEMPT
        /*
 
        if (preempt_count() == val)
                trace_preempt_on(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
-       sub_preempt_count_notrace(val);
+       __preempt_count_sub(val);
 }
-EXPORT_SYMBOL(sub_preempt_count);
+EXPORT_SYMBOL(preempt_count_sub);
 
 #endif
 
                return;
 
        do {
-               add_preempt_count_notrace(PREEMPT_ACTIVE);
+               __preempt_count_add(PREEMPT_ACTIVE);
                __schedule();
-               sub_preempt_count_notrace(PREEMPT_ACTIVE);
+               __preempt_count_sub(PREEMPT_ACTIVE);
 
                /*
                 * Check again in case we missed a preemption opportunity
        prev_state = exception_enter();
 
        do {
-               add_preempt_count(PREEMPT_ACTIVE);
+               __preempt_count_add(PREEMPT_ACTIVE);
                local_irq_enable();
                __schedule();
                local_irq_disable();
-               sub_preempt_count(PREEMPT_ACTIVE);
+               __preempt_count_sub(PREEMPT_ACTIVE);
 
                /*
                 * Check again in case we missed a preemption opportunity
        return 0;
 }
 
-static inline int should_resched(void)
-{
-       return need_resched() && !(preempt_count() & PREEMPT_ACTIVE);
-}
-
 static void __cond_resched(void)
 {
-       add_preempt_count(PREEMPT_ACTIVE);
+       __preempt_count_add(PREEMPT_ACTIVE);
        __schedule();
-       sub_preempt_count(PREEMPT_ACTIVE);
+       __preempt_count_sub(PREEMPT_ACTIVE);
 }
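Why __cond_resched() brackets __schedule() with PREEMPT_ACTIVE: the bit tells the scheduler core that a non-running task is being preempted rather than voluntarily blocking, so it must stay on the runqueue. A rough userspace sketch of that decision, with an assumed bit value and mock names:

#include <stdbool.h>
#include <stdio.h>

#define MOCK_PREEMPT_ACTIVE	0x10000000U	/* assumed bit, for illustration */

static unsigned int mock_count;

static bool mock_should_dequeue(bool task_running)
{
	/* mirrors the "prev->state && !(preempt_count() & PREEMPT_ACTIVE)" idea */
	return !task_running && !(mock_count & MOCK_PREEMPT_ACTIVE);
}

int main(void)
{
	/* voluntary schedule() from a sleeping state: task is dequeued */
	printf("voluntary:  dequeue=%d\n", mock_should_dequeue(false));

	/* __cond_resched()/preempt_schedule(): PREEMPT_ACTIVE set around __schedule() */
	mock_count += MOCK_PREEMPT_ACTIVE;
	printf("preemption: dequeue=%d\n", mock_should_dequeue(false));
	mock_count -= MOCK_PREEMPT_ACTIVE;
	return 0;
}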
 
 int __sched _cond_resched(void)
 
 
        raw_local_irq_save(flags);
        /*
-        * The preempt tracer hooks into add_preempt_count and will break
+        * The preempt tracer hooks into preempt_count_add and will break
         * lockdep because it calls back into lockdep after SOFTIRQ_OFFSET
         * is set and before current->softirq_enabled is cleared.
         * We must manually increment preempt_count here and manually
         * call the trace_preempt_off later.
         */
-       add_preempt_count_notrace(cnt);
+       __preempt_count_add(cnt);
        /*
         * Were softirqs turned off above:
         */
 #else /* !CONFIG_TRACE_IRQFLAGS */
 static inline void __local_bh_disable(unsigned long ip, unsigned int cnt)
 {
-       add_preempt_count(cnt);
+       preempt_count_add(cnt);
        barrier();
 }
 #endif /* CONFIG_TRACE_IRQFLAGS */
 
        if (softirq_count() == cnt)
                trace_softirqs_on(_RET_IP_);
-       sub_preempt_count(cnt);
+       preempt_count_sub(cnt);
 }
 
 /*
         * Keep preemption disabled until we are done with
         * softirq processing:
         */
-       sub_preempt_count(SOFTIRQ_DISABLE_OFFSET - 1);
+       preempt_count_sub(SOFTIRQ_DISABLE_OFFSET - 1);
 
        if (unlikely(!in_interrupt() && local_softirq_pending()))
                do_softirq();
 
-       dec_preempt_count();
+       preempt_count_dec();
 #ifdef CONFIG_TRACE_IRQFLAGS
        local_irq_enable();
 #endif
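The SOFTIRQ_DISABLE_OFFSET - 1 subtraction above deserves a note: it drops the softirq-disable part of the count but deliberately keeps one plain preempt-disable reference, so do_softirq() runs with preemption off and the final preempt_count_dec() releases it. A small arithmetic sketch with an assumed offset value:

#include <stdio.h>

#define MOCK_SOFTIRQ_DISABLE_OFFSET	0x200U	/* assumed value: twice a mock SOFTIRQ_OFFSET */

int main(void)
{
	unsigned int count = 0;

	count += MOCK_SOFTIRQ_DISABLE_OFFSET;		/* local_bh_disable() */
	count -= MOCK_SOFTIRQ_DISABLE_OFFSET - 1;	/* softirq part gone, preemption still held */
	printf("after partial sub: %#x (preempt disabled: %s)\n",
	       count, count ? "yes" : "no");

	count -= 1;					/* final preempt_count_dec() */
	printf("after final dec:   %#x (preempt disabled: %s)\n",
	       count, count ? "yes" : "no");
	return 0;
}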
 
        account_irq_exit_time(current);
        trace_hardirq_exit();
-       sub_preempt_count(HARDIRQ_OFFSET);
+       preempt_count_sub(HARDIRQ_OFFSET);
        if (!in_interrupt() && local_softirq_pending())
                invoke_softirq();