#endif
 
 #endif
+
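+/*
+ * clockevents_get_next_event() reports the expiry of the next event
+ * programmed on @cpu. Without GENERIC_CLOCKEVENTS there is no tick
+ * device to query, so the stub reports KTIME_MAX (no event pending).
+ */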
+#ifdef CONFIG_GENERIC_CLOCKEVENTS
+extern ktime_t clockevents_get_next_event(int cpu);
+#else
+static inline ktime_t clockevents_get_next_event(int cpu)
+{
+       return (ktime_t) { .tv64 = KTIME_MAX };
+}
+#endif
 
 extern cpumask_var_t nohz_cpu_mask;
 #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ)
 extern int select_nohz_load_balancer(int cpu);
+extern int get_nohz_load_balancer(void);
 #else
 static inline int select_nohz_load_balancer(int cpu)
 {
                struct file *file, void __user *buffer, size_t *length,
                loff_t *ppos);
 #endif
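+
+/*
+ * With CONFIG_SCHED_DEBUG the timer migration knob is runtime tunable
+ * via sysctl_timer_migration; otherwise migration is always enabled.
+ */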
+#ifdef CONFIG_SCHED_DEBUG
+static inline unsigned int get_sysctl_timer_migration(void)
+{
+       return sysctl_timer_migration;
+}
+#else
+static inline unsigned int get_sysctl_timer_migration(void)
+{
+       return 1;
+}
+#endif
 extern unsigned int sysctl_sched_rt_period;
 extern int sysctl_sched_rt_runtime;
 
 
 #include <linux/seq_file.h>
 #include <linux/err.h>
 #include <linux/debugobjects.h>
+#include <linux/sched.h>
+#include <linux/timer.h>
 
 #include <asm/uaccess.h>
 
 {
        struct hrtimer_clock_base *new_base;
        struct hrtimer_cpu_base *new_cpu_base;
+       int cpu, preferred_cpu = -1;
+
+       cpu = smp_processor_id();
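+
+       /*
+        * When this cpu is idle and the timer is not pinned, prefer the
+        * cpu nominated for idle load balancing, so that this idle cpu
+        * does not have to wake up later just to service the timer.
+        */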
+#if defined(CONFIG_NO_HZ) && defined(CONFIG_SMP)
+       if (!pinned && get_sysctl_timer_migration() && idle_cpu(cpu)) {
+               preferred_cpu = get_nohz_load_balancer();
+               if (preferred_cpu >= 0)
+                       cpu = preferred_cpu;
+       }
+#endif
 
-       new_cpu_base = &__get_cpu_var(hrtimer_bases);
+again:
+       new_cpu_base = &per_cpu(hrtimer_bases, cpu);
        new_base = &new_cpu_base->clock_base[base->index];
 
        if (base != new_base) {
                timer->base = NULL;
                spin_unlock(&base->cpu_base->lock);
                spin_lock(&new_base->cpu_base->lock);
+
+               /* Optimized away for NOHZ=n or SMP=n: preferred_cpu stays -1 */
+               if (cpu == preferred_cpu) {
+                       /* Calculate clock monotonic expiry time */
+#ifdef CONFIG_HIGH_RES_TIMERS
+                       ktime_t expires = ktime_sub(hrtimer_get_expires(timer),
+                                                       new_base->offset);
+#else
+                       ktime_t expires = hrtimer_get_expires(timer);
+#endif
+
+                       /*
+                        * Get the next event on target cpu from the
+                        * clock events layer.
+                        * This covers the highres=off nohz=on case as well.
+                        */
+                       ktime_t next = clockevents_get_next_event(cpu);
+
+                       ktime_t delta = ktime_sub(expires, next);
+
+                       /*
+                        * We do not migrate the timer when it is expiring
+                        * before the next event on the target cpu because
+                        * we cannot reprogram the target cpu hardware and
+                        * we would cause it to fire late.
+                        */
+                       if (delta.tv64 < 0) {
+                               cpu = smp_processor_id();
+                               spin_unlock(&new_base->cpu_base->lock);
+                               spin_lock(&base->cpu_base->lock);
+                               timer->base = base;
+                               goto again;
+                       }
+               }
                timer->base = new_base;
        }
        return new_base;
        return base;
 }
 
-# define switch_hrtimer_base(t, b)     (b)
+# define switch_hrtimer_base(t, b, p)  (b)
 
 #endif /* !CONFIG_SMP */
 
 
        .load_balancer = ATOMIC_INIT(-1),
 };
 
+int get_nohz_load_balancer(void)
+{
+       return atomic_read(&nohz.load_balancer);
+}
+
 /*
  * This routine will try to nominate the ilb (idle load balancing)
  * owner among the cpus whose ticks are stopped. ilb owner will do the idle
 
 #include <linux/notifier.h>
 #include <linux/smp.h>
 #include <linux/sysdev.h>
+#include <linux/tick.h>
 
 /* The registered clock event devices */
 static LIST_HEAD(clockevent_devices);
        spin_unlock(&clockevents_lock);
 }
 EXPORT_SYMBOL_GPL(clockevents_notify);
+
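+/*
+ * clockevents_get_next_event - return the expiry time of the next
+ * event programmed into the tick device of @cpu.
+ */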
+ktime_t clockevents_get_next_event(int cpu)
+{
+       struct tick_device *td;
+       struct clock_event_device *dev;
+
+       td = &per_cpu(tick_cpu_device, cpu);
+       dev = td->evtdev;
+
+       return dev->next_event;
+}
 #endif
 
 #include <linux/delay.h>
 #include <linux/tick.h>
 #include <linux/kallsyms.h>
+#include <linux/sched.h>
 
 #include <asm/uaccess.h>
 #include <asm/unistd.h>
 {
        struct tvec_base *base, *new_base;
        unsigned long flags;
-       int ret;
-
-       ret = 0;
+       int ret = 0, cpu;
 
        timer_stats_timer_set_start_info(timer);
        BUG_ON(!timer->function);
 
-       new_base = __get_cpu_var(tvec_bases);
-
+       cpu = smp_processor_id();
+
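+       /*
+        * Unpinned timers queued while this cpu is idle are preferably
+        * migrated to the cpu which does the idle load balancing, so
+        * this cpu's tick can stay stopped. Compiled out for NOHZ=n or
+        * SMP=n.
+        */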
+#if defined(CONFIG_NO_HZ) && defined(CONFIG_SMP)
+       if (!pinned && get_sysctl_timer_migration() && idle_cpu(cpu)) {
+               int preferred_cpu = get_nohz_load_balancer();
+
+               if (preferred_cpu >= 0)
+                       cpu = preferred_cpu;
+       }
+#endif
+       new_base = per_cpu(tvec_bases, cpu);
+
        if (base != new_base) {
                /*
                 * We are trying to schedule the timer on the local CPU.