 static DEFINE_PER_CPU(int, rcu_dyntick_drain);
 static DEFINE_PER_CPU(unsigned long, rcu_dyntick_holdoff);
-static DEFINE_PER_CPU(struct hrtimer, rcu_idle_gp_timer);
-static ktime_t rcu_idle_gp_wait;       /* If some non-lazy callbacks. */
-static ktime_t rcu_idle_lazy_gp_wait;  /* If only lazy callbacks. */
+static DEFINE_PER_CPU(struct timer_list, rcu_idle_gp_timer);
 
 /*
  * Timer handler used to force the CPU to start pushing its remaining RCU
  * callbacks if it entered dyntick-idle mode with callbacks pending.  The
  * handler doesn't really need to do anything, because the real work is
  * done upon re-entry to idle, or by the next scheduling-clock interrupt
  * should idle not be re-entered.
  */
-static enum hrtimer_restart rcu_idle_gp_timer_func(struct hrtimer *hrtp)
+static void rcu_idle_gp_timer_func(unsigned long unused)
 {
        trace_rcu_prep_idle("Timer");
-       return HRTIMER_NORESTART;
 }
 
 /*
  * Initialize the per-CPU timer used to pull CPUs out of dyntick-idle mode.
  */
 static void rcu_prepare_for_idle_init(int cpu)
 {
-       static int firsttime = 1;
-       struct hrtimer *hrtp = &per_cpu(rcu_idle_gp_timer, cpu);
-
-       hrtimer_init(hrtp, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
-       hrtp->function = rcu_idle_gp_timer_func;
-       if (firsttime) {
-               unsigned int upj = jiffies_to_usecs(RCU_IDLE_GP_DELAY);
-
-               rcu_idle_gp_wait = ns_to_ktime(upj * (u64)1000);
-               upj = jiffies_to_usecs(RCU_IDLE_LAZY_GP_DELAY);
-               rcu_idle_lazy_gp_wait = ns_to_ktime(upj * (u64)1000);
-               firsttime = 0;
-       }
+       setup_timer(&per_cpu(rcu_idle_gp_timer, cpu),
+                   rcu_idle_gp_timer_func, 0);
 }
 
 /*
  * Clean up for exit from idle.  Because we are exiting from idle, the
  * timer pulling the CPU out of dyntick-idle mode is no longer needed,
  * so cancel it.  This does nothing if the timer is not pending.
  */
 static void rcu_cleanup_after_idle(int cpu)
 {
-       hrtimer_cancel(&per_cpu(rcu_idle_gp_timer, cpu));
+       del_timer(&per_cpu(rcu_idle_gp_timer, cpu));
        trace_rcu_prep_idle("Cleanup after idle");
 }
 
                per_cpu(rcu_dyntick_drain, cpu) = 0;
                per_cpu(rcu_dyntick_holdoff, cpu) = jiffies;
                if (rcu_cpu_has_nonlazy_callbacks(cpu))
-                       hrtimer_start(&per_cpu(rcu_idle_gp_timer, cpu),
-                                     rcu_idle_gp_wait, HRTIMER_MODE_REL);
+                       mod_timer(&per_cpu(rcu_idle_gp_timer, cpu),
+                                          jiffies + RCU_IDLE_GP_DELAY);
                else
-                       hrtimer_start(&per_cpu(rcu_idle_gp_timer, cpu),
-                                     rcu_idle_lazy_gp_wait, HRTIMER_MODE_REL);
+                       mod_timer(&per_cpu(rcu_idle_gp_timer, cpu),
+                                          jiffies + RCU_IDLE_LAZY_GP_DELAY);
                return; /* Nothing more to do immediately. */
        } else if (--per_cpu(rcu_dyntick_drain, cpu) <= 0) {
                /* We have hit the limit, so time to give up. */
 
 static void print_cpu_stall_fast_no_hz(char *cp, int cpu)
 {
-       struct hrtimer *hrtp = &per_cpu(rcu_idle_gp_timer, cpu);
+       struct timer_list *tltp = &per_cpu(rcu_idle_gp_timer, cpu);
 
-       sprintf(cp, "drain=%d %c timer=%lld",
+       sprintf(cp, "drain=%d %c timer=%lu",
                per_cpu(rcu_dyntick_drain, cpu),
                per_cpu(rcu_dyntick_holdoff, cpu) == jiffies ? 'H' : '.',
-               hrtimer_active(hrtp)
-                       ? ktime_to_us(hrtimer_get_remaining(hrtp))
-                       : -1);
+               timer_pending(tltp) ? (long)(tltp->expires - jiffies) : -1L);
 }
 
 #else /* #ifdef CONFIG_RCU_FAST_NO_HZ */