return rotate;
 }
 
-#ifdef CONFIG_NO_HZ_FULL
-bool perf_event_can_stop_tick(void)
-{
-       if (atomic_read(&nr_freq_events) ||
-           __this_cpu_read(perf_throttled_count))
-               return false;
-       else
-               return true;
-}
-#endif
-
 void perf_event_task_tick(void)
 {
        struct list_head *head = this_cpu_ptr(&active_ctx_list);
 
        __this_cpu_inc(perf_throttled_seq);
        throttled = __this_cpu_xchg(perf_throttled_count, 0);
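+       /*
+        * perf_throttled_count was just flushed, so the tick dependency
+        * set when an event got throttled can be dropped for this CPU.
+        */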
+       tick_dep_clear_cpu(smp_processor_id(), TICK_DEP_BIT_PERF_EVENTS);
 
        list_for_each_entry_safe(ctx, tmp, head, active_ctx_list)
                perf_adjust_freq_unthr_context(ctx, throttled);
 }
 
+#ifdef CONFIG_NO_HZ_FULL
+static DEFINE_SPINLOCK(nr_freq_lock);
+#endif
+
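+/*
+ * Undo account_freq_event_nohz(): the tick dependency goes away
+ * together with the last freq event.
+ */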
+static void unaccount_freq_event_nohz(void)
+{
+#ifdef CONFIG_NO_HZ_FULL
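+       /* Lock so we don't race with concurrent account */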
+       spin_lock(&nr_freq_lock);
+       if (atomic_dec_and_test(&nr_freq_events))
+               tick_nohz_dep_clear(TICK_DEP_BIT_PERF_EVENTS);
+       spin_unlock(&nr_freq_lock);
+#endif
+}
+
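+/*
+ * Without nohz_full the tick can't be stopped, so a plain atomic_dec()
+ * is enough and no tick dependency needs clearing.
+ */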
+static void unaccount_freq_event(void)
+{
+       if (tick_nohz_full_enabled())
+               unaccount_freq_event_nohz();
+       else
+               atomic_dec(&nr_freq_events);
+}
+
 static void unaccount_event(struct perf_event *event)
 {
        bool dec = false;
        if (event->attr.task)
                atomic_dec(&nr_task_events);
        if (event->attr.freq)
-               atomic_dec(&nr_freq_events);
+               unaccount_freq_event();
        if (event->attr.context_switch) {
                dec = true;
                atomic_dec(&nr_switch_events);
                if (unlikely(throttle
                             && hwc->interrupts >= max_samples_per_tick)) {
                        __this_cpu_inc(perf_throttled_count);
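+                       /*
+                        * Keep the tick alive on this CPU: the tick handler
+                        * is what unthrottles the event again
+                        * (see perf_event_task_tick()).
+                        */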
+                       tick_dep_set_cpu(smp_processor_id(), TICK_DEP_BIT_PERF_EVENTS);
                        hwc->interrupts = MAX_INTERRUPTS;
                        perf_log_throttle(event, 0);
-                       tick_nohz_full_kick();
                        ret = 1;
                }
        }
 }
 
+/* Freq events need the tick to stay alive (see perf_event_task_tick). */
+static void account_freq_event_nohz(void)
+{
+#ifdef CONFIG_NO_HZ_FULL
+       /* Lock so we don't race with concurrent unaccount */
+       spin_lock(&nr_freq_lock);
+       if (atomic_inc_return(&nr_freq_events) == 1)
+               tick_nohz_dep_set(TICK_DEP_BIT_PERF_EVENTS);
+       spin_unlock(&nr_freq_lock);
+#endif
+}
+
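+/*
+ * Take the nr_freq_lock slow path only when nohz_full is actually
+ * running; otherwise a plain atomic_inc() does.
+ */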
+static void account_freq_event(void)
+{
+       if (tick_nohz_full_enabled())
+               account_freq_event_nohz();
+       else
+               atomic_inc(&nr_freq_events);
+}
+
 static void account_event(struct perf_event *event)
 {
        bool inc = false;
        if (event->attr.comm)
                atomic_inc(&nr_comm_events);
        if (event->attr.task)
                atomic_inc(&nr_task_events);
-       if (event->attr.freq) {
-               if (atomic_inc_return(&nr_freq_events) == 1)
-                       tick_nohz_full_kick_all();
-       }
+       if (event->attr.freq)
+               account_freq_event();
        if (event->attr.context_switch) {
                atomic_inc(&nr_switch_events);
                inc = true;
 
 #include <linux/module.h>
 #include <linux/irq_work.h>
 #include <linux/posix-timers.h>
-#include <linux/perf_event.h>
 #include <linux/context_tracking.h>
 
 #include <asm/irq_regs.h>
                return false;
        }
 
-       if (!perf_event_can_stop_tick()) {
-               trace_tick_stop(0, TICK_DEP_MASK_PERF_EVENTS);
-               return false;
-       }
-
 #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
        /*
         * sched_clock_tick() needs us?
  * This kick, unlike tick_nohz_full_kick_cpu() and tick_nohz_full_kick_all(),
  * is NMI safe.
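+ * Perf no longer calls this directly, leaving no users outside this
+ * file, so it can now be static.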
  */
-void tick_nohz_full_kick(void)
+static void tick_nohz_full_kick(void)
 {
        if (!tick_nohz_full_cpu(smp_processor_id()))
                return;