 static DEFINE_PER_CPU(int, perf_throttled_count);
 static DEFINE_PER_CPU(u64, perf_throttled_seq);
 
-static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count)
+static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count, bool disable)
 {
        struct hw_perf_event *hwc = &event->hw;
        s64 period, sample_period;
        hwc->sample_period = sample_period;
 
        if (local64_read(&hwc->period_left) > 8*sample_period) {
-               event->pmu->stop(event, PERF_EF_UPDATE);
+               if (disable)
+                       event->pmu->stop(event, PERF_EF_UPDATE);
+
                local64_set(&hwc->period_left, 0);
-               event->pmu->start(event, PERF_EF_RELOAD);
+
+               if (disable)
+                       event->pmu->start(event, PERF_EF_RELOAD);
        }
 }
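
A hedged userspace sketch of the new calling convention (illustrative
only, not kernel code: struct fake_event, ev_stop() and ev_start() are
made-up stand-ins for the pmu->stop()/pmu->start() callbacks). A
caller that still has the event running passes disable=true so
perf_adjust_period() brackets the reprogramming itself; a caller that
has already stopped the event passes false so it is not stopped twice.

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

/* Toy model: stopping a stopped event (or starting a running one)
 * is exactly the bug the new flag prevents, so assert on it. */
struct fake_event { bool running; long period_left; };

static void ev_stop(struct fake_event *ev)  { assert(ev->running);  ev->running = false; }
static void ev_start(struct fake_event *ev) { assert(!ev->running); ev->running = true;  }

/* Mirrors the reworked perf_adjust_period(): only toggle the event
 * when the caller has not already done so. */
static void adjust_period(struct fake_event *ev, bool disable)
{
	if (disable)
		ev_stop(ev);
	ev->period_left = 0;		/* stands in for local64_set() */
	if (disable)
		ev_start(ev);
}

int main(void)
{
	struct fake_event ev = { .running = true, .period_left = 100 };

	/* Interrupt-time path: event still running, callee toggles it. */
	adjust_period(&ev, true);

	/* Unthrottle path: caller stopped the event first; passing
	 * false avoids the double stop the asserts would catch. */
	ev_stop(&ev);
	adjust_period(&ev, false);
	ev_start(&ev);

	puts("ok");
	return 0;
}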
 
                return;
 
        raw_spin_lock(&ctx->lock);
+       perf_pmu_disable(ctx->pmu);
 
        list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
                if (event->state != PERF_EVENT_STATE_ACTIVE)
                /*
                 * restart the event
                 * reload only if value has changed
+                * we have already stopped the event, so tell
+                * perf_adjust_period() not to stop and
+                * restart it a second time.
                 */
                if (delta > 0)
-                       perf_adjust_period(event, period, delta);
+                       perf_adjust_period(event, period, delta, false);
 
                event->pmu->start(event, delta > 0 ? PERF_EF_RELOAD : 0);
        }
 
+       perf_pmu_enable(ctx->pmu);
        raw_spin_unlock(&ctx->lock);
 }
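
Why the new perf_pmu_disable()/perf_pmu_enable() pair pays off: every
event in the loop is stopped, adjusted and restarted, and the
bracketing defers the hardware reprogramming so the whole batch costs
a single PMU update. A minimal sketch of that nesting-count pattern
(struct fake_pmu is illustrative; the real helpers keep a similar
per-cpu pmu_disable_count):

#include <stdio.h>

/* Toy nesting-count model of perf_pmu_disable()/perf_pmu_enable():
 * only the outermost pair touches the hardware. */
struct fake_pmu { int disable_count; };

static void pmu_disable(struct fake_pmu *pmu)
{
	if (pmu->disable_count++ == 0)
		puts("PMU off: per-event updates are now cheap");
}

static void pmu_enable(struct fake_pmu *pmu)
{
	if (--pmu->disable_count == 0)
		puts("PMU on: one hardware reprogram for the batch");
}

int main(void)
{
	struct fake_pmu pmu = { 0 };

	pmu_disable(&pmu);
	for (int i = 0; i < 3; i++)
		printf("stop/adjust/start event %d\n", i);
	pmu_enable(&pmu);
	return 0;
}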
 
                hwc->freq_time_stamp = now;
 
                if (delta > 0 && delta < 2*TICK_NSEC)
-                       perf_adjust_period(event, delta, hwc->last_period);
+                       perf_adjust_period(event, delta, hwc->last_period, true);
        }
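
On this path the event is still running, hence disable=true:
perf_adjust_period() must do the stop/start itself. The delta guard
is unchanged; it only trusts a fresh, roughly one-tick interval,
presumably because a larger delta indicates a stale timestamp whose
rate estimate would mis-scale the period. A minimal sketch of that
guard (FAKE_TICK_NSEC is a made-up stand-in for TICK_NSEC):

#include <stdbool.h>
#include <stdio.h>

#define FAKE_TICK_NSEC 1000000ULL	/* stand-in for TICK_NSEC */

/* Mirrors the guard: non-zero and under two ticks. */
static bool delta_is_fresh(unsigned long long delta)
{
	return delta > 0 && delta < 2 * FAKE_TICK_NSEC;
}

int main(void)
{
	printf("half a tick: %d\n", delta_is_fresh(FAKE_TICK_NSEC / 2));	/* 1 */
	printf("three ticks: %d\n", delta_is_fresh(3 * FAKE_TICK_NSEC));	/* 0 */
	return 0;
}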
 
        /*