*/
                if (interrupts == MAX_INTERRUPTS) {
                        perf_log_throttle(event, 1);
+                       perf_disable();
                        event->pmu->unthrottle(event);
+                       perf_enable();
                }
 
                if (!event->attr.freq || !event->attr.sample_freq)
                        continue;
 
+               perf_disable();
                event->pmu->read(event);
                now = atomic64_read(&event->count);
                delta = now - hwc->freq_count_stamp;
 
                if (delta > 0)
                        perf_adjust_period(event, TICK_NSEC, delta);
+               perf_enable();
        }
        raw_spin_unlock(&ctx->lock);
 }
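Taken on their own, the two hunks above push the PMU disable down into perf_ctx_adjust_freq() itself: unthrottling gets its own perf_disable()/perf_enable() bracket, and so does the read-count/adjust-period sequence, because the caller no longer disables the PMU across the whole tick (see the perf_event_task_tick() hunks below). As a rough sketch, the loop body ends up reading as follows; the enclosing declarations, the event-list walk, and the hwc->freq_count_stamp update between the two hunks are assumed from the surrounding kernel/perf_event.c and are not part of this patch:

		if (interrupts == MAX_INTERRUPTS) {
			perf_log_throttle(event, 1);
			perf_disable();		/* only around the PMU access */
			event->pmu->unthrottle(event);
			perf_enable();
		}

		if (!event->attr.freq || !event->attr.sample_freq)
			continue;

		perf_disable();			/* read + period update touch the PMU */
		event->pmu->read(event);
		now = atomic64_read(&event->count);
		delta = now - hwc->freq_count_stamp;
		hwc->freq_count_stamp = now;	/* assumed: unchanged line between the hunks */

		if (delta > 0)
			perf_adjust_period(event, TICK_NSEC, delta);
		perf_enable();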
  */
 static void rotate_ctx(struct perf_event_context *ctx)
 {
-       if (!ctx->nr_events)
-               return;
-
        raw_spin_lock(&ctx->lock);
 
        /* Rotate the first entry last of non-pinned groups */
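With the empty-context check removed, rotate_ctx() no longer decides whether rotating is worthwhile; that decision moves to its caller. What is left is just the locked rotation of the non-pinned (flexible) groups. A sketch of the function after this hunk, assuming the rotation itself is the list_rotate_left() of ctx->flexible_groups used by the surrounding code:

	static void rotate_ctx(struct perf_event_context *ctx)
	{
		raw_spin_lock(&ctx->lock);

		/* Rotate the first entry last of non-pinned groups */
		list_rotate_left(&ctx->flexible_groups);

		raw_spin_unlock(&ctx->lock);
	}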
 {
        struct perf_cpu_context *cpuctx;
        struct perf_event_context *ctx;
+       int rotate = 0;
 
        if (!atomic_read(&nr_events))
                return;
 
        cpuctx = &__get_cpu_var(perf_cpu_context);
-       ctx = curr->perf_event_ctxp;
+       if (cpuctx->ctx.nr_events &&
+           cpuctx->ctx.nr_events != cpuctx->ctx.nr_active)
+               rotate = 1;
 
-       perf_disable();
+       ctx = curr->perf_event_ctxp;
+       if (ctx && ctx->nr_events && ctx->nr_events != ctx->nr_active)
+               rotate = 1;
 
        perf_ctx_adjust_freq(&cpuctx->ctx);
        if (ctx)
                perf_ctx_adjust_freq(ctx);
 
+       if (!rotate)
+               return;
+
+       perf_disable();
        cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
        if (ctx)
                task_ctx_sched_out(ctx, EVENT_FLEXIBLE);
        cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE);
        if (ctx)
                task_ctx_sched_in(curr, EVENT_FLEXIBLE);
-
        perf_enable();
 }
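The net effect of the perf_event_task_tick() hunks is that rotation is only attempted when it can change anything: a context needs rotating only if it has events and not all of them are active at once, i.e. nr_events != nr_active. Frequency adjustment still runs on every tick, but the sched-out/rotate/sched-in sequence, together with the perf_disable()/perf_enable() pair around it, is skipped when rotate stays 0. Putting the hunks back together, the function should end up roughly as below; the rotate_ctx() calls in the middle are outside the visible hunk context and are assumed from the surrounding code:

	void perf_event_task_tick(struct task_struct *curr)
	{
		struct perf_cpu_context *cpuctx;
		struct perf_event_context *ctx;
		int rotate = 0;

		if (!atomic_read(&nr_events))
			return;

		cpuctx = &__get_cpu_var(perf_cpu_context);
		if (cpuctx->ctx.nr_events &&
		    cpuctx->ctx.nr_events != cpuctx->ctx.nr_active)
			rotate = 1;

		ctx = curr->perf_event_ctxp;
		if (ctx && ctx->nr_events && ctx->nr_events != ctx->nr_active)
			rotate = 1;

		/* frequency adjustment happens regardless of rotation */
		perf_ctx_adjust_freq(&cpuctx->ctx);
		if (ctx)
			perf_ctx_adjust_freq(ctx);

		if (!rotate)
			return;

		perf_disable();
		cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
		if (ctx)
			task_ctx_sched_out(ctx, EVENT_FLEXIBLE);

		rotate_ctx(&cpuctx->ctx);	/* assumed: unchanged lines between hunks */
		if (ctx)
			rotate_ctx(ctx);

		cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE);
		if (ctx)
			task_ctx_sched_in(curr, EVENT_FLEXIBLE);
		perf_enable();
	}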