if (ctx->task && cpuctx->task_ctx != ctx)
                return;
 
-       curr_rq_lock_irq_save(&flags);
-       spin_lock(&ctx->lock);
+       spin_lock_irqsave(&ctx->lock, flags);
 
        counter_sched_out(counter, cpuctx, ctx);
 
                            perf_max_counters - perf_reserved_percpu);
        }
 
-       spin_unlock(&ctx->lock);
-       curr_rq_unlock_irq_restore(&flags);
+       spin_unlock_irqrestore(&ctx->lock, flags);
 }
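
The hunk above, and the three that follow it, all make the same
substitution: the old two-step sequence, a runqueue-based IRQ disable
followed by a plain spin_lock(), collapses into a single
spin_lock_irqsave() on ctx->lock. A minimal standalone sketch of the
resulting idiom; struct demo_ctx and demo_update() are illustrative
names, not perf code:

	#include <linux/spinlock.h>

	struct demo_ctx {
		spinlock_t	lock;
		int		nr_counters;
	};

	static void demo_update(struct demo_ctx *ctx)
	{
		unsigned long flags;

		/*
		 * Disable local interrupts, save the previous IRQ state
		 * in 'flags' and take the lock, all in one call ...
		 */
		spin_lock_irqsave(&ctx->lock, flags);
		ctx->nr_counters++;
		/* ... then release the lock and restore the IRQ state. */
		spin_unlock_irqrestore(&ctx->lock, flags);
	}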
 
 
        if (ctx->task && cpuctx->task_ctx != ctx)
                return;
 
-       curr_rq_lock_irq_save(&flags);
-       spin_lock(&ctx->lock);
+       spin_lock_irqsave(&ctx->lock, flags);
 
        /*
         * If the counter is on, turn it off.
                counter->state = PERF_COUNTER_STATE_OFF;
        }
 
-       spin_unlock(&ctx->lock);
-       curr_rq_unlock_irq_restore(&flags);
+       spin_unlock_irqrestore(&ctx->lock, flags);
 }
 
 /*
        if (ctx->task && cpuctx->task_ctx != ctx)
                return;
 
-       curr_rq_lock_irq_save(&flags);
-       spin_lock(&ctx->lock);
+       spin_lock_irqsave(&ctx->lock, flags);
        update_context_time(ctx);
 
        /*
  unlock:
        hw_perf_restore(perf_flags);
 
-       spin_unlock(&ctx->lock);
-       curr_rq_unlock_irq_restore(&flags);
+       spin_unlock_irqrestore(&ctx->lock, flags);
 }
 
 /*
        if (ctx->task && cpuctx->task_ctx != ctx)
                return;
 
-       curr_rq_lock_irq_save(&flags);
-       spin_lock(&ctx->lock);
+       spin_lock_irqsave(&ctx->lock, flags);
        update_context_time(ctx);
 
        counter->prev_state = counter->state;
        }
 
  unlock:
-       spin_unlock(&ctx->lock);
-       curr_rq_unlock_irq_restore(&flags);
+       spin_unlock_irqrestore(&ctx->lock, flags);
 }
 
 /*
        if (likely(!ctx->nr_counters))
                return 0;
 
-       curr_rq_lock_irq_save(&flags);
+       local_irq_save(flags);
        cpu = smp_processor_id();
 
        perf_counter_task_sched_out(curr, cpu);
 
        hw_perf_restore(perf_flags);
 
-       spin_unlock(&ctx->lock);
-
-       curr_rq_unlock_irq_restore(&flags);
+       spin_unlock_irqrestore(&ctx->lock, flags);
 
        return 0;
 }
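
The pairing this hunk leaves behind looks asymmetric: local_irq_save()
at the top but spin_unlock_irqrestore() at the bottom. It balances
because spin_unlock_irqrestore() is spin_unlock() plus
local_irq_restore(); the unlock at the bottom implies that the elided
middle of the function takes ctx->lock with a plain spin_lock() while
interrupts are already off. A hedged sketch of that shape, reusing the
illustrative struct demo_ctx from the first sketch:

	static void demo_disable_all(struct demo_ctx *ctx)
	{
		unsigned long flags;

		local_irq_save(flags);	/* IRQs off for the whole section */

		/* ... per-cpu work that must not be interrupted ... */

		spin_lock(&ctx->lock);	/* plain lock: IRQs are already off */
		ctx->nr_counters = 0;

		/* drop the lock and restore the IRQ state saved above */
		spin_unlock_irqrestore(&ctx->lock, flags);
	}
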
        if (likely(!ctx->nr_counters))
                return 0;
 
-       curr_rq_lock_irq_save(&flags);
+       local_irq_save(flags);
        cpu = smp_processor_id();
 
        perf_counter_task_sched_out(curr, cpu);
 
        perf_counter_task_sched_in(curr, cpu);
 
-       curr_rq_unlock_irq_restore(&flags);
+       local_irq_restore(flags);
 
        return 0;
 }
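
By contrast, this hunk ends with a plain local_irq_restore():
presumably ctx->lock is taken and dropped in the elided middle, before
perf_counter_task_sched_in() runs, so only the IRQ-off bracket spans
the whole function. That bracket is also what keeps the
smp_processor_id() result valid, since with interrupts off the task
cannot migrate. A sketch, again with illustrative names:

	#include <linux/printk.h>
	#include <linux/smp.h>

	static void demo_enable_all(struct demo_ctx *ctx)
	{
		unsigned long flags;
		int cpu;

		local_irq_save(flags);
		cpu = smp_processor_id();	/* stable while IRQs are off */

		spin_lock(&ctx->lock);
		ctx->nr_counters++;
		spin_unlock(&ctx->lock);	/* lock dropped, IRQs still off */

		/* work that must stay on 'cpu' goes here */
		pr_debug("enabled counters on CPU %d\n", cpu);

		local_irq_restore(flags);
	}
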
        struct perf_counter_context *ctx = counter->ctx;
        unsigned long flags;
 
-       curr_rq_lock_irq_save(&flags);
+       local_irq_save(flags);
        if (ctx->is_active)
                update_context_time(ctx);
        counter->hw_ops->read(counter);
        update_counter_times(counter);
-       curr_rq_unlock_irq_restore(&flags);
+       local_irq_restore(flags);
 }
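
The same plain local_irq_save()/local_irq_restore() bracket now covers
the read path, with no ctx->lock at all: __read() runs on the CPU that
owns the counter (perf_counter_read() below arranges that, e.g. via a
cross-CPU call), so disabling interrupts is enough to keep the hardware
read and the time accounting coherent with the scheduler hooks on that
CPU. A hedged sketch of the run-it-on-the-owning-CPU pattern;
struct demo_counter and demo_read_local() are illustrative:

	#include <linux/smp.h>
	#include <linux/types.h>

	struct demo_counter {
		u64	count;
		int	oncpu;		/* CPU the counter is active on */
	};

	/* runs on the owning CPU, called from IPI context */
	static void demo_read_local(void *info)
	{
		struct demo_counter *c = info;
		unsigned long flags;

		local_irq_save(flags);
		c->count++;	/* stand-in for counter->hw_ops->read() */
		local_irq_restore(flags);
	}

	/* caller: smp_call_function_single(c->oncpu, demo_read_local, c, 1); */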
 
 static u64 perf_counter_read(struct perf_counter *counter)
                 * Be careful about zapping the list - IRQ/NMI context
                 * could still be processing it:
                 */
-               curr_rq_lock_irq_save(&flags);
+               local_irq_save(flags);
                perf_flags = hw_perf_save_disable();
 
                cpuctx = &__get_cpu_var(perf_cpu_context);
                child_ctx->nr_counters--;
 
                hw_perf_restore(perf_flags);
-               curr_rq_unlock_irq_restore(&flags);
+               local_irq_restore(flags);
        }
 
        parent_counter = child_counter->parent;
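
One last note on the zap hunk above: local_irq_save() stops interrupts
on this CPU but not NMIs, which is presumably why it is paired with
hw_perf_save_disable()/hw_perf_restore(): with the counters themselves
stopped, no new PMU NMIs fire while the list is taken apart (the
existing "IRQ/NMI context could still be processing it" comment says as
much). The bracketing nests strictly, restored in reverse order of
disabling. A sketch of the shape only, with the list surgery elided;
demo_zap_counters() is an illustrative name, while hw_perf_save_disable()
and hw_perf_restore() are the helpers the hunk itself uses:

	static void demo_zap_counters(void)
	{
		unsigned long flags;
		u64 perf_flags;

		local_irq_save(flags);			/* 1. IRQs off */
		perf_flags = hw_perf_save_disable();	/* 2. PMU off  */

		/* ... unlink counters from the context ... */

		hw_perf_restore(perf_flags);		/* 2'. PMU back on  */
		local_irq_restore(flags);		/* 1'. IRQs back on */
	}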