@@ ... @@
        struct perf_event_context *ctx;
 
        rcu_read_lock();
- retry:
+retry:
        ctx = rcu_dereference(task->perf_event_ctxp);
        if (ctx) {
                /*
@@ ... @@
                return;
        }
 
- retry:
+retry:
        task_oncpu_function_call(task, __perf_event_disable, event);
 
        raw_spin_lock_irq(&ctx->lock);
@@ ... @@
        if (!err && !ctx->task && cpuctx->max_pertask)
                cpuctx->max_pertask--;
 
- unlock:
+unlock:
        perf_enable();
 
        raw_spin_unlock(&ctx->lock);
@@ ... @@
 
        event->state = PERF_EVENT_STATE_INACTIVE;
        event->tstamp_enabled = ctx->time - event->total_time_enabled;
-       list_for_each_entry(sub, &event->sibling_list, group_entry)
-               if (sub->state >= PERF_EVENT_STATE_INACTIVE)
+       list_for_each_entry(sub, &event->sibling_list, group_entry) {
+               if (sub->state >= PERF_EVENT_STATE_INACTIVE) {
                        sub->tstamp_enabled =
                                ctx->time - sub->total_time_enabled;
+               }
+       }
 }
 
 /*
@@ ... @@
                }
        }
 
- unlock:
+unlock:
        raw_spin_unlock(&ctx->lock);
 }
 
@@ ... @@
        if (event->state == PERF_EVENT_STATE_ERROR)
                event->state = PERF_EVENT_STATE_OFF;
 
- retry:
+retry:
        raw_spin_unlock_irq(&ctx->lock);
        task_oncpu_function_call(task, __perf_event_enable, event);
 
@@ ... @@
        if (event->state == PERF_EVENT_STATE_OFF)
                __perf_event_mark_enabled(event, ctx);
 
- out:
+out:
        raw_spin_unlock_irq(&ctx->lock);
 }
 
@@ ... @@
        if (!ctx->nr_active)
                goto out_enable;
 
-       if (event_type & EVENT_PINNED)
+       if (event_type & EVENT_PINNED) {
                list_for_each_entry(event, &ctx->pinned_groups, group_entry)
                        group_sched_out(event, cpuctx, ctx);
+       }
 
-       if (event_type & EVENT_FLEXIBLE)
+       if (event_type & EVENT_FLEXIBLE) {
                list_for_each_entry(event, &ctx->flexible_groups, group_entry)
                        group_sched_out(event, cpuctx, ctx);
+       }
 
  out_enable:
        perf_enable();
- out:
+out:
        raw_spin_unlock(&ctx->lock);
 }
 
@@ ... @@
                if (event->cpu != -1 && event->cpu != smp_processor_id())
                        continue;
 
-               if (group_can_go_on(event, cpuctx, can_add_hw))
+               if (group_can_go_on(event, cpuctx, can_add_hw)) {
                        if (group_sched_in(event, cpuctx, ctx))
                                can_add_hw = 0;
+               }
        }
 }
 
@@ ... @@
                ctx_flexible_sched_in(ctx, cpuctx);
 
        perf_enable();
- out:
+out:
        raw_spin_unlock(&ctx->lock);
 }
 
@@ ... @@
        raw_spin_unlock(&ctx->lock);
 
        perf_event_task_sched_in(task);
- out:
+out:
        local_irq_restore(flags);
 }
 
@@ ... @@
        if (!ptrace_may_access(task, PTRACE_MODE_READ))
                goto errout;
 
- retry:
+retry:
        ctx = perf_lock_task_context(task, &flags);
        if (ctx) {
                unclone_ctx(ctx);
@@ ... @@
        put_task_struct(task);
        return ctx;
 
- errout:
+errout:
        put_task_struct(task);
        return ERR_PTR(err);
 }
@@ ... @@
        if (handle->wakeup != local_read(&buffer->wakeup))
                perf_output_wakeup(handle);
 
- out:
+out:
        preempt_enable();
 }
 
@@ ... @@
                rcu_assign_pointer(cpuctx->swevent_hlist, hlist);
        }
        cpuctx->hlist_refcount++;
- exit:
+exit:
        mutex_unlock(&cpuctx->hlist_mutex);
 
        return err;
@@ ... @@
        put_online_cpus();
 
        return 0;
- fail:
+fail:
        for_each_possible_cpu(cpu) {
                if (cpu == failed_cpu)
                        break;
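
Taken together, the hunks above apply two rules from the kernel's Documentation/CodingStyle: goto labels start in column 0 rather than being indented by a single space, and a conditional or loop whose only statement spans multiple lines gets braces even though it is syntactically a single statement. Below is a minimal sketch of both rules; the struct, field, and function names (my_event, update_stamps) are hypothetical and not taken from the patch.

/*
 * Hypothetical illustration -- my_event and update_stamps are made-up
 * names, not code from the patch above.
 */
#include <linux/list.h>
#include <linux/types.h>

struct my_event {
        bool                    active;
        u64                     tstamp;
        u64                     total_time;
        struct list_head        sibling_list;   /* children */
        struct list_head        entry;          /* link in a sibling_list */
};

static void update_stamps(struct my_event *event, u64 now)
{
        struct my_event *sub;

        if (!event->active)
                goto out;       /* one-line body: still no braces */

        /*
         * The lone statement spans multiple lines, so it gets braces,
         * exactly as the list_for_each_entry() hunks above do.
         */
        list_for_each_entry(sub, &event->sibling_list, entry) {
                if (sub->active) {
                        sub->tstamp =
                                now - sub->total_time;
                }
        }
out:    /* label in column 0, no longer " out:" */
        return;
}

The one-space label indentation being removed here is an old habit that kept labels from matching the function heuristic of diff -p; checkpatch now warns that labels should not be indented, which is presumably what motivated this sweep.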