struct { /* breakpoint */
                        struct arch_hw_breakpoint       info;
                        struct list_head                bp_list;
+                       /*
+                        * Crufty hack to avoid the chicken and egg
+                        * problem hw_breakpoint has with context
+                        * creation and event initialization.
+                        */
+                       struct task_struct              *bp_target;
                };
 #endif
        };
 
 #define PERF_ATTACH_CONTEXT    0x01
 #define PERF_ATTACH_GROUP      0x02
+#define PERF_ATTACH_TASK       0x04
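
Background for the two additions above: the hw_breakpoint constraint code (reserve_bp_slot() and friends) runs from the PMU's event_init path, i.e. while perf_event_alloc() is still initializing the event and before find_get_context() has attached it to a task context. At that point bp->ctx does not exist yet, so the target task is stashed in hw.bp_target and the event is flagged PERF_ATTACH_TASK. A rough sketch of the ordering in the perf_event_open() path (simplified and abbreviated, error handling omitted):

        event = perf_event_alloc(&attr, cpu, task, group_leader, NULL, NULL);
        /*
         * -> pmu->event_init() -> hw_breakpoint slot reservation,
         *    which now reads the target task from event->hw.bp_target
         */

        ctx = find_get_context(pmu, task, cpu);   /* task context created/looked up here */
        perf_install_in_context(ctx, event, cpu); /* event->ctx only valid from here on  */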
 
 /**
  * struct perf_event - performance event kernel representation:
 
  */
 static int task_bp_pinned(struct perf_event *bp, enum bp_type_idx type)
 {
-       struct perf_event_context *ctx = bp->ctx;
+       struct task_struct *tsk = bp->hw.bp_target;
        struct perf_event *iter;
        int count = 0;
 
        list_for_each_entry(iter, &bp_task_head, hw.bp_list) {
-               if (iter->ctx == ctx && find_slot_idx(iter) == type)
+               if (iter->hw.bp_target == tsk && find_slot_idx(iter) == type)
                        count += hw_breakpoint_weight(iter);
        }
 
                    enum bp_type_idx type)
 {
        int cpu = bp->cpu;
-       struct task_struct *tsk = bp->ctx->task;
+       struct task_struct *tsk = bp->hw.bp_target;
 
        if (cpu >= 0) {
                slots->pinned = per_cpu(nr_cpu_bp_pinned[type], cpu);
               int weight)
 {
        int cpu = bp->cpu;
-       struct task_struct *tsk = bp->ctx->task;
+       struct task_struct *tsk = bp->hw.bp_target;
 
        /* Pinned counter cpu profiling */
        if (!tsk) {
 
  */
 static struct perf_event *
 perf_event_alloc(struct perf_event_attr *attr, int cpu,
-                  struct perf_event *group_leader,
-                  struct perf_event *parent_event,
-                  perf_overflow_handler_t overflow_handler)
+                struct task_struct *task,
+                struct perf_event *group_leader,
+                struct perf_event *parent_event,
+                perf_overflow_handler_t overflow_handler)
 {
        struct pmu *pmu;
        struct perf_event *event;
 
        event->state            = PERF_EVENT_STATE_INACTIVE;
 
+       if (task) {
+               event->attach_state = PERF_ATTACH_TASK;
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+               /*
+                * hw_breakpoint needs to know the target task at event
+                * init time, before the event has a context; stash it.
+                */
+               if (attr->type == PERF_TYPE_BREAKPOINT)
+                       event->hw.bp_target = task;
+#endif
+       }
+
        if (!overflow_handler && parent_event)
                overflow_handler = parent_event->overflow_handler;
        
                }
        }
 
-       event = perf_event_alloc(&attr, cpu, group_leader, NULL, NULL);
+       event = perf_event_alloc(&attr, cpu, task, group_leader, NULL, NULL);
        if (IS_ERR(event)) {
                err = PTR_ERR(event);
                goto err_task;
         * Get the target context (task or percpu):
         */
 
-       event = perf_event_alloc(attr, cpu, NULL, NULL, overflow_handler);
+       event = perf_event_alloc(attr, cpu, task, NULL, NULL, overflow_handler);
        if (IS_ERR(event)) {
                err = PTR_ERR(event);
                goto err;
 
        child_event = perf_event_alloc(&parent_event->attr,
                                           parent_event->cpu,
+                                          child,
                                           group_leader, parent_event,
                                           NULL);
        if (IS_ERR(child_event))
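
For reference, a per-task breakpoint created through the existing register_user_hw_breakpoint() wrapper looks roughly like the sketch below. This is illustrative only and not part of the patch; sample_address, sample_handler and tsk are placeholders.

        struct perf_event_attr attr;
        struct perf_event *bp;

        hw_breakpoint_init(&attr);
        attr.bp_addr = sample_address;          /* placeholder watch address */
        attr.bp_len  = HW_BREAKPOINT_LEN_4;
        attr.bp_type = HW_BREAKPOINT_W;

        /*
         * register_user_hw_breakpoint() ends up in
         * perf_event_create_kernel_counter(), which now hands tsk to
         * perf_event_alloc() so the slot accounting above can use
         * event->hw.bp_target before the event has a context.
         */
        bp = register_user_hw_breakpoint(&attr, sample_handler, tsk);
        if (IS_ERR(bp))
                return PTR_ERR(bp);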