        else
                set_dumpable(current->mm, suid_dumpable);
 
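+       /* Re-enable perf events that requested attr.enable_on_exec */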
+       perf_event_exec();
        set_task_comm(current, kbasename(bprm->filename));
 
        /* Set the new mm task size. We have to do that late because it may
 
 extern int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
 extern int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
 
+extern void perf_event_exec(void);
 extern void perf_event_comm(struct task_struct *tsk);
 extern void perf_event_fork(struct task_struct *tsk);
 
 extern void perf_event_disable(struct perf_event *event);
 extern int __perf_event_disable(void *info);
 extern void perf_event_task_tick(void);
-#else
+#else /* !CONFIG_PERF_EVENTS: */
 static inline void
 perf_event_task_sched_in(struct task_struct *prev,
                         struct task_struct *task)                      { }
 static inline int perf_unregister_guest_info_callbacks
 (struct perf_guest_info_callbacks *callbacks)                          { return 0; }
 
 static inline void perf_event_mmap(struct vm_area_struct *vma)         { }
+static inline void perf_event_exec(void)                               { }
 static inline void perf_event_comm(struct task_struct *tsk)            { }
 static inline void perf_event_fork(struct task_struct *tsk)            { }
 static inline void perf_event_init(void)                               { }
 
        local_irq_restore(flags);
 }
 
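+/*
+ * Called from the exec() path: walk each of current's perf event contexts
+ * and enable any events that were created with attr.enable_on_exec set.
+ */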
+void perf_event_exec(void)
+{
+       struct perf_event_context *ctx;
+       int ctxn;
+
+       rcu_read_lock();
+       for_each_task_context_nr(ctxn) {
+               ctx = current->perf_event_ctxp[ctxn];
+               if (!ctx)
+                       continue;
+
+               perf_event_enable_on_exec(ctx);
+       }
+       rcu_read_unlock();
+}
+
 /*
  * Cross CPU call to read the hardware event
  */
 void perf_event_comm(struct task_struct *task)
 {
        struct perf_comm_event comm_event;
-       struct perf_event_context *ctx;
-       int ctxn;
-
-       rcu_read_lock();
-       for_each_task_context_nr(ctxn) {
-               ctx = task->perf_event_ctxp[ctxn];
-               if (!ctx)
-                       continue;
-
-               perf_event_enable_on_exec(ctx);
-       }
-       rcu_read_unlock();
 
        if (!atomic_read(&nr_comm_events))
                return;