{
        struct trace_array *tr = trace_file->tr;
        struct trace_array_cpu *data;
+       struct trace_pid_list *no_pid_list;
        struct trace_pid_list *pid_list;
 
        pid_list = rcu_dereference_raw(tr->filtered_pids);
-       if (!pid_list)
+       no_pid_list = rcu_dereference_raw(tr->filtered_no_pids);
+
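+       /* Without either pid list, no events are being filtered by pid */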
+       if (!pid_list && !no_pid_list)
                return false;
 
        data = this_cpu_ptr(tr->array_buffer.data);
 
        pid_list = rcu_dereference_raw(tr->filtered_pids);
        trace_filter_add_remove_task(pid_list, NULL, task);
+
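+       /* Remove the exiting task from the notrace pid list as well */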
+       pid_list = rcu_dereference_raw(tr->filtered_no_pids);
+       trace_filter_add_remove_task(pid_list, NULL, task);
 }
 
 static void
 
        pid_list = rcu_dereference_sched(tr->filtered_pids);
        trace_filter_add_remove_task(pid_list, self, task);
+
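+       /* If the parent is in the notrace list, add the child to it too */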
+       pid_list = rcu_dereference_sched(tr->filtered_no_pids);
+       trace_filter_add_remove_task(pid_list, self, task);
 }
 
 void trace_event_follow_fork(struct trace_array *tr, bool enable)
                    struct task_struct *prev, struct task_struct *next)
 {
        struct trace_array *tr = data;
+       struct trace_pid_list *no_pid_list;
        struct trace_pid_list *pid_list;
+       bool ret;
 
        pid_list = rcu_dereference_sched(tr->filtered_pids);
+       no_pid_list = rcu_dereference_sched(tr->filtered_no_pids);
 
-       this_cpu_write(tr->array_buffer.data->ignore_pid,
-                      trace_ignore_this_task(pid_list, NULL, prev) &&
-                      trace_ignore_this_task(pid_list, NULL, next));
+       /*
+        * Sched switch is funny, as we only want to ignore it
+        * in the notrace case if both prev and next should be ignored.
+        */
+       ret = trace_ignore_this_task(NULL, no_pid_list, prev) &&
+               trace_ignore_this_task(NULL, no_pid_list, next);
+
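+       /* Ignore if notrace fired, or if neither task is in the trace list */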
+       this_cpu_write(tr->array_buffer.data->ignore_pid, ret ||
+                      (trace_ignore_this_task(pid_list, NULL, prev) &&
+                       trace_ignore_this_task(pid_list, NULL, next)));
 }
 
 static void
                    struct task_struct *prev, struct task_struct *next)
 {
        struct trace_array *tr = data;
+       struct trace_pid_list *no_pid_list;
        struct trace_pid_list *pid_list;
 
        pid_list = rcu_dereference_sched(tr->filtered_pids);
+       no_pid_list = rcu_dereference_sched(tr->filtered_no_pids);
 
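+       /* After the switch, only the incoming task determines ignore_pid */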
        this_cpu_write(tr->array_buffer.data->ignore_pid,
-                      trace_ignore_this_task(pid_list, NULL, next));
+                      trace_ignore_this_task(pid_list, no_pid_list, next));
 }
 
 static void
 event_filter_pid_sched_wakeup_probe_pre(void *data, struct task_struct *task)
 {
        struct trace_array *tr = data;
+       struct trace_pid_list *no_pid_list;
        struct trace_pid_list *pid_list;
 
        /* Nothing to do if we are already tracing */
                return;
 
        pid_list = rcu_dereference_sched(tr->filtered_pids);
+       no_pid_list = rcu_dereference_sched(tr->filtered_no_pids);
 
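+       /* Test the woken task against both the trace and notrace lists */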
        this_cpu_write(tr->array_buffer.data->ignore_pid,
-                      trace_ignore_this_task(pid_list, NULL, task));
+                      trace_ignore_this_task(pid_list, no_pid_list, task));
 }
 
 static void
 event_filter_pid_sched_wakeup_probe_post(void *data, struct task_struct *task)
 {
        struct trace_array *tr = data;
+       struct trace_pid_list *no_pid_list;
        struct trace_pid_list *pid_list;
 
        /* Nothing to do if we are not tracing */
                return;
 
        pid_list = rcu_dereference_sched(tr->filtered_pids);
+       no_pid_list = rcu_dereference_sched(tr->filtered_no_pids);
 
        /* Set tracing if current is enabled */
        this_cpu_write(tr->array_buffer.data->ignore_pid,
-                      trace_ignore_this_task(pid_list, NULL, current));
+                      trace_ignore_this_task(pid_list, no_pid_list, current));
 }
 
-static void __ftrace_clear_event_pids(struct trace_array *tr)
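+/* Detach the sched tracepoint probes used for pid filtering */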
+static void unregister_pid_events(struct trace_array *tr)
 {
-       struct trace_pid_list *pid_list;
-       struct trace_event_file *file;
-       int cpu;
-
-       pid_list = rcu_dereference_protected(tr->filtered_pids,
-                                            lockdep_is_held(&event_mutex));
-       if (!pid_list)
-               return;
-
        unregister_trace_sched_switch(event_filter_pid_sched_switch_probe_pre, tr);
        unregister_trace_sched_switch(event_filter_pid_sched_switch_probe_post, tr);
 
 
        unregister_trace_sched_waking(event_filter_pid_sched_wakeup_probe_pre, tr);
        unregister_trace_sched_waking(event_filter_pid_sched_wakeup_probe_post, tr);
+}
 
-       list_for_each_entry(file, &tr->events, list) {
-               clear_bit(EVENT_FILE_FL_PID_FILTER_BIT, &file->flags);
+static void __ftrace_clear_event_pids(struct trace_array *tr, int type)
+{
+       struct trace_pid_list *pid_list;
+       struct trace_pid_list *no_pid_list;
+       struct trace_event_file *file;
+       int cpu;
+
+       pid_list = rcu_dereference_protected(tr->filtered_pids,
+                                            lockdep_is_held(&event_mutex));
+       no_pid_list = rcu_dereference_protected(tr->filtered_no_pids,
+                                            lockdep_is_held(&event_mutex));
+
+       /* Make sure there's something to do */
+       if (!pid_type_enabled(type, pid_list, no_pid_list))
+               return;
+
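+       /*
+        * The sched probes and per-cpu ignore state serve both list
+        * types; only tear them down when neither remains in use.
+        */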
+       if (!still_need_pid_events(type, pid_list, no_pid_list)) {
+               unregister_pid_events(tr);
+
+               list_for_each_entry(file, &tr->events, list) {
+                       clear_bit(EVENT_FILE_FL_PID_FILTER_BIT, &file->flags);
+               }
+
+               for_each_possible_cpu(cpu)
+                       per_cpu_ptr(tr->array_buffer.data, cpu)->ignore_pid = false;
        }
 
-       for_each_possible_cpu(cpu)
-               per_cpu_ptr(tr->array_buffer.data, cpu)->ignore_pid = false;
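+       /* Detach only the list type(s) this caller asked to clear */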
+       if (type & TRACE_PIDS)
+               rcu_assign_pointer(tr->filtered_pids, NULL);
 
-       rcu_assign_pointer(tr->filtered_pids, NULL);
+       if (type & TRACE_NO_PIDS)
+               rcu_assign_pointer(tr->filtered_no_pids, NULL);
 
        /* Wait till all users are no longer using pid filtering */
        tracepoint_synchronize_unregister();
 
-       trace_free_pid_list(pid_list);
+       if ((type & TRACE_PIDS) && pid_list)
+               trace_free_pid_list(pid_list);
+
+       if ((type & TRACE_NO_PIDS) && no_pid_list)
+               trace_free_pid_list(no_pid_list);
 }
 
-static void ftrace_clear_event_pids(struct trace_array *tr)
+static void ftrace_clear_event_pids(struct trace_array *tr, int type)
 {
        mutex_lock(&event_mutex);
-       __ftrace_clear_event_pids(tr);
+       __ftrace_clear_event_pids(tr, type);
        mutex_unlock(&event_mutex);
 }
 
 }
 
 static void *
-p_next(struct seq_file *m, void *v, loff_t *pos)
+__next(struct seq_file *m, void *v, loff_t *pos, int type)
 {
        struct trace_array *tr = m->private;
-       struct trace_pid_list *pid_list = rcu_dereference_sched(tr->filtered_pids);
+       struct trace_pid_list *pid_list;
+
+       if (type == TRACE_PIDS)
+               pid_list = rcu_dereference_sched(tr->filtered_pids);
+       else
+               pid_list = rcu_dereference_sched(tr->filtered_no_pids);
 
        return trace_pid_next(pid_list, v, pos);
 }
 
-static void *p_start(struct seq_file *m, loff_t *pos)
+static void *
+p_next(struct seq_file *m, void *v, loff_t *pos)
+{
+       return __next(m, v, pos, TRACE_PIDS);
+}
+
+static void *
+np_next(struct seq_file *m, void *v, loff_t *pos)
+{
+       return __next(m, v, pos, TRACE_NO_PIDS);
+}
+
+static void *__start(struct seq_file *m, loff_t *pos, int type)
        __acquires(RCU)
 {
        struct trace_pid_list *pid_list;
        mutex_lock(&event_mutex);
        rcu_read_lock_sched();
 
-       pid_list = rcu_dereference_sched(tr->filtered_pids);
+       if (type == TRACE_PIDS)
+               pid_list = rcu_dereference_sched(tr->filtered_pids);
+       else
+               pid_list = rcu_dereference_sched(tr->filtered_no_pids);
 
        if (!pid_list)
                return NULL;
        return trace_pid_start(pid_list, pos);
 }
 
+static void *p_start(struct seq_file *m, loff_t *pos)
+       __acquires(RCU)
+{
+       return __start(m, pos, TRACE_PIDS);
+}
+
+static void *np_start(struct seq_file *m, loff_t *pos)
+       __acquires(RCU)
+{
+       return __start(m, pos, TRACE_NO_PIDS);
+}
+
 static void p_stop(struct seq_file *m, void *p)
        __releases(RCU)
 {
 {
        struct trace_array *tr = data;
        struct trace_pid_list *pid_list;
+       struct trace_pid_list *no_pid_list;
 
        /*
         * This function is called by on_each_cpu() while the
         */
        pid_list = rcu_dereference_protected(tr->filtered_pids,
                                             mutex_is_locked(&event_mutex));
+       no_pid_list = rcu_dereference_protected(tr->filtered_no_pids,
+                                            mutex_is_locked(&event_mutex));
 
        this_cpu_write(tr->array_buffer.data->ignore_pid,
-                      trace_ignore_this_task(pid_list, NULL, current));
+                      trace_ignore_this_task(pid_list, no_pid_list, current));
+}
+
+static void register_pid_events(struct trace_array *tr)
+{
+       /*
+        * Register a probe that is called before all other probes
+        * to set ignore_pid if next or prev do not match.
+        * Register a probe that is called after all other probes
+        * to only keep ignore_pid set if next pid matches.
+        */
+       register_trace_prio_sched_switch(event_filter_pid_sched_switch_probe_pre,
+                                        tr, INT_MAX);
+       register_trace_prio_sched_switch(event_filter_pid_sched_switch_probe_post,
+                                        tr, 0);
+
+       register_trace_prio_sched_wakeup(event_filter_pid_sched_wakeup_probe_pre,
+                                        tr, INT_MAX);
+       register_trace_prio_sched_wakeup(event_filter_pid_sched_wakeup_probe_post,
+                                        tr, 0);
+
+       register_trace_prio_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_pre,
+                                            tr, INT_MAX);
+       register_trace_prio_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_post,
+                                            tr, 0);
+
+       register_trace_prio_sched_waking(event_filter_pid_sched_wakeup_probe_pre,
+                                        tr, INT_MAX);
+       register_trace_prio_sched_waking(event_filter_pid_sched_wakeup_probe_post,
+                                        tr, 0);
 }
 
 static ssize_t
-ftrace_event_pid_write(struct file *filp, const char __user *ubuf,
-                      size_t cnt, loff_t *ppos)
+event_pid_write(struct file *filp, const char __user *ubuf,
+               size_t cnt, loff_t *ppos, int type)
 {
        struct seq_file *m = filp->private_data;
        struct trace_array *tr = m->private;
        struct trace_pid_list *filtered_pids = NULL;
+       struct trace_pid_list *other_pids = NULL;
        struct trace_pid_list *pid_list;
        struct trace_event_file *file;
        ssize_t ret;
 
        mutex_lock(&event_mutex);
 
-       filtered_pids = rcu_dereference_protected(tr->filtered_pids,
-                                            lockdep_is_held(&event_mutex));
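+       /*
+        * "other_pids" is whichever list this write does not target; if
+        * it is non-NULL, the sched probes are already registered.
+        */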
+       if (type == TRACE_PIDS) {
+               filtered_pids = rcu_dereference_protected(tr->filtered_pids,
+                                                         lockdep_is_held(&event_mutex));
+               other_pids = rcu_dereference_protected(tr->filtered_no_pids,
+                                                         lockdep_is_held(&event_mutex));
+       } else {
+               filtered_pids = rcu_dereference_protected(tr->filtered_no_pids,
+                                                         lockdep_is_held(&event_mutex));
+               other_pids = rcu_dereference_protected(tr->filtered_pids,
+                                                         lockdep_is_held(&event_mutex));
+       }
 
        ret = trace_pid_write(filtered_pids, &pid_list, ubuf, cnt);
        if (ret < 0)
                goto out;
 
-       rcu_assign_pointer(tr->filtered_pids, pid_list);
+       if (type == TRACE_PIDS)
+               rcu_assign_pointer(tr->filtered_pids, pid_list);
+       else
+               rcu_assign_pointer(tr->filtered_no_pids, pid_list);
 
        list_for_each_entry(file, &tr->events, list) {
                set_bit(EVENT_FILE_FL_PID_FILTER_BIT, &file->flags);
        if (filtered_pids) {
                tracepoint_synchronize_unregister();
                trace_free_pid_list(filtered_pids);
-       } else if (pid_list) {
-               /*
-                * Register a probe that is called before all other probes
-                * to set ignore_pid if next or prev do not match.
-                * Register a probe this is called after all other probes
-                * to only keep ignore_pid set if next pid matches.
-                */
-               register_trace_prio_sched_switch(event_filter_pid_sched_switch_probe_pre,
-                                                tr, INT_MAX);
-               register_trace_prio_sched_switch(event_filter_pid_sched_switch_probe_post,
-                                                tr, 0);
-
-               register_trace_prio_sched_wakeup(event_filter_pid_sched_wakeup_probe_pre,
-                                                tr, INT_MAX);
-               register_trace_prio_sched_wakeup(event_filter_pid_sched_wakeup_probe_post,
-                                                tr, 0);
-
-               register_trace_prio_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_pre,
-                                                    tr, INT_MAX);
-               register_trace_prio_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_post,
-                                                    tr, 0);
-
-               register_trace_prio_sched_waking(event_filter_pid_sched_wakeup_probe_pre,
-                                                tr, INT_MAX);
-               register_trace_prio_sched_waking(event_filter_pid_sched_wakeup_probe_post,
-                                                tr, 0);
+       } else if (pid_list && !other_pids) {
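+               /* First pid list in use: register the sched probes */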
+               register_pid_events(tr);
        }
 
        /*
        return ret;
 }
 
+static ssize_t
+ftrace_event_pid_write(struct file *filp, const char __user *ubuf,
+                      size_t cnt, loff_t *ppos)
+{
+       return event_pid_write(filp, ubuf, cnt, ppos, TRACE_PIDS);
+}
+
+static ssize_t
+ftrace_event_npid_write(struct file *filp, const char __user *ubuf,
+                       size_t cnt, loff_t *ppos)
+{
+       return event_pid_write(filp, ubuf, cnt, ppos, TRACE_NO_PIDS);
+}
+
 static int ftrace_event_avail_open(struct inode *inode, struct file *file);
 static int ftrace_event_set_open(struct inode *inode, struct file *file);
 static int ftrace_event_set_pid_open(struct inode *inode, struct file *file);
+static int ftrace_event_set_npid_open(struct inode *inode, struct file *file);
 static int ftrace_event_release(struct inode *inode, struct file *file);
 
 static const struct seq_operations show_event_seq_ops = {
        .stop = p_stop,
 };
 
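+/* Same seq walk as set_event_pid, but over the notrace pid list */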
+static const struct seq_operations show_set_no_pid_seq_ops = {
+       .start = np_start,
+       .next = np_next,
+       .show = trace_pid_show,
+       .stop = p_stop,
+};
+
 static const struct file_operations ftrace_avail_fops = {
        .open = ftrace_event_avail_open,
        .read = seq_read,
        .release = ftrace_event_release,
 };
 
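+/*
+ * File operations for the new set_event_notrace_pid file. A usage
+ * sketch, assuming tracefs is mounted at /sys/kernel/tracing:
+ *   echo $$ > /sys/kernel/tracing/set_event_notrace_pid
+ * keeps events generated by the current shell out of the trace.
+ */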
+static const struct file_operations ftrace_set_event_notrace_pid_fops = {
+       .open = ftrace_event_set_npid_open,
+       .read = seq_read,
+       .write = ftrace_event_npid_write,
+       .llseek = seq_lseek,
+       .release = ftrace_event_release,
+};
+
 static const struct file_operations ftrace_enable_fops = {
        .open = tracing_open_generic,
        .read = event_enable_read,
 
        if ((file->f_mode & FMODE_WRITE) &&
            (file->f_flags & O_TRUNC))
-               ftrace_clear_event_pids(tr);
+               ftrace_clear_event_pids(tr, TRACE_PIDS);
+
+       ret = ftrace_event_open(inode, file, seq_ops);
+       if (ret < 0)
+               trace_array_put(tr);
+       return ret;
+}
+
+static int
+ftrace_event_set_npid_open(struct inode *inode, struct file *file)
+{
+       const struct seq_operations *seq_ops = &show_set_no_pid_seq_ops;
+       struct trace_array *tr = inode->i_private;
+       int ret;
+
+       ret = tracing_check_open_get_tr(tr);
+       if (ret)
+               return ret;
+
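+       /* Opening with O_TRUNC clears any existing notrace pid list */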
+       if ((file->f_mode & FMODE_WRITE) &&
+           (file->f_flags & O_TRUNC))
+               ftrace_clear_event_pids(tr, TRACE_NO_PIDS);
 
        ret = ftrace_event_open(inode, file, seq_ops);
        if (ret < 0)
        if (!entry)
                pr_warn("Could not create tracefs 'set_event_pid' entry\n");
 
+       entry = tracefs_create_file("set_event_notrace_pid", 0644, parent,
+                                   tr, &ftrace_set_event_notrace_pid_fops);
+       if (!entry)
+               pr_warn("Could not create tracefs 'set_event_notrace_pid' entry\n");
+
        /* ring buffer internal formats */
        entry = trace_create_file("header_page", 0444, d_events,
                                  ring_buffer_print_page_header,
        clear_event_triggers(tr);
 
        /* Clear the pid list */
-       __ftrace_clear_event_pids(tr);
+       __ftrace_clear_event_pids(tr, TRACE_PIDS | TRACE_NO_PIDS);
 
        /* Disable any running events */
        __ftrace_set_clr_event_nolock(tr, NULL, NULL, NULL, 0);