#include <linux/hardirq.h>
 #include <linux/linkage.h>
 #include <linux/uaccess.h>
-#include <linux/kprobes.h>
+#include <linux/vmalloc.h>
 #include <linux/ftrace.h>
 #include <linux/module.h>
 #include <linux/percpu.h>
        return 0;
 }
 
+void trace_free_pid_list(struct trace_pid_list *pid_list)
+{
+       vfree(pid_list->pids);
+       kfree(pid_list);
+}
+
 /**
  * trace_find_filtered_pid - check if a pid exists in a filtered_pid list
  * @filtered_pids: The list of pids to check
        return 0;
 }
 
+/* 128 (the PID_BUF_SIZE + 1 parser buffer below) should be much more than enough */
+#define PID_BUF_SIZE           127
+
+int trace_pid_write(struct trace_pid_list *filtered_pids,
+                   struct trace_pid_list **new_pid_list,
+                   const char __user *ubuf, size_t cnt)
+{
+       struct trace_pid_list *pid_list;
+       struct trace_parser parser;
+       unsigned long val;
+       int nr_pids = 0;
+       ssize_t read = 0;
+       ssize_t ret = 0;
+       loff_t pos;
+       pid_t pid;
+
+       if (trace_parser_get_init(&parser, PID_BUF_SIZE + 1))
+               return -ENOMEM;
+
+       /*
+        * Always recreate a new array. The write is an all or nothing
+        * operation. Always create a new array when adding new pids by
+        * the user. If the operation fails, then the current list is
+        * not modified.
+        */
+       pid_list = kmalloc(sizeof(*pid_list), GFP_KERNEL);
+       if (!pid_list)
+               return -ENOMEM;
+
+       pid_list->pid_max = READ_ONCE(pid_max);
+
+       /* Only truncating will shrink pid_max */
+       if (filtered_pids && filtered_pids->pid_max > pid_list->pid_max)
+               pid_list->pid_max = filtered_pids->pid_max;
+
+       pid_list->pids = vzalloc((pid_list->pid_max + 7) >> 3);
+       if (!pid_list->pids) {
+               kfree(pid_list);
+               return -ENOMEM;
+       }
+
+       if (filtered_pids) {
+               /* copy the current bits to the new max */
+               pid = find_first_bit(filtered_pids->pids,
+                                    filtered_pids->pid_max);
+               while (pid < filtered_pids->pid_max) {
+                       set_bit(pid, pid_list->pids);
+                       pid = find_next_bit(filtered_pids->pids,
+                                           filtered_pids->pid_max,
+                                           pid + 1);
+                       nr_pids++;
+               }
+       }
+
+       while (cnt > 0) {
+
+               pos = 0;
+
+               ret = trace_get_user(&parser, ubuf, cnt, &pos);
+               if (ret < 0 || !trace_parser_loaded(&parser))
+                       break;
+
+               read += ret;
+               ubuf += ret;
+               cnt -= ret;
+
+               parser.buffer[parser.idx] = 0;
+
+               ret = -EINVAL;
+               if (kstrtoul(parser.buffer, 0, &val))
+                       break;
+               if (val >= pid_list->pid_max)
+                       break;
+
+               pid = (pid_t)val;
+
+               set_bit(pid, pid_list->pids);
+               nr_pids++;
+
+               trace_parser_clear(&parser);
+               ret = 0;
+       }
+       trace_parser_put(&parser);
+
+       if (ret < 0) {
+               trace_free_pid_list(pid_list);
+               return ret;
+       }
+
+       if (!nr_pids) {
+               /* Cleared the list of pids */
+               trace_free_pid_list(pid_list);
+               read = ret;
+               pid_list = NULL;
+       }
+
+       *new_pid_list = pid_list;
+
+       return read;
+}
+
 static cycle_t buffer_ftrace_now(struct trace_buffer *buf, int cpu)
 {
        u64 ts;
 
 extern unsigned long tracing_thresh;
 
 /* PID filtering */
+
+extern int pid_max;
+
 bool trace_find_filtered_pid(struct trace_pid_list *filtered_pids,
                             pid_t search_pid);
 bool trace_ignore_this_task(struct trace_pid_list *filtered_pids,
 void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos);
 void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos);
 int trace_pid_show(struct seq_file *m, void *v);
+void trace_free_pid_list(struct trace_pid_list *pid_list);
+int trace_pid_write(struct trace_pid_list *filtered_pids,
+                   struct trace_pid_list **new_pid_list,
+                   const char __user *ubuf, size_t cnt);
 
 #ifdef CONFIG_TRACER_MAX_TRACE
 void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu);
 
 #include <linux/kthread.h>
 #include <linux/tracefs.h>
 #include <linux/uaccess.h>
-#include <linux/vmalloc.h>
 #include <linux/module.h>
 #include <linux/ctype.h>
 #include <linux/sort.h>
        mutex_unlock(&event_mutex);
 }
 
-/* Shouldn't this be in a header? */
-extern int pid_max;
-
 static void
 event_filter_pid_sched_process_exit(void *data, struct task_struct *task)
 {
        /* Wait till all users are no longer using pid filtering */
        synchronize_sched();
 
-       vfree(pid_list->pids);
-       kfree(pid_list);
+       trace_free_pid_list(pid_list);
 }
 
 static void ftrace_clear_event_pids(struct trace_array *tr)
        struct trace_pid_list *filtered_pids = NULL;
        struct trace_pid_list *pid_list;
        struct trace_event_file *file;
-       struct trace_parser parser;
-       unsigned long val;
-       loff_t this_pos;
-       ssize_t read = 0;
-       ssize_t ret = 0;
-       pid_t pid;
-       int nr_pids = 0;
+       ssize_t ret;
 
        if (!cnt)
                return 0;
        if (ret < 0)
                return ret;
 
-       if (trace_parser_get_init(&parser, EVENT_BUF_SIZE + 1))
-               return -ENOMEM;
-
        mutex_lock(&event_mutex);
+
        filtered_pids = rcu_dereference_protected(tr->filtered_pids,
                                             lockdep_is_held(&event_mutex));
 
-       /*
-        * Always recreate a new array. The write is an all or nothing
-        * operation. Always create a new array when adding new pids by
-        * the user. If the operation fails, then the current list is
-        * not modified.
-        */
-       pid_list = kmalloc(sizeof(*pid_list), GFP_KERNEL);
-       if (!pid_list) {
-               read = -ENOMEM;
-               goto out;
-       }
-       pid_list->pid_max = READ_ONCE(pid_max);
-       /* Only truncating will shrink pid_max */
-       if (filtered_pids && filtered_pids->pid_max > pid_list->pid_max)
-               pid_list->pid_max = filtered_pids->pid_max;
-       pid_list->pids = vzalloc((pid_list->pid_max + 7) >> 3);
-       if (!pid_list->pids) {
-               kfree(pid_list);
-               read = -ENOMEM;
-               goto out;
-       }
-       if (filtered_pids) {
-               /* copy the current bits to the new max */
-               pid = find_first_bit(filtered_pids->pids,
-                                    filtered_pids->pid_max);
-               while (pid < filtered_pids->pid_max) {
-                       set_bit(pid, pid_list->pids);
-                       pid = find_next_bit(filtered_pids->pids,
-                                           filtered_pids->pid_max,
-                                           pid + 1);
-                       nr_pids++;
-               }
-       }
-
-       while (cnt > 0) {
-
-               this_pos = 0;
-
-               ret = trace_get_user(&parser, ubuf, cnt, &this_pos);
-               if (ret < 0 || !trace_parser_loaded(&parser))
-                       break;
-
-               read += ret;
-               ubuf += ret;
-               cnt -= ret;
-
-               parser.buffer[parser.idx] = 0;
-
-               ret = -EINVAL;
-               if (kstrtoul(parser.buffer, 0, &val))
-                       break;
-               if (val >= pid_list->pid_max)
-                       break;
-
-               pid = (pid_t)val;
-
-               set_bit(pid, pid_list->pids);
-               nr_pids++;
-
-               trace_parser_clear(&parser);
-               ret = 0;
-       }
-       trace_parser_put(&parser);
-
-       if (ret < 0) {
-               vfree(pid_list->pids);
-               kfree(pid_list);
-               read = ret;
+       ret = trace_pid_write(filtered_pids, &pid_list, ubuf, cnt);
+       if (ret < 0)
                goto out;
-       }
 
-       if (!nr_pids) {
-               /* Cleared the list of pids */
-               vfree(pid_list->pids);
-               kfree(pid_list);
-               read = ret;
-               if (!filtered_pids)
-                       goto out;
-               pid_list = NULL;
-       }
        rcu_assign_pointer(tr->filtered_pids, pid_list);
 
        list_for_each_entry(file, &tr->events, list) {
 
        if (filtered_pids) {
                synchronize_sched();
-
-               vfree(filtered_pids->pids);
-               kfree(filtered_pids);
-       } else {
+               trace_free_pid_list(filtered_pids);
+       } else if (pid_list) {
                /*
                 * Register a probe that is called before all other probes
                 * to set ignore_pid if next or prev do not match.
  out:
        mutex_unlock(&event_mutex);
 
-       ret = read;
-       if (read > 0)
-               *ppos += read;
+       if (ret > 0)
+               *ppos += ret;
 
        return ret;
 }