#include <linux/percpu.h>
 #include <linux/smp.h>
 #include <linux/atomic.h>
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <linux/ftrace.h>
+#include <linux/fs.h>
+#include <linux/debugfs.h>
+#include <linux/err.h>
+#include <linux/cache.h>
 #include <asm/barrier.h>
 #include "internal.h"
 
-void notrace pstore_ftrace_call(unsigned long ip, unsigned long parent_ip)
+/*
+ * Ftrace callback: encode one function-call record (ip, parent_ip, cpu)
+ * and hand it to the pstore backend via psinfo->write_buf.
+ * Now static: it is registered through pstore_ftrace_ops below instead of
+ * being called directly by the function tracer.
+ */
+static void notrace pstore_ftrace_call(unsigned long ip,
+                                      unsigned long parent_ip)
 {
+       unsigned long flags;
        struct pstore_ftrace_record rec = {};
 
+       /* Don't add noise to the backend while an oops is being logged. */
        if (unlikely(oops_in_progress))
                return;
 
+       /* Keep the record write atomic w.r.t. interrupts on this CPU. */
+       local_irq_save(flags);
+
        rec.ip = ip;
        rec.parent_ip = parent_ip;
        pstore_ftrace_encode_cpu(&rec, raw_smp_processor_id());
        psinfo->write_buf(PSTORE_TYPE_FTRACE, 0, NULL, 0, (void *)&rec,
                          sizeof(rec), psinfo);
+
+       local_irq_restore(flags);
+}
+
+/* Ops handed to the ftrace core when pstore function tracing is enabled. */
+static struct ftrace_ops pstore_ftrace_ops __read_mostly = {
+       .func   = pstore_ftrace_call,
+};
+
+/* Serializes enable/disable transitions and guards pstore_ftrace_enabled. */
+static DEFINE_MUTEX(pstore_ftrace_lock);
+static bool pstore_ftrace_enabled;
+
+/*
+ * debugfs "record_ftrace" write handler: writing "1" enables pstore
+ * function tracing by registering pstore_ftrace_ops, "0" disables it.
+ * Returns @count on success (including a no-op request that matches the
+ * current state) or a negative error code.
+ */
+static ssize_t pstore_ftrace_knob_write(struct file *f, const char __user *buf,
+                                       size_t count, loff_t *ppos)
+{
+       u8 on;
+       ssize_t ret;
+
+       /*
+        * Base-2 parse: only strings of 0/1 digits are accepted.
+        * NOTE(review): multi-digit input such as "10" (= 2) also parses and
+        * is then treated as an enable request by the test below — confirm
+        * that is acceptable.
+        */
+       ret = kstrtou8_from_user(buf, count, 2, &on);
+       if (ret)
+               return ret;
+
+       mutex_lock(&pstore_ftrace_lock);
+
+       /* Already in the requested state?  Succeed without touching ftrace. */
+       if (!on ^ pstore_ftrace_enabled)
+               goto out;
+
+       if (on)
+               ret = register_ftrace_function(&pstore_ftrace_ops);
+       else
+               ret = unregister_ftrace_function(&pstore_ftrace_ops);
+       if (ret) {
+               pr_err("%s: unable to %sregister ftrace ops: %zd\n",
+                      __func__, on ? "" : "un", ret);
+               goto err;
+       }
+
+       pstore_ftrace_enabled = on;
+out:
+       ret = count;    /* success path falls through to the unlock */
+err:
+       mutex_unlock(&pstore_ftrace_lock);
+
+       return ret;
+}
+
+/* debugfs read handler: reports the current state as "0\n" or "1\n". */
+static ssize_t pstore_ftrace_knob_read(struct file *f, char __user *buf,
+                                      size_t count, loff_t *ppos)
+{
+       /* bool promotes to 0/1, so this is '0' or '1'; no NUL terminator needed. */
+       char val[] = { '0' + pstore_ftrace_enabled, '\n' };
+
+       return simple_read_from_buffer(buf, count, ppos, val, sizeof(val));
+}
+
+/* File operations for <debugfs>/pstore/record_ftrace. */
+static const struct file_operations pstore_knob_fops = {
+       .open   = simple_open,
+       .read   = pstore_ftrace_knob_read,
+       .write  = pstore_ftrace_knob_write,
+};
+
+/*
+ * Called from the pstore core at registration time: expose the
+ * enable/disable knob at <debugfs>/pstore/record_ftrace.
+ * Quietly does nothing when the backend lacks buffered-write support,
+ * since pstore_ftrace_call depends on psinfo->write_buf.
+ */
+void pstore_register_ftrace(void)
+{
+       struct dentry *dir;
+       struct dentry *file;
+
+       if (!psinfo->write_buf)
+               return;
+
+       dir = debugfs_create_dir("pstore", NULL);
+       if (!dir) {
+               pr_err("%s: unable to create pstore directory\n", __func__);
+               return;
+       }
+
+       file = debugfs_create_file("record_ftrace", 0600, dir, NULL,
+                                  &pstore_knob_fops);
+       if (!file) {
+               pr_err("%s: unable to create record_ftrace file\n", __func__);
+               goto err_file;
+       }
+
+       return;
+err_file:
+       /* Don't leave an empty pstore/ directory behind on failure. */
+       debugfs_remove(dir);
 }
 
 #include <linux/debugfs.h>
 #include <linux/uaccess.h>
 #include <linux/ftrace.h>
-#include <linux/pstore.h>
 #include <linux/fs.h>
 
 #include "trace.h"
        preempt_enable_notrace();
 }
 
-/* Our two options */
+/* Our option: optionally record a stack trace with each function call */
 enum {
        TRACE_FUNC_OPT_STACK    = 0x1,
-       TRACE_FUNC_OPT_PSTORE   = 0x2,
 };
 
 static struct tracer_flags func_flags;
        disabled = atomic_inc_return(&data->disabled);
 
        if (likely(disabled == 1)) {
-               /*
-                * So far tracing doesn't support multiple buffers, so
-                * we make an explicit call for now.
-                */
-               if (unlikely(func_flags.val & TRACE_FUNC_OPT_PSTORE))
-                       pstore_ftrace_call(ip, parent_ip);
                pc = preempt_count();
                trace_function(tr, ip, parent_ip, flags, pc);
        }
 static struct tracer_opt func_opts[] = {
 #ifdef CONFIG_STACKTRACE
        { TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
-#endif
-#ifdef CONFIG_PSTORE_FTRACE
-       { TRACER_OPT(func_pstore, TRACE_FUNC_OPT_PSTORE) },
 #endif
+       /* func_pstore was removed; pstore ftrace now has its own debugfs knob. */
        { } /* Always set a last empty entry */
 };
                        register_ftrace_function(&trace_ops);
                }
 
-               break;
-       case TRACE_FUNC_OPT_PSTORE:
                break;
        default:
                return -EINVAL;