typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip);
 
+/*
+ * FTRACE_OPS_FL_* bits denote the state of the ftrace_ops struct and are
+ * set in the flags member.
+ *
+ * ENABLED - set/unset when ftrace_ops is registered/unregistered
+ * GLOBAL  - set manually by the ftrace_ops user to denote that the
+ *           ftrace_ops is part of the global tracers sharing the same
+ *           filter via the set_ftrace_* debugfs files.
+ * DYNAMIC - set when ftrace_ops is registered to denote a dynamically
+ *           allocated ftrace_ops which needs special care
+ * CONTROL - set manually by the ftrace_ops user to denote that the
+ *           ftrace_ops can be controlled by the following calls:
+ *             ftrace_function_local_enable
+ *             ftrace_function_local_disable
+ */
 enum {
        FTRACE_OPS_FL_ENABLED           = 1 << 0,
        FTRACE_OPS_FL_GLOBAL            = 1 << 1,
        FTRACE_OPS_FL_DYNAMIC           = 1 << 2,
+       FTRACE_OPS_FL_CONTROL           = 1 << 3,
 };
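
[ Illustrative sketch, not part of the patch: a tracer opting in to the new
  per-cpu control sets FTRACE_OPS_FL_CONTROL when declaring its ops; the
  callback my_trace_func and the ops my_ops are hypothetical names. ]

        /* hypothetical ftrace_ops using the new CONTROL flag */
        static void my_trace_func(unsigned long ip, unsigned long parent_ip)
        {
                /* runs for each traced function while enabled on this cpu */
        }

        static struct ftrace_ops my_ops = {
                .func  = my_trace_func,
                .flags = FTRACE_OPS_FL_CONTROL, /* must not be set with _GLOBAL */
        };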
 
 struct ftrace_ops {
        ftrace_func_t                   func;
        struct ftrace_ops               *next;
        unsigned long                   flags;
+       int __percpu                    *disabled;
 #ifdef CONFIG_DYNAMIC_FTRACE
        struct ftrace_hash              *notrace_hash;
        struct ftrace_hash              *filter_hash;
 int unregister_ftrace_function(struct ftrace_ops *ops);
 void clear_ftrace_function(void);
 
+/**
+ * ftrace_function_local_enable - enable controlled ftrace_ops on current cpu
+ *
+ * This function enables tracing on the current cpu by decreasing
+ * the per-cpu control variable.
+ * It must be called with preemption disabled and only on ftrace_ops
+ * registered with FTRACE_OPS_FL_CONTROL. If called without preemption
+ * disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled.
+ */
+static inline void ftrace_function_local_enable(struct ftrace_ops *ops)
+{
+       if (WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_CONTROL)))
+               return;
+
+       (*this_cpu_ptr(ops->disabled))--;
+}
+
+/**
+ * ftrace_function_local_disable - disable controlled ftrace_ops on current cpu
+ *
+ * This function disables tracing on the current cpu by increasing
+ * the per-cpu control variable.
+ * It must be called with preemption disabled and only on ftrace_ops
+ * registered with FTRACE_OPS_FL_CONTROL. If called without preemption
+ * disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled.
+ */
+static inline void ftrace_function_local_disable(struct ftrace_ops *ops)
+{
+       if (WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_CONTROL)))
+               return;
+
+       (*this_cpu_ptr(ops->disabled))++;
+}
+
+/**
+ * ftrace_function_local_disabled - returns ftrace_ops disabled value
+ *                                  on current cpu
+ *
+ * This function returns the value of ftrace_ops::disabled on the current cpu.
+ * It must be called with preemption disabled and only on ftrace_ops
+ * registered with FTRACE_OPS_FL_CONTROL. If called without preemption
+ * disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled.
+ */
+static inline int ftrace_function_local_disabled(struct ftrace_ops *ops)
+{
+       WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_CONTROL));
+       return *this_cpu_ptr(ops->disabled);
+}
+
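[ Illustrative usage sketch, not part of the patch: per the comments above,
  callers wrap the per-cpu toggles in a preemption-disabled section. my_ops
  is the hypothetical CONTROL ops from the earlier sketch. ]

        preempt_disable();
        ftrace_function_local_enable(&my_ops);  /* disabled drops to 0, tracing on */
        /* ... functions executed here are traced on this cpu ... */
        ftrace_function_local_disable(&my_ops); /* disabled back to 1, tracing off */
        preempt_enable();
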
 extern void ftrace_stub(unsigned long a0, unsigned long a1);
 
 #else /* !CONFIG_FUNCTION_TRACER */
 
 #define FTRACE_HASH_DEFAULT_BITS 10
 #define FTRACE_HASH_MAX_BITS 12
 
+#define FL_GLOBAL_CONTROL_MASK (FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_CONTROL)
+
 /* ftrace_enabled is a method to turn ftrace on or off */
 int ftrace_enabled __read_mostly;
 static int last_ftrace_enabled;
 };
 
 static struct ftrace_ops *ftrace_global_list __read_mostly = &ftrace_list_end;
+static struct ftrace_ops *ftrace_control_list __read_mostly = &ftrace_list_end;
 static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end;
 ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
 static ftrace_func_t __ftrace_trace_function_delay __read_mostly = ftrace_stub;
 ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
 ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
 static struct ftrace_ops global_ops;
+static struct ftrace_ops control_ops;
 
 static void
 ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip);
 }
 #endif
 
+static void control_ops_disable_all(struct ftrace_ops *ops)
+{
+       int cpu;
+
+       for_each_possible_cpu(cpu)
+               *per_cpu_ptr(ops->disabled, cpu) = 1;
+}
+
+static int control_ops_alloc(struct ftrace_ops *ops)
+{
+       int __percpu *disabled;
+
+       disabled = alloc_percpu(int);
+       if (!disabled)
+               return -ENOMEM;
+
+       ops->disabled = disabled;
+       control_ops_disable_all(ops);
+       return 0;
+}
+
+static void control_ops_free(struct ftrace_ops *ops)
+{
+       free_percpu(ops->disabled);
+}
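
[ Note, not part of the patch: control_ops_alloc() initializes disabled to 1
  on every possible cpu, so a freshly registered CONTROL ops traces nothing
  until its user enables it cpu by cpu. A hedged sketch of enabling all online
  cpus after register_ftrace_function(&my_ops); the helper is hypothetical: ]

        static void my_enable_on_cpu(void *info)
        {
                /* on_each_cpu() invokes this with preemption disabled */
                ftrace_function_local_enable(info);
        }

        on_each_cpu(my_enable_on_cpu, &my_ops, 1);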
+
 static void update_global_ops(void)
 {
        ftrace_func_t func;
        return 0;
 }
 
+static void add_ftrace_list_ops(struct ftrace_ops **list,
+                               struct ftrace_ops *main_ops,
+                               struct ftrace_ops *ops)
+{
+       int first = *list == &ftrace_list_end;
+       add_ftrace_ops(list, ops);
+       if (first)
+               add_ftrace_ops(&ftrace_ops_list, main_ops);
+}
+
+static int remove_ftrace_list_ops(struct ftrace_ops **list,
+                                 struct ftrace_ops *main_ops,
+                                 struct ftrace_ops *ops)
+{
+       int ret = remove_ftrace_ops(list, ops);
+       if (!ret && *list == &ftrace_list_end)
+               ret = remove_ftrace_ops(&ftrace_ops_list, main_ops);
+       return ret;
+}
+
 static int __register_ftrace_function(struct ftrace_ops *ops)
 {
        if (ftrace_disabled)
        if (WARN_ON(ops->flags & FTRACE_OPS_FL_ENABLED))
                return -EBUSY;
 
+       /* We don't support both control and global flags set. */
+       if ((ops->flags & FL_GLOBAL_CONTROL_MASK) == FL_GLOBAL_CONTROL_MASK)
+               return -EINVAL;
+
        if (!core_kernel_data((unsigned long)ops))
                ops->flags |= FTRACE_OPS_FL_DYNAMIC;
 
        if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
-               int first = ftrace_global_list == &ftrace_list_end;
-               add_ftrace_ops(&ftrace_global_list, ops);
+               add_ftrace_list_ops(&ftrace_global_list, &global_ops, ops);
                ops->flags |= FTRACE_OPS_FL_ENABLED;
-               if (first)
-                       add_ftrace_ops(&ftrace_ops_list, &global_ops);
+       } else if (ops->flags & FTRACE_OPS_FL_CONTROL) {
+               if (control_ops_alloc(ops))
+                       return -ENOMEM;
+               add_ftrace_list_ops(&ftrace_control_list, &control_ops, ops);
        } else
                add_ftrace_ops(&ftrace_ops_list, ops);
 
                return -EINVAL;
 
        if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
-               ret = remove_ftrace_ops(&ftrace_global_list, ops);
-               if (!ret && ftrace_global_list == &ftrace_list_end)
-                       ret = remove_ftrace_ops(&ftrace_ops_list, &global_ops);
+               ret = remove_ftrace_list_ops(&ftrace_global_list,
+                                            &global_ops, ops);
                if (!ret)
                        ops->flags &= ~FTRACE_OPS_FL_ENABLED;
+       } else if (ops->flags & FTRACE_OPS_FL_CONTROL) {
+               ret = remove_ftrace_list_ops(&ftrace_control_list,
+                                            &control_ops, ops);
+               if (!ret) {
+                       /*
+                        * The ftrace_ops is now removed from the list,
+                        * so there'll be no new users. We must ensure
+                        * all current users are done before we free
+                        * the control data.
+                        */
+                       synchronize_sched();
+                       control_ops_free(ops);
+               }
        } else
                ret = remove_ftrace_ops(&ftrace_ops_list, ops);
 
 
 #endif /* CONFIG_DYNAMIC_FTRACE */
 
+static void
+ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip)
+{
+       struct ftrace_ops *op;
+
+       if (unlikely(trace_recursion_test(TRACE_CONTROL_BIT)))
+               return;
+
+       /*
+        * Some of the ops may be dynamically allocated;
+        * they must be freed after a synchronize_sched().
+        */
+       preempt_disable_notrace();
+       trace_recursion_set(TRACE_CONTROL_BIT);
+       op = rcu_dereference_raw(ftrace_control_list);
+       while (op != &ftrace_list_end) {
+               if (!ftrace_function_local_disabled(op) &&
+                   ftrace_ops_test(op, ip))
+                       op->func(ip, parent_ip);
+
+               op = rcu_dereference_raw(op->next);
+       }
+       trace_recursion_clear(TRACE_CONTROL_BIT);
+       preempt_enable_notrace();
+}
+
+static struct ftrace_ops control_ops = {
+       .func = ftrace_ops_control_func,
+};
+
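[ Illustrative sketch, not part of the patch: ftrace_ops_control_func above
  skips any CONTROL ops whose per-cpu disabled counter is non-zero, which is
  what makes cheap per-cpu toggling from hot paths possible. A hypothetical
  context-switch hook could use it to mute my_ops around untraced tasks: ]

        /* preemption is already disabled in the context-switch path; the
         * enable/disable calls must stay balanced, since the per-cpu
         * 'disabled' value is a counter, not a flag */
        static void my_sched_switch_hook(bool trace_next)
        {
                if (trace_next)
                        ftrace_function_local_enable(&my_ops);
                else
                        ftrace_function_local_disable(&my_ops);
        }
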
 static void
 ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip)
 {