config HAVE_KPROBES_ON_FTRACE
        bool
 
+config HAVE_KPROBE_OVERRIDE
+       bool
+
 config HAVE_NMI
        bool
 
 
        select HAVE_KERNEL_XZ
        select HAVE_KPROBES
        select HAVE_KPROBES_ON_FTRACE
+       select HAVE_KPROBE_OVERRIDE
        select HAVE_KRETPROBES
        select HAVE_KVM
        select HAVE_LIVEPATCH                   if X86_64
 
 void arch_remove_kprobe(struct kprobe *p);
 asmlinkage void kretprobe_trampoline(void);
 
+#ifdef CONFIG_KPROBES_ON_FTRACE
+extern void arch_ftrace_kprobe_override_function(struct pt_regs *regs);
+#endif
+
 /* Architecture specific copy of original instruction*/
 struct arch_specific_insn {
        /* copy of the original instruction */
 
        return regs->ax;
 }
 
+static inline void regs_set_return_value(struct pt_regs *regs, unsigned long rc)
+{
+       regs->ax = rc;
+}
+
 /*
  * user_mode(regs) determines whether a register set came from user
  * mode.  On x86_32, this is true if V8086 mode was enabled OR if the
 
        p->ainsn.boostable = false;
        return 0;
 }
+
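+/*
+ * Stub that the kprobe handler can redirect the probed function to.  It is
+ * written in raw asm so that it is guaranteed to be nothing more than a
+ * single "ret" (no compiler-generated prologue or ftrace call): jumping
+ * here immediately returns to the probed function's caller.
+ */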
+asmlinkage void override_func(void);
+asm(
+       ".type override_func, @function\n"
+       "override_func:\n"
+       "       ret\n"
+       ".size override_func, .-override_func\n"
+);
+
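+/*
+ * Redirect execution to the empty stub above.  The probed function body
+ * never runs; its caller sees the return value that was placed in
+ * regs->ax via regs_set_return_value().
+ */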
+void arch_ftrace_kprobe_override_function(struct pt_regs *regs)
+{
+       regs->ip = (unsigned long)&override_func;
+}
+NOKPROBE_SYMBOL(arch_ftrace_kprobe_override_function);
 
                                locked:1,       /* Program image locked? */
                                gpl_compatible:1, /* Is filter GPL compatible? */
                                cb_access:1,    /* Is control block accessed? */
-                               dst_needed:1;   /* Do we need dst entry? */
+                               dst_needed:1,   /* Do we need dst entry? */
+                               kprobe_override:1; /* Do we override a kprobe? */
        kmemcheck_bitfield_end(meta);
        enum bpf_prog_type      type;           /* Type of BPF program */
        u32                     len;            /* Number of filter blocks */
 
 struct perf_event;
 
 DECLARE_PER_CPU(struct pt_regs, perf_trace_regs);
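+/* Set by bpf_override_return(); checked and cleared in kprobe_perf_func(). */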
+DECLARE_PER_CPU(int, bpf_kprobe_override);
 
 extern int  perf_trace_init(struct perf_event *event);
 extern void perf_trace_destroy(struct perf_event *event);
 
  *     @buf: buf to fill
  *     @buf_size: size of the buf
  *     Return : 0 on success or negative error code
+ *
+ * int bpf_override_return(pt_regs, rc)
+ *     @pt_regs: pointer to struct pt_regs
+ *     @rc: the return value to set
+ *     Return : 0 on success or negative error code
  */
 #define __BPF_FUNC_MAPPER(FN)          \
        FN(unspec),                     \
        FN(xdp_adjust_meta),            \
        FN(perf_event_read_value),      \
        FN(perf_prog_read_value),       \
-       FN(getsockopt),
+       FN(getsockopt),                 \
+       FN(override_return),
 
 /* integer value in 'imm' field of BPF_CALL instruction selects which helper
  * function eBPF program intends to call
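As an illustration only (not part of this patch), a BPF C program built with the
usual samples/bpf conventions could use the new helper roughly as sketched
below; the probed function name, the error code, and the bpf_override_return()
stub in bpf_helpers.h are assumptions made for the sketch:

#include <linux/ptrace.h>
#include <uapi/linux/bpf.h>
#include "bpf_helpers.h"

/* Force the (hypothetical) probed function to return -ENOMEM to its caller. */
SEC("kprobe/some_traced_func")
int override_some_traced_func(struct pt_regs *ctx)
{
	bpf_override_return(ctx, (unsigned long)-12 /* -ENOMEM */);
	return 0;
}

/* bpf_override_return() is gpl_only, so the program must be GPL licensed. */
char _license[] SEC("license") = "GPL";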
 
 bool bpf_prog_array_compatible(struct bpf_array *array,
                               const struct bpf_prog *fp)
 {
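+       /*
+        * Programs that may override a kprobe's return value are not
+        * allowed in prog arrays, so they cannot end up being tail-called
+        * from a context where overriding is not supported.
+        */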
+       if (fp->kprobe_override)
+               return false;
+
        if (!array->owner_prog_type) {
                /* There's no owner yet where we could check for
                 * compatibility.
 
                        prog->dst_needed = 1;
                if (insn->imm == BPF_FUNC_get_prandom_u32)
                        bpf_user_rnd_init_once();
+               if (insn->imm == BPF_FUNC_override_return)
+                       prog->kprobe_override = 1;
                if (insn->imm == BPF_FUNC_tail_call) {
                        /* If we tail call into other programs, we
                         * cannot make any assumptions since they can
 
                return -EINVAL;
        }
 
+       /* Kprobe override only works for kprobes, not uprobes. */
+       if (prog->kprobe_override &&
+           !(event->tp_event->flags & TRACE_EVENT_FL_KPROBE)) {
+               bpf_prog_put(prog);
+               return -EINVAL;
+       }
+
        if (is_tracepoint || is_syscall_tp) {
                int off = trace_event_get_offsets(event->tp_event);
 
 
 
          If in doubt, say N.
 
+config BPF_KPROBE_OVERRIDE
+       bool "Enable BPF programs to override a kprobed function"
+       depends on BPF_EVENTS
+       depends on KPROBES_ON_FTRACE
+       depends on HAVE_KPROBE_OVERRIDE
+       depends on DYNAMIC_FTRACE_WITH_REGS
+       default n
+       help
+        Allows BPF programs to override the execution of a probed
+        function and set a different return value.  This is used for
+        error injection.
+
 config FTRACE_MCOUNT_RECORD
        def_bool y
        depends on DYNAMIC_FTRACE
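For reference, every dependency listed above must be satisfied before the new
option becomes selectable; on an architecture that selects HAVE_KPROBE_OVERRIDE
(x86, per the earlier hunk), an enabling configuration would contain, for
example:

CONFIG_BPF_EVENTS=y
CONFIG_KPROBES_ON_FTRACE=y
CONFIG_DYNAMIC_FTRACE_WITH_REGS=y
CONFIG_BPF_KPROBE_OVERRIDE=y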
 
 #include <linux/filter.h>
 #include <linux/uaccess.h>
 #include <linux/ctype.h>
+#include <linux/kprobes.h>
+#include <asm/kprobes.h>
+
+#include "trace_probe.h"
 #include "trace.h"
 
 u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
 }
 EXPORT_SYMBOL_GPL(trace_call_bpf);
 
+#ifdef CONFIG_BPF_KPROBE_OVERRIDE
+BPF_CALL_2(bpf_override_return, struct pt_regs *, regs, unsigned long, rc)
+{
+       __this_cpu_write(bpf_kprobe_override, 1);
+       regs_set_return_value(regs, rc);
+       arch_ftrace_kprobe_override_function(regs);
+       return 0;
+}
+#else
+BPF_CALL_2(bpf_override_return, struct pt_regs *, regs, unsigned long, rc)
+{
+       return -EINVAL;
+}
+#endif
+
+static const struct bpf_func_proto bpf_override_return_proto = {
+       .func           = bpf_override_return,
+       .gpl_only       = true,
+       .ret_type       = RET_INTEGER,
+       .arg1_type      = ARG_PTR_TO_CTX,
+       .arg2_type      = ARG_ANYTHING,
+};
+
 BPF_CALL_3(bpf_probe_read, void *, dst, u32, size, const void *, unsafe_ptr)
 {
        int ret;
                return &bpf_get_stackid_proto;
        case BPF_FUNC_perf_event_read_value:
                return &bpf_perf_event_read_value_proto;
+       case BPF_FUNC_override_return:
+               pr_warn_ratelimited("%s[%d] is installing a program with bpf_override_return helper that may cause unexpected behavior!\n",
+                                   current->comm, task_pid_nr(current));
+               return &bpf_override_return_proto;
        default:
                return tracing_func_proto(func_id);
        }
        struct bpf_prog_array *new_array;
        int ret = -EEXIST;
 
+       /* Kprobe override only works for ftrace-based kprobes. */
+       if (prog->kprobe_override && !trace_kprobe_ftrace(event->tp_event))
+               return -EINVAL;
+
        mutex_lock(&bpf_event_mutex);
 
        if (event->prog)
 
        (offsetof(struct trace_kprobe, tp.args) +       \
        (sizeof(struct probe_arg) * (n)))
 
+DEFINE_PER_CPU(int, bpf_kprobe_override);
 
 static nokprobe_inline bool trace_kprobe_is_return(struct trace_kprobe *tk)
 {
        return nhit;
 }
 
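+/*
+ * Return non-zero if this kprobe is installed through ftrace, which is
+ * what BPF return overriding requires.
+ */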
+int trace_kprobe_ftrace(struct trace_event_call *call)
+{
+       struct trace_kprobe *tk = (struct trace_kprobe *)call->data;
+
+       return kprobe_ftrace(&tk->rp.kp);
+}
+
 static int register_kprobe_event(struct trace_kprobe *tk);
 static int unregister_kprobe_event(struct trace_kprobe *tk);
 
 #ifdef CONFIG_PERF_EVENTS
 
 /* Kprobe profile handler */
-static void
+static int
 kprobe_perf_func(struct trace_kprobe *tk, struct pt_regs *regs)
 {
        struct trace_event_call *call = &tk->tp.call;
        int size, __size, dsize;
        int rctx;
 
-       if (bpf_prog_array_valid(call) && !trace_call_bpf(call, regs))
-               return;
+       if (bpf_prog_array_valid(call)) {
+               int ret;
+
+               ret = trace_call_bpf(call, regs);
+
+               /*
+                * Check whether the BPF program modified the pc in pt_regs;
+                * if so, clear the current kprobe and return 1 so that we
+                * don't do the instruction skipping.  Also reset our state
+                * so we are clean for the next pass through.
+                */
+               if (__this_cpu_read(bpf_kprobe_override)) {
+                       __this_cpu_write(bpf_kprobe_override, 0);
+                       reset_current_kprobe();
+                       return 1;
+               }
+               if (!ret)
+                       return 0;
+       }
 
        head = this_cpu_ptr(call->perf_events);
        if (hlist_empty(head))
-               return;
+               return 0;
 
        dsize = __get_data_size(&tk->tp, regs);
        __size = sizeof(*entry) + tk->tp.size + dsize;
 
        entry = perf_trace_buf_alloc(size, NULL, &rctx);
        if (!entry)
-               return;
+               return 0;
 
        entry->ip = (unsigned long)tk->rp.kp.addr;
        memset(&entry[1], 0, dsize);
        store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize);
        perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
                              head, NULL, NULL);
+       return 0;
 }
 NOKPROBE_SYMBOL(kprobe_perf_func);
 
 static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs)
 {
        struct trace_kprobe *tk = container_of(kp, struct trace_kprobe, rp.kp);
+       int ret = 0;
 
        raw_cpu_inc(*tk->nhit);
 
                kprobe_trace_func(tk, regs);
 #ifdef CONFIG_PERF_EVENTS
        if (tk->tp.flags & TP_FLAG_PROFILE)
-               kprobe_perf_func(tk, regs);
+               ret = kprobe_perf_func(tk, regs);
 #endif
-       return 0;       /* We don't tweek kernel, so just return 0 */
+       return ret;
 }
 NOKPROBE_SYMBOL(kprobe_dispatcher);
 
 
 unsigned long update_symbol_cache(struct symbol_cache *sc);
 void free_symbol_cache(struct symbol_cache *sc);
 struct symbol_cache *alloc_symbol_cache(const char *sym, long offset);
+int trace_kprobe_ftrace(struct trace_event_call *call);
 #else
 /* uprobes do not support symbol fetch methods */
 #define fetch_symbol_u8                        NULL
 {
        return NULL;
 }
+
+static inline int trace_kprobe_ftrace(struct trace_event_call *call)
+{
+       return 0;
+}
 #endif /* CONFIG_KPROBE_EVENTS */
 
 struct probe_arg {