/* Nothing to do here. We support Internal BPF. */
 }
 
-void bpf_int_jit_compile(struct bpf_prog *prog)
+struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 {
        struct bpf_binary_header *header;
        struct jit_ctx ctx;
        u8 *image_ptr;
 
        if (!bpf_jit_enable)
-               return;
+               return prog;
 
        memset(&ctx, 0, sizeof(ctx));
        ctx.prog = prog;
 
        ctx.offset = kcalloc(prog->len, sizeof(int), GFP_KERNEL);
        if (ctx.offset == NULL)
-               return;
+               return prog;
 
        /* 1. Initial fake pass to compute ctx->idx. */
 
        prog->jited = 1;
 out:
        kfree(ctx.offset);
+       return prog;
 }
 
 void bpf_jit_free(struct bpf_prog *prog)
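
The conversion pattern above repeats in each arch JIT below, so the contract is worth spelling out once. A minimal sketch, assuming a hypothetical arch_jit_compile() helper in place of the emission passes the hunk elides: every exit path, including failure, must hand back a valid program so the core can fall back to the interpreter.

	/* Sketch only: arch_jit_compile() is a hypothetical stand-in
	 * for the arch-specific passes elided in the hunk above.
	 */
	struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
	{
		if (!bpf_jit_enable)
			return prog;	/* JIT disabled: stay on the interpreter */

		if (arch_jit_compile(prog) < 0)
			return prog;	/* JIT failed: stay on the interpreter */

		prog->jited = 1;	/* JITed image is installed and live */
		return prog;
	}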
 
 /*
  * Compile eBPF program "fp"
  */
-void bpf_int_jit_compile(struct bpf_prog *fp)
+struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
 {
        struct bpf_binary_header *header;
        struct bpf_jit jit;
        int pass;
 
        if (!bpf_jit_enable)
-               return;
+               return fp;
+
        memset(&jit, 0, sizeof(jit));
        jit.addrs = kcalloc(fp->len + 1, sizeof(*jit.addrs), GFP_KERNEL);
        if (jit.addrs == NULL)
-               return;
+               return fp;
        /*
         * Three initial passes:
         *   - 1/2: Determine clobbered registers
        }
 free_addrs:
        kfree(jit.addrs);
+       return fp;
 }
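
The arm64 "initial fake pass to compute ctx->idx" and the s390 "three initial passes" comments refer to the same scheme: emit instructions without writing an image until the computed offsets stop changing, and only then allocate the final binary. A hedged sketch of that loop; emit_all_insns() and MAX_PASSES are illustrative names, not kernel symbols.

	/* Illustrative convergence loop, not verbatim from any arch. */
	for (pass = 0; pass < MAX_PASSES; pass++) {
		proglen = emit_all_insns(prog, addrs);	/* dry-run emission */
		if (proglen == oldproglen)
			break;				/* offsets stable, done */
		oldproglen = proglen;
	}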
 
 /*
 
 {
 }
 
-void bpf_int_jit_compile(struct bpf_prog *prog)
+struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 {
        struct bpf_binary_header *header = NULL;
        int proglen, oldproglen = 0;
        int i;
 
        if (!bpf_jit_enable)
-               return;
+               return prog;
 
        addrs = kmalloc(prog->len * sizeof(*addrs), GFP_KERNEL);
        if (!addrs)
-               return;
+               return prog;
 
        /* Before the first pass, make a rough estimate of addrs[]:
         * each BPF instruction is translated to less than 64 bytes.
        }
 out:
        kfree(addrs);
+       return prog;
 }
 
 void bpf_jit_free(struct bpf_prog *fp)
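
Why the JIT may hand back a *different* struct bpf_prog becomes concrete in the follow-up constant-blinding patch of this series. Paraphrased from that follow-up (not part of this hunk), the x86 JIT then opens roughly like this:

	/* Paraphrased from the follow-up blinding patch: the JIT may
	 * continue on a rewritten clone and must keep the original
	 * prog around to fall back to on error.
	 */
	struct bpf_prog *orig_prog = prog;
	struct bpf_prog *tmp;

	tmp = bpf_jit_blind_constants(prog);
	if (IS_ERR(tmp))
		return orig_prog;	/* blinding failed: interpreter path */
	if (tmp != prog)
		prog = tmp;		/* JIT proceeds on the blinded clone */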
 
 
 int sk_filter(struct sock *sk, struct sk_buff *skb);
 
-int bpf_prog_select_runtime(struct bpf_prog *fp);
+struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err);
 void bpf_prog_free(struct bpf_prog *fp);
 
 struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags);
 void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp);
 
 u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
-void bpf_int_jit_compile(struct bpf_prog *fp);
+
+struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog);
 bool bpf_helper_changes_skb_data(void *func);
 
 struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
 
 /**
  *     bpf_prog_select_runtime - select exec runtime for BPF program
  *     @fp: bpf_prog populated with internal BPF program
+ *     @err: pointer to error variable
  *
  * Try to JIT the eBPF program; if no JIT is available, use the interpreter.
  * The BPF program will be executed via BPF_PROG_RUN() macro.
  */
-int bpf_prog_select_runtime(struct bpf_prog *fp)
+struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
 {
        fp->bpf_func = (void *) __bpf_prog_run;
 
-       bpf_int_jit_compile(fp);
+       /* eBPF JITs can rewrite the program when constant
+        * blinding is active. However, on error during blinding,
+        * bpf_int_jit_compile() must always return a valid
+        * program, which in that case is simply not JITed and
+        * falls back to the interpreter.
+        */
+       fp = bpf_int_jit_compile(fp);
        bpf_prog_lock_ro(fp);
 
        /* The tail call compatibility check can only be done at
         * with JITed or non JITed program concatenations and not
         * all eBPF JITs might immediately support all features.
         */
-       return bpf_check_tail_call(fp);
+       *err = bpf_check_tail_call(fp);
+
+       return fp;
 }
 EXPORT_SYMBOL_GPL(bpf_prog_select_runtime);
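
For context on why fp->bpf_func is pointed at the interpreter before the JIT is even attempted: program dispatch goes exclusively through that pointer, so the prog is runnable on every path out of this function. The macro, paraphrased from linux/filter.h of this era:

	#define BPF_PROG_RUN(filter, ctx)  (*(filter)->bpf_func)(ctx, (filter)->insnsi)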
 
 };
 
 /* For classic BPF JITs that don't implement bpf_int_jit_compile(). */
-void __weak bpf_int_jit_compile(struct bpf_prog *prog)
+struct bpf_prog * __weak bpf_int_jit_compile(struct bpf_prog *prog)
 {
+       return prog;
 }
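
Architectures that at this point only carry a classic BPF JIT (e.g. powerpc and sparc at the time) keep their existing bpf_jit_compile() entry point and resolve bpf_int_jit_compile() to the weak stub above, so eBPF programs there simply stay on the interpreter:

	void bpf_jit_compile(struct bpf_prog *fp);	/* cBPF-only JIT entry point */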
 
 bool __weak bpf_helper_changes_skb_data(void *func)
 
        fixup_bpf_calls(prog);
 
        /* eBPF program is ready to be JITed */
-       err = bpf_prog_select_runtime(prog);
+       prog = bpf_prog_select_runtime(prog, &err);
        if (err < 0)
                goto free_used_maps;
 
 
                fp->type = BPF_PROG_TYPE_SOCKET_FILTER;
                memcpy(fp->insnsi, fptr, fp->len * sizeof(struct bpf_insn));
 
-               bpf_prog_select_runtime(fp);
+               /* We cannot error here as we don't need type compatibility
+                * checks.
+                */
+               fp = bpf_prog_select_runtime(fp, err);
                break;
        }
 
 
                 */
                goto out_err_free;
 
-       bpf_prog_select_runtime(fp);
+       /* We are guaranteed to never error here with cBPF to eBPF
+        * transitions, since there's no issue with type compatibility
+        * checks on program arrays.
+        */
+       fp = bpf_prog_select_runtime(fp, &err);
 
        kfree(old_prog);
        return fp;