        memset32(area, BREAKPOINT_INSTRUCTION, size / 4);
 }
 
-/* Fix updated addresses (for subprog calls, ldimm64, et al) during extra pass */
-static int bpf_jit_fixup_addresses(struct bpf_prog *fp, u32 *image,
-                                  struct codegen_context *ctx, u32 *addrs)
-{
-       const struct bpf_insn *insn = fp->insnsi;
-       bool func_addr_fixed;
-       u64 func_addr;
-       u32 tmp_idx;
-       int i, j, ret;
-
-       for (i = 0; i < fp->len; i++) {
-               /*
-                * During the extra pass, only the branch target addresses for
-                * the subprog calls need to be fixed. All other instructions
-                * can be left untouched.
-                *
-                * The JITed image length does not change because we already
-                * ensure that the JITed instruction sequences for these calls
-                * are of fixed length by padding them with NOPs.
-                */
-               if (insn[i].code == (BPF_JMP | BPF_CALL) &&
-                   insn[i].src_reg == BPF_PSEUDO_CALL) {
-                       ret = bpf_jit_get_func_addr(fp, &insn[i], true,
-                                                   &func_addr,
-                                                   &func_addr_fixed);
-                       if (ret < 0)
-                               return ret;
-
-                       /*
-                        * Save ctx->idx as this would currently point to the
-                        * end of the JITed image and set it to the offset of
-                        * the instruction sequence corresponding to the
-                        * subprog call temporarily.
-                        */
-                       tmp_idx = ctx->idx;
-                       ctx->idx = addrs[i] / 4;
-                       ret = bpf_jit_emit_func_call_rel(image, ctx, func_addr);
-                       if (ret)
-                               return ret;
-
-                       /*
-                        * Restore ctx->idx here. This is safe as the length
-                        * of the JITed sequence remains unchanged.
-                        */
-                       ctx->idx = tmp_idx;
-               } else if (insn[i].code == (BPF_LD | BPF_IMM | BPF_DW)) {
-                       tmp_idx = ctx->idx;
-                       ctx->idx = addrs[i] / 4;
-#ifdef CONFIG_PPC32
-                       PPC_LI32(bpf_to_ppc(insn[i].dst_reg) - 1, (u32)insn[i + 1].imm);
-                       PPC_LI32(bpf_to_ppc(insn[i].dst_reg), (u32)insn[i].imm);
-                       for (j = ctx->idx - addrs[i] / 4; j < 4; j++)
-                               EMIT(PPC_RAW_NOP());
-#else
-                       func_addr = ((u64)(u32)insn[i].imm) | (((u64)(u32)insn[i + 1].imm) << 32);
-                       PPC_LI64(bpf_to_ppc(insn[i].dst_reg), func_addr);
-                       /* overwrite rest with nops */
-                       for (j = ctx->idx - addrs[i] / 4; j < 5; j++)
-                               EMIT(PPC_RAW_NOP());
-#endif
-                       ctx->idx = tmp_idx;
-                       i++;
-               }
-       }
-
-       return 0;
-}
-
 int bpf_jit_emit_exit_insn(u32 *image, struct codegen_context *ctx, int tmp_reg, long exit_addr)
 {
        if (!exit_addr || is_offset_in_branch_range(exit_addr - (ctx->idx * 4))) {
        cgctx.stack_size = round_up(fp->aux->stack_depth, 16);
 
        /* Scouting faux-generate pass 0 */
-       if (bpf_jit_build_body(fp, 0, &cgctx, addrs, 0)) {
+       if (bpf_jit_build_body(fp, 0, &cgctx, addrs, 0, false)) {
                /* We hit something illegal or unsupported. */
                fp = org_fp;
                goto out_addrs;
         */
        if (cgctx.seen & SEEN_TAILCALL || !is_offset_in_branch_range((long)cgctx.idx * 4)) {
                cgctx.idx = 0;
-               if (bpf_jit_build_body(fp, 0, &cgctx, addrs, 0)) {
+               if (bpf_jit_build_body(fp, 0, &cgctx, addrs, 0, false)) {
                        fp = org_fp;
                        goto out_addrs;
                }
 skip_init_ctx:
        code_base = (u32 *)(image + FUNCTION_DESCR_SIZE);
 
-       if (extra_pass) {
-               /*
-                * Do not touch the prologue and epilogue as they will remain
-                * unchanged. Only fix the branch target address for subprog
-                * calls in the body, and ldimm64 instructions.
-                *
-                * This does not change the offsets and lengths of the subprog
-                * call instruction sequences, so the size of the JITed image
-                * stays the same.
-                */
-               bpf_jit_fixup_addresses(fp, code_base, &cgctx, addrs);
-
-               /* There is no need to perform the usual passes. */
-               goto skip_codegen_passes;
-       }
-
        /* Code generation passes 1-2 */
        for (pass = 1; pass < 3; pass++) {
                /* Now build the prologue, body code & epilogue for real. */
                cgctx.idx = 0;
                cgctx.alt_exit_addr = 0;
                bpf_jit_build_prologue(code_base, &cgctx);
-               if (bpf_jit_build_body(fp, code_base, &cgctx, addrs, pass)) {
+               if (bpf_jit_build_body(fp, code_base, &cgctx, addrs, pass, extra_pass)) {
                        bpf_jit_binary_free(bpf_hdr);
                        fp = org_fp;
                        goto out_addrs;
                                proglen - (cgctx.idx * 4), cgctx.seen);
        }
 
-skip_codegen_passes:
        if (bpf_jit_enable > 1)
                /*
                 * Note that we output the base address of the code_base