* PLT for hotpatchable calls. The calling convention is the same as for the
  * ftrace hotpatch trampolines: %r0 is return address, %r1 is clobbered.
  */
-extern const char bpf_plt[];
-extern const char bpf_plt_ret[];
-extern const char bpf_plt_target[];
-extern const char bpf_plt_end[];
-#define BPF_PLT_SIZE 32
+struct bpf_plt {
+       char code[16];
+       void *ret;
+       void *target;
+} __packed;
+extern const struct bpf_plt bpf_plt;
 asm(
        ".pushsection .rodata\n"
        "       .balign 8\n"
 	"bpf_plt:\n"
 	"	lgrl %r0,bpf_plt_ret\n"
 	"	lgrl %r1,bpf_plt_target\n"
 	"	br %r1\n"
 	"	.balign 8\n"
        "bpf_plt_ret: .quad 0\n"
        "bpf_plt_target: .quad 0\n"
-       "bpf_plt_end:\n"
        "       .popsection\n"
 );
 
-static void bpf_jit_plt(void *plt, void *ret, void *target)
+static void bpf_jit_plt(struct bpf_plt *plt, void *ret, void *target)
 {
-       memcpy(plt, bpf_plt, BPF_PLT_SIZE);
-       *(void **)((char *)plt + (bpf_plt_ret - bpf_plt)) = ret;
-       *(void **)((char *)plt + (bpf_plt_target - bpf_plt)) = target ?: ret;
+       memcpy(plt, &bpf_plt, sizeof(*plt));
+       plt->ret = ret;
+	plt->target = target ?: ret;
 }
 
 /*
        jit->prg = ALIGN(jit->prg, 8);
        jit->prologue_plt = jit->prg;
        if (jit->prg_buf)
-               bpf_jit_plt(jit->prg_buf + jit->prg,
+               bpf_jit_plt((struct bpf_plt *)(jit->prg_buf + jit->prg),
                            jit->prg_buf + jit->prologue_plt_ret, NULL);
-       jit->prg += BPF_PLT_SIZE;
+       jit->prg += sizeof(struct bpf_plt);
 }
 
 static int get_probe_mem_regno(const u8 *insn)
        struct bpf_jit jit;
        int pass;
 
-       if (WARN_ON_ONCE(bpf_plt_end - bpf_plt != BPF_PLT_SIZE))
-               return orig_fp;
-
        if (!fp->jit_requested)
                return orig_fp;
 
 int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
                       void *old_addr, void *new_addr)
 {
+       struct bpf_plt expected_plt, current_plt, new_plt, *plt;
        struct {
                u16 opc;
                s32 disp;
        } __packed insn;
-       char expected_plt[BPF_PLT_SIZE];
-       char current_plt[BPF_PLT_SIZE];
-       char new_plt[BPF_PLT_SIZE];
-       char *plt;
        char *ret;
        int err;
 
                 */
        } else {
                /* Verify the PLT. */
-               plt = (char *)ip + (insn.disp << 1);
-               err = copy_from_kernel_nofault(current_plt, plt, BPF_PLT_SIZE);
+               plt = ip + (insn.disp << 1);
+		err = copy_from_kernel_nofault(&current_plt, plt,
+                                              sizeof(current_plt));
                if (err < 0)
                        return err;
                ret = (char *)ip + 6;
-               bpf_jit_plt(expected_plt, ret, old_addr);
-               if (memcmp(current_plt, expected_plt, BPF_PLT_SIZE))
+               bpf_jit_plt(&expected_plt, ret, old_addr);
+		if (memcmp(&current_plt, &expected_plt, sizeof(current_plt)))
                        return -EINVAL;
                /* Adjust the call address. */
-               bpf_jit_plt(new_plt, ret, new_addr);
-               s390_kernel_write(plt + (bpf_plt_target - bpf_plt),
-                                 new_plt + (bpf_plt_target - bpf_plt),
+               bpf_jit_plt(&new_plt, ret, new_addr);
+               s390_kernel_write(&plt->target, &new_plt.target,
                                  sizeof(void *));
        }