As BPF trampoline of different archs moves from bpf_jit_[alloc|free]_exec()
to bpf_prog_pack_[alloc|free](), we need to use different _alloc, _free for
different archs during the transition. Add the following helpers for this
transition:
void *arch_alloc_bpf_trampoline(unsigned int size);
void arch_free_bpf_trampoline(void *image, unsigned int size);
void arch_protect_bpf_trampoline(void *image, unsigned int size);
void arch_unprotect_bpf_trampoline(void *image, unsigned int size);
The fallback versions of these helpers require size <= PAGE_SIZE, but they
are only called with size == PAGE_SIZE. They will be called with size <
PAGE_SIZE when arch_bpf_trampoline_size() helper is introduced later.
Signed-off-by: Song Liu <song@kernel.org>
Acked-by: Ilya Leoshkevich <iii@linux.ibm.com>
Tested-by: Ilya Leoshkevich <iii@linux.ibm.com>  # on s390x
Acked-by: Jiri Olsa <jolsa@kernel.org>
Link: https://lore.kernel.org/r/20231206224054.492250-4-song@kernel.org
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
                                const struct btf_func_model *m, u32 flags,
                                struct bpf_tramp_links *tlinks,
                                void *func_addr);
+void *arch_alloc_bpf_trampoline(unsigned int size);
+void arch_free_bpf_trampoline(void *image, unsigned int size);
+void arch_protect_bpf_trampoline(void *image, unsigned int size);
+void arch_unprotect_bpf_trampoline(void *image, unsigned int size);
+
 u64 notrace __bpf_prog_enter_sleepable_recur(struct bpf_prog *prog,
                                             struct bpf_tramp_run_ctx *run_ctx);
 void notrace __bpf_prog_exit_sleepable_recur(struct bpf_prog *prog, u64 start,
 
                        if (err)
                                goto reset_unlock;
                }
-               set_memory_rox((long)st_map->image, 1);
+               arch_protect_bpf_trampoline(st_map->image, PAGE_SIZE);
                /* Let bpf_link handle registration & unregistration.
                 *
                 * Pair with smp_load_acquire() during lookup_elem().
                goto unlock;
        }
 
-       set_memory_rox((long)st_map->image, 1);
+       arch_protect_bpf_trampoline(st_map->image, PAGE_SIZE);
        err = st_ops->reg(kdata);
        if (likely(!err)) {
                /* This refcnt increment on the map here after
         * there was a race in registering the struct_ops (under the same name) to
         * a sub-system through different struct_ops's maps.
         */
-       set_memory_nx((long)st_map->image, 1);
-       set_memory_rw((long)st_map->image, 1);
+       arch_unprotect_bpf_trampoline(st_map->image, PAGE_SIZE);
 
 reset_unlock:
        bpf_struct_ops_map_put_progs(st_map);
                bpf_struct_ops_map_put_progs(st_map);
        bpf_map_area_free(st_map->links);
        if (st_map->image) {
-               bpf_jit_free_exec(st_map->image);
+               arch_free_bpf_trampoline(st_map->image, PAGE_SIZE);
                bpf_jit_uncharge_modmem(PAGE_SIZE);
        }
        bpf_map_area_free(st_map->uvalue);
                return ERR_PTR(ret);
        }
 
-       st_map->image = bpf_jit_alloc_exec(PAGE_SIZE);
+       st_map->image = arch_alloc_bpf_trampoline(PAGE_SIZE);
        if (!st_map->image) {
                /* __bpf_struct_ops_map_free() uses st_map->image as flag
                 * for "charged or not". In this case, we need to unchange
        }
 
        mutex_init(&st_map->lock);
-       set_vm_flush_reset_perms(st_map->image);
        bpf_map_init_from_attr(map, attr);
 
        return map;
 
 static void bpf_tramp_image_free(struct bpf_tramp_image *im)
 {
        bpf_image_ksym_del(&im->ksym);
-       bpf_jit_free_exec(im->image);
+       arch_free_bpf_trampoline(im->image, PAGE_SIZE);
        bpf_jit_uncharge_modmem(PAGE_SIZE);
        percpu_ref_exit(&im->pcref);
        kfree_rcu(im, rcu);
                goto out_free_im;
 
        err = -ENOMEM;
-       im->image = image = bpf_jit_alloc_exec(PAGE_SIZE);
+       im->image = image = arch_alloc_bpf_trampoline(PAGE_SIZE);
        if (!image)
                goto out_uncharge;
-       set_vm_flush_reset_perms(image);
 
        err = percpu_ref_init(&im->pcref, __bpf_tramp_image_release, 0, GFP_KERNEL);
        if (err)
        return im;
 
 out_free_image:
-       bpf_jit_free_exec(im->image);
+       arch_free_bpf_trampoline(im->image, PAGE_SIZE);
 out_uncharge:
        bpf_jit_uncharge_modmem(PAGE_SIZE);
 out_free_im:
        if (err < 0)
                goto out_free;
 
-       set_memory_rox((long)im->image, 1);
+       arch_protect_bpf_trampoline(im->image, PAGE_SIZE);
 
        WARN_ON(tr->cur_image && total == 0);
        if (tr->cur_image)
                tr->fops->trampoline = 0;
 
                /* reset im->image memory attr for arch_prepare_bpf_trampoline */
-               set_memory_nx((long)im->image, 1);
-               set_memory_rw((long)im->image, 1);
+               arch_unprotect_bpf_trampoline(im->image, PAGE_SIZE);
                goto again;
        }
 #endif
        return -ENOTSUPP;
 }
 
+/* Fallback trampoline-image allocator, used when the arch does not
+ * override it (__weak). Requires size <= PAGE_SIZE but always allocates
+ * a full page via bpf_jit_alloc_exec(), matching the current callers
+ * that pass size == PAGE_SIZE. On success the mapping is flagged with
+ * set_vm_flush_reset_perms() so its permissions can be safely reset
+ * when the image is later freed.
+ */
+void * __weak arch_alloc_bpf_trampoline(unsigned int size)
+{
+       void *image;
+
+       if (WARN_ON_ONCE(size > PAGE_SIZE))
+               return NULL;
+       image = bpf_jit_alloc_exec(PAGE_SIZE);
+       if (image)
+               set_vm_flush_reset_perms(image);
+       return image;
+}
+
+/* Fallback free for a trampoline image obtained from
+ * arch_alloc_bpf_trampoline(). @size is unused here but kept in the
+ * interface for arch implementations backed by bpf_prog_pack_free().
+ */
+void __weak arch_free_bpf_trampoline(void *image, unsigned int size)
+{
+       WARN_ON_ONCE(size > PAGE_SIZE);
+       /* bpf_jit_free_exec doesn't need "size", but
+        * bpf_prog_pack_free() needs it.
+        */
+       bpf_jit_free_exec(image);
+}
+
+/* Fallback: make the (single-page) trampoline image read-only and
+ * executable once its contents are finalized.
+ */
+void __weak arch_protect_bpf_trampoline(void *image, unsigned int size)
+{
+       WARN_ON_ONCE(size > PAGE_SIZE);
+       set_memory_rox((long)image, 1);
+}
+
+/* Fallback: revert arch_protect_bpf_trampoline() — make the page
+ * non-executable and writable again so the image can be regenerated
+ * (e.g. before re-running arch_prepare_bpf_trampoline()).
+ */
+void __weak arch_unprotect_bpf_trampoline(void *image, unsigned int size)
+{
+       WARN_ON_ONCE(size > PAGE_SIZE);
+       set_memory_nx((long)image, 1);
+       set_memory_rw((long)image, 1);
+}
+
 static int __init init_trampolines(void)
 {
        int i;
 
                goto out;
        }
 
-       image = bpf_jit_alloc_exec(PAGE_SIZE);
+       image = arch_alloc_bpf_trampoline(PAGE_SIZE);
        if (!image) {
                err = -ENOMEM;
                goto out;
        }
-       set_vm_flush_reset_perms(image);
 
        link = kzalloc(sizeof(*link), GFP_USER);
        if (!link) {
        if (err < 0)
                goto out;
 
-       set_memory_rox((long)image, 1);
+       arch_protect_bpf_trampoline(image, PAGE_SIZE);
        prog_ret = dummy_ops_call_op(image, args);
 
        err = dummy_ops_copy_args(args);
                err = -EFAULT;
 out:
        kfree(args);
-       bpf_jit_free_exec(image);
+       arch_free_bpf_trampoline(image, PAGE_SIZE);
        if (link)
                bpf_link_put(&link->link);
        kfree(tlinks);