char errmsg[STRERR_BUFSIZE];
        struct bpf_link_perf *link;
        int prog_fd, link_fd = -1, err;
+       bool force_ioctl_attach;
 
        if (!OPTS_VALID(opts, bpf_perf_event_opts))
                return libbpf_err_ptr(-EINVAL);
        link->link.dealloc = &bpf_link_perf_dealloc;
        link->perf_event_fd = pfd;
 
-       if (kernel_supports(prog->obj, FEAT_PERF_LINK)) {
+       force_ioctl_attach = OPTS_GET(opts, force_ioctl_attach, false);
+       if (kernel_supports(prog->obj, FEAT_PERF_LINK) && !force_ioctl_attach) {
                DECLARE_LIBBPF_OPTS(bpf_link_create_opts, link_opts,
                        .perf_event.bpf_cookie = OPTS_GET(opts, bpf_cookie, 0));
 
                                const struct bpf_kprobe_opts *opts)
 {
        DECLARE_LIBBPF_OPTS(bpf_perf_event_opts, pe_opts);
+       enum probe_attach_mode attach_mode;
        char errmsg[STRERR_BUFSIZE];
        char *legacy_probe = NULL;
        struct bpf_link *link;
        if (!OPTS_VALID(opts, bpf_kprobe_opts))
                return libbpf_err_ptr(-EINVAL);
 
+       attach_mode = OPTS_GET(opts, attach_mode, PROBE_ATTACH_MODE_DEFAULT);
        retprobe = OPTS_GET(opts, retprobe, false);
        offset = OPTS_GET(opts, offset, 0);
        pe_opts.bpf_cookie = OPTS_GET(opts, bpf_cookie, 0);
 
        legacy = determine_kprobe_perf_type() < 0;
+       switch (attach_mode) {
+       case PROBE_ATTACH_MODE_LEGACY:
+               legacy = true;
+               pe_opts.force_ioctl_attach = true;
+               break;
+       case PROBE_ATTACH_MODE_PERF:
+               if (legacy)
+                       return libbpf_err_ptr(-ENOTSUP);
+               pe_opts.force_ioctl_attach = true;
+               break;
+       case PROBE_ATTACH_MODE_LINK:
+               if (legacy || !kernel_supports(prog->obj, FEAT_PERF_LINK))
+                       return libbpf_err_ptr(-ENOTSUP);
+               break;
+       case PROBE_ATTACH_MODE_DEFAULT:
+               break;
+       default:
+               return libbpf_err_ptr(-EINVAL);
+       }
+
        if (!legacy) {
                pfd = perf_event_open_probe(false /* uprobe */, retprobe,
                                            func_name, offset,
        const char *archive_path = NULL, *archive_sep = NULL;
        char errmsg[STRERR_BUFSIZE], *legacy_probe = NULL;
        DECLARE_LIBBPF_OPTS(bpf_perf_event_opts, pe_opts);
+       enum probe_attach_mode attach_mode;
        char full_path[PATH_MAX];
        struct bpf_link *link;
        size_t ref_ctr_off;
        if (!OPTS_VALID(opts, bpf_uprobe_opts))
                return libbpf_err_ptr(-EINVAL);
 
+       attach_mode = OPTS_GET(opts, attach_mode, PROBE_ATTACH_MODE_DEFAULT);
        retprobe = OPTS_GET(opts, retprobe, false);
        ref_ctr_off = OPTS_GET(opts, ref_ctr_offset, 0);
        pe_opts.bpf_cookie = OPTS_GET(opts, bpf_cookie, 0);
        }
 
        legacy = determine_uprobe_perf_type() < 0;
+       switch (attach_mode) {
+       case PROBE_ATTACH_MODE_LEGACY:
+               legacy = true;
+               pe_opts.force_ioctl_attach = true;
+               break;
+       case PROBE_ATTACH_MODE_PERF:
+               if (legacy)
+                       return libbpf_err_ptr(-ENOTSUP);
+               pe_opts.force_ioctl_attach = true;
+               break;
+       case PROBE_ATTACH_MODE_LINK:
+               if (legacy || !kernel_supports(prog->obj, FEAT_PERF_LINK))
+                       return libbpf_err_ptr(-ENOTSUP);
+               break;
+       case PROBE_ATTACH_MODE_DEFAULT:
+               break;
+       default:
+               return libbpf_err_ptr(-EINVAL);
+       }
+
        if (!legacy) {
                pfd = perf_event_open_probe(true /* uprobe */, retprobe, binary_path,
                                            func_offset, pid, ref_ctr_off);
 
 bpf_program__attach(const struct bpf_program *prog);
 
struct bpf_perf_event_opts {
-       /* size of this struct, for forward/backward compatiblity */
+       /* size of this struct, for forward/backward compatibility */
        size_t sz;
        /* custom user-provided value fetchable through bpf_get_attach_cookie() */
        __u64 bpf_cookie;
+       /* don't use a BPF link when attaching the BPF program */
+       bool force_ioctl_attach;
+       size_t :0;
};
-#define bpf_perf_event_opts__last_field bpf_cookie
+#define bpf_perf_event_opts__last_field force_ioctl_attach
 
 LIBBPF_API struct bpf_link *
 bpf_program__attach_perf_event(const struct bpf_program *prog, int pfd);
 bpf_program__attach_perf_event_opts(const struct bpf_program *prog, int pfd,
                                    const struct bpf_perf_event_opts *opts);
 
+/**
+ * enum probe_attach_mode - the mode to attach kprobe/uprobe
+ *
+ * Force libbpf to attach a kprobe/uprobe in a specific mode; -ENOTSUP is
+ * returned if the requested mode is not supported by the kernel.
+ */
+enum probe_attach_mode {
+       /* attach probe in latest supported mode by kernel */
+       PROBE_ATTACH_MODE_DEFAULT = 0,
+       /* attach probe in legacy mode, using debugfs/tracefs */
+       PROBE_ATTACH_MODE_LEGACY,
+       /* create perf event with perf_event_open() syscall */
+       PROBE_ATTACH_MODE_PERF,
+       /* attach probe with BPF link */
+       PROBE_ATTACH_MODE_LINK,
+};
+
struct bpf_kprobe_opts {
-       /* size of this struct, for forward/backward compatiblity */
+       /* size of this struct, for forward/backward compatibility */
        size_t sz;
        /* custom user-provided value fetchable through bpf_get_attach_cookie() */
        __u64 bpf_cookie;
        size_t offset;
        /* kprobe is return probe */
        bool retprobe;
+       /* kprobe attach mode, see enum probe_attach_mode */
+       enum probe_attach_mode attach_mode;
        size_t :0;
};
-#define bpf_kprobe_opts__last_field retprobe
+#define bpf_kprobe_opts__last_field attach_mode
 
 LIBBPF_API struct bpf_link *
 bpf_program__attach_kprobe(const struct bpf_program *prog, bool retprobe,
                                      const struct bpf_kprobe_multi_opts *opts);
 
 struct bpf_ksyscall_opts {
-       /* size of this struct, for forward/backward compatiblity */
+       /* size of this struct, for forward/backward compatibility */
        size_t sz;
        /* custom user-provided value fetchable through bpf_get_attach_cookie() */
        __u64 bpf_cookie;
                             const struct bpf_ksyscall_opts *opts);
 
struct bpf_uprobe_opts {
-       /* size of this struct, for forward/backward compatiblity */
+       /* size of this struct, for forward/backward compatibility */
        size_t sz;
        /* offset of kernel reference counted USDT semaphore, added in
         * a6ca88b241d5 ("trace_uprobe: support reference counter in fd-based uprobe")
         * binary_path.
         */
        const char *func_name;
+       /* uprobe attach mode, see enum probe_attach_mode */
+       enum probe_attach_mode attach_mode;
        size_t :0;
};
-#define bpf_uprobe_opts__last_field func_name
+#define bpf_uprobe_opts__last_field attach_mode
 
 /**
  * @brief **bpf_program__attach_uprobe()** attaches a BPF program
                         const struct bpf_usdt_opts *opts);
 
 struct bpf_tracepoint_opts {
-       /* size of this struct, for forward/backward compatiblity */
+       /* size of this struct, for forward/backward compatibility */
        size_t sz;
        /* custom user-provided value fetchable through bpf_get_attach_cookie() */
        __u64 bpf_cookie;
 typedef int (*ring_buffer_sample_fn)(void *ctx, void *data, size_t size);
 
 struct ring_buffer_opts {
-       size_t sz; /* size of this struct, for forward/backward compatiblity */
+       size_t sz; /* size of this struct, for forward/backward compatibility */
 };
 
 #define ring_buffer_opts__last_field sz
 bpf_object__destroy_subskeleton(struct bpf_object_subskeleton *s);
 
 struct gen_loader_opts {
-       size_t sz; /* size of this struct, for forward/backward compatiblity */
+       size_t sz; /* size of this struct, for forward/backward compatibility */
        const char *data;
        const char *insns;
        __u32 data_sz;
 };
 
 struct bpf_linker_opts {
-       /* size of this struct, for forward/backward compatiblity */
+       /* size of this struct, for forward/backward compatibility */
        size_t sz;
 };
 #define bpf_linker_opts__last_field sz
 
 struct bpf_linker_file_opts {
-       /* size of this struct, for forward/backward compatiblity */
+       /* size of this struct, for forward/backward compatibility */
        size_t sz;
 };
 #define bpf_linker_file_opts__last_field sz
                                       struct bpf_link **link);
 
 struct libbpf_prog_handler_opts {
-       /* size of this struct, for forward/backward compatiblity */
+       /* size of this struct, for forward/backward compatibility */
        size_t sz;
        /* User-provided value that is passed to prog_setup_fn,
         * prog_prepare_load_fn, and prog_attach_fn callbacks. Allows user to