 #include <sys/socket.h>
 #include <sys/syscall.h>
 #include <sys/un.h>
+#include "priv_map.skel.h"
+#include "priv_prog.skel.h"
+#include "dummy_st_ops_success.skel.h"
 
 static inline int sys_mount(const char *dev_name, const char *dir_name,
                            const char *type, unsigned long flags,
        return err;
 }
 
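+/* Check that a BPF object that needs a privileged map (BPF_MAP_TYPE_QUEUE)
+ * fails to load without a token, and then loads fine when a BPF token is
+ * supplied either through bpf_token_path or through an explicit
+ * bpf_token_fd in bpf_object_open_opts.
+ */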
+static int userns_obj_priv_map(int mnt_fd)
+{
+       LIBBPF_OPTS(bpf_object_open_opts, opts);
+       char buf[256];
+       struct priv_map *skel;
+       int err, token_fd;
+
+       skel = priv_map__open_and_load();
+       if (!ASSERT_ERR_PTR(skel, "obj_tokenless_load")) {
+               priv_map__destroy(skel);
+               return -EINVAL;
+       }
+
+       /* use bpf_token_path to provide BPF FS path */
+       snprintf(buf, sizeof(buf), "/proc/self/fd/%d", mnt_fd);
+       opts.bpf_token_path = buf;
+       skel = priv_map__open_opts(&opts);
+       if (!ASSERT_OK_PTR(skel, "obj_token_path_open"))
+               return -EINVAL;
+
+       err = priv_map__load(skel);
+       priv_map__destroy(skel);
+       if (!ASSERT_OK(err, "obj_token_path_load"))
+               return -EINVAL;
+
+       /* create token and pass it through bpf_token_fd */
+       token_fd = bpf_token_create(mnt_fd, NULL);
+       if (!ASSERT_GT(token_fd, 0, "create_token"))
+               return -EINVAL;
+
+       opts.bpf_token_path = NULL;
+       opts.bpf_token_fd = token_fd;
+       skel = priv_map__open_opts(&opts);
+       if (!ASSERT_OK_PTR(skel, "obj_token_fd_open"))
+               return -EINVAL;
+
+       /* we can close our token FD, bpf_object owns dup()'ed FD now */
+       close(token_fd);
+
+       err = priv_map__load(skel);
+       priv_map__destroy(skel);
+       if (!ASSERT_OK(err, "obj_token_fd_load"))
+               return -EINVAL;
+
+       return 0;
+}
+
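+/* Check that a BPF object containing a privileged (kprobe) program fails to
+ * load without a token and loads successfully when a BPF token is supplied
+ * through bpf_token_path.
+ */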
+static int userns_obj_priv_prog(int mnt_fd)
+{
+       LIBBPF_OPTS(bpf_object_open_opts, opts);
+       char buf[256];
+       struct priv_prog *skel;
+       int err;
+
+       skel = priv_prog__open_and_load();
+       if (!ASSERT_ERR_PTR(skel, "obj_tokenless_load")) {
+               priv_prog__destroy(skel);
+               return -EINVAL;
+       }
+
+       /* use bpf_token_path to provide BPF FS path */
+       snprintf(buf, sizeof(buf), "/proc/self/fd/%d", mnt_fd);
+       opts.bpf_token_path = buf;
+       skel = priv_prog__open_opts(&opts);
+       if (!ASSERT_OK_PTR(skel, "obj_token_path_open"))
+               return -EINVAL;
+
+       err = priv_prog__load(skel);
+       priv_prog__destroy(skel);
+       if (!ASSERT_OK(err, "obj_token_path_load"))
+               return -EINVAL;
+
+       return 0;
+}
+
+/* this helper is called both with a BPF FS that delegates BPF_BTF_LOAD and
+ * with one that doesn't; without BTF_LOAD delegation struct_ops application
+ * should fail, as BTF won't be uploaded into the kernel, even if STRUCT_OPS
+ * programs themselves are allowed
+ */
+static int validate_struct_ops_load(int mnt_fd, bool expect_success)
+{
+       LIBBPF_OPTS(bpf_object_open_opts, opts);
+       char buf[256];
+       struct dummy_st_ops_success *skel;
+       int err;
+
+       snprintf(buf, sizeof(buf), "/proc/self/fd/%d", mnt_fd);
+       opts.bpf_token_path = buf;
+       skel = dummy_st_ops_success__open_opts(&opts);
+       if (!ASSERT_OK_PTR(skel, "obj_token_path_open"))
+               return -EINVAL;
+
+       err = dummy_st_ops_success__load(skel);
+       dummy_st_ops_success__destroy(skel);
+       if (expect_success) {
+               if (!ASSERT_OK(err, "obj_token_path_load"))
+                       return -EINVAL;
+       } else /* expect failure */ {
+               if (!ASSERT_ERR(err, "obj_token_path_load"))
+                       return -EINVAL;
+       }
+
+       return 0;
+}
+
+static int userns_obj_priv_btf_fail(int mnt_fd)
+{
+       return validate_struct_ops_load(mnt_fd, false /* should fail */);
+}
+
+static int userns_obj_priv_btf_success(int mnt_fd)
+{
+       return validate_struct_ops_load(mnt_fd, true /* should succeed */);
+}
+
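+/* helper for building the delegation bit masks used below */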
+#define bit(n) (1ULL << (n))
+
 void test_token(void)
 {
        if (test__start_subtest("map_token")) {
 
                subtest_userns(&opts, userns_prog_load);
        }
+       if (test__start_subtest("obj_priv_map")) {
+               struct bpffs_opts opts = {
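+                       /* delegate only map creation, and only for QUEUE maps */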
+                       .cmds = bit(BPF_MAP_CREATE),
+                       .maps = bit(BPF_MAP_TYPE_QUEUE),
+               };
+
+               subtest_userns(&opts, userns_obj_priv_map);
+       }
+       if (test__start_subtest("obj_priv_prog")) {
+               struct bpffs_opts opts = {
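+                       /* delegate only prog loading, and only for kprobe programs */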
+                       .cmds = bit(BPF_PROG_LOAD),
+                       .progs = bit(BPF_PROG_TYPE_KPROBE),
+                       .attachs = ~0ULL,
+               };
+
+               subtest_userns(&opts, userns_obj_priv_prog);
+       }
+       if (test__start_subtest("obj_priv_btf_fail")) {
+               struct bpffs_opts opts = {
+                       /* disallow BTF loading */
+                       .cmds = bit(BPF_MAP_CREATE) | bit(BPF_PROG_LOAD),
+                       .maps = bit(BPF_MAP_TYPE_STRUCT_OPS),
+                       .progs = bit(BPF_PROG_TYPE_STRUCT_OPS),
+                       .attachs = ~0ULL,
+               };
+
+               subtest_userns(&opts, userns_obj_priv_btf_fail);
+       }
+       if (test__start_subtest("obj_priv_btf_success")) {
+               struct bpffs_opts opts = {
+                       /* allow BTF loading */
+                       .cmds = bit(BPF_BTF_LOAD) | bit(BPF_MAP_CREATE) | bit(BPF_PROG_LOAD),
+                       .maps = bit(BPF_MAP_TYPE_STRUCT_OPS),
+                       .progs = bit(BPF_PROG_TYPE_STRUCT_OPS),
+                       .attachs = ~0ULL,
+               };
+
+               subtest_userns(&opts, userns_obj_priv_btf_success);
+       }
 }