}
 
 static int btf_find_kptr(const struct btf *btf, const struct btf_type *t,
-                        u32 off, int sz, struct btf_field_info *info)
+                        u32 off, int sz, struct btf_field_info *info, u32 field_mask)
 {
        enum btf_field_type type;
        u32 res_id;
                type = BPF_KPTR_REF;
        else if (!strcmp("percpu_kptr", __btf_name_by_offset(btf, t->name_off)))
                type = BPF_KPTR_PERCPU;
+       else if (!strcmp("uptr", __btf_name_by_offset(btf, t->name_off)))
+               type = BPF_UPTR;
        else
                return -EINVAL;
 
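+       /* Ignore tag types the caller did not request in field_mask */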
+       if (!(type & field_mask))
+               return BTF_FIELD_IGNORE;
+
        /* Get the base type */
        t = btf_type_skip_modifiers(btf, t->type, &res_id);
        /* Only pointer to struct is allowed */
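
/* For reference (not part of this patch): the strings matched above are
 * BTF type tags. In BPF program source they are typically attached with
 * a macro such as
 *
 *     #define __uptr __attribute__((btf_type_tag("uptr")))
 *
 * so that a map-value field declared as
 *
 *     struct user_data __uptr *udata;
 *
 * carries the "uptr" tag in BTF that btf_find_kptr() matches here.
 */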
        field_mask_test_name(BPF_REFCOUNT,  "bpf_refcount");
 
        /* Only return BPF_KPTR when all other types with matchable names fail */
-       if (field_mask & BPF_KPTR && !__btf_type_is_struct(var_type)) {
+       if (field_mask & (BPF_KPTR | BPF_UPTR) && !__btf_type_is_struct(var_type)) {
                type = BPF_KPTR_REF;
                goto end;
        }
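
/* Note on the fallback above: a tagged pointer member is not a struct,
 * so it is reported provisionally as BPF_KPTR_REF; btf_find_kptr() then
 * refines the type to BPF_UPTR when the member carries the "uptr" tag.
 */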
                case BPF_KPTR_UNREF:
                case BPF_KPTR_REF:
                case BPF_KPTR_PERCPU:
+               case BPF_UPTR:
                case BPF_LIST_HEAD:
                case BPF_RB_ROOT:
                        break;
        case BPF_KPTR_UNREF:
        case BPF_KPTR_REF:
        case BPF_KPTR_PERCPU:
+       case BPF_UPTR:
                ret = btf_find_kptr(btf, var_type, off, sz,
-                                   info_cnt ? &info[0] : &tmp);
+                                   info_cnt ? &info[0] : &tmp, field_mask);
                if (ret < 0)
                        return ret;
                break;
                case BPF_KPTR_UNREF:
                case BPF_KPTR_REF:
                case BPF_KPTR_PERCPU:
+               case BPF_UPTR:
                        ret = btf_parse_kptr(btf, &rec->fields[i], &info_arr[i]);
                        if (ret < 0)
                                goto end;
         * Hence we only need to ensure that bpf_{list_head,rb_root} ownership
         * does not form cycles.
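+        * Records containing uptr fields also take this path so that
+        * each uptr target type can be validated in the loop below.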
         */
-       if (IS_ERR_OR_NULL(rec) || !(rec->field_mask & BPF_GRAPH_ROOT))
+       if (IS_ERR_OR_NULL(rec) || !(rec->field_mask & (BPF_GRAPH_ROOT | BPF_UPTR)))
                return 0;
        for (i = 0; i < rec->cnt; i++) {
                struct btf_struct_meta *meta;
+               const struct btf_type *t;
                u32 btf_id;
 
+               if (rec->fields[i].type == BPF_UPTR) {
+                       /* A uptr can pin at most one page and must not
+                        * point to a kernel struct.
+                        */
+                       if (btf_is_kernel(rec->fields[i].kptr.btf))
+                               return -EINVAL;
+                       t = btf_type_by_id(rec->fields[i].kptr.btf,
+                                          rec->fields[i].kptr.btf_id);
+                       if (!t->size)
+                               return -EINVAL;
+                       if (t->size > PAGE_SIZE)
+                               return -E2BIG;
+                       continue;
+               }
+
                if (!(rec->fields[i].type & BPF_GRAPH_ROOT))
                        continue;
                btf_id = rec->fields[i].graph_root.value_btf_id;
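
/* Illustration (not part of this patch) of the BPF_UPTR checks above,
 * with hypothetical uptr target types:
 *
 *     struct empty { };                         // t->size == 0    -> -EINVAL
 *     struct big { char buf[PAGE_SIZE + 1]; };  // over one page   -> -E2BIG
 *     struct task_struct;                       // kernel BTF type -> -EINVAL
 *     struct ok { char buf[64]; };              // user type, fits -> allowed
 */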
                        goto free_aof;
                }
 
-               ret = btf_find_kptr(btf, t, 0, 0, &tmp);
+               ret = btf_find_kptr(btf, t, 0, 0, &tmp, BPF_KPTR);
                if (ret != BTF_FIELD_FOUND)
                        continue;
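
/* End-to-end sketch (not from this patch; modeled on the selftests in
 * the same series): a map value with a uptr field in task local storage.
 * __uint, __type, and SEC come from bpf_helpers.h; the __uptr macro is
 * the tag definition shown earlier.
 */
#define __uptr __attribute__((btf_type_tag("uptr")))

struct user_data {
        int a;
        int b;
};

struct value_type {
        struct user_data __uptr *udata;
};

struct {
        __uint(type, BPF_MAP_TYPE_TASK_STORAGE);
        __uint(map_flags, BPF_F_NO_PREALLOC);
        __type(key, int);
        __type(value, struct value_type);
} datamap SEC(".maps");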