* copy_from_user() call. However, this is not a concern since this function is
  * meant to be a future-proofing of bits.
  */
-int bpf_check_uarg_tail_zero(void __user *uaddr,
+int bpf_check_uarg_tail_zero(bpfptr_t uaddr,
                             size_t expected_size,
                             size_t actual_size)
 {
-       unsigned char __user *addr = uaddr + expected_size;
        int res;
 
        if (unlikely(actual_size > PAGE_SIZE))  /* silly large */
        if (actual_size <= expected_size)
                return 0;
 
+       /* The tail [expected_size, actual_size) must be all zero.
+        * A kernel-resident buffer (the bpf_sys_bpf() path) is scanned
+        * directly with memchr_inv(); a user buffer still goes through
+        * check_zeroed_user().  Both leave res > 0 iff the tail is zero,
+        * res < 0 on fault.
+        */
       if (uaddr.is_kernel)
+               res = memchr_inv(uaddr.kernel + expected_size, 0,
+                                actual_size - expected_size) == NULL;
+       else
+               res = check_zeroed_user(uaddr.user + expected_size,
+                                       actual_size - expected_size);
        if (res < 0)
                return res;
        return res ? 0 : -E2BIG;
        return NULL;
 }
 
+/* Copy a map key from a bpfptr_t (user or kernel memory).
+ * Returns the duplicated key, NULL for a legitimate zero-sized key,
+ * or ERR_PTR(-EINVAL) when a pointer is supplied without a key size.
+ */
+static void *___bpf_copy_key(bpfptr_t ukey, u64 key_size)
+{
+       /* A map with a real key size must have its key copied in. */
+       if (key_size)
+               return memdup_bpfptr(ukey, key_size);
+
+       /* Zero-sized key: the pointer must be NULL; anything else is a
+        * malformed request.
+        */
+       return bpfptr_is_null(ukey) ? NULL : ERR_PTR(-EINVAL);
+}
+
 /* last field in 'union bpf_attr' used by this command */
 #define BPF_MAP_LOOKUP_ELEM_LAST_FIELD flags
 
 
 #define BPF_MAP_UPDATE_ELEM_LAST_FIELD flags
 
-static int map_update_elem(union bpf_attr *attr)
+static int map_update_elem(union bpf_attr *attr, bpfptr_t uattr)
 {
-       void __user *ukey = u64_to_user_ptr(attr->key);
-       void __user *uvalue = u64_to_user_ptr(attr->value);
+       bpfptr_t ukey = make_bpfptr(attr->key, uattr.is_kernel);
+       bpfptr_t uvalue = make_bpfptr(attr->value, uattr.is_kernel);
        int ufd = attr->map_fd;
        struct bpf_map *map;
        void *key, *value;
                goto err_put;
        }
 
-       key = __bpf_copy_key(ukey, map->key_size);
+       key = ___bpf_copy_key(ukey, map->key_size);
        if (IS_ERR(key)) {
                err = PTR_ERR(key);
                goto err_put;
                goto free_key;
 
        err = -EFAULT;
-       if (copy_from_user(value, uvalue, value_size) != 0)
+       if (copy_from_bpfptr(value, uvalue, value_size) != 0)
                goto free_value;
 
        err = bpf_map_update_value(map, f, key, value, attr->flags);
 /* last field in 'union bpf_attr' used by this command */
 #define        BPF_PROG_LOAD_LAST_FIELD attach_prog_fd
 
-static int bpf_prog_load(union bpf_attr *attr, union bpf_attr __user *uattr)
+static int bpf_prog_load(union bpf_attr *attr, bpfptr_t uattr)
 {
        enum bpf_prog_type type = attr->prog_type;
        struct bpf_prog *prog, *dst_prog = NULL;
                return -EPERM;
 
        /* copy eBPF program license from user space */
+       /* NOTE(review): with bpfptr_t the license may now also come from
+        * kernel memory (bpf_sys_bpf() path), despite the wording above.
+        */
-       if (strncpy_from_user(license, u64_to_user_ptr(attr->license),
-                             sizeof(license) - 1) < 0)
+       if (strncpy_from_bpfptr(license,
+                               make_bpfptr(attr->license, uattr.is_kernel),
+                               sizeof(license) - 1) < 0)
                return -EFAULT;
        license[sizeof(license) - 1] = 0;
 
        prog->len = attr->insn_cnt;
 
        err = -EFAULT;
+       /* Instructions follow the same address space as uattr. */
-       if (copy_from_user(prog->insns, u64_to_user_ptr(attr->insns),
-                          bpf_prog_insn_size(prog)) != 0)
+       if (copy_from_bpfptr(prog->insns,
+                            make_bpfptr(attr->insns, uattr.is_kernel),
+                            bpf_prog_insn_size(prog)) != 0)
                goto free_prog_sec;
 
        prog->orig_prog = NULL;
 
        prog->orig_prog = NULL;
        u32 ulen;
        int err;
 
-       err = bpf_check_uarg_tail_zero(uinfo, sizeof(info), info_len);
+       err = bpf_check_uarg_tail_zero(USER_BPFPTR(uinfo), sizeof(info), info_len);
        if (err)
                return err;
        info_len = min_t(u32, sizeof(info), info_len);
        u32 info_len = attr->info.info_len;
        int err;
 
-       err = bpf_check_uarg_tail_zero(uinfo, sizeof(info), info_len);
+       err = bpf_check_uarg_tail_zero(USER_BPFPTR(uinfo), sizeof(info), info_len);
        if (err)
                return err;
        info_len = min_t(u32, sizeof(info), info_len);
        u32 info_len = attr->info.info_len;
        int err;
 
-       err = bpf_check_uarg_tail_zero(uinfo, sizeof(*uinfo), info_len);
+       err = bpf_check_uarg_tail_zero(USER_BPFPTR(uinfo), sizeof(*uinfo), info_len);
        if (err)
                return err;
 
        u32 info_len = attr->info.info_len;
        int err;
 
-       err = bpf_check_uarg_tail_zero(uinfo, sizeof(info), info_len);
+       err = bpf_check_uarg_tail_zero(USER_BPFPTR(uinfo), sizeof(info), info_len);
        if (err)
                return err;
        info_len = min_t(u32, sizeof(info), info_len);
        return err;
 }
 
-static int tracing_bpf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
+static int tracing_bpf_link_attach(const union bpf_attr *attr, bpfptr_t uattr,
+                                  struct bpf_prog *prog)
 {
        if (attr->link_create.attach_type != prog->expected_attach_type)
                return -EINVAL;
 
        if (prog->expected_attach_type == BPF_TRACE_ITER)
-               return bpf_iter_link_attach(attr, prog);
+               return bpf_iter_link_attach(attr, uattr, prog);
        else if (prog->type == BPF_PROG_TYPE_EXT)
                return bpf_tracing_prog_attach(prog,
                                               attr->link_create.target_fd,
 }
 
 #define BPF_LINK_CREATE_LAST_FIELD link_create.iter_info_len
-static int link_create(union bpf_attr *attr)
+static int link_create(union bpf_attr *attr, bpfptr_t uattr)
 {
+       /* uattr is only forwarded to tracing_bpf_link_attach() below. */
        enum bpf_prog_type ptype;
        struct bpf_prog *prog;
                goto out;
 
        if (prog->type == BPF_PROG_TYPE_EXT) {
-               ret = tracing_bpf_link_attach(attr, prog);
+               ret = tracing_bpf_link_attach(attr, uattr, prog);
                goto out;
        }
 
                ret = cgroup_bpf_link_attach(attr, prog);
                break;
        case BPF_PROG_TYPE_TRACING:
-               ret = tracing_bpf_link_attach(attr, prog);
+               ret = tracing_bpf_link_attach(attr, uattr, prog);
                break;
        case BPF_PROG_TYPE_FLOW_DISSECTOR:
        case BPF_PROG_TYPE_SK_LOOKUP:
        return ret;
 }
 
-SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size)
+/* Core of the bpf() syscall, shared by the real syscall entry point and by
+ * bpf_sys_bpf() (kernel-resident attributes).  Commands below that still
+ * hand a raw uattr.user pointer to their helpers are not reachable through
+ * bpf_sys_bpf()'s command allowlist, so the unconditional .user access is
+ * safe there — NOTE(review): keep that allowlist and these call sites in
+ * sync when extending either.
+ */
+static int __sys_bpf(int cmd, bpfptr_t uattr, unsigned int size)
 {
        union bpf_attr attr;
        int err;
 
        /* copy attributes from user space, may be less than sizeof(bpf_attr) */
        memset(&attr, 0, sizeof(attr));
-       if (copy_from_user(&attr, uattr, size) != 0)
+       if (copy_from_bpfptr(&attr, uattr, size) != 0)
                return -EFAULT;
 
        err = security_bpf(cmd, &attr, size);
                err = map_lookup_elem(&attr);
                break;
        case BPF_MAP_UPDATE_ELEM:
-               err = map_update_elem(&attr);
+               err = map_update_elem(&attr, uattr);
                break;
        case BPF_MAP_DELETE_ELEM:
                err = map_delete_elem(&attr);
                err = bpf_prog_detach(&attr);
                break;
        case BPF_PROG_QUERY:
-               err = bpf_prog_query(&attr, uattr);
+               err = bpf_prog_query(&attr, uattr.user);
                break;
        case BPF_PROG_TEST_RUN:
-               err = bpf_prog_test_run(&attr, uattr);
+               err = bpf_prog_test_run(&attr, uattr.user);
                break;
        case BPF_PROG_GET_NEXT_ID:
-               err = bpf_obj_get_next_id(&attr, uattr,
+               err = bpf_obj_get_next_id(&attr, uattr.user,
                                          &prog_idr, &prog_idr_lock);
                break;
        case BPF_MAP_GET_NEXT_ID:
-               err = bpf_obj_get_next_id(&attr, uattr,
+               err = bpf_obj_get_next_id(&attr, uattr.user,
                                          &map_idr, &map_idr_lock);
                break;
        case BPF_BTF_GET_NEXT_ID:
-               err = bpf_obj_get_next_id(&attr, uattr,
+               err = bpf_obj_get_next_id(&attr, uattr.user,
                                          &btf_idr, &btf_idr_lock);
                break;
        case BPF_PROG_GET_FD_BY_ID:
                err = bpf_map_get_fd_by_id(&attr);
                break;
        case BPF_OBJ_GET_INFO_BY_FD:
-               err = bpf_obj_get_info_by_fd(&attr, uattr);
+               err = bpf_obj_get_info_by_fd(&attr, uattr.user);
                break;
        case BPF_RAW_TRACEPOINT_OPEN:
                err = bpf_raw_tracepoint_open(&attr);
                err = bpf_btf_get_fd_by_id(&attr);
                break;
        case BPF_TASK_FD_QUERY:
-               err = bpf_task_fd_query(&attr, uattr);
+               err = bpf_task_fd_query(&attr, uattr.user);
                break;
        case BPF_MAP_LOOKUP_AND_DELETE_ELEM:
                err = map_lookup_and_delete_elem(&attr);
                break;
        case BPF_MAP_LOOKUP_BATCH:
-               err = bpf_map_do_batch(&attr, uattr, BPF_MAP_LOOKUP_BATCH);
+               err = bpf_map_do_batch(&attr, uattr.user, BPF_MAP_LOOKUP_BATCH);
                break;
        case BPF_MAP_LOOKUP_AND_DELETE_BATCH:
-               err = bpf_map_do_batch(&attr, uattr,
+               err = bpf_map_do_batch(&attr, uattr.user,
                                       BPF_MAP_LOOKUP_AND_DELETE_BATCH);
                break;
        case BPF_MAP_UPDATE_BATCH:
-               err = bpf_map_do_batch(&attr, uattr, BPF_MAP_UPDATE_BATCH);
+               err = bpf_map_do_batch(&attr, uattr.user, BPF_MAP_UPDATE_BATCH);
                break;
        case BPF_MAP_DELETE_BATCH:
-               err = bpf_map_do_batch(&attr, uattr, BPF_MAP_DELETE_BATCH);
+               err = bpf_map_do_batch(&attr, uattr.user, BPF_MAP_DELETE_BATCH);
                break;
        case BPF_LINK_CREATE:
-               err = link_create(&attr);
+               err = link_create(&attr, uattr);
                break;
        case BPF_LINK_UPDATE:
                err = link_update(&attr);
                err = bpf_link_get_fd_by_id(&attr);
                break;
        case BPF_LINK_GET_NEXT_ID:
-               err = bpf_obj_get_next_id(&attr, uattr,
+               err = bpf_obj_get_next_id(&attr, uattr.user,
                                          &link_idr, &link_idr_lock);
                break;
        case BPF_ENABLE_STATS:
        return err;
 }
 
+/* bpf() syscall entry point: attributes always come from user memory. */
+SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size)
+{
+       bpfptr_t battr = USER_BPFPTR(uattr);
+
+       return __sys_bpf(cmd, battr, size);
+}
+
 static bool syscall_prog_is_valid_access(int off, int size,
                                         enum bpf_access_type type,
                                         const struct bpf_prog *prog,
 
 BPF_CALL_3(bpf_sys_bpf, int, cmd, void *, attr, u32, attr_size)
 {
-       return -EINVAL;
+       switch (cmd) {
+       case BPF_MAP_CREATE:
+       case BPF_MAP_UPDATE_ELEM:
+       case BPF_MAP_FREEZE:
+       case BPF_PROG_LOAD:
+               break;
+       /* case BPF_PROG_TEST_RUN:
+        * is not part of this list to prevent recursive test_run
+        */
+       default:
+               return -EINVAL;
+       }
+       return __sys_bpf(cmd, KERNEL_BPFPTR(attr), attr_size);
 }
 
 const struct bpf_func_proto bpf_sys_bpf_proto = {
 
 
 static int check_btf_func(struct bpf_verifier_env *env,
                          const union bpf_attr *attr,
-                         union bpf_attr __user *uattr)
+                         bpfptr_t uattr)
 {
        const struct btf_type *type, *func_proto, *ret_type;
        u32 i, nfuncs, urec_size, min_size;
        struct bpf_func_info_aux *info_aux = NULL;
        struct bpf_prog *prog;
        const struct btf *btf;
-       void __user *urecord;
+       bpfptr_t urecord;
        u32 prev_offset = 0;
        bool scalar_return;
        int ret = -ENOMEM;
        prog = env->prog;
        btf = prog->aux->btf;
 
-       urecord = u64_to_user_ptr(attr->func_info);
+       urecord = make_bpfptr(attr->func_info, uattr.is_kernel);
        min_size = min_t(u32, krec_size, urec_size);
 
        krecord = kvcalloc(nfuncs, krec_size, GFP_KERNEL | __GFP_NOWARN);
                                /* set the size kernel expects so loader can zero
                                 * out the rest of the record.
                                 */
-                               if (put_user(min_size, &uattr->func_info_rec_size))
+                               if (copy_to_bpfptr_offset(uattr,
+                                                         offsetof(union bpf_attr, func_info_rec_size),
+                                                         &min_size, sizeof(min_size)))
                                        ret = -EFAULT;
                        }
                        goto err_free;
                }
 
-               if (copy_from_user(&krecord[i], urecord, min_size)) {
+               if (copy_from_bpfptr(&krecord[i], urecord, min_size)) {
                        ret = -EFAULT;
                        goto err_free;
                }
                }
 
                prev_offset = krecord[i].insn_off;
-               urecord += urec_size;
+               bpfptr_add(&urecord, urec_size);
        }
 
        prog->aux->func_info = krecord;
 
 static int check_btf_line(struct bpf_verifier_env *env,
                          const union bpf_attr *attr,
-                         union bpf_attr __user *uattr)
+                         bpfptr_t uattr)
 {
        u32 i, s, nr_linfo, ncopy, expected_size, rec_size, prev_offset = 0;
        struct bpf_subprog_info *sub;
        struct bpf_line_info *linfo;
        struct bpf_prog *prog;
        const struct btf *btf;
-       void __user *ulinfo;
+       bpfptr_t ulinfo;
        int err;
 
        nr_linfo = attr->line_info_cnt;
 
        s = 0;
        sub = env->subprog_info;
-       ulinfo = u64_to_user_ptr(attr->line_info);
+       ulinfo = make_bpfptr(attr->line_info, uattr.is_kernel);
        expected_size = sizeof(struct bpf_line_info);
        ncopy = min_t(u32, expected_size, rec_size);
        for (i = 0; i < nr_linfo; i++) {
                if (err) {
                        if (err == -E2BIG) {
                                verbose(env, "nonzero tailing record in line_info");
-                               if (put_user(expected_size,
-                                            &uattr->line_info_rec_size))
+                               if (copy_to_bpfptr_offset(uattr,
+                                                         offsetof(union bpf_attr, line_info_rec_size),
+                                                         &expected_size, sizeof(expected_size)))
                                        err = -EFAULT;
                        }
                        goto err_free;
                }
 
-               if (copy_from_user(&linfo[i], ulinfo, ncopy)) {
+               if (copy_from_bpfptr(&linfo[i], ulinfo, ncopy)) {
                        err = -EFAULT;
                        goto err_free;
                }
                }
 
                prev_offset = linfo[i].insn_off;
-               ulinfo += rec_size;
+               bpfptr_add(&ulinfo, rec_size);
        }
 
        if (s != env->subprog_cnt) {
 
 static int check_btf_info(struct bpf_verifier_env *env,
                          const union bpf_attr *attr,
-                         union bpf_attr __user *uattr)
+                         bpfptr_t uattr)
 {
        struct btf *btf;
        int err;
        return btf_vmlinux;
 }
 
-int bpf_check(struct bpf_prog **prog, union bpf_attr *attr,
-             union bpf_attr __user *uattr)
+int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, bpfptr_t uattr)
 {
        u64 start_time = ktime_get_ns();
        struct bpf_verifier_env *env;