struct btf *btf;
                u32 btf_id;
        } arg_obj_drop;
+       struct {
+               struct btf_field *field;
+       } arg_list_head;
 };
 
 static bool is_kfunc_acquire(struct bpf_kfunc_call_arg_meta *meta)
 
 enum {
        KF_ARG_DYNPTR_ID,
+       KF_ARG_LIST_HEAD_ID,
+       KF_ARG_LIST_NODE_ID,
 };
 
 BTF_ID_LIST(kf_arg_btf_ids)
 BTF_ID(struct, bpf_dynptr_kern)
+BTF_ID(struct, bpf_list_head)
+BTF_ID(struct, bpf_list_node)
 
-static bool is_kfunc_arg_dynptr(const struct btf *btf,
-                               const struct btf_param *arg)
+static bool __is_kfunc_ptr_arg_type(const struct btf *btf,
+                                   const struct btf_param *arg, int type)
 {
        const struct btf_type *t;
        u32 res_id;
        t = btf_type_skip_modifiers(btf, arg->type, &res_id);
        if (!t)
                return false;
        if (!btf_type_is_ptr(t))
                return false;
        t = btf_type_skip_modifiers(btf, t->type, &res_id);
        if (!t)
                return false;
-       return btf_types_are_same(btf, res_id, btf_vmlinux, kf_arg_btf_ids[KF_ARG_DYNPTR_ID]);
+       return btf_types_are_same(btf, res_id, btf_vmlinux, kf_arg_btf_ids[type]);
+}
+
+static bool is_kfunc_arg_dynptr(const struct btf *btf, const struct btf_param *arg)
+{
+       return __is_kfunc_ptr_arg_type(btf, arg, KF_ARG_DYNPTR_ID);
+}
+
+static bool is_kfunc_arg_list_head(const struct btf *btf, const struct btf_param *arg)
+{
+       return __is_kfunc_ptr_arg_type(btf, arg, KF_ARG_LIST_HEAD_ID);
+}
+
+static bool is_kfunc_arg_list_node(const struct btf *btf, const struct btf_param *arg)
+{
+       return __is_kfunc_ptr_arg_type(btf, arg, KF_ARG_LIST_NODE_ID);
 }
 
 /* Returns true if struct is composed of scalars, 4 levels of nesting allowed */
        KF_ARG_PTR_TO_ALLOC_BTF_ID,  /* Allocated object */
        KF_ARG_PTR_TO_KPTR,          /* PTR_TO_KPTR but type specific */
        KF_ARG_PTR_TO_DYNPTR,
+       KF_ARG_PTR_TO_LIST_HEAD,
+       KF_ARG_PTR_TO_LIST_NODE,
        KF_ARG_PTR_TO_BTF_ID,        /* Also covers reg2btf_ids conversions */
        KF_ARG_PTR_TO_MEM,
        KF_ARG_PTR_TO_MEM_SIZE,      /* Size derived from next argument, skip it */
 enum special_kfunc_type {
        KF_bpf_obj_new_impl,
        KF_bpf_obj_drop_impl,
+       KF_bpf_list_push_front,
+       KF_bpf_list_push_back,
+       KF_bpf_list_pop_front,
+       KF_bpf_list_pop_back,
 };
 
 BTF_SET_START(special_kfunc_set)
 BTF_ID(func, bpf_obj_new_impl)
 BTF_ID(func, bpf_obj_drop_impl)
+BTF_ID(func, bpf_list_push_front)
+BTF_ID(func, bpf_list_push_back)
+BTF_ID(func, bpf_list_pop_front)
+BTF_ID(func, bpf_list_pop_back)
 BTF_SET_END(special_kfunc_set)
 
 BTF_ID_LIST(special_kfunc_list)
 BTF_ID(func, bpf_obj_new_impl)
 BTF_ID(func, bpf_obj_drop_impl)
+BTF_ID(func, bpf_list_push_front)
+BTF_ID(func, bpf_list_push_back)
+BTF_ID(func, bpf_list_pop_front)
+BTF_ID(func, bpf_list_pop_back)
 
 static enum kfunc_ptr_arg_type
 get_kfunc_ptr_arg_type(struct bpf_verifier_env *env,
        if (is_kfunc_arg_dynptr(meta->btf, &args[argno]))
                return KF_ARG_PTR_TO_DYNPTR;
 
+       if (is_kfunc_arg_list_head(meta->btf, &args[argno]))
+               return KF_ARG_PTR_TO_LIST_HEAD;
+
+       if (is_kfunc_arg_list_node(meta->btf, &args[argno]))
+               return KF_ARG_PTR_TO_LIST_NODE;
+
        if ((base_type(reg->type) == PTR_TO_BTF_ID || reg2btf_ids[base_type(reg->type)])) {
                if (!btf_type_is_struct(ref_t)) {
                        verbose(env, "kernel function %s args#%d pointer type %s %s is not supported\n",
        return 0;
 }
 
+/* Implementation details:
+ *
+ * Each register points to some region of memory, which we define as an
+ * allocation. Each allocation may embed a bpf_spin_lock which protects any
+ * special BPF objects (bpf_list_head, bpf_rb_root, etc.) that are part of the
+ * same allocation. The lock and the data it protects are colocated in the same
+ * memory region.
+ *
+ * Hence, every time a register holds a pointer value pointing to such an
+ * allocation, the verifier preserves a unique reg->id for it.
+ *
+ * The verifier remembers the lock 'ptr' and the lock 'id' whenever
+ * bpf_spin_lock is called.
+ *
+ * To enable this, lock state in the verifier captures two values:
+ *     active_lock.ptr = Register's type specific pointer
+ *     active_lock.id  = A unique ID for each register pointer value
+ *
+ * Currently, PTR_TO_MAP_VALUE and PTR_TO_BTF_ID | MEM_ALLOC are the two
+ * supported register types.
+ *
+ * The active_lock.ptr in case of map values is the reg->map_ptr, and in case of
+ * allocated objects is the reg->btf pointer.
+ *
+ * The active_lock.id is non-unique for maps supporting direct_value_addr, as
+ * we can establish the provenance of the map value statically for each
+ * distinct lookup into such maps. They always contain a single map value,
+ * hence assigning a unique ID to each pseudo load would only pessimize the
+ * algorithm and reject valid programs.
+ *
+ * Global variables, for instance, are placed in array maps with
+ * max_entries = 1, hence their active_lock.ptr becomes the map_ptr and id = 0
+ * (since all pseudo loads point into the same single map value, as described
+ * above).
+ *
+ * In case of inner map lookups, the inner map pointer has the same map_ptr as
+ * the outer map pointer (in verifier context), but each lookup into an inner
+ * map assigns a fresh reg->id. So while lookups into distinct inner maps from
+ * the same outer map share the same map_ptr as active_lock.ptr, each lookup
+ * gets a different reg->id and hence a different active_lock.id.
+ *
+ * In case of allocated objects, active_lock.ptr is the reg->btf, and the
+ * reg->id is a unique ID preserved after the NULL pointer check on the pointer
+ * returned from bpf_obj_new. Each allocation receives a new reg->id.
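+ *
+ * As an illustrative sketch (struct and field names below are made up for
+ * this comment, and __contains() refers to the decl_tag helper macro from the
+ * selftests), the BPF program side may look like:
+ *
+ *     struct item {
+ *             struct bpf_list_node node;
+ *             int data;
+ *     };
+ *
+ *     struct map_value {
+ *             struct bpf_spin_lock lock;
+ *             struct bpf_list_head head __contains(item, node);
+ *     };
+ *
+ * The lock and the list head it protects live in the same allocation (the map
+ * value), so bpf_list_push_front(&v->head, &i->node) and friends are only
+ * accepted while bpf_spin_lock(&v->lock) is held, with active_lock.ptr/id
+ * identifying that exact map value.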
+ */
+static int check_reg_allocation_locked(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
+{
+       void *ptr;
+       u32 id;
+
+       switch ((int)reg->type) {
+       case PTR_TO_MAP_VALUE:
+               ptr = reg->map_ptr;
+               break;
+       case PTR_TO_BTF_ID | MEM_ALLOC:
+               ptr = reg->btf;
+               break;
+       default:
+               verbose(env, "verifier internal error: unknown reg type for lock check\n");
+               return -EFAULT;
+       }
+       id = reg->id;
+
+       if (!env->cur_state->active_lock.ptr)
+               return -EINVAL;
+       if (env->cur_state->active_lock.ptr != ptr ||
+           env->cur_state->active_lock.id != id) {
+               verbose(env, "held lock and object are not in the same allocation\n");
+               return -EINVAL;
+       }
+       return 0;
+}
+
+static bool is_bpf_list_api_kfunc(u32 btf_id)
+{
+       return btf_id == special_kfunc_list[KF_bpf_list_push_front] ||
+              btf_id == special_kfunc_list[KF_bpf_list_push_back] ||
+              btf_id == special_kfunc_list[KF_bpf_list_pop_front] ||
+              btf_id == special_kfunc_list[KF_bpf_list_pop_back];
+}
+
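+/* Check a bpf_list_head kfunc argument: the map value or allocated object it
+ * points into must have a bpf_list_head at the exact constant offset, and the
+ * bpf_spin_lock in the same allocation must currently be held.
+ */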
+static int process_kf_arg_ptr_to_list_head(struct bpf_verifier_env *env,
+                                          struct bpf_reg_state *reg, u32 regno,
+                                          struct bpf_kfunc_call_arg_meta *meta)
+{
+       struct btf_field *field;
+       struct btf_record *rec;
+       u32 list_head_off;
+
+       if (meta->btf != btf_vmlinux || !is_bpf_list_api_kfunc(meta->func_id)) {
+               verbose(env, "verifier internal error: bpf_list_head argument for unknown kfunc\n");
+               return -EFAULT;
+       }
+
+       if (!tnum_is_const(reg->var_off)) {
+               verbose(env,
+                       "R%d doesn't have constant offset. bpf_list_head has to be at the constant offset\n",
+                       regno);
+               return -EINVAL;
+       }
+
+       rec = reg_btf_record(reg);
+       list_head_off = reg->off + reg->var_off.value;
+       field = btf_record_find(rec, list_head_off, BPF_LIST_HEAD);
+       if (!field) {
+               verbose(env, "bpf_list_head not found at offset=%u\n", list_head_off);
+               return -EINVAL;
+       }
+
+       /* All functions require bpf_list_head to be protected using a bpf_spin_lock */
+       if (check_reg_allocation_locked(env, reg)) {
+               verbose(env, "bpf_spin_lock at off=%d must be held for bpf_list_head\n",
+                       rec->spin_lock_off);
+               return -EINVAL;
+       }
+
+       if (meta->arg_list_head.field) {
+               verbose(env, "verifier internal error: repeating bpf_list_head arg\n");
+               return -EFAULT;
+       }
+       meta->arg_list_head.field = field;
+       return 0;
+}
+
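+/* Check a bpf_list_node kfunc argument for bpf_list_push_{front,back}: the
+ * node must sit at the exact bpf_list_node offset inside the object, and the
+ * object's type must match the value type the bpf_list_head argument was
+ * declared to contain.
+ */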
+static int process_kf_arg_ptr_to_list_node(struct bpf_verifier_env *env,
+                                          struct bpf_reg_state *reg, u32 regno,
+                                          struct bpf_kfunc_call_arg_meta *meta)
+{
+       const struct btf_type *et, *t;
+       struct btf_field *field;
+       struct btf_record *rec;
+       u32 list_node_off;
+
+       if (meta->btf != btf_vmlinux ||
+           (meta->func_id != special_kfunc_list[KF_bpf_list_push_front] &&
+            meta->func_id != special_kfunc_list[KF_bpf_list_push_back])) {
+               verbose(env, "verifier internal error: bpf_list_node argument for unknown kfunc\n");
+               return -EFAULT;
+       }
+
+       if (!tnum_is_const(reg->var_off)) {
+               verbose(env,
+                       "R%d doesn't have constant offset. bpf_list_node has to be at the constant offset\n",
+                       regno);
+               return -EINVAL;
+       }
+
+       rec = reg_btf_record(reg);
+       list_node_off = reg->off + reg->var_off.value;
+       field = btf_record_find(rec, list_node_off, BPF_LIST_NODE);
+       if (!field || field->offset != list_node_off) {
+               verbose(env, "bpf_list_node not found at offset=%u\n", list_node_off);
+               return -EINVAL;
+       }
+
+       field = meta->arg_list_head.field;
+
+       et = btf_type_by_id(field->list_head.btf, field->list_head.value_btf_id);
+       t = btf_type_by_id(reg->btf, reg->btf_id);
+       if (!btf_struct_ids_match(&env->log, reg->btf, reg->btf_id, 0, field->list_head.btf,
+                                 field->list_head.value_btf_id, true)) {
+               verbose(env, "operation on bpf_list_head expects arg#1 bpf_list_node at offset=%d "
+                       "in struct %s, but arg is at offset=%d in struct %s\n",
+                       field->list_head.node_offset, btf_name_by_offset(field->list_head.btf, et->name_off),
+                       list_node_off, btf_name_by_offset(reg->btf, t->name_off));
+               return -EINVAL;
+       }
+
+       if (list_node_off != field->list_head.node_offset) {
+               verbose(env, "arg#1 offset=%d, but expected bpf_list_node at offset=%d in struct %s\n",
+                       list_node_off, field->list_head.node_offset,
+                       btf_name_by_offset(field->list_head.btf, et->name_off));
+               return -EINVAL;
+       }
+       return 0;
+}
+
 static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_arg_meta *meta)
 {
        const char *func_name = meta->func_name, *ref_tname;
                        break;
                case KF_ARG_PTR_TO_KPTR:
                case KF_ARG_PTR_TO_DYNPTR:
+               case KF_ARG_PTR_TO_LIST_HEAD:
+               case KF_ARG_PTR_TO_LIST_NODE:
                case KF_ARG_PTR_TO_MEM:
                case KF_ARG_PTR_TO_MEM_SIZE:
                        /* Trusted by default */
                                return -EINVAL;
                        }
                        break;
+               case KF_ARG_PTR_TO_LIST_HEAD:
+                       if (reg->type != PTR_TO_MAP_VALUE &&
+                           reg->type != (PTR_TO_BTF_ID | MEM_ALLOC)) {
+                               verbose(env, "arg#%d expected pointer to map value or allocated object\n", i);
+                               return -EINVAL;
+                       }
+                       if (reg->type == (PTR_TO_BTF_ID | MEM_ALLOC) && !reg->ref_obj_id) {
+                               verbose(env, "allocated object must be referenced\n");
+                               return -EINVAL;
+                       }
+                       ret = process_kf_arg_ptr_to_list_head(env, reg, regno, meta);
+                       if (ret < 0)
+                               return ret;
+                       break;
+               case KF_ARG_PTR_TO_LIST_NODE:
+                       if (reg->type != (PTR_TO_BTF_ID | MEM_ALLOC)) {
+                               verbose(env, "arg#%d expected pointer to allocated object\n", i);
+                               return -EINVAL;
+                       }
+                       if (!reg->ref_obj_id) {
+                               verbose(env, "allocated object must be referenced\n");
+                               return -EINVAL;
+                       }
+                       ret = process_kf_arg_ptr_to_list_node(env, reg, regno, meta);
+                       if (ret < 0)
+                               return ret;
+                       break;
                case KF_ARG_PTR_TO_BTF_ID:
                        /* Only base_type is checked, further checks are done here */
                        if (reg->type != PTR_TO_BTF_ID &&
                                env->insn_aux_data[insn_idx].kptr_struct_meta =
                                        btf_find_struct_meta(meta.arg_obj_drop.btf,
                                                             meta.arg_obj_drop.btf_id);
+                       } else if (meta.func_id == special_kfunc_list[KF_bpf_list_pop_front] ||
+                                  meta.func_id == special_kfunc_list[KF_bpf_list_pop_back]) {
+                               struct btf_field *field = meta.arg_list_head.field;
+
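+                               /* The pointer returned by a pop kfunc points at
+                                * the bpf_list_node embedded in the object, so
+                                * type R0 as the containing allocated object
+                                * with off at the node's offset.
+                                */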
+                               mark_reg_known_zero(env, regs, BPF_REG_0);
+                               regs[BPF_REG_0].type = PTR_TO_BTF_ID | MEM_ALLOC;
+                               regs[BPF_REG_0].btf = field->list_head.btf;
+                               regs[BPF_REG_0].btf_id = field->list_head.value_btf_id;
+                               regs[BPF_REG_0].off = field->list_head.node_offset;
                        } else {
                                verbose(env, "kernel function %s unhandled dynamic return type\n",
                                        meta.func_name);
                                        return -EINVAL;
                                }
 
-                               if (env->cur_state->active_lock.ptr &&
-                                   (insn->src_reg == BPF_PSEUDO_CALL ||
-                                    insn->imm != BPF_FUNC_spin_unlock)) {
-                                       verbose(env, "function calls are not allowed while holding a lock\n");
-                                       return -EINVAL;
+                               if (env->cur_state->active_lock.ptr) {
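+                                       /* While a bpf_spin_lock is held, only
+                                        * bpf_spin_unlock and the bpf_list_*
+                                        * kfuncs may be called; subprog calls
+                                        * and all other helpers/kfuncs are
+                                        * rejected.
+                                        */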
+                                       if ((insn->src_reg == BPF_REG_0 && insn->imm != BPF_FUNC_spin_unlock) ||
+                                           (insn->src_reg == BPF_PSEUDO_CALL) ||
+                                           (insn->src_reg == BPF_PSEUDO_KFUNC_CALL &&
+                                            (insn->off != 0 || !is_bpf_list_api_kfunc(insn->imm)))) {
+                                               verbose(env, "function calls are not allowed while holding a lock\n");
+                                               return -EINVAL;
+                                       }
                                }
                                if (insn->src_reg == BPF_PSEUDO_CALL)
                                        err = check_func_call(env, insn, &env->insn_idx);