}
 
 static bool is_sync_callback_calling_kfunc(u32 btf_id);
+static bool is_async_callback_calling_kfunc(u32 btf_id);
+static bool is_callback_calling_kfunc(u32 btf_id);
 static bool is_bpf_throw_kfunc(struct bpf_insn *insn);
 
+static bool is_bpf_wq_set_callback_impl_kfunc(u32 btf_id);
+
 static bool is_sync_callback_calling_function(enum bpf_func_id func_id)
 {
        return func_id == BPF_FUNC_for_each_map_elem ||
 
 static bool is_async_callback_calling_insn(struct bpf_insn *insn)
 {
-       return bpf_helper_call(insn) && is_async_callback_calling_function(insn->imm);
+       /* Async callbacks can now be spawned two ways: by helpers
+        * (e.g. bpf_timer_set_callback) and by kfuncs
+        * (currently bpf_wq_set_callback_impl), so check both call forms.
+        */
+       return (bpf_helper_call(insn) && is_async_callback_calling_function(insn->imm)) ||
+              (bpf_pseudo_kfunc_call(insn) && is_async_callback_calling_kfunc(insn->imm));
 }
 
 static bool is_may_goto_insn(struct bpf_insn *insn)
        }
        dst_state->speculative = src->speculative;
        dst_state->active_rcu_lock = src->active_rcu_lock;
+       dst_state->in_sleepable = src->in_sleepable;
        dst_state->curframe = src->curframe;
        dst_state->active_lock.ptr = src->active_lock.ptr;
        dst_state->active_lock.id = src->active_lock.id;
 /* Similar to push_stack(), but for async callbacks */
 static struct bpf_verifier_state *push_async_cb(struct bpf_verifier_env *env,
                                                int insn_idx, int prev_insn_idx,
-                                               int subprog)
+                                               int subprog, bool is_sleepable)
 {
        struct bpf_verifier_stack_elem *elem;
        struct bpf_func_state *frame;
         * Initialize it similar to do_check_common().
         */
        elem->st.branches = 1;
+       elem->st.in_sleepable = is_sleepable;
        frame = kzalloc(sizeof(*frame), GFP_KERNEL);
        if (!frame)
                goto err;
 
 static bool in_sleepable(struct bpf_verifier_env *env)
 {
-       return env->prog->sleepable;
+       /* The prog itself may be non-sleepable while the state currently
+        * being verified is, e.g. inside an async callback pushed with
+        * is_sleepable == true (see push_async_cb()).
+        */
+       return env->prog->sleepable ||
+              (env->cur_state && env->cur_state->in_sleepable);
 }
 
 /* The non-sleepable programs and sleepable programs with explicit bpf_rcu_read_lock()
         */
        env->subprog_info[subprog].is_cb = true;
        if (bpf_pseudo_kfunc_call(insn) &&
-           !is_sync_callback_calling_kfunc(insn->imm)) {
+           !is_callback_calling_kfunc(insn->imm)) {
                verbose(env, "verifier bug: kfunc %s#%d not marked as callback-calling\n",
                        func_id_name(insn->imm), insn->imm);
                return -EFAULT;
        if (is_async_callback_calling_insn(insn)) {
                struct bpf_verifier_state *async_cb;
 
-               /* there is no real recursion here. timer callbacks are async */
+               /* there is no real recursion here. timer and workqueue callbacks are async */
                env->subprog_info[subprog].is_async_cb = true;
                async_cb = push_async_cb(env, env->subprog_info[subprog].start,
-                                        insn_idx, subprog);
+                                        insn_idx, subprog,
+                                        is_bpf_wq_set_callback_impl_kfunc(insn->imm));
                if (!async_cb)
                        return -EFAULT;
                callee = async_cb->frame[0];
        KF_bpf_percpu_obj_new_impl,
        KF_bpf_percpu_obj_drop_impl,
        KF_bpf_throw,
+       KF_bpf_wq_set_callback_impl,
        KF_bpf_iter_css_task_new,
 };
 
 BTF_ID(func, bpf_percpu_obj_new_impl)
 BTF_ID(func, bpf_percpu_obj_drop_impl)
 BTF_ID(func, bpf_throw)
+BTF_ID(func, bpf_wq_set_callback_impl)
 #ifdef CONFIG_CGROUPS
 BTF_ID(func, bpf_iter_css_task_new)
 #endif
 BTF_ID(func, bpf_percpu_obj_new_impl)
 BTF_ID(func, bpf_percpu_obj_drop_impl)
 BTF_ID(func, bpf_throw)
+BTF_ID(func, bpf_wq_set_callback_impl)
 #ifdef CONFIG_CGROUPS
 BTF_ID(func, bpf_iter_css_task_new)
 #else
        return btf_id == special_kfunc_list[KF_bpf_rbtree_add_impl];
 }
 
+/* kfuncs whose callback argument runs asynchronously, after the program
+ * returns (currently only bpf_wq_set_callback_impl).
+ */
+static bool is_async_callback_calling_kfunc(u32 btf_id)
+{
+       return btf_id == special_kfunc_list[KF_bpf_wq_set_callback_impl];
+}
+
 static bool is_bpf_throw_kfunc(struct bpf_insn *insn)
 {
        return bpf_pseudo_kfunc_call(insn) && insn->off == 0 &&
               insn->imm == special_kfunc_list[KF_bpf_throw];
 }
 
+/* true for bpf_wq_set_callback_impl, which needs special treatment:
+ * its async callback is verified as sleepable (see push_async_cb() call
+ * site) and a hidden prog->aux argument is patched into R4 at fixup time.
+ */
+static bool is_bpf_wq_set_callback_impl_kfunc(u32 btf_id)
+{
+       return btf_id == special_kfunc_list[KF_bpf_wq_set_callback_impl];
+}
+
+/* Any callback-calling kfunc, whether the callback is invoked
+ * synchronously (e.g. bpf_rbtree_add_impl) or asynchronously (workqueue).
+ */
+static bool is_callback_calling_kfunc(u32 btf_id)
+{
+       return is_sync_callback_calling_kfunc(btf_id) ||
+              is_async_callback_calling_kfunc(btf_id);
+}
+
 static bool is_rbtree_lock_required_kfunc(u32 btf_id)
 {
        return is_bpf_rbtree_api_kfunc(btf_id);
                }
        }
 
+       if (is_bpf_wq_set_callback_impl_kfunc(meta.func_id)) {
+               err = push_callback_call(env, insn, insn_idx, meta.subprogno,
+                                        set_timer_callback_state);
+               if (err) {
+                       verbose(env, "kfunc %s#%d failed callback verification\n",
+                               func_name, meta.func_id);
+                       return err;
+               }
+       }
+
        rcu_lock = is_kfunc_bpf_rcu_read_lock(&meta);
        rcu_unlock = is_kfunc_bpf_rcu_read_unlock(&meta);
 
        if (old->active_rcu_lock != cur->active_rcu_lock)
                return false;
 
+       if (old->in_sleepable != cur->in_sleepable)
+               return false;
+
        /* for states to be equal callsites have to be the same
         * and all frame states need to be equivalent
         */
                   desc->func_id == special_kfunc_list[KF_bpf_rdonly_cast]) {
                insn_buf[0] = BPF_MOV64_REG(BPF_REG_0, BPF_REG_1);
                *cnt = 1;
+       } else if (is_bpf_wq_set_callback_impl_kfunc(desc->func_id)) {
+               struct bpf_insn ld_addrs[2] = { BPF_LD_IMM64(BPF_REG_4, (long)env->prog->aux) };
+
+               insn_buf[0] = ld_addrs[0];
+               insn_buf[1] = ld_addrs[1];
+               insn_buf[2] = *insn;
+               *cnt = 3;
        }
        return 0;
 }