return stack_slot_obj_get_spi(env, reg, "iter", nr_slots);
 }
 
+static int irq_flag_get_spi(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
+{
+       return stack_slot_obj_get_spi(env, reg, "irq_flag", 1);
+}
+
 static enum bpf_dynptr_type arg_to_dynptr_type(enum bpf_arg_type arg_type)
 {
        switch (arg_type & DYNPTR_TYPE_FLAG_MASK) {
        return 0;
 }
 
+static int acquire_irq_state(struct bpf_verifier_env *env, int insn_idx);
+static int release_irq_state(struct bpf_verifier_state *state, int id);
+
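+/* Mark the stack slot pointed to by 'reg' as holding saved IRQ flags: acquire
+ * a REF_TYPE_IRQ reference at 'insn_idx', record its id in the slot's
+ * ref_obj_id, and tag every byte of the slot as STACK_IRQ_FLAG.
+ */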
+static int mark_stack_slot_irq_flag(struct bpf_verifier_env *env,
+                                    struct bpf_kfunc_call_arg_meta *meta,
+                                    struct bpf_reg_state *reg, int insn_idx)
+{
+       struct bpf_func_state *state = func(env, reg);
+       struct bpf_stack_state *slot;
+       struct bpf_reg_state *st;
+       int spi, i, id;
+
+       spi = irq_flag_get_spi(env, reg);
+       if (spi < 0)
+               return spi;
+
+       id = acquire_irq_state(env, insn_idx);
+       if (id < 0)
+               return id;
+
+       slot = &state->stack[spi];
+       st = &slot->spilled_ptr;
+
+       __mark_reg_known_zero(st);
+       st->type = PTR_TO_STACK; /* we don't have dedicated reg type */
+       st->live |= REG_LIVE_WRITTEN;
+       st->ref_obj_id = id;
+
+       for (i = 0; i < BPF_REG_SIZE; i++)
+               slot->slot_type[i] = STACK_IRQ_FLAG;
+
+       mark_stack_slot_scratched(env, spi);
+       return 0;
+}
+
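+/* Undo mark_stack_slot_irq_flag(): release the IRQ reference owned by this
+ * slot and reset it to STACK_INVALID. IRQ state must be restored in the
+ * reverse order of saves, so releasing anything other than the innermost
+ * active_irq_id state fails with a verbose error.
+ */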
+static int unmark_stack_slot_irq_flag(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
+{
+       struct bpf_func_state *state = func(env, reg);
+       struct bpf_stack_state *slot;
+       struct bpf_reg_state *st;
+       int spi, i, err;
+
+       spi = irq_flag_get_spi(env, reg);
+       if (spi < 0)
+               return spi;
+
+       slot = &state->stack[spi];
+       st = &slot->spilled_ptr;
+
+       err = release_irq_state(env->cur_state, st->ref_obj_id);
+       WARN_ON_ONCE(err && err != -EACCES);
+       if (err) {
+               int insn_idx = 0;
+
+               for (int i = 0; i < env->cur_state->acquired_refs; i++) {
+                       if (env->cur_state->refs[i].id == env->cur_state->active_irq_id) {
+                               insn_idx = env->cur_state->refs[i].insn_idx;
+                               break;
+                       }
+               }
+
+               verbose(env, "cannot restore irq state out of order, expected id=%d acquired at insn_idx=%d\n",
+                       env->cur_state->active_irq_id, insn_idx);
+               return err;
+       }
+
+       __mark_reg_not_init(env, st);
+
+       /* see unmark_stack_slots_dynptr() for why we need to set REG_LIVE_WRITTEN */
+       st->live |= REG_LIVE_WRITTEN;
+
+       for (i = 0; i < BPF_REG_SIZE; i++)
+               slot->slot_type[i] = STACK_INVALID;
+
+       mark_stack_slot_scratched(env, spi);
+       return 0;
+}
+
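+/* Check that 'reg' points to a stack slot not already holding saved IRQ
+ * flags, i.e. one usable as the destination of an irq_save.
+ */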
+static bool is_irq_flag_reg_valid_uninit(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
+{
+       struct bpf_func_state *state = func(env, reg);
+       struct bpf_stack_state *slot;
+       int spi, i;
+
+       /* For -ERANGE (i.e. spi not falling into allocated stack slots), we
+        * will do check_mem_access to check and update stack bounds later, so
+        * return true for that case.
+        */
+       spi = irq_flag_get_spi(env, reg);
+       if (spi == -ERANGE)
+               return true;
+       if (spi < 0)
+               return false;
+
+       slot = &state->stack[spi];
+
+       for (i = 0; i < BPF_REG_SIZE; i++)
+               if (slot->slot_type[i] == STACK_IRQ_FLAG)
+                       return false;
+       return true;
+}
+
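+/* Check that 'reg' points to a fully initialized, referenced irq flag slot,
+ * i.e. one that may be passed to an irq_restore.
+ */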
+static int is_irq_flag_reg_valid_init(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
+{
+       struct bpf_func_state *state = func(env, reg);
+       struct bpf_stack_state *slot;
+       struct bpf_reg_state *st;
+       int spi, i;
+
+       spi = irq_flag_get_spi(env, reg);
+       if (spi < 0)
+               return -EINVAL;
+
+       slot = &state->stack[spi];
+       st = &slot->spilled_ptr;
+
+       if (!st->ref_obj_id)
+               return -EINVAL;
+
+       for (i = 0; i < BPF_REG_SIZE; i++)
+               if (slot->slot_type[i] != STACK_IRQ_FLAG)
+                       return -EINVAL;
+       return 0;
+}
+
 /* Check if given stack slot is "special":
  *   - spilled register state (STACK_SPILL);
  *   - dynptr state (STACK_DYNPTR);
- *   - iter state (STACK_ITER).
+ *   - iter state (STACK_ITER);
+ *   - irq flag state (STACK_IRQ_FLAG).
  */
 static bool is_stack_slot_special(const struct bpf_stack_state *stack)
 {
        case STACK_SPILL:
        case STACK_DYNPTR:
        case STACK_ITER:
+       case STACK_IRQ_FLAG:
                return true;
        case STACK_INVALID:
        case STACK_MISC:
        dst->active_locks = src->active_locks;
        dst->active_preempt_locks = src->active_preempt_locks;
        dst->active_rcu_lock = src->active_rcu_lock;
+       dst->active_irq_id = src->active_irq_id;
        return 0;
 }
 
        return 0;
 }
 
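+/* Acquire a REF_TYPE_IRQ reference state and make it the innermost active
+ * IRQ-disabled section; returns the newly assigned reference id.
+ */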
+static int acquire_irq_state(struct bpf_verifier_env *env, int insn_idx)
+{
+       struct bpf_verifier_state *state = env->cur_state;
+       struct bpf_reference_state *s;
+
+       s = acquire_reference_state(env, insn_idx);
+       if (!s)
+               return -ENOMEM;
+       s->type = REF_TYPE_IRQ;
+       s->id = ++env->id_gen;
+
+       state->active_irq_id = s->id;
+       return s->id;
+}
+
 static void release_reference_state(struct bpf_verifier_state *state, int idx)
 {
        int last_idx;
+       size_t rem;
 
+       /* IRQ state requires the relative ordering of elements to remain the
+        * same, since it relies on the refs array behaving as a stack in order
+        * to detect out-of-order IRQ restores. Hence use memmove to shift the
+        * array instead of swapping the final element into the deleted idx.
+        */
        last_idx = state->acquired_refs - 1;
+       rem = state->acquired_refs - idx - 1;
        if (last_idx && idx != last_idx)
-               memcpy(&state->refs[idx], &state->refs[last_idx], sizeof(*state->refs));
+               memmove(&state->refs[idx], &state->refs[idx + 1], sizeof(*state->refs) * rem);
        memset(&state->refs[last_idx], 0, sizeof(*state->refs));
        state->acquired_refs--;
        return;
        return -EINVAL;
 }
 
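+/* Release IRQ reference 'id'. Only the currently active (innermost) IRQ
+ * state may be released; active_irq_id is then rolled back to the previous
+ * REF_TYPE_IRQ entry, or 0 if none remains.
+ */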
+static int release_irq_state(struct bpf_verifier_state *state, int id)
+{
+       u32 prev_id = 0;
+       int i;
+
+       if (id != state->active_irq_id)
+               return -EACCES;
+
+       for (i = 0; i < state->acquired_refs; i++) {
+               if (state->refs[i].type != REF_TYPE_IRQ)
+                       continue;
+               if (state->refs[i].id == id) {
+                       release_reference_state(state, i);
+                       state->active_irq_id = prev_id;
+                       return 0;
+               } else {
+                       prev_id = state->refs[i].id;
+               }
+       }
+       return -EINVAL;
+}
+
 static struct bpf_reference_state *find_lock_state(struct bpf_verifier_state *state, enum ref_state_type type,
                                                   int id, void *ptr)
 {
        for (i = 0; i < state->acquired_refs; i++) {
                struct bpf_reference_state *s = &state->refs[i];
 
-               if (s->type == REF_TYPE_PTR || s->type != type)
+               if (s->type != type)
                        continue;
 
                if (s->id == id && s->ptr == ptr)
        return mark_stack_slot_obj_read(env, reg, spi, nr_slots);
 }
 
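+/* Propagate a liveness read mark to the stack slot backing the irq flag
+ * (see mark_stack_slot_obj_read()).
+ */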
+static int mark_irq_flag_read(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
+{
+       int spi;
+
+       spi = irq_flag_get_spi(env, reg);
+       if (spi < 0)
+               return spi;
+       return mark_stack_slot_obj_read(env, reg, spi, 1);
+}
+
 /* This function is supposed to be used by the following 32-bit optimization
  * code only. It returns TRUE if the source or destination register operates
  * on 64-bit, otherwise return FALSE.
                        return -EINVAL;
                }
 
+               if (env->cur_state->active_irq_id) {
+                       verbose(env, "global function calls are not allowed with IRQs disabled,\n"
+                                    "use static function instead\n");
+                       return -EINVAL;
+               }
+
                if (err) {
                        verbose(env, "Caller passes invalid args into func#%d ('%s')\n",
                                subprog, sub_name);
                return err;
        }
 
+       if (check_lock && env->cur_state->active_irq_id) {
+               verbose(env, "%s cannot be used inside bpf_local_irq_save-ed region\n", prefix);
+               return -EINVAL;
+       }
+
        if (check_lock && env->cur_state->active_rcu_lock) {
                verbose(env, "%s cannot be used inside bpf_rcu_read_lock-ed region\n", prefix);
                return -EINVAL;
                        env->insn_aux_data[insn_idx].storage_get_func_atomic = true;
        }
 
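+       /* With IRQs disabled, helpers that might sleep are rejected, and
+        * storage_get helpers in sleepable programs are marked to run
+        * atomically via storage_get_func_atomic.
+        */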
+       if (env->cur_state->active_irq_id) {
+               if (fn->might_sleep) {
+                       verbose(env, "sleepable helper %s#%d in IRQ-disabled region\n",
+                               func_id_name(func_id), func_id);
+                       return -EINVAL;
+               }
+
+               if (in_sleepable(env) && is_storage_get_function(func_id))
+                       env->insn_aux_data[insn_idx].storage_get_func_atomic = true;
+       }
+
        meta.func_id = func_id;
        /* check args */
        for (i = 0; i < MAX_BPF_FUNC_REG_ARGS; i++) {
        return btf_param_match_suffix(btf, arg, "__str");
 }
 
+static bool is_kfunc_arg_irq_flag(const struct btf *btf, const struct btf_param *arg)
+{
+       return btf_param_match_suffix(btf, arg, "__irq_flag");
+}
+
 static bool is_kfunc_arg_scalar_with_name(const struct btf *btf,
                                          const struct btf_param *arg,
                                          const char *name)
        KF_ARG_PTR_TO_CONST_STR,
        KF_ARG_PTR_TO_MAP,
        KF_ARG_PTR_TO_WORKQUEUE,
+       KF_ARG_PTR_TO_IRQ_FLAG,
 };
 
 enum special_kfunc_type {
        KF_bpf_iter_css_task_new,
        KF_bpf_session_cookie,
        KF_bpf_get_kmem_cache,
+       KF_bpf_local_irq_save,
+       KF_bpf_local_irq_restore,
 };
 
 BTF_SET_START(special_kfunc_set)
 BTF_ID_UNUSED
 #endif
 BTF_ID(func, bpf_get_kmem_cache)
+BTF_ID(func, bpf_local_irq_save)
+BTF_ID(func, bpf_local_irq_restore)
 
 static bool is_kfunc_ret_null(struct bpf_kfunc_call_arg_meta *meta)
 {
        if (is_kfunc_arg_wq(meta->btf, &args[argno]))
                return KF_ARG_PTR_TO_WORKQUEUE;
 
+       if (is_kfunc_arg_irq_flag(meta->btf, &args[argno]))
+               return KF_ARG_PTR_TO_IRQ_FLAG;
+
        if ((base_type(reg->type) == PTR_TO_BTF_ID || reg2btf_ids[base_type(reg->type)])) {
                if (!btf_type_is_struct(ref_t)) {
                        verbose(env, "kernel function %s args#%d pointer type %s %s is not supported\n",
        return 0;
 }
 
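+/* Common handling for the irq flag argument of bpf_local_irq_save() and
+ * bpf_local_irq_restore(): on save, the pointed-to stack slot must be
+ * unused, its bounds are checked/updated via check_mem_access(), and it is
+ * then marked as STACK_IRQ_FLAG; on restore, the slot must hold a live irq
+ * flag, which is marked as read and then invalidated.
+ */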
+static int process_irq_flag(struct bpf_verifier_env *env, int regno,
+                            struct bpf_kfunc_call_arg_meta *meta)
+{
+       struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
+       bool irq_save;
+       int err;
+
+       if (meta->func_id == special_kfunc_list[KF_bpf_local_irq_save]) {
+               irq_save = true;
+       } else if (meta->func_id == special_kfunc_list[KF_bpf_local_irq_restore]) {
+               irq_save = false;
+       } else {
+               verbose(env, "verifier internal error: unknown irq flags kfunc\n");
+               return -EFAULT;
+       }
+
+       if (irq_save) {
+               if (!is_irq_flag_reg_valid_uninit(env, reg)) {
+                       verbose(env, "expected uninitialized irq flag as arg#%d\n", regno - 1);
+                       return -EINVAL;
+               }
+
+               err = check_mem_access(env, env->insn_idx, regno, 0, BPF_DW, BPF_WRITE, -1, false, false);
+               if (err)
+                       return err;
+
+               err = mark_stack_slot_irq_flag(env, meta, reg, env->insn_idx);
+               if (err)
+                       return err;
+       } else {
+               err = is_irq_flag_reg_valid_init(env, reg);
+               if (err) {
+                       verbose(env, "expected an initialized irq flag as arg#%d\n", regno - 1);
+                       return err;
+               }
+
+               err = mark_irq_flag_read(env, reg);
+               if (err)
+                       return err;
+
+               err = unmark_stack_slot_irq_flag(env, reg);
+               if (err)
+                       return err;
+       }
+       return 0;
+}
+
 static int ref_set_non_owning(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
 {
        struct btf_record *rec = reg_btf_record(reg);
                case KF_ARG_PTR_TO_REFCOUNTED_KPTR:
                case KF_ARG_PTR_TO_CONST_STR:
                case KF_ARG_PTR_TO_WORKQUEUE:
+               case KF_ARG_PTR_TO_IRQ_FLAG:
                        break;
                default:
                        WARN_ON_ONCE(1);
                        if (ret < 0)
                                return ret;
                        break;
+               case KF_ARG_PTR_TO_IRQ_FLAG:
+                       if (reg->type != PTR_TO_STACK) {
+                               verbose(env, "arg#%d doesn't point to an irq flag on stack\n", i);
+                               return -EINVAL;
+                       }
+                       ret = process_irq_flag(env, regno, meta);
+                       if (ret < 0)
+                               return ret;
+                       break;
                }
        }
 
                return -EINVAL;
        }
 
+       if (env->cur_state->active_irq_id && sleepable) {
+               verbose(env, "kernel func %s is sleepable within IRQ-disabled region\n", func_name);
+               return -EACCES;
+       }
+
        /* In case of release function, we get register number of refcounted
         * PTR_TO_BTF_ID in bpf_kfunc_arg_meta, do the release now.
         */
                            !check_ids(old_reg->ref_obj_id, cur_reg->ref_obj_id, idmap))
                                return false;
                        break;
+               case STACK_IRQ_FLAG:
+                       old_reg = &old->stack[spi].spilled_ptr;
+                       cur_reg = &cur->stack[spi].spilled_ptr;
+                       if (!check_ids(old_reg->ref_obj_id, cur_reg->ref_obj_id, idmap))
+                               return false;
+                       break;
                case STACK_MISC:
                case STACK_ZERO:
                case STACK_INVALID:
        if (old->active_rcu_lock != cur->active_rcu_lock)
                return false;
 
+       if (!check_ids(old->active_irq_id, cur->active_irq_id, idmap))
+               return false;
+
        for (i = 0; i < old->acquired_refs; i++) {
                if (!check_ids(old->refs[i].id, cur->refs[i].id, idmap) ||
                    old->refs[i].type != cur->refs[i].type)
                        return false;
                switch (old->refs[i].type) {
                case REF_TYPE_PTR:
+               case REF_TYPE_IRQ:
                        break;
                case REF_TYPE_LOCK:
                        if (old->refs[i].ptr != cur->refs[i].ptr)