.seq_priv_size          = sizeof(struct bpf_iter_seq_hash_map_info),
 };
 
+static int bpf_for_each_hash_elem(struct bpf_map *map, void *callback_fn,
+                                 void *callback_ctx, u64 flags)
+{
+       struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
+       struct hlist_nulls_head *head;
+       struct hlist_nulls_node *n;
+       struct htab_elem *elem;
+       u32 roundup_key_size;
+       int i, num_elems = 0;
+       void __percpu *pptr;
+       struct bucket *b;
+       void *key, *val;
+       bool is_percpu;
+       u64 ret = 0;
+
+       if (flags != 0)
+               return -EINVAL;
+
+       is_percpu = htab_is_percpu(htab);
+
+       roundup_key_size = round_up(map->key_size, 8);
+       /* disable migration so percpu value prepared here will be the
+        * same as the one seen by the bpf program with bpf_map_lookup_elem().
+        */
+       if (is_percpu)
+               migrate_disable();
+       for (i = 0; i < htab->n_buckets; i++) {
+               b = &htab->buckets[i];
+               rcu_read_lock();
+               head = &b->head;
+               hlist_nulls_for_each_entry_rcu(elem, n, head, hash_node) {
+                       key = elem->key;
+                       if (is_percpu) {
+                               /* current cpu value for percpu map */
+                               pptr = htab_elem_get_ptr(elem, map->key_size);
+                               val = this_cpu_ptr(pptr);
+                       } else {
+                               val = elem->key + roundup_key_size;
+                       }
+                       num_elems++;
+                       ret = BPF_CAST_CALL(callback_fn)((u64)(long)map,
+                                       (u64)(long)key, (u64)(long)val,
+                                       (u64)(long)callback_ctx, 0);
+                       /* return value: 0 - continue, 1 - stop and return */
+                       if (ret) {
+                               rcu_read_unlock();
+                               goto out;
+                       }
+               }
+               rcu_read_unlock();
+       }
+out:
+       if (is_percpu)
+               migrate_enable();
+       return num_elems;
+}
+
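For reference, a minimal BPF-program-side sketch of how this iterator is driven (the map definition, callback name, and context struct are illustrative, not part of this patch); the callback returns 0 to continue and 1 to stop, matching the check above, and the helper's return value follows the num_elems / -EINVAL behavior of bpf_for_each_hash_elem():

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, 64);
	__type(key, __u32);
	__type(value, __u64);
} hashmap SEC(".maps");

struct callback_ctx {
	__u64 sum;
};

/* called once per element; key/value point into the map */
static __u64 sum_elem(struct bpf_map *map, __u32 *key, __u64 *val,
		      struct callback_ctx *data)
{
	data->sum += *val;
	return 0;	/* 0 - continue, 1 - stop iterating */
}

SEC("tc")
int sum_values(struct __sk_buff *skb)
{
	struct callback_ctx data = { .sum = 0 };

	/* returns the number of elements iterated, or -EINVAL for nonzero flags */
	bpf_for_each_map_elem(&hashmap, sum_elem, &data, 0);
	return 0;
}

char _license[] SEC("license") = "GPL";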
 static int htab_map_btf_id;
 const struct bpf_map_ops htab_map_ops = {
        .map_meta_equal = bpf_map_meta_equal,
        .map_delete_elem = htab_map_delete_elem,
        .map_gen_lookup = htab_map_gen_lookup,
        .map_seq_show_elem = htab_map_seq_show_elem,
+       .map_set_for_each_callback_args = map_set_for_each_callback_args,
+       .map_for_each_callback = bpf_for_each_hash_elem,
        BATCH_OPS(htab),
        .map_btf_name = "bpf_htab",
        .map_btf_id = &htab_map_btf_id,
        .map_delete_elem = htab_lru_map_delete_elem,
        .map_gen_lookup = htab_lru_map_gen_lookup,
        .map_seq_show_elem = htab_map_seq_show_elem,
+       .map_set_for_each_callback_args = map_set_for_each_callback_args,
+       .map_for_each_callback = bpf_for_each_hash_elem,
        BATCH_OPS(htab_lru),
        .map_btf_name = "bpf_htab",
        .map_btf_id = &htab_lru_map_btf_id,
        .map_update_elem = htab_percpu_map_update_elem,
        .map_delete_elem = htab_map_delete_elem,
        .map_seq_show_elem = htab_percpu_map_seq_show_elem,
+       .map_set_for_each_callback_args = map_set_for_each_callback_args,
+       .map_for_each_callback = bpf_for_each_hash_elem,
        BATCH_OPS(htab_percpu),
        .map_btf_name = "bpf_htab",
        .map_btf_id = &htab_percpu_map_btf_id,
        .map_update_elem = htab_lru_percpu_map_update_elem,
        .map_delete_elem = htab_lru_map_delete_elem,
        .map_seq_show_elem = htab_percpu_map_seq_show_elem,
+       .map_set_for_each_callback_args = map_set_for_each_callback_args,
+       .map_for_each_callback = bpf_for_each_hash_elem,
        BATCH_OPS(htab_lru_percpu),
        .map_btf_name = "bpf_htab",
        .map_btf_id = &htab_lru_percpu_map_btf_id,
 
        return 0;
 }
 
+int map_set_for_each_callback_args(struct bpf_verifier_env *env,
+                                  struct bpf_func_state *caller,
+                                  struct bpf_func_state *callee)
+{
+       /* bpf_for_each_map_elem(struct bpf_map *map, void *callback_fn,
+        *      void *callback_ctx, u64 flags);
+        * callback_fn(struct bpf_map *map, void *key, void *value,
+        *      void *callback_ctx);
+        */
+       callee->regs[BPF_REG_1] = caller->regs[BPF_REG_1];
+
+       callee->regs[BPF_REG_2].type = PTR_TO_MAP_KEY;
+       __mark_reg_known_zero(&callee->regs[BPF_REG_2]);
+       callee->regs[BPF_REG_2].map_ptr = caller->regs[BPF_REG_1].map_ptr;
+
+       callee->regs[BPF_REG_3].type = PTR_TO_MAP_VALUE;
+       __mark_reg_known_zero(&callee->regs[BPF_REG_3]);
+       callee->regs[BPF_REG_3].map_ptr = caller->regs[BPF_REG_1].map_ptr;
+
+       /* pointer to stack or null */
+       callee->regs[BPF_REG_4] = caller->regs[BPF_REG_3];
+
+       /* unused */
+       __mark_reg_not_init(env, &callee->regs[BPF_REG_5]);
+       return 0;
+}
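Seen from the BPF program, the register setup above lines up with the callback's C-level signature (argument types below are illustrative; the verifier only tracks the register types shown):

/* R1 = map             (copied from the helper's first argument)
 * R2 = PTR_TO_MAP_KEY   -> key
 * R3 = PTR_TO_MAP_VALUE -> value
 * R4 = callback_ctx     (stack pointer or NULL, copied from the helper's R3)
 * R5 = left uninitialized, so the callback may not read it
 */
static __u64 callback_fn(struct bpf_map *map, void *key, void *value,
			 void *callback_ctx);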
+
 static int set_callee_state(struct bpf_verifier_env *env,
                            struct bpf_func_state *caller,
                            struct bpf_func_state *callee, int insn_idx)