true, false);
 }
 
+/* Iterator state kept in the seq_file private data across the
+ * bpf_hash_map_seq_{start,next,stop} callbacks.  bucket_id and
+ * skip_elems record how far the previous read() got so that iteration
+ * can resume there; flags holds the irq flags returned by
+ * htab_lock_bucket() for the currently locked bucket.
+ */
+struct bpf_iter_seq_hash_map_info {
+       struct bpf_map *map;
+       struct bpf_htab *htab;
+       void *percpu_value_buf; /* non-NULL means percpu hash */
+       unsigned long flags;
+       u32 bucket_id;
+       u32 skip_elems;
+};
+
+static struct htab_elem *
+bpf_hash_map_seq_find_next(struct bpf_iter_seq_hash_map_info *info,
+                          struct htab_elem *prev_elem)
+{
+       const struct bpf_htab *htab = info->htab;
+       unsigned long flags = info->flags;
+       u32 skip_elems = info->skip_elems;
+       u32 bucket_id = info->bucket_id;
+       struct hlist_nulls_head *head;
+       struct hlist_nulls_node *n;
+       struct htab_elem *elem;
+       struct bucket *b;
+       u32 i, count;
+
+       if (bucket_id >= htab->n_buckets)
+               return NULL;
+
+       /* try to find next elem in the same bucket */
+       if (prev_elem) {
+               /* No update/deletion happened on this bucket, so prev_elem
+                * should still be valid and we won't skip any elements.
+                */
+               n = rcu_dereference_raw(hlist_nulls_next_rcu(&prev_elem->hash_node));
+               elem = hlist_nulls_entry_safe(n, struct htab_elem, hash_node);
+               if (elem)
+                       return elem;
+
+               /* not found, unlock and go to the next bucket */
+               b = &htab->buckets[bucket_id++];
+               htab_unlock_bucket(htab, b, flags);
+               skip_elems = 0;
+       }
+
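+       /* Scan buckets from bucket_id onwards, taking each bucket's lock
+        * and skipping the first skip_elems elements, which were already
+        * shown before the bucket lock was last dropped.
+        */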
+       for (i = bucket_id; i < htab->n_buckets; i++) {
+               b = &htab->buckets[i];
+               flags = htab_lock_bucket(htab, b);
+
+               count = 0;
+               head = &b->head;
+               hlist_nulls_for_each_entry_rcu(elem, n, head, hash_node) {
+                       if (count >= skip_elems) {
+                               info->flags = flags;
+                               info->bucket_id = i;
+                               info->skip_elems = count;
+                               return elem;
+                       }
+                       count++;
+               }
+
+               htab_unlock_bucket(htab, b, flags);
+               skip_elems = 0;
+       }
+
+       info->bucket_id = i;
+       info->skip_elems = 0;
+       return NULL;
+}
+
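+/* seq_file operations: seq_start looks up the first element to show and
+ * leaves its bucket locked; seq_next advances under that lock; seq_stop
+ * either drops the lock or, when iteration has finished, invokes the
+ * program one last time with a NULL element so it can do end-of-dump
+ * processing.
+ */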
+static void *bpf_hash_map_seq_start(struct seq_file *seq, loff_t *pos)
+{
+       struct bpf_iter_seq_hash_map_info *info = seq->private;
+       struct htab_elem *elem;
+
+       elem = bpf_hash_map_seq_find_next(info, NULL);
+       if (!elem)
+               return NULL;
+
+       if (*pos == 0)
+               ++*pos;
+       return elem;
+}
+
+static void *bpf_hash_map_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+       struct bpf_iter_seq_hash_map_info *info = seq->private;
+
+       ++*pos;
+       ++info->skip_elems;
+       return bpf_hash_map_seq_find_next(info, v);
+}
+
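+/* Run the iterator program for one element.  For percpu maps, the
+ * per-CPU values are flattened into percpu_value_buf (value_size
+ * rounded up to 8 bytes per possible CPU) so the program sees them as
+ * one contiguous array at ctx.value.
+ */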
+static int __bpf_hash_map_seq_show(struct seq_file *seq, struct htab_elem *elem)
+{
+       struct bpf_iter_seq_hash_map_info *info = seq->private;
+       u32 roundup_key_size, roundup_value_size;
+       struct bpf_iter__bpf_map_elem ctx = {};
+       struct bpf_map *map = info->map;
+       struct bpf_iter_meta meta;
+       int ret = 0, off = 0, cpu;
+       struct bpf_prog *prog;
+       void __percpu *pptr;
+
+       meta.seq = seq;
+       prog = bpf_iter_get_info(&meta, elem == NULL);
+       if (prog) {
+               ctx.meta = &meta;
+               ctx.map = info->map;
+               if (elem) {
+                       roundup_key_size = round_up(map->key_size, 8);
+                       ctx.key = elem->key;
+                       if (!info->percpu_value_buf) {
+                               ctx.value = elem->key + roundup_key_size;
+                       } else {
+                               roundup_value_size = round_up(map->value_size, 8);
+                               pptr = htab_elem_get_ptr(elem, map->key_size);
+                               for_each_possible_cpu(cpu) {
+                                       bpf_long_memcpy(info->percpu_value_buf + off,
+                                                       per_cpu_ptr(pptr, cpu),
+                                                       roundup_value_size);
+                                       off += roundup_value_size;
+                               }
+                               ctx.value = info->percpu_value_buf;
+                       }
+               }
+               ret = bpf_iter_run_prog(prog, &ctx);
+       }
+
+       return ret;
+}
+
+static int bpf_hash_map_seq_show(struct seq_file *seq, void *v)
+{
+       return __bpf_hash_map_seq_show(seq, v);
+}
+
+static void bpf_hash_map_seq_stop(struct seq_file *seq, void *v)
+{
+       struct bpf_iter_seq_hash_map_info *info = seq->private;
+
+       if (!v)
+               (void)__bpf_hash_map_seq_show(seq, NULL);
+       else
+               htab_unlock_bucket(info->htab,
+                                  &info->htab->buckets[info->bucket_id],
+                                  info->flags);
+}
+
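+/* Preallocate the flat value buffer used by __bpf_hash_map_seq_show()
+ * for percpu maps: one 8-byte-aligned value slot per possible CPU.
+ */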
+static int bpf_iter_init_hash_map(void *priv_data,
+                                 struct bpf_iter_aux_info *aux)
+{
+       struct bpf_iter_seq_hash_map_info *seq_info = priv_data;
+       struct bpf_map *map = aux->map;
+       void *value_buf;
+       u32 buf_size;
+
+       if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
+           map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
+               buf_size = round_up(map->value_size, 8) * num_possible_cpus();
+               value_buf = kmalloc(buf_size, GFP_USER | __GFP_NOWARN);
+               if (!value_buf)
+                       return -ENOMEM;
+
+               seq_info->percpu_value_buf = value_buf;
+       }
+
+       seq_info->map = map;
+       seq_info->htab = container_of(map, struct bpf_htab, map);
+       return 0;
+}
+
+static void bpf_iter_fini_hash_map(void *priv_data)
+{
+       struct bpf_iter_seq_hash_map_info *seq_info = priv_data;
+
+       kfree(seq_info->percpu_value_buf);
+}
+
+static const struct seq_operations bpf_hash_map_seq_ops = {
+       .start  = bpf_hash_map_seq_start,
+       .next   = bpf_hash_map_seq_next,
+       .stop   = bpf_hash_map_seq_stop,
+       .show   = bpf_hash_map_seq_show,
+};
+
+static const struct bpf_iter_seq_info iter_seq_info = {
+       .seq_ops                = &bpf_hash_map_seq_ops,
+       .init_seq_private       = bpf_iter_init_hash_map,
+       .fini_seq_private       = bpf_iter_fini_hash_map,
+       .seq_priv_size          = sizeof(struct bpf_iter_seq_hash_map_info),
+};
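+
+/* A minimal sketch of an iterator program consuming this seq_info,
+ * assuming the "iter/bpf_map_elem" target wired up elsewhere in this
+ * series and the BPF_SEQ_PRINTF macro from the selftests headers; the
+ * 4-byte key / 8-byte value layout is a made-up example:
+ *
+ *	SEC("iter/bpf_map_elem")
+ *	int dump_elem(struct bpf_iter__bpf_map_elem *ctx)
+ *	{
+ *		struct seq_file *seq = ctx->meta->seq;
+ *		__u32 *key = ctx->key;
+ *		__u64 *val = ctx->value;
+ *
+ *		// NULL key/value is the final call from seq_stop
+ *		if (!key || !val)
+ *			return 0;
+ *
+ *		BPF_SEQ_PRINTF(seq, "%u: %llu\n", *key, *val);
+ *		return 0;
+ *	}
+ */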
+
 static int htab_map_btf_id;
 const struct bpf_map_ops htab_map_ops = {
        .map_alloc_check = htab_map_alloc_check,
@@ ... @@ const struct bpf_map_ops htab_map_ops = {
        BATCH_OPS(htab),
        .map_btf_name = "bpf_htab",
        .map_btf_id = &htab_map_btf_id,
+       .iter_seq_info = &iter_seq_info,
 };
 
 static int htab_lru_map_btf_id;
@@ ... @@ const struct bpf_map_ops htab_lru_map_ops = {
        BATCH_OPS(htab_lru),
        .map_btf_name = "bpf_htab",
        .map_btf_id = &htab_lru_map_btf_id,
+       .iter_seq_info = &iter_seq_info,
 };
 
 /* Called from eBPF program */
@@ ... @@ const struct bpf_map_ops htab_percpu_map_ops = {
        BATCH_OPS(htab_percpu),
        .map_btf_name = "bpf_htab",
        .map_btf_id = &htab_percpu_map_btf_id,
+       .iter_seq_info = &iter_seq_info,
 };
 
 static int htab_lru_percpu_map_btf_id;
@@ ... @@ const struct bpf_map_ops htab_lru_percpu_map_ops = {
        BATCH_OPS(htab_lru_percpu),
        .map_btf_name = "bpf_htab",
        .map_btf_id = &htab_lru_percpu_map_btf_id,
+       .iter_seq_info = &iter_seq_info,
 };
 
 static int fd_htab_map_alloc_check(union bpf_attr *attr)