/* List elements */
        int (*list)(const struct ip_set *set, struct sk_buff *skb,
                    struct netlink_callback *cb);
+       /* Keep listing private when resizing runs in parallel */
+       void (*uref)(struct ip_set *set, struct netlink_callback *cb,
+                    bool start);
 
        /* Return true if "b" set is the same as "a"
         * according to the create set parameters */
 
 /* Netlink CB args */
 enum {
-       IPSET_CB_NET = 0,
-       IPSET_CB_DUMP,
-       IPSET_CB_INDEX,
-       IPSET_CB_ARG0,
+       IPSET_CB_NET = 0,       /* net namespace */
+       IPSET_CB_DUMP,          /* dump single set/all sets */
+       IPSET_CB_INDEX,         /* set index */
+       IPSET_CB_PRIVATE,       /* set private data */
+       IPSET_CB_ARG0,          /* type specific */
        IPSET_CB_ARG1,
-       IPSET_CB_ARG2,
 };
 
 /* register and unregister set references */
 
 static int
 ip_set_dump_done(struct netlink_callback *cb)
 {
-       struct ip_set_net *inst = (struct ip_set_net *)cb->args[IPSET_CB_NET];
        if (cb->args[IPSET_CB_ARG0]) {
-               pr_debug("release set %s\n",
-                        ip_set(inst, cb->args[IPSET_CB_INDEX])->name);
-               __ip_set_put_byindex(inst,
-                       (ip_set_id_t) cb->args[IPSET_CB_INDEX]);
+               struct ip_set_net *inst =
+                       (struct ip_set_net *)cb->args[IPSET_CB_NET];
+               ip_set_id_t index = (ip_set_id_t)cb->args[IPSET_CB_INDEX];
+               struct ip_set *set = ip_set(inst, index);
+
+               /* Let the set type release its private dump state
+                * (e.g. the hash table pinned by uref at dump start)
+                * before the set reference itself is put.
+                */
+               if (set->variant->uref)
+                       set->variant->uref(set, cb, false);
+               pr_debug("release set %s\n", set->name);
+               __ip_set_put_byindex(inst, index);
        }
        return 0;
 }
        nla_parse(cda, IPSET_ATTR_CMD_MAX,
                  attr, nlh->nlmsg_len - min_len, ip_set_setname_policy);
 
-       /* cb->args[IPSET_CB_NET]:      net namespace
-        *         [IPSET_CB_DUMP]:     dump single set/all sets
-        *         [IPSET_CB_INDEX]:    set index
-        *         [IPSET_CB_ARG0]:     type specific
-        */
-
        if (cda[IPSET_ATTR_SETNAME]) {
                struct ip_set *set;
 
                                goto release_refcount;
                        if (dump_flags & IPSET_FLAG_LIST_HEADER)
                                goto next_set;
+                       if (set->variant->uref)
+                               set->variant->uref(set, cb, true);
                        /* Fall through and add elements */
                default:
                        read_lock_bh(&set->lock);
                dump_type = DUMP_LAST;
                cb->args[IPSET_CB_DUMP] = dump_type | (dump_flags << 16);
                cb->args[IPSET_CB_INDEX] = 0;
+               if (set && set->variant->uref)
+                       set->variant->uref(set, cb, false);
                goto dump_last;
        }
        goto out;
 release_refcount:
        /* If there was an error or set is done, release set */
        if (ret || !cb->args[IPSET_CB_ARG0]) {
-               pr_debug("release set %s\n", ip_set(inst, index)->name);
+               set = ip_set(inst, index);
+               if (set->variant->uref)
+                       set->variant->uref(set, cb, false);
+               pr_debug("release set %s\n", set->name);
                __ip_set_put_byindex(inst, index);
                cb->args[IPSET_CB_ARG0] = 0;
        }
 
 
 /* The hash table: the table size stored here in order to make resizing easy */
 struct htable {
+       atomic_t ref;           /* References for resizing */
+       atomic_t uref;          /* References for dumping */
        u8 htable_bits;         /* size of hash table == 2^htable_bits */
        struct hbucket bucket[0]; /* hashtable buckets */
 };
 #undef mtype_del
 #undef mtype_test_cidrs
 #undef mtype_test
+#undef mtype_uref
 #undef mtype_expire
 #undef mtype_resize
 #undef mtype_head
 #define mtype_del              IPSET_TOKEN(MTYPE, _del)
 #define mtype_test_cidrs       IPSET_TOKEN(MTYPE, _test_cidrs)
 #define mtype_test             IPSET_TOKEN(MTYPE, _test)
+#define mtype_uref             IPSET_TOKEN(MTYPE, _uref)
 #define mtype_expire           IPSET_TOKEN(MTYPE, _expire)
 #define mtype_resize           IPSET_TOKEN(MTYPE, _resize)
 #define mtype_head             IPSET_TOKEN(MTYPE, _head)
        t->htable_bits = htable_bits;
 
        read_lock_bh(&set->lock);
+       /* There can't be another parallel resizing, but dumping is possible */
+       atomic_set(&orig->ref, 1);
+       atomic_inc(&orig->uref);
        for (i = 0; i < jhash_size(orig->htable_bits); i++) {
                n = hbucket(orig, i);
                for (j = 0; j < n->pos; j++) {
 #ifdef IP_SET_HASH_WITH_NETS
                                mtype_data_reset_flags(data, &flags);
 #endif
+                               atomic_set(&orig->ref, 0);
+                               atomic_dec(&orig->uref);
                                read_unlock_bh(&set->lock);
                                mtype_ahash_destroy(set, t, false);
                                if (ret == -EAGAIN)
 
        pr_debug("set %s resized from %u (%p) to %u (%p)\n", set->name,
                 orig->htable_bits, orig, t->htable_bits, t);
-       mtype_ahash_destroy(set, orig, false);
+       /* If there's nobody else dumping the table, destroy it */
+       if (atomic_dec_and_test(&orig->uref)) {
+               pr_debug("Table destroy by resize %p\n", orig);
+               mtype_ahash_destroy(set, orig, false);
+       }
 
        return 0;
 }
        return -EMSGSIZE;
 }
 
+/* Make it possible to run dumping in parallel with resizing:
+ * at dump start, take an extra reference to the hash table
+ * currently published via RCU and stash its pointer in the
+ * netlink cb, so the dump keeps walking a stable table even
+ * if a resize installs a new one meanwhile.
+ */
+static void
+mtype_uref(struct ip_set *set, struct netlink_callback *cb, bool start)
+{
+       struct htype *h = set->data;
+       struct htable *t;
+
+       if (start) {
+               rcu_read_lock_bh();
+               t = rcu_dereference_bh_nfnl(h->table);
+               atomic_inc(&t->uref);
+               cb->args[IPSET_CB_PRIVATE] = (unsigned long)t;
+               rcu_read_unlock_bh();
+       } else if (cb->args[IPSET_CB_PRIVATE]) {
+               t = (struct htable *)cb->args[IPSET_CB_PRIVATE];
+               /* Drop the dump reference; if we are the last user
+                * and a resize has already detached this table
+                * (ref was set by mtype_resize), freeing it is our job.
+                */
+               if (atomic_dec_and_test(&t->uref) && atomic_read(&t->ref)) {
+                       /* Resizing didn't destroy the hash table */
+                       pr_debug("Table destroy by dump: %p\n", t);
+                       mtype_ahash_destroy(set, t, false);
+               }
+               cb->args[IPSET_CB_PRIVATE] = 0;
+       }
+}
+
 /* Reply a LIST/SAVE request: dump the elements of the specified set */
 static int
 mtype_list(const struct ip_set *set,
           struct sk_buff *skb, struct netlink_callback *cb)
 {
-       const struct htype *h = set->data;
-       const struct htable *t = rcu_dereference_bh_nfnl(h->table);
+       const struct htable *t;
        struct nlattr *atd, *nested;
        const struct hbucket *n;
        const struct mtype_elem *e;
        if (!atd)
                return -EMSGSIZE;
        pr_debug("list hash set %s\n", set->name);
+       t = (const struct htable *)cb->args[IPSET_CB_PRIVATE];
        for (; cb->args[IPSET_CB_ARG0] < jhash_size(t->htable_bits);
             cb->args[IPSET_CB_ARG0]++) {
                incomplete = skb_tail_pointer(skb);
        .flush  = mtype_flush,
        .head   = mtype_head,
        .list   = mtype_list,
+       .uref   = mtype_uref,
        .resize = mtype_resize,
        .same_set = mtype_same_set,
 };