static int nft_rbtree_gc_elem(const struct nft_set *__set,
                              struct nft_rbtree *priv,
-                             struct nft_rbtree_elem *rbe)
+                             struct nft_rbtree_elem *rbe,
+                             u8 genmask)
 {
        struct nft_set *set = (struct nft_set *)__set;
        struct rb_node *prev = rb_prev(&rbe->node);
-       struct nft_rbtree_elem *rbe_prev = NULL;
+       struct nft_rbtree_elem *rbe_prev;
        struct nft_set_gc_batch *gcb;
 
        gcb = nft_set_gc_batch_check(set, NULL, GFP_ATOMIC);
        if (!gcb)
                return -ENOMEM;
 
-       /* search for expired end interval coming before this element. */
+       /* search for end interval coming before this element.
+        * end intervals don't carry a timeout extension, they
+        * are coupled with the interval start element.
+        */
        while (prev) {
                rbe_prev = rb_entry(prev, struct nft_rbtree_elem, node);
-               if (nft_rbtree_interval_end(rbe_prev))
+               if (nft_rbtree_interval_end(rbe_prev) &&
+                   nft_set_elem_active(&rbe_prev->ext, genmask))
                        break;
 
                prev = rb_prev(prev);
        }
 
-       if (rbe_prev) {
+       if (prev) {
+               rbe_prev = rb_entry(prev, struct nft_rbtree_elem, node);
+
                rb_erase(&rbe_prev->node, &priv->root);
                atomic_dec(&set->nelems);
+               nft_set_gc_batch_add(gcb, rbe_prev);
        }
 
        rb_erase(&rbe->node, &priv->root);
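
Taken together, the walk now stops only at an end interval that is
active in the current generation, testing prev (rather than a possibly
stale rbe_prev) keeps an exhausted walk from erasing whatever node it
last visited, and the matched end interval is queued on the gc batch
instead of only being erased. A compact userspace model of the fixed
predecessor walk follows, with a plain array standing in for rb_prev()
order; the names (elem, find_prev_active_end) are purely illustrative,
not kernel API:

#include <stdbool.h>
#include <stdio.h>

struct elem {
	bool interval_end;	/* end-of-interval marker */
	bool active;		/* stands in for nft_set_elem_active(&ext, genmask) */
};

/* Walk backwards from idx - 1. The pre-fix logic stopped at the first
 * end interval regardless of its generation, so it could match an
 * element that is not active in the current generation.
 */
static int find_prev_active_end(const struct elem *e, int idx)
{
	for (int i = idx - 1; i >= 0; i--) {
		if (e[i].interval_end && e[i].active)
			return i;
	}
	return -1;	/* no active end interval precedes idx */
}

int main(void)
{
	/* index 1 is an end interval from a dead generation; the fixed
	 * walk skips it and reports index 0.
	 */
	struct elem tree[] = {
		{ .interval_end = true,  .active = true  },
		{ .interval_end = true,  .active = false },
		{ .interval_end = false, .active = true  },	/* expired start */
	};

	printf("prev active end: %d\n", find_prev_active_end(tree, 2));
	return 0;
}

Returning -1 here mirrors the patched if (prev) test: nothing is
erased unless the loop broke out on a genuine, active end interval.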
 
@@ ... @@ static int __nft_rbtree_insert(...)
                /* perform garbage collection to avoid bogus overlap reports. */
                if (nft_set_elem_expired(&rbe->ext)) {
-                       err = nft_rbtree_gc_elem(set, priv, rbe);
+                       err = nft_rbtree_gc_elem(set, priv, rbe, genmask);
                        if (err < 0)
                                return err;
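
The caller-side hunk threads the current genmask down into the walk,
so garbage collection during overlap detection can only match elements
that are alive in this generation. As a rough userspace model of that
activity test (the mask layout and the elem_active() helper are
illustrative assumptions, not the kernel's exact definitions):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* each element records the generations it is *inactive* in */
struct ext {
	uint8_t genmask;
};

static bool elem_active(const struct ext *ext, uint8_t genmask)
{
	/* active means: no inactive bit set for the current generation */
	return !(ext->genmask & genmask);
}

int main(void)
{
	uint8_t cur = 0x1;			/* current generation bit */
	struct ext live  = { .genmask = 0x0 };	/* active in all generations */
	struct ext dying = { .genmask = 0x1 };	/* marked inactive in this one */

	printf("live: %d, dying: %d\n",
	       elem_active(&live, cur), elem_active(&dying, cur));
	return 0;
}

Under this model the pre-fix walk, which never consulted the mask,
would still match the "dying" element; that is exactly the case the
added nft_set_elem_active() check filters out.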