 #define XFRM_QUEUE_TMO_MAX ((unsigned)(60*HZ))
 #define XFRM_MAX_QUEUE_LEN     100
 
-static struct dst_entry *xfrm_policy_sk_bundles;
-
 static DEFINE_SPINLOCK(xfrm_policy_afinfo_lock);
 static struct xfrm_policy_afinfo __rcu *xfrm_policy_afinfo[NPROTO]
                                                __read_mostly;
                                goto no_transform;
                        }
 
-                       dst_hold(&xdst->u.dst);
-
-                       spin_lock_bh(&net->xfrm.xfrm_policy_sk_bundle_lock);
-                       xdst->u.dst.next = xfrm_policy_sk_bundles;
-                       xfrm_policy_sk_bundles = &xdst->u.dst;
-                       spin_unlock_bh(&net->xfrm.xfrm_policy_sk_bundle_lock);
-
                        route = xdst->route;
                }
        }
        return dst;
 }
 
-static void __xfrm_garbage_collect(struct net *net)
-{
-       struct dst_entry *head, *next;
-
-       spin_lock_bh(&net->xfrm.xfrm_policy_sk_bundle_lock);
-       head = xfrm_policy_sk_bundles;
-       xfrm_policy_sk_bundles = NULL;
-       spin_unlock_bh(&net->xfrm.xfrm_policy_sk_bundle_lock);
-
-       while (head) {
-               next = head->next;
-               dst_free(head);
-               head = next;
-       }
-}
-
 void xfrm_garbage_collect(struct net *net)
 {
        flow_cache_flush(net);
-       __xfrm_garbage_collect(net);
 }
 EXPORT_SYMBOL(xfrm_garbage_collect);
 
 static void xfrm_garbage_collect_deferred(struct net *net)
 {
        flow_cache_flush_deferred(net);
-       __xfrm_garbage_collect(net);
 }
 
 static void xfrm_init_pmtu(struct dst_entry *dst)
        /* Initialize the per-net locks here */
        spin_lock_init(&net->xfrm.xfrm_state_lock);
        rwlock_init(&net->xfrm.xfrm_policy_lock);
-       spin_lock_init(&net->xfrm.xfrm_policy_sk_bundle_lock);
        mutex_init(&net->xfrm.xfrm_cfg_mutex);
 
        flow_cache_init(net);