struct uncached_list {
        spinlock_t              lock;
        struct list_head        head;
+       struct list_head        quarantine;
 };
 
 static DEFINE_PER_CPU_ALIGNED(struct uncached_list, rt_uncached_list);
 
@@ ... @@ void rt_del_uncached_list(struct rtable *rt)
        if (!list_empty(&rt->rt_uncached)) {
                struct uncached_list *ul = rt->rt_uncached_list;
 
                spin_lock_bh(&ul->lock);
-               list_del(&rt->rt_uncached);
+               list_del_init(&rt->rt_uncached);
                spin_unlock_bh(&ul->lock);
        }
 }
 
 void rt_flush_dev(struct net_device *dev)
 {
-       struct rtable *rt;
+       struct rtable *rt, *safe;
        int cpu;
 
        for_each_possible_cpu(cpu) {
                struct uncached_list *ul = &per_cpu(rt_uncached_list, cpu);
 
+               if (list_empty(&ul->head))
+                       continue;
+
                spin_lock_bh(&ul->lock);
-               list_for_each_entry(rt, &ul->head, rt_uncached) {
+               list_for_each_entry_safe(rt, safe, &ul->head, rt_uncached) {
                        if (rt->dst.dev != dev)
                                continue;
                        rt->dst.dev = blackhole_netdev;
                        dev_replace_track(dev, blackhole_netdev,
                                          &rt->dst.dev_tracker,
                                          GFP_ATOMIC);
+                       list_move(&rt->rt_uncached, &ul->quarantine);
                }
                spin_unlock_bh(&ul->lock);
        }
 }
 
@@ ... @@ int __init ip_rt_init(void)
        for_each_possible_cpu(cpu) {
                struct uncached_list *ul = &per_cpu(rt_uncached_list, cpu);
 
                INIT_LIST_HEAD(&ul->head);
+               INIT_LIST_HEAD(&ul->quarantine);
                spin_lock_init(&ul->lock);
        }
 #ifdef CONFIG_IP_ROUTE_CLASSID
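
To see the pattern these hunks implement outside the kernel, here is a minimal, self-contained user-space sketch. The list helpers mirror the semantics of the kernel's <linux/list.h> (list_add(), list_move(), list_del_init(), list_empty()) but are reimplemented so the example compiles on its own, and the remaining names (struct entry, its dev field, the head/quarantine lists in main()) are hypothetical stand-ins for struct rtable, rt->dst.dev and the per-cpu lists. It demonstrates the two properties the patch relies on: a flush pass moves matching entries onto quarantine so later passes never rescan them (keeping the list behind the new list_empty() fast path short), and final deletion via list_del_init() works no matter which of the two lists the entry sits on, which is why rt_del_uncached_list() switches away from plain list_del().

/*
 * Minimal sketch of the quarantine-list pattern, in plain user-space C.
 * The list helpers reimplement the semantics of <linux/list.h>; struct
 * entry, dev, head and quarantine are hypothetical stand-ins.
 */
#include <stdio.h>
#include <stddef.h>

struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }

static void list_add(struct list_head *n, struct list_head *head)
{
        head->next->prev = n;
        n->next = head->next;
        n->prev = head;
        head->next = n;
}

static void __list_del(struct list_head *prev, struct list_head *next)
{
        next->prev = prev;
        prev->next = next;
}

/* Unlink and re-initialize: safe whichever list the entry is on, and
 * leaves the entry self-linked so list_empty(&entry) reads true. */
static void list_del_init(struct list_head *e)
{
        __list_del(e->prev, e->next);
        INIT_LIST_HEAD(e);
}

/* Unlink from the current list and add to another one. */
static void list_move(struct list_head *e, struct list_head *head)
{
        __list_del(e->prev, e->next);
        list_add(e, head);
}

static int list_empty(const struct list_head *h) { return h->next == h; }

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct entry {
        int dev;                        /* stand-in for rt->dst.dev */
        struct list_head node;          /* stand-in for rt->rt_uncached */
};

int main(void)
{
        struct list_head head, quarantine;
        struct entry a = { .dev = 1 }, b = { .dev = 2 };
        struct list_head *p, *tmp;

        INIT_LIST_HEAD(&head);
        INIT_LIST_HEAD(&quarantine);
        list_add(&a.node, &head);
        list_add(&b.node, &head);

        /* "Flush" dev 1, as rt_flush_dev() does: iterate with a saved
         * next pointer (the list_for_each_entry_safe() idiom) because
         * list_move() unlinks the cursor from the list being walked. */
        for (p = head.next, tmp = p->next; p != &head; p = tmp, tmp = p->next) {
                struct entry *e = container_of(p, struct entry, node);

                if (e->dev == 1)
                        list_move(&e->node, &quarantine);
        }

        /* Final removal succeeds for both entries even though they now
         * live on different lists; this mirrors rt_del_uncached_list(). */
        list_del_init(&a.node);
        list_del_init(&b.node);

        printf("head empty=%d quarantine empty=%d\n",
               list_empty(&head), list_empty(&quarantine));
        return 0;
}

Compiled and run, this prints "head empty=1 quarantine empty=1": both entries are gone, and neither deletion had to know whether the flush pass had already moved its entry to quarantine.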