 struct in_device;
 extern int             ip_rt_init(void);
-extern void            rt_cache_flush(struct net *net, int how);
+extern void            rt_cache_flush(struct net *net);
 extern void            rt_flush_dev(struct net_device *dev);
 extern struct rtable *__ip_route_output_key(struct net *, struct flowi4 *flp);
 extern struct rtable *ip_route_output_flow(struct net *, struct flowi4 *flp,
 
        switch (event) {
        case NETDEV_CHANGEADDR:
                neigh_changeaddr(&arp_tbl, dev);
-               rt_cache_flush(dev_net(dev), 0);
+               rt_cache_flush(dev_net(dev));
                break;
        default:
                break;
 
                if (i == IPV4_DEVCONF_ACCEPT_LOCAL - 1 ||
                    i == IPV4_DEVCONF_ROUTE_LOCALNET - 1)
                        if ((new_value == 0) && (old_value != 0))
-                               rt_cache_flush(net, 0);
+                               rt_cache_flush(net);
        }
 
        return ret;
                                dev_disable_lro(idev->dev);
                        }
                        rtnl_unlock();
-                       rt_cache_flush(net, 0);
+                       rt_cache_flush(net);
                }
        }
 
        struct net *net = ctl->extra2;
 
        if (write && *valp != val)
-               rt_cache_flush(net, 0);
+               rt_cache_flush(net);
 
        return ret;
 }
 
        }
 
        if (flushed)
-               rt_cache_flush(net, -1);
+               rt_cache_flush(net);
 }
 
 /*
        net->ipv4.fibnl = NULL;
 }
 
-static void fib_disable_ip(struct net_device *dev, int force, int delay)
+static void fib_disable_ip(struct net_device *dev, int force)
 {
        if (fib_sync_down_dev(dev, force))
                fib_flush(dev_net(dev));
-       rt_cache_flush(dev_net(dev), delay);
+       rt_cache_flush(dev_net(dev));
        arp_ifdown(dev);
 }
 
                fib_sync_up(dev);
 #endif
                atomic_inc(&net->ipv4.dev_addr_genid);
-               rt_cache_flush(dev_net(dev), -1);
+               rt_cache_flush(dev_net(dev));
                break;
        case NETDEV_DOWN:
                fib_del_ifaddr(ifa, NULL);
                        /* Last address was deleted from this interface.
                         * Disable IP.
                         */
-                       fib_disable_ip(dev, 1, 0);
+                       fib_disable_ip(dev, 1);
                } else {
-                       rt_cache_flush(dev_net(dev), -1);
+                       rt_cache_flush(dev_net(dev));
                }
                break;
        }
        struct net *net = dev_net(dev);
 
        if (event == NETDEV_UNREGISTER) {
-               fib_disable_ip(dev, 2, -1);
+               fib_disable_ip(dev, 2);
                rt_flush_dev(dev);
                return NOTIFY_DONE;
        }
                fib_sync_up(dev);
 #endif
                atomic_inc(&net->ipv4.dev_addr_genid);
-               rt_cache_flush(dev_net(dev), -1);
+               rt_cache_flush(dev_net(dev));
                break;
        case NETDEV_DOWN:
-               fib_disable_ip(dev, 0, 0);
+               fib_disable_ip(dev, 0);
                break;
        case NETDEV_CHANGEMTU:
        case NETDEV_CHANGE:
-               rt_cache_flush(dev_net(dev), 0);
+               rt_cache_flush(dev_net(dev));
                break;
        case NETDEV_UNREGISTER_BATCH:
                break;
 
 
 static void fib4_rule_flush_cache(struct fib_rules_ops *ops)
 {
-       rt_cache_flush(ops->fro_net, -1);
+       rt_cache_flush(ops->fro_net);
 }
 
 static const struct fib_rules_ops __net_initdata fib4_rules_ops_template = {
 
 
                        fib_release_info(fi_drop);
                        if (state & FA_S_ACCESSED)
-                               rt_cache_flush(cfg->fc_nlinfo.nl_net, -1);
+                               rt_cache_flush(cfg->fc_nlinfo.nl_net);
                        rtmsg_fib(RTM_NEWROUTE, htonl(key), new_fa, plen,
                                tb->tb_id, &cfg->fc_nlinfo, NLM_F_REPLACE);
 
        list_add_tail_rcu(&new_fa->fa_list,
                          (fa ? &fa->fa_list : fa_head));
 
-       rt_cache_flush(cfg->fc_nlinfo.nl_net, -1);
+       rt_cache_flush(cfg->fc_nlinfo.nl_net);
        rtmsg_fib(RTM_NEWROUTE, htonl(key), new_fa, plen, tb->tb_id,
                  &cfg->fc_nlinfo, 0);
 succeeded:
                trie_leaf_remove(t, l);
 
        if (fa->fa_state & FA_S_ACCESSED)
-               rt_cache_flush(cfg->fc_nlinfo.nl_net, -1);
+               rt_cache_flush(cfg->fc_nlinfo.nl_net);
 
        fib_release_info(fa->fa_info);
        alias_free_mem_rcu(fa);
 
        atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
 }
 
-/*
- * delay < 0  : invalidate cache (fast : entries will be deleted later)
- * delay >= 0 : invalidate & flush cache (can be long)
- */
-void rt_cache_flush(struct net *net, int delay)
+void rt_cache_flush(struct net *net)
 {
        rt_cache_invalidate(net);
 }
 
 void ip_rt_multicast_event(struct in_device *in_dev)
 {
-       rt_cache_flush(dev_net(in_dev->dev), 0);
+       rt_cache_flush(dev_net(in_dev->dev));
 }
 
 #ifdef CONFIG_SYSCTL
                                        size_t *lenp, loff_t *ppos)
 {
        if (write) {
-               int flush_delay;
-               ctl_table ctl;
-               struct net *net;
-
-               memcpy(&ctl, __ctl, sizeof(ctl));
-               ctl.data = &flush_delay;
-               proc_dointvec(&ctl, write, buffer, lenp, ppos);
-
-               net = (struct net *)__ctl->extra1;
-               rt_cache_flush(net, flush_delay);
+               rt_cache_flush((struct net *)__ctl->extra1);
                return 0;
        }