 static const struct icmp_control icmp_pointers[NR_ICMP_TYPES+1];
 
-/*
- *     The ICMP socket(s). This is the most convenient way to flow control
- *     our ICMP output as well as maintain a clean interface throughout
- *     all layers. All Socketless IP sends will soon be gone.
- *
- *     On SMP we have one ICMP socket per-cpu.
- */
-static struct sock *icmp_sk(struct net *net)
-{
-       return this_cpu_read(*net->ipv4.icmp_sk);
-}
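+/* The shared ICMP socket(s), one per cpu, used by every network
+ * namespace: icmp_xmit_lock() binds the sender's netns to the
+ * socket for the duration of a transmit via sock_net_set().
+ */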
+static DEFINE_PER_CPU(struct sock *, ipv4_icmp_sk);
 
 /* Called with BH disabled */
 static inline struct sock *icmp_xmit_lock(struct net *net)
 {
        struct sock *sk;
 
-       sk = icmp_sk(net);
+       sk = this_cpu_read(ipv4_icmp_sk);
 
        if (unlikely(!spin_trylock(&sk->sk_lock.slock))) {
                /* This can happen if the output path signals a
                 * dst_link_failure() or an icmp_send allows
                 * (e.g. redirect case in ip_options_compile())
                 */
                return NULL;
        }
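+       /* Bind the shared per-cpu socket to the caller's netns while
+        * the lock is held; icmp_xmit_unlock() resets it to init_net.
+        */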
+       sock_net_set(sk, net);
        return sk;
 }
 
 static inline void icmp_xmit_unlock(struct sock *sk)
 {
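+       /* Reset the socket to init_net before releasing the lock so
+        * the shared socket never carries a stale netns pointer.
+        */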
+       sock_net_set(sk, &init_net);
        spin_unlock(&sk->sk_lock.slock);
 }
 
        return 0;
 }
 
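+/* @sk is the per-cpu ICMP socket, taken by the caller under
+ * icmp_xmit_lock().
+ */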
-static void icmp_push_reply(struct icmp_bxm *icmp_param,
+static void icmp_push_reply(struct sock *sk,
+                           struct icmp_bxm *icmp_param,
                            struct flowi4 *fl4,
                            struct ipcm_cookie *ipc, struct rtable **rt)
 {
-       struct sock *sk;
        struct sk_buff *skb;
 
-       sk = icmp_sk(dev_net((*rt)->dst.dev));
        if (ip_append_data(sk, fl4, icmp_glue_bits, icmp_param,
                           icmp_param->data_len+icmp_param->head_len,
                           icmp_param->head_len,
        if (IS_ERR(rt))
                goto out_unlock;
        if (icmpv4_xrlim_allow(net, rt, &fl4, type, code))
-               icmp_push_reply(icmp_param, &fl4, &ipc, &rt);
+               icmp_push_reply(sk, icmp_param, &fl4, &ipc, &rt);
        ip_rt_put(rt);
 out_unlock:
        icmp_xmit_unlock(sk);
        if (!fl4.saddr)
                fl4.saddr = htonl(INADDR_DUMMY);
 
-       icmp_push_reply(&icmp_param, &fl4, &ipc, &rt);
+       icmp_push_reply(sk, &icmp_param, &fl4, &ipc, &rt);
 ende:
        ip_rt_put(rt);
 out_unlock:
        },
 };
 
-static void __net_exit icmp_sk_exit(struct net *net)
-{
-       int i;
-
-       for_each_possible_cpu(i)
-               inet_ctl_sock_destroy(*per_cpu_ptr(net->ipv4.icmp_sk, i));
-       free_percpu(net->ipv4.icmp_sk);
-       net->ipv4.icmp_sk = NULL;
-}
-
 static int __net_init icmp_sk_init(struct net *net)
 {
-       int i, err;
-
-       net->ipv4.icmp_sk = alloc_percpu(struct sock *);
-       if (!net->ipv4.icmp_sk)
-               return -ENOMEM;
-
-       for_each_possible_cpu(i) {
-               struct sock *sk;
-
-               err = inet_ctl_sock_create(&sk, PF_INET,
-                                          SOCK_RAW, IPPROTO_ICMP, net);
-               if (err < 0)
-                       goto fail;
-
-               *per_cpu_ptr(net->ipv4.icmp_sk, i) = sk;
-
-               /* Enough space for 2 64K ICMP packets, including
-                * sk_buff/skb_shared_info struct overhead.
-                */
-               sk->sk_sndbuf = 2 * SKB_TRUESIZE(64 * 1024);
-
-               /*
-                * Speedup sock_wfree()
-                */
-               sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
-               inet_sk(sk)->pmtudisc = IP_PMTUDISC_DONT;
-       }
-
        /* Control parameters for ECHO replies. */
        net->ipv4.sysctl_icmp_echo_ignore_all = 0;
        net->ipv4.sysctl_icmp_echo_enable_probe = 0;
        net->ipv4.sysctl_icmp_errors_use_inbound_ifaddr = 0;
 
        return 0;
-
-fail:
-       icmp_sk_exit(net);
-       return err;
 }
 
 static struct pernet_operations __net_initdata icmp_sk_ops = {
        .init = icmp_sk_init,
-       .exit = icmp_sk_exit,
 };
 
 int __init icmp_init(void)
 {
+       int err, i;
+
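+       /* Create the per-cpu ICMP sockets once at boot, parented to
+        * init_net; each transmit re-targets them with sock_net_set().
+        */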
+       for_each_possible_cpu(i) {
+               struct sock *sk;
+
+               err = inet_ctl_sock_create(&sk, PF_INET,
+                                          SOCK_RAW, IPPROTO_ICMP, &init_net);
+               if (err < 0)
+                       return err;
+
+               per_cpu(ipv4_icmp_sk, i) = sk;
+
+               /* Enough space for 2 64K ICMP packets, including
+                * sk_buff/skb_shared_info struct overhead.
+                */
+               sk->sk_sndbuf = 2 * SKB_TRUESIZE(64 * 1024);
+
+               /*
+                * Speedup sock_wfree()
+                */
+               sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
+               inet_sk(sk)->pmtudisc = IP_PMTUDISC_DONT;
+       }
        return register_pernet_subsys(&icmp_sk_ops);
 }