diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
@@ ... @@ struct netns_ipv4 {
        struct hlist_head       *fib_table_hash;
        struct sock             *fibnl;
 
-       struct sock             **icmp_sk;
+       struct sock  * __percpu *icmp_sk;
+
        struct inet_peer_base   *peers;
        struct tcpm_hash_bucket *tcp_metrics_hash;
        unsigned int            tcp_metrics_hash_log;
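The new declaration reads inside out: icmp_sk is an ordinary pointer to
a percpu region, and each CPU's slot in that region holds one plain
struct sock pointer. The __percpu annotation is a sparse address-space
marker, so static checking complains if the field is dereferenced
directly rather than through the percpu accessors. A minimal sketch of
the same shape, using a hypothetical sk_slots variable in place of
net->ipv4.icmp_sk:

    #include <linux/percpu.h>
    #include <net/sock.h>

    /* Hypothetical stand-in for net->ipv4.icmp_sk: one struct sock *
     * slot per possible CPU, reachable only via percpu accessors.
     */
    static struct sock * __percpu *sk_slots;

    static int sk_slots_alloc(void)
    {
            /* One pointer-sized, zero-filled slot per possible CPU. */
            sk_slots = alloc_percpu(struct sock *);
            return sk_slots ? 0 : -ENOMEM;
    }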
 
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
@@ ... @@
  */
 static struct sock *icmp_sk(struct net *net)
 {
-       return net->ipv4.icmp_sk[smp_processor_id()];
+       return *this_cpu_ptr(net->ipv4.icmp_sk);
 }
 
 static inline struct sock *icmp_xmit_lock(struct net *net)
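Here *this_cpu_ptr() replaces the old smp_processor_id() array walk: it
adds the executing CPU's percpu offset to the base pointer, and the
outer dereference loads that CPU's socket pointer. The caller must keep
the CPU from changing underneath it, which holds here because
icmp_xmit_lock() performs the lookup with bottom halves disabled. The
access pattern, against the hypothetical sk_slots above:

    /* Fetch the executing CPU's socket. The caller must be
     * non-preemptible, e.g. under local_bh_disable() as in
     * icmp_xmit_lock().
     */
    static struct sock *sk_slots_get(void)
    {
            return *this_cpu_ptr(sk_slots);
    }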
@@ ... @@ static void __net_exit icmp_sk_exit(struct net *net)
        int i;
 
        for_each_possible_cpu(i)
-               inet_ctl_sock_destroy(net->ipv4.icmp_sk[i]);
-       kfree(net->ipv4.icmp_sk);
+               inet_ctl_sock_destroy(*per_cpu_ptr(net->ipv4.icmp_sk, i));
+       free_percpu(net->ipv4.icmp_sk);
        net->ipv4.icmp_sk = NULL;
 }
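per_cpu_ptr(ptr, cpu) is the cross-CPU counterpart of this_cpu_ptr():
it lets the exit path visit every possible CPU's slot from whichever
CPU it runs on, and free_percpu() then releases the whole region in one
call, replacing the kfree() of the old flat array. A teardown sketch,
again with the hypothetical sk_slots:

    /* Destroy every CPU's socket, then release the percpu region. */
    static void sk_slots_free(void)
    {
            int cpu;

            for_each_possible_cpu(cpu) {
                    struct sock *sk = *per_cpu_ptr(sk_slots, cpu);

                    if (sk)         /* slots start out NULL */
                            inet_ctl_sock_destroy(sk);
            }
            free_percpu(sk_slots);
            sk_slots = NULL;
    }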
 
@@ ... @@ static int __net_init icmp_sk_init(struct net *net)
 {
        int i, err;
 
-       net->ipv4.icmp_sk =
-               kzalloc(nr_cpu_ids * sizeof(struct sock *), GFP_KERNEL);
-       if (net->ipv4.icmp_sk == NULL)
+       net->ipv4.icmp_sk = alloc_percpu(struct sock *);
+       if (!net->ipv4.icmp_sk)
                return -ENOMEM;
 
        for_each_possible_cpu(i) {
                struct sock *sk;

                err = inet_ctl_sock_create(&sk, PF_INET,
                                           SOCK_RAW, IPPROTO_ICMP, net);
                if (err < 0)
                        goto fail;
 
-               net->ipv4.icmp_sk[i] = sk;
+               *per_cpu_ptr(net->ipv4.icmp_sk, i) = sk;
 
                /* Enough space for 2 64K ICMP packets, including
                 * sk_buff/skb_shared_info struct overhead.
                 */
                sk->sk_sndbuf = 2 * SKB_TRUESIZE(64 * 1024);
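The comment documents the send-buffer sizing on the line that follows
it: sk_sndbuf must hold two maximal (64K) ICMP packets plus their
metadata. SKB_TRUESIZE() from <linux/skbuff.h> performs exactly that
inflation, adding the aligned sizes of the two bookkeeping structs to
the payload figure:

    #define SKB_TRUESIZE(X) ((X) +                                        \
                             SKB_DATA_ALIGN(sizeof(struct sk_buff)) +     \
                             SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))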
 
@@ ... @@ static int __net_init icmp_sk_init(struct net *net)
 fail:
        for_each_possible_cpu(i)
-               inet_ctl_sock_destroy(net->ipv4.icmp_sk[i]);
-       kfree(net->ipv4.icmp_sk);
+               inet_ctl_sock_destroy(*per_cpu_ptr(net->ipv4.icmp_sk, i));
+       free_percpu(net->ipv4.icmp_sk);
        return err;
 }
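Putting the pieces together, a hypothetical self-contained module
sketch of the lifecycle this patch adopts: alloc_percpu() hands out one
zero-filled pointer slot per possible CPU, per_cpu_ptr() fills and
drains the slots from any CPU, this_cpu_ptr() serves the pinned fast
path, and free_percpu() releases the region. All demo_* names are
invented for illustration:

    // SPDX-License-Identifier: GPL-2.0
    #include <linux/module.h>
    #include <linux/percpu.h>
    #include <linux/slab.h>
    #include <linux/smp.h>

    struct demo_item {
            int owner_cpu;
    };

    /* One demo_item pointer slot per possible CPU. */
    static struct demo_item * __percpu *demo_slots;

    static void demo_drain(void)
    {
            int cpu;

            /* Walk every slot from this CPU; kfree(NULL) is a no-op,
             * so never-filled slots are harmless.
             */
            for_each_possible_cpu(cpu)
                    kfree(*per_cpu_ptr(demo_slots, cpu));
            free_percpu(demo_slots);
            demo_slots = NULL;
    }

    static int __init demo_init(void)
    {
            int cpu;

            demo_slots = alloc_percpu(struct demo_item *);
            if (!demo_slots)
                    return -ENOMEM;

            for_each_possible_cpu(cpu) {
                    struct demo_item *item;

                    item = kzalloc(sizeof(*item), GFP_KERNEL);
                    if (!item) {
                            demo_drain();
                            return -ENOMEM;
                    }
                    item->owner_cpu = cpu;
                    *per_cpu_ptr(demo_slots, cpu) = item;
            }

            /* Fast path: pin the CPU, then read this CPU's own slot. */
            cpu = get_cpu();
            pr_info("demo: cpu %d owns item for cpu %d\n",
                    cpu, (*this_cpu_ptr(demo_slots))->owner_cpu);
            put_cpu();
            return 0;
    }

    static void __exit demo_exit(void)
    {
            demo_drain();
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");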