 DEFINE_SPINLOCK(nf_conntrack_lock);
 EXPORT_SYMBOL_GPL(nf_conntrack_lock);
 
-/* nf_conntrack_standalone needs this */
-atomic_t nf_conntrack_count = ATOMIC_INIT(0);
-EXPORT_SYMBOL_GPL(nf_conntrack_count);
-
 unsigned int nf_conntrack_htable_size __read_mostly;
 EXPORT_SYMBOL_GPL(nf_conntrack_htable_size);
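The hunk above removes the global counter; every hunk that follows replaces a use of it with a per-namespace field reached through net->ct. A minimal sketch of the layout this relies on (the real definition would live in include/net/netns/conntrack.h; only the count field matters for this patch, and the struct may carry more):

	/* Sketch, assuming the per-netns conntrack state looks roughly
	 * like this. */
	struct netns_ct {
		atomic_t	count;	/* conntrack entries in this netns */
	};

	struct net {
		/* ... other per-namespace state ... */
		struct netns_ct	ct;
	};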
 
        }
 
        /* We don't want any race condition at early drop stage */
-       atomic_inc(&nf_conntrack_count);
+       atomic_inc(&net->ct.count);
 
        if (nf_conntrack_max &&
-           unlikely(atomic_read(&nf_conntrack_count) > nf_conntrack_max)) {
+           unlikely(atomic_read(&net->ct.count) > nf_conntrack_max)) {
                unsigned int hash = hash_conntrack(orig);
                if (!early_drop(hash)) {
-                       atomic_dec(&nf_conntrack_count);
+                       atomic_dec(&net->ct.count);
                        if (net_ratelimit())
                                printk(KERN_WARNING
                                       "nf_conntrack: table full, dropping"
        ct = kmem_cache_zalloc(nf_conntrack_cachep, gfp);
        if (ct == NULL) {
                pr_debug("nf_conntrack_alloc: Can't alloc conntrack.\n");
-               atomic_dec(&nf_conntrack_count);
+               atomic_dec(&net->ct.count);
                return ERR_PTR(-ENOMEM);
        }
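Note the ordering in the allocation path above: the counter is incremented before the limit check, so two CPUs racing past nf_conntrack_max cannot both slip in, and every failure path (early_drop() losing, or the slab allocation failing) must undo its own increment. The same pattern in isolation, with a hypothetical name:

	/* Hedged sketch of "increment first, check the limit, roll back
	 * on failure"; acquire_ct_slot() is an illustrative name, not a
	 * kernel API. */
	static bool acquire_ct_slot(atomic_t *count, unsigned int max)
	{
		atomic_inc(count);		/* reserve optimistically */
		if (max && atomic_read(count) > max) {
			atomic_dec(count);	/* over the limit: roll back */
			return false;
		}
		return true;
	}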
 
 static void nf_conntrack_free_rcu(struct rcu_head *head)
 {
        struct nf_conn *ct = container_of(head, struct nf_conn, rcu);
+       struct net *net = nf_ct_net(ct);
 
        nf_ct_ext_free(ct);
        kmem_cache_free(nf_conntrack_cachep, ct);
-       atomic_dec(&nf_conntrack_count);
+       atomic_dec(&net->ct.count);
 }
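nf_conntrack_free_rcu() runs as an RCU callback, so no namespace pointer can be passed to it as an argument; the patch instead recovers it from the object itself with nf_ct_net(), which presumably follows a ->ct_net back-pointer stored on the conntrack. A sketch of that accessor under that assumption:

	/* Sketch: the accessor just follows the conntrack's namespace
	 * back-pointer; the real helper would live in
	 * include/net/netfilter/nf_conntrack.h. */
	static inline struct net *nf_ct_net(const struct nf_conn *ct)
	{
		return ct->ct_net;
	}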
 
 void nf_conntrack_free(struct nf_conn *ct)
        nf_ct_event_cache_flush();
  i_see_dead_people:
        nf_conntrack_flush();
-       if (atomic_read(&nf_conntrack_count) != 0) {
+       if (atomic_read(&init_net.ct.count) != 0) {
                schedule();
                goto i_see_dead_people;
        }
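The cleanup loop above has to busy-wait: since nf_conntrack_free() defers the actual release to nf_conntrack_free_rcu(), the counter only drains to zero once every pending RCU callback has run. Cleanup runs before per-netns conversion of this path, so it checks init_net's counter directly. The drain idiom in isolation (illustrative, not the kernel code):

	/* Sketch: yield until deferred (RCU) frees have released every
	 * entry and the counter drains to zero. */
	static void wait_for_conntracks(atomic_t *count)
	{
		while (atomic_read(count) != 0)
			schedule();	/* let RCU callbacks run */
	}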
                 * entries. */
                max_factor = 4;
        }
+       atomic_set(&init_net.ct.count, 0);
        nf_conntrack_hash = nf_ct_alloc_hashtable(&nf_conntrack_htable_size,
                                                  &nf_conntrack_vmalloc);
        if (!nf_conntrack_hash) {
 
 
 static int ct_cpu_seq_show(struct seq_file *seq, void *v)
 {
-       unsigned int nr_conntracks = atomic_read(&nf_conntrack_count);
+       unsigned int nr_conntracks = atomic_read(&init_net.ct.count);
        const struct ip_conntrack_stat *st = v;
 
        if (v == SEQ_START_TOKEN) {
        {
                .ctl_name       = NET_NF_CONNTRACK_COUNT,
                .procname       = "nf_conntrack_count",
-               .data           = &nf_conntrack_count,
+               .data           = &init_net.ct.count,
                .maxlen         = sizeof(int),
                .mode           = 0444,
                .proc_handler   = &proc_dointvec,
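The /proc reader and the sysctl entry are pointed at init_net for now, apparently as a temporary stub until per-netns registration arrives later in the series; proc_dointvec treats .data as a plain int, which works here because atomic_t is int-sized. A hedged sketch of how a later per-netns conversion could repoint the entry, using the kernel's usual clone-the-template pattern (function and table names below are illustrative, not from this patch):

	/* Sketch: duplicate the template table for one namespace and aim
	 * .data at that namespace's counter instead of init_net's. */
	static int nf_ct_sysctl_net_init(struct net *net)
	{
		struct ctl_table *table;

		table = kmemdup(nf_ct_sysctl_table, sizeof(nf_ct_sysctl_table),
				GFP_KERNEL);
		if (table == NULL)
			return -ENOMEM;
		table[0].data = &net->ct.count;	/* per-netns counter */
		/* registration via register_net_sysctl_table() would follow */
		return 0;
	}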