 extern int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp);
 extern unsigned int nf_conntrack_htable_size;
 extern unsigned int nf_conntrack_max;
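+/* Hash seed shared by the conntrack and expectation hash tables; it is
+ * initialized lazily by init_nf_conntrack_hash_rnd() on first use.
+ */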
+extern unsigned int nf_conntrack_hash_rnd;
+void init_nf_conntrack_hash_rnd(void);
 
 #define NF_CT_STAT_INC(net, count)     \
        __this_cpu_inc((net)->ct.stat->count)
 
 DEFINE_PER_CPU(struct nf_conn, nf_conntrack_untracked);
 EXPORT_PER_CPU_SYMBOL(nf_conntrack_untracked);
 
-static unsigned int nf_conntrack_hash_rnd __read_mostly;
+unsigned int nf_conntrack_hash_rnd __read_mostly;
 
 static u32 hash_conntrack_raw(const struct nf_conntrack_tuple *tuple, u16 zone)
 {
        return dropped;
 }
 
+void init_nf_conntrack_hash_rnd(void)
+{
+       unsigned int rand;
+
+	/*
+	 * Why not initialize nf_conntrack_hash_rnd in an init() function?
+	 * Because there is not enough entropy when the system is booting,
+	 * so we defer the initialization until it is first needed.
+	 */
+       do {
+               get_random_bytes(&rand, sizeof(rand));
+       } while (!rand);
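+	/*
+	 * cmpxchg() replaces the seed only while it is still zero, so
+	 * concurrent callers cannot overwrite a value already chosen by
+	 * another CPU.
+	 */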
+       cmpxchg(&nf_conntrack_hash_rnd, 0, rand);
+}
+
 static struct nf_conn *
 __nf_conntrack_alloc(struct net *net, u16 zone,
                     const struct nf_conntrack_tuple *orig,
        struct nf_conn *ct;
 
        if (unlikely(!nf_conntrack_hash_rnd)) {
-               unsigned int rand;
-
-               /*
-                * Why not initialize nf_conntrack_rnd in a "init()" function ?
-                * Because there isn't enough entropy when system initializing,
-                * and we initialize it as late as possible.
-                */
-               do {
-                       get_random_bytes(&rand, sizeof(rand));
-               } while (!rand);
-               cmpxchg(&nf_conntrack_hash_rnd, 0, rand);
-
+               init_nf_conntrack_hash_rnd();
                /* recompute the hash as nf_conntrack_hash_rnd is initialized */
                hash = hash_conntrack_raw(orig, zone);
        }
 
 unsigned int nf_ct_expect_hsize __read_mostly;
 EXPORT_SYMBOL_GPL(nf_ct_expect_hsize);
 
-static unsigned int nf_ct_expect_hash_rnd __read_mostly;
 unsigned int nf_ct_expect_max __read_mostly;
-static int nf_ct_expect_hash_rnd_initted __read_mostly;
 
 static struct kmem_cache *nf_ct_expect_cachep __read_mostly;
 
 {
        unsigned int hash;
 
-       if (unlikely(!nf_ct_expect_hash_rnd_initted)) {
-               get_random_bytes(&nf_ct_expect_hash_rnd,
-                                sizeof(nf_ct_expect_hash_rnd));
-               nf_ct_expect_hash_rnd_initted = 1;
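+	/* The expectation hash now reuses the conntrack hash seed, so a
+	 * single lazy initializer covers both tables.
+	 */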
+       if (unlikely(!nf_conntrack_hash_rnd)) {
+               init_nf_conntrack_hash_rnd();
        }
 
        hash = jhash2(tuple->dst.u3.all, ARRAY_SIZE(tuple->dst.u3.all),
                      (((tuple->dst.protonum ^ tuple->src.l3num) << 16) |
-                      (__force __u16)tuple->dst.u.all) ^ nf_ct_expect_hash_rnd);
+                      (__force __u16)tuple->dst.u.all) ^ nf_conntrack_hash_rnd);
        return ((u64)hash * nf_ct_expect_hsize) >> 32;
 }