}
 
 static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
-                                              size_t nbuckets, u32 hash_rnd)
+                                              size_t nbuckets)
 {
        struct bucket_table *tbl = NULL;
        size_t size;
@@ ... @@ static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
 
        tbl->size = nbuckets;
        tbl->shift = ilog2(nbuckets);
-       tbl->hash_rnd = hash_rnd;
 
        if (alloc_bucket_locks(ht, tbl) < 0) {
                bucket_table_free(tbl);
@@ ... @@ static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
 
        INIT_LIST_HEAD(&tbl->walkers);
 
+       get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));
+
        for (i = 0; i < nbuckets; i++)
                INIT_RHT_NULLS_HEAD(tbl->buckets[i], ht, i);
 
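After this hunk every table draws its own seed at the moment it is
allocated, whether it comes from rhashtable_init(), an expand, a shrink,
or a rehash target. The bucket index is then derived from the owning
table's hash_rnd, so two tables that coexist during a rehash place the
same key independently. The userspace sketch below is a toy model of
that invariant, not the kernel API; every name in it (mix, toy_table,
bucket) is illustrative.

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Toy stand-in for jhash(key, len, seed): any seeded mixer works. */
    static uint32_t mix(uint32_t key, uint32_t seed)
    {
        uint32_t h = key ^ seed;

        h *= 0x9e3779b1;            /* multiplicative mixing constant */
        return h ^ (h >> 16);
    }

    struct toy_table {
        uint32_t hash_rnd;          /* per-table seed, as after this patch */
        uint32_t size;              /* power of two, as in the kernel */
    };

    static struct toy_table toy_table_alloc(uint32_t size)
    {
        struct toy_table t = { .size = size };

        t.hash_rnd = (uint32_t)rand();  /* get_random_bytes() stand-in */
        return t;
    }

    static uint32_t bucket(const struct toy_table *t, uint32_t key)
    {
        return mix(key, t->hash_rnd) & (t->size - 1);
    }

    int main(void)
    {
        struct toy_table old_tbl = toy_table_alloc(8);
        struct toy_table new_tbl = toy_table_alloc(16);  /* an "expand" */

        /* Same key, independent placement: each table hashes with its
         * own seed, so a rehash re-randomises every bucket choice. */
        printf("key 42: old bucket %u, new bucket %u\n",
               bucket(&old_tbl, 42), bucket(&new_tbl, 42));
        return 0;
    }

Run a few times, the two printed indices will usually differ for the
same key; that independence is exactly what the per-table seed buys.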
@@ ... @@ static void rhashtable_rehash(struct rhashtable *ht,
        struct rhashtable_walker *walker;
        unsigned old_hash;
 
-       get_random_bytes(&new_tbl->hash_rnd, sizeof(new_tbl->hash_rnd));
-
        /* Make insertions go into the new, empty table right away. Deletions
         * and lookups will be attempted in both tables until we synchronize.
         * The synchronize_rcu() guarantees for the new table to be picked up
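The comment above (truncated here by the hunk boundary) is the heart of
the scheme: while a rehash is in flight both tables are live, so a
reader must be prepared to probe the old table and then the new one,
hashing with each table's own seed. Continuing the toy model from the
previous sketch (same struct toy_table and bucket(); the slot arrays
are an illustrative stand-in for the kernel's bucket chains, not its
lookup internals):

    /* Two-table lookup shape described by the comment: probe the table
     * readers started from, then the one a rehash is filling, hashing
     * the key with EACH table's own hash_rnd. One key per bucket,
     * 0 == empty, purely for brevity. */
    static int toy_lookup(const struct toy_table *old_tbl,
                          const uint32_t *old_slots,
                          const struct toy_table *new_tbl,
                          const uint32_t *new_slots, uint32_t key)
    {
        if (old_slots[bucket(old_tbl, key)] == key)
            return 1;           /* found in the old table */
        if (new_tbl != old_tbl &&
            new_slots[bucket(new_tbl, key)] == key)
            return 1;           /* found in the table being filled */
        return 0;               /* absent from both */
    }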
 
@@ ... @@ int rhashtable_expand(struct rhashtable *ht)
        ASSERT_RHT_MUTEX(ht);
 
-       new_tbl = bucket_table_alloc(ht, old_tbl->size * 2, old_tbl->hash_rnd);
+       new_tbl = bucket_table_alloc(ht, old_tbl->size * 2);
        if (new_tbl == NULL)
                return -ENOMEM;
 
 
@@ ... @@ int rhashtable_shrink(struct rhashtable *ht)
        ASSERT_RHT_MUTEX(ht);
 
-       new_tbl = bucket_table_alloc(ht, old_tbl->size / 2, old_tbl->hash_rnd);
+       new_tbl = bucket_table_alloc(ht, old_tbl->size / 2);
        if (new_tbl == NULL)
                return -ENOMEM;
 
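Both resize paths previously copied old_tbl->hash_rnd into the new
table, so the seed picked at init time survived every resize. With
seeding moved into bucket_table_alloc(), the expand path, paraphrased
below from the code surrounding this hunk, re-randomises bucket
placement on every grow or shrink; that is what makes a rehash
worthwhile against a caller who has learned colliding keys:

    new_tbl = bucket_table_alloc(ht, old_tbl->size * 2); /* fresh hash_rnd */
    if (new_tbl == NULL)
        return -ENOMEM;

    rhashtable_rehash(ht, new_tbl);  /* re-buckets every entry, new seed */
    return 0;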
@@ ... @@ int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params)
 {
        struct bucket_table *tbl;
        size_t size;
-       u32 hash_rnd;
 
        size = HASH_DEFAULT_SIZE;
 
@@ ... @@ int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params)
        else
                ht->p.locks_mul = BUCKET_LOCKS_PER_CPU;
 
-       get_random_bytes(&hash_rnd, sizeof(hash_rnd));
-
-       tbl = bucket_table_alloc(ht, size, hash_rnd);
+       tbl = bucket_table_alloc(ht, size);
        if (tbl == NULL)
                return -ENOMEM;
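
With the local hash_rnd gone from rhashtable_init(), a user of this
API never supplies or sees a seed at all; it is now wholly an
implementation detail of bucket_table_alloc(). A caller sketch under
that assumption follows; my_obj and its fields are placeholders and the
parameter list is abbreviated from the contemporary
struct rhashtable_params:

    struct rhashtable ht;
    struct rhashtable_params params = {
        .head_offset = offsetof(struct my_obj, node),  /* my_obj: placeholder */
        .key_offset  = offsetof(struct my_obj, key),
        .key_len     = sizeof(u32),
        .hashfn      = jhash,   /* seeded per call with tbl->hash_rnd */
    };
    int err = rhashtable_init(&ht, &params);  /* no seed argument anywhere */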