u32 entropy_u32[CHACHA_BLOCK_SIZE / sizeof(u32)];
        };
        unsigned int position;
+       spinlock_t batch_lock;
 };
-static rwlock_t batched_entropy_reset_lock = __RW_LOCK_UNLOCKED(batched_entropy_reset_lock);
 
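For reference, the struct this hunk produces, reassembled from the diff (the `entropy_u64` union member is cut off in this excerpt, but its shape is implied by the `batch->entropy_u64` accesses below):

```c
struct batched_entropy {
	union {
		u64 entropy_u64[CHACHA_BLOCK_SIZE / sizeof(u64)];
		u32 entropy_u32[CHACHA_BLOCK_SIZE / sizeof(u32)];
	};
	unsigned int position;
	spinlock_t batch_lock;
};
```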
 /*
  * Get a random word for internal kernel use only. The quality of the random
  * number is either as good as RDRAND or as good as /dev/urandom, with the
  * goal of being quite fast and not depleting entropy. In order to ensure
  * that the randomness provided by this function is okay, the function
  * wait_for_random_bytes() should be called and return 0 at least once
  * at any point prior.
  */
-static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64);
+static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64) = {
+       .batch_lock     = __SPIN_LOCK_UNLOCKED(batched_entropy_u64.lock),
+};
+
 u64 get_random_u64(void)
 {
        u64 ret;
-       bool use_lock;
-       unsigned long flags = 0;
+       unsigned long flags;
        struct batched_entropy *batch;
        static void *previous;
 
 	warn_unseeded_randomness(&previous);
 
-       use_lock = READ_ONCE(crng_init) < 2;
-       batch = &get_cpu_var(batched_entropy_u64);
-       if (use_lock)
-               read_lock_irqsave(&batched_entropy_reset_lock, flags);
+       batch = raw_cpu_ptr(&batched_entropy_u64);
+       spin_lock_irqsave(&batch->batch_lock, flags);
        if (batch->position % ARRAY_SIZE(batch->entropy_u64) == 0) {
                extract_crng((u8 *)batch->entropy_u64);
                batch->position = 0;
        }
        ret = batch->entropy_u64[batch->position++];
-       if (use_lock)
-               read_unlock_irqrestore(&batched_entropy_reset_lock, flags);
-       put_cpu_var(batched_entropy_u64);
+       spin_unlock_irqrestore(&batch->batch_lock, flags);
        return ret;
 }
 EXPORT_SYMBOL(get_random_u64);
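To make the batching pattern above concrete, here is a minimal userspace C sketch of the same scheme: a block's worth of output is generated at once, handed out one word at a time under a per-batch lock, and refilled lazily when the position wraps. `fill_block()` is a hypothetical stand-in for `extract_crng()`, a pthread mutex stands in for the spinlock, and `rand()` stands in for the CRNG, so this is illustrative only, not cryptographically meaningful.

```c
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define BLOCK_SIZE 64	/* one ChaCha20 block, like CHACHA_BLOCK_SIZE */

struct batch {
	uint64_t words[BLOCK_SIZE / sizeof(uint64_t)];
	unsigned int position;
	pthread_mutex_t lock;
};

/* Hypothetical stand-in for extract_crng(): refill the batch. Uses
 * rand(), so it is illustrative only, not cryptographic. */
static void fill_block(uint8_t *out)
{
	for (size_t i = 0; i < BLOCK_SIZE; i++)
		out[i] = (uint8_t)rand();
}

static uint64_t batch_next_u64(struct batch *b)
{
	uint64_t ret;

	pthread_mutex_lock(&b->lock);
	/* position == 0 (mod array size) means "batch exhausted":
	 * refill lazily, exactly like the kernel code above. */
	if (b->position % (sizeof(b->words) / sizeof(b->words[0])) == 0) {
		fill_block((uint8_t *)b->words);
		b->position = 0;
	}
	ret = b->words[b->position++];
	pthread_mutex_unlock(&b->lock);
	return ret;
}

int main(void)
{
	struct batch b = { .position = 0, .lock = PTHREAD_MUTEX_INITIALIZER };

	/* Only every 8th call pays for a refill; the rest just index. */
	for (int i = 0; i < 10; i++)
		printf("%016llx\n", (unsigned long long)batch_next_u64(&b));
	return 0;
}
```

The win is the same as in the kernel code: only one call in eight (u64 words out of a 64-byte block) pays for a cipher invocation; the others just index into the buffer.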
 
-static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u32);
+static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u32) = {
+       .batch_lock     = __SPIN_LOCK_UNLOCKED(batched_entropy_u32.lock),
+};
 u32 get_random_u32(void)
 {
        u32 ret;
-       bool use_lock;
-       unsigned long flags = 0;
+       unsigned long flags;
        struct batched_entropy *batch;
        static void *previous;
 
 	warn_unseeded_randomness(&previous);
 
-       use_lock = READ_ONCE(crng_init) < 2;
-       batch = &get_cpu_var(batched_entropy_u32);
-       if (use_lock)
-               read_lock_irqsave(&batched_entropy_reset_lock, flags);
+       batch = raw_cpu_ptr(&batched_entropy_u32);
+       spin_lock_irqsave(&batch->batch_lock, flags);
        if (batch->position % ARRAY_SIZE(batch->entropy_u32) == 0) {
                extract_crng((u8 *)batch->entropy_u32);
                batch->position = 0;
        }
        ret = batch->entropy_u32[batch->position++];
-       if (use_lock)
-               read_unlock_irqrestore(&batched_entropy_reset_lock, flags);
-       put_cpu_var(batched_entropy_u32);
+       spin_unlock_irqrestore(&batch->batch_lock, flags);
        return ret;
 }
 EXPORT_SYMBOL(get_random_u32);
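A usage sketch for the same pattern under contention, assuming the `struct batch` and `batch_next_u64()` from the sketch above (and replacing its single-threaded `main()`). Two threads drain one shared batch; the per-batch lock is what prevents both from reading the same slot, which mirrors the process-context vs. interrupt-context race that the per-batch spinlock closes here.

```c
#include <pthread.h>
#include <stdio.h>

static struct batch shared = { .lock = PTHREAD_MUTEX_INITIALIZER };

static void *drain(void *arg)
{
	/* Each thread pulls a few words; the per-batch lock guarantees
	 * no slot is handed out twice. */
	for (int i = 0; i < 4; i++)
		printf("thread %ld: %016llx\n", (long)arg,
		       (unsigned long long)batch_next_u64(&shared));
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, drain, (void *)1L);
	pthread_create(&b, NULL, drain, (void *)2L);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	return 0;
}
```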
 
 /* It's important to invalidate all potential batched entropy that might
  * be stored before the crng is initialized, which we can do lazily by
  * simply resetting the counter to zero so that it's re-extracted on the
  * next usage. */
 static void invalidate_batched_entropy(void)
 {
 	int cpu;
        unsigned long flags;
 
-       write_lock_irqsave(&batched_entropy_reset_lock, flags);
        for_each_possible_cpu (cpu) {
-               per_cpu_ptr(&batched_entropy_u32, cpu)->position = 0;
-               per_cpu_ptr(&batched_entropy_u64, cpu)->position = 0;
+               struct batched_entropy *batched_entropy;
+
+               batched_entropy = per_cpu_ptr(&batched_entropy_u32, cpu);
+               spin_lock_irqsave(&batched_entropy->batch_lock, flags);
+               batched_entropy->position = 0;
+               spin_unlock(&batched_entropy->batch_lock);
+
+               batched_entropy = per_cpu_ptr(&batched_entropy_u64, cpu);
+               spin_lock(&batched_entropy->batch_lock);
+               batched_entropy->position = 0;
+               spin_unlock_irqrestore(&batched_entropy->batch_lock, flags);
        }
-       write_unlock_irqrestore(&batched_entropy_reset_lock, flags);
 }
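Note the lock choreography in the loop above: `spin_lock_irqsave()` is taken on the u32 batch, but its matching `spin_unlock()` does not restore interrupts; they stay disabled until `spin_unlock_irqrestore()` after the u64 batch, so each CPU's pair of batches is reset with interrupts off throughout. And because each batch now carries its own lock, one CPU can safely reset every other CPU's batches, which is what made the global `batched_entropy_reset_lock` removable. A hedged userspace analogue of the walk, assuming arrays of the `struct batch` from the first sketch (userspace has no interrupt state, so plain lock/unlock pairs stand in for the irqsave/irqrestore sequence):

```c
static void invalidate_all(struct batch *u32_batches,
			   struct batch *u64_batches, int nr_cpus)
{
	for (int cpu = 0; cpu < nr_cpus; cpu++) {
		pthread_mutex_lock(&u32_batches[cpu].lock);
		u32_batches[cpu].position = 0;	/* force a refill on next use */
		pthread_mutex_unlock(&u32_batches[cpu].lock);

		pthread_mutex_lock(&u64_batches[cpu].lock);
		u64_batches[cpu].position = 0;
		pthread_mutex_unlock(&u64_batches[cpu].lock);
	}
}
```

Resetting `position` to zero is enough to invalidate: the consumers above treat `position % ARRAY_SIZE(...) == 0` as "empty" and re-extract on the next call.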
 
 /**