 static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1);
 static u32 kvm_next_vmid;
 static unsigned int kvm_vmid_bits __read_mostly;
-static DEFINE_RWLOCK(kvm_vmid_lock);
+static DEFINE_SPINLOCK(kvm_vmid_lock);
 
 static bool vgic_present;
 
  */
 static bool need_new_vmid_gen(struct kvm *kvm)
 {
-       return unlikely(kvm->arch.vmid_gen != atomic64_read(&kvm_vmid_gen));
+       u64 current_vmid_gen = atomic64_read(&kvm_vmid_gen);
+       smp_rmb(); /* Orders read of kvm_vmid_gen and kvm->arch.vmid */
+       return unlikely(READ_ONCE(kvm->arch.vmid_gen) != current_vmid_gen);
 }
 
 /**
 {
        phys_addr_t pgd_phys;
        u64 vmid, cnp = kvm_cpu_has_cnp() ? VTTBR_CNP_BIT : 0;
-       bool new_gen;
 
-       read_lock(&kvm_vmid_lock);
-       new_gen = need_new_vmid_gen(kvm);
-       read_unlock(&kvm_vmid_lock);
-
-       if (!new_gen)
+       if (!need_new_vmid_gen(kvm))
                return;
 
-       write_lock(&kvm_vmid_lock);
+       spin_lock(&kvm_vmid_lock);
 
        /*
         * We need to re-check the vmid_gen here to ensure that if another vcpu
         * already allocated a valid vmid for this vm, then this vcpu should
         * use the same vmid.
         */
        if (!need_new_vmid_gen(kvm)) {
-               write_unlock(&kvm_vmid_lock);
+               spin_unlock(&kvm_vmid_lock);
                return;
        }
 
                kvm_call_hyp(__kvm_flush_vm_context);
        }
 
-       kvm->arch.vmid_gen = atomic64_read(&kvm_vmid_gen);
        kvm->arch.vmid = kvm_next_vmid;
        kvm_next_vmid++;
        kvm_next_vmid &= (1 << kvm_vmid_bits) - 1;
        vmid = ((u64)(kvm->arch.vmid) << VTTBR_VMID_SHIFT) & VTTBR_VMID_MASK(kvm_vmid_bits);
        kvm->arch.vttbr = kvm_phys_to_vttbr(pgd_phys) | vmid | cnp;
 
-       write_unlock(&kvm_vmid_lock);
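+       /*
+        * Pairs with the smp_rmb() in need_new_vmid_gen(): make the updates
+        * to kvm->arch.vmid and kvm->arch.vttbr above visible before the
+        * write to kvm->arch.vmid_gen below.
+        */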
+       smp_wmb();
+       WRITE_ONCE(kvm->arch.vmid_gen, atomic64_read(&kvm_vmid_gen));
+
+       spin_unlock(&kvm_vmid_lock);
 }
 
 static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)