        break;
        case KVM_CAP_S390_VECTOR_REGISTERS:
                mutex_lock(&kvm->lock);
-               if (atomic_read(&kvm->online_vcpus)) {
+               if (kvm->created_vcpus) {
                        r = -EBUSY;
                } else if (MACHINE_HAS_VX) {
                        set_kvm_facility(kvm->arch.model.fac_mask, 129);
        case KVM_CAP_S390_RI:
                r = -EINVAL;
                mutex_lock(&kvm->lock);
-               if (atomic_read(&kvm->online_vcpus)) {
+               if (kvm->created_vcpus) {
                        r = -EBUSY;
                } else if (test_facility(64)) {
                        set_kvm_facility(kvm->arch.model.fac_mask, 64);
                ret = -EBUSY;
                VM_EVENT(kvm, 3, "%s", "ENABLE: CMMA support");
                mutex_lock(&kvm->lock);
-               if (atomic_read(&kvm->online_vcpus) == 0) {
+               if (!kvm->created_vcpus) {
                        kvm->arch.use_cmma = 1;
                        ret = 0;
                }
 
                ret = -EBUSY;
                mutex_lock(&kvm->lock);
-               if (atomic_read(&kvm->online_vcpus) == 0) {
+               if (!kvm->created_vcpus) {
                        /* gmap_alloc will round the limit up */
                        struct gmap *new = gmap_alloc(current->mm, new_limit);
 
        int ret = 0;
 
        mutex_lock(&kvm->lock);
-       if (atomic_read(&kvm->online_vcpus)) {
+       if (kvm->created_vcpus) {
                ret = -EBUSY;
                goto out;
        }
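
Every hunk above applies the same guard before a VM-wide configuration change: take kvm->lock, bail out with -EBUSY if any vCPU already exists, and only then apply the setting. The checks move from atomic_read(&kvm->online_vcpus) to kvm->created_vcpus, which is raised under kvm->lock as soon as KVM_CREATE_VCPU starts, so a vCPU that is still being set up (not yet counted in online_vcpus) also blocks the change. A minimal sketch of that pattern, assuming kernel context; the enable_vm_feature() name and the feature hook are placeholders, not part of the patch:

        /*
         * Illustrative only: function name and feature hook are placeholders;
         * the lock/guard structure mirrors the hunks above.
         */
        static int enable_vm_feature(struct kvm *kvm)
        {
                int r = -EBUSY;

                mutex_lock(&kvm->lock);
                if (!kvm->created_vcpus) {
                        /* no vCPU has been created yet, safe to change VM-wide state */
                        /* ... apply the setting here ... */
                        r = 0;
                }
                mutex_unlock(&kvm->lock);
                return r;
        }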