kvm->arch.crypto.crycbd |= CRYCB_FORMAT1;
 }
 
+/*
+ * kvm_arch_crypto_set_masks
+ *
+ * @kvm: pointer to the target guest's KVM struct containing the crypto masks
+ *      to be set.
+ * @apm: the mask identifying the accessible AP adapters
+ * @aqm: the mask identifying the accessible AP domains
+ * @adm: the mask identifying the accessible AP control domains
+ *
+ * Set the masks that identify the adapters, domains and control domains to
+ * which the KVM guest is granted access.
+ *
+ * Note: The kvm->lock mutex must be locked by the caller before invoking this
+ *      function.
+ */
 void kvm_arch_crypto_set_masks(struct kvm *kvm, unsigned long *apm,
                               unsigned long *aqm, unsigned long *adm)
 {
        struct kvm_s390_crypto_cb *crycb = kvm->arch.crypto.crycb;
 
-       mutex_lock(&kvm->lock);
        kvm_s390_vcpu_block_all(kvm);
 
        switch (kvm->arch.crypto.crycbd & CRYCB_FORMAT_MASK) {
        /* recreate the shadow crycb for each vcpu */
        kvm_s390_sync_request_broadcast(kvm, KVM_REQ_VSIE_RESTART);
        kvm_s390_vcpu_unblock_all(kvm);
-       mutex_unlock(&kvm->lock);
 }
 EXPORT_SYMBOL_GPL(kvm_arch_crypto_set_masks);
 
+/*
+ * kvm_arch_crypto_clear_masks
+ *
+ * @kvm: pointer to the target guest's KVM struct containing the crypto masks
+ *      to be cleared.
+ *
+ * Clear the masks that identify the adapters, domains and control domains to
+ * which the KVM guest is granted access.
+ *
+ * Note: The kvm->lock mutex must be locked by the caller before invoking this
+ *      function.
+ */
 void kvm_arch_crypto_clear_masks(struct kvm *kvm)
 {
-       mutex_lock(&kvm->lock);
        kvm_s390_vcpu_block_all(kvm);
 
        memset(&kvm->arch.crypto.crycb->apcb0, 0,
        /* recreate the shadow crycb for each vcpu */
        kvm_s390_sync_request_broadcast(kvm, KVM_REQ_VSIE_RESTART);
        kvm_s390_vcpu_unblock_all(kvm);
-       mutex_unlock(&kvm->lock);
 }
 EXPORT_SYMBOL_GPL(kvm_arch_crypto_clear_masks);
 
 
        matrix_mdev = container_of(vcpu->kvm->arch.crypto.pqap_hook,
                                   struct ap_matrix_mdev, pqap_hook);
 
-       /*
-        * If the KVM pointer is in the process of being set, wait until the
-        * process has completed.
-        */
-       wait_event_cmd(matrix_mdev->wait_for_kvm,
-                      !matrix_mdev->kvm_busy,
-                      mutex_unlock(&matrix_dev->lock),
-                      mutex_lock(&matrix_dev->lock));
-
        /* If there is no guest using the mdev, there is nothing to do */
        if (!matrix_mdev->kvm)
                goto out_unlock;
 
        matrix_mdev->mdev = mdev;
        vfio_ap_matrix_init(&matrix_dev->info, &matrix_mdev->matrix);
-       init_waitqueue_head(&matrix_mdev->wait_for_kvm);
        mdev_set_drvdata(mdev, matrix_mdev);
        matrix_mdev->pqap_hook = handle_pqap;
        mutex_lock(&matrix_dev->lock);
 
        mutex_lock(&matrix_dev->lock);
 
-       /*
-        * If the KVM pointer is in flux or the guest is running, disallow
-        * un-assignment of adapter
-        */
-       if (matrix_mdev->kvm_busy || matrix_mdev->kvm) {
+       /* If the KVM guest is running, disallow assignment of adapter */
+       if (matrix_mdev->kvm) {
                ret = -EBUSY;
                goto done;
        }
 
        mutex_lock(&matrix_dev->lock);
 
-       /*
-        * If the KVM pointer is in flux or the guest is running, disallow
-        * un-assignment of adapter
-        */
-       if (matrix_mdev->kvm_busy || matrix_mdev->kvm) {
+       /* If the KVM guest is running, disallow unassignment of adapter */
+       if (matrix_mdev->kvm) {
                ret = -EBUSY;
                goto done;
        }
 
        mutex_lock(&matrix_dev->lock);
 
-       /*
-        * If the KVM pointer is in flux or the guest is running, disallow
-        * assignment of domain
-        */
-       if (matrix_mdev->kvm_busy || matrix_mdev->kvm) {
+       /* If the KVM guest is running, disallow assignment of domain */
+       if (matrix_mdev->kvm) {
                ret = -EBUSY;
                goto done;
        }
 
        mutex_lock(&matrix_dev->lock);
 
-       /*
-        * If the KVM pointer is in flux or the guest is running, disallow
-        * un-assignment of domain
-        */
-       if (matrix_mdev->kvm_busy || matrix_mdev->kvm) {
+       /* If the KVM guest is running, disallow unassignment of domain */
+       if (matrix_mdev->kvm) {
                ret = -EBUSY;
                goto done;
        }
 
        mutex_lock(&matrix_dev->lock);
 
-       /*
-        * If the KVM pointer is in flux or the guest is running, disallow
-        * assignment of control domain.
-        */
-       if (matrix_mdev->kvm_busy || matrix_mdev->kvm) {
+       /* If the KVM guest is running, disallow assignment of control domain */
+       if (matrix_mdev->kvm) {
                ret = -EBUSY;
                goto done;
        }
 
        mutex_lock(&matrix_dev->lock);
 
-       /*
-        * If the KVM pointer is in flux or the guest is running, disallow
-        * un-assignment of control domain.
-        */
-       if (matrix_mdev->kvm_busy || matrix_mdev->kvm) {
+       /* If the KVM guest is running, disallow unassignment of control domain */
+       if (matrix_mdev->kvm) {
                ret = -EBUSY;
                goto done;
        }
        struct ap_matrix_mdev *m;
 
        if (kvm->arch.crypto.crycbd) {
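+               /* Register the guest's PQAP instruction hook under its semaphore */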
+               down_write(&kvm->arch.crypto.pqap_hook_rwsem);
+               kvm->arch.crypto.pqap_hook = &matrix_mdev->pqap_hook;
+               up_write(&kvm->arch.crypto.pqap_hook_rwsem);
+
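+               /*
+                * kvm->lock must be taken ahead of matrix_dev->lock;
+                * kvm_arch_crypto_set_masks below expects kvm->lock to be held.
+                */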
+               mutex_lock(&kvm->lock);
+               mutex_lock(&matrix_dev->lock);
+
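+               /* Refuse if the guest is already in use by another mdev */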
                list_for_each_entry(m, &matrix_dev->mdev_list, node) {
-                       if (m != matrix_mdev && m->kvm == kvm)
+                       if (m != matrix_mdev && m->kvm == kvm) {
+                               mutex_unlock(&kvm->lock);
+                               mutex_unlock(&matrix_dev->lock);
                                return -EPERM;
+                       }
                }
 
                kvm_get_kvm(kvm);
                matrix_mdev->kvm = kvm;
-               matrix_mdev->kvm_busy = true;
-               mutex_unlock(&matrix_dev->lock);
-
-               down_write(&matrix_mdev->kvm->arch.crypto.pqap_hook_rwsem);
-               kvm->arch.crypto.pqap_hook = &matrix_mdev->pqap_hook;
-               up_write(&matrix_mdev->kvm->arch.crypto.pqap_hook_rwsem);
-
                kvm_arch_crypto_set_masks(kvm,
                                          matrix_mdev->matrix.apm,
                                          matrix_mdev->matrix.aqm,
                                          matrix_mdev->matrix.adm);
 
-               mutex_lock(&matrix_dev->lock);
-               matrix_mdev->kvm_busy = false;
-               wake_up_all(&matrix_mdev->wait_for_kvm);
+               mutex_unlock(&kvm->lock);
+               mutex_unlock(&matrix_dev->lock);
        }
 
        return 0;
  * done under the @matrix_mdev->lock.
  *
  */
-static void vfio_ap_mdev_unset_kvm(struct ap_matrix_mdev *matrix_mdev)
+static void vfio_ap_mdev_unset_kvm(struct ap_matrix_mdev *matrix_mdev,
+                                  struct kvm *kvm)
 {
-       /*
-        * If the KVM pointer is in the process of being set, wait until the
-        * process has completed.
-        */
-       wait_event_cmd(matrix_mdev->wait_for_kvm,
-                      !matrix_mdev->kvm_busy,
-                      mutex_unlock(&matrix_dev->lock),
-                      mutex_lock(&matrix_dev->lock));
-
-       if (matrix_mdev->kvm) {
-               matrix_mdev->kvm_busy = true;
-               mutex_unlock(&matrix_dev->lock);
-
-               if (matrix_mdev->kvm->arch.crypto.crycbd) {
-                       down_write(&matrix_mdev->kvm->arch.crypto.pqap_hook_rwsem);
-                       matrix_mdev->kvm->arch.crypto.pqap_hook = NULL;
-                       up_write(&matrix_mdev->kvm->arch.crypto.pqap_hook_rwsem);
-
-                       kvm_arch_crypto_clear_masks(matrix_mdev->kvm);
-               }
+       if (kvm && kvm->arch.crypto.crycbd) {
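+               /*
+                * Unregister the guest's PQAP instruction hook, then take
+                * kvm->lock ahead of matrix_dev->lock (same order as in
+                * vfio_ap_mdev_set_kvm) before clearing the crypto masks.
+                */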
+               down_write(&kvm->arch.crypto.pqap_hook_rwsem);
+               kvm->arch.crypto.pqap_hook = NULL;
+               up_write(&kvm->arch.crypto.pqap_hook_rwsem);
 
+               mutex_lock(&kvm->lock);
                mutex_lock(&matrix_dev->lock);
+
+               kvm_arch_crypto_clear_masks(kvm);
                vfio_ap_mdev_reset_queues(matrix_mdev->mdev);
-               kvm_put_kvm(matrix_mdev->kvm);
+               kvm_put_kvm(kvm);
                matrix_mdev->kvm = NULL;
-               matrix_mdev->kvm_busy = false;
-               wake_up_all(&matrix_mdev->wait_for_kvm);
+
+               mutex_unlock(&kvm->lock);
+               mutex_unlock(&matrix_dev->lock);
        }
 }
 
        if (action != VFIO_GROUP_NOTIFY_SET_KVM)
                return NOTIFY_OK;
 
-       mutex_lock(&matrix_dev->lock);
        matrix_mdev = container_of(nb, struct ap_matrix_mdev, group_notifier);
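+       /* For VFIO_GROUP_NOTIFY_SET_KVM, data is the guest's kvm pointer (NULL when cleared) */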
 
        if (!data)
-               vfio_ap_mdev_unset_kvm(matrix_mdev);
+               vfio_ap_mdev_unset_kvm(matrix_mdev, matrix_mdev->kvm);
        else if (vfio_ap_mdev_set_kvm(matrix_mdev, data))
                notify_rc = NOTIFY_DONE;
 
-       mutex_unlock(&matrix_dev->lock);
-
        return notify_rc;
 }
 
 {
        struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
 
-       mutex_lock(&matrix_dev->lock);
-       vfio_ap_mdev_unset_kvm(matrix_mdev);
-       mutex_unlock(&matrix_dev->lock);
-
        vfio_unregister_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
                                 &matrix_mdev->iommu_notifier);
        vfio_unregister_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY,
                                 &matrix_mdev->group_notifier);
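+       /* With the notifiers unregistered, unhook the mdev from its guest */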
+       vfio_ap_mdev_unset_kvm(matrix_mdev, matrix_mdev->kvm);
        module_put(THIS_MODULE);
 }
 
                        break;
                }
 
-               /*
-                * If the KVM pointer is in the process of being set, wait until
-                * the process has completed.
-                */
-               wait_event_cmd(matrix_mdev->wait_for_kvm,
-                              !matrix_mdev->kvm_busy,
-                              mutex_unlock(&matrix_dev->lock),
-                              mutex_lock(&matrix_dev->lock));
-
                ret = vfio_ap_mdev_reset_queues(mdev);
                break;
        default: