void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
                                 struct kvm_async_pf *work);
 
+void kvm_arch_crypto_clear_masks(struct kvm *kvm);
+
 extern int sie64a(struct kvm_s390_sie_block *, u64 *);
 extern char sie_exit;
 
 
                kvm->arch.crypto.crycbd |= CRYCB_FORMAT1;
 }
 
+/**
+ * kvm_arch_crypto_clear_masks - clear the crypto masks in the guest CRYCB
+ * @kvm: the VM whose crypto control block (CRYCB) is to be cleared
+ *
+ * Zeroes both APCB mask structures (apcb0 and apcb1) embedded in the
+ * VM's CRYCB.  All vcpus are blocked for the duration of the update so
+ * no vcpu runs with a partially cleared mask, and kvm->lock serializes
+ * against concurrent VM-wide state changes.
+ *
+ * Exported (GPL-only) so that modules outside this file can clear the
+ * masks; presumably used by the AP/vfio driver side — confirm at the
+ * call sites when they land.
+ */
+void kvm_arch_crypto_clear_masks(struct kvm *kvm)
+{
+       mutex_lock(&kvm->lock);
+       kvm_s390_vcpu_block_all(kvm);
+
+       /* Clear both APCB formats; vcpus are blocked, so the memsets
+        * need not be atomic with respect to guest execution. */
+       memset(&kvm->arch.crypto.crycb->apcb0, 0,
+              sizeof(kvm->arch.crypto.crycb->apcb0));
+       memset(&kvm->arch.crypto.crycb->apcb1, 0,
+              sizeof(kvm->arch.crypto.crycb->apcb1));
+
+       kvm_s390_vcpu_unblock_all(kvm);
+       mutex_unlock(&kvm->lock);
+}
+EXPORT_SYMBOL_GPL(kvm_arch_crypto_clear_masks);
+
 static u64 kvm_s390_get_initial_cpuid(void)
 {
        struct cpuid cpuid;