        __u8    armid;                  /* 0x00e3 */
        __u8    reservede4[4];          /* 0x00e4 */
        __u64   tecmc;                  /* 0x00e8 */
-       __u8    reservedf0[16];         /* 0x00f0 */
+       __u8    reservedf0[12];         /* 0x00f0 */
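+/* crycbd: crypto control block designation; the low-order bits give the CRYCB format */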
+#define CRYCB_FORMAT1 0x00000001
+       __u32   crycbd;                 /* 0x00fc */
        __u64   gcr[16];                /* 0x0100 */
        __u64   gbea;                   /* 0x0180 */
        __u8    reserved188[24];        /* 0x0188 */
 #define MAX_S390_IO_ADAPTERS ((MAX_ISC + 1) * 8)
 #define MAX_S390_ADAPTER_MAPS 256
 
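+/*
+ * Per-VM crypto state: the host copy of the crypto control block (CRYCB)
+ * and the designation word that each vcpu's SIE block points at.
+ */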
+struct kvm_s390_crypto {
+       struct kvm_s390_crypto_cb *crycb;
+       __u32 crycbd;
+};
+
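+/* 128-byte format-1 CRYCB; its contents are still reserved at this point */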
+struct kvm_s390_crypto_cb {
+       __u8    reserved00[128];                /* 0x0000 */
+};
+
 struct kvm_arch{
        struct sca_block *sca;
        debug_info_t *dbf;
        struct s390_io_adapter *adapters[MAX_S390_IO_ADAPTERS];
        wait_queue_head_t ipte_wq;
        spinlock_t start_stop_lock;
+       struct kvm_s390_crypto crypto;
 };
 
 #define KVM_HVA_ERR_BAD                (-1UL)
 
        return r;
 }
 
+static int kvm_s390_crypto_init(struct kvm *kvm)
+{
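+       /* the CRYCB is only meaningful if the MSA extension 3 facility (76) is present */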
+       if (!test_vfacility(76))
+               return 0;
+
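+       /* GFP_DMA keeps the CRYCB below 2 GB so its address fits the 32-bit designation */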
+       kvm->arch.crypto.crycb = kzalloc(sizeof(*kvm->arch.crypto.crycb),
+                                        GFP_KERNEL | GFP_DMA);
+       if (!kvm->arch.crypto.crycb)
+               return -ENOMEM;
+
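+       /* designation = CRYCB origin with the format tag in the low bits */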
+       kvm->arch.crypto.crycbd = (__u32) (unsigned long) kvm->arch.crypto.crycb |
+                                 CRYCB_FORMAT1;
+
+       return 0;
+}
+
 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 {
        int rc;
        if (!kvm->arch.dbf)
                goto out_nodbf;
 
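+       /* set up the VM-wide crypto control block before any vcpu exists */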
+       if (kvm_s390_crypto_init(kvm) < 0)
+               goto out_crypto;
+
        spin_lock_init(&kvm->arch.float_int.lock);
        INIT_LIST_HEAD(&kvm->arch.float_int.list);
        init_waitqueue_head(&kvm->arch.ipte_wq);
 
        return 0;
 out_nogmap:
+       kfree(kvm->arch.crypto.crycb);
+out_crypto:
        debug_unregister(kvm->arch.dbf);
 out_nodbf:
        free_page((unsigned long)(kvm->arch.sca));
        kvm_free_vcpus(kvm);
        free_page((unsigned long)(kvm->arch.sca));
        debug_unregister(kvm->arch.dbf);
+       kfree(kvm->arch.crypto.crycb);
        if (!kvm_is_ucontrol(kvm))
                gmap_free(kvm->arch.gmap);
        kvm_s390_destroy_adapters(kvm);
 }
 
+static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
+{
+       if (!test_vfacility(76))
+               return;
+
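+       /* point this vcpu's SIE block at the VM-wide crypto control block */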
+       vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
+}
+
 void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
 {
        free_page(vcpu->arch.sie_block->cbrlo);
        vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
        get_cpu_id(&vcpu->arch.cpu_id);
        vcpu->arch.cpu_id.version = 0xff;
+
+       kvm_s390_vcpu_crypto_setup(vcpu);
+
        return rc;
 }