        return r;
 }
 
+static struct kvmppc_vcore *kvmppc_vcore_create(struct kvm *kvm, int core)
+{
+       struct kvmppc_vcore *vcore;
+
+       vcore = kzalloc(sizeof(struct kvmppc_vcore), GFP_KERNEL);
+
+       if (vcore == NULL)
+               return NULL;
+
+       INIT_LIST_HEAD(&vcore->runnable_threads);
+       spin_lock_init(&vcore->lock);
+       init_waitqueue_head(&vcore->wq);
+       vcore->preempt_tb = TB_NIL;
+       vcore->lpcr = kvm->arch.lpcr;
+       vcore->first_vcpuid = core * threads_per_subcore;
+       vcore->kvm = kvm;
+
+       return vcore;
+}
+
 static struct kvm_vcpu *kvmppc_core_vcpu_create_hv(struct kvm *kvm,
                                                   unsigned int id)
 {
        mutex_lock(&kvm->lock);
        vcore = kvm->arch.vcores[core];
        if (!vcore) {
-               vcore = kzalloc(sizeof(struct kvmppc_vcore), GFP_KERNEL);
-               if (vcore) {
-                       INIT_LIST_HEAD(&vcore->runnable_threads);
-                       spin_lock_init(&vcore->lock);
-                       init_waitqueue_head(&vcore->wq);
-                       vcore->preempt_tb = TB_NIL;
-                       vcore->lpcr = kvm->arch.lpcr;
-                       vcore->first_vcpuid = core * threads_per_subcore;
-                       vcore->kvm = kvm;
-               }
+               vcore = kvmppc_vcore_create(kvm, core);
                kvm->arch.vcores[core] = vcore;
                kvm->arch.online_vcores++;
        }