atomic_set(&completed, 0);
        cpus_clear(cpus);
        needed = 0;
-       for (i = 0; i < kvm->nvcpus; ++i) {
-               vcpu = &kvm->vcpus[i];
+       for (i = 0; i < KVM_MAX_VCPUS; ++i) {
+               vcpu = kvm->vcpus[i];
+               if (!vcpu)
+                       continue;
                if (test_and_set_bit(KVM_TLB_FLUSH, &vcpu->requests))
                        continue;
                cpu = vcpu->cpu;
        }
 }
 
+int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
+{
+       struct page *page;
+       int r;
+
+       mutex_init(&vcpu->mutex);
+       vcpu->cpu = -1;
+       vcpu->mmu.root_hpa = INVALID_PAGE;
+       vcpu->kvm = kvm;
+       vcpu->vcpu_id = id;
+
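+       /* Page for the kvm_run area shared with userspace via mmap. */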
+       page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+       if (!page) {
+               r = -ENOMEM;
+               goto fail;
+       }
+       vcpu->run = page_address(page);
+
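+       /* A second shared page carries the data for emulated port I/O. */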
+       page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+       if (!page) {
+               r = -ENOMEM;
+               goto fail_free_run;
+       }
+       vcpu->pio_data = page_address(page);
+
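+       /* fxsave/fxrstor need aligned save areas; carve both out of fx_buf. */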
+       vcpu->host_fx_image = (char*)ALIGN((hva_t)vcpu->fx_buf,
+                                          FX_IMAGE_ALIGN);
+       vcpu->guest_fx_image = vcpu->host_fx_image + FX_IMAGE_SIZE;
+
+       r = kvm_mmu_create(vcpu);
+       if (r < 0)
+               goto fail_free_pio_data;
+
+       return 0;
+
+fail_free_pio_data:
+       free_page((unsigned long)vcpu->pio_data);
+fail_free_run:
+       free_page((unsigned long)vcpu->run);
+fail:
+       return r;
+}
+EXPORT_SYMBOL_GPL(kvm_vcpu_init);
+
+void kvm_vcpu_uninit(struct kvm_vcpu *vcpu)
+{
+       kvm_mmu_destroy(vcpu);
+       free_page((unsigned long)vcpu->pio_data);
+       free_page((unsigned long)vcpu->run);
+}
+EXPORT_SYMBOL_GPL(kvm_vcpu_uninit);
+
 static struct kvm *kvm_create_vm(void)
 {
        struct kvm *kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);
-       int i;
 
        if (!kvm)
                return ERR_PTR(-ENOMEM);
        spin_lock_init(&kvm->lock);
        INIT_LIST_HEAD(&kvm->active_mmu_pages);
        kvm_io_bus_init(&kvm->mmio_bus);
-       for (i = 0; i < KVM_MAX_VCPUS; ++i) {
-               struct kvm_vcpu *vcpu = &kvm->vcpus[i];
-
-               mutex_init(&vcpu->mutex);
-               vcpu->cpu = -1;
-               vcpu->kvm = kvm;
-               vcpu->mmu.root_hpa = INVALID_PAGE;
-       }
        spin_lock(&kvm_lock);
        list_add(&kvm->vm_list, &vm_list);
        spin_unlock(&kvm_lock);
 
 static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu)
 {
-       if (!vcpu->valid)
-               return;
-
        vcpu_load(vcpu);
        kvm_mmu_unload(vcpu);
        vcpu_put(vcpu);
 }
 
-static void kvm_free_vcpu(struct kvm_vcpu *vcpu)
-{
-       if (!vcpu->valid)
-               return;
-
-       vcpu_load(vcpu);
-       kvm_mmu_destroy(vcpu);
-       vcpu_put(vcpu);
-       kvm_arch_ops->vcpu_free(vcpu);
-       free_page((unsigned long)vcpu->run);
-       vcpu->run = NULL;
-       free_page((unsigned long)vcpu->pio_data);
-       vcpu->pio_data = NULL;
-       free_pio_guest_pages(vcpu);
-}
-
 static void kvm_free_vcpus(struct kvm *kvm)
 {
        unsigned int i;
 
        /*
         * Unpin any mmu pages first.
         */
        for (i = 0; i < KVM_MAX_VCPUS; ++i)
-               kvm_unload_vcpu_mmu(&kvm->vcpus[i]);
-       for (i = 0; i < KVM_MAX_VCPUS; ++i)
-               kvm_free_vcpu(&kvm->vcpus[i]);
+               if (kvm->vcpus[i])
+                       kvm_unload_vcpu_mmu(kvm->vcpus[i]);
+       for (i = 0; i < KVM_MAX_VCPUS; ++i) {
+               if (kvm->vcpus[i]) {
+                       kvm_arch_ops->vcpu_free(kvm->vcpus[i]);
+                       kvm->vcpus[i] = NULL;
+               }
+       }
 }
 
 static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, int n)
 {
        int r;
        struct kvm_vcpu *vcpu;
-       struct page *page;
 
-       r = -EINVAL;
        if (!valid_vcpu(n))
-               goto out;
-
-       vcpu = &kvm->vcpus[n];
-       vcpu->vcpu_id = n;
-
-       mutex_lock(&vcpu->mutex);
-
-       if (vcpu->valid) {
-               mutex_unlock(&vcpu->mutex);
-               return -EEXIST;
-       }
-
-       page = alloc_page(GFP_KERNEL | __GFP_ZERO);
-       r = -ENOMEM;
-       if (!page)
-               goto out_unlock;
-       vcpu->run = page_address(page);
-
-       page = alloc_page(GFP_KERNEL | __GFP_ZERO);
-       r = -ENOMEM;
-       if (!page)
-               goto out_free_run;
-       vcpu->pio_data = page_address(page);
-
-       vcpu->host_fx_image = (char*)ALIGN((hva_t)vcpu->fx_buf,
-                                          FX_IMAGE_ALIGN);
-       vcpu->guest_fx_image = vcpu->host_fx_image + FX_IMAGE_SIZE;
-       vcpu->cr0 = 0x10;
-
-       r = kvm_arch_ops->vcpu_create(vcpu);
-       if (r < 0)
-               goto out_free_vcpus;
+               return -EINVAL;
 
-       r = kvm_mmu_create(vcpu);
-       if (r < 0)
-               goto out_free_vcpus;
+       vcpu = kvm_arch_ops->vcpu_create(kvm, n);
+       if (IS_ERR(vcpu))
+               return PTR_ERR(vcpu);
 
-       kvm_arch_ops->vcpu_load(vcpu);
+       vcpu_load(vcpu);
        r = kvm_mmu_setup(vcpu);
-       if (r >= 0)
-               r = kvm_arch_ops->vcpu_setup(vcpu);
        vcpu_put(vcpu);
-
        if (r < 0)
-               goto out_free_vcpus;
+               goto free_vcpu;
 
+       spin_lock(&kvm->lock);
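+       /* A racing creator may already have claimed slot n. */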
+       if (kvm->vcpus[n]) {
+               r = -EEXIST;
+               spin_unlock(&kvm->lock);
+               goto mmu_unload;
+       }
+       kvm->vcpus[n] = vcpu;
+       spin_unlock(&kvm->lock);
+
+       /* Now it's all set up, let userspace reach it */
        r = create_vcpu_fd(vcpu);
        if (r < 0)
-               goto out_free_vcpus;
-
-       spin_lock(&kvm_lock);
-       if (n >= kvm->nvcpus)
-               kvm->nvcpus = n + 1;
-       spin_unlock(&kvm_lock);
+               goto unlink;
+       return r;
 
-       vcpu->valid = 1;
+unlink:
+       spin_lock(&kvm->lock);
+       kvm->vcpus[n] = NULL;
+       spin_unlock(&kvm->lock);
 
-       return r;
+mmu_unload:
+       vcpu_load(vcpu);
+       kvm_mmu_unload(vcpu);
+       vcpu_put(vcpu);
 
-out_free_vcpus:
-       kvm_free_vcpu(vcpu);
-out_free_run:
-       free_page((unsigned long)vcpu->run);
-       vcpu->run = NULL;
-out_unlock:
-       mutex_unlock(&vcpu->mutex);
-out:
+free_vcpu:
+       kvm_arch_ops->vcpu_free(vcpu);
        return r;
 }
 
        int i;
 
        spin_lock(&kvm_lock);
-       list_for_each_entry(vm, &vm_list, vm_list)
+       list_for_each_entry(vm, &vm_list, vm_list) {
+               spin_lock(&vm->lock);
                for (i = 0; i < KVM_MAX_VCPUS; ++i) {
-                       vcpu = &vm->vcpus[i];
+                       vcpu = vm->vcpus[i];
+                       if (!vcpu)
+                               continue;
                        /*
                         * If the vcpu is locked, then it is running on some
                         * other cpu and therefore it is not cached on the
                                mutex_unlock(&vcpu->mutex);
                        }
                }
+               spin_unlock(&vm->lock);
+       }
        spin_unlock(&kvm_lock);
 }
 
        spin_lock(&kvm_lock);
        list_for_each_entry(kvm, &vm_list, vm_list)
                for (i = 0; i < KVM_MAX_VCPUS; ++i) {
-                       vcpu = &kvm->vcpus[i];
-                       total += *(u32 *)((void *)vcpu + offset);
+                       vcpu = kvm->vcpus[i];
+                       if (vcpu)
+                               total += *(u32 *)((void *)vcpu + offset);
                }
        spin_unlock(&kvm_lock);
        return total;
 
 
 static inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
 {
-       return (struct vcpu_svm*)vcpu->_priv;
+       return container_of(vcpu, struct vcpu_svm, vcpu);
 }
 
 unsigned long iopm_base;
        seg->base = 0;
 }
 
-static int svm_vcpu_setup(struct kvm_vcpu *vcpu)
-{
-       return 0;
-}
-
 static void init_vmcb(struct vmcb *vmcb)
 {
        struct vmcb_control_area *control = &vmcb->control;
        /* rdx = ?? */
 }
 
-static int svm_create_vcpu(struct kvm_vcpu *vcpu)
+static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
 {
        struct vcpu_svm *svm;
        struct page *page;
-       int r;
+       int err;
 
-       r = -ENOMEM;
        svm = kzalloc(sizeof *svm, GFP_KERNEL);
-       if (!svm)
-               goto out1;
+       if (!svm) {
+               err = -ENOMEM;
+               goto out;
+       }
+
+       err = kvm_vcpu_init(&svm->vcpu, kvm, id);
+       if (err)
+               goto free_svm;
+
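+       /* The VMCB gets a naturally aligned page of its own. */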
        page = alloc_page(GFP_KERNEL);
-       if (!page)
-               goto out2;
+       if (!page) {
+               err = -ENOMEM;
+               goto uninit;
+       }
 
        svm->vmcb = page_address(page);
        clear_page(svm->vmcb);
        memset(svm->db_regs, 0, sizeof(svm->db_regs));
        init_vmcb(svm->vmcb);
 
-       svm->vcpu   = vcpu;
-       vcpu->_priv = svm;
+       fx_init(&svm->vcpu);
+       svm->vcpu.fpu_active = 1;
+       svm->vcpu.apic_base = 0xfee00000 | MSR_IA32_APICBASE_ENABLE;
+       if (svm->vcpu.vcpu_id == 0)
+               svm->vcpu.apic_base |= MSR_IA32_APICBASE_BSP;
 
-       fx_init(vcpu);
-       vcpu->fpu_active = 1;
-       vcpu->apic_base = 0xfee00000 | MSR_IA32_APICBASE_ENABLE;
-       if (vcpu->vcpu_id == 0)
-               vcpu->apic_base |= MSR_IA32_APICBASE_BSP;
+       return &svm->vcpu;
 
-       return 0;
-
-out2:
+uninit:
+       kvm_vcpu_uninit(&svm->vcpu);
+free_svm:
        kfree(svm);
-out1:
-       return r;
+out:
+       return ERR_PTR(err);
 }
 
 static void svm_free_vcpu(struct kvm_vcpu *vcpu)
 {
        struct vcpu_svm *svm = to_svm(vcpu);
 
-       if (!svm)
-               return;
-       if (svm->vmcb)
-               __free_page(pfn_to_page(svm->vmcb_pa >> PAGE_SHIFT));
+       __free_page(pfn_to_page(svm->vmcb_pa >> PAGE_SHIFT));
+       kvm_vcpu_uninit(vcpu);
        kfree(svm);
-       vcpu->_priv = NULL;
 }
 
 static void svm_vcpu_load(struct kvm_vcpu *vcpu)
 #endif
 
 #ifdef CONFIG_X86_64
-               "mov %c[rbx](%[vcpu]), %%rbx \n\t"
-               "mov %c[rcx](%[vcpu]), %%rcx \n\t"
-               "mov %c[rdx](%[vcpu]), %%rdx \n\t"
-               "mov %c[rsi](%[vcpu]), %%rsi \n\t"
-               "mov %c[rdi](%[vcpu]), %%rdi \n\t"
-               "mov %c[rbp](%[vcpu]), %%rbp \n\t"
-               "mov %c[r8](%[vcpu]),  %%r8  \n\t"
-               "mov %c[r9](%[vcpu]),  %%r9  \n\t"
-               "mov %c[r10](%[vcpu]), %%r10 \n\t"
-               "mov %c[r11](%[vcpu]), %%r11 \n\t"
-               "mov %c[r12](%[vcpu]), %%r12 \n\t"
-               "mov %c[r13](%[vcpu]), %%r13 \n\t"
-               "mov %c[r14](%[vcpu]), %%r14 \n\t"
-               "mov %c[r15](%[vcpu]), %%r15 \n\t"
+               "mov %c[rbx](%[svm]), %%rbx \n\t"
+               "mov %c[rcx](%[svm]), %%rcx \n\t"
+               "mov %c[rdx](%[svm]), %%rdx \n\t"
+               "mov %c[rsi](%[svm]), %%rsi \n\t"
+               "mov %c[rdi](%[svm]), %%rdi \n\t"
+               "mov %c[rbp](%[svm]), %%rbp \n\t"
+               "mov %c[r8](%[svm]),  %%r8  \n\t"
+               "mov %c[r9](%[svm]),  %%r9  \n\t"
+               "mov %c[r10](%[svm]), %%r10 \n\t"
+               "mov %c[r11](%[svm]), %%r11 \n\t"
+               "mov %c[r12](%[svm]), %%r12 \n\t"
+               "mov %c[r13](%[svm]), %%r13 \n\t"
+               "mov %c[r14](%[svm]), %%r14 \n\t"
+               "mov %c[r15](%[svm]), %%r15 \n\t"
 #else
-               "mov %c[rbx](%[vcpu]), %%ebx \n\t"
-               "mov %c[rcx](%[vcpu]), %%ecx \n\t"
-               "mov %c[rdx](%[vcpu]), %%edx \n\t"
-               "mov %c[rsi](%[vcpu]), %%esi \n\t"
-               "mov %c[rdi](%[vcpu]), %%edi \n\t"
-               "mov %c[rbp](%[vcpu]), %%ebp \n\t"
+               "mov %c[rbx](%[svm]), %%ebx \n\t"
+               "mov %c[rcx](%[svm]), %%ecx \n\t"
+               "mov %c[rdx](%[svm]), %%edx \n\t"
+               "mov %c[rsi](%[svm]), %%esi \n\t"
+               "mov %c[rdi](%[svm]), %%edi \n\t"
+               "mov %c[rbp](%[svm]), %%ebp \n\t"
 #endif
 
 #ifdef CONFIG_X86_64
                /* Enter guest mode */
                "push %%rax \n\t"
-               "mov %c[svm](%[vcpu]), %%rax \n\t"
-               "mov %c[vmcb](%%rax), %%rax \n\t"
+               "mov %c[vmcb](%[svm]), %%rax \n\t"
                SVM_VMLOAD "\n\t"
                SVM_VMRUN "\n\t"
                SVM_VMSAVE "\n\t"
 #else
                /* Enter guest mode */
                "push %%eax \n\t"
-               "mov %c[svm](%[vcpu]), %%eax \n\t"
-               "mov %c[vmcb](%%eax), %%eax \n\t"
+               "mov %c[vmcb](%[svm]), %%eax \n\t"
                SVM_VMLOAD "\n\t"
                SVM_VMRUN "\n\t"
                SVM_VMSAVE "\n\t"
 
                /* Save guest registers, load host registers */
 #ifdef CONFIG_X86_64
-               "mov %%rbx, %c[rbx](%[vcpu]) \n\t"
-               "mov %%rcx, %c[rcx](%[vcpu]) \n\t"
-               "mov %%rdx, %c[rdx](%[vcpu]) \n\t"
-               "mov %%rsi, %c[rsi](%[vcpu]) \n\t"
-               "mov %%rdi, %c[rdi](%[vcpu]) \n\t"
-               "mov %%rbp, %c[rbp](%[vcpu]) \n\t"
-               "mov %%r8,  %c[r8](%[vcpu]) \n\t"
-               "mov %%r9,  %c[r9](%[vcpu]) \n\t"
-               "mov %%r10, %c[r10](%[vcpu]) \n\t"
-               "mov %%r11, %c[r11](%[vcpu]) \n\t"
-               "mov %%r12, %c[r12](%[vcpu]) \n\t"
-               "mov %%r13, %c[r13](%[vcpu]) \n\t"
-               "mov %%r14, %c[r14](%[vcpu]) \n\t"
-               "mov %%r15, %c[r15](%[vcpu]) \n\t"
+               "mov %%rbx, %c[rbx](%[svm]) \n\t"
+               "mov %%rcx, %c[rcx](%[svm]) \n\t"
+               "mov %%rdx, %c[rdx](%[svm]) \n\t"
+               "mov %%rsi, %c[rsi](%[svm]) \n\t"
+               "mov %%rdi, %c[rdi](%[svm]) \n\t"
+               "mov %%rbp, %c[rbp](%[svm]) \n\t"
+               "mov %%r8,  %c[r8](%[svm]) \n\t"
+               "mov %%r9,  %c[r9](%[svm]) \n\t"
+               "mov %%r10, %c[r10](%[svm]) \n\t"
+               "mov %%r11, %c[r11](%[svm]) \n\t"
+               "mov %%r12, %c[r12](%[svm]) \n\t"
+               "mov %%r13, %c[r13](%[svm]) \n\t"
+               "mov %%r14, %c[r14](%[svm]) \n\t"
+               "mov %%r15, %c[r15](%[svm]) \n\t"
 
                "pop  %%r15; pop  %%r14; pop  %%r13; pop  %%r12;"
                "pop  %%r11; pop  %%r10; pop  %%r9;  pop  %%r8;"
                "pop  %%rbp; pop  %%rdi; pop  %%rsi;"
                "pop  %%rdx; pop  %%rcx; pop  %%rbx; \n\t"
 #else
-               "mov %%ebx, %c[rbx](%[vcpu]) \n\t"
-               "mov %%ecx, %c[rcx](%[vcpu]) \n\t"
-               "mov %%edx, %c[rdx](%[vcpu]) \n\t"
-               "mov %%esi, %c[rsi](%[vcpu]) \n\t"
-               "mov %%edi, %c[rdi](%[vcpu]) \n\t"
-               "mov %%ebp, %c[rbp](%[vcpu]) \n\t"
+               "mov %%ebx, %c[rbx](%[svm]) \n\t"
+               "mov %%ecx, %c[rcx](%[svm]) \n\t"
+               "mov %%edx, %c[rdx](%[svm]) \n\t"
+               "mov %%esi, %c[rsi](%[svm]) \n\t"
+               "mov %%edi, %c[rdi](%[svm]) \n\t"
+               "mov %%ebp, %c[rbp](%[svm]) \n\t"
 
                "pop  %%ebp; pop  %%edi; pop  %%esi;"
                "pop  %%edx; pop  %%ecx; pop  %%ebx; \n\t"
 #endif
                :
-               : [vcpu]"a"(vcpu),
-                 [svm]"i"(offsetof(struct kvm_vcpu, _priv)),
+               : [svm]"a"(svm),
                  [vmcb]"i"(offsetof(struct vcpu_svm, vmcb_pa)),
-                 [rbx]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RBX])),
-                 [rcx]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RCX])),
-                 [rdx]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RDX])),
-                 [rsi]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RSI])),
-                 [rdi]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RDI])),
-                 [rbp]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RBP]))
+                 [rbx]"i"(offsetof(struct vcpu_svm,vcpu.regs[VCPU_REGS_RBX])),
+                 [rcx]"i"(offsetof(struct vcpu_svm,vcpu.regs[VCPU_REGS_RCX])),
+                 [rdx]"i"(offsetof(struct vcpu_svm,vcpu.regs[VCPU_REGS_RDX])),
+                 [rsi]"i"(offsetof(struct vcpu_svm,vcpu.regs[VCPU_REGS_RSI])),
+                 [rdi]"i"(offsetof(struct vcpu_svm,vcpu.regs[VCPU_REGS_RDI])),
+                 [rbp]"i"(offsetof(struct vcpu_svm,vcpu.regs[VCPU_REGS_RBP]))
 #ifdef CONFIG_X86_64
-                 ,[r8 ]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R8 ])),
-                 [r9 ]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R9 ])),
-                 [r10]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R10])),
-                 [r11]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R11])),
-                 [r12]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R12])),
-                 [r13]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R13])),
-                 [r14]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R14])),
-                 [r15]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R15]))
+                 ,[r8 ]"i"(offsetof(struct vcpu_svm,vcpu.regs[VCPU_REGS_R8])),
+                 [r9 ]"i"(offsetof(struct vcpu_svm,vcpu.regs[VCPU_REGS_R9 ])),
+                 [r10]"i"(offsetof(struct vcpu_svm,vcpu.regs[VCPU_REGS_R10])),
+                 [r11]"i"(offsetof(struct vcpu_svm,vcpu.regs[VCPU_REGS_R11])),
+                 [r12]"i"(offsetof(struct vcpu_svm,vcpu.regs[VCPU_REGS_R12])),
+                 [r13]"i"(offsetof(struct vcpu_svm,vcpu.regs[VCPU_REGS_R13])),
+                 [r14]"i"(offsetof(struct vcpu_svm,vcpu.regs[VCPU_REGS_R14])),
+                 [r15]"i"(offsetof(struct vcpu_svm,vcpu.regs[VCPU_REGS_R15]))
 #endif
                : "cc", "memory" );
 
 
        .run = svm_vcpu_run,
        .skip_emulated_instruction = skip_emulated_instruction,
-       .vcpu_setup = svm_vcpu_setup,
        .patch_hypercall = svm_patch_hypercall,
 };