/* Cache the HVA pointer of the region */
        host_test_mem = addr_gpa2hva(vm, (vm_paddr_t)guest_test_phys_mem);
 
-#ifdef __x86_64__
-       vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
-#endif
        ucall_init(vm, NULL);
 
        /* Export the shared variables to the guest */
 
 
                vm_vcpu_add_default(vm, vcpu_id, guest_code);
 
-#ifdef __x86_64__
-               vcpu_set_cpuid(vm, vcpu_id, kvm_get_supported_cpuid());
-#endif
-
                vcpu_args->vcpu_id = vcpu_id;
                vcpu_args->gva = guest_test_virt_mem +
                                 (vcpu_id * vcpu_memory_bytes);
 
        vm_create_irqchip(vm);
 #endif
 
-       for (i = 0; i < nr_vcpus; ++i)
-               vm_vcpu_add_default(vm, vcpuids ? vcpuids[i] : i, guest_code);
+       for (i = 0; i < nr_vcpus; ++i) {
+               uint32_t vcpuid = vcpuids ? vcpuids[i] : i;
+
+               vm_vcpu_add_default(vm, vcpuid, guest_code);
+
+#ifdef __x86_64__
+               vcpu_set_cpuid(vm, vcpuid, kvm_get_supported_cpuid());
+#endif
+       }
 
        return vm;
 }
 
 
        vm = vm_create_default(VCPU_ID, 0, guest_code);
 
-       vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
-
        vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS_THP,
                                    MEM_REGION_GPA, MEM_REGION_SLOT,
                                    MEM_REGION_SIZE / getpagesize(), 0);
 
 
        /* Create VM */
        vm = vm_create_default(VCPU_ID, 0, guest_code);
-       vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
        run = vcpu_state(vm, VCPU_ID);
 
        while (1) {
 
        }
 
        vm = vm_create_default(VCPU_ID, 0, guest_code);
-       vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
        run = vcpu_state(vm, VCPU_ID);
 
        /* Test software BPs - int3 */
 
        /* Create VM */
        vm = vm_create_default(VCPU_ID, 0, guest_code);
 
-       vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
-
        if (!nested_vmx_supported() ||
            !kvm_check_cap(KVM_CAP_NESTED_STATE) ||
            !kvm_check_cap(KVM_CAP_HYPERV_ENLIGHTENED_VMCS)) {
 
        /* Create VM */
        vm = vm_create_default(VCPU_ID, 0, guest_code);
 
-       vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
-
        run = vcpu_state(vm, VCPU_ID);
 
        vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, SMRAM_GPA,
 
 
        /* Create VM */
        vm = vm_create_default(VCPU_ID, 0, guest_code);
-       vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
        run = vcpu_state(vm, VCPU_ID);
 
        vcpu_regs_get(vm, VCPU_ID, &regs1);
 
        nested_svm_check_supported();
 
        vm = vm_create_default(VCPU_ID, 0, (void *) l1_guest_code);
-       vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
 
        vcpu_alloc_svm(vm, &svm_gva);
        vcpu_args_set(vm, VCPU_ID, 1, svm_gva);
 
        uint64_t val;
 
        vm = vm_create_default(VCPU_ID, 0, guest_code);
-       vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
 
        val = 0;
        ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC), val);
 
 
        /* Create VM */
        vm = vm_create_default(VCPU_ID, 0, guest_code);
-       vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
        run = vcpu_state(vm, VCPU_ID);
 
        rc = kvm_check_cap(KVM_CAP_X86_USER_SPACE_MSR);
 
        nested_vmx_check_supported();
 
        vm = vm_create_default(VCPU_ID, 0, (void *) l1_guest_code);
-       vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
 
        kvm_get_cpu_address_width(&paddr_width, &vaddr_width);
        high_gpa = (1ul << paddr_width) - getpagesize();
 
        nested_vmx_check_supported();
 
        vm = vm_create_default(VCPU_ID, 0, (void *) l1_guest_code);
-       vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
 
        /* Allocate VMX pages and shared descriptors (vmx_pages). */
        vcpu_alloc_vmx(vm, &vmx_pages_gva);
 
 
        /* Create VM */
        vm = vm_create_default(VCPU_ID, 0, l1_guest_code);
-       vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
        vmx = vcpu_alloc_vmx(vm, &vmx_pages_gva);
        vcpu_args_set(vm, VCPU_ID, 1, vmx_pages_gva);
        run = vcpu_state(vm, VCPU_ID);
 
 
        /* Create VM */
        vm = vm_create_default(VCPU_ID, 0, guest_code);
-       vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
        run = vcpu_state(vm, VCPU_ID);
 
        vcpu_regs_get(vm, VCPU_ID, &regs1);
 
        free(state);
 }
 
+/*
+ * Hide VMX from the guest: clear the CPUID_VMX feature bit in the
+ * CPUID.1 (index 0) entry and push the modified CPUID to the vCPU, so
+ * nested-state ioctls can be exercised on a guest without VMX.
+ *
+ * The bit is restored afterwards because kvm_get_supported_cpuid()
+ * presumably returns a shared/cached kvm_cpuid2 — NOTE(review): confirm;
+ * if it returned a fresh copy each call, the restore would be unneeded.
+ */
+void disable_vmx(struct kvm_vm *vm)
+{
+       struct kvm_cpuid2 *cpuid = kvm_get_supported_cpuid();
+       int i;
+
+       /* Locate the leaf-1/index-0 entry that carries the VMX flag. */
+       for (i = 0; i < cpuid->nent; ++i)
+               if (cpuid->entries[i].function == 1 &&
+                   cpuid->entries[i].index == 0)
+                       break;
+       TEST_ASSERT(i != cpuid->nent, "CPUID function 1 not found");
+
+       cpuid->entries[i].ecx &= ~CPUID_VMX;
+       vcpu_set_cpuid(vm, VCPU_ID, cpuid);
+       /* Undo the edit so later users of the supported-CPUID see VMX. */
+       cpuid->entries[i].ecx |= CPUID_VMX;
+}
+
 int main(int argc, char *argv[])
 {
        struct kvm_vm *vm;
 
        vm = vm_create_default(VCPU_ID, 0, 0);
 
+       /*
+        * First run tests with VMX disabled to check error handling.
+        */
+       disable_vmx(vm);
+
        /* Passing a NULL kvm_nested_state causes a EFAULT. */
        test_nested_state_expect_efault(vm, NULL);
 
 
        nested_vmx_check_supported();
 
        vm = vm_create_default(VCPU_ID, 0, (void *) l1_guest_code);
-       vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
 
        /* Allocate VMX pages and shared descriptors (vmx_pages). */
        vcpu_alloc_vmx(vm, &vmx_pages_gva);