 struct svm_test_data *vcpu_alloc_svm(struct kvm_vm *vm, vm_vaddr_t *p_svm_gva);
 void generic_svm_setup(struct svm_test_data *svm, void *guest_rip, void *guest_rsp);
 void run_guest(struct vmcb *vmcb, uint64_t vmcb_gpa);
+bool nested_svm_supported(void);
 void nested_svm_check_supported(void);
 
 static inline bool cpu_has_svm(void)
 
 void prepare_vmcs(struct vmx_pages *vmx, void *guest_rip, void *guest_rsp);
 bool load_vmcs(struct vmx_pages *vmx);
 
+bool nested_vmx_supported(void);
 void nested_vmx_check_supported(void);
 
 void nested_pg_map(struct vmx_pages *vmx, struct kvm_vm *vm,
 
                : "r15", "memory");
 }
 
-void nested_svm_check_supported(void)
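+/* Check KVM's supported CPUID (leaf 0x80000001, ECX) for the SVM bit. */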
+bool nested_svm_supported(void)
 {
        struct kvm_cpuid_entry2 *entry =
                kvm_get_supported_cpuid_entry(0x80000001);
 
-       if (!(entry->ecx & CPUID_SVM)) {
+       return entry->ecx & CPUID_SVM;
+}
+
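+/* Skip the calling test with KSFT_SKIP when nested SVM is unavailable. */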
+void nested_svm_check_supported(void)
+{
+       if (!nested_svm_supported()) {
                print_skip("nested SVM not enabled");
                exit(KSFT_SKIP);
        }
 }
-
 
        init_vmcs_guest_state(guest_rip, guest_rsp);
 }
 
-void nested_vmx_check_supported(void)
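+/* Check KVM's supported CPUID (leaf 1, ECX) for the VMX bit. */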
+bool nested_vmx_supported(void)
 {
        struct kvm_cpuid_entry2 *entry = kvm_get_supported_cpuid_entry(1);
 
-       if (!(entry->ecx & CPUID_VMX)) {
+       return entry->ecx & CPUID_VMX;
+}
+
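+/* Skip the calling test with KSFT_SKIP when nested VMX is unavailable. */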
+void nested_vmx_check_supported(void)
+{
+       if (!nested_vmx_supported()) {
                print_skip("nested VMX not enabled");
                exit(KSFT_SKIP);
        }
 
        vcpu_set_msr(vm, VCPU_ID, MSR_IA32_SMBASE, SMRAM_GPA);
 
        if (kvm_check_cap(KVM_CAP_NESTED_STATE)) {
-               if (kvm_get_supported_cpuid_entry(0x80000001)->ecx & CPUID_SVM)
+               if (nested_svm_supported())
                        vcpu_alloc_svm(vm, &nested_gva);
-               else
+               else if (nested_vmx_supported())
                        vcpu_alloc_vmx(vm, &nested_gva);
-               vcpu_args_set(vm, VCPU_ID, 1, nested_gva);
-       } else {
-               pr_info("will skip SMM test with VMX enabled\n");
-               vcpu_args_set(vm, VCPU_ID, 1, 0);
        }
 
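+       /* nested_gva == 0 signals the guest to skip the nested part of the test. */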
+       if (!nested_gva)
+               pr_info("will skip SMM test with VMX enabled\n");
+
+       vcpu_args_set(vm, VCPU_ID, 1, nested_gva);
+
        for (stage = 1;; stage++) {
                _vcpu_run(vm, VCPU_ID);
                TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
 
        vcpu_regs_get(vm, VCPU_ID, &regs1);
 
        if (kvm_check_cap(KVM_CAP_NESTED_STATE)) {
-               if (kvm_get_supported_cpuid_entry(0x80000001)->ecx & CPUID_SVM)
+               if (nested_svm_supported())
                        vcpu_alloc_svm(vm, &nested_gva);
-               else
+               else if (nested_vmx_supported())
                        vcpu_alloc_vmx(vm, &nested_gva);
-               vcpu_args_set(vm, VCPU_ID, 1, nested_gva);
-       } else {
-               pr_info("will skip nested state checks\n");
-               vcpu_args_set(vm, VCPU_ID, 1, 0);
        }
 
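+       /* nested_gva == 0 signals the guest to skip the nested state checks. */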
+       if (!nested_gva)
+               pr_info("will skip nested state checks\n");
+
+       vcpu_args_set(vm, VCPU_ID, 1, nested_gva);
+
        for (stage = 1;; stage++) {
                _vcpu_run(vm, VCPU_ID);
                TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,