#include "../kvm_util.h"
 
+extern bool host_cpu_is_intel;
+extern bool host_cpu_is_amd;
+
 #define NMI_VECTOR             0x02
 
 #define X86_EFLAGS_FIXED        (1u << 1)
 
 #define MAX_NR_CPUID_ENTRIES 100
 
 vm_vaddr_t exception_handlers;
+bool host_cpu_is_amd;
+bool host_cpu_is_intel;
 
 static void regs_dump(FILE *stream, struct kvm_regs *regs, uint8_t indent)
 {
 
 bool kvm_is_tdp_enabled(void)
 {
-       if (this_cpu_is_intel())
+       if (host_cpu_is_intel)
                return get_kvm_intel_param_bool("ept");
        else
                return get_kvm_amd_param_bool("npt");
 void kvm_arch_vm_post_create(struct kvm_vm *vm)
 {
        vm_create_irqchip(vm);
+       /*
+        * Propagate the cached host CPU vendor flags into the guest's
+        * copy of the globals, so guest code can test them directly
+        * instead of re-executing CPUID via this_cpu_is_intel()/amd().
+        */
+       sync_global_to_guest(vm, host_cpu_is_intel);
+       sync_global_to_guest(vm, host_cpu_is_amd);
 }
 
 struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
        max_gfn = (1ULL << (vm->pa_bits - vm->page_shift)) - 1;
 
        /* Avoid reserved HyperTransport region on AMD processors.  */
-       if (!this_cpu_is_amd())
+       if (!host_cpu_is_amd)
                return max_gfn;
 
        /* On parts with <40 physical address bits, the area is fully hidden */
 
        return get_kvm_intel_param_bool("unrestricted_guest");
 }
+
+void kvm_selftest_arch_init(void)
+{
+       /*
+        * Cache the host CPU vendor once at selftest init; the rest of
+        * the selftest code reads host_cpu_is_intel/host_cpu_is_amd
+        * instead of invoking CPUID on every check.
+        */
+       host_cpu_is_intel = this_cpu_is_intel();
+       host_cpu_is_amd = this_cpu_is_amd();
+}
 
        const uint8_t *other_hypercall_insn;
        uint64_t ret;
 
-       if (this_cpu_is_intel()) {
+       if (host_cpu_is_intel) {
                native_hypercall_insn = vmx_vmcall;
                other_hypercall_insn  = svm_vmmcall;
-       } else if (this_cpu_is_amd()) {
+       } else if (host_cpu_is_amd) {
                native_hypercall_insn = svm_vmmcall;
                other_hypercall_insn  = vmx_vmcall;
        } else {
 
 {
        int warnings_before, warnings_after;
 
-       TEST_REQUIRE(this_cpu_is_intel());
+       TEST_REQUIRE(host_cpu_is_intel);
 
        TEST_REQUIRE(!vm_is_unrestricted_guest(NULL));
 
 
  */
 static bool use_intel_pmu(void)
 {
-       return this_cpu_is_intel() &&
+       return host_cpu_is_intel &&
               kvm_cpu_property(X86_PROPERTY_PMU_VERSION) &&
               kvm_cpu_property(X86_PROPERTY_PMU_NR_GP_COUNTERS) &&
               kvm_pmu_has(X86_PMU_FEATURE_BRANCH_INSNS_RETIRED);
        uint32_t family = kvm_cpu_family();
        uint32_t model = kvm_cpu_model();
 
-       return this_cpu_is_amd() &&
+       return host_cpu_is_amd &&
                (is_zen1(family, model) ||
                 is_zen2(family, model) ||
                 is_zen3(family, model));
 
        struct kvm_vcpu *vcpu;
        struct kvm_vm *vm;
 
-       TEST_REQUIRE(this_cpu_is_intel());
+       TEST_REQUIRE(host_cpu_is_intel);
        TEST_REQUIRE(!vm_is_unrestricted_guest(NULL));
 
        vm = vm_create_with_one_vcpu(&vcpu, guest_code);