return cpus_have_final_cap(ARM64_KVM_PROTECTED_MODE);
 }
 
+static __always_inline bool has_hvhe(void)
+{
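+       /* Code built for the VHE hypervisor never runs in hVHE mode */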
+       if (is_vhe_hyp_code())
+               return false;
+
+       return cpus_have_final_cap(ARM64_KVM_HVHE);
+}
+
 static inline bool is_hyp_nvhe(void)
 {
        return is_hyp_mode_available() && !is_kernel_in_hyp_mode();
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ ... @@
        return true;
 }
 
+static bool hvhe_possible(const struct arm64_cpu_capabilities *entry,
+                         int __unused)
+{
+       u64 val;
+
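+       /* hVHE requires the CPU to implement FEAT_VH (ID_AA64MMFR1_EL1.VH) */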
+       val = read_sysreg(id_aa64mmfr1_el1);
+       if (!cpuid_feature_extract_unsigned_field(val, ID_AA64MMFR1_EL1_VH_SHIFT))
+               return false;
+
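+       /* ... and hVHE to have been requested via the arm64_sw feature override */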
+       val = arm64_sw_feature_override.val & arm64_sw_feature_override.mask;
+       return cpuid_feature_extract_unsigned_field(val, ARM64_SW_FEATURE_OVERRIDE_HVHE);
+}
+
 #ifdef CONFIG_ARM64_PAN
 static void cpu_enable_pan(const struct arm64_cpu_capabilities *__unused)
 {
@@ ... @@
                .cpu_enable = cpu_enable_dit,
                ARM64_CPUID_FIELDS(ID_AA64PFR0_EL1, DIT, IMP)
        },
+       {
+               .desc = "VHE for hypervisor only",
+               .capability = ARM64_KVM_HVHE,
+               .type = ARM64_CPUCAP_SYSTEM_FEATURE,
+               .matches = hvhe_possible,
+       },
        {},
 };