 #include <asm/kprobes.h>
 #include <asm/kvm_asm.h>
 #include <asm/kvm_emulate.h>
+#include <asm/kvm_fixed_config.h>
 #include <asm/kvm_hyp.h>
 #include <asm/kvm_mmu.h>
 #include <asm/fpsimd.h>
                write_sysreg(pmu->events_host, pmcntenset_el0);
 }
 
+/**
+ * Handler for protected VM restricted exceptions.
+ *
+ * Inject an undefined exception into the guest and return true to indicate that
+ * the hypervisor has handled the exit and that control should go back to the guest.
+ */
+static bool kvm_handle_pvm_restricted(struct kvm_vcpu *vcpu, u64 *exit_code)
+{
+       inject_undef64(vcpu);
+       return true;
+}
+
+/**
+ * Handler for protected VM MSR, MRS or System instruction execution in AArch64.
+ *
+ * Returns true if the hypervisor has handled the exit and control should go
+ * back to the guest, or false if it hasn't.
+ */
+static bool kvm_handle_pvm_sys64(struct kvm_vcpu *vcpu, u64 *exit_code)
+{
+       if (kvm_handle_pvm_sysreg(vcpu, exit_code))
+               return true;
+
+       return kvm_hyp_handle_sysreg(vcpu, exit_code);
+}
+
+/**
+ * Handler for protected floating-point and Advanced SIMD accesses.
+ *
+ * Returns true if the hypervisor has handled the exit and control should go
+ * back to the guest, or false if it hasn't.
+ */
+static bool kvm_handle_pvm_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code)
+{
+       /* Linux guests assume support for floating-point and Advanced SIMD. */
+       BUILD_BUG_ON(!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_FP),
+                               PVM_ID_AA64PFR0_ALLOW));
+       BUILD_BUG_ON(!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_ASIMD),
+                               PVM_ID_AA64PFR0_ALLOW));
+
+       return kvm_hyp_handle_fpsimd(vcpu, exit_code);
+}
+
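The BUILD_BUG_ON() checks above break the build if the FP or AdvSIMD field is
zero in PVM_ID_AA64PFR0_ALLOW, i.e. if the fixed configuration for protected
VMs ever stopped exposing features a Linux guest assumes. As a sketch only (the
macro below is illustrative and not part of this patch), the same
FIELD_GET()/ARM64_FEATURE_MASK() pattern can test any field of the allowed
ID_AA64PFR0_EL1 value:

/* Illustrative only, not part of this patch: non-zero iff the given
 * ID_AA64PFR0_EL1 field is exposed to protected VMs. */
#define pvm_pfr0_allows(field) \
	(FIELD_GET(ARM64_FEATURE_MASK(field), PVM_ID_AA64PFR0_ALLOW) != 0)

/* e.g. pvm_pfr0_allows(ID_AA64PFR0_FP) || pvm_pfr0_allows(ID_AA64PFR0_ASIMD) */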
 static const exit_handler_fn hyp_exit_handlers[] = {
        [0 ... ESR_ELx_EC_MAX]          = NULL,
        [ESR_ELx_EC_CP15_32]            = kvm_hyp_handle_cp15_32,
        [ESR_ELx_EC_PAC]                = kvm_hyp_handle_ptrauth,
 };
 
+static const exit_handler_fn pvm_exit_handlers[] = {
+       [0 ... ESR_ELx_EC_MAX]          = NULL,
+       [ESR_ELx_EC_SYS64]              = kvm_handle_pvm_sys64,
+       [ESR_ELx_EC_SVE]                = kvm_handle_pvm_restricted,
+       [ESR_ELx_EC_FP_ASIMD]           = kvm_handle_pvm_fpsimd,
+       [ESR_ELx_EC_IABT_LOW]           = kvm_hyp_handle_iabt_low,
+       [ESR_ELx_EC_DABT_LOW]           = kvm_hyp_handle_dabt_low,
+       [ESR_ELx_EC_PAC]                = kvm_hyp_handle_ptrauth,
+};
+
 static const exit_handler_fn *kvm_get_exit_handler_array(struct kvm *kvm)
 {
+       if (unlikely(kvm_vm_is_protected(kvm)))
+               return pvm_exit_handlers;
+
        return hyp_exit_handlers;
 }
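For context, the handlers in these arrays are indexed by exception class, so a
caller is expected to look up the entry matching ESR_EL2's EC field and invoke
it if non-NULL. A minimal sketch of such a dispatcher (paraphrased, not part of
this hunk; kvm_vcpu_trap_get_class() and kern_hyp_va() are existing arm64 KVM
helpers, while the function name is made up):

/* Sketch only: run the handler registered for this exit's exception class.
 * A NULL entry means the exit is not handled here. */
static bool handle_trap_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
{
	struct kvm *kvm = kern_hyp_va(vcpu->kvm);
	const exit_handler_fn *handlers = kvm_get_exit_handler_array(kvm);
	exit_handler_fn fn = handlers[kvm_vcpu_trap_get_class(vcpu)];

	return fn ? fn(vcpu, exit_code) : false;
}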