KVM: arm64: Handle trapping of FEAT_LS64* instructions
author    Marc Zyngier <maz@kernel.org>
          Thu, 4 Jul 2024 17:58:01 +0000 (18:58 +0100)
committer Marc Zyngier <maz@kernel.org>
          Tue, 6 May 2025 16:35:14 +0000 (17:35 +0100)
We generally don't expect FEAT_LS64* instructions to trap, unless
they are trapped by a guest hypervisor.

Otherwise, this is just the guest playing tricks on us by using
an instruction that isn't advertised, which we handle with a
well-deserved UNDEF.

Reviewed-by: Joey Gouly <joey.gouly@arm.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
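
Whether each instruction class is allowed is keyed entirely off the
guest's view of ID_AA64ISAR1_EL1.LS64 (0: not implemented, 1: LD64B/ST64B,
2: adds ST64BV, 3: adds ST64BV0). A minimal user-space sketch, assuming an
already-created vCPU file descriptor vcpu_fd, of how a VMM could read that
field back through the standard KVM_GET_ONE_REG ioctl:

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* Return the guest's ID_AA64ISAR1_EL1.LS64 field (bits [63:60]),
     * or -1 on error. vcpu_fd is an existing KVM vCPU file descriptor. */
    static int guest_ls64_level(int vcpu_fd)
    {
            uint64_t val = 0;
            struct kvm_one_reg reg = {
                    /* op0=3, op1=0, CRn=0, CRm=6, op2=1 */
                    .id   = ARM64_SYS_REG(3, 0, 0, 6, 1),
                    .addr = (uint64_t)&val,
            };

            if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg) < 0)
                    return -1;

            return (val >> 60) & 0xf;
    }

If this reads as zero, any LS64 instruction the guest issues anyway traps
with EC == ESR_ELx_EC_OTHER and ends up in the handler below.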
arch/arm64/kvm/handle_exit.c

index b73dc26bc44b410903e2423551d61d4e65fd7e1e..ff75695fb8c964a41d29f3929e8c18e88ce78b79 100644
@@ -298,6 +298,61 @@ static int handle_svc(struct kvm_vcpu *vcpu)
        return 1;
 }
 
+static int handle_other(struct kvm_vcpu *vcpu)
+{
+       bool is_l2 = vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu);
+       u64 hcrx = __vcpu_sys_reg(vcpu, HCRX_EL2);
+       u64 esr = kvm_vcpu_get_esr(vcpu);
+       u64 iss = ESR_ELx_ISS(esr);
+       struct kvm *kvm = vcpu->kvm;
+       bool allowed, fwd = false;
+
+       /*
+        * We only trap for two reasons:
+        *
+        * - the feature is disabled, and the only outcome is to
+        *   generate an UNDEF.
+        *
+        * - the feature is enabled, but a NV guest wants to trap the
+        *   feature used by its L2 guest. We forward the exception in
+        *   this case.
+        *
+        * What we don't expect is to end up here if the guest is
+        * expected to be able to directly use the feature, hence the
+        * WARN_ON_ONCE below.
+        */
+       switch (iss) {
+       case ESR_ELx_ISS_OTHER_ST64BV:
+               allowed = kvm_has_feat(kvm, ID_AA64ISAR1_EL1, LS64, LS64_V);
+               if (is_l2)
+                       fwd = !(hcrx & HCRX_EL2_EnASR);
+               break;
+       case ESR_ELx_ISS_OTHER_ST64BV0:
+               allowed = kvm_has_feat(kvm, ID_AA64ISAR1_EL1, LS64, LS64_ACCDATA);
+               if (is_l2)
+                       fwd = !(hcrx & HCRX_EL2_EnAS0);
+               break;
+       case ESR_ELx_ISS_OTHER_LDST64B:
+               allowed = kvm_has_feat(kvm, ID_AA64ISAR1_EL1, LS64, LS64);
+               if (is_l2)
+                       fwd = !(hcrx & HCRX_EL2_EnALS);
+               break;
+       default:
+               /* Clearly, we're missing something. */
+               WARN_ON_ONCE(1);
+               allowed = false;
+       }
+
+       WARN_ON_ONCE(allowed && !fwd);
+
+       if (allowed && fwd)
+               kvm_inject_nested_sync(vcpu, esr);
+       else
+               kvm_inject_undefined(vcpu);
+
+       return 1;
+}
+
 static exit_handle_fn arm_exit_handlers[] = {
        [0 ... ESR_ELx_EC_MAX]  = kvm_handle_unknown_ec,
        [ESR_ELx_EC_WFx]        = kvm_handle_wfx,
@@ -307,6 +362,7 @@ static exit_handle_fn arm_exit_handlers[] = {
        [ESR_ELx_EC_CP14_LS]    = kvm_handle_cp14_load_store,
        [ESR_ELx_EC_CP10_ID]    = kvm_handle_cp10_id,
        [ESR_ELx_EC_CP14_64]    = kvm_handle_cp14_64,
+       [ESR_ELx_EC_OTHER]      = handle_other,
        [ESR_ELx_EC_HVC32]      = handle_hvc,
        [ESR_ELx_EC_SMC32]      = handle_smc,
        [ESR_ELx_EC_HVC64]      = handle_hvc,
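
The disposition at the end of handle_other() collapses to a small truth
table: forward to the L1 hypervisor only when the feature is allowed and
L1 asked for the trap; everything else, including the unexpected
allowed-but-not-forwarded case flagged by the WARN_ON_ONCE(), resolves to
an UNDEF. A stand-alone sketch of just that decision (names here are
illustrative, not kernel API):

    #include <stdbool.h>
    #include <stdio.h>

    enum disposition { INJECT_UNDEF, FORWARD_TO_L1 };

    /* Mirrors the tail of handle_other(): only allowed && fwd reaches
     * the L1 hypervisor; every other combination injects an UNDEF. */
    static enum disposition dispose(bool allowed, bool fwd)
    {
            return (allowed && fwd) ? FORWARD_TO_L1 : INJECT_UNDEF;
    }

    int main(void)
    {
            for (int a = 0; a < 2; a++)
                    for (int f = 0; f < 2; f++)
                            printf("allowed=%d fwd=%d -> %s\n", a, f,
                                   dispose(a, f) == FORWARD_TO_L1 ?
                                   "forward to L1" : "UNDEF");
            return 0;
    }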