KVM: arm64: nv: Handle ERETA[AB] instructions
author     Marc Zyngier <maz@kernel.org>
           Fri, 19 Apr 2024 10:29:33 +0000 (11:29 +0100)
committer  Marc Zyngier <maz@kernel.org>
           Sat, 20 Apr 2024 11:42:51 +0000 (12:42 +0100)
Now that we have some emulation in place for ERETA[AB], we can
plug it into the exception handling machinery.

As with a bare ERET, an "easy" ERETAx instruction is processed as
a fixup, while anything that requires a translation regime
transition or an exception delivery is left to the slow path.

Reviewed-by: Joey Gouly <joey.gouly@arm.com>
Reviewed-by: Oliver Upton <oliver.upton@linux.dev>
Link: https://lore.kernel.org/r/20240419102935.1935571-14-maz@kernel.org
Signed-off-by: Marc Zyngier <maz@kernel.org>
arch/arm64/kvm/emulate-nested.c
arch/arm64/kvm/handle_exit.c
arch/arm64/kvm/hyp/vhe/switch.c

diff --git a/arch/arm64/kvm/emulate-nested.c b/arch/arm64/kvm/emulate-nested.c
index 63a74c0330f11f2c3302a8c8fb27c412814173fc..72d733c74a382f264e48394c6cb88f0fe9f25f45 100644
--- a/arch/arm64/kvm/emulate-nested.c
+++ b/arch/arm64/kvm/emulate-nested.c
@@ -2172,7 +2172,7 @@ static u64 kvm_check_illegal_exception_return(struct kvm_vcpu *vcpu, u64 spsr)
 
 void kvm_emulate_nested_eret(struct kvm_vcpu *vcpu)
 {
-       u64 spsr, elr;
+       u64 spsr, elr, esr;
 
        /*
         * Forward this trap to the virtual EL2 if the virtual
@@ -2181,12 +2181,30 @@ void kvm_emulate_nested_eret(struct kvm_vcpu *vcpu)
        if (forward_traps(vcpu, HCR_NV))
                return;
 
+       /* Check for an ERETAx */
+       esr = kvm_vcpu_get_esr(vcpu);
+       if (esr_iss_is_eretax(esr) && !kvm_auth_eretax(vcpu, &elr)) {
+               /*
+                * Oh no, ERETAx failed to authenticate.  If we have
+                * FPACCOMBINE, deliver an exception right away.  If we
+                * don't, then let the mangled ELR value trickle down the
+                * ERET handling, and the guest will have a little surprise.
+                */
+               if (kvm_has_pauth(vcpu->kvm, FPACCOMBINE)) {
+                       esr &= ESR_ELx_ERET_ISS_ERETA;
+                       esr |= FIELD_PREP(ESR_ELx_EC_MASK, ESR_ELx_EC_FPAC);
+                       kvm_inject_nested_sync(vcpu, esr);
+                       return;
+               }
+       }
+
        preempt_disable();
        kvm_arch_vcpu_put(vcpu);
 
        spsr = __vcpu_sys_reg(vcpu, SPSR_EL2);
        spsr = kvm_check_illegal_exception_return(vcpu, spsr);
-       elr = __vcpu_sys_reg(vcpu, ELR_EL2);
+       if (!esr_iss_is_eretax(esr))
+               elr = __vcpu_sys_reg(vcpu, ELR_EL2);
 
        trace_kvm_nested_eret(vcpu, elr, spsr);
 
diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
index 1ba2f788b2c3d8901b554f41231d0e0e808e5081..407bdfbb572b2da0fa1d2fa20908a5fbc582cf06 100644
--- a/arch/arm64/kvm/handle_exit.c
+++ b/arch/arm64/kvm/handle_exit.c
@@ -248,7 +248,8 @@ static int kvm_handle_ptrauth(struct kvm_vcpu *vcpu)
 
 static int kvm_handle_eret(struct kvm_vcpu *vcpu)
 {
-       if (esr_iss_is_eretax(kvm_vcpu_get_esr(vcpu)))
+       if (esr_iss_is_eretax(kvm_vcpu_get_esr(vcpu)) &&
+           !vcpu_has_ptrauth(vcpu))
                return kvm_handle_ptrauth(vcpu);
 
        /*
diff --git a/arch/arm64/kvm/hyp/vhe/switch.c b/arch/arm64/kvm/hyp/vhe/switch.c
index 26395171621ba70289489b48a1a130e30934b6e0..8e1d98b691c1efa82b96a8459b8fbf3b2ee3c6b0 100644
--- a/arch/arm64/kvm/hyp/vhe/switch.c
+++ b/arch/arm64/kvm/hyp/vhe/switch.c
@@ -208,7 +208,8 @@ void kvm_vcpu_put_vhe(struct kvm_vcpu *vcpu)
 
 static bool kvm_hyp_handle_eret(struct kvm_vcpu *vcpu, u64 *exit_code)
 {
-       u64 spsr, mode;
+       u64 esr = kvm_vcpu_get_esr(vcpu);
+       u64 spsr, elr, mode;
 
        /*
         * Going through the whole put/load motions is a waste of time
@@ -242,10 +243,18 @@ static bool kvm_hyp_handle_eret(struct kvm_vcpu *vcpu, u64 *exit_code)
                return false;
        }
 
+       /* If ERETAx fails, take the slow path */
+       if (esr_iss_is_eretax(esr)) {
+               if (!(vcpu_has_ptrauth(vcpu) && kvm_auth_eretax(vcpu, &elr)))
+                       return false;
+       } else {
+               elr = read_sysreg_el1(SYS_ELR);
+       }
+
        spsr = (spsr & ~(PSR_MODE_MASK | PSR_MODE32_BIT)) | mode;
 
        write_sysreg_el2(spsr, SYS_SPSR);
-       write_sysreg_el2(read_sysreg_el1(SYS_ELR), SYS_ELR);
+       write_sysreg_el2(elr, SYS_ELR);
 
        return true;
 }