KVM: arm64: Eagerly switch ZCR_EL{1,2}
author     Mark Rutland <mark.rutland@arm.com>
           Mon, 10 Feb 2025 19:52:26 +0000 (19:52 +0000)
committer  Marc Zyngier <maz@kernel.org>
           Thu, 13 Feb 2025 17:55:06 +0000 (17:55 +0000)
In non-protected KVM modes, while the guest FPSIMD/SVE/SME state is live on the
CPU, the host's active SVE VL may differ from the guest's maximum SVE VL:

* For VHE hosts, when a VM uses NV, ZCR_EL2 contains a value constrained
  by the guest hypervisor, which may be less than or equal to that
  guest's maximum VL.

  Note: in this case the value of ZCR_EL1 is immaterial due to E2H.

* For nVHE/hVHE hosts, ZCR_EL1 contains a value written by the guest,
  which may be less than or greater than the guest's maximum VL.

  Note: in this case hyp code traps host SVE usage and lazily restores
  ZCR_EL2 to the host's maximum VL, which may be greater than the
  guest's maximum VL (this lazy handling is sketched below).
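
For reference, the lazy restore mentioned in the note above is the host
SVE trap handling that this patch removes (see the hyp-main.c hunk
below); in outline, hyp's handle_trap() did:

| case ESR_ELx_EC_SVE:
|         /* Re-enable host SVE access, then reset ZCR_EL2 to the host's max VL */
|         cpacr_clear_set(0, CPACR_EL1_ZEN);
|         isb();
|         sve_cond_update_zcr_vq(sve_vq_from_vl(kvm_host_sve_max_vl) - 1,
|                                SYS_ZCR_EL2);
|         break;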

Such a mismatch can exist between exiting a guest and kvm_arch_vcpu_put_fp().
If a softirq is taken during this period and the softirq handler tries
to use kernel-mode NEON, then the kernel will fail to save the guest's
FPSIMD/SVE state, and will pend a SIGKILL for the current thread.

This happens because kvm_arch_vcpu_ctxsync_fp() binds the guest's live
FPSIMD/SVE state with the guest's maximum SVE VL, and
fpsimd_save_user_state() verifies that the live SVE VL is as expected
before attempting to save the register state:

| if (WARN_ON(sve_get_vl() != vl)) {
|         force_signal_inject(SIGKILL, SI_KERNEL, 0, 0);
|         return;
| }
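
In other words, the VL bound by kvm_arch_vcpu_ctxsync_fp() and the VL
live on the CPU can disagree at this point. A condensed sketch of the
two quantities being compared (vcpu is the current vCPU; the helpers
are existing arm64/KVM ones):

| /* VL the guest's live state was bound with: the guest's maximum VL */
| unsigned int vl = sve_vl_from_vq(vcpu_sve_max_vq(vcpu));
|
| /* VL actually live on the CPU after the guest ran, possibly smaller */
| unsigned int live_vl = sve_get_vl();
|
| /* fpsimd_save_user_state() pends SIGKILL when these differ */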

Fix this and make this a bit easier to reason about by always eagerly
switching ZCR_EL{1,2} at hyp during guest<->host transitions. With this
happening, there's no need to trap host SVE usage, and the nVHE/hVHE
__deactivate_cptr_traps() logic can be simplified to enable host access
to all present FPSIMD/SVE/SME features.
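
Concretely, the non-protected nVHE/hVHE run path ends up bracketed like
this (a condensed view of the hyp-main.c hunk below; the VHE path gains
the same bracketing inside __kvm_vcpu_run_vhe()):

| fpsimd_lazy_switch_to_guest(vcpu);  /* load guest ZCR_EL{1,2} if it owns the FP regs */
| ret = __kvm_vcpu_run(vcpu);
| fpsimd_lazy_switch_to_host(vcpu);   /* save guest ZCR_EL1, restore VLs for lazy host save */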

In protected nVHE/hVHE modes, the host's state is always saved/restored
by hyp, and the guest's state is saved prior to exit to the host, so
from the host's PoV the guest never has live FPSIMD/SVE/SME state, and
the host's ZCR_EL1 is never clobbered by hyp.

Fixes: 8c8010d69c132273 ("KVM: arm64: Save/restore SVE state for nVHE")
Fixes: 2e3cf82063a00ea0 ("KVM: arm64: nv: Ensure correct VL is loaded before saving SVE state")
Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Reviewed-by: Mark Brown <broonie@kernel.org>
Tested-by: Mark Brown <broonie@kernel.org>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Fuad Tabba <tabba@google.com>
Cc: Marc Zyngier <maz@kernel.org>
Cc: Oliver Upton <oliver.upton@linux.dev>
Cc: Will Deacon <will@kernel.org>
Reviewed-by: Oliver Upton <oliver.upton@linux.dev>
Link: https://lore.kernel.org/r/20250210195226.1215254-9-mark.rutland@arm.com
Signed-off-by: Marc Zyngier <maz@kernel.org>
arch/arm64/kvm/fpsimd.c
arch/arm64/kvm/hyp/entry.S
arch/arm64/kvm/hyp/include/hyp/switch.h
arch/arm64/kvm/hyp/nvhe/hyp-main.c
arch/arm64/kvm/hyp/nvhe/switch.c
arch/arm64/kvm/hyp/vhe/switch.c

diff --git a/arch/arm64/kvm/fpsimd.c b/arch/arm64/kvm/fpsimd.c
index f64724197958e0d8a4ec17deb1f9826ce3625eb7..3cbb999419af7bb31ce9cec2baafcad00491610a 100644
--- a/arch/arm64/kvm/fpsimd.c
+++ b/arch/arm64/kvm/fpsimd.c
@@ -136,36 +136,6 @@ void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu)
        local_irq_save(flags);
 
        if (guest_owns_fp_regs()) {
-               if (vcpu_has_sve(vcpu)) {
-                       u64 zcr = read_sysreg_el1(SYS_ZCR);
-
-                       /*
-                        * If the vCPU is in the hyp context then ZCR_EL1 is
-                        * loaded with its vEL2 counterpart.
-                        */
-                       __vcpu_sys_reg(vcpu, vcpu_sve_zcr_elx(vcpu)) = zcr;
-
-                       /*
-                        * Restore the VL that was saved when bound to the CPU,
-                        * which is the maximum VL for the guest. Because the
-                        * layout of the data when saving the sve state depends
-                        * on the VL, we need to use a consistent (i.e., the
-                        * maximum) VL.
-                        * Note that this means that at guest exit ZCR_EL1 is
-                        * not necessarily the same as on guest entry.
-                        *
-                        * ZCR_EL2 holds the guest hypervisor's VL when running
-                        * a nested guest, which could be smaller than the
-                        * max for the vCPU. Similar to above, we first need to
-                        * switch to a VL consistent with the layout of the
-                        * vCPU's SVE state. KVM support for NV implies VHE, so
-                        * using the ZCR_EL1 alias is safe.
-                        */
-                       if (!has_vhe() || (vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu)))
-                               sve_cond_update_zcr_vq(vcpu_sve_max_vq(vcpu) - 1,
-                                                      SYS_ZCR_EL1);
-               }
-
                /*
                 * Flush (save and invalidate) the fpsimd/sve state so that if
                 * the host tries to use fpsimd/sve, it's not using stale data
diff --git a/arch/arm64/kvm/hyp/entry.S b/arch/arm64/kvm/hyp/entry.S
index 4433a234aa9ba242f43b943d22011b5ddacd8af7..9f4e8d68ab505cf4a7aa8673643d9b47ca1bc7cb 100644
--- a/arch/arm64/kvm/hyp/entry.S
+++ b/arch/arm64/kvm/hyp/entry.S
@@ -44,6 +44,11 @@ alternative_if ARM64_HAS_RAS_EXTN
 alternative_else_nop_endif
        mrs     x1, isr_el1
        cbz     x1,  1f
+
+       // Ensure that __guest_enter() always provides a context
+       // synchronization event so that callers don't need ISBs for anything
+       // that would usually be synchronized by the ERET.
+       isb
        mov     x0, #ARM_EXCEPTION_IRQ
        ret
 
diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h
index 163867f7f7c52a4bbfa3555a7c19928f068d294b..f5e882a358e2d6e6805d112ed646a112455012e8 100644
--- a/arch/arm64/kvm/hyp/include/hyp/switch.h
+++ b/arch/arm64/kvm/hyp/include/hyp/switch.h
@@ -375,6 +375,65 @@ static inline void __hyp_sve_save_host(void)
                         true);
 }
 
+static inline void fpsimd_lazy_switch_to_guest(struct kvm_vcpu *vcpu)
+{
+       u64 zcr_el1, zcr_el2;
+
+       if (!guest_owns_fp_regs())
+               return;
+
+       if (vcpu_has_sve(vcpu)) {
+               /* A guest hypervisor may restrict the effective max VL. */
+               if (vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu))
+                       zcr_el2 = __vcpu_sys_reg(vcpu, ZCR_EL2);
+               else
+                       zcr_el2 = vcpu_sve_max_vq(vcpu) - 1;
+
+               write_sysreg_el2(zcr_el2, SYS_ZCR);
+
+               zcr_el1 = __vcpu_sys_reg(vcpu, vcpu_sve_zcr_elx(vcpu));
+               write_sysreg_el1(zcr_el1, SYS_ZCR);
+       }
+}
+
+static inline void fpsimd_lazy_switch_to_host(struct kvm_vcpu *vcpu)
+{
+       u64 zcr_el1, zcr_el2;
+
+       if (!guest_owns_fp_regs())
+               return;
+
+       /*
+        * When the guest owns the FP regs, we know that guest+hyp traps for
+        * any FPSIMD/SVE/SME features exposed to the guest have been disabled
+        * by either fpsimd_lazy_switch_to_guest() or kvm_hyp_handle_fpsimd()
+        * prior to __guest_enter(). As __guest_enter() guarantees a context
+        * synchronization event, we don't need an ISB here to avoid taking
+        * traps for anything that was exposed to the guest.
+        */
+       if (vcpu_has_sve(vcpu)) {
+               zcr_el1 = read_sysreg_el1(SYS_ZCR);
+               __vcpu_sys_reg(vcpu, vcpu_sve_zcr_elx(vcpu)) = zcr_el1;
+
+               /*
+                * The guest's state is always saved using the guest's max VL.
+                * Ensure that the host has the guest's max VL active such that
+                * the host can save the guest's state lazily, but don't
+                * artificially restrict the host to the guest's max VL.
+                */
+               if (has_vhe()) {
+                       zcr_el2 = vcpu_sve_max_vq(vcpu) - 1;
+                       write_sysreg_el2(zcr_el2, SYS_ZCR);
+               } else {
+                       zcr_el2 = sve_vq_from_vl(kvm_host_sve_max_vl) - 1;
+                       write_sysreg_el2(zcr_el2, SYS_ZCR);
+
+                       zcr_el1 = vcpu_sve_max_vq(vcpu) - 1;
+                       write_sysreg_el1(zcr_el1, SYS_ZCR);
+               }
+       }
+}
+
 static void kvm_hyp_save_fpsimd_host(struct kvm_vcpu *vcpu)
 {
        /*
diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-main.c b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
index 1a334a38d8fd23ab3daf5f20e01044a7b06a71fd..2c37680d954cf2c2aed5abe7c2225b682861869a 100644
--- a/arch/arm64/kvm/hyp/nvhe/hyp-main.c
+++ b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
@@ -5,6 +5,7 @@
  */
 
 #include <hyp/adjust_pc.h>
+#include <hyp/switch.h>
 
 #include <asm/pgtable-types.h>
 #include <asm/kvm_asm.h>
@@ -224,8 +225,12 @@ static void handle___kvm_vcpu_run(struct kvm_cpu_context *host_ctxt)
 
                sync_hyp_vcpu(hyp_vcpu);
        } else {
+               struct kvm_vcpu *vcpu = kern_hyp_va(host_vcpu);
+
                /* The host is fully trusted, run its vCPU directly. */
-               ret = __kvm_vcpu_run(kern_hyp_va(host_vcpu));
+               fpsimd_lazy_switch_to_guest(vcpu);
+               ret = __kvm_vcpu_run(vcpu);
+               fpsimd_lazy_switch_to_host(vcpu);
        }
 out:
        cpu_reg(host_ctxt, 1) =  ret;
@@ -675,12 +680,6 @@ void handle_trap(struct kvm_cpu_context *host_ctxt)
        case ESR_ELx_EC_SMC64:
                handle_host_smc(host_ctxt);
                break;
-       case ESR_ELx_EC_SVE:
-               cpacr_clear_set(0, CPACR_EL1_ZEN);
-               isb();
-               sve_cond_update_zcr_vq(sve_vq_from_vl(kvm_host_sve_max_vl) - 1,
-                                      SYS_ZCR_EL2);
-               break;
        case ESR_ELx_EC_IABT_LOW:
        case ESR_ELx_EC_DABT_LOW:
                handle_host_mem_abort(host_ctxt);
diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c
index 69d7d3b4294a7e4f978bf20a1516a132ed228dcf..7d2ba6ef026186e2e9f7eae48132c0ede4285c90 100644
--- a/arch/arm64/kvm/hyp/nvhe/switch.c
+++ b/arch/arm64/kvm/hyp/nvhe/switch.c
@@ -73,12 +73,10 @@ static void __activate_cptr_traps(struct kvm_vcpu *vcpu)
 
 static void __deactivate_cptr_traps(struct kvm_vcpu *vcpu)
 {
-       struct kvm *kvm = kern_hyp_va(vcpu->kvm);
-
        if (has_hvhe()) {
                u64 val = CPACR_EL1_FPEN;
 
-               if (!kvm_has_sve(kvm) || !guest_owns_fp_regs())
+               if (cpus_have_final_cap(ARM64_SVE))
                        val |= CPACR_EL1_ZEN;
                if (cpus_have_final_cap(ARM64_SME))
                        val |= CPACR_EL1_SMEN;
@@ -87,7 +85,7 @@ static void __deactivate_cptr_traps(struct kvm_vcpu *vcpu)
        } else {
                u64 val = CPTR_NVHE_EL2_RES1;
 
-               if (kvm_has_sve(kvm) && guest_owns_fp_regs())
+               if (!cpus_have_final_cap(ARM64_SVE))
                        val |= CPTR_EL2_TZ;
                if (!cpus_have_final_cap(ARM64_SME))
                        val |= CPTR_EL2_TSM;
diff --git a/arch/arm64/kvm/hyp/vhe/switch.c b/arch/arm64/kvm/hyp/vhe/switch.c
index c854d844588924f1f9cf9693d67cf050a8697787..647737d6e8d0b5f41b2a8d25a06265e4703126b3 100644
--- a/arch/arm64/kvm/hyp/vhe/switch.c
+++ b/arch/arm64/kvm/hyp/vhe/switch.c
@@ -579,6 +579,8 @@ static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
 
        sysreg_save_host_state_vhe(host_ctxt);
 
+       fpsimd_lazy_switch_to_guest(vcpu);
+
        /*
         * Note that ARM erratum 1165522 requires us to configure both stage 1
         * and stage 2 translation for the guest context before we clear
@@ -603,6 +605,8 @@ static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
 
        __deactivate_traps(vcpu);
 
+       fpsimd_lazy_switch_to_host(vcpu);
+
        sysreg_restore_host_state_vhe(host_ctxt);
 
        if (guest_owns_fp_regs())