KVM: arm64: nv: Use guest hypervisor's max VL when running nested guest
author		Oliver Upton <oliver.upton@linux.dev>
		Thu, 20 Jun 2024 16:46:43 +0000 (16:46 +0000)
committer	Oliver Upton <oliver.upton@linux.dev>
		Thu, 20 Jun 2024 19:02:40 +0000 (19:02 +0000)
The max VL for nested guests is additionally constrained by the max VL
selected by the guest hypervisor. Use that instead of KVM's max VL when
running a nested guest.

Note that the guest hypervisor's ZCR_EL2 is sanitised against the VM's
max VL at the time of access, so there's no additional handling required
at the time of use.
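
As a hedged sketch (illustrative only, not part of this diff): the
sanitisation mentioned above amounts to clamping the LEN field of a
trapped ZCR_EL2 write against the vCPU's max VQ before saving it,
roughly along these lines. The helper name and plumbing here are
assumptions, not the series' actual handler.

	/*
	 * Illustrative sketch: clamp a guest hypervisor's ZCR_EL2 write
	 * against the vCPU's max VL. ZCR_ELx.LEN encodes (VQ - 1), so the
	 * saved value can later be fed straight to sve_cond_update_zcr_vq().
	 */
	static u64 sanitise_zcr_el2_write(struct kvm_vcpu *vcpu, u64 val)
	{
		unsigned int vq = FIELD_GET(ZCR_ELx_LEN_MASK, val) + 1;

		vq = min(vq, vcpu_sve_max_vq(vcpu));
		return vq - 1;
	}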

Reviewed-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20240620164653.1130714-7-oliver.upton@linux.dev
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
arch/arm64/kvm/hyp/include/hyp/switch.h

index 16809e74c7f982323549902b90311880fa47bfbb..ab70e6e6bb0cfea5a518cf76cbc2f7055f6f5233 100644 (file)
@@ -314,11 +314,23 @@ static bool kvm_hyp_handle_mops(struct kvm_vcpu *vcpu, u64 *exit_code)
 
 static inline void __hyp_sve_restore_guest(struct kvm_vcpu *vcpu)
 {
+       /*
+        * The vCPU's saved SVE state layout always matches the max VL of the
+        * vCPU. Start off with the max VL so we can load the SVE state.
+        */
        sve_cond_update_zcr_vq(vcpu_sve_max_vq(vcpu) - 1, SYS_ZCR_EL2);
        __sve_restore_state(vcpu_sve_pffr(vcpu),
                            &vcpu->arch.ctxt.fp_regs.fpsr,
                            true);
 
+       /*
+        * The effective VL for a VM could differ from the max VL when running a
+        * nested guest, as the guest hypervisor could select a smaller VL. Slap
+        * that into hardware before wrapping up.
+        */
+       if (vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu))
+               sve_cond_update_zcr_vq(__vcpu_sys_reg(vcpu, ZCR_EL2), SYS_ZCR_EL2);
+
        write_sysreg_el1(__vcpu_sys_reg(vcpu, vcpu_sve_zcr_elx(vcpu)), SYS_ZCR);
 }
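
Aside on the encoding (a hedged worked example, not from this commit):
ZCR_ELx.LEN holds VQ - 1, where a VQ is one 128-bit quadword, so the
effective VL is (LEN + 1) * 128 bits. That is why the first update above
passes vcpu_sve_max_vq(vcpu) - 1, while the second can pass the saved
ZCR_EL2 value directly: it was already sanitised into the same encoding.

	/* Standalone illustration of the LEN -> VL arithmetic. */
	#include <stdio.h>

	int main(void)
	{
		unsigned int len = 1;		/* ZCR_ELx.LEN field   */
		unsigned int vq = len + 1;	/* vector quadwords    */
		unsigned int vl = vq * 128;	/* vector length, bits */

		printf("LEN=%u -> VQ=%u -> VL=%u bits\n", len, vq, vl);
		return 0;
	}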