KVM: arm64: Remove host FPSIMD saving for non-protected KVM
author    Mark Rutland <mark.rutland@arm.com>      Mon, 10 Feb 2025 19:52:20 +0000 (19:52 +0000)
committer Marc Zyngier <maz@kernel.org>            Thu, 13 Feb 2025 17:54:44 +0000 (17:54 +0000)
Now that the host eagerly saves its own FPSIMD/SVE/SME state,
non-protected KVM never needs to save the host FPSIMD/SVE/SME state,
and the code to do this is never used. Protected KVM still needs to
save/restore the host FPSIMD/SVE state to avoid leaking guest state to
the host (and to avoid revealing to the host whether the guest used
FPSIMD/SVE/SME), and that code needs to be retained.

Remove the unused code and data structures.
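
For illustration, the FP-related portion of struct kvm_host_data ends up with
the shape below; this is condensed from the kvm_host.h hunk further down, with
unrelated members and the fp_owner enumerators abbreviated:

    struct kvm_host_data {
            struct kvm_cpu_context host_ctxt;

            /*
             * Hyp VA.
             * sve_state is only used in pKVM and if system_supports_sve().
             */
            struct cpu_sve_state *sve_state;

            /* Used by pKVM only. */
            u64     fpmr;

            /* Ownership of the FP regs */
            enum { /* FP_STATE_FREE, ... */ } fp_owner;

            /* ... */
    };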

To avoid the need for a stub copy of kvm_hyp_save_fpsimd_host() in the
VHE hyp code, the nVHE/hVHE version is moved into the shared switch
header, where it is only invoked when KVM is in protected mode.
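
The net effect at the FP/SIMD trap handler is that the host save path is only
reachable in protected mode; a condensed view, taken from the
hyp/include/hyp/switch.h hunk below with the surrounding handler context
elided:

    /* In kvm_hyp_handle_fpsimd(): write out the host state only for pKVM */
    if (is_protected_kvm_enabled() && host_owns_fp_regs())
            kvm_hyp_save_fpsimd_host(vcpu);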

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Reviewed-by: Mark Brown <broonie@kernel.org>
Tested-by: Mark Brown <broonie@kernel.org>
Acked-by: Will Deacon <will@kernel.org>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Fuad Tabba <tabba@google.com>
Cc: Marc Zyngier <maz@kernel.org>
Cc: Oliver Upton <oliver.upton@linux.dev>
Reviewed-by: Oliver Upton <oliver.upton@linux.dev>
Link: https://lore.kernel.org/r/20250210195226.1215254-3-mark.rutland@arm.com
Signed-off-by: Marc Zyngier <maz@kernel.org>
arch/arm64/include/asm/kvm_host.h
arch/arm64/kvm/arm.c
arch/arm64/kvm/fpsimd.c
arch/arm64/kvm/hyp/include/hyp/switch.h
arch/arm64/kvm/hyp/nvhe/hyp-main.c
arch/arm64/kvm/hyp/nvhe/switch.c
arch/arm64/kvm/hyp/vhe/switch.c

diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 7cfa024de4e3496f0b2d4640c48773be9685d9aa..f56c07568591f60862f4313f5cc820f17c9a2858 100644
@@ -624,23 +624,13 @@ struct kvm_host_data {
        struct kvm_cpu_context host_ctxt;
 
        /*
-        * All pointers in this union are hyp VA.
+        * Hyp VA.
         * sve_state is only used in pKVM and if system_supports_sve().
         */
-       union {
-               struct user_fpsimd_state *fpsimd_state;
-               struct cpu_sve_state *sve_state;
-       };
-
-       union {
-               /* HYP VA pointer to the host storage for FPMR */
-               u64     *fpmr_ptr;
-               /*
-                * Used by pKVM only, as it needs to provide storage
-                * for the host
-                */
-               u64     fpmr;
-       };
+       struct cpu_sve_state *sve_state;
+
+       /* Used by pKVM only. */
+       u64     fpmr;
 
        /* Ownership of the FP regs */
        enum {
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index 62c650c2f7b674d0a44df7a86de338f0d9c0a62b..4b7389ad94f55a30cbe6208e8d09ff827d07287a 100644
@@ -2481,14 +2481,6 @@ static void finalize_init_hyp_mode(void)
                        per_cpu_ptr_nvhe_sym(kvm_host_data, cpu)->sve_state =
                                kern_hyp_va(sve_state);
                }
-       } else {
-               for_each_possible_cpu(cpu) {
-                       struct user_fpsimd_state *fpsimd_state;
-
-                       fpsimd_state = &per_cpu_ptr_nvhe_sym(kvm_host_data, cpu)->host_ctxt.fp_regs;
-                       per_cpu_ptr_nvhe_sym(kvm_host_data, cpu)->fpsimd_state =
-                               kern_hyp_va(fpsimd_state);
-               }
        }
 }
 
diff --git a/arch/arm64/kvm/fpsimd.c b/arch/arm64/kvm/fpsimd.c
index ceeb0a4893aa7859e91682ed13532edf43620969..332cb3904e68b0bfed4c0fdf352f9becc7a8b0da 100644
@@ -64,8 +64,6 @@ void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu)
         */
        fpsimd_save_and_flush_cpu_state();
        *host_data_ptr(fp_owner) = FP_STATE_FREE;
-       *host_data_ptr(fpsimd_state) = NULL;
-       *host_data_ptr(fpmr_ptr) = NULL;
 
        host_data_clear_flag(HOST_SVE_ENABLED);
        if (read_sysreg(cpacr_el1) & CPACR_EL1_ZEN_EL0EN)
diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h
index f838a45665f26167a0acde1193c1c9fede7ab0db..c5b8a11ac4f501e79dbac87caf83d02e84ffe108 100644
@@ -375,7 +375,28 @@ static inline void __hyp_sve_save_host(void)
                         true);
 }
 
-static void kvm_hyp_save_fpsimd_host(struct kvm_vcpu *vcpu);
+static void kvm_hyp_save_fpsimd_host(struct kvm_vcpu *vcpu)
+{
+       /*
+        * Non-protected kvm relies on the host restoring its sve state.
+        * Protected kvm restores the host's sve state as not to reveal that
+        * fpsimd was used by a guest nor leak upper sve bits.
+        */
+       if (system_supports_sve()) {
+               __hyp_sve_save_host();
+
+               /* Re-enable SVE traps if not supported for the guest vcpu. */
+               if (!vcpu_has_sve(vcpu))
+                       cpacr_clear_set(CPACR_EL1_ZEN, 0);
+
+       } else {
+               __fpsimd_save_state(host_data_ptr(host_ctxt.fp_regs));
+       }
+
+       if (kvm_has_fpmr(kern_hyp_va(vcpu->kvm)))
+               *host_data_ptr(fpmr) = read_sysreg_s(SYS_FPMR);
+}
+
 
 /*
  * We trap the first access to the FP/SIMD to save the host context and
@@ -425,7 +446,7 @@ static bool kvm_hyp_handle_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code)
        isb();
 
        /* Write out the host state if it's in the registers */
-       if (host_owns_fp_regs())
+       if (is_protected_kvm_enabled() && host_owns_fp_regs())
                kvm_hyp_save_fpsimd_host(vcpu);
 
        /* Restore the guest state */
diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-main.c b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
index 6e12c070832f79abea4ed908428beed01b31e936..1a334a38d8fd23ab3daf5f20e01044a7b06a71fd 100644
@@ -83,7 +83,7 @@ static void fpsimd_sve_sync(struct kvm_vcpu *vcpu)
        if (system_supports_sve())
                __hyp_sve_restore_host();
        else
-               __fpsimd_restore_state(*host_data_ptr(fpsimd_state));
+               __fpsimd_restore_state(host_data_ptr(host_ctxt.fp_regs));
 
        if (has_fpmr)
                write_sysreg_s(*host_data_ptr(fpmr), SYS_FPMR);
diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c
index 6c846d033d24ad715f615252baf66ba28f14992b..7a2d189176249703e3f3e3accdaba1e4f40b2fbc 100644
@@ -192,34 +192,6 @@ static bool kvm_handle_pvm_sys64(struct kvm_vcpu *vcpu, u64 *exit_code)
                kvm_handle_pvm_sysreg(vcpu, exit_code));
 }
 
-static void kvm_hyp_save_fpsimd_host(struct kvm_vcpu *vcpu)
-{
-       /*
-        * Non-protected kvm relies on the host restoring its sve state.
-        * Protected kvm restores the host's sve state as not to reveal that
-        * fpsimd was used by a guest nor leak upper sve bits.
-        */
-       if (unlikely(is_protected_kvm_enabled() && system_supports_sve())) {
-               __hyp_sve_save_host();
-
-               /* Re-enable SVE traps if not supported for the guest vcpu. */
-               if (!vcpu_has_sve(vcpu))
-                       cpacr_clear_set(CPACR_EL1_ZEN, 0);
-
-       } else {
-               __fpsimd_save_state(*host_data_ptr(fpsimd_state));
-       }
-
-       if (kvm_has_fpmr(kern_hyp_va(vcpu->kvm))) {
-               u64 val = read_sysreg_s(SYS_FPMR);
-
-               if (unlikely(is_protected_kvm_enabled()))
-                       *host_data_ptr(fpmr) = val;
-               else
-                       **host_data_ptr(fpmr_ptr) = val;
-       }
-}
-
 static const exit_handler_fn hyp_exit_handlers[] = {
        [0 ... ESR_ELx_EC_MAX]          = NULL,
        [ESR_ELx_EC_CP15_32]            = kvm_hyp_handle_cp15_32,
diff --git a/arch/arm64/kvm/hyp/vhe/switch.c b/arch/arm64/kvm/hyp/vhe/switch.c
index b5b9dbaf1fdd6b8b5e426dcdcfca0480da5f1787..e8a07d4bb546b324fc8216a8fc4c46e9466239f0 100644
@@ -413,14 +413,6 @@ static bool kvm_hyp_handle_eret(struct kvm_vcpu *vcpu, u64 *exit_code)
        return true;
 }
 
-static void kvm_hyp_save_fpsimd_host(struct kvm_vcpu *vcpu)
-{
-       __fpsimd_save_state(*host_data_ptr(fpsimd_state));
-
-       if (kvm_has_fpmr(vcpu->kvm))
-               **host_data_ptr(fpmr_ptr) = read_sysreg_s(SYS_FPMR);
-}
-
 static bool kvm_hyp_handle_tlbi_el2(struct kvm_vcpu *vcpu, u64 *exit_code)
 {
        int ret = -EINVAL;