Now that the host eagerly saves its own FPSIMD/SVE/SME state,
non-protected KVM never needs to save the host FPSIMD/SVE/SME state,
and the code to do this is never used. Protected KVM still needs to
save/restore the host FPSIMD/SVE state to avoid leaking guest state to
the host (and to avoid revealing to the host whether the guest used
FPSIMD/SVE/SME), and that code needs to be retained.
Remove the unused code and data structures.
To avoid the need for a stub copy of kvm_hyp_save_fpsimd_host() in the
VHE hyp code, the nVHE/hVHE version is moved into the shared switch
header, where it is only invoked when KVM is in protected mode.
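
For reference, the call site in the shared switch header reduces to the
following guard (quoting the hunk below):

  /* Write out the host state if it's in the registers */
  if (is_protected_kvm_enabled() && host_owns_fp_regs())
          kvm_hyp_save_fpsimd_host(vcpu);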
Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Reviewed-by: Mark Brown <broonie@kernel.org>
Tested-by: Mark Brown <broonie@kernel.org>
Acked-by: Will Deacon <will@kernel.org>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Fuad Tabba <tabba@google.com>
Cc: Marc Zyngier <maz@kernel.org>
Cc: Oliver Upton <oliver.upton@linux.dev>
Reviewed-by: Oliver Upton <oliver.upton@linux.dev>
Link: https://lore.kernel.org/r/20250210195226.1215254-3-mark.rutland@arm.com
Signed-off-by: Marc Zyngier <maz@kernel.org>
 
        struct kvm_cpu_context host_ctxt;
 
        /*
-        * All pointers in this union are hyp VA.
+        * Hyp VA.
         * sve_state is only used in pKVM and if system_supports_sve().
         */
-       union {
-               struct user_fpsimd_state *fpsimd_state;
-               struct cpu_sve_state *sve_state;
-       };
-
-       union {
-               /* HYP VA pointer to the host storage for FPMR */
-               u64     *fpmr_ptr;
-               /*
-                * Used by pKVM only, as it needs to provide storage
-                * for the host
-                */
-               u64     fpmr;
-       };
+       struct cpu_sve_state *sve_state;
+
+       /* Used by pKVM only; provides the storage for the host's FPMR. */
+       u64     fpmr;
 
        /* Ownership of the FP regs */
        enum {
 
                        per_cpu_ptr_nvhe_sym(kvm_host_data, cpu)->sve_state =
                                kern_hyp_va(sve_state);
                }
-       } else {
-               for_each_possible_cpu(cpu) {
-                       struct user_fpsimd_state *fpsimd_state;
-
-                       fpsimd_state = &per_cpu_ptr_nvhe_sym(kvm_host_data, cpu)->host_ctxt.fp_regs;
-                       per_cpu_ptr_nvhe_sym(kvm_host_data, cpu)->fpsimd_state =
-                               kern_hyp_va(fpsimd_state);
-               }
        }
 }
 
 
         */
        fpsimd_save_and_flush_cpu_state();
        *host_data_ptr(fp_owner) = FP_STATE_FREE;
-       *host_data_ptr(fpsimd_state) = NULL;
-       *host_data_ptr(fpmr_ptr) = NULL;
 
        host_data_clear_flag(HOST_SVE_ENABLED);
        if (read_sysreg(cpacr_el1) & CPACR_EL1_ZEN_EL0EN)
 
                         true);
 }
 
-static void kvm_hyp_save_fpsimd_host(struct kvm_vcpu *vcpu);
+static void kvm_hyp_save_fpsimd_host(struct kvm_vcpu *vcpu)
+{
+       /*
+        * Non-protected kvm relies on the host restoring its sve state.
+        * Protected kvm restores the host's sve state so as not to reveal
+        * that fpsimd was used by a guest, nor to leak the upper sve bits.
+        */
+       if (system_supports_sve()) {
+               __hyp_sve_save_host();
+
+               /* Re-enable SVE traps if not supported for the guest vcpu. */
+               if (!vcpu_has_sve(vcpu))
+                       cpacr_clear_set(CPACR_EL1_ZEN, 0);
+       } else {
+               __fpsimd_save_state(host_data_ptr(host_ctxt.fp_regs));
+       }
+
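+       /* pKVM keeps the host's FPMR in hyp-owned storage (kvm_host_data::fpmr). */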
+       if (kvm_has_fpmr(kern_hyp_va(vcpu->kvm)))
+               *host_data_ptr(fpmr) = read_sysreg_s(SYS_FPMR);
+}
+
 
 /*
  * We trap the first access to the FP/SIMD to save the host context and
        isb();
 
        /* Write out the host state if it's in the registers */
-       if (host_owns_fp_regs())
+       if (is_protected_kvm_enabled() && host_owns_fp_regs())
                kvm_hyp_save_fpsimd_host(vcpu);
 
        /* Restore the guest state */
 
        if (system_supports_sve())
                __hyp_sve_restore_host();
        else
-               __fpsimd_restore_state(*host_data_ptr(fpsimd_state));
+               __fpsimd_restore_state(host_data_ptr(host_ctxt.fp_regs));
 
        if (has_fpmr)
                write_sysreg_s(*host_data_ptr(fpmr), SYS_FPMR);
 
                kvm_handle_pvm_sysreg(vcpu, exit_code));
 }
 
-static void kvm_hyp_save_fpsimd_host(struct kvm_vcpu *vcpu)
-{
-       /*
-        * Non-protected kvm relies on the host restoring its sve state.
-        * Protected kvm restores the host's sve state as not to reveal that
-        * fpsimd was used by a guest nor leak upper sve bits.
-        */
-       if (unlikely(is_protected_kvm_enabled() && system_supports_sve())) {
-               __hyp_sve_save_host();
-
-               /* Re-enable SVE traps if not supported for the guest vcpu. */
-               if (!vcpu_has_sve(vcpu))
-                       cpacr_clear_set(CPACR_EL1_ZEN, 0);
-
-       } else {
-               __fpsimd_save_state(*host_data_ptr(fpsimd_state));
-       }
-
-       if (kvm_has_fpmr(kern_hyp_va(vcpu->kvm))) {
-               u64 val = read_sysreg_s(SYS_FPMR);
-
-               if (unlikely(is_protected_kvm_enabled()))
-                       *host_data_ptr(fpmr) = val;
-               else
-                       **host_data_ptr(fpmr_ptr) = val;
-       }
-}
-
 static const exit_handler_fn hyp_exit_handlers[] = {
        [0 ... ESR_ELx_EC_MAX]          = NULL,
        [ESR_ELx_EC_CP15_32]            = kvm_hyp_handle_cp15_32,
 
        return true;
 }
 
-static void kvm_hyp_save_fpsimd_host(struct kvm_vcpu *vcpu)
-{
-       __fpsimd_save_state(*host_data_ptr(fpsimd_state));
-
-       if (kvm_has_fpmr(vcpu->kvm))
-               **host_data_ptr(fpmr_ptr) = read_sysreg_s(SYS_FPMR);
-}
-
 static bool kvm_hyp_handle_tlbi_el2(struct kvm_vcpu *vcpu, u64 *exit_code)
 {
        int ret = -EINVAL;