www.infradead.org Git - users/dwmw2/linux.git/commitdiff
KVM: arm64: Convert the SVE guest vcpu flag to a vm flag
author: Fuad Tabba <tabba@google.com>
Mon, 16 Dec 2024 10:50:56 +0000 (10:50 +0000)
committer: Marc Zyngier <maz@kernel.org>
Fri, 20 Dec 2024 13:54:09 +0000 (13:54 +0000)
The vcpu flag GUEST_HAS_SVE is per-vcpu, but it is based on what
is now a per-vm feature. Make the flag per-vm.

Signed-off-by: Fuad Tabba <tabba@google.com>
Link: https://lore.kernel.org/r/20241216105057.579031-17-tabba@google.com
Signed-off-by: Marc Zyngier <maz@kernel.org>
arch/arm64/include/asm/kvm_emulate.h
arch/arm64/include/asm/kvm_host.h
arch/arm64/kvm/hyp/nvhe/pkvm.c
arch/arm64/kvm/reset.c

index 406e99a452bf59e470f8d51631495e45086ded4a..2d91fb88298a263dcd73a4269318f8edf1379650 100644 (file)
@@ -620,7 +620,7 @@ static __always_inline void kvm_write_cptr_el2(u64 val)
 }
 
 /* Resets the value of cptr_el2 when returning to the host. */
-static __always_inline void kvm_reset_cptr_el2(struct kvm_vcpu *vcpu)
+static __always_inline void __kvm_reset_cptr_el2(struct kvm *kvm)
 {
        u64 val;
 
@@ -631,14 +631,14 @@ static __always_inline void kvm_reset_cptr_el2(struct kvm_vcpu *vcpu)
        } else if (has_hvhe()) {
                val = CPACR_ELx_FPEN;
 
-               if (!vcpu_has_sve(vcpu) || !guest_owns_fp_regs())
+               if (!kvm_has_sve(kvm) || !guest_owns_fp_regs())
                        val |= CPACR_ELx_ZEN;
                if (cpus_have_final_cap(ARM64_SME))
                        val |= CPACR_ELx_SMEN;
        } else {
                val = CPTR_NVHE_EL2_RES1;
 
-               if (vcpu_has_sve(vcpu) && guest_owns_fp_regs())
+               if (kvm_has_sve(kvm) && guest_owns_fp_regs())
                        val |= CPTR_EL2_TZ;
                if (!cpus_have_final_cap(ARM64_SME))
                        val |= CPTR_EL2_TSM;
@@ -647,6 +647,12 @@ static __always_inline void kvm_reset_cptr_el2(struct kvm_vcpu *vcpu)
        kvm_write_cptr_el2(val);
 }
 
+#ifdef __KVM_NVHE_HYPERVISOR__
+#define kvm_reset_cptr_el2(v)  __kvm_reset_cptr_el2(kern_hyp_va((v)->kvm))
+#else
+#define kvm_reset_cptr_el2(v)  __kvm_reset_cptr_el2((v)->kvm)
+#endif
+
 /*
  * Returns a 'sanitised' view of CPTR_EL2, translating from nVHE to the VHE
  * format if E2H isn't set.
index 86fb40d35051269938bca5b226913b698ce018ac..7ba742b9067e0216a156eebb3e5ea6bb69239a44 100644 (file)
@@ -331,6 +331,8 @@ struct kvm_arch {
 #define KVM_ARCH_FLAG_ID_REGS_INITIALIZED              7
        /* Fine-Grained UNDEF initialised */
 #define KVM_ARCH_FLAG_FGU_INITIALIZED                  8
+       /* SVE exposed to guest */
+#define KVM_ARCH_FLAG_GUEST_HAS_SVE                    9
        unsigned long flags;
 
        /* VM-wide vCPU feature set */
@@ -862,12 +864,10 @@ struct kvm_vcpu_arch {
 #define vcpu_set_flag(v, ...)  __vcpu_set_flag((v), __VA_ARGS__)
 #define vcpu_clear_flag(v, ...)        __vcpu_clear_flag((v), __VA_ARGS__)
 
-/* SVE exposed to guest */
-#define GUEST_HAS_SVE          __vcpu_single_flag(cflags, BIT(0))
+/* KVM_ARM_VCPU_INIT completed */
+#define VCPU_INITIALIZED       __vcpu_single_flag(cflags, BIT(0))
 /* SVE config completed */
 #define VCPU_SVE_FINALIZED     __vcpu_single_flag(cflags, BIT(1))
-/* KVM_ARM_VCPU_INIT completed */
-#define VCPU_INITIALIZED       __vcpu_single_flag(cflags, BIT(2))
 
 /* Exception pending */
 #define PENDING_EXCEPTION      __vcpu_single_flag(iflags, BIT(0))
@@ -956,8 +956,14 @@ struct kvm_vcpu_arch {
                                 KVM_GUESTDBG_USE_HW | \
                                 KVM_GUESTDBG_SINGLESTEP)
 
-#define vcpu_has_sve(vcpu) (system_supports_sve() &&                   \
-                           vcpu_get_flag(vcpu, GUEST_HAS_SVE))
+#define kvm_has_sve(kvm)       (system_supports_sve() &&               \
+                                test_bit(KVM_ARCH_FLAG_GUEST_HAS_SVE, &(kvm)->arch.flags))
+
+#ifdef __KVM_NVHE_HYPERVISOR__
+#define vcpu_has_sve(vcpu)     kvm_has_sve(kern_hyp_va((vcpu)->kvm))
+#else
+#define vcpu_has_sve(vcpu)     kvm_has_sve((vcpu)->kvm)
+#endif
 
 #ifdef CONFIG_ARM64_PTR_AUTH
 #define vcpu_has_ptrauth(vcpu)                                         \
index 5ad271d91cacb4125bb43889ca846c17169db6c3..4b2622f8e250c3a2d0a76edc094430c6e1c5bca9 100644 (file)
@@ -248,10 +248,13 @@ void pkvm_put_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu)
 static void pkvm_init_features_from_host(struct pkvm_hyp_vm *hyp_vm, const struct kvm *host_kvm)
 {
        struct kvm *kvm = &hyp_vm->kvm;
+       unsigned long host_arch_flags = READ_ONCE(host_kvm->arch.flags);
        DECLARE_BITMAP(allowed_features, KVM_VCPU_MAX_FEATURES);
 
        /* No restrictions for non-protected VMs. */
        if (!kvm_vm_is_protected(kvm)) {
+               hyp_vm->kvm.arch.flags = host_arch_flags;
+
                bitmap_copy(kvm->arch.vcpu_features,
                            host_kvm->arch.vcpu_features,
                            KVM_VCPU_MAX_FEATURES);
@@ -271,8 +274,10 @@ static void pkvm_init_features_from_host(struct pkvm_hyp_vm *hyp_vm, const struc
        if (kvm_pvm_ext_allowed(KVM_CAP_ARM_PTRAUTH_GENERIC))
                set_bit(KVM_ARM_VCPU_PTRAUTH_GENERIC, allowed_features);
 
-       if (kvm_pvm_ext_allowed(KVM_CAP_ARM_SVE))
+       if (kvm_pvm_ext_allowed(KVM_CAP_ARM_SVE)) {
                set_bit(KVM_ARM_VCPU_SVE, allowed_features);
+               kvm->arch.flags |= host_arch_flags & BIT(KVM_ARCH_FLAG_GUEST_HAS_SVE);
+       }
 
        bitmap_and(kvm->arch.vcpu_features, host_kvm->arch.vcpu_features,
                   allowed_features, KVM_VCPU_MAX_FEATURES);
@@ -308,10 +313,8 @@ static void pkvm_vcpu_init_sve(struct pkvm_hyp_vcpu *hyp_vcpu, struct kvm_vcpu *
 {
        struct kvm_vcpu *vcpu = &hyp_vcpu->vcpu;
 
-       if (!vcpu_has_feature(vcpu, KVM_ARM_VCPU_SVE)) {
-               vcpu_clear_flag(vcpu, GUEST_HAS_SVE);
+       if (!vcpu_has_feature(vcpu, KVM_ARM_VCPU_SVE))
                vcpu_clear_flag(vcpu, VCPU_SVE_FINALIZED);
-       }
 }
 
 static int init_pkvm_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu,
index 1cfab6a5d8a59f147590de3761e8f1fbdb86a9a4..803e11b0dc8f5eb74b07b0ad745b0c4f666713d5 100644 (file)
@@ -85,7 +85,7 @@ static void kvm_vcpu_enable_sve(struct kvm_vcpu *vcpu)
         * KVM_REG_ARM64_SVE_VLS.  Allocation is deferred until
         * kvm_arm_vcpu_finalize(), which freezes the configuration.
         */
-       vcpu_set_flag(vcpu, GUEST_HAS_SVE);
+       set_bit(KVM_ARCH_FLAG_GUEST_HAS_SVE, &vcpu->kvm->arch.flags);
 }
 
 /*