www.infradead.org Git - users/jedix/linux-maple.git/commitdiff
KVM: arm64: Trap SME usage in guest
author: Mark Brown <broonie@kernel.org>
Tue, 19 Apr 2022 11:22:33 +0000 (12:22 +0100)
committer: Catalin Marinas <catalin.marinas@arm.com>
Fri, 22 Apr 2022 17:51:22 +0000 (18:51 +0100)
SME defines two new traps which need to be enabled for guests to ensure
that they can't use SME, one for the main SME operations which mirrors the
traps for SVE and another for access to TPIDR2 in SCTLR_EL2.

For VHE manage SMEN along with ZEN in activate_traps() and the FP state
management callbacks, along with SCTLR_EL2.EnTPIDR2.  There is no
existing dynamic management of SCTLR_EL2.

For nVHE manage TSM in activate_traps() along with the fine grained
traps for TPIDR2 and SMPRI.  There is no existing dynamic management of
fine grained traps.

Signed-off-by: Mark Brown <broonie@kernel.org>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Reviewed-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20220419112247.711548-26-broonie@kernel.org
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
arch/arm64/kvm/hyp/nvhe/switch.c
arch/arm64/kvm/hyp/vhe/switch.c

index 6410d21d86957d3b9e057e6412c2b91563e97c31..caace61ea459d29c9af59156bd9b738a866af6f4 100644 (file)
@@ -47,10 +47,24 @@ static void __activate_traps(struct kvm_vcpu *vcpu)
                val |= CPTR_EL2_TFP | CPTR_EL2_TZ;
                __activate_traps_fpsimd32(vcpu);
        }
+       if (cpus_have_final_cap(ARM64_SME))
+               val |= CPTR_EL2_TSM;
 
        write_sysreg(val, cptr_el2);
        write_sysreg(__this_cpu_read(kvm_hyp_vector), vbar_el2);
 
+       if (cpus_have_final_cap(ARM64_SME)) {
+               val = read_sysreg_s(SYS_HFGRTR_EL2);
+               val &= ~(HFGxTR_EL2_nTPIDR2_EL0_MASK |
+                        HFGxTR_EL2_nSMPRI_EL1_MASK);
+               write_sysreg_s(val, SYS_HFGRTR_EL2);
+
+               val = read_sysreg_s(SYS_HFGWTR_EL2);
+               val &= ~(HFGxTR_EL2_nTPIDR2_EL0_MASK |
+                        HFGxTR_EL2_nSMPRI_EL1_MASK);
+               write_sysreg_s(val, SYS_HFGWTR_EL2);
+       }
+
        if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
                struct kvm_cpu_context *ctxt = &vcpu->arch.ctxt;
 
@@ -94,9 +108,25 @@ static void __deactivate_traps(struct kvm_vcpu *vcpu)
 
        write_sysreg(this_cpu_ptr(&kvm_init_params)->hcr_el2, hcr_el2);
 
+       if (cpus_have_final_cap(ARM64_SME)) {
+               u64 val;
+
+               val = read_sysreg_s(SYS_HFGRTR_EL2);
+               val |= HFGxTR_EL2_nTPIDR2_EL0_MASK |
+                       HFGxTR_EL2_nSMPRI_EL1_MASK;
+               write_sysreg_s(val, SYS_HFGRTR_EL2);
+
+               val = read_sysreg_s(SYS_HFGWTR_EL2);
+               val |= HFGxTR_EL2_nTPIDR2_EL0_MASK |
+                       HFGxTR_EL2_nSMPRI_EL1_MASK;
+               write_sysreg_s(val, SYS_HFGWTR_EL2);
+       }
+
        cptr = CPTR_EL2_DEFAULT;
        if (vcpu_has_sve(vcpu) && (vcpu->arch.flags & KVM_ARM64_FP_ENABLED))
                cptr |= CPTR_EL2_TZ;
+       if (cpus_have_final_cap(ARM64_SME))
+               cptr &= ~CPTR_EL2_TSM;
 
        write_sysreg(cptr, cptr_el2);
        write_sysreg(__kvm_hyp_host_vector, vbar_el2);
index 262dfe03134daba2f7c9669b115ae792cd5b15e1..969f20daf97aabb9b8dd67c7eec8e7272e1953d9 100644 (file)
@@ -41,7 +41,8 @@ static void __activate_traps(struct kvm_vcpu *vcpu)
 
        val = read_sysreg(cpacr_el1);
        val |= CPACR_EL1_TTA;
-       val &= ~(CPACR_EL1_ZEN_EL0EN | CPACR_EL1_ZEN_EL1EN);
+       val &= ~(CPACR_EL1_ZEN_EL0EN | CPACR_EL1_ZEN_EL1EN |
+                CPACR_EL1_SMEN_EL0EN | CPACR_EL1_SMEN_EL1EN);
 
        /*
         * With VHE (HCR.E2H == 1), accesses to CPACR_EL1 are routed to
@@ -62,6 +63,10 @@ static void __activate_traps(struct kvm_vcpu *vcpu)
                __activate_traps_fpsimd32(vcpu);
        }
 
+       if (cpus_have_final_cap(ARM64_SME))
+               write_sysreg(read_sysreg(sctlr_el2) & ~SCTLR_ELx_ENTP2,
+                            sctlr_el2);
+
        write_sysreg(val, cpacr_el1);
 
        write_sysreg(__this_cpu_read(kvm_hyp_vector), vbar_el1);
@@ -83,6 +88,10 @@ static void __deactivate_traps(struct kvm_vcpu *vcpu)
         */
        asm(ALTERNATIVE("nop", "isb", ARM64_WORKAROUND_SPECULATIVE_AT));
 
+       if (cpus_have_final_cap(ARM64_SME))
+               write_sysreg(read_sysreg(sctlr_el2) | SCTLR_ELx_ENTP2,
+                            sctlr_el2);
+
        write_sysreg(CPACR_EL1_DEFAULT, cpacr_el1);
 
        if (!arm64_kernel_unmapped_at_el0())