KVM: arm64: nv: Handle TLBI VMALLS12E1{,IS} operations
author    Marc Zyngier <maz@kernel.org>
          Fri, 14 Jun 2024 14:45:44 +0000 (15:45 +0100)
committer Oliver Upton <oliver.upton@linux.dev>
          Wed, 19 Jun 2024 08:14:37 +0000 (08:14 +0000)
Emulating TLBI VMALLS12E1* results in tearing down all the shadow
S2 PTs that match the current VMID, since our shadow S2s are just
some form of SW-managed TLBs. That teardown itself results in a
full TLB invalidation for both S1 and S2.

This can result in over-invalidation if two vcpus use the same VMID
to tag private S2 PTs, but this is still correct from an architectural
perspective.
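
For context, the VMID-keyed walk the new handler relies on amounts to
the sketch below. This is illustrative only: the field names
(nested_mmus, nested_mmus_size, tlb_vttbr) are assumptions, and the
patch itself uses the real kvm_s2_mmu_iterate_by_vmid() helper.

    /* Illustrative sketch, not the in-tree implementation */
    static void sketch_iterate_by_vmid(struct kvm *kvm, u16 vmid,
                                       const union tlbi_info *info,
                                       void (*fn)(struct kvm_s2_mmu *,
                                                  const union tlbi_info *))
    {
            int i;

            for (i = 0; i < kvm->arch.nested_mmus_size; i++) {
                    struct kvm_s2_mmu *mmu = &kvm->arch.nested_mmus[i];

                    /* skip shadow S2s tagged with a different VMID */
                    if (get_vmid(mmu->tlb_vttbr) != vmid)
                            continue;

                    /* for VMALLS12E1*, fn is s2_mmu_unmap_range() */
                    fn(mmu, info);
            }
    }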

Co-developed-by: Jintack Lim <jintack.lim@linaro.org>
Co-developed-by: Christoffer Dall <christoffer.dall@arm.com>
Signed-off-by: Jintack Lim <jintack.lim@linaro.org>
Signed-off-by: Christoffer Dall <christoffer.dall@arm.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20240614144552.2773592-9-maz@kernel.org
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
arch/arm64/kvm/sys_regs.c

index b22309fca3a78c705e7688ed5eed2239e7f991cf..22a3691ce2486603c2b634e04c672c31831350cc 100644 (file)
@@ -2741,6 +2741,22 @@ static const struct sys_reg_desc sys_reg_descs[] = {
        EL2_REG(SP_EL2, NULL, reset_unknown, 0),
 };
 
+static bool kvm_supported_tlbi_s12_op(struct kvm_vcpu *vcpu, u32 instr)
+{
+       struct kvm *kvm = vcpu->kvm;
+       u8 CRm = sys_reg_CRm(instr);
+
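+       /* nXS variants are only valid when FEAT_XS is implemented */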
+       if (sys_reg_CRn(instr) == TLBI_CRn_nXS &&
+           !kvm_has_feat(kvm, ID_AA64ISAR1_EL1, XS, IMP))
+               return false;
+
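+       /* Outer-Shareable variants require FEAT_TLBIOS */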
+       if (CRm == TLBI_CRm_nROS &&
+           !kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, OS))
+               return false;
+
+       return true;
+}
+
 /* Only defined here as this is an internal "abstraction" */
 union tlbi_info {
        struct {
@@ -2758,6 +2774,38 @@ union tlbi_info {
        } va;
 };
 
+static void s2_mmu_unmap_range(struct kvm_s2_mmu *mmu,
+                              const union tlbi_info *info)
+{
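+       /* Unmapping the shadow S2 doubles as a full S1+S2 TLB invalidation */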
+       kvm_stage2_unmap_range(mmu, info->range.start, info->range.size);
+}
+
+static bool handle_vmalls12e1is(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
+                               const struct sys_reg_desc *r)
+{
+       u32 sys_encoding = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2);
+       u64 limit, vttbr;
+
+       if (!kvm_supported_tlbi_s12_op(vcpu, sys_encoding)) {
+               kvm_inject_undefined(vcpu);
+               return false;
+       }
+
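+       /* The shadow S2 covers the guest's entire IPA space: [0, 2^PARange) */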
+       vttbr = vcpu_read_sys_reg(vcpu, VTTBR_EL2);
+       limit = BIT_ULL(kvm_get_pa_bits(vcpu->kvm));
+
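+       /* Tear down every shadow S2 tagged with the guest's VMID */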
+       kvm_s2_mmu_iterate_by_vmid(vcpu->kvm, get_vmid(vttbr),
+                                  &(union tlbi_info) {
+                                          .range = {
+                                                  .start = 0,
+                                                  .size = limit,
+                                          },
+                                  },
+                                  s2_mmu_unmap_range);
+
+       return true;
+}
+
 static void s2_mmu_tlbi_s1e1(struct kvm_s2_mmu *mmu,
                             const union tlbi_info *info)
 {
@@ -2831,6 +2879,9 @@ static struct sys_reg_desc sys_insn_descs[] = {
        SYS_INSN(TLBI_VAAE1, handle_tlbi_el1),
        SYS_INSN(TLBI_VALE1, handle_tlbi_el1),
        SYS_INSN(TLBI_VAALE1, handle_tlbi_el1),
+
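+       /* IS and non-IS forms share a handler; the teardown is global either way */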
+       SYS_INSN(TLBI_VMALLS12E1IS, handle_vmalls12e1is),
+       SYS_INSN(TLBI_VMALLS12E1, handle_vmalls12e1is),
 };
 
 static const struct sys_reg_desc *first_idreg;