KVM: arm64: nv: Add Stage-1 EL2 invalidation primitives
author     Marc Zyngier <maz@kernel.org>
           Fri, 14 Jun 2024 14:45:41 +0000 (15:45 +0100)
committer  Oliver Upton <oliver.upton@linux.dev>
           Wed, 19 Jun 2024 08:13:49 +0000 (08:13 +0000)
Provide the primitives required to handle TLB invalidation for
Stage-1 EL2 TLBs, which by definition do not require messing
with the Stage-2 page tables.

Co-developed-by: Jintack Lim <jintack.lim@linaro.org>
Co-developed-by: Christoffer Dall <christoffer.dall@arm.com>
Signed-off-by: Jintack Lim <jintack.lim@linaro.org>
Signed-off-by: Christoffer Dall <christoffer.dall@arm.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20240614144552.2773592-6-maz@kernel.org
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
arch/arm64/include/asm/kvm_asm.h
arch/arm64/kvm/hyp/vhe/tlb.c
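
For orientation before the diff: a minimal sketch of the guest-side sequence this primitive services. The helper name and the inline-asm packaging are illustrative assumptions; only the trapped TLBI itself is what the patch emulates.

/*
 * A nested hypervisor invalidating one of its own Stage-1 EL2
 * mappings. Under NV the TLBI traps to KVM, which lands in
 * __kvm_tlbi_s1e2() below with sys_encoding == OP_TLBI_VAE2IS.
 * Note the leading DSB ISHST that the host-side comment expects
 * the guest to have issued before trapping.
 */
static inline void guest_el2_flush_va(unsigned long va)
{
	asm volatile("dsb ishst\n\t"		/* order prior PTE updates */
		     "tlbi vae2is, %0\n\t"	/* trapped and emulated by KVM */
		     "dsb ish\n\t"
		     "isb"
		     : : "r" (va >> 12)		/* Xt carries VA[55:12] */
		     : "memory");
}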

diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index a6330460d9e57bb7c57b00e55137e32b6207cb6c..2181a11b9d9252c7845f3ad73cee9eedee7e3850 100644
@@ -232,6 +232,8 @@ extern void __kvm_tlb_flush_vmid_range(struct kvm_s2_mmu *mmu,
                                        phys_addr_t start, unsigned long pages);
 extern void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu);
 
+extern int __kvm_tlbi_s1e2(struct kvm_s2_mmu *mmu, u64 va, u64 sys_encoding);
+
 extern void __kvm_timer_set_cntvoff(u64 cntvoff);
 
 extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
diff --git a/arch/arm64/kvm/hyp/vhe/tlb.c b/arch/arm64/kvm/hyp/vhe/tlb.c
index 5fa0359f3a8709c9ee4f9128a9b66439ad0476c6..75aa3646580584d4f74ea7086004459c83bf90d1 100644
@@ -219,3 +219,68 @@ void __kvm_flush_vm_context(void)
        __tlbi(alle1is);
        dsb(ish);
 }
+
+/*
+ * TLB invalidation emulation for NV. For any given instruction, we
+ * perform the following transformations:
+ *
+ * - a TLBI targeting EL2 S1 is remapped to EL1 S1
+ * - a non-shareable TLBI is upgraded to being inner-shareable
+ */
+int __kvm_tlbi_s1e2(struct kvm_s2_mmu *mmu, u64 va, u64 sys_encoding)
+{
+       struct tlb_inv_context cxt;
+       int ret = 0;
+
+       /*
+        * The guest will have provided its own DSB ISHST before trapping.
+        * If it hasn't, that's its own problem, and we won't paper over it
+        * (plus, there is plenty of extra synchronisation before we even
+        * get here...).
+        */
+
+       if (mmu)
+               enter_vmid_context(mmu, &cxt);
+
+       switch (sys_encoding) {
+       case OP_TLBI_ALLE2:
+       case OP_TLBI_ALLE2IS:
+       case OP_TLBI_VMALLE1:
+       case OP_TLBI_VMALLE1IS:
+               __tlbi(vmalle1is);
+               break;
+       case OP_TLBI_VAE2:
+       case OP_TLBI_VAE2IS:
+       case OP_TLBI_VAE1:
+       case OP_TLBI_VAE1IS:
+               __tlbi(vae1is, va);
+               break;
+       case OP_TLBI_VALE2:
+       case OP_TLBI_VALE2IS:
+       case OP_TLBI_VALE1:
+       case OP_TLBI_VALE1IS:
+               __tlbi(vale1is, va);
+               break;
+       case OP_TLBI_ASIDE1:
+       case OP_TLBI_ASIDE1IS:
+               __tlbi(aside1is, va);
+               break;
+       case OP_TLBI_VAAE1:
+       case OP_TLBI_VAAE1IS:
+               __tlbi(vaae1is, va);
+               break;
+       case OP_TLBI_VAALE1:
+       case OP_TLBI_VAALE1IS:
+               __tlbi(vaale1is, va);
+               break;
+       default:
+               ret = -EINVAL;
+       }
+       dsb(ish);
+       isb();
+
+       if (mmu)
+               exit_vmid_context(&cxt);
+
+       return ret;
+}
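
A minimal caller sketch, assuming the dispatcher already runs under the VMID being invalidated (the NULL-mmu case the function explicitly tolerates, skipping enter/exit_vmid_context()). The handler name and the idea that the VA has already been extracted from Xt are assumptions for illustration, not part of this patch.

static int emulate_vae2is(u64 va)
{
	/*
	 * OP_TLBI_VAE2IS is remapped to TLBI VAE1IS internally; a
	 * non-shareable OP_TLBI_VAE2 would take the same path and be
	 * upgraded to inner-shareable.
	 */
	return __kvm_tlbi_s1e2(NULL, va, OP_TLBI_VAE2IS);
}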