KVM: arm64: Support TLB invalidation in guest context
author Will Deacon <will@kernel.org>
Tue, 23 Apr 2024 15:05:17 +0000 (16:05 +0100)
committer Marc Zyngier <maz@kernel.org>
Wed, 1 May 2024 15:47:45 +0000 (16:47 +0100)
Typically, TLB invalidation of guest stage-2 mappings using nVHE is
performed by a hypercall originating from the host. For the invalidation
instruction to be effective, therefore, __tlb_switch_to_{guest,host}()
swizzle the active stage-2 context around the TLBI instruction.

With guest-to-host memory sharing and unsharing hypercalls
originating from the guest under pKVM, there is a need to support
both guest and host VMID invalidations issued from guest context.

Replace the __tlb_switch_to_{guest,host}() functions with a more general
{enter,exit}_vmid_context() implementation which supports being invoked
from guest context and acts as a no-op if the target context matches the
running context.
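
The new helpers bracket the TLBI sequence regardless of which context
issues it: enter_vmid_context() switches to the target VMID (doing
nothing if it is already live), the invalidation and barriers run, and
exit_vmid_context() restores whatever was running before. A minimal
sketch of that call pattern, closely following __kvm_tlb_flush_vmid()
in the diff below (the do_tlb_flush_vmid() name is illustrative only):

static void do_tlb_flush_vmid(struct kvm_s2_mmu *mmu)
{
	struct tlb_inv_context cxt;

	/* Switch to the requested VMID; a no-op if already in that context. */
	enter_vmid_context(mmu, &cxt, false);

	__tlbi(vmalls12e1is);	/* invalidate all stage-1+2 entries for this VMID */
	dsb(ish);		/* wait for the invalidation to complete */
	isb();

	/* Restore the previous context, if enter_vmid_context() changed it. */
	exit_vmid_context(&cxt);
}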

Signed-off-by: Will Deacon <will@kernel.org>
Signed-off-by: Fuad Tabba <tabba@google.com>
Acked-by: Oliver Upton <oliver.upton@linux.dev>
Link: https://lore.kernel.org/r/20240423150538.2103045-10-tabba@google.com
Signed-off-by: Marc Zyngier <maz@kernel.org>
arch/arm64/kvm/hyp/nvhe/tlb.c

index a60fb13e21924f4af56162687231251d8655ba04..c6e0a49eb8606ae11be8df0d8d59acf3423e1387 100644
 #include <nvhe/mem_protect.h>
 
 struct tlb_inv_context {
-       u64             tcr;
+       struct kvm_s2_mmu       *mmu;
+       u64                     tcr;
+       u64                     sctlr;
 };
 
-static void __tlb_switch_to_guest(struct kvm_s2_mmu *mmu,
-                                 struct tlb_inv_context *cxt,
-                                 bool nsh)
+static void enter_vmid_context(struct kvm_s2_mmu *mmu,
+                              struct tlb_inv_context *cxt,
+                              bool nsh)
 {
+       struct kvm_s2_mmu *host_s2_mmu = &host_mmu.arch.mmu;
+       struct kvm_cpu_context *host_ctxt;
+       struct kvm_vcpu *vcpu;
+
+       host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
+       vcpu = host_ctxt->__hyp_running_vcpu;
+       cxt->mmu = NULL;
+
        /*
         * We have two requirements:
         *
@@ -40,20 +50,55 @@ static void __tlb_switch_to_guest(struct kvm_s2_mmu *mmu,
        else
                dsb(ish);
 
+       /*
+        * If we're already in the desired context, then there's nothing to do.
+        */
+       if (vcpu) {
+               /*
+                * We're in guest context. However, for this to work, this needs
+                * to be called from within __kvm_vcpu_run(), which ensures that
+                * __hyp_running_vcpu is set to the current guest vcpu.
+                */
+               if (mmu == vcpu->arch.hw_mmu || WARN_ON(mmu != host_s2_mmu))
+                       return;
+
+               cxt->mmu = vcpu->arch.hw_mmu;
+       } else {
+               /* We're in host context. */
+               if (mmu == host_s2_mmu)
+                       return;
+
+               cxt->mmu = host_s2_mmu;
+       }
+
        if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
                u64 val;
 
                /*
                 * For CPUs that are affected by ARM 1319367, we need to
-                * avoid a host Stage-1 walk while we have the guest's
-                * VMID set in the VTTBR in order to invalidate TLBs.
-                * We're guaranteed that the S1 MMU is enabled, so we can
-                * simply set the EPD bits to avoid any further TLB fill.
+                * avoid a Stage-1 walk with the old VMID while we have
+                * the new VMID set in the VTTBR in order to invalidate TLBs.
+                * We're guaranteed that the host S1 MMU is enabled, so
+                * we can simply set the EPD bits to avoid any further
+                * TLB fill. For guests, we ensure that the S1 MMU is
+                * temporarily enabled in the next context.
                 */
                val = cxt->tcr = read_sysreg_el1(SYS_TCR);
                val |= TCR_EPD1_MASK | TCR_EPD0_MASK;
                write_sysreg_el1(val, SYS_TCR);
                isb();
+
+               if (vcpu) {
+                       val = cxt->sctlr = read_sysreg_el1(SYS_SCTLR);
+                       if (!(val & SCTLR_ELx_M)) {
+                               val |= SCTLR_ELx_M;
+                               write_sysreg_el1(val, SYS_SCTLR);
+                               isb();
+                       }
+               } else {
+                       /* The host S1 MMU is always enabled. */
+                       cxt->sctlr = SCTLR_ELx_M;
+               }
        }
 
        /*
@@ -62,18 +107,40 @@ static void __tlb_switch_to_guest(struct kvm_s2_mmu *mmu,
         * ensuring that we always have an ISB, but not two ISBs back
         * to back.
         */
-       __load_stage2(mmu, kern_hyp_va(mmu->arch));
+       if (vcpu)
+               __load_host_stage2();
+       else
+               __load_stage2(mmu, kern_hyp_va(mmu->arch));
+
        asm(ALTERNATIVE("isb", "nop", ARM64_WORKAROUND_SPECULATIVE_AT));
 }
 
-static void __tlb_switch_to_host(struct tlb_inv_context *cxt)
+static void exit_vmid_context(struct tlb_inv_context *cxt)
 {
-       __load_host_stage2();
+       struct kvm_s2_mmu *mmu = cxt->mmu;
+       struct kvm_cpu_context *host_ctxt;
+       struct kvm_vcpu *vcpu;
+
+       host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
+       vcpu = host_ctxt->__hyp_running_vcpu;
+
+       if (!mmu)
+               return;
+
+       if (vcpu)
+               __load_stage2(mmu, kern_hyp_va(mmu->arch));
+       else
+               __load_host_stage2();
 
        if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
-               /* Ensure write of the host VMID */
+               /* Ensure write of the old VMID */
                isb();
-               /* Restore the host's TCR_EL1 */
+
+               if (!(cxt->sctlr & SCTLR_ELx_M)) {
+                       write_sysreg_el1(cxt->sctlr, SYS_SCTLR);
+                       isb();
+               }
+
                write_sysreg_el1(cxt->tcr, SYS_TCR);
        }
 }
@@ -84,7 +151,7 @@ void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu,
        struct tlb_inv_context cxt;
 
        /* Switch to requested VMID */
-       __tlb_switch_to_guest(mmu, &cxt, false);
+       enter_vmid_context(mmu, &cxt, false);
 
        /*
         * We could do so much better if we had the VA as well.
@@ -105,7 +172,7 @@ void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu,
        dsb(ish);
        isb();
 
-       __tlb_switch_to_host(&cxt);
+       exit_vmid_context(&cxt);
 }
 
 void __kvm_tlb_flush_vmid_ipa_nsh(struct kvm_s2_mmu *mmu,
@@ -114,7 +181,7 @@ void __kvm_tlb_flush_vmid_ipa_nsh(struct kvm_s2_mmu *mmu,
        struct tlb_inv_context cxt;
 
        /* Switch to requested VMID */
-       __tlb_switch_to_guest(mmu, &cxt, true);
+       enter_vmid_context(mmu, &cxt, true);
 
        /*
         * We could do so much better if we had the VA as well.
@@ -135,7 +202,7 @@ void __kvm_tlb_flush_vmid_ipa_nsh(struct kvm_s2_mmu *mmu,
        dsb(nsh);
        isb();
 
-       __tlb_switch_to_host(&cxt);
+       exit_vmid_context(&cxt);
 }
 
 void __kvm_tlb_flush_vmid_range(struct kvm_s2_mmu *mmu,
@@ -152,7 +219,7 @@ void __kvm_tlb_flush_vmid_range(struct kvm_s2_mmu *mmu,
        start = round_down(start, stride);
 
        /* Switch to requested VMID */
-       __tlb_switch_to_guest(mmu, &cxt, false);
+       enter_vmid_context(mmu, &cxt, false);
 
        __flush_s2_tlb_range_op(ipas2e1is, start, pages, stride, 0);
 
@@ -161,7 +228,7 @@ void __kvm_tlb_flush_vmid_range(struct kvm_s2_mmu *mmu,
        dsb(ish);
        isb();
 
-       __tlb_switch_to_host(&cxt);
+       exit_vmid_context(&cxt);
 }
 
 void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu)
@@ -169,13 +236,13 @@ void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu)
        struct tlb_inv_context cxt;
 
        /* Switch to requested VMID */
-       __tlb_switch_to_guest(mmu, &cxt, false);
+       enter_vmid_context(mmu, &cxt, false);
 
        __tlbi(vmalls12e1is);
        dsb(ish);
        isb();
 
-       __tlb_switch_to_host(&cxt);
+       exit_vmid_context(&cxt);
 }
 
 void __kvm_flush_cpu_context(struct kvm_s2_mmu *mmu)
@@ -183,19 +250,19 @@ void __kvm_flush_cpu_context(struct kvm_s2_mmu *mmu)
        struct tlb_inv_context cxt;
 
        /* Switch to requested VMID */
-       __tlb_switch_to_guest(mmu, &cxt, false);
+       enter_vmid_context(mmu, &cxt, false);
 
        __tlbi(vmalle1);
        asm volatile("ic iallu");
        dsb(nsh);
        isb();
 
-       __tlb_switch_to_host(&cxt);
+       exit_vmid_context(&cxt);
 }
 
 void __kvm_flush_vm_context(void)
 {
-       /* Same remark as in __tlb_switch_to_guest() */
+       /* Same remark as in enter_vmid_context() */
        dsb(ish);
        __tlbi(alle1is);
        dsb(ish);