KVM: arm64: Introduce __pkvm_vcpu_{load,put}()
author		Marc Zyngier <maz@kernel.org>
		Wed, 18 Dec 2024 19:40:50 +0000 (19:40 +0000)
committer	Marc Zyngier <maz@kernel.org>
		Fri, 20 Dec 2024 09:44:00 +0000 (09:44 +0000)
Rather than looking up the hyp vCPU on every run hypercall at EL2,
introduce a per-CPU 'loaded_hyp_vcpu' tracking variable which is updated
by a pair of load/put hypercalls called directly from
kvm_arch_vcpu_{load,put}() when pKVM is enabled.
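
For illustration, the host-side pairing this introduces looks roughly as
follows (a simplified sketch of the code added to arm.c below; error
handling and the vgic save/restore are elided):

	/* kvm_arch_vcpu_load(), pKVM enabled: pin the hyp vCPU to this CPU */
	kvm_call_hyp_nvhe(__pkvm_vcpu_load, handle, vcpu_idx, hcr_el2);

	/* ... any number of __kvm_vcpu_run hypercalls on this physical CPU ... */

	/* kvm_arch_vcpu_put(): release it before the task can be migrated */
	kvm_call_hyp_nvhe(__pkvm_vcpu_put);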

Tested-by: Fuad Tabba <tabba@google.com>
Reviewed-by: Fuad Tabba <tabba@google.com>
Signed-off-by: Quentin Perret <qperret@google.com>
Link: https://lore.kernel.org/r/20241218194059.3670226-10-qperret@google.com
Signed-off-by: Marc Zyngier <maz@kernel.org>
arch/arm64/include/asm/kvm_asm.h
arch/arm64/kvm/arm.c
arch/arm64/kvm/hyp/include/nvhe/pkvm.h
arch/arm64/kvm/hyp/nvhe/hyp-main.c
arch/arm64/kvm/hyp/nvhe/pkvm.c
arch/arm64/kvm/vgic/vgic-v3.c

diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index ca259034431353dc8ec56c0f9c66fbc13fbd7704..89c0fac6955167b8081f67ae380c8d6ac7237d40 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -79,6 +79,8 @@ enum __kvm_host_smccc_func {
        __KVM_HOST_SMCCC_FUNC___pkvm_init_vm,
        __KVM_HOST_SMCCC_FUNC___pkvm_init_vcpu,
        __KVM_HOST_SMCCC_FUNC___pkvm_teardown_vm,
+       __KVM_HOST_SMCCC_FUNC___pkvm_vcpu_load,
+       __KVM_HOST_SMCCC_FUNC___pkvm_vcpu_put,
 };
 
 #define DECLARE_KVM_VHE_SYM(sym)       extern char sym[]
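
The new IDs are dispatched like every other host hypercall:
handle_host_hcall() (visible as trailing context at the end of hyp-main.c
below) turns the SMCCC function ID into an index into the host_hcall[]
table. Roughly, as a sketch of that pre-existing mechanism:

	DECLARE_REG(unsigned long, id, host_ctxt, 0);

	id -= KVM_HOST_SMCCC_ID(0);	/* ABI function ID -> table index */
	if (id < ARRAY_SIZE(host_hcall))
		host_hcall[id](host_ctxt);	/* e.g. handle___pkvm_vcpu_load() */

Appending the two entries at the end of the enum keeps the IDs of all
existing hypercalls stable.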
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index a102c3aebdbc419fe94a0f973eb2b3f06f0ba698..55cc62b2f4691f06b379af94caafda0369d2afd4 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -619,12 +619,26 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 
        kvm_arch_vcpu_load_debug_state_flags(vcpu);
 
+       if (is_protected_kvm_enabled()) {
+               kvm_call_hyp_nvhe(__pkvm_vcpu_load,
+                                 vcpu->kvm->arch.pkvm.handle,
+                                 vcpu->vcpu_idx, vcpu->arch.hcr_el2);
+               kvm_call_hyp(__vgic_v3_restore_vmcr_aprs,
+                            &vcpu->arch.vgic_cpu.vgic_v3);
+       }
+
        if (!cpumask_test_cpu(cpu, vcpu->kvm->arch.supported_cpus))
                vcpu_set_on_unsupported_cpu(vcpu);
 }
 
 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 {
+       if (is_protected_kvm_enabled()) {
+               kvm_call_hyp(__vgic_v3_save_vmcr_aprs,
+                            &vcpu->arch.vgic_cpu.vgic_v3);
+               kvm_call_hyp_nvhe(__pkvm_vcpu_put);
+       }
+
        kvm_arch_vcpu_put_debug_state_flags(vcpu);
        kvm_arch_vcpu_put_fp(vcpu);
        if (has_vhe())
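
The ordering in the hunk above is deliberate: the vgic VMCR/APRs restore is
issued only after __pkvm_vcpu_load, and the save before __pkvm_vcpu_put,
so both land inside the window in which the hyp vCPU is loaded at EL2. As
an annotated sketch:

	/*
	 * load:  __pkvm_vcpu_load(...);             then
	 *        __vgic_v3_restore_vmcr_aprs(...);
	 * put:   __vgic_v3_save_vmcr_aprs(...);     then
	 *        __pkvm_vcpu_put();
	 */

This is also why vgic_v3_load()/vgic_v3_put() (last file in this patch)
skip their own save/restore when pKVM is enabled.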
diff --git a/arch/arm64/kvm/hyp/include/nvhe/pkvm.h b/arch/arm64/kvm/hyp/include/nvhe/pkvm.h
index f361d8b91930da5927701fb68a5955a1df61c142..be52c5b15e2199f7add5e9877d2352958bdfbc89 100644
--- a/arch/arm64/kvm/hyp/include/nvhe/pkvm.h
+++ b/arch/arm64/kvm/hyp/include/nvhe/pkvm.h
@@ -20,6 +20,12 @@ struct pkvm_hyp_vcpu {
 
        /* Backpointer to the host's (untrusted) vCPU instance. */
        struct kvm_vcpu *host_vcpu;
+
+       /*
+        * If this hyp vCPU is loaded, then this is a backpointer to the
+        * per-cpu pointer tracking us; NULL otherwise.
+        */
+       struct pkvm_hyp_vcpu **loaded_hyp_vcpu;
 };
 
 /*
@@ -69,6 +75,7 @@ int __pkvm_teardown_vm(pkvm_handle_t handle);
 struct pkvm_hyp_vcpu *pkvm_load_hyp_vcpu(pkvm_handle_t handle,
                                         unsigned int vcpu_idx);
 void pkvm_put_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu);
+struct pkvm_hyp_vcpu *pkvm_get_loaded_hyp_vcpu(void);
 
 struct pkvm_hyp_vm *get_pkvm_hyp_vm(pkvm_handle_t handle);
 void put_pkvm_hyp_vm(struct pkvm_hyp_vm *hyp_vm);
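
The double pointer added to struct pkvm_hyp_vcpu above lets EL2 check the
association cheaply in both directions, without scanning other CPUs'
state. As an invariant sketch (loaded_hyp_vcpu is the per-CPU variable
defined in nvhe/pkvm.c below):

	/*
	 * While hyp_vcpu is loaded on physical CPU n:
	 *   per_cpu(loaded_hyp_vcpu, n) == hyp_vcpu
	 *   hyp_vcpu->loaded_hyp_vcpu   == &per_cpu(loaded_hyp_vcpu, n)
	 * and hyp_vcpu->loaded_hyp_vcpu == NULL whenever it is not loaded.
	 */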
diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-main.c b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
index 6aa0b13d86e581a36ed529bcd932498045d2d6df..95d78db315b3f5fbcad04c003fd01bf6d3ea554c 100644
--- a/arch/arm64/kvm/hyp/nvhe/hyp-main.c
+++ b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
@@ -141,16 +141,46 @@ static void sync_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu)
                host_cpu_if->vgic_lr[i] = hyp_cpu_if->vgic_lr[i];
 }
 
+static void handle___pkvm_vcpu_load(struct kvm_cpu_context *host_ctxt)
+{
+       DECLARE_REG(pkvm_handle_t, handle, host_ctxt, 1);
+       DECLARE_REG(unsigned int, vcpu_idx, host_ctxt, 2);
+       DECLARE_REG(u64, hcr_el2, host_ctxt, 3);
+       struct pkvm_hyp_vcpu *hyp_vcpu;
+
+       if (!is_protected_kvm_enabled())
+               return;
+
+       hyp_vcpu = pkvm_load_hyp_vcpu(handle, vcpu_idx);
+       if (!hyp_vcpu)
+               return;
+
+       if (pkvm_hyp_vcpu_is_protected(hyp_vcpu)) {
+               /* Propagate WFx trapping flags */
+               hyp_vcpu->vcpu.arch.hcr_el2 &= ~(HCR_TWE | HCR_TWI);
+               hyp_vcpu->vcpu.arch.hcr_el2 |= hcr_el2 & (HCR_TWE | HCR_TWI);
+       }
+}
+
+static void handle___pkvm_vcpu_put(struct kvm_cpu_context *host_ctxt)
+{
+       struct pkvm_hyp_vcpu *hyp_vcpu;
+
+       if (!is_protected_kvm_enabled())
+               return;
+
+       hyp_vcpu = pkvm_get_loaded_hyp_vcpu();
+       if (hyp_vcpu)
+               pkvm_put_hyp_vcpu(hyp_vcpu);
+}
+
 static void handle___kvm_vcpu_run(struct kvm_cpu_context *host_ctxt)
 {
        DECLARE_REG(struct kvm_vcpu *, host_vcpu, host_ctxt, 1);
        int ret;
 
-       host_vcpu = kern_hyp_va(host_vcpu);
-
        if (unlikely(is_protected_kvm_enabled())) {
-               struct pkvm_hyp_vcpu *hyp_vcpu;
-               struct kvm *host_kvm;
+               struct pkvm_hyp_vcpu *hyp_vcpu = pkvm_get_loaded_hyp_vcpu();
 
                /*
                 * KVM (and pKVM) doesn't support SME guests for now, and
@@ -163,9 +193,6 @@ static void handle___kvm_vcpu_run(struct kvm_cpu_context *host_ctxt)
                        goto out;
                }
 
-               host_kvm = kern_hyp_va(host_vcpu->kvm);
-               hyp_vcpu = pkvm_load_hyp_vcpu(host_kvm->arch.pkvm.handle,
-                                             host_vcpu->vcpu_idx);
                if (!hyp_vcpu) {
                        ret = -EINVAL;
                        goto out;
@@ -176,12 +203,10 @@ static void handle___kvm_vcpu_run(struct kvm_cpu_context *host_ctxt)
                ret = __kvm_vcpu_run(&hyp_vcpu->vcpu);
 
                sync_hyp_vcpu(hyp_vcpu);
-               pkvm_put_hyp_vcpu(hyp_vcpu);
        } else {
                /* The host is fully trusted, run its vCPU directly. */
-               ret = __kvm_vcpu_run(host_vcpu);
+               ret = __kvm_vcpu_run(kern_hyp_va(host_vcpu));
        }
-
 out:
        cpu_reg(host_ctxt, 1) =  ret;
 }
@@ -409,6 +434,8 @@ static const hcall_t host_hcall[] = {
        HANDLE_FUNC(__pkvm_init_vm),
        HANDLE_FUNC(__pkvm_init_vcpu),
        HANDLE_FUNC(__pkvm_teardown_vm),
+       HANDLE_FUNC(__pkvm_vcpu_load),
+       HANDLE_FUNC(__pkvm_vcpu_put),
 };
 
 static void handle_host_hcall(struct kvm_cpu_context *host_ctxt)
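
With the lookup hoisted out of the run path, the protected branch of
handle___kvm_vcpu_run() reduces to the following (a simplified sketch;
flush_hyp_vcpu() is assumed here as the pre-existing counterpart of
sync_hyp_vcpu() that copies host state in, not visible in the hunks
above):

	struct pkvm_hyp_vcpu *hyp_vcpu = pkvm_get_loaded_hyp_vcpu();

	if (!hyp_vcpu)
		return -EINVAL;		/* host never issued __pkvm_vcpu_load */

	flush_hyp_vcpu(hyp_vcpu);	/* copy host vCPU state in */
	ret = __kvm_vcpu_run(&hyp_vcpu->vcpu);
	sync_hyp_vcpu(hyp_vcpu);	/* copy results back; vCPU stays loaded */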
diff --git a/arch/arm64/kvm/hyp/nvhe/pkvm.c b/arch/arm64/kvm/hyp/nvhe/pkvm.c
index d46a02e24e4aa14de6a6ed24ca6aec64be8dd0fb..496d186efb03be611b1c4f7e6287ba789a06a92c 100644
--- a/arch/arm64/kvm/hyp/nvhe/pkvm.c
+++ b/arch/arm64/kvm/hyp/nvhe/pkvm.c
@@ -23,6 +23,12 @@ unsigned int kvm_arm_vmid_bits;
 
 unsigned int kvm_host_sve_max_vl;
 
+/*
+ * The currently loaded hyp vCPU for each physical CPU. Used only when
+ * protected KVM is enabled, but for both protected and non-protected VMs.
+ */
+static DEFINE_PER_CPU(struct pkvm_hyp_vcpu *, loaded_hyp_vcpu);
+
 /*
  * Set trap register values based on features in ID_AA64PFR0.
  */
@@ -306,15 +312,30 @@ struct pkvm_hyp_vcpu *pkvm_load_hyp_vcpu(pkvm_handle_t handle,
        struct pkvm_hyp_vcpu *hyp_vcpu = NULL;
        struct pkvm_hyp_vm *hyp_vm;
 
+       /* Cannot load a new vcpu without putting the old one first. */
+       if (__this_cpu_read(loaded_hyp_vcpu))
+               return NULL;
+
        hyp_spin_lock(&vm_table_lock);
        hyp_vm = get_vm_by_handle(handle);
        if (!hyp_vm || hyp_vm->nr_vcpus <= vcpu_idx)
                goto unlock;
 
        hyp_vcpu = hyp_vm->vcpus[vcpu_idx];
+
+       /* Ensure vcpu isn't loaded on more than one cpu simultaneously. */
+       if (unlikely(hyp_vcpu->loaded_hyp_vcpu)) {
+               hyp_vcpu = NULL;
+               goto unlock;
+       }
+
+       hyp_vcpu->loaded_hyp_vcpu = this_cpu_ptr(&loaded_hyp_vcpu);
        hyp_page_ref_inc(hyp_virt_to_page(hyp_vm));
 unlock:
        hyp_spin_unlock(&vm_table_lock);
+
+       if (hyp_vcpu)
+               __this_cpu_write(loaded_hyp_vcpu, hyp_vcpu);
        return hyp_vcpu;
 }
 
@@ -323,10 +344,17 @@ void pkvm_put_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu)
        struct pkvm_hyp_vm *hyp_vm = pkvm_hyp_vcpu_to_hyp_vm(hyp_vcpu);
 
        hyp_spin_lock(&vm_table_lock);
+       hyp_vcpu->loaded_hyp_vcpu = NULL;
+       __this_cpu_write(loaded_hyp_vcpu, NULL);
        hyp_page_ref_dec(hyp_virt_to_page(hyp_vm));
        hyp_spin_unlock(&vm_table_lock);
 }
 
+struct pkvm_hyp_vcpu *pkvm_get_loaded_hyp_vcpu(void)
+{
+       return __this_cpu_read(loaded_hyp_vcpu);
+}
+
 struct pkvm_hyp_vm *get_pkvm_hyp_vm(pkvm_handle_t handle)
 {
        struct pkvm_hyp_vm *hyp_vm;
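
Both the claim in pkvm_load_hyp_vcpu() and the clear in
pkvm_put_hyp_vcpu() happen under vm_table_lock, which is what makes the
double-load check race-free; only the final __this_cpu_write() is outside
the lock, and that slot is only ever written by its owning CPU.
Schematically, two CPUs racing to load the same vCPU:

	/* CPU0					CPU1
	 *  hyp_spin_lock(&vm_table_lock)
	 *  loaded_hyp_vcpu == NULL: claim
	 *  hyp_spin_unlock(&vm_table_lock)
	 *					hyp_spin_lock(&vm_table_lock)
	 *					loaded_hyp_vcpu != NULL
	 *					-> return NULL to the host
	 *					hyp_spin_unlock(&vm_table_lock)
	 */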
diff --git a/arch/arm64/kvm/vgic/vgic-v3.c b/arch/arm64/kvm/vgic/vgic-v3.c
index f267bc2486a18edfa41c6cbc10fac9457d1e4c18..c2ef41fff079bb8f4ce4a7dd8a767cb6c68e4cc7 100644
--- a/arch/arm64/kvm/vgic/vgic-v3.c
+++ b/arch/arm64/kvm/vgic/vgic-v3.c
@@ -734,7 +734,8 @@ void vgic_v3_load(struct kvm_vcpu *vcpu)
 {
        struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
 
-       kvm_call_hyp(__vgic_v3_restore_vmcr_aprs, cpu_if);
+       if (likely(!is_protected_kvm_enabled()))
+               kvm_call_hyp(__vgic_v3_restore_vmcr_aprs, cpu_if);
 
        if (has_vhe())
                __vgic_v3_activate_traps(cpu_if);
@@ -746,7 +747,8 @@ void vgic_v3_put(struct kvm_vcpu *vcpu)
 {
        struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
 
-       kvm_call_hyp(__vgic_v3_save_vmcr_aprs, cpu_if);
+       if (likely(!is_protected_kvm_enabled()))
+               kvm_call_hyp(__vgic_v3_save_vmcr_aprs, cpu_if);
        WARN_ON(vgic_v4_put(vcpu));
 
        if (has_vhe())
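
Finally, the vgic side: under pKVM the VMCR/APRs are now shuttled
explicitly from kvm_arch_vcpu_{load,put}() (arm.c above), bracketed by the
load/put hypercalls, so vgic_v3_load()/vgic_v3_put() must not issue the
same calls a second time. The guard reduces to:

	if (likely(!is_protected_kvm_enabled()))
		kvm_call_hyp(__vgic_v3_restore_vmcr_aprs, cpu_if);
	/* pKVM: kvm_arch_vcpu_load() already restored VMCR/APRs right
	 * after __pkvm_vcpu_load(), while the hyp vCPU is loaded. */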