www.infradead.org Git - users/dwmw2/linux.git/commitdiff
KVM: arm64: Initialize feature id registers for protected VMs
author: Fuad Tabba <tabba@google.com>
Mon, 16 Dec 2024 10:50:46 +0000 (10:50 +0000)
committer: Marc Zyngier <maz@kernel.org>
Fri, 20 Dec 2024 13:52:50 +0000 (13:52 +0000)
The hypervisor maintains the state of protected VMs. Initialize
the values for feature ID registers for protected VMs, to be used
when setting traps and when advertising features to protected
VMs.

Signed-off-by: Fuad Tabba <tabba@google.com>
Link: https://lore.kernel.org/r/20241216105057.579031-7-tabba@google.com
Signed-off-by: Marc Zyngier <maz@kernel.org>
arch/arm64/kvm/hyp/include/nvhe/fixed_config.h
arch/arm64/kvm/hyp/include/nvhe/pkvm.h
arch/arm64/kvm/hyp/nvhe/pkvm.c
arch/arm64/kvm/hyp/nvhe/sys_regs.c

index d1e59b88ff663688089f23dcaecd8bbff35fbb7e..69e26d1a0ebef7902819f73c3c9b5754a748d1da 100644 (file)
 u64 pvm_read_id_reg(const struct kvm_vcpu *vcpu, u32 id);
 bool kvm_handle_pvm_sysreg(struct kvm_vcpu *vcpu, u64 *exit_code);
 bool kvm_handle_pvm_restricted(struct kvm_vcpu *vcpu, u64 *exit_code);
+void kvm_init_pvm_id_regs(struct kvm_vcpu *vcpu);
 int kvm_check_pvm_sysreg_table(void);
 
 #endif /* __ARM64_KVM_FIXED_CONFIG_H__ */
index 24a9a8330d190396e90abc7202e6b7e86ea1de63..698bc20ab80bfcb8844d940401cdd079dd0e68ed 100644 (file)
@@ -47,6 +47,8 @@ struct pkvm_hyp_vm {
        struct pkvm_hyp_vcpu *vcpus[];
 };
 
+extern hyp_spinlock_t vm_table_lock;
+
 static inline struct pkvm_hyp_vm *
 pkvm_hyp_vcpu_to_hyp_vm(struct pkvm_hyp_vcpu *hyp_vcpu)
 {
index c39d4e92dd3cf875542c206e6bf1118323ec2421..c7958183e40d5ee9756bbc23c1b1241749d863cf 100644 (file)
@@ -257,10 +257,10 @@ static pkvm_handle_t idx_to_vm_handle(unsigned int idx)
 
 /*
  * Spinlock for protecting state related to the VM table. Protects writes
- * to 'vm_table' and 'nr_table_entries' as well as reads and writes to
- * 'last_hyp_vcpu_lookup'.
+ * to 'vm_table', 'nr_table_entries', and other per-vm state on initialization.
+ * Also protects reads and writes to 'last_hyp_vcpu_lookup'.
  */
-static DEFINE_HYP_SPINLOCK(vm_table_lock);
+DEFINE_HYP_SPINLOCK(vm_table_lock);
 
 /*
  * The table of VM entries for protected VMs in hyp.
@@ -381,6 +381,7 @@ static void init_pkvm_hyp_vm(struct kvm *host_kvm, struct pkvm_hyp_vm *hyp_vm,
        hyp_vm->kvm.created_vcpus = nr_vcpus;
        hyp_vm->kvm.arch.mmu.vtcr = host_mmu.arch.mmu.vtcr;
        hyp_vm->kvm.arch.pkvm.enabled = READ_ONCE(host_kvm->arch.pkvm.enabled);
+       hyp_vm->kvm.arch.flags = 0;
        pkvm_init_features_from_host(hyp_vm, host_kvm);
 }
 
@@ -419,6 +420,9 @@ static int init_pkvm_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu,
        hyp_vcpu->vcpu.arch.cflags = READ_ONCE(host_vcpu->arch.cflags);
        hyp_vcpu->vcpu.arch.mp_state.mp_state = KVM_MP_STATE_STOPPED;
 
+       if (pkvm_hyp_vcpu_is_protected(hyp_vcpu))
+               kvm_init_pvm_id_regs(&hyp_vcpu->vcpu);
+
        ret = pkvm_vcpu_init_traps(hyp_vcpu);
        if (ret)
                goto done;
index 59fb2f0561774b2c1b2491d4efbf0836a1aa3f93..2aea44c911bdd2fb503412d9abb6ee09473e93c6 100644 (file)
@@ -12,6 +12,7 @@
 #include <hyp/adjust_pc.h>
 
 #include <nvhe/fixed_config.h>
+#include <nvhe/pkvm.h>
 
 #include "../../sys_regs.h"
 
@@ -204,8 +205,7 @@ static u64 get_pvm_id_aa64mmfr2(const struct kvm_vcpu *vcpu)
        return id_aa64mmfr2_el1_sys_val & PVM_ID_AA64MMFR2_ALLOW;
 }
 
-/* Read a sanitized cpufeature ID register by its encoding */
-u64 pvm_read_id_reg(const struct kvm_vcpu *vcpu, u32 id)
+static u64 pvm_calc_id_reg(const struct kvm_vcpu *vcpu, u32 id)
 {
        switch (id) {
        case SYS_ID_AA64PFR0_EL1:
@@ -240,10 +240,25 @@ u64 pvm_read_id_reg(const struct kvm_vcpu *vcpu, u32 id)
        }
 }
 
+/* Read a sanitized cpufeature ID register by its encoding */
+u64 pvm_read_id_reg(const struct kvm_vcpu *vcpu, u32 id)
+{
+       return pvm_calc_id_reg(vcpu, id);
+}
+
 static u64 read_id_reg(const struct kvm_vcpu *vcpu,
                       struct sys_reg_desc const *r)
 {
-       return pvm_read_id_reg(vcpu, reg_to_encoding(r));
+       struct kvm *kvm = vcpu->kvm;
+       u32 reg = reg_to_encoding(r);
+
+       if (WARN_ON_ONCE(!test_bit(KVM_ARCH_FLAG_ID_REGS_INITIALIZED, &kvm->arch.flags)))
+               return 0;
+
+       if (reg >= sys_reg(3, 0, 0, 1, 0) && reg <= sys_reg(3, 0, 0, 7, 7))
+               return kvm->arch.id_regs[IDREG_IDX(reg)];
+
+       return 0;
 }
 
 /* Handler to RAZ/WI sysregs */
@@ -448,6 +463,30 @@ static const struct sys_reg_desc pvm_sys_reg_descs[] = {
        /* Performance Monitoring Registers are restricted. */
 };
 
+/*
+ * Initializes feature registers for protected vms.
+ */
+void kvm_init_pvm_id_regs(struct kvm_vcpu *vcpu)
+{
+       struct kvm *kvm = vcpu->kvm;
+       struct kvm_arch *ka = &kvm->arch;
+       u32 r;
+
+       hyp_assert_lock_held(&vm_table_lock);
+
+       if (test_bit(KVM_ARCH_FLAG_ID_REGS_INITIALIZED, &kvm->arch.flags))
+               return;
+
+       /*
+        * Initialize only AArch64 id registers since AArch32 isn't supported
+        * for protected VMs.
+        */
+       for (r = sys_reg(3, 0, 0, 4, 0); r <= sys_reg(3, 0, 0, 7, 7); r += sys_reg(0, 0, 0, 0, 1))
+               ka->id_regs[IDREG_IDX(r)] = pvm_calc_id_reg(vcpu, r);
+
+       set_bit(KVM_ARCH_FLAG_ID_REGS_INITIALIZED, &kvm->arch.flags);
+}
+
 /*
  * Checks that the sysreg table is unique and in-order.
  *