KVM: arm64: Factor out pKVM hyp vcpu creation to separate function
author	Fuad Tabba <tabba@google.com>
Fri, 14 Mar 2025 11:18:31 +0000 (11:18 +0000)
committer	Oliver Upton <oliver.upton@linux.dev>
Fri, 14 Mar 2025 23:04:23 +0000 (16:04 -0700)
Move the code that creates and initializes the hyp view of a vcpu
in pKVM to its own function. This is meant to make the transition
to initializing every vcpu individually clearer.

Acked-by: Will Deacon <will@kernel.org>
Reviewed-by: Marc Zyngier <maz@kernel.org>
Signed-off-by: Fuad Tabba <tabba@google.com>
Link: https://lore.kernel.org/r/20250314111832.4137161-4-tabba@google.com
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
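
For quick orientation before the diff: the per-vcpu allocation and donation that previously sat inline in __pkvm_create_hyp_vm() moves into a dedicated helper, and the vcpu loop reduces to one call per vcpu. A condensed sketch of the resulting shape, taken from the hunks below (kernel-internal types such as struct kvm_vcpu, PKVM_HYP_VCPU_SIZE and kvm_call_hyp_nvhe() are as defined elsewhere in arch/arm64/kvm):

	/* New helper: allocate the EL2 view of one vcpu and donate it to hyp. */
	static int __pkvm_create_hyp_vcpu(struct kvm_vcpu *vcpu);

	/* The vcpu loop in __pkvm_create_hyp_vm() now becomes: */
	kvm_for_each_vcpu(idx, host_vcpu, host_kvm) {
		ret = __pkvm_create_hyp_vcpu(host_vcpu);
		if (ret)
			goto destroy_vm;
	}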
arch/arm64/kvm/pkvm.c

index 4409a10d02b661dbd45c2de0d1998a00d9364e3b..f297ccdaef64f1757763fbee36440e7888f90a11 100644 (file)
@@ -114,6 +114,26 @@ static void __pkvm_destroy_hyp_vm(struct kvm *host_kvm)
        free_hyp_memcache(&host_kvm->arch.pkvm.stage2_teardown_mc);
 }
 
+static int __pkvm_create_hyp_vcpu(struct kvm_vcpu *vcpu)
+{
+       size_t hyp_vcpu_sz = PAGE_ALIGN(PKVM_HYP_VCPU_SIZE);
+       pkvm_handle_t handle = vcpu->kvm->arch.pkvm.handle;
+       void *hyp_vcpu;
+       int ret;
+
+       vcpu->arch.pkvm_memcache.flags |= HYP_MEMCACHE_ACCOUNT_STAGE2;
+
+       hyp_vcpu = alloc_pages_exact(hyp_vcpu_sz, GFP_KERNEL_ACCOUNT);
+       if (!hyp_vcpu)
+               return -ENOMEM;
+
+       ret = kvm_call_hyp_nvhe(__pkvm_init_vcpu, handle, vcpu, hyp_vcpu);
+       if (ret)
+               free_pages_exact(hyp_vcpu, hyp_vcpu_sz);
+
+       return ret;
+}
+
 /*
  * Allocates and donates memory for hypervisor VM structs at EL2.
  *
@@ -126,9 +146,8 @@ static void __pkvm_destroy_hyp_vm(struct kvm *host_kvm)
  */
 static int __pkvm_create_hyp_vm(struct kvm *host_kvm)
 {
-       size_t pgd_sz, hyp_vm_sz, hyp_vcpu_sz;
+       size_t pgd_sz, hyp_vm_sz;
        struct kvm_vcpu *host_vcpu;
-       pkvm_handle_t handle;
        void *pgd, *hyp_vm;
        unsigned long idx;
        int ret;
@@ -162,37 +181,14 @@ static int __pkvm_create_hyp_vm(struct kvm *host_kvm)
        if (ret < 0)
                goto free_vm;
 
-       handle = ret;
-
-       host_kvm->arch.pkvm.handle = handle;
+       host_kvm->arch.pkvm.handle = ret;
        host_kvm->arch.pkvm.stage2_teardown_mc.flags |= HYP_MEMCACHE_ACCOUNT_STAGE2;
        kvm_account_pgtable_pages(pgd, pgd_sz / PAGE_SIZE);
 
-       /* Donate memory for the vcpus at hyp and initialize it. */
-       hyp_vcpu_sz = PAGE_ALIGN(PKVM_HYP_VCPU_SIZE);
        kvm_for_each_vcpu(idx, host_vcpu, host_kvm) {
-               void *hyp_vcpu;
-
-               host_vcpu->arch.pkvm_memcache.flags |= HYP_MEMCACHE_ACCOUNT_STAGE2;
-
-               /* Indexing of the vcpus to be sequential starting at 0. */
-               if (WARN_ON(host_vcpu->vcpu_idx != idx)) {
-                       ret = -EINVAL;
-                       goto destroy_vm;
-               }
-
-               hyp_vcpu = alloc_pages_exact(hyp_vcpu_sz, GFP_KERNEL_ACCOUNT);
-               if (!hyp_vcpu) {
-                       ret = -ENOMEM;
-                       goto destroy_vm;
-               }
-
-               ret = kvm_call_hyp_nvhe(__pkvm_init_vcpu, handle, host_vcpu,
-                                       hyp_vcpu);
-               if (ret) {
-                       free_pages_exact(hyp_vcpu, hyp_vcpu_sz);
+               ret = __pkvm_create_hyp_vcpu(host_vcpu);
+               if (ret)
                        goto destroy_vm;
-               }
        }
 
        return 0;