KVM: arm64: Introduce __pkvm_host_share_guest()
author Quentin Perret <qperret@google.com>
Wed, 18 Dec 2024 19:40:51 +0000 (19:40 +0000)
committer Marc Zyngier <maz@kernel.org>
Fri, 20 Dec 2024 09:44:00 +0000 (09:44 +0000)
In preparation for handling guest stage-2 mappings at EL2, introduce a
new pKVM hypercall allowing the host to share pages with non-protected guests.

Tested-by: Fuad Tabba <tabba@google.com>
Reviewed-by: Fuad Tabba <tabba@google.com>
Signed-off-by: Quentin Perret <qperret@google.com>
Link: https://lore.kernel.org/r/20241218194059.3670226-11-qperret@google.com
Signed-off-by: Marc Zyngier <maz@kernel.org>
arch/arm64/include/asm/kvm_asm.h
arch/arm64/include/asm/kvm_host.h
arch/arm64/kvm/hyp/include/nvhe/mem_protect.h
arch/arm64/kvm/hyp/include/nvhe/memory.h
arch/arm64/kvm/hyp/nvhe/hyp-main.c
arch/arm64/kvm/hyp/nvhe/mem_protect.c
arch/arm64/kvm/hyp/nvhe/pkvm.c

index 89c0fac6955167b8081f67ae380c8d6ac7237d40..449337f5b2a3bf88e2316a6fe3b7c6260eb244b2 100644 (file)
@@ -65,6 +65,7 @@ enum __kvm_host_smccc_func {
        /* Hypercalls available after pKVM finalisation */
        __KVM_HOST_SMCCC_FUNC___pkvm_host_share_hyp,
        __KVM_HOST_SMCCC_FUNC___pkvm_host_unshare_hyp,
+       __KVM_HOST_SMCCC_FUNC___pkvm_host_share_guest,
        __KVM_HOST_SMCCC_FUNC___kvm_adjust_pc,
        __KVM_HOST_SMCCC_FUNC___kvm_vcpu_run,
        __KVM_HOST_SMCCC_FUNC___kvm_flush_vm_context,
index e18e9244d17a4f7742729e31481680254617bac8..1246f1d01dbf604439c64a0a47afa094b1bc725f 100644 (file)
@@ -771,6 +771,9 @@ struct kvm_vcpu_arch {
        /* Cache some mmu pages needed inside spinlock regions */
        struct kvm_mmu_memory_cache mmu_page_cache;
 
+       /* Pages to top-up the pKVM/EL2 guest pool */
+       struct kvm_hyp_memcache pkvm_memcache;
+
        /* Virtual SError ESR to restore when HCR_EL2.VSE is set */
        u64 vsesr_el2;
 
index 25038ac705d8251d7dc7978ecfad3ee7b9d2a528..15b8956051b6cc6fe4f6305caaf2b2c0900cd4c8 100644 (file)
@@ -39,6 +39,8 @@ int __pkvm_host_donate_hyp(u64 pfn, u64 nr_pages);
 int __pkvm_hyp_donate_host(u64 pfn, u64 nr_pages);
 int __pkvm_host_share_ffa(u64 pfn, u64 nr_pages);
 int __pkvm_host_unshare_ffa(u64 pfn, u64 nr_pages);
+int __pkvm_host_share_guest(u64 pfn, u64 gfn, struct pkvm_hyp_vcpu *vcpu,
+                           enum kvm_pgtable_prot prot);
 
 bool addr_is_memory(phys_addr_t phys);
 int host_stage2_idmap_locked(phys_addr_t addr, u64 size, enum kvm_pgtable_prot prot);
index 2a5eabf4b753faf7248f14cfd248bfdcf392144b..34233d58606074ae11ca2ee1fad89d0faecce143 100644 (file)
@@ -46,6 +46,8 @@ struct hyp_page {
 
        /* Host (non-meta) state. Guarded by the host stage-2 lock. */
        enum pkvm_page_state host_state : 8;
+
+       u32 host_share_guest_count;
 };
 
 extern u64 __hyp_vmemmap;
@@ -68,7 +70,7 @@ static inline phys_addr_t hyp_virt_to_phys(void *addr)
 
 static inline struct hyp_page *hyp_phys_to_page(phys_addr_t phys)
 {
-       BUILD_BUG_ON(sizeof(struct hyp_page) != sizeof(u32));
+       BUILD_BUG_ON(sizeof(struct hyp_page) != sizeof(u64));
        return &hyp_vmemmap[hyp_phys_to_pfn(phys)];
 }
 
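The BUILD_BUG_ON update above captures the cost of the new counter: the per-page vmemmap entry grows from 4 to 8 bytes. Below is a minimal, self-contained sketch of the resulting packing; the refcount and order fields are assumptions about the surrounding header (only host_state and host_share_guest_count appear in this hunk), so treat it as illustrative only.

	#include <linux/types.h>
	#include <linux/build_bug.h>

	/* Sketch of the assumed struct hyp_page layout after this patch. */
	struct hyp_page_sketch {
		u16 refcount;                 /* assumed pre-existing field */
		u8  order;                    /* assumed pre-existing field */
		u8  host_state;               /* 8-bit host state, as in the hunk above */
		u32 host_share_guest_count;   /* new: np-guest share counter */
	};

	/* One vmemmap entry per page must now fit in a u64 rather than a u32. */
	static_assert(sizeof(struct hyp_page_sketch) == sizeof(u64));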
index 95d78db315b3f5fbcad04c003fd01bf6d3ea554c..d659462fbf5d998ae694b49ed707e03e9f335c5c 100644 (file)
@@ -211,6 +211,39 @@ out:
        cpu_reg(host_ctxt, 1) =  ret;
 }
 
+static int pkvm_refill_memcache(struct pkvm_hyp_vcpu *hyp_vcpu)
+{
+       struct kvm_vcpu *host_vcpu = hyp_vcpu->host_vcpu;
+
+       return refill_memcache(&hyp_vcpu->vcpu.arch.pkvm_memcache,
+                              host_vcpu->arch.pkvm_memcache.nr_pages,
+                              &host_vcpu->arch.pkvm_memcache);
+}
+
+static void handle___pkvm_host_share_guest(struct kvm_cpu_context *host_ctxt)
+{
+       DECLARE_REG(u64, pfn, host_ctxt, 1);
+       DECLARE_REG(u64, gfn, host_ctxt, 2);
+       DECLARE_REG(enum kvm_pgtable_prot, prot, host_ctxt, 3);
+       struct pkvm_hyp_vcpu *hyp_vcpu;
+       int ret = -EINVAL;
+
+       if (!is_protected_kvm_enabled())
+               goto out;
+
+       hyp_vcpu = pkvm_get_loaded_hyp_vcpu();
+       if (!hyp_vcpu || pkvm_hyp_vcpu_is_protected(hyp_vcpu))
+               goto out;
+
+       ret = pkvm_refill_memcache(hyp_vcpu);
+       if (ret)
+               goto out;
+
+       ret = __pkvm_host_share_guest(pfn, gfn, hyp_vcpu, prot);
+out:
+       cpu_reg(host_ctxt, 1) =  ret;
+}
+
 static void handle___kvm_adjust_pc(struct kvm_cpu_context *host_ctxt)
 {
        DECLARE_REG(struct kvm_vcpu *, vcpu, host_ctxt, 1);
@@ -420,6 +453,7 @@ static const hcall_t host_hcall[] = {
 
        HANDLE_FUNC(__pkvm_host_share_hyp),
        HANDLE_FUNC(__pkvm_host_unshare_hyp),
+       HANDLE_FUNC(__pkvm_host_share_guest),
        HANDLE_FUNC(__kvm_adjust_pc),
        HANDLE_FUNC(__kvm_vcpu_run),
        HANDLE_FUNC(__kvm_flush_vm_context),
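For context, pkvm_refill_memcache() above drains pages that the host has pre-loaded into the new vcpu->arch.pkvm_memcache field. The following is a hedged sketch of the host-side sequence this enables; the real caller lands later in the series, and while topup_hyp_memcache(), kvm_mmu_cache_min_pages() and kvm_call_hyp_nvhe() are existing arm64 KVM helpers, this particular function is illustrative only.

	/*
	 * Illustrative host-side caller (not part of this patch): the target vCPU
	 * must already be loaded on this CPU, since the handler resolves it via
	 * pkvm_get_loaded_hyp_vcpu().
	 */
	static int host_share_page_with_guest(struct kvm_vcpu *vcpu, u64 pfn, u64 gfn,
					      enum kvm_pgtable_prot prot)
	{
		int ret;

		/* Pre-load pages the hypervisor may need for stage-2 tables. */
		ret = topup_hyp_memcache(&vcpu->arch.pkvm_memcache,
					 kvm_mmu_cache_min_pages(vcpu->arch.hw_mmu));
		if (ret)
			return ret;

		/* Arguments map to DECLARE_REG(1..3) in the handler above. */
		return kvm_call_hyp_nvhe(__pkvm_host_share_guest, pfn, gfn, prot);
	}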
index 12bb5445fe47e3cd2d20eb3bd1e6458a9e01eb24..fb9592e721cf804a33786a81edb06435800b8eb7 100644 (file)
@@ -867,6 +867,27 @@ static int hyp_complete_donation(u64 addr,
        return pkvm_create_mappings_locked(start, end, prot);
 }
 
+static enum pkvm_page_state guest_get_page_state(kvm_pte_t pte, u64 addr)
+{
+       if (!kvm_pte_valid(pte))
+               return PKVM_NOPAGE;
+
+       return pkvm_getstate(kvm_pgtable_stage2_pte_prot(pte));
+}
+
+static int __guest_check_page_state_range(struct pkvm_hyp_vcpu *vcpu, u64 addr,
+                                         u64 size, enum pkvm_page_state state)
+{
+       struct pkvm_hyp_vm *vm = pkvm_hyp_vcpu_to_hyp_vm(vcpu);
+       struct check_walk_data d = {
+               .desired        = state,
+               .get_page_state = guest_get_page_state,
+       };
+
+       hyp_assert_lock_held(&vm->lock);
+       return check_page_state_range(&vm->pgt, addr, size, &d);
+}
+
 static int check_share(struct pkvm_mem_share *share)
 {
        const struct pkvm_mem_transition *tx = &share->tx;
@@ -1349,3 +1370,54 @@ int __pkvm_host_unshare_ffa(u64 pfn, u64 nr_pages)
 
        return ret;
 }
+
+int __pkvm_host_share_guest(u64 pfn, u64 gfn, struct pkvm_hyp_vcpu *vcpu,
+                           enum kvm_pgtable_prot prot)
+{
+       struct pkvm_hyp_vm *vm = pkvm_hyp_vcpu_to_hyp_vm(vcpu);
+       u64 phys = hyp_pfn_to_phys(pfn);
+       u64 ipa = hyp_pfn_to_phys(gfn);
+       struct hyp_page *page;
+       int ret;
+
+       if (prot & ~KVM_PGTABLE_PROT_RWX)
+               return -EINVAL;
+
+       ret = check_range_allowed_memory(phys, phys + PAGE_SIZE);
+       if (ret)
+               return ret;
+
+       host_lock_component();
+       guest_lock_component(vm);
+
+       ret = __guest_check_page_state_range(vcpu, ipa, PAGE_SIZE, PKVM_NOPAGE);
+       if (ret)
+               goto unlock;
+
+       page = hyp_phys_to_page(phys);
+       switch (page->host_state) {
+       case PKVM_PAGE_OWNED:
+               WARN_ON(__host_set_page_state_range(phys, PAGE_SIZE, PKVM_PAGE_SHARED_OWNED));
+               break;
+       case PKVM_PAGE_SHARED_OWNED:
+               if (page->host_share_guest_count)
+                       break;
+               /* Only host to np-guest multi-sharing is tolerated */
+               WARN_ON(1);
+               fallthrough;
+       default:
+               ret = -EPERM;
+               goto unlock;
+       }
+
+       WARN_ON(kvm_pgtable_stage2_map(&vm->pgt, ipa, PAGE_SIZE, phys,
+                                      pkvm_mkstate(prot, PKVM_PAGE_SHARED_BORROWED),
+                                      &vcpu->vcpu.arch.pkvm_memcache, 0));
+       page->host_share_guest_count++;
+
+unlock:
+       guest_unlock_component(vm);
+       host_unlock_component();
+
+       return ret;
+}
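The switch above is easiest to read as a whitelist: sharing with a non-protected guest is allowed only when the host still owns the page outright, or when the page is already shared and every existing share went to an np-guest (tracked by host_share_guest_count). A standalone restatement of that check, for illustration only:

	#include <errno.h>

	enum host_state_sketch { OWNED, SHARED_OWNED, OTHER };

	/* Returns 0 if the host state permits (another) np-guest share. */
	static int np_guest_share_allowed(enum host_state_sketch state,
					  unsigned int host_share_guest_count)
	{
		if (state == OWNED)
			return 0;                 /* first share: becomes SHARED_OWNED */
		if (state == SHARED_OWNED && host_share_guest_count)
			return 0;                 /* already shared with np-guests only */
		return -EPERM;                    /* shared with hyp, FF-A, etc.: refuse */
	}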
index 496d186efb03be611b1c4f7e6287ba789a06a92c..0109c36566c8bd6b7f76cf5b66be106db36dcd42 100644 (file)
@@ -795,6 +795,14 @@ int __pkvm_teardown_vm(pkvm_handle_t handle)
        /* Push the metadata pages to the teardown memcache */
        for (idx = 0; idx < hyp_vm->nr_vcpus; ++idx) {
                struct pkvm_hyp_vcpu *hyp_vcpu = hyp_vm->vcpus[idx];
+               struct kvm_hyp_memcache *vcpu_mc = &hyp_vcpu->vcpu.arch.pkvm_memcache;
+
+               while (vcpu_mc->nr_pages) {
+                       void *addr = pop_hyp_memcache(vcpu_mc, hyp_phys_to_virt);
+
+                       push_hyp_memcache(mc, addr, hyp_virt_to_phys);
+                       unmap_donated_memory_noclear(addr, PAGE_SIZE);
+               }
 
                teardown_donated_memory(mc, hyp_vcpu, sizeof(*hyp_vcpu));
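The teardown loop above works because a kvm_hyp_memcache is a stack of free pages threaded through the pages themselves: popping converts the head from a physical address to a hypervisor VA, pushing does the reverse. A simplified, self-contained model of that structure follows, with an identity virt/phys mapping assumed purely for illustration; callers check nr_pages before popping, as the loop above does.

	#include <stdint.h>

	/* Simplified stand-in for struct kvm_hyp_memcache. */
	struct memcache_sketch {
		uintptr_t head;          /* "physical" address of the top free page */
		unsigned long nr_pages;
	};

	static void mc_push(struct memcache_sketch *mc, void *page)
	{
		*(uintptr_t *)page = mc->head;   /* link new top to the previous one */
		mc->head = (uintptr_t)page;      /* identity virt->phys in this sketch */
		mc->nr_pages++;
	}

	static void *mc_pop(struct memcache_sketch *mc)
	{
		void *page = (void *)mc->head;   /* identity phys->virt in this sketch */

		mc->head = *(uintptr_t *)page;   /* unlink */
		mc->nr_pages--;
		return page;
	}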
        }