www.infradead.org Git - users/dwmw2/linux.git/commitdiff
KVM: arm64: Introduce __pkvm_host_unshare_guest()
author: Quentin Perret <qperret@google.com>
Wed, 18 Dec 2024 19:40:52 +0000 (19:40 +0000)
committer: Marc Zyngier <maz@kernel.org>
Fri, 20 Dec 2024 09:44:00 +0000 (09:44 +0000)
In preparation for letting the host unmap pages from non-protected
guests, introduce a new hypercall implementing the host-unshare-guest
transition.

Tested-by: Fuad Tabba <tabba@google.com>
Reviewed-by: Fuad Tabba <tabba@google.com>
Signed-off-by: Quentin Perret <qperret@google.com>
Link: https://lore.kernel.org/r/20241218194059.3670226-12-qperret@google.com
Signed-off-by: Marc Zyngier <maz@kernel.org>
arch/arm64/include/asm/kvm_asm.h
arch/arm64/kvm/hyp/include/nvhe/mem_protect.h
arch/arm64/kvm/hyp/include/nvhe/pkvm.h
arch/arm64/kvm/hyp/nvhe/hyp-main.c
arch/arm64/kvm/hyp/nvhe/mem_protect.c
arch/arm64/kvm/hyp/nvhe/pkvm.c

index 449337f5b2a3bf88e2316a6fe3b7c6260eb244b2..0b6c4d325134723682f9603d4b2e8ed45ae1daf2 100644 (file)
@@ -66,6 +66,7 @@ enum __kvm_host_smccc_func {
        __KVM_HOST_SMCCC_FUNC___pkvm_host_share_hyp,
        __KVM_HOST_SMCCC_FUNC___pkvm_host_unshare_hyp,
        __KVM_HOST_SMCCC_FUNC___pkvm_host_share_guest,
+       __KVM_HOST_SMCCC_FUNC___pkvm_host_unshare_guest,
        __KVM_HOST_SMCCC_FUNC___kvm_adjust_pc,
        __KVM_HOST_SMCCC_FUNC___kvm_vcpu_run,
        __KVM_HOST_SMCCC_FUNC___kvm_flush_vm_context,
index 15b8956051b6cc6fe4f6305caaf2b2c0900cd4c8..e6d080b71779a75119ddf41f4cdc60154eddb4e9 100644 (file)
@@ -41,6 +41,7 @@ int __pkvm_host_share_ffa(u64 pfn, u64 nr_pages);
 int __pkvm_host_unshare_ffa(u64 pfn, u64 nr_pages);
 int __pkvm_host_share_guest(u64 pfn, u64 gfn, struct pkvm_hyp_vcpu *vcpu,
                            enum kvm_pgtable_prot prot);
+int __pkvm_host_unshare_guest(u64 gfn, struct pkvm_hyp_vm *hyp_vm);
 
 bool addr_is_memory(phys_addr_t phys);
 int host_stage2_idmap_locked(phys_addr_t addr, u64 size, enum kvm_pgtable_prot prot);
index be52c5b15e2199f7add5e9877d2352958bdfbc89..0cc2a429f1fb61b72c24dfedfbcfa5653c58a6c3 100644 (file)
@@ -64,6 +64,11 @@ static inline bool pkvm_hyp_vcpu_is_protected(struct pkvm_hyp_vcpu *hyp_vcpu)
        return vcpu_is_protected(&hyp_vcpu->vcpu);
 }
 
+/* Return true if @hyp_vm is a protected (pKVM) VM, per its struct kvm. */
+static inline bool pkvm_hyp_vm_is_protected(struct pkvm_hyp_vm *hyp_vm)
+{
+       return kvm_vm_is_protected(&hyp_vm->kvm);
+}
+
 void pkvm_hyp_vm_table_init(void *tbl);
 
 int __pkvm_init_vm(struct kvm *host_kvm, unsigned long vm_hva,
@@ -78,6 +83,7 @@ void pkvm_put_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu);
 struct pkvm_hyp_vcpu *pkvm_get_loaded_hyp_vcpu(void);
 
 struct pkvm_hyp_vm *get_pkvm_hyp_vm(pkvm_handle_t handle);
+struct pkvm_hyp_vm *get_np_pkvm_hyp_vm(pkvm_handle_t handle);
 void put_pkvm_hyp_vm(struct pkvm_hyp_vm *hyp_vm);
 
 #endif /* __ARM64_KVM_NVHE_PKVM_H__ */
index d659462fbf5d998ae694b49ed707e03e9f335c5c..3c3a27c985a2e23effd4d968f7d5b81f91094a7b 100644 (file)
@@ -244,6 +244,26 @@ out:
        cpu_reg(host_ctxt, 1) =  ret;
 }
 
+/*
+ * Host HVC handler for __pkvm_host_unshare_guest: the host asks the
+ * hypervisor to unshare the page at @gfn from the VM named by @handle.
+ * The result is returned to the host in register x1.
+ */
+static void handle___pkvm_host_unshare_guest(struct kvm_cpu_context *host_ctxt)
+{
+       DECLARE_REG(pkvm_handle_t, handle, host_ctxt, 1);
+       DECLARE_REG(u64, gfn, host_ctxt, 2);
+       struct pkvm_hyp_vm *hyp_vm;
+       int ret = -EINVAL;
+
+       if (!is_protected_kvm_enabled())
+               goto out;
+
+       /*
+        * get_np_pkvm_hyp_vm() only returns non-protected VMs, so the
+        * host cannot use this path against a protected guest.
+        */
+       hyp_vm = get_np_pkvm_hyp_vm(handle);
+       if (!hyp_vm)
+               goto out;
+
+       ret = __pkvm_host_unshare_guest(gfn, hyp_vm);
+       put_pkvm_hyp_vm(hyp_vm);
+out:
+       cpu_reg(host_ctxt, 1) =  ret;
+}
+
 static void handle___kvm_adjust_pc(struct kvm_cpu_context *host_ctxt)
 {
        DECLARE_REG(struct kvm_vcpu *, vcpu, host_ctxt, 1);
@@ -454,6 +474,7 @@ static const hcall_t host_hcall[] = {
        HANDLE_FUNC(__pkvm_host_share_hyp),
        HANDLE_FUNC(__pkvm_host_unshare_hyp),
        HANDLE_FUNC(__pkvm_host_share_guest),
+       HANDLE_FUNC(__pkvm_host_unshare_guest),
        HANDLE_FUNC(__kvm_adjust_pc),
        HANDLE_FUNC(__kvm_vcpu_run),
        HANDLE_FUNC(__kvm_flush_vm_context),
index fb9592e721cf804a33786a81edb06435800b8eb7..30243b7922f141e3945f66e6589ece53045f5b93 100644 (file)
@@ -1421,3 +1421,70 @@ unlock:
 
        return ret;
 }
+
+/*
+ * Validate that the page mapped at @ipa in @vm's stage-2 page-table is in
+ * a state that permits the host to unshare it, and return its physical
+ * address via @__phys on success.
+ *
+ * NOTE(review): callers are expected to hold the host and guest component
+ * locks (as __pkvm_host_unshare_guest() does) — confirm for new callers.
+ */
+static int __check_host_shared_guest(struct pkvm_hyp_vm *vm, u64 *__phys, u64 ipa)
+{
+       enum pkvm_page_state state;
+       struct hyp_page *page;
+       kvm_pte_t pte;
+       u64 phys;
+       s8 level;
+       int ret;
+
+       ret = kvm_pgtable_get_leaf(&vm->pgt, ipa, &pte, &level);
+       if (ret)
+               return ret;
+       /* Only last-level (page-granularity) mappings are supported. */
+       if (level != KVM_PGTABLE_LAST_LEVEL)
+               return -E2BIG;
+       /* No valid mapping at @ipa: nothing to unshare. */
+       if (!kvm_pte_valid(pte))
+               return -ENOENT;
+
+       /* The guest side must see the page as borrowed from the host. */
+       state = guest_get_page_state(pte, ipa);
+       if (state != PKVM_PAGE_SHARED_BORROWED)
+               return -EPERM;
+
+       phys = kvm_pte_to_phys(pte);
+       ret = check_range_allowed_memory(phys, phys + PAGE_SIZE);
+       if (WARN_ON(ret))
+               return ret;
+
+       /* The host side must still own the page and have it shared out. */
+       page = hyp_phys_to_page(phys);
+       if (page->host_state != PKVM_PAGE_SHARED_OWNED)
+               return -EPERM;
+       /* A shared-borrowed page with a zero share count is a bookkeeping bug. */
+       if (WARN_ON(!page->host_share_guest_count))
+               return -EINVAL;
+
+       *__phys = phys;
+
+       return 0;
+}
+
+/*
+ * Tear down the host->guest share of the page at @gfn: unmap it from
+ * @vm's stage-2, drop the per-page share refcount, and return the page
+ * to exclusive host ownership once the last share is gone.
+ *
+ * Returns 0 on success or a negative error code (e.g. -ENOENT if not
+ * mapped, -EPERM if the page is not in the expected shared state).
+ */
+int __pkvm_host_unshare_guest(u64 gfn, struct pkvm_hyp_vm *vm)
+{
+       u64 ipa = hyp_pfn_to_phys(gfn);
+       struct hyp_page *page;
+       u64 phys;
+       int ret;
+
+       /* Both sides of the share are inspected/updated: take both locks. */
+       host_lock_component();
+       guest_lock_component(vm);
+
+       ret = __check_host_shared_guest(vm, &phys, ipa);
+       if (ret)
+               goto unlock;
+
+       ret = kvm_pgtable_stage2_unmap(&vm->pgt, ipa, PAGE_SIZE);
+       if (ret)
+               goto unlock;
+
+       page = hyp_phys_to_page(phys);
+       page->host_share_guest_count--;
+       /* Last borrower gone: flip the host view back to exclusively owned. */
+       if (!page->host_share_guest_count)
+               WARN_ON(__host_set_page_state_range(phys, PAGE_SIZE, PKVM_PAGE_OWNED));
+
+unlock:
+       guest_unlock_component(vm);
+       host_unlock_component();
+
+       return ret;
+}
index 0109c36566c8bd6b7f76cf5b66be106db36dcd42..2c618f2f2769084430a96f6e9d64395a0ec3e173 100644 (file)
@@ -376,6 +376,18 @@ void put_pkvm_hyp_vm(struct pkvm_hyp_vm *hyp_vm)
        hyp_spin_unlock(&vm_table_lock);
 }
 
+/*
+ * Like get_pkvm_hyp_vm(), but only returns non-protected ("np") VMs.
+ * If @handle names a protected VM, the reference is dropped and NULL is
+ * returned, so host-initiated operations cannot target protected guests.
+ * On success the caller owns a reference; release it with put_pkvm_hyp_vm().
+ */
+struct pkvm_hyp_vm *get_np_pkvm_hyp_vm(pkvm_handle_t handle)
+{
+       struct pkvm_hyp_vm *hyp_vm = get_pkvm_hyp_vm(handle);
+
+       if (hyp_vm && pkvm_hyp_vm_is_protected(hyp_vm)) {
+               put_pkvm_hyp_vm(hyp_vm);
+               hyp_vm = NULL;
+       }
+
+       return hyp_vm;
+}
+
 static void pkvm_init_features_from_host(struct pkvm_hyp_vm *hyp_vm, const struct kvm *host_kvm)
 {
        struct kvm *kvm = &hyp_vm->kvm;