KVM: arm64: Add generic hyp_memcache helpers
author Quentin Perret <qperret@google.com>
Thu, 10 Nov 2022 19:02:50 +0000 (19:02 +0000)
committer Marc Zyngier <maz@kernel.org>
Fri, 11 Nov 2022 17:16:25 +0000 (17:16 +0000)
The host at EL1 and the pKVM hypervisor at EL2 will soon need to
exchange memory pages dynamically for creating and destroying VM state.

Indeed, the hypervisor will rely on the host to donate memory pages it
can use to create guest stage-2 page-tables and to store VM and vCPU
metadata. In order to ease this process, introduce a
'struct hyp_memcache' which is essentially a linked list of available
pages, indexed by physical addresses so that it can be passed
meaningfully between the different virtual address spaces configured at
EL1 and EL2.

Tested-by: Vincent Donnefort <vdonnefort@google.com>
Signed-off-by: Quentin Perret <qperret@google.com>
Signed-off-by: Will Deacon <will@kernel.org>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20221110190259.26861-18-will@kernel.org
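
To make the data structure described above concrete, here is a minimal, self-contained user-space sketch of the same idea (illustration only, not part of the patch): free pages are chained through their first word, and both the list head and the links are stored as "physical" addresses so the list stays meaningful when handed across address spaces. The to_pa()/to_va() callbacks below are identity casts; in the kernel they correspond to kvm_host_pa()/kvm_host_va() at EL1 and hyp_virt_to_phys()/hyp_phys_to_virt() at EL2.

    /* User-space sketch of the hyp_memcache idea; names are illustrative. */
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    typedef uintptr_t phys_addr_t;

    struct memcache {
            phys_addr_t head;
            unsigned long nr_pages;
    };

    /* Stand-ins for the EL1/EL2 address translation callbacks. */
    static phys_addr_t to_pa(void *virt) { return (phys_addr_t)virt; }
    static void *to_va(phys_addr_t phys) { return (void *)phys; }

    static void push_page(struct memcache *mc, phys_addr_t *p)
    {
            *p = mc->head;          /* link the page to the previous head */
            mc->head = to_pa(p);    /* head is kept as a "physical" address */
            mc->nr_pages++;
    }

    static void *pop_page(struct memcache *mc)
    {
            phys_addr_t *p;

            if (!mc->nr_pages)
                    return NULL;

            p = to_va(mc->head);
            mc->head = *p;
            mc->nr_pages--;

            return p;
    }

    int main(void)
    {
            struct memcache mc = { 0 };
            void *page;

            /* "Donate" three page-sized buffers to the cache. */
            for (int i = 0; i < 3; i++) {
                    phys_addr_t *p = aligned_alloc(4096, 4096);

                    if (p)
                            push_page(&mc, p);
            }

            /* Consume them again; the list behaves as a LIFO stack. */
            while ((page = pop_page(&mc)) != NULL) {
                    printf("popped page %p, %lu pages left\n", page, mc.nr_pages);
                    free(page);
            }

            return 0;
    }
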
arch/arm64/include/asm/kvm_host.h
arch/arm64/kvm/hyp/include/nvhe/mem_protect.h
arch/arm64/kvm/hyp/nvhe/mm.c
arch/arm64/kvm/mmu.c

diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 467393e7331f10b5ca250b5503a2b8cfff968a13..835987e0f868d9eccb7f0a5cc43d98014869c2a8 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -73,6 +73,63 @@ u32 __attribute_const__ kvm_target_cpu(void);
 int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
 void kvm_arm_vcpu_destroy(struct kvm_vcpu *vcpu);
 
+struct kvm_hyp_memcache {
+       phys_addr_t head;
+       unsigned long nr_pages;
+};
+
+static inline void push_hyp_memcache(struct kvm_hyp_memcache *mc,
+                                    phys_addr_t *p,
+                                    phys_addr_t (*to_pa)(void *virt))
+{
+       *p = mc->head;
+       mc->head = to_pa(p);
+       mc->nr_pages++;
+}
+
+static inline void *pop_hyp_memcache(struct kvm_hyp_memcache *mc,
+                                    void *(*to_va)(phys_addr_t phys))
+{
+       phys_addr_t *p = to_va(mc->head);
+
+       if (!mc->nr_pages)
+               return NULL;
+
+       mc->head = *p;
+       mc->nr_pages--;
+
+       return p;
+}
+
+static inline int __topup_hyp_memcache(struct kvm_hyp_memcache *mc,
+                                      unsigned long min_pages,
+                                      void *(*alloc_fn)(void *arg),
+                                      phys_addr_t (*to_pa)(void *virt),
+                                      void *arg)
+{
+       while (mc->nr_pages < min_pages) {
+               phys_addr_t *p = alloc_fn(arg);
+
+               if (!p)
+                       return -ENOMEM;
+               push_hyp_memcache(mc, p, to_pa);
+       }
+
+       return 0;
+}
+
+static inline void __free_hyp_memcache(struct kvm_hyp_memcache *mc,
+                                      void (*free_fn)(void *virt, void *arg),
+                                      void *(*to_va)(phys_addr_t phys),
+                                      void *arg)
+{
+       while (mc->nr_pages)
+               free_fn(pop_hyp_memcache(mc, to_va), arg);
+}
+
+void free_hyp_memcache(struct kvm_hyp_memcache *mc);
+int topup_hyp_memcache(struct kvm_hyp_memcache *mc, unsigned long min_pages);
+
 struct kvm_vmid {
        atomic64_t id;
 };
diff --git a/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h b/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h
index ef31a1872c93bbaaad76c58c5d4255bcaf1e27e2..420b87e755a42c8d7666203b957f2cefda276361 100644
--- a/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h
+++ b/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h
@@ -77,6 +77,8 @@ void handle_host_mem_abort(struct kvm_cpu_context *host_ctxt);
 int hyp_pin_shared_mem(void *from, void *to);
 void hyp_unpin_shared_mem(void *from, void *to);
 void reclaim_guest_pages(struct pkvm_hyp_vm *vm);
+int refill_memcache(struct kvm_hyp_memcache *mc, unsigned long min_pages,
+                   struct kvm_hyp_memcache *host_mc);
 
 static __always_inline void __load_host_stage2(void)
 {
diff --git a/arch/arm64/kvm/hyp/nvhe/mm.c b/arch/arm64/kvm/hyp/nvhe/mm.c
index 5648ac21e62d0181d5868c4d37cabc5354912941..c80b2c007619a5bebfdd9d1c69e5822b92775558 100644
--- a/arch/arm64/kvm/hyp/nvhe/mm.c
+++ b/arch/arm64/kvm/hyp/nvhe/mm.c
@@ -340,3 +340,36 @@ int hyp_create_idmap(u32 hyp_va_bits)
 
        return __pkvm_create_mappings(start, end - start, start, PAGE_HYP_EXEC);
 }
+
+static void *admit_host_page(void *arg)
+{
+       struct kvm_hyp_memcache *host_mc = arg;
+
+       if (!host_mc->nr_pages)
+               return NULL;
+
+       /*
+        * The host still owns the pages in its memcache, so we need to go
+        * through a full host-to-hyp donation cycle to change it. Fortunately,
+        * __pkvm_host_donate_hyp() takes care of races for us, so if it
+        * succeeds we're good to go.
+        */
+       if (__pkvm_host_donate_hyp(hyp_phys_to_pfn(host_mc->head), 1))
+               return NULL;
+
+       return pop_hyp_memcache(host_mc, hyp_phys_to_virt);
+}
+
+/* Refill our local memcache by popping pages from the one provided by the host. */
+int refill_memcache(struct kvm_hyp_memcache *mc, unsigned long min_pages,
+                   struct kvm_hyp_memcache *host_mc)
+{
+       struct kvm_hyp_memcache tmp = *host_mc;
+       int ret;
+
+       ret =  __topup_hyp_memcache(mc, min_pages, admit_host_page,
+                                   hyp_virt_to_phys, &tmp);
+       *host_mc = tmp;
+
+       return ret;
+}
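
A purely illustrative sketch of how an EL2 path could sit on top of refill_memcache() (the function and field names below are hypothetical and not part of this patch): once the host-provided memcache has been drained into a hyp-owned one, individual pages can be handed out with pop_hyp_memcache(), for example to back stage-2 page-table allocations.

    /* Hypothetical EL2 consumer: move pages out of the host's memcache into a
     * hyp-owned one, then hand individual pages to an allocator callback. */
    static void *guest_s2_zalloc_page(void *mc)
    {
            void *page = pop_hyp_memcache(mc, hyp_phys_to_virt);

            if (page)
                    memset(page, 0, PAGE_SIZE);

            return page;
    }

    static int example_prepare_pgtable_pages(struct pkvm_hyp_vm *vm,
                                             struct kvm_hyp_memcache *host_mc)
    {
            /* 'vm->memcache' is an illustrative field name, not added here. */
            return refill_memcache(&vm->memcache, 5, host_mc);
    }
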
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 60ee3d9f01f8c198b0dd90a35f32594e1cfff2d1..18061163c607ba61eb7aee9db644fefaa3086583 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -807,6 +807,32 @@ void kvm_free_stage2_pgd(struct kvm_s2_mmu *mmu)
        }
 }
 
+static void hyp_mc_free_fn(void *addr, void *unused)
+{
+       free_page((unsigned long)addr);
+}
+
+static void *hyp_mc_alloc_fn(void *unused)
+{
+       return (void *)__get_free_page(GFP_KERNEL_ACCOUNT);
+}
+
+void free_hyp_memcache(struct kvm_hyp_memcache *mc)
+{
+       if (is_protected_kvm_enabled())
+               __free_hyp_memcache(mc, hyp_mc_free_fn,
+                                   kvm_host_va, NULL);
+}
+
+int topup_hyp_memcache(struct kvm_hyp_memcache *mc, unsigned long min_pages)
+{
+       if (!is_protected_kvm_enabled())
+               return 0;
+
+       return __topup_hyp_memcache(mc, min_pages, hyp_mc_alloc_fn,
+                                   kvm_host_pa, NULL);
+}
+
 /**
  * kvm_phys_addr_ioremap - map a device range to guest IPA
  *
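
Later patches in the series are expected to use these EL1 wrappers around VM and vCPU creation and teardown, as the commit message notes. A purely illustrative sketch of the calling pattern (the function and field names below are hypothetical, not introduced by this patch):

    /* Hypothetical EL1-side caller: keep a per-vCPU memcache topped up before
     * a hypercall that may need pages donated to EL2, and drain it on teardown.
     * The 'pkvm_memcache' field name is an assumption for illustration. */
    static int example_vcpu_prepare(struct kvm_vcpu *vcpu)
    {
            /* The page count would come from the stage-2 configuration;
             * a small constant stands in for it here. */
            return topup_hyp_memcache(&vcpu->arch.pkvm_memcache, 4);
    }

    static void example_vcpu_teardown(struct kvm_vcpu *vcpu)
    {
            free_hyp_memcache(&vcpu->arch.pkvm_memcache);
    }

Both wrappers are no-ops unless protected KVM is enabled, so non-pKVM configurations pay no cost for calling them unconditionally.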