Secure guests need to share the DTL buffers with the hypervisor. To that
end, use a kmem_cache constructor which converts the underlying buddy
allocated SLUB cache pages into shared memory.
Signed-off-by: Anshuman Khandual <khandual@linux.vnet.ibm.com>
Signed-off-by: Thiago Jung Bauermann <bauerman@linux.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20190820021326.6884-10-bauerman@linux.ibm.com
        return mfmsr() & MSR_S;
 }
 
+/* Constructor that shares a DTL buffer's backing page with the ultravisor. */
+void dtl_cache_ctor(void *addr);
+/*
+ * Secure guests get the sharing ctor; normal guests get NULL so the
+ * DTL kmem_cache behaves exactly as before.
+ */
+#define get_dtl_cache_ctor()   (is_secure_guest() ? dtl_cache_ctor : NULL)
+
 #else /* CONFIG_PPC_SVM */
 
 static inline bool is_secure_guest(void)
 {
        return false;
 }
 
+#define get_dtl_cache_ctor() NULL
+
 #endif /* CONFIG_PPC_SVM */
 #endif /* _ASM_POWERPC_SVM_H */
 
 obj-$(CONFIG_IBMEBUS)          += ibmebus.o
 obj-$(CONFIG_PAPR_SCM)         += papr_scm.o
 obj-$(CONFIG_PPC_SPLPAR)       += vphn.o
+obj-$(CONFIG_PPC_SVM)          += svm.o
 
 ifdef CONFIG_PPC_PSERIES
 obj-$(CONFIG_SUSPEND)          += suspend.o
 
 #include <asm/security_features.h>
 #include <asm/asm-const.h>
 #include <asm/swiotlb.h>
+#include <asm/svm.h>
 
 #include "pseries.h"
 #include "../../../../drivers/pci/pci.h"
 
 static int alloc_dispatch_log_kmem_cache(void)
 {
+       void (*ctor)(void *) = get_dtl_cache_ctor();
+
        dtl_cache = kmem_cache_create("dtl", DISPATCH_LOG_BYTES,
-                                               DISPATCH_LOG_BYTES, 0, NULL);
+                                               DISPATCH_LOG_BYTES, 0, ctor);
        if (!dtl_cache) {
                pr_warn("Failed to create dispatch trace log buffer cache\n");
                pr_warn("Stolen time statistics will be unreliable\n");
 
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Secure VM platform
+ *
+ * Copyright 2018 IBM Corporation
+ * Author: Anshuman Khandual <khandual@linux.vnet.ibm.com>
+ */
+
+#include <linux/mm.h>
+#include <asm/ultravisor.h>
+
+/* There's one dispatch log per CPU. */
+/*
+ * NOTE(review): the integer division assumes DISPATCH_LOG_BYTES *
+ * CONFIG_NR_CPUS is a multiple of PAGE_SIZE (i.e. logs pack evenly into
+ * pages) — confirm, otherwise the store is sized one page short.
+ */
+#define NR_DTL_PAGE (DISPATCH_LOG_BYTES * CONFIG_NR_CPUS / PAGE_SIZE)
+
+/* Registry of pages already shared with the ultravisor via the ctor. */
+static struct page *dtl_page_store[NR_DTL_PAGE];
+static long dtl_nr_pages;
+
+/*
+ * Return true if @page is already recorded in dtl_page_store[], i.e. it
+ * has already been shared with the ultravisor.  Linear scan is fine:
+ * the registry is bounded by NR_DTL_PAGE and this only runs from the
+ * kmem_cache constructor path.
+ */
+static bool is_dtl_page_shared(struct page *page)
+{
+       long i;
+
+       for (i = 0; i < dtl_nr_pages; i++)
+               if (dtl_page_store[i] == page)
+                       return true;
+
+       return false;
+}
+
+/*
+ * kmem_cache constructor for the DTL cache: convert the page backing
+ * @addr into memory shared with the ultravisor/hypervisor.  Several DTL
+ * objects can live in one page, so the page is recorded in
+ * dtl_page_store[] and uv_share_page() is issued only once per page.
+ */
+void dtl_cache_ctor(void *addr)
+{
+       unsigned long pfn = PHYS_PFN(__pa(addr));
+       struct page *page = pfn_to_page(pfn);
+
+       if (!is_dtl_page_shared(page)) {
+               /*
+                * Bounds-check BEFORE storing: writing first and only
+                * WARNing afterwards allowed an out-of-bounds write into
+                * dtl_page_store[] once the registry was full (and the
+                * old WARN also fired on the last valid entry).
+                */
+               if (WARN_ON(dtl_nr_pages >= NR_DTL_PAGE))
+                       return;
+               dtl_page_store[dtl_nr_pages++] = page;
+               uv_share_page(pfn, 1);
+       }
+}