#include <asm/plpar_wrappers.h>
 #include <asm/firmware.h>
 
+#include <linux/sysfs.h>
+#include "asm/guest-state-buffer.h"
+
 enum kvmppc_pmu_eventid {
        KVMPPC_EVENT_MAX,
 };
 
+/* Define a perf sysfs event attribute rendered by kvmppc_events_sysfs_show() */
+#define KVMPPC_PMU_EVENT_ATTR(_name, _id) \
+	PMU_EVENT_ATTR_ID(_name, kvmppc_events_sysfs_show, _id)
+
+/* sysfs "show" callback: print the event id of a KVMPPC_PMU_EVENT_ATTR */
+static ssize_t kvmppc_events_sysfs_show(struct device *dev,
+					struct device_attribute *attr,
+					char *page)
+{
+	struct perf_pmu_events_attr *pmu_attr;
+
+	pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);
+	/* sysfs_emit() bounds the write to PAGE_SIZE; preferred over sprintf() */
+	return sysfs_emit(page, "event=0x%02llx\n", pmu_attr->id);
+}
+
+/*
+ * Holds the hostwide stats reported by the L0 hypervisor. Field meanings
+ * follow the KVMPPC_GSID_L0_GUEST_* idents they are refreshed from (see
+ * hostwide_refresh_info()).
+ */
+static struct kvmppc_hostwide_stats {
+	u64 guest_heap;			/* KVMPPC_GSID_L0_GUEST_HEAP */
+	u64 guest_heap_max;		/* KVMPPC_GSID_L0_GUEST_HEAP_MAX */
+	u64 guest_pgtable_size;		/* KVMPPC_GSID_L0_GUEST_PGTABLE_SIZE */
+	u64 guest_pgtable_size_max;	/* KVMPPC_GSID_L0_GUEST_PGTABLE_SIZE_MAX */
+	u64 guest_pgtable_reclaim;	/* KVMPPC_GSID_L0_GUEST_PGTABLE_RECLAIM */
+} l0_stats;
+
+/* Protect access to l0_stats and the gsm/gsb pointers below */
+static DEFINE_SPINLOCK(lock_l0_stats);
+
+/* GSB related structs needed to talk to L0 */
+static struct kvmppc_gs_msg *gsm_l0_stats;
+static struct kvmppc_gs_buff *gsb_l0_stats;
+
 static struct attribute *kvmppc_pmu_events_attr[] = {
        NULL,
 };
 {
 }
 
+/* Return the size of the needed guest state buffer */
+static size_t hostwide_get_size(struct kvmppc_gs_msg *gsm)
+{
+	size_t size = 0;
+	/* static const: immutable table, no per-call stack copy */
+	static const u16 ids[] = {
+		KVMPPC_GSID_L0_GUEST_HEAP,
+		KVMPPC_GSID_L0_GUEST_HEAP_MAX,
+		KVMPPC_GSID_L0_GUEST_PGTABLE_SIZE,
+		KVMPPC_GSID_L0_GUEST_PGTABLE_SIZE_MAX,
+		KVMPPC_GSID_L0_GUEST_PGTABLE_RECLAIM
+	};
+
+	/* Sum the total (header + payload) size of each element */
+	for (int i = 0; i < ARRAY_SIZE(ids); i++)
+		size += kvmppc_gse_total_size(kvmppc_gsid_size(ids[i]));
+	return size;
+}
+
+/* Populate the request guest state buffer */
+static int hostwide_fill_info(struct kvmppc_gs_buff *gsb,
+			      struct kvmppc_gs_msg *gsm)
+{
+	struct kvmppc_hostwide_stats *stats = gsm->data;
+	const struct {
+		u16 id;
+		u64 val;
+	} entries[] = {
+		{ KVMPPC_GSID_L0_GUEST_HEAP, stats->guest_heap },
+		{ KVMPPC_GSID_L0_GUEST_HEAP_MAX, stats->guest_heap_max },
+		{ KVMPPC_GSID_L0_GUEST_PGTABLE_SIZE, stats->guest_pgtable_size },
+		{ KVMPPC_GSID_L0_GUEST_PGTABLE_SIZE_MAX,
+		  stats->guest_pgtable_size_max },
+		{ KVMPPC_GSID_L0_GUEST_PGTABLE_RECLAIM,
+		  stats->guest_pgtable_reclaim },
+	};
+	int rc = 0;
+
+	/*
+	 * The request contents will be overwritten by L0 anyway; the current
+	 * stats are written only for symmetry and for the benefit of test
+	 * code.
+	 */
+	for (int i = 0; i < ARRAY_SIZE(entries); i++) {
+		if (!kvmppc_gsm_includes(gsm, entries[i].id))
+			continue;
+		rc = kvmppc_gse_put_u64(gsb, entries[i].id, entries[i].val);
+		if (rc)
+			break;
+	}
+
+	return rc;
+}
+
+/* Parse and update the host wide stats from returned gsb */
+static int hostwide_refresh_info(struct kvmppc_gs_msg *gsm,
+				 struct kvmppc_gs_buff *gsb)
+{
+	struct kvmppc_hostwide_stats *stats = gsm->data;
+	struct kvmppc_gs_parser gsp = { 0 };
+	const struct {
+		u16 id;
+		u64 *dst;
+	} map[] = {
+		{ KVMPPC_GSID_L0_GUEST_HEAP, &stats->guest_heap },
+		{ KVMPPC_GSID_L0_GUEST_HEAP_MAX, &stats->guest_heap_max },
+		{ KVMPPC_GSID_L0_GUEST_PGTABLE_SIZE, &stats->guest_pgtable_size },
+		{ KVMPPC_GSID_L0_GUEST_PGTABLE_SIZE_MAX,
+		  &stats->guest_pgtable_size_max },
+		{ KVMPPC_GSID_L0_GUEST_PGTABLE_RECLAIM,
+		  &stats->guest_pgtable_reclaim },
+	};
+	struct kvmppc_gs_elem *gse;
+	int rc;
+
+	rc = kvmppc_gse_parse(&gsp, gsb);
+	if (rc < 0)
+		return rc;
+
+	/* Only update the fields whose elements L0 actually returned */
+	for (int i = 0; i < ARRAY_SIZE(map); i++) {
+		gse = kvmppc_gsp_lookup(&gsp, map[i].id);
+		if (gse)
+			*map[i].dst = kvmppc_gse_get_u64(gse);
+	}
+
+	return 0;
+}
+
+/* gsb-message ops for setting up/parsing the hostwide-stats buffer */
+static struct kvmppc_gs_msg_ops gsb_ops_l0_stats = {
+	.get_size = hostwide_get_size,		/* size the request buffer */
+	.fill_info = hostwide_fill_info,	/* populate the request */
+	.refresh_info = hostwide_refresh_info,	/* parse L0's reply into l0_stats */
+};
+
+/*
+ * Set up the guest-state message/buffer used to fetch hostwide stats
+ * from L0. Idempotent: returns 0 if already initialized.
+ */
+static int kvmppc_init_hostwide(void)
+{
+	struct kvmppc_gs_msg *gsm;
+	struct kvmppc_gs_buff *gsb;
+	unsigned long flags;
+	int rc;
+
+	/*
+	 * kvmppc_gsm_new()/kvmppc_gsb_new() allocate with GFP_KERNEL and may
+	 * sleep, so do all allocations BEFORE taking the spinlock. The
+	 * previous code allocated inside spin_lock_irqsave(), i.e. slept in
+	 * atomic context.
+	 */
+	gsm = kvmppc_gsm_new(&gsb_ops_l0_stats, &l0_stats, GSM_SEND,
+			     GFP_KERNEL);
+	if (!gsm)
+		return -ENOMEM;
+
+	/* Populate the Idents */
+	kvmppc_gsm_include(gsm, KVMPPC_GSID_L0_GUEST_HEAP);
+	kvmppc_gsm_include(gsm, KVMPPC_GSID_L0_GUEST_HEAP_MAX);
+	kvmppc_gsm_include(gsm, KVMPPC_GSID_L0_GUEST_PGTABLE_SIZE);
+	kvmppc_gsm_include(gsm, KVMPPC_GSID_L0_GUEST_PGTABLE_SIZE_MAX);
+	kvmppc_gsm_include(gsm, KVMPPC_GSID_L0_GUEST_PGTABLE_RECLAIM);
+
+	/* allocate GSB. Guest/Vcpu Id is ignored */
+	gsb = kvmppc_gsb_new(kvmppc_gsm_size(gsm), 0, 0, GFP_KERNEL);
+	if (!gsb) {
+		rc = -ENOMEM;
+		goto out_free;
+	}
+
+	/* ask the ops to fill in the info */
+	rc = kvmppc_gsm_fill_info(gsm, gsb);
+	if (rc)
+		goto out_free;
+
+	/* Publish the buffers unless a concurrent init beat us to it */
+	spin_lock_irqsave(&lock_l0_stats, flags);
+	if (gsm_l0_stats) {
+		spin_unlock_irqrestore(&lock_l0_stats, flags);
+		rc = 0;	/* already registered: not an error */
+		goto out_free;
+	}
+	gsm_l0_stats = gsm;
+	gsb_l0_stats = gsb;
+	spin_unlock_irqrestore(&lock_l0_stats, flags);
+
+	return 0;
+
+out_free:
+	if (gsb)
+		kvmppc_gsb_free(gsb);
+	kvmppc_gsm_free(gsm);
+	return rc;
+}
+
+/* Tear down the hostwide-stats GSB state set up by kvmppc_init_hostwide() */
+static void kvmppc_cleanup_hostwide(void)
+{
+	struct kvmppc_gs_msg *gsm;
+	struct kvmppc_gs_buff *gsb;
+	unsigned long flags;
+
+	/*
+	 * Detach the buffers under the lock but free them outside it: keeps
+	 * the IRQs-disabled critical section minimal and avoids running the
+	 * free helpers in atomic context.
+	 */
+	spin_lock_irqsave(&lock_l0_stats, flags);
+	gsm = gsm_l0_stats;
+	gsb = gsb_l0_stats;
+	gsm_l0_stats = NULL;
+	gsb_l0_stats = NULL;
+	spin_unlock_irqrestore(&lock_l0_stats, flags);
+
+	if (gsm)
+		kvmppc_gsm_free(gsm);
+	if (gsb)
+		kvmppc_gsb_free(gsb);
+}
+
 /* L1 wide counters PMU */
 static struct pmu kvmppc_pmu = {
        .module = THIS_MODULE,
 
        /* only support events for nestedv2 right now */
        if (kvmhv_is_nestedv2()) {
+               rc = kvmppc_init_hostwide();
+               if (rc)
+                       goto out;
+
                /* Register the pmu */
                rc = perf_pmu_register(&kvmppc_pmu, kvmppc_pmu.name, -1);
                if (rc)
 static void __exit kvmppc_unregister_pmu(void)
 {
        if (kvmhv_is_nestedv2()) {
+               kvmppc_cleanup_hostwide();
+
                if (kvmppc_pmu.type != -1)
                        perf_pmu_unregister(&kvmppc_pmu);