Add statistics that show how memory is mapped within the kernel
identity mapping. This is more or less the same as git
commit ce0c0e50f94e ("x86, generic: CPA add statistics about state
of direct mapping v4") for x86.
I also intentionally copied the lower case "k" within DirectMap4k vs.
the upper case "M" and "G" within the two other lines. Let's have
consistent inconsistencies across architectures.
The output of /proc/meminfo now contains these additional lines:
DirectMap4k:        2048 kB
DirectMap1M:     3991552 kB
DirectMap2G:     4194304 kB
The implementation on s390 is lockless, unlike the x86 version, since I
assume changes to the kernel mapping are a very rare event. Therefore
it really doesn't matter if these statistics could potentially be
inconsistent if read while kernel page tables are being changed.
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Acked-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
 
 #include <linux/mm_types.h>
 #include <linux/page-flags.h>
 #include <linux/radix-tree.h>
+#include <linux/atomic.h>
 #include <asm/bug.h>
 #include <asm/page.h>
 
 pmd_t *vmem_pmd_alloc(void);
 pte_t *vmem_pte_alloc(void);
 
+/*
+ * Mapping granularities used for the kernel identity (direct) mapping:
+ * 4k pages, 1M segments and 2G regions.  PG_DIRECT_MAP_MAX sizes the
+ * per-granularity counter array below.
+ */
+enum {
+       PG_DIRECT_MAP_4K = 0,
+       PG_DIRECT_MAP_1M,
+       PG_DIRECT_MAP_2G,
+       PG_DIRECT_MAP_MAX
+};
+
+/* Number of identity-mapping entries per granularity; defined in vmem.c. */
+extern atomic_long_t direct_pages_count[PG_DIRECT_MAP_MAX];
+
+/*
+ * Adjust the direct-mapping counter for the given granularity by
+ * @count entries (may be negative when entries are removed).  The
+ * statistics only feed /proc/meminfo, so the update compiles away
+ * entirely when CONFIG_PROC_FS is disabled.
+ */
+static inline void update_page_count(int level, long count)
+{
+       if (IS_ENABLED(CONFIG_PROC_FS))
+               atomic_long_add(count, &direct_pages_count[level]);
+}
+
+struct seq_file;
+void arch_report_meminfo(struct seq_file *m);
+
 /*
  * The S390 doesn't have any external MMU info: the kernel page
  * tables contain all the necessary information.
 
 }
 #endif
 
+#ifdef CONFIG_PROC_FS
+atomic_long_t direct_pages_count[PG_DIRECT_MAP_MAX];
+
+/*
+ * Emit DirectMap* lines into /proc/meminfo.  The counters are read
+ * locklessly (see commit message: kernel mapping changes are rare, so
+ * a momentarily inconsistent snapshot is acceptable).  Each counter
+ * holds the number of mapping entries of its granularity; the shifts
+ * convert entry counts to kB: 4k entry -> <<2, 1M entry -> <<10,
+ * 2G entry -> <<21.
+ */
+void arch_report_meminfo(struct seq_file *m)
+{
+       seq_printf(m, "DirectMap4k:    %8lu kB\n",
+                  atomic_long_read(&direct_pages_count[PG_DIRECT_MAP_4K]) << 2);
+       seq_printf(m, "DirectMap1M:    %8lu kB\n",
+                  atomic_long_read(&direct_pages_count[PG_DIRECT_MAP_1M]) << 10);
+       seq_printf(m, "DirectMap2G:    %8lu kB\n",
+                  atomic_long_read(&direct_pages_count[PG_DIRECT_MAP_2G]) << 21);
+}
+#endif /* CONFIG_PROC_FS */
+
 static void pgt_set(unsigned long *old, unsigned long new, unsigned long addr,
                    unsigned long dtt)
 {
        }
        pmd_val(new) = __pa(pt_dir) | _SEGMENT_ENTRY;
        pgt_set((unsigned long *)pmdp, pmd_val(new), addr, CRDTE_DTT_SEGMENT);
+       update_page_count(PG_DIRECT_MAP_4K, PTRS_PER_PTE);
+       update_page_count(PG_DIRECT_MAP_1M, -1);
        return 0;
 }
 
        }
        pud_val(new) = __pa(pm_dir) | _REGION3_ENTRY;
        pgt_set((unsigned long *)pudp, pud_val(new), addr, CRDTE_DTT_REGION3);
+       update_page_count(PG_DIRECT_MAP_1M, PTRS_PER_PMD);
+       update_page_count(PG_DIRECT_MAP_2G, -1);
        return 0;
 }
 
 
  */
 static int vmem_add_mem(unsigned long start, unsigned long size)
 {
+       unsigned long pages4k, pages1m, pages2g;
        unsigned long end = start + size;
        unsigned long address = start;
        pgd_t *pg_dir;
        pte_t *pt_dir;
        int ret = -ENOMEM;
 
+       pages4k = pages1m = pages2g = 0;
        while (address < end) {
                pg_dir = pgd_offset_k(address);
                if (pgd_none(*pg_dir)) {
                     !debug_pagealloc_enabled()) {
                        pud_val(*pu_dir) = address | pgprot_val(REGION3_KERNEL);
                        address += PUD_SIZE;
+                       pages2g++;
                        continue;
                }
                if (pud_none(*pu_dir)) {
                    !debug_pagealloc_enabled()) {
                        pmd_val(*pm_dir) = address | pgprot_val(SEGMENT_KERNEL);
                        address += PMD_SIZE;
+                       pages1m++;
                        continue;
                }
                if (pmd_none(*pm_dir)) {
                pt_dir = pte_offset_kernel(pm_dir, address);
                pte_val(*pt_dir) = address |  pgprot_val(PAGE_KERNEL);
                address += PAGE_SIZE;
+               pages4k++;
        }
        ret = 0;
 out:
+       update_page_count(PG_DIRECT_MAP_4K, pages4k);
+       update_page_count(PG_DIRECT_MAP_1M, pages1m);
+       update_page_count(PG_DIRECT_MAP_2G, pages2g);
        return ret;
 }
 
  */
 static void vmem_remove_range(unsigned long start, unsigned long size)
 {
+       unsigned long pages4k, pages1m, pages2g;
        unsigned long end = start + size;
        unsigned long address = start;
        pgd_t *pg_dir;
        pmd_t *pm_dir;
        pte_t *pt_dir;
 
+       pages4k = pages1m = pages2g = 0;
        while (address < end) {
                pg_dir = pgd_offset_k(address);
                if (pgd_none(*pg_dir)) {
                if (pud_large(*pu_dir)) {
                        pud_clear(pu_dir);
                        address += PUD_SIZE;
+                       pages2g++;
                        continue;
                }
                pm_dir = pmd_offset(pu_dir, address);
                if (pmd_large(*pm_dir)) {
                        pmd_clear(pm_dir);
                        address += PMD_SIZE;
+                       pages1m++;
                        continue;
                }
                pt_dir = pte_offset_kernel(pm_dir, address);
                pte_clear(&init_mm, address, pt_dir);
                address += PAGE_SIZE;
+               pages4k++;
        }
        flush_tlb_kernel_range(start, end);
+       update_page_count(PG_DIRECT_MAP_4K, -pages4k);
+       update_page_count(PG_DIRECT_MAP_1M, -pages1m);
+       update_page_count(PG_DIRECT_MAP_2G, -pages2g);
 }
 
 /*