mm: rename the global section array to mem_sections
author Dong Aisheng <aisheng.dong@nxp.com>
Wed, 2 Jun 2021 03:52:31 +0000 (13:52 +1000)
committer Stephen Rothwell <sfr@canb.auug.org.au>
Wed, 2 Jun 2021 03:52:31 +0000 (13:52 +1000)
To distinguish the global array from struct mem_section, improve code
readability, and align with the name used in the kernel documentation [1]
quoted below, rename the global array from 'mem_section' to 'mem_sections'.

[1] Documentation/vm/memory-model.rst
"The `mem_section` objects are arranged in a two-dimensional array
called `mem_sections`."
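
For reference, a minimal sketch of the two-dimensional lookup the new name
describes (illustrative only; lookup_section() is a hypothetical stand-in,
the real accessor is __nr_to_section() in the mmzone.h hunk below):

    /*
     * Hypothetical helper, shown only to illustrate the indexing:
     * the outer index selects a root of SECTIONS_PER_ROOT entries,
     * the inner index selects the mem_section within that root.
     */
    static inline struct mem_section *lookup_section(unsigned long nr)
    {
            unsigned long root = SECTION_NR_TO_ROOT(nr); /* nr / SECTIONS_PER_ROOT */
            unsigned long idx  = nr & SECTION_ROOT_MASK; /* nr % SECTIONS_PER_ROOT */

            if (!mem_sections[root])
                    return NULL;
            return &mem_sections[root][idx];
    }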

Link: https://lkml.kernel.org/r/20210531091908.1738465-5-aisheng.dong@nxp.com
Signed-off-by: Dong Aisheng <aisheng.dong@nxp.com>
Cc: Dave Young <dyoung@redhat.com>
Cc: Baoquan He <bhe@redhat.com>
Cc: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Stephen Rothwell <sfr@canb.auug.org.au>
include/linux/mmzone.h
kernel/crash_core.c
mm/sparse.c

diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 0d53eba1c38319e44313ed7de74870f2850709da..f29b83e20e5e8812011c2f6dcbee80781eb7574e 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -1301,9 +1301,9 @@ struct mem_section {
 #define SECTION_ROOT_MASK      (SECTIONS_PER_ROOT - 1)
 
 #ifdef CONFIG_SPARSEMEM_EXTREME
-extern struct mem_section **mem_section;
+extern struct mem_section **mem_sections;
 #else
-extern struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT];
+extern struct mem_section mem_sections[NR_SECTION_ROOTS][SECTIONS_PER_ROOT];
 #endif
 
 static inline unsigned long *section_to_usemap(struct mem_section *ms)
@@ -1314,12 +1314,12 @@ static inline unsigned long *section_to_usemap(struct mem_section *ms)
 static inline struct mem_section *__nr_to_section(unsigned long nr)
 {
 #ifdef CONFIG_SPARSEMEM_EXTREME
-       if (!mem_section)
+       if (!mem_sections)
                return NULL;
 #endif
-       if (!mem_section[SECTION_NR_TO_ROOT(nr)])
+       if (!mem_sections[SECTION_NR_TO_ROOT(nr)])
                return NULL;
-       return &mem_section[SECTION_NR_TO_ROOT(nr)][nr & SECTION_ROOT_MASK];
+       return &mem_sections[SECTION_NR_TO_ROOT(nr)][nr & SECTION_ROOT_MASK];
 }
 extern unsigned long __section_nr(struct mem_section *ms);
 extern size_t mem_section_usage_size(void);
diff --git a/kernel/crash_core.c b/kernel/crash_core.c
index 825284baaf4660bd3d724a757fd7c51945066ece..d033713f2f0bbf54f74bccbf41ae7159a22b9463 100644
--- a/kernel/crash_core.c
+++ b/kernel/crash_core.c
@@ -460,8 +460,8 @@ static int __init crash_save_vmcoreinfo_init(void)
        VMCOREINFO_SYMBOL(contig_page_data);
 #endif
 #ifdef CONFIG_SPARSEMEM
-       VMCOREINFO_SYMBOL_ARRAY(mem_section);
-       VMCOREINFO_LENGTH(mem_section, NR_SECTION_ROOTS);
+       VMCOREINFO_SYMBOL_ARRAY(mem_sections);
+       VMCOREINFO_LENGTH(mem_sections, NR_SECTION_ROOTS);
        VMCOREINFO_STRUCT_SIZE(mem_section);
        VMCOREINFO_OFFSET(mem_section, section_mem_map);
        VMCOREINFO_NUMBER(MAX_PHYSMEM_BITS);
diff --git a/mm/sparse.c b/mm/sparse.c
index b2ada9dc00cb400c707fee147b8591f156a62025..97ee89099e3db4e472d11d387178e3b6825bf2e0 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
  * 1) mem_section      - memory sections, mem_map's for valid memory
  */
 #ifdef CONFIG_SPARSEMEM_EXTREME
-struct mem_section **mem_section;
+struct mem_section **mem_sections;
 #else
-struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT]
+struct mem_section mem_sections[NR_SECTION_ROOTS][SECTIONS_PER_ROOT]
        ____cacheline_internodealigned_in_smp;
 #endif
-EXPORT_SYMBOL(mem_section);
+EXPORT_SYMBOL(mem_sections);
 
 #ifdef NODE_NOT_IN_PAGE_FLAGS
 /*
@@ -90,14 +90,14 @@ static int __meminit sparse_index_init(unsigned long section_nr, int nid)
         *
         * The mem_hotplug_lock resolves the apparent race below.
         */
-       if (mem_section[root])
+       if (mem_sections[root])
                return 0;
 
        section = sparse_index_alloc(nid);
        if (!section)
                return -ENOMEM;
 
-       mem_section[root] = section;
+       mem_sections[root] = section;
 
        return 0;
 }
@@ -130,7 +130,7 @@ unsigned long __section_nr(struct mem_section *ms)
 #else
 unsigned long __section_nr(struct mem_section *ms)
 {
-       return (unsigned long)(ms - mem_section[0]);
+       return (unsigned long)(ms - mem_sections[0]);
 }
 #endif
 
@@ -259,8 +259,8 @@ static void __init memory_present(int nid, unsigned long start, unsigned long en
 
                size = sizeof(struct mem_section *) * NR_SECTION_ROOTS;
                align = 1 << (INTERNODE_CACHE_SHIFT);
-               mem_section = memblock_alloc(size, align);
-               if (!mem_section)
+               mem_sections = memblock_alloc(size, align);
+               if (!mem_sections)
                        panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
                              __func__, size, align);
        }