www.infradead.org Git - users/jedix/linux-maple.git/commitdiff
page_ext: introduce boot parameter 'early_page_ext'
authorLi Zhe <lizhe.67@bytedance.com>
Thu, 25 Aug 2022 10:27:14 +0000 (18:27 +0800)
committerAndrew Morton <akpm@linux-foundation.org>
Fri, 26 Aug 2022 05:03:32 +0000 (22:03 -0700)
In commit 2f1ee0913ce5 ("Revert "mm: use early_pfn_to_nid in
page_ext_init""), we call page_ext_init() after page_alloc_init_late() to
avoid a panic problem.  It seems that we cannot track early page
allocations in the current kernel even if the page structures have been
initialized early.

This patch introduces a new boot parameter 'early_page_ext' to resolve
this problem.  If we pass it to the kernel, page_ext_init() will be moved
up and the feature 'deferred initialization of struct pages' will be
disabled to initialize the page allocator early and prevent the panic
problem above.  It can help us to catch early page allocations.  This is
especially useful when we find that the amount of free memory differs
right after booting different kernels.

Link: https://lkml.kernel.org/r/20220825102714.669-1-lizhe.67@bytedance.com
Signed-off-by: Li Zhe <lizhe.67@bytedance.com>
Suggested-by: Michal Hocko <mhocko@suse.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Jason A. Donenfeld <Jason@zx2c4.com>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Kees Cook <keescook@chromium.org>
Cc: Mark-PK Tsai <mark-pk.tsai@mediatek.com>
Cc: Masami Hiramatsu (Google) <mhiramat@kernel.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Documentation/admin-guide/kernel-parameters.txt
include/linux/page_ext.h
init/main.c
mm/page_alloc.c
mm/page_ext.c

index d7f30902fda02fe09ea9eaa6563065b1655b0c2c..4f43fd5b324d9a19782f5ef8fb4f6fbba26a2cef 100644 (file)
                        Permit 'security.evm' to be updated regardless of
                        current integrity status.
 
+       early_page_ext [KNL] Enforces page_ext initialization to earlier
+                       stages so as to cover more early boot allocations.
+                       Please note that as side effect some optimizations
+                       might be disabled to achieve that (e.g. parallelized
+                       memory initialization is disabled) so the boot process
+                       might take longer, especially on systems with a lot of
+                       memory. Available with CONFIG_PAGE_EXTENSION=y.
+
        failslab=
        fail_usercopy=
        fail_page_alloc=
index ed27198cdaf46709423c33eeb8a16451747ec993..22be4582faaeddb6bd3043216a808ece387b5a8c 100644 (file)
@@ -36,9 +36,15 @@ struct page_ext {
        unsigned long flags;
 };
 
+extern bool early_page_ext;
 extern unsigned long page_ext_size;
 extern void pgdat_page_ext_init(struct pglist_data *pgdat);
 
+static inline bool early_page_ext_enabled(void)
+{
+       return early_page_ext;
+}
+
 #ifdef CONFIG_SPARSEMEM
 static inline void page_ext_init_flatmem(void)
 {
@@ -68,6 +74,11 @@ static inline struct page_ext *page_ext_next(struct page_ext *curr)
 #else /* !CONFIG_PAGE_EXTENSION */
 struct page_ext;
 
+static inline bool early_page_ext_enabled(void)
+{
+       return false;
+}
+
 static inline void pgdat_page_ext_init(struct pglist_data *pgdat)
 {
 }
index fa2623401ae1dfb58f5a8f697b85df4636627893..7ace0960a5f9a468a340316ebbb7ff725621d533 100644 (file)
@@ -850,6 +850,9 @@ static void __init mm_init(void)
        pgtable_init();
        debug_objects_mem_init();
        vmalloc_init();
+       /* Should be run after vmap initialization */
+       if (early_page_ext_enabled())
+               page_ext_init();
        /* Should be run before the first non-init thread is created */
        init_espfix_bsp();
        /* Should be run after espfix64 is set up. */
@@ -1608,7 +1611,8 @@ static noinline void __init kernel_init_freeable(void)
        padata_init();
        page_alloc_init_late();
        /* Initialize page ext after all struct pages are initialized. */
-       page_ext_init();
+       if (!early_page_ext_enabled())
+               page_ext_init();
 
        do_basic_setup();
 
index 405da48c63d803235b7e972f0ba812d2a8adcdbc..a231ad1d3089db141a4f14780267dbec09564bfd 100644 (file)
@@ -482,6 +482,8 @@ defer_init(int nid, unsigned long pfn, unsigned long end_pfn)
 {
        static unsigned long prev_end_pfn, nr_initialised;
 
+       if (early_page_ext_enabled())
+               return false;
        /*
         * prev_end_pfn static that contains the end of previous zone
         * No need to protect because called very early in boot before smp_init.
index b236bdd59fa89c8bb8a28f535b86ebcb4a4e0388..123920484163bd68a5f539cdbb758e6c5263df5f 100644 (file)
@@ -91,6 +91,14 @@ unsigned long page_ext_size = sizeof(struct page_ext);
 static unsigned long total_usage;
 static struct page_ext *lookup_page_ext(const struct page *page);
 
+bool early_page_ext __meminitdata;
+static int __init setup_early_page_ext(char *str)
+{
+       early_page_ext = true;
+       return 0;
+}
+early_param("early_page_ext", setup_early_page_ext);
+
 static bool __init invoke_need_callbacks(void)
 {
        int i;