mm/page_alloc: add helper for checking if check_pages_enabled
author Mike Rapoport (IBM) <rppt@kernel.org>
Tue, 21 Mar 2023 17:05:01 +0000 (19:05 +0200)
committer Andrew Morton <akpm@linux-foundation.org>
Tue, 28 Mar 2023 23:25:10 +0000 (16:25 -0700)
Instead of duplicating the long static_branch_unlikely(&check_pages_enabled)
test at every call site, wrap it in a helper function, is_check_pages_enabled().

Link: https://lkml.kernel.org/r/20230321170513.2401534-3-rppt@kernel.org
Signed-off-by: Mike Rapoport (IBM) <rppt@kernel.org>
Reviewed-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Doug Berger <opendmb@gmail.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
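
For readers outside the kernel tree, here is a minimal userspace sketch of
the pattern the patch applies: a predicate repeated at several call sites is
folded into one inline helper. The kernel's static-key machinery
(DEFINE_STATIC_KEY_MAYBE, static_branch_unlikely) does not exist in
userspace, so a plain bool stands in for the key here, and
free_page_is_bad_stub is a made-up placeholder, not the kernel's
free_page_is_bad():

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the static key; in the kernel this is
 * DEFINE_STATIC_KEY_MAYBE(CONFIG_DEBUG_VM, check_pages_enabled).
 */
static bool check_pages_enabled;

/* The helper the patch adds: one place to spell the long test. */
static inline bool is_check_pages_enabled(void)
{
	/* Kernel version: return static_branch_unlikely(&check_pages_enabled); */
	return check_pages_enabled;
}

/* Hypothetical placeholder for free_page_is_bad(); not the kernel code. */
static bool free_page_is_bad_stub(long pfn)
{
	return pfn < 0;
}

int main(void)
{
	int bad = 0;

	check_pages_enabled = true;	/* kernel: static_branch_enable() */

	/* Call sites stay short, as in the patched free_pages_prepare(). */
	for (long pfn = -2; pfn < 3; pfn++)
		if (is_check_pages_enabled() && free_page_is_bad_stub(pfn))
			bad++;

	printf("bad pages: %d\n", bad);
	return 0;
}

Wrapping the test also gives a single point to change if the branch
mechanism is ever swapped out, without touching every caller.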
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 2e72fdbdd8dbf9571b2d3524a241159d3c014514..9a787d0c56aac4dd11b372667d292c9f96980337 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -245,6 +245,11 @@ EXPORT_SYMBOL(init_on_free);
 /* perform sanity checks on struct pages being allocated or freed */
 static DEFINE_STATIC_KEY_MAYBE(CONFIG_DEBUG_VM, check_pages_enabled);
 
+static inline bool is_check_pages_enabled(void)
+{
+       return static_branch_unlikely(&check_pages_enabled);
+}
+
 static bool _init_on_alloc_enabled_early __read_mostly
                                = IS_ENABLED(CONFIG_INIT_ON_ALLOC_DEFAULT_ON);
 static int __init early_init_on_alloc(char *buf)
@@ -1450,7 +1455,7 @@ static __always_inline bool free_pages_prepare(struct page *page,
                for (i = 1; i < (1 << order); i++) {
                        if (compound)
                                bad += free_tail_pages_check(page, page + i);
-                       if (static_branch_unlikely(&check_pages_enabled)) {
+                       if (is_check_pages_enabled()) {
                                if (unlikely(free_page_is_bad(page + i))) {
                                        bad++;
                                        continue;
@@ -1463,7 +1468,7 @@ static __always_inline bool free_pages_prepare(struct page *page,
                page->mapping = NULL;
        if (memcg_kmem_online() && PageMemcgKmem(page))
                __memcg_kmem_uncharge_page(page, order);
-       if (static_branch_unlikely(&check_pages_enabled)) {
+       if (is_check_pages_enabled()) {
                if (free_page_is_bad(page))
                        bad++;
                if (bad)
@@ -2373,7 +2378,7 @@ static int check_new_page(struct page *page)
 
 static inline bool check_new_pages(struct page *page, unsigned int order)
 {
-       if (static_branch_unlikely(&check_pages_enabled)) {
+       if (is_check_pages_enabled()) {
                for (int i = 0; i < (1 << order); i++) {
                        struct page *p = page + i;