www.infradead.org Git - users/jedix/linux-maple.git/commitdiff
mm: move _entire_mapcount in folio to page[2] on 32bit
authorDavid Hildenbrand <david@redhat.com>
Mon, 3 Mar 2025 16:29:59 +0000 (17:29 +0100)
committerAndrew Morton <akpm@linux-foundation.org>
Tue, 4 Mar 2025 05:50:43 +0000 (21:50 -0800)
Let's free up some space on 32bit in page[1] by moving the _entire_mapcount
to page[2].

Ordinary folios only use the entire mapcount with PMD mappings, so order-1
folios don't apply.  Similarly, hugetlb folios are always larger than
order-1, turning the entire mapcount essentially unused for all order-1
folios.  Moving it to page[2] will therefore not change anything for order-1 folios.

On 32bit, simply check in folio_entire_mapcount() whether we have an
order-1 folio, and return 0 in that case.

Note that THPs on 32bit are not particularly common (and we don't care too
much about performance), but we want to keep it working reliably, because
likely we want to use large folios there as well in the future,
independent of PMD leaf support.

Once we dynamically allocate "struct folio", the 32bit specifics will go
away again; even small folios could then have an entire mapcount.

Link: https://lkml.kernel.org/r/20250303163014.1128035-7-david@redhat.com
Signed-off-by: David Hildenbrand <david@redhat.com>
Cc: Andy Lutomirks^H^Hski <luto@kernel.org>
Cc: Borislav Betkov <bp@alien8.de>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Jann Horn <jannh@google.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Lance Yang <ioworker0@gmail.com>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: Matthew Wilcow (Oracle) <willy@infradead.org>
Cc: Michal Koutný <mkoutny@suse.com>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: tejun heo <tj@kernel.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Zefan Li <lizefan.x@bytedance.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/mm.h
include/linux/mm_types.h
mm/internal.h
mm/page_alloc.c

index 1a4ee028a851ea0e4c44926b4d426664cad7640d..9c1290588a11e11ea459296a073f426515e27464 100644 (file)
@@ -1333,6 +1333,8 @@ static inline int is_vmalloc_or_module_addr(const void *x)
 static inline int folio_entire_mapcount(const struct folio *folio)
 {
        VM_BUG_ON_FOLIO(!folio_test_large(folio), folio);
+       if (!IS_ENABLED(CONFIG_64BIT) && unlikely(folio_large_order(folio) == 1))
+               return 0;
        return atomic_read(&folio->_entire_mapcount) + 1;
 }
 
index 31f466d8485bca7773cc8ffb7925d16300b9dd4b..c83dd2f1ee25e44beae746ea4e72a79e4a46b3a2 100644 (file)
@@ -385,9 +385,9 @@ struct folio {
                        union {
                                struct {
                                        atomic_t _large_mapcount;
-                                       atomic_t _entire_mapcount;
                                        atomic_t _nr_pages_mapped;
 #ifdef CONFIG_64BIT
+                                       atomic_t _entire_mapcount;
                                        atomic_t _pincount;
 #endif /* CONFIG_64BIT */
                                };
@@ -409,6 +409,7 @@ struct folio {
        /* public: */
                        struct list_head _deferred_list;
 #ifndef CONFIG_64BIT
+                       atomic_t _entire_mapcount;
                        atomic_t _pincount;
 #endif /* !CONFIG_64BIT */
        /* private: the union with struct page is transitional */
index d33db24c8b17b929ce2046d48f18104c640c23d1..ffdc91b19322ededdaa105ee6d1d72c5f2a1634d 100644 (file)
@@ -721,10 +721,11 @@ static inline void prep_compound_head(struct page *page, unsigned int order)
 
        folio_set_order(folio, order);
        atomic_set(&folio->_large_mapcount, -1);
-       atomic_set(&folio->_entire_mapcount, -1);
        atomic_set(&folio->_nr_pages_mapped, 0);
-       if (IS_ENABLED(CONFIG_64BIT) || order > 1)
+       if (IS_ENABLED(CONFIG_64BIT) || order > 1) {
                atomic_set(&folio->_pincount, 0);
+               atomic_set(&folio->_entire_mapcount, -1);
+       }
        if (order > 1)
                INIT_LIST_HEAD(&folio->_deferred_list);
 }
index 2685dcf4a0ade9a2bffbcf7919e931841376bdb8..dbf2da10565ed1cb66f3b051affba349bfa4fec4 100644 (file)
@@ -947,10 +947,6 @@ static int free_tail_page_prepare(struct page *head_page, struct page *page)
        switch (page - head_page) {
        case 1:
                /* the first tail page: these may be in place of ->mapping */
-               if (unlikely(folio_entire_mapcount(folio))) {
-                       bad_page(page, "nonzero entire_mapcount");
-                       goto out;
-               }
                if (unlikely(folio_large_mapcount(folio))) {
                        bad_page(page, "nonzero large_mapcount");
                        goto out;
@@ -960,6 +956,10 @@ static int free_tail_page_prepare(struct page *head_page, struct page *page)
                        goto out;
                }
                if (IS_ENABLED(CONFIG_64BIT)) {
+                       if (unlikely(atomic_read(&folio->_entire_mapcount) + 1)) {
+                               bad_page(page, "nonzero entire_mapcount");
+                               goto out;
+                       }
                        if (unlikely(atomic_read(&folio->_pincount))) {
                                bad_page(page, "nonzero pincount");
                                goto out;
@@ -973,6 +973,10 @@ static int free_tail_page_prepare(struct page *head_page, struct page *page)
                        goto out;
                }
                if (!IS_ENABLED(CONFIG_64BIT)) {
+                       if (unlikely(atomic_read(&folio->_entire_mapcount) + 1)) {
+                               bad_page(page, "nonzero entire_mapcount");
+                               goto out;
+                       }
                        if (unlikely(atomic_read(&folio->_pincount))) {
                                bad_page(page, "nonzero pincount");
                                goto out;