mm/hugetlb_vmemmap: fix memory loads ordering
author      Yu Zhao <yuzhao@google.com>
            Wed, 8 Jan 2025 07:48:21 +0000 (00:48 -0700)
committer   Andrew Morton <akpm@linux-foundation.org>
            Sun, 26 Jan 2025 23:04:59 +0000 (15:04 -0800)
Using x86_64 as an example, for a 32KB struct page[] area describing a 2MB
hugeTLB, HVO reduces the area to 4KB by the following steps:

1. Split the (r/w vmemmap) PMD mapping the area into 512 (r/w) PTEs;
2. For the 8 PTEs mapping the area, remap PTE 1-7 to the page mapped
   by PTE 0, and at the same time change the permission from r/w to
   r/o;
3. Free the pages PTE 1-7 used to map, hence the reduction from 32KB
   to 4KB.
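
A back-of-the-envelope check of the numbers above (a user-space sketch, not
kernel code; the x86_64 values assumed are 4KB base pages, a 64-byte struct
page, and a 2MB hugeTLB page):

  #include <stdio.h>

  int main(void)
  {
          unsigned long base_page   = 4096;           /* 4KB base page */
          unsigned long huge_page   = 2UL << 20;      /* 2MB hugeTLB page */
          unsigned long struct_page = 64;             /* assumed sizeof(struct page) */

          unsigned long nr_pages   = huge_page / base_page;    /* 512 base pages */
          unsigned long vmemmap_sz = nr_pages * struct_page;   /* 32KB of struct page[] */
          unsigned long nr_ptes    = vmemmap_sz / base_page;   /* 8 PTEs after the PMD split */

          /* after HVO, only the page behind PTE 0 remains: 4KB */
          printf("%luKB across %lu PTEs -> %luKB\n",
                 vmemmap_sz >> 10, nr_ptes, base_page >> 10);
          return 0;
  }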

However, the following race can happen due to improper ordering of memory
loads:
  CPU 1 (HVO)                     CPU 2 (speculative PFN walker)

  page_ref_freeze()
  synchronize_rcu()
                                  rcu_read_lock()
                                  page_is_fake_head() is false
  vmemmap_remap_pte()
  XXX: struct page[] becomes r/o

  page_ref_unfreeze()
                                  page_ref_count() is not zero

                                  atomic_add_unless(&page->_refcount)
                                  XXX: try to modify r/o struct page[]

Specifically, page_is_fake_head() must be ordered after page_ref_count() on
CPU 2 so that it is guaranteed to return true in this case, avoiding the
later attempt to modify the r/o struct page[].

This patch adds the missing memory barrier and performs the tests on
page_is_fake_head() and page_ref_count() in the proper order.
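
As a rough user-space analogue of the ordering the fix relies on (a sketch
using C11 atomics, not kernel code; the names and the simplified "refcount"
are illustrative only): the unfreeze acts as a release store, so an acquire
load of the refcount on the walker side guarantees that the earlier remap,
which turns the page into a fake head, is visible before the fake-head test
runs.

  #include <stdatomic.h>
  #include <stdbool.h>

  static atomic_int  refcount;   /* 0 means frozen, as during HVO */
  static atomic_bool fake_head;  /* set while the refcount is frozen */

  /* CPU 1 in the diagram: remap, then unfreeze */
  static void hvo_writer(void)
  {
          atomic_store_explicit(&fake_head, true, memory_order_relaxed);
          /* release: pairs with the acquire load in pfn_walker_may_write() */
          atomic_store_explicit(&refcount, 1, memory_order_release);
  }

  /* CPU 2 in the diagram: only write if not frozen and not a fake head */
  static bool pfn_walker_may_write(void)
  {
          /* acquire: seeing the unfrozen count implies seeing fake_head */
          if (atomic_load_explicit(&refcount, memory_order_acquire) == 0)
                  return false;
          return !atomic_load_explicit(&fake_head, memory_order_relaxed);
  }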

Link: https://lkml.kernel.org/r/20250108074822.722696-1-yuzhao@google.com
Fixes: bd225530a4c7 ("mm/hugetlb_vmemmap: fix race with speculative PFN walkers")
Signed-off-by: Yu Zhao <yuzhao@google.com>
Reported-by: Will Deacon <will@kernel.org>
Closes: https://lore.kernel.org/20241128142028.GA3506@willie-the-truck/
Reviewed-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Muchun Song <muchun.song@linux.dev>
Cc: Mateusz Guzik <mjguzik@gmail.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/page-flags.h
include/linux/page_ref.h

diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 691506bdf2c5a82ba0308b0f68e5a150a7eebdb0..16fa8f0cea0273ff4eeeb9d4a7c19f5df302a815 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -225,11 +225,48 @@ static __always_inline const struct page *page_fixed_fake_head(const struct page
        }
        return page;
 }
+
+static __always_inline bool page_count_writable(const struct page *page, int u)
+{
+       if (!static_branch_unlikely(&hugetlb_optimize_vmemmap_key))
+               return true;
+
+       /*
+        * The refcount check is ordered before the fake-head check to prevent
+        * the following race:
+        *   CPU 1 (HVO)                     CPU 2 (speculative PFN walker)
+        *
+        *   page_ref_freeze()
+        *   synchronize_rcu()
+        *                                   rcu_read_lock()
+        *                                   page_is_fake_head() is false
+        *   vmemmap_remap_pte()
+        *   XXX: struct page[] becomes r/o
+        *
+        *   page_ref_unfreeze()
+        *                                   page_ref_count() is not zero
+        *
+        *                                   atomic_add_unless(&page->_refcount)
+        *                                   XXX: try to modify r/o struct page[]
+        *
+        * The refcount check also prevents modification attempts to other (r/o)
+        * tail pages that are not fake heads.
+        */
+       if (atomic_read_acquire(&page->_refcount) == u)
+               return false;
+
+       return page_fixed_fake_head(page) == page;
+}
 #else
 static inline const struct page *page_fixed_fake_head(const struct page *page)
 {
        return page;
 }
+
+static inline bool page_count_writable(const struct page *page, int u)
+{
+       return true;
+}
 #endif
 
 static __always_inline int page_is_fake_head(const struct page *page)
diff --git a/include/linux/page_ref.h b/include/linux/page_ref.h
index 8c236c651d1d65c52bb8840186553d699cba22ab..544150d1d5fd7462423de4f93f36aefd6268a564 100644
--- a/include/linux/page_ref.h
+++ b/include/linux/page_ref.h
@@ -234,7 +234,7 @@ static inline bool page_ref_add_unless(struct page *page, int nr, int u)
 
        rcu_read_lock();
        /* avoid writing to the vmemmap area being remapped */
-       if (!page_is_fake_head(page) && page_ref_count(page) != u)
+       if (page_count_writable(page, u))
                ret = atomic_add_unless(&page->_refcount, nr, u);
        rcu_read_unlock();