mm: memory: convert clear_huge_page() to folio_zero_user()
author		Kefeng Wang <wangkefeng.wang@huawei.com>
		Tue, 18 Jun 2024 09:12:39 +0000 (17:12 +0800)
committer	Andrew Morton <akpm@linux-foundation.org>
		Thu, 4 Jul 2024 02:30:20 +0000 (19:30 -0700)
Patch series "mm: improve clear and copy user folio", v2.

Some folio conversions.  One improvement is to move the address
alignment into the callers, as it is only needed when we don't know
which address will be accessed when clearing/copying user folios.
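
For context on why the hint matters: process_huge_page() (an existing
helper in mm/memory.c) clears the subpages of a huge folio so that the
subpage at addr_hint is cleared last and therefore stays hot in the CPU
cache for the access that follows.  A rough sketch of that ordering,
simplified from the real helper (which approaches the target from both
sides and reschedules between subpages; addr_base and nr_pages are
illustrative names here, not the kernel's):

	int target = (addr_hint - addr_base) / PAGE_SIZE;
	int i;

	for (i = 0; i < nr_pages; i++) {
		if (i == target)
			continue;	/* save the hinted subpage for last */
		clear_user_highpage(folio_page(folio, i),
				    addr_base + i * PAGE_SIZE);
	}
	/* Cleared last: the subpage userspace is about to touch. */
	clear_user_highpage(folio_page(folio, target), addr_hint);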

This patch (of 4):

Replace clear_huge_page() with folio_zero_user(), which takes a folio
instead of a page.  Get the number of pages directly via
folio_nr_pages(), removing the pages_per_huge_page argument.
Furthermore, move the address alignment from folio_zero_user() to the
callers, since the alignment is only needed when we don't know which
address will be accessed.
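
Concretely, the call sites change along these lines (a condensed
restatement of the hunks below, with context trimmed):

	/* Before: page pointer plus an explicit size; the hint was
	 * aligned inside clear_huge_page() itself. */
	clear_huge_page(&folio->page, vmf->address, HPAGE_PMD_NR);

	/* After: the folio carries its own size via folio_nr_pages(),
	 * and fault paths pass the exact faulting address as the hint. */
	folio_zero_user(folio, vmf->address);

	/* Callers with no faulting address, such as
	 * hugetlbfs_fallocate(), now align the hint down themselves. */
	folio_zero_user(folio, ALIGN_DOWN(addr, hpage_size));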

Link: https://lkml.kernel.org/r/20240618091242.2140164-1-wangkefeng.wang@huawei.com
Link: https://lkml.kernel.org/r/20240618091242.2140164-2-wangkefeng.wang@huawei.com
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: "Huang, Ying" <ying.huang@intel.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Muchun Song <muchun.song@linux.dev>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
fs/hugetlbfs/inode.c
include/linux/mm.h
mm/huge_memory.c
mm/hugetlb.c
mm/memory.c

diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 6df794ed406618593ffd28f0434738c06bcd65c6..9456e1d55540fd317519fa45f4b0c8730bdd2835 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -892,7 +892,7 @@ static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
                        error = PTR_ERR(folio);
                        goto out;
                }
-               clear_huge_page(&folio->page, addr, pages_per_huge_page(h));
+               folio_zero_user(folio, ALIGN_DOWN(addr, hpage_size));
                __folio_mark_uptodate(folio);
                error = hugetlb_add_to_page_cache(folio, mapping, index);
                if (unlikely(error)) {
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 101945baffc783b453b717dd0538a2dfd08fe7f4..e2140ea6ae98221093f8d3034360c146bc99b735 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -4067,9 +4067,7 @@ enum mf_action_page_type {
 };
 
 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
-extern void clear_huge_page(struct page *page,
-                           unsigned long addr_hint,
-                           unsigned int pages_per_huge_page);
+void folio_zero_user(struct folio *folio, unsigned long addr_hint);
 int copy_user_large_folio(struct folio *dst, struct folio *src,
                          unsigned long addr_hint,
                          struct vm_area_struct *vma);
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 14a05c64380655bd038fb20a217cc7bca6f62ab4..be598e9a5f988ee2d723799d90668f8e2e2e154a 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -944,10 +944,10 @@ static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf,
                goto release;
        }
 
-       clear_huge_page(page, vmf->address, HPAGE_PMD_NR);
+       folio_zero_user(folio, vmf->address);
        /*
         * The memory barrier inside __folio_mark_uptodate makes sure that
-        * clear_huge_page writes become visible before the set_pmd_at()
+        * folio_zero_user writes become visible before the set_pmd_at()
         * write.
         */
        __folio_mark_uptodate(folio);
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 6a5ea898e4dac3eab5e3304cdb5e2c251461c05d..a47f8c6c37c25bba0c68ad932f6bd6a469ba0dfe 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -6300,8 +6300,7 @@ static vm_fault_t hugetlb_no_page(struct address_space *mapping,
                                ret = 0;
                        goto out;
                }
-               clear_huge_page(&folio->page, vmf->real_address,
-                               pages_per_huge_page(h));
+               folio_zero_user(folio, vmf->real_address);
                __folio_mark_uptodate(folio);
                new_folio = true;
 
diff --git a/mm/memory.c b/mm/memory.c
index 97ddba866e43fe0e227f9593de790a3da66c8554..cb26f8713db4755c0aa27e96ec70cb96064ded25 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -4488,7 +4488,7 @@ static struct folio *alloc_anon_folio(struct vm_fault *vmf)
                                goto next;
                        }
                        folio_throttle_swaprate(folio, gfp);
-                       clear_huge_page(&folio->page, vmf->address, 1 << order);
+                       folio_zero_user(folio, vmf->address);
                        return folio;
                }
 next:
@@ -6441,41 +6441,39 @@ static inline int process_huge_page(
        return 0;
 }
 
-static void clear_gigantic_page(struct page *page,
-                               unsigned long addr,
+static void clear_gigantic_page(struct folio *folio, unsigned long addr,
                                unsigned int pages_per_huge_page)
 {
        int i;
-       struct page *p;
 
        might_sleep();
        for (i = 0; i < pages_per_huge_page; i++) {
-               p = nth_page(page, i);
                cond_resched();
-               clear_user_highpage(p, addr + i * PAGE_SIZE);
+               clear_user_highpage(folio_page(folio, i), addr + i * PAGE_SIZE);
        }
 }
 
 static int clear_subpage(unsigned long addr, int idx, void *arg)
 {
-       struct page *page = arg;
+       struct folio *folio = arg;
 
-       clear_user_highpage(nth_page(page, idx), addr);
+       clear_user_highpage(folio_page(folio, idx), addr);
        return 0;
 }
 
-void clear_huge_page(struct page *page,
-                    unsigned long addr_hint, unsigned int pages_per_huge_page)
+/**
+ * folio_zero_user - Zero a folio which will be mapped to userspace.
+ * @folio: The folio to zero.
+ * @addr_hint: The address that will be accessed, or the base address if unclear.
+ */
+void folio_zero_user(struct folio *folio, unsigned long addr_hint)
 {
-       unsigned long addr = addr_hint &
-               ~(((unsigned long)pages_per_huge_page << PAGE_SHIFT) - 1);
+       unsigned int nr_pages = folio_nr_pages(folio);
 
-       if (unlikely(pages_per_huge_page > MAX_ORDER_NR_PAGES)) {
-               clear_gigantic_page(page, addr, pages_per_huge_page);
-               return;
-       }
-
-       process_huge_page(addr_hint, pages_per_huge_page, clear_subpage, page);
+       if (unlikely(nr_pages > MAX_ORDER_NR_PAGES))
+               clear_gigantic_page(folio, addr_hint, nr_pages);
+       else
+               process_huge_page(addr_hint, nr_pages, clear_subpage, folio);
 }
 
 static int copy_user_gigantic_page(struct folio *dst, struct folio *src,