mm: memory: improve copy_user_large_folio()
author		Kefeng Wang <wangkefeng.wang@huawei.com>
		Tue, 18 Jun 2024 09:12:41 +0000 (17:12 +0800)
committer	Andrew Morton <akpm@linux-foundation.org>
		Thu, 4 Jul 2024 02:30:20 +0000 (19:30 -0700)
Use nr_pages instead of pages_per_huge_page and move the address alignment
from copy_user_large_folio() into the callers since it is only needed when
we don't know which address will be accessed.
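For illustration only, a minimal user-space sketch (not part of the patch) of the
rounding that the callers now do themselves. The hugetlb size and faulting address
below are hypothetical, and ALIGN_DOWN() is simplified to the power-of-two case,
which is all that is needed for hugetlb sizes:

	#include <stdio.h>

	/* Simplified power-of-two variant of the kernel's ALIGN_DOWN(). */
	#define ALIGN_DOWN(x, a)	((x) & ~((typeof(x))(a) - 1))

	int main(void)
	{
		unsigned long sz = 2UL << 20;              /* assume a 2 MiB hugetlb page */
		unsigned long dst_addr = 0x7f43a2345678UL; /* hypothetical faulting address */

		/*
		 * The caller passes the folio-aligned base address when it does
		 * not know (or care) which byte inside the folio is accessed
		 * first; copy_user_large_folio() then keeps addr_hint untouched
		 * for process_huge_page().
		 */
		unsigned long base = ALIGN_DOWN(dst_addr, sz);

		printf("hint %#lx -> aligned base %#lx\n", dst_addr, base);
		return 0;
	}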

Link: https://lkml.kernel.org/r/20240618091242.2140164-4-wangkefeng.wang@huawei.com
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: "Huang, Ying" <ying.huang@intel.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Muchun Song <muchun.song@linux.dev>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/hugetlb.c
mm/memory.c

index a47f8c6c37c25bba0c68ad932f6bd6a469ba0dfe..a44f314587e8d657620ebe2fc9ed0a3d765ec4e0 100644 (file)
@@ -5492,9 +5492,8 @@ again:
                                        ret = PTR_ERR(new_folio);
                                        break;
                                }
-                               ret = copy_user_large_folio(new_folio,
-                                                           pte_folio,
-                                                           addr, dst_vma);
+                               ret = copy_user_large_folio(new_folio, pte_folio,
+                                               ALIGN_DOWN(addr, sz), dst_vma);
                                folio_put(pte_folio);
                                if (ret) {
                                        folio_put(new_folio);
@@ -6684,7 +6683,7 @@ int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
        struct hstate *h = hstate_vma(dst_vma);
        struct address_space *mapping = dst_vma->vm_file->f_mapping;
        pgoff_t idx = vma_hugecache_offset(h, dst_vma, dst_addr);
-       unsigned long size;
+       unsigned long size = huge_page_size(h);
        int vm_shared = dst_vma->vm_flags & VM_SHARED;
        pte_t _dst_pte;
        spinlock_t *ptl;
@@ -6703,8 +6702,7 @@ int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
                }
 
                _dst_pte = make_pte_marker(PTE_MARKER_POISONED);
-               set_huge_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte,
-                               huge_page_size(h));
+               set_huge_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte, size);
 
                /* No need to invalidate - it was non-present before */
                update_mmu_cache(dst_vma, dst_addr, dst_pte);
@@ -6778,7 +6776,8 @@ int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
                        *foliop = NULL;
                        goto out;
                }
-               ret = copy_user_large_folio(folio, *foliop, dst_addr, dst_vma);
+               ret = copy_user_large_folio(folio, *foliop,
+                                           ALIGN_DOWN(dst_addr, size), dst_vma);
                folio_put(*foliop);
                *foliop = NULL;
                if (ret) {
@@ -6805,9 +6804,8 @@ int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
 
        /* Add shared, newly allocated pages to the page cache. */
        if (vm_shared && !is_continue) {
-               size = i_size_read(mapping->host) >> huge_page_shift(h);
                ret = -EFAULT;
-               if (idx >= size)
+               if (idx >= (i_size_read(mapping->host) >> huge_page_shift(h)))
                        goto out_release_nounlock;
 
                /*
@@ -6864,7 +6862,7 @@ int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
        if (wp_enabled)
                _dst_pte = huge_pte_mkuffd_wp(_dst_pte);
 
-       set_huge_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte, huge_page_size(h));
+       set_huge_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte, size);
 
        hugetlb_count_add(pages_per_huge_page(h), dst_mm);
 
index fd4784920837bc099bd2824e69a7f7e72dc74567..8ebac8a70ab502df479274976a0666ab21221ae3 100644 (file)
@@ -6521,20 +6521,17 @@ static int copy_subpage(unsigned long addr, int idx, void *arg)
 int copy_user_large_folio(struct folio *dst, struct folio *src,
                          unsigned long addr_hint, struct vm_area_struct *vma)
 {
-       unsigned int pages_per_huge_page = folio_nr_pages(dst);
-       unsigned long addr = addr_hint &
-               ~(((unsigned long)pages_per_huge_page << PAGE_SHIFT) - 1);
+       unsigned int nr_pages = folio_nr_pages(dst);
        struct copy_subpage_arg arg = {
                .dst = dst,
                .src = src,
                .vma = vma,
        };
 
-       if (unlikely(pages_per_huge_page > MAX_ORDER_NR_PAGES))
-               return copy_user_gigantic_page(dst, src, addr, vma,
-                                              pages_per_huge_page);
+       if (unlikely(nr_pages > MAX_ORDER_NR_PAGES))
+               return copy_user_gigantic_page(dst, src, addr_hint, vma, nr_pages);
 
-       return process_huge_page(addr_hint, pages_per_huge_page, copy_subpage, &arg);
+       return process_huge_page(addr_hint, nr_pages, copy_subpage, &arg);
 }
 
 long copy_folio_from_user(struct folio *dst_folio,