mm: drop hugetlb_get_unmapped_area{_*} functions
author    Oscar Salvador <osalvador@suse.de>
Mon, 7 Oct 2024 07:50:35 +0000 (09:50 +0200)
committer Andrew Morton <akpm@linux-foundation.org>
Thu, 7 Nov 2024 04:11:10 +0000 (20:11 -0800)
Hugetlb mappings are now handled through normal channels just like any
other mapping, so we no longer need the hugetlb_get_unmapped_area*-specific
functions.

Link: https://lkml.kernel.org/r/20241007075037.267650-8-osalvador@suse.de
Signed-off-by: Oscar Salvador <osalvador@suse.de>
Cc: David Hildenbrand <david@redhat.com>
Cc: Donet Tom <donettom@linux.ibm.com>
Cc: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Peter Xu <peterx@redhat.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
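
With the per-architecture helpers gone, every hugetlbfs mapping request goes
through the single implementation kept in fs/hugetlbfs/inode.c (renamed from
__hugetlb_get_unmapped_area in the diff below): it rounds a non-zero hint
address up to the huge page boundary and hands the request to
mm_get_unmapped_area_vmflags(), the same path ordinary mappings take. A
condensed sketch of the resulting function follows; the lines elided between
the two inode.c hunks are filled in from the pre-rename body, so treat them as
an approximation rather than the exact upstream source.

unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
                          unsigned long len, unsigned long pgoff,
                          unsigned long flags)
{
        unsigned long addr0 = 0;
        struct hstate *h = hstate_file(file);

        /* Reject lengths that are not a multiple of the huge page size
         * (assumed from the pre-rename helper; elided from the hunks below). */
        if (len & ~huge_page_mask(h))
                return -EINVAL;

        /* Align a caller-supplied hint to the huge page size. */
        if (addr)
                addr0 = ALIGN(addr, huge_page_size(h));

        /* Delegate placement to the common mmap path; top-down vs. bottom-up
         * placement is decided there, which is what the deleted per-arch
         * copies used to open-code. */
        return mm_get_unmapped_area_vmflags(current->mm, file, addr0, len, pgoff,
                                            flags, 0);
}

hugetlbfs_file_operations then points .get_unmapped_area at this function
directly, as the last fs/hugetlbfs/inode.c hunk shows.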
arch/parisc/mm/hugetlbpage.c
arch/powerpc/mm/book3s64/slice.c
arch/s390/mm/hugetlbpage.c
arch/sparc/mm/hugetlbpage.c
arch/x86/mm/hugetlbpage.c
fs/hugetlbfs/inode.c
include/linux/hugetlb.h

arch/parisc/mm/hugetlbpage.c
index aa664f7ddb63986cdc0ad61ad7b4a0457f919e41..e9d18cf25b792bac806ca22d7e44d700a7dfd78d 100644 (file)
 #include <asm/mmu_context.h>
 
 
-unsigned long
-hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
-               unsigned long len, unsigned long pgoff, unsigned long flags)
-{
-       struct hstate *h = hstate_file(file);
-
-       if (len & ~huge_page_mask(h))
-               return -EINVAL;
-       if (len > TASK_SIZE)
-               return -ENOMEM;
-
-       if (flags & MAP_FIXED)
-               if (prepare_hugepage_range(file, addr, len))
-                       return -EINVAL;
-
-       if (addr)
-               addr = ALIGN(addr, huge_page_size(h));
-
-       /* we need to make sure the colouring is OK */
-       return arch_get_unmapped_area(file, addr, len, pgoff, flags, 0);
-}
 
 
 pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
arch/powerpc/mm/book3s64/slice.c
index 3a858f6b727012c5e16f77a478e5bbd85ed2d63f..bc9a39821d1c6bedcc22cf014e9862f0d860ace4 100644 (file)
@@ -814,14 +814,4 @@ unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
 
        return 1UL << mmu_psize_to_shift(get_slice_psize(vma->vm_mm, vma->vm_start));
 }
-
-unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
-                                       unsigned long len, unsigned long pgoff,
-                                       unsigned long flags)
-{
-       if (radix_enabled())
-               return generic_hugetlb_get_unmapped_area(file, addr, len, pgoff, flags);
-
-       return slice_get_unmapped_area(addr, len, flags, file_to_psize(file), 1);
-}
 #endif
arch/s390/mm/hugetlbpage.c
index ded0eff58a192a60d460a8e6eda6e9d30a94667d..7c79cf1bc7d7601f89b26c1da561a21f3c4de337 100644 (file)
@@ -242,88 +242,3 @@ bool __init arch_hugetlb_valid_size(unsigned long size)
        else
                return false;
 }
-
-static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
-               unsigned long addr, unsigned long len,
-               unsigned long pgoff, unsigned long flags)
-{
-       struct hstate *h = hstate_file(file);
-       struct vm_unmapped_area_info info = {};
-
-       info.length = len;
-       info.low_limit = current->mm->mmap_base;
-       info.high_limit = TASK_SIZE;
-       info.align_mask = PAGE_MASK & ~huge_page_mask(h);
-       return vm_unmapped_area(&info);
-}
-
-static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
-               unsigned long addr0, unsigned long len,
-               unsigned long pgoff, unsigned long flags)
-{
-       struct hstate *h = hstate_file(file);
-       struct vm_unmapped_area_info info = {};
-       unsigned long addr;
-
-       info.flags = VM_UNMAPPED_AREA_TOPDOWN;
-       info.length = len;
-       info.low_limit = PAGE_SIZE;
-       info.high_limit = current->mm->mmap_base;
-       info.align_mask = PAGE_MASK & ~huge_page_mask(h);
-       addr = vm_unmapped_area(&info);
-
-       /*
-        * A failed mmap() very likely causes application failure,
-        * so fall back to the bottom-up function here. This scenario
-        * can happen with large stack limits and large mmap()
-        * allocations.
-        */
-       if (addr & ~PAGE_MASK) {
-               VM_BUG_ON(addr != -ENOMEM);
-               info.flags = 0;
-               info.low_limit = TASK_UNMAPPED_BASE;
-               info.high_limit = TASK_SIZE;
-               addr = vm_unmapped_area(&info);
-       }
-
-       return addr;
-}
-
-unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
-               unsigned long len, unsigned long pgoff, unsigned long flags)
-{
-       struct hstate *h = hstate_file(file);
-       struct mm_struct *mm = current->mm;
-       struct vm_area_struct *vma;
-
-       if (len & ~huge_page_mask(h))
-               return -EINVAL;
-       if (len > TASK_SIZE - mmap_min_addr)
-               return -ENOMEM;
-
-       if (flags & MAP_FIXED) {
-               if (prepare_hugepage_range(file, addr, len))
-                       return -EINVAL;
-               goto check_asce_limit;
-       }
-
-       if (addr) {
-               addr = ALIGN(addr, huge_page_size(h));
-               vma = find_vma(mm, addr);
-               if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
-                   (!vma || addr + len <= vm_start_gap(vma)))
-                       goto check_asce_limit;
-       }
-
-       if (!test_bit(MMF_TOPDOWN, &mm->flags))
-               addr = hugetlb_get_unmapped_area_bottomup(file, addr, len,
-                               pgoff, flags);
-       else
-               addr = hugetlb_get_unmapped_area_topdown(file, addr, len,
-                               pgoff, flags);
-       if (offset_in_page(addr))
-               return addr;
-
-check_asce_limit:
-       return check_asce_limit(mm, addr, len);
-}
arch/sparc/mm/hugetlbpage.c
index cc91ca7a1e182cf6f6de2c96be13c0e7e248bed1..eee601a0d2cfb0d426529f38b06b62c546840dbf 100644 (file)
 #include <asm/cacheflush.h>
 #include <asm/mmu_context.h>
 
-/* Slightly simplified from the non-hugepage variant because by
- * definition we don't have to worry about any page coloring stuff
- */
-
-static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
-                                                       unsigned long addr,
-                                                       unsigned long len,
-                                                       unsigned long pgoff,
-                                                       unsigned long flags)
-{
-       struct hstate *h = hstate_file(filp);
-       unsigned long task_size = TASK_SIZE;
-       struct vm_unmapped_area_info info = {};
-
-       if (test_thread_flag(TIF_32BIT))
-               task_size = STACK_TOP32;
-
-       info.length = len;
-       info.low_limit = TASK_UNMAPPED_BASE;
-       info.high_limit = min(task_size, VA_EXCLUDE_START);
-       info.align_mask = PAGE_MASK & ~huge_page_mask(h);
-       addr = vm_unmapped_area(&info);
-
-       if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
-               VM_BUG_ON(addr != -ENOMEM);
-               info.low_limit = VA_EXCLUDE_END;
-               info.high_limit = task_size;
-               addr = vm_unmapped_area(&info);
-       }
-
-       return addr;
-}
-
-static unsigned long
-hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
-                                 const unsigned long len,
-                                 const unsigned long pgoff,
-                                 const unsigned long flags)
-{
-       struct hstate *h = hstate_file(filp);
-       struct mm_struct *mm = current->mm;
-       unsigned long addr = addr0;
-       struct vm_unmapped_area_info info = {};
-
-       /* This should only ever run for 32-bit processes.  */
-       BUG_ON(!test_thread_flag(TIF_32BIT));
-
-       info.flags = VM_UNMAPPED_AREA_TOPDOWN;
-       info.length = len;
-       info.low_limit = PAGE_SIZE;
-       info.high_limit = mm->mmap_base;
-       info.align_mask = PAGE_MASK & ~huge_page_mask(h);
-       addr = vm_unmapped_area(&info);
-
-       /*
-        * A failed mmap() very likely causes application failure,
-        * so fall back to the bottom-up function here. This scenario
-        * can happen with large stack limits and large mmap()
-        * allocations.
-        */
-       if (addr & ~PAGE_MASK) {
-               VM_BUG_ON(addr != -ENOMEM);
-               info.flags = 0;
-               info.low_limit = TASK_UNMAPPED_BASE;
-               info.high_limit = STACK_TOP32;
-               addr = vm_unmapped_area(&info);
-       }
-
-       return addr;
-}
-
-unsigned long
-hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
-               unsigned long len, unsigned long pgoff, unsigned long flags)
-{
-       struct hstate *h = hstate_file(file);
-       struct mm_struct *mm = current->mm;
-       struct vm_area_struct *vma;
-       unsigned long task_size = TASK_SIZE;
-
-       if (test_thread_flag(TIF_32BIT))
-               task_size = STACK_TOP32;
-
-       if (len & ~huge_page_mask(h))
-               return -EINVAL;
-       if (len > task_size)
-               return -ENOMEM;
-
-       if (flags & MAP_FIXED) {
-               if (prepare_hugepage_range(file, addr, len))
-                       return -EINVAL;
-               return addr;
-       }
-
-       if (addr) {
-               addr = ALIGN(addr, huge_page_size(h));
-               vma = find_vma(mm, addr);
-               if (task_size - len >= addr &&
-                   (!vma || addr + len <= vm_start_gap(vma)))
-                       return addr;
-       }
-       if (!test_bit(MMF_TOPDOWN, &mm->flags))
-               return hugetlb_get_unmapped_area_bottomup(file, addr, len,
-                               pgoff, flags);
-       else
-               return hugetlb_get_unmapped_area_topdown(file, addr, len,
-                               pgoff, flags);
-}
 
 static pte_t sun4u_hugepage_shift_to_tte(pte_t entry, unsigned int shift)
 {
arch/x86/mm/hugetlbpage.c
index 807a5859a3c4b3e64c57f9d3f15aa9cc9b034540..58f7f2bd535d5931072d370c461537c2e77996ae 100644 (file)
 #include <asm/tlbflush.h>
 #include <asm/elf.h>
 
-#ifdef CONFIG_HUGETLB_PAGE
-static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
-               unsigned long addr, unsigned long len,
-               unsigned long pgoff, unsigned long flags)
-{
-       struct hstate *h = hstate_file(file);
-       struct vm_unmapped_area_info info = {};
-
-       info.length = len;
-       info.low_limit = get_mmap_base(1);
-
-       /*
-        * If hint address is above DEFAULT_MAP_WINDOW, look for unmapped area
-        * in the full address space.
-        */
-       info.high_limit = in_32bit_syscall() ?
-               task_size_32bit() : task_size_64bit(addr > DEFAULT_MAP_WINDOW);
-
-       info.align_mask = PAGE_MASK & ~huge_page_mask(h);
-       return vm_unmapped_area(&info);
-}
-
-static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
-               unsigned long addr, unsigned long len,
-               unsigned long pgoff, unsigned long flags)
-{
-       struct hstate *h = hstate_file(file);
-       struct vm_unmapped_area_info info = {};
-
-       info.flags = VM_UNMAPPED_AREA_TOPDOWN;
-       info.length = len;
-       info.low_limit = PAGE_SIZE;
-       info.high_limit = get_mmap_base(0);
-
-       /*
-        * If hint address is above DEFAULT_MAP_WINDOW, look for unmapped area
-        * in the full address space.
-        */
-       if (addr > DEFAULT_MAP_WINDOW && !in_32bit_syscall())
-               info.high_limit += TASK_SIZE_MAX - DEFAULT_MAP_WINDOW;
-
-       info.align_mask = PAGE_MASK & ~huge_page_mask(h);
-       addr = vm_unmapped_area(&info);
-
-       /*
-        * A failed mmap() very likely causes application failure,
-        * so fall back to the bottom-up function here. This scenario
-        * can happen with large stack limits and large mmap()
-        * allocations.
-        */
-       if (addr & ~PAGE_MASK) {
-               VM_BUG_ON(addr != -ENOMEM);
-               info.flags = 0;
-               info.low_limit = TASK_UNMAPPED_BASE;
-               info.high_limit = TASK_SIZE_LOW;
-               addr = vm_unmapped_area(&info);
-       }
-
-       return addr;
-}
-
-unsigned long
-hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
-               unsigned long len, unsigned long pgoff, unsigned long flags)
-{
-       struct hstate *h = hstate_file(file);
-       struct mm_struct *mm = current->mm;
-       struct vm_area_struct *vma;
-
-       if (len & ~huge_page_mask(h))
-               return -EINVAL;
-
-       if (len > TASK_SIZE)
-               return -ENOMEM;
-
-       /* No address checking. See comment at mmap_address_hint_valid() */
-       if (flags & MAP_FIXED) {
-               if (prepare_hugepage_range(file, addr, len))
-                       return -EINVAL;
-               return addr;
-       }
-
-       if (addr) {
-               addr &= huge_page_mask(h);
-               if (!mmap_address_hint_valid(addr, len))
-                       goto get_unmapped_area;
-
-               vma = find_vma(mm, addr);
-               if (!vma || addr + len <= vm_start_gap(vma))
-                       return addr;
-       }
-
-get_unmapped_area:
-       if (!test_bit(MMF_TOPDOWN, &mm->flags))
-               return hugetlb_get_unmapped_area_bottomup(file, addr, len,
-                               pgoff, flags);
-       else
-               return hugetlb_get_unmapped_area_topdown(file, addr, len,
-                               pgoff, flags);
-}
-#endif /* CONFIG_HUGETLB_PAGE */
 
 #ifdef CONFIG_X86_64
 bool __init arch_hugetlb_valid_size(unsigned long size)
fs/hugetlbfs/inode.c
index 2c5f34e315d253cf563a70c2ca7abf0211fb9669..935c0ed3aa1ea8d27a9af21b17b73bd697c9aa7d 100644 (file)
@@ -171,96 +171,10 @@ out:
  * Called under mmap_write_lock(mm).
  */
 
-static unsigned long
-hugetlb_get_unmapped_area_bottomup(struct file *file, unsigned long addr,
-               unsigned long len, unsigned long pgoff, unsigned long flags)
-{
-       struct hstate *h = hstate_file(file);
-       struct vm_unmapped_area_info info = {};
-
-       info.length = len;
-       info.low_limit = current->mm->mmap_base;
-       info.high_limit = arch_get_mmap_end(addr, len, flags);
-       info.align_mask = PAGE_MASK & ~huge_page_mask(h);
-       return vm_unmapped_area(&info);
-}
-
-static unsigned long
-hugetlb_get_unmapped_area_topdown(struct file *file, unsigned long addr,
-               unsigned long len, unsigned long pgoff, unsigned long flags)
-{
-       struct hstate *h = hstate_file(file);
-       struct vm_unmapped_area_info info = {};
-
-       info.flags = VM_UNMAPPED_AREA_TOPDOWN;
-       info.length = len;
-       info.low_limit = PAGE_SIZE;
-       info.high_limit = arch_get_mmap_base(addr, current->mm->mmap_base);
-       info.align_mask = PAGE_MASK & ~huge_page_mask(h);
-       addr = vm_unmapped_area(&info);
-
-       /*
-        * A failed mmap() very likely causes application failure,
-        * so fall back to the bottom-up function here. This scenario
-        * can happen with large stack limits and large mmap()
-        * allocations.
-        */
-       if (unlikely(offset_in_page(addr))) {
-               VM_BUG_ON(addr != -ENOMEM);
-               info.flags = 0;
-               info.low_limit = current->mm->mmap_base;
-               info.high_limit = arch_get_mmap_end(addr, len, flags);
-               addr = vm_unmapped_area(&info);
-       }
-
-       return addr;
-}
-
-unsigned long
-generic_hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
-                                 unsigned long len, unsigned long pgoff,
-                                 unsigned long flags)
-{
-       struct mm_struct *mm = current->mm;
-       struct vm_area_struct *vma, *prev;
-       struct hstate *h = hstate_file(file);
-       const unsigned long mmap_end = arch_get_mmap_end(addr, len, flags);
-
-       if (len & ~huge_page_mask(h))
-               return -EINVAL;
-       if (len > mmap_end - mmap_min_addr)
-               return -ENOMEM;
-
-       if (flags & MAP_FIXED) {
-               if (prepare_hugepage_range(file, addr, len))
-                       return -EINVAL;
-               return addr;
-       }
-
-       if (addr) {
-               addr = ALIGN(addr, huge_page_size(h));
-               vma = find_vma_prev(mm, addr, &prev);
-               if (mmap_end - len >= addr && addr >= mmap_min_addr &&
-                   (!vma || addr + len <= vm_start_gap(vma)) &&
-                   (!prev || addr >= vm_end_gap(prev)))
-                       return addr;
-       }
-
-       /*
-        * Use MMF_TOPDOWN flag as a hint to use topdown routine.
-        * If architectures have special needs, they should define their own
-        * version of hugetlb_get_unmapped_area.
-        */
-       if (test_bit(MMF_TOPDOWN, &mm->flags))
-               return hugetlb_get_unmapped_area_topdown(file, addr, len,
-                               pgoff, flags);
-       return hugetlb_get_unmapped_area_bottomup(file, addr, len,
-                       pgoff, flags);
-}
-
 unsigned long
-__hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
-                           unsigned long len, unsigned long flags)
+hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
+                           unsigned long len, unsigned long pgoff,
+                           unsigned long flags)
 {
        unsigned long addr0 = 0;
        struct hstate *h = hstate_file(file);
@@ -272,7 +186,7 @@ __hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
        if (addr)
                addr0 = ALIGN(addr, huge_page_size(h));
 
-       return mm_get_unmapped_area_vmflags(current->mm, file, addr, len, pgoff,
+       return mm_get_unmapped_area_vmflags(current->mm, file, addr0, len, pgoff,
                                            flags, 0);
 }
 
@@ -1308,7 +1222,7 @@ static const struct file_operations hugetlbfs_file_operations = {
        .read_iter              = hugetlbfs_read_iter,
        .mmap                   = hugetlbfs_file_mmap,
        .fsync                  = noop_fsync,
-       .get_unmapped_area      = __hugetlb_get_unmapped_area,
+       .get_unmapped_area      = hugetlb_get_unmapped_area,
        .llseek                 = default_llseek,
        .fallocate              = hugetlbfs_fallocate,
        .fop_flags              = FOP_HUGE_PAGES,
include/linux/hugetlb.h
index 3a81b6126f623e73b6bd427da66438b76dd5ad96..ae4fe8615bb6e75ae47cc06727904dc78a34155c 100644 (file)
@@ -547,15 +547,10 @@ static inline struct hstate *hstate_inode(struct inode *i)
 #endif /* !CONFIG_HUGETLBFS */
 
 unsigned long
-__generic_hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
+hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
                                    unsigned long len, unsigned long pgoff,
                                    unsigned long flags);
 
-unsigned long
-generic_hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
-                                 unsigned long len, unsigned long pgoff,
-                                 unsigned long flags);
-
 /*
  * huegtlb page specific state flags.  These flags are located in page.private
  * of the hugetlb head page.  Functions created via the below macros should be