int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
                struct vmem_altmap *altmap)
  {
-       unsigned long addr = start;
-       unsigned long next;
-       pgd_t *pgdp;
-       p4d_t *p4dp;
-       pud_t *pudp;
-       pmd_t *pmdp;
- 
        WARN_ON((start < VMEMMAP_START) || (end > VMEMMAP_END));
  
 -      if (!ARM64_KERNEL_USES_PMD_MAPS)
 +      if (!IS_ENABLED(CONFIG_ARM64_4K_PAGES))
                return vmemmap_populate_basepages(start, end, node, altmap);
- 
-       do {
-               next = pmd_addr_end(addr, end);
- 
-               pgdp = vmemmap_pgd_populate(addr, node);
-               if (!pgdp)
-                       return -ENOMEM;
- 
-               p4dp = vmemmap_p4d_populate(pgdp, addr, node);
-               if (!p4dp)
-                       return -ENOMEM;
- 
-               pudp = vmemmap_pud_populate(p4dp, addr, node);
-               if (!pudp)
-                       return -ENOMEM;
- 
-               pmdp = pmd_offset(pudp, addr);
-               if (pmd_none(READ_ONCE(*pmdp))) {
-                       void *p = NULL;
- 
-                       p = vmemmap_alloc_block_buf(PMD_SIZE, node, altmap);
-                       if (!p) {
-                               if (vmemmap_populate_basepages(addr, next, node, altmap))
-                                       return -ENOMEM;
-                               continue;
-                       }
- 
-                       pmd_set_huge(pmdp, __pa(p), __pgprot(PROT_SECT_NORMAL));
-               } else
-                       vmemmap_verify((pte_t *)pmdp, node, addr, next);
-       } while (addr = next, addr != end);
- 
-       return 0;
+       else
+               return vmemmap_populate_hugepages(start, end, node, altmap);
  }
  
  #ifdef CONFIG_MEMORY_HOTPLUG
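
The PMD-level loop removed above is what the generic vmemmap_populate_hugepages() now performs; only the arch-specific steps remain with arm64, supplied through the vmemmap_set_pmd()/vmemmap_check_pmd() hooks that the generic helper invokes. A sketch of those hooks, reusing the two arch-specific calls from the deleted loop (the exact prototypes are assumed from the generic helper rather than quoted from this hunk):

/*
 * Sketch: arm64 hooks consumed by the generic vmemmap_populate_hugepages().
 * The bodies reuse the arch-specific calls from the loop removed above;
 * treat the exact signatures as an assumption.
 */
void __meminit vmemmap_set_pmd(pmd_t *pmdp, void *p, int node,
                               unsigned long addr, unsigned long next)
{
        /* Map the freshly allocated PMD_SIZE buffer with a section mapping. */
        pmd_set_huge(pmdp, __pa(p), __pgprot(PROT_SECT_NORMAL));
}

int __meminit vmemmap_check_pmd(pmd_t *pmdp, int node,
                                unsigned long addr, unsigned long next)
{
        /* An already-populated PMD is only re-verified, as the old else branch did. */
        vmemmap_verify((pte_t *)pmdp, node, addr, next);
        return 1;
}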
 
   *
   * This function takes care of grabbing mmap_lock as necessary.
   */
 -int get_vaddr_frames(unsigned long start, unsigned int nr_frames,
 +int get_vaddr_frames(unsigned long start, unsigned int nr_frames, bool write,
                     struct frame_vector *vec)
  {
 -      struct mm_struct *mm = current->mm;
 -      struct vm_area_struct *vma;
 -      int ret_pin_user_pages_fast = 0;
 -      int ret = 0;
 -      int err;
 +      int ret;
-       unsigned int gup_flags = FOLL_FORCE | FOLL_LONGTERM;
+       unsigned int gup_flags = FOLL_LONGTERM;
  
        if (nr_frames == 0)
                return 0;
 
        return false;
  }
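
With FOLL_FORCE gone, callers now state the access mode through the new write parameter, which get_vaddr_frames() is expected to translate into FOLL_WRITE before pinning. A minimal caller sketch, assuming write is derived from the buffer's DMA direction in the way the videobuf2 helpers decide writability (the function name and exact condition are illustrative, not quoted from the patch):

/*
 * Sketch: pin a user-space buffer with the new get_vaddr_frames() signature.
 * 'write' must be true whenever the device will write into the memory,
 * e.g. DMA_FROM_DEVICE or DMA_BIDIRECTIONAL mappings (assumption).
 */
static struct frame_vector *pin_user_buffer(unsigned long start,
                                            unsigned long length,
                                            enum dma_data_direction dma_dir)
{
        bool write = dma_dir == DMA_FROM_DEVICE || dma_dir == DMA_BIDIRECTIONAL;
        unsigned long first = start >> PAGE_SHIFT;
        unsigned long last = (start + length - 1) >> PAGE_SHIFT;
        unsigned int nr_frames = last - first + 1;
        struct frame_vector *vec;
        int ret;

        vec = frame_vector_create(nr_frames);
        if (!vec)
                return ERR_PTR(-ENOMEM);

        ret = get_vaddr_frames(start & PAGE_MASK, nr_frames, write, vec);
        if (ret < 0) {
                frame_vector_destroy(vec);
                return ERR_PTR(ret);
        }
        return vec;
}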
  
+ struct page *hugetlb_follow_page_mask(struct vm_area_struct *vma,
+                               unsigned long address, unsigned int flags)
+ {
+       struct hstate *h = hstate_vma(vma);
+       struct mm_struct *mm = vma->vm_mm;
+       unsigned long haddr = address & huge_page_mask(h);
+       struct page *page = NULL;
+       spinlock_t *ptl;
+       pte_t *pte, entry;
+ 
+       /*
+        * FOLL_PIN is not supported for follow_page(). Ordinary GUP goes via
+        * follow_hugetlb_page().
+        */
+       if (WARN_ON_ONCE(flags & FOLL_PIN))
+               return NULL;
+ 
+ retry:
+       pte = huge_pte_offset(mm, haddr, huge_page_size(h));
+       if (!pte)
+               return NULL;
+ 
+       ptl = huge_pte_lock(h, mm, pte);
+       entry = huge_ptep_get(pte);
+       if (pte_present(entry)) {
+               page = pte_page(entry) +
+                               ((address & ~huge_page_mask(h)) >> PAGE_SHIFT);
+               /*
+                * Note that page may be a sub-page, and with vmemmap
+                * optimizations the page struct may be read only.
+                * try_grab_page() will increase the ref count on the
+                * head page, so this will be OK.
+                *
-                * try_grab_page() should always succeed here, because we hold
-                * the ptl lock and have verified pte_present().
+                * try_grab_page() should always be able to get the page here,
+                * because we hold the ptl lock and have verified pte_present().
+                */
-               if (WARN_ON_ONCE(!try_grab_page(page, flags))) {
+               if (try_grab_page(page, flags)) {
+                       page = NULL;
+                       goto out;
+               }
+       } else {
+               if (is_hugetlb_entry_migration(entry)) {
+                       spin_unlock(ptl);
+                       __migration_entry_wait_huge(pte, ptl);
+                       goto retry;
+               }
+               /*
+                * hwpoisoned entry is treated as no_page_table in
+                * follow_page_mask().
+                */
+       }
+ out:
+       spin_unlock(ptl);
+       return page;
+ }
+ 
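
For context, follow_page_mask() is the intended caller of this helper on the follow_page() path; the hugetlb case is expected to short-circuit before the normal page-table walk, roughly as below (reconstructed sketch, not quoted from this hunk):

        /*
         * Sketch of the dispatch in follow_page_mask(): hugetlb VMAs are handled
         * entirely by the new helper, and a NULL result is folded back into the
         * usual no_page_table() convention so callers see unchanged behaviour.
         */
        if (is_vm_hugetlb_page(vma)) {
                page = hugetlb_follow_page_mask(vma, address, flags);
                if (!page)
                        page = no_page_table(vma, flags);
                return page;
        }
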
  long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
                         struct page **pages, struct vm_area_struct **vmas,
                         unsigned long *position, unsigned long *nr_pages,