 int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *,
                             struct vm_area_struct *, struct vm_area_struct *);
 void unmap_hugepage_range(struct vm_area_struct *,
-                         unsigned long, unsigned long, struct page *,
-                         zap_flags_t);
+                         unsigned long start, unsigned long end,
+                         struct folio *, zap_flags_t);
 void __unmap_hugepage_range(struct mmu_gather *tlb,
                          struct vm_area_struct *vma,
                          unsigned long start, unsigned long end,
                          struct page *ref_page, zap_flags_t zap_flags);
 
 }
 
 void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
-                         unsigned long end, struct page *ref_page,
+                         unsigned long end, struct folio *folio,
                          zap_flags_t zap_flags)
 {
        struct mmu_notifier_range range;
        struct mmu_gather tlb;
 
        mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm,
                                start, end);
        adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end);
        mmu_notifier_invalidate_range_start(&range);
        tlb_gather_mmu(&tlb, vma->vm_mm);
 
-       __unmap_hugepage_range(&tlb, vma, start, end, ref_page, zap_flags);
+       __unmap_hugepage_range(&tlb, vma, start, end,
+                              &folio->page, zap_flags);
 
        mmu_notifier_invalidate_range_end(&range);
        tlb_finish_mmu(&tlb);
 }
 
                if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER))
                        unmap_hugepage_range(iter_vma, address,
                                             address + huge_page_size(h),
-                                            &folio->page, 0);
+                                            folio, 0);
        }
        i_mmap_unlock_write(mapping);
 }
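
The caller-side conversion is mechanical: pass the folio itself to unmap_hugepage_range() instead of &folio->page, while unmap_hugepage_range() keeps feeding __unmap_hugepage_range() a struct page * for now. The sketch below is illustrative only and not part of the patch; example_zap_one_hugepage() is a hypothetical helper showing the new folio-based prototype in use.

#include <linux/hugetlb.h>
#include <linux/mm.h>

/*
 * Hypothetical caller (not from the patch): unmap the hugetlb mapping of
 * one folio from a VMA using the folio-based prototype above.
 */
static void example_zap_one_hugepage(struct vm_area_struct *vma,
                                     struct folio *folio,
                                     unsigned long address)
{
        struct hstate *h = hstate_vma(vma);     /* hugetlb page size for this VMA */

        /* Old style: unmap_hugepage_range(vma, address,
         *            address + huge_page_size(h), &folio->page, 0);
         */
        unmap_hugepage_range(vma, address, address + huge_page_size(h),
                             folio, 0);
}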