The page is only used to get the mapping, so the folio will do just as
well.  Both callers already have a folio available, so this saves a call
to compound_head().
Link: https://lkml.kernel.org/r/20240412193510.2356957-7-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Jane Chu <jane.chu@oracle.com>
Reviewed-by: Oscar Salvador <osalvador@suse.de>
Acked-by: Miaohe Lin <linmiaohe@huawei.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
 
                                 struct vm_area_struct *vma,
                                 unsigned long address);
 
-struct address_space *hugetlb_page_mapping_lock_write(struct page *hpage);
+struct address_space *hugetlb_folio_mapping_lock_write(struct folio *folio);
 
 extern int sysctl_hugetlb_shm_group;
 extern struct list_head huge_boot_pages[MAX_NUMNODES];
        return 0;
 }
 
-static inline struct address_space *hugetlb_page_mapping_lock_write(
-                                                       struct page *hpage)
+static inline struct address_space *hugetlb_folio_mapping_lock_write(
+                                                       struct folio *folio)
 {
        return NULL;
 }
 
 /*
  * Find and lock address space (mapping) in write mode.
  *
- * Upon entry, the page is locked which means that page_mapping() is
+ * Upon entry, the folio is locked which means that folio_mapping() is
  * stable.  Due to locking order, we can only trylock_write.  If we can
  * not get the lock, simply return NULL to caller.
  */
-struct address_space *hugetlb_page_mapping_lock_write(struct page *hpage)
+struct address_space *hugetlb_folio_mapping_lock_write(struct folio *folio)
 {
-       struct address_space *mapping = page_mapping(hpage);
+       struct address_space *mapping = folio_mapping(folio);
 
        if (!mapping)
                return mapping;
 
                 * TTU_RMAP_LOCKED to indicate we have taken the lock
                 * at this higher level.
                 */
-               mapping = hugetlb_page_mapping_lock_write(hpage);
+               mapping = hugetlb_folio_mapping_lock_write(folio);
                if (mapping) {
                        try_to_unmap(folio, ttu|TTU_RMAP_LOCKED);
                        i_mmap_unlock_write(mapping);
 
                         * semaphore in write mode here and set TTU_RMAP_LOCKED
                         * to let lower levels know we have taken the lock.
                         */
-                       mapping = hugetlb_page_mapping_lock_write(&src->page);
+                       mapping = hugetlb_folio_mapping_lock_write(src);
                        if (unlikely(!mapping))
                                goto unlock_put_anon;