From d9ef44de5d731e1a1fa94ddb5e39ea1b308b1456 Mon Sep 17 00:00:00 2001
From: "Matthew Wilcox (Oracle)"
Date: Wed, 1 Jun 2022 15:11:01 -0400
Subject: [PATCH] hugetlb: Convert huge_add_to_page_cache() to use a folio

Remove the last caller of add_to_page_cache()

Signed-off-by: Matthew Wilcox (Oracle)
Reviewed-by: Mike Kravetz
Reviewed-by: Muchun Song
---
 fs/hugetlbfs/inode.c |  2 +-
 mm/hugetlb.c         | 14 ++++++++++----
 2 files changed, 11 insertions(+), 5 deletions(-)

diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 62408047e8d7b..ae2524480f239 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -759,7 +759,7 @@ static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
 
 		SetHPageMigratable(page);
 		/*
-		 * unlock_page because locked by add_to_page_cache()
+		 * unlock_page because locked by huge_add_to_page_cache()
 		 * put_page() due to reference from alloc_huge_page()
 		 */
 		unlock_page(page);
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index a57e1be41401b..33b2c27e7c61e 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -5414,19 +5414,25 @@ static bool hugetlbfs_pagecache_present(struct hstate *h,
 int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
 			   pgoff_t idx)
 {
+	struct folio *folio = page_folio(page);
 	struct inode *inode = mapping->host;
 	struct hstate *h = hstate_inode(inode);
-	int err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
+	int err;
 
-	if (err)
+	__folio_set_locked(folio);
+	err = __filemap_add_folio(mapping, folio, idx, GFP_KERNEL, NULL);
+
+	if (unlikely(err)) {
+		__folio_clear_locked(folio);
 		return err;
+	}
 	ClearHPageRestoreReserve(page);
 
 	/*
-	 * set page dirty so that it will not be removed from cache/file
+	 * mark folio dirty so that it will not be removed from cache/file
 	 * by non-hugetlbfs specific code paths.
 	 */
-	set_page_dirty(page);
+	folio_mark_dirty(folio);
 
 	spin_lock(&inode->i_lock);
 	inode->i_blocks += blocks_per_huge_page(h);
-- 
2.49.0