{
struct address_space *mapping = file->f_mapping;
struct folio *newf;
- struct page *hpage;
pgoff_t index = 0, end = start + HPAGE_PMD_NR;
LIST_HEAD(pagelist);
XA_STATE_ORDER(xas, &mapping->i_pages, start, HPAGE_PMD_ORDER);
result = alloc_charge_hpage(&newf, mm, cc);
if (result != SCAN_SUCCEED)
goto out;
- hpage = &newf->page;
/*
* Ensure we have slots for all the pages in the range. This is
}
} while (1);
- __SetPageLocked(hpage);
+ __folio_set_locked(newf);
if (is_shmem)
- __SetPageSwapBacked(hpage);
- hpage->index = start;
- hpage->mapping = mapping;
+ __folio_set_swapbacked(newf);
+ newf->index = start;
+ newf->mapping = mapping;
/*
- * At this point the hpage is locked and not up-to-date.
+ * At this point the folio is locked and not up-to-date.
* It's safe to insert it into the page cache, because nobody would
* be able to map it or use it in another way until we unlock it.
*/
result = SCAN_FAIL;
goto xa_locked;
}
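+ /* No old page at this index: install the new folio and count the hole in nr_none. */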
- xas_store(&xas, hpage);
+ xas_store(&xas, newf);
nr_none++;
continue;
}
*/
list_add_tail(&page->lru, &pagelist);
- /* Finally, replace with the new page. */
- xas_store(&xas, hpage);
+ /* Finally, replace with the new folio. */
+ xas_store(&xas, newf);
continue;
out_unlock:
unlock_page(page);
put_page(page);
goto xa_unlocked;
}
- nr = thp_nr_pages(hpage);
+ nr = folio_nr_pages(newf);
if (is_shmem)
- __mod_lruvec_page_state(hpage, NR_SHMEM_THPS, nr);
+ __lruvec_stat_mod_folio(newf, NR_SHMEM_THPS, nr);
else {
- __mod_lruvec_page_state(hpage, NR_FILE_THPS, nr);
+ __lruvec_stat_mod_folio(newf, NR_FILE_THPS, nr);
filemap_nr_thps_inc(mapping);
/*
* Paired with smp_mb() in do_dentry_open() to ensure
smp_mb();
if (inode_is_open_for_write(mapping->host)) {
result = SCAN_FAIL;
- __mod_lruvec_page_state(hpage, NR_FILE_THPS, -nr);
+ __lruvec_stat_mod_folio(newf, NR_FILE_THPS, -nr);
filemap_nr_thps_dec(mapping);
goto xa_locked;
}
}
if (nr_none) {
- __mod_lruvec_page_state(hpage, NR_FILE_PAGES, nr_none);
+ __lruvec_stat_mod_folio(newf, NR_FILE_PAGES, nr_none);
/* nr_none is always 0 for non-shmem. */
- __mod_lruvec_page_state(hpage, NR_SHMEM, nr_none);
+ __lruvec_stat_mod_folio(newf, NR_SHMEM, nr_none);
}
/* Join all the small entries into a single multi-index entry */
xas_set_order(&xas, start, HPAGE_PMD_ORDER);
- xas_store(&xas, hpage);
+ xas_store(&xas, newf);
xa_locked:
xas_unlock_irq(&xas);
xa_unlocked:
if (result == SCAN_SUCCEED) {
struct page *page, *tmp;
- struct folio *folio;
/*
* Replacing old pages with new one has succeeded, now we
*/
index = start;
list_for_each_entry_safe(page, tmp, &pagelist, lru) {
+ struct page *new_page = folio_page(newf,
+ index % HPAGE_PMD_NR);
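+ /* Subpages with no old page to copy from (holes) are simply cleared. */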
while (index < page->index) {
- clear_highpage(hpage + (index % HPAGE_PMD_NR));
+ clear_highpage(new_page);
index++;
+ new_page++;
}
- copy_highpage(hpage + (page->index % HPAGE_PMD_NR),
- page);
+ copy_highpage(new_page, page);
list_del(&page->lru);
page->mapping = NULL;
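+ /* The old page's refcount was frozen during the collapse; restore a single reference before it is released. */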
page_ref_unfreeze(page, 1);
index++;
}
while (index < end) {
- clear_highpage(hpage + (index % HPAGE_PMD_NR));
+ clear_highpage(folio_page(newf, index % HPAGE_PMD_NR));
index++;
}
- folio = page_folio(hpage);
- folio_mark_uptodate(folio);
- folio_ref_add(folio, HPAGE_PMD_NR - 1);
+ folio_mark_uptodate(newf);
+ folio_ref_add(newf, HPAGE_PMD_NR - 1);
if (is_shmem)
- folio_mark_dirty(folio);
- folio_add_lru(folio);
+ folio_mark_dirty(newf);
+ folio_add_lru(newf);
/*
* Remove pte page tables, so we can re-fault the page as huge.
*/
- result = retract_page_tables(mapping, start, mm, addr, hpage,
- cc);
- unlock_page(hpage);
- hpage = NULL;
+ result = retract_page_tables(mapping, start, mm, addr,
+ &newf->page, cc);
+ folio_unlock(newf);
+ /* newf now belongs to the page cache; don't drop it in the cleanup below. */
+ newf = NULL;
} else {
struct page *page;
VM_BUG_ON(nr_none);
xas_unlock_irq(&xas);
- hpage->mapping = NULL;
+ newf->mapping = NULL;
}
if (newf)