if (folio_mapped(folio))
folio_redirty_for_writepage(&wbc, folio);
else
- error = shmem_writeout(folio, &wbc);
+ error = shmem_writeout(folio, NULL, NULL);
}
}
if (writeback && !folio_mapped(to_folio) &&
folio_clear_dirty_for_io(to_folio)) {
- struct writeback_control wbc = {
- .sync_mode = WB_SYNC_NONE,
- .nr_to_write = SWAP_CLUSTER_MAX,
- .range_start = 0,
- .range_end = LLONG_MAX,
- .for_reclaim = 1,
- };
folio_set_reclaim(to_folio);
- ret = shmem_writeout(to_folio, &wbc);
+ ret = shmem_writeout(to_folio, NULL, NULL);
if (!folio_test_writeback(to_folio))
folio_clear_reclaim(to_folio);
/*
#include <linux/fs_parser.h>
#include <linux/userfaultfd_k.h>
+struct swap_iocb;
+
/* inode in-kernel data */
#ifdef CONFIG_TMPFS_QUOTA
void shmem_unlock_mapping(struct address_space *mapping);
struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
pgoff_t index, gfp_t gfp_mask);
-int shmem_writeout(struct folio *folio, struct writeback_control *wbc);
+int shmem_writeout(struct folio *folio, struct swap_iocb **plug,
+ struct list_head *folio_list);
void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end);
int shmem_unuse(unsigned int type);
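Both new arguments may be NULL. A minimal sketch of the two call shapes this prototype supports, based on the callers updated elsewhere in this patch (the wrapper names below are illustrative, not part of the patch):

#include <linux/shmem_fs.h>

/* Driver-style callers pass neither a swap plug nor a split list. */
static int example_simple_writeout(struct folio *folio)
{
	return shmem_writeout(folio, NULL, NULL);
}

/*
 * Reclaim-style callers hand through their swap plug and the folio list
 * being shrunk, so folios produced by splitting a large folio are queued
 * back on that list.
 */
static int example_reclaim_writeout(struct folio *folio,
		struct swap_iocb **plug, struct list_head *folio_list)
{
	return shmem_writeout(folio, plug, folio_list);
}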
*/
struct swap_iocb **swap_plug;
- /* Target list for splitting a large folio */
- struct list_head *list;
-
/* internal fields used by the ->writepages implementation: */
struct folio_batch fbatch;
pgoff_t index;
* shmem_writeout - Write the folio to swap
* @folio: The folio to write
- * @wbc: How writeback is to be done
+ * @plug: swap plug for batching the swap I/O, or NULL
+ * @folio_list: list to put folios back on if a large folio is split, or NULL
*
* Move the folio from the page cache to the swap cache.
*/
-int shmem_writeout(struct folio *folio, struct writeback_control *wbc)
+int shmem_writeout(struct folio *folio, struct swap_iocb **plug,
+ struct list_head *folio_list)
{
struct address_space *mapping = folio->mapping;
struct inode *inode = mapping->host;
int nr_pages;
bool split = false;
- if (WARN_ON_ONCE(!wbc->for_reclaim))
- goto redirty;
-
if ((info->flags & VM_LOCKED) || sbinfo->noswap)
goto redirty;
try_split:
/* Ensure the subpages are still dirty */
folio_test_set_dirty(folio);
- if (split_folio_to_list(folio, wbc->list))
+ if (split_folio_to_list(folio, folio_list))
goto redirty;
folio_clear_dirty(folio);
}
list_add(&info->swaplist, &shmem_swaplist);
if (!folio_alloc_swap(folio, __GFP_HIGH | __GFP_NOMEMALLOC | __GFP_NOWARN)) {
+ struct writeback_control wbc = {
+ .sync_mode = WB_SYNC_NONE,
+ .nr_to_write = SWAP_CLUSTER_MAX,
+ .range_start = 0,
+ .range_end = LLONG_MAX,
+ .for_reclaim = 1,
+ .swap_plug = plug,
+ };
shmem_recalc_inode(inode, 0, nr_pages);
swap_shmem_alloc(folio->swap, nr_pages);
shmem_delete_from_page_cache(folio, swp_to_radix_entry(folio->swap));
mutex_unlock(&shmem_swaplist_mutex);
BUG_ON(folio_mapped(folio));
- return swap_writeout(folio, wbc);
+ return swap_writeout(folio, &wbc);
}
list_del_init(&info->swaplist);
goto try_split;
redirty:
folio_mark_dirty(folio);
- if (wbc->for_reclaim)
- return AOP_WRITEPAGE_ACTIVATE; /* Return with folio locked */
- folio_unlock(folio);
- return 0;
+ return AOP_WRITEPAGE_ACTIVATE; /* Return with folio locked */
}
EXPORT_SYMBOL_GPL(shmem_writeout);
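A sketch of how a driver-style caller of the exported helper now looks, modelled on the i915 and TTM hunks above; the function name and the exact error handling are illustrative assumptions, not taken from the patch:

#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/shmem_fs.h>

static int example_writeout_one(struct folio *folio)
{
	int error = 0;

	folio_lock(folio);

	/* Mapped or already-clean folios are not pushed to swap here. */
	if (folio_mapped(folio) || !folio_clear_dirty_for_io(folio)) {
		folio_unlock(folio);
		return 0;
	}

	folio_set_reclaim(folio);
	error = shmem_writeout(folio, NULL, NULL);
	if (!folio_test_writeback(folio))
		folio_clear_reclaim(folio);

	/*
	 * AOP_WRITEPAGE_ACTIVATE means the folio was redirtied and comes
	 * back still locked; otherwise shmem_writeout() dropped the lock
	 * on the writeout path.
	 */
	if (error == AOP_WRITEPAGE_ACTIVATE) {
		folio_unlock(folio);
		error = 0;
	}
	return error;
}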
/*
* The large shmem folio can be split if CONFIG_THP_SWAP is not enabled
- * or we failed to allocate contiguous swap entries.
+ * or we failed to allocate contiguous swap entries, in which case
+ * the split out folios get added back to folio_list.
*/
- if (shmem_mapping(mapping)) {
- if (folio_test_large(folio))
- wbc.list = folio_list;
- res = shmem_writeout(folio, &wbc);
- } else {
+ if (shmem_mapping(mapping))
+ res = shmem_writeout(folio, plug, folio_list);
+ else
res = swap_writeout(folio, &wbc);
- }
if (res < 0)
handle_write_error(mapping, folio, res);