struct fscache_cookie *cookie = ceph_fscache_cookie(ci);
fscache_write_to_cache(cookie, inode->i_mapping, off, len, i_size_read(inode),
- ceph_fscache_write_terminated, inode, caching);
+ ceph_fscache_write_terminated, inode, true, caching);
}
#else
static inline void ceph_set_page_fscache(struct page *page)
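For reference, the termination callback passed in the ceph call above (ceph_fscache_write_terminated) must match the netfs_io_terminated_t type, which is declared in include/linux/netfs.h as:

typedef void (*netfs_io_terminated_t)(void *priv, ssize_t transferred_or_error,
				      bool was_async);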
/* Set parameters for the netfs library */
netfs_inode_init(&ci->netfs, &ceph_netfs_ops, false);
+ /* [DEPRECATED] Use PG_private_2 to mark a folio being written to the cache. */

+ __set_bit(NETFS_ICTX_USE_PGPRIV2, &ci->netfs.flags);
spin_lock_init(&ci->i_ceph_lock);
#include "internal.h"
/*
- * Unlock the folios in a read operation. We need to set PG_fscache on any
+ * Unlock the folios in a read operation. We need to set PG_writeback on any
* folios we're going to write back before we unlock them.
+ *
+ * Note that if the deprecated NETFS_RREQ_USE_PGPRIV2 is set then we use
+ * PG_private_2 and do a direct write to the cache from here instead.
*/
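As a minimal illustration of the non-PGPRIV2 scheme described above (this helper is not part of the patch), a folio queued for copying to the cache ends up dirty with the sentinel attached as its private data:

static inline bool example_folio_is_copy_to_cache(struct folio *folio)
{
	/* Hypothetical predicate: the loop below attaches NETFS_FOLIO_COPY_TO_CACHE
	 * and dirties the folio; writeback later turns it into a cache-only write.
	 */
	return folio_get_private(folio) == NETFS_FOLIO_COPY_TO_CACHE &&
	       folio_test_dirty(folio);
}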
void netfs_rreq_unlock_folios(struct netfs_io_request *rreq)
{
xas_for_each(&xas, folio, last_page) {
loff_t pg_end;
bool pg_failed = false;
- bool folio_started;
+ bool wback_to_cache = false;
+ bool folio_started = false;
if (xas_retry(&xas, folio))
continue;
pg_end = folio_pos(folio) + folio_size(folio) - 1;
- folio_started = false;
for (;;) {
loff_t sreq_end;
pg_failed = true;
break;
}
- if (!folio_started && test_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags)) {
- trace_netfs_folio(folio, netfs_folio_trace_copy_to_cache);
- folio_start_fscache(folio);
- folio_started = true;
+ if (test_bit(NETFS_RREQ_USE_PGPRIV2, &rreq->flags)) {
+ if (!folio_started && test_bit(NETFS_SREQ_COPY_TO_CACHE,
+ &subreq->flags)) {
+ trace_netfs_folio(folio, netfs_folio_trace_copy_to_cache);
+ folio_start_fscache(folio);
+ folio_started = true;
+ }
+ } else {
+ wback_to_cache |=
+ test_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags);
}
pg_failed |= subreq_failed;
sreq_end = subreq->start + subreq->len - 1;
kfree(finfo);
}
folio_mark_uptodate(folio);
+ if (wback_to_cache && !WARN_ON_ONCE(folio_get_private(folio) != NULL)) {
+ trace_netfs_folio(folio, netfs_folio_trace_copy_to_cache);
+ folio_attach_private(folio, NETFS_FOLIO_COPY_TO_CACHE);
+ filemap_dirty_folio(folio->mapping, folio);
+ }
}
if (!test_bit(NETFS_RREQ_DONT_UNLOCK_FOLIOS, &rreq->flags)) {
netfs_put_request(rreq, false, netfs_rreq_trace_put_return);
have_folio:
- ret = folio_wait_fscache_killable(folio);
- if (ret < 0)
- goto error;
+ if (test_bit(NETFS_ICTX_USE_PGPRIV2, &ctx->flags)) {
+ ret = folio_wait_fscache_killable(folio);
+ if (ret < 0)
+ goto error;
+ }
have_folio_no_wait:
*_folio = folio;
_leave(" = 0");
static void netfs_set_group(struct folio *folio, struct netfs_group *netfs_group)
{
- if (netfs_group && !folio_get_private(folio))
- folio_attach_private(folio, netfs_get_group(netfs_group));
-}
+ void *priv = folio_get_private(folio);
-#if IS_ENABLED(CONFIG_FSCACHE)
-static void netfs_folio_start_fscache(bool caching, struct folio *folio)
-{
- if (caching)
- folio_start_fscache(folio);
-}
-#else
-static void netfs_folio_start_fscache(bool caching, struct folio *folio)
-{
+ if (netfs_group && (!priv || priv == NETFS_FOLIO_COPY_TO_CACHE))
+ folio_attach_private(folio, netfs_get_group(netfs_group));
+ else if (!netfs_group && priv == NETFS_FOLIO_COPY_TO_CACHE)
+ folio_detach_private(folio);
}
-#endif
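A brief sketch of the rewritten helper's behaviour (illustrative, not part of the patch): a folio that a read left carrying the copy-to-cache sentinel is adopted into the caller's real write group rather than being treated as foreign content, and a bare sentinel is dropped again when no group is supplied:

static void example_adopt_folio(struct folio *folio, struct netfs_group *group)
{
	/* group != NULL: the sentinel (or NULL) is replaced by the refcounted group;
	 * group == NULL: a lone NETFS_FOLIO_COPY_TO_CACHE marker is detached.
	 */
	netfs_set_group(folio, group);
}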
/*
* Decide how we should modify a folio. We might be attempting to do
bool maybe_trouble)
{
struct netfs_folio *finfo = netfs_folio_info(folio);
+ struct netfs_group *group = netfs_folio_group(folio);
loff_t pos = folio_file_pos(folio);
_enter("");
- if (netfs_folio_group(folio) != netfs_group)
+ if (group != netfs_group && group != NETFS_FOLIO_COPY_TO_CACHE)
return NETFS_FLUSH_CONTENT;
if (folio_test_uptodate(folio))
folio_clear_dirty_for_io(folio);
/* We make multiple writes to the folio... */
if (!folio_test_writeback(folio)) {
- folio_wait_fscache(folio);
folio_start_writeback(folio);
- folio_start_fscache(folio);
if (wreq->iter.count == 0)
trace_netfs_folio(folio, netfs_folio_trace_wthru);
else
*/
vm_fault_t netfs_page_mkwrite(struct vm_fault *vmf, struct netfs_group *netfs_group)
{
+ struct netfs_group *group;
struct folio *folio = page_folio(vmf->page);
struct file *file = vmf->vma->vm_file;
struct inode *inode = file_inode(file);
goto out;
}
- if (netfs_folio_group(folio) != netfs_group) {
+ group = netfs_folio_group(folio);
+ if (group != netfs_group && group != NETFS_FOLIO_COPY_TO_CACHE) {
folio_unlock(folio);
err = filemap_fdatawait_range(inode->i_mapping,
folio_pos(folio),
trace_netfs_folio(folio, netfs_folio_trace_kill);
folio_clear_uptodate(folio);
- if (folio_test_fscache(folio))
- folio_end_fscache(folio);
folio_end_writeback(folio);
folio_lock(folio);
generic_error_remove_folio(mapping, folio);
next = folio_next_index(folio);
trace_netfs_folio(folio, netfs_folio_trace_redirty);
filemap_dirty_folio(mapping, folio);
- if (folio_test_fscache(folio))
- folio_end_fscache(folio);
folio_end_writeback(folio);
folio_put(folio);
} while (index = next, index <= last);
if (!folio_test_dirty(folio)) {
folio_detach_private(folio);
gcount++;
- trace_netfs_folio(folio, netfs_folio_trace_clear_g);
+ if (group == NETFS_FOLIO_COPY_TO_CACHE)
+ trace_netfs_folio(folio,
+ netfs_folio_trace_end_copy);
+ else
+ trace_netfs_folio(folio, netfs_folio_trace_clear_g);
} else {
trace_netfs_folio(folio, netfs_folio_trace_redirtied);
}
trace_netfs_folio(folio, netfs_folio_trace_clear);
}
end_wb:
- if (folio_test_fscache(folio))
- folio_end_fscache(folio);
xas_advance(&xas, folio_next_index(folio) - 1);
folio_end_writeback(folio);
}
long *_count,
loff_t start,
loff_t max_len,
- bool caching,
size_t *_len,
size_t *_top)
{
break;
}
if (!folio_test_dirty(folio) ||
- folio_test_writeback(folio) ||
- folio_test_fscache(folio)) {
+ folio_test_writeback(folio)) {
folio_unlock(folio);
folio_put(folio);
xas_reset(xas);
if ((const struct netfs_group *)priv != group) {
stop = true;
finfo = netfs_folio_info(folio);
- if (finfo->netfs_group != group ||
+ if (!finfo ||
+ finfo->netfs_group != group ||
finfo->dirty_offset > 0) {
folio_unlock(folio);
folio_put(folio);
for (i = 0; i < folio_batch_count(&fbatch); i++) {
folio = fbatch.folios[i];
- trace_netfs_folio(folio, netfs_folio_trace_store_plus);
+ if (group == NETFS_FOLIO_COPY_TO_CACHE)
+ trace_netfs_folio(folio, netfs_folio_trace_copy_plus);
+ else
+ trace_netfs_folio(folio, netfs_folio_trace_store_plus);
if (!folio_clear_dirty_for_io(folio))
BUG();
folio_start_writeback(folio);
- netfs_folio_start_fscache(caching, folio);
folio_unlock(folio);
}
struct netfs_inode *ctx = netfs_inode(mapping->host);
unsigned long long i_size = i_size_read(&ctx->inode);
size_t len, max_len;
- bool caching = netfs_is_cache_enabled(ctx);
long count = wbc->nr_to_write;
int ret;
- _enter(",%lx,%llx-%llx,%u", folio->index, start, end, caching);
+ _enter(",%lx,%llx-%llx", folio->index, start, end);
wreq = netfs_alloc_request(mapping, NULL, start, folio_size(folio),
- NETFS_WRITEBACK);
+ group == NETFS_FOLIO_COPY_TO_CACHE ?
+ NETFS_COPY_TO_CACHE : NETFS_WRITEBACK);
if (IS_ERR(wreq)) {
folio_unlock(folio);
return PTR_ERR(wreq);
if (!folio_clear_dirty_for_io(folio))
BUG();
folio_start_writeback(folio);
- netfs_folio_start_fscache(caching, folio);
count -= folio_nr_pages(folio);
* immediately lockable, is not dirty or is missing, or we reach the
* end of the range.
*/
- trace_netfs_folio(folio, netfs_folio_trace_store);
+ if (group == NETFS_FOLIO_COPY_TO_CACHE)
+ trace_netfs_folio(folio, netfs_folio_trace_copy);
+ else
+ trace_netfs_folio(folio, netfs_folio_trace_store);
len = wreq->len;
finfo = netfs_folio_info(folio);
if (len < max_len)
netfs_extend_writeback(mapping, group, xas, &count, start,
- max_len, caching, &len, &wreq->upper_len);
+ max_len, &len, &wreq->upper_len);
}
cant_expand:
iov_iter_xarray(&wreq->iter, ITER_SOURCE, &mapping->i_pages, start,
wreq->upper_len);
- __set_bit(NETFS_RREQ_UPLOAD_TO_SERVER, &wreq->flags);
- ret = netfs_begin_write(wreq, true, netfs_write_trace_writeback);
+ if (group != NETFS_FOLIO_COPY_TO_CACHE) {
+ __set_bit(NETFS_RREQ_UPLOAD_TO_SERVER, &wreq->flags);
+ ret = netfs_begin_write(wreq, true, netfs_write_trace_writeback);
+ } else {
+ ret = netfs_begin_write(wreq, true, netfs_write_trace_copy_to_cache);
+ }
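Note that in the NETFS_FOLIO_COPY_TO_CACHE branch above, NETFS_RREQ_UPLOAD_TO_SERVER is deliberately left clear, so netfs_begin_write() only drives the write to the cache and nothing is re-sent to the server.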
if (ret == 0 || ret == -EIOCBQUEUED)
wbc->nr_to_write -= len / PAGE_SIZE;
} else {
_debug("write discard %zx @%llx [%llx]", len, start, i_size);
/* The dirty region was entirely beyond the EOF. */
- fscache_clear_page_bits(mapping, start, len, caching);
netfs_pages_written_back(wreq);
ret = 0;
}
/* Skip any dirty folio that's not in the group of interest. */
priv = folio_get_private(folio);
- if ((const struct netfs_group *)priv != group) {
- finfo = netfs_folio_info(folio);
- if (finfo->netfs_group != group) {
+ if ((const struct netfs_group *)priv == NETFS_FOLIO_COPY_TO_CACHE) {
+ group = NETFS_FOLIO_COPY_TO_CACHE;
+ } else if ((const struct netfs_group *)priv != group) {
+ finfo = __netfs_folio_info(priv);
+ if (!finfo || finfo->netfs_group != group) {
folio_put(folio);
continue;
}
goto search_again;
}
- if (folio_test_writeback(folio) ||
- folio_test_fscache(folio)) {
+ if (folio_test_writeback(folio)) {
folio_unlock(folio);
if (wbc->sync_mode != WB_SYNC_NONE) {
folio_wait_writeback(folio);
-#ifdef CONFIG_FSCACHE
- folio_wait_fscache(folio);
-#endif
goto lock_again;
}
bvec_set_folio(&bvec, folio, len, offset);
iov_iter_bvec(&wreq->iter, ITER_SOURCE, &bvec, 1, len);
- __set_bit(NETFS_RREQ_UPLOAD_TO_SERVER, &wreq->flags);
+ if (group != NETFS_FOLIO_COPY_TO_CACHE)
+ __set_bit(NETFS_RREQ_UPLOAD_TO_SERVER, &wreq->flags);
ret = netfs_begin_write(wreq, true, netfs_write_trace_launder);
out_put:
kfree(finfo);
netfs_put_request(wreq, false, netfs_rreq_trace_put_return);
out:
- folio_wait_fscache(folio);
_leave(" = %d", ret);
return ret;
}
loff_t start;
size_t len;
bool set_bits;
+ bool using_pgpriv2;
netfs_io_terminated_t term_func;
void *term_func_priv;
};
{
struct fscache_write_request *wreq = priv;
- fscache_clear_page_bits(wreq->mapping, wreq->start, wreq->len,
- wreq->set_bits);
+ if (wreq->using_pgpriv2)
+ fscache_clear_page_bits(wreq->mapping, wreq->start, wreq->len,
+ wreq->set_bits);
if (wreq->term_func)
wreq->term_func(wreq->term_func_priv, transferred_or_error,
loff_t start, size_t len, loff_t i_size,
netfs_io_terminated_t term_func,
void *term_func_priv,
- bool cond)
+ bool using_pgpriv2, bool cond)
{
struct fscache_write_request *wreq;
struct netfs_cache_resources *cres;
wreq->mapping = mapping;
wreq->start = start;
wreq->len = len;
+ wreq->using_pgpriv2 = using_pgpriv2;
wreq->set_bits = cond;
wreq->term_func = term_func;
wreq->term_func_priv = term_func_priv;
abandon_free:
kfree(wreq);
abandon:
- fscache_clear_page_bits(mapping, start, len, cond);
+ if (using_pgpriv2)
+ fscache_clear_page_bits(mapping, start, len, cond);
if (term_func)
term_func(term_func_priv, ret, false);
}
*/
static inline struct netfs_group *netfs_get_group(struct netfs_group *netfs_group)
{
- if (netfs_group)
+ if (netfs_group && netfs_group != NETFS_FOLIO_COPY_TO_CACHE)
refcount_inc(&netfs_group->ref);
return netfs_group;
}
*/
static inline void netfs_put_group(struct netfs_group *netfs_group)
{
- if (netfs_group && refcount_dec_and_test(&netfs_group->ref))
+ if (netfs_group &&
+ netfs_group != NETFS_FOLIO_COPY_TO_CACHE &&
+ refcount_dec_and_test(&netfs_group->ref))
netfs_group->free(netfs_group);
}
*/
static inline void netfs_put_group_many(struct netfs_group *netfs_group, int nr)
{
- if (netfs_group && refcount_sub_and_test(nr, &netfs_group->ref))
+ if (netfs_group &&
+ netfs_group != NETFS_FOLIO_COPY_TO_CACHE &&
+ refcount_sub_and_test(nr, &netfs_group->ref))
netfs_group->free(netfs_group);
}
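All three group helpers above now treat NETFS_FOLIO_COPY_TO_CACHE like NULL: the sentinel is a plain tagged constant, not a refcounted struct netfs_group, so taking or dropping a reference on it has to be a no-op rather than a refcount operation on a bogus pointer.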
}
/*
- * Deal with the completion of writing the data to the cache. We have to clear
- * the PG_fscache bits on the folios involved and release the caller's ref.
+ * [DEPRECATED] Deal with the completion of writing the data to the cache. We
+ * have to clear the PG_fscache bits on the folios involved and release the
+ * caller's ref.
*
* May be called in softirq mode and we inherit a ref from the caller.
*/
}
static void netfs_rreq_copy_terminated(void *priv, ssize_t transferred_or_error,
- bool was_async)
+ bool was_async) /* [DEPRECATED] */
{
struct netfs_io_subrequest *subreq = priv;
struct netfs_io_request *rreq = subreq->rreq;
}
/*
- * Perform any outstanding writes to the cache. We inherit a ref from the
- * caller.
+ * [DEPRECATED] Perform any outstanding writes to the cache. We inherit a ref
+ * from the caller.
*/
static void netfs_rreq_do_write_to_cache(struct netfs_io_request *rreq)
{
netfs_rreq_unmark_after_write(rreq, false);
}
-static void netfs_rreq_write_to_cache_work(struct work_struct *work)
+static void netfs_rreq_write_to_cache_work(struct work_struct *work) /* [DEPRECATED] */
{
struct netfs_io_request *rreq =
container_of(work, struct netfs_io_request, work);
netfs_rreq_do_write_to_cache(rreq);
}
-static void netfs_rreq_write_to_cache(struct netfs_io_request *rreq)
+static void netfs_rreq_write_to_cache(struct netfs_io_request *rreq) /* [DEPRECATED] */
{
rreq->work.func = netfs_rreq_write_to_cache_work;
if (!queue_work(system_unbound_wq, &rreq->work))
clear_bit_unlock(NETFS_RREQ_IN_PROGRESS, &rreq->flags);
wake_up_bit(&rreq->flags, NETFS_RREQ_IN_PROGRESS);
- if (test_bit(NETFS_RREQ_COPY_TO_CACHE, &rreq->flags))
+ if (test_bit(NETFS_RREQ_COPY_TO_CACHE, &rreq->flags) &&
+ test_bit(NETFS_RREQ_USE_PGPRIV2, &rreq->flags))
return netfs_rreq_write_to_cache(rreq);
netfs_rreq_completed(rreq, was_async);
[NETFS_READAHEAD] = "RA",
[NETFS_READPAGE] = "RP",
[NETFS_READ_FOR_WRITE] = "RW",
+ [NETFS_COPY_TO_CACHE] = "CC",
[NETFS_WRITEBACK] = "WB",
[NETFS_WRITETHROUGH] = "WT",
[NETFS_LAUNDER_WRITE] = "LW",
*/
void netfs_invalidate_folio(struct folio *folio, size_t offset, size_t length)
{
- struct netfs_folio *finfo = NULL;
+ struct netfs_folio *finfo;
size_t flen = folio_size(folio);
_enter("{%lx},%zx,%zx", folio->index, offset, length);
- folio_wait_fscache(folio);
-
if (!folio_test_private(folio))
return;
if (folio_test_private(folio))
return false;
- if (folio_test_fscache(folio)) {
- if (current_is_kswapd() || !(gfp & __GFP_FS))
- return false;
- folio_wait_fscache(folio);
- }
-
fscache_note_page_release(netfs_i_cookie(ctx));
return true;
}
refcount_set(&rreq->ref, 1);
__set_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags);
- if (cached)
+ if (cached) {
__set_bit(NETFS_RREQ_WRITE_TO_CACHE, &rreq->flags);
+ if (test_bit(NETFS_ICTX_USE_PGPRIV2, &ctx->flags))
+ /* Filesystem uses deprecated PG_private_2 marking. */
+ __set_bit(NETFS_RREQ_USE_PGPRIV2, &rreq->flags);
+ }
if (file && file->f_flags & O_NONBLOCK)
__set_bit(NETFS_RREQ_NONBLOCK, &rreq->flags);
if (rreq->netfs_ops->init_request) {
static inline void nfs_netfs_inode_init(struct nfs_inode *nfsi)
{
netfs_inode_init(&nfsi->netfs, &nfs_netfs_ops, false);
+ /* [DEPRECATED] Use PG_private_2 to mark a folio being written to the cache. */
+ __set_bit(NETFS_ICTX_USE_PGPRIV2, &nfsi->netfs.flags);
}
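For contrast, a filesystem that has been converted away from the deprecated mode would initialise its netfs context without the flag (illustrative sketch; example_inode and example_netfs_ops are placeholders):

static inline void example_netfs_inode_init(struct example_inode *ei)
{
	/* No NETFS_ICTX_USE_PGPRIV2: data read for caching is instead dirtied
	 * with the NETFS_FOLIO_COPY_TO_CACHE sentinel and written back later.
	 */
	netfs_inode_init(&ei->netfs, &example_netfs_ops, false);
}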
extern void nfs_netfs_initiate_read(struct nfs_pgio_header *hdr);
extern void nfs_netfs_read_completion(struct nfs_pgio_header *hdr);
extern int __fscache_begin_read_operation(struct netfs_cache_resources *, struct fscache_cookie *);
extern int __fscache_begin_write_operation(struct netfs_cache_resources *, struct fscache_cookie *);
-extern void __fscache_write_to_cache(struct fscache_cookie *, struct address_space *,
- loff_t, size_t, loff_t, netfs_io_terminated_t, void *,
- bool);
+void __fscache_write_to_cache(struct fscache_cookie *cookie,
+ struct address_space *mapping,
+ loff_t start, size_t len, loff_t i_size,
+ netfs_io_terminated_t term_func,
+ void *term_func_priv,
+ bool using_pgpriv2, bool cond);
extern void __fscache_clear_page_bits(struct address_space *, loff_t, size_t);
/**
* @i_size: The new size of the inode
* @term_func: The function to call upon completion
* @term_func_priv: The private data for @term_func
- * @caching: If PG_fscache has been set
+ * @using_pgpriv2: If we're using PG_private_2 to mark the in-progress write
+ * @caching: If we actually want to do the caching
*
* Helper function for a netfs to write dirty data from an inode into the cache
* object that's backing it.
* marked with PG_fscache.
*
* If given, @term_func will be called upon completion and supplied with
- * @term_func_priv. Note that the PG_fscache flags will have been cleared by
- * this point, so the netfs must retain its own pin on the mapping.
+ * @term_func_priv. Note that if @using_pgpriv2 is set, the PG_private_2 flags
+ * will have been cleared by this point, so the netfs must retain its own pin
+ * on the mapping.
*/
static inline void fscache_write_to_cache(struct fscache_cookie *cookie,
struct address_space *mapping,
loff_t start, size_t len, loff_t i_size,
netfs_io_terminated_t term_func,
void *term_func_priv,
- bool caching)
+ bool using_pgpriv2, bool caching)
{
if (caching)
__fscache_write_to_cache(cookie, mapping, start, len, i_size,
- term_func, term_func_priv, caching);
+ term_func, term_func_priv,
+ using_pgpriv2, caching);
else if (term_func)
term_func(term_func_priv, -ENOBUFS, false);
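A hedged sketch of a caller that still uses PG_private_2 (using_pgpriv2 == true); example_fscache_cookie() and the callback are hypothetical stand-ins for a filesystem's own helpers:

static void example_write_done(void *priv, ssize_t transferred_or_error,
			       bool was_async)
{
	struct inode *inode = priv;

	if (transferred_or_error < 0)
		pr_warn("cache write failed on ino %lu: %zd\n",
			inode->i_ino, transferred_or_error);
}

static void example_write_to_cache(struct inode *inode, loff_t off, size_t len,
				   bool caching)
{
	fscache_write_to_cache(example_fscache_cookie(inode), inode->i_mapping,
			       off, len, i_size_read(inode),
			       example_write_done, inode,
			       true /* using_pgpriv2 */, caching);
}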
#define NETFS_ICTX_UNBUFFERED 1 /* I/O should not use the pagecache */
#define NETFS_ICTX_WRITETHROUGH 2 /* Write-through caching */
#define NETFS_ICTX_NO_WRITE_STREAMING 3 /* Don't engage in write-streaming */
+#define NETFS_ICTX_USE_PGPRIV2 31 /* [DEPRECATED] Use PG_private_2 to mark
+ * write to cache on read */
};
/*
unsigned int dirty_len; /* Write-streaming dirty data length */
};
#define NETFS_FOLIO_INFO 0x1UL /* OR'd with folio->private. */
+#define NETFS_FOLIO_COPY_TO_CACHE ((struct netfs_group *)0x356UL) /* Write to the cache only */
-static inline struct netfs_folio *netfs_folio_info(struct folio *folio)
+static inline bool netfs_is_folio_info(const void *priv)
{
- void *priv = folio_get_private(folio);
+ return (unsigned long)priv & NETFS_FOLIO_INFO;
+}
- if ((unsigned long)priv & NETFS_FOLIO_INFO)
+static inline struct netfs_folio *__netfs_folio_info(const void *priv)
+{
+ if (netfs_is_folio_info(priv))
return (struct netfs_folio *)((unsigned long)priv & ~NETFS_FOLIO_INFO);
return NULL;
}
+static inline struct netfs_folio *netfs_folio_info(struct folio *folio)
+{
+ return __netfs_folio_info(folio_get_private(folio));
+}
+
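With this, folio->private for a netfs folio is one of: NULL (no write group), a pointer to a refcounted struct netfs_group, the NETFS_FOLIO_COPY_TO_CACHE sentinel (dirty data destined only for the cache), or a struct netfs_folio tagged with NETFS_FOLIO_INFO in bit 0 (write-streaming state, which carries its group in ->netfs_group). The helpers above and below decode those cases.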
static inline struct netfs_group *netfs_folio_group(struct folio *folio)
{
struct netfs_folio *finfo;
NETFS_READAHEAD, /* This read was triggered by readahead */
NETFS_READPAGE, /* This read is a synchronous read */
NETFS_READ_FOR_WRITE, /* This read is to prepare a write */
+ NETFS_COPY_TO_CACHE, /* This write is to copy a read to the cache */
NETFS_WRITEBACK, /* This write was triggered by writepages */
NETFS_WRITETHROUGH, /* This write was made by netfs_perform_write() */
NETFS_LAUNDER_WRITE, /* This is triggered by ->launder_folio() */
#define NETFS_RREQ_UPLOAD_TO_SERVER 8 /* Need to write to the server */
#define NETFS_RREQ_NONBLOCK 9 /* Don't block if possible (O_NONBLOCK) */
#define NETFS_RREQ_BLOCKED 10 /* We blocked */
+#define NETFS_RREQ_USE_PGPRIV2 31 /* [DEPRECATED] Use PG_private_2 to mark
+ * write to cache on read */
const struct netfs_request_ops *netfs_ops;
void (*cleanup)(struct netfs_io_request *req);
};
E_(netfs_read_trace_write_begin, "WRITEBEGN")
#define netfs_write_traces \
+ EM(netfs_write_trace_copy_to_cache, "COPY2CACH") \
EM(netfs_write_trace_dio_write, "DIO-WRITE") \
EM(netfs_write_trace_launder, "LAUNDER ") \
EM(netfs_write_trace_unbuffered_write, "UNB-WRITE") \
EM(NETFS_READAHEAD, "RA") \
EM(NETFS_READPAGE, "RP") \
EM(NETFS_READ_FOR_WRITE, "RW") \
+ EM(NETFS_COPY_TO_CACHE, "CC") \
EM(NETFS_WRITEBACK, "WB") \
EM(NETFS_WRITETHROUGH, "WT") \
EM(NETFS_LAUNDER_WRITE, "LW") \
EM(netfs_folio_trace_clear, "clear") \
EM(netfs_folio_trace_clear_s, "clear-s") \
EM(netfs_folio_trace_clear_g, "clear-g") \
- EM(netfs_folio_trace_copy_to_cache, "copy") \
+ EM(netfs_folio_trace_copy, "copy") \
+ EM(netfs_folio_trace_copy_plus, "copy+") \
+ EM(netfs_folio_trace_copy_to_cache, "mark-copy") \
EM(netfs_folio_trace_end_copy, "end-copy") \
EM(netfs_folio_trace_filled_gaps, "filled-gaps") \
EM(netfs_folio_trace_kill, "kill") \