if (!test_bit(NETFS_RREQ_DONT_UNLOCK_FOLIOS, &rreq->flags)) {
if (folio->index == rreq->no_unlock_folio &&
test_bit(NETFS_RREQ_NO_UNLOCK_FOLIO, &rreq->flags))
- kdebug("no unlock");
+ _debug("no unlock");
else
folio_unlock(folio);
}
struct netfs_inode *ctx = netfs_inode(ractl->mapping->host);
int ret;
- kenter("%lx,%x", readahead_index(ractl), readahead_count(ractl));
+ _enter("%lx,%x", readahead_index(ractl), readahead_count(ractl));
if (readahead_count(ractl) == 0)
return;
struct folio *sink = NULL;
int ret;
- kenter("%lx", folio->index);
+ _enter("%lx", folio->index);
rreq = netfs_alloc_request(mapping, file,
folio_pos(folio), folio_size(folio),
have_folio:
*_folio = folio;
- kleave(" = 0");
+ _leave(" = 0");
return 0;
error_put:
folio_unlock(folio);
folio_put(folio);
}
- kleave(" = %d", ret);
+ _leave(" = %d", ret);
return ret;
}
EXPORT_SYMBOL(netfs_write_begin);
size_t flen = folio_size(folio);
int ret;
- kenter("%zx @%llx", flen, start);
+ _enter("%zx @%llx", flen, start);
ret = -ENOMEM;
error_put:
netfs_put_request(rreq, false, netfs_rreq_trace_put_discard);
error:
- kleave(" = %d", ret);
+ _leave(" = %d", ret);
return ret;
}
struct netfs_group *group = netfs_folio_group(folio);
loff_t pos = folio_pos(folio);
- kenter("");
+ _enter("");
if (group != netfs_group && group != NETFS_FOLIO_COPY_TO_CACHE)
return NETFS_FLUSH_CONTENT;
*/
howto = netfs_how_to_modify(ctx, file, folio, netfs_group,
flen, offset, part, maybe_trouble);
- kdebug("howto %u", howto);
+ _debug("howto %u", howto);
switch (howto) {
case NETFS_JUST_PREFETCH:
ret = netfs_prefetch_for_write(file, folio, offset, part);
if (ret < 0) {
- kdebug("prefetch = %zd", ret);
+ _debug("prefetch = %zd", ret);
goto error_folio_unlock;
}
break;
}
iocb->ki_pos += written;
- kleave(" = %zd [%zd]", written, ret);
+ _leave(" = %zd [%zd]", written, ret);
return written ? written : ret;
error_folio_unlock:
struct netfs_inode *ictx = netfs_inode(inode);
ssize_t ret;
- kenter("%llx,%zx,%llx", iocb->ki_pos, iov_iter_count(from), i_size_read(inode));
+ _enter("%llx,%zx,%llx", iocb->ki_pos, iov_iter_count(from), i_size_read(inode));
if (!iov_iter_count(from))
return 0;
vm_fault_t ret = VM_FAULT_RETRY;
int err;
- kenter("%lx", folio->index);
+ _enter("%lx", folio->index);
sb_start_pagefault(inode->i_sb);
size_t orig_count = iov_iter_count(iter);
bool async = !is_sync_kiocb(iocb);
- kenter("");
+ _enter("");
if (!orig_count)
return 0; /* Don't update atime */
size_t len = iov_iter_count(iter);
bool async = !is_sync_kiocb(iocb);
- kenter("");
+ _enter("");
/* We're going to need a bounce buffer if what we transmit is going to
* be different in some way to the source buffer, e.g. because it gets
*/
// TODO
- kdebug("uw %llx-%llx", start, end);
+ _debug("uw %llx-%llx", start, end);
wreq = netfs_create_write_req(iocb->ki_filp->f_mapping, iocb->ki_filp, start,
iocb->ki_flags & IOCB_DIRECT ?
wreq->cleanup = netfs_cleanup_dio_write;
ret = netfs_unbuffered_write(wreq, is_sync_kiocb(iocb), wreq->len);
if (ret < 0) {
- kdebug("begin = %zd", ret);
+ _debug("begin = %zd", ret);
goto out;
}
loff_t pos = iocb->ki_pos;
unsigned long long end = pos + iov_iter_count(from) - 1;
- kenter("%llx,%zx,%llx", pos, iov_iter_count(from), i_size_read(inode));
+ _enter("%llx,%zx,%llx", pos, iov_iter_count(from), i_size_read(inode));
if (!iov_iter_count(from))
return 0;
{
int n_accesses;
- kenter("{%s,%s}", ops->name, cache->name);
+ _enter("{%s,%s}", ops->name, cache->name);
BUG_ON(fscache_cache_state(cache) != FSCACHE_CACHE_IS_PREPARING);
up_write(&fscache_addremove_sem);
pr_notice("Cache \"%s\" added (type %s)\n", cache->name, ops->name);
- kleave(" = 0 [%s]", cache->name);
+ _leave(" = 0 [%s]", cache->name);
return 0;
}
EXPORT_SYMBOL(fscache_add_cache);
{
struct fscache_cookie *cookie;
- kenter("V=%x", volume->debug_id);
+ _enter("V=%x", volume->debug_id);
if (!index_key || !index_key_len || index_key_len > 255 || aux_data_len > 255)
return NULL;
trace_fscache_acquire(cookie);
fscache_stat(&fscache_n_acquires_ok);
- kleave(" = c=%08x", cookie->debug_id);
+ _leave(" = c=%08x", cookie->debug_id);
return cookie;
}
EXPORT_SYMBOL(__fscache_acquire_cookie);
enum fscache_access_trace trace = fscache_access_lookup_cookie_end_failed;
bool need_withdraw = false;
- kenter("");
+ _enter("");
if (!cookie->volume->cache_priv) {
fscache_create_volume(cookie->volume, true);
if (cookie->state != FSCACHE_COOKIE_STATE_FAILED)
fscache_set_cookie_state(cookie, FSCACHE_COOKIE_STATE_QUIESCENT);
need_withdraw = true;
- kleave(" [fail]");
+ _leave(" [fail]");
goto out;
}
bool queue = false;
int n_active;
- kenter("c=%08x", cookie->debug_id);
+ _enter("c=%08x", cookie->debug_id);
if (WARN(test_bit(FSCACHE_COOKIE_RELINQUISHED, &cookie->flags),
"Trying to use relinquished cookie\n"))
spin_unlock(&cookie->lock);
if (queue)
fscache_queue_cookie(cookie, fscache_cookie_get_use_work);
- kleave("");
+ _leave("");
}
EXPORT_SYMBOL(__fscache_use_cookie);
enum fscache_cookie_state state;
bool wake = false;
- kenter("c=%x", cookie->debug_id);
+ _enter("c=%x", cookie->debug_id);
again:
spin_lock(&cookie->lock);
spin_unlock(&cookie->lock);
if (wake)
wake_up_cookie_state(cookie);
- kleave("");
+ _leave("");
}
static void fscache_cookie_worker(struct work_struct *work)
set_bit(FSCACHE_COOKIE_DO_LRU_DISCARD, &cookie->flags);
spin_unlock(&cookie->lock);
fscache_stat(&fscache_n_cookies_lru_expired);
- kdebug("lru c=%x", cookie->debug_id);
+ _debug("lru c=%x", cookie->debug_id);
__fscache_withdraw_cookie(cookie);
}
if (retire)
fscache_stat(&fscache_n_relinquishes_retire);
- kenter("c=%08x{%d},%d",
+ _enter("c=%08x{%d},%d",
cookie->debug_id, atomic_read(&cookie->n_active), retire);
if (WARN(test_and_set_bit(FSCACHE_COOKIE_RELINQUISHED, &cookie->flags),
{
bool is_caching;
- kenter("c=%x", cookie->debug_id);
+ _enter("c=%x", cookie->debug_id);
fscache_stat(&fscache_n_invalidates);
case FSCACHE_COOKIE_STATE_INVALIDATING: /* is_still_valid will catch it */
default:
spin_unlock(&cookie->lock);
- kleave(" [no %u]", cookie->state);
+ _leave(" [no %u]", cookie->state);
return;
case FSCACHE_COOKIE_STATE_LOOKING_UP:
fallthrough;
case FSCACHE_COOKIE_STATE_CREATING:
spin_unlock(&cookie->lock);
- kleave(" [look %x]", cookie->inval_counter);
+ _leave(" [look %x]", cookie->inval_counter);
return;
case FSCACHE_COOKIE_STATE_ACTIVE:
if (is_caching)
fscache_queue_cookie(cookie, fscache_cookie_get_inval_work);
- kleave(" [inv]");
+ _leave(" [inv]");
return;
}
}
again:
if (!fscache_cache_is_live(cookie->volume->cache)) {
- kleave(" [broken]");
+ _leave(" [broken]");
return false;
}
state = fscache_cookie_state(cookie);
- kenter("c=%08x{%u},%x", cookie->debug_id, state, want_state);
+ _enter("c=%08x{%u},%x", cookie->debug_id, state, want_state);
switch (state) {
case FSCACHE_COOKIE_STATE_CREATING:
case FSCACHE_COOKIE_STATE_DROPPED:
case FSCACHE_COOKIE_STATE_RELINQUISHING:
default:
- kleave(" [not live]");
+ _leave(" [not live]");
return false;
}
spin_lock(&cookie->lock);
state = fscache_cookie_state(cookie);
- kenter("c=%08x{%u},%x", cookie->debug_id, state, want_state);
+ _enter("c=%08x{%u},%x", cookie->debug_id, state, want_state);
switch (state) {
case FSCACHE_COOKIE_STATE_LOOKING_UP:
cres->cache_priv = NULL;
cres->ops = NULL;
fscache_end_cookie_access(cookie, fscache_access_io_not_live);
- kleave(" = -ENOBUFS");
+ _leave(" = -ENOBUFS");
return -ENOBUFS;
}
if (len == 0)
goto abandon;
- kenter("%llx,%zx", start, len);
+ _enter("%llx,%zx", start, len);
wreq = kzalloc(sizeof(struct fscache_write_request), GFP_NOFS);
if (!wreq)
*/
void __exit fscache_exit(void)
{
- kenter("");
+ _enter("");
kmem_cache_destroy(fscache_cookie_jar);
fscache_proc_cleanup();
fscache_see_volume(volume, fscache_volume_new_acquire);
fscache_stat(&fscache_n_volumes);
up_write(&fscache_addremove_sem);
- kleave(" = v=%x", volume->debug_id);
+ _leave(" = v=%x", volume->debug_id);
return volume;
err_vol:
{
int n_accesses;
- kdebug("withdraw V=%x", volume->debug_id);
+ _debug("withdraw V=%x", volume->debug_id);
/* Allow wakeups on dec-to-0 */
n_accesses = atomic_dec_return(&volume->n_accesses);
/*
* main.c
*/
+extern unsigned int netfs_debug;
extern struct list_head netfs_io_requests;
extern spinlock_t netfs_proc_lock;
extern mempool_t netfs_request_pool;
* debug tracing
*/
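+/*
+ * Use a bare printk() here rather than pr_debug(): pr_debug() compiles out
+ * unless DEBUG or dynamic debug is enabled, whereas the _enter/_leave/_debug
+ * wrappers below are meant to control emission themselves via netfs_debug.
+ */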
#define dbgprintk(FMT, ...) \
- pr_debug("[%-6.6s] "FMT"\n", current->comm, ##__VA_ARGS__)
+ printk("[%-6.6s] "FMT"\n", current->comm, ##__VA_ARGS__)
#define kenter(FMT, ...) dbgprintk("==> %s("FMT")", __func__, ##__VA_ARGS__)
#define kleave(FMT, ...) dbgprintk("<== %s()"FMT"", __func__, ##__VA_ARGS__)
#define kdebug(FMT, ...) dbgprintk(FMT, ##__VA_ARGS__)
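+
+/*
+ * _enter/_leave/_debug resolve in one of three ways: unconditionally to the
+ * k* macros above when a file defines __KDEBUG before including this header;
+ * gated at runtime on the netfs_debug mask when CONFIG_NETFS_DEBUG=y; and
+ * otherwise compiled out through no_printk(), which still typechecks the
+ * format arguments.
+ */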
+#ifdef __KDEBUG
+#define _enter(FMT, ...) kenter(FMT, ##__VA_ARGS__)
+#define _leave(FMT, ...) kleave(FMT, ##__VA_ARGS__)
+#define _debug(FMT, ...) kdebug(FMT, ##__VA_ARGS__)
+
+#elif defined(CONFIG_NETFS_DEBUG)
+#define _enter(FMT, ...) \
+do { \
+ if (netfs_debug) \
+ kenter(FMT, ##__VA_ARGS__); \
+} while (0)
+
+#define _leave(FMT, ...) \
+do { \
+ if (netfs_debug) \
+ kleave(FMT, ##__VA_ARGS__); \
+} while (0)
+
+#define _debug(FMT, ...) \
+do { \
+ if (netfs_debug) \
+ kdebug(FMT, ##__VA_ARGS__); \
+} while (0)
+
+#else
+#define _enter(FMT, ...) no_printk("==> %s("FMT")", __func__, ##__VA_ARGS__)
+#define _leave(FMT, ...) no_printk("<== %s()"FMT"", __func__, ##__VA_ARGS__)
+#define _debug(FMT, ...) no_printk(FMT, ##__VA_ARGS__)
+#endif
+
/*
* assertions
*/
if (count == remaining)
return;
- kdebug("R=%08x[%u] ITER RESUB-MISMATCH %zx != %zx-%zx-%llx %x\n",
+ _debug("R=%08x[%u] ITER RESUB-MISMATCH %zx != %zx-%zx-%llx %x\n",
rreq->debug_id, subreq->debug_index,
iov_iter_count(&subreq->io_iter), subreq->transferred,
subreq->len, rreq->i_size,
struct netfs_io_request *rreq = subreq->rreq;
int u;
- kenter("R=%x[%x]{%llx,%lx},%zd",
+ _enter("R=%x[%x]{%llx,%lx},%zd",
rreq->debug_id, subreq->debug_index,
subreq->start, subreq->flags, transferred_or_error);
struct netfs_inode *ictx = netfs_inode(rreq->inode);
size_t lsize;
- kenter("%llx-%llx,%llx", subreq->start, subreq->start + subreq->len, rreq->i_size);
+ _enter("%llx-%llx,%llx", subreq->start, subreq->start + subreq->len, rreq->i_size);
if (rreq->origin != NETFS_DIO_READ) {
source = netfs_cache_prepare_read(subreq, rreq->i_size);
subreq->start = rreq->start + rreq->submitted;
subreq->len = io_iter->count;
- kdebug("slice %llx,%zx,%llx", subreq->start, subreq->len, rreq->submitted);
+ _debug("slice %llx,%zx,%llx", subreq->start, subreq->len, rreq->submitted);
list_add_tail(&subreq->rreq_link, &rreq->subrequests);
/* Call out to the cache to find out what it can do with the remaining
struct iov_iter io_iter;
int ret;
- kenter("R=%x %llx-%llx",
+ _enter("R=%x %llx-%llx",
rreq->debug_id, rreq->start, rreq->start + rreq->len - 1);
if (rreq->len == 0) {
atomic_set(&rreq->nr_outstanding, 1);
io_iter = rreq->io_iter;
do {
- kdebug("submit %llx + %llx >= %llx",
+ _debug("submit %llx + %llx >= %llx",
rreq->start, rreq->submitted, rreq->i_size);
if (rreq->origin == NETFS_DIO_READ &&
rreq->start + rreq->submitted >= rreq->i_size)
EXPORT_TRACEPOINT_SYMBOL(netfs_sreq);
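+/*
+ * Debugging mask used by the CONFIG_NETFS_DEBUG wrappers in internal.h.
+ * Root can change it at runtime through /sys/module/netfs/parameters/debug,
+ * or it can be set at boot with netfs.debug=<mask> when netfslib is built in.
+ */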
+unsigned int netfs_debug;
+module_param_named(debug, netfs_debug, uint, S_IWUSR | S_IRUGO);
+MODULE_PARM_DESC(debug, "Netfs support debugging mask");
+
static struct kmem_cache *netfs_request_slab;
static struct kmem_cache *netfs_subrequest_slab;
mempool_t netfs_request_pool;
struct fscache_cookie *cookie = netfs_i_cookie(ictx);
bool need_use = false;
- kenter("");
+ _enter("");
if (!filemap_dirty_folio(mapping, folio))
return false;
struct netfs_folio *finfo;
size_t flen = folio_size(folio);
- kenter("{%lx},%zx,%zx", folio->index, offset, length);
+ _enter("{%lx},%zx,%zx", folio->index, offset, length);
if (!folio_test_private(folio))
return;
{
struct list_head *next;
- kenter("R=%x[%x:]", wreq->debug_id, stream->stream_nr);
+ _enter("R=%x[%x:]", wreq->debug_id, stream->stream_nr);
if (list_empty(&stream->subrequests))
return;
unsigned int notes;
int s;
- kenter("%llx-%llx", wreq->start, wreq->start + wreq->len);
+ _enter("%llx-%llx", wreq->start, wreq->start + wreq->len);
trace_netfs_collect(wreq);
trace_netfs_rreq(wreq, netfs_rreq_trace_collect);
front = stream->front;
while (front) {
trace_netfs_collect_sreq(wreq, front);
- //kdebug("sreq [%x] %llx %zx/%zx",
+ //_debug("sreq [%x] %llx %zx/%zx",
// front->debug_index, front->start, front->transferred, front->len);
/* Stall if there may be a discontinuity. */
out:
netfs_put_group_many(wreq->group, wreq->nr_group_rel);
wreq->nr_group_rel = 0;
- kleave(" = %x", notes);
+ _leave(" = %x", notes);
return;
need_retry:
* that any partially completed op will have had any wholly transferred
* folios removed from it.
*/
- kdebug("retry");
+ _debug("retry");
netfs_retry_writes(wreq);
goto out;
}
size_t transferred;
int s;
- kenter("R=%x", wreq->debug_id);
+ _enter("R=%x", wreq->debug_id);
netfs_see_request(wreq, netfs_rreq_trace_see_work);
if (!test_bit(NETFS_RREQ_IN_PROGRESS, &wreq->flags)) {
if (wreq->origin == NETFS_DIO_WRITE)
inode_dio_end(wreq->inode);
- kdebug("finished");
+ _debug("finished");
trace_netfs_rreq(wreq, netfs_rreq_trace_wake_ip);
clear_bit_unlock(NETFS_RREQ_IN_PROGRESS, &wreq->flags);
wake_up_bit(&wreq->flags, NETFS_RREQ_IN_PROGRESS);
struct netfs_io_request *wreq = subreq->rreq;
struct netfs_io_stream *stream = &wreq->io_streams[subreq->stream_nr];
- kenter("%x[%x] %zd", wreq->debug_id, subreq->debug_index, transferred_or_error);
+ _enter("%x[%x] %zd", wreq->debug_id, subreq->debug_index, transferred_or_error);
switch (subreq->source) {
case NETFS_UPLOAD_TO_SERVER:
if (IS_ERR(wreq))
return wreq;
- kenter("R=%x", wreq->debug_id);
+ _enter("R=%x", wreq->debug_id);
ictx = netfs_inode(wreq->inode);
if (test_bit(NETFS_RREQ_WRITE_TO_CACHE, &wreq->flags))
subreq->max_nr_segs = INT_MAX;
subreq->stream_nr = stream->stream_nr;
- kenter("R=%x[%x]", wreq->debug_id, subreq->debug_index);
+ _enter("R=%x[%x]", wreq->debug_id, subreq->debug_index);
trace_netfs_sreq_ref(wreq->debug_id, subreq->debug_index,
refcount_read(&subreq->ref),
{
struct netfs_io_request *wreq = subreq->rreq;
- kenter("R=%x[%x],%zx", wreq->debug_id, subreq->debug_index, subreq->len);
+ _enter("R=%x[%x],%zx", wreq->debug_id, subreq->debug_index, subreq->len);
if (test_bit(NETFS_SREQ_FAILED, &subreq->flags))
return netfs_write_subrequest_terminated(subreq, subreq->error, false);
size_t part;
if (!stream->avail) {
- kleave("no write");
+ _leave("no write");
return len;
}
- kenter("R=%x[%x]", wreq->debug_id, subreq ? subreq->debug_index : 0);
+ _enter("R=%x[%x]", wreq->debug_id, subreq ? subreq->debug_index : 0);
if (subreq && start != subreq->start + subreq->len) {
netfs_issue_write(wreq, stream);
subreq = stream->construct;
part = min(subreq->max_len - subreq->len, len);
- kdebug("part %zx/%zx %zx/%zx", subreq->len, subreq->max_len, part, len);
+ _debug("part %zx/%zx %zx/%zx", subreq->len, subreq->max_len, part, len);
subreq->len += part;
subreq->nr_segs++;
bool to_eof = false, streamw = false;
bool debug = false;
- kenter("");
+ _enter("");
/* netfs_perform_write() may shift i_size around the page or from out
* of the page to beyond it, but cannot move i_size into or through the
if (fpos >= i_size) {
/* mmap beyond eof. */
- kdebug("beyond eof");
+ _debug("beyond eof");
folio_start_writeback(folio);
folio_unlock(folio);
wreq->nr_group_rel += netfs_folio_written_back(folio);
}
flen -= foff;
- kdebug("folio %zx %zx %zx", foff, flen, fsize);
+ _debug("folio %zx %zx %zx", foff, flen, fsize);
/* Deal with discontinuities in the stream of dirty pages. These can
* arise from a number of sources:
for (int s = 0; s < NR_IO_STREAMS; s++)
netfs_issue_write(wreq, &wreq->io_streams[s]);
- kleave(" = 0");
+ _leave(" = 0");
return 0;
}
netfs_stat(&netfs_n_wh_writepages);
do {
- kdebug("wbiter %lx %llx", folio->index, wreq->start + wreq->submitted);
+ _debug("wbiter %lx %llx", folio->index, wreq->start + wreq->submitted);
/* It appears we don't have to handle cyclic writeback wrapping. */
WARN_ON_ONCE(wreq && folio_pos(folio) < wreq->start + wreq->submitted);
mutex_unlock(&ictx->wb_lock);
netfs_put_request(wreq, false, netfs_rreq_trace_put_return);
- kleave(" = %d", error);
+ _leave(" = %d", error);
return error;
couldnt_start:
netfs_kill_dirty_pages(mapping, wbc, folio);
out:
mutex_unlock(&ictx->wb_lock);
- kleave(" = %d", error);
+ _leave(" = %d", error);
return error;
}
EXPORT_SYMBOL(netfs_writepages);
struct folio *folio, size_t copied, bool to_page_end,
struct folio **writethrough_cache)
{
- kenter("R=%x ic=%zu ws=%u cp=%zu tp=%u",
+ _enter("R=%x ic=%zu ws=%u cp=%zu tp=%u",
wreq->debug_id, wreq->iter.count, wreq->wsize, copied, to_page_end);
if (!*writethrough_cache) {
struct netfs_inode *ictx = netfs_inode(wreq->inode);
int ret;
- kenter("R=%x", wreq->debug_id);
+ _enter("R=%x", wreq->debug_id);
if (writethrough_cache)
netfs_write_folio(wreq, wbc, writethrough_cache);
loff_t start = wreq->start;
int error = 0;
- kenter("%zx", len);
+ _enter("%zx", len);
if (wreq->origin == NETFS_DIO_WRITE)
inode_dio_begin(wreq->inode);
while (len) {
// TODO: Prepare content encryption
- kdebug("unbuffered %zx", len);
+ _debug("unbuffered %zx", len);
part = netfs_advance_write(wreq, upload, start, len, false);
start += part;
len -= part;
if (list_empty(&upload->subrequests))
netfs_wake_write_collector(wreq, false);
- kleave(" = %d", error);
+ _leave(" = %d", error);
return error;
}