return free_bytes;
}
-
static void
-xlog_ioend_work(
- struct work_struct *work)
+__xlog_ioend(
+ struct xlog_in_core *iclog,
+ int error)
{
- struct xlog_in_core *iclog =
- container_of(work, struct xlog_in_core, ic_end_io_work);
struct xlog *log = iclog->ic_log;
bool aborted = false;
- int error;
- error = blk_status_to_errno(iclog->ic_bio.bi_status);
#ifdef DEBUG
/* treat writes with injected CRC errors as failed */
if (iclog->ic_fail_crc)
}
xlog_state_done_syncing(iclog, aborted);
+}
+
+static void
+xlog_ioend_work(
+ struct work_struct *work)
+{
+ struct xlog_in_core *iclog =
+ container_of(work, struct xlog_in_core, ic_end_io_work);
+
+ __xlog_ioend(iclog, blk_status_to_errno(iclog->ic_bio.bi_status));
bio_uninit(&iclog->ic_bio);
/*
log->l_flags |= XLOG_ACTIVE_RECOVERY;
INIT_DELAYED_WORK(&log->l_work, xfs_log_worker);
+ /* try to setup direct DAX access to the log if possible */
+ if (IS_ENABLED(CONFIG_FS_DAX) && log->l_targ->bt_daxdev) {
+ if (bdev_dax_pgoff(log->l_targ->bt_bdev, blk_offset,
+ BBTOB(num_bblks), &log->l_dax_offset)) {
+ /* give up if the log is not suitably aligned */
+ fs_put_dax(log->l_targ->bt_daxdev);
+ log->l_targ->bt_daxdev = NULL;
+ }
+ }
+
log->l_prev_block = -1;
/* log->l_tail_lsn = 0x100000000LL; cycle = 1; current block = 0 */
xlog_assign_atomic_lsn(&log->l_tail_lsn, 1, 0);
submit_bio(&iclog->ic_bio);
}
+/*
+ * Copy @count bytes of log data into the DAX-mapped log device at byte
+ * @offset from the start of the log.  Returns 0 on success, -EIO if the
+ * whole log range could not be direct-access mapped.
+ */
+static int
+xlog_copy_to_dax(
+ struct xlog *log,
+ uint64_t offset,
+ void *data,
+ unsigned int count)
+{
+ uint64_t log_size = BBTOB(log->l_logBBsize);
+ long nr_pages = howmany(log_size, PAGE_SIZE);
+ int id, ret = -EIO;
+ void *pmem_addr;
+
+ /*
+ * XXX: do we need to do anything about badblocks or clearing poison?
+ */
+ id = dax_read_lock();
+ /*
+ * NOTE(review): dax_direct_access() can report *more* pages than
+ * requested when the device extends past the mapped range, so the
+ * "== nr_pages" success test may spuriously fail on larger devices;
+ * confirm whether ">= nr_pages" was intended.
+ */
+ if (likely(dax_direct_access(log->l_targ->bt_daxdev, log->l_dax_offset,
+ nr_pages, &pmem_addr, NULL) == nr_pages)) {
+ /* flush-on-copy variant so the write reaches persistent media */
+ memcpy_flushcache(pmem_addr + offset, data, count);
+ ret = 0;
+ }
+ dax_read_unlock(id);
+ return ret;
+}
+
+/*
+ * Write an in-core log buffer straight to a DAX-capable log device and
+ * complete the "I/O" synchronously through __xlog_ioend().  Handles the
+ * case where the iclog wraps past the physical end of the log.
+ */
+static void
+xlog_write_iclog_dax(
+ struct xlog *log,
+ struct xlog_in_core *iclog,
+ uint64_t offset,
+ unsigned int count)
+{
+ uint64_t log_size = BBTOB(log->l_logBBsize);
+ void *data = iclog->ic_data;
+ int error = -EIO;
+
+ /* a shut-down log never touches the device; complete with -EIO */
+ if (unlikely(iclog->ic_state == XLOG_STATE_IOERROR))
+ goto out;
+
+ /*
+ * Flush the data device first so metadata written back from the AIL
+ * is stable before the log update that references it becomes visible.
+ */
+ xfs_blkdev_issue_flush(log->l_mp->m_ddev_targp);
+ if (offset + count > log_size) {
+ /* iclog wraps around the end of the log: copy in two pieces */
+ unsigned int split = log_size - offset;
+
+ if (xlog_copy_to_dax(log, offset, data, split) ||
+ xlog_copy_to_dax(log, 0, data + split, count - split))
+ goto out;
+ } else {
+ if (xlog_copy_to_dax(log, offset, data, count))
+ goto out;
+ }
+ /* second flush makes the just-written log records durable */
+ xfs_blkdev_issue_flush(log->l_targ);
+ error = 0;
+out:
+ __xlog_ioend(iclog, error);
+}
+
/*
* We need to bump cycle number for the part of the iclog that is
* written to the start of the log. Watch out for the header magic
}
#endif
+ xlog_verify_iclog(log, iclog, count);
+
+ if (IS_ENABLED(CONFIG_FS_DAX) && log->l_targ->bt_daxdev) {
+ xlog_write_iclog_dax(log, iclog, BBTOB(bno), count);
+ return;
+ }
+
/*
* Flush the data device before flushing the log to make sure all meta
* data written back from the AIL actually made it to disk before
need_flush = false;
}
- xlog_verify_iclog(log, iclog, count);
xlog_write_iclog(log, iclog, bno, count, need_flush);
}