	(*record_cnt)++;
}

-/*
- * Write log vectors into a single iclog which is guaranteed by the caller
- * to have enough space to write the entire log vector into.
- */
-static void
-xlog_write_full(
-	struct xfs_log_vec	*lv,
-	struct xlog_ticket	*ticket,
-	struct xlog_in_core	*iclog,
-	uint32_t		*log_offset,
-	uint32_t		*len,
-	uint32_t		*record_cnt,
-	uint32_t		*data_cnt)
-{
-	int			index;
-
-	ASSERT(*log_offset + *len <= iclog->ic_size ||
-		iclog->ic_state == XLOG_STATE_WANT_SYNC);
-
-	/*
-	 * Ordered log vectors have no regions to write so this
-	 * loop will naturally skip them.
-	 */
-	for (index = 0; index < lv->lv_niovecs; index++) {
-		xlog_write_region(ticket, iclog, log_offset,
-				&lv->lv_iovecp[index], len, record_cnt,
-				data_cnt);
-		ASSERT(lv->lv_iovecp[index].i_len == 0);
-	}
-}
-
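With xlog_write_full() removed, every log vector now goes through the renamed
xlog_write_vec(), whose body is outside this hunk. The following is a rough
sketch only, not the patch's actual implementation: it assumes, based on the
ASSERT in the deleted fast path, that xlog_write_region() copies as much of a
region as fits and decrements i_len, and it reuses the
xlog_write_get_more_iclog_space() helper from the surrounding code. The point
it illustrates is that a vector which fits entirely never takes the
"get more space" path, which is what makes the separate full-fit function
redundant:

/* Illustrative sketch, not the patch's actual xlog_write_vec() body. */
static int
xlog_write_vec(
	struct xfs_log_vec	*lv,
	struct xlog_ticket	*ticket,
	struct xlog_in_core	**iclogp,
	uint32_t		*log_offset,
	uint32_t		*len,
	uint32_t		*record_cnt,
	uint32_t		*data_cnt)
{
	struct xlog_in_core	*iclog = *iclogp;
	int			index;
	int			error;

	/* Ordered log vectors have no regions, so the loop skips them. */
	for (index = 0; index < lv->lv_niovecs; index++) {
		struct xfs_log_iovec	*reg = &lv->lv_iovecp[index];

		/*
		 * Assumed helper semantics: each call writes as much of the
		 * region as fits in the current iclog and decrements
		 * reg->i_len by the amount written.
		 */
		while (reg->i_len > 0) {
			xlog_write_region(ticket, iclog, log_offset, reg,
					len, record_cnt, data_cnt);
			if (!reg->i_len)
				break;
			/* Out of space: close this iclog and grab another. */
			error = xlog_write_get_more_iclog_space(ticket,
					&iclog, log_offset, *len,
					record_cnt, data_cnt);
			if (error)
				return error;
		}
	}
	*iclogp = iclog;
	return 0;
}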
static int
xlog_write_get_more_iclog_space(
	struct xlog_ticket	*ticket,
* wholly fit in the iclog.
*/
static int
-xlog_write_partial(
+xlog_write_vec(
	struct xfs_log_vec	*lv,
	struct xlog_ticket	*ticket,
	struct xlog_in_core	**iclogp,
	xlog_cil_set_ctx_write_state(ctx, iclog);

	list_for_each_entry(lv, lv_chain, lv_list) {
-		/*
-		 * If the entire log vec does not fit in the iclog, punt it to
-		 * the partial copy loop which can handle this case.
-		 */
-		if (lv->lv_niovecs &&
-		    lv->lv_bytes > iclog->ic_size - log_offset) {
-			error = xlog_write_partial(lv, ticket, &iclog,
-					&log_offset, &len, &record_cnt,
-					&data_cnt);
-			if (error) {
-				/*
-				 * We have no iclog to release, so just return
-				 * the error immediately.
-				 */
-				return error;
-			}
-		} else {
-			xlog_write_full(lv, ticket, iclog, &log_offset,
-					&len, &record_cnt, &data_cnt);
-		}
+		error = xlog_write_vec(lv, ticket, &iclog, &log_offset, &len,
+				&record_cnt, &data_cnt);
+		if (error)
+			return error;
	}
	ASSERT(len == 0);
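The payoff in the xlog_write() loop is visible above: the caller no longer has
to predict whether lv->lv_bytes fits in the remaining iclog space, since
xlog_write_vec() presumably makes that decision internally, per region. Error
handling collapses to a plain return because, as the deleted comment noted,
no iclog is held here that would need releasing, and the closing
ASSERT(len == 0) still verifies that the entire reserved length was consumed.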