struct xfs_trans *tp,
enum xfs_rmap_intent_type type,
uint64_t owner,
+ bool isrt,
int whichfork,
struct xfs_bmbt_irec *bmap)
{
ri->ri_owner = owner;
ri->ri_whichfork = whichfork;
ri->ri_bmap = *bmap;
+ ri->ri_realtime = isrt;
xfs_rmap_defer_add(tp, ri);
}
struct xfs_bmbt_irec *PREV)
{
enum xfs_rmap_intent_type type = XFS_RMAP_MAP;
+ bool isrt = xfs_ifork_is_realtime(ip, whichfork);
if (!xfs_rmap_update_is_needed(tp->t_mountp, whichfork))
return;
if (whichfork != XFS_ATTR_FORK && xfs_is_reflink_inode(ip))
type = XFS_RMAP_MAP_SHARED;
- __xfs_rmap_add(tp, type, ip->i_ino, whichfork, PREV);
+ __xfs_rmap_add(tp, type, ip->i_ino, isrt, whichfork, PREV);
}
/* Unmap an extent out of a file. */
struct xfs_bmbt_irec *PREV)
{
enum xfs_rmap_intent_type type = XFS_RMAP_UNMAP;
+ bool isrt = xfs_ifork_is_realtime(ip, whichfork);
if (!xfs_rmap_update_is_needed(tp->t_mountp, whichfork))
return;
if (whichfork != XFS_ATTR_FORK && xfs_is_reflink_inode(ip))
type = XFS_RMAP_UNMAP_SHARED;
- __xfs_rmap_add(tp, type, ip->i_ino, whichfork, PREV);
+ __xfs_rmap_add(tp, type, ip->i_ino, isrt, whichfork, PREV);
}
/*
struct xfs_bmbt_irec *PREV)
{
enum xfs_rmap_intent_type type = XFS_RMAP_CONVERT;
+ bool isrt = xfs_ifork_is_realtime(ip, whichfork);
if (!xfs_rmap_update_is_needed(mp, whichfork))
return;
if (whichfork != XFS_ATTR_FORK && xfs_is_reflink_inode(ip))
type = XFS_RMAP_CONVERT_SHARED;
- __xfs_rmap_add(tp, type, ip->i_ino, whichfork, PREV);
+ __xfs_rmap_add(tp, type, ip->i_ino, isrt, whichfork, PREV);
}
/* Schedule the creation of an rmap for non-file data. */
void
xfs_rmap_alloc_extent(
struct xfs_trans *tp,
+ bool isrt,
xfs_fsblock_t fsbno,
xfs_extlen_t len,
uint64_t owner)
bmap.br_startoff = 0;
bmap.br_state = XFS_EXT_NORM;
- __xfs_rmap_add(tp, XFS_RMAP_ALLOC, owner, XFS_DATA_FORK, &bmap);
+ __xfs_rmap_add(tp, XFS_RMAP_ALLOC, owner, isrt, XFS_DATA_FORK, &bmap);
}
/* Schedule the deletion of an rmap for non-file data. */
void
xfs_rmap_free_extent(
struct xfs_trans *tp,
+ bool isrt,
xfs_fsblock_t fsbno,
xfs_extlen_t len,
uint64_t owner)
bmap.br_startoff = 0;
bmap.br_state = XFS_EXT_NORM;
- __xfs_rmap_add(tp, XFS_RMAP_FREE, owner, XFS_DATA_FORK, &bmap);
+ __xfs_rmap_add(tp, XFS_RMAP_FREE, owner, isrt, XFS_DATA_FORK, &bmap);
}
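
Since xfs_rmap_alloc_extent() and xfs_rmap_free_extent() now take isrt as an explicit argument, every non-file caller has to state which section the extent lives in. A minimal illustrative sketch of how an AG-only caller might adapt (the helper name is hypothetical, not part of this patch):

/*
 * Illustrative only: metadata blocks owned by the AG btrees always sit
 * in the data section, so a caller like this would pass isrt = false.
 */
STATIC void
example_free_ag_block(			/* hypothetical helper */
	struct xfs_btree_cur	*cur,
	xfs_fsblock_t		fsbno)
{
	xfs_rmap_free_extent(cur->bc_tp, false, fsbno, 1,
			XFS_RMAP_OWN_AG);
}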
/* Compare rmap records. Returns -1 if a < b, 1 if a > b, and 0 if equal. */
#include "xfs_ag.h"
#include "xfs_btree.h"
#include "xfs_trace.h"
+#include "xfs_rtgroup.h"
struct kmem_cache *xfs_rui_cache;
struct kmem_cache *xfs_rud_cache;
ASSERT(atomic_read(&ruip->rui_next_extent) ==
ruip->rui_format.rui_nextents);
- ruip->rui_format.rui_type = XFS_LI_RUI;
+ ASSERT(lip->li_type == XFS_LI_RUI || lip->li_type == XFS_LI_RUI_RT);
+
+ ruip->rui_format.rui_type = lip->li_type;
ruip->rui_format.rui_size = 1;
xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_RUI_FORMAT, &ruip->rui_format,
STATIC struct xfs_rui_log_item *
xfs_rui_init(
struct xfs_mount *mp,
+ unsigned short item_type,
uint nextents)
{
struct xfs_rui_log_item *ruip;
ASSERT(nextents > 0);
+ ASSERT(item_type == XFS_LI_RUI || item_type == XFS_LI_RUI_RT);
+
if (nextents > XFS_RUI_MAX_FAST_EXTENTS)
ruip = kzalloc(xfs_rui_log_item_sizeof(nextents),
GFP_KERNEL | __GFP_NOFAIL);
else
ruip = kmem_cache_zalloc(xfs_rui_cache,
GFP_KERNEL | __GFP_NOFAIL);
- xfs_log_item_init(mp, &ruip->rui_item, XFS_LI_RUI, &xfs_rui_item_ops);
+ xfs_log_item_init(mp, &ruip->rui_item, item_type, &xfs_rui_item_ops);
ruip->rui_format.rui_nextents = nextents;
ruip->rui_format.rui_id = (uintptr_t)(void *)ruip;
atomic_set(&ruip->rui_next_extent, 0);
struct xfs_rud_log_item *rudp = RUD_ITEM(lip);
struct xfs_log_iovec *vecp = NULL;
- rudp->rud_format.rud_type = XFS_LI_RUD;
+ ASSERT(lip->li_type == XFS_LI_RUD || lip->li_type == XFS_LI_RUD_RT);
+
+ rudp->rud_format.rud_type = lip->li_type;
rudp->rud_format.rud_size = 1;
xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_RUD_FORMAT, &rudp->rud_format,
return list_entry(e, struct xfs_rmap_intent, ri_list);
}
+static inline bool
+xfs_rui_item_isrt(const struct xfs_log_item *lip)
+{
+ ASSERT(lip->li_type == XFS_LI_RUI || lip->li_type == XFS_LI_RUI_RT);
+
+ return lip->li_type == XFS_LI_RUI_RT;
+}
+
/* Sort rmap intents by AG. */
static int
xfs_rmap_update_diff_items(
}
static struct xfs_log_item *
-xfs_rmap_update_create_intent(
+__xfs_rmap_update_create_intent(
struct xfs_trans *tp,
struct list_head *items,
unsigned int count,
- bool sort)
+ bool sort,
+ unsigned short item_type)
{
struct xfs_mount *mp = tp->t_mountp;
- struct xfs_rui_log_item *ruip = xfs_rui_init(mp, count);
+ struct xfs_rui_log_item *ruip;
struct xfs_rmap_intent *ri;
ASSERT(count > 0);
+ ruip = xfs_rui_init(mp, item_type, count);
if (sort)
list_sort(mp, items, xfs_rmap_update_diff_items);
list_for_each_entry(ri, items, ri_list)
return &ruip->rui_item;
}
+static struct xfs_log_item *
+xfs_rmap_update_create_intent(
+ struct xfs_trans *tp,
+ struct list_head *items,
+ unsigned int count,
+ bool sort)
+{
+ return __xfs_rmap_update_create_intent(tp, items, count, sort,
+ XFS_LI_RUI);
+}
+
+static inline unsigned short
+xfs_rud_type_from_rui(const struct xfs_rui_log_item *ruip)
+{
+ return xfs_rui_item_isrt(&ruip->rui_item) ? XFS_LI_RUD_RT : XFS_LI_RUD;
+}
+
/* Get an RUD so we can process all the deferred rmap updates. */
static struct xfs_log_item *
xfs_rmap_update_create_done(
struct xfs_rud_log_item *rudp;
rudp = kmem_cache_zalloc(xfs_rud_cache, GFP_KERNEL | __GFP_NOFAIL);
- xfs_log_item_init(tp->t_mountp, &rudp->rud_item, XFS_LI_RUD,
- &xfs_rud_item_ops);
+ xfs_log_item_init(tp->t_mountp, &rudp->rud_item,
+ xfs_rud_type_from_rui(ruip), &xfs_rud_item_ops);
rudp->rud_ruip = ruip;
rudp->rud_format.rud_rui_id = ruip->rui_format.rui_id;
trace_xfs_rmap_defer(mp, ri);
+ /*
+ * Deferred rmap updates for the realtime and data sections must use
+ * separate transactions to finish deferred work because updates to
+ * realtime metadata files can lock AGFs to allocate btree blocks and
+ * we don't want that mixing with the AGF locks taken to finish data
+ * section updates.
+ */
ri->ri_group = xfs_group_intent_get(mp, ri->ri_bmap.br_startblock,
- XG_TYPE_AG);
- xfs_defer_add(tp, &ri->ri_list, &xfs_rmap_update_defer_type);
+ ri->ri_realtime ? XG_TYPE_RTG : XG_TYPE_AG);
+ xfs_defer_add(tp, &ri->ri_list, ri->ri_realtime ?
+ &xfs_rtrmap_update_defer_type :
+ &xfs_rmap_update_defer_type);
}
/* Cancel a deferred rmap update. */
struct xfs_map_extent *map;
unsigned int count;
+ ASSERT(intent->li_type == XFS_LI_RUI ||
+        intent->li_type == XFS_LI_RUI_RT);
+
count = RUI_ITEM(intent)->rui_format.rui_nextents;
map = RUI_ITEM(intent)->rui_format.rui_extents;
- ruip = xfs_rui_init(tp->t_mountp, count);
+ ruip = xfs_rui_init(tp->t_mountp, intent->li_type, count);
memcpy(ruip->rui_format.rui_extents, map, count * sizeof(*map));
atomic_set(&ruip->rui_next_extent, count);
.relog_intent = xfs_rmap_relog_intent,
};
+#ifdef CONFIG_XFS_RT
+static struct xfs_log_item *
+xfs_rtrmap_update_create_intent(
+ struct xfs_trans *tp,
+ struct list_head *items,
+ unsigned int count,
+ bool sort)
+{
+ return __xfs_rmap_update_create_intent(tp, items, count, sort,
+ XFS_LI_RUI_RT);
+}
+
+/* Clean up after calling xfs_rmap_finish_one. */
+STATIC void
+xfs_rtrmap_finish_one_cleanup(
+ struct xfs_trans *tp,
+ struct xfs_btree_cur *rcur,
+ int error)
+{
+ if (rcur)
+ xfs_btree_del_cursor(rcur, error);
+}
+
+const struct xfs_defer_op_type xfs_rtrmap_update_defer_type = {
+ .name = "rtrmap",
+ .max_items = XFS_RUI_MAX_FAST_EXTENTS,
+ .create_intent = xfs_rtrmap_update_create_intent,
+ .abort_intent = xfs_rmap_update_abort_intent,
+ .create_done = xfs_rmap_update_create_done,
+ .finish_item = xfs_rmap_update_finish_item,
+ .finish_cleanup = xfs_rtrmap_finish_one_cleanup,
+ .cancel_item = xfs_rmap_update_cancel_item,
+ .recover_work = xfs_rmap_recover_work,
+ .relog_intent = xfs_rmap_relog_intent,
+};
+#else
+const struct xfs_defer_op_type xfs_rtrmap_update_defer_type = {
+ .name = "rtrmap",
+};
+#endif
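
For the new defer type to be reachable from the deferred-work machinery and the rmap code, it also needs a declaration next to the existing one. A sketch of what that would look like, assuming it lives alongside its data-section counterpart in libxfs/xfs_defer.h:

/* Assumed placement: libxfs/xfs_defer.h */
extern const struct xfs_defer_op_type xfs_rmap_update_defer_type;
extern const struct xfs_defer_op_type xfs_rtrmap_update_defer_type;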
+
STATIC bool
xfs_rui_item_match(
struct xfs_log_item *lip,
return -EFSCORRUPTED;
}
- ruip = xfs_rui_init(mp, rui_formatp->rui_nextents);
+ ruip = xfs_rui_init(mp, ITEM_TYPE(item), rui_formatp->rui_nextents);
xfs_rui_copy_format(&ruip->rui_format, rui_formatp);
atomic_set(&ruip->rui_next_extent, rui_formatp->rui_nextents);
.commit_pass2 = xlog_recover_rui_commit_pass2,
};
+#ifdef CONFIG_XFS_RT
+STATIC int
+xlog_recover_rtrui_commit_pass2(
+ struct xlog *log,
+ struct list_head *buffer_list,
+ struct xlog_recover_item *item,
+ xfs_lsn_t lsn)
+{
+ struct xfs_mount *mp = log->l_mp;
+ struct xfs_rui_log_item *ruip;
+ struct xfs_rui_log_format *rui_formatp;
+ size_t len;
+
+ rui_formatp = item->ri_buf[0].i_addr;
+
+ if (item->ri_buf[0].i_len < xfs_rui_log_format_sizeof(0)) {
+ XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
+ item->ri_buf[0].i_addr, item->ri_buf[0].i_len);
+ return -EFSCORRUPTED;
+ }
+
+ len = xfs_rui_log_format_sizeof(rui_formatp->rui_nextents);
+ if (item->ri_buf[0].i_len != len) {
+ XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
+ item->ri_buf[0].i_addr, item->ri_buf[0].i_len);
+ return -EFSCORRUPTED;
+ }
+
+ ruip = xfs_rui_init(mp, ITEM_TYPE(item), rui_formatp->rui_nextents);
+ xfs_rui_copy_format(&ruip->rui_format, rui_formatp);
+ atomic_set(&ruip->rui_next_extent, rui_formatp->rui_nextents);
+
+ xlog_recover_intent_item(log, &ruip->rui_item, lsn,
+ &xfs_rtrmap_update_defer_type);
+ return 0;
+}
+#else
+STATIC int
+xlog_recover_rtrui_commit_pass2(
+ struct xlog *log,
+ struct list_head *buffer_list,
+ struct xlog_recover_item *item,
+ xfs_lsn_t lsn)
+{
+ XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, log->l_mp,
+ item->ri_buf[0].i_addr, item->ri_buf[0].i_len);
+ return -EFSCORRUPTED;
+}
+#endif
+
+const struct xlog_recover_item_ops xlog_rtrui_item_ops = {
+ .item_type = XFS_LI_RUI_RT,
+ .commit_pass2 = xlog_recover_rtrui_commit_pass2,
+};
+
/*
* This routine is called when an RUD format structure is found in a committed
* transaction in the log. Its purpose is to cancel the corresponding RUI if it
.item_type = XFS_LI_RUD,
.commit_pass2 = xlog_recover_rud_commit_pass2,
};
+
+#ifdef CONFIG_XFS_RT
+STATIC int
+xlog_recover_rtrud_commit_pass2(
+ struct xlog *log,
+ struct list_head *buffer_list,
+ struct xlog_recover_item *item,
+ xfs_lsn_t lsn)
+{
+ struct xfs_rud_log_format *rud_formatp;
+
+ rud_formatp = item->ri_buf[0].i_addr;
+ if (item->ri_buf[0].i_len != sizeof(struct xfs_rud_log_format)) {
+ XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, log->l_mp,
+ rud_formatp, item->ri_buf[0].i_len);
+ return -EFSCORRUPTED;
+ }
+
+ xlog_recover_release_intent(log, XFS_LI_RUI_RT,
+ rud_formatp->rud_rui_id);
+ return 0;
+}
+#else
+# define xlog_recover_rtrud_commit_pass2 xlog_recover_rtrui_commit_pass2
+#endif
+
+const struct xlog_recover_item_ops xlog_rtrud_item_ops = {
+ .item_type = XFS_LI_RUD_RT,
+ .commit_pass2 = xlog_recover_rtrud_commit_pass2,
+};
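
Log recovery only invokes these ops if they are registered in the item-type dispatch table. A sketch of that registration, assuming the usual xlog_recover_item_ops[] array in fs/xfs/xfs_log_recover.c:

/* Assumed registration point: fs/xfs/xfs_log_recover.c */
static const struct xlog_recover_item_ops *xlog_recover_item_ops[] = {
	/* existing buf/inode/EFI/RUI/... ops elided */
	&xlog_rtrui_item_ops,
	&xlog_rtrud_item_ops,
};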