#include "xfs_health.h"
#include "defer_item.h"
#include "xfs_rtgroup.h"
+#include "xfs_rtrmap_btree.h"
struct kmem_cache *xfs_rmap_intent_cache;
struct xfs_rmap_intent *ri,
struct xfs_btree_cur **pcur)
{
- /* coming in a subsequent patch */
- ASSERT(0);
- return -EFSCORRUPTED;
+ struct xfs_owner_info oinfo;
+ struct xfs_mount *mp = tp->t_mountp;
+ struct xfs_btree_cur *rcur = *pcur;
+ xfs_rgblock_t bno;
+ bool unwritten;
+
+ trace_xfs_rmap_deferred(mp, ri);
+
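+ /* Error injection point: simulate a failure while finishing the intent. */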
+ if (XFS_TEST_ERROR(false, mp, XFS_ERRTAG_RMAP_FINISH_ONE))
+ return -EIO;
+
+ /*
+ * If we haven't gotten a cursor or the cursor rtgroup doesn't match
+ * the startblock's rt group, get one now.
+ */
+ if (rcur != NULL && rcur->bc_ino.rtg != ri->ri_rtg) {
+ xfs_btree_del_cursor(rcur, 0);
+ rcur = NULL;
+ }
+ if (rcur == NULL) {
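+ /*
+ * Lock the rt group's rmap inode and join it to the transaction;
+ * the ILOCK is released when the transaction commits.
+ */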
+ xfs_rtgroup_lock(ri->ri_rtg, XFS_RTGLOCK_RMAP);
+ xfs_rtgroup_trans_join(tp, ri->ri_rtg, XFS_RTGLOCK_RMAP);
+ *pcur = rcur = xfs_rtrmapbt_init_cursor(mp, tp, ri->ri_rtg,
+ ri->ri_rtg->rtg_inodes[XFS_RTG_RMAP]);
+ }
+
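+ /*
+ * Express the file mapping as an rmap owner record and convert the
+ * extent's rt startblock to a block offset within its rt group.
+ */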
+ xfs_rmap_ino_owner(&oinfo, ri->ri_owner, ri->ri_whichfork,
+ ri->ri_bmap.br_startoff);
+ unwritten = ri->ri_bmap.br_state == XFS_EXT_UNWRITTEN;
+ bno = xfs_rtb_to_rgbno(mp, ri->ri_bmap.br_startblock);
+
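+ /* Apply this deferred rmap update to the realtime rmap btree. */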
+ return __xfs_rmap_finish_intent(rcur, ri->ri_type, bno,
+ ri->ri_bmap.br_blockcount, &oinfo, unwritten);
}
/* Lock metadata inodes associated with this rt group. */
} else if (rtglock_flags & XFS_RTGLOCK_BITMAP_SHARED) {
xfs_ilock(rtg->rtg_inodes[XFS_RTG_BITMAP], XFS_ILOCK_SHARED);
}
+
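+ /* Also take the rt rmap inode's lock, if this rt group has one. */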
+ if ((rtglock_flags & XFS_RTGLOCK_RMAP) &&
+ rtg->rtg_inodes[XFS_RTG_RMAP] != NULL)
+ xfs_ilock(rtg->rtg_inodes[XFS_RTG_RMAP], XFS_ILOCK_EXCL);
}
/* Unlock metadata inodes associated with this rt group. */
ASSERT(!(rtglock_flags & XFS_RTGLOCK_BITMAP_SHARED) ||
!(rtglock_flags & XFS_RTGLOCK_BITMAP));
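+ /* Unlock the rt rmap inode, if this rt group has one. */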
+ if ((rtglock_flags & XFS_RTGLOCK_RMAP) && rtg->rtg_inodes[XFS_RTG_RMAP])
+ xfs_iunlock(rtg->rtg_inodes[XFS_RTG_RMAP], XFS_ILOCK_EXCL);
+
if (rtglock_flags & XFS_RTGLOCK_BITMAP) {
xfs_iunlock(rtg->rtg_inodes[XFS_RTG_SUMMARY], XFS_ILOCK_EXCL);
xfs_iunlock(rtg->rtg_inodes[XFS_RTG_BITMAP], XFS_ILOCK_EXCL);

/* Join the metadata inodes for this rt group to a transaction. */
xfs_trans_ijoin(tp, rtg->rtg_inodes[XFS_RTG_SUMMARY],
XFS_ILOCK_EXCL);
}
+
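+ /* Join the rt rmap inode too, if present; it stays locked until commit. */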
+ if ((rtglock_flags & XFS_RTGLOCK_RMAP) &&
+ rtg->rtg_inodes[XFS_RTG_RMAP] != NULL)
+ xfs_trans_ijoin(tp, rtg->rtg_inodes[XFS_RTG_RMAP],
+ XFS_ILOCK_EXCL);
}
/* Retrieve rt group geometry. */
/* Lock the rt bitmap inode in exclusive mode */
#define XFS_RTGLOCK_BITMAP (1U << 0)
/* Lock the rt bitmap inode in shared mode */
#define XFS_RTGLOCK_BITMAP_SHARED (1U << 1)
+/* Lock the rt rmap inode in exclusive mode */
+#define XFS_RTGLOCK_RMAP (1U << 2)
#define XFS_RTGLOCK_ALL_FLAGS (XFS_RTGLOCK_BITMAP | \
- XFS_RTGLOCK_BITMAP_SHARED)
+ XFS_RTGLOCK_BITMAP_SHARED | \
+ XFS_RTGLOCK_RMAP)
void xfs_rtgroup_lock(struct xfs_rtgroup *rtg, unsigned int rtglock_flags);
void xfs_rtgroup_unlock(struct xfs_rtgroup *rtg, unsigned int rtglock_flags);