The mount structure and inode can be derived from the rtg, so drop the
redundant mp and ip arguments from xfs_rtrmapbt_init_cursor and look them
up from the rtgroup instead.
Signed-off-by: Christoph Hellwig <hch@lst.de>
xfs_rtgroup_lock(rtg, XFS_RTGLOCK_RMAP);
xfs_rtgroup_trans_join(tp, rtg, XFS_RTGLOCK_RMAP);
- *pcur = xfs_rtrmapbt_init_cursor(tp->t_mountp, tp, rtg,
- rtg->rtg_inodes[XFS_RTGI_RMAP]);
+ *pcur = xfs_rtrmapbt_init_cursor(tp, rtg);
return 0;
}
xfs_rtrmapbt_dup_cursor(
struct xfs_btree_cur *cur)
{
- struct xfs_btree_cur *new;
-
- new = xfs_rtrmapbt_init_cursor(cur->bc_mp, cur->bc_tp,
- to_rtg(cur->bc_group), cur->bc_ino.ip);
-
- return new;
+ return xfs_rtrmapbt_init_cursor(cur->bc_tp, to_rtg(cur->bc_group));
}
STATIC int
/* Allocate a new rt rmap btree cursor. */
struct xfs_btree_cur *
xfs_rtrmapbt_init_cursor(
- struct xfs_mount *mp,
struct xfs_trans *tp,
- struct xfs_rtgroup *rtg,
- struct xfs_inode *ip)
+ struct xfs_rtgroup *rtg)
{
+ struct xfs_inode *ip = rtg->rtg_inodes[XFS_RTGI_RMAP];
+ struct xfs_mount *mp = rtg_mount(rtg);
struct xfs_btree_cur *cur;
- struct xfs_ifork *ifp = xfs_ifork_ptr(ip, XFS_DATA_FORK);
xfs_assert_ilocked(ip, XFS_ILOCK_SHARED | XFS_ILOCK_EXCL);
cur->bc_ino.ip = ip;
cur->bc_group = xfs_group_hold(&rtg->rtg_group);
cur->bc_ino.whichfork = XFS_DATA_FORK;
- cur->bc_nlevels = be16_to_cpu(ifp->if_broot->bb_level) + 1;
+ cur->bc_nlevels = be16_to_cpu(ip->i_df.if_broot->bb_level) + 1;
cur->bc_ino.forksize = xfs_inode_fork_size(ip, XFS_DATA_FORK);
return cur;
ASSERT(xfs_has_rtsb(mp));
ASSERT(rtg_rgno(rtg) == 0);
- cur = xfs_rtrmapbt_init_cursor(mp, tp, rtg,
- rtg->rtg_inodes[XFS_RTGI_RMAP]);
+ cur = xfs_rtrmapbt_init_cursor(tp, rtg);
error = xfs_rmap_map_raw(cur, &rmap);
xfs_btree_del_cursor(cur, error);
return error;
/* rmaps only exist on crc enabled filesystems */
#define XFS_RTRMAP_BLOCK_LEN XFS_BTREE_LBLOCK_CRC_LEN
-struct xfs_btree_cur *xfs_rtrmapbt_init_cursor(struct xfs_mount *mp,
- struct xfs_trans *tp, struct xfs_rtgroup *rtg,
- struct xfs_inode *ip);
+struct xfs_btree_cur *xfs_rtrmapbt_init_cursor(struct xfs_trans *tp,
+ struct xfs_rtgroup *rtg);
struct xfs_btree_cur *xfs_rtrmapbt_stage_cursor(struct xfs_mount *mp,
struct xfs_rtgroup *rtg, struct xfs_inode *ip,
struct xbtree_ifakeroot *ifake);
int error;
xfs_rtgroup_lock(rtg, XFS_RTGLOCK_RMAP);
- cur = xfs_rtrmapbt_init_cursor(sc->mp, sc->tp, rtg,
- rtg->rtg_inodes[XFS_RTGI_RMAP]);
-
+ cur = xfs_rtrmapbt_init_cursor(sc->tp, rtg);
sbcri.sc = sc;
sbcri.whichfork = XFS_DATA_FORK;
error = xfs_rmap_query_all(cur, xchk_bmap_check_rmap, &sbcri);
sr->rtlock_flags = rtglock_flags;
if (xfs_has_rtrmapbt(sc->mp) && (rtglock_flags & XFS_RTGLOCK_RMAP))
- sr->rmap_cur = xfs_rtrmapbt_init_cursor(sc->mp, sc->tp,
- sr->rtg, sr->rtg->rtg_inodes[XFS_RTGI_RMAP]);
+ sr->rmap_cur = xfs_rtrmapbt_init_cursor(sc->tp, sr->rtg);
if (xfs_has_rtreflink(sc->mp) && (rtglock_flags & XFS_RTGLOCK_REFCOUNT))
sr->refc_cur = xfs_rtrefcountbt_init_cursor(sc->mp, sc->tp,
ASSERT(0);
return -EFSCORRUPTED;
}
- cur = xfs_rtrmapbt_init_cursor(sc->mp, sc->tp, sc->sr.rtg,
- sc->ip);
+ cur = xfs_rtrmapbt_init_cursor(sc->tp, sc->sr.rtg);
goto meta_btree;
case XFS_DINODE_FMT_REFCOUNT:
if (!sc->sr.rtg) {
* Determine if there are any other rmap records covering the first
* block of this extent. If so, the block is crosslinked.
*/
- cur = xfs_rtrmapbt_init_cursor(sc->mp, sc->tp, sc->sr.rtg,
- sc->sr.rtg->rtg_inodes[XFS_RTGI_RMAP]);
+ cur = xfs_rtrmapbt_init_cursor(sc->tp, sc->sr.rtg);
error = xfs_rmap_has_other_keys(cur, rgbno, 1, rs->oinfo,
crosslinked);
if (error)
if (sc->sm->sm_type != XFS_SCRUB_TYPE_RTRMAPBT &&
(sr->rtlock_flags & XFS_RTGLOCK_RMAP) &&
xfs_has_rtrmapbt(mp))
- sr->rmap_cur = xfs_rtrmapbt_init_cursor(mp, sc->tp, sr->rtg,
- sr->rtg->rtg_inodes[XFS_RTGI_RMAP]);
+ sr->rmap_cur = xfs_rtrmapbt_init_cursor(sc->tp, sr->rtg);
if (sc->sm->sm_type != XFS_SCRUB_TYPE_RTREFCBT &&
(sr->rtlock_flags & XFS_RTGLOCK_REFCOUNT) &&
while ((rtg = xfs_rtgroup_next(sc->mp, rtg))) {
if (ip == rtg->rtg_inodes[XFS_RTGI_RMAP]) {
- cur = xfs_rtrmapbt_init_cursor(sc->mp, sc->tp, rtg, ip);
+ cur = xfs_rtrmapbt_init_cursor(sc->tp, rtg);
error = xrep_rmap_scan_iroot_btree(rf, cur);
xfs_btree_del_cursor(cur, error);
xfs_rtgroup_rele(rtg);
rr->new_btree.bload.claim_block = xrep_rtrmap_claim_block;
rr->new_btree.bload.iroot_size = xrep_rtrmap_iroot_size;
- rmap_cur = xfs_rtrmapbt_init_cursor(sc->mp, NULL, rtg,
- rtg->rtg_inodes[XFS_RTGI_RMAP]);
+ rmap_cur = xfs_rtrmapbt_init_cursor(NULL, rtg);
xfs_btree_stage_ifakeroot(rmap_cur, &rr->new_btree.ifake);
/* Compute how many blocks we'll need for the rmaps collected. */
struct xfs_getfsmap_info *info,
struct xfs_btree_cur **curpp)
{
- struct xfs_mount *mp = tp->t_mountp;
struct xfs_rtgroup *rtg = to_rtg(info->group);
/* Report any gap at the end of the last rtgroup. */
/* Query the rtrmapbt */
xfs_rtgroup_lock(rtg, XFS_RTGLOCK_RMAP | XFS_RTGLOCK_REFCOUNT);
- *curpp = xfs_rtrmapbt_init_cursor(mp, tp, rtg,
- rtg->rtg_inodes[XFS_RTGI_RMAP]);
+ *curpp = xfs_rtrmapbt_init_cursor(tp, rtg);
return xfs_rmap_query_range(*curpp, &info->low, &info->high,
xfs_getfsmap_rtdev_rmapbt_helper, info);
}