From cc1bdf502e97cfeeb78db727e04051d0b0a05a23 Mon Sep 17 00:00:00 2001
From: "Darrick J. Wong" <djwong@kernel.org>
Date: Mon, 23 Sep 2024 13:42:32 -0700
Subject: [PATCH] xfs: wire up realtime refcount btree cursors

Wire up realtime refcount btree cursors wherever they're needed
throughout the code base.

Signed-off-by: Darrick J. Wong <djwong@kernel.org>
---
 libxfs/xfs_btree.h            |   2 +-
 libxfs/xfs_refcount.c         | 100 +++++++++++++++++++++++++++++++++-
 libxfs/xfs_rtgroup.c          |  13 +++++
 libxfs/xfs_rtgroup.h          |   5 +-
 libxfs/xfs_rtrefcount_btree.c |   4 +-
 5 files changed, 117 insertions(+), 7 deletions(-)

diff --git a/libxfs/xfs_btree.h b/libxfs/xfs_btree.h
index 99624cd8b..e14ffd514 100644
--- a/libxfs/xfs_btree.h
+++ b/libxfs/xfs_btree.h
@@ -297,7 +297,7 @@ struct xfs_btree_cur
 		struct {
 			unsigned int	nr_ops;		/* # record updates */
 			unsigned int	shape_changes;	/* # of extent splits */
-		} bc_refc;	/* refcountbt */
+		} bc_refc;	/* refcountbt/rtrefcountbt */
 	};
 
 	/* Must be at the end of the struct! */
diff --git a/libxfs/xfs_refcount.c b/libxfs/xfs_refcount.c
index 408000123..e57b5733a 100644
--- a/libxfs/xfs_refcount.c
+++ b/libxfs/xfs_refcount.c
@@ -25,6 +25,7 @@
 #include "xfs_health.h"
 #include "defer_item.h"
 #include "xfs_rtgroup.h"
+#include "xfs_rtrefcount_btree.h"
 
 struct kmem_cache	*xfs_refcount_intent_cache;
 
@@ -1460,6 +1461,32 @@ xfs_refcount_finish_one(
 	return error;
 }
 
+/*
+ * Set up a continuation of a deferred rtrefcount operation by updating the
+ * intent.  Checks to make sure we're not going to run off the end of the
+ * rtgroup.
+ */
+static inline int
+xfs_rtrefcount_continue_op(
+	struct xfs_btree_cur		*cur,
+	struct xfs_refcount_intent	*ri,
+	xfs_agblock_t			new_agbno)
+{
+	struct xfs_mount		*mp = cur->bc_mp;
+	struct xfs_rtgroup		*rtg = to_rtg(ri->ri_group);
+
+	if (XFS_IS_CORRUPT(mp, !xfs_verify_rgbext(rtg, new_agbno,
+					ri->ri_blockcount))) {
+		xfs_btree_mark_sick(cur);
+		return -EFSCORRUPTED;
+	}
+
+	ri->ri_startblock = xfs_rgbno_to_rtb(rtg, new_agbno);
+
+	ASSERT(xfs_verify_rtbext(mp, ri->ri_startblock, ri->ri_blockcount));
+	return 0;
+}
+
 /*
  * Process one of the deferred realtime refcount operations.  We pass back the
  * btree cursor to maintain our lock on the btree between calls.
@@ -1470,8 +1497,77 @@ xfs_rtrefcount_finish_one(
 	struct xfs_refcount_intent	*ri,
 	struct xfs_btree_cur		**pcur)
 {
-	ASSERT(0);
-	return -EFSCORRUPTED;
+	struct xfs_mount		*mp = tp->t_mountp;
+	struct xfs_rtgroup		*rtg = to_rtg(ri->ri_group);
+	struct xfs_btree_cur		*rcur = *pcur;
+	int				error = 0;
+	xfs_rgblock_t			bno;
+	unsigned long			nr_ops = 0;
+	int				shape_changes = 0;
+
+	bno = xfs_rtb_to_rgbno(mp, ri->ri_startblock);
+
+	trace_xfs_refcount_deferred(mp, ri);
+
+	if (XFS_TEST_ERROR(false, mp, XFS_ERRTAG_REFCOUNT_FINISH_ONE))
+		return -EIO;
+
+	/*
+	 * If we haven't gotten a cursor or the cursor rtgroup doesn't match
+	 * the startblock, get one now.
+	 */
+	if (rcur != NULL && rcur->bc_group != ri->ri_group) {
+		nr_ops = rcur->bc_refc.nr_ops;
+		shape_changes = rcur->bc_refc.shape_changes;
+		xfs_btree_del_cursor(rcur, 0);
+		rcur = NULL;
+		*pcur = NULL;
+	}
+	if (rcur == NULL) {
+		xfs_rtgroup_lock(rtg, XFS_RTGLOCK_REFCOUNT);
+		xfs_rtgroup_trans_join(tp, rtg, XFS_RTGLOCK_REFCOUNT);
+		*pcur = rcur = xfs_rtrefcountbt_init_cursor(tp, rtg);
+
+		rcur->bc_refc.nr_ops = nr_ops;
+		rcur->bc_refc.shape_changes = shape_changes;
+	}
+
+	switch (ri->ri_type) {
+	case XFS_REFCOUNT_INCREASE:
+		error = xfs_refcount_adjust(rcur, &bno, &ri->ri_blockcount,
+				XFS_REFCOUNT_ADJUST_INCREASE);
+		if (error)
+			return error;
+		if (ri->ri_blockcount > 0)
+			error = xfs_rtrefcount_continue_op(rcur, ri, bno);
+		break;
+	case XFS_REFCOUNT_DECREASE:
+		error = xfs_refcount_adjust(rcur, &bno, &ri->ri_blockcount,
+				XFS_REFCOUNT_ADJUST_DECREASE);
+		if (error)
+			return error;
+		if (ri->ri_blockcount > 0)
+			error = xfs_rtrefcount_continue_op(rcur, ri, bno);
+		break;
+	case XFS_REFCOUNT_ALLOC_COW:
+		error = __xfs_refcount_cow_alloc(rcur, bno, ri->ri_blockcount);
+		if (error)
+			return error;
+		ri->ri_blockcount = 0;
+		break;
+	case XFS_REFCOUNT_FREE_COW:
+		error = __xfs_refcount_cow_free(rcur, bno, ri->ri_blockcount);
+		if (error)
+			return error;
+		ri->ri_blockcount = 0;
+		break;
+	default:
+		ASSERT(0);
+		return -EFSCORRUPTED;
+	}
+	if (!error && ri->ri_blockcount > 0)
+		trace_xfs_refcount_finish_one_leftover(mp, ri);
+	return error;
 }
 
 /*
diff --git a/libxfs/xfs_rtgroup.c b/libxfs/xfs_rtgroup.c
index 1d68670b9..95b15586d 100644
--- a/libxfs/xfs_rtgroup.c
+++ b/libxfs/xfs_rtgroup.c
@@ -181,6 +181,10 @@ xfs_rtgroup_lock(
 	if ((rtglock_flags & XFS_RTGLOCK_RMAP) &&
 	    rtg->rtg_inodes[XFS_RTGI_RMAP] != NULL)
 		xfs_ilock(rtg->rtg_inodes[XFS_RTGI_RMAP], XFS_ILOCK_EXCL);
+
+	if ((rtglock_flags & XFS_RTGLOCK_REFCOUNT) &&
+	    rtg->rtg_inodes[XFS_RTGI_REFCOUNT] != NULL)
+		xfs_ilock(rtg->rtg_inodes[XFS_RTGI_REFCOUNT], XFS_ILOCK_EXCL);
 }
 
 /* Unlock metadata inodes associated with this rt group. */
@@ -193,6 +197,10 @@ xfs_rtgroup_unlock(
 	ASSERT(!(rtglock_flags & XFS_RTGLOCK_BITMAP_SHARED) ||
 	       !(rtglock_flags & XFS_RTGLOCK_BITMAP));
 
+	if ((rtglock_flags & XFS_RTGLOCK_REFCOUNT) &&
+	    rtg->rtg_inodes[XFS_RTGI_REFCOUNT])
+		xfs_iunlock(rtg->rtg_inodes[XFS_RTGI_REFCOUNT], XFS_ILOCK_EXCL);
+
 	if ((rtglock_flags & XFS_RTGLOCK_RMAP) &&
 	    rtg->rtg_inodes[XFS_RTGI_RMAP])
 		xfs_iunlock(rtg->rtg_inodes[XFS_RTGI_RMAP], XFS_ILOCK_EXCL);
@@ -228,6 +236,11 @@ xfs_rtgroup_trans_join(
 	    rtg->rtg_inodes[XFS_RTGI_RMAP] != NULL)
 		xfs_trans_ijoin(tp, rtg->rtg_inodes[XFS_RTGI_RMAP],
 				XFS_ILOCK_EXCL);
+
+	if ((rtglock_flags & XFS_RTGLOCK_REFCOUNT) &&
+	    rtg->rtg_inodes[XFS_RTGI_REFCOUNT] != NULL)
+		xfs_trans_ijoin(tp, rtg->rtg_inodes[XFS_RTGI_REFCOUNT],
+				XFS_ILOCK_EXCL);
 }
 
 /* Retrieve rt group geometry. */
diff --git a/libxfs/xfs_rtgroup.h b/libxfs/xfs_rtgroup.h
index a19f3637e..8c146fcf8 100644
--- a/libxfs/xfs_rtgroup.h
+++ b/libxfs/xfs_rtgroup.h
@@ -269,10 +269,13 @@ void xfs_rtgroup_calc_geometry(struct xfs_mount *mp, struct xfs_rtgroup *rtg,
 #define XFS_RTGLOCK_BITMAP_SHARED	(1U << 1)
 /* Lock the rt rmap inode in exclusive mode */
 #define XFS_RTGLOCK_RMAP		(1U << 2)
+/* Lock the rt refcount inode in exclusive mode */
+#define XFS_RTGLOCK_REFCOUNT		(1U << 3)
 
 #define XFS_RTGLOCK_ALL_FLAGS	(XFS_RTGLOCK_BITMAP | \
 				 XFS_RTGLOCK_BITMAP_SHARED | \
-				 XFS_RTGLOCK_RMAP)
+				 XFS_RTGLOCK_RMAP | \
+				 XFS_RTGLOCK_REFCOUNT)
 
 void xfs_rtgroup_lock(struct xfs_rtgroup *rtg, unsigned int rtglock_flags);
 void xfs_rtgroup_unlock(struct xfs_rtgroup *rtg, unsigned int rtglock_flags);
diff --git a/libxfs/xfs_rtrefcount_btree.c b/libxfs/xfs_rtrefcount_btree.c
index 0acd91a9e..835077f9f 100644
--- a/libxfs/xfs_rtrefcount_btree.c
+++ b/libxfs/xfs_rtrefcount_btree.c
@@ -401,12 +401,10 @@ xfs_rtrefcountbt_init_cursor(
 	struct xfs_trans	*tp,
 	struct xfs_rtgroup	*rtg)
 {
-	struct xfs_inode	*ip = NULL;
+	struct xfs_inode	*ip = rtg->rtg_inodes[XFS_RTGI_REFCOUNT];
 	struct xfs_mount	*mp = rtg_mount(rtg);
 	struct xfs_btree_cur	*cur;
 
-	return NULL; /* XXX */
-
 	xfs_assert_ilocked(ip, XFS_ILOCK_SHARED | XFS_ILOCK_EXCL);
 
 	cur = xfs_btree_alloc_cursor(mp, tp, &xfs_rtrefcountbt_ops,
-- 
2.50.1
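
As a reader's aid, the cursor setup sequence that xfs_rtrefcount_finish_one
now performs can be summarized in a small sketch.  This is illustrative only,
not part of the diff above: the helper name is made up, while the calls are
exactly the ones wired up by this patch, and the sketch assumes the caller
already owns a transaction and an rtgroup reference.

	/*
	 * Sketch: obtain a realtime refcount btree cursor.  Mirrors the
	 * pattern in xfs_rtrefcount_finish_one; the helper name is
	 * hypothetical.
	 */
	static struct xfs_btree_cur *
	rtrefcount_cursor_get(
		struct xfs_trans	*tp,
		struct xfs_rtgroup	*rtg)
	{
		/* ILOCK the rt refcount inode in exclusive mode. */
		xfs_rtgroup_lock(rtg, XFS_RTGLOCK_REFCOUNT);

		/* Join the inode to tp so commit releases the lock. */
		xfs_rtgroup_trans_join(tp, rtg, XFS_RTGLOCK_REFCOUNT);

		/*
		 * Now legal: with the XXX stub removed above, the cursor
		 * constructor asserts that the XFS_RTGI_REFCOUNT inode
		 * is ILOCKed.
		 */
		return xfs_rtrefcountbt_init_cursor(tp, rtg);
	}

Note also that when the deferred-op code switches rtgroups, it carries
bc_refc.nr_ops and bc_refc.shape_changes over from the old cursor to the new
one, so the log reservation accounting survives the cursor change.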