xfs: wire up realtime refcount btree cursors
author		Darrick J. Wong <djwong@kernel.org>
		Wed, 3 Jul 2024 21:22:29 +0000 (14:22 -0700)
committer	Darrick J. Wong <djwong@kernel.org>
		Tue, 9 Jul 2024 22:37:22 +0000 (15:37 -0700)
Wire up realtime refcount btree cursors wherever they're needed
throughout the code base.

Signed-off-by: Darrick J. Wong <djwong@kernel.org>
libxfs/xfs_btree.h
libxfs/xfs_refcount.c
libxfs/xfs_rtgroup.c
libxfs/xfs_rtgroup.h

diff --git a/libxfs/xfs_btree.h b/libxfs/xfs_btree.h
index 68e13592d1f5208b1f088a0a877e61f20792cd1d..a74bc9c4c1eb1c1b57503337aafb5eeaf6338f43 100644
--- a/libxfs/xfs_btree.h
+++ b/libxfs/xfs_btree.h
@@ -303,7 +303,7 @@ struct xfs_btree_cur
                struct {
                        unsigned int    nr_ops;         /* # record updates */
                        unsigned int    shape_changes;  /* # of extent splits */
-               } bc_refc;      /* refcountbt */
+               } bc_refc;      /* refcountbt/rtrefcountbt */
        };
 
        /* Must be at the end of the struct! */
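For context, the bc_refc counters let the refcount code cap how much work a
single cursor does before the caller must roll the transaction and requeue
the remainder as a new deferred item; this change simply reuses that
bookkeeping for the realtime cursor.  A minimal sketch of the idea follows;
the helper name and the fixed threshold are hypothetical, not part of this
patch:

/*
 * Hypothetical sketch: decide whether this cursor has already done enough
 * work that the caller should log what it has and continue the rest in a
 * fresh transaction.  The constant is illustrative only; the real code
 * derives its limit from the transaction reservation.
 */
static inline bool
example_refc_want_continuation(
	struct xfs_btree_cur	*cur)
{
	/* nr_ops counts record updates; shape_changes counts extent splits */
	return cur->bc_refc.nr_ops + cur->bc_refc.shape_changes > 32;
}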
diff --git a/libxfs/xfs_refcount.c b/libxfs/xfs_refcount.c
index 0756652578dbc2abd1c2b07a2873a1b4970a607c..eeef68324f3119c4af7944f8d921b42bf2bcf5a6 100644
--- a/libxfs/xfs_refcount.c
+++ b/libxfs/xfs_refcount.c
@@ -25,6 +25,7 @@
 #include "xfs_health.h"
 #include "defer_item.h"
 #include "xfs_rtgroup.h"
+#include "xfs_rtrefcount_btree.h"
 
 struct kmem_cache      *xfs_refcount_intent_cache;
 
@@ -1475,6 +1476,33 @@ xfs_refcount_finish_one(
        return error;
 }
 
+/*
+ * Set up a continuation of a deferred rtrefcount operation by updating the
+ * intent.  Checks to make sure we're not going to run off the end of the
+ * rtgroup.
+ */
+static inline int
+xfs_rtrefcount_continue_op(
+       struct xfs_btree_cur            *cur,
+       struct xfs_refcount_intent      *ri,
+       xfs_agblock_t                   new_agbno)
+{
+       struct xfs_mount                *mp = cur->bc_mp;
+       struct xfs_rtgroup              *rtg = ri->ri_rtg;
+
+       if (XFS_IS_CORRUPT(mp, !xfs_verify_rgbext(rtg, new_agbno,
+                                       ri->ri_blockcount))) {
+               xfs_btree_mark_sick(cur);
+               return -EFSCORRUPTED;
+       }
+
+       ri->ri_startblock = xfs_rgbno_to_rtb(mp, rtg->rtg_rgno, new_agbno);
+
+       ASSERT(xfs_verify_rtbext(mp, ri->ri_startblock, ri->ri_blockcount));
+       ASSERT(rtg->rtg_rgno == xfs_rtb_to_rgno(mp, ri->ri_startblock));
+       return 0;
+}
+
 /*
  * Process one of the deferred realtime refcount operations.  We pass back the
  * btree cursor to maintain our lock on the btree between calls.
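xfs_rtrefcount_continue_op() re-encodes the rtgroup-relative block as a
global rt block number, and the two assertions check that this translation
round-trips.  The same invariant in sketch form, using the conversion
helpers from this patch (the surrounding variables are assumed to be in
scope; illustrative only):

/*
 * Sketch: an rtgroup-relative block number maps to a global rtblock and
 * back without drifting into another rtgroup.
 */
xfs_rgnumber_t	rgno;
xfs_rtblock_t	rtb;
xfs_rgblock_t	back;

rtb = xfs_rgbno_to_rtb(mp, rtg->rtg_rgno, rgbno);
back = xfs_rtb_to_rgbno(mp, rtb, &rgno);
ASSERT(rgno == rtg->rtg_rgno);
ASSERT(back == rgbno);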
@@ -1485,8 +1513,77 @@ xfs_rtrefcount_finish_one(
        struct xfs_refcount_intent      *ri,
        struct xfs_btree_cur            **pcur)
 {
-       ASSERT(0);
-       return -EFSCORRUPTED;
+       struct xfs_mount                *mp = tp->t_mountp;
+       struct xfs_btree_cur            *rcur = *pcur;
+       int                             error = 0;
+       xfs_rgnumber_t                  rgno;
+       xfs_rgblock_t                   bno;
+       unsigned long                   nr_ops = 0;
+       int                             shape_changes = 0;
+
+       bno = xfs_rtb_to_rgbno(mp, ri->ri_startblock, &rgno);
+
+       trace_xfs_refcount_deferred(mp, ri);
+
+       if (XFS_TEST_ERROR(false, mp, XFS_ERRTAG_REFCOUNT_FINISH_ONE))
+               return -EIO;
+
+       /*
+        * If we haven't gotten a cursor or the cursor rtgroup doesn't
+        * match the intent's rtgroup, get one now.
+        */
+       if (rcur != NULL && rcur->bc_ino.rtg != ri->ri_rtg) {
+               nr_ops = rcur->bc_refc.nr_ops;
+               shape_changes = rcur->bc_refc.shape_changes;
+               xfs_btree_del_cursor(rcur, 0);
+               rcur = NULL;
+               *pcur = NULL;
+       }
+       if (rcur == NULL) {
+               xfs_rtgroup_lock(tp, ri->ri_rtg, XFS_RTGLOCK_REFCOUNT);
+               *pcur = rcur = xfs_rtrefcountbt_init_cursor(mp, tp, ri->ri_rtg,
+                                               ri->ri_rtg->rtg_refcountip);
+
+               rcur->bc_refc.nr_ops = nr_ops;
+               rcur->bc_refc.shape_changes = shape_changes;
+       }
+
+       switch (ri->ri_type) {
+       case XFS_REFCOUNT_INCREASE:
+               error = xfs_refcount_adjust(rcur, &bno, &ri->ri_blockcount,
+                               XFS_REFCOUNT_ADJUST_INCREASE);
+               if (error)
+                       return error;
+               if (ri->ri_blockcount > 0)
+                       error = xfs_rtrefcount_continue_op(rcur, ri, bno);
+               break;
+       case XFS_REFCOUNT_DECREASE:
+               error = xfs_refcount_adjust(rcur, &bno, &ri->ri_blockcount,
+                               XFS_REFCOUNT_ADJUST_DECREASE);
+               if (error)
+                       return error;
+               if (ri->ri_blockcount > 0)
+                       error = xfs_rtrefcount_continue_op(rcur, ri, bno);
+               break;
+       case XFS_REFCOUNT_ALLOC_COW:
+               error = __xfs_refcount_cow_alloc(rcur, bno, ri->ri_blockcount);
+               if (error)
+                       return error;
+               ri->ri_blockcount = 0;
+               break;
+       case XFS_REFCOUNT_FREE_COW:
+               error = __xfs_refcount_cow_free(rcur, bno, ri->ri_blockcount);
+               if (error)
+                       return error;
+               ri->ri_blockcount = 0;
+               break;
+       default:
+               ASSERT(0);
+               return -EFSCORRUPTED;
+       }
+       if (!error && ri->ri_blockcount > 0)
+               trace_xfs_refcount_finish_one_leftover(mp, ri);
+       return error;
 }
 
 /*
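The pcur argument is how the deferred-ops machinery keeps one btree cursor,
and therefore the btree lock, alive across a whole batch of intents.  A
hypothetical sketch of that calling pattern; the example_* name is made up,
and the real driver lives in the deferred-ops code, not in this patch:

/*
 * Hypothetical sketch: finish a list of rt refcount intents, reusing one
 * cursor across calls and releasing it when the batch completes.
 */
static int
example_finish_rtrefcount_intents(
	struct xfs_trans		*tp,
	struct list_head		*intents)
{
	struct xfs_btree_cur		*rcur = NULL;
	struct xfs_refcount_intent	*ri;
	int				error = 0;

	list_for_each_entry(ri, intents, ri_list) {
		error = xfs_rtrefcount_finish_one(tp, ri, &rcur);
		if (error)
			break;
	}
	if (rcur)
		xfs_btree_del_cursor(rcur, error);
	return error;
}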
diff --git a/libxfs/xfs_rtgroup.c b/libxfs/xfs_rtgroup.c
index fe4608fca92d593f8da5d10ac524fd5bf16fad21..3d880d9555510abcb2b6002e50d2eb639d397652 100644
--- a/libxfs/xfs_rtgroup.c
+++ b/libxfs/xfs_rtgroup.c
@@ -442,6 +442,13 @@ xfs_rtgroup_lock(
                if (tp)
                        xfs_trans_ijoin(tp, rtg->rtg_rmapip, XFS_ILOCK_EXCL);
        }
+
+       if ((rtglock_flags & XFS_RTGLOCK_REFCOUNT) && rtg->rtg_refcountip) {
+               xfs_ilock(rtg->rtg_refcountip, XFS_ILOCK_EXCL);
+               if (tp)
+                       xfs_trans_ijoin(tp, rtg->rtg_refcountip,
+                                       XFS_ILOCK_EXCL);
+       }
 }
 
 /* Unlock metadata inodes associated with this rt group. */
@@ -454,6 +461,9 @@ xfs_rtgroup_unlock(
        ASSERT(!(rtglock_flags & XFS_RTGLOCK_BITMAP_SHARED) ||
               !(rtglock_flags & XFS_RTGLOCK_BITMAP));
 
+       if ((rtglock_flags & XFS_RTGLOCK_REFCOUNT) && rtg->rtg_refcountip)
+               xfs_iunlock(rtg->rtg_refcountip, XFS_ILOCK_EXCL);
+
        if ((rtglock_flags & XFS_RTGLOCK_RMAP) && rtg->rtg_rmapip)
                xfs_iunlock(rtg->rtg_rmapip, XFS_ILOCK_EXCL);
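The refcount inode follows the same pattern as the bitmap and rmap inodes:
taken in ILOCK_EXCL, and joined to the transaction when one is supplied so
that commit releases the lock.  A minimal usage sketch, assuming a
referenced rtgroup and the usual xfs_trans_ijoin() semantics (illustrative
only):

/*
 * Sketch: lock the rt refcount inode around an update.  With a
 * transaction, the ijoin in xfs_rtgroup_lock() hands the unlock to
 * commit; without one, the caller pairs lock and unlock directly.
 */
xfs_rtgroup_lock(tp, rtg, XFS_RTGLOCK_REFCOUNT);
/* ... update the rt refcount btree ... */
if (!tp)
	xfs_rtgroup_unlock(rtg, XFS_RTGLOCK_REFCOUNT);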
 
diff --git a/libxfs/xfs_rtgroup.h b/libxfs/xfs_rtgroup.h
index 21a8251843bc9f995d3a9705796249f2b8d60a02..7cae4458589b4609d579047df2d4fbcd5c02043e 100644
--- a/libxfs/xfs_rtgroup.h
+++ b/libxfs/xfs_rtgroup.h
@@ -253,10 +253,13 @@ struct xfs_buf *xfs_rtgroup_log_super(struct xfs_trans *tp,
 #define XFS_RTGLOCK_BITMAP_SHARED      (1U << 1)
 /* Lock the rt rmap inode in exclusive mode */
 #define XFS_RTGLOCK_RMAP               (1U << 2)
+/* Lock the rt refcount inode in exclusive mode */
+#define XFS_RTGLOCK_REFCOUNT           (1U << 3)
 
 #define XFS_RTGLOCK_ALL_FLAGS  (XFS_RTGLOCK_BITMAP | \
                                 XFS_RTGLOCK_BITMAP_SHARED | \
-                                XFS_RTGLOCK_RMAP)
+                                XFS_RTGLOCK_RMAP | \
+                                XFS_RTGLOCK_REFCOUNT)
 
 void xfs_rtgroup_lock(struct xfs_trans *tp, struct xfs_rtgroup *rtg,
                unsigned int rtglock_flags);
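Since the new bit is part of XFS_RTGLOCK_ALL_FLAGS, callers can take
several of the rtgroup metadata inodes in one call, and the fixed order in
which xfs_rtgroup_lock() processes the flags keeps the inode lock ordering
consistent across callers.  A hypothetical combined use:

/* Hypothetical: a CoW remap step that needs both rt btrees held. */
xfs_rtgroup_lock(tp, rtg, XFS_RTGLOCK_RMAP | XFS_RTGLOCK_REFCOUNT);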