www.infradead.org Git - users/hch/xfs.git/commitdiff
xfs: wire up realtime refcount btree cursors
authorDarrick J. Wong <djwong@kernel.org>
Fri, 9 Aug 2024 13:11:07 +0000 (15:11 +0200)
committerChristoph Hellwig <hch@lst.de>
Mon, 12 Aug 2024 11:53:07 +0000 (13:53 +0200)
Wire up realtime refcount btree cursors wherever they're needed
throughout the code base.

Signed-off-by: Darrick J. Wong <djwong@kernel.org>
fs/xfs/libxfs/xfs_btree.h
fs/xfs/libxfs/xfs_refcount.c
fs/xfs/libxfs/xfs_rtgroup.c
fs/xfs/libxfs/xfs_rtgroup.h
fs/xfs/xfs_fsmap.c
fs/xfs/xfs_reflink.c

index 1bb526164081e5de5766b151ce027e7cede3a806..7c39eb9906056ca02e61238bbf3fc7f673ac8091 100644 (file)
@@ -288,7 +288,7 @@ struct xfs_btree_cur
                struct {
                        unsigned int    nr_ops;         /* # record updates */
                        unsigned int    shape_changes;  /* # of extent splits */
-               } bc_refc;      /* refcountbt */
+               } bc_refc;      /* refcountbt/rtrefcountbt */
        };
 
        /* Must be at the end of the struct! */
index 982837c852ead3e089617097361979be725400bf..54926af1766b2e1def4e85999867f2f0e10a7aa3 100644 (file)
@@ -27,6 +27,7 @@
 #include "xfs_refcount_item.h"
 #include "xfs_rtgroup.h"
 #include "xfs_rtalloc.h"
+#include "xfs_rtrefcount_btree.h"
 
 struct kmem_cache      *xfs_refcount_intent_cache;
 
@@ -1477,6 +1478,33 @@ xfs_refcount_finish_one(
        return error;
 }
 
+/*
+ * Set up a continuation of a deferred rtrefcount operation by updating the
+ * intent.  Checks to make sure we're not going to run off the end of the
+ * rtgroup.
+ */
+static inline int
+xfs_rtrefcount_continue_op(
+       struct xfs_btree_cur            *cur,
+       struct xfs_refcount_intent      *ri,
+       xfs_agblock_t                   new_agbno)
+{
+       struct xfs_mount                *mp = cur->bc_mp;
+       struct xfs_rtgroup              *rtg = ri->ri_rtg;
+
+       if (XFS_IS_CORRUPT(mp, !xfs_verify_rgbext(rtg, new_agbno,
+                                       ri->ri_blockcount))) {
+               xfs_btree_mark_sick(cur);
+               return -EFSCORRUPTED;
+       }
+
+       ri->ri_startblock = xfs_rgbno_to_rtb(mp, rtg->rtg_rgno, new_agbno);
+
+       ASSERT(xfs_verify_rtbext(mp, ri->ri_startblock, ri->ri_blockcount));
+       ASSERT(rtg->rtg_rgno == xfs_rtb_to_rgno(mp, ri->ri_startblock));
+       return 0;
+}
+
 /*
  * Process one of the deferred realtime refcount operations.  We pass back the
  * btree cursor to maintain our lock on the btree between calls.
@@ -1487,8 +1515,77 @@ xfs_rtrefcount_finish_one(
        struct xfs_refcount_intent      *ri,
        struct xfs_btree_cur            **pcur)
 {
-       ASSERT(0);
-       return -EFSCORRUPTED;
+       struct xfs_mount                *mp = tp->t_mountp;
+       struct xfs_btree_cur            *rcur = *pcur;
+       int                             error = 0;
+       xfs_rgblock_t                   bno;
+       unsigned long                   nr_ops = 0;
+       int                             shape_changes = 0;
+
+       bno = xfs_rtb_to_rgbno(mp, ri->ri_startblock);
+
+       trace_xfs_refcount_deferred(mp, ri);
+
+       if (XFS_TEST_ERROR(false, mp, XFS_ERRTAG_REFCOUNT_FINISH_ONE))
+               return -EIO;
+
+       /*
+        * If we haven't gotten a cursor or the cursor rtgroup doesn't match
+        * the startblock, get one now.
+        */
+       if (rcur != NULL && rcur->bc_ino.rtg != ri->ri_rtg) {
+               nr_ops = rcur->bc_refc.nr_ops;
+               shape_changes = rcur->bc_refc.shape_changes;
+               xfs_btree_del_cursor(rcur, 0);
+               rcur = NULL;
+               *pcur = NULL;
+       }
+       if (rcur == NULL) {
+               xfs_rtgroup_lock(ri->ri_rtg, XFS_RTGLOCK_REFCOUNT);
+               xfs_rtgroup_trans_join(tp, ri->ri_rtg, XFS_RTGLOCK_REFCOUNT);
+               *pcur = rcur = xfs_rtrefcountbt_init_cursor(mp, tp, ri->ri_rtg,
+                               ri->ri_rtg->rtg_inodes[XFS_RTG_REFCOUNT]);
+
+               rcur->bc_refc.nr_ops = nr_ops;
+               rcur->bc_refc.shape_changes = shape_changes;
+       }
+
+       switch (ri->ri_type) {
+       case XFS_REFCOUNT_INCREASE:
+               error = xfs_refcount_adjust(rcur, &bno, &ri->ri_blockcount,
+                               XFS_REFCOUNT_ADJUST_INCREASE);
+               if (error)
+                       return error;
+               if (ri->ri_blockcount > 0)
+                       error = xfs_rtrefcount_continue_op(rcur, ri, bno);
+               break;
+       case XFS_REFCOUNT_DECREASE:
+               error = xfs_refcount_adjust(rcur, &bno, &ri->ri_blockcount,
+                               XFS_REFCOUNT_ADJUST_DECREASE);
+               if (error)
+                       return error;
+               if (ri->ri_blockcount > 0)
+                       error = xfs_rtrefcount_continue_op(rcur, ri, bno);
+               break;
+       case XFS_REFCOUNT_ALLOC_COW:
+               error = __xfs_refcount_cow_alloc(rcur, bno, ri->ri_blockcount);
+               if (error)
+                       return error;
+               ri->ri_blockcount = 0;
+               break;
+       case XFS_REFCOUNT_FREE_COW:
+               error = __xfs_refcount_cow_free(rcur, bno, ri->ri_blockcount);
+               if (error)
+                       return error;
+               ri->ri_blockcount = 0;
+               break;
+       default:
+               ASSERT(0);
+               return -EFSCORRUPTED;
+       }
+       if (!error && ri->ri_blockcount > 0)
+               trace_xfs_refcount_finish_one_leftover(mp, ri);
+       return error;
 }
 
 /*
index 4cb377dee8d5caec44db9d476e01e4a6febe73c2..9c6997b432d2c9e015458a678386cffcbb2a88c2 100644 (file)
@@ -240,6 +240,10 @@ xfs_rtgroup_lock(
        if ((rtglock_flags & XFS_RTGLOCK_RMAP) &&
            rtg->rtg_inodes[XFS_RTG_RMAP] != NULL)
                xfs_ilock(rtg->rtg_inodes[XFS_RTG_RMAP], XFS_ILOCK_EXCL);
+
+       if ((rtglock_flags & XFS_RTGLOCK_REFCOUNT) &&
+           rtg->rtg_inodes[XFS_RTG_REFCOUNT] != NULL)
+               xfs_ilock(rtg->rtg_inodes[XFS_RTG_REFCOUNT], XFS_ILOCK_EXCL);
 }
 
 /* Unlock metadata inodes associated with this rt group. */
@@ -252,6 +256,10 @@ xfs_rtgroup_unlock(
        ASSERT(!(rtglock_flags & XFS_RTGLOCK_BITMAP_SHARED) ||
               !(rtglock_flags & XFS_RTGLOCK_BITMAP));
 
+       if ((rtglock_flags & XFS_RTGLOCK_REFCOUNT) &&
+                       rtg->rtg_inodes[XFS_RTG_REFCOUNT])
+               xfs_iunlock(rtg->rtg_inodes[XFS_RTG_REFCOUNT], XFS_ILOCK_EXCL);
+
        if ((rtglock_flags & XFS_RTGLOCK_RMAP) && rtg->rtg_inodes[XFS_RTG_RMAP])
                xfs_iunlock(rtg->rtg_inodes[XFS_RTG_RMAP], XFS_ILOCK_EXCL);
 
@@ -287,6 +295,11 @@ xfs_rtgroup_trans_join(
            rtg->rtg_inodes[XFS_RTG_RMAP] != NULL)
                xfs_trans_ijoin(tp, rtg->rtg_inodes[XFS_RTG_RMAP],
                                XFS_ILOCK_EXCL);
+
+       if ((rtglock_flags & XFS_RTGLOCK_REFCOUNT) &&
+           rtg->rtg_inodes[XFS_RTG_REFCOUNT] != NULL)
+               xfs_trans_ijoin(tp, rtg->rtg_inodes[XFS_RTG_REFCOUNT],
+                               XFS_ILOCK_EXCL);
 }
 
 /* Retrieve rt group geometry. */
index 14edec52dac6dfa672a3ac6a36fe4229f25db377..65e88cb4ef0c6ce8b0d96a767fed3e04edb64d20 100644 (file)
@@ -307,10 +307,13 @@ xfs_rtxnum_t xfs_rtgroup_extents(struct xfs_mount *mp, xfs_rgnumber_t rgno);
 #define XFS_RTGLOCK_BITMAP_SHARED      (1U << 1)
 /* Lock the rt rmap inode in exclusive mode */
 #define XFS_RTGLOCK_RMAP               (1U << 2)
+/* Lock the rt refcount inode in exclusive mode */
+#define XFS_RTGLOCK_REFCOUNT           (1U << 3)
 
 #define XFS_RTGLOCK_ALL_FLAGS  (XFS_RTGLOCK_BITMAP | \
                                 XFS_RTGLOCK_BITMAP_SHARED | \
-                                XFS_RTGLOCK_RMAP)
+                                XFS_RTGLOCK_RMAP | \
+                                XFS_RTGLOCK_REFCOUNT)
 
 void xfs_rtgroup_lock(struct xfs_rtgroup *rtg, unsigned int rtglock_flags);
 void xfs_rtgroup_unlock(struct xfs_rtgroup *rtg, unsigned int rtglock_flags);
index 0c2ff612a8c24c706c1350733cc180099519a630..00638568716d291e51cd2fd29b16b51440faf0f8 100644 (file)
@@ -27,6 +27,7 @@
 #include "xfs_ag.h"
 #include "xfs_rtgroup.h"
 #include "xfs_rtrmap_btree.h"
+#include "xfs_rtrefcount_btree.h"
 
 /* Convert an xfs_fsmap to an fsmap. */
 static void
@@ -216,14 +217,16 @@ xfs_getfsmap_is_shared(
        *stat = false;
        if (!xfs_has_reflink(mp))
                return 0;
-       /* rt files will have no perag structure */
-       if (!info->pag)
-               return 0;
+
+       if (info->rtg)
+               cur = xfs_rtrefcountbt_init_cursor(mp, tp, info->rtg,
+                               info->rtg->rtg_inodes[XFS_RTG_REFCOUNT]);
+       else
+               cur = xfs_refcountbt_init_cursor(mp, tp, info->agf_bp,
+                               info->pag);
 
        /* Are there any shared blocks here? */
        flen = 0;
-       cur = xfs_refcountbt_init_cursor(mp, tp, info->agf_bp, info->pag);
-
        error = xfs_refcount_find_shared(cur, rec->rm_startblock,
                        rec->rm_blockcount, &fbno, &flen, false);
 
@@ -830,7 +833,7 @@ xfs_getfsmap_rtdev_rmapbt_query(
                                info);
 
        /* Query the rtrmapbt */
-       xfs_rtgroup_lock(info->rtg, XFS_RTGLOCK_RMAP);
+       xfs_rtgroup_lock(info->rtg, XFS_RTGLOCK_RMAP | XFS_RTGLOCK_REFCOUNT);
        *curpp = xfs_rtrmapbt_init_cursor(mp, tp, info->rtg,
                        info->rtg->rtg_inodes[XFS_RTG_RMAP]);
        return xfs_rmap_query_range(*curpp, &info->low, &info->high,
@@ -918,7 +921,8 @@ xfs_getfsmap_rtdev_rmapbt(
 
                if (bt_cur) {
                        xfs_rtgroup_unlock(bt_cur->bc_ino.rtg,
-                                       XFS_RTGLOCK_RMAP);
+                                       XFS_RTGLOCK_RMAP |
+                                       XFS_RTGLOCK_REFCOUNT);
                        xfs_btree_del_cursor(bt_cur, XFS_BTREE_NOERROR);
                        bt_cur = NULL;
                }
@@ -955,7 +959,8 @@ xfs_getfsmap_rtdev_rmapbt(
        }
 
        if (bt_cur) {
-               xfs_rtgroup_unlock(bt_cur->bc_ino.rtg, XFS_RTGLOCK_RMAP);
+               xfs_rtgroup_unlock(bt_cur->bc_ino.rtg, XFS_RTGLOCK_RMAP |
+                                                      XFS_RTGLOCK_REFCOUNT);
                xfs_btree_del_cursor(bt_cur, error < 0 ? XFS_BTREE_ERROR :
                                                         XFS_BTREE_NOERROR);
        }
index ce5bb92d28c740e4550da1f0a43f6d53a24c4f60..f5227be6b9be7c92cf0b85f7534e83f8452dd714 100644 (file)
@@ -30,6 +30,9 @@
 #include "xfs_ag.h"
 #include "xfs_ag_resv.h"
 #include "xfs_health.h"
+#include "xfs_rtrefcount_btree.h"
+#include "xfs_rtalloc.h"
+#include "xfs_rtgroup.h"
 
 /*
  * Copy on Write of Shared Blocks
@@ -162,6 +165,54 @@ xfs_reflink_find_shared(
        return error;
 }
 
+/*
+ * Given a file mapping for the rt device, find the lowest-numbered run of
+ * shared blocks within that mapping and return it in shared_offset/shared_len.
+ * The offset is relative to the start of irec.
+ *
+ * If find_end_of_shared is true, return the longest contiguous extent of shared
+ * blocks.  If there are no shared extents, shared_offset and shared_len will be
+ * set to 0.
+ */
+static int
+xfs_reflink_find_rtshared(
+       struct xfs_mount        *mp,
+       struct xfs_trans        *tp,
+       const struct xfs_bmbt_irec *irec,
+       xfs_extlen_t            *shared_offset,
+       xfs_extlen_t            *shared_len,
+       bool                    find_end_of_shared)
+{
+       struct xfs_rtgroup      *rtg;
+       struct xfs_btree_cur    *cur;
+       xfs_rgblock_t           orig_bno;
+       xfs_agblock_t           found_bno;
+       int                     error;
+
+       BUILD_BUG_ON(NULLRGBLOCK != NULLAGBLOCK);
+
+       /*
+        * Note: this uses the not quite correct xfs_agblock_t type because
+        * xfs_refcount_find_shared is shared between the RT and data device
+        * refcount code.
+        */
+       orig_bno = xfs_rtb_to_rgbno(mp, irec->br_startblock);
+       rtg = xfs_rtgroup_get(mp, xfs_rtb_to_rgno(mp, irec->br_startblock));
+
+       xfs_rtgroup_lock(rtg, XFS_RTGLOCK_REFCOUNT);
+       cur = xfs_rtrefcountbt_init_cursor(mp, tp, rtg,
+                       rtg->rtg_inodes[XFS_RTG_REFCOUNT]);
+       error = xfs_refcount_find_shared(cur, orig_bno, irec->br_blockcount,
+                       &found_bno, shared_len, find_end_of_shared);
+       xfs_btree_del_cursor(cur, error);
+       xfs_rtgroup_unlock(rtg, XFS_RTGLOCK_REFCOUNT);
+       xfs_rtgroup_put(rtg);
+
+       if (!error && *shared_len)
+               *shared_offset = found_bno - orig_bno;
+       return error;
+}
+
 /*
  * Trim the mapping to the next block where there's a change in the
  * shared/unshared status.  More specifically, this means that we
@@ -190,8 +241,12 @@ xfs_reflink_trim_around_shared(
 
        trace_xfs_reflink_trim_around_shared(ip, irec);
 
-       error = xfs_reflink_find_shared(mp, NULL, irec, &shared_offset,
-                       &shared_len, true);
+       if (XFS_IS_REALTIME_INODE(ip))
+               error = xfs_reflink_find_rtshared(mp, NULL, irec,
+                               &shared_offset, &shared_len, true);
+       else
+               error = xfs_reflink_find_shared(mp, NULL, irec,
+                               &shared_offset, &shared_len, true);
        if (error)
                return error;
 
@@ -1554,8 +1609,12 @@ xfs_reflink_inode_has_shared_extents(
                    got.br_state != XFS_EXT_NORM)
                        goto next;
 
-               error = xfs_reflink_find_shared(mp, tp, &got, &shared_offset,
-                               &shared_len, false);
+               if (XFS_IS_REALTIME_INODE(ip))
+                       error = xfs_reflink_find_rtshared(mp, tp, &got,
+                                       &shared_offset, &shared_len, false);
+               else
+                       error = xfs_reflink_find_shared(mp, tp, &got,
+                                       &shared_offset, &shared_len, false);
                if (error)
                        return error;