xfs: wire up rmap map and unmap to the realtime rmapbt
author    Darrick J. Wong <djwong@kernel.org>
          Mon, 23 Sep 2024 20:42:03 +0000 (13:42 -0700)
committer Darrick J. Wong <djwong@kernel.org>
          Wed, 9 Oct 2024 23:29:17 +0000 (16:29 -0700)
Connect the map and unmap reverse-mapping operations to the realtime
rmapbt via the deferred operation callbacks.  This enables us to
perform rmap operations against the correct btree.

[Contains a minor bugfix from hch]

Signed-off-by: Darrick J. Wong <djwong@kernel.org>
libxfs/xfs_rmap.c
libxfs/xfs_rtgroup.c
libxfs/xfs_rtgroup.h
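
As a quick orientation before the diff: the finish path now dispatches on
the intent's group type to pick a btree cursor initializer.  The standalone
sketch below illustrates only that dispatch shape; every type and helper in
it is a simplified stand-in for this example, not the real XFS definition.

/* sketch.c: compile with cc -o sketch sketch.c */
#include <stdio.h>

enum xg_type { XG_TYPE_AG, XG_TYPE_RTG };

struct xfs_group { enum xg_type xg_type; };
struct xfs_rmap_intent { struct xfs_group *ri_group; };
struct xfs_trans;
struct xfs_btree_cur;

/* Stand-in for xfs_rmap_finish_init_cursor(): per-AG rmap btree. */
static int ag_init_cursor(struct xfs_trans *tp, struct xfs_rmap_intent *ri,
			  struct xfs_btree_cur **pcur)
{
	(void)tp; (void)ri; *pcur = NULL;
	puts("AG rmapbt cursor");
	return 0;
}

/* Stand-in for xfs_rtrmap_finish_init_cursor(): realtime rmap btree. */
static int rtg_init_cursor(struct xfs_trans *tp, struct xfs_rmap_intent *ri,
			   struct xfs_btree_cur **pcur)
{
	(void)tp; (void)ri; *pcur = NULL;
	puts("realtime rmapbt cursor");
	return 0;
}

/* The dispatch added to xfs_rmap_finish_one(): route the deferred
 * intent to whichever btree matches its group type. */
static int finish_init_cursor(struct xfs_trans *tp,
			      struct xfs_rmap_intent *ri,
			      struct xfs_btree_cur **pcur)
{
	if (ri->ri_group->xg_type == XG_TYPE_RTG)
		return rtg_init_cursor(tp, ri, pcur);
	return ag_init_cursor(tp, ri, pcur);
}

int main(void)
{
	struct xfs_group rtg = { .xg_type = XG_TYPE_RTG };
	struct xfs_rmap_intent ri = { .ri_group = &rtg };
	struct xfs_btree_cur *cur;

	return finish_init_cursor(NULL, &ri, &cur);
}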

diff --git a/libxfs/xfs_rmap.c b/libxfs/xfs_rmap.c
index b75d10fce7c6c573338972004da10cf5e13c0e3b..9a7b93b64dd06f2e5307d099e46382d6e10d6d2e 100644
@@ -25,6 +25,7 @@
 #include "xfs_health.h"
 #include "defer_item.h"
 #include "xfs_rtgroup.h"
+#include "xfs_rtrmap_btree.h"
 
 struct kmem_cache      *xfs_rmap_intent_cache;
 
@@ -2620,6 +2621,47 @@ __xfs_rmap_finish_intent(
        }
 }
 
+static int
+xfs_rmap_finish_init_cursor(
+       struct xfs_trans                *tp,
+       struct xfs_rmap_intent          *ri,
+       struct xfs_btree_cur            **pcur)
+{
+       struct xfs_perag                *pag = to_perag(ri->ri_group);
+       struct xfs_buf                  *agbp = NULL;
+       int                             error;
+
+       /*
+        * Refresh the freelist before we start changing the rmapbt, because a
+        * shape change could cause us to allocate blocks.
+        */
+       error = xfs_free_extent_fix_freelist(tp, pag, &agbp);
+       if (error) {
+               xfs_ag_mark_sick(pag, XFS_SICK_AG_AGFL);
+               return error;
+       }
+       if (XFS_IS_CORRUPT(tp->t_mountp, !agbp)) {
+               xfs_ag_mark_sick(pag, XFS_SICK_AG_AGFL);
+               return -EFSCORRUPTED;
+       }
+       *pcur = xfs_rmapbt_init_cursor(tp->t_mountp, tp, agbp, pag);
+       return 0;
+}
+
+static int
+xfs_rtrmap_finish_init_cursor(
+       struct xfs_trans                *tp,
+       struct xfs_rmap_intent          *ri,
+       struct xfs_btree_cur            **pcur)
+{
+       struct xfs_rtgroup              *rtg = to_rtg(ri->ri_group);
+
+       xfs_rtgroup_lock(rtg, XFS_RTGLOCK_RMAP);
+       xfs_rtgroup_trans_join(tp, rtg, XFS_RTGLOCK_RMAP);
+       *pcur = xfs_rtrmapbt_init_cursor(tp, rtg);
+       return 0;
+}
+
 /*
  * Process one of the deferred rmap operations.  We pass back the
  * btree cursor to maintain our lock on the rmapbt between calls.
@@ -2635,8 +2677,6 @@ xfs_rmap_finish_one(
 {
        struct xfs_owner_info           oinfo;
        struct xfs_mount                *mp = tp->t_mountp;
-       struct xfs_btree_cur            *rcur = *pcur;
-       struct xfs_buf                  *agbp = NULL;
        xfs_agblock_t                   bno;
        bool                            unwritten;
        int                             error = 0;
@@ -2650,38 +2690,29 @@ xfs_rmap_finish_one(
         * If we haven't gotten a cursor or the cursor AG doesn't match
         * the startblock, get one now.
         */
-       if (rcur != NULL && rcur->bc_group != ri->ri_group) {
-               xfs_btree_del_cursor(rcur, 0);
-               rcur = NULL;
+       if (*pcur != NULL && (*pcur)->bc_group != ri->ri_group) {
+               xfs_btree_del_cursor(*pcur, 0);
                *pcur = NULL;
        }
-       if (rcur == NULL) {
-               struct xfs_perag        *pag = to_perag(ri->ri_group);
-
-               /*
-                * Refresh the freelist before we start changing the
-                * rmapbt, because a shape change could cause us to
-                * allocate blocks.
-                */
-               error = xfs_free_extent_fix_freelist(tp, pag, &agbp);
-               if (error) {
-                       xfs_ag_mark_sick(pag, XFS_SICK_AG_AGFL);
+       if (*pcur == NULL) {
+               if (ri->ri_group->xg_type == XG_TYPE_RTG)
+                       error = xfs_rtrmap_finish_init_cursor(tp, ri, pcur);
+               else
+                       error = xfs_rmap_finish_init_cursor(tp, ri, pcur);
+               if (error)
                        return error;
-               }
-               if (XFS_IS_CORRUPT(tp->t_mountp, !agbp)) {
-                       xfs_ag_mark_sick(pag, XFS_SICK_AG_AGFL);
-                       return -EFSCORRUPTED;
-               }
-
-               *pcur = rcur = xfs_rmapbt_init_cursor(mp, tp, agbp, pag);
        }
 
        xfs_rmap_ino_owner(&oinfo, ri->ri_owner, ri->ri_whichfork,
                        ri->ri_bmap.br_startoff);
        unwritten = ri->ri_bmap.br_state == XFS_EXT_UNWRITTEN;
-       bno = XFS_FSB_TO_AGBNO(rcur->bc_mp, ri->ri_bmap.br_startblock);
 
-       error = __xfs_rmap_finish_intent(rcur, ri->ri_type, bno,
+       if (ri->ri_group->xg_type == XG_TYPE_RTG)
+               bno = xfs_rtb_to_rgbno(mp, ri->ri_bmap.br_startblock);
+       else
+               bno = XFS_FSB_TO_AGBNO(mp, ri->ri_bmap.br_startblock);
+
+       error = __xfs_rmap_finish_intent(*pcur, ri->ri_type, bno,
                        ri->ri_bmap.br_blockcount, &oinfo, unwritten);
        if (error)
                return error;
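
The startblock conversion in the last hunk is the other half of the
dispatch: both XFS_FSB_TO_AGBNO() and xfs_rtb_to_rgbno() strip the group
component from a packed block number so the cursor only ever sees
group-relative blocks.  Below is a standalone sketch of that idea, using a
made-up geometry; in XFS the bit widths come from the superblock.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical geometry for this example only: 2^16 blocks per group. */
#define BLKLOG 16

int main(void)
{
	uint64_t packed = ((uint64_t)3 << BLKLOG) | 42; /* group 3, block 42 */

	/* High bits name the group; low bits are the offset within it. */
	printf("group %llu, group-relative block %llu\n",
	       (unsigned long long)(packed >> BLKLOG),
	       (unsigned long long)(packed & ((1ULL << BLKLOG) - 1)));
	return 0;
}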

diff --git a/libxfs/xfs_rtgroup.c b/libxfs/xfs_rtgroup.c
index 60b48301b18ab8f7d36a87bd7977487d46c2efa3..1f2bece74afe9ec13afc79207b4e8b327f56fa97 100644
@@ -176,6 +176,10 @@ xfs_rtgroup_lock(
        } else if (rtglock_flags & XFS_RTGLOCK_BITMAP_SHARED) {
                xfs_ilock(rtg->rtg_inodes[XFS_RTGI_BITMAP], XFS_ILOCK_SHARED);
        }
+
+       if ((rtglock_flags & XFS_RTGLOCK_RMAP) &&
+           rtg->rtg_inodes[XFS_RTGI_RMAP] != NULL)
+               xfs_ilock(rtg->rtg_inodes[XFS_RTGI_RMAP], XFS_ILOCK_EXCL);
 }
 
 /* Unlock metadata inodes associated with this rt group. */
@@ -188,6 +192,9 @@ xfs_rtgroup_unlock(
        ASSERT(!(rtglock_flags & XFS_RTGLOCK_BITMAP_SHARED) ||
               !(rtglock_flags & XFS_RTGLOCK_BITMAP));
 
+       if ((rtglock_flags & XFS_RTGLOCK_RMAP) && rtg->rtg_inodes[XFS_RTGI_RMAP])
+               xfs_iunlock(rtg->rtg_inodes[XFS_RTGI_RMAP], XFS_ILOCK_EXCL);
+
        if (rtglock_flags & XFS_RTGLOCK_BITMAP) {
                xfs_iunlock(rtg->rtg_inodes[XFS_RTGI_SUMMARY], XFS_ILOCK_EXCL);
                xfs_iunlock(rtg->rtg_inodes[XFS_RTGI_BITMAP], XFS_ILOCK_EXCL);
@@ -215,6 +222,11 @@ xfs_rtgroup_trans_join(
                xfs_trans_ijoin(tp, rtg->rtg_inodes[XFS_RTGI_SUMMARY],
                                XFS_ILOCK_EXCL);
        }
+
+       if ((rtglock_flags & XFS_RTGLOCK_RMAP) &&
+           rtg->rtg_inodes[XFS_RTGI_RMAP] != NULL)
+               xfs_trans_ijoin(tp, rtg->rtg_inodes[XFS_RTGI_RMAP],
+                               XFS_ILOCK_EXCL);
 }
 
 /* Retrieve rt group geometry. */
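
The locking hunks above follow one pattern: take each metadata inode only
when its flag is set and the inode actually exists (a filesystem without a
realtime rmap btree has no rmap inode), and release in the reverse of the
order taken.  A simplified standalone sketch of that pattern, with stand-in
names rather than the real XFS types:

#include <stdio.h>

#define RTGLOCK_BITMAP (1U << 0)
#define RTGLOCK_RMAP   (1U << 2)

struct inode { const char *name; };

static void ilock(struct inode *ip)   { printf("lock %s\n", ip->name); }
static void iunlock(struct inode *ip) { printf("unlock %s\n", ip->name); }

struct rtgroup {
	struct inode *bitmap;
	struct inode *rmap;	/* NULL when the fs has no rt rmap btree */
};

static void rtgroup_lock(struct rtgroup *rtg, unsigned int flags)
{
	if (flags & RTGLOCK_BITMAP)
		ilock(rtg->bitmap);
	/* Only lock the rmap inode if the flag is set and it exists. */
	if ((flags & RTGLOCK_RMAP) && rtg->rmap)
		ilock(rtg->rmap);
}

static void rtgroup_unlock(struct rtgroup *rtg, unsigned int flags)
{
	/* Release in the reverse of the order taken above. */
	if ((flags & RTGLOCK_RMAP) && rtg->rmap)
		iunlock(rtg->rmap);
	if (flags & RTGLOCK_BITMAP)
		iunlock(rtg->bitmap);
}

int main(void)
{
	struct inode bitmap = { "bitmap" }, rmap = { "rmap" };
	struct rtgroup rtg = { &bitmap, &rmap };

	rtgroup_lock(&rtg, RTGLOCK_BITMAP | RTGLOCK_RMAP);
	rtgroup_unlock(&rtg, RTGLOCK_BITMAP | RTGLOCK_RMAP);
	return 0;
}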

diff --git a/libxfs/xfs_rtgroup.h b/libxfs/xfs_rtgroup.h
index af119bae352c45a62d751a33461d887ed2c8c0f1..5b485cf72ad7609d11b990f79a79c54865386089 100644
@@ -266,9 +266,12 @@ void xfs_rtgroup_calc_geometry(struct xfs_mount *mp, struct xfs_rtgroup *rtg,
 #define XFS_RTGLOCK_BITMAP             (1U << 0)
 /* Lock the rt bitmap inode in shared mode */
 #define XFS_RTGLOCK_BITMAP_SHARED      (1U << 1)
+/* Lock the rt rmap inode in exclusive mode */
+#define XFS_RTGLOCK_RMAP               (1U << 2)
 
 #define XFS_RTGLOCK_ALL_FLAGS  (XFS_RTGLOCK_BITMAP | \
-                                XFS_RTGLOCK_BITMAP_SHARED)
+                                XFS_RTGLOCK_BITMAP_SHARED | \
+                                XFS_RTGLOCK_RMAP)
 
 void xfs_rtgroup_lock(struct xfs_rtgroup *rtg, unsigned int rtglock_flags);
 void xfs_rtgroup_unlock(struct xfs_rtgroup *rtg, unsigned int rtglock_flags);
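
To close, a small illustrative check built on the mask defined above:
keeping every known bit in XFS_RTGLOCK_ALL_FLAGS lets lock-flag validation
live in one expression.  The assert() form here is only a sketch; the
second check mirrors the ASSERT visible in the xfs_rtgroup_unlock() hunk.

#include <assert.h>

#define XFS_RTGLOCK_BITMAP		(1U << 0)
#define XFS_RTGLOCK_BITMAP_SHARED	(1U << 1)
#define XFS_RTGLOCK_RMAP		(1U << 2)

#define XFS_RTGLOCK_ALL_FLAGS	(XFS_RTGLOCK_BITMAP | \
				 XFS_RTGLOCK_BITMAP_SHARED | \
				 XFS_RTGLOCK_RMAP)

int main(void)
{
	unsigned int flags = XFS_RTGLOCK_RMAP;

	/* Any bit outside the known set is a programming error; a new
	 * flag that is not also added to ALL_FLAGS trips this at once. */
	assert(!(flags & ~XFS_RTGLOCK_ALL_FLAGS));

	/* Shared and exclusive bitmap locking are mutually exclusive,
	 * matching the ASSERT in xfs_rtgroup_unlock() above. */
	assert(!((flags & XFS_RTGLOCK_BITMAP) &&
		 (flags & XFS_RTGLOCK_BITMAP_SHARED)));
	return 0;
}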