xfs: wire up rmap map and unmap to the realtime rmapbt
author    Darrick J. Wong <djwong@kernel.org>
          Sun, 22 Sep 2024 06:25:11 +0000 (08:25 +0200)
committer Christoph Hellwig <hch@lst.de>
          Sun, 22 Sep 2024 08:48:13 +0000 (10:48 +0200)
Connect the map and unmap reverse-mapping operations to the realtime
rmapbt via the deferred operation callbacks.  This enables us to
perform rmap operations against the correct btree.

[Contains a minor bugfix from hch]

Signed-off-by: Darrick J. Wong <djwong@kernel.org>
fs/xfs/libxfs/xfs_rmap.c
fs/xfs/libxfs/xfs_rtgroup.c
fs/xfs/libxfs/xfs_rtgroup.h
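
In outline, the patch teaches the deferred-rmap completion path to choose a
btree cursor by the owning group's type: realtime groups get a cursor on the
realtime rmapbt, allocation groups keep the per-AG rmapbt. The following is a
minimal, self-contained sketch of that dispatch; every type and function in it
is a stand-in for the real XFS structures, not the kernel code itself:

/* Stand-in model of the per-group-type cursor dispatch added by this
 * patch; none of these types are the real XFS ones. */
#include <stdio.h>

enum xg_type { XG_TYPE_AG, XG_TYPE_RTG };

struct group  { enum xg_type xg_type; };
struct cursor { const char *btree; };

/* Models xfs_rmap_finish_init_cursor(): per-AG rmap btree. */
static int ag_init_cursor(struct cursor *cur)
{
        cur->btree = "per-AG rmapbt";   /* real code fixes the AGFL first */
        return 0;
}

/* Models xfs_rtrmap_finish_init_cursor(): realtime rmap btree. */
static int rtg_init_cursor(struct cursor *cur)
{
        cur->btree = "realtime rmapbt"; /* real code locks + joins the rmap inode */
        return 0;
}

/* Mirrors the new branch in xfs_rmap_finish_one(). */
static int init_cursor(const struct group *g, struct cursor *cur)
{
        if (g->xg_type == XG_TYPE_RTG)
                return rtg_init_cursor(cur);
        return ag_init_cursor(cur);
}

int main(void)
{
        struct group rtg = { .xg_type = XG_TYPE_RTG };
        struct cursor cur;

        if (init_cursor(&rtg, &cur) == 0)
                printf("rmap update goes to the %s\n", cur.btree);
        return 0;
}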

diff --git a/fs/xfs/libxfs/xfs_rmap.c b/fs/xfs/libxfs/xfs_rmap.c
index 196010c3129b21b794fce98be28b3d0edcfc1957..00f675e2b9186d8d37a1068b63c85d60ef361a26 100644
--- a/fs/xfs/libxfs/xfs_rmap.c
+++ b/fs/xfs/libxfs/xfs_rmap.c
@@ -26,6 +26,7 @@
 #include "xfs_health.h"
 #include "xfs_rmap_item.h"
 #include "xfs_rtgroup.h"
+#include "xfs_rtrmap_btree.h"
 
 struct kmem_cache      *xfs_rmap_intent_cache;
 
@@ -2621,6 +2622,48 @@ __xfs_rmap_finish_intent(
        }
 }
 
+static int
+xfs_rmap_finish_init_cursor(
+       struct xfs_trans                *tp,
+       struct xfs_rmap_intent          *ri,
+       struct xfs_btree_cur            **pcur)
+{
+       struct xfs_perag                *pag = to_perag(ri->ri_group);
+       struct xfs_buf                  *agbp = NULL;
+       int                             error;
+
+       /*
+        * Refresh the freelist before we start changing the rmapbt, because a
+        * shape change could cause us to allocate blocks.
+        */
+       error = xfs_free_extent_fix_freelist(tp, pag, &agbp);
+       if (error) {
+               xfs_ag_mark_sick(pag, XFS_SICK_AG_AGFL);
+               return error;
+       }
+       if (XFS_IS_CORRUPT(tp->t_mountp, !agbp)) {
+               xfs_ag_mark_sick(pag, XFS_SICK_AG_AGFL);
+               return -EFSCORRUPTED;
+       }
+       *pcur = xfs_rmapbt_init_cursor(tp->t_mountp, tp, agbp, pag);
+       return 0;
+}
+
+static int
+xfs_rtrmap_finish_init_cursor(
+       struct xfs_trans                *tp,
+       struct xfs_rmap_intent          *ri,
+       struct xfs_btree_cur            **pcur)
+{
+       struct xfs_rtgroup              *rtg = to_rtg(ri->ri_group);
+
+       xfs_rtgroup_lock(rtg, XFS_RTGLOCK_RMAP);
+       xfs_rtgroup_trans_join(tp, rtg, XFS_RTGLOCK_RMAP);
+       *pcur = xfs_rtrmapbt_init_cursor(tp->t_mountp, tp, rtg,
+                       rtg->rtg_inodes[XFS_RTGI_RMAP]);
+       return 0;
+}
+
 /*
  * Process one of the deferred rmap operations.  We pass back the
  * btree cursor to maintain our lock on the rmapbt between calls.
@@ -2636,8 +2679,6 @@ xfs_rmap_finish_one(
 {
        struct xfs_owner_info           oinfo;
        struct xfs_mount                *mp = tp->t_mountp;
-       struct xfs_btree_cur            *rcur = *pcur;
-       struct xfs_buf                  *agbp = NULL;
        xfs_agblock_t                   bno;
        bool                            unwritten;
        int                             error = 0;
@@ -2651,38 +2692,29 @@ xfs_rmap_finish_one(
         * If we haven't gotten a cursor or the cursor AG doesn't match
         * the startblock, get one now.
         */
-       if (rcur != NULL && rcur->bc_group != ri->ri_group) {
-               xfs_btree_del_cursor(rcur, 0);
-               rcur = NULL;
+       if (*pcur != NULL && (*pcur)->bc_group != ri->ri_group) {
+               xfs_btree_del_cursor(*pcur, 0);
                *pcur = NULL;
        }
-       if (rcur == NULL) {
-               struct xfs_perag        *pag = to_perag(ri->ri_group);
-
-               /*
-                * Refresh the freelist before we start changing the
-                * rmapbt, because a shape change could cause us to
-                * allocate blocks.
-                */
-               error = xfs_free_extent_fix_freelist(tp, pag, &agbp);
-               if (error) {
-                       xfs_ag_mark_sick(pag, XFS_SICK_AG_AGFL);
+       if (*pcur == NULL) {
+               if (ri->ri_group->xg_type == XG_TYPE_RTG)
+                       error = xfs_rtrmap_finish_init_cursor(tp, ri, pcur);
+               else
+                       error = xfs_rmap_finish_init_cursor(tp, ri, pcur);
+               if (error)
                        return error;
-               }
-               if (XFS_IS_CORRUPT(tp->t_mountp, !agbp)) {
-                       xfs_ag_mark_sick(pag, XFS_SICK_AG_AGFL);
-                       return -EFSCORRUPTED;
-               }
-
-               *pcur = rcur = xfs_rmapbt_init_cursor(mp, tp, agbp, pag);
        }
 
        xfs_rmap_ino_owner(&oinfo, ri->ri_owner, ri->ri_whichfork,
                        ri->ri_bmap.br_startoff);
        unwritten = ri->ri_bmap.br_state == XFS_EXT_UNWRITTEN;
-       bno = XFS_FSB_TO_AGBNO(rcur->bc_mp, ri->ri_bmap.br_startblock);
 
-       error = __xfs_rmap_finish_intent(rcur, ri->ri_type, bno,
+       if (ri->ri_group->xg_type == XG_TYPE_RTG)
+               bno = xfs_rtb_to_rgbno(mp, ri->ri_bmap.br_startblock);
+       else
+               bno = XFS_FSB_TO_AGBNO(mp, ri->ri_bmap.br_startblock);
+
+       error = __xfs_rmap_finish_intent(*pcur, ri->ri_type, bno,
                        ri->ri_bmap.br_blockcount, &oinfo, unwritten);
        if (error)
                return error;
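
The second per-type branch above converts the extent's startblock into a
group-relative block number: xfs_rtb_to_rgbno() yields an offset within the
rtgroup, XFS_FSB_TO_AGBNO() an offset within the AG. As a rough standalone
model only (the real conversions use the mount's shift/size geometry, not a
plain modulo):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in: treat a block address as
 * group * blocks_per_group + offset and recover the offset. */
static uint64_t to_group_bno(uint64_t startblock, uint64_t blocks_per_group)
{
        return startblock % blocks_per_group;
}

int main(void)
{
        printf("AG-relative bno:      %llu\n",
               (unsigned long long)to_group_bno(5000, 1024));
        printf("rtgroup-relative bno: %llu\n",
               (unsigned long long)to_group_bno(9000, 4096));
        return 0;
}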
diff --git a/fs/xfs/libxfs/xfs_rtgroup.c b/fs/xfs/libxfs/xfs_rtgroup.c
index 5a968338bce52c982168c26942353a8c6c0b8a4b..b8b2370b3057a2d985f1e6119b5fb6f662cddf26 100644
--- a/fs/xfs/libxfs/xfs_rtgroup.c
+++ b/fs/xfs/libxfs/xfs_rtgroup.c
@@ -115,6 +115,10 @@ xfs_rtgroup_lock(
        } else if (rtglock_flags & XFS_RTGLOCK_BITMAP_SHARED) {
                xfs_ilock(rtg->rtg_inodes[XFS_RTGI_BITMAP], XFS_ILOCK_SHARED);
        }
+
+       if ((rtglock_flags & XFS_RTGLOCK_RMAP) &&
+           rtg->rtg_inodes[XFS_RTGI_RMAP] != NULL)
+               xfs_ilock(rtg->rtg_inodes[XFS_RTGI_RMAP], XFS_ILOCK_EXCL);
 }
 
 /* Unlock metadata inodes associated with this rt group. */
@@ -127,6 +131,9 @@ xfs_rtgroup_unlock(
        ASSERT(!(rtglock_flags & XFS_RTGLOCK_BITMAP_SHARED) ||
               !(rtglock_flags & XFS_RTGLOCK_BITMAP));
 
+       if ((rtglock_flags & XFS_RTGLOCK_RMAP) && rtg->rtg_inodes[XFS_RTGI_RMAP])
+               xfs_iunlock(rtg->rtg_inodes[XFS_RTGI_RMAP], XFS_ILOCK_EXCL);
+
        if (rtglock_flags & XFS_RTGLOCK_BITMAP) {
                xfs_iunlock(rtg->rtg_inodes[XFS_RTGI_SUMMARY], XFS_ILOCK_EXCL);
                xfs_iunlock(rtg->rtg_inodes[XFS_RTGI_BITMAP], XFS_ILOCK_EXCL);
@@ -154,6 +161,11 @@ xfs_rtgroup_trans_join(
                xfs_trans_ijoin(tp, rtg->rtg_inodes[XFS_RTGI_SUMMARY],
                                XFS_ILOCK_EXCL);
        }
+
+       if ((rtglock_flags & XFS_RTGLOCK_RMAP) &&
+           rtg->rtg_inodes[XFS_RTGI_RMAP] != NULL)
+               xfs_trans_ijoin(tp, rtg->rtg_inodes[XFS_RTGI_RMAP],
+                               XFS_ILOCK_EXCL);
 }
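
Taken together, the three hunks above give callers a lock-then-join pattern
for the rt rmap inode: xfs_rtgroup_lock() takes ILOCK_EXCL, then
xfs_rtgroup_trans_join() hands the locked inode to the transaction so that
commit releases it. A sketch of the intended calling sequence, using only
names that appear in this patch (not compilable on its own; it assumes the
usual transaction setup around it):

        xfs_rtgroup_lock(rtg, XFS_RTGLOCK_RMAP);
        xfs_rtgroup_trans_join(tp, rtg, XFS_RTGLOCK_RMAP);
        cur = xfs_rtrmapbt_init_cursor(tp->t_mountp, tp, rtg,
                        rtg->rtg_inodes[XFS_RTGI_RMAP]);
        /* ... rmap updates through cur, then xfs_btree_del_cursor() ... */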
 
 /* Retrieve rt group geometry. */
diff --git a/fs/xfs/libxfs/xfs_rtgroup.h b/fs/xfs/libxfs/xfs_rtgroup.h
index 60be0ee513a9d16c58fb95efbf7bb079c59dc1b2..0435adca9d7ba321dfd5901f8192031a3a1497ac 100644
--- a/fs/xfs/libxfs/xfs_rtgroup.h
+++ b/fs/xfs/libxfs/xfs_rtgroup.h
@@ -244,9 +244,12 @@ xfs_rtxnum_t xfs_rtgroup_extents(struct xfs_mount *mp, xfs_rgnumber_t rgno);
 #define XFS_RTGLOCK_BITMAP             (1U << 0)
 /* Lock the rt bitmap inode in shared mode */
 #define XFS_RTGLOCK_BITMAP_SHARED      (1U << 1)
+/* Lock the rt rmap inode in exclusive mode */
+#define XFS_RTGLOCK_RMAP               (1U << 2)
 
 #define XFS_RTGLOCK_ALL_FLAGS  (XFS_RTGLOCK_BITMAP | \
-                                XFS_RTGLOCK_BITMAP_SHARED)
+                                XFS_RTGLOCK_BITMAP_SHARED | \
+                                XFS_RTGLOCK_RMAP)
 
 void xfs_rtgroup_lock(struct xfs_rtgroup *rtg, unsigned int rtglock_flags);
 void xfs_rtgroup_unlock(struct xfs_rtgroup *rtg, unsigned int rtglock_flags);
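
Because the lock flags are bits in a mask, XFS_RTGLOCK_ALL_FLAGS lets the
helpers reject unknown bits, and the shared/exclusive bitmap flags exclude
each other (the ASSERT in xfs_rtgroup_unlock above checks exactly that). A
small self-contained model of the validation, with stand-in names:

#include <assert.h>

#define RTGLOCK_BITMAP          (1U << 0)
#define RTGLOCK_BITMAP_SHARED   (1U << 1)
#define RTGLOCK_RMAP            (1U << 2)
#define RTGLOCK_ALL_FLAGS       (RTGLOCK_BITMAP | RTGLOCK_BITMAP_SHARED | \
                                 RTGLOCK_RMAP)

static void rtgroup_lock_check(unsigned int flags)
{
        /* no undefined bits */
        assert(!(flags & ~RTGLOCK_ALL_FLAGS));
        /* bitmap may be locked shared or exclusive, never both */
        assert(!(flags & RTGLOCK_BITMAP_SHARED) || !(flags & RTGLOCK_BITMAP));
}

int main(void)
{
        rtgroup_lock_check(RTGLOCK_BITMAP | RTGLOCK_RMAP);      /* valid */
        return 0;
}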