www.infradead.org Git - users/hch/xfsprogs.git/commitdiff
repair: add a real per-AG bitmap abstraction
author     Christoph Hellwig <hch@lst.de>
           Thu, 15 Aug 2024 06:44:30 +0000 (08:44 +0200)
committer  Christoph Hellwig <hch@lst.de>
           Thu, 15 Aug 2024 12:58:59 +0000 (14:58 +0200)
Add a struct bmap that contains the btree root and the lock, and provide
helpers for locking instead of directly poking into the data structure.

Signed-off-by: Christoph Hellwig <hch@lst.de>
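
For illustration only, a minimal standalone sketch of the pattern this change
establishes: the per-AG mutex moves into struct bmap next to the btree root,
and call sites bracket their block map accesses with lock_ag()/unlock_ag()
instead of touching ag_locks[] directly.  The stub typedef, the placeholder
root pointer and the main() driver below are stand-ins added for the sketch;
only struct bmap, lock_ag() and unlock_ag() correspond to the code in the
diff.

	/*
	 * Sketch of the per-AG bitmap abstraction introduced by this patch.
	 * Not the real xfsprogs code; types are simplified stand-ins.
	 */
	#include <pthread.h>
	#include <stdlib.h>

	typedef unsigned int xfs_agnumber_t;	/* stand-in typedef */

	struct bmap {
		pthread_mutex_t		lock __attribute__((__aligned__(64)));
		void			*root;	/* stands in for struct btree_root */
	};

	static struct bmap	*ag_bmaps;

	/* helpers added by this patch: callers never touch the mutex directly */
	static void lock_ag(xfs_agnumber_t agno)
	{
		pthread_mutex_lock(&ag_bmaps[agno].lock);
	}

	static void unlock_ag(xfs_agnumber_t agno)
	{
		pthread_mutex_unlock(&ag_bmaps[agno].lock);
	}

	int main(void)
	{
		xfs_agnumber_t	nr_groups = 4, agno;

		ag_bmaps = calloc(nr_groups, sizeof(*ag_bmaps));
		if (!ag_bmaps)
			return 1;
		for (agno = 0; agno < nr_groups; agno++)
			pthread_mutex_init(&ag_bmaps[agno].lock, NULL);

		/* a call site now brackets its block map updates like this */
		lock_ag(2);
		/* ... get_bmap()/set_bmap() calls would go here ... */
		unlock_ag(2);

		for (agno = 0; agno < nr_groups; agno++)
			pthread_mutex_destroy(&ag_bmaps[agno].lock);
		free(ag_bmaps);
		return 0;
	}

In the patch itself the real helpers live in repair/incore.c and operate on
the ag_bmaps array, which replaces the separate ag_bmap and ag_locks arrays.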
repair/dino_chunks.c
repair/dinode.c
repair/globals.c
repair/globals.h
repair/incore.c
repair/incore.h
repair/rmap.c
repair/scan.c

repair/dino_chunks.c
index 8345ebbc322db771e7280afa10473eafc3150d9c..2eed630accb17c98aaee058046a3c472f9f04529 100644
@@ -135,7 +135,7 @@ verify_inode_chunk(xfs_mount_t              *mp,
                if (check_aginode_block(mp, agno, agino) == 0)
                        return 0;
 
-               pthread_mutex_lock(&ag_locks[agno].lock);
+               lock_ag(agno);
 
                state = get_bmap(agno, agbno);
                switch (state) {
@@ -170,8 +170,8 @@ verify_inode_chunk(xfs_mount_t              *mp,
                _("inode block %d/%d multiply claimed, (state %d)\n"),
                                agno, agbno, state);
                        set_bmap(agno, agbno, XR_E_MULT);
-                       pthread_mutex_unlock(&ag_locks[agno].lock);
-                       return(0);
+                       unlock_ag(agno);
+                       return 0;
                default:
                        do_warn(
                _("inode block %d/%d bad state, (state %d)\n"),
@@ -180,7 +180,7 @@ verify_inode_chunk(xfs_mount_t              *mp,
                        break;
                }
 
-               pthread_mutex_unlock(&ag_locks[agno].lock);
+               unlock_ag(agno);
 
                start_agino = XFS_AGB_TO_AGINO(mp, agbno);
                *start_ino = XFS_AGINO_TO_INO(mp, agno, start_agino);
@@ -427,7 +427,7 @@ verify_inode_chunk(xfs_mount_t              *mp,
         * user data -- we're probably here as a result of a directory
         * entry or an iunlinked pointer
         */
-       pthread_mutex_lock(&ag_locks[agno].lock);
+       lock_ag(agno);
        for (cur_agbno = chunk_start_agbno;
             cur_agbno < chunk_stop_agbno;
             cur_agbno += blen)  {
@@ -441,7 +441,7 @@ verify_inode_chunk(xfs_mount_t              *mp,
        _("inode block %d/%d multiply claimed, (state %d)\n"),
                                agno, cur_agbno, state);
                        set_bmap_ext(agno, cur_agbno, blen, XR_E_MULT);
-                       pthread_mutex_unlock(&ag_locks[agno].lock);
+                       unlock_ag(agno);
                        return 0;
                case XR_E_METADATA:
                case XR_E_INO:
@@ -453,7 +453,7 @@ verify_inode_chunk(xfs_mount_t              *mp,
                        break;
                }
        }
-       pthread_mutex_unlock(&ag_locks[agno].lock);
+       unlock_ag(agno);
 
        /*
         * ok, chunk is good.  put the record into the tree if required,
@@ -476,8 +476,7 @@ verify_inode_chunk(xfs_mount_t              *mp,
 
        set_inode_used(irec_p, agino - start_agino);
 
-       pthread_mutex_lock(&ag_locks[agno].lock);
-
+       lock_ag(agno);
        for (cur_agbno = chunk_start_agbno;
             cur_agbno < chunk_stop_agbno;
             cur_agbno += blen)  {
@@ -519,7 +518,7 @@ verify_inode_chunk(xfs_mount_t              *mp,
                        break;
                }
        }
-       pthread_mutex_unlock(&ag_locks[agno].lock);
+       unlock_ag(agno);
 
        return(ino_cnt);
 }
@@ -578,7 +577,7 @@ process_inode_agbno_state(
 {
        int state;
 
-       pthread_mutex_lock(&ag_locks[agno].lock);
+       lock_ag(agno);
        state = get_bmap(agno, agbno);
        switch (state) {
        case XR_E_INO:  /* already marked */
@@ -608,7 +607,7 @@ process_inode_agbno_state(
                        XFS_AGB_TO_FSB(mp, agno, agbno), state);
                break;
        }
-       pthread_mutex_unlock(&ag_locks[agno].lock);
+       unlock_ag(agno);
 }
 
 /*
repair/dinode.c
index 763adc73c09af8c2700cea0831bf5afce97e4d0f..df57fd77fdaf1a9695e771742eb3fa4560118182 100644
@@ -683,9 +683,9 @@ _("Fatal error: inode %" PRIu64 " - blkmap_set_ext(): %s\n"
                ebno = first_agbno + irec.br_blockcount;
                if (agno != locked_agno) {
                        if (locked_agno != -1)
-                               pthread_mutex_unlock(&ag_locks[locked_agno].lock);
-                       pthread_mutex_lock(&ag_locks[agno].lock);
+                               unlock_ag(locked_agno);
                        locked_agno = agno;
+                       lock_ag(locked_agno);
                }
 
                /*
@@ -822,7 +822,7 @@ _("illegal state %d in block map %" PRIu64 "\n"),
        error = 0;
 done:
        if (locked_agno != -1)
-               pthread_mutex_unlock(&ag_locks[locked_agno].lock);
+               unlock_ag(locked_agno);
 
        if (i != *numrecs) {
                ASSERT(i < *numrecs);
repair/globals.c
index d0c2a3f57a9a255d5376ca258ce86d9c76b63386..83794502416a4923018a80dbd1e95e059ac727a8 100644
@@ -121,8 +121,6 @@ xfs_extlen_t        sb_inoalignmt;
 uint32_t       sb_unit;
 uint32_t       sb_width;
 
-struct aglock  *ag_locks;
-
 time_t         report_interval;
 uint64_t       *prog_rpt_done;
 
repair/globals.h
index c2593c8947682236500f9000d0ce5d6087ff5ed0..fcef239f45319e1ffe48f578b2027cfcd3d44ed4 100644
@@ -162,10 +162,6 @@ extern xfs_extlen_t        sb_inoalignmt;
 extern uint32_t        sb_unit;
 extern uint32_t        sb_width;
 
-struct aglock {
-       pthread_mutex_t lock __attribute__((__aligned__(64)));
-};
-extern struct aglock   *ag_locks;
 extern pthread_mutex_t rt_lock;
 
 extern time_t          report_interval;
repair/incore.c
index 3aae0eaf1bbf9fe183bec80316acb33f1f5900bb..bc4ddcba9822d22cf5fa1e5167088bc44a9b10b4 100644
 static int states[16] =
        {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15};
 
-static struct btree_root       **ag_bmap;
+struct bmap {
+       pthread_mutex_t         lock __attribute__((__aligned__(64)));
+       struct btree_root       *root;
+};
+static struct bmap     *ag_bmaps;
+
+void
+lock_ag(
+       xfs_agnumber_t          agno)
+{
+       pthread_mutex_lock(&ag_bmaps[agno].lock);
+}
+
+void
+unlock_ag(
+       xfs_agnumber_t          agno)
+{
+       pthread_mutex_unlock(&ag_bmaps[agno].lock);
+}
 
 static void
 update_bmap(
@@ -129,7 +147,7 @@ set_bmap_ext(
        xfs_extlen_t            blen,
        int                     state)
 {
-       update_bmap(ag_bmap[agno], agbno, blen, &states[state]);
+       update_bmap(ag_bmaps[agno].root, agbno, blen, &states[state]);
 }
 
 int
@@ -139,23 +157,24 @@ get_bmap_ext(
        xfs_agblock_t           maxbno,
        xfs_extlen_t            *blen)
 {
+       struct btree_root       *bmap = ag_bmaps[agno].root;
        int                     *statep;
        unsigned long           key;
 
-       statep = btree_find(ag_bmap[agno], agbno, &key);
+       statep = btree_find(bmap, agbno, &key);
        if (!statep)
                return -1;
 
        if (key == agbno) {
                if (blen) {
-                       if (!btree_peek_next(ag_bmap[agno], &key))
+                       if (!btree_peek_next(bmap, &key))
                                return -1;
                        *blen = min(maxbno, key) - agbno;
                }
                return *statep;
        }
 
-       statep = btree_peek_prev(ag_bmap[agno], NULL);
+       statep = btree_peek_prev(bmap, NULL);
        if (!statep)
                return -1;
        if (blen)
@@ -255,13 +274,15 @@ reset_bmaps(xfs_mount_t *mp)
        ag_size = mp->m_sb.sb_agblocks;
 
        for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
+               struct btree_root       *bmap = ag_bmaps[agno].root;
+
                if (agno == mp->m_sb.sb_agcount - 1)
                        ag_size = (xfs_extlen_t)(mp->m_sb.sb_dblocks -
                                   (xfs_rfsblock_t)mp->m_sb.sb_agblocks * agno);
 #ifdef BTREE_STATS
-               if (btree_find(ag_bmap[agno], 0, NULL)) {
+               if (btree_find(bmap, 0, NULL)) {
                        printf("ag_bmap[%d] btree stats:\n", i);
-                       btree_print_stats(ag_bmap[agno], stdout);
+                       btree_print_stats(bmap, stdout);
                }
 #endif
                /*
@@ -272,27 +293,27 @@ reset_bmaps(xfs_mount_t *mp)
                 *      ag_hdr_block..ag_size:          XR_E_UNKNOWN
                 *      ag_size...                      XR_E_BAD_STATE
                 */
-               btree_clear(ag_bmap[agno]);
-               btree_insert(ag_bmap[agno], 0, &states[XR_E_INUSE_FS]);
-               btree_insert(ag_bmap[agno],
-                               ag_hdr_block, &states[XR_E_UNKNOWN]);
-               btree_insert(ag_bmap[agno], ag_size, &states[XR_E_BAD_STATE]);
+               btree_clear(bmap);
+               btree_insert(bmap, 0, &states[XR_E_INUSE_FS]);
+               btree_insert(bmap, ag_hdr_block, &states[XR_E_UNKNOWN]);
+               btree_insert(bmap, ag_size, &states[XR_E_BAD_STATE]);
        }
 
        for ( ; agno < nr_groups; agno++) {
-               btree_clear(ag_bmap[agno]);
+               struct btree_root       *bmap = ag_bmaps[agno].root;
+
+               btree_clear(bmap);
                if (agno == mp->m_sb.sb_agcount && xfs_has_rtsb(mp)) {
-                       btree_insert(ag_bmap[agno], 0, &states[XR_E_INUSE_FS]);
-                       btree_insert(ag_bmap[agno], mp->m_sb.sb_rextsize,
+                       btree_insert(bmap, 0, &states[XR_E_INUSE_FS]);
+                       btree_insert(bmap, mp->m_sb.sb_rextsize,
                                        &states[XR_E_FREE]);
                } else {
-                       btree_insert(ag_bmap[agno], 0, &states[XR_E_FREE]);
+                       btree_insert(bmap, 0, &states[XR_E_FREE]);
                }
 
-               btree_insert(ag_bmap[agno],
+               btree_insert(bmap,
                        xfs_rtgroup_extents(mp, (agno - mp->m_sb.sb_agcount)) <<
-                               mp->m_sb.sb_rextslog,
-                       &states[XR_E_BAD_STATE]);
+                               mp->m_sb.sb_rextslog, &states[XR_E_BAD_STATE]);
        }
 
        if (mp->m_sb.sb_logstart != 0) {
@@ -305,50 +326,58 @@ reset_bmaps(xfs_mount_t *mp)
        rtsb_init(mp);
 }
 
-void
-init_bmaps(
-       struct xfs_mount        *mp)
+static struct bmap *
+alloc_bmaps(
+       unsigned int            nr_groups)
 {
-       unsigned int            nr_groups =
-               mp->m_sb.sb_agcount + mp->m_sb.sb_rgcount;
+       struct bmap             *bmap;
        unsigned int            i;
 
-       ag_bmap = calloc(nr_groups, sizeof(struct btree_root *));
-       if (!ag_bmap)
-               do_error(_("couldn't allocate block map btree roots\n"));
-
-       ag_locks = calloc(nr_groups, sizeof(struct aglock));
-       if (!ag_locks)
-               do_error(_("couldn't allocate block map locks\n"));
+       bmap = calloc(nr_groups, sizeof(*bmap));
+       if (!bmap)
+               return NULL;
 
        for (i = 0; i < nr_groups; i++)  {
-               btree_init(&ag_bmap[i]);
-               pthread_mutex_init(&ag_locks[i].lock, NULL);
+               btree_init(&bmap[i].root);
+               pthread_mutex_init(&bmap[i].lock, NULL);
        }
 
-       init_rt_bmap(mp);
-       reset_bmaps(mp);
+       return bmap;
 }
 
-void
-free_bmaps(
-       struct xfs_mount        *mp)
+static void
+destroy_bmaps(
+       struct bmap             *bmap,
+       unsigned int            nr_groups)
 {
-       unsigned int            nr_groups =
-               mp->m_sb.sb_agcount + mp->m_sb.sb_rgcount;
        unsigned int            i;
 
-       for (i = 0; i < nr_groups; i++)
-               pthread_mutex_destroy(&ag_locks[i].lock);
+       for (i = 0; i < nr_groups; i++) {
+               btree_destroy(bmap[i].root);
+               pthread_mutex_destroy(&bmap[i].lock);
+       }
 
-       free(ag_locks);
-       ag_locks = NULL;
+       free(bmap);
+}
+
+void
+init_bmaps(
+       struct xfs_mount        *mp)
+{
+       ag_bmaps = alloc_bmaps(mp->m_sb.sb_agcount + mp->m_sb.sb_rgcount);
+       if (!ag_bmaps)
+               do_error(_("couldn't allocate block map btree roots\n"));
 
-       for (i = 0; i < nr_groups; i++)
-               btree_destroy(ag_bmap[i]);
+       init_rt_bmap(mp);
+       reset_bmaps(mp);
+}
 
-       free(ag_bmap);
-       ag_bmap = NULL;
+void
+free_bmaps(
+       struct xfs_mount        *mp)
+{
+       destroy_bmaps(ag_bmaps, mp->m_sb.sb_agcount + mp->m_sb.sb_rgcount);
+       ag_bmaps = NULL;
 
        free_rt_bmap(mp);
 }
repair/incore.h
index 70be9fa25ab6392a7b6afbe2d5a57986d4d2f058..efe82ceec4d6c6ad1c49edcf2618b42e99b92020 100644
@@ -23,6 +23,9 @@ void          init_bmaps(xfs_mount_t *mp);
 void           reset_bmaps(xfs_mount_t *mp);
 void           free_bmaps(xfs_mount_t *mp);
 
+void           lock_ag(xfs_agnumber_t agno);
+void           unlock_ag(xfs_agnumber_t agno);
+
 void           set_bmap_ext(xfs_agnumber_t agno, xfs_agblock_t agbno,
                             xfs_extlen_t blen, int state);
 int            get_bmap_ext(xfs_agnumber_t agno, xfs_agblock_t agbno,
repair/rmap.c
index 0d057ad7ff85eecd3368dcb625d13a1e4180c201..e61389f759f62527bca8d72bcbf263a348f54dbc 100644
@@ -867,12 +867,12 @@ mark_reflink_inodes(
                agno = XFS_INO_TO_AGNO(mp, rciter.ino);
                agino = XFS_INO_TO_AGINO(mp, rciter.ino);
 
-               pthread_mutex_lock(&ag_locks[agno].lock);
+               lock_ag(agno);
                irec = find_inode_rec(mp, agno, agino);
                off = get_inode_offset(mp, rciter.ino, irec);
                /* lock here because we might go outside this ag */
                set_inode_is_rl(irec, off);
-               pthread_mutex_unlock(&ag_locks[agno].lock);
+               unlock_ag(agno);
        }
        rcbag_ino_iter_stop(rcstack, &rciter);
 }
repair/scan.c
index 55f4c6f5ece43c709eb88988594a19fceee132d9..e45200b4f5f5167b2327ec4f8fef68d5b27f8b2a 100644
@@ -349,7 +349,7 @@ _("bad back (left) sibling pointer (saw %llu should be NULL (0))\n"
                agno = XFS_FSB_TO_AGNO(mp, bno);
                agbno = XFS_FSB_TO_AGBNO(mp, bno);
 
-               pthread_mutex_lock(&ag_locks[agno].lock);
+               lock_ag(agno);
                state = get_bmap(agno, agbno);
                switch (state) {
                case XR_E_INUSE1:
@@ -416,7 +416,7 @@ _("bad state %d, inode %" PRIu64 " bmap block 0x%" PRIx64 "\n"),
                                state, ino, bno);
                        break;
                }
-               pthread_mutex_unlock(&ag_locks[agno].lock);
+               unlock_ag(agno);
        } else {
                if (search_dup_extent(XFS_FSB_TO_AGNO(mp, bno),
                                XFS_FSB_TO_AGBNO(mp, bno),
@@ -429,9 +429,9 @@ _("bad state %d, inode %" PRIu64 " bmap block 0x%" PRIx64 "\n"),
        /* Record BMBT blocks in the reverse-mapping data. */
        if (check_dups && collect_rmaps && !zap_metadata) {
                agno = XFS_FSB_TO_AGNO(mp, bno);
-               pthread_mutex_lock(&ag_locks[agno].lock);
+               lock_ag(agno);
                rmap_add_bmbt_rec(mp, ino, whichfork, bno);
-               pthread_mutex_unlock(&ag_locks[agno].lock);
+               unlock_ag(agno);
        }
 
        if (level == 0) {