struct xchk_bmap_info *info,
struct xfs_bmbt_irec *irec)
{
+ xchk_rt_init(info->sc, &info->sc->sr, XCHK_RTLOCK_BITMAP_SHARED);
xchk_xref_is_used_rt_space(info->sc, irec->br_startblock,
irec->br_blockcount);
+ xchk_rt_unlock(info->sc, &info->sc->sr);
}
/* Cross-reference a single datadev extent record. */
return 0;
}
+/*
+ * For scrubbing a realtime file, grab all the in-core resources we'll need to
+ * check the realtime metadata, which means taking the ILOCK of the realtime
+ * metadata inodes. Callers must not join these inodes to the transaction
+ * with non-zero lockflags, or concurrency problems will result. The
+ * @rtlock_flags argument takes XCHK_RTLOCK_* flags because scrub has somewhat
+ * unusual locking requirements.
+ */
+void
+xchk_rt_init(
+ struct xfs_scrub *sc,
+ struct xchk_rt *sr,
+ unsigned int rtlock_flags)
+{
+ ASSERT(!(rtlock_flags & ~XCHK_RTLOCK_ALL));
+ ASSERT(hweight32(rtlock_flags & (XCHK_RTLOCK_BITMAP |
+ XCHK_RTLOCK_BITMAP_SHARED)) < 2);
+ ASSERT(hweight32(rtlock_flags & (XCHK_RTLOCK_SUMMARY |
+ XCHK_RTLOCK_SUMMARY_SHARED)) < 2);
+
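+ /* Per the rt lock order, take the rtbitmap ILOCK before the rtsummary. */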
+ if (rtlock_flags & XCHK_RTLOCK_BITMAP)
+ xfs_ilock(sc->mp->m_rbmip, XFS_ILOCK_EXCL | XFS_ILOCK_RTBITMAP);
+ else if (rtlock_flags & XCHK_RTLOCK_BITMAP_SHARED)
+ xfs_ilock(sc->mp->m_rbmip, XFS_ILOCK_SHARED | XFS_ILOCK_RTBITMAP);
+
+ if (rtlock_flags & XCHK_RTLOCK_SUMMARY)
+ xfs_ilock(sc->mp->m_rsumip, XFS_ILOCK_EXCL | XFS_ILOCK_RTSUM);
+ else if (rtlock_flags & XCHK_RTLOCK_SUMMARY_SHARED)
+ xfs_ilock(sc->mp->m_rsumip, XFS_ILOCK_SHARED | XFS_ILOCK_RTSUM);
+
+ sr->rtlock_flags = rtlock_flags;
+}
+
+/*
+ * Unlock the realtime metadata inodes. This must be done /after/ committing
+ * (or cancelling) the scrub transaction.
+ */
+void
+xchk_rt_unlock(
+ struct xfs_scrub *sc,
+ struct xchk_rt *sr)
+{
+ if (!sr->rtlock_flags)
+ return;
+
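+ /* Drop the locks in reverse order of xchk_rt_init: rtsummary, then rtbitmap. */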
+ if (sr->rtlock_flags & XCHK_RTLOCK_SUMMARY)
+ xfs_iunlock(sc->mp->m_rsumip, XFS_ILOCK_EXCL);
+ else if (sr->rtlock_flags & XCHK_RTLOCK_SUMMARY_SHARED)
+ xfs_iunlock(sc->mp->m_rsumip, XFS_ILOCK_SHARED);
+
+ if (sr->rtlock_flags & XCHK_RTLOCK_BITMAP)
+ xfs_iunlock(sc->mp->m_rbmip, XFS_ILOCK_EXCL);
+ else if (sr->rtlock_flags & XCHK_RTLOCK_BITMAP_SHARED)
+ xfs_iunlock(sc->mp->m_rbmip, XFS_ILOCK_SHARED);
+
+ sr->rtlock_flags = 0;
+}
+
+/* Drop only the shared rt bitmap lock. */
+void
+xchk_rt_unlock_rtbitmap(
+ struct xfs_scrub *sc)
+{
+ ASSERT(sc->sr.rtlock_flags & XCHK_RTLOCK_BITMAP_SHARED);
+
+ xfs_iunlock(sc->mp->m_rbmip, XFS_ILOCK_SHARED | XFS_ILOCK_RTBITMAP);
+ sc->sr.rtlock_flags &= ~XCHK_RTLOCK_BITMAP_SHARED;
+}
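
For orientation, the intended calling pattern matches the bmap hunk at the top of this patch: take the shared rtbitmap ILOCK around a single cross-reference check, then drop it. A minimal sketch, assuming a scrub context sc and an extent record irec from the surrounding bmap scrubber:

	xchk_rt_init(sc, &sc->sr, XCHK_RTLOCK_BITMAP_SHARED);
	xchk_xref_is_used_rt_space(sc, irec->br_startblock,
			irec->br_blockcount);
	xchk_rt_unlock(sc, &sc->sr);
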
+
/* Per-scrubber setup functions */
void
return error == -ENOENT ? -EFSCORRUPTED : error;
}
+/* Lock the rt bitmap in exclusive mode */
+#define XCHK_RTLOCK_BITMAP (1U << 31)
+/* Lock the rt bitmap in shared mode */
+#define XCHK_RTLOCK_BITMAP_SHARED (1U << 30)
+/* Lock the rt summary in exclusive mode */
+#define XCHK_RTLOCK_SUMMARY (1U << 29)
+/* Lock the rt summary in shared mode */
+#define XCHK_RTLOCK_SUMMARY_SHARED (1U << 28)
+
+#define XCHK_RTLOCK_ALL (XCHK_RTLOCK_BITMAP | \
+ XCHK_RTLOCK_BITMAP_SHARED | \
+ XCHK_RTLOCK_SUMMARY | \
+ XCHK_RTLOCK_SUMMARY_SHARED)
+
+void xchk_rt_init(struct xfs_scrub *sc, struct xchk_rt *sr,
+ unsigned int rtlock_flags);
+void xchk_rt_unlock(struct xfs_scrub *sc, struct xchk_rt *sr);
+void xchk_rt_unlock_rtbitmap(struct xfs_scrub *sc);
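
The four flags occupy the top bits of the word, and xchk_rt_init() asserts that at most one lock mode is requested per rt metadata inode. That invariant can be modeled in standalone userspace C for illustration; hweight32() is approximated with __builtin_popcount(), and none of this is part of the patch:

#include <assert.h>

#define XCHK_RTLOCK_BITMAP		(1U << 31)
#define XCHK_RTLOCK_BITMAP_SHARED	(1U << 30)
#define XCHK_RTLOCK_SUMMARY		(1U << 29)
#define XCHK_RTLOCK_SUMMARY_SHARED	(1U << 28)
#define XCHK_RTLOCK_ALL			(XCHK_RTLOCK_BITMAP | \
					 XCHK_RTLOCK_BITMAP_SHARED | \
					 XCHK_RTLOCK_SUMMARY | \
					 XCHK_RTLOCK_SUMMARY_SHARED)

/*
 * Mirror the ASSERTs in xchk_rt_init(): no unknown bits, and at most
 * one lock mode per rt metadata inode.
 */
static int rtlock_flags_valid(unsigned int f)
{
	return !(f & ~XCHK_RTLOCK_ALL) &&
	       __builtin_popcount(f & (XCHK_RTLOCK_BITMAP |
				       XCHK_RTLOCK_BITMAP_SHARED)) < 2 &&
	       __builtin_popcount(f & (XCHK_RTLOCK_SUMMARY |
				       XCHK_RTLOCK_SUMMARY_SHARED)) < 2;
}

int main(void)
{
	assert(rtlock_flags_valid(XCHK_RTLOCK_BITMAP));
	assert(rtlock_flags_valid(XCHK_RTLOCK_SUMMARY |
				  XCHK_RTLOCK_BITMAP_SHARED));
	assert(!rtlock_flags_valid(XCHK_RTLOCK_BITMAP |
				   XCHK_RTLOCK_BITMAP_SHARED));
	return 0;
}
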
int xchk_ag_read_headers(struct xfs_scrub *sc, xfs_agnumber_t agno,
struct xchk_ag *sa);
void xchk_ag_btcur_free(struct xchk_ag *sa);
if (error)
return error;
- xchk_ilock(sc, XFS_ILOCK_EXCL | XFS_ILOCK_RTBITMAP);
+ xchk_rt_init(sc, &sc->sr, XCHK_RTLOCK_BITMAP);
/*
* Now that we've locked the rtbitmap, we can't race with growfsrt
startext = xfs_rtb_to_rtx(sc->mp, rtbno);
endext = xfs_rtb_to_rtx(sc->mp, rtbno + len - 1);
- xfs_ilock(sc->mp->m_rbmip, XFS_ILOCK_SHARED | XFS_ILOCK_RTBITMAP);
error = xfs_rtalloc_extent_is_free(sc->mp, sc->tp, startext,
endext - startext + 1, &is_free);
if (!xchk_should_check_xref(sc, &error, NULL))
- goto out_unlock;
+ return;
if (is_free)
xchk_ino_xref_set_corrupt(sc, sc->mp->m_rbmip->i_ino);
-out_unlock:
- xfs_iunlock(sc->mp->m_rbmip, XFS_ILOCK_SHARED | XFS_ILOCK_RTBITMAP);
}
if (error)
return error;
- /*
- * Locking order requires us to take the rtbitmap first. We must be
- * careful to unlock it ourselves when we are done with the rtbitmap
- * file since the scrub infrastructure won't do that for us. Only
- * then we can lock the rtsummary inode.
- */
- xfs_ilock(mp->m_rbmip, XFS_ILOCK_SHARED | XFS_ILOCK_RTBITMAP);
- xchk_ilock(sc, XFS_ILOCK_EXCL | XFS_ILOCK_RTSUM);
+ xchk_rt_init(sc, &sc->sr,
+ XCHK_RTLOCK_SUMMARY | XCHK_RTLOCK_BITMAP_SHARED);
/*
* Now that we've locked the rtbitmap and rtsummary, we can't race with
* that order, so we're still protected against allocation activities
* even if we continue on to the repair function.
*/
- xfs_iunlock(mp->m_rbmip, XFS_ILOCK_SHARED | XFS_ILOCK_RTBITMAP);
+ xchk_rt_unlock_rtbitmap(sc);
return error;
}
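
Condensing the rtsummary hunks above: setup takes both rt inode locks in a single xchk_rt_init() call, the check runs, and only the shared rtbitmap lock is dropped at the end so the rtsummary ILOCK stays held into any repair. A rough sketch of the resulting flow (illustrative; the elided code between the hunks is assumed):

	xchk_rt_init(sc, &sc->sr,
			XCHK_RTLOCK_SUMMARY | XCHK_RTLOCK_BITMAP_SHARED);
	/* ... compare the rtsummary against the rtbitmap ... */
	xchk_rt_unlock_rtbitmap(sc);	/* bitmap no longer needed */
	/* teardown's xchk_rt_unlock() later drops the rtsummary ILOCK */
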
xfs_trans_cancel(sc->tp);
sc->tp = NULL;
}
+ xchk_rt_unlock(sc, &sc->sr);
if (sc->ip) {
if (sc->ilock_flags)
xchk_iunlock(sc, sc->ilock_flags);
struct xfs_btree_cur *refc_cur;
};
+/* Inode lock state for the RT volume. */
+struct xchk_rt {
+ /* XCHK_RTLOCK_* lock state */
+ unsigned int rtlock_flags;
+};
+
struct xfs_scrub {
/* General scrub state. */
struct xfs_mount *mp;
/* State tracking for single-AG operations. */
struct xchk_ag sa;
+
+ /* State tracking for realtime operations. */
+ struct xchk_rt sr;
};
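
Since rtlock_flags defaults to zero in a freshly initialized scrub context (an assumption about code not shown in these hunks), the unconditional xchk_rt_unlock() added to teardown is safe for scrubbers that never locked the rt inodes:

	/* Illustrative: nothing to drop when no XCHK_RTLOCK_* flag was set. */
	struct xchk_rt sr = { };	/* rtlock_flags == 0 */
	xchk_rt_unlock(sc, &sr);	/* hits the early return; no iunlock */
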
/* XCHK state flags grow up from zero, XREP state flags grow down from 2^31 */