for (cur_agbno = chunk_start_agbno;
cur_agbno < chunk_stop_agbno;
cur_agbno += blen) {
- state = get_bmap_ext(agno, cur_agbno, chunk_stop_agbno, &blen);
+ state = get_bmap_ext(agno, cur_agbno, chunk_stop_agbno, &blen,
+ false);
switch (state) {
case XR_E_MULT:
case XR_E_INUSE:
do_warn(
_("inode block %d/%d multiply claimed, (state %d)\n"),
agno, cur_agbno, state);
- set_bmap_ext(agno, cur_agbno, blen, XR_E_MULT);
+ set_bmap_ext(agno, cur_agbno, blen, XR_E_MULT, false);
unlock_ag(agno);
return 0;
case XR_E_METADATA:
for (cur_agbno = chunk_start_agbno;
cur_agbno < chunk_stop_agbno;
cur_agbno += blen) {
- state = get_bmap_ext(agno, cur_agbno, chunk_stop_agbno, &blen);
+ state = get_bmap_ext(agno, cur_agbno, chunk_stop_agbno, &blen,
+ false);
switch (state) {
case XR_E_INO:
do_error(
case XR_E_UNKNOWN:
case XR_E_FREE1:
case XR_E_FREE:
- set_bmap_ext(agno, cur_agbno, blen, XR_E_INO);
+ set_bmap_ext(agno, cur_agbno, blen, XR_E_INO, false);
break;
case XR_E_MULT:
case XR_E_INUSE:
do_warn(
_("inode block %d/%d bad state, (state %d)\n"),
agno, cur_agbno, state);
- set_bmap_ext(agno, cur_agbno, blen, XR_E_INO);
+ set_bmap_ext(agno, cur_agbno, blen, XR_E_INO, false);
break;
}
}
}
}
- /*
- * XXX: For rtgroup enabled file systems we treat the RTGs as
- * basically another set of AGs tacked on at the end, but
- * otherwise reuse all the existing code. That's why we'll
- * see odd "agno" value here.
- */
if (isrt) {
- agno = mp->m_sb.sb_agcount +
- xfs_rtb_to_rgno(mp, irec.br_startblock);
+ agno = xfs_rtb_to_rgno(mp, irec.br_startblock);
first_agbno = xfs_rtb_to_rgbno(mp, irec.br_startblock);
} else {
agno = XFS_FSB_TO_AGNO(mp, irec.br_startblock);
ebno = first_agbno + irec.br_blockcount;
if (agno != locked_agno) {
if (locked_agno != -1)
- unlock_ag(locked_agno);
+ unlock_group(locked_agno, isrt);
locked_agno = agno;
- lock_ag(locked_agno);
+ lock_group(locked_agno, isrt);
}
/*
for (b = irec.br_startblock;
agbno < ebno;
b += blen, agbno += blen) {
- state = get_bmap_ext(agno, agbno, ebno, &blen);
+ state = get_bmap_ext(agno, agbno, ebno, &blen, isrt);
switch (state) {
case XR_E_FREE:
/*
agbno = first_agbno;
ebno = first_agbno + irec.br_blockcount;
for (; agbno < ebno; agbno += blen) {
- state = get_bmap_ext(agno, agbno, ebno, &blen);
+ state = get_bmap_ext(agno, agbno, ebno, &blen, isrt);
switch (state) {
case XR_E_METADATA:
/*
case XR_E_FREE1:
case XR_E_INUSE1:
case XR_E_UNKNOWN:
- set_bmap_ext(agno, agbno, blen, zap_metadata ?
- XR_E_METADATA : XR_E_INUSE);
+ set_bmap_ext(agno, agbno, blen,
+ zap_metadata ?
+ XR_E_METADATA : XR_E_INUSE, isrt);
break;
case XR_E_INUSE:
case XR_E_MULT:
if (!zap_metadata)
set_bmap_ext(agno, agbno, blen,
- XR_E_MULT);
+ XR_E_MULT, isrt);
break;
default:
break;
error = 0;
done:
if (locked_agno != -1)
- unlock_ag(locked_agno);
+ unlock_group(locked_agno, isrt);
if (i != *numrecs) {
ASSERT(i < *numrecs);
struct btree_root *root;
};
static struct bmap *ag_bmaps;
+static struct bmap *rtg_bmaps;
+
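+/*
+ * Return the block usage map for an allocation group, or for a realtime
+ * group when @isrt is set.
+ */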
+static inline struct bmap *bmap_for_group(xfs_agnumber_t gno, bool isrt)
+{
+ if (isrt)
+ return &rtg_bmaps[gno];
+ return &ag_bmaps[gno];
+}
void
-lock_ag(
- xfs_agnumber_t agno)
+lock_group(
+ xfs_agnumber_t gno,
+ bool isrt)
{
- pthread_mutex_lock(&ag_bmaps[agno].lock);
+ pthread_mutex_lock(&bmap_for_group(gno, isrt)->lock);
}
void
-unlock_ag(
- xfs_agnumber_t agno)
+unlock_group(
+ xfs_agnumber_t gno,
+ bool isrt)
{
- pthread_mutex_unlock(&ag_bmaps[agno].lock);
+ pthread_mutex_unlock(&bmap_for_group(gno, isrt)->lock);
}
-static void
-update_bmap(
- struct btree_root *bmap,
- unsigned long offset,
+
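+/*
+ * Set the usage state of a range of blocks in the block map of the given
+ * (rt)group.
+ */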
+void
+set_bmap_ext(
+ xfs_agnumber_t gno,
+ xfs_agblock_t offset,
xfs_extlen_t blen,
- void *new_state)
+ int state,
+ bool isrt)
{
+ struct btree_root *bmap = bmap_for_group(gno, isrt)->root;
+ void *new_state = &states[state];
unsigned long end = offset + blen;
int *cur_state;
unsigned long cur_key;
btree_insert(bmap, end, prev_state);
}
-void
-set_bmap_ext(
- xfs_agnumber_t agno,
- xfs_agblock_t agbno,
- xfs_extlen_t blen,
- int state)
-{
- update_bmap(ag_bmaps[agno].root, agbno, blen, &states[state]);
-}
-
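+/*
+ * Return the usage state of the block at @agbno in group @gno and, in
+ * *@blen if it is non-NULL, the length of the run of blocks sharing that
+ * state, looking no further than @maxbno.
+ */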
int
get_bmap_ext(
- xfs_agnumber_t agno,
+ xfs_agnumber_t gno,
xfs_agblock_t agbno,
xfs_agblock_t maxbno,
- xfs_extlen_t *blen)
+ xfs_extlen_t *blen,
+ bool isrt)
{
- struct btree_root *bmap = ag_bmaps[agno].root;
+ struct btree_root *bmap = bmap_for_group(gno, isrt)->root;
int *statep;
unsigned long key;
free(rt_bmap);
rt_bmap = NULL;
pthread_mutex_destroy(&rt_lock);
-
}
-void
-reset_bmaps(xfs_mount_t *mp)
+static void
+reset_ag_bmaps(
+ struct xfs_mount *mp)
{
- unsigned int nr_groups = mp->m_sb.sb_agcount + mp->m_sb.sb_rgcount;
- unsigned int agno;
- xfs_agblock_t ag_size;
- int ag_hdr_block;
+ int ag_hdr_block;
+ xfs_agnumber_t agno;
+ xfs_agblock_t ag_size;
ag_hdr_block = howmany(4 * mp->m_sb.sb_sectsize, mp->m_sb.sb_blocksize);
ag_size = mp->m_sb.sb_agblocks;
btree_insert(bmap, ag_hdr_block, &states[XR_E_UNKNOWN]);
btree_insert(bmap, ag_size, &states[XR_E_BAD_STATE]);
}
+}
- for ( ; agno < nr_groups; agno++) {
- struct btree_root *bmap = ag_bmaps[agno].root;
+static void
+reset_rtg_bmaps(
+ struct xfs_mount *mp)
+{
+ xfs_rgnumber_t rgno;
+
+ for (rgno = 0; rgno < mp->m_sb.sb_rgcount; rgno++) {
+ struct btree_root *bmap = rtg_bmaps[rgno].root;
btree_clear(bmap);
- if (agno == mp->m_sb.sb_agcount && xfs_has_rtsb(mp)) {
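+ /* The realtime superblock occupies the first rtextent of rtgroup 0. */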
+ if (rgno == 0 && xfs_has_rtsb(mp)) {
btree_insert(bmap, 0, &states[XR_E_INUSE_FS]);
btree_insert(bmap, mp->m_sb.sb_rextsize,
&states[XR_E_FREE]);
}
btree_insert(bmap,
- xfs_rtgroup_extents(mp, (agno - mp->m_sb.sb_agcount)) <<
- mp->m_sb.sb_rextslog, &states[XR_E_BAD_STATE]);
+ xfs_rtgroup_extents(mp, rgno) << mp->m_sb.sb_rextslog,
+ &states[XR_E_BAD_STATE]);
}
+}
+
+void
+reset_bmaps(
+ struct xfs_mount *mp)
+{
+ reset_ag_bmaps(mp);
if (mp->m_sb.sb_logstart != 0) {
set_bmap_ext(XFS_FSB_TO_AGNO(mp, mp->m_sb.sb_logstart),
XFS_FSB_TO_AGBNO(mp, mp->m_sb.sb_logstart),
- mp->m_sb.sb_logblocks, XR_E_INUSE_FS);
+ mp->m_sb.sb_logblocks, XR_E_INUSE_FS, false);
}
- reset_rt_bmap();
- rtsb_init(mp);
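+ /*
+ * rtgroup file systems track realtime space in per-rtgroup block
+ * maps; older realtime file systems use the single global rt bitmap.
+ */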
+ if (xfs_has_rtgroups(mp)) {
+ reset_rtg_bmaps(mp);
+ rtsb_init(mp);
+ } else {
+ reset_rt_bmap();
+ }
}
static struct bmap *
init_bmaps(
struct xfs_mount *mp)
{
- ag_bmaps = alloc_bmaps(mp->m_sb.sb_agcount + mp->m_sb.sb_rgcount);
+ ag_bmaps = alloc_bmaps(mp->m_sb.sb_agcount);
if (!ag_bmaps)
do_error(_("couldn't allocate block map btree roots\n"));
- init_rt_bmap(mp);
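+ /* rtgroups get per-group block maps, else the legacy global rt bitmap. */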
+ if (xfs_has_rtgroups(mp)) {
+ rtg_bmaps = alloc_bmaps(mp->m_sb.sb_rgcount);
+ if (!rtg_bmaps)
+ do_error(_("couldn't allocate block map btree roots\n"));
+ } else {
+ init_rt_bmap(mp);
+ }
+
reset_bmaps(mp);
}
free_bmaps(
struct xfs_mount *mp)
{
- destroy_bmaps(ag_bmaps, mp->m_sb.sb_agcount + mp->m_sb.sb_rgcount);
+ destroy_bmaps(ag_bmaps, mp->m_sb.sb_agcount);
ag_bmaps = NULL;
- free_rt_bmap(mp);
+ if (xfs_has_rtgroups(mp)) {
+ destroy_bmaps(rtg_bmaps, mp->m_sb.sb_rgcount);
+ rtg_bmaps = NULL;
+ } else {
+ free_rt_bmap(mp);
+ }
}
void reset_bmaps(xfs_mount_t *mp);
void free_bmaps(xfs_mount_t *mp);
-void lock_ag(xfs_agnumber_t agno);
-void unlock_ag(xfs_agnumber_t agno);
+void lock_group(xfs_agnumber_t agno, bool isrt);
+void unlock_group(xfs_agnumber_t agno, bool isrt);
+
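+/* Convenience wrappers for callers that only operate on data device AGs. */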
+static inline void lock_ag(xfs_agnumber_t agno)
+{
+ lock_group(agno, false);
+}
+
+static inline void unlock_ag(xfs_agnumber_t agno)
+{
+ unlock_group(agno, false);
+}
void set_bmap_ext(xfs_agnumber_t agno, xfs_agblock_t agbno,
- xfs_extlen_t blen, int state);
+ xfs_extlen_t blen, int state, bool isrt);
int get_bmap_ext(xfs_agnumber_t agno, xfs_agblock_t agbno,
- xfs_agblock_t maxbno, xfs_extlen_t *blen);
-
-void set_rtbmap(xfs_rtxnum_t rtx, int state);
-int get_rtbmap(xfs_rtxnum_t rtx);
+ xfs_agblock_t maxbno, xfs_extlen_t *blen,
+ bool isrt);
static inline void
set_bmap(xfs_agnumber_t agno, xfs_agblock_t agbno, int state)
{
- set_bmap_ext(agno, agbno, 1, state);
+ set_bmap_ext(agno, agbno, 1, state, false);
}
static inline int
get_bmap(xfs_agnumber_t agno, xfs_agblock_t agbno)
{
- return get_bmap_ext(agno, agbno, agbno + 1, NULL);
+ return get_bmap_ext(agno, agbno, agbno + 1, NULL, false);
+}
+
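+/* Look up the usage state of a single realtime group block. */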
+static inline int
+get_rgbmap(xfs_rgnumber_t rgno, xfs_rgblock_t rgbno)
+{
+ return get_bmap_ext(rgno, rgbno, rgbno + 1, NULL, true);
}
+void set_rtbmap(xfs_rtxnum_t rtx, int state);
+int get_rtbmap(xfs_rtxnum_t rtx);
+
/*
* extent tree definitions
* right now, there are 3 trees per AG, a bno tree, a bcnt tree
* also mark blocks
*/
set_bmap_ext(0, XFS_INO_TO_AGBNO(mp, sb->sb_rootino),
- M_IGEO(mp)->ialloc_blks, XR_E_INO);
+ M_IGEO(mp)->ialloc_blks, XR_E_INO, false);
} else {
do_log(_(" - found root inode chunk\n"));
j = 0;
*/
static void
process_dup_extents(
+ struct xfs_mount *mp,
xfs_agnumber_t agno,
xfs_agblock_t agbno,
- xfs_agblock_t ag_end)
+ xfs_agblock_t ag_end,
+ bool isrt)
{
do {
int bstate;
xfs_extlen_t blen;
- bstate = get_bmap_ext(agno, agbno, ag_end, &blen);
+ bstate = get_bmap_ext(agno, agbno, ag_end, &blen, isrt);
switch (bstate) {
case XR_E_FREE1:
if (no_modify)
case XR_E_FS_MAP:
break;
case XR_E_MULT:
- add_dup_extent(agno, agbno, blen);
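+ /*
+ * The duplicate extent tree still indexes rtgroups as
+ * pseudo-AGs appended after the data device AGs, hence
+ * the sb_agcount offset here.
+ */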
+ add_dup_extent(isrt ? agno + mp->m_sb.sb_agcount : agno,
+ agbno, blen);
break;
case XR_E_BAD_STATE:
default:
mp->m_sb.sb_dblocks -
(xfs_rfsblock_t) mp->m_sb.sb_agblocks * i;
- process_dup_extents(i, ag_hdr_block, ag_end);
+ process_dup_extents(mp, i, ag_hdr_block, ag_end, false);
PROG_RPT_INC(prog_rpt_done[i], 1);
}
if (xfs_has_rtgroups(mp)) {
for (i = 0; i < mp->m_sb.sb_rgcount; i++) {
- process_dup_extents(mp->m_sb.sb_agcount + i, 0,
+ process_dup_extents(mp, i, 0,
xfs_rtgroup_extents(mp, i) <<
- mp->m_sb.sb_rextslog);
+ mp->m_sb.sb_rextslog, true);
}
} else {
process_dup_rt_extents(mp);
* largest extent.
*/
for (agbno = 0; agbno < ag_end; agbno += blen) {
- bstate = get_bmap_ext(agno, agbno, ag_end, &blen);
+ bstate = get_bmap_ext(agno, agbno, ag_end, &blen, false);
if (bstate < XR_E_INUSE) {
free_blocks += blen;
if (in_extent == 0) {
/* Find the number of free space extents. */
ag_end = libxfs_ag_block_count(mp, pag->pag_agno);
for (agbno = 0; agbno < ag_end; agbno += blen) {
- bstate = get_bmap_ext(pag->pag_agno, agbno, ag_end, &blen);
+ bstate = get_bmap_ext(pag->pag_agno, agbno, ag_end, &blen,
+ false);
if (bstate < XR_E_INUSE) {
if (!in_extent) {
/*
{
struct rtg_computed *comp = &rt_computed[rtg->rtg_rgno];
struct xfs_mount *mp = rtg->rtg_mount;
- unsigned int idx = mp->m_sb.sb_agcount + rtg->rtg_rgno;
unsigned int bitsperblock =
mp->m_blockwsize << XFS_NBWORDLOG;
xfs_rtxnum_t extno = 0;
/*
* Note: for the RTG case it might make sense to use
- * get_bmap_ext here and generate multiple bitmap
+ * get_rgbmap_ext here and generate multiple bitmap
* entries per lookup.
*/
if (xfs_has_rtgroups(mp))
- state = get_bmap(idx,
+ state = get_rgbmap(rtg->rtg_rgno,
extno * mp->m_sb.sb_rextsize);
else
state = get_rtbmap(extno);
}
for ( ; b < end; b += blen) {
- state = get_bmap_ext(agno, b, end, &blen);
+ state = get_bmap_ext(agno, b, end, &blen, false);
switch (state) {
case XR_E_UNKNOWN:
- set_bmap_ext(agno, b, blen, XR_E_FREE1);
+ set_bmap_ext(agno, b, blen, XR_E_FREE1,
+ false);
break;
case XR_E_FREE1:
/*
if (magic == XFS_ABTC_MAGIC ||
magic == XFS_ABTC_CRC_MAGIC) {
set_bmap_ext(agno, b, blen,
- XR_E_FREE);
+ XR_E_FREE, false);
break;
}
fallthrough;
switch (owner) {
case XFS_RMAP_OWN_FS:
case XFS_RMAP_OWN_LOG:
- set_bmap_ext(agno, b, blen, XR_E_INUSE_FS1);
+ set_bmap_ext(agno, b, blen, XR_E_INUSE_FS1, false);
break;
case XFS_RMAP_OWN_AG:
case XFS_RMAP_OWN_INOBT:
- set_bmap_ext(agno, b, blen, XR_E_FS_MAP1);
+ set_bmap_ext(agno, b, blen, XR_E_FS_MAP1, false);
break;
case XFS_RMAP_OWN_INODES:
- set_bmap_ext(agno, b, blen, XR_E_INO1);
+ set_bmap_ext(agno, b, blen, XR_E_INO1, false);
break;
case XFS_RMAP_OWN_REFC:
- set_bmap_ext(agno, b, blen, XR_E_REFC);
+ set_bmap_ext(agno, b, blen, XR_E_REFC, false);
break;
case XFS_RMAP_OWN_COW:
- set_bmap_ext(agno, b, blen, XR_E_COW);
+ set_bmap_ext(agno, b, blen, XR_E_COW, false);
break;
case XFS_RMAP_OWN_NULL:
/* still unknown */
break;
default:
/* file data */
- set_bmap_ext(agno, b, blen, XR_E_INUSE1);
+ set_bmap_ext(agno, b, blen, XR_E_INUSE1, false);
break;
}
break;
/* Check for block owner collisions. */
for ( ; b < end; b += blen) {
- state = get_bmap_ext(agno, b, end, &blen);
+ state = get_bmap_ext(agno, b, end, &blen,
+ false);
process_rmap_rec(mp, agno, b, end, blen, owner,
state, name);
}
xfs_extlen_t cnr;
for (c = agb; c < end; c += cnr) {
- state = get_bmap_ext(agno, c, end, &cnr);
+ state = get_bmap_ext(agno, c, end, &cnr,
+ false);
switch (state) {
case XR_E_UNKNOWN:
case XR_E_COW:
do_warn(
_("leftover CoW extent (%u/%u) len %u\n"),
agno, c, cnr);
- set_bmap_ext(agno, c, cnr, XR_E_FREE);
+ set_bmap_ext(agno, c, cnr,
+ XR_E_FREE, false);
break;
default:
do_warn(
xfs_extlen_t blen;
for (b = rgbno; b < end; b += len) {
- state = get_bmap_ext(rgno, b, end, &blen);
+ state = get_bmap_ext(rgno, b, end, &blen,
+ true);
blen = min(blen, len);
switch (state) {
do_warn(
_("leftover CoW rtextent (%llu)\n"),
(unsigned long long)rgbno);
- set_bmap_ext(rgno, b, len, XR_E_FREE);
+ set_bmap_ext(rgno, b, len, XR_E_FREE,
+ true);
break;
default:
do_warn(