if (check_aginode_block(mp, agno, agino) == 0)
return 0;
- pthread_mutex_lock(&ag_locks[agno].lock);
+ lock_ag(agno);
state = get_bmap(agno, agbno);
switch (state) {
_("inode block %d/%d multiply claimed, (state %d)\n"),
agno, agbno, state);
set_bmap(agno, agbno, XR_E_MULT);
- pthread_mutex_unlock(&ag_locks[agno].lock);
- return(0);
+ unlock_ag(agno);
+ return 0;
default:
do_warn(
_("inode block %d/%d bad state, (state %d)\n"),
break;
}
- pthread_mutex_unlock(&ag_locks[agno].lock);
+ unlock_ag(agno);
start_agino = XFS_AGB_TO_AGINO(mp, agbno);
*start_ino = XFS_AGINO_TO_INO(mp, agno, start_agino);
	/*
	 * make sure the chunk isn't already claimed by
	 * user data -- we're probably here as a result of a directory
* entry or an iunlinked pointer
*/
- pthread_mutex_lock(&ag_locks[agno].lock);
+ lock_ag(agno);
for (cur_agbno = chunk_start_agbno;
cur_agbno < chunk_stop_agbno;
cur_agbno += blen) {
_("inode block %d/%d multiply claimed, (state %d)\n"),
agno, cur_agbno, state);
set_bmap_ext(agno, cur_agbno, blen, XR_E_MULT);
- pthread_mutex_unlock(&ag_locks[agno].lock);
+ unlock_ag(agno);
return 0;
case XR_E_METADATA:
case XR_E_INO:
break;
}
}
- pthread_mutex_unlock(&ag_locks[agno].lock);
+ unlock_ag(agno);
	/*
	 * ok, chunk is good. put the record into the tree if required,
	 * and fill in the bitmap.
	 */
set_inode_used(irec_p, agino - start_agino);
- pthread_mutex_lock(&ag_locks[agno].lock);
-
+ lock_ag(agno);
for (cur_agbno = chunk_start_agbno;
cur_agbno < chunk_stop_agbno;
cur_agbno += blen) {
break;
}
}
- pthread_mutex_unlock(&ag_locks[agno].lock);
+ unlock_ag(agno);
-	return(ino_cnt);
+	return ino_cnt;
}
{
int state;
- pthread_mutex_lock(&ag_locks[agno].lock);
+ lock_ag(agno);
state = get_bmap(agno, agbno);
switch (state) {
case XR_E_INO: /* already marked */
XFS_AGB_TO_FSB(mp, agno, agbno), state);
break;
}
- pthread_mutex_unlock(&ag_locks[agno].lock);
+ unlock_ag(agno);
}
/*
 * The following manages the in-core bitmap of the entire filesystem
 * using extents in a btree.  The btree stores pointers into the states
 * array below rather than the state values themselves.
 */
static int states[16] =
{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15};
-static struct btree_root **ag_bmap;
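+/*
+ * Per-group block usage map: the btree recording block states and the
+ * lock protecting it.  Aligning the lock to 64 bytes keeps the locks
+ * of adjacent groups on separate cachelines.
+ */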
+struct bmap {
+ pthread_mutex_t lock __attribute__((__aligned__(64)));
+ struct btree_root *root;
+};
+static struct bmap *ag_bmaps;
+
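+/* Protect a group's block map against concurrent updates. */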
+void
+lock_ag(
+ xfs_agnumber_t agno)
+{
+ pthread_mutex_lock(&ag_bmaps[agno].lock);
+}
+
+void
+unlock_ag(
+ xfs_agnumber_t agno)
+{
+ pthread_mutex_unlock(&ag_bmaps[agno].lock);
+}
static void
update_bmap(
xfs_extlen_t blen,
int state)
{
- update_bmap(ag_bmap[agno], agbno, blen, &states[state]);
+ update_bmap(ag_bmaps[agno].root, agbno, blen, &states[state]);
}
int
xfs_agblock_t maxbno,
xfs_extlen_t *blen)
{
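+	/* grab this group's btree root once instead of indexing per call */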
+ struct btree_root *bmap = ag_bmaps[agno].root;
int *statep;
unsigned long key;
- statep = btree_find(ag_bmap[agno], agbno, &key);
+ statep = btree_find(bmap, agbno, &key);
if (!statep)
return -1;
if (key == agbno) {
if (blen) {
- if (!btree_peek_next(ag_bmap[agno], &key))
+ if (!btree_peek_next(bmap, &key))
return -1;
*blen = min(maxbno, key) - agbno;
}
return *statep;
}
- statep = btree_peek_prev(ag_bmap[agno], NULL);
+ statep = btree_peek_prev(bmap, NULL);
if (!statep)
return -1;
if (blen)
ag_size = mp->m_sb.sb_agblocks;
for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
+ struct btree_root *bmap = ag_bmaps[agno].root;
+
if (agno == mp->m_sb.sb_agcount - 1)
ag_size = (xfs_extlen_t)(mp->m_sb.sb_dblocks -
(xfs_rfsblock_t)mp->m_sb.sb_agblocks * agno);
#ifdef BTREE_STATS
- if (btree_find(ag_bmap[agno], 0, NULL)) {
+ if (btree_find(bmap, 0, NULL)) {
printf("ag_bmap[%d] btree stats:\n", i);
- btree_print_stats(ag_bmap[agno], stdout);
+ btree_print_stats(bmap, stdout);
}
#endif
		/*
		 * 0..ag_hdr_block: XR_E_INUSE_FS
* ag_hdr_block..ag_size: XR_E_UNKNOWN
* ag_size... XR_E_BAD_STATE
*/
- btree_clear(ag_bmap[agno]);
- btree_insert(ag_bmap[agno], 0, &states[XR_E_INUSE_FS]);
- btree_insert(ag_bmap[agno],
- ag_hdr_block, &states[XR_E_UNKNOWN]);
- btree_insert(ag_bmap[agno], ag_size, &states[XR_E_BAD_STATE]);
+ btree_clear(bmap);
+ btree_insert(bmap, 0, &states[XR_E_INUSE_FS]);
+ btree_insert(bmap, ag_hdr_block, &states[XR_E_UNKNOWN]);
+ btree_insert(bmap, ag_size, &states[XR_E_BAD_STATE]);
}
for ( ; agno < nr_groups; agno++) {
- btree_clear(ag_bmap[agno]);
+ struct btree_root *bmap = ag_bmaps[agno].root;
+
+ btree_clear(bmap);
if (agno == mp->m_sb.sb_agcount && xfs_has_rtsb(mp)) {
- btree_insert(ag_bmap[agno], 0, &states[XR_E_INUSE_FS]);
- btree_insert(ag_bmap[agno], mp->m_sb.sb_rextsize,
+ btree_insert(bmap, 0, &states[XR_E_INUSE_FS]);
+ btree_insert(bmap, mp->m_sb.sb_rextsize,
&states[XR_E_FREE]);
} else {
- btree_insert(ag_bmap[agno], 0, &states[XR_E_FREE]);
+ btree_insert(bmap, 0, &states[XR_E_FREE]);
}
- btree_insert(ag_bmap[agno],
+ btree_insert(bmap,
xfs_rtgroup_extents(mp, (agno - mp->m_sb.sb_agcount)) <<
- mp->m_sb.sb_rextslog,
- &states[XR_E_BAD_STATE]);
+ mp->m_sb.sb_rextslog, &states[XR_E_BAD_STATE]);
}
if (mp->m_sb.sb_logstart != 0) {
rtsb_init(mp);
}
-void
-init_bmaps(
- struct xfs_mount *mp)
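+/* Allocate and initialize a bmap (btree root plus lock) per group. */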
+static struct bmap *
+alloc_bmaps(
+ unsigned int nr_groups)
{
- unsigned int nr_groups =
- mp->m_sb.sb_agcount + mp->m_sb.sb_rgcount;
+ struct bmap *bmap;
unsigned int i;
- ag_bmap = calloc(nr_groups, sizeof(struct btree_root *));
- if (!ag_bmap)
- do_error(_("couldn't allocate block map btree roots\n"));
-
- ag_locks = calloc(nr_groups, sizeof(struct aglock));
- if (!ag_locks)
- do_error(_("couldn't allocate block map locks\n"));
+ bmap = calloc(nr_groups, sizeof(*bmap));
+ if (!bmap)
+ return NULL;
for (i = 0; i < nr_groups; i++) {
- btree_init(&ag_bmap[i]);
- pthread_mutex_init(&ag_locks[i].lock, NULL);
+ btree_init(&bmap[i].root);
+ pthread_mutex_init(&bmap[i].lock, NULL);
}
- init_rt_bmap(mp);
- reset_bmaps(mp);
+ return bmap;
}
-void
-free_bmaps(
- struct xfs_mount *mp)
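+/* Destroy each group's btree root and lock, then free the array. */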
+static void
+destroy_bmaps(
+ struct bmap *bmap,
+ unsigned int nr_groups)
{
- unsigned int nr_groups =
- mp->m_sb.sb_agcount + mp->m_sb.sb_rgcount;
unsigned int i;
- for (i = 0; i < nr_groups; i++)
- pthread_mutex_destroy(&ag_locks[i].lock);
+ for (i = 0; i < nr_groups; i++) {
+ btree_destroy(bmap[i].root);
+ pthread_mutex_destroy(&bmap[i].lock);
+ }
- free(ag_locks);
- ag_locks = NULL;
+ free(bmap);
+}
+
+void
+init_bmaps(
+ struct xfs_mount *mp)
+{
+ ag_bmaps = alloc_bmaps(mp->m_sb.sb_agcount + mp->m_sb.sb_rgcount);
+ if (!ag_bmaps)
+ do_error(_("couldn't allocate block map btree roots\n"));
- for (i = 0; i < nr_groups; i++)
- btree_destroy(ag_bmap[i]);
+ init_rt_bmap(mp);
+ reset_bmaps(mp);
+}
- free(ag_bmap);
- ag_bmap = NULL;
+void
+free_bmaps(
+ struct xfs_mount *mp)
+{
+ destroy_bmaps(ag_bmaps, mp->m_sb.sb_agcount + mp->m_sb.sb_rgcount);
+ ag_bmaps = NULL;
free_rt_bmap(mp);
}