	bool zap_metadata)
{
	xfs_fsblock_t lastb;
-	int bad;
+	int bad = 0;
/*
* check numeric validity of the extent
return 1;
}
+	pthread_mutex_lock(&rt_lock);
	bad = check_rt_rec_state(mp, ino, irec);
-	if (bad)
-		return bad;
-
-	if (check_dups) {
-		bad = process_rt_rec_dups(mp, ino, irec);
-		if (bad)
-			return bad;
-	} else {
-		process_rt_rec_state(mp, ino, zap_metadata, irec);
+	if (!bad) {
+		if (check_dups)
+			bad = process_rt_rec_dups(mp, ino, irec);
+		else
+			process_rt_rec_state(mp, ino, zap_metadata, irec);
	}
+	pthread_mutex_unlock(&rt_lock);
	/*
	 * bump up the block counter
	 */
-	*tot += irec->br_blockcount;
-	return 0;
+	if (!bad)
+		*tot += irec->br_blockcount;
+	return bad;
}
static inline bool
}
		if (isrt && !xfs_has_rtgroups(mp)) {
-			pthread_mutex_lock(&rt_lock.lock);
			error2 = process_rt_rec(mp, &irec, ino, tot, check_dups,
					zap_metadata);
-			pthread_mutex_unlock(&rt_lock.lock);
			if (error2)
				return error2;
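
Taken together, the two hunks above move the rt_lock critical section out of the caller and into process_rt_rec() itself, so the mutex is held only around the shared realtime block-state updates and is released on every exit path. Roughly, the function ends up looking like the sketch below (extent validity checks elided; the signature is reconstructed from the call site, so treat the exact parameter types and the static qualifier as assumptions):

static int
process_rt_rec(
	struct xfs_mount	*mp,
	struct xfs_bmbt_irec	*irec,
	xfs_ino_t		ino,
	xfs_rfsblock_t		*tot,
	int			check_dups,
	bool			zap_metadata)
{
	int			bad = 0;

	/* ... numeric validity checks on the extent, returning 1 on failure ... */

	/* serialize updates to the shared realtime extent state */
	pthread_mutex_lock(&rt_lock);
	bad = check_rt_rec_state(mp, ino, irec);
	if (!bad) {
		if (check_dups)
			bad = process_rt_rec_dups(mp, ino, irec);
		else
			process_rt_rec_state(mp, ino, zap_metadata, irec);
	}
	pthread_mutex_unlock(&rt_lock);

	/* bump up the block counter only for extents that passed the checks */
	if (!bad)
		*tot += irec->br_blockcount;
	return bad;
}

Replacing the early returns with a single if (!bad) block keeps the old behavior for bad extents (they still never contribute to *tot) while guaranteeing the unlock runs before every return.
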
uint32_t sb_width;
struct aglock *ag_locks;
-struct aglock rt_lock;
time_t report_interval;
uint64_t *prog_rpt_done;
	pthread_mutex_t lock __attribute__((__aligned__(64)));
};
extern struct aglock *ag_locks;
-extern struct aglock rt_lock;
+extern pthread_mutex_t rt_lock;
extern time_t report_interval;
extern uint64_t *prog_rpt_done;
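
With rt_lock now declared as a bare pthread_mutex_t, callers lock it directly rather than through an embedded .lock member; the per-AG locks keep the cache-line-aligned struct aglock wrapper, presumably because they live in an array where padding between adjacent mutexes matters. Schematically, a call site changes as follows (illustrative only, not a specific hunk):

	/* before: rt_lock was a struct aglock wrapping the mutex */
	pthread_mutex_lock(&rt_lock.lock);
	/* ... touch shared realtime state ... */
	pthread_mutex_unlock(&rt_lock.lock);

	/* after: rt_lock is the mutex itself */
	pthread_mutex_lock(&rt_lock);
	/* ... touch shared realtime state ... */
	pthread_mutex_unlock(&rt_lock);

A convenient side effect of the type change is that any rt_lock.lock use missed by the conversion no longer compiles.
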
static uint64_t *rt_bmap;
static size_t rt_bmap_size;
+pthread_mutex_t rt_lock;
/* block records fit into uint64_t's units */
#define XR_BB_UNIT 64 /* number of bits/unit */
	if (mp->m_sb.sb_rextents == 0)
		return;
+	pthread_mutex_init(&rt_lock, NULL);
	rt_bmap_size = roundup(howmany(mp->m_sb.sb_rextents, (NBBY / XR_BB)),
			sizeof(uint64_t));
{
	free(rt_bmap);
	rt_bmap = NULL;
-}
+	pthread_mutex_destroy(&rt_lock);
+}
void
reset_bmaps(xfs_mount_t *mp)
		btree_init(&ag_bmap[i]);
		pthread_mutex_init(&ag_locks[i].lock, NULL);
	}
-	pthread_mutex_init(&rt_lock.lock, NULL);
	init_rt_bmap(mp);
	reset_bmaps(mp);
			mp->m_sb.sb_agcount + mp->m_sb.sb_rgcount;
	unsigned int i;
-	pthread_mutex_destroy(&rt_lock.lock);
-
	for (i = 0; i < nr_groups; i++)
		pthread_mutex_destroy(&ag_locks[i].lock);
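
After these hunks the mutex lifecycle follows the realtime bitmap it protects instead of the AG lock array handled above. A condensed sketch of the result (only init_rt_bmap() is named in the hunks; the free_rt_bmap() name, the xfs_mount_t * parameters, and the elided rt_bmap allocation are assumptions from context):

pthread_mutex_t	rt_lock;

void
init_rt_bmap(xfs_mount_t *mp)
{
	if (mp->m_sb.sb_rextents == 0)
		return;

	pthread_mutex_init(&rt_lock, NULL);
	/* ... size and allocate rt_bmap for sb_rextents ... */
}

void
free_rt_bmap(xfs_mount_t *mp)
{
	free(rt_bmap);
	rt_bmap = NULL;
	pthread_mutex_destroy(&rt_lock);
}

One small asymmetry worth noting: init_rt_bmap() returns before initializing the mutex when the filesystem has no realtime extents, while the teardown destroys it unconditionally. Since rt_lock has static storage it is zero-filled either way, but initializing the mutex ahead of the early return (or guarding the destroy) would keep the pair strictly balanced.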