xfs_reclaim_work_queue(
struct xfs_mount *mp)
{
-
- rcu_read_lock();
- if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_RECLAIM_TAG)) {
+ if (xa_marked(&mp->m_perags, XFS_ICI_RECLAIM_MARK)) {
queue_delayed_work(mp->m_reclaim_workqueue, &mp->m_reclaim_work,
msecs_to_jiffies(xfs_syncd_centisecs / 6 * 10));
}
- rcu_read_unlock();
}
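
The rcu_read_lock()/rcu_read_unlock() pair can be dropped here because xa_marked() only tests a bit in the xarray's own flags word; it never dereferences entries, so it is safe to call with no locking at all. A minimal sketch of the resulting pattern (generic code, not part of this patch; the mark name is a stand-in):

#include <linux/xarray.h>

/* Stand-in for XFS_ICI_RECLAIM_MARK; any of XA_MARK_0..2 works. */
#define DEMO_RECLAIM_MARK	XA_MARK_0

static DEFINE_XARRAY(demo_array);

/*
 * xa_marked() reads a flag bit in the struct xarray itself, so no
 * rcu_read_lock() or spinlock is needed around the query. The answer
 * is inherently stale the moment it is returned, which is fine for
 * "should I kick the background worker?" style checks.
 */
static bool demo_work_pending(void)
{
	return xa_marked(&demo_array, DEMO_RECLAIM_MARK);
}
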
/*
if (pag->pag_ici_reclaimable++)
return;
- /* propagate the reclaim tag up into the perag radix tree */
- spin_lock(&mp->m_perag_lock);
- radix_tree_tag_set(&mp->m_perag_tree, pag->pag_agno,
- XFS_ICI_RECLAIM_TAG);
- spin_unlock(&mp->m_perag_lock);
+ /* propagate the reclaim mark up into the perag xarray */
+ xa_set_mark(&mp->m_perags, pag->pag_agno, XFS_ICI_RECLAIM_MARK);
/* schedule periodic background inode reclaim */
xfs_reclaim_work_queue(mp);
if (--pag->pag_ici_reclaimable)
return;
- /* clear the reclaim tag from the perag radix tree */
- spin_lock(&mp->m_perag_lock);
- radix_tree_tag_clear(&mp->m_perag_tree, pag->pag_agno,
- XFS_ICI_RECLAIM_TAG);
- spin_unlock(&mp->m_perag_lock);
+ /* clear the reclaim mark from the perag xarray */
+ xa_clear_mark(&mp->m_perags, pag->pag_agno, XFS_ICI_RECLAIM_MARK);
trace_xfs_perag_clear_reclaim(mp, pag->pag_agno, -1, _RET_IP_);
}
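
xa_set_mark() and xa_clear_mark() take the array's internal xa_lock themselves, which is what lets the hunks above delete the explicit m_perag_lock critical sections. A minimal sketch of the set/clear pattern, assuming a generic array and mark:

#include <linux/xarray.h>

#define DEMO_MARK	XA_MARK_0

static DEFINE_XARRAY(demo_pags);

/*
 * Both helpers acquire and release demo_pags.xa_lock internally, so
 * the caller needs no separate spinlock just to flip a mark. Note
 * that xa_set_mark() is a no-op if no entry is present at @index;
 * the entry must already be stored for the mark to stick.
 */
static void demo_flag_index(unsigned long index, bool set)
{
	if (set)
		xa_set_mark(&demo_pags, index, DEMO_MARK);
	else
		xa_clear_mark(&demo_pags, index, DEMO_MARK);
}
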
xfs_queue_eofblocks(
struct xfs_mount *mp)
{
- rcu_read_lock();
- if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_EOFBLOCKS_TAG))
+ if (xa_marked(&mp->m_perags, XFS_ICI_EOFBLOCKS_MARK))
queue_delayed_work(mp->m_eofblocks_workqueue,
&mp->m_eofblocks_work,
msecs_to_jiffies(xfs_eofb_secs * 1000));
- rcu_read_unlock();
}
void
xfs_queue_cowblocks(
struct xfs_mount *mp)
{
- rcu_read_lock();
- if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_COWBLOCKS_TAG))
+ if (xa_marked(&mp->m_perags, XFS_ICI_COWBLOCKS_MARK))
queue_delayed_work(mp->m_eofblocks_workqueue,
&mp->m_cowblocks_work,
msecs_to_jiffies(xfs_cowb_secs * 1000));
- rcu_read_unlock();
}
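
An XArray carries at most three marks per entry (XA_MARK_0 through XA_MARK_2), which happens to match the three per-AG inode cache tags exactly. The hunk defining the new mark constants is not shown in this excerpt; a plausible mapping would be:

/* Assumed definitions; the actual defining hunk is not shown here. */
#define XFS_ICI_RECLAIM_MARK	XA_MARK_0
#define XFS_ICI_EOFBLOCKS_MARK	XA_MARK_1
#define XFS_ICI_COWBLOCKS_MARK	XA_MARK_2
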
void
xfs_agnumber_t ag;
ag = 0;
- while ((pag = xfs_perag_get_tag(mp, ag, tag))) {
+ while ((pag = xfs_perag_get_mark(mp, ag, tag))) {
ag = pag->pag_agno + 1;
error = xfs_inode_ag_walk(mp, pag, execute, flags, args, tag,
0);
restart:
ag = 0;
skipped = 0;
- while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) {
+ while ((pag = xfs_perag_get_mark(mp, ag, XFS_ICI_RECLAIM_MARK))) {
unsigned long first_index = 0;
int done = 0;
int nr_found = 0;
xfs_agnumber_t ag = 0;
int reclaimable = 0;
- while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) {
+ while ((pag = xfs_perag_get_mark(mp, ag, XFS_ICI_RECLAIM_MARK))) {
ag = pag->pag_agno + 1;
reclaimable += pag->pag_ici_reclaimable;
xfs_perag_put(pag);
}
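
The body of xfs_perag_get_mark() is not part of this excerpt. Under the XArray it would most naturally wrap xa_find(), which returns the next entry at or after *indexp that has the given mark set. A hypothetical, generic sketch (the struct and names below are illustrative only, not XFS code):

#include <linux/xarray.h>
#include <linux/rcupdate.h>
#include <linux/atomic.h>

struct demo_pag {		/* stand-in for struct xfs_perag */
	atomic_t	ref;
};

static DEFINE_XARRAY(demo_pags);

/*
 * Find the next marked entry at or after @first and pin it with a
 * reference before dropping the RCU read lock, mirroring how the
 * radix-tree based xfs_perag_get_tag() pinned the pag.
 */
static struct demo_pag *demo_pag_get_marked(unsigned long first,
					    xa_mark_t mark)
{
	struct demo_pag *pag;
	unsigned long index = first;

	rcu_read_lock();
	pag = xa_find(&demo_pags, &index, ULONG_MAX, mark);
	if (pag)
		atomic_inc(&pag->ref);
	rcu_read_unlock();
	return pag;
}
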
static void
-__xfs_inode_set_blocks_tag(
+__xfs_inode_set_blocks_mark(
xfs_inode_t *ip,
void (*execute)(struct xfs_mount *mp),
void (*set_tp)(struct xfs_mount *mp, xfs_agnumber_t agno,
int error, unsigned long caller_ip),
- int tag)
+ xa_mark_t mark)
{
struct xfs_mount *mp = ip->i_mount;
struct xfs_perag *pag;
- int tagged;
+ int marked;
/*
- * Don't bother locking the AG and looking up in the radix trees
- * if we already know that we have the tag set.
+ * Don't bother locking the AG and looking up in the radix tree
+ * and perag xarray if we already know that we have the mark set.
*/
- if (ip->i_flags & xfs_iflag_for_tag(tag))
+ if (ip->i_flags & xfs_iflag_for_tag(mark))
return;
spin_lock(&ip->i_flags_lock);
- ip->i_flags |= xfs_iflag_for_tag(tag);
+ ip->i_flags |= xfs_iflag_for_tag(mark);
spin_unlock(&ip->i_flags_lock);
pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
spin_lock(&pag->pag_ici_lock);
- tagged = radix_tree_tagged(&pag->pag_ici_root, tag);
+ marked = radix_tree_tagged(&pag->pag_ici_root, mark);
radix_tree_tag_set(&pag->pag_ici_root,
- XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino), tag);
- if (!tagged) {
- /* propagate the eofblocks tag up into the perag radix tree */
- spin_lock(&ip->i_mount->m_perag_lock);
- radix_tree_tag_set(&ip->i_mount->m_perag_tree,
+ XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino), mark);
+ if (!marked) {
+ /* propagate the eofblocks mark up into the perag xarray */
+ xa_set_mark(&ip->i_mount->m_perags,
XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
- tag);
- spin_unlock(&ip->i_mount->m_perag_lock);
+ mark);
/* kick off background trimming */
execute(ip->i_mount);
xfs_inode_t *ip)
{
trace_xfs_inode_set_eofblocks_tag(ip);
- return __xfs_inode_set_blocks_tag(ip, xfs_queue_eofblocks,
+ return __xfs_inode_set_blocks_mark(ip, xfs_queue_eofblocks,
trace_xfs_perag_set_eofblocks,
- XFS_ICI_EOFBLOCKS_TAG);
+ XFS_ICI_EOFBLOCKS_MARK);
}
static void
-__xfs_inode_clear_blocks_tag(
+__xfs_inode_clear_blocks_mark(
xfs_inode_t *ip,
void (*clear_tp)(struct xfs_mount *mp, xfs_agnumber_t agno,
int error, unsigned long caller_ip),
- int tag)
+ xa_mark_t mark)
{
struct xfs_mount *mp = ip->i_mount;
struct xfs_perag *pag;
spin_lock(&ip->i_flags_lock);
- ip->i_flags &= ~xfs_iflag_for_tag(tag);
+ ip->i_flags &= ~xfs_iflag_for_tag(mark);
spin_unlock(&ip->i_flags_lock);
pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
spin_lock(&pag->pag_ici_lock);
radix_tree_tag_clear(&pag->pag_ici_root,
- XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino), tag);
- if (!radix_tree_tagged(&pag->pag_ici_root, tag)) {
- /* clear the eofblocks tag from the perag radix tree */
- spin_lock(&ip->i_mount->m_perag_lock);
- radix_tree_tag_clear(&ip->i_mount->m_perag_tree,
+ XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino), mark);
+ if (!radix_tree_tagged(&pag->pag_ici_root, mark)) {
+ /* clear the eofblocks mark from the perag xarray */
+ xa_clear_mark(&ip->i_mount->m_perags,
XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
- tag);
- spin_unlock(&ip->i_mount->m_perag_lock);
+ mark);
clear_tp(ip->i_mount, pag->pag_agno, -1, _RET_IP_);
}
xfs_inode_t *ip)
{
trace_xfs_inode_clear_eofblocks_tag(ip);
- return __xfs_inode_clear_blocks_tag(ip,
- trace_xfs_perag_clear_eofblocks, XFS_ICI_EOFBLOCKS_TAG);
+ return __xfs_inode_clear_blocks_mark(ip,
+ trace_xfs_perag_clear_eofblocks,
+ XFS_ICI_EOFBLOCKS_MARK);
}
/*
xfs_inode_t *ip)
{
trace_xfs_inode_set_cowblocks_tag(ip);
- return __xfs_inode_set_blocks_tag(ip, xfs_queue_cowblocks,
+ return __xfs_inode_set_blocks_mark(ip, xfs_queue_cowblocks,
trace_xfs_perag_set_cowblocks,
- XFS_ICI_COWBLOCKS_TAG);
+ XFS_ICI_COWBLOCKS_MARK);
}
void
xfs_inode_t *ip)
{
trace_xfs_inode_clear_cowblocks_tag(ip);
- return __xfs_inode_clear_blocks_tag(ip,
- trace_xfs_perag_clear_cowblocks, XFS_ICI_COWBLOCKS_TAG);
+ return __xfs_inode_clear_blocks_mark(ip,
+ trace_xfs_perag_clear_cowblocks,
+ XFS_ICI_COWBLOCKS_MARK);
}
/* Disable post-EOF and CoW block auto-reclamation. */
struct xfs_perag *pag;
for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
- spin_lock(&mp->m_perag_lock);
- pag = radix_tree_delete(&mp->m_perag_tree, agno);
- spin_unlock(&mp->m_perag_lock);
+ pag = xa_erase(&mp->m_perags, agno);
ASSERT(pag);
ASSERT(atomic_read(&pag->pag_ref) == 0);
xfs_iunlink_destroy(pag);
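
xa_erase() takes the xa_lock internally and returns the entry it removed, so the spin_lock()/radix_tree_delete()/spin_unlock() triple collapses to a single call with the same return-value contract. A minimal sketch, again with generic names:

#include <linux/xarray.h>

static DEFINE_XARRAY(demo_pags);

/*
 * Remove and return the entry at @index. Returns NULL if nothing was
 * stored there, matching radix_tree_delete()'s behaviour, so the
 * ASSERT(pag)-style checks above carry over unchanged.
 */
static void *demo_remove(unsigned long index)
{
	return xa_erase(&demo_pags, index);
}
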
int error = -ENOMEM;
/*
- * Walk the current per-ag tree so we don't try to initialise AGs
+ * Walk the current per-ag xarray so we don't try to initialise AGs
* that already exist (growfs case). Allocate and insert all the
* AGs we don't find ready for initialisation.
*/
pag->pagb_count = 0;
pag->pagb_tree = RB_ROOT;
- if (radix_tree_preload(GFP_NOFS))
- goto out_hash_destroy;
-
- spin_lock(&mp->m_perag_lock);
- if (radix_tree_insert(&mp->m_perag_tree, index, pag)) {
- BUG();
- spin_unlock(&mp->m_perag_lock);
- radix_tree_preload_end();
- error = -EEXIST;
+ error = xa_err(xa_store(&mp->m_perags, index, pag, GFP_NOFS));
+ if (error)
goto out_hash_destroy;
- }
- spin_unlock(&mp->m_perag_lock);
- radix_tree_preload_end();
/* first new pag is fully initialized */
if (first_initialised == NULLAGNUMBER)
first_initialised = index;
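
Unlike the radix tree, the XArray needs no preload step: xa_store() allocates internally with the given gfp flags and, on failure, returns the errno encoded as a pointer, which xa_err() extracts. One behavioural difference worth noting: xa_store() silently replaces an existing entry, whereas radix_tree_insert() failed on a populated slot; if that -EEXIST-style guarantee matters, xa_insert() is the closer replacement. A hedged sketch of both patterns (generic names):

#include <linux/xarray.h>

static DEFINE_XARRAY(demo_pags);

/* xa_store(): replace-or-insert; errors come back xa_err()-encoded. */
static int demo_store(unsigned long index, void *pag)
{
	return xa_err(xa_store(&demo_pags, index, pag, GFP_NOFS));
}

/*
 * xa_insert(): fails if @index is already occupied (-EBUSY in
 * current kernels), preserving radix_tree_insert()'s refusal to
 * overwrite an existing entry.
 */
static int demo_insert(unsigned long index, void *pag)
{
	return xa_insert(&demo_pags, index, pag, GFP_NOFS);
}
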
out_unwind_new_pags:
/* unwind any prior newly initialized pags */
for (index = first_initialised; index < agcount; index++) {
- pag = radix_tree_delete(&mp->m_perag_tree, index);
+ pag = xa_erase(&mp->m_perags, index);
if (!pag)
break;
xfs_buf_hash_destroy(pag);