struct xfs_rtgroup *rtg;
rcu_read_lock();
- rtg = radix_tree_lookup(&mp->m_rtgroup_tree, rgno);
+ rtg = xa_load(&mp->m_rtgroups, rgno);
if (rtg) {
trace_xfs_rtgroup_get(rtg, _RET_IP_);
ASSERT(atomic_read(&rtg->rtg_ref) >= 0);
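
The read side needs nothing beyond the one-line swap: like radix_tree_lookup(), xa_load() is safe under rcu_read_lock() alone, so the passive-reference lookup keeps its shape and no extra locking appears. A rough, self-contained sketch of the resulting pattern, using a made-up demo_group/demo_groups pair rather than the real struct xfs_rtgroup and mp->m_rtgroups (the full XFS structures are not shown in this hunk):

#include <linux/xarray.h>
#include <linux/rcupdate.h>
#include <linux/atomic.h>

/* Simplified stand-in for struct xfs_rtgroup; reused by the later sketches. */
struct demo_group {
	atomic_t	ref;		/* passive references (cf. rtg_ref) */
	atomic_t	active_ref;	/* active references (cf. rtg_active_ref) */
	struct rcu_head	rcu;		/* for RCU-delayed freeing */
};

static DEFINE_XARRAY(demo_groups);	/* stand-in for mp->m_rtgroups */

/* Take a passive reference; the RCU read lock protects the xa_load(). */
static struct demo_group *demo_group_get(unsigned long index)
{
	struct demo_group *grp;

	rcu_read_lock();
	grp = xa_load(&demo_groups, index);
	if (grp)
		atomic_inc(&grp->ref);
	rcu_read_unlock();
	return grp;
}
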
struct xfs_rtgroup *rtg;
rcu_read_lock();
- rtg = radix_tree_lookup(&mp->m_rtgroup_tree, agno);
+ rtg = xa_load(&mp->m_rtgroups, agno);
if (rtg) {
trace_xfs_rtgroup_grab(rtg, _RET_IP_);
if (!atomic_inc_not_zero(&rtg->rtg_active_ref))
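
The active-reference variant differs only in refusing to hand out a reference once the count has already dropped to zero, i.e. while the group is being torn down. The hunk ends just before that branch; assuming it mirrors the equivalent perag code, the lookup result is simply dropped in that case. Continuing the hypothetical demo_groups sketch from above:

/*
 * Take an active reference.  Returns NULL if no group is stored at @index or
 * if its active count has already reached zero.
 */
static struct demo_group *demo_group_grab(unsigned long index)
{
	struct demo_group *grp;

	rcu_read_lock();
	grp = xa_load(&demo_groups, index);
	if (grp && !atomic_inc_not_zero(&grp->active_ref))
		grp = NULL;
	rcu_read_unlock();
	return grp;
}
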
rtg->rtg_rgno = rgno;
rtg->rtg_mount = mp;
- error = radix_tree_preload(GFP_NOFS);
- if (error)
- goto out_free_rtg;
-
- spin_lock(&mp->m_rtgroup_lock);
- if (radix_tree_insert(&mp->m_rtgroup_tree, rgno, rtg)) {
- spin_unlock(&mp->m_rtgroup_lock);
- radix_tree_preload_end();
- error = -EEXIST;
+ error = xa_insert(&mp->m_rtgroups, rgno, rtg, GFP_KERNEL);
+ if (error) {
+ WARN_ON_ONCE(error == -EBUSY);
goto out_free_rtg;
}
- spin_unlock(&mp->m_rtgroup_lock);
- radix_tree_preload_end();
#ifdef __KERNEL__
/* Place kernel structure only init below this point. */
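
On the insert side the conversion removes more than it adds: xa_insert() allocates its internal nodes with the gfp mask it is given and takes the xarray's built-in lock, so the radix_tree_preload()/radix_tree_preload_end() pair and the external spinlock both go away. It returns -EBUSY (not the radix tree's -EEXIST) if an entry is already stored at that index and -ENOMEM if allocation fails, which is why the error branch above only warns about the -EBUSY case. Roughly, for the hypothetical demo_groups xarray:

#include <linux/slab.h>

/* Allocate a group and publish it; readers find it via xa_load() afterwards. */
static int demo_group_alloc(unsigned long index)
{
	struct demo_group *grp;
	int error;

	grp = kzalloc(sizeof(*grp), GFP_KERNEL);
	if (!grp)
		return -ENOMEM;
	atomic_set(&grp->ref, 0);
	atomic_set(&grp->active_ref, 1);

	/* -EBUSY would mean something is already stored at @index. */
	error = xa_insert(&demo_groups, index, grp, GFP_KERNEL);
	if (error) {
		kfree(grp);
		return error;
	}
	return 0;
}
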
{
struct xfs_rtgroup *rtg;
- spin_lock(&mp->m_rtgroup_lock);
- rtg = radix_tree_delete(&mp->m_rtgroup_tree, rgno);
- spin_unlock(&mp->m_rtgroup_lock);
-
+ rtg = xa_erase(&mp->m_rtgroups, rgno);
if (!rtg) /* can happen when growfs fails */
return;
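
Teardown follows the same pattern: xa_erase() removes the entry under the xarray's internal lock and returns whatever was stored there, so the spinlock around radix_tree_delete() is gone while the NULL check for the failed-growfs case keeps working unchanged. Because lookups run under rcu_read_lock(), the structure itself must still outlive a grace period; the freeing path is outside this hunk, but a sketch of the usual shape, again on the hypothetical demo_groups xarray:

/* Unpublish a group and free it once current RCU readers are done with it. */
static void demo_group_free(unsigned long index)
{
	struct demo_group *grp;

	grp = xa_erase(&demo_groups, index);
	if (!grp)			/* nothing was ever stored here */
		return;
	kfree_rcu(grp, rcu);		/* readers may still hold the pointer */
}
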
*/
atomic64_t m_allocbt_blks;
- struct radix_tree_root m_rtgroup_tree; /* per-rt group info */
- spinlock_t m_rtgroup_lock; /* lock for m_rtgroup_tree */
+ struct xarray m_rtgroups; /* per-rt group info */
struct radix_tree_root m_perag_tree; /* per-ag accounting info */
spinlock_t m_perag_lock; /* lock for m_perag_tree */
uint64_t m_resblks; /* total reserved blocks */
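
A single struct xarray can replace both the radix tree root and its dedicated spinlock because the xarray embeds its own xa_lock: xa_insert() and xa_erase() take it internally, and code that needs to serialize a larger update against concurrent stores can still take it explicitly and use the __xa_* variants. A speculative sketch of that, with a made-up demo_mount container rather than the real struct xfs_mount:

#include <linux/xarray.h>

struct demo_mount {			/* stand-in for struct xfs_mount */
	struct xarray	groups;		/* per-group info; lock lives inside the xarray */
};

/* Replace an entry while holding the xarray lock across the whole update. */
static void demo_mount_replace(struct demo_mount *dm, unsigned long index,
		void *new_entry)
{
	xa_lock(&dm->groups);
	/*
	 * __xa_store() is the variant meant to run with xa_lock already held;
	 * its return value (old entry or an xa_err() pointer) is ignored here
	 * for brevity.
	 */
	__xa_store(&dm->groups, index, new_entry, GFP_ATOMIC);
	xa_unlock(&dm->groups);
}
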
spin_lock_init(&mp->m_sb_lock);
INIT_RADIX_TREE(&mp->m_perag_tree, GFP_ATOMIC);
spin_lock_init(&mp->m_perag_lock);
- INIT_RADIX_TREE(&mp->m_rtgroup_tree, GFP_ATOMIC);
- spin_lock_init(&mp->m_rtgroup_lock);
+ xa_init(&mp->m_rtgroups);
mutex_init(&mp->m_growlock);
INIT_WORK(&mp->m_flush_inodes_work, xfs_flush_inodes_worker);
INIT_DELAYED_WORK(&mp->m_reclaim_work, xfs_reclaim_worker);
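
Setup shrinks accordingly: xa_init() takes no gfp flags because allocation policy is chosen per call (the GFP_KERNEL passed to xa_insert() above), and there is no separate lock to initialize. A small, self-contained lifecycle sketch of the same API with entirely hypothetical names:

#include <linux/xarray.h>
#include <linux/slab.h>
#include <linux/printk.h>

struct demo_item {
	unsigned long	id;
};

static int demo_lifecycle(void)
{
	struct xarray xa;
	struct demo_item *item, *entry;
	unsigned long index;
	int error;

	xa_init(&xa);			/* replaces INIT_RADIX_TREE + spin_lock_init */

	item = kzalloc(sizeof(*item), GFP_KERNEL);
	if (!item)
		return -ENOMEM;
	item->id = 42;			/* arbitrary index for the example */

	error = xa_insert(&xa, item->id, item, GFP_KERNEL);
	if (error) {
		kfree(item);
		return error;
	}

	/* Walk every occupied slot; @index is updated as the walk advances. */
	xa_for_each(&xa, index, entry)
		pr_info("entry at index %lu\n", index);

	kfree(xa_erase(&xa, item->id));	/* remove and free the one entry */
	xa_destroy(&xa);		/* drop any remaining internal nodes */
	return 0;
}
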