From: Matthew Wilcox
Date: Wed, 26 Sep 2018 12:01:13 +0000 (-0400)
Subject: xfs: Convert xfs dquot to XArray
X-Git-Url: https://www.infradead.org/git/?a=commitdiff_plain;h=89a43d82e95badbeedf7e0c8716396bdba5f2225;p=users%2Fwilly%2Fxarray.git

xfs: Convert xfs dquot to XArray

The dquot arrays are protected by a mutex as well as the internal
spinlock. That makes it hard to take advantage of the features of the
XArray and this patch ends up being a straightforward replacement of
the radix tree API with the XArray API.

Signed-off-by: Matthew Wilcox
---

diff --git a/fs/xfs/xfs_dquot.c b/fs/xfs/xfs_dquot.c
index fb1ad44830815..7900e48bce101 100644
--- a/fs/xfs/xfs_dquot.c
+++ b/fs/xfs/xfs_dquot.c
@@ -28,7 +28,7 @@
  * Lock order:
  *
  * ip->i_lock
- *   qi->qi_tree_lock
+ *   qi->qi_xa_lock
  *     dquot->q_qlock (xfs_dqlock() and friends)
  *       dquot->q_flush (xfs_dqflock() and friends)
  *         qi->qi_lru_lock
@@ -656,16 +656,16 @@ static struct xfs_dquot *
 xfs_qm_dqget_cache_lookup(
 	struct xfs_mount	*mp,
 	struct xfs_quotainfo	*qi,
-	struct radix_tree_root	*tree,
+	struct xarray		*xa,
 	xfs_dqid_t		id)
 {
 	struct xfs_dquot	*dqp;
 
 restart:
-	mutex_lock(&qi->qi_tree_lock);
-	dqp = radix_tree_lookup(tree, id);
+	mutex_lock(&qi->qi_xa_lock);
+	dqp = xa_load(xa, id);
 	if (!dqp) {
-		mutex_unlock(&qi->qi_tree_lock);
+		mutex_unlock(&qi->qi_xa_lock);
 		XFS_STATS_INC(mp, xs_qm_dqcachemisses);
 		return NULL;
 	}
@@ -673,14 +673,14 @@ restart:
 	xfs_dqlock(dqp);
 	if (dqp->dq_flags & XFS_DQ_FREEING) {
 		xfs_dqunlock(dqp);
-		mutex_unlock(&qi->qi_tree_lock);
+		mutex_unlock(&qi->qi_xa_lock);
 		trace_xfs_dqget_freeing(dqp);
 		delay(1);
 		goto restart;
 	}
 
 	dqp->q_nrefs++;
-	mutex_unlock(&qi->qi_tree_lock);
+	mutex_unlock(&qi->qi_xa_lock);
 
 	trace_xfs_dqget_hit(dqp);
 	XFS_STATS_INC(mp, xs_qm_dqcachehits);
@@ -697,18 +697,18 @@ static int
 xfs_qm_dqget_cache_insert(
 	struct xfs_mount	*mp,
 	struct xfs_quotainfo	*qi,
-	struct radix_tree_root	*tree,
+	struct xarray		*xa,
 	xfs_dqid_t		id,
 	struct xfs_dquot	*dqp)
 {
 	int			error;
 
-	mutex_lock(&qi->qi_tree_lock);
-	error = radix_tree_insert(tree, id, dqp);
+	mutex_lock(&qi->qi_xa_lock);
+	error = xa_insert(xa, id, dqp, GFP_NOFS);
 	if (unlikely(error)) {
 		/* Duplicate found! Caller must try again. */
-		WARN_ON(error != -EEXIST);
-		mutex_unlock(&qi->qi_tree_lock);
+		WARN_ON(error != -EBUSY);
+		mutex_unlock(&qi->qi_xa_lock);
 		trace_xfs_dqget_dup(dqp);
 		return error;
 	}
@@ -718,7 +718,7 @@ xfs_qm_dqget_cache_insert(
 	dqp->q_nrefs = 1;
 
 	qi->qi_dquots++;
-	mutex_unlock(&qi->qi_tree_lock);
+	mutex_unlock(&qi->qi_xa_lock);
 
 	return 0;
 }
@@ -764,7 +764,7 @@ xfs_qm_dqget(
 	struct xfs_dquot	**O_dqpp)
 {
 	struct xfs_quotainfo	*qi = mp->m_quotainfo;
-	struct radix_tree_root	*tree = xfs_dquot_tree(qi, type);
+	struct xarray		*xa = xfs_dquot_xa(qi, type);
 	struct xfs_dquot	*dqp;
 	int			error;
 
@@ -773,7 +773,7 @@ xfs_qm_dqget(
 		return error;
 
 restart:
-	dqp = xfs_qm_dqget_cache_lookup(mp, qi, tree, id);
+	dqp = xfs_qm_dqget_cache_lookup(mp, qi, xa, id);
 	if (dqp) {
 		*O_dqpp = dqp;
 		return 0;
@@ -783,7 +783,7 @@ restart:
 	if (error)
 		return error;
 
-	error = xfs_qm_dqget_cache_insert(mp, qi, tree, id, dqp);
+	error = xfs_qm_dqget_cache_insert(mp, qi, xa, id, dqp);
 	if (error) {
 		/*
 		 * Duplicate found. Just throw away the new dquot and start
@@ -853,7 +853,7 @@ xfs_qm_dqget_inode(
 {
 	struct xfs_mount	*mp = ip->i_mount;
 	struct xfs_quotainfo	*qi = mp->m_quotainfo;
-	struct radix_tree_root	*tree = xfs_dquot_tree(qi, type);
+	struct xarray		*xa = xfs_dquot_xa(qi, type);
 	struct xfs_dquot	*dqp;
 	xfs_dqid_t		id;
 	int			error;
@@ -868,7 +868,7 @@ xfs_qm_dqget_inode(
 
 	id = xfs_qm_id_for_quotatype(ip, type);
 restart:
-	dqp = xfs_qm_dqget_cache_lookup(mp, qi, tree, id);
+	dqp = xfs_qm_dqget_cache_lookup(mp, qi, xa, id);
 	if (dqp) {
 		*O_dqpp = dqp;
 		return 0;
@@ -907,7 +907,7 @@ restart:
 		return -ESRCH;
 	}
 
-	error = xfs_qm_dqget_cache_insert(mp, qi, tree, id, dqp);
+	error = xfs_qm_dqget_cache_insert(mp, qi, xa, id, dqp);
 	if (error) {
 		/*
 		 * Duplicate found. Just throw away the new dquot and start
diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c
index 5e7a37f0cf848..0b1e8da3fca43 100644
--- a/fs/xfs/xfs_qm.c
+++ b/fs/xfs/xfs_qm.c
@@ -51,7 +51,7 @@ xfs_qm_dquot_walk(
 	void			*data)
 {
 	struct xfs_quotainfo	*qi = mp->m_quotainfo;
-	struct radix_tree_root	*tree = xfs_dquot_tree(qi, type);
+	struct xarray		*xa = xfs_dquot_xa(qi, type);
 	uint32_t		next_index;
 	int			last_error = 0;
 	int			skipped;
@@ -67,11 +67,11 @@ restart:
 		int		error = 0;
 		int		i;
 
-		mutex_lock(&qi->qi_tree_lock);
-		nr_found = radix_tree_gang_lookup(tree, (void **)batch,
-					next_index, XFS_DQ_LOOKUP_BATCH);
+		mutex_lock(&qi->qi_xa_lock);
+		nr_found = xa_extract(xa, (void **)batch, next_index,
+					ULONG_MAX, XFS_DQ_LOOKUP_BATCH, XA_PRESENT);
 		if (!nr_found) {
-			mutex_unlock(&qi->qi_tree_lock);
+			mutex_unlock(&qi->qi_xa_lock);
 			break;
 		}
 
@@ -89,7 +89,7 @@ restart:
 				last_error = error;
 		}
 
-		mutex_unlock(&qi->qi_tree_lock);
+		mutex_unlock(&qi->qi_xa_lock);
 
 		/* bail out if the filesystem is corrupted. */
 		if (last_error == -EFSCORRUPTED) {
@@ -159,7 +159,7 @@ xfs_qm_dqpurge(
 	xfs_dqfunlock(dqp);
 	xfs_dqunlock(dqp);
 
-	radix_tree_delete(xfs_dquot_tree(qi, dqp->q_core.d_flags),
+	xa_erase(xfs_dquot_xa(qi, dqp->q_core.d_flags),
 			  be32_to_cpu(dqp->q_core.d_id));
 	qi->qi_dquots--;
 
@@ -656,10 +656,10 @@ xfs_qm_init_quotainfo(
 	if (error)
 		goto out_free_lru;
 
-	INIT_RADIX_TREE(&qinf->qi_uquota_tree, GFP_NOFS);
-	INIT_RADIX_TREE(&qinf->qi_gquota_tree, GFP_NOFS);
-	INIT_RADIX_TREE(&qinf->qi_pquota_tree, GFP_NOFS);
-	mutex_init(&qinf->qi_tree_lock);
+	xa_init(&qinf->qi_uquota_xa);
+	xa_init(&qinf->qi_gquota_xa);
+	xa_init(&qinf->qi_pquota_xa);
+	mutex_init(&qinf->qi_xa_lock);
 
 	/* mutex used to serialize quotaoffs */
 	mutex_init(&qinf->qi_quotaofflock);
@@ -692,7 +692,7 @@ xfs_qm_init_quotainfo(
 
 out_free_inos:
 	mutex_destroy(&qinf->qi_quotaofflock);
-	mutex_destroy(&qinf->qi_tree_lock);
+	mutex_destroy(&qinf->qi_xa_lock);
 	xfs_qm_destroy_quotainos(qinf);
 out_free_lru:
 	list_lru_destroy(&qinf->qi_lru);
@@ -719,7 +719,7 @@ xfs_qm_destroy_quotainfo(
 	unregister_shrinker(&qi->qi_shrinker);
 	list_lru_destroy(&qi->qi_lru);
 	xfs_qm_destroy_quotainos(qi);
-	mutex_destroy(&qi->qi_tree_lock);
+	mutex_destroy(&qi->qi_xa_lock);
 	mutex_destroy(&qi->qi_quotaofflock);
 	kmem_free(qi);
 	mp->m_quotainfo = NULL;
@@ -1582,12 +1582,12 @@ xfs_qm_dqfree_one(
 	struct xfs_mount	*mp = dqp->q_mount;
 	struct xfs_quotainfo	*qi = mp->m_quotainfo;
 
-	mutex_lock(&qi->qi_tree_lock);
-	radix_tree_delete(xfs_dquot_tree(qi, dqp->q_core.d_flags),
+	mutex_lock(&qi->qi_xa_lock);
+	xa_erase(xfs_dquot_xa(qi, dqp->q_core.d_flags),
 			  be32_to_cpu(dqp->q_core.d_id));
 	qi->qi_dquots--;
 
-	mutex_unlock(&qi->qi_tree_lock);
+	mutex_unlock(&qi->qi_xa_lock);
 
 	xfs_qm_dqdestroy(dqp);
 }
diff --git a/fs/xfs/xfs_qm.h b/fs/xfs/xfs_qm.h
index b41b750895480..77cfa5709d797 100644
--- a/fs/xfs/xfs_qm.h
+++ b/fs/xfs/xfs_qm.h
@@ -55,10 +55,10 @@ struct xfs_def_quota {
  * The mount structure keeps a pointer to this.
  */
 typedef struct xfs_quotainfo {
-	struct radix_tree_root	qi_uquota_tree;
-	struct radix_tree_root	qi_gquota_tree;
-	struct radix_tree_root	qi_pquota_tree;
-	struct mutex		qi_tree_lock;
+	struct xarray		qi_uquota_xa;
+	struct xarray		qi_gquota_xa;
+	struct xarray		qi_pquota_xa;
+	struct mutex		qi_xa_lock;
 	struct xfs_inode	*qi_uquotaip;	/* user quota inode */
 	struct xfs_inode	*qi_gquotaip;	/* group quota inode */
 	struct xfs_inode	*qi_pquotaip;	/* project quota inode */
@@ -79,18 +79,18 @@ typedef struct xfs_quotainfo {
 	struct shrinker		qi_shrinker;
 } xfs_quotainfo_t;
 
-static inline struct radix_tree_root *
-xfs_dquot_tree(
+static inline struct xarray *
+xfs_dquot_xa(
 	struct xfs_quotainfo	*qi,
 	int			type)
 {
 	switch (type) {
 	case XFS_DQ_USER:
-		return &qi->qi_uquota_tree;
+		return &qi->qi_uquota_xa;
 	case XFS_DQ_GROUP:
-		return &qi->qi_gquota_tree;
+		return &qi->qi_gquota_xa;
 	case XFS_DQ_PROJ:
-		return &qi->qi_pquota_tree;
+		return &qi->qi_pquota_xa;
 	default:
 		ASSERT(0);
 	}
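
Note (not part of the patch): the conversion maps each radix tree call onto its
XArray counterpart one for one. The sketch below is illustrative only; the
names (demo_cache, demo_entry, demo_*) are made up and stand in for the XFS
structures, but the five calls shown are the ones the patch relies on:
xa_init(), xa_load(), xa_insert(), xa_erase() and xa_extract().

	/*
	 * Minimal sketch of the XArray API used above.  Hypothetical
	 * names; only the XArray calls themselves are real.
	 */
	#include <linux/xarray.h>

	struct demo_entry {
		unsigned long	id;
	};

	static struct xarray demo_cache;

	static void demo_init(void)
	{
		/* was: INIT_RADIX_TREE(&tree, GFP_NOFS) */
		xa_init(&demo_cache);
	}

	static struct demo_entry *demo_lookup(unsigned long id)
	{
		/* was: radix_tree_lookup(); returns NULL on a miss */
		return xa_load(&demo_cache, id);
	}

	static int demo_insert(struct demo_entry *e)
	{
		/*
		 * was: radix_tree_insert(); the allocation flags move to
		 * the call itself, and a duplicate index now reports
		 * -EBUSY rather than -EEXIST.
		 */
		return xa_insert(&demo_cache, e->id, e, GFP_NOFS);
	}

	static void demo_remove(unsigned long id)
	{
		/* was: radix_tree_delete() */
		xa_erase(&demo_cache, id);
	}

	static unsigned int demo_batch(struct demo_entry **batch,
				       unsigned long start, unsigned int nr)
	{
		/*
		 * was: radix_tree_gang_lookup(); copies up to @nr present
		 * entries with index >= @start into @batch.
		 */
		return xa_extract(&demo_cache, (void **)batch, start,
				  ULONG_MAX, nr, XA_PRESENT);
	}

The one behavioural difference the patch has to absorb is that duplicate-insert
error code: radix_tree_insert() reported -EEXIST, while xa_insert() reports
-EBUSY, which is why the WARN_ON() in xfs_qm_dqget_cache_insert() changes.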