/* cluster size in bit shift */
unsigned char clusterbits;
/* the dedicated workstation for compression */
- struct radix_tree_root workstn_tree;
+ struct xarray compression_work;
/* threshold for synchronous decompression */
unsigned int max_sync_decompress_pages;
#define test_opt(sbi, option) ((sbi)->mount_opt & EROFS_MOUNT_##option)
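For reference, the only setup the new field needs is a one-line xa_init() wherever workstn_tree used to be initialized; an XArray carries its own spinlock, so there is no preload state to prepare. A minimal sketch (the helper name is hypothetical, not part of this patch):

#include <linux/xarray.h>

/* hypothetical init helper for illustration only */
static void erofs_init_compression_work(struct erofs_sb_info *sbi)
{
	/* sets up the array and its embedded xa_lock */
	xa_init(&sbi->compression_work);
}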
#ifdef CONFIG_EROFS_FS_ZIP
-#define erofs_workstn_lock(sbi) xa_lock(&(sbi)->workstn_tree)
-#define erofs_workstn_unlock(sbi) xa_unlock(&(sbi)->workstn_tree)
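The wrapper macros can go away because locking is now embedded in the structure itself: xa_lock()/xa_unlock() operate on the XArray's internal spinlock, and the __xa_* variants may be used while it is held. A sketch of that idiom, with an illustrative function name:

#include <linux/xarray.h>

/* illustrative only: update an entry under the embedded lock */
static void update_under_xa_lock(struct xarray *xa, unsigned long index,
				 void *entry)
{
	xa_lock(xa);
	/* __xa_store() requires the xa_lock to be held by the caller */
	__xa_store(xa, index, entry, GFP_ATOMIC);
	xa_unlock(xa);
}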
/* basic unit of the workstation of a super_block */
struct erofs_workgroup {
repeat:
rcu_read_lock();
- grp = radix_tree_lookup(&sbi->workstn_tree, index);
+ grp = xa_load(&sbi->compression_work, index);
if (grp) {
	*tag = xa_pointer_tag(grp);
	grp = xa_untag_pointer(grp);

	/* retry if the workgroup is concurrently being freed */
	if (erofs_workgroup_get(grp)) {
		rcu_read_unlock();
		goto repeat;
	}
}
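The tag helpers above are generic <linux/xarray.h> API: a small integer tag (the low two pointer bits, so values 0-3) is folded into the stored pointer and must be stripped again before use. A self-contained sketch of the round trip; the function name and WARN_ON checks are illustrative:

#include <linux/xarray.h>

static void tag_roundtrip(struct xarray *xa, struct erofs_workgroup *grp,
			  unsigned long tag)
{
	void *entry;

	/* fold the tag (must fit in the low two bits) into the pointer */
	xa_store(xa, grp->index, xa_tag_pointer(grp, tag), GFP_NOFS);

	/* split it back out on lookup before dereferencing */
	entry = xa_load(xa, grp->index);
	WARN_ON(xa_pointer_tag(entry) != tag);
	WARN_ON(xa_untag_pointer(entry) != grp);
}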
- err = radix_tree_preload(GFP_NOFS);
- if (err)
- return err;
-
sbi = EROFS_SB(sb);
- erofs_workstn_lock(sbi);
-
- grp = xa_tag_pointer(grp, tag);
/*
* Bump up reference count before making this workgroup
* visible to other users in order to avoid potential UAF
- * without serialized by erofs_workstn_lock.
+ * without being serialized by xa_lock.
*/
__erofs_workgroup_get(grp);
- err = radix_tree_insert(&sbi->workstn_tree,
- grp->index, grp);
+ err = xa_insert(&sbi->compression_work, grp->index,
+ 		xa_tag_pointer(grp, tag), GFP_NOFS);
if (unlikely(err))
/*
 * it's safe to drop the refcount here since the
 * workgroup isn't visible to others yet
 */
__erofs_workgroup_put(grp);
- erofs_workstn_unlock(sbi);
- radix_tree_preload_end();
return err;
}
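xa_insert() packs the whole removed sequence into one call: it takes the xa_lock internally, allocates nodes with the given gfp mask (hence no radix_tree_preload()), and fails with -EBUSY if the index is already occupied, preserving the insert-only semantics of radix_tree_insert(). A hedged sketch of the resulting registration pattern, under an illustrative name:

#include <linux/xarray.h>

/* illustrative registration helper; mirrors the pattern above */
static int register_workgroup(struct erofs_sb_info *sbi,
			      struct erofs_workgroup *grp,
			      unsigned long tag)
{
	int err;

	/* take a reference before the workgroup becomes visible */
	__erofs_workgroup_get(grp);

	err = xa_insert(&sbi->compression_work, grp->index,
			xa_tag_pointer(grp, tag), GFP_NOFS);
	if (err)	/* -EBUSY on a lost race, -ENOMEM on failure */
		__erofs_workgroup_put(grp);
	return err;
}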
/*
* note that all cached pages should be unlinked
- * before delete it from the radix tree.
+ * before deleting it from the XArray.
* Otherwise some cached pages of an orphan old workgroup
* could be still linked after the new one is available.
*/
* however, to catch unexpected race conditions early, add a
* DBG_BUGON to observe this in advance.
*/
- DBG_BUGON(xa_untag_pointer(radix_tree_delete(&sbi->workstn_tree,
+ DBG_BUGON(xa_untag_pointer(xa_erase(&sbi->compression_work,
grp->index)) != grp);
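The xa_erase() call both removes the entry and returns the old value in a single locked step, which is exactly what lets the DBG_BUGON above compare the erased pointer against grp without holding any external lock. A minimal sketch (DBG_BUGON is EROFS's debug assertion, assumed available here):

#include <linux/xarray.h>

static void erase_and_check(struct xarray *xa, struct erofs_workgroup *grp)
{
	/* remove and fetch the old entry atomically under xa_lock */
	void *old = xa_erase(xa, grp->index);

	/* the slot must have held exactly this tagged workgroup */
	DBG_BUGON(xa_untag_pointer(old) != grp);
}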
/*
if (cnt > 1)
return false;
- DBG_BUGON(xa_untag_pointer(radix_tree_delete(&sbi->workstn_tree,
+ DBG_BUGON(xa_untag_pointer(xa_erase(&sbi->compression_work,
grp->index)) != grp);
/* (rarely) could be grabbed again when freeing */
unsigned long nr_shrink,
bool cleanup)
{
- pgoff_t first_index = 0;
- void *batch[PAGEVEC_SIZE];
+ struct erofs_workgroup *grp;
unsigned int freed = 0;
+ unsigned long index;
- int i, found;
-repeat:
- erofs_workstn_lock(sbi);
-
- found = radix_tree_gang_lookup(&sbi->workstn_tree,
- batch, first_index, PAGEVEC_SIZE);
-
- for (i = 0; i < found; ++i) {
- struct erofs_workgroup *grp = xa_untag_pointer(batch[i]);
-
- first_index = grp->index + 1;
+ xa_for_each(&sbi->compression_work, index, grp) {
+ 	grp = xa_untag_pointer(grp);
	/* try to shrink each valid workgroup */
	if (!erofs_try_to_release_workgroup(sbi, grp, cleanup))
		continue;

	++freed;
	if (unlikely(!--nr_shrink))
		break;
}
- erofs_workstn_unlock(sbi);
- if (i && nr_shrink)
- goto repeat;
return freed;
}
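xa_for_each() is what removes the whole repeat/gang-lookup/first_index dance: the macro walks present entries in index order, takes the RCU read lock around each lookup itself, and tolerates concurrent inserts and erases, so no external lock is held across the loop body. A standalone sketch of the iteration idiom (the counting body is illustrative):

#include <linux/xarray.h>

static unsigned int count_workgroups(struct xarray *xa)
{
	struct erofs_workgroup *grp;
	unsigned long index;
	unsigned int n = 0;

	/* safe against concurrent modification; no caller lock required */
	xa_for_each(xa, index, grp) {
		grp = xa_untag_pointer(grp);	/* entries are stored tagged */
		n++;
	}
	return n;
}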