erofs: Convert workstn_tree to XArray
author    Matthew Wilcox <willy@infradead.org>
          Thu, 18 Oct 2018 19:36:42 +0000 (15:36 -0400)
committer Matthew Wilcox (Oracle) <willy@infradead.org>
          Thu, 8 Aug 2019 18:01:05 +0000 (14:01 -0400)
Rename it to compression_work as a better reflection of what it's for.
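
For reference, a minimal sketch (not part of this patch) of the XArray
tagged-pointer API the converted code relies on.  The erofs types are
real; the example_* names are illustrative stand-ins:

	#include <linux/xarray.h>

	static DEFINE_XARRAY(example_works); /* stands in for sbi->compression_work */

	/* Store a workgroup with a small tag folded into the entry's low bits. */
	static int example_register(struct erofs_workgroup *grp, unsigned long tag)
	{
		return xa_insert(&example_works, grp->index,
				 xa_tag_pointer(grp, tag), GFP_KERNEL);
	}

	/* Look one up again, recovering both the pointer and the tag. */
	static struct erofs_workgroup *example_find(pgoff_t index, unsigned long *tag)
	{
		/* real callers do this under rcu_read_lock() */
		void *entry = xa_load(&example_works, index);

		if (!entry)
			return NULL;
		*tag = xa_pointer_tag(entry);
		return xa_untag_pointer(entry);
	}

xa_insert() takes its own gfp_t and allocates internally, which is why
the radix_tree_preload()/radix_tree_preload_end() pair in the diff
below can go away.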

Signed-off-by: Matthew Wilcox <willy@infradead.org>
drivers/staging/erofs/internal.h
drivers/staging/erofs/super.c
drivers/staging/erofs/utils.c

diff --git a/drivers/staging/erofs/internal.h b/drivers/staging/erofs/internal.h
index 963cc1b8b8961b0f208a642f91f0f4d9a6f58e65..ca16abca8598cd7eac649a3b7a440afc8cf8aceb 100644
--- a/drivers/staging/erofs/internal.h
+++ b/drivers/staging/erofs/internal.h
@@ -93,8 +93,7 @@ struct erofs_sb_info {
        /* cluster size in bit shift */
        unsigned char clusterbits;
 
-       /* the dedicated workstation for compression */
-       struct radix_tree_root workstn_tree;
+       struct xarray compression_work;
 
        /* threshold for decompression synchronously */
        unsigned int max_sync_decompress_pages;
@@ -183,8 +182,6 @@ static inline void *erofs_kmalloc(struct erofs_sb_info *sbi,
 #define test_opt(sbi, option)  ((sbi)->mount_opt & EROFS_MOUNT_##option)
 
 #ifdef CONFIG_EROFS_FS_ZIP
-#define erofs_workstn_lock(sbi)         xa_lock(&(sbi)->workstn_tree)
-#define erofs_workstn_unlock(sbi)       xa_unlock(&(sbi)->workstn_tree)
 
 /* basic unit of the workstation of a super_block */
 struct erofs_workgroup {
diff --git a/drivers/staging/erofs/super.c b/drivers/staging/erofs/super.c
index 54494412eba4189fa64844d5a4ef207e5ac74e51..f9964d40aed325743e38cc2c9280d6cfdbcb7eae 100644
--- a/drivers/staging/erofs/super.c
+++ b/drivers/staging/erofs/super.c
@@ -421,7 +421,7 @@ static int erofs_read_super(struct super_block *sb,
                sb->s_flags &= ~SB_POSIXACL;
 
 #ifdef CONFIG_EROFS_FS_ZIP
-       INIT_RADIX_TREE(&sbi->workstn_tree, GFP_ATOMIC);
+       xa_init(&sbi->compression_work);
 #endif
 
 #ifdef EROFS_FS_HAS_MANAGED_CACHE
diff --git a/drivers/staging/erofs/utils.c b/drivers/staging/erofs/utils.c
index 4bbd3bf34acd9d52f8d58fab595a0ea9e77e0352..9e9163a2bd777a7ba0ed5eefcd20702785f72642 100644
--- a/drivers/staging/erofs/utils.c
+++ b/drivers/staging/erofs/utils.c
@@ -72,7 +72,7 @@ struct erofs_workgroup *erofs_find_workgroup(struct super_block *sb,
 
 repeat:
        rcu_read_lock();
-       grp = radix_tree_lookup(&sbi->workstn_tree, index);
+       grp = xa_load(&sbi->compression_work, index);
        if (grp) {
                *tag = xa_pointer_tag(grp);
                grp = xa_untag_pointer(grp);
@@ -102,24 +102,17 @@ int erofs_register_workgroup(struct super_block *sb,
                return -EINVAL;
        }
 
-       err = radix_tree_preload(GFP_NOFS);
-       if (err)
-               return err;
-
        sbi = EROFS_SB(sb);
-       erofs_workstn_lock(sbi);
-
-       grp = xa_tag_pointer(grp, tag);
 
        /*
         * Bump up reference count before making this workgroup
         * visible to other users in order to avoid potential UAF
-        * without serialized by erofs_workstn_lock.
+        * without being serialized by xa_lock.
         */
        __erofs_workgroup_get(grp);
 
-       err = radix_tree_insert(&sbi->workstn_tree,
-                               grp->index, grp);
+       err = xa_insert(&sbi->compression_work, grp->index,
+                       xa_tag_pointer(grp, tag), GFP_KERNEL);
        if (unlikely(err))
                /*
                 * it's safe to decrease since the workgroup isn't visible
@@ -127,8 +120,6 @@ int erofs_register_workgroup(struct super_block *sb,
                 */
                __erofs_workgroup_put(grp);
 
-       erofs_workstn_unlock(sbi);
-       radix_tree_preload_end();
        return err;
 }
 
@@ -171,7 +162,7 @@ static bool erofs_try_to_release_workgroup(struct erofs_sb_info *sbi,
 
        /*
         * note that all cached pages should be unlinked
-        * before delete it from the radix tree.
+        * before deleting it from the xarray.
         * Otherwise some cached pages of an orphan old workgroup
         * could be still linked after the new one is available.
         */
@@ -185,7 +176,7 @@ static bool erofs_try_to_release_workgroup(struct erofs_sb_info *sbi,
         * however in order to avoid some race conditions, add a
         * DBG_BUGON to observe this in advance.
         */
-       DBG_BUGON(xa_untag_pointer(radix_tree_delete(&sbi->workstn_tree,
+       DBG_BUGON(xa_untag_pointer(xa_erase(&sbi->compression_work,
                                                     grp->index)) != grp);
 
        /*
@@ -210,7 +201,7 @@ static bool erofs_try_to_release_workgroup(struct erofs_sb_info *sbi,
        if (cnt > 1)
                return false;
 
-       DBG_BUGON(xa_untag_pointer(radix_tree_delete(&sbi->workstn_tree,
+       DBG_BUGON(xa_untag_pointer(xa_erase(&sbi->compression_work,
                                                     grp->index)) != grp);
 
        /* (rarely) could be grabbed again when freeing */
@@ -224,21 +215,12 @@ unsigned long erofs_shrink_workstation(struct erofs_sb_info *sbi,
                                       unsigned long nr_shrink,
                                       bool cleanup)
 {
-       pgoff_t first_index = 0;
-       void *batch[PAGEVEC_SIZE];
+       struct erofs_workgroup *grp;
        unsigned int freed = 0;
+       unsigned long index;
 
-       int i, found;
-repeat:
-       erofs_workstn_lock(sbi);
-
-       found = radix_tree_gang_lookup(&sbi->workstn_tree,
-                                      batch, first_index, PAGEVEC_SIZE);
-
-       for (i = 0; i < found; ++i) {
-               struct erofs_workgroup *grp = xa_untag_pointer(batch[i]);
-
-               first_index = grp->index + 1;
+       xa_for_each(&sbi->compression_work, index, grp) {
+               grp = xa_untag_pointer(grp);
 
                /* try to shrink each valid workgroup */
                if (!erofs_try_to_release_workgroup(sbi, grp, cleanup))
@@ -248,10 +230,7 @@ repeat:
                if (unlikely(!--nr_shrink))
                        break;
        }
-       erofs_workstn_unlock(sbi);
 
-       if (i && nr_shrink)
-               goto repeat;
        return freed;
 }
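
For reference, a minimal sketch (illustration only, not from this patch)
of the xa_for_each() pattern that replaces the gang lookup above.
xa_for_each() takes the RCU read lock itself and copes with entries
being erased mid-walk, so the explicit lock and goto-repeat dance is no
longer needed.  example_try_release() is a hypothetical stand-in for
erofs_try_to_release_workgroup(), which erases the entry on success:

	static unsigned long example_shrink(struct xarray *xa, unsigned long nr)
	{
		struct erofs_workgroup *grp;
		unsigned long index;
		unsigned long freed = 0;

		xa_for_each(xa, index, grp) {
			/* entries are stored tagged, as above */
			grp = xa_untag_pointer(grp);
			if (!example_try_release(grp)) /* hypothetical helper */
				continue;
			++freed;
			if (!--nr)
				break;
		}
		return freed;
	}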