btrfs: migrate to use per-fs workspace manager
Author:     Qu Wenruo <wqu@suse.com>
AuthorDate: Thu, 14 Aug 2025 00:19:50 +0000 (09:49 +0930)
Commit:     David Sterba <dsterba@suse.com>
CommitDate: Tue, 23 Sep 2025 06:49:15 +0000 (08:49 +0200)
There are several interfaces involved for each algorithm:

- alloc workspace
  All algorithms allocate a workspace without needing the workspace
  manager, so no change is needed here.

- get workspace
  This involves checking the workspace manager for a free workspace and,
  if none is available, allocating a new one.

  None and lzo share the same generic btrfs_get_workspace() helper, so
  only that function needs to be updated to use the per-fs manager (a
  standalone sketch of this lookup pattern follows after the list).

  Zlib uses a wrapper around btrfs_get_workspace(), so no special work is
  needed.

  For zstd, update zstd_find_workspace() and zstd_get_workspace() to use
  the per-fs manager.

- put workspace
  None/zlib/lzo share the same btrfs_put_workspace(); update that
  function to use the per-fs manager.

  For zstd, apply the same update to zstd_put_workspace().

- zstd specific timer
  This is the timer that reclaims idle workspaces; change it to grab the
  per-fs workspace manager from the embedded timer instead (see the
  second sketch after the list).

Now all workspaces are managed by the per-fs manager.
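
For illustration, here is a minimal standalone sketch (not kernel code) of
the lookup pattern the get and put paths switch to: the workspace manager
comes from the fs_info's compr_wsm[] array instead of the global
btrfs_compress_op[] table. The demo_* names and the simplified free-workspace
counter are assumptions made only for this example.

  #include <assert.h>

  enum { DEMO_COMPRESS_NONE, DEMO_COMPRESS_ZLIB, DEMO_COMPRESS_LZO,
         DEMO_COMPRESS_ZSTD, DEMO_NR_COMPRESS_TYPES };

  struct demo_workspace_manager {
          int free_ws;            /* idle workspaces available */
  };

  struct demo_fs_info {
          /* one manager per algorithm, owned by the fs instead of being global */
          struct demo_workspace_manager *compr_wsm[DEMO_NR_COMPRESS_TYPES];
  };

  int demo_get_workspace(struct demo_fs_info *fs_info, int type)
  {
          /* per-fs lookup replaces btrfs_compress_op[type]->workspace_manager */
          struct demo_workspace_manager *wsm = fs_info->compr_wsm[type];

          assert(wsm);            /* manager must exist once the fs is set up */
          if (wsm->free_ws > 0) {
                  wsm->free_ws--;
                  return 1;       /* reuse an idle workspace */
          }
          return 0;               /* caller would allocate a fresh one */
  }

  void demo_put_workspace(struct demo_fs_info *fs_info, int type)
  {
          struct demo_workspace_manager *wsm = fs_info->compr_wsm[type];

          assert(wsm);
          wsm->free_ws++;         /* return the workspace to this fs's pool */
  }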

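The second sketch (again standalone, not kernel code) shows how the zstd
reclaim timer finds its per-fs manager: the timer is embedded in the manager
struct, so container_of() on the timer pointer recovers the owning manager,
replacing the old references to the global wsm. The demo_* types and the idle
counter are illustrative assumptions.

  #include <stddef.h>
  #include <stdio.h>

  #define container_of(ptr, type, member) \
          ((type *)((char *)(ptr) - offsetof(type, member)))

  struct demo_timer {
          unsigned long expires;
  };

  struct demo_zstd_workspace_manager {
          int nr_idle;            /* idle workspaces eligible for reclaim */
          struct demo_timer timer; /* reclaim timer embedded in the manager */
  };

  /* The callback only gets the timer, so recover the owning manager from it. */
  void demo_reclaim_timer_fn(struct demo_timer *timer)
  {
          struct demo_zstd_workspace_manager *zwsm =
                  container_of(timer, struct demo_zstd_workspace_manager, timer);

          if (zwsm->nr_idle > 0)
                  zwsm->nr_idle--; /* stand-in for freeing one idle workspace */
  }

  int main(void)
  {
          struct demo_zstd_workspace_manager mgr = { .nr_idle = 2 };

          demo_reclaim_timer_fn(&mgr.timer);
          printf("idle workspaces left: %d\n", mgr.nr_idle);
          return 0;
  }
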
Signed-off-by: Qu Wenruo <wqu@suse.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
fs/btrfs/compression.c
fs/btrfs/zstd.c

diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index 26c632f0a20cc55875fd25e388b80462e7ed9628..cd479016d73d867175e27134d356a7d27a250ca9 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -875,7 +875,7 @@ static void btrfs_cleanup_workspace_manager(int type)
  */
 struct list_head *btrfs_get_workspace(struct btrfs_fs_info *fs_info, int type, int level)
 {
-       struct workspace_manager *wsm;
+       struct workspace_manager *wsm = fs_info->compr_wsm[type];
        struct list_head *workspace;
        int cpus = num_online_cpus();
        unsigned nofs_flag;
@@ -885,7 +885,7 @@ struct list_head *btrfs_get_workspace(struct btrfs_fs_info *fs_info, int type, i
        wait_queue_head_t *ws_wait;
        int *free_ws;
 
-       wsm = btrfs_compress_op[type]->workspace_manager;
+       ASSERT(wsm);
        idle_ws  = &wsm->idle_ws;
        ws_lock  = &wsm->ws_lock;
        total_ws = &wsm->total_ws;
@@ -974,19 +974,19 @@ static struct list_head *get_workspace(struct btrfs_fs_info *fs_info, int type,
  */
 void btrfs_put_workspace(struct btrfs_fs_info *fs_info, int type, struct list_head *ws)
 {
-       struct workspace_manager *wsm;
+       struct workspace_manager *gwsm = fs_info->compr_wsm[type];
        struct list_head *idle_ws;
        spinlock_t *ws_lock;
        atomic_t *total_ws;
        wait_queue_head_t *ws_wait;
        int *free_ws;
 
-       wsm = btrfs_compress_op[type]->workspace_manager;
-       idle_ws  = &wsm->idle_ws;
-       ws_lock  = &wsm->ws_lock;
-       total_ws = &wsm->total_ws;
-       ws_wait  = &wsm->ws_wait;
-       free_ws  = &wsm->free_ws;
+       ASSERT(gwsm);
+       idle_ws  = &gwsm->idle_ws;
+       ws_lock  = &gwsm->ws_lock;
+       total_ws = &gwsm->total_ws;
+       ws_wait  = &gwsm->ws_wait;
+       free_ws  = &gwsm->free_ws;
 
        spin_lock(ws_lock);
        if (*free_ws <= num_online_cpus()) {
diff --git a/fs/btrfs/zstd.c b/fs/btrfs/zstd.c
index 24898aee3ee00da6c9bfd2910ed0ef171267900a..41019fcebc13aad64b9eb1cf6136e00bd48c78c8 100644
--- a/fs/btrfs/zstd.c
+++ b/fs/btrfs/zstd.c
@@ -112,19 +112,19 @@ static inline int clip_level(int level)
  */
 static void zstd_reclaim_timer_fn(struct timer_list *timer)
 {
+       struct zstd_workspace_manager *zwsm =
+               container_of(timer, struct zstd_workspace_manager, timer);
        unsigned long reclaim_threshold = jiffies - ZSTD_BTRFS_RECLAIM_JIFFIES;
        struct list_head *pos, *next;
 
-       ASSERT(timer == &wsm.timer);
+       spin_lock(&zwsm->lock);
 
-       spin_lock(&wsm.lock);
-
-       if (list_empty(&wsm.lru_list)) {
-               spin_unlock(&wsm.lock);
+       if (list_empty(&zwsm->lru_list)) {
+               spin_unlock(&zwsm->lock);
                return;
        }
 
-       list_for_each_prev_safe(pos, next, &wsm.lru_list) {
+       list_for_each_prev_safe(pos, next, &zwsm->lru_list) {
                struct workspace *victim = container_of(pos, struct workspace,
                                                        lru_list);
                int level;
@@ -141,15 +141,15 @@ static void zstd_reclaim_timer_fn(struct timer_list *timer)
                list_del(&victim->list);
                zstd_free_workspace(&victim->list);
 
-               if (list_empty(&wsm.idle_ws[level]))
-                       clear_bit(level, &wsm.active_map);
+               if (list_empty(&zwsm->idle_ws[level]))
+                       clear_bit(level, &zwsm->active_map);
 
        }
 
-       if (!list_empty(&wsm.lru_list))
-               mod_timer(&wsm.timer, jiffies + ZSTD_BTRFS_RECLAIM_JIFFIES);
+       if (!list_empty(&zwsm->lru_list))
+               mod_timer(&zwsm->timer, jiffies + ZSTD_BTRFS_RECLAIM_JIFFIES);
 
-       spin_unlock(&wsm.lock);
+       spin_unlock(&zwsm->lock);
 }
 
 /*
@@ -292,29 +292,31 @@ void zstd_cleanup_workspace_manager(void)
  * offer the opportunity to reclaim the workspace in favor of allocating an
  * appropriately sized one in the future.
  */
-static struct list_head *zstd_find_workspace(int level)
+static struct list_head *zstd_find_workspace(struct btrfs_fs_info *fs_info, int level)
 {
+       struct zstd_workspace_manager *zwsm = fs_info->compr_wsm[BTRFS_COMPRESS_ZSTD];
        struct list_head *ws;
        struct workspace *workspace;
        int i = clip_level(level);
 
-       spin_lock_bh(&wsm.lock);
-       for_each_set_bit_from(i, &wsm.active_map, ZSTD_BTRFS_MAX_LEVEL) {
-               if (!list_empty(&wsm.idle_ws[i])) {
-                       ws = wsm.idle_ws[i].next;
+       ASSERT(zwsm);
+       spin_lock_bh(&zwsm->lock);
+       for_each_set_bit_from(i, &zwsm->active_map, ZSTD_BTRFS_MAX_LEVEL) {
+               if (!list_empty(&zwsm->idle_ws[i])) {
+                       ws = zwsm->idle_ws[i].next;
                        workspace = list_to_workspace(ws);
                        list_del_init(ws);
                        /* keep its place if it's a lower level using this */
                        workspace->req_level = level;
                        if (clip_level(level) == workspace->level)
                                list_del(&workspace->lru_list);
-                       if (list_empty(&wsm.idle_ws[i]))
-                               clear_bit(i, &wsm.active_map);
-                       spin_unlock_bh(&wsm.lock);
+                       if (list_empty(&zwsm->idle_ws[i]))
+                               clear_bit(i, &zwsm->active_map);
+                       spin_unlock_bh(&zwsm->lock);
                        return ws;
                }
        }
-       spin_unlock_bh(&wsm.lock);
+       spin_unlock_bh(&zwsm->lock);
 
        return NULL;
 }
@@ -331,15 +333,18 @@ static struct list_head *zstd_find_workspace(int level)
  */
 struct list_head *zstd_get_workspace(struct btrfs_fs_info *fs_info, int level)
 {
+       struct zstd_workspace_manager *zwsm = fs_info->compr_wsm[BTRFS_COMPRESS_ZSTD];
        struct list_head *ws;
        unsigned int nofs_flag;
 
+       ASSERT(zwsm);
+
        /* level == 0 means we can use any workspace */
        if (!level)
                level = 1;
 
 again:
-       ws = zstd_find_workspace(level);
+       ws = zstd_find_workspace(fs_info, level);
        if (ws)
                return ws;
 
@@ -350,9 +355,9 @@ again:
        if (IS_ERR(ws)) {
                DEFINE_WAIT(wait);
 
-               prepare_to_wait(&wsm.wait, &wait, TASK_UNINTERRUPTIBLE);
+               prepare_to_wait(&zwsm->wait, &wait, TASK_UNINTERRUPTIBLE);
                schedule();
-               finish_wait(&wsm.wait, &wait);
+               finish_wait(&zwsm->wait, &wait);
 
                goto again;
        }
@@ -373,32 +378,34 @@ again:
  */
 void zstd_put_workspace(struct btrfs_fs_info *fs_info, struct list_head *ws)
 {
+       struct zstd_workspace_manager *zwsm = fs_info->compr_wsm[BTRFS_COMPRESS_ZSTD];
        struct workspace *workspace = list_to_workspace(ws);
 
-       spin_lock_bh(&wsm.lock);
+       ASSERT(zwsm);
+       spin_lock_bh(&zwsm->lock);
 
        /* A node is only taken off the lru if we are the corresponding level */
        if (clip_level(workspace->req_level) == workspace->level) {
                /* Hide a max level workspace from reclaim */
-               if (list_empty(&wsm.idle_ws[ZSTD_BTRFS_MAX_LEVEL - 1])) {
+               if (list_empty(&zwsm->idle_ws[ZSTD_BTRFS_MAX_LEVEL - 1])) {
                        INIT_LIST_HEAD(&workspace->lru_list);
                } else {
                        workspace->last_used = jiffies;
-                       list_add(&workspace->lru_list, &wsm.lru_list);
-                       if (!timer_pending(&wsm.timer))
-                               mod_timer(&wsm.timer,
+                       list_add(&workspace->lru_list, &zwsm->lru_list);
+                       if (!timer_pending(&zwsm->timer))
+                               mod_timer(&zwsm->timer,
                                          jiffies + ZSTD_BTRFS_RECLAIM_JIFFIES);
                }
        }
 
-       set_bit(workspace->level, &wsm.active_map);
-       list_add(&workspace->list, &wsm.idle_ws[workspace->level]);
+       set_bit(workspace->level, &zwsm->active_map);
+       list_add(&workspace->list, &zwsm->idle_ws[workspace->level]);
        workspace->req_level = 0;
 
-       spin_unlock_bh(&wsm.lock);
+       spin_unlock_bh(&zwsm->lock);
 
        if (workspace->level == clip_level(ZSTD_BTRFS_MAX_LEVEL))
-               cond_wake_up(&wsm.wait);
+               cond_wake_up(&zwsm->wait);
 }
 
 void zstd_free_workspace(struct list_head *ws)