#define JOURNAL_WORK_ENTRY(h) (list_entry((h), struct reiserfs_journal_list, \
                                j_working_list))
 
-/* the number of mounted filesystems.  This is used to decide when to
-** start and kill the commit workqueue
-*/
-static int reiserfs_mounted_fs_count;
-
-static struct workqueue_struct *commit_wq;
-
 #define JOURNAL_TRANS_HALF 1018        /* must be correct to keep the desc and commit
                                   structs at 4k */
 #define BUFNR 64               /*read ahead */
                }
        }
 
-       reiserfs_mounted_fs_count--;
        /* wait for all commits to finish */
        cancel_delayed_work(&SB_JOURNAL(sb)->j_work);
 
        reiserfs_write_unlock(sb);
 
        cancel_delayed_work_sync(&REISERFS_SB(sb)->old_work);
-       flush_workqueue(commit_wq);
-
-       if (!reiserfs_mounted_fs_count) {
-               destroy_workqueue(commit_wq);
-               commit_wq = NULL;
-       }
+       flush_workqueue(REISERFS_SB(sb)->commit_wq);
 
        free_journal_ram(sb);
 
                goto free_and_return;
        }
 
-       reiserfs_mounted_fs_count++;
-       if (reiserfs_mounted_fs_count <= 1)
-               commit_wq = alloc_workqueue("reiserfs", WQ_MEM_RECLAIM, 0);
-
        INIT_DELAYED_WORK(&journal->j_work, flush_async_commits);
        journal->j_work_sb = sb;
        return 0;
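
The REISERFS_SB(sb)->commit_wq references in these hunks rely on a new per-superblock member; the hunk that adds it to struct reiserfs_sb_info in fs/reiserfs/reiserfs.h is not part of this excerpt. A minimal sketch of the assumed declaration (the member name is inferred from the usages in this patch):

	/* sketch only: assumed addition to struct reiserfs_sb_info */
	struct workqueue_struct *commit_wq;	/* per-FS queue for async commit work,
						 * replaces the old global commit_wq */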
                flush_commit_list(sb, jl, 1);
                flush_journal_list(sb, jl, 1);
        } else if (!(jl->j_state & LIST_COMMIT_PENDING))
-               queue_delayed_work(commit_wq, &journal->j_work, HZ / 10);
+               queue_delayed_work(REISERFS_SB(sb)->commit_wq,
+                                  &journal->j_work, HZ / 10);
 
        /* if the next transaction has any chance of wrapping, flush
         ** transactions that might get overwritten.  If any journal lists are very
 
 
        reiserfs_write_unlock(s);
        mutex_destroy(&REISERFS_SB(s)->lock);
+       destroy_workqueue(REISERFS_SB(s)->commit_wq);
        kfree(s->s_fs_info);
        s->s_fs_info = NULL;
 }
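
Taken together, the teardown above and the allocation in the fill_super hunk below follow the usual per-instance workqueue lifecycle: allocate with a formatted name at mount, queue delayed work while mounted, cancel and flush before the journal goes away, destroy the queue last. A self-contained sketch of that pattern under hypothetical names (my_info, my_setup and friends are not reiserfs symbols):

	#include <linux/workqueue.h>
	#include <linux/jiffies.h>
	#include <linux/errno.h>

	struct my_info {
		struct workqueue_struct *commit_wq;	/* one queue per mounted instance */
		struct delayed_work work;		/* async commit work item */
	};

	static void my_worker(struct work_struct *work)
	{
		/* flush pending async commits for this instance only */
	}

	static int my_setup(struct my_info *info, const char *id)
	{
		/* WQ_MEM_RECLAIM: the queue must be able to make forward
		 * progress under memory pressure, since writeback depends on it */
		info->commit_wq = alloc_workqueue("myfs/%s", WQ_MEM_RECLAIM, 0, id);
		if (!info->commit_wq)
			return -ENOMEM;
		INIT_DELAYED_WORK(&info->work, my_worker);
		return 0;
	}

	static void my_kick(struct my_info *info)
	{
		/* defer the commit slightly, mirroring the HZ / 10 delay above */
		queue_delayed_work(info->commit_wq, &info->work, HZ / 10);
	}

	static void my_teardown(struct my_info *info)
	{
		cancel_delayed_work_sync(&info->work);	/* no new work after this */
		flush_workqueue(info->commit_wq);	/* drain anything still queued */
		destroy_workqueue(info->commit_wq);	/* then free the queue itself */
	}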
        mutex_init(&sbi->lock);
        sbi->lock_depth = -1;
 
+       sbi->commit_wq = alloc_workqueue("reiserfs/%s", WQ_MEM_RECLAIM, 0,
+                                        s->s_id);
+       if (!sbi->commit_wq) {
+               SWARN(silent, s, "", "Cannot allocate commit workqueue");
+               errval = -ENOMEM;
+               goto error_unlocked;
+       }
+
        jdev_name = NULL;
        if (reiserfs_parse_options
            (s, (char *)data, &(sbi->s_mount_opt), &blocks, &jdev_name,
 {
        int ret;
 
-       if ((ret = init_inodecache())) {
+       ret = init_inodecache();
+       if (ret)
                return ret;
-       }
 
        reiserfs_proc_info_global_init();
 
        ret = register_filesystem(&reiserfs_fs_type);
+       if (ret)
+               goto out;
 
-       if (ret == 0) {
-               return 0;
-       }
-
+       return 0;
+out:
        reiserfs_proc_info_global_done();
        destroy_inodecache();