--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
 void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent);
 
 extern struct workqueue_struct *bcache_wq;
+extern struct workqueue_struct *bch_journal_wq;
 extern struct mutex bch_register_lock;
 extern struct list_head bch_cache_sets;
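Annotation: this patch gives bcache's journal its own workqueue, bch_journal_wq, instead of sharing the kernel's system_wq. system_wq is created without WQ_MEM_RECLAIM, so under memory pressure a queued journal work item can end up waiting for a new kworker whose creation itself needs to reclaim memory; if that reclaim in turn has to flush dirty btree nodes that are waiting on the journal, the dependency loops and the system deadlocks. A workqueue allocated with WQ_MEM_RECLAIM keeps a rescuer thread, which guarantees forward progress. The hunks below retarget every journal work item onto the new queue.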
 
--- a/drivers/md/bcache/journal.c
+++ b/drivers/md/bcache/journal.c
                closure_get(&ca->set->cl);
                INIT_WORK(&ja->discard_work, journal_discard_work);
-               schedule_work(&ja->discard_work);
+               queue_work(bch_journal_wq, &ja->discard_work);
        }
 }
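Annotation: the schedule_work() -> queue_work() change is purely a retargeting of which worker pool runs journal_discard_work; schedule_work() is a thin wrapper defined in include/linux/workqueue.h:

    static inline bool schedule_work(struct work_struct *work)
    {
            return queue_work(system_wq, work);
    }

so the work item's behavior is otherwise unchanged.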
 
                : &j->w[0];
 
        __closure_wake_up(&w->wait);
-       continue_at_nobarrier(cl, journal_write, system_wq);
+       continue_at_nobarrier(cl, journal_write, bch_journal_wq);
 }
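Annotation: continue_at_nobarrier() is part of bcache's closure machinery (drivers/md/bcache/closure.h). The exact macro body is not shown in this excerpt, but its effect is approximately the two-step hand-off sketched below, so the change only moves where the closure's next function executes:

    /* Approximate expansion; see closure.h for the real macro. */
    set_closure_fn(cl, journal_write, bch_journal_wq); /* run journal_write next */
    closure_queue(cl);                                 /* ...queued on bch_journal_wq */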
 
 static void journal_write_unlock(struct closure *cl)

Annotation: a hunk boundary falls here; the lines below are from a later hunk, inside journal_write_unlocked(), not the body of journal_write_unlock().

                spin_unlock(&c->journal.lock);
 
                btree_flush_write(c);
-               continue_at(cl, journal_write, system_wq);
+               continue_at(cl, journal_write, bch_journal_wq);
                return;
        }
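Annotation: this continue_at() call is the journal-full slow path inside journal_write_unlocked(): btree_flush_write() is invoked to free up journal space and journal_write is then retried. This site and the journal_write_done() site above have to move off system_wq together; leaving either on system_wq would preserve the deadlock window.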
 
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
 static DEFINE_IDA(bcache_device_idx);
 static wait_queue_head_t unregister_wait;
 struct workqueue_struct *bcache_wq;
+struct workqueue_struct *bch_journal_wq;
 
 #define BTREE_MAX_PAGES                (256 * 1024 / PAGE_SIZE)
 /* limitation of partitions number on single bcache device */
                kobject_put(bcache_kobj);
        if (bcache_wq)
                destroy_workqueue(bcache_wq);
+       if (bch_journal_wq)
+               destroy_workqueue(bch_journal_wq);
+
        if (bcache_major)
                unregister_blkdev(bcache_major, "bcache");
        unregister_reboot_notifier(&reboot);
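Annotation: destroy_workqueue() drains all pending work before freeing the queue, so no journal work can still be in flight once unregister_blkdev() runs. The NULL check mirrors the existing bcache_wq handling: this teardown path is also reached when bcache_init() fails partway, in which case bch_journal_wq may never have been allocated and destroy_workqueue() must not be called on it. The next hunk is in bcache_init().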
        if (!bcache_wq)
                goto err;
 
+       bch_journal_wq = alloc_workqueue("bch_journal", WQ_MEM_RECLAIM, 0);
+       if (!bch_journal_wq)
+               goto err;
+
        bcache_kobj = kobject_create_and_add("bcache", fs_kobj);
        if (!bcache_kobj)
                goto err;
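Annotation: taken together, the super.c hunks are the standard lifecycle for a dedicated workqueue: allocate at init, queue work during operation, destroy at exit. A minimal self-contained sketch of the same idiom follows; the names (demo_wq, demo_work_fn) are illustrative, not from the patch:

    #include <linux/init.h>
    #include <linux/module.h>
    #include <linux/workqueue.h>

    static struct workqueue_struct *demo_wq;   /* stand-in for bch_journal_wq */
    static struct work_struct demo_work;

    static void demo_work_fn(struct work_struct *w)
    {
            pr_info("demo: running on a WQ_MEM_RECLAIM workqueue\n");
    }

    static int __init demo_init(void)
    {
            /* WQ_MEM_RECLAIM reserves a rescuer thread so queued work can
             * make progress even when creating a new kworker would itself
             * require memory reclaim -- the property this patch relies on. */
            demo_wq = alloc_workqueue("demo_wq", WQ_MEM_RECLAIM, 0);
            if (!demo_wq)
                    return -ENOMEM;

            INIT_WORK(&demo_work, demo_work_fn);
            queue_work(demo_wq, &demo_work);  /* cf. queue_work(bch_journal_wq, ...) */
            return 0;
    }

    static void __exit demo_exit(void)
    {
            /* Drains pending work before freeing, mirroring bcache_exit(). */
            destroy_workqueue(demo_wq);
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");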