        btrfs_stop_workers(&fs_info->endio_meta_write_workers);
        btrfs_stop_workers(&fs_info->endio_write_workers);
        btrfs_stop_workers(&fs_info->endio_freespace_worker);
-       btrfs_stop_workers(&fs_info->submit_workers);
+       btrfs_destroy_workqueue(fs_info->submit_workers);
        btrfs_stop_workers(&fs_info->delayed_workers);
        btrfs_stop_workers(&fs_info->caching_workers);
        btrfs_stop_workers(&fs_info->readahead_workers);
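The shutdown path shows the shape of the whole conversion: submit_workers stops being a struct embedded in fs_info (note the &, stopped in place by btrfs_stop_workers()) and becomes a pointer to the new workqueue type, torn down with btrfs_destroy_workqueue(). A sketch of the implied fs_info change, which this excerpt does not show; the type name is taken from the other hunks:

        struct btrfs_fs_info {
                ...
                /* was: struct btrfs_workers submit_workers; */
                struct btrfs_workqueue_struct *submit_workers;
                ...
        };
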
        btrfs_init_workers(&fs_info->flush_workers, "flush_delalloc",
                           fs_info->thread_pool_size, NULL);
 
-       btrfs_init_workers(&fs_info->submit_workers, "submit",
-                          min_t(u64, fs_devices->num_devices,
-                          fs_info->thread_pool_size), NULL);
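Dropping this btrfs_init_workers() call does not lose the device-count cap: the min_t() clamp reappears as the max_active argument of btrfs_alloc_workqueue() below. In sketch form, assuming thread_pool_size still feeds the limit:

        /* never run more submit workers than there are devices to feed */
        max_active = min_t(u64, fs_devices->num_devices,
                           fs_info->thread_pool_size);
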
 
        btrfs_init_workers(&fs_info->caching_workers, "cache",
                           fs_info->thread_pool_size, NULL);
 
-       /* a higher idle thresh on the submit workers makes it much more
+       /*
+        * a higher idle thresh on the submit workers makes it much more
         * likely that bios will be sent down in a sane order to the
         * devices
         */
-       fs_info->submit_workers.idle_thresh = 64;
+       fs_info->submit_workers =
+               btrfs_alloc_workqueue("submit", flags,
+                                     min_t(u64, fs_devices->num_devices,
+                                           max_active), 64);
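Two pieces of old state fold into one constructor call: the clamped thread count becomes max_active, and the removed idle_thresh = 64 assignment becomes the trailing thresh argument. Unlike the embedded pools, btrfs_alloc_workqueue() can fail, so callers get NULL back instead of a ready queue; a usage sketch with hypothetical names (wq, some_work):

        struct btrfs_workqueue_struct *wq;

        wq = btrfs_alloc_workqueue("example", flags, max_active, 64);
        if (!wq)
                return -ENOMEM;

        /* some_work is a struct btrfs_work_struct set up via btrfs_init_work() */
        btrfs_queue_work(wq, &some_work);

The NULL check for submit_workers itself lands in the error handling a few hunks below.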
 
        btrfs_init_workers(&fs_info->fixup_workers, "fixup", 1,
                           &fs_info->generic_worker);
         * return -ENOMEM if any of these fail.
         */
        ret = btrfs_start_workers(&fs_info->generic_worker);
-       ret |= btrfs_start_workers(&fs_info->submit_workers);
        ret |= btrfs_start_workers(&fs_info->fixup_workers);
        ret |= btrfs_start_workers(&fs_info->endio_workers);
        ret |= btrfs_start_workers(&fs_info->endio_meta_workers);
                err = -ENOMEM;
                goto fail_sb_buffer;
        }
-       if (!(fs_info->workers && fs_info->delalloc_workers)) {
+       if (!(fs_info->workers && fs_info->delalloc_workers &&
+             fs_info->submit_workers)) {
                err = -ENOMEM;
                goto fail_sb_buffer;
        }
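This also explains why submit_workers drops out of the ret |= btrfs_start_workers() chain above: the new workqueue has no separate start step, so its only failure mode is the allocation returning NULL, caught by extending the existing pointer check. The two error models side by side, condensed from this excerpt:

        /* old: init cannot fail, starting the threads can */
        ret |= btrfs_start_workers(&fs_info->submit_workers);

        /* new: allocation is the only failure point */
        if (!fs_info->submit_workers) {
                err = -ENOMEM;
                goto fail_sb_buffer;
        }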
 
        btrfs_set_max_workers(&fs_info->generic_worker, new_pool_size);
        btrfs_workqueue_set_max(fs_info->workers, new_pool_size);
        btrfs_workqueue_set_max(fs_info->delalloc_workers, new_pool_size);
-       btrfs_set_max_workers(&fs_info->submit_workers, new_pool_size);
+       btrfs_workqueue_set_max(fs_info->submit_workers, new_pool_size);
        btrfs_set_max_workers(&fs_info->caching_workers, new_pool_size);
        btrfs_set_max_workers(&fs_info->fixup_workers, new_pool_size);
        btrfs_set_max_workers(&fs_info->endio_workers, new_pool_size);
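Resizing follows the same split: legacy pools still go through btrfs_set_max_workers() on the embedded struct, converted ones through btrfs_workqueue_set_max() on the pointer. The setter's body is not part of this excerpt; presumably it forwards the new limit to the backing kernel workqueue, roughly like this (normal_wq is a hypothetical field name):

        void btrfs_workqueue_set_max(struct btrfs_workqueue_struct *wq, int max)
        {
                workqueue_set_max_active(wq->normal_wq, max);
        }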
 
                        device->running_pending = 1;
 
                        spin_unlock(&device->io_lock);
-                       btrfs_requeue_work(&device->work);
+                       btrfs_queue_work(fs_info->submit_workers,
+                                        &device->work);
                        goto done;
                }
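The old btrfs_requeue_work() helper re-queued the work item that was currently running; the new API has no requeue primitive, so the loop marks the device busy and queues the same work item again through the normal path. Condensed from this hunk (the matching spin_lock() sits just above the excerpt):

        spin_lock(&device->io_lock);
        device->running_pending = 1;
        spin_unlock(&device->io_lock);

        /* re-queue ourselves: same work item, same queue */
        btrfs_queue_work(fs_info->submit_workers, &device->work);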
                /* unplug every 64 requests just for good measure */
        blk_finish_plug(&plug);
 }
 
-static void pending_bios_fn(struct btrfs_work *work)
+static void pending_bios_fn(struct btrfs_work_struct *work)
 {
        struct btrfs_device *device;
 
        spin_unlock(&device->io_lock);
 
        if (should_queue)
-               btrfs_queue_worker(&root->fs_info->submit_workers,
-                                  &device->work);
+               btrfs_queue_work(root->fs_info->submit_workers,
+                                &device->work);
 }
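Both the handler signature and the queueing call move to the new types. The handler now takes a struct btrfs_work_struct *, and its elided body presumably still recovers the device with container_of(), since the work item is embedded in struct btrfs_device. A sketch of the converted handler:

        static void pending_bios_fn(struct btrfs_work_struct *work)
        {
                struct btrfs_device *device;

                /* the work item is embedded in the device */
                device = container_of(work, struct btrfs_device, work);
                run_scheduled_bios(device);
        }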
 
 static int bio_size_ok(struct block_device *bdev, struct bio *bio,
        else
                generate_random_uuid(dev->uuid);
 
-       dev->work.func = pending_bios_fn;
+       btrfs_init_work(&dev->work, pending_bios_fn, NULL, NULL);
 
        return dev;
 }
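Finally, setup: instead of assigning the callback straight into dev->work.func, the new helper takes the handler plus two ordering hooks, both unused here since bio submission needs no ordered completion. The initializer's signature as implied by this call site (the hook parameter names are assumptions):

        void btrfs_init_work(struct btrfs_work_struct *work,
                             void (*func)(struct btrfs_work_struct *),
                             void (*ordered_func)(struct btrfs_work_struct *),
                             void (*ordered_free)(struct btrfs_work_struct *));

        /* submit bios need no ordered completion, so both hooks stay NULL */
        btrfs_init_work(&dev->work, pending_bios_fn, NULL, NULL);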