enum {
        WORK_DONE_BIT,
        WORK_ORDER_DONE_BIT,
-       WORK_HIGH_PRIO_BIT,
 };
 
 #define NO_THRESHOLD (-1)
 #define DFT_THRESHOLD (32)
 
-struct __btrfs_workqueue {
+struct btrfs_workqueue {
        struct workqueue_struct *normal_wq;
 
        /* File system this workqueue services */
        spinlock_t thres_lock;
 };
 
-struct btrfs_workqueue {
-       struct __btrfs_workqueue *normal;
-       struct __btrfs_workqueue *high;
-};
-
-struct btrfs_fs_info * __pure btrfs_workqueue_owner(const struct __btrfs_workqueue *wq)
+struct btrfs_fs_info * __pure btrfs_workqueue_owner(const struct btrfs_workqueue *wq)
 {
        return wq->fs_info;
 }
 bool btrfs_workqueue_normal_congested(const struct btrfs_workqueue *wq)
 {
        /*
-        * We could compare wq->normal->pending with num_online_cpus()
+        * We could compare wq->pending with num_online_cpus()
         * to support "thresh == NO_THRESHOLD" case, but it requires
         * moving up atomic_inc/dec in thresh_queue/exec_hook. Let's
         * postpone it until someone needs the support of that case.
         */
-       if (wq->normal->thresh == NO_THRESHOLD)
+       if (wq->thresh == NO_THRESHOLD)
                return false;
 
-       return atomic_read(&wq->normal->pending) > wq->normal->thresh * 2;
+       return atomic_read(&wq->pending) > wq->thresh * 2;
 }
 
-static struct __btrfs_workqueue *
-__btrfs_alloc_workqueue(struct btrfs_fs_info *fs_info, const char *name,
-                       unsigned int flags, int limit_active, int thresh)
+struct btrfs_workqueue *btrfs_alloc_workqueue(struct btrfs_fs_info *fs_info,
+                                             const char *name, unsigned int flags,
+                                             int limit_active, int thresh)
 {
-       struct __btrfs_workqueue *ret = kzalloc(sizeof(*ret), GFP_KERNEL);
+       struct btrfs_workqueue *ret = kzalloc(sizeof(*ret), GFP_KERNEL);
 
        if (!ret)
                return NULL;
                ret->thresh = thresh;
        }
 
-       if (flags & WQ_HIGHPRI)
-               ret->normal_wq = alloc_workqueue("btrfs-%s-high", flags,
-                                                ret->current_active, name);
-       else
-               ret->normal_wq = alloc_workqueue("btrfs-%s", flags,
-                                                ret->current_active, name);
+       ret->normal_wq = alloc_workqueue("btrfs-%s", flags, ret->current_active,
+                                        name);
        if (!ret->normal_wq) {
                kfree(ret);
                return NULL;
        INIT_LIST_HEAD(&ret->ordered_list);
        spin_lock_init(&ret->list_lock);
        spin_lock_init(&ret->thres_lock);
-       trace_btrfs_workqueue_alloc(ret, name, flags & WQ_HIGHPRI);
-       return ret;
-}
-
-static inline void
-__btrfs_destroy_workqueue(struct __btrfs_workqueue *wq);
-
-struct btrfs_workqueue *btrfs_alloc_workqueue(struct btrfs_fs_info *fs_info,
-                                             const char *name,
-                                             unsigned int flags,
-                                             int limit_active,
-                                             int thresh)
-{
-       struct btrfs_workqueue *ret = kzalloc(sizeof(*ret), GFP_KERNEL);
-
-       if (!ret)
-               return NULL;
-
-       ret->normal = __btrfs_alloc_workqueue(fs_info, name,
-                                             flags & ~WQ_HIGHPRI,
-                                             limit_active, thresh);
-       if (!ret->normal) {
-               kfree(ret);
-               return NULL;
-       }
-
-       if (flags & WQ_HIGHPRI) {
-               ret->high = __btrfs_alloc_workqueue(fs_info, name, flags,
-                                                   limit_active, thresh);
-               if (!ret->high) {
-                       __btrfs_destroy_workqueue(ret->normal);
-                       kfree(ret);
-                       return NULL;
-               }
-       }
+       trace_btrfs_workqueue_alloc(ret, name);
        return ret;
 }
 
  * This hook WILL be called in IRQ handler context,
  * so workqueue_set_max_active MUST NOT be called in this hook
  */
-static inline void thresh_queue_hook(struct __btrfs_workqueue *wq)
+static inline void thresh_queue_hook(struct btrfs_workqueue *wq)
 {
        if (wq->thresh == NO_THRESHOLD)
                return;
  * This hook is called in kthread context.
  * So workqueue_set_max_active is called here.
  */
-static inline void thresh_exec_hook(struct __btrfs_workqueue *wq)
+static inline void thresh_exec_hook(struct btrfs_workqueue *wq)
 {
        int new_current_active;
        long pending;
        }
 }
 
-static void run_ordered_work(struct __btrfs_workqueue *wq,
+static void run_ordered_work(struct btrfs_workqueue *wq,
                             struct btrfs_work *self)
 {
        struct list_head *list = &wq->ordered_list;
 {
        struct btrfs_work *work = container_of(normal_work, struct btrfs_work,
                                               normal_work);
-       struct __btrfs_workqueue *wq;
+       struct btrfs_workqueue *wq = work->wq;
        int need_order = 0;
 
        /*
         */
        if (work->ordered_func)
                need_order = 1;
-       wq = work->wq;
 
        trace_btrfs_work_sched(work);
        thresh_exec_hook(wq);
        work->flags = 0;
 }
 
-static inline void __btrfs_queue_work(struct __btrfs_workqueue *wq,
-                                     struct btrfs_work *work)
+void btrfs_queue_work(struct btrfs_workqueue *wq, struct btrfs_work *work)
 {
        unsigned long flags;
 
        queue_work(wq->normal_wq, &work->normal_work);
 }
 
-void btrfs_queue_work(struct btrfs_workqueue *wq,
-                     struct btrfs_work *work)
-{
-       struct __btrfs_workqueue *dest_wq;
-
-       if (test_bit(WORK_HIGH_PRIO_BIT, &work->flags) && wq->high)
-               dest_wq = wq->high;
-       else
-               dest_wq = wq->normal;
-       __btrfs_queue_work(dest_wq, work);
-}
-
-static inline void
-__btrfs_destroy_workqueue(struct __btrfs_workqueue *wq)
-{
-       destroy_workqueue(wq->normal_wq);
-       trace_btrfs_workqueue_destroy(wq);
-       kfree(wq);
-}
-
 void btrfs_destroy_workqueue(struct btrfs_workqueue *wq)
 {
        if (!wq)
                return;
-       if (wq->high)
-               __btrfs_destroy_workqueue(wq->high);
-       __btrfs_destroy_workqueue(wq->normal);
+       destroy_workqueue(wq->normal_wq);
+       trace_btrfs_workqueue_destroy(wq);
        kfree(wq);
 }
 
 void btrfs_workqueue_set_max(struct btrfs_workqueue *wq, int limit_active)
 {
-       if (!wq)
-               return;
-       wq->normal->limit_active = limit_active;
-       if (wq->high)
-               wq->high->limit_active = limit_active;
-}
-
-void btrfs_set_work_high_priority(struct btrfs_work *work)
-{
-       set_bit(WORK_HIGH_PRIO_BIT, &work->flags);
+       if (wq)
+               wq->limit_active = limit_active;
 }
 
 void btrfs_flush_workqueue(struct btrfs_workqueue *wq)
 {
-       if (wq->high)
-               flush_workqueue(wq->high->normal_wq);
-
-       flush_workqueue(wq->normal->normal_wq);
+       flush_workqueue(wq->normal_wq);
 }
 
        async->status = 0;
 
        if (op_is_sync(bio->bi_opf))
-               btrfs_set_work_high_priority(&async->work);
-
-       btrfs_queue_work(fs_info->workers, &async->work);
+               btrfs_queue_work(fs_info->hipri_workers, &async->work);
+       else
+               btrfs_queue_work(fs_info->workers, &async->work);
        return 0;
 }
 
 {
        btrfs_destroy_workqueue(fs_info->fixup_workers);
        btrfs_destroy_workqueue(fs_info->delalloc_workers);
+       btrfs_destroy_workqueue(fs_info->hipri_workers);
        btrfs_destroy_workqueue(fs_info->workers);
        btrfs_destroy_workqueue(fs_info->endio_workers);
        btrfs_destroy_workqueue(fs_info->endio_raid56_workers);
        unsigned int flags = WQ_MEM_RECLAIM | WQ_FREEZABLE | WQ_UNBOUND;
 
        fs_info->workers =
-               btrfs_alloc_workqueue(fs_info, "worker",
+               btrfs_alloc_workqueue(fs_info, "worker", flags, max_active, 16);
+       fs_info->hipri_workers =
+               btrfs_alloc_workqueue(fs_info, "worker-high",
                                      flags | WQ_HIGHPRI, max_active, 16);
 
        fs_info->delalloc_workers =
        fs_info->discard_ctl.discard_workers =
                alloc_workqueue("btrfs_discard", WQ_UNBOUND | WQ_FREEZABLE, 1);
 
-       if (!(fs_info->workers && fs_info->delalloc_workers &&
-             fs_info->flush_workers &&
+       if (!(fs_info->workers && fs_info->hipri_workers &&
+             fs_info->delalloc_workers && fs_info->flush_workers &&
              fs_info->endio_workers && fs_info->endio_meta_workers &&
              fs_info->endio_meta_write_workers &&
              fs_info->endio_write_workers && fs_info->endio_raid56_workers &&
 
 struct map_lookup;
 struct extent_buffer;
 struct btrfs_work;
-struct __btrfs_workqueue;
+struct btrfs_workqueue;
 struct btrfs_qgroup_extent_record;
 struct btrfs_qgroup;
 struct extent_io_tree;
        TP_ARGS(work)
 );
 
-DECLARE_EVENT_CLASS(btrfs__workqueue,
+DECLARE_EVENT_CLASS(btrfs_workqueue,
 
-       TP_PROTO(const struct __btrfs_workqueue *wq,
-                const char *name, int high),
+       TP_PROTO(const struct btrfs_workqueue *wq, const char *name),
 
-       TP_ARGS(wq, name, high),
+       TP_ARGS(wq, name),
 
        TP_STRUCT__entry_btrfs(
                __field(        const void *,   wq                      )
                __string(       name,   name                    )
-               __field(        int ,   high                    )
        ),
 
        TP_fast_assign_btrfs(btrfs_workqueue_owner(wq),
                __entry->wq             = wq;
                __assign_str(name, name);
-               __entry->high           = high;
        ),
 
-       TP_printk_btrfs("name=%s%s wq=%p", __get_str(name),
-                 __print_flags(__entry->high, "",
-                               {(WQ_HIGHPRI),  "-high"}),
+       TP_printk_btrfs("name=%s wq=%p", __get_str(name),
                  __entry->wq)
 );
 
-DEFINE_EVENT(btrfs__workqueue, btrfs_workqueue_alloc,
+DEFINE_EVENT(btrfs_workqueue, btrfs_workqueue_alloc,
 
-       TP_PROTO(const struct __btrfs_workqueue *wq,
-                const char *name, int high),
+       TP_PROTO(const struct btrfs_workqueue *wq, const char *name),
 
-       TP_ARGS(wq, name, high)
+       TP_ARGS(wq, name)
 );
 
-DECLARE_EVENT_CLASS(btrfs__workqueue_done,
+DECLARE_EVENT_CLASS(btrfs_workqueue_done,
 
-       TP_PROTO(const struct __btrfs_workqueue *wq),
+       TP_PROTO(const struct btrfs_workqueue *wq),
 
        TP_ARGS(wq),
 
        TP_printk_btrfs("wq=%p", __entry->wq)
 );
 
-DEFINE_EVENT(btrfs__workqueue_done, btrfs_workqueue_destroy,
+DEFINE_EVENT(btrfs_workqueue_done, btrfs_workqueue_destroy,
 
-       TP_PROTO(const struct __btrfs_workqueue *wq),
+       TP_PROTO(const struct btrfs_workqueue *wq),
 
        TP_ARGS(wq)
 );