        spin_lock(&cil->xc_push_lock);
        if (cil->xc_push_seq < cil->xc_current_sequence) {
                cil->xc_push_seq = cil->xc_current_sequence;
-               queue_work(log->l_mp->m_cil_workqueue, &cil->xc_ctx->push_work);
+               queue_work(cil->xc_push_wq, &cil->xc_ctx->push_work);
        }
 
        /*
 
        /* start on any pending background push to minimise wait time on it */
        if (!async)
-               flush_workqueue(log->l_mp->m_cil_workqueue);
+               flush_workqueue(cil->xc_push_wq);
 
        /*
         * If the CIL is empty or we've already pushed the sequence then
 
        cil->xc_push_seq = push_seq;
        cil->xc_push_commit_stable = async;
-       queue_work(log->l_mp->m_cil_workqueue, &cil->xc_ctx->push_work);
+       queue_work(cil->xc_push_wq, &cil->xc_ctx->push_work);
        spin_unlock(&cil->xc_push_lock);
 }
 
        cil = kmem_zalloc(sizeof(*cil), KM_MAYFAIL);
        if (!cil)
                return -ENOMEM;
+       /*
+        * Limit the CIL pipeline depth to 4 concurrent works to bound the
+        * concurrency the log spinlocks will be exposed to.
+        */
+       cil->xc_push_wq = alloc_workqueue("xfs-cil/%s",
+                       XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM | WQ_UNBOUND),
+                       4, log->l_mp->m_super->s_id);
+       if (!cil->xc_push_wq)
+               goto out_destroy_cil;
 
        INIT_LIST_HEAD(&cil->xc_cil);
        INIT_LIST_HEAD(&cil->xc_committing);
        xlog_cil_ctx_switch(cil, ctx);
 
        return 0;
+
+out_destroy_cil:
+       kmem_free(cil);
+       return -ENOMEM;
 }
 
 void
        }
 
        ASSERT(list_empty(&log->l_cilp->xc_cil));
+       destroy_workqueue(log->l_cilp->xc_push_wq);
        kmem_free(log->l_cilp);
 }
 
 
        struct xlog             *xc_log;
        struct list_head        xc_cil;
        spinlock_t              xc_cil_lock;
+       struct workqueue_struct *xc_push_wq;
 
        struct rw_semaphore     xc_ctx_lock ____cacheline_aligned_in_smp;
        struct xfs_cil_ctx      *xc_ctx;
 
        struct xfs_mru_cache    *m_filestream;  /* per-mount filestream data */
        struct workqueue_struct *m_buf_workqueue;
        struct workqueue_struct *m_unwritten_workqueue;
-       struct workqueue_struct *m_cil_workqueue;
        struct workqueue_struct *m_reclaim_workqueue;
        struct workqueue_struct *m_sync_workqueue;
        struct workqueue_struct *m_blockgc_wq;
 
        if (!mp->m_unwritten_workqueue)
                goto out_destroy_buf;
 
-       /*
-        * Limit the CIL pipeline depth to 4 concurrent works to bound the
-        * concurrency the log spinlocks will be exposed to.
-        */
-       mp->m_cil_workqueue = alloc_workqueue("xfs-cil/%s",
-                       XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM | WQ_UNBOUND),
-                       4, mp->m_super->s_id);
-       if (!mp->m_cil_workqueue)
-               goto out_destroy_unwritten;
-
        mp->m_reclaim_workqueue = alloc_workqueue("xfs-reclaim/%s",
                        XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM),
                        0, mp->m_super->s_id);
        if (!mp->m_reclaim_workqueue)
-               goto out_destroy_cil;
+               goto out_destroy_unwritten;
 
        mp->m_blockgc_wq = alloc_workqueue("xfs-blockgc/%s",
                        XFS_WQFLAGS(WQ_UNBOUND | WQ_FREEZABLE | WQ_MEM_RECLAIM),
        destroy_workqueue(mp->m_blockgc_wq);
 out_destroy_reclaim:
        destroy_workqueue(mp->m_reclaim_workqueue);
-out_destroy_cil:
-       destroy_workqueue(mp->m_cil_workqueue);
 out_destroy_unwritten:
        destroy_workqueue(mp->m_unwritten_workqueue);
 out_destroy_buf:
        destroy_workqueue(mp->m_blockgc_wq);
        destroy_workqueue(mp->m_inodegc_wq);
        destroy_workqueue(mp->m_reclaim_workqueue);
-       destroy_workqueue(mp->m_cil_workqueue);
        destroy_workqueue(mp->m_unwritten_workqueue);
        destroy_workqueue(mp->m_buf_workqueue);
 }
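
For reference, below is a minimal standalone sketch of the bounded-workqueue lifecycle this patch relies on. It is not part of the change above: the demo_* names are hypothetical, the XFS-specific XFS_WQFLAGS() wrapper is replaced by the raw WQ_* flags, and only alloc_workqueue(), queue_work() and destroy_workqueue() are the real kernel APIs. max_active = 4 mirrors the pipeline-depth bound described in the comment moved into xlog_cil_init().

#include <linux/slab.h>
#include <linux/workqueue.h>

/* Hypothetical stand-in for struct xfs_cil: it owns its own push workqueue. */
struct demo_cil {
	struct workqueue_struct	*push_wq;
	struct work_struct	push_work;
};

static void demo_push_worker(struct work_struct *work)
{
	/* the real xlog_cil_push_work() would format and write the CIL here */
}

static struct demo_cil *demo_cil_alloc(const char *sb_id)
{
	struct demo_cil *cil;

	cil = kzalloc(sizeof(*cil), GFP_KERNEL);
	if (!cil)
		return NULL;

	/* max_active = 4 bounds how many pushes may run concurrently */
	cil->push_wq = alloc_workqueue("demo-cil/%s",
			WQ_FREEZABLE | WQ_MEM_RECLAIM | WQ_UNBOUND, 4, sb_id);
	if (!cil->push_wq) {
		kfree(cil);
		return NULL;
	}
	INIT_WORK(&cil->push_work, demo_push_worker);
	return cil;
}

/* Background push: queue onto the object-owned queue, bounded to 4 in flight. */
static void demo_cil_push_background(struct demo_cil *cil)
{
	queue_work(cil->push_wq, &cil->push_work);
}

static void demo_cil_free(struct demo_cil *cil)
{
	/* destroy_workqueue() drains any still-queued work before tear-down */
	destroy_workqueue(cil->push_wq);
	kfree(cil);
}

Tying the queue's lifetime to the object that queues onto it is the point of the change: xlog_cil_destroy() now destroys xc_push_wq immediately before freeing the CIL, instead of leaving a CIL-only workqueue hanging off struct xfs_mount.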