return tic;
 }
 
+/*
+ * Unavoidable forward declaration - xlog_cil_ctx_alloc() takes the
+ * address of xlog_cil_push_work() for INIT_WORK(), while
+ * xlog_cil_push_work() itself calls xlog_cil_ctx_alloc().
+ */
+static void xlog_cil_push_work(struct work_struct *work);
+
+static struct xfs_cil_ctx *
+xlog_cil_ctx_alloc(void)
+{
+       struct xfs_cil_ctx      *ctx;
+
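+       /*
+        * A KM_NOFS allocation without KM_MAYFAIL retries until it
+        * succeeds, so neither this function nor its callers need to
+        * handle allocation failure.
+        */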
+       ctx = kmem_zalloc(sizeof(*ctx), KM_NOFS);
+       INIT_LIST_HEAD(&ctx->committing);
+       INIT_LIST_HEAD(&ctx->busy_extents);
+       INIT_WORK(&ctx->push_work, xlog_cil_push_work);
+       return ctx;
+}
+
+static void
+xlog_cil_ctx_switch(
+       struct xfs_cil          *cil,
+       struct xfs_cil_ctx      *ctx)
+{
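+       /*
+        * Callers must serialise the context switch: xlog_cil_push_work()
+        * does this under the xc_push_lock with the xc_ctx_lock held
+        * exclusively, while at mount time nothing else can see the CIL.
+        */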
+       ctx->sequence = ++cil->xc_current_sequence;
+       ctx->cil = cil;
+       cil->xc_ctx = ctx;
+}
+
 /*
  * After the first stage of log recovery is done, we know where the head and
  * tail of the log are. We need this log initialisation done before we can
 xlog_cil_push_work(
        struct work_struct      *work)
 {
-       struct xfs_cil          *cil =
-               container_of(work, struct xfs_cil, xc_push_work);
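+       /*
+        * The work item is embedded in the context being pushed, so
+        * container_of() hands us that context directly; there is no
+        * longer a single CIL-wide push work item.
+        */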
+       struct xfs_cil_ctx      *ctx =
+               container_of(work, struct xfs_cil_ctx, push_work);
+       struct xfs_cil          *cil = ctx->cil;
        struct xlog             *log = cil->xc_log;
        struct xfs_log_vec      *lv;
-       struct xfs_cil_ctx      *ctx;
        struct xfs_cil_ctx      *new_ctx;
        struct xlog_ticket      *tic;
        int                     num_iovecs;
        DECLARE_COMPLETION_ONSTACK(bdev_flush);
        bool                    push_commit_stable;
 
-       new_ctx = kmem_zalloc(sizeof(*new_ctx), KM_NOFS);
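+       /*
+        * Allocate the replacement context before taking the context
+        * lock so the switch itself never blocks on memory allocation.
+        */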
+       new_ctx = xlog_cil_ctx_alloc();
        new_ctx->ticket = xlog_cil_ticket_alloc(log);
 
        down_write(&cil->xc_ctx_lock);
-       ctx = cil->xc_ctx;
 
        spin_lock(&cil->xc_push_lock);
        push_seq = cil->xc_push_seq;

        /* check for a previously pushed sequence */
-       if (push_seq < cil->xc_ctx->sequence) {
+       if (push_seq < ctx->sequence) {
                spin_unlock(&cil->xc_push_lock);
                goto out_skip;
        }
        }
 
        /*
-        * initialise the new context and attach it to the CIL. Then attach
-        * the current context to the CIL committing list so it can be found
-        * during log forces to extract the commit lsn of the sequence that
-        * needs to be forced.
-        */
-       INIT_LIST_HEAD(&new_ctx->committing);
-       INIT_LIST_HEAD(&new_ctx->busy_extents);
-       new_ctx->sequence = ctx->sequence + 1;
-       new_ctx->cil = cil;
-       cil->xc_ctx = new_ctx;
-
-       /*
-        * The switch is now done, so we can drop the context lock and move out
+        * Switch the contexts so we can drop the context lock and move out
         * of a shared context. We can't just go straight to the commit record,
         * though - we need to synchronise with previous and future commits so
         * that the commit records are correctly ordered in the log, and so
         * that unlocked checks against the current sequence in log forces
         * do not risk dereferencing a freed context pointer.
         */
        spin_lock(&cil->xc_push_lock);
-       cil->xc_current_sequence = new_ctx->sequence;
+       xlog_cil_ctx_switch(cil, new_ctx);
        spin_unlock(&cil->xc_push_lock);
        up_write(&cil->xc_ctx_lock);
 
        spin_lock(&cil->xc_push_lock);
        if (cil->xc_push_seq < cil->xc_current_sequence) {
                cil->xc_push_seq = cil->xc_current_sequence;
-               queue_work(log->l_mp->m_cil_workqueue, &cil->xc_push_work);
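+               /*
+                * The push work item now lives in the context, so this
+                * queues a push of the currently open context.
+                */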
+               queue_work(log->l_mp->m_cil_workqueue, &cil->xc_ctx->push_work);
        }
 
        /*
 
        /* start on any pending background push to minimise wait time on it */
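+       /*
+        * Pushes are per-context work items now, so there is no single
+        * work item to flush; drain the whole CIL workqueue instead.
+        */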
        if (!async)
-               flush_work(&cil->xc_push_work);
+               flush_workqueue(log->l_mp->m_cil_workqueue);
 
        /*
         * If the CIL is empty or we've already pushed the sequence then
 
        cil->xc_push_seq = push_seq;
        cil->xc_push_commit_stable = async;
-       queue_work(log->l_mp->m_cil_workqueue, &cil->xc_push_work);
+       queue_work(log->l_mp->m_cil_workqueue, &cil->xc_ctx->push_work);
        spin_unlock(&cil->xc_push_lock);
 }
 
        if (!cil)
                return -ENOMEM;
 
-       ctx = kmem_zalloc(sizeof(*ctx), KM_MAYFAIL);
-       if (!ctx) {
-               kmem_free(cil);
-               return -ENOMEM;
-       }
-
-       INIT_WORK(&cil->xc_push_work, xlog_cil_push_work);
        INIT_LIST_HEAD(&cil->xc_cil);
        INIT_LIST_HEAD(&cil->xc_committing);
        spin_lock_init(&cil->xc_cil_lock);
        init_rwsem(&cil->xc_ctx_lock);
        init_waitqueue_head(&cil->xc_start_wait);
        init_waitqueue_head(&cil->xc_commit_wait);
-
-       INIT_LIST_HEAD(&ctx->committing);
-       INIT_LIST_HEAD(&ctx->busy_extents);
-       ctx->sequence = 1;
-       ctx->cil = cil;
-       cil->xc_ctx = ctx;
-       cil->xc_current_sequence = ctx->sequence;
-
        cil->xc_log = log;
        log->l_cilp = cil;
+
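+       /* Create the first context and make it the active CIL context. */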
+       ctx = xlog_cil_ctx_alloc();
+       xlog_cil_ctx_switch(cil, ctx);
+
        return 0;
 }