xfs_log_unmount(mp);
 }
 
+/*
+ * Wait for the given iclog to be written to disk, or return an error
+ * immediately if the log has been shut down.
+ */
+static int
+xlog_wait_on_iclog(
+       struct xlog_in_core     *iclog)
+               __releases(iclog->ic_log->l_icloglock)
+{
+       struct xlog             *log = iclog->ic_log;
+
+       if (!XLOG_FORCED_SHUTDOWN(log) &&
+           iclog->ic_state != XLOG_STATE_ACTIVE &&
+           iclog->ic_state != XLOG_STATE_DIRTY) {
+               XFS_STATS_INC(log->l_mp, xs_log_force_sleep);
+               xlog_wait(&iclog->ic_force_wait, &log->l_icloglock);
+       } else {
+               spin_unlock(&log->l_icloglock);
+       }
+
+       if (XLOG_FORCED_SHUTDOWN(log))
+               return -EIO;
+       return 0;
+}
+
 /*
  * Final log writes as part of unmount.
  *
        atomic_inc(&iclog->ic_refcnt);
        xlog_state_want_sync(log, iclog);
        error = xlog_state_release_iclog(log, iclog);
-       switch (iclog->ic_state) {
-       default:
-               if (!XLOG_FORCED_SHUTDOWN(log)) {
-                       xlog_wait(&iclog->ic_force_wait, &log->l_icloglock);
-                       break;
-               }
-               /* fall through */
-       case XLOG_STATE_ACTIVE:
-       case XLOG_STATE_DIRTY:
-               spin_unlock(&log->l_icloglock);
-               break;
-       }
+       xlog_wait_on_iclog(iclog);
 
        if (tic) {
                trace_xfs_log_umount_write(log, tic);
                 * previous iclog and go to sleep.
                 */
                iclog = iclog->ic_prev;
-               if (iclog->ic_state == XLOG_STATE_ACTIVE ||
-                   iclog->ic_state == XLOG_STATE_DIRTY)
-                       goto out_unlock;
        } else if (iclog->ic_state == XLOG_STATE_ACTIVE) {
                if (atomic_read(&iclog->ic_refcnt) == 0) {
                        /*
                        if (xlog_state_release_iclog(log, iclog))
                                goto out_error;
 
-                       if (be64_to_cpu(iclog->ic_header.h_lsn) != lsn ||
-                           iclog->ic_state == XLOG_STATE_DIRTY)
+                       if (be64_to_cpu(iclog->ic_header.h_lsn) != lsn)
                                goto out_unlock;
                } else {
                        /*
                ;
        }
 
-       if (!(flags & XFS_LOG_SYNC))
-               goto out_unlock;
-
-       if (iclog->ic_state == XLOG_STATE_IOERROR)
-               goto out_error;
-       XFS_STATS_INC(mp, xs_log_force_sleep);
-       xlog_wait(&iclog->ic_force_wait, &log->l_icloglock);
-       if (iclog->ic_state == XLOG_STATE_IOERROR)
-               return -EIO;
-       return 0;
-
+       if (flags & XFS_LOG_SYNC)
+               return xlog_wait_on_iclog(iclog);
 out_unlock:
        spin_unlock(&log->l_icloglock);
        return 0;
                        goto out_unlock;
        }
 
-       if (iclog->ic_state == XLOG_STATE_DIRTY)
-               goto out_unlock;
-
        if (iclog->ic_state == XLOG_STATE_ACTIVE) {
                /*
                 * We sleep here if we haven't already slept (e.g. this is the
                        *log_flushed = 1;
        }
 
-       if (!(flags & XFS_LOG_SYNC) ||
-           (iclog->ic_state == XLOG_STATE_ACTIVE ||
-            iclog->ic_state == XLOG_STATE_DIRTY))
-               goto out_unlock;
-
-       if (iclog->ic_state == XLOG_STATE_IOERROR)
-               goto out_error;
-
-       XFS_STATS_INC(mp, xs_log_force_sleep);
-       xlog_wait(&iclog->ic_force_wait, &log->l_icloglock);
-       if (iclog->ic_state == XLOG_STATE_IOERROR)
-               return -EIO;
-       return 0;
-
+       if (flags & XFS_LOG_SYNC)
+               return xlog_wait_on_iclog(iclog);
 out_unlock:
        spin_unlock(&log->l_icloglock);
        return 0;