        lockdep_assert_held(&log->l_icloglock);
 
        trace_xlog_iclog_release(iclog, _RET_IP_);
-       if (iclog->ic_state == XLOG_STATE_IOERROR)
+       if (xlog_is_shutdown(log))
                return -EIO;
 
        /*
        error = xlog_write_unmount_record(log, tic);
        /*
         * At this point, we're umounting anyway, so there's no point in
-        * transitioning log state to IOERROR. Just continue...
+        * transitioning log state to shutdown. Just continue...
         */
 out_err:
        if (error)
         * across the log IO to achieve that.
         */
        down(&iclog->ic_sema);
-       if (unlikely(iclog->ic_state == XLOG_STATE_IOERROR)) {
+       if (xlog_is_shutdown(log)) {
                /*
                 * It would seem logical to return EIO here, but we rely on
                 * the log state machine to propagate I/O errors instead of
                        xlog_state_switch_iclogs(log, iclog, 0);
                else
                        ASSERT(iclog->ic_state == XLOG_STATE_WANT_SYNC ||
-                              iclog->ic_state == XLOG_STATE_IOERROR);
+                               xlog_is_shutdown(log));
                if (!commit_iclog)
                        goto release_iclog;
                spin_unlock(&log->l_icloglock);
 static bool
 xlog_state_iodone_process_iclog(
        struct xlog             *log,
-       struct xlog_in_core     *iclog,
-       bool                    *ioerror)
+       struct xlog_in_core     *iclog)
 {
        xfs_lsn_t               lowest_lsn;
        xfs_lsn_t               header_lsn;
                 * Skip all iclogs in the ACTIVE & DIRTY states:
                 */
                return false;
-       case XLOG_STATE_IOERROR:
-               /*
-                * Between marking a filesystem SHUTDOWN and stopping the log,
-                * we do flush all iclogs to disk (if there wasn't a log I/O
-                * error). So, we do want things to go smoothly in case of just
-                * a SHUTDOWN w/o a LOG_IO_ERROR.
-                */
-               *ioerror = true;
-               return false;
        case XLOG_STATE_DONE_SYNC:
                /*
                 * Now that we have an iclog that is in the DONE_SYNC state, do
        struct xlog_in_core     *iclog;
        struct xlog_in_core     *first_iclog;
        bool                    cycled_icloglock;
-       bool                    ioerror;
        int                     flushcnt = 0;
        int                     repeats = 0;
 
                 * Keep looping through iclogs until one full pass is made
                 * without running any callbacks.
                 */
-               first_iclog = log->l_iclog;
-               iclog = log->l_iclog;
                cycled_icloglock = false;
-               ioerror = false;
-               repeats++;
+               first_iclog = log->l_iclog;
+               iclog = first_iclog;
 
                do {
                        LIST_HEAD(cb_list);
 
-                       if (xlog_state_iodone_process_iclog(log, iclog,
-                                                       &ioerror))
-                               break;
-
-                       if (iclog->ic_state != XLOG_STATE_CALLBACK &&
-                           iclog->ic_state != XLOG_STATE_IOERROR) {
-                               iclog = iclog->ic_next;
-                               continue;
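+                       /*
+                        * If the log is shut down, bypass the iclog state
+                        * processing and run the pending callbacks on every
+                        * iclog so that none are left behind.
+                        */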
+                       if (!xlog_is_shutdown(log)) {
+                               if (xlog_state_iodone_process_iclog(log, iclog))
+                                       break;
+                               if (iclog->ic_state != XLOG_STATE_CALLBACK) {
+                                       iclog = iclog->ic_next;
+                                       continue;
+                               }
                        }
                        list_splice_init(&iclog->ic_callbacks, &cb_list);
                        spin_unlock(&log->l_icloglock);
                        else
                                xlog_state_clean_iclog(log, iclog);
                        iclog = iclog->ic_next;
-               } while (first_iclog != iclog);
+               } while (iclog != first_iclog);
 
-               if (repeats > 5000) {
+               if (++repeats > 5000) {
                        flushcnt += repeats;
                        repeats = 0;
                        xfs_warn(log->l_mp,
                                "%s: possible infinite loop (%d iterations)",
                                __func__, flushcnt);
                }
-       } while (!ioerror && cycled_icloglock);
+       } while (!xlog_is_shutdown(log) && cycled_icloglock);
 
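+       /*
+        * Wake up any threads waiting in xlog_state_get_iclog_space() now
+        * that the head iclog is active again or the log has been shut down.
+        */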
        if (log->l_iclog->ic_state == XLOG_STATE_ACTIVE ||
-           log->l_iclog->ic_state == XLOG_STATE_IOERROR)
+           xlog_is_shutdown(log))
                wake_up_all(&log->l_flush_wait);
 
        spin_unlock(&log->l_icloglock);
 /*
  * Finish transitioning this iclog to the dirty state.
  *
- * Make sure that we completely execute this routine only when this is
- * the last call to the iclog.  There is a good chance that iclog flushes,
- * when we reach the end of the physical log, get turned into 2 separate
- * calls to bwrite.  Hence, one iclog flush could generate two calls to this
- * routine.  By using the reference count bwritecnt, we guarantee that only
- * the second completion goes through.
- *
  * Callbacks could take time, so they are done outside the scope of the
  * global state machine log lock.
  */
        xlog_cil_force(log);
 
        spin_lock(&log->l_icloglock);
-       iclog = log->l_iclog;
-       if (iclog->ic_state == XLOG_STATE_IOERROR)
+       if (xlog_is_shutdown(log))
                goto out_error;
 
+       iclog = log->l_iclog;
        trace_xlog_iclog_force(iclog, _RET_IP_);
 
        if (iclog->ic_state == XLOG_STATE_DIRTY ||
        bool                    completed;
 
        spin_lock(&log->l_icloglock);
-       iclog = log->l_iclog;
-       if (iclog->ic_state == XLOG_STATE_IOERROR)
+       if (xlog_is_shutdown(log))
                goto out_error;
 
+       iclog = log->l_iclog;
        while (be64_to_cpu(iclog->ic_header.h_lsn) != lsn) {
                trace_xlog_iclog_force_lsn(iclog, _RET_IP_);
                iclog = iclog->ic_next;
 }
 #endif
 
-/*
- * Mark all iclogs IOERROR. l_icloglock is held by the caller.
- */
-STATIC int
-xlog_state_ioerror(
-       struct xlog     *log)
-{
-       xlog_in_core_t  *iclog, *ic;
-
-       iclog = log->l_iclog;
-       if (iclog->ic_state != XLOG_STATE_IOERROR) {
-               /*
-                * Mark all the incore logs IOERROR.
-                * From now on, no log flushes will result.
-                */
-               ic = iclog;
-               do {
-                       ic->ic_state = XLOG_STATE_IOERROR;
-                       ic = ic->ic_next;
-               } while (ic != iclog);
-               return 0;
-       }
-       /*
-        * Return non-zero, if state transition has already happened.
-        */
-       return 1;
-}
-
 /*
  * This is called from xfs_force_shutdown, when we're forcibly
  * shutting down the filesystem, typically because of an IO error.
  * Note: for the !logerror case we need to flush the regions held in memory out
  * to disk first. This needs to be done before the log is marked as shutdown,
  * otherwise the iclog writes will fail.
+ *
+ * Return non-zero if the log shutdown transition has already happened.
  */
 int
 xfs_log_force_umount(
        int                     logerror)
 {
        struct xlog     *log;
-       int             retval;
+       int             retval = 0;
 
        log = mp->m_log;
 
         * Somebody could've already done the hard work for us.
         * No need to get locks for this.
         */
-       if (logerror && log->l_iclog->ic_state == XLOG_STATE_IOERROR) {
-               ASSERT(xlog_is_shutdown(log));
+       if (logerror && xlog_is_shutdown(log))
                return 1;
-       }
 
        /*
         * Flush all the completed transactions to disk before marking the log
         * Mark the log and the iclogs with IO error flags to prevent any
         * further log IO from being issued or completed.
         */
-       log->l_flags |= XLOG_IO_ERROR;
-       retval = xlog_state_ioerror(log);
+       if (!(log->l_flags & XLOG_IO_ERROR)) {
+               log->l_flags |= XLOG_IO_ERROR;
+               retval = 1;
+       }
        spin_unlock(&log->l_icloglock);
 
        /*
        spin_unlock(&log->l_cilp->xc_push_lock);
        xlog_state_do_callback(log);
 
-       /* return non-zero if log IOERROR transition had already happened */
        return retval;
 }