* We may be holding the log iclog lock upon entering this routine.
  */
 xfs_lsn_t
-xlog_assign_tail_lsn(
+xlog_assign_tail_lsn_locked(
        struct xfs_mount        *mp)
 {
-       xfs_lsn_t               tail_lsn;
        struct log              *log = mp->m_log;
+       struct xfs_log_item     *lip;
+       xfs_lsn_t               tail_lsn;
+
+       assert_spin_locked(&mp->m_ail->xa_lock);
 
        /*
         * To make sure we always have a valid LSN for the log tail we keep
         * track of the last LSN which was committed in log->l_last_sync_lsn,
-        * and use that when the AIL was empty and xfs_ail_min_lsn returns 0.
-        *
-        * If the AIL has been emptied we also need to wake any process
-        * waiting for this condition.
+        * and use that when the AIL was empty.
         */
-       tail_lsn = xfs_ail_min_lsn(mp->m_ail);
-       if (!tail_lsn)
+       lip = xfs_ail_min(mp->m_ail);
+       if (lip)
+               tail_lsn = lip->li_lsn;
+       else
                tail_lsn = atomic64_read(&log->l_last_sync_lsn);
        atomic64_set(&log->l_tail_lsn, tail_lsn);
        return tail_lsn;
 }
 
+xfs_lsn_t
+xlog_assign_tail_lsn(
+       struct xfs_mount        *mp)
+{
+       xfs_lsn_t               tail_lsn;
+
+       spin_lock(&mp->m_ail->xa_lock);
+       tail_lsn = xlog_assign_tail_lsn_locked(mp);
+       spin_unlock(&mp->m_ail->xa_lock);
+
+       return tail_lsn;
+}
+
 /*
  * Return the space in the log between the tail and the head.  The head
  * is passed in the cycle/bytes formal parms.  In the special case where
 
                        int                     num_bblocks);
 int      xfs_log_mount_finish(struct xfs_mount *mp);
 xfs_lsn_t xlog_assign_tail_lsn(struct xfs_mount *mp);
+xfs_lsn_t xlog_assign_tail_lsn_locked(struct xfs_mount *mp);
 void     xfs_log_space_wake(struct xfs_mount *mp);
 int      xfs_log_notify(struct xfs_mount       *mp,
                         struct xlog_in_core    *iclog,
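
The xfs_log.h hunk above exports both variants of the tail-LSN helper: xlog_assign_tail_lsn_locked() expects the caller to already hold the AIL lock (the new assert_spin_locked() check enforces this), while xlog_assign_tail_lsn() is a thin wrapper that takes and drops mp->m_ail->xa_lock around the locked variant. The following is a minimal userspace sketch of that locked/unlocked split, with a pthread mutex standing in for xa_lock; all demo_* names are invented for illustration and are not part of the patch:

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static pthread_mutex_t demo_ail_lock = PTHREAD_MUTEX_INITIALIZER;
static uint64_t demo_tail_lsn;

/* "_locked" variant: the caller must already hold demo_ail_lock. */
static uint64_t
demo_assign_tail_lsn_locked(uint64_t ail_min_lsn, uint64_t last_sync_lsn)
{
	/* Fall back to the last committed LSN when the "AIL" is empty (0). */
	demo_tail_lsn = ail_min_lsn ? ail_min_lsn : last_sync_lsn;
	return demo_tail_lsn;
}

/* Unlocked wrapper: takes and drops the lock around the locked variant. */
static uint64_t
demo_assign_tail_lsn(uint64_t ail_min_lsn, uint64_t last_sync_lsn)
{
	uint64_t lsn;

	pthread_mutex_lock(&demo_ail_lock);
	lsn = demo_assign_tail_lsn_locked(ail_min_lsn, last_sync_lsn);
	pthread_mutex_unlock(&demo_ail_lock);
	return lsn;
}

int main(void)
{
	/* Empty "AIL", so the last committed LSN becomes the tail. */
	printf("tail lsn: %llu\n",
	       (unsigned long long)demo_assign_tail_lsn(0, 42));
	return 0;
}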
 
  * Return a pointer to the first item in the AIL.  If the AIL is empty, then
  * return NULL.
  */
-static xfs_log_item_t *
+xfs_log_item_t *
 xfs_ail_min(
        struct xfs_ail  *ailp)
 {
 
        if (!list_empty(&tmp))
                xfs_ail_splice(ailp, cur, &tmp, lsn);
-       spin_unlock(&ailp->xa_lock);
 
-       if (mlip_changed && !XFS_FORCED_SHUTDOWN(ailp->xa_mount)) {
-               xlog_assign_tail_lsn(ailp->xa_mount);
+       if (mlip_changed) {
+               if (!XFS_FORCED_SHUTDOWN(ailp->xa_mount))
+                       xlog_assign_tail_lsn_locked(ailp->xa_mount);
+               spin_unlock(&ailp->xa_lock);
+
                xfs_log_space_wake(ailp->xa_mount);
+       } else {
+               spin_unlock(&ailp->xa_lock);
        }
 }
 
                if (mlip == lip)
                        mlip_changed = 1;
        }
-       spin_unlock(&ailp->xa_lock);
 
-       if (mlip_changed && !XFS_FORCED_SHUTDOWN(ailp->xa_mount)) {
-               xlog_assign_tail_lsn(ailp->xa_mount);
+       if (mlip_changed) {
+               if (!XFS_FORCED_SHUTDOWN(ailp->xa_mount))
+                       xlog_assign_tail_lsn_locked(ailp->xa_mount);
+               spin_unlock(&ailp->xa_lock);
+
                xfs_log_space_wake(ailp->xa_mount);
+       } else {
+               spin_unlock(&ailp->xa_lock);
        }
 }
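
In both AIL hunks above (xfs_trans_ail_update_bulk() and xfs_trans_ail_delete_bulk()), the tail LSN is now recomputed via xlog_assign_tail_lsn_locked() while xa_lock is still held; only afterwards is the lock dropped and xfs_log_space_wake() called. The sketch below is an illustrative userspace model of that ordering, not the kernel code: invented demo_* names, with a pthread mutex and condition variable standing in for the AIL lock and the log space waiters.

#include <pthread.h>
#include <stdint.h>

static pthread_mutex_t demo_ail_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t demo_log_space = PTHREAD_COND_INITIALIZER;
static uint64_t demo_tail_lsn;

/*
 * Mirrors the control flow of the two hunks above: if the minimum item
 * changed, reassign the tail while the lock is still held, drop the
 * lock, and only then wake anyone waiting for log space.
 */
static void
demo_ail_min_changed(int mlip_changed, uint64_t new_min_lsn, int shutdown)
{
	pthread_mutex_lock(&demo_ail_lock);
	/* ... list manipulation happens here in the real code ... */
	if (mlip_changed) {
		if (!shutdown)
			demo_tail_lsn = new_min_lsn;	/* tail moves under the lock */
		pthread_mutex_unlock(&demo_ail_lock);

		pthread_cond_broadcast(&demo_log_space);
	} else {
		pthread_mutex_unlock(&demo_ail_lock);
	}
}

int main(void)
{
	demo_ail_min_changed(1, 7, 0);
	return 0;
}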
 
 void                   xfs_ail_push(struct xfs_ail *, xfs_lsn_t);
 void                   xfs_ail_push_all(struct xfs_ail *);
+struct xfs_log_item    *xfs_ail_min(struct xfs_ail  *ailp);
 xfs_lsn_t              xfs_ail_min_lsn(struct xfs_ail *ailp);
 
 struct xfs_log_item *  xfs_trans_ail_cursor_first(struct xfs_ail *ailp,