* None of the buffers should be dirty, locked, or pinned.
  */
 
-static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync)
+static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync,
+                            unsigned int nr_revokes)
 {
        struct gfs2_sbd *sdp = gl->gl_sbd;
        struct list_head *head = &gl->gl_ail_list;
        struct gfs2_bufdata *bd, *tmp;
        struct buffer_head *bh;
        const unsigned long b_state = (1UL << BH_Dirty)|(1UL << BH_Pinned)|(1UL << BH_Lock);
 
        gfs2_log_lock(sdp);
        spin_lock(&sdp->sd_ail_lock);
-       list_for_each_entry_safe(bd, tmp, head, bd_ail_gl_list) {
+       list_for_each_entry_safe_reverse(bd, tmp, head, bd_ail_gl_list) {
+               if (nr_revokes == 0)
+                       break;
                bh = bd->bd_bh;
                if (bh->b_state & b_state) {
                        if (fsync)
                                continue;
                        gfs2_ail_error(gl, bh);
                }
                gfs2_trans_add_revoke(sdp, bd);
+               nr_revokes--;
        }
        GLOCK_BUG_ON(gl, !fsync && atomic_read(&gl->gl_ail_count));
        spin_unlock(&sdp->sd_ail_lock);
        gfs2_log_unlock(sdp);
 }
@@ ... @@ static void gfs2_ail_empty_gl(struct gfs2_glock *gl)
        WARN_ON_ONCE(current->journal_info);
        current->journal_info = &tr;
 
-       __gfs2_ail_flush(gl, 0);
+       __gfs2_ail_flush(gl, 0, tr.tr_revokes);
 
        gfs2_trans_end(sdp);
        gfs2_log_flush(sdp, NULL);
 }
 
 void gfs2_ail_flush(struct gfs2_glock *gl, bool fsync)
 {
        struct gfs2_sbd *sdp = gl->gl_sbd;
        unsigned int revokes = atomic_read(&gl->gl_ail_count);
+       unsigned int max_revokes = (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_log_descriptor)) / sizeof(u64);
        int ret;
 
        if (!revokes)
                return;
 
-       ret = gfs2_trans_begin(sdp, 0, revokes);
+       while (revokes > max_revokes)
+               max_revokes += (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_meta_header)) / sizeof(u64);
+
+       ret = gfs2_trans_begin(sdp, 0, max_revokes);
        if (ret)
                return;
-       __gfs2_ail_flush(gl, fsync);
+       __gfs2_ail_flush(gl, fsync, max_revokes);
        gfs2_trans_end(sdp);
        gfs2_log_flush(sdp, NULL);
 }
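
For reference, the new max_revokes computation rounds the pending revoke count up to whole journal blocks: the first log-descriptor block holds (sb_bsize - sizeof(struct gfs2_log_descriptor)) / sizeof(u64) revokes, and each continuation block, which carries only a meta header, holds (sb_bsize - sizeof(struct gfs2_meta_header)) / sizeof(u64) more. The standalone sketch below mirrors that loop; the header sizes are hypothetical placeholders for the real sizeof() values, and max_revokes_for() is an illustrative helper, not a kernel function.

#include <stdio.h>

/* Hypothetical on-disk header sizes; the kernel uses
 * sizeof(struct gfs2_log_descriptor) and sizeof(struct gfs2_meta_header). */
#define LOG_DESC_SIZE 64
#define META_HDR_SIZE 24

/* Mirror of the max_revokes loop in gfs2_ail_flush(): start with the
 * capacity of one log-descriptor block, then add whole continuation
 * blocks until the pending revokes fit. */
static unsigned int max_revokes_for(unsigned int revokes, unsigned int bsize)
{
	unsigned int max_revokes = (bsize - LOG_DESC_SIZE) / sizeof(unsigned long long);

	while (revokes > max_revokes)
		max_revokes += (bsize - META_HDR_SIZE) / sizeof(unsigned long long);

	return max_revokes;
}

int main(void)
{
	/* With 4096-byte blocks: (4096 - 64) / 8 = 504 revokes fit in the
	 * descriptor block; each continuation block adds (4096 - 24) / 8 = 509. */
	printf("%u\n", max_revokes_for(1, 4096));   /* 504 */
	printf("%u\n", max_revokes_for(504, 4096)); /* 504 */
	printf("%u\n", max_revokes_for(505, 4096)); /* 1013 */
	return 0;
}

With this rounding, gfs2_trans_begin() reserves enough journal space for every revoke that __gfs2_ail_flush() may add, and passing max_revokes (or tr.tr_revokes) into __gfs2_ail_flush() caps its list walk so it can never emit more revokes than were reserved.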