unsigned long state;
        unsigned long last_accessed;
        struct dm_bufio_client *c;
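+       /* entry on a caller's list of deferred writes (see __flush_write_list) */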
+       struct list_head write_list;
        struct bio bio;
        struct bio_vec bio_vec[DM_BUFIO_INLINE_VECS];
 };
  * - Submit our write and don't wait on it. We set B_WRITING indicating
  *   that there is a write in progress.
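+ * - If write_list is non-NULL, the write is not submitted here: the
+ *   buffer is queued on the list instead and the caller is expected to
+ *   submit it later via __flush_write_list(), outside the client lock.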
  */
-static void __write_dirty_buffer(struct dm_buffer *b)
+static void __write_dirty_buffer(struct dm_buffer *b,
+                                struct list_head *write_list)
 {
        if (!test_bit(B_DIRTY, &b->state))
                return;
        wait_on_bit_lock(&b->state, B_WRITING,
                         do_io_schedule, TASK_UNINTERRUPTIBLE);
 
-       submit_io(b, WRITE, b->block, write_endio);
+       if (!write_list)
+               submit_io(b, WRITE, b->block, write_endio);
+       else
+               list_add_tail(&b->write_list, write_list);
+}
+
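+/*
+ * Submit the writes that __write_dirty_buffer() queued on @write_list.
+ * Called after the client lock has been dropped: submit_io() may block
+ * when the device queue is congested, and we must not block while
+ * holding the lock. A plug batches the writes for the block layer.
+ */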
+static void __flush_write_list(struct list_head *write_list)
+{
+       struct blk_plug plug;
+       blk_start_plug(&plug);
+       while (!list_empty(write_list)) {
+               struct dm_buffer *b =
+                       list_entry(write_list->next, struct dm_buffer, write_list);
+               list_del(&b->write_list);
+               submit_io(b, WRITE, b->block, write_endio);
+               dm_bufio_cond_resched();
+       }
+       blk_finish_plug(&plug);
 }
 
 /*
                return;
 
        wait_on_bit(&b->state, B_READING, do_io_schedule, TASK_UNINTERRUPTIBLE);
-       __write_dirty_buffer(b);
+       __write_dirty_buffer(b, NULL);
        wait_on_bit(&b->state, B_WRITING, do_io_schedule, TASK_UNINTERRUPTIBLE);
 }
 
        wake_up(&c->free_buffer_wait);
 }
 
-static void __write_dirty_buffers_async(struct dm_bufio_client *c, int no_wait)
+static void __write_dirty_buffers_async(struct dm_bufio_client *c, int no_wait,
+                                       struct list_head *write_list)
 {
        struct dm_buffer *b, *tmp;
 
                if (no_wait && test_bit(B_WRITING, &b->state))
                        return;
 
-               __write_dirty_buffer(b);
+               __write_dirty_buffer(b, write_list);
                dm_bufio_cond_resched();
        }
 }
  * If we are over threshold_buffers, start freeing buffers.
  * If we're over "limit_buffers", block until we get under the limit.
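+ * Dirty buffers written out here are deferred to @write_list when one
+ * is supplied, so the caller can submit them after dropping the lock.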
  */
-static void __check_watermark(struct dm_bufio_client *c)
+static void __check_watermark(struct dm_bufio_client *c,
+                             struct list_head *write_list)
 {
        unsigned long threshold_buffers, limit_buffers;
 
        }
 
        if (c->n_buffers[LIST_DIRTY] > threshold_buffers)
-               __write_dirty_buffers_async(c, 1);
+               __write_dirty_buffers_async(c, 1, write_list);
 }
 
 /*
  *--------------------------------------------------------------*/
 
 static struct dm_buffer *__bufio_new(struct dm_bufio_client *c, sector_t block,
-                                    enum new_flag nf, int *need_submit)
+                                    enum new_flag nf, int *need_submit,
+                                    struct list_head *write_list)
 {
        struct dm_buffer *b, *new_b = NULL;
 
                goto found_buffer;
        }
 
-       __check_watermark(c);
+       __check_watermark(c, write_list);
 
        b = new_b;
        b->hold_count = 1;
        int need_submit;
        struct dm_buffer *b;
 
+       LIST_HEAD(write_list);
+
        dm_bufio_lock(c);
-       b = __bufio_new(c, block, nf, &need_submit);
+       b = __bufio_new(c, block, nf, &need_submit, &write_list);
        dm_bufio_unlock(c);
 
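+       /* Submit writes deferred by __check_watermark(), now that the lock is dropped. */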
+       __flush_write_list(&write_list);
+
        if (!b)
                return b;
 
 {
        struct blk_plug plug;
 
+       LIST_HEAD(write_list);
+
        BUG_ON(dm_bufio_in_request());
 
        blk_start_plug(&plug);
        for (; n_blocks--; block++) {
                int need_submit;
                struct dm_buffer *b;
-               b = __bufio_new(c, block, NF_PREFETCH, &need_submit);
+               b = __bufio_new(c, block, NF_PREFETCH, &need_submit,
+                               &write_list);
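+               /*
+                * Deferred writes must be issued without the client lock.
+                * Finish the outer plug first: a nested blk_start_plug()
+                * is a no-op, so __flush_write_list() could not batch
+                * its writes under the outer plug.
+                */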
+               if (unlikely(!list_empty(&write_list))) {
+                       dm_bufio_unlock(c);
+                       blk_finish_plug(&plug);
+                       __flush_write_list(&write_list);
+                       blk_start_plug(&plug);
+                       dm_bufio_lock(c);
+               }
                if (unlikely(b != NULL)) {
                        dm_bufio_unlock(c);
 
                                goto flush_plug;
                        dm_bufio_lock(c);
                }
-
        }
 
        dm_bufio_unlock(c);
 
 void dm_bufio_write_dirty_buffers_async(struct dm_bufio_client *c)
 {
+       LIST_HEAD(write_list);
+
        BUG_ON(dm_bufio_in_request());
 
        dm_bufio_lock(c);
-       __write_dirty_buffers_async(c, 0);
+       __write_dirty_buffers_async(c, 0, &write_list);
        dm_bufio_unlock(c);
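+       /* The writes were only queued above; submit them here, unlocked. */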
+       __flush_write_list(&write_list);
 }
 EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers_async);
 
        unsigned long buffers_processed = 0;
        struct dm_buffer *b, *tmp;
 
+       LIST_HEAD(write_list);
+
+       dm_bufio_lock(c);
+       __write_dirty_buffers_async(c, 0, &write_list);
+       dm_bufio_unlock(c);
+       __flush_write_list(&write_list);
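+       /* Re-take the lock to walk the dirty list and wait for the I/O. */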
        dm_bufio_lock(c);
-       __write_dirty_buffers_async(c, 0);
 
 again:
        list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_DIRTY], lru_list) {
        BUG_ON(!b->hold_count);
        BUG_ON(test_bit(B_READING, &b->state));
 
-       __write_dirty_buffer(b);
+       __write_dirty_buffer(b, NULL);
        if (b->hold_count == 1) {
                wait_on_bit(&b->state, B_WRITING,
                            do_io_schedule, TASK_UNINTERRUPTIBLE);