/*
  * Put a btree block that we're loading onto the ordered list and release it.
  * The btree blocks will be written to disk when bulk loading is finished.
+ * If we reach the dirty buffer threshold, flush them to disk before
+ * continuing.
  */
-static void
+static int
 xfs_btree_bload_drop_buf(
-       struct list_head        *buffers_list,
-       struct xfs_buf          **bpp)
+       struct xfs_btree_bload          *bbl,
+       struct list_head                *buffers_list,
+       struct xfs_buf                  **bpp)
 {
-       if (*bpp == NULL)
-               return;
+       struct xfs_buf                  *bp = *bpp;
+       int                             error;
+
+       if (!bp)
+               return 0;
 
        /*
         * Mark this buffer XBF_DONE (i.e. uptodate) so that a subsequent
         * xfs_buf_read will not pointlessly reread the contents from the disk.
         */
-       (*bpp)->b_flags |= XBF_DONE;
+       bp->b_flags |= XBF_DONE;
 
-       xfs_buf_delwri_queue_here(*bpp, buffers_list);
-       xfs_buf_relse(*bpp);
+       xfs_buf_delwri_queue_here(bp, buffers_list);
+       xfs_buf_relse(bp);
        *bpp = NULL;
+       bbl->nr_dirty++;
+
+       if (!bbl->max_dirty || bbl->nr_dirty < bbl->max_dirty)
+               return 0;
+
+       error = xfs_buf_delwri_submit(buffers_list);
+       if (error)
+               return error;
+
+       bbl->nr_dirty = 0;
+       return 0;
 }
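
The code above bumps and tests two counters, bbl->nr_dirty and bbl->max_dirty, whose definitions are not part of this excerpt. As a rough sketch only (the field names come from the usage above; the types, placement, and comments are assumptions, not the actual header hunk), the bookkeeping added to struct xfs_btree_bload would look something like this:

/*
 * Sketch of the assumed additions to struct xfs_btree_bload; the real
 * header change is not shown in this excerpt.
 */
struct xfs_btree_bload {
        /* ... existing geometry, slack, and callback fields ... */

        /*
         * Maximum number of dirty buffers to queue on the delwri list
         * before flushing it.  Zero means "queue everything until the
         * bulk load finishes", i.e. the old behaviour.
         */
        unsigned int            max_dirty;

        /* Dirty buffers queued since the last flush. */
        unsigned int            nr_dirty;
};

With max_dirty left at zero the early-flush branch in xfs_btree_bload_drop_buf() is never taken, so the function still only queues buffers and the whole list is written once bulk loading completes, as before.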
 
 /*
         */
        if (*blockp)
                xfs_btree_set_sibling(cur, *blockp, &new_ptr, XFS_BB_RIGHTSIB);
-       xfs_btree_bload_drop_buf(buffers_list, bpp);
+
+       ret = xfs_btree_bload_drop_buf(bbl, buffers_list, bpp);
+       if (ret)
+               return ret;
 
        /* Initialize the new btree block. */
        xfs_btree_init_block_cur(cur, new_bp, level, nr_this_block);
        cur->bc_nlevels = bbl->btree_height;
        xfs_btree_set_ptr_null(cur, &child_ptr);
        xfs_btree_set_ptr_null(cur, &ptr);
+       bbl->nr_dirty = 0;
 
        xfs_btree_bload_level_geometry(cur, bbl, level, nr_this_level,
                        &avg_per_block, &blocks, &blocks_with_extra);
                        xfs_btree_copy_ptrs(cur, &child_ptr, &ptr, 1);
        }
        total_blocks += blocks;
-       xfs_btree_bload_drop_buf(&buffers_list, &bp);
+
+       ret = xfs_btree_bload_drop_buf(bbl, &buffers_list, &bp);
+       if (ret)
+               goto out;
 
        /* Populate the internal btree nodes. */
        for (level = 1; level < cur->bc_nlevels; level++) {
                                xfs_btree_copy_ptrs(cur, &first_ptr, &ptr, 1);
                }
                total_blocks += blocks;
-               xfs_btree_bload_drop_buf(&buffers_list, &bp);
+
+               ret = xfs_btree_bload_drop_buf(bbl, &buffers_list, &bp);
+               if (ret)
+                       goto out;
+
                xfs_btree_copy_ptrs(cur, &child_ptr, &first_ptr, 1);
        }
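
Taken together, the converted call sites all follow the same shape: format a block, hand it to xfs_btree_bload_drop_buf() (which now flushes the delwri list whenever the dirty count reaches the cap), and rely on the submit at the end of xfs_btree_bload() to write out whatever partial batch remains when the loops finish. A minimal, standalone C sketch of that pattern, with hypothetical names standing in for the XFS helpers:

#include <stdio.h>

#define MAX_DIRTY      8                /* illustrative cap, like bbl->max_dirty */

static unsigned int nr_dirty;           /* like bbl->nr_dirty */

/* Stand-in for xfs_buf_delwri_submit(): write out everything queued so far. */
static int submit_queued(void)
{
        printf("flushing %u queued buffers\n", nr_dirty);
        return 0;
}

/* Stand-in for xfs_btree_bload_drop_buf(): queue one block, flush at the cap. */
static int drop_buf(void)
{
        int error;

        nr_dirty++;
        if (MAX_DIRTY == 0 || nr_dirty < MAX_DIRTY)
                return 0;

        error = submit_queued();
        if (error)
                return error;

        nr_dirty = 0;
        return 0;
}

int main(void)
{
        int error;
        int i;

        /* Format 100 "blocks", flushing every MAX_DIRTY of them. */
        for (i = 0; i < 100; i++) {
                error = drop_buf();
                if (error)
                        goto out;
        }

        /* The final submit catches the leftover partial batch. */
        error = submit_queued();
out:
        return error;
}

The important property, mirrored by the return ret / goto out checks in the hunks above, is that an error from any intermediate flush is propagated immediately rather than being discovered only at the final submit.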