mutex_unlock(&b->c->bucket_lock);
 }
 
-struct btree *bch_btree_node_alloc(struct cache_set *c, int level)
+struct btree *bch_btree_node_alloc(struct cache_set *c, int level, bool wait)
 {
        BKEY_PADDED(key) k;
        struct btree *b = ERR_PTR(-EAGAIN);
 
        mutex_lock(&c->bucket_lock);
 retry:
-       if (__bch_bucket_alloc_set(c, WATERMARK_METADATA, &k.key, 1, true))
+       if (__bch_bucket_alloc_set(c, WATERMARK_METADATA, &k.key, 1, wait))
                goto err;
 
        bkey_put(c, &k.key);
        return b;
 }
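The core of the patch: bch_btree_node_alloc() grows a wait parameter and
forwards it to __bch_bucket_alloc_set() in place of the hardcoded true, so
each caller now chooses whether a metadata bucket allocation may block until
a bucket frees up or must fail fast (note b is primed to ERR_PTR(-EAGAIN)
for exactly that error path). A minimal sketch of the wait-or-fail policy
being enabled; free_bucket_available(), wait_for_free_bucket() and
bucket_to_key() are hypothetical stand-ins, not bcache's real allocator
internals:

        /* Hypothetical sketch: one allocator, two failure policies. */
        static int alloc_bucket_sketch(struct cache_set *c, struct bkey *k,
                                       bool wait)
        {
                while (!free_bucket_available(c)) {
                        if (!wait)
                                return -EAGAIN;  /* fail fast */
                        wait_for_free_bucket(c); /* may sleep a while */
                }
                bucket_to_key(c, k);     /* record the chosen bucket in k */
                return 0;
        }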
 
-static struct btree *btree_node_alloc_replacement(struct btree *b)
+static struct btree *btree_node_alloc_replacement(struct btree *b, bool wait)
 {
-       struct btree *n = bch_btree_node_alloc(b->c, b->level);
+       struct btree *n = bch_btree_node_alloc(b->c, b->level, wait);
        if (!IS_ERR_OR_NULL(n))
                bch_btree_sort_into(b, n);
 
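btree_node_alloc_replacement() just threads the flag through to
bch_btree_node_alloc(). The resulting caller contract, roughly (the
changelog isn't shown here, so this is inferred from the call sites below):

        /*
         * wait == true:  may sleep until a bucket frees up; an ERR_PTR
         *                return is still possible on hard failure (e.g.
         *                the cache set going away), so errors are checked.
         * wait == false: returns ERR_PTR(-EAGAIN) straight away when no
         *                bucket is free; callers treat that as "skip this
         *                work for now", not as an error worth reporting.
         */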
                return 0;
 
        for (i = 0; i < nodes; i++) {
-               new_nodes[i] = btree_node_alloc_replacement(r[i].b);
+               new_nodes[i] = btree_node_alloc_replacement(r[i].b, false);
                if (IS_ERR_OR_NULL(new_nodes[i]))
                        goto out_nocoalesce;
        }
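This hunk is from the GC coalesce path (the bare return 0; above is the
tail of its early-bailout guard). All the GC call sites pass wait = false;
the rationale, inferred since the changelog isn't shown here: in bcache the
allocator can itself end up waiting on GC to free buckets, so GC sleeping
on a bucket allocation could close a wait cycle. Coalescing is purely an
optimization, so a failed allocation just unwinds through out_nocoalesce:

        /*
         * Sketch of the wait cycle that wait = false avoids, assuming the
         * allocator thread can block waiting for GC (as bcache's design
         * suggests):
         *
         *   allocator:  out of buckets   ->  waits for GC
         *   GC:         wants a new node ->  waits for the allocator
         *
         * With wait = false the second edge disappears: the allocation
         * returns -EAGAIN and GC abandons the coalesce for this pass.
         */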
                if (!IS_ERR(last->b)) {
                        should_rewrite = btree_gc_mark_node(last->b, gc);
                        if (should_rewrite) {
-                               n = btree_node_alloc_replacement(last->b);
+                               n = btree_node_alloc_replacement(last->b,
+                                                                false);
 
                                if (!IS_ERR_OR_NULL(n)) {
                                        bch_btree_node_write_sync(n);
 
        should_rewrite = btree_gc_mark_node(b, gc);
        if (should_rewrite) {
-               n = btree_node_alloc_replacement(b);
+               n = btree_node_alloc_replacement(b, false);
 
                if (!IS_ERR_OR_NULL(n)) {
                        bch_btree_node_write_sync(n);
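The two single-node rewrite sites in the GC pass (the per-key loop above
and the root-node case here) follow the same policy; the IS_ERR_OR_NULL()
guard turns a failed allocation into "leave the node as it is", which is
safe because a later GC pass can retry the rewrite. Condensed, with the
details of swapping the new node in omitted:

        /* Condensed shape shared by the two rewrite hunks above. */
        if (btree_gc_mark_node(b, gc)) {
                struct btree *n = btree_node_alloc_replacement(b, false);

                if (!IS_ERR_OR_NULL(n))
                        bch_btree_node_write_sync(n);   /* else: skip */
        }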
 
        closure_init_stack(&cl);
 
-       n1 = btree_node_alloc_replacement(b);
+       n1 = btree_node_alloc_replacement(b, true);
        if (IS_ERR(n1))
                goto err;
 
 
                trace_bcache_btree_node_split(b, n1->sets[0].data->keys);
 
-               n2 = bch_btree_node_alloc(b->c, b->level);
+               n2 = bch_btree_node_alloc(b->c, b->level, true);
                if (IS_ERR(n2))
                        goto err_free1;
 
                if (!b->parent) {
-                       n3 = bch_btree_node_alloc(b->c, b->level + 1);
+                       n3 = bch_btree_node_alloc(b->c, b->level + 1, true);
                        if (IS_ERR(n3))
                                goto err_free2;
                }
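The split path (recognizable from trace_bcache_btree_node_split()) is the
opposite case: it runs on behalf of an insert that has to make forward
progress, so all three allocations pass wait = true and may sleep for a
bucket. The checks here are plain IS_ERR(), and failures unwind
newest-first through the goto chain. In outline:

        /*
         * Outline of the allocations above (n2 and n3 only exist when the
         * node actually splits); on error the labels unwind newest-first:
         * err_free2 drops n2, err_free1 drops n1, then err gives up.
         */
        n1 = btree_node_alloc_replacement(b, true);        /* replaces b  */
        n2 = bch_btree_node_alloc(b->c, b->level, true);   /* new sibling */
        if (!b->parent)                                    /* b was root  */
                n3 = bch_btree_node_alloc(b->c, b->level + 1, true);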
 
 void bch_btree_node_write(struct btree *, struct closure *);
 
 void bch_btree_set_root(struct btree *);
-struct btree *bch_btree_node_alloc(struct cache_set *, int);
+struct btree *bch_btree_node_alloc(struct cache_set *, int, bool);
 struct btree *bch_btree_node_get(struct cache_set *, struct bkey *, int, bool);
 
 int bch_btree_insert_check_key(struct btree *, struct btree_op *,
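Finally, the prototype in btree.h changes to match. Putting the policy in
the signature, rather than in, say, some global or per-cache-set state,
means every caller fails to compile until it picks a mode explicitly, which
is how the three GC sites and three split-path sites above were forced to
choose. Illustrative calls, not from this patch:

        n = bch_btree_node_alloc(c, level, true);  /* insert path: may sleep */
        n = bch_btree_node_alloc(c, level, false); /* GC path: -EAGAIN is ok */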