        struct list_head list;
 
+       /* for controlling how we free up space for allocations */
+       wait_queue_head_t allocate_wait;
+       wait_queue_head_t flush_wait;
+       int allocating_chunk;
+       int flushing;
+
        /* for block groups in our same type */
        struct list_head block_groups;
        spinlock_t lock;
        struct rw_semaphore groups_sem;
        atomic_t caching_threads;
-
-       int allocating_chunk;
-       wait_queue_head_t wait;
-
-       int flushing;
-       wait_queue_head_t flush_wait;
 };
 
 /*
        struct btrfs_workers endio_meta_write_workers;
        struct btrfs_workers endio_write_workers;
        struct btrfs_workers submit_workers;
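+       /* does the async delalloc flushing for the enospc code */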
+       struct btrfs_workers enospc_workers;
        /*
         * fixup workers take dirty pages that didn't properly go through
         * the cow mechanism and make them safe to write.  It happens
 
                           min_t(u64, fs_devices->num_devices,
                           fs_info->thread_pool_size),
                           &fs_info->generic_worker);
+       btrfs_init_workers(&fs_info->enospc_workers, "enospc",
+                          fs_info->thread_pool_size,
+                          &fs_info->generic_worker);
 
        /* a higher idle thresh on the submit workers makes it much more
         * likely that bios will be send down in a sane order to the
        btrfs_start_workers(&fs_info->endio_meta_workers, 1);
        btrfs_start_workers(&fs_info->endio_meta_write_workers, 1);
        btrfs_start_workers(&fs_info->endio_write_workers, 1);
+       btrfs_start_workers(&fs_info->enospc_workers, 1);
 
        fs_info->bdi.ra_pages *= btrfs_super_num_devices(disk_super);
        fs_info->bdi.ra_pages = max(fs_info->bdi.ra_pages,
        btrfs_stop_workers(&fs_info->endio_meta_write_workers);
        btrfs_stop_workers(&fs_info->endio_write_workers);
        btrfs_stop_workers(&fs_info->submit_workers);
+       btrfs_stop_workers(&fs_info->enospc_workers);
 fail_iput:
        invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
        iput(fs_info->btree_inode);
        btrfs_stop_workers(&fs_info->endio_meta_write_workers);
        btrfs_stop_workers(&fs_info->endio_write_workers);
        btrfs_stop_workers(&fs_info->submit_workers);
+       btrfs_stop_workers(&fs_info->enospc_workers);
 
        btrfs_close_devices(fs_info->fs_devices);
        btrfs_mapping_tree_free(&fs_info->mapping_tree);
 
                meta_sinfo->force_delalloc = 0;
 }
 
+struct async_flush {
+       struct btrfs_root *root;
+       struct btrfs_space_info *info;
+       struct btrfs_work work;
+};
+
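+/*
+ * runs from the enospc workers: flush delalloc for the given root and
+ * wake up anyone waiting on this space_info's flush to finish
+ */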
+static noinline void flush_delalloc_async(struct btrfs_work *work)
+{
+       struct async_flush *async;
+       struct btrfs_root *root;
+       struct btrfs_space_info *info;
+
+       async = container_of(work, struct async_flush, work);
+       root = async->root;
+       info = async->info;
+
+       btrfs_start_delalloc_inodes(root);
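+       /*
+        * wake anyone in wait_on_flush() now so they can recheck the
+        * space counters before we wait on all the ordered extents
+        */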
+       wake_up(&info->flush_wait);
+       btrfs_wait_ordered_extents(root, 0);
+
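+       /* the flush is done, clear the flag and wake the waiters for good */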
+       spin_lock(&info->lock);
+       info->flushing = 0;
+       spin_unlock(&info->lock);
+       wake_up(&info->flush_wait);
+
+       kfree(async);
+}
+
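+/*
+ * sleep until the async flush finishes, or until some space frees up in
+ * this space_info
+ */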
+static void wait_on_flush(struct btrfs_space_info *info)
+{
+       DEFINE_WAIT(wait);
+       u64 used;
+
+       while (1) {
+               prepare_to_wait(&info->flush_wait, &wait,
+                               TASK_UNINTERRUPTIBLE);
+               spin_lock(&info->lock);
+               if (!info->flushing) {
+                       spin_unlock(&info->lock);
+                       break;
+               }
+
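+               /*
+                * bail out early if some space has freed up, even if the
+                * flush hasn't finished yet
+                */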
+               used = info->bytes_used + info->bytes_reserved +
+                       info->bytes_pinned + info->bytes_readonly +
+                       info->bytes_super + info->bytes_root +
+                       info->bytes_may_use + info->bytes_delalloc;
+               if (used < info->total_bytes) {
+                       spin_unlock(&info->lock);
+                       break;
+               }
+               spin_unlock(&info->lock);
+               schedule();
+       }
+       finish_wait(&info->flush_wait, &wait);
+}
+
 static void flush_delalloc(struct btrfs_root *root,
                                 struct btrfs_space_info *info)
 {
+       struct async_flush *async;
        bool wait = false;
 
        spin_lock(&info->lock);
        spin_unlock(&info->lock);
 
        if (wait) {
-               wait_event(info->flush_wait,
-                          !info->flushing);
+               wait_on_flush(info);
                return;
        }
 
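+       /*
+        * hand the flush off to the enospc workers; if we can't allocate
+        * the work item just flush synchronously below
+        */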
+       async = kzalloc(sizeof(*async), GFP_NOFS);
+       if (!async)
+               goto flush;
+
+       async->root = root;
+       async->info = info;
+       async->work.func = flush_delalloc_async;
+
+       btrfs_queue_worker(&root->fs_info->enospc_workers,
+                          &async->work);
+       wait_on_flush(info);
+       return;
+
+flush:
        btrfs_start_delalloc_inodes(root);
        btrfs_wait_ordered_extents(root, 0);
 
        if (!info->allocating_chunk) {
                info->force_alloc = 1;
                info->allocating_chunk = 1;
-               init_waitqueue_head(&info->wait);
+               init_waitqueue_head(&info->allocate_wait);
        } else {
                wait = true;
        }
        spin_unlock(&info->lock);
 
        if (wait) {
-               wait_event(info->wait,
+               wait_event(info->allocate_wait,
                           !info->allocating_chunk);
                return 1;
        }
        spin_lock(&info->lock);
        info->allocating_chunk = 0;
        spin_unlock(&info->lock);
-       wake_up(&info->wait);
+       wake_up(&info->allocate_wait);
 
        if (ret)
                return 0;