{
        struct btrfs_root *root = inode->root;
        struct btrfs_fs_info *fs_info = root->fs_info;
-       struct btrfs_space_info *data_sinfo = fs_info->data_sinfo;
-       u64 used;
-       int ret = 0;
-       int need_commit = 2;
-       int have_pinned_space;
+       enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_FLUSH_DATA;
 
        /* Make sure bytes are sectorsize aligned */
        bytes = ALIGN(bytes, fs_info->sectorsize);
 
-       if (btrfs_is_free_space_inode(inode)) {
-               need_commit = 0;
-               ASSERT(current->journal_info);
-       }
-
-again:
-       /* Make sure we have enough space to handle the data first */
-       spin_lock(&data_sinfo->lock);
-       used = btrfs_space_info_used(data_sinfo, true);
-
-       if (used + bytes > data_sinfo->total_bytes) {
-               struct btrfs_trans_handle *trans;
-
-               /*
-                * If we don't have enough free bytes in this space then we need
-                * to alloc a new chunk.
-                */
-               if (!data_sinfo->full) {
-                       u64 alloc_target;
-
-                       data_sinfo->force_alloc = CHUNK_ALLOC_FORCE;
-                       spin_unlock(&data_sinfo->lock);
-
-                       alloc_target = btrfs_data_alloc_profile(fs_info);
-                       /*
-                        * It is ugly that we don't call nolock join
-                        * transaction for the free space inode case here.
-                        * But it is safe because we only do the data space
-                        * reservation for the free space cache in the
-                        * transaction context, the common join transaction
-                        * just increase the counter of the current transaction
-                        * handler, doesn't try to acquire the trans_lock of
-                        * the fs.
-                        */
-                       trans = btrfs_join_transaction(root);
-                       if (IS_ERR(trans))
-                               return PTR_ERR(trans);
-
-                       ret = btrfs_chunk_alloc(trans, alloc_target,
-                                               CHUNK_ALLOC_NO_FORCE);
-                       btrfs_end_transaction(trans);
-                       if (ret < 0) {
-                               if (ret != -ENOSPC)
-                                       return ret;
-                               else {
-                                       have_pinned_space = 1;
-                                       goto commit_trans;
-                               }
-                       }
-
-                       goto again;
-               }
-
-               /*
-                * If we don't have enough pinned space to deal with this
-                * allocation, and no removed chunk in current transaction,
-                * don't bother committing the transaction.
-                */
-               have_pinned_space = __percpu_counter_compare(
-                       &data_sinfo->total_bytes_pinned,
-                       used + bytes - data_sinfo->total_bytes,
-                       BTRFS_TOTAL_BYTES_PINNED_BATCH);
-               spin_unlock(&data_sinfo->lock);
-
-               /* Commit the current transaction and try again */
-commit_trans:
-               if (need_commit) {
-                       need_commit--;
-
-                       if (need_commit > 0) {
-                               btrfs_start_delalloc_roots(fs_info, -1);
-                               btrfs_wait_ordered_roots(fs_info, U64_MAX, 0,
-                                                        (u64)-1);
-                       }
-
-                       trans = btrfs_join_transaction(root);
-                       if (IS_ERR(trans))
-                               return PTR_ERR(trans);
-                       if (have_pinned_space >= 0 ||
-                           test_bit(BTRFS_TRANS_HAVE_FREE_BGS,
-                                    &trans->transaction->flags) ||
-                           need_commit > 0) {
-                               ret = btrfs_commit_transaction(trans);
-                               if (ret)
-                                       return ret;
-                               /*
-                                * The cleaner kthread might still be doing iput
-                                * operations. Wait for it to finish so that
-                                * more space is released.  We don't need to
-                                * explicitly run the delayed iputs here because
-                                * the commit_transaction would have woken up
-                                * the cleaner.
-                                */
-                               ret = btrfs_wait_on_delayed_iputs(fs_info);
-                               if (ret)
-                                       return ret;
-                               goto again;
-                       } else {
-                               btrfs_end_transaction(trans);
-                       }
-               }
-
-               trace_btrfs_space_reservation(fs_info,
-                                             "space_info:enospc",
-                                             data_sinfo->flags, bytes, 1);
-               return -ENOSPC;
-       }
-       btrfs_space_info_update_bytes_may_use(fs_info, data_sinfo, bytes);
-       spin_unlock(&data_sinfo->lock);
+       if (btrfs_is_free_space_inode(inode))
+               flush = BTRFS_RESERVE_FLUSH_FREE_SPACE_INODE;
 
-       return 0;
+       return btrfs_reserve_data_bytes(fs_info, bytes, flush);
 }
 
 int btrfs_check_data_free_space(struct btrfs_inode *inode,
 
        }
        return ret;
 }
+
+/**
+ * btrfs_reserve_data_bytes - try to reserve data bytes for an allocation
+ * @fs_info: the filesystem
+ * @bytes: the number of bytes we need
+ * @flush: how we are allowed to flush
+ *
+ * This will reserve bytes from the data space info.  If there is not enough
+ * space then we will attempt to flush space as specified by flush.
+ *
+ * Return: 0 if the reservation was made, -ENOSPC if space could not be found
+ * even after flushing.
+ */
+int btrfs_reserve_data_bytes(struct btrfs_fs_info *fs_info, u64 bytes,
+                            enum btrfs_reserve_flush_enum flush)
+{
+       struct btrfs_space_info *data_sinfo = fs_info->data_sinfo;
+       const enum btrfs_flush_state *states = NULL;
+       u64 used;
+       int states_nr = 0;
+       int commit_cycles = 2;
+       int ret = -ENOSPC;
+
+       /*
+        * NOTE(review): a task that already holds a transaction handle
+        * (current->journal_info set) must not ask for the full
+        * BTRFS_RESERVE_FLUSH_DATA mode — presumably because full flushing can
+        * commit a transaction; confirm against flush_space().
+        */
+       ASSERT(!current->journal_info || flush != BTRFS_RESERVE_FLUSH_DATA);
+
+       /*
+        * Only the full flush mode walks the data flush states.  For any other
+        * flush mode, states stays NULL and states_nr stays 0, so the flush
+        * loop below is a no-op and the only reclaim attempted is the forced
+        * chunk allocation.
+        */
+       if (flush == BTRFS_RESERVE_FLUSH_DATA) {
+               states = data_flush_states;
+               states_nr = ARRAY_SIZE(data_flush_states);
+       }
+
+       spin_lock(&data_sinfo->lock);
+       /* "again" is always entered with data_sinfo->lock held. */
+again:
+       used = btrfs_space_info_used(data_sinfo, true);
+
+       if (used + bytes > data_sinfo->total_bytes) {
+               u64 prev_total_bytes = data_sinfo->total_bytes;
+               int flush_state = 0;
+
+               spin_unlock(&data_sinfo->lock);
+
+               /*
+                * Everybody can force chunk allocation, so try this first to
+                * see if we can just bail here and make our reservation.
+                */
+               flush_space(fs_info, data_sinfo, bytes, ALLOC_CHUNK_FORCE);
+               spin_lock(&data_sinfo->lock);
+               /* total_bytes grew: a chunk was added, retry the reservation. */
+               if (prev_total_bytes < data_sinfo->total_bytes)
+                       goto again;
+               spin_unlock(&data_sinfo->lock);
+
+               /*
+                * Cycle through the rest of the flushing options for our flush
+                * type, then try again.
+                */
+               while (flush_state < states_nr) {
+                       /*
+                        * U64_MAX: no specific byte target for this state —
+                        * presumably "flush as much as possible"; see
+                        * flush_space() for the exact semantics.
+                        */
+                       u64 flush_bytes = U64_MAX;
+
+                       /*
+                        * Previously we unconditionally committed the
+                        * transaction twice before finally checking against
+                        * pinned space before committing the final time.  We
+                        * also skipped flushing delalloc the final pass
+                        * through.
+                        */
+                       if (!commit_cycles) {
+                               if (states[flush_state] == FLUSH_DELALLOC_WAIT) {
+                                       flush_state++;
+                                       continue;
+                               }
+                               if (states[flush_state] == COMMIT_TRANS)
+                                       flush_bytes = bytes;
+                       }
+
+                       flush_space(fs_info, data_sinfo, flush_bytes,
+                                   states[flush_state]);
+                       flush_state++;
+               }
+
+               /*
+                * Both passes through the flush states are exhausted; fall out
+                * with ret still -ENOSPC.
+                */
+               if (!commit_cycles)
+                       goto out;
+
+               commit_cycles--;
+               spin_lock(&data_sinfo->lock);
+               goto again;
+       }
+       /* Enough free space: take the reservation while holding the lock. */
+       btrfs_space_info_update_bytes_may_use(fs_info, data_sinfo, bytes);
+       ret = 0;
+       spin_unlock(&data_sinfo->lock);
+out:
+       /* ret is only ever 0 or -ENOSPC here; trace the ENOSPC case. */
+       if (ret)
+               trace_btrfs_space_reservation(fs_info,
+                                             "space_info:enospc",
+                                             data_sinfo->flags, bytes, 1);
+       return ret;
+}