void btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr)
 {
        balance_dirty_pages_ratelimited_nr(
-                       root->fs_info->btree_inode->i_mapping, 1);
+                                  root->fs_info->btree_inode->i_mapping, 1);
 }
 
 void btrfs_set_buffer_defrag(struct extent_buffer *buf)
 
 {
        int ret;
        struct btrfs_block_group_cache *cache = *cache_ret;
+       struct extent_io_tree *free_space_cache;
+       struct extent_state *state;
        u64 last;
        u64 start = 0;
-       u64 end = 0;
        u64 cache_miss = 0;
        u64 total_fs_bytes;
        int wrapped = 0;
 
        if (!cache) {
                goto out;
        }
        total_fs_bytes = btrfs_super_total_bytes(&root->fs_info->super_copy);
+       free_space_cache = &root->fs_info->free_space_cache;
+
 again:
        ret = cache_block_group(root, cache);
        if (ret)
                goto out;
 
        last = max(search_start, cache->key.objectid);
 
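+       /*
+        * free space is recorded as EXTENT_DIRTY bits in the
+        * free_space_cache tree; take its lock once and walk the
+        * cached states directly instead of redoing the rbtree
+        * search for every candidate range
+        */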
+       spin_lock_irq(&free_space_cache->lock);
+       state = find_first_extent_bit_state(free_space_cache, last, EXTENT_DIRTY);
        while(1) {
-               ret = find_first_extent_bit(&root->fs_info->free_space_cache,
-                                           last, &start, &end, EXTENT_DIRTY);
-               if (ret) {
+               if (!state) {
                        if (!cache_miss)
                                cache_miss = last;
+                       spin_unlock_irq(&free_space_cache->lock);
                        goto new_group;
                }
 
-               start = max(last, start);
-               last = end + 1;
+               start = max(last, state->start);
+               last = state->end + 1;
                if (last - start < num) {
                        if (last == cache->key.objectid + cache->key.offset)
                                cache_miss = start;
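+                       /* this free range is too small: step to the
+                        * next cached state with EXTENT_DIRTY set
+                        * rather than restarting the search
+                        */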
+                       do {
+                               state = extent_state_next(state);
+                       } while(state && !(state->state & EXTENT_DIRTY));
                        continue;
                }
+               spin_unlock_irq(&free_space_cache->lock);
                if (data != BTRFS_BLOCK_GROUP_MIXED &&
                    start + num > cache->key.objectid + cache->key.offset)
                        goto new_group;
        struct btrfs_block_group_cache *block_group;
        int full_scan = 0;
        int wrapped = 0;
+       int empty_cluster;
        u64 cached_start;
 
        WARN_ON(num_bytes < root->sectorsize);
                data = BTRFS_BLOCK_GROUP_MIXED;
        }
 
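+       /* empty_cluster is the extra space added to the search size
+        * when allocating near a last-allocation hint
+        */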
-       if (!data)
+       if (!data) {
                last_ptr = &root->fs_info->last_alloc;
+               empty_cluster = 128 * 1024;
+       }
 
-       if (data && btrfs_test_opt(root, SSD))
+       if (data && btrfs_test_opt(root, SSD)) {
                last_ptr = &root->fs_info->last_data_alloc;
+               empty_cluster = 2 * 1024 * 1024;
+       }
 
        if (last_ptr) {
                if (*last_ptr)
                        hint_byte = *last_ptr;
                else {
                        hint_byte = hint_byte &
                                ~((u64)BTRFS_BLOCK_GROUP_SIZE - 1);
-                       empty_size += 2 * 1024 * 1024;
+                       empty_size += empty_cluster;
                }
+               search_start = max(search_start, hint_byte);
        }
 
        search_end = min(search_end,
        if (last_ptr && *last_ptr && search_start != *last_ptr) {
                *last_ptr = 0;
                if (!empty_size) {
-                       empty_size += 2 * 1024 * 1024;
+                       empty_size += empty_cluster;
                        total_needed += empty_size;
                }
                search_start = find_search_start(root, &block_group,
 
                        state->start = other->start;
                        other->tree = NULL;
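+                       /* 'state' has absorbed other's range, so it
+                        * can take other's place in the lookup cache
+                        */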
                        if (tree->last == other)
-                               tree->last = NULL;
+                               tree->last = state;
                        rb_erase(&other->rb_node, &tree->state);
                        free_extent_state(other);
                }
                        other->start = state->start;
                        state->tree = NULL;
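+                       /* 'other' has absorbed state's range; keep
+                        * the lookup cache pointing at a live state
+                        */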
                        if (tree->last == state)
-                               tree->last = NULL;
+                               tree->last = other;
                        rb_erase(&state->rb_node, &tree->state);
                        free_extent_state(state);
                }
        if (delete || state->state == 0) {
                if (state->tree) {
                        clear_state_cb(tree, state, state->state);
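+                       /* the cached state is being freed; point the
+                        * lookup cache at its successor instead of
+                        * dropping it
+                        */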
-                       if (tree->last == state)
-                               tree->last = NULL;
+                       if (tree->last == state) {
+                               tree->last = extent_state_next(state);
+                       }
                        rb_erase(&state->rb_node, &tree->state);
                        state->tree = NULL;
                        free_extent_state(state);
 }
 EXPORT_SYMBOL(find_first_extent_bit);
 
+struct extent_state *find_first_extent_bit_state(struct extent_io_tree *tree,
+                                                u64 start, int bits)
+{
+       struct rb_node *node;
+       struct extent_state *state;
+
+       /*
+        * this search will find all the extents that end after
+        * our range starts.
+        */
+       node = tree_search(tree, start);
+       if (!node || IS_ERR(node)) {
+               goto out;
+       }
+
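+       /*
+        * walk forward from there until we find a state with one of
+        * the requested bits set
+        */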
+       while(1) {
+               state = rb_entry(node, struct extent_state, rb_node);
+               if (state->end >= start && (state->state & bits)) {
+                       return state;
+               }
+               node = rb_next(node);
+               if (!node)
+                       break;
+       }
+out:
+       return NULL;
+}
+EXPORT_SYMBOL(find_first_extent_bit_state);
+
 u64 find_lock_delalloc_range(struct extent_io_tree *tree,
                             u64 *start, u64 *end, u64 max_bytes)
 {
 
 
 struct extent_map_tree;
 
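+/*
+ * return the extent_state following 'state' in the tree, or NULL if
+ * it is the last one.  The tree lock must be held.
+ */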
+static inline struct extent_state *extent_state_next(struct extent_state *state)
+{
+       struct rb_node *node;
+       node = rb_next(&state->rb_node);
+       if (!node)
+               return NULL;
+       return rb_entry(node, struct extent_state, rb_node);
+}
+
 typedef struct extent_map *(get_extent_t)(struct inode *inode,
                                          struct page *page,
                                          size_t page_offset,
                     gfp_t mask);
 int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
                          u64 *start_ret, u64 *end_ret, int bits);
+struct extent_state *find_first_extent_bit_state(struct extent_io_tree *tree,
+                                                u64 start, int bits);
 int extent_invalidatepage(struct extent_io_tree *tree,
                          struct page *page, unsigned long offset);
 int extent_write_full_page(struct extent_io_tree *tree, struct page *page,