int btrfs_drop_extents(struct btrfs_trans_handle *trans,
                       struct btrfs_root *root, struct inode *inode,
                       u64 start, u64 end, u64 locked_end,
-                      u64 inline_limit, u64 *hint_block);
+                      u64 inline_limit, u64 *hint_block, int drop_cache);
 int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
                              struct btrfs_root *root,
                              struct inode *inode, u64 start, u64 end);
 
        return 0;
 }
 
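+/**
+ * unpin_extent_cache - unpin an extent from the cache
+ * @tree:      tree holding the extent
+ * @start:     logical offset of the extent
+ * @len:       length of the extent
+ *
+ * Clear the pinned flag on the extent map that covers this range and
+ * try to merge it with the mappings on either side, now that merging
+ * is allowed again.
+ */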
+int unpin_extent_cache(struct extent_map_tree *tree, u64 start, u64 len)
+{
+       int ret = 0;
+       struct extent_map *merge = NULL;
+       struct rb_node *rb;
+       struct extent_map *em;
+
+       write_lock(&tree->lock);
+       em = lookup_extent_mapping(tree, start, len);
+
+       WARN_ON(!em || em->start != start);
+
+       if (!em)
+               goto out;
+
+       clear_bit(EXTENT_FLAG_PINNED, &em->flags);
+
+       if (em->start != 0) {
+               rb = rb_prev(&em->rb_node);
+               if (rb)
+                       merge = rb_entry(rb, struct extent_map, rb_node);
+               if (rb && mergable_maps(merge, em)) {
+                       em->start = merge->start;
+                       em->len += merge->len;
+                       em->block_len += merge->block_len;
+                       em->block_start = merge->block_start;
+                       merge->in_tree = 0;
+                       rb_erase(&merge->rb_node, &tree->map);
+                       free_extent_map(merge);
+               }
+       }
+
+       rb = rb_next(&em->rb_node);
+       if (rb)
+               merge = rb_entry(rb, struct extent_map, rb_node);
+       if (rb && mergable_maps(em, merge)) {
+               em->len += merge->len;
+               em->block_len += merge->block_len;
+               rb_erase(&merge->rb_node, &tree->map);
+               merge->in_tree = 0;
+               free_extent_map(merge);
+       }
+
+       free_extent_map(em);
+out:
+       write_unlock(&tree->lock);
+       return ret;
+}
+
 /**
  * add_extent_mapping - add new extent map to the extent tree
  * @tree:      tree to insert new map in
 
 void free_extent_map(struct extent_map *em);
 int __init extent_map_init(void);
 void extent_map_exit(void);
+int unpin_extent_cache(struct extent_map_tree *tree, u64 start, u64 len);
 #endif
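
Taken together, the pinned flag, the new drop_cache argument, and
unpin_extent_cache() give writeback the following lifecycle. This is
only a sketch of the intended calling pattern, not code from the
patch: locking, error handling, and the setup of em, trans, and the
ordered extent are all elided.

	struct extent_map_tree *tree = &BTRFS_I(inode)->extent_tree;

	/* when the write begins: insert the mapping and pin it */
	set_bit(EXTENT_FLAG_PINNED, &em->flags);
	add_extent_mapping(tree, em);

	/*
	 * at ordered extent completion: drop the old file extent items,
	 * but leave the pinned mapping in the cache (drop_cache == 0)
	 */
	btrfs_drop_extents(trans, root, inode, start, start + len,
			   start + len, start, &hint_byte, 0);

	/* once the new file extent item is in the btree, allow merging */
	unpin_extent_cache(tree, start, len);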
 
                }
                flags = em->flags;
                if (skip_pinned && test_bit(EXTENT_FLAG_PINNED, &em->flags)) {
-                       write_unlock(&em_tree->lock);
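+                       /*
+                        * keep the tree write-locked until we are completely
+                        * done looking at and freeing this em
+                        */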
                        if (em->start <= start &&
                            (!testend || em->start + em->len >= start + len)) {
                                free_extent_map(em);
+                               write_unlock(&em_tree->lock);
                                break;
                        }
                        if (start < em->start) {
                                start = em->start + em->len;
                        }
                        free_extent_map(em);
+                       write_unlock(&em_tree->lock);
                        continue;
                }
                compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
 noinline int btrfs_drop_extents(struct btrfs_trans_handle *trans,
                       struct btrfs_root *root, struct inode *inode,
                       u64 start, u64 end, u64 locked_end,
-                      u64 inline_limit, u64 *hint_byte)
+                      u64 inline_limit, u64 *hint_byte, int drop_cache)
 {
        u64 extent_end = 0;
        u64 search_start = start;
        int ret;
 
        inline_limit = 0;
-       btrfs_drop_extent_cache(inode, start, end - 1, 0);
+       if (drop_cache)
+               btrfs_drop_extent_cache(inode, start, end - 1, 0);
 
        path = btrfs_alloc_path();
        if (!path)
 
        }
 
        ret = btrfs_drop_extents(trans, root, inode, start,
-                                aligned_end, aligned_end, start, &hint_byte);
+                                aligned_end, aligned_end, start,
+                                &hint_byte, 1);
        BUG_ON(ret);
 
        if (isize > actual_end)
                                   inline_len, compressed_size,
                                   compressed_pages);
        BUG_ON(ret);
-       btrfs_drop_extent_cache(inode, start, aligned_end, 0);
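+       /* the end parameter of btrfs_drop_extent_cache is inclusive */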
+       btrfs_drop_extent_cache(inode, start, aligned_end - 1, 0);
        return 0;
 }
 
        BUG_ON(!path);
 
        path->leave_spinning = 1;
+
+       /*
+        * we may be replacing one extent in the tree with another.
+        * The new extent is pinned in the extent map, and we don't want
+        * to drop it from the cache until it is completely in the btree.
+        *
+        * So, tell btrfs_drop_extents to leave this extent in the cache.
+        * The caller is expected to unpin it and allow it to be merged
+        * with the others.
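+        * (the ordered IO completion path does this via unpin_extent_cache())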
+        */
        ret = btrfs_drop_extents(trans, root, inode, file_pos,
                                 file_pos + num_bytes, locked_end,
-                                file_pos, &hint);
+                                file_pos, &hint, 0);
        BUG_ON(ret);
 
        ins.objectid = inode->i_ino;
        btrfs_mark_buffer_dirty(leaf);
 
        inode_add_bytes(inode, num_bytes);
-       btrfs_drop_extent_cache(inode, file_pos, file_pos + num_bytes - 1, 0);
 
        ins.objectid = disk_bytenr;
        ins.offset = disk_num_bytes;
                                                ordered_extent->len,
                                                compressed, 0, 0,
                                                BTRFS_FILE_EXTENT_REG);
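+               /*
+                * the file extent item is in the btree now; unpin the
+                * cached mapping so it can merge with its neighbors
+                */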
+               unpin_extent_cache(&BTRFS_I(inode)->extent_tree,
+                                  ordered_extent->file_offset,
+                                  ordered_extent->len);
                BUG_ON(ret);
        }
        unlock_extent(io_tree, ordered_extent->file_offset,
                                                 cur_offset,
                                                 cur_offset + hole_size,
                                                 block_end,
-                                                cur_offset, &hint_byte);
+                                                cur_offset, &hint_byte, 1);
                        if (err)
                                break;
                        err = btrfs_insert_file_extent(trans, root,
                                                  0, 0, 0,
                                                  BTRFS_FILE_EXTENT_PREALLOC);
                BUG_ON(ret);
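+               /*
+                * toss any cached mapping for this range so it is re-read
+                * from the new preallocated file extent item
+                */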
+               btrfs_drop_extent_cache(inode, cur_offset,
+                                       cur_offset + ins.offset - 1, 0);
                num_bytes -= ins.offset;
                cur_offset += ins.offset;
                alloc_hint = ins.objectid + ins.offset;
 
                clear_page_dirty_for_io(page);
 
                btrfs_set_extent_delalloc(inode, page_start, page_end);
-
-               unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
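+               /*
+                * dirty the page before dropping the extent lock so the
+                * delalloc state and the page dirty bit stay in sync
+                */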
                set_page_dirty(page);
+               unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
                unlock_page(page);
                page_cache_release(page);
                balance_dirty_pages_ratelimited_nr(inode->i_mapping, 1);
 
        /* punch hole in destination first */
        btrfs_drop_extents(trans, root, inode, off, off + len,
-                          off + len, 0, &hint_byte);
+                          off + len, 0, &hint_byte, 1);
 
        /* clone data */
        key.objectid = src->i_ino;
 
        saved_nbytes = inode_get_bytes(inode);
        /* drop any overlapping extents */
        ret = btrfs_drop_extents(trans, root, inode,
-                        start, extent_end, extent_end, start, &alloc_hint);
+                        start, extent_end, extent_end, start, &alloc_hint, 1);
        BUG_ON(ret);
 
        if (found_type == BTRFS_FILE_EXTENT_REG ||