 }
 }
 
-static void uncache_state(struct extent_state **cached_ptr)
-{
-       if (cached_ptr && (*cached_ptr)) {
-               struct extent_state *state = *cached_ptr;
-               *cached_ptr = NULL;
-               free_extent_state(state);
-       }
-}
-
 /*
  * set some bits on a range in the tree.  This may require allocations or
  * sleeping, so the gfp mask is used to indicate what is allowed.
                                            EXTENT_LOCKED);
        spin_unlock(&BTRFS_I(inode)->io_tree.lock);
 
-       if (state && state->start == failrec->start) {
+       if (state && state->start <= failrec->start &&
+           state->end >= failrec->start + failrec->len - 1) {
                fs_info = BTRFS_I(inode)->root->fs_info;
                num_copies = btrfs_num_copies(fs_info, failrec->logical,
                                              failrec->len);
        bio_put(bio);
 }
 
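+/*
+ * Mark a contiguous range uptodate (when the tree tracks uptodate state)
+ * and unlock it in one call, so end_bio_extent_readpage() can batch what
+ * used to be per-page extent state operations.
+ */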
+static void
+endio_readpage_release_extent(struct extent_io_tree *tree, u64 start, u64 len,
+                             int uptodate)
+{
+       struct extent_state *cached = NULL;
+       u64 end = start + len - 1;
+
+       if (uptodate && tree->track_uptodate)
+               set_extent_uptodate(tree, start, end, &cached, GFP_ATOMIC);
+       unlock_extent_cached(tree, start, end, &cached, GFP_ATOMIC);
+}
+
 /*
  * after a readpage IO is done, we need to:
  * clear the uptodate bits on error
        u64 start;
        u64 end;
        u64 len;
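+       /* Contiguous range of successfully read pages pending release. */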
+       u64 extent_start = 0;
+       u64 extent_len = 0;
        int mirror;
        int ret;
 
 
        do {
                struct page *page = bvec->bv_page;
-               struct extent_state *cached = NULL;
-               struct extent_state *state;
                struct inode *inode = page->mapping->host;
 
                pr_debug("end_bio_extent_readpage: bi_sector=%llu, err=%d, "
                if (++bvec <= bvec_end)
                        prefetchw(&bvec->bv_page->flags);
 
-               spin_lock(&tree->lock);
-               state = find_first_extent_bit_state(tree, start, EXTENT_LOCKED);
-               if (likely(state && state->start == start)) {
-                       /*
-                        * take a reference on the state, unlock will drop
-                        * the ref
-                        */
-                       cache_state(state, &cached);
-               }
-               spin_unlock(&tree->lock);
-
                mirror = io_bio->mirror_num;
                if (likely(uptodate && tree->ops &&
                           tree->ops->readpage_end_io_hook)) {
                                        test_bit(BIO_UPTODATE, &bio->bi_flags);
                                if (err)
                                        uptodate = 0;
-                               uncache_state(&cached);
                                continue;
                        }
                }
 readpage_ok:
-               if (uptodate && tree->track_uptodate) {
-                       set_extent_uptodate(tree, start, end, &cached,
-                                           GFP_ATOMIC);
-               }
-               unlock_extent_cached(tree, start, end, &cached, GFP_ATOMIC);
-
-               if (uptodate) {
+               if (likely(uptodate)) {
                        loff_t i_size = i_size_read(inode);
                        pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
                        unsigned offset;
                }
                unlock_page(page);
                offset += len;
+
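+               /*
+                * Merge contiguous uptodate pages into one pending range and
+                * release it in a single call.  On error, flush the pending
+                * range first and then release just this page's range as
+                * not uptodate.
+                */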
+               if (unlikely(!uptodate)) {
+                       if (extent_len) {
+                               endio_readpage_release_extent(tree,
+                                                             extent_start,
+                                                             extent_len, 1);
+                               extent_start = 0;
+                               extent_len = 0;
+                       }
+                       endio_readpage_release_extent(tree, start,
+                                                     end - start + 1, 0);
+               } else if (!extent_len) {
+                       extent_start = start;
+                       extent_len = end + 1 - start;
+               } else if (extent_start + extent_len == start) {
+                       extent_len += end + 1 - start;
+               } else {
+                       endio_readpage_release_extent(tree, extent_start,
+                                                     extent_len, uptodate);
+                       extent_start = start;
+                       extent_len = end + 1 - start;
+               }
        } while (bvec <= bvec_end);
 
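+       /* Release whatever contiguous range is still pending after the last bvec. */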
+       if (extent_len)
+               endio_readpage_release_extent(tree, extent_start, extent_len,
+                                             uptodate);
        if (io_bio->end_io)
                io_bio->end_io(io_bio, err);
        bio_put(bio);