BUG_ON(ret); /* -ENOMEM */
                        }
  
 -                      ret = btrfs_map_bio(root, WRITE, bio, 0, 1);
 +                      ret = btrfs_map_bio(root, bio, 0, 1);
-                       BUG_ON(ret); /* -ENOMEM */
+                       if (ret) {
+                               bio->bi_error = ret;
+                               bio_endio(bio);
+                       }
  
                        bio_put(bio);
  
                BUG_ON(ret); /* -ENOMEM */
        }
  
 -      ret = btrfs_map_bio(root, WRITE, bio, 0, 1);
 +      ret = btrfs_map_bio(root, bio, 0, 1);
-       BUG_ON(ret); /* -ENOMEM */
+       if (ret) {
+               bio->bi_error = ret;
+               bio_endio(bio);
+       }
  
        bio_put(bio);
        return 0;
 
  {
        int ret = 0;
        if (tree->ops && tree->ops->merge_bio_hook)
 -              ret = tree->ops->merge_bio_hook(rw, page, offset, size, bio,
 +              ret = tree->ops->merge_bio_hook(page, offset, size, bio,
                                                bio_flags);
-       BUG_ON(ret < 0);
        return ret;
  
  }
                        err = __extent_read_full_page(tree, page,
                                                      get_extent, &bio,
                                                      mirror_num, &bio_flags,
 -                                                    READ | REQ_META);
 +                                                    REQ_META);
-                       if (err)
+                       if (err) {
                                ret = err;
+                               /*
+                                * We passed &bio to __extent_read_full_page
+                                * above, so if it returns an error the
+                                * current page failed to add itself to the
+                                * bio and has already been unlocked.
+                                *
+                                * We must decrement io_pages ourselves.
+                                */
+                               atomic_dec(&eb->io_pages);
+                       }
                } else {
                        unlock_page(page);
                }
 
  /*
   * extent_io.c merge_bio_hook, this must check the chunk tree to make sure
   * we don't create bios that span stripes or chunks
+  *
+  * Return 1 if the page cannot be merged into the bio,
+  * 0 if the page can be merged into the bio, or a
+  * negative errno on failure.
   */
 -int btrfs_merge_bio_hook(int rw, struct page *page, unsigned long offset,
 +int btrfs_merge_bio_hook(struct page *page, unsigned long offset,
                         size_t size, struct bio *bio,
                         unsigned long bio_flags)
  {
  
        length = bio->bi_iter.bi_size;
        map_length = length;
 -      ret = btrfs_map_block(root->fs_info, rw, logical,
 +      ret = btrfs_map_block(root->fs_info, bio_op(bio), logical,
                              &map_length, NULL, 0);
-       /* Will always return 0 with map_multi == NULL */
-       BUG_ON(ret < 0);
+       if (ret < 0)
+               return ret;
        if (map_length < length + size)
                return 1;
        return 0;