kmem_cache_free(extent_buffer_cache, eb);
 }
 
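+/*
+ * Nonzero while the eb still has pages with IO in flight, is marked
+ * for writeback, or is dirty; in any of these states its pages must
+ * not be released.
+ */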
+static int extent_buffer_under_io(struct extent_buffer *eb)
+{
+       return (atomic_read(&eb->io_pages) ||
+               test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags) ||
+               test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
+}
+
+/*
+ * Helper for releasing extent buffer pages: for each page from the end
+ * of the buffer down to start_idx, drop the page->private reference (if
+ * the page still belongs to this eb) and the allocation reference.
+ */
+static void btrfs_release_extent_buffer_page(struct extent_buffer *eb,
+                                               unsigned long start_idx)
+{
+       unsigned long index;
+       unsigned long num_pages;
+       struct page *page;
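+       /* Pages of dummy ebs are not attached to the page cache */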
+       int mapped = !test_bit(EXTENT_BUFFER_DUMMY, &eb->bflags);
+
+       BUG_ON(extent_buffer_under_io(eb));
+
+       num_pages = num_extent_pages(eb->start, eb->len);
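+       /*
+        * Walk backwards from one past the last page; bail out early if
+        * the page range is empty or the index computation wrapped.
+        */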
+       index = start_idx + num_pages;
+       if (start_idx >= index)
+               return;
+
+       do {
+               index--;
+               page = extent_buffer_page(eb, index);
+               if (page && mapped) {
+                       spin_lock(&page->mapping->private_lock);
+                       /*
+                        * We do this since we'll remove the pages after we've
+                        * removed the eb from the radix tree, so we could race
+                        * and have this page now attached to a new eb.  So
+                        * only clear page_private if it's still connected to
+                        * this eb.
+                        */
+                       if (PagePrivate(page) &&
+                           page->private == (unsigned long)eb) {
+                               BUG_ON(test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
+                               BUG_ON(PageDirty(page));
+                               BUG_ON(PageWriteback(page));
+                               /*
+                                * We need to make sure we haven't been
+                                * attached to a new eb.
+                                */
+                               ClearPagePrivate(page);
+                               set_page_private(page, 0);
+                               /* One for the page private */
+                               page_cache_release(page);
+                       }
+                       spin_unlock(&page->mapping->private_lock);
+               }
+               if (page) {
+                       /* One for when we allocated the page */
+                       page_cache_release(page);
+               }
+       } while (index != start_idx);
+}
+
+/*
+ * Helper for releasing the extent buffer: drops all page references,
+ * then frees the eb itself.
+ */
+static inline void btrfs_release_extent_buffer(struct extent_buffer *eb)
+{
+       btrfs_release_extent_buffer_page(eb, 0);
+       __free_extent_buffer(eb);
+}
+
 static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree,
                                                   u64 start,
                                                   unsigned long len,
 
        for (i = 0; i < num_pages; i++) {
                p = alloc_page(GFP_ATOMIC);
-               BUG_ON(!p);
+               if (!p) {
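+                       /*
+                        * Unwind: drop the pages already attached to
+                        * "new" and free the eb itself.
+                        */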
+                       btrfs_release_extent_buffer(new);
+                       return NULL;
+               }
                attach_extent_buffer_page(new, p);
                WARN_ON(PageDirty(p));
                SetPageUptodate(p);
        return NULL;
 }
 
-static int extent_buffer_under_io(struct extent_buffer *eb)
-{
-       return (atomic_read(&eb->io_pages) ||
-               test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags) ||
-               test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
-}
-
-/*
- * Helper for releasing extent buffer page.
- */
-static void btrfs_release_extent_buffer_page(struct extent_buffer *eb,
-                                               unsigned long start_idx)
-{
-       unsigned long index;
-       unsigned long num_pages;
-       struct page *page;
-       int mapped = !test_bit(EXTENT_BUFFER_DUMMY, &eb->bflags);
-
-       BUG_ON(extent_buffer_under_io(eb));
-
-       num_pages = num_extent_pages(eb->start, eb->len);
-       index = start_idx + num_pages;
-       if (start_idx >= index)
-               return;
-
-       do {
-               index--;
-               page = extent_buffer_page(eb, index);
-               if (page && mapped) {
-                       spin_lock(&page->mapping->private_lock);
-                       /*
-                        * We do this since we'll remove the pages after we've
-                        * removed the eb from the radix tree, so we could race
-                        * and have this page now attached to the new eb.  So
-                        * only clear page_private if it's still connected to
-                        * this eb.
-                        */
-                       if (PagePrivate(page) &&
-                           page->private == (unsigned long)eb) {
-                               BUG_ON(test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
-                               BUG_ON(PageDirty(page));
-                               BUG_ON(PageWriteback(page));
-                               /*
-                                * We need to make sure we haven't be attached
-                                * to a new eb.
-                                */
-                               ClearPagePrivate(page);
-                               set_page_private(page, 0);
-                               /* One for the page private */
-                               page_cache_release(page);
-                       }
-                       spin_unlock(&page->mapping->private_lock);
-
-               }
-               if (page) {
-                       /* One for when we alloced the page */
-                       page_cache_release(page);
-               }
-       } while (index != start_idx);
-}
-
-/*
- * Helper for releasing the extent buffer.
- */
-static inline void btrfs_release_extent_buffer(struct extent_buffer *eb)
-{
-       btrfs_release_extent_buffer_page(eb, 0);
-       __free_extent_buffer(eb);
-}
-
 static void check_buffer_tree_ref(struct extent_buffer *eb)
 {
        int refs;