        return ret;
 }
 
-static int check_async_write(struct btrfs_inode *bi, unsigned long bio_flags)
+static int check_async_write(struct btrfs_inode *bi)
 {
        if (atomic_read(&bi->sync_writers))
                return 0;
-       if (bio_flags & EXTENT_BIO_TREE_LOG)
-               return 0;
 #ifdef CONFIG_X86
        if (static_cpu_has(X86_FEATURE_XMM4_2))
                return 0;
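
For reference, here is how check_async_write() reads once this hunk is applied: it now answers only "can we checksum this write inline?", with no per-bio flag input. The #endif, the final return 1; and the closing brace are not visible in the hunk above and are assumed from the surrounding source:

/* Post-patch sketch; the tail after the second "return 0;" is assumed. */
static int check_async_write(struct btrfs_inode *bi)
{
        /* A synchronous writer is waiting; do not defer to a worker. */
        if (atomic_read(&bi->sync_writers))
                return 0;
#ifdef CONFIG_X86
        /* SSE4.2 gives hardware CRC32C, so inline checksumming is cheap. */
        if (static_cpu_has(X86_FEATURE_XMM4_2))
                return 0;
#endif
        return 1;
}
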
 {
        struct inode *inode = private_data;
        struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
-       int async = check_async_write(BTRFS_I(inode), bio_flags);
+       int async = check_async_write(BTRFS_I(inode));
        blk_status_t ret;
 
        if (bio_op(bio) != REQ_OP_WRITE) {
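
The hunk above is the only caller, presumably btree_submit_bio_hook(). The async value it computes decides whether metadata checksumming happens inline or is deferred to worker threads; the consuming branch is not part of this diff, but it looks roughly like the sketch below (helper names and signatures assumed from the same era of fs/btrfs/disk-io.c):

        /* Sketch of the code consuming "async"; not part of this diff. */
        if (bio_op(bio) != REQ_OP_WRITE) {
                /* Reads: attach the metadata end_io workqueue and submit. */
                ret = btrfs_bio_wq_end_io(fs_info, bio, BTRFS_WQ_ENDIO_METADATA);
                if (!ret)
                        ret = btrfs_map_bio(fs_info, bio, mirror_num, 0);
        } else if (!async) {
                /* Checksum inline, then submit directly. */
                ret = btree_csum_one_bio(bio);
                if (!ret)
                        ret = btrfs_map_bio(fs_info, bio, mirror_num, 0);
        } else {
                /* Defer checksumming to the worker threads. */
                ret = btrfs_wq_submit_bio(fs_info, inode, bio, mirror_num, 0,
                                          bio_offset, private_data,
                                          __btree_submit_bio_start,
                                          __btree_submit_bio_done);
        }
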
 
        struct bio *bio;
        struct extent_io_tree *tree;
        get_extent_t *get_extent;
-       unsigned long bio_flags;
 
        /* tells writepage not to lock the state bits for this range
         * it still does the unlocking
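
With the bio_flags member gone, struct extent_page_data carries only the in-flight bio and per-call policy bits. Sketched after the patch; the two bitfields at the end are not visible in the hunk and are assumed from the surrounding source:

/* Post-patch sketch of the struct; the trailing bitfields are assumed. */
struct extent_page_data {
        struct bio *bio;
        struct extent_io_tree *tree;
        get_extent_t *get_extent;

        /* tells writepage not to lock the state bits for this range
         * it still does the unlocking
         */
        unsigned int extent_locked:1;

        /* tells the submit_bio code to use REQ_SYNC */
        unsigned int sync_io:1;
};
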
        u64 offset = eb->start;
        u32 nritems;
        unsigned long i, num_pages;
-       unsigned long bio_flags = 0;
        unsigned long start, end;
        unsigned int write_flags = wbc_to_write_flags(wbc) | REQ_META;
        int ret = 0;
        clear_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags);
        num_pages = num_extent_pages(eb->start, eb->len);
        atomic_set(&eb->io_pages, num_pages);
-       if (btrfs_header_owner(eb) == BTRFS_TREE_LOG_OBJECTID)
-               bio_flags = EXTENT_BIO_TREE_LOG;
 
        /* set btree blocks beyond nritems with 0 to avoid stale content. */
        nritems = btrfs_header_nritems(eb);
                                         p, offset >> 9, PAGE_SIZE, 0, bdev,
                                         &epd->bio,
                                         end_bio_extent_buffer_writepage,
-                                        0, epd->bio_flags, bio_flags, false);
-               epd->bio_flags = bio_flags;
+                                        0, 0, 0, false);
                if (ret) {
                        set_btree_ioerr(p);
                        if (PageWriteback(p))
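
With both the epd member and the local variable gone, the flag-carrying arguments of this submit_extent_page() call collapse to constants, and the write-back of epd->bio_flags disappears entirely. Mapping the literals to parameters (names taken from the function's definition in extent_io.c of the same era; the first line of the call is not visible in the hunk, so treat it as an assumption):

                ret = submit_extent_page(REQ_OP_WRITE | write_flags, tree, wbc,
                                         p, offset >> 9, PAGE_SIZE, 0, bdev,
                                         &epd->bio,
                                         end_bio_extent_buffer_writepage,
                                         0,      /* mirror_num */
                                         0,      /* prev_bio_flags, was epd->bio_flags */
                                         0,      /* bio_flags, was the tree-log flag */
                                         false); /* force_bio_submit */
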
                .tree = tree,
                .extent_locked = 0,
                .sync_io = wbc->sync_mode == WB_SYNC_ALL,
-               .bio_flags = 0,
        };
        int ret = 0;
        int done = 0;
        if (epd->bio) {
                int ret;
 
-               ret = submit_one_bio(epd->bio, 0, epd->bio_flags);
+               ret = submit_one_bio(epd->bio, 0, 0);
                BUG_ON(ret < 0); /* -ENOMEM */
                epd->bio = NULL;
        }
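
The same collapse shows up in the epd flush helper above: with no per-epd flag state left, submit_one_bio() takes constant zero for both mirror_num and bio_flags. The full helper after the patch, for context (the function name, flush_epd_write_bio(), is assumed):

/* Post-patch sketch; name and surrounding lines assumed. */
static void flush_epd_write_bio(struct extent_page_data *epd)
{
        if (epd->bio) {
                int ret;

                /* mirror_num = 0, bio_flags = 0: nothing left to thread through */
                ret = submit_one_bio(epd->bio, 0, 0);
                BUG_ON(ret < 0); /* -ENOMEM */
                epd->bio = NULL;
        }
}
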
                .get_extent = get_extent,
                .extent_locked = 0,
                .sync_io = wbc->sync_mode == WB_SYNC_ALL,
-               .bio_flags = 0,
        };
 
        ret = __extent_writepage(page, wbc, &epd);
                .get_extent = get_extent,
                .extent_locked = 1,
                .sync_io = mode == WB_SYNC_ALL,
-               .bio_flags = 0,
        };
        struct writeback_control wbc_writepages = {
                .sync_mode      = mode,
                .get_extent = get_extent,
                .extent_locked = 0,
                .sync_io = wbc->sync_mode == WB_SYNC_ALL,
-               .bio_flags = 0,
        };
 
        ret = extent_write_cache_pages(mapping, wbc, __extent_writepage, &epd,