#include "check-integrity.h"
 #include "rcu-string.h"
 
+#ifdef CONFIG_X86
+#include <asm/cpufeature.h>
+#endif
+
 static struct extent_io_ops btree_extent_io_ops;
 static void end_workqueue_fn(struct btrfs_work *work);
 static void free_fs_root(struct btrfs_root *root);
        return btrfs_map_bio(BTRFS_I(inode)->root, rw, bio, mirror_num, 1);
 }
 
+/*
+ * Decide whether a metadata write bio should have its checksum computed
+ * asynchronously (return 1) or inline in the submission path (return 0).
+ *
+ * @inode:     unused by this helper (kept for hook-style signature)
+ * @bio_flags: EXTENT_BIO_* flags attached to the bio being submitted
+ *
+ * Tree-log bios are always checksummed inline.  On x86, inline csumming
+ * is also chosen when the CPU advertises SSE4.2 -- presumably because a
+ * hardware-accelerated crc32c makes the async offload not worthwhile
+ * (NOTE(review): rationale inferred; confirm against the crc32c
+ * implementation actually selected by the kernel config).
+ */
+static int check_async_write(struct inode *inode, unsigned long bio_flags)
+{
+       /* tree-log writes: csum inline rather than via the async workers */
+       if (bio_flags & EXTENT_BIO_TREE_LOG)
+               return 0;
+#ifdef CONFIG_X86
+       /* SSE4.2 present: inline csum is cheap, skip async path */
+       if (cpu_has_xmm4_2)
+               return 0;
+#endif
+       return 1;
+}
+
 static int btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
                                 int mirror_num, unsigned long bio_flags,
                                 u64 bio_offset)
 {
+       int async = check_async_write(inode, bio_flags);
        int ret;
 
        if (!(rw & REQ_WRITE)) {
                        return ret;
                return btrfs_map_bio(BTRFS_I(inode)->root, rw, bio,
                                     mirror_num, 0);
+       } else if (!async) {
+               ret = btree_csum_one_bio(bio);
+               if (ret)
+                       return ret;
+               return btrfs_map_bio(BTRFS_I(inode)->root, rw, bio,
+                                    mirror_num, 0);
        }
 
        /*
 
        struct bio *bio;
        struct extent_io_tree *tree;
        get_extent_t *get_extent;
+       unsigned long bio_flags;
 
        /* tells writepage not to lock the state bits for this range
         * it still does the unlocking
        struct block_device *bdev = fs_info->fs_devices->latest_bdev;
        u64 offset = eb->start;
        unsigned long i, num_pages;
+       unsigned long bio_flags = 0;
        int rw = (epd->sync_io ? WRITE_SYNC : WRITE);
        int ret = 0;
 
        clear_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
        num_pages = num_extent_pages(eb->start, eb->len);
        atomic_set(&eb->io_pages, num_pages);
+       if (btrfs_header_owner(eb) == BTRFS_TREE_LOG_OBJECTID)
+               bio_flags = EXTENT_BIO_TREE_LOG;
+
        for (i = 0; i < num_pages; i++) {
                struct page *p = extent_buffer_page(eb, i);
 
                ret = submit_extent_page(rw, eb->tree, p, offset >> 9,
                                         PAGE_CACHE_SIZE, 0, bdev, &epd->bio,
                                         -1, end_bio_extent_buffer_writepage,
-                                        0, 0, 0);
+                                        0, epd->bio_flags, bio_flags);
+               epd->bio_flags = bio_flags;
                if (ret) {
                        set_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
                        SetPageError(p);
                .tree = tree,
                .extent_locked = 0,
                .sync_io = wbc->sync_mode == WB_SYNC_ALL,
+               .bio_flags = 0,
        };
        int ret = 0;
        int done = 0;
                if (epd->sync_io)
                        rw = WRITE_SYNC;
 
-               ret = submit_one_bio(rw, epd->bio, 0, 0);
+               ret = submit_one_bio(rw, epd->bio, 0, epd->bio_flags);
                BUG_ON(ret < 0); /* -ENOMEM */
                epd->bio = NULL;
        }
                .get_extent = get_extent,
                .extent_locked = 0,
                .sync_io = wbc->sync_mode == WB_SYNC_ALL,
+               .bio_flags = 0,
        };
 
        ret = __extent_writepage(page, wbc, &epd);
                .get_extent = get_extent,
                .extent_locked = 1,
                .sync_io = mode == WB_SYNC_ALL,
+               .bio_flags = 0,
        };
        struct writeback_control wbc_writepages = {
                .sync_mode      = mode,
                .get_extent = get_extent,
                .extent_locked = 0,
                .sync_io = wbc->sync_mode == WB_SYNC_ALL,
+               .bio_flags = 0,
        };
 
        ret = extent_write_cache_pages(tree, mapping, wbc,