static int setup_bdi(struct btrfs_fs_info *info, struct backing_dev_info *bdi)
 {
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
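+       /* bdi_init() only exists on 2.6.24 and newer kernels */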
        bdi_init(bdi);
+#endif
        bdi->ra_pages   = default_backing_dev_info.ra_pages * 4;
        bdi->state              = 0;
        bdi->capabilities       = default_backing_dev_info.capabilities;
                bio->bi_end_io = end_io_wq->end_io;
                kfree(end_io_wq);
 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
-               bio_endio(bio, bio->bi_size, err);
+               bio_endio(bio, bio->bi_size, error);
 #else
                bio_endio(bio, error);
 #endif
                             fs_info->btree_inode->i_mapping, GFP_NOFS);
        fs_info->do_barriers = 1;
 
-       INIT_WORK(&fs_info->end_io_work, btrfs_end_io_csum);
 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,18)
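+       /* older kernels still use the three-argument INIT_WORK() that takes a data pointer */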
+       INIT_WORK(&fs_info->end_io_work, btrfs_end_io_csum, fs_info);
        INIT_WORK(&fs_info->trans_work, btrfs_transaction_cleaner, fs_info);
 #else
+       INIT_WORK(&fs_info->end_io_work, btrfs_end_io_csum);
        INIT_DELAYED_WORK(&fs_info->trans_work, btrfs_transaction_cleaner);
 #endif
        BTRFS_I(fs_info->btree_inode)->root = tree_root;
        close_all_devices(fs_info);
        kfree(extent_root);
        kfree(tree_root);
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
        bdi_destroy(&fs_info->bdi);
+#endif
        kfree(fs_info);
        return ERR_PTR(err);
 }
 #endif
        close_all_devices(fs_info);
        btrfs_mapping_tree_free(&fs_info->mapping_tree);
+
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
        bdi_destroy(&fs_info->bdi);
+#endif
 
        kfree(fs_info->extent_root);
        kfree(fs_info->tree_root);
 
                if (err < 0)
                        num_written = err;
        } else if (num_written > 0 && (file->f_flags & O_DIRECT)) {
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
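+               /* older kernels: sync the range through the struct file based helper */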
+               do_sync_file_range(file, start_pos,
+                                     start_pos + num_written - 1,
+                                     SYNC_FILE_RANGE_WRITE |
+                                     SYNC_FILE_RANGE_WAIT_AFTER);
+#else
                do_sync_mapping_range(inode->i_mapping, start_pos,
                                      start_pos + num_written - 1,
                                      SYNC_FILE_RANGE_WRITE |
                                      SYNC_FILE_RANGE_WAIT_AFTER);
-
+#endif
                invalidate_mapping_pages(inode->i_mapping,
                      start_pos >> PAGE_CACHE_SHIFT,
                     (start_pos + num_written - 1) >> PAGE_CACHE_SHIFT);
 
                }
                device->devid = devid;
                device->barriers = 1;
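+               /* make sure the per-device io_lock is initialized before the device is used */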
+               spin_lock_init(&device->io_lock);
                device->name = kstrdup(path, GFP_NOFS);
                if (!device->name) {
                        kfree(device);
        em_tree = &extent_root->fs_info->mapping_tree.map_tree;
        spin_lock(&em_tree->lock);
        ret = add_extent_mapping(em_tree, em);
-       BUG_ON(ret);
        spin_unlock(&em_tree->lock);
+       BUG_ON(ret);
        free_extent_map(em);
        return ret;
 }
 
        spin_lock(&em_tree->lock);
        em = lookup_extent_mapping(em_tree, logical, len);
+       spin_unlock(&em_tree->lock);
        BUG_ON(!em);
 
        BUG_ON(em->start > logical || em->start + em->len < logical);
        else
                ret = 1;
        free_extent_map(em);
-       spin_unlock(&em_tree->lock);
        return ret;
 }
 
 
        spin_lock(&em_tree->lock);
        em = lookup_extent_mapping(em_tree, logical, *length);
+       spin_unlock(&em_tree->lock);
        BUG_ON(!em);
 
        BUG_ON(em->start > logical || em->start + em->len < logical);
            ((map->type & BTRFS_BLOCK_GROUP_RAID1) ||
             (map->type & BTRFS_BLOCK_GROUP_DUP))) {
                stripes_allocated = map->num_stripes;
-               spin_unlock(&em_tree->lock);
                free_extent_map(em);
                kfree(multi);
                goto again;
        *multi_ret = multi;
 out:
        free_extent_map(em);
-       spin_unlock(&em_tree->lock);
        return 0;
 }
 
        length = key->offset;
        spin_lock(&map_tree->map_tree.lock);
        em = lookup_extent_mapping(&map_tree->map_tree, logical, 1);
+       spin_unlock(&map_tree->map_tree.lock);
 
        /* already mapped? */
        if (em && em->start <= logical && em->start + em->len > logical) {
                free_extent_map(em);
-               spin_unlock(&map_tree->map_tree.lock);
                return 0;
        } else if (em) {
                free_extent_map(em);
        }
-       spin_unlock(&map_tree->map_tree.lock);
 
        map = kzalloc(sizeof(*map), GFP_NOFS);
        if (!map)
 
        spin_lock(&map_tree->map_tree.lock);
        ret = add_extent_mapping(&map_tree->map_tree, em);
-       BUG_ON(ret);
        spin_unlock(&map_tree->map_tree.lock);
+       BUG_ON(ret);
        free_extent_map(em);
 
        return 0;
                        return -ENOMEM;
                list_add(&device->dev_list,
                         &root->fs_info->fs_devices->devices);
-               device->total_ios = 0;
+               device->barriers = 1;
                spin_lock_init(&device->io_lock);
        }