return next->block_start == prev->block_start;
}
+/*
+ * Handle the on-disk data extents merge for @prev and @next.
+ *
+ * This only touches disk_bytenr/disk_num_bytes/offset/ram_bytes.
+ * For now, only uncompressed regular extents can be merged.
+ *
+ * Both @prev and @next will be updated to point at the new merged range,
+ * thus one of them must be removed by the caller.
+ */
+static void merge_ondisk_extents(struct extent_map *prev, struct extent_map *next)
+{
+ u64 new_disk_bytenr;
+ u64 new_disk_num_bytes;
+ u64 new_offset;
+
+ /* @prev and @next should not be compressed. */
+ ASSERT(!extent_map_is_compressed(prev));
+ ASSERT(!extent_map_is_compressed(next));
+
+ /*
+ * There are two different cases where @prev and @next can be merged.
+ *
+ * 1) They are referring to the same data extent:
+ *
+ * |<----- data extent A ----->|
+ * |<- prev ->|<- next ->|
+ *
+ * 2) They are referring to different data extents but still adjacent:
+ *
+ * |<-- data extent A -->|<-- data extent B -->|
+ * |<- prev ->|<- next ->|
+ *
+ * The calculation here always merges the data extents first, then updates
+ * @offset using the new data extent.
+ *
+ * For case 1), the merged data extent is the same as the original one.
+ * For case 2), the two adjacent data extents simply merge into one.
+ */
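+ /*
+ * A worked example for case 2), using hypothetical bytenrs:
+ *
+ *   prev: disk_bytenr=4096,  disk_num_bytes=8192, offset=0
+ *   next: disk_bytenr=12288, disk_num_bytes=4096, offset=0
+ *
+ *   new_disk_bytenr    = min(4096, 12288)                      = 4096
+ *   new_disk_num_bytes = max(4096 + 8192, 12288 + 4096) - 4096 = 12288
+ *   new_offset         = 4096 + 0 - 4096                       = 0
+ */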
+ new_disk_bytenr = min(prev->disk_bytenr, next->disk_bytenr);
+ new_disk_num_bytes = max(prev->disk_bytenr + prev->disk_num_bytes,
+ next->disk_bytenr + next->disk_num_bytes) -
+ new_disk_bytenr;
+ new_offset = prev->disk_bytenr + prev->offset - new_disk_bytenr;
+
+ prev->disk_bytenr = new_disk_bytenr;
+ prev->disk_num_bytes = new_disk_num_bytes;
+ prev->ram_bytes = new_disk_num_bytes;
+ prev->offset = new_offset;
+
+ next->disk_bytenr = new_disk_bytenr;
+ next->disk_num_bytes = new_disk_num_bytes;
+ next->ram_bytes = new_disk_num_bytes;
+ next->offset = new_offset;
+}
+
static void try_merge_map(struct btrfs_inode *inode, struct extent_map *em)
{
struct extent_map_tree *tree = &inode->extent_tree;
em->block_len += merge->block_len;
em->block_start = merge->block_start;
em->generation = max(em->generation, merge->generation);
+
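+ /* merge sits before em in the file, so it acts as @prev. */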
+ if (em->disk_bytenr < EXTENT_MAP_LAST_BYTE)
+ merge_ondisk_extents(merge, em);
em->flags |= EXTENT_FLAG_MERGED;
rb_erase(&merge->rb_node, &tree->root);
if (rb && can_merge_extent_map(merge) && mergeable_maps(em, merge)) {
em->len += merge->len;
em->block_len += merge->block_len;
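+ /* Here em precedes merge in the file, so em acts as @prev. */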
+ if (em->disk_bytenr < EXTENT_MAP_LAST_BYTE)
+ merge_ondisk_extents(em, merge);
rb_erase(&merge->rb_node, &tree->root);
RB_CLEAR_NODE(&merge->rb_node);
em->generation = max(em->generation, merge->generation);
!extent_map_is_compressed(em)) {
em->block_start += start_diff;
em->block_len = em->len;
+ em->offset += start_diff;
}
return add_extent_mapping(inode, em, 0);
}
split->block_len = em->block_len;
else
split->block_len = split->len;
+ split->disk_bytenr = em->disk_bytenr;
split->disk_num_bytes = max(split->block_len,
em->disk_num_bytes);
+ split->offset = em->offset;
split->ram_bytes = em->ram_bytes;
} else {
split->orig_start = split->start;
split->block_len = 0;
split->block_start = em->block_start;
+ split->disk_bytenr = em->disk_bytenr;
split->disk_num_bytes = 0;
+ split->offset = 0;
split->ram_bytes = split->len;
}
split->start = end;
split->len = em_end - end;
split->block_start = em->block_start;
+ split->disk_bytenr = em->disk_bytenr;
split->flags = flags;
split->generation = gen;
if (em->block_start < EXTENT_MAP_LAST_BYTE) {
split->disk_num_bytes = max(em->block_len,
em->disk_num_bytes);
-
+ split->offset = em->offset + end - em->start;
split->ram_bytes = em->ram_bytes;
if (compressed) {
split->block_len = em->block_len;
split->orig_start = em->orig_start;
}
} else {
+ split->disk_num_bytes = 0;
+ split->offset = 0;
split->ram_bytes = split->len;
split->orig_start = split->start;
split->block_len = 0;
- split->disk_num_bytes = 0;
}
if (extent_map_in_tree(em)) {
/* First, replace the em with a new extent_map starting from em->start. */
split_pre->start = em->start;
split_pre->len = pre;
+ split_pre->disk_bytenr = new_logical;
+ split_pre->disk_num_bytes = split_pre->len;
+ split_pre->offset = 0;
split_pre->orig_start = split_pre->start;
split_pre->block_start = new_logical;
split_pre->block_len = split_pre->len;
- split_pre->disk_num_bytes = split_pre->block_len;
split_pre->ram_bytes = split_pre->len;
split_pre->flags = flags;
split_pre->generation = em->generation;
/* Insert the middle extent_map. */
split_mid->start = em->start + pre;
split_mid->len = em->len - pre;
+ split_mid->disk_bytenr = em->block_start + pre;
+ split_mid->disk_num_bytes = split_mid->len;
+ split_mid->offset = 0;
split_mid->orig_start = split_mid->start;
split_mid->block_start = em->block_start + pre;
split_mid->block_len = split_mid->len;
- split_mid->disk_num_bytes = split_mid->block_len;
split_mid->ram_bytes = split_mid->len;
split_mid->flags = flags;
split_mid->generation = em->generation;
u64 len, u64 orig_start, u64 block_start,
u64 block_len, u64 disk_num_bytes,
u64 ram_bytes, int compress_type,
+ const struct btrfs_file_extent *file_extent,
int type);
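+/*
+ * Note: struct btrfs_file_extent is defined elsewhere in this series; the
+ * members used below are disk_bytenr, disk_num_bytes, num_bytes, ram_bytes,
+ * offset and compression.
+ */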
static int data_reloc_print_warning_inode(u64 inum, u64 offset, u64 num_bytes,
struct btrfs_root *root = inode->root;
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_ordered_extent *ordered;
+ struct btrfs_file_extent file_extent;
struct btrfs_key ins;
struct page *locked_page = NULL;
struct extent_state *cached = NULL;
lock_extent(io_tree, start, end, &cached);
/* Here we're doing allocation and writeback of the compressed pages */
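+ /* Describe the new data extent; create_io_em() fills the extent_map from it. */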
+ file_extent.disk_bytenr = ins.objectid;
+ file_extent.disk_num_bytes = ins.offset;
+ file_extent.ram_bytes = async_extent->ram_size;
+ file_extent.num_bytes = async_extent->ram_size;
+ file_extent.offset = 0;
+ file_extent.compression = async_extent->compress_type;
+
em = create_io_em(inode, start,
async_extent->ram_size, /* len */
start, /* orig_start */
ins.offset, /* orig_block_len */
async_extent->ram_size, /* ram_bytes */
async_extent->compress_type,
+ &file_extent,
BTRFS_ORDERED_COMPRESSED);
if (IS_ERR(em)) {
ret = PTR_ERR(em);
while (num_bytes > 0) {
struct btrfs_ordered_extent *ordered;
+ struct btrfs_file_extent file_extent;
cur_alloc_size = num_bytes;
ret = btrfs_reserve_extent(root, cur_alloc_size, cur_alloc_size,
extent_reserved = true;
ram_size = ins.offset;
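+ /* Plain COW write: uncompressed, so every size equals the reservation. */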
+ file_extent.disk_bytenr = ins.objectid;
+ file_extent.disk_num_bytes = ins.offset;
+ file_extent.num_bytes = ins.offset;
+ file_extent.ram_bytes = ins.offset;
+ file_extent.offset = 0;
+ file_extent.compression = BTRFS_COMPRESS_NONE;
lock_extent(&inode->io_tree, start, start + ram_size - 1,
&cached);
ins.offset, /* orig_block_len */
ram_size, /* ram_bytes */
BTRFS_COMPRESS_NONE, /* compress_type */
+ &file_extent,
BTRFS_ORDERED_REGULAR /* type */);
if (IS_ERR(em)) {
unlock_extent(&inode->io_tree, start,
nocow_args.num_bytes, /* block_len */
nocow_args.disk_num_bytes, /* orig_block_len */
ram_bytes, BTRFS_COMPRESS_NONE,
+ &nocow_args.file_extent,
BTRFS_ORDERED_PREALLOC);
if (IS_ERR(em)) {
unlock_extent(&inode->io_tree, cur_offset,
hole_em->orig_start = cur_offset;
hole_em->block_start = EXTENT_MAP_HOLE;
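+ /* A hole has no backing data extent. */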
+ hole_em->disk_bytenr = EXTENT_MAP_HOLE;
hole_em->block_len = 0;
hole_em->disk_num_bytes = 0;
hole_em->ram_bytes = hole_size;
}
em->start = EXTENT_MAP_HOLE;
em->orig_start = EXTENT_MAP_HOLE;
+ em->disk_bytenr = EXTENT_MAP_HOLE;
em->len = (u64)-1;
em->block_len = (u64)-1;
const u64 block_len,
const u64 orig_block_len,
const u64 ram_bytes,
- const int type)
+ const int type,
+ const struct btrfs_file_extent *file_extent)
{
struct extent_map *em = NULL;
struct btrfs_ordered_extent *ordered;
em = create_io_em(inode, start, len, orig_start, block_start,
block_len, orig_block_len, ram_bytes,
BTRFS_COMPRESS_NONE, /* compress_type */
- type);
+ file_extent, type);
if (IS_ERR(em))
goto out;
}
{
struct btrfs_root *root = inode->root;
struct btrfs_fs_info *fs_info = root->fs_info;
+ struct btrfs_file_extent file_extent;
struct extent_map *em;
struct btrfs_key ins;
u64 alloc_hint;
if (ret)
return ERR_PTR(ret);
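+ /* Uncompressed direct IO write: mirror the freshly reserved extent. */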
+ file_extent.disk_bytenr = ins.objectid;
+ file_extent.disk_num_bytes = ins.offset;
+ file_extent.num_bytes = ins.offset;
+ file_extent.ram_bytes = ins.offset;
+ file_extent.offset = 0;
+ file_extent.compression = BTRFS_COMPRESS_NONE;
em = btrfs_create_dio_extent(inode, dio_data, start, ins.offset, start,
ins.objectid, ins.offset, ins.offset,
- ins.offset, BTRFS_ORDERED_REGULAR);
+ ins.offset, BTRFS_ORDERED_REGULAR,
+ &file_extent);
btrfs_dec_block_group_reservations(fs_info, ins.objectid);
if (IS_ERR(em))
btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset,
u64 len, u64 orig_start, u64 block_start,
u64 block_len, u64 disk_num_bytes,
u64 ram_bytes, int compress_type,
+ const struct btrfs_file_extent *file_extent,
int type)
{
struct extent_map *em;
em->len = len;
em->block_len = block_len;
em->block_start = block_start;
+ em->disk_bytenr = file_extent->disk_bytenr;
em->disk_num_bytes = disk_num_bytes;
em->ram_bytes = ram_bytes;
em->generation = -1;
+ em->offset = file_extent->offset;
em->flags |= EXTENT_FLAG_PINNED;
if (type == BTRFS_ORDERED_COMPRESSED)
extent_map_set_compression(em, compress_type);
{
const bool nowait = (iomap_flags & IOMAP_NOWAIT);
struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
+ struct btrfs_file_extent file_extent;
struct extent_map *em = *map;
int type;
u64 block_start, orig_start, orig_block_len, ram_bytes;
block_start = em->block_start + (start - em->start);
if (can_nocow_extent(inode, start, &len, &orig_start,
- &orig_block_len, &ram_bytes, NULL, false, false) == 1) {
+ &orig_block_len, &ram_bytes,
+ &file_extent, false, false) == 1) {
bg = btrfs_inc_nocow_writers(fs_info, block_start);
if (bg)
can_nocow = true;
em2 = btrfs_create_dio_extent(BTRFS_I(inode), dio_data, start, len,
orig_start, block_start,
len, orig_block_len,
- ram_bytes, type);
+ ram_bytes, type,
+ &file_extent);
btrfs_dec_nocow_writers(bg);
if (type == BTRFS_ORDERED_PREALLOC) {
free_extent_map(em);
em->orig_start = cur_offset;
em->len = ins.offset;
em->block_start = ins.objectid;
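+ /* The em covers the whole new data extent, from its very beginning. */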
+ em->disk_bytenr = ins.objectid;
+ em->offset = 0;
em->block_len = ins.offset;
em->disk_num_bytes = ins.offset;
em->ram_bytes = ins.offset;
struct extent_changeset *data_reserved = NULL;
struct extent_state *cached_state = NULL;
struct btrfs_ordered_extent *ordered;
+ struct btrfs_file_extent file_extent;
int compression;
size_t orig_count;
u64 start, end;
goto out_delalloc_release;
extent_reserved = true;
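+ /* Encoded writes carry their own unencoded offset and sizes. */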
+ file_extent.disk_bytenr = ins.objectid;
+ file_extent.disk_num_bytes = ins.offset;
+ file_extent.num_bytes = num_bytes;
+ file_extent.ram_bytes = ram_bytes;
+ file_extent.offset = encoded->unencoded_offset;
+ file_extent.compression = compression;
em = create_io_em(inode, start, num_bytes,
start - encoded->unencoded_offset, ins.objectid,
ins.offset, ins.offset, ram_bytes, compression,
- BTRFS_ORDERED_COMPRESSED);
+ &file_extent, BTRFS_ORDERED_COMPRESSED);
if (IS_ERR(em)) {
ret = PTR_ERR(em);
goto out_free_reserved;