goto out;
        }
 
-       ASSERT(extent_map_is_compressed(em));
+       ASSERT(btrfs_extent_map_is_compressed(em));
        compressed_len = em->disk_num_bytes;
 
        cb = alloc_compressed_bio(inode, file_offset, REQ_OP_READ,
 
        cb->len = bbio->bio.bi_iter.bi_size;
        cb->compressed_len = compressed_len;
-       cb->compress_type = extent_map_compression(em);
+       cb->compress_type = btrfs_extent_map_compression(em);
        cb->orig_bbio = bbio;
 
        free_extent_map(em);
 
 static u32 get_extent_max_capacity(const struct btrfs_fs_info *fs_info,
                                   const struct extent_map *em)
 {
-       if (extent_map_is_compressed(em))
+       if (btrfs_extent_map_is_compressed(em))
                return BTRFS_MAX_COMPRESSED;
        return fs_info->max_extent_size;
 }
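
For context (not part of the patch): the helper above caps compressed extents at BTRFS_MAX_COMPRESSED while uncompressed ones use fs_info->max_extent_size. A minimal hypothetical sketch of a caller-side capacity check using the renamed predicate; can_extend() is invented for illustration only:

        /* Hypothetical: would growing @em by @extra bytes exceed its capacity? */
        static bool can_extend(const struct btrfs_fs_info *fs_info,
                               const struct extent_map *em, u64 extra)
        {
                /* Compressed extents are limited to BTRFS_MAX_COMPRESSED. */
                u64 max = btrfs_extent_map_is_compressed(em) ?
                          BTRFS_MAX_COMPRESSED : fs_info->max_extent_size;

                return em->len + extra <= max;
        }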
 
         * to buffered IO.  Don't blame me, this is the price we pay for using
         * the generic code.
         */
-       if (extent_map_is_compressed(em) || em->disk_bytenr == EXTENT_MAP_INLINE) {
+       if (btrfs_extent_map_is_compressed(em) || em->disk_bytenr == EXTENT_MAP_INLINE) {
                free_extent_map(em);
                /*
                 * If we are in a NOWAIT context, return -EAGAIN in order to
 
                BUG_ON(extent_map_end(em) <= cur);
                BUG_ON(end < cur);
 
-               compress_type = extent_map_compression(em);
+               compress_type = btrfs_extent_map_compression(em);
 
                if (compress_type != BTRFS_COMPRESS_NONE)
                        disk_bytenr = em->disk_bytenr;
        block_start = extent_map_block_start(em);
        disk_bytenr = extent_map_block_start(em) + extent_offset;
 
-       ASSERT(!extent_map_is_compressed(em));
+       ASSERT(!btrfs_extent_map_is_compressed(em));
        ASSERT(block_start != EXTENT_MAP_HOLE);
        ASSERT(block_start != EXTENT_MAP_INLINE);
 
 
 
 static inline u64 extent_map_block_len(const struct extent_map *em)
 {
-       if (extent_map_is_compressed(em))
+       if (btrfs_extent_map_is_compressed(em))
                return em->disk_num_bytes;
        return em->len;
 }
                return false;
 
        /* Don't merge compressed extents, we need to know their actual size. */
-       if (extent_map_is_compressed(em))
+       if (btrfs_extent_map_is_compressed(em))
                return false;
 
        if (em->flags & EXTENT_FLAG_LOGGING)
        u64 new_offset;
 
        /* @prev and @next should not be compressed. */
-       ASSERT(!extent_map_is_compressed(prev));
-       ASSERT(!extent_map_is_compressed(next));
+       ASSERT(!btrfs_extent_map_is_compressed(prev));
+       ASSERT(!btrfs_extent_map_is_compressed(next));
 
        /*
         * There are two different cases where @prev and @next can be merged.
                if (em->offset + em->len > em->ram_bytes)
                        dump_extent_map(fs_info, "ram_bytes too small", em);
                if (em->offset + em->len > em->disk_num_bytes &&
-                   !extent_map_is_compressed(em))
+                   !btrfs_extent_map_is_compressed(em))
                        dump_extent_map(fs_info, "disk_num_bytes too small", em);
-               if (!extent_map_is_compressed(em) &&
+               if (!btrfs_extent_map_is_compressed(em) &&
                    em->ram_bytes != em->disk_num_bytes)
                        dump_extent_map(fs_info,
                "ram_bytes mismatch with disk_num_bytes for non-compressed em",
        }
 
        ASSERT(em->len == len);
-       ASSERT(!extent_map_is_compressed(em));
+       ASSERT(!btrfs_extent_map_is_compressed(em));
        ASSERT(em->disk_bytenr < EXTENT_MAP_LAST_BYTE);
        ASSERT(em->flags & EXTENT_FLAG_PINNED);
        ASSERT(!(em->flags & EXTENT_FLAG_LOGGING));
 
 
 struct btrfs_inode;
 
-static inline void extent_map_set_compression(struct extent_map *em,
-                                             enum btrfs_compression_type type)
+static inline void btrfs_extent_map_set_compression(struct extent_map *em,
+                                                   enum btrfs_compression_type type)
 {
        if (type == BTRFS_COMPRESS_ZLIB)
                em->flags |= EXTENT_FLAG_COMPRESS_ZLIB;
                em->flags |= EXTENT_FLAG_COMPRESS_ZSTD;
 }
 
-static inline enum btrfs_compression_type extent_map_compression(const struct extent_map *em)
+static inline enum btrfs_compression_type btrfs_extent_map_compression(
+                                                      const struct extent_map *em)
 {
        if (em->flags & EXTENT_FLAG_COMPRESS_ZLIB)
                return BTRFS_COMPRESS_ZLIB;
  * More efficient way to determine if extent is compressed, instead of using
- * 'extent_map_compression() != BTRFS_COMPRESS_NONE'.
+ * 'btrfs_extent_map_compression() != BTRFS_COMPRESS_NONE'.
  */
-static inline bool extent_map_is_compressed(const struct extent_map *em)
+static inline bool btrfs_extent_map_is_compressed(const struct extent_map *em)
 {
        return (em->flags & (EXTENT_FLAG_COMPRESS_ZLIB |
                             EXTENT_FLAG_COMPRESS_LZO |
 static inline u64 extent_map_block_start(const struct extent_map *em)
 {
        if (em->disk_bytenr < EXTENT_MAP_LAST_BYTE) {
-               if (extent_map_is_compressed(em))
+               if (btrfs_extent_map_is_compressed(em))
                        return em->disk_bytenr;
                return em->disk_bytenr + em->offset;
        }
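
For context (not part of the patch): as the header comment above notes, the renamed predicate is a single mask test on em->flags, so callers that only need a yes/no answer can avoid deriving the compression type. A minimal sketch of the two equivalent call patterns after the rename; handle_compressed() is a placeholder:

        /* Cheap: one bitmask test on em->flags. */
        if (btrfs_extent_map_is_compressed(em))
                handle_compressed(em);

        /* Equivalent result, but derives the enum value first. */
        if (btrfs_extent_map_compression(em) != BTRFS_COMPRESS_NONE)
                handle_compressed(em);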
 
                em->disk_num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
                em->offset = btrfs_file_extent_offset(leaf, fi);
                if (compress_type != BTRFS_COMPRESS_NONE) {
-                       extent_map_set_compression(em, compress_type);
+                       btrfs_extent_map_set_compression(em, compress_type);
                } else {
                        /*
                         * Older kernels can create regular non-hole data
                em->start = 0;
                em->len = fs_info->sectorsize;
                em->offset = 0;
-               extent_map_set_compression(em, compress_type);
+               btrfs_extent_map_set_compression(em, compress_type);
        } else {
                btrfs_err(fs_info,
                          "unknown file extent item type %d, inode %llu, offset %llu, "
 
        em->offset = file_extent->offset;
        em->flags |= EXTENT_FLAG_PINNED;
        if (type == BTRFS_ORDERED_COMPRESSED)
-               extent_map_set_compression(em, file_extent->compression);
+               btrfs_extent_map_set_compression(em, file_extent->compression);
 
        ret = btrfs_replace_extent_map_range(inode, em, true);
        if (ret) {
                count = min_t(u64, count, encoded->len);
                encoded->len = count;
                encoded->unencoded_len = count;
-       } else if (extent_map_is_compressed(em)) {
+       } else if (btrfs_extent_map_is_compressed(em)) {
                *disk_bytenr = em->disk_bytenr;
                /*
                 * Bail if the buffer isn't large enough to return the whole
                encoded->unencoded_len = em->ram_bytes;
                encoded->unencoded_offset = iocb->ki_pos - (em->start - em->offset);
                ret = btrfs_encoded_io_compression_from_extent(fs_info,
-                                                              extent_map_compression(em));
+                                              btrfs_extent_map_compression(em));
                if (ret < 0)
                        goto out_em;
                encoded->compression = ret;
 
                test_err("wrong offset, want 0, have %llu", em->offset);
                goto out;
        }
-       if (extent_map_compression(em) != BTRFS_COMPRESS_ZLIB) {
+       if (btrfs_extent_map_compression(em) != BTRFS_COMPRESS_ZLIB) {
                test_err("unexpected compress type, wanted %d, got %d",
-                        BTRFS_COMPRESS_ZLIB, extent_map_compression(em));
+                        BTRFS_COMPRESS_ZLIB, btrfs_extent_map_compression(em));
                goto out;
        }
        offset = em->start + em->len;
                test_err("wrong offset, want 0, have %llu", em->offset);
                goto out;
        }
-       if (extent_map_compression(em) != BTRFS_COMPRESS_ZLIB) {
+       if (btrfs_extent_map_compression(em) != BTRFS_COMPRESS_ZLIB) {
                test_err("unexpected compress type, wanted %d, got %d",
-                        BTRFS_COMPRESS_ZLIB, extent_map_compression(em));
+                        BTRFS_COMPRESS_ZLIB, btrfs_extent_map_compression(em));
                goto out;
        }
        disk_bytenr = extent_map_block_start(em);
                         em->start, em->offset, orig_start);
                goto out;
        }
-       if (extent_map_compression(em) != BTRFS_COMPRESS_ZLIB) {
+       if (btrfs_extent_map_compression(em) != BTRFS_COMPRESS_ZLIB) {
                test_err("unexpected compress type, wanted %d, got %d",
-                        BTRFS_COMPRESS_ZLIB, extent_map_compression(em));
+                        BTRFS_COMPRESS_ZLIB, btrfs_extent_map_compression(em));
                goto out;
        }
        offset = em->start + em->len;
 
                return 0;
 
        /* If we're compressed we have to save the entire range of csums. */
-       if (extent_map_is_compressed(em)) {
+       if (btrfs_extent_map_is_compressed(em)) {
                csum_offset = 0;
                csum_len = em->disk_num_bytes;
        } else {
                btrfs_set_stack_file_extent_type(&fi, BTRFS_FILE_EXTENT_REG);
 
        block_len = em->disk_num_bytes;
-       compress_type = extent_map_compression(em);
+       compress_type = btrfs_extent_map_compression(em);
        if (compress_type != BTRFS_COMPRESS_NONE) {
                btrfs_set_stack_file_extent_disk_bytenr(&fi, block_start);
                btrfs_set_stack_file_extent_disk_num_bytes(&fi, block_len);