        int error;
 
        btrfs_bio_counter_inc_blocked(fs_info);
-       error = __btrfs_map_block(fs_info, btrfs_op(bio), logical, &map_length,
-                                 &bioc, &smap, &mirror_num, 1);
+       error = btrfs_map_block(fs_info, btrfs_op(bio), logical, &map_length,
+                               &bioc, &smap, &mirror_num, 1);
        if (error) {
                ret = errno_to_blk_status(error);
                goto fail;
 
        struct btrfs_device *device;
 
        length = len;
-       ret = __btrfs_map_block(fs_info, BTRFS_MAP_READ, bytenr, &length, &bioc,
-                               NULL, &mirror_num, 0);
+       ret = btrfs_map_block(fs_info, BTRFS_MAP_READ, bytenr, &length, &bioc,
+                             NULL, &mirror_num, 0);
        if (ret) {
                block_ctx_out->start = 0;
                block_ctx_out->dev_bytenr = 0;
 
  *   All new writes will be written to both target and source devices, so even
 *   if replace gets canceled, the source device still contains up-to-date data.
  *
- *   Location:         handle_ops_on_dev_replace() from __btrfs_map_block()
+ *   Location:         handle_ops_on_dev_replace() from btrfs_map_block()
  *   Start:            btrfs_dev_replace_start()
  *   End:              btrfs_dev_replace_finishing()
  *   Content:          Latest data/metadata
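
A rough illustration only, not the kernel implementation: the real handle_ops_on_dev_replace() is considerably more involved, but conceptually every write stripe aimed at the source device gains a copy on the replace target at the same physical offset, which is what keeps both devices in sync during the replace window. demo_mirror_to_replace_target(), srcdev and tgtdev below are hypothetical names introduced for this sketch.

static void demo_mirror_to_replace_target(struct btrfs_io_context *bioc,
                                          struct btrfs_device *srcdev,
                                          struct btrfs_device *tgtdev)
{
        /* Sketch: assumes bioc->stripes[] was allocated with room for the copies. */
        const int orig_nr = bioc->num_stripes;
        int i;

        for (i = 0; i < orig_nr; i++) {
                if (bioc->stripes[i].dev != srcdev)
                        continue;
                /* Duplicate the stripe onto the target at the same physical offset. */
                bioc->stripes[bioc->num_stripes].dev = tgtdev;
                bioc->stripes[bioc->num_stripes].physical =
                        bioc->stripes[i].physical;
                bioc->num_stripes++;
        }
}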
 
                        stripe_offset + (stripe_nr << BTRFS_STRIPE_LEN_SHIFT);
 }
 
-int __btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
-                     u64 logical, u64 *length,
-                     struct btrfs_io_context **bioc_ret,
-                     struct btrfs_io_stripe *smap, int *mirror_num_ret,
-                     int need_raid_map)
+int btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
+                   u64 logical, u64 *length,
+                   struct btrfs_io_context **bioc_ret,
+                   struct btrfs_io_stripe *smap, int *mirror_num_ret,
+                   int need_raid_map)
 {
        struct extent_map *em;
        struct map_lookup *map;
                     u64 logical, u64 *length,
                     struct btrfs_io_context **bioc_ret)
 {
-       return __btrfs_map_block(fs_info, op, logical, length, bioc_ret,
-                                NULL, NULL, 1);
+       return btrfs_map_block(fs_info, op, logical, length, bioc_ret,
+                              NULL, NULL, 1);
 }
 
 
        ASSERT(mirror_num > 0);
 
-       ret = __btrfs_map_block(fs_info, BTRFS_MAP_WRITE, logical, &map_length,
-                               &bioc, smap, &mirror_ret, true);
+       ret = btrfs_map_block(fs_info, BTRFS_MAP_WRITE, logical, &map_length,
+                             &bioc, smap, &mirror_ret, true);
        if (ret < 0)
                return ret;
 
 
 int btrfs_map_sblock(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
                     u64 logical, u64 *length,
                     struct btrfs_io_context **bioc_ret);
-int __btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
-                     u64 logical, u64 *length,
-                     struct btrfs_io_context **bioc_ret,
-                     struct btrfs_io_stripe *smap, int *mirror_num_ret,
-                     int need_raid_map);
+int btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
+                   u64 logical, u64 *length,
+                   struct btrfs_io_context **bioc_ret,
+                   struct btrfs_io_stripe *smap, int *mirror_num_ret,
+                   int need_raid_map);
 int btrfs_map_repair_block(struct btrfs_fs_info *fs_info,
                           struct btrfs_io_stripe *smap, u64 logical,
                           u32 length, int mirror_num);
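
For reference, a minimal sketch of calling the renamed helper with the signature declared above. demo_map_read() is a hypothetical caller, not part of this patch; it is modeled on the BTRFS_MAP_READ call site touched earlier (NULL smap, need_raid_map == 0) and assumes btrfs_put_bioc() as the release helper for the returned io context.

static int demo_map_read(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
{
        struct btrfs_io_context *bioc = NULL;
        u64 map_length = len;
        int mirror_num = 0;
        int ret;

        /* NULL smap and need_raid_map == 0, as in the read-side caller above. */
        ret = btrfs_map_block(fs_info, BTRFS_MAP_READ, logical, &map_length,
                              &bioc, NULL, &mirror_num, 0);
        if (ret)
                return ret;

        /* Consume bioc->stripes[0 .. bioc->num_stripes - 1] here. */
        btrfs_put_bioc(bioc);
        return 0;
}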