        for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
                bytenr = btrfs_sb_offset(i);
                ret = btrfs_rmap_block(fs_info, cache->key.objectid,
-                                      bytenr, 0, &logical, &nr, &stripe_len);
+                                      bytenr, &logical, &nr, &stripe_len);
                if (ret)
                        return ret;
 
 
        return __btrfs_map_block(fs_info, op, logical, length, bbio_ret, 0, 1);
 }
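
A minimal sketch (not part of the patch) of how a caller would use the new five-argument btrfs_rmap_block(): it maps a physical byte offset inside a chunk back to the logical addresses of every stripe covering it, allocates the result array internally, and leaves freeing it to the caller. The helper name and the btrfs_block_group_cache usage are assumptions modeled on the exclude_super_stripes() hunk above; error handling is trimmed.

/*
 * Sketch only: exercise the updated btrfs_rmap_block() signature for one
 * superblock mirror.  'fs_info' and 'cache' are assumed to exist as in the
 * caller shown above.
 */
static int map_one_super_mirror(struct btrfs_fs_info *fs_info,
				struct btrfs_block_group_cache *cache,
				int mirror)
{
	u64 bytenr = btrfs_sb_offset(mirror);
	u64 *logical = NULL;
	int nr = 0;
	int stripe_len = 0;
	int ret;

	/* devid argument is gone: every stripe covering 'bytenr' is reported */
	ret = btrfs_rmap_block(fs_info, cache->key.objectid, bytenr,
			       &logical, &nr, &stripe_len);
	if (ret)
		return ret;

	while (nr--)
		pr_debug("mirror %d maps to logical %llu\n", mirror, logical[nr]);

	kfree(logical);		/* btrfs_rmap_block() allocated the array */
	return 0;
}
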
 
-int btrfs_rmap_block(struct btrfs_fs_info *fs_info,
-                    u64 chunk_start, u64 physical, u64 devid,
-                    u64 **logical, int *naddrs, int *stripe_len)
+int btrfs_rmap_block(struct btrfs_fs_info *fs_info, u64 chunk_start,
+                    u64 physical, u64 **logical, int *naddrs, int *stripe_len)
 {
        struct extent_map *em;
        struct map_lookup *map;
        BUG_ON(!buf); /* -ENOMEM */
 
        for (i = 0; i < map->num_stripes; i++) {
-               if (devid && map->stripes[i].dev->devid != devid)
-                       continue;
                if (map->stripes[i].physical > physical ||
                    map->stripes[i].physical + length <= physical)
                        continue;
 
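With the devid filter removed, the loop above considers every stripe whose physical range covers the requested offset. For illustration only, the skip condition in the hunk above is the negation of this containment test (the helper name is made up, not kernel code):

/*
 * A stripe starting at 'stripe_physical' of 'length' bytes contains
 * 'physical' iff it falls in [stripe_physical, stripe_physical + length).
 * The loop above continues past stripes for which this is false.
 */
static inline bool stripe_contains(u64 stripe_physical, u64 length, u64 physical)
{
	return stripe_physical <= physical &&
	       physical < stripe_physical + length;
}
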
 int btrfs_map_sblock(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
                     u64 logical, u64 *length,
                     struct btrfs_bio **bbio_ret);
-int btrfs_rmap_block(struct btrfs_fs_info *fs_info,
-                    u64 chunk_start, u64 physical, u64 devid,
-                    u64 **logical, int *naddrs, int *stripe_len);
+int btrfs_rmap_block(struct btrfs_fs_info *fs_info, u64 chunk_start,
+                    u64 physical, u64 **logical, int *naddrs, int *stripe_len);
 int btrfs_read_sys_array(struct btrfs_fs_info *fs_info);
 int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info);
 int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,