bio_put(bio);
}
-struct block_device *f2fs_target_device(struct f2fs_sb_info *sbi,
- block_t blk_addr, struct bio *bio)
+/*
+ * f2fs_target_device - map an absolute block address to its backing device.
+ *
+ * In the multi-device case, scan sbi->devs[] and return the entry whose
+ * [start_blk, end_blk] range covers @blkaddr.  Otherwise (single device,
+ * or no range matched) fall back to &sbi->devs[0].
+ *
+ * NOTE(review): the removed code fell back to sbi->sb->s_bdev and could
+ * also program @bio for the caller; that side effect is gone.  Confirm
+ * that sbi->devs[0] is populated on every mount (including plain
+ * single-device setups) before the devs[0] fallback is relied on, and
+ * that all callers tolerate an out-of-range @blkaddr silently mapping
+ * to devs[0] rather than being rejected.
+ */
+struct f2fs_dev_info *f2fs_target_device(struct f2fs_sb_info *sbi,
+ block_t blkaddr)
{
- struct block_device *bdev = sbi->sb->s_bdev;
int i;
if (f2fs_is_multi_device(sbi)) {
for (i = 0; i < sbi->s_ndevs; i++) {
- if (FDEV(i).start_blk <= blk_addr &&
- FDEV(i).end_blk >= blk_addr) {
- blk_addr -= FDEV(i).start_blk;
- bdev = FDEV(i).bdev;
- break;
- }
+ if (sbi->devs[i].start_blk <= blkaddr &&
+ sbi->devs[i].end_blk >= blkaddr)
+ return &sbi->devs[i];
}
}
- if (bio) {
- bio_set_dev(bio, bdev);
- bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(blk_addr);
- }
- return bdev;
+
+ return &sbi->devs[0];
}
int f2fs_target_device_index(struct f2fs_sb_info *sbi, block_t blkaddr)
static struct bio *__bio_alloc(struct f2fs_io_info *fio, int npages)
{
struct f2fs_sb_info *sbi = fio->sbi;
+ struct f2fs_dev_info *di = f2fs_target_device(sbi, fio->new_blkaddr);
struct bio *bio;
- bio = bio_alloc_bioset(NULL, npages, 0, GFP_NOIO, &f2fs_bioset);
+ bio = bio_alloc_bioset(di->bdev, npages, 0, GFP_NOIO, &f2fs_bioset);
+
+ bio->bi_iter.bi_sector =
+ SECTOR_FROM_BLOCK(fio->new_blkaddr - di->start_blk);
- f2fs_target_device(sbi, fio->new_blkaddr, bio);
if (is_read_io(fio->op)) {
bio->bi_end_io = f2fs_read_end_io;
bio->bi_private = NULL;
return false;
if (last_blkaddr + 1 != cur_blkaddr)
return false;
- return bio->bi_bdev == f2fs_target_device(sbi, cur_blkaddr, NULL);
+ return bio->bi_bdev == f2fs_target_device(sbi, cur_blkaddr)->bdev;
}
static bool io_type_is_mergeable(struct f2fs_bio_info *io,
pgoff_t first_idx, bool for_write)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ struct f2fs_dev_info *di = f2fs_target_device(sbi, blkaddr);
struct bio *bio;
struct bio_post_read_ctx *ctx = NULL;
unsigned int post_read_steps = 0;
- bio = bio_alloc_bioset(NULL, bio_max_segs(nr_pages), REQ_OP_READ,
+ bio = bio_alloc_bioset(di->bdev, bio_max_segs(nr_pages), REQ_OP_READ,
for_write ? GFP_NOIO : GFP_KERNEL, &f2fs_bioset);
if (!bio)
return ERR_PTR(-ENOMEM);
f2fs_set_bio_crypt_ctx(bio, inode, first_idx, NULL, GFP_NOFS);
- f2fs_target_device(sbi, blkaddr, bio);
+ bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(blkaddr - di->start_blk);
bio->bi_end_io = f2fs_read_end_io;
if (fscrypt_inode_uses_fs_layer_crypto(inode))
struct extent_info ei = {0, };
block_t blkaddr;
unsigned int start_pgofs;
- int bidx = 0;
+ struct f2fs_dev_info *di = NULL;
if (!maxblocks)
return 0;
if (map->m_multidev_dio) {
block_t blk_addr = map->m_pblk;
- bidx = f2fs_target_device_index(sbi, map->m_pblk);
-
- map->m_bdev = FDEV(bidx).bdev;
- map->m_pblk -= FDEV(bidx).start_blk;
- map->m_len = min(map->m_len,
- FDEV(bidx).end_blk + 1 - map->m_pblk);
+ di = f2fs_target_device(sbi, blk_addr);
+ map->m_bdev = di->bdev;
+ map->m_pblk -= di->start_blk;
+ map->m_len =
+ min(map->m_len, di->end_blk + 1 - map->m_pblk);
if (map->m_may_create)
f2fs_update_device_state(sbi, inode->i_ino,
goto skip;
if (map->m_multidev_dio)
- bidx = f2fs_target_device_index(sbi, blkaddr);
+ di = f2fs_target_device(sbi, blkaddr);
if (map->m_len == 0) {
/* preallocated unwritten block should be mapped for fiemap. */
map->m_len = 1;
if (map->m_multidev_dio)
- map->m_bdev = FDEV(bidx).bdev;
+ map->m_bdev = di->bdev;
} else if ((map->m_pblk != NEW_ADDR &&
blkaddr == (map->m_pblk + ofs)) ||
(map->m_pblk == NEW_ADDR && blkaddr == NEW_ADDR) ||
flag == F2FS_GET_BLOCK_PRE_DIO) {
- if (map->m_multidev_dio && map->m_bdev != FDEV(bidx).bdev)
+ if (map->m_multidev_dio && map->m_bdev != di->bdev)
goto sync_out;
ofs++;
map->m_len++;
if (map->m_multidev_dio) {
block_t blk_addr = map->m_pblk;
- bidx = f2fs_target_device_index(sbi, map->m_pblk);
+ di = f2fs_target_device(sbi, map->m_pblk);
- map->m_bdev = FDEV(bidx).bdev;
- map->m_pblk -= FDEV(bidx).start_blk;
+ map->m_bdev = di->bdev;
+ map->m_pblk -= di->start_blk;
if (map->m_may_create)
f2fs_update_device_state(sbi, inode->i_ino,
blk_addr, map->m_len);
f2fs_bug_on(sbi, blk_addr + map->m_len >
- FDEV(bidx).end_blk + 1);
+ di->end_blk + 1);
}
}
int f2fs_submit_page_bio(struct f2fs_io_info *fio);
int f2fs_merge_page_bio(struct f2fs_io_info *fio);
void f2fs_submit_page_write(struct f2fs_io_info *fio);
-struct block_device *f2fs_target_device(struct f2fs_sb_info *sbi,
- block_t blk_addr, struct bio *bio);
+struct f2fs_dev_info *f2fs_target_device(struct f2fs_sb_info *sbi,
+ block_t blkaddr);
int f2fs_target_device_index(struct f2fs_sb_info *sbi, block_t blkaddr);
void f2fs_set_data_blkaddr(struct dnode_of_data *dn);
void f2fs_update_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr);
}
#ifdef CONFIG_BLK_DEV_ZONED
-static inline bool f2fs_blkz_is_seq(struct f2fs_sb_info *sbi, int devi,
- block_t blkaddr)
+/*
+ * Return true if the zone containing @blkaddr on device @di is a
+ * sequential-write zone, per the device's blkz_seq bitmap.
+ *
+ * NOTE(review): @blkaddr is expected to already be device-relative
+ * (multi-device callers subtract di->start_blk first) -- confirm the
+ * single-device caller path passes a compatible address.
+ */
+static inline bool f2fs_blkz_is_seq(struct f2fs_sb_info *sbi,
+ struct f2fs_dev_info *di, block_t blkaddr)
{
- unsigned int zno = blkaddr >> sbi->log_blocks_per_blkz;
-
- return test_bit(zno, FDEV(devi).blkz_seq);
+ return test_bit(blkaddr >> sbi->log_blocks_per_blkz, di->blkz_seq);
}
#endif
trace_f2fs_queue_discard(bdev, blkstart, blklen);
- if (f2fs_is_multi_device(sbi)) {
- int devi = f2fs_target_device_index(sbi, blkstart);
-
- blkstart -= FDEV(devi).start_blk;
- }
+ if (f2fs_is_multi_device(sbi))
+ blkstart -= f2fs_target_device(sbi, blkstart)->start_blk;
mutex_lock(&SM_I(sbi)->dcc_info->cmd_lock);
__update_discard_tree_range(sbi, bdev, lblkstart, blkstart, blklen);
mutex_unlock(&SM_I(sbi)->dcc_info->cmd_lock);
{
sector_t sector, nr_sects;
block_t lblkstart = blkstart;
- int devi = 0;
+ struct f2fs_dev_info *di = f2fs_target_device(sbi, blkstart);
- if (f2fs_is_multi_device(sbi)) {
- devi = f2fs_target_device_index(sbi, blkstart);
- if (blkstart < FDEV(devi).start_blk ||
- blkstart > FDEV(devi).end_blk) {
- f2fs_err(sbi, "Invalid block %x", blkstart);
- return -EIO;
- }
- blkstart -= FDEV(devi).start_blk;
- }
+ if (f2fs_is_multi_device(sbi)) {
+ /*
+ * Keep the original range check: f2fs_target_device() falls
+ * back to devs[0] when no device covers @blkstart, so
+ * translating blindly would address the wrong device.
+ */
+ if (blkstart < di->start_blk || blkstart > di->end_blk) {
+ f2fs_err(sbi, "Invalid block %x", blkstart);
+ return -EIO;
+ }
+ blkstart -= di->start_blk;
+ }
/* For sequential zones, reset the zone write pointer */
- if (f2fs_blkz_is_seq(sbi, devi, blkstart)) {
+ if (f2fs_blkz_is_seq(sbi, di, blkstart)) {
sector = SECTOR_FROM_BLOCK(blkstart);
nr_sects = SECTOR_FROM_BLOCK(blklen);
if (sector & (bdev_zone_sectors(bdev) - 1) ||
nr_sects != bdev_zone_sectors(bdev)) {
+ /*
+ * Compute the device index from the pre-translation
+ * address (lblkstart); blkstart is device-relative here,
+ * so indexing on it would report the wrong device.
+ */
f2fs_err(sbi, "(%d) %s: Unaligned zone reset attempted (block %x + %x)",
- devi, sbi->s_ndevs ? FDEV(devi).path : "",
+ f2fs_target_device_index(sbi, lblkstart),
+ sbi->s_ndevs ? di->path : "",
blkstart, blklen);
return -EIO;
}
block_t i;
int err = 0;
- bdev = f2fs_target_device(sbi, blkstart, NULL);
+ bdev = f2fs_target_device(sbi, blkstart)->bdev;
for (i = blkstart; i < blkstart + blklen; i++, len++) {
if (i != start) {
struct block_device *bdev2 =
- f2fs_target_device(sbi, i, NULL);
+ f2fs_target_device(sbi, i)->bdev;
if (bdev2 != bdev) {
err = __issue_discard_async(sbi, bdev,