xfs_rgnumber_t sb_rgcount; /* number of realtime groups */
xfs_rtxlen_t sb_rgextents; /* size of a realtime group in rtx */
-
uint8_t sb_rgblklog; /* rt group number shift */
uint8_t sb_pad[7]; /* zeroes */
+ xfs_fsblock_t sb_rtstart; /* start of internal RT section in BB */
/* must be padded to 64 bit alignment */
} xfs_sb_t;
__be64 sb_metadirino; /* metadata directory tree root */
__be32 sb_rgcount; /* # of realtime groups */
__be32 sb_rgextents; /* size of rtgroup in rtx */
-
__u8 sb_rgblklog; /* rt group number shift */
__u8 sb_pad[7]; /* zeroes */
+ __be64 sb_rtstart; /* start of internal RT section in BB */
/*
* The size of this structure must be padded to 64 bit alignment.
{
struct xfs_mount *mp = xg->xg_mount;
struct xfs_groups *g = &mp->m_groups[xg->xg_type];
+ uint64_t fsb;
if (g->has_daddr_gaps)
- return XFS_FSB_TO_BB(mp, xfs_gbno_to_fsb(xg, gbno));
- return XFS_FSB_TO_BB(mp, (xfs_fsblock_t)xg->xg_gno * g->blocks + gbno);
+ fsb = xfs_gbno_to_fsb(xg, gbno);
+ else
+ fsb = (xfs_fsblock_t)xg->xg_gno * g->blocks + gbno;
+
+ return XFS_FSB_TO_BB(mp, fsb) + g->start_daddr;
}
static inline uint32_t
16299260424LL);
/* superblock field checks we got from xfs/122 */
- XFS_CHECK_STRUCT_SIZE(struct xfs_dsb, 288);
- XFS_CHECK_STRUCT_SIZE(struct xfs_sb, 288);
+ XFS_CHECK_STRUCT_SIZE(struct xfs_dsb, 296);
+ XFS_CHECK_STRUCT_SIZE(struct xfs_sb, 296);
XFS_CHECK_SB_OFFSET(sb_magicnum, 0);
XFS_CHECK_SB_OFFSET(sb_blocksize, 4);
XFS_CHECK_SB_OFFSET(sb_dblocks, 8);
XFS_CHECK_SB_OFFSET(sb_rgextents, 276);
XFS_CHECK_SB_OFFSET(sb_rgblklog, 280);
XFS_CHECK_SB_OFFSET(sb_pad, 281);
+ XFS_CHECK_SB_OFFSET(sb_rtstart, 288);
}
#endif /* __XFS_ONDISK_H */
rtbno = (xfs_rtblock_t)rgno * g->blocks + (rtbno & g->blkmask);
}
- return XFS_FSB_TO_BB(mp, rtbno);
+ return XFS_FSB_TO_BB(mp, rtbno) + mp->m_sb.sb_rtstart;
}
static inline xfs_rtblock_t
xfs_daddr_t daddr)
{
struct xfs_groups *g = &mp->m_groups[XG_TYPE_RTG];
- xfs_rfsblock_t bno = XFS_BB_TO_FSBT(mp, daddr);
+ xfs_rfsblock_t bno;
+ bno = XFS_BB_TO_FSBT(mp, daddr - mp->m_sb.sb_rtstart);
if (xfs_has_rtgroups(mp) && !g->has_daddr_gaps) {
xfs_rgnumber_t rgno;
uint32_t rgbno;
to->sb_rgcount = 1;
to->sb_rgextents = 0;
}
+
+ if (to->sb_features_incompat & XFS_SB_FEAT_INCOMPAT_ZONED)
+ to->sb_rtstart = be64_to_cpu(from->sb_rtstart);
+ else
+ to->sb_rtstart = 0;
}
void
to->sb_rbmino = cpu_to_be64(0);
to->sb_rsumino = cpu_to_be64(0);
}
+
+ if (from->sb_features_incompat & XFS_SB_FEAT_INCOMPAT_ZONED)
+ to->sb_rtstart = cpu_to_be64(from->sb_rtstart);
}
/*
rgs->blocks = sbp->sb_rgextents * sbp->sb_rextsize;
rgs->blklog = mp->m_sb.sb_rgblklog;
rgs->blkmask = xfs_mask32lo(mp->m_sb.sb_rgblklog);
+ if (xfs_sb_has_incompat_feature(sbp,
+ XFS_SB_FEAT_INCOMPAT_ZONED))
+ rgs->start_daddr = mp->m_sb.sb_rtstart;
if (xfs_sb_has_incompat_feature(sbp,
XFS_SB_FEAT_INCOMPAT_ZONE_GAPS))
rgs->has_daddr_gaps = true;
* ensure newly written file data make it to disk before logging the new
* inode size in case of an extending write.
*/
- if (XFS_IS_REALTIME_INODE(ip))
+ if (XFS_IS_REALTIME_INODE(ip) && mp->m_rtdev_targp != mp->m_ddev_targp)
error = blkdev_issue_flush(mp->m_rtdev_targp->bt_bdev);
else if (mp->m_logdev_targp != mp->m_ddev_targp)
error = blkdev_issue_flush(mp->m_ddev_targp->bt_bdev);
handlers[1].fn = xfs_getfsmap_logdev;
}
#ifdef CONFIG_XFS_RT
- if (mp->m_rtdev_targp) {
+ /*
+ * The fsmap ioctl identifies each space pool by device number, so it
+ * cannot currently represent two distinct address spaces (data and
+ * internal RT section) that share a single device.  Until the interface
+ * grows a way to disambiguate them, only report the RT device when it
+ * is a separate external device.
+ */
+ if (mp->m_rtdev_targp && mp->m_rtdev_targp != mp->m_ddev_targp) {
handlers[2].nr_sectors = XFS_FSB_TO_BB(mp, mp->m_sb.sb_rblocks);
handlers[2].dev = new_encode_dev(mp->m_rtdev_targp->bt_dev);
if (use_rmap)
* maps 1:1 to the device address space.
*/
bool has_daddr_gaps;
+
+ xfs_daddr_t start_daddr;
};
enum {
struct xfs_buf *bp;
int error;
- if (XFS_BB_TO_FSB(mp, daddr) != last_block) {
+ if (xfs_daddr_to_rtb(mp, daddr) != last_block) {
xfs_warn(mp, "RT device size overflow: %llu != %llu",
xfs_daddr_to_rtb(mp, daddr), last_block);
return -EFBIG;
if (error)
return error;
}
- if (mp->m_rtdev_targp) {
+
+ if (mp->m_sb.sb_rtstart) {
+ if (mp->m_rtdev_targp) {
+ xfs_warn(mp,
+ "can't use internal and external rtdev at the same time");
+ return -EINVAL;
+ }
+ mp->m_rtdev_targp = mp->m_ddev_targp;
+ } else if (mp->m_rtname) {
error = xfs_setsize_buftarg(mp->m_rtdev_targp,
mp->m_sb.sb_sectsize);
if (error)
{
if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp)
xfs_free_buftarg(mp->m_logdev_targp);
- if (mp->m_rtdev_targp)
+ if (mp->m_rtdev_targp && mp->m_rtdev_targp != mp->m_ddev_targp)
xfs_free_buftarg(mp->m_rtdev_targp);
if (mp->m_ddev_targp)
xfs_free_buftarg(mp->m_ddev_targp);
"zoned device support requires CONFIG_BLK_DEV_ZONED");
return -EINVAL;
}
- error = blkdev_report_zones(bt->bt_bdev, 0, mp->m_sb.sb_rgcount,
- xfs_get_zone_info_cb, mp);
+ error = blkdev_report_zones(bt->bt_bdev, mp->m_sb.sb_rtstart,
+ mp->m_sb.sb_rgcount, xfs_get_zone_info_cb, mp);
if (error < 0)
return error;
}