rp->ir_u.f.ir_freecount = cpu_to_be32(freecount);
}
+/*
+ * Number of inodes assumed to always be allocated because they are created
+ * by mkfs.
+ */
+static inline unsigned int
+xfs_rootrec_inodes_inuse(
+	struct xfs_mount	*mp)
+{
+	if (xfs_has_metadir(mp))
+		return 4; /* sb_rootino, sb_rbmino, sb_rsumino, sb_metadirino */
+	return 3; /* sb_rootino, sb_rbmino, sb_rsumino */
+}
+
#endif /* XFS_REPAIR_INCORE_H */
struct xfs_mount *mp,
int scan_threads)
{
-	int j, inuse = 3; /* root, rbm, rsum */
+	int inuse = xfs_rootrec_inodes_inuse(mp), j;
ino_tree_node_t *ino_rec;
/* now we can start using the buffer cache routines */
print_final_rpt();
-	if (xfs_has_metadir(mp))
-		inuse++; /* root, metaroot, rbm, rsum */
-
/*
* make sure we know about the root inode chunk
*/
static void
keep_fsinos(xfs_mount_t *mp)
{
+	int inuse = xfs_rootrec_inodes_inuse(mp), i;
ino_tree_node_t *irec;
-	int i, inuse = 3; /* root, rbm, rsum */
-
-	if (xfs_has_metadir(mp))
-		inuse++; /* root, metaroot, rbm, rsum */
irec = find_inode_rec(mp, XFS_INO_TO_AGNO(mp, mp->m_sb.sb_rootino),
XFS_INO_TO_AGINO(mp, mp->m_sb.sb_rootino));
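
For context, a count like this is only useful because the mkfs-created inodes
are expected to sit in the first slots of the root inode chunk, so a caller
can simply mark the first "inuse" offsets of that chunk record as allocated.
The stand-alone sketch below illustrates that pattern under those assumptions;
every mock_* type and helper is an illustrative stand-in, not an xfs_repair
API.

/*
 * Minimal, self-contained illustration (not xfs_repair code) of how a
 * rootrec in-use count is consumed: mark the first "inuse" slots of the
 * root inode chunk as allocated.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct mock_mount {
	bool		has_metadir;	/* stand-in for xfs_has_metadir(mp) */
};

struct mock_ino_rec {
	uint64_t	inuse_mask;	/* one bit per inode in the chunk */
};

static inline unsigned int
mock_rootrec_inodes_inuse(struct mock_mount *mp)
{
	if (mp->has_metadir)
		return 4;	/* root, rt bitmap, rt summary, metadir root */
	return 3;		/* root, rt bitmap, rt summary */
}

static void
mock_set_inode_used(struct mock_ino_rec *irec, unsigned int offset)
{
	irec->inuse_mask |= 1ULL << offset;
}

int
main(void)
{
	struct mock_mount	mp = { .has_metadir = true };
	struct mock_ino_rec	root_chunk = { 0 };
	unsigned int		inuse = mock_rootrec_inodes_inuse(&mp);
	unsigned int		i;

	/* mkfs-created inodes occupy the first "inuse" slots of the chunk. */
	for (i = 0; i < inuse; i++)
		mock_set_inode_used(&root_chunk, i);

	printf("marked %u inodes in use, mask 0x%llx\n", inuse,
	       (unsigned long long)root_chunk.inuse_mask);
	return 0;
}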