return res;
}
-#if 0 /* Unused, provided for completeness. */
/* Clear a region of bits. */
-int
-bitmap_clear(
+static int
+__bitmap_clear(
struct bitmap *bmap,
uint64_t start,
uint64_t len)
uint64_t new_length;
struct avl64node *node;
int stat;
+ int ret = 0;
- pthread_mutex_lock(&bmap->bt_lock);
/* Find any existing nodes over that range. */
avl64_findranges(bmap->bt_tree, start, start + len, &firstn, &lastn);
}
out:
- pthread_mutex_unlock(&bmap->bt_lock);
return ret;
}
-#endif
+
+/*
+ * Clear a region of bits.
+ *
+ * Locking wrapper around __bitmap_clear(): holds bt_lock for the
+ * duration of the update and returns the helper's status code.
+ */
+int
+bitmap_clear(
+	struct bitmap	*bmap,
+	uint64_t	start,
+	uint64_t	length)
+{
+	int		res;
+
+	pthread_mutex_lock(&bmap->bt_lock);
+	res = __bitmap_clear(bmap, start, length);
+	pthread_mutex_unlock(&bmap->bt_lock);
+
+	return res;
+}
/* Iterate the set regions of this bitmap. */
int
int bitmap_alloc(struct bitmap **bmap);
void bitmap_free(struct bitmap **bmap);
int bitmap_set(struct bitmap *bmap, uint64_t start, uint64_t length);
+int bitmap_clear(struct bitmap *bmap, uint64_t start, uint64_t length);
int bitmap_iterate(struct bitmap *bmap, int (*fn)(uint64_t, uint64_t, void *),
void *arg);
int bitmap_iterate_range(struct bitmap *bmap, uint64_t start, uint64_t length,
#include "versions.h"
#include "prefetch.h"
#include "progress.h"
+#include "slab.h"
+#include "rmap.h"
/*
* validates inode block or chunk, returns # of good inodes
_("would clear realtime summary inode %" PRIu64 "\n"),
ino);
}
+ } else if (is_rtrmap_inode(ino)) {
+ rmap_avoid_check(mp);
+ if (!no_modify) {
+ do_warn(
+ _("cleared realtime rmap inode %" PRIu64 "\n"),
+ ino);
+ } else {
+ do_warn(
+ _("would clear realtime rmap inode %" PRIu64 "\n"),
+ ino);
+ }
} else if (!no_modify) {
do_warn(_("cleared inode %" PRIu64 "\n"),
ino);
clear_dinode_core(mp, dino, ino_num);
clear_dinode_unlinked(mp, dino);
+ if (is_rtrmap_inode(ino_num))
+ rmap_avoid_check(mp);
+
/* and clear the forks */
memset(XFS_DFORK_DPTR(dino), 0, XFS_LITINO(mp));
return;
lino = XFS_AGINO_TO_INO(mp, agno, ino);
- /* This rmap btree inode must be a metadata inode. */
+ /*
+ * This rmap btree inode must be a metadata inode reachable via
+ * /realtime/$rgno.rmap in the metadata directory tree.
+ */
if (!(dip->di_flags2 & be64_to_cpu(XFS_DIFLAG2_METADIR))) {
do_warn(
_("rtrmap inode %" PRIu64 " not flagged as metadata\n"),
lino);
return 1;
}
+ if (type != XR_INO_RTRMAP) {
+ do_warn(
+_("rtrmap inode %" PRIu64 " was not found in the metadata directory tree\n"),
+ lino);
+ return 1;
+ }
rgno = rtgroup_for_rtrmap_inode(mp, lino);
if (rgno == NULLRGNUMBER) {
error = process_rtrmap_reclist(mp, rp, numrecs,
&priv.last_rec, NULL, "rtrmapbt root");
if (error) {
- rmap_avoid_check();
+ rmap_avoid_check(mp);
return 1;
}
return 0;
if (lino == mp->m_sb.sb_rbmino)
return process_check_rt_inode(mp, dinoc, lino, type, dirty,
XR_INO_RTBITMAP, _("realtime bitmap"));
+ if (is_rtrmap_inode(lino))
+ return process_check_rt_inode(mp, dinoc, lino, type, dirty,
+ XR_INO_RTRMAP, _("realtime rmap btree"));
return 0;
}
}
break;
+ case XR_INO_RTRMAP:
+ /*
+ * if we have no rmapbt, any inode claiming
+ * to be a real-time file is bogus
+ */
+ if (!xfs_has_rmapbt(mp)) {
+ do_warn(
+_("found inode %" PRIu64 " claiming to be a rtrmapbt file, but rmapbt is disabled\n"), lino);
+ return 1;
+ }
+ break;
+
default:
break;
}
return 1;
}
break;
+ case XFS_DINODE_FMT_RMAP:
+ if (!(xfs_has_metadir(mp) && xfs_has_parent(mp))) {
+ do_warn(
+_("metadata inode %" PRIu64 " type %d cannot have attr fork\n"),
+ lino, dino->di_format);
+ return 1;
+ }
+ fallthrough;
case XFS_DINODE_FMT_LOCAL:
case XFS_DINODE_FMT_EXTENTS:
case XFS_DINODE_FMT_BTREE:
type = XR_INO_GQUOTA;
else if (lino == mp->m_sb.sb_pquotino)
type = XR_INO_PQUOTA;
+ else if (is_rtrmap_inode(lino))
+ type = XR_INO_RTRMAP;
else
type = XR_INO_DATA;
break;
case XR_INO_UQUOTA:
case XR_INO_GQUOTA:
case XR_INO_PQUOTA:
+ case XR_INO_RTRMAP:
/*
* This inode was recognized as being filesystem
* metadata, so preserve the inode and its contents for
#include "da_util.h"
#include "prefetch.h"
#include "progress.h"
+#include "slab.h"
+#include "rmap.h"
/*
* Known bad inode list. These are seen when the leaf and node
} else if (lino == mp->m_sb.sb_metadirino) {
junkit = 1;
junkreason = _("metadata directory root");
+ } else if (is_rtrmap_inode(lino)) {
+ junkit = 1;
+ junkreason = _("realtime rmap");
} else if ((irec_p = find_inode_rec(mp,
XFS_INO_TO_AGNO(mp, lino),
XFS_INO_TO_AGINO(mp, lino))) != NULL) {
clearreason = _("project quota");
} else if (ent_ino == mp->m_sb.sb_metadirino) {
clearreason = _("metadata directory root");
+ } else if (is_rtrmap_inode(ent_ino)) {
+ clearreason = _("realtime rmap");
} else {
irec_p = find_inode_rec(mp,
XFS_INO_TO_AGNO(mp, ent_ino),
#define XR_INO_UQUOTA 12 /* user quota inode */
#define XR_INO_GQUOTA 13 /* group quota inode */
#define XR_INO_PQUOTA 14 /* project quota inode */
+#define XR_INO_RTRMAP 15 /* realtime rmap */
/* inode allocation tree */
/* refcount items, p4-5 */
struct xfs_slab *ar_refcount_items;
+ /*
+ * inumber of the rmap btree for this rtgroup. This can be set to
+ * NULLFSINO to signal to phase 6 to link a new inode into the metadir.
+ */
+ xfs_ino_t rg_rmap_ino;
+
/* agfl entries from leftover agbt allocations */
int ar_flcount;
};
bool rmapbt_suspect;
static bool refcbt_suspect;
+/* Bitmap of rt group rmap inodes reachable via /realtime/$rgno.rmap. */
+static struct bitmap *rmap_inodes;
+
static struct xfs_ag_rmap *rmaps_for_group(bool isrt, unsigned int group)
{
if (isrt)
if (error)
goto nomem;
+ ag_rmap->rg_rmap_ino = NULLFSINO;
return;
nomem:
do_error(
struct xfs_mount *mp,
xfs_ino_t ino)
{
- /* This will be implemented later. */
+ xfs_rgnumber_t rgno;
+
+ if (!rmap_inodes)
+ return NULLRGNUMBER;
+
+ for (rgno = 0; rgno < mp->m_sb.sb_rgcount; rgno++) {
+ if (rg_rmaps[rgno].rg_rmap_ino == ino)
+ return rgno;
+ }
+
return NULLRGNUMBER;
}
+/*
+ * Record the inode number of the rmap btree for rt group @rgno.
+ *
+ * Looks up /realtime/$rgno.rmap in the metadata directory tree and, if
+ * the inumber is sane and not already claimed by another rt group,
+ * marks it in rmap_inodes and stashes it in the incore rt group data.
+ *
+ * Returns a positive errno: ENOMEM if the metadir path cannot be
+ * built, EFSCORRUPTED for a garbage or duplicate inumber, or whatever
+ * the (negated) libxfs transaction/lookup calls hand back.
+ */
+static inline int
+set_rtgroup_rmap_inode(
+	struct xfs_mount	*mp,
+	xfs_rgnumber_t		rgno)
+{
+	struct xfs_imeta_path	*path;
+	struct xfs_ag_rmap	*ar = rmaps_for_group(true, rgno);
+	struct xfs_trans	*tp;
+	xfs_ino_t		ino;
+	int			error;
+
+	/* Nothing to record if this filesystem has no rt rmap btrees. */
+	if (!xfs_has_rtrmapbt(mp))
+		return 0;
+
+	path = xfs_rtrmapbt_create_path(mp, rgno);
+	if (!path)
+		return ENOMEM;
+
+	/* Empty transaction so that we can do the metadir lookup. */
+	error = -libxfs_trans_alloc_empty(mp, &tp);
+	if (error)
+		goto out_path;
+
+	error = -libxfs_imeta_lookup(tp, path, &ino);
+	if (error)
+		goto out_trans;
+
+	/* Reject garbage inumbers and inodes claimed by two rt groups. */
+	if (!libxfs_verify_ino(mp, ino) || bitmap_test(rmap_inodes, ino, 1)) {
+		error = EFSCORRUPTED;
+		goto out_trans;
+	}
+
+	error = bitmap_set(rmap_inodes, ino, 1);
+	if (error)
+		goto out_trans;
+
+	ar->rg_rmap_ino = ino;
+
+out_trans:
+	libxfs_trans_cancel(tp);
+out_path:
+	libxfs_imeta_free_path(path);
+	return error;
+}
+
+/*
+ * Discover the rmap btree inode of every rt group and record them in
+ * the rmap_inodes bitmap.  Per-group lookup failures are not fatal;
+ * the first error seen is the one reported in the warning below.
+ */
+static void
+discover_rtgroup_inodes(
+	struct xfs_mount	*mp)
+{
+	xfs_rgnumber_t		rgno;
+	int			error;
+
+	error = bitmap_alloc(&rmap_inodes);
+	if (error)
+		goto out;
+
+	for (rgno = 0; rgno < mp->m_sb.sb_rgcount; rgno++) {
+		int err2 = set_rtgroup_rmap_inode(mp, rgno);
+		if (err2 && !error)
+			error = err2;
+	}
+
+out:
+	/* EFSCORRUPTED emits both the specific and the generic warning. */
+	if (error == EFSCORRUPTED)
+		do_warn(
+ _("corruption in metadata directory tree while discovering rt group inodes\n"));
+	if (error)
+		do_warn(
+ _("couldn't discover rt group inodes, err %d\n"),
+			error);
+}
+
+/* Free the bitmap of discovered rt group metadata inodes. */
+static inline void
+free_rtmeta_inode_bitmaps(void)
+{
+	bitmap_free(&rmap_inodes);
+}
+
+/*
+ * Decide if an inode is one of the rt group rmap btree inodes that
+ * were found in the metadata directory tree.  Returns false if rt
+ * group inode discovery has not run or failed to allocate the bitmap.
+ */
+bool is_rtrmap_inode(xfs_ino_t ino)
+{
+	if (!rmap_inodes)
+		return false;
+	return bitmap_test(rmap_inodes, ino, 1);
+}
+
/*
* Initialize per-AG reverse map data.
*/
for (i = 0; i < mp->m_sb.sb_rgcount; i++)
rmaps_init_rt(mp, i, &rg_rmaps[i]);
+
+ discover_rtgroup_inodes(mp);
}
/*
if (!rmap_needs_work(mp))
return;
+ free_rtmeta_inode_bitmaps();
+
for (i = 0; i < mp->m_sb.sb_rgcount; i++)
rmaps_destroy(mp, &rg_rmaps[i]);
free(rg_rmaps);
}
/*
- * Disable the refcount btree check.
+ * Disable the rmap btree check.  Also forget which inodes are rt
+ * group rmap btrees, since the recorded inumbers came from structures
+ * we no longer trust.
+ * NOTE(review): bitmap_clear() dereferences rmap_inodes without a
+ * NULL check -- confirm discovery always allocated it on this path.
 */
void
-rmap_avoid_check(void)
+rmap_avoid_check(
+	struct xfs_mount	*mp)
{
+	struct xfs_rtgroup	*rtg;
+	xfs_rgnumber_t		rgno;
+
+	/* Clear every rt group's cached rmap inode number. */
+	for_each_rtgroup(mp, rgno, rtg) {
+		struct xfs_ag_rmap	*ar = rmaps_for_group(true, rtg->rtg_rgno);
+
+		ar->rg_rmap_ino = NULLFSINO;
+	}
+
+	bitmap_clear(rmap_inodes, 0, XFS_MAXINUMBER);
	rmapbt_suspect = true;
}
return libxfs_refcountbt_calc_size(mp,
slab_count(x->ar_refcount_items));
}
+
+/*
+ * Retrieve the rtrmapbt inode number for a given rtgroup.  Returns
+ * NULLFSINO if no rmap inode has been recorded for this group (not
+ * yet discovered, or forgotten by rmap_avoid_check).
+ */
+xfs_ino_t
+rtgroup_rmap_ino(
+	struct xfs_rtgroup	*rtg)
+{
+	struct xfs_ag_rmap	*ar = rmaps_for_group(true, rtg->rtg_rgno);
+
+	return ar->rg_rmap_ino;
+}
uint64_t rmap_record_count(struct xfs_mount *mp, bool isrt,
xfs_agnumber_t agno);
-extern void rmap_avoid_check(void);
+extern void rmap_avoid_check(struct xfs_mount *mp);
void rmaps_verify_btree(struct xfs_mount *mp, xfs_agnumber_t agno);
extern int64_t rmap_diffkeys(struct xfs_rmap_irec *kp1,
int rmap_get_mem_rec(struct xfs_btree_cur *rmcur, struct xfs_rmap_irec *irec);
xfs_rgnumber_t rtgroup_for_rtrmap_inode(struct xfs_mount *mp, xfs_ino_t ino);
+bool is_rtrmap_inode(xfs_ino_t ino);
+xfs_ino_t rtgroup_rmap_ino(struct xfs_rtgroup *rtg);
#endif /* RMAP_H_ */
out:
if (suspect)
- rmap_avoid_check();
+ rmap_avoid_check(mp);
}
int
out:
if (hdr_errors || suspect) {
- rmap_avoid_check();
+ rmap_avoid_check(mp);
return 1;
}
return 0;
if (levels == 0 || levels > mp->m_rmap_maxlevels) {
do_warn(_("bad levels %u for rmapbt root, agno %d\n"),
levels, agno);
- rmap_avoid_check();
+ rmap_avoid_check(mp);
}
bno = be32_to_cpu(agf->agf_rmap_root);
} else {
do_warn(_("bad agbno %u for rmapbt root, agno %d\n"),
bno, agno);
- rmap_avoid_check();
+ rmap_avoid_check(mp);
}
}