#define xfs_buf_verifier_error(bp,e,n,bu,bus,fa) \
xfs_verifier_error(bp, e, fa)
+void
+xfs_buf_corruption_error(struct xfs_buf *bp);
/* XXX: this is clearly a bug - a shared header needs to export this */
/* xfs_rtalloc.c */
ip->i_ino, name);
}
+/*
+ * Complain about the kinds of metadata corruption that we can't detect from a
+ * verifier, such as incorrect inter-block relationship data. Does not set
+ * bp->b_error.
+ */
+void
+xfs_buf_corruption_error(
+ struct xfs_buf *bp)
+{
+ xfs_alert(NULL, "Metadata corruption detected at %p, %s block 0x%llx",
+ __return_address, bp->b_ops->name, bp->b_bn);
+}
+
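A minimal sketch of the intended call pattern (the caller, the `hdr`/`maxcount` names, and the check itself are hypothetical illustrations, not part of this patch): code that spots an inter-block inconsistency in a buffer it already holds reports it against that buffer and returns -EFSCORRUPTED, leaving bp->b_error untouched so the buffer itself is not marked failed:

	/* Hypothetical caller, for illustration only. */
	if (be16_to_cpu(hdr->count) > maxcount) {	/* cross-buffer sanity check */
		xfs_buf_corruption_error(bp);		/* logs caller, b_ops->name, b_bn */
		return -EFSCORRUPTED;			/* b_error deliberately left alone */
	}

The hunks below apply this pattern, using xfs_buf_corruption_error() where a buffer is in hand, XFS_ERROR_REPORT() where only the mount is available, and xfs_inode_verifier_error() for on-disk inode fork problems.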
/*
* This is called from I/O verifiers on v5 superblock filesystems. In the
* kernel, it validates the metadata LSN parameter against the current LSN of
xfs_trans_agblocks_delta(tp, len);
if (unlikely(be32_to_cpu(agf->agf_freeblks) >
- be32_to_cpu(agf->agf_length)))
+ be32_to_cpu(agf->agf_length))) {
+ xfs_buf_corruption_error(agbp);
return -EFSCORRUPTED;
+ }
xfs_alloc_log_agf(tp, agbp, XFS_AGF_FREEBLKS);
return 0;
bp = xfs_btree_get_bufs(args->mp, args->tp, args->agno, fbno);
if (!bp) {
+ XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, args->mp);
error = -EFSCORRUPTED;
goto error;
}
return error;
bp = xfs_btree_get_bufs(tp->t_mountp, tp, agno, agbno);
- if (!bp)
+ if (!bp) {
+ XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, tp->t_mountp);
return -EFSCORRUPTED;
+ }
xfs_trans_binval(tp, bp);
return 0;
leaf = bp->b_addr;
xfs_attr3_leaf_hdr_from_disk(args->geo, &ichdr, leaf);
entries = xfs_attr3_leaf_entryp(leaf);
- if (ichdr.count >= args->geo->blksize / 8)
+ if (ichdr.count >= args->geo->blksize / 8) {
+ xfs_buf_corruption_error(bp);
return -EFSCORRUPTED;
+ }
/*
* Binary search. (note: small blocks will skip this loop)
else
break;
}
- if (!(probe >= 0 && (!ichdr.count || probe < ichdr.count)))
+ if (!(probe >= 0 && (!ichdr.count || probe < ichdr.count))) {
+ xfs_buf_corruption_error(bp);
return -EFSCORRUPTED;
- if (!(span <= 4 || be32_to_cpu(entry->hashval) == hashval))
+ }
+ if (!(span <= 4 || be32_to_cpu(entry->hashval) == hashval)) {
+ xfs_buf_corruption_error(bp);
return -EFSCORRUPTED;
+ }
/*
* Since we may have duplicate hashval's, find the first matching
xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, 1L);
abp = xfs_btree_get_bufl(mp, tp, args.fsbno);
if (!abp) {
+ XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp);
error = -EFSCORRUPTED;
goto out_unreserve_dquot;
}
if (XFS_IFORK_Q(ip))
goto trans_cancel;
if (ip->i_d.di_anextents != 0) {
+ XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp);
error = -EFSCORRUPTED;
goto trans_cancel;
}
case XFS_DINODE_FMT_EXTENTS:
break;
default:
+ ASSERT(0);
return -EFSCORRUPTED;
}
return 0;
if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE &&
- XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS)
+ XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS) {
+ ASSERT(0);
return -EFSCORRUPTED;
+ }
error = xfs_bmap_last_extent(NULL, ip, whichfork, &rec, &is_empty);
if (error || is_empty)
del_cursor);
if (stop_fsb >= got.br_startoff + got.br_blockcount) {
+ ASSERT(0);
error = -EFSCORRUPTED;
goto del_cursor;
}
out_bad:
*blkp = NULL;
+ xfs_buf_corruption_error(bp);
xfs_trans_brelse(cur->bc_tp, bp);
return -EFSCORRUPTED;
}
XFS_BTREE_STATS_INC(cur, lookup);
/* No such thing as a zero-level tree. */
- if (cur->bc_nlevels == 0)
+ if (cur->bc_nlevels == 0) {
+ XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, cur->bc_mp);
return -EFSCORRUPTED;
+ }
block = NULL;
keyno = 0;
node = oldblk->bp->b_addr;
if (node->hdr.info.forw) {
if (be32_to_cpu(node->hdr.info.forw) != addblk->blkno) {
+ xfs_buf_corruption_error(oldblk->bp);
error = -EFSCORRUPTED;
goto out;
}
node = oldblk->bp->b_addr;
if (node->hdr.info.back) {
if (be32_to_cpu(node->hdr.info.back) != addblk->blkno) {
+ xfs_buf_corruption_error(oldblk->bp);
error = -EFSCORRUPTED;
goto out;
}
break;
}
- if (magic != XFS_DA_NODE_MAGIC && magic != XFS_DA3_NODE_MAGIC)
+ if (magic != XFS_DA_NODE_MAGIC && magic != XFS_DA3_NODE_MAGIC) {
+ xfs_buf_corruption_error(blk->bp);
return -EFSCORRUPTED;
+ }
blk->magic = XFS_DA_NODE_MAGIC;
btree = dp->d_ops->node_tree_p(node);
/* Tree taller than we can handle; bail out! */
- if (nodehdr.level >= XFS_DA_NODE_MAXDEPTH)
+ if (nodehdr.level >= XFS_DA_NODE_MAXDEPTH) {
+ xfs_buf_corruption_error(blk->bp);
return -EFSCORRUPTED;
+ }
/* Check the level from the root. */
if (blkno == args->geo->leafblk)
expected_level = nodehdr.level - 1;
- else if (expected_level != nodehdr.level)
+ else if (expected_level != nodehdr.level) {
+ xfs_buf_corruption_error(blk->bp);
return -EFSCORRUPTED;
- else
+ } else
expected_level--;
max = nodehdr.count;
}
/* We can't point back to the root. */
- if (blkno == args->geo->leafblk)
+ if (blkno == args->geo->leafblk) {
+ XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW,
+ dp->i_mount);
return -EFSCORRUPTED;
+ }
}
- if (expected_level != 0)
+ if (expected_level != 0) {
+ XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, dp->i_mount);
return -EFSCORRUPTED;
+ }
/*
* A leaf block that ends in the hashval that we are interested in
if ((rval = xfs_bmap_last_offset(args->dp, &last, XFS_DATA_FORK)))
return rval;
rval = XFS_FSB_TO_B(args->dp->i_mount, last) == args->geo->blksize;
- if (rval != 0 && args->dp->i_d.di_size != args->geo->blksize)
+ if (rval != 0 && args->dp->i_d.di_size != args->geo->blksize) {
+ XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, args->dp->i_mount);
return -EFSCORRUPTED;
+ }
*vp = rval;
return 0;
}
oldbest = be16_to_cpu(bf[0].length);
ltp = xfs_dir2_leaf_tail_p(args->geo, leaf);
bestsp = xfs_dir2_leaf_bests_p(ltp);
- if (be16_to_cpu(bestsp[db]) != oldbest)
+ if (be16_to_cpu(bestsp[db]) != oldbest) {
+ xfs_buf_corruption_error(lbp);
return -EFSCORRUPTED;
+ }
/*
* Mark the former data entry unused.
*/
leaf = lbp->b_addr;
ltp = xfs_dir2_leaf_tail_p(args->geo, leaf);
if (be32_to_cpu(ltp->bestcount) >
- (uint)dp->i_d.di_size / args->geo->blksize)
+ (uint)dp->i_d.di_size / args->geo->blksize) {
+ xfs_buf_corruption_error(lbp);
return -EFSCORRUPTED;
+ }
/*
* Copy freespace entries from the leaf block to the new block.
* Quick check just to make sure we are not going to index
* into other peoples memory
*/
- if (index < 0)
+ if (index < 0) {
+ xfs_buf_corruption_error(bp);
return -EFSCORRUPTED;
+ }
/*
* If there are already the maximum number of leaf entries in
ents = dp->d_ops->leaf_ents_p(leaf);
xfs_dir3_leaf_check(dp, bp);
- if (leafhdr.count <= 0)
+ if (leafhdr.count <= 0) {
+ xfs_buf_corruption_error(bp);
return -EFSCORRUPTED;
+ }
/*
* Look up the hash value in the leaf entries.
error = xfs_iformat_btree(ip, dip, XFS_DATA_FORK);
break;
default:
+ xfs_inode_verifier_error(ip, -EFSCORRUPTED, __func__,
+ dip, sizeof(*dip), __this_address);
return -EFSCORRUPTED;
}
break;
default:
+ xfs_inode_verifier_error(ip, -EFSCORRUPTED, __func__, dip,
+ sizeof(*dip), __this_address);
return -EFSCORRUPTED;
}
if (error)
error = xfs_iformat_btree(ip, dip, XFS_ATTR_FORK);
break;
default:
+ xfs_inode_verifier_error(ip, error, __func__, dip,
+ sizeof(*dip), __this_address);
error = -EFSCORRUPTED;
break;
}
struct list_head *debris = priv;
struct xfs_refcount_recovery *rr;
- if (be32_to_cpu(rec->refc.rc_refcount) != 1)
+ if (be32_to_cpu(rec->refc.rc_refcount) != 1) {
+ XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, cur->bc_mp);
return -EFSCORRUPTED;
+ }
rr = kmem_alloc(sizeof(struct xfs_refcount_recovery), 0);
xfs_refcount_btrec_to_irec(rec, &rr->rr_rrec);
#include "xfs_bmap.h"
#include "xfs_trans.h"
-
/*
* Realtime allocator bitmap functions shared with userspace.
*/
if (error)
return error;
- if (nmap == 0 || !xfs_bmap_is_real_extent(&map))
+ if (nmap == 0 || !xfs_bmap_is_real_extent(&map)) {
+ XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp);
return -EFSCORRUPTED;
+ }
ASSERT(map.br_startblock != NULLFSBLOCK);
error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp,