struct xfs_attr_leafblock *leaf;
struct xfs_attr3_icleaf_hdr icleafhdr;
struct xfs_attr_leaf_entry *entries;
- struct xfs_da_node_entry *btree;
struct xfs_da3_icnode_hdr icnodehdr;
struct xfs_da_intnode *node;
struct xfs_inode *dp = args->dp;
goto out;
node = bp1->b_addr;
xfs_da3_node_hdr_from_disk(mp, &icnodehdr, node);
- btree = dp->d_ops->node_tree_p(node);
leaf = bp2->b_addr;
xfs_attr3_leaf_hdr_from_disk(args->geo, &icleafhdr, leaf);
entries = xfs_attr3_leaf_entryp(leaf);
/* both on-disk, don't endian-flip twice */
- btree[0].hashval = entries[icleafhdr.count - 1].hashval;
- btree[0].before = cpu_to_be32(blkno);
+ icnodehdr.btree[0].hashval = entries[icleafhdr.count - 1].hashval;
+ icnodehdr.btree[0].before = cpu_to_be32(blkno);
icnodehdr.count = 1;
xfs_da3_node_hdr_to_disk(dp->i_mount, node, &icnodehdr);
xfs_trans_log_buf(args->trans, bp1, 0, args->geo->blksize - 1);
to->magic = be16_to_cpu(from3->hdr.info.hdr.magic);
to->count = be16_to_cpu(from3->hdr.__count);
to->level = be16_to_cpu(from3->hdr.__level);
+ to->btree = from3->__btree;
ASSERT(to->magic == XFS_DA3_NODE_MAGIC);
} else {
to->forw = be32_to_cpu(from->hdr.info.forw);
to->magic = be16_to_cpu(from->hdr.info.magic);
to->count = be16_to_cpu(from->hdr.__count);
to->level = be16_to_cpu(from->hdr.__level);
+ to->btree = from->__btree;
ASSERT(to->magic == XFS_DA_NODE_MAGIC);
}
}
struct xfs_da3_icnode_hdr icnodehdr;
xfs_da3_node_hdr_from_disk(dp->i_mount, &icnodehdr, oldroot);
- btree = dp->d_ops->node_tree_p(oldroot);
+ btree = icnodehdr.btree;
size = (int)((char *)&btree[icnodehdr.count] - (char *)oldroot);
level = icnodehdr.level;
node = bp->b_addr;
xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr, node);
- btree = dp->d_ops->node_tree_p(node);
+ btree = nodehdr.btree;
btree[0].hashval = cpu_to_be32(blk1->hashval);
btree[0].before = cpu_to_be32(blk1->blkno);
btree[1].hashval = cpu_to_be32(blk2->hashval);
node2 = blk2->bp->b_addr;
xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr1, node1);
xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr2, node2);
- btree1 = dp->d_ops->node_tree_p(node1);
- btree2 = dp->d_ops->node_tree_p(node2);
+ btree1 = nodehdr1.btree;
+ btree2 = nodehdr2.btree;
/*
* Figure out how many entries need to move, and in which direction.
node2 = tmpnode;
xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr1, node1);
xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr2, node2);
- btree1 = dp->d_ops->node_tree_p(node1);
- btree2 = dp->d_ops->node_tree_p(node2);
+ btree1 = nodehdr1.btree;
+ btree2 = nodehdr2.btree;
swap = 1;
}
node2 = blk2->bp->b_addr;
xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr1, node1);
xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr2, node2);
- btree1 = dp->d_ops->node_tree_p(node1);
- btree2 = dp->d_ops->node_tree_p(node2);
+ btree1 = nodehdr1.btree;
+ btree2 = nodehdr2.btree;
}
blk1->hashval = be32_to_cpu(btree1[nodehdr1.count - 1].hashval);
blk2->hashval = be32_to_cpu(btree2[nodehdr2.count - 1].hashval);
node = oldblk->bp->b_addr;
xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr, node);
- btree = dp->d_ops->node_tree_p(node);
+ btree = nodehdr.btree;
ASSERT(oldblk->index >= 0 && oldblk->index <= nodehdr.count);
ASSERT(newblk->blkno != 0);
xfs_dablk_t child;
struct xfs_buf *bp;
struct xfs_da3_icnode_hdr oldroothdr;
- struct xfs_da_node_entry *btree;
int error;
struct xfs_inode *dp = state->args->dp;
* Read in the (only) child block, then copy those bytes into
* the root block's buffer and free the original child block.
*/
- btree = dp->d_ops->node_tree_p(oldroot);
- child = be32_to_cpu(btree[0].before);
+ child = be32_to_cpu(oldroothdr.btree[0].before);
ASSERT(child != 0);
error = xfs_da3_node_read(args->trans, dp, child, -1, &bp,
args->whichfork);
struct xfs_buf *bp,
int *count)
{
- struct xfs_da_intnode *node;
- struct xfs_da_node_entry *btree;
struct xfs_da3_icnode_hdr nodehdr;
- node = bp->b_addr;
- xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr, node);
+ xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr, bp->b_addr);
if (count)
*count = nodehdr.count;
if (!nodehdr.count)
return 0;
- btree = dp->d_ops->node_tree_p(node);
- return be32_to_cpu(btree[nodehdr.count - 1].hashval);
+ return be32_to_cpu(nodehdr.btree[nodehdr.count - 1].hashval);
}
/*
node = blk->bp->b_addr;
xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr, node);
- btree = dp->d_ops->node_tree_p(node);
+ btree = nodehdr.btree;
if (be32_to_cpu(btree[blk->index].hashval) == lasthash)
break;
blk->hashval = lasthash;
* Copy over the offending entry, or just zero it out.
*/
index = drop_blk->index;
- btree = dp->d_ops->node_tree_p(node);
+ btree = nodehdr.btree;
if (index < nodehdr.count - 1) {
tmp = nodehdr.count - index - 1;
tmp *= (uint)sizeof(xfs_da_node_entry_t);
save_node = save_blk->bp->b_addr;
xfs_da3_node_hdr_from_disk(dp->i_mount, &drop_hdr, drop_node);
xfs_da3_node_hdr_from_disk(dp->i_mount, &save_hdr, save_node);
- drop_btree = dp->d_ops->node_tree_p(drop_node);
- save_btree = dp->d_ops->node_tree_p(save_node);
+ drop_btree = drop_hdr.btree;
+ save_btree = save_hdr.btree;
tp = state->args->trans;
/*
*/
node = blk->bp->b_addr;
xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr, node);
- btree = dp->d_ops->node_tree_p(node);
+ btree = nodehdr.btree;
/* Tree taller than we can handle; bail out! */
if (nodehdr.level >= XFS_DA_NODE_MAXDEPTH) {
node2 = node2_bp->b_addr;
xfs_da3_node_hdr_from_disk(dp->i_mount, &node1hdr, node1);
xfs_da3_node_hdr_from_disk(dp->i_mount, &node2hdr, node2);
- btree1 = dp->d_ops->node_tree_p(node1);
- btree2 = dp->d_ops->node_tree_p(node2);
+ btree1 = node1hdr.btree;
+ btree2 = node2hdr.btree;
if (node1hdr.count > 0 && node2hdr.count > 0 &&
((be32_to_cpu(btree2[0].hashval) < be32_to_cpu(btree1[0].hashval)) ||
{
struct xfs_da_state_blk *blk;
struct xfs_da_blkinfo *info;
- struct xfs_da_intnode *node;
struct xfs_da_args *args;
struct xfs_da_node_entry *btree;
struct xfs_da3_icnode_hdr nodehdr;
ASSERT((path->active > 0) && (path->active < XFS_DA_NODE_MAXDEPTH));
level = (path->active-1) - 1; /* skip bottom layer in path */
for (blk = &path->blk[level]; level >= 0; blk--, level--) {
- node = blk->bp->b_addr;
- xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr, node);
- btree = dp->d_ops->node_tree_p(node);
+ xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr,
+ blk->bp->b_addr);
if (forward && (blk->index < nodehdr.count - 1)) {
blk->index++;
- blkno = be32_to_cpu(btree[blk->index].before);
+ blkno = be32_to_cpu(nodehdr.btree[blk->index].before);
break;
} else if (!forward && (blk->index > 0)) {
blk->index--;
- blkno = be32_to_cpu(btree[blk->index].before);
+ blkno = be32_to_cpu(nodehdr.btree[blk->index].before);
break;
}
}
case XFS_DA_NODE_MAGIC:
case XFS_DA3_NODE_MAGIC:
blk->magic = XFS_DA_NODE_MAGIC;
- node = (xfs_da_intnode_t *)info;
- xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr, node);
- btree = dp->d_ops->node_tree_p(node);
+ xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr,
+ bp->b_addr);
+ btree = nodehdr.btree;
blk->hashval = be32_to_cpu(btree[nodehdr.count - 1].hashval);
if (forward)
blk->index = 0;
dead_node = (xfs_da_intnode_t *)dead_info;
xfs_da3_node_hdr_from_disk(dp->i_mount, &deadhdr, dead_node);
- btree = dp->d_ops->node_tree_p(dead_node);
+ btree = deadhdr.btree;
dead_level = deadhdr.level;
dead_hash = be32_to_cpu(btree[deadhdr.count - 1].hashval);
}
goto done;
}
level = par_hdr.level;
- btree = dp->d_ops->node_tree_p(par_node);
+ btree = par_hdr.btree;
for (entno = 0;
entno < par_hdr.count &&
be32_to_cpu(btree[entno].hashval) < dead_hash;
error = -EFSCORRUPTED;
goto done;
}
- btree = dp->d_ops->node_tree_p(par_node);
+ btree = par_hdr.btree;
entno = 0;
}
/*
uint16_t magic;
uint16_t count;
uint16_t level;
+
+ /*
+ * Pointer to the on-disk format entries, which are behind the
+ * variable size (v4 vs v5) header in the on-disk block.
+ */
+ struct xfs_da_node_entry *btree;
};
/*
hdr3->stale = cpu_to_be16(from->stale);
}
-
-/*
- * Directory/Attribute Node block operations
- */
-static struct xfs_da_node_entry *
-xfs_da2_node_tree_p(struct xfs_da_intnode *dap)
-{
- return dap->__btree;
-}
-
-static struct xfs_da_node_entry *
-xfs_da3_node_tree_p(struct xfs_da_intnode *dap)
-{
- return ((struct xfs_da3_intnode *)dap)->__btree;
-}
-
/*
* Directory free space block operations
*/
.leaf_ents_p = xfs_dir2_leaf_ents_p,
.node_hdr_size = sizeof(struct xfs_da_node_hdr),
- .node_tree_p = xfs_da2_node_tree_p,
.free_hdr_size = sizeof(struct xfs_dir2_free_hdr),
.free_hdr_to_disk = xfs_dir2_free_hdr_to_disk,
.leaf_ents_p = xfs_dir2_leaf_ents_p,
.node_hdr_size = sizeof(struct xfs_da_node_hdr),
- .node_tree_p = xfs_da2_node_tree_p,
.free_hdr_size = sizeof(struct xfs_dir2_free_hdr),
.free_hdr_to_disk = xfs_dir2_free_hdr_to_disk,
.leaf_ents_p = xfs_dir3_leaf_ents_p,
.node_hdr_size = sizeof(struct xfs_da3_node_hdr),
- .node_tree_p = xfs_da3_node_tree_p,
.free_hdr_size = sizeof(struct xfs_dir3_free_hdr),
.free_hdr_to_disk = xfs_dir3_free_hdr_to_disk,
static const struct xfs_dir_ops xfs_dir2_nondir_ops = {
.node_hdr_size = sizeof(struct xfs_da_node_hdr),
- .node_tree_p = xfs_da2_node_tree_p,
};
static const struct xfs_dir_ops xfs_dir3_nondir_ops = {
.node_hdr_size = sizeof(struct xfs_da3_node_hdr),
- .node_tree_p = xfs_da3_node_tree_p,
};
/*
(*leaf_ents_p)(struct xfs_dir2_leaf *lp);
int node_hdr_size;
- struct xfs_da_node_entry *
- (*node_tree_p)(struct xfs_da_intnode *dap);
int free_hdr_size;
void (*free_hdr_to_disk)(struct xfs_dir2_free *to,
xfs_da_intnode_t *node;
bmap_ext_t lbmp;
struct xfs_da_geometry *geo;
- struct xfs_da_node_entry *btree;
struct xfs_da3_icnode_hdr nodehdr;
if (whichfork == XFS_DATA_FORK) {
goto error_out;
}
- btree = M_DIROPS(mp)->node_tree_p(node);
if (nodehdr.count > geo->node_ents) {
do_warn(
_("bad %s record count in inode %" PRIu64 ", count = %d, max = %d\n"),
}
}
- da_cursor->level[i].hashval = be32_to_cpu(btree[0].hashval);
+ da_cursor->level[i].hashval =
+ be32_to_cpu(nodehdr.btree[0].hashval);
da_cursor->level[i].bp = bp;
da_cursor->level[i].bno = bno;
da_cursor->level[i].index = 0;
/*
* set up new bno for next level down
*/
- bno = be32_to_cpu(btree[0].before);
+ bno = be32_to_cpu(nodehdr.btree[0].before);
} while (node != NULL && i > 1);
/*
int bad = 0;
int entry;
int this_level = p_level + 1;
- struct xfs_da_node_entry *btree;
struct xfs_da3_icnode_hdr nodehdr;
#ifdef XR_DIR_TRACE
*/
entry = cursor->level[this_level].index;
node = cursor->level[this_level].bp->b_addr;
- btree = M_DIROPS(mp)->node_tree_p(node);
libxfs_da3_node_hdr_from_disk(mp, &nodehdr, node);
/*
* hash values monotonically increasing ???
*/
if (cursor->level[this_level].hashval >=
- be32_to_cpu(btree[entry].hashval)) {
+ be32_to_cpu(nodehdr.btree[entry].hashval)) {
do_warn(
_("%s block hashvalue inconsistency, expected > %u / saw %u\n"),
FORKNAME(whichfork),
cursor->level[this_level].hashval,
- be32_to_cpu(btree[entry].hashval));
+ be32_to_cpu(nodehdr.btree[entry].hashval));
bad++;
}
if (nodehdr.forw != 0) {
/*
* ok, now check descendant block number against this level
*/
- if (cursor->level[p_level].bno != be32_to_cpu(btree[entry].before)) {
+ if (cursor->level[p_level].bno !=
+ be32_to_cpu(nodehdr.btree[entry].before)) {
#ifdef XR_DIR_TRACE
fprintf(stderr, "bad %s btree pointer, child bno should "
"be %d, block bno is %d, hashval is %u\n",
- FORKNAME(whichfork), be16_to_cpu(btree[entry].before),
+ FORKNAME(whichfork),
+ /* .before is an on-disk __be32; be16_to_cpu truncated it */
+ be32_to_cpu(nodehdr.btree[entry].before),
cursor->level[p_level].bno,
cursor->level[p_level].hashval);
fprintf(stderr, "verify_final_da_path returns 1 (bad) #1a\n");
}
if (cursor->level[p_level].hashval !=
- be32_to_cpu(btree[entry].hashval)) {
+ be32_to_cpu(nodehdr.btree[entry].hashval)) {
if (!no_modify) {
do_warn(
_("correcting bad hashval in non-leaf %s block\n"
"\tin (level %d) in inode %" PRIu64 ".\n"),
FORKNAME(whichfork), this_level, cursor->ino);
- btree[entry].hashval = cpu_to_be32(
+ nodehdr.btree[entry].hashval = cpu_to_be32(
cursor->level[p_level].hashval);
cursor->level[this_level].dirty++;
} else {
* Note: squirrel hashval away _before_ releasing the
* buffer, preventing a use-after-free problem.
*/
- hashval = be32_to_cpu(btree[entry].hashval);
+ hashval = be32_to_cpu(nodehdr.btree[entry].hashval);
/*
* release/write buffer
int nex;
bmap_ext_t lbmp;
struct xfs_da_geometry *geo;
- struct xfs_da_node_entry *btree;
struct xfs_da3_icnode_hdr nodehdr;
if (whichfork == XFS_DATA_FORK)
*/
entry = cursor->level[this_level].index;
node = cursor->level[this_level].bp->b_addr;
- btree = M_DIROPS(mp)->node_tree_p(node);
libxfs_da3_node_hdr_from_disk(mp, &nodehdr, node);
/* No entries in this node? Tree is corrupt. */
* it was set when the block was first read in.
*/
cursor->level[this_level].hashval =
- be32_to_cpu(btree[entry - 1].hashval);
+ be32_to_cpu(nodehdr.btree[entry - 1].hashval);
/*
* keep track of greatest block # -- that gets
}
newnode = bp->b_addr;
- btree = M_DIROPS(mp)->node_tree_p(newnode);
libxfs_da3_node_hdr_from_disk(mp, &nodehdr, newnode);
/*
cursor->level[this_level].dirty = 0;
cursor->level[this_level].bno = dabno;
cursor->level[this_level].hashval =
- be32_to_cpu(btree[0].hashval);
+ be32_to_cpu(nodehdr.btree[0].hashval);
entry = cursor->level[this_level].index = 0;
}
/*
* ditto for block numbers
*/
- if (cursor->level[p_level].bno != be32_to_cpu(btree[entry].before)) {
+ if (cursor->level[p_level].bno !=
+ be32_to_cpu(nodehdr.btree[entry].before)) {
#ifdef XR_DIR_TRACE
fprintf(stderr, "bad %s btree pointer, child bno "
"should be %d, block bno is %d, hashval is %u\n",
- FORKNAME(whichfork), be32_to_cpu(btree[entry].before),
+ FORKNAME(whichfork),
+ be32_to_cpu(nodehdr.btree[entry].before),
cursor->level[p_level].bno,
cursor->level[p_level].hashval);
fprintf(stderr, "verify_da_path returns 1 (bad) #1a\n");
* block against the hashval in the current entry
*/
if (cursor->level[p_level].hashval !=
- be32_to_cpu(btree[entry].hashval)) {
+ be32_to_cpu(nodehdr.btree[entry].hashval)) {
if (!no_modify) {
do_warn(
_("correcting bad hashval in interior %s block\n"
"\tin (level %d) in inode %" PRIu64 ".\n"),
FORKNAME(whichfork), this_level, cursor->ino);
- btree[entry].hashval = cpu_to_be32(
+ nodehdr.btree[entry].hashval = cpu_to_be32(
cursor->level[p_level].hashval);
cursor->level[this_level].dirty++;
} else {