/*
 * A cursor is "reset" when it carries no position state at all: never
 * initted, and hashval/blkno/offset are all zero.
 */
#define XFS_ISRESET_CURSOR(cursor) \
	(!((cursor)->initted) && !((cursor)->hashval) && \
	 !((cursor)->blkno) && !((cursor)->offset))

/*
 * Copy out entries of shortform attribute lists for attr_list().
 * Shortform attribute lists are not stored in hashval sorted order.
 * If the output buffer is not large enough to hold them all, then
 * we have to calculate each entries' hashvalue and sort them before
 * we can begin returning them to the user.
*/ staticint
xfs_attr_shortform_list( struct xfs_attr_list_context *context)
{ struct xfs_attrlist_cursor_kern *cursor = &context->cursor; struct xfs_inode *dp = context->dp; struct xfs_attr_sf_sort *sbuf, *sbp; struct xfs_attr_sf_hdr *sf = dp->i_af.if_data; struct xfs_attr_sf_entry *sfe; int sbsize, nsbuf, count, i; int error = 0;
ASSERT(sf != NULL); if (!sf->count) return 0;
trace_xfs_attr_list_sf(context);
/* * If the buffer is large enough and the cursor is at the start, * do not bother with sorting since we will return everything in * one buffer and another call using the cursor won't need to be * made. * Note the generous fudge factor of 16 overhead bytes per entry. * If bufsize is zero then put_listent must be a search function * and can just scan through what we have.
*/ if (context->bufsize == 0 ||
(XFS_ISRESET_CURSOR(cursor) &&
(dp->i_af.if_bytes + sf->count * 16) < context->bufsize)) { for (i = 0, sfe = xfs_attr_sf_firstentry(sf); i < sf->count; i++) { if (XFS_IS_CORRUPT(context->dp->i_mount,
!xfs_attr_namecheck(sfe->flags,
sfe->nameval,
sfe->namelen))) {
xfs_dirattr_mark_sick(context->dp, XFS_ATTR_FORK); return -EFSCORRUPTED;
}
context->put_listent(context,
sfe->flags,
sfe->nameval,
(int)sfe->namelen,
&sfe->nameval[sfe->namelen],
(int)sfe->valuelen); /* * Either search callback finished early or * didn't fit it all in the buffer after all.
*/ if (context->seen_enough) break;
sfe = xfs_attr_sf_nextentry(sfe);
}
trace_xfs_attr_list_sf_all(context); return 0;
}
/* do no more for a search callback */ if (context->bufsize == 0) return 0;
/* * It didn't all fit, so we have to sort everything on hashval.
*/
sbsize = sf->count * sizeof(*sbuf);
sbp = sbuf = kmalloc(sbsize,
GFP_KERNEL | __GFP_NOLOCKDEP | __GFP_NOFAIL);
/* * Scan the attribute list for the rest of the entries, storing * the relevant info from only those that match into a buffer.
*/
nsbuf = 0; for (i = 0, sfe = xfs_attr_sf_firstentry(sf); i < sf->count; i++) { if (unlikely(
((char *)sfe < (char *)sf) ||
((char *)sfe >= ((char *)sf + dp->i_af.if_bytes)) ||
!xfs_attr_check_namespace(sfe->flags))) {
XFS_CORRUPTION_ERROR("xfs_attr_shortform_list",
XFS_ERRLEVEL_LOW,
context->dp->i_mount, sfe, sizeof(*sfe));
kfree(sbuf);
xfs_dirattr_mark_sick(dp, XFS_ATTR_FORK); return -EFSCORRUPTED;
}
fa = xfs_da3_node_header_check(bp, dp->i_ino); if (fa) goto out_corruptbuf;
xfs_da3_node_hdr_from_disk(mp, &nodehdr, node);
/* Tree taller than we can handle; bail out! */ if (nodehdr.level >= XFS_DA_NODE_MAXDEPTH) goto out_corruptbuf;
/* Check the level from the root node. */ if (cursor->blkno == 0)
expected_level = nodehdr.level - 1; elseif (expected_level != nodehdr.level) goto out_corruptbuf; else
expected_level--;
btree = nodehdr.btree; for (i = 0; i < nodehdr.count; btree++, i++) { if (cursor->hashval <= be32_to_cpu(btree->hashval)) {
cursor->blkno = be32_to_cpu(btree->before);
trace_xfs_attr_list_node_descend(context,
btree); break;
}
}
xfs_trans_brelse(tp, bp);
if (i == nodehdr.count) return 0;
/* We can't point back to the root. */ if (XFS_IS_CORRUPT(mp, cursor->blkno == 0)) {
xfs_dirattr_mark_sick(dp, XFS_ATTR_FORK); return -EFSCORRUPTED;
}
}
fa = xfs_attr3_leaf_header_check(bp, dp->i_ino); if (fa) {
__xfs_buf_mark_corrupt(bp, fa); goto out_releasebuf;
}
/* * Do all sorts of validation on the passed-in cursor structure. * If anything is amiss, ignore the cursor and look up the hashval * starting from the btree root.
*/
bp = NULL; if (cursor->blkno > 0) { struct xfs_attr_leaf_entry *entries;
error = xfs_da3_node_read(context->tp, dp, cursor->blkno, &bp,
XFS_ATTR_FORK); if (xfs_metadata_is_sick(error))
xfs_dirattr_mark_sick(dp, XFS_ATTR_FORK); if (error != 0 && error != -EFSCORRUPTED) return error; if (!bp) goto need_lookup;
node = bp->b_addr; switch (be16_to_cpu(node->hdr.info.magic)) { case XFS_DA_NODE_MAGIC: case XFS_DA3_NODE_MAGIC:
trace_xfs_attr_list_wrong_blk(context);
fa = xfs_da3_node_header_check(bp, dp->i_ino); if (fa) {
__xfs_buf_mark_corrupt(bp, fa);
xfs_dirattr_mark_sick(dp, XFS_ATTR_FORK);
}
xfs_trans_brelse(context->tp, bp);
bp = NULL; break; case XFS_ATTR_LEAF_MAGIC: case XFS_ATTR3_LEAF_MAGIC:
leaf = bp->b_addr;
fa = xfs_attr3_leaf_header_check(bp, dp->i_ino); if (fa) {
__xfs_buf_mark_corrupt(bp, fa);
xfs_trans_brelse(context->tp, bp);
xfs_dirattr_mark_sick(dp, XFS_ATTR_FORK);
bp = NULL; break;
}
xfs_attr3_leaf_hdr_from_disk(mp->m_attr_geo,
&leafhdr, leaf);
entries = xfs_attr3_leaf_entryp(leaf); if (cursor->hashval > be32_to_cpu(
entries[leafhdr.count - 1].hashval)) {
trace_xfs_attr_list_wrong_blk(context);
xfs_trans_brelse(context->tp, bp);
bp = NULL;
} elseif (cursor->hashval <= be32_to_cpu(
entries[0].hashval)) {
trace_xfs_attr_list_wrong_blk(context);
xfs_trans_brelse(context->tp, bp);
bp = NULL;
} break; default:
trace_xfs_attr_list_wrong_blk(context);
xfs_trans_brelse(context->tp, bp);
bp = NULL;
}
}
/* * We did not find what we expected given the cursor's contents, * so we start from the top and work down based on the hash value. * Note that start of node block is same as start of leaf block.
*/ if (bp == NULL) {
need_lookup:
error = xfs_attr_node_list_lookup(context, cursor, &bp); if (error || !bp) return error;
}
ASSERT(bp != NULL);
/* * Roll upward through the blocks, processing each leaf block in * order. As long as there is space in the result buffer, keep * adding the information.
*/ for (;;) {
leaf = bp->b_addr;
error = xfs_attr3_leaf_list_int(bp, context); if (error) break;
xfs_attr3_leaf_hdr_from_disk(mp->m_attr_geo, &leafhdr, leaf); if (context->seen_enough || leafhdr.forw == 0) break;
cursor->blkno = leafhdr.forw;
xfs_trans_brelse(context->tp, bp);
error = xfs_attr3_leaf_read(context->tp, dp, dp->i_ino,
cursor->blkno, &bp); if (error) return error;
}
xfs_trans_brelse(context->tp, bp); return error;
}
/* * Copy out attribute list entries for attr_list(), for leaf attribute lists.
*/ int
xfs_attr3_leaf_list_int( struct xfs_buf *bp, struct xfs_attr_list_context *context)
{ struct xfs_attrlist_cursor_kern *cursor = &context->cursor; struct xfs_attr_leafblock *leaf; struct xfs_attr3_icleaf_hdr ichdr; struct xfs_attr_leaf_entry *entries; struct xfs_attr_leaf_entry *entry; int i; struct xfs_mount *mp = context->dp->i_mount;
/* * Re-find our place in the leaf block if this is a new syscall.
*/ if (context->resynch) {
entry = &entries[0]; for (i = 0; i < ichdr.count; entry++, i++) { if (be32_to_cpu(entry->hashval) == cursor->hashval) { if (cursor->offset == context->dupcnt) {
context->dupcnt = 0; break;
}
context->dupcnt++;
} elseif (be32_to_cpu(entry->hashval) >
cursor->hashval) {
context->dupcnt = 0; break;
}
} if (i == ichdr.count) {
trace_xfs_attr_list_notfound(context); return 0;
}
} else {
entry = &entries[0];
i = 0;
}
context->resynch = 0;
/* * We have found our place, start copying out the new attributes.
*/ for (; i < ichdr.count; entry++, i++) { char *name; void *value; int namelen, valuelen;
	/*
	 * NOTE(review): this fragment looks like the dispatch tail of an
	 * attr-list entry point (cf. xfs_attr_list_ilocked()); the function
	 * header and the declarations of dp and error are not visible in
	 * this chunk -- confirm against the original file.
	 *
	 * Decide on what work routines to call based on the inode size.
	 */
	if (!xfs_inode_hasattr(dp))
		return 0;
	if (dp->i_af.if_format == XFS_DINODE_FMT_LOCAL)
		return xfs_attr_shortform_list(context);

	/* Prerequisite for xfs_attr_is_leaf */
	error = xfs_iread_extents(NULL, dp, XFS_ATTR_FORK);
	if (error)
		return error;

	if (xfs_attr_is_leaf(dp))
		return xfs_attr_leaf_list(context);
	return xfs_attr_node_list(context);
}
/*
 * List the extended attributes of an inode.
 *
 * NOTE(review): this chunk ends in the middle of this function -- only the
 * opening declarations are visible (lock_mode presumably guards an inode
 * lock taken in the missing body; confirm against the original file).
 */
int
xfs_attr_list(
	struct xfs_attr_list_context	*context)
{
	struct xfs_inode	*dp = context->dp;
	uint			lock_mode;
	int			error;
/*
 * (Extraneous trailing text, not C source; translated from German and
 * commented out so the file remains compilable.  It appears to be
 * website-disclaimer residue from whatever tool produced this chunk:)
 *
 * The information on this website has been carefully compiled to the best
 * of our knowledge.  However, neither the completeness, the correctness,
 * nor the quality of the information provided is guaranteed.
 * Note: the colored syntax highlighting and the measurement are still
 * experimental.
 */