/*
 * This returns the number of iovecs needed to log the given inode item.
 *
 * We only need one iovec for the icreate log structure.
 */
STATIC void
xfs_icreate_item_size(
	struct xfs_log_item	*lip,
	int			*nvecs,
	int			*nbytes)
{
	*nvecs += 1;
	*nbytes += sizeof(struct xfs_icreate_log);
}
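/*
 * For reference, the single iovec accounted for above carries a struct
 * xfs_icreate_log. The layout below is a sketch based on the definition in
 * fs/xfs/libxfs/xfs_log_format.h; consult that header for the authoritative
 * version.
 *
 *	struct xfs_icreate_log {
 *		uint16_t	icl_type;	// type of log format structure
 *		uint16_t	icl_size;	// size of log format structure
 *		__be32		icl_ag;		// ag being allocated in
 *		__be32		icl_agbno;	// start block of inode range
 *		__be32		icl_count;	// number of inodes to initialise
 *		__be32		icl_isize;	// size of inodes
 *		__be32		icl_length;	// length of extent to initialise
 *		__be32		icl_gen;	// inode generation number to use
 *	};
 */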
/*
 * This is called to fill in the vector of log iovecs for the
 * given inode create log item.
 */
STATIC void
xfs_icreate_item_format(
	struct xfs_log_item	*lip,
	struct xfs_log_vec	*lv)
{
	struct xfs_icreate_item	*icp = ICR_ITEM(lip);
	struct xfs_log_iovec	*vecp = NULL;

	xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_ICREATE,
			&icp->ic_format,
			sizeof(struct xfs_icreate_log));
}
/*
 * Initialize the inode log item for a newly allocated (in-core) inode.
 *
 * Inode extents can only reside within an AG. Hence specify the starting
 * block for the inode chunk by offset within an AG as well as the
 * length of the allocated extent.
 *
 * This joins the item to the transaction and marks it dirty so
 * that we don't need a separate call to do this, nor does the
 * caller need to know anything about the icreate item.
 */
void
xfs_icreate_log(
	struct xfs_trans	*tp,
	xfs_agnumber_t		agno,
	xfs_agblock_t		agbno,
	unsigned int		count,
	unsigned int		inode_size,
	xfs_agblock_t		length,
	unsigned int		generation)
{
	struct xfs_icreate_item	*icp;
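	/*
	 * Usage sketch (illustrative, not taken from this file): the inode
	 * chunk allocation path logs the newly initialised chunk from within
	 * its dirty transaction by passing AG-relative coordinates. The
	 * variable names below are assumptions for the example, not the real
	 * caller's locals.
	 *
	 *	xfs_icreate_log(tp, agno, chunk_agbno, ninodes,
	 *			mp->m_sb.sb_inodesize, chunk_len, gen);
	 *
	 * Because xfs_icreate_log() adds the item to @tp and marks it dirty
	 * itself, the caller never touches the icreate item again.
	 */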
static enum xlog_recover_reorder
xlog_recover_icreate_reorder(
		struct xlog_recover_item *item)
{
	/*
	 * Inode allocation buffers must be replayed before subsequent inode
	 * items try to modify those buffers. ICREATE items are the logical
	 * equivalent of logging a newly initialized inode buffer, so recover
	 * these at the same time that we recover logged buffers.
	 */
	return XLOG_REORDER_BUFFER_LIST;
}
/*
 * This routine is called when an inode create format structure is found in a
 * committed transaction in the log.  Its purpose is to initialise the inodes
 * being allocated on disk. This requires us to get inode cluster buffers that
 * match the range to be initialised, stamped with inode templates and written
 * by delayed write so that subsequent modifications will hit the cached buffer
 * and only need writing out at the end of recovery.
 */
STATIC int
xlog_recover_icreate_commit_pass2(
	struct xlog			*log,
	struct list_head		*buffer_list,
	struct xlog_recover_item	*item,
	xfs_lsn_t			lsn)
{
	struct xfs_mount		*mp = log->l_mp;
	struct xfs_icreate_log		*icl;
	struct xfs_ino_geometry		*igeo = M_IGEO(mp);
	xfs_agnumber_t			agno;
	xfs_agblock_t			agbno;
	unsigned int			count;
	unsigned int			isize;
	xfs_agblock_t			length;
	int				bb_per_cluster;
	int				cancel_count;
	int				nbufs;
	int				i;
	icl = (struct xfs_icreate_log *)item->ri_buf[0].iov_base;
	if (icl->icl_type != XFS_LI_ICREATE) {
		xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad type");
		return -EINVAL;
	}

	if (icl->icl_size != 1) {
		xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad icl size");
		return -EINVAL;
	}

	agno = be32_to_cpu(icl->icl_ag);
	if (agno >= mp->m_sb.sb_agcount) {
		xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad agno");
		return -EINVAL;
	}

	agbno = be32_to_cpu(icl->icl_agbno);
	if (!agbno || agbno == NULLAGBLOCK || agbno >= mp->m_sb.sb_agblocks) {
		xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad agbno");
		return -EINVAL;
	}

	isize = be32_to_cpu(icl->icl_isize);
	if (isize != mp->m_sb.sb_inodesize) {
		xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad isize");
		return -EINVAL;
	}

	count = be32_to_cpu(icl->icl_count);
	if (!count) {
		xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad count");
		return -EINVAL;
	}

	length = be32_to_cpu(icl->icl_length);
	if (!length || length >= mp->m_sb.sb_agblocks) {
		xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad length");
		return -EINVAL;
	}
	/*
	 * The inode chunk is either full or sparse and we only support
	 * m_ino_geo.ialloc_min_blks sized sparse allocations at this time.
	 */
	if (length != igeo->ialloc_blks &&
	    length != igeo->ialloc_min_blks) {
		xfs_warn(log->l_mp,
			 "%s: unsupported chunk length", __func__);
		return -EINVAL;
	}

	/* verify inode count is consistent with extent length */
	if ((count >> mp->m_sb.sb_inopblog) != length) {
		xfs_warn(log->l_mp,
			 "%s: inconsistent inode count and chunk length",
			 __func__);
		return -EINVAL;
	}
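	/*
	 * Worked example of the check above with illustrative geometry (these
	 * numbers are assumptions, not read from the superblock): with
	 * 4096-byte blocks and 256-byte inodes there are 16 inodes per block,
	 * so sb_inopblog is 4. A full 64-inode chunk must then cover
	 * 64 >> 4 == 4 blocks; any other icl_length for that count is
	 * rejected as inconsistent.
	 */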
	/*
	 * The icreate transaction can cover multiple cluster buffers and these
	 * buffers could have been freed and reused. Check the individual
	 * buffers for cancellation so we don't overwrite anything written after
	 * a cancellation.
	 */
	bb_per_cluster = XFS_FSB_TO_BB(mp, igeo->blocks_per_cluster);
	nbufs = length / igeo->blocks_per_cluster;
	for (i = 0, cancel_count = 0; i < nbufs; i++) {
		xfs_daddr_t	daddr;

		daddr = XFS_AGB_TO_DADDR(mp, agno,
				agbno + i * igeo->blocks_per_cluster);
		if (xlog_is_buffer_cancelled(log, daddr, bb_per_cluster))
			cancel_count++;
	}
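	/*
	 * Scale of the scan above, using illustrative geometry (assumed
	 * numbers, not taken from this mount): with 4096-byte blocks,
	 * blocks_per_cluster == 4 and a full chunk of length == 8 fsblocks,
	 * nbufs is 8 / 4 == 2 cluster buffers, each spanning
	 * XFS_FSB_TO_BB(mp, 4) == 32 512-byte basic blocks, so two
	 * xlog_is_buffer_cancelled() lookups cover the whole chunk.
	 */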
	/*
	 * We currently only use icreate for a single allocation at a time. This
	 * means we should expect either all or none of the buffers to be
	 * cancelled. Be conservative and skip replay if at least one buffer is
	 * cancelled, but warn the user that something is awry if the buffers
	 * are not consistent.
	 *
	 * XXX: This must be refined to only skip cancelled clusters once we use
	 * icreate for multiple chunk allocations.
	 */
	ASSERT(!cancel_count || cancel_count == nbufs);
	if (cancel_count) {
		if (cancel_count != nbufs)
			xfs_warn(mp,
	"WARNING: partial inode chunk cancellation, skipped icreate.");
		trace_xfs_log_recover_icreate_cancel(log, icl);
		return 0;
	}