/*
 * Freeing the RUI requires that we remove it from the AIL if it has already
 * been placed there. However, the RUI may not yet have been placed in the AIL
 * when called by xfs_rui_release() from RUD processing due to the ordering of
 * committed vs unpin operations in bulk insert operations. Hence the reference
 * count to ensure only the last caller frees the RUI.
 */
STATICvoid
xfs_rui_release( struct xfs_rui_log_item *ruip)
{
/* Drop one reference; all but the final caller simply return. */
ASSERT(atomic_read(&ruip->rui_refcount) > 0); if (!atomic_dec_and_test(&ruip->rui_refcount)) return;
/*
 * NOTE(review): the tail of this function (whatever frees the RUI once the
 * refcount reaches zero) is not visible in this chunk -- confirm against
 * the full file.
 */
/*
 * This is called to fill in the vector of log iovecs for the
 * given rui log item. We use only 1 iovec, and we point that
 * at the rui_log_format structure embedded in the rui item.
 * It is at this point that we assert that all of the extent
 * slots in the rui item have been filled.
 */
STATICvoid
xfs_rui_item_format( struct xfs_log_item *lip, struct xfs_log_vec *lv)
{ struct xfs_rui_log_item *ruip = RUI_ITEM(lip); struct xfs_log_iovec *vecp = NULL;
/*
 * NOTE(review): the remainder of this function's body is missing from this
 * chunk.
 */
/* * The unpin operation is the last place an RUI is manipulated in the log. It is * either inserted in the AIL or aborted in the event of a log I/O error. In * either case, the RUI transaction has been successfully committed to make it * this far. Therefore, we expect whoever committed the RUI to either construct * and commit the RUD or drop the RUD's reference in the event of error. Simply * drop the log's RUI reference now that the log is done with it.
*/ STATICvoid
xfs_rui_item_unpin( struct xfs_log_item *lip, int remove)
{ struct xfs_rui_log_item *ruip = RUI_ITEM(lip);
xfs_rui_release(ruip);
}
/* * The RUI has been either committed or aborted if the transaction has been * cancelled. If the transaction was cancelled, an RUD isn't going to be * constructed and thus we free the RUI here directly.
*/ STATICvoid
xfs_rui_item_release( struct xfs_log_item *lip)
{
xfs_rui_release(RUI_ITEM(lip));
}
/*
 * Allocate and initialize an rui item with the given number of extents.
 */
STATICstruct xfs_rui_log_item *
xfs_rui_init( struct xfs_mount *mp, unsignedshort item_type,
uint nextents)
/* NOTE(review): the function body is missing from this chunk. */
/*
 * This is called to fill in the vector of log iovecs for the
 * given rud log item. We use only 1 iovec, and we point that
 * at the rud_log_format structure embedded in the rud item.
 * It is at this point that we assert that all of the extent
 * slots in the rud item have been filled.
 */
STATICvoid
xfs_rud_item_format( struct xfs_log_item *lip, struct xfs_log_vec *lv)
{ struct xfs_rud_log_item *rudp = RUD_ITEM(lip); struct xfs_log_iovec *vecp = NULL;
/*
 * NOTE(review): the remainder of this function's body is missing from this
 * chunk.
 */
/*
 * The RUD is either committed or aborted if the transaction is cancelled. If
 * the transaction is cancelled, drop our reference to the RUI and free the
 * RUD.
 */
STATICvoid
xfs_rud_item_release( struct xfs_log_item *lip)
{ struct xfs_rud_log_item *rudp = RUD_ITEM(lip);
/*
 * NOTE(review): the remainder of this function's body is missing from this
 * chunk.
 */
/*
 * NOTE(review): fragment -- the header of the enclosing function is not
 * visible in this chunk; 'ruip', 'ri' and 'map' are presumably parameters
 * or locals declared above.  The code claims the next free slot in the
 * RUI's extent array and fills it in from the rmap intent 'ri'.
 */
/*
 * atomic_inc_return gives us the value after the increment;
 * we want to use it as an array index so we need to subtract 1 from
 * it.
 */
next_extent = atomic_inc_return(&ruip->rui_next_extent) - 1;
ASSERT(next_extent < ruip->rui_format.rui_nextents);
map = &ruip->rui_format.rui_extents[next_extent];
map->me_owner = ri->ri_owner;
map->me_startblock = ri->ri_bmap.br_startblock;
map->me_startoff = ri->ri_bmap.br_startoff;
map->me_len = ri->ri_bmap.br_blockcount;
/* Translate in-core intent state into the on-disk extent flags. */
map->me_flags = 0; if (ri->ri_bmap.br_state == XFS_EXT_UNWRITTEN)
map->me_flags |= XFS_RMAP_EXTENT_UNWRITTEN; if (ri->ri_whichfork == XFS_ATTR_FORK)
map->me_flags |= XFS_RMAP_EXTENT_ATTR_FORK; switch (ri->ri_type) { case XFS_RMAP_MAP:
map->me_flags |= XFS_RMAP_EXTENT_MAP; break; case XFS_RMAP_MAP_SHARED:
map->me_flags |= XFS_RMAP_EXTENT_MAP_SHARED; break; case XFS_RMAP_UNMAP:
map->me_flags |= XFS_RMAP_EXTENT_UNMAP; break; case XFS_RMAP_UNMAP_SHARED:
map->me_flags |= XFS_RMAP_EXTENT_UNMAP_SHARED; break; case XFS_RMAP_CONVERT:
map->me_flags |= XFS_RMAP_EXTENT_CONVERT; break; case XFS_RMAP_CONVERT_SHARED:
map->me_flags |= XFS_RMAP_EXTENT_CONVERT_SHARED; break; case XFS_RMAP_ALLOC:
map->me_flags |= XFS_RMAP_EXTENT_ALLOC; break; case XFS_RMAP_FREE:
map->me_flags |= XFS_RMAP_EXTENT_FREE; break; default:
/* Unknown intent type is a programming error. */
ASSERT(0);
}
}
/* Get an RUD so we can process all the deferred rmap updates. */
staticstruct xfs_log_item *
xfs_rmap_update_create_done( struct xfs_trans *tp, struct xfs_log_item *intent, unsignedint count)
{ struct xfs_rui_log_item *ruip = RUI_ITEM(intent); struct xfs_rud_log_item *rudp;
/*
 * NOTE(review): the remainder of this function's body is missing from this
 * chunk.
 */
/* Add this deferred RUI to the transaction. */
void
xfs_rmap_defer_add( struct xfs_trans *tp, struct xfs_rmap_intent *ri)
{ struct xfs_mount *mp = tp->t_mountp;
/*
 * Deferred rmap updates for the realtime and data sections must use
 * separate transactions to finish deferred work because updates to
 * realtime metadata files can lock AGFs to allocate btree blocks and
 * we don't want that mixing with the AGF locks taken to finish data
 * section updates.
 */
ri->ri_group = xfs_group_intent_get(mp, ri->ri_bmap.br_startblock,
ri->ri_realtime ? XG_TYPE_RTG : XG_TYPE_AG);
/*
 * NOTE(review): the remainder of this function's body is missing from this
 * chunk.
 */
/* Is this recovered RUI ok? */
staticinlinebool
xfs_rui_validate_map( struct xfs_mount *mp, bool isrt, struct xfs_map_extent *map)
{ /* Recovered log items are untrusted input: validate every field. */ if (!xfs_has_rmapbt(mp)) returnfalse;
if (map->me_flags & ~XFS_RMAP_EXTENT_FLAGS) returnfalse;
/* Exactly one known extent type must be encoded in the flags. */
switch (map->me_flags & XFS_RMAP_EXTENT_TYPE_MASK) { case XFS_RMAP_EXTENT_MAP: case XFS_RMAP_EXTENT_MAP_SHARED: case XFS_RMAP_EXTENT_UNMAP: case XFS_RMAP_EXTENT_UNMAP_SHARED: case XFS_RMAP_EXTENT_CONVERT: case XFS_RMAP_EXTENT_CONVERT_SHARED: case XFS_RMAP_EXTENT_ALLOC: case XFS_RMAP_EXTENT_FREE: break; default: returnfalse;
}
/* Inode owners must be valid inode numbers. */
if (!XFS_RMAP_NON_INODE_OWNER(map->me_owner) &&
!xfs_verify_ino(mp, map->me_owner)) returnfalse;
if (!xfs_verify_fileext(mp, map->me_startoff, map->me_len)) returnfalse;
if (isrt) return xfs_verify_rtbext(mp, map->me_startblock, map->me_len);
/*
 * NOTE(review): the non-realtime branch (the final return) and closing
 * brace are missing from this chunk.
 */
/*
 * Process an rmap update intent item that was recovered from the log.
 * We need to update the rmapbt.
 */
STATICint
xfs_rmap_recover_work( struct xfs_defer_pending *dfp, struct list_head *capture_list)
{ struct xfs_trans_res resv; struct xfs_log_item *lip = dfp->dfp_intent; struct xfs_rui_log_item *ruip = RUI_ITEM(lip); struct xfs_trans *tp; struct xfs_mount *mp = lip->li_log->l_mp; bool isrt = xfs_rui_item_isrt(lip); int i; int error = 0;
/*
 * First check the validity of the extents described by the
 * RUI. If any are bad, then assume that all are bad and
 * just toss the RUI.
 */
for (i = 0; i < ruip->rui_format.rui_nextents; i++) { if (!xfs_rui_validate_map(mp, isrt,
&ruip->rui_format.rui_extents[i])) {
XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
&ruip->rui_format, sizeof(ruip->rui_format)); return -EFSCORRUPTED;
}
/*
 * NOTE(review): the next two lines reference 'src' and 'dst', which are not
 * declared in this function -- they look spliced in from a different
 * function (an extent-array copy helper).  Confirm against the full file.
 */
for (i = 0; i < src->rui_nextents; i++)
memcpy(&dst->rui_extents[i], &src->rui_extents[i], sizeof(struct xfs_map_extent));
}
/*
 * This routine is called to create an in-core extent rmap update
 * item from the rui format structure which was logged on disk.
 * It allocates an in-core rui, copies the extents from the format
 * structure into it, and adds the rui to the AIL with the given
 * LSN.
 */
STATICint
xlog_recover_rui_commit_pass2( struct xlog *log, struct list_head *buffer_list, struct xlog_recover_item *item,
xfs_lsn_t lsn)
{ struct xfs_mount *mp = log->l_mp; struct xfs_rui_log_item *ruip; struct xfs_rui_log_format *rui_formatp;
size_t len;
/*
 * NOTE(review): the remainder of this function's body is missing from this
 * chunk.
 */
/*
 * This routine is called when an RUD format structure is found in a committed
 * transaction in the log. Its purpose is to cancel the corresponding RUI if it
 * was still in the log. To do this it searches the AIL for the RUI with an id
 * equal to that in the RUD format structure. If we find it we drop the RUD
 * reference, which removes the RUI from the AIL and frees it.
 */
STATICint
xlog_recover_rud_commit_pass2( struct xlog *log, struct list_head *buffer_list, struct xlog_recover_item *item,
xfs_lsn_t lsn)
{ struct xfs_rud_log_format *rud_formatp;
/*
 * NOTE(review): the remainder of this function's body is missing from this
 * chunk.
 */
/*
 * NOTE(review): the text below is extraneous web-page boilerplate (originally
 * German) that does not belong in this source file; preserved here in English
 * translation pending removal:
 *
 * "The information on this web page was compiled carefully to the best of our
 * knowledge. However, neither completeness, nor correctness, nor quality of
 * the provided information is guaranteed.
 * Note: the colored syntax highlighting and the measurement are still
 * experimental."
 */