/* * Freeing the CUI requires that we remove it from the AIL if it has already * been placed there. However, the CUI may not yet have been placed in the AIL * when called by xfs_cui_release() from CUD processing due to the ordering of * committed vs unpin operations in bulk insert operations. Hence the reference * count to ensure only the last caller frees the CUI.
*/ STATICvoid
xfs_cui_release( struct xfs_cui_log_item *cuip)
{
ASSERT(atomic_read(&cuip->cui_refcount) > 0); if (!atomic_dec_and_test(&cuip->cui_refcount)) return;
/* * This is called to fill in the vector of log iovecs for the * given cui log item. We use only 1 iovec, and we point that * at the cui_log_format structure embedded in the cui item. * It is at this point that we assert that all of the extent * slots in the cui item have been filled.
*/ STATICvoid
xfs_cui_item_format( struct xfs_log_item *lip, struct xfs_log_vec *lv)
{ struct xfs_cui_log_item *cuip = CUI_ITEM(lip); struct xfs_log_iovec *vecp = NULL;
/* * The unpin operation is the last place an CUI is manipulated in the log. It is * either inserted in the AIL or aborted in the event of a log I/O error. In * either case, the CUI transaction has been successfully committed to make it * this far. Therefore, we expect whoever committed the CUI to either construct * and commit the CUD or drop the CUD's reference in the event of error. Simply * drop the log's CUI reference now that the log is done with it.
*/ STATICvoid
xfs_cui_item_unpin( struct xfs_log_item *lip, int remove)
{ struct xfs_cui_log_item *cuip = CUI_ITEM(lip);
xfs_cui_release(cuip);
}
/* * The CUI has been either committed or aborted if the transaction has been * cancelled. If the transaction was cancelled, an CUD isn't going to be * constructed and thus we free the CUI here directly.
*/ STATICvoid
xfs_cui_item_release( struct xfs_log_item *lip)
{
xfs_cui_release(CUI_ITEM(lip));
}
/* * Allocate and initialize an cui item with the given number of extents.
*/ STATICstruct xfs_cui_log_item *
xfs_cui_init( struct xfs_mount *mp, unsignedshort item_type,
uint nextents)
{ struct xfs_cui_log_item *cuip;
/* * This is called to fill in the vector of log iovecs for the * given cud log item. We use only 1 iovec, and we point that * at the cud_log_format structure embedded in the cud item. * It is at this point that we assert that all of the extent * slots in the cud item have been filled.
*/ STATICvoid
xfs_cud_item_format( struct xfs_log_item *lip, struct xfs_log_vec *lv)
{ struct xfs_cud_log_item *cudp = CUD_ITEM(lip); struct xfs_log_iovec *vecp = NULL;
/* * The CUD is either committed or aborted if the transaction is cancelled. If * the transaction is cancelled, drop our reference to the CUI and free the * CUD.
*/ STATICvoid
xfs_cud_item_release( struct xfs_log_item *lip)
{ struct xfs_cud_log_item *cudp = CUD_ITEM(lip);
/* * atomic_inc_return gives us the value after the increment; * we want to use it as an array index so we need to subtract 1 from * it.
*/
next_extent = atomic_inc_return(&cuip->cui_next_extent) - 1;
ASSERT(next_extent < cuip->cui_format.cui_nextents);
pmap = &cuip->cui_format.cui_extents[next_extent];
pmap->pe_startblock = ri->ri_startblock;
pmap->pe_len = ri->ri_blockcount;
pmap->pe_flags = 0; switch (ri->ri_type) { case XFS_REFCOUNT_INCREASE: case XFS_REFCOUNT_DECREASE: case XFS_REFCOUNT_ALLOC_COW: case XFS_REFCOUNT_FREE_COW:
pmap->pe_flags |= ri->ri_type; break; default:
ASSERT(0);
}
}
/* Get an CUD so we can process all the deferred refcount updates. */ staticstruct xfs_log_item *
xfs_refcount_update_create_done( struct xfs_trans *tp, struct xfs_log_item *intent, unsignedint count)
{ struct xfs_cui_log_item *cuip = CUI_ITEM(intent); struct xfs_cud_log_item *cudp;
/* Add this deferred CUI to the transaction. */ void
xfs_refcount_defer_add( struct xfs_trans *tp, struct xfs_refcount_intent *ri)
{ struct xfs_mount *mp = tp->t_mountp;
/* * Deferred refcount updates for the realtime and data sections must * use separate transactions to finish deferred work because updates to * realtime metadata files can lock AGFs to allocate btree blocks and * we don't want that mixing with the AGF locks taken to finish data * section updates.
*/
ri->ri_group = xfs_group_intent_get(mp, ri->ri_startblock,
ri->ri_realtime ? XG_TYPE_RTG : XG_TYPE_AG);
/* Process a deferred refcount update. */ STATICint
xfs_refcount_update_finish_item( struct xfs_trans *tp, struct xfs_log_item *done, struct list_head *item, struct xfs_btree_cur **state)
{ struct xfs_refcount_intent *ri = ci_entry(item); int error;
/* Did we run out of reservation? Requeue what we didn't finish. */
error = xfs_refcount_finish_one(tp, ri, state); if (!error && ri->ri_blockcount > 0) {
ASSERT(ri->ri_type == XFS_REFCOUNT_INCREASE ||
ri->ri_type == XFS_REFCOUNT_DECREASE); return -EAGAIN;
}
/*
 * Is this recovered CUI ok?
 *
 * Sanity-check a physical extent descriptor pulled out of a recovered CUI
 * log item before we try to replay it: the filesystem must support reflink,
 * no unknown flag bits may be set, the operation type must be one we know,
 * and the extent must lie within the device it targets.
 */
static inline bool
xfs_cui_validate_phys(
	struct xfs_mount		*mp,
	bool				isrt,
	struct xfs_phys_extent		*pmap)
{
	if (!xfs_has_reflink(mp))
		return false;

	/* Reject descriptors carrying flag bits we do not understand. */
	if (pmap->pe_flags & ~XFS_REFCOUNT_EXTENT_FLAGS)
		return false;

	switch (pmap->pe_flags & XFS_REFCOUNT_EXTENT_TYPE_MASK) {
	case XFS_REFCOUNT_INCREASE:
	case XFS_REFCOUNT_DECREASE:
	case XFS_REFCOUNT_ALLOC_COW:
	case XFS_REFCOUNT_FREE_COW:
		break;
	default:
		return false;
	}

	/*
	 * Verify the extent against the address space it belongs to.
	 * NOTE(review): the original text was truncated after the realtime
	 * branch; the data-device return below restores the symmetric
	 * check (xfs_verify_fsbext) so the function no longer falls off
	 * the end — confirm against the upstream source.
	 */
	if (isrt)
		return xfs_verify_rtbext(mp, pmap->pe_startblock, pmap->pe_len);

	return xfs_verify_fsbext(mp, pmap->pe_startblock, pmap->pe_len);
}
/* * Process a refcount update intent item that was recovered from the log. * We need to update the refcountbt.
*/ STATICint
xfs_refcount_recover_work( struct xfs_defer_pending *dfp, struct list_head *capture_list)
{ struct xfs_trans_res resv; struct xfs_log_item *lip = dfp->dfp_intent; struct xfs_cui_log_item *cuip = CUI_ITEM(lip); struct xfs_trans *tp; struct xfs_mount *mp = lip->li_log->l_mp; bool isrt = xfs_cui_item_isrt(lip); int i; int error = 0;
/* * First check the validity of the extents described by the * CUI. If any are bad, then assume that all are bad and * just toss the CUI.
*/ for (i = 0; i < cuip->cui_format.cui_nextents; i++) { if (!xfs_cui_validate_phys(mp, isrt,
&cuip->cui_format.cui_extents[i])) {
XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
&cuip->cui_format, sizeof(cuip->cui_format)); return -EFSCORRUPTED;
}
/* * Under normal operation, refcount updates are deferred, so we * wouldn't be adding them directly to a transaction. All * refcount updates manage reservation usage internally and * dynamically by deferring work that won't fit in the * transaction. Normally, any work that needs to be deferred * gets attached to the same defer_ops that scheduled the * refcount update. However, we're in log recovery here, so we * use the passed in defer_ops and to finish up any work that * doesn't fit. We need to reserve enough blocks to handle a * full btree split on either end of the refcount range.
*/
resv = xlog_recover_resv(&M_RES(mp)->tr_itruncate);
error = xfs_trans_alloc(mp, &resv, mp->m_refc_maxlevels * 2, 0,
XFS_TRANS_RESERVE, &tp); if (error) return error;
error = xlog_recover_finish_intent(tp, dfp); if (error == -EFSCORRUPTED)
XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
&cuip->cui_format, sizeof(cuip->cui_format)); if (error) goto abort_error;
/* Process a deferred realtime refcount update. */ STATICint
xfs_rtrefcount_update_finish_item( struct xfs_trans *tp, struct xfs_log_item *done, struct list_head *item, struct xfs_btree_cur **state)
{ struct xfs_refcount_intent *ri = ci_entry(item); int error;
error = xfs_rtrefcount_finish_one(tp, ri, state);
/* Did we run out of reservation? Requeue what we didn't finish. */ if (!error && ri->ri_blockcount > 0) {
ASSERT(ri->ri_type == XFS_REFCOUNT_INCREASE ||
ri->ri_type == XFS_REFCOUNT_DECREASE); return -EAGAIN;
}
for (i = 0; i < src->cui_nextents; i++)
memcpy(&dst->cui_extents[i], &src->cui_extents[i], sizeof(struct xfs_phys_extent));
}
/* * This routine is called to create an in-core extent refcount update * item from the cui format structure which was logged on disk. * It allocates an in-core cui, copies the extents from the format * structure into it, and adds the cui to the AIL with the given * LSN.
*/ STATICint
xlog_recover_cui_commit_pass2( struct xlog *log, struct list_head *buffer_list, struct xlog_recover_item *item,
xfs_lsn_t lsn)
{ struct xfs_mount *mp = log->l_mp; struct xfs_cui_log_item *cuip; struct xfs_cui_log_format *cui_formatp;
size_t len;
/* * This routine is called when an CUD format structure is found in a committed * transaction in the log. Its purpose is to cancel the corresponding CUI if it * was still in the log. To do this it searches the AIL for the CUI with an id * equal to that in the CUD format structure. If we find it we drop the CUD * reference, which removes the CUI from the AIL and frees it.
*/ STATICint
xlog_recover_cud_commit_pass2( struct xlog *log, struct list_head *buffer_list, struct xlog_recover_item *item,
xfs_lsn_t lsn)
{ struct xfs_cud_log_format *cud_formatp;
/*
 * NOTE(review): the following paragraph is website boilerplate (a German
 * disclaimer) that leaked into this file during extraction; it is not part
 * of the source code. Preserved here in English translation:
 *
 * "The information on this web page has been carefully compiled to the
 * best of our knowledge. However, neither completeness, nor correctness,
 * nor quality of the information provided is guaranteed.
 * Note: the colored syntax highlighting and the measurement are still
 * experimental."
 */