/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * Copyright (c) 2018 Red Hat, Inc.
 * All rights reserved.
 */

/*
 * xfs_initialize_perag_data
 *
 * Read in each per-ag structure so we can count up the number of
 * allocated inodes, free inodes and used filesystem blocks as this
 * information is no longer persistent in the superblock. Once we have
 * this information, write it into the in-core superblock structure.
 */
int
xfs_initialize_perag_data(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agcount)
{
	xfs_agnumber_t		index;
	struct xfs_perag	*pag;
	struct xfs_sb		*sbp = &mp->m_sb;
	uint64_t		ifree = 0;
	uint64_t		ialloc = 0;
	uint64_t		bfree = 0;
	uint64_t		bfreelst = 0;
	uint64_t		btree = 0;
	uint64_t		fdblocks;
	int			error = 0;

	for (index = 0; index < agcount; index++) {
		/*
		 * Read the AGF and AGI buffers to populate the per-ag
		 * structures for us.
		 */
		pag = xfs_perag_get(mp, index);
		error = xfs_alloc_read_agf(pag, NULL, 0, NULL);
		if (!error)
			error = xfs_ialloc_read_agi(pag, NULL, 0, NULL);
		if (error) {
			xfs_perag_put(pag);
			return error;
		}

		ifree += pag->pagi_freecount;
		ialloc += pag->pagi_count;
		bfree += pag->pagf_freeblks;
		bfreelst += pag->pagf_flcount;
		btree += pag->pagf_btreeblks;
		xfs_perag_put(pag);
	}
	fdblocks = bfree + bfreelst + btree;

	/*
	 * If the new summary counts are obviously incorrect, fail the
	 * mount operation because that implies the AGFs are also corrupt.
	 * Clear FS_COUNTERS so that we don't unmount with a dirty log, which
	 * will prevent xfs_repair from fixing anything.
	 */
	if (fdblocks > sbp->sb_dblocks || ifree > ialloc) {
xfs_alert(mp, "AGF corruption. Please run xfs_repair.");
xfs_fs_mark_sick(mp, XFS_SICK_FS_COUNTERS);
		error = -EFSCORRUPTED;
		goto out;
}
/* Overwrite incore superblock counters with just-read data */
spin_lock(&mp->m_sb_lock);
sbp->sb_ifree = ifree;
sbp->sb_icount = ialloc;
sbp->sb_fdblocks = fdblocks;
spin_unlock(&mp->m_sb_lock);

	xfs_reinit_percpu_counters(mp);
out:
	xfs_fs_mark_healthy(mp, XFS_SICK_FS_COUNTERS);
	return error;
}

/*
 * Free up the per-ag resources within the specified AG range.
 */
void
xfs_free_perag_range(
	struct xfs_mount	*mp,
	xfs_agnumber_t		first_agno,
	xfs_agnumber_t		end_agno)

/* Calculate the first and last possible inode number in an AG. */
static void
__xfs_agino_range(
	struct xfs_mount	*mp,
xfs_agblock_t eoag,
xfs_agino_t *first,
xfs_agino_t *last)
{
xfs_agblock_t bno;
	/*
	 * Calculate the first inode, which will be in the first
	 * cluster-aligned block after the AGFL.
	 */
bno = round_up(XFS_AGFL_BLOCK(mp) + 1, M_IGEO(mp)->cluster_align);
*first = XFS_AGB_TO_AGINO(mp, bno);

	/*
	 * Calculate the last inode, which will be at the end of the
	 * last (aligned) cluster that can be allocated in the AG.
	 */
bno = round_down(eoag, M_IGEO(mp)->cluster_align);
*last = XFS_AGB_TO_AGINO(mp, bno) - 1;
}
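
/*
 * Illustrative sketch (not part of the original file): callers normally reach
 * __xfs_agino_range() through a small wrapper that passes the AG block count
 * as @eoag. The wrapper name and the xfs_ag_block_count() helper are assumed
 * from the surrounding XFS code; treat this as a usage sketch only.
 */
void
xfs_agino_range(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agno,
	xfs_agino_t		*first,
	xfs_agino_t		*last)
{
	/* Hand the size of this AG to the core calculation above. */
	return __xfs_agino_range(mp, xfs_ag_block_count(mp, agno), first, last);
}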

/*
 * Update the perag of the previous tail AG if it has been changed during
 * recovery (i.e. recovery of a growfs).
 */
int
xfs_update_last_ag_size(
	struct xfs_mount	*mp,
	xfs_agnumber_t		prev_agcount)
{
	struct xfs_perag	*pag = xfs_perag_grab(mp, prev_agcount - 1);
	ASSERT(start >= mp->m_ag_prealloc_blocks);
	if (start != mp->m_ag_prealloc_blocks) {
		/*
		 * Modify first record to pad stripe align of log and
		 * bump the record count.
		 */
arec->ar_blockcount = cpu_to_be32(start -
mp->m_ag_prealloc_blocks);
be16_add_cpu(&block->bb_numrecs, 1);
nrec = arec + 1;

		/*
		 * Insert second record at start of internal log
		 * which then gets trimmed.
		 */
nrec->ar_startblock = cpu_to_be32(
be32_to_cpu(arec->ar_startblock) +
be32_to_cpu(arec->ar_blockcount));
arec = nrec;
	}
	/*
	 * Change record start to after the internal log.
	 */
be32_add_cpu(&arec->ar_startblock, mp->m_sb.sb_logblocks);
}

	/*
	 * Calculate the block count of this record; if it is nonzero,
	 * increment the record count.
	 */
	arec->ar_blockcount = cpu_to_be32(id->agsize -
					  be32_to_cpu(arec->ar_startblock));
	if (arec->ar_blockcount)
be16_add_cpu(&block->bb_numrecs, 1);
}
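
/*
 * Worked example (illustrative, not from the original file): assuming the
 * first record's ar_startblock was initialised to mp->m_ag_prealloc_blocks
 * earlier in this function, an AG of id->agsize = 1024 blocks with
 * m_ag_prealloc_blocks = 16 and no internal log in this AG ends up with a
 * single free space record covering agbno 16 onwards:
 *
 *	ar_blockcount = 1024 - 16 = 1008 blocks
 *
 * and bb_numrecs is incremented because that count is nonzero.
 */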

	/*
	 * Mark the AG header regions as static metadata. The BNO
	 * btree block is the first block after the headers, so
	 * its location defines the size of the region the static
	 * metadata consumes.
	 *
	 * Note: unlike mkfs, we never have to account for log
	 * space when growing the data regions.
	 */
rrec = XFS_RMAP_REC_ADDR(block, 1);
rrec->rm_startblock = 0;
rrec->rm_blockcount = cpu_to_be32(XFS_BNO_BLOCK(mp));
rrec->rm_owner = cpu_to_be64(XFS_RMAP_OWN_FS);
rrec->rm_offset = 0;

	/* account for the log space */
	if (xfs_ag_contains_log(mp, id->agno)) {
rrec = XFS_RMAP_REC_ADDR(block,
be16_to_cpu(block->bb_numrecs) + 1);
rrec->rm_startblock = cpu_to_be32(
XFS_FSB_TO_AGBNO(mp, mp->m_sb.sb_logstart));
rrec->rm_blockcount = cpu_to_be32(mp->m_sb.sb_logblocks);
rrec->rm_owner = cpu_to_be64(XFS_RMAP_OWN_LOG);
rrec->rm_offset = 0;
be16_add_cpu(&block->bb_numrecs, 1);
}
}

/*
 * Initialise new secondary superblocks with the pre-grow geometry, but mark
 * them as "in progress" so we know they haven't yet been activated. This will
 * get cleared when the update with the new geometry information is done after
 * changes to the primary are committed. This isn't strictly necessary, but we
 * get it for free with the delayed buffer write lists and it means we can tell
 * if a grow operation didn't complete properly after the fact.
 */
static void
xfs_sbblock_init(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp,
	struct aghdr_init_data	*id)
{
	struct xfs_dsb		*dsb = bp->b_addr;
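
	/*
	 * A minimal sketch of how this helper completes, assuming it simply
	 * does what the comment above describes: copy the in-core superblock
	 * into on-disk format and flag the copy as "in progress".
	 */
	xfs_sb_to_disk(dsb, &mp->m_sb);
	dsb->sb_inprogress = 1;
}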

/*
 * Prepare new AG headers to be written to disk. We use uncached buffers here,
 * as it is assumed these new AG headers are beyond the currently valid
 * filesystem address space. Using cached buffers would trip over EOFS
 * corruption detection algorithms in the buffer cache lookup routines.
 *
 * This is a non-transactional function, but the prepared buffers are added to
 * a delayed write buffer list supplied by the caller so they can submit them
 * to disk and wait on them as required.
 */
int
xfs_ag_init_headers(
	struct xfs_mount	*mp,
	struct aghdr_init_data	*id)
/* Account for AG free space in new AG */
	id->nfree += id->agsize - mp->m_ag_prealloc_blocks;

	for (dp = &aghdr_data[0]; dp->daddr != XFS_BUF_DADDR_NULL; dp++) {
		if (!dp->need_init)
			continue;
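
	/*
	 * Illustrative caller sketch (assumed from the surrounding XFS grow
	 * code, not part of this excerpt): the caller supplies the delayed
	 * write list in @id and submits it once all new headers are prepared:
	 *
	 *	struct aghdr_init_data	id = {};
	 *
	 *	INIT_LIST_HEAD(&id.buffer_list);
	 *	error = xfs_ag_init_headers(mp, &id);
	 *	if (!error)
	 *		error = xfs_buf_delwri_submit(&id.buffer_list);
	 */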
	error = xfs_alloc_read_agf(pag, *tpp, 0, &agfbp);
	if (error)
		return error;
agf = agfbp->b_addr;
	aglen = be32_to_cpu(agi->agi_length);

	/* some extra paranoid checks before we shrink the ag */
	if (XFS_IS_CORRUPT(mp, agf->agf_length != agi->agi_length)) {
		xfs_ag_mark_sick(pag, XFS_SICK_AG_AGF);
		return -EFSCORRUPTED;
	}
	if (delta >= aglen)
		return -EINVAL;

	/*
	 * Make sure that the last inode cluster cannot overlap with the new
	 * end of the AG, even if it's sparse.
	 */
	error = xfs_ialloc_check_shrink(pag, *tpp, agibp, aglen - delta);
	if (error)
		return error;

	/*
	 * Disable perag reservations so it doesn't cause the allocation request
	 * to fail. We'll reestablish reservation before we return.
	 */
xfs_ag_resv_free(pag);
/* internal log shouldn't also show up in the free space btrees */
error = xfs_alloc_vextent_exact_bno(&args,
			xfs_agbno_to_fsb(pag, aglen - delta));
	if (!error && args.agbno == NULLAGBLOCK)
		error = -ENOSPC;
	if (error) {
		/*
		 * If extent allocation fails, need to roll the transaction to
		 * ensure that the AGFL fixup has been committed anyway.
		 *
		 * We need to hold the AGF across the roll to ensure nothing can
		 * access the AG for allocation until the shrink is fully
		 * cleaned up. And due to the resetting of the AG block
		 * reservation space needing to lock the AGI, we also have to
		 * hold that so we don't get AGI/AGF lock order inversions in
		 * the error handling path.
		 */
xfs_trans_bhold(*tpp, agfbp);
xfs_trans_bhold(*tpp, agibp);
		err2 = xfs_trans_roll(tpp);
		if (err2)
			return err2;
xfs_trans_bjoin(*tpp, agfbp);
		xfs_trans_bjoin(*tpp, agibp);
		goto resv_init_out;
	}

	/*
	 * if successfully deleted from freespace btrees, need to confirm
	 * per-AG reservation works as expected.
	 */
be32_add_cpu(&agi->agi_length, -delta);
be32_add_cpu(&agf->agf_length, -delta);
	err2 = xfs_ag_resv_init(pag, *tpp);
	if (err2) {
		be32_add_cpu(&agi->agi_length, delta);
		be32_add_cpu(&agf->agf_length, delta);
		if (err2 != -ENOSPC)
			goto resv_err;

		/*
		 * Roll the transaction before trying to re-init the per-ag
		 * reservation. The new transaction is clean so it will cancel
		 * without any side effects.
		 */
		error = xfs_defer_finish(tpp);
		if (error)
			return error;

/*
 * Extend the AG indicated by the @id by the length passed in.
 */
int
xfs_ag_extend_space(
	struct xfs_perag	*pag,
	struct xfs_trans	*tp,
	xfs_extlen_t		len)
{
	struct xfs_mount	*mp = pag_mount(pag);
	struct xfs_buf		*bp;
	struct xfs_agi		*agi;
	struct xfs_agf		*agf;
	int			error;
ASSERT(pag_agno(pag) == mp->m_sb.sb_agcount - 1);
	error = xfs_ialloc_read_agi(pag, tp, 0, &bp);
	if (error)
		return error;

	/*
	 * Free the new space.
	 *
	 * XFS_RMAP_OINFO_SKIP_UPDATE is used here to tell the rmap btree that
	 * this doesn't actually exist in the rmap btree.
	 */
error = xfs_rmap_free(tp, bp, pag, be32_to_cpu(agf->agf_length) - len,
			len, &XFS_RMAP_OINFO_SKIP_UPDATE);
	if (error)
		return error;