/*
 * Quota Repair
 * ============
 *
 * Quota repairs are fairly simplistic; we fix everything that the dquot
 * verifiers complain about, cap any counters or limits that make no sense,
 * and schedule a quotacheck if we had to fix anything.  We also repair any
 * data fork extent records that don't apply to metadata files.
 */
/* * Allocate a new block into a sparse hole in the quota file backing this * dquot, initialize the block, and commit the whole mess.
*/ STATICint
xrep_quota_item_fill_bmap_hole( struct xfs_scrub *sc, struct xfs_dquot *dq, struct xfs_bmbt_irec *irec)
{ struct xfs_buf *bp; struct xfs_mount *mp = sc->mp; int nmaps = 1; int error;
xfs_trans_ijoin(sc->tp, sc->ip, 0);
/* Map a block into the file. */
error = xfs_trans_reserve_more(sc->tp, XFS_QM_DQALLOC_SPACE_RES(mp),
0); if (error) return error;
/* * Finish the mapping transactions and roll one more time to * disconnect sc->ip from sc->tp.
*/
error = xrep_defer_finish(sc); if (error) return error; return xfs_trans_roll(&sc->tp);
}
/* Make sure there's a written block backing this dquot */ STATICint
xrep_quota_item_bmap( struct xfs_scrub *sc, struct xfs_dquot *dq, bool *dirty)
{ struct xfs_bmbt_irec irec; struct xfs_mount *mp = sc->mp; struct xfs_quotainfo *qi = mp->m_quotainfo;
xfs_fileoff_t offset = dq->q_id / qi->qi_dqperchunk; int nmaps = 1; int error;
/* The computed file offset should always be valid. */ if (!xfs_verify_fileoff(mp, offset)) {
ASSERT(xfs_verify_fileoff(mp, offset)); return -EFSCORRUPTED;
}
dq->q_fileoffset = offset;
if (nmaps < 1 || !xfs_bmap_is_real_extent(&irec)) { /* Hole/delalloc extent; allocate a real block. */
error = xrep_quota_item_fill_bmap_hole(sc, dq, &irec); if (error) return error;
} elseif (irec.br_state != XFS_EXT_NORM) { /* Unwritten extent, which we already took care of? */
ASSERT(irec.br_state == XFS_EXT_NORM); return -EFSCORRUPTED;
} elseif (dq->q_blkno != XFS_FSB_TO_DADDR(mp, irec.br_startblock)) { /* * If the cached daddr is incorrect, repair probably punched a * hole out of the quota file and filled it back in with a new * block. Update the block mapping in the dquot.
*/
dq->q_blkno = XFS_FSB_TO_DADDR(mp, irec.br_startblock);
}
/*
 * NOTE(review): this function is visibly TRUNCATED in this copy -- it has no
 * closing brace before the next span, which jumps into unrelated
 * buffer-repair code.  The call to xrep_quota_item_bmap() (the reason for
 * the ILOCK dance below), the soft/hard limit capping, and the dirty-flush
 * tail appear to be missing.  Confirm against the original source.
 */
/* Scrub the fields in an individual quota item. */ STATICint
xrep_quota_item( struct xrep_quota_info *rqi, struct xfs_dquot *dq)
{ struct xfs_scrub *sc = rqi->sc; struct xfs_mount *mp = sc->mp;
xfs_ino_t fs_icount; bool dirty = false; int error = 0;
/* Last chance to abort before we start committing fixes. */ if (xchk_should_terminate(sc, &error)) return error;
/* * We might need to fix holes in the bmap record for the storage * backing this dquot, so we need to lock the dquot and the quota file. * dqiterate gave us a locked dquot, so drop the dquot lock to get the * ILOCK_EXCL.
*/
xfs_dqunlock(dq);
xchk_ilock(sc, XFS_ILOCK_EXCL);
xfs_dqlock(dq);
/* * Check that usage doesn't exceed physical limits. However, on * a reflink filesystem we're allowed to exceed physical space * if there are no quota limits. We don't know what the real number * is, but we can make quotacheck find out for us.
*/ if (!xfs_has_reflink(mp) && dq->q_blk.count > mp->m_sb.sb_dblocks) {
/* Cap the block count at the fs size; keep reserved consistent. */
dq->q_blk.reserved -= dq->q_blk.count;
dq->q_blk.reserved += mp->m_sb.sb_dblocks;
dq->q_blk.count = mp->m_sb.sb_dblocks;
rqi->need_quotacheck = true;
dirty = true;
}
/* Inode count cannot exceed the summed per-cpu inode counter. */
fs_icount = percpu_counter_sum(&mp->m_icount); if (dq->q_ino.count > fs_icount) {
dq->q_ino.reserved -= dq->q_ino.count;
dq->q_ino.reserved += fs_icount;
dq->q_ino.count = fs_icount;
rqi->need_quotacheck = true;
dirty = true;
}
/* Same capping for realtime blocks against sb_rblocks. */
if (!xfs_has_reflink(mp) && dq->q_rtb.count > mp->m_sb.sb_rblocks) {
dq->q_rtb.reserved -= dq->q_rtb.count;
dq->q_rtb.reserved += mp->m_sb.sb_rblocks;
dq->q_rtb.count = mp->m_sb.sb_rblocks;
rqi->need_quotacheck = true;
dirty = true;
}
/*
 * NOTE(review): orphaned FRAGMENT.  The enclosing function header and the
 * declarations of daddr, qi, bp, dqblk, ddq, dqtype, id, and i are not
 * visible here -- presumably this is the middle of a dquot cluster-buffer
 * repair helper; confirm against the original source.
 */
/* Read the buffer with the dquot verifier attached. */
error = xfs_trans_read_buf(sc->mp, sc->tp, sc->mp->m_ddev_targp, daddr,
qi->qi_dqchunklen, 0, &bp, &xfs_dquot_buf_ops); switch (error) { case -EFSBADCRC: case -EFSCORRUPTED: /* Failed verifier, retry read with no ops. */
error = xfs_trans_read_buf(sc->mp, sc->tp,
sc->mp->m_ddev_targp, daddr, qi->qi_dqchunklen,
0, &bp, NULL); if (error) return error; break; case 0:
dqblk = bp->b_addr;
ddq = &dqblk[0].dd_diskdq;
/* * If there's nothing that would impede a dqiterate, we're * done.
*/ if ((ddq->d_type & XFS_DQTYPE_REC_MASK) != dqtype ||
id == be32_to_cpu(ddq->d_id)) {
xfs_trans_brelse(sc->tp, bp); return 0;
} break; default: return error;
}
/* Something's wrong with the block, fix the whole thing. */
dqblk = bp->b_addr;
/* Reattach the verifier and walk every dquot in the cluster. */
bp->b_ops = &xfs_dquot_buf_ops; for (i = 0; i < qi->qi_dqperchunk; i++, dqblk++) {
ddq = &dqblk->dd_diskdq;
/*
 * NOTE(review): the per-dquot repair body appears to be missing here;
 * only the deferred-work finish survived the extraction.
 */
error = xfs_defer_finish(&sc->tp); if (error) goto out;
}
}
/*
 * NOTE(review): orphaned FRAGMENT.  This looks like the tail of a quota-file
 * data-fork repair function (the header and the declarations of joined,
 * truncate, max_dqid_off, ifp, icur, irec, fsbno, off, dqtype, and qi are
 * not visible) -- confirm against the original source.
 */
if (!joined) {
xfs_trans_ijoin(sc->tp, sc->ip, 0);
joined = true;
}
if (truncate) { /* Erase everything after the block containing the max dquot */
error = xfs_bunmapi_range(&sc->tp, sc->ip, 0,
max_dqid_off * sc->mp->m_sb.sb_blocksize,
XFS_MAX_FILEOFF); if (error) goto out;
/* Remove all CoW reservations. */
error = xfs_reflink_cancel_cow_blocks(sc->ip, &sc->tp, 0,
XFS_MAX_FILEOFF, true); if (error) goto out;
sc->ip->i_diflags2 &= ~XFS_DIFLAG2_REFLINK;
/* * Always re-log the inode so that our permanent transaction * can keep on rolling it forward in the log.
*/
xfs_trans_log_inode(sc->tp, sc->ip, XFS_ILOG_CORE);
}
/* Now go fix anything that fails the verifiers. */
/* Walk each extent, repairing one dquot cluster at a time. */
for_each_xfs_iext(ifp, &icur, &irec) { for (fsbno = irec.br_startblock, off = irec.br_startoff;
fsbno < irec.br_startblock + irec.br_blockcount;
fsbno += XFS_DQUOT_CLUSTER_SIZE_FSB,
off += XFS_DQUOT_CLUSTER_SIZE_FSB) {
error = xrep_quota_block(sc,
XFS_FSB_TO_DADDR(sc->mp, fsbno),
dqtype, off * qi->qi_dqperchunk); if (error) goto out;
}
}
out: return error;
}
/* * Go fix anything in the quota items that we could have been mad about. Now * that we've checked the quota inode data fork we have to drop ILOCK_EXCL to * use the regular dquot functions.
*/ STATICint
xrep_quota_problems( struct xfs_scrub *sc,
xfs_dqtype_t dqtype)
{ struct xchk_dqiter cursor = { }; struct xrep_quota_info rqi = { .sc = sc }; struct xfs_dquot *dq; int error;
xchk_dqiter_init(&cursor, sc, dqtype); while ((error = xchk_dquot_iter(&cursor, &dq)) == 1) {
error = xrep_quota_item(&rqi, dq);
xfs_qm_dqput(dq); if (error) break;
} if (error) return error;
/* Make a quotacheck happen. */ if (rqi.need_quotacheck)
xrep_force_quotacheck(sc, dqtype); return 0;
}
/* Repair all of a quota type's items. */ int
xrep_quota( struct xfs_scrub *sc)
{
xfs_dqtype_t dqtype; int error;
dqtype = xchk_quota_to_dqtype(sc);
/* * Re-take the ILOCK so that we can fix any problems that we found * with the data fork mappings, or with the dquot bufs themselves.
*/ if (!(sc->ilock_flags & XFS_ILOCK_EXCL))
xchk_ilock(sc, XFS_ILOCK_EXCL);
error = xrep_quota_data_fork(sc, dqtype); if (error) return error;
/* * Finish deferred items and roll the transaction to unjoin the quota * inode from transaction so that we can unlock the quota inode; we * play only with dquots from now on.
*/
error = xrep_defer_finish(sc); if (error) return error;
error = xfs_trans_roll(&sc->tp); if (error) return error;
xchk_iunlock(sc, sc->ilock_flags);
/* Fix anything the dquot verifiers don't complain about. */
error = xrep_quota_problems(sc, dqtype); if (error) return error;
return xrep_trans_commit(sc);
}
/*
 * NOTE(review): the text below is web-page boilerplate (German processing
 * time / disclaimer notice) accidentally appended during extraction; it is
 * not part of the source.  Wrapped in a comment so it cannot be parsed as
 * code.  Preserved verbatim:
 *
 * Messung V0.5
 * Dauer der Verarbeitung: 0.11 Sekunden
 * (vorverarbeitet)
 * Die Informationen auf dieser Webseite wurden
 * nach bestem Wissen sorgfaeltig zusammengestellt. Es wird jedoch weder
 * Vollstaendigkeit, noch Richtigkeit, noch Qualitaet der bereit gestellten
 * Informationen zugesichert.
 * Bemerkung:
 * Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.
 */