/*
 * Relax during a scrub operation and exit if there's a fatal signal pending.
 *
 * If preemption is disabled, we need to yield to the scheduler every now and
 * then so that we don't run afoul of the soft lockup watchdog or RCU stall
 * detector.  cond_resched calls are somewhat expensive (~5ns) so we want to
 * ratelimit this to 10x per second.  Amortize the cost of the other checks by
 * only doing it once every 100 calls.
 */
static inline int xchk_maybe_relax(struct xchk_relax *widget)
{
	/* Amortize the cost of scheduling and checking signals. */
	if (likely(++widget->resched_nr < 100))
		return 0;
	widget->resched_nr = 0;

	/*
	 * NOTE(review): a plain "<=" comparison against jiffies does not
	 * account for counter wraparound the way time_after_eq() does --
	 * confirm whether wrap matters for next_resched here.
	 */
	if (unlikely(widget->next_resched <= jiffies)) {
		cond_resched();
		widget->next_resched = XCHK_RELAX_NEXT;
	}

	/* Only interruptible scrubs bail out on a pending fatal signal. */
	if (widget->interruptible && fatal_signal_pending(current))
		return -EINTR;

	return 0;
}
/*
 * Standard flags for allocating memory within scrub.  NOFS context is
 * configured by the process allocation scope.  Scrub and repair must be able
 * to back out gracefully if there isn't enough memory.  Force-cast to avoid
 * complaints from static checkers.
 */
#define XCHK_GFP_FLAGS	((__force gfp_t)(GFP_KERNEL | __GFP_NOWARN | \
					 __GFP_RETRY_MAYFAIL))
/*
 * For opening files by handle for fsck operations, we don't trust the inumber
 * or the allocation state; therefore, perform an untrusted lookup.  We don't
 * want these inodes to pollute the cache, so mark them for immediate removal.
 */
#define XCHK_IGET_FLAGS	(XFS_IGET_UNTRUSTED | XFS_IGET_DONTCACHE)
/* Type info and names for the scrub types. */
enum xchk_type {
	ST_NONE = 1,	/* disabled */
	ST_PERAG,	/* per-AG metadata */
	ST_FS,		/* per-FS metadata */
	ST_INODE,	/* per-inode metadata */
	ST_GENERIC,	/* determined by the scrubber */
	ST_RTGROUP,	/* rtgroup metadata */
};
/* Operations for one scrubber type. */
struct xchk_meta_ops {
	/* Acquire whatever resources are needed for the operation. */
	int		(*setup)(struct xfs_scrub *sc);

	/* Examine metadata for errors. */
	int		(*scrub)(struct xfs_scrub *);

	/* Repair or optimize the metadata. */
	int		(*repair)(struct xfs_scrub *);

	/*
	 * Re-scrub the metadata we repaired, in case there's extra work that
	 * we need to do to check our repair work.  If this is NULL, we'll use
	 * the ->scrub function pointer, assuming that the regular scrub is
	 * sufficient.
	 */
	int		(*repair_eval)(struct xfs_scrub *sc);

	/* Decide if we even have this piece of metadata. */
	bool		(*has)(const struct xfs_mount *);

	/* type describing required/allowed inputs */
	enum xchk_type	type;
};
/*
 * Online scrub/repair context.
 *
 * NOTE(review): the source text declared this as "struct xchk_ag" (with the
 * comment "Buffer pointers and btree cursors for an entire AG"), but the
 * member "struct xchk_ag sa" below would make such a struct contain itself by
 * value, which cannot compile.  The member list matches upstream XFS's
 * struct xfs_scrub, so that name is restored here; several leading members
 * also appear to have been lost when this chunk was extracted -- reconcile
 * against the upstream header.
 */
struct xfs_scrub {
	/*
	 * NOTE(review): upstream keeps the perag pointer inside
	 * struct xchk_ag rather than here -- confirm placement.
	 */
	struct xfs_perag	*pag;

	/* File that scrub was called with. */
	struct file		*file;

	/*
	 * File that is undergoing the scrub operation.  This can differ from
	 * the file that scrub was called with if we're checking file-based fs
	 * metadata (e.g. rt bitmaps) or if we're doing a scrub-by-handle for
	 * something that can't be opened directly (e.g. symlinks).
	 */
	struct xfs_inode	*ip;

	/* Kernel memory buffer used by scrubbers; freed at teardown. */
	void			*buf;

	/*
	 * Clean up resources owned by whatever is in the buffer.  Cleanup can
	 * be deferred with this hook as a means for scrub functions to pass
	 * data to repair functions.  This function must not free the buffer
	 * itself.
	 */
	void			(*buf_cleanup)(void *buf);

	/* xfile used by the scrubbers; freed at teardown. */
	struct xfile		*xfile;

	/* buffer target for in-memory btrees; also freed at teardown. */
	struct xfs_buftarg	*xmbtp;

	/* Lock flags for @ip. */
	uint			ilock_flags;

	/* The orphanage, for stashing files that have lost their parent. */
	uint			orphanage_ilock_flags;
	struct xfs_inode	*orphanage;

	/* A temporary file on this filesystem, for staging new metadata. */
	struct xfs_inode	*tempip;
	uint			temp_ilock_flags;

	/* See the XCHK/XREP state flags below. */
	unsigned int		flags;

	/*
	 * The XFS_SICK_* flags that correspond to the metadata being scrubbed
	 * or repaired.  We will use this mask to update the in-core fs health
	 * status with whatever we find.
	 */
	unsigned int		sick_mask;

	/*
	 * Clear these XFS_SICK_* flags but only if the scan is ok.  Useful
	 * for removing ZAPPED flags after a repair.
	 */
	unsigned int		healthy_mask;

	/* next time we want to cond_resched() */
	struct xchk_relax	relax;

	/* State tracking for single-AG operations. */
	struct xchk_ag		sa;

	/* State tracking for realtime operations. */
	struct xchk_rt		sr;
};
/* XCHK state flags grow up from zero, XREP state flags grow down from 2^31 */
#define XCHK_TRY_HARDER		(1U << 0)  /* can't get resources, try again */
#define XCHK_HAVE_FREEZE_PROT	(1U << 1)  /* do we have freeze protection? */
#define XCHK_FSGATES_DRAIN	(1U << 2)  /* defer ops draining enabled */
#define XCHK_NEED_DRAIN		(1U << 3)  /* scrub needs to drain defer ops */
#define XCHK_FSGATES_QUOTA	(1U << 4)  /* quota live update enabled */
#define XCHK_FSGATES_DIRENTS	(1U << 5)  /* directory live update enabled */
#define XCHK_FSGATES_RMAP	(1U << 6)  /* rmapbt live update enabled */
#define XREP_RESET_PERAG_RESV	(1U << 30) /* must reset AG space reservation */
#define XREP_ALREADY_FIXED	(1U << 31) /* checking our repair work */
/*
 * The XCHK_FSGATES* flags reflect functionality in the main filesystem that
 * are only enabled for this particular online fsck.  When not in use, the
 * features are gated off via dynamic code patching, which is why the state
 * must be enabled during scrub setup and can only be torn down afterwards.
 */
#define XCHK_FSGATES_ALL	(XCHK_FSGATES_DRAIN | \
				 XCHK_FSGATES_QUOTA | \
				 XCHK_FSGATES_DIRENTS | \
				 XCHK_FSGATES_RMAP)
/* * We /could/ terminate a scrub/repair operation early. If we're not * in a good place to continue (fatal signal, etc.) then bail out. * Note that we're careful not to make any judgements about *error.
*/ staticinlinebool
xchk_should_terminate( struct xfs_scrub *sc, int *error)
{ if (xchk_maybe_relax(&sc->relax)) { if (*error == 0)
*error = -EINTR; returntrue;
} returnfalse;
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.