/*
 * When we mark a buffer stale, we remove the buffer from the LRU and clear the
 * b_lru_ref count so that the buffer is freed immediately when the buffer
 * reference count falls to zero. If the buffer is already on the LRU, we need
 * to remove the reference that the LRU holds on the buffer.
 *
 * This prevents build-up of stale buffers on the LRU.
 */
void
xfs_buf_stale(
	struct xfs_buf	*bp)
{
	ASSERT(xfs_buf_islocked(bp));

	bp->b_flags |= XBF_STALE;
	/*
	 * Clear the delwri status so that a delwri queue walker will not
	 * flush this buffer to disk now that it is stale. The delwri queue has
	 * a reference to the buffer, so this is safe to do.
	 */
	bp->b_flags &= ~_XBF_DELWRI_Q;

	spin_lock(&bp->b_lock);
	atomic_set(&bp->b_lru_ref, 0);
	if (!(bp->b_state & XFS_BSTATE_DISPOSE) &&
	    (list_lru_del_obj(&bp->b_target->bt_lru, &bp->b_lru)))
		bp->b_hold--;

	ASSERT(bp->b_hold >= 1);
	spin_unlock(&bp->b_lock);
}
static int
xfs_buf_alloc_kmem(
	struct xfs_buf	*bp,
	size_t		size,
	gfp_t		gfp_mask)
{
	bp->b_addr = kmalloc(size, gfp_mask | __GFP_NOFAIL);
	if (!bp->b_addr)
		return -ENOMEM;

	/*
	 * Slab guarantees that we get back naturally aligned allocations for
	 * power of two sizes. Keep this check as the canary in the coal mine
	 * if anything changes in slab.
	 */
	if (WARN_ON_ONCE(!IS_ALIGNED((unsigned long)bp->b_addr, size))) {
		kfree(bp->b_addr);
		bp->b_addr = NULL;
		return -ENOMEM;
	}
	bp->b_flags |= _XBF_KMEM;
	trace_xfs_buf_backing_kmem(bp, _RET_IP_);
	return 0;
}
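/*
 * Illustrative aside (not part of the original source): for a power-of-two
 * size such as 512, IS_ALIGNED() reduces to a mask test, i.e. the canary
 * above is equivalent to checking
 *
 *	((unsigned long)bp->b_addr & (512 - 1)) == 0
 *
 * which is the natural alignment that sector-sized metadata buffers rely on.
 */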
/*
 * Allocate backing memory for a buffer.
 *
 * For tmpfs-backed buffers used by in-memory btrees this directly maps the
 * tmpfs page cache folios.
 *
 * For real file system buffers there are three different kinds of backing
 * memory:
 *
 * The first type backs the buffer by a kmalloc allocation. This is done for
 * less than PAGE_SIZE allocations to avoid wasting memory.
 *
 * The second type is a single folio buffer - this may be a high order folio or
 * just a single page sized folio, but either way they get treated the same way
 * by the rest of the code - the buffer memory spans a single contiguous memory
 * region that we don't have to map and unmap to access the data directly.
 *
 * The third type of buffer is the vmalloc()d buffer. This provides the buffer
 * with the required contiguous memory region but backed by discontiguous
 * physical pages.
 */
static int
xfs_buf_alloc_backing_mem(
	struct xfs_buf	*bp,
	xfs_buf_flags_t	flags)
{
	size_t		size = BBTOB(bp->b_length);
	gfp_t		gfp_mask = GFP_KERNEL | __GFP_NOLOCKDEP | __GFP_NOWARN;
	struct folio	*folio;

	if (xfs_buftarg_is_mem(bp->b_target))
		return xmbuf_map_backing_mem(bp);

	/* Assure zeroed buffer for non-read cases. */
	if (!(flags & XBF_READ))
		gfp_mask |= __GFP_ZERO;
	if (flags & XBF_READ_AHEAD)
		gfp_mask |= __GFP_NORETRY;

	/*
	 * For buffers smaller than PAGE_SIZE use a kmalloc allocation if that
	 * is properly aligned. The slab allocator now guarantees an aligned
	 * allocation for all power of two sizes, which matches most of the
	 * smaller than PAGE_SIZE buffers used by XFS.
	 */
	if (size < PAGE_SIZE && is_power_of_2(size))
		return xfs_buf_alloc_kmem(bp, size, gfp_mask);
	/*
	 * Don't bother with the retry loop for single PAGE allocations:
	 * vmalloc won't do any better.
	 */
	if (size <= PAGE_SIZE)
		gfp_mask |= __GFP_NOFAIL;

	/*
	 * Optimistically attempt a single high order folio allocation for
	 * larger than PAGE_SIZE buffers.
	 *
	 * Allocating a high order folio makes the assumption that buffers are
	 * a power-of-2 size, matching the power-of-2 folio sizes available.
	 *
	 * The exception here is user xattr data buffers, which can be
	 * arbitrarily sized up to 64kB plus structure metadata; skip straight
	 * to the vmalloc path for them instead of wasting memory here.
	 */
	if (size > PAGE_SIZE) {
		if (!is_power_of_2(size))
			goto fallback;
		gfp_mask &= ~__GFP_DIRECT_RECLAIM;
		gfp_mask |= __GFP_NORETRY;
	}
	folio = folio_alloc(gfp_mask, get_order(size));
	if (!folio) {
		if (size <= PAGE_SIZE)
			return -ENOMEM;
		trace_xfs_buf_backing_fallback(bp, _RET_IP_);
		goto fallback;
	}
	bp->b_addr = folio_address(folio);
	trace_xfs_buf_backing_folio(bp, _RET_IP_);
	return 0;

fallback:
	for (;;) {
		bp->b_addr = __vmalloc(size, gfp_mask);
		if (bp->b_addr)
			break;
		if (flags & XBF_READ_AHEAD)
			return -ENOMEM;
		XFS_STATS_INC(bp->b_mount, xb_page_retries);
		memalloc_retry_wait(gfp_mask);
	}
	return 0;
}
	/*
	 * We don't want certain flags to appear in b_flags unless they are
	 * specifically set by later operations on the buffer.
	 */
	flags &= ~(XBF_TRYLOCK | XBF_ASYNC | XBF_READ_AHEAD);

	/*
	 * A new buffer is held and locked by the owner. This ensures that the
	 * buffer is owned by the caller and racing RCU lookups right after
	 * inserting into the hash table are safe (and will have to wait for
	 * the unlock to do anything non-trivial).
	 */
	bp->b_hold = 1;
	sema_init(&bp->b_sema, 0); /* held, no waiters */
	/*
	 * The key hashing in the lookup path depends on the key being the
	 * first element of the compare_arg, make sure to assert this.
	 */
	BUILD_BUG_ON(offsetof(struct xfs_buf_map, bm_bn) != 0);

	if (bp->b_rhash_key != map->bm_bn)
		return 1;

	if (unlikely(bp->b_length != map->bm_len)) {
		/*
		 * found a block number match. If the range doesn't
		 * match, the only way this is allowed is if the buffer
		 * in the cache is stale and the transaction that made
		 * it stale has not yet committed. i.e. we are
		 * reallocating a busy extent. Skip this buffer and
		 * continue searching for an exact match.
		 *
		 * Note: If we're scanning for incore buffers to stale, don't
		 * complain if we find non-stale buffers.
		 */
		if (!(map->bm_flags & XBM_LIVESCAN))
			ASSERT(bp->b_flags & XBF_STALE);
		return 1;
	}
	return 0;
}
	/* Check for IOs smaller than the sector size or not sector aligned */
	ASSERT(!(BBTOB(map->bm_len) < btp->bt_meta_sectorsize));
	ASSERT(!(BBTOB(map->bm_bn) & (xfs_off_t)btp->bt_meta_sectormask));

	/*
	 * Corrupted block numbers can get through to here, unfortunately, so
	 * we have to check that the buffer falls within the filesystem bounds.
	 */
	eofs = XFS_FSB_TO_BB(btp->bt_mount, btp->bt_mount->m_sb.sb_dblocks);
	if (map->bm_bn < 0 || map->bm_bn >= eofs) {
		xfs_alert(btp->bt_mount,
			"%s: daddr 0x%llx out of range, EOFS 0x%llx",
			__func__, map->bm_bn, eofs);
		WARN_ON(1);
		return -EFSCORRUPTED;
	}
	return 0;
}
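/*
 * Illustrative aside (not part of the original source): XFS_FSB_TO_BB()
 * converts filesystem blocks to 512-byte basic blocks. On a filesystem with
 * 4096-byte blocks and sb_dblocks == 1000, eofs above works out to
 * 1000 * (4096 / 512) == 8000 basic blocks, so any daddr at or beyond 8000
 * is rejected as out of range.
 */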
	/*
	 * if the buffer is stale, clear all the external state associated with
	 * it. We need to keep flags such as how we allocated the buffer memory
	 * intact here.
	 */
	if (bp->b_flags & XBF_STALE) {
		if (flags & XBF_LIVESCAN) {
			xfs_buf_unlock(bp);
			return -ENOENT;
		}
		ASSERT((bp->b_flags & _XBF_DELWRI_Q) == 0);
		bp->b_flags &= _XBF_KMEM;
		bp->b_ops = NULL;
	}
	return 0;
}
/*
 * Insert the new_bp into the hash table. This consumes the perag reference
 * taken for the lookup regardless of the result of the insert.
 */
static int
xfs_buf_find_insert(
	struct xfs_buftarg	*btp,
	struct xfs_buf_cache	*bch,
	struct xfs_perag	*pag,
	struct xfs_buf_map	*cmap,
	struct xfs_buf_map	*map,
	int			nmaps,
	xfs_buf_flags_t		flags,
	struct xfs_buf		**bpp)
{
	struct xfs_buf		*new_bp;
	struct xfs_buf		*bp;
	int			error;
/*
 * Assembles a buffer covering the specified range. The code is optimised for
 * cache hits, as metadata intensive workloads will see 3 orders of magnitude
 * more hits than misses.
 */
int
xfs_buf_get_map(
	struct xfs_buftarg	*btp,
	struct xfs_buf_map	*map,
	int			nmaps,
	xfs_buf_flags_t		flags,
	struct xfs_buf		**bpp)
{
	struct xfs_buf_cache	*bch;
	struct xfs_perag	*pag;
	struct xfs_buf		*bp = NULL;
	struct xfs_buf_map	cmap = { .bm_bn = map[0].bm_bn };
	int			error;
	int			i;

	if (flags & XBF_LIVESCAN)
		cmap.bm_flags |= XBM_LIVESCAN;
	for (i = 0; i < nmaps; i++)
		cmap.bm_len += map[i].bm_len;

	error = xfs_buf_map_verify(btp, &cmap);
	if (error)
		return error;
	/* cache hits always outnumber misses by at least 10:1 */
	if (unlikely(!bp)) {
		XFS_STATS_INC(btp->bt_mount, xb_miss_locked);

		if (flags & XBF_INCORE)
			goto out_put_perag;

		/* xfs_buf_find_insert() consumes the perag reference. */
		error = xfs_buf_find_insert(btp, bch, pag, &cmap, map, nmaps,
				flags, &bp);
		if (error)
			return error;
	} else {
		XFS_STATS_INC(btp->bt_mount, xb_get_locked);
		if (pag)
			xfs_perag_put(pag);
	}

	/*
	 * Clear b_error if this is a lookup from a caller that doesn't expect
	 * valid data to be found in the buffer.
	 */
	if (!(flags & XBF_READ))
		xfs_buf_ioerror(bp, 0);
/*
 * Reverify a buffer found in cache without an attached ->b_ops.
 *
 * If the caller passed an ops structure and the buffer doesn't have ops
 * assigned, set the ops and use it to verify the contents. If verification
 * fails, clear XBF_DONE. We assume the buffer has no recorded errors and is
 * already in XBF_DONE state on entry.
 *
 * Under normal operations, every in-core buffer is verified on read I/O
 * completion. There are two scenarios that can lead to in-core buffers without
 * an assigned ->b_ops. The first is during log recovery of buffers on a V4
 * filesystem, though these buffers are purged at the end of recovery. The
 * other is online repair, which intentionally reads with a NULL buffer ops to
 * run several verifiers across an in-core buffer in order to establish buffer
 * type. If repair can't establish that, the buffer will be left in memory
 * with NULL buffer ops.
 */
int
xfs_buf_reverify(
	struct xfs_buf		*bp,
	const struct xfs_buf_ops *ops)
{
	ASSERT(bp->b_flags & XBF_DONE);
	ASSERT(bp->b_error == 0);
	if (!(bp->b_flags & XBF_DONE)) {
		/* Initiate the buffer read and wait. */
		XFS_STATS_INC(target->bt_mount, xb_get_read);
		bp->b_ops = ops;
		error = _xfs_buf_read(bp);
	} else {
		/* Buffer already read; all we need to do is check it. */
		error = xfs_buf_reverify(bp, ops);

		/* We do not want read in the flags */
		bp->b_flags &= ~XBF_READ;
		ASSERT(bp->b_ops != NULL || ops == NULL);
	}

	/*
	 * If we've had a read error, then the contents of the buffer are
	 * invalid and should not be used. To ensure that a followup read tries
	 * to pull the buffer from disk again, we clear the XBF_DONE flag and
	 * mark the buffer stale. This ensures that anyone who has a current
	 * reference to the buffer will interpret its contents correctly and
	 * future cache lookups will also treat it as an empty, uninitialised
	 * buffer.
	 */
	if (error) {
		/*
		 * Check against log shutdown for error reporting because
		 * metadata writeback may require a read first and we need to
		 * report errors in metadata writeback until the log is shut
		 * down. High level transaction read functions already check
		 * against mount shutdown, anyway, so we only need to be
		 * concerned about low level IO interactions here.
		 */
		if (!xlog_is_shutdown(target->bt_mount->m_log))
			xfs_buf_ioerror_alert(bp, fa);

		/* bad CRC means corrupted metadata */
		if (error == -EFSBADCRC)
			error = -EFSCORRUPTED;
		return error;
	}

	*bpp = bp;
	return 0;
}
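/*
 * Illustrative caller sketch (not part of the original source; the exact
 * flags and verifier are assumptions): typical metadata reads build a single
 * map and funnel it into the path above:
 *
 *	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
 *	struct xfs_buf	*bp;
 *	int		error;
 *
 *	error = xfs_buf_read_map(target, &map, 1, 0, &bp, ops,
 *			__this_address);
 *	if (error)
 *		return error;
 *	... use bp->b_addr ...
 *	xfs_buf_relse(bp);
 */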
/*
 * If we are not low on memory then do the readahead in a deadlock
 * safe manner.
 */
void
xfs_buf_readahead_map(
	struct xfs_buftarg	*target,
	struct xfs_buf_map	*map,
	int			nmaps,
	const struct xfs_buf_ops *ops)
{
	const xfs_buf_flags_t	flags = XBF_READ | XBF_ASYNC | XBF_READ_AHEAD;
	struct xfs_buf		*bp;

	/*
	 * Currently we don't have a good means or justification for performing
	 * xmbuf_map_page asynchronously, so we don't do readahead.
	 */
	if (xfs_buftarg_is_mem(target))
		return;
/*
 * Read an uncached buffer from disk. Allocates and returns a locked
 * buffer containing the disk contents or nothing. Uncached buffers always have
 * a cache index of XFS_BUF_DADDR_NULL so we can easily determine if the buffer
 * is cached or uncached during fault diagnosis.
 */
int
xfs_buf_read_uncached(
	struct xfs_buftarg	*target,
	xfs_daddr_t		daddr,
	size_t			numblks,
	struct xfs_buf		**bpp,
	const struct xfs_buf_ops *ops)
{
	struct xfs_buf		*bp;
	int			error;

	*bpp = NULL;

	error = xfs_buf_get_uncached(target, numblks, &bp);
	if (error)
		return error;

	/* set up the buffer for a read IO */
	ASSERT(bp->b_map_count == 1);
	bp->b_rhash_key = XFS_BUF_DADDR_NULL;
	bp->b_maps[0].bm_bn = daddr;
	bp->b_flags |= XBF_READ;
	bp->b_ops = ops;
/*
 * Increment reference count on buffer, to hold the buffer concurrently
 * with another thread which may release (free) the buffer asynchronously.
 * Must hold the buffer already to call this function.
 */
void
xfs_buf_hold(
	struct xfs_buf	*bp)
{
	trace_xfs_buf_hold(bp, _RET_IP_);

	spin_lock(&bp->b_lock);
	bp->b_hold++;
	spin_unlock(&bp->b_lock);
}
	/* we are asked to drop the last reference */
	if (atomic_read(&bp->b_lru_ref)) {
		/*
		 * If the buffer is added to the LRU, keep the reference to the
		 * buffer for the LRU and clear the (now stale) dispose list
		 * state flag, else drop the reference.
		 */
		if (list_lru_add_obj(&btp->bt_lru, &bp->b_lru))
			bp->b_state &= ~XFS_BSTATE_DISPOSE;
		else
			bp->b_hold--;
	} else {
		bp->b_hold--;
		/*
		 * most of the time buffers will already be removed from the
		 * LRU, so optimise that case by checking for the
		 * XFS_BSTATE_DISPOSE flag indicating the last list the buffer
		 * was on was the disposal list
		 */
		if (!(bp->b_state & XFS_BSTATE_DISPOSE)) {
			list_lru_del_obj(&btp->bt_lru, &bp->b_lru);
		} else {
			ASSERT(list_empty(&bp->b_lru));
		}
/*
 * Release a hold on the specified buffer.
 */
void
xfs_buf_rele(
	struct xfs_buf	*bp)
{
	trace_xfs_buf_rele(bp, _RET_IP_);

	if (xfs_buf_is_uncached(bp))
		xfs_buf_rele_uncached(bp);
	else
		xfs_buf_rele_cached(bp);
}
/*
 * Lock a buffer object, if it is not already locked.
 *
 * If we come across a stale, pinned, locked buffer, we know that we are
 * being asked to lock a buffer that has been reallocated. Because it is
 * pinned, we know that the log has not been pushed to disk and hence it
 * will still be locked. Rather than continuing to have trylock attempts
 * fail until someone else pushes the log, push it ourselves before
 * returning. This means that the xfsaild will not get stuck trying
 * to push on stale inode buffers.
 */
int
xfs_buf_trylock(
	struct xfs_buf	*bp)
{
	int		locked;
/*
 * Lock a buffer object.
 *
 * If we come across a stale, pinned, locked buffer, we know that we
 * are being asked to lock a buffer that has been reallocated. Because
 * it is pinned, we know that the log has not been pushed to disk and
 * hence it will still be locked. Rather than sleeping until someone
 * else pushes the log, push it ourselves before trying to get the lock.
 */
void
xfs_buf_lock(
	struct xfs_buf	*bp)
{
	trace_xfs_buf_lock(bp, _RET_IP_);

	if (atomic_read(&bp->b_pin_count) && (bp->b_flags & XBF_STALE))
		xfs_log_force(bp->b_mount, 0);
	down(&bp->b_sema);

	trace_xfs_buf_lock_done(bp, _RET_IP_);
}
/*
 * Account for this latest trip around the retry handler, and decide if
 * we've failed enough times to constitute a permanent failure.
 */
static bool
xfs_buf_ioerror_permanent(
	struct xfs_buf		*bp,
	struct xfs_error_cfg	*cfg)
{
	struct xfs_mount	*mp = bp->b_mount;

	if (cfg->max_retries != XFS_ERR_RETRY_FOREVER &&
	    ++bp->b_retries > cfg->max_retries)
		return true;
	if (cfg->retry_timeout != XFS_ERR_RETRY_FOREVER &&
	    time_after(jiffies, cfg->retry_timeout + bp->b_first_retry_time))
		return true;

	/* At unmount we may treat errors differently */
	if (xfs_is_unmounting(mp) && mp->m_fail_unmount)
		return true;

	return false;
}
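/*
 * Worked example (illustrative, not part of the original source): with
 * cfg->max_retries == 2, the first failed retry bumps b_retries to 1 and the
 * second to 2, neither exceeding the limit; on the third failure
 * ++bp->b_retries == 3 > 2, so the error is promoted to permanent.
 */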
/*
 * On a sync write or shutdown we just want to stale the buffer and let the
 * caller handle the error in bp->b_error appropriately.
 *
 * If the write was asynchronous then no one will be looking for the error. If
 * this is the first failure of this type, clear the error state and write the
 * buffer out again. This means we always retry an async write failure at least
 * once, but we also need to set the buffer up to behave correctly now for
 * repeated failures.
 *
 * If we get repeated async write failures, then we take action according to
 * the error configuration we have been set up to use.
 *
 * Returns true if this function took care of error handling and the caller
 * must not touch the buffer again. Return false if the caller should proceed
 * with normal I/O completion handling.
 */
static bool
xfs_buf_ioend_handle_error(
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = bp->b_mount;
	struct xfs_error_cfg	*cfg;
	struct xfs_log_item	*lip;

	/*
	 * If we've already shutdown the journal because of I/O errors, there's
	 * no point in giving this a retry.
	 */
	if (xlog_is_shutdown(mp->m_log))
		goto out_stale;

	xfs_buf_ioerror_alert_ratelimited(bp);

	/*
	 * We're not going to bother about retrying this during recovery.
	 * One strike!
	 */
	if (bp->b_flags & _XBF_LOGRECOVERY) {
		xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
		return false;
	}

	/*
	 * Synchronous writes will have callers process the error.
	 */
	if (!(bp->b_flags & XBF_ASYNC))
		goto out_stale;

	/*
	 * Permanent error - we need to trigger a shutdown if we haven't
	 * already to indicate that inconsistency will result from this action.
	 */
	if (xfs_buf_ioerror_permanent(bp, cfg)) {
		xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
		goto out_stale;
	}

	/* Still considered a transient error. Caller will schedule retries. */
	list_for_each_entry(lip, &bp->b_li_list, li_bio_list) {
		set_bit(XFS_LI_FAILED, &lip->li_flags);
		clear_bit(XFS_LI_FLUSHING, &lip->li_flags);
	}
/* returns false if the caller needs to resubmit the I/O, else true */
static bool
__xfs_buf_ioend(
	struct xfs_buf	*bp)
{
	trace_xfs_buf_iodone(bp, _RET_IP_);

	if (bp->b_flags & XBF_READ) {
		if (!bp->b_error && is_vmalloc_addr(bp->b_addr))
			invalidate_kernel_vmap_range(bp->b_addr,
					roundup(BBTOB(bp->b_length), PAGE_SIZE));
		if (!bp->b_error && bp->b_ops)
			bp->b_ops->verify_read(bp);
		if (!bp->b_error)
			bp->b_flags |= XBF_DONE;
		if (bp->b_flags & XBF_READ_AHEAD)
			percpu_counter_dec(&bp->b_target->bt_readahead_count);
	} else {
		if (!bp->b_error) {
			bp->b_flags &= ~XBF_WRITE_FAIL;
			bp->b_flags |= XBF_DONE;
		}

		if (unlikely(bp->b_error) && xfs_buf_ioend_handle_error(bp))
			return false;

		/* clear the retry state */
		bp->b_last_error = 0;
		bp->b_retries = 0;
		bp->b_first_retry_time = 0;

		/*
		 * Note that for things like remote attribute buffers, there
		 * may not be a buffer log item here, so processing the buffer
		 * log item must remain optional.
		 */
		if (bp->b_log_item)
			xfs_buf_item_done(bp);
void
xfs_buf_ioerror_alert(
	struct xfs_buf		*bp,
	xfs_failaddr_t		func)
{
	xfs_buf_alert_ratelimited(bp, "XFS: metadata IO error",
		"metadata I/O error in \"%pS\" at daddr 0x%llx len %d error %d",
		func, (uint64_t)xfs_buf_daddr(bp),
		bp->b_length, -bp->b_error);
}
/*
 * To simulate an I/O failure, the buffer must be locked and held with at least
 * three references. The LRU reference is dropped by the stale call. The buf
 * item reference is dropped via ioend processing. The third reference is owned
 * by the caller and is dropped on I/O completion if the buffer is XBF_ASYNC.
 */
void
xfs_buf_ioend_fail(
	struct xfs_buf	*bp)
{
	bp->b_flags &= ~XBF_DONE;
	xfs_buf_stale(bp);
	xfs_buf_ioerror(bp, -EIO);
	xfs_buf_ioend(bp);
}
	/*
	 * If there is more than one map segment, split out a new bio for each
	 * map except for the last one. The last map is handled by the
	 * remainder of the original bio outside the loop.
	 */
	blk_start_plug(&plug);
	for (map = 0; map < bp->b_map_count - 1; map++) {
		struct bio	*split;
/*
 * Wait for I/O completion of a sync buffer and return the I/O error code.
 */
static int
xfs_buf_iowait(
	struct xfs_buf	*bp)
{
	ASSERT(!(bp->b_flags & XBF_ASYNC));

	do {
		trace_xfs_buf_iowait(bp, _RET_IP_);
		wait_for_completion(&bp->b_iowait);
		trace_xfs_buf_iowait_done(bp, _RET_IP_);
	} while (!__xfs_buf_ioend(bp));

	return bp->b_error;
}
/*
 * Run the write verifier callback function if it exists. If this fails, mark
 * the buffer with an error and do not dispatch the I/O.
 */
static bool
xfs_buf_verify_write(
	struct xfs_buf	*bp)
{
	if (bp->b_ops) {
		bp->b_ops->verify_write(bp);
		if (bp->b_error)
			return false;
	} else if (bp->b_rhash_key != XFS_BUF_DADDR_NULL) {
		/*
		 * Non-crc filesystems don't attach verifiers during log
		 * recovery, so don't warn for such filesystems.
		 */
		if (xfs_has_crc(bp->b_mount)) {
			xfs_warn(bp->b_mount,
				"%s: no buf ops on daddr 0x%llx len %d",
				__func__, xfs_buf_daddr(bp),
				bp->b_length);
			xfs_hex_dump(bp->b_addr, XFS_CORRUPTION_DUMP_LEN);
			dump_stack();
		}
	}

	return true;
}
/*
 * Buffer I/O submission path, read or write. Asynchronous submission transfers
 * the buffer lock ownership and the current reference to the IO. It is not
 * safe to reference the buffer after a call to this function unless the caller
 * holds an additional reference itself.
 */
static void
xfs_buf_submit(
	struct xfs_buf	*bp)
{
	trace_xfs_buf_submit(bp, _RET_IP_);

	ASSERT(!(bp->b_flags & _XBF_DELWRI_Q));

	/*
	 * On log shutdown we stale and complete the buffer immediately. We can
	 * be called to read the superblock before the log has been set up, so
	 * be careful checking the log state.
	 *
	 * Checking the mount shutdown state here can result in the log tail
	 * moving inappropriately on disk as the log may not yet be shut down.
	 * i.e. failing this buffer on mount shutdown can remove it from the
	 * AIL and move the tail of the log forwards without having written
	 * this buffer to disk. This corrupts the log tail state in memory, and
	 * because the log may not be shut down yet, it can then be propagated
	 * to disk before the log is shutdown. Hence we check log shutdown
	 * state here rather than mount state to avoid corrupting the log tail
	 * on shutdown.
	 */
	if (bp->b_mount->m_log && xlog_is_shutdown(bp->b_mount->m_log)) {
		xfs_buf_ioend_fail(bp);
		return;
	}

	if (bp->b_flags & XBF_WRITE)
		xfs_buf_wait_unpin(bp);

	/*
	 * Make sure we capture only current IO errors rather than stale errors
	 * left over from previous use of the buffer (e.g. failed readahead).
	 */
	bp->b_error = 0;

	/* In-memory targets are directly mapped, no I/O required. */
	if (xfs_buftarg_is_mem(bp->b_target)) {
		xfs_buf_ioend(bp);
		return;
	}

	xfs_buf_submit_bio(bp);
}
/*
 * Log a message about and stale a buffer that a caller has decided is corrupt.
 *
 * This function should be called for the kinds of metadata corruption that
 * cannot be detected by a verifier, such as incorrect inter-block relationship
 * data. Do /not/ call this function from a verifier function.
 *
 * The buffer must be XBF_DONE prior to the call. Afterwards, the buffer will
 * be marked stale, but b_error will not be set. The caller is responsible for
 * releasing the buffer or fixing it.
 */
void
__xfs_buf_mark_corrupt(
	struct xfs_buf	*bp,
	xfs_failaddr_t	fa)
{
	ASSERT(bp->b_flags & XBF_DONE);
/*
 * Wait for any bufs with callbacks that have been submitted but have not yet
 * returned. These buffers will have an elevated hold count, so wait on those
 * while freeing all the buffers only held by the LRU.
 */
static enum lru_status
xfs_buftarg_drain_rele(
	struct list_head	*item,
	struct list_lru_one	*lru,
	void			*arg)
{
	struct xfs_buf		*bp = container_of(item, struct xfs_buf, b_lru);
	struct list_head	*dispose = arg;

	if (!spin_trylock(&bp->b_lock))
		return LRU_SKIP;
	if (bp->b_hold > 1) {
		/* need to wait, so skip it this pass */
		spin_unlock(&bp->b_lock);
		trace_xfs_buf_drain_buftarg(bp, _RET_IP_);
		return LRU_SKIP;
	}

	/*
	 * clear the LRU reference count so the buffer doesn't get
	 * ignored in xfs_buf_rele().
	 */
	atomic_set(&bp->b_lru_ref, 0);
	bp->b_state |= XFS_BSTATE_DISPOSE;
	list_lru_isolate_move(lru, item, dispose);
	spin_unlock(&bp->b_lock);
	return LRU_REMOVED;
}
/*
 * Wait for outstanding I/O on the buftarg to complete.
 */
void
xfs_buftarg_wait(
	struct xfs_buftarg	*btp)
{
	/*
	 * First wait for all in-flight readahead buffers to be released. This
	 * is critical as new buffers do not make the LRU until they are
	 * released.
	 *
	 * Next, flush the buffer workqueue to ensure all completion processing
	 * has finished. Just waiting on buffer locks is not sufficient for
	 * async IO as the reference count held over IO is not released until
	 * after the buffer lock is dropped. Hence we need to ensure here that
	 * all reference counts have been dropped before we start walking the
	 * LRU list.
	 */
	while (percpu_counter_sum(&btp->bt_readahead_count))
		delay(100);
	flush_workqueue(btp->bt_mount->m_buf_workqueue);
}
	/* loop until there is nothing left on the lru list. */
	while (list_lru_count(&btp->bt_lru)) {
		list_lru_walk(&btp->bt_lru, xfs_buftarg_drain_rele,
			      &dispose, LONG_MAX);

		while (!list_empty(&dispose)) {
			struct xfs_buf *bp;

			bp = list_first_entry(&dispose, struct xfs_buf, b_lru);
			list_del_init(&bp->b_lru);
			if (bp->b_flags & XBF_WRITE_FAIL) {
				write_fail = true;
				xfs_buf_alert_ratelimited(bp,
					"XFS: Corruption Alert",
"Corruption Alert: Buffer at daddr 0x%llx had permanent write failures!",
					(long long)xfs_buf_daddr(bp));
			}
			xfs_buf_rele(bp);
		}
		if (loop++ != 0)
			delay(100);
	}

	/*
	 * If one or more failed buffers were freed, that means dirty metadata
	 * was thrown away. This should only ever happen after I/O completion
	 * handling has elevated I/O error(s) to permanent failures and shuts
	 * down the journal.
	 */
	if (write_fail) {
		ASSERT(xlog_is_shutdown(btp->bt_mount->m_log));
		xfs_alert(btp->bt_mount,
	      "Please run xfs_repair to determine the extent of the problem.");
	}
}
	/*
	 * we are inverting the lru lock/bp->b_lock here, so use a trylock.
	 * If we fail to get the lock, just skip it.
	 */
	if (!spin_trylock(&bp->b_lock))
		return LRU_SKIP;
	/*
	 * Decrement the b_lru_ref count unless the value is already
	 * zero. If the value is already zero, we need to reclaim the
	 * buffer, otherwise it gets another trip through the LRU.
	 */
	if (atomic_add_unless(&bp->b_lru_ref, -1, 0)) {
		spin_unlock(&bp->b_lock);
		return LRU_ROTATE;
	}
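/*
 * Illustrative aside (not part of the original source):
 * atomic_add_unless(&v, -1, 0) decrements v and returns non-zero unless the
 * old value was already 0. So a buffer with b_lru_ref == 3 is rotated on
 * three successive shrinker passes (3 -> 2 -> 1 -> 0) and only reclaimed on
 * the fourth, when the count is already zero.
 */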
/* Configure a buffer target that abstracts a block device. */
int
xfs_configure_buftarg(
	struct xfs_buftarg	*btp,
	unsigned int		sectorsize)
{
	int			error;

	ASSERT(btp->bt_bdev != NULL);

	/* Set up metadata sector size info */
	btp->bt_meta_sectorsize = sectorsize;
	btp->bt_meta_sectormask = sectorsize - 1;

	error = bdev_validate_blocksize(btp->bt_bdev, sectorsize);
	if (error) {
		xfs_warn(btp->bt_mount,
			"Cannot use blocksize %u on device %pg, err %d",
			sectorsize, btp->bt_bdev, error);
		return -EINVAL;
	}

	if (bdev_can_atomic_write(btp->bt_bdev))
		xfs_configure_buftarg_atomic_writes(btp);

	return 0;
}
int
xfs_init_buftarg(
	struct xfs_buftarg	*btp,
	size_t			logical_sectorsize,
	const char		*descr)
{
	/* Set up device logical sector size mask */
	btp->bt_logical_sectorsize = logical_sectorsize;
	btp->bt_logical_sectormask = logical_sectorsize - 1;

	/*
	 * Buffer IO error rate limiting. Limit it to no more than 10 messages
	 * per 30 seconds so as to not spam logs too much on repeated errors.
	 */
	ratelimit_state_init(&btp->bt_ioerror_rl, 30 * HZ,
			     DEFAULT_RATELIMIT_BURST);

	if (list_lru_init(&btp->bt_lru))
		return -ENOMEM;
	if (percpu_counter_init(&btp->bt_readahead_count, 0, GFP_KERNEL))
		goto out_destroy_lru;
	/*
	 * Flush and invalidate all devices' pagecaches before reading any
	 * metadata because XFS doesn't use the bdev pagecache.
	 */
	error = sync_blockdev(btp->bt_bdev);
	if (error)
		goto error_free;

	/*
	 * When allocating the buftargs we have not yet read the super block
	 * and thus don't know the file system sector size yet.
	 */
	btp->bt_meta_sectorsize = bdev_logical_block_size(btp->bt_bdev);
	btp->bt_meta_sectormask = btp->bt_meta_sectorsize - 1;

	error = xfs_init_buftarg(btp, btp->bt_meta_sectorsize,
				 mp->m_super->s_id);
	if (error)
		goto error_free;
/*
 * Cancel a delayed write list.
 *
 * Remove each buffer from the list, clear the delwri queue flag and drop the
 * associated buffer reference.
 */
void
xfs_buf_delwri_cancel(
	struct list_head	*list)
{
	struct xfs_buf	*bp;

	while (!list_empty(list)) {
		bp = list_first_entry(list, struct xfs_buf, b_list);
/*
 * Add a buffer to the delayed write list.
 *
 * This queues a buffer for writeout if it hasn't already been. Note that
 * neither this routine nor the buffer list submission functions perform
 * any internal synchronization. It is expected that the lists are
 * thread-local to the callers.
 *
 * Returns true if we queued up the buffer, or false if it already had
 * been on the buffer list.
 */
bool
xfs_buf_delwri_queue(
	struct xfs_buf		*bp,
	struct list_head	*list)
{
	ASSERT(xfs_buf_islocked(bp));
	ASSERT(!(bp->b_flags & XBF_READ));

	/*
	 * If the buffer is already marked delwri it already is queued up
	 * by someone else for immediate writeout. Just ignore it in that
	 * case.
	 */
	if (bp->b_flags & _XBF_DELWRI_Q) {
		trace_xfs_buf_delwri_queued(bp, _RET_IP_);
		return false;
	}

	trace_xfs_buf_delwri_queue(bp, _RET_IP_);

	/*
	 * If a buffer gets written out synchronously or marked stale while it
	 * is on a delwri list we lazily remove it. To do this, the other party
	 * clears the _XBF_DELWRI_Q flag but otherwise leaves the buffer alone.
	 * It remains referenced and on the list. In a rare corner case it
	 * might get re-added to a delwri list after the synchronous writeout,
	 * in which case we just need to re-add the flag here.
	 */
	bp->b_flags |= _XBF_DELWRI_Q;
	if (list_empty(&bp->b_list)) {
		xfs_buf_hold(bp);
		list_add_tail(&bp->b_list, list);
	}

	return true;
}
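/*
 * Minimal usage sketch (illustrative only, not part of the original source;
 * error handling elided): callers accumulate locked buffers on a private
 * list and push the whole list out in one go:
 *
 *	LIST_HEAD(buffer_list);
 *
 *	xfs_buf_lock(bp);
 *	xfs_buf_delwri_queue(bp, &buffer_list);	// takes its own hold
 *	xfs_buf_unlock(bp);
 *	...
 *	error = xfs_buf_delwri_submit(&buffer_list);
 */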
/*
 * Queue a buffer to this delwri list as part of a data integrity operation.
 * If the buffer is on any other delwri list, we'll wait for that to clear
 * so that the caller can submit the buffer for IO and wait for the result.
 * Callers must ensure the buffer is not already on the list.
 */
void
xfs_buf_delwri_queue_here(
	struct xfs_buf		*bp,
	struct list_head	*buffer_list)
{
	/*
	 * We need this buffer to end up on the /caller's/ delwri list, not any
	 * old list. This can happen if the buffer is marked stale (which
	 * clears DELWRI_Q) after the AIL queues the buffer to its list but
	 * before the AIL has a chance to submit the list.
	 */
	while (!list_empty(&bp->b_list)) {
		xfs_buf_unlock(bp);
		wait_var_event(&bp->b_list, list_empty(&bp->b_list));
		xfs_buf_lock(bp);
	}

	ASSERT(!(bp->b_flags & _XBF_DELWRI_Q));

	xfs_buf_delwri_queue(bp, buffer_list);
}
/*
 * Compare function is more complex than it needs to be because
 * the return value is only 32 bits and we are doing comparisons
 * on 64 bit values
 */
static int
xfs_buf_cmp(
	void			*priv,
	const struct list_head	*a,
	const struct list_head	*b)
{
	struct xfs_buf	*ap = container_of(a, struct xfs_buf, b_list);
	struct xfs_buf	*bp = container_of(b, struct xfs_buf, b_list);
	xfs_daddr_t	diff;

	diff = ap->b_maps[0].bm_bn - bp->b_maps[0].bm_bn;
	if (diff < 0)
		return -1;
	if (diff > 0)
		return 1;
	return 0;
}
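/*
 * Illustrative aside (not part of the original source): simply returning
 * (int)diff would truncate the 64-bit difference. For bm_bn values
 * 0x100000000 and 0, diff == 0x100000000 but (int)diff == 0, so list_sort()
 * would wrongly treat the two buffers as equal; hence the explicit
 * reduction to -1/0/1 above.
 */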
static bool
xfs_buf_delwri_submit_prep(
	struct xfs_buf	*bp)
{
	/*
	 * Someone else might have written the buffer synchronously or marked
	 * it stale in the meantime. In that case only the _XBF_DELWRI_Q flag
	 * got cleared, and we have to drop the reference and remove it from
	 * the list here.
	 */
	if (!(bp->b_flags & _XBF_DELWRI_Q)) {
		xfs_buf_list_del(bp);
		xfs_buf_relse(bp);
		return false;
	}

	trace_xfs_buf_delwri_split(bp, _RET_IP_);
	bp->b_flags &= ~_XBF_DELWRI_Q;
	bp->b_flags |= XBF_WRITE;
	return true;
}
/*
 * Write out a buffer list asynchronously.
 *
 * This will take the @buffer_list, write all non-locked and non-pinned
 * buffers out and not wait for I/O completion on any of the buffers. This
 * interface is only safely usable for callers that can track I/O completion
 * by higher level means, e.g. AIL pushing, as the @buffer_list is consumed
 * in this function.
 *
 * Note: this function will skip buffers it would block on, and in doing so
 * leaves them on @buffer_list so they can be retried on a later pass. As such,
 * it is up to the caller to ensure that the buffer list is fully submitted or
 * cancelled appropriately when they are finished with the list. Failure to
 * cancel or resubmit the list until it is empty will result in leaked buffers
 * at unmount time.
 */
int
xfs_buf_delwri_submit_nowait(
	struct list_head	*buffer_list)
{
	struct xfs_buf		*bp, *n;
	int			pinned = 0;
	struct blk_plug		plug;

	list_sort(NULL, buffer_list, xfs_buf_cmp);

	blk_start_plug(&plug);
	list_for_each_entry_safe(bp, n, buffer_list, b_list) {
		if (!xfs_buf_trylock(bp))
			continue;
		if (xfs_buf_ispinned(bp)) {
			xfs_buf_unlock(bp);
			pinned++;
			continue;
		}
		if (!xfs_buf_delwri_submit_prep(bp))
			continue;
		bp->b_flags |= XBF_ASYNC;
		xfs_buf_list_del(bp);
		xfs_buf_submit(bp);
	}
	blk_finish_plug(&plug);

	return pinned;
}
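/*
 * Illustrative caller pattern (an assumption based on the comment above, not
 * code from this file): an AIL-style pusher resubmits whatever remains
 * queued, forcing the log when pinned buffers were skipped so a later pass
 * can make progress:
 *
 *	pinned = xfs_buf_delwri_submit_nowait(&buffer_list);
 *	if (pinned)
 *		xfs_log_force(mp, 0);	// unpin, then retry the list later
 */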
/*
 * Write out a buffer list synchronously.
 *
 * This will take the @buffer_list, write all buffers out and wait for I/O
 * completion on all of the buffers. @buffer_list is consumed by the function,
 * so callers must have some other way of tracking buffers if they require such
 * functionality.
 */
int
xfs_buf_delwri_submit(
	struct list_head	*buffer_list)
{
	LIST_HEAD		(wait_list);
	int			error = 0, error2;
	struct xfs_buf		*bp, *n;
	struct blk_plug		plug;

	list_sort(NULL, buffer_list, xfs_buf_cmp);

	blk_start_plug(&plug);
	list_for_each_entry_safe(bp, n, buffer_list, b_list) {
		xfs_buf_lock(bp);
		if (!xfs_buf_delwri_submit_prep(bp))
			continue;
		bp->b_flags &= ~XBF_ASYNC;
		list_move_tail(&bp->b_list, &wait_list);
		xfs_buf_submit(bp);
	}
	blk_finish_plug(&plug);

	/* Wait for IO to complete. */
	while (!list_empty(&wait_list)) {
		bp = list_first_entry(&wait_list, struct xfs_buf, b_list);

		xfs_buf_list_del(bp);

		/*
		 * Wait on the locked buffer, check for errors and unlock and
		 * release the delwri queue reference.
		 */
		error2 = xfs_buf_iowait(bp);
		xfs_buf_relse(bp);
		if (!error)
			error = error2;
	}

	return error;
}
void
xfs_buf_set_ref(
	struct xfs_buf	*bp,
	int		lru_ref)
{
	/*
	 * Set the lru reference count to 0 based on the error injection tag.
	 * This allows userspace to disrupt buffer caching for debug/testing
	 * purposes.
	 */
	if (XFS_TEST_ERROR(false, bp->b_mount, XFS_ERRTAG_BUF_LRU_REF))
		lru_ref = 0;

	atomic_set(&bp->b_lru_ref, lru_ref);
}
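/*
 * Typical usage (illustrative, not part of the original source): callers
 * raise the LRU reference on hot metadata so those buffers survive several
 * shrinker passes; real callers pass named priority constants rather than
 * the bare value used here:
 *
 *	xfs_buf_set_ref(bp, 2);
 */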
/*
 * Verify an on-disk magic value against the magic value specified in the
 * verifier structure. The verifier magic is in disk byte order so the caller
 * is expected to pass the value directly from disk.
 */
bool
xfs_verify_magic(
	struct xfs_buf		*bp,
	__be32			dmagic)
{
	struct xfs_mount	*mp = bp->b_mount;
	int			idx;

	idx = xfs_has_crc(mp);
	if (WARN_ON(!bp->b_ops || !bp->b_ops->magic[idx]))
		return false;
	return dmagic == bp->b_ops->magic[idx];
}

/*
 * Verify an on-disk magic value against the magic value specified in the
 * verifier structure. The verifier magic is in disk byte order so the caller
 * is expected to pass the value directly from disk.
 */
bool
xfs_verify_magic16(
	struct xfs_buf		*bp,
	__be16			dmagic)
{
	struct xfs_mount	*mp = bp->b_mount;
	int			idx;

	idx = xfs_has_crc(mp);
	if (WARN_ON(!bp->b_ops || !bp->b_ops->magic16[idx]))
		return false;
	return dmagic == bp->b_ops->magic16[idx];
}
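/*
 * Sketch of a typical verifier caller (illustrative; "hdr" is an assumed
 * on-disk header for the example): a read verifier passes the on-disk magic
 * straight through, and the idx = xfs_has_crc(mp) lookup above picks the v4
 * or v5 magic to compare against:
 *
 *	if (!xfs_verify_magic(bp, hdr->magic))
 *		return __this_address;
 */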