// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) 2023-2024 Oracle. All Rights Reserved.
 * Author: Darrick J. Wong <djwong@kernel.org>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_buf.h"
#include "xfs_buf_mem.h"
#include "xfs_trace.h"
#include <linux/shmem_fs.h>
#include "xfs_log_format.h"
#include "xfs_trans.h"
#include "xfs_buf_item.h"
#include "xfs_error.h"
/*
 * Buffer Cache for In-Memory Files
 * ================================
 *
 * Online fsck wants to create ephemeral ordered recordsets.  The existing
 * btree infrastructure can do this, but we need the buffer cache to target
 * memory instead of block devices.
 *
 * When CONFIG_TMPFS=y, shmemfs is enough of a filesystem to meet those
 * requirements.  Therefore, the xmbuf mechanism uses an unlinked shmem file
 * to store our staging data.  This file is not installed in the file
 * descriptor table so that user programs cannot access the data, which means
 * that the xmbuf must be freed with xmbuf_destroy.
 *
 * xmbufs assume that the caller will handle all required concurrency
 * management; standard vfs locks (freezer and inode) are not taken.  Reads
 * and writes are satisfied directly from the page cache.
 *
 * The only supported block size is PAGE_SIZE, and we cannot use highmem.
 */
/* * shmem files used to back an in-memory buffer cache must not be exposed to * userspace. Upper layers must coordinate access to the one handle returned * by the constructor, so establish a separate lock class for xmbufs to avoid * confusing lockdep.
*/ staticstruct lock_class_key xmbuf_i_mutex_key;
/* * Allocate a buffer cache target for a memory-backed file and set up the * buffer target.
*/ int
xmbuf_alloc( struct xfs_mount *mp, constchar *descr, struct xfs_buftarg **btpp)
{ struct file *file; struct inode *inode; struct xfs_buftarg *btp; int error;
btp = kzalloc(struct_size(btp, bt_cache, 1), GFP_KERNEL); if (!btp) return -ENOMEM;
/* * We don't want to bother with kmapping data during repair, so don't * allow highmem folios to back this mapping.
*/
mapping_set_gfp_mask(inode->i_mapping, GFP_KERNEL);
/* ensure all writes are below EOF to avoid pagecache zeroing */
i_size_write(inode, inode->i_sb->s_maxbytes);
error = xfs_buf_cache_init(btp->bt_cache); if (error) goto out_file;
if (filemap_check_wb_err(inode->i_mapping, 0)) {
folio_unlock(folio);
folio_put(folio); return -EIO;
}
/* * Mark the folio dirty so that it won't be reclaimed once we drop the * (potentially last) reference in xfs_buf_free.
*/
folio_set_dirty(folio);
folio_unlock(folio);
/* Is this a valid daddr within the buftarg? */ bool
xmbuf_verify_daddr( struct xfs_buftarg *btp,
xfs_daddr_t daddr)
{ struct inode *inode = file_inode(btp->bt_file);
/*
 * Finalize a buffer -- discard the backing folio if it's stale, or run the
 * write verifier to detect problems.
 */
int
xmbuf_finalize(
	struct xfs_buf		*bp)
{
	xfs_failaddr_t		fa;

	/* Stale buffers only need to be marked stale; nothing to verify. */
	if (bp->b_flags & XBF_STALE) {
		xmbuf_stale(bp);
		return 0;
	}

	/*
	 * Although this btree is ephemeral, validate the buffer structure so
	 * that we can detect memory corruption errors and software bugs.
	 */
	fa = bp->b_ops->verify_struct(bp);
	if (!fa)
		return 0;

	/* Report the corruption through the usual verifier channel. */
	xfs_verifier_error(bp, -EFSCORRUPTED, fa);
	return -EFSCORRUPTED;
}
/* * Detach this xmbuf buffer from the transaction by any means necessary. * All buffers are direct-mapped, so they do not need bwrite.
*/ void
xmbuf_trans_bdetach( struct xfs_trans *tp, struct xfs_buf *bp)
{ struct xfs_buf_log_item *bli = bp->b_log_item;
/*
 * NOTE(review): the following text is not C code and appears to be
 * extraction contamination from a web page hosting this file.  It is
 * preserved here, translated to English, inside a comment so the file
 * remains parseable:
 *
 * "The information on this website was carefully compiled to the best of
 * our knowledge.  However, neither the completeness, nor the correctness,
 * nor the quality of the provided information is guaranteed.
 * Remark: the colored syntax highlighting and the measurement are still
 * experimental."
 */