// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (C) 2018-2023 Oracle. All Rights Reserved. * Author: Darrick J. Wong <djwong@kernel.org>
*/ #include"xfs.h" #include"xfs_fs.h" #include"xfs_shared.h" #include"xfs_format.h" #include"xfs_log_format.h" #include"xfs_trans_resv.h" #include"xfs_mount.h" #include"scrub/scrub.h" #include"scrub/xfile.h" #include"scrub/xfarray.h" #include"scrub/trace.h" #include <linux/shmem_fs.h>
/*
 * Swappable Temporary Memory
 * ==========================
 *
 * Online checking sometimes needs to be able to stage a large amount of data
 * in memory.  This information might not fit in the available memory and it
 * doesn't all need to be accessible at all times.  In other words, we want an
 * indexed data buffer to store data that can be paged out.
 *
 * When CONFIG_TMPFS=y, shmemfs is enough of a filesystem to meet those
 * requirements.  Therefore, the xfile mechanism uses an unlinked shmem file
 * to store our staging data.  This file is not installed in the file
 * descriptor table so that user programs cannot access the data, which means
 * that the xfile must be freed with xfile_destroy.
 *
 * xfiles assume that the caller will handle all required concurrency
 * management; standard vfs locks (freezer and inode) are not taken.  Reads
 * and writes are satisfied directly from the page cache.
 */
/* * xfiles must not be exposed to userspace and require upper layers to * coordinate access to the one handle returned by the constructor, so * establish a separate lock class for xfiles to avoid confusing lockdep.
*/ staticstruct lock_class_key xfile_i_mutex_key;
/*
 * Create an xfile of the given size.  The description will be used in the
 * trace output.
 *
 * Returns 0 and sets *xfilep on success, or a negative errno.  The caller
 * owns the xfile and must release it with xfile_destroy.
 *
 * NOTE(review): the middle of this function was lost when the source was
 * extracted; the shmem file setup and error path below were restored from the
 * upstream xfs scrub xfile implementation -- verify against the tree.
 */
int
xfile_create(
	const char		*description,
	loff_t			isize,
	struct xfile		**xfilep)
{
	struct inode		*inode;
	struct xfile		*xf;
	int			error;

	xf = kmalloc(sizeof(struct xfile), XCHK_GFP_FLAGS);
	if (!xf)
		return -ENOMEM;

	/*
	 * Unlinked shmem file, never installed in a file descriptor table, so
	 * userspace cannot reach the staging data.
	 */
	xf->file = shmem_kernel_file_setup(description, isize, VM_NORESERVE);
	if (IS_ERR(xf->file)) {
		error = PTR_ERR(xf->file);
		goto out_xfile;
	}

	inode = file_inode(xf->file);

	/* Separate lock class; see xfile_i_mutex_key above. */
	lockdep_set_class(&inode->i_rwsem, &xfile_i_mutex_key);

	/*
	 * We don't want to bother with kmapping data during repair, so don't
	 * allow highmem pages to back this mapping.
	 */
	mapping_set_gfp_mask(inode->i_mapping, GFP_KERNEL);

	trace_xfile_create(xf);

	*xfilep = xf;
	return 0;
out_xfile:
	kfree(xf);
	return error;
}
/*
 * Load an object.  Since we're treating this file as "memory", any error or
 * short IO is treated as a failure to allocate memory.
 *
 * NOTE(review): the loop scaffolding and the copy/advance steps were lost
 * when the source was extracted; they were restored from the upstream xfs
 * scrub xfile implementation -- verify against the tree.
 */
int
xfile_load(
	struct xfile		*xf,
	void			*buf,
	size_t			count,
	loff_t			pos)
{
	struct inode		*inode = file_inode(xf->file);
	unsigned int		pflags;

	if (count > MAX_RW_COUNT)
		return -ENOMEM;
	if (inode->i_sb->s_maxbytes - pos < count)
		return -ENOMEM;

	trace_xfile_load(xf, pos, count);

	/* Reads from the xfile must not recurse into the filesystem. */
	pflags = memalloc_nofs_save();
	while (count > 0) {
		struct folio	*folio;
		unsigned int	len;
		unsigned int	offset;

		if (shmem_get_folio(inode, pos >> PAGE_SHIFT, 0, &folio,
				SGP_READ) < 0)
			break;
		if (!folio) {
			/*
			 * No data stored at this offset, just zero the output
			 * buffer until the next page boundary.
			 */
			len = min_t(ssize_t, count,
					PAGE_SIZE - offset_in_page(pos));
			memset(buf, 0, len);
		} else {
			/*
			 * A writeback error on the mapping means the stored
			 * data is gone; treat it as an allocation failure.
			 */
			if (filemap_check_wb_err(inode->i_mapping, 0)) {
				folio_unlock(folio);
				folio_put(folio);
				break;
			}

			offset = offset_in_folio(folio, pos);
			len = min_t(ssize_t, count,
					folio_size(folio) - offset);
			memcpy(buf, folio_address(folio) + offset, len);

			folio_unlock(folio);
			folio_put(folio);
		}
		count -= len;
		buf += len;
		pos += len;
	}
	memalloc_nofs_restore(pflags);

	if (count)
		return -ENOMEM;
	return 0;
}
/*
 * Store an object.  Since we're treating this file as "memory", any error or
 * short IO is treated as a failure to allocate memory.
 *
 * NOTE(review): the store loop was lost when the source was extracted; it was
 * restored from the upstream xfs scrub xfile implementation, mirroring
 * xfile_load above -- verify against the tree.
 */
int
xfile_store(
	struct xfile		*xf,
	const void		*buf,
	size_t			count,
	loff_t			pos)
{
	struct inode		*inode = file_inode(xf->file);
	unsigned int		pflags;

	if (count > MAX_RW_COUNT)
		return -ENOMEM;
	if (inode->i_sb->s_maxbytes - pos < count)
		return -ENOMEM;

	trace_xfile_store(xf, pos, count);

	/*
	 * Increase the file size first so that shmem_get_folio(..., SGP_CACHE),
	 * actually allocates a folio instead of erroring out.
	 */
	if (pos + count > i_size_read(inode))
		i_size_write(inode, pos + count);

	/* Writes to the xfile must not recurse into the filesystem. */
	pflags = memalloc_nofs_save();
	while (count > 0) {
		struct folio	*folio;
		unsigned int	len;
		unsigned int	offset;

		if (shmem_get_folio(inode, pos >> PAGE_SHIFT, 0, &folio,
				SGP_CACHE) < 0)
			break;
		if (filemap_check_wb_err(inode->i_mapping, 0)) {
			folio_unlock(folio);
			folio_put(folio);
			break;
		}

		offset = offset_in_folio(folio, pos);
		len = min_t(ssize_t, count, folio_size(folio) - offset);
		memcpy(folio_address(folio) + offset, buf, len);

		/* Keep the folio from being reclaimed before writeback. */
		folio_mark_dirty(folio);
		folio_unlock(folio);
		folio_put(folio);

		count -= len;
		buf += len;
		pos += len;
	}
	memalloc_nofs_restore(pflags);

	if (count)
		return -ENOMEM;
	return 0;
}
/*
 * Grab the (locked) folio for a memory object.  The object cannot span a
 * folio boundary.  Returns the locked folio if successful, NULL if there was
 * no folio or it didn't cover the range requested, or an ERR_PTR on failure.
 *
 * NOTE(review): the shmem_get_folio call and the short-folio check were lost
 * when the source was extracted; they were restored from the upstream xfs
 * scrub xfile implementation -- verify against the tree.
 */
struct folio *
xfile_get_folio(
	struct xfile		*xf,
	loff_t			pos,
	size_t			len,
	unsigned int		flags)
{
	struct inode		*inode = file_inode(xf->file);
	struct folio		*folio = NULL;
	unsigned int		pflags;
	int			error;

	if (inode->i_sb->s_maxbytes - pos < len)
		return ERR_PTR(-ENOMEM);

	trace_xfile_get_folio(xf, pos, len);

	/*
	 * Increase the file size first so that shmem_get_folio(..., SGP_CACHE),
	 * actually allocates a folio instead of erroring out.
	 */
	if ((flags & XFILE_ALLOC) && pos + len > i_size_read(inode))
		i_size_write(inode, pos + len);

	/* Folio lookup must not recurse into the filesystem. */
	pflags = memalloc_nofs_save();
	error = shmem_get_folio(inode, pos >> PAGE_SHIFT, 0, &folio,
			(flags & XFILE_ALLOC) ? SGP_CACHE : SGP_READ);
	memalloc_nofs_restore(pflags);
	if (error)
		return ERR_PTR(error);

	if (!folio)
		return NULL;

	/* The object must not span a folio boundary. */
	if (len > folio_size(folio) - offset_in_folio(folio, pos)) {
		folio_unlock(folio);
		folio_put(folio);
		return NULL;
	}

	if (filemap_check_wb_err(inode->i_mapping, 0)) {
		folio_unlock(folio);
		folio_put(folio);
		return ERR_PTR(-EIO);
	}

	/*
	 * Mark the folio dirty so that it won't be reclaimed once we drop the
	 * (potentially last) reference in xfile_put_folio.
	 */
	if (flags & XFILE_ALLOC)
		folio_mark_dirty(folio);
	return folio;
}
/* * Release the (locked) folio for a memory object.
*/ void
xfile_put_folio( struct xfile *xf, struct folio *folio)
{
trace_xfile_put_folio(xf, folio_pos(folio), folio_size(folio));
folio_unlock(folio);
folio_put(folio);
}
/*
 * Discard the page cache that's backing a range of the xfile.
 *
 * NOTE(review): the truncate call was lost when the source was extracted; it
 * was restored from the upstream xfs scrub xfile implementation -- verify
 * against the tree.
 */
void
xfile_discard(
	struct xfile		*xf,
	loff_t			pos,
	u64			count)
{
	trace_xfile_discard(xf, pos, count);

	/* shmem_truncate_range takes an inclusive end offset. */
	shmem_truncate_range(file_inode(xf->file), pos, pos + count - 1);
}
/*
 * NOTE(review): the following German disclaimer is extraneous boilerplate
 * that leaked into this file during extraction; it is unrelated to the code.
 * Translated to English and fenced in a comment so the file stays parseable:
 *
 * The information on this website has been carefully compiled to the best of
 * our knowledge.  However, neither the completeness, nor the correctness,
 * nor the quality of the information provided is guaranteed.
 * Note: the colored syntax highlighting and the measurement are still
 * experimental.
 */