/*
 * Decide if the given file range is aligned to the size of the fundamental
 * allocation unit for the file.
 */
bool
xfs_is_falloc_aligned(
	struct xfs_inode	*ip,
	loff_t			pos,
	long long int		len)
{
	unsigned int		alloc_unit = xfs_inode_alloc_unitsize(ip);

	/*
	 * A non-power-of-2 allocation unit cannot be checked with a bitmask,
	 * so fall back to real remainder checks for offset and length.
	 */
	if (!is_power_of_2(alloc_unit))
		return isaligned_64(pos, alloc_unit) &&
		       isaligned_64(len, alloc_unit);

	/* Power-of-2 unit: one mask test covers both offset and length. */
	return !((pos | len) & (alloc_unit - 1));
}
/*
 * NOTE(review): the region below is text-mangled (fused tokens such as
 * "STATICint"/"staticint") and TRUNCATED: the body of xfs_dir_fsync and the
 * body of xfs_fsync_flush_log are missing between the fragments. Restore
 * from the canonical source tree before attempting to compile.
 */
/* * Fsync operations on directories are much simpler than on regular files, * as there is no file data to flush, and thus also no need for explicit * cache flush operations, and there are no non-transaction metadata updates * on directories either.
*/ STATICint
xfs_dir_fsync( struct file *file,
loff_t start,
loff_t end, int datasync)
{ struct xfs_inode *ip = XFS_I(file->f_mapping->host);
/* NOTE(review): xfs_dir_fsync's body ends abruptly here; the next comment
 * belongs to a different function (xfs_fsync_flush_log). */
/* * All metadata updates are logged, which means that we just have to flush the * log up to the latest LSN that touched the inode. * * If we have concurrent fsync/fdatasync() calls, we need them to all block on * the log force before we clear the ili_fsync_fields field. This ensures that * we don't get a racing sync operation that does not wait for the metadata to * hit the journal before returning. If we race with clearing ili_fsync_fields, * then all that will happen is the log force will do nothing as the lsn will * already be on disk. We can't race with setting ili_fsync_fields because that * is done under XFS_ILOCK_EXCL, and that can't happen because we hold the lock * shared until after the ili_fsync_fields is cleared.
*/ staticint
xfs_fsync_flush_log( struct xfs_inode *ip, bool datasync, int *log_flushed)
{ int error = 0;
xfs_csn_t seq;
/*
 * NOTE(review): xfs_file_fsync below is nearly complete but text-mangled
 * ("STATICint", "elseif") and missing its trailing "return error; }".
 * Flush order visible here: file data first (via file_write_and_wait_range
 * and, for RT/external-log setups, an explicit cache flush of the data
 * device), then the log if the inode is pinned, then a data-device cache
 * flush when the log force was a no-op on a single-device fs.
 */
STATICint
xfs_file_fsync( struct file *file,
loff_t start,
loff_t end, int datasync)
{ struct xfs_inode *ip = XFS_I(file->f_mapping->host); struct xfs_mount *mp = ip->i_mount; int error, err2; int log_flushed = 0;
trace_xfs_file_fsync(ip);
error = file_write_and_wait_range(file, start, end); if (error) return error;
if (xfs_is_shutdown(mp)) return -EIO;
xfs_iflags_clear(ip, XFS_ITRUNCATED);
/* * If we have an RT and/or log subvolume we need to make sure to flush * the write cache the device used for file data first. This is to * ensure newly written file data make it to disk before logging the new * inode size in case of an extending write.
*/ if (XFS_IS_REALTIME_INODE(ip) && mp->m_rtdev_targp != mp->m_ddev_targp)
error = blkdev_issue_flush(mp->m_rtdev_targp->bt_bdev); elseif (mp->m_logdev_targp != mp->m_ddev_targp)
error = blkdev_issue_flush(mp->m_ddev_targp->bt_bdev);
/* * Any inode that has dirty modifications in the log is pinned. The * racy check here for a pinned inode will not catch modifications * that happen concurrently to the fsync call, but fsync semantics * only require to sync previously completed I/O.
*/ if (xfs_ipincount(ip)) {
err2 = xfs_fsync_flush_log(ip, datasync, &log_flushed); if (err2 && !error)
error = err2;
}
/* * If we only have a single device, and the log force about was * a no-op we might have to flush the data device cache here. * This can only happen for fdatasync/O_DSYNC if we were overwriting * an already allocated file and thus do not have any metadata to * commit.
*/ if (!log_flushed && !XFS_IS_REALTIME_INODE(ip) &&
mp->m_logdev_targp == mp->m_ddev_targp) {
err2 = blkdev_issue_flush(mp->m_ddev_targp->bt_bdev); if (err2 && !error)
error = err2;
}
/* NOTE(review): "return error; }" is missing here — function is truncated. */
/*
 * NOTE(review): headerless fragments of several read-path functions follow.
 * The fragments appear to be, in order: the tail of an iolock helper that
 * upgrades to exclusive when a reflink remap is in flight; the body of the
 * direct-I/O read path; the body of the DAX read path; and the dispatch
 * tail of ->read_iter. Function signatures and the trace/statistics
 * prologues are missing — do not treat this region as compilable.
 */
ret = xfs_ilock_iocb(iocb, *lock_mode); if (ret) return ret;
/* * If a reflink remap is in progress we always need to take the iolock * exclusively to wait for it to finish.
*/ if (*lock_mode == XFS_IOLOCK_SHARED &&
xfs_iflags_test(ip, XFS_IREMAPPING)) {
xfs_iunlock(ip, *lock_mode);
*lock_mode = XFS_IOLOCK_EXCL; return xfs_ilock_iocb(iocb, *lock_mode);
}
/* NOTE(review): direct-I/O read fragment begins here. */
if (!iov_iter_count(to)) return 0; /* skip atime */
file_accessed(iocb->ki_filp);
ret = xfs_ilock_iocb(iocb, XFS_IOLOCK_SHARED); if (ret) return ret;
ret = iomap_dio_rw(iocb, to, &xfs_read_iomap_ops, NULL, 0, NULL, 0);
xfs_iunlock(ip, XFS_IOLOCK_SHARED);
/* NOTE(review): DAX read fragment begins here. */
if (!iov_iter_count(to)) return 0; /* skip atime */
ret = xfs_ilock_iocb(iocb, XFS_IOLOCK_SHARED); if (ret) return ret;
ret = dax_iomap_rw(iocb, to, &xfs_read_iomap_ops);
xfs_iunlock(ip, XFS_IOLOCK_SHARED);
/* NOTE(review): ->read_iter dispatch tail; "elseif" is a fused token. */
if (IS_DAX(inode))
ret = xfs_file_dax_read(iocb, to); elseif (iocb->ki_flags & IOCB_DIRECT)
ret = xfs_file_dio_read(iocb, to); else
ret = xfs_file_buffered_read(iocb, to);
if (ret > 0)
XFS_STATS_ADD(mp, xs_read_bytes, ret); return ret;
}
/*
 * NOTE(review): xfs_file_write_zero_eof below is truncated — the code that
 * actually performs the post-EOF zeroing (after the "return 1" restart
 * path) is missing, and the function never closes. The visible logic only
 * covers the EOF check under i_flags_lock and the shared->exclusive iolock
 * upgrade/drain dance.
 */
/* * Take care of zeroing post-EOF blocks when they might exist. * * Returns 0 if successfully, a negative error for a failure, or 1 if this * function dropped the iolock and reacquired it exclusively and the caller * needs to restart the write sanity checks.
*/ static ssize_t
xfs_file_write_zero_eof( struct kiocb *iocb, struct iov_iter *from, unsignedint *iolock,
size_t count, bool *drained_dio, struct xfs_zone_alloc_ctx *ac)
{ struct xfs_inode *ip = XFS_I(iocb->ki_filp->f_mapping->host);
loff_t isize; int error;
/* * We need to serialise against EOF updates that occur in IO completions * here. We want to make sure that nobody is changing the size while * we do this check until we have placed an IO barrier (i.e. hold * XFS_IOLOCK_EXCL) that prevents new IO from being dispatched. The * spinlock effectively forms a memory barrier once we have * XFS_IOLOCK_EXCL so we are guaranteed to see the latest EOF value and * hence be able to correctly determine if we need to run zeroing.
*/
spin_lock(&ip->i_flags_lock);
isize = i_size_read(VFS_I(ip)); if (iocb->ki_pos <= isize) {
spin_unlock(&ip->i_flags_lock); return 0;
}
spin_unlock(&ip->i_flags_lock);
if (iocb->ki_flags & IOCB_NOWAIT) return -EAGAIN;
if (!*drained_dio) { /* * If zeroing is needed and we are currently holding the iolock * shared, we need to update it to exclusive which implies * having to redo all checks before.
*/ if (*iolock == XFS_IOLOCK_SHARED) {
xfs_iunlock(ip, *iolock);
*iolock = XFS_IOLOCK_EXCL;
xfs_ilock(ip, *iolock);
iov_iter_reexpand(from, count);
}
/* * We now have an IO submission barrier in place, but AIO can do * EOF updates during IO completion and hence we now need to * wait for all of them to drain. Non-AIO DIO will have drained * before we are given the XFS_IOLOCK_EXCL, and so for most * cases this wait is a no-op.
*/
inode_dio_wait(VFS_I(ip));
*drained_dio = true; return 1;
}
/* NOTE(review): the actual zeroing call and function close are missing. */
/*
 * NOTE(review): this region fuses two truncated functions. The first is
 * xfs_file_write_checks (pre-write limit/setup checks, including the
 * "restart" label logic — note the "goto restart" targets a label that is
 * not visible here). Around the IOCB_NOWAIT/flags fragment it abruptly
 * switches into the body of a zoned space-reservation helper that uses
 * variables (mp, flags, ac) never declared in the visible text.
 */
/* * Common pre-write limit and setup checks. * * Called with the iolock held either shared and exclusive according to * @iolock, and returns with it held. Might upgrade the iolock to exclusive * if called for a direct write beyond i_size.
*/ STATIC ssize_t
xfs_file_write_checks( struct kiocb *iocb, struct iov_iter *from, unsignedint *iolock, struct xfs_zone_alloc_ctx *ac)
{ struct inode *inode = iocb->ki_filp->f_mapping->host;
size_t count = iov_iter_count(from); bool drained_dio = false;
ssize_t error;
/* * For changing security info in file_remove_privs() we need i_rwsem * exclusively.
*/ if (*iolock == XFS_IOLOCK_SHARED && !IS_NOSEC(inode)) {
xfs_iunlock(XFS_I(inode), *iolock);
*iolock = XFS_IOLOCK_EXCL;
error = xfs_ilock_iocb(iocb, *iolock); if (error) {
*iolock = 0; return error;
} goto restart;
}
/* * If the offset is beyond the size of the file, we need to zero all * blocks that fall between the existing EOF and the start of this * write. * * We can do an unlocked check for i_size here safely as I/O completion * can only extend EOF. Truncate is locked out at this point, so the * EOF can not move backwards, only forwards. Hence we only need to take * the slow path when we are at or beyond the current EOF.
*/ if (iocb->ki_pos > i_size_read(inode)) {
error = xfs_file_write_zero_eof(iocb, from, iolock, count,
&drained_dio, ac); if (error == 1) goto restart; if (error) return error;
}
/* NOTE(review): from here on the text belongs to a different (zoned
 * space-reservation) function; xfs_file_write_checks' tail is missing. */
if (iocb->ki_flags & IOCB_NOWAIT)
flags |= XFS_ZR_NOWAIT;
/* * Check the rlimit and LFS boundary first so that we don't over-reserve * by possibly a lot. * * The generic write path will redo this check later, and it might have * changed by then. If it got expanded we'll stick to our earlier * smaller limit, and if it is decreased the new smaller limit will be * used and our extra space reservation will be returned after finishing * the write.
*/
error = generic_write_check_limits(iocb->ki_filp, iocb->ki_pos, &count); if (error) return error;
/* * Sloppily round up count to file system blocks. * * This will often reserve an extra block, but that avoids having to look * at the start offset, which isn't stable for O_APPEND until taking the * iolock. Also we need to reserve a block each for zeroing the old * EOF block and the new start block if they are unaligned. * * Any remaining block will be returned after the write.
*/ return xfs_zoned_space_reserve(mp, XFS_B_TO_FSB(mp, count) + 1 + 2,
flags, ac);
}
/*
 * NOTE(review): headerless fragments of the direct-I/O write completion
 * handler (COW/unwritten-extent conversion, in-core EOF update) followed by
 * the tail of the aligned dio write path and the zoned dio write wrapper.
 * Multiple function prologues and closes are missing; variables such as
 * nofs_flag, flags, offset, size, inode, iolock and ac are declared in the
 * missing text.
 */
/* * Capture amount written on completion as we can't reliably account * for it on submission.
*/
XFS_STATS_ADD(ip->i_mount, xs_write_bytes, size);
/* * We can allocate memory here while doing writeback on behalf of * memory reclaim. To avoid memory allocation deadlocks set the * task-wide nofs context for the following operations.
*/
nofs_flag = memalloc_nofs_save();
if (flags & IOMAP_DIO_COW) { if (iocb->ki_flags & IOCB_ATOMIC)
error = xfs_reflink_end_atomic_cow(ip, offset, size); else
error = xfs_reflink_end_cow(ip, offset, size); if (error) goto out;
}
/* * Unwritten conversion updates the in-core isize after extent * conversion but before updating the on-disk size. Updating isize any * earlier allows a racing dio read to find unwritten extents before * they are converted.
*/ if (flags & IOMAP_DIO_UNWRITTEN) {
error = xfs_iomap_write_unwritten(ip, offset, size, true); goto out;
}
/* * We need to update the in-core inode size here so that we don't end up * with the on-disk inode size being outside the in-core inode size. We * have no other method of updating EOF for AIO, so always do it here * if necessary. * * We need to lock the test/set EOF update as we can be racing with * other IO completions here to update the EOF. Failing to serialise * here can result in EOF moving backwards and Bad Things Happen when * that occurs. * * As IO completion only ever extends EOF, we can do an unlocked check * here to avoid taking the spinlock. If we land within the current EOF, * then we do not need to do an extending update at all, and we don't * need to take the lock to check this. If we race with an update moving * EOF, then we'll either still be beyond EOF and need to take the lock, * or we'll be within EOF and we don't need to take it at all.
*/ if (offset + size <= i_size_read(inode)) goto out;
/* NOTE(review): aligned dio write path fragment begins here. */
ret = xfs_ilock_iocb_for_write(iocb, &iolock); if (ret) return ret;
ret = xfs_file_write_checks(iocb, from, &iolock, ac); if (ret) goto out_unlock;
/* * We don't need to hold the IOLOCK exclusively across the IO, so demote * the iolock back to shared if we had to take the exclusive lock in * xfs_file_write_checks() for other reasons.
*/ if (iolock == XFS_IOLOCK_EXCL) {
xfs_ilock_demote(ip, XFS_IOLOCK_EXCL);
iolock = XFS_IOLOCK_SHARED;
}
trace_xfs_file_direct_write(iocb, from);
ret = iomap_dio_rw(iocb, from, ops, dops, 0, ac, 0);
out_unlock:
xfs_iunlock(ip, iolock); return ret;
}
/* NOTE(review): zoned dio write wrapper fragment (header missing). */
ret = xfs_zoned_write_space_reserve(ip->i_mount, iocb, from, 0, &ac); if (ret < 0) return ret;
ret = xfs_file_dio_write_aligned(ip, iocb, from,
&xfs_zoned_direct_write_iomap_ops,
&xfs_dio_zoned_write_ops, &ac);
xfs_zoned_space_unreserve(ip->i_mount, &ac); return ret;
}
/*
 * NOTE(review): xfs_file_dio_write_atomic below is truncated — the actual
 * I/O submission (trace + iomap_dio_rw using "dops") that should sit
 * between the lock demote and the -ENOPROTOOPT retry check is missing, so
 * "ret" is tested against a value it is never assigned in the visible text.
 */
/* * Handle block atomic writes * * Two methods of atomic writes are supported: * - REQ_ATOMIC-based, which would typically use some form of HW offload in the * disk * - COW-based, which uses a COW fork as a staging extent for data updates * before atomically updating extent mappings for the range being written *
*/ static noinline ssize_t
xfs_file_dio_write_atomic( struct xfs_inode *ip, struct kiocb *iocb, struct iov_iter *from)
{ unsignedint iolock = XFS_IOLOCK_SHARED;
ssize_t ret, ocount = iov_iter_count(from); conststruct iomap_ops *dops;
/* * HW offload should be faster, so try that first if it is already * known that the write length is not too large.
*/ if (ocount > xfs_inode_buftarg(ip)->bt_awu_max)
dops = &xfs_atomic_write_cow_iomap_ops; else
dops = &xfs_direct_write_iomap_ops;
retry:
ret = xfs_ilock_iocb_for_write(iocb, &iolock); if (ret) return ret;
ret = xfs_file_write_checks(iocb, from, &iolock, NULL); if (ret) goto out_unlock;
/* Demote similar to xfs_file_dio_write_aligned() */ if (iolock == XFS_IOLOCK_EXCL) {
xfs_ilock_demote(ip, XFS_IOLOCK_EXCL);
iolock = XFS_IOLOCK_SHARED;
}
/* NOTE(review): the iomap_dio_rw() submission is missing here. */
/* * The retry mechanism is based on the ->iomap_begin method returning * -ENOPROTOOPT, which would be when the REQ_ATOMIC-based write is not * possible. The REQ_ATOMIC-based method typically not be possible if * the write spans multiple extents or the disk blocks are misaligned.
*/ if (ret == -ENOPROTOOPT && dops == &xfs_direct_write_iomap_ops) {
xfs_iunlock(ip, iolock);
dops = &xfs_atomic_write_cow_iomap_ops; goto retry;
}
out_unlock: if (iolock)
xfs_iunlock(ip, iolock); return ret;
}
/*
 * NOTE(review): xfs_file_dio_write_unaligned below is truncated — the
 * trace + iomap_dio_rw() submission between inode_dio_wait() and the
 * -EAGAIN retry check is missing. The second half of this region (from the
 * sector-alignment check) is the headerless dispatch tail of
 * xfs_file_dio_write, a different function.
 */
/* * Handle block unaligned direct I/O writes * * In most cases direct I/O writes will be done holding IOLOCK_SHARED, allowing * them to be done in parallel with reads and other direct I/O writes. However, * if the I/O is not aligned to filesystem blocks, the direct I/O layer may need * to do sub-block zeroing and that requires serialisation against other direct * I/O to the same block. In this case we need to serialise the submission of * the unaligned I/O so that we don't get racing block zeroing in the dio layer. * In the case where sub-block zeroing is not required, we can do concurrent * sub-block dios to the same block successfully. * * Optimistically submit the I/O using the shared lock first, but use the * IOMAP_DIO_OVERWRITE_ONLY flag to tell the lower layers to return -EAGAIN * if block allocation or partial block zeroing would be required. In that case * we try again with the exclusive lock.
*/ static noinline ssize_t
xfs_file_dio_write_unaligned( struct xfs_inode *ip, struct kiocb *iocb, struct iov_iter *from)
{
size_t isize = i_size_read(VFS_I(ip));
size_t count = iov_iter_count(from); unsignedint iolock = XFS_IOLOCK_SHARED; unsignedint flags = IOMAP_DIO_OVERWRITE_ONLY;
ssize_t ret;
/* * Extending writes need exclusivity because of the sub-block zeroing * that the DIO code always does for partial tail blocks beyond EOF, so * don't even bother trying the fast path in this case.
*/ if (iocb->ki_pos > isize || iocb->ki_pos + count >= isize) { if (iocb->ki_flags & IOCB_NOWAIT) return -EAGAIN;
retry_exclusive:
iolock = XFS_IOLOCK_EXCL;
flags = IOMAP_DIO_FORCE_WAIT;
}
ret = xfs_ilock_iocb_for_write(iocb, &iolock); if (ret) return ret;
/* * We can't properly handle unaligned direct I/O to reflink files yet, * as we can't unshare a partial block.
*/ if (xfs_is_cow_inode(ip)) {
trace_xfs_reflink_bounce_dio_write(iocb, from);
ret = -ENOTBLK; goto out_unlock;
}
ret = xfs_file_write_checks(iocb, from, &iolock, NULL); if (ret) goto out_unlock;
/* * If we are doing exclusive unaligned I/O, this must be the only I/O * in-flight. Otherwise we risk data corruption due to unwritten extent * conversions from the AIO end_io handler. Wait for all other I/O to * drain first.
*/ if (flags & IOMAP_DIO_FORCE_WAIT)
inode_dio_wait(VFS_I(ip));
/* NOTE(review): the iomap_dio_rw() submission is missing here. */
/* * Retry unaligned I/O with exclusive blocking semantics if the DIO * layer rejected it for mapping or locking reasons. If we are doing * nonblocking user I/O, propagate the error.
*/ if (ret == -EAGAIN && !(iocb->ki_flags & IOCB_NOWAIT)) {
ASSERT(flags & IOMAP_DIO_OVERWRITE_ONLY);
xfs_iunlock(ip, iolock); goto retry_exclusive;
}
out_unlock: if (iolock)
xfs_iunlock(ip, iolock); return ret;
}
/* NOTE(review): xfs_file_dio_write dispatch tail (header missing). */
/* direct I/O must be aligned to device logical sector size */ if ((iocb->ki_pos | count) & target->bt_logical_sectormask) return -EINVAL;
/* * For always COW inodes we also must check the alignment of each * individual iovec segment, as they could end up with different * I/Os due to the way bio_iov_iter_get_pages works, and we'd * then overwrite an already written block.
*/ if (((iocb->ki_pos | count) & ip->i_mount->m_blockmask) ||
(xfs_is_always_cow_inode(ip) &&
(iov_iter_alignment(from) & ip->i_mount->m_blockmask))) return xfs_file_dio_write_unaligned(ip, iocb, from); if (xfs_is_zoned_inode(ip)) return xfs_file_dio_write_zoned(ip, iocb, from); if (iocb->ki_flags & IOCB_ATOMIC) return xfs_file_dio_write_atomic(ip, iocb, from); return xfs_file_dio_write_aligned(ip, iocb, from,
&xfs_direct_write_iomap_ops, &xfs_dio_write_ops, NULL);
}
/*
 * NOTE(review): headerless fragments of the buffered write path, the zoned
 * buffered write path, and the ->write_iter dispatch tail. Prologues (and
 * the declarations of ip, mp, iolock, cleared_space, ac, ocount, inode) are
 * in the missing text; several functions never close in the visible span.
 */
write_retry:
iolock = XFS_IOLOCK_EXCL;
ret = xfs_ilock_iocb(iocb, iolock); if (ret) return ret;
ret = xfs_file_write_checks(iocb, from, &iolock, NULL); if (ret) goto out;
trace_xfs_file_buffered_write(iocb, from);
ret = iomap_file_buffered_write(iocb, from,
&xfs_buffered_write_iomap_ops, &xfs_iomap_write_ops,
NULL);
/* * If we hit a space limit, try to free up some lingering preallocated * space before returning an error. In the case of ENOSPC, first try to * write back all dirty inodes to free up some of the excess reserved * metadata space. This reduces the chances that the eofblocks scan * waits on dirty mappings. Since xfs_flush_inodes() is serialized, this * also behaves as a filter to prevent too many eofblocks scans from * running at the same time. Use a synchronous scan to increase the * effectiveness of the scan.
*/ if (ret == -EDQUOT && !cleared_space) {
xfs_iunlock(ip, iolock);
xfs_blockgc_free_quota(ip, XFS_ICWALK_FLAG_SYNC);
cleared_space = true; goto write_retry;
} elseif (ret == -ENOSPC && !cleared_space) { struct xfs_icwalk icw = {0};
/* NOTE(review): the ENOSPC branch is cut off; the zoned buffered write
 * path fragment begins here. */
ret = xfs_zoned_write_space_reserve(mp, iocb, from, XFS_ZR_GREEDY, &ac); if (ret < 0) return ret;
ret = xfs_ilock_iocb(iocb, iolock); if (ret) goto out_unreserve;
ret = xfs_file_write_checks(iocb, from, &iolock, &ac); if (ret) goto out_unlock;
/* * Truncate the iter to the length that we were actually able to * allocate blocks for. This needs to happen after * xfs_file_write_checks, because that assigns ki_pos for O_APPEND * writes.
*/
iov_iter_truncate(from,
XFS_FSB_TO_B(mp, ac.reserved_blocks) -
(iocb->ki_pos & mp->m_blockmask)); if (!iov_iter_count(from)) goto out_unlock;
retry:
trace_xfs_file_buffered_write(iocb, from);
ret = iomap_file_buffered_write(iocb, from,
&xfs_buffered_write_iomap_ops, &xfs_iomap_write_ops,
&ac); if (ret == -ENOSPC && !cleared_space) { /* * Kick off writeback to convert delalloc space and release the * usually too pessimistic indirect block reservations.
*/
xfs_flush_inodes(mp);
cleared_space = true; goto retry;
}
/* NOTE(review): ->write_iter dispatch tail begins here (header missing). */
if (iocb->ki_flags & IOCB_ATOMIC) { if (ocount < xfs_get_atomic_write_min(ip)) return -EINVAL;
if (ocount > xfs_get_atomic_write_max(ip)) return -EINVAL;
ret = generic_atomic_write_valid(iocb, from); if (ret) return ret;
}
if (IS_DAX(inode)) return xfs_file_dax_write(iocb, from);
if (iocb->ki_flags & IOCB_DIRECT) { /* * Allow a directio write to fall back to a buffered * write *only* in the case that we're doing a reflink * CoW. In all other directio scenarios we do not * allow an operation to fall back to buffered mode.
*/
ret = xfs_file_dio_write(iocb, from); if (ret != -ENOTBLK) return ret;
}
if (xfs_is_zoned_inode(ip)) return xfs_file_buffered_write_zoned(iocb, from); return xfs_file_buffered_write(iocb, from);
}
/*
 * NOTE(review): xfs_file_sync_writes below is truncated (missing its final
 * "return false; }"), and from the first xfs_is_falloc_aligned() check
 * onward the text is a patchwork of fragments from the fallocate helpers
 * (collapse-range, insert-range, zero-range and the ->fallocate dispatch).
 * Tokens "staticinlinebool"/"returntrue"/"unsignedint" are fused.
 */
/* Does this file, inode, or mount want synchronous writes? */ staticinlinebool xfs_file_sync_writes(struct file *filp)
{ struct xfs_inode *ip = XFS_I(file_inode(filp));
if (xfs_has_wsync(ip->i_mount)) returntrue; if (filp->f_flags & (__O_SYNC | O_DSYNC)) returntrue; if (IS_SYNC(file_inode(filp))) returntrue;
/* NOTE(review): collapse-range fragment begins here (header missing). */
if (!xfs_is_falloc_aligned(XFS_I(inode), offset, len)) return -EINVAL;
/* * There is no need to overlap collapse range with EOF, in which case it * is effectively a truncate operation
*/ if (offset + len >= i_size_read(inode)) return -EINVAL;
/* NOTE(review): insert-range fragment begins here (header missing). */
if (!xfs_is_falloc_aligned(XFS_I(inode), offset, len)) return -EINVAL;
/* * New inode size must not exceed ->s_maxbytes, accounting for * possible signed overflow.
*/ if (inode->i_sb->s_maxbytes - isize < len) return -EFBIG;
/* Offset should be less than i_size */ if (offset >= isize) return -EINVAL;
error = xfs_falloc_setsize(file, isize + len); if (error) return error;
/* * Perform hole insertion now that the file size has been updated so * that if we crash during the operation we don't leave shifted extents * past EOF and hence losing access to the data that is contained within * them.
*/ return xfs_insert_file_space(XFS_I(inode), offset, len);
}
/* * Punch a hole and prealloc the range. We use a hole punch rather than * unwritten extent conversion for two reasons: * * 1.) Hole punch handles partial block zeroing for us. * 2.) If prealloc returns ENOSPC, the file range is still zero-valued by * virtue of the hole punch.
*/ staticint
xfs_falloc_zero_range( struct file *file, int mode,
loff_t offset,
loff_t len, struct xfs_zone_alloc_ctx *ac)
{ struct inode *inode = file_inode(file); unsignedint blksize = i_blocksize(inode);
loff_t new_size = 0; int error;
/* * If always_cow mode we can't use preallocations and thus should not * create them.
*/ if (xfs_is_always_cow_inode(XFS_I(inode))) return -EOPNOTSUPP;
/* NOTE(review): xfs_falloc_zero_range's body is cut off; the following
 * lines belong to the ->fallocate entry point. */
/* * Must wait for all AIO to complete before we continue as AIO can * change the file size on completion without holding any locks we * currently hold. We must do this first because AIO can update both * the on disk and in memory inode sizes, and the operations that follow * require the in-memory size to be fully up-to-date.
*/
inode_dio_wait(inode);
error = file_modified(file); if (error) goto out_unlock;
if (!S_ISREG(inode->i_mode)) return -EINVAL; if (mode & ~XFS_FALLOC_FL_SUPPORTED) return -EOPNOTSUPP;
/* * For zoned file systems, zeroing the first and last block of a hole * punch requires allocating a new block to rewrite the remaining data * and new zeroes out of place. Get a reservations for those before * taking the iolock. Dip into the reserved pool because we are * expected to be able to punch a hole even on a completely full * file system.
*/ if (xfs_is_zoned_inode(XFS_I(inode)) &&
(mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_ZERO_RANGE |
FALLOC_FL_COLLAPSE_RANGE))) return xfs_file_zoned_fallocate(file, mode, offset, len); return __xfs_file_fallocate(file, mode, offset, len, NULL);
}
STATICint
xfs_file_fadvise( struct file *file,
loff_t start,
loff_t end, int advice)
{ struct xfs_inode *ip = XFS_I(file_inode(file)); int ret; int lockflags = 0;
/* * Operations creating pages in page cache need protection from hole * punching and similar ops
*/ if (advice == POSIX_FADV_WILLNEED) {
lockflags = XFS_IOLOCK_SHARED;
xfs_ilock(ip, lockflags);
}
ret = generic_fadvise(file, start, end, advice); if (lockflags)
xfs_iunlock(ip, lockflags); return ret;
}
/*
 * NOTE(review): headerless fragments of the ->remap_file_range
 * implementation (flag validation, remap prep, block remapping, cowextsize
 * hint propagation, sync handling and unlock/error tail), followed by a
 * fragment of the directory-open path. src, dest, mp, cowextsize, remapped
 * etc. are declared in the missing prologue.
 */
if (remap_flags & ~(REMAP_FILE_DEDUP | REMAP_FILE_ADVISORY)) return -EINVAL;
if (!xfs_has_reflink(mp)) return -EOPNOTSUPP;
if (xfs_is_shutdown(mp)) return -EIO;
/* Prepare and then clone file data. */
ret = xfs_reflink_remap_prep(file_in, pos_in, file_out, pos_out,
&len, remap_flags); if (ret || len == 0) return ret;
ret = xfs_reflink_remap_blocks(src, pos_in, dest, pos_out, len,
&remapped); if (ret) goto out_unlock;
/* * Carry the cowextsize hint from src to dest if we're sharing the * entire source file to the entire destination file, the source file * has a cowextsize hint, and the destination file does not.
*/
cowextsize = 0; if (pos_in == 0 && len == i_size_read(inode_in) &&
(src->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE) &&
pos_out == 0 && len >= i_size_read(inode_out) &&
!(dest->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE))
cowextsize = src->i_cowextsize;
ret = xfs_reflink_update_dest(dest, pos_out + len, cowextsize,
remap_flags); if (ret) goto out_unlock;
if (xfs_file_sync_writes(file_in) || xfs_file_sync_writes(file_out))
xfs_log_force_inode(dest);
out_unlock:
xfs_iunlock2_remapping(src, dest); if (ret)
trace_xfs_reflink_remap_range_error(dest, ret, _RET_IP_); /* * If the caller did not set CAN_SHORTEN, then it is not prepared to * handle partial results -- either the whole remap succeeds, or we * must say why it did not. In this case, any error should be returned * to the caller.
*/ if (ret && remapped < len && !(remap_flags & REMAP_FILE_CAN_SHORTEN)) return ret; return remapped > 0 ? remapped : ret;
}
/* NOTE(review): directory-open fragment begins here (header missing). */
if (xfs_is_shutdown(ip->i_mount)) return -EIO;
error = generic_file_open(inode, file); if (error) return error;
/* * If there are any blocks, read-ahead block 0 as we're almost * certain to have the next operation be a read there.
*/
mode = xfs_ilock_data_map_shared(ip); if (ip->i_df.if_nextents > 0)
error = xfs_dir3_data_readahead(ip, 0, 0);
xfs_iunlock(ip, mode); return error;
}
/*
 * NOTE(review): xfs_file_release below is missing its final "return 0; }";
 * after it come headerless fragments of the readdir buffer sizing and of
 * the write-fault path (mmaplock handling and page_mkwrite dispatch).
 */
/* * Don't bother propagating errors. We're just doing cleanup, and the caller * ignores the return value anyway.
*/ STATICint
xfs_file_release( struct inode *inode, struct file *file)
{ struct xfs_inode *ip = XFS_I(inode); struct xfs_mount *mp = ip->i_mount;
/* * If this is a read-only mount or the file system has been shut down, * don't generate I/O.
*/ if (xfs_is_readonly(mp) || xfs_is_shutdown(mp)) return 0;
/* * If we previously truncated this file and removed old data in the * process, we want to initiate "early" writeout on the last close. * This is an attempt to combat the notorious NULL files problem which * is particularly noticeable from a truncate down, buffered (re-)write * (delalloc), followed by a crash. What we are effectively doing here * is significantly reducing the time window where we'd otherwise be * exposed to that problem.
*/ if (xfs_iflags_test_and_clear(ip, XFS_ITRUNCATED)) {
xfs_iflags_clear(ip, XFS_EOFBLOCKS_RELEASED); if (ip->i_delayed_blks > 0)
filemap_flush(inode->i_mapping);
}
/* * XFS aggressively preallocates post-EOF space to generate contiguous * allocations for writers that append to the end of the file. * * To support workloads that close and reopen the file frequently, these * preallocations usually persist after a close unless it is the first * close for the inode. This is a tradeoff to generate tightly packed * data layouts for unpacking tarballs or similar archives that write * one file after another without going back to it while keeping the * preallocation for files that have recurring open/write/close cycles. * * This heuristic is skipped for inodes with the append-only flag as * that flag is rather pointless for inodes written only once. * * There is no point in freeing blocks here for open but unlinked files * as they will be taken care of by the inactivation path soon. * * When releasing a read-only context, don't flush data or trim post-EOF * blocks. This avoids open/read/close workloads from removing EOF * blocks that other writers depend upon to reduce fragmentation. * * Inodes on the zoned RT device never have preallocations, so skip * taking the locks below.
*/ if (!inode->i_nlink ||
!(file->f_mode & FMODE_WRITE) ||
(ip->i_diflags & XFS_DIFLAG_APPEND) ||
xfs_is_zoned_inode(ip)) return 0;
/* * If we can't get the iolock just skip truncating the blocks past EOF * because we could deadlock with the mmap_lock otherwise. We'll get * another chance to drop them once the last reference to the inode is * dropped, so we'll never leak blocks permanently.
*/ if (!xfs_iflags_test(ip, XFS_EOFBLOCKS_RELEASED) &&
xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) { if (xfs_can_free_eofblocks(ip) &&
!xfs_iflags_test_and_set(ip, XFS_EOFBLOCKS_RELEASED))
xfs_free_eofblocks(ip);
xfs_iunlock(ip, XFS_IOLOCK_EXCL);
}
/* NOTE(review): "return 0; }" missing; readdir fragment begins here. */
/* * The Linux API doesn't pass down the total size of the buffer * we read into down to the filesystem. With the filldir concept * it's not needed for correct information, but the XFS dir2 leaf * code wants an estimate of the buffer size to calculate it's * readahead window and size the buffers used for mapping to * physical blocks. * * Try to give it an estimate that's good enough, maybe at some * point we can change the ->readdir prototype to include the * buffer size. For now we use the current glibc buffer size.
*/
bufsize = (size_t)min_t(loff_t, XFS_READDIR_BUFSIZE, ip->i_disk_size);
/* NOTE(review): write-fault fragment begins here (header missing). */
/* * Normally we only need the shared mmaplock, but if a reflink remap is * in progress we take the exclusive lock to wait for the remap to * finish before taking a write fault.
*/
xfs_ilock(ip, XFS_MMAPLOCK_SHARED); if (xfs_iflags_test(ip, XFS_IREMAPPING)) {
xfs_iunlock(ip, XFS_MMAPLOCK_SHARED);
xfs_ilock(ip, XFS_MMAPLOCK_EXCL);
lock_mode = XFS_MMAPLOCK_EXCL;
}
if (IS_DAX(inode))
ret = xfs_dax_fault_locked(vmf, order, true); else
ret = iomap_page_mkwrite(vmf, &xfs_buffered_write_iomap_ops,
ac);
xfs_iunlock(ip, lock_mode);
sb_end_pagefault(inode->i_sb); return ret;
}
static vm_fault_t
xfs_write_fault_zoned( struct vm_fault *vmf, unsignedint order)
{ struct xfs_inode *ip = XFS_I(file_inode(vmf->vma->vm_file)); unsignedint len = folio_size(page_folio(vmf->page)); struct xfs_zone_alloc_ctx ac = { }; int error;
vm_fault_t ret;
/* * This could over-allocate as it doesn't check for truncation. * * But as the overallocation is limited to less than a folio and will be * release instantly that's just fine.
*/
error = xfs_zoned_space_reserve(ip->i_mount,
XFS_B_TO_FSB(ip->i_mount, len), 0, &ac); if (error < 0) return vmf_fs_error(error);
ret = __xfs_write_fault(vmf, order, &ac);
xfs_zoned_space_unreserve(ip->i_mount, &ac); return ret;
}
/*
 * NOTE(review): two headerless fault-dispatch fragments (apparently the
 * tails of the ->fault and ->huge_fault handlers); prologues declaring
 * inode/vmf/order are missing.
 */
/* DAX can shortcut the normal fault path on write faults! */ if (IS_DAX(inode)) { if (xfs_is_write_fault(vmf)) return xfs_write_fault(vmf, 0); return xfs_dax_read_fault(vmf, 0);
}
/* DAX can shortcut the normal fault path on write faults! */ if (xfs_is_write_fault(vmf)) return xfs_write_fault(vmf, order); return xfs_dax_read_fault(vmf, order);
}
/*
 * pfn_mkwrite was originally intended to ensure we capture time stamp
 * updates on write faults. In reality, it needs to serialise against
 * truncate and prepare memory for writing so handle it as a standard write
 * fault.
 */
static vm_fault_t
xfs_filemap_pfn_mkwrite(
	struct vm_fault		*vmf)
{
	return xfs_write_fault(vmf, 0);
}
/*
 * NOTE(review): headerless fragment of the mmap setup path rejecting
 * MAP_SYNC when the backing dax_device is not synchronous; desc/target are
 * declared in the missing prologue.
 */
/* * We don't support synchronous mappings for non-DAX files and * for DAX files if underneath dax_device is not synchronous.
*/ if (!daxdev_mapping_supported(desc->vm_flags, file_inode(file),
target->bt_daxdev)) return -EOPNOTSUPP;
/*
 * NOTE(review): trailing non-code text — a German web-page disclaimer that
 * was evidently picked up when this file was scraped, translated here:
 * "The information on this web page was carefully compiled to the best of
 * our knowledge. However, neither completeness, correctness, nor quality
 * of the provided information is guaranteed. Note: the coloured syntax
 * display and the measurement are still experimental."
 * This text does not belong in the source file and should be removed.
 */