/* NOTE(review): this chunk begins mid-function. It is the interior of
 * jfs_commit_inode(); the signature, locals (tid, rc, wait, noisy) and
 * the trailing txEnd()/mutex_unlock()/return lie outside the visible
 * range — do not treat this span as a complete function.
 *
 * Don't commit if inode has been committed since last being
 * marked dirty, or if it has been deleted.
 */ if (inode->i_nlink == 0 || !test_cflag(COMMIT_Dirty, inode)) return 0;
if (isReadOnly(inode)) { /* kernel allows writes to devices on read-only * partitions and may think inode is dirty
 */ if (!special_file(inode->i_mode) && noisy) {
jfs_err("jfs_commit_inode(0x%p) called on read-only volume",
inode);
jfs_err("Is remount racy?");
/* noisy is a rate limiter: stop logging after it hits zero */
noisy--;
} return 0;
}
/* txBegin() must run BEFORE taking commit_mutex: txBegin may block,
 * and the inode could be dirtied and need committing while we wait.
 */
tid = txBegin(inode->i_sb, COMMIT_INODE);
mutex_lock(&JFS_IP(inode)->commit_mutex);
/* * Retest inode state after taking commit_mutex
 */ if (inode->i_nlink && test_cflag(COMMIT_Dirty, inode))
rc = txCommit(tid, 1, &inode, wait ? COMMIT_SYNC : 0);
int jfs_write_inode(struct inode *inode, struct writeback_control *wbc)
{ int wait = wbc->sync_mode == WB_SYNC_ALL;
if (inode->i_nlink == 0) return 0; /* * If COMMIT_DIRTY is not set, the inode isn't really dirty. * It has been committed since the last change, but was still * on the dirty inode list.
*/ if (!test_cflag(COMMIT_Dirty, inode)) { /* Make sure committed changes hit the disk */
jfs_flush_journal(JFS_SBI(inode->i_sb)->log, wait); return 0;
}
if (isReadOnly(inode)) { if (!special_file(inode->i_mode) && noisy) { /* kernel allows writes to devices on read-only * partitions and may try to mark inode dirty
*/
jfs_err("jfs_dirty_inode called on read-only volume");
jfs_err("Is remount racy?");
noisy--;
} return;
}
set_cflag(COMMIT_Dirty, inode);
}
/*
 * jfs_get_block() - VFS get_block callback: map file block @lblock of
 * @ip into @bh_result, allocating when @create is nonzero.
 *
 * NOTE(review): this chunk is truncated — the "unlock:" label targeted
 * by the gotos below, the buffer-mapping code, and the allocation path
 * for unmapped blocks lie past the visible range.
 */
int jfs_get_block(struct inode *ip, sector_t lblock, struct buffer_head *bh_result, int create)
{
s64 lblock64 = lblock; int rc = 0;
xad_t xad;
s64 xaddr; int xflag;
/* requested mapping length, converted from bytes to fs blocks */
s32 xlen = bh_result->b_size >> ip->i_blkbits;
/* * Take appropriate lock on inode: write lock when we may allocate,
 * read lock for a plain lookup
 */ if (create)
IWRITE_LOCK(ip, RDWRLOCK_NORMAL); else
IREAD_LOCK(ip, RDWRLOCK_NORMAL);
/* Consult the extent tree only when the block lies inside i_size,
 * xtLookup succeeds, and an extent is actually allocated (xaddr != 0).
 */
if (((lblock64 << ip->i_sb->s_blocksize_bits) < ip->i_size) &&
(!xtLookup(ip, lblock64, xlen, &xflag, &xaddr, &xlen, 0)) &&
xaddr) { if (xflag & XAD_NOTRECORDED) { if (!create) /* * Allocated but not recorded, read treats * this as a hole
 */ goto unlock;
XADoffset(&xad, lblock64);
XADlength(&xad, xlen);
XADaddress(&xad, xaddr);
/* commit the previously-unrecorded extent into the xtree */
rc = extRecord(ip, &xad); if (rc) goto unlock;
/* tell the VFS this buffer maps newly recorded blocks */
set_buffer_new(bh_result);
}
/* NOTE(review): interior fragment of jfs_direct_IO(); the function
 * signature and the declarations of ret, iocb, inode, iter, count and
 * mapping are outside the visible range.
 *
 * Hand the request to the generic block-device direct-I/O engine,
 * using jfs_get_block to map file blocks.
 */
ret = blockdev_direct_IO(iocb, inode, iter, jfs_get_block);
/* * In case of error extending write may have instantiated a few * blocks outside i_size. Trim these off again.
 */ if (unlikely(iov_iter_rw(iter) == WRITE && ret < 0)) {
loff_t isize = i_size_read(inode);
loff_t end = iocb->ki_pos + count;
if (end > isize)
jfs_write_failed(mapping, end);
}
/* * Guts of jfs_truncate. Called with locks already held. Can be called * with directory for truncating directory index table.
 */ void jfs_truncate_nolock(struct inode *ip, loff_t length)
{
loff_t newsize;
tid_t tid;
/* * The commit_mutex cannot be taken before txBegin. * txBegin may block and there is a chance the inode * could be marked dirty and need to be committed * before txBegin unblocks
 */
/* NOTE(review): chunk truncated/garbled here — the txBegin() call this
 * comment describes, and the xtTruncate/txCommit body of the function,
 * are not in the visible range; mutex_lock() appears without the
 * preceding txBegin(). Do not treat this span as complete.
 */
mutex_lock(&JFS_IP(ip)->commit_mutex);
/*
 * NOTE(review): the text below is German website boilerplate that was
 * scraped into this file along with the code; it is not part of the
 * original source. English translation, preserved as a comment so the
 * file remains parseable:
 *
 * "The information on this website has been carefully compiled to the
 * best of our knowledge. However, no guarantee is given as to the
 * completeness, correctness, or quality of the information provided.
 * Note: the colored syntax highlighting and the measurement are still
 * experimental."
 */