// SPDX-License-Identifier: GPL-2.0 /* * linux/fs/ext4/xattr.c * * Copyright (C) 2001-2003 Andreas Gruenbacher, <agruen@suse.de> * * Fix by Harrison Xing <harrison@mountainviewdata.com>. * Ext4 code with a lot of help from Eric Jarman <ejarman@acm.org>. * Extended attributes for symlinks and special files added per * suggestion of Luka Renko <luka.renko@hermes.si>. * xattr consolidation Copyright (c) 2004 James Morris <jmorris@redhat.com>, * Red Hat Inc. * ea-in-inode support by Alex Tomas <alex@clusterfs.com> aka bzzz * and Andreas Gruenbacher <agruen@suse.de>.
*/
/* * Extended attributes are stored directly in inodes (on file systems with * inodes bigger than 128 bytes) and on additional disk blocks. The i_file_acl * field contains the block number if an inode uses an additional block. All * attributes must fit in the inode and one additional block. Blocks that * contain the identical set of attributes may be shared among several inodes. * Identical blocks are detected by keeping a cache of blocks that have * recently been accessed. * * The attributes in inodes and on blocks have a different header; the entries * are stored in the same format: * * +------------------+ * | header | * | entry 1 | | * | entry 2 | | growing downwards * | entry 3 | v * | four null bytes | * | . . . | * | value 1 | ^ * | value 3 | | growing upwards * | value 2 | | * +------------------+ * * The header is followed by multiple entry descriptors. In disk blocks, the * entry descriptors are kept sorted. In inodes, they are unsorted. The * attribute values are aligned to the end of the block in no specific order. * * Locking strategy * ---------------- * EXT4_I(inode)->i_file_acl is protected by EXT4_I(inode)->xattr_sem. * EA blocks are only changed if they are exclusive to an inode, so * holding xattr_sem also means that nothing but the EA block's reference * count can change. Multiple writers to the same block are synchronized * by the buffer lock.
*/
header -= 1; if (end - (void *)header < sizeof(*header) + sizeof(u32)) {
err_str = "in-inode xattr block too small"; goto errout;
} if (header->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC)) {
err_str = "bad magic number in in-inode xattr"; goto errout;
}
}
/* Find the end of the names list */ while (!IS_LAST_ENTRY(e)) { struct ext4_xattr_entry *next = EXT4_XATTR_NEXT(e); if ((void *)next >= end) {
err_str = "e_name out of bounds"; goto errout;
} if (strnlen(e->e_name, e->e_name_len) != e->e_name_len) {
err_str = "bad e_name length"; goto errout;
}
e = next;
}
/* Check the values */ while (!IS_LAST_ENTRY(entry)) {
u32 size = le32_to_cpu(entry->e_value_size); unsignedlong ea_ino = le32_to_cpu(entry->e_value_inum);
if (!ext4_has_feature_ea_inode(inode->i_sb) && ea_ino) {
err_str = "ea_inode specified without ea_inode feature enabled"; goto errout;
} if (ea_ino && ((ea_ino == EXT4_ROOT_INO) ||
!ext4_valid_inum(inode->i_sb, ea_ino))) {
err_str = "invalid ea_ino"; goto errout;
} if (ea_ino && !size) {
err_str = "invalid size in ea xattr"; goto errout;
} if (size > EXT4_XATTR_SIZE_MAX) {
err_str = "e_value size too large"; goto errout;
}
/* * The value cannot overlap the names, and the value * with padding cannot extend beyond 'end'. Check both * the padded and unpadded sizes, since the size may * overflow to 0 when adding padding.
*/ if (offs > end - value_start) {
err_str = "e_value out of bounds"; goto errout;
}
value = value_start + offs; if (value < (void *)e + sizeof(u32) ||
size > end - value ||
EXT4_XATTR_SIZE(size) > end - value) {
err_str = "overlapping e_value "; goto errout;
}
}
entry = EXT4_XATTR_NEXT(entry);
} if (bh)
set_buffer_verified(bh); return 0;
/*
 * Read the EA value from an inode.
 *
 * Reads @size bytes of the value stored in @ea_inode's data blocks,
 * starting at logical block 0, into @buf.  The value is laid out
 * contiguously, so only the final block may be partially used
 * (tail_size bytes).
 *
 * Returns 0 on success or a negative error code.  -EFSCORRUPTED is
 * returned if a hole is found, since an EA inode must be fully mapped.
 */
static int ext4_xattr_inode_read(struct inode *ea_inode, void *buf, size_t size)
{
	int blocksize = 1 << ea_inode->i_blkbits;
	int bh_count = (size + blocksize - 1) >> ea_inode->i_blkbits;
	/* Bytes used in the last block; a multiple of blocksize uses it fully. */
	int tail_size = (size % blocksize) ?: blocksize;
	struct buffer_head *bhs_inline[8];
	struct buffer_head **bhs = bhs_inline;
	int i, ret;

	/* Small values use the on-stack array; larger ones need a heap array. */
	if (bh_count > ARRAY_SIZE(bhs_inline)) {
		bhs = kmalloc_array(bh_count, sizeof(*bhs), GFP_NOFS);
		if (!bhs)
			return -ENOMEM;
	}

	ret = ext4_bread_batch(ea_inode, 0 /* block */, bh_count,
			       true /* wait */, bhs);
	if (ret)
		goto free_bhs;

	for (i = 0; i < bh_count; i++) {
		/* There shouldn't be any holes in ea_inode. */
		if (!bhs[i]) {
			ret = -EFSCORRUPTED;
			goto put_bhs;
		}
		memcpy((char *)buf + blocksize * i, bhs[i]->b_data,
		       i < bh_count - 1 ? blocksize : tail_size);
	}
	ret = 0;
put_bhs:
	for (i = 0; i < bh_count; i++)
		brelse(bhs[i]);
free_bhs:
	if (bhs != bhs_inline)
		kfree(bhs);
	return ret;
}
/* * We have to check for this corruption early as otherwise * iget_locked() could wait indefinitely for the state of our * parent inode.
*/ if (parent->i_ino == ea_ino) {
ext4_error(parent->i_sb, "Parent and EA inode have the same ino %lu", ea_ino); return -EFSCORRUPTED;
}
inode = ext4_iget(parent->i_sb, ea_ino, EXT4_IGET_EA_INODE); if (IS_ERR(inode)) {
err = PTR_ERR(inode);
ext4_error(parent->i_sb, "error while reading EA inode %lu err=%d", ea_ino,
err); return err;
}
ext4_xattr_inode_set_class(inode);
/* * Check whether this is an old Lustre-style xattr inode. Lustre * implementation does not have hash validation, rather it has a * backpointer from ea_inode to the parent inode.
*/ if (ea_inode_hash != ext4_xattr_inode_get_hash(inode) &&
EXT4_XATTR_INODE_GET_PARENT(inode) == parent->i_ino &&
inode->i_generation == parent->i_generation) {
ext4_set_inode_state(inode, EXT4_STATE_LUSTRE_EA_INODE);
ext4_xattr_inode_set_ref(inode, 1);
} else {
inode_lock_nested(inode, I_MUTEX_XATTR);
inode->i_flags |= S_NOQUOTA;
inode_unlock(inode);
}
*ea_inode = inode; return 0;
}
/* Remove entry from mbcache when EA inode is getting evicted */ void ext4_evict_ea_inode(struct inode *inode)
{ struct mb_cache_entry *oe;
if (!EA_INODE_CACHE(inode)) return; /* Wait for entry to get unused so that we can remove it */ while ((oe = mb_cache_entry_delete_or_get(EA_INODE_CACHE(inode),
ext4_xattr_inode_get_hash(inode), inode->i_ino))) {
mb_cache_entry_wait_unused(oe);
mb_cache_entry_put(EA_INODE_CACHE(inode), oe);
}
}
/* * Not good. Maybe the entry hash was calculated * using the buggy signed char version?
*/
e_hash = ext4_xattr_hash_entry_signed(entry->e_name, entry->e_name_len,
&tmp_data, 1); /* Still no match - bad */ if (e_hash != entry->e_hash) return -EFSCORRUPTED;
/* Let people know about old hash */
pr_warn_once("ext4: filesystem with signed xattr name hash");
} return 0;
}
/* * Read xattr value from the EA inode.
*/ staticint
ext4_xattr_inode_get(struct inode *inode, struct ext4_xattr_entry *entry, void *buffer, size_t size)
{ struct mb_cache *ea_inode_cache = EA_INODE_CACHE(inode); struct inode *ea_inode; int err;
/* * ext4_xattr_get() * * Copy an extended attribute into the buffer * provided, or compute the buffer size required. * Buffer is NULL to compute the size of the buffer required. * * Returns a negative error number on failure, or the number of bytes * used / required on success.
*/ int
ext4_xattr_get(struct inode *inode, int name_index, constchar *name, void *buffer, size_t buffer_size)
{ int error;
if (unlikely(ext4_forced_shutdown(inode->i_sb))) return -EIO;
/*
 * Inode operation listxattr()
 *
 * d_inode(dentry)->i_rwsem: don't care
 *
 * Copy a list of attribute names into the buffer
 * provided, or compute the buffer size required.
 * Buffer is NULL to compute the size of the buffer required.
 *
 * Returns a negative error number on failure, or the number of bytes
 * used / required on success.
 */
ssize_t
ext4_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size)
{
	struct ext4_inode_info *ei = EXT4_I(d_inode(dentry));
	int ibody_len, block_len;
	int total;

	down_read(&ei->xattr_sem);
	/* First the in-inode entries... */
	ibody_len = ext4_xattr_ibody_list(dentry, buffer, buffer_size);
	if (ibody_len < 0) {
		total = ibody_len;
		goto out;
	}
	if (buffer) {
		buffer += ibody_len;
		buffer_size -= ibody_len;
	}
	/* ...then the entries in the external xattr block, if any. */
	block_len = ext4_xattr_block_list(dentry, buffer, buffer_size);
	total = block_len < 0 ? block_len : ibody_len + block_len;
out:
	up_read(&ei->xattr_sem);
	return total;
}
/* * If the EXT4_FEATURE_COMPAT_EXT_ATTR feature of this file system is * not set, set it.
*/ staticvoid ext4_xattr_update_super_block(handle_t *handle, struct super_block *sb)
{ if (ext4_has_feature_xattr(sb)) return;
/*
 * Estimate the number of journal credits needed to set an xattr of
 * @value_len bytes on @inode.  @block_bh is the inode's current external
 * xattr block (or NULL); @is_create indicates a brand-new attribute, in
 * which case no old ea_inode needs dereferencing.
 *
 * Returns the credit count (always positive).
 */
int __ext4_xattr_set_credits(struct super_block *sb, struct inode *inode,
			     struct buffer_head *block_bh, size_t value_len,
			     bool is_create)
{
	int credits;
	int blocks;

	/*
	 * 1) Owner inode update
	 * 2) Ref count update on old xattr block
	 * 3) new xattr block
	 * 4) block bitmap update for new xattr block
	 * 5) group descriptor for new xattr block
	 * 6) block bitmap update for old xattr block
	 * 7) group descriptor for old block
	 *
	 * 6 & 7 can happen if we have two racing threads T_a and T_b
	 * which are each trying to set an xattr on inodes I_a and I_b
	 * which were both initially sharing an xattr block.
	 */
	credits = 7;

	/*
	 * In case of inline data, we may push out the data to a block,
	 * so we need to reserve credits for this eventuality
	 */
	if (inode && ext4_has_inline_data(inode))
		credits += ext4_chunk_trans_extent(inode, 1) + 1;

	/* We are done if ea_inode feature is not enabled. */
	if (!ext4_has_feature_ea_inode(sb))
		return credits;

	/* New ea_inode, inode map, block bitmap, group descriptor. */
	credits += 4;

	/*
	 * Data blocks for the new value.  This initialization was missing,
	 * leaving 'blocks' uninitialized before the increment below.
	 */
	blocks = (value_len + sb->s_blocksize - 1) >> sb->s_blocksize_bits;

	/* Indirection block or one level of extent tree. */
	blocks += 1;

	/* Block bitmap and group descriptor updates for each block. */
	credits += blocks * 2;

	/* Blocks themselves. */
	credits += blocks;

	if (!is_create) {
		/* Dereference ea_inode holding old xattr value.
		 * Old ea_inode, inode map, block bitmap, group descriptor.
		 */
		credits += 4;

		/* Data blocks for old ea_inode. */
		blocks = XATTR_SIZE_MAX >> sb->s_blocksize_bits;

		/* Indirection block or one level of extent tree for old
		 * ea_inode.
		 */
		blocks += 1;

		/* Block bitmap and group descriptor updates for each block. */
		credits += blocks * 2;
	}

	/* We may need to clone the existing xattr block in which case we need
	 * to increment ref counts for existing ea_inodes referenced by it.
	 */
	if (block_bh) {
		struct ext4_xattr_entry *entry = BFIRST(block_bh);

		for (; !IS_LAST_ENTRY(entry); entry = EXT4_XATTR_NEXT(entry))
			if (entry->e_value_inum)
				/* Ref count update on ea_inode. */
				credits += 1;
	}
	return credits;
}
staticint ext4_xattr_inode_update_ref(handle_t *handle, struct inode *ea_inode, int ref_change)
{ struct ext4_iloc iloc;
u64 ref_count; int ret;
inode_lock_nested(ea_inode, I_MUTEX_XATTR);
ret = ext4_reserve_inode_write(handle, ea_inode, &iloc); if (ret) goto out;
err = ext4_xattr_inode_dec_ref(handle, ea_inode); if (err) {
ext4_warning_inode(ea_inode, "ea_inode dec ref err=%d",
err); continue;
}
if (!skip_quota)
ext4_xattr_inode_free_quota(parent, ea_inode,
le32_to_cpu(entry->e_value_size));
/* * Forget about ea_inode within the same transaction that * decrements the ref count. This avoids duplicate decrements in * case the rest of the work spills over to subsequent * transactions.
*/
entry->e_value_inum = 0;
entry->e_value_size = 0;
dirty = true;
}
if (dirty) { /* * Note that we are deliberately skipping csum calculation for * the final update because we do not expect any journal * restarts until xattr block is freed.
*/
if (ea_block_cache) {
ce = mb_cache_entry_get(ea_block_cache, hash,
bh->b_blocknr); if (ce) {
set_bit(MBE_REUSABLE_B, &ce->e_flags);
mb_cache_entry_put(ea_block_cache, ce);
}
}
}
ext4_xattr_block_csum_set(inode, bh); /* * Beware of this ugliness: Releasing of xattr block references * from different inodes can race and so we have to protect * from a race where someone else frees the block (and releases * its journal_head) before we are done dirtying the buffer. In * nojournal mode this race is harmless and we actually cannot * call ext4_handle_dirty_metadata() with locked buffer as * that function can call sync_dirty_buffer() so for that case * we handle the dirtying after unlocking the buffer.
*/ if (ext4_handle_valid(handle))
error = ext4_handle_dirty_metadata(handle, inode, bh);
unlock_buffer(bh); if (!ext4_handle_valid(handle))
error = ext4_handle_dirty_metadata(handle, inode, bh); if (IS_SYNC(inode))
ext4_handle_sync(handle);
dquot_free_block(inode, EXT4_C2B(EXT4_SB(inode->i_sb), 1));
ea_bdebug(bh, "refcount now=%d; releasing",
le32_to_cpu(BHDR(bh)->h_refcount));
}
out:
ext4_std_error(inode->i_sb, error); return;
}
/*
 * Find the available free space for EAs. This also returns the total number of
 * bytes used by EA entries.
 */
static size_t ext4_xattr_free_space(struct ext4_xattr_entry *last,
				    size_t *min_offs, void *base, int *total)
{
	struct ext4_xattr_entry *entry = last;

	while (!IS_LAST_ENTRY(entry)) {
		/* Each entry's descriptor counts toward the used total. */
		if (total)
			*total += EXT4_XATTR_LEN(entry->e_name_len);
		/* Only in-block values (no e_value_inum) bound min_offs. */
		if (!entry->e_value_inum && entry->e_value_size) {
			size_t offs = le16_to_cpu(entry->e_value_offs);

			if (offs < *min_offs)
				*min_offs = offs;
		}
		entry = EXT4_XATTR_NEXT(entry);
	}
	/* Gap between the terminating entry and the lowest value, minus
	 * the four NUL bytes that end the entry list. */
	return (*min_offs - ((void *)entry - base) - sizeof(__u32));
}
/* * Write the value of the EA in an inode.
*/ staticint ext4_xattr_inode_write(handle_t *handle, struct inode *ea_inode, constvoid *buf, int bufsize)
{ struct buffer_head *bh = NULL; unsignedlong block = 0; int blocksize = ea_inode->i_sb->s_blocksize; int max_blocks = (bufsize + blocksize - 1) >> ea_inode->i_blkbits; int csize, wsize = 0; int ret = 0, ret2 = 0; int retries = 0;
/*
 * NOTE(review): this body appears truncated. The locals 'block',
 * 'max_blocks', 'csize', 'wsize', and 'retries' are declared but never
 * used below, and the 'out:' label has no corresponding goto — the
 * block-allocation/write loop that presumably sits between the
 * declarations and the mark_inode_dirty call is missing here. Recover
 * the full body from version control before relying on this function.
 */
ret2 = ext4_mark_inode_dirty(handle, ea_inode); if (unlikely(ret2 && !ret))
ret = ret2;
out:
/* brelse(NULL) is a no-op, so this is safe even when bh was never set. */
brelse(bh);
return ret;
}
/* * Create an inode to store the value of a large EA.
*/ staticstruct inode *ext4_xattr_inode_create(handle_t *handle, struct inode *inode, u32 hash)
{ struct inode *ea_inode = NULL;
uid_t owner[2] = { i_uid_read(inode), i_gid_read(inode) }; int err;
if (inode->i_sb->s_root == NULL) {
ext4_warning(inode->i_sb, "refuse to create EA inode when umounting");
WARN_ON(1); return ERR_PTR(-EINVAL);
}
/* * Let the next inode be the goal, so we try and allocate the EA inode * in the same group, or nearby one.
*/
ea_inode = ext4_new_inode(handle, inode->i_sb->s_root->d_inode,
S_IFREG | 0600, NULL, inode->i_ino + 1, owner,
EXT4_EA_INODE_FL); if (!IS_ERR(ea_inode)) {
ea_inode->i_op = &ext4_file_inode_operations;
ea_inode->i_fop = &ext4_file_operations;
ext4_set_aops(ea_inode);
ext4_xattr_inode_set_class(ea_inode);
unlock_new_inode(ea_inode);
ext4_xattr_inode_set_ref(ea_inode, 1);
ext4_xattr_inode_set_hash(ea_inode, hash);
err = ext4_mark_inode_dirty(handle, ea_inode); if (!err)
err = ext4_inode_attach_jinode(ea_inode); if (err) { if (ext4_xattr_inode_dec_ref(handle, ea_inode))
ext4_warning_inode(ea_inode, "cleanup dec ref error %d", err);
iput(ea_inode); return ERR_PTR(err);
}
/* * Xattr inodes are shared therefore quota charging is performed * at a higher level.
*/
dquot_free_inode(ea_inode);
dquot_drop(ea_inode);
inode_lock(ea_inode);
ea_inode->i_flags |= S_NOQUOTA;
inode_unlock(ea_inode);
}
/* * Add value of the EA in an inode.
*/ staticstruct inode *ext4_xattr_inode_lookup_create(handle_t *handle, struct inode *inode, constvoid *value, size_t value_len)
{ struct inode *ea_inode;
u32 hash; int err;
/* Account inode & space to quota even if sharing... */
err = ext4_xattr_inode_alloc_quota(inode, value_len); if (err) return ERR_PTR(err);
/* Create an inode for the EA value */
ea_inode = ext4_xattr_inode_create(handle, inode, hash); if (IS_ERR(ea_inode)) {
ext4_xattr_inode_free_quota(inode, NULL, value_len); return ea_inode;
}
err = ext4_xattr_inode_write(handle, ea_inode, value, value_len); if (err) { if (ext4_xattr_inode_dec_ref(handle, ea_inode))
ext4_warning_inode(ea_inode, "cleanup dec ref error %d", err); goto out_err;
}
/* Space used by old and new values. */
old_size = (!s->not_found && !here->e_value_inum) ?
EXT4_XATTR_SIZE(le32_to_cpu(here->e_value_size)) : 0;
new_size = (i->value && !in_inode) ? EXT4_XATTR_SIZE(i->value_len) : 0;
/* * Optimization for the simple case when old and new values have the * same padded sizes. Not applicable if external inodes are involved.
*/ if (new_size && new_size == old_size) {
size_t offs = le16_to_cpu(here->e_value_offs); void *val = s->base + offs;
if (free < EXT4_XATTR_LEN(name_len) + new_size) {
ret = -ENOSPC; goto out;
}
/* * If storing the value in an external inode is an option, * reserve space for xattr entries/names in the external * attribute block so that a long value does not occupy the * whole space and prevent further entries being added.
*/ if (ext4_has_feature_ea_inode(inode->i_sb) &&
new_size && is_block &&
(min_offs + old_size - new_size) <
EXT4_XATTR_BLOCK_RESERVE(inode)) {
ret = -ENOSPC; goto out;
}
}
/* * Getting access to old and new ea inodes is subject to failures. * Finish that work before doing any modifications to the xattr data.
*/ if (!s->not_found && here->e_value_inum) {
ret = ext4_xattr_inode_iget(inode,
le32_to_cpu(here->e_value_inum),
le32_to_cpu(here->e_hash),
&old_ea_inode); if (ret) {
old_ea_inode = NULL; goto out;
}
/* We are ready to release ref count on the old_ea_inode. */
ret = ext4_xattr_inode_dec_ref(handle, old_ea_inode); if (ret) goto out;
/* Entry hash calculation. */ if (in_inode) {
__le32 crc32c_hash;
/* * Feed crc32c hash instead of the raw value for entry * hash calculation. This is to avoid walking * potentially long value buffer again.
*/
crc32c_hash = cpu_to_le32(
ext4_xattr_inode_get_hash(new_ea_inode));
hash = ext4_xattr_hash_entry(here->e_name,
here->e_name_len,
&crc32c_hash, 1);
} elseif (is_block) {
__le32 *value = s->base + le16_to_cpu(
here->e_value_offs);
if (header(s->base)->h_refcount == cpu_to_le32(1)) {
__u32 hash = le32_to_cpu(BHDR(bs->bh)->h_hash);
/* * This must happen under buffer lock for * ext4_xattr_block_set() to reliably detect modified * block
*/ if (ea_block_cache) { struct mb_cache_entry *oe;
/* * If existing entry points to an xattr inode, we need * to prevent ext4_xattr_set_entry() from decrementing * ref count on it because the reference belongs to the * original block. In this case, make the entry look * like it has an empty value.
*/ if (!s->not_found && s->here->e_value_inum) {
ea_ino = le32_to_cpu(s->here->e_value_inum);
error = ext4_xattr_inode_iget(inode, ea_ino,
le32_to_cpu(s->here->e_hash),
&tmp_inode); if (error) goto cleanup;
if (!ext4_test_inode_state(tmp_inode,
EXT4_STATE_LUSTRE_EA_INODE)) { /* * Defer quota free call for previous * inode until success is guaranteed.
*/
old_ea_inode_quota = le32_to_cpu(
s->here->e_value_size);
}
iput(tmp_inode);
s->here->e_value_inum = 0;
s->here->e_value_size = 0;
}
} else { /* Allocate a buffer where we construct the new block. */
s->base = kzalloc(sb->s_blocksize, GFP_NOFS);
error = -ENOMEM; if (s->base == NULL) goto cleanup;
header(s->base)->h_magic = cpu_to_le32(EXT4_XATTR_MAGIC);
header(s->base)->h_blocks = cpu_to_le32(1);
header(s->base)->h_refcount = cpu_to_le32(1);
s->first = ENTRY(header(s->base)+1);
s->here = ENTRY(header(s->base)+1);
s->end = s->base + sb->s_blocksize;
}
error = ext4_xattr_set_entry(i, s, handle, inode, ea_inode, true/* is_block */); if (error == -EFSCORRUPTED) goto bad_block; if (error) goto cleanup;
if (new_bh) { /* We found an identical block in the cache. */ if (new_bh == bs->bh)
ea_bdebug(new_bh, "keeping"); else {
u32 ref;
#ifdef EXT4_XATTR_DEBUG
WARN_ON_ONCE(dquot_initialize_needed(inode)); #endif /* The old block is released after updating
the inode. */
error = dquot_alloc_block(inode,
EXT4_C2B(EXT4_SB(sb), 1)); if (error) goto cleanup;
BUFFER_TRACE(new_bh, "get_write_access");
error = ext4_journal_get_write_access(
handle, sb, new_bh,
EXT4_JTR_NONE); if (error) goto cleanup_dquot;
lock_buffer(new_bh); /* * We have to be careful about races with * adding references to xattr block. Once we * hold buffer lock xattr block's state is * stable so we can check the additional * reference fits.
*/
ref = le32_to_cpu(BHDR(new_bh)->h_refcount) + 1; if (ref > EXT4_XATTR_REFCOUNT_MAX) { /* * Undo everything and check mbcache * again.
*/
unlock_buffer(new_bh);
dquot_free_block(inode,
EXT4_C2B(EXT4_SB(sb),
1));
brelse(new_bh);
mb_cache_entry_put(ea_block_cache, ce);
ce = NULL;
new_bh = NULL; goto inserted;
}
BHDR(new_bh)->h_refcount = cpu_to_le32(ref); if (ref == EXT4_XATTR_REFCOUNT_MAX)
clear_bit(MBE_REUSABLE_B, &ce->e_flags);
ea_bdebug(new_bh, "reusing; refcount now=%d",
ref);
ext4_xattr_block_csum_set(inode, new_bh);
unlock_buffer(new_bh);
error = ext4_handle_dirty_metadata(handle,
inode,
new_bh); if (error) goto cleanup_dquot;
}
mb_cache_entry_touch(ea_block_cache, ce);
mb_cache_entry_put(ea_block_cache, ce);
ce = NULL;
} elseif (bs->bh && s->base == bs->bh->b_data) { /* We were modifying this block in-place. */
ea_bdebug(bs->bh, "keeping this block");
ext4_xattr_block_cache_insert(ea_block_cache, bs->bh);
new_bh = bs->bh;
get_bh(new_bh);
} else { /* We need to allocate a new block */
ext4_fsblk_t goal, block;
/* When e_value_inum is set the value is stored externally. */ if (s->here->e_value_inum) return 0; if (le32_to_cpu(s->here->e_value_size) != i->value_len) return 0;
value = ((void *)s->base) + le16_to_cpu(s->here->e_value_offs); return !memcmp(value, i->value, i->value_len);
}
if (!EXT4_I(inode)->i_file_acl) return NULL;
bh = ext4_sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl, REQ_PRIO); if (IS_ERR(bh)) return bh;
error = ext4_xattr_check_block(inode, bh); if (error) {
brelse(bh); return ERR_PTR(error);
} return bh;
}
/* * ext4_xattr_set_handle() * * Create, replace or remove an extended attribute for this inode. Value * is NULL to remove an existing extended attribute, and non-NULL to * either replace an existing extended attribute, or create a new extended * attribute. The flags XATTR_REPLACE and XATTR_CREATE * specify that an extended attribute must exist and must not exist * previous to the call, respectively. * * Returns 0, or a negative error number on failure.
*/ int
ext4_xattr_set_handle(handle_t *handle, struct inode *inode, int name_index, constchar *name, constvoid *value, size_t value_len, int flags)
{ struct ext4_xattr_info i = {
.name_index = name_index,
.name = name,
.value = value,
.value_len = value_len,
.in_inode = 0,
}; struct ext4_xattr_ibody_find is = {
.s = { .not_found = -ENODATA, },
}; struct ext4_xattr_block_find bs = {
.s = { .not_found = -ENODATA, },
}; int no_expand; int error;
if (!name) return -EINVAL; if (strlen(name) > 255) return -ERANGE;
ext4_write_lock_xattr(inode, &no_expand);
/* Check journal credits under write lock. */ if (ext4_handle_valid(handle)) { struct buffer_head *bh; int credits;
error = ext4_xattr_ibody_find(inode, &i, &is); if (error) goto cleanup; if (is.s.not_found)
error = ext4_xattr_block_find(inode, &i, &bs); if (error) goto cleanup; if (is.s.not_found && bs.s.not_found) {
error = -ENODATA; if (flags & XATTR_REPLACE) goto cleanup;
error = 0; if (!value) goto cleanup;
} else {
error = -EEXIST; if (flags & XATTR_CREATE) goto cleanup;
}
if (!value) { if (!is.s.not_found)
error = ext4_xattr_ibody_set(handle, inode, &i, &is); elseif (!bs.s.not_found)
error = ext4_xattr_block_set(handle, inode, &i, &bs);
} else {
error = 0; /* Xattr value did not change? Save us some work and bail out */ if (!is.s.not_found && ext4_xattr_value_same(&is.s, &i)) goto cleanup; if (!bs.s.not_found && ext4_xattr_value_same(&bs.s, &i)) goto cleanup;