// SPDX-License-Identifier: GPL-2.0 /* * Copyright (c) 2014 Red Hat, Inc. * All Rights Reserved.
*/ #include"xfs.h" #include"xfs_fs.h" #include"xfs_shared.h" #include"xfs_format.h" #include"xfs_log_format.h" #include"xfs_trans_resv.h" #include"xfs_bit.h" #include"xfs_mount.h" #include"xfs_sb.h" #include"xfs_defer.h" #include"xfs_btree.h" #include"xfs_trans.h" #include"xfs_alloc.h" #include"xfs_rmap.h" #include"xfs_rmap_btree.h" #include"xfs_trace.h" #include"xfs_errortag.h" #include"xfs_error.h" #include"xfs_inode.h" #include"xfs_ag.h" #include"xfs_health.h" #include"xfs_rmap_item.h" #include"xfs_rtgroup.h" #include"xfs_rtrmap_btree.h"
/*
 * Slab cache backing rmap intent structures.  Presumably created and
 * destroyed by the module init/teardown paths outside this chunk --
 * confirm against the cache-init helpers in the full file.
 */
struct kmem_cache *xfs_rmap_intent_cache;
/*
 * Lookup the first record less than or equal to [bno, len, owner, offset]
 * in the btree given by cur.
 *
 * If @irec is non-NULL and a record is found, the record is read back into
 * *@irec.  Returns 0 on success (with *@stat indicating whether a record
 * was found), a negative errno on failure, or -EFSCORRUPTED if the cursor
 * claims a match but the record cannot be read back.
 */
int
xfs_rmap_lookup_le(
	struct xfs_btree_cur	*cur,
	xfs_agblock_t		bno,
	uint64_t		owner,
	uint64_t		offset,
	unsigned int		flags,
	struct xfs_rmap_irec	*irec,
	int			*stat)
{
	int			get_stat = 0;
	int			error;

	/*
	 * Prime the cursor's lookup key.  The block count is not part of
	 * the LE lookup key, so leave it zero.
	 */
	cur->bc_rec.r.rm_startblock = bno;
	cur->bc_rec.r.rm_blockcount = 0;
	cur->bc_rec.r.rm_owner = owner;
	cur->bc_rec.r.rm_offset = offset;
	cur->bc_rec.r.rm_flags = flags;

	/*
	 * Position the cursor at the last record <= the key.  Without this
	 * call the get_rec below would read from an unpositioned cursor.
	 */
	error = xfs_btree_lookup(cur, XFS_LOOKUP_LE, stat);
	if (error || !*stat || !irec)
		return error;

	/* Caller also wants the record contents, not just the position. */
	error = xfs_rmap_get_rec(cur, irec, &get_stat);
	if (error)
		return error;
	if (!get_stat) {
		xfs_btree_mark_sick(cur);
		return -EFSCORRUPTED;
	}

	return 0;
}
/*
 * Lookup the record exactly matching [bno, len, owner, offset]
 * in the btree given by cur.
 */
int
xfs_rmap_lookup_eq(
	struct xfs_btree_cur	*cur,
	xfs_agblock_t		bno,
	xfs_extlen_t		len,
	uint64_t		owner,
	uint64_t		offset,
	unsigned int		flags,
	int			*stat)
{
	struct xfs_rmap_irec	*key = &cur->bc_rec.r;

	/* Fill in the full record that the EQ lookup must match. */
	key->rm_startblock = bno;
	key->rm_blockcount = len;
	key->rm_owner = owner;
	key->rm_offset = offset;
	key->rm_flags = flags;

	return xfs_btree_lookup(cur, XFS_LOOKUP_EQ, stat);
}
/*
 * NOTE(review): the body below does not match the header comment.  It is a
 * mis-merged fragment: the validation logic (is_bmbt/is_inode/is_unwritten/
 * is_attr, __this_address) appears to belong to a record-checking helper,
 * and the xfs_warn block to a corruption-reporting helper.  None of those
 * identifiers (nor mp, nor fa) are declared in this scope, and the code
 * that packs 'rec' and calls xfs_btree_update() is missing entirely.
 * Reconcile against the canonical source before relying on this function.
 */
/* * Update the record referred to by cur to the value given * by [bno, len, owner, offset]. * This either works (return 0) or gets an EFSCORRUPTED error.
*/ STATICint
xfs_rmap_update( struct xfs_btree_cur *cur, struct xfs_rmap_irec *irec)
{ union xfs_btree_rec rec; int error;
/* NOTE(review): checks below reference undeclared flag locals. */
if (is_bmbt && irec->rm_offset != 0) return __this_address;
if (!is_inode && irec->rm_offset != 0) return __this_address;
if (is_unwritten && (is_bmbt || !is_inode || is_attr)) return __this_address;
if (!is_inode && (is_bmbt || is_unwritten || is_attr)) return __this_address;
/* Check for a valid fork offset, if applicable. */ if (is_inode && !is_bmbt &&
!xfs_verify_fileext(mp, irec->rm_offset, irec->rm_blockcount)) return __this_address;
/* NOTE(review): corruption-reporting fragment; 'fa' is undeclared here. */
if (xfs_btree_is_mem_rmap(cur->bc_ops))
xfs_warn(mp, "In-Memory Reverse Mapping BTree record corruption detected at %pS!", fa); elseif (xfs_btree_is_rtrmap(cur->bc_ops))
xfs_warn(mp, "RT Reverse Mapping BTree record corruption in rtgroup %u detected at %pS!",
cur->bc_group->xg_gno, fa); else
xfs_warn(mp, "Reverse Mapping BTree record corruption in AG %d detected at %pS!",
cur->bc_group->xg_gno, fa);
xfs_warn(mp, "Owner 0x%llx, flags 0x%x, start block 0x%x block count 0x%x",
irec->rm_owner, irec->rm_flags, irec->rm_startblock,
irec->rm_blockcount);
xfs_btree_mark_sick(cur); return -EFSCORRUPTED;
}
/*
 * NOTE(review): truncated function.  Only the signature and local
 * declarations survive; the body that reads the btree record, unpacks it
 * into *irec, and validates it (the likely use of 'rec' and 'fa') is
 * missing, along with the closing brace -- the next function's comment
 * begins immediately after.  Recover the body from the canonical source.
 */
/* * Get the data from the pointed-to record.
*/ int
xfs_rmap_get_rec( struct xfs_btree_cur *cur, struct xfs_rmap_irec *irec, int *stat)
{ union xfs_btree_rec *rec;
xfs_failaddr_t fa; int error;
/*
 * NOTE(review): truncated helper.  Only the signature and the priv cast
 * remain; the matching logic (owner/offset/adjacency comparisons, the
 * -ECANCELED early-exit convention implied by the callers below) and the
 * closing brace are missing.
 */
/* For each rmap given, figure out if it matches the key we want. */ STATICint
xfs_rmap_find_left_neighbor_helper( struct xfs_btree_cur *cur, conststruct xfs_rmap_irec *rec, void *priv)
{ struct xfs_find_left_neighbor_info *info = priv;
/*
 * NOTE(review): partial function.  'info' is used below without ever being
 * initialized (the code that fills info.high and the result pointers is
 * missing), and the function ends mid-body after the -ECANCELED check --
 * the tail that sets *stat and returns, plus the closing brace, is gone.
 */
/* * Find the record to the left of the given extent, being careful only to * return a match with the same owner and adjacent physical and logical * block ranges.
*/ STATICint
xfs_rmap_find_left_neighbor( struct xfs_btree_cur *cur,
xfs_agblock_t bno,
uint64_t owner,
uint64_t offset, unsignedint flags, struct xfs_rmap_irec *irec, int *stat)
{ struct xfs_find_left_neighbor_info info; int found = 0; int error;
/* * Historically, we always used the range query to walk every reverse * mapping that could possibly overlap the key that the caller asked * for, and filter out the ones that don't. That is very slow when * there are a lot of records. * * However, there are two scenarios where the classic btree search can * produce correct results -- if the index contains a record that is an * exact match for the lookup key; and if there are no other records * between the record we want and the key we supplied. * * As an optimization, try a non-overlapped lookup first. This makes * extent conversion and remap operations run a bit faster if the * physical extents aren't being shared. If we don't find what we * want, we fall back to the overlapped query.
*/
error = xfs_rmap_lookup_le(cur, bno, owner, offset, flags, irec,
&found); if (error) return error; if (found)
error = xfs_rmap_find_left_neighbor_helper(cur, irec, &info); if (!error)
/* Fall back to the (slower) overlapped range query. */
error = xfs_rmap_query_range(cur, &info.high, &info.high,
xfs_rmap_find_left_neighbor_helper, &info); if (error != -ECANCELED) return error;
/*
 * NOTE(review): truncated helper -- same situation as the
 * find_left_neighbor helper above: the body that tests the candidate
 * record for owner/range overlap and the closing brace are missing.
 */
/* For each rmap given, figure out if it matches the key we want. */ STATICint
xfs_rmap_lookup_le_range_helper( struct xfs_btree_cur *cur, conststruct xfs_rmap_irec *rec, void *priv)
{ struct xfs_find_left_neighbor_info *info = priv;
/*
 * NOTE(review): partial function.  As with xfs_rmap_find_left_neighbor
 * above, 'info' is used uninitialized and the tail of the function
 * (setting *stat, return, closing brace) is missing.
 */
/* * Find the record to the left of the given extent, being careful only to * return a match with the same owner and overlapping physical and logical * block ranges. This is the overlapping-interval version of * xfs_rmap_lookup_le.
*/ int
xfs_rmap_lookup_le_range( struct xfs_btree_cur *cur,
xfs_agblock_t bno,
uint64_t owner,
uint64_t offset, unsignedint flags, struct xfs_rmap_irec *irec, int *stat)
{ struct xfs_find_left_neighbor_info info; int found = 0; int error;
/* * Historically, we always used the range query to walk every reverse * mapping that could possibly overlap the key that the caller asked * for, and filter out the ones that don't. That is very slow when * there are a lot of records. * * However, there are two scenarios where the classic btree search can * produce correct results -- if the index contains a record that is an * exact match for the lookup key; and if there are no other records * between the record we want and the key we supplied. * * As an optimization, try a non-overlapped lookup first. This makes * scrub run much faster on most filesystems because bmbt records are * usually an exact match for rmap records. If we don't find what we * want, we fall back to the overlapped query.
*/
error = xfs_rmap_lookup_le(cur, bno, owner, offset, flags, irec,
&found); if (error) return error; if (found)
error = xfs_rmap_lookup_le_range_helper(cur, irec, &info); if (!error)
/* Fall back to the (slower) overlapped range query. */
error = xfs_rmap_query_range(cur, &info.high, &info.high,
xfs_rmap_lookup_le_range_helper, &info); if (error != -ECANCELED) return error;
/*
 * NOTE(review): truncated function.  It ends mid-body right after the
 * NON_INODE_OWNER test; the offset-range checks that presumably consume
 * the otherwise-unused 'ltoff' and 'len' parameters, the 'out' label that
 * the gotos below target, and the final return/closing brace are missing.
 */
/* * Perform all the relevant owner checks for a removal op. If we're doing an * unknown-owner removal then we have no owner information to check.
*/ staticint
xfs_rmap_free_check_owner( struct xfs_btree_cur *cur,
uint64_t ltoff, struct xfs_rmap_irec *rec,
xfs_extlen_t len,
uint64_t owner,
uint64_t offset, unsignedint flags)
{ struct xfs_mount *mp = cur->bc_mp; int error = 0;
/* Wildcard owner (EFI recovery): nothing to verify. */
if (owner == XFS_RMAP_OWN_UNKNOWN) return 0;
/* Make sure the unwritten flag matches. */ if (XFS_IS_CORRUPT(mp,
(flags & XFS_RMAP_UNWRITTEN) !=
(rec->rm_flags & XFS_RMAP_UNWRITTEN))) {
xfs_btree_mark_sick(cur);
error = -EFSCORRUPTED; goto out;
}
/* Make sure the owner matches what we expect to find in the tree. */ if (XFS_IS_CORRUPT(mp, owner != rec->rm_owner)) {
xfs_btree_mark_sick(cur);
error = -EFSCORRUPTED; goto out;
}
/* Check the offset, if necessary. */ if (XFS_RMAP_NON_INODE_OWNER(owner)) goto out;
/*
 * NOTE(review): heavily damaged function.
 *  - owner/offset/flags/ignore_off are read below without ever being
 *    assigned; the code that unpacks 'oinfo' into them is missing.
 *  - '<rec' in several calls is a mangled '&ltrec' (HTML-entity damage);
 *    same for '>rec' meaning '&gtrec' elsewhere in this file.
 *  - The body is cut off mid-else after declaring orig_len (L170) and
 *    fuses straight into the CONFIG_XFS_LIVE_HOOKS machinery, which
 *    belongs to a different part of the file; the middle-split logic, the
 *    out_done/out_error labels targeted by the gotos, and the hook-call
 *    wrapper definitions are all lost.
 */
/* * Find the extent in the rmap btree and remove it. * * The record we find should always be an exact match for the extent that we're * looking for, since we insert them into the btree without modification. * * Special Case #1: when growing the filesystem, we "free" an extent when * growing the last AG. This extent is new space and so it is not tracked as * used space in the btree. The growfs code will pass in an owner of * XFS_RMAP_OWN_NULL to indicate that it expected that there is no owner of this * extent. We verify that - the extent lookup result in a record that does not * overlap. * * Special Case #2: EFIs do not record the owner of the extent, so when * recovering EFIs from the log we pass in XFS_RMAP_OWN_UNKNOWN to tell the rmap * btree to ignore the owner (i.e. wildcard match) so we don't trigger * corruption checks during log recovery.
*/ STATICint
xfs_rmap_unmap( struct xfs_btree_cur *cur,
xfs_agblock_t bno,
xfs_extlen_t len, bool unwritten, conststruct xfs_owner_info *oinfo)
{ struct xfs_mount *mp = cur->bc_mp; struct xfs_rmap_irec ltrec;
uint64_t ltoff; int error = 0; int i;
uint64_t owner;
uint64_t offset; unsignedint flags; bool ignore_off;
/* * We should always have a left record because there's a static record * for the AG headers at rm_startblock == 0 created by mkfs/growfs that * will not ever be removed from the tree.
*/
error = xfs_rmap_lookup_le(cur, bno, owner, offset, flags, <rec, &i); if (error) goto out_error; if (XFS_IS_CORRUPT(mp, i != 1)) {
xfs_btree_mark_sick(cur);
error = -EFSCORRUPTED; goto out_error;
}
/* * For growfs, the incoming extent must be beyond the left record we * just found as it is new space and won't be used by anyone. This is * just a corruption check as we don't actually do anything with this * extent. Note that we need to use >= instead of > because it might * be the case that the "left" extent goes all the way to EOFS.
*/ if (owner == XFS_RMAP_OWN_NULL) { if (XFS_IS_CORRUPT(mp,
bno <
ltrec.rm_startblock + ltrec.rm_blockcount)) {
xfs_btree_mark_sick(cur);
error = -EFSCORRUPTED; goto out_error;
} goto out_done;
}
/* * If we're doing an unknown-owner removal for EFI recovery, we expect * to find the full range in the rmapbt or nothing at all. If we * don't find any rmaps overlapping either end of the range, we're * done. Hopefully this means that the EFI creator already queued * (and finished) a RUI to remove the rmap.
*/ if (owner == XFS_RMAP_OWN_UNKNOWN &&
ltrec.rm_startblock + ltrec.rm_blockcount <= bno) { struct xfs_rmap_irec rtrec;
error = xfs_btree_increment(cur, 0, &i); if (error) goto out_error; if (i == 0) goto out_done;
error = xfs_rmap_get_rec(cur, &rtrec, &i); if (error) goto out_error; if (XFS_IS_CORRUPT(mp, i != 1)) {
xfs_btree_mark_sick(cur);
error = -EFSCORRUPTED; goto out_error;
} if (rtrec.rm_startblock >= bno + len) goto out_done;
}
/* Make sure the extent we found covers the entire freeing range. */ if (XFS_IS_CORRUPT(mp,
ltrec.rm_startblock > bno ||
ltrec.rm_startblock + ltrec.rm_blockcount <
bno + len)) {
xfs_btree_mark_sick(cur);
error = -EFSCORRUPTED; goto out_error;
}
if (ltrec.rm_startblock == bno && ltrec.rm_blockcount == len) { /* exact match, simply remove the record from rmap tree */
trace_xfs_rmap_delete(cur, ltrec.rm_startblock,
ltrec.rm_blockcount, ltrec.rm_owner,
ltrec.rm_offset, ltrec.rm_flags);
error = xfs_btree_delete(cur, &i); if (error) goto out_error; if (XFS_IS_CORRUPT(mp, i != 1)) {
xfs_btree_mark_sick(cur);
error = -EFSCORRUPTED; goto out_error;
}
} elseif (ltrec.rm_startblock == bno) { /* * overlap left hand side of extent: move the start, trim the * length and update the current record. * * ltbno ltlen * Orig: |oooooooooooooooooooo| * Freeing: |fffffffff| * Result: |rrrrrrrrrr| * bno len
*/
ltrec.rm_startblock += len;
ltrec.rm_blockcount -= len; if (!ignore_off)
ltrec.rm_offset += len;
error = xfs_rmap_update(cur, <rec); if (error) goto out_error;
} elseif (ltrec.rm_startblock + ltrec.rm_blockcount == bno + len) { /* * overlap right hand side of extent: trim the length and update * the current record. * * ltbno ltlen * Orig: |oooooooooooooooooooo| * Freeing: |fffffffff| * Result: |rrrrrrrrrr| * bno len
*/
ltrec.rm_blockcount -= len;
error = xfs_rmap_update(cur, <rec); if (error) goto out_error;
} else {
/* * overlap middle of extent: trim the length of the existing * record to the length of the new left-extent size, increment * the insertion position so we can insert a new record * containing the remaining right-extent space. * * ltbno ltlen * Orig: |oooooooooooooooooooo| * Freeing: |fffffffff| * Result: |rrrrr| |rrrr| * bno len
*/
xfs_extlen_t orig_len = ltrec.rm_blockcount;
/* NOTE(review): unmap body ends here; hooks section begins abruptly. */
#ifdef CONFIG_XFS_LIVE_HOOKS /* * Use a static key here to reduce the overhead of rmapbt live updates. If * the compiler supports jump labels, the static branch will be replaced by a * nop sled when there are no hook users. Online fsck is currently the only * caller, so this is a reasonable tradeoff. * * Note: Patching the kernel code requires taking the cpu hotplug lock. Other * parts of the kernel allocate memory with that lock held, which means that * XFS callers cannot hold any locks that might be used by memory reclaim or * writeback when calling the static_branch_{inc,dec} functions.
*/
DEFINE_STATIC_XFS_HOOK_SWITCH(xfs_rmap_hooks_switch);
if (xg)
xfs_hooks_call(&xg->xg_rmap_update_hooks, op, &p);
}
}
/* Call the specified function during a reverse mapping update. */
int
xfs_rmap_hook_add(
	struct xfs_group	*xg,
	struct xfs_rmap_hook	*hook)
{
	/* Chain the caller's hook onto this group's rmap update notifiers. */
	return xfs_hooks_add(&xg->xg_rmap_update_hooks, &hook->rmap_hook);
}
/* Stop calling the specified function during a reverse mapping update. */
void
xfs_rmap_hook_del(
	struct xfs_group	*xg,
	struct xfs_rmap_hook	*hook)
{
	/* Unchain the hook from this group's rmap update notifiers. */
	xfs_hooks_del(&xg->xg_rmap_update_hooks, &hook->rmap_hook);
}
/* Configure rmap update hook functions. */ void
xfs_rmap_hook_setup( struct xfs_rmap_hook *hook,
notifier_fn_t mod_fn)
{
xfs_hook_setup(&hook->rmap_hook, mod_fn);
} #else # define xfs_rmap_update_hook(t, p, o, s, b, u, oi) do { } while (0) #endif/* CONFIG_XFS_LIVE_HOOKS */
/*
 * NOTE(review): truncated function.  Only the signature and local
 * declarations remain; the body (presumably cursor init against
 * agbp/pag, the call into the unmap path, cursor teardown) and the
 * closing brace are missing.
 */
/* * Remove a reference to an extent in the rmap btree.
*/ int
xfs_rmap_free( struct xfs_trans *tp, struct xfs_buf *agbp, struct xfs_perag *pag,
xfs_agblock_t bno,
xfs_extlen_t len, conststruct xfs_owner_info *oinfo)
{ struct xfs_mount *mp = tp->t_mountp; struct xfs_btree_cur *cur; int error;
/* * A mergeable rmap must have the same owner and the same values for * the unwritten, attr_fork, and bmbt flags. The startblock and * offset are checked separately.
*/ staticbool
xfs_rmap_is_mergeable( struct xfs_rmap_irec *irec,
uint64_t owner, unsignedint flags)
{ if (irec->rm_owner == XFS_RMAP_OWN_NULL) returnfalse; if (irec->rm_owner != owner) returnfalse; if ((flags & XFS_RMAP_UNWRITTEN) ^
(irec->rm_flags & XFS_RMAP_UNWRITTEN)) returnfalse; if ((flags & XFS_RMAP_ATTR_FORK) ^
(irec->rm_flags & XFS_RMAP_ATTR_FORK)) returnfalse; if ((flags & XFS_RMAP_BMBT_BLOCK) ^
(irec->rm_flags & XFS_RMAP_BMBT_BLOCK)) returnfalse; returntrue;
}
/*
 * NOTE(review): partially damaged function.
 *  - owner/offset/ignore_off are read below without ever being assigned;
 *    the code that unpacks 'oinfo' into them is missing.
 *  - '<rec' / '>rec' below are mangled '&ltrec' / '&gtrec'
 *    (HTML-entity damage).
 *  - The tail of the function -- the done-trace, the out_error label the
 *    gotos target, the return, and the closing brace -- is missing; the
 *    LEFT/RIGHT/PREV/NEW macro block begins immediately after.
 */
/* * When we allocate a new block, the first thing we do is add a reference to * the extent in the rmap btree. This takes the form of a [agbno, length, * owner, offset] record. Flags are encoded in the high bits of the offset * field.
*/ STATICint
xfs_rmap_map( struct xfs_btree_cur *cur,
xfs_agblock_t bno,
xfs_extlen_t len, bool unwritten, conststruct xfs_owner_info *oinfo)
{ struct xfs_mount *mp = cur->bc_mp; struct xfs_rmap_irec ltrec; struct xfs_rmap_irec gtrec; int have_gt; int have_lt; int error = 0; int i;
uint64_t owner;
uint64_t offset; unsignedint flags = 0; bool ignore_off;
/* * For the initial lookup, look for an exact match or the left-adjacent * record for our insertion point. This will also give us the record for * start block contiguity tests.
*/
error = xfs_rmap_lookup_le(cur, bno, owner, offset, flags, <rec,
&have_lt); if (error) goto out_error; if (have_lt) {
trace_xfs_rmap_lookup_le_range_result(cur, ltrec.rm_startblock,
ltrec.rm_blockcount, ltrec.rm_owner,
ltrec.rm_offset, ltrec.rm_flags);
if (!xfs_rmap_is_mergeable(<rec, owner, flags))
have_lt = 0;
}
/* * Increment the cursor to see if we have a right-adjacent record to our * insertion point. This will give us the record for end block * contiguity tests.
*/
error = xfs_btree_increment(cur, 0, &have_gt); if (error) goto out_error; if (have_gt) {
error = xfs_rmap_get_rec(cur, >rec, &have_gt); if (error) goto out_error; if (XFS_IS_CORRUPT(mp, have_gt != 1)) {
xfs_btree_mark_sick(cur);
error = -EFSCORRUPTED; goto out_error;
} if (XFS_IS_CORRUPT(mp, bno + len > gtrec.rm_startblock)) {
xfs_btree_mark_sick(cur);
error = -EFSCORRUPTED; goto out_error;
}
trace_xfs_rmap_find_right_neighbor_result(cur,
gtrec.rm_startblock, gtrec.rm_blockcount,
gtrec.rm_owner, gtrec.rm_offset,
gtrec.rm_flags); if (!xfs_rmap_is_mergeable(>rec, owner, flags))
have_gt = 0;
}
/* * Note: cursor currently points one record to the right of ltrec, even * if there is no record in the tree to the right.
*/ if (have_lt &&
ltrec.rm_startblock + ltrec.rm_blockcount == bno &&
(ignore_off || ltrec.rm_offset + ltrec.rm_blockcount == offset)) { /* * left edge contiguous, merge into left record. * * ltbno ltlen * orig: |ooooooooo| * adding: |aaaaaaaaa| * result: |rrrrrrrrrrrrrrrrrrr| * bno len
*/
ltrec.rm_blockcount += len; if (have_gt &&
bno + len == gtrec.rm_startblock &&
(ignore_off || offset + len == gtrec.rm_offset) &&
(unsignedlong)ltrec.rm_blockcount + len +
gtrec.rm_blockcount <= XFS_RMAP_LEN_MAX) { /* * right edge also contiguous, delete right record * and merge into left record. * * ltbno ltlen gtbno gtlen * orig: |ooooooooo| |ooooooooo| * adding: |aaaaaaaaa| * result: |rrrrrrrrrrrrrrrrrrrrrrrrrrrrr|
*/
ltrec.rm_blockcount += gtrec.rm_blockcount;
trace_xfs_rmap_delete(cur, gtrec.rm_startblock,
gtrec.rm_blockcount, gtrec.rm_owner,
gtrec.rm_offset, gtrec.rm_flags);
error = xfs_btree_delete(cur, &i); if (error) goto out_error; if (XFS_IS_CORRUPT(mp, i != 1)) {
xfs_btree_mark_sick(cur);
error = -EFSCORRUPTED; goto out_error;
}
}
/* point the cursor back to the left record and update */
error = xfs_btree_decrement(cur, 0, &have_gt); if (error) goto out_error;
error = xfs_rmap_update(cur, <rec); if (error) goto out_error;
} elseif (have_gt &&
bno + len == gtrec.rm_startblock &&
(ignore_off || offset + len == gtrec.rm_offset)) { /* * right edge contiguous, merge into right record. * * gtbno gtlen * Orig: |ooooooooo| * adding: |aaaaaaaaa| * Result: |rrrrrrrrrrrrrrrrrrr| * bno len
*/
gtrec.rm_startblock = bno;
gtrec.rm_blockcount += len; if (!ignore_off)
gtrec.rm_offset = offset;
error = xfs_rmap_update(cur, >rec); if (error) goto out_error;
} else { /* * no contiguous edge with identical owner, insert * new record at current cursor position.
*/
cur->bc_rec.r.rm_startblock = bno;
cur->bc_rec.r.rm_blockcount = len;
cur->bc_rec.r.rm_owner = owner;
cur->bc_rec.r.rm_offset = offset;
cur->bc_rec.r.rm_flags = flags;
trace_xfs_rmap_insert(cur, bno, len, owner, offset, flags);
error = xfs_btree_insert(cur, &i); if (error) goto out_error; if (XFS_IS_CORRUPT(mp, i != 1)) {
xfs_btree_mark_sick(cur);
error = -EFSCORRUPTED; goto out_error;
}
}
/*
 * Shorthand for the r[4] neighbor array used by the convert functions.
 * NOTE(review): four #define directives have been collapsed onto one line
 * (and '#defineNEW' has lost its space); as written this will not
 * preprocess -- each #define must be restored to its own line.
 */
#define LEFT r[0] #define RIGHT r[1] #define PREV r[2] #defineNEW r[3]
/*
 * NOTE(review): partially damaged function.
 *  - owner, offset, new_endoff, oldext and newext are read below without
 *    ever being assigned; the prologue that unpacks 'oinfo', derives the
 *    old/new unwritten flag state from 'unwritten', and computes
 *    new_endoff = offset + len is missing.
 *  - 'goto done' is used throughout but no 'done:' label appears before
 *    the closing brace -- the function tail (done-trace, label, return)
 *    is missing.
 */
/* * Convert an unwritten extent to a real extent or vice versa. * Does not handle overlapping extents.
*/ STATICint
xfs_rmap_convert( struct xfs_btree_cur *cur,
xfs_agblock_t bno,
xfs_extlen_t len, bool unwritten, conststruct xfs_owner_info *oinfo)
{ struct xfs_mount *mp = cur->bc_mp; struct xfs_rmap_irec r[4]; /* neighbor extent entries */ /* left is 0, right is 1, */ /* prev is 2, new is 3 */
uint64_t owner;
uint64_t offset;
uint64_t new_endoff; unsignedint oldext; unsignedint newext; unsignedint flags = 0; int i; int state = 0; int error;
/* * For the initial lookup, look for an exact match or the left-adjacent * record for our insertion point. This will also give us the record for * start block contiguity tests.
*/
error = xfs_rmap_lookup_le(cur, bno, owner, offset, oldext, &PREV, &i); if (error) goto done; if (XFS_IS_CORRUPT(mp, i != 1)) {
xfs_btree_mark_sick(cur);
error = -EFSCORRUPTED; goto done;
}
/* * Set flags determining what part of the previous oldext allocation * extent is being replaced by a newext allocation.
*/ if (PREV.rm_offset == offset)
state |= RMAP_LEFT_FILLING; if (PREV.rm_offset + PREV.rm_blockcount == new_endoff)
state |= RMAP_RIGHT_FILLING;
/* * Decrement the cursor to see if we have a left-adjacent record to our * insertion point. This will give us the record for end block * contiguity tests.
*/
error = xfs_btree_decrement(cur, 0, &i); if (error) goto done; if (i) {
state |= RMAP_LEFT_VALID;
error = xfs_rmap_get_rec(cur, &LEFT, &i); if (error) goto done; if (XFS_IS_CORRUPT(mp, i != 1)) {
xfs_btree_mark_sick(cur);
error = -EFSCORRUPTED; goto done;
} if (XFS_IS_CORRUPT(mp,
LEFT.rm_startblock + LEFT.rm_blockcount >
bno)) {
xfs_btree_mark_sick(cur);
error = -EFSCORRUPTED; goto done;
}
trace_xfs_rmap_find_left_neighbor_result(cur,
LEFT.rm_startblock, LEFT.rm_blockcount,
LEFT.rm_owner, LEFT.rm_offset, LEFT.rm_flags); if (LEFT.rm_startblock + LEFT.rm_blockcount == bno &&
LEFT.rm_offset + LEFT.rm_blockcount == offset &&
xfs_rmap_is_mergeable(&LEFT, owner, newext))
state |= RMAP_LEFT_CONTIG;
}
/* * Increment the cursor to see if we have a right-adjacent record to our * insertion point. This will give us the record for end block * contiguity tests.
*/
/* First increment returns to PREV; it must exist. */
error = xfs_btree_increment(cur, 0, &i); if (error) goto done; if (XFS_IS_CORRUPT(mp, i != 1)) {
xfs_btree_mark_sick(cur);
error = -EFSCORRUPTED; goto done;
}
/* Second increment moves one past PREV to the right neighbor, if any. */
error = xfs_btree_increment(cur, 0, &i); if (error) goto done; if (i) {
state |= RMAP_RIGHT_VALID;
error = xfs_rmap_get_rec(cur, &RIGHT, &i); if (error) goto done; if (XFS_IS_CORRUPT(mp, i != 1)) {
xfs_btree_mark_sick(cur);
error = -EFSCORRUPTED; goto done;
} if (XFS_IS_CORRUPT(mp, bno + len > RIGHT.rm_startblock)) {
xfs_btree_mark_sick(cur);
error = -EFSCORRUPTED; goto done;
}
trace_xfs_rmap_find_right_neighbor_result(cur,
RIGHT.rm_startblock, RIGHT.rm_blockcount,
RIGHT.rm_owner, RIGHT.rm_offset,
RIGHT.rm_flags); if (bno + len == RIGHT.rm_startblock &&
offset + len == RIGHT.rm_offset &&
xfs_rmap_is_mergeable(&RIGHT, owner, newext))
state |= RMAP_RIGHT_CONTIG;
}
/* check that left + prev + right is not too long */ if ((state & (RMAP_LEFT_FILLING | RMAP_LEFT_CONTIG |
RMAP_RIGHT_FILLING | RMAP_RIGHT_CONTIG)) ==
(RMAP_LEFT_FILLING | RMAP_LEFT_CONTIG |
RMAP_RIGHT_FILLING | RMAP_RIGHT_CONTIG) &&
(unsignedlong)LEFT.rm_blockcount + len +
RIGHT.rm_blockcount > XFS_RMAP_LEN_MAX)
state &= ~RMAP_RIGHT_CONTIG;
/* reset the cursor back to PREV */
error = xfs_rmap_lookup_le(cur, bno, owner, offset, oldext, NULL, &i); if (error) goto done; if (XFS_IS_CORRUPT(mp, i != 1)) {
xfs_btree_mark_sick(cur);
error = -EFSCORRUPTED; goto done;
}
/* * Switch out based on the FILLING and CONTIG state bits.
*/ switch (state & (RMAP_LEFT_FILLING | RMAP_LEFT_CONTIG |
RMAP_RIGHT_FILLING | RMAP_RIGHT_CONTIG)) { case RMAP_LEFT_FILLING | RMAP_LEFT_CONTIG |
RMAP_RIGHT_FILLING | RMAP_RIGHT_CONTIG: /* * Setting all of a previous oldext extent to newext. * The left and right neighbors are both contiguous with new.
*/
error = xfs_btree_increment(cur, 0, &i); if (error) goto done; if (XFS_IS_CORRUPT(mp, i != 1)) {
xfs_btree_mark_sick(cur);
error = -EFSCORRUPTED; goto done;
}
trace_xfs_rmap_delete(cur, RIGHT.rm_startblock,
RIGHT.rm_blockcount, RIGHT.rm_owner,
RIGHT.rm_offset, RIGHT.rm_flags);
error = xfs_btree_delete(cur, &i); if (error) goto done; if (XFS_IS_CORRUPT(mp, i != 1)) {
xfs_btree_mark_sick(cur);
error = -EFSCORRUPTED; goto done;
}
error = xfs_btree_decrement(cur, 0, &i); if (error) goto done; if (XFS_IS_CORRUPT(mp, i != 1)) {
xfs_btree_mark_sick(cur);
error = -EFSCORRUPTED; goto done;
}
trace_xfs_rmap_delete(cur, PREV.rm_startblock,
PREV.rm_blockcount, PREV.rm_owner,
PREV.rm_offset, PREV.rm_flags);
error = xfs_btree_delete(cur, &i); if (error) goto done; if (XFS_IS_CORRUPT(mp, i != 1)) {
xfs_btree_mark_sick(cur);
error = -EFSCORRUPTED; goto done;
}
error = xfs_btree_decrement(cur, 0, &i); if (error) goto done; if (XFS_IS_CORRUPT(mp, i != 1)) {
xfs_btree_mark_sick(cur);
error = -EFSCORRUPTED; goto done;
} NEW = LEFT; NEW.rm_blockcount += PREV.rm_blockcount + RIGHT.rm_blockcount;
error = xfs_rmap_update(cur, &NEW); if (error) goto done; break;
case RMAP_LEFT_FILLING | RMAP_RIGHT_FILLING | RMAP_LEFT_CONTIG: /* * Setting all of a previous oldext extent to newext. * The left neighbor is contiguous, the right is not.
*/
trace_xfs_rmap_delete(cur, PREV.rm_startblock,
PREV.rm_blockcount, PREV.rm_owner,
PREV.rm_offset, PREV.rm_flags);
error = xfs_btree_delete(cur, &i); if (error) goto done; if (XFS_IS_CORRUPT(mp, i != 1)) {
xfs_btree_mark_sick(cur);
error = -EFSCORRUPTED; goto done;
}
error = xfs_btree_decrement(cur, 0, &i); if (error) goto done; if (XFS_IS_CORRUPT(mp, i != 1)) {
xfs_btree_mark_sick(cur);
error = -EFSCORRUPTED; goto done;
} NEW = LEFT; NEW.rm_blockcount += PREV.rm_blockcount;
error = xfs_rmap_update(cur, &NEW); if (error) goto done; break;
case RMAP_LEFT_FILLING | RMAP_RIGHT_FILLING | RMAP_RIGHT_CONTIG: /* * Setting all of a previous oldext extent to newext. * The right neighbor is contiguous, the left is not.
*/
error = xfs_btree_increment(cur, 0, &i); if (error) goto done; if (XFS_IS_CORRUPT(mp, i != 1)) {
xfs_btree_mark_sick(cur);
error = -EFSCORRUPTED; goto done;
}
trace_xfs_rmap_delete(cur, RIGHT.rm_startblock,
RIGHT.rm_blockcount, RIGHT.rm_owner,
RIGHT.rm_offset, RIGHT.rm_flags);
error = xfs_btree_delete(cur, &i); if (error) goto done; if (XFS_IS_CORRUPT(mp, i != 1)) {
xfs_btree_mark_sick(cur);
error = -EFSCORRUPTED; goto done;
}
error = xfs_btree_decrement(cur, 0, &i); if (error) goto done; if (XFS_IS_CORRUPT(mp, i != 1)) {
xfs_btree_mark_sick(cur);
error = -EFSCORRUPTED; goto done;
} NEW = PREV; NEW.rm_blockcount = len + RIGHT.rm_blockcount; NEW.rm_flags = newext;
error = xfs_rmap_update(cur, &NEW); if (error) goto done; break;
case RMAP_LEFT_FILLING | RMAP_RIGHT_FILLING: /* * Setting all of a previous oldext extent to newext. * Neither the left nor right neighbors are contiguous with * the new one.
*/ NEW = PREV; NEW.rm_flags = newext;
error = xfs_rmap_update(cur, &NEW); if (error) goto done; break;
case RMAP_LEFT_FILLING | RMAP_LEFT_CONTIG: /* * Setting the first part of a previous oldext extent to newext. * The left neighbor is contiguous.
*/ NEW = PREV; NEW.rm_offset += len; NEW.rm_startblock += len; NEW.rm_blockcount -= len;
error = xfs_rmap_update(cur, &NEW); if (error) goto done;
error = xfs_btree_decrement(cur, 0, &i); if (error) goto done; NEW = LEFT; NEW.rm_blockcount += len;
error = xfs_rmap_update(cur, &NEW); if (error) goto done; break;
case RMAP_LEFT_FILLING: /* * Setting the first part of a previous oldext extent to newext. * The left neighbor is not contiguous.
*/ NEW = PREV; NEW.rm_startblock += len; NEW.rm_offset += len; NEW.rm_blockcount -= len;
error = xfs_rmap_update(cur, &NEW); if (error) goto done; NEW.rm_startblock = bno; NEW.rm_owner = owner; NEW.rm_offset = offset; NEW.rm_blockcount = len; NEW.rm_flags = newext;
cur->bc_rec.r = NEW;
trace_xfs_rmap_insert(cur, bno, len, owner, offset, newext);
error = xfs_btree_insert(cur, &i); if (error) goto done; if (XFS_IS_CORRUPT(mp, i != 1)) {
xfs_btree_mark_sick(cur);
error = -EFSCORRUPTED; goto done;
} break;
case RMAP_RIGHT_FILLING | RMAP_RIGHT_CONTIG: /* * Setting the last part of a previous oldext extent to newext. * The right neighbor is contiguous with the new allocation.
*/ NEW = PREV; NEW.rm_blockcount -= len;
error = xfs_rmap_update(cur, &NEW); if (error) goto done;
error = xfs_btree_increment(cur, 0, &i); if (error) goto done; NEW = RIGHT; NEW.rm_offset = offset; NEW.rm_startblock = bno; NEW.rm_blockcount += len;
error = xfs_rmap_update(cur, &NEW); if (error) goto done; break;
case RMAP_RIGHT_FILLING: /* * Setting the last part of a previous oldext extent to newext. * The right neighbor is not contiguous.
*/ NEW = PREV; NEW.rm_blockcount -= len;
error = xfs_rmap_update(cur, &NEW); if (error) goto done;
error = xfs_rmap_lookup_eq(cur, bno, len, owner, offset,
oldext, &i); if (error) goto done; if (XFS_IS_CORRUPT(mp, i != 0)) {
xfs_btree_mark_sick(cur);
error = -EFSCORRUPTED; goto done;
} NEW.rm_startblock = bno; NEW.rm_owner = owner; NEW.rm_offset = offset; NEW.rm_blockcount = len; NEW.rm_flags = newext;
cur->bc_rec.r = NEW;
trace_xfs_rmap_insert(cur, bno, len, owner, offset, newext);
error = xfs_btree_insert(cur, &i); if (error) goto done; if (XFS_IS_CORRUPT(mp, i != 1)) {
xfs_btree_mark_sick(cur);
error = -EFSCORRUPTED; goto done;
} break;
case 0: /* * Setting the middle part of a previous oldext extent to * newext. Contiguity is impossible here. * One extent becomes three extents.
*/ /* new right extent - oldext */ NEW.rm_startblock = bno + len; NEW.rm_owner = owner; NEW.rm_offset = new_endoff; NEW.rm_blockcount = PREV.rm_offset + PREV.rm_blockcount -
new_endoff; NEW.rm_flags = PREV.rm_flags;
error = xfs_rmap_update(cur, &NEW); if (error) goto done; /* new left extent - oldext */ NEW = PREV; NEW.rm_blockcount = offset - PREV.rm_offset;
cur->bc_rec.r = NEW;
trace_xfs_rmap_insert(cur, NEW.rm_startblock, NEW.rm_blockcount, NEW.rm_owner, NEW.rm_offset, NEW.rm_flags);
error = xfs_btree_insert(cur, &i); if (error) goto done; if (XFS_IS_CORRUPT(mp, i != 1)) {
xfs_btree_mark_sick(cur);
error = -EFSCORRUPTED; goto done;
} /* * Reset the cursor to the position of the new extent * we are about to insert as we can't trust it after * the previous insert.
*/
error = xfs_rmap_lookup_eq(cur, bno, len, owner, offset,
oldext, &i); if (error) goto done; if (XFS_IS_CORRUPT(mp, i != 0)) {
xfs_btree_mark_sick(cur);
error = -EFSCORRUPTED; goto done;
} /* new middle extent - newext */
cur->bc_rec.r.rm_flags &= ~XFS_RMAP_UNWRITTEN;
cur->bc_rec.r.rm_flags |= newext;
trace_xfs_rmap_insert(cur, bno, len, owner, offset, newext);
error = xfs_btree_insert(cur, &i); if (error) goto done; if (XFS_IS_CORRUPT(mp, i != 1)) {
xfs_btree_mark_sick(cur);
error = -EFSCORRUPTED; goto done;
} break;
case RMAP_LEFT_FILLING | RMAP_LEFT_CONTIG | RMAP_RIGHT_CONTIG: case RMAP_RIGHT_FILLING | RMAP_LEFT_CONTIG | RMAP_RIGHT_CONTIG: case RMAP_LEFT_FILLING | RMAP_RIGHT_CONTIG: case RMAP_RIGHT_FILLING | RMAP_LEFT_CONTIG: case RMAP_LEFT_CONTIG | RMAP_RIGHT_CONTIG: case RMAP_LEFT_CONTIG: case RMAP_RIGHT_CONTIG: /* * These cases are all impossible.
*/
ASSERT(0);
}
/* * Convert an unwritten extent to a real extent or vice versa. If there is no * possibility of overlapping extents, delegate to the simpler convert * function.
*/ STATICint
xfs_rmap_convert_shared( struct xfs_btree_cur *cur,
xfs_agblock_t bno,
xfs_extlen_t len, bool unwritten, conststruct xfs_owner_info *oinfo)
{ struct xfs_mount *mp = cur->bc_mp; struct xfs_rmap_irec r[4]; /* neighbor extent entries */ /* left is 0, right is 1, */ /* prev is 2, new is 3 */
uint64_t owner;
uint64_t offset;
uint64_t new_endoff; unsignedint oldext; unsignedint newext; unsignedint flags = 0; int i; int state = 0; int error;
/* * For the initial lookup, look for and exact match or the left-adjacent * record for our insertion point. This will also give us the record for * start block contiguity tests.
*/
error = xfs_rmap_lookup_le_range(cur, bno, owner, offset, oldext,
&PREV, &i); if (error) goto done; if (XFS_IS_CORRUPT(mp, i != 1)) {
xfs_btree_mark_sick(cur);
error = -EFSCORRUPTED; goto done;
}
/* * Set flags determining what part of the previous oldext allocation * extent is being replaced by a newext allocation.
*/ if (PREV.rm_offset == offset)
state |= RMAP_LEFT_FILLING; if (PREV.rm_offset + PREV.rm_blockcount == new_endoff)
state |= RMAP_RIGHT_FILLING;
/* Is there a left record that abuts our range? */
error = xfs_rmap_find_left_neighbor(cur, bno, owner, offset, newext,
&LEFT, &i); if (error) goto done; if (i) {
state |= RMAP_LEFT_VALID; if (XFS_IS_CORRUPT(mp,
LEFT.rm_startblock + LEFT.rm_blockcount >
bno)) {
xfs_btree_mark_sick(cur);
error = -EFSCORRUPTED; goto done;
} if (xfs_rmap_is_mergeable(&LEFT, owner, newext))
state |= RMAP_LEFT_CONTIG;
}
/* Is there a right record that abuts our range? */
error = xfs_rmap_lookup_eq(cur, bno + len, len, owner, offset + len,
newext, &i); if (error) goto done; if (i) {
state |= RMAP_RIGHT_VALID;
error = xfs_rmap_get_rec(cur, &RIGHT, &i); if (error) goto done; if (XFS_IS_CORRUPT(mp, i != 1)) {
xfs_btree_mark_sick(cur);
error = -EFSCORRUPTED; goto done;
} if (XFS_IS_CORRUPT(mp, bno + len > RIGHT.rm_startblock)) {
xfs_btree_mark_sick(cur);
error = -EFSCORRUPTED; goto done;
}
trace_xfs_rmap_find_right_neighbor_result(cur,
RIGHT.rm_startblock, RIGHT.rm_blockcount,
RIGHT.rm_owner, RIGHT.rm_offset,
RIGHT.rm_flags); if (xfs_rmap_is_mergeable(&RIGHT, owner, newext))
state |= RMAP_RIGHT_CONTIG;
}
/* check that left + prev + right is not too long */ if ((state & (RMAP_LEFT_FILLING | RMAP_LEFT_CONTIG |
RMAP_RIGHT_FILLING | RMAP_RIGHT_CONTIG)) ==
(RMAP_LEFT_FILLING | RMAP_LEFT_CONTIG |
RMAP_RIGHT_FILLING | RMAP_RIGHT_CONTIG) &&
(unsignedlong)LEFT.rm_blockcount + len +
RIGHT.rm_blockcount > XFS_RMAP_LEN_MAX)
state &= ~RMAP_RIGHT_CONTIG;
trace_xfs_rmap_convert_state(cur, state, _RET_IP_); /* * Switch out based on the FILLING and CONTIG state bits.
*/ switch (state & (RMAP_LEFT_FILLING | RMAP_LEFT_CONTIG |
RMAP_RIGHT_FILLING | RMAP_RIGHT_CONTIG)) { case RMAP_LEFT_FILLING | RMAP_LEFT_CONTIG |
RMAP_RIGHT_FILLING | RMAP_RIGHT_CONTIG: /* * Setting all of a previous oldext extent to newext. * The left and right neighbors are both contiguous with new.
*/
error = xfs_rmap_delete(cur, RIGHT.rm_startblock,
RIGHT.rm_blockcount, RIGHT.rm_owner,
RIGHT.rm_offset, RIGHT.rm_flags); if (error) goto done;
error = xfs_rmap_delete(cur, PREV.rm_startblock,
PREV.rm_blockcount, PREV.rm_owner,
PREV.rm_offset, PREV.rm_flags); if (error) goto done; NEW = LEFT;
error = xfs_rmap_lookup_eq(cur, NEW.rm_startblock, NEW.rm_blockcount, NEW.rm_owner, NEW.rm_offset, NEW.rm_flags, &i); if (error) goto done; if (XFS_IS_CORRUPT(mp, i != 1)) {
xfs_btree_mark_sick(cur);
error = -EFSCORRUPTED; goto done;
} NEW.rm_blockcount += PREV.rm_blockcount + RIGHT.rm_blockcount;
error = xfs_rmap_update(cur, &NEW); if (error) goto done; break;
case RMAP_LEFT_FILLING | RMAP_RIGHT_FILLING | RMAP_LEFT_CONTIG: /* * Setting all of a previous oldext extent to newext. * The left neighbor is contiguous, the right is not.
*/
error = xfs_rmap_delete(cur, PREV.rm_startblock,
PREV.rm_blockcount, PREV.rm_owner,
PREV.rm_offset, PREV.rm_flags); if (error) goto done; NEW = LEFT;
error = xfs_rmap_lookup_eq(cur, NEW.rm_startblock, NEW.rm_blockcount, NEW.rm_owner, NEW.rm_offset, NEW.rm_flags, &i); if (error) goto done; if (XFS_IS_CORRUPT(mp, i != 1)) {
xfs_btree_mark_sick(cur);
error = -EFSCORRUPTED; goto done;
} NEW.rm_blockcount += PREV.rm_blockcount;
error = xfs_rmap_update(cur, &NEW); if (error) goto done; break;
case RMAP_LEFT_FILLING | RMAP_RIGHT_FILLING | RMAP_RIGHT_CONTIG: /* * Setting all of a previous oldext extent to newext. * The right neighbor is contiguous, the left is not.
*/
error = xfs_rmap_delete(cur, RIGHT.rm_startblock,
RIGHT.rm_blockcount, RIGHT.rm_owner,
RIGHT.rm_offset, RIGHT.rm_flags); if (error) goto done; NEW = PREV;
error = xfs_rmap_lookup_eq(cur, NEW.rm_startblock, NEW.rm_blockcount, NEW.rm_owner, NEW.rm_offset, NEW.rm_flags, &i); if (error) goto done; if (XFS_IS_CORRUPT(mp, i != 1)) {
xfs_btree_mark_sick(cur);
error = -EFSCORRUPTED; goto done;
} NEW.rm_blockcount += RIGHT.rm_blockcount; NEW.rm_flags = RIGHT.rm_flags;
error = xfs_rmap_update(cur, &NEW); if (error) goto done; break;
case RMAP_LEFT_FILLING | RMAP_RIGHT_FILLING: /* * Setting all of a previous oldext extent to newext. * Neither the left nor right neighbors are contiguous with * the new one.
*/ NEW = PREV;
error = xfs_rmap_lookup_eq(cur, NEW.rm_startblock, NEW.rm_blockcount, NEW.rm_owner, NEW.rm_offset, NEW.rm_flags, &i); if (error) goto done; if (XFS_IS_CORRUPT(mp, i != 1)) {
xfs_btree_mark_sick(cur);
error = -EFSCORRUPTED; goto done;
} NEW.rm_flags = newext;
error = xfs_rmap_update(cur, &NEW); if (error) goto done; break;
case RMAP_LEFT_FILLING | RMAP_LEFT_CONTIG: /* * Setting the first part of a previous oldext extent to newext. * The left neighbor is contiguous.
*/ NEW = PREV;
error = xfs_rmap_delete(cur, NEW.rm_startblock, NEW.rm_blockcount, NEW.rm_owner, NEW.rm_offset, NEW.rm_flags); if (error) goto done; NEW.rm_offset += len; NEW.rm_startblock += len; NEW.rm_blockcount -= len;
error = xfs_rmap_insert(cur, NEW.rm_startblock, NEW.rm_blockcount, NEW.rm_owner, NEW.rm_offset, NEW.rm_flags); if (error) goto done; NEW = LEFT;
error = xfs_rmap_lookup_eq(cur, NEW.rm_startblock, NEW.rm_blockcount, NEW.rm_owner, NEW.rm_offset, NEW.rm_flags, &i); if (error) goto done; if (XFS_IS_CORRUPT(mp, i != 1)) {
xfs_btree_mark_sick(cur);
error = -EFSCORRUPTED; goto done;
} NEW.rm_blockcount += len;
error = xfs_rmap_update(cur, &NEW); if (error) goto done; break;
case RMAP_LEFT_FILLING: /* * Setting the first part of a previous oldext extent to newext. * The left neighbor is not contiguous.
*/ NEW = PREV;
error = xfs_rmap_delete(cur, NEW.rm_startblock, NEW.rm_blockcount, NEW.rm_owner, NEW.rm_offset, NEW.rm_flags); if (error) goto done; NEW.rm_offset += len; NEW.rm_startblock += len; NEW.rm_blockcount -= len;
error = xfs_rmap_insert(cur, NEW.rm_startblock, NEW.rm_blockcount, NEW.rm_owner, NEW.rm_offset, NEW.rm_flags); if (error) goto done;
error = xfs_rmap_insert(cur, bno, len, owner, offset, newext); if (error) goto done; break;
case RMAP_RIGHT_FILLING | RMAP_RIGHT_CONTIG: /* * Setting the last part of a previous oldext extent to newext. * The right neighbor is contiguous with the new allocation.
*/ NEW = PREV;
error = xfs_rmap_lookup_eq(cur, NEW.rm_startblock, NEW.rm_blockcount, NEW.rm_owner, NEW.rm_offset, NEW.rm_flags, &i); if (error) goto done; if (XFS_IS_CORRUPT(mp, i != 1)) {
xfs_btree_mark_sick(cur);
error = -EFSCORRUPTED; goto done;
} NEW.rm_blockcount = offset - NEW.rm_offset;
error = xfs_rmap_update(cur, &NEW); if (error) goto done; NEW = RIGHT;
error = xfs_rmap_delete(cur, NEW.rm_startblock, NEW.rm_blockcount, NEW.rm_owner, NEW.rm_offset, NEW.rm_flags); if (error) goto done; NEW.rm_offset = offset; NEW.rm_startblock = bno; NEW.rm_blockcount += len;
error = xfs_rmap_insert(cur, NEW.rm_startblock, NEW.rm_blockcount, NEW.rm_owner, NEW.rm_offset, NEW.rm_flags); if (error) goto done; break;
case RMAP_RIGHT_FILLING: /* * Setting the last part of a previous oldext extent to newext. * The right neighbor is not contiguous.
*/ NEW = PREV;
error = xfs_rmap_lookup_eq(cur, NEW.rm_startblock, NEW.rm_blockcount, NEW.rm_owner, NEW.rm_offset, NEW.rm_flags, &i); if (error) goto done; if (XFS_IS_CORRUPT(mp, i != 1)) {
xfs_btree_mark_sick(cur);
error = -EFSCORRUPTED; goto done;
} NEW.rm_blockcount -= len;
error = xfs_rmap_update(cur, &NEW); if (error) goto done;
error = xfs_rmap_insert(cur, bno, len, owner, offset, newext); if (error) goto done; break;
case 0: /* * Setting the middle part of a previous oldext extent to * newext. Contiguity is impossible here. * One extent becomes three extents.
*/ /* new right extent - oldext */ NEW.rm_startblock = bno + len; NEW.rm_owner = owner; NEW.rm_offset = new_endoff; NEW.rm_blockcount = PREV.rm_offset + PREV.rm_blockcount -
new_endoff; NEW.rm_flags = PREV.rm_flags;
error = xfs_rmap_insert(cur, NEW.rm_startblock, NEW.rm_blockcount, NEW.rm_owner, NEW.rm_offset, NEW.rm_flags); if (error) goto done; /* new left extent - oldext */ NEW = PREV;
error = xfs_rmap_lookup_eq(cur, NEW.rm_startblock, NEW.rm_blockcount, NEW.rm_owner, NEW.rm_offset, NEW.rm_flags, &i); if (error) goto done; if (XFS_IS_CORRUPT(mp, i != 1)) {
xfs_btree_mark_sick(cur);
error = -EFSCORRUPTED; goto done;
} NEW.rm_blockcount = offset - NEW.rm_offset;
error = xfs_rmap_update(cur, &NEW); if (error) goto done; /* new middle extent - newext */ NEW.rm_startblock = bno; NEW.rm_blockcount = len; NEW.rm_owner = owner; NEW.rm_offset = offset; NEW.rm_flags = newext;
error = xfs_rmap_insert(cur, NEW.rm_startblock, NEW.rm_blockcount, NEW.rm_owner, NEW.rm_offset, NEW.rm_flags); if (error) goto done; break;
case RMAP_LEFT_FILLING | RMAP_LEFT_CONTIG | RMAP_RIGHT_CONTIG: case RMAP_RIGHT_FILLING | RMAP_LEFT_CONTIG | RMAP_RIGHT_CONTIG: case RMAP_LEFT_FILLING | RMAP_RIGHT_CONTIG: case RMAP_RIGHT_FILLING | RMAP_LEFT_CONTIG: case RMAP_LEFT_CONTIG | RMAP_RIGHT_CONTIG: case RMAP_LEFT_CONTIG: case RMAP_RIGHT_CONTIG: /* * These cases are all impossible.
*/
ASSERT(0);
}
/* * Find an extent in the rmap btree and unmap it. For rmap extent types that * can overlap (data fork rmaps on reflink filesystems) we must be careful * that the prev/next records in the btree might belong to another owner. * Therefore we must use delete+insert to alter any of the key fields. * * For every other situation there can only be one owner for a given extent, * so we can call the regular _free function.
*/ STATICint
xfs_rmap_unmap_shared( struct xfs_btree_cur *cur,
xfs_agblock_t bno,
xfs_extlen_t len, bool unwritten, conststruct xfs_owner_info *oinfo)
{ struct xfs_mount *mp = cur->bc_mp; struct xfs_rmap_irec ltrec;
uint64_t ltoff; int error = 0; int i;
uint64_t owner;
uint64_t offset; unsignedint flags;
/*
 * NOTE(review): owner/offset/flags are read by the lookup below, but no
 * initialization is visible in this chunk -- upstream presumably unpacks
 * them from oinfo (xfs_owner_info_unpack) before this point. TODO: confirm;
 * this chunk appears whitespace-mangled ("STATICint", "elseif", etc.).
 */
/* * We should always have a left record because there's a static record * for the AG headers at rm_startblock == 0 created by mkfs/growfs that * will not ever be removed from the tree.
*/
error = xfs_rmap_lookup_le_range(cur, bno, owner, offset, flags,
&ltrec, &i); if (error) goto out_error; if (XFS_IS_CORRUPT(mp, i != 1)) {
xfs_btree_mark_sick(cur);
error = -EFSCORRUPTED; goto out_error;
}
/* Remember the file offset of the record we found (unused below in this
 * visible chunk -- presumably used by code lost in extraction). */
ltoff = ltrec.rm_offset;
/* Make sure the extent we found covers the entire freeing range. */ if (XFS_IS_CORRUPT(mp,
ltrec.rm_startblock > bno ||
ltrec.rm_startblock + ltrec.rm_blockcount <
bno + len)) {
xfs_btree_mark_sick(cur);
error = -EFSCORRUPTED; goto out_error;
}
/* Make sure the owner matches what we expect to find in the tree. */ if (XFS_IS_CORRUPT(mp, owner != ltrec.rm_owner)) {
xfs_btree_mark_sick(cur);
error = -EFSCORRUPTED; goto out_error;
}
/* Make sure the unwritten flag matches. */ if (XFS_IS_CORRUPT(mp,
(flags & XFS_RMAP_UNWRITTEN) !=
(ltrec.rm_flags & XFS_RMAP_UNWRITTEN))) {
xfs_btree_mark_sick(cur);
error = -EFSCORRUPTED; goto out_error;
}
if (ltrec.rm_startblock == bno && ltrec.rm_blockcount == len) { /* Exact match, simply remove the record from rmap tree. */
error = xfs_rmap_delete(cur, ltrec.rm_startblock,
ltrec.rm_blockcount, ltrec.rm_owner,
ltrec.rm_offset, ltrec.rm_flags); if (error) goto out_error;
} elseif (ltrec.rm_startblock == bno) { /* * Overlap left hand side of extent: move the start, trim the * length and update the current record. * * ltbno ltlen * Orig: |oooooooooooooooooooo| * Freeing: |fffffffff| * Result: |rrrrrrrrrr| * bno len
*/
/* Add an rmap at the new offset. */
/* NOTE(review): left-trim is done as delete+insert upstream because
 * rm_startblock is a btree key; the delete call is not visible here --
 * presumably dropped during extraction. TODO: confirm. */
ltrec.rm_startblock += len;
ltrec.rm_blockcount -= len;
ltrec.rm_offset += len;
error = xfs_rmap_insert(cur, ltrec.rm_startblock,
ltrec.rm_blockcount, ltrec.rm_owner,
ltrec.rm_offset, ltrec.rm_flags); if (error) goto out_error;
} elseif (ltrec.rm_startblock + ltrec.rm_blockcount == bno + len) { /* * Overlap right hand side of extent: trim the length and * update the current record. * * ltbno ltlen * Orig: |oooooooooooooooooooo| * Freeing: |fffffffff| * Result: |rrrrrrrrrr| * bno len
*/
error = xfs_rmap_lookup_eq(cur, ltrec.rm_startblock,
ltrec.rm_blockcount, ltrec.rm_owner,
ltrec.rm_offset, ltrec.rm_flags, &i); if (error) goto out_error; if (XFS_IS_CORRUPT(mp, i != 1)) {
xfs_btree_mark_sick(cur);
error = -EFSCORRUPTED; goto out_error;
}
ltrec.rm_blockcount -= len;
error = xfs_rmap_update(cur, &ltrec); if (error) goto out_error;
} else { /* * Overlap middle of extent: trim the length of the existing * record to the length of the new left-extent size, increment * the insertion position so we can insert a new record * containing the remaining right-extent space. * * ltbno ltlen * Orig: |oooooooooooooooooooo| * Freeing: |fffffffff| * Result: |rrrrr| |rrrr| * bno len
*/
xfs_extlen_t orig_len = ltrec.rm_blockcount;
/* Shrink the left side of the rmap */
error = xfs_rmap_lookup_eq(cur, ltrec.rm_startblock,
ltrec.rm_blockcount, ltrec.rm_owner,
ltrec.rm_offset, ltrec.rm_flags, &i); if (error) goto out_error; if (XFS_IS_CORRUPT(mp, i != 1)) {
xfs_btree_mark_sick(cur);
error = -EFSCORRUPTED; goto out_error;
}
ltrec.rm_blockcount = bno - ltrec.rm_startblock;
error = xfs_rmap_update(cur, &ltrec); if (error) goto out_error;
/* Add an rmap at the new offset */
error = xfs_rmap_insert(cur, bno + len,
orig_len - len - ltrec.rm_blockcount,
ltrec.rm_owner, offset + len,
ltrec.rm_flags); if (error) goto out_error;
}
/*
 * NOTE(review): the 'out_error:' label targeted by the gotos above, the
 * trace_*_done()/trace_*_error() calls, 'return error;' and the closing
 * brace are not visible in this chunk -- presumably lost in extraction.
 */
/* * Find an extent in the rmap btree and map it. For rmap extent types that * can overlap (data fork rmaps on reflink filesystems) we must be careful * that the prev/next records in the btree might belong to another owner. * Therefore we must use delete+insert to alter any of the key fields. * * For every other situation there can only be one owner for a given extent, * so we can call the regular _alloc function.
*/ STATICint
xfs_rmap_map_shared( struct xfs_btree_cur *cur,
xfs_agblock_t bno,
xfs_extlen_t len, bool unwritten, conststruct xfs_owner_info *oinfo)
{ struct xfs_mount *mp = cur->bc_mp; struct xfs_rmap_irec ltrec; struct xfs_rmap_irec gtrec; int have_gt; int have_lt; int error = 0; int i;
uint64_t owner;
uint64_t offset; unsignedint flags = 0;
/*
 * NOTE(review): owner/offset are read by the neighbor lookups below but no
 * initialization is visible here -- upstream presumably unpacks them from
 * oinfo (and sets XFS_RMAP_UNWRITTEN in flags when 'unwritten') before this
 * point. TODO: confirm against the upstream file.
 */
/* Is there a left record that abuts our range? */
error = xfs_rmap_find_left_neighbor(cur, bno, owner, offset, flags,
&ltrec, &have_lt); if (error) goto out_error; if (have_lt &&
!xfs_rmap_is_mergeable(&ltrec, owner, flags))
have_lt = 0;
/* Is there a right record that abuts our range? */
error = xfs_rmap_lookup_eq(cur, bno + len, len, owner, offset + len,
flags, &have_gt); if (error) goto out_error; if (have_gt) {
error = xfs_rmap_get_rec(cur, &gtrec, &have_gt); if (error) goto out_error; if (XFS_IS_CORRUPT(mp, have_gt != 1)) {
xfs_btree_mark_sick(cur);
error = -EFSCORRUPTED; goto out_error;
}
trace_xfs_rmap_find_right_neighbor_result(cur,
gtrec.rm_startblock, gtrec.rm_blockcount,
gtrec.rm_owner, gtrec.rm_offset,
gtrec.rm_flags);
if (!xfs_rmap_is_mergeable(&gtrec, owner, flags))
have_gt = 0;
}
/*
 * NOTE(review): this function is truncated here -- the merge/insert logic,
 * the 'out_error:' label and the function's closing brace are missing from
 * this chunk (the text that follows is unrelated extraction residue).
 */
/*
 * NOTE(review): the trailing text below is extraction residue (a German
 * website disclaimer), not part of this C source file. It is preserved
 * verbatim but fenced in a comment so it cannot be parsed as code.
 *
 * Die Informationen auf dieser Webseite wurden
 * nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
 * noch Qualität der bereitgestellten Informationen zugesichert.
 * Bemerkung:
 * Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.
 */