/* * Realtime Reverse Map btree. * * This is a btree used to track the owner(s) of a given extent in the realtime * device. See the comments in xfs_rmap_btree.c for more information. * * This tree is basically the same as the regular rmap btree except that it * is rooted in an inode and does not live in free space.
*/
/* Calculate number of records in the ondisk realtime rmap btree inode root. */
unsigned int
xfs_rtrmapbt_droot_maxrecs(
	unsigned int		blocklen,
	bool			leaf)
{
	blocklen -= sizeof(struct xfs_rtrmap_root);

	/*
	 * Leaf blocks hold only records; node blocks hold two keys (low and
	 * high) plus one pointer per entry.
	 *
	 * NOTE(review): the tail of this function was lost in extraction and
	 * has been reconstructed from the standard rtrmapbt on-disk layout —
	 * confirm against the original source.
	 */
	if (leaf)
		return blocklen / sizeof(struct xfs_rmap_rec);
	return blocklen / (2 * sizeof(struct xfs_rmap_key) +
			sizeof(xfs_rtrmap_ptr_t));
}
/* * Get the maximum records we could store in the on-disk format. * * For non-root nodes this is equivalent to xfs_rtrmapbt_get_maxrecs, but * for the root node this checks the available space in the dinode fork * so that we can resize the in-memory buffer to match it. After a * resize to the maximum size this function returns the same value * as xfs_rtrmapbt_get_maxrecs for the root node, too.
*/ STATICint
xfs_rtrmapbt_get_dmaxrecs( struct xfs_btree_cur *cur, int level)
{ if (level != cur->bc_nlevels - 1) return cur->bc_mp->m_rtrmap_mxr[level != 0]; return xfs_rtrmapbt_droot_maxrecs(cur->bc_ino.forksize, level == 0);
}
/* * Convert the ondisk record's offset field into the ondisk key's offset field. * Fork and bmbt are significant parts of the rmap record key, but written * status is merely a record attribute.
*/ staticinline __be64 ondisk_rec_offset_to_key(constunion xfs_btree_rec *rec)
{ return rec->rmap.rm_offset & ~cpu_to_be64(XFS_RMAP_OFF_UNWRITTEN);
}
/* * Mask the appropriate parts of the ondisk key field for a key comparison. * Fork and bmbt are significant parts of the rmap record key, but written * status is merely a record attribute.
*/ staticinline uint64_t offset_keymask(uint64_t offset)
{ return offset & ~XFS_RMAP_OFF_UNWRITTEN;
}
x = be32_to_cpu(k1->rmap.rm_startblock);
y = be32_to_cpu(k2->rmap.rm_startblock); if (x < y) return 1; elseif (x > y) return 0;
a = be64_to_cpu(k1->rmap.rm_owner);
b = be64_to_cpu(k2->rmap.rm_owner); if (a < b) return 1; elseif (a > b) return 0;
a = offset_keymask(be64_to_cpu(k1->rmap.rm_offset));
b = offset_keymask(be64_to_cpu(k2->rmap.rm_offset)); if (a <= b) return 1; return 0;
}
x = be32_to_cpu(r1->rmap.rm_startblock);
y = be32_to_cpu(r2->rmap.rm_startblock); if (x < y) return 1; elseif (x > y) return 0;
a = be64_to_cpu(r1->rmap.rm_owner);
b = be64_to_cpu(r2->rmap.rm_owner); if (a < b) return 1; elseif (a > b) return 0;
a = offset_keymask(be64_to_cpu(r1->rmap.rm_offset));
b = offset_keymask(be64_to_cpu(r2->rmap.rm_offset)); if (a <= b) return 1; return 0;
}
/* * We only support checking contiguity of the physical space component. * If any callers ever need more specificity than that, they'll have to * implement it here.
*/
ASSERT(!mask || (!mask->rmap.rm_owner && !mask->rmap.rm_offset));
/* Handle the nop case quietly. */ if (new_size == old_size) return ifp->if_broot;
if (new_size > old_size) { unsignedint old_numrecs;
/* * If there wasn't any memory allocated before, just allocate * it now and get out.
*/ if (old_size == 0) return xfs_broot_realloc(ifp, new_size);
/* * If there is already an existing if_broot, then we need to * realloc it and possibly move the node block pointers because * those are not butted up against the btree block header.
*/
old_numrecs = xfs_rtrmapbt_maxrecs(mp, old_size, level == 0);
broot = xfs_broot_realloc(ifp, new_size); if (level > 0)
xfs_rtrmapbt_move_ptrs(mp, broot, old_size, new_size,
old_numrecs); goto out_broot;
}
/* * We're reducing numrecs. If we're going all the way to zero, just * free the block.
*/
ASSERT(ifp->if_broot != NULL && old_size > 0); if (new_size == 0) return xfs_broot_realloc(ifp, 0);
/* * Shrink the btree root by possibly moving the rtrmapbt pointers, * since they are not butted up against the btree block header. Then * reallocate broot.
*/ if (level > 0)
xfs_rtrmapbt_move_ptrs(mp, ifp->if_broot, old_size, new_size,
new_numrecs);
broot = xfs_broot_realloc(ifp, new_size);
/*
 * Free any resources hanging off the real fork, then shallow-copy the
 * staging fork's contents into the real fork to transfer everything
 * we just built.
 */
/*
 * NOTE(review): fragment — the enclosing function's header was lost in
 * extraction; this appears to be the fork-swap step of committing a staged
 * rtrmap btree.  `ifp` and `ifake` are declared in the missing prologue.
 */
ifp = xfs_ifork_ptr(cur->bc_ino.ip, XFS_DATA_FORK);
xfs_idestroy_fork(ifp);
memcpy(ifp, ifake->if_fork, sizeof(struct xfs_ifork));
/*
 * Compute the asymptotic maxlevels for an rtrmapbt on any rtreflink fs.
 *
 * On a reflink filesystem, each block in an rtgroup can have up to
 * 2^32 (per the refcount record format) owners, which means that
 * theoretically we could face up to 2^64 rmap records.  However, we're
 * likely to run out of blocks in the data device long before that
 * happens, which means that we must compute the max height based on
 * what the btree will look like if it consumes almost all the blocks
 * in the data device due to maximal sharing factor.
 */
/*
 * NOTE(review): fragment — the enclosing function's prologue (including the
 * declarations of max_dblocks and minrecs) was lost in extraction.
 */
	max_dblocks = -1U;	/* max ag count */
	max_dblocks *= XFS_MAX_CRC_AG_BLOCKS;
	return xfs_btree_space_to_height(minrecs, max_dblocks);
}
/* Compute the maximum height of an rt reverse mapping btree. */ void
xfs_rtrmapbt_compute_maxlevels( struct xfs_mount *mp)
{ unsignedint d_maxlevels, r_maxlevels;
if (!xfs_has_rtrmapbt(mp)) {
mp->m_rtrmap_maxlevels = 0; return;
}
/* * The realtime rmapbt lives on the data device, which means that its * maximum height is constrained by the size of the data device and * the height required to store one rmap record for each block in an * rt group. * * On a reflink filesystem, each rt block can have up to 2^32 (per the * refcount record format) owners, which means that theoretically we * could face up to 2^64 rmap records. This makes the computation of * maxlevels based on record count meaningless, so we only consider the * size of the data device.
*/
d_maxlevels = xfs_btree_space_to_height(mp->m_rtrmap_mnr,
mp->m_sb.sb_dblocks); if (xfs_has_rtreflink(mp)) {
mp->m_rtrmap_maxlevels = d_maxlevels + 1; return;
}
/* Add one level to handle the inode root level. */
mp->m_rtrmap_maxlevels = min(d_maxlevels, r_maxlevels) + 1;
}
/* Calculate the rtrmap btree size for some records. */ unsignedlonglong
xfs_rtrmapbt_calc_size( struct xfs_mount *mp, unsignedlonglong len)
{ return xfs_btree_calc_size(mp->m_rtrmap_mnr, len);
}
/*
 * Calculate the maximum rmap btree size.
 */
static unsigned long long
xfs_rtrmapbt_max_size(
	struct xfs_mount	*mp,
	xfs_rtblock_t		rtblocks)
{
	/* Bail out if we're uninitialized, which can happen in mkfs. */
	if (mp->m_rtrmap_mxr[0] == 0)
		return 0;

	return xfs_rtrmapbt_calc_size(mp, rtblocks);
}
/*
 * Figure out how many blocks to reserve and how many are used by this btree.
 */
xfs_filblks_t
xfs_rtrmapbt_calc_reserves(
	struct xfs_mount	*mp)
{
	uint32_t		blocks = mp->m_groups[XG_TYPE_RTG].blocks;

	if (!xfs_has_rtrmapbt(mp))
		return 0;

	/* Reserve 1% of the rtgroup or enough for 1 block per record. */
	return max_t(xfs_filblks_t, blocks / 100,
			xfs_rtrmapbt_max_size(mp, blocks));
}
/* Load a realtime reverse mapping btree root in from disk. */
int
xfs_iformat_rtrmap(
	struct xfs_inode	*ip,
	struct xfs_dinode	*dip)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_rtrmap_root	*dfp = XFS_DFORK_PTR(dip, XFS_DATA_FORK);
	struct xfs_btree_block	*broot;
	unsigned int		numrecs;
	unsigned int		level;
	int			dsize;

	/*
	 * growfs must create the rtrmap inodes before adding a realtime volume
	 * to the filesystem, so we cannot use the rtrmapbt predicate here.
	 */
	if (!xfs_has_rmapbt(ip->i_mount)) {
		xfs_inode_mark_sick(ip, XFS_SICK_INO_CORE);
		return -EFSCORRUPTED;
	}
	/*
	 * NOTE(review): the remainder of this function (validating level and
	 * numrecs from dfp and copying the root into the incore fork) was
	 * truncated in extraction and is not reproduced here — recover it from
	 * the original source.
	 */
/* * Return the highest rgbno currently tracked by the rmap for this rtg.
*/
xfs_rgblock_t
xfs_rtrmap_highest_rgbno( struct xfs_rtgroup *rtg)
{ struct xfs_btree_block *block = rtg_rmap(rtg)->i_df.if_broot; union xfs_btree_key key = {}; struct xfs_btree_cur *cur;
if (block->bb_numrecs == 0) return NULLRGBLOCK;
cur = xfs_rtrmapbt_init_cursor(NULL, rtg);
xfs_btree_get_keys(cur, block, &key);
xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR); return be32_to_cpu(key.__rmap_bigkey[1].rm_startblock);
}
Messung V0.5
¤ Dauer der Verarbeitung: 0.15 Sekunden
(vorverarbeitet)
¤
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.