/*
 * NOTE(review): incomplete fragment — appears to be the interior of
 * sm_ll_extend() (growing the space map); the enclosing function header and
 * the remainder of the new-block loop are missing from this extract.
 * Code left byte-identical.
 */
/* * Because the new bitmap blocks are created via a shadow * operation, the old entry has already had its reference count * decremented and we don't need the btree to do any bookkeeping.
 */
ll->bitmap_info.value_type.size = sizeof(struct disk_index_entry);
ll->bitmap_info.value_type.inc = NULL;
ll->bitmap_info.value_type.dec = NULL;
ll->bitmap_info.value_type.equal = NULL;
/* Reject sizes whose index-entry count exceeds what the index can hold. */
nr_indexes = dm_sector_div_up(nr_blocks, ll->entries_per_block); if (nr_indexes > ll->max_entries(ll)) {
DMERR("space map too large"); return -EINVAL;
}
/* * We need to set this before the dm_tm_new_block() call below.
 */
ll->nr_blocks = nr_blocks; for (i = old_blocks; i < blocks; i++) { struct dm_block *b; struct disk_index_entry idx;
/* Allocate a fresh bitmap block for each newly added index entry. */
r = dm_tm_new_block(ll->tm, &dm_sm_bitmap_validator, &b); if (r < 0) return r;
/*
 * NOTE(review): truncated in this extract — the success path that unlocks
 * the bitmap and stores *result is missing below the last visible line.
 * "unsignedint" is whitespace-mangled "unsigned int"; code left byte-identical.
 *
 * Scans the index entries covering blocks [begin, end) looking for a block
 * with a free (zero) bitmap entry.
 */
int sm_ll_find_free_block(struct ll_disk *ll, dm_block_t begin,
dm_block_t end, dm_block_t *result)
{ int r; struct disk_index_entry ie_disk;
dm_block_t i, index_begin = begin;
dm_block_t index_end = dm_sector_div_up(end, ll->entries_per_block);
/* * FIXME: Use shifts
 */
/* Convert absolute block numbers into (index entry, bit offset) pairs. */
begin = do_div(index_begin, ll->entries_per_block);
end = do_div(end, ll->entries_per_block); if (end == 0)
end = ll->entries_per_block;
for (i = index_begin; i < index_end; i++, begin = 0) { struct dm_block *blk; unsignedint position;
uint32_t bit_end;
r = ll->load_ie(ll, i, &ie_disk); if (r < 0) return r;
/* Skip bitmaps that advertise no free entries at all. */
if (le32_to_cpu(ie_disk.nr_free) == 0) continue;
r = dm_tm_read_lock(ll->tm, le64_to_cpu(ie_disk.blocknr),
&dm_sm_bitmap_validator, &blk); if (r < 0) return r;
/* Only the final bitmap is bounded by 'end'; earlier ones are full size. */
bit_end = (i == index_end - 1) ? end : ll->entries_per_block;
r = sm_find_free(dm_bitmap_data(blk),
max_t(unsignedint, begin, le32_to_cpu(ie_disk.none_free_before)),
bit_end, &position); if (r == -ENOSPC) { /* * This might happen because we started searching * part way through the bitmap.
 */
dm_tm_unlock(ll->tm, blk); continue;
}
/*
 * NOTE(review): truncated fragment — the loop condition, result handling and
 * closing braces are missing below the last visible line.
 *
 * Finds a block that is free in the new transaction AND unused in the old
 * one, so it can be recycled without corrupting the previous transaction.
 */
int sm_ll_find_common_free_block(struct ll_disk *old_ll, struct ll_disk *new_ll,
dm_block_t begin, dm_block_t end, dm_block_t *b)
{ int r;
uint32_t count;
do {
r = sm_ll_find_free_block(new_ll, begin, new_ll->nr_blocks, b); if (r) break;
/* double check this block wasn't used in the old transaction */ if (*b >= old_ll->nr_blocks)
count = 0; else {
r = sm_ll_lookup(old_ll, *b, &count); if (r) break;
/*
 * NOTE(review): truncated declaration — the remaining member(s) (at least
 * 'overflow_leaf', which later fragments dereference) and the closing brace
 * are missing from this extract.
 */
/* * Holds useful intermediate results for the range based inc and dec * operations.
 */ struct inc_context { struct disk_index_entry ie_disk; struct dm_block *bitmap_block; void *bitmap;
/* * bitmap_block needs to be unlocked because getting the * overflow_leaf may need to allocate, and thus use the space map.
 */
/*
 * NOTE(review): headerless fragment from the middle of a function — it
 * fetches the overflow-btree leaf holding block b's reference count for
 * in-place overwrite, and errors if the expected entry is absent.
 */
reset_inc_context(ll, ic);
r = btree_get_overwrite_leaf(&ll->ref_count_info, ll->ref_count_root,
b, &index, &ll->ref_count_root, &ic->overflow_leaf); if (r < 0) return r;
n = dm_block_data(ic->overflow_leaf);
if (!contains_key(n, b, index)) {
DMERR("overflow btree is missing an entry"); return -EINVAL;
}
/*
 * NOTE(review): headerless fragment, cut off after the store below —
 * presumably the fast path of an overflow-tree increment: if the cached
 * leaf already contains the key, bump the refcount in place.
 */
/* * Do we already have the correct overflow leaf?
 */ if (ic->overflow_leaf) {
n = dm_block_data(ic->overflow_leaf);
index = lower_bound(n, b); if (contains_key(n, b, index)) {
v_ptr = value_ptr(n, index);
rc = le32_to_cpu(*v_ptr) + 1;
*v_ptr = cpu_to_le32(rc);
/*
 * Once shadow_bitmap() has been called, which always happens at the start of
 * inc/dec, we can reopen the bitmap with a simple write lock, rather than
 * re-calling dm_tm_shadow_block().
 *
 * Re-acquires ic->bitmap_block (and refreshes ic->bitmap) if it was dropped.
 * Returns 0 on success, negative errno if the write lock cannot be taken.
 */
static inline int ensure_bitmap(struct ll_disk *ll, struct inc_context *ic)
{
	if (!ic->bitmap_block) {
		int r = dm_bm_write_lock(dm_tm_get_bm(ll->tm),
					 le64_to_cpu(ic->ie_disk.blocknr),
					 &dm_sm_bitmap_validator, &ic->bitmap_block);
		if (r) {
			DMERR("unable to re-get write lock for bitmap");
			return r;
		}
		ic->bitmap = dm_bitmap_data(ic->bitmap_block);
	}

	return 0;
}
/*
 * NOTE(review): garbled fragment — the lookup of the current bitmap entry
 * and the head of the switch statement (with its non-overflow cases) are
 * missing between the ensure_bitmap() call and the stray 'default:' label
 * below, so this span does not compile as shown.  "staticinlineint" is
 * whitespace-mangled "static inline int".  Code left byte-identical.
 */
/* * Loops round incrementing entries in a single bitmap.
 */ staticinlineint sm_ll_inc_bitmap(struct ll_disk *ll, dm_block_t b,
uint32_t bit, uint32_t bit_end,
int32_t *nr_allocations, dm_block_t *new_b, struct inc_context *ic)
{ int r;
__le32 le_rc;
uint32_t old;
for (; bit != bit_end; bit++, b++) { /* * We only need to drop the bitmap if we need to find a new btree * leaf for the overflow. So if it was dropped last iteration, * we now re-get it.
 */
r = ensure_bitmap(ll, ic); if (r) return r;
default: /* * inc within the overflow tree only.
 */
r = sm_ll_inc_overflow(ll, b, ic); if (r < 0) return r;
}
}
/* Report how far we got so the caller can continue with the next bitmap. */
*new_b = b; return 0;
}
/*
 * Finds the bitmap that covers the start of the block range [b, e) and
 * increments the entries within it.  *new_b is set to the first block not
 * processed, so the caller can loop until the whole range is done.
 *
 * Returns 0 on success, negative errno on error.
 */
static int __sm_ll_inc(struct ll_disk *ll, dm_block_t b, dm_block_t e,
		       int32_t *nr_allocations, dm_block_t *new_b)
{
	int r;
	struct inc_context ic;
	uint32_t bit, bit_end;
	dm_block_t index = b;

	init_inc_context(&ic);

	/* Split b into an index-entry number and a bit offset within its bitmap. */
	bit = do_div(index, ll->entries_per_block);
	r = ll->load_ie(ll, index, &ic.ie_disk);
	if (r < 0)
		return r;

	/* Shadow the bitmap block so it can be modified in this transaction. */
	r = shadow_bitmap(ll, &ic);
	if (r)
		return r;

	/* Clamp the range to the entries held by this one bitmap. */
	bit_end = min(bit + (e - b), (dm_block_t) ll->entries_per_block);
	r = sm_ll_inc_bitmap(ll, b, bit, bit_end, nr_allocations, new_b, &ic);
	exit_inc_context(ll, &ic);

	if (r)
		return r;

	/* Persist the updated index entry (nr_free, none_free_before, ...). */
	return ll->save_ie(ll, index, &ic.ie_disk);
}
/*
 * NOTE(review): truncated — the loop's error check and the function's
 * closing braces are missing below the last visible line.
 *
 * Public entry point: increments reference counts for every block in [b, e),
 * one bitmap's worth per __sm_ll_inc() call.
 */
int sm_ll_inc(struct ll_disk *ll, dm_block_t b, dm_block_t e,
int32_t *nr_allocations)
{
*nr_allocations = 0; while (b != e) { int r = __sm_ll_inc(ll, b, e, nr_allocations, &b);
/*
 * Loops round decrementing entries in a single bitmap.
 *
 * Each bitmap entry appears to hold a 2-bit count, where the value 3 means
 * the real count lives in the overflow btree (see case 3 below) — TODO
 * confirm against the on-disk format docs.  *new_b is set to the first block
 * not processed.  Returns 0 on success, negative errno on error.
 */
static inline int sm_ll_dec_bitmap(struct ll_disk *ll, dm_block_t b,
				   uint32_t bit, uint32_t bit_end,
				   struct inc_context *ic,
				   int32_t *nr_allocations, dm_block_t *new_b)
{
	int r;
	uint32_t old;

	for (; bit != bit_end; bit++, b++) {
		/*
		 * We only need to drop the bitmap if we need to find a new btree
		 * leaf for the overflow.  So if it was dropped last iteration,
		 * we now re-get it.
		 */
		r = ensure_bitmap(ll, ic);
		if (r)
			return r;

		old = sm_lookup_bitmap(ic->bitmap, bit);
		switch (old) {
		case 0:
			/* Decrementing an already-free block is a caller bug. */
			DMERR("unable to decrement block");
			return -EINVAL;

		case 1:
			/* dec bitmap: 1 -> 0, the block becomes free */
			sm_set_bitmap(ic->bitmap, bit, 0);
			(*nr_allocations)--;
			ll->nr_allocated--;
			le32_add_cpu(&ic->ie_disk.nr_free, 1);
			ic->ie_disk.none_free_before =
				cpu_to_le32(min(le32_to_cpu(ic->ie_disk.none_free_before), bit));
			break;

		case 2:
			/* dec bitmap: 2 -> 1 */
			sm_set_bitmap(ic->bitmap, bit, 1);
			break;

		case 3:
			/* Count is tracked in the overflow btree; dec it there. */
			r = sm_ll_dec_overflow(ll, b, ic, &old);
			if (r < 0)
				return r;

			if (old == 3) {
				/*
				 * The pre-dec count was 3, so the new count (2)
				 * fits in the bitmap again.
				 */
				r = ensure_bitmap(ll, ic);
				if (r)
					return r;

				sm_set_bitmap(ic->bitmap, bit, 2);
			}
			break;
		}
	}

	*new_b = b;
	return 0;
}
/*
 * Finds the bitmap that covers the start of the block range [b, e) and
 * decrements the entries within it.  *new_b is set to the first block not
 * processed, so the caller can loop until the whole range is done.
 *
 * Returns 0 on success, negative errno on error.
 */
static int __sm_ll_dec(struct ll_disk *ll, dm_block_t b, dm_block_t e,
		       int32_t *nr_allocations, dm_block_t *new_b)
{
	int r;
	uint32_t bit, bit_end;
	struct inc_context ic;
	dm_block_t index = b;

	init_inc_context(&ic);

	/* Split b into an index-entry number and a bit offset within its bitmap. */
	bit = do_div(index, ll->entries_per_block);
	r = ll->load_ie(ll, index, &ic.ie_disk);
	if (r < 0)
		return r;

	/* Shadow the bitmap block so it can be modified in this transaction. */
	r = shadow_bitmap(ll, &ic);
	if (r)
		return r;

	/* Clamp the range to the entries held by this one bitmap. */
	bit_end = min(bit + (e - b), (dm_block_t) ll->entries_per_block);
	r = sm_ll_dec_bitmap(ll, b, bit, bit_end, &ic, nr_allocations, new_b);
	exit_inc_context(ll, &ic);

	if (r)
		return r;

	/* Persist the updated index entry (nr_free, none_free_before, ...). */
	return ll->save_ie(ll, index, &ic.ie_disk);
}
/*
 * NOTE(review): truncated — the loop's error check and the function's
 * closing braces are missing below the last visible line.
 *
 * Public entry point: decrements reference counts for every block in [b, e),
 * one bitmap's worth per __sm_ll_dec() call.
 */
int sm_ll_dec(struct ll_disk *ll, dm_block_t b, dm_block_t e,
int32_t *nr_allocations)
{
*nr_allocations = 0; while (b != e) { int r = __sm_ll_dec(ll, b, e, nr_allocations, &b);
/*
 * NOTE(review): the following lines are website boilerplate (a German
 * disclaimer) that leaked into this file during extraction; they are not
 * part of the source code.  Preserved here, in translation, as a comment:
 * "The information on this website has been carefully compiled to the best
 *  of our knowledge.  However, neither completeness, correctness, nor
 *  quality of the information provided is guaranteed.  Note: the coloured
 *  syntax display and the measurement are still experimental."
 */