// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2010 Kent Overstreet <kent.overstreet@gmail.com>
 *
 * Uses a block device as cache for other block devices; optimized for SSDs.
 * All allocation is done in buckets, which should match the erase block size
 * of the device.
 *
 * Buckets containing cached data are kept on a heap sorted by priority;
 * bucket priority is increased on cache hit, and periodically all the buckets
 * on the heap have their priority scaled down. This currently is just used as
 * an LRU but in the future should allow for more intelligent heuristics.
 *
 * Buckets have an 8 bit counter; freeing is accomplished by incrementing the
 * counter. Garbage collection is used to remove stale pointers.
 *
 * Indexing is done via a btree; nodes are not necessarily fully sorted, rather
 * as keys are inserted we only sort the pages that have not yet been written.
 * When garbage collection is run, we resort the entire node.
 *
 * All configuration is done via sysfs; see Documentation/admin-guide/bcache.rst.
 */
/*
 * NOTE(review): truncated fragment — the enclosing function's signature and
 * opening lines are outside this view.  Judging by the diagnostic strings it
 * returns, this validates an extent key's pointers against the cache device
 * geometry and returns "" when the key is acceptable.  Several "return"
 * keywords below are fused to their string literals (extraction artifact);
 * code left byte-identical.
 */
/* Check each pointer of the key that maps to an available cache device. */
for (i = 0; i < KEY_PTRS(k); i++) if (ptr_available(c, k, i)) { struct cache *ca = c->cache;
size_t bucket = PTR_BUCKET_NR(c, k, i);
size_t r = bucket_remainder(c, PTR_OFFSET(k, i));
/*
 * Reject a pointer whose data would cross a bucket boundary, fall before
 * the first usable bucket, point past the end of the device, or whose
 * bucket generation is stale.
 */
if (KEY_SIZE(k) + r > c->cache->sb.bucket_size) return"bad, length too big"; if (bucket < ca->sb.first_bucket) return"bad, short offset"; if (bucket >= ca->sb.nbuckets) return"bad, offset past end of device"; if (ptr_stale(c, k, i)) return"stale";
}
/* Whole-key checks: all-zero key, key with no pointers, or zero-size key. */
if (!bkey_cmp(k, &ZERO_KEY)) return"bad, null key"; if (!KEY_PTRS(k)) return"bad, no pointers"; if (!KEY_SIZE(k)) return"zeroed key"; return"";
}
/* * Returns true if l > r - unless l == r, in which case returns true if l is * older than r. * * Necessary for btree_sort_fixup() - if there are multiple keys that compare * equal in different sets, we have to process them newest to oldest.
*/ staticbool bch_extent_sort_cmp(struct btree_iter_set l, struct btree_iter_set r)
{
/*
 * NOTE(review): truncated fragment — "staticbool" above is a fused token
 * from extraction, and this function's return statement is not visible
 * here; the text that follows belongs to a different function.  Left
 * byte-identical apart from comments.
 */
/* Compare by start-of-extent position first; ordering of ties is handled
 * by code outside this view. */
int64_t c = bkey_cmp(&START_KEY(l.k), &START_KEY(r.k));
/*
 * NOTE(review): interior fragment of an extent insert/replace fixup routine —
 * the enclosing signature is outside this view.  Variables k, insert,
 * replace_key, b, top and sectors_found, and the check_failed label, are all
 * declared/defined outside the visible span; fused tokens ("unsignedint")
 * are extraction artifacts and left byte-identical.
 */
/* * We might overlap with 0 size extents; we can't skip these * because if they're in the set we're inserting to we have to * adjust them so they don't overlap with the key we're * inserting. But we don't want to check them for replace * operations.
*/
if (replace_key && KEY_SIZE(k)) { /* * k might have been split since we inserted/found the * key we're replacing
*/ unsignedint i;
/* Offset of k within replace_key, in sectors. */
uint64_t offset = KEY_START(k) -
KEY_START(replace_key);
/* But it must be a subset of the replace key */ if (KEY_START(k) < KEY_START(replace_key) ||
KEY_OFFSET(k) > KEY_OFFSET(replace_key)) goto check_failed;
/* We didn't find a key that we were supposed to */ if (KEY_START(k) > KEY_START(insert) + sectors_found) goto check_failed;
if (!bch_bkey_equal_header(k, replace_key)) goto check_failed;
/* skip past gen */
offset <<= 8;
BUG_ON(!KEY_PTRS(replace_key));
/* Every pointer of k must match the corresponding pointer of
 * replace_key, shifted by the sector offset computed above. */
for (i = 0; i < KEY_PTRS(replace_key); i++) if (k->ptr[i] != replace_key->ptr[i] + offset) goto check_failed;
if (bkey_cmp(insert, k) < 0 &&
bkey_cmp(&START_KEY(insert), &START_KEY(k)) > 0) { /* * We overlapped in the middle of an existing key: that * means we have to split the old key. But we have to do * slightly different things depending on whether the * old key has been written out yet.
*/
if (bkey_written(b, k)) { /* * We insert a new key to cover the top of the * old key, and the old key is modified in place * to represent the bottom split. * * It's completely arbitrary whether the new key * is the top or the bottom, but it has to match * up with what btree_sort_fixup() does - it * doesn't check for this kind of overlap, it * depends on us inserting a new key for the top * here.
*/
top = bch_bset_search(b, bset_tree_last(b),
insert);
bch_bset_insert(b, top, k);
} else {
/* Unwritten key: duplicate it in place and split the copy. */
BKEY_PADDED(key) temp;
bkey_copy(&temp.key, k);
bch_bset_insert(b, k, &temp.key);
top = bkey_next(k);
}
/*
 * NOTE(review): interior fragment of an extent merge routine — the enclosing
 * signature is outside this view.  l and r are the two candidate keys; the
 * fused tokens ("returnfalse") are extraction artifacts and left
 * byte-identical.
 */
/* l and r are only mergeable if every pointer of r continues exactly where
 * the corresponding pointer of l ends, within the same bucket. */
for (i = 0; i < KEY_PTRS(l); i++) if (l->ptr[i] + MAKE_PTR(0, KEY_SIZE(l), 0) != r->ptr[i] ||
PTR_BUCKET_NR(b->c, l, i) != PTR_BUCKET_NR(b->c, r, i)) returnfalse;
/* Keys with no pointers aren't restricted to one bucket and could * overflow KEY_SIZE
*/ if (KEY_SIZE(l) + KEY_SIZE(r) > USHRT_MAX) {
/* Combined size would overflow the 16-bit size field: grow l to the
 * maximum and trim the overlapping front of r instead of merging. */
SET_KEY_OFFSET(l, KEY_OFFSET(l) + USHRT_MAX - KEY_SIZE(l));
SET_KEY_SIZE(l, USHRT_MAX);
bch_cut_front(l, r); returnfalse;
}
/* Checksums can only be combined when both keys carry one; otherwise the
 * merged key is left unchecksummed. */
if (KEY_CSUM(l)) { if (KEY_CSUM(r))
l->ptr[KEY_PTRS(l)] = merge_chksums(l, r); else
SET_KEY_CSUM(l, 0);
}
/*
 * NOTE(review): the following trailing text is a German-language website
 * disclaimer ("information provided without warranty of completeness,
 * correctness or quality; syntax highlighting and measurement are still
 * experimental") that was accidentally appended to this source during
 * extraction.  It is not code; preserved here inside a comment so the file
 * remains syntactically valid C.
 *
 * Die Informationen auf dieser Webseite wurden
 * nach bestem Wissen sorgfaeltig zusammengestellt. Es wird jedoch weder
 * Vollstaendigkeit, noch Richtigkeit, noch Qualitaet der bereitgestellten
 * Informationen zugesichert.
 * Bemerkung:
 * Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.
 */