/*
 * NOTE(review): fragment — the enclosing comparison function's signature is
 * above this chunk and not visible here. From the fields touched (a hash,
 * then k1->sec_id/k2->sec_id) this is presumably the $SDH key compare
 * (hash first, security id as tiebreaker) — TODO confirm against full file.
 */
/* First value is a hash value itself. */ if (t1 < t2) return -1; if (t1 > t2) return 1;
/* Second value is security Id. */ if (data) {
t1 = le32_to_cpu(k1->sec_id);
t2 = le32_to_cpu(k2->sec_id); if (t1 < t2) return -1; if (t1 > t2) return 1;
}
/* * hdr_find_split * * Find a point at which the index allocation buffer would like to be split. * NOTE: This function should never return 'END' entry NULL returns on error.
*/ staticconststruct NTFS_DE *hdr_find_split(conststruct INDEX_HDR *hdr)
{
size_t o; conststruct NTFS_DE *e = hdr_first_de(hdr);
u32 used_2 = le32_to_cpu(hdr->used) >> 1;
u16 esize;
if (!e || de_is_last(e)) return NULL;
esize = le16_to_cpu(e->size); for (o = le32_to_cpu(hdr->de_off) + esize; o < used_2; o += esize) { conststruct NTFS_DE *p = e;
e = Add2Ptr(hdr, o);
/* We must not return END entry. */ if (de_is_last(e)) return p;
esize = le16_to_cpu(e->size);
}
return e;
}
/* * hdr_insert_head - Insert some entries at the beginning of the buffer. * * It is used to insert entries into a newly-created buffer.
*/ staticconststruct NTFS_DE *hdr_insert_head(struct INDEX_HDR *hdr, constvoid *ins, u32 ins_bytes)
{
u32 to_move; struct NTFS_DE *e = hdr_first_de(hdr);
u32 used = le32_to_cpu(hdr->used);
if (!e) return NULL;
/* Now we just make room for the inserted entries and jam it in. */
to_move = used - le32_to_cpu(hdr->de_off);
memmove(Add2Ptr(e, ins_bytes), e, to_move);
memcpy(e, ins, ins_bytes);
hdr->used = cpu_to_le32(used + ins_bytes);
return e;
}
/* * index_hdr_check * * return true if INDEX_HDR is valid
*/ staticbool index_hdr_check(conststruct INDEX_HDR *hdr, u32 bytes)
{
u32 end = le32_to_cpu(hdr->used);
u32 tot = le32_to_cpu(hdr->total);
u32 off = le32_to_cpu(hdr->de_off);
if (!IS_ALIGNED(off, 8) || tot > bytes || end > tot ||
size_add(off, sizeof(struct NTFS_DE)) > end) { /* incorrect index buffer. */ returnfalse;
}
/*
 * fnd_push - Push one (node, entry) pair onto the find-context stack.
 *
 * Return: 0 on success, -EINVAL if the stack is full or the level is corrupt.
 */
static int fnd_push(struct ntfs_fnd *fnd, struct indx_node *n,
		    struct NTFS_DE *e)
{
	int slot = fnd->level;

	/* Reject a corrupt level or an already-full stack. */
	if (slot < 0 || slot >= ARRAY_SIZE(fnd->nodes))
		return -EINVAL;

	fnd->nodes[slot] = n;
	fnd->de[slot] = e;
	fnd->level = slot + 1;
	return 0;
}
staticstruct indx_node *fnd_pop(struct ntfs_fnd *fnd)
{ struct indx_node *n; int i = fnd->level;
i -= 1;
n = fnd->nodes[i];
fnd->nodes[i] = NULL;
fnd->level = i;
return n;
}
staticbool fnd_is_empty(struct ntfs_fnd *fnd)
{ if (!fnd->level) return !fnd->root_de;
return !fnd->de[fnd->level - 1];
}
/*
 * NOTE(review): this function is TRUNCATED in this chunk — the body below
 * only builds the first page of the offs[] table; the binary search over
 * offs[], the table-growth path, and the closing braces are missing (the
 * extraction jumps straight into the next function). Code left byte-identical;
 * restore the tail from the full file before compiling.
 */
/* * hdr_find_e - Locate an entry the index buffer. * * If no matching entry is found, it returns the first entry which is greater * than the desired entry If the search key is greater than all the entries the * buffer, it returns the 'end' entry. This function does a binary search of the * current index buffer, for the first entry that is <= to the search value. * * Return: NULL if error.
 */ staticstruct NTFS_DE *hdr_find_e(conststruct ntfs_index *indx, conststruct INDEX_HDR *hdr, constvoid *key,
size_t key_len, constvoid *ctx, int *diff)
{ struct NTFS_DE *e, *found = NULL;
NTFS_CMP_FUNC cmp = indx->cmp; int min_idx = 0, mid_idx, max_idx = 0; int diff2; int table_size = 8;
u32 e_size, e_key_len;
u32 end = le32_to_cpu(hdr->used);
u32 off = le32_to_cpu(hdr->de_off);
u32 total = le32_to_cpu(hdr->total);
u16 offs[128];
/* A missing compare callback means the index was never initialized. */
if (unlikely(!cmp)) return NULL;
/* Collect entry offsets, validating each size against the used region. */
fill_table: if (end > total) return NULL;
if (size_add(off, sizeof(struct NTFS_DE)) > end) return NULL;
e = Add2Ptr(hdr, off);
e_size = le16_to_cpu(e->size);
if (e_size < sizeof(struct NTFS_DE) || off + e_size > end) return NULL;
if (!de_is_last(e)) {
offs[max_idx] = off;
off += e_size;
max_idx++; if (max_idx < table_size) goto fill_table;
/* * hdr_insert_de - Insert an index entry into the buffer. * * 'before' should be a pointer previously returned from hdr_find_e.
*/ staticstruct NTFS_DE *hdr_insert_de(conststruct ntfs_index *indx, struct INDEX_HDR *hdr, conststruct NTFS_DE *de, struct NTFS_DE *before, constvoid *ctx)
{ int diff;
size_t off = PtrOffset(hdr, before);
u32 used = le32_to_cpu(hdr->used);
u32 total = le32_to_cpu(hdr->total);
u16 de_size = le16_to_cpu(de->size);
/* First, check to see if there's enough room. */ if (used + de_size > total) return NULL;
/* We know there's enough space, so we know we'll succeed. */ if (before) { /* Check that before is inside Index. */ if (off >= used || off < le32_to_cpu(hdr->de_off) ||
off + le16_to_cpu(before->size) > total) { return NULL;
} goto ok;
} /* No insert point is applied. Get it manually. */
before = hdr_find_e(indx, hdr, de + 1, le16_to_cpu(de->key_size), ctx,
&diff); if (!before) return NULL;
off = PtrOffset(hdr, before);
ok: /* Now we just make room for the entry and jam it in. */
memmove(Add2Ptr(before, de_size), before, used - off);
hdr->used = cpu_to_le32(used + de_size);
memcpy(before, de, de_size);
return before;
}
/*
 * NOTE(review): this function is TRUNCATED in this chunk — only the header
 * validation and bounds checks survive; the compacting memmove, the
 * hdr->used update, and the return of the successor entry are missing
 * (extraction jumps into an unrelated fragment next). Code left
 * byte-identical; restore the tail from the full file before compiling.
 */
/* * hdr_delete_de - Remove an entry from the index buffer.
 */ staticinlinestruct NTFS_DE *hdr_delete_de(struct INDEX_HDR *hdr, struct NTFS_DE *re)
{
u32 used = le32_to_cpu(hdr->used);
u16 esize = le16_to_cpu(re->size);
/* bytes = size of the tail that follows the removed entry */
u32 off = PtrOffset(hdr, re); int bytes = used - (off + esize);
/* check INDEX_HDR valid before using INDEX_HDR */ if (!check_index_header(hdr, le32_to_cpu(hdr->total))) return NULL;
if (off >= used || esize < sizeof(struct NTFS_DE) ||
bytes < sizeof(struct NTFS_DE)) return NULL;
/* Check index record size. */ if (t32 < sbi->cluster_size) { /* Index record is smaller than a cluster, use 512 blocks. */ if (t32 != root->index_block_clst * SECTOR_SIZE) goto out;
/* Check alignment to a cluster. */ if ((sbi->cluster_size >> SECTOR_SHIFT) &
(root->index_block_clst - 1)) { goto out;
}
indx->vbn2vbo_bits = SECTOR_SHIFT;
} else { /* Index record must be a multiple of cluster size. */ if (t32 != root->index_block_clst << sbi->cluster_bits) goto out;
indx->vbn2vbo_bits = sbi->cluster_bits;
}
init_rwsem(&indx->run_lock);
indx->cmp = get_cmp_func(root); if (!indx->cmp) goto out;
/* Lookup entry that is <= to the search value. */
e = hdr_find_e(indx, &node->index->ihdr, key, key_len, ctx,
diff); if (!e) {
put_indx_node(node); return -EINVAL;
}
fnd_push(fnd, node, e);
}
*entry = e; return 0;
}
int indx_find_sort(struct ntfs_index *indx, struct ntfs_inode *ni, conststruct INDEX_ROOT *root, struct NTFS_DE **entry, struct ntfs_fnd *fnd)
{ int err; struct indx_node *n = NULL; struct NTFS_DE *e;
size_t iter = 0; int level = fnd->level;
if (!*entry) { /* Start find. */
e = hdr_first_de(&root->ihdr); if (!e) return 0;
fnd_clear(fnd);
fnd->root_de = e;
} elseif (!level) { if (de_is_last(fnd->root_de)) {
*entry = NULL; return 0;
}
e = hdr_next_de(&root->ihdr, fnd->root_de); if (!e) return -EINVAL;
fnd->root_de = e;
} else {
n = fnd->nodes[level - 1];
e = fnd->de[level - 1];
if (de_is_last(e)) goto pop_level;
e = hdr_next_de(&n->index->ihdr, e); if (!e) return -EINVAL;
fnd->de[level - 1] = e;
}
/* Just to avoid tree cycle. */
next_iter: if (iter++ >= 1000) return -EINVAL;
while (de_has_vcn_ex(e)) { if (le16_to_cpu(e->size) < sizeof(struct NTFS_DE) + sizeof(u64)) { if (n) {
fnd_pop(fnd);
kfree(n);
} return -EINVAL;
}
/* Read next level. */
err = indx_read(indx, ni, de_get_vbn(e), &n); if (err) return err;
/* Try next level. */
e = hdr_first_de(&n->index->ihdr); if (!e) {
kfree(n); return -EINVAL;
}
/* Use non sorted algorithm. */ if (!*entry) { /* This is the first call. */
e = hdr_first_de(&root->ihdr); if (!e) return 0;
fnd_clear(fnd);
fnd->root_de = e;
/* The first call with setup of initial element. */ if (*off >= record_size) {
next_vbn = (((*off - record_size) >> indx->index_bits))
<< indx->idx2vbn_bits; /* Jump inside cycle 'for'. */ goto next;
}
for (;;) { /* Check if current entry can be used. */ if (e && le16_to_cpu(e->size) > sizeof(struct NTFS_DE)) goto ok;
if (!fnd->level) { /* Continue to enumerate root. */ if (!de_is_last(fnd->root_de)) {
e = hdr_next_de(&root->ihdr, fnd->root_de); if (!e) return -EINVAL;
fnd->root_de = e; continue;
}
/* Start to enumerate indexes from 0. */
next_vbn = 0;
} else { /* Continue to enumerate indexes. */
e2 = fnd->de[fnd->level - 1];
n = fnd->nodes[fnd->level - 1];
if (!de_is_last(e2)) {
e = hdr_next_de(&n->index->ihdr, e2); if (!e) return -EINVAL;
fnd->de[fnd->level - 1] = e; continue;
}
/* Continue with next index. */
next_vbn = le64_to_cpu(n->index->vbn) +
root->index_block_clst;
}
next: /* Release current index. */ if (n) {
fnd_pop(fnd);
put_indx_node(n);
n = NULL;
}
/* Skip all free indexes. */
bit = next_vbn >> indx->idx2vbn_bits;
err = indx_used_bit(indx, ni, &bit); if (err == -ENOENT || bit == MINUS_ONE_T) { /* No used indexes. */
*entry = NULL; return 0;
}
next_used_vbn = bit << indx->idx2vbn_bits;
/* Read buffer into memory. */
err = indx_read(indx, ni, next_used_vbn, &n); if (err) return err;
e = hdr_first_de(&n->index->ihdr);
fnd_push(fnd, n, e); if (!e) return -EINVAL;
}
ok: /* Return offset to restore enumerator if necessary. */ if (!n) { /* 'e' points in root, */
*off = PtrOffset(&root->ihdr, e);
} else { /* 'e' points in index, */
*off = (le64_to_cpu(n->index->vbn) << indx->vbn2vbo_bits) +
record_size + PtrOffset(&n->index->ihdr, e);
}
/* * Index blocks exist, but $BITMAP has zero valid bits. * This implies an on-disk corruption and must be rejected.
*/ if (in->name == I30_NAME &&
unlikely(bmp_size_v == 0 && indx->alloc_run.count)) {
err = -EINVAL; goto out1;
}
/* Now we can create/format the new buffer and copy the entries into. */
n = indx_new(indx, ni, new_vbn, sub_vbn); if (IS_ERR(n)) {
err = PTR_ERR(n); goto out_free_re;
}
/* Check if we can insert new entry new index buffer. */ if (hdr_used + new_de_size > hdr_total) { /* * This occurs if MFT record is the same or bigger than index * buffer. Move all root new index and have no space to add * new entry classic case when MFT record is 1K and index * buffer 4K the problem should not occurs.
*/
kfree(re);
indx_write(indx, ni, n, 0);
/* * Now root is a parent for new index buffer. * Insert NewEntry a new buffer.
*/
e = hdr_insert_de(indx, hdr, new_de, NULL, ctx); if (!e) {
err = -EINVAL; goto out_put_n;
}
fnd_push(fnd, n, e);
/* Just write updates index into disk. */
indx_write(indx, ni, n, 0);
/* * indx_insert_into_buffer * * Attempt to insert an entry into an Index Allocation Buffer. * If necessary, it will split the buffer.
*/ staticint
indx_insert_into_buffer(struct ntfs_index *indx, struct ntfs_inode *ni, struct INDEX_ROOT *root, conststruct NTFS_DE *new_de, constvoid *ctx, int level, struct ntfs_fnd *fnd)
{ int err; conststruct NTFS_DE *sp; struct NTFS_DE *e, *de_t, *up_e; struct indx_node *n2; struct indx_node *n1 = fnd->nodes[level]; struct INDEX_HDR *hdr1 = &n1->index->ihdr; struct INDEX_HDR *hdr2;
u32 to_copy, used, used1;
CLST new_vbn;
__le64 t_vbn, *sub_vbn;
u16 sp_size; void *hdr1_saved = NULL;
/* Try the most easy case. */
e = fnd->level - 1 == level ? fnd->de[level] : NULL;
e = hdr_insert_de(indx, hdr1, new_de, e, ctx);
fnd->de[level] = e; if (e) { /* Just write updated index into disk. */
indx_write(indx, ni, n1, 0); return 0;
}
/* * No space to insert into buffer. Split it. * To split we: * - Save split point ('cause index buffers will be changed) * - Allocate NewBuffer and copy all entries <= sp into new buffer * - Remove all entries (sp including) from TargetBuffer * - Insert NewEntry into left or right buffer (depending on sp <=> * NewEntry) * - Insert sp into parent buffer (or root) * - Make sp a parent for new buffer
*/
sp = hdr_find_split(hdr1); if (!sp) return -EINVAL;
if (fnd_is_empty(fnd)) { /* * Find the spot the tree where we want to * insert the new entry.
*/
err = indx_find(indx, ni, root, new_de + 1,
le16_to_cpu(new_de->key_size), ctx, &diff, &e,
fnd); if (err) goto out;
if (!diff) {
err = -EEXIST; goto out;
}
}
if (!fnd->level) { /* * The root is also a leaf, so we'll insert the * new entry into it.
*/
err = indx_insert_into_root(indx, ni, new_de, fnd->root_de, ctx,
fnd, undo);
} else { /* * Found a leaf buffer, so we'll insert the new entry into it.
*/
err = indx_insert_into_buffer(indx, ni, root, new_de, ctx,
fnd->level - 1, fnd);
}
out:
fnd_put(fnd_a);
out1: return err;
}
/* * indx_find_buffer - Locate a buffer from the tree.
*/ staticstruct indx_node *indx_find_buffer(struct ntfs_index *indx, struct ntfs_inode *ni, conststruct INDEX_ROOT *root,
__le64 vbn, struct indx_node *n)
{ int err; conststruct NTFS_DE *e; struct indx_node *r; conststruct INDEX_HDR *hdr = n ? &n->index->ihdr : &root->ihdr;
/* Step 1: Scan one level. */ for (e = hdr_first_de(hdr);; e = hdr_next_de(hdr, e)) { if (!e) return ERR_PTR(-EINVAL);
if (de_has_vcn(e) && vbn == de_get_vbn_le(e)) return n;
if (de_is_last(e)) break;
}
/* Step2: Do recursion. */
e = Add2Ptr(hdr, le32_to_cpu(hdr->de_off)); for (;;) { if (de_has_vcn_ex(e)) {
err = indx_read(indx, ni, de_get_vbn(e), &n); if (err) return ERR_PTR(err);
r = indx_find_buffer(indx, ni, root, vbn, n); if (r) return r;
}
err = indx_read(indx, ni, vbn, &n); if (err) return err;
hdr = &n->index->ihdr; /* First, recurse into the children, if any. */ if (hdr_has_subnode(hdr)) { for (e = hdr_first_de(hdr); e; e = hdr_next_de(hdr, e)) {
indx_free_children(indx, ni, e, false); if (de_is_last(e)) break;
}
}
put_indx_node(n);
i = vbn >> indx->idx2vbn_bits; /* * We've gotten rid of the children; add this buffer to the free list.
*/
indx_mark_free(indx, ni, i);
if (!trim) return 0;
/* * If there are no used indexes after current free index * then we can truncate allocation and bitmap. * Use bitmap to estimate the case.
*/
indx_shrink(indx, ni, i + 1); return 0;
}
/* * indx_get_entry_to_replace * * Find a replacement entry for a deleted entry. * Always returns a node entry: * NTFS_IE_HAS_SUBNODES is set the flags and the size includes the sub_vcn.
*/ staticint indx_get_entry_to_replace(struct ntfs_index *indx, struct ntfs_inode *ni, conststruct NTFS_DE *de_next, struct NTFS_DE **de_to_replace, struct ntfs_fnd *fnd)
{ int err; int level = -1;
CLST vbn; struct NTFS_DE *e, *te, *re; struct indx_node *n; struct INDEX_BUFFER *ib;
*de_to_replace = NULL;
/* Find first leaf entry down from de_next. */
vbn = de_get_vbn(de_next); for (;;) {
n = NULL;
err = indx_read(indx, ni, vbn, &n); if (err) goto out;
e = hdr_first_de(&n->index->ihdr);
fnd_push(fnd, n, e); if (!e) {
err = -EINVAL; goto out;
}
if (!de_is_last(e)) { /* * This buffer is non-empty, so its first entry * could be used as the replacement entry.
*/
level = fnd->level - 1;
}
if (!de_has_vcn(e)) break;
/* This buffer is a node. Continue to go down. */
vbn = de_get_vbn(e);
}
if (level == -1) goto out;
n = fnd->nodes[level];
te = hdr_first_de(&n->index->ihdr); if (!te) {
err = -EINVAL; goto out;
} /* Copy the candidate entry into the replacement entry buffer. */
re = kmalloc(le16_to_cpu(te->size) + sizeof(u64), GFP_NOFS); if (!re) {
err = -ENOMEM; goto out;
}
if (!de_has_vcn(re)) { /* * The replacement entry we found doesn't have a sub_vcn. * increase its size to hold one.
*/
le16_add_cpu(&re->size, sizeof(u64));
re->flags |= NTFS_IE_HAS_SUBNODES;
} else { /* * The replacement entry we found was a node entry, which * means that all its child buffers are empty. Return them * to the free pool.
*/
indx_free_children(indx, ni, te, true);
}
/* * Expunge the replacement entry from its former location, * and then write that buffer.
*/
ib = n->index;
e = hdr_delete_de(&ib->ihdr, te);
fnd->de[level] = e;
indx_write(indx, ni, n, 0);
if (ib_is_leaf(ib) && ib_is_empty(ib)) { /* An empty leaf. */ return 0;
}
/* * Check to see if removing that entry made * the leaf empty.
*/ if (ib_is_leaf(ib) && ib_is_empty(ib)) {
fnd_pop(fnd);
fnd_push(fnd2, n, e);
}
} else { /* * The entry we wish to delete is a node buffer, so we * have to find a replacement for it.
*/
next = de_get_next(e);
if (re) {
de_set_vbn_le(re, de_get_vbn_le(e));
hdr_delete_de(hdr, e);
err = level ? indx_insert_into_buffer(indx, ni, root,
re, ctx,
fnd->level - 1,
fnd) :
indx_insert_into_root(indx, ni, re, e,
ctx, fnd, 0);
kfree(re);
if (err) goto out;
} else { /* * There is no replacement for the current entry. * This means that the subtree rooted at its node * is empty, and can be deleted, which turn means * that the node can just inherit the deleted * entry sub_vcn.
*/
indx_free_children(indx, ni, next, true);
de_set_vbn_le(next, de_get_vbn_le(e));
hdr_delete_de(hdr, e); if (level) {
indx_write(indx, ni, n, 0);
} else {
hdr->total = hdr->used;
e = hdr_first_de(hdr); if (!e) {
err = -EINVAL; goto out;
}
if (hdr != &root->ihdr || !de_is_last(e)) {
prev = NULL; while (!de_is_last(e)) { if (de_has_vcn(e) && sub_vbn == de_get_vbn_le(e)) break;
prev = e;
e = hdr_next_de(hdr, e); if (!e) {
err = -EINVAL; goto out;
}
}
if (sub_vbn != de_get_vbn_le(e)) { /* * Didn't find the parent entry, although this buffer * is the parent trail. Something is corrupt.
*/
err = -EINVAL; goto out;
}
if (de_is_last(e)) { /* * Since we can't remove the end entry, we'll remove * its predecessor instead. This means we have to * transfer the predecessor's sub_vcn to the end entry. * Note: This index block is not empty, so the * predecessor must exist.
*/ if (!prev) {
err = -EINVAL; goto out;
}
/* * Copy the current entry into a temporary buffer (stripping * off its down-pointer, if any) and delete it from the current * buffer or root, as appropriate.
*/
e_size = le16_to_cpu(e->size);
me = kmemdup(e, e_size, GFP_NOFS); if (!me) {
err = -ENOMEM; goto out;
}
if (de_has_vcn(me)) {
me->flags &= ~NTFS_IE_HAS_SUBNODES;
le16_sub_cpu(&me->size, sizeof(u64));
}
/* * Re-insert the entry into the tree. * Find the spot the tree where we want to insert the new entry.
*/
err = indx_insert_entry(indx, ni, me, ctx, fnd, 0);
kfree(me); if (err) goto out;
if (trim_bit != -1)
indx_shrink(indx, ni, trim_bit);
} else { /* * This tree needs to be collapsed down to an empty root. * Recreate the index root as an empty leaf and free all * the bits the index allocation bitmap.
*/
fnd_clear(fnd);
fnd_clear(fnd2);
/*
 * NOTE(review): trailing extraction artifact — a German website disclaimer,
 * not part of this source. English translation kept for the record:
 * "The information on this web page was carefully compiled to the best of
 * our knowledge. However, no guarantee is given as to the completeness,
 * correctness, or quality of the information provided.
 * Remark: the colored syntax display and the measurement are still
 * experimental."
 */