// SPDX-License-Identifier: GPL-2.0
/*
 *
 * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
 *
 * TODO: try to use extents tree (instead of array)
 */
/*
 * run_lookup - Lookup the index of a MCB entry that is first <= vcn.
 *
 * Case of success it will return non-zero value and set
 * @index parameter to index of entry been found.
 * Case of entry missing from list 'index' will be set to
 * point to insertion position for the entry question.
 *
 * NOTE(review): this chunk looks truncated - 'min_idx'/'max_idx'/'mid_idx'
 * are prepared but the binary-search loop, the upper-boundary shortcut and
 * the function's closing brace are not visible here; verify against the
 * complete file.
 */
static bool run_lookup(const struct runs_tree *run, CLST vcn, size_t *index)
{
	size_t min_idx, max_idx, mid_idx;
	struct ntfs_run *r;

	/* Empty tree: report "not found", insertion position 0. */
	if (!run->count) {
		*index = 0;
		return false;
	}

	min_idx = 0;
	max_idx = run->count - 1;

	/* Check boundary cases specially, 'cause they cover the often requests. */
	r = run->runs;
	if (vcn < r->vcn) {
		/* vcn precedes the very first run: insertion position 0. */
		*index = 0;
		return false;
	}
/*
 * run_consolidate - Consolidate runs starting from a given one.
 *
 * NOTE(review): this chunk looks truncated - the 'if (dl > 0) {' block
 * below is never closed before the next statement, and the tail of the
 * loop (bookkeeping after the memmove(), e.g. decrementing run->count,
 * plus the closing braces) is missing; verify against the complete file.
 */
static void run_consolidate(struct runs_tree *run, size_t index)
{
	size_t i;
	struct ntfs_run *r = run->runs + index;

	while (index + 1 < run->count) {
		/*
		 * I should merge current run with next
		 * if start of the next run lies inside one being tested.
		 */
		struct ntfs_run *n = r + 1;
		CLST end = r->vcn + r->len;
		CLST dl;

		/* Stop if runs are not aligned one to another. */
		if (n->vcn > end)
			break;

		dl = end - n->vcn;

		/*
		 * If range at index overlaps with next one
		 * then I will either adjust it's start position
		 * or (if completely matches) dust remove one from the list.
		 */
		if (dl > 0) {
			if (n->len <= dl)
				goto remove_next_range;

		/* NOTE(review): adjustment of n->vcn/n->len and the closing
		 * brace of the 'dl > 0' block appear to be missing here. */

		/*
		 * Stop if sparse mode does not match
		 * both current and next runs.
		 */
		if ((n->lcn == SPARSE_LCN) != (r->lcn == SPARSE_LCN)) {
			index += 1;
			r = n;
			continue;
		}

		/*
		 * Check if volume block
		 * of a next run lcn does not match
		 * last volume block of the current run.
		 */
		if (n->lcn != SPARSE_LCN && n->lcn != r->lcn + r->len)
			break;

		/*
		 * Next and current are siblings.
		 * Eat/join.
		 */
		r->len += n->len - dl;

remove_next_range:
		i = run->count - (index + 1);
		if (i > 1)
			memmove(n, n + 1, sizeof(*n) * (i - 1));
/*
 * run_truncate - Decommit the range after vcn.
 *
 * Every run starting at or beyond @vcn is dropped; a run that straddles
 * @vcn is shortened so that it ends exactly at @vcn. When nothing
 * survives, the backing array is released as well.
 */
void run_truncate(struct runs_tree *run, CLST vcn)
{
	size_t keep;

	/*
	 * If @vcn falls inside an existing run, clip that run so it stops
	 * right before @vcn; a run clipped down to zero length is thrown
	 * away together with everything after it.
	 */
	if (run_lookup(run, vcn, &keep)) {
		struct ntfs_run *hit = run->runs + keep;

		hit->len = vcn - hit->vcn;
		if (hit->len > 0)
			keep += 1;
	}

	/*
	 * 'keep' is now the first position to discard (inclusive);
	 * simply lowering the limit drops the rest.
	 */
	run->count = keep;

	/* Do not reallocate array 'runs'. Only free if possible. */
	if (!keep) {
		kvfree(run->runs);
		run->runs = NULL;
		run->allocated = 0;
	}
}
/*
 * run_truncate_around - Trim head and tail if necessary.
 *
 * NOTE(review): truncated in this chunk - only the run_truncate_head()
 * call is visible; the tail-trimming part and the closing brace are
 * missing. Verify against the complete file.
 */
void run_truncate_around(struct runs_tree *run, CLST vcn)
{
	run_truncate_head(run, vcn);
/*
 * run_add_entry
 *
 * Sets location to known state.
 * Run to be added may overlap with existing location.
 *
 * Return: false if out of memory.
 *
 * NOTE(review): this chunk looks truncated in at least two places - the
 * 'if (!inrange && index > 0)' block below is never closed, and the
 * reallocation branch computes 'bytes' but the actual allocation and
 * insertion code (users of 'new_ptr', 'should_add_tail',
 * 'tail_vcn'/'tail_lcn'/'tail_len') is missing. 'r' is also read before
 * any visible assignment. Verify against the complete file.
 */
bool run_add_entry(struct runs_tree *run, CLST vcn, CLST lcn, CLST len,
		   bool is_mft)
{
	size_t used, index;
	struct ntfs_run *r;
	bool inrange;
	CLST tail_vcn = 0, tail_len = 0, tail_lcn = 0;
	bool should_add_tail = false;

	/*
	 * Lookup the insertion point.
	 *
	 * Execute bsearch for the entry containing
	 * start position question.
	 */
	inrange = run_lookup(run, vcn, &index);

	/*
	 * Shortcut here would be case of
	 * range not been found but one been added
	 * continues previous run.
	 * This case I can directly make use of
	 * existing range as my start point.
	 */
	if (!inrange && index > 0) {
		struct ntfs_run *t = run->runs + index - 1;

	/* NOTE(review): the body of the block above is missing here. */

	/*
	 * At this point 'index' either points to the range
	 * containing start position or to the insertion position
	 * for a new range.
	 * So first let's check if range I'm probing is here already.
	 */
	if (!inrange) {
requires_new_range:
		/*
		 * Range was not found.
		 * Insert at position 'index'
		 */
		used = run->count * sizeof(struct ntfs_run);

		/*
		 * Check allocated space.
		 * If one is not enough to get one more entry
		 * then it will be reallocated.
		 */
		if (run->allocated < used + sizeof(struct ntfs_run)) {
			size_t bytes;
			struct ntfs_run *new_ptr;

			/* Use power of 2 for 'bytes'. */
			if (!used) {
				bytes = 64;
			} else if (used <= 16 * PAGE_SIZE) {
				if (is_power_of_2(run->allocated))
					bytes = run->allocated << 1;
				else
					bytes = (size_t)1
						<< (2 + blksize_bits(used));
			} else {
				bytes = run->allocated + (16 * PAGE_SIZE);
			}

	/* NOTE(review): the reallocation/insertion code is missing here. */

	/*
	 * If one of ranges was not allocated then we
	 * have to split location we just matched and
	 * insert current one.
	 * A common case this requires tail to be reinserted
	 * a recursive call.
	 */
	if (((lcn == SPARSE_LCN) != (r->lcn == SPARSE_LCN)) ||
	    (lcn != SPARSE_LCN && lcn != r->lcn + (vcn - r->vcn))) {
		CLST to_eat = vcn - r->vcn;
		CLST Tovcn = to_eat + len;

		if (to_eat > 0) {
			r->len = to_eat;
			inrange = false;
			index += 1;
			goto requires_new_range;
		}

		/* lcn should match one were going to add. */
		r->lcn = lcn;
	}

	/*
	 * If existing range fits then were done.
	 * Otherwise extend found one and fall back to range jocode.
	 */
	if (r->vcn + r->len < vcn + len)
		r->len += len - ((r->vcn + r->len) - vcn);
	}

	/*
	 * And normalize it starting from insertion point.
	 * It's possible that no insertion needed case if
	 * start point lies within the range of an entry
	 * that 'index' points to.
	 */
	if (inrange && index > 0)
		index -= 1;
	run_consolidate(run, index);
	run_consolidate(run, index + 1);

	/*
	 * A special case.
	 * We have to add extra range a tail.
	 */
	if (should_add_tail &&
	    !run_add_entry(run, tail_vcn, tail_lcn, tail_len, is_mft))
		return false;

	return true;
}
/* run_collapse_range
 *
 * Helper for attr_collapse_range(),
 * which is helper for fallocate(collapse_range).
 *
 * NOTE(review): this chunk looks truncated - 'eat', 'eat_start' and
 * 'eat_end' are maintained but the code that actually removes the eaten
 * entries (memmove + count update) and the final return are missing
 * after the loop. Verify against the complete file.
 */
bool run_collapse_range(struct runs_tree *run, CLST vcn, CLST len)
{
	size_t index, eat;
	struct ntfs_run *r, *e, *eat_start, *eat_end;
	CLST end;

	if (WARN_ON(!run_lookup(run, vcn, &index)))
		return true; /* Should never be here. */

	e = run->runs + run->count;
	r = run->runs + index;
	end = vcn + len;

	if (vcn > r->vcn) {
		if (r->vcn + r->len <= end) {
			/* Collapse tail of run .*/
			r->len = vcn - r->vcn;
		} else if (r->lcn == SPARSE_LCN) {
			/* Collapse a middle part of sparsed run. */
			r->len -= len;
		} else {
			/* Collapse a middle part of normal run, split. */
			if (!run_add_entry(run, vcn, SPARSE_LCN, len, false))
				return false;
			return run_collapse_range(run, vcn, len);
		}

		r += 1;
	}

	eat_start = r;
	eat_end = r;

	for (; r < e; r++) {
		CLST d;

		/* Runs entirely after the collapsed range shift left by 'len'. */
		if (r->vcn >= end) {
			r->vcn -= len;
			continue;
		}

		if (r->vcn + r->len <= end) {
			/* Eat this run. */
			eat_end = r + 1;
			continue;
		}

		/* Partially-covered run: trim its head, then shift it. */
		d = end - r->vcn;
		if (r->lcn != SPARSE_LCN)
			r->lcn += d;
		r->len -= d;
		r->vcn -= len - d;
	}
	/*
	 * NOTE(review): the head of this function is missing from this
	 * chunk. From the body it appears to compute how many bytes are
	 * needed to store the signed value 'n' in truncated little-endian
	 * form: 'p' presumably starts at (const u8 *)&n and is advanced
	 * towards the most significant byte that still carries information,
	 * plus one extra byte when the top bit would be misread as a sign
	 * bit - confirm against the complete file.
	 */
	if (n >= 0) {
		/* Non-negative: find the highest non-zero byte. */
		if (p[7] || p[6] || p[5] || p[4])
			p += 4;
		if (p[3] || p[2])
			p += 2;
		if (p[1])
			p += 1;
		/* One more byte if the top bit would look negative. */
		if (p[0] & 0x80)
			p += 1;
	} else {
		/* Negative: find the highest byte that is not 0xff filler. */
		if (p[7] != 0xff || p[6] != 0xff || p[5] != 0xff ||
		    p[4] != 0xff)
			p += 4;
		if (p[3] != 0xff || p[2] != 0xff)
			p += 2;
		if (p[1] != 0xff)
			p += 1;
		/* One more byte if the top bit would look non-negative. */
		if (!(p[0] & 0x80))
			p += 1;
	}

	return 1 + p - (const u8 *)&n;
}
/*
 * run_pack_s64 - Store the low 'size' bytes of @v into @run_buf.
 *
 * Full trusted function. It does not check 'size' for errors.
 * Bytes are written in the value's in-memory (little-endian on-disk)
 * order; sizes above 8 are silently ignored, exactly as the original
 * switch with no matching case.
 */
static inline void run_pack_s64(u8 *run_buf, u8 size, s64 v)
{
	const u8 *src = (u8 *)&v;
	u8 i;

	if (size > 8)
		return;

	for (i = 0; i < size; i++)
		run_buf[i] = src[i];
}
/* full trusted function. It does not check 'size' for errors */ staticinline s64 run_unpack_s64(const u8 *run_buf, u8 size, s64 v)
{
u8 *p = (u8 *)&v;
/* memcpy( &v, run_buf, size); Is it faster? */ switch (size) { case 8:
p[7] = run_buf[7];
fallthrough; case 7:
p[6] = run_buf[6];
fallthrough; case 6:
p[5] = run_buf[5];
fallthrough; case 5:
p[4] = run_buf[4];
fallthrough; case 4:
p[3] = run_buf[3];
fallthrough; case 3:
p[2] = run_buf[2];
fallthrough; case 2:
p[1] = run_buf[1];
fallthrough; case 1:
p[0] = run_buf[0];
} return v;
} #endif
/*
 * run_pack - Pack runs into buffer.
 *
 * packed_vcns - How much runs we have packed.
 * packed_size - How much bytes we have used run_buf.
 *
 * NOTE(review): this chunk is badly truncated. After the setup below the
 * text jumps straight into what appears to be the tail of a different
 * function (run_unpack(): it reads packed bytes, validates lcn against
 * the volume and calls run_add_entry()). Variables such as 'run_last',
 * 'vcn64', 'sbi', 'evcn', 'is_mft' and 'run_0' have no visible
 * declarations here. Verify against the complete file.
 */
int run_pack(const struct runs_tree *run, CLST svcn, CLST len, u8 *run_buf,
	     u32 run_buf_size, CLST *packed_vcns)
{
	CLST next_vcn, vcn, lcn;
	CLST prev_lcn = 0;
	CLST evcn1 = svcn + len;
	const struct ntfs_run *r, *r_end;
	int packed_size = 0;
	size_t i;
	s64 dlcn;
	int offset_size, size_size, tmp;

	*packed_vcns = 0;

	if (!len)
		goto out;

	/* Check all required entries [svcn, encv1) available. */
	if (!run_lookup(run, svcn, &i))
		return -ENOENT;

	r_end = run->runs + run->count;
	r = run->runs + i;

	/* NOTE(review): from here on the text belongs to another function. */

	/* Read all runs the chain. */
	while (run_buf < run_last) {
		/* size_size - How much bytes is packed len. */
		u8 size_size = *run_buf & 0xF;
		/* offset_size - How much bytes is packed dlcn. */
		u8 offset_size = *run_buf++ >> 4;
		u64 len;

		/* A zero header byte terminates the mapping pairs array. */
		if (!size_size)
			break;

		/*
		 * Unpack runs.
		 * NOTE: Runs are stored little endian order
		 * "len" is unsigned value, "dlcn" is signed.
		 * Large positive number requires to store 5 bytes
		 * e.g.: 05 FF 7E FF FF 00 00 00
		 */
		if (size_size > sizeof(len))
			return -EINVAL;

		/* NOTE(review): the code unpacking len/dlcn/lcn is missing here. */

#ifndef CONFIG_NTFS3_64BIT_CLUSTER
		if (next_vcn > 0x100000000ull || (lcn + len) > 0x100000000ull) {
			ntfs_err(
				sbi->sb,
				"This driver is compiled without CONFIG_NTFS3_64BIT_CLUSTER (like windows driver).\n"
				"Volume contains 64 bits run: vcn %llx, lcn %llx, len %llx.\n"
				"Activate CONFIG_NTFS3_64BIT_CLUSTER to process this case",
				vcn64, lcn, len);
			return -EOPNOTSUPP;
		}
#endif
		if (lcn != SPARSE_LCN64 && lcn + len > sbi->used.bitmap.nbits) {
			/* LCN range is out of volume. */
			return -EINVAL;
		}

		if (!run)
			; /* Called from check_attr(fslog.c) to check run. */
		else if (run == RUN_DEALLOCATE) {
			/*
			 * Called from ni_delete_all to free clusters
			 * without storing in run.
			 */
			if (lcn != SPARSE_LCN64)
				mark_as_free_ex(sbi, lcn, len, true);
		} else if (vcn64 >= vcn) {
			if (!run_add_entry(run, vcn64, lcn, len, is_mft))
				return -ENOMEM;
		} else if (next_vcn > vcn) {
			/* Keep only the part of the run at or after 'vcn'. */
			u64 dlen = vcn - vcn64;

			if (!run_add_entry(run, vcn, lcn + dlen, len - dlen,
					   is_mft))
				return -ENOMEM;
		}

		vcn64 = next_vcn;
	}

	if (vcn64 != evcn + 1) {
		/* Not expected length of unpacked runs. */
		return -EINVAL;
	}

	return run_buf - run_0;
}
#ifdef NTFS3_CHECK_FREE_CLST
/*
 * run_unpack_ex - Unpack packed runs from "run_buf".
 *
 * Checks unpacked runs to be used in bitmap.
 *
 * Return: Error if negative, or real used bytes.
 */
int run_unpack_ex(struct runs_tree *run, struct ntfs_sb_info *sbi, CLST ino,
		  CLST svcn, CLST evcn, CLST vcn, const u8 *run_buf,
		  int run_buf_size)
{
	int ret, err;
	CLST next_vcn, lcn, len;
	size_t index, done;
	bool ok, zone;
	struct wnd_bitmap *wnd;

	/* Plain unpack first; propagate errors and empty results as-is. */
	ret = run_unpack(run, sbi, ino, svcn, evcn, vcn, run_buf, run_buf_size);
	if (ret <= 0)
		return ret;

	/* Nothing to cross-check without a bitmap or an in-memory run. */
	if (!sbi->used.bitmap.sb || !run || run == RUN_DEALLOCATE)
		return ret;

	/* The MFT_REC_BADCLUST record is exempt from the bitmap check. */
	if (ino == MFT_REC_BADCLUST)
		return ret;

	next_vcn = vcn = svcn;
	wnd = &sbi->used.bitmap;

	/* Walk every unpacked fragment covering [svcn, evcn]. */
	for (ok = run_lookup_entry(run, vcn, &lcn, &len, &index);
	     next_vcn <= evcn;
	     ok = run_get_entry(run, ++index, &vcn, &lcn, &len)) {
		/* Fragments must be contiguous in vcn space. */
		if (!ok || next_vcn != vcn)
			return -EINVAL;

		next_vcn = vcn + len;

		/* Sparse fragments occupy no clusters on disk. */
		if (lcn == SPARSE_LCN)
			continue;

		/* Skip checks while NTFS_FLAGS_NEED_REPLAY is pending. */
		if (sbi->flags & NTFS_FLAGS_NEED_REPLAY)
			continue;

		down_read_nested(&wnd->rw_lock, BITMAP_MUTEX_CLUSTERS);
		/* true if [lcn, lcn + len) intersects the reserved zone. */
		zone = max(wnd->zone_bit, lcn) < min(wnd->zone_end, lcn + len);
		/* Check for free blocks. */
		ok = !zone && wnd_is_used(wnd, lcn, len);
		up_read(&wnd->rw_lock);
		if (ok)
			continue;

		/* Looks like volume is corrupted. */
		ntfs_set_state(sbi, NTFS_DIRTY_ERROR);

		/* Best effort: skip the repair if the lock is contended. */
		if (!down_write_trylock(&wnd->rw_lock))
			continue;

		if (zone) {
			/*
			 * Range [lcn, lcn + len) intersects with zone.
			 * To avoid complex with zone just turn it off.
			 */
			wnd_zone_set(wnd, 0, 0);
		}

		/* Mark all zero bits as used in range [lcn, lcn+len). */
		err = wnd_set_used_safe(wnd, lcn, len, &done);
		if (zone) {
			/* Restore zone. Lock mft run. */
			struct rw_semaphore *lock =
				is_mounted(sbi) ? &sbi->mft.ni->file.run_lock :
						  NULL;
			if (lock)
				down_read(lock);
			ntfs_refresh_zone(sbi);
			if (lock)
				up_read(lock);
		}
		up_write(&wnd->rw_lock);
		if (err)
			return err;
	}

	return ret;
}
#endif
/*
 * run_get_highest_vcn
 *
 * Return the highest vcn from a mapping pairs array
 * it used while replaying log file.
 *
 * NOTE(review): truncated in this chunk - only the local-variable setup
 * is visible; the parsing loop and the rest of the function are missing
 * (the text that follows is unrelated extraction residue). Verify
 * against the complete file.
 */
int run_get_highest_vcn(CLST vcn, const u8 *run_buf, u64 *highest_vcn)
{
	u64 vcn64 = vcn;
	u8 size_size;
¤ Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.0.31Bemerkung:
(vorverarbeitet)
¤
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.