// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 Red Hat, Inc.
 *
 * Author: Mikulas Patocka <mpatocka@redhat.com>
 *
 * Based on Chromium dm-verity driver (C) 2011 The Chromium OS Authors
 *
 * In the file "/sys/module/dm_verity/parameters/prefetch_cluster" you can set
 * default prefetch value. Data are read in "prefetch_cluster" chunks from the
 * hash device. Setting this greatly improves performance when data and hash
 * are on the same disk on different partitions on devices with poor random
 * access behavior.
 */
/*
 * Auxiliary structure appended to each dm-bufio buffer. If the value
 * hash_verified is nonzero, hash of the block has been verified.
 *
 * The variable hash_verified is set to 0 when allocating the buffer, then
 * it can be changed to 1 and it is never reset to 0 again.
 *
 * There is no lock around this value, a race condition can at worst cause
 * that multiple processes verify the hash of the same buffer simultaneously
 * and write 1 to hash_verified simultaneously.
 * This condition is harmless, so we don't need locking.
 */
struct buffer_aux {
	/* 0 = not yet verified, 1 = verified; monotonic 0 -> 1, see above */
	int hash_verified;
};
/*
 * Initialize struct buffer_aux for a freshly created buffer.
 * (Fixed the fused "staticvoid" token from the corrupted source.)
 */
static void dm_bufio_alloc_callback(struct dm_buffer *buf)
{
	struct buffer_aux *aux = dm_bufio_get_aux_data(buf);

	/* New buffers start out unverified; set to 1 only after hashing. */
	aux->hash_verified = 0;
}
/*
 * Translate input sector number to the sector number on the target device.
 */
static sector_t verity_map_sector(struct dm_verity *v, sector_t bi_sector)
{
	sector_t mapped = dm_target_offset(v->ti, bi_sector);

	return mapped;
}
/*
 * Return hash position of a specified block at a specified tree level
 * (0 is the lowest level).
 * The lowest "hash_per_block_bits"-bits of the result denote hash position
 * inside a hash block. The remaining bits denote location of the hash block.
 */
static sector_t verity_position_at_level(struct dm_verity *v, sector_t block,
					 int level)
{
	int shift = level * v->hash_per_block_bits;

	return block >> shift;
}
/*
 * NOTE(review): SOURCE is truncated here -- this is the tail of an error
 * handler (presumably verity_handle_err); its opening lines are not visible
 * in this chunk.  The visible code selects the configured error behavior.
 */
out:
	/* Logging mode: report but pretend success. */
	if (v->mode == DM_VERITY_MODE_LOGGING)
		return 0;

	/* Restart mode: reboot the machine on corruption. */
	if (v->mode == DM_VERITY_MODE_RESTART)
		kernel_restart("dm-verity device corrupted");

	/* Panic mode: halt the kernel on corruption. */
	if (v->mode == DM_VERITY_MODE_PANIC)
		panic("dm-verity device corrupted");

	/* Default (EIO) mode: signal the error to the caller. */
	return 1;
}
/*
 * Verify hash of a metadata block pertaining to the specified data block
 * ("block" argument) at a specified level ("level" argument).
 *
 * On successful return, verity_io_want_digest(v, io) contains the hash value
 * for a lower tree level or for the data block (if we're at the lowest level).
 *
 * If "skip_unverified" is true, unverified buffer is skipped and 1 is returned.
 * If "skip_unverified" is false, unverified buffer is hashed and verified
 * against current value of verity_io_want_digest(v, io).
 */
static int verity_verify_level(struct dm_verity *v, struct dm_verity_io *io,
			       sector_t block, int level, bool skip_unverified,
			       u8 *want_digest)
{
	struct dm_buffer *buf;
	struct buffer_aux *aux;
	u8 *data;
	int r;
	sector_t hash_block;
	unsigned int offset;
	struct bio *bio = dm_bio_from_per_bio_data(io, v->ti->per_io_data_size);

	/*
	 * Translate (block, level) into the hash block number and the byte
	 * offset of the wanted digest inside that block.  This call was
	 * dropped by the corrupted source; without it, hash_block and offset
	 * are used uninitialized below (restored per upstream dm-verity).
	 */
	verity_hash_at_level(v, block, level, &hash_block, &offset);

	if (static_branch_unlikely(&use_bh_wq_enabled) && io->in_bh) {
		data = dm_bufio_get(v->bufio, hash_block, &buf);
		if (IS_ERR_OR_NULL(data)) {
			/*
			 * In tasklet and the hash was not in the bufio cache.
			 * Return early and resume execution from a work-queue
			 * to read the hash from disk.
			 */
			return -EAGAIN;
		}
	} else {
		data = dm_bufio_read_with_ioprio(v->bufio, hash_block,
						 &buf, bio->bi_ioprio);
	}

	if (IS_ERR(data)) {
		if (skip_unverified)
			return 1;
		r = PTR_ERR(data);
		/* Read failed: allocate a new buffer and try FEC recovery. */
		data = dm_bufio_new(v->bufio, hash_block, &buf);
		if (IS_ERR(data))
			return r;
		if (verity_fec_decode(v, io, DM_VERITY_BLOCK_TYPE_METADATA,
				      hash_block, data) == 0) {
			aux = dm_bufio_get_aux_data(buf);
			aux->hash_verified = 1;
			goto release_ok;
		} else {
			dm_bufio_release(buf);
			dm_bufio_forget(v->bufio, hash_block);
			return r;
		}
	}

	aux = dm_bufio_get_aux_data(buf);

	if (!aux->hash_verified) {
		if (skip_unverified) {
			r = 1;
			goto release_ret_r;
		}

		r = verity_hash(v, io, data, 1 << v->hash_dev_block_bits,
				verity_io_real_digest(v, io));
		if (unlikely(r < 0))
			goto release_ret_r;

		if (likely(memcmp(verity_io_real_digest(v, io), want_digest,
				  v->digest_size) == 0))
			aux->hash_verified = 1;
		else if (static_branch_unlikely(&use_bh_wq_enabled) && io->in_bh) {
			/*
			 * Error handling code (FEC included) cannot be run in a
			 * tasklet since it may sleep, so fallback to work-queue.
			 */
			r = -EAGAIN;
			goto release_ret_r;
		} else if (verity_fec_decode(v, io, DM_VERITY_BLOCK_TYPE_METADATA,
					     hash_block, data) == 0)
			aux->hash_verified = 1;
		else if (verity_handle_err(v,
					   DM_VERITY_BLOCK_TYPE_METADATA,
					   hash_block)) {
			struct bio *bio;

			io->had_mismatch = true;
			bio = dm_bio_from_per_bio_data(io, v->ti->per_io_data_size);
			dm_audit_log_bio(DM_MSG_PREFIX, "verify-metadata", bio,
					 block, 0);
			r = -EIO;
			goto release_ret_r;
		}
	}

release_ok:
	/* Copy the wanted digest out of the (now verified) hash block. */
	data += offset;
	memcpy(want_digest, data, v->digest_size);
	r = 0;

release_ret_r:
	dm_bufio_release(buf);
	return r;
}
/*
 * Find a hash for a given block, write it to digest and verify the integrity
 * of the hash tree if necessary.
 */
int verity_hash_for_block(struct dm_verity *v, struct dm_verity_io *io,
			  sector_t block, u8 *digest, bool *is_zero)
{
	int r = 0, i;

	if (likely(v->levels)) {
		/*
		 * First, we try to get the requested hash for
		 * the current block. If the hash block itself is
		 * verified, zero is returned. If it isn't, this
		 * function returns 1 and we fall back to whole
		 * chain verification.
		 */
		r = verity_verify_level(v, io, block, 0, true, digest);
		if (likely(r <= 0))
			goto out;
	}

	/* Fall back: walk the whole tree from the root digest down. */
	memcpy(digest, v->root_digest, v->digest_size);

	for (i = v->levels - 1; i >= 0; i--) {
		r = verity_verify_level(v, io, block, i, false, digest);
		if (unlikely(r))
			goto out;
	}
out:
	if (!r && v->zero_digest)
		*is_zero = !memcmp(v->zero_digest, digest, v->digest_size);
	else
		*is_zero = false;

	/* Restored: the corrupted source dropped this final return. */
	return r;
}
	/*
	 * NOTE(review): SOURCE is truncated here -- this is the interior of a
	 * per-bio verification loop (presumably verity_verify_io); the opening
	 * of the function and the declarations of bio, iter, iter_copy, b and
	 * block_size are not visible in this chunk.
	 */
	if (static_branch_unlikely(&use_bh_wq_enabled) && io->in_bh) {
		/*
		 * Copy the iterator in case we need to restart
		 * verification in a work-queue.
		 */
		iter_copy = io->iter;
		iter = &iter_copy;
	} else
		iter = &io->iter;

	/* Verify each data block covered by this io. */
	for (b = 0; b < io->n_blocks;
	     b++, bio_advance_iter(bio, iter, block_size)) {
		int r;
		sector_t cur_block = io->block + b;
		bool is_zero;
		struct bio_vec bv;
		void *data;

		/* Skip blocks already validated by check_at_most_once. */
		if (v->validated_blocks && bio->bi_status == BLK_STS_OK &&
		    likely(test_bit(cur_block, v->validated_blocks)))
			continue;

		r = verity_hash_for_block(v, io, cur_block,
					  verity_io_want_digest(v, io),
					  &is_zero);
		if (unlikely(r < 0))
			return r;

		bv = bio_iter_iovec(bio, *iter);
		if (unlikely(bv.bv_len < block_size)) {
			/*
			 * Data block spans pages. This should not happen,
			 * since dm-verity sets dma_alignment to the data block
			 * size minus 1, and dm-verity also doesn't allow the
			 * data block size to be greater than PAGE_SIZE.
			 */
			DMERR_LIMIT("unaligned io (data block spans pages)");
			return -EIO;
		}

		data = bvec_kmap_local(&bv);

		if (is_zero) {
			/*
			 * If we expect a zero block, don't validate, just
			 * return zeros.
			 */
			memset(data, 0, block_size);
			kunmap_local(data);
			continue;
		}

		r = verity_hash(v, io, data, block_size,
				verity_io_real_digest(v, io));
		if (unlikely(r < 0)) {
			kunmap_local(data);
			return r;
		}

		/* Digest matches: optionally remember and move on. */
		if (likely(memcmp(verity_io_real_digest(v, io),
				  verity_io_want_digest(v, io), v->digest_size) == 0)) {
			if (v->validated_blocks)
				set_bit(cur_block, v->validated_blocks);
			kunmap_local(data);
			continue;
		}
		/* Mismatch: attempt FEC recovery / error handling. */
		r = verity_handle_data_hash_mismatch(v, io, bio, cur_block,
						     data);
		kunmap_local(data);
		if (unlikely(r))
			return r;
	}

	return 0;
}
/* * Skip verity work in response to I/O error when system is shutting down.
*/ staticinlinebool verity_is_system_shutting_down(void)
{ return system_state == SYSTEM_HALT || system_state == SYSTEM_POWER_OFF
|| system_state == SYSTEM_RESTART;
}
/*
 * End one "io" structure with a given error.
 */
/*
 * NOTE(review): truncated -- only the opening declarations of this function
 * are visible; its body and closing brace are missing from this chunk.  The
 * fused "staticvoid" token also needs splitting into "static void" once the
 * full function is recovered.
 */
staticvoid verity_finish_io(struct dm_verity_io *io, blk_status_t status)
{
	struct dm_verity *v = io->v;
	struct bio *bio = dm_bio_from_per_bio_data(io, v->ti->per_io_data_size);
/*
 * Prefetch buffers for the specified io.
 * The root buffer is not prefetched, it is assumed that it will be cached
 * all the time.
 */
/*
 * NOTE(review): truncated -- the prefetch calls and the end of this function
 * are missing from this chunk; "staticvoid" and "unsignedint" are fused
 * tokens from the corrupted extraction and need splitting on recovery.
 */
staticvoid verity_prefetch_io(struct work_struct *work)
{
	struct dm_verity_prefetch_work *pw =
		container_of(work, struct dm_verity_prefetch_work, work);
	struct dm_verity *v = pw->v;
	int i;

	/* Walk tree levels above the lowest, computing block ranges to fetch. */
	for (i = v->levels - 2; i >= 0; i--) {
		sector_t hash_block_start;
		sector_t hash_block_end;

		verity_hash_at_level(v, pw->block, i, &hash_block_start, NULL);
		verity_hash_at_level(v, pw->block + pw->n_blocks - 1, i, &hash_block_end, NULL);

		if (!i) {
			/* Lowest level: widen the range to the prefetch cluster. */
			unsignedint cluster = READ_ONCE(dm_verity_prefetch_cluster);

			cluster >>= v->data_dev_block_bits;
			if (unlikely(!cluster))
				goto no_prefetch_cluster;
/*
 * Bio map function. It allocates dm_verity_io structure and bio vector and
 * fills them. Then it issues prefetches and the I/O.
 */
/*
 * NOTE(review): truncated -- the body of verity_map is missing after these
 * declarations; "staticint" is a fused token from the corrupted source.
 */
staticint verity_map(struct dm_target *ti, struct bio *bio)
{
	struct dm_verity *v = ti->private;
	struct dm_verity_io *io;

	/*
	 * NOTE(review): the lines below belong to a DIFFERENT function (an
	 * io_hints callback taking a "limits" parameter) whose opening is not
	 * visible in this chunk -- the extraction fused two fragments here.
	 *
	 * Similar to what dm-crypt does, opt dm-verity out of support for
	 * direct I/O that is aligned to less than the traditional direct I/O
	 * alignment requirement of logical_block_size. This prevents dm-verity
	 * data blocks from crossing pages, eliminating various edge cases.
	 */
	limits->dma_alignment = limits->logical_block_size - 1;
}
	/*
	 * NOTE(review): truncated -- the opening of this function (the
	 * check_at_most_once bitset allocator) is not visible in this chunk.
	 * "unsignedlong" is a fused token from the corrupted extraction.
	 */
	/* the bitset can only handle INT_MAX blocks */
	if (v->data_blocks > INT_MAX) {
		ti->error = "device too large to use check_at_most_once";
		return -E2BIG;
	}

	/* One bit per data block, zero-initialized (nothing validated yet). */
	v->validated_blocks = kvcalloc(BITS_TO_LONGS(v->data_blocks), sizeof(unsignedlong),
				       GFP_KERNEL);
	if (!v->validated_blocks) {
		ti->error = "failed to allocate bitset for check_at_most_once";
		return -ENOMEM;
	}

	return 0;
}
/*
 * NOTE(review): truncated -- only the opening declarations of
 * verity_alloc_zero_digest are visible in this chunk; "staticint" is a fused
 * token from the corrupted extraction.
 */
staticint verity_alloc_zero_digest(struct dm_verity *v)
{
	int r = -ENOMEM;
	struct dm_verity_io *io;
	u8 *zero_data;
	/*
	 * NOTE(review): disconnected fragments of the constructor's
	 * optional-argument parsing loop; its opening and the code between the
	 * fragments are missing from this chunk.  "elseif" is a fused token
	 * from the corrupted extraction (should be "else if").
	 */
	} elseif (verity_is_fec_opt_arg(arg_name)) {
		if (only_modifier_opts)
			continue;
		r = verity_fec_parse_opt_args(as, v, &argc, arg_name);
		if (r)
			return r;
		continue;
	} elseif (verity_verify_is_sig_opt_arg(arg_name)) {
		if (only_modifier_opts)
			continue;
		r = verity_verify_sig_parse_opt_args(as, v,
						     verify_args,
						     &argc, arg_name);
		if (r)
			return r;
		continue;
	} elseif (only_modifier_opts) {
		/*
		 * Ignore unrecognized opt, could easily be an extra
		 * argument to an option whose parsing was skipped.
		 * Normal parsing (@only_modifier_opts=false) will
		 * properly parse all options (and their extra args).
		 */
		continue;
	}

	/*
	 * NOTE(review): gap in the source -- the code below is a later part
	 * of the same constructor (device-size validation and workqueue
	 * allocation).
	 */
	if (dm_bufio_get_device_size(v->bufio) < v->hash_blocks) {
		ti->error = "Hash device is too small";
		r = -E2BIG;
		goto bad;
	}

	/*
	 * Using WQ_HIGHPRI improves throughput and completion latency by
	 * reducing wait times when reading from a dm-verity device.
	 *
	 * Also as required for the "try_verify_in_tasklet" feature: WQ_HIGHPRI
	 * allows verify_wq to preempt softirq since verification in BH workqueue
	 * will fall-back to using it for error handling (or if the bufio cache
	 * doesn't have required hashes).
	 */
	v->verify_wq = alloc_workqueue("kverityd", WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
	if (!v->verify_wq) {
		ti->error = "Cannot allocate workqueue";
		r = -ENOMEM;
		goto bad;
	}
/*
 * Get the verity mode (error behavior) of a verity target.
 *
 * Returns the verity mode of the target, or -EINVAL if 'ti' is not a verity
 * target.
 */
int dm_verity_get_mode(struct dm_target *ti)
{
	if (!dm_is_verity_target(ti))
		return -EINVAL;

	return ((struct dm_verity *)ti->private)->mode;
}
/*
 * Get the root digest of a verity target.
 *
 * Returns a copy of the root digest, the caller is responsible for
 * freeing the memory of the digest.
 */
/*
 * NOTE(review): truncated -- only the opening declaration is visible; the
 * body that allocates and copies the digest is missing from this chunk.
 * "unsignedint" is a fused token from the corrupted extraction.
 */
int dm_verity_get_root_digest(struct dm_target *ti, u8 **root_digest, unsignedint *digest_size)
{
	struct dm_verity *v = ti->private;
/*
 * Report the target's root-hash signature (or its absence) to LSMs via the
 * bdev integrity hook.  (Fixed the fused "staticint" token from the
 * corrupted source.)
 */
static int verity_security_set_signature(struct block_device *bdev,
					 struct dm_verity *v)
{
	/*
	 * if the dm-verity target is unsigned, v->root_digest_sig will
	 * be NULL, and the hook call is still required to let LSMs mark
	 * the device as unsigned. This information is crucial for LSMs to
	 * block operations such as execution on unsigned files
	 */
	return security_bdev_setintegrity(bdev,
					  LSM_INT_DMVERITY_SIG_VALID,
					  v->root_digest_sig,
					  v->sig_size);
}
/*
 * Expose verity target's root hash and signature data to LSMs before resume.
 *
 * Returns 0 on success, or -ENOMEM if the system is out of memory.
 */
/*
 * NOTE(review): truncated -- only the opening declarations of
 * verity_preresume are visible in this chunk; "staticint" is a fused token
 * from the corrupted extraction.
 */
staticint verity_preresume(struct dm_target *ti)
{
	struct block_device *bdev;
	struct dm_verity_digest root_digest;
	struct dm_verity *v;
	int r;
/* * Check whether a DM target is a verity target.
*/ bool dm_is_verity_target(struct dm_target *ti)
{ return ti->type == &verity_target;
}
/*
 * Module metadata.  The author e-mail addresses were stripped from the
 * strings (angle-bracket content lost) in the corrupted source; restored
 * per the upstream dm-verity driver.
 */
MODULE_AUTHOR("Mikulas Patocka <mpatocka@redhat.com>");
MODULE_AUTHOR("Mandeep Baines <msb@chromium.org>");
MODULE_AUTHOR("Will Drewry <wad@chromium.org>");
MODULE_DESCRIPTION(DM_NAME " target for transparent disk integrity checking");
MODULE_LICENSE("GPL");