/* Used by icache_is_aliasing(). */
unsigned long __icache_flags;

/* Used by kvm_get_vttbr(). */
unsigned int kvm_arm_vmid_bits;

/* Host's maximum supported SVE vector length, in bytes (presumably; set elsewhere). */
unsigned int kvm_host_sve_max_vl;
/*
 * The currently loaded hyp vCPU for each physical CPU. Used only when
 * protected KVM is enabled, but for both protected and non-protected VMs.
 */
static DEFINE_PER_CPU(struct pkvm_hyp_vcpu *, loaded_hyp_vcpu);
	/*
	 * NOTE(review): this is the interior of a trap-configuration helper
	 * whose signature is not visible in this chunk. 'val', 'kvm' and
	 * 'vcpu' are declared outside the visible fragment. HCR_EL2-style
	 * bits (HCR_*) and MDCR_EL2-style bits (MDCR_EL2_*) both accumulate
	 * into the same 'val' here, yet only mdcr_el2 is written at the end —
	 * this looks like two separate helpers (HCR and MDCR setup) collapsed
	 * together by extraction; confirm against the original file.
	 */
	/*
	 * Always trap:
	 * - Feature id registers: to control features exposed to guests
	 * - Implementation-defined features
	 */
	val |= HCR_TACR | HCR_TIDCP | HCR_TID3 | HCR_TID1;

	/* No RAS exposed: trap error-record accesses, clear fault-injection enable. */
	if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, RAS, IMP)) {
		val |= HCR_TERR | HCR_TEA;
		val &= ~(HCR_FIEN);
	}

	/* No AMU: clear the virtual-offset enable bit. */
	if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, AMU, IMP))
		val &= ~(HCR_AMVOFFEN);

	/* No MTE: trap the MTE id register, disable tag-related bits. */
	if (!kvm_has_feat(kvm, ID_AA64PFR1_EL1, MTE, IMP)) {
		val |= HCR_TID5;
		val &= ~(HCR_DCT | HCR_ATA);
	}

	/* No LORegions: trap LOR register accesses. */
	if (!kvm_has_feat(kvm, ID_AA64MMFR1_EL1, LO, IMP))
		val |= HCR_TLOR;

	/* No PMU: trap PMU accesses, clear host PMU enable/counter bits. */
	if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, PMUVer, IMP)) {
		val |= MDCR_EL2_TPM | MDCR_EL2_TPMCR;
		val &= ~(MDCR_EL2_HPME | MDCR_EL2_MTPME | MDCR_EL2_HPMN_MASK);
	}

	/* No debug architecture: trap debug register accesses. */
	if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, DebugVer, IMP))
		val |= MDCR_EL2_TDRA | MDCR_EL2_TDA;

	/* No OS Double Lock: trap OS debug register accesses. */
	if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, DoubleLock, IMP))
		val |= MDCR_EL2_TDOSA;

	/* No SPE: trap profiling-buffer accesses, clear buffer ownership bits. */
	if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, PMSVer, IMP)) {
		val |= MDCR_EL2_TPMS;
		val &= ~MDCR_EL2_E2PB_MASK;
	}

	/* No trace filtering: trap trace-filter control registers. */
	if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, TraceFilt, IMP))
		val |= MDCR_EL2_TTRF;

	/* No trace buffer extension: claim the trace buffer for EL2. */
	if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, ExtTrcBuff, IMP))
		val |= MDCR_EL2_E2TB_MASK;

	/* Trap Debug Communications Channel registers */
	if (!kvm_has_feat(kvm, ID_AA64MMFR0_EL1, FGT, IMP))
		val |= MDCR_EL2_TDCC;

	vcpu->arch.mdcr_el2 = val;
}
/*
 * Check that cpu features that are neither trapped nor supported are not
 * enabled for protected VMs.
 *
 * @vcpu: the vCPU whose (per-VM) feature id register view is checked.
 *
 * Return: 0 if the configured features are acceptable, -EINVAL otherwise.
 */
static int pkvm_check_pvm_cpu_features(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;

	/* Protected KVM does not support AArch32 guests. */
	if (kvm_has_feat(kvm, ID_AA64PFR0_EL1, EL0, AARCH32) ||
	    kvm_has_feat(kvm, ID_AA64PFR0_EL1, EL1, AARCH32))
		return -EINVAL;

	/*
	 * Linux guests assume support for floating-point and Advanced SIMD. Do
	 * not change the trapping behavior for these from the KVM default.
	 */
	if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, FP, IMP) ||
	    !kvm_has_feat(kvm, ID_AA64PFR0_EL1, AdvSIMD, IMP))
		return -EINVAL;

	/* No SME support in KVM right now. Check to catch if it changes. */
	if (kvm_has_feat(kvm, ID_AA64PFR1_EL1, SME, IMP))
		return -EINVAL;

	return 0;
}
/*
 * Spinlock for protecting state related to the VM table. Protects writes
 * to 'vm_table', 'nr_table_entries', and other per-vm state on initialization.
 * Also protects reads and writes to 'last_hyp_vcpu_lookup'.
 */
DEFINE_HYP_SPINLOCK(vm_table_lock);
/*
 * The table of VM entries for protected VMs in hyp.
 * Allocated at hyp initialization and setup.
 */
static struct pkvm_hyp_vm **vm_table;
	/*
	 * NOTE(review): fragment of an SVE-init helper; its signature and the
	 * declarations of 'sve_max_vl', 'sve_state_size', 'sve_state', 'ret',
	 * 'vcpu', 'host_vcpu' and the 'err' label are outside this chunk.
	 */
	if (!vcpu_has_feature(vcpu, KVM_ARM_VCPU_SVE)) {
		/* No SVE for this vCPU: nothing to set up. */
		vcpu_clear_flag(vcpu, VCPU_SVE_FINALIZED);
		return 0;
	}

	/* Limit guest vector length to the maximum supported by the host. */
	sve_max_vl = min(READ_ONCE(host_vcpu->arch.sve_max_vl), kvm_host_sve_max_vl);
	sve_state_size = sve_state_size_from_vl(sve_max_vl);
	sve_state = kern_hyp_va(READ_ONCE(host_vcpu->arch.sve_state));

	if (!sve_state || !sve_state_size) {
		ret = -EINVAL;
		goto err;
	}

	/* Keep the host's SVE state buffer shared/pinned while hyp uses it. */
	ret = hyp_pin_shared_mem(sve_state, sve_state + sve_state_size);
	if (ret)
		goto err;
	/*
	 * NOTE(review): tail of a vCPU-init function whose header is not
	 * visible in this chunk; 'hyp_vcpu', 'host_vcpu' and 'ret' are
	 * declared outside the visible fragment.
	 */
	/* Protected vCPUs get the pKVM-owned view of the ID registers. */
	if (pkvm_hyp_vcpu_is_protected(hyp_vcpu))
		kvm_init_pvm_id_regs(&hyp_vcpu->vcpu);

	ret = pkvm_vcpu_init_traps(hyp_vcpu);
	if (ret)
		goto done;

	ret = pkvm_vcpu_init_sve(hyp_vcpu, host_vcpu);
done:
	/* On any failure, release the pin taken on the host vCPU. */
	if (ret)
		unpin_host_vcpu(host_vcpu);
	return ret;
}
/*
 * Find the first free slot in 'vm_table'.
 *
 * @host_kvm: unused here; kept for interface compatibility with callers.
 *
 * Return: the free index on success, -ENOMEM if the table is full.
 * Caller is expected to hold 'vm_table_lock' (the visible caller,
 * insert_vm_table_entry(), asserts it).
 */
static int find_free_vm_table_entry(struct kvm *host_kvm)
{
	int i;

	for (i = 0; i < KVM_MAX_PVMS; ++i) {
		if (!vm_table[i])
			return i;
	}

	return -ENOMEM;
}
/*
 * Allocate a VM table entry and insert a pointer to the new vm.
 *
 * Return a unique handle to the protected VM on success,
 * negative error code on failure.
 */
static pkvm_handle_t insert_vm_table_entry(struct kvm *host_kvm,
					   struct pkvm_hyp_vm *hyp_vm)
{
	struct kvm_s2_mmu *mmu = &hyp_vm->kvm.arch.mmu;
	int idx;

	hyp_assert_lock_held(&vm_table_lock);

	/*
	 * Initializing protected state might have failed, yet a malicious
	 * host could trigger this function. Thus, ensure that 'vm_table'
	 * exists.
	 */
	if (unlikely(!vm_table))
		return -EINVAL;

	idx = find_free_vm_table_entry(host_kvm);
	if (idx < 0)
		return idx;
	/*
	 * NOTE(review): the function body appears truncated here — the code
	 * that populates 'mmu', stores hyp_vm into vm_table[idx] and returns
	 * the handle is missing from this chunk.
	 */
/*
 * Deallocate and remove the VM table entry corresponding to the handle.
 * Must be called with 'vm_table_lock' held (asserted below).
 */
static void remove_vm_table_entry(pkvm_handle_t handle)
{
	hyp_assert_lock_held(&vm_table_lock);
	vm_table[vm_handle_to_idx(handle)] = NULL;
}
/*
 * Unmap host-donated memory without scrubbing its contents first
 * (contrast with a clearing variant — presumably unmap_donated_memory()).
 * No-op when 'va' is NULL.
 */
static void unmap_donated_memory_noclear(void *va, size_t size)
{
	if (!va)
		return;

	__unmap_donated_memory(va, size);
}
/*
 * Initialize the hypervisor copy of the protected VM state using the
 * memory donated by the host.
 *
 * Unmaps the donated memory from the host at stage 2.
 *
 * host_kvm: A pointer to the host's struct kvm.
 * vm_hva: The host va of the area being donated for the VM state.
 *	   Must be page aligned.
 * pgd_hva: The host va of the area being donated for the stage-2 PGD for
 *	    the VM. Must be page aligned. Its size is implied by the VM's
 *	    VTCR.
 *
 * Return a unique handle to the protected VM on success,
 * negative error code on failure.
 */
/*
 * NOTE(review): 'unsignedlong' / 'unsignedint' below are extraction-garbled
 * 'unsigned long' / 'unsigned int' — restore the space when code changes are
 * permitted. The function body is also truncated at the end of this fragment.
 */
int __pkvm_init_vm(struct kvm *host_kvm, unsignedlong vm_hva,
		   unsignedlong pgd_hva)
{
	struct pkvm_hyp_vm *hyp_vm = NULL;
	size_t vm_size, pgd_size;
	unsignedint nr_vcpus;
	void *pgd = NULL;
	int ret;

	/* Pin the host's struct kvm so it stays shared while we read it. */
	ret = hyp_pin_shared_mem(host_kvm, host_kvm + 1);
	if (ret)
		return ret;

	/* A VM with no vCPUs is invalid. */
	nr_vcpus = READ_ONCE(host_kvm->created_vcpus);
	if (nr_vcpus < 1) {
		ret = -EINVAL;
		goto err_unpin_kvm;
	}
	/* NOTE(review): body truncated here; 'err_unpin_kvm' label is not visible. */
/*
 * Initialize the hypervisor copy of the protected vCPU state using the
 * memory donated by the host.
 *
 * handle: The handle for the protected vm.
 * host_vcpu: A pointer to the corresponding host vcpu.
 * vcpu_hva: The host va of the area being donated for the vcpu state.
 *	     Must be page aligned. The size of the area must be equal to
 *	     the page-aligned size of 'struct pkvm_hyp_vcpu'.
 * Return 0 on success, negative error code on failure.
 */
/*
 * NOTE(review): 'unsignedlong' / 'unsignedint' below are extraction-garbled
 * 'unsigned long' / 'unsigned int'. The function body is truncated after the
 * last visible check; the success path and 'unlock' label are not in this chunk.
 */
int __pkvm_init_vcpu(pkvm_handle_t handle, struct kvm_vcpu *host_vcpu,
		     unsignedlong vcpu_hva)
{
	struct pkvm_hyp_vcpu *hyp_vcpu;
	struct pkvm_hyp_vm *hyp_vm;
	unsignedint idx;
	int ret;

	/* Map the donated page(s) into hyp for the vCPU state. */
	hyp_vcpu = map_donated_memory(vcpu_hva, sizeof(*hyp_vcpu));
	if (!hyp_vcpu)
		return -ENOMEM;

	hyp_spin_lock(&vm_table_lock);

	hyp_vm = get_vm_by_handle(handle);
	if (!hyp_vm) {
		ret = -ENOENT;
		goto unlock;
	}

	ret = init_pkvm_hyp_vcpu(hyp_vcpu, hyp_vm, host_vcpu);
	if (ret)
		goto unlock;

	/* Reject out-of-range vCPU indices... */
	idx = hyp_vcpu->vcpu.vcpu_idx;
	if (idx >= hyp_vm->kvm.created_vcpus) {
		ret = -EINVAL;
		goto unlock;
	}

	/* ...and double initialization of the same slot. */
	if (hyp_vm->vcpus[idx]) {
		ret = -EINVAL;
		goto unlock;
	}
	/*
	 * NOTE(review): fragment of a VM teardown path; the declarations of
	 * 'err', 'host_kvm', 'hyp_vm', 'handle' and the 'err_unlock' label are
	 * not visible in this chunk.
	 */
	/* Refuse teardown while hyp still holds page references on the VM. */
	if (WARN_ON(hyp_page_count(hyp_vm))) {
		err = -EBUSY;
		goto err_unlock;
	}

	host_kvm = hyp_vm->host_kvm;

	/* Ensure the VMID is clean before it can be reallocated */
	__kvm_tlb_flush_vmid(&hyp_vm->kvm.arch.mmu);

	remove_vm_table_entry(handle);
	hyp_spin_unlock(&vm_table_lock);
/*
 * NOTE(review): the following text is unrelated German website boilerplate
 * accidentally appended to this file; it is not C code and should be removed.
 * Translation: "The information on this website was carefully compiled to the
 * best of our knowledge. However, neither completeness, correctness, nor
 * quality of the provided information is guaranteed. Note: the colored syntax
 * rendering and the measurement are still experimental."
 */