/* * The nested_vmx structure is part of vcpu_vmx, and holds information we need * for correct emulation of VMX (i.e., nested VMX) on this vcpu.
*/ struct nested_vmx { /* Has the level1 guest done vmxon? */ bool vmxon;
gpa_t vmxon_ptr; bool pml_full;
/* The guest-physical address of the current VMCS L1 keeps for L2 */
gpa_t current_vmptr; /* * Cache of the guest's VMCS, existing outside of guest memory. * Loaded from guest memory during VMPTRLD. Flushed to guest * memory during VMCLEAR and VMPTRLD.
*/ struct vmcs12 *cached_vmcs12; /* * Cache of the guest's shadow VMCS, existing outside of guest * memory. Loaded from guest memory during VM entry. Flushed * to guest memory during VM exit.
*/ struct vmcs12 *cached_shadow_vmcs12;
/* * GPA to HVA cache for accessing vmcs12->vmcs_link_pointer
*/ struct gfn_to_hva_cache shadow_vmcs12_cache;
/* * GPA to HVA cache for VMCS12
*/ struct gfn_to_hva_cache vmcs12_cache;
/* * Indicates if the shadow vmcs or enlightened vmcs must be updated * with the data held by struct vmcs12.
*/ bool need_vmcs12_to_shadow_sync; bool dirty_vmcs12;
/* * Indicates whether MSR bitmap for L2 needs to be rebuilt due to * changes in MSR bitmap for L1 or switching to a different L2. Note, * this flag can only be used reliably in conjunction with a paravirt L1 * which informs L0 whether any changes to MSR bitmap for L2 were done * on its side.
*/ bool force_msr_bitmap_recalc;
/* * Indicates lazily loaded guest state has not yet been decached from * vmcs02.
*/ bool need_sync_vmcs02_to_vmcs12_rare;
/* * vmcs02 has been initialized, i.e. state that is constant for * vmcs02 has been written to the backing VMCS. Initialization * is delayed until L1 actually attempts to run a nested VM.
*/ bool vmcs02_initialized;
/* * Enlightened VMCS has been enabled. It does not mean that L1 has to * use it. However, VMX features available to L1 will be limited based * on what the enlightened VMCS supports.
*/ bool enlightened_vmcs_enabled;
/* L2 must run next, and mustn't decide to exit to L1. */ bool nested_run_pending;
/* Pending MTF VM-exit into L1. */ bool mtf_pending;
struct loaded_vmcs vmcs02;
/* * Guest pages referred to in the vmcs02 with host-physical * pointers, so we must keep them pinned while L2 runs.
*/ struct kvm_host_map apic_access_page_map; struct kvm_host_map virtual_apic_map; struct kvm_host_map pi_desc_map;
/* * Used to snapshot MSRs that are conditionally loaded on VM-Enter in * order to propagate the guest's pre-VM-Enter value into vmcs02. For * emulation of VMLAUNCH/VMRESUME, the snapshot will be of L1's value. * For KVM_SET_NESTED_STATE, the snapshot is of L2's value, _if_ * userspace restores MSRs before nested state. If userspace restores * MSRs after nested state, the snapshot holds garbage, but KVM can't * detect that, and the garbage value in vmcs02 will be overwritten by * MSR restoration in any case.
*/
u64 pre_vmenter_debugctl;
u64 pre_vmenter_bndcfgs;
/* to migrate it to L1 if L2 writes to L1's CR8 directly */ int l1_tpr_threshold;
u16 vpid02;
u16 last_vpid;
struct nested_vmx_msrs msrs;
/* SMM related state */ struct { /* in VMX operation on SMM entry? */ bool vmxon; /* in guest mode on SMM entry? */ bool guest_mode;
} smm;
/* * User return MSRs are always emulated when enabled in the guest, but * only loaded into hardware when necessary, e.g. SYSCALL #UDs outside * of 64-bit mode or if EFER.SCE=1, thus the SYSCALL MSRs don't need to * be loaded into hardware if those conditions aren't met.
*/ struct vmx_uret_msr guest_uret_msrs[MAX_NR_USER_RETURN_MSRS]; bool guest_uret_msrs_loaded; #ifdef CONFIG_X86_64
u64 msr_guest_kernel_gs_base; #endif
u64 spec_ctrl;
u32 msr_ia32_umwait_control;
/* * loaded_vmcs points to the VMCS currently used in this vcpu. For a * non-nested (L1) guest, it always points to vmcs01. For a nested * guest (L2), it points to a different VMCS.
*/ struct loaded_vmcs vmcs01; struct loaded_vmcs *loaded_vmcs;
/* Support for PML */ #define PML_LOG_NR_ENTRIES 512 /* PML is written backwards: this is the first entry written by the CPU */ #define PML_HEAD_INDEX (PML_LOG_NR_ENTRIES-1)
struct page *pml_pg;
/* apic deadline value in host tsc */
u64 hv_deadline_tsc;
/* * Only bits masked by msr_ia32_feature_control_valid_bits can be set in * msr_ia32_feature_control. FEAT_CTL_LOCKED is always included * in msr_ia32_feature_control_valid_bits.
*/
u64 msr_ia32_feature_control;
u64 msr_ia32_feature_control_valid_bits; /* SGX Launch Control public key hash */
u64 msr_ia32_sgxlepubkeyhash[4];
u64 msr_ia32_mcu_opt_ctrl; bool disable_fb_clear;
struct pt_desc pt_desc; struct lbr_desc lbr_desc;
/* ve_info must be page aligned. */ struct vmx_ve_information *ve_info;
};
/* * CR0.WP needs to be intercepted when KVM is shadowing legacy paging * in order to construct shadow PTEs with the correct protections. * Note! CR0.WP technically can be passed through to the guest if * paging is disabled, but checking CR0.PG would generate a cyclical * dependency of sorts due to forcing the caller to ensure CR0 holds * the correct value prior to determining which CR0 bits can be owned * by L1. Keep it simple and limit the optimization to EPT.
*/ if (!enable_ept)
bits &= ~X86_CR0_WP; return bits;
}
¤ Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
(vorverarbeitet)
¤
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.