.macro RESTORE_GUEST_SPEC_CTRL
	/* No need to do anything if SPEC_CTRL is unset or V_SPEC_CTRL is set */
	ALTERNATIVE_2 "", \
		"jmp 800f", X86_FEATURE_MSR_SPEC_CTRL, \
		"", X86_FEATURE_V_SPEC_CTRL
801:
.endm
.macro RESTORE_GUEST_SPEC_CTRL_BODY
800:
	/*
	 * SPEC_CTRL handling: if the guest's SPEC_CTRL value differs from the
	 * host's, write the MSR. This is kept out-of-line so that the common
	 * case does not have to jump.
	 *
	 * IMPORTANT: To avoid RSB underflow attacks and any other nastiness,
	 * there must not be any returns or indirect branches between this code
	 * and vmentry.
	 */
movl SVM_spec_ctrl(%_ASM_DI), %eax
cmp PER_CPU_VAR(x86_spec_ctrl_current), %eax
je 801b
mov $MSR_IA32_SPEC_CTRL, %ecx
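	/*
	 * WRMSR takes the MSR index in ECX and the value in EDX:EAX; only the
	 * low 32 bits of SPEC_CTRL are used, so clear the high half.
	 */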
xor %edx, %edx
wrmsr
jmp 801b
.endm
.macro RESTORE_HOST_SPEC_CTRL
	/* No need to do anything if SPEC_CTRL is unset or V_SPEC_CTRL is set */
	ALTERNATIVE_2 "", \
		"jmp 900f", X86_FEATURE_MSR_SPEC_CTRL, \
		"", X86_FEATURE_V_SPEC_CTRL
901:
.endm
.macro RESTORE_HOST_SPEC_CTRL_BODY spec_ctrl_intercepted:req
900: /* Same for after vmexit. */
mov $MSR_IA32_SPEC_CTRL, %ecx
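	/* ECX selects the MSR for both the RDMSR and the WRMSR below. */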
	/*
	 * Load the value that the guest had written into MSR_IA32_SPEC_CTRL,
	 * if it was not intercepted during guest execution.
	 */
cmpb $0, \spec_ctrl_intercepted
jnz 998f
rdmsr
movl %eax, SVM_spec_ctrl(%_ASM_DI)
998:
/* Now restore the host value of the MSR if different from the guest's. */
movl PER_CPU_VAR(x86_spec_ctrl_current), %eax
cmp SVM_spec_ctrl(%_ASM_DI), %eax
je 901b
xor %edx, %edx
wrmsr
jmp 901b
.endm
/**
 * __svm_vcpu_run - Run a vCPU via a transition to SVM guest mode
 * @svm:	struct vcpu_svm *
 * @spec_ctrl_intercepted: bool
 */
SYM_FUNC_START(__svm_vcpu_run)
push %_ASM_BP
mov %_ASM_SP, %_ASM_BP
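
	/* Save the remaining callee-saved registers. */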
#ifdef CONFIG_X86_64
push %r15
push %r14
push %r13
push %r12
#else
push %edi
push %esi
#endif
push %_ASM_BX
	/*
	 * Save variables needed after vmexit on the stack, in inverse
	 * order compared to when they are needed.
	 */
/* Accessed directly from the stack in RESTORE_HOST_SPEC_CTRL. */
push %_ASM_ARG2
/* Needed to restore access to percpu variables. */
__ASM_SIZE(push) PER_CPU_VAR(svm_data + SD_save_area_pa)
/* Finally save @svm. */
push %_ASM_ARG1
	.ifnc _ASM_ARG1, _ASM_DI
	/*
	 * Stash @svm in RDI early. On 32-bit, arguments are in RAX, RCX
	 * and RDX which are clobbered by RESTORE_GUEST_SPEC_CTRL.
	 */
mov %_ASM_ARG1, %_ASM_DI
.endif
	/*
	 * Use a single vmcb (vmcb01 because it's always valid) for
	 * context switching guest state via VMLOAD/VMSAVE, that way
	 * the state doesn't need to be copied between vmcb01 and
	 * vmcb02 when switching vmcbs for nested virtualization.
	 */
mov SVM_vmcb01_pa(%_ASM_DI), %_ASM_AX
1: vmload %_ASM_AX
2:
/* Get svm->current_vmcb->pa into RAX. */
mov SVM_current_vmcb(%_ASM_DI), %_ASM_AX
mov KVM_VMCB_pa(%_ASM_AX), %_ASM_AX
	/*
	 * Mitigate RETBleed for AMD/Hygon Zen uarch. RET should be
	 * untrained as soon as we exit the VM and are back to the
	 * kernel. This should be done before re-enabling interrupts
	 * because interrupt handlers won't sanitize 'ret' if the return is
	 * from the kernel.
	 */
UNTRAIN_RET_VM
	/*
	 * Clear all general purpose registers except RSP and RAX to prevent
	 * speculative use of the guest's values, even those that are reloaded
	 * via the stack. In theory, an L1 cache miss when restoring registers
	 * could lead to speculative execution with the guest's values.
	 * Zeroing XORs are dirt cheap, i.e. the extra paranoia is essentially
	 * free. RSP and RAX are exempt as they are restored by hardware
	 * during VM-Exit.
	 */
xor %ecx, %ecx
xor %edx, %edx
xor %ebx, %ebx
xor %ebp, %ebp
xor %esi, %esi
xor %edi, %edi
#ifdef CONFIG_X86_64
xor %r8d, %r8d
xor %r9d, %r9d
xor %r10d, %r10d
xor %r11d, %r11d
xor %r12d, %r12d
xor %r13d, %r13d
xor %r14d, %r14d
xor %r15d, %r15d
#endif
/* "Pop" @spec_ctrl_intercepted. */
pop %_ASM_BX
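
	/* Restore the callee-saved registers pushed at function entry. */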
pop %_ASM_BX
#ifdef CONFIG_X86_64
pop %r12
pop %r13
pop %r14
pop %r15
#else
pop %esi
pop %edi
#endif
pop %_ASM_BP
RET
/**
 * __svm_sev_es_vcpu_run - Run a SEV-ES vCPU via a transition to SVM guest mode
 * @svm:	struct vcpu_svm *
 * @spec_ctrl_intercepted: bool
 */
SYM_FUNC_START(__svm_sev_es_vcpu_run)
FRAME_BEGIN
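
	/* RDX, the third function argument, points to the host save area used below. */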
	/*
	 * Save non-volatile (callee-saved) registers to the host save area.
	 * Except for RAX and RSP, all GPRs are restored on #VMEXIT, but not
	 * saved on VMRUN.
	 */
mov %rbp, SEV_ES_RBP (%rdx)
mov %r15, SEV_ES_R15 (%rdx)
mov %r14, SEV_ES_R14 (%rdx)
mov %r13, SEV_ES_R13 (%rdx)
mov %r12, SEV_ES_R12 (%rdx)
mov %rbx, SEV_ES_RBX (%rdx)
	/*
	 * Save volatile registers that hold arguments that are needed after
	 * #VMEXIT (RDI=@svm and RSI=@spec_ctrl_intercepted).
	 */
mov %rdi, SEV_ES_RDI (%rdx)
mov %rsi, SEV_ES_RSI (%rdx)
	/*
	 * Mitigate RETBleed for AMD/Hygon Zen uarch. RET should be
	 * untrained as soon as we exit the VM and are back to the
	 * kernel. This should be done before re-enabling interrupts
	 * because interrupt handlers won't sanitize RET if the return is
	 * from the kernel.
	 */
UNTRAIN_RET_VM