/*
 * Entry point for PVH guests.
 *
 * Xen ABI specifies the following register state when we come here:
 *
 * - `ebx`: contains the physical memory address where the loader has placed
 *          the boot start info structure.
 * - `cr0`: bit 0 (PE) must be set. All the other writeable bits are cleared.
 * - `cr4`: all bits are cleared.
 * - `cs `: must be a 32-bit read/execute code segment with a base of `0`
 *          and a limit of `0xFFFFFFFF`. The selector value is unspecified.
 * - `ds`, `es`: must be a 32-bit read/write data segment with a base of
 *          `0` and a limit of `0xFFFFFFFF`. The selector values are all
 *          unspecified.
 * - `tr`: must be a 32-bit TSS (active) with a base of '0' and a limit
 *         of '0x67'.
 * - `eflags`: bit 17 (VM) must be cleared. Bit 9 (IF) must be cleared.
 *             Bit 8 (TF) must be cleared. Other bits are all unspecified.
 *
 * All other processor registers and flag bits are unspecified. The OS is in
 * charge of setting up its own stack, GDT and IDT.
 */

/*
 * See the comment for startup_32 for more details. We need to
 * execute a call to get the execution address to be position
 * independent, but we don't have a stack. Save and restore the
 * magic field of start_info in ebx, and use that as the stack.
 */
mov (%ebx), %eax		/* stash start_info->magic; the call below clobbers it */
leal 4(%ebx), %esp		/* borrow the 4-byte magic field as a one-slot stack */
ANNOTATE_INTRA_FUNCTION_CALL
call 1f				/* pushes the runtime address of label 1 over ->magic */
1: popl %ebp			/* ebp = runtime address of label 1 */
mov %eax, (%ebx)		/* restore start_info->magic */
subl $rva(1b), %ebp		/* subtract link-time offset of 1b: ebp = runtime load base */
movl $0, %esp			/* drop the borrowed stack; no real stack exists yet */
/*
 * Reuse the non-relocatable symbol emitted for the ELF note to
 * subtract the build time physical address of pvh_start_xen() from
 * its actual runtime address, without relying on absolute 32-bit ELF
 * relocations, as these are not supported by the linker when running
 * in -pie mode, and should be avoided in .head.text in general.
 */
mov %ebp, %ebx
subl rva(xen_elfnote_phys32_entry)(%ebp), %ebx	/* ebx = runtime - build-time address = load offset */
jz .Lpagetable_done		/* loaded at the build-time address: no relocation needed */
/*
 * Store the resulting load offset in phys_base. __pa() needs
 * phys_base set to calculate the hypercall page in xen_pvh_init().
 */
movl %ebx, rva(phys_base)(%ebp)
/*
 * Set up GSBASE (zeroed here: edx:eax = 0).
 * Note that on SMP the boot CPU uses the init data section until
 * the per-CPU areas are set up.
 */
movl $MSR_GS_BASE,%ecx
xorl %eax, %eax
xorl %edx, %edx
wrmsr
#ifdef CONFIG_X86_64
/*
 * Xen PVH needs a set of identity mapped and kernel high mapping
 * page tables. pvh_start_xen starts running on the identity mapped
 * page tables, but xen_prepare_pvh calls into the high mapping.
 * These page tables need to be relocatable and are only used until
 * startup_64 transitions to init_top_pgt.
 */
/* Top-level (PGD/L4) table: identity map at 0, direct map, kernel high map. */
SYM_DATA_START_PAGE_ALIGNED(pvh_init_top_pgt)
/* Entry 0: identity mapping via pvh_level3_ident_pgt. */
.quad pvh_level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
/* Zero-fill up to the direct-map slot, then reuse the identity L3 table. */
.org pvh_init_top_pgt + L4_PAGE_OFFSET * 8, 0
.quad pvh_level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
/* Zero-fill up to the kernel-text slot. */
.org pvh_init_top_pgt + L4_START_KERNEL * 8, 0 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
.quad pvh_level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC
SYM_DATA_END(pvh_init_top_pgt)
/* Identity-mapping L3 (PUD) table: only the first entry is populated. */
SYM_DATA_START_PAGE_ALIGNED(pvh_level3_ident_pgt)
.quad pvh_level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
.fill 511, 8, 0			/* remaining 511 entries: not present */
SYM_DATA_END(pvh_level3_ident_pgt)
SYM_DATA_START_PAGE_ALIGNED(pvh_level2_ident_pgt)
/*
 * Since I easily can, map the first 1G.
 * Don't set NX because code runs from these pages.
 *
 * Note: This sets _PAGE_GLOBAL despite whether
 * the CPU supports it or it is enabled. But,
 * the CPU should ignore the bit.
 */
PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
SYM_DATA_END(pvh_level2_ident_pgt)
/* Kernel high-mapping L3 (PUD) table: one entry for the kernel image. */
SYM_DATA_START_PAGE_ALIGNED(pvh_level3_kernel_pgt)
.fill L3_START_KERNEL, 8, 0 /* (2^48-(2*1024*1024*1024)-((2^39)*511))/(2^30) = 510 */
.quad pvh_level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
.quad 0 /* no fixmap */
SYM_DATA_END(pvh_level3_kernel_pgt)
SYM_DATA_START_PAGE_ALIGNED(pvh_level2_kernel_pgt)
/*
 * Kernel high mapping.
 *
 * The kernel code+data+bss must be located below KERNEL_IMAGE_SIZE in
 * virtual address space, which is 1 GiB if RANDOMIZE_BASE is enabled,
 * 512 MiB otherwise.
 *
 * (NOTE: after that starts the module area, see MODULES_VADDR.)
 *
 * This table is eventually used by the kernel during normal runtime.
 * Care must be taken to clear out undesired bits later, like _PAGE_RW
 * or _PAGE_GLOBAL in some cases.
 */
PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE / PMD_SIZE)
SYM_DATA_END(pvh_level2_kernel_pgt)
/*
 * NOTE(review): the trailing text below is extraneous German website
 * boilerplate accidentally appended to this file; it is not part of the
 * kernel source and should be removed. Translated, it reads: "The
 * information on this website has been carefully compiled to the best
 * of our knowledge. However, neither completeness, correctness, nor
 * quality of the provided information is guaranteed. Note: the colored
 * syntax highlighting and the measurement are still experimental."
 */