/*
 * Limit the nested partition table to 4096 entries (because that's what
 * hardware supports). Both guest and host use this value.
 */
#define KVM_MAX_NESTED_GUESTS_SHIFT	12
/* allow access to big endian 32bit upper/lower parts and 64bit var */
struct kvmppc_exit_timing {
	union {
		u64 tv64;		/* whole 64-bit timebase value */
		struct {
			/* big-endian layout: upper word first, then lower */
			u32 tbu, tbl;
		} tv32;
	};
};
/* XICS components, defined in book3s_xics.c */
struct kvmppc_xics;
struct kvmppc_icp;
/* was "externstruct" (missing space) — not valid C */
extern struct kvm_device_ops kvm_xics_ops;

/* XIVE components, defined in book3s_xive.c */
struct kvmppc_xive;
struct kvmppc_xive_vcpu;
extern struct kvm_device_ops kvm_xive_ops;
extern struct kvm_device_ops kvm_xive_native_ops;

struct kvmppc_passthru_irqmap;
/*
 * The reverse mapping array has one entry for each HPTE,
 * which stores the guest's view of the second word of the HPTE
 * (including the guest physical address of the mapping),
 * plus forward and backward pointers in a doubly-linked ring
 * of HPTEs that map the same host page.  The pointers in this
 * ring are 32-bit HPTE indexes, to save space.
 */
struct revmap_entry {
	/* was "unsignedlong" (missing space) — not valid C */
	unsigned long guest_rpte;	/* guest view of HPTE second word */
	unsigned int forw, back;	/* 32-bit HPTE indexes in the ring */
};
/*
 * The rmap array of size number of guest pages is allocated for each memslot.
 * This array is used to store usage specific information about the guest page.
 * Below are the encodings of the various possible usage types.
 */
/* Free bits which can be used to define a new usage */
#define KVMPPC_RMAP_TYPE_MASK	0xff00000000000000
#define KVMPPC_RMAP_NESTED	0xc000000000000000	/* Nested rmap array */
#define KVMPPC_RMAP_HPT		0x0100000000000000	/* HPT guest */
/*
 * rmap usage definition for a hash page table (hpt) guest:
 * 0x0000080000000000	Lock bit
 * 0x0000018000000000	RC bits
 * 0x0000000100000000	Present bit
 * 0x00000000ffffffff	HPT index bits
 * The bottom 32 bits are the index in the guest HPT of a HPTE that points to
 * the page.
 */
#define KVMPPC_RMAP_LOCK_BIT	43
#define KVMPPC_RMAP_RC_SHIFT	32
/* HPTE_R_R is the HPTE Referenced bit, defined elsewhere in the project */
#define KVMPPC_RMAP_REFERENCED	(HPTE_R_R << KVMPPC_RMAP_RC_SHIFT)
#define KVMPPC_RMAP_PRESENT	0x100000000ul
#define KVMPPC_RMAP_INDEX	0xfffffffful
/* This bit is used when a vcore exit is triggered from outside the vcore */
#define VCORE_EXIT_REQ	0x10000

/*
 * Values for vcore_state.
 * Note that these are arranged such that lower values
 * (< VCORE_SLEEPING) don't require stolen time accounting
 * on load/unload, and higher values do.
 */
#define VCORE_INACTIVE	0
#define VCORE_PREEMPT	1
#define VCORE_PIGGYBACK	2
#define VCORE_SLEEPING	3
#define VCORE_RUNNING	4
#define VCORE_EXITING	5
#define VCORE_POLLING	6
/*
 * Struct used to manage memory for a virtual processor area
 * registered by a PAPR guest.  There are three types of area
 * that a guest can register.
 */
struct kvmppc_vpa {
	/* was "unsignedlong" (missing space) — not valid C */
	unsigned long gpa;	/* Current guest phys addr */
	void *pinned_addr;	/* Address in kernel linear mapping */
	void *pinned_end;	/* End of region */
	unsigned long next_gpa;	/* Guest phys addr for update */
	unsigned long len;	/* Number of bytes required */
	u8 update_pending;	/* 1 => update pinned_addr from next_gpa */
	bool dirty;		/* true => area has been modified by kernel */
};
/* Struct used to accumulate timing information in HV real mode code */
struct kvmhv_tb_accumulator {
	u64 seqcount;	/* used to synchronize access, also count * 2 */
	u64 tb_total;	/* total time in timebase ticks */
	u64 tb_min;	/* min time */
	u64 tb_max;	/* max time */
};
struct kvm_vcpu_arch {
ulong host_stack;
u32 host_pid; #ifdef CONFIG_PPC_BOOK3S struct kvmppc_slb slb[64]; int slb_max; /* 1 + index of last valid entry in slb[] */ int slb_nr; /* total number of entries in SLB */ struct kvmppc_mmu mmu; struct kvmppc_vcpu_book3s *book3s; #endif #ifdef CONFIG_PPC_BOOK3S_32 struct kvmppc_book3s_shadow_vcpu *shadow_vcpu; #endif
/* * This is passed along to the HV via H_ENTER_NESTED. Align to * prevent it crossing a real 4K page.
*/ struct pt_regs regs __aligned(512);
#ifdef CONFIG_PPC_BOOK3S
ulong fault_dar;
u32 fault_dsisr; unsignedlong intr_msr; /* * POWER9 and later: fault_gpa contains the guest real address of page * fault for a radix guest, or segment descriptor (equivalent to result * from slbmfev of SLB entry that translated the EA) for hash guests.
*/
ulong fault_gpa; #endif
u16 io_gpr; /* GPR used as IO source/target */
u8 mmio_host_swabbed;
u8 mmio_sign_extend; /* conversion between single and double precision */
u8 mmio_sp64_extend; /* * Number of simulations for vsx. * If we use 2*8bytes to simulate 1*16bytes, * then the number should be 2 and * mmio_copy_type=KVMPPC_VSX_COPY_DWORD. * If we use 4*4bytes to simulate 1*16bytes, * the number should be 4 and * mmio_vsx_copy_type=KVMPPC_VSX_COPY_WORD.
*/
u8 mmio_vsx_copy_nums;
u8 mmio_vsx_offset;
u8 mmio_vmx_copy_nums;
u8 mmio_vmx_offset;
u8 mmio_copy_type;
u8 osi_needed;
u8 osi_enabled;
u8 papr_enabled;
u8 watchdog_enabled;
u8 sane;
u8 cpu_type;
u8 hcall_needed;
u8 epr_flags; /* KVMPPC_EPR_xxx */
u8 epr_needed;
u8 external_oneshot; /* clear external irq after delivery */
u32 cpr0_cfgaddr; /* holds the last set cpr0_cfgaddr */
struct hrtimer dec_timer;
u64 dec_jiffies;
u64 dec_expires; /* Relative to guest timebase. */ unsignedlong pending_exceptions;
u8 ceded;
u8 prodded;
u8 doorbell_request;
u8 irq_pending; /* Used by XIVE to signal pending guest irqs */ unsignedlong last_inst;
struct rcuwait wait; struct rcuwait *waitp; struct kvmppc_vcore *vcore; int ret; int trap; int state; int ptid; int thread_cpu; int prev_cpu; bool timer_running;
wait_queue_head_t cpu_run; struct machine_check_event mce_evt; /* Valid if trap == 0x200 */
struct kvm_vcpu_arch_shared *shared; #ifdefined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE) bool shared_big_endian; #endif unsignedlong magic_page_pa; /* phys addr to map the magic page to */ unsignedlong magic_page_ea; /* effect. addr to map the magic page to */ bool disable_kernel_nx;
int irq_type; /* one of KVM_IRQ_* */ int irq_cpu_id; struct openpic *mpic; /* KVM_IRQ_MPIC */ #ifdef CONFIG_KVM_XICS struct kvmppc_icp *icp; /* XICS presentation controller */ struct kvmppc_xive_vcpu *xive_vcpu; /* XIVE virtual CPU data */
__be32 xive_cam_word; /* Cooked W2 in proper endian with valid bit */
u8 xive_pushed; /* Is the VP pushed on the physical CPU ? */
u8 xive_esc_on; /* Is the escalation irq enabled ? */ union xive_tma_w01 xive_saved_state; /* W0..1 of XIVE thread state */
u64 xive_esc_raddr; /* Escalation interrupt ESB real addr */
u64 xive_esc_vaddr; /* Escalation interrupt ESB virt addr */ #endif
u64 hfscr_permitted; /* A mask of permitted HFSCR facilities */
/* For support of nested guests */ struct kvm_nested_guest *nested;
u64 nested_hfscr; /* HFSCR that the L1 requested for the nested guest */
u32 nested_vcpu_id;
gpa_t nested_io_gpr; /* For nested APIv2 guests*/ struct kvmhv_nestedv2_io nestedv2_io; #endif
/*
 * NOTE(review): the following trailing text is boilerplate from a web page
 * ("the information on this website was compiled to the best of our
 * knowledge; however, neither completeness, correctness, nor quality of the
 * provided information is guaranteed. Note: the colored syntax display and
 * the measurement are still experimental."). It is extraction contamination,
 * not part of this kernel header, and should be removed. Note also that the
 * preceding struct kvm_vcpu_arch is truncated here (missing its closing
 * brace) — restore it from the upstream source.
 */