u64 __gic_v3_get_lr(unsignedint lr)
{ switch (lr & 0xf) { case 0: return read_gicreg(ICH_LR0_EL2); case 1: return read_gicreg(ICH_LR1_EL2); case 2: return read_gicreg(ICH_LR2_EL2); case 3: return read_gicreg(ICH_LR3_EL2); case 4: return read_gicreg(ICH_LR4_EL2); case 5: return read_gicreg(ICH_LR5_EL2); case 6: return read_gicreg(ICH_LR6_EL2); case 7: return read_gicreg(ICH_LR7_EL2); case 8: return read_gicreg(ICH_LR8_EL2); case 9: return read_gicreg(ICH_LR9_EL2); case 10: return read_gicreg(ICH_LR10_EL2); case 11: return read_gicreg(ICH_LR11_EL2); case 12: return read_gicreg(ICH_LR12_EL2); case 13: return read_gicreg(ICH_LR13_EL2); case 14: return read_gicreg(ICH_LR14_EL2); case 15: return read_gicreg(ICH_LR15_EL2);
}
unreachable();
}
/*
 * Write @val to the list register selected by the low four bits of @lr.
 * Counterpart of __gic_v3_get_lr(); the ICH_LR<n>_EL2 registers are not
 * index-addressable.
 */
static void __gic_v3_set_lr(u64 val, int lr)
{
	switch (lr & 0xf) {
	case 0:
		write_gicreg(val, ICH_LR0_EL2);
		break;
	case 1:
		write_gicreg(val, ICH_LR1_EL2);
		break;
	case 2:
		write_gicreg(val, ICH_LR2_EL2);
		break;
	case 3:
		write_gicreg(val, ICH_LR3_EL2);
		break;
	case 4:
		write_gicreg(val, ICH_LR4_EL2);
		break;
	case 5:
		write_gicreg(val, ICH_LR5_EL2);
		break;
	case 6:
		write_gicreg(val, ICH_LR6_EL2);
		break;
	case 7:
		write_gicreg(val, ICH_LR7_EL2);
		break;
	case 8:
		write_gicreg(val, ICH_LR8_EL2);
		break;
	case 9:
		write_gicreg(val, ICH_LR9_EL2);
		break;
	case 10:
		write_gicreg(val, ICH_LR10_EL2);
		break;
	case 11:
		write_gicreg(val, ICH_LR11_EL2);
		break;
	case 12:
		write_gicreg(val, ICH_LR12_EL2);
		break;
	case 13:
		write_gicreg(val, ICH_LR13_EL2);
		break;
	case 14:
		write_gicreg(val, ICH_LR14_EL2);
		break;
	case 15:
		write_gicreg(val, ICH_LR15_EL2);
		break;
	}
}
/*
 * Write @val to the group-0 active-priority register ICH_AP0R<n>_EL2.
 * Values of @n outside 0..3 are silently ignored (no such register).
 */
static void __vgic_v3_write_ap0rn(u32 val, int n)
{
	switch (n) {
	case 0:
		write_gicreg(val, ICH_AP0R0_EL2);
		break;
	case 1:
		write_gicreg(val, ICH_AP0R1_EL2);
		break;
	case 2:
		write_gicreg(val, ICH_AP0R2_EL2);
		break;
	case 3:
		write_gicreg(val, ICH_AP0R3_EL2);
		break;
	}
}
/*
 * Write @val to the group-1 active-priority register ICH_AP1R<n>_EL2.
 * Values of @n outside 0..3 are silently ignored (no such register).
 */
static void __vgic_v3_write_ap1rn(u32 val, int n)
{
	switch (n) {
	case 0:
		write_gicreg(val, ICH_AP1R0_EL2);
		break;
	case 1:
		write_gicreg(val, ICH_AP1R1_EL2);
		break;
	case 2:
		write_gicreg(val, ICH_AP1R2_EL2);
		break;
	case 3:
		write_gicreg(val, ICH_AP1R3_EL2);
		break;
	}
}
/*
 * Read the group-0 active-priority register ICH_AP0R<n>_EL2 selected
 * by @n. The registers are not index-addressable, so dispatch
 * explicitly; @n must be in 0..3.
 */
static u32 __vgic_v3_read_ap0rn(int n)
{
	switch (n) {
	case 0:
		return read_gicreg(ICH_AP0R0_EL2);
	case 1:
		return read_gicreg(ICH_AP0R1_EL2);
	case 2:
		return read_gicreg(ICH_AP0R2_EL2);
	case 3:
		return read_gicreg(ICH_AP0R3_EL2);
	default:
		unreachable();
	}
}
static u32 __vgic_v3_read_ap1rn(int n)
{
u32 val;
switch (n) { case 0:
val = read_gicreg(ICH_AP1R0_EL2); break; case 1:
val = read_gicreg(ICH_AP1R1_EL2); break; case 2:
val = read_gicreg(ICH_AP1R2_EL2); break; case 3:
val = read_gicreg(ICH_AP1R3_EL2); break; default:
unreachable();
}
/* * Make sure stores to the GIC via the memory mapped interface * are now visible to the system register interface when reading the * LRs, and when reading back the VMCR on non-VHE systems.
*/ if (used_lrs || !has_vhe()) { if (!cpu_if->vgic_sre) {
dsb(sy);
isb();
}
}
if (used_lrs || cpu_if->its_vpe.its_vm) { int i;
u32 elrsr;
if (used_lrs || cpu_if->its_vpe.its_vm) {
write_gicreg(cpu_if->vgic_hcr, ICH_HCR_EL2);
for (i = 0; i < used_lrs; i++)
__gic_v3_set_lr(cpu_if->vgic_lr[i], i);
}
/* * Ensure that writes to the LRs, and on non-VHE systems ensure that * the write to the VMCR in __vgic_v3_activate_traps(), will have * reached the (re)distributors. This ensure the guest will read the * correct values from the memory-mapped interface.
*/ if (used_lrs || !has_vhe()) { if (!cpu_if->vgic_sre) {
isb();
dsb(sy);
}
}
}
void __vgic_v3_activate_traps(struct vgic_v3_cpu_if *cpu_if)
{ /* * VFIQEn is RES1 if ICC_SRE_EL1.SRE is 1. This causes a * Group0 interrupt (as generated in GICv2 mode) to be * delivered as a FIQ to the guest, with potentially fatal * consequences. So we must make sure that ICC_SRE_EL1 has * been actually programmed with the value we want before * starting to mess with the rest of the GIC, and VMCR_EL2 in * particular. This logic must be called before * __vgic_v3_restore_state(). * * However, if the vgic is disabled (ICH_HCR_EL2.EN==0), no GIC is * provisioned at all. In order to prevent illegal accesses to the * system registers to trap to EL1 (duh), force ICC_SRE_EL1.SRE to 1 * so that the trap bits can take effect. Yes, we *loves* the GIC.
*/ if (!(cpu_if->vgic_hcr & ICH_HCR_EL2_En)) {
write_gicreg(ICC_SRE_EL1_SRE, ICC_SRE_EL1);
isb();
} elseif (!cpu_if->vgic_sre) {
write_gicreg(0, ICC_SRE_EL1);
isb();
write_gicreg(cpu_if->vgic_vmcr, ICH_VMCR_EL2);
if (has_vhe()) { /* * Ensure that the write to the VMCR will have reached * the (re)distributors. This ensure the guest will * read the correct values from the memory-mapped * interface.
*/
isb();
dsb(sy);
}
}
/* * GICv5 BET0 FEAT_GCIE_LEGACY doesn't include ICC_SRE_EL2. This is due * to be relaxed in a future spec release, at which point this in * condition can be dropped.
*/ if (!cpus_have_final_cap(ARM64_HAS_GICV5_CPUIF)) { /* * Prevent the guest from touching the ICC_SRE_EL1 system * register. Note that this may not have any effect, as * ICC_SRE_EL2.Enable being RAO/WI is a valid implementation.
*/
write_gicreg(read_gicreg(ICC_SRE_EL2) & ~ICC_SRE_EL2_ENABLE,
ICC_SRE_EL2);
}
/* * If we need to trap system registers, we must write * ICH_HCR_EL2 anyway, even if no interrupts are being * injected. Note that this also applies if we don't expect * any system register access (no vgic at all).
*/ if (static_branch_unlikely(&vgic_v3_cpuif_trap) ||
cpu_if->its_vpe.its_vm || !cpu_if->vgic_sre)
write_gicreg(cpu_if->vgic_hcr, ICH_HCR_EL2);
}
if (!cpu_if->vgic_sre) {
cpu_if->vgic_vmcr = read_gicreg(ICH_VMCR_EL2);
}
/* * Can be dropped in the future when GICv5 spec is relaxed. See comment * above.
*/ if (!cpus_have_final_cap(ARM64_HAS_GICV5_CPUIF)) {
val = read_gicreg(ICC_SRE_EL2);
write_gicreg(val | ICC_SRE_EL2_ENABLE, ICC_SRE_EL2);
}
if (!cpu_if->vgic_sre) { /* Make sure ENABLE is set at EL2 before setting SRE at EL1 */
isb();
write_gicreg(1, ICC_SRE_EL1);
}
/* * If we were trapping system registers, we enabled the VGIC even if * no interrupts were being injected, and we disable it again here.
*/ if (static_branch_unlikely(&vgic_v3_cpuif_trap) ||
cpu_if->its_vpe.its_vm || !cpu_if->vgic_sre)
write_gicreg(0, ICH_HCR_EL2);
}
/* * In compat mode, we cannot access ICC_SRE_EL1 at any EL * other than EL1 itself; just return the * ICH_VTR_EL2. ICC_IDR0_EL1 is only implemented on a GICv5 * system, so we first check if we have GICv5 support.
*/ if (cpus_have_final_cap(ARM64_HAS_GICV5_CPUIF)) return read_gicreg(ICH_VTR_EL2);
sre = read_gicreg(ICC_SRE_EL1); /* * To check whether we have a MMIO-based (GICv2 compatible) * CPU interface, we need to disable the system register * view. * * Table 11-2 "Permitted ICC_SRE_ELx.SRE settings" indicates * that to be able to set ICC_SRE_EL1.SRE to 0, all the * interrupt overrides must be set. You've got to love this. * * As we always run VHE with HCR_xMO set, no extra xMO * manipulation is required in that case. * * To safely disable SRE, we have to prevent any interrupt * from firing (which would be deadly). This only makes sense * on VHE, as interrupts are already masked for nVHE as part * of the exception entry to EL2.
*/ if (has_vhe()) {
flags = local_daif_save();
} else {
sysreg_clear_set_hcr(0, HCR_AMO | HCR_FMO | HCR_IMO);
isb();
}
/* * If dealing with a GICv2 emulation on GICv3, VMCR_EL2.VFIQen * is dependent on ICC_SRE_EL1.SRE, and we have to perform the * VMCR_EL2 save/restore in the world switch.
*/ if (cpu_if->vgic_sre)
__vgic_v3_write_vmcr(cpu_if->vgic_vmcr);
__vgic_v3_restore_aprs(cpu_if);
}
/*
 * Minimum Binary Point Register value supported by this CPU interface.
 * See Pseudocode for VPriorityGroup.
 */
static int __vgic_v3_bpr_min(void)
{
	return 8 - vtr_to_nr_pre_bits(read_gicreg(ICH_VTR_EL2));
}
/* * The ICH_AP0Rn_EL2 and ICH_AP1Rn_EL2 registers * contain the active priority levels for this VCPU * for the maximum number of supported priority * levels, and we return the full priority level only * if the BPR is programmed to its minimum, otherwise * we return a combination of the priority level and * subpriority, as determined by the setting of the * BPR, but without the full subpriority.
*/
val = __vgic_v3_read_ap0rn(i);
val |= __vgic_v3_read_ap1rn(i); if (!val) {
hap += 32; continue;
}
/*
 * Convert a priority to a preemption level, taking the relevant BPR
 * into account by zeroing the sub-priority bits.
 */
static u8 __vgic_v3_pri_to_pre(u8 pri, u32 vmcr, int grp)
{
	unsigned int bpr;

	/*
	 * NOTE(review): the body of this function was truncated in the text
	 * under review after the 'bpr' declaration; reconstructed from the
	 * stated contract (group selects which BPR applies, group 0 uses
	 * BPR0+1 semantics) — confirm against the upstream source.
	 */
	if (!grp)
		bpr = __vgic_v3_get_bpr0(vmcr) + 1;
	else
		bpr = __vgic_v3_get_bpr1(vmcr);

	/* Keep only the preemption-level bits, drop the sub-priority */
	return pri & (GENMASK(7, 0) << bpr);
}
/*
 * The priority value is independent of any of the BPR values, so we
 * normalize it using the minimal BPR value. This guarantees that no
 * matter what the guest does with its BPR, we can always set/get the
 * same value of a priority.
 */
static void __vgic_v3_set_active_priority(u8 pri, u32 vmcr, int grp)
{
	u8 pre, ap;
	u32 val;
	int apr;

	pre = __vgic_v3_pri_to_pre(pri, vmcr, grp);
	ap = pre >> __vgic_v3_bpr_min();
	apr = ap / 32;

	/* Set the matching bit in the APR register of the right group */
	if (!grp) {
		val = __vgic_v3_read_ap0rn(apr);
		__vgic_v3_write_ap0rn(val | BIT(ap % 32), apr);
	} else {
		val = __vgic_v3_read_ap1rn(apr);
		__vgic_v3_write_ap1rn(val | BIT(ap % 32), apr);
	}
}
/*
 * Emulate a guest write to ICC_DIR_EL1: deactivate the interrupt whose
 * INTID is held in GPR @rt. Only meaningful with split priority-drop /
 * deactivate (EOImode == 1).
 */
static void __vgic_v3_write_dir(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	u32 vid = vcpu_get_reg(vcpu, rt);
	u64 lr_val;
	int lr;

	/* EOImode == 0, nothing to be done here */
	if (!(vmcr & ICH_VMCR_EOIM_MASK))
		return;

	/* No deactivate to be performed on an LPI */
	if (vid >= VGIC_MIN_LPI)
		return;

	lr = __vgic_v3_find_active_lr(vcpu, vid, &lr_val);
	if (lr == -1) {
		__vgic_v3_bump_eoicount();
		return;
	}

	__vgic_v3_clear_active_lr(lr, lr_val);
}
/*
 * Emulate a guest write to ICC_EOIR{0,1}_EL1: drop the highest active
 * priority, and (when EOImode == 0) also deactivate the matching LR.
 *
 * Fix: the text under review used 'lr_prio' without ever assigning it
 * (uninitialized read); the priority must be extracted from the LR value
 * before the priority/group sanity check.
 */
static void __vgic_v3_write_eoir(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	u32 vid = vcpu_get_reg(vcpu, rt);
	u64 lr_val;
	u8 lr_prio, act_prio;
	int lr, grp;

	grp = __vgic_v3_get_group(vcpu);

	/* Drop priority in any case */
	act_prio = __vgic_v3_clear_highest_active_priority();

	lr = __vgic_v3_find_active_lr(vcpu, vid, &lr_val);
	if (lr == -1) {
		/* Do not bump EOIcount for LPIs that aren't in the LRs */
		if (!(vid >= VGIC_MIN_LPI))
			__vgic_v3_bump_eoicount();
		return;
	}

	/* EOImode == 1 and not an LPI, nothing to be done here */
	if ((vmcr & ICH_VMCR_EOIM_MASK) && !(vid >= VGIC_MIN_LPI))
		return;

	lr_prio = (lr_val & ICH_LR_PRIORITY_MASK) >> ICH_LR_PRIORITY_SHIFT;

	/* If priorities or group do not match, the guest has fscked-up. */
	if (grp != !!(lr_val & ICH_LR_GROUP) ||
	    __vgic_v3_pri_to_pre(lr_prio, vmcr, grp) != act_prio)
		return;

	/* Let's now perform the deactivation */
	__vgic_v3_clear_active_lr(lr, lr_val);
}
switch (sysreg) { case SYS_ICC_IGRPEN0_EL1: if (is_read &&
(__vcpu_sys_reg(vcpu, HFGRTR_EL2) & HFGRTR_EL2_ICC_IGRPENn_EL1)) returntrue;
if (!is_read &&
(__vcpu_sys_reg(vcpu, HFGWTR_EL2) & HFGWTR_EL2_ICC_IGRPENn_EL1)) returntrue;
fallthrough;
case SYS_ICC_AP0Rn_EL1(0): case SYS_ICC_AP0Rn_EL1(1): case SYS_ICC_AP0Rn_EL1(2): case SYS_ICC_AP0Rn_EL1(3): case SYS_ICC_BPR0_EL1: case SYS_ICC_EOIR0_EL1: case SYS_ICC_HPPIR0_EL1: case SYS_ICC_IAR0_EL1: return ich_hcr & ICH_HCR_EL2_TALL0;
case SYS_ICC_IGRPEN1_EL1: if (is_read &&
(__vcpu_sys_reg(vcpu, HFGRTR_EL2) & HFGRTR_EL2_ICC_IGRPENn_EL1)) returntrue;
if (!is_read &&
(__vcpu_sys_reg(vcpu, HFGWTR_EL2) & HFGWTR_EL2_ICC_IGRPENn_EL1)) returntrue;
fallthrough;
case SYS_ICC_AP1Rn_EL1(0): case SYS_ICC_AP1Rn_EL1(1): case SYS_ICC_AP1Rn_EL1(2): case SYS_ICC_AP1Rn_EL1(3): case SYS_ICC_BPR1_EL1: case SYS_ICC_EOIR1_EL1: case SYS_ICC_HPPIR1_EL1: case SYS_ICC_IAR1_EL1: return ich_hcr & ICH_HCR_EL2_TALL1;
case SYS_ICC_DIR_EL1: if (ich_hcr & ICH_HCR_EL2_TDIR) returntrue;
fallthrough;
case SYS_ICC_RPR_EL1: case SYS_ICC_CTLR_EL1: case SYS_ICC_PMR_EL1: return ich_hcr & ICH_HCR_EL2_TC;
/*
 * NOTE(review): the following disclaimer is website boilerplate that was
 * accidentally pasted into this source file; preserved here as a comment
 * (translated from German) so the file remains compilable — it should
 * simply be removed:
 * "The information on this web page was carefully compiled to the best
 * of our knowledge. However, neither completeness, nor correctness, nor
 * quality of the provided information is guaranteed.
 * Note: the colored syntax display and the measurement are still
 * experimental."
 */