/*
 * Read accesses to both GICD_ICENABLER and GICD_ISENABLER return the value
 * of the enabled bit, so there is only one function for both here.
 */
unsigned long vgic_mmio_read_enable(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	u32 value = 0;
	int i;

	/* Loop over all IRQs affected by this read */
	for (i = 0; i < len * 8; i++) {
		struct vgic_irq *irq = vgic_get_vcpu_irq(vcpu, intid + i);

		if (irq->enabled)
			value |= (1U << i);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return value;
}
/* Loop over all IRQs affected by this read */ for (i = 0; i < len * 8; i++) { struct vgic_irq *irq = vgic_get_vcpu_irq(vcpu, intid + i); unsignedlong flags; bool val;
/* * When used from userspace with a GICv3 model: * * Pending state of interrupt is latched in pending_latch * variable. Userspace will save and restore pending state * and line_level separately. * Refer to Documentation/virt/kvm/devices/arm-vgic-v3.rst * for handling of ISPENDR and ICPENDR.
*/
raw_spin_lock_irqsave(&irq->irq_lock, flags); if (irq->hw && vgic_irq_is_sgi(irq->intid)) { int err;
val = false;
err = irq_get_irqchip_state(irq->host_irq,
IRQCHIP_STATE_PENDING,
&val);
WARN_RATELIMIT(err, "IRQ %d", irq->host_irq);
} elseif (!is_user && vgic_irq_is_mapped_level(irq)) {
val = vgic_get_phys_line_level(irq);
} else { switch (vcpu->kvm->arch.vgic.vgic_model) { case KVM_DEV_TYPE_ARM_VGIC_V3: if (is_user) {
val = irq->pending_latch; break;
}
fallthrough; default:
val = irq_is_pending(irq); break;
}
}
value |= ((u32)val << i);
raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
/* GICD_ISPENDR0 SGI bits are WI when written from the guest. */ if (is_vgic_v2_sgi(vcpu, irq) && !is_user) {
vgic_put_irq(vcpu->kvm, irq); continue;
}
raw_spin_lock_irqsave(&irq->irq_lock, flags);
/* * GICv2 SGIs are terribly broken. We can't restore * the source of the interrupt, so just pick the vcpu * itself as the source...
*/ if (is_vgic_v2_sgi(vcpu, irq))
irq->source |= BIT(vcpu->vcpu_id);
if (irq->hw && vgic_irq_is_sgi(irq->intid)) { /* HW SGI? Ask the GIC to inject it */ int err;
err = irq_set_irqchip_state(irq->host_irq,
IRQCHIP_STATE_PENDING, true);
WARN_RATELIMIT(err, "IRQ %d", irq->host_irq);
/* Must be called with irq->irq_lock held */
static void vgic_hw_irq_cpending(struct kvm_vcpu *vcpu, struct vgic_irq *irq)
{
	irq->pending_latch = false;

	/*
	 * We don't want the guest to effectively mask the physical
	 * interrupt by doing a write to SPENDR followed by a write to
	 * CPENDR for HW interrupts, so we clear the active state on
	 * the physical side if the virtual interrupt is not active.
	 * This may lead to taking an additional interrupt on the
	 * host, but that should not be a problem as the worst that
	 * can happen is an additional vgic injection. We also clear
	 * the pending state to maintain proper semantics for edge HW
	 * interrupts.
	 */
	vgic_irq_set_phys_pending(irq, false);
	if (!irq->active)
		vgic_irq_set_phys_active(irq, false);
}
/* GICD_ICPENDR0 SGI bits are WI when written from the guest. */ if (is_vgic_v2_sgi(vcpu, irq) && !is_user) {
vgic_put_irq(vcpu->kvm, irq); continue;
}
raw_spin_lock_irqsave(&irq->irq_lock, flags);
/* * More fun with GICv2 SGIs! If we're clearing one of them * from userspace, which source vcpu to clear? Let's not * even think of it, and blow the whole set.
*/ if (is_vgic_v2_sgi(vcpu, irq))
irq->source = 0;
if (irq->hw && vgic_irq_is_sgi(irq->intid)) { /* HW SGI? Ask the GIC to clear its pending bit */ int err;
err = irq_set_irqchip_state(irq->host_irq,
IRQCHIP_STATE_PENDING, false);
WARN_RATELIMIT(err, "IRQ %d", irq->host_irq);
/*
 * If we are fiddling with an IRQ's active state, we have to make sure the IRQ
 * is not queued on some running VCPU's LRs, because then the change to the
 * active state can be overwritten when the VCPU's state is synced coming back
 * from the guest.
 *
 * For shared interrupts as well as GICv3 private interrupts accessed from the
 * non-owning CPU, we have to stop all the VCPUs because interrupts can be
 * migrated while we don't hold the IRQ locks and we don't want to be chasing
 * moving targets.
 *
 * For GICv2 private interrupts we don't have to do anything because
 * userspace accesses to the VGIC state already require all VCPUs to be
 * stopped, and only the VCPU itself can modify its private interrupts
 * active state, which guarantees that the VCPU is not running.
 */
static void vgic_access_active_prepare(struct kvm_vcpu *vcpu, u32 intid)
{
	if ((vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3 &&
	     vcpu != kvm_get_running_vcpu()) ||
	    intid >= VGIC_NR_PRIVATE_IRQS)
		kvm_arm_halt_guest(vcpu->kvm);
}
/*
 * Read the virtual active state of a range of interrupts; one bit per
 * interrupt, starting at the bit for intid.
 */
static unsigned long __vgic_mmio_read_active(struct kvm_vcpu *vcpu,
					     gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	u32 value = 0;
	int i;

	/* Loop over all IRQs affected by this read */
	for (i = 0; i < len * 8; i++) {
		struct vgic_irq *irq = vgic_get_vcpu_irq(vcpu, intid + i);

		/*
		 * Even for HW interrupts, don't evaluate the HW state as
		 * all the guest is interested in is the virtual state.
		 */
		if (irq->active)
			value |= (1U << i);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return value;
}
/*
 * Change the virtual active state of a single interrupt, for both the
 * set-active and clear-active registers (selected via 'active').
 */
static void __vgic_mmio_write_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
				     bool active)
{
	unsigned long flags;
	struct kvm_vcpu *requester_vcpu = kvm_get_running_vcpu();

	raw_spin_lock_irqsave(&irq->irq_lock, flags);

	if (irq->hw && !vgic_irq_is_sgi(irq->intid)) {
		/* No requester vcpu means this is a userspace access */
		vgic_hw_irq_change_active(vcpu, irq, active, !requester_vcpu);
	} else if (irq->hw && vgic_irq_is_sgi(irq->intid)) {
		/*
		 * GICv4.1 VSGI feature doesn't track an active state,
		 * so let's not kid ourselves, there is nothing we can
		 * do here.
		 */
		irq->active = false;
	} else {
		u32 model = vcpu->kvm->arch.vgic.vgic_model;
		u8 active_source;

		irq->active = active;

		/*
		 * The GICv2 architecture indicates that the source CPUID for
		 * an SGI should be provided during an EOI which implies that
		 * the active state is stored somewhere, but at the same time
		 * this state is not architecturally exposed anywhere and we
		 * have no way of knowing the right source.
		 *
		 * This may lead to a VCPU not being able to receive
		 * additional instances of a particular SGI after migration
		 * for a GICv2 VM on some GIC implementations. Oh well.
		 */
		active_source = (requester_vcpu) ? requester_vcpu->vcpu_id : 0;

		if (model == KVM_DEV_TYPE_ARM_VGIC_V2 &&
		    active && vgic_irq_is_sgi(irq->intid))
			irq->active_source = active_source;
	}

	if (irq->active)
		vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
	else
		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
}
/*
 * Read the priority bytes of a range of interrupts; one byte per
 * interrupt, least significant byte first.
 */
unsigned long vgic_mmio_read_priority(struct kvm_vcpu *vcpu,
				      gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 8);
	int i;
	u64 val = 0;

	for (i = 0; i < len; i++) {
		struct vgic_irq *irq = vgic_get_vcpu_irq(vcpu, intid + i);

		val |= (u64)irq->priority << (i * 8);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return val;
}
/*
 * We currently don't handle changing the priority of an interrupt that
 * is already pending on a VCPU. If there is a need for this, we would
 * need to make this VCPU exit and re-evaluate the priorities, potentially
 * leading to this interrupt getting presented now to the guest (if it has
 * been masked by the priority mask before).
 */
void vgic_mmio_write_priority(struct kvm_vcpu *vcpu,
			      gpa_t addr, unsigned int len,
			      unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 8);
	int i;
	unsigned long flags;

	for (i = 0; i < len; i++) {
		struct vgic_irq *irq = vgic_get_vcpu_irq(vcpu, intid + i);

		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		/* Narrow the priority range to what we actually support */
		irq->priority = (val >> (i * 8)) & GENMASK(7, 8 - VGIC_PRI_BITS);
		if (irq->hw && vgic_irq_is_sgi(irq->intid))
			vgic_update_vsgi(irq);
		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);

		vgic_put_irq(vcpu->kvm, irq);
	}
}
/*
 * Read the trigger configuration of a range of interrupts; two bits per
 * interrupt, with bit 1 of each field set for edge-triggered interrupts.
 */
unsigned long vgic_mmio_read_config(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 2);
	u32 value = 0;
	int i;

	for (i = 0; i < len * 4; i++) {
		struct vgic_irq *irq = vgic_get_vcpu_irq(vcpu, intid + i);

		if (irq->config == VGIC_CONFIG_EDGE)
			value |= (2U << (i * 2));

		vgic_put_irq(vcpu->kvm, irq);
	}

	return value;
}
for (i = 0; i < len * 4; i++) { struct vgic_irq *irq;
/* * The configuration cannot be changed for SGIs in general, * for PPIs this is IMPLEMENTATION DEFINED. The arch timer * code relies on PPIs being level triggered, so we also * make them read-only here.
*/ if (intid + i < VGIC_NR_PRIVATE_IRQS) continue;
/* * Line level is set irrespective of irq type * (level or edge) to avoid dependency that VM should * restore irq config before line level.
*/
new_level = !!(val & (1U << i));
raw_spin_lock_irqsave(&irq->irq_lock, flags);
irq->line_level = new_level; if (new_level)
vgic_queue_irq_unlock(vcpu->kvm, irq, flags); else
raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
/*
 * kvm_mmio_read_buf() returns a value in a format where it can be converted
 * to a byte array and be directly observed as the guest wanted it to appear
 * in memory if it had done the store itself, which is LE for the GIC, as the
 * guest knows the GIC is always LE.
 *
 * We convert this value to the CPUs native format to deal with it as a data
 * value.
 */
unsigned long vgic_data_mmio_bus_to_host(const void *val, unsigned int len)
{
	unsigned long data = kvm_mmio_read_buf(val, len);

	switch (len) {
	case 1:
		return data;
	case 2:
		return le16_to_cpu(data);
	case 4:
		return le32_to_cpu(data);
	default:
		return le64_to_cpu(data);
	}
}
/*
 * kvm_mmio_write_buf() expects a value in a format such that if converted to
 * a byte array it is observed as the guest would see it if it could perform
 * the load directly. Since the GIC is LE, and the guest knows this, the
 * guest expects a value in little endian format.
 *
 * We convert the data value from the CPUs native format to LE so that the
 * value is returned in the proper format.
 */
void vgic_data_host_to_mmio_bus(void *buf, unsigned int len,
				unsigned long data)
{
	switch (len) {
	case 1:
		break;
	case 2:
		data = cpu_to_le16(data);
		break;
	case 4:
		data = cpu_to_le32(data);
		break;
	default:
		data = cpu_to_le64(data);
	}

	kvm_mmio_write_buf(buf, len, data);
}
region = vgic_get_mmio_region(vcpu, iodev, addr, len); if (!region) {
memset(val, 0, len); return 0;
}
switch (iodev->iodev_type) { case IODEV_CPUIF:
data = region->read(vcpu, addr, len); break; case IODEV_DIST:
data = region->read(vcpu, addr, len); break; case IODEV_REDIST:
data = region->read(iodev->redist_vcpu, addr, len); break; case IODEV_ITS:
data = region->its_read(vcpu->kvm, iodev->its, addr, len); break;
}
switch (type) { case VGIC_V2:
len = vgic_v2_init_dist_iodev(io_device); break; case VGIC_V3:
len = vgic_v3_init_dist_iodev(io_device); break; default:
BUG();
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.