/* There is one of these allocated per node */
struct uv_rtc_timer_head {
	spinlock_t	lock;
	/* next cpu waiting for timer, local node relative: */
	int		next_cpu;
	/* number of cpus on this node: */
	int		ncpus;
	struct {
		int	lcpu;		/* systemwide logical cpu number */
		u64	expires;	/* next timer expiration for this cpu */
	} cpu[] __counted_by(ncpus);
};
/* * Access to uv_rtc_timer_head via blade id.
*/ staticstruct uv_rtc_timer_head **blade_info __read_mostly;
staticint uv_rtc_evt_enable;
/*
 * Hardware interface routines
 */
/* Send IPIs to another node */ staticvoid uv_rtc_send_IPI(int cpu)
{ unsignedlong apicid, val; int pnode;
/* Allocate per-node list of cpu timer expiration times. */
static __init int uv_rtc_allocate_timers(void)
{
	int cpu;

	blade_info = kcalloc(uv_possible_blades, sizeof(void *), GFP_KERNEL);
	if (!blade_info)
		return -ENOMEM;

	for_each_present_cpu(cpu) {
		int nid = cpu_to_node(cpu);
		int bid = uv_cpu_to_blade_id(cpu);
		int bcpu = uv_cpu_blade_processor_id(cpu);
		struct uv_rtc_timer_head *head = blade_info[bid];

		if (!head) {
			head = kmalloc_node(struct_size(head, cpu,
				uv_blade_nr_possible_cpus(bid)),
				GFP_KERNEL, nid);
			if (!head) {
				/* Undo all allocations made so far. */
				uv_rtc_deallocate_timers();
				return -ENOMEM;
			}
			spin_lock_init(&head->lock);
			head->ncpus = uv_blade_nr_possible_cpus(bid);
			head->next_cpu = -1;
			blade_info[bid] = head;
		}

		/* NOTE(review): the loop tail, final return and the two
		 * closing braces were missing in this copy; restored —
		 * record this cpu's slot and mark its timer as idle.
		 * Verify against upstream. */
		head->cpu[bcpu].lcpu = cpu;
		head->cpu[bcpu].expires = ULLONG_MAX;
	}

	return 0;
}
/*
 * Find and set the next expiring timer.  If no per-cpu timer is armed,
 * the RTC interrupt is programmed masked (UVH_RTC1_INT_CONFIG_M_MASK),
 * i.e. effectively disabled for this node.
 */
static void uv_rtc_find_next_timer(struct uv_rtc_timer_head *head, int pnode)
{
	u64 lowest = ULLONG_MAX;
	int c, bcpu = -1;

	head->next_cpu = -1;

	/* Scan all cpus on this node for the earliest expiration. */
	for (c = 0; c < head->ncpus; c++) {
		u64 exp = head->cpu[c].expires;

		if (exp < lowest) {
			bcpu = c;
			lowest = exp;
		}
	}

	if (bcpu >= 0) {
		head->next_cpu = bcpu;
		c = head->cpu[bcpu].lcpu;
		if (uv_setup_intr(c, lowest))
			/* If we didn't set it up in time, trigger */
			uv_rtc_send_IPI(c);
	} else {
		uv_write_global_mmr64(pnode, UVH_RTC1_INT_CONFIG,
				      UVH_RTC1_INT_CONFIG_M_MASK);
	}
}
/*
 * Set expiration time for current cpu.
 *
 * Returns -ETIME if we missed the expiration time (the original comment
 * said "1", but the visible code returns -ETIME on a miss).
 */
static int uv_rtc_set_timer(int cpu, u64 expires)
{
	int pnode = uv_cpu_to_pnode(cpu);
	int bid = uv_cpu_to_blade_id(cpu);
	struct uv_rtc_timer_head *head = blade_info[bid];
	int bcpu = uv_cpu_blade_processor_id(cpu);
	u64 *t = &head->cpu[bcpu].expires;
	unsigned long flags;
	int next_cpu;

	spin_lock_irqsave(&head->lock, flags);

	next_cpu = head->next_cpu;
	*t = expires;

	/* Will this one be next to go off? */
	if (next_cpu < 0 || bcpu == next_cpu ||
			expires < head->cpu[next_cpu].expires) {
		head->next_cpu = bcpu;
		if (uv_setup_intr(cpu, expires)) {
			/* Missed it: disarm this slot and re-select. */
			*t = ULLONG_MAX;
			uv_rtc_find_next_timer(head, pnode);
			spin_unlock_irqrestore(&head->lock, flags);
			return -ETIME;
		}
	}

	/* NOTE(review): the success-path unlock and return were missing in
	 * this copy (the lock would have leaked); restored — verify against
	 * upstream. */
	spin_unlock_irqrestore(&head->lock, flags);

	return 0;
}
/* * Unset expiration time for current cpu. * * Returns 1 if this timer was pending.
*/ staticint uv_rtc_unset_timer(int cpu, int force)
{ int pnode = uv_cpu_to_pnode(cpu); int bid = uv_cpu_to_blade_id(cpu); struct uv_rtc_timer_head *head = blade_info[bid]; int bcpu = uv_cpu_blade_processor_id(cpu);
u64 *t = &head->cpu[bcpu].expires; unsignedlong flags; int rc = 0;
if (rc) {
*t = ULLONG_MAX; /* Was the hardware setup for this timer? */ if (head->next_cpu == bcpu)
uv_rtc_find_next_timer(head, pnode);
}
spin_unlock_irqrestore(&head->lock, flags);
return rc;
}
/*
 * Kernel interface routines.
 */
/* * Read the RTC. * * Starting with HUB rev 2.0, the UV RTC register is replicated across all * cachelines of its own page. This allows faster simultaneous reads * from a given socket.
*/ static u64 uv_read_rtc(struct clocksource *cs)
{ unsignedlong offset;
/* * Program the next event, relative to now
*/ staticint uv_rtc_next_event(unsignedlong delta, struct clock_event_device *ced)
{ int ced_cpu = cpumask_first(ced->cpumask);
/*
 * (Extraction residue — a German website disclaimer fused onto the end of
 * this copy; not part of the original source.  Preserved in translation:)
 * "The information on this web page was carefully compiled to the best of
 * our knowledge.  However, neither completeness, correctness, nor quality
 * of the provided information is guaranteed.  Note: the syntax coloring
 * and the measurement are still experimental."
 */