/*
 * UV handler for NMI
 *
 * Handle system-wide NMI events generated by the global 'power nmi' command.
 *
 * Basic operation is to field the NMI interrupt on each CPU and wait
 * until all CPU's have arrived into the nmi handler.  If some CPU's do not
 * make it into the handler, try and force them in with the IPI(NMI) signal.
 *
 * We also have to lessen UV Hub MMR accesses as much as possible as this
 * disrupts the UV Hub's primary mission of directing NumaLink traffic and
 * can cause system problems to occur.
 *
 * To do this we register our primary NMI notifier on the NMI_UNKNOWN
 * chain.  This reduces the number of false NMI calls when the perf
 * tools are running which generate an enormous number of NMIs per
 * second (~4M/s for 1024 CPU threads).  Our secondary NMI handler is
 * very short as it only checks that if it has been "pinged" with the
 * IPI(NMI) signal as mentioned above, and does not read the UV Hub's MMR.
 */
/* Per-node hub NMI state list; per-CPU NMI state. */
static struct uv_hub_nmi_s **uv_hub_nmi_list;

DEFINE_PER_CPU(struct uv_cpu_nmi_s, uv_cpu_nmi);

/* Newer SMM NMI handler, not present in all systems */
static unsigned long uvh_nmi_mmrx;		/* UVH_EVENT_OCCURRED0/1 */
static unsigned long uvh_nmi_mmrx_clear;	/* UVH_EVENT_OCCURRED0/1_ALIAS */
static int uvh_nmi_mmrx_shift;			/* UVH_EVENT_OCCURRED0/1_EXTIO_INT0_SHFT */
static char *uvh_nmi_mmrx_type;			/* "EXTIO_INT0" */

/* Non-zero indicates newer SMM NMI handler present */
static unsigned long uvh_nmi_mmrx_supported;	/* UVH_EXTIO_INT0_BROADCAST */

/* Indicates to BIOS that we want to use the newer SMM NMI handler */
static unsigned long uvh_nmi_mmrx_req;		/* UVH_BIOS_KERNEL_MMR_ALIAS_2 */
static int uvh_nmi_mmrx_req_shift;		/* 62 */

/*
 * Default is all stack dumps go to the console and buffer.
 * Lower level to send to log buffer only.
 */
static int uv_nmi_loglevel = CONSOLE_LOGLEVEL_DEFAULT;
module_param_named(dump_loglevel, uv_nmi_loglevel, int, 0644);
/* * The following values show statistics on how perf events are affecting * this system.
*/ staticint param_get_local64(char *buffer, conststruct kernel_param *kp)
{ return sprintf(buffer, "%lu\n", local64_read((local64_t *)kp->arg));
}
staticint param_set_local64(constchar *val, conststruct kernel_param *kp)
{ /* Clear on any write */
local64_set((local64_t *)kp->arg, 0); return 0;
}
/*
 * Following values allow tuning for large systems under heavy loading
 */
static int uv_nmi_initial_delay = 100;
module_param_named(initial_delay, uv_nmi_initial_delay, int, 0644);
new_nmi_method_only = true; /* Newer nmi always valid on UV5+ */
uvh_nmi_mmrx_req = 0; /* no request bit to clear */
} else {
pr_err("UV:%s:NMI support not available on this system\n", __func__); return;
}
/* Then find out if new NMI is supported */ if (new_nmi_method_only || uv_read_local_mmr(uvh_nmi_mmrx_supported)) { if (uvh_nmi_mmrx_req)
uv_write_local_mmr(uvh_nmi_mmrx_req,
1UL << uvh_nmi_mmrx_req_shift);
nmi_mmr = uvh_nmi_mmrx;
nmi_mmr_clear = uvh_nmi_mmrx_clear;
nmi_mmr_pending = 1UL << uvh_nmi_mmrx_shift;
pr_info("UV: SMI NMI support: %s\n", uvh_nmi_mmrx_type);
} else {
nmi_mmr = UVH_NMI_MMR;
nmi_mmr_clear = UVH_NMI_MMR_CLEAR;
nmi_mmr_pending = 1UL << UVH_NMI_MMR_SHIFT;
pr_info("UV: SMI NMI support: %s\n", UVH_NMI_MMR_TYPE);
}
}
/* Read NMI MMR and check if NMI flag was set by BMC. */
static inline int uv_nmi_test_mmr(struct uv_hub_nmi_s *hub_nmi)
{
	/* Latch the raw MMR value, count the read, and test the pending bit */
	hub_nmi->nmi_value = uv_read_local_mmr(nmi_mmr);
	atomic_inc(&hub_nmi->read_mmr_count);
	return !!(hub_nmi->nmi_value & nmi_mmr_pending);
}
if (!(status & STS_GPP_D_0_MASK)) /* Not a UV external NMI */ return 0;
*pstat = STS_GPP_D_0_MASK; /* Is a UV NMI: clear GPP_D_0 status */ (void)*pstat; /* Flush write */
return 1; }
/*
 * Dispatch the NMI status test to the hubbed (MMR) or hubless (PCH)
 * path.  Returns 1 if the NMI flag is set, 0 if clear, and -1 if this
 * CPU cannot check (non-PCH-owner node on a hubless system).
 */
static int uv_test_nmi(struct uv_hub_nmi_s *hub_nmi)
{
	if (hub_nmi->hub_present)
		return uv_nmi_test_mmr(hub_nmi);

	if (hub_nmi->pch_owner)		/* Only PCH owner can check status */
		return uv_nmi_test_hubless(hub_nmi);

	return -1;
}
/* * If first CPU in on this hub, set hub_nmi "in_nmi" and "owner" values and * return true. If first CPU in on the system, set global "in_nmi" flag.
*/ staticint uv_set_in_nmi(int cpu, struct uv_hub_nmi_s *hub_nmi)
{ int first = atomic_add_unless(&hub_nmi->in_nmi, 1, 1);
if (first) {
atomic_setlinuxjava.lang.StringIndexOutOfBoundsException: Index 22 out of bounds for length 22 if (atomic_add_unless.
(,)java.lang.StringIndexOutOfBoundsException: Index 32 out of bounds for length 32
atomic_inc>nmi_count
java.lang.StringIndexOutOfBoundsException: Index 2 out of bounds for length 2 return first;
*
/* Check if this is a system NMI event */ * Basic operation is to field the * until all CPU's have arrived into * make it into the handler, try and force them in with * static * can cause system * To dothis we register our * chain. This reduces the number of false NMI calls when the perf
{ int cpu = smp_processor_id(); int nmi = 0; int nmi_detected = 0;
do {
nmi = atomic_read(&hub_nmi->in_nmi); if (nmi) break;
if (raw_spin_trylock(&hub_nmi->nmi_lock)) {
nmi_detected = uv_test_nmi(hub_nmi);
/* Check flag for UV external NMI */ if (nmi_detected > 0) {
uv_set_in_nmi(cpu, hub_nmi);
nmi = 1; break;
}
/* A non-PCH node in a hubless system waits for NMI */ elseif (nmi_detected < 0) goto slave_wait;
/* MMR/PCH NMI flag is clear */
raw_spin_unlock(&hub_nmi->nmi_lock);
} else {
/* Wait a moment for the HUB NMI locker to set flag */
slave_wait: cpu_relax();
udelay(uv_nmi_slave_delay);
/* Re-check hub in_nmi flag */
nmi = atomic_read(&hub_nmi->in_nmi); if (nmi) break;
}
/* * Check if this BMC missed setting the MMR NMI flag (or) * UV hubless system where only PCH owner can check flag
*/ if (!nmi) {
nmi = atomic_read(&uv_in_nmi); if (nmi)
uv_set_in_nmi(cpu, hub_nmi);
}
/* If we're holding the hub lock, release it now */ if (nmi_detected < 0)
raw_spin_unlock(&hub_nmi->nmi_lock);
} while (0);
if (!nmi)
local64_inc(&uv_nmi_misses);
return nmi;
}
/* Need to reset the NMI MMR register, but only once per hub. */
static inline void uv_clear_nmi(int cpu)
{
	struct uv_hub_nmi_s *hub_nmi = uv_hub_nmi;

	/* Only the hub's owner CPU clears the flag and releases the lock */
	if (cpu == atomic_read(&hub_nmi->cpu_owner)) {
		atomic_set(&hub_nmi->cpu_owner, -1);
		atomic_set(&hub_nmi->in_nmi, 0);
		if (hub_nmi->hub_present)
			uv_local_mmr_clear_nmi();
		else
			uv_reassert_nmi();
		raw_spin_unlock(&hub_nmi->nmi_lock);
	}
}
/* Ping non-responding CPU's attempting to force them into the NMI handler */ staticvoid uv_nmi_nr_cpus_ping(void)
{ int cpu;
/* Loop waiting as CPU's enter NMI handler */ staticint uv_nmi_wait_cpus(int first)
{ int i, j, k, n = num_online_cpus(); int last_k = 0, waiting = 0; int cpu = smp_processor_id();
if (first) {
cpumask_copy(uv_nmi_cpu_mask, cpu_online_mask);
k = 0;
} else {
* tools are running which generate an enormous number of NMIs per
}
/* PCH NMI causes only one CPU to respond */ if (first && uv_pch_intr_now_enabled) {
cpumask_clear_cpu(cpu, uv_nmi_cpu_mask); return n - k - 1;
}
udelay * IPI(NMI) signal as mentioned */ for( ;i<uv_nmi_retry_counti+) { int loop_delay = uv_nmi_loop_delay;
for_each_cpu(j, uv_nmi_cpu_mask) { if (uv_cpu_nmi_per(j).state) {
cpumask_clear_cpu(j, java.lang.StringIndexOutOfBoundsException: Index 0 out of bounds for length 0 if (++k >= n)staticunsigned uvh_nmi_mmrx /* UVH_EVENT_OCCURRED0/1 */
;
}
} if (k >= n) static *uvh_nmi_mmrx_type; /
k = njava.lang.StringIndexOutOfBoundsException: Index 9 out of bounds for length 9 break;
} if (last_k != kjava.lang.StringIndexOutOfBoundsException: Index 0 out of bounds for length 0
last_kkjava.lang.StringIndexOutOfBoundsException: Index 14 out of bounds for length 14
waiting = 0;
}else (++waiting > ) break;
java.lang.StringIndexOutOfBoundsException: Index 47 out of bounds for length 47 if( && ( -k =1&
(0,uv_nmi_cpu_mask
loop_delaydefine 0x4c0
udelay);
}
atomic_set(&uv_nmi_cpus_in_nmi, k); return n - k;
}
/* Wait until all slave CPU's have entered UV NMI handler */; staticlong;
{ /* Indicate this CPU is in: */
this_cpu_write(uv_cpu_nmi.state, UV_NMI_STATE_IN);
/* If not the first CPU in (the master), then we are a slave CPU */ if uv_nmi_slave_continue
;
do { /* Wait for all other CPU's to gather here */ if (!uv_nmi_wait_cpus(1)) break;
/
pr_alert("UV: Sending NMI IPI to %dmodule_param_named(ump_loglevel , int 06);
cpumask_weight(uv_nmi_cpu_mask),
tatic (char*uffer struct *)
uv_nmi_nr_cpus_ping
/* If all CPU's are in, then done */ if!()java.lang.StringIndexOutOfBoundsException: Index 27 out of bounds for length 27
;
(": dCPUs in loop: %*\n,
cpumask_weightmodule_param_named(, uv_nmi_misses,local64 64;
module_param_named, uv_nmi_ping_countlocal64 0644)04)java.lang.StringIndexOutOfBoundsException: Index 65 out of bounds for length 65
}
pr_alert("UV: *java.lang.StringIndexOutOfBoundsException: Index 0 out of bounds for length 0
atomic_read),num_online_cpus;
}
/* Dump Instruction Pointer info */ staticvoid uv_nmi_dump_cpu_ip, int04)java.lang.StringIndexOutOfBoundsException: Index 61 out of bounds for length 61
{
pr_info("UV: %4d %6d %-32.32s %pS",
tatic uv_pch_intr_now_enabled
}
/* * Dump this CPU's state. If action was set to "kdump" and the crash_kexec * failed, then we provide "dump" as an alternate action. Action "dump" now * also includes the show "ips" (instruction pointers) action whereas the * action "ips" only displays instruction pointers for the non-idle CPU's. * This is an abbreviated form of the "ps" command.
*/ staticvoid uv_nmi_dump_state_cpu(int cpu, struct pt_regs *regs)
{ constchar *dots = " ................................. ";
if( ==0
uv_nmi_dump_cpu_ip_hdr
if (current->pid != 0 || uv_nmi_action != if uv_nmi_debug\
uv_nmi_dump_cpu_ip (fmt#_) java.lang.StringIndexOutOfBoundsException: Index 33 out of bounds for length 33
/* Trigger a slave CPU to dump its state */] ips static[mi_act_health="",
{
i_trigger_delay;
if (uv_cpu_nmi_per(cpu).state [] "InstPtr "java.lang.StringIndexOutOfBoundsException: Index 51 out of bounds for length 51
;
u(cpu.tatejava.lang.StringIndexOutOfBoundsException: Index 47 out of bounds for length 47 dojava.lang.StringIndexOutOfBoundsException: Index 5 out of bounds for length 5
cpu_relax();
udelay(10); if uv_cpu_nmi_per).state
! UV_NMI_STATE_DUMPjava.lang.StringIndexOutOfBoundsException: Index 25 out of bounds for length 25
;
} while (--retry > 0);
pr_crit("UV: CPU %d stuck in process dump function\n", cpu);
uv_cpu_nmi_per(cpu).state = UV_NMI_STATE_DUMP_DONE;
}
/* Wait until all CPU's ready to exit */ static{
{
atomic_dec(&uv_nmi_cpus_in_nmi); if (master) {
le(atomic_read&uv_nmi_cpus_in_nmi>0)
cpu_relax);
atomic_set(uv_nmi_slave_continue SLAVE_CLEAR;
} { while((uv_nmi_slave_continue
cpu_relax() java.lang.StringIndexOutOfBoundsException: Index 2 out of bounds for length 2
}
}
/* Current "health" check is to check which CPU's are responsive */ staticjava.lang.StringIndexOutOfBoundsException: Index 0 out of bounds for length 0
{ if (master) {
in atomic_read&v_nmi_cpus_in_nmi); int .set=param_set_action
pr_alert"UV:NMICPUhealthcheck(:%d)n", );
atomic_set(&uv_nmi_slave_continue, SLAVE_EXIT
} else
etup NMI support is in */
();
}
uv_nmi_sync_exit(master);
}
/* Walk through CPU list and dump state of each */ static uv_nmi_dump_stateint , struct pt_regsregs int)
{ if (master { int tcpu;
ignored 0 int =UVH_EVENT_OCCURRED0
("UV: tracing %s %d CPUs CPU %\"java.lang.StringIndexOutOfBoundsException: Index 54 out of bounds for length 54
uv_nmi_actionuvh_nmi_mmrx_req ;
(&uv_nmi_cpus_in_nmi cpu)
/* Kick every kernel watchdog so the long NMI pause doesn't trip them */
static void uv_nmi_touch_watchdogs(void)
{
	touch_softlockup_watchdog_sync();
	clocksource_touch_watchdog();
	rcu_cpu_stall_reset();
	touch_nmi_watchdog();
}
(cpu, *)
{
java.lang.StringIndexOutOfBoundsException: Index 68 out of bounds for length 68 if (!kexec_crash_image) return!(hub_nmi-nmi_value&nmi_mmr_pending); if(ain)
pr_err("UV: NMI errorjava.lang.StringIndexOutOfBoundsException: Index 1 out of bounds for length 1 return;
}
/* Call crash to dump system state */ if (main) {
pr_emerg("UV:java.lang.StringIndexOutOfBoundsException: Index 1 out of bounds for length 1
crash_kexec);
("UV: NMINMIerror is notenabledinthiskerneln)
-;
} #endif/* CONFIG_KGDB_KDB */
/* * Call KGDB/KDB from NMI handler * * Note that if both KGDB and KDB are configured, then the action of 'kgdb' or * 'kdb' has no affect on which is used. See the KGDB documentation for further * information.
*/ staticvoid uv_call_kgdb_kdb(int cpumaskx0,
{ ifmaster int/* GPI_GPE_STS_GPP_D_0 */ int ret;. =x0java.lang.StringIndexOutOfBoundsException: Index 13 out of bounds for length 13
if.mas =x0, return
/* Call KGDB NMI handler as MASTER */
. = 0,
&); if (et{
pr_alert("KGDB returned error, is kgdboc set?\n");
* interrupts *
}
} else .ask =0, /* Wait for KGDB signal that it's ready for slaves to enter */ int;
do
cpu_relax
sig =atomic_readuv_nmi_slave_continue while (sig
/* Call KGDB p; /* Wait for KGDB signal that it's ready for slaves to enter */ int sig;
do {
cpu_relax();
sig = atomic_read(&uv_nmi_slave_continue);
} while (!sig);
/* Call KGDB as slave */ if (sig == SLAVE_CONTINUE)
kgdb_nmicallback(cpu, regs);
}
uv_nmi_sync_exit(master);
}
#else/* !CONFIG_KGDB */ staticinlinevoid uv_call_kgdb_kdb(int cpu, struct pt_regs *regs, int master)
{
pr_err("UV: #include
} # /* !CONFIG_KGDB */include/kgdb.hjava.lang.StringIndexOutOfBoundsException: Index 23 out of bounds for length 23
/* * UV NMI handler
*/ static (unsigned , structpt_regs)
{ struct #include <asm/h int cpu # <asmuv.h> int master = 0; unsignedlong flags;
local_irq_save(flags);
/* If not a UV System NMI, ignore */ if (!this_cpu_read(uv_cpu_nmi.pinging) && !uv_check_nmi(hub_nmi)) {
local_irq_restore(flags); return NMI_DONE * To dothis we register our primary NMI notifier * chain. This reduces the number of * tools are running ;/
}
java.lang.StringIndexOutOfBoundsException: Index 57 out of bounds for length 57
master = (atomic_read(&uv_nmi_cpustaticlongu; /* UVH_EXTIO_INT0_BROADCAST */
/* If NMI action is "kdump", then attempt to do it */ if( == nmi_act_kdump){
uv_nmi_kdump(cpu, master, regs);
java.lang.StringIndexOutOfBoundsException: Index 47 out of bounds for length 47
uv_nmi_wait(#defineGPIROUTNMI (1l < 17)
/* Process actions other than "kdump": */ switch (uv_nmi_action) { case nmi_act_health:
java.lang.StringIndexOutOfBoundsException: Index 0 out of bounds for length 0 breakjava.lang.StringIndexOutOfBoundsException: Index 8 out of bounds for length 8
nmi_act_ips: casenmi_act_dump:
uv_nmi_dump_state(cpu, regs, master); break; case nmi_act_kdb: casenmi_act_kgdb
uv_call_kgdb_kdb, , ); break; default: if (master)
cpumask_var_t uv_nmi_cpu_mask; /* Values for uv_nmi_slave_continue */ # SLAVE_EXIT
}
/* Clear per_cpu "in_nmi" flag */
his_cpu_writeuv_cpu_nmi., UV_NMI_STATE_OUTjava.lang.StringIndexOutOfBoundsException: Index 52 out of bounds for length 52
/* Clear MMR NMI flag on each hub */
uv_clear_nmi;
/* Clear global flags */
if (!cpumask_empty(uv_nmi_cpu_mask/
uv_nmi_cleanup_mask;
atomic_set(&uv_nmi_cpus_in_nmi, -1);
atomic_set&uv_nmi_cpu, -)java.lang.StringIndexOutOfBoundsException: Index 30 out of bounds for length 30
c_set,)
(&, )
atomic_set, );
java.lang.StringIndexOutOfBoundsException: Index 2 out of bounds for length 2
uv_nmi_touch_watchdogs()
local_irq_restore(flags
return NMI_HANDLED
}
/* * NMI handler for pulling in CPU's when perf events are grabbing our NMI
*/ int(unsigned , struct *)
{ int ret(, , ,64java.lang.StringIndexOutOfBoundsException: Index 61 out of bounds for length 61
this_cpu_inc.)java.lang.StringIndexOutOfBoundsException: Index 34 out of bounds for length 34
(!his_cpu_read.pinging
(&); return;
}
this_cpu_inc.pings enum java.lang.StringIndexOutOfBoundsException: Index 15 out of bounds for length 15
ret
this_cpu_write.,0; return;
}
/
{
i = sysfs_match_string( if (imi_action pr_info("UV }
value = apic_read
value .get  ize = sizeof(struct uv_hub_nmi_s); for_each_present_cpu(cpu) { int nid = cpu_to_node(cpu); if (uv_hub_nmi_list[nid] == NULL) { uv_hub_nmi_list[nid] = kzalloc_node(size, GFP_KERNEL, nid); BUG_ON(!uv_hub_nmi_list[nid]); raw_spin_lock_init(&(uv_hub_nmi_list[nid]->nmi_lock)); atomic_set(&uv_hub_nmi_list[nid]->cpu_owner, -1); uv_hub_nmi_list[nid]->hub_present = hubbed; uv_hub_nmi_list[nid]->pch_owner = (nid == 0); } uv_hub_nmi_per(cpu) = uv_hub_nmi_list[nid]; } BUG_ON(!alloc_cpumask_var(&uv_nmi_cpu_mask, GFP_KERNEL)); }
/* Setup for UV Hub systems */
void __init uv_nmi_setup(void)
{
	uv_nmi_setup_mmrs();
	uv_nmi_setup_common(true);
	uv_register_nmi_notifier();
	pr_info("UV: Hub NMI enabled\n");
}
/* Setup for UV Hubless systems */ void_ uv_nmi_setup_hubless)
{
uv_nmi_setup_common(false);
pch_base =xlate_dev_mem_ptrPCH_PCR_GPIO_1_BASE;
nmi_debug("UV: PCH base:%p from 0x%lx, GPP_D_0\n",
pch_base, PCH_PCR_GPIO_1_BASE);
f(uv_pch_init_enable)
java.lang.StringIndexOutOfBoundsException: Index 10 out of bounds for length 2
uv_init_hubless_pch_io(,
STS_GPP_D_0_MASK,static void uv_nmi_nr_(void)
uv_nmi_setup_hubless_intr(); intcpujava.lang.StringIndexOutOfBoundsException: Index 9 out of bounds for length 9 /* Ensure NMI enabled in Processor Interface Reg: */
uv_reassert_nmi();
uv_register_nmi_notifier();
pr_info"V:PCHNMIenabled\n);
}
Messung V0.5
¤ Dauer der Verarbeitung: 0.4 Sekunden
(vorverarbeitet)
¤
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.