/*
 * Allocate node_to_cpumask_map based on number of available nodes
 * Requires node_possible_map to be valid.
 *
 * Note: cpumask_of_node() is not valid until after this is done.
 * (Use CONFIG_DEBUG_PER_CPU_MAPS to check this.)
 */
void __init setup_node_to_cpumask_map(void)
{
	unsigned int node;

	/* setup nr_node_ids if not done yet */
	if (nr_node_ids == MAX_NUMNODES)
		setup_nr_node_ids();

	/* allocate the map */
	for (node = 0; node < nr_node_ids; node++)
		alloc_bootmem_cpumask_var(&node_to_cpumask_map[node]);

	/* cpumask_of_node() will now work */
	pr_debug("Node to cpumask map for %u nodes\n", nr_node_ids);
}
static int __init numa_register_nodes(void)
{
	int nid;

	if (!memblock_validate_numa_coverage(SZ_1M))
		return -EINVAL;

	/* Finally register nodes. */
	for_each_node_mask(nid, node_possible_map) {
		unsigned long start_pfn, end_pfn;

		/*
		 * Note, get_pfn_range_for_nid() depends on
		 * memblock_set_node() having already happened
		 */
		get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
		if (start_pfn >= end_pfn)
			continue;

		alloc_node_data(nid);
		node_set_online(nid);
	}

	/* Dump memblock with node info and return. */
	memblock_dump_all();
	return 0;
}
/*
 * There are unfortunately some poorly designed mainboards around that
 * only connect memory to a single CPU. This breaks the 1:1 cpu->node
 * mapping. To avoid this fill in the mapping for all possible CPUs,
 * as the number of CPUs is not known yet. We round robin the existing
 * nodes.
 */
static void __init numa_init_array(void)
{
	int rr, i;

	rr = first_node(node_online_map);
	for (i = 0; i < nr_cpu_ids; i++) {
		if (early_cpu_to_node(i) != NUMA_NO_NODE)
			continue;
		numa_set_node(i, rr);
		rr = next_node_in(rr, node_online_map);
	}
}
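
/*
 * Worked example (illustrative only): with just nodes 0 and 1 online and
 * CPUs 0-3 still unmapped, numa_init_array() above assigns cpu0->node0,
 * cpu1->node1, cpu2->node0, cpu3->node1, because next_node_in() wraps
 * around node_online_map.
 */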
static int __init numa_init(int (*init_func)(void))
{
	int i;
	int ret;

	for (i = 0; i < MAX_LOCAL_APIC; i++)
		set_apicid_to_node(i, NUMA_NO_NODE);

	ret = numa_memblks_init(init_func, /* memblock_force_top_down */ true);
	if (ret < 0)
		return ret;

	ret = numa_register_nodes();
	if (ret < 0)
		return ret;

	for (i = 0; i < nr_cpu_ids; i++) {
		int nid = early_cpu_to_node(i);

		if (nid == NUMA_NO_NODE)
			continue;
		if (!node_online(nid))
			numa_clear_node(i);
	}
	numa_init_array();

	return 0;
}
/**
 * dummy_numa_init - Fallback dummy NUMA init
 *
 * Used if there's no underlying NUMA architecture, NUMA initialization
 * fails, or NUMA is disabled on the command line.
 *
 * Must online at least one node and add memory blocks that cover all
 * allowed memory. This function must not fail.
 */
static int __init dummy_numa_init(void)
{
	printk(KERN_INFO "%s\n",
	       numa_off ? "NUMA turned off" : "No NUMA configuration found");
	printk(KERN_INFO "Faking a node at [mem %#018Lx-%#018Lx]\n",
	       0LLU, PFN_PHYS(max_pfn) - 1);

	node_set(0, numa_nodes_parsed);
	numa_add_memblk(0, 0, PFN_PHYS(max_pfn));

	return 0;
}
/**
 * x86_numa_init - Initialize NUMA
 *
 * Try each configured NUMA initialization method until one succeeds. The
 * last fallback is dummy single node config encompassing whole memory and
 * never fails.
 */
void __init x86_numa_init(void)
{
	if (!numa_off) {
#ifdef CONFIG_ACPI_NUMA
		if (!numa_init(x86_acpi_numa_init))
			return;
#endif
#ifdef CONFIG_AMD_NUMA
		if (!numa_init(amd_numa_init))
			return;
#endif
		if (acpi_disabled && !numa_init(of_numa_init))
			return;
	}

	numa_init(dummy_numa_init);
}
/*
 * A node may exist which has one or more Generic Initiators but no CPUs and no
 * memory.
 *
 * This function must be called after init_cpu_to_node(), to ensure that any
 * memoryless CPU nodes have already been brought online, and before the
 * node_data[nid] is needed for zone list setup in build_all_zonelists().
 *
 * When this function is called, any nodes containing either memory and/or CPUs
 * will already be online and there is no need to do anything extra, even if
 * they also contain one or more Generic Initiators.
 */
void __init init_gi_nodes(void)
{
	int nid;

	/*
	 * Exclude this node from
	 * bringup_nonboot_cpus
	 *  cpu_up
	 *   __try_online_node
	 *    register_one_node
	 * because node_subsys is not initialized yet.
	 * TODO remove dependency on node_online
	 */
	for_each_node_state(nid, N_GENERIC_INITIATOR)
		if (!node_online(nid))
			node_set_online(nid);
}
/*
 * Setup early cpu_to_node.
 *
 * Populate cpu_to_node[] only if x86_cpu_to_apicid[],
 * and apicid_to_node[] tables have valid entries for a CPU.
 * This means we skip cpu_to_node[] initialisation for NUMA
 * emulation and faking node case (when running a kernel compiled
 * for NUMA on a non NUMA box), which is OK as cpu_to_node[]
 * is already initialized in a round robin manner at numa_init_array,
 * prior to this call, and this initialization is good enough
 * for the fake NUMA cases.
 *
 * Called before the per_cpu areas are setup.
 */
void __init init_cpu_to_node(void)
{
	int cpu;
	u32 *cpu_to_apicid = early_per_cpu_ptr(x86_cpu_to_apicid);

	BUG_ON(cpu_to_apicid == NULL);

	for_each_possible_cpu(cpu) {
		int node = numa_cpu_node(cpu);

		if (node == NUMA_NO_NODE)
			continue;

		/*
		 * Exclude this node from
		 * bringup_nonboot_cpus
		 *  cpu_up
		 *   __try_online_node
		 *    register_one_node
		 * because node_subsys is not initialized yet.
		 * TODO remove dependency on node_online
		 */
		if (!node_online(node))
			node_set_online(node);

		numa_set_node(cpu, node);
	}
}

int __cpu_to_node(int cpu)
{
	if (early_per_cpu_ptr(x86_cpu_to_node_map)) {
		printk(KERN_WARNING "cpu_to_node(%d): usage too early!\n", cpu);
		dump_stack();
		return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
	}
	return per_cpu(x86_cpu_to_node_map, cpu);
}
EXPORT_SYMBOL(__cpu_to_node);
/*
 * Same function as cpu_to_node() but used if called before the
 * per_cpu areas are setup.
 */
int early_cpu_to_node(int cpu)
{
	if (early_per_cpu_ptr(x86_cpu_to_node_map))
		return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];

	if (!cpu_possible(cpu)) {
		printk(KERN_WARNING "early_cpu_to_node(%d): no per_cpu area!\n", cpu);
		dump_stack();
		return NUMA_NO_NODE;
	}
	return per_cpu(x86_cpu_to_node_map, cpu);
}
/*
 * Returns a pointer to the bitmask of CPUs on Node 'node'.
 */
const struct cpumask *cpumask_of_node(int node)
{
	if ((unsigned)node >= nr_node_ids) {
		printk(KERN_WARNING
			"cpumask_of_node(%d): (unsigned)node >= nr_node_ids(%u)\n",
			node, nr_node_ids);
		dump_stack();
		return cpu_none_mask;
	}
	if (!cpumask_available(node_to_cpumask_map[node])) {
		printk(KERN_WARNING
			"cpumask_of_node(%d): no node_to_cpumask_map!\n",
			node);
		dump_stack();
		return cpu_online_mask;
	}
	return node_to_cpumask_map[node];
}
EXPORT_SYMBOL(cpumask_of_node);
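
/*
 * Typical use of cpumask_of_node() (sketch only; do_work_on_cpu() is a
 * hypothetical helper, not a kernel API):
 *
 *	int cpu;
 *
 *	for_each_cpu(cpu, cpumask_of_node(nid))
 *		do_work_on_cpu(cpu);
 */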
#endif	/* !CONFIG_DEBUG_PER_CPU_MAPS */

#ifdef CONFIG_NUMA_EMU
void __init numa_emu_update_cpu_to_node(int *emu_nid_to_phys,
					unsigned int nr_emu_nids)
{
	int i, j;

	/*
	 * Transform __apicid_to_node table to use emulated nids by
	 * reverse-mapping phys_nid. The maps should always exist but fall
	 * back to zero just in case.
	 */
	for (i = 0; i < ARRAY_SIZE(__apicid_to_node); i++) {
		if (__apicid_to_node[i] == NUMA_NO_NODE)
			continue;
		for (j = 0; j < nr_emu_nids; j++)
			if (__apicid_to_node[i] == emu_nid_to_phys[j])
				break;

		__apicid_to_node[i] = j < nr_emu_nids ? j : 0;
	}
}
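
/*
 * Worked example (illustrative only): with emu_nid_to_phys = { 0, 0, 1, 1 },
 * an __apicid_to_node[] entry pointing at physical node 1 is rewritten to
 * emulated node 2, the first emulated nid backed by that physical node.
 */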