/*
 * Cached cpu_architecture() result for use by assembler code.
 * C code should use the cpu_architecture() function instead of accessing this
 * variable directly.
 */
int __cpu_architecture __read_mostly = CPU_ARCH_UNKNOWN;

/*
 * These functions re-use the assembly code in head.S, which
 * already provide the required functionality.
 */
extern struct proc_info_list *lookup_processor_type(unsigned int);
/*
 * NOTE(review): fragment — the enclosing function's signature and the
 * declarations of `pfr2` and `block` are not visible in this view, so this
 * is presumably the tail of a hwcap-probing routine. Code left untouched;
 * reconcile with the full file before editing.
 */
/* Check for Speculative Store Bypassing control */
pfr2 = read_cpuid_ext(CPUID_EXT_PFR2);
/* Extract the 4-bit field at bit 4 of PFR2; any value >= 1 advertises SSBS */
block = cpuid_feature_extract_field(pfr2, 4); if (block >= 1)
elf_hwcap2 |= HWCAP2_SSBS;
}
/*
 * elf_hwcap_fixup() - trim ELF hwcap bits that this CPU revision does not
 * reliably support.
 *
 * Clears HWCAP_TLS on ARM1136 r0 parts (the TLS register only works from
 * r1p0 onwards) and hides HWCAP_SWP when the CPU provides LDREX/STREX and
 * LDREXB/STREXB, since SWP may not be atomic on multiprocessing cores.
 */
static void __init elf_hwcap_fixup(void)
{
	unsigned id = read_cpuid_id();

	/*
	 * HWCAP_TLS is available only on 1136 r1p0 and later,
	 * see also kuser_get_tls_init.
	 */
	if (read_cpuid_part() == ARM_CPU_PART_ARM1136 &&
	    ((id >> 20) & 3) == 0) {
		elf_hwcap &= ~HWCAP_TLS;
		return;
	}

	/* Verify if CPUID scheme is implemented */
	if ((id & 0x000f0000) != 0x000f0000)
		return;

	/*
	 * If the CPU supports LDREX/STREX and LDREXB/STREXB,
	 * avoid advertising SWP; it may not be atomic with
	 * multiprocessing cores.
	 */
	if (cpuid_feature_extract(CPUID_EXT_ISAR3, 12) > 1 ||
	    (cpuid_feature_extract(CPUID_EXT_ISAR3, 12) == 1 &&
	     cpuid_feature_extract(CPUID_EXT_ISAR4, 20) >= 3))
		elf_hwcap &= ~HWCAP_SWP;
}
/*
 * NOTE(review): this span appears to interleave two distinct functions.
 * It opens as cpu_init() (per-CPU stack setup, through the PLC_l/PLC_r
 * macro definitions), but from cpu_logical_map(0) onward the code uses
 * `i` and `mpidr` — neither declared here — and reads like the tail of
 * smp_setup_processor_id(). The inline asm that the PLC macros constrain
 * is also missing. As shown this cannot compile; left byte-identical
 * pending reconciliation with the upstream file.
 */
/* * cpu_init - initialise one CPU. * * cpu_init sets up the per-CPU stacks.
 */ void notrace cpu_init(void)
{ #ifndef CONFIG_CPU_V7M unsignedint cpu = smp_processor_id(); struct stack *stk = &stacks[cpu];
if (cpu >= NR_CPUS) {
pr_crit("CPU%u: bad primary CPU number\n", cpu);
BUG();
}
/* * This only works on resume and secondary cores. For booting on the * boot cpu, smp_prepare_boot_cpu is called after percpu area setup.
 */
set_my_cpu_offset(per_cpu_offset(cpu));
cpu_proc_init();
/* * Define the placement constraint for the inline asm directive below. * In Thumb-2, msr with an immediate value is not allowed.
 */ #ifdef CONFIG_THUMB2_KERNEL #define PLC_l "l" #define PLC_r "r" #else #define PLC_l "I" #define PLC_r "I" #endif
/* NOTE(review): from here on this looks like smp_setup_processor_id() */
cpu_logical_map(0) = cpu; for (i = 1; i < nr_cpu_ids; ++i)
cpu_logical_map(i) = i == cpu ? 0 : i;
/* * clear __my_cpu_offset on boot CPU to avoid hang caused by * using percpu variable early, for example, lockdep will * access percpu variable inside lock_release
 */
set_my_cpu_offset(0);
pr_info("Booting Linux on physical CPU 0x%x\n", mpidr);
}
struct mpidr_hash mpidr_hash;
#ifdef CONFIG_SMP
/**
 * smp_build_mpidr_hash - Pre-compute shifts required at each affinity
 *			  level in order to build a linear index from an
 *			  MPIDR value. Resulting algorithm is a collision
 *			  free hash carried out through shifting and ORing
 */
static void __init smp_build_mpidr_hash(void)
{
	u32 i, affinity;
	u32 fs[3], bits[3], ls, mask = 0;

	/*
	 * Pre-scan the list of MPIDRS and filter out bits that do
	 * not contribute to affinity levels, ie they never toggle.
	 */
	for_each_possible_cpu(i)
		mask |= (cpu_logical_map(i) ^ cpu_logical_map(0));
	pr_debug("mask of set bits 0x%x\n", mask);

	/*
	 * Find and stash the last and first bit set at all affinity levels to
	 * check how many bits are required to represent them.
	 */
	for (i = 0; i < 3; i++) {
		affinity = MPIDR_AFFINITY_LEVEL(mask, i);
		/*
		 * Find the MSB bit and LSB bits position
		 * to determine how many bits are required
		 * to express the affinity level.
		 */
		ls = fls(affinity);
		fs[i] = affinity ? ffs(affinity) - 1 : 0;
		bits[i] = ls - fs[i];
	}

	/*
	 * An index can be created from the MPIDR by isolating the
	 * significant bits at each affinity level and by shifting
	 * them in order to compress the 24 bits values space to a
	 * compressed set of values. This is equivalent to hashing
	 * the MPIDR through shifting and ORing. It is a collision free
	 * hash though not minimal since some levels might contain a number
	 * of CPUs that is not an exact power of 2 and their bit
	 * representation might contain holes, eg MPIDR[7:0] = {0x2, 0x80}.
	 */
	mpidr_hash.shift_aff[0] = fs[0];
	mpidr_hash.shift_aff[1] = MPIDR_LEVEL_BITS + fs[1] - bits[0];
	mpidr_hash.shift_aff[2] = 2*MPIDR_LEVEL_BITS + fs[2] -
						(bits[1] + bits[0]);
	mpidr_hash.mask = mask;
	mpidr_hash.bits = bits[2] + bits[1] + bits[0];
	pr_debug("MPIDR hash: aff0[%u] aff1[%u] aff2[%u] mask[0x%x] bits[%u]\n",
				mpidr_hash.shift_aff[0],
				mpidr_hash.shift_aff[1],
				mpidr_hash.shift_aff[2],
				mpidr_hash.mask,
				mpidr_hash.bits);

	/*
	 * 4x is an arbitrary value used to warn on a hash table much bigger
	 * than expected on most systems.
	 */
	if (mpidr_hash_size() > 4 * num_possible_cpus())
		pr_warn("Large number of MPIDR hash buckets detected\n");

	/* Push the hash out to RAM before MMU/cache state changes on resume */
	sync_cache_w(&mpidr_hash);
}
#endif
/*
 * locate processor in the list of supported processor types. The linker
 * builds this table for us from the entries in arch/arm/mm/proc-*.S
 *
 * Returns the matching proc_info_list entry; on an unsupported MIDR the
 * CPU is halted (the original code fell off the end of this non-void
 * function — UB — and had a second, unreachable halt loop after the
 * first `while (1)`; both fixed here by halting inside the error path
 * and returning `list` on success).
 */
struct proc_info_list *lookup_processor(u32 midr)
{
	struct proc_info_list *list = lookup_processor_type(midr);

	if (!list) {
		pr_err("CPU%u: configuration botched (ID %08x), CPU halted\n",
		       smp_processor_id(), midr);
		early_print("\nPlease check your kernel config and/or bootloader.\n");
		while (1)
			/* can't use cpu_relax() here as it may require MMU setup */;
	}

	return list;
}
/*
 * NOTE(review): this span appears to fuse two separate functions. It opens
 * as arm_add_memory(start, size) — page alignment plus 32-bit truncation —
 * but from `res_end = end - 1;` onward the code references `end`, `res`,
 * `mdesc` and `lp0`/`lp1`/`lp2`, none of which are declared here, and reads
 * like the body of a resource-registration routine (request_standard_
 * resources?). The `return` for arm_add_memory is missing. As shown this
 * cannot compile; left byte-identical pending reconciliation with the
 * upstream file.
 */
int __init arm_add_memory(u64 start, u64 size)
{
u64 aligned_start;
/* * Ensure that start/size are aligned to a page boundary. * Size is rounded down, start is rounded up.
 */
aligned_start = PAGE_ALIGN(start); if (aligned_start > start + size)
size = 0; else
size -= aligned_start - start;
#ifndef CONFIG_PHYS_ADDR_T_64BIT if (aligned_start > ULONG_MAX) {
pr_crit("Ignoring memory at 0x%08llx outside 32-bit physical address space\n",
start); return -EINVAL;
}
if (aligned_start + size > ULONG_MAX) {
pr_crit("Truncating memory at 0x%08llx to fit in 32-bit physical address space\n",
(longlong)start); /* * To ensure bank->start + bank->size is representable in * 32 bits, we use ULONG_MAX as the upper limit rather than 4GB. * This means we lose a page after masking.
 */
size = ULONG_MAX - aligned_start;
} #endif
/* NOTE(review): from here on this looks like a different function's body */
/* * In memblock, end points to the first byte after the * range while in resourses, end points to the last byte in * the range.
 */
res_end = end - 1;
/* * Some systems have a special memory alias which is only * used for booting. We need to advertise this region to * kexec-tools so they know where bootable RAM is located.
 */
boot_alias_start = phys_to_idmap(start); if (arm_has_idmap_alias() && boot_alias_start != IDMAP_INVALID_ADDR) {
res = memblock_alloc_or_panic(sizeof(*res), SMP_CACHE_BYTES);
res->name = "System RAM (boot alias)";
res->start = boot_alias_start;
res->end = phys_to_idmap(res_end);
res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
request_resource(&iomem_resource, res);
}
/* * Some machines don't have the possibility of ever * possessing lp0, lp1 or lp2
 */ if (mdesc->reserve_lp0)
request_resource(&ioport_resource, &lp0); if (mdesc->reserve_lp1)
request_resource(&ioport_resource, &lp1); if (mdesc->reserve_lp2)
request_resource(&ioport_resource, &lp2);
}
/*
 * customize_machine() - arch_initcall that invokes the board-specific
 * init_machine() hook, when the machine descriptor provides one.
 *
 * customizes platform devices, or adds new ones
 * On DT based machines, we fall back to populating the
 * machine from the device tree, if no callback is provided,
 * otherwise we would always need an init_machine callback.
 */
static int __init customize_machine(void)
{
	if (machine_desc->init_machine)
		machine_desc->init_machine();

	return 0;
}
arch_initcall(customize_machine);
/*
 * init_machine_late() - late_initcall that runs the machine's init_late()
 * hook and resolves the system serial number.
 *
 * The serial is taken from the device tree root node's "serial-number"
 * property when present; otherwise it is synthesised from the legacy
 * system_serial_high/system_serial_low values.
 */
static int __init init_machine_late(void)
{
	struct device_node *root;
	int ret;

	if (machine_desc->init_late)
		machine_desc->init_late();

	root = of_find_node_by_path("/");
	if (root) {
		ret = of_property_read_string(root, "serial-number",
					      &system_serial);
		if (ret)
			system_serial = NULL;
	}

	if (!system_serial)
		system_serial = kasprintf(GFP_KERNEL, "%08x%08x",
					  system_serial_high,
					  system_serial_low);

	return 0;
}
late_initcall(init_machine_late);
/*
 * NOTE(review): this span fuses several fragments: the tail of what looks
 * like get_total_mem() (its signature is missing), a reserve_crashkernel()
 * whose low-region branch is truncated (the `} else {` at mid-function has
 * no matching `if (crash_base == 0) {` opener, and `crash_max` is used
 * before any visible definition), a truncated boot-alias crashk resource
 * initialiser, and a hyp-mode report block (hyp_mode_check()?) with an
 * unmatched #endif. As shown this cannot compile; left byte-identical
 * pending reconciliation with the upstream file.
 */
#ifdef CONFIG_CRASH_RESERVE /* * The crash region must be aligned to 128MB to avoid * zImage relocating below the reserved region.
 */ #define CRASH_ALIGN (128 << 20)
/* NOTE(review): orphaned tail of get_total_mem() */
total = max_low_pfn - min_low_pfn; return total << PAGE_SHIFT;
}
/** * reserve_crashkernel() - reserves memory are for crash kernel * * This function reserves memory area given in "crashkernel=" kernel command * line parameter. The memory reserved is used by a dump capture kernel when * primary kernel is crashing.
 */ staticvoid __init reserve_crashkernel(void)
{ unsignedlonglong crash_size, crash_base; unsignedlonglong total_mem; int ret;
total_mem = get_total_mem();
ret = parse_crashkernel(boot_command_line, total_mem,
&crash_size, &crash_base,
NULL, NULL, NULL); /* invalid value specified or crashkernel=0 */ if (ret || !crash_size) return;
/* NOTE(review): `crash_max` is used here but never defined in this view */
crash_base = memblock_phys_alloc_range(crash_size, CRASH_ALIGN,
CRASH_ALIGN, crash_max); if (!crash_base) {
pr_err("crashkernel reservation failed - No suitable area found.\n"); return;
}
/* NOTE(review): dangling else — the matching `if` is missing above */
} else { unsignedlonglong crash_max = crash_base + crash_size; unsignedlonglong start;
start = memblock_phys_alloc_range(crash_size, SECTION_SIZE,
crash_base, crash_max); if (!start) {
pr_err("crashkernel reservation failed - memory is in use.\n"); return;
}
}
pr_info("Reserving %ldMB of memory at %ldMB for crashkernel (System RAM: %ldMB)\n",
(unsignedlong)(crash_size >> 20),
(unsignedlong)(crash_base >> 20),
(unsignedlong)(total_mem >> 20));
/* The crashk resource must always be located in normal mem */
crashk_res.start = crash_base;
crashk_res.end = crash_base + crash_size - 1;
insert_resource(&iomem_resource, &crashk_res);
if (arm_has_idmap_alias()) { /* * If we have a special RAM alias for use at boot, we * need to advertise to kexec tools where the alias is.
 */ staticstruct resource crashk_boot_res = {
.name = "Crash kernel (boot alias)",
.flags = IORESOURCE_BUSY | IORESOURCE_MEM,
};
/* NOTE(review): from here on this looks like a hyp-mode check routine */
if (is_hyp_mode_available()) {
pr_info("CPU: All CPU(s) started in HYP mode.\n");
pr_info("CPU: Virtualization extensions available.\n");
} elseif (is_hyp_mode_mismatched()) {
pr_warn("CPU: WARNING: CPU(s) started in wrong/inconsistent modes (primary CPU mode 0x%x)\n",
__boot_cpu_mode & MODE_MASK);
pr_warn("CPU: This may indicate a broken bootloader or firmware.\n");
} else
pr_info("CPU: All CPU(s) started in SVC mode.\n"); #endif
}
/*
 * NOTE(review): fragment — the enclosing function's signature is not
 * visible (it uses `cmdline_p` and `mdesc`, so this is presumably the
 * early-boot section of setup_arch()). Code left untouched; reconcile
 * with the full file before editing.
 */
/* populate cmd_line too for later use, preserving boot_command_line */
strscpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
*cmdline_p = cmd_line;
early_fixmap_init();
early_ioremap_init();
parse_early_param();
#ifdef CONFIG_MMU
early_mm_init(mdesc); #endif
setup_dma_zone(mdesc);
xen_early_init();
arm_efi_init(); /* * Make sure the calculation for lowmem/highmem is set appropriately * before reserving/allocating any memory
 */
adjust_lowmem_bounds();
arm_memblock_init(mdesc); /* Memory may have been removed so recalculate the bounds. */
adjust_lowmem_bounds();
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Angebot
Hier finden Sie eine Liste der Produkte des Unternehmens