int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{ /* * Save any process state which is live in hardware registers to the * parent context prior to duplication. This prevents the new child * state becoming stale if the parent is preempted before copy_thread() * gets a chance to save the parent's live hardware registers to the * child context.
*/
preempt_disable();
if (is_fpu_owner()) { if (is_lasx_enabled())
save_lasx(current); elseif (is_lsx_enabled())
save_lsx(current); else
save_fp(current);
}
/* * New tasks lose permission to use the fpu. This accelerates context * switching for most programs since they don't use the fpu.
*/
childregs->csr_euen = 0;
if (clone_flags & CLONE_SETTLS)
childregs->regs[2] = tls;
for (unwind_start(&state, task, NULL);
!unwind_done(&state); unwind_next_frame(&state)) {
pc = unwind_get_return_address(&state); if (!pc) break; if (in_sched_functions(pc)) continue; break;
}
put_task_stack(task);
return pc;
}
bool in_irq_stack(unsignedlong stack, struct stack_info *info)
{ unsignedlong nextsp; unsignedlong begin = (unsignedlong)this_cpu_read(irq_stack); unsignedlong end = begin + IRQ_STACK_START;
/*
 * Compute the highest usable user-space stack address, reserving room
 * above it for the VDSO image, its data pages, and (when address-space
 * randomization is enabled) slack to randomize the VDSO base.
 */
unsigned long stack_top(void)
{
	unsigned long top = TASK_SIZE & PAGE_MASK;

	if (current->thread.vdso) {
		/* Space for the VDSO & data page */
		top -= PAGE_ALIGN(current->thread.vdso->size);
		top -= VVAR_SIZE;

		/* Space to randomize the VDSO base */
		if (current->flags & PF_RANDOMIZE)
			top -= VDSO_RANDOMIZE_SIZE;
	}

	return top;
}
/* * Don't forget that the stack pointer must be aligned on a 8 bytes * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
*/ unsignedlong arch_align_stack(unsignedlong sp)
{ if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
sp -= get_random_u32_below(PAGE_SIZE);
/*
 * Send a backtrace-request IPI to every CPU in @mask, skipping any CPU
 * whose previous backtrace IPI is still outstanding.
 */
static void raise_backtrace(cpumask_t *mask)
{
	call_single_data_t *csd;
	int cpu;

	for_each_cpu(cpu, mask) {
		/*
		 * If we previously sent an IPI to the target CPU & it hasn't
		 * cleared its bit in the busy cpumask then it didn't handle
		 * our previous IPI & it's not safe for us to reuse the
		 * call_single_data_t.
		 */
		if (cpumask_test_and_set_cpu(cpu, &backtrace_csd_busy)) {
			pr_warn("Unable to send backtrace IPI to CPU%u - perhaps it hung?\n",
				cpu);
			continue;
		}

		/*
		 * NOTE(review): the original body was truncated after the
		 * pr_warn(); the dispatch below is reconstructed from upstream
		 * — confirm the per-CPU csd and handler names.
		 */
		csd = &per_cpu(backtrace_csd, cpu);
		csd->func = handle_backtrace;
		smp_call_function_single_async(cpu, csd);
	}
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.