local_ctl_store(0, &cr0_old.reg);
local_ctl_store(2, &cr2_old.reg);
cr0_new = cr0_old;
cr2_new = cr2_old; /* Take care of the enable/disable of transactional execution. */ if (machine_has_tx()) { /* Set or clear transaction execution TXC bit 8. */
cr0_new.tcx = 1; if (task->thread.per_flags & PER_FLAG_NO_TE)
cr0_new.tcx = 0; /* Set or clear transaction execution TDC bits 62 and 63. */
cr2_new.tdc = 0; if (task->thread.per_flags & PER_FLAG_TE_ABORT_RAND) { if (task->thread.per_flags & PER_FLAG_TE_ABORT_RAND_TEND)
cr2_new.tdc = 1; else
cr2_new.tdc = 2;
}
} /* Take care of enable/disable of guarded storage. */ if (cpu_has_gs()) {
cr2_new.gse = 0; if (task->thread.gs_cb)
cr2_new.gse = 1;
} /* Load control register 0/2 iff changed */
cr0_changed = cr0_new.val != cr0_old.val;
cr2_changed = cr2_new.val != cr2_old.val; if (cr0_changed)
local_ctl_load(0, &cr0_new.reg); if (cr2_changed)
local_ctl_load(2, &cr2_new.reg); /* Copy user specified PER registers */ new.control.val = thread->per_user.control; new.start.val = thread->per_user.start; new.end.val = thread->per_user.end;
/* merge TIF_SINGLE_STEP into user specified PER registers. */ if (test_tsk_thread_flag(task, TIF_SINGLE_STEP) ||
test_tsk_thread_flag(task, TIF_UPROBE_SINGLESTEP)) { if (test_tsk_thread_flag(task, TIF_BLOCK_STEP)) new.control.val |= PER_EVENT_BRANCH; else new.control.val |= PER_EVENT_IFETCH; new.control.val |= PER_CONTROL_SUSPENSION; new.control.val |= PER_EVENT_TRANSACTION_END; if (test_tsk_thread_flag(task, TIF_UPROBE_SINGLESTEP)) new.control.val |= PER_EVENT_IFETCH; new.start.val = 0; new.end.val = -1UL;
}
/* Take care of the PER enablement bit in the PSW. */ if (!(new.control.val & PER_EVENT_MASK)) {
regs->psw.mask &= ~PSW_MASK_PER; return;
}
regs->psw.mask |= PSW_MASK_PER;
__local_ctl_store(9, 11, old.regs); if (memcmp(&new, &old, sizeof(struct per_regs)) != 0)
__local_ctl_load(9, 11, new.regs);
}
/*
 * Called by kernel/ptrace.c when detaching..
 *
 * Clear all debugging related fields.
 */
void ptrace_disable(struct task_struct *task)
{
	/* Drop all ptrace specific PER flags. */
	task->thread.per_flags = 0;
	/* Forget the user supplied PER info and the last recorded PER event. */
	memset(&task->thread.per_user, 0, sizeof(task->thread.per_user));
	memset(&task->thread.per_event, 0, sizeof(task->thread.per_event));
	/* Stop single stepping and discard any pending PER trap. */
	clear_tsk_thread_flag(task, TIF_SINGLE_STEP);
	clear_tsk_thread_flag(task, TIF_PER_TRAP);
}
#define __ADDR_MASK 7
/*
 * Read one word from the PER (Program Event Recording) info of a task.
 *
 * While the task is single stepped (TIF_SINGLE_STEP set) the active PER
 * set is the kernel supplied one, so the cr9/cr10/cr11 fields report the
 * single-step configuration instead of the user specified PER registers.
 * Unknown offsets read as zero.
 */
static inline unsigned long __peek_user_per(struct task_struct *child,
					    addr_t addr)
{
	if (addr == offsetof(struct per_struct_kernel, cr9))
		/* Control bits of the active per set. */
		return test_thread_flag(TIF_SINGLE_STEP) ?
			PER_EVENT_IFETCH : child->thread.per_user.control;
	else if (addr == offsetof(struct per_struct_kernel, cr10))
		/* Start address of the active per set. */
		return test_thread_flag(TIF_SINGLE_STEP) ?
			0 : child->thread.per_user.start;
	else if (addr == offsetof(struct per_struct_kernel, cr11))
		/* End address of the active per set. */
		return test_thread_flag(TIF_SINGLE_STEP) ?
			-1UL : child->thread.per_user.end;
	else if (addr == offsetof(struct per_struct_kernel, bits))
		/* Single-step bit. */
		return test_thread_flag(TIF_SINGLE_STEP) ?
			(1UL << (BITS_PER_LONG - 1)) : 0;
	else if (addr == offsetof(struct per_struct_kernel, starting_addr))
		/* Start address of the user specified per set. */
		return child->thread.per_user.start;
	else if (addr == offsetof(struct per_struct_kernel, ending_addr))
		/* End address of the user specified per set. */
		return child->thread.per_user.end;
	else if (addr == offsetof(struct per_struct_kernel, perc_atmid))
		/* PER code, ATMID and AI of the last PER trap */
		return (unsigned long)
			child->thread.per_event.cause << (BITS_PER_LONG - 16);
	else if (addr == offsetof(struct per_struct_kernel, address))
		/* Address of the last PER trap */
		return child->thread.per_event.address;
	else if (addr == offsetof(struct per_struct_kernel, access_id))
		/* Access id of the last PER trap */
		return (unsigned long)
			child->thread.per_event.paid << (BITS_PER_LONG - 8);
	return 0;
}
/* * Read the word at offset addr from the user area of a process. The * trouble here is that the information is littered over different * locations. The process registers are found on the kernel stack, * the floating point stuff and the trace settings are stored in * the task structure. In addition the different structures in * struct user contain pad bytes that should be read as zeroes. * Lovely...
*/ staticunsignedlong __peek_user(struct task_struct *child, addr_t addr)
{
addr_t offset, tmp;
if (addr < offsetof(struct user, regs.acrs)) { /* * psw and gprs are stored on the stack
*/
tmp = *(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr); if (addr == offsetof(struct user, regs.psw.mask)) { /* Return a clean psw mask. */
tmp &= PSW_MASK_USER | PSW_MASK_RI;
tmp |= PSW_USER_BITS;
}
} elseif (addr < offsetof(struct user, regs.orig_gpr2)) { /* * access registers are stored in the thread structure
*/
offset = addr - offsetof(struct user, regs.acrs); /* * Very special case: old & broken 64 bit gdb reading * from acrs[15]. Result is a 64 bit value. Read the * 32 bit acrs[15] value and shift it by 32. Sick...
*/ if (addr == offsetof(struct user, regs.acrs[15]))
tmp = ((unsignedlong) child->thread.acrs[15]) << 32; else
tmp = *(addr_t *)((addr_t) &child->thread.acrs + offset);
} elseif (addr == offsetof(struct user, regs.orig_gpr2)) { /* * orig_gpr2 is stored on the kernel stack
*/
tmp = (addr_t) task_pt_regs(child)->orig_gpr2;
} elseif (addr < offsetof(struct user, regs.fp_regs)) { /* * prevent reads of padding hole between * orig_gpr2 and fp_regs on s390.
*/
tmp = 0;
} elseif (addr == offsetof(struct user, regs.fp_regs.fpc)) { /* * floating point control reg. is in the thread structure
*/
tmp = child->thread.ufpu.fpc;
tmp <<= BITS_PER_LONG - 32;
/*
 * Write one word to the PER info of a task.
 *
 * There are only three fields in the per_info struct that the
 * debugger user can write to.
 * 1) cr9: the debugger wants to set a new PER event mask
 * 2) starting_addr: the debugger wants to set a new starting
 *    address to use with the PER event mask.
 * 3) ending_addr: the debugger wants to set a new ending
 *    address to use with the PER event mask.
 * The user specified PER event mask and the start and end
 * addresses are used only if single stepping is not in effect.
 * Writes to any other field in per_info are ignored.
 */
static inline void __poke_user_per(struct task_struct *child,
				   addr_t addr, addr_t data)
{
	if (addr == offsetof(struct per_struct_kernel, cr9))
		/* PER event mask of the user specified per set. */
		child->thread.per_user.control =
			data & (PER_EVENT_MASK | PER_CONTROL_MASK);
	else if (addr == offsetof(struct per_struct_kernel, starting_addr))
		/* Starting address of the user specified per set. */
		child->thread.per_user.start = data;
	else if (addr == offsetof(struct per_struct_kernel, ending_addr))
		/* Ending address of the user specified per set. */
		child->thread.per_user.end = data;
}
/* * Write a word to the user area of a process at location addr. This * operation does have an additional problem compared to peek_user. * Stores to the program status word and on the floating point * control register needs to get checked for validity.
*/ staticint __poke_user(struct task_struct *child, addr_t addr, addr_t data)
{
addr_t offset;
if (addr < offsetof(struct user, regs.acrs)) { struct pt_regs *regs = task_pt_regs(child); /* * psw and gprs are stored on the stack
*/ if (addr == offsetof(struct user, regs.psw.mask)) { unsignedlong mask = PSW_MASK_USER;
regs->int_code = 0x20000 | (data & 0xffff);
}
*(addr_t *)((addr_t) ®s->psw + addr) = data;
} elseif (addr < offsetof(struct user, regs.orig_gpr2)) { /* * access registers are stored in the thread structure
*/
offset = addr - offsetof(struct user, regs.acrs); /* * Very special case: old & broken 64 bit gdb writing * to acrs[15] with a 64 bit value. Ignore the lower * half of the value and write the upper 32 bit to * acrs[15]. Sick...
*/ if (addr == offsetof(struct user, regs.acrs[15]))
child->thread.acrs[15] = (unsignedint) (data >> 32); else
*(addr_t *)((addr_t) &child->thread.acrs + offset) = data;
} elseif (addr == offsetof(struct user, regs.orig_gpr2)) { /* * orig_gpr2 is stored on the kernel stack
*/
task_pt_regs(child)->orig_gpr2 = data;
} elseif (addr < offsetof(struct user, regs.fp_regs)) { /* * prevent writes of padding hole between * orig_gpr2 and fp_regs on s390.
*/ return 0;
} elseif (addr == offsetof(struct user, regs.fp_regs.fpc)) { /* * floating point control reg. is in the thread structure
*/ if ((unsignedint)data != 0) return -EINVAL;
child->thread.ufpu.fpc = data >> (BITS_PER_LONG - 32);
/* * Stupid gdb peeks/pokes the access registers in 64 bit with * an alignment of 4. Programmers from hell indeed...
*/
mask = __ADDR_MASK; if (addr >= offsetof(struct user, regs.acrs) &&
addr < offsetof(struct user, regs.orig_gpr2))
mask = 3; if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK) return -EIO;
return __poke_user(child, addr, data);
}
long arch_ptrace(struct task_struct *child, long request, unsignedlong addr, unsignedlong data)
{
ptrace_area parea; int copied, ret;
switch (request) { case PTRACE_PEEKUSR: /* read the word at location addr in the USER area. */ return peek_user(child, addr, data);
case PTRACE_POKEUSR: /* write the word at location addr in the USER area */ return poke_user(child, addr, data);
case PTRACE_PEEKUSR_AREA: case PTRACE_POKEUSR_AREA: if (copy_from_user(&parea, (void __force __user *) addr, sizeof(parea))) return -EFAULT;
addr = parea.kernel_addr;
data = parea.process_addr;
copied = 0; while (copied < parea.len) { if (request == PTRACE_PEEKUSR_AREA)
ret = peek_user(child, addr, data); else {
addr_t utmp; if (get_user(utmp,
(addr_t __force __user *) data)) return -EFAULT;
ret = poke_user(child, addr, utmp);
} if (ret) return ret;
addr += sizeof(unsignedlong);
data += sizeof(unsignedlong);
copied += sizeof(unsignedlong);
} return 0; case PTRACE_GET_LAST_BREAK: return put_user(child->thread.last_break, (unsignedlong __user *)data); case PTRACE_ENABLE_TE: if (!machine_has_tx()) return -EIO;
child->thread.per_flags &= ~PER_FLAG_NO_TE; return 0; case PTRACE_DISABLE_TE: if (!machine_has_tx()) return -EIO;
child->thread.per_flags |= PER_FLAG_NO_TE;
child->thread.per_flags &= ~PER_FLAG_TE_ABORT_RAND; return 0; case PTRACE_TE_ABORT_RAND: if (!machine_has_tx() || (child->thread.per_flags & PER_FLAG_NO_TE)) return -EIO; switch (data) { case 0UL:
child->thread.per_flags &= ~PER_FLAG_TE_ABORT_RAND; break; case 1UL:
child->thread.per_flags |= PER_FLAG_TE_ABORT_RAND;
child->thread.per_flags |= PER_FLAG_TE_ABORT_RAND_TEND; break; case 2UL:
child->thread.per_flags |= PER_FLAG_TE_ABORT_RAND;
child->thread.per_flags &= ~PER_FLAG_TE_ABORT_RAND_TEND; break; default: return -EINVAL;
} return 0; default: return ptrace_request(child, request, addr, data);
}
}
#ifdef CONFIG_COMPAT /* * Now the fun part starts... a 31 bit program running in the * 31 bit emulation tracing another program. PTRACE_PEEKTEXT, * PTRACE_PEEKDATA, PTRACE_POKETEXT and PTRACE_POKEDATA are easy * to handle, the difference to the 64 bit versions of the requests * is that the access is done in multiples of 4 byte instead of * 8 bytes (sizeof(unsigned long) on 31/64 bit). * The ugly part are PTRACE_PEEKUSR, PTRACE_PEEKUSR_AREA, * PTRACE_POKEUSR and PTRACE_POKEUSR_AREA. If the traced program * is a 31 bit program too, the content of struct user can be * emulated. A 31 bit program peeking into the struct user of * a 64 bit program is a no-no.
*/
/* * Same as peek_user_per but for a 31 bit program.
*/ staticinline __u32 __peek_user_per_compat(struct task_struct *child,
addr_t addr)
{ if (addr == offsetof(struct compat_per_struct_kernel, cr9)) /* Control bits of the active per set. */ return (__u32) test_thread_flag(TIF_SINGLE_STEP) ?
PER_EVENT_IFETCH : child->thread.per_user.control; elseif (addr == offsetof(struct compat_per_struct_kernel, cr10)) /* Start address of the active per set. */ return (__u32) test_thread_flag(TIF_SINGLE_STEP) ?
0 : child->thread.per_user.start; elseif (addr == offsetof(struct compat_per_struct_kernel, cr11)) /* End address of the active per set. */ return test_thread_flag(TIF_SINGLE_STEP) ?
PSW32_ADDR_INSN : child->thread.per_user.end; elseif (addr == offsetof(struct compat_per_struct_kernel, bits)) /* Single-step bit. */ return (__u32) test_thread_flag(TIF_SINGLE_STEP) ?
0x80000000 : 0; elseif (addr == offsetof(struct compat_per_struct_kernel, starting_addr)) /* Start address of the user specified per set. */ return (__u32) child->thread.per_user.start; elseif (addr == offsetof(struct compat_per_struct_kernel, ending_addr)) /* End address of the user specified per set. */ return (__u32) child->thread.per_user.end; elseif (addr == offsetof(struct compat_per_struct_kernel, perc_atmid)) /* PER code, ATMID and AI of the last PER trap */ return (__u32) child->thread.per_event.cause << 16; elseif (addr == offsetof(struct compat_per_struct_kernel, address)) /* Address of the last PER trap */ return (__u32) child->thread.per_event.address; elseif (addr == offsetof(struct compat_per_struct_kernel, access_id)) /* Access id of the last PER trap */ return (__u32) child->thread.per_event.paid << 24; return 0;
}
/* * Same as peek_user but for a 31 bit program.
*/ static u32 __peek_user_compat(struct task_struct *child, addr_t addr)
{
addr_t offset;
__u32 tmp;
if (addr < offsetof(struct compat_user, regs.acrs)) { struct pt_regs *regs = task_pt_regs(child); /* * psw and gprs are stored on the stack
*/ if (addr == offsetof(struct compat_user, regs.psw.mask)) { /* Fake a 31 bit psw mask. */
tmp = (__u32)(regs->psw.mask >> 32);
tmp &= PSW32_MASK_USER | PSW32_MASK_RI;
tmp |= PSW32_USER_BITS;
} elseif (addr == offsetof(struct compat_user, regs.psw.addr)) { /* Fake a 31 bit psw address. */
tmp = (__u32) regs->psw.addr |
(__u32)(regs->psw.mask & PSW_MASK_BA);
} else { /* gpr 0-15 */
tmp = *(__u32 *)((addr_t) ®s->psw + addr*2 + 4);
}
} elseif (addr < offsetof(struct compat_user, regs.orig_gpr2)) { /* * access registers are stored in the thread structure
*/
offset = addr - offsetof(struct compat_user, regs.acrs);
tmp = *(__u32*)((addr_t) &child->thread.acrs + offset);
} elseif (addr == offsetof(struct compat_user, regs.orig_gpr2)) { /* * orig_gpr2 is stored on the kernel stack
*/
tmp = *(__u32*)((addr_t) &task_pt_regs(child)->orig_gpr2 + 4);
} elseif (addr < offsetof(struct compat_user, regs.fp_regs)) { /* * prevent reads of padding hole between * orig_gpr2 and fp_regs on s390.
*/
tmp = 0;
} elseif (addr == offsetof(struct compat_user, regs.fp_regs.fpc)) { /* * floating point control reg. is in the thread structure
*/
tmp = child->thread.ufpu.fpc;
/*
 * Same as poke_user_per but for a 31 bit program.
 *
 * Only cr9 (PER event mask), starting_addr and ending_addr of the user
 * specified per set are writable; all other offsets are ignored.
 */
static inline void __poke_user_per_compat(struct task_struct *child,
					  addr_t addr, __u32 data)
{
	if (addr == offsetof(struct compat_per_struct_kernel, cr9))
		/* PER event mask of the user specified per set. */
		child->thread.per_user.control =
			data & (PER_EVENT_MASK | PER_CONTROL_MASK);
	else if (addr == offsetof(struct compat_per_struct_kernel, starting_addr))
		/* Starting address of the user specified per set. */
		child->thread.per_user.start = data;
	else if (addr == offsetof(struct compat_per_struct_kernel, ending_addr))
		/* Ending address of the user specified per set. */
		child->thread.per_user.end = data;
}
/* * Same as poke_user but for a 31 bit program.
*/ staticint __poke_user_compat(struct task_struct *child,
addr_t addr, addr_t data)
{
__u32 tmp = (__u32) data;
addr_t offset;
if (addr < offsetof(struct compat_user, regs.acrs)) { struct pt_regs *regs = task_pt_regs(child); /* * psw, gprs, acrs and orig_gpr2 are stored on the stack
*/ if (addr == offsetof(struct compat_user, regs.psw.mask)) {
__u32 mask = PSW32_MASK_USER;
mask |= is_ri_task(child) ? PSW32_MASK_RI : 0; /* Build a 64 bit psw mask from 31 bit mask. */ if ((tmp ^ PSW32_USER_BITS) & ~mask) /* Invalid psw mask. */ return -EINVAL; if ((data & PSW32_MASK_ASC) == PSW32_ASC_HOME) /* Invalid address-space-control bits */ return -EINVAL;
regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) |
(regs->psw.mask & PSW_MASK_BA) |
(__u64)(tmp & mask) << 32;
} elseif (addr == offsetof(struct compat_user, regs.psw.addr)) { /* Build a 64 bit psw address from 31 bit address. */
regs->psw.addr = (__u64) tmp & PSW32_ADDR_INSN; /* Transfer 31 bit amode bit to psw mask. */
regs->psw.mask = (regs->psw.mask & ~PSW_MASK_BA) |
(__u64)(tmp & PSW32_ADDR_AMODE);
} else { if (test_pt_regs_flag(regs, PIF_SYSCALL) &&
addr == offsetof(struct compat_user, regs.gprs[2])) { struct pt_regs *regs = task_pt_regs(child);
} elseif (addr == offsetof(struct compat_user, regs.orig_gpr2)) { /* * orig_gpr2 is stored on the kernel stack
*/
*(__u32*)((addr_t) &task_pt_regs(child)->orig_gpr2 + 4) = tmp;
} elseif (addr < offsetof(struct compat_user, regs.fp_regs)) { /* * prevent writess of padding hole between * orig_gpr2 and fp_regs on s390.
*/ return 0;
} elseif (addr == offsetof(struct compat_user, regs.fp_regs.fpc)) { /* * floating point control reg. is in the thread structure
*/
child->thread.ufpu.fpc = data;
if (!cpu_has_vx()) return -ENODEV; if (target == current)
save_user_fpu_regs(); for (i = 0; i < __NUM_VXRS_LOW; i++)
vxrs[i] = target->thread.ufpu.vxrs[i].low; return membuf_write(&to, vxrs, sizeof(vxrs));
}
if (!is_ri_cb_valid(&ri_cb)) {
kfree(data); return -EINVAL;
} /* * Override access key in any case, since user space should * not be able to set it, nor should it care about it.
*/
ri_cb.key = PAGE_DEFAULT_KEY >> 4;
preempt_disable(); if (!target->thread.ri_cb)
target->thread.ri_cb = data;
*target->thread.ri_cb = ri_cb; if (target == current)
load_runtime_instr_cb(target->thread.ri_cb);
preempt_enable();
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.