CPU_ZERO(&cpuset);
CPU_SET(cpu, &cpuset); if (sched_setaffinity(0, sizeof(cpuset), &cpuset) != 0) {
ksft_print_msg("failed to force CPU %d\n", cpu);
ksft_test_result_skip("vdso_getcpu\n");
ksft_test_result_skip("vsyscall_map_x\n");
return;
}
ret_sys = sys_getcpu(&cpu_sys, &node_sys, 0); if (vdso_getcpu)
ret_vdso = vdso_getcpu(&cpu_vdso, &node_vdso, 0); if (vsyscall_map_x)
ret_vsys = vgetcpu(&cpu_vsys, &node_vsys, 0);
if (ret_sys == 0) { if (cpu_sys != cpu)
ksft_print_msg("syscall reported CPU %u but should be %d\n",
cpu_sys, cpu);
have_node = true;
node = node_sys;
}
if (vdso_getcpu) { if (ret_vdso) {
ksft_test_result_fail("vDSO getcpu() failed\n");
} else { if (!have_node) {
have_node = true;
node = node_vdso;
}
if (cpu_vdso != cpu || node_vdso != node) { if (cpu_vdso != cpu)
ksft_print_msg("vDSO reported CPU %u but should be %d\n",
cpu_vdso, cpu); if (node_vdso != node)
ksft_print_msg("vDSO reported node %u but should be %u\n",
node_vdso, node);
ksft_test_result_fail("Wrong values\n");
} else {
ksft_test_result_pass("vDSO reported correct CPU and node\n");
}
}
} else {
ksft_test_result_skip("vdso_getcpu isn't set\n");
}
if (vsyscall_map_x) { if (ret_vsys) {
ksft_test_result_fail("vsyscall getcpu() failed\n");
} else { if (!have_node) {
have_node = true;
node = node_vsys;
}
if (cpu_vsys != cpu || node_vsys != node) { if (cpu_vsys != cpu)
ksft_print_msg("vsyscall reported CPU %u but should be %d\n",
cpu_vsys, cpu); if (node_vsys != node)
ksft_print_msg("vsyscall reported node %u but should be %u\n",
node_vsys, node);
ksft_test_result_fail("Wrong values\n");
} else {
ksft_test_result_pass("vsyscall reported correct CPU and node\n");
}
}
} else {
ksft_test_result_skip("vsyscall_map_x isn't set\n");
}
}
/*
 * Check that read access to the vsyscall page matches what we learned
 * from /proc/self/maps (vsyscall_map_r).
 *
 * A faulting read siglongjmp()s back here via the SIGSEGV handler,
 * which records the page-fault error code in segv_err.
 */
static void test_vsys_r(void)
{
	bool can_read;

	ksft_print_msg("Checking read access to the vsyscall page\n");

	if (sigsetjmp(jmpbuf, 1) == 0) {
		/* Probe the fixed vsyscall address; a fault jumps to the else arm. */
		*(volatile int *)0xffffffffff600000;
		can_read = true;
	} else {
		can_read = false;
	}

	if (can_read && !vsyscall_map_r)
		ksft_test_result_fail("We have read access, but we shouldn't\n");
	else if (!can_read && vsyscall_map_r)
		ksft_test_result_fail("We don't have read access, but we should\n");
	else if (can_read)
		ksft_test_result_pass("We have read access\n");
	else
		ksft_test_result_pass("We do not have read access: #PF(0x%lx)\n",
				      segv_err);
}
/*
 * Check that executing the vsyscall page faults when it is not mapped
 * executable.  If vsyscall_map_x is set, execution was already covered
 * by the earlier vsyscall tests, so just pass.
 *
 * NOTE(review): can_exec and segv_err are not set in this function --
 * presumably file-scope state filled in by an earlier execution probe
 * and the SIGSEGV handler; verify against the full file.
 */
static void test_vsys_x(void)
{
	if (vsyscall_map_x) {
		/* We already tested this adequately. */
		ksft_test_result_pass("vsyscall_map_x is true\n");
		return;
	}

	ksft_print_msg("Make sure that vsyscalls really page fault\n");

	if (can_exec)
		ksft_test_result_fail("Executing the vsyscall did not page fault\n");
	else if (segv_err & (1 << 4)) /* INSTR: fault was an instruction fetch */
		ksft_test_result_pass("Executing the vsyscall page failed: #PF(0x%lx)\n",
				      segv_err);
	else
		ksft_test_result_fail("Execution failed with the wrong error: #PF(0x%lx)\n",
				      segv_err);
}
/* * Debuggers expect ptrace() to be able to peek at the vsyscall page. * Use process_vm_readv() as a proxy for ptrace() to test this. We * want it to work in the vsyscall=emulate case and to fail in the * vsyscall=xonly case. * * It's worth noting that this ABI is a bit nutty. write(2) can't * read from the vsyscall page on any kernel version or mode. The * fact that ptrace() ever worked was a nice courtesy of old kernels, * but the code to support it is fairly gross.
*/ staticvoid test_process_vm_readv(void)
{ char buf[4096]; struct iovec local, remote; int ret;
ksft_print_msg("process_vm_readv() from vsyscall page\n");
local.iov_base = buf;
local.iov_len = 4096;
remote.iov_base = (void *)0xffffffffff600000;
remote.iov_len = 4096;
ret = process_vm_readv(getpid(), &local, 1, &remote, 1, 0); if (ret != 4096) { /* * We expect process_vm_readv() to work if and only if the * vsyscall page is readable.
*/
ksft_test_result(!vsyscall_map_r, "process_vm_readv() failed (ret = %d, errno = %d)\n", ret, errno); return;
}
if (vsyscall_map_r)
ksft_test_result(!memcmp(buf, remote.iov_base, sizeof(buf)), "Read data\n"); else
ksft_test_result_fail("process_rm_readv() succeeded, but it should have failed in this configuration\n");
}
/*
 * init_vsys(): probe the vsyscall page and whether vsyscalls are
 * emulated by the kernel.
 *
 * NOTE(review): this function looks like a corrupted merge of two
 * routines.  The /proc/self/maps parsing loop that should consume
 * 'line'/'found' and set vsyscall_map_r/vsyscall_map_x is missing,
 * and the single-step emulation check below normally lives in its own
 * test function.  'maps' is never fclose()d on the non-error paths,
 * 'nerrs' is unused, and 'tmp'/'is_native' are not declared here --
 * presumably file-scope; verify against the full file before relying
 * on any of this.
 */
staticvoid init_vsys(void)
{ int nerrs = 0;
FILE *maps; char line[MAPS_LINE_LEN]; bool found = false;
maps = fopen("/proc/self/maps", "r"); if (!maps) {
/* Can't inspect mappings: assume r-x so the later tests still run. */
ksft_test_result_skip("Could not open /proc/self/maps -- assuming vsyscall is r-x\n");
vsyscall_map_r = true; return;
}
if (!vsyscall_map_x) {
ksft_test_result_skip("vsyscall_map_x isn't set\n"); return;
}
ksft_print_msg("checking that vsyscalls are emulated\n");
/* Single-step (EFLAGS.TF) through a vtime() call; the SIGTRAP handler
 * presumably counts traps taken inside the vsyscall page. */
sethandler(SIGTRAP, sigtrap, 0);
set_eflags(get_eflags() | X86_EFLAGS_TF);
vtime(&tmp);
set_eflags(get_eflags() & ~X86_EFLAGS_TF);
/*
 * If vsyscalls are emulated, we expect a single trap in the
 * vsyscall page -- the call instruction will trap with RIP
 * pointing to the entry point before emulation takes over.
 * In native mode, we expect two traps, since whatever code
 * the vsyscall page contains will be more than just a ret
 * instruction.
 */
is_native = (num_vsyscall_traps > 1);
ksft_test_result(!is_native, "vsyscalls are %s (%d instructions in vsyscall page)\n",
(is_native ? "native" : "emulated"), (int)num_vsyscall_traps);
} #endif
int main(int argc, char **argv)
{ int total_tests = TOTAL_TESTS;
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.