/*
 * User data (data section and bss) needs to be aligned.
 * We pick 0x20 here because it is the max value elf2flt has always
 * used in producing FLAT files, and because it seems to be large
 * enough to make all the gcc alignment related tests happy.
 */
#define FLAT_DATA_ALIGN	(0x20)

/*
 * User data (stack) also needs to be aligned.
 * Here we can be a bit looser than the data sections since this
 * needs to only meet arch ABI requirements.
 */
#define FLAT_STACK_ALIGN	max_t(unsigned long, sizeof(void *), ARCH_SLAB_MINALIGN)
/*
 * Per-exec bookkeeping for the loaded FLAT image(s).  Slot 0 describes the
 * main program; the remaining slots exist for shared-library support.
 */
struct lib_info {
	struct {
		unsigned long start_code;	/* Start of text segment */
		unsigned long start_data;	/* Start of data segment */
		unsigned long start_brk;	/* End of data segment */
		unsigned long text_len;		/* Length of text segment */
		unsigned long entry;		/* Start address for this module */
		unsigned long build_date;	/* When this one was compiled */
		bool loaded;			/* Has this library been loaded? */
	} lib_list[MAX_SHARED_LIBS];
};
/****************************************************************************/ /* * create_flat_tables() parses the env- and arg-strings in new user * memory and creates the pointer tables from them, and puts their * addresses on the "stack", recording the new stack pointer value.
*/
staticint create_flat_tables(struct linux_binprm *bprm, unsignedlong arg_start)
{ char __user *p; unsignedlong __user *sp; long i, len;
p = (char __user *)arg_start;
sp = (unsignedlong __user *)current->mm->start_stack;
current->mm->arg_start = (unsignedlong)p; for (i = bprm->argc; i > 0; i--) { if (put_user((unsignedlong)p, sp++)) return -EFAULT;
len = strnlen_user(p, MAX_ARG_STRLEN); if (!len || len > MAX_ARG_STRLEN) return -EINVAL;
p += len;
} if (put_user(0, sp++)) return -EFAULT;
current->mm->arg_end = (unsignedlong)p;
current->mm->env_start = (unsignedlong) p; for (i = bprm->envc; i > 0; i--) { if (put_user((unsignedlong)p, sp++)) return -EFAULT;
len = strnlen_user(p, MAX_ARG_STRLEN); if (!len || len > MAX_ARG_STRLEN) return -EINVAL;
p += len;
} if (put_user(0, sp++)) return -EFAULT;
current->mm->env_end = (unsignedlong)p;
pr_debug("Relocation of variable at DATASEG+%x " "(address %p, currently %lx) into segment %s\n",
r.reloc.offset, ptr, val, segment[r.reloc.type]);
switch (r.reloc.type) { case OLD_FLAT_RELOC_TYPE_TEXT:
val += current->mm->start_code; break; case OLD_FLAT_RELOC_TYPE_DATA:
val += current->mm->start_data; break; case OLD_FLAT_RELOC_TYPE_BSS:
val += current->mm->end_data; break; default:
pr_err("Unknown relocation type=%x\n", r.reloc.type); break;
}
put_user(val, ptr);
pr_debug("Relocation became %lx\n", val);
} #endif/* CONFIG_BINFMT_FLAT_OLD */
staticinline u32 __user *skip_got_header(u32 __user *rp)
{ if (IS_ENABLED(CONFIG_RISCV)) { /* * RISC-V has a 16 byte GOT PLT header for elf64-riscv * and 8 byte GOT PLT header for elf32-riscv. * Skip the whole GOT PLT header, since it is reserved * for the dynamic linker (ld.so).
*/
u32 rp_val0, rp_val1;
if (get_user(rp_val0, rp)) return rp; if (get_user(rp_val1, rp + 1)) return rp;
if (rp_val0 == 0xffffffff && rp_val1 == 0xffffffff)
rp += 4; elseif (rp_val0 == 0xffffffff)
rp += 2;
} return rp;
}
if (strncmp(hdr->magic, "bFLT", 4)) { /* * Previously, here was a printk to tell people * "BINFMT_FLAT: bad header magic". * But for the kernel which also use ELF FD-PIC format, this * error message is confusing. * because a lot of people do not manage to produce good
*/
ret = -ENOEXEC; goto err;
}
if (flags & FLAT_FLAG_KTRACE)
pr_info("Loading file: %s\n", bprm->filename);
#ifdef CONFIG_BINFMT_FLAT_OLD if (rev != FLAT_VERSION && rev != OLD_FLAT_VERSION) {
pr_err("bad flat file version 0x%x (supported 0x%lx and 0x%lx)\n",
rev, FLAT_VERSION, OLD_FLAT_VERSION);
ret = -ENOEXEC; goto err;
}
/* * fix up the flags for the older format, there were all kinds * of endian hacks, this only works for the simple cases
*/ if (rev == OLD_FLAT_VERSION &&
(flags || IS_ENABLED(CONFIG_BINFMT_FLAT_OLD_ALWAYS_RAM)))
flags = FLAT_FLAG_RAM;
#else/* CONFIG_BINFMT_FLAT_OLD */ if (rev != FLAT_VERSION) {
pr_err("bad flat file version 0x%x (supported 0x%lx)\n",
rev, FLAT_VERSION);
ret = -ENOEXEC; goto err;
} #endif/* !CONFIG_BINFMT_FLAT_OLD */
/* * Make sure the header params are sane. * 28 bits (256 MB) is way more than reasonable in this case. * If some top bits are set we have probable binary corruption.
*/ if ((text_len | data_len | bss_len | stack_len | relocs | full_data) >> 28) {
pr_err("bad header\n");
ret = -ENOEXEC; goto err;
}
#ifndef CONFIG_BINFMT_ZFLAT if (flags & (FLAT_FLAG_GZIP|FLAT_FLAG_GZDATA)) {
pr_err("Support for ZFLAT executables is not enabled.\n");
ret = -ENOEXEC; goto err;
} #endif
/* * Check initial limits. This avoids letting people circumvent * size limits imposed on them by creating programs with large * arrays in the data or bss.
*/
rlim = rlimit(RLIMIT_DATA); if (rlim >= RLIM_INFINITY)
rlim = ~0; if (data_len + bss_len > rlim) {
ret = -ENOMEM; goto err;
}
/* Flush all traces of the currently running executable */
ret = begin_new_exec(bprm); if (ret) goto err;
/* OK, This is the point of no return */
set_personality(PER_LINUX_32BIT);
setup_new_exec(bprm);
/* * calculate the extra space we need to map in
*/
extra = max_t(unsignedlong, bss_len + stack_len,
relocs * sizeof(unsignedlong));
/* * there are a couple of cases here, the separate code/data * case, and then the fully copied to RAM case which lumps * it all together.
*/ if (!IS_ENABLED(CONFIG_MMU) && !(flags & (FLAT_FLAG_RAM|FLAT_FLAG_GZIP))) { /* * this should give us a ROM ptr, but if it doesn't we don't * really care
*/
pr_debug("ROM mapping of file (we hope)\n");
textpos = vm_mmap(bprm->file, 0, text_len, PROT_READ|PROT_EXEC,
MAP_PRIVATE, 0); if (!textpos || IS_ERR_VALUE(textpos)) {
ret = textpos; if (!textpos)
ret = -ENOMEM;
pr_err("Unable to mmap process text, errno %d\n", ret); goto err;
}
len = data_len + extra +
DATA_START_OFFSET_WORDS * sizeof(unsignedlong);
len = PAGE_ALIGN(len);
realdatastart = vm_mmap(NULL, 0, len,
PROT_READ|PROT_WRITE|PROT_EXEC, MAP_PRIVATE, 0);
if (realdatastart == 0 || IS_ERR_VALUE(realdatastart)) {
ret = realdatastart; if (!realdatastart)
ret = -ENOMEM;
pr_err("Unable to allocate RAM for process data, " "errno %d\n", ret);
vm_munmap(textpos, text_len); goto err;
}
datapos = ALIGN(realdatastart +
DATA_START_OFFSET_WORDS * sizeof(unsignedlong),
FLAT_DATA_ALIGN);
len = text_len + data_len + extra +
DATA_START_OFFSET_WORDS * sizeof(u32);
len = PAGE_ALIGN(len);
textpos = vm_mmap(NULL, 0, len,
PROT_READ | PROT_EXEC | PROT_WRITE, MAP_PRIVATE, 0);
if (!textpos || IS_ERR_VALUE(textpos)) {
ret = textpos; if (!textpos)
ret = -ENOMEM;
pr_err("Unable to allocate RAM for process text/data, " "errno %d\n", ret); goto err;
}
reloc = (__be32 __user *)
(datapos + (ntohl(hdr->reloc_start) - text_len));
memp = textpos;
memp_size = len; #ifdef CONFIG_BINFMT_ZFLAT /* * load it all in and treat it like a RAM load from now on
*/ if (flags & FLAT_FLAG_GZIP) { #ifndef CONFIG_MMU
result = decompress_exec(bprm, sizeof(struct flat_hdr),
(((char *)textpos) + sizeof(struct flat_hdr)),
(text_len + full_data
- sizeof(struct flat_hdr)),
0);
memmove((void *) datapos, (void *) realdatastart,
full_data); #else /* * This is used on MMU systems mainly for testing. * Let's use a kernel buffer to simplify things.
*/ long unz_text_len = text_len - sizeof(struct flat_hdr); long unz_len = unz_text_len + full_data; char *unz_data = vmalloc(unz_len); if (!unz_data) {
result = -ENOMEM;
} else {
result = decompress_exec(bprm, sizeof(struct flat_hdr),
unz_data, unz_len, 0); if (result == 0 &&
(copy_to_user((void __user *)textpos + sizeof(struct flat_hdr),
unz_data, unz_text_len) ||
copy_to_user((void __user *)datapos,
unz_data + unz_text_len, full_data)))
result = -EFAULT;
vfree(unz_data);
} #endif
} elseif (flags & FLAT_FLAG_GZDATA) {
result = read_code(bprm->file, textpos, 0, text_len); if (!IS_ERR_VALUE(result)) { #ifndef CONFIG_MMU
result = decompress_exec(bprm, text_len, (char *) datapos,
full_data, 0); #else char *unz_data = vmalloc(full_data); if (!unz_data) {
result = -ENOMEM;
} else {
result = decompress_exec(bprm, text_len,
unz_data, full_data, 0); if (result == 0 &&
copy_to_user((void __user *)datapos,
unz_data, full_data))
result = -EFAULT;
vfree(unz_data);
} #endif
}
} else #endif/* CONFIG_BINFMT_ZFLAT */
{
result = read_code(bprm->file, textpos, 0, text_len); if (!IS_ERR_VALUE(result))
result = read_code(bprm->file, datapos,
ntohl(hdr->data_start),
full_data);
} if (IS_ERR_VALUE(result)) {
ret = result;
pr_err("Unable to read code+data+bss, errno %d\n", ret);
vm_munmap(textpos, text_len + data_len + extra +
DATA_START_OFFSET_WORDS * sizeof(u32)); goto err;
}
}
start_code = textpos + sizeof(struct flat_hdr);
end_code = textpos + text_len;
text_len -= sizeof(struct flat_hdr); /* the real code len */
/* The main program needs a little extra setup in the task structure */
current->mm->start_code = start_code;
current->mm->end_code = end_code;
current->mm->start_data = datapos;
current->mm->end_data = datapos + data_len; /* * set up the brk stuff, uses any slack left in data/bss/stack * allocation. We put the brk after the bss (between the bss * and stack) like other platforms. * Userspace code relies on the stack pointer starting out at * an address right at the end of a page.
*/
current->mm->start_brk = datapos + data_len + bss_len;
current->mm->brk = (current->mm->start_brk + 3) & ~3; #ifndef CONFIG_MMU
current->mm->context.end_brk = memp + memp_size - stack_len; #endif
if (flags & FLAT_FLAG_KTRACE) {
pr_info("Mapping is %lx, Entry point is %x, data_start is %x\n",
textpos, 0x00ffffff&ntohl(hdr->entry), ntohl(hdr->data_start));
pr_info("%s %s: TEXT=%lx-%lx DATA=%lx-%lx BSS=%lx-%lx\n", "Load", bprm->filename,
start_code, end_code, datapos, datapos + data_len,
datapos + data_len, (datapos + data_len + bss_len + 3) & ~3);
}
/* Store the current module values into the global library structure */
libinfo->lib_list[0].start_code = start_code;
libinfo->lib_list[0].start_data = datapos;
libinfo->lib_list[0].start_brk = datapos + data_len + bss_len;
libinfo->lib_list[0].text_len = text_len;
libinfo->lib_list[0].loaded = 1;
libinfo->lib_list[0].entry = (0x00ffffff & ntohl(hdr->entry)) + textpos;
libinfo->lib_list[0].build_date = ntohl(hdr->build_date);
/* * We just load the allocations into some temporary memory to * help simplify all this mumbo jumbo * * We've got two different sections of relocation entries. * The first is the GOT which resides at the beginning of the data segment * and is terminated with a -1. This one can be relocated in place. * The second is the extra relocation entries tacked after the image's * data segment. These require a little more processing as the entry is * really an offset into the image which contains an offset into the * image.
*/ if (flags & FLAT_FLAG_GOTPIC) {
rp = skip_got_header((u32 __user *) datapos); for (; ; rp++) {
u32 addr, rp_val; if (get_user(rp_val, rp)) return -EFAULT; if (rp_val == 0xffffffff) break; if (rp_val) {
addr = calc_reloc(rp_val, libinfo); if (addr == RELOC_FAILED) {
ret = -ENOEXEC; goto err;
} if (put_user(addr, rp)) return -EFAULT;
}
}
}
/* * Now run through the relocation entries. * We've got to be careful here as C++ produces relocatable zero * entries in the constructor and destructor tables which are then * tested for being not zero (which will always occur unless we're * based from address zero). This causes an endless loop as __start * is at zero. The solution used is to not relocate zero addresses. * This has the negative side effect of not allowing a global data * reference to be statically initialised to _stext (I've moved * __start to address 4 so that is okay).
*/ if (rev > OLD_FLAT_VERSION) { for (i = 0; i < relocs; i++) {
u32 addr, relval;
__be32 tmp;
/* * Get the address of the pointer to be * relocated (of course, the address has to be * relocated first).
*/ if (get_user(tmp, reloc + i)) return -EFAULT;
relval = ntohl(tmp);
addr = flat_get_relocate_addr(relval);
rp = (u32 __user *)calc_reloc(addr, libinfo); if (rp == (u32 __user *)RELOC_FAILED) {
ret = -ENOEXEC; goto err;
}
/* Get the pointer's value. */
ret = flat_get_addr_from_rp(rp, relval, flags, &addr); if (unlikely(ret)) goto err;
if (addr != 0) { /* * Do the relocation. PIC relocs in the data section are * already in target order
*/ if ((flags & FLAT_FLAG_GOTPIC) == 0) { /* * Meh, the same value can have a different * byte order based on a flag..
*/
addr = ntohl((__force __be32)addr);
}
addr = calc_reloc(addr, libinfo); if (addr == RELOC_FAILED) {
ret = -ENOEXEC; goto err;
}
/* Write back the relocated pointer. */
ret = flat_put_addr_at_rp(rp, addr, relval); if (unlikely(ret)) goto err;
}
} #ifdef CONFIG_BINFMT_FLAT_OLD
} else { for (i = 0; i < relocs; i++) {
__be32 relval; if (get_user(relval, reloc + i)) return -EFAULT;
old_reloc(ntohl(relval));
} #endif/* CONFIG_BINFMT_FLAT_OLD */
}
flush_icache_user_range(start_code, end_code);
/* zero the BSS, BRK and stack areas */ if (clear_user((void __user *)(datapos + data_len), bss_len +
(memp + memp_size - stack_len - /* end brk */
libinfo->lib_list[0].start_brk) + /* start brk */
stack_len)) return -EFAULT;
/* * These are the functions used to load flat style executables and shared * libraries. There is no binary dependent code anywhere else.
*/
staticint load_flat_binary(struct linux_binprm *bprm)
{ struct lib_info libinfo; struct pt_regs *regs = current_pt_regs(); unsignedlong stack_len = 0; unsignedlong start_addr; int res; int i, j;
memset(&libinfo, 0, sizeof(libinfo));
/* * We have to add the size of our arguments to our stack size * otherwise it's too easy for users to create stack overflows * by passing in a huge argument list. And yes, we have to be * pedantic and include space for the argv/envp array as it may have * a lot of entries.
*/ #ifndef CONFIG_MMU
stack_len += PAGE_SIZE * MAX_ARG_PAGES - bprm->p; /* the strings */ #endif
stack_len += (bprm->argc + 1) * sizeof(char *); /* the argv array */
stack_len += (bprm->envc + 1) * sizeof(char *); /* the envp array */
stack_len = ALIGN(stack_len, FLAT_STACK_ALIGN);
res = load_flat_file(bprm, &libinfo, &stack_len); if (res < 0) return res;
/* Update data segment pointers for all libraries */ for (i = 0; i < MAX_SHARED_LIBS_UPDATE; i++) { if (!libinfo.lib_list[i].loaded) continue; for (j = 0; j < MAX_SHARED_LIBS; j++) { unsignedlong val = libinfo.lib_list[j].loaded ?
libinfo.lib_list[j].start_data : UNLOADED_LIB; unsignedlong __user *p = (unsignedlong __user *)
libinfo.lib_list[i].start_data;
p -= j + 1; if (put_user(val, p)) return -EFAULT;
}
}
set_binfmt(&flat_format);
#ifdef CONFIG_MMU
res = setup_arg_pages(bprm, STACK_TOP, EXSTACK_DEFAULT); if (!res)
res = create_flat_tables(bprm, bprm->p); #else /* Stash our initial stack pointer into the mm structure */
current->mm->start_stack =
((current->mm->context.end_brk + stack_len + 3) & ~3) - 4;
pr_debug("sp=%lx\n", current->mm->start_stack);
/* copy the arg pages onto the stack */
res = transfer_args_to_stack(bprm, ¤t->mm->start_stack); if (!res)
res = create_flat_tables(bprm, current->mm->start_stack); #endif if (res) return res;
/* Fake some return addresses to ensure the call chain will * initialise library in order for us. We are required to call * lib 1 first, then 2, ... and finally the main program (id 0).
*/
start_addr = libinfo.lib_list[0].entry;
/*
 * NOTE(review): the following German disclaimer is stray text from a web
 * code-listing page, accidentally fused into this file -- it is not part of
 * the kernel source and should be removed.  English translation, preserved
 * for reference: "The information on this website has been carefully
 * compiled to the best of our knowledge.  However, neither completeness,
 * correctness, nor quality of the information provided is guaranteed.
 * Note: the colored syntax highlighting and the measurement are still
 * experimental."
 */