/* * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved. * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 only, as * published by the Free Software Foundation. * * This code is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * version 2 for more details (a copy is included in the LICENSE file that * accompanied this code). * * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA * or visit www.oracle.com if you need additional information or have any * questions. *
*/
void NativeCall::verify() {
assert(NativeCall::is_call_at((address)this), "unexpected code at call site");
}
// Resolve the final target of this call, looking through a trampoline
// stub if the call was routed through one.
address NativeCall::destination() const {
  assert(NativeInstruction::is_jal_at(instruction_address()), "inst must be jal.");
  address dest = MacroAssembler::target_addr_for_insn(instruction_address());

  // Do we use a trampoline stub for this call?
  CodeBlob* cb = CodeCache::find_blob((address)this);
  assert(cb && cb->is_nmethod(), "sanity");
  nmethod* nm = (nmethod*)cb;

  if (nm != NULL && nm->stub_contains(dest) && is_NativeCallTrampolineStub_at(dest)) {
    // Yes we do, so get the destination from the trampoline stub.
    dest = nativeCallTrampolineStub_at(dest)->destination();
  }
  return dest;
}
// Similar to replace_mt_safe, but just changes the destination. The
// important thing is that free-running threads are able to execute this
// call instruction at all times.
//
// Used in the runtime linkage of calls; see class CompiledIC.
//
// The assert_lock parameter switches off the assertion during code
// generation, where no patching lock is needed.
//
// NOTE(review): the signature
// "void NativeCall::set_destination_mt_safe(address dest, bool assert_lock) {"
// has been merged into this comment line by a formatting/extraction error,
// so the assert below is currently orphaned code. The remainder of the
// method body (the actual patching and the closing brace) is also missing
// from this chunk -- restore both from the original source.
assert(!assert_lock ||
(Patching_lock->is_locked() || SafepointSynchronize::is_at_safepoint()) ||
CompiledICLocker::is_safe(addr_at(0)), "concurrent code patching");
void NativeMovConstReg::verify() {
  // A constant-load site must be either a movptr sequence or an auipc;
  // accept the first form that matches, otherwise abort the VM.
  if (nativeInstruction_at(instruction_address())->is_movptr()) {
    return;
  }
  if (is_auipc_at(instruction_address())) {
    return;
  }
  fatal("should be MOVPTR or AUIPC");
}
// Patching to not_entrant can happen while activations of the method are
// in use. The patching in that instance must happen only when certain
// alignment restrictions are true. These guarantees check those
// conditions.
void NativeJump::check_verified_entry_alignment(address entry, address verified_entry) {
// 'entry' is not inspected here; only the verified entry's alignment matters.
// Must be 4 bytes aligned
MacroAssembler::assert_alignment(verified_entry);
}
// Resolve the target of this jump. Returns (address)-1 for the two
// "unresolved" encodings (jump-to-self and jump-to-0).
address NativeJump::jump_destination() const {
  address dest = MacroAssembler::target_addr_for_insn(instruction_address());

  // We use jump to self as the unresolved address which the inline
  // cache code (and relocs) know about.
  // As a special case we also use the sequence movptr(r, 0), jalr(r, 0)
  // i.e. jump to 0 when we need to leave space for a wide immediate load.

  // Return -1 if jump to self or to 0.
  // (The original chunk had this 'if' merged into a comment line, which
  // made the assignment unconditional and left an unmatched brace.)
  if ((dest == (address) this) || dest == 0) {
    dest = (address) -1;
  }
  return dest;
}
// We use jump to self as the unresolved address which the inline
// cache code (and relocs) know about.
// NOTE(review): the guard "if (dest == (address) -1)" has been merged into
// a comment on the original line by a formatting/extraction error, so the
// assignment below currently runs unconditionally -- restore the guard
// from the original source.
void NativeJump::set_jump_destination(address dest) {
dest = instruction_address();
// We use jump to self as the unresolved address which the inline
// cache code (and relocs) know about.
// As a special case we also use jump to 0 when first generating
// a general jump.
// NOTE(review): the condition "if ((dest == (address) this) || dest == 0) {"
// is likewise trapped inside a comment here, and the code that actually
// patches the instruction with the new destination (plus any ICache
// invalidation) appears to be missing from this chunk entirely -- the
// method as shown computes 'dest' and discards it. Verify against the
// original source before relying on this function.
dest = (address) -1;
}
// A 16-bit instruction with all bits ones is permanently reserved as an illegal instruction. bool NativeInstruction::is_sigill_not_entrant() { // jvmci return uint_at(0) == 0xffffffff;
}
void NativeIllegalInstruction::insert(address code_pos) {
  assert_cond(code_pos != NULL);
  // All bits ones is permanently reserved as an illegal instruction,
  // so any thread executing this location will trap.
  const juint illegal_insn = 0xffffffff;
  *(juint*)code_pos = illegal_insn;
}
bool NativeInstruction::is_stop() {
  // 'csrrw x0, time, x0' -- an illegal instruction used as the stop marker.
  const uint32_t stop_insn = 0xc0101073;
  return uint_at(0) == stop_insn;
}
// Patch this nmethod atomically. if (Assembler::reachable_from_branch_at(verified_entry, dest)) {
ptrdiff_t offset = dest - verified_entry;
guarantee(is_imm_in_range(offset, 20, 1), "offset is too large to be patched in one jal insrusction."); // 1M
uint32_t insn = 0;
address pInsn = (address)&insn;
Assembler::patch(pInsn, 31, 31, (offset >> 20) & 0x1);
Assembler::patch(pInsn, 30, 21, (offset >> 1) & 0x3ff);
Assembler::patch(pInsn, 20, 20, (offset >> 11) & 0x1);
Assembler::patch(pInsn, 19, 12, (offset >> 12) & 0xff);
Assembler::patch(pInsn, 11, 7, 0); // zero, no link jump
Assembler::patch(pInsn, 6, 0, 0b1101111); // j, (jal x0 offset)
*(unsignedint*)verified_entry = insn;
} else { // We use an illegal instruction for marking a method as // not_entrant.
NativeIllegalInstruction::insert(verified_entry);
}
/*
 * NOTE(review): The following text is a German web-page disclaimer that was
 * accidentally pasted into this source file; it is not part of the code and
 * should be removed. Translated for reference:
 * "The information on this website has been carefully compiled to the best
 * of our knowledge. However, neither completeness, nor correctness, nor
 * quality of the information provided is guaranteed. Note: the colored
 * syntax highlighting is still experimental."
 */