/* * Copyright (c) 2000, 2022, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2012, 2022 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 only, as * published by the Free Software Foundation. * * This code is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * version 2 for more details (a copy is included in the LICENSE file that * accompanied this code). * * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA * or visit www.oracle.com if you need additional information or have any * questions. *
*/
// This specifies the stack pointer decrement needed to build the frame. int LIR_Assembler::initial_frame_size_in_bytes() const { return in_bytes(frame_map()->framesize_in_bytes());
}
// Inline cache check: the inline cached class is in inline_cache_reg; // we fetch the class of the receiver and compare it with the cached class. // If they do not match we jump to slow case. int LIR_Assembler::check_icache() { int offset = __ offset();
__ inline_cache_check(R3_ARG1, R19_inline_cache_reg); return offset;
}
// Class-initialization barrier emitted at method entry.
// NOTE(review): this definition is truncated in this chunk; only the
// precondition assert is visible here.
void LIR_Assembler::clinit_barrier(ciMethod* method) {
  assert(!method->holder()->is_not_initialized(), "initialization should have been started");
void LIR_Assembler::osr_entry() {
  // On-stack-replacement entry sequence:
  //
  // 1. Create a new compiled activation.
  // 2. Initialize local variables in the compiled activation. The expression
  //    stack must be empty at the osr_bci; it is not initialized.
  // 3. Jump to the continuation address in compiled code to resume execution.

  // OSR entry point
  offsets()->set_value(CodeOffsets::OSR_Entry, code_offset());
  BlockBegin* osr_entry = compilation()->hir()->osr_entry();
  ValueStack* entry_state = osr_entry->end()->state();
  int number_of_locks = entry_state->locks_size();

  // Create a frame for the compiled activation.
  __ build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes());

  // OSR buffer is
  //
  //   locals[nlocals-1..0]
  //   monitors[number_of_locks-1..0]
  //
  // Locals is a direct copy of the interpreter frame so in the osr buffer
  // the first slot in the local array is the last local from the interpreter
  // and the last slot is local[0] (receiver) from the interpreter.
  //
  // Similarly with locks. The first lock slot in the osr buffer is the nth lock
  // from the interpreter frame, the nth lock slot in the osr buffer is 0th lock
  // in the interpreter frame (the method lock if a sync method).

  // Initialize monitors in the compiled activation.
  //   R3: pointer to osr buffer
  //
  // All other registers are dead at this point and the locals will be
  // copied into place by code emitted in the IR.

  Register OSR_buf = osrBufferPointer()->as_register();
  {
    assert(frame::interpreter_frame_monitor_size() == BasicObjectLock::size(), "adjust code below");
    int monitor_offset = BytesPerWord * method()->max_locals() +
                         (2 * BytesPerWord) * (number_of_locks - 1);
    // SharedRuntime::OSR_migration_begin() packs BasicObjectLocks in
    // the OSR buffer using 2 word entries: first the lock and then
    // the oop.
    for (int i = 0; i < number_of_locks; i++) {
      int slot_offset = monitor_offset - ((i * 2) * BytesPerWord);
#ifdef ASSERT
      // Verify the interpreter's monitor has a non-null object.
      {
        Label L;
        __ ld(R0, slot_offset + 1*BytesPerWord, OSR_buf);
        __ cmpdi(CCR0, R0, 0);
        __ bne(CCR0, L);
        __ stop("locked object is NULL");
        __ bind(L);
      }
#endif // ASSERT
      // Copy the lock field into the compiled activation.
      Address ml = frame_map()->address_for_monitor_lock(i),
              mo = frame_map()->address_for_monitor_object(i);
      assert(ml.index() == noreg && mo.index() == noreg, "sanity");
      __ ld(R0, slot_offset + 0, OSR_buf);
      __ std(R0, ml.disp(), ml.base());
      __ ld(R0, slot_offset + 1*BytesPerWord, OSR_buf);
      __ std(R0, mo.disp(), mo.base());
    }
  }
}
// Emits the exception handler entry.
// NOTE(review): this definition is truncated in this chunk; only the stub
// allocation and the overflow bailout are visible here.
int LIR_Assembler::emit_exception_handler() {
  // Generate code for the exception handler.
  address handler_base = __ start_a_stub(exception_handler_size());

  if (handler_base == NULL) {
    // Not enough space left for the handler.
    bailout("exception handler overflow");
    return -1;
  }
// Emit the code to remove the frame from the stack in the exception
// unwind path.
// NOTE(review): this definition is truncated in this chunk.
int LIR_Assembler::emit_unwind_handler() {
  _masm->block_comment("Unwind handler");
// Loads an oop constant that will be patched later into 'reg'.
// NOTE(review): this definition is truncated in this chunk.
void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo *info) {
  // Allocate a new index in table to hold the object once it's been patched.
  int oop_index = __ oop_recorder()->allocate_oop_index(NULL);
  PatchingStub* patch = new PatchingStub(_masm, patching_id(info), oop_index);
// Materialize the address of Metadata* 'o' into 'reg'.
void LIR_Assembler::metadata2reg(Metadata* o, Register reg) {
  // Notify OOP recorder (don't need the relocation).
  AddressLiteral md = __ constant_metadata_address(o);
  // R0 can serve as scratch unless it is the destination itself.
  Register scratch = (reg != R0) ? R0 : noreg;
  __ load_const_optimized(reg, md.value(), scratch);
}
// Loads a klass constant that will be patched later into 'reg'.
// NOTE(review): this definition is truncated in this chunk (closing brace
// is not visible here).
void LIR_Assembler::klass2reg_with_patching(Register reg, CodeEmitInfo *info) {
  // Allocate a new index in table to hold the klass once it's been patched.
  int index = __ oop_recorder()->allocate_metadata_index(NULL);
  PatchingStub* patch = new PatchingStub(_masm, PatchingStub::load_klass_id, index);

  AddressLiteral addrlit((address)NULL, metadata_Relocation::spec(index));
  assert(addrlit.rspec().type() == relocInfo::metadata_type, "must be an metadata reloc");
  __ load_const(reg, addrlit, R0);
  // Bytecode-conversion dispatch. NOTE(review): this is the body of the
  // conversion emitter; the enclosing function's head is not visible in
  // this chunk ('code', 'src' and 'dst' come from the caller).
  switch(code) {
    case Bytecodes::_i2l: {
      // Sign-extend int to long.
      __ extsw(dst->as_register_lo(), src->as_register());
      break;
    }
    case Bytecodes::_l2i: {
      __ mr_if_needed(dst->as_register(), src->as_register_lo()); // high bits are garbage
      break;
    }
    case Bytecodes::_i2b: {
      // Sign-extend to byte.
      __ extsb(dst->as_register(), src->as_register());
      break;
    }
    case Bytecodes::_i2c: {
      // Zero-extend to 16 bits (Java char is unsigned).
      __ clrldi(dst->as_register(), src->as_register(), 64-16);
      break;
    }
    case Bytecodes::_i2s: {
      // Sign-extend to short.
      __ extsh(dst->as_register(), src->as_register());
      break;
    }
    case Bytecodes::_i2d:
    case Bytecodes::_l2d: {
      // Without the mtfprd facility the source value travels via memory.
      bool src_in_memory = !VM_Version::has_mtfprd();
      FloatRegister rdst = dst->as_double_reg();
      FloatRegister rsrc;
      if (src_in_memory) {
        rsrc = src->as_double_reg(); // via mem
      } else {
        // move src to dst register
        if (code == Bytecodes::_i2d) {
          __ mtfprwa(rdst, src->as_register());
        } else {
          __ mtfprd(rdst, src->as_register_lo());
        }
        rsrc = rdst;
      }
      __ fcfid(rdst, rsrc);
      break;
    }
    case Bytecodes::_i2f:
    case Bytecodes::_l2f: {
      bool src_in_memory = !VM_Version::has_mtfprd();
      FloatRegister rdst = dst->as_float_reg();
      FloatRegister rsrc;
      if (src_in_memory) {
        rsrc = src->as_double_reg(); // via mem
      } else {
        // move src to dst register
        if (code == Bytecodes::_i2f) {
          __ mtfprwa(rdst, src->as_register());
        } else {
          __ mtfprd(rdst, src->as_register_lo());
        }
        rsrc = rdst;
      }
      if (VM_Version::has_fcfids()) {
        // Single-precision convert-from-integer is available.
        __ fcfids(rdst, rsrc);
      } else {
        assert(code == Bytecodes::_i2f, "fcfid+frsp needs fixup code to avoid rounding incompatibility");
        __ fcfid(rdst, rsrc);
        __ frsp(rdst, rdst);
      }
      break;
    }
    case Bytecodes::_f2d: {
      __ fmr_if_needed(dst->as_double_reg(), src->as_float_reg());
      break;
    }
    case Bytecodes::_d2f: {
      __ frsp(dst->as_float_reg(), src->as_double_reg());
      break;
    }
    case Bytecodes::_d2i:
    case Bytecodes::_f2i: {
      bool dst_in_memory = !VM_Version::has_mtfprd();
      FloatRegister rsrc = (code == Bytecodes::_d2i) ? src->as_double_reg() : src->as_float_reg();
      Address addr = dst_in_memory ? frame_map()->address_for_slot(dst->double_stack_ix()) : Address();
      Label L;
      // Result must be 0 if value is NaN; test by comparing value to itself.
      __ fcmpu(CCR0, rsrc, rsrc);
      if (dst_in_memory) {
        __ li(R0, 0); // 0 in case of NAN
        __ std(R0, addr.disp(), addr.base());
      } else {
        __ li(dst->as_register(), 0);
      }
      __ bso(CCR0, L);
      __ fctiwz(rsrc, rsrc); // USE_KILL
      if (dst_in_memory) {
        __ stfd(rsrc, addr.disp(), addr.base());
      } else {
        __ mffprd(dst->as_register(), rsrc);
      }
      __ bind(L);
      break;
    }
    case Bytecodes::_d2l:
    case Bytecodes::_f2l: {
      bool dst_in_memory = !VM_Version::has_mtfprd();
      FloatRegister rsrc = (code == Bytecodes::_d2l) ? src->as_double_reg() : src->as_float_reg();
      Address addr = dst_in_memory ? frame_map()->address_for_slot(dst->double_stack_ix()) : Address();
      Label L;
      // Result must be 0 if value is NaN; test by comparing value to itself.
      __ fcmpu(CCR0, rsrc, rsrc);
      if (dst_in_memory) {
        __ li(R0, 0); // 0 in case of NAN
        __ std(R0, addr.disp(), addr.base());
      } else {
        __ li(dst->as_register_lo(), 0);
      }
      __ bso(CCR0, L);
      __ fctidz(rsrc, rsrc); // USE_KILL
      if (dst_in_memory) {
        __ stfd(rsrc, addr.disp(), addr.base());
      } else {
        __ mffprd(dst->as_register_lo(), rsrc);
      }
      __ bind(L);
      break;
    }

    default: ShouldNotReachHere();
  }
}
// All instructions are word aligned on PPC, so call sites never need
// extra alignment padding.
void LIR_Assembler::align_call(LIR_Code) {
  // Intentionally empty.
}
// Emits the trampoline stub for a call to 'target'. The entry point is put
// into the constant pool and the stub is related to the branch-and-link that
// the caller emits afterwards. Returns false (after bailing out the
// compilation) when the constant section or stub space is exhausted.
// Fix: the extraction-fused tokens 'returnfalse', 'returntrue' and 'constint'
// were split back into valid C++.
bool LIR_Assembler::emit_trampoline_stub_for_call(address target, Register Rtoc) {
  int start_offset = __ offset();
  // Put the entry point as a constant into the constant pool.
  const address entry_point_toc_addr = __ address_constant(target, RelocationHolder::none);
  if (entry_point_toc_addr == NULL) {
    bailout("const section overflow");
    return false;
  }
  const int entry_point_toc_offset = __ offset_to_method_toc(entry_point_toc_addr);

  // Emit the trampoline stub which will be related to the branch-and-link below.
  address stub = __ emit_trampoline_stub(entry_point_toc_offset, start_offset, Rtoc);
  if (!stub) {
    bailout("no space for trampoline stub");
    return false;
  }
  return true;
}
  // NOTE(review): tail of a direct-call emitter; the enclosing function's
  // head is not visible in this chunk.
  bool success = emit_trampoline_stub_for_call(op->addr());
  if (!success) { return; }

  __ relocate(rtype);
  // Note: At this point we do not have the address of the trampoline
  // stub, and the entry point might be too far away for bl, so __ pc()
  // serves as dummy and the bl will be patched later.
  __ code()->set_insts_mark();
  __ bl(__ pc());
  add_call_info(code_offset(), op->info());
  __ post_call_nop();
}
  // NOTE(review): tail of the inline-cache call emitter; the enclosing
  // function's head is not visible in this chunk.
  // Virtual call relocation will point to ic load.
  address virtual_call_meta_addr = __ pc();
  // Load a clear inline cache.
  AddressLiteral empty_ic((address) Universe::non_oop_word());
  bool success = __ load_const_from_method_toc(R19_inline_cache_reg, empty_ic, R2_TOC);
  if (!success) {
    bailout("const section overflow");
    return;
  }
  // Call to fixup routine. Fixup routine uses ScopeDesc info
  // to determine who we intended to call.
  __ relocate(virtual_call_Relocation::spec(virtual_call_meta_addr));
  success = emit_trampoline_stub_for_call(op->addr(), R2_TOC);
  if (!success) { return; }

  // Note: At this point we do not have the address of the trampoline
  // stub, and the entry point might be too far away for bl, so __ pc()
  // serves as dummy and the bl will be patched later.
  __ bl(__ pc());
  add_call_info(code_offset(), op->info());
  __ post_call_nop();
}
  // NOTE(review): fragment of a memory-load emitter; the enclosing function's
  // head is not visible in this chunk.
  // Remember the offset of the load. The patching_epilog must be done
  // before the call to add_debug_info, otherwise the PcDescs don't get
  // entered in increasing order.
  int offset;
  if (disp_reg == noreg) {
    // Immediate displacement form: must fit in a signed 16-bit field.
    assert(Assembler::is_simm16(disp_value), "should have set this up");
    offset = load(src, disp_value, to_reg, type, wide);
  } else {
    // Register-indexed form.
    offset = load(src, disp_reg, to_reg, type, wide);
  }
  // NOTE(review): fragment of a memory-store emitter; the enclosing function's
  // head is not visible in this chunk.
  // remember the offset of the store. The patching_epilog must be done
  // before the call to add_debug_info_for_null_check, otherwise the PcDescs don't get
  // entered in increasing order.
  int offset;
  if (compress_oop) {
    // encode_heap_oop returns the register that holds the encoded oop.
    Register co = __ encode_heap_oop(R0, from_reg->as_register());
    from_reg = FrameMap::as_opr(co);
  }

  if (disp_reg == noreg) {
    // Immediate displacement form: must fit in a signed 16-bit field.
    assert(Assembler::is_simm16(disp_value), "should have set this up");
    offset = store(from_reg, src, disp_value, type, wide);
  } else {
    offset = store(from_reg, src, disp_reg, type, wide);
  }

  if (use_R29) {
    __ load_const_optimized(R29_TOC, MacroAssembler::global_toc(), R0); // reinit
  }

  if (patch != NULL) {
    patching_epilog(patch, patch_code, src, info);
  }
// Emits the method-return sequence: pop the frame, restore the return pc,
// run the (optional) reserved-stack check, emit the return safepoint poll,
// and branch to the caller.
// Fix: the extraction-fused token 'constRegister' was split back into
// 'const Register' (twice).
void LIR_Assembler::return_op(LIR_Opr result, C1SafepointPollStub* code_stub) {
  const Register return_pc = R31;  // Must survive C-call to enable_stack_reserved_zone().
  const Register temp      = R12;

  // Pop the stack before the safepoint code.
  int frame_size = initial_frame_size_in_bytes();
  if (Assembler::is_simm(frame_size, 16)) {
    __ addi(R1_SP, R1_SP, frame_size);
  } else {
    __ pop_frame();
  }

  // Restore return pc relative to callers' sp.
  __ ld(return_pc, _abi0(lr), R1_SP);
  // Move return pc to LR.
  __ mtlr(return_pc);

  if (StackReservedPages > 0 && compilation()->has_reserved_stack_access()) {
    __ reserved_stack_check(return_pc);
  }

  // We need to mark the code position where the load from the safepoint
  // polling page was emitted as relocInfo::poll_return_type here.
  if (!UseSIGTRAP) {
    code_stub->set_safepoint_offset(__ offset());
    __ relocate(relocInfo::poll_return_type);
  }
  __ safepoint_poll(*code_stub->entry(), temp, true /* at_return */, true /* in_nmethod */);

  // Return.
  __ blr();
}
// Emits a safepoint poll: loads the polling-page address from the thread and
// performs the poll load.
// NOTE(review): this definition is truncated in this chunk (the return and
// closing brace are not visible here).
// Fix: the extraction-fused token 'constRegister' was split back into
// 'const Register'.
int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
  const Register poll_addr = tmp->as_register();
  __ ld(poll_addr, in_bytes(JavaThread::polling_page_offset()), R16_thread);
  if (info != NULL) {
    add_debug_info_for_branch(info);
  }
  int offset = __ offset();
  __ relocate(relocInfo::poll_type);
  __ load_from_polling_page(poll_addr);
// For java_to_interp stubs we use R11_scratch1 as scratch register // and in call trampoline stubs we use R12_scratch2. This way we // can distinguish them (see is_NativeCallTrampolineStub_at()). constRegister reg_scratch = R11_scratch1;
// Create a static stub relocation which relates this stub // with the call instruction at insts_call_instruction_offset in the // instructions code-section. int start = __ offset();
__ relocate(static_stub_Relocation::spec(call_pc));
// Now, create the stub's code: // - load the TOC // - load the inline cache oop from the constant pool // - load the call target from the constant pool // - call
__ calculate_address_from_global_toc(reg_scratch, __ method_toc());
AddressLiteral ic = __ allocate_metadata_address((Metadata *)NULL); bool success = __ load_const_from_method_toc(R19_inline_cache_reg, ic, reg_scratch, /*fixed_size*/ true);
    // NOTE(review): fragment of a comparison emitter; the enclosing switch
    // and function head are not visible in this chunk.
    case T_OBJECT:
      // There are only equal/notequal comparisons on objects.
      {
        assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "oops");
        jobject con = opr2->as_constant_ptr()->as_jobject();
        if (con == NULL) {
          // Compare against null directly.
          __ cmpdi(BOOL_RESULT, opr1->as_register(), 0);
        } else {
          // Materialize the oop constant into R0, then compare.
          jobject2reg(con, R0);
          __ cmpd(BOOL_RESULT, opr1->as_register(), R0);
        }
      }
      break;

    case T_METADATA:
      // We only need, for now, comparison with NULL for metadata.
      {
        assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "oops");
        Metadata* p = opr2->as_constant_ptr()->as_metadata();
        if (p == NULL) {
          __ cmpdi(BOOL_RESULT, opr1->as_register(), 0);
        } else {
          ShouldNotReachHere();
        }
      }
      break;
bool positive = false;
Assembler::Condition cond = Assembler::equal; switch (condition) { case lir_cond_equal: positive = true ; cond = Assembler::equal ; break; case lir_cond_notEqual: positive = false; cond = Assembler::equal ; break; case lir_cond_less: positive = true ; cond = Assembler::less ; break; case lir_cond_belowEqual: case lir_cond_lessEqual: positive = false; cond = Assembler::greater; break; case lir_cond_greater: positive = true ; cond = Assembler::greater; break; case lir_cond_aboveEqual: case lir_cond_greaterEqual: positive = false; cond = Assembler::less ; break; default: ShouldNotReachHere();
}
// Try to use isel on >=Power7. if (VM_Version::has_isel() && result->is_cpu_register()) { bool o1_is_reg = opr1->is_cpu_register(), o2_is_reg = opr2->is_cpu_register(); constRegister result_reg = result->is_single_cpu() ? result->as_register() : result->as_register_lo();
// We can use result_reg to load one operand if not already in register. Register first = o1_is_reg ? (opr1->is_single_cpu() ? opr1->as_register() : opr1->as_register_lo()) : result_reg,
second = o2_is_reg ? (opr2->is_single_cpu() ? opr2->as_register() : opr2->as_register_lo()) : result_reg;
if (first != second) { if (!o1_is_reg) {
load_to_reg(this, opr1, result);
}
if (!o2_is_reg) {
load_to_reg(this, opr2, result);
}
      // NOTE(review): fragment of the arithmetic-op emitter; the opening of
      // the enclosing function is not visible in this chunk.
      // Register-register form.
      Register lreg = left->as_register();
      Register res = dest->as_register();
      Register rreg = right->as_register();
      switch (code) {
        case lir_add: __ add  (res, lreg, rreg); break;
        case lir_sub: __ sub  (res, lreg, rreg); break;
        case lir_mul: __ mullw(res, lreg, rreg); break;
        default: ShouldNotReachHere();
      }
    }
  } else {
    assert (right->is_constant(), "must be constant");

    if (dest->is_single_cpu()) {
      // Register-immediate form, 32-bit operands.
      Register lreg = left->as_register();
      Register res = dest->as_register();
      int simm16 = right->as_constant_ptr()->as_jint();
      switch (code) {
        // lir_sub negates the immediate and deliberately falls through to
        // lir_add so both are emitted as addi.
        case lir_sub: assert(Assembler::is_simm16(-simm16), "cannot encode"); // see do_ArithmeticOp_Int
                      simm16 = -simm16;
        case lir_add: if (res == lreg && simm16 == 0) break;
                      __ addi(res, lreg, simm16); break;
        case lir_mul: if (res == lreg && simm16 == 1) break;
                      __ mulli(res, lreg, simm16); break;
        default: ShouldNotReachHere();
      }
    } else {
      // Register-immediate form, 64-bit/pointer operands.
      Register lreg = left->as_pointer_register();
      Register res = dest->as_register_lo();
      long con = right->as_constant_ptr()->as_jlong();
      assert(Assembler::is_simm16(con), "must be simm16");
      switch (code) {
        // Same negate-and-fall-through trick as the 32-bit case above.
        case lir_sub: assert(Assembler::is_simm16(-con), "cannot encode"); // see do_ArithmeticOp_Long
                      con = -con;
        case lir_add: if (res == lreg && con == 0) break;
                      __ addi(res, lreg, (int)con); break;
        case lir_mul: if (res == lreg && con == 1) break;
                      __ mulli(res, lreg, (int)con); break;
        default: ShouldNotReachHere();
      }
    }
  }
}
// Set up the arraycopy stub information.
ArrayCopyStub* stub = op->stub(); constint frame_resize = frame::abi_reg_args_size - sizeof(frame::jit_abi); // C calls need larger frame.
// Always do stub if no type information is available. It's ok if // the known type isn't loaded since the code sanity checks // in debug mode and the type isn't required when we know the exact type // also check that the type is an array type. if (op->expected_type() == NULL) {
assert(src->is_nonvolatile() && src_pos->is_nonvolatile() && dst->is_nonvolatile() && dst_pos->is_nonvolatile() &&
length->is_nonvolatile(), "must preserve");
address copyfunc_addr = StubRoutines::generic_arraycopy();
assert(copyfunc_addr != NULL, "generic arraycopy stub required");
// NOTE(review): the following German disclaimer was appended by the web page
// this file was extracted from; it is not part of the original source. It is
// preserved here, translated, as a comment so the file stays compilable:
// "The information on this website has been compiled carefully to the best of
//  our knowledge. However, no guarantee is given of the completeness,
//  correctness, or quality of the information provided.
//  Remark: the colored syntax highlighting is still experimental."