/* * Copyright (c) 2000, 2022, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 only, as * published by the Free Software Foundation. * * This code is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * version 2 for more details (a copy is included in the LICENSE file that * accompanied this code). * * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA * or visit www.oracle.com if you need additional information or have any * questions. *
*/
// Emit an assembler block comment in debug builds; expands to nothing in
// PRODUCT builds.  NOTE: each preprocessor directive must occupy its own
// line -- the previous single-line form was not valid preprocessor input.
#ifndef PRODUCT
#define COMMENT(x) do { __ block_comment(x); } while (0)
#else
#define COMMENT(x)
#endif
NEEDS_CLEANUP // remove this definitions ? constRegister IC_Klass = rscratch2; // where the IC klass is cached constRegister SYNC_header = r0; // synchronization header constRegister SHIFT_count = r0; // where count for shift operations must be
// Build an Address for the low word of 'addr', using rscratch1 as the
// scratch register for any required address materialization.
Address LIR_Assembler::as_Address_lo(LIR_Address* addr) {
  // Ouch
  // FIXME: This needs to be much more clever.  See x86.
  return as_Address(addr, rscratch1);
}
// Ensure a valid Address (base + offset) to a stack-slot. If stack access is // not encodable as a base + (immediate) offset, generate an explicit address // calculation to hold the address in a temporary register.
Address LIR_Assembler::stack_slot_address(int index, uint size, Register tmp, int adjust) {
  precond(size == 4 || size == 8);

  // Ask the frame map for the slot in base+offset form and validate it.
  Address addr = frame_map()->address_for_slot(index, adjust);
  precond(addr.getMode() == Address::base_plus_offset);
  precond(addr.base() == sp);
  precond(addr.offset() > 0);

  // The offset must be naturally aligned for the access size.
  const uint align_mask = size - 1;
  assert((addr.offset() & align_mask) == 0, "scaled offsets only");

  // If the immediate offset is not encodable, legitimize_address computes
  // the address into tmp and returns a register-based Address.
  return __ legitimize_address(addr, size, tmp);
}
// we jump here if osr happens with the interpreter
// state set up to continue at the beginning of the
// loop that triggered osr - in particular, we have
// the following registers setup:
//
// r2: osr buffer
//
// NOTE(review): headless fragment -- the enclosing osr_entry() signature and
// the declaration of 'number_of_locks' are not visible in this extract.
// Line structure below was restored: the 'for' loop and '#ifdef ASSERT'
// had been swallowed into a fused comment line.

// build frame
ciMethod* m = compilation()->method();
__ build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes());

// OSR buffer is
//
// locals[nlocals-1..0]
// monitors[0..number_of_locks]
//
// locals is a direct copy of the interpreter frame so in the osr buffer
// so first slot in the local array is the last local from the interpreter
// and last slot is local[0] (receiver) from the interpreter
//
// Similarly with locks. The first lock slot in the osr buffer is the nth lock
// from the interpreter frame, the nth lock slot in the osr buffer is 0th lock
// in the interpreter frame (the method lock if a sync method)

// Initialize monitors in the compiled activation.
// r2: pointer to osr buffer
//
// All other registers are dead at this point and the locals will be
// copied into place by code emitted in the IR.

Register OSR_buf = osrBufferPointer()->as_pointer_register();
{
  assert(frame::interpreter_frame_monitor_size() == BasicObjectLock::size(), "adjust code below");
  int monitor_offset = BytesPerWord * method()->max_locals() +
    (2 * BytesPerWord) * (number_of_locks - 1);
  // SharedRuntime::OSR_migration_begin() packs BasicObjectLocks in
  // the OSR buffer using 2 word entries: first the lock and then
  // the oop.
  for (int i = 0; i < number_of_locks; i++) {
    int slot_offset = monitor_offset - ((i * 2) * BytesPerWord);
#ifdef ASSERT
    // verify the interpreter's monitor has a non-null object
    {
      Label L;
      __ ldr(rscratch1, Address(OSR_buf, slot_offset + 1*BytesPerWord));
      __ cbnz(rscratch1, L);
      __ stop("locked object is NULL");
      __ bind(L);
    }
#endif
    // Copy {lock, oop} pair from the OSR buffer into the compiled frame's
    // monitor area.
    __ ldp(r19, r20, Address(OSR_buf, slot_offset));
    __ str(r19, frame_map()->address_for_monitor_lock(i));
    __ str(r20, frame_map()->address_for_monitor_object(i));
  }
}
}
// inline cache check; done before the frame is built. int LIR_Assembler::check_icache() { Register receiver = FrameMap::receiver_opr->as_register(); Register ic_klass = IC_Klass; int start_offset = __ offset();
__ inline_cache_check(receiver, ic_klass);
// if icache check fails, then jump to runtime routine // Note: RECEIVER must still contain the receiver!
Label dont;
__ br(Assembler::EQ, dont);
__ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
// We align the verified entry point unless the method body // (including its inline cache check) will fit in a single 64-byte // icache line. if (! method()->is_accessor() || __ offset() - start_offset > 4 * 4) { // force alignment after the cache check.
__ align(CodeEntryAlignment);
}
__ bind(dont); return start_offset;
}
// Emits the fast class-initialization barrier check for 'method'.
void LIR_Assembler::clinit_barrier(ciMethod* method) {
  assert(VM_Version::supports_fast_class_init_checks(), "sanity");
  assert(!method->holder()->is_not_initialized(), "initialization should have been started");

  // NOTE(review): the rest of clinit_barrier (including its closing brace)
  // is missing from this extract -- the next definition begins while this
  // one is still open.  Restore the missing body before building.

// This specifies the rsp decrement needed to build the frame
// NOTE(review): this signature had been swallowed into the comment line
// above it; the function body is also missing from this extract.
int LIR_Assembler::initial_frame_size_in_bytes() const {
  // if rounding, must let FrameMap know!
// Generates the exception-handler entry emitted after the method body.
// NOTE(review): fragment appears truncated -- the tail of the handler
// (runtime call, end_a_stub, return) is not visible in this extract.
int LIR_Assembler::emit_exception_handler() {
  // generate code for exception handler
  address handler_base = __ start_a_stub(exception_handler_size());
  if (handler_base == NULL) {
    // not enough space left for the handler
    bailout("exception handler overflow");
    return -1;
  }

  int offset = code_offset();

  // the exception oop and pc are in r0, and r3
  // no other registers need to be preserved, so invalidate them
  __ invalidate_registers(false, true, true, false, true, true);

  // check that there is really an exception
  __ verify_not_null_oop(r0);
// Emit the code to remove the frame from the stack in the exception // unwind path. int LIR_Assembler::emit_unwind_handler() { #ifndef PRODUCT if (CommentedAssembly) {
_masm->block_comment("Unwind handler");
} #endif
int offset = code_offset();
// Fetch the exception from TLS and clear out exception related thread state
__ ldr(r0, Address(rthread, JavaThread::exception_oop_offset()));
__ str(zr, Address(rthread, JavaThread::exception_oop_offset()));
__ str(zr, Address(rthread, JavaThread::exception_pc_offset()));
__ bind(_unwind_handler_entry);
__ verify_not_null_oop(r0); if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
__ mov(r19, r0); // Preserve the exception
}
if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
__ mov(r0, r19); // Restore the exception
}
// remove the activation and dispatch to the unwind handler
__ block_comment("remove_frame and dispatch to the unwind handler");
__ remove_frame(initial_frame_size_in_bytes());
__ far_jump(RuntimeAddress(Runtime1::entry_for(Runtime1::unwind_exception_id)));
// Emit the slow path assembly if (stub != NULL) {
stub->emit_code(this);
}
return offset;
}
// Generates the deoptimization-handler entry.
// NOTE(review): fragment appears truncated -- the adr/far_jump tail and
// end_a_stub are not visible in this extract.
int LIR_Assembler::emit_deopt_handler() {
  // generate code for exception handler
  address handler_base = __ start_a_stub(deopt_handler_size());
  if (handler_base == NULL) {
    // not enough space left for the handler
    bailout("deopt handler overflow");
    return -1;
  }
// Headless fragment (const-to-memory store of a zero constant): pick the
// store instruction matching the element type, then store zr through it.
switch (type) {
case T_ADDRESS:
  assert(c->as_jint() == 0, "should be");
  insn = &Assembler::str;
  break;
case T_LONG:
  assert(c->as_jlong() == 0, "should be");
  insn = &Assembler::str;
  break;
case T_INT:
  assert(c->as_jint() == 0, "should be");
  insn = &Assembler::strw;
  break;
case T_OBJECT:
case T_ARRAY:
  assert(c->as_jobject() == 0, "should be");
  // Narrow store when oops are compressed and this is not a wide store.
  if (UseCompressedOops && !wide) {
    insn = &Assembler::strw;
  } else {
    insn = &Assembler::str;
  }
  break;
case T_CHAR:
case T_SHORT:
  assert(c->as_jint() == 0, "should be");
  insn = &Assembler::strh;
  break;
case T_BOOLEAN:
case T_BYTE:
  assert(c->as_jint() == 0, "should be");
  insn = &Assembler::strb;
  break;
default:
  ShouldNotReachHere();
  insn = &Assembler::str;  // unreachable
}

if (info) add_debug_info_for_null_check_here(info);
(_masm->*insn)(zr, as_Address(to_addr, rscratch1));
}
// Register-to-register move.  Fixes from review: the fused token "elseif"
// (compile error) was split into "else if", and the 'if' statements that had
// been swallowed into a fused comment line were restored.
// NOTE(review): fragment appears truncated -- the FPU-register branches that
// normally follow move_regs(f_lo, t_lo) are not visible in this extract.
void LIR_Assembler::reg2reg(LIR_Opr src, LIR_Opr dest) {
  assert(src->is_register(), "should not call otherwise");
  assert(dest->is_register(), "should not call otherwise");

  // move between cpu-registers
  if (dest->is_single_cpu()) {
    if (src->type() == T_LONG) {
      // Can do LONG -> OBJECT
      move_regs(src->as_register_lo(), dest->as_register());
      return;
    }
    assert(src->is_single_cpu(), "must match");
    if (src->type() == T_OBJECT) {
      __ verify_oop(src->as_register());
    }
    move_regs(src->as_register(), dest->as_register());
  } else if (dest->is_double_cpu()) {
    if (is_reference_type(src->type())) {
      // Surprising to me but we can see move of a long to t_object
      __ verify_oop(src->as_register());
      move_regs(src->as_register(), dest->as_register_lo());
      return;
    }
    assert(src->is_double_cpu(), "must match");
    Register f_lo = src->as_register_lo();
    Register f_hi = src->as_register_hi();
    Register t_lo = dest->as_register_lo();
    Register t_hi = dest->as_register_hi();
    assert(f_hi == f_lo, "must be same");
    assert(t_hi == t_lo, "must be same");
    move_regs(f_lo, t_lo);
// NOTE(review): headless fragment of reg2mem (register-to-memory store
// switch); the enclosing signature and the code after the switch are not
// visible.  Restored the 'if' and 'case T_SHORT:' that had been swallowed
// into fused comment lines.
int null_check_here = code_offset();
switch (type) {
case T_FLOAT: {
  __ strs(src->as_float_reg(), as_Address(to_addr));
  break;
}

case T_DOUBLE: {
  __ strd(src->as_double_reg(), as_Address(to_addr));
  break;
}

case T_ARRAY:   // fall through
case T_OBJECT:  // fall through
  if (UseCompressedOops && !wide) {
    __ strw(compressed_src, as_Address(to_addr, rscratch2));
  } else {
    __ str(compressed_src, as_Address(to_addr));
  }
  break;
case T_METADATA:
  // We get here to store a method pointer to the stack to pass to
  // a dtrace runtime call. This can't work on 64 bit with
  // compressed klass ptrs: T_METADATA can be a compressed klass
  // ptr or a 64 bit method pointer.
  ShouldNotReachHere();
  __ str(src->as_register(), as_Address(to_addr));
  break;
case T_ADDRESS:
  __ str(src->as_register(), as_Address(to_addr));
  break;
case T_INT:
  __ strw(src->as_register(), as_Address(to_addr));
  break;

case T_LONG: {
  __ str(src->as_register_lo(), as_Address_lo(to_addr));
  break;
}

case T_BYTE:    // fall through
case T_BOOLEAN: {
  __ strb(src->as_register(), as_Address(to_addr));
  break;
}

case T_CHAR:    // fall through
case T_SHORT:
  __ strh(src->as_register(), as_Address(to_addr));
  break;
// NOTE(review): headless fragment of mem2reg (memory-to-register load); the
// enclosing signature is not visible.  Restored the 'if' that had been
// swallowed into a fused comment line in the T_OBJECT case.
if (addr->base()->type() == T_OBJECT) {
  __ verify_oop(addr->base()->as_pointer_register());
}

if (patch_code != lir_patch_none) {
  // Patching not yet supported here: deoptimize and re-run in the interpreter.
  deoptimize_trap(info);
  return;
}

if (info != NULL) {
  add_debug_info_for_null_check_here(info);
}
int null_check_here = code_offset();
switch (type) {
case T_FLOAT: {
  __ ldrs(dest->as_float_reg(), as_Address(from_addr));
  break;
}

case T_DOUBLE: {
  __ ldrd(dest->as_double_reg(), as_Address(from_addr));
  break;
}

case T_ARRAY:   // fall through
case T_OBJECT:  // fall through
  if (UseCompressedOops && !wide) {
    __ ldrw(dest->as_register(), as_Address(from_addr));
  } else {
    __ ldr(dest->as_register(), as_Address(from_addr));
  }
  break;
case T_METADATA:
  // We get here to store a method pointer to the stack to pass to
  // a dtrace runtime call. This can't work on 64 bit with
  // compressed klass ptrs: T_METADATA can be a compressed klass
  // ptr or a 64 bit method pointer.
  ShouldNotReachHere();
  __ ldr(dest->as_register(), as_Address(from_addr));
  break;
case T_ADDRESS:
  __ ldr(dest->as_register(), as_Address(from_addr));
  break;
case T_INT:
  __ ldrw(dest->as_register(), as_Address(from_addr));
  break;

case T_LONG: {
  __ ldr(dest->as_register_lo(), as_Address_lo(from_addr));
  break;
}

case T_BYTE:
  __ ldrsb(dest->as_register(), as_Address(from_addr));
  break;
case T_BOOLEAN: {
  __ ldrb(dest->as_register(), as_Address(from_addr));
  break;
}

case T_CHAR:
  __ ldrh(dest->as_register(), as_Address(from_addr));
  break;
case T_SHORT:
  __ ldrsh(dest->as_register(), as_Address(from_addr));
  break;

default:
  ShouldNotReachHere();
}

if (is_reference_type(type)) {
  if (UseCompressedOops && !wide) {
    __ decode_heap_oop(dest->as_register());
  }

  if (!UseZGC) {
    // Load barrier has not yet been applied, so ZGC can't verify the oop here
    __ verify_oop(dest->as_register());
  }
}
}
// Log2 of the byte size of an element of an array of the given basic type.
int LIR_Assembler::array_element_size(BasicType type) const {
  return exact_log2(type2aelembytes(type));
}
// Dispatch three-operand LIR ops: integer div/rem and fused multiply-adds.
void LIR_Assembler::emit_op3(LIR_Op3* op) {
  switch (op->code()) {
  case lir_idiv:
  case lir_irem:
    // Integer divide / remainder; op carries a temp operand and debug info.
    arithmetic_idiv(op->code(), op->in_opr1(), op->in_opr2(), op->in_opr3(),
                    op->result_opr(), op->info());
    break;
  case lir_fmad:
    // Fused multiply-add, double precision: result = opr1 * opr2 + opr3.
    __ fmaddd(op->result_opr()->as_double_reg(),
              op->in_opr1()->as_double_reg(),
              op->in_opr2()->as_double_reg(),
              op->in_opr3()->as_double_reg());
    break;
  case lir_fmaf:
    // Fused multiply-add, single precision.
    __ fmadds(op->result_opr()->as_float_reg(),
              op->in_opr1()->as_float_reg(),
              op->in_opr2()->as_float_reg(),
              op->in_opr3()->as_float_reg());
    break;
  default:
    ShouldNotReachHere();
    break;
  }
}
// NOTE(review): headless fragment of the type-check emission code (subtype
// check plus cast-success profiling plus a piece of the array-store branch).
// The enclosing signature and several spans between the pieces below are
// missing from this extract; code is left byte-identical.
if (op->fast_check()) { // get object class // not a safepoint as obj null check happens earlier
__ load_klass(rscratch1, obj);
__ cmp( rscratch1, k_RInfo);
__ br(Assembler::NE, *failure_target); // successful cast, fall through to profile or jump
} else { // get object class // not a safepoint as obj null check happens earlier
__ load_klass(klass_RInfo, obj); if (k->is_loaded()) { // See if we get an immediate positive hit
__ ldr(rscratch1, Address(klass_RInfo, int64_t(k->super_check_offset())));
__ cmp(k_RInfo, rscratch1); if ((juint)in_bytes(Klass::secondary_super_cache_offset()) != k->super_check_offset()) {
__ br(Assembler::NE, *failure_target); // successful cast, fall through to profile or jump
} else { // See if we get an immediate positive hit
__ br(Assembler::EQ, *success_target); // check for self
__ cmp(klass_RInfo, k_RInfo);
__ br(Assembler::EQ, *success_target);
// Slow path: push {klass_RInfo, k_RInfo}, call the subtype-check stub,
// then pop the boolean result back into klass_RInfo.
__ stp(klass_RInfo, k_RInfo, Address(__ pre(sp, -2 * wordSize)));
__ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
__ ldr(klass_RInfo, Address(__ post(sp, 2 * wordSize))); // result is a boolean
__ cbzw(klass_RInfo, *failure_target); // successful cast, fall through to profile or jump
}
} else { // perform the fast part of the checking logic
__ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, NULL); // call out-of-line instance of __ check_klass_subtype_slow_path(...):
__ stp(klass_RInfo, k_RInfo, Address(__ pre(sp, -2 * wordSize)));
__ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
__ ldp(k_RInfo, klass_RInfo, Address(__ post(sp, 2 * wordSize))); // result is a boolean
__ cbz(k_RInfo, *failure_target); // successful cast, fall through to profile or jump
}
// NOTE(review): context appears to be missing between the subtype check
// above and the profiling code below -- confirm against the upstream file.
} if (should_profile) { Register mdo = klass_RInfo, recv = k_RInfo;
__ bind(profile_cast_success);
__ mov_metadata(mdo, md->constant_encoding());
__ load_klass(recv, obj);
Label update_done;
type_profile_helper(mdo, md, data, recv, success);
__ b(*success);
// NOTE(review): the lines below belong to the array-store branch; the span
// connecting them to the profiling code above is missing from this extract.
// get instance klass (it's already uncompressed)
__ ldr(k_RInfo, Address(k_RInfo, ObjArrayKlass::element_klass_offset())); // perform the fast part of the checking logic
__ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, NULL); // call out-of-line instance of __ check_klass_subtype_slow_path(...):
__ stp(klass_RInfo, k_RInfo, Address(__ pre(sp, -2 * wordSize)));
__ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
__ ldp(k_RInfo, klass_RInfo, Address(__ post(sp, 2 * wordSize))); // result is a boolean
__ cbzw(k_RInfo, *failure_target); // fall through to the success case
// Arithmetic op dispatch.  Restored the 'switch' that had been swallowed
// into a fused comment line.
// NOTE(review): fragment appears truncated -- the code between the assert
// and this constant-extraction switch (including the declaration of 'c')
// and the code following it are not visible in this extract.
void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest, CodeEmitInfo* info, bool pop_fpu_stack) {
  assert(info == NULL, "should never be used, idiv/irem and ldiv/lrem not handled by this method");

  // FIXME. This is fugly: we really need to factor all this logic.
  switch(right->type()) {
  case T_LONG:
    c = right->as_constant_ptr()->as_jlong();
    break;
  case T_INT:
  case T_ADDRESS:
    c = right->as_constant_ptr()->as_jint();
    break;
  default:
    ShouldNotReachHere();
    c = 0;  // unreachable
    break;
  }
// NOTE(review): headless fragment of arithmetic_idiv -- the enclosing
// signature is not visible in this extract.  Fixes from review: the fused
// token "unsignedint" (compile error) was split into "unsigned int", and the
// 'if (right->is_constant())' that had been swallowed into a fused comment
// line was restored.
// opcode check
assert((code == lir_idiv) || (code == lir_irem), "opcode must be idiv or irem");
bool is_irem = (code == lir_irem);

// operand check
assert(left->is_single_cpu(), "left must be register");
assert(right->is_single_cpu() || right->is_constant(), "right must be register or constant");
assert(result->is_single_cpu(), "result must be register");
Register lreg = left->as_register();
Register dreg = result->as_register();

// power-of-2 constant check and codegen
if (right->is_constant()) {
  int c = right->as_constant_ptr()->as_jint();
  assert(c > 0 && is_power_of_2(c), "divisor must be power-of-2 constant");
  if (is_irem) {
    if (c == 1) {
      // move 0 to dreg if divisor is 1
      __ movw(dreg, zr);
    } else {
      // use rscratch1 as intermediate result register
      // remainder = |lreg| & (c-1), with the sign of lreg restored via CSNEG
      __ negsw(rscratch1, lreg);
      __ andw(dreg, lreg, c - 1);
      __ andw(rscratch1, rscratch1, c - 1);
      __ csnegw(dreg, dreg, rscratch1, Assembler::MI);
    }
  } else {
    if (c == 1) {
      // move lreg to dreg if divisor is 1
      __ movw(dreg, lreg);
    } else {
      unsigned int shift = exact_log2(c);
      // use rscratch1 as intermediate result register
      // round-toward-zero signed division by 2^shift: bias negative
      // dividends by (c-1) before the arithmetic shift
      __ asrw(rscratch1, lreg, 31);
      __ addw(rscratch1, lreg, rscratch1, Assembler::LSR, 32 - shift);
      __ asrw(dreg, rscratch1, shift);
    }
  }
} else {
  // Non-constant divisor: emit the full corrected divide (handles the
  // min_int / -1 case) via the macro assembler.
  Register rreg = right->as_register();
  __ corrected_idivl(dreg, lreg, rreg, is_irem, rscratch1);
}
}
// [extraction residue -- not part of the original source file]
// The German disclaimer below was carried over from the web page this file
// was extracted from; translated and commented out so it no longer breaks
// compilation:
// "The information on this web page has been compiled carefully and to the
// best of our knowledge. However, neither completeness nor correctness nor
// quality of the information provided is guaranteed.
// Note: the colored syntax rendering is still experimental."