/* * Copyright (c) 2008, 2022, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 only, as * published by the Free Software Foundation. * * This code is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * version 2 for more details (a copy is included in the LICENSE file that * accompanied this code). * * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA * or visit www.oracle.com if you need additional information or have any * questions. *
*/
// Note: Rtemp usage in this file should not impact C2 and should be
// correct as long as it is not implicitly used in lower layers (the
// arm [macro]assembler) and used with care in the other C1 specific
// files.
// Whether `opr` can be encoded directly in an instruction. Not used on ARM,
// so this must never actually be called.
bool LIR_Assembler::is_small_constant(LIR_Opr opr) {
  ShouldNotCallThis(); // Not used on ARM
  return false;
}
// Operand holding the incoming receiver.
LIR_Opr LIR_Assembler::receiverOpr() {
  // The first register in Java calling conventions
  return FrameMap::R0_oop_opr;
}
// NOTE(review): fragment of osr_entry() — the enclosing function header and
// the definitions of number_of_locks and OSR_buf are not visible in this
// chunk; do not edit in isolation.
// Copies each BasicObjectLock (lock word + object) from the OSR buffer into
// the monitor area of the new C1 frame.
assert(frame::interpreter_frame_monitor_size() == BasicObjectLock::size(), "adjust code below"); int monitor_offset = (method()->max_locals() + 2 * (number_of_locks - 1)) * BytesPerWord; for (int i = 0; i < number_of_locks; i++) { int slot_offset = monitor_offset - (i * 2 * BytesPerWord);
// Load the lock/object pair from the OSR buffer using R1/R2 as scratch...
__ ldr(R1, Address(OSR_buf, slot_offset + 0*BytesPerWord));
__ ldr(R2, Address(OSR_buf, slot_offset + 1*BytesPerWord));
// ...and store it into this frame's monitor lock/object slots.
__ str(R1, frame_map()->address_for_monitor_lock(i));
__ str(R2, frame_map()->address_for_monitor_object(i));
}
}
// Emit the inline-cache check at the verified entry point: compare the
// receiver's klass against the cached klass in Ricklass.
// Returns the code offset at which the check was emitted.
int LIR_Assembler::check_icache() {
  Register receiver = LIR_Assembler::receiverOpr()->as_register();
  int offset = __ offset();
  __ inline_cache_check(receiver, Ricklass);
  return offset;
}
// Class-initialization barrier. Not implemented on this platform, so this
// path must never be reached.
void LIR_Assembler::clinit_barrier(ciMethod* method) {
  ShouldNotReachHere(); // not implemented
}
// Load an oop constant into `reg` via a patchable move: a placeholder oop is
// emitted now and the real value is patched in later by the PatchingStub.
void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo* info) {
  jobject placeholder = (jobject)Universe::non_oop_word();
  int oop_index = __ oop_recorder()->allocate_oop_index(placeholder);
  PatchingStub* stub = new PatchingStub(_masm, patching_id(info), oop_index);
  __ patchable_mov_oop(reg, placeholder, oop_index);
  patching_epilog(stub, lir_patch_normal, reg, info);
}
// Load a Klass* constant into `reg` via a patchable move: a placeholder
// metadata value is emitted now and patched in later by the PatchingStub.
void LIR_Assembler::klass2reg_with_patching(Register reg, CodeEmitInfo* info) {
  Metadata* placeholder = (Metadata*)Universe::non_oop_word();
  int md_index = __ oop_recorder()->allocate_metadata_index(placeholder);
  PatchingStub* stub = new PatchingStub(_masm, PatchingStub::load_klass_id, md_index);
  __ patchable_mov_metadata(reg, placeholder, md_index);
  patching_epilog(stub, lir_patch_normal, reg, info);
}
int LIR_Assembler::initial_frame_size_in_bytes() const { // Subtracts two words to account for return address and link return frame_map()->framesize()*VMRegImpl::stack_slot_size - 2*wordSize;
}
// Generate the per-nmethod exception handler stub.
// NOTE(review): truncated fragment — the remainder of this function (after
// the verify below) is missing from this chunk.
int LIR_Assembler::emit_exception_handler() {
address handler_base = __ start_a_stub(exception_handler_size()); if (handler_base == NULL) {
// Not enough space left in the CodeBuffer for the stub — abandon compile.
bailout("exception handler overflow"); return -1;
}
int offset = code_offset();
// check that there is really an exception
__ verify_not_null_oop(Rexception_obj);
// Emit the code to remove the frame from the stack in the exception // unwind path. int LIR_Assembler::emit_unwind_handler() { #ifndef PRODUCT if (CommentedAssembly) {
_masm->block_comment("Unwind handler");
} #endif
int offset = code_offset();
// Fetch the exception from TLS and clear out exception related thread state Register zero = __ zero_register(Rtemp);
__ ldr(Rexception_obj, Address(Rthread, JavaThread::exception_oop_offset()));
__ str(zero, Address(Rthread, JavaThread::exception_oop_offset()));
__ str(zero, Address(Rthread, JavaThread::exception_pc_offset()));
// remove the activation and dispatch to the unwind handler
__ remove_frame(initial_frame_size_in_bytes()); // restores FP and LR
__ jump(Runtime1::entry_for(Runtime1::unwind_exception_id), relocInfo::runtime_call_type, Rtemp);
// Emit the slow path assembly if (stub != NULL) {
stub->emit_code(this);
}
return offset;
}
// Generate the deoptimization handler stub.
// NOTE(review): truncated fragment — the end of this function (stub
// finalization and return) is missing from this chunk.
int LIR_Assembler::emit_deopt_handler() {
address handler_base = __ start_a_stub(deopt_handler_size()); if (handler_base == NULL) {
// Not enough space left in the CodeBuffer for the stub — abandon compile.
bailout("deopt handler overflow"); return -1;
}
int offset = code_offset();
// Materialize the current pc as the deopt return address.
__ mov_relative_address(LR, __ pc());
__ push(LR); // stub expects LR to be saved
__ jump(SharedRuntime::deopt_blob()->unpack(), relocInfo::runtime_call_type, noreg);
// Emit the method return: pop the activation, poll for a pending safepoint,
// then return to the caller.
void LIR_Assembler::return_op(LIR_Opr result, C1SafepointPollStub* code_stub) {
  // Pop the frame before safepoint polling
  __ remove_frame(initial_frame_size_in_bytes());
  __ read_polling_page(Rtemp, relocInfo::poll_return_type);
  __ ret();
}
// Emit an explicit safepoint poll (a load from the polling page).
// NOTE(review): truncated fragment — the trailing "return offset;" and the
// closing brace are missing from this chunk.
int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
int offset = __ offset();
__ get_polling_page(Rtemp);
__ relocate(relocInfo::poll_type);
add_debug_info_for_branch(info); // help pc_desc_at to find correct scope for current PC
__ ldr(Rtemp, Address(Rtemp));
switch (c->type()) { case T_INT: // fall through case T_FLOAT:
__ mov_slow(Rtemp, c->as_jint_bits());
__ str_32(Rtemp, frame_map()->address_for_slot(dest->single_stack_ix())); break;
case T_ADDRESS:
__ mov_slow(Rtemp, c->as_jint());
__ str(Rtemp, frame_map()->address_for_slot(dest->single_stack_ix())); break;
case T_OBJECT:
__ mov_oop(Rtemp, c->as_jobject());
__ str(Rtemp, frame_map()->address_for_slot(dest->single_stack_ix())); break;
case T_LONG: // fall through case T_DOUBLE:
__ mov_slow(Rtemp, c->as_jint_lo_bits());
__ str(Rtemp, frame_map()->address_for_slot(dest->double_stack_ix(), lo_word_offset_in_bytes)); if (c->as_jint_hi_bits() != c->as_jint_lo_bits()) {
__ mov_slow(Rtemp, c->as_jint_hi_bits());
}
__ str(Rtemp, frame_map()->address_for_slot(dest->double_stack_ix(), hi_word_offset_in_bytes)); break;
PatchingStub* patch = NULL; if (needs_patching) {
patch = new PatchingStub(_masm, PatchingStub::access_field_id);
}
int null_check_offset = code_offset();
switch (type) { case T_ARRAY: case T_OBJECT: if (UseCompressedOops && !wide) {
ShouldNotReachHere();
} else {
__ str(src->as_register(), as_Address(to_addr));
} break;
case T_ADDRESS:
__ str(src->as_pointer_register(), as_Address(to_addr)); break;
case T_BYTE: case T_BOOLEAN:
__ strb(src->as_register(), as_Address(to_addr)); break;
case T_CHAR: case T_SHORT:
__ strh(src->as_register(), as_Address(to_addr)); break;
case T_INT: #ifdef __SOFTFP__ case T_FLOAT: #endif// __SOFTFP__
__ str_32(src->as_register(), as_Address(to_addr)); break;
#ifdef __SOFTFP__ case T_DOUBLE: #endif// __SOFTFP__ case T_LONG: { Register from_lo = src->as_register_lo(); Register from_hi = src->as_register_hi(); if (to_addr->index()->is_register()) {
assert(to_addr->scale() == LIR_Address::times_1,"Unexpected scaled register");
assert(to_addr->disp() == 0, "Not yet supporting both");
__ add(Rtemp, base_reg, to_addr->index()->as_register());
base_reg = Rtemp;
__ str(from_lo, Address(Rtemp)); if (patch != NULL) {
__ nop(); // see comment before patching_epilog for 2nd str
patching_epilog(patch, lir_patch_low, base_reg, info);
patch = new PatchingStub(_masm, PatchingStub::access_field_id);
patch_code = lir_patch_high;
}
__ str(from_hi, Address(Rtemp, BytesPerWord));
} elseif (base_reg == from_lo) {
__ str(from_hi, as_Address_hi(to_addr)); if (patch != NULL) {
__ nop(); // see comment before patching_epilog for 2nd str
patching_epilog(patch, lir_patch_high, base_reg, info);
patch = new PatchingStub(_masm, PatchingStub::access_field_id);
patch_code = lir_patch_low;
}
__ str(from_lo, as_Address_lo(to_addr));
} else {
__ str(from_lo, as_Address_lo(to_addr)); if (patch != NULL) {
__ nop(); // see comment before patching_epilog for 2nd str
patching_epilog(patch, lir_patch_low, base_reg, info);
patch = new PatchingStub(_masm, PatchingStub::access_field_id);
patch_code = lir_patch_high;
}
__ str(from_hi, as_Address_hi(to_addr));
} break;
}
#ifndef __SOFTFP__ case T_FLOAT: if (to_addr->index()->is_register()) {
assert(to_addr->scale() == LIR_Address::times_1,"Unexpected scaled register");
__ add(Rtemp, base_reg, to_addr->index()->as_register()); if ((to_addr->disp() <= -4096) || (to_addr->disp() >= 4096)) { BAILOUT("offset not in range"); }
__ fsts(src->as_float_reg(), Address(Rtemp, to_addr->disp()));
} else {
__ fsts(src->as_float_reg(), as_Address(to_addr));
} break;
case T_DOUBLE: if (to_addr->index()->is_register()) {
assert(to_addr->scale() == LIR_Address::times_1,"Unexpected scaled register");
__ add(Rtemp, base_reg, to_addr->index()->as_register()); if ((to_addr->disp() <= -4096) || (to_addr->disp() >= 4096)) { BAILOUT("offset not in range"); }
__ fstd(src->as_double_reg(), Address(Rtemp, to_addr->disp()));
} else {
__ fstd(src->as_double_reg(), as_Address(to_addr));
} break; #endif// __SOFTFP__
default:
ShouldNotReachHere();
}
if (info != NULL) {
add_debug_info_for_null_check(null_check_offset, info);
}
if (patch != NULL) { // Offset embedded into LDR/STR instruction may appear not enough // to address a field. So, provide a space for one more instruction // that will deal with larger offsets.
__ nop();
patching_epilog(patch, patch_code, base_reg, info);
}
}
// Load a value from a stack slot into a register.
// NOTE(review): truncated fragment — only the entry asserts are visible;
// the body of stack2reg() is missing from this chunk.
void LIR_Assembler::stack2reg(LIR_Opr src, LIR_Opr dest, BasicType type) {
assert(src->is_stack(), "should not call otherwise");
assert(dest->is_register(), "should not call otherwise");
PatchingStub* patch = NULL; if (patch_code != lir_patch_none) {
patch = new PatchingStub(_masm, PatchingStub::access_field_id);
} if (info != NULL) {
add_debug_info_for_null_check_here(info);
}
switch (type) { case T_OBJECT: // fall through case T_ARRAY: if (UseCompressedOops && !wide) {
__ ldr_u32(dest->as_register(), as_Address(addr));
} else {
__ ldr(dest->as_register(), as_Address(addr));
} break;
case T_ADDRESS:
__ ldr(dest->as_pointer_register(), as_Address(addr)); break;
case T_INT: #ifdef __SOFTFP__ case T_FLOAT: #endif// __SOFTFP__
__ ldr(dest->as_pointer_register(), as_Address(addr)); break;
case T_BOOLEAN:
__ ldrb(dest->as_register(), as_Address(addr)); break;
case T_BYTE:
__ ldrsb(dest->as_register(), as_Address(addr)); break;
case T_CHAR:
__ ldrh(dest->as_register(), as_Address(addr)); break;
case T_SHORT:
__ ldrsh(dest->as_register(), as_Address(addr)); break;
#ifdef __SOFTFP__ case T_DOUBLE: #endif// __SOFTFP__ case T_LONG: { Register to_lo = dest->as_register_lo(); Register to_hi = dest->as_register_hi(); if (addr->index()->is_register()) {
assert(addr->scale() == LIR_Address::times_1,"Unexpected scaled register");
assert(addr->disp() == 0, "Not yet supporting both");
__ add(Rtemp, base_reg, addr->index()->as_register());
base_reg = Rtemp;
__ ldr(to_lo, Address(Rtemp)); if (patch != NULL) {
__ nop(); // see comment before patching_epilog for 2nd ldr
patching_epilog(patch, lir_patch_low, base_reg, info);
patch = new PatchingStub(_masm, PatchingStub::access_field_id);
patch_code = lir_patch_high;
}
__ ldr(to_hi, Address(Rtemp, BytesPerWord));
} elseif (base_reg == to_lo) {
__ ldr(to_hi, as_Address_hi(addr)); if (patch != NULL) {
__ nop(); // see comment before patching_epilog for 2nd ldr
patching_epilog(patch, lir_patch_high, base_reg, info);
patch = new PatchingStub(_masm, PatchingStub::access_field_id);
patch_code = lir_patch_low;
}
__ ldr(to_lo, as_Address_lo(addr));
} else {
__ ldr(to_lo, as_Address_lo(addr)); if (patch != NULL) {
__ nop(); // see comment before patching_epilog for 2nd ldr
patching_epilog(patch, lir_patch_low, base_reg, info);
patch = new PatchingStub(_masm, PatchingStub::access_field_id);
patch_code = lir_patch_high;
}
__ ldr(to_hi, as_Address_hi(addr));
} break;
}
#ifndef __SOFTFP__ case T_FLOAT: if (addr->index()->is_register()) {
assert(addr->scale() == LIR_Address::times_1,"Unexpected scaled register");
__ add(Rtemp, base_reg, addr->index()->as_register()); if ((addr->disp() <= -4096) || (addr->disp() >= 4096)) { BAILOUT("offset not in range"); }
__ flds(dest->as_float_reg(), Address(Rtemp, addr->disp()));
} else {
__ flds(dest->as_float_reg(), as_Address(addr));
} break;
case T_DOUBLE: if (addr->index()->is_register()) {
assert(addr->scale() == LIR_Address::times_1,"Unexpected scaled register");
__ add(Rtemp, base_reg, addr->index()->as_register()); if ((addr->disp() <= -4096) || (addr->disp() >= 4096)) { BAILOUT("offset not in range"); }
__ fldd(dest->as_double_reg(), Address(Rtemp, addr->disp()));
} else {
__ fldd(dest->as_double_reg(), as_Address(addr));
} break; #endif// __SOFTFP__
default:
ShouldNotReachHere();
}
if (patch != NULL) { // Offset embedded into LDR/STR instruction may appear not enough // to address a field. So, provide a space for one more instruction // that will deal with larger offsets.
__ nop();
patching_epilog(patch, patch_code, base_reg, info);
}
if (op->code() == lir_idiv && op->in_opr2()->is_constant() && is_32) { int c = op->in_opr2()->as_constant_ptr()->as_jint();
assert(is_power_of_2(c), "non power-of-2 constant should be put in a register");
Register left = op->in_opr1()->as_register(); Register dest = op->result_opr()->as_register(); if (c == 1) {
__ mov(dest, left);
} elseif (c == 2) {
__ add_32(dest, left, AsmOperand(left, lsr, 31));
__ asr_32(dest, dest, 1);
} elseif (c != (int) 0x80000000) { int power = log2i_exact(c);
__ asr_32(Rtemp, left, 31);
__ add_32(dest, left, AsmOperand(Rtemp, lsr, 32-power)); // dest = left + (left < 0 ? 2^power - 1 : 0);
__ asr_32(dest, dest, power); // dest = dest >>> power;
} else { // x/0x80000000 is a special case, since dividend is a power of two, but is negative. // The only possible result values are 0 and 1, with 1 only for dividend == divisor == 0x80000000.
__ cmp_32(left, c);
__ mov(dest, 0, ne);
__ mov(dest, 1, eq);
}
} else {
assert(op->code() == lir_idiv || op->code() == lir_irem, "unexpected op3");
__ call(StubRoutines::Arm::idiv_irem_entry(), relocInfo::runtime_call_type);
add_debug_info_for_div0_here(op->info());
}
}
// Update the receiver-type rows of the profile at (md, data) for the
// receiver klass held in `recv`. If the klass matches a recorded row, its
// counter is incremented; otherwise the first empty row is claimed. On
// update, control jumps to *update_done; if all rows are full and none
// match, execution falls through.
void LIR_Assembler::type_profile_helper(Register mdo, int mdo_offset_bias,
                                        ciMethodData *md, ciProfileData *data,
                                        Register recv, Register tmp1, Label* update_done) {
  assert_different_registers(mdo, recv, tmp1);
  uint i;
  for (i = 0; i < VirtualCallData::row_limit(); i++) {
    Label next_test;
    // See if the receiver is receiver[n].
    Address receiver_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i)) -
                          mdo_offset_bias);
    __ ldr(tmp1, receiver_addr);
    __ verify_klass_ptr(tmp1);
    __ cmp(recv, tmp1);
    __ b(next_test, ne);
    // Matched: bump this row's counter and finish.
    Address data_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)) -
                      mdo_offset_bias);
    __ ldr(tmp1, data_addr);
    __ add(tmp1, tmp1, DataLayout::counter_increment);
    __ str(tmp1, data_addr);
    __ b(*update_done);
    __ bind(next_test);
  }

  // Didn't find receiver; find next empty slot and fill it in
  for (i = 0; i < VirtualCallData::row_limit(); i++) {
    Label next_test;
    Address recv_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i)) -
                      mdo_offset_bias);
    __ ldr(tmp1, recv_addr);
    __ cbnz(tmp1, next_test);
    __ str(recv, recv_addr);
    __ mov(tmp1, DataLayout::counter_increment);
    __ str(tmp1, Address(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)) -
                         mdo_offset_bias));
    __ b(*update_done);
    __ bind(next_test);
  }
}
// Resolve the MethodData and ProfileData for (method, bci), and compute an
// offset bias when the profile slots would not fit an ldr/str immediate.
void LIR_Assembler::setup_md_access(ciMethod* method, int bci,
                                    ciMethodData*& md, ciProfileData*& data, int& mdo_offset_bias) {
  md = method->method_data_or_null();
  assert(md != NULL, "Sanity");

  data = md->bci_to_data(bci);
  assert(data != NULL, "need data for checkcast");
  assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");

  if (md->byte_offset_of_slot(data, DataLayout::header_offset()) + data->size_in_bytes() >= 4096) {
    // The offset is large so bias the mdo by the base of the slot so
    // that the ldr can use an immediate offset to reference the slots of the data
    mdo_offset_bias = md->byte_offset_of_slot(data, DataLayout::header_offset());
  }
}
// On 32-bit ARM, code before this helper should test obj for null (ZF should be set if obj is null). void LIR_Assembler::typecheck_profile_helper1(ciMethod* method, int bci,
ciMethodData*& md, ciProfileData*& data, int& mdo_offset_bias, Register obj, Register mdo, Register data_val, Label* obj_is_null) {
assert(method != NULL, "Should have method");
assert_different_registers(obj, mdo, data_val);
setup_md_access(method, bci, md, data, mdo_offset_bias);
Label not_null;
__ b(not_null, ne);
__ mov_metadata(mdo, md->constant_encoding()); if (mdo_offset_bias > 0) {
__ mov_slow(data_val, mdo_offset_bias);
__ add(mdo, mdo, data_val);
}
Address flags_addr(mdo, md->byte_offset_of_slot(data, DataLayout::flags_offset()) - mdo_offset_bias);
__ ldrb(data_val, flags_addr);
__ orr(data_val, data_val, (uint)BitData::null_seen_byte_constant());
__ strb(data_val, flags_addr);
__ b(*obj_is_null);
__ bind(not_null);
}
// NOTE(review): fragment of an op-emission routine — `stub` and `op` are
// defined outside this chunk.
if (stub->is_simple_exception_stub()) { // TODO: ARM - Late binding is used to prevent confusion of register allocator
assert(stub->is_exception_throw_stub(), "must be");
// Bind the result operand into the stub only now (late), per the TODO above.
((SimpleExceptionStub*)stub)->set_obj(op->result_opr());
}
// NOTE(review): fragment — local declarations for a type-profiling routine;
// the enclosing function is not visible in this chunk.
ciMethodData* md;
ciProfileData* data; int mdo_offset_bias = 0;
// Negate the condition and repeat the algorithm with the second operand if (opr1 == opr2) { break; }
opr1 = opr2;
acond = ncond;
}
}
#ifdef ASSERT
// Byte size of the value held by a LIR operand; used only by assert checks.
static int reg_size(LIR_Opr op) {
  switch (op->type()) {
  case T_FLOAT:
  case T_INT:      return BytesPerInt;
  case T_LONG:
  case T_DOUBLE:   return BytesPerLong;
  case T_OBJECT:
  case T_ARRAY:
  case T_METADATA: return BytesPerWord;
  case T_ADDRESS:
  case T_ILLEGAL:  // fall through
  default: ShouldNotReachHere(); return -1;
  }
}
#endif
if (right->is_address()) { // special case for adding shifted/extended register constRegister res = dest->as_pointer_register(); constRegister lreg = left->as_pointer_register(); const LIR_Address* addr = right->as_address_ptr();
// NOTE(review): the following is extraneous website boilerplate picked up
// during extraction (translated from German) — it is not part of this
// source file and should be removed:
// "The information on this web page has been carefully compiled to the best
//  of our knowledge. However, neither completeness, nor correctness, nor
//  quality of the provided information is guaranteed.
//  Note: the color syntax highlighting is still experimental."