/* * Copyright (c) 2005, 2022, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2012, 2019 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 only, as * published by the Free Software Foundation. * * This code is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * version 2 for more details (a copy is included in the LICENSE file that * accompanied this code). * * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA * or visit www.oracle.com if you need additional information or have any * questions. *
*/
if (index->is_register()) {
  // Apply the shift and accumulate the displacement.
  if (shift > 0) {
    LIR_Opr tmp = new_pointer_register();
    __ shift_left(index, shift, tmp);
    index = tmp;
  }
  if (large_disp != 0) {
    LIR_Opr tmp = new_pointer_register();
    if (Assembler::is_simm16(large_disp)) {
      // Displacement fits into a signed 16-bit immediate: fold it into the index.
      __ add(index, LIR_OprFact::intptrConst(large_disp), tmp);
      index = tmp;
    } else {
      // Displacement too large for an immediate: materialize it first, then add the index.
      __ move(LIR_OprFact::intptrConst(large_disp), tmp);
      __ add(tmp, index, tmp);
      index = tmp;
    }
    large_disp = 0;
  }
} else if (!Assembler::is_simm16(large_disp)) {
  // Index is illegal so replace it with the displacement loaded into a register.
  index = new_pointer_register();
  __ move(LIR_OprFact::intptrConst(large_disp), index);
  large_disp = 0;
}

// At this point we either have base + index or base + displacement.
if (large_disp == 0) {
  return new LIR_Address(base, index, type);
} else {
  assert(Assembler::is_simm16(large_disp), "must be");
  return new LIR_Address(base, large_disp, type);
}
}
// NOTE(review): This region appears to be a garbled merge of two different
// functions: the emit_array_address() signature below is immediately followed
// by monitor-enter code (lock/scratch/hdr registers, a MonitorEnterStub
// comment and a monitor_enter() call) that references identifiers ('x',
// 'obj', 'info_for_exception') never declared in view. The real body of
// emit_array_address (address computation from elem_size/shift) seems to have
// been dropped. TODO: restore both functions from version control; do not
// trust this region as-is.
LIR_Address* LIRGenerator::emit_array_address(LIR_Opr array_opr, LIR_Opr index_opr,
BasicType type) { int elem_size = type2aelembytes(type); int shift = exact_log2(elem_size);
// We use R4+R5 in order to get a temp effect. These regs are used in slow path (MonitorEnterStub).
LIR_Opr lock = FrameMap::R5_opr;
LIR_Opr scratch = FrameMap::R4_opr;
LIR_Opr hdr = FrameMap::R6_opr;
// This CodeEmitInfo must not have the xhandlers because here the
// object is already locked (xhandlers expects object to be unlocked).
CodeEmitInfo* info = state_for(x, x->state(), true);
monitor_enter(obj.result(), lock, hdr, scratch, x->monitor_no(), info_for_exception, info);
}
LIRItem right(x->y(), this); // Missing test if instr is commutative and if we should swap. if (right.value()->type()->as_LongConstant() &&
(x->op() == Bytecodes::_lsub && right.value()->type()->as_LongConstant()->value() == ((-1)<<15)) ) { // Sub is implemented by addi and can't support min_simm16 as constant..
right.load_item();
} else {
right.load_nonconstant();
}
assert(right.is_constant() || right.is_register(), "wrong state of right");
LIRItem right(x->y(), this); // Missing test if instr is commutative and if we should swap. if (right.value()->type()->as_IntConstant() &&
(x->op() == Bytecodes::_isub && right.value()->type()->as_IntConstant()->value() == ((-1)<<15)) ) { // Sub is implemented by addi and can't support min_simm16 as constant.
right.load_item();
} else {
right.load_nonconstant();
}
assert(right.is_constant() || right.is_register(), "wrong state of right");
// Volatile load may be followed by Unsafe CAS. if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
__ membar(); // To be safe. Unsafe semantics are unclear.
} else {
__ membar_release();
}
// Emit LIR for math intrinsics (abs, sqrt, and the transcendental functions
// which are dispatched to SharedRuntime entry points).
// NOTE(review): this fragment is truncated — the code that consumes
// runtime_entry (the runtime call and result handling) is not in view.
void LIRGenerator::do_MathIntrinsic(Intrinsic* x) {
  switch (x->id()) {
    case vmIntrinsics::_dabs: {
      assert(x->number_of_arguments() == 1, "wrong type");
      LIRItem value(x->argument_at(0), this);
      value.load_item();
      LIR_Opr dst = rlock_result(x);
      __ abs(value.result(), dst, LIR_OprFact::illegalOpr);
      break;
    }
    case vmIntrinsics::_dsqrt:
    case vmIntrinsics::_dsqrt_strict: {
      if (VM_Version::has_fsqrt()) {
        assert(x->number_of_arguments() == 1, "wrong type");
        LIRItem value(x->argument_at(0), this);
        value.load_item();
        LIR_Opr dst = rlock_result(x);
        __ sqrt(value.result(), dst, LIR_OprFact::illegalOpr);
        break;
      } // else fallthru
    }
    case vmIntrinsics::_dsin:   // fall through
    case vmIntrinsics::_dcos:   // fall through
    case vmIntrinsics::_dtan:   // fall through
    case vmIntrinsics::_dlog:   // fall through
    case vmIntrinsics::_dlog10: // fall through
    case vmIntrinsics::_dexp: {
      assert(x->number_of_arguments() == 1, "wrong type");

      // Select the shared-runtime entry point for the intrinsic.
      address runtime_entry = NULL;
      switch (x->id()) {
        case vmIntrinsics::_dsqrt:
        case vmIntrinsics::_dsqrt_strict:
          runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dsqrt);
          break;
        case vmIntrinsics::_dsin:
          runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dsin);
          break;
        case vmIntrinsics::_dcos:
          runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dcos);
          break;
        case vmIntrinsics::_dtan:
          runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dtan);
          break;
        case vmIntrinsics::_dlog:
          runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dlog);
          break;
        case vmIntrinsics::_dlog10:
          runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dlog10);
          break;
        case vmIntrinsics::_dexp:
          runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dexp);
          break;
        default:
          ShouldNotReachHere();
      }
// NOTE(review): fragment — the enclosing function's header and the LIRItem
// declarations for src/src_pos/dst/dst_pos/length are not in view.
// Load all values in callee_save_registers (C calling convention),
// as this makes the parameter passing to the fast case simpler.
src.load_item_force (FrameMap::R14_oop_opr);
src_pos.load_item_force (FrameMap::R15_opr);
dst.load_item_force (FrameMap::R17_oop_opr);
dst_pos.load_item_force (FrameMap::R18_opr);
length.load_item_force (FrameMap::R19_opr);
LIR_Opr tmp = FrameMap::R20_opr;
// Compute copy flags and the statically-known array klass (outputs of
// arraycopy_helper) for use by the LIR arraycopy emission.
int flags;
ciArrayKlass* expected_type;
arraycopy_helper(x, &flags, &expected_type);
// Fragment of the bytecode-conversion switch (the enclosing function header
// and the switch opening are not in view). Cases that need special handling
// return directly; same-category conversions break out to the plain register
// conversion at the bottom.

// int -> float: force spill
case Bytecodes::_l2f: {
  if (!VM_Version::has_fcfids()) { // fcfids is >= Power7 only
    // fcfid+frsp needs fixup code to avoid rounding incompatibility.
    address entry = CAST_FROM_FN_PTR(address, SharedRuntime::l2f);
    LIR_Opr result = call_runtime(x->value(), entry, x->type(), NULL);
    set_result(x, result);
    return;
  } // else fallthru
}
case Bytecodes::_l2d: {
  LIRItem value(x->value(), this);
  LIR_Opr reg = rlock_result(x);
  value.load_item();
  LIR_Opr tmp = force_to_spill(value.result(), T_DOUBLE);
  __ convert(x->op(), tmp, reg);
  return;
}
case Bytecodes::_i2f:
case Bytecodes::_i2d: {
  LIRItem value(x->value(), this);
  LIR_Opr reg = rlock_result(x);
  value.load_item();
  // Convert i2l first.
  LIR_Opr tmp1 = new_register(T_LONG);
  __ convert(Bytecodes::_i2l, value.result(), tmp1);
  LIR_Opr tmp2 = force_to_spill(tmp1, T_DOUBLE);
  __ convert(x->op(), tmp2, reg);
  return;
}

// float -> int: result will be stored
case Bytecodes::_f2l:
case Bytecodes::_d2l: {
  LIRItem value(x->value(), this);
  LIR_Opr reg = rlock_result(x);
  value.set_destroys_register(); // USE_KILL
  value.load_item();
  set_vreg_flag(reg, must_start_in_memory);
  __ convert(x->op(), value.result(), reg);
  return;
}
case Bytecodes::_f2i:
case Bytecodes::_d2i: {
  LIRItem value(x->value(), this);
  LIR_Opr reg = rlock_result(x);
  value.set_destroys_register(); // USE_KILL
  value.load_item();
  // Convert l2i afterwards.
  LIR_Opr tmp1 = new_register(T_LONG);
  set_vreg_flag(tmp1, must_start_in_memory);
  __ convert(x->op(), value.result(), tmp1);
  __ convert(Bytecodes::_l2i, tmp1, reg);
  return;
}

// Within same category: just register conversions.
case Bytecodes::_i2b:
case Bytecodes::_i2c:
case Bytecodes::_i2s:
case Bytecodes::_i2l:
case Bytecodes::_l2i:
case Bytecodes::_f2d:
case Bytecodes::_d2f:
  break;

default: ShouldNotReachHere();
}

// Register conversion.
LIRItem value(x->value(), this);
LIR_Opr reg = rlock_result(x);
value.load_item();
switch (x->op()) {
  case Bytecodes::_f2l:
  case Bytecodes::_d2l:
  case Bytecodes::_f2i:
  case Bytecodes::_d2i: value.set_destroys_register(); break; // USE_KILL
  default: break;
}
__ convert(x->op(), value.result(), reg);
}
// Emit LIR for a 'new' instance allocation. Fast-path allocation with a
// NewInstanceStub slow path; deoptimization can occur in the slow path.
void LIRGenerator::do_NewInstance(NewInstance* x) {
  // This instruction can be deoptimized in the slow path.
  const LIR_Opr reg = result_register_for(x->type());
#ifndef PRODUCT
  if (PrintNotLoaded && !x->klass()->is_loaded()) {
    tty->print_cr(" ###class not loaded at new bci %d", x->printable_bci());
  }
#endif
  CodeEmitInfo* info = state_for(x, x->state());
  LIR_Opr klass_reg = FrameMap::R4_metadata_opr; // Used by slow path (NewInstanceStub).
  LIR_Opr tmp1 = FrameMap::R5_oop_opr;
  LIR_Opr tmp2 = FrameMap::R6_oop_opr;
  LIR_Opr tmp3 = FrameMap::R7_oop_opr;
  LIR_Opr tmp4 = FrameMap::R8_oop_opr;
  new_instance(reg, x->klass(), x->is_unresolved(), tmp1, tmp2, tmp3, tmp4, klass_reg, info);

  // Must prevent reordering of stores for object initialization
  // with stores that publish the new object.
  __ membar_storestore();
  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}
// Emit LIR for a primitive-type array allocation ('newarray').
// NOTE(review): the original text used 'length' without declaring it and
// computed info/len/elem_type without ever emitting the allocation — the
// declaration of 'length', the klass load, and the NewTypeArrayStub +
// allocate_array emission appear to have been dropped by whatever mangled
// this file. They are restored below; verify against upstream history.
void LIRGenerator::do_NewTypeArray(NewTypeArray* x) {
  // Evaluate state_for early since it may emit code.
  CodeEmitInfo* info = state_for(x, x->state());

  LIRItem length(x->length(), this);
  length.load_item();

  LIR_Opr reg = result_register_for(x->type());
  LIR_Opr klass_reg = FrameMap::R4_metadata_opr; // Used by slow path (NewTypeArrayStub).
  // We use R5 in order to get a temp effect. This reg is used in slow path (NewTypeArrayStub).
  LIR_Opr tmp1 = FrameMap::R5_oop_opr;
  LIR_Opr tmp2 = FrameMap::R6_oop_opr;
  LIR_Opr tmp3 = FrameMap::R7_oop_opr;
  LIR_Opr tmp4 = FrameMap::R8_oop_opr;
  LIR_Opr len = length.result();
  BasicType elem_type = x->elt_type();

  __ metadata2reg(ciTypeArrayKlass::make(elem_type)->constant_encoding(), klass_reg);

  CodeStub* slow_path = new NewTypeArrayStub(klass_reg, len, reg, info);
  __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, elem_type, klass_reg, slow_path);

  // Must prevent reordering of stores for object initialization
  // with stores that publish the new object.
  __ membar_storestore();
  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}
// Emit LIR for an object array allocation ('anewarray'), with klass patching
// when the element class is not yet loaded.
void LIRGenerator::do_NewObjectArray(NewObjectArray* x) {
  // Evaluate state_for early since it may emit code.
  CodeEmitInfo* info = state_for(x, x->state());

  // In case of patching (i.e., object class is not yet loaded),
  // we need to reexecute the instruction and therefore provide
  // the state before the parameters have been consumed.
  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() || PatchALot) {
    patching_info = state_for(x, x->state_before());
  }

  // NOTE(review): restored — 'length' was used below but never declared in
  // the mangled original; verify against upstream history.
  LIRItem length(x->length(), this);
  length.load_item();

  const LIR_Opr reg = result_register_for(x->type());
  LIR_Opr klass_reg = FrameMap::R4_metadata_opr; // Used by slow path (NewObjectArrayStub).
  // We use R5 in order to get a temp effect. This reg is used in slow path (NewObjectArrayStub).
  LIR_Opr tmp1 = FrameMap::R5_oop_opr;
  LIR_Opr tmp2 = FrameMap::R6_oop_opr;
  LIR_Opr tmp3 = FrameMap::R7_oop_opr;
  LIR_Opr tmp4 = FrameMap::R8_oop_opr;
  LIR_Opr len = length.result();

  CodeStub* slow_path = new NewObjectArrayStub(klass_reg, len, reg, info);
  ciMetadata* obj = ciObjArrayKlass::make(x->klass());
  if (obj == ciEnv::unloaded_ciobjarrayklass()) {
    BAILOUT("encountered unloaded_ciobjarrayklass due to out of memory error");
  }
  klass2reg_with_patching(klass_reg, obj, patching_info);
  __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, T_OBJECT, klass_reg, slow_path);

  // Must prevent reordering of stores for object initialization
  // with stores that publish the new object.
  __ membar_storestore();
  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}
// Emit LIR for a multi-dimensional array allocation ('multianewarray').
// Dimension sizes are stored into the outgoing stack area and passed as a
// varargs pointer to the Runtime1 new_multi_array entry.
void LIRGenerator::do_NewMultiArray(NewMultiArray* x) {
  Values* dims = x->dims();
  int i = dims->length();
  LIRItemList* items = new LIRItemList(i, i, NULL);
  while (i-- > 0) {
    LIRItem* size = new LIRItem(dims->at(i), this);
    items->at_put(i, size);
  }

  // Evaluate state_for early since it may emit code.
  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() || PatchALot) {
    patching_info = state_for(x, x->state_before());

    // Cannot re-use same xhandlers for multiple CodeEmitInfos, so
    // clone all handlers (NOTE: Usually this is handled transparently
    // by the CodeEmitInfo cloning logic in CodeStub constructors but
    // is done explicitly here because a stub isn't being used).
    x->set_exception_handlers(new XHandlers(x->exception_handlers()));
  }
  CodeEmitInfo* info = state_for(x, x->state());

  i = dims->length();
  while (i-- > 0) {
    LIRItem* size = items->at(i);
    size->load_nonconstant();
    // FrameMap::_reserved_argument_area_size includes the dimensions
    // varargs, because it's initialized to hir()->max_stack() when the
    // FrameMap is created.
    store_stack_parameter(size->result(), in_ByteSize(i*sizeof(jint) + FrameMap::first_available_sp_in_frame));
  }

  const LIR_Opr klass_reg = FrameMap::R4_metadata_opr; // Used by slow path.
  klass2reg_with_patching(klass_reg, x->klass(), patching_info);

  LIR_Opr rank = FrameMap::R5_opr; // Used by slow path.
  __ move(LIR_OprFact::intConst(x->rank()), rank);

  LIR_Opr varargs = FrameMap::as_pointer_opr(R6); // Used by slow path.
  __ leal(LIR_OprFact::address(new LIR_Address(FrameMap::SP_opr, FrameMap::first_available_sp_in_frame, T_INT)),
          varargs);

  // Note: This instruction can be deoptimized in the slow path.
  LIR_OprList* args = new LIR_OprList(3);
  args->append(klass_reg);
  args->append(rank);
  args->append(varargs);
  const LIR_Opr reg = result_register_for(x->type());
  __ call_runtime(Runtime1::entry_for(Runtime1::new_multi_array_id),
                  LIR_OprFact::illegalOpr,
                  reg, args, info);

  // Must prevent reordering of stores for object initialization
  // with stores that publish the new object.
  __ membar_storestore();
  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}
// Block begins require no LIR on this platform at present.
void LIRGenerator::do_BlockBegin(BlockBegin* x) {
  // Intentionally empty.
}
// Emit LIR for a checkcast. Chooses the appropriate slow-path stub
// (incompatible-class-change, invokespecial receiver deopt, or plain
// ClassCastException) and emits the checkcast LIR op.
void LIRGenerator::do_CheckCast(CheckCast* x) {
  LIRItem obj(x->obj(), this);
  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() || (PatchALot && !x->is_incompatible_class_change_check() && !x->is_invokespecial_receiver_check())) {
    // Must do this before locking the destination register as
    // an oop register, and before the obj is loaded (so x->obj()->item()
    // is valid for creating a debug info location).
    patching_info = state_for(x, x->state_before());
  }
  obj.load_item();
  LIR_Opr out_reg = rlock_result(x);
  CodeStub* stub;
  CodeEmitInfo* info_for_exception =
    (x->needs_exception_state() ? state_for(x) :
     state_for(x, x->state_before(), true/*ignore_xhandler*/));

  if (x->is_incompatible_class_change_check()) {
    assert(patching_info == NULL, "can't patch this");
    stub = new SimpleExceptionStub(Runtime1::throw_incompatible_class_change_error_id,
                                   LIR_OprFact::illegalOpr, info_for_exception);
  } else if (x->is_invokespecial_receiver_check()) {
    assert(patching_info == NULL, "can't patch this");
    stub = new DeoptimizeStub(info_for_exception,
                              Deoptimization::Reason_class_check,
                              Deoptimization::Action_none);
  } else {
    stub = new SimpleExceptionStub(Runtime1::throw_class_cast_exception_id, obj.result(), info_for_exception);
  }

  // Following registers are used by slow_subtype_check:
  LIR_Opr tmp1 = FrameMap::R4_oop_opr; // super_klass
  LIR_Opr tmp2 = FrameMap::R5_oop_opr; // sub_klass
  LIR_Opr tmp3 = FrameMap::R6_oop_opr; // temp
  __ checkcast(out_reg, obj.result(), x->klass(), tmp1, tmp2, tmp3,
               x->direct_compare(), info_for_exception, patching_info, stub,
               x->profiled_method(), x->profiled_bci());
}
// Emit LIR for an instanceof test, with klass patching when the tested class
// is not yet loaded.
void LIRGenerator::do_InstanceOf(InstanceOf* x) {
  LIRItem obj(x->obj(), this);
  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() || PatchALot) {
    patching_info = state_for(x, x->state_before());
  }
  // Ensure the result register is not the input register because the
  // result is initialized before the patching safepoint.
  obj.load_item();
  LIR_Opr out_reg = rlock_result(x);

  // Following registers are used by slow_subtype_check:
  LIR_Opr tmp1 = FrameMap::R4_oop_opr; // super_klass
  LIR_Opr tmp2 = FrameMap::R5_oop_opr; // sub_klass
  LIR_Opr tmp3 = FrameMap::R6_oop_opr; // temp
  __ instanceof(out_reg, obj.result(), x->klass(), tmp1, tmp2, tmp3,
                x->direct_compare(), patching_info,
                x->profiled_method(), x->profiled_bci());
}
// Fragment of do_If-style compare emission (the enclosing function header and
// the declarations of xin/yin/tag/cond are not in view): load the left
// operand, inline the right operand as an immediate where the ISA allows it,
// then emit the backedge-counter/safepoint code.
LIR_Opr left = LIR_OprFact::illegalOpr;
LIR_Opr right = LIR_OprFact::illegalOpr;

xin->load_item();
left = xin->result();

if (yin->result()->is_constant() && yin->result()->type() == T_INT &&
    Assembler::is_simm16(yin->result()->as_constant_ptr()->as_jint())) {
  // Inline int constants which are small enough to be immediate operands.
  right = LIR_OprFact::value_type(yin->value()->type());
} else if (tag == longTag && yin->is_constant() && yin->get_jlong_constant() == 0 &&
           (cond == If::eql || cond == If::neq)) {
  // Inline long zero.
  right = LIR_OprFact::value_type(yin->value()->type());
} else if (tag == objectTag && yin->is_constant() && (yin->get_jobject_constant()->is_null_object())) {
  right = LIR_OprFact::value_type(yin->value()->type());
} else {
  yin->load_item();
  right = yin->result();
}
set_no_result(x);

// Add safepoint before generating condition code so it can be recomputed.
if (x->is_safepoint()) {
  // Increment backedge counter if needed.
  increment_backedge_counter_conditionally(lir_cond(cond), left, right, state_for(x, x->state_before()),
                                           x->tsux()->bci(), x->fsux()->bci(), x->profiled_bci());
  __ safepoint(safepoint_poll_register(), state_for(x, x->state_before()));
}
// NOTE(review): the following lines are fragments from two separate intrinsic
// argument-setup paths (CRC32 at the top, CRC32C below); the surrounding
// declarations of crc/len/a/arg1..arg3/cc are not in view.
crc.load_item_force(arg1); // We skip int->long conversion here, because CRC32 stub doesn't care about high bits.
__ leal(LIR_OprFact::address(a), arg2);
len.load_item_force(arg3); // We skip int->long conversion here, because CRC32 stub expects int.
crc.load_item_force(arg1); // We skip int->long conversion here, because CRC32C stub doesn't care about high bits.
__ leal(LIR_OprFact::address(a), arg2);
__ move(len, cc->at(2)); // We skip int->long conversion here, because CRC32C stub expects int.
// (Removed: trailing German website-disclaimer text that was accidentally
// appended to this C++ source file and is not part of the program.)