/* * Copyright (c) 2005, 2022, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 only, as * published by the Free Software Foundation. * * This code is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * version 2 for more details (a copy is included in the LICENSE file that * accompanied this code). * * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA * or visit www.oracle.com if you need additional information or have any * questions. *
*/
// Item will be loaded into a byte register; Intel only void LIRItem::load_byte_item() {
load_item();
LIR_Opr res = result();
if (!res->is_virtual() || !_gen->is_vreg_flag_set(res, LIRGenerator::byte_reg)) { // make sure that it is a byte register
assert(!value()->type()->is_float() && !value()->type()->is_double(), "can't load floats in byte register");
LIR_Opr reg = _gen->rlock_byte(T_BYTE);
__ move(res, reg);
_result = reg;
}
}
void LIRItem::load_nonconstant() {
LIR_Opr r = value()->operand(); if (r->is_constant()) {
_result = r;
} else {
load_item();
}
}
//--------- loading items into registers --------------------------------
// i486 instructions can inline constants bool LIRGenerator::can_store_as_constant(Value v, BasicType type) const { if (type == T_SHORT || type == T_CHAR) { returnfalse;
}
Constant* c = v->as_Constant(); if (c && c->state_before() == NULL) { // constants of any type can be stored directly, except for // unloaded object constants. returntrue;
} returnfalse;
}
// Build a LIR_Address for an array element access, folding a constant index
// into the displacement when possible.
// NOTE(review): the function header and the base-offset computation were
// missing from this extracted chunk; restored to match the uses below --
// confirm against the complete file.
LIR_Address* LIRGenerator::emit_array_address(LIR_Opr array_opr, LIR_Opr index_opr,
                                              BasicType type) {
  int offset_in_bytes = arrayOopDesc::base_offset_in_bytes(type);

  LIR_Address* addr;
  if (index_opr->is_constant()) {
    int elem_size = type2aelembytes(type);
#ifdef _LP64
    jint index = index_opr->as_jint();
    jlong disp = offset_in_bytes + (jlong)(index) * elem_size;
    if (disp > max_jint) {
      // Displacement overflow. Cannot directly use instruction with 32-bit displacement for 64-bit addresses.
      // Convert array index to long to do array offset computation with 64-bit values.
      index_opr = new_register(T_LONG);
      __ move(LIR_OprFact::longConst(index), index_opr);
      addr = new LIR_Address(array_opr, index_opr, LIR_Address::scale(type), offset_in_bytes, type);
    } else {
      addr = new LIR_Address(array_opr, (intx)disp, type);
    }
#else
    // A displacement overflow can also occur for x86 but that is not a problem due to the 32-bit address range!
    // Let's assume an array 'a' and an access with displacement 'disp'. When disp overflows, then "a + disp" will
    // always be negative (i.e. underflows the 32-bit address range):
    //   Let N = 2^32: a + signed_overflow(disp) = a + disp - N.
    // "a + disp" is always smaller than N. If an index was chosen which would point to an address beyond N, then
    // range checks would catch that and throw an exception. Thus, a + disp < 0 holds which means that it always
    // underflows the 32-bit address range:
    //   unsigned_underflow(a + signed_overflow(disp)) = unsigned_underflow(a + disp - N)
    //                                                 = (a + disp - N) + N = a + disp
    // This shows that we still end up at the correct address with a displacement overflow due to the 32-bit address
    // range limitation. This overflow only needs to be handled if addresses can be larger as on 64-bit platforms.
    addr = new LIR_Address(array_opr, offset_in_bytes + (intx)(index_opr->as_jint()) * elem_size, type);
#endif // _LP64
  } else {
#ifdef _LP64
    if (index_opr->type() == T_INT) {
      // Widen a 32-bit index before it takes part in 64-bit address arithmetic.
      LIR_Opr tmp = new_register(T_LONG);
      __ convert(Bytecodes::_i2l, index_opr, tmp);
      index_opr = tmp;
    }
#endif // _LP64
    addr = new LIR_Address(array_opr,
                           index_opr,
                           LIR_Address::scale(type),
                           offset_in_bytes, type);
  }
  return addr;
}
// "lock" stores the address of the monitor stack slot, so this is not an oop
LIR_Opr lock = new_register(T_INT);
CodeEmitInfo* info_for_exception = NULL; if (x->needs_null_check()) {
info_for_exception = state_for(x);
} // this CodeEmitInfo must not have the xhandlers because here the // object is already locked (xhandlers expect object to be unlocked)
CodeEmitInfo* info = state_for(x, x->state(), true);
monitor_enter(obj.result(), lock, syncTempOpr(), LIR_OprFact::illegalOpr,
x->monitor_no(), info_for_exception, info);
}
// NOTE(review): extraction-damaged fragment, apparently from
// LIRGenerator::do_ArithmeticOp_FPU. The function header, the declarations of
// 'right'/'must_load_both', and parts of both the 32-bit and 64-bit paths are
// missing from this chunk; several 'else if' tokens were fused to 'elseif'
// and preprocessor directives were fused into code lines. Left byte-identical
// here -- restore from the complete file before building.
#ifndef _LP64 // do not load right operand if it is a constant. only 0 and 1 are // loaded because there are special instructions for loading them // without memory access (not needed for SSE2 instructions) bool must_load_right = false; if (right.is_constant()) {
LIR_Const* c = right.result()->as_constant_ptr();
assert(c != NULL, "invalid constant");
assert(c->type() == T_FLOAT || c->type() == T_DOUBLE, "invalid type");
if (must_load_both) { // frem and drem destroy also right operand, so move it to a new register
right.set_destroys_register();
right.load_item();
} elseif (right.is_register()) {
right.load_item(); #ifndef _LP64
} elseif (must_load_right) {
right.load_item(); #endif// !LP64
} else {
right.dont_load_item();
}
LIR_Opr reg = rlock(x);
LIR_Opr tmp = LIR_OprFact::illegalOpr; if (x->op() == Bytecodes::_dmul || x->op() == Bytecodes::_ddiv) {
tmp = new_register(T_DOUBLE);
}
#ifdef _LP64 if (x->op() == Bytecodes::_frem || x->op() == Bytecodes::_drem) { // frem and drem are implemented as a direct call into the runtime.
LIRItem left(x->x(), this);
LIRItem right(x->y(), this);
// for _ladd, _lmul, _lsub, _ldiv, _lrem void LIRGenerator::do_ArithmeticOp_Long(ArithmeticOp* x) { if (x->op() == Bytecodes::_ldiv || x->op() == Bytecodes::_lrem ) { // long division is implemented as a direct call into the runtime
LIRItem left(x->x(), this);
LIRItem right(x->y(), this);
// the check for division by zero destroys the right operand
right.set_destroys_register();
BasicTypeList signature(2);
signature.append(T_LONG);
signature.append(T_LONG);
CallingConvention* cc = frame_map()->c_calling_convention(&signature);
// check for division by zero (destroys registers of right operand!)
CodeEmitInfo* info = state_for(x);
__ cmp(lir_cond_equal, right.result(), LIR_OprFact::longConst(0));
__ branch(lir_cond_equal, new DivByZeroStub(info));
address entry = NULL; switch (x->op()) { case Bytecodes::_lrem:
entry = CAST_FROM_FN_PTR(address, SharedRuntime::lrem); break; // check if dividend is 0 is done elsewhere case Bytecodes::_ldiv:
entry = CAST_FROM_FN_PTR(address, SharedRuntime::ldiv); break; // check if dividend is 0 is done elsewhere default:
ShouldNotReachHere();
}
LIR_Opr result = rlock_result(x);
__ call_runtime_leaf(entry, getThreadTemp(), result_reg, cc->args());
__ move(result_reg, result);
} elseif (x->op() == Bytecodes::_lmul) { // missing test if instr is commutative and if we should swap
LIRItem left(x->x(), this);
LIRItem right(x->y(), this);
// right register is destroyed by the long mul, so it must be // copied to a new register.
right.set_destroys_register();
left.load_item();
right.load_item();
LIR_Opr reg = FrameMap::long0_opr;
arithmetic_op_long(x->op(), reg, left.result(), right.result(), NULL);
LIR_Opr result = rlock_result(x);
__ move(reg, result);
} else { // missing test if instr is commutative and if we should swap
LIRItem left(x->x(), this);
LIRItem right(x->y(), this);
left.load_item(); // don't load constants to save register
right.load_nonconstant();
rlock_result(x);
arithmetic_op_long(x->op(), x->operand(), left.result(), right.result(), NULL);
}
}
// for: _iadd, _imul, _isub, _idiv, _irem void LIRGenerator::do_ArithmeticOp_Int(ArithmeticOp* x) { if (x->op() == Bytecodes::_idiv || x->op() == Bytecodes::_irem) { // The requirements for division and modulo // input : rax,: dividend min_int // reg: divisor (may not be rax,/rdx) -1 // // output: rax,: quotient (= rax, idiv reg) min_int // rdx: remainder (= rax, irem reg) 0
// rax, and rdx will be destroyed
// Note: does this invalidate the spec ???
LIRItem right(x->y(), this);
LIRItem left(x->x() , this); // visit left second, so that the is_register test is valid
// call state_for before load_item_force because state_for may // force the evaluation of other instructions that are needed for // correct debug info. Otherwise the live range of the fix // register might be too long.
CodeEmitInfo* info = state_for(x);
if (!ImplicitDiv0Checks) {
__ cmp(lir_cond_equal, right.result(), LIR_OprFact::intConst(0));
__ branch(lir_cond_equal, new DivByZeroStub(info)); // Idiv/irem cannot trap (passing info would generate an assertion).
info = NULL;
}
LIR_Opr tmp = FrameMap::rdx_opr; // idiv and irem use rdx in their implementation if (x->op() == Bytecodes::_irem) {
__ irem(left.result(), right.result(), result_reg, tmp, info);
} elseif (x->op() == Bytecodes::_idiv) {
__ idiv(left.result(), right.result(), result_reg, tmp, info);
} else {
ShouldNotReachHere();
}
__ move(result_reg, result);
} else { // missing test if instr is commutative and if we should swap
LIRItem left(x->x(), this);
LIRItem right(x->y(), this);
LIRItem* left_arg = &left;
LIRItem* right_arg = &right; if (x->is_commutative() && left.is_stack() && right.is_register()) { // swap them if left is real stack (or cached) and right is real register(not cached)
left_arg = &right;
right_arg = &left;
}
left_arg->load_item();
// do not need to load right, as we can handle stack and constants if (x->op() == Bytecodes::_imul ) { // check if we can use shift instead bool use_constant = false; bool use_tmp = false; if (right_arg->is_constant()) {
jint iconst = right_arg->get_jint_constant(); if (iconst > 0 && iconst < max_jint) { if (is_power_of_2(iconst)) {
use_constant = true;
} elseif (is_power_of_2(iconst - 1) || is_power_of_2(iconst + 1)) {
use_constant = true;
use_tmp = true;
}
}
} if (use_constant) {
right_arg->dont_load_item();
} else {
right_arg->load_item();
}
LIR_Opr tmp = LIR_OprFact::illegalOpr; if (use_tmp) {
tmp = new_register(T_INT);
}
rlock_result(x);
// Dispatch an arithmetic operation to the generator for its value type.
void LIRGenerator::do_ArithmeticOp(ArithmeticOp* x) {
  // when an operand with use count 1 is the left operand, then it is
  // likely that no move for 2-operand-LIR-form is necessary
  if (x->is_commutative() && x->y()->as_Constant() == NULL && x->x()->use_count() > x->y()->use_count()) {
    x->swap_operands();
  }

  ValueTag tag = x->type()->tag();
  assert(x->x()->type()->tag() == tag && x->y()->type()->tag() == tag, "wrong parameters");
  switch (tag) {
    case floatTag:
    case doubleTag: do_ArithmeticOp_FPU(x);  return;
    case longTag:   do_ArithmeticOp_Long(x); return;
    case intTag:    do_ArithmeticOp_Int(x);  return;
    default:        ShouldNotReachHere();    return;
  }
}
// _ishl, _lshl, _ishr, _lshr, _iushr, _lushr void LIRGenerator::do_ShiftOp(ShiftOp* x) { // count must always be in rcx
LIRItem value(x->x(), this);
LIRItem count(x->y(), this);
ValueTag elemType = x->type()->tag(); bool must_load_count = !count.is_constant() || elemType == longTag; if (must_load_count) { // count for long must be in register
count.load_item_force(shiftCountOpr());
} else {
count.dont_load_item();
}
value.load_item();
LIR_Opr reg = rlock_result(x);
// _iand, _land, _ior, _lor, _ixor, _lxor void LIRGenerator::do_LogicOp(LogicOp* x) { // when an operand with use count 1 is the left operand, then it is // likely that no move for 2-operand-LIR-form is necessary if (x->is_commutative() && x->y()->as_Constant() == NULL && x->x()->use_count() > x->y()->use_count()) {
x->swap_operands();
}
// NOTE(review): extraction-damaged fragment of LIRGenerator::do_update_CRC32.
// The function is truncated in the middle of the _updateBytesCRC32 /
// _updateByteBufferCRC32 case (the buffer handling, call setup, and closing
// braces are missing from this chunk). Left byte-identical here -- restore
// from the complete file before building.
// The java calling convention will give us enough registers // so that on the stub side the args will be perfect already. // On the other slow/special case side we call C and the arg // positions are not similar enough to pick one as the best. // Also because the java calling convention is a "shifted" version // of the C convention we can process the java args trivially into C // args without worry of overwriting during the xfer
void LIRGenerator::do_update_CRC32(Intrinsic* x) {
assert(UseCRC32Intrinsics, "need AVX and LCMUL instructions support"); // Make all state_for calls early since they can emit code
LIR_Opr result = rlock_result(x); int flags = 0; switch (x->id()) { case vmIntrinsics::_updateCRC32: {
LIRItem crc(x->argument_at(0), this);
LIRItem val(x->argument_at(1), this); // val is destroyed by update_crc32
val.set_destroys_register();
crc.load_item();
val.load_item();
__ update_crc32(crc.result(), val.result(), result); break;
} case vmIntrinsics::_updateBytesCRC32: case vmIntrinsics::_updateByteBufferCRC32: { bool is_updateBytes = (x->id() == vmIntrinsics::_updateBytesCRC32);
// NOTE(review): extraction-damaged fragment of LIRGenerator::do_Convert.
// The LP64 path appears intact, but the #else (32-bit) path is truncated:
// only the first switch case survives and the trailing two statements
// reference a 'reg' that is declared in the missing portion. Left
// byte-identical here -- restore from the complete file before building.
void LIRGenerator::do_Convert(Convert* x) { #ifdef _LP64
LIRItem value(x->value(), this);
value.load_item();
LIR_Opr input = value.result();
LIR_Opr result = rlock(x);
__ convert(x->op(), input, result);
assert(result->is_virtual(), "result must be virtual register");
set_result(x, result); #else // flags that vary for the different operations and different SSE-settings bool fixed_input = false, fixed_result = false, round_result = false, needs_stub = false;
switch (x->op()) { case Bytecodes::_i2l: // fall through case Bytecodes::_l2i: // fall through case Bytecodes::_i2b: // fall through case Bytecodes::_i2c: // fall through case Bytecodes::_i2s: fixed_input = false; fixed_result = false; round_result = false; needs_stub = false; break;
LIR_Opr result = rlock_result(x);
__ move(reg, result);
}
// Emit LIR to allocate an object array, with a slow path stub for the
// uncommon cases.
void LIRGenerator::do_NewObjectArray(NewObjectArray* x) {
  LIRItem length(x->length(), this);
  // in case of patching (i.e., object class is not yet loaded), we need to reexecute the instruction
  // and therefore provide the state before the parameters have been consumed
  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() || PatchALot) {
    patching_info = state_for(x, x->state_before());
  }

  // NOTE(review): the declarations of 'info', 'reg', the temp registers and
  // 'klass_reg' were missing from this extracted chunk; restored -- confirm
  // the exact register assignments against the complete file.
  CodeEmitInfo* info = state_for(x, x->state());

  const LIR_Opr reg = result_register_for(x->type());
  LIR_Opr tmp1 = FrameMap::rcx_oop_opr;
  LIR_Opr tmp2 = FrameMap::rsi_oop_opr;
  LIR_Opr tmp3 = FrameMap::rdi_oop_opr;
  LIR_Opr tmp4 = reg;
  LIR_Opr klass_reg = FrameMap::rdx_metadata_opr;

  length.load_item_force(FrameMap::rbx_opr);
  LIR_Opr len = length.result();

  CodeStub* slow_path = new NewObjectArrayStub(klass_reg, len, reg, info);
  ciKlass* obj = (ciKlass*) ciObjArrayKlass::make(x->klass());
  if (obj == ciEnv::unloaded_ciobjarrayklass()) {
    BAILOUT("encountered unloaded_ciobjarrayklass due to out of memory error");
  }
  klass2reg_with_patching(klass_reg, obj, patching_info);

  __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, T_OBJECT, klass_reg, slow_path);

  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}
// NOTE(review): extraction-damaged fragment of LIRGenerator::do_NewMultiArray.
// The function is truncated after loading the dimension sizes: the stack
// parameter stores, the runtime call, and the closing braces are missing from
// this chunk. Left byte-identical here -- restore from the complete file
// before building.
void LIRGenerator::do_NewMultiArray(NewMultiArray* x) {
Values* dims = x->dims(); int i = dims->length();
LIRItemList* items = new LIRItemList(i, i, NULL); while (i-- > 0) {
LIRItem* size = new LIRItem(dims->at(i), this);
items->at_put(i, size);
}
// Evaluate state_for early since it may emit code.
CodeEmitInfo* patching_info = NULL; if (!x->klass()->is_loaded() || PatchALot) {
patching_info = state_for(x, x->state_before());
// Cannot re-use same xhandlers for multiple CodeEmitInfos, so // clone all handlers (NOTE: Usually this is handled transparently // by the CodeEmitInfo cloning logic in CodeStub constructors but // is done explicitly here because a stub isn't being used).
x->set_exception_handlers(new XHandlers(x->exception_handlers()));
}
CodeEmitInfo* info = state_for(x, x->state());
i = dims->length(); while (i-- > 0) {
LIRItem* size = items->at(i);
size->load_nonconstant();
// NOTE(review): extraction-damaged fragment, apparently from
// LIRGenerator::do_CheckCast (the is_incompatible_class_change_check /
// is_invokespecial_receiver_check tests match that function). The header, the
// 'obj' declaration, and the tail (stub selection and checkcast emission) are
// missing from this chunk. Left byte-identical here -- restore from the
// complete file before building.
CodeEmitInfo* patching_info = NULL; if (!x->klass()->is_loaded() || (PatchALot && !x->is_incompatible_class_change_check() && !x->is_invokespecial_receiver_check())) { // must do this before locking the destination register as an oop register, // and before the obj is loaded (the latter is for deoptimization)
patching_info = state_for(x, x->state_before());
}
obj.load_item();
// info for exceptions
CodeEmitInfo* info_for_exception =
(x->needs_exception_state() ? state_for(x) :
state_for(x, x->state_before(), true/*ignore_xhandler*/));
// result and test object may not be in same register
LIR_Opr reg = rlock_result(x);
CodeEmitInfo* patching_info = NULL; if ((!x->klass()->is_loaded() || PatchALot)) { // must do this before locking the destination register as an oop register
patching_info = state_for(x, x->state_before());
}
obj.load_item();
LIR_Opr tmp3 = LIR_OprFact::illegalOpr; if (!x->klass()->is_loaded() || UseCompressedClassPointers) {
tmp3 = new_register(objectType);
}
__ instanceof(reg, obj.result(), x->klass(),
new_register(objectType), new_register(objectType), tmp3,
x->direct_compare(), patching_info, x->profiled_method(), x->profiled_bci());
}
// NOTE(review): extraction-damaged fragment, apparently from
// LIRGenerator::do_If. It begins mid-function (the declarations of 'tag',
// 'cond', 'xin'/'yin' and 'xitem'/'yitem' are in the missing portion) and is
// truncated before the final compare/branch emission; an 'else if' token was
// fused to 'elseif'. Left byte-identical here -- restore from the complete
// file before building.
if (tag == longTag) { // for longs, only conditions "eql", "neq", "lss", "geq" are valid; // mirror for other conditions if (cond == If::gtr || cond == If::leq) {
cond = Instruction::mirror(cond);
xin = &yitem;
yin = &xitem;
}
xin->set_destroys_register();
}
xin->load_item(); if (tag == longTag && yin->is_constant() && yin->get_jlong_constant() == 0 && (cond == If::eql || cond == If::neq)) { // inline long zero
yin->dont_load_item();
} elseif (tag == longTag || tag == floatTag || tag == doubleTag) { // longs cannot handle constants at right side
yin->load_item();
} else {
yin->dont_load_item();
}
LIR_Opr left = xin->result();
LIR_Opr right = yin->result();
set_no_result(x);
// add safepoint before generating condition code so it can be recomputed if (x->is_safepoint()) { // increment backedge counter if needed
increment_backedge_counter_conditionally(lir_cond(cond), left, right, state_for(x, x->state_before()),
x->tsux()->bci(), x->fsux()->bci(), x->profiled_bci());
__ safepoint(safepoint_poll_register(), state_for(x, x->state_before()));
}
// Store 'value' to a volatile field at 'address'. Long-typed stores are
// routed through an FP register so the 64-bit value is transferred
// atomically; everything else is a plain store.
void LIRGenerator::volatile_field_store(LIR_Opr value, LIR_Address* address,
                                        CodeEmitInfo* info) {
  if (address->type() != T_LONG) {
    __ store(value, address, info);
    return;
  }

  // Retype the access as T_DOUBLE so the moves below go through an FP register.
  LIR_Address* fp_address = new LIR_Address(address->base(),
                                            address->index(), address->scale(),
                                            address->disp(), T_DOUBLE);

  // Transfer the value atomically by using FP moves. This means
  // the value has to be moved between CPU and FPU registers. It
  // always has to be moved through spill slot since there's no
  // quick way to pack the value into an SSE register.
  LIR_Opr temp_double = new_register(T_DOUBLE);
  LIR_Opr spill = new_register(T_LONG);
  set_vreg_flag(spill, must_start_in_memory);
  __ move(value, spill);
  __ volatile_move(spill, temp_double, T_LONG);
  __ volatile_move(temp_double, LIR_OprFact::address(fp_address), T_LONG, info);
}
// Load a volatile field from 'address' into 'result'. Long-typed loads are
// routed through an FP register so the 64-bit value is transferred
// atomically; everything else is a plain load.
void LIRGenerator::volatile_field_load(LIR_Address* address, LIR_Opr result,
                                       CodeEmitInfo* info) {
  if (address->type() != T_LONG) {
    __ load(address, result, info);
    return;
  }

  // Retype the access as T_DOUBLE so the moves below go through an FP register.
  LIR_Address* fp_address = new LIR_Address(address->base(),
                                            address->index(), address->scale(),
                                            address->disp(), T_DOUBLE);

  // Transfer the value atomically by using FP moves. This means
  // the value has to be moved between CPU and FPU registers. In
  // SSE0 and SSE1 mode it has to be moved through spill slot but in
  // SSE2+ mode it can be moved directly.
  LIR_Opr temp_double = new_register(T_DOUBLE);
  __ volatile_move(LIR_OprFact::address(fp_address), temp_double, T_LONG, info);
  __ volatile_move(temp_double, result, T_LONG);
#ifndef _LP64
  if (UseSSE < 2) {
    // no spill slot needed in SSE2 mode because xmm->cpu register move is possible
    set_vreg_flag(result, must_start_in_memory);
  }
#endif // !LP64
}
// ---- Non-source footer left over from web extraction (not part of the original file);
// ---- comment-prefixed so it cannot break compilation. German boilerplate follows:
// ¤ Dauer der Verarbeitung: 0.22 Sekunden
// (vorverarbeitet)
// ¤
// Die Informationen auf dieser Webseite wurden
// nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
// noch Qualität der bereit gestellten Informationen zugesichert.
// Bemerkung:
// Die farbliche Syntaxdarstellung ist noch experimentell.