/*
 * Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
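
// The type-profile cells updated below use the TypeEntries encoding
// (see methodData.hpp): a cell holds a Klass* with two status flags,
// null_seen and type_unknown, folded into its low bits. XOR'ing the
// observed klass against the cell and masking with type_klass_mask
// therefore yields zero exactly when the same klass was recorded
// before, independent of the flag bits.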
xorptr(obj, mdo_addr);
testptr(obj, TypeEntries::type_klass_mask);
jccb(Assembler::zero, next); // klass seen before, nothing to
                             // do. The unknown bit may have been
                             // set already but no need to check.
testptr(obj, TypeEntries::type_unknown);
jccb(Assembler::notZero, next); // already unknown. Nothing to do anymore.
cmpptr(mdo_addr, 0);
jccb(Assembler::equal, none);
cmpptr(mdo_addr, TypeEntries::null_seen);
jccb(Assembler::equal, none);
// There is a chance that the checks above (re-reading profiling
// data from memory) fail if another thread has just set the
// profiling to this obj's klass
xorptr(obj, mdo_addr);
testptr(obj, TypeEntries::type_klass_mask);
jccb(Assembler::zero, next);
// different than before. Cannot keep accurate profile.
orptr(mdo_addr, TypeEntries::type_unknown);
jmpb(next);
bind(none);
// first time here. Set profile type.
movptr(mdo_addr, obj);

bind(next);
}
if (MethodData::profile_arguments()) {
Label done;
int off_to_args = in_bytes(TypeEntriesAtCall::args_data_offset());
addptr(mdp, off_to_args);
for (int i = 0; i < TypeProfileArgsLimit; i++) {
  if (i > 0 || MethodData::profile_return()) {
    // If return value type is profiled we may have no argument to profile
movptr(tmp, Address(mdp, in_bytes(TypeEntriesAtCall::cell_count_offset())-off_to_args));
subl(tmp, i*TypeStackSlotEntries::per_arg_count());
cmpl(tmp, TypeStackSlotEntries::per_arg_count());
jcc(Assembler::less, done);
}
movptr(tmp, Address(callee, Method::const_offset()));
load_unsigned_short(tmp, Address(tmp, ConstMethod::size_of_parameters_offset()));
// stack offset o (zero based) from the start of the argument
// list, for n arguments translates into offset n - o - 1 from
// the end of the argument list
subptr(tmp, Address(mdp, in_bytes(TypeEntriesAtCall::stack_slot_offset(i))-off_to_args));
subl(tmp, 1);
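// e.g. with n == 3 parameters, stack slot o == 0 (the first argument)
// yields 3 - 0 - 1 == 2, its offset from the end of the argument list,
// which is what tmp now holds.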
Address arg_addr = argument_address(tmp);
movptr(tmp, arg_addr);

Address mdo_arg_addr(mdp, in_bytes(TypeEntriesAtCall::argument_type_offset(i))-off_to_args);
profile_obj_type(tmp, mdo_arg_addr);
int to_add = in_bytes(TypeStackSlotEntries::per_arg_size());
addptr(mdp, to_add);
off_to_args += to_add;
}
if (MethodData::profile_return()) {
movptr(tmp, Address(mdp, in_bytes(TypeEntriesAtCall::cell_count_offset())-off_to_args));
subl(tmp, TypeProfileArgsLimit*TypeStackSlotEntries::per_arg_count());
}
bind(done);
if (MethodData::profile_return()) {
  // We're right after the type profile for the last
  // argument. tmp is the number of cells left in the
  // CallTypeData/VirtualCallTypeData to reach its end. Non null
  // if there's a return to profile.
assert(ReturnTypeEntry::static_cell_count() < TypeStackSlotEntries::per_arg_count(), "can't move past ret type");
shll(tmp, log2i_exact((int)DataLayout::cell_size));
addptr(mdp, tmp);
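  // (tmp was a cell count; the shift scaled it by DataLayout::cell_size
  // into a byte offset, so mdp now points past the return type cells,
  // i.e. at the end of the CallTypeData/VirtualCallTypeData.)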
}
movptr(Address(rbp, frame::interpreter_frame_mdp_offset * wordSize), mdp);
} else {
assert(MethodData::profile_return(), "either profile call args or call ret");
update_mdp_by_constant(mdp, in_bytes(TypeEntriesAtCall::return_only_size()));
}
// mdp points right after the end of the
// CallTypeData/VirtualCallTypeData, right after the cells for the
// return value type if there's one
if (MethodData::profile_return_jsr292_only()) {
assert(Method::intrinsic_id_size_in_bytes() == 2, "assuming Method::_intrinsic_id is u2");
// If we don't profile all invoke bytecodes we must make sure
// it's a bytecode we indeed profile. We can't go back to the
// beginning of the ProfileData we intend to update to check its
// type because we're right after it and we don't know its
// length
Label do_profile;
cmpb(Address(_bcp_register, 0), Bytecodes::_invokedynamic);
jcc(Assembler::equal, do_profile);
cmpb(Address(_bcp_register, 0), Bytecodes::_invokehandle);
jcc(Assembler::equal, do_profile);
get_method(tmp);
cmpw(Address(tmp, Method::intrinsic_id_offset_in_bytes()), static_cast<int>(vmIntrinsics::_compiledLambdaForm));
jcc(Assembler::notEqual, profile_continue);
// Load the offset of the area within the MDO used for
// parameters. If it's negative we're not profiling any parameters
movl(tmp1, Address(mdp, in_bytes(MethodData::parameters_type_data_di_offset()) - in_bytes(MethodData::data_offset())));
testl(tmp1, tmp1);
jcc(Assembler::negative, profile_continue);
// Compute a pointer to the area for parameters from the offset
// and move the pointer to the slot for the last
// parameters. Collect profiling from last parameter down.
// mdo start + parameters offset + array length - 1
addptr(mdp, tmp1);
movptr(tmp1, Address(mdp, ArrayData::array_len_offset()));
decrement(tmp1, TypeStackSlotEntries::per_arg_count());

Label loop;
bind(loop);

int off_base = in_bytes(ParametersTypeData::stack_slot_offset(0));
int type_base = in_bytes(ParametersTypeData::type_offset(0));
Address::ScaleFactor per_arg_scale = Address::times(DataLayout::cell_size);
Address arg_off(mdp, tmp1, per_arg_scale, off_base);
Address arg_type(mdp, tmp1, per_arg_scale, type_base);
// load offset on the stack from the slot for this parameter
movptr(tmp2, arg_off);
negptr(tmp2);
// read the parameter from the local area
movptr(tmp2, Address(_locals_register, tmp2, Interpreter::stackElementScale()));
// profile the parameter
profile_obj_type(tmp2, arg_type);
// go to next parameter
decrement(tmp1, TypeStackSlotEntries::per_arg_count());
jcc(Assembler::positive, loop);
bind(profile_continue);
}
}
void InterpreterMacroAssembler::call_VM_leaf_base(address entry_point,
                                                  int number_of_arguments) {
  // interpreter specific
  //
  // Note: No need to save/restore bcp & locals registers
  //       since these are callee saved registers and no blocking/
  //       GC can happen in leaf calls.
  // Further Note: DO NOT save/restore bcp/locals. If a caller has
  // already saved them so that it can use rsi/rdi as temporaries
  // then a save/restore here will DESTROY the copy the caller
  // saved! There used to be a save_bcp() that only happened in
  // the ASSERT path (no restore_bcp). Which caused bizarre failures
  // when jvm built with ASSERTs.
#ifdef ASSERT
{
Label L;
cmpptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);
jcc(Assembler::equal, L);
stop("InterpreterMacroAssembler::call_VM_leaf_base:" " last_sp != NULL");
bind(L);
}
#endif
// super call
MacroAssembler::call_VM_leaf_base(entry_point, number_of_arguments);
// interpreter specific
// LP64: Used to ASSERT that r13/r14 were equal to frame's bcp/locals
// but since they may not have been saved (and we don't want to
// save them here (see note above) the assert is invalid.
}
void InterpreterMacroAssembler::call_VM_base(Register oop_result,
                                             Register java_thread,
                                             Register last_java_sp,
                                             address entry_point,
                                             int number_of_arguments,
                                             bool check_exceptions) {
  // interpreter specific
  //
  // Note: Could avoid restoring locals ptr (callee saved) - however doesn't
  //       really make a difference for these runtime calls, since they are
  //       slow anyway. Btw., bcp must be saved/restored since it may change
  //       due to GC.
NOT_LP64(assert(java_thread == noreg , "not expecting a precomputed java thread");)
save_bcp();
#ifdef ASSERT
{
Label L;
cmpptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);
jcc(Assembler::equal, L);
stop("InterpreterMacroAssembler::call_VM_base:" " last_sp != NULL");
bind(L);
}
#endif /* ASSERT */
// super call
MacroAssembler::call_VM_base(oop_result, noreg, last_java_sp,
entry_point, number_of_arguments,
check_exceptions); // interpreter specific
restore_bcp();
restore_locals();
}
void InterpreterMacroAssembler::check_and_handle_popframe(Register java_thread) {
  if (JvmtiExport::can_pop_frame()) {
    Label L;
    // Initiate popframe handling only if it is not already being
    // processed. If the flag has the popframe_processing bit set, it
    // means that this code is called *during* popframe handling - we
    // don't want to reenter.
    // This method is only called just after the call into the vm in
    // call_VM_base, so the arg registers are available.
    Register pop_cond = NOT_LP64(java_thread) // Not clear if any other register is available on 32 bit
                        LP64_ONLY(c_rarg0);
movl(pop_cond, Address(java_thread, JavaThread::popframe_condition_offset()));
testl(pop_cond, JavaThread::popframe_pending_bit);
jcc(Assembler::zero, L);
testl(pop_cond, JavaThread::popframe_processing_bit);
jcc(Assembler::notZero, L);
// Call Interpreter::remove_activation_preserving_args_entry() to get the
// address of the same-named entrypoint in the generated interpreter code.
call_VM_leaf(CAST_FROM_FN_PTR(address, Interpreter::remove_activation_preserving_args_entry));
jmp(rax);
bind(L);
NOT_LP64(get_thread(java_thread);)
}
}
void InterpreterMacroAssembler::load_earlyret_value(TosState state) {
  Register thread = LP64_ONLY(r15_thread) NOT_LP64(rcx);
NOT_LP64(get_thread(thread);)
movptr(rcx, Address(thread, JavaThread::jvmti_thread_state_offset()));
const Address tos_addr(rcx, JvmtiThreadState::earlyret_tos_offset());
const Address oop_addr(rcx, JvmtiThreadState::earlyret_oop_offset());
const Address val_addr(rcx, JvmtiThreadState::earlyret_value_offset());
#ifdef _LP64
switch (state) {
  case atos: movptr(rax, oop_addr);
             movptr(oop_addr, NULL_WORD);
             interp_verify_oop(rax, state);       break;
  case ltos: movptr(rax, val_addr);               break;
  case btos:                                      // fall through
  case ztos:                                      // fall through
  case ctos:                                      // fall through
  case stos:                                      // fall through
  case itos: movl(rax, val_addr);                 break;
  case ftos: load_float(val_addr);                break;
  case dtos: load_double(val_addr);               break;
  case vtos: /* nothing to do */                  break;
  default  : ShouldNotReachHere();
}
// Clean up tos value in the thread object
movl(tos_addr, ilgl);
movl(val_addr, NULL_WORD);
#else
const Address val_addr1(rcx, JvmtiThreadState::earlyret_value_offset()
                           + in_ByteSize(wordSize));
switch (state) {
  case atos: movptr(rax, oop_addr);
             movptr(oop_addr, NULL_WORD);
             interp_verify_oop(rax, state);       break;
  case ltos:
             movl(rdx, val_addr1);                // fall through
  case btos:                                      // fall through
  case ztos:                                      // fall through
  case ctos:                                      // fall through
  case stos:                                      // fall through
  case itos: movl(rax, val_addr);                 break;
  case ftos: load_float(val_addr);                break;
  case dtos: load_double(val_addr);               break;
  case vtos: /* nothing to do */                  break;
  default  : ShouldNotReachHere();
}
#endif // _LP64
// Clean up tos value in the thread object
movl(tos_addr, ilgl);
movptr(val_addr, NULL_WORD);
NOT_LP64(movptr(val_addr1, NULL_WORD);)
}
// Initiate earlyret handling only if it is not already being processed.
// If the flag has the earlyret_processing bit set, it means that this code
// is called *during* earlyret handling - we don't want to reenter.
movl(tmp, Address(tmp, JvmtiThreadState::earlyret_state_offset()));
cmpl(tmp, JvmtiThreadState::earlyret_pending);
jcc(Assembler::notEqual, L);
// Call Interpreter::remove_activation_early_entry() to get the address of the
// same-named entrypoint in the generated interpreter code.
NOT_LP64(get_thread(java_thread);)
movptr(tmp, Address(rthread, JavaThread::jvmti_thread_state_offset()));
#ifdef _LP64
movl(tmp, Address(tmp, JvmtiThreadState::earlyret_tos_offset()));
call_VM_leaf(CAST_FROM_FN_PTR(address, Interpreter::remove_activation_early_entry), tmp);
#else
pushl(Address(tmp, JvmtiThreadState::earlyret_tos_offset()));
call_VM_leaf(CAST_FROM_FN_PTR(address, Interpreter::remove_activation_early_entry), 1);
#endif // _LP64
jmp(rax);
bind(L);
NOT_LP64(get_thread(java_thread);)
}
}
void InterpreterMacroAssembler::get_unsigned_2_byte_index_at_bcp(Register reg, int bcp_offset) {
assert(bcp_offset >= 0, "bcp is still pointing to start of bytecode");
load_unsigned_short(reg, Address(_bcp_register, bcp_offset));
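// The operand is big-endian in the bytecode stream but the 16-bit load
// above is little-endian: bswapl reverses all four register bytes and
// the following shift moves the two meaningful bytes back down, leaving
// the index in native order.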
bswapl(reg);
shrl(reg, 16);
}
void InterpreterMacroAssembler::get_cache_index_at_bcp(Register index, int bcp_offset,
size_t index_size) {
assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
if (index_size == sizeof(u2)) {
load_unsigned_short(index, Address(_bcp_register, bcp_offset));
} else if (index_size == sizeof(u4)) {
  movl(index, Address(_bcp_register, bcp_offset));
  // Check if the secondary index definition is still ~x, otherwise
  // we have to change the following assembler code to calculate the
  // plain index.
assert(ConstantPool::decode_invokedynamic_index(~123) == 123, "else change next line");
notl(index); // convert to plain index
} else if (index_size == sizeof(u1)) {
load_unsigned_byte(index, Address(_bcp_register, bcp_offset));
} else {
ShouldNotReachHere();
}
}
void InterpreterMacroAssembler::get_cache_and_index_at_bcp(Register cache, Register index, int bcp_offset,
size_t index_size) {
assert_different_registers(cache, index);
get_cache_index_at_bcp(index, bcp_offset, index_size);
movptr(cache, Address(rbp, frame::interpreter_frame_cache_offset * wordSize));
assert(sizeof(ConstantPoolCacheEntry) == 4 * wordSize, "adjust code below");
// convert from field index to ConstantPoolCacheEntry index
assert(exact_log2(in_words(ConstantPoolCacheEntry::size())) == 2, "else change next line");
shll(index, 2);
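// index is now scaled by 4: each ConstantPoolCacheEntry spans four words
// (see the assert above), and callers apply an Address::times_ptr scale,
// so the entry index must first be converted to a word index here.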
}
void InterpreterMacroAssembler::get_cache_and_index_and_bytecode_at_bcp(Register cache, Register index, Register bytecode, int byte_no, int bcp_offset,
size_t index_size) {
get_cache_and_index_at_bcp(cache, index, bcp_offset, index_size);
// We use a 32-bit load here since the layout of 64-bit words on
// little-endian machines allow us that.
movl(bytecode, Address(cache, index, Address::times_ptr, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset()));
const int shift_count = (1 + byte_no) * BitsPerByte;
assert((byte_no == TemplateTable::f1_byte && shift_count == ConstantPoolCacheEntry::bytecode_1_shift) ||
(byte_no == TemplateTable::f2_byte && shift_count == ConstantPoolCacheEntry::bytecode_2_shift), "correct shift count");
shrl(bytecode, shift_count);
assert(ConstantPoolCacheEntry::bytecode_1_mask == ConstantPoolCacheEntry::bytecode_2_mask, "common mask");
andl(bytecode, ConstantPoolCacheEntry::bytecode_1_mask);
}

void InterpreterMacroAssembler::get_cache_entry_pointer_at_bcp(Register cache,
                                                               Register tmp,
                                                               int bcp_offset,
                                                               size_t index_size) {
  assert_different_registers(cache, tmp);

  get_cache_index_at_bcp(tmp, bcp_offset, index_size);
assert(sizeof(ConstantPoolCacheEntry) == 4 * wordSize, "adjust code below");
// convert from field index to ConstantPoolCacheEntry index
// and from word offset to byte offset
assert(exact_log2(in_bytes(ConstantPoolCacheEntry::size_in_bytes())) == 2 + LogBytesPerWord, "else change next line");
shll(tmp, 2 + LogBytesPerWord);
movptr(cache, Address(rbp, frame::interpreter_frame_cache_offset * wordSize));
// skip past the header
addptr(cache, in_bytes(ConstantPoolCache::base_offset()));
addptr(cache, tmp); // construct pointer to cache entry
}
void InterpreterMacroAssembler::pop(TosState state) {
  switch (state) {
    case atos: pop_ptr();                 break;
    case btos:
    case ztos:
    case ctos:
    case stos:
    case itos: pop_i();                   break;
    case ltos: pop_l();                   break;
    case ftos: pop_f(xmm0);               break;
    case dtos: pop_d(xmm0);               break;
    case vtos: /* nothing to do */        break;
    default:   ShouldNotReachHere();
}
interp_verify_oop(rax, state);
}
void InterpreterMacroAssembler::push(TosState state) {
  interp_verify_oop(rax, state);
  switch (state) {
    case atos: push_ptr();                break;
    case btos:
    case ztos:
    case ctos:
    case stos:
    case itos: push_i();                  break;
    case ltos: push_l();                  break;
    case ftos: push_f(xmm0);              break;
    case dtos: push_d(xmm0);              break;
    case vtos: /* nothing to do */        break;
    default  : ShouldNotReachHere();
}
}

#else

void InterpreterMacroAssembler::pop_i(Register r) {
pop(r);
}
void InterpreterMacroAssembler::pop(TosState state) {
  switch (state) {
    case atos: pop_ptr(rax);                 break;
    case btos:                               // fall through
    case ztos:                               // fall through
    case ctos:                               // fall through
    case stos:                               // fall through
    case itos: pop_i(rax);                   break;
    case ltos: pop_l(rax, rdx);              break;
    case ftos:
      if (UseSSE >= 1) {
pop_f(xmm0);
} else {
pop_f();
      }
      break;
    case dtos:
      if (UseSSE >= 2) {
pop_d(xmm0);
} else {
pop_d();
      }
      break;
    case vtos: /* nothing to do */           break;
    default  : ShouldNotReachHere();
}
interp_verify_oop(rax, state);
}

void InterpreterMacroAssembler::push_f() {
  // Do not schedule for no AGI! Never write beyond rsp!
subptr(rsp, 1 * wordSize);
fstp_s(Address(rsp, 0));
}

void InterpreterMacroAssembler::push_d() {
  // Do not schedule for no AGI! Never write beyond rsp!
subptr(rsp, 2 * wordSize);
fstp_d(Address(rsp, 0));
}
void InterpreterMacroAssembler::push(TosState state) {
  interp_verify_oop(rax, state);
  switch (state) {
    case atos: push_ptr(rax);                break;
    case btos:                               // fall through
    case ztos:                               // fall through
    case ctos:                               // fall through
    case stos:                               // fall through
    case itos: push_i(rax);                  break;
    case ltos: push_l(rax, rdx);             break;
    case ftos:
      if (UseSSE >= 1) {
push_f(xmm0);
} else {
push_f();
      }
      break;
    case dtos:
      if (UseSSE >= 2) {
push_d(xmm0);
} else {
push_d();
      }
      break;
    case vtos: /* nothing to do */           break;
    default  : ShouldNotReachHere();
}
}
#endif // _LP64

// Helpers for swap and dup
void InterpreterMacroAssembler::load_ptr(int n, Register val) {
movptr(val, Address(rsp, Interpreter::expr_offset_in_bytes(n)));
}
void InterpreterMacroAssembler::store_ptr(int n, Register val) {
movptr(Address(rsp, Interpreter::expr_offset_in_bytes(n)), val);
}
void InterpreterMacroAssembler::prepare_to_jump_from_interpreted() {
  // set sender sp
  lea(_bcp_register, Address(rsp, wordSize));
  // record last_sp
movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), _bcp_register);
}

// Jump to from_interpreted entry of a call unless single stepping is possible
// in this thread in which case we must call the i2i entry
void InterpreterMacroAssembler::jump_from_interpreted(Register method, Register temp) {
prepare_to_jump_from_interpreted();
if (JvmtiExport::can_post_interpreter_events()) {
Label run_compiled_code;
// JVMTI events, such as single-stepping, are implemented partly by avoiding running
// compiled code in threads for which the event is enabled. Check here for
// interp_only_mode if these events CAN be enabled.
// interp_only is an int, on little endian it is sufficient to test the byte only
// Is a cmpl faster?
LP64_ONLY(temp = r15_thread;)
NOT_LP64(get_thread(temp);)
cmpb(Address(temp, JavaThread::interp_only_mode_offset()), 0);
jccb(Assembler::zero, run_compiled_code);
jmp(Address(method, Method::interpreter_entry_offset()));
bind(run_compiled_code);
  }

  jmp(Address(method, Method::from_interpreted_offset()));
}

// The following two routines provide a hook so that an implementation
// can schedule the dispatch in two parts. x86 does not do this.
void InterpreterMacroAssembler::dispatch_prolog(TosState state, int step) {
  // Nothing x86 specific to be done here
}
void InterpreterMacroAssembler::dispatch_epilog(TosState state, int step) {
dispatch_next(state, step);
}
bind(notChar);
// cmpl(rcx, T_SHORT);  // all that's left
// jcc(Assembler::notEqual, done);
LP64_ONLY(movswl(result, result);)
NOT_LP64(shll(result, 16);) // truncate upper 16 bits
NOT_LP64(sarl(result, 16);) // and sign-extend short
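// (shll/sarl by 16 is the 32-bit idiom for sign-extending a 16-bit value
// in place; movswl above does the same in a single instruction on LP64.)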
// Nothing to do for T_INT
bind(done);
}

// remove activation
//
// Apply stack watermark barrier.
// Unlock the receiver if this is a synchronized method.
// Unlock any Java monitors from synchronized blocks.
// Remove the activation from the stack.
//
// If there are locked Java monitors
//    If throw_monitor_exception
//       throws IllegalMonitorStateException
//    Else if install_monitor_exception
//       installs IllegalMonitorStateException
//    Else
//       no error processing
void InterpreterMacroAssembler::remove_activation(
        TosState state,
        Register ret_addr,
        bool throw_monitor_exception,
        bool install_monitor_exception,
        bool notify_jvmdi) {
  // Note: Registers rdx xmm0 may be in use for the
  // result check if synchronized method
Label unlocked, unlock, no_unlock;
const Register rthread = LP64_ONLY(r15_thread) NOT_LP64(rcx);
const Register robj    = LP64_ONLY(c_rarg1) NOT_LP64(rdx);
const Register rmon    = LP64_ONLY(c_rarg1) NOT_LP64(rcx);
                         // monitor pointers need different register
                         // because rdx may have the result in it
NOT_LP64(get_thread(rthread);)
// The poll below is for the stack watermark barrier. It allows frames
// to be fixed up lazily, so a frame that would normally not be safe to
// use yet is caught here: a return into such unsafe territory of the
// stack calls InterpreterRuntime::at_unwind.
Label slow_path;
Label fast_path;
safepoint_poll(slow_path, rthread, true /* at_return */, false /* in_nmethod */);
jmp(fast_path);
bind(slow_path);
push(state);
set_last_Java_frame(rthread, noreg, rbp, (address)pc(), rscratch1);
super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::at_unwind), rthread);
NOT_LP64(get_thread(rthread);) // call_VM clobbered it, restore
reset_last_Java_frame(rthread, true);
pop(state);
bind(fast_path);
// get the value of _do_not_unlock_if_synchronized into rdx
const Address do_not_unlock_if_synchronized(rthread,
  in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
movbool(rbx, do_not_unlock_if_synchronized);
movbool(do_not_unlock_if_synchronized, false); // reset the flag
// Don't unlock anything if the _do_not_unlock_if_synchronized flag
// is set.
testbool(rbx);
jcc(Assembler::notZero, no_unlock);
// unlock monitor
push(state); // save result
// BasicObjectLock will be first in list, since this is a
// synchronized method. However, need to check that the object has
// not been unlocked by an explicit monitorexit bytecode.
const Address monitor(rbp, frame::interpreter_frame_initial_sp_offset *
                      wordSize - (int) sizeof(BasicObjectLock));
// We use c_rarg1/rdx so that if we go slow path it will be the correct
// register for unlock_object to pass to VM directly
lea(robj, monitor); // address of first monitor

movptr(rax, Address(robj, BasicObjectLock::obj_offset_in_bytes()));
testptr(rax, rax);
jcc(Assembler::notZero, unlock);

pop(state);
if (throw_monitor_exception) {
  // Entry already unlocked, need to throw exception
NOT_LP64(empty_FPU_stack();) // remove possible return value from FPU-stack, otherwise stack could overflow
call_VM(noreg, CAST_FROM_FN_PTR(address,
InterpreterRuntime::throw_illegal_monitor_state_exception));
should_not_reach_here();
} else {
  // Monitor already unlocked during a stack unroll. If requested,
  // install an illegal_monitor_state_exception. Continue with
  // stack unrolling.
  if (install_monitor_exception) {
NOT_LP64(empty_FPU_stack();)
call_VM(noreg, CAST_FROM_FN_PTR(address,
InterpreterRuntime::new_illegal_monitor_state_exception));
}
jmp(unlocked);
}
bind(unlock);
unlock_object(robj);
pop(state);
// Check that for block-structured locking (i.e., that all locked
// objects have been unlocked)
bind(unlocked);
bind(restart);
// We use c_rarg1 so that if we go slow path it will be the correct
// register for unlock_object to pass to VM directly
movptr(rmon, monitor_block_top); // points to current entry, starting
                                 // with top-most entry
lea(rbx, monitor_block_bot);     // points to word before bottom of
                                 // monitor block
jmp(entry);
// Entry already locked, need to throw exception
bind(exception);
if (throw_monitor_exception) {
  // Throw exception
NOT_LP64(empty_FPU_stack();)
MacroAssembler::call_VM(noreg,
CAST_FROM_FN_PTR(address, InterpreterRuntime::
throw_illegal_monitor_state_exception));
should_not_reach_here();
} else {
  // Stack unrolling. Unlock object and install illegal_monitor_exception.
  // Unlock does not block, so don't have to worry about the frame.
  // We don't have to preserve c_rarg1 since we are going to throw an exception.
push(state);
mov(robj, rmon); // nop if robj and rmon are the same
unlock_object(robj);
pop(state);
if (install_monitor_exception) {
NOT_LP64(empty_FPU_stack();)
call_VM(noreg, CAST_FROM_FN_PTR(address,
InterpreterRuntime::
new_illegal_monitor_state_exception));
}
jmp(restart);
}
bind(loop);
// check if current entry is used
cmpptr(Address(rmon, BasicObjectLock::obj_offset_in_bytes()), NULL_WORD);
jcc(Assembler::notEqual, exception);
addptr(rmon, entry_size); // otherwise advance to next entry
bind(entry);
cmpptr(rmon, rbx); // check if bottom reached
jcc(Assembler::notEqual, loop); // if not at bottom then check this entry
}
bind(no_unlock);
// jvmti support
if (notify_jvmdi) {
notify_method_exit(state, NotifyJVMTI); // preserve TOSCA
} else {
notify_method_exit(state, SkipNotifyJVMTI); // preserve TOSCA
}
// remove activation
// get sender sp
movptr(rbx,
       Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize));

if (StackReservedPages > 0) {
  // testing if reserved zone needs to be re-enabled
  Register rthread = LP64_ONLY(r15_thread) NOT_LP64(rcx);
  Label no_reserved_zone_enabling;

// Lock object
//
// Args:
//      rdx, c_rarg1: BasicObjectLock to be used for locking
//
// Kills:
//      rax, rbx
void InterpreterMacroAssembler::lock_object(Register lock_reg) {
  assert(lock_reg == LP64_ONLY(c_rarg1) NOT_LP64(rdx),
         "The argument is only for looks. It must be c_rarg1");
// Fast check for recursive lock.
//
// Can apply the optimization only if this is a stack lock
// allocated in this thread. For efficiency, we can focus on
// recently allocated stack locks (instead of reading the stack
// base and checking whether 'mark' points inside the current
// thread stack):
//  1) (mark & zero_bits) == 0, and
//  2) rsp <= mark < rsp + os::pagesize()
//
// Warning: rsp + os::pagesize can overflow the stack base. We must
// neither apply the optimization for an inflated lock allocated
// just above the thread stack (this is why condition 1 matters)
// nor apply the optimization if the stack lock is inside the stack
// of another thread. The latter is avoided even in case of overflow
// because we have guard pages at the end of all stacks. Hence, if
// we go over the stack base and hit the stack of another thread,
// this should not be in a writeable area that could contain a
// stack lock allocated by that thread. As a consequence, a stack
// lock less than page size away from rsp is guaranteed to be
// owned by the current thread.
//
// These 3 tests can be done by evaluating the following
// expression: ((mark - rsp) & (zero_bits - os::vm_page_size())),
// assuming both stack pointer and pagesize have their
// least significant bits clear.
// NOTE: the mark is in swap_reg %rax as the result of cmpxchg
subptr(swap_reg, rsp);
andptr(swap_reg, zero_bits - os::vm_page_size());
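// Illustration (assuming a 4 KiB page and zero_bits == 7, the LP64
// value): the mask (7 - 4096) is 0x...f007, so the AND is zero exactly
// when 0 <= mark - rsp < 4096 and the low three bits of (mark - rsp)
// are clear, which covers conditions 1 and 2 above given rsp's alignment.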
// Save the test result, for recursive case, the result is zero
movptr(Address(lock_reg, mark_offset), swap_reg);
jcc(Assembler::notZero, slow_case);

jmp(done);

bind(slow_case);
// Call the runtime routine for slow case
call_VM(noreg,
CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter),
lock_reg);
bind(done);
}
}

// Unlocks an object. Used in monitorexit bytecode and
// remove_activation. Throws an IllegalMonitorException if object is
// not locked by current thread.
//
// Args:
//      rdx, c_rarg1: BasicObjectLock for lock
//
// Kills:
//      rax
//      c_rarg0, c_rarg1, c_rarg2, c_rarg3, ... (param regs)
//      rscratch1 (scratch reg)
//      rax, rbx, rcx, rdx
void InterpreterMacroAssembler::unlock_object(Register lock_reg) {
  assert(lock_reg == LP64_ONLY(c_rarg1) NOT_LP64(rdx),
         "The argument is only for looks. It must be c_rarg1");
const Register swap_reg   = rax;  // Must use rax for cmpxchg instruction
const Register header_reg = LP64_ONLY(c_rarg2) NOT_LP64(rbx);  // Will contain the old oopMark
const Register obj_reg    = LP64_ONLY(c_rarg3) NOT_LP64(rcx);  // Will contain the oop
save_bcp(); // Save in case of exception
// Convert from BasicObjectLock structure to object and BasicLock
// structure. Store the BasicLock address into %rax
lea(swap_reg, Address(lock_reg, BasicObjectLock::lock_offset_in_bytes()));
// Load oop into obj_reg(%c_rarg3)
movptr(obj_reg, Address(lock_reg, BasicObjectLock::obj_offset_in_bytes()));

// Set the method data pointer for the current bcp.
void InterpreterMacroAssembler::set_method_data_pointer_for_bcp() {
assert(ProfileInterpreter, "must be profiling interpreter");
Label set_mdp;
push(rax);
push(rbx);
get_method(rbx);
// Test MDO to avoid the call if it is NULL.
movptr(rax, Address(rbx, in_bytes(Method::method_data_offset())));
testptr(rax, rax);
jcc(Assembler::zero, set_mdp);
// rbx: method
// _bcp_register: bcp
call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::bcp_to_di), rbx, _bcp_register);
// rax: mdi
// mdo is guaranteed to be non-zero here, we checked for it before the call.
movptr(rbx, Address(rbx, in_bytes(Method::method_data_offset())));
addptr(rbx, in_bytes(MethodData::data_offset()));
addptr(rax, rbx);
bind(set_mdp);
movptr(Address(rbp, frame::interpreter_frame_mdp_offset * wordSize), rax);
pop(rbx);
pop(rax);
}

void InterpreterMacroAssembler::verify_method_data_pointer() {
  assert(ProfileInterpreter, "must be profiling interpreter");
#ifdef ASSERT
  Label verify_continue;
  Register arg3_reg = LP64_ONLY(c_rarg3) NOT_LP64(rcx);
  Register arg2_reg = LP64_ONLY(c_rarg2) NOT_LP64(rdx);
  push(rax);
  push(rbx);
  push(arg3_reg);
  push(arg2_reg);
  test_method_data_pointer(arg3_reg, verify_continue); // If mdp is zero, continue
  get_method(rbx);

  // If the mdp is valid, it will point to a DataLayout header which is
  // consistent with the bcp. The converse is highly probable also.
load_unsigned_short(arg2_reg,
Address(arg3_reg, in_bytes(DataLayout::bci_offset())));
addptr(arg2_reg, Address(rbx, Method::const_offset()));
lea(arg2_reg, Address(arg2_reg, ConstMethod::codes_offset()));
cmpptr(arg2_reg, _bcp_register);
jcc(Assembler::equal, verify_continue);
// rbx: method
// _bcp_register: bcp
// c_rarg3: mdp
call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::verify_mdp),
rbx, _bcp_register, arg3_reg);
bind(verify_continue);
pop(arg2_reg);
pop(arg3_reg);
pop(rbx);
pop(rax);
#endif // ASSERT
}
void InterpreterMacroAssembler::set_mdp_data_at(Register mdp_in, int constant, Register value) {
assert(ProfileInterpreter, "must be profiling interpreter");
Address data(mdp_in, constant);
movptr(data, value);
}
void InterpreterMacroAssembler::increment_mdp_data_at(Address data, bool decrement) {
assert(ProfileInterpreter, "must be profiling interpreter"); // %%% this does 64bit counters at best it is wasting space // at worst it is a rare bug when counters overflow
if (decrement) {
  // Decrement the register. Set condition codes.
  addptr(data, -DataLayout::counter_increment);
  // If the decrement causes the counter to overflow, stay negative
Label L;
jcc(Assembler::negative, L);
addptr(data, DataLayout::counter_increment);
bind(L);
} else {
  assert(DataLayout::counter_increment == 1, "flow-free idiom only works with 1");
  // Increment the register. Set carry flag.
  addptr(data, DataLayout::counter_increment);
  // If the increment causes the counter to overflow, pull back by 1.
sbbptr(data, 0);
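  // Example: a counter of all ones wraps to zero and sets the carry flag;
  // sbbptr(data, 0) then subtracts the borrowed 1, leaving the counter
  // saturated at its maximum value instead of restarting from zero.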
}
}
void InterpreterMacroAssembler::set_mdp_flag_at(Register mdp_in, int flag_byte_constant) {
assert(ProfileInterpreter, "must be profiling interpreter"); int header_offset = in_bytes(DataLayout::flags_offset()); int header_bits = flag_byte_constant; // Set the flag
orb(Address(mdp_in, header_offset), header_bits);
}
void InterpreterMacroAssembler::test_mdp_data_at(Register mdp_in, int offset, Register value, Register test_value_out,
Label& not_equal_continue) {
assert(ProfileInterpreter, "must be profiling interpreter"); if (test_value_out == noreg) {
cmpptr(value, Address(mdp_in, offset));
} else {
  // Put the test value into a register, so caller can use it:
movptr(test_value_out, Address(mdp_in, offset));
cmpptr(test_value_out, value);
}
jcc(Assembler::notEqual, not_equal_continue);
}

void InterpreterMacroAssembler::profile_taken_branch(Register mdp, Register bumped_count) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    // Otherwise, assign to mdp
test_method_data_pointer(mdp, profile_continue);
// We are taking a branch. Increment the taken count.
// We inline increment_mdp_data_at to return bumped_count in a register
//increment_mdp_data_at(mdp, in_bytes(JumpData::taken_offset()));
Address data(mdp, in_bytes(JumpData::taken_offset()));
movptr(bumped_count, data);
assert(DataLayout::counter_increment == 1, "flow-free idiom only works with 1");
addptr(bumped_count, DataLayout::counter_increment);
sbbptr(bumped_count, 0);
movptr(data, bumped_count); // Store back out
// The method data pointer needs to be updated to reflect the new target.
update_mdp_by_offset(mdp, in_bytes(JumpData::displacement_offset()));
bind(profile_continue);
}
}

void InterpreterMacroAssembler::profile_not_taken_branch(Register mdp) {
  if (ProfileInterpreter) {
Label profile_continue;
// If no method data exists, go to profile_continue.
test_method_data_pointer(mdp, profile_continue);
// We are taking a branch. Increment the not taken count.
increment_mdp_data_at(mdp, in_bytes(BranchData::not_taken_offset()));
// The method data pointer needs to be updated to correspond to
// the next bytecode
update_mdp_by_constant(mdp, in_bytes(BranchData::branch_data_size()));
bind(profile_continue);
}
}

void InterpreterMacroAssembler::profile_call(Register mdp) {
  if (ProfileInterpreter) {
Label profile_continue;
// If no method data exists, go to profile_continue.
test_method_data_pointer(mdp, profile_continue);
// We are making a call. Increment the count.
increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
// The method data pointer needs to be updated to reflect the new target.
update_mdp_by_constant(mdp, in_bytes(CounterData::counter_data_size()));
bind(profile_continue);
}
}

void InterpreterMacroAssembler::profile_final_call(Register mdp) {
  if (ProfileInterpreter) {
Label profile_continue;
// If no method data exists, go to profile_continue.
test_method_data_pointer(mdp, profile_continue);
// We are making a call. Increment the count.
increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
// The method data pointer needs to be updated to reflect the new target.
update_mdp_by_constant(mdp,
in_bytes(VirtualCallData::
virtual_call_data_size()));
bind(profile_continue);
}
}

void InterpreterMacroAssembler::profile_virtual_call(Register receiver, Register mdp,
                                                     Register reg2, bool receiver_can_be_null) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
test_method_data_pointer(mdp, profile_continue);
Label skip_receiver_profile;
if (receiver_can_be_null) {
Label not_null;
testptr(receiver, receiver);
jccb(Assembler::notZero, not_null);
// We are making a call. Increment the count for null receiver.
increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
jmp(skip_receiver_profile);
bind(not_null);
}
// Record the receiver type.
record_klass_in_profile(receiver, mdp, reg2, true);
bind(skip_receiver_profile);
// The method data pointer needs to be updated to reflect the new target.
update_mdp_by_constant(mdp, in_bytes(VirtualCallData::virtual_call_data_size()));
bind(profile_continue);
}
}

// This routine creates a state machine for updating the multi-row
// type profile at a virtual call site (or other type-sensitive bytecode).
// The machine visits each row (of receiver/count) until the receiver type
// is found, or until it runs out of rows. At the same time, it remembers
// the location of the first empty row. (An empty row records null for its
// receiver, and can be allocated for a newly-observed receiver type.)
// Because there are two degrees of freedom in the state, a simple linear
// search will not work; it must be a decision tree. Hence this helper
// function is recursive, to generate the required tree structured code.
// It's the interpreter, so we are trading off code space for speed.
// See below for example code.
void InterpreterMacroAssembler::record_klass_in_profile_helper(Register receiver, Register mdp,
                                                               Register reg2, int start_row,
                                                               Label& done, bool is_virtual_call) {
  if (TypeProfileWidth == 0) {
    if (is_virtual_call) {
increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
    }
#if INCLUDE_JVMCI
    else if (EnableJVMCI) {
increment_mdp_data_at(mdp, in_bytes(ReceiverTypeData::nonprofiled_receiver_count_offset()));
    }
#endif // INCLUDE_JVMCI
  } else {
    int non_profiled_offset = -1;
    if (is_virtual_call) {
non_profiled_offset = in_bytes(CounterData::count_offset());
    }
#if INCLUDE_JVMCI
    else if (EnableJVMCI) {
non_profiled_offset = in_bytes(ReceiverTypeData::nonprofiled_receiver_count_offset());
    }
#endif // INCLUDE_JVMCI

    record_item_in_profile_helper(receiver, mdp, reg2, 0, done, TypeProfileWidth,
                                  &VirtualCallData::receiver_offset,
                                  &VirtualCallData::receiver_count_offset,
                                  non_profiled_offset);
  }
}
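
// A sketch (illustrative, not emitted verbatim) of the decision tree the
// recursive helper below generates, shown here for three profile rows:
//   if (row[0].rec == rec) { row[0].incr(); goto done; }
//   if (row[0].rec != nullptr) {
//     if (row[1].rec == rec) { row[1].incr(); goto done; }
//     if (row[1].rec != nullptr) {
//       if (row[2].rec == rec) { row[2].incr(); goto done; }
//       if (row[2].rec != nullptr) { count.incr(); goto done; } // overflow
//       row[2].init(rec); goto done;
//     } else {
//       // remember row[1] is empty
//       if (row[2].rec == rec) { row[2].incr(); goto done; }
//       row[1].init(rec); goto done;
//     }
//   } else {
//     // remember row[0] is empty
//     if (row[1].rec == rec) { row[1].incr(); goto done; }
//     if (row[2].rec == rec) { row[2].incr(); goto done; }
//     row[0].init(rec); goto done;
//   }
//   done: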
void InterpreterMacroAssembler::record_item_in_profile_helper(Register item, Register mdp,
                                                              Register reg2, int start_row,
                                                              Label& done, int total_rows,
                                                              OffsetFunction item_offset_fn,
                                                              OffsetFunction item_count_offset_fn,
                                                              int non_profiled_offset) {
  int last_row = total_rows - 1;
  assert(start_row <= last_row, "must be work left to do");
  // Test this row for both the item and for null.
  // Take any of three different outcomes:
  //   1. found item => increment count and goto done
  //   2. found null => keep looking for case 1, maybe allocate this cell
  //   3. found something else => keep looking for cases 1 and 2
  // Case 3 is handled by a recursive call.
  for (int row = start_row; row <= last_row; row++) {
    Label next_test;
    bool test_for_null_also = (row == start_row);
    // See if the item is item[n].
    int item_offset = in_bytes(item_offset_fn(row));
test_mdp_data_at(mdp, item_offset, item,
(test_for_null_also ? reg2 : noreg),
next_test); // (Reg2 now contains the item from the CallData.)
    // The item is item[n]. Increment count[n].
    int count_offset = in_bytes(item_count_offset_fn(row));
increment_mdp_data_at(mdp, count_offset);
jmp(done);
bind(next_test);
    if (test_for_null_also) {
      // Failed the equality check on item[n]... Test for null.
      testptr(reg2, reg2);
      if (start_row == last_row) {
        // The only thing left to do is handle the null case.
        if (non_profiled_offset >= 0) {
Label found_null;
jccb(Assembler::zero, found_null);