/* * Copyright (c) 2003, 2022, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved. * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 only, as * published by the Free Software Foundation. * * This code is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * version 2 for more details (a copy is included in the LICENSE file that * accompanied this code). * * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA * or visit www.oracle.com if you need additional information or have any * questions. *
*/
// Most of the runtime stubs have this simple frame layout. // This class exists to make the layout shared in one place. // Offsets are for compiler stack slots, which are jints. enum layout { // The frame sender code expects that fp will be in the "natural" place and // will override any oopMap setting for it. We must therefore force the layout // so that it agrees with the frame sender code. // we don't expect any arg reg save area so riscv asserts that // frame::arg_reg_save_area_bytes == 0
fp_off = 0, fp_off2,
return_off, return_off2,
framesize
};
};
// Build the stub frame, spill every live register (integer, float and,
// when requested, RVV vector registers) and produce an OopMap that describes
// where each register was saved so deoptimization and GC can find oops.
// On return *total_frame_words holds the full frame size in words.
OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words) {
  int vector_size_in_bytes = 0;
  int vector_size_in_slots = 0;
#ifdef COMPILER2
  if (_save_vectors) {
    // Vector registers are scalable: query the actual VLEN-dependent size.
    vector_size_in_bytes += Matcher::scalable_vector_reg_size(T_BYTE);
    vector_size_in_slots += Matcher::scalable_vector_reg_size(T_INT);
  }
#endif

  int frame_size_in_bytes = align_up(additional_frame_words * wordSize + ra_offset_in_bytes() + wordSize, 16);
  // OopMap frame size is in compiler stack slots (jint's) not bytes or words
  int frame_size_in_slots = frame_size_in_bytes / BytesPerInt;
  // The caller will allocate additional_frame_words
  int additional_frame_slots = additional_frame_words * wordSize / BytesPerInt;
  // CodeBlob frame size is in words.
  int frame_size_in_words = frame_size_in_bytes / wordSize;
  *total_frame_words = frame_size_in_words;

  // Save Integer, Float and Vector registers.
  __ enter();
  __ push_CPU_state(_save_vectors, vector_size_in_bytes);

  // Set an oopmap for the call site.  This oopmap will map all
  // oop-registers and debug-info registers as callee-saved.  This
  // will allow deoptimization at this safepoint to find all possible
  // debug-info recordings, as well as let GC find all oops.
  OopMapSet *oop_maps = new OopMapSet();   // NOTE(review): unused beyond the assert below
  OopMap* oop_map = new OopMap(frame_size_in_slots, 0);
  assert_cond(oop_maps != NULL && oop_map != NULL);

  // Walk the frame from sp upwards, recording each saved register's slot.
  // The slot order must mirror MacroAssembler::push_CPU_state exactly.
  int sp_offset_in_slots = 0;
  int step_in_slots = 0;
  if (_save_vectors) {
    step_in_slots = vector_size_in_slots;
    for (int i = 0; i < VectorRegister::number_of_registers; i++, sp_offset_in_slots += step_in_slots) {
      VectorRegister r = as_VectorRegister(i);
      oop_map->set_callee_saved(VMRegImpl::stack2reg(sp_offset_in_slots), r->as_VMReg());
    }
  }

  step_in_slots = FloatRegister::max_slots_per_register;
  for (int i = 0; i < FloatRegister::number_of_registers; i++, sp_offset_in_slots += step_in_slots) {
    FloatRegister r = as_FloatRegister(i);
    oop_map->set_callee_saved(VMRegImpl::stack2reg(sp_offset_in_slots), r->as_VMReg());
  }

  step_in_slots = Register::max_slots_per_register;
  // skip the slot reserved for alignment, see MacroAssembler::push_reg;
  // also skip x5 ~ x6 on the stack because they are caller-saved registers.
  sp_offset_in_slots += Register::max_slots_per_register * 3;
  // besides, we ignore x0 ~ x4 because push_CPU_state won't push them on the stack.
  for (int i = 7; i < Register::number_of_registers; i++, sp_offset_in_slots += step_in_slots) {
    Register r = as_Register(i);
    if (r != xthread) {
      oop_map->set_callee_saved(VMRegImpl::stack2reg(sp_offset_in_slots + additional_frame_slots), r->as_VMReg());
    }
  }

  return oop_map;
}
// Undo save_live_registers(): reload all saved registers and pop the frame.
// Must restore exactly what push_CPU_state saved, including the scalable
// vector state when C2 saved it.
void RegisterSaver::restore_live_registers(MacroAssembler* masm) {
#ifdef COMPILER2
  __ pop_CPU_state(_save_vectors, Matcher::scalable_vector_reg_size(T_BYTE));
#else
#if !INCLUDE_JVMCI
  assert(!_save_vectors, "vectors are generated only by C2 and JVMCI");
#endif
  __ pop_CPU_state(_save_vectors);
#endif
  __ leave();
}
// Is vector's size (in bytes) bigger than a size saved by default?
// riscv does not overlay the floating-point registers on vector registers like aarch64.
// NOTE(review): the `size` argument is ignored -- whenever RVV is enabled every
// vector is treated as wide; confirm this matches what callers expect.
bool SharedRuntime::is_wide_vector(int size) { return UseRVV;
}
// ---------------------------------------------------------------------------
// Read the array of BasicTypes from a signature, and compute where the
// arguments should go.  Values in the VMRegPair regs array refer to 4-byte
// quantities.  Values less than VMRegImpl::stack0 are registers, those above
// refer to 4-byte stack slots.  All stack slots are based off of the stack pointer
// as framesizes are fixed.
// VMRegImpl::stack0 refers to the first slot 0(sp).
// and VMRegImpl::stack0+1 refers to the memory word 4-bytes higher.
// Registers up to Register::number_of_registers are the 64-bit
// integer registers.
// Note: the INPUTS in sig_bt are in units of Java argument words, // which are 64-bit. The OUTPUTS are in 32-bit units.
// The Java calling convention is a "shifted" version of the C ABI. // By skipping the first C ABI register we can call non-static jni // methods with small numbers of arguments without having to shuffle // the arguments at all. Since we control the java ABI we ought to at // least get some advantage out of it.
int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
VMRegPair *regs, int total_args_passed) { // Create the mapping between argument positions and // registers. staticconstRegister INT_ArgReg[Argument::n_int_register_parameters_j] = {
j_rarg0, j_rarg1, j_rarg2, j_rarg3,
j_rarg4, j_rarg5, j_rarg6, j_rarg7
}; staticconst FloatRegister FP_ArgReg[Argument::n_float_register_parameters_j] = {
j_farg0, j_farg1, j_farg2, j_farg3,
j_farg4, j_farg5, j_farg6, j_farg7
};
uint int_args = 0;
uint fp_args = 0;
uint stk_args = 0; // inc by 2 each time
for (int i = 0; i < total_args_passed; i++) { switch (sig_bt[i]) { case T_BOOLEAN: // fall through case T_CHAR: // fall through case T_BYTE: // fall through case T_SHORT: // fall through case T_INT: if (int_args < Argument::n_int_register_parameters_j) {
regs[i].set1(INT_ArgReg[int_args++]->as_VMReg());
} else {
regs[i].set1(VMRegImpl::stack2reg(stk_args));
stk_args += 2;
} break; case T_VOID: // halves of T_LONG or T_DOUBLE
assert(i != 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "expecting half");
regs[i].set_bad(); break; case T_LONG: // fall through
assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half"); case T_OBJECT: // fall through case T_ARRAY: // fall through case T_ADDRESS: if (int_args < Argument::n_int_register_parameters_j) {
regs[i].set2(INT_ArgReg[int_args++]->as_VMReg());
} else {
regs[i].set2(VMRegImpl::stack2reg(stk_args));
stk_args += 2;
} break; case T_FLOAT: if (fp_args < Argument::n_float_register_parameters_j) {
regs[i].set1(FP_ArgReg[fp_args++]->as_VMReg());
} else {
regs[i].set1(VMRegImpl::stack2reg(stk_args));
stk_args += 2;
} break; case T_DOUBLE:
assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half"); if (fp_args < Argument::n_float_register_parameters_j) {
regs[i].set2(FP_ArgReg[fp_args++]->as_VMReg());
} else {
regs[i].set2(VMRegImpl::stack2reg(stk_args));
stk_args += 2;
} break; default:
ShouldNotReachHere();
}
}
return align_up(stk_args, 2);
}
// Patch the callers callsite with entry to compiled code if it exists. staticvoid patch_callers_callsite(MacroAssembler *masm) {
Label L;
__ ld(t0, Address(xmethod, in_bytes(Method::code_offset())));
__ beqz(t0, L);
__ enter();
__ push_CPU_state();
// VM needs caller's callsite // VM needs target method // This needs to be a long call since we will relocate this adapter to // the codeBuffer and it may not reach
staticvoid gen_c2i_adapter(MacroAssembler *masm, int total_args_passed, int comp_args_on_stack, const BasicType *sig_bt, const VMRegPair *regs,
Label& skip_fixup) { // Before we get into the guts of the C2I adapter, see if we should be here // at all. We've come from compiled code and are attempting to jump to the // interpreter, which means the caller made a static call to get here // (vcalls always get a compiled target if there is one). Check for a // compiled target. If there is one, we need to patch the caller's call.
patch_callers_callsite(masm);
__ bind(skip_fixup);
int words_pushed = 0;
// Since all args are passed on the stack, total_args_passed * // Interpreter::stackElementSize is the space we need.
int extraspace = total_args_passed * Interpreter::stackElementSize;
__ mv(x19_sender_sp, sp);
// stack is aligned, keep it that way
extraspace = align_up(extraspace, 2 * wordSize);
if (extraspace) {
__ sub(sp, sp, extraspace);
}
// Now write the args into the outgoing interpreter space for (int i = 0; i < total_args_passed; i++) { if (sig_bt[i] == T_VOID) {
assert(i > 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "missing half"); continue;
}
// offset to start parameters int st_off = (total_args_passed - i - 1) * Interpreter::stackElementSize; int next_off = st_off - Interpreter::stackElementSize;
// Say 4 args: // i st_off // 0 32 T_LONG // 1 24 T_VOID // 2 16 T_OBJECT // 3 8 T_BOOL // - 0 return address // // However to make thing extra confusing. Because we can fit a Java long/double in // a single slot on a 64 bt vm and it would be silly to break them up, the interpreter // leaves one slot empty and only stores to a single slot. In this case the // slot that is occupied is the T_VOID slot. See I said it was confusing.
VMReg r_1 = regs[i].first();
VMReg r_2 = regs[i].second(); if (!r_1->is_valid()) {
assert(!r_2->is_valid(), ""); continue;
} if (r_1->is_stack()) { // memory to memory use t0 int ld_off = (r_1->reg2stack() * VMRegImpl::stack_slot_size
+ extraspace
+ words_pushed * wordSize); if (!r_2->is_valid()) {
__ lwu(t0, Address(sp, ld_off));
__ sd(t0, Address(sp, st_off), /*temp register*/esp);
} else {
__ ld(t0, Address(sp, ld_off), /*temp register*/esp);
// Two VMREgs|OptoRegs can be T_OBJECT, T_ADDRESS, T_DOUBLE, T_LONG // T_DOUBLE and T_LONG use two slots in the interpreter if ( sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) { // ld_off == LSW, ld_off+wordSize == MSW // st_off == MSW, next_off == LSW
__ sd(t0, Address(sp, next_off), /*temp register*/esp); #ifdef ASSERT // Overwrite the unused slot with known junk
__ mv(t0, 0xdeadffffdeadaaaaul);
__ sd(t0, Address(sp, st_off), /*temp register*/esp); #endif/* ASSERT */
} else {
__ sd(t0, Address(sp, st_off), /*temp register*/esp);
}
}
} elseif (r_1->is_Register()) { Register r = r_1->as_Register(); if (!r_2->is_valid()) { // must be only an int (or less ) so move only 32bits to slot
__ sd(r, Address(sp, st_off));
} else { // Two VMREgs|OptoRegs can be T_OBJECT, T_ADDRESS, T_DOUBLE, T_LONG // T_DOUBLE and T_LONG use two slots in the interpreter if ( sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) { // long/double in gpr #ifdef ASSERT // Overwrite the unused slot with known junk
__ mv(t0, 0xdeadffffdeadaaabul);
__ sd(t0, Address(sp, st_off), /*temp register*/esp); #endif/* ASSERT */
__ sd(r, Address(sp, next_off));
} else {
__ sd(r, Address(sp, st_off));
}
}
} else {
assert(r_1->is_FloatRegister(), ""); if (!r_2->is_valid()) { // only a float use just part of the slot
__ fsw(r_1->as_FloatRegister(), Address(sp, st_off));
} else { #ifdef ASSERT // Overwrite the unused slot with known junk
__ mv(t0, 0xdeadffffdeadaaacul);
__ sd(t0, Address(sp, st_off), /*temp register*/esp); #endif/* ASSERT */
__ fsd(r_1->as_FloatRegister(), Address(sp, next_off));
}
}
}
void SharedRuntime::gen_i2c_adapter(MacroAssembler *masm, int total_args_passed, int comp_args_on_stack, const BasicType *sig_bt, const VMRegPair *regs) { // Note: x19_sender_sp contains the senderSP on entry. We must // preserve it since we may do a i2c -> c2i transition if we lose a // race where compiled code goes non-entrant while we get args // ready.
// Cut-out for having no stack args. int comp_words_on_stack = align_up(comp_args_on_stack * VMRegImpl::stack_slot_size, wordSize) >> LogBytesPerWord; if (comp_args_on_stack != 0) {
__ sub(t0, sp, comp_words_on_stack * wordSize);
__ andi(sp, t0, -16);
}
// Will jump to the compiled code just as if compiled code was doing it. // Pre-load the register-jump target early, to schedule it better.
__ ld(t1, Address(xmethod, in_bytes(Method::from_compiled_offset())));
#if INCLUDE_JVMCI if (EnableJVMCI) { // check if this call should be routed towards a specific entry point
__ ld(t0, Address(xthread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())));
Label no_alternative_target;
__ beqz(t0, no_alternative_target);
__ mv(t1, t0);
__ sd(zr, Address(xthread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())));
__ bind(no_alternative_target);
} #endif// INCLUDE_JVMCI
// Now generate the shuffle code. for (int i = 0; i < total_args_passed; i++) { if (sig_bt[i] == T_VOID) {
assert(i > 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "missing half"); continue;
}
// Pick up 0, 1 or 2 words from SP+offset.
assert(!regs[i].second()->is_valid() || regs[i].first()->next() == regs[i].second(), "scrambled load targets?"); // Load in argument order going down. int ld_off = (total_args_passed - i - 1) * Interpreter::stackElementSize; // Point to interpreter value (vs. tag) int next_off = ld_off - Interpreter::stackElementSize;
VMReg r_1 = regs[i].first();
VMReg r_2 = regs[i].second(); if (!r_1->is_valid()) {
assert(!r_2->is_valid(), ""); continue;
} if (r_1->is_stack()) { // Convert stack slot to an SP offset (+ wordSize to account for return address ) int st_off = regs[i].first()->reg2stack() * VMRegImpl::stack_slot_size; if (!r_2->is_valid()) {
__ lw(t0, Address(esp, ld_off));
__ sd(t0, Address(sp, st_off), /*temp register*/t2);
} else { // // We are using two optoregs. This can be either T_OBJECT, // T_ADDRESS, T_LONG, or T_DOUBLE the interpreter allocates // two slots but only uses one for thr T_LONG or T_DOUBLE case // So we must adjust where to pick up the data to match the // interpreter. // // Interpreter local[n] == MSW, local[n+1] == LSW however locals // are accessed as negative so LSW is at LOW address
// ld_off is MSW so get LSW constint offset = (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) ?
next_off : ld_off;
__ ld(t0, Address(esp, offset)); // st_off is LSW (i.e. reg.first())
__ sd(t0, Address(sp, st_off), /*temp register*/t2);
}
} elseif (r_1->is_Register()) { // Register argument Register r = r_1->as_Register(); if (r_2->is_valid()) { // // We are using two VMRegs. This can be either T_OBJECT, // T_ADDRESS, T_LONG, or T_DOUBLE the interpreter allocates // two slots but only uses one for thr T_LONG or T_DOUBLE case // So we must adjust where to pick up the data to match the // interpreter.
// this can be a misaligned move
__ ld(r, Address(esp, offset));
} else { // sign extend and use a full word?
__ lw(r, Address(esp, ld_off));
}
} else { if (!r_2->is_valid()) {
__ flw(r_1->as_FloatRegister(), Address(esp, ld_off));
} else {
__ fld(r_1->as_FloatRegister(), Address(esp, next_off));
}
}
}
__ push_cont_fastpath(xthread); // Set JavaThread::_cont_fastpath to the sp of the oldest interpreted frame we know about
// 6243940 We might end up in handle_wrong_method if // the callee is deoptimized as we race thru here. If that // happens we don't want to take a safepoint because the // caller frame will look interpreted and arguments are now // "compiled" so it is much better to make this transition // invisible to the stack walking code. Unfortunately if // we try and find the callee by normal means a safepoint // is possible. So we stash the desired callee in the thread // and the vm will find there should this case occur.
constRegister holder = t1; constRegister receiver = j_rarg0; constRegister tmp = t2; // A call-clobbered register not used for arg passing
// ------------------------------------------------------------------------- // Generate a C2I adapter. On entry we know xmethod holds the Method* during calls // to the interpreter. The args start out packed in the compiled layout. They // need to be unpacked into the interpreter layout. This will almost always // require some stack space. We grow the current (compiled) stack, then repack // the args. We finally end in a jump to the generic interpreter entry point. // On exit from the interpreter, the interpreter will restore our SP (lest the // compiled code, which relies solely on SP and not FP, get sick).
__ bind(ok); // Method might have been compiled since the call site was patched to // interpreted; if that is the case treat it as a miss so we can get // the call site corrected.
__ ld(t0, Address(xmethod, in_bytes(Method::code_offset())));
__ beqz(t0, skip_fixup);
__ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
__ block_comment("} c2i_unverified_entry");
}
address c2i_entry = __ pc();
// Class initialization barrier for static methods
address c2i_no_clinit_check_entry = NULL; if (VM_Version::supports_fast_class_init_checks()) {
Label L_skip_barrier;
uint int_args = 0;
uint fp_args = 0;
uint stk_args = 0; // inc by 2 each time
for (int i = 0; i < total_args_passed; i++) { switch (sig_bt[i]) { case T_BOOLEAN: // fall through case T_CHAR: // fall through case T_BYTE: // fall through case T_SHORT: // fall through case T_INT: if (int_args < Argument::n_int_register_parameters_c) {
regs[i].set1(INT_ArgReg[int_args++]->as_VMReg());
} else {
regs[i].set1(VMRegImpl::stack2reg(stk_args));
stk_args += 2;
} break; case T_LONG: // fall through
assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half"); case T_OBJECT: // fall through case T_ARRAY: // fall through case T_ADDRESS: // fall through case T_METADATA: if (int_args < Argument::n_int_register_parameters_c) {
regs[i].set2(INT_ArgReg[int_args++]->as_VMReg());
} else {
regs[i].set2(VMRegImpl::stack2reg(stk_args));
stk_args += 2;
} break; case T_FLOAT: if (fp_args < Argument::n_float_register_parameters_c) {
regs[i].set1(FP_ArgReg[fp_args++]->as_VMReg());
} elseif (int_args < Argument::n_int_register_parameters_c) {
regs[i].set1(INT_ArgReg[int_args++]->as_VMReg());
} else {
regs[i].set1(VMRegImpl::stack2reg(stk_args));
stk_args += 2;
} break; case T_DOUBLE:
assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half"); if (fp_args < Argument::n_float_register_parameters_c) {
regs[i].set2(FP_ArgReg[fp_args++]->as_VMReg());
} elseif (int_args < Argument::n_int_register_parameters_c) {
regs[i].set2(INT_ArgReg[int_args++]->as_VMReg());
} else {
regs[i].set2(VMRegImpl::stack2reg(stk_args));
stk_args += 2;
} break; case T_VOID: // Halves of longs and doubles
assert(i != 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "expecting half");
regs[i].set_bad(); break; default:
ShouldNotReachHere();
}
}
return stk_args;
}
// Spill the native-call result so a VM call can be made without clobbering it.
// frame_slots is deliberately ignored: the word at fp - 3*wordSize is free by
// this point and serves as the single spill slot.
void SharedRuntime::save_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
  const Address result_slot(fp, -3 * wordSize);
  switch (ret_type) {
    case T_FLOAT:
      __ fsw(f10, result_slot);
      break;
    case T_DOUBLE:
      __ fsd(f10, result_slot);
      break;
    case T_VOID:
      // nothing to preserve
      break;
    default:
      // all integral/pointer results come back in x10
      __ sd(x10, result_slot);
      break;
  }
}
// Reload the native-call result saved by save_native_result().
// frame_slots is deliberately ignored: the spill slot is the word at
// fp - 3*wordSize, mirroring the save side.
void SharedRuntime::restore_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
  const Address result_slot(fp, -3 * wordSize);
  switch (ret_type) {
    case T_FLOAT:
      __ flw(f10, result_slot);
      break;
    case T_DOUBLE:
      __ fld(f10, result_slot);
      break;
    case T_VOID:
      // nothing was saved
      break;
    default:
      // all integral/pointer results live in x10
      __ ld(x10, result_slot);
      break;
  }
}
staticvoid save_args(MacroAssembler *masm, int arg_count, int first_arg, VMRegPair *args) {
RegSet x; for ( int i = first_arg ; i < arg_count ; i++ ) { if (args[i].first()->is_Register()) {
x = x + args[i].first()->as_Register();
} elseif (args[i].first()->is_FloatRegister()) {
__ addi(sp, sp, -2 * wordSize);
__ fsd(args[i].first()->as_FloatRegister(), Address(sp, 0));
}
}
__ push_reg(x, sp);
}
staticvoid restore_args(MacroAssembler *masm, int arg_count, int first_arg, VMRegPair *args) {
RegSet x; for ( int i = first_arg ; i < arg_count ; i++ ) { if (args[i].first()->is_Register()) {
x = x + args[i].first()->as_Register();
} else {
;
}
}
__ pop_reg(x, sp); for ( int i = arg_count - 1 ; i >= first_arg ; i-- ) { if (args[i].first()->is_Register()) {
;
} elseif (args[i].first()->is_FloatRegister()) {
__ fld(args[i].first()->as_FloatRegister(), Address(sp, 0));
__ add(sp, sp, 2 * wordSize);
}
}
}
staticvoid verify_oop_args(MacroAssembler* masm, const methodHandle& method, const BasicType* sig_bt, const VMRegPair* regs) { constRegister temp_reg = x9; // not part of any compiled calling seq if (VerifyOops) { for (int i = 0; i < method->size_of_parameters(); i++) { if (sig_bt[i] == T_OBJECT ||
sig_bt[i] == T_ARRAY) {
VMReg r = regs[i].first();
assert(r->is_valid(), "bad oop arg"); if (r->is_stack()) {
__ ld(temp_reg, Address(sp, r->reg2stack() * VMRegImpl::stack_slot_size));
__ verify_oop(temp_reg);
} else {
__ verify_oop(r->as_Register());
}
}
}
}
}
__ ld(t0, Address(sp, ContinuationEntry::parent_offset()));
__ sd(t0, Address(xthread, JavaThread::cont_entry_offset()));
__ add(fp, sp, (int)ContinuationEntry::size() + 2 * wordSize /* 2 extra words to match up with leave() */);
}
__ enter();
stack_slots = 2; // will be adjusted in setup
OopMap* map = continuation_enter_setup(masm, stack_slots); // The frame is complete here, but we only record it for the compiled entry, so the frame would appear unsafe, // but that's okay because at the very worst we'll miss an async sample, but we're in interp_only_mode anyway.
fill_continuation_entry(masm);
__ bnez(c_rarg2, call_thaw);
// Make sure the call is patchable
__ align(NativeInstruction::instruction_size);
// Now write the args into the outgoing interpreter space bool has_receiver = false; Register receiver_reg = noreg; int member_arg_pos = -1; Register member_reg = noreg; int ref_kind = MethodHandles::signature_polymorphic_intrinsic_ref_kind(iid); if (ref_kind != 0) {
member_arg_pos = method->size_of_parameters() - 1; // trailing MemberName argument
member_reg = x9; // known to be free at this point
has_receiver = MethodHandles::ref_kind_has_receiver(ref_kind);
} elseif (iid == vmIntrinsics::_invokeBasic) {
has_receiver = true;
} elseif (iid == vmIntrinsics::_linkToNative) {
member_arg_pos = method->size_of_parameters() - 1; // trailing NativeEntryPoint argument
member_reg = x9; // known to be free at this point
} else {
fatal("unexpected intrinsic id %d", vmIntrinsics::as_int(iid));
}
if (member_reg != noreg) { // Load the member_arg into register, if necessary.
SharedRuntime::check_member_name_argument_is_last_argument(method, sig_bt, regs);
VMReg r = regs[member_arg_pos].first(); if (r->is_stack()) {
__ ld(member_reg, Address(sp, r->reg2stack() * VMRegImpl::stack_slot_size));
} else { // no data motion is needed
member_reg = r->as_Register();
}
}
if (has_receiver) { // Make sure the receiver is loaded into a register.
assert(method->size_of_parameters() > 0, "oob");
assert(sig_bt[0] == T_OBJECT, "receiver argument must be an object");
VMReg r = regs[0].first();
assert(r->is_valid(), "bad receiver arg"); if (r->is_stack()) { // Porting note: This assumes that compiled calling conventions always // pass the receiver oop in a register. If this is not true on some // platform, pick a temp and load the receiver from stack.
fatal("receiver always in a register");
receiver_reg = x12; // known to be free at this point
__ ld(receiver_reg, Address(sp, r->reg2stack() * VMRegImpl::stack_slot_size));
} else { // no data motion is needed
receiver_reg = r->as_Register();
}
}
// Figure out which address we are really jumping to:
MethodHandles::generate_method_handle_dispatch(masm, iid,
receiver_reg, member_reg, /*for_compiler_entry:*/ true);
}
// --------------------------------------------------------------------------- // Generate a native wrapper for a given method. The method takes arguments // in the Java compiled code convention, marshals them to the native // convention (handlizes oops, etc), transitions to native, makes the call, // returns to java state (possibly blocking), unhandlizes any result and // returns. // // Critical native functions are a shorthand for the use of // GetPrimtiveArrayCritical and disallow the use of any other JNI // functions. The wrapper is expected to unpack the arguments before // passing them to the callee and perform checks before and after the // native call to ensure that they GCLocker // lock_critical/unlock_critical semantics are followed. Some other // parts of JNI setup are skipped like the tear down of the JNI handle // block and the check for pending exceptions it's impossible for them // to be thrown. // // They are roughly structured like this: // if (GCLocker::needs_gc()) SharedRuntime::block_for_jni_critical() // tranistion to thread_in_native // unpack array arguments and call native entry point // check for safepoint in progress // check if any thread suspend flags are set // call into JVM and possible unlock the JNI critical // if a GC was suppressed while in the critical native. // transition back to thread_in_Java // return to caller //
nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm, const methodHandle& method, int compile_id,
BasicType* in_sig_bt,
VMRegPair* in_regs,
BasicType ret_type) { if (method->is_continuation_native_intrinsic()) { int exception_offset = -1;
OopMapSet* oop_maps = new OopMapSet(); int frame_complete = -1; int stack_slots = -1; int interpreted_entry_offset = -1; int vep_offset = -1; if (method->is_continuation_enter_intrinsic()) {
gen_continuation_enter(masm,
method,
in_sig_bt,
in_regs,
exception_offset,
oop_maps,
frame_complete,
stack_slots,
interpreted_entry_offset,
vep_offset);
} elseif (method->is_continuation_yield_intrinsic()) {
gen_continuation_yield(masm,
method,
in_sig_bt,
in_regs,
oop_maps,
frame_complete,
stack_slots,
vep_offset);
} else {
guarantee(false, "Unknown Continuation native intrinsic");
}
#ifdef ASSERT if (method->is_continuation_enter_intrinsic()) {
assert(interpreted_entry_offset != -1, "Must be set");
assert(exception_offset != -1, "Must be set");
} else {
assert(interpreted_entry_offset == -1, "Must be unset");
assert(exception_offset == -1, "Must be unset");
}
assert(frame_complete != -1, "Must be set");
assert(stack_slots != -1, "Must be set");
assert(vep_offset != -1, "Must be set"); #endif
if (method->is_method_handle_intrinsic()) {
vmIntrinsics::ID iid = method->intrinsic_id();
intptr_t start = (intptr_t)__ pc(); int vep_offset = ((intptr_t)__ pc()) - start;
// First instruction must be a nop as it may need to be patched on deoptimisation
{
Assembler::IncompressibleRegion ir(masm); // keep the nop as 4 bytes for patching.
MacroAssembler::assert_alignment(__ pc());
__ nop(); // 4 bytes
}
gen_special_dispatch(masm,
method,
in_sig_bt,
in_regs); int frame_complete = ((intptr_t)__ pc()) - start; // not complete, period
__ flush(); int stack_slots = SharedRuntime::out_preserve_stack_slots(); // no out slots at all, actually return nmethod::new_native_nmethod(method,
compile_id,
masm->code(),
vep_offset,
frame_complete,
stack_slots / VMRegImpl::slots_per_word,
in_ByteSize(-1),
in_ByteSize(-1),
(OopMapSet*)NULL);
}
address native_func = method->native_function();
assert(native_func != NULL, "must have function");
// An OopMap for lock (and class if static)
OopMapSet *oop_maps = new OopMapSet();
assert_cond(oop_maps != NULL);
intptr_t start = (intptr_t)__ pc();
// We have received a description of where all the java arg are located // on entry to the wrapper. We need to convert these args to where // the jni function will expect them. To figure out where they go // we convert the java signature to a C signature by inserting // the hidden arguments as arg[0] and possibly arg[1] (static method)
int argc = 0;
out_sig_bt[argc++] = T_ADDRESS; if (method->is_static()) {
out_sig_bt[argc++] = T_OBJECT;
}
for (int i = 0; i < total_in_args ; i++) {
out_sig_bt[argc++] = in_sig_bt[i];
}
// Now figure out where the args must be stored and how much stack space // they require. int out_arg_slots = c_calling_convention(out_sig_bt, out_regs, NULL, total_c_args);
// Compute framesize for the wrapper. We need to handlize all oops in // incoming registers
// Calculate the total number of stack slots we will need.
// First count the abi requirement plus all of the outgoing args int stack_slots = SharedRuntime::out_preserve_stack_slots() + out_arg_slots;
// Now the space for the inbound oop handle area int total_save_slots = 8 * VMRegImpl::slots_per_word; // 8 arguments passed in registers
int oop_handle_offset = stack_slots;
stack_slots += total_save_slots;
// Now any space we need for handlizing a klass if static method
int klass_slot_offset = 0; int klass_offset = -1; int lock_slot_offset = 0; bool is_static = false;
if (method->is_synchronized()) {
lock_slot_offset = stack_slots;
stack_slots += VMRegImpl::slots_per_word;
}
// Now a place (+2) to save return values or temp during shuffling // + 4 for return address (which we own) and saved fp
stack_slots += 6;
// Ok The space we have allocated will look like: // // // FP-> | | // | 2 slots (ra) | // | 2 slots (fp) | // |---------------------| // | 2 slots for moves | // |---------------------| // | lock box (if sync) | // |---------------------| <- lock_slot_offset // | klass (if static) | // |---------------------| <- klass_slot_offset // | oopHandle area | // |---------------------| <- oop_handle_offset (8 java arg registers) // | outbound memory | // | based arguments | // | | // |---------------------| // | | // SP-> | out_preserved_slots | // //
// Now compute actual number of stack words we need rounding to make // stack properly aligned.
stack_slots = align_up(stack_slots, StackAlignmentInSlots);
int stack_size = stack_slots * VMRegImpl::stack_slot_size;
// First thing make an ic check to see if we should even be here
// We are free to use all registers as temps without saving them and // restoring them except fp. fp is the only callee save register // as far as the interpreter and the compiler(s) are concerned.
// Verified entry point must be aligned
__ align(8);
__ bind(hit);
int vep_offset = ((intptr_t)__ pc()) - start;
// If we have to make this method not-entrant we'll overwrite its // first instruction with a jump.
{
Assembler::IncompressibleRegion ir(masm); // keep the nop as 4 bytes for patching.
MacroAssembler::assert_alignment(__ pc());
__ nop(); // 4 bytes
}
// Generate a new frame for the wrapper.
__ enter(); // -2 because return address is already present and so is saved fp
__ sub(sp, sp, stack_size - 2 * wordSize);
// Frame is now completed as far as size and linkage. int frame_complete = ((intptr_t)__ pc()) - start;
// We use x18 as the oop handle for the receiver/klass // It is callee save so it survives the call to native
constRegister oop_handle_reg = x18;
// // We immediately shuffle the arguments so that any vm call we have to // make from here on out (sync slow path, jvmti, etc.) we will have // captured the oops from our caller and have a valid oopMap for // them.
// ----------------- // The Grand Shuffle
// The Java calling convention is either equal (linux) or denser (win64) than the // c calling convention. However the because of the jni_env argument the c calling // convention always has at least one more (and two for static) arguments than Java. // Therefore if we move the args from java -> c backwards then we will never have // a register->register conflict and we don't have to build a dependency graph // and figure out how to break any cycles. //
// Record esp-based slot for receiver on stack for non-static methods int receiver_offset = -1;
// This is a trick. We double the stack slots so we can claim // the oops in the caller's frame. Since we are sure to have // more args than the caller doubling is enough to make // sure we can capture all the incoming oop args from the // caller. //
OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
assert_cond(map != NULL);
int float_args = 0; int int_args = 0;
#ifdef ASSERT bool reg_destroyed[Register::number_of_registers]; bool freg_destroyed[FloatRegister::number_of_registers]; for ( int r = 0 ; r < Register::number_of_registers ; r++ ) {
reg_destroyed[r] = false;
} for ( int f = 0 ; f < FloatRegister::number_of_registers ; f++ ) {
freg_destroyed[f] = false;
}
#endif/* ASSERT */
// For JNI natives the incoming and outgoing registers are offset upwards.
GrowableArray<int> arg_order(2 * total_in_args);
VMRegPair tmp_vmreg;
tmp_vmreg.set2(x9->as_VMReg());
for (int i = total_in_args - 1, c_arg = total_c_args - 1; i >= 0; i--, c_arg--) {
arg_order.push(i);
arg_order.push(c_arg);
}
int temploc = -1; for (int ai = 0; ai < arg_order.length(); ai += 2) { int i = arg_order.at(ai); int c_arg = arg_order.at(ai + 1);
__ block_comment(err_msg("mv %d -> %d", i, c_arg));
assert(c_arg != -1 && i != -1, "wrong order"); #ifdef ASSERT if (in_regs[i].first()->is_Register()) {
assert(!reg_destroyed[in_regs[i].first()->as_Register()->encoding()], "destroyed reg!");
} elseif (in_regs[i].first()->is_FloatRegister()) {
assert(!freg_destroyed[in_regs[i].first()->as_FloatRegister()->encoding()], "destroyed reg!");
} if (out_regs[c_arg].first()->is_Register()) {
reg_destroyed[out_regs[c_arg].first()->as_Register()->encoding()] = true;
} elseif (out_regs[c_arg].first()->is_FloatRegister()) {
freg_destroyed[out_regs[c_arg].first()->as_FloatRegister()->encoding()] = true;
} #endif/* ASSERT */ switch (in_sig_bt[i]) { case T_ARRAY: case T_OBJECT:
__ object_move(map, oop_handle_offset, stack_slots, in_regs[i], out_regs[c_arg],
((i == 0) && (!is_static)),
&receiver_offset);
int_args++; break; case T_VOID: break;
case T_FLOAT:
__ float_move(in_regs[i], out_regs[c_arg]);
float_args++; break;
// point c_arg at the first arg that is already loaded in case we // need to spill before we call out int c_arg = total_c_args - total_in_args;
// Pre-load a static method's oop into c_rarg1. if (method->is_static()) {
// load oop into a register
__ movoop(c_rarg1,
JNIHandles::make_local(method->method_holder()->java_mirror()));
// Now handlize the static class mirror it's known not-null.
__ sd(c_rarg1, Address(sp, klass_offset));
map->set_oop(VMRegImpl::stack2reg(klass_slot_offset));
// Now get the handle
__ la(c_rarg1, Address(sp, klass_offset)); // and protect the arg if we must spill
c_arg--;
}
// Change state to native (we save the return address in the thread, since it might not // be pushed on the stack when we do a stack traversal). // We use the same pc/oopMap repeatedly when we call out
// RedefineClasses() tracing support for obsolete method entry if (log_is_enabled(Trace, redefine, class, obsolete)) { // protect the args we've loaded
save_args(masm, total_c_args, c_arg, out_regs);
__ mov_metadata(c_rarg1, method());
__ call_VM_leaf(
CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry),
xthread, c_rarg1);
restore_args(masm, total_c_args, c_arg, out_regs);
}
// Lock a synchronized method
// Register definitions used by locking and unlocking
constRegister swap_reg = x10; constRegister obj_reg = x9; // Will contain the oop constRegister lock_reg = x30; // Address of compiler lock object (BasicLock) constRegister old_hdr = x30; // value of old header at unlock time constRegister tmp = ra;
// Load the oop from the handle
__ ld(obj_reg, Address(oop_handle_reg, 0));
if (!UseHeavyMonitors) { // Load (object->mark() | 1) into swap_reg % x10
__ ld(t0, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
__ ori(swap_reg, t0, 1);
// Save (object->mark() | 1) into BasicLock's displaced header
__ sd(swap_reg, Address(lock_reg, mark_word_offset));
// src -> dest if dest == x10 else x10 <- dest
__ cmpxchg_obj_header(x10, lock_reg, obj_reg, t0, count, /*fallthrough*/NULL);
// Test if the oopMark is an obvious stack pointer, i.e., // 1) (mark & 3) == 0, and // 2) sp <= mark < mark + os::pagesize() // These 3 tests can be done by evaluating the following // expression: ((mark - sp) & (3 - os::vm_page_size())), // assuming both stack pointer and pagesize have their // least significant 2 bits clear. // NOTE: the oopMark is in swap_reg % 10 as the result of cmpxchg
// Save the test result, for recursive case, the result is zero
__ sd(swap_reg, Address(lock_reg, mark_word_offset));
__ bnez(swap_reg, slow_path_lock);
} else {
__ j(slow_path_lock);
}
// Switch thread to "native transition" state before reading the synchronization state. // This additional state is necessary because reading and testing the synchronization // state is not atomic w.r.t. GC, as this scenario demonstrates: // Java thread A, in _thread_in_native state, loads _not_synchronized and is preempted. // VM thread changes sync state to synchronizing and suspends threads for GC. // Thread A is resumed to finish this native method, but doesn't block here since it // didn't see any synchronization is progress, and escapes.
__ mv(t0, _thread_in_native_trans);
// Force this write out before the read below
__ membar(MacroAssembler::AnyAny);
// check for safepoint operation in progress and/or pending suspend requests
{ // We need an acquire here to ensure that any subsequent load of the // global SafepointSynchronize::_state flag is ordered after this load // of the thread-local polling word. We don't want this poll to // return false (i.e. not safepointing) and a later poll of the global // SafepointSynchronize::_state spuriously to return true. // This is to avoid a race when we're in a native->Java transition // racing the code which wakes up from a safepoint.
    // NOTE(review): the lines that followed here were non-code extraction
    // residue (a German website disclaimer from a syntax-highlighting page)
    // that replaced the original source text. The enclosing block — opened
    // at the brace above and never closed in this fragment — is where the
    // wrapper performs the thread-local safepoint poll and pending-suspend
    // check after the native call (presumably via __ safepoint_poll(...)
    // and a load of JavaThread's suspend flags — confirm against upstream
    // sharedRuntime_riscv.cpp and restore the missing code before building).