/* * Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2016, 2019 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 only, as * published by the Free Software Foundation. * * This code is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * version 2 for more details (a copy is included in the LICENSE file that * accompanied this code). * * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA * or visit www.oracle.com if you need additional information or have any * questions. *
*/
// Registers which are not saved/restored, but still they have got a frame slot.
// Used to get same frame size for RegisterSaver_LiveRegs and RegisterSaver_LiveRegsWithoutR2.
#define RegisterSaver_ExcludedIntReg(regname) \
  { RegisterSaver::excluded_reg, regname->encoding(), regname->as_VMReg() }
// Registers which are not saved/restored, but still they have got a frame slot.
// Used to get same frame size for RegisterSaver_LiveRegs and RegisterSaver_LiveRegsWithoutR2.
#define RegisterSaver_ExcludedFloatReg(regname) \
  { RegisterSaver::excluded_reg, regname->encoding(), regname->as_VMReg() }
staticconst RegisterSaver::LiveRegType RegisterSaver_LiveRegs[] = { // Live registers which get spilled to the stack. Register positions // in this array correspond directly to the stack layout. // // live float registers: //
RegisterSaver_LiveFloatReg(Z_F0 ), // RegisterSaver_ExcludedFloatReg(Z_F1 ), // scratch (Z_fscratch_1)
RegisterSaver_LiveFloatReg(Z_F2 ),
RegisterSaver_LiveFloatReg(Z_F3 ),
RegisterSaver_LiveFloatReg(Z_F4 ),
RegisterSaver_LiveFloatReg(Z_F5 ),
RegisterSaver_LiveFloatReg(Z_F6 ),
RegisterSaver_LiveFloatReg(Z_F7 ),
RegisterSaver_LiveFloatReg(Z_F8 ),
RegisterSaver_LiveFloatReg(Z_F9 ),
RegisterSaver_LiveFloatReg(Z_F10),
RegisterSaver_LiveFloatReg(Z_F11),
RegisterSaver_LiveFloatReg(Z_F12),
RegisterSaver_LiveFloatReg(Z_F13),
RegisterSaver_LiveFloatReg(Z_F14),
RegisterSaver_LiveFloatReg(Z_F15), // // RegisterSaver_ExcludedIntReg(Z_R0), // scratch // RegisterSaver_ExcludedIntReg(Z_R1), // scratch
RegisterSaver_LiveIntReg(Z_R2 ),
RegisterSaver_LiveIntReg(Z_R3 ),
RegisterSaver_LiveIntReg(Z_R4 ),
RegisterSaver_LiveIntReg(Z_R5 ),
RegisterSaver_LiveIntReg(Z_R6 ),
RegisterSaver_LiveIntReg(Z_R7 ),
RegisterSaver_LiveIntReg(Z_R8 ),
RegisterSaver_LiveIntReg(Z_R9 ),
RegisterSaver_LiveIntReg(Z_R10),
RegisterSaver_LiveIntReg(Z_R11),
RegisterSaver_LiveIntReg(Z_R12),
RegisterSaver_LiveIntReg(Z_R13), // RegisterSaver_ExcludedIntReg(Z_R14), // return pc (Saved in caller frame.) // RegisterSaver_ExcludedIntReg(Z_R15) // stack pointer
};
staticconst RegisterSaver::LiveRegType RegisterSaver_LiveIntRegs[] = { // Live registers which get spilled to the stack. Register positions // in this array correspond directly to the stack layout. // // live float registers: All excluded, but still they get a stack slot to get same frame size. //
RegisterSaver_ExcludedFloatReg(Z_F0 ), // RegisterSaver_ExcludedFloatReg(Z_F1 ), // scratch (Z_fscratch_1)
RegisterSaver_ExcludedFloatReg(Z_F2 ),
RegisterSaver_ExcludedFloatReg(Z_F3 ),
RegisterSaver_ExcludedFloatReg(Z_F4 ),
RegisterSaver_ExcludedFloatReg(Z_F5 ),
RegisterSaver_ExcludedFloatReg(Z_F6 ),
RegisterSaver_ExcludedFloatReg(Z_F7 ),
RegisterSaver_ExcludedFloatReg(Z_F8 ),
RegisterSaver_ExcludedFloatReg(Z_F9 ),
RegisterSaver_ExcludedFloatReg(Z_F10),
RegisterSaver_ExcludedFloatReg(Z_F11),
RegisterSaver_ExcludedFloatReg(Z_F12),
RegisterSaver_ExcludedFloatReg(Z_F13),
RegisterSaver_ExcludedFloatReg(Z_F14),
RegisterSaver_ExcludedFloatReg(Z_F15), // // RegisterSaver_ExcludedIntReg(Z_R0), // scratch // RegisterSaver_ExcludedIntReg(Z_R1), // scratch
RegisterSaver_LiveIntReg(Z_R2 ),
RegisterSaver_LiveIntReg(Z_R3 ),
RegisterSaver_LiveIntReg(Z_R4 ),
RegisterSaver_LiveIntReg(Z_R5 ),
RegisterSaver_LiveIntReg(Z_R6 ),
RegisterSaver_LiveIntReg(Z_R7 ),
RegisterSaver_LiveIntReg(Z_R8 ),
RegisterSaver_LiveIntReg(Z_R9 ),
RegisterSaver_LiveIntReg(Z_R10),
RegisterSaver_LiveIntReg(Z_R11),
RegisterSaver_LiveIntReg(Z_R12),
RegisterSaver_LiveIntReg(Z_R13), // RegisterSaver_ExcludedIntReg(Z_R14), // return pc (Saved in caller frame.) // RegisterSaver_ExcludedIntReg(Z_R15) // stack pointer
};
staticconst RegisterSaver::LiveRegType RegisterSaver_LiveRegsWithoutR2[] = { // Live registers which get spilled to the stack. Register positions // in this array correspond directly to the stack layout. // // live float registers: //
RegisterSaver_LiveFloatReg(Z_F0 ), // RegisterSaver_ExcludedFloatReg(Z_F1 ), // scratch (Z_fscratch_1)
RegisterSaver_LiveFloatReg(Z_F2 ),
RegisterSaver_LiveFloatReg(Z_F3 ),
RegisterSaver_LiveFloatReg(Z_F4 ),
RegisterSaver_LiveFloatReg(Z_F5 ),
RegisterSaver_LiveFloatReg(Z_F6 ),
RegisterSaver_LiveFloatReg(Z_F7 ),
RegisterSaver_LiveFloatReg(Z_F8 ),
RegisterSaver_LiveFloatReg(Z_F9 ),
RegisterSaver_LiveFloatReg(Z_F10),
RegisterSaver_LiveFloatReg(Z_F11),
RegisterSaver_LiveFloatReg(Z_F12),
RegisterSaver_LiveFloatReg(Z_F13),
RegisterSaver_LiveFloatReg(Z_F14),
RegisterSaver_LiveFloatReg(Z_F15), // // RegisterSaver_ExcludedIntReg(Z_R0), // scratch // RegisterSaver_ExcludedIntReg(Z_R1), // scratch
RegisterSaver_ExcludedIntReg(Z_R2), // Omit saving R2.
RegisterSaver_LiveIntReg(Z_R3 ),
RegisterSaver_LiveIntReg(Z_R4 ),
RegisterSaver_LiveIntReg(Z_R5 ),
RegisterSaver_LiveIntReg(Z_R6 ),
RegisterSaver_LiveIntReg(Z_R7 ),
RegisterSaver_LiveIntReg(Z_R8 ),
RegisterSaver_LiveIntReg(Z_R9 ),
RegisterSaver_LiveIntReg(Z_R10),
RegisterSaver_LiveIntReg(Z_R11),
RegisterSaver_LiveIntReg(Z_R12),
RegisterSaver_LiveIntReg(Z_R13), // RegisterSaver_ExcludedIntReg(Z_R14), // return pc (Saved in caller frame.) // RegisterSaver_ExcludedIntReg(Z_R15) // stack pointer
};
// Live argument registers which get spilled to the stack. staticconst RegisterSaver::LiveRegType RegisterSaver_LiveArgRegs[] = {
RegisterSaver_LiveFloatReg(Z_FARG1),
RegisterSaver_LiveFloatReg(Z_FARG2),
RegisterSaver_LiveFloatReg(Z_FARG3),
RegisterSaver_LiveFloatReg(Z_FARG4),
RegisterSaver_LiveIntReg(Z_ARG1),
RegisterSaver_LiveIntReg(Z_ARG2),
RegisterSaver_LiveIntReg(Z_ARG3),
RegisterSaver_LiveIntReg(Z_ARG4),
RegisterSaver_LiveIntReg(Z_ARG5)
};
// Size in bytes of the register save area needed for the given register set.
// Derived from the corresponding RegisterSaver_Live* table: one reg_size slot
// per table entry (excluded registers still occupy a slot).
int RegisterSaver::live_reg_save_size(RegisterSet reg_set) {
  int save_area_bytes = -1;
  switch (reg_set) {
    case all_registers:           save_area_bytes = sizeof(RegisterSaver_LiveRegs);          break;
    case all_registers_except_r2: save_area_bytes = sizeof(RegisterSaver_LiveRegsWithoutR2); break;
    case all_integer_registers:   save_area_bytes = sizeof(RegisterSaver_LiveIntRegs);       break;
    case all_volatile_registers:  save_area_bytes = sizeof(RegisterSaver_LiveVolatileRegs);  break;
    case arg_registers:           save_area_bytes = sizeof(RegisterSaver_LiveArgRegs);       break;
    default:
      ShouldNotReachHere();
  }
  // Number of table entries times the per-register slot size.
  const int num_entries = save_area_bytes / sizeof(RegisterSaver::LiveRegType);
  return num_entries * reg_size;
}
// Total frame size for a register-saving frame: the z/Architecture ABI
// area plus the register save area for the given set.
int RegisterSaver::live_reg_frame_size(RegisterSet reg_set) {
  return frame::z_abi_160_size + live_reg_save_size(reg_set);
}
// return_pc: Specify the register that should be stored as the return pc in the current frame.
//
// NOTE(review): this block appears garbled in this view: the declarations of
// 'map', 'live_regs', 'regstosave_num', 'frame_size_in_bytes' and
// 'register_save_offset' are not visible, the float_reg case of the spill
// switch is absent, and on several lines executable code sits behind '//'
// comment text. Confirm against the pristine file before changing any logic.
OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, RegisterSet reg_set, Register return_pc) { // Record volatile registers as callee-save values in an OopMap so // their save locations will be propagated to the caller frame's // RegisterMap during StackFrameStream construction (needed for // deoptimization; see compiledVFrame::create_stack_value).
// Save return pc in old frame.
__ save_return_pc(return_pc);
// Push a new frame (includes stack linkage). // Use return_pc as scratch for push_frame. Z_R0_scratch (the default) and Z_R1_scratch are // illegally used to pass parameters by RangeCheckStub::emit_code().
__ push_frame(frame_size_in_bytes, return_pc); // We have to restore return_pc right away. // Nobody else will. Furthermore, return_pc isn't necessarily the default (Z_R14). // Nobody else knows which register we saved.
// Reload return_pc from the save slot in the just-pushed frame.
__ z_lg(return_pc, _z_abi16(return_pc) + frame_size_in_bytes, Z_SP);
// Register save area in new frame starts above z_abi_160 area. int offset = register_save_offset;
// Track runs of consecutive integer registers so each run can be stored
// with a single STMG (store-multiple) instruction.
Register first = noreg; Register last = noreg; int first_offset = -1; bool float_spilled = false;
for (int i = 0; i < regstosave_num; i++, offset += reg_size) { int reg_num = live_regs[i].reg_num; int reg_type = live_regs[i].reg_type;
switch (reg_type) { case RegisterSaver::int_reg: { Register reg = as_Register(reg_num); if (last != reg->predecessor()) { if (first != noreg) {
// Flush the previous run of consecutive registers.
__ z_stmg(first, last, first_offset, Z_SP);
}
// Start a new run at the current register and frame offset.
first = reg;
first_offset = offset;
DEBUG_ONLY(float_spilled = false);
}
last = reg;
assert(last != Z_R0, "r0 would require special treatment");
assert(!float_spilled, "for simplicity, do not mix up ints and floats in RegisterSaver_LiveRegs[]"); break;
}
case RegisterSaver::excluded_reg: // Not saved/restored, but with dedicated slot. continue; // Continue with next loop iteration.
// Second set_callee_saved is really a waste but we'll keep things as they were for now
map->set_callee_saved(VMRegImpl::stack2reg(offset >> 2), live_regs[i].vmreg);
map->set_callee_saved(VMRegImpl::stack2reg((offset + half_reg_size) >> 2), live_regs[i].vmreg->next());
}
// Store the final pending run of integer registers.
assert(first != noreg, "Should spill at least one int reg.");
__ z_stmg(first, last, first_offset, Z_SP);
// Record each saved register's stack slot in the OopMap: two 32-bit
// halves per 64-bit save slot (offset is shifted to 32-bit slot units).
// Register save area in new frame starts above z_abi_160 area. int offset = register_save_offset; for (int i = 0; i < regstosave_num; i++) { if (live_regs[i].reg_type < RegisterSaver::excluded_reg) {
map->set_callee_saved(VMRegImpl::stack2reg(offset>>2), live_regs[i].vmreg);
map->set_callee_saved(VMRegImpl::stack2reg((offset + half_reg_size)>>2), live_regs[i].vmreg->next());
}
offset += reg_size;
} return map;
}
// NOTE(review): this block appears garbled in this view: 'regstosave_num' is
// initialized to 0 and 'live_regs' to NULL with no reassignment visible, so
// as written the restore loop is dead and the trailing assert must fire. The
// switch(reg_set) that selects the live-register table (mirroring
// live_reg_save_size) appears to be missing, as does the float_reg case of
// the restore switch. Confirm against the pristine file before editing.
// Pop the current frame and restore all the registers that we saved. void RegisterSaver::restore_live_registers(MacroAssembler* masm, RegisterSet reg_set) { int offset; constint register_save_offset = live_reg_frame_size(reg_set) - live_reg_save_size(reg_set);
// Track runs of consecutive integer registers so each run can be loaded
// with a single LMG (load-multiple) instruction.
Register first = noreg; Register last = noreg; int first_offset = -1; bool float_spilled = false;
int regstosave_num = 0; const RegisterSaver::LiveRegType* live_regs = NULL;
// Register save area in new frame starts above z_abi_160 area.
offset = register_save_offset;
for (int i = 0; i < regstosave_num; i++, offset += reg_size) { int reg_num = live_regs[i].reg_num; int reg_type = live_regs[i].reg_type;
switch (reg_type) { case RegisterSaver::excluded_reg: continue; // Continue with next loop iteration.
case RegisterSaver::int_reg: { Register reg = as_Register(reg_num); if (last != reg->predecessor()) { if (first != noreg) {
// Flush the previous run of consecutive registers.
__ z_lmg(first, last, first_offset, Z_SP);
}
// Start a new run at the current register and frame offset.
first = reg;
first_offset = offset;
DEBUG_ONLY(float_spilled = false);
}
last = reg;
assert(last != Z_R0, "r0 would require special treatment");
assert(!float_spilled, "for simplicity, do not mix up ints and floats in RegisterSaver_LiveRegs[]"); break;
}
default:
ShouldNotReachHere();
}
}
// Load the final pending run of integer registers.
assert(first != noreg, "Should spill at least one int reg.");
__ z_lmg(first, last, first_offset, Z_SP);
// Pop the frame.
__ pop_frame();
// Restore the flags.
__ restore_return_pc();
}
// Pop the current frame and restore the registers that might be holding a result. void RegisterSaver::restore_result_registers(MacroAssembler* masm) { int i; int offset; constint regstosave_num = sizeof(RegisterSaver_LiveRegs) / sizeof(RegisterSaver::LiveRegType); constint register_save_offset = live_reg_frame_size(all_registers) - live_reg_save_size(all_registers);
// Restore all result registers (ints and floats).
offset = register_save_offset; for (int i = 0; i < regstosave_num; i++, offset += reg_size) { int reg_num = RegisterSaver_LiveRegs[i].reg_num; int reg_type = RegisterSaver_LiveRegs[i].reg_type; switch (reg_type) { case RegisterSaver::excluded_reg: continue; // Continue with next loop iteration. case RegisterSaver::int_reg: { if (as_Register(reg_num) == Z_RET) { // int result_reg
__ z_lg(as_Register(reg_num), offset, Z_SP);
} break;
} case RegisterSaver::float_reg: { if (as_FloatRegister(reg_num) == Z_FRET) { // float result_reg
__ z_ld(as_FloatRegister(reg_num), offset, Z_SP);
} break;
} default:
ShouldNotReachHere();
}
}
}
switch (ret_type) { case T_BOOLEAN: // Save shorter types as int. Do we need sign extension at restore?? case T_BYTE: case T_CHAR: case T_SHORT: case T_INT:
__ reg2mem_opt(Z_RET, memaddr, false); break; case T_OBJECT: // Save pointer types as long. case T_ARRAY: case T_ADDRESS: case T_VOID: case T_LONG:
__ reg2mem_opt(Z_RET, memaddr); break; case T_FLOAT:
__ freg2mem_opt(Z_FRET, memaddr, false); break; case T_DOUBLE:
__ freg2mem_opt(Z_FRET, memaddr); break; default:
ShouldNotReachHere(); break;
}
}
switch (ret_type) { case T_BOOLEAN: // Restore shorter types as int. Do we need sign extension at restore?? case T_BYTE: case T_CHAR: case T_SHORT: case T_INT:
__ mem2reg_opt(Z_RET, memaddr, false); break; case T_OBJECT: // Restore pointer types as long. case T_ARRAY: case T_ADDRESS: case T_VOID: case T_LONG:
__ mem2reg_opt(Z_RET, memaddr); break; case T_FLOAT:
__ mem2freg_opt(Z_FRET, memaddr, false); break; case T_DOUBLE:
__ mem2freg_opt(Z_FRET, memaddr); break; default:
ShouldNotReachHere(); break;
}
}
// --------------------------------------------------------------------------- // Read the array of BasicTypes from a signature, and compute where the // arguments should go. Values in the VMRegPair regs array refer to 4-byte // quantities. Values less than VMRegImpl::stack0 are registers, those above // refer to 4-byte stack slots. All stack slots are based off of the stack pointer // as framesizes are fixed. // VMRegImpl::stack0 refers to the first slot 0(sp). // VMRegImpl::stack0+1 refers to the memory word 4-byes higher. Registers // up to RegisterImpl::number_of_registers are the 64-bit integer registers.
// Note: the INPUTS in sig_bt are in units of Java argument words, which are // either 32-bit or 64-bit depending on the build. The OUTPUTS are in 32-bit // units regardless of build.
// The Java calling convention is a "shifted" version of the C ABI. // By skipping the first C ABI register we can call non-static jni methods // with small numbers of arguments without having to shuffle the arguments // at all. Since we control the java ABI we ought to at least get some // advantage out of it. int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
VMRegPair *regs, int total_args_passed) { // c2c calling conventions for compiled-compiled calls.
// An int/float occupies 1 slot here. constint inc_stk_for_intfloat = 1; // 1 slots for ints and floats. constint inc_stk_for_longdouble = 2; // 2 slots for longs and doubles.
for (int i = 0; i < total_args_passed; ++i) { switch (sig_bt[i]) { case T_BOOLEAN: case T_CHAR: case T_BYTE: case T_SHORT: case T_INT: if (ireg < z_num_iarg_registers) { // Put int/ptr in register.
regs[i].set1(z_iarg_reg[ireg]);
++ireg;
} else { // Put int/ptr on stack.
regs[i].set1(VMRegImpl::stack2reg(stk));
stk += inc_stk_for_intfloat;
} break; case T_LONG:
assert((i + 1) < total_args_passed && sig_bt[i+1] == T_VOID, "expecting half"); if (ireg < z_num_iarg_registers) { // Put long in register.
regs[i].set2(z_iarg_reg[ireg]);
++ireg;
} else { // Put long on stack and align to 2 slots. if (stk & 0x1) { ++stk; }
regs[i].set2(VMRegImpl::stack2reg(stk));
stk += inc_stk_for_longdouble;
} break; case T_OBJECT: case T_ARRAY: case T_ADDRESS: if (ireg < z_num_iarg_registers) { // Put ptr in register.
regs[i].set2(z_iarg_reg[ireg]);
++ireg;
} else { // Put ptr on stack and align to 2 slots, because // "64-bit pointers record oop-ishness on 2 aligned adjacent // registers." (see OopFlow::build_oop_map). if (stk & 0x1) { ++stk; }
regs[i].set2(VMRegImpl::stack2reg(stk));
stk += inc_stk_for_longdouble;
} break; case T_FLOAT: if (freg < z_num_farg_registers) { // Put float in register.
regs[i].set1(z_farg_reg[freg]);
++freg;
} else { // Put float on stack.
regs[i].set1(VMRegImpl::stack2reg(stk));
stk += inc_stk_for_intfloat;
} break; case T_DOUBLE:
assert((i + 1) < total_args_passed && sig_bt[i+1] == T_VOID, "expecting half"); if (freg < z_num_farg_registers) { // Put double in register.
regs[i].set2(z_farg_reg[freg]);
++freg;
} else { // Put double on stack and align to 2 slots. if (stk & 0x1) { ++stk; }
regs[i].set2(VMRegImpl::stack2reg(stk));
stk += inc_stk_for_longdouble;
} break; case T_VOID:
assert(i != 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "expecting half"); // Do not count halves.
regs[i].set_bad(); break; default:
ShouldNotReachHere();
}
} return align_up(stk, 2);
}
int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
VMRegPair *regs,
VMRegPair *regs2, int total_args_passed) {
assert(regs2 == NULL, "second VMRegPair array not used on this platform");
// Avoid passing C arguments in the wrong stack slots.
// 'Stk' counts stack slots. Due to alignment, 32 bit values occupy // 2 such slots, like 64 bit values do. constint inc_stk_for_intfloat = 2; // 2 slots for ints and floats. constint inc_stk_for_longdouble = 2; // 2 slots for longs and doubles.
int i; // Leave room for C-compatible ABI int stk = (frame::z_abi_160_size - frame::z_jit_out_preserve_size) / VMRegImpl::stack_slot_size; int freg = 0; int ireg = 0;
// We put the first 5 arguments into registers and the rest on the // stack. Float arguments are already in their argument registers // due to c2c calling conventions (see calling_convention). for (int i = 0; i < total_args_passed; ++i) { switch (sig_bt[i]) { case T_BOOLEAN: case T_CHAR: case T_BYTE: case T_SHORT: case T_INT: // Fall through, handle as long. case T_LONG: case T_OBJECT: case T_ARRAY: case T_ADDRESS: case T_METADATA: // Oops are already boxed if required (JNI). if (ireg < z_num_iarg_registers) {
regs[i].set2(z_iarg_reg[ireg]);
++ireg;
} else {
regs[i].set2(VMRegImpl::stack2reg(stk));
stk += inc_stk_for_longdouble;
} break; case T_FLOAT: if (freg < z_num_farg_registers) {
regs[i].set1(z_farg_reg[freg]);
++freg;
} else {
regs[i].set1(VMRegImpl::stack2reg(stk+1));
stk += inc_stk_for_intfloat;
} break; case T_DOUBLE:
assert((i + 1) < total_args_passed && sig_bt[i+1] == T_VOID, "expecting half"); if (freg < z_num_farg_registers) {
regs[i].set2(z_farg_reg[freg]);
++freg;
} else { // Put double on stack.
regs[i].set2(VMRegImpl::stack2reg(stk));
stk += inc_stk_for_longdouble;
} break; case T_VOID: // Do not count halves.
regs[i].set_bad(); break; default:
ShouldNotReachHere();
}
} return align_up(stk, 2);
}
//---------------------------------------------------------------------- // The java_calling_convention describes stack locations as ideal slots on // a frame with no abi restrictions. Since we must observe abi restrictions // (like the placement of the register window) the slots must be biased by // the following value. //---------------------------------------------------------------------- staticint reg2slot(VMReg r) { return r->reg2stack() + SharedRuntime::out_preserve_stack_slots();
}
// Now write the args into the outgoing interpreter space. bool has_receiver = false; Register receiver_reg = noreg; int member_arg_pos = -1; Register member_reg = noreg; int ref_kind = MethodHandles::signature_polymorphic_intrinsic_ref_kind(special_dispatch);
if (ref_kind != 0) {
member_arg_pos = total_args_passed - 1; // trailing MemberName argument
member_reg = Z_R9; // Known to be free at this point.
has_receiver = MethodHandles::ref_kind_has_receiver(ref_kind);
} elseif (special_dispatch == vmIntrinsics::_linkToNative) {
member_arg_pos = total_args_passed - 1; // trailing NativeEntryPoint argument
member_reg = Z_R9; // known to be free at this point
} else {
guarantee(special_dispatch == vmIntrinsics::_invokeBasic, "special_dispatch=%d", vmIntrinsics::as_int(special_dispatch));
has_receiver = true;
}
if (member_reg != noreg) { // Load the member_arg into register, if necessary.
assert(member_arg_pos >= 0 && member_arg_pos < total_args_passed, "oob");
assert(sig_bt[member_arg_pos] == T_OBJECT, "dispatch argument must be an object");
VMReg r = regs[member_arg_pos].first();
assert(r->is_valid(), "bad member arg");
if (r->is_stack()) {
__ z_lg(member_reg, Address(Z_SP, reg2offset(r)));
} else { // No data motion is needed.
member_reg = r->as_Register();
}
}
if (has_receiver) { // Make sure the receiver is loaded into a register.
assert(total_args_passed > 0, "oob");
assert(sig_bt[0] == T_OBJECT, "receiver argument must be an object");
VMReg r = regs[0].first();
assert(r->is_valid(), "bad receiver arg");
if (r->is_stack()) { // Porting note: This assumes that compiled calling conventions always // pass the receiver oop in a register. If this is not true on some // platform, pick a temp and load the receiver from stack.
assert(false, "receiver always in a register");
receiver_reg = Z_R13; // Known to be free at this point.
__ z_lg(receiver_reg, Address(Z_SP, reg2offset(r)));
} else { // No data motion is needed.
receiver_reg = r->as_Register();
}
}
// Figure out which address we are really jumping to:
MethodHandles::generate_method_handle_dispatch(masm, special_dispatch,
receiver_reg, member_reg, /*for_compiler_entry:*/ true);
}
// Is the size of a vector size (in bytes) bigger than a size saved by default? // 8 bytes registers are saved by default on z/Architecture. bool SharedRuntime::is_wide_vector(int size) { // Note, MaxVectorSize == 8 on this platform.
assert(size <= 8, "%d bytes vectors are not supported", size); return size > 8;
}
//---------------------------------------------------------------------- // An oop arg. Must pass a handle not the oop itself //---------------------------------------------------------------------- staticvoid object_move(MacroAssembler *masm,
OopMap *map, int oop_handle_offset, int framesize_in_slots,
VMRegPair src,
VMRegPair dst, bool is_receiver, int *receiver_offset) { int frame_offset = framesize_in_slots*VMRegImpl::stack_slot_size;
assert(!is_receiver || (is_receiver && (*receiver_offset == -1)), "only one receiving object per call, please.");
// Must pass a handle. First figure out the location we use as a handle.
if (src.first()->is_stack()) { // Oop is already on the stack, put handle on stack or in register // If handle will be on the stack, use temp reg to calculate it. Register rHandle = dst.first()->is_stack() ? Z_R1 : dst.first()->as_Register();
Label skip; int slot_in_older_frame = reg2slot(src.first());
guarantee(!is_receiver, "expecting receiver in register");
map->set_oop(VMRegImpl::stack2reg(slot_in_older_frame + framesize_in_slots));
__ add2reg(rHandle, reg2offset(src.first())+frame_offset, Z_SP);
__ load_and_test_long(Z_R0, Address(rHandle));
__ z_brne(skip); // Use a NULL handle if oop is NULL.
__ clear_reg(rHandle, true, false);
__ bind(skip);
// Copy handle to the right place (register or stack). if (dst.first()->is_stack()) {
__ z_stg(rHandle, reg2offset(dst.first()), Z_SP);
} // else // nothing to do. rHandle uses the correct register
} else { // Oop is passed in an input register. We must flush it to the stack. constRegister rOop = src.first()->as_Register(); constRegister rHandle = dst.first()->is_stack() ? Z_R1 : dst.first()->as_Register(); int oop_slot = (rOop->encoding()-Z_ARG1->encoding()) * VMRegImpl::slots_per_word + oop_handle_offset; int oop_slot_offset = oop_slot*VMRegImpl::stack_slot_size;
NearLabel skip;
if (is_receiver) {
*receiver_offset = oop_slot_offset;
}
map->set_oop(VMRegImpl::stack2reg(oop_slot));
// If Oop == NULL, use a NULL handle.
__ compare64_and_branch(rOop, (RegisterOrConstant)0L, Assembler::bcondNotEqual, skip);
__ clear_reg(rHandle, true, false);
__ bind(skip);
// Copy handle to the right place (register or stack). if (dst.first()->is_stack()) {
__ z_stg(rHandle, reg2offset(dst.first()), Z_SP);
} // else // nothing to do here, since rHandle = dst.first()->as_Register in this case.
}
}
//---------------------------------------------------------------------- // A float arg. May have to do float reg to int reg conversion //---------------------------------------------------------------------- staticvoid float_move(MacroAssembler *masm,
VMRegPair src,
VMRegPair dst, int framesize_in_slots, int workspace_slot_offset) { int frame_offset = framesize_in_slots * VMRegImpl::stack_slot_size; int workspace_offset = workspace_slot_offset * VMRegImpl::stack_slot_size;
// We do not accept an argument in a VMRegPair to be spread over two slots, // no matter what physical location (reg or stack) the slots may have. // We just check for the unaccepted slot to be invalid.
assert(!src.second()->is_valid(), "float in arg spread over two slots");
assert(!dst.second()->is_valid(), "float out arg spread over two slots");
if (src.first()->is_stack()) { if (dst.first()->is_stack()) { // stack -> stack. The easiest of the bunch.
__ z_mvc(Address(Z_SP, reg2offset(dst.first())),
Address(Z_SP, reg2offset(src.first()) + frame_offset), sizeof(float));
} else { // stack to reg
Address memaddr(Z_SP, reg2offset(src.first()) + frame_offset); if (dst.first()->is_Register()) {
__ mem2reg_opt(dst.first()->as_Register(), memaddr, false);
} else {
__ mem2freg_opt(dst.first()->as_FloatRegister(), memaddr, false);
}
}
} elseif (src.first()->is_Register()) { if (dst.first()->is_stack()) { // gpr -> stack
__ reg2mem_opt(src.first()->as_Register(),
Address(Z_SP, reg2offset(dst.first()), false ));
} else { if (dst.first()->is_Register()) { // gpr -> gpr
__ move_reg_if_needed(dst.first()->as_Register(), T_INT,
src.first()->as_Register(), T_INT);
} else { if (VM_Version::has_FPSupportEnhancements()) { // gpr -> fpr. Exploit z10 capability of direct transfer.
__ z_ldgr(dst.first()->as_FloatRegister(), src.first()->as_Register());
} else { // gpr -> fpr. Use work space on stack to transfer data.
Address stackaddr(Z_SP, workspace_offset);
__ reg2mem_opt(src.first()->as_Register(), stackaddr, false);
__ mem2freg_opt(dst.first()->as_FloatRegister(), stackaddr, false);
}
}
}
} else { if (dst.first()->is_stack()) { // fpr -> stack
__ freg2mem_opt(src.first()->as_FloatRegister(),
Address(Z_SP, reg2offset(dst.first())), false);
} else { if (dst.first()->is_Register()) { if (VM_Version::has_FPSupportEnhancements()) { // fpr -> gpr.
__ z_lgdr(dst.first()->as_Register(), src.first()->as_FloatRegister());
} else { // fpr -> gpr. Use work space on stack to transfer data.
Address stackaddr(Z_SP, workspace_offset);
//---------------------------------------------------------------------- // A double arg. May have to do double reg to long reg conversion //---------------------------------------------------------------------- staticvoid double_move(MacroAssembler *masm,
VMRegPair src,
VMRegPair dst, int framesize_in_slots, int workspace_slot_offset) { int frame_offset = framesize_in_slots*VMRegImpl::stack_slot_size; int workspace_offset = workspace_slot_offset*VMRegImpl::stack_slot_size;
// Since src is always a java calling convention we know that the // src pair is always either all registers or all stack (and aligned?)
if (src.first()->is_stack()) { if (dst.first()->is_stack()) { // stack -> stack. The easiest of the bunch.
__ z_mvc(Address(Z_SP, reg2offset(dst.first())),
Address(Z_SP, reg2offset(src.first()) + frame_offset), sizeof(double));
} else { // stack to reg
Address stackaddr(Z_SP, reg2offset(src.first()) + frame_offset);
if (dst.first()->is_Register()) {
__ mem2reg_opt(dst.first()->as_Register(), stackaddr);
} else {
__ mem2freg_opt(dst.first()->as_FloatRegister(), stackaddr);
}
}
} elseif (src.first()->is_Register()) { if (dst.first()->is_stack()) { // gpr -> stack
__ reg2mem_opt(src.first()->as_Register(),
Address(Z_SP, reg2offset(dst.first())));
} else { if (dst.first()->is_Register()) { // gpr -> gpr
__ move_reg_if_needed(dst.first()->as_Register(), T_LONG,
src.first()->as_Register(), T_LONG);
} else { if (VM_Version::has_FPSupportEnhancements()) { // gpr -> fpr. Exploit z10 capability of direct transfer.
__ z_ldgr(dst.first()->as_FloatRegister(), src.first()->as_Register());
} else { // gpr -> fpr. Use work space on stack to transfer data.
Address stackaddr(Z_SP, workspace_offset);
__ reg2mem_opt(src.first()->as_Register(), stackaddr);
__ mem2freg_opt(dst.first()->as_FloatRegister(), stackaddr);
}
}
}
} else { if (dst.first()->is_stack()) { // fpr -> stack
__ freg2mem_opt(src.first()->as_FloatRegister(),
Address(Z_SP, reg2offset(dst.first())));
} else { if (dst.first()->is_Register()) { if (VM_Version::has_FPSupportEnhancements()) { // fpr -> gpr. Exploit z10 capability of direct transfer.
__ z_lgdr(dst.first()->as_Register(), src.first()->as_FloatRegister());
} else { // fpr -> gpr. Use work space on stack to transfer data.
Address stackaddr(Z_SP, workspace_offset);
__ freg2mem_opt(src.first()->as_FloatRegister(), stackaddr);
__ mem2reg_opt(dst.first()->as_Register(), stackaddr);
}
} else { // fpr -> fpr // In theory these overlap but the ordering is such that this is likely a nop.
__ move_freg_if_needed(dst.first()->as_FloatRegister(), T_DOUBLE,
src.first()->as_FloatRegister(), T_DOUBLE);
}
}
}
}
//---------------------------------------------------------------------- // A long arg. //---------------------------------------------------------------------- staticvoid long_move(MacroAssembler *masm,
VMRegPair src,
VMRegPair dst, int framesize_in_slots) { int frame_offset = framesize_in_slots*VMRegImpl::stack_slot_size;
if (src.first()->is_stack()) { if (dst.first()->is_stack()) { // stack -> stack. The easiest of the bunch.
__ z_mvc(Address(Z_SP, reg2offset(dst.first())),
Address(Z_SP, reg2offset(src.first()) + frame_offset), sizeof(long));
} else { // stack to reg
assert(dst.first()->is_Register(), "long dst value must be in GPR");
__ mem2reg_opt(dst.first()->as_Register(),
Address(Z_SP, reg2offset(src.first()) + frame_offset));
}
} else { // reg to reg
assert(src.first()->is_Register(), "long src value must be in GPR"); if (dst.first()->is_stack()) { // reg -> stack
__ reg2mem_opt(src.first()->as_Register(),
Address(Z_SP, reg2offset(dst.first())));
} else { // reg -> reg
assert(dst.first()->is_Register(), "long dst value must be in GPR");
__ move_reg_if_needed(dst.first()->as_Register(),
T_LONG, src.first()->as_Register(), T_LONG);
}
}
}
//---------------------------------------------------------------------- // A int-like arg. //---------------------------------------------------------------------- // On z/Architecture we will store integer like items to the stack as 64 bit // items, according to the z/Architecture ABI, even though Java would only store // 32 bits for a parameter. // We do sign extension for all base types. That is ok since the only // unsigned base type is T_CHAR, and T_CHAR uses only 16 bits of an int. // Sign extension 32->64 bit will thus not affect the value. //---------------------------------------------------------------------- staticvoid move32_64(MacroAssembler *masm,
VMRegPair src,
VMRegPair dst, int framesize_in_slots) { int frame_offset = framesize_in_slots * VMRegImpl::stack_slot_size;
/////////////////////////////////////////////////////////////////////// // // Precalculations before generating any code // ///////////////////////////////////////////////////////////////////////
address native_func = method->native_function();
assert(native_func != NULL, "must have function");
//--------------------------------------------------------------------- // We have received a description of where all the java args are located // on entry to the wrapper. We need to convert these args to where // the jni function will expect them. To figure out where they go // we convert the java signature to a C signature by inserting // the hidden arguments as arg[0] and possibly arg[1] (static method). // // The first hidden argument arg[0] is a pointer to the JNI environment. // It is generated for every call. // The second argument arg[1] to the JNI call, which is hidden for static // methods, is the boxed lock object. For static calls, the lock object // is the static method itself. The oop is constructed here. for instance // calls, the lock is performed on the object itself, the pointer of // which is passed as the first visible argument. //---------------------------------------------------------------------
// Additionally, on z/Architecture we must convert integers // to longs in the C signature. We do this in advance in order to have // no trouble with indexes into the bt-arrays. // So convert the signature and registers now, and adjust the total number // of in-arguments accordingly. bool method_is_static = method->is_static(); int total_c_args = total_in_args + (method_is_static ? 2 : 1);
// Create the signature for the C call: // 1) add the JNIEnv* // 2) add the class if the method is static // 3) copy the rest of the incoming signature (shifted by the number of // hidden arguments)
int argc = 0;
out_sig_bt[argc++] = T_ADDRESS; if (method->is_static()) {
out_sig_bt[argc++] = T_OBJECT;
}
for (int i = 0; i < total_in_args; i++) {
out_sig_bt[argc++] = in_sig_bt[i];
}
/////////////////////////////////////////////////////////////////////// // Now figure out where the args must be stored and how much stack space // they require (neglecting out_preserve_stack_slots but providing space // for storing the first five register arguments). // It's weird, see int_stk_helper. ///////////////////////////////////////////////////////////////////////
//--------------------------------------------------------------------- // Compute framesize for the wrapper. // // - We need to handlize all oops passed in registers. // - We must create space for them here that is disjoint from the save area. // - We always just allocate 5 words for storing down these objects. // This allows us to simply record the base and use the Ireg number to // decide which slot to use. // - Note that the reg number used to index the stack slot is the inbound // number, not the outbound number. // - We must shuffle args to match the native convention, // and to include var-args space. //---------------------------------------------------------------------
//--------------------------------------------------------------------- // Calculate the total number of stack slots we will need: // - 1) abi requirements // - 2) outgoing args // - 3) space for inbound oop handle area // - 4) space for handlizing a klass if static method // - 5) space for a lock if synchronized method // - 6) workspace (save rtn value, int<->float reg moves, ...) // - 7) filler slots for alignment //--------------------------------------------------------------------- // Here is how the space we have allocated will look like. // Since we use resize_frame, we do not create a new stack frame, // but just extend the one we got with our own data area. // // If an offset or pointer name points to a separator line, it is // assumed that addressing with offset 0 selects storage starting // at the first byte above the separator line. // // // ... ... // | caller's frame | // FP-> |---------------------| // | filler slots, if any| // 7| #slots == mult of 2 | // |---------------------| // | work space | // 6| 2 slots = 8 bytes | // |---------------------| // 5| lock box (if sync) | // |---------------------| <- lock_slot_offset // 4| klass (if static) | // |---------------------| <- klass_slot_offset // 3| oopHandle area | // | | // | | // |---------------------| <- oop_handle_offset // 2| outbound memory | // ... ... // | based arguments | // |---------------------| // | vararg | // ... ... // | area | // |---------------------| <- out_arg_slot_offset // 1| out_preserved_slots | // ... ... // | (z_abi spec) | // SP-> |---------------------| <- FP_slot_offset (back chain) // ... ... // //---------------------------------------------------------------------
// *_slot_offset indicates offset from SP in #stack slots // *_offset indicates offset from SP in #bytes
int stack_slots = c_calling_convention(out_sig_bt, out_regs, /*regs2=*/NULL, total_c_args) + // 1+2
SharedRuntime::out_preserve_stack_slots(); // see c_calling_convention
// Now the space for the inbound oop handle area. int total_save_slots = RegisterImpl::number_of_arg_registers * VMRegImpl::slots_per_word;
int oop_handle_slot_offset = stack_slots;
stack_slots += total_save_slots; // 3)
int klass_slot_offset = 0; int klass_offset = -1; if (method_is_static) { // 4)
klass_slot_offset = stack_slots;
klass_offset = klass_slot_offset * VMRegImpl::stack_slot_size;
stack_slots += VMRegImpl::slots_per_word;
}
int lock_slot_offset = 0; int lock_offset = -1; if (method->is_synchronized()) { // 5)
lock_slot_offset = stack_slots;
lock_offset = lock_slot_offset * VMRegImpl::stack_slot_size;
stack_slots += VMRegImpl::slots_per_word;
}
int workspace_slot_offset= stack_slots; // 6)
stack_slots += 2;
// Now compute actual number of stack words we need. // Round to align stack properly.
stack_slots = align_up(stack_slots, // 7)
frame::alignment_in_bytes / VMRegImpl::stack_slot_size); int frame_size_in_bytes = stack_slots * VMRegImpl::stack_slot_size;
/////////////////////////////////////////////////////////////////////// // Now we can start generating code ///////////////////////////////////////////////////////////////////////
// check ic: object class <-> cached class if (!method_is_static) __ nmethod_UEP(ic_miss); // Fill with nops (alignment of verified entry point).
__ align(CodeEntryAlignment);
__ save_return_pc();
__ generate_stack_overflow_check(frame_size_in_bytes); // Check before creating frame. #ifndef USE_RESIZE_FRAME
__ push_frame(frame_size_in_bytes); // Create a new frame for the wrapper. #else
__ resize_frame(-frame_size_in_bytes, Z_R0_scratch); // No new frame for the wrapper. // Just resize the existing one. #endif
// Native nmethod wrappers never take possession of the oop arguments. // So the caller will gc the arguments. // The only thing we need an oopMap for is if the call is static. // // An OopMap for lock (and class if static), and one for the VM call itself
OopMapSet *oop_maps = new OopMapSet();
OopMap *map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
////////////////////////////////////////////////////////////////////// // // The Grand Shuffle // ////////////////////////////////////////////////////////////////////// // // We immediately shuffle the arguments so that for any vm call we have // to make from here on out (sync slow path, jvmti, etc.) we will have // captured the oops from our caller and have a valid oopMap for them. // //-------------------------------------------------------------------- // Natives require 1 or 2 extra arguments over the normal ones: the JNIEnv* // (derived from JavaThread* which is in Z_thread) and, if static, // the class mirror instead of a receiver. This pretty much guarantees that // register layout will not match. We ignore these extra arguments during // the shuffle. The shuffle is described by the two calling convention // vectors we have in our possession. We simply walk the java vector to // get the source locations and the c vector to get the destinations. // // This is a trick. We double the stack slots so we can claim // the oops in the caller's frame. Since we are sure to have // more args than the caller doubling is enough to make // sure we can capture all the incoming oop args from the caller. //--------------------------------------------------------------------
// Record sp-based slot for receiver on stack for non-static methods. int receiver_offset = -1;
//-------------------------------------------------------------------- // We move the arguments backwards because a floating point register's // destination will always be a register with a greater or equal // register number, or the stack. // jix is the index of the incoming Java arguments. // cix is the index of the outgoing C arguments. //--------------------------------------------------------------------
#ifdef ASSERT bool reg_destroyed[RegisterImpl::number_of_registers]; bool freg_destroyed[FloatRegisterImpl::number_of_registers]; for (int r = 0; r < RegisterImpl::number_of_registers; r++) {
reg_destroyed[r] = false;
} for (int f = 0; f < FloatRegisterImpl::number_of_registers; f++) {
freg_destroyed[f] = false;
} #endif// ASSERT
switch (in_sig_bt[jix]) { // Due to casting, small integers should only occur in pairs with type T_LONG. case T_BOOLEAN: case T_CHAR: case T_BYTE: case T_SHORT: case T_INT: // Move int and do sign extension.
move32_64(masm, in_regs[jix], out_regs[cix], stack_slots); break;
case T_LONG :
long_move(masm, in_regs[jix], out_regs[cix], stack_slots); break;
case T_ARRAY: case T_OBJECT:
object_move(masm, map, oop_handle_slot_offset, stack_slots, in_regs[jix], out_regs[cix],
((jix == 0) && (!method_is_static)),
&receiver_offset); break; case T_VOID: break;
case T_FLOAT:
float_move(masm, in_regs[jix], out_regs[cix], stack_slots, workspace_slot_offset); break;
case T_ADDRESS:
assert(false, "found T_ADDRESS in java args"); break;
default:
ShouldNotReachHere();
}
}
//-------------------------------------------------------------------- // Pre-load a static method's oop into ARG2. // Used both by locking code and the normal JNI call code. //-------------------------------------------------------------------- if (method_is_static) {
__ set_oop_constant(JNIHandles::make_local(method->method_holder()->java_mirror()), Z_ARG2);
// Now handlize the static class mirror in ARG2. It's known not-null.
__ z_stg(Z_ARG2, klass_offset, Z_SP);
map->set_oop(VMRegImpl::stack2reg(klass_slot_offset));
__ add2reg(Z_ARG2, klass_offset, Z_SP);
}
// Get JNIEnv* which is first argument to native.
--> --------------------
--> maximum size reached
--> --------------------
¤ Dauer der Verarbeitung: 0.33 Sekunden
(vorverarbeitet)
¤
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung ist noch experimentell.