/* * Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 only, as * published by the Free Software Foundation. * * This code is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * version 2 for more details (a copy is included in the LICENSE file that * accompanied this code). * * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA * or visit www.oracle.com if you need additional information or have any * questions. *
*/
// Sign-extend the 32-bit value in lo into the register pair hi:lo.
// According to Intel Doc. AP-526, "Integer Divide", p.18.
void MacroAssembler::extend_sign(Register hi, Register lo) {
  if (VM_Version::is_P6() && hi == rdx && lo == rax) {
    // On P6+ with the operands already in rdx:rax, cdq is the cheapest form.
    cdql();
  } else {
    movl(hi, lo);
    sarl(hi, 31);
  }
}
// Jump to L if FPU condition flag C2 is set.
// fnstsw_ax copies the FPU status word into ax; C2 lands in AH and is
// transferred into the CPU parity flag by sahf, so the branch tests parity.
// rax is preserved across the sequence via save_rax/restore_rax using tmp.
void MacroAssembler::jC2(Register tmp, Label& L) { // set parity bit if FPU flag C2 is set (via rax)
  save_rax(tmp);
  fwait(); fnstsw_ax();
  sahf();
  restore_rax(tmp); // branch
  jcc(Assembler::parity, L);
}
// Jump to L if FPU condition flag C2 is NOT set.
// Same mechanism as jC2 (FPU status word -> ax -> EFLAGS via sahf),
// but branches on noParity. rax is preserved via save_rax/restore_rax.
void MacroAssembler::jnC2(Register tmp, Label& L) { // set parity bit if FPU flag C2 is set (via rax)
  save_rax(tmp);
  fwait(); fnstsw_ax();
  sahf();
  restore_rax(tmp); // branch
  jcc(Assembler::noParity, L);
}
// 32bit can do a case table jump in one instruction but we no longer allow the base // to be installed in the Address class void MacroAssembler::jump(ArrayAddress entry, Register rscratch) {
assert(rscratch == noreg, "not needed");
jmp(as_Address(entry, noreg));
}
// Note: y_lo will be destroyed void MacroAssembler::lcmp2int(Register x_hi, Register x_lo, Register y_hi, Register y_lo) { // Long compare for Java (semantics as described in JVM spec.)
Label high, low, done;
cmpl(x_hi, y_hi);
jcc(Assembler::less, low);
jcc(Assembler::greater, high); // x_hi is the return register
xorl(x_hi, x_hi);
cmpl(x_lo, y_lo);
jcc(Assembler::below, low);
jcc(Assembler::equal, done);
// Java shift left long support (semantics as described in JVM spec., p.305)
// (basic idea for shift counts s >= n: x << s == (x << n) << (s - n))
// shift value is in rcx !
void MacroAssembler::lshl(Register hi, Register lo) {
  assert(hi != rcx, "must not use rcx");
  assert(lo != rcx, "must not use rcx");
  const Register s = rcx;          // shift count
  const int      n = BitsPerWord;
  Label L;
  andl(s, 0x3f);                   // s := s & 0x3f (s < 0x40)
  cmpl(s, n);                      // if (s < n)
  jcc(Assembler::less, L);         // else (s >= n)
  movl(hi, lo);                    // x := x << n
  xorl(lo, lo);
  // Note: subl(s, n) is not needed since the Intel shift instructions work rcx mod n!
  bind(L);                         // s (mod n) < n
  shldl(hi, lo);                   // x := x << s
  shll(lo);
}
// Java shift right long support (semantics as described in JVM spec., p.306 & p.310)
// (basic idea for shift counts s >= n: x >> s == (x >> n) >> (s - n))
// shift value is in rcx !
void MacroAssembler::lshr(Register hi, Register lo, bool sign_extension) {
  assert(hi != rcx, "must not use rcx");
  assert(lo != rcx, "must not use rcx");
  const Register s = rcx;          // shift count
  const int      n = BitsPerWord;
  Label L;
  andl(s, 0x3f);                   // s := s & 0x3f (s < 0x40)
  cmpl(s, n);                      // if (s < n)
  jcc(Assembler::less, L);         // else (s >= n)
  movl(lo, hi);                    // x := x >> n
  if (sign_extension) sarl(hi, 31);
  else                xorl(hi, hi);
  // Note: subl(s, n) is not needed since the Intel shift instructions work rcx mod n!
  bind(L);                         // s (mod n) < n
  shrdl(lo, hi);                   // x := x >> s
  if (sign_extension) sarl(hi);
  else                shrl(hi);
}
void MacroAssembler::debug32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, intrax, int eip, char* msg) { // In order to get locks to work, we need to fake a in_VM state
JavaThread* thread = JavaThread::current();
JavaThreadState saved_state = thread->thread_state();
thread->set_thread_state(_thread_in_vm); if (ShowMessageBoxOnError) {
JavaThread* thread = JavaThread::current();
JavaThreadState saved_state = thread->thread_state();
thread->set_thread_state(_thread_in_vm); if (CountBytecodes || TraceBytecodes || StopInterpreterAt) {
ttyLocker ttyl;
BytecodeCounter::print();
} // To see where a verify_oop failed, get $ebx+40/X for this frame. // This is the value of eip which points to where verify_oop will return. if (os::message_box(msg, "Execution stopped, print registers?")) {
print_state32(rdi, rsi, rbp, rsp, rbx, rdx, rcx, rax, eip);
BREAKPOINT;
}
}
fatal("DEBUG MESSAGE: %s", msg);
}
void MacroAssembler::print_state32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip) {
ttyLocker ttyl;
FlagSetting fs(Debugging, true);
tty->print_cr("eip = 0x%08x", eip); #ifndef PRODUCT if ((WizardMode || Verbose) && PrintMiscellaneous) {
tty->cr();
findpc(eip);
tty->cr();
} #endif #define PRINT_REG(rax) \
{ tty->print("%s = ", #rax); os::print_location(tty, rax); }
PRINT_REG(rax);
PRINT_REG(rbx);
PRINT_REG(rcx);
PRINT_REG(rdx);
PRINT_REG(rdi);
PRINT_REG(rsi);
PRINT_REG(rbp);
PRINT_REG(rsp); #undef PRINT_REG // Print some words near top of staack. int* dump_sp = (int*) rsp; for (int col1 = 0; col1 < 8; col1++) {
tty->print("(rsp+0x%03x) 0x%08x: ", (int)((intptr_t)dump_sp - (intptr_t)rsp), (intptr_t)dump_sp);
os::print_location(tty, *dump_sp++);
} for (int row = 0; row < 16; row++) {
tty->print("(rsp+0x%03x) 0x%08x: ", (int)((intptr_t)dump_sp - (intptr_t)rsp), (intptr_t)dump_sp); for (int col = 0; col < 8; col++) {
tty->print(" 0x%08x", *dump_sp++);
}
tty->cr();
} // Print some instructions around pc:
Disassembler::decode((address)eip-64, (address)eip);
tty->print_cr("--------");
Disassembler::decode((address)eip, (address)eip+32);
}
// amd64 always does this as a pc-rel.
// We can be absolute or disp based on the instruction type:
// jmp/call are displacements, others are absolute.
Address MacroAssembler::as_Address(AddressLiteral adr) {
  assert(!adr.is_lval(), "must be rval");
  assert(reachable(adr), "must be");
  address target = adr.target();
  return Address(checked_cast<int32_t>(target - pc()), target, adr.reloc());
}
// Build an Address for an array element: materialize the array base into
// rscratch (clobbering it) and combine with the index/scale of adr.
Address MacroAssembler::as_Address(ArrayAddress adr, Register rscratch) {
  AddressLiteral base = adr.base();
  lea(rscratch, base);
  Address index = adr.index();
  assert(index._disp == 0, "must not have disp"); // maybe it can?
  return Address(rscratch, index._index, index._scale, index._disp);
}
// Call a leaf VM runtime routine, aligning the stack to 16 bytes if needed.
// NOTE(review): everything after the first jcc was missing in the extracted
// text and has been restored from the canonical implementation.
void MacroAssembler::call_VM_leaf_base(address entry_point, int num_args) {
  Label L, E;

#ifdef _WIN64
  // Windows always allocates space for it's register args
  assert(num_args <= 4, "only register arguments supported");
  subq(rsp, frame::arg_reg_save_area_bytes);
#endif

  // Align stack if necessary
  testl(rsp, 15);
  jcc(Assembler::zero, L);

  subq(rsp, 8);
  call(RuntimeAddress(entry_point));
  addq(rsp, 8);
  jmp(E);

  bind(L);
  call(RuntimeAddress(entry_point));

  bind(E);

#ifdef _WIN64
  // restore stack pointer
  addq(rsp, frame::arg_reg_save_area_bytes);
#endif
}
// Full implementation of Java ldiv and lrem; checks for special
// case as described in JVM spec., p.243 & p.271. The function
// returns the (pc) offset of the idivl instruction - may be needed
// for implicit exceptions.
//
//         normal case                           special case
//
// input : rax: dividend                         min_long
//         reg: divisor   (may not be eax/edx)   -1
//
// output: rax: quotient  (= rax idiv reg)       min_long
//         rdx: remainder (= rax irem reg)       0
int MacroAssembler::corrected_idivq(Register reg) {
  assert(reg != rax && reg != rdx, "reg cannot be rax or rdx register");
  static const int64_t min_long = 0x8000000000000000;
  Label normal_case, special_case;

  // check for special case: min_long / -1 would trap in idivq
  cmp64(rax, ExternalAddress((address) &min_long), rdx /*rscratch*/);
  jcc(Assembler::notEqual, normal_case);
  xorl(rdx, rdx); // prepare rdx for possible special case (where
                  // remainder = 0)
  cmpq(reg, -1);
  jcc(Assembler::equal, special_case);

  // handle normal case
  bind(normal_case);
  cdqq();
  int idivq_offset = offset();
  idivq(reg);

  // normal and special case exit
  bind(special_case);

  return idivq_offset;
}
// 32bit can do a case table jump in one instruction but we no longer allow the base // to be installed in the Address class void MacroAssembler::jump(ArrayAddress entry, Register rscratch) {
lea(rscratch, entry.base());
Address dispatch = entry.index();
assert(dispatch._base == noreg, "must be");
dispatch._base = rscratch;
jmp(dispatch);
}
// 64-bit variant: longs live in single registers, so the hi/lo pair form
// must never be reached; a direct cmpq is all a caller needs.
void MacroAssembler::lcmp2int(Register x_hi, Register x_lo, Register y_hi, Register y_lo) {
  ShouldNotReachHere(); // 64bit doesn't use two regs
  cmpq(x_lo, y_lo);
}
// src should NEVER be a real pointer. Use AddressLiteral for true pointers void MacroAssembler::movptr(Address dst, intptr_t src, Register rscratch) { if (is_simm32(src)) {
movptr(dst, checked_cast<int32_t>(src));
} else {
mov64(rscratch, src);
movq(dst, rscratch);
}
}
void MacroAssembler::debug64(char* msg, int64_t pc, int64_t regs[]) { // In order to get locks to work, we need to fake a in_VM state if (ShowMessageBoxOnError) {
JavaThread* thread = JavaThread::current();
JavaThreadState saved_state = thread->thread_state();
thread->set_thread_state(_thread_in_vm); #ifndef PRODUCT if (CountBytecodes || TraceBytecodes || StopInterpreterAt) {
ttyLocker ttyl;
BytecodeCounter::print();
} #endif // To see where a verify_oop failed, get $ebx+40/X for this frame. // XXX correct this offset for amd64 // This is the value of eip which points to where verify_oop will return. if (os::message_box(msg, "Execution stopped, print registers?")) {
print_state64(pc, regs);
BREAKPOINT;
}
}
fatal("DEBUG MESSAGE: %s", msg);
}
// Dump the 64-bit register state (regs[] as laid out by pusha), a window of
// stack words, and a disassembly window around pc to the tty.
void MacroAssembler::print_state64(int64_t pc, int64_t regs[]) {
  ttyLocker ttyl;
  FlagSetting fs(Debugging, true);
  tty->print_cr("rip = 0x%016lx", (intptr_t)pc);
#ifndef PRODUCT
  tty->cr();
  findpc(pc);
  tty->cr();
#endif
#define PRINT_REG(rax, value) \
  { tty->print("%s = ", #rax); os::print_location(tty, value); }
  PRINT_REG(rax, regs[15]);
  PRINT_REG(rbx, regs[12]);
  PRINT_REG(rcx, regs[14]);
  PRINT_REG(rdx, regs[13]);
  PRINT_REG(rdi, regs[8]);
  PRINT_REG(rsi, regs[9]);
  PRINT_REG(rbp, regs[10]);
  // rsp is actually not stored by pusha(), compute the old rsp from regs (rsp after pusha): regs + 16 = old rsp
  PRINT_REG(rsp, (intptr_t)(&regs[16]));
  PRINT_REG(r8 , regs[7]);
  PRINT_REG(r9 , regs[6]);
  PRINT_REG(r10, regs[5]);
  PRINT_REG(r11, regs[4]);
  PRINT_REG(r12, regs[3]);
  PRINT_REG(r13, regs[2]);
  PRINT_REG(r14, regs[1]);
  PRINT_REG(r15, regs[0]);
#undef PRINT_REG
  // Print some words near the top of the stack.
  int64_t* rsp = &regs[16];
  int64_t* dump_sp = rsp;
  for (int col1 = 0; col1 < 8; col1++) {
    tty->print("(rsp+0x%03x) 0x%016lx: ", (int)((intptr_t)dump_sp - (intptr_t)rsp), (intptr_t)dump_sp);
    os::print_location(tty, *dump_sp++);
  }
  for (int row = 0; row < 25; row++) {
    tty->print("(rsp+0x%03x) 0x%016lx: ", (int)((intptr_t)dump_sp - (intptr_t)rsp), (intptr_t)dump_sp);
    for (int col = 0; col < 4; col++) {
      tty->print(" 0x%016lx", (intptr_t)*dump_sp++);
    }
    tty->cr();
  }
  // Print some instructions around pc:
  Disassembler::decode((address)pc-64, (address)pc);
  tty->print_cr("--------");
  Disassembler::decode((address)pc, (address)pc+32);
}
// The java_calling_convention describes stack locations as ideal slots on // a frame with no abi restrictions. Since we must observe abi restrictions // (like the placement of the register window) the slots must be biased by // the following value. staticint reg2offset_in(VMReg r) { // Account for saved rbp and return address // This should really be in_preserve_stack_slots return (r->reg2stack() + 4) * VMRegImpl::stack_slot_size;
}
// A double move void MacroAssembler::double_move(VMRegPair src, VMRegPair dst, Register tmp, int in_stk_bias, int out_stk_bias) {
// The calling conventions assures us that each VMregpair is either // all really one physical register or adjacent stack slots.
if (src.is_single_phys_reg() ) { if (dst.is_single_phys_reg()) { // In theory these overlap but the ordering is such that this is likely a nop if ( src.first() != dst.first()) {
movdbl(dst.first()->as_XMMRegister(), src.first()->as_XMMRegister());
}
} else {
assert(dst.is_single_reg(), "not a stack pair");
movdbl(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), src.first()->as_XMMRegister());
}
} elseif (dst.is_single_phys_reg()) {
assert(src.is_single_reg(), "not a stack pair");
movdbl(dst.first()->as_XMMRegister(), Address(rbp, reg2offset_in(src.first()) + in_stk_bias));
} else {
assert(src.is_single_reg() && dst.is_single_reg(), "not stack pairs");
movq(tmp, Address(rbp, reg2offset_in(src.first()) + in_stk_bias));
movq(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), tmp);
}
}
// A float arg may have to do float reg int reg conversion void MacroAssembler::float_move(VMRegPair src, VMRegPair dst, Register tmp, int in_stk_bias, int out_stk_bias) {
assert(!src.second()->is_valid() && !dst.second()->is_valid(), "bad float_move");
// The calling conventions assures us that each VMregpair is either // all really one physical register or adjacent stack slots.
if (src.first()->is_stack()) { if (dst.first()->is_stack()) {
movl(tmp, Address(rbp, reg2offset_in(src.first()) + in_stk_bias));
movptr(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), tmp);
} else { // stack to reg
assert(dst.first()->is_XMMRegister(), "only expect xmm registers as parameters");
movflt(dst.first()->as_XMMRegister(), Address(rbp, reg2offset_in(src.first()) + in_stk_bias));
}
} elseif (dst.first()->is_stack()) { // reg to stack
assert(src.first()->is_XMMRegister(), "only expect xmm registers as parameters");
movflt(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), src.first()->as_XMMRegister());
} else { // reg to reg // In theory these overlap but the ordering is such that this is likely a nop if ( src.first() != dst.first()) {
movdbl(dst.first()->as_XMMRegister(), src.first()->as_XMMRegister());
}
}
}
// On 64 bit we will store integer like items to the stack as // 64 bits items (x86_32/64 abi) even though java would only store // 32bits for a parameter. On 32bit it will simply be 32 bits // So this routine will do 32->32 on 32bit and 32->64 on 64bit void MacroAssembler::move32_64(VMRegPair src, VMRegPair dst, Register tmp, int in_stk_bias, int out_stk_bias) { if (src.first()->is_stack()) { if (dst.first()->is_stack()) { // stack to stack
movslq(tmp, Address(rbp, reg2offset_in(src.first()) + in_stk_bias));
movq(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), tmp);
} else { // stack to reg
movslq(dst.first()->as_Register(), Address(rbp, reg2offset_in(src.first()) + in_stk_bias));
}
} elseif (dst.first()->is_stack()) { // reg to stack // Do we really have to sign extend??? // __ movslq(src.first()->as_Register(), src.first()->as_Register());
movq(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), src.first()->as_Register());
} else { // Do we really have to sign extend??? // __ movslq(dst.first()->as_Register(), src.first()->as_Register()); if (dst.first() != src.first()) {
movq(dst.first()->as_Register(), src.first()->as_Register());
}
}
}
// Move a pointer-sized value between any combination of stack slots and
// registers; stack-to-stack goes through rax.
void MacroAssembler::move_ptr(VMRegPair src, VMRegPair dst) {
  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack
      movq(rax, Address(rbp, reg2offset_in(src.first())));
      movq(Address(rsp, reg2offset_out(dst.first())), rax);
    } else {
      // stack to reg
      movq(dst.first()->as_Register(), Address(rbp, reg2offset_in(src.first())));
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    movq(Address(rsp, reg2offset_out(dst.first())), src.first()->as_Register());
  } else {
    if (dst.first() != src.first()) {
      movq(dst.first()->as_Register(), src.first()->as_Register());
    }
  }
}
// An oop arg. Must pass a handle not the oop itself void MacroAssembler::object_move(OopMap* map, int oop_handle_offset, int framesize_in_slots,
VMRegPair src,
VMRegPair dst, bool is_receiver, int* receiver_offset) {
// must pass a handle. First figure out the location we use as a handle
// Oop is already on the stack as an argument int offset_in_older_frame = src.first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + framesize_in_slots)); if (is_receiver) {
*receiver_offset = (offset_in_older_frame + framesize_in_slots) * VMRegImpl::stack_slot_size;
}
map->set_oop(VMRegImpl::stack2reg(oop_slot)); // Store oop in handle area, may be NULL
movptr(Address(rsp, offset), rOop); if (is_receiver) {
*receiver_offset = offset;
}
cmpptr(rOop, NULL_WORD);
lea(rHandle, Address(rsp, offset)); // conditionally move a NULL from the handle area where it was just stored
cmovptr(Assembler::equal, rHandle, Address(rsp, offset));
}
// If arg is on the stack then place it otherwise it is already in correct reg. if (dst.first()->is_stack()) {
movptr(Address(rsp, reg2offset_out(dst.first())), rHandle);
}
}
// See 8273459. Function for ensuring 64-byte alignment, intended for stubs only. // Stub code is generated once and never copied. // NMethods can't use this because they get copied and we can't force alignment > 32 bytes. void MacroAssembler::align64() {
align(64, (unsignedlonglong) pc());
}
// Align the current code offset to 'modulus' bytes, padding with nops.
void MacroAssembler::align(int modulus) { // 8273459: Ensure alignment is possible with current segment alignment
  assert(modulus <= CodeEntryAlignment, "Alignment must be <= CodeEntryAlignment");
  align(modulus, offset());
}
// Pad with nops until 'target' is congruent to 0 modulo 'modulus'.
void MacroAssembler::align(int modulus, int target) {
  int remainder = target % modulus;
  if (remainder != 0) {
    nop(modulus - remainder);
  }
}
// Writes to stack successive pages until offset reached to check for // stack overflow + shadow pages. This clobbers tmp. void MacroAssembler::bang_stack_size(Register size, Register tmp) {
movptr(tmp, rsp); // Bang stack for total size given plus shadow page size. // Bang one page at a time because large size can bang beyond yellow and // red zones.
Label loop;
bind(loop);
movl(Address(tmp, (-os::vm_page_size())), size );
subptr(tmp, os::vm_page_size());
subl(size, os::vm_page_size());
jcc(Assembler::greater, loop);
// Bang down shadow pages too. // At this point, (tmp-0) is the last address touched, so don't // touch it again. (It was touched as (tmp-pagesize) but then tmp // was post-decremented.) Skip this address by starting at i=1, and // touch a few more pages below. N.B. It is important to touch all // the way down including all pages in the shadow zone. for (int i = 1; i < ((int)StackOverflow::stack_shadow_zone_size() / os::vm_page_size()); i++) { // this could be any sized move but this is can be a debugging crumb // so the bigger the better.
movptr(Address(tmp, (-i*os::vm_page_size())), size );
}
}
void MacroAssembler::reserved_stack_check() { // testing if reserved zone needs to be enabled
Label no_reserved_zone_enabling; Register thread = NOT_LP64(rsi) LP64_ONLY(r15_thread);
NOT_LP64(get_thread(rsi);)
// Normalize a C-style boolean to Java 0/1.
void MacroAssembler::c2bool(Register x) { // implements x == 0 ? 0 : 1 // note: must only look at least-significant byte of x // since C-style booleans are stored in one byte // only! (was bug)
  andl(x, 0xFF);
  setb(Assembler::notZero, x);
}
// Wouldn't need if AddressLiteral version had new name void MacroAssembler::call(Label& L, relocInfo::relocType rtype) {
Assembler::call(L, rtype);
}
// Emit the static call stub: a to-be-patched Method* load plus a jump.
void MacroAssembler::emit_static_call_stub() { // Static stub relocation also tags the Method* in the code-stream.
  mov_metadata(rbx, (Metadata*) NULL); // Method is zapped till fixup time. // This is recognized as unresolved by relocs/nativeinst/ic code.
  // Self-referential jump; the target is patched when the call is resolved.
  jump(RuntimeAddress(pc()));
}
// Common bottleneck for calls into the VM runtime: establishes the last
// Java frame, performs the call, restores the thread register, optionally
// forwards pending exceptions, and fetches an oop result if requested.
// NOTE(review): the extracted text contained HTML residue in the asserts and
// was missing the 64-bit exception-forwarding branch and the argument setup;
// both restored from the canonical implementation — confirm against upstream.
void MacroAssembler::call_VM_base(Register oop_result,
                                  Register java_thread,
                                  Register last_java_sp,
                                  address  entry_point,
                                  int      number_of_arguments,
                                  bool     check_exceptions) {
  // determine java_thread register
  if (!java_thread->is_valid()) {
#ifdef _LP64
    java_thread = r15_thread;
#else
    java_thread = rdi;
    get_thread(java_thread);
#endif // LP64
  }
  // determine last_java_sp register
  if (!last_java_sp->is_valid()) {
    last_java_sp = rsp;
  }
  // debugging support
  assert(number_of_arguments >= 0   , "cannot have negative number of arguments");
  LP64_ONLY(assert(java_thread == r15_thread, "unexpected register"));
#ifdef ASSERT
  // TraceBytecodes does not use r12 but saves it over the call, so don't verify
  // r12 is the heapbase.
  LP64_ONLY(if (UseCompressedOops && !TraceBytecodes) verify_heapbase("call_VM_base: heap base corrupted?");)
#endif // ASSERT

  assert(java_thread != oop_result  , "cannot use the same register for java_thread & oop_result");
  assert(java_thread != last_java_sp, "cannot use the same register for java_thread & last_java_sp");

  // push java thread (becomes first argument of C function)
  NOT_LP64(push(java_thread); number_of_arguments++);
  LP64_ONLY(mov(c_rarg0, r15_thread));

  // set last Java frame before call
  assert(last_java_sp != rbp, "can't use ebp/rbp");

  // Only interpreter should have to set fp
  set_last_Java_frame(java_thread, last_java_sp, rbp, NULL, rscratch1);

  // do the call, remove parameters
  MacroAssembler::call_VM_leaf_base(entry_point, number_of_arguments);

  // restore the thread (cannot use the pushed argument since arguments
  // may be overwritten by C code generated by an optimizing compiler);
  // however can use the register value directly if it is callee saved.
  if (LP64_ONLY(true ||) java_thread == rdi || java_thread == rsi) {
    // rdi & rsi (also r15) are callee saved -> nothing to do
#ifdef ASSERT
    guarantee(java_thread != rax, "change this code");
    push(rax);
    { Label L;
      get_thread(rax);
      cmpptr(java_thread, rax);
      jcc(Assembler::equal, L);
      STOP("MacroAssembler::call_VM_base: rdi not callee saved?");
      bind(L);
    }
    pop(rax);
#endif
  } else {
    get_thread(java_thread);
  }
  // reset last Java frame
  // Only interpreter should have to clear fp
  reset_last_Java_frame(java_thread, true);

  // C++ interp handles this in the interpreter
  check_and_handle_popframe(java_thread);
  check_and_handle_earlyret(java_thread);

  if (check_exceptions) {
    // check for pending exceptions (java_thread is set upon return)
    cmpptr(Address(java_thread, Thread::pending_exception_offset()), NULL_WORD);
#ifndef _LP64
    jump_cc(Assembler::notEqual,
            RuntimeAddress(StubRoutines::forward_exception_entry()));
#else
    // This used to conditionally jump to forward_exception however it is
    // possible if we relocate that the branch will not reach. So we must jump
    // around so we can always reach
    Label ok;
    jcc(Assembler::equal, ok);
    jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
    bind(ok);
#endif // LP64
  }

  // get oop result if there is one and reset the value in the thread
  if (oop_result->is_valid()) {
    get_vm_result(oop_result, java_thread);
  }
}
// NOTE(review): the body of this method was missing from the extracted text
// and has been restored from the canonical implementation — confirm upstream.
void MacroAssembler::call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions) {
  // Calculate the value for last_Java_sp
  // somewhat subtle. call_VM does an intermediate call
  // which places a return address on the stack just under the
  // stack pointer as the user finished with it. This allows
  // use to retrieve last_Java_pc from last_Java_sp[-1].
  // On 32bit we then have to push additional args on the stack to accomplish
  // the actual requested call. On 64bit call_VM only can use register args
  // so the only extra space is the return address that call_VM created.
  // This hopefully explains the calculations here.

#ifdef _LP64
  // We've pushed one address, correct last_Java_sp
  lea(rax, Address(rsp, wordSize));
#else
  lea(rax, Address(rsp, (1 + number_of_arguments) * wordSize));
#endif // LP64

  call_VM_base(oop_result, noreg, rax, entry_point, number_of_arguments, check_exceptions);
}
// Use this method when MacroAssembler version of call_VM_leaf_base() should be called from Interpreter. void MacroAssembler::call_VM_leaf0(address entry_point) {
MacroAssembler::call_VM_leaf_base(entry_point, 0);
}
// Call a leaf VM routine with the given number of (already placed) arguments.
void MacroAssembler::call_VM_leaf(address entry_point, int number_of_arguments) {
  call_VM_leaf_base(entry_point, number_of_arguments);
}
// Full implementation of Java idiv and irem; checks for
// special case as described in JVM spec., p.243 & p.271.
// The function returns the (pc) offset of the idivl
// instruction - may be needed for implicit exceptions.
//
//         normal case                           special case
//
// input : rax,: dividend                        min_int
//         reg: divisor   (may not be rax,/rdx)  -1
//
// output: rax,: quotient  (= rax, idiv reg)     min_int
//         rdx: remainder (= rax, irem reg)      0
int MacroAssembler::corrected_idivl(Register reg) {
  assert(reg != rax && reg != rdx, "reg cannot be rax, or rdx register");
  const int min_int = 0x80000000;
  Label normal_case, special_case;

  // check for special case: min_int / -1 would trap in idivl
  cmpl(rax, min_int);
  jcc(Assembler::notEqual, normal_case);
  xorl(rdx, rdx); // prepare rdx for possible special case (where remainder = 0)
  cmpl(reg, -1);
  jcc(Assembler::equal, special_case);

  // handle normal case
  bind(normal_case);
  cdql();
  int idivl_offset = offset();
  idivl(reg);

  // normal and special case exit
  bind(special_case);

  return idivl_offset;
}
void MacroAssembler::decrementl(Address dst, int value) {
--> --------------------
--> maximum size reached
--> --------------------
¤ Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.0.65Bemerkung:
(vorverarbeitet)
¤
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung ist noch experimentell.