/*
 * Copyright (c) 2002, 2022, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2021 SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */
// MacroAssembler extends Assembler by a few frequently used macros.

// Forward declarations (one declaration per line).
class ciTypeArray;
class OopMap;
class MacroAssembler: public Assembler { public:
MacroAssembler(CodeBuffer* code) : Assembler(code) {}
// Indicates whether and, if so, which registers must be preserved when calling runtime code. enum PreservationLevel {
PRESERVATION_NONE,
PRESERVATION_FRAME_LR,
PRESERVATION_FRAME_LR_GP_REGS,
PRESERVATION_FRAME_LR_GP_FP_REGS
};
// load d = *[a+si31] // Emits several instructions if the offset is not encodable in one instruction. void ld_largeoffset_unchecked(Register d, int si31, Register a, int emit_filler_nop); void ld_largeoffset (Register d, int si31, Register a, int emit_filler_nop); inlinestaticbool is_ld_largeoffset(address a); inlinestaticint get_ld_largeoffset_offset(address a);
inlinevoid round_to(Register r, int modulus);
// Load/store with type given by parameter. void load_sized_value( Register dst, RegisterOrConstant offs, Register base, size_t size_in_bytes, bool is_signed); void store_sized_value(Register dst, RegisterOrConstant offs, Register base, size_t size_in_bytes);
// Move register if destination register and target register are different inlinevoid mr_if_needed(Register rd, Register rs); inlinevoid fmr_if_needed(FloatRegister rd, FloatRegister rs); // This is dedicated for emitting scheduled mach nodes. For better // readability of the ad file I put it here. // Endgroups are not needed if // - the scheduler is off // - the scheduler found that there is a natural group end, in that // case it reduced the size of the instruction used in the test // yielding 'needed'. inlinevoid endgroup_if_needed(bool needed);
// nop padding void align(int modulus, int max = 252, int rem = 0);
// Align prefix opcode to make sure it's not on the last word of a // 64-byte block. // // Note: do not call align_prefix() in a .ad file (e.g. ppc.ad). Instead // add ins_alignment(2) to the instruct definition and implement the // compute_padding() method of the instruct node to use // compute_prefix_padding(). See loadConI32Node::compute_padding() in // ppc.ad for an example. void align_prefix();
// // Constants, loading constants, TOC support //
// Address of the global TOC. inlinestatic address global_toc(); // Offset of given address to the global TOC. inlinestaticint offset_to_global_toc(const address addr);
// Address of TOC of the current method. inline address method_toc(); // Offset of given address to TOC of the current method. inlineint offset_to_method_toc(const address addr);
inlinestaticbool is_calculate_address_from_global_toc_at(address a, address bound); // Returns address of first instruction in sequence. static address patch_calculate_address_from_global_toc_at(address a, address bound, address addr); static address get_address_of_calculate_address_from_global_toc_at(address a, address addr);
#ifdef _LP64 // Patch narrow oop constant. inlinestaticbool is_set_narrow_oop(address a, address bound); // Returns address of first instruction in sequence. static address patch_set_narrow_oop(address a, address bound, narrowOop data); static narrowOop get_narrow_oop(address a, address bound); #endif
inlinestaticbool is_load_const_at(address a);
// Emits an oop const to the constant pool, loads the constant, and // sets a relocation info with address current_pc. // Returns true if successful. bool load_const_from_method_toc(Register dst, AddressLiteral& a, Register toc, boolfixed_size = false);
// Get the 64 bit constant from a `load_const' sequence. staticlong get_const(address load_const);
// Patch the 64 bit constant of a `load_const' sequence. This is a // low level procedure. It neither flushes the instruction cache nor // is it atomic. staticvoid patch_const(address load_const, long x);
// Metadata in code that we have to keep track of.
AddressLiteral allocate_metadata_address(Metadata* obj); // allocate_index
AddressLiteral constant_metadata_address(Metadata* obj); // find_index // Oops used directly in compiled code are stored in the constant pool, // and loaded from there. // Allocate new entry for oop in constant pool. Generate relocation.
AddressLiteral allocate_oop_address(jobject obj); // Find oop obj in constant pool. Return relocation with it's index.
AddressLiteral constant_oop_address(jobject obj);
// Find oop in constant pool and emit instructions to load it. // Uses constant_oop_address. inlinevoid set_oop_constant(jobject obj, Register d); // Same as load_address. inlinevoid set_oop (AddressLiteral obj_addr, Register d);
// // branch, jump // // set dst to -1, 0, +1 as follows: if CCR0bi is "greater than", dst is set to 1, // if CCR0bi is "equal", dst is set to 0, otherwise it's set to -1. voidinline set_cmp3(Register dst); // set dst to (treat_unordered_like_less ? -1 : +1) voidinline set_cmpu3(Register dst, bool treat_unordered_like_less);
// Conditional far branch for destinations encodable in 24+2 bits. // Same interface as bc, e.g. no inverse boint-field. enum {
bc_far_optimize_not = 0,
bc_far_optimize_on_relocate = 1
}; // optimize: flag for telling the conditional far branch to optimize // itself when relocated. void bc_far(int boint, int biint, Label& dest, int optimize); void bc_far_optimized(int boint, int biint, Label& dest); // 1 or 2 instructions // Relocation of conditional far branches. staticbool is_bc_far_at(address instruction_addr); static address get_dest_of_bc_far_at(address instruction_addr); staticvoid set_dest_of_bc_far_at(address instruction_addr, address dest); private: staticboolinline is_bc_far_variant1_at(address instruction_addr); staticboolinline is_bc_far_variant2_at(address instruction_addr); staticboolinline is_bc_far_variant3_at(address instruction_addr); public:
// Convenience bc_far versions. inlinevoid blt_far(ConditionRegister crx, Label& L, int optimize); inlinevoid bgt_far(ConditionRegister crx, Label& L, int optimize); inlinevoid beq_far(ConditionRegister crx, Label& L, int optimize); inlinevoid bso_far(ConditionRegister crx, Label& L, int optimize); inlinevoid bge_far(ConditionRegister crx, Label& L, int optimize); inlinevoid ble_far(ConditionRegister crx, Label& L, int optimize); inlinevoid bne_far(ConditionRegister crx, Label& L, int optimize); inlinevoid bns_far(ConditionRegister crx, Label& L, int optimize);
// Emit, identify and patch a NOT mt-safe patchable 64 bit absolute call/jump. private: enum {
bxx64_patchable_instruction_count = (2/*load_codecache_const*/ + 3/*5load_const*/ + 1/*mtctr*/ + 1/*bctrl*/),
bxx64_patchable_size = bxx64_patchable_instruction_count * BytesPerInstWord,
bxx64_patchable_ret_addr_offset = bxx64_patchable_size
}; void bxx64_patchable(address target, relocInfo::relocType rt, bool link); staticbool is_bxx64_patchable_at( address instruction_addr, bool link); // Does the instruction use a pc-relative encoding of the destination? staticbool is_bxx64_patchable_pcrelative_at( address instruction_addr, bool link); staticbool is_bxx64_patchable_variant1_at( address instruction_addr, bool link); // Load destination relative to global toc. staticbool is_bxx64_patchable_variant1b_at( address instruction_addr, bool link); staticbool is_bxx64_patchable_variant2_at( address instruction_addr, bool link); staticvoid set_dest_of_bxx64_patchable_at( address instruction_addr, address target, bool link); static address get_dest_of_bxx64_patchable_at(address instruction_addr, bool link);
// Clobbers all volatile, (non-floating-point) general-purpose registers for debugging purposes. // This is especially useful for making calls to the JRT in places in which this hasn't been done before; // e.g. with the introduction of LRBs (load reference barriers) for concurrent garbage collection. void clobber_volatile_gprs(Register excluded_register = noreg); void clobber_carg_stack_slots(Register tmp);
void save_nonvolatile_gprs( Register dst_base, int offset); void restore_nonvolatile_gprs(Register src_base, int offset);
void save_volatile_gprs( Register dst_base, int offset, bool include_fp_regs = true, bool include_R3_RET_reg = true); void restore_volatile_gprs(Register src_base, int offset, bool include_fp_regs = true, bool include_R3_RET_reg = true); void save_LR_CR( Register tmp); // tmp contains LR on return. void restore_LR_CR(Register tmp);
// Get current PC using bl-next-instruction trick.
address get_PC_trash_LR(Register result);
// Resize current frame either relatively wrt to current SP or absolute. void resize_frame(Register offset, Register tmp); void resize_frame(int offset, Register tmp); void resize_frame_absolute(Register addr, Register tmp1, Register tmp2);
// Push a frame of size bytes. void push_frame(Register bytes, Register tmp);
// Push a frame of size `bytes'. No abi space provided. void push_frame(unsignedint bytes, Register tmp);
// Push a frame of size `bytes' plus abi_reg_args on top. void push_frame_reg_args(unsignedint bytes, Register tmp);
// Setup up a new C frame with a spill area for non-volatile GPRs and additional // space for local variables void push_frame_reg_args_nonvolatiles(unsignedint bytes, Register tmp);
// pop current C frame void pop_frame();
// // Calls //
private:
address _last_calls_return_pc;
#ifdefined(ABI_ELFv2) // Generic version of a call to C function. // Updates and returns _last_calls_return_pc.
address branch_to(Register function_entry, bool and_link); #else // Generic version of a call to C function via a function descriptor // with variable support for C calling conventions (TOC, ENV, etc.). // updates and returns _last_calls_return_pc.
address branch_to(Register function_descriptor, bool and_link, bool save_toc_before_call, bool restore_toc_after_call, bool load_toc_of_callee, bool load_env_of_callee); #endif
public:
// Get the pc where the last call will return to. returns _last_calls_return_pc. inline address last_calls_return_pc();
#ifdefined(ABI_ELFv2) // Call a C function via a function descriptor and use full C // calling conventions. Updates and returns _last_calls_return_pc.
address call_c(Register function_entry); // For tail calls: only branch, don't link, so callee returns to caller of this function.
address call_c_and_return_to_caller(Register function_entry);
address call_c(address function_entry, relocInfo::relocType rt); #else // Call a C function via a function descriptor and use full C // calling conventions. Updates and returns _last_calls_return_pc.
address call_c(Register function_descriptor); // For tail calls: only branch, don't link, so callee returns to caller of this function.
address call_c_and_return_to_caller(Register function_descriptor);
address call_c(const FunctionDescriptor* function_descriptor, relocInfo::relocType rt);
address call_c_using_toc(const FunctionDescriptor* function_descriptor, relocInfo::relocType rt, Register toc); #endif
protected:
// It is imperative that all calls into the VM are handled via the // call_VM macros. They make sure that the stack linkage is setup // correctly. call_VM's correspond to ENTRY/ENTRY_X entry points // while call_VM_leaf's correspond to LEAF entry points. // // This is the base routine called by the different versions of // call_VM. The interpreter may customize this version by overriding // it for its purposes (e.g., to save/restore additional registers // when doing a VM call). // // If no last_java_sp is specified (noreg) then SP will be used instead. virtualvoid call_VM_base( // where an oop-result ends up if any; use noreg otherwise Register oop_result, // to set up last_Java_frame in stubs; use noreg otherwise Register last_java_sp, // the entry point
address entry_point, // flag which indicates if exception should be checked bool check_exception = true
);
// Support for VM calls. This is the base routine called by the // different versions of call_VM_leaf. The interpreter may customize // this version by overriding it for its purposes (e.g., to // save/restore additional registers when doing a VM call). void call_VM_leaf_base(address entry_point);
public: // Call into the VM. // Passes the thread pointer (in R3_ARG1) as a prepended argument. // Makes sure oop return values are visible to the GC. void call_VM(Register oop_result, address entry_point, bool check_exceptions = true); void call_VM(Register oop_result, address entry_point, Register arg_1, bool check_exceptions = true); void call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, bool check_exceptions = true); void call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, Register arg3, bool check_exceptions = true); void call_VM_leaf(address entry_point); void call_VM_leaf(address entry_point, Register arg_1); void call_VM_leaf(address entry_point, Register arg_1, Register arg_2); void call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3);
// Call a stub function via a function descriptor, but don't save // TOC before call, don't setup TOC and ENV for call, and don't // restore TOC after call. Updates and returns _last_calls_return_pc. inline address call_stub(Register function_entry); inlinevoid call_stub_and_return_to(Register function_entry, Register return_pc);
void post_call_nop();
// // Java utilities //
// Read from the polling page, its address is already in a register. inlinevoid load_from_polling_page(Register polling_page_address, int offset = 0); // Check whether instruction is a read access to the polling page // which was emitted by load_from_polling_page(..). staticbool is_load_from_polling_page(int instruction, void* ucontext/*may be NULL*/,
address* polling_address_ptr = NULL);
// Support for NULL-checks // // Generates code that causes a NULL OS exception if the content of reg is NULL. // If the accessed location is M[reg + offset] and the offset is known, provide the // offset. No explicit code generation is needed if the offset is within a certain // range (0 <= offset <= page_size).
// If instruction is a stack bang of the form ld, stdu, or // stdux, return the banged address. Otherwise, return 0. static address get_stack_bang_address(int instruction, void* ucontext);
// Check for reserved stack access in method being exited. If the reserved // stack area was accessed, protect it again and throw StackOverflowError. void reserved_stack_check(Register return_pc);
// Atomics // CmpxchgX sets condition register to cmpX(current, compare). // (flag == ne) => (dest_current_value != compare_value), (!swapped) // (flag == eq) => (dest_current_value == compare_value), ( swapped) staticinlinebool cmpxchgx_hint_acquire_lock() { returntrue; } // The stxcx will probably not be succeeded by a releasing store. staticinlinebool cmpxchgx_hint_release_lock() { returnfalse; } staticinlinebool cmpxchgx_hint_atomic_update() { returnfalse; }
// Cmpxchg semantics enum {
MemBarNone = 0,
MemBarRel = 1,
MemBarAcq = 2,
MemBarFenceAfter = 4 // use powers of 2
}; private: // Helper functions for word/sub-word atomics. void atomic_get_and_modify_generic(Register dest_current_value, Register exchange_value, Register addr_base, Register tmp1, Register tmp2, Register tmp3, bool cmpxchgx_hint, bool is_add, int size); void cmpxchg_loop_body(ConditionRegister flag, Register dest_current_value, Register compare_value, Register exchange_value, Register addr_base, Register tmp1, Register tmp2,
Label &retry, Label &failed, bool cmpxchgx_hint, int size); void cmpxchg_generic(ConditionRegister flag, Register dest_current_value, Register compare_value, Register exchange_value, Registeraddr_base, Register tmp1, Register tmp2, int semantics, bool cmpxchgx_hint, Register int_flag_success, bool contention_hint, boolweak, int size); public: // Temps and addr_base are killed if processor does not support Power 8 instructions. // Result will be sign extended. void getandsetb(Register dest_current_value, Register exchange_value, Register addr_base, Register tmp1, Register tmp2, Register tmp3, bool cmpxchgx_hint) {
atomic_get_and_modify_generic(dest_current_value, exchange_value, addr_base, tmp1, tmp2, tmp3, cmpxchgx_hint, false, 1);
} // Temps and addr_base are killed if processor does not support Power 8 instructions. // Result will be sign extended. void getandseth(Register dest_current_value, Register exchange_value, Register addr_base, Register tmp1, Register tmp2, Register tmp3, bool cmpxchgx_hint) {
atomic_get_and_modify_generic(dest_current_value, exchange_value, addr_base, tmp1, tmp2, tmp3, cmpxchgx_hint, false, 2);
} void getandsetw(Register dest_current_value, Register exchange_value, Register addr_base, bool cmpxchgx_hint) {
atomic_get_and_modify_generic(dest_current_value, exchange_value, addr_base, noreg, noreg, noreg, cmpxchgx_hint, false, 4);
} void getandsetd(Register dest_current_value, Register exchange_value, Register addr_base, bool cmpxchgx_hint); // tmp2/3 and addr_base are killed if processor does not support Power 8 instructions (tmp1 is always needed). // Result will be sign extended. void getandaddb(Register dest_current_value, Register inc_value, Register addr_base, Register tmp1, Register tmp2, Register tmp3, bool cmpxchgx_hint) {
atomic_get_and_modify_generic(dest_current_value, inc_value, addr_base, tmp1, tmp2, tmp3, cmpxchgx_hint, true, 1);
} // tmp2/3 and addr_base are killed if processor does not support Power 8 instructions (tmp1 is always needed). // Result will be sign extended. void getandaddh(Register dest_current_value, Register inc_value, Register addr_base, Register tmp1, Register tmp2, Register tmp3, bool cmpxchgx_hint) {
atomic_get_and_modify_generic(dest_current_value, inc_value, addr_base, tmp1, tmp2, tmp3, cmpxchgx_hint, true, 2);
} void getandaddw(Register dest_current_value, Register inc_value, Register addr_base, Register tmp1, bool cmpxchgx_hint) {
atomic_get_and_modify_generic(dest_current_value, inc_value, addr_base, tmp1, noreg, noreg, cmpxchgx_hint, true, 4);
} void getandaddd(Register dest_current_value, Register exchange_value, Register addr_base, Register tmp, bool cmpxchgx_hint); // Temps, addr_base and exchange_value are killed if processor does not support Power 8 instructions. // compare_value must be at least 32 bit sign extended. Result will be sign extended. void cmpxchgb(ConditionRegister flag, Register dest_current_value, Register compare_value, Register exchange_value, Registeraddr_base, Register tmp1, Register tmp2, int semantics, bool cmpxchgx_hint = false, Register int_flag_success = noreg, bool contention_hint = false, bool weak = false) {
cmpxchg_generic(flag, dest_current_value, compare_value, exchange_value, addr_base, tmp1, tmp2,
semantics, cmpxchgx_hint, int_flag_success, contention_hint, weak, 1);
} // Temps, addr_base and exchange_value are killed if processor does not support Power 8 instructions. // compare_value must be at least 32 bit sign extended. Result will be sign extended. void cmpxchgh(ConditionRegister flag, Register dest_current_value, Register compare_value, Register exchange_value, Registeraddr_base, Register tmp1, Register tmp2, int semantics, bool cmpxchgx_hint = false, Register int_flag_success = noreg, bool contention_hint = false, bool weak = false) {
cmpxchg_generic(flag, dest_current_value, compare_value, exchange_value, addr_base, tmp1, tmp2,
semantics, cmpxchgx_hint, int_flag_success, contention_hint, weak, 2);
} void cmpxchgw(ConditionRegister flag, Register dest_current_value, Register compare_value, Register exchange_value, Registeraddr_base, int semantics, bool cmpxchgx_hint = false, Register int_flag_success = noreg, bool contention_hint = false, bool weak = false) {
cmpxchg_generic(flag, dest_current_value, compare_value, exchange_value, addr_base, noreg, noreg,
semantics, cmpxchgx_hint, int_flag_success, contention_hint, weak, 4);
} void cmpxchgd(ConditionRegister flag, Register dest_current_value, RegisterOrConstant compare_value, Register exchange_value, Register addr_base, int semantics, bool cmpxchgx_hint = false, Register int_flag_success = noreg, Label* failed = NULL, bool contention_hint = false, bool weak = false);
// Test sub_klass against super_klass, with fast and slow paths.
// The fast path produces a tri-state answer: yes / no / maybe-slow. // One of the three labels can be NULL, meaning take the fall-through. // If super_check_offset is -1, the value is loaded up from super_klass. // No registers are killed, except temp_reg and temp2_reg. // If super_check_offset is not -1, temp2_reg is not used and can be noreg. void check_klass_subtype_fast_path(Register sub_klass, Register super_klass, Register temp1_reg, Register temp2_reg,
Label* L_success,
Label* L_failure,
Label* L_slow_path = NULL, // default fall through
RegisterOrConstant super_check_offset = RegisterOrConstant(-1));
// The rest of the type check; must be wired to a corresponding fast path. // It does not repeat the fast path logic, so don't use it standalone. // The temp_reg can be noreg, if no temps are available. // It can also be sub_klass or super_klass, meaning it's OK to kill that one. // Updates the sub's secondary super cache as necessary. void check_klass_subtype_slow_path(Register sub_klass, Register super_klass, Register temp1_reg, Register temp2_reg,
Label* L_success = NULL, Register result_reg = noreg);
// Simplified, combined version, good for typical uses. // Falls through on failure. void check_klass_subtype(Register sub_klass, Register super_klass, Register temp1_reg, Register temp2_reg,
Label& L_success);
// allocation (for C1) void tlab_allocate( Register obj, // result: pointer to object after successful allocation Register var_size_in_bytes, // object size in bytes if unknown at compile time; invalid otherwise int con_size_in_bytes, // object size in bytes if known at compile time Register t1, // temp register
Label& slow_case // continuation point if fast allocation fails
); void incr_allocated_bytes(RegisterOrConstant size_in_bytes, Register t1, Register t2);
// Trap-instruction-based checks. // Range checks can be distinguished from zero checks as they check 32 bit, // zero checks all 64 bits (tw, td). inlinevoid trap_null_check(Register a, trap_to_bits cmp = traptoEqual); staticbool is_trap_null_check(int x) { return is_tdi(x, traptoEqual, -1/*any reg*/, 0) ||
is_tdi(x, traptoGreaterThanUnsigned, -1/*any reg*/, 0);
}
// Implicit or explicit null check, jumps to static address exception_entry. inlinevoid null_check_throw(Register a, int offset, Register temp_reg, address exception_entry); inlinevoid null_check(Register a, int offset, Label *Lis_null); // implicit only if Lis_null not provided
// Access heap oop, handle encoding and GC barriers. // Some GC barriers call C so use needs_frame = true if an extra frame is needed at the current call site. private: inlinevoid access_store_at(BasicType type, DecoratorSet decorators, Register base, RegisterOrConstant ind_or_offs, Register val, Register tmp1, Register tmp2, Register tmp3,
MacroAssembler::PreservationLevel preservation_level); inlinevoid access_load_at(BasicType type, DecoratorSet decorators, Register base, RegisterOrConstant ind_or_offs, Register dst, Register tmp1, Register tmp2,
MacroAssembler::PreservationLevel preservation_level, Label *L_handle_null = NULL);
public: // Specify tmp1 for better code in certain compressed oops cases. Specify Label to bail out on null oop. // tmp1, tmp2 and needs_frame are used with decorators ON_PHANTOM_OOP_REF or ON_WEAK_OOP_REF. inlinevoid load_heap_oop(Register d, RegisterOrConstant offs, Register s1, Register tmp1, Register tmp2,
MacroAssembler::PreservationLevel preservation_level,
DecoratorSet decorators = 0, Label *L_handle_null = NULL);
// Load/Store klass oop from klass field. Compress. void load_klass(Register dst, Register src); void store_klass(Register dst_oop, Register klass, Register tmp = R0); void store_klass_gap(Register dst_oop, Register val = noreg); // Will store 0 if val not specified.
// Calls verify_oop. If UseCompressedOops is on, decodes the oop. // Preserves reg. void verify_coop(Register reg, constchar*); // Emit code to verify that reg contains a valid oop if +VerifyOops is set. void verify_oop(Register reg, constchar* s = "broken oop"); void verify_oop_addr(RegisterOrConstant offs, Register base, constchar* s = "contains broken oop");
// TODO: verify method and klass metadata (compare against vptr?) void _verify_method_ptr(Register reg, constchar * msg, constchar * file, int line) {} void _verify_klass_ptr(Register reg, constchar * msg, constchar * file, int line) {}
// Convenience method returning function entry. For the ELFv1 case // creates function descriptor at the current address and returns // the pointer to it. For the ELFv2 case returns the current address. inline address function_entry();
void zap_from_to(Register low, int before, Register high, int after, Register val, Registeraddr) PRODUCT_RETURN;
};
// class SkipIfEqualZero: // // Instantiating this class will result in assembly code being output that will // jump around any code emitted between the creation of the instance and it's // automatic destruction at the end of a scope block, depending on the value of // the flag passed to the constructor, which will be checked at run-time. class SkipIfEqualZero : public StackObj { private:
MacroAssembler* _masm;
Label _label;
public: // 'Temp' is a temp register that this object can use (and trash). explicit SkipIfEqualZero(MacroAssembler*, Register temp, constbool* flag_addr); staticvoid skip_to_label_if_equal_zero(MacroAssembler*, Register temp, constbool* flag_addr, Label& label);
~SkipIfEqualZero();
};
#endif// CPU_PPC_MACROASSEMBLER_PPC_HPP
/*
 * NOTE(review): The lines below are an artifact of HTML extraction (a German
 * website footer); they are not part of the original source file. Wrapped in
 * a comment so they cannot be parsed as code; content preserved verbatim.
 *
 * Dauer der Verarbeitung: 0.9 Sekunden (vorverarbeitet)
 *
 * Die Informationen auf dieser Webseite wurden nach bestem Wissen sorgfältig
 * zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
 * noch Qualität der bereitgestellten Informationen zugesichert.
 *
 * Bemerkung: Die farbliche Syntaxdarstellung ist noch experimentell.
 */