/* * Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2014, 2018, Red Hat Inc. All rights reserved. * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 only, as * published by the Free Software Foundation. * * This code is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * version 2 for more details (a copy is included in the LICENSE file that * accompanied this code). * * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA * or visit www.oracle.com if you need additional information or have any * questions. *
*/
// the instruction sequence of li32 is as below: // lui // addiw staticbool check_li32_data_dependency(address instr) {
address lui = instr;
address addiw = lui + instruction_size;
// The natural type of an RISCV instruction is uint32_t inline NativeInstruction* nativeInstruction_at(uint32_t *addr) { return (NativeInstruction*)addr;
}
inline NativeCall* nativeCall_at(address addr); // The NativeCall is an abstraction for accessing/manipulating native // call instructions (used to manipulate inline caches, primitive & // DSO calls, etc.).
// Similar to replace_mt_safe, but just changes the destination. The // important thing is that free-running threads are able to execute // this call instruction at all times. If the call is an immediate BL // instruction we can simply rely on atomicity of 32-bit writes to // make sure other threads will see no intermediate states.
// We cannot rely on locks here, since the free-running threads must run at // full speed. // // Used in the runtime linkage of calls; see class CompiledIC. // (Cf. 4506997 and 4479829, where threads witnessed garbage displacements.)
// The parameter assert_lock disables the assertion during code generation. void set_destination_mt_safe(address dest, bool assert_lock = true);
// An interface for accessing/manipulating native mov reg, imm instructions. // (used to manipulate inlined 64-bit data calls, etc.) class NativeMovConstReg: public NativeInstruction { public: enum RISCV_specific_constants {
movptr_instruction_size = 6 * NativeInstruction::instruction_size, // lui, addi, slli, addi, slli, addi. See movptr().
load_pc_relative_instruction_size = 2 * NativeInstruction::instruction_size, // auipc, ld
instruction_offset = 0,
displacement_offset = 0
};
// Address of the first instruction of the mov-const sequence.
address instruction_address() const {
  return addr_at(instruction_offset);
}
// Address of the instruction following this mov-const sequence.
address next_instruction_address() const {
  // If the instruction at 5 * instruction_size is addi,
  // it means a lui + addi + slli + addi + slli + addi instruction sequence,
  // and the next instruction address should be addr_at(6 * instruction_size).
  // However, when the instruction at 5 * instruction_size isn't addi,
  // the next instruction address should be addr_at(5 * instruction_size)
  if (nativeInstruction_at(instruction_address())->is_movptr()) {
    if (is_addi_at(addr_at(movptr_instruction_size - NativeInstruction::instruction_size))) {
      // Assume: lui, addi, slli, addi, slli, addi
      return addr_at(movptr_instruction_size);
    } else {
      // Assume: lui, addi, slli, addi, slli
      return addr_at(movptr_instruction_size - NativeInstruction::instruction_size);
    }
  } else if (is_load_pc_relative_at(instruction_address())) {
    // Assume: auipc, ld
    return addr_at(load_pc_relative_instruction_size);
  }
  guarantee(false, "Unknown instruction in NativeMovConstReg");
  return NULL;
}
// Accessors for the immediate embedded in the sequence; presumably
// defined out-of-line in the corresponding .cpp — TODO confirm.
intptr_t data() const;
void set_data(intptr_t x);
// Flush the instruction cache over this sequence after patching.
void flush() {
  if (maybe_cpool_ref(instruction_address())) {
    return; // constant-pool-referencing form: no icache invalidation here
  }
  ICache::invalidate_range(instruction_address(), movptr_instruction_size);
}
// RISCV should not use C1 runtime patching, but still implement // NativeMovRegMem to keep some compilers happy. class NativeMovRegMem: public NativeInstruction { public: enum RISCV_specific_constants {
instruction_size = NativeInstruction::instruction_size,
instruction_offset = 0,
data_offset = 0,
next_instruction_offset = NativeInstruction::instruction_size
};
int instruction_start() const { return instruction_offset; }
class NativeIllegalInstruction: public NativeInstruction { public: // Insert illegal opcode as specific address staticvoid insert(address code_pos);
};
// A NativePostCallNop takes the form of three instructions: // nop; lui zr, hi20; addiw zr, lo12 // // The nop is patchable for a deoptimization trap. The lui and addiw // instructions execute as nops but have a 20/12-bit payload in which we // can store an offset from the initial nop to the nmethod. class NativePostCallNop: public NativeInstruction { public: bool check() const { // Check for two instructions: nop; lui zr, hi20 // These instructions only ever appear together in a post-call // NOP, so it's unnecessary to check that the third instruction is // an addiw as well. return is_nop() && is_lui_to_zr_at(addr_at(4));
} int displacement() const; void patch(jint diff); void make_deopt();
};
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung ist noch experimentell.