/* * Copyright (c) 2015, 2022, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 only, as * published by the Free Software Foundation. * * This code is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * version 2 for more details (a copy is included in the LICENSE file that * accompanied this code). * * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA * or visit www.oracle.com if you need additional information or have any * questions.
*/
// Returns the liveness mask cached for a given node, or NULL when the node
// needs no liveness data (non-mach nodes, and mach nodes whose load barrier
// was elided). Lazily allocates an empty mask in the compiler arena on the
// first request and caches it in _live, keyed by the node's _idx.
RegMask* live(const Node* node) {
  if (!node->is_Mach()) {
    // Don't need liveness for non-MachNodes
    return NULL;
  }

  const MachNode* const mach = node->as_Mach();
  if (mach->barrier_data() == ZLoadBarrierElided) {
    // Don't need liveness data for nodes without barriers
    return NULL;
  }

  RegMask* live = (RegMask*)_live[node->_idx];
  if (live == NULL) {
    // Not cached yet: placement-new an empty mask in the compile arena so its
    // lifetime matches the compilation, then remember it for this node.
    live = new (Compile::current()->comp_arena()->AmallocWords(sizeof(RegMask))) RegMask();
    _live.map(node->_idx, (Node*)live);
  }

  return live;
}
// Fetch the liveness mask recorded for this stub's node. Only valid for
// mach-nodes that actually carry a barrier; asserts otherwise.
RegMask& ZLoadBarrierStubC2::live() const {
  RegMask* const live_mask = barrier_set_state()->live(_node);
  assert(live_mask != NULL, "must be mach-node with barrier");
  return *live_mask;
}
// Returns the label marking the stub's entry point.
Label* ZLoadBarrierStubC2::entry() {
  // The _entry will never be bound when in_scratch_emit_size() is true.
  // However, we still need to return a label that is not bound now, but
  // will eventually be bound. Any label will do, as it will only act as
  // a placeholder, so we return the _continuation label.
  return Compile::current()->output()->in_scratch_emit_size() ? &_continuation : &_entry;
}
// NOTE(review): fragment of a stub-emission loop — the enclosing function
// (which supplies `cb` and `stubs` and emits each stub's code after this
// check) is not visible in this view.
for (int i = 0; i < stubs->length(); i++) {
  // Make sure there is enough space in the code buffer; bail out of the
  // compilation if expansion fails because the CodeCache is exhausted.
  if (cb.insts()->maybe_expand_to_ensure_remaining(PhaseOutput::MAX_inst_size) && cb.blob() == NULL) {
    ciEnv::current()->record_failure("CodeCache is full");
    return;
  }
// NOTE(review): fragment of a clone-expansion routine — bt, src_offset,
// dest_offset, length, src, dest, size, ac and phase are locals/parameters of
// the enclosing function, which is outside this view.
if (bt == T_OBJECT) {
// BarrierSetC2::clone sets the offsets via BarrierSetC2::arraycopy_payload_base_offset
// which 8-byte aligns them to allow for word size copies. Make sure the offsets point
// to the first element in the array when cloning object arrays. Otherwise, load
// barriers are applied to parts of the header. Also adjust the length accordingly.
assert(src_offset == dest_offset, "should be equal");
// If the offset does not already point at the first object-array element, it
// must be exactly one word (BytesPerLong) short of it, which the asserts below
// check; rebase both offsets and shrink the length by that one long.
jlong offset = src_offset->get_long(); if (offset != arrayOopDesc::base_offset_in_bytes(T_OBJECT)) {
assert(!UseCompressedClassPointers, "should only happen without compressed class pointers");
assert((arrayOopDesc::base_offset_in_bytes(T_OBJECT) - offset) == BytesPerLong, "unexpected offset");
length = phase->transform_later(new SubLNode(length, phase->longcon(1))); // Size is in longs
src_offset = phase->longcon(arrayOopDesc::base_offset_in_bytes(T_OBJECT));
dest_offset = src_offset;
}
}
// Compute the actual payload addresses from the (possibly adjusted) offsets.
Node* payload_src = phase->basic_plus_adr(src, src_offset);
Node* payload_dst = phase->basic_plus_adr(dest, dest_offset);
assert(size->bottom_type()->is_long(), "Should be long");
// The native clone we are calling here expects the instance size in words.
// Add header/offset size to payload size to get instance size.
Node* const base_offset = phase->longcon(arraycopy_payload_base_offset(ac->is_clone_array()) >> LogBytesPerLong);
Node* const full_size = phase->transform_later(new AddLNode(size, base_offset));
// Initialize to union of successors for (uint i = 0; i < block->_num_succs; i++) { const uint succ_id = block->_succs[i]->_pre_order;
new_live.OR(live[succ_id]);
}
// Walk block backwards, computing liveness for (int i = block->number_of_nodes() - 1; i >= 0; --i) { const Node* const node = block->get_node(i);
// Remove def bits const OptoReg::Name first = bs->refine_register(node, regalloc->get_reg_first(node)); const OptoReg::Name second = bs->refine_register(node, regalloc->get_reg_second(node)); if (first != OptoReg::Bad) {
new_live.Remove(first);
} if (second != OptoReg::Bad) {
new_live.Remove(second);
}
// Add use bits for (uint j = 1; j < node->req(); ++j) { const Node* const use = node->in(j); const OptoReg::Name first = bs->refine_register(use, regalloc->get_reg_first(use)); const OptoReg::Name second = bs->refine_register(use, regalloc->get_reg_second(use)); if (first != OptoReg::Bad) {
new_live.Insert(first);
} if (second != OptoReg::Bad) {
new_live.Insert(second);
}
}
// If this node tracks liveness, update it
RegMask* const regs = barrier_set_state()->live(node); if (regs != NULL) {
regs->OR(new_live);
}
}
// Now at block top, see if we have any changes
new_live.SUBTRACT(old_live); if (new_live.is_NotEmpty()) { // Liveness has refined, update and propagate to prior blocks
old_live.OR(new_live); for (uint i = 1; i < block->num_preds(); ++i) {
Block* const pred = cfg->get_block_for_node(block->pred(i));
worklist.push(pred);
}
}
}
}
Messung V0.5
- Dauer der Verarbeitung: 0.2 Sekunden
(vorverarbeitet)
-
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.