/*
 * Copyright (c) 2015, 2022, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
*/
size_t ZHeap::unsafe_max_tlab_alloc() const {
  size_t size = _object_allocator.remaining();

  if (size < MinTLABSize) {
    // The remaining space in the allocator is not enough to
    // fit the smallest possible TLAB. This means that the next
    // TLAB allocation will force the allocator to get a new
    // backing page anyway, which in turn means that we can then
    // fit the largest possible TLAB.
    size = max_tlab_size();
  }

  return MIN2(size, max_tlab_size());
}
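// Illustrative sketch (not part of zHeap.cpp): the TLAB sizing policy above,
// restated as a pure function of the remaining allocator space. The helper
// name and its parameters are hypothetical; MinTLABSize, max_tlab_size() and
// MIN2 are the real names used in the function above.
static size_t example_tlab_limit(size_t remaining, size_t min_tlab, size_t max_tlab) {
  if (remaining < min_tlab) {
    // Too little space left for even the smallest TLAB; the next TLAB
    // allocation gets a fresh backing page, so report the largest TLAB.
    return max_tlab;
  }
  // Otherwise, never report more than the largest TLAB we can hand out.
  return MIN2(remaining, max_tlab);
}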
bool ZHeap::is_in(uintptr_t addr) const {
  // An address is considered to be "in the heap" if it points into
  // the allocated part of a page, regardless of which heap view is
  // used. Note that an address with the finalizable metadata bit set
  // is not pointing into a heap view, and therefore not considered
  // to be "in the heap".

  if (ZAddress::is_in(addr)) {
    const ZPage* const page = _page_table.get(addr);
    if (page != NULL) {
      return page->is_in(addr);
    }
  }

  return false;
}
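// Illustrative sketch (not part of zHeap.cpp): a debug-only check a caller
// might make before treating an address as a heap oop. The helper name is
// hypothetical; ZHeap::heap() and is_in() are the real entry points above.
static void example_assert_in_heap(uintptr_t addr) {
  assert(ZHeap::heap()->is_in(addr), "address is not in the ZGC heap");
}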
  if (ZHeap::heap()->has_alloc_stalled()) {
    // If there are stalled allocations, ensure that regardless of the
    // cause of the GC, we have to clear soft references, as we are just
    // about to increment the sequence number, and all previous allocations
    // will throw if not presented with enough memory.
    ZHeap::heap()->set_soft_reference_policy(true);
  }
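// Illustrative sketch (assumption): the decision above amounts to "clear soft
// references if the GC cause asks for it, or if any allocation is stalled".
// The helper and its cause_requests_clearing parameter are hypothetical;
// has_alloc_stalled() and set_soft_reference_policy() are the real calls used above.
static bool example_should_clear_soft_references(bool cause_requests_clearing) {
  return cause_requests_clearing || ZHeap::heap()->has_alloc_stalled();
}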
class ZRendezvousClosure : public HandshakeClosure {
public:
  ZRendezvousClosure() :
      HandshakeClosure("ZRendezvous") {}

  // The closure body is intentionally empty; the synchronization comes from
  // the handshake operation itself, not from any per-thread work.
  void do_thread(Thread* thread) {}
};
void ZHeap::process_non_strong_references() {
  // Process Soft/Weak/Final/PhantomReferences
  _reference_processor.process_references();

  // Process weak roots
  _weak_roots_processor.process_weak_roots();

  // Unlink stale metadata and nmethods
  _unload.unlink();

  // Perform a handshake. This is needed 1) to make sure that stale
  // metadata and nmethods are no longer observable. And 2), to
  // prevent the race where a mutator first loads an oop, which is
  // logically null but not yet cleared. Then this oop gets cleared
  // by the reference processor and resurrection is unblocked. At
  // this point the mutator could see the unblocked state and pass
  // this invalid oop through the normal barrier path, which would
  // incorrectly try to mark the oop.
  ZRendezvousClosure cl;
  Handshake::execute(&cl);

  // Unblock resurrection of weak/phantom references
  ZResurrection::unblock();

  // Purge stale metadata and nmethods that were unlinked
  _unload.purge();

  // Enqueue Soft/Weak/Final/PhantomReferences. Note that this
  // must be done after unblocking resurrection. Otherwise the
  // Finalizer thread could call Reference.get() on the Finalizers
  // that were just enqueued, which would incorrectly return null
  // during the resurrection block window, since such referents
  // are only Finalizable marked.
  _reference_processor.enqueue_references();

  // Clear old markings claim bits.
  // Note: Clearing _claim_strong also clears _claim_finalizable.
  ClassLoaderDataGraph::clear_claimed_marks(ClassLoaderData::_claim_strong);
}
void ZHeap::free_empty_pages(ZRelocationSetSelector* selector, int bulk) {
  // Freeing empty pages in bulk is an optimization to avoid grabbing
  // the page allocator lock, and trying to satisfy stalled allocations
  // too frequently.
  if (selector->should_free_empty_pages(bulk)) {
    free_pages(selector->empty_pages(), true /* reclaimed */);
    selector->clear_empty_pages();
  }
}
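// Illustrative sketch (assumption, not verbatim ZGC code): the bulk parameter
// lets the relocation set selection loop hand empty pages back in batches
// rather than one at a time. The threshold value and loop below are
// hypothetical; free_empty_pages() is the function defined above.
//
//   for (ZPage* page; iter.next(&page);) {
//     if (is_empty(page)) {
//       selector.register_empty_page(page);
//       free_empty_pages(&selector, 64 /* bulk */);  // frees only once enough pages are queued
//     }
//   }
//   free_empty_pages(&selector, 0 /* bulk */);       // flush any remaining empty pages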
void ZHeap::select_relocation_set() {
  // Do not allow pages to be deleted
  _page_allocator.enable_deferred_delete();

  // Register relocatable pages with selector
  ZRelocationSetSelector selector;
  ZPageTableIterator pt_iter(&_page_table);
  for (ZPage* page; pt_iter.next(&page);) {
    if (!page->is_relocatable()) {
      // Not relocatable, don't register
      continue;
    }

    if (page->is_marked()) {
      // Register live page
      selector.register_live_page(page);
    } else {
      // Register empty page
      selector.register_empty_page(page);
void ZHeap::print_extended_on(outputStream* st) const {
  // Do not allow pages to be deleted
  _page_allocator.enable_deferred_delete();

  // Print all pages
  st->print_cr("ZGC Page Table:");
  ZPageTableIterator iter(&_page_table);
  for (ZPage* page; iter.next(&page);) {
    page->print_on(st);
  }

  // Allow pages to be deleted
  _page_allocator.disable_deferred_delete();
}
bool ZHeap::print_location(outputStream* st, uintptr_t addr) const {
  if (LocationPrinter::is_valid_obj((void*)addr)) {
    st->print(PTR_FORMAT " is a %s oop: ", addr, ZAddress::is_good(addr) ? "good" : "bad");
    ZOop::from_address(addr)->print_on(st);
    return true;
  }

  return false;
}
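// Illustrative usage sketch (assumption): print_location() is the kind of
// query the VM's error reporting and debug "find" machinery relies on; a
// manual call from debugging code might look like the hypothetical line below.
//
//   ZHeap::heap()->print_location(tty, addr);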
void ZHeap::verify() {
  // Heap verification can only be done between mark end and
  // relocate start. This is the only window where all oops are
  // good and the whole heap is in a consistent state.
  guarantee(ZGlobalPhase == ZPhaseMarkCompleted, "Invalid phase");

  ZVerify::after_weak_processing();
}