/*
 * Copyright (c) 2001, 2022, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
*/
double timestamp = fetch_timestamp();
MutexLocker ml(&_mutex, Mutex::_no_safepoint_check_flag);
int index = compute_log_index();
_records[index].thread = NULL; // It's the GC thread so it's not that interesting.
_records[index].timestamp = timestamp;
_records[index].data.is_before = before;
stringStream st(_records[index].data.buffer(), _records[index].data.size());
// Default implementation, for collectors that don't support the feature.
bool CollectedHeap::supports_concurrent_gc_breakpoints() const {
  return false;
}
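// A minimal sketch of how a supporting collector might override the default
// above (EpsilonLikeHeap is an invented name for illustration):
//
//   bool EpsilonLikeHeap::supports_concurrent_gc_breakpoints() const {
//     return true;  // this collector implements the breakpoint protocol
//   }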
bool CollectedHeap::is_oop(oop object) const {
  if (!is_object_aligned(object)) {
    return false;
  }

  if (!is_in(object)) {
    return false;
  }

  if (is_in(object->klass_or_null())) {
    return false;
  }

  return true;
}
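// A note on the last check above: a valid object's Klass* lives outside the
// Java heap (in metaspace), so a candidate whose klass pointer points back
// into the heap cannot be a real oop. Illustrative use (not from this file):
//
//   assert(Universe::heap()->is_oop(obj), "not a valid heap object");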
// Memory state functions.
CollectedHeap::CollectedHeap() :
_capacity_at_last_gc(0),
_used_at_last_gc(0),
_is_gc_active(false),
_last_whole_heap_examined_time_ns(os::javaTimeNanos()),
_total_collections(0),
_total_full_collections(0),
_gc_cause(GCCause::_no_gc),
_gc_lastcause(GCCause::_no_gc)
{
  // If the minimum object size is greater than MinObjAlignment, we can
  // end up with a shard at the end of the buffer that's smaller than
  // the smallest object. We can't allow that because the buffer must
  // look like it's full of objects when we retire it, so we make
  // sure we have enough space for a filler int array object.
size_t min_size = min_dummy_object_size();
_lab_alignment_reserve = min_size > (size_t)MinObjAlignment ? align_object_size(min_size) : 0;
  // Create the ring log
  if (LogEvents) {
_gc_heap_log = new GCHeapLog();
} else {
_gc_heap_log = NULL;
}
}
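// Worked example for the reserve computed above (numbers invented for
// illustration): if min_dummy_object_size() were 2 words with MinObjAlignment
// at 1 word, a retired LAB could otherwise end in a 1-word shard too small
// for any filler object; reserving align_object_size(2) words per LAB keeps
// the tail always fillable. With min_size <= MinObjAlignment no such shard
// can exist, hence a reserve of 0.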
// This interface assumes that it's being called by the
// vm thread. It collects the heap assuming that the
// heap lock is already held and that we are executing in
// the context of the vm thread.
void CollectedHeap::collect_as_vm_thread(GCCause::Cause cause) {
Thread* thread = Thread::current();
assert(thread->is_VM_thread(), "Precondition#1");
assert(Heap_lock->is_locked(), "Precondition#2");
  GCCauseSetter gcs(this, cause);
  switch (cause) {
    case GCCause::_codecache_GC_threshold:
    case GCCause::_codecache_GC_aggressive:
    case GCCause::_heap_inspection:
    case GCCause::_heap_dump:
    case GCCause::_metadata_GC_threshold: {
      HandleMark hm(thread);
      do_full_collection(false); // don't clear all soft refs
      break;
    }
    case GCCause::_archive_time_gc:
    case GCCause::_metadata_GC_clear_soft_refs: {
      HandleMark hm(thread);
      do_full_collection(true); // do clear all soft refs
      break;
    }
    default:
      ShouldNotReachHere(); // Unexpected use of this function
}
}
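// Hypothetical caller, sketched for illustration (VM_MyHeapInspection is an
// invented name): a VM operation whose prologue acquired the Heap_lock meets
// both preconditions above once its doit() runs in the VM thread:
//
//   void VM_MyHeapInspection::doit() {
//     Universe::heap()->collect_as_vm_thread(GCCause::_heap_inspection);
//   }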
MetaWord* CollectedHeap::satisfy_failed_metadata_allocation(ClassLoaderData* loader_data,
                                                            size_t word_size,
                                                            Metaspace::MetadataType mdtype) {
  uint loop_count = 0;
  uint gc_count = 0;
  uint full_gc_count = 0;

  assert(!Heap_lock->owned_by_self(), "Should not be holding the Heap_lock");
do {
    MetaWord* result = loader_data->metaspace_non_null()->allocate(word_size, mdtype);
    if (result != NULL) {
      return result;
    }
    if (GCLocker::is_active_and_needs_gc()) {
      // If the GCLocker is active, just expand and allocate.
      // If that does not succeed, wait if this thread is not
      // in a critical section itself.
      result = loader_data->metaspace_non_null()->expand_and_allocate(word_size, mdtype);
      if (result != NULL) {
        return result;
      }

      JavaThread* jthr = JavaThread::current();
      if (!jthr->in_critical()) {
        // Wait for JNI critical section to be exited
        GCLocker::stall_until_clear();
        // The GC invoked by the last thread leaving the critical
        // section will be a young collection and a full collection
        // is (currently) needed for unloading classes so continue
        // to the next iteration to get a full GC.
        continue;
      } else {
        if (CheckJNICalls) {
          fatal("Possible deadlock due to allocating while"
                " in jni critical section");
        }
        return NULL;
      }
    }
    {  // Need lock to get self consistent gc_count's
      MutexLocker ml(Heap_lock);
      gc_count      = Universe::heap()->total_collections();
      full_gc_count = Universe::heap()->total_full_collections();
    }
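    // The snapshot taken above travels with the VM operation, so its prologue
    // can detect that another GC completed in the meantime and skip the
    // redundant collection; this loop then simply retries the allocation.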
    // Generate a VM operation
    VM_CollectForMetadataAllocation op(loader_data,
                                       word_size,
                                       mdtype,
                                       gc_count,
                                       full_gc_count,
                                       GCCause::_metadata_GC_threshold);
    VMThread::execute(&op);
    // If GC was locked out, try again. Check before checking success because the
    // prologue could have succeeded and the GC still have been locked out.
    if (op.gc_locked()) {
      continue;
    }
    if (op.prologue_succeeded()) {
      return op.result();
    }
    loop_count++;
    if ((QueuedAllocationWarningCount > 0) &&
        (loop_count % QueuedAllocationWarningCount == 0)) {
      log_warning(gc, ergo)("satisfy_failed_metadata_allocation() retries %d times,"
                            " size=" SIZE_FORMAT, loop_count, word_size);
    }
  } while (true); // Until a GC is done
}
#ifndef PRODUCT
void CollectedHeap::check_for_non_bad_heap_word_value(HeapWord* addr, size_t size) {
  if (CheckMemoryInitialization && ZapUnusedHeapArea) {
    // Note the mismatch between size (in 32/64-bit words) and ju_addr, which
    // always points to a 32-bit word.
    for (juint* ju_addr = reinterpret_cast<juint*>(addr);
         ju_addr < reinterpret_cast<juint*>(addr + size);
         ++ju_addr) {
      assert(*ju_addr == badHeapWordVal, "Found non badHeapWordValue in pre-allocation check");
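      // On a 64-bit VM, addr + size advances by size HeapWords (8 * size
      // bytes), so the loop inspects 2 * size 32-bit slots: that is the
      // size/ju_addr mismatch the comment above refers to.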
}
}
}
#endif // PRODUCT
size_t CollectedHeap::max_tlab_size() const {
  // TLABs can't be bigger than we can fill with an int[Integer.MAX_VALUE].
  // This restriction could be removed by enabling filling with multiple arrays.
  // If we compute that the reasonable way as
  //    header_size + ((sizeof(jint) * max_jint) / HeapWordSize)
  // we'll overflow on the multiply, so we do the divide first.
  // We actually lose a little by dividing first,
  // but that just makes the TLAB somewhat smaller than the biggest array,
  // which is fine, since we'll be able to fill that.
  size_t max_int_size = typeArrayOopDesc::header_size(T_INT) +
                        sizeof(jint) * ((juint) max_jint / (size_t) HeapWordSize);
  return align_down(max_int_size, MinObjAlignment);
}
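// Worked example for max_tlab_size() above (64-bit VM, HeapWordSize == 8):
//   max_jint / HeapWordSize  = 2147483647 / 8 = 268435455
//   sizeof(jint) * 268435455 = 1073741820 payload words (about 8 GB)
// The exact quotient (4 * 2147483647) / 8 would be 1073741823 words, so
// dividing first loses 3 words, while every intermediate value stays
// representable in 32 bits.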
size_t CollectedHeap::filler_array_hdr_size() {
  return align_object_offset(arrayOopDesc::header_size(T_INT)); // align to Long
}
size_t CollectedHeap::filler_array_min_size() {
  return align_object_size(filler_array_hdr_size()); // align to MinObjAlignment
}
void
CollectedHeap::fill_with_array(HeapWord* start, size_t words, bool zap)
{
assert(words >= filler_array_min_size(), "too small for an array");
assert(words <= filler_array_max_size(), "too big for a single object");
  const size_t payload_size = words - filler_array_hdr_size();
  const size_t len = payload_size * HeapWordSize / sizeof(jint);
assert((int)len >= 0, "size too large " SIZE_FORMAT " becomes %d", words, (int)len);
ObjArrayAllocator allocator(Universe::fillerArrayKlassObj(), words, (int)len, /* do_zero */ false);
  allocator.initialize(start);

  if (DumpSharedSpaces) {
    // This array is written into the CDS archive. Make sure it
    // has deterministic contents.
zap_filler_array_with(start, words, 0);
} else {
DEBUG_ONLY(zap_filler_array(start, words, zap);)
}
}
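// Worked example for the len computation in fill_with_array() above (the
// 2-word header is an assumption for the example; the real value comes from
// filler_array_hdr_size()): filling words = 1024 on a 64-bit VM gives
//   payload_size = 1024 - 2 = 1022 HeapWords
//   len          = 1022 * 8 / 4 = 2044 jint elements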
void
CollectedHeap::fill_with_objects(HeapWord* start, size_t words, bool zap)
{
  // Multiple objects may be required depending on the filler array maximum size. Fill
  // the range up to that with objects that are filler_array_max_size sized. The
  // remainder is filled with a single object.
  const size_t min = min_fill_size();
  const size_t max = filler_array_max_size();
  while (words > max) {
    const size_t cur = (words - max) >= min ? max : max - min;
    fill_with_array(start, cur, zap);
    start += cur;
    words -= cur;
  }

  fill_with_object_impl(start, words, zap);
}
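// Worked example for the loop above (max and min invented for illustration):
// with max = 100 and min = 2, filling words = 101 must not carve off a full
// max-sized chunk, since the 1-word remainder would be smaller than
// min_fill_size(). Because words - max < min, the loop takes
// cur = max - min = 98 instead, leaving a 3-word remainder that the final
// fill_with_object_impl() call covers with a single object.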
void CollectedHeap::ensure_parsability(bool retire_tlabs) {
assert(SafepointSynchronize::is_at_safepoint() || !is_init_completed(), "Should only be called at a safepoint or at start-up");
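  // Heap walkers (verification, heap inspection) iterate the heap object by
  // object; retiring a TLAB, or otherwise making it parsable, fills its
  // unused tail with a filler object so a linear scan never runs into
  // unformatted memory.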
ThreadLocalAllocStats stats;
for (JavaThreadIteratorWithHandle jtiwh; JavaThread *thread = jtiwh.next();) {
    BarrierSet::barrier_set()->make_parsable(thread);
    if (UseTLAB) {
      if (retire_tlabs) {
thread->tlab().retire(&stats);
} else {
thread->tlab().make_parsable();
}
}
}
stats.publish();
}
void CollectedHeap::resize_all_tlabs() {
assert(SafepointSynchronize::is_at_safepoint() || !is_init_completed(), "Should only resize tlabs at safepoint");
  if (UseTLAB && ResizeTLAB) {
    for (JavaThreadIteratorWithHandle jtiwh; JavaThread *thread = jtiwh.next(); ) {
thread->tlab().resize();
}
}
}
void CollectedHeap::initialize_reserved_region(const ReservedHeapSpace& rs) {
  // It is important to do this in a way such that concurrent readers can't
  // temporarily think something is in the heap. (Seen this happen in asserts.)
_reserved.set_word_size(0);
_reserved.set_start((HeapWord*)rs.base());
_reserved.set_end((HeapWord*)rs.end());
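  // Zeroing the size before moving start and end means a reader racing with
  // this update can only observe an empty region, never one spanning a mix
  // of old and new bounds.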
}
bool CollectedHeap::promotion_should_fail(volatile size_t* count) {
  // Access to count is not atomic; the value does not have to be exact.
  if (PromotionFailureALot) {
    const size_t gc_num = total_collections();
    const size_t elapsed_gcs = gc_num - _promotion_failure_alot_gc_number;
    if (elapsed_gcs >= PromotionFailureALotInterval) {
      // Test for unsigned arithmetic wrap-around.
      if (++*count >= PromotionFailureALotCount) {
        *count = 0;
        return true;
}
}
  }
  return false;
}
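// Hypothetical usage sketch (the counter name is invented): a collector's
// promotion path could gate copying on this hook so that, once the
// configured GC interval has elapsed, roughly every
// PromotionFailureALotCount-th attempt is forced to fail:
//
//   // inside a CollectedHeap subclass
//   static volatile size_t _pf_counter = 0;
//   if (promotion_should_fail(&_pf_counter)) {
//     return NULL; // simulate a failed promotion
//   }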