/*
 * Copyright (c) 2001, 2022, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
*/
// INVARIANTS/NOTES
//
// All allocation activity covered by the G1CollectedHeap interface is
// serialized by acquiring the HeapLock. This happens in mem_allocate
// and allocate_new_tlab, which are the "entry" points to the
// allocation code from the rest of the JVM. (Note that this does not
// apply to TLAB allocation, which is not part of this interface: it
// is done by clients of this interface.)
void G1RegionMappingChangedListener::on_commit(uint start_idx, size_t num_regions, bool zero_filled) {
  // The from card cache is not the memory that is actually committed. So we cannot
  // take advantage of the zero_filled parameter.
reset_from_card_cache(start_idx, num_regions);
}
uint G1CollectedHeap::get_chunks_per_region() {
  uint log_region_size = HeapRegion::LogOfHRGrainBytes;
  // Limit the expected input values to current known possible values of the
  // (log) region size. Adjust as necessary after testing if changing the permissible
  // values for region size.
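  // For example, for 1M regions (log_region_size == 20) this evaluates to
  // 1u << (20 / 2 - 4) == 64 chunks per region; for 32M regions
  // (log_region_size == 25) it is 1u << (25 / 2 - 4) == 256 chunks per region.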
  assert(log_region_size >= 20 && log_region_size <= 29,
         "expected value in [20,29], but got %u", log_region_size);
  return 1u << (log_region_size / 2 - 4);
}
HeapRegion* G1CollectedHeap::new_region(size_t word_size,
HeapRegionType type, bool do_expand,
uint node_index) {
  assert(!is_humongous(word_size) || word_size <= HeapRegion::GrainWords,
         "the only time we use this to allocate a humongous region is "
         "when we are allocating a single humongous region");
HeapRegion* res = _hrm.allocate_free_region(type, node_index);
  if (res == NULL && do_expand) {
    // Currently, only attempts to allocate GC alloc regions set
    // do_expand to true. So, we should only reach here during a
    // safepoint.
assert(SafepointSynchronize::is_at_safepoint(), "invariant");
    assert(word_size * HeapWordSize < HeapRegion::GrainBytes,
           "This kind of expansion should never be more than one region. Size: " SIZE_FORMAT,
           word_size * HeapWordSize);

    if (expand_single_region(node_index)) {
      // Given that expand_single_region() succeeded in expanding the heap, and we
      // always expand the heap by an amount aligned to the heap
      // region size, the free list should in theory not be empty.
      // In either case allocate_free_region() will check for NULL.
res = _hrm.allocate_free_region(type, node_index);
}
  }
  return res;
}
HeapWord*
G1CollectedHeap::humongous_obj_allocate_initialize_regions(HeapRegion* first_hr,
                                                           uint num_regions,
                                                           size_t word_size) {
  // Index of last region in the series.
uint first = first_hr->hrm_index();
uint last = first + num_regions - 1;
  // We need to initialize the region(s) we just discovered. This is
  // a bit tricky given that it can happen concurrently with
  // refinement threads refining cards on these regions and
  // potentially wanting to refine the BOT as they are scanning
  // those cards (this can happen shortly after a cleanup; see CR
  // 6991377). So we have to set up the region(s) carefully and in
  // a specific order.
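  // In outline, the order below is: (1) zero the new object's klass word so
  // racing refinement threads bail out, (2) fill the unused tail of the last
  // region, (3) set up the "starts humongous" / "continues humongous"
  // metadata and the BOT, (4) issue a StoreStore barrier, and only then
  // (5) publish the regions' top fields.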
// The word size sum of all the regions we will allocate.
size_t word_size_sum = (size_t) num_regions * HeapRegion::GrainWords;
assert(word_size <= word_size_sum, "sanity");
  // The passed in hr will be the "starts humongous" region. The header
  // of the new object will be placed at the bottom of this region.
  HeapWord* new_obj = first_hr->bottom();
  // This will be the new top of the new object.
HeapWord* obj_top = new_obj + word_size;
  // First, we need to zero the header of the space that we will be
  // allocating. When we update top further down, some refinement
  // threads might try to scan the region. By zeroing the header we
  // ensure that any thread that will try to scan the region will
  // come across the zero klass word and bail out.
  //
  // NOTE: It would not have been correct to have used
  // CollectedHeap::fill_with_object() and make the space look like
  // an int array. The thread that is doing the allocation will
  // later update the object header to a potentially different array
  // type and, for a very short period of time, the klass and length
  // fields will be inconsistent. This could cause a refinement
  // thread to calculate the object size incorrectly.
Copy::fill_to_words(new_obj, oopDesc::header_size(), 0);
  // Next, pad out the unused tail of the last region with filler
  // objects, for improved usage accounting.

  // How many words we use for filler objects.
size_t word_fill_size = word_size_sum - word_size;
  // How many words of memory we "waste" because they cannot hold a filler object.
size_t words_not_fillable = 0;
if (word_fill_size >= min_fill_size()) {
fill_with_objects(obj_top, word_fill_size);
  } else if (word_fill_size > 0) {
    // We have space to fill, but we cannot fit an object there.
words_not_fillable = word_fill_size;
word_fill_size = 0;
}
  // We will set up the first region as "starts humongous". This
  // will also update the BOT covering all the regions to reflect
  // that there is a single object that starts at the bottom of the
  // first region.
first_hr->set_starts_humongous(obj_top, word_fill_size);
  _policy->remset_tracker()->update_at_allocate(first_hr);

  // Then, if there are any, we will set up the "continues
  // humongous" regions.
  HeapRegion* hr = NULL;
  for (uint i = first + 1; i <= last; ++i) {
hr = region_at(i);
hr->set_continues_humongous(first_hr);
_policy->remset_tracker()->update_at_allocate(hr);
}
  // Up to this point no concurrent thread would have been able to
  // do any scanning on any region in this series. All the top
  // fields still point to bottom, so the intersection between
  // [bottom,top] and [card_start,card_end] will be empty. Before we
  // update the top fields, we'll do a storestore to make sure that
  // no thread sees the update to top before the zeroing of the
  // object header and the BOT initialization.
OrderAccess::storestore();
  // Now, we will update the top fields of the "continues humongous"
  // regions except the last one.
  for (uint i = first; i < last; ++i) {
hr = region_at(i);
hr->set_top(hr->end());
}
  hr = region_at(last);
  // If we cannot fit a filler object, we must set top to the end
  // of the humongous object, otherwise we cannot iterate the heap
  // and the BOT will not be complete.
hr->set_top(hr->end() - words_not_fillable);
  assert(hr->bottom() < obj_top && obj_top <= hr->end(),
         "obj_top should be in last region");
for (uint i = first; i <= last; ++i) {
hr = region_at(i);
_humongous_set.add(hr);
_hr_printer.alloc(hr);
}
return new_obj;
}
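// For example, with 4M regions and 8-byte heap words (GrainWords == 2^19
// words), an object of 2^19 + 1 words is aligned up to 2^20 words and
// therefore occupies two regions.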
size_t G1CollectedHeap::humongous_obj_size_in_regions(size_t word_size) {
  assert(is_humongous(word_size), "Object of size " SIZE_FORMAT " must be humongous here", word_size);
  return align_up(word_size, HeapRegion::GrainWords) / HeapRegion::GrainWords;
}
// If could fit into free regions w/o expansion, try.
// Otherwise, if can expand, do so.
// Otherwise, if using ex regions might help, try with ex given back.
HeapWord* G1CollectedHeap::humongous_obj_allocate(size_t word_size) {
  assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);

  uint obj_regions = (uint) humongous_obj_size_in_regions(word_size);

  // Policy: First try to allocate a humongous object in the free list.
  HeapRegion* humongous_start = _hrm.allocate_humongous(obj_regions);
  if (humongous_start == NULL) {
    // Policy: We could not find enough regions for the humongous object in the
    // free list. Look through the heap to find a mix of free and uncommitted regions.
    // If so, expand the heap and allocate the humongous object.
    humongous_start = _hrm.expand_and_allocate_humongous(obj_regions);
    if (humongous_start != NULL) {
      // We managed to find a region by expanding the heap.
log_debug(gc, ergo, heap)("Heap expansion (humongous allocation request). Allocation request: " SIZE_FORMAT "B",
word_size * HeapWordSize);
policy()->record_new_heap_size(num_regions());
    } else {
      // Policy: Potentially trigger a defragmentation GC.
}
}
  HeapWord* result = NULL;
  if (humongous_start != NULL) {
result = humongous_obj_allocate_initialize_regions(humongous_start, obj_regions, word_size);
assert(result != NULL, "it should always return a valid result");
    // A successful humongous object allocation changes the used space
    // information of the old generation so we need to recalculate the
    // sizes and update the jstat counters here.
monitoring_support()->update_sizes();
}
_verifier->verify_region_sets_optional();
return result;
}
HeapWord* G1CollectedHeap::allocate_new_tlab(size_t min_size,
size_t requested_size,
size_t* actual_size) {
assert_heap_not_locked_and_not_at_safepoint();
  assert(!is_humongous(requested_size), "we do not allow humongous TLABs");

  return attempt_allocation(min_size, requested_size, actual_size);
}
HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size) {
ResourceMark rm; // For retrieving the thread names in log messages.
// Make sure you read the note in attempt_allocation_humongous().
assert_heap_not_locked_and_not_at_safepoint();
assert(!is_humongous(word_size), "attempt_allocation_slow() should not " "be called for humongous allocation requests");
  // We should only get here after the first-level allocation attempt
  // (attempt_allocation()) failed to allocate.
  // We will loop until a) we manage to successfully perform the
  // allocation or b) we successfully schedule a collection which
  // fails to perform the allocation. b) is the only case when we'll
  // return NULL.
  HeapWord* result = NULL;
  for (uint try_count = 1, gclocker_retry_count = 0; /* we'll return */; try_count += 1) {
    bool should_try_gc;
    bool preventive_collection_required = false;
uint gc_count_before;
{
MutexLocker x(Heap_lock);
      // Now that we have the lock, we first retry the allocation in case another
      // thread changed the region while we were waiting to acquire the lock.
size_t actual_size;
      result = _allocator->attempt_allocation(word_size, word_size, &actual_size);
      if (result != NULL) {
        return result;
}
      preventive_collection_required = policy()->preventive_collection_required(1);
      if (!preventive_collection_required) {
        // We've already attempted a lock-free allocation above, so we don't want to
        // do it again. Let's jump straight to replacing the active region.
        result = _allocator->attempt_allocation_using_new_region(word_size);
        if (result != NULL) {
          return result;
}
        // If the GCLocker is active and we are bound for a GC, try expanding young gen.
        // This differs from the case where only GCLocker::needs_gc() is set: since the
        // GCLocker is still active here, we expand instead of stalling so we do not
        // wait too long.
        if (GCLocker::is_active_and_needs_gc() && policy()->can_expand_young_list()) {
          // No need for an ergo message here, can_expand_young_list() does this when
          // it returns true.
          result = _allocator->attempt_allocation_force(word_size);
          if (result != NULL) {
            return result;
}
}
}
      // Only try a GC if the GCLocker does not signal the need for a GC. Wait until
      // the GCLocker initiated GC has been performed and then retry. This includes
      // the case when the GCLocker is not active but its initiated GC has not yet
      // been performed.
      should_try_gc = !GCLocker::needs_gc();
      // Read the GC count while still holding the Heap_lock.
gc_count_before = total_collections();
}
if (should_try_gc) {
GCCause::Cause gc_cause = preventive_collection_required ? GCCause::_g1_preventive_collection
                                                               : GCCause::_g1_inc_collection_pause;
      bool succeeded;
      result = do_collection_pause(word_size, gc_count_before, &succeeded, gc_cause);
      if (result != NULL) {
assert(succeeded, "only way to get back a non-NULL result");
log_trace(gc, alloc)("%s: Successfully scheduled collection returning " PTR_FORMAT,
                             Thread::current()->name(), p2i(result));
        return result;
}
      if (succeeded) {
        // We successfully scheduled a collection which failed to allocate. No
        // point in trying to allocate further. We'll just return NULL.
log_trace(gc, alloc)("%s: Successfully scheduled collection failing to allocate "
SIZE_FORMAT " words", Thread::current()->name(), word_size); return NULL;
}
log_trace(gc, alloc)("%s: Unsuccessfully scheduled collection allocating " SIZE_FORMAT " words",
Thread::current()->name(), word_size);
    } else {
      // Failed to schedule a collection.
      if (gclocker_retry_count > GCLockerRetryAllocationCount) {
log_warning(gc, alloc)("%s: Retried waiting for GCLocker too often allocating "
SIZE_FORMAT " words", Thread::current()->name(), word_size); return NULL;
}
log_trace(gc, alloc)("%s: Stall until clear", Thread::current()->name()); // The GCLocker is either active or the GCLocker initiated // GC has not yet been performed. Stall until it is and // then retry the allocation.
GCLocker::stall_until_clear();
gclocker_retry_count += 1;
}
    // We can reach here if we were unsuccessful in scheduling a
    // collection (because another thread beat us to it) or if we were
    // stalled due to the GC locker. In either case we should retry the
    // allocation attempt in case another thread successfully
    // performed a collection and reclaimed enough space. We do the
    // first attempt (without holding the Heap_lock) here and the
    // follow-on attempt will be at the start of the next loop
    // iteration (after taking the Heap_lock).
size_t dummy = 0;
    result = _allocator->attempt_allocation(word_size, word_size, &dummy);
    if (result != NULL) {
      return result;
}
    // Give a warning if we seem to be looping forever.
    if ((QueuedAllocationWarningCount > 0) &&
(try_count % QueuedAllocationWarningCount == 0)) {
log_warning(gc, alloc)("%s: Retried allocation %u times for " SIZE_FORMAT " words",
Thread::current()->name(), try_count, word_size);
}
}
  ShouldNotReachHere();
  return NULL;
}
void G1CollectedHeap::begin_archive_alloc_range(bool open) {
assert_at_safepoint_on_vm_thread();
assert(_archive_allocator == nullptr, "should not be initialized");
_archive_allocator = G1ArchiveAllocator::create_allocator(this, open);
}
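// Note: humongous_threshold_for() is half the given region size, so with the
// minimum 1M region size this rejects allocations of 64K words (512K bytes
// with 8-byte heap words) or more.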
bool G1CollectedHeap::is_archive_alloc_too_large(size_t word_size) {
  // Allocations in archive regions cannot be of a size that would be considered
  // humongous even for a minimum-sized region, because G1 region sizes/boundaries
  // may be different at archive-restore time.
  return word_size >= humongous_threshold_for(HeapRegion::min_region_size_in_words());
}
HeapWord* G1CollectedHeap::archive_mem_allocate(size_t word_size) {
assert_at_safepoint_on_vm_thread();
  assert(_archive_allocator != nullptr, "_archive_allocator not initialized");
  if (is_archive_alloc_too_large(word_size)) {
    return nullptr;
  }
  return _archive_allocator->archive_mem_allocate(word_size);
}
void G1CollectedHeap::end_archive_alloc_range(GrowableArray<MemRegion>* ranges,
                                              size_t end_alignment_in_bytes) {
  assert_at_safepoint_on_vm_thread();
  assert(_archive_allocator != nullptr, "_archive_allocator not initialized");

  // Call complete_archive to do the real work, filling in the MemRegion
  // array with the archive regions.
  _archive_allocator->complete_archive(ranges, end_alignment_in_bytes);
  delete _archive_allocator;
_archive_allocator = nullptr;
}
bool G1CollectedHeap::check_archive_addresses(MemRegion* ranges, size_t count) {
assert(ranges != NULL, "MemRegion array NULL");
assert(count != 0, "No MemRegions provided");
  MemRegion reserved = _hrm.reserved();
  for (size_t i = 0; i < count; i++) {
    if (!reserved.contains(ranges[i].start()) || !reserved.contains(ranges[i].last())) {
      return false;
    }
  }
  return true;
}
bool G1CollectedHeap::alloc_archive_regions(MemRegion* ranges,
size_t count, bool open) {
assert(!is_init_completed(), "Expect to be called at JVM init time");
assert(ranges != NULL, "MemRegion array NULL");
assert(count != 0, "No MemRegions provided");
MutexLocker x(Heap_lock);
  // Temporarily disable pretouching of heap pages. This interface is used
  // when mmap'ing archived heap data in, so pre-touching is wasted.
FlagSetting fs(AlwaysPreTouch, false);
  HeapRegion* prev_last_region = NULL;

  // For each specified MemRegion range, allocate the corresponding G1
  // regions and mark them as archive regions. We expect the ranges
  // in ascending starting address order, without overlap.
  for (size_t i = 0; i < count; i++) {
MemRegion curr_range = ranges[i];
HeapWord* start_address = curr_range.start();
size_t word_size = curr_range.word_size();
HeapWord* last_address = curr_range.last();
size_t commits = 0;
    // Check for ranges that start in the same G1 region in which the previous
    // range ended, and adjust the start address so we don't try to allocate
    // the same region again. If the current range is entirely within that
    // region, skip it, just adjusting the recorded top.
    HeapRegion* start_region = _hrm.addr_to_region(start_address);
    if ((prev_last_region != NULL) && (start_region == prev_last_region)) {
      start_address = start_region->end();
      if (start_address > last_address) {
increase_used(word_size * HeapWordSize);
        start_region->set_top(last_address + 1);
        continue;
}
start_region->set_top(start_address);
curr_range = MemRegion(start_address, last_address + 1);
start_region = _hrm.addr_to_region(start_address);
}
    // Perform the actual region allocation, exiting if it fails.
    // Then note how much new space we have allocated.
    if (!_hrm.allocate_containing_regions(curr_range, &commits, workers())) {
      return false;
}
    increase_used(word_size * HeapWordSize);
    if (commits != 0) {
log_debug(gc, ergo, heap)("Attempt heap expansion (allocate archive regions). Total size: " SIZE_FORMAT "B",
HeapRegion::GrainWords * HeapWordSize * commits);
}
    // Mark each G1 region touched by the range as archive, add it to
    // the archive set, and set top.
HeapRegion* curr_region = _hrm.addr_to_region(start_address);
HeapRegion* last_region = _hrm.addr_to_region(last_address);
prev_last_region = last_region;
while (curr_region != NULL) {
      assert(curr_region->is_empty() && !curr_region->is_pinned(),
             "Region already in use (index %u)", curr_region->hrm_index());
      if (open) {
curr_region->set_open_archive();
} else {
curr_region->set_closed_archive();
}
_hr_printer.alloc(curr_region);
_archive_set.add(curr_region);
HeapWord* top;
      HeapRegion* next_region;
      if (curr_region != last_region) {
top = curr_region->end();
next_region = _hrm.next_region_in_heap(curr_region);
} else {
top = last_address + 1;
next_region = NULL;
}
curr_region->set_top(top);
curr_region = next_region;
}
  }
  return true;
}
void G1CollectedHeap::fill_archive_regions(MemRegion* ranges, size_t count) {
assert(!is_init_completed(), "Expect to be called at JVM init time");
assert(ranges != NULL, "MemRegion array NULL");
assert(count != 0, "No MemRegions provided");
MemRegion reserved = _hrm.reserved();
HeapWord *prev_last_addr = NULL;
HeapRegion* prev_last_region = NULL;
  // For each MemRegion, create filler objects, if needed, in the G1 regions
  // that contain the address range. The address range actually within the
  // MemRegion will not be modified. That is assumed to have been initialized
  // elsewhere, probably via an mmap of archived heap data.
  MutexLocker x(Heap_lock);
  for (size_t i = 0; i < count; i++) {
HeapWord* start_address = ranges[i].start();
HeapWord* last_address = ranges[i].last();
    HeapRegion* start_region = _hrm.addr_to_region(start_address);
    HeapRegion* last_region = _hrm.addr_to_region(last_address);
    HeapWord* bottom_address = start_region->bottom();

    // Check for a range beginning in the same region in which the
    // previous one ended.
    if (start_region == prev_last_region) {
bottom_address = prev_last_addr + 1;
}
    // Verify that the regions were all marked as archive regions by
    // alloc_archive_regions.
    HeapRegion* curr_region = start_region;
    while (curr_region != NULL) {
      guarantee(curr_region->is_archive(),
                "Expected archive region at index %u", curr_region->hrm_index());
      if (curr_region != last_region) {
curr_region = _hrm.next_region_in_heap(curr_region);
} else {
curr_region = NULL;
}
}
    prev_last_addr = last_address;
    prev_last_region = last_region;

    // Fill the memory below the allocated range with dummy object(s),
    // if the region bottom does not match the range start, or if the previous
    // range ended within the same G1 region, and there is a gap.
    assert(start_address >= bottom_address,
           "bottom address should not be greater than start address");
    if (start_address > bottom_address) {
size_t fill_size = pointer_delta(start_address, bottom_address);
G1CollectedHeap::fill_with_objects(bottom_address, fill_size);
increase_used(fill_size * HeapWordSize);
}
}
}
inline HeapWord* G1CollectedHeap::attempt_allocation(size_t min_word_size,
size_t desired_word_size,
size_t* actual_word_size) {
assert_heap_not_locked_and_not_at_safepoint();
  assert(!is_humongous(desired_word_size), "attempt_allocation() should not "
         "be called for humongous allocation requests");
HeapWord* result = _allocator->attempt_allocation(min_word_size, desired_word_size, actual_word_size);
if (result == NULL) {
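    // The slow path allocates exactly desired_word_size words on success, so
    // record that as the actual allocation size up front.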
*actual_word_size = desired_word_size;
result = attempt_allocation_slow(desired_word_size);
}
  assert_heap_not_locked();
  if (result != NULL) {
assert(*actual_word_size != 0, "Actual size must have been set here");
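    // Mark the card table entries covering the new block as young so the
    // post-write barrier can cheaply filter out updates to it.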
dirty_young_block(result, *actual_word_size);
} else {
*actual_word_size = 0;
}
return result;
}
void G1CollectedHeap::populate_archive_regions_bot_part(MemRegion* ranges, size_t count) {
assert(!is_init_completed(), "Expect to be called at JVM init time");
assert(ranges != NULL, "MemRegion array NULL");
assert(count != 0, "No MemRegions provided");
HeapWord* st = ranges[0].start();
HeapWord* last = ranges[count-1].last();
HeapRegion* hr_st = _hrm.addr_to_region(st);
  HeapRegion* hr_last = _hrm.addr_to_region(last);

  HeapRegion* hr_curr = hr_st;
  while (hr_curr != NULL) {
    hr_curr->update_bot();
    if (hr_curr != hr_last) {
      hr_curr = _hrm.next_region_in_heap(hr_curr);
    } else {
      hr_curr = NULL;
    }
  }
}
void G1CollectedHeap::dealloc_archive_regions(MemRegion* ranges, size_t count) {
assert(!is_init_completed(), "Expect to be called at JVM init time");
assert(ranges != NULL, "MemRegion array NULL");
assert(count != 0, "No MemRegions provided");
MemRegion reserved = _hrm.reserved();
HeapWord* prev_last_addr = NULL;
HeapRegion* prev_last_region = NULL;
size_t size_used = 0;
uint shrink_count = 0;
  // For each MemRegion, free the G1 regions that constitute it, and
  // notify mark-sweep that the range is no longer to be considered 'archive.'
  MutexLocker x(Heap_lock);
  for (size_t i = 0; i < count; i++) {
HeapWord* start_address = ranges[i].start();
HeapWord* last_address = ranges[i].last();
    size_used += ranges[i].byte_size();
    prev_last_addr = last_address;

    HeapRegion* start_region = _hrm.addr_to_region(start_address);
    HeapRegion* last_region = _hrm.addr_to_region(last_address);

    // Check for ranges that start in the same G1 region in which the previous
    // range ended, and adjust the start address so we don't try to free
    // the same region again. If the current range is entirely within that
    // region, skip it.
    if (start_region == prev_last_region) {
      start_address = start_region->end();
      if (start_address > last_address) {
        continue;
}
start_region = _hrm.addr_to_region(start_address);
}
prev_last_region = last_region;
    // After verifying that each region was marked as an archive region by
    // alloc_archive_regions, set it free and empty and uncommit it.
    HeapRegion* curr_region = start_region;
    while (curr_region != NULL) {
      guarantee(curr_region->is_archive(),
                "Expected archive region at index %u", curr_region->hrm_index());
uint curr_index = curr_region->hrm_index();
_archive_set.remove(curr_region);
curr_region->set_free();
      curr_region->set_top(curr_region->bottom());
      if (curr_region != last_region) {
curr_region = _hrm.next_region_in_heap(curr_region);
} else {
curr_region = NULL;
      }
      _hrm.shrink_at(curr_index, 1);
      shrink_count++;
    }
  }

  if (shrink_count != 0) {
    log_debug(gc, ergo, heap)("Attempt heap shrinking (archive regions). Total size: " SIZE_FORMAT "B",
                              HeapRegion::GrainWords * HeapWordSize * shrink_count);
    // Explicit uncommit.
    uncommit_regions(shrink_count);
  }
  decrease_used(size_used);
}
HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size) {
ResourceMark rm; // For retrieving the thread names in log messages.
  // The structure of this method has a lot of similarities to
  // attempt_allocation_slow(). The reason these two were not merged
  // into a single one is that such a method would require several "if
  // allocation is not humongous do this, otherwise do that" conditional
  // paths which would obscure its flow. In fact, an early version of
  // this code did use a unified method which was harder to follow and,
  // as a result, it had subtle bugs that were hard to track down. So
  // keeping these two methods separate allows each to be more readable.
  // It will be good to keep these two in sync as much as possible.
assert_heap_not_locked_and_not_at_safepoint();
assert(is_humongous(word_size), "attempt_allocation_humongous() " "should only be called for humongous allocations");
  // Humongous objects can exhaust the heap quickly, so we should check if we
  // need to start a marking cycle at each humongous object allocation. We do
  // the check before we do the actual allocation. The reason for doing it
  // before the allocation is that we avoid having to keep track of the newly
  // allocated memory while we do a GC.
  if (policy()->need_to_start_conc_mark("concurrent humongous allocation",
word_size)) {
collect(GCCause::_g1_humongous_allocation);
}
  // We will loop until a) we manage to successfully perform the
  // allocation or b) we successfully schedule a collection which
  // fails to perform the allocation. b) is the only case when we'll
  // return NULL.
  HeapWord* result = NULL;
  for (uint try_count = 1, gclocker_retry_count = 0; /* we'll return */; try_count += 1) {
    bool should_try_gc;
    bool preventive_collection_required = false;
uint gc_count_before;
{
MutexLocker x(Heap_lock);
size_t size_in_regions = humongous_obj_size_in_regions(word_size);
      preventive_collection_required = policy()->preventive_collection_required((uint)size_in_regions);
      if (!preventive_collection_required) {
        // Given that humongous objects are not allocated in young
        // regions, we'll first try to do the allocation without doing a
        // collection hoping that there's enough space in the heap.
        result = humongous_obj_allocate(word_size);
        if (result != NULL) {
policy()->old_gen_alloc_tracker()->
            add_allocated_humongous_bytes_since_last_gc(size_in_regions * HeapRegion::GrainBytes);
          return result;
}
}
      // Only try a GC if the GCLocker does not signal the need for a GC. Wait until
      // the GCLocker initiated GC has been performed and then retry. This includes
      // the case when the GCLocker is not active but its initiated GC has not yet
      // been performed.
      should_try_gc = !GCLocker::needs_gc();
      // Read the GC count while still holding the Heap_lock.
gc_count_before = total_collections();
}
if (should_try_gc) {
GCCause::Cause gc_cause = preventive_collection_required ? GCCause::_g1_preventive_collection
                                                               : GCCause::_g1_humongous_allocation;
      bool succeeded;
      result = do_collection_pause(word_size, gc_count_before, &succeeded, gc_cause);
      if (result != NULL) {
assert(succeeded, "only way to get back a non-NULL result");
log_trace(gc, alloc)("%s: Successfully scheduled collection returning " PTR_FORMAT,
Thread::current()->name(), p2i(result));
size_t size_in_regions = humongous_obj_size_in_regions(word_size);
policy()->old_gen_alloc_tracker()->
          record_collection_pause_humongous_allocation(size_in_regions * HeapRegion::GrainBytes);
        return result;
}
      if (succeeded) {
        // We successfully scheduled a collection which failed to allocate. No
        // point in trying to allocate further. We'll just return NULL.
log_trace(gc, alloc)("%s: Successfully scheduled collection failing to allocate "
SIZE_FORMAT " words", Thread::current()->name(), word_size); return NULL;
}
log_trace(gc, alloc)("%s: Unsuccessfully scheduled collection allocating " SIZE_FORMAT "",
Thread::current()->name(), word_size);
    } else {
      // Failed to schedule a collection.
      if (gclocker_retry_count > GCLockerRetryAllocationCount) {
log_warning(gc, alloc)("%s: Retried waiting for GCLocker too often allocating "
SIZE_FORMAT " words", Thread::current()->name(), word_size); return NULL;
}
log_trace(gc, alloc)("%s: Stall until clear", Thread::current()->name()); // The GCLocker is either active or the GCLocker initiated // GC has not yet been performed. Stall until it is and // then retry the allocation.
GCLocker::stall_until_clear();
gclocker_retry_count += 1;
}
    // We can reach here if we were unsuccessful in scheduling a
    // collection (because another thread beat us to it) or if we were
    // stalled due to the GC locker. In either case we should retry the
    // allocation attempt in case another thread successfully
    // performed a collection and reclaimed enough space.
    // Humongous object allocation always needs a lock, so we wait for the retry
    // in the next iteration of the loop, unlike for the regular iteration case.
    // Give a warning if we seem to be looping forever.
if ((QueuedAllocationWarningCount > 0) &&
(try_count % QueuedAllocationWarningCount == 0)) {
log_warning(gc, alloc)("%s: Retried allocation %u times for " SIZE_FORMAT " words",
Thread::current()->name(), try_count, word_size);
}
}
  ShouldNotReachHere();
  return NULL;
}
HeapWord* G1CollectedHeap::attempt_allocation_at_safepoint(size_t word_size,
                                                           bool expect_null_mutator_alloc_region) {
assert_at_safepoint_on_vm_thread();
  assert(!_allocator->has_mutator_alloc_region() || !expect_null_mutator_alloc_region,
         "the current alloc region was unexpectedly found to be non-NULL");
  if (!is_humongous(word_size)) {
    return _allocator->attempt_allocation_locked(word_size);
} else {
    HeapWord* result = humongous_obj_allocate(word_size);
    if (result != NULL && policy()->need_to_start_conc_mark("STW humongous allocation")) {
collector_state()->set_initiate_conc_mark_if_possible(true);
    }
    return result;
}
ShouldNotReachHere();
}
class PostCompactionPrinterClosure: public HeapRegionClosure {
private:
  G1HRPrinter* _hr_printer;
public:
  bool do_heap_region(HeapRegion* hr) {
assert(!hr->is_young(), "not expecting to find young regions");
    _hr_printer->post_compaction(hr);
    return false;
  }

  PostCompactionPrinterClosure(G1HRPrinter* hr_printer)
    : _hr_printer(hr_printer) { }
};
void G1CollectedHeap::print_heap_after_full_collection() {
  // Post collection region logging.
  // We should do this after we potentially resize the heap so
  // that all the COMMIT / UNCOMMIT events are generated before
  // the compaction events.
  if (_hr_printer.is_active()) {
PostCompactionPrinterClosure cl(hr_printer());
heap_region_iterate(&cl);
}
}
bool G1CollectedHeap::abort_concurrent_cycle() {
  // Disable discovery and empty the discovered lists
  // for the CM ref processor.
_ref_processor_cm->disable_discovery();
_ref_processor_cm->abandon_partial_discovery();
_ref_processor_cm->verify_no_references_recorded();
  // Abandon current iterations of concurrent marking and concurrent
  // refinement, if any are in progress.
  return concurrent_mark()->concurrent_cycle_abort();
}
void G1CollectedHeap::prepare_heap_for_full_collection() {
  // Make sure we'll choose a new allocation region afterwards.
_allocator->release_mutator_alloc_regions();
_allocator->abandon_gc_alloc_regions();
// We may have added regions to the current incremental collection // set between the last GC or pause and now. We need to clear the // incremental collection set and then start rebuilding it afresh // after this full GC.
  abandon_collection_set(collection_set());

  _hrm.remove_all_free_regions();
}

void G1CollectedHeap::prepare_heap_for_mutators() {
  // Delete metaspaces for unloaded class loaders and clean up loader_data graph
ClassLoaderDataGraph::purge(/*at_safepoint*/true);
DEBUG_ONLY(MetaspaceUtils::verify();)
// Prepare heap for normal collections.
assert(num_free_regions() == 0, "we should not have added any free regions");
  rebuild_region_sets(false /* free_list_only */);
abort_refinement();
resize_heap_if_necessary();
uncommit_regions_if_necessary();
// Rebuild the code root lists for each region
rebuild_code_roots();
// Start a new incremental collection set for the next pause
start_new_collection_set();
_allocator->init_mutator_alloc_regions();
// Post collection state updates.
MetaspaceGC::compute_new_size();
}
void G1CollectedHeap::abort_refinement() {
  if (G1HotCardCache::use_cache()) {
_hot_card_cache->reset_hot_cache();
}
// Discard all remembered set updates and reset refinement statistics.
G1BarrierSet::dirty_card_queue_set().abandon_logs_and_stats();
assert(G1BarrierSet::dirty_card_queue_set().num_cards() == 0, "DCQS should be empty");
concurrent_refine()->get_and_reset_refinement_stats();
}
  // At this point there should be no regions in the
  // entire heap tagged as young.
assert(check_young_list_empty(), "young list should be empty at this point");
  // Note: since we've just done a full GC, concurrent
  // marking is no longer active. Therefore we need not
  // re-enable reference discovery for the CM ref processor.
  // That will be done at the start of the next marking cycle.
  // We also know that the STW processor should no longer
  // discover any new references.
assert(!_ref_processor_stw->discovery_enabled(), "Postcondition");
assert(!_ref_processor_cm->discovery_enabled(), "Postcondition");
_ref_processor_stw->verify_no_references_recorded();
_ref_processor_cm->verify_no_references_recorded();
}
  // Full collection was successfully completed.
  return true;
}
void G1CollectedHeap::do_full_collection(bool clear_all_soft_refs) {
  // Currently, there is no facility in the do_full_collection(bool) API to notify
  // the caller that the collection did not succeed (e.g., because it was locked
  // out by the GC locker). So, right now, we'll ignore the return value.

  // When clear_all_soft_refs is set we want to do a maximal compaction
  // not leaving any dead wood.
  bool do_maximal_compaction = clear_all_soft_refs;
  bool dummy = do_full_collection(true,  /* explicit_gc */
clear_all_soft_refs,
do_maximal_compaction);
}
bool G1CollectedHeap::upgrade_to_full_collection() {
GCCauseSetter compaction(this, GCCause::_g1_compaction_pause);
log_info(gc, ergo)("Attempting full compaction clearing soft references"); bool success = do_full_collection(false/* explicit gc */, true/* clear_all_soft_refs */, false/* do_maximal_compaction */); // do_full_collection only fails if blocked by GC locker and that can't // be the case here since we only call this when already completed one gc.
assert(success, "invariant"); return success;
}
  // In a G1 heap, we're supposed to keep allocation from failing by
  // incremental pauses. Therefore, at least for now, we'll favor
  // expansion over collection. (This might change in the future if we can
  // do something smarter than full collection to satisfy a failed alloc.)
  result = expand_and_allocate(word_size);
  if (result != NULL) {
    return result;
}
if (do_gc) {
    GCCauseSetter compaction(this, GCCause::_g1_compaction_pause);
    // Expansion didn't work, we'll try to do a Full GC.
    // If maximal_compaction is set we clear all soft references and don't
    // allow any dead wood to be left on the heap.
    if (maximal_compaction) {
log_info(gc, ergo)("Attempting maximal full compaction clearing soft references");
} else {
log_info(gc, ergo)("Attempting full compaction");
}
*gc_succeeded = do_full_collection(false, /* explicit_gc */
                                       maximal_compaction, /* clear_all_soft_refs */
maximal_compaction /* do_maximal_compaction */);
}
// Attempts to allocate followed by Full GC.
HeapWord* result =
    satisfy_failed_allocation_helper(word_size,
                                     true,  /* do_gc */
                                     false, /* maximum_collection */
                                     false, /* expect_null_mutator_alloc_region */
succeeded);
  if (result != NULL || !*succeeded) {
    return result;
}
// Attempts to allocate followed by Full GC that will collect all soft references.
  result = satisfy_failed_allocation_helper(word_size,
                                            true, /* do_gc */
                                            true, /* maximum_collection */
                                            true, /* expect_null_mutator_alloc_region */
succeeded);
  if (result != NULL || !*succeeded) {
    return result;
}
// Attempts to allocate, no GC
  result = satisfy_failed_allocation_helper(word_size,
                                            false, /* do_gc */
                                            false, /* maximum_collection */
                                            true,  /* expect_null_mutator_alloc_region */
succeeded);
  if (result != NULL) {
    return result;
}
  assert(!soft_ref_policy()->should_clear_all_soft_refs(),
         "Flag should have been handled and cleared prior to this point");
  // What else? We might try synchronous finalization later. If the total
  // space available is large enough for the allocation, then a more
  // complete compaction phase than we've tried so far might be
  // appropriate.
  return NULL;
}
// Attempting to expand the heap sufficiently
// to support an allocation of the given "word_size". If
// successful, perform the allocation and return the address of the
// allocated block, or else "NULL".
if (is_maximal_no_gc()) {
log_debug(gc, ergo, heap)("Did not expand the heap (heap already fully expanded)"); returnfalse;
}
double expand_heap_start_time_sec = os::elapsedTime();
uint regions_to_expand = (uint)(aligned_expand_bytes / HeapRegion::GrainBytes);
assert(regions_to_expand > 0, "Must expand by at least one region");
if (expanded_by > 0) {
size_t actual_expand_bytes = expanded_by * HeapRegion::GrainBytes;
assert(actual_expand_bytes <= aligned_expand_bytes, "post-condition");
policy()->record_new_heap_size(num_regions());
} else {
log_debug(gc, ergo, heap)("Did not expand the heap (heap expansion operation failed)");
    // The expansion of the virtual storage space was unsuccessful.
    // Let's see if it was because we ran out of swap.
    if (G1ExitOnExpansionFailure &&
        _hrm.available() >= regions_to_expand) {
      // We had head room...
vm_exit_out_of_memory(aligned_expand_bytes, OOM_MMAP_ERROR, "G1 heap expansion");
}
  }
  return expanded_by > 0;
}
if (expanded_by == 0) {
assert(is_maximal_no_gc(), "Should be no regions left, available: %u", _hrm.available());
log_debug(gc, ergo, heap)("Did not expand the heap (heap already fully expanded)"); returnfalse;
}
  // We should only reach here at the end of a Full GC or during Remark which
  // means we should not be holding on to any GC alloc regions. The method
  // below will make sure of that and do any remaining clean up.
_allocator->abandon_gc_alloc_regions();
  // Instead of tearing down / rebuilding the free lists here, we
  // could instead use the remove_all_pending() method on free_list to
  // remove only the ones that we need to remove.
_hrm.remove_all_free_regions();
shrink_helper(shrink_bytes);
  rebuild_region_sets(true /* free_list_only */);
class OldRegionSetChecker : public HeapRegionSetChecker {
public:
  void check_mt_safety() {
    // Master Old Set MT safety protocol:
    // (a) If we're at a safepoint, operations on the master old set
    // should be invoked:
    // - by the VM thread (which will serialize them), or
    // - by the GC workers while holding the FreeList_lock, if we're
    //   at a safepoint for an evacuation pause (this lock is taken
    //   anyway when a GC alloc region is retired so that a new one
    //   is allocated from the free list), or
    // - by the GC workers while holding the OldSets_lock, if we're at a
    //   safepoint for a cleanup pause.
    // (b) If we're not at a safepoint, operations on the master old set
    // should be invoked while holding the Heap_lock.
if (SafepointSynchronize::is_at_safepoint()) {
guarantee(Thread::current()->is_VM_thread() ||
                FreeList_lock->owned_by_self() || OldSets_lock->owned_by_self(),
                "master old set MT safety protocol at a safepoint");
} else {
guarantee(Heap_lock->owned_by_self(), "master old set MT safety protocol outside a safepoint");
}
  }

  bool is_correct_type(HeapRegion* hr) { return hr->is_old(); }

  const char* get_description() { return "Old Regions"; }
};
class ArchiveRegionSetChecker : public HeapRegionSetChecker {
public:
  void check_mt_safety() {
    guarantee(!Universe::is_fully_initialized() || SafepointSynchronize::is_at_safepoint(),
              "May only change archive regions during initialization or safepoint.");
  }

  bool is_correct_type(HeapRegion* hr) { return hr->is_archive(); }

  const char* get_description() { return "Archive Regions"; }
};
class HumongousRegionSetChecker : public HeapRegionSetChecker {
public:
  void check_mt_safety() {
    // Humongous Set MT safety protocol:
    // (a) If we're at a safepoint, operations on the master humongous
    // set should be invoked by either the VM thread (which will
    // serialize them) or by the GC workers while holding the
    // OldSets_lock.
    // (b) If we're not at a safepoint, operations on the master
    // humongous set should be invoked while holding the Heap_lock.
if (SafepointSynchronize::is_at_safepoint()) {
guarantee(Thread::current()->is_VM_thread() ||
OldSets_lock->owned_by_self(), "master humongous set MT safety protocol at a safepoint");
} else {
guarantee(Heap_lock->owned_by_self(), "master humongous set MT safety protocol outside a safepoint");
}
  }

  bool is_correct_type(HeapRegion* hr) { return hr->is_humongous(); }

  const char* get_description() { return "Humongous Regions"; }
};
// Override the default _filler_array_max_size so that no humongous filler
// objects are created.
_filler_array_max_size = _humongous_object_threshold_in_words;
// Override the default _stack_chunk_max_size so that no humongous stack chunks are created
_stack_chunk_max_size = _humongous_object_threshold_in_words;
uint n_queues = ParallelGCThreads;
_task_queues = new G1ScannerTasksQueueSet(n_queues);
for (uint i = 0; i < n_queues; i++) {
G1ScannerTasksQueue* q = new G1ScannerTasksQueue();
_task_queues->register_queue(i, q);
}
jint G1CollectedHeap::initialize_service_thread() {
  _service_thread = new G1ServiceThread();
  if (_service_thread->osthread() == NULL) {
    vm_shutdown_during_initialization("Could not create G1ServiceThread");
    return JNI_ENOMEM;
  }
  return JNI_OK;
}
jint G1CollectedHeap::initialize() {
// Necessary to satisfy locking discipline assertions.
MutexLocker x(Heap_lock);
  // While there are no constraints in the GC code that HeapWordSize
  // be any particular value, there are multiple other areas in the
  // system which believe this to be true (e.g. oop->object_size in some
  // cases incorrectly returns the size in wordSize units rather than
  // HeapWordSize).
guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize");
// Ensure that the sizes are properly aligned.
Universe::check_alignment(init_byte_size, HeapRegion::GrainBytes, "g1 heap");
Universe::check_alignment(reserved_byte_size, HeapRegion::GrainBytes, "g1 heap");
Universe::check_alignment(reserved_byte_size, HeapAlignment, "g1 heap");
// Reserve the maximum.
  // When compressed oops are enabled, the preferred heap base
  // is calculated by subtracting the requested size from the
  // 32Gb boundary and using the result as the base address for
  // heap reservation. If the requested size is not aligned to
  // HeapRegion::GrainBytes (i.e. the alignment that is passed
  // into the ReservedHeapSpace constructor) then the actual
  // base of the reserved heap may end up differing from the
  // address that was requested (i.e. the preferred heap base).
  // If this happens then we could end up using a non-optimal
  // compressed oops mode.
// Create the barrier set for the entire reserved region.
G1CardTable* ct = new G1CardTable(heap_rs.region());
ct->initialize();
G1BarrierSet* bs = new G1BarrierSet(ct);
bs->initialize();
assert(bs->is_a(BarrierSet::G1BarrierSet), "sanity");
BarrierSet::set_barrier_set(bs);
_card_table = ct;