/* * Copyright (c) 2001, 2021, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 only, as * published by the Free Software Foundation. * * This code is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * version 2 for more details (a copy is included in the LICENSE file that * accompanied this code). * * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA * or visit www.oracle.com if you need additional information or have any * questions. *
*/
// Order regions according to GC efficiency. This will cause regions with a lot // of live objects and large remembered sets to end up at the end of the array. // Given that we might skip collecting the last few old regions, if after a few // mixed GCs the remaining have reclaimable bytes under a certain threshold, the // hope is that the ones we'll skip are ones with both large remembered sets and // a lot of live objects, not the ones with just a lot of live objects if we // ordered according to the amount of reclaimable bytes per region. staticint order_regions(HeapRegion* hr1, HeapRegion* hr2) { // Make sure that NULL entries are moved to the end. if (hr1 == NULL) { if (hr2 == NULL) { return 0;
} else { return 1;
}
} elseif (hr2 == NULL) { return -1;
}
// Determine collection set candidates: For all regions determine whether they
// should be a collection set candidates, calculate their efficiency, sort and
// return them as G1CollectionSetCandidates instance.
// Threads calculate the GC efficiency of the regions they get to process, and
// put them into some work area unsorted. At the end the array is sorted and
// copied into the G1CollectionSetCandidates instance; the caller will be the new
// owner of this object.
class G1BuildCandidateRegionsTask : public WorkerTask {

  // Work area for building the set of collection set candidates. Contains references
  // to heap regions with their GC efficiencies calculated. To reduce contention
  // on claiming array elements, worker threads claim parts of this array in chunks;
  // Array elements may be NULL as threads might not get enough regions to fill
  // up their chunks completely.
  // Final sorting will remove them.
  class G1BuildCandidateArray : public StackObj {

    // Total capacity of _data, including per-worker chunk-alignment waste
    // (see required_array_size()).
    uint const _max_size;
    // Number of array slots a worker claims per claim_chunk() call.
    uint const _chunk_size;
    // Unsorted work area of candidate regions; NULL entries mark claimed but
    // unfilled slots.
    HeapRegion** _data;
    // Next unclaimed index; advanced atomically in claim_chunk().
    uint volatile _cur_claim_idx;
// Calculates the maximum array size that will be used. static uint required_array_size(uint num_regions, uint chunk_size, uint num_workers) {
uint const max_waste = num_workers * chunk_size; // The array should be aligned with respect to chunk_size.
uint const aligned_num_regions = ((num_regions + chunk_size - 1) / chunk_size) * chunk_size;
    // Claim a new chunk of the work area, returning its bounds [from, to[.
    // Thread-safe: the claim index is advanced with a single atomic add, so
    // concurrent workers receive disjoint ranges of _chunk_size slots each.
    void claim_chunk(uint& from, uint& to) {
      uint result = Atomic::add(&_cur_claim_idx, _chunk_size);
      // The array is sized (required_array_size) so that claims never overflow.
      assert(_max_size > result - 1, "Array too small, is %u should be %u with chunk size %u.",
             _max_size, result, _chunk_size);
      from = result - _chunk_size;
      to = result;
    }
    // Set element in array: store hr at slot idx of the claimed work area.
    // The slot must be in bounds and must not have been written before —
    // each claimed slot is filled at most once.
    void set(uint idx, HeapRegion* hr) {
      assert(idx < _max_size, "Index %u out of bounds %u", idx, _max_size);
      assert(_data[idx] == NULL, "Value must not have been set.");
      _data[idx] = hr;
    }
    // Sort the claimed part of the work area with order_regions (which moves
    // NULL filler entries to the end) and copy the first num_regions sorted
    // entries into dest. No-op if nothing was claimed.
    void sort_and_copy_into(HeapRegion** dest, uint num_regions) {
      if (_cur_claim_idx == 0) {
        return;
      }
      // Slots beyond the claimed range must never have been written.
      for (uint i = _cur_claim_idx; i < _max_size; i++) {
        assert(_data[i] == NULL, "must be");
      }
      QuickSort::sort(_data, _cur_claim_idx, order_regions, true);
      // After sorting, everything past the first num_regions entries must be
      // a NULL filler entry from a partially-filled chunk.
      for (uint i = num_regions; i < _max_size; i++) {
        assert(_data[i] == NULL, "must be");
      }
      for (uint i = 0; i < num_regions; i++) {
        dest[i] = _data[i];
      }
    }
};
  // Per-region closure. In addition to determining whether a region should be
  // added to the candidates, and calculating those regions' gc efficiencies, also
  // gather additional statistics.
  class G1BuildCandidateRegionsClosure : public HeapRegionClosure {

    // Shared work area that accepted candidate regions are added to.
    G1BuildCandidateArray* _array;
bool do_heap_region(HeapRegion* r) { // We will skip any region that's currently used as an old GC // alloc region (we should not consider those for collection // before we fill them up). if (should_add(r) && !G1CollectedHeap::heap()->is_old_gc_alloc_region(r)) {
add_region(r);
} elseif (r->is_old()) { // Keep remembered sets for humongous regions, otherwise clean out remembered // sets for old regions.
r->rem_set()->clear(true/* only_cardset */);
} else {
assert(r->is_archive() || !r->is_old() || !r->rem_set()->is_tracked(), "Missed to clear unused remembered set of region %u (%s) that is %s",
r->hrm_index(), r->get_type_str(), r->rem_set()->get_state_str());
} returnfalse;
}
  // Closure implementing early pruning (removal) of regions meeting the
  // G1HeapWastePercent criteria. That is, either until _max_pruned regions were
  // removed (for forward progress in evacuation) or the waste accumulated by the
  // removed regions is above max_wasted.
  class G1PruneRegionClosure : public HeapRegionClosure {

    // Number of regions pruned so far.
    uint _num_pruned;
    // Waste (bytes) accumulated over the regions pruned so far.
    size_t _cur_wasted;
// NOTE(review): the remainder of this file was replaced by unrelated German
// website-disclaimer text during extraction. The rest of G1PruneRegionClosure
// (its constructor, do_heap_region, and accessors) and all subsequent code are
// missing and must be restored from the original source.