/* * Copyright (c) 2001, 2021, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 only, as * published by the Free Software Foundation. * * This code is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * version 2 for more details (a copy is included in the LICENSE file that * accompanied this code). * * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA * or visit www.oracle.com if you need additional information or have any * questions. *
*/
// Remember where we stopped so that we can continue later.
set_last_setup_region(MemRegion(head.start(), tail.end()));
}
set_bottom(mr.start()); // When expanding concurrently with callers of cas_allocate, setting end // makes the new space available for allocation by other threads. So this // assignment must follow all other configuration and initialization that // might be done for expansion.
Atomic::release_store(end_addr(), mr.end());
// Mangle only the unused space that has not previously // been mangled and that has not been allocated since being // mangled. void MutableSpace::mangle_unused_area() {
mangler()->mangle_unused_area();
}
// Lock-free bump-pointer allocation: atomically advance top by 'size'
// words via compare-and-swap, retrying on contention.
// Returns the start of the allocated block, or NULL if fewer than
// 'size' words remain between top and end.
HeapWord* MutableSpace::cas_allocate(size_t size) {
  do {
    // Read top before end, else the range check may pass when it shouldn't.
    // If end is read first, other threads may advance end and top such that
    // current top > old end and current top + size > current end. Then
    // pointer_delta underflows, allowing installation of top > current end.
    HeapWord* obj = Atomic::load_acquire(top_addr());
    if (pointer_delta(end(), obj) >= size) {
      HeapWord* new_top = obj + size;
      HeapWord* result = Atomic::cmpxchg(top_addr(), obj, new_top);
      // result can be one of two:
      //   the old top value: the exchange succeeded
      //   otherwise: the new value of the top is returned.
      if (result != obj) {
        continue; // another thread beat us to the allocation, try again
      }
      assert(is_object_aligned(obj) && is_object_aligned(new_top), "checking alignment");
      return obj;
    } else {
      // Not enough room left in the space.
      return NULL;
    }
  } while (true);
}
// Only used by oldgen allocation. bool MutableSpace::needs_expand(size_t word_size) const {
assert_lock_strong(PSOldGenExpand_lock); // Holding the lock means end is stable. So while top may be advancing // via concurrent allocations, there is no need to order the reads of top // and end here, unlike in cas_allocate. return pointer_delta(end(), top()) < word_size;
}
// Apply the closure to the oop fields of every object in [bottom, top).
// The iteration limit is sampled once before the walk begins.
void MutableSpace::oop_iterate(OopIterateClosure* cl) {
  HeapWord* const limit = top();
  // Could call object_iterate, but this is easier.
  // oop_iterate_size both visits the object's fields and returns its
  // size, which advances the cursor to the next object.
  for (HeapWord* cur = bottom(); cur < limit; ) {
    cur += cast_to_oop(cur)->oop_iterate_size(cl);
  }
}
// Apply the closure to every object in [bottom, top).
// Note: top() is re-read on each loop test, so objects allocated while
// the walk is in progress may also be visited.
void MutableSpace::object_iterate(ObjectClosure* cl) {
  for (HeapWord* cur = bottom(); cur < top(); cur += cast_to_oop(cur)->size()) {
    cl->do_object(cast_to_oop(cur));
  }
}
// Verify every object in the used part of the space, walking from
// bottom() to top() object by object. Each object's size() advances the
// cursor, so a corrupted object size would also trip the final
// guarantee that the walk lands exactly on top().
void MutableSpace::verify() {
  HeapWord* p = bottom();
  HeapWord* t = top();
  // Removed unused local 'prev_p': it was assigned each iteration but
  // never read (dead store).
  while (p < t) {
    oopDesc::verify(cast_to_oop(p));
    p += cast_to_oop(p)->size();
  }
  guarantee(p == top(), "end of last object must match end of space");
}
Messung V0.5
¤ Dauer der Verarbeitung: 0.16 Sekunden
(vorverarbeitet)
¤
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.