/* * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2020 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 only, as * published by the Free Software Foundation. * * This code is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * version 2 for more details (a copy is included in the LICENSE file that * accompanied this code). * * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA * or visit www.oracle.com if you need additional information or have any * questions. *
*/
using metaspace::ArenaGrowthPolicy; using metaspace::CommitLimiter; using metaspace::InternalStats; using metaspace::MemRangeCounter; using metaspace::MetaspaceArena; using metaspace::SizeAtomicCounter; using metaspace::Settings; using metaspace::ArenaStats;
// See metaspaceArena.cpp: forward declaration, needed by the tests below for
// predicting commit sizes (net request size -> raw chunk word size).
namespace metaspace {
extern size_t get_raw_word_size_for_requested_word_size(size_t net_word_size);
}
// Shared initialization for both constructors: remembers the growth policy,
// creates the arena lock and, while holding that lock, the MetaspaceArena
// itself inside the test context.
// Note: "constchar" in the original was a fused "const char" (would not compile).
void initialize(const ArenaGrowthPolicy* growth_policy, const char* name = "gtest-MetaspaceArena") {
  _growth_policy = growth_policy;
  _lock = new Mutex(Monitor::nosafepoint, "gtest-MetaspaceArenaTest_lock");
  // Lock during space creation, since this is what happens in the VM too
  // (see ClassLoaderData::metaspace_non_null(), which we mimick here).
  {
    MutexLocker ml(_lock, Mutex::_no_safepoint_check_flag);
    _arena = new MetaspaceArena(&_context.cm(), _growth_policy, _lock, &_used_words_counter, name);
  }
  DEBUG_ONLY(_arena->verify());
}
public:
// Create a helper; the growth policy for the arena is derived from the given
// (space type, is_class) tuple.
// Note: "constchar" in the original was a fused "const char" (would not compile).
MetaspaceArenaTestHelper(MetaspaceGtestContext& helper,
                         Metaspace::MetaspaceType space_type, bool is_class,
                         const char* name = "gtest-MetaspaceArena") :
  _context(helper)
{
  initialize(ArenaGrowthPolicy::policy_for_space_type(space_type, is_class), name);
}
// Create a helper; the growth policy is specified directly by the caller.
// Note: "constchar" in the original was a fused "const char" (would not compile).
MetaspaceArenaTestHelper(MetaspaceGtestContext& helper, const ArenaGrowthPolicy* growth_policy,
                         const char* name = "gtest-MetaspaceArena") :
  _context(helper)
{
  initialize(growth_policy, name);
}
// Retrieve usage numbers from the arena and sanity-check the basic
// invariants between them (used <= committed <= capacity). Any of the out
// pointers may be NULL if the caller is not interested in that number.
void usage_numbers_with_test(size_t* p_used, size_t* p_committed, size_t* p_capacity) const {
  _arena->usage_numbers(p_used, p_committed, p_capacity);
  if (p_used != NULL && p_committed != NULL) {
    // Committed can never be below what is in use.
    ASSERT_GE(*p_committed, *p_used);
  }
  if (p_used != NULL) {
    // Since we own the used words counter, it should reflect our usage number 1:1.
    ASSERT_EQ(_used_words_counter.get(), *p_used);
  }
  if (p_committed != NULL && p_capacity != NULL) {
    // Capacity can never be below committed.
    ASSERT_GE(*p_capacity, *p_committed);
  }
}
// Allocate; caller expects success but is not interested in return value void allocate_from_arena_with_tests_expect_success(size_t word_size) {
MetaWord* dummy = NULL;
allocate_from_arena_with_tests_expect_success(&dummy, word_size);
}
// Allocate; it may or may not work; the result is returned in *p_return_value.
// Verifies that usage/committed/capacity numbers stay unchanged on failure and
// move monotonically on success.
// NOTE(review): in this excerpt the locals 'p', 'possible_expansion', 'used2',
// 'committed2' and 'capacity2' are used without any visible declaration — the
// lines performing the actual allocation and the second usage_numbers read
// appear to have been lost in extraction. Restore from the full file before use.
void allocate_from_arena_with_tests(MetaWord** p_return_value, size_t word_size) {
  // Note: usage_numbers walks all chunks in use and counts.
  size_t used = 0, committed = 0, capacity = 0;
  usage_numbers_with_test(&used, &committed, &capacity);
  if (p == NULL) {
    // Allocation failed.
    if (Settings::new_chunks_are_fully_committed()) {
      ASSERT_LT(possible_expansion, MAX_CHUNK_WORD_SIZE);
    } else {
      ASSERT_LT(possible_expansion, word_size);
    }
    // On failure nothing must have changed.
    ASSERT_EQ(used, used2);
    ASSERT_EQ(committed, committed2);
    ASSERT_EQ(capacity, capacity2);
  } else {
    // Allocation succeeded. Should be correctly aligned.
    ASSERT_TRUE(is_aligned(p, sizeof(MetaWord)));
    // used: may go up or may not (since our request may have been satisfied from
    // the freeblocklist whose content already counts as used).
    // committed: may go up, may not
    // capacity: ditto
    ASSERT_GE(used2, used);
    ASSERT_GE(committed2, committed);
    ASSERT_GE(capacity2, capacity);
  }
  *p_return_value = p;
}
// Allocate; it may or may not work; but caller does not care for the result value void allocate_from_arena_with_tests(size_t word_size) {
MetaWord* dummy = NULL;
allocate_from_arena_with_tests(&dummy, word_size);
}
// NOTE(review): orphaned fragment — the beginning of the enclosing function
// (presumably deallocate_with_tests, which re-reads usage numbers into
// used2/committed2/capacity2) is missing from this excerpt.
// Nothing should have changed. Deallocated blocks are added to the free block
// list, which still counts as used.
ASSERT_EQ(used2, used);
ASSERT_EQ(committed2, committed);
ASSERT_EQ(capacity2, capacity);
}
// Convenience method to return the number of chunks in the arena
// (including the current chunk), taken from the arena statistics totals.
int get_number_of_chunks() const {
  return get_arena_statistics().totals()._num;
}
// Test chunk enlargement: // A single MetaspaceArena, left undisturbed with place to grow. Slowly fill arena up. // We should see at least some occurrences of chunk-in-place enlargement. staticvoid test_chunk_enlargment_simple(Metaspace::MetaspaceType spacetype, bool is_class) {
// Do this test for some of the standard types; don't do it for the boot loader type // since that one starts out with max chunk size so we would not see any enlargement.
// Test chunk enlargement: // A single MetaspaceArena, left undisturbed with place to grow. Slowly fill arena up. // We should see occurrences of chunk-in-place enlargement. // Here, we give it an ideal policy which should enable the initial chunk to grow unmolested // until finish.
// NOTE(review): the declarations of the gtest context, the 'helper' object and
// the baseline counter 'n1' (= num_chunks_enlarged() before the loop) are
// missing from this excerpt; confirm against the full file.
TEST_VM(metaspace, MetaspaceArena_test_enlarge_in_place_2) {
  if (Settings::use_allocation_guard()) {
    return;
  }
  // Note: internally, chunk in-place enlargement is disallowed if growing the chunk
  // would cause the arena to claim more memory than its growth policy allows. This
  // is done to prevent the arena to grow too fast.
  //
  // In order to test in-place growth here without that restriction I give it an
  // artificial growth policy which starts out with a tiny chunk size, then balloons
  // right up to max chunk size. This will cause the initial chunk to be tiny, and
  // then the arena is able to grow it without violating growth policy.
  chunklevel_t growth[] = { HIGHEST_CHUNK_LEVEL, ROOT_CHUNK_LEVEL };
  ArenaGrowthPolicy growth_policy(growth, 2);
  size_t allocated = 0;
  while (allocated <= MAX_CHUNK_WORD_SIZE) {
    size_t s = IntRange(32, 128).random_value();
    helper.allocate_from_arena_with_tests_expect_success(s);
    allocated += metaspace::get_raw_word_size_for_requested_word_size(s);
    if (allocated <= MAX_CHUNK_WORD_SIZE) {
      // Chunk should have been enlarged in place
      ASSERT_EQ(1, helper.get_number_of_chunks());
    } else {
      // Next chunk should have started
      ASSERT_EQ(2, helper.get_number_of_chunks());
    }
  }
  int times_chunk_were_enlarged = metaspace::InternalStats::num_chunks_enlarged() - n1;
  LOG("chunk was enlarged %d times.", times_chunk_were_enlarged);
  ASSERT_GT0(times_chunk_were_enlarged);
}
// Regression test: Given a single MetaspaceArena, left undisturbed with place to grow,
// test that in-place enlargement correctly fails if growing the chunk would bring us
// beyond the max. size of a chunk.
// NOTE(review): the test setup (context, helper, the two allocations described in
// the comment below) is missing from this excerpt, and the final stray '}' suggests
// a lost inner scope; restore from the full file.
TEST_VM(metaspace, MetaspaceArena_test_failing_to_enlarge_in_place_max_chunk_size) {
  // We allocate first a small amount, then the full amount possible.
  // The sum of first and second allocation should bring us above root chunk size.
  // This should work, we should not see any problems, but no chunk enlargement should
  // happen.
  int n1 = metaspace::InternalStats::num_chunks_enlarged();
  int times_chunk_were_enlarged = metaspace::InternalStats::num_chunks_enlarged() - n1;
  LOG("chunk was enlarged %d times.", times_chunk_were_enlarged);
  EXPECT_0(times_chunk_were_enlarged);
}
}
// Regression test: Given a single MetaspaceArena, left undisturbed with place to grow,
// test that in-place enlargement correctly fails if growing the chunk would cause more
// than doubling its size.
// NOTE(review): the test setup and the declaration of the baseline counter 'n1'
// are missing from this excerpt; restore from the full file.
TEST_VM(metaspace, MetaspaceArena_test_failing_to_enlarge_in_place_doubling_chunk_size) {
  int times_chunk_were_enlarged = metaspace::InternalStats::num_chunks_enlarged() - n1;
  LOG("chunk was enlarged %d times.", times_chunk_were_enlarged);
  EXPECT_0(times_chunk_were_enlarged);
}
// Test the MetaspaceArenas' free block list:
// Allocate, deallocate, then allocate the same block again. The second allocate should
// reuse the deallocated block.
// NOTE(review): the allocate/deallocate/allocate sequence producing 'p1' and 'p2'
// is missing from this excerpt; restore from the full file.
TEST_VM(metaspace, MetaspaceArena_deallocate) {
  if (Settings::use_allocation_guard()) {
    return;
  }
  for (size_t s = 2; s <= MAX_CHUNK_WORD_SIZE; s *= 2) {
    MetaspaceGtestContext context;
    MetaspaceArenaTestHelper helper(context, Metaspace::StandardMetaspaceType, false);
    // Actually, we should get the very same allocation back
    ASSERT_EQ(p1, p2);
  }
}
// NOTE(review): 'staticvoid' is almost certainly a fused 'static void'
// (extraction artifact). Also missing from this excerpt: the declarations of
// 'commit_limit' and the limited 'context' the helpers are created with, and
// the function's closing brace. Restore from the full file.
staticvoid test_recover_from_commit_limit_hit() {
  if (Settings::new_chunks_are_fully_committed()) {
    return; // This would throw off the commit counting in this test.
  }
  // Test:
  // - Multiple MetaspaceArena allocate (operating under the same commit limiter).
  // - One, while attempting to commit parts of its current chunk on demand,
  //   triggers the limit and cannot commit its chunk further.
  // - We release the other MetaspaceArena - its content is put back to the
  //   freelists.
  // - We re-attempt allocation from the first manager. It should now succeed.
  //
  // This means if the first MetaspaceArena may have to let go of its current chunk and
  // retire it and take a fresh chunk from the freelist.
  // The first MetaspaceArena mimicks a micro loader. This will fill the free
  // chunk list with very small chunks. We allocate from them in an interleaved
  // way to cause fragmentation.
  MetaspaceArenaTestHelper helper1(context, Metaspace::ReflectionMetaspaceType, false);
  MetaspaceArenaTestHelper helper2(context, Metaspace::ReflectionMetaspaceType, false);
  // This MetaspaceArena should hit the limit. We use BootMetaspaceType here since
  // it gets a large initial chunk which is committed on demand and we are likely
  // to hit a commit limit while trying to expand it.
  MetaspaceArenaTestHelper helper3(context, Metaspace::BootMetaspaceType, false);
  // Allocate space until we have below two but above one granule left
  size_t allocated_from_1_and_2 = 0;
  while (context.commit_limiter().possible_expansion_words() >= Settings::commit_granule_words() * 2 &&
         allocated_from_1_and_2 < commit_limit) {
    helper1.allocate_from_arena_with_tests_expect_success(1);
    helper2.allocate_from_arena_with_tests_expect_success(1);
    allocated_from_1_and_2 += 2;
  }
  // Now, allocating from helper3, creep up on the limit
  size_t allocated_from_3 = 0;
  MetaWord* p = NULL;
  while ( (helper3.allocate_from_arena_with_tests(&p, 1), p != NULL) &&
         ++allocated_from_3 < Settings::commit_granule_words() * 2);
  // We expect the freelist to be empty of committed space...
  EXPECT_0(context.cm().calc_committed_word_size());
  //msthelper.cm().print_on(tty);
  // Release the first MetaspaceArena.
  helper1.delete_arena_with_tests();
  //msthelper.cm().print_on(tty);
  // Should have populated the freelist with committed space
  EXPECT_GT(context.cm().calc_committed_word_size(), (size_t)0);
  // Repeat allocation from helper3, should now work.
  helper3.allocate_from_arena_with_tests_expect_success(1);
// From a MetaspaceArena in a clean room allocate tiny amounts;
// watch it grow. Used/committed/capacity should not grow in
// large jumps. Also, different types of MetaspaceArena should
// have different initial capacities.
// NOTE(review): fragment of test_controlled_growth — the signature and the
// declarations of 'type', 'committed', 'used', 'capacity', their '*2'
// counterparts, 'words_allocated', 'num_capacity_jumps', 'safety',
// 'alloc_words', 'smhelper_harrasser', 'test_in_place_enlargement',
// 'highest_capacity_jump' and 'num_chunk_enlarged' are missing from this
// excerpt; restore from the full file.
if (!(Settings::new_chunks_are_fully_committed() && type == Metaspace::BootMetaspaceType)) {
  // Initial commit charge for the whole context should be one granule
  ASSERT_EQ(context.committed_words(), Settings::commit_granule_words());
  // Initial commit number for the arena should be less since - apart from boot loader -
  // no space type has large initial chunks.
  ASSERT_LE(committed, Settings::commit_granule_words());
}
while (words_allocated < safety && num_capacity_jumps < 15) {
  // If we want to test growth with in-place chunk enlargement, leave MetaspaceArena
  // undisturbed; it will have all the place to grow. Otherwise allocate from a little
  // side arena to increase fragmentation.
  // (Note that this does not completely prevent in-place chunk enlargement but makes it
  // rather improbable)
  if (!test_in_place_enlargement) {
    smhelper_harrasser.allocate_from_arena_with_tests_expect_success(alloc_words * 2);
  }
  // used should not grow larger than what we allocated, plus possible overhead.
  ASSERT_GE(used2, used);
  ASSERT_LE(used2, used + alloc_words * 2);
  ASSERT_LE(used2, words_allocated + 100);
  used = used2;
  // A jump in committed words should not be larger than commit granule size.
  // It can be smaller, since the current chunk of the MetaspaceArena may be
  // smaller than a commit granule.
  // (Note: unless root chunks are born fully committed)
  ASSERT_GE(committed2, used2);
  ASSERT_GE(committed2, committed);
  const size_t committed_jump = committed2 - committed;
  if (committed_jump > 0 && !Settings::new_chunks_are_fully_committed()) {
    ASSERT_LE(committed_jump, Settings::commit_granule_words());
  }
  committed = committed2;
  // Capacity jumps: Test that arenas capacity does not grow too fast.
  ASSERT_GE(capacity2, committed2);
  ASSERT_GE(capacity2, capacity);
  const size_t capacity_jump = capacity2 - capacity;
  if (capacity_jump > 0) {
    LOG(">" SIZE_FORMAT "->" SIZE_FORMAT "(+" SIZE_FORMAT ")", capacity, capacity2, capacity_jump)
    if (capacity_jump > highest_capacity_jump) {
      /* Disabled for now since this is rather shaky. The way it is tested makes it too dependent
       * on allocation history. Need to rethink this.
      ASSERT_LE(capacity_jump, highest_capacity_jump * 2);
      ASSERT_GE(capacity_jump, MIN_CHUNK_WORD_SIZE);
      ASSERT_LE(capacity_jump, MAX_CHUNK_WORD_SIZE);
      */
      highest_capacity_jump = capacity_jump;
    }
    num_capacity_jumps++;
  }
  capacity = capacity2;
}
// After all this work, we should see an increase in number of chunk-in-place-enlargements
// (this especially is vulnerable to regression: the decisions of when to do
// in-place-enlargements are somewhat complicated, see
// MetaspaceArena::attempt_enlarge_current_chunk())
#ifdef ASSERT
if (test_in_place_enlargement) {
  const uintx num_chunk_enlarged_2 = metaspace::InternalStats::num_chunks_enlarged();
  ASSERT_GT(num_chunk_enlarged_2, num_chunk_enlarged);
}
#endif
}
// These numbers have to be in sync with arena policy numbers
// (see memory/metaspace/arenaGrowthPolicy.cpp).
// Reflection loader, class space, starting chunk level 1K, with in-place
// enlargement enabled (last argument).
TEST_VM(metaspace, MetaspaceArena_growth_refl_c_inplace) {
  test_controlled_growth(Metaspace::ReflectionMetaspaceType, true,
                         word_size_for_level(CHUNK_LEVEL_1K), true);
}
/* Disabled growth tests for BootMetaspaceType: there, the growth steps are too rare,
 * and too large, to make any reliable guess as to whether chunks get enlarged in place.
TEST_VM(metaspace, MetaspaceArena_growth_boot_c_inplace) {
  test_controlled_growth(Metaspace::BootMetaspaceType, true,
                         word_size_for_level(CHUNK_LEVEL_1M), true);
}
*/

/* Disabled growth tests for BootMetaspaceType: there, the growth steps are too rare,
 * and too large, to make any reliable guess as to whether chunks get enlarged in place.
TEST_VM(metaspace, MetaspaceArena_growth_boot_nc_inplace) {
  test_controlled_growth(Metaspace::BootMetaspaceType, false,
                         word_size_for_level(CHUNK_LEVEL_4M), true);
}
*/
// Test that repeated allocation-deallocation cycles with the same block size
// do not increase metaspace usage after the initial allocation (the deallocated
// block should be reused by the next allocation).
// NOTE(review): 'staticvoid' is almost certainly a fused 'static void'
// (extraction artifact); the helper/context setup and the tail of the function
// (usage comparison of used1/used2, closing braces) are missing from this
// excerpt. Restore from the full file.
staticvoid test_repeatedly_allocate_and_deallocate(bool is_topmost) {
  // Test various sizes, including (important) the max. possible block size = 1 root chunk
  for (size_t blocksize = Metaspace::max_allocation_word_size(); blocksize >= 1; blocksize /= 2) {
    size_t used1 = 0, used2 = 0, committed1 = 0, committed2 = 0;
    MetaWord* p = NULL, *p2 = NULL;
    // First allocation
    helper.allocate_from_arena_with_tests_expect_success(&p, blocksize);
    if (!is_topmost) {
      // another one on top, size does not matter.
      helper.allocate_from_arena_with_tests_expect_success(0x10);
    }
    // Dealloc, alloc several times with the same size.
    for (int i = 0; i < 5; i ++) {
      helper.deallocate_with_tests(p, blocksize);
      helper.allocate_from_arena_with_tests_expect_success(&p2, blocksize);
      // We should get the same pointer back.
      EXPECT_EQ(p2, p);
    }
/* NOTE(review): the following text appears to be unrelated website boilerplate
 * (a German disclaimer: "the information on this website was compiled
 * carefully to the best of our knowledge; however neither completeness,
 * correctness nor quality is guaranteed; syntax highlighting and measurement
 * are still experimental") that was accidentally appended to this excerpt.
 * It is not part of the test source and is preserved here commented out:
 *
 * Die Informationen auf dieser Webseite wurden
 * nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
 * noch Qualität der bereit gestellten Informationen zugesichert.
 * Bemerkung:
 * Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.
 */