/* * Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 only, as * published by the Free Software Foundation. * * This code is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * version 2 for more details (a copy is included in the LICENSE file that * accompanied this code). * * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA * or visit www.oracle.com if you need additional information or have any * questions.
*/
// Test os::page_size_for_region_unaligned(): for a given region size and
// minimum page count, it should pick the largest allowed page size that fits.
TEST_VM(os, page_size_for_region_unaligned) {
  if (UseLargePages) {
    // Given exact page size, should return that page size.
    for (size_t s = os::page_sizes().largest(); s != 0; s = os::page_sizes().next_smaller(s)) {
      size_t actual = os::page_size_for_region_unaligned(s, 1);
      ASSERT_EQ(s, actual);
    }

    // Given slightly larger size than a page size, return the page size.
    for (size_t s = os::page_sizes().largest(); s != 0; s = os::page_sizes().next_smaller(s)) {
      size_t actual = os::page_size_for_region_unaligned(s + 17, 1);
      ASSERT_EQ(s, actual);
    }

    // Given a slightly smaller size than a page size,
    // return the next smaller page size.
    for (size_t s = os::page_sizes().largest(); s != 0; s = os::page_sizes().next_smaller(s)) {
      const size_t expected = os::page_sizes().next_smaller(s);
      if (expected != 0) {
        size_t actual = os::page_size_for_region_unaligned(s - 17, 1);
        ASSERT_EQ(actual, expected);
      }
    }

    // Return small page size for values less than a small page.
    size_t small_page = os::page_sizes().smallest();
    size_t actual = os::page_size_for_region_unaligned(small_page - 17, 1);
    ASSERT_EQ(small_page, actual);
  }
}
// NOTE(review): this is a truncated fragment of the os random-number test.
// The enclosing TEST header and the declarations of `seed`, `reps`, `m`,
// `mean`, `variance` and `t` were lost in extraction; `constdouble` on the
// eps line is a fused-keyword extraction artifact. Restore from the upstream
// test_os.cpp before building.
// tty->print_cr("seed %ld for %ld repeats...", seed, reps); int num; for (int k = 0; k < reps; k++) { // Use next_random so the calculation is stateless.
num = seed = os::next_random(seed); double u = (double)num / m;
ASSERT_TRUE(u >= 0.0 && u <= 1.0) << "bad random number!";
// calculate mean and variance of the random sequence
mean += u;
variance += (u*u);
}
mean /= reps;
variance /= (reps - 1);
ASSERT_EQ(num, 1043618065) << "bad seed"; // tty->print_cr("mean of the 1st 10000 numbers: %f", mean); int intmean = mean*100;
ASSERT_EQ(intmean, 50); // tty->print_cr("variance of the 1st 10000 numbers: %f", variance); int intvariance = variance*100;
ASSERT_EQ(intvariance, 33); constdouble eps = 0.0001;
t = fabsd(mean - 0.5018);
ASSERT_LT(t, eps) << "bad mean";
t = (variance - 0.3355) < 0.0 ? -(variance - 0.3355) : variance - 0.3355;
ASSERT_LT(t, eps) << "bad variance";
}
// NOTE(review): truncated fragment of the print_hex_dump test. The enclosing
// TEST header, the `pattern_not_readable` definitions, the helper
// do_test_print_hex_dump, and the remainder of the body after the malloc'd
// array are missing from this extraction. Preprocessor directives below are
// merged onto code lines (extraction artifact); restore from upstream.
address unreadable = #ifdef AIX
(address) 0xFFFFFFFFFFFF0000ULL; #else
(address) 0 #endif
;
ResourceMark rm; char buf[64];
stringStream ss(buf, sizeof(buf));
outputStream* out = &ss; // outputStream* out = tty; // enable for printout
// Test dumping unreadable memory // Exclude test for Windows for now, since it needs SEH handling to work which cannot be // guaranteed when we call directly into VM code. (see JDK-8220220) #ifndef _WIN32
do_test_print_hex_dump(unreadable, 100, 1, pattern_not_readable[0]);
do_test_print_hex_dump(unreadable, 100, 2, pattern_not_readable[1]);
do_test_print_hex_dump(unreadable, 100, 4, pattern_not_readable[2]);
do_test_print_hex_dump(unreadable, 100, 8, pattern_not_readable[3]); #endif
// Test dumping readable memory
address arr = (address)os::malloc(100, mtInternal); for (int c = 0; c < 100; c++) {
arr[c] = c;
}
// PrintFn is expected to be int (*)(char*, size_t, const char*, ...). // But jio_snprintf is a C-linkage function with that signature, which // has a different type on some platforms (like Solaris). template<typename PrintFn> staticvoid test_snprintf(PrintFn pf, bool expect_count) { constchar expected[] = "abcdefghijklmnopqrstuvwxyz"; constint expected_len = sizeof(expected) - 1; const size_t padding_size = 10; char buffer[2 * (sizeof(expected) + padding_size)]; char check_buffer[sizeof(buffer)]; constchar check_char = '1'; // Something not in expected.
memset(check_buffer, check_char, sizeof(check_buffer)); const size_t sizes_to_test[] = { sizeof(buffer) - padding_size, // Fits, with plenty of space to spare. sizeof(buffer)/2, // Fits, with space to spare. sizeof(buffer)/4, // Doesn't fit. sizeof(expected) + padding_size + 1, // Fits, with a little room to spare sizeof(expected) + padding_size, // Fits exactly. sizeof(expected) + padding_size - 1, // Doesn't quite fit.
2, // One char + terminating NUL.
1, // Only space for terminating NUL.
0 }; // No space at all. for (unsigned i = 0; i < ARRAY_SIZE(sizes_to_test); ++i) {
memset(buffer, check_char, sizeof(buffer)); // To catch stray writes.
size_t test_size = sizes_to_test[i];
ResourceMark rm;
stringStream s;
s.print("test_size: " SIZE_FORMAT, test_size);
SCOPED_TRACE(s.as_string());
size_t prefix_size = padding_size;
guarantee(test_size <= (sizeof(buffer) - prefix_size), "invariant");
size_t write_size = MIN2(sizeof(expected), test_size);
size_t suffix_size = sizeof(buffer) - prefix_size - write_size; char* write_start = buffer + prefix_size; char* write_end = write_start + write_size;
int result = pf(write_start, test_size, "%s", expected);
// Verify no scribbling on prefix or suffix.
ASSERT_EQ(0, strncmp(buffer, check_buffer, prefix_size));
ASSERT_EQ(0, strncmp(write_end, check_buffer, suffix_size));
}
// Special case of 0-length buffer with empty (except for terminator) output.
check_snprintf_result(0, 0, pf(NULL, 0, "%s", ""), expect_count);
check_snprintf_result(0, 0, pf(NULL, 0, ""), expect_count);
}
// This is probably equivalent to os::snprintf, but we're being
// explicit about what we're testing here.
// Variadic shim so test_snprintf() can exercise os::vsnprintf through a
// PrintFn-shaped entry point. Returns os::vsnprintf's result unchanged.
static int vsnprintf_wrapper(char* buf, size_t len, const char* fmt, ...) {
  va_list args;
  va_start(args, fmt);
  int result = os::vsnprintf(buf, len, fmt, args);
  va_end(args);
  return result;
}
// These are declared in jvm.h; test here, with related functions.
extern "C" {
int jio_vsnprintf(char*, size_t, const char*, va_list);
int jio_snprintf(char*, size_t, const char*, ...);
}
// This is probably equivalent to jio_snprintf, but we're being
// explicit about what we're testing here.
// Variadic shim so test_snprintf() can exercise jio_vsnprintf through a
// PrintFn-shaped entry point. Returns jio_vsnprintf's result unchanged.
static int jio_vsnprintf_wrapper(char* buf, size_t len, const char* fmt, ...) {
  va_list args;
  va_start(args, fmt);
  int result = jio_vsnprintf(buf, len, fmt, args);
  va_end(args);
  return result;
}
#ifdef __APPLE__
// Not all macOS versions can use os::reserve_memory (i.e. anon_mmap) API
// to reserve executable memory, so before attempting to use it,
// we need to verify that we can do so by asking for a tiny executable
// memory chunk.
static inline bool can_reserve_executable_memory(void) {
  bool executable = true;
  size_t len = 128;
  char* p = os::reserve_memory(len, executable);
  bool exec_supported = (p != NULL);
  if (exec_supported) {
    os::release_memory(p, len);  // Probe only; release immediately.
  }
  return exec_supported;
}
#endif
// Test that os::release_memory() can deal with areas containing multiple mappings.
// PRINT_MAPPINGS dumps the mappings around `p` (expects `p` and
// `total_range_len` in the enclosing scope); comment the first define and
// uncomment the second to silence the output.
#define PRINT_MAPPINGS(s) { tty->print_cr("%s", s); os::print_memory_mappings((char*)p, total_range_len, tty); }
//#define PRINT_MAPPINGS
// Release a range allocated with reserve_multiple carefully, to not trip mapping // asserts on Windows in os::release_memory() staticvoid carefully_release_multiple(address start, int num_stripes, size_t stripe_len) { for (int stripe = 0; stripe < num_stripes; stripe++) {
address q = start + (stripe * stripe_len);
EXPECT_TRUE(os::release_memory((char*)q, stripe_len));
}
}
#ifndef _AIX // JDK-8257041 // Reserve an area consisting of multiple mappings // (from multiple calls to os::reserve_memory) static address reserve_multiple(int num_stripes, size_t stripe_len) {
assert(is_aligned(stripe_len, os::vm_allocation_granularity()), "Sanity");
#ifdef __APPLE__ // Workaround: try reserving executable memory to figure out // if such operation is supported on this macOS version constbool exec_supported = can_reserve_executable_memory(); #endif
address p = NULL; for (int tries = 0; tries < 256 && p == NULL; tries ++) {
size_t total_range_len = num_stripes * stripe_len; // Reserve a large contiguous area to get the address space...
p = (address)os::reserve_memory(total_range_len);
EXPECT_NE(p, (address)NULL); // .. release it...
EXPECT_TRUE(os::release_memory((char*)p, total_range_len)); // ... re-reserve in the same spot multiple areas... for (int stripe = 0; stripe < num_stripes; stripe++) {
address q = p + (stripe * stripe_len); // Commit, alternatingly with or without exec permission, // to prevent kernel from folding these mappings. #ifdef __APPLE__ constbool executable = exec_supported ? (stripe % 2 == 0) : false; #else constbool executable = stripe % 2 == 0; #endif
q = (address)os::attempt_reserve_memory_at((char*)q, stripe_len, executable); if (q == NULL) { // Someone grabbed that area concurrently. Cleanup, then retry.
tty->print_cr("reserve_multiple: retry (%d)...", stripe);
carefully_release_multiple(p, stripe, stripe_len);
p = NULL;
} else {
EXPECT_TRUE(os::commit_memory((char*)q, stripe_len, executable));
}
}
} return p;
} #endif// !AIX
// Reserve an area with a single call to os::reserve_memory,
// with multiple committed and uncommitted regions
// (even-numbered stripes committed, odd ones left reserved-only).
static address reserve_one_commit_multiple(int num_stripes, size_t stripe_len) {
  assert(is_aligned(stripe_len, os::vm_allocation_granularity()), "Sanity");
  size_t total_range_len = num_stripes * stripe_len;
  address p = (address)os::reserve_memory(total_range_len);
  EXPECT_NE(p, (address)NULL);
  for (int stripe = 0; stripe < num_stripes; stripe++) {
    address q = p + (stripe * stripe_len);
    if (stripe % 2 == 0) {
      EXPECT_TRUE(os::commit_memory((char*)q, stripe_len, false));
    }
  }
  return p;
}
// NOTE(review): truncated fragment of the release_multiple_mappings test.
// The TEST header and the declarations/initialization of `p`, `num_stripes`,
// `stripe_len` and `total_range_len` (likely via reserve_multiple) were lost
// in extraction. Restore from the upstream test_os.cpp before building.
// With NMT enabled, this will trigger JDK-8263464. For now disable the test if NMT=on. if (MemTracker::tracking_level() > NMT_off) { return;
}
// Test that we can release an area created with multiple reservation calls // What we do: // A) we reserve 6 small segments (stripes) adjacent to each other. We commit // them with alternating permissions to prevent the kernel from folding them into // a single segment. // -stripe-stripe-stripe-stripe-stripe-stripe- // B) we release the middle four stripes with a single os::release_memory call. This // tests that os::release_memory indeed works across multiple segments created with // multiple os::reserve calls. // -stripe-___________________________-stripe- // C) Into the now vacated address range between the first and the last stripe, we // re-reserve a new memory range. We expect this to work as a proof that the address // range was really released by the single release call (B). // // Note that this is inherently racy. Between (B) and (C), some other thread may have // reserved something into the hole in the meantime. Therefore we keep that range small and // entrenched between the first and last stripe, which reduces the chance of some concurrent // thread grabbing that memory.
// .. release the middle stripes...
address p_middle_stripes = p + stripe_len; const size_t middle_stripe_len = (num_stripes - 2) * stripe_len;
{ // On Windows, temporarily switch on UseNUMAInterleaving to allow release_memory to release // multiple mappings in one go (otherwise we assert, which we test too, see death test below).
WINDOWS_ONLY(NUMASwitcher b(true);)
ASSERT_TRUE(os::release_memory((char*)p_middle_stripes, middle_stripe_len));
}
PRINT_MAPPINGS("B");
// ...re-reserve the middle stripes. This should work unless release silently failed.
address p2 = (address)os::attempt_reserve_memory_at((char*)p_middle_stripes, middle_stripe_len);
ASSERT_EQ(p2, p_middle_stripes);
PRINT_MAPPINGS("C");
// Clean up. Release all mappings.
{
WINDOWS_ONLY(NUMASwitcher b(true);) // allow release_memory to release multiple regions
ASSERT_TRUE(os::release_memory((char*)p, total_range_len));
}
} #endif// !AIX
#ifdef _WIN32
// On Windows, test that we recognize bad ranges.
// On debug this would assert. Test that too.
// On other platforms, we are unable to recognize bad ranges.
#ifdef ASSERT
TEST_VM_ASSERT_MSG(os, release_bad_ranges, ".*bad release") {
#else
TEST_VM(os, release_bad_ranges) {
#endif
  char* p = os::reserve_memory(4 * M);
  ASSERT_NE(p, (char*)NULL);
  // Release part of range
  ASSERT_FALSE(os::release_memory(p, M));
  // Release part of range
  ASSERT_FALSE(os::release_memory(p + M, M));
  // Release more than the range (explicitly switch off NUMA here
  // to make os::release_memory() test more strictly and to not
  // accidentally release neighbors)
  {
    NUMASwitcher b(false);
    ASSERT_FALSE(os::release_memory(p, M * 5));
    ASSERT_FALSE(os::release_memory(p - M, M * 5));
    ASSERT_FALSE(os::release_memory(p - M, M * 6));
  }
  ASSERT_TRUE(os::release_memory(p, 4 * M));  // Release for real
  ASSERT_FALSE(os::release_memory(p, 4 * M)); // Again, should fail
}
#endif // _WIN32
// NOTE(review): this test appears truncated by extraction — the reservation
// (presumably via reserve_one_commit_multiple), PRINT_MAPPINGS("A"/"B"),
// the release of `p`, and the closing brace are missing between the constant
// declarations and the re-reserve below (`p` is used but never declared here).
// Restore from the upstream test_os.cpp before building.
TEST_VM(os, release_one_mapping_multi_commits) { // Test that we can release an area consisting of interleaved // committed and uncommitted regions: const size_t stripe_len = 4 * M; constint num_stripes = 4; const size_t total_range_len = stripe_len * num_stripes;
// re-reserve it. This should work unless release failed.
address p2 = (address)os::attempt_reserve_memory_at((char*)p, total_range_len);
ASSERT_EQ(p2, p);
PRINT_MAPPINGS("C");
TEST_VM(os, show_mappings_full_range) {
  // Reserve a small range and fill it with a marker string, should show up
  // on implementations displaying range snippets
  char* p = os::reserve_memory(1 * M, false, mtInternal);
  if (p != nullptr) {
    if (os::commit_memory(p, 1 * M, false)) {
      strcpy(p, "ABCDEFGHIJKLMNOPQRSTUVWXYZ");
    }
  }
  test_show_mappings(nullptr, 0);
  if (p != nullptr) {
    os::release_memory(p, 1 * M);
  }
}
#ifdef _WIN32
// Test os::win32::find_mapping
TEST_VM(os, find_mapping_simple) {
  const size_t total_range_len = 4 * M;
  os::win32::mapping_info_t mapping_info;

  // Some obvious negatives
  ASSERT_FALSE(os::win32::find_mapping((address)NULL, &mapping_info));
  ASSERT_FALSE(os::win32::find_mapping((address)4711, &mapping_info));

  // A simple allocation
  {
    address p = (address)os::reserve_memory(total_range_len);
    ASSERT_NE(p, (address)NULL);
    PRINT_MAPPINGS("A");
    // Probe addresses throughout the mapping; all should resolve to it.
    for (size_t offset = 0; offset < total_range_len; offset += 4711) {
      ASSERT_TRUE(os::win32::find_mapping(p + offset, &mapping_info));
      ASSERT_EQ(mapping_info.base, p);
      ASSERT_EQ(mapping_info.regions, 1);
      ASSERT_EQ(mapping_info.size, total_range_len);
      ASSERT_EQ(mapping_info.committed_size, 0);
    }
    // Test just outside the allocation
    if (os::win32::find_mapping(p - 1, &mapping_info)) {
      ASSERT_NE(mapping_info.base, p);
    }
    if (os::win32::find_mapping(p + total_range_len, &mapping_info)) {
      ASSERT_NE(mapping_info.base, p);
    }
    ASSERT_TRUE(os::release_memory((char*)p, total_range_len));
    PRINT_MAPPINGS("B");
    ASSERT_FALSE(os::win32::find_mapping(p, &mapping_info));
  }
}
// NOTE(review): TEST_VM(os, find_mapping_2) is truncated here — the body after
// the mapping_info declaration (multi-region reservation and its assertions)
// and the closing brace are missing from this extraction, as is the matching
// #endif for the _WIN32 region. Restore from the upstream test_os.cpp.
TEST_VM(os, find_mapping_2) { // A more complex allocation, consisting of multiple regions. const size_t total_range_len = 4 * M;
os::win32::mapping_info_t mapping_info;
TEST_VM(os, os_pagesizes) {
  ASSERT_EQ(os::min_page_size(), 4 * K);
  ASSERT_LE(os::min_page_size(), (size_t)os::vm_page_size());
  // The vm_page_size should be the smallest in the set of allowed page sizes
  // (contract says "default" page size but a lot of code actually assumes
  // this to be the smallest page size; notable, deliberate exception is
  // AIX which can have smaller page sizes but those are not part of the
  // page_sizes() set).
  ASSERT_EQ(os::page_sizes().smallest(), (size_t)os::vm_page_size());
  // The large page size, if it exists, shall be part of the set
  if (UseLargePages) {
    ASSERT_GT(os::large_page_size(), (size_t)os::vm_page_size());
    ASSERT_TRUE(os::page_sizes().contains(os::large_page_size()));
  }
  os::page_sizes().print_on(tty);
  tty->cr();
}
// NOTE(review): truncated tail of an os::iso8601_time test — the TEST header,
// `buffer` declaration and the formatting call were lost in extraction; only
// the canary check and closing brace remain. Restore from upstream.
// Canary should still be intact
EXPECT_EQ(buffer[os::iso8601_timestamp_size], 'X');
}
TEST_VM(os, is_first_C_frame) {
#if !defined(_WIN32) && !defined(ZERO)
  frame invalid_frame;
  EXPECT_TRUE(os::is_first_C_frame(&invalid_frame)); // the frame has zeroes for all values

  frame cur_frame = os::current_frame(); // this frame has to have a sender
  EXPECT_FALSE(os::is_first_C_frame(&cur_frame));
#endif // _WIN32
}
#ifdef __GLIBC__
// On glibc, trimming is supported: exercise it and sanity-check the reported
// RSS numbers before and after.
TEST_VM(os, trim_native_heap) {
  EXPECT_TRUE(os::can_trim_native_heap());
  os::size_change_t sc;
  sc.before = sc.after = (size_t)-1;
  EXPECT_TRUE(os::trim_native_heap(&sc));
  tty->print_cr(SIZE_FORMAT "->" SIZE_FORMAT, sc.before, sc.after);
  // Regardless of whether we freed memory, both before and after
  // should be somewhat believable numbers (RSS).
  const size_t min = 5 * M;
  const size_t max = LP64_ONLY(20 * G) NOT_LP64(3 * G);
  ASSERT_LE(min, sc.before);
  ASSERT_GT(max, sc.before);
  ASSERT_LE(min, sc.after);
  ASSERT_GT(max, sc.after);
  // Should also work
  EXPECT_TRUE(os::trim_native_heap());
}
#else
// Elsewhere, trimming must report itself unsupported.
TEST_VM(os, trim_native_heap) {
  EXPECT_FALSE(os::can_trim_native_heap());
}
#endif // __GLIBC__
TEST_VM(os, open_O_CLOEXEC) {
#if !defined(_WIN32)
  int fd = os::open("test_file.txt", O_RDWR | O_CREAT | O_TRUNC, 0666); // open will use O_CLOEXEC
  EXPECT_TRUE(fd > 0);
  int flags = ::fcntl(fd, F_GETFD);
  EXPECT_TRUE((flags & FD_CLOEXEC) != 0); // if O_CLOEXEC worked, then FD_CLOEXEC should be ON
  ::close(fd);
#endif
}
¤ Dauer der Verarbeitung: 0.20 Sekunden
(vorverarbeitet)
¤
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung ist noch experimentell.