/* * Copyright (c) 2012, 2022, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 only, as * published by the Free Software Foundation. * * This code is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * version 2 for more details (a copy is included in the LICENSE file that * accompanied this code). * * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA * or visit www.oracle.com if you need additional information or have any * questions. *
*/ #include"precompiled.hpp" #include"memory/allocation.hpp" #include"memory/metaspace.hpp" #include"memory/metaspaceUtils.hpp" #include"services/mallocTracker.hpp" #include"services/memReporter.hpp" #include"services/threadStackTracker.hpp" #include"services/virtualMemoryTracker.hpp" #include"utilities/globalDefinitions.hpp"
// Summary by memory type for (int index = 0; index < mt_number_of_types; index ++) {
MEMFLAGS flag = NMTUtil::index_to_flag(index); // thread stack is reported as part of thread category if (flag == mtThreadStack) continue;
MallocMemory* malloc_memory = _malloc_snapshot->by_type(flag);
VirtualMemory* virtual_memory = _vm_snapshot->by_type(flag);
int num_omitted =
report_malloc_sites() +
report_virtual_memory_allocation_sites(); if (num_omitted > 0) {
assert(scale() > 1, "sanity");
out->print_cr("(%d call sites weighting less than 1%s each omitted.)",
num_omitted, current_scale());
out->cr();
}
}
int MemDetailReporter::report_malloc_sites() {
MallocSiteIterator malloc_itr = _baseline.malloc_sites(MemBaseline::by_size); if (malloc_itr.is_empty()) return 0;
outputStream* out = output();
const MallocSite* malloc_site; int num_omitted = 0; while ((malloc_site = malloc_itr.next()) != NULL) { // Don't report if site has never allocated less than one unit of whatever our scale is if (scale() > 1 && amount_in_current_scale(malloc_site->size()) == 0
DEBUG_ONLY(&& amount_in_current_scale(malloc_site->peak_size()) == 0)) {
num_omitted ++; continue;
} const NativeCallStack* stack = malloc_site->call_stack();
stack->print_on(out);
out->print("%29s", " ");
MEMFLAGS flag = malloc_site->flag();
assert(NMTUtil::flag_is_valid(flag) && flag != mtNone, "Must have a valid memory type");
print_malloc(malloc_site->counter(), flag);
out->print_cr("\n");
} return num_omitted;
}
int MemDetailReporter::report_virtual_memory_allocation_sites() {
VirtualMemorySiteIterator virtual_memory_itr =
_baseline.virtual_memory_sites(MemBaseline::by_size);
if (virtual_memory_itr.is_empty()) return 0;
outputStream* out = output(); const VirtualMemoryAllocationSite* virtual_memory_site; int num_omitted = 0; while ((virtual_memory_site = virtual_memory_itr.next()) != NULL) { // Don't report free sites; does not count toward omitted count. if (virtual_memory_site->reserved() == 0) { continue;
} // Don't report if site has reserved less than one unit of whatever our scale is if (scale() > 1 && amount_in_current_scale(virtual_memory_site->reserved()) == 0) {
num_omitted++; continue;
} const NativeCallStack* stack = virtual_memory_site->call_stack();
stack->print_on(out);
out->print("%28s (", " ");
print_total(virtual_memory_site->reserved(), virtual_memory_site->committed());
MEMFLAGS flag = virtual_memory_site->flag(); if (flag != mtNone) {
out->print(" Type=%s", NMTUtil::flag_to_name(flag));
}
out->print_cr(")\n");
} return num_omitted;
}
// NOTE(review): this function body appears CORRUPTED in this copy of the
// file. It references `all_committed`, `reserved_rgn`, `stack` and `out`,
// none of which are declared anywhere in the visible body, and `itr` is
// declared three times in overlapping scopes. The code from the
// `if (all_committed)` test onward looks like it belongs to a per-region
// helper (it walks the committed regions of a single reserved region),
// suggesting lines were lost between the iterator setup and this point.
// TODO: restore from upstream (services/memReporter.cpp) before building.
void MemDetailReporter::report_virtual_memory_map() {
  // Virtual memory map always in base address order
  VirtualMemoryAllocationIterator itr = _baseline.virtual_memory_allocations();
  const ReservedMemoryRegion* rgn;  // NOTE(review): never used below — more evidence of missing lines
  if (all_committed) {
    CommittedRegionIterator itr = reserved_rgn->iterate_committed_regions();
    const CommittedMemoryRegion* committed_rgn = itr.next();
    if (committed_rgn->size() == reserved_rgn->size() && committed_rgn->call_stack()->equals(*stack)) {
      // One region spanning the entire reserved region, with the same stack trace.
      // Don't print this region because the "reserved and committed" line above
      // already indicates that the region is committed.
      assert(itr.next() == NULL, "Unexpectedly more than one regions");
      return;
    }
  }
  CommittedRegionIterator itr = reserved_rgn->iterate_committed_regions();
  const CommittedMemoryRegion* committed_rgn;
  while ((committed_rgn = itr.next()) != NULL) {
    // Don't report if size is too small
    if (amount_in_current_scale(committed_rgn->size()) == 0) continue;
    stack = committed_rgn->call_stack();
    out->print("\n\t");
    print_virtual_memory_region("committed", committed_rgn->base(), committed_rgn->size());
    if (stack->is_empty()) {
      out->print_cr(" ");
    } else {
      out->print_cr(" from");
      stack->print_on(out, 12);
    }
  }
}
void MemSummaryDiffReporter::report_diff() {
outputStream* out = output();
out->print_cr("\nNative Memory Tracking:\n");
if (scale() > 1) {
out->print_cr("(Omitting categories weighting less than 1%s)", current_scale());
out->cr();
}
// Summary diff by memory type for (int index = 0; index < mt_number_of_types; index ++) {
MEMFLAGS flag = NMTUtil::index_to_flag(index); // thread stack is reported as part of thread category if (flag == mtThreadStack) continue;
diff_summary_of_type(flag,
_early_baseline.malloc_memory(flag),
_early_baseline.virtual_memory(flag),
_early_baseline.metaspace_stats(),
_current_baseline.malloc_memory(flag),
_current_baseline.virtual_memory(flag),
_current_baseline.metaspace_stats());
}
}
out->print("%s" SIZE_FORMAT "%s", alloc_type, amount_in_current_scale(current_amount), scale); // Report type only if it is valid and not under "thread" category if (flags != mtNone && flags != mtThread) {
out->print(" type=%s", NMTUtil::flag_to_name(flags));
}
long amount_diff = diff_in_current_scale(current_amount, early_amount); if (amount_diff != 0) {
out->print(" %+ld%s", amount_diff, scale);
} if (current_count > 0) {
out->print(" #" SIZE_FORMAT "", current_count); if (current_count != early_count) {
out->print(" %+d", (int)(current_count - early_count));
}
}
}
outputStream* out = output(); constchar* scale = current_scale();
// Total reserved and committed memory in current baseline
size_t current_reserved_amount = reserved_total (current_malloc, current_vm);
size_t current_committed_amount = committed_total(current_malloc, current_vm);
// Total reserved and committed memory in early baseline
size_t early_reserved_amount = reserved_total(early_malloc, early_vm);
size_t early_committed_amount = committed_total(early_malloc, early_vm);
// Adjust virtual memory total if (flag == mtThread) { const VirtualMemory* early_thread_stack_usage =
_early_baseline.virtual_memory(mtThreadStack); const VirtualMemory* current_thread_stack_usage =
_current_baseline.virtual_memory(mtThreadStack);
// Diffs one malloc call site that exists in both baselines. If the
// site's memory type is unchanged, report a size/count delta; if the
// type changed between baselines, treat the change as a full
// deallocation under the old type plus a fresh allocation under the
// new type.
void MemDetailDiffReporter::diff_malloc_site(const MallocSite* early, const MallocSite* current) const {
  if (early->flag() == current->flag()) {
    diff_malloc_site(current->call_stack(), current->size(), current->count(),
                     early->size(), early->count(), early->flag());
  } else {
    old_malloc_site(early);
    new_malloc_site(current);
  }
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung ist noch experimentell.