/* * Copyright (c) 2005, 2022, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 only, as * published by the Free Software Foundation. * * This code is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * version 2 for more details (a copy is included in the LICENSE file that * accompanied this code). * * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA * or visit www.oracle.com if you need additional information or have any * questions. *
*/
/* * HPROF binary format - description copied from: * src/share/demo/jvmti/hprof/hprof_io.c * * * header "JAVA PROFILE 1.0.2" (0-terminated) * * u4 size of identifiers. Identifiers are used to represent * UTF8 strings, objects, stack traces, etc. They usually * have the same size as host pointers. * u4 high word * u4 low word number of milliseconds since 0:00 GMT, 1/1/70 * [record]* a sequence of records. * * * Record format: * * u1 a TAG denoting the type of the record * u4 number of *microseconds* since the time stamp in the * header. (wraps around in a little more than an hour) * u4 number of bytes *remaining* in the record. Note that * this number excludes the tag and the length field itself. * [u1]* BODY of the record (a sequence of bytes) * * * The following TAGs are supported: * * TAG BODY notes *---------------------------------------------------------- * HPROF_UTF8 a UTF8-encoded name * * id name ID * [u1]* UTF8 characters (no trailing zero) * * HPROF_LOAD_CLASS a newly loaded class * * u4 class serial number (> 0) * id class object ID * u4 stack trace serial number * id class name ID * * HPROF_UNLOAD_CLASS an unloading class * * u4 class serial_number * * HPROF_FRAME a Java stack frame * * id stack frame ID * id method name ID * id method signature ID * id source file name ID * u4 class serial number * i4 line number. >0: normal * -1: unknown * -2: compiled method * -3: native method * * HPROF_TRACE a Java stack trace * * u4 stack trace serial number * u4 thread serial number * u4 number of frames * [id]* stack frame IDs * * * HPROF_ALLOC_SITES a set of heap allocation sites, obtained after GC * * u2 flags 0x0001: incremental vs. complete * 0x0002: sorted by allocation vs. 
live * 0x0004: whether to force a GC * u4 cutoff ratio * u4 total live bytes * u4 total live instances * u8 total bytes allocated * u8 total instances allocated * u4 number of sites that follow * [u1 is_array: 0: normal object * 2: object array * 4: boolean array * 5: char array * 6: float array * 7: double array * 8: byte array * 9: short array * 10: int array * 11: long array * u4 class serial number (may be zero during startup) * u4 stack trace serial number * u4 number of bytes alive * u4 number of instances alive * u4 number of bytes allocated * u4]* number of instance allocated * * HPROF_START_THREAD a newly started thread. * * u4 thread serial number (> 0) * id thread object ID * u4 stack trace serial number * id thread name ID * id thread group name ID * id thread group parent name ID * * HPROF_END_THREAD a terminating thread. * * u4 thread serial number * * HPROF_HEAP_SUMMARY heap summary * * u4 total live bytes * u4 total live instances * u8 total bytes allocated * u8 total instances allocated * * HPROF_HEAP_DUMP denote a heap dump * * [heap dump sub-records]* * * There are four kinds of heap dump sub-records: * * u1 sub-record type * * HPROF_GC_ROOT_UNKNOWN unknown root * * id object ID * * HPROF_GC_ROOT_THREAD_OBJ thread object * * id thread object ID (may be 0 for a * thread newly attached through JNI) * u4 thread sequence number * u4 stack trace sequence number * * HPROF_GC_ROOT_JNI_GLOBAL JNI global ref root * * id object ID * id JNI global ref ID * * HPROF_GC_ROOT_JNI_LOCAL JNI local ref * * id object ID * u4 thread serial number * u4 frame # in stack trace (-1 for empty) * * HPROF_GC_ROOT_JAVA_FRAME Java stack frame * * id object ID * u4 thread serial number * u4 frame # in stack trace (-1 for empty) * * HPROF_GC_ROOT_NATIVE_STACK Native stack * * id object ID * u4 thread serial number * * HPROF_GC_ROOT_STICKY_CLASS System class * * id object ID * * HPROF_GC_ROOT_THREAD_BLOCK Reference from thread block * * id object ID * u4 thread serial number * 
* HPROF_GC_ROOT_MONITOR_USED Busy monitor * * id object ID * * HPROF_GC_CLASS_DUMP dump of a class object * * id class object ID * u4 stack trace serial number * id super class object ID * id class loader object ID * id signers object ID * id protection domain object ID * id reserved * id reserved * * u4 instance size (in bytes) * * u2 size of constant pool * [u2, constant pool index, * ty, type * 2: object * 4: boolean * 5: char * 6: float * 7: double * 8: byte * 9: short * 10: int * 11: long * vl]* and value * * u2 number of static fields * [id, static field name, * ty, type, * vl]* and value * * u2 number of inst. fields (not inc. super) * [id, instance field name, * ty]* type * * HPROF_GC_INSTANCE_DUMP dump of a normal object * * id object ID * u4 stack trace serial number * id class object ID * u4 number of bytes that follow * [vl]* instance field values (class, followed * by super, super's super ...) * * HPROF_GC_OBJ_ARRAY_DUMP dump of an object array * * id array object ID * u4 stack trace serial number * u4 number of elements * id array class ID * [id]* elements * * HPROF_GC_PRIM_ARRAY_DUMP dump of a primitive array * * id array object ID * u4 stack trace serial number * u4 number of elements * u1 element type * 4: boolean array * 5: char array * 6: float array * 7: double array * 8: byte array * 9: short array * 10: int array * 11: long array * [u1]* elements * * HPROF_CPU_SAMPLES a set of sample traces of running threads * * u4 total number of samples * u4 # of traces * [u4 # of samples * u4]* stack trace serial number * * HPROF_CONTROL_SETTINGS the settings of on/off switches * * u4 0x00000001: alloc traces on/off * 0x00000002: cpu sampling on/off * u2 stack trace depth * * * When the header is "JAVA PROFILE 1.0.2" a heap dump can optionally * be generated as a sequence of heap dump segments. This sequence is * terminated by an end record. 
The additional tags allowed by format * "JAVA PROFILE 1.0.2" are: * * HPROF_HEAP_DUMP_SEGMENT denote a heap dump segment * * [heap dump sub-records]* * The same sub-record types allowed by HPROF_HEAP_DUMP * * HPROF_HEAP_DUMP_END denotes the end of a heap dump *
*/
// Constants used when synthesizing dump records.
enum {
  STACK_TRACE_ID      = 1,   // Default stack trace ID (used for dummy HPROF_TRACE record)
  INITIAL_CLASS_COUNT = 200  // NOTE(review): presumably an initial capacity hint — confirm usage
};
// Supports I/O operations for a dump // Base class for dump and parallel dump class AbstractDumpWriter : public StackObj { protected: enum {
io_buffer_max_size = 1*M,
io_buffer_max_waste = 10*K,
dump_segment_header_size = 9
};
bool _in_dump_segment; // Are we currently in a dump segment? bool _is_huge_sub_record; // Are we writing a sub-record larger than the buffer size?
DEBUG_ONLY(size_t _sub_record_left;) // The bytes not written for the current sub-record.
DEBUG_ONLY(bool _sub_record_ended;) // True if we have called the end_sub_record().
// Start a new sub-record. Starts a new heap dump segment if needed. void start_sub_record(u1 tag, u4 len); // Ends the current sub-record. void end_sub_record(); // Finishes the current dump segment if not already finished. void finish_dump_segment(bool force_flush = false); // Refresh to get new buffer void refresh() {
assert (_in_dump_segment ==false, "Sanity check");
_buffer = NULL;
_size = io_buffer_max_size;
_pos = 0; // Force flush to guarantee data from parallel dumper are written.
flush(true);
} // Called when finished to release the threads. virtualvoid deactivate() = 0;
};
// Makes sure we inline the fast write into the write_u* functions. This is a big speedup.
// Dispatches to the fast in-buffer path when there is room, otherwise to the
// general write_raw() path. do/while(0) makes the macro statement-safe.
#define WRITE_KNOWN_TYPE(p, len)       \
  do {                                 \
    if (can_write_fast((len))) {       \
      write_fast((p), (len));          \
    } else {                           \
      write_raw((p), (len));           \
    }                                  \
  } while (0)
// We use java mirror as the class ID
void AbstractDumpWriter::write_classID(Klass* k) {
  write_objectID(k->java_mirror());
}
// Closes the currently open heap dump segment: patches the segment length
// field in the buffered header (unless the last sub-record was huge, in
// which case the length was already final) and flushes the buffer.
void AbstractDumpWriter::finish_dump_segment(bool force_flush) {
  if (_in_dump_segment) {
    assert(_sub_record_left == 0, "Last sub-record not written completely");
    assert(_sub_record_ended, "sub-record must have ended");

    // Fix up the dump segment length if we haven't written a huge sub-record last
    // (in which case the segment length was already set to the correct value initially).
    if (!_is_huge_sub_record) {
      assert(position() > dump_segment_header_size, "Dump segment should have some content");
      // Length field lives at offset 5: after the 1-byte tag and 4-byte timestamp.
      Bytes::put_Java_u4((address) (buffer() + 5),
                         (u4) (position() - dump_segment_header_size));
    } else {
      // Finish process huge sub record
      // Set _is_huge_sub_record to false so the parallel dump writer can flush data to file.
      _is_huge_sub_record = false;
    }

    _in_dump_segment = false;
    flush(force_flush);
  }
}
// Starts a new sub-record of 'len' bytes with the given tag, opening a new
// HPROF_HEAP_DUMP_SEGMENT record first if none is in progress or the current
// one cannot hold this sub-record.
// NOTE(review): the tail of this function (debug bookkeeping, tag write and
// closing braces) was missing from this chunk and has been restored —
// confirm against the full source.
void AbstractDumpWriter::start_sub_record(u1 tag, u4 len) {
  if (!_in_dump_segment) {
    if (position() > 0) {
      flush();
    }

    assert(position() == 0 && buffer_size() > dump_segment_header_size, "Must be at the start");

    write_u1(HPROF_HEAP_DUMP_SEGMENT);
    write_u4(0); // timestamp
    // Will be fixed up later if we add more sub-records. If this is a huge sub-record,
    // this is already the correct length, since we don't add more sub-records.
    write_u4(len);
    assert(Bytes::get_Java_u4((address)(buffer() + 5)) == len, "Inconsistent size!");
    _in_dump_segment = true;
    _is_huge_sub_record = len > buffer_size() - dump_segment_header_size;
  } else if (_is_huge_sub_record || (len > buffer_size() - position())) {
    // This object will not fit in completely or the last sub-record was huge.
    // Finish the current segment and try again.
    finish_dump_segment();
    start_sub_record(tag, len);
    return;
  }

  DEBUG_ONLY(_sub_record_left = len;)
  DEBUG_ONLY(_sub_record_ended = false;)

  write_u1(tag);
}
// Marks the current sub-record as complete; in debug builds verifies that
// exactly the announced number of bytes was written.
void AbstractDumpWriter::end_sub_record() {
  assert(_in_dump_segment, "must be in dump segment");
  assert(_sub_record_left == 0, "sub-record not written completely");
  assert(!_sub_record_ended, "Must not have ended yet");
  debug_only(_sub_record_ended = true);
}
// Supports I/O operations for a dump
class DumpWriter : public AbstractDumpWriter {
 private:
  CompressionBackend _backend; // Does the actual writing.
 protected:
  void flush(bool force = false) override;

 public:
  // Takes ownership of the writer and compressor.
  DumpWriter(AbstractWriter* writer, AbstractCompressor* compressor);

  // total number of bytes written to the disk
  julong bytes_written() const override { return (julong) _backend.get_written(); }

  // Called by threads used for parallel writing.
  void writer_loop() { _backend.thread_loop(); }
  // Called when finish to release the threads.
  void deactivate() override { flush(); _backend.deactivate(); }
  // Get the backend pointer, used by parallel dump writer.
  CompressionBackend* backend_ptr() { return &_backend; }
};
// Check for error after constructing the object and destroy it in case of an error.
DumpWriter::DumpWriter(AbstractWriter* writer, AbstractCompressor* compressor) :
  AbstractDumpWriter(),
  _backend(writer, compressor, io_buffer_max_size, io_buffer_max_waste) {
  // Prime the writer with an initial buffer from the backend.
  flush();
}
// flush any buffered bytes to the file
void DumpWriter::flush(bool force) {
  // Hands the full buffer to the backend and receives a fresh one;
  // updates _buffer/_pos/_size in place.
  _backend.get_new_buffer(&_buffer, &_pos, &_size, force);
}
// Buffer queue used for parallel dump.
struct ParWriterBufferQueueElem {
  char* _buffer;                   // chunk of dump data owned by this entry
  size_t _used;                    // number of valid bytes in _buffer
  ParWriterBufferQueueElem* _next; // singly-linked list link
};
// Support parallel heap dump. class ParDumpWriter : public AbstractDumpWriter { private: // Lock used to guarantee the integrity of multiple buffers writing. static Monitor* _lock; // Pointer of backend from global DumpWriter.
CompressionBackend* _backend_ptr; charconst * _err;
ParWriterBufferQueue* _buffer_queue;
size_t _internal_buffer_used; char* _buffer_base; bool _split_data; staticconst uint BackendFlushThreshold = 2; protected: void flush(bool force = false) override {
assert(_pos != 0, "must not be zero"); if (_pos != 0) {
refresh_buffer();
}
if (_split_data || _is_huge_sub_record) { return;
}
if (should_flush_buf_list(force)) {
assert(!_in_dump_segment && !_split_data && !_is_huge_sub_record, "incomplete data send to backend!\n");
flush_to_backend(force);
}
}
public: // Check for error after constructing the object and destroy it in case of an error.
ParDumpWriter(DumpWriter* dw) :
AbstractDumpWriter(),
_backend_ptr(dw->backend_ptr()),
_buffer_queue((new (std::nothrow) ParWriterBufferQueue())),
_buffer_base(NULL),
_split_data(false) { // prepare internal buffer
allocate_internal_buffer();
}
~ParDumpWriter() {
assert(_buffer_queue != NULL, "Sanity check");
assert((_internal_buffer_used == 0) && (_buffer_queue->is_empty()), "All data must be send to backend"); if (_buffer_base != NULL) {
os::free(_buffer_base);
_buffer_base = NULL;
} delete _buffer_queue;
_buffer_queue = NULL;
}
// total number of bytes written to the disk
julong bytes_written() const override { return (julong) _backend_ptr->get_written(); } charconst* error() const override { return _err == NULL ? _backend_ptr->error() : _err; }
staticvoid before_work() {
assert(_lock == NULL, "ParDumpWriter lock must be initialized only once");
_lock = new (std::nothrow) PaddedMonitor(Mutex::safepoint, "ParallelHProfWriter_lock");
}
staticvoid after_work() {
assert(_lock != NULL, "ParDumpWriter lock is not initialized"); delete _lock;
_lock = NULL;
}
// write raw bytes void write_raw(constvoid* s, size_t len) override {
assert(!_in_dump_segment || (_sub_record_left >= len), "sub-record too large");
debug_only(_sub_record_left -= len);
assert(!_split_data, "Invalid split data");
_split_data = true; // flush buffer to make room. while (len > buffer_size() - position()) {
assert(!_in_dump_segment || _is_huge_sub_record, "Cannot overflow in non-huge sub-record.");
size_t to_write = buffer_size() - position();
memcpy(buffer() + position(), s, to_write);
s = (void*) ((char*) s + to_write);
len -= to_write;
set_position(position() + to_write);
flush();
}
_split_data = false;
memcpy(buffer() + position(), s, len);
set_position(position() + len);
}
void flush_to_backend(bool force) { // Guarantee there is only one writer updating the backend buffers.
MonitorLocker ml(_lock, Mutex::_no_safepoint_check_flag); while (!_buffer_queue->is_empty()) {
ParWriterBufferQueueElem* entry = _buffer_queue->dequeue();
flush_buffer(entry->_buffer, entry->_used); // Delete buffer and entry.
reclaim_entry(entry);
entry = NULL;
}
assert(_pos == 0, "available buffer must be empty before flush"); // Flush internal buffer. if (_internal_buffer_used > 0) {
flush_buffer(_buffer_base, _internal_buffer_used);
os::free(_buffer_base);
_pos = 0;
_internal_buffer_used = 0;
_buffer_base = _buffer = NULL; // Allocate internal buffer for future use.
allocate_internal_buffer();
}
}
};
// Definition of the static lock shared by all ParDumpWriter instances.
Monitor* ParDumpWriter::_lock = NULL;
// Support class with a collection of functions used when dumping the heap
class DumperSupport : AllStatic { public:
// write a header of the given type staticvoid write_header(AbstractDumpWriter* writer, hprofTag tag, u4 len);
// returns hprof tag for the given type signature static hprofTag sig2tag(Symbol* sig); // returns hprof tag for the given basic type static hprofTag type2tag(BasicType type); // Returns the size of the data to write. static u4 sig2size(Symbol* sig);
// returns the size of the instance of the given class static u4 instance_size(Klass* k);
// dump a jfloat staticvoid dump_float(AbstractDumpWriter* writer, jfloat f); // dump a jdouble staticvoid dump_double(AbstractDumpWriter* writer, jdouble d); // dumps the raw value of the given field staticvoid dump_field_value(AbstractDumpWriter* writer, char type, oop obj, int offset); // returns the size of the static fields; also counts the static fields static u4 get_static_fields_size(InstanceKlass* ik, u2& field_count); // dumps static fields of the given class staticvoid dump_static_fields(AbstractDumpWriter* writer, Klass* k); // dump the raw values of the instance fields of the given object staticvoid dump_instance_fields(AbstractDumpWriter* writer, oop o); // get the count of the instance fields for a given class static u2 get_instance_fields_count(InstanceKlass* ik); // dumps the definition of the instance fields for a given class staticvoid dump_instance_field_descriptors(AbstractDumpWriter* writer, Klass* k); // creates HPROF_GC_INSTANCE_DUMP record for the given object staticvoid dump_instance(AbstractDumpWriter* writer, oop o); // creates HPROF_GC_CLASS_DUMP record for the given instance class staticvoid dump_instance_class(AbstractDumpWriter* writer, Klass* k); // creates HPROF_GC_CLASS_DUMP record for a given array class staticvoid dump_array_class(AbstractDumpWriter* writer, Klass* k);
// creates HPROF_GC_OBJ_ARRAY_DUMP record for the given object array staticvoid dump_object_array(AbstractDumpWriter* writer, objArrayOop array); // creates HPROF_GC_PRIM_ARRAY_DUMP record for the given type array staticvoid dump_prim_array(AbstractDumpWriter* writer, typeArrayOop array); // create HPROF_FRAME record for the given method and bci staticvoid dump_stack_frame(AbstractDumpWriter* writer, int frame_serial_num, int class_serial_num, Method* m, int bci);
// check if we need to truncate an array staticint calculate_array_max_length(AbstractDumpWriter* writer, arrayOop array, short header_size);
// fixes up the current dump record and writes HPROF_HEAP_DUMP_END record staticvoid end_of_dump(AbstractDumpWriter* writer);
static oop mask_dormant_archived_object(oop o) { if (o != NULL && o->klass()->java_mirror() == NULL) { // Ignore this object since the corresponding java mirror is not loaded. // Might be a dormant archive object. return NULL;
} else { return o;
}
}
};
// write a header of the given type: 1-byte tag, 4-byte ticks, 4-byte length
void DumperSupport:: write_header(AbstractDumpWriter* writer, hprofTag tag, u4 len) {
  writer->write_u1(tag);
  writer->write_u4(0); // current ticks
  writer->write_u4(len);
}
// returns hprof tag for the given type signature
hprofTag DumperSupport::sig2tag(Symbol* sig) { switch (sig->char_at(0)) { case JVM_SIGNATURE_CLASS : return HPROF_NORMAL_OBJECT; case JVM_SIGNATURE_ARRAY : return HPROF_NORMAL_OBJECT; case JVM_SIGNATURE_BYTE : return HPROF_BYTE; case JVM_SIGNATURE_CHAR : return HPROF_CHAR; case JVM_SIGNATURE_FLOAT : return HPROF_FLOAT; case JVM_SIGNATURE_DOUBLE : return HPROF_DOUBLE; case JVM_SIGNATURE_INT : return HPROF_INT; case JVM_SIGNATURE_LONG : return HPROF_LONG; case JVM_SIGNATURE_SHORT : return HPROF_SHORT; case JVM_SIGNATURE_BOOLEAN : return HPROF_BOOLEAN; default : ShouldNotReachHere(); /* to shut up compiler */ return HPROF_BYTE;
}
}
// returns hprof tag for the given basic type
hprofTag DumperSupport::type2tag(BasicType type) {
  switch (type) {
    case T_BYTE     : return HPROF_BYTE;
    case T_CHAR     : return HPROF_CHAR;
    case T_FLOAT    : return HPROF_FLOAT;
    case T_DOUBLE   : return HPROF_DOUBLE;
    case T_INT      : return HPROF_INT;
    case T_LONG     : return HPROF_LONG;
    case T_SHORT    : return HPROF_SHORT;
    case T_BOOLEAN  : return HPROF_BOOLEAN;
    default : ShouldNotReachHere(); /* to shut up compiler */ return HPROF_BYTE;
  }
}
// Returns the number of bytes a value of the given field-type signature
// occupies in the HPROF record body (object references use the ID size).
u4 DumperSupport::sig2size(Symbol* sig) {
  switch (sig->char_at(0)) {
    case JVM_SIGNATURE_CLASS:
    case JVM_SIGNATURE_ARRAY: return sizeof(address);
    case JVM_SIGNATURE_BOOLEAN:
    case JVM_SIGNATURE_BYTE: return 1;
    case JVM_SIGNATURE_SHORT:
    case JVM_SIGNATURE_CHAR: return 2;
    case JVM_SIGNATURE_INT:
    case JVM_SIGNATURE_FLOAT: return 4;
    case JVM_SIGNATURE_LONG:
    case JVM_SIGNATURE_DOUBLE: return 8;
    default: ShouldNotReachHere(); /* to shut up compiler */ return 0;
  }
}
// Poor man's std::bit_cast: reinterpret the bits of 'from' as a value of
// type T. Replace with the real thing when we can use C++20. The copy goes
// through memcpy, so no strict-aliasing rules are violated.
template<typename T, typename F> T bit_cast(F from) {
  static_assert(sizeof(T) == sizeof(F), "must be of the same size");
  T result;
  memcpy(&result, &from, sizeof(result));
  return result;
}
// dumps the raw value of the given field, using the first character of the
// field's type signature to select the width and encoding
void DumperSupport::dump_field_value(AbstractDumpWriter* writer, char type, oop obj, int offset) {
  switch (type) {
    case JVM_SIGNATURE_CLASS :
    case JVM_SIGNATURE_ARRAY : {
      oop o = obj->obj_field_access<ON_UNKNOWN_OOP_REF | AS_NO_KEEPALIVE>(offset);
      if (o != NULL && log_is_enabled(Debug, cds, heap) && mask_dormant_archived_object(o) == NULL) {
        ResourceMark rm;
        log_debug(cds, heap)("skipped dormant archived object " INTPTR_FORMAT " (%s) referenced by " INTPTR_FORMAT " (%s)",
                             p2i(o), o->klass()->external_name(),
                             p2i(obj), obj->klass()->external_name());
      }
      // Dormant archived objects are written as NULL references.
      o = mask_dormant_archived_object(o);
      assert(oopDesc::is_oop_or_null(o), "Expected an oop or NULL at " PTR_FORMAT, p2i(o));
      writer->write_objectID(o);
      break;
    }
    case JVM_SIGNATURE_BYTE : {
      jbyte b = obj->byte_field(offset);
      writer->write_u1(b);
      break;
    }
    case JVM_SIGNATURE_CHAR : {
      jchar c = obj->char_field(offset);
      writer->write_u2(c);
      break;
    }
    case JVM_SIGNATURE_SHORT : {
      jshort s = obj->short_field(offset);
      writer->write_u2(s);
      break;
    }
    case JVM_SIGNATURE_FLOAT : {
      jfloat f = obj->float_field(offset);
      dump_float(writer, f);
      break;
    }
    case JVM_SIGNATURE_DOUBLE : {
      jdouble d = obj->double_field(offset);
      dump_double(writer, d);
      break;
    }
    case JVM_SIGNATURE_INT : {
      jint i = obj->int_field(offset);
      writer->write_u4(i);
      break;
    }
    case JVM_SIGNATURE_LONG : {
      jlong l = obj->long_field(offset);
      writer->write_u8(l);
      break;
    }
    case JVM_SIGNATURE_BOOLEAN : {
      jboolean b = obj->bool_field(offset);
      writer->write_u1(b);
      break;
    }
    default : {
      ShouldNotReachHere();
      break;
    }
  }
}
// returns the size of the instance of the given class
// (sum of the HPROF sizes of all non-static fields)
u4 DumperSupport::instance_size(Klass* k) {
  InstanceKlass* ik = InstanceKlass::cast(k);
  u4 size = 0;

  for (FieldStream fld(ik, false, false); !fld.eos(); fld.next()) {
    if (!fld.access_flags().is_static()) {
      size += sig2size(fld.signature());
    }
  }
  return size;
}
for (FieldStream fldc(ik, true, true); !fldc.eos(); fldc.next()) { if (fldc.access_flags().is_static()) {
field_count++;
size += sig2size(fldc.signature());
}
}
// Add in resolved_references which is referenced by the cpCache // The resolved_references is an array per InstanceKlass holding the // strings and other oops resolved from the constant pool.
oop resolved_references = ik->constants()->resolved_references_or_null(); if (resolved_references != NULL) {
field_count++;
size += sizeof(address);
// Add in the resolved_references of the used previous versions of the class // in the case of RedefineClasses
InstanceKlass* prev = ik->previous_versions(); while (prev != NULL && prev->constants()->resolved_references_or_null() != NULL) {
field_count++;
size += sizeof(address);
prev = prev->previous_versions();
}
}
// We write the value itself plus a name and a one byte type tag per field. return size + field_count * (sizeof(address) + 1);
}
// dumps static fields of the given class: for each static field its name,
// type tag and value (read from the class's java mirror)
void DumperSupport::dump_static_fields(AbstractDumpWriter* writer, Klass* k) {
  InstanceKlass* ik = InstanceKlass::cast(k);

  // dump the field descriptors and raw values
  for (FieldStream fld(ik, true, true); !fld.eos(); fld.next()) {
    if (fld.access_flags().is_static()) {
      Symbol* sig = fld.signature();

      writer->write_symbolID(fld.name()); // name
      writer->write_u1(sig2tag(sig));     // type

      // value
      dump_field_value(writer, sig->char_at(0), ik->java_mirror(), fld.offset());
    }
  }

  // Add resolved_references for each class that has them
  oop resolved_references = ik->constants()->resolved_references_or_null();
  if (resolved_references != NULL) {
    writer->write_symbolID(vmSymbols::resolved_references_name());  // name
    writer->write_u1(sig2tag(vmSymbols::object_array_signature())); // type
    writer->write_objectID(resolved_references);

    // Also write any previous versions
    InstanceKlass* prev = ik->previous_versions();
    while (prev != NULL && prev->constants()->resolved_references_or_null() != NULL) {
      writer->write_symbolID(vmSymbols::resolved_references_name());  // name
      writer->write_u1(sig2tag(vmSymbols::object_array_signature())); // type
      writer->write_objectID(prev->constants()->resolved_references());
      prev = prev->previous_versions();
    }
  }
}
// dump the raw values of the instance fields of the given object
void DumperSupport::dump_instance_fields(AbstractDumpWriter* writer, oop o) {
  InstanceKlass* ik = InstanceKlass::cast(o->klass());

  for (FieldStream fld(ik, false, false); !fld.eos(); fld.next()) {
    if (!fld.access_flags().is_static()) {
      Symbol* sig = fld.signature();
      dump_field_value(writer, sig->char_at(0), o, fld.offset());
    }
  }
}
// returns the count of the instance fields for a given class
u2 DumperSupport::get_instance_fields_count(InstanceKlass* ik) {
  u2 field_count = 0;

  for (FieldStream fldc(ik, true, true); !fldc.eos(); fldc.next()) {
    if (!fldc.access_flags().is_static()) field_count++;
  }

  return field_count;
}
// dumps the definition of the instance fields for a given class
// (name and type tag per field; values are written separately per object)
void DumperSupport::dump_instance_field_descriptors(AbstractDumpWriter* writer, Klass* k) {
  InstanceKlass* ik = InstanceKlass::cast(k);

  // dump the field descriptors
  for (FieldStream fld(ik, true, true); !fld.eos(); fld.next()) {
    if (!fld.access_flags().is_static()) {
      Symbol* sig = fld.signature();

      writer->write_symbolID(fld.name()); // name
      writer->write_u1(sig2tag(sig));     // type
    }
  }
}
// creates HPROF_GC_INSTANCE_DUMP record for the given object void DumperSupport::dump_instance(AbstractDumpWriter* writer, oop o) {
InstanceKlass* ik = InstanceKlass::cast(o->klass());
u4 is = instance_size(ik);
u4 size = 1 + sizeof(address) + 4 + sizeof(address) + 4 + is;
// number of bytes that follow
writer->write_u4(is);
// field values
dump_instance_fields(writer, o);
writer->end_sub_record();
}
// creates HPROF_GC_CLASS_DUMP record for the given instance class void DumperSupport::dump_instance_class(AbstractDumpWriter* writer, Klass* k) {
InstanceKlass* ik = InstanceKlass::cast(k);
// We can safepoint and do a heap dump at a point where we have a Klass, // but no java mirror class has been setup for it. So we need to check // that the class is at least loaded, to avoid crash from a null mirror. if (!ik->is_loaded()) { return;
}
// class ID
writer->write_classID(ik);
writer->write_u4(STACK_TRACE_ID);
// super class ID
InstanceKlass* java_super = ik->java_super(); if (java_super == NULL) {
writer->write_objectID(oop(NULL));
} else {
writer->write_classID(java_super);
}
// description of instance fields
writer->write_u2(instance_fields_count);
dump_instance_field_descriptors(writer, ik);
writer->end_sub_record();
}
// creates HPROF_GC_CLASS_DUMP record for the given array class void DumperSupport::dump_array_class(AbstractDumpWriter* writer, Klass* k) {
InstanceKlass* ik = NULL; // bottom class for object arrays, NULL for primitive type arrays if (k->is_objArray_klass()) {
Klass *bk = ObjArrayKlass::cast(k)->bottom_klass();
assert(bk != NULL, "checking"); if (bk->is_instance_klass()) {
ik = InstanceKlass::cast(bk);
}
}
// super class of array classes is java.lang.Object
InstanceKlass* java_super = k->java_super();
assert(java_super != NULL, "checking");
writer->write_classID(java_super);
// Hprof uses an u4 as record length field, // which means we need to truncate arrays that are too long. int DumperSupport::calculate_array_max_length(AbstractDumpWriter* writer, arrayOop array, short header_size) {
BasicType type = ArrayKlass::cast(array->klass())->element_type();
assert(type >= T_BOOLEAN && type <= T_OBJECT, "invalid array element type");
int length = array->length();
int type_size; if (type == T_OBJECT) {
type_size = sizeof(address);
} else {
type_size = type2aelembytes(type);
}
// nothing to copy if (length == 0) {
writer->end_sub_record(); return;
}
// If the byte ordering is big endian then we can copy most types directly
switch (type) { case T_INT : { if (Endian::is_Java_byte_ordering_different()) {
WRITE_ARRAY(array, int, u4, length);
} else {
writer->write_raw(array->int_at_addr(0), length_in_bytes);
} break;
} case T_BYTE : {
writer->write_raw(array->byte_at_addr(0), length_in_bytes); break;
} case T_CHAR : { if (Endian::is_Java_byte_ordering_different()) {
WRITE_ARRAY(array, char, u2, length);
} else {
writer->write_raw(array->char_at_addr(0), length_in_bytes);
} break;
} case T_SHORT : { if (Endian::is_Java_byte_ordering_different()) {
WRITE_ARRAY(array, short, u2, length);
} else {
writer->write_raw(array->short_at_addr(0), length_in_bytes);
} break;
} case T_BOOLEAN : { if (Endian::is_Java_byte_ordering_different()) {
WRITE_ARRAY(array, bool, u1, length);
} else {
writer->write_raw(array->bool_at_addr(0), length_in_bytes);
} break;
} case T_LONG : { if (Endian::is_Java_byte_ordering_different()) {
WRITE_ARRAY(array, long, u8, length);
} else {
writer->write_raw(array->long_at_addr(0), length_in_bytes);
} break;
}
// handle float/doubles in a special value to ensure than NaNs are // written correctly. TO DO: Check if we can avoid this on processors that // use IEEE 754.
case T_FLOAT : { for (int i = 0; i < length; i++) {
dump_float(writer, array->float_at(i));
} break;
} case T_DOUBLE : { for (int i = 0; i < length; i++) {
dump_double(writer, array->double_at(i));
} break;
} default : ShouldNotReachHere();
}
writer->end_sub_record();
}
// create a HPROF_FRAME record of the given Method* and bci
void DumperSupport::dump_stack_frame(AbstractDumpWriter* writer,
                                     int frame_serial_num,
                                     int class_serial_num,
                                     Method* m,
                                     int bci) {
  int line_number;
  if (m->is_native()) {
    line_number = -3; // native frame
  } else {
    line_number = m->line_number_from_bci(bci);
  }

  write_header(writer, HPROF_FRAME, 4*oopSize + 2*sizeof(u4));
  writer->write_id(frame_serial_num);   // frame serial number
  writer->write_symbolID(m->name());    // method's name
  writer->write_symbolID(m->signature()); // method's signature

  assert(m->method_holder()->is_instance_klass(), "not InstanceKlass");
  writer->write_symbolID(m->method_holder()->source_file_name()); // source file name
  writer->write_u4(class_serial_num);   // class serial number
  writer->write_u4((u4) line_number);   // line number
}
// Support class used to generate HPROF_UTF8 records from the entries in the
// SymbolTable.
// NOTE(review): the SymbolTable dumper this comment describes is not present
// in this chunk; the next definition belongs to JNIGlobalsDumper.
// Emits a HPROF_GC_ROOT_JNI_GLOBAL sub-record for each JNI global reference
// that refers to a Java object (instance, object array or type array).
void JNIGlobalsDumper::do_oop(oop* obj_p) {
  oop o = NativeAccess<AS_NO_KEEPALIVE>::oop_load(obj_p);

  // ignore these
  if (o == NULL) return;
  // we ignore global ref to symbols and other internal objects
  if (o->is_instance() || o->is_objArray() || o->is_typeArray()) {
    u4 size = 1 + 2 * sizeof(address);
    writer()->start_sub_record(HPROF_GC_ROOT_JNI_GLOBAL, size);
    writer()->write_objectID(o);
    writer()->write_rootID(obj_p); // global ref ID
    writer()->end_sub_record();
  }
};
// Support class used to generate HPROF_GC_ROOT_STICKY_CLASS records
// NOTE(review): the class this comment describes is not present in this
// chunk of the file.
// Large object heap dump support.
// To avoid memory consumption, when dumping large objects such as huge array and
// large objects whose size are larger than LARGE_OBJECT_DUMP_THRESHOLD, the scanned
// partial object/array data will be sent to the backend directly instead of caching
// the whole object/array in the internal buffer.
// The HeapDumpLargeObjectList is used to save the large object when dumper scans
// the heap. The large objects could be added (push) parallelly by multiple dumpers,
// But they will be removed (popped) serially only by the VM thread.
// NOTE(review): the body of HeapDumpLargeObjectList appears truncated in this
// chunk — atomic_push(), LargeObjectSizeThreshold and the closing brace are
// referenced elsewhere in this file but not visible here; confirm against
// the full source.
class HeapDumpLargeObjectList : public CHeapObj<mtInternal> {
 private:
  class HeapDumpLargeObjectListElem : public CHeapObj<mtInternal> {
   public:
    // Singly-linked list node holding one large object awaiting serial dump.
    HeapDumpLargeObjectListElem(oop obj) : _obj(obj), _next(NULL) { }
    oop _obj;
    HeapDumpLargeObjectListElem* _next;
  };
// Support class using when iterating over the heap.
// NOTE(review): this declaration appears truncated — the constructor, a
// writer() accessor and the is_large() helper (defined later in this file)
// are not visible in the class body; confirm against the full source.
class HeapObjectDumper : public ObjectClosure {
 private:
  AbstractDumpWriter* _writer;    // destination for the dump records
  HeapDumpLargeObjectList* _list; // optional list for deferring large objects

  // called for each object in the heap
  void do_object(oop o);
};
// Dispatches each heap object to the matching HPROF dump routine; class
// mirrors and (when deferring is enabled) large objects are skipped here.
void HeapObjectDumper::do_object(oop o) {
  // skip classes as these emitted as HPROF_GC_CLASS_DUMP records
  if (o->klass() == vmClasses::Class_klass()) {
    if (!java_lang_Class::is_primitive(o)) {
      return;
    }
  }

  // If large object list exists and it is large object/array,
  // add oop into the list and skip scan. VM thread will process it later.
  if (_list != NULL && is_large(o)) {
    _list->atomic_push(o);
    return;
  }

  if (o->is_instance()) {
    // create a HPROF_GC_INSTANCE record for each object
    DumperSupport::dump_instance(writer(), o);
  } else if (o->is_objArray()) {
    // create a HPROF_GC_OBJ_ARRAY_DUMP record for each object array
    DumperSupport::dump_object_array(writer(), objArrayOop(o));
  } else if (o->is_typeArray()) {
    // create a HPROF_GC_PRIM_ARRAY_DUMP record for each type array
    DumperSupport::dump_prim_array(writer(), typeArrayOop(o));
  }
}
// Estimates the dump size of an object and returns true when it exceeds
// the large-object threshold (such objects are deferred to the VM thread).
bool HeapObjectDumper::is_large(oop o) {
  size_t size = 0;
  if (o->is_instance()) {
    // Use o->size() * 8 as the upper limit of instance size to avoid iterating static fields
    size = o->size() * 8;
  } else if (o->is_objArray()) {
    objArrayOop array = objArrayOop(o);
    BasicType type = ArrayKlass::cast(array->klass())->element_type();
    assert(type >= T_BOOLEAN && type <= T_OBJECT, "invalid array element type");
    int length = array->length();
    // object array elements are dumped as pointer-sized IDs
    int type_size = sizeof(address);
    size = (size_t)length * type_size;
  } else if (o->is_typeArray()) {
    typeArrayOop array = typeArrayOop(o);
    BasicType type = ArrayKlass::cast(array->klass())->element_type();
    assert(type >= T_BOOLEAN && type <= T_OBJECT, "invalid array element type");
    int length = array->length();
    int type_size = type2aelembytes(type);
    size = (size_t)length * type_size;
  }
  return size > HeapDumpLargeObjectList::LargeObjectSizeThreshold;
}
// The dumper controller for parallel heap dump class DumperController : public CHeapObj<mtInternal> { private: bool _started;
Monitor* _lock;
uint _dumper_number;
uint _complete_number;
void wait_all_dumpers_complete() {
assert (_started == true, "wrong state when wait for dumper complete");
MonitorLocker ml(_lock, Mutex::_no_safepoint_check_flag); while (_complete_number != _dumper_number) {
ml.wait();
}
_started = false;
}
};
// The VM operation that performs the heap dump class VM_HeapDumper : public VM_GC_Operation, public WorkerTask { private: static VM_HeapDumper* _global_dumper; static DumpWriter* _global_writer;
DumpWriter* _local_writer;
JavaThread* _oome_thread;
Method* _oome_constructor; bool _gc_before_heap_dump;
GrowableArray<Klass*>* _klass_map;
ThreadStackTrace** _stack_traces; int _num_threads; // parallel heap dump support
uint _num_dumper_threads;
uint _num_writer_threads;
DumperController* _dumper_controller;
ParallelObjectIterator* _poi;
HeapDumpLargeObjectList* _large_object_list;
// VMDumperType is for thread that dumps both heap and non-heap data. staticconst size_t VMDumperType = 0; staticconst size_t WriterType = 1; staticconst size_t DumperType = 2; // worker id of VMDumper thread. staticconst size_t VMDumperWorkerId = 0;
// Maps a worker id to its role in a parallel dump: worker 0 is the VMDumper
// (heap and non-heap data), ids [1, _num_dumper_threads) are heap-only
// dumpers, and the remaining ids are writers.
size_t get_worker_type(uint worker_id) {
  assert(_num_writer_threads >= 1, "Must be at least one writer");
  // worker id of VMDumper that dump heap and non-heap data
  if (worker_id == VMDumperWorkerId) {
    return VMDumperType;
  }
  // worker id of dumper starts from 1, which only dump heap data
  if (worker_id < _num_dumper_threads) {
    return DumperType;
  }
  // worker id of writer starts from _num_dumper_threads
  return WriterType;
}
// Splits the num_total active workers into dumper and writer threads and,
// when more than one dumper will run, sets up the parallel writer and the
// controller used to wait for heap-only dumpers.
void prepare_parallel_dump(uint num_total) {
  assert (_dumper_controller == NULL, "dumper controller must be NULL");
  assert (num_total > 0, "active workers number must >= 1");
  // Dumper threads number must not be larger than active workers number.
  if (num_total < _num_dumper_threads) {
    _num_dumper_threads = num_total - 1;
  }
  // Calculate dumper and writer threads number.
  _num_writer_threads = num_total - _num_dumper_threads;
  // If dumper threads number is 1, only the VMThread works as a dumper.
  // If dumper threads number is equal to active workers, need at least one worker thread as writer.
  if (_num_dumper_threads > 0 && _num_writer_threads == 0) {
    _num_writer_threads = 1;
    _num_dumper_threads = num_total - _num_writer_threads;
  }
  // Prepare parallel writer.
  if (_num_dumper_threads > 1) {
    ParDumpWriter::before_work();
    // Number of dumper threads that only iterate heap, i.e. excluding
    // the VMDumper thread.
    uint heap_only_dumper_threads = _num_dumper_threads - 1;
    _dumper_controller = new (std::nothrow) DumperController(heap_only_dumper_threads);
  }
}
/* NOTE(review): the following German website disclaimer is copy/paste or
   extraction residue, not source code; commented out so the file stays
   compilable. Original text preserved below pending removal:
   Die Informationen auf dieser Webseite wurden
   nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
   noch Qualität der bereit gestellten Informationen zugesichert.
   Bemerkung:
   Die farbliche Syntaxdarstellung ist noch experimentell. */