/* * Copyright (c) 2003, 2022, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 only, as * published by the Free Software Foundation. * * This code is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * version 2 for more details (a copy is included in the LICENSE file that * accompanied this code). * * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA * or visit www.oracle.com if you need additional information or have any * questions. *
*/
// NOTE(review): this region is garbled -- the header of
// JvmtiAgentThread::start_function_wrapper is fused with the body of what
// looks like a GrowableCache recache routine ('len', '_cache' and '_elements'
// are not declared in this scope).  Reconstruct from the original file
// before editing.  TODO confirm against upstream jvmtiImpl.cpp.
void
JvmtiAgentThread::start_function_wrapper(JavaThread *thread, TRAPS) { // It is expected that any Agent threads will be created as // Java Threads. If this is the case, notification of the creation // of the thread is given in JavaThread::thread_main().
assert(thread == JavaThread::current(), "sanity check");
// NOTE(review): from here down the statements belong to the cache-rebuild
// loop, not to start_function_wrapper.
for (int i=0; i<len; i++) {
// NOTE(review): the guard 'if (_cache[i] == NULL) {' was swallowed into the
// trailing comment of the next line, so as written the assert below would
// execute unconditionally.
_cache[i] = _elements->at(i)->getCacheValue(); // // The cache entry has gone bad. Without a valid frame pointer // value, the entry is useless so we simply delete it in product // mode. The call to remove() will rebuild the cache again // without the bad entry. // if (_cache[i] == NULL) {
assert(false, "cannot recache NULL elements");
remove(i); return;
}
}
// Terminate the cache array with a NULL sentinel.
_cache[len] = NULL;
// number of elements in the collection int GrowableCache::length() { return _elements->length();
}
// Return the element stored at the given index.  The slot must be
// populated; a NULL entry trips the assert in debug builds.
GrowableElement* GrowableCache::at(int index) {
  GrowableElement* elem = (GrowableElement*) _elements->at(index);
  assert(elem != NULL, "e != NULL");
  return elem;
}
// Locate the element in the collection, comparing entries with
// GrowableCache::equals.
int GrowableCache::find(GrowableElement* e) {
  return _elements->find(e, GrowableCache::equals);
}
// append a copy of the element to the end of the collection void GrowableCache::append(GrowableElement* e) {
GrowableElement *new_e = e->clone();
_elements->append(new_e);
recache();
}
// remove the element at index void GrowableCache::remove (int index) {
GrowableElement *e = _elements->at(index);
assert(e != NULL, "e != NULL");
_elements->remove(e); delete e;
recache();
}
// clear out all elements, release all heap space and // let our listener know that things have changed. void GrowableCache::clear() { int len = _elements->length(); for (int i=0; i<len; i++) { delete _elements->at(i);
}
_elements->clear();
recache();
}
// add/remove breakpoint to/from versions of the method that are EMCP.
// NOTE(review): this region is a method body whose enclosing signature is
// missing from this chunk (it reads _method, _bci and meth_act, presumably
// members/parameter of a JvmtiBreakpoint helper -- TODO confirm against the
// original file).
Thread *thread = Thread::current();
InstanceKlass* ik = _method->method_holder();
Symbol* m_name = _method->name();
Symbol* m_signature = _method->signature();
// NOTE(review): the 'for (...' loop header below was swallowed into this
// comment line, leaving its condition and increment clauses dangling.
// search previous versions if they exist for (InstanceKlass* pv_node = ik->previous_versions();
pv_node != NULL;
pv_node = pv_node->previous_versions()) {
Array<Method*>* methods = pv_node->methods();
// Walk this version's methods from the back looking for a match.
for (int i = methods->length() - 1; i >= 0; i--) {
// NOTE(review): the 'if (!method->is_obsolete() &&' opener at the end of
// the next line is inside the trailing comment, so the conditions on the
// two lines after it are left dangling as written.
Method* method = methods->at(i); // Only set breakpoints in EMCP methods. // EMCP methods are old but not obsolete. Equivalent // Modulo Constant Pool means the method is equivalent except // the constant pool and instructions that access the constant // pool might be different. // If a breakpoint is set in a redefined method, its EMCP methods // must have a breakpoint also. // None of the methods are deleted until none are running. // This code could set a breakpoint in a method that // is never reached, but this won't be noticeable to the programmer. if (!method->is_obsolete() &&
method->name() == m_name &&
method->signature() == m_signature) {
ResourceMark rm;
// Log whether we are setting or clearing, based on which member
// function pointer was supplied.
log_debug(redefine, class, breakpoint)
("%sing breakpoint in %s(%s)", meth_act == &Method::set_breakpoint ? "sett" : "clear",
method->name()->as_C_string(), method->signature()->as_C_string());
(method->*meth_act)(_bci); break;
}
}
}
}
// Clear every breakpoint set in methods of the given class.  Deleting an
// entry may reshuffle the underlying list in an implementation-defined
// way, so after each removal we restart the scan from the front; we are
// done once a full pass removes nothing.
void JvmtiBreakpoints::clearall_in_class_at_safepoint(Klass* klass) {
  bool removed_one = true;
  while (removed_one) {
    removed_one = false;
    int count = _bps.length();
    for (int i = 0; i < count; i++) {
      JvmtiBreakpoint& bp = _bps.at(i);
      if (bp.method()->method_holder() == klass) {
        bp.clear();
        _bps.remove(i);
        // 'i' may be stale after remove(); restart the scan.
        removed_one = true;
        break;
      }
    }
  }
}
// Check that the klass is assignable to a type with the given signature. // Another solution could be to use the function Klass::is_subtype_of(type). // But the type class can be forced to load/initialize eagerly in such a case. // This may cause unexpected consequences like CFLH or class-init JVMTI events. // It is better to avoid such a behavior. bool VM_BaseGetOrSetLocal::is_assignable(constchar* ty_sign, Klass* klass, Thread* thread) {
assert(ty_sign != NULL, "type signature must not be NULL");
assert(thread != NULL, "thread must not be NULL");
assert(klass != NULL, "klass must not be NULL");
int len = (int) strlen(ty_sign); if (ty_sign[0] == JVM_SIGNATURE_CLASS &&
ty_sign[len-1] == JVM_SIGNATURE_ENDCLASS) { // Need pure class/interface name
ty_sign++;
len -= 2;
}
TempNewSymbol ty_sym = SymbolTable::new_symbol(ty_sign, len); if (klass->name() == ty_sym) { returntrue;
} // Compare primary supers int super_depth = klass->super_depth(); int idx; for (idx = 0; idx < super_depth; idx++) { if (klass->primary_super_of_depth(idx)->name() == ty_sym) { returntrue;
}
} // Compare secondary supers const Array<Klass*>* sec_supers = klass->secondary_supers(); for (idx = 0; idx < sec_supers->length(); idx++) { if (((Klass*) sec_supers->at(idx))->name() == ty_sym) { returntrue;
}
} returnfalse;
}
// Validate _index/_type against the method's LocalVariableTable for the
// frame's current bci, setting _result on failure.
// NOTE(review): this definition is truncated -- the tail of the function
// (after the second NULL_CHECK) is missing from this chunk, and several
// 'return true;'/'return false;' tokens are fused ('returntrue',
// 'returnfalse') by whitespace loss.  Reconstruct before editing.
bool VM_BaseGetOrSetLocal::check_slot_type_lvt(javaVFrame* jvf) {
Method* method = jvf->method(); if (!method->has_localvariable_table()) { // Just to check index boundaries.
jint extra_slot = (_type == T_LONG || _type == T_DOUBLE) ? 1 : 0; if (_index < 0 || _index + extra_slot >= method->max_locals()) {
_result = JVMTI_ERROR_INVALID_SLOT; returnfalse;
} returntrue;
}
jint num_entries = method->localvariable_table_length(); if (num_entries == 0) {
_result = JVMTI_ERROR_INVALID_SLOT; returnfalse; // There are no slots
} int signature_idx = -1; int vf_bci = jvf->bci();
// Scan the LVT for an entry covering this slot at the current bci.
LocalVariableTableElement* table = method->localvariable_table_start(); for (int i = 0; i < num_entries; i++) { int start_bci = table[i].start_bci; int end_bci = start_bci + table[i].length;
// NOTE(review): the 'if (...' opener at the end of the next comment line
// was swallowed into the comment by the extraction.
// Here we assume that locations of LVT entries // with the same slot number cannot be overlapped if (_index == (jint) table[i].slot && start_bci <= vf_bci && vf_bci <= end_bci) {
signature_idx = (int) table[i].descriptor_cp_index; break;
}
} if (signature_idx == -1) {
_result = JVMTI_ERROR_INVALID_SLOT; returnfalse; // Incorrect slot index
}
// Map the declared signature to a BasicType and normalize sub-int and
// array types before comparing against the requested _type.
Symbol* sign_sym = method->constants()->symbol_at(signature_idx);
BasicType slot_type = Signature::basic_type(sign_sym);
switch (slot_type) { case T_BYTE: case T_SHORT: case T_CHAR: case T_BOOLEAN:
slot_type = T_INT; break; case T_ARRAY:
slot_type = T_OBJECT; break; default: break;
}; if (_type != slot_type) {
_result = JVMTI_ERROR_TYPE_MISMATCH; returnfalse;
}
// For SetLocal of an object, verify the supplied jobject is valid.
jobject jobj = _value.l; if (_set && slot_type == T_OBJECT && jobj != NULL) { // NULL reference is allowed // Check that the jobject class matches the return type signature.
oop obj = JNIHandles::resolve_external_guard(jobj);
NULL_CHECK(obj, (_result = JVMTI_ERROR_INVALID_OBJECT, false));
Klass* ob_k = obj->klass();
NULL_CHECK(ob_k, (_result = JVMTI_ERROR_INVALID_OBJECT, false));
// NOTE(review): this region is garbled -- the opening of
// VM_GetOrSetLocal::doit_prologue() is fused onto what appears to be the
// body of the doit() operation (the statements below return void and read
// _jvf/_set/_type), and several 'return;' statements were swallowed into
// trailing comments.  Reconstruct from the original file before editing.
bool VM_GetOrSetLocal::doit_prologue() { if (!_eb.deoptimize_objects(_depth, _depth)) { // The target frame is affected by a reallocation failure.
_result = JVMTI_ERROR_OUT_OF_MEMORY; returnfalse;
}
// NOTE(review): the 'return;' at the end of the next comment is part of
// the comment text, not code.
frame fr = _jvf->fr(); if (_set && _depth != 0 && Continuation::is_frame_in_continuation(_jvf->thread(), fr)) {
_result = JVMTI_ERROR_OPAQUE_FRAME; // deferred locals are not fully supported in continuations return;
}
// Validate the request: receiver access is invalid for static methods;
// native methods have no accessible locals (except the receiver case).
Method* method = _jvf->method(); if (getting_receiver()) { if (method->is_static()) {
_result = JVMTI_ERROR_INVALID_SLOT; return;
}
} else { if (method->is_native()) {
_result = JVMTI_ERROR_OPAQUE_FRAME; return;
}
if (!check_slot_type_no_lvt(_jvf)) { return;
} if (method->has_localvariable_table() &&
!check_slot_type_lvt(_jvf)) { return;
}
}
InterpreterOopMap oop_mask;
_jvf->method()->mask_for(_jvf->bci(), &oop_mask); if (oop_mask.is_dead(_index)) { // The local can be invalid and uninitialized in the scope of current bci
_result = JVMTI_ERROR_INVALID_SLOT; return;
} if (_set) { if (fr.is_heap_frame()) { // we want this check after the check for JVMTI_ERROR_INVALID_SLOT
assert(Continuation::is_frame_in_continuation(_jvf->thread(), fr), "sanity check"); // If the topmost frame is a heap frame, then it hasn't been thawed. This can happen // if we are executing at a return barrier safepoint. The callee frame has been popped, // but the caller frame has not been thawed. We can't support a JVMTI SetLocal in the callee // frame at this point, because we aren't truly in the callee yet. // fr.is_heap_frame() is impossible if a continuation is at a single step or breakpoint.
_result = JVMTI_ERROR_OPAQUE_FRAME; // deferred locals are not fully supported in continuations return;
}
// NOTE(review): the 'if (can_be_deoptimized(...' and nested 'if' openers
// below are embedded at the tail of the comment text on the next line.
// Force deoptimization of frame if compiled because it's // possible the compiler emitted some locals as constant values, // meaning they are not mutable. if (can_be_deoptimized(_jvf)) { // Continuation can't be unmounted at this point (it was checked/reported in get_java_vframe). if (Continuation::is_frame_in_continuation(_jvf->thread(), fr)) {
_result = JVMTI_ERROR_OPAQUE_FRAME; // can't deoptimize for top continuation frame return;
}
// Schedule deoptimization so that eventually the local // update will be written to an interpreter frame.
Deoptimization::deoptimize_frame(_jvf->thread(), _jvf->fr().id());
// Now store a new value for the local which will be applied // once deoptimization occurs. Note however that while this // write is deferred until deoptimization actually happens // can vframe created after this point will have its locals // reflecting this update so as far as anyone can see the // write has already taken place.
// If we are updating an oop then get the oop from the handle // since the handle will be long gone by the time the deopt // happens. The oop stored in the deferred local will be // gc'd on its own. if (_type == T_OBJECT) {
_value.l = cast_from_oop<jobject>(JNIHandles::resolve_external_guard(_value.l));
} // Re-read the vframe so we can see that it is deoptimized // [ Only need because of assert in update_local() ]
_jvf = get_java_vframe();
((compiledVFrame*)_jvf)->update_local(_type, _index, _value); return;
}
// Interpreted (or already-deoptimized) frame: write the value directly
// into the frame's locals.
StackValueCollection *locals = _jvf->locals();
Thread* current_thread = VMThread::vm_thread();
HandleMark hm(current_thread);
switch (_type) { case T_INT: locals->set_int_at (_index, _value.i); break; case T_LONG: locals->set_long_at (_index, _value.j); break; case T_FLOAT: locals->set_float_at (_index, _value.f); break; case T_DOUBLE: locals->set_double_at(_index, _value.d); break; case T_OBJECT: {
Handle ob_h(current_thread, JNIHandles::resolve_external_guard(_value.l));
locals->set_obj_at (_index, ob_h); break;
} default: ShouldNotReachHere();
}
_jvf->set_locals(locals);
// Get path: native+compiled frames only support reading the receiver;
// otherwise read the requested local out of the frame.
} else { if (_jvf->method()->is_native() && _jvf->is_compiled_frame()) {
assert(getting_receiver(), "Can only get here when getting receiver");
oop receiver = _jvf->fr().get_native_receiver();
_value.l = JNIHandles::make_local(_calling_thread, receiver);
} else {
StackValueCollection *locals = _jvf->locals();
switch (_type) { case T_INT: _value.i = locals->int_at (_index); break; case T_LONG: _value.j = locals->long_at (_index); break; case T_FLOAT: _value.f = locals->float_at (_index); break; case T_DOUBLE: _value.d = locals->double_at(_index); break; case T_OBJECT: { // Wrap the oop to be returned in a local JNI handle since // oops_do() no longer applies after doit() is finished.
oop obj = locals->obj_at(_index)();
_value.l = JNIHandles::make_local(_calling_thread, obj); break;
} default: ShouldNotReachHere();
}
}
}
}
// Nested VM operations must be permitted because getting/setting a local
// may need to trigger deoptimization of the target frame.
bool VM_BaseGetOrSetLocal::allow_nested_vm_operations() const {
  return true;
}
/////////////////////////////////////////////////////////////// // // class VM_GetOrSetLocal //
///////////////////////////////////////////////////////////////////////////////////////// // // class JvmtiSuspendControl - see comments in jvmtiImpl.hpp //
// Build a deferred DYNAMIC_CODE_GENERATED event.
// Need to make a copy of the name since we don't know how long the event
// poster will keep it around after we enqueue the deferred event and
// return. strdup() failure is handled in the post() routine below.
// (Fixed: extraction had fused 'const char'/'const void' into single
// tokens, which does not compile.)
JvmtiDeferredEvent JvmtiDeferredEvent::dynamic_code_generated_event(
    const char* name, const void* code_begin, const void* code_end) {
  JvmtiDeferredEvent event = JvmtiDeferredEvent(TYPE_DYNAMIC_CODE_GENERATED);
  event._event_data.dynamic_code_generated.name = os::strdup(name);
  event._event_data.dynamic_code_generated.code_begin = code_begin;
  event._event_data.dynamic_code_generated.code_end = code_end;
  return event;
}
// Build a deferred CLASS_UNLOAD event.
// Need to make a copy of the name since we don't know how long the event
// poster will keep it around after we enqueue the deferred event and
// return. strdup() failure is handled in the post() routine below.
// (Fixed: extraction had fused 'const char' into a single token, which
// does not compile.)
JvmtiDeferredEvent JvmtiDeferredEvent::class_unload_event(const char* name) {
  JvmtiDeferredEvent event = JvmtiDeferredEvent(TYPE_CLASS_UNLOAD);
  event._event_data.class_unload.name = os::strdup(name);
  return event;
}
// Post this deferred event from the service thread, dispatching on the
// event type.  For the name-carrying events we free our strdup'd copy of
// the name after posting (a NULL name means the strdup in the factory
// failed, in which case a placeholder name is posted instead).
void JvmtiDeferredEvent::post() {
  assert(Thread::current()->is_service_thread(), "Service thread must post enqueued events");
  switch (_type) {
    case TYPE_COMPILED_METHOD_LOAD: {
      nmethod* nm = _event_data.compiled_method_load;
      JvmtiExport::post_compiled_method_load(nm);
      break;
    }
    case TYPE_COMPILED_METHOD_UNLOAD: {
      JvmtiExport::post_compiled_method_unload(
          _event_data.compiled_method_unload.method_id,
          _event_data.compiled_method_unload.code_begin);
      break;
    }
    case TYPE_DYNAMIC_CODE_GENERATED: {
      const char* name = _event_data.dynamic_code_generated.name;
      // if strdup failed give the event a default name
      JvmtiExport::post_dynamic_code_generated_internal(
          (name == NULL) ? "unknown_code" : name,
          _event_data.dynamic_code_generated.code_begin,
          _event_data.dynamic_code_generated.code_end);
      if (name != NULL) {
        os::free((void *)name);  // release our copy
      }
      break;
    }
    case TYPE_CLASS_UNLOAD: {
      const char* name = _event_data.class_unload.name;
      // if strdup failed give the event a default name
      JvmtiExport::post_class_unload_internal(
          (name == NULL) ? "unknown_class" : name);
      if (name != NULL) {
        os::free((void *)name);  // release our copy
      }
      break;
    }
    default:
      ShouldNotReachHere();
  }
}
// Post a COMPILED_METHOD_LOAD event for a single environment.
void JvmtiDeferredEvent::post_compiled_method_load_event(JvmtiEnv* env) {
  assert(_type == TYPE_COMPILED_METHOD_LOAD, "only user of this method");
  JvmtiExport::post_compiled_method_load(env, _event_data.compiled_method_load);
}
// Keep the nmethod for compiled_method_load from being unloaded. void JvmtiDeferredEvent::oops_do(OopClosure* f, CodeBlobClosure* cf) { if (cf != NULL && _type == TYPE_COMPILED_METHOD_LOAD) {
cf->do_code_blob(_event_data.compiled_method_load);
}
}
// The GC calls this and marks the nmethods here on the stack so that // they cannot be unloaded while in the queue. void JvmtiDeferredEvent::nmethods_do(CodeBlobClosure* cf) { if (cf != NULL && _type == TYPE_COMPILED_METHOD_LOAD) {
cf->do_code_blob(_event_data.compiled_method_load);
}
}
bool JvmtiDeferredEventQueue::has_events() { // We save the queued events before the live phase and post them when it starts. // This code could skip saving the events on the queue before the live // phase and ignore them, but this would change how we do things now. // Starting the service thread earlier causes this to be called before the live phase begins. // The events on the queue should all be posted after the live phase so this is an // ok check. Before the live phase, DynamicCodeGenerated events are posted directly. // If we add other types of events to the deferred queue, this could get ugly. return JvmtiEnvBase::get_phase() == JVMTI_PHASE_LIVE && _queue_head != NULL;
}
// Append an event to the tail of the queue; events are pulled off the
// front by dequeue()/post().
// (Fixed: the function's closing brace was missing, which would have made
// the following definition nest inside this one and fail to compile.)
void JvmtiDeferredEventQueue::enqueue(JvmtiDeferredEvent event) {
  QueueNode* node = new QueueNode(event);
  if (_queue_tail == NULL) {
    // Empty queue: the new node is both head and tail.
    _queue_tail = _queue_head = node;
  } else {
    assert(_queue_tail->next() == NULL, "Must be the last element in the list");
    _queue_tail->set_next(node);
    _queue_tail = node;
  }
}
// Drain the queue, posting a COMPILED_METHOD_LOAD event for each entry.
// Posting happens while the nmethods are still referenced by the queue,
// so they cannot be unloaded in the meantime.
void JvmtiDeferredEventQueue::post(JvmtiEnv* env) {
  for (QueueNode* head = _queue_head; head != NULL; head = _queue_head) {
    head->event().post_compiled_method_load_event(env);
    dequeue();
  }
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.