/* * Copyright (c) 1999, 2022, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 only, as * published by the Free Software Foundation. * * This code is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * version 2 for more details (a copy is included in the LICENSE file that * accompanied this code). * * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA * or visit www.oracle.com if you need additional information or have any * questions. *
*/
// These counters are used to assign an unique ID to each compilation. volatile jint CompileBroker::_compilation_id = 0; volatile jint CompileBroker::_osr_compilation_id = 0; volatile jint CompileBroker::_native_compilation_id = 0;
// Timers and counters for generating statistics
elapsedTimer CompileBroker::_t_total_compilation;
elapsedTimer CompileBroker::_t_osr_compilation;
elapsedTimer CompileBroker::_t_standard_compilation;
elapsedTimer CompileBroker::_t_invalidated_compilation;
elapsedTimer CompileBroker::_t_bailedout_compilation;
int CompileBroker::_total_bailout_count = 0; int CompileBroker::_total_invalidated_count = 0; int CompileBroker::_total_compile_count = 0; int CompileBroker::_total_osr_compile_count = 0; int CompileBroker::_total_standard_compile_count = 0; int CompileBroker::_total_compiler_stopped_count = 0; int CompileBroker::_total_compiler_restarted_count = 0;
int CompileBroker::_sum_osr_bytes_compiled = 0; int CompileBroker::_sum_standard_bytes_compiled = 0; int CompileBroker::_sum_nmethod_size = 0; int CompileBroker::_sum_nmethod_code_size = 0;
if (DirectivesParser::has_file()) { return DirectivesParser::parse_from_flag();
} elseif (CompilerDirectivesPrint) { // Print default directive even when no other was added
DirectivesStack::print(tty);
}
// Dissociates the current CompilerThread from its just-finished CompileTask,
// logs completion, and either frees the task or hands it back to the waiting
// requester, depending on whether the compile was blocking.
CompileTaskWrapper::~CompileTaskWrapper() {
  CompilerThread* thread = CompilerThread::current();
  CompileTask* task = thread->task();
  CompileLog* log = thread->log();
  if (log != NULL && !task->is_unloaded()) {
    task->log_task_done(log);
  }
  thread->set_task(NULL);
  thread->set_env(NULL);
  if (task->is_blocking()) {
    bool free_task = false;
    {
      MutexLocker notifier(thread, task->lock());
      task->mark_complete();
#if INCLUDE_JVMCI
      if (CompileBroker::compiler(task->comp_level())->is_jvmci()) {
        if (!task->has_waiter()) {
          // The waiting thread timed out and thus did not free the task.
          free_task = true;
        }
        task->set_blocking_jvmci_compile_state(NULL);
      }
#endif
      if (!free_task) {
        // Notify the waiting thread that the compilation has completed
        // so that it can free the task.
        task->lock()->notify_all();
      }
    }
    if (free_task) {
      // The task can only be freed once the task lock is released.
      CompileTask::free(task);
    }
  } else {
    task->mark_complete();
    // By convention, the compiling thread is responsible for
    // recycling a non-blocking CompileTask.
    CompileTask::free(task);
  }
}
/**
 * Check if a CompilerThread can be removed and update count if requested.
 */
bool CompileBroker::can_remove(CompilerThread *ct, bool do_it) {
  assert(UseDynamicNumberOfCompilerThreads, "or shouldn't be here");
  if (!ReduceNumberOfCompilerThreads) return false;

  // NOTE(review): these three locals were missing from the merged source but
  // are required by every use of 'compiler', 'compiler_count' and 'c1' below;
  // restored from the upstream implementation — confirm against upstream.
  AbstractCompiler *compiler = ct->compiler();
  int compiler_count = compiler->num_compiler_threads();
  bool c1 = compiler->is_c1();

  // Keep at least 1 compiler thread of each type.
  if (compiler_count < 2) return false;

  // Keep thread alive for at least some time.
  if (ct->idle_time_millis() < (c1 ? 500 : 100)) return false;

#if INCLUDE_JVMCI
  if (compiler->is_jvmci()) {
    // Handles for JVMCI thread objects may get released concurrently.
    if (do_it) {
      assert(CompileThread_lock->owner() == ct, "must be holding lock");
    } else {
      // Skip check if it's the last thread and let caller check again.
      return true;
    }
  }
#endif

  // We only allow the last compiler thread of each type to get removed.
  jobject last_compiler = c1 ? compiler1_object(compiler_count - 1)
                             : compiler2_object(compiler_count - 1);
  if (ct->threadObj() == JNIHandles::resolve_non_null(last_compiler)) {
    if (do_it) {
      assert_locked_or_safepoint(CompileThread_lock); // Update must be consistent.
      compiler->set_num_compiler_threads(compiler_count - 1);
#if INCLUDE_JVMCI
      if (compiler->is_jvmci()) {
        // Old j.l.Thread object can die when no longer referenced elsewhere.
        JNIHandles::destroy_global(compiler2_object(compiler_count - 1));
        _compiler2_objects[compiler_count - 1] = NULL;
      }
#endif
    }
    return true;
  }
  return false;
}
/**
 * Add a CompileTask to a CompileQueue.
 */
void CompileQueue::add(CompileTask* task) {
  assert(MethodCompileQueue_lock->owned_by_self(), "must own lock");

  task->set_next(NULL);
  task->set_prev(NULL);

  if (_last == NULL) {
    // The compile queue is empty.
    assert(_first == NULL, "queue is empty");
    _first = task;
  } else {
    // Append the task to the queue.
    assert(_last->next() == NULL, "not last");
    _last->set_next(task);
    task->set_prev(_last);
  }
  _last = task;
  ++_size;

  // Mark the method as being in the compile queue.
  task->method()->set_queued_for_compilation();

  if (CIPrintCompileQueue) {
    print_tty();
  }

  if (LogCompilation && xtty != NULL) {
    task->log_task_queued();
  }

  // Notify CompilerThreads that a task is available.
  MethodCompileQueue_lock->notify_all();
}
/** * Empties compilation queue by putting all compilation tasks onto * a freelist. Furthermore, the method wakes up all threads that are * waiting on a compilation task to finish. This can happen if background * compilation is disabled.
*/ void CompileQueue::free_all() {
MutexLocker mu(MethodCompileQueue_lock);
CompileTask* next = _first;
// Iterate over all tasks in the compile queue while (next != NULL) {
CompileTask* current = next;
next = current->next();
{ // Wake up thread that blocks on the compile task.
MutexLocker ct_lock(current->lock());
current->lock()->notify();
} // Put the task back on the freelist.
CompileTask::free(current);
}
_first = NULL;
_last = NULL;
// Wake up all threads that block on the queue.
MethodCompileQueue_lock->notify_all();
}
/** * Get the next CompileTask from a CompileQueue
*/
CompileTask* CompileQueue::get(CompilerThread* thread) { // save methods from RedefineClasses across safepoint // across MethodCompileQueue_lock below.
methodHandle save_method;
methodHandle save_hot_method;
MonitorLocker locker(MethodCompileQueue_lock); // If _first is NULL we have no more compile jobs. There are two reasons for // having no compile jobs: First, we compiled everything we wanted. Second, // we ran out of code cache so compilation has been disabled. In the latter // case we perform code cache sweeps to free memory such that we can re-enable // compilation. while (_first == NULL) { // Exit loop if compilation is disabled forever if (CompileBroker::is_compilation_disabled_forever()) { return NULL;
}
AbstractCompiler* compiler = thread->compiler();
guarantee(compiler != nullptr, "Compiler object must exist");
compiler->on_empty_queue(this, thread); if (_first != nullptr) { // The call to on_empty_queue may have temporarily unlocked the MCQ lock // so check again whether any tasks were added to the queue. break;
}
// If there are no compilation tasks and we can compile new jobs // (i.e., there is enough free space in the code cache) there is // no need to invoke the GC. // We need a timed wait here, since compiler threads can exit if compilation // is disabled forever. We use 5 seconds wait time; the exiting of compiler threads // is not critical and we do not want idle compiler threads to wake up too often.
locker.wait(5*1000);
if (UseDynamicNumberOfCompilerThreads && _first == NULL) { // Still nothing to compile. Give caller a chance to stop this thread. if (CompileBroker::can_remove(CompilerThread::current(), false)) return NULL;
}
}
if (CompileBroker::is_compilation_disabled_forever()) { return NULL;
}
if (task != NULL) { // Save method pointers across unlock safepoint. The task is removed from // the compilation queue, which is walked during RedefineClasses.
Thread* thread = Thread::current();
save_method = methodHandle(thread, task->method());
save_hot_method = methodHandle(thread, task->hot_method());
// Clean & deallocate stale compile tasks. // Temporarily releases MethodCompileQueue lock. void CompileQueue::purge_stale_tasks() {
assert(MethodCompileQueue_lock->owned_by_self(), "must own lock"); if (_first_stale != NULL) { // Stale tasks are purged when MCQ lock is released, // but _first_stale updates are protected by MCQ lock. // Once task processing starts and MCQ lock is released, // other compiler threads can reuse _first_stale.
CompileTask* head = _first_stale;
_first_stale = NULL;
{
MutexUnlocker ul(MethodCompileQueue_lock); for (CompileTask* task = head; task != NULL; ) {
CompileTask* next_task = task->next();
CompileTaskWrapper ctw(task); // Frees the task
task->set_failure_reason("stale task");
task = next_task;
}
}
}
}
// Unlink 'task' from the doubly-linked compile queue, keeping
// _first/_last and _size consistent.
void CompileQueue::remove(CompileTask* task) {
  assert(MethodCompileQueue_lock->owned_by_self(), "must own lock");

  CompileTask* const before = task->prev();
  CompileTask* const after  = task->next();

  if (before != NULL) {
    before->set_next(after);
  } else {
    // task is the first element
    assert(task == _first, "Sanity");
    _first = after;
  }

  if (after != NULL) {
    after->set_prev(before);
  } else {
    // task is the last element
    assert(task == _last, "Sanity");
    _last = before;
  }

  --_size;
}
// Unlink 'task' from the queue and push it onto the stale list so that
// purge_stale_tasks() can reclaim it later.
void CompileQueue::remove_and_mark_stale(CompileTask* task) {
  assert(MethodCompileQueue_lock->owned_by_self(), "must own lock");
  remove(task);

  // Enqueue the task for reclamation (should be done outside MCQ lock)
  task->set_prev(NULL);
  task->set_next(_first_stale);
  _first_stale = task;
}
// methods in the compile queue need to be marked as used on the stack // so that they don't get reclaimed by Redefine Classes void CompileQueue::mark_on_stack() {
CompileTask* task = _first; while (task != NULL) {
task->mark_on_stack();
task = task->next();
}
}
// Map a compilation level to the queue serving it; NULL if no
// compiler handles that level.
CompileQueue* CompileBroker::compile_queue(int comp_level) {
  if (is_c2_compile(comp_level)) {
    return _c2_compile_queue;
  }
  if (is_c1_compile(comp_level)) {
    return _c1_compile_queue;
  }
  return NULL;
}
// Render the queue into a local buffer first so the queue is not
// walked while holding the tty lock.
void CompileQueue::print_tty() {
  stringStream ss;
  // Dump the compile queue into a buffer before locking the tty
  print(&ss);
  {
    ttyLocker ttyl;
    tty->print("%s", ss.freeze());
  }
}
#if INCLUDE_JFR && COMPILER2_OR_JVMCI
// It appends new compiler phase names to growable array phase_names(a new CompilerPhaseType mapping
// in compiler/compilerEvent.cpp) and registers it with its serializer.
//
// c2 uses explicit CompilerPhaseType idToPhase mapping in opto/phasetype.hpp,
// so if c2 is used, it should be always registered first.
// This function is called during vm initialization.
void register_jfr_phasetype_serializer(CompilerType compiler_type) {
  ResourceMark rm;
  // Fixed merged tokens from the source: 'staticbool', 'elseif' and
  // 'constchar' were single (invalid) identifiers.
  static bool first_registration = true;
  if (compiler_type == compiler_jvmci) {
    CompilerEvent::PhaseEvent::get_phase_id("NOT_A_PHASE_NAME", false, false, false);
    first_registration = false;
#ifdef COMPILER2
  } else if (compiler_type == compiler_c2) {
    assert(first_registration, "invariant"); // c2 must be registered first.
    for (int i = 0; i < PHASE_NUM_TYPES; i++) {
      const char* phase_name = CompilerPhaseTypeHelper::to_description((CompilerPhaseType) i);
      CompilerEvent::PhaseEvent::get_phase_id(phase_name, false, false, false);
    }
    first_registration = false;
#endif // COMPILER2
  }
}
#endif // INCLUDE_JFR && COMPILER2_OR_JVMCI
// ------------------------------------------------------------------ // CompileBroker::compilation_init // // Initialize the Compilation object void CompileBroker::compilation_init_phase1(JavaThread* THREAD) { // No need to initialize compilation system if we do not use it. if (!UseCompiler) { return;
} // Set the interface to the current compiler(s).
_c1_count = CompilationPolicy::c1_count();
_c2_count = CompilationPolicy::c2_count();
#if INCLUDE_JVMCI if (EnableJVMCI) { // This is creating a JVMCICompiler singleton.
JVMCICompiler* jvmci = new JVMCICompiler();
if (UseJVMCICompiler) {
_compilers[1] = jvmci; if (FLAG_IS_DEFAULT(JVMCIThreads)) { if (BootstrapJVMCI) { // JVMCI will bootstrap so give it more threads
_c2_count = MIN2(32, os::active_processor_count());
}
} else {
_c2_count = JVMCIThreads;
} if (FLAG_IS_DEFAULT(JVMCIHostThreads)) {
} else { #ifdef COMPILER1
_c1_count = JVMCIHostThreads; #endif// COMPILER1
}
}
} #endif// INCLUDE_JVMCI
#ifdef COMPILER1 if (_c1_count > 0) {
_compilers[0] = new Compiler();
} #endif// COMPILER1
#ifdef COMPILER2 if (true JVMCI_ONLY( && !UseJVMCICompiler)) { if (_c2_count > 0) {
_compilers[1] = new C2Compiler(); // Register c2 first as c2 CompilerPhaseType idToPhase mapping is explicit. // idToPhase mapping for c2 is in opto/phasetype.hpp
JFR_ONLY(register_jfr_phasetype_serializer(compiler_c2);)
}
} #endif// COMPILER2
#if INCLUDE_JVMCI // Register after c2 registration. // JVMCI CompilerPhaseType idToPhase mapping is dynamic. if (EnableJVMCI) {
JFR_ONLY(register_jfr_phasetype_serializer(compiler_jvmci);)
} #endif// INCLUDE_JVMCI
// Start the compiler thread(s)
init_compiler_threads(); // totalTime performance counter is always created as it is required // by the implementation of java.lang.management.CompilationMXBean.
{ // Ensure OOM leads to vm_exit_during_initialization.
EXCEPTION_MARK;
_perf_total_compilation =
PerfDataManager::create_counter(JAVA_CI, "totalTime",
PerfData::U_Ticks, CHECK);
}
#ifdefined(ASSERT) && COMPILER2_OR_JVMCI // Stress testing. Dedicated threads revert optimizations based on escape analysis concurrently to // the running java application. Configured with vm options DeoptimizeObjectsALot*. class DeoptimizeObjectsALotThread : public JavaThread {
// Entry for DeoptimizeObjectsALotThread. The threads are started in // CompileBroker::init_compiler_threads() iff DeoptimizeObjectsALot is enabled void DeoptimizeObjectsALotThread::deopt_objs_alot_thread_entry(JavaThread* thread, TRAPS) {
DeoptimizeObjectsALotThread* dt = ((DeoptimizeObjectsALotThread*) thread); bool enter_single_loop;
{
MonitorLocker ml(dt, EscapeBarrier_lock, Mutex::_no_safepoint_check_flag); staticint single_thread_count = 0;
enter_single_loop = single_thread_count++ < DeoptimizeObjectsALotThreadCountSingle;
} if (enter_single_loop) {
dt->deoptimize_objects_alot_loop_single();
} else {
dt->deoptimize_objects_alot_loop_all();
}
}
// Execute EscapeBarriers in an endless loop to revert optimizations based on escape analysis. Each // barrier targets a single thread which is selected round robin. void DeoptimizeObjectsALotThread::deoptimize_objects_alot_loop_single() {
HandleMark hm(this); while (true) { for (JavaThreadIteratorWithHandle jtiwh; JavaThread *deoptee_thread = jtiwh.next(); ) {
{ // Begin new scope for escape barrier
HandleMarkCleaner hmc(this);
ResourceMark rm(this);
EscapeBarrier eb(true, this, deoptee_thread);
eb.deoptimize_objects(100);
} // Now sleep after the escape barriers destructor resumed deoptee_thread.
sleep(DeoptimizeObjectsALotInterval);
}
}
}
// Execute EscapeBarriers in an endless loop to revert optimizations based on escape analysis. Each // barrier targets all java threads in the vm at once. void DeoptimizeObjectsALotThread::deoptimize_objects_alot_loop_all() {
HandleMark hm(this); while (true) {
{ // Begin new scope for escape barrier
HandleMarkCleaner hmc(this);
ResourceMark rm(this);
EscapeBarrier eb(true, this);
eb.deoptimize_objects_all_threads();
} // Now sleep after the escape barriers destructor resumed the java threads.
sleep(DeoptimizeObjectsALotInterval);
}
} #endif// defined(ASSERT) && COMPILER2_OR_JVMCI
// Create and start a compiler thread (or, in debug stress builds, a
// DeoptimizeObjectsALotThread) at native priority. Returns NULL if a
// dynamically added compiler thread could not get an osthread; exits the VM
// on failure for mandatory threads.
// NOTE(review): the function header and the 'new_thread' declaration were
// missing from the merged source and have been restored — confirm the exact
// signature against the class declaration.
JavaThread* CompileBroker::make_thread(ThreadType type, jobject thread_handle, CompileQueue* queue, AbstractCompiler* comp, JavaThread* THREAD) {
  JavaThread* new_thread = NULL;

  switch (type) {
    case compiler_t:
      assert(comp != NULL, "Compiler instance missing.");
      if (!InjectCompilerCreationFailure || comp->num_compiler_threads() == 0) {
        CompilerCounters* counters = new CompilerCounters();
        new_thread = new CompilerThread(queue, counters);
      }
      break;
#if defined(ASSERT) && COMPILER2_OR_JVMCI
    case deoptimizer_t:
      new_thread = new DeoptimizeObjectsALotThread();
      break;
#endif // ASSERT
    default:
      ShouldNotReachHere();
  }

  // At this point the new CompilerThread data-races with this startup
  // thread (which is the main thread and NOT the VM thread).
  // This means Java bytecodes being executed at startup can
  // queue compile jobs which will run at whatever default priority the
  // newly created CompilerThread runs at.

  // At this point it may be possible that no osthread was created for the
  // JavaThread due to lack of resources. We will handle that failure below.
  // Also check new_thread so that static analysis is happy.
  if (new_thread != NULL && new_thread->osthread() != NULL) {
    Handle thread_oop(THREAD, JNIHandles::resolve_non_null(thread_handle));

    if (type == compiler_t) {
      CompilerThread::cast(new_thread)->set_compiler(comp);
    }

    // Note that we cannot call os::set_priority because it expects Java
    // priorities and we are *explicitly* using OS priorities so that it's
    // possible to set the compiler thread priority higher than any Java
    // thread.

    int native_prio = CompilerThreadPriority;
    if (native_prio == -1) {
      if (UseCriticalCompilerThreadPriority) {
        native_prio = os::java_to_os_priority[CriticalPriority];
      } else {
        native_prio = os::java_to_os_priority[NearMaxPriority];
      }
    }
    os::set_native_priority(new_thread, native_prio);

    // Note that this only sets the JavaThread _priority field, which by
    // definition is limited to Java priorities and not OS priorities.
    JavaThread::start_internal_daemon(THREAD, new_thread, thread_oop, NearMaxPriority);

  } else { // osthread initialization failure
    if (UseDynamicNumberOfCompilerThreads && type == compiler_t
        && comp->num_compiler_threads() > 0) {
      // The new thread is not known to Thread-SMR yet so we can just delete.
      delete new_thread;
      return NULL;
    } else {
      vm_exit_during_initialization("java.lang.OutOfMemoryError",
                                    os::native_thread_creation_failed_msg());
    }
  }

  os::naked_yield(); // make sure that the compiler thread is started early (especially helpful on SOLARIS)

  return new_thread;
}
void CompileBroker::init_compiler_threads() { // Ensure any exceptions lead to vm_exit_during_initialization.
EXCEPTION_MARK; #if !defined(ZERO)
assert(_c2_count > 0 || _c1_count > 0, "No compilers?"); #endif// !ZERO // Initialize the compilation queue if (_c2_count > 0) { constchar* name = JVMCI_ONLY(UseJVMCICompiler ? "JVMCI compile queue" :) "C2 compile queue";
_c2_compile_queue = new CompileQueue(name);
_compiler2_objects = NEW_C_HEAP_ARRAY(jobject, _c2_count, mtCompiler);
_compiler2_logs = NEW_C_HEAP_ARRAY(CompileLog*, _c2_count, mtCompiler);
} if (_c1_count > 0) {
_c1_compile_queue = new CompileQueue("C1 compile queue");
_compiler1_objects = NEW_C_HEAP_ARRAY(jobject, _c1_count, mtCompiler);
_compiler1_logs = NEW_C_HEAP_ARRAY(CompileLog*, _c1_count, mtCompiler);
}
char name_buffer[256];
for (int i = 0; i < _c2_count; i++) {
jobject thread_handle = NULL; // Create all j.l.Thread objects for C1 and C2 threads here, but only one // for JVMCI compiler which can create further ones on demand.
JVMCI_ONLY(if (!UseJVMCICompiler || !UseDynamicNumberOfCompilerThreads || i == 0) {) // Create a name for our thread.
sprintf(name_buffer, "%s CompilerThread%d", _compilers[1]->name(), i);
Handle thread_oop = create_thread_oop(name_buffer, CHECK);
thread_handle = JNIHandles::make_global(thread_oop);
JVMCI_ONLY(})
_compiler2_objects[i] = thread_handle;
_compiler2_logs[i] = NULL;
if (!UseDynamicNumberOfCompilerThreads || i == 0) {
JavaThread *ct = make_thread(compiler_t, thread_handle, _c2_compile_queue, _compilers[1], THREAD);
assert(ct != NULL, "should have been handled for initial thread");
_compilers[1]->set_num_compiler_threads(i + 1); if (TraceCompilerThreads) {
ResourceMark rm;
ThreadsListHandle tlh; // name() depends on the TLH.
assert(tlh.includes(ct), "ct=" INTPTR_FORMAT " exited unexpectedly.", p2i(ct));
tty->print_cr("Added initial compiler thread %s", ct->name());
}
}
}
for (int i = 0; i < _c1_count; i++) { // Create a name for our thread.
sprintf(name_buffer, "C1 CompilerThread%d", i);
Handle thread_oop = create_thread_oop(name_buffer, CHECK);
jobject thread_handle = JNIHandles::make_global(thread_oop);
_compiler1_objects[i] = thread_handle;
_compiler1_logs[i] = NULL;
if (!UseDynamicNumberOfCompilerThreads || i == 0) {
JavaThread *ct = make_thread(compiler_t, thread_handle, _c1_compile_queue, _compilers[0], THREAD);
assert(ct != NULL, "should have been handled for initial thread");
_compilers[0]->set_num_compiler_threads(i + 1); if (TraceCompilerThreads) {
ResourceMark rm;
ThreadsListHandle tlh; // name() depends on the TLH.
assert(tlh.includes(ct), "ct=" INTPTR_FORMAT " exited unexpectedly.", p2i(ct));
tty->print_cr("Added initial compiler thread %s", ct->name());
}
}
}
julong available_memory = os::available_memory(); // If SegmentedCodeCache is off, both values refer to the single heap (with type CodeBlobType::All).
size_t available_cc_np = CodeCache::unallocated_capacity(CodeBlobType::MethodNonProfiled),
available_cc_p = CodeCache::unallocated_capacity(CodeBlobType::MethodProfiled);
// Only do attempt to start additional threads if the lock is free. if (!CompileThread_lock->try_lock()) return;
if (_c2_compile_queue != NULL) { int old_c2_count = _compilers[1]->num_compiler_threads(); int new_c2_count = MIN4(_c2_count,
_c2_compile_queue->size() / 2,
(int)(available_memory / (200*M)),
(int)(available_cc_np / (128*K)));
for (int i = old_c2_count; i < new_c2_count; i++) { #if INCLUDE_JVMCI if (UseJVMCICompiler) { // Native compiler threads as used in C1/C2 can reuse the j.l.Thread // objects as their existence is completely hidden from the rest of // the VM (and those compiler threads can't call Java code to do the // creation anyway). For JVMCI we have to create new j.l.Thread objects // as they are visible and we can see unexpected thread lifecycle // transitions if we bind them to new JavaThreads. if (!THREAD->can_call_java()) break; char name_buffer[256];
sprintf(name_buffer, "%s CompilerThread%d", _compilers[1]->name(), i);
Handle thread_oop;
{ // We have to give up the lock temporarily for the Java calls.
MutexUnlocker mu(CompileThread_lock);
thread_oop = create_thread_oop(name_buffer, THREAD);
} if (HAS_PENDING_EXCEPTION) { if (TraceCompilerThreads) {
ResourceMark rm;
tty->print_cr("JVMCI compiler thread creation failed:");
PENDING_EXCEPTION->print();
}
CLEAR_PENDING_EXCEPTION; break;
} // Check if another thread has beaten us during the Java calls. if (_compilers[1]->num_compiler_threads() != i) break;
jobject thread_handle = JNIHandles::make_global(thread_oop);
assert(compiler2_object(i) == NULL, "Old one must be released!");
_compiler2_objects[i] = thread_handle;
} #endif
JavaThread *ct = make_thread(compiler_t, compiler2_object(i), _c2_compile_queue, _compilers[1], THREAD); if (ct == NULL) break;
_compilers[1]->set_num_compiler_threads(i + 1); if (TraceCompilerThreads) {
ResourceMark rm;
ThreadsListHandle tlh; // name() depends on the TLH.
assert(tlh.includes(ct), "ct=" INTPTR_FORMAT " exited unexpectedly.", p2i(ct));
tty->print_cr("Added compiler thread %s (available memory: %dMB, available non-profiled code cache: %dMB)",
ct->name(), (int)(available_memory/M), (int)(available_cc_np/M));
}
}
}
if (_c1_compile_queue != NULL) { int old_c1_count = _compilers[0]->num_compiler_threads(); int new_c1_count = MIN4(_c1_count,
_c1_compile_queue->size() / 4,
(int)(available_memory / (100*M)),
(int)(available_cc_p / (128*K)));
for (int i = old_c1_count; i < new_c1_count; i++) {
JavaThread *ct = make_thread(compiler_t, compiler1_object(i), _c1_compile_queue, _compilers[0], THREAD); if (ct == NULL) break;
_compilers[0]->set_num_compiler_threads(i + 1); if (TraceCompilerThreads) {
ResourceMark rm;
ThreadsListHandle tlh; // name() depends on the TLH.
assert(tlh.includes(ct), "ct=" INTPTR_FORMAT " exited unexpectedly.", p2i(ct));
tty->print_cr("Added compiler thread %s (available memory: %dMB, available profiled code cache: %dMB)",
ct->name(), (int)(available_memory/M), (int)(available_cc_p/M));
}
}
}
CompileThread_lock->unlock();
}
/** * Set the methods on the stack as on_stack so that redefine classes doesn't * reclaim them. This method is executed at a safepoint.
*/ void CompileBroker::mark_on_stack() {
assert(SafepointSynchronize::is_at_safepoint(), "sanity check"); // Since we are at a safepoint, we do not need a lock to access // the compile queues. if (_c2_compile_queue != NULL) {
_c2_compile_queue->mark_on_stack();
} if (_c1_compile_queue != NULL) {
_c1_compile_queue->mark_on_stack();
}
}
// ------------------------------------------------------------------ // CompileBroker::compile_method // // Request compilation of a method. void CompileBroker::compile_method_base(const methodHandle& method, int osr_bci, int comp_level, const methodHandle& hot_method, int hot_count,
CompileTask::CompileReason compile_reason, bool blocking,
Thread* thread) {
guarantee(!method->is_abstract(), "cannot compile abstract methods");
assert(method->method_holder()->is_instance_klass(), "sanity check");
assert(!method->method_holder()->is_not_initialized(), "method holder must be initialized");
assert(!method->is_method_handle_intrinsic(), "do not enqueue these guys");
// A request has been made for compilation. Before we do any // real work, check to see if the method has been compiled // in the meantime with a definitive result. if (compilation_is_complete(method, osr_bci, comp_level)) { return;
}
#ifndef PRODUCT if (osr_bci != -1 && !FLAG_IS_DEFAULT(OSROnlyBCI)) { if ((OSROnlyBCI > 0) ? (OSROnlyBCI != osr_bci) : (-OSROnlyBCI == osr_bci)) { // Positive OSROnlyBCI means only compile that bci. Negative means don't compile that BCI. return;
}
} #endif
// If this method is already in the compile queue, then // we do not block the current thread. if (compilation_is_in_queue(method)) { // We may want to decay our counter a bit here to prevent // multiple denied requests for compilation. This is an // open compilation policy issue. Note: The other possibility, // in the case that this is a blocking compile request, is to have // all subsequent blocking requesters wait for completion of // ongoing compiles. Note that in this case we'll need a protocol // for freeing the associated compile tasks. [Or we could have // a single static monitor on which all these waiters sleep.] return;
}
// Tiered policy requires MethodCounters to exist before adding a method to // the queue. Create if we don't have them yet.
method->get_method_counters(thread);
// Outputs from the following MutexLocker block:
CompileTask* task = NULL;
CompileQueue* queue = compile_queue(comp_level);
// Make sure the method has not slipped into the queues since // last we checked; note that those checks were "fast bail-outs". // Here we need to be more careful, see 14012000 below. if (compilation_is_in_queue(method)) { return;
}
// We need to check again to see if the compilation has // completed. A previous compilation may have registered // some result. if (compilation_is_complete(method, osr_bci, comp_level)) { return;
}
// We now know that this compilation is not pending, complete, // or prohibited. Assign a compile_id to this compilation // and check to see if it is in our [Start..Stop) range. int compile_id = assign_compile_id(method, osr_bci); if (compile_id == 0) { // The compilation falls outside the allowed range. return;
}
#if INCLUDE_JVMCI if (UseJVMCICompiler && blocking) { // Don't allow blocking compiles for requests triggered by JVMCI. if (thread->is_Compiler_thread()) {
blocking = false;
}
if (!UseJVMCINativeLibrary) { // Don't allow blocking compiles if inside a class initializer or while performing class loading
vframeStream vfst(JavaThread::cast(thread)); for (; !vfst.at_end(); vfst.next()) { if (vfst.method()->is_static_initializer() ||
(vfst.method()->method_holder()->is_subclass_of(vmClasses::ClassLoader_klass()) &&
vfst.method()->name() == vmSymbols::loadClass_name())) {
blocking = false; break;
}
}
}
// Don't allow blocking compilation requests to JVMCI // if JVMCI itself is not yet initialized if (!JVMCI::is_compiler_initialized() && compiler(comp_level)->is_jvmci()) {
blocking = false;
}
// Don't allow blocking compilation requests if we are in JVMCIRuntime::shutdown // to avoid deadlock between compiler thread(s) and threads run at shutdown // such as the DestroyJavaVM thread. if (JVMCI::in_shutdown()) {
blocking = false;
}
} #endif// INCLUDE_JVMCI
// We will enter the compilation in the queue. // 14012000: Note that this sets the queued_for_compile bits in // the target method. We can now reason that a method cannot be // queued for compilation more than once, as follows: // Before a thread queues a task for compilation, it first acquires // the compile queue lock, then checks if the method's queued bits // are set or it has already been compiled. Thus there can not be two // instances of a compilation task for the same method on the // compilation queue. Consider now the case where the compilation // thread has already removed a task for that method from the queue // and is in the midst of compiling it. In this case, the // queued_for_compile bits must be set in the method (and these // will be visible to the current thread, since the bits were set // under protection of the compile queue lock, which we hold now. // When the compilation completes, the compiler thread first sets // the compilation result and then clears the queued_for_compile // bits. Neither of these actions are protected by a barrier (or done // under the protection of a lock), so the only guarantee we have // (on machines with TSO (Total Store Order)) is that these values // will update in that order. As a result, the only combinations of // these bits that the current thread will see are, in temporal order: // <RESULT, QUEUE> : // <0, 1> : in compile queue, but not yet compiled // <1, 1> : compiled but queue bit not cleared // <1, 0> : compiled and queue bit cleared // Because we first check the queue bits then check the result bits, // we are assured that we cannot introduce a duplicate task. // Note that if we did the tests in the reverse order (i.e. check // result then check queued bit), we could get the result bit before // the compilation completed, and the queue bit after the compilation // completed, and end up introducing a "duplicate" (redundant) task. 
// In that case, the compiler thread should first check if a method // has already been compiled before trying to compile it. // NOTE: in the event that there are multiple compiler threads and // there is de-optimization/recompilation, things will get hairy, // and in that case it's best to protect both the testing (here) of // these bits, and their updating (here and elsewhere) under a // common lock.
task = create_compile_task(queue,
compile_id, method,
osr_bci, comp_level,
hot_method, hot_count, compile_reason,
blocking);
}
if (blocking) {
wait_for_completion(task);
}
}
// Convenience overload: resolves the matching compiler directive for the
// method, delegates to the directive-taking overload, and releases the
// directive afterwards.
nmethod* CompileBroker::compile_method(const methodHandle& method, int osr_bci,
                                       int comp_level,
                                       const methodHandle& hot_method, int hot_count,
                                       CompileTask::CompileReason compile_reason,
                                       TRAPS) {
  // Do nothing if compilebroker is not initialized or compiles are submitted on level none
  if (!_initialized || comp_level == CompLevel_none) {
    return NULL;
  }

  AbstractCompiler *comp = CompileBroker::compiler(comp_level);
  assert(comp != NULL, "Ensure we have a compiler");

  DirectiveSet* directive = DirectivesStack::getMatchingDirective(method, comp);
  // CompileBroker::compile_method can trap and can have pending async exception.
  nmethod* nm = CompileBroker::compile_method(method, osr_bci, comp_level, hot_method, hot_count, compile_reason, directive, THREAD);
  DirectivesStack::release(directive);
  return nm;
}
// ------------------------------------------------------------------
// CompileBroker::compile_method
//
// Request compilation of a method at the given level (or an OSR
// compilation at osr_bci), honoring the supplied compiler directive.
// Returns the resulting nmethod, or NULL if no code is (yet) available.
nmethod* CompileBroker::compile_method(const methodHandle& method, int osr_bci,
                                       int comp_level,
                                       const methodHandle& hot_method, int hot_count,
                                       CompileTask::CompileReason compile_reason,
                                       DirectiveSet* directive,
                                       TRAPS) {
  // make sure arguments make sense
  assert(method->method_holder()->is_instance_klass(), "not an instance method");
  assert(osr_bci == InvocationEntryBci || (0 <= osr_bci && osr_bci < method->code_size()), "bci out of range");
  assert(!method->is_abstract() && (osr_bci == InvocationEntryBci || !method->is_native()), "cannot compile abstract/native methods");
  assert(!method->method_holder()->is_not_initialized(), "method holder must be initialized");

  // return quickly if possible
  // lock, make sure that the compilation
  // isn't prohibited in a straightforward way.
  AbstractCompiler* comp = CompileBroker::compiler(comp_level);
  if (comp == NULL || compilation_is_prohibited(method, osr_bci, comp_level, directive->ExcludeOption)) {
    return NULL;
  }

  if (osr_bci == InvocationEntryBci) {
    // standard compilation
    CompiledMethod* method_code = method->code();
    if (method_code != NULL && method_code->is_nmethod()) {
      if (compilation_is_complete(method, osr_bci, comp_level)) {
        return (nmethod*) method_code;
      }
    }
    if (method->is_not_compilable(comp_level)) {
      return NULL;
    }
  } else {
    // osr compilation
    // We accept a higher level osr method
    nmethod* nm = method->lookup_osr_nmethod_for(osr_bci, comp_level, false);
    if (nm != NULL) return nm;
    if (method->is_not_osr_compilable(comp_level)) return NULL;
  }

  assert(!HAS_PENDING_EXCEPTION, "No exception should be present");
  // some prerequisites that are compiler specific
  if (comp->is_c2() || comp->is_jvmci()) {
    method->constants()->resolve_string_constants(CHECK_AND_CLEAR_NONASYNC_NULL);
    // Resolve all classes seen in the signature of the method
    // we are compiling.
    Method::load_signature_classes(method, CHECK_AND_CLEAR_NONASYNC_NULL);
  }

  // If the method is native, do the lookup in the thread requesting
  // the compilation. Native lookups can load code, which is not
  // permitted during compilation.
  //
  // Note: A native method implies non-osr compilation which is
  //       checked with an assertion at the entry of this method.
  if (method->is_native() && !method->is_method_handle_intrinsic()) {
    address adr = NativeLookup::lookup(method, THREAD);
    if (HAS_PENDING_EXCEPTION) {
      // In case of an exception looking up the method, we just forget
      // about it. The interpreter will kick-in and throw the exception.
      method->set_not_compilable("NativeLookup::lookup failed"); // implies is_not_osr_compilable()
      CLEAR_PENDING_EXCEPTION;
      return NULL;
    }
    assert(method->has_native_function(), "must have native code by now");
  }

  // RedefineClasses() has replaced this method; just return
  if (method->is_old()) {
    return NULL;
  }

  // JVMTI -- post_compile_event requires jmethod_id() that may require
  // a lock the compiling thread can not acquire. Prefetch it here.
  if (JvmtiExport::should_post_compiled_method_load()) {
    method->jmethod_id();
  }

  // do the compilation
  if (method->is_native()) {
    if (!PreferInterpreterNativeStubs || method->is_method_handle_intrinsic()) {
// Fixed extraction-mangled token: was "#ifdefined(X86)", which is not a
// valid preprocessor directive and does not compile.
#if defined(X86) && !defined(ZERO)
      // The following native methods:
      //
      // java.lang.Float.intBitsToFloat
      // java.lang.Float.floatToRawIntBits
      // java.lang.Double.longBitsToDouble
      // java.lang.Double.doubleToRawLongBits
      //
      // are called through the interpreter even if interpreter native stubs
      // are not preferred (i.e., calling through adapter handlers is preferred).
      // The reason is that on x86_32 signaling NaNs (sNaNs) are not preserved
      // if the version of the methods from the native libraries is called.
      // As the interpreter and the C2-intrinsified version of the methods preserves
      // sNaNs, that would result in an inconsistent way of handling of sNaNs.
      if ((UseSSE >= 1 &&
          (method->intrinsic_id() == vmIntrinsics::_intBitsToFloat ||
           method->intrinsic_id() == vmIntrinsics::_floatToRawIntBits)) ||
          (UseSSE >= 2 &&
           (method->intrinsic_id() == vmIntrinsics::_longBitsToDouble ||
            method->intrinsic_id() == vmIntrinsics::_doubleToRawLongBits))) {
        return NULL;
      }
#endif // X86 && !ZERO

      // To properly handle the appendix argument for out-of-line calls we are using a small trampoline that
      // pops off the appendix argument and jumps to the target (see gen_special_dispatch in SharedRuntime).
      //
      // Since normal compiled-to-compiled calls are not able to handle such a thing we MUST generate an adapter
      // in this case.  If we can't generate one and use it we can not execute the out-of-line method handle calls.
      AdapterHandlerLibrary::create_native_wrapper(method);
    } else {
      return NULL;
    }
  } else {
    // If the compiler is shut off due to code cache getting full
    // fail out now so blocking compiles dont hang the java thread
    if (!should_compile_new_jobs()) {
      return NULL;
    }
    bool is_blocking = !directive->BackgroundCompilationOption || ReplayCompiles;
    compile_method_base(method, osr_bci, comp_level, hot_method, hot_count, compile_reason, is_blocking, THREAD);
  }

  // return requested nmethod
  // We accept a higher level osr method
  // NOTE(review): this tail was lost in the garbled source; restored per
  // upstream OpenJDK — verify against the reference tree.
  if (osr_bci == InvocationEntryBci) {
    CompiledMethod* code = method->code();
    if (code == NULL) {
      return (nmethod*) code;
    } else {
      return code->as_nmethod_or_null();
    }
  }
  return method->lookup_osr_nmethod_for(osr_bci, comp_level, false);
}
// ------------------------------------------------------------------ // CompileBroker::compilation_is_complete // // See if compilation of this method is already complete. bool CompileBroker::compilation_is_complete(const methodHandle& method, int osr_bci, int comp_level) { bool is_osr = (osr_bci != standard_entry_bci); if (is_osr) { if (method->is_not_osr_compilable(comp_level)) { returntrue;
} else {
nmethod* result = method->lookup_osr_nmethod_for(osr_bci, comp_level, true); return (result != NULL);
}
} else { if (method->is_not_compilable(comp_level)) { returntrue;
} else {
CompiledMethod* result = method->code(); if (result == NULL) returnfalse; return comp_level == result->comp_level();
}
}
}
/**
 * See if this compilation is already requested.
 *
 * Implementation note: there is only a single "is in queue" bit
 * for each method. This means that the check below is overly
 * conservative in the sense that an osr compilation in the queue
 * will block a normal compilation from entering the queue (and vice
 * versa). This can be remedied by a full queue search to disambiguate
 * cases. If it is deemed profitable, this may be done.
 */
bool CompileBroker::compilation_is_in_queue(const methodHandle& method) {
  return method->queued_for_compilation();
}
// ------------------------------------------------------------------ // CompileBroker::compilation_is_prohibited // // See if this compilation is not allowed. bool CompileBroker::compilation_is_prohibited(const methodHandle& method, int osr_bci, int comp_level, bool excluded) { bool is_native = method->is_native(); // Some compilers may not support the compilation of natives.
AbstractCompiler *comp = compiler(comp_level); if (is_native && (!CICompileNatives || comp == NULL)) {
method->set_not_compilable_quietly("native methods not supported", comp_level); returntrue;
}
bool is_osr = (osr_bci != standard_entry_bci); // Some compilers may not support on stack replacement. if (is_osr && (!CICompileOSR || comp == NULL)) {
method->set_not_osr_compilable("OSR not supported", comp_level); returntrue;
}
// The method may be explicitly excluded by the user. double scale; if (excluded || (CompilerOracle::has_option_value(method, CompileCommand::CompileThresholdScaling, scale) && scale == 0)) { bool quietly = CompilerOracle::be_quiet(); if (PrintCompilation && !quietly) { // This does not happen quietly...
ResourceMark rm;
tty->print("### Excluding %s:%s",
method->is_native() ? "generation of native wrapper" : "compile",
(method->is_static() ? " static" : ""));
method->print_short_name(tty);
tty->cr();
}
method->set_not_compilable("excluded by CompileCommand", comp_level, !quietly);
}
returnfalse;
}
/**
 * Generate serialized IDs for compilation requests. If certain debugging flags are used
 * and the ID is not within the specified range, the method is not compiled and 0 is returned.
 * The function also allows to generate separate compilation IDs for OSR compilations.
 *
 * Fixed extraction-fused token "elseif" which did not compile.
 */
int CompileBroker::assign_compile_id(const methodHandle& method, int osr_bci) {
#ifdef ASSERT
  bool is_osr = (osr_bci != standard_entry_bci);
  int id;
  if (method->is_native()) {
    assert(!is_osr, "can't be osr");
    // Adapters, native wrappers and method handle intrinsics
    // should be generated always.
    return Atomic::add(CICountNative ? &_native_compilation_id : &_compilation_id, 1);
  } else if (CICountOSR && is_osr) {
    id = Atomic::add(&_osr_compilation_id, 1);
    if (CIStartOSR <= id && id < CIStopOSR) {
      return id;
    }
  } else {
    id = Atomic::add(&_compilation_id, 1);
    if (CIStart <= id && id < CIStop) {
      return id;
    }
  }

  // Method was not in the appropriate compilation range.
  method->set_not_compilable_quietly("Not in requested compile id range");
  return 0;
#else
  // CICountOSR is a develop flag and set to 'false' by default. In a product built,
  // only _compilation_id is incremented.
  return Atomic::add(&_compilation_id, 1);
#endif
}
// ------------------------------------------------------------------
// CompileBroker::assign_compile_id_unlocked
//
// Public wrapper for assign_compile_id that acquires the needed locks
// (MethodCompileQueue_lock) before delegating.
uint CompileBroker::assign_compile_id_unlocked(Thread* thread, const methodHandle& method, int osr_bci) {
  // Hold the queue lock for the duration of the ID assignment.
  MutexLocker locker(thread, MethodCompileQueue_lock);
  return assign_compile_id(method, osr_bci);
}
// ------------------------------------------------------------------
// CompileBroker::create_compile_task
//
// Create a CompileTask object representing the current request for
// compilation. Add this task to the queue.
CompileTask* CompileBroker::create_compile_task(CompileQueue*       queue,
                                                int                 compile_id,
                                                const methodHandle& method,
                                                int                 osr_bci,
                                                int                 comp_level,
                                                const methodHandle& hot_method,
                                                int                 hot_count,
                                                CompileTask::CompileReason compile_reason,
                                                bool                blocking) {
  CompileTask* task = CompileTask::allocate();
  task->initialize(compile_id, method, osr_bci, comp_level,
                   hot_method, hot_count, compile_reason,
                   blocking);
  queue->add(task);
  return task;
}
#if INCLUDE_JVMCI
// Fixed extraction-fused tokens "staticconstlong"/"staticconstint" which did not compile.

// The number of milliseconds to wait before checking if
// JVMCI compilation has made progress.
static const long JVMCI_COMPILATION_PROGRESS_WAIT_TIMESLICE = 1000;

// The number of JVMCI compilation progress checks that must fail
// before unblocking a thread waiting for a blocking compilation.
static const int JVMCI_COMPILATION_PROGRESS_WAIT_ATTEMPTS = 10;
/** * Waits for a JVMCI compiler to complete a given task. This thread * waits until either the task completes or it sees no JVMCI compilation * progress for N consecutive milliseconds where N is * JVMCI_COMPILATION_PROGRESS_WAIT_TIMESLICE * * JVMCI_COMPILATION_PROGRESS_WAIT_ATTEMPTS. * * @return true if this thread needs to free/recycle the task
*/ bool CompileBroker::wait_for_jvmci_completion(JVMCICompiler* jvmci, CompileTask* task, JavaThread* thread) {
assert(UseJVMCICompiler, "sanity");
MonitorLocker ml(thread, task->lock()); int progress_wait_attempts = 0;
jint thread_jvmci_compilation_ticks = 0;
jint global_jvmci_compilation_ticks = jvmci->global_compilation_ticks(); while (!task->is_complete() && !is_compilation_disabled_forever() &&
ml.wait(JVMCI_COMPILATION_PROGRESS_WAIT_TIMESLICE)) {
JVMCICompileState* jvmci_compile_state = task->blocking_jvmci_compile_state();
bool progress; if (jvmci_compile_state != NULL) {
jint ticks = jvmci_compile_state->compilation_ticks();
progress = (ticks - thread_jvmci_compilation_ticks) != 0;
JVMCI_event_1("waiting on compilation %d [ticks=%d]", task->compile_id(), ticks);
thread_jvmci_compilation_ticks = ticks;
} else { // Still waiting on JVMCI compiler queue. This thread may be holding a lock // that all JVMCI compiler threads are blocked on. We use the global JVMCI // compilation ticks to determine whether JVMCI compilation // is still making progress through the JVMCI compiler queue.
jint ticks = jvmci->global_compilation_ticks();
progress = (ticks - global_jvmci_compilation_ticks) != 0;
JVMCI_event_1("waiting on compilation %d to be queued [ticks=%d]", task->compile_id(), ticks);
global_jvmci_compilation_ticks = ticks;
}
if (!progress) { if (++progress_wait_attempts == JVMCI_COMPILATION_PROGRESS_WAIT_ATTEMPTS) { if (PrintCompilation) {
task->print(tty, "wait for blocking compilation timed out");
}
JVMCI_event_1("waiting on compilation %d timed out", task->compile_id()); break;
}
} else {
progress_wait_attempts = 0;
}
}
task->clear_waiter(); return task->is_complete();
} #endif
/** * Wait for the compilation task to complete.
*/ void CompileBroker::wait_for_completion(CompileTask* task) { if (CIPrintCompileQueue) {
ttyLocker ttyl;
tty->print_cr("BLOCKING FOR COMPILE");
}
assert(task->is_blocking(), "can only wait on blocking task");
JavaThread* thread = JavaThread::current();
methodHandle method(thread, task->method()); bool free_task; #if INCLUDE_JVMCI
AbstractCompiler* comp = compiler(task->comp_level()); if (comp->is_jvmci() && !task->should_wait_for_compilation()) { // It may return before compilation is completed.
free_task = wait_for_jvmci_completion((JVMCICompiler*) comp, task, thread);
} else #endif
{
MonitorLocker ml(thread, task->lock());
free_task = true; while (!task->is_complete() && !is_compilation_disabled_forever()) {
ml.wait();
}
}
if (free_task) { if (is_compilation_disabled_forever()) {
CompileTask::free(task); return;
}
// It is harmless to check this status without the lock, because
--> --------------------
--> maximum size reached
--> --------------------
¤ Dauer der Verarbeitung: 0.58 Sekunden
(vorverarbeitet)
¤
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung ist noch experimentell.