/*
 * Copyright (c) 2018, 2020, Red Hat, Inc. All rights reserved.
 * Copyright (c) 2020, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
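
// One backoff step for the spin master: hard-spin for _hard_spin_limit
// iterations, doubling the limit (up to WorkStealingHardSpins) each round,
// and yield the CPU instead once more than WorkStealingSpinToYieldRatio
// consecutive hard-spin rounds have been performed. Every step also counts
// as one yield towards the WorkStealingYieldsBeforeSleep sleep threshold.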
void TaskTerminator::DelayContext::do_step() {
  assert(_yield_count < WorkStealingYieldsBeforeSleep, "Number of yields too large");
  // Each spin iteration is counted as a yield for purposes of
  // deciding when to sleep.
  _yield_count++;
  // Periodically yield instead of spinning after WorkStealingSpinToYieldRatio
  // spins.
  if (_hard_spin_count > WorkStealingSpinToYieldRatio) {
os::naked_yield();
reset_hard_spin_information();
  } else {
    // Hard spin this time
    for (uint j = 0; j < _hard_spin_limit; j++) {
SpinPause();
}
    _hard_spin_count++;
    // Increase the hard spinning period but only up to a limit.
_hard_spin_limit = MIN2(2 * _hard_spin_limit,
(uint) WorkStealingHardSpins);
}
}
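
// A worker calls offer_termination() once it has run out of local and
// stealable work. The call returns true when all _n_threads workers have
// offered termination (the queue set is empty and the phase is done), and
// false when tasks reappear or the optional terminator requests an exit, in
// which case the caller withdraws its offer and goes back to work.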
bool TaskTerminator::offer_termination(TerminatorTerminator* terminator) {
  Thread* the_thread = Thread::current();

  MonitorLocker x(&_blocker, Mutex::_no_safepoint_check_flag);
  _offered_termination++;

  if (_offered_termination == _n_threads) {
    prepare_for_return(the_thread);
    assert_queue_set_empty();
    return true;
  }
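
  // Not the last thread to offer termination: wait for the remaining threads
  // to finish or for new work to show up. One thread at a time acts as spin
  // master and polls via DelayContext; the others sleep on the monitor until
  // they are notified or WorkStealingSleepMillis expires.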
  for (;;) {
    if (_spin_master == NULL) {
_spin_master = the_thread;
DelayContext delay_context;
while (!delay_context.needs_sleep()) {
        size_t tasks;
        bool should_exit_termination;
{
MutexUnlocker y(&_blocker, Mutex::_no_safepoint_check_flag);
          delay_context.do_step();
          // Intentionally read the number of tasks outside the mutex since this
          // is potentially a long operation making the locked section long.
tasks = tasks_in_queue_set();
should_exit_termination = exit_termination(tasks, terminator);
        }
        // Immediately check exit conditions after re-acquiring the lock.
        if (_offered_termination == _n_threads) {
prepare_for_return(the_thread);
          assert_queue_set_empty();
          return true;
        } else if (should_exit_termination) {
prepare_for_return(the_thread, tasks);
          _offered_termination--;
          return false;
}
      }
      // Give up spin master before sleeping.
_spin_master = NULL;
    }
    bool timed_out = x.wait(WorkStealingSleepMillis);

    // Immediately check exit conditions after re-acquiring the lock.
    if (_offered_termination == _n_threads) {
prepare_for_return(the_thread);
      assert_queue_set_empty();
      return true;
    } else if (!timed_out) {
      // We were woken up. Don't bother waking up more tasks.
prepare_for_return(the_thread, 0);
      _offered_termination--;
      return false;
} else {
      size_t tasks = tasks_in_queue_set();
      if (exit_termination(tasks, terminator)) {
prepare_for_return(the_thread, tasks);
        _offered_termination--;
        return false;
}
}
}
}
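
// ----------------------------------------------------------------------------
// A minimal sketch (not part of this file) of how a worker's task-stealing
// loop typically drives offer_termination(). The queue types and the helpers
// `my_queue`, `queue_set` and `process` are placeholders for illustration
// only; they are not HotSpot APIs. Only TaskTerminator itself is taken from
// the code above.
//
//   void worker_steal_loop(uint worker_id, TaskTerminator& terminator,
//                          ExampleQueue& my_queue, ExampleQueueSet& queue_set) {
//     ExampleTask task;
//     do {
//       // Drain the local queue first, then try to steal from other workers.
//       while (my_queue.pop_local(task) || queue_set.steal(worker_id, task)) {
//         process(task);
//       }
//       // No work left: offer termination. A false return means work became
//       // available again (or the terminator asked to exit), so resume stealing.
//     } while (!terminator.offer_termination(NULL /* no TerminatorTerminator */));
//   }
// ----------------------------------------------------------------------------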