/* * Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2020 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 only, as * published by the Free Software Foundation. * * This code is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * version 2 for more details (a copy is included in the LICENSE file that * accompanied this code). * * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA * or visit www.oracle.com if you need additional information or have any * questions. *
*/
if (gzip_compress_func == NULL) {
gzip_compress_func = (GzipCompressFunc) load_gzip_func("ZIP_GZip_Fully");
if (gzip_compress_func == NULL) { return"Cannot get ZIP_GZip_Fully function";
}
}
if (gzip_init_func == NULL) {
gzip_init_func = (GzipInitFunc) load_gzip_func("ZIP_GZip_InitParams");
if (gzip_init_func == NULL) { return"Cannot get ZIP_GZip_InitParams function";
}
}
charconst* result = gzip_init_func(block_size, needed_out_size,
needed_tmp_size, _level);
*needed_out_size += 1024; // Add extra space for the comment in the first chunk.
if (_is_first) { char buf[128]; // Write the block size used as a comment in the first gzip chunk, so the // code used to read it later can make a good choice of the buffer sizes it uses.
jio_snprintf(buf, sizeof(buf), "HPROF BLOCKSIZE=" SIZE_FORMAT, _block_size);
*compressed_size = gzip_compress_func(in, in_size, out, out_size, tmp, tmp_size, _level,
buf, &msg);
_is_first = false;
} else {
*compressed_size = gzip_compress_func(in, in_size, out, out_size, tmp, tmp_size, _level,
NULL, &msg);
}
if (_current == NULL) {
set_error("Could not allocate memory for buffer");
}
_active = (_err == NULL);
}
// Destructor. The backend must already be inactive (deactivate() done) and all
// worker threads gone before it is destroyed; the asserts below enforce that
// no queued or in-flight work remains.
// Fix: the closing brace of this destructor was missing in the (whitespace-
// mangled) original, which ran straight into the next definition.
CompressionBackend::~CompressionBackend() {
  assert(!_active, "Must not be active by now");
  assert(_nr_of_threads == 0, "Must have no active threads");
  assert(_to_compress.is_empty() && _finished.is_empty(), "Still work to do");

  // Release the pooled (unused) work objects and the buffer currently
  // being filled, then verify the allocation counter balanced out.
  free_work_list(&_unused);
  free_work(_current);
  assert(_works_created == 0, "All work must have been freed");
}
// Drains the given list, releasing every work object still queued on it.
void CompressionBackend::free_work_list(WorkList* list) {
  while (!list->is_empty()) {
    WriteWork* queued = list->remove_first();
    free_work(queued);
  }
}
// Processes one pending work item on the calling (foreground) thread.
// Caller must hold _lock. The item is claimed from the queue while the lock
// is still held; the lock is then dropped for the duration of the
// compression (and the finish step, which presumably writes the result —
// finish_work() is defined elsewhere) so other threads can make progress.
// The MutexUnlocker re-acquires the lock when it goes out of scope.
void CompressionBackend::do_foreground_work() {
  assert(!_to_compress.is_empty(), "Must have work to do");
  assert(_lock->owned_by_self(), "Must have the lock");

  // Claim the item under the lock, then release the lock for the
  // potentially long-running compression.
  WriteWork* work = _to_compress.remove_first();
  MutexUnlocker mu(_lock, Mutex::_no_safepoint_check_flag);
  do_compress(work);
  finish_work(work);
}
while (_active && _to_compress.is_empty()) {
ml.wait();
}
return _to_compress.remove_first();
}
// Copies 'used' bytes from an externally owned buffer into the backend's
// current input buffer so the data becomes part of the compressed output
// stream. 'max' is the capacity of the external buffer; it must fit into
// the backend's own input buffer (see the capacity assert below).
void CompressionBackend::flush_external_buffer(char* buffer, size_t used, size_t max) {
  assert(buffer != NULL && used != 0 && max != 0, "Invalid data send to compression backend");
  assert(_active == true, "Backend must be active when flushing external buffer");
  char* buf;
  size_t tmp_used = 0;
  size_t tmp_max = 0;

  MonitorLocker ml(_lock, Mutex::_no_safepoint_check_flag);
  // First try current buffer. Use it if empty.
  if (_current->_in_used == 0) {
    buf = _current->_in;
  } else {
    // If current buffer is not clean, flush it.
    // get_new_buffer() acquires _lock itself, so temporarily drop it here
    // (the inner 'ml' shadows the MonitorLocker for this scope only and
    // re-locks on scope exit).
    MutexUnlocker ml(_lock, Mutex::_no_safepoint_check_flag);
    get_new_buffer(&buf, &tmp_used, &tmp_max, true);
  }
  // Either path must leave us with an empty current buffer large enough
  // for the external data.
  assert (_current->_in != NULL && _current->_in_max >= max &&
          _current->_in_used == 0, "Invalid buffer from compression backend");
  // Copy data to backend buffer.
  memcpy(buf, buffer, used);

  assert(_current->_in == buf, "Must be current");
  _current->_in_used += used;
}
void CompressionBackend::get_new_buffer(char** buffer, size_t* used, size_t* max, bool force_reset) { if (_active) {
MonitorLocker ml(_lock, Mutex::_no_safepoint_check_flag); if (*used > 0 || force_reset) {
_current->_in_used += *used; // Check if we do not waste more than _max_waste. If yes, write the buffer. // Otherwise return the rest of the buffer as the new buffer. if (_current->_in_max - _current->_in_used <= _max_waste || force_reset) {
_current->_id = _next_id++;
_to_compress.add_last(_current);
_current = NULL;
ml.notify_all();
} else {
*buffer = _current->_in + _current->_in_used;
*used = 0;
*max = _current->_in_max - _current->_in_used; return;
}
}
while ((_current == NULL) && _unused.is_empty() && _active) { // Add more work objects if needed. if (!_work_creation_failed && (_works_created <= _nr_of_threads)) {
WriteWork* work = allocate_work(_in_size, _out_size, _tmp_size);
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung ist noch experimentell.