field = bt_ctf_field_create(type); if (!field) {
pr_err("failed to create a field %s\n", name); return -1;
}
if (sign) {
ret = bt_ctf_field_signed_integer_set_value(field, val); if (ret) {
pr_err("failed to set field value %s\n", name); goto err;
}
} else {
ret = bt_ctf_field_unsigned_integer_set_value(field, val); if (ret) {
pr_err("failed to set field value %s\n", name); goto err;
}
}
ret = bt_ctf_event_set_payload(event, name, field); if (ret) {
pr_err("failed to set payload %s\n", name); goto err;
}
if (flags & TEP_FIELD_IS_STRING) return cw->data.string;
if (!(flags & TEP_FIELD_IS_SIGNED)) { /* unsigned long are mostly pointers */ if (flags & TEP_FIELD_IS_LONG || flags & TEP_FIELD_IS_POINTER) return cw->data.u64_hex;
}
if (flags & TEP_FIELD_IS_SIGNED) { if (field->size == 8) return cw->data.s64; else return cw->data.s32;
}
if (field->size == 8) return cw->data.u64; else return cw->data.u32;
}
/*
 * Sign-extend a trace-buffer value of the given byte width to a full
 * 64-bit representation.
 *
 * @value_int: raw value read from the trace data (low @size bytes valid)
 * @size:      width of the original field in bytes (1, 2, 4 or 8)
 *
 * Returns @value_int with its upper bits filled from the sign bit, so a
 * negative N-byte integer reads as a negative 64-bit integer.  A width
 * of 8 (or any unexpected width) is returned unchanged.
 */
static unsigned long long adjust_signedness(unsigned long long value_int, int size)
{
	unsigned long long value_mask;

	/*
	 * value_mask = (1 << (size * 8 - 1)) - 1.
	 * Directly set value_mask for code readers.
	 */
	switch (size) {
	case 1:
		value_mask = 0x7fULL;
		break;
	case 2:
		value_mask = 0x7fffULL;
		break;
	case 4:
		value_mask = 0x7fffffffULL;
		break;
	case 8:
		/*
		 * For 64 bit value, return it self. There is no need
		 * to fill high bit.
		 */
		/* Fall through */
	default:
		/* BUG! */
		return value_int;
	}

	/* If it is a positive value, don't adjust. */
	if ((value_int & (~0ULL - value_mask)) == 0)
		return value_int;

	/* Fill upper part of value_int with 1 to make it a negative long long. */
	return (value_int & value_mask) | ~value_mask;
}
staticint string_set_value(struct bt_ctf_field *field, constchar *string)
{ char *buffer = NULL;
size_t len = strlen(string), i, p; int err;
for (i = p = 0; i < len; i++, p++) { if (isprint(string[i])) { if (!buffer) continue;
buffer[p] = string[i];
} else { char numstr[5];
for (i = 0; i < n_items; i++) { if (flags & TEP_FIELD_IS_ARRAY)
field = bt_ctf_field_array_get_field(array_field, i); else
field = bt_ctf_field_create(type);
if (!field) {
pr_err("failed to create a field %s\n", name); return -1;
}
if (flags & TEP_FIELD_IS_STRING)
ret = string_set_value(field, data + offset + i * len); else { unsignedlonglong value_int;
value_int = tep_read_number(
fmtf->event->tep,
data + offset + i * len, len);
if (!(flags & TEP_FIELD_IS_SIGNED))
ret = bt_ctf_field_unsigned_integer_set_value(
field, value_int); else
ret = bt_ctf_field_signed_integer_set_value(
field, adjust_signedness(value_int, len));
}
if (ret) {
pr_err("failed to set file value %s\n", name); goto err_put_field;
} if (!(flags & TEP_FIELD_IS_ARRAY)) {
ret = bt_ctf_event_set_payload(event, name, field); if (ret) {
pr_err("failed to set payload %s\n", name); goto err_put_field;
}
}
bt_ctf_field_put(field);
} if (flags & TEP_FIELD_IS_ARRAY) {
ret = bt_ctf_event_set_payload(event, name, array_field); if (ret) {
pr_err("Failed add payload array %s\n", name); return -1;
}
bt_ctf_field_put(array_field);
} return 0;
len_type = bt_ctf_event_class_get_field_by_name(event_class, "raw_len");
len_field = bt_ctf_field_create(len_type); if (!len_field) {
pr_err("failed to create 'raw_len' for bpf output event\n");
ret = -1; goto put_len_type;
}
ret = bt_ctf_field_unsigned_integer_set_value(len_field, nr_elements); if (ret) {
pr_err("failed to set field value for raw_len\n"); goto put_len_field;
}
ret = bt_ctf_event_set_payload(event, "raw_len", len_field); if (ret) {
pr_err("failed to set payload to raw_len\n"); goto put_len_field;
}
seq_type = bt_ctf_event_class_get_field_by_name(event_class, "raw_data");
seq_field = bt_ctf_field_create(seq_type); if (!seq_field) {
pr_err("failed to create 'raw_data' for bpf output event\n");
ret = -1; goto put_seq_type;
}
ret = bt_ctf_field_sequence_set_length(seq_field, len_field); if (ret) {
pr_err("failed to set length of 'raw_data'\n"); goto put_seq_field;
}
for (i = 0; i < nr_elements; i++) { struct bt_ctf_field *elem_field =
bt_ctf_field_sequence_get_field(seq_field, i);
ret = bt_ctf_field_unsigned_integer_set_value(elem_field,
((u32 *)(sample->raw_data))[i]);
bt_ctf_field_put(elem_field); if (ret) {
pr_err("failed to set raw_data[%d]\n", i); goto put_seq_field;
}
}
ret = bt_ctf_event_set_payload(event, "raw_data", seq_field); if (ret)
pr_err("failed to set payload for raw_data\n");
len_type = bt_ctf_event_class_get_field_by_name(
event_class, "perf_callchain_size");
len_field = bt_ctf_field_create(len_type); if (!len_field) {
pr_err("failed to create 'perf_callchain_size' for callchain output event\n");
ret = -1; goto put_len_type;
}
ret = bt_ctf_field_unsigned_integer_set_value(len_field, nr_elements); if (ret) {
pr_err("failed to set field value for perf_callchain_size\n"); goto put_len_field;
}
ret = bt_ctf_event_set_payload(event, "perf_callchain_size", len_field); if (ret) {
pr_err("failed to set payload to perf_callchain_size\n"); goto put_len_field;
}
seq_type = bt_ctf_event_class_get_field_by_name(
event_class, "perf_callchain");
seq_field = bt_ctf_field_create(seq_type); if (!seq_field) {
pr_err("failed to create 'perf_callchain' for callchain output event\n");
ret = -1; goto put_seq_type;
}
ret = bt_ctf_field_sequence_set_length(seq_field, len_field); if (ret) {
pr_err("failed to set length of 'perf_callchain'\n"); goto put_seq_field;
}
for (i = 0; i < nr_elements; i++) { struct bt_ctf_field *elem_field =
bt_ctf_field_sequence_get_field(seq_field, i);
ret = bt_ctf_field_unsigned_integer_set_value(elem_field,
((u64 *)(callchain->ips))[i]);
bt_ctf_field_put(elem_field); if (ret) {
pr_err("failed to set callchain[%d]\n", i); goto put_seq_field;
}
}
ret = bt_ctf_event_set_payload(event, "perf_callchain", seq_field); if (ret)
pr_err("failed to set payload for raw_data\n");
/*
 * Add the generic perf sample fields (ip, tid/pid, id, stream id,
 * period, weight, data_src, transaction) to a CTF event's payload.
 *
 * Only fields enabled in the evsel's sample_type bitmask are emitted;
 * each one is written through the matching value_set_* helper.
 *
 * Return 0 on success, -1 if setting any field fails.
 */
static int add_generic_values(struct ctf_writer *cw,
			      struct bt_ctf_event *event,
			      struct evsel *evsel,
			      struct perf_sample *sample)
{
	u64 type = evsel->core.attr.sample_type;
	int ret;

	/*
	 * missing:
	 *   PERF_SAMPLE_TIME         - not needed as we have it in
	 *                              ctf event header
	 *   PERF_SAMPLE_READ         - TODO
	 *   PERF_SAMPLE_RAW          - tracepoint fields are handled separately
	 *   PERF_SAMPLE_BRANCH_STACK - TODO
	 *   PERF_SAMPLE_REGS_USER    - TODO
	 *   PERF_SAMPLE_STACK_USER   - TODO
	 */

	if (type & PERF_SAMPLE_IP) {
		ret = value_set_u64_hex(cw, event, "perf_ip", sample->ip);
		if (ret)
			return -1;
	}

	if (type & PERF_SAMPLE_TID) {
		ret = value_set_s32(cw, event, "perf_tid", sample->tid);
		if (ret)
			return -1;

		ret = value_set_s32(cw, event, "perf_pid", sample->pid);
		if (ret)
			return -1;
	}

	if ((type & PERF_SAMPLE_ID) ||
	    (type & PERF_SAMPLE_IDENTIFIER)) {
		ret = value_set_u64(cw, event, "perf_id", sample->id);
		if (ret)
			return -1;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		ret = value_set_u64(cw, event, "perf_stream_id", sample->stream_id);
		if (ret)
			return -1;
	}

	if (type & PERF_SAMPLE_PERIOD) {
		ret = value_set_u64(cw, event, "perf_period", sample->period);
		if (ret)
			return -1;
	}

	if (type & PERF_SAMPLE_WEIGHT) {
		ret = value_set_u64(cw, event, "perf_weight", sample->weight);
		if (ret)
			return -1;
	}

	if (type & PERF_SAMPLE_DATA_SRC) {
		ret = value_set_u64(cw, event, "perf_data_src",
				    sample->data_src);
		if (ret)
			return -1;
	}

	if (type & PERF_SAMPLE_TRANSACTION) {
		ret = value_set_u64(cw, event, "perf_transaction",
				    sample->transaction);
		if (ret)
			return -1;
	}

	return 0;
}
staticint ctf_stream__flush(struct ctf_stream *cs)
{ int err = 0;
if (cs) {
err = bt_ctf_stream_flush(cs->stream); if (err)
pr_err("CTF stream %d flush failed\n", cs->cpu);
pr("Flush stream for cpu %d (%u samples)\n",
cs->cpu, cs->count);
/*
 * Pick the CPU (and therefore the output stream index) for a sample.
 *
 * If the sample carries no CPU information (PERF_SAMPLE_CPU not set),
 * or the recorded CPU exceeds the number of streams we created, fall
 * back to stream/CPU 0 so conversion can continue.
 *
 * NOTE(review): the bound check uses '>' — if streams are indexed
 * 0..stream_cnt-1, cpu == stream_cnt would slip through; confirm
 * against the stream-array allocation before changing it.
 */
static int get_sample_cpu(struct ctf_writer *cw, struct perf_sample *sample,
			  struct evsel *evsel)
{
	int cpu = 0;

	if (evsel->core.attr.sample_type & PERF_SAMPLE_CPU)
		cpu = sample->cpu;

	if (cpu > cw->stream_cnt) {
		pr_err("Event was recorded for CPU %d, limit is at %d.\n",
		       cpu, cw->stream_cnt);
		cpu = 0;
	}

	return cpu;
}
#define STREAM_FLUSH_COUNT 100000

/*
 * Currently we have no other way to determine the
 * time for the stream flush other than keep track
 * of the number of events and check it against
 * threshold.
 */
static bool is_flush_needed(struct ctf_stream *cs)
{
	/* Flush once the per-stream event count reaches the threshold. */
	return cs->count >= STREAM_FLUSH_COUNT;
}
/* alias was already assigned */ if (field->alias != field->name) return bt_ctf_event_class_add_field(event_class, type,
(char *)field->alias);
name = field->name;
/* If 'name' is a keywork, add prefix. */ if (bt_ctf_validate_identifier(name))
name = change_name(name, field->name, -1);
if (!name) {
pr_err("Failed to fix invalid identifier."); return -1;
} while ((t = bt_ctf_event_class_get_field_by_name(event_class, name))) {
bt_ctf_field_type_put(t);
name = change_name(name, field->name, dup++); if (!name) {
pr_err("Failed to create dup name for '%s'\n", field->name); return -1;
}
}
ret = bt_ctf_event_class_add_field(event_class, type, name); if (!ret)
field->alias = name;
for (field = fields; field; field = field->next) { struct bt_ctf_field_type *type; unsignedlong flags = field->flags;
pr2(" field '%s'\n", field->name);
type = get_tracepoint_field_type(cw, field); if (!type) return -1;
/* * A string is an array of chars. For this we use the string * type and don't care that it is an array. What we don't * support is an array of strings.
*/ if (flags & TEP_FIELD_IS_STRING)
flags &= ~TEP_FIELD_IS_ARRAY;
if (flags & TEP_FIELD_IS_ARRAY)
type = bt_ctf_field_type_array_create(type, field->arraylen);
ret = event_class_add_field(event_class, type, field);
if (flags & TEP_FIELD_IS_ARRAY)
bt_ctf_field_type_put(type);
if (ret) {
pr_err("Failed to add field '%s': %d\n",
field->name, ret); return -1;
}
}
/* * missing: * PERF_SAMPLE_TIME - not needed as we have it in * ctf event header * PERF_SAMPLE_READ - TODO * PERF_SAMPLE_CALLCHAIN - TODO * PERF_SAMPLE_RAW - tracepoint fields and BPF output * are handled separately * PERF_SAMPLE_BRANCH_STACK - TODO * PERF_SAMPLE_REGS_USER - TODO * PERF_SAMPLE_STACK_USER - TODO
*/
#define ADD_FIELD(cl, t, n) \ do { \
pr2(" field '%s'\n", n); \ if (bt_ctf_event_class_add_field(cl, t, n)) { \
pr_err("Failed to add field '%s';\n", n); \ return -1; \
} \
} while (0)
if (type & PERF_SAMPLE_IP)
ADD_FIELD(event_class, cw->data.u64_hex, "perf_ip");
ret = add_comm_event(cw); if (ret) return ret;
ret = add_exit_event(cw); if (ret) return ret;
ret = add_fork_event(cw); if (ret) return ret;
ret = add_mmap_event(cw); if (ret) return ret;
ret = add_mmap2_event(cw); if (ret) return ret; return 0;
}
¤ Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
(vorverarbeitet)
¤
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.