// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2009-2013, 2016-2018, The Linux Foundation. All rights reserved.
 * Copyright (c) 2014, Sony Mobile Communications AB.
 */
/* Maximum transfer length for a single DMA descriptor */
#define MX_TX_RX_LEN		SZ_64K
#define MX_BLOCKS		(MX_TX_RX_LEN / QUP_READ_LIMIT)
/* Maximum transfer length for all DMA descriptors */
#define MX_DMA_TX_RX_LEN	(2 * MX_TX_RX_LEN)
#define MX_DMA_BLOCKS		(MX_DMA_TX_RX_LEN / QUP_READ_LIMIT)

/*
 * Minimum transfer timeout for i2c transfers in seconds. It will be added on
 * top of the maximum transfer time calculated from the i2c bus speed to
 * compensate for the overheads.
 */
#define TOUT_MIN		2

/* Default values. Use these if FW query fails */
#define DEFAULT_CLK_FREQ	I2C_MAX_STANDARD_MODE_FREQ
#define DEFAULT_SRC_CLK		20000000

/*
 * Max tags length (start, stop and maximum 2 bytes address) for each QUP
 * data transfer
 */
#define QUP_MAX_TAGS_LEN	4
/* Max data length for each DATARD tag */
#define RECV_MAX_DATA_LEN	254
/* TAG length for DATA READ in RX FIFO */
#define READ_RX_TAGS_LEN	2

#define QUP_BUS_WIDTH		8
/*
 * Optional SCL frequency override, set at module load time. Read-only via
 * sysfs (0444). Zero means "no override".
 * NOTE(review): fused keywords ("staticunsignedint") in the original broke
 * compilation; fixed to "static unsigned int".
 */
static unsigned int scl_freq;
module_param_named(scl_freq, scl_freq, uint, 0444);
MODULE_PARM_DESC(scl_freq, "SCL frequency override");
/*
 * Bookkeeping state for one QUP i2c transfer, which is split into blocks.
 * TX and RX FIFO accesses work on a word (4 byte) basis, so partially
 * assembled words are staged in tx_fifo_data / rx_fifo_data.
 */
struct qup_i2c_block {
	int	count;			/* number of blocks */
	int	pos;			/* current block number */
	int	tx_tag_len;		/* tx tag length for current block */
	int	rx_tag_len;		/* rx tag length for current block */
	int	data_len;		/* remaining data length for current message */
	int	cur_blk_len;		/* data length for current block */
	int	total_tx_len;		/* total tx length incl. tag bytes for current QUP transfer */
	int	total_rx_len;		/* total rx length incl. tag bytes for current QUP transfer */
	int	tx_fifo_data_pos;	/* current byte number in TX FIFO word */
	int	tx_fifo_free;		/* free bytes in current QUP block write */
	int	rx_fifo_data_pos;	/* current byte number in RX FIFO word */
	int	fifo_available;		/* available bytes in RX FIFO for current QUP block read */
	u32	tx_fifo_data;		/* staged TX FIFO word; written out once all 4 bytes present */
	u32	rx_fifo_data;		/* last word (4 bytes) read from the RX FIFO */
	u8	*cur_data;		/* current data position in the current message */
	u8	*cur_tx_tags;		/* current position in the tag bytes */
	bool	tx_tags_sent;		/* all tx tag bytes written into the FIFO word */
	bool	send_last_word;		/* last TX word still pending in current block */
	bool	rx_tags_fetched;	/* all rx tag bytes fetched from the rx FIFO word */
	bool	rx_bytes_read;		/* all bytes read from the rx FIFO */
	bool	is_tx_blk_mode;		/* tx uses block (vs FIFO) mode for non-BAM xfer */
	bool	is_rx_blk_mode;		/* rx uses block (vs FIFO) mode for non-BAM xfer */
	u8	tags[6];		/* tx tag bytes for current QUP transfer */
};
/* dma parameters */ bool is_dma; /* To check if the current transfer is using DMA */ bool use_dma; unsignedint max_xfer_sg_len; unsignedint tag_buf_pos; /* The threshold length above which block mode will be used */ unsignedint blk_mode_threshold; struct dma_pool *dpool; struct qup_i2c_tag start_tag; struct qup_i2c_bam brx; struct qup_i2c_bam btx;
struct completion xfer; /* function to write data in tx fifo */ void (*write_tx_fifo)(struct qup_i2c_dev *qup); /* function to read data from rx fifo */ void (*read_rx_fifo)(struct qup_i2c_dev *qup); /* function to write tags in tx fifo for i2c read transfer */ void (*write_rx_tags)(struct qup_i2c_dev *qup);
};
/* Clear the error bits in QUP_ERROR_FLAGS */ if (qup_err)
writel(qup_err, qup->base + QUP_ERROR_FLAGS);
/* Clear the error bits in QUP_I2C_STATUS */ if (bus_err)
writel(bus_err, qup->base + QUP_I2C_STATUS);
/* * Check for BAM mode and returns if already error has come for current * transfer. In Error case, sometimes, QUP generates more than one * interrupt.
*/ if (qup->use_dma && (qup->qup_err || qup->bus_err)) return IRQ_HANDLED;
/* Reset the QUP State in case of error */ if (qup_err || bus_err) { /* * Don’t reset the QUP state in case of BAM mode. The BAM * flush operation needs to be scheduled in transfer function * which will clear the remaining schedule descriptors in BAM * HW FIFO and generates the BAM interrupt.
*/ if (!qup->use_dma)
writel(QUP_RESET_STATE, qup->base + QUP_STATE); goto done;
}
if (opflags & QUP_OUT_SVC_FLAG) {
writel(QUP_OUT_SVC_FLAG, qup->base + QUP_OPERATIONAL);
if (opflags & OUT_BLOCK_WRITE_REQ) {
blk->tx_fifo_free += qup->out_blk_sz; if (qup->msg->flags & I2C_M_RD)
qup->write_rx_tags(qup); else
qup->write_tx_fifo(qup);
}
}
if (opflags & QUP_IN_SVC_FLAG) {
writel(QUP_IN_SVC_FLAG, qup->base + QUP_OPERATIONAL);
if (qup->msg->flags & I2C_M_RD) { if (!blk->rx_bytes_read) return IRQ_HANDLED;
} else { /* * Ideally, QUP_MAX_OUTPUT_DONE_FLAG should be checked * for FIFO mode also. But, QUP_MAX_OUTPUT_DONE_FLAG lags * behind QUP_OUTPUT_SERVICE_FLAG sometimes. The only reason * of interrupt for write message in FIFO mode is * QUP_MAX_OUTPUT_DONE_FLAG condition.
*/ if (blk->is_tx_blk_mode && !(opflags & QUP_MX_OUTPUT_DONE)) return IRQ_HANDLED;
}
/* * State transition takes 3 AHB clocks cycles + 3 I2C master clock * cycles. So retry once after a 1uS delay.
*/ do {
state = readl(qup->base + QUP_STATE);
if (qup_i2c_poll_state(qup, state) != 0) return -EIO; return 0;
}
/* Check if I2C bus returns to IDLE state */ staticint qup_i2c_bus_active(struct qup_i2c_dev *qup, int len)
{ unsignedlong timeout;
u32 status; int ret = 0;
timeout = jiffies + len * 4; for (;;) {
status = readl(qup->base + QUP_I2C_STATUS); if (!(status & I2C_STATUS_BUS_ACTIVE)) break;
if (time_after(jiffies, timeout)) {
ret = -ETIMEDOUT; break;
}
if (msg->flags & I2C_M_RD) { while (qup->blk.pos < blocks) {
tlen = (i == (blocks - 1)) ? rem : limit;
tags = &qup->start_tag.start[qup->tag_buf_pos + len];
len += qup_i2c_set_tags(tags, qup, msg);
qup->blk.data_len -= tlen;
/* scratch buf to read the start and len tags */
ret = qup_sg_set_buf(&qup->brx.sg[qup->brx.sg_cnt++],
&qup->brx.tag.start[0],
2, qup, DMA_FROM_DEVICE);
if (ret) return ret;
ret = qup_sg_set_buf(&qup->brx.sg[qup->brx.sg_cnt++],
&msg->buf[limit * i],
tlen, qup,
DMA_FROM_DEVICE); if (ret) return ret;
i++;
qup->blk.pos = i;
}
ret = qup_sg_set_buf(&qup->btx.sg[qup->btx.sg_cnt++],
&qup->start_tag.start[qup->tag_buf_pos],
len, qup, DMA_TO_DEVICE); if (ret) return ret;
qup->tag_buf_pos += len;
} else { while (qup->blk.pos < blocks) {
tlen = (i == (blocks - 1)) ? rem : limit;
tags = &qup->start_tag.start[qup->tag_buf_pos + tx_len];
len = qup_i2c_set_tags(tags, qup, msg);
qup->blk.data_len -= tlen;
ret = qup_sg_set_buf(&qup->btx.sg[qup->btx.sg_cnt++],
tags, len,
qup, DMA_TO_DEVICE); if (ret) return ret;
staticint qup_i2c_bam_schedule_desc(struct qup_i2c_dev *qup)
{ struct dma_async_tx_descriptor *txd, *rxd = NULL; int ret = 0;
dma_cookie_t cookie_rx, cookie_tx;
u32 len = 0;
u32 tx_cnt = qup->btx.sg_cnt, rx_cnt = qup->brx.sg_cnt;
/* schedule the EOT and FLUSH I2C tags */
len = 1; if (rx_cnt) {
qup->btx.tag.start[0] = QUP_BAM_INPUT_EOT;
len++;
/* scratch buf to read the BAM EOT FLUSH tags */
ret = qup_sg_set_buf(&qup->brx.sg[rx_cnt++],
&qup->brx.tag.start[0],
1, qup, DMA_FROM_DEVICE); if (ret) return ret;
}
qup->btx.tag.start[len - 1] = QUP_BAM_FLUSH_STOP;
ret = qup_sg_set_buf(&qup->btx.sg[tx_cnt++], &qup->btx.tag.start[0],
len, qup, DMA_TO_DEVICE); if (ret) return ret;
txd = dmaengine_prep_slave_sg(qup->btx.dma, qup->btx.sg, tx_cnt,
DMA_MEM_TO_DEV,
DMA_PREP_INTERRUPT | DMA_PREP_FENCE); if (!txd) {
dev_err(qup->dev, "failed to get tx desc\n");
ret = -EINVAL; goto desc_err;
}
if (!rx_cnt) {
txd->callback = qup_i2c_bam_cb;
txd->callback_param = qup;
}
cookie_tx = dmaengine_submit(txd); if (dma_submit_error(cookie_tx)) {
ret = -EINVAL; goto desc_err;
}
dma_async_issue_pending(qup->btx.dma);
if (rx_cnt) {
rxd = dmaengine_prep_slave_sg(qup->brx.dma, qup->brx.sg,
rx_cnt, DMA_DEV_TO_MEM,
DMA_PREP_INTERRUPT); if (!rxd) {
dev_err(qup->dev, "failed to get rx desc\n");
ret = -EINVAL;
ret = qup_i2c_bam_make_desc(qup, qup->msg); if (ret) break;
/* * Make DMA descriptor and schedule the BAM transfer if its * already crossed the maximum length. Since the memory for all * tags buffers have been taken for 2 maximum possible * transfers length so it will never cross the buffer actual * length.
*/ if (qup->btx.sg_cnt > qup->max_xfer_sg_len ||
qup->brx.sg_cnt > qup->max_xfer_sg_len ||
qup->is_last) {
ret = qup_i2c_bam_schedule_desc(qup); if (ret) break;
qup_i2c_bam_clear_tag_buffers(qup);
}
}
out:
disable_irq(qup->irq);
qup->msg = NULL; return ret;
}
staticint qup_i2c_wait_for_complete(struct qup_i2c_dev *qup, struct i2c_msg *msg)
{ unsignedlong left; int ret = 0;
left = wait_for_completion_timeout(&qup->xfer, qup->xfer_timeout); if (!left) {
writel(1, qup->base + QUP_SW_RESET);
ret = -ETIMEDOUT;
}
if (qup->bus_err || qup->qup_err)
ret = (qup->bus_err & QUP_I2C_NACK_FLAG) ? -ENXIO : -EIO;
/* * Configure registers related with reconfiguration during run and call it * before each i2c sub transfer.
*/ staticvoid qup_i2c_conf_count_v2(struct qup_i2c_dev *qup)
{ struct qup_i2c_block *blk = &qup->blk;
u32 qup_config = I2C_MINI_CORE | I2C_N_VAL_V2;
/* * Configure registers related with transfer mode (FIFO/Block) * before starting of i2c transfer. It will be called only once in * QUP RESET state.
*/ staticvoid qup_i2c_conf_mode_v2(struct qup_i2c_dev *qup)
{ struct qup_i2c_block *blk = &qup->blk;
u32 io_mode = QUP_REPACK_EN;
/*
 * Read the data and tags from the RX FIFO for a QUP v2 read transfer.
 *
 * In the read case the tag bytes precede the received data bytes, so:
 * 1. If rx_tags_fetched is false (i.e. the start of a QUP block), receive
 *    all tag bytes first and discard them.
 * 2. Read the data bytes from the RX FIFO. Once the whole block has been
 *    read (cur_blk_len reaches zero), set rx_bytes_read to true.
 *
 * NOTE(review): the fused "staticvoid" keyword in the original broke
 * compilation; fixed to "static void".
 */
static void qup_i2c_read_rx_fifo_v2(struct qup_i2c_dev *qup)
{
	struct qup_i2c_block *blk = &qup->blk;

	/* Tags arrive first in each block; consume them exactly once. */
	if (!blk->rx_tags_fetched) {
		qup_i2c_recv_tags(qup);
		blk->rx_tags_fetched = true;
	}

	qup_i2c_recv_data(qup);

	/* cur_blk_len hits zero once the last data byte has been read. */
	if (!blk->cur_blk_len)
		blk->rx_bytes_read = true;
}
/* * Write bytes in TX FIFO for write message in QUP v2 i2c transfer. QUP TX FIFO * write works on word basis (4 bytes). Append new data byte write for TX FIFO * in tx_fifo_data and write to TX FIFO when all the 4 bytes are present.
*/ staticvoid
qup_i2c_write_blk_data(struct qup_i2c_dev *qup, u8 **data, unsignedint *len)
{ struct qup_i2c_block *blk = &qup->blk; unsignedint j;
/*
 * Transfer the tag bytes for a read message in a QUP v2 i2c transfer.
 * NOTE(review): fused "staticvoid" in the original broke compilation;
 * fixed to "static void".
 */
static void qup_i2c_write_rx_tags_v2(struct qup_i2c_dev *qup)
{
	struct qup_i2c_block *blk = &qup->blk;

	qup_i2c_write_blk_data(qup, &blk->cur_tx_tags, &blk->tx_tag_len);

	/*
	 * TX FIFO writes happen per 4-byte word; flush any partially filled
	 * word so the remaining tag bytes actually reach the FIFO.
	 */
	if (blk->tx_fifo_data_pos)
		writel(blk->tx_fifo_data, qup->base + QUP_OUT_FIFO_BASE);
}
/* * Write the data and tags in TX FIFO. Since in write case, both tags and data * need to be written and QUP write tags can have maximum 256 data length, so * * 1. Check if tx_tags_sent is false i.e. the start of QUP block so write the * tags to TX FIFO and set tx_tags_sent to true. * 2. Check if send_last_word is true. It will be set when last few data bytes * (less than 4 bytes) are remaining to be written in FIFO because of no FIFO * space. All this data bytes are available in tx_fifo_data so write this * in FIFO. * 3. Write the data to TX FIFO and check for cur_blk_len. If it is non zero * then more data is pending otherwise following 3 cases can be possible * a. if tx_fifo_data_pos is zero i.e. all the data bytes in this block * have been written in TX FIFO so nothing else is required. * b. tx_fifo_free is non zero i.e tx FIFO is free so copy the remaining data * from tx_fifo_data to tx FIFO. Since, qup_i2c_write_blk_data do write * in 4 bytes and FIFO space is in multiple of 4 bytes so tx_fifo_free * will be always greater than or equal to 4 bytes. * c. tx_fifo_free is zero. In this case, last few bytes (less than 4 * bytes) are copied to tx_fifo_data but couldn't be sent because of * FIFO full so make send_last_word true.
*/ staticvoid qup_i2c_write_tx_fifo_v2(struct qup_i2c_dev *qup)
{ struct qup_i2c_block *blk = &qup->blk;
if (!blk->tx_tags_sent) {
qup_i2c_write_blk_data(qup, &blk->cur_tx_tags,
&blk->tx_tag_len);
blk->tx_tags_sent = true;
}
if (blk->send_last_word) goto send_last_word;
qup_i2c_write_blk_data(qup, &blk->cur_data, &blk->cur_blk_len); if (!blk->cur_blk_len) { if (!blk->tx_fifo_data_pos) return;
/* * Main transfer function which read or write i2c data. * The QUP v2 supports reconfiguration during run in which multiple i2c sub * transfers can be scheduled.
*/ staticint
qup_i2c_conf_xfer_v2(struct qup_i2c_dev *qup, bool is_rx, bool is_first, bool change_pause_state)
{ struct qup_i2c_block *blk = &qup->blk; struct i2c_msg *msg = qup->msg; int ret;
/* * Check if its SMBus Block read for which the top level read will be * done into 2 QUP reads. One with message length 1 while other one is * with actual length.
*/ if (qup_i2c_check_msg_len(msg)) { if (qup->is_smbus_read) { /* * If the message length is already read in * the first byte of the buffer, account for * that by setting the offset
*/
blk->cur_data += 1;
is_first = false;
} else {
change_pause_state = false;
}
}
ret = qup_i2c_change_state(qup, QUP_PAUSE_STATE); if (ret) return ret;
}
reinit_completion(&qup->xfer);
enable_irq(qup->irq); /* * In FIFO mode, tx FIFO can be written directly while in block mode the * it will be written after getting OUT_BLOCK_WRITE_REQ interrupt
*/ if (!blk->is_tx_blk_mode) {
blk->tx_fifo_free = qup->out_fifo_sz;
if (is_rx)
qup_i2c_write_rx_tags_v2(qup); else
qup_i2c_write_tx_fifo_v2(qup);
}
ret = qup_i2c_change_state(qup, QUP_RUN_STATE); if (ret) goto err;
ret = qup_i2c_wait_for_complete(qup, msg); if (ret) goto err;
/* Move to pause state for all the transfers, except last one */ if (change_pause_state) {
ret = qup_i2c_change_state(qup, QUP_PAUSE_STATE); if (ret) goto err;
}
err:
disable_irq(qup->irq); return ret;
}
/* * Transfer one read/write message in i2c transfer. It splits the message into * multiple of blk_xfer_limit data length blocks and schedule each * QUP block individually.
*/ staticint qup_i2c_xfer_v2_msg(struct qup_i2c_dev *qup, int msg_id, bool is_rx)
{ int ret = 0; unsignedint data_len, i; struct i2c_msg *msg = qup->msg; struct qup_i2c_block *blk = &qup->blk;
u8 *msg_buf = msg->buf;
/* * QUP v2 supports 3 modes * Programmed IO using FIFO mode : Less than FIFO size * Programmed IO using Block mode : Greater than FIFO size * DMA using BAM : Appropriate for any transaction size but the address should * be DMA applicable * * This function determines the mode which will be used for this transfer. An * i2c transfer contains multiple message. Following are the rules to determine * the mode used. * 1. Determine complete length, maximum tx and rx length for complete transfer. * 2. If complete transfer length is greater than fifo size then use the DMA * mode. * 3. In FIFO or block mode, tx and rx can operate in different mode so check * for maximum tx and rx length to determine mode.
*/ staticint
qup_i2c_determine_mode_v2(struct qup_i2c_dev *qup, struct i2c_msg msgs[], int num)
{ int idx; bool no_dma = false; unsignedint max_tx_len = 0, max_rx_len = 0, total_len = 0;
/* All i2c_msgs should be transferred using either dma or cpu */ for (idx = 0; idx < num; idx++) { if (msgs[idx].flags & I2C_M_RD)
max_rx_len = max_t(unsignedint, max_rx_len,
msgs[idx].len); else
max_tx_len = max_t(unsignedint, max_tx_len,
msgs[idx].len);
if (is_vmalloc_addr(msgs[idx].buf))
no_dma = true;
/* * The QUP block will issue a NACK and STOP on the bus when reaching * the end of the read, the length of the read is specified as one byte * which limits the possible read to 256 (QUP_READ_LIMIT) bytes.
*/ staticconststruct i2c_adapter_quirks qup_i2c_quirks = {
.flags = I2C_AQ_NO_ZERO_LEN,
.max_read_len = QUP_READ_LIMIT,
};
qup->brx.sg = devm_kcalloc(&pdev->dev,
blocks, sizeof(*qup->brx.sg),
GFP_KERNEL); if (!qup->brx.sg) {
ret = -ENOMEM; goto fail_dma;
}
sg_init_table(qup->brx.sg, blocks);
/* 2 tag bytes for each block + 5 for start, stop tags */
size = blocks * 2 + 5;
qup->start_tag.start = devm_kzalloc(&pdev->dev,
size, GFP_KERNEL); if (!qup->start_tag.start) {
ret = -ENOMEM; goto fail_dma;
}
qup->brx.tag.start = devm_kzalloc(&pdev->dev, 2, GFP_KERNEL); if (!qup->brx.tag.start) {
ret = -ENOMEM; goto fail_dma;
}
qup->btx.tag.start = devm_kzalloc(&pdev->dev, 2, GFP_KERNEL); if (!qup->btx.tag.start) {
ret = -ENOMEM; goto fail_dma;
}
qup->is_dma = true;
qup->icc_path = devm_of_icc_get(&pdev->dev, NULL); if (IS_ERR(qup->icc_path)) return dev_err_probe(&pdev->dev, PTR_ERR(qup->icc_path), "failed to get interconnect path\n");
}
nodma: /* We support frequencies up to FAST Mode Plus (1MHz) */ if (!clk_freq || clk_freq > I2C_MAX_FAST_MODE_PLUS_FREQ) {
dev_err(qup->dev, "clock frequency not supported %d\n",
clk_freq);
ret = -EINVAL; goto fail_dma;
}
qup->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(qup->base)) {
ret = PTR_ERR(qup->base); goto fail_dma;
}
qup->irq = platform_get_irq(pdev, 0); if (qup->irq < 0) {
ret = qup->irq; goto fail_dma;
}
if (has_acpi_companion(qup->dev)) {
ret = device_property_read_u32(qup->dev, "src-clock-hz", &src_clk_freq); if (ret) {
dev_notice(qup->dev, "using default src-clock-hz %d",
DEFAULT_SRC_CLK);
}
ACPI_COMPANION_SET(&qup->adap.dev, ACPI_COMPANION(qup->dev));
} else {
qup->clk = devm_clk_get(qup->dev, "core"); if (IS_ERR(qup->clk)) {
dev_err(qup->dev, "Could not get core clock\n");
ret = PTR_ERR(qup->clk); goto fail_dma;
}
qup->pclk = devm_clk_get(qup->dev, "iface"); if (IS_ERR(qup->pclk)) {
dev_err(qup->dev, "Could not get iface clock\n");
ret = PTR_ERR(qup->pclk); goto fail_dma;
}
qup_i2c_enable_clocks(qup);
src_clk_freq = clk_get_rate(qup->clk);
}
qup->src_clk_freq = src_clk_freq;
/* * Bootloaders might leave a pending interrupt on certain QUP's, * so we reset the core before registering for interrupts.
*/
writel(1, qup->base + QUP_SW_RESET);
ret = qup_i2c_poll_state_valid(qup); if (ret) goto fail;
/* * The block/fifo size w.r.t. 'actual data' is 1/2 due to 'tag' * associated with each byte written/received
*/
size = QUP_OUTPUT_BLOCK_SIZE(io_mode); if (size >= ARRAY_SIZE(blk_sizes)) {
ret = -EIO; goto fail;
}
qup->out_blk_sz = blk_sizes[size];
size = QUP_INPUT_BLOCK_SIZE(io_mode); if (size >= ARRAY_SIZE(blk_sizes)) {
ret = -EIO; goto fail;
}
qup->in_blk_sz = blk_sizes[size];
if (is_qup_v1) { /* * in QUP v1, QUP_CONFIG uses N as 15 i.e 16 bits constitutes a * single transfer but the block size is in bytes so divide the * in_blk_sz and out_blk_sz by 2
*/
qup->in_blk_sz /= 2;
qup->out_blk_sz /= 2;
qup->write_tx_fifo = qup_i2c_write_tx_fifo_v1;
qup->read_rx_fifo = qup_i2c_read_rx_fifo_v1;
qup->write_rx_tags = qup_i2c_write_rx_tags_v1;
} else {
qup->write_tx_fifo = qup_i2c_write_tx_fifo_v2;
qup->read_rx_fifo = qup_i2c_read_rx_fifo_v2;
qup->write_rx_tags = qup_i2c_write_rx_tags_v2;
}
/* * Time it takes for a byte to be clocked out on the bus. * Each byte takes 9 clock cycles (8 bits + 1 ack).
*/
one_bit_t = (USEC_PER_SEC / clk_freq) + 1;
qup->one_byte_t = one_bit_t * 9;
qup->xfer_timeout = TOUT_MIN * HZ +
usecs_to_jiffies(MX_DMA_TX_RX_LEN * qup->one_byte_t);
/*
 * NOTE(review): the following unrelated German website-disclaimer text was
 * accidentally appended during extraction; preserved here as a comment so
 * the file remains compilable:
 * "Die Informationen auf dieser Webseite wurden nach bestem Wissen
 *  sorgfaeltig zusammengestellt. Es wird jedoch weder Vollstaendigkeit, noch
 *  Richtigkeit, noch Qualitaet der bereitgestellten Informationen
 *  zugesichert. Bemerkung: Die farbliche Syntaxdarstellung und die Messung
 *  sind noch experimentell."
 */