// SPDX-License-Identifier: BSD-3-Clause /* * Copyright (c) 2020, MIPI Alliance, Inc. * * Author: Nicolas Pitre <npitre@baylibre.com> * * Note: The I3C HCI v2.0 spec is still in flux. The IBI support is based on * v1.x of the spec and v2.0 will likely be split out.
*/
/*
 * NOTE(review): fragment — the enclosing setup function's signature and
 * the `err_out` label live outside this view.  Code kept byte-identical;
 * comments only.
 *
 * Size each IBI data chunk as a whole number of cache lines so DMA
 * cache maintenance operates on full lines.
 */
rh->ibi_chunk_sz = dma_get_cache_alignment();
rh->ibi_chunk_sz *= IBI_CHUNK_CACHELINES;
/*
 * Round IBI data chunk size to number of bytes supported by
 * the HW. Chunk size can be 2^n number of DWORDs which is the
 * same as 2^(n+2) bytes, where n is 0..6.
 */
/* enforce the HW floor (4 bytes, n=0) then round up to a power of two;
 * anything above the HW ceiling (256 bytes, n=6) is unsupported */
rh->ibi_chunk_sz = umax(4, rh->ibi_chunk_sz);
rh->ibi_chunk_sz = roundup_pow_of_two(rh->ibi_chunk_sz);
if (rh->ibi_chunk_sz > 256) {
	ret = -EINVAL;
	goto err_out;
}
/*
 * NOTE(review): fragment of the transfer-enqueue loop; the enclosing
 * function, the loop header, and the declarations of `ring_data`, `i`,
 * `n`, `buf` are outside this view.  Code kept byte-identical;
 * comments only.
 */
/* first word of Data Buffer Descriptor Structure */
/* a NULL data pointer means a zero-length transfer */
if (!xfer->data)
	xfer->data_len = 0;
/* DATA_BUF_IOC on the last entry presumably requests an interrupt on
 * completion of the whole batch — TODO confirm against the HCI spec */
*ring_data++ =
	FIELD_PREP(DATA_BUF_BLOCK_SIZE, xfer->data_len) |
	((i == n - 1) ? DATA_BUF_IOC : 0);
/* 2nd and 3rd words of Data Buffer Descriptor Structure */
if (xfer->data) {
	/* prefer the bounce buffer when one was set up for this xfer */
	buf = xfer->bounce_buf ? xfer->bounce_buf : xfer->data;
	/* map in the direction implied by rnw (read-not-write) */
	xfer->data_dma =
		dma_map_single(&hci->master.dev,
			       buf,
			       xfer->data_len,
			       xfer->rnw ?
				DMA_FROM_DEVICE :
				DMA_TO_DEVICE);
	if (dma_mapping_error(&hci->master.dev,
			      xfer->data_dma)) {
		/* undo the mappings of the entries handled so far
		 * (presumably the first i entries — confirm against
		 * hci_dma_unmap_xfer) */
		hci_dma_unmap_xfer(hci, xfer_list, i);
		return -ENOMEM;
	}
	/* 64-bit DMA address split across the two descriptor words */
	*ring_data++ = lower_32_bits(xfer->data_dma);
	*ring_data++ = upper_32_bits(xfer->data_dma);
} else {
	/* no data: zero both address words */
	*ring_data++ = 0;
	*ring_data++ = 0;
}
/*
 * We may update the hardware view of the enqueue pointer
 * only if we didn't reach its dequeue pointer.
 */
op2_val = rh_reg_read(RING_OPERATION2);
if (enqueue_ptr == FIELD_GET(RING_OP2_CR_DEQ_PTR, op2_val)) {
	/* the ring is full */
	/* i + 1: presumably includes the entry just mapped in this
	 * iteration — confirm against hci_dma_unmap_xfer */
	hci_dma_unmap_xfer(hci, xfer_list, i + 1);
	return -EBUSY;
}
}
/* take care to update the hardware enqueue pointer atomically */
spin_lock_irq(&rh->lock);
/* read-modify-write: replace only the ENQ_PTR field of OPERATION1 */
op1_val = rh_reg_read(RING_OPERATION1);
op1_val &= ~RING_OP1_CR_ENQ_PTR;
op1_val |= FIELD_PREP(RING_OP1_CR_ENQ_PTR, enqueue_ptr);
rh_reg_write(RING_OPERATION1, op1_val);
spin_unlock_irq(&rh->lock);
/*
 * NOTE(review): fragment — interior of an abort/dequeue path; the
 * enclosing function's signature and the end of this loop are outside
 * this view.  Code kept byte-identical; comments only.
 */
/* stop the ring */
rh_reg_write(RING_CONTROL, RING_CTRL_ABORT);
/* rh->op_done is presumably completed from the ring-op interrupt —
 * TODO confirm; timeout is HZ jiffies (one second) */
if (wait_for_completion_timeout(&rh->op_done, HZ) == 0) {
	/*
	 * We're deep in it if ever this condition is ever met.
	 * Hardware might still be writing to memory, etc.
	 */
	dev_crit(&hci->master.dev, "unable to abort the ring\n");
	WARN_ON(1);
}
for (i = 0; i < n; i++) {
	struct hci_xfer *xfer = xfer_list + i;
	int idx = xfer->ring_entry;
	/*
	 * At the time the abort happened, the xfer might have
	 * completed already. If not then replace corresponding
	 * descriptor entries with a no-op.
	 */
	if (idx >= 0) {
		/* locate this xfer's descriptor within the xfer ring */
		u32 *ring_data = rh->xfer + rh->xfer_struct_sz * idx;
/*
 * NOTE(review): fragment of the IBI-processing path; the enclosing
 * function starts and ends outside this view, and lines appear to be
 * missing from this extract — the scan loop below never closes before
 * unrelated code resumes.  Code kept byte-identical; comments only.
 */
/* let's find all we can about this IBI */
for (ptr = deq_ptr; ptr != enq_ptr;
ptr = (ptr + 1) % rh->ibi_status_entries) {
/* NOTE(review): "unsignedint" is missing a space — upstream is almost
 * certainly "unsigned int"; confirm against the original source */
u32 ibi_status, *ring_ibi_status; unsignedint chunks;
/* last_ptr == -1 means no LAST_STATUS entry was seen yet */
if (last_ptr == -1) {
	/* this IBI sequence is not yet complete */
	DBG("no LAST_STATUS available (e=%d d=%d)", enq_ptr, deq_ptr);
	return;
}
/* consume status entries up to and including the LAST one */
deq_ptr = last_ptr + 1;
deq_ptr %= rh->ibi_status_entries;
if (ibi_status_error) {
	dev_err(&hci->master.dev, "IBI error from %#x\n", ibi_addr);
	goto done;
}
/* determine who this is for */
dev = i3c_hci_addr_to_dev(hci, ibi_addr);
if (!dev) {
	dev_err(&hci->master.dev, "IBI for unknown device %#x\n", ibi_addr);
	goto done;
}
dev_data = i3c_dev_get_master_data(dev);
dev_ibi = dev_data->ibi_data;
/* reject payloads larger than the consumer's declared maximum */
if (ibi_size > dev_ibi->max_len) {
	dev_err(&hci->master.dev, "IBI payload too big (%d > %d)\n",
		ibi_size, dev_ibi->max_len);
	goto done;
}
/*
 * This ring model is not suitable for zero-copy processing of IBIs.
 * We have the data chunk ring wrap-around to deal with, meaning
 * that the payload might span multiple chunks beginning at the
 * end of the ring and wrap to the start of the ring. Furthermore
 * there is no guarantee that those chunks will be released in order
 * and in a timely manner by the upper driver. So let's just copy
 * them to a discrete buffer. In practice they're supposed to be
 * small anyway.
 */
slot = i3c_generic_ibi_get_free_slot(dev_ibi->pool);
if (!slot) {
	dev_err(&hci->master.dev, "no free slot for IBI\n");
	goto done;
}
/* copy first part of the payload */
ibi_data_offset = rh->ibi_chunk_sz * rh->ibi_chunk_ptr;
ring_ibi_data = rh->ibi_data + ibi_data_offset;
ring_ibi_data_dma = rh->ibi_data_dma + ibi_data_offset;
/* bytes available before the chunk ring wraps back to its start */
first_part = (rh->ibi_chunks_total - rh->ibi_chunk_ptr)
	* rh->ibi_chunk_sz;
if (first_part > ibi_size)
	first_part = ibi_size;
/* hand CPU ownership of the DMA'd bytes before reading them */
dma_sync_single_for_cpu(&hci->master.dev, ring_ibi_data_dma,
			first_part, DMA_FROM_DEVICE);
memcpy(slot->data, ring_ibi_data, first_part);
/* copy second part if any */
if (ibi_size > first_part) {
	/* we wrap back to the start and copy remaining data */
	ring_ibi_data = rh->ibi_data;
	ring_ibi_data_dma = rh->ibi_data_dma;
	dma_sync_single_for_cpu(&hci->master.dev, ring_ibi_data_dma,
				ibi_size - first_part, DMA_FROM_DEVICE);
	memcpy(slot->data + first_part, ring_ibi_data,
	       ibi_size - first_part);
}
/*
 * NOTE(review): fragment of the interrupt handler; the enclosing
 * function and the end of this loop are outside this view.  Code kept
 * byte-identical; comments only.
 *
 * For each ring header: read its interrupt status, acknowledge it,
 * then dispatch each pending condition.
 */
for (i = 0; i < rings->total; i++) {
	struct hci_rh_data *rh;
	u32 status;
	rh = &rings->headers[i];
	status = rh_reg_read(INTR_STATUS);
	DBG("rh%d status: %#x", i, status);
	if (!status)
		continue;
	/* acknowledge everything we are about to handle */
	rh_reg_write(INTR_STATUS, status);
	if (status & INTR_IBI_READY)
		hci_dma_process_ibi(hci, rh);
	if (status & (INTR_TRANSFER_COMPLETION | INTR_TRANSFER_ERR))
		hci_dma_xfer_done(hci, rh);
	if (status & INTR_RING_OP)
		/* signal completion of a pending ring operation */
		complete(&rh->op_done);
	if (status & INTR_TRANSFER_ABORT) {
		u32 ring_status;
		dev_notice_ratelimited(&hci->master.dev,
				       "ring %d: Transfer Aborted\n", i);
		mipi_i3c_hci_resume(hci);
		ring_status = rh_reg_read(RING_STATUS);
		if (!(ring_status & RING_STATUS_RUNNING) &&
		    status & INTR_TRANSFER_COMPLETION &&
		    status & INTR_TRANSFER_ERR) {
			/*
			 * Ring stop followed by run is an Intel
			 * specific required quirk after resuming the
			 * halted controller. Do it only when the ring
			 * is not in running state after a transfer
			 * error.
			 */
			rh_reg_write(RING_CONTROL, RING_CTRL_ENABLE);
			rh_reg_write(RING_CONTROL, RING_CTRL_ENABLE |
						   RING_CTRL_RUN_STOP);
		}
	}
	if (status & INTR_WARN_INS_STOP_MODE)
		dev_warn_ratelimited(&hci->master.dev,
				     "ring %d: Inserted Stop on Mode Change\n", i);
	if (status & INTR_IBI_RING_FULL)
		dev_err_ratelimited(&hci->master.dev,
				    "ring %d: IBI Ring Full Condition\n", i);
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.