/*
 * NOTE(review): fragment of the PIO init routine — the function header
 * precedes this chunk and line breaks were mangled by extraction;
 * verify against the full source before editing.
 *
 * Data thresholds are set to half of the actual FIFO size.  The start
 * thresholds aren't used (set to 0) as the FIFO is always serviced
 * before the corresponding command is queued.
 */
rx_thresh = FIELD_GET(RX_DATA_BUFFER_SIZE, size_val);
tx_thresh = FIELD_GET(TX_DATA_BUFFER_SIZE, size_val); if (hci->version_major == 1) { /* v1.x encodes the size as 2^(n+1): subtract 1 when non-zero to halve it */ if (rx_thresh)
rx_thresh -= 1; if (tx_thresh)
tx_thresh -= 1;
pio->rx_thresh_size = 2 << rx_thresh;
pio->tx_thresh_size = 2 << tx_thresh;
} else { /* size is 2^(n+1) and threshold is 2^n i.e. already halved */
pio->rx_thresh_size = 1 << rx_thresh;
pio->tx_thresh_size = 1 << tx_thresh;
}
val = FIELD_PREP(DATA_RX_BUF_THLD, rx_thresh) |
FIELD_PREP(DATA_TX_BUF_THLD, tx_thresh);
pio_reg_write(DATA_BUFFER_THLD_CTRL, val);
/*
 * Raise an interrupt as soon as there is one free cmd slot or one
 * available response or IBI.  For IBI data use half the IBI queue
 * size, clamped to the allowed [1, 63] range below.
 */
ibi_val = FIELD_GET(IBI_STATUS_SIZE, size_val);
pio->max_ibi_thresh = clamp_val(ibi_val/2, 1, 63);
val = FIELD_PREP(QUEUE_IBI_STATUS_THLD, 1) |
FIELD_PREP(QUEUE_IBI_DATA_THLD, pio->max_ibi_thresh) |
FIELD_PREP(QUEUE_RESP_BUF_THLD, 1) |
FIELD_PREP(QUEUE_CMD_EMPTY_BUF_THLD, 1);
pio_reg_write(QUEUE_THLD_CTRL, val);
pio->reg_queue_thresh = val;
/* Disable all IRQ signals but let every status bit be recorded */
pio_reg_write(INTR_SIGNAL_ENABLE, 0x0);
pio_reg_write(INTR_STATUS_ENABLE, 0xffffffff);
/* Always accept error interrupts (will be activated on first xfer) */
pio->enabled_irqs = STAT_ALL_ERRORS;
/*
 * NOTE(review): fragment — tail of the RX FIFO drain routine; its
 * function header is missing from this chunk.
 */
/* point p at the first word of xfer->data not yet filled in */
p = xfer->data;
p += (xfer->data_len - xfer->data_left) / 4;
if (count >= 4) { unsignedint nr_words = count / 4; /* extract whole words from the FIFO */
xfer->data_left -= nr_words * 4;
DBG("now %d left %d", nr_words * 4, xfer->data_left); while (nr_words--)
*p++ = pio_reg_read(XFER_DATA_PORT);
}
count &= 3; if (count) { /* * There are trailing bytes in the last word. * Fetch it and extract bytes in an endian independent way. * Unlike the TX case, we must not write memory past the * end of the destination buffer.
 */
u8 *p_byte = (u8 *)p;
u32 data = pio_reg_read(XFER_DATA_PORT);
/* keep the whole word: a later xfer may still need the bytes dropped here */
xfer->data_word_before_partial = data;
xfer->data_left -= count;
/* the le32 view makes the first-received byte the lowest one on any endianness */
data = (__force u32) cpu_to_le32(data); while (count--) {
*p_byte++ = data;
data >>= 8;
}
}
}
/*
 * NOTE(review): fragment — tail of the TX FIFO fill routine; its
 * function header is missing from this chunk.
 */
/* point p at the first word of xfer->data not yet pushed out */
p = xfer->data;
p += (xfer->data_len - xfer->data_left) / 4;
while (xfer->data_left >= 4) { /* bail out if FIFO free space is below set threshold */ if (!(pio_reg_read(INTR_STATUS) & STAT_TX_THLD)) returnfalse; /* we can fill up to that TX threshold */
nr_words = min(xfer->data_left / 4, pio->tx_thresh_size); /* push data into the FIFO */
xfer->data_left -= nr_words * 4;
DBG("now %d left %d", nr_words * 4, xfer->data_left); while (nr_words--)
pio_reg_write(XFER_DATA_PORT, *p++);
}
if (xfer->data_left) { /* * There are trailing bytes to send. We can simply load * them from memory as a word which will keep those bytes * in their proper place even on a BE system. This will * also get some bytes past the actual buffer but no one * should care as they won't be sent out.
 */ if (!(pio_reg_read(INTR_STATUS) & STAT_TX_THLD)) returnfalse;
DBG("trailing %d", xfer->data_left);
pio_reg_write(XFER_DATA_PORT, *p);
xfer->data_left = 0;
}
/*
 * NOTE(review): garbled fragment — the first lines below belong to the
 * partial-word push helper, the rest to the response processing loop;
 * the function headers and several intervening lines were lost in
 * extraction.  Verify against the full source before editing.
 */
last_word = (count == 1) ? from_last : *from++; if (xfer->data_left < 4) { /* * Like in hci_pio_do_trailing_rx(), preserve original * word to be stored partially then store bytes it * in an endian independent way.
 */
u8 *p_byte = xfer->data;
/* a response was popped: make sure it matches the expected command TID */
DBG("resp = 0x%08x", resp); if (tid != xfer->cmd_tid) {
dev_err(&hci->master.dev, "response tid=%d when expecting %d\n",
tid, xfer->cmd_tid); /* let's pretend it is a prog error... any of them */
hci_pio_err(hci, pio, STAT_PROG_ERRORS); returnfalse;
}
xfer->response = resp;
if (pio->curr_rx == xfer) { /* * Response availability implies RX completion. * Retrieve trailing RX data if any. * Note that short reads are possible.
 */ unsignedint received, expected, to_keep;
received = xfer->data_len - xfer->data_left;
expected = RESP_DATA_LENGTH(xfer->response); if (expected > received) {
hci_pio_do_trailing_rx(hci, pio,
expected - received);
} elseif (received > expected) { /* we consumed data meant for next xfer */
to_keep = DIV_ROUND_UP(expected, 4);
hci_pio_push_to_next_rx(hci, xfer, to_keep);
}
/* then process the RX list pointer */ if (hci_pio_process_rx(hci, pio))
pio->enabled_irqs &= ~STAT_RX_THLD;
}
/* * We're about to give back ownership of the xfer structure * to the waiting instance. Make sure no reference to it * still exists.
 */ if (pio->curr_rx == xfer) {
DBG("short RX ?");
pio->curr_rx = pio->curr_rx->next_data;
} elseif (pio->curr_tx == xfer) {
DBG("short TX ?");
pio->curr_tx = pio->curr_tx->next_data;
} elseif (xfer->data_left) {
DBG("PIO xfer count = %d after response",
xfer->data_left);
}
/* hand the xfer back to its submitter */
pio->curr_resp = xfer->next_resp; if (xfer->completion)
complete(xfer->completion);
} return !pio->curr_resp;
}
staticbool hci_pio_process_cmd(struct i3c_hci *hci, struct hci_pio_data *pio)
{ while (pio->curr_xfer &&
(pio_reg_read(INTR_STATUS) & STAT_CMD_QUEUE_READY)) { /* * Always process the data FIFO before sending the command * so needed TX data or RX space is available upfront.
*/
hci_pio_queue_data(hci, pio); /* * Then queue our response request. This will also process * the response FIFO in case it got suddenly filled up * with results from previous commands.
*/
hci_pio_queue_resp(hci, pio); /* * Finally send the command.
*/
hci_pio_write_cmd(hci, pio->curr_xfer); /* * And move on.
*/
pio->curr_xfer = pio->curr_xfer->next_xfer;
} return !pio->curr_xfer;
}
/*
 * NOTE(review): this function is truncated in this chunk — the body
 * continues past the initialization loop below (presumably taking
 * pio->lock and appending to the command queue, given the unused
 * prev_queue_tail local); verify against the full source.
 */
staticint hci_pio_queue_xfer(struct i3c_hci *hci, struct hci_xfer *xfer, int n)
{ struct hci_pio_data *pio = hci->io_data; struct hci_xfer *prev_queue_tail; int i;
DBG("n = %d", n);
/* link xfer instances together and initialize data count */ for (i = 0; i < n; i++) {
xfer[i].next_xfer = (i + 1 < n) ? &xfer[i + 1] : NULL;
xfer[i].next_data = NULL;
xfer[i].next_resp = NULL;
xfer[i].data_left = xfer[i].data_len;
}
staticbool hci_pio_dequeue_xfer_common(struct i3c_hci *hci, struct hci_pio_data *pio, struct hci_xfer *xfer, int n)
{ struct hci_xfer *p, **p_prev_next; int i;
/* * To safely dequeue a transfer request, it must be either entirely * processed, or not yet processed at all. If our request tail is * reachable from either the data or resp list that means the command * was submitted and not yet completed.
*/ for (p = pio->curr_resp; p; p = p->next_resp) for (i = 0; i < n; i++) if (p == &xfer[i]) goto pio_screwed; for (p = pio->curr_rx; p; p = p->next_data) for (i = 0; i < n; i++) if (p == &xfer[i]) goto pio_screwed; for (p = pio->curr_tx; p; p = p->next_data) for (i = 0; i < n; i++) if (p == &xfer[i]) goto pio_screwed;
/* * The command was completed, or wasn't yet submitted. * Unlink it from the que if the later.
*/
p_prev_next = &pio->curr_xfer; for (p = pio->curr_xfer; p; p = p->next_xfer) { if (p == &xfer[0]) {
*p_prev_next = xfer[n - 1].next_xfer; break;
}
p_prev_next = &p->next_xfer;
}
/* return true if we actually unqueued something */ return !!p;
pio_screwed: /* * Life is tough. We must invalidate the hardware state and * discard everything that is still queued.
*/ for (p = pio->curr_resp; p; p = p->next_resp) {
p->response = FIELD_PREP(RESP_ERR_FIELD, RESP_ERR_HC_TERMINATED); if (p->completion)
complete(p->completion);
} for (p = pio->curr_xfer; p; p = p->next_xfer) {
p->response = FIELD_PREP(RESP_ERR_FIELD, RESP_ERR_HC_TERMINATED); if (p->completion)
complete(p->completion);
}
pio->curr_xfer = pio->curr_rx = pio->curr_tx = pio->curr_resp = NULL;
returntrue;
}
staticbool hci_pio_dequeue_xfer(struct i3c_hci *hci, struct hci_xfer *xfer, int n)
{ struct hci_pio_data *pio = hci->io_data; int ret;
ret = hci_pio_dequeue_xfer_common(hci, pio, xfer, n);
spin_unlock_irq(&pio->lock); return ret;
}
/*
 * Error recovery path for the PIO backend.
 *
 * NOTE(review): truncated — the tail of this handler (after the
 * controller resume, presumably the closing brace and IRQ re-enable)
 * is missing from this chunk.  The queue/data locals below are read
 * but never used here; the dump that consumed them was likely lost
 * in extraction too.
 */
staticvoid hci_pio_err(struct i3c_hci *hci, struct hci_pio_data *pio,
u32 status)
{ /* TODO: this ought to be more sophisticated eventually */
if (pio_reg_read(INTR_STATUS) & STAT_RESP_READY) { /* this may happen when an error is signaled with ROC unset */
u32 resp = pio_reg_read(RESPONSE_QUEUE_PORT);
dev_err(&hci->master.dev, "orphan response (%#x) on error\n", resp);
}
/* dump states on programming errors */ if (status & STAT_PROG_ERRORS) {
u32 queue = pio_reg_read(QUEUE_CUR_STATUS);
u32 data = pio_reg_read(DATA_BUFFER_CUR_STATUS);
/* just bust out everything with pending responses for now */
hci_pio_dequeue_xfer_common(hci, pio, pio->curr_resp, 1); /* ... and half-way TX transfers if any */ if (pio->curr_tx && pio->curr_tx->data_left != pio->curr_tx->data_len)
hci_pio_dequeue_xfer_common(hci, pio, pio->curr_tx, 1); /* then reset the hardware */
mipi_i3c_hci_pio_reset(hci);
mipi_i3c_hci_resume(hci);
/*
 * NOTE(review): fragment — the first part below is the interior of the
 * IBI segment retrieval helper, the second part the IBI processing
 * loop; both function headers were lost in extraction.  Verify against
 * the full source before editing.
 */
/* point p at the first word of the segment not yet received */
p = ibi->data_ptr;
p += (ibi->seg_len - ibi->seg_cnt) / 4;
while ((nr_words = ibi->seg_cnt/4)) { /* determine our IBI queue threshold value */
thresh_val = min(nr_words, pio->max_ibi_thresh);
hci_pio_set_ibi_thresh(hci, pio, thresh_val); /* bail out if we don't have that amount of data ready */ if (!(pio_reg_read(INTR_STATUS) & STAT_IBI_STATUS_THLD)) returnfalse; /* extract the data from the IBI port */
nr_words = thresh_val;
ibi->seg_cnt -= nr_words * 4;
DBG("now %d left %d", nr_words * 4, ibi->seg_cnt); while (nr_words--)
*p++ = pio_reg_read(IBI_PORT);
}
if (ibi->seg_cnt) { /* * There are trailing bytes in the last word. * Fetch it and extract bytes in an endian independent way. * Unlike the TX case, we must not write past the end of * the destination buffer.
 */
u32 data;
u8 *p_byte = (u8 *)p;
hci_pio_set_ibi_thresh(hci, pio, 1); if (!(pio_reg_read(INTR_STATUS) & STAT_IBI_STATUS_THLD)) returnfalse;
DBG("trailing %d", ibi->seg_cnt);
data = pio_reg_read(IBI_PORT);
/* the le32 view makes the first-received byte the lowest one */
data = (__force u32) cpu_to_le32(data); while (ibi->seg_cnt--) {
*p_byte++ = data;
data >>= 8;
}
}
/* * We have a new IBI. Try to set up its payload retrieval. * When returning true, the IBI data has to be consumed whether * or not we are set up to capture it. If we return true with * ibi->slot == NULL that means the data payload has to be * drained out of the IBI port and dropped.
 */
if (!ibi->slot && !ibi->seg_cnt && ibi->last_seg) if (!hci_pio_prep_new_ibi(hci, pio)) returnfalse;
for (;;) {
u32 ibi_status; unsignedint ibi_addr;
if (ibi->slot) { if (!hci_pio_get_ibi_segment(hci, pio)) returnfalse;
ibi->slot->len += ibi->seg_len;
ibi->data_ptr += ibi->seg_len; if (ibi->last_seg) { /* was the last segment: submit it and leave */
i3c_master_queue_ibi(ibi->slot->dev, ibi->slot);
ibi->slot = NULL;
hci_pio_set_ibi_thresh(hci, pio, 1); returntrue;
}
} elseif (ibi->seg_cnt) { /* * No slot but a non-zero count. This is the result * of some error and the payload must be drained. * This normally does not happen therefore no need * to be extra optimized here.
 */
hci_pio_set_ibi_thresh(hci, pio, 1); do { if (!(pio_reg_read(INTR_STATUS) & STAT_IBI_STATUS_THLD)) returnfalse;
pio_reg_read(IBI_PORT);
} while (--ibi->seg_cnt); if (ibi->last_seg) returntrue;
}
/* try to move to the next segment right away */
hci_pio_set_ibi_thresh(hci, pio, 1); if (!(pio_reg_read(INTR_STATUS) & STAT_IBI_STATUS_THLD)) returnfalse;
ibi_status = pio_reg_read(IBI_PORT);
ibi_addr = FIELD_GET(IBI_TARGET_ADDR, ibi_status); if (ibi->addr != ibi_addr) { /* target address changed before last segment */
dev_err(&hci->master.dev, "unexp IBI address changed from %d to %d\n",
ibi->addr, ibi_addr);
hci_pio_free_ibi_slot(hci, pio);
}
/* latch the next segment's status before looping */
ibi->last_seg = ibi_status & IBI_LAST_STATUS;
ibi->seg_len = FIELD_GET(IBI_DATA_LENGTH, ibi_status);
ibi->seg_cnt = ibi->seg_len; if (ibi->slot && ibi->slot->len + ibi->seg_len > ibi->max_len) {
dev_err(&hci->master.dev, "IBI payload too big (%d > %d)\n",
ibi->slot->len + ibi->seg_len, ibi->max_len);
hci_pio_free_ibi_slot(hci, pio);
}
}
/*
 * NOTE(review): interior of the PIO interrupt handler — both the
 * function header and the tail (unlock/return) are missing from
 * this chunk.
 */
spin_lock(&pio->lock);
status = pio_reg_read(INTR_STATUS);
DBG("(in) status: %#x/%#x", status, pio->enabled_irqs);
/* only act on enabled interrupt sources (plus latency warnings) */
status &= pio->enabled_irqs | STAT_LATENCY_WARNINGS; if (!status) {
spin_unlock(&pio->lock); returnfalse;
}
if (status & STAT_IBI_STATUS_THLD)
hci_pio_process_ibi(hci, pio);
/*
 * Each processor below returns true when its work list is fully
 * drained, in which case the corresponding IRQ source is masked
 * until new work is queued.
 */
if (status & STAT_RX_THLD) if (hci_pio_process_rx(hci, pio))
pio->enabled_irqs &= ~STAT_RX_THLD; if (status & STAT_TX_THLD) if (hci_pio_process_tx(hci, pio))
pio->enabled_irqs &= ~STAT_TX_THLD; if (status & STAT_RESP_READY) if (hci_pio_process_resp(hci, pio))
pio->enabled_irqs &= ~STAT_RESP_READY;
if (unlikely(status & STAT_LATENCY_WARNINGS)) {
/* warnings are write-1-to-clear and only logged, never fatal */
pio_reg_write(INTR_STATUS, status & STAT_LATENCY_WARNINGS);
dev_warn_ratelimited(&hci->master.dev, "encountered warning condition %#lx\n",
status & STAT_LATENCY_WARNINGS);
}
if (unlikely(status & STAT_ALL_ERRORS)) {
pio_reg_write(INTR_STATUS, status & STAT_ALL_ERRORS);
hci_pio_err(hci, pio, status & STAT_ALL_ERRORS);
}
if (status & STAT_CMD_QUEUE_READY) if (hci_pio_process_cmd(hci, pio))
pio->enabled_irqs &= ~STAT_CMD_QUEUE_READY;
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.