/*
 * NOTE(review): fragment of the driver state structure — the enclosing
 * "struct spi_qup {" header is not visible in this chunk; confirm field
 * grouping against the full file.
 */
int in_fifo_sz;			/* input FIFO size, bytes (compared against xfer->len) */
int out_fifo_sz;		/* output FIFO size, bytes */
int in_blk_sz;			/* input block size, bytes */
int out_blk_sz;			/* output block size, bytes */
struct spi_transfer *xfer;	/* transfer currently in flight */
struct completion done;		/* signalled when the transfer finishes or errors */
int error;			/* first error seen during the transfer, 0 if none */
int w_size; /* bytes per SPI word */
int n_words;			/* total SPI words in the current transfer */
int tx_bytes;			/* bytes already pushed to the output FIFO */
int rx_bytes;			/* bytes already drained from the input FIFO */
const u8 *tx_buf;		/* transmit data; may be NULL (rx-only transfer) */
u8 *rx_buf;			/* receive buffer; may be NULL (tx-only transfer) */
int qup_v1;			/* nonzero on QUP v1 hardware (different error/irq setup) */
int mode;			/* current QUP_IO_M_MODE_* I/O mode */
struct dma_slave_config rx_conf;	/* DMA slave config for the RX channel */
struct dma_slave_config tx_conf;	/* DMA slave config for the TX channel */
/*
 * NOTE(review): tail of the FIFO read routine — the function signature and
 * the setup of remainder/words_per_block/is_block_mode are above this chunk.
 * Fix: the mangled token "gotoexit;" is restored to "goto exit;".
 */
do {
	/* ACK by clearing service flag */
	writel_relaxed(QUP_OP_IN_SERVICE_FLAG,
		       controller->base + QUP_OPERATIONAL);

	if (!remainder)
		goto exit;

	if (is_block_mode) {
		num_words = (remainder > words_per_block) ?
				words_per_block : remainder;
	} else {
		if (!spi_qup_is_flag_set(controller,
					 QUP_OP_IN_FIFO_NOT_EMPTY))
			break;

		num_words = 1;
	}

	/* read up to the maximum transfer size available */
	spi_qup_read_from_fifo(controller, num_words);

	remainder -= num_words;

	/* if block mode, check to see if next block is available */
	if (is_block_mode && !spi_qup_is_flag_set(controller,
				QUP_OP_IN_BLOCK_READ_REQ))
		break;

} while (remainder);

/*
 * Due to extra stickiness of the QUP_OP_IN_SERVICE_FLAG during block
 * reads, it has to be cleared again at the very end.  However, be sure
 * to refresh opflags value because MAX_INPUT_DONE_FLAG may now be
 * present and this is used to determine if transaction is complete
 */
exit:
if (!remainder) {
	*opflags = readl_relaxed(controller->base + QUP_OPERATIONAL);
	if (is_block_mode && *opflags & QUP_OP_MAX_INPUT_DONE_FLAG)
		writel_relaxed(QUP_OP_IN_SERVICE_FLAG,
			       controller->base + QUP_OPERATIONAL);
}
}
/*
 * spi_qup_write_to_fifo() - pack and push up to @num_words SPI words into
 * the controller's output FIFO.
 *
 * Each 32-bit FIFO word is assembled MSB-first: payload byte i lands at bit
 * position BITS_PER_BYTE * (3 - i).  At most w_size bytes go into a word,
 * clamped so the final word does not run past the end of the transfer.
 * When tx_buf is NULL, zero words are written so a receive-only transfer
 * still clocks the bus.
 *
 * NOTE(review): the original chunk was truncated after the byte-packing
 * loop and its "staticvoid" token was mangled; the tx_bytes advance and the
 * QUP_OUTPUT_FIFO write below are restored from the upstream driver —
 * confirm against the full file.
 */
static void spi_qup_write_to_fifo(struct spi_qup *controller, u32 num_words)
{
	const u8 *tx_buf = controller->tx_buf;
	int i, num_bytes;
	u32 word, data;

	for (; num_words; num_words--) {
		word = 0;

		/* clamp the last word to the bytes actually remaining */
		num_bytes = min_t(int, spi_qup_len(controller) -
				       controller->tx_bytes,
				       controller->w_size);

		if (tx_buf)
			for (i = 0; i < num_bytes; i++) {
				data = tx_buf[controller->tx_bytes + i];
				word |= data << (BITS_PER_BYTE * (3 - i));
			}

		controller->tx_bytes += num_bytes;

		writel_relaxed(word, controller->base + QUP_OUTPUT_FIFO);
	}
}
/*
 * NOTE(review): fragment from the middle of the DMA transfer path — the
 * enclosing function's signature and the declarations of qup/host/ret,
 * the scatterlists, and the completion callbacks are above this chunk.
 */
/* size this chunk in SPI words from whichever scatterlists are present */
if (rx_sgl)
	qup->n_words = spi_qup_sgl_get_nents_len(rx_sgl,
				SPI_MAX_XFER, &rx_nents) / qup->w_size;
if (tx_sgl)
	qup->n_words = spi_qup_sgl_get_nents_len(tx_sgl,
				SPI_MAX_XFER, &tx_nents) / qup->w_size;
if (!qup->n_words)
	return -EIO;

ret = spi_qup_io_config(spi, xfer);
if (ret)
	return ret;

/* before issuing the descriptors, set the QUP to run */
ret = spi_qup_set_state(qup, QUP_STATE_RUN);
if (ret) {
	dev_warn(qup->dev, "cannot set RUN state\n");
	return ret;
}

if (rx_sgl) {
	ret = spi_qup_prep_sg(host, rx_sgl, rx_nents,
			      DMA_DEV_TO_MEM, rx_done);
	if (ret)
		return ret;
	dma_async_issue_pending(host->dma_rx);
}

if (tx_sgl) {
	ret = spi_qup_prep_sg(host, tx_sgl, tx_nents,
			      DMA_MEM_TO_DEV, tx_done);
	if (ret)
		return ret;
	dma_async_issue_pending(host->dma_tx);
}

/* wait for the DMA callbacks to signal completion of this chunk */
if (!wait_for_completion_timeout(&qup->done, timeout))
	return -ETIMEDOUT;

/* advance both scatterlists past the entries just transferred */
for (; rx_sgl && rx_nents--; rx_sgl = sg_next(rx_sgl))
	;
for (; tx_sgl && tx_nents--; tx_sgl = sg_next(tx_sgl))
	;
/*
 * NOTE(review): fragment from the middle of the PIO transfer path — the
 * enclosing function's signature and the declarations of qup/ret/timeout
 * are above this chunk.
 */
/*
 * if the transaction is small enough, we need
 * to fallback to FIFO mode
 */
if (qup->n_words <= (qup->in_fifo_sz / sizeof(u32)))
	qup->mode = QUP_IO_M_MODE_FIFO;

ret = spi_qup_io_config(spi, xfer);
if (ret)
	return ret;

ret = spi_qup_set_state(qup, QUP_STATE_RUN);
if (ret) {
	dev_warn(qup->dev, "cannot set RUN state\n");
	return ret;
}

/* pause so the output FIFO can be pre-filled before the clock runs */
ret = spi_qup_set_state(qup, QUP_STATE_PAUSE);
if (ret) {
	dev_warn(qup->dev, "cannot set PAUSE state\n");
	return ret;
}

if (qup->mode == QUP_IO_M_MODE_FIFO)
	spi_qup_write(qup);

/* resume; the interrupt handler services the FIFOs from here on */
ret = spi_qup_set_state(qup, QUP_STATE_RUN);
if (ret) {
	dev_warn(qup->dev, "cannot set RUN state\n");
	return ret;
}

if (!wait_for_completion_timeout(&qup->done, timeout))
	return -ETIMEDOUT;
/*
 * NOTE(review): tail of the QUP interrupt handler — the function signature
 * and the register reads that fill qup_err/spi_err/opflags are above this
 * chunk.
 */
if (qup_err) {
	if (qup_err & QUP_ERROR_OUTPUT_OVER_RUN)
		dev_warn(controller->dev, "OUTPUT_OVER_RUN\n");
	if (qup_err & QUP_ERROR_INPUT_UNDER_RUN)
		dev_warn(controller->dev, "INPUT_UNDER_RUN\n");
	if (qup_err & QUP_ERROR_OUTPUT_UNDER_RUN)
		dev_warn(controller->dev, "OUTPUT_UNDER_RUN\n");
	if (qup_err & QUP_ERROR_INPUT_OVER_RUN)
		dev_warn(controller->dev, "INPUT_OVER_RUN\n");
	error = -EIO;
}

if (spi_err) {
	if (spi_err & SPI_ERROR_CLK_OVER_RUN)
		dev_warn(controller->dev, "CLK_OVER_RUN\n");
	if (spi_err & SPI_ERROR_CLK_UNDER_RUN)
		dev_warn(controller->dev, "CLK_UNDER_RUN\n");
	error = -EIO;
}

/* keep only the first error recorded for this transfer */
spin_lock(&controller->lock);
if (!controller->error)
	controller->error = error;
spin_unlock(&controller->lock);

if (spi_qup_is_dma_xfer(controller->mode)) {
	/* DMA owns the data path; just ACK the service flags */
	writel_relaxed(opflags, controller->base + QUP_OPERATIONAL);
} else {
	/* PIO: service the FIFOs, then complete when nothing is pending */
	if (opflags & QUP_OP_IN_SERVICE_FLAG)
		spi_qup_read(controller, &opflags);

	if (opflags & QUP_OP_OUT_SERVICE_FLAG)
		spi_qup_write(controller);

	if (!spi_qup_data_pending(controller))
		complete(&controller->done);
}

/* any error ends the transfer immediately */
if (error)
	complete(&controller->done);

if (opflags & QUP_OP_MAX_INPUT_DONE_FLAG) {
	if (!spi_qup_is_dma_xfer(controller->mode)) {
		/* PIO with data still queued: not done yet */
		if (spi_qup_data_pending(controller))
			return IRQ_HANDLED;
	}
	complete(&controller->done);
}

return IRQ_HANDLED;
}
/* set clock freq ... bits per word, determine mode */
/*
 * NOTE(review): this function is truncated in this chunk — its body
 * continues beyond the visible lines (word size / word count / mode
 * selection are not shown).  Fix applied: the mangled "staticint" token
 * is restored to "static int".
 */
static int spi_qup_io_prep(struct spi_device *spi, struct spi_transfer *xfer)
{
	struct spi_qup *controller = spi_controller_get_devdata(spi->controller);
	int ret;

	/* loopback can only echo what fits entirely in the input FIFO */
	if (spi->mode & SPI_LOOP && xfer->len > controller->in_fifo_sz) {
		dev_err(controller->dev, "too big size for loopback %d > %d\n",
			xfer->len, controller->in_fifo_sz);
		return -EIO;
	}

	ret = dev_pm_opp_set_rate(controller->dev, xfer->speed_hz);
	if (ret) {
		dev_err(controller->dev, "fail to set frequency %d",
			xfer->speed_hz);
		return -EIO;
	}
/*
 * NOTE(review): fragment of the per-transfer I/O configuration path — the
 * enclosing function's signature and the declarations of iomode/config are
 * not visible in this chunk.
 */
if (spi_qup_set_state(controller, QUP_STATE_RESET)) {
	dev_err(controller->dev, "cannot set RESET state\n");
	return -EIO;
}

/* program the word counters appropriate for the selected I/O mode */
switch (controller->mode) {
case QUP_IO_M_MODE_FIFO:
	writel_relaxed(controller->n_words,
		       controller->base + QUP_MX_READ_CNT);
	writel_relaxed(controller->n_words,
		       controller->base + QUP_MX_WRITE_CNT);
	/* must be zero for FIFO */
	writel_relaxed(0, controller->base + QUP_MX_INPUT_CNT);
	writel_relaxed(0, controller->base + QUP_MX_OUTPUT_CNT);
	break;
case QUP_IO_M_MODE_BAM:
	writel_relaxed(controller->n_words,
		       controller->base + QUP_MX_INPUT_CNT);
	writel_relaxed(controller->n_words,
		       controller->base + QUP_MX_OUTPUT_CNT);
	/* must be zero for BLOCK and BAM */
	writel_relaxed(0, controller->base + QUP_MX_READ_CNT);
	writel_relaxed(0, controller->base + QUP_MX_WRITE_CNT);

	if (!controller->qup_v1) {
		void __iomem *input_cnt;

		input_cnt = controller->base + QUP_MX_INPUT_CNT;
		/*
		 * for DMA transfers, both QUP_MX_INPUT_CNT and
		 * QUP_MX_OUTPUT_CNT must be zero to all cases but one.
		 * That case is a non-balanced transfer when there is
		 * only a rx_buf.
		 */
		if (xfer->tx_buf)
			writel_relaxed(0, input_cnt);
		else
			writel_relaxed(controller->n_words, input_cnt);

		writel_relaxed(0, controller->base + QUP_MX_OUTPUT_CNT);
	}
	break;
case QUP_IO_M_MODE_BLOCK:
	reinit_completion(&controller->done);
	writel_relaxed(controller->n_words,
		       controller->base + QUP_MX_INPUT_CNT);
	writel_relaxed(controller->n_words,
		       controller->base + QUP_MX_OUTPUT_CNT);
	/* must be zero for BLOCK and BAM */
	writel_relaxed(0, controller->base + QUP_MX_READ_CNT);
	writel_relaxed(0, controller->base + QUP_MX_WRITE_CNT);
	break;
default:
	dev_err(controller->dev, "unknown mode = %d\n",
		controller->mode);
	return -EIO;
}

iomode = readl_relaxed(controller->base + QUP_IO_M_MODES);
/* Set input and output transfer mode */
iomode &= ~(QUP_IO_M_INPUT_MODE_MASK | QUP_IO_M_OUTPUT_MODE_MASK);

/*
 * HS_MODE improves signal stability for spi-clk high rates,
 * but is invalid in loop back mode.
 */
if ((xfer->speed_hz >= SPI_HS_MIN_RATE) && !(spi->mode & SPI_LOOP))
	config |= SPI_CONFIG_HS_MODE;
else
	config &= ~SPI_CONFIG_HS_MODE;
/*
 * NOTE(review): stitched fragments — the lines below appear to come from
 * several different functions (probe-time hardware init, then PM
 * suspend/resume, then remove); none of their signatures are visible in
 * this chunk.  Confirm the boundaries against the full file.
 */
/* use num-cs unless not present or out of range */
if (of_property_read_u32(dev->of_node, "num-cs", &num_cs) ||
    num_cs > SPI_NUM_CHIPSELECTS)
	host->num_chipselect = SPI_NUM_CHIPSELECTS;
else
	host->num_chipselect = num_cs;

ret = spi_qup_set_state(controller, QUP_STATE_RESET);
if (ret) {
	dev_err(dev, "cannot set RESET state\n");
	goto error_clk;
}

/* quiesce the block: clear operational state and I/O modes */
writel_relaxed(0, base + QUP_OPERATIONAL);
writel_relaxed(0, base + QUP_IO_M_MODES);

if (!controller->qup_v1)
	writel_relaxed(0, base + QUP_OPERATIONAL_MASK);

writel_relaxed(SPI_ERROR_CLK_UNDER_RUN | SPI_ERROR_CLK_OVER_RUN,
	       base + SPI_ERROR_FLAGS_EN);

/* if earlier version of the QUP, disable INPUT_OVERRUN */
if (controller->qup_v1)
	writel_relaxed(QUP_ERROR_OUTPUT_OVER_RUN |
		QUP_ERROR_INPUT_UNDER_RUN | QUP_ERROR_OUTPUT_UNDER_RUN,
		base + QUP_ERROR_FLAGS_EN);

writel_relaxed(0, base + SPI_CONFIG);
writel_relaxed(SPI_IO_C_NO_TRI_STATE, base + SPI_IO_CONTROL);

ret = devm_request_irq(dev, irq, spi_qup_qup_irq,
		       IRQF_TRIGGER_HIGH, pdev->name, controller);
if (ret)
	goto error_clk;

/* NOTE(review): presumably from the system-suspend path — verify */
if (pm_runtime_suspended(device)) {
	ret = spi_qup_pm_resume_runtime(device);
	if (ret)
		return ret;
}

ret = spi_controller_suspend(host);
if (ret)
	return ret;

ret = spi_qup_set_state(controller, QUP_STATE_RESET);
if (ret)
	return ret;

/* NOTE(review): presumably from the remove path — verify */
if (ret >= 0) {
	ret = spi_qup_set_state(controller, QUP_STATE_RESET);
	if (ret)
		dev_warn(&pdev->dev, "failed to reset controller (%pe)\n",
			 ERR_PTR(ret));
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.