u64_stats_add(&stats->bytes, xfer->len); if (spi_valid_txbuf(msg, xfer))
u64_stats_add(&stats->bytes_tx, xfer->len); if (spi_valid_rxbuf(msg, xfer))
u64_stats_add(&stats->bytes_rx, xfer->len);
u64_stats_update_end(&stats->syncp);
put_cpu();
}
/* * modalias support makes "modprobe $MODALIAS" new-style hotplug work, * and the sysfs version makes coldplug work too.
*/ staticconststruct spi_device_id *spi_match_id(conststruct spi_device_id *id, constchar *name)
{ while (id->name[0]) { if (!strcmp(name, id->name)) return id;
id++;
} return NULL;
}
/**
 * __spi_register_driver - register a SPI driver
 * @owner: owner module of the driver to register
 * @sdrv: the driver to register
 * Context: can sleep
 *
 * Return: zero on success, else a negative error code.
 *
 * NOTE(review): this fragment appears truncated by extraction -- the
 * registration tail (the actual driver_register() call) is not visible
 * here, and several keyword pairs are fused (e.g. "conststruct");
 * confirm against the original source.
 */
int __spi_register_driver(struct module *owner, struct spi_driver *sdrv)
{
sdrv->driver.owner = owner;
sdrv->driver.bus = &spi_bus_type;
/*
 * For Really Good Reasons we use spi: modaliases not of:
 * modaliases for DT so module autoloading won't work if we
 * don't have a spi_device_id as well as a compatible string.
 */
if (sdrv->driver.of_match_table) { conststruct of_device_id *of_id;
/* Walk every DT compatible and warn if it cannot be autoloaded. */
for (of_id = sdrv->driver.of_match_table; of_id->compatible[0];
of_id++) { constchar *of_name;
/* Strip off any vendor prefix */
of_name = strnchr(of_id->compatible, sizeof(of_id->compatible), ','); if (of_name)
of_name++; else
of_name = of_id->compatible;
/* Prefer an id_table match; fall back to comparing the driver name. */
if (sdrv->id_table) { conststruct spi_device_id *spi_id;
spi_id = spi_match_id(sdrv->id_table, of_name); if (spi_id) continue;
} else { if (strcmp(sdrv->driver.name, of_name) == 0) continue;
}
pr_warn("SPI driver %s has no spi_device_id for %s\n",
sdrv->driver.name, of_id->compatible);
}
}
/*
 * SPI devices should normally not be created by SPI device drivers; that
 * would make them board-specific. Similarly with SPI controller drivers.
 * Device registration normally goes into like arch/.../mach.../board-YYY.c
 * with other readonly (flashable) information about mainboard devices.
 */

/*
 * Used to protect add/del operation for board_info list and
 * spi_controller list, and their matching process also used
 * to protect object of type struct idr.
 */
static DEFINE_MUTEX(board_lock);
/** * spi_alloc_device - Allocate a new SPI device * @ctlr: Controller to which device is connected * Context: can sleep * * Allows a driver to allocate and initialize a spi_device without * registering it immediately. This allows a driver to directly * fill the spi_device with device parameters before calling * spi_add_device() on it. * * Caller is responsible to call spi_add_device() on the returned * spi_device structure to add it to the SPI controller. If the caller * needs to discard the spi_device without adding it, then it should * call spi_dev_put() on it. * * Return: a pointer to the new device, or NULL.
*/ struct spi_device *spi_alloc_device(struct spi_controller *ctlr)
{ struct spi_device *spi;
/* * Zero(0) is a valid physical CS value and can be located at any * logical CS in the spi->chip_select[]. If all the physical CS * are initialized to 0 then It would be difficult to differentiate * between a valid physical CS 0 & an unused logical CS whose physical * CS can be 0. As a solution to this issue initialize all the CS to -1. * Now all the unused logical CS will have -1 physical CS value & can be * ignored while performing physical CS validity checks.
*/ #define SPI_INVALID_CS ((s8)-1)
for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) { /* Chipselects are numbered 0..max; validate. */
cs = spi_get_chipselect(spi, idx); if (is_valid_cs(cs) && cs >= ctlr->num_chipselect) {
dev_err(dev, "cs%d >= max %d\n", spi_get_chipselect(spi, idx),
ctlr->num_chipselect); return -EINVAL;
}
}
/* * Make sure that multiple logical CS doesn't map to the same physical CS. * For example, spi->chip_select[0] != spi->chip_select[1] and so on.
*/ if (!spi_controller_is_target(ctlr)) { for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) {
status = spi_dev_check_cs(dev, spi, idx, spi, idx + 1); if (status) return status;
}
}
/* Set the bus ID string */
spi_dev_set_name(spi);
/* * We need to make sure there's no other device with this * chipselect **BEFORE** we call setup(), else we'll trash * its configuration.
*/
status = bus_for_each_dev(&spi_bus_type, NULL, spi, spi_dev_check); if (status) return status;
/* Controller may unregister concurrently */ if (IS_ENABLED(CONFIG_SPI_DYNAMIC) &&
!device_is_registered(&ctlr->dev)) { return -ENODEV;
}
if (ctlr->cs_gpiods) {
u8 cs;
for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) {
cs = spi_get_chipselect(spi, idx); if (is_valid_cs(cs))
spi_set_csgpiod(spi, idx, ctlr->cs_gpiods[cs]);
}
}
/* * Drivers may modify this initial i/o setup, but will * normally rely on the device being setup. Devices * using SPI_CS_HIGH can't coexist well otherwise...
*/
status = spi_setup(spi); if (status < 0) {
dev_err(dev, "can't setup %s, status %d\n",
dev_name(&spi->dev), status); return status;
}
/* Device may be bound to an active driver when this returns */
status = device_add(&spi->dev); if (status < 0) {
dev_err(dev, "can't add %s, status %d\n",
dev_name(&spi->dev), status);
spi_cleanup(spi);
} else {
dev_dbg(dev, "registered child %s\n", dev_name(&spi->dev));
}
return status;
}
/**
 * spi_add_device - Add spi_device allocated with spi_alloc_device
 * @spi: spi_device to register
 *
 * Companion function to spi_alloc_device. Devices allocated with
 * spi_alloc_device can be added onto the SPI bus with this function.
 *
 * Return: 0 on success; negative errno on failure
 */
int spi_add_device(struct spi_device *spi)
{
	struct spi_controller *ctlr = spi->controller;
	int ret;

	/* Set the bus ID string */
	spi_dev_set_name(spi);

	/* Serialize against concurrent add/del on this controller. */
	mutex_lock(&ctlr->add_lock);
	ret = __spi_add_device(spi);
	mutex_unlock(&ctlr->add_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(spi_add_device);
/** * spi_new_device - instantiate one new SPI device * @ctlr: Controller to which device is connected * @chip: Describes the SPI device * Context: can sleep * * On typical mainboards, this is purely internal; and it's not needed * after board init creates the hard-wired devices. Some development * platforms may not be able to use spi_register_board_info though, and * this is exported so that for example a USB or parport based adapter * driver could add devices (which it would learn about out-of-band). * * Return: the new device, or NULL.
*/ struct spi_device *spi_new_device(struct spi_controller *ctlr, struct spi_board_info *chip)
{ struct spi_device *proxy; int status;
/* * NOTE: caller did any chip->bus_num checks necessary. * * Also, unless we change the return value convention to use * error-or-pointer (not NULL-or-pointer), troubleshootability * suggests syslogged diagnostics are best here (ugh).
*/
proxy = spi_alloc_device(ctlr); if (!proxy) return NULL;
/* Use provided chip-select for proxy device */
spi_set_all_cs_unused(proxy);
spi_set_chipselect(proxy, 0, chip->chip_select);
proxy->max_speed_hz = chip->max_speed_hz;
proxy->mode = chip->mode;
proxy->irq = chip->irq;
strscpy(proxy->modalias, chip->modalias, sizeof(proxy->modalias));
proxy->dev.platform_data = (void *) chip->platform_data;
proxy->controller_data = chip->controller_data;
proxy->controller_state = NULL; /* * By default spi->chip_select[0] will hold the physical CS number, * so set bit 0 in spi->cs_index_mask.
*/
proxy->cs_index_mask = BIT(0);
if (chip->swnode) {
status = device_add_software_node(&proxy->dev, chip->swnode); if (status) {
dev_err(&ctlr->dev, "failed to add software node to '%s': %d\n",
chip->modalias, status); goto err_dev_put;
}
}
status = spi_add_device(proxy); if (status < 0) goto err_dev_put;
/** * spi_unregister_device - unregister a single SPI device * @spi: spi_device to unregister * * Start making the passed SPI device vanish. Normally this would be handled * by spi_unregister_controller().
*/ void spi_unregister_device(struct spi_device *spi)
{ struct fwnode_handle *fwnode;
dev = spi_new_device(ctlr, bi); if (!dev)
dev_err(ctlr->dev.parent, "can't create new device for %s\n",
bi->modalias);
}
/**
 * spi_register_board_info - register SPI devices for a given board
 * @info: array of chip descriptors
 * @n: how many descriptors are provided
 * Context: can sleep
 *
 * Board-specific early init code calls this (probably during arch_initcall)
 * with segments of the SPI device table. Any device nodes are created later,
 * after the relevant parent SPI controller (bus_num) is defined. We keep
 * this table of devices forever, so that reloading a controller driver will
 * not make Linux forget about these hard-wired devices.
 *
 * Other code can also call this, e.g. a particular add-on board might provide
 * SPI devices through its expansion connector, so code initializing that board
 * would naturally declare its SPI devices.
 *
 * The board info passed can safely be __initdata ... but be careful of
 * any embedded pointers (platform_data, etc), they're copied as-is.
 *
 * Return: zero on success, else a negative error code.
 *
 * NOTE(review): the body of the per-descriptor loop is missing from this
 * view -- the fragment ends mid-loop; confirm against the original source.
 */
int spi_register_board_info(struct spi_board_info const *info, unsigned n)
{ struct boardinfo *bi; int i;
/* Registering zero descriptors is a successful no-op. */
if (!n) return 0;
/* One boardinfo wrapper per descriptor; calloc checks n*size overflow. */
bi = kcalloc(n, sizeof(*bi), GFP_KERNEL); if (!bi) return -ENOMEM;
for (i = 0; i < n; i++, bi++, info++) { struct spi_controller *ctlr;
/** * spi_res_alloc - allocate a spi resource that is life-cycle managed * during the processing of a spi_message while using * spi_transfer_one * @spi: the SPI device for which we allocate memory * @release: the release code to execute for this resource * @size: size to alloc and return * @gfp: GFP allocation flags * * Return: the pointer to the allocated data * * This may get enhanced in the future to allocate from a memory pool * of the @spi_device or @spi_controller to avoid repeated allocations.
*/ staticvoid *spi_res_alloc(struct spi_device *spi, spi_res_release_t release,
size_t size, gfp_t gfp)
{ struct spi_res *sres;
sres = kzalloc(sizeof(*sres) + size, gfp); if (!sres) return NULL;
spi_for_each_valid_cs(spi, idx) { if (spi->controller->last_cs[idx] == spi_get_chipselect(spi, idx))
last = true;
} return last;
}
/*
 * spi_toggle_csgpiod - drive the chip-select GPIO and run CS delays
 * @spi: the device whose chip select is toggled
 * @idx: logical chip-select index
 * @enable: logical CS state with SPI_CS_HIGH already accounted for
 * @activate: raw "assert" request; polarity left to the GPIO library
 */
static void spi_toggle_csgpiod(struct spi_device *spi, u8 idx, bool enable, bool activate)
{
	/*
	 * Historically ACPI has no means of the GPIO polarity and
	 * thus the SPISerialBus() resource defines it on the per-chip
	 * basis. In order to avoid a chain of negations, the GPIO
	 * polarity is considered being Active High. Even for the cases
	 * when _DSD() is involved (in the updated versions of ACPI)
	 * the GPIO CS polarity must be defined Active High to avoid
	 * ambiguity. That's why we use enable, that takes SPI_CS_HIGH
	 * into account.
	 */
	if (is_acpi_device_node(dev_fwnode(&spi->dev)))
		gpiod_set_value_cansleep(spi_get_csgpiod(spi, idx), !enable);
	else
		/* Polarity handled by GPIO library */
		gpiod_set_value_cansleep(spi_get_csgpiod(spi, idx), activate);

	/* Honor the device's setup/inactive timing requirements. */
	if (activate)
		spi_delay_exec(&spi->cs_setup, NULL);
	else
		spi_delay_exec(&spi->cs_inactive, NULL);
}
/* * Avoid calling into the driver (or doing delays) if the chip select * isn't actually changing from the last time this was called.
*/ if (!force && (enable == spi_is_last_cs(spi)) &&
(spi->controller->last_cs_index_mask == spi->cs_index_mask) &&
(spi->controller->last_cs_mode_high == (spi->mode & SPI_CS_HIGH))) return;
ret = sg_alloc_table(sgt, sgs, GFP_KERNEL); if (ret != 0) return ret;
sg = &sgt->sgl[0]; for (i = 0; i < sgs; i++) {
if (vmalloced_buf || kmap_buf) { /* * Next scatterlist entry size is the minimum between * the desc_len and the remaining buffer length that * fits in a page.
*/
min = min_t(size_t, desc_len,
min_t(size_t, len,
PAGE_SIZE - offset_in_page(buf))); if (vmalloced_buf)
vm_page = vmalloc_to_page(buf); else
vm_page = kmap_to_page(buf); if (!vm_page) {
sg_free_table(sgt); return -ENOMEM;
}
sg_set_page(sg, vm_page,
min, offset_in_page(buf));
} else {
min = min_t(size_t, len, desc_len);
sg_buf = buf;
sg_set_buf(sg, sg_buf, min);
}
buf += min;
len -= min;
sg = sg_next(sg);
}
ret = dma_map_sgtable(dev, sgt, dir, attrs); if (ret < 0) {
sg_free_table(sgt); return ret;
}
ret = -ENOMSG;
list_for_each_entry(xfer, &msg->transfers, transfer_list) { /* The sync is done before each transfer. */ unsignedlong attrs = DMA_ATTR_SKIP_CPU_SYNC;
if (!ctlr->can_dma(ctlr, msg->spi, xfer)) continue;
if (xfer->tx_buf != NULL) {
ret = spi_map_buf_attrs(ctlr, tx_dev, &xfer->tx_sg,
(void *)xfer->tx_buf,
xfer->len, DMA_TO_DEVICE,
attrs); if (ret != 0) return ret;
xfer->tx_sg_mapped = true;
}
if (xfer->rx_buf != NULL) {
ret = spi_map_buf_attrs(ctlr, rx_dev, &xfer->rx_sg,
xfer->rx_buf, xfer->len,
DMA_FROM_DEVICE, attrs); if (ret != 0) {
spi_unmap_buf_attrs(ctlr, tx_dev,
&xfer->tx_sg, DMA_TO_DEVICE,
attrs);
return ret;
}
xfer->rx_sg_mapped = true;
}
} /* No transfer has been mapped, bail out with success */ if (ret) return 0;
list_for_each_entry(xfer, &msg->transfers, transfer_list) { /* The sync has already been done after each transfer. */ unsignedlong attrs = DMA_ATTR_SKIP_CPU_SYNC;
if (xfer->rx_sg_mapped)
spi_unmap_buf_attrs(ctlr, rx_dev, &xfer->rx_sg,
DMA_FROM_DEVICE, attrs);
xfer->rx_sg_mapped = false;
list_for_each_entry(xfer, &msg->transfers, transfer_list) { /* * Restore the original value of tx_buf or rx_buf if they are * NULL.
*/ if (xfer->tx_buf == ctlr->dummy_tx)
xfer->tx_buf = NULL; if (xfer->rx_buf == ctlr->dummy_rx)
xfer->rx_buf = NULL;
}
if (spi_controller_is_target(ctlr)) { if (wait_for_completion_interruptible(&ctlr->xfer_completion)) {
dev_dbg(&msg->spi->dev, "SPI transfer interrupted\n"); return -EINTR;
}
} else { if (!speed_hz)
speed_hz = 100000;
/* * For each byte we wait for 8 cycles of the SPI clock. * Since speed is defined in Hz and we want milliseconds, * use respective multiplier, but before the division, * otherwise we may get 0 for short transfers.
*/
ms = 8LL * MSEC_PER_SEC * xfer->len;
do_div(ms, speed_hz);
/* * Increase it twice and add 200 ms tolerance, use * predefined maximum in case of overflow.
*/
ms += ms + 200; if (ms > UINT_MAX)
ms = UINT_MAX;
ms = wait_for_completion_timeout(&ctlr->xfer_completion,
msecs_to_jiffies(ms));
if (ms == 0) {
SPI_STATISTICS_INCREMENT_FIELD(statm, timedout);
SPI_STATISTICS_INCREMENT_FIELD(stats, timedout);
dev_err(&msg->spi->dev, "SPI transfer timed out\n"); return -ETIMEDOUT;
}
if (xfer->error & SPI_TRANS_FAIL_IO) return -EIO;
}
return 0;
}
/*
 * _spi_transfer_delay_ns - delay for @ns nanoseconds between transfer phases
 *
 * Delays of up to one microsecond busy-wait with ndelay(); longer delays
 * round up to microseconds and use fsleep(), which may sleep.
 */
static void _spi_transfer_delay_ns(u32 ns)
{
	if (!ns)
		return;

	if (ns <= NSEC_PER_USEC) {
		ndelay(ns);
	} else {
		u32 us = DIV_ROUND_UP(ns, NSEC_PER_USEC);

		fsleep(us);
	}
}
/*
 * spi_delay_to_ns - convert a struct spi_delay to nanoseconds
 * @_delay: delay descriptor (value plus unit)
 * @xfer: transfer used to resolve SPI_DELAY_UNIT_SCK clock cycles; may be
 *        NULL for units that do not depend on the clock rate
 *
 * Return: 0 for no delay, a negative error code when the unit cannot be
 * resolved; other returns are not visible in this fragment.
 *
 * NOTE(review): this fragment is truncated -- the switch statement is
 * never closed and the final return is missing from this view.
 */
int spi_delay_to_ns(struct spi_delay *_delay, struct spi_transfer *xfer)
{
u32 delay = _delay->value;
u32 unit = _delay->unit;
u32 hz;
/* A zero value means no delay regardless of unit. */
if (!delay) return 0;
switch (unit) { case SPI_DELAY_UNIT_USECS:
delay *= NSEC_PER_USEC; break; case SPI_DELAY_UNIT_NSECS: /* Nothing to do here */ break; case SPI_DELAY_UNIT_SCK: /* Clock cycles need to be obtained from spi_transfer */ if (!xfer) return -EINVAL; /*
 * If there is unknown effective speed, approximate it
 * by underestimating with half of the requested Hz.
 */
hz = xfer->effective_speed_hz ?: xfer->speed_hz / 2; if (!hz) return -EINVAL;
/* Return early on "fast" mode - for everything but USECS */ if (!delay) { if (unit == SPI_DELAY_UNIT_USECS)
_spi_transfer_delay_ns(default_delay_ns); return;
}
ret = spi_delay_exec(&xfer->cs_change_delay, xfer); if (ret) {
dev_err_once(&msg->spi->dev, "Use of unsupported delay unit %i, using default of %luus\n",
unit, default_delay_ns / NSEC_PER_USEC);
_spi_transfer_delay_ns(default_delay_ns);
}
}
/*
 * spi_transfer_one_message - Default implementation of transfer_one_message()
 *
 * This is a standard implementation of transfer_one_message() for
 * drivers which implement a transfer_one() operation. It provides
 * standard handling of delays and chip select management.
 *
 * NOTE(review): the main per-transfer loop is missing from this view --
 * the body jumps straight from the declarations to the "out:" cleanup
 * label, and "staticint" looks like a fused keyword pair from extraction.
 */
staticint spi_transfer_one_message(struct spi_controller *ctlr, struct spi_message *msg)
{ struct spi_transfer *xfer; bool keep_cs = false; int ret = 0; struct spi_statistics __percpu *statm = ctlr->pcpu_statistics; struct spi_statistics __percpu *stats = msg->spi->pcpu_statistics;
/* Cleanup: drop chip select unless a transfer asked to keep it asserted. */
out: if (ret != 0 || !keep_cs)
spi_set_cs(msg->spi, false, false);
/* Record the outcome if nothing set a status yet. */
if (msg->status == -EINPROGRESS)
msg->status = ret;
/* Give the driver a chance to clean up after a failed message. */
if (msg->status && ctlr->handle_err)
ctlr->handle_err(ctlr, msg);
spi_finalize_current_message(ctlr);
return ret;
}
/**
 * spi_finalize_current_transfer - report completion of a transfer
 * @ctlr: the controller reporting completion
 *
 * Called by SPI drivers using the core transfer_one_message()
 * implementation to notify it that the current interrupt driven
 * transfer has finished and the next one may be scheduled.
 */
void spi_finalize_current_transfer(struct spi_controller *ctlr)
{
	struct completion *done = &ctlr->xfer_completion;

	complete(done);
}
EXPORT_SYMBOL_GPL(spi_finalize_current_transfer);
if (!was_busy && ctlr->auto_runtime_pm) {
ret = pm_runtime_get_sync(ctlr->dev.parent); if (ret < 0) {
pm_runtime_put_noidle(ctlr->dev.parent);
dev_err(&ctlr->dev, "Failed to power device: %d\n",
ret);
if (!was_busy && ctlr->prepare_transfer_hardware) {
ret = ctlr->prepare_transfer_hardware(ctlr); if (ret) {
dev_err(&ctlr->dev, "failed to prepare transfer hardware: %d\n",
ret);
if (ctlr->auto_runtime_pm)
pm_runtime_put(ctlr->dev.parent);
/* * Drivers implementation of transfer_one_message() must arrange for * spi_finalize_current_message() to get called. Most drivers will do * this in the calling context, but some don't. For those cases, a * completion is used to guarantee that this function does not return * until spi_finalize_current_message() is done accessing * ctlr->cur_msg. * Use of the following two flags enable to opportunistically skip the * use of the completion since its use involves expensive spin locks. * In case of a race with the context that calls * spi_finalize_current_message() the completion will always be used, * due to strict ordering of these flags using barriers.
*/
WRITE_ONCE(ctlr->cur_msg_incomplete, true);
WRITE_ONCE(ctlr->cur_msg_need_completion, false);
reinit_completion(&ctlr->cur_msg_completion);
smp_wmb(); /* Make these available to spi_finalize_current_message() */
ret = ctlr->transfer_one_message(ctlr, msg); if (ret) {
dev_err(&ctlr->dev, "failed to transfer one message from queue\n"); return ret;
}
WRITE_ONCE(ctlr->cur_msg_need_completion, true);
smp_mb(); /* See spi_finalize_current_message()... */ if (READ_ONCE(ctlr->cur_msg_incomplete))
wait_for_completion(&ctlr->cur_msg_completion);
return 0;
}
/** * __spi_pump_messages - function which processes SPI message queue * @ctlr: controller to process queue for * @in_kthread: true if we are in the context of the message pump thread * * This function checks if there is any SPI message in the queue that * needs processing and if so call out to the driver to initialize hardware * and transfer each message. * * Note that it is called both from the kthread itself and also from * inside spi_sync(); the queue extraction handling at the top of the * function should deal with this safely.
*/ staticvoid __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
{ struct spi_message *msg; bool was_busy = false; unsignedlong flags; int ret;
/* Take the I/O mutex */
mutex_lock(&ctlr->io_mutex);
/**
 * spi_pump_messages - kthread work function which processes spi message queue
 * @work: pointer to kthread work struct contained in the controller struct
 */
static void spi_pump_messages(struct kthread_work *work)
{
	struct spi_controller *ctlr =
		container_of(work, struct spi_controller, pump_messages);

	/* Runs in kthread context, so the pump is allowed to sleep. */
	__spi_pump_messages(ctlr, true);
}
/** * spi_take_timestamp_pre - helper to collect the beginning of the TX timestamp * @ctlr: Pointer to the spi_controller structure of the driver * @xfer: Pointer to the transfer being timestamped * @progress: How many words (not bytes) have been transferred so far * @irqs_off: If true, will disable IRQs and preemption for the duration of the * transfer, for less jitter in time measurement. Only compatible * with PIO drivers. If true, must follow up with * spi_take_timestamp_post or otherwise system will crash. * WARNING: for fully predictable results, the CPU frequency must * also be under control (governor). * * This is a helper for drivers to collect the beginning of the TX timestamp * for the requested byte from the SPI transfer. The frequency with which this * function must be called (once per word, once for the whole transfer, once * per batch of words etc) is arbitrary as long as the @tx buffer offset is * greater than or equal to the requested byte at the time of the call. The * timestamp is only taken once, at the first such call. It is assumed that * the driver advances its @tx buffer pointer monotonically.
*/ void spi_take_timestamp_pre(struct spi_controller *ctlr, struct spi_transfer *xfer,
size_t progress, bool irqs_off)
{ if (!xfer->ptp_sts) return;
if (xfer->timestamped) return;
if (progress > xfer->ptp_sts_word_pre) return;
/* Capture the resolution of the timestamp */
xfer->ptp_sts_word_pre = progress;
if (irqs_off) {
local_irq_save(ctlr->irq_flags);
preempt_disable();
}
/** * spi_take_timestamp_post - helper to collect the end of the TX timestamp * @ctlr: Pointer to the spi_controller structure of the driver * @xfer: Pointer to the transfer being timestamped * @progress: How many words (not bytes) have been transferred so far * @irqs_off: If true, will re-enable IRQs and preemption for the local CPU. * * This is a helper for drivers to collect the end of the TX timestamp for * the requested byte from the SPI transfer. Can be called with an arbitrary * frequency: only the first call where @tx exceeds or is equal to the * requested word will be timestamped.
*/ void spi_take_timestamp_post(struct spi_controller *ctlr, struct spi_transfer *xfer,
size_t progress, bool irqs_off)
{ if (!xfer->ptp_sts) return;
if (xfer->timestamped) return;
if (progress < xfer->ptp_sts_word_post) return;
ptp_read_system_postts(xfer->ptp_sts);
if (irqs_off) {
local_irq_restore(ctlr->irq_flags);
preempt_enable();
}
/* Capture the resolution of the timestamp */
xfer->ptp_sts_word_post = progress;
/**
 * spi_set_thread_rt - set the controller to pump at realtime priority
 * @ctlr: controller to boost priority of
 *
 * This can be called because the controller requested realtime priority
 * (by setting the ->rt value before calling spi_register_controller()) or
 * because a device on the bus said that its transfers needed realtime
 * priority.
 *
 * NOTE: at the moment if any device on a bus says it needs realtime then
 * the thread will be at realtime priority for all transfers on that
 * controller. If this eventually becomes a problem we may see if we can
 * find a way to boost the priority only temporarily during relevant
 * transfers.
 */
static void spi_set_thread_rt(struct spi_controller *ctlr)
{
	dev_info(&ctlr->dev, "will run message pump with realtime priority\n");
	sched_set_fifo(ctlr->kworker->task);
}
/* * Controller config will indicate if this controller should run the * message pump with high (realtime) priority to reduce the transfer * latency on the bus by minimising the delay between a transfer * request and the scheduling of the message pump thread. Without this * setting the message pump thread will remain at default priority.
*/ if (ctlr->rt)
spi_set_thread_rt(ctlr);
return 0;
}
/**
 * spi_get_next_queued_message() - called by driver to check for queued
 * messages
 * @ctlr: the controller to check for queued messages
 *
 * If there are more messages in the queue, the next message is returned from
 * this call.
 *
 * Return: the next message in the queue, else NULL if the queue is empty.
 *
 * NOTE(review): the trailing "return next;" and closing brace are missing
 * from this view -- the fragment looks truncated by extraction.
 */
struct spi_message *spi_get_next_queued_message(struct spi_controller *ctlr)
{ struct spi_message *next; unsignedlong flags;
/* Get a pointer to the next message, if any */
spin_lock_irqsave(&ctlr->queue_lock, flags);
next = list_first_entry_or_null(&ctlr->queue, struct spi_message,
queue);
spin_unlock_irqrestore(&ctlr->queue_lock, flags);
/*
 * __spi_unoptimize_message - shared implementation of spi_unoptimize_message()
 * and spi_maybe_unoptimize_message()
 * @msg: the message to unoptimize
 *
 * Peripheral drivers should use spi_unoptimize_message() and callers inside
 * core should use spi_maybe_unoptimize_message() rather than calling this
 * function directly.
 *
 * It is not valid to call this on a message that is not currently optimized.
 */
static void __spi_unoptimize_message(struct spi_message *msg)
{
	struct spi_controller *ctlr = msg->spi->controller;

	/* Let the controller undo its own optimization state first. */
	if (ctlr->unoptimize_message)
		ctlr->unoptimize_message(msg);

	/* Release any core-managed resources tied to the message. */
	spi_res_release(ctlr, msg);
	msg->optimized = false;
	msg->opt_state = NULL;
}
/*
 * spi_maybe_unoptimize_message - unoptimize msg not managed by a peripheral
 * @msg: the message to unoptimize
 *
 * This function is used to unoptimize a message if and only if it was
 * optimized by the core (via spi_maybe_optimize_message()).
 */
static void spi_maybe_unoptimize_message(struct spi_message *msg)
{
	/* Pre-optimized messages are owned by the peripheral driver. */
	if (!msg->pre_optimized && msg->optimized &&
	    !msg->spi->controller->defer_optimize_message)
		__spi_unoptimize_message(msg);
}
/** * spi_finalize_current_message() - the current message is complete * @ctlr: the controller to return the message to * * Called by the driver to notify the core that the message in the front of the * queue is complete and can be removed from the queue.
*/ void spi_finalize_current_message(struct spi_controller *ctlr)
{ struct spi_transfer *xfer; struct spi_message *mesg; int ret;
if (unlikely(ctlr->ptp_sts_supported))
list_for_each_entry(xfer, &mesg->transfers, transfer_list)
WARN_ON_ONCE(xfer->ptp_sts && !xfer->timestamped);
spi_unmap_msg(ctlr, mesg);
if (mesg->prepared && ctlr->unprepare_message) {
ret = ctlr->unprepare_message(ctlr, mesg); if (ret) {
dev_err(&ctlr->dev, "failed to unprepare message: %d\n",
ret);
}
}
mesg->prepared = false;
spi_maybe_unoptimize_message(mesg);
WRITE_ONCE(ctlr->cur_msg_incomplete, false);
smp_mb(); /* See __spi_pump_transfer_message()... */ if (READ_ONCE(ctlr->cur_msg_need_completion))
complete(&ctlr->cur_msg_completion);
trace_spi_message_done(mesg);
mesg->state = NULL; if (mesg->complete)
mesg->complete(mesg->context);
}
EXPORT_SYMBOL_GPL(spi_finalize_current_message);
/* * This is a bit lame, but is optimized for the common execution path. * A wait_queue on the ctlr->busy could be used, but then the common * execution path (pump_messages) would be required to call wake_up or * friends on every SPI message. Do this instead.
*/ do {
spin_lock_irqsave(&ctlr->queue_lock, flags); if (list_empty(&ctlr->queue) && !ctlr->busy) {
ctlr->running = false;
spin_unlock_irqrestore(&ctlr->queue_lock, flags); return 0;
}
spin_unlock_irqrestore(&ctlr->queue_lock, flags);
usleep_range(10000, 11000);
} while (--limit);
return -EBUSY;
}
/*
 * spi_destroy_queue - stop the message queue and tear it down
 * @ctlr: controller whose queue should be destroyed
 *
 * NOTE(review): the success tail (worker destruction and "return 0;") is
 * missing from this view, and "staticint" looks like a fused keyword pair
 * from extraction -- confirm against the original source.
 */
staticint spi_destroy_queue(struct spi_controller *ctlr)
{ int ret;
ret = spi_stop_queue(ctlr);
/*
 * kthread_flush_worker will block until all work is done.
 * If the reason that stop_queue timed out is that the work will never
 * finish, then it does no good to call flush/stop thread, so
 * return anyway.
 */
if (ret) {
dev_err(&ctlr->dev, "problem destroying queue\n"); return ret;
}
/*
 * spi_queued_transfer - transfer function for queued transfers
 * @spi: SPI device which is requesting transfer
 * @msg: SPI message which is to handled is queued to driver queue
 *
 * Return: zero on success, else a negative error code.
 */
static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg)
{
	/* Queue the message asynchronously; do not wait for completion. */
	return __spi_queued_transfer(spi, msg, true);
}
/*
 * spi_controller_initialize_queue - wire up queued transfer handling
 * @ctlr: controller to initialize
 *
 * NOTE(review): the success return and the err_init_queue/err_start_queue
 * labels referenced below are missing from this view -- fragment truncated;
 * "staticint" looks like a fused keyword pair from extraction.
 */
staticint spi_controller_initialize_queue(struct spi_controller *ctlr)
{ int ret;
/* Route transfers through the queue; default the message handler. */
ctlr->transfer = spi_queued_transfer; if (!ctlr->transfer_one_message)
ctlr->transfer_one_message = spi_transfer_one_message;
/* Initialize and start queue */
ret = spi_init_queue(ctlr); if (ret) {
dev_err(&ctlr->dev, "problem initializing queue\n"); goto err_init_queue;
}
ctlr->queued = true;
ret = spi_start_queue(ctlr); if (ret) {
dev_err(&ctlr->dev, "problem starting queue\n"); goto err_start_queue;
}
/**
 * spi_flush_queue - Send all pending messages in the queue from the callers'
 * context
 * @ctlr: controller to process queue for
 *
 * This should be used when one wants to ensure all pending messages have been
 * sent before doing something. Is used by the spi-mem code to make sure SPI
 * memory operations do not preempt regular SPI transfers that have been queued
 * before the spi-mem operation.
 */
void spi_flush_queue(struct spi_controller *ctlr)
{
	/* Only controllers using the queued path have anything to drain. */
	if (ctlr->transfer != spi_queued_transfer)
		return;

	__spi_pump_messages(ctlr, false);
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.