switch (ctx->fmtinfo->bpp) { case 8:
extract = CAL_PIX_PROC_EXTRACT_B8; pack = CAL_PIX_PROC_PACK_B8; break; case 10:
extract = CAL_PIX_PROC_EXTRACT_B10_MIPI; pack = CAL_PIX_PROC_PACK_B16; break; case 12:
extract = CAL_PIX_PROC_EXTRACT_B12_MIPI; pack = CAL_PIX_PROC_PACK_B16; break; case 16:
extract = CAL_PIX_PROC_EXTRACT_B16_LE; pack = CAL_PIX_PROC_PACK_B16; break; default: /* * If you see this warning then it means that you added * some new entry in the cal_formats[] array with a different * bit per pixel values then the one supported below. * Either add support for the new bpp value below or adjust * the new entry to use one of the value below. * * Instead of failing here just use 8 bpp as a default.
*/
dev_warn_once(ctx->cal->dev, "%s:%d:%s: bpp:%d unsupported! Overwritten with 8.\n",
__FILE__, __LINE__, __func__, ctx->fmtinfo->bpp);
extract = CAL_PIX_PROC_EXTRACT_B8; pack = CAL_PIX_PROC_PACK_B8; break;
}
val = cal_read(ctx->cal, CAL_WR_DMA_XSIZE(ctx->dma_ctx)); /* 64 bit word means no skipping */
cal_set_field(&val, 0, CAL_WR_DMA_XSIZE_XSKIP_MASK); /* * The XSIZE field is expressed in 64-bit units and prevents overflows * in case of synchronization issues by limiting the number of bytes * written per line.
*/
cal_set_field(&val, stride / 8, CAL_WR_DMA_XSIZE_MASK);
cal_write(ctx->cal, CAL_WR_DMA_XSIZE(ctx->dma_ctx), val);
ctx_dbg(3, ctx, "CAL_WR_DMA_XSIZE(%d) = 0x%08x\n", ctx->dma_ctx,
cal_read(ctx->cal, CAL_WR_DMA_XSIZE(ctx->dma_ctx)));
}
/* * Request DMA stop and wait until it completes. If completion times * out, forcefully disable the DMA.
*/
spin_lock_irq(&ctx->dma.lock);
ctx->dma.state = CAL_DMA_STOP_REQUESTED;
spin_unlock_irq(&ctx->dma.lock);
time_left = wait_event_timeout(ctx->dma.wait, cal_ctx_wr_dma_stopped(ctx),
msecs_to_jiffies(500)); if (!time_left) {
ctx_err(ctx, "failed to disable dma cleanly\n");
cal_ctx_wr_dma_disable(ctx);
}
/* * Track a sequence number for each virtual channel, which is shared by * all contexts using the same virtual channel. This is done using the * CSI-2 frame number as a base.
*/ staticvoid cal_update_seq_number(struct cal_ctx *ctx)
{ struct cal_dev *cal = ctx->cal; struct cal_camerarx *phy = ctx->phy;
u16 prev_frame_num, frame_num;
u8 vc = ctx->vc;
if (ctx->dma.state == CAL_DMA_STOP_REQUESTED) { /* * If a stop is requested, disable the write DMA context * immediately. The CAL_WR_DMA_CTRL_j.MODE field is shadowed, * the current frame will complete and the DMA will then stop.
*/
cal_ctx_wr_dma_disable(ctx);
ctx->dma.state = CAL_DMA_STOP_PENDING;
} elseif (!list_empty(&ctx->dma.queue) && !ctx->dma.pending) { /* * Otherwise, if a new buffer is available, queue it to the * hardware.
*/ struct cal_buffer *buf;
dma_addr_t addr;
/* If the DMA context was stopping, it is now stopped. */ if (ctx->dma.state == CAL_DMA_STOP_PENDING) {
ctx->dma.state = CAL_DMA_STOPPED;
wake_up(&ctx->dma.wait);
}
/* If a new buffer was queued, complete the current buffer. */ if (ctx->dma.pending) {
buf = ctx->dma.active;
ctx->dma.active = ctx->dma.pending;
ctx->dma.pending = NULL;
}
staticvoid cal_irq_handle_wdma(struct cal_ctx *ctx, bool start, bool end)
{ /* * CAL HW interrupts are inherently racy. If we get both start and end * interrupts, we don't know what has happened: did the DMA for a single * frame start and end, or did one frame end and a new frame start? * * Usually for normal pixel frames we get the interrupts separately. If * we do get both, we have to guess. The assumption in the code below is * that the active vertical area is larger than the blanking vertical * area, and thus it is more likely that we get the end of the old frame * and the start of a new frame. * * However, for embedded data, which is only a few lines high, we always * get both interrupts. Here the assumption is that we get both for the * same frame.
*/ if (ctx->v_fmt.fmt.pix.height < 10) { if (start)
cal_irq_wdma_start(ctx);
if (end)
cal_irq_wdma_end(ctx);
} else { if (end)
cal_irq_wdma_end(ctx);
for (i = 0; i < 3; ++i) {
status[i] = cal_read(cal, CAL_HL_IRQSTATUS(i)); if (status[i])
cal_write(cal, CAL_HL_IRQSTATUS(i), status[i]);
}
if (status[0]) { if (status[0] & CAL_HL_IRQ_OCPO_ERR_MASK)
dev_err_ratelimited(cal->dev, "OCPO ERROR\n");
for (i = 0; i < cal->data->num_csi2_phy; ++i) { if (status[0] & CAL_HL_IRQ_CIO_MASK(i)) {
u32 cio_stat = cal_read(cal,
CAL_CSI2_COMPLEXIO_IRQSTATUS(i));
dev_err_ratelimited(cal->dev, "CIO%u error: %#08x\n", i, cio_stat);
phy_dbg(1, phy, "Using source %s for capture\n", subdev->name);
pad = media_entity_get_fwnode_pad(&subdev->entity,
of_fwnode_handle(phy->source_ep_node),
MEDIA_PAD_FL_SOURCE); if (pad < 0) {
phy_err(phy, "Source %s has no connected source pad\n",
subdev->name); return pad;
}
ret = media_create_pad_link(&subdev->entity, pad,
&phy->subdev.entity, CAL_CAMERARX_PAD_SINK,
MEDIA_LNK_FL_IMMUTABLE |
MEDIA_LNK_FL_ENABLED); if (ret) {
phy_err(phy, "Failed to create media link for source %s\n",
subdev->name); return ret;
}
/* ------------------------------------------------------------------ * Media and V4L2 device handling * ------------------------------------------------------------------
*/
/*
 * Register the user-facing devices. To be called at the end of the probe
 * function, once all other resources have been initialized and are ready
 * for use.
 */
static int cal_media_register(struct cal_dev *cal)
{
	int ret;

	ret = media_device_register(&cal->mdev);
	if (ret) {
		cal_err(cal, "Failed to register media device\n");
		return ret;
	}

	/*
	 * Register the async notifier. This may trigger registration of the
	 * V4L2 video devices if all subdevs are ready.
	 */
	ret = cal_async_notifier_register(cal);
	if (ret)
		goto err_media_unregister;

	return 0;

err_media_unregister:
	media_device_unregister(&cal->mdev);
	return ret;
}
/* * Unregister the user-facing devices, but don't free memory yet. To be called * at the beginning of the remove function, to disallow access from userspace.
*/ staticvoid cal_media_unregister(struct cal_dev *cal)
{ unsignedint i;
/* Unregister all the V4L2 video devices. */ for (i = 0; i < cal->num_contexts; i++)
cal_ctx_v4l2_unregister(cal->ctx[i]);
/* * Initialize the in-kernel objects. To be called at the beginning of the probe * function, before the V4L2 device is used by the driver.
*/ staticint cal_media_init(struct cal_dev *cal)
{ struct media_device *mdev = &cal->mdev; int ret;
/* * Initialize the V4L2 device (despite the function name, this performs * initialization, not registration).
*/
cal->v4l2_dev.mdev = mdev;
ret = v4l2_device_register(cal->dev, &cal->v4l2_dev); if (ret) {
cal_err(cal, "Failed to register V4L2 device\n"); return ret;
}
/*
 * Cleanup the in-kernel objects, freeing memory. To be called at the very end
 * of the remove sequence, when nothing (including userspace) can access the
 * objects anymore.
 */
static void cal_media_cleanup(struct cal_dev *cal)
{
	/* Release the V4L2 and media device core state. */
	v4l2_device_unregister(&cal->v4l2_dev);
	media_device_cleanup(&cal->mdev);

	/* Drop the vb2 DMA-contig max segment size set for this device. */
	vb2_dma_contig_clear_max_seg_size(cal->dev);
}
/* ------------------------------------------------------------------ * Initialization and module stuff * ------------------------------------------------------------------
*/
staticstruct cal_ctx *cal_ctx_create(struct cal_dev *cal, int inst)
{ struct cal_ctx *ctx; int ret;
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); if (!ctx) return NULL;
dev_warn(cal->dev, "failed to get ti,camerrx-control: %ld\n",
PTR_ERR(syscon));
/* * Backward DTS compatibility. If syscon entry is not present then * check if the camerrx_control resource is present.
*/
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "camerrx_control");
base = devm_ioremap_resource(cal->dev, res); if (IS_ERR(base)) {
cal_err(cal, "failed to ioremap camerrx_control\n"); return PTR_ERR(base);
}
/* * In this case the base already point to the direct CM register so no * need for an offset.
*/
cal->syscon_camerrx = syscon;
cal->syscon_camerrx_offset = 0;
return 0;
}
staticint cal_probe(struct platform_device *pdev)
{ struct cal_dev *cal; bool connected = false; unsignedint i; int ret; int irq;
cal = devm_kzalloc(&pdev->dev, sizeof(*cal), GFP_KERNEL); if (!cal) return -ENOMEM;
cal->data = of_device_get_match_data(&pdev->dev); if (!cal->data) {
dev_err(&pdev->dev, "Could not get feature data based on compatible version\n"); return -ENODEV;
}
/* Read the revision and hardware info to verify hardware access. */
pm_runtime_enable(&pdev->dev);
ret = pm_runtime_resume_and_get(&pdev->dev); if (ret) goto error_pm_runtime;
if (cal->data->flags & DRA72_CAL_PRE_ES2_LDO_DISABLE) { /* * Apply errata on both port everytime we (re-)enable * the clock
*/ for (i = 0; i < cal->data->num_csi2_phy; i++)
cal_camerarx_i913_errata(cal->phy[i]);
}
/* * Enable global interrupts that are not related to a particular * CAMERARAX or context.
*/
cal_write(cal, CAL_HL_IRQENABLE_SET(0), CAL_HL_IRQ_OCPO_ERR_MASK);
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder die Vollständigkeit noch die Richtigkeit
noch die Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.