for (i = 0; i < ARRAY_SIZE(formats); ++i) { if (formats[i].code == code) return &formats[i];
}
return NULL;
}
/* * isp_video_mbus_to_pix - Convert v4l2_mbus_framefmt to v4l2_pix_format * @video: ISP video instance * @mbus: v4l2_mbus_framefmt format (input) * @pix: v4l2_pix_format format (output) * * Fill the output pix structure with information from the input mbus format. * The bytesperline and sizeimage fields are computed from the requested bytes * per line value in the pix format and information from the video instance. * * Return the number of padding bytes at end of line.
*/ staticunsignedint isp_video_mbus_to_pix(conststruct isp_video *video, conststruct v4l2_mbus_framefmt *mbus, struct v4l2_pix_format *pix)
{ unsignedint bpl = pix->bytesperline; unsignedint min_bpl; unsignedint i;
for (i = 0; i < ARRAY_SIZE(formats); ++i) { if (formats[i].code == mbus->code) break;
}
if (WARN_ON(i == ARRAY_SIZE(formats))) return 0;
min_bpl = pix->width * formats[i].bpp;
/* Clamp the requested bytes per line value. If the maximum bytes per * line value is zero, the module doesn't support user configurable line * sizes. Override the requested value with the minimum in that case.
*/ if (video->bpl_max)
bpl = clamp(bpl, min_bpl, video->bpl_max); else
bpl = min_bpl;
if (!video->bpl_zero_padding || bpl != min_bpl)
bpl = ALIGN(bpl, video->bpl_alignment);
/* Skip the last format in the loop so that it will be selected if no * match is found.
*/ for (i = 0; i < ARRAY_SIZE(formats) - 1; ++i) { if (formats[i].pixelformat == pix->pixelformat) break;
}
/* Return a pointer to the ISP video instance at the far end of the pipeline. */ staticint isp_video_get_graph_data(struct isp_video *video, struct isp_pipeline *pipe)
{ struct media_pipeline_entity_iter iter; struct media_entity *entity; struct isp_video *far_end = NULL; int ret;
	/* NOTE(review): this definition is truncated in this chunk. The entity
	 * walk that locates the far-end video node and the assignment of the
	 * pipeline input/output pointers are missing — restore from the
	 * upstream omap3isp driver before building.
	 */
ret = media_pipeline_entity_iter_init(&pipe->pipe, &iter); if (ret) return ret;
/* NOTE(review): headerless fragment of a vb2 buffer-prepare callback; the
 * enclosing function signature and the payload/DMA-address bookkeeping that
 * follow in the upstream driver are not visible in this chunk.
 */
/* Refuse to prepare the buffer is the video node has registered an * error. We don't need to take any lock here as the operation is * inherently racy. The authoritative check will be performed in the * queue handler, which can't return an error, this check is just a best * effort to notify userspace as early as possible.
*/ if (unlikely(video->error)) return -EIO;
	/* The ISP DMA engine requires 32-byte aligned buffer start addresses. */
addr = vb2_dma_contig_plane_dma_addr(buf, 0); if (!IS_ALIGNED(addr, 32)) {
dev_dbg(video->isp->dev, "Buffer address must be aligned to 32 bytes boundary.\n"); return -EINVAL;
}
/* * isp_video_buffer_queue - Add buffer to streaming queue * @buf: Video buffer * * In memory-to-memory mode, start streaming on the pipeline if buffers are * queued on both the input and the output, if the pipeline isn't already busy. * If the pipeline is busy, it will be restarted in the output module interrupt * handler.
*/ staticvoid isp_video_buffer_queue(struct vb2_buffer *buf)
{ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(buf); struct isp_video_fh *vfh = vb2_get_drv_priv(buf->vb2_queue); struct isp_buffer *buffer = to_isp_buffer(vbuf); struct isp_video *video = vfh->video; struct isp_pipeline *pipe = to_isp_pipeline(&video->video.entity); enum isp_pipeline_state state; unsignedlong flags; unsignedint empty; unsignedint start;
spin_lock_irqsave(&video->irqlock, flags);
if (unlikely(video->error)) {
vb2_buffer_done(&buffer->vb.vb2_buf, VB2_BUF_STATE_ERROR);
spin_unlock_irqrestore(&video->irqlock, flags); return;
}
start = isp_pipeline_ready(pipe); if (start)
pipe->state |= ISP_PIPELINE_STREAM;
spin_unlock_irqrestore(&pipe->lock, flags);
if (start)
omap3isp_pipeline_set_stream(pipe,
ISP_PIPELINE_STREAM_SINGLESHOT);
}
}
/* * omap3isp_video_return_buffers - Return all queued buffers to videobuf2 * @video: ISP video object * @state: new state for the returned buffers * * Return all buffers queued on the video node to videobuf2 in the given state. * The buffer state should be VB2_BUF_STATE_QUEUED if called due to an error * when starting the stream, or VB2_BUF_STATE_ERROR otherwise. * * The function must be called with the video irqlock held.
*/ staticvoid omap3isp_video_return_buffers(struct isp_video *video, enum vb2_buffer_state state)
{ while (!list_empty(&video->dmaqueue)) { struct isp_buffer *buf;
	/* NOTE(review): this chunk is corrupted — the loop body that removes
	 * each buffer from the DMA queue and completes it via
	 * vb2_buffer_done() is missing, and the lines below belong to the
	 * start-streaming path of a different function that was spliced in
	 * here. Restore both from the upstream omap3isp driver.
	 */
/* In sensor-to-memory mode, the stream can be started synchronously * to the stream on command. In memory-to-memory mode, it will be * started when buffers are queued on both the input and output.
*/ if (pipe->input) return 0;
ret = omap3isp_pipeline_set_stream(pipe,
ISP_PIPELINE_STREAM_CONTINUOUS); if (ret < 0) {
	/* On failure, give queued buffers back in the QUEUED state. */
spin_lock_irqsave(&video->irqlock, flags);
omap3isp_video_return_buffers(video, VB2_BUF_STATE_QUEUED);
spin_unlock_irqrestore(&video->irqlock, flags); return ret;
}
	/* Flag an underrun if no buffer was queued before the stream start. */
spin_lock_irqsave(&video->irqlock, flags); if (list_empty(&video->dmaqueue))
video->dmaqueue_flags |= ISP_VIDEO_DMAQUEUE_UNDERRUN;
spin_unlock_irqrestore(&video->irqlock, flags);
/* * omap3isp_video_buffer_next - Complete the current buffer and return the next * @video: ISP video object * * Remove the current video buffer from the DMA queue and fill its timestamp and * field count before handing it back to videobuf2. * * For capture video nodes the buffer state is set to VB2_BUF_STATE_DONE if no * error has been flagged in the pipeline, or to VB2_BUF_STATE_ERROR otherwise. * For video output nodes the buffer state is always set to VB2_BUF_STATE_DONE. * * The DMA queue is expected to contain at least one buffer. * * Return a pointer to the next buffer in the DMA queue, or NULL if the queue is * empty.
*/ struct isp_buffer *omap3isp_video_buffer_next(struct isp_video *video)
{ struct isp_pipeline *pipe = to_isp_pipeline(&video->video.entity); enum vb2_buffer_state vb_state; struct isp_buffer *buf; unsignedlong flags;
spin_lock_irqsave(&video->irqlock, flags); if (WARN_ON(list_empty(&video->dmaqueue))) {
spin_unlock_irqrestore(&video->irqlock, flags); return NULL;
}
	/* NOTE(review): truncated — the upstream code takes the head buffer off
	 * the DMA queue (list_first_entry + list_del), drops the irqlock and
	 * stamps the buffer timestamp here; as shown, @buf is used below
	 * without ever being assigned. Restore from the upstream driver.
	 */
/* Do frame number propagation only if this is the output video node. * Frame number either comes from the CSI receivers or it gets * incremented here if H3A is not active. * Note: There is no guarantee that the output buffer will finish * first, so the input number might lag behind by 1 in some cases.
*/ if (video == pipe->output && !pipe->do_propagation)
buf->vb.sequence =
atomic_inc_return(&pipe->frame_number); else
buf->vb.sequence = atomic_read(&pipe->frame_number);
	/* Each interlaced frame is made of two fields; halve the count. */
if (pipe->field != V4L2_FIELD_NONE)
buf->vb.sequence /= 2;
buf->vb.field = pipe->field;
/* Report pipeline errors to userspace on the capture device side. */ if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE && pipe->error) {
vb_state = VB2_BUF_STATE_ERROR;
pipe->error = false;
} else {
vb_state = VB2_BUF_STATE_DONE;
}
vb2_buffer_done(&buf->vb.vb2_buf, vb_state);
spin_lock_irqsave(&video->irqlock, flags);
if (list_empty(&video->dmaqueue)) { enum isp_pipeline_state state;
spin_unlock_irqrestore(&video->irqlock, flags);
if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
state = ISP_PIPELINE_QUEUE_OUTPUT
| ISP_PIPELINE_STREAM; else
state = ISP_PIPELINE_QUEUE_INPUT
| ISP_PIPELINE_STREAM;
	/* NOTE(review): truncated again — clearing @state from pipe->state,
	 * the underrun flagging, and the final "return next buffer" path are
	 * missing from this chunk.
	 */
/* * omap3isp_video_cancel_stream - Cancel stream on a video node * @video: ISP video object * * Cancelling a stream returns all buffers queued on the video node to videobuf2 * in the erroneous state and makes sure no new buffer can be queued.
*/ void omap3isp_video_cancel_stream(struct isp_video *video)
{ unsignedlong flags;
	/* NOTE(review): body truncated in this chunk — the irqlock-protected
	 * call returning all buffers in the error state and the error-flag
	 * setting are missing. Restore from the upstream omap3isp driver.
	 */
/* * omap3isp_video_resume - Perform resume operation on the buffers * @video: ISP video object * @continuous: Pipeline is in single shot mode if 0 or continuous mode otherwise * * This function is intended to be used on suspend/resume scenario. It * requests video queue layer to discard buffers marked as DONE if it's in * continuous mode and requests ISP modules to queue again the ACTIVE buffer * if there's any.
*/ void omap3isp_video_resume(struct isp_video *video, int continuous)
{ struct isp_buffer *buf = NULL;
	/* NOTE(review): body truncated in this chunk — the requeue/discard
	 * logic described in the header comment is missing. Restore from the
	 * upstream omap3isp driver.
	 */
/* NOTE(review): headerless fragment of the try/set-format path; the enclosing
 * function signature and the surrounding format negotiation are not visible
 * in this chunk.
 */
/* Replace unsupported field orders with sane defaults. */ switch (format->fmt.pix.field) { case V4L2_FIELD_NONE: /* Progressive is supported everywhere. */ break; case V4L2_FIELD_ALTERNATE: /* ALTERNATE is not supported on output nodes. */ if (video->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
format->fmt.pix.field = V4L2_FIELD_NONE; break; case V4L2_FIELD_INTERLACED: /* The ISP has no concept of video standard, select the * top-bottom order when the unqualified interlaced order is * requested.
*/
format->fmt.pix.field = V4L2_FIELD_INTERLACED_TB;
fallthrough; case V4L2_FIELD_INTERLACED_TB: case V4L2_FIELD_INTERLACED_BT: /* Interlaced orders are only supported at the CCDC output. */ if (video != &video->isp->isp_ccdc.video_out)
format->fmt.pix.field = V4L2_FIELD_NONE; break; case V4L2_FIELD_TOP: case V4L2_FIELD_BOTTOM: case V4L2_FIELD_SEQ_TB: case V4L2_FIELD_SEQ_BT: default: /* All other field orders are currently unsupported, default to * progressive.
*/
format->fmt.pix.field = V4L2_FIELD_NONE; break;
}
/* Fill the bytesperline and sizeimage fields by converting to media bus * format and back to pixel format.
*/
isp_video_pix_to_mbus(&format->fmt.pix, &fmt);
isp_video_mbus_to_pix(video, &fmt, &format->fmt.pix);
/* NOTE(review): headerless fragment of the VIDIOC_G_SELECTION handler; the
 * enclosing function signature and the declarations of @sel, @sdsel, @format
 * and @pad are not visible in this chunk.
 */
/* Crop targets are only valid on capture nodes, compose targets only on
 * output nodes.
 */
switch (sel->target) { case V4L2_SEL_TGT_CROP: case V4L2_SEL_TGT_CROP_BOUNDS: case V4L2_SEL_TGT_CROP_DEFAULT: if (video->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) return -EINVAL; break; case V4L2_SEL_TGT_COMPOSE: case V4L2_SEL_TGT_COMPOSE_BOUNDS: case V4L2_SEL_TGT_COMPOSE_DEFAULT: if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) return -EINVAL; break; default: return -EINVAL;
}
subdev = isp_video_remote_subdev(video, &pad); if (subdev == NULL) return -EINVAL;
/* Try the get selection operation first and fallback to get format if not * implemented.
*/
sdsel.pad = pad;
ret = v4l2_subdev_call(subdev, pad, get_selection, NULL, &sdsel); if (!ret)
sel->r = sdsel.r; if (ret != -ENOIOCTLCMD) return ret;
format.pad = pad;
ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &format); if (ret < 0) return ret == -ENOIOCTLCMD ? -ENOTTY : ret;
	/* NOTE(review): truncated — the fallback that derives the selection
	 * rectangle from the subdev format is missing from this chunk.
	 */
/* NOTE(review): headerless fragment of the external-subdev validation path;
 * the enclosing function signature and the declarations of @ctrls, @ctrl,
 * @pipe and @isp are not visible in this chunk.
 */
/* Query the external subdev's pixel rate control to validate the pipeline. */
ctrls.count = 1;
ctrls.controls = &ctrl;
ret = v4l2_g_ext_ctrls(pipe->external->ctrl_handler, &video->video,
NULL, &ctrls); if (ret < 0) {
dev_warn(isp->dev, "no pixel rate control in subdev %s\n",
pipe->external->name); return ret;
}
pipe->external_rate = ctrl.value64;
if (media_entity_enum_test(&pipe->ent_enum,
&isp->isp_ccdc.subdev.entity)) { unsignedint rate = UINT_MAX; /* * Check that maximum allowed CCDC pixel rate isn't * exceeded by the pixel rate.
*/
omap3isp_ccdc_max_rate(&isp->isp_ccdc, &rate); if (pipe->external_rate > rate) return -ENOSPC;
}
return 0;
}
/* * Stream management * * Every ISP pipeline has a single input and a single output. The input can be * either a sensor or a video node. The output is always a video node. * * As every pipeline has an output video node, the ISP video objects at the * pipeline output stores the pipeline state. It tracks the streaming state of * both the input and output, as well as the availability of buffers. * * In sensor-to-memory mode, frames are always available at the pipeline input. * Starting the sensor usually requires I2C transfers and must be done in * interruptible context. The pipeline is started and stopped synchronously * to the stream on/off commands. All modules in the pipeline will get their * subdev set stream handler called. The module at the end of the pipeline must * delay starting the hardware until buffers are available at its output. * * In memory-to-memory mode, starting/stopping the stream requires * synchronization between the input and output. ISP modules can't be stopped * in the middle of a frame, and at least some of the modules seem to become * busy as soon as they're started, even if they don't receive a frame start * event. For that reason frames need to be processed in single-shot mode. The * driver needs to wait until a frame is completely processed and written to * memory before restarting the pipeline for the next frame. Pipelined * processing might be possible but requires more testing. * * Stream start must be delayed until buffers are available at both the input * and output. The pipeline must be started in the vb2 queue callback with * the buffers queue spinlock held. The modules subdev set stream operation must * not sleep.
*/ staticint
isp_video_streamon(struct file *file, void *fh, enum v4l2_buf_type type)
{ struct isp_video_fh *vfh = to_isp_video_fh(fh); struct isp_video *video = video_drvdata(file); enum isp_pipeline_state state; struct isp_pipeline *pipe; unsignedlong flags; int ret;
if (type != video->type) return -EINVAL;
mutex_lock(&video->stream_lock);
/* Start streaming on the pipeline. No link touching an entity in the * pipeline can be activated or deactivated once streaming is started.
*/
pipe = to_isp_pipeline(&video->video.entity) ? : &video->pipe;
ret = media_entity_enum_init(&pipe->ent_enum, &video->isp->media_dev); if (ret) goto err_enum_init;
ret = video_device_pipeline_start(&video->video, &pipe->pipe); if (ret < 0) goto err_pipeline_start;
/* Verify that the currently configured format matches the output of * the connected subdev.
*/
ret = isp_video_check_format(video, vfh); if (ret < 0) goto err_check_format;
ret = isp_video_get_graph_data(video, pipe); if (ret < 0) goto err_check_format;
if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
state = ISP_PIPELINE_STREAM_OUTPUT | ISP_PIPELINE_IDLE_OUTPUT; else
state = ISP_PIPELINE_STREAM_INPUT | ISP_PIPELINE_IDLE_INPUT;
	/* NOTE(review): the upstream code updates pipe->state with @state under
	 * pipe->lock around this point; that section appears to be missing
	 * from this chunk, leaving @state computed but unused here.
	 */
ret = isp_video_check_external_subdevs(video, pipe); if (ret < 0) goto err_check_format;
/* Set the maximum time per frame as the value requested by userspace. * This is a soft limit that can be overridden if the hardware doesn't * support the request limit.
*/ if (video->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
pipe->max_timeperframe = vfh->timeperframe;
	/* NOTE(review): the queue setup (video->queue = &vfh->queue,
	 * INIT_LIST_HEAD(&video->dmaqueue), frame-number reset) that upstream
	 * performs before vb2_streamon() is missing from this chunk.
	 */
mutex_lock(&video->queue_lock);
ret = vb2_streamon(&vfh->queue, type);
mutex_unlock(&video->queue_lock); if (ret < 0) goto err_check_format;
mutex_unlock(&video->stream_lock);
return 0;
err_check_format:
video_device_pipeline_stop(&video->video);
err_pipeline_start: /* TODO: Implement PM QoS */ /* The DMA queue must be emptied here, otherwise CCDC interrupts that * will get triggered the next time the CCDC is powered up will try to * access buffers that might have been freed but still present in the * DMA queue. This can easily get triggered if the above * omap3isp_pipeline_set_stream() call fails on a system with a * free-running sensor.
*/
INIT_LIST_HEAD(&video->dmaqueue);
video->queue = NULL;
	/* NOTE(review): truncated — the err_enum_init label targeted by the
	 * goto above, the entity-enum cleanup, the stream_lock unlock and the
	 * final "return ret;" are missing from this chunk.
	 */
/*
 * omap3isp_video_register - Register the video device node
 * @video: ISP video object
 * @vdev: V4L2 device the video node belongs to
 *
 * Attach the video node to @vdev and register it with the V4L2 core. A
 * failure is logged but the (negative) error code is returned to the caller,
 * which decides how to handle it.
 *
 * Return 0 on success or a negative error code from video_register_device().
 */
int omap3isp_video_register(struct isp_video *video, struct v4l2_device *vdev)
{
	int ret;

	video->video.v4l2_dev = vdev;

	ret = video_register_device(&video->video, VFL_TYPE_VIDEO, -1);
	if (ret < 0)
		dev_err(video->isp->dev,
			"%s: could not register video device (%d)\n",
			__func__, ret);

	return ret;
}
Die Informationen auf dieser Webseite wurden nach bestem Wissen sorgfältig
zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereitgestellten Informationen zugesichert.

Bemerkung: Die farbliche Syntaxdarstellung und die Messung sind noch
experimentell. (vorverarbeitet)