/* * Make sure that the state switch is allowed and add logging for debugging * purposes
*/ staticint switch_state(struct vpu_instance *inst, enum vpu_instance_state state)
{ switch (state) { case VPU_INST_STATE_NONE: break; case VPU_INST_STATE_OPEN: if (inst->state != VPU_INST_STATE_NONE) goto invalid_state_switch; goto valid_state_switch; case VPU_INST_STATE_INIT_SEQ: if (inst->state != VPU_INST_STATE_OPEN && inst->state != VPU_INST_STATE_STOP) goto invalid_state_switch; goto valid_state_switch; case VPU_INST_STATE_PIC_RUN: if (inst->state != VPU_INST_STATE_INIT_SEQ) goto invalid_state_switch; goto valid_state_switch; case VPU_INST_STATE_STOP: goto valid_state_switch;
}
invalid_state_switch:
WARN(1, "Invalid state switch from %s to %s.\n",
state_to_str(inst->state), state_to_str(state)); return -EINVAL;
valid_state_switch:
dev_dbg(inst->dev->dev, "Switch state from %s to %s.\n",
state_to_str(inst->state), state_to_str(state));
inst->state = state; return 0;
}
/*
 * Signal end-of-stream to the firmware by queueing a zero-sized
 * bitstream-buffer update. Returns 0 on success, or the error code
 * from the firmware command on failure.
 */
static int wave5_vpu_dec_set_eos_on_firmware(struct vpu_instance *inst)
{
	int ret = wave5_vpu_dec_update_bitstream_buffer(inst, 0);

	/*
	 * To set the EOS flag, a command is sent to the firmware.
	 * That command may never return (timeout) or may report an error.
	 */
	if (ret)
		dev_err(inst->dev->dev, "Setting EOS for the bitstream, fail: %d\n", ret);

	return ret;
}
/*
 * NOTE(review): fragment of the decode-finish (IRQ completion) handler;
 * the function header and several intermediate statements were lost in
 * extraction, so brace balance below reflects only what is visible.
 */
/* Handle the case the last bitstream buffer has been picked */
if (src_buf == m2m_ctx->last_src_buf) {
	int ret;

	m2m_ctx->last_src_buf = NULL;
	/* Tell the firmware no further bitstream data will arrive. */
	ret = wave5_vpu_dec_set_eos_on_firmware(inst);
	if (ret)
		dev_warn(inst->dev->dev, "Setting EOS for the bitstream, fail: %d\n", ret);
	break;
	}
}

dev_dbg(inst->dev->dev, "%s: Fetch output info from firmware.", __func__);

ret = wave5_vpu_dec_get_output_info(inst, &dec_info);
if (ret) {
	/* Without output info no progress can be made; end the M2M job. */
	dev_warn(inst->dev->dev, "%s: could not get output info.", __func__);
	v4l2_m2m_job_finish(inst->v4l2_m2m_dev, m2m_ctx);
	return;
}

if (!vb2_is_streaming(dst_vq)) {
	/* CAPTURE queue stopped; there is nowhere to deliver frames. */
	dev_dbg(inst->dev->dev, "%s: capture is not streaming..", __func__);
	v4l2_m2m_job_finish(inst->v4l2_m2m_dev, m2m_ctx);
	return;
}

/* Remove decoded buffer from the ready queue now that it has been
 * decoded.
 */
if (dec_info.index_frame_decoded >= 0) {
	struct vb2_buffer *vb = vb2_get_buffer(dst_vq,
					       dec_info.index_frame_decoded);
	if (vb) {
		dec_buf = to_vb2_v4l2_buffer(vb);
		/* Carry the source timestamp over to the decoded frame. */
		dec_buf->vb2_buf.timestamp = inst->timestamp;
	} else {
		dev_warn(inst->dev->dev, "%s: invalid decoded frame index %i",
			 __func__, dec_info.index_frame_decoded);
	}
}

if (dec_info.index_frame_display >= 0) {
	disp_buf = v4l2_m2m_dst_buf_remove_by_idx(m2m_ctx, dec_info.index_frame_display);
	if (!disp_buf)
		dev_warn(inst->dev->dev, "%s: invalid display frame index %i",
			 __func__, dec_info.index_frame_display);
}

/* If there is anything to display, do that now */
if (disp_buf) {
	struct vpu_dst_buffer *dst_vpu_buf = wave5_to_vpu_dst_buf(disp_buf);
	/* NOTE(review): the code that consumes dst_vpu_buf is missing from
	 * this excerpt — verify against the complete file. */

	/*
	 * During a resolution change and while draining, the firmware may flush
	 * the reorder queue regardless of having a matching decoding operation
	 * pending. Only terminate the job if there are no more IRQ coming.
	 */
	wave5_vpu_dec_give_command(inst, DEC_GET_QUEUE_STATUS, &q_status);
	if (q_status.report_queue_count == 0 &&
	    (q_status.instance_queue_count == 0 || dec_info.sequence_changed)) {
		dev_dbg(inst->dev->dev, "%s: finishing job.\n", __func__);
		pm_runtime_put_autosuspend(inst->dev->dev);
		v4l2_m2m_job_finish(inst->v4l2_m2m_dev, m2m_ctx);
	}
}
/*
 * Handle a V4L2_DEC_CMD_STOP request: mark the instance as draining and
 * forward EOS to the firmware. Returns -EBUSY while a drain is already
 * in progress. NOTE(review): the unlock_and_return label and the
 * function tail are not visible in this excerpt.
 */
static int wave5_vpu_dec_stop(struct vpu_instance *inst)
{
	int ret = 0;
	unsigned long flags;
	struct v4l2_m2m_ctx *m2m_ctx = inst->v4l2_fh.m2m_ctx;

	spin_lock_irqsave(&inst->state_spinlock, flags);

	if (m2m_ctx->is_draining) {
		ret = -EBUSY;
		goto unlock_and_return;
	}

	if (inst->state != VPU_INST_STATE_NONE) {
		/*
		 * Temporarily release the state_spinlock so that subsequent
		 * calls do not block on a mutex while inside this spinlock.
		 */
		spin_unlock_irqrestore(&inst->state_spinlock, flags);
		ret = wave5_vpu_dec_set_eos_on_firmware(inst);
		if (ret)
			return ret;

		spin_lock_irqsave(&inst->state_spinlock, flags);
		/*
		 * TODO eliminate this check by using a separate check for
		 * draining triggered by a resolution change.
		 */
		if (m2m_ctx->is_draining) {
			ret = -EBUSY;
			goto unlock_and_return;
		}
	}

	/*
	 * Used to remember the EOS state after the streamoff/on transition on
	 * the capture queue.
	 */
	inst->eos = true;

	/*
	 * Deferred to device run in case it wasn't in the ring buffer
	 * yet. In other case, we have to send the EOS signal to the
	 * firmware so that any pending PIC_RUN ends without new
	 * bitstream buffer.
	 */
	if (m2m_ctx->last_src_buf)
		goto unlock_and_return;

	if (inst->state == VPU_INST_STATE_NONE) {
		/* Never started decoding: complete the drain immediately. */
		send_eos_event(inst);
		flag_last_buffer_done(inst);
	}
/* NOTE(review): excerpt of the VIDIOC_DECODER_CMD handler; the function
 * header and tail are not visible here.
 */
ret = v4l2_m2m_ioctl_try_decoder_cmd(file, fh, dc);
if (ret)
	return ret;

switch (dc->cmd) {
case V4L2_DEC_CMD_STOP:
	/* Begin draining; EOS handling continues in wave5_vpu_dec_stop(). */
	ret = wave5_vpu_dec_stop(inst);
	/* Just in case we don't have anything to decode anymore */
	v4l2_m2m_try_schedule(m2m_ctx);
	break;
case V4L2_DEC_CMD_START:
	ret = wave5_vpu_dec_start(inst);
	break;
default:
	ret = -EINVAL;
}
/* NOTE(review): excerpt of the v4l2_ioctl_ops table — all entries use
 * the stock mem2mem helper implementations.
 */
.vidioc_reqbufs = v4l2_m2m_ioctl_reqbufs,
/*
 * Firmware does not support CREATE_BUFS for CAPTURE queue. Since
 * there is no immediate use-case for supporting CREATE_BUFS on
 * just the OUTPUT queue, disable CREATE_BUFS altogether.
 */
.vidioc_querybuf = v4l2_m2m_ioctl_querybuf,
.vidioc_prepare_buf = v4l2_m2m_ioctl_prepare_buf,
.vidioc_qbuf = v4l2_m2m_ioctl_qbuf,
.vidioc_expbuf = v4l2_m2m_ioctl_expbuf,
.vidioc_dqbuf = v4l2_m2m_ioctl_dqbuf,
.vidioc_streamon = v4l2_m2m_ioctl_streamon,
.vidioc_streamoff = v4l2_m2m_ioctl_streamoff,
/* NOTE(review): fragments of the CAPTURE start-streaming / display-flag
 * paths; the enclosing function headers are missing from this excerpt.
 */
ret = wave5_vpu_dec_register_frame_buffer_ex(inst, non_linear_num, linear_num,
					     fb_stride, inst->dst_fmt.height);
if (ret) {
	dev_dbg(inst->dev->dev, "%s: vpu_dec_register_frame_buffer_ex fail: %d",
		__func__, ret);
	return ret;
}

/*
 * Mark all frame buffers as out of display, to avoid using them before
 * the application have them queued.
 */
for (i = 0; i < v4l2_m2m_num_dst_bufs_ready(m2m_ctx); i++) {
	ret = wave5_vpu_dec_set_disp_flag(inst, i);
	if (ret) {
		dev_dbg(inst->dev->dev, "%s: Setting display flag of buf index: %u, fail: %d\n",
			__func__, i, ret);
	}
}

ret = wave5_vpu_dec_clr_disp_flag(inst, vbuf->vb2_buf.index);
if (ret)
	/* NOTE(review): the message reports 'i' while the flag cleared above
	 * is for vbuf->vb2_buf.index — verify these refer to the same buffer
	 * in the complete file. */
	dev_dbg(inst->dev->dev, "%s: Clearing display flag of buf index: %u, fail: %d\n",
		__func__, i, ret);
}
/* NOTE(review): interior of the per-buffer loop that copies queued
 * OUTPUT buffers into the bitstream ring buffer; the loop header is not
 * visible in this excerpt.
 */
ret = write_to_ringbuffer(inst, src_buf, src_size, ring_buffer, wr_ptr);
if (ret) {
	dev_err(inst->dev->dev, "Write src buf (%u) to ring buffer, fail: %d\n",
		vbuf->vb2_buf.index, ret);
	return ret;
}

/* Let the firmware know about the newly written bitstream data. */
ret = wave5_vpu_dec_update_bitstream_buffer(inst, src_size);
if (ret) {
	dev_dbg(inst->dev->dev, "update_bitstream_buffer fail: %d for src buf (%u)\n",
		ret, vbuf->vb2_buf.index);
	break;
}

/* The buffer content now lives in the ring buffer. */
vpu_buf->consumed = true;

/* Don't write buffers passed the last one while draining. */
if (v4l2_m2m_is_last_draining_src_buf(m2m_ctx, vbuf)) {
	dev_dbg(inst->dev->dev, "last src buffer written to the ring buffer\n");
	break;
}
}
/* NOTE(review): excerpt of the CAPTURE buffer-queue handler; the
 * function header and the tail of the second branch are missing.
 */
if (inst->state == VPU_INST_STATE_PIC_RUN) {
	struct vpu_dst_buffer *vpu_buf = wave5_to_vpu_dst_buf(vbuf);
	int ret;

	/*
	 * The buffer is already registered just clear the display flag
	 * to let the firmware know it can be used.
	 */
	vpu_buf->display = false;
	ret = wave5_vpu_dec_clr_disp_flag(inst, vb->index);
	if (ret) {
		dev_dbg(inst->dev->dev, "%s: Clearing the display flag of buffer index: %u, fail: %d\n",
			__func__, vb->index, ret);
	}
}

/* Presumably marks this as the empty "last" buffer during draining by
 * zeroing all plane payloads — TODO confirm against the complete file. */
if (vb2_is_streaming(vb->vb2_queue) && v4l2_m2m_dst_buf_is_last(m2m_ctx)) {
	unsigned int i;

	for (i = 0; i < vb->num_planes; i++)
		vb2_set_plane_payload(vb, i, 0);
/* NOTE(review): excerpt of the vb2 start_streaming handler; the OUTPUT
 * branch header above this code and the close of the CAPTURE branch are
 * not visible in this excerpt.
 */
ret = wave5_vpu_dec_open(inst, &open_param);
if (ret) {
	dev_dbg(inst->dev->dev, "%s: decoder opening, fail: %d\n",
		__func__, ret);
	goto free_bitstream_vbuf;
}

ret = switch_state(inst, VPU_INST_STATE_OPEN);
if (ret)
	goto free_bitstream_vbuf;
} else if (q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
	struct dec_initial_info *initial_info =
		&inst->codec_info->dec_info.initial_info;

	/* Restarting after a stop re-runs the sequence initialization. */
	if (inst->state == VPU_INST_STATE_STOP)
		ret = switch_state(inst, VPU_INST_STATE_INIT_SEQ);
	if (ret)
		goto return_buffers;

	if (inst->state == VPU_INST_STATE_INIT_SEQ &&
	    inst->dev->product_code == WAVE521C_CODE) {
		/* WAVE521C only accepts streams with 8-bit luma depth. */
		if (initial_info->luma_bitdepth != 8) {
			dev_info(inst->dev->dev, "%s: no support for %d bit depth",
				 __func__, initial_info->luma_bitdepth);
			ret = -EINVAL;
			goto return_buffers;
		}
	}

/* Mark every ready CAPTURE buffer as not displayable yet. */
for (i = 0; i < v4l2_m2m_num_dst_bufs_ready(m2m_ctx); i++) {
	ret = wave5_vpu_dec_set_disp_flag(inst, i);
	if (ret)
		dev_dbg(inst->dev->dev, "%s: Setting display flag of buf index: %u, fail: %d\n",
			__func__, i, ret);
}
/* NOTE(review): excerpt of OUTPUT-queue stop-streaming / flush
 * handling; the enclosing function header is not visible here.
 */
/* Return all still-queued source buffers to userspace as errored. */
while ((buf = v4l2_m2m_src_buf_remove(m2m_ctx))) {
	dev_dbg(inst->dev->dev, "%s: (Multiplanar) buf type %4u | index %4u\n",
		__func__, buf->vb2_buf.type, buf->vb2_buf.index);
	v4l2_m2m_buf_done(buf, VB2_BUF_STATE_ERROR);
}

/* Drain pending output info and hand frames back to the firmware. */
while (wave5_vpu_dec_get_output_info(inst, &dec_info) == 0) {
	if (dec_info.index_frame_display >= 0)
		wave5_vpu_dec_set_disp_flag(inst, dec_info.index_frame_display);
}

ret = wave5_vpu_flush_instance(inst);
if (ret)
	return ret;

/* Reset the ring buffer information */
new_rd_ptr = wave5_vpu_dec_get_rd_ptr(inst);
inst->last_rd_ptr = new_rd_ptr;
inst->codec_info->dec_info.stream_rd_ptr = new_rd_ptr;
inst->codec_info->dec_info.stream_wr_ptr = new_rd_ptr;

if (v4l2_m2m_has_stopped(m2m_ctx))
	send_eos_event(inst);

/* streamoff on output cancels any draining operation */
inst->eos = false;
/* NOTE(review): excerpt of CAPTURE-queue stop-streaming; the body of
 * the buffer-removal loop below is truncated in this excerpt.
 */
/* Return all frame buffers to the firmware's display-flag set. */
for (i = 0; i < v4l2_m2m_num_dst_bufs_ready(m2m_ctx); i++) {
	ret = wave5_vpu_dec_set_disp_flag(inst, i);
	if (ret)
		dev_dbg(inst->dev->dev, "%s: Setting display flag of buf index: %u, fail: %d\n",
			__func__, i, ret);
}

while ((buf = v4l2_m2m_dst_buf_remove(m2m_ctx))) {
	u32 plane;

	dev_dbg(inst->dev->dev, "%s: buf type %4u | index %4u\n",
		__func__, buf->vb2_buf.type, buf->vb2_buf.index);
/* NOTE(review): excerpt of the m2m device_run handler; the function
 * header and part of the OPEN-state error path are missing from this
 * excerpt.
 */
dev_dbg(inst->dev->dev, "%s: Fill the ring buffer with new bitstream data", __func__);
pm_runtime_resume_and_get(inst->dev->dev);
ret = fill_ringbuffer(inst);
if (ret) {
	dev_warn(inst->dev->dev, "Filling ring buffer failed\n");
	goto finish_job_and_return;
}

switch (inst->state) {
case VPU_INST_STATE_OPEN:
	ret = initialize_sequence(inst);
	if (ret) {
		unsigned long flags;
		/* NOTE(review): the remainder of this error path was lost in
		 * extraction; the text resumes at the next case label. */
case VPU_INST_STATE_INIT_SEQ:
	/*
	 * Do this early, preparing the fb can trigger an IRQ before
	 * we had a chance to switch, which leads to an invalid state
	 * change.
	 */
	switch_state(inst, VPU_INST_STATE_PIC_RUN);

	/*
	 * During DRC, the picture decoding remains pending, so just leave the job
	 * active until this decode operation completes.
	 */
	wave5_vpu_dec_give_command(inst, DEC_GET_QUEUE_STATUS, &q_status);

	/*
	 * The sequence must be analyzed first to calculate the proper
	 * size of the auxiliary buffers.
	 */
	ret = wave5_prepare_fb(inst);
	if (ret) {
		dev_warn(inst->dev->dev, "Framebuffer preparation, fail: %d\n", ret);
		switch_state(inst, VPU_INST_STATE_STOP);
		break;
	}

	if (q_status.instance_queue_count) {
		dev_dbg(inst->dev->dev, "%s: leave with active job", __func__);
		return;
	}
	fallthrough;
case VPU_INST_STATE_PIC_RUN:
	ret = start_decode(inst, &fail_res);
	if (ret) {
		dev_err(inst->dev->dev, "Frame decoding on m2m context (%p), fail: %d (result: %d)\n",
			m2m_ctx, ret, fail_res);
		break;
	}
	/* Return so that we leave this job active */
	dev_dbg(inst->dev->dev, "%s: leave with active job", __func__);
	return;
default:
	WARN(1, "Execution of a job in state %s illegal.\n", state_to_str(inst->state));
	break;
}

/* Reached via break: stop the instance and tell the firmware EOS. */
ret = switch_state(inst, VPU_INST_STATE_STOP);
if (ret)
	return;

ret = wave5_vpu_dec_set_eos_on_firmware(inst);
if (ret)
	dev_warn(inst->dev->dev, "Setting EOS for the bitstream, fail: %d\n", ret);
}
/*
 * m2m job_ready callback: decide, based on the current instance state
 * and queue occupancy, whether a decode job may run (ret = 1) or not
 * (ret = 0). NOTE(review): the unlock and final return are missing
 * from this excerpt.
 */
static int wave5_vpu_dec_job_ready(void *priv)
{
	struct vpu_instance *inst = priv;
	struct v4l2_m2m_ctx *m2m_ctx = inst->v4l2_fh.m2m_ctx;
	unsigned long flags;
	int ret = 0;

	/* State is checked under the spinlock to stay consistent with IRQ
	 * handling. */
	spin_lock_irqsave(&inst->state_spinlock, flags);

	switch (inst->state) {
	case VPU_INST_STATE_NONE:
		dev_dbg(inst->dev->dev, "Decoder must be open to start queueing M2M jobs!\n");
		break;
	case VPU_INST_STATE_OPEN:
		if (wave5_is_draining_or_eos(inst) || !v4l2_m2m_has_stopped(m2m_ctx) ||
		    v4l2_m2m_num_src_bufs_ready(m2m_ctx) > 0) {
			ret = 1;
			break;
		}
		dev_dbg(inst->dev->dev, "Decoder must be draining or >= 1 OUTPUT queue buffer must be queued!\n");
		break;
	case VPU_INST_STATE_INIT_SEQ:
	case VPU_INST_STATE_PIC_RUN:
		/* Need a streaming CAPTURE queue, enough destination
		 * buffers, and bitstream data (unless draining). */
		if (!m2m_ctx->cap_q_ctx.q.streaming) {
			dev_dbg(inst->dev->dev, "CAPTURE queue must be streaming to queue jobs!\n");
			break;
		} else if (v4l2_m2m_num_dst_bufs_ready(m2m_ctx) < (inst->fbc_buf_count - 1)) {
			dev_dbg(inst->dev->dev, "No capture buffer ready to decode!\n");
			break;
		} else if (!wave5_is_draining_or_eos(inst) &&
			   !v4l2_m2m_num_src_bufs_ready(m2m_ctx)) {
			dev_dbg(inst->dev->dev, "No bitstream data to decode!\n");
			break;
		}
		ret = 1;
		break;
	case VPU_INST_STATE_STOP:
		dev_dbg(inst->dev->dev, "Decoder is stopped, not running.\n");
		break;
	}
/* NOTE(review): excerpt of the decoder file-open path; statements
 * before and after this span are not visible in this excerpt.
 */
inst->v4l2_m2m_dev = inst->dev->v4l2_m2m_dec_dev;
inst->v4l2_fh.m2m_ctx =
	v4l2_m2m_ctx_init(inst->v4l2_m2m_dev, inst, wave5_vpu_dec_queue_init);
if (IS_ERR(inst->v4l2_fh.m2m_ctx)) {
	ret = PTR_ERR(inst->v4l2_fh.m2m_ctx);
	goto cleanup_inst;
}
m2m_ctx = inst->v4l2_fh.m2m_ctx;

v4l2_m2m_set_src_buffered(m2m_ctx, true);
v4l2_m2m_set_dst_buffered(m2m_ctx, true);
/*
 * We use the M2M job queue to ensure synchronization of steps where
 * needed, as IOCTLs can occur at anytime and we need to run commands on
 * the firmware in a specified order.
 * In order to initialize the sequence on the firmware within an M2M
 * job, the M2M framework needs to be able to queue jobs before
 * the CAPTURE queue has been started, because we need the results of the
 * initialization to properly prepare the CAPTURE queue with the correct
 * amount of buffers.
 * By setting ignore_cap_streaming to true the m2m framework will call
 * job_ready as soon as the OUTPUT queue is streaming, instead of
 * waiting until both the CAPTURE and OUTPUT queues are streaming.
 */
m2m_ctx->ignore_cap_streaming = true;

/*
 * For Wave515 SRAM memory was already allocated
 * at wave5_vpu_dec_register_device()
 */
if (inst->dev->product_code != WAVE515_CODE)
	wave5_vdi_allocate_sram(inst->dev);

ret = mutex_lock_interruptible(&dev->dev_lock);
if (ret)
	goto cleanup_inst;

/* First instance on this device: enable runtime-PM autosuspend. */
if (list_empty(&dev->instances))
	pm_runtime_use_autosuspend(inst->dev->dev);
/*
 * Register the decoder video device and its mem2mem framework instance.
 * Returns 0 on success or a negative errno.
 *
 * NOTE(review): statements appear to be missing from this excerpt
 * (vdev_dec field setup between allocation and v4l2_m2m_init), and the
 * error paths after v4l2_m2m_init do not release dev->v4l2_m2m_dec_dev
 * — verify both against the complete file.
 */
int wave5_vpu_dec_register_device(struct vpu_device *dev)
{
	struct video_device *vdev_dec;
	int ret;

	/*
	 * Secondary AXI setup for Wave515 is done by INIT_VPU command,
	 * i.e. wave5_vpu_init(), that's why we allocate SRAM memory early.
	 */
	if (dev->product_code == WAVE515_CODE)
		wave5_vdi_allocate_sram(dev);

	vdev_dec = devm_kzalloc(dev->v4l2_dev.dev, sizeof(*vdev_dec), GFP_KERNEL);
	if (!vdev_dec)
		return -ENOMEM;

	dev->v4l2_m2m_dec_dev = v4l2_m2m_init(&wave5_vpu_dec_m2m_ops);
	if (IS_ERR(dev->v4l2_m2m_dec_dev)) {
		ret = PTR_ERR(dev->v4l2_m2m_dec_dev);
		dev_err(dev->dev, "v4l2_m2m_init, fail: %d\n", ret);
		return -EINVAL;
	}

	ret = video_register_device(vdev_dec, VFL_TYPE_VIDEO, -1);
	if (ret)
		return ret;

	video_set_drvdata(vdev_dec, dev);

	return 0;
}
/*
 * Tear down the decoder video device: free the Wave515 SRAM allocated
 * by wave5_vpu_dec_register_device(), unregister the video device, and
 * release the mem2mem framework instance if one was created.
 */
void wave5_vpu_dec_unregister_device(struct vpu_device *dev)
{
	/* Wave515 parts allocate SRAM at registration time; free it here. */
	if (dev->product_code == WAVE515_CODE)
		wave5_vdi_free_sram(dev);

	video_unregister_device(dev->video_dev_dec);

	if (dev->v4l2_m2m_dec_dev)
		v4l2_m2m_release(dev->v4l2_m2m_dec_dev);
}
Messung V0.5
¤ Dauer der Verarbeitung: 0.7 Sekunden
(vorverarbeitet)
¤
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.