while (!list_empty(lh)) {
b = list_entry(lh->next, struct s5p_mfc_buf, list); for (i = 0; i < b->b->vb2_buf.num_planes; i++)
vb2_set_plane_payload(&b->b->vb2_buf, i, 0);
vb2_buffer_done(&b->b->vb2_buf, VB2_BUF_STATE_ERROR);
list_del(&b->list);
}
}
if (test_bit(0, &dev->hw_lock))
atomic_inc(&dev->watchdog_cnt); if (atomic_read(&dev->watchdog_cnt) >= MFC_WATCHDOG_CNT) { /* * This means that hw is busy and no interrupts were * generated by hw for the Nth time of running this * watchdog timer. This usually means a serious hw * error. Now it is time to kill all instances and * reset the MFC.
*/
mfc_err("Time out during waiting for HW\n");
schedule_work(&dev->watchdog_work);
}
dev->watchdog_timer.expires = jiffies +
msecs_to_jiffies(MFC_WATCHDOG_INTERVAL);
add_timer(&dev->watchdog_timer);
}
staticvoid s5p_mfc_watchdog_worker(struct work_struct *work)
{ struct s5p_mfc_dev *dev; struct s5p_mfc_ctx *ctx; unsignedlong flags; int mutex_locked; int i, ret;
dev = container_of(work, struct s5p_mfc_dev, watchdog_work);
mfc_err("Driver timeout error handling\n"); /* * Lock the mutex that protects open and release. * This is necessary as they may load and unload firmware.
*/
mutex_locked = mutex_trylock(&dev->mfc_mutex); if (!mutex_locked)
mfc_err("Error: some instance may be closing/opening\n");
spin_lock_irqsave(&dev->irqlock, flags);
s5p_mfc_clock_off(dev);
for (i = 0; i < MFC_NUM_CONTEXTS; i++) {
ctx = dev->ctx[i]; if (!ctx) continue;
ctx->state = MFCINST_ERROR;
s5p_mfc_cleanup_queue(&ctx->dst_queue, &ctx->vq_dst);
s5p_mfc_cleanup_queue(&ctx->src_queue, &ctx->vq_src);
clear_work_bit(ctx);
wake_up_ctx(ctx, S5P_MFC_R2H_CMD_ERR_RET, 0);
}
clear_bit(0, &dev->hw_lock);
spin_unlock_irqrestore(&dev->irqlock, flags);
/* De-init MFC */
s5p_mfc_deinit_hw(dev);
/* * Double check if there is at least one instance running. * If no instance is in memory than no firmware should be present
*/ if (dev->num_inst > 0) {
ret = s5p_mfc_load_firmware(dev); if (ret) {
mfc_err("Failed to reload FW\n"); goto unlock;
}
s5p_mfc_clock_on(dev);
ret = s5p_mfc_init_hw(dev);
s5p_mfc_clock_off(dev); if (ret)
mfc_err("Failed to reinit FW\n");
}
unlock: if (mutex_locked)
mutex_unlock(&dev->mfc_mutex);
}
/* Make sure we actually have a new frame before continuing. */
frame_type = s5p_mfc_hw_call(dev->mfc_ops, get_dec_frame_type, dev); if (frame_type == S5P_FIMV_DECODE_FRAME_SKIPPED) return;
dec_y_addr = (u32)s5p_mfc_hw_call(dev->mfc_ops, get_dec_y_adr, dev);
/* * Copy timestamp / timecode from decoded src to dst and set * appropriate flags.
*/
src_buf = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list);
list_for_each_entry(dst_buf, &ctx->dst_queue, list) {
u32 addr = (u32)vb2_dma_contig_plane_dma_addr(&dst_buf->b->vb2_buf, 0);
/* If frame is same as previous then skip and do not dequeue */ if (frame_type == S5P_FIMV_DECODE_FRAME_SKIPPED) { if (!ctx->after_packed_pb)
ctx->sequence++;
ctx->after_packed_pb = 0; return;
}
ctx->sequence++; /* * The MFC returns address of the buffer, now we have to * check which vb2_buffer does it correspond to
*/
list_for_each_entry(dst_buf, &ctx->dst_queue, list) {
u32 addr = (u32)vb2_dma_contig_plane_dma_addr(&dst_buf->b->vb2_buf, 0);
/* Check if this is the buffer we're looking for */ if (addr == dspl_y_addr) {
list_del(&dst_buf->list);
ctx->dst_queue_cnt--;
dst_buf->b->sequence = ctx->sequence; if (s5p_mfc_hw_call(dev->mfc_ops,
get_pic_type_top, ctx) ==
s5p_mfc_hw_call(dev->mfc_ops,
get_pic_type_bot, ctx))
dst_buf->b->field = V4L2_FIELD_NONE; else
dst_buf->b->field =
V4L2_FIELD_INTERLACED;
vb2_set_plane_payload(&dst_buf->b->vb2_buf, 0,
ctx->luma_size);
vb2_set_plane_payload(&dst_buf->b->vb2_buf, 1,
ctx->chroma_size);
clear_bit(dst_buf->b->vb2_buf.index,
&ctx->dec_dst_flag);
/* All frames remaining in the buffer have been extracted */ if (dst_frame_status == S5P_FIMV_DEC_STATUS_DECODING_EMPTY) { if (ctx->state == MFCINST_RES_CHANGE_FLUSH) { staticconststruct v4l2_event ev_src_ch = {
.type = V4L2_EVENT_SOURCE_CHANGE,
.u.src_change.changes =
V4L2_EVENT_SRC_CH_RESOLUTION,
};
if (ctx) { /* Error recovery is dependent on the state of context */ switch (ctx->state) { case MFCINST_RES_CHANGE_INIT: case MFCINST_RES_CHANGE_FLUSH: case MFCINST_RES_CHANGE_END: case MFCINST_FINISHING: case MFCINST_FINISHED: case MFCINST_RUNNING: /* * It is highly probable that an error occurred * while decoding a frame
*/
clear_work_bit(ctx);
ctx->state = MFCINST_ERROR; /* Mark all dst buffers as having an error */
s5p_mfc_cleanup_queue(&ctx->dst_queue, &ctx->vq_dst); /* Mark all src buffers as having an error */
s5p_mfc_cleanup_queue(&ctx->src_queue, &ctx->vq_src);
wake_up_ctx(ctx, reason, err); break; default:
clear_work_bit(ctx);
ctx->state = MFCINST_ERROR;
wake_up_ctx(ctx, reason, err); break;
}
}
WARN_ON(test_and_clear_bit(0, &dev->hw_lock) == 0);
s5p_mfc_hw_call(dev->mfc_ops, clear_int_flags, dev);
s5p_mfc_clock_off(dev);
wake_up_dev(dev, reason, err);
}
mfc_debug_enter(); /* Reset the timeout watchdog */
atomic_set(&dev->watchdog_cnt, 0);
spin_lock(&dev->irqlock);
ctx = dev->ctx[dev->curr_ctx]; /* Get the reason of interrupt and the error code */
reason = s5p_mfc_hw_call(dev->mfc_ops, get_int_reason, dev);
err = s5p_mfc_hw_call(dev->mfc_ops, get_int_err, dev);
mfc_debug(1, "Int reason: %d (err: %08x)\n", reason, err); switch (reason) { case S5P_MFC_R2H_CMD_ERR_RET: /* An error has occurred */ if (ctx->state == MFCINST_RUNNING &&
(s5p_mfc_hw_call(dev->mfc_ops, err_dec, err) >=
dev->warn_start ||
err == S5P_FIMV_ERR_NO_VALID_SEQ_HDR ||
err == S5P_FIMV_ERR_INCOMPLETE_FRAME ||
err == S5P_FIMV_ERR_TIMEOUT))
s5p_mfc_handle_frame(ctx, reason, err); else
s5p_mfc_handle_error(dev, ctx, reason, err);
clear_bit(0, &dev->enter_suspend); break;
case S5P_MFC_R2H_CMD_SLICE_DONE_RET: case S5P_MFC_R2H_CMD_FIELD_DONE_RET: case S5P_MFC_R2H_CMD_FRAME_DONE_RET: if (ctx->c_ops->post_frame_start) { if (ctx->c_ops->post_frame_start(ctx))
mfc_err("post_frame_start() failed\n");
case S5P_MFC_R2H_CMD_CLOSE_INSTANCE_RET:
ctx->inst_no = MFC_NO_INSTANCE_SET;
ctx->state = MFCINST_FREE; goto irq_cleanup_hw;
case S5P_MFC_R2H_CMD_SYS_INIT_RET: case S5P_MFC_R2H_CMD_FW_STATUS_RET: case S5P_MFC_R2H_CMD_SLEEP_RET: case S5P_MFC_R2H_CMD_WAKEUP_RET: if (ctx)
clear_work_bit(ctx);
s5p_mfc_hw_call(dev->mfc_ops, clear_int_flags, dev);
clear_bit(0, &dev->hw_lock);
clear_bit(0, &dev->enter_suspend);
wake_up_dev(dev, reason, err); break;
case S5P_MFC_R2H_CMD_INIT_BUFFERS_RET:
s5p_mfc_handle_init_buffers(ctx, reason, err); break;
/* if dev is null, do cleanup that doesn't need dev */
mfc_debug_enter(); if (dev)
mutex_lock(&dev->mfc_mutex);
vb2_queue_release(&ctx->vq_src);
vb2_queue_release(&ctx->vq_dst); if (dev) {
s5p_mfc_clock_on(dev);
/* Mark context as idle */
clear_work_bit_irqsave(ctx); /* * If instance was initialised and not yet freed, * return instance and free resources
*/ if (ctx->state != MFCINST_FREE && ctx->state != MFCINST_INIT) {
mfc_debug(2, "Has to free instance\n");
s5p_mfc_close_mfc_inst(dev, ctx);
} /* hardware locking scheme */ if (dev->curr_ctx == ctx->num)
clear_bit(0, &dev->hw_lock);
dev->num_inst--; if (dev->num_inst == 0) {
mfc_debug(2, "Last instance\n");
s5p_mfc_deinit_hw(dev);
timer_delete_sync(&dev->watchdog_timer);
s5p_mfc_clock_off(dev); if (s5p_mfc_power_off(dev) < 0)
mfc_err("Power off failed\n");
} else {
mfc_debug(2, "Shutting down clock\n");
s5p_mfc_clock_off(dev);
}
} if (dev)
dev->ctx[ctx->num] = NULL;
s5p_mfc_dec_ctrls_delete(ctx);
v4l2_fh_del(&ctx->fh); /* vdev is gone if dev is null */ if (dev)
v4l2_fh_exit(&ctx->fh);
kfree(ctx);
mfc_debug_leave(); if (dev)
mutex_unlock(&dev->mfc_mutex);
/* * The memdevs are not proper OF platform devices, so in order for them * to be treated as valid DMA masters we need a bit of a hack to force * them to inherit the MFC node's DMA configuration.
*/
of_dma_configure(child, dev->of_node, true);
if (device_add(child) == 0) {
ret = of_reserved_mem_device_init_by_idx(child, dev->of_node,
idx); if (ret == 0) return child;
device_del(child);
}
err:
put_device(child); return NULL;
}
/* * Create and initialize virtual devices for accessing * reserved memory regions.
*/
mfc_dev->mem_dev[BANK_L_CTX] = s5p_mfc_alloc_memdev(dev, "left",
BANK_L_CTX); if (!mfc_dev->mem_dev[BANK_L_CTX]) return -ENODEV;
mfc_dev->mem_dev[BANK_R_CTX] = s5p_mfc_alloc_memdev(dev, "right",
BANK_R_CTX); if (!mfc_dev->mem_dev[BANK_R_CTX]) {
device_unregister(mfc_dev->mem_dev[BANK_L_CTX]); return -ENODEV;
}
/* Allocate memory for firmware and initialize both banks addresses */
ret = s5p_mfc_alloc_firmware(mfc_dev); if (ret) {
device_unregister(mfc_dev->mem_dev[BANK_R_CTX]);
device_unregister(mfc_dev->mem_dev[BANK_L_CTX]); return ret;
}
/* Valid buffers passed to MFC encoder with LAST_FRAME command * should not have address of bank2 - MFC will treat it as a null frame. * To avoid such situation we set bank2 address below the pool address.
*/
mfc_dev->dma_base[BANK_R_CTX] = bank2_dma_addr - align_size;
if (IS_ENABLED(CONFIG_DMA_CMA) || exynos_is_iommu_available(dev))
mem_size = SZ_8M;
if (mfc_mem_size)
mem_size = memparse(mfc_mem_size, NULL);
mfc_dev->mem_bitmap = bitmap_zalloc(mem_size >> PAGE_SHIFT, GFP_KERNEL); if (!mfc_dev->mem_bitmap) return -ENOMEM;
mfc_dev->mem_virt = dma_alloc_coherent(dev, mem_size,
&mfc_dev->mem_base, GFP_KERNEL); if (!mfc_dev->mem_virt) {
bitmap_free(mfc_dev->mem_bitmap);
dev_err(dev, "failed to preallocate %ld MiB for the firmware and context buffers\n",
(mem_size / SZ_1M)); return -ENOMEM;
}
mfc_dev->mem_size = mem_size;
mfc_dev->dma_base[BANK_L_CTX] = mfc_dev->mem_base;
mfc_dev->dma_base[BANK_R_CTX] = mfc_dev->mem_base;
/* * MFC hardware cannot handle 0 as a base address, so mark first 128K * as used (to keep required base alignment) and adjust base address
*/ if (mfc_dev->mem_base == (dma_addr_t)0) { unsignedint offset = 1 << MFC_BASE_ALIGN_ORDER;
/* Initialize HW ops and commands based on MFC version */
s5p_mfc_init_hw_ops(dev);
s5p_mfc_init_hw_cmds(dev);
s5p_mfc_init_regs(dev);
/* Register decoder and encoder */
ret = video_register_device(dev->vfd_dec, VFL_TYPE_VIDEO, 0); if (ret) {
v4l2_err(&dev->v4l2_dev, "Failed to register video device\n"); goto err_dec_reg;
}
v4l2_info(&dev->v4l2_dev, "decoder registered as /dev/video%d\n", dev->vfd_dec->num);
ret = video_register_device(dev->vfd_enc, VFL_TYPE_VIDEO, 0); if (ret) {
v4l2_err(&dev->v4l2_dev, "Failed to register video device\n"); goto err_enc_reg;
}
v4l2_info(&dev->v4l2_dev, "encoder registered as /dev/video%d\n", dev->vfd_enc->num);
/* * Clear ctx dev pointer to avoid races between s5p_mfc_remove() * and s5p_mfc_release() and s5p_mfc_release() accessing ctx->dev * after s5p_mfc_remove() is run during unbind.
*/
mutex_lock(&dev->mfc_mutex); for (i = 0; i < MFC_NUM_CONTEXTS; i++) {
ctx = dev->ctx[i]; if (!ctx) continue; /* clear ctx->dev */
ctx->dev = NULL;
}
mutex_unlock(&dev->mfc_mutex);
if (test_and_set_bit(0, &m_dev->enter_suspend) != 0) {
mfc_err("Error: going to suspend for a second time\n"); return -EIO;
}
/* Check if we're processing then wait if it necessary. */ while (test_and_set_bit(0, &m_dev->hw_lock) != 0) { /* Try and lock the HW */ /* Wait on the interrupt waitqueue */
ret = wait_event_interruptible_timeout(m_dev->queue,
m_dev->int_cond, msecs_to_jiffies(MFC_INT_TIMEOUT)); if (ret == 0) {
mfc_err("Waiting for hardware to finish timed out\n");
clear_bit(0, &m_dev->enter_suspend); return -EIO;
}
}
ret = s5p_mfc_sleep(m_dev); if (ret) {
clear_bit(0, &m_dev->enter_suspend);
clear_bit(0, &m_dev->hw_lock);
} return ret;
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.