/*
 * This maps our nodes onto the inputs/outputs of the actual PiSP Back End.
 * Be wary of the word "OUTPUT" which is used ambiguously here. In a V4L2
 * context it means an input to the hardware (source image or metadata).
 * Elsewhere it means an output from the hardware.
 */
enum pispbe_node_ids {
	MAIN_INPUT_NODE,
	TDN_INPUT_NODE,
	STITCH_INPUT_NODE,
	OUTPUT0_NODE,
	OUTPUT1_NODE,
	TDN_OUTPUT_NODE,
	STITCH_OUTPUT_NODE,
	CONFIG_NODE,
	/* Not a real node: the number of nodes (and size of per-node arrays). */
	PISPBE_NUM_NODES
};
/*
 * Structure to describe a single node /dev/video<N> which represents a single
 * input or output queue to the PiSP Back End device.
 */
struct pispbe_node {
	unsigned int id;
	int vfl_dir;
	enum v4l2_buf_type buf_type;
	struct video_device vfd;
	struct media_pad pad;
	struct media_intf_devnode *intf_devnode;
	struct media_link *intf_link;
	/* Back-pointer to the owning device. */
	struct pispbe_dev *pispbe;
	/* Video device lock */
	struct mutex node_lock;
	/* vb2_queue lock */
	struct mutex queue_lock;
	/* Buffers queued by userspace, not yet part of a hardware job. */
	struct list_head ready_queue;
	struct vb2_queue queue;
	struct v4l2_format format;
	const struct pisp_be_format *pisp_format;
};
/* For logging only, use the entity name with "pispbe" and separator removed */ #define NODE_NAME(node) \
(node_desc[(node)->id].ent_name + sizeof(PISPBE_NAME))
/* Records details of the jobs currently running or queued on the h/w. */
struct pispbe_job {
	/* True while this slot describes a real job on the hardware. */
	bool valid;
	/*
	 * An array of buffer pointers - remember it's source buffers first,
	 * then captures, then metadata last.
	 */
	struct pispbe_buffer *buf[PISPBE_NUM_NODES];
};
/* * Queue a job to the h/w. If the h/w is idle it will begin immediately. * Caller must ensure it is "safe to queue", i.e. we don't already have a * queued, unstarted job.
*/ staticvoid pispbe_queue_job(struct pispbe_dev *pispbe, struct pispbe_job_descriptor *job)
{ unsignedint begin, end;
if (pispbe_rd(pispbe, PISP_BE_STATUS_REG) & PISP_BE_STATUS_QUEUED)
dev_err(pispbe->dev, "ERROR: not safe to queue new job!\n");
/* * Write configuration to hardware. DMA addresses and enable flags * are passed separately, because the driver needs to sanitize them, * and we don't want to modify (or be vulnerable to modifications of) * the mmap'd buffer.
*/ for (unsignedint u = 0; u < N_HW_ADDRESSES; ++u) {
pispbe_wr(pispbe, PISP_BE_IO_ADDR_LOW(u),
lower_32_bits(job->hw_dma_addrs[u]));
pispbe_wr(pispbe, PISP_BE_IO_ADDR_HIGH(u),
upper_32_bits(job->hw_dma_addrs[u]));
}
pispbe_wr(pispbe, PISP_BE_GLOBAL_BAYER_ENABLE,
job->hw_enables.bayer_enables);
pispbe_wr(pispbe, PISP_BE_GLOBAL_RGB_ENABLE,
job->hw_enables.rgb_enables);
/* Everything else is as supplied by the user. */
begin = offsetof(struct pisp_be_config, global.bayer_order) / sizeof(u32);
end = sizeof(struct pisp_be_config) / sizeof(u32); for (unsignedint u = begin; u < end; u++)
pispbe_wr(pispbe, PISP_BE_CONFIG_BASE_REG + sizeof(u32) * u,
((u32 *)job->config)[u]);
/* Read back the addresses -- an error here could be fatal */ for (unsignedint u = 0; u < N_HW_ADDRESSES; ++u) { unsignedint offset = PISP_BE_IO_ADDR_LOW(u);
u64 along = pispbe_rd(pispbe, offset);
along += ((u64)pispbe_rd(pispbe, offset + 4)) << 32; if (along != (u64)(job->hw_dma_addrs[u])) {
dev_dbg(pispbe->dev, "ISP BE config error: check if ISP RAMs enabled?\n"); return;
}
}
/* * Write tile pointer to hardware. The IOMMU should prevent * out-of-bounds offsets reaching non-ISP buffers.
*/
pispbe_wr(pispbe, PISP_BE_TILE_ADDR_LO_REG, lower_32_bits(job->tiles));
pispbe_wr(pispbe, PISP_BE_TILE_ADDR_HI_REG, upper_32_bits(job->tiles));
/*
 * NOTE(review): interior of the DMA-address translation helper
 * (pispbe_xlate_addrs); its prologue and the declarations of size, p,
 * num_planes, addr[], plane_factor, hw_en, config, addrs, buf and ret are
 * not visible in this chunk. "unsignedint" below is a fused token left
 * over from the extraction.
 *
 * Determine the base plane size. This will not be the same
 * as node->format.fmt.pix_mp.plane_fmt[0].sizeimage for a single
 * plane buffer in an mplane format.
 */
	size = node->format.fmt.pix_mp.plane_fmt[0].bytesperline *
		node->format.fmt.pix_mp.height;

	/* DMA addresses of the planes actually present in the vb2 buffer. */
	for (p = 0; p < num_planes && p < PISPBE_MAX_PLANES; p++) {
		addr[p] = vb2_dma_contig_plane_dma_addr(&buf->vb.vb2_buf, p);
		plane_factor += node->pisp_format->plane_factor[p];
	}

	for (; p < PISPBE_MAX_PLANES && node->pisp_format->plane_factor[p]; p++) {
		/*
		 * Calculate the address offset of this plane as needed
		 * by the hardware. This is specifically for non-mplane
		 * buffer formats, where there are 3 image planes, e.g.
		 * for the V4L2_PIX_FMT_YUV420 format.
		 */
		addr[p] = addr[0] + ((size * plane_factor) >> 3);
		plane_factor += node->pisp_format->plane_factor[p];
	}

	/* Take a copy of the "enable" bitmaps so we can modify them. */
	hw_en->bayer_enables = config->config.global.bayer_enables;
	hw_en->rgb_enables = config->config.global.rgb_enables;

	/*
	 * Main input first. There are 3 address pointers, corresponding to up
	 * to 3 planes.
	 */
	ret = pispbe_get_planes_addr(addrs, buf[MAIN_INPUT_NODE],
				     &pispbe->node[MAIN_INPUT_NODE]);
	if (ret <= 0) {
		/* Shouldn't happen, we have validated an input is available. */
		dev_warn(pispbe->dev, "ISP-BE missing input\n");
		hw_en->bayer_enables = 0;
		hw_en->rgb_enables = 0;
		return;
	}

	/*
	 * Now TDN/Stitch inputs and outputs. These are single-plane and only
	 * used with Bayer input. Input enables must match the requirements
	 * of the processing stages, otherwise the hardware can lock up!
	 */
	if (hw_en->bayer_enables & PISP_BE_BAYER_ENABLE_INPUT) {
		addrs[3] = pispbe_get_addr(buf[TDN_INPUT_NODE]);
		/* Drop TDN stages whose buffer/enable preconditions fail. */
		if (addrs[3] == 0 ||
		    !(hw_en->bayer_enables & PISP_BE_BAYER_ENABLE_TDN_INPUT) ||
		    !(hw_en->bayer_enables & PISP_BE_BAYER_ENABLE_TDN) ||
		    (config->config.tdn.reset & 1)) {
			hw_en->bayer_enables &=
				~(PISP_BE_BAYER_ENABLE_TDN_INPUT |
				  PISP_BE_BAYER_ENABLE_TDN_DECOMPRESS);
			if (!(config->config.tdn.reset & 1))
				hw_en->bayer_enables &=
					~PISP_BE_BAYER_ENABLE_TDN;
		}

		/*
		 * NOTE(review): the TDN output / Stitch address handling that
		 * would be expected between the block above and the output
		 * loop below appears to be missing from this chunk, and the
		 * output loop is nested inside the Bayer-input branch here --
		 * confirm against the full source.
		 */
		/* Main image output channels. */
		for (unsignedint i = 0; i < PISP_BACK_END_NUM_OUTPUTS; i++) {
			ret = pispbe_get_planes_addr(addrs + 7 + 3 * i,
						     buf[OUTPUT0_NODE + i],
						     &pispbe->node[OUTPUT0_NODE + i]);
			if (ret <= 0)
				hw_en->rgb_enables &= ~(PISP_BE_RGB_ENABLE_OUTPUT0 << i);
		}
	}
/*
 * Prepare a job description to be submitted to the HW.
 *
 * To schedule a job, we need all streaming nodes (apart from Output0,
 * Output1, Tdn and Stitch) to have a buffer ready, which must
 * include at least a config buffer and a main input image.
 *
 * For Output0, Output1, Tdn and Stitch, a buffer only needs to be
 * available if the blocks are enabled in the config.
 *
 * If all the buffers required to form a job are available, append the
 * job descriptor to the job queue to be later queued to the HW.
 *
 * Returns 0 if a job has been successfully prepared, < 0 otherwise.
 *
 * NOTE(review): "staticint"/"unsignedint" are fused tokens from the
 * extraction, and this function body is truncated in this chunk: the job
 * allocation, streaming_map snapshot and config-buffer dequeue that
 * should precede the loop are missing, so "job" and "streaming_map" are
 * used uninitialized as shown. The trailing error path and return are
 * also not visible. Confirm against the full source.
 */
staticint pispbe_prepare_job(struct pispbe_dev *pispbe)
{
	struct pispbe_job_descriptor __free(kfree) *job = NULL;
	struct pispbe_buffer *buf[PISPBE_NUM_NODES] = {};
	unsignedint streaming_map;
	unsignedint config_index;
	struct pispbe_node *node;

	/* remember: srcimages, captures then metadata */
	for (unsignedint i = 0; i < PISPBE_NUM_NODES; i++) {
		/* Enable bits come from the job's config buffer. */
		unsignedint bayer_en =
			job->config->config.global.bayer_enables;
		unsignedint rgb_en =
			job->config->config.global.rgb_enables;
		bool ignore_buffers = false;

		/* Config node is handled outside the loop above. */
		if (i == CONFIG_NODE)
			continue;

		buf[i] = NULL;
		if (!(streaming_map & BIT(i)))
			continue;

		if ((!(rgb_en & PISP_BE_RGB_ENABLE_OUTPUT0) &&
		     i == OUTPUT0_NODE) ||
		    (!(rgb_en & PISP_BE_RGB_ENABLE_OUTPUT1) &&
		     i == OUTPUT1_NODE) ||
		    (!(bayer_en & PISP_BE_BAYER_ENABLE_TDN_INPUT) &&
		     i == TDN_INPUT_NODE) ||
		    (!(bayer_en & PISP_BE_BAYER_ENABLE_TDN_OUTPUT) &&
		     i == TDN_OUTPUT_NODE) ||
		    (!(bayer_en & PISP_BE_BAYER_ENABLE_STITCH_INPUT) &&
		     i == STITCH_INPUT_NODE) ||
		    (!(bayer_en & PISP_BE_BAYER_ENABLE_STITCH_OUTPUT) &&
		     i == STITCH_OUTPUT_NODE)) {
			/*
			 * Ignore Output0/Output1/Tdn/Stitch buffer check if the
			 * global enables aren't set for these blocks. If a
			 * buffer has been provided, we dequeue it back to the
			 * user with the other in-use buffers.
			 */
			ignore_buffers = true;
		}

		node = &pispbe->node[i];

		/* Pull a buffer from each V4L2 queue to form the queued job */
		buf[i] = list_first_entry_or_null(&node->ready_queue,
						  struct pispbe_buffer,
						  ready_list);
		if (buf[i]) {
			list_del(&buf[i]->ready_list);
			job->buffers[i] = buf[i];
		}

		if (!buf[i] && !ignore_buffers)
			goto err_return_buffers;
	}

	/* Convert buffers to DMA addresses for the hardware */
	pispbe_xlate_addrs(pispbe, job, buf);
/*
 * NOTE(review): interior of the job-scheduling helper (it uses plain
 * "return" and frees the job, so it cannot belong to the int-returning
 * function above); its prologue is not visible here. "clear_hw_busy" and
 * "job" are presumably a parameter and a local of that helper -- confirm
 * against the full source.
 */
	scoped_guard(spinlock_irqsave, &pispbe->hw_lock) {
		/* Optionally mark the hardware idle before testing it. */
		if (clear_hw_busy)
			pispbe->hw_busy = false;

		if (pispbe->hw_busy)
			return;

		/* Take the oldest prepared job, if any. */
		job = list_first_entry_or_null(&pispbe->job_queue,
					       struct pispbe_job_descriptor,
					       queue);
		if (!job)
			return;

		list_del(&job->queue);

		/* Record the buffers so job completion can return them. */
		for (unsignedint i = 0; i < PISPBE_NUM_NODES; i++)
			pispbe->queued_job.buf[i] = job->buffers[i];
		pispbe->queued_job.valid = true;

		pispbe->hw_busy = true;
	}

	/*
	 * We can kick the job off without the hw_lock, as this can
	 * never run again until hw_busy is cleared, which will happen
	 * only when the following job has been queued and an interrupt
	 * is raised.
	 */
	pispbe_queue_job(pispbe, job);
	kfree(job);
}
/*
 * NOTE(review): interior of the interrupt handler (it returns IRQ_NONE);
 * its prologue and epilogue are not visible in this chunk, and the
 * "if (pispbe->started != started)" block below is cut off mid-way.
 */
	u = pispbe_rd(pispbe, PISP_BE_INTERRUPT_STATUS_REG);
	if (u == 0)
		return IRQ_NONE;

	/* Acknowledge the interrupt(s) we are about to handle. */
	pispbe_wr(pispbe, PISP_BE_INTERRUPT_STATUS_REG, u);

	/* Batch status: low byte -> done, next byte -> started. */
	u = pispbe_rd(pispbe, PISP_BE_BATCH_STATUS_REG);
	done = (uint8_t)u;
	started = (uint8_t)(u >> 8);

	/*
	 * Be aware that done can go up by 2 and started by 1 when: a job that
	 * we previously saw "start" now finishes, and we then queued a new job
	 * which we see both start and finish "simultaneously".
	 */
	if (pispbe->running_job.valid && pispbe->done != done) {
		pispbe_isr_jobdone(pispbe, &pispbe->running_job);
		memset(&pispbe->running_job, 0, sizeof(pispbe->running_job));
		pispbe->done++;
	}

	if (pispbe->started != started) {
		pispbe->started++;
		can_queue_another = 1;
/*
 * NOTE(review): this span fuses fragments of two vb2 callbacks -- a
 * per-plane size validation loop (buffer prepare) and the enqueue of a
 * ready buffer plus job kick (buffer queue). Neither function header is
 * visible here, and the enqueue statements sit inside the validation
 * loop, which looks like an artifact of the extraction -- confirm
 * against the full source. "unsignedint"/"unsignedlong" are fused
 * tokens.
 */
	for (unsignedint i = 0; i < num_planes; i++) {
		/* Required size differs for mplane vs metadata formats. */
		unsignedlong size = NODE_IS_MPLANE(node) ?
			node->format.fmt.pix_mp.plane_fmt[i].sizeimage :
			node->format.fmt.meta.buffersize;

		if (vb2_plane_size(vb, i) < size) {
			dev_dbg(pispbe->dev, "data will not fit into plane %d (%lu < %lu)\n",
				i, vb2_plane_size(vb, i), size);
			return -EINVAL;
		}

		dev_dbg(pispbe->dev, "%s: for node %s\n", __func__, NODE_NAME(node));
		list_add_tail(&buffer->ready_list, &node->ready_queue);

		/*
		 * Every time we add a buffer, check if there's now some work
		 * for the hw to do.
		 */
		if (!pispbe_prepare_job(pispbe))
			pispbe_schedule(pispbe, false);
	}
/*
 * Now this is a bit awkward. In a simple M2M device we could just wait
 * for all queued jobs to complete, but here there's a risk that a
 * partial set of buffers was queued and cannot be run. For now, just
 * cancel all buffers stuck in the "ready queue", then wait for any
 * running job.
 *
 * This may return buffers out of order.
 *
 * NOTE(review): interior of the stop-streaming callback; the prologue
 * (where &pispbe->hw_lock is taken and tmp_list is declared) is not
 * visible in this chunk.
 */
	dev_dbg(pispbe->dev, "%s: for node %s\n", __func__, NODE_NAME(node));
	do {
		buf = list_first_entry_or_null(&node->ready_queue,
					       struct pispbe_buffer,
					       ready_list);
		if (buf) {
			list_del(&buf->ready_list);
			/* Hand each unused buffer back to vb2 as errored. */
			vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
		}
	} while (buf);

	if (pispbe->streaming_map == 0) {
		/*
		 * If all nodes have stopped streaming release all jobs
		 * without holding the lock.
		 */
		list_splice_init(&pispbe->job_queue, &tmp_list);
	}

	spin_unlock_irq(&pispbe->hw_lock);
/*
 * NOTE(review): tail of a get-format handler for the metadata output
 * node; its function header is not visible in this chunk.
 */
	/* Only a meta output node may be queried by this handler. */
	if (!NODE_IS_META(node) || NODE_IS_CAPTURE(node)) {
		dev_dbg(pispbe->dev, "Cannot get capture fmt for meta output node %s\n",
			NODE_NAME(node));
		return -EINVAL;
	}

	/* Return the node's currently configured format. */
	*f = node->format;

	dev_dbg(pispbe->dev, "Get output format for meta node %s\n",
		NODE_NAME(node));

	return 0;
}
staticconststruct pisp_be_format *pispbe_find_fmt(unsignedint fourcc)
{ for (unsignedint i = 0; i < ARRAY_SIZE(supported_formats); i++) { if (supported_formats[i].fourcc == fourcc) return &supported_formats[i];
}
/*
 * NOTE(review): fragments of two format handlers fused in this chunk --
 * the colourspace fixup from a try-format helper, followed by the
 * meta-node guard from a set-format handler. Neither function header is
 * visible here; confirm the boundary against the full source.
 *
 * Fill in the actual colour space when the requested one was
 * not supported. This also catches the case when the "default"
 * colour space was requested (as that's never in the mask).
 */
	if (!(V4L2_COLORSPACE_MASK(f->fmt.pix_mp.colorspace) &
	      fmt->colorspace_mask))
		f->fmt.pix_mp.colorspace = fmt->colorspace_default;

	/* In all cases, we only support the defaults for these: */
	f->fmt.pix_mp.ycbcr_enc =
		V4L2_MAP_YCBCR_ENC_DEFAULT(f->fmt.pix_mp.colorspace);
	f->fmt.pix_mp.xfer_func =
		V4L2_MAP_XFER_FUNC_DEFAULT(f->fmt.pix_mp.colorspace);

	/* Only a meta output node may be configured by this handler. */
	if (!NODE_IS_META(node) || NODE_IS_CAPTURE(node)) {
		dev_dbg(pispbe->dev, "Cannot set capture fmt for meta output node %s\n",
			NODE_NAME(node));
		return -EINVAL;
	}
/*
 * NOTE(review): interior/tail of the hardware-init helper; its header is
 * not visible. A register read that should refresh "u" (with the batch
 * status) before the busy check below appears to be missing from this
 * chunk -- as written, "u" still holds the version register value.
 * Confirm against the full source.
 */
	/* Check the HW is present and has a known version */
	u = pispbe_rd(pispbe, PISP_BE_VERSION_REG);
	dev_dbg(pispbe->dev, "pispbe_probe: HW version: 0x%08x", u);
	pispbe->hw_version = u;
	if ((u & ~PISP_BE_VERSION_MINOR_BITS) != PISP_BE_VERSION_2712)
		return -ENODEV;

	/* Refuse to take over hardware that looks stuck or mid-job. */
	if (u != 0 || pispbe->done != pispbe->started) {
		dev_err(pispbe->dev, "pispbe_probe: HW is stuck or busy\n");
		return -EBUSY;
	}

	/*
	 * AXI QOS=0, CACHE=4'b0010, PROT=3'b011
	 * Also set "chicken bits" 22:20 which enable sub-64-byte bursts
	 * and AXI AWID/BID variability (on versions which support this).
	 */
	pispbe_wr(pispbe, PISP_BE_AXI_REG, 0x32703200u);

	/* Enable both interrupt flags */
	pispbe_wr(pispbe, PISP_BE_INTERRUPT_EN_REG, 0x00000003u);

	return 0;
}
/* Probe the ISP-BE hardware block, as a single platform device. */
/*
 * NOTE(review): "staticint" is a fused token from the extraction, and
 * this function is truncated -- the remainder of the probe sequence is
 * not visible in this chunk.
 */
staticint pispbe_probe(struct platform_device *pdev)
{
	struct pispbe_dev *pispbe;
	int ret;

	/* Device-managed, zeroed driver state; freed automatically. */
	pispbe = devm_kzalloc(&pdev->dev, sizeof(*pispbe), GFP_KERNEL);
	if (!pispbe)
		return -ENOMEM;
/*
 * NOTE(review): extraneous web-page boilerplate accidentally pasted into
 * the source; it should be removed. English translation of the German
 * text for reference: "The information on this web page has been
 * carefully compiled to the best of our knowledge. However, neither
 * completeness, nor correctness, nor quality of the information provided
 * is guaranteed. Note: the coloured syntax display and the measurement
 * are still experimental."
 */