/* * This assumes that if we're switching to 2D, we're switching * away from 3D, and vice versa. Hence, if we're switching to * the 2D core, we need to flush the 3D depth and color caches, * otherwise we need to flush the 2D pixel engine cache.
*/ if (gpu->exec_state == ETNA_PIPE_2D)
flush = VIVS_GL_FLUSH_CACHE_PE2D; elseif (gpu->exec_state == ETNA_PIPE_3D)
flush = VIVS_GL_FLUSH_CACHE_DEPTH | VIVS_GL_FLUSH_CACHE_COLOR;
/*
 * Safely replace the WAIT of a waitlink with a new command and argument.
 * The GPU may be executing this WAIT while we're modifying it, so we have
 * to write it in a specific order to avoid the GPU branching to somewhere
 * else.  'wl_offset' is the offset to the first byte of the WAIT command.
 */
static void etnaviv_buffer_replace_wait(struct etnaviv_cmdbuf *buffer,
	unsigned int wl_offset, u32 cmd, u32 arg)
{
	u32 *lw = buffer->vaddr + wl_offset;

	/*
	 * Store the argument first: while lw[0] still holds the old WAIT
	 * opcode the GPU does not interpret lw[1], so this store is safe
	 * even if the GPU is spinning on this WAIT right now.
	 */
	lw[1] = arg;
	mb();		/* argument must be visible before the new opcode */
	lw[0] = cmd;
	mb();		/* publish the opcode before the caller proceeds */
}
/*
 * NOTE(review): this span appears damaged by extraction.  The body of
 * etnaviv_buffer_reserve below is missing its return statement and its
 * closing brace, and the lines starting at the first
 * etnaviv_buffer_replace_wait() call read like the tail of a different
 * function (the link-or-END replacement at the end of a buffer-end path).
 * Kept byte-identical and only annotated — compare against the upstream
 * etnaviv driver before relying on this text.
 */
/* * Ensure that there is space in the command buffer to contiguously write * 'cmd_dwords' 64-bit words into the buffer, wrapping if necessary.
 */ static u32 etnaviv_buffer_reserve(struct etnaviv_gpu *gpu, struct etnaviv_cmdbuf *buffer, unsignedint cmd_dwords)
{ if (buffer->user_size + cmd_dwords * sizeof(u64) > buffer->size)
/* Wrap back to the start of the ring buffer when the request won't fit. */
buffer->user_size = 0;
/*
 * NOTE(review): upstream returns the GPU virtual address of the reserved
 * space at this point; that return is absent from the visible text, and
 * waitlink_offset/dwords/link_target below are not declared in this span.
 */
etnaviv_buffer_replace_wait(buffer, waitlink_offset,
VIV_FE_LINK_HEADER_OP_LINK |
VIV_FE_LINK_HEADER_PREFETCH(dwords),
link_target);
} else { /* Replace the last link-wait with an "END" command */
etnaviv_buffer_replace_wait(buffer, waitlink_offset,
VIV_FE_END_HEADER_OP_END, 0);
}
}
/* Append a 'sync point' to the ring buffer. */ void etnaviv_sync_point_queue(struct etnaviv_gpu *gpu, unsignedint event)
{ struct etnaviv_cmdbuf *buffer = &gpu->buffer; unsignedint waitlink_offset = buffer->user_size - 16;
u32 dwords, target;
lockdep_assert_held(&gpu->lock);
/* * We need at most 3 dwords in the return target: * 1 event + 1 end + 1 wait + 1 link.
*/
dwords = 4;
target = etnaviv_buffer_reserve(gpu, buffer, dwords);
/* Signal sync point event */
CMD_LOAD_STATE(buffer, VIVS_GL_EVENT, VIVS_GL_EVENT_EVENT_ID(event) |
VIVS_GL_EVENT_FROM_PE);
/* Stop the FE to 'pause' the GPU */
CMD_END(buffer);
/* * Kick off the 'sync point' command by replacing the previous * WAIT with a link to the address in the ring buffer.
*/
etnaviv_buffer_replace_wait(buffer, waitlink_offset,
VIV_FE_LINK_HEADER_OP_LINK |
VIV_FE_LINK_HEADER_PREFETCH(dwords),
target);
}
/*
 * NOTE(review): this span is the tail of a larger function (apparently
 * the etnaviv buffer-queue/submit path) whose head is not visible here.
 * need_flush, switch_context, switch_mmu_context, exec_state, link_target,
 * link_dwords, cmdbuf, buffer, waitlink_offset, return_target, return_dwords
 * and has_blt are all declared above this span.  The text also appears
 * damaged by extraction (statements fused onto single lines), so it is kept
 * byte-identical and only annotated.
 */
/* * If we need maintenance prior to submitting this buffer, we will * need to append a mmu flush load state, followed by a new * link to this buffer - a total of four additional words.
 */ if (need_flush || switch_context) {
u32 target, extra_dwords;
/* link command */
extra_dwords = 1;
/* flush command */ if (need_flush) { if (gpu->mmu_context->global->version == ETNAVIV_IOMMU_V1)
extra_dwords += 1; else
/* MMUv2 needs a longer flush sequence than MMUv1. */
extra_dwords += 3;
}
/* pipe switch commands */ if (switch_context)
extra_dwords += 4;
target = etnaviv_buffer_reserve(gpu, buffer, extra_dwords); /* * Switch MMU context if necessary. Must be done after the * link target has been calculated, as the jump forward in the * kernel ring still uses the last active MMU context before * the switch.
/*
 * NOTE(review): the MMU-context-switch body looks truncated here —
 * old_context is taken but never used or released in the visible text;
 * upstream swaps gpu->mmu_context (with reference counting) and emits the
 * MMU flush before reaching the pipe-select below.  Verify against the
 * upstream driver.
 */
 */ if (switch_mmu_context) { struct etnaviv_iommu_context *old_context = gpu->mmu_context;
if (switch_context) {
etnaviv_cmd_select_pipe(gpu, buffer, exec_state);
gpu->exec_state = exec_state;
}
/* And the link to the submitted buffer */
link_target = etnaviv_cmdbuf_get_va(cmdbuf,
&gpu->mmu_context->cmdbuf_mapping);
CMD_LINK(buffer, link_dwords, link_target);
/* Update the link target to point to above instructions */
link_target = target;
link_dwords = extra_dwords;
}
/* * Append a LINK to the submitted command buffer to return to * the ring buffer. return_target is the ring target address. * We need at most 7 dwords in the return target: 2 cache flush + * 2 semaphore stall + 1 event + 1 wait + 1 link.
 */
return_dwords = 7;
/* * When the BLT engine is present we need 6 more dwords in the return * target: 3 enable/flush/disable + 4 enable/semaphore stall/disable, * but we don't need the normal TS flush state.
 */ if (has_blt)
return_dwords += 6;
/*
 * NOTE(review): return_target is printed below but never assigned in the
 * visible span; upstream computes it via etnaviv_buffer_reserve(gpu,
 * buffer, return_dwords) around this point — that call seems to be lost.
 */
if (drm_debug_enabled(DRM_UT_DRIVER))
pr_info("stream link to 0x%08x @ 0x%08x %p\n",
return_target,
etnaviv_cmdbuf_get_va(cmdbuf, &gpu->mmu_context->cmdbuf_mapping),
cmdbuf->vaddr);
/* * Kick off the submitted command by replacing the previous * WAIT with a link to the address in the ring buffer.
 */
etnaviv_buffer_replace_wait(buffer, waitlink_offset,
VIV_FE_LINK_HEADER_OP_LINK |
VIV_FE_LINK_HEADER_PREFETCH(link_dwords),
link_target);
/* Dump the first 0x50 bytes of the ring for debugging. */
if (drm_debug_enabled(DRM_UT_DRIVER))
etnaviv_buffer_dump(gpu, buffer, 0, 0x50);
}
/*
 * NOTE(review): the following is website boilerplate that leaked into the
 * file during extraction — it is not part of the driver source.
 * (Translated from German:) The information on this web page was compiled
 * carefully and to the best of our knowledge.  However, neither the
 * completeness, nor the correctness, nor the quality of the information
 * provided is guaranteed.
 * Remark: the colored syntax highlighting and the measurement are still
 * experimental.
 */