	if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4))
		return -ENODEV;

	if (!vc4->v3d) {
		DRM_DEBUG("VC4_GET_HANG_STATE with no VC4 V3D probed\n");
		return -ENODEV;
	}

	spin_lock_irqsave(&vc4->job_lock, irqflags);
	kernel_state = vc4->hang_state;
	if (!kernel_state) {
		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
		return -ENOENT;
	}
	state = &kernel_state->user_state;

	/* If the user's array isn't big enough, just return the
	 * required array size.
	 */
	if (get_state->bo_count < state->bo_count) {
		get_state->bo_count = state->bo_count;
		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
		return 0;
	}

	if (!kernel_state->bo) {
		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
		return;
	}
	k = 0;
	for (i = 0; i < 2; i++) {
		if (!exec[i])
			continue;

		for (j = 0; j < exec[i]->bo_count; j++) {
			bo = to_vc4_bo(exec[i]->bo[j]);

			/* Retain BOs just in case they were marked purgeable.
			 * This prevents the BO from being purged before
			 * someone had a chance to dump the hang state.
			 */
			WARN_ON(!refcount_read(&bo->usecnt));
			refcount_inc(&bo->usecnt);
			drm_gem_object_get(exec[i]->bo[j]);
			kernel_state->bo[k++] = exec[i]->bo[j];
		}

		list_for_each_entry(bo, &exec[i]->unref_list, unref_head) {
			/* No need to retain BOs coming from the ->unref_list
			 * because they are naturally unpurgeable.
			 */
			drm_gem_object_get(&bo->base.base);
			kernel_state->bo[k++] = &bo->base.base;
		}
	}

	WARN_ON_ONCE(k != state->bo_count);

	if (exec[0])
		state->start_bin = exec[0]->ct0ca;
	if (exec[1])
		state->start_render = exec[1]->ct1ca;
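	/* start_bin/start_render record the start address of each hung job's
	 * control list, so a debug tool can relate the dump to the captured
	 * BOs.
	 */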
	/* We need to turn purgeable BOs into unpurgeable ones so that
	 * userspace has a chance to dump the hang state before the kernel
	 * decides to purge those BOs.
	 * Note that BO consistency at dump time cannot be guaranteed. For
	 * example, if the owner of these BOs decides to re-use them or mark
	 * them purgeable again there's nothing we can do to prevent it.
	 */
	for (i = 0; i < kernel_state->user_state.bo_count; i++) {
		struct vc4_bo *bo = to_vc4_bo(kernel_state->bo[i]);
	mutex_lock(&vc4->power_lock);
	if (vc4->power_refcount) {
		/* Power the device off and back on by dropping the
		 * reference on runtime PM.
		 */
		pm_runtime_put_sync_suspend(&vc4->v3d->pdev->dev);
		pm_runtime_get_sync(&vc4->v3d->pdev->dev);
	}
	mutex_unlock(&vc4->power_lock);

	vc4_irq_reset(dev);

	/* Rearm the hangcheck -- another job might have been waiting
	 * for our hung one to get kicked off, and vc4_irq_reset()
	 * would have started it.
	 */
	vc4_queue_hangcheck(dev);
}
	/* If we've made any progress in execution, rearm the timer
	 * and wait.
	 */
	if ((bin_exec && ct0ca != bin_exec->last_ct0ca) ||
	    (render_exec && ct1ca != render_exec->last_ct1ca)) {
		if (bin_exec)
			bin_exec->last_ct0ca = ct0ca;
		if (render_exec)
			render_exec->last_ct1ca = ct1ca;
		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
		vc4_queue_hangcheck(dev);
		return;
	}

	spin_unlock_irqrestore(&vc4->job_lock, irqflags);

	/* We've gone too long with no progress, reset.  This has to
	 * be done from a work struct, since resetting can sleep and
	 * this timer hook isn't allowed to.
	 */
	schedule_work(&vc4->hangcheck.reset_work);
}
	/* Set the current and end address of the control list.
	 * Writing the end register is what starts the job.
	 */
	V3D_WRITE(V3D_CTNCA(thread), start);
	V3D_WRITE(V3D_CTNEA(thread), end);
}
int
vc4_wait_for_seqno(struct drm_device *dev, uint64_t seqno,
		   uint64_t timeout_ns, bool interruptible)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	int ret = 0;
	unsigned long timeout_expire;
	DEFINE_WAIT(wait);

	if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4))
		return -ENODEV;

	/* Flush the GPU L2 caches.  These caches sit on top of system
	 * L3 (the 128kb or so shared with the CPU), and are
	 * non-allocating in the L3.
	 */
	V3D_WRITE(V3D_L2CACTL,
		  V3D_L2CACTL_L2CCLR);
/* Sets the registers for the next job to actually be executed in
 * the hardware.
 *
 * The job_lock should be held during this.
 */
void
vc4_submit_next_bin_job(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_exec_info *exec;

	if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4))
		return;

again:
	exec = vc4_first_bin_job(vc4);
	if (!exec)
		return;

	vc4_flush_caches(dev);

	/* Only start the perfmon if it was not already started by a previous
	 * job.
	 */
	if (exec->perfmon && vc4->active_perfmon != exec->perfmon)
		vc4_perfmon_start(vc4, exec->perfmon);

	/* Either put the job in the binner if it uses the binner, or
	 * immediately move it to the to-be-rendered queue.
	 */
	if (exec->ct0ca != exec->ct0ea) {
		trace_vc4_submit_cl(dev, false, exec->seqno, exec->ct0ca,
				    exec->ct0ea);
		submit_cl(dev, 0, exec->ct0ca, exec->ct0ea);
	} else {
		struct vc4_exec_info *next;

		vc4_move_job_to_render(dev, exec);
		next = vc4_first_bin_job(vc4);

		/* We can't start the next bin job if the previous job had a
		 * different perfmon instance attached to it. The same goes
		 * if one of them had a perfmon attached to it and the other
		 * one doesn't.
		 */
		if (next && next->perfmon == exec->perfmon)
			goto again;
	}
}
	/* A previous RCL may have written to one of our textures, and
	 * our full cache flush at bin time may have occurred before
	 * that RCL completed.  Flush the texture cache now, but not
	 * the instructions or uniforms (since we don't write those
	 * from an RCL).
	 */
	vc4_flush_texture_caches(dev);

	for (i = 0; i < exec->bo_count; i++) {
		bo = to_vc4_bo(exec->bo[i]);
		dma_resv_add_fence(bo->base.base.resv, exec->fence,
				   DMA_RESV_USAGE_READ);
	}
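	/* BOs the render CL writes to (e.g. the color and depth/stencil
	 * buffers) additionally get a write fence, so later readers wait
	 * for the rendering to finish.
	 */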
	for (i = 0; i < exec->rcl_write_bo_count; i++) {
		bo = to_vc4_bo(&exec->rcl_write_bo[i]->base);
		dma_resv_add_fence(bo->base.base.resv, exec->fence,
				   DMA_RESV_USAGE_WRITE);
	}
}
/* Takes the reservation lock on all the BOs being referenced, so that
 * at queue submit time we can update the reservations.
 *
 * We don't lock the RCL, the tile alloc/state BOs, or overflow memory
 * (all of which are on exec->unref_list).  They're entirely private
 * to vc4, so we don't attach dma-buf fences to them.
 */
static int
vc4_lock_bo_reservations(struct vc4_exec_info *exec,
			 struct drm_exec *exec_ctx)
{
	int ret;

	/* Reserve space for our shared (read-only) fence references,
	 * before we commit the CL to the hardware.
	 */
	drm_exec_init(exec_ctx, DRM_EXEC_INTERRUPTIBLE_WAIT, exec->bo_count);
	drm_exec_until_all_locked(exec_ctx) {
		ret = drm_exec_prepare_array(exec_ctx, exec->bo,
					     exec->bo_count, 1);
	}
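	/* drm_exec_until_all_locked() drops the locks and retries the block
	 * on contention, so ret only reflects the final attempt.
	 */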
	if (ret) {
		drm_exec_fini(exec_ctx);
		return ret;
	}

	return 0;
}
/* Queues a struct vc4_exec_info for execution.  If no job is
 * currently executing, then submits it.
 *
 * Unlike most GPUs, our hardware only handles one command list at a
 * time.  To queue multiple jobs at once, we'd need to edit the
 * previous command list to have a jump to the new one at the end, and
 * then bump the end address.  That's a change for a later date,
 * though.
 */
static int
vc4_queue_submit(struct drm_device *dev, struct vc4_exec_info *exec,
		 struct drm_exec *exec_ctx, struct drm_syncobj *out_sync)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_exec_info *renderjob;
	uint64_t seqno;
	unsigned long irqflags;
	struct vc4_fence *fence;

	if (out_sync)
		drm_syncobj_replace_fence(out_sync, exec->fence);

	vc4_attach_fences(exec);

	drm_exec_fini(exec_ctx);

	list_add_tail(&exec->head, &vc4->bin_job_list);

	/* If no bin job was executing and if the render job (if any) has the
	 * same perfmon as our job attached to it (or if both jobs don't have
	 * perfmon activated), then kick ours off.  Otherwise, it'll get
	 * started when the previous job's flush/render done interrupt occurs.
	 */
	renderjob = vc4_first_render_job(vc4);
	if (vc4_first_bin_job(vc4) == exec &&
	    (!renderjob || renderjob->perfmon == exec->perfmon)) {
		vc4_submit_next_bin_job(dev);
		vc4_queue_hangcheck(dev);
	}

	spin_unlock_irqrestore(&vc4->job_lock, irqflags);

	return 0;
}
/**
 * vc4_cl_lookup_bos() - Sets up exec->bo[] with the GEM objects
 * referenced by the job.
 * @dev: DRM device
 * @file_priv: DRM file for this fd
 * @exec: V3D job being set up
 *
 * The command validator needs to reference BOs by their index within
 * the submitted job's BO list.  This does the validation of the job's
 * BO list and reference counting for the lifetime of the job.
 */
static int
vc4_cl_lookup_bos(struct drm_device *dev, struct drm_file *file_priv,
		  struct vc4_exec_info *exec)
{
	struct drm_vc4_submit_cl *args = exec->args;
	int ret = 0;
	int i;

	exec->bo_count = args->bo_handle_count;

	if (!exec->bo_count) {
		/* See comment on bo_index for why we have to check
		 * this.
		 */
		DRM_DEBUG("Rendering requires BOs to validate\n");
		return -EINVAL;
	}

	ret = drm_gem_objects_lookup(file_priv, u64_to_user_ptr(args->bo_handles),
				     exec->bo_count, &exec->bo);
	if (ret)
		goto fail_put_bo;
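	/* Take a usecnt reference on each BO so a purgeable BO cannot be
	 * purged while the job is pending; vc4_complete_exec() drops it.
	 */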
	for (i = 0; i < exec->bo_count; i++) {
		ret = vc4_bo_inc_usecnt(to_vc4_bo(exec->bo[i]));
		if (ret)
			goto fail_dec_usecnt;
	}

	return 0;

fail_dec_usecnt:
	/* Decrease usecnt on acquired objects.
	 * We cannot rely on vc4_complete_exec() to release resources here,
	 * because vc4_complete_exec() has no information about which BO has
	 * had its ->usecnt incremented.
	 * To make things easier we just free everything explicitly and set
	 * exec->bo to NULL so that vc4_complete_exec() skips the 'BO release'
	 * step.
	 */
	for (i-- ; i >= 0; i--)
		vc4_bo_dec_usecnt(to_vc4_bo(exec->bo[i]));

fail_put_bo:
	/* Release any reference to acquired objects. */
	for (i = 0; i < exec->bo_count && exec->bo[i]; i++)
		drm_gem_object_put(exec->bo[i]);
	/* Allocate space where we'll store the copied in user command lists
	 * and shader records.
	 *
	 * We don't just copy directly into the BOs because we need to
	 * read the contents back for validation, and I think the
	 * bo->vaddr is uncached access.
	 */
	temp = kvmalloc_array(temp_size, 1, GFP_KERNEL);
	if (!temp) {
		drm_err(dev, "Failed to allocate storage for copying "
			"in bin/render CLs.\n");
		ret = -ENOMEM;
		goto fail;
	}
	bin = temp + bin_offset;
	exec->shader_rec_u = temp + shader_rec_offset;
	exec->uniforms_u = temp + uniforms_offset;
	exec->shader_state = temp + exec_size;
	exec->shader_state_size = args->shader_rec_count;

	if (copy_from_user(bin,
			   u64_to_user_ptr(args->bin_cl),
			   args->bin_cl_size)) {
		ret = -EFAULT;
		goto fail;
	}

	if (copy_from_user(exec->shader_rec_u,
			   u64_to_user_ptr(args->shader_rec),
			   args->shader_rec_size)) {
		ret = -EFAULT;
		goto fail;
	}

	if (copy_from_user(exec->uniforms_u,
			   u64_to_user_ptr(args->uniforms),
			   args->uniforms_size)) {
		ret = -EFAULT;
		goto fail;
	}
	bo = vc4_bo_create(dev, exec_size, true, VC4_BO_TYPE_BCL);
	if (IS_ERR(bo)) {
		drm_err(dev, "Couldn't allocate BO for binning\n");
		ret = PTR_ERR(bo);
		goto fail;
	}
	exec->exec_bo = &bo->base;
	/* If we got force-completed because of GPU reset rather than
	 * through our IRQ handler, signal the fence now.
	 */
	if (exec->fence) {
		dma_fence_signal(exec->fence);
		dma_fence_put(exec->fence);
	}

	if (exec->bo) {
		for (i = 0; i < exec->bo_count; i++) {
			struct vc4_bo *bo = to_vc4_bo(exec->bo[i]);

	/* Free up the allocation of any bin slots we used. */
	spin_lock_irqsave(&vc4->job_lock, irqflags);
	vc4->bin_alloc_used &= ~exec->bin_slots;
	spin_unlock_irqrestore(&vc4->job_lock, irqflags);

	/* Release the reference on the binner BO if needed. */
	if (exec->bin_bo_used)
		vc4_v3d_bin_bo_put(vc4);

	/* Release the reference we had on the perf monitor. */
	vc4_perfmon_put(exec->perfmon);
/* Scheduled when any job has been completed, this walks the list of
 * jobs that had completed and unrefs their BOs and frees their exec
 * structs.
 */
static void
vc4_job_done_work(struct work_struct *work)
{
	struct vc4_dev *vc4 =
		container_of(work, struct vc4_dev, job_done_work);

	vc4_job_handle_completed(vc4);
}
static int
vc4_wait_for_seqno_ioctl_helper(struct drm_device *dev,
				uint64_t seqno,
				uint64_t *timeout_ns)
{
	unsigned long start = jiffies;
	int ret = vc4_wait_for_seqno(dev, seqno, *timeout_ns, true);

	if ((ret == -EINTR || ret == -ERESTARTSYS) && *timeout_ns != ~0ull) {
		uint64_t delta = jiffies_to_nsecs(jiffies - start);

	if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4))
		return -ENODEV;

	if (args->pad != 0)
		return -EINVAL;

	ret = drm_gem_dma_resv_wait(file_priv, args->handle,
				    true, timeout_jiffies);

	/* Decrement the user's timeout, in case we got interrupted
	 * such that the ioctl will be restarted.
	 */
	delta_ns = ktime_to_ns(ktime_sub(ktime_get(), start));
	if (delta_ns < args->timeout_ns)
		args->timeout_ns -= delta_ns;
	else
		args->timeout_ns = 0;

	return ret;
}
/**
 * vc4_submit_cl_ioctl() - Submits a job (frame) to the VC4.
 * @dev: DRM device
 * @data: ioctl argument
 * @file_priv: DRM file for this fd
 *
 * This is the main entrypoint for userspace to submit a 3D frame to
 * the GPU.  Userspace provides the binner command list (if
 * applicable), and the kernel sets up the render command list to draw
 * to the framebuffer described in the ioctl, using the command lists
 * that the 3D engine's binner will produce.
 */
int
vc4_submit_cl_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_file *vc4file = file_priv->driver_priv;
	struct drm_vc4_submit_cl *args = data;
	struct drm_syncobj *out_sync = NULL;
	struct vc4_exec_info *exec;
	struct drm_exec exec_ctx;
	struct dma_fence *in_fence;
	int ret = 0;

	ret = vc4_cl_lookup_bos(dev, file_priv, exec);
	if (ret)
		goto fail;

	if (args->perfmonid) {
		exec->perfmon = vc4_perfmon_find(vc4file,
						 args->perfmonid);
		if (!exec->perfmon) {
			ret = -ENOENT;
			goto fail;
		}
	}

	if (args->in_sync) {
		ret = drm_syncobj_find_fence(file_priv, args->in_sync,
					     0, 0, &in_fence);
		if (ret)
			goto fail;

		/* When the fence (or fence array) is exclusively from our
		 * context we can skip the wait since jobs are executed in
		 * order of their submission through this ioctl and this can
		 * only have fences from a prior job.
		 */
		if (!dma_fence_match_context(in_fence,
					     vc4->dma_fence_context)) {
			ret = dma_fence_wait(in_fence, true);
			if (ret) {
				dma_fence_put(in_fence);
				goto fail;
			}
		}

		dma_fence_put(in_fence);
	}
	if (exec->args->bin_cl_size != 0) {
		ret = vc4_get_bcl(dev, exec);
		if (ret)
			goto fail;
	} else {
		exec->ct0ca = 0;
		exec->ct0ea = 0;
	}
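	/* With no binner CL, ct0ca == ct0ea, which makes
	 * vc4_submit_next_bin_job() skip the binner and move the job
	 * straight to the render queue.
	 */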
	ret = vc4_get_rcl(dev, exec);
	if (ret)
		goto fail;

	ret = vc4_lock_bo_reservations(exec, &exec_ctx);
	if (ret)
		goto fail;

	if (args->out_sync) {
		out_sync = drm_syncobj_find(file_priv, args->out_sync);
		if (!out_sync) {
			ret = -EINVAL;
			goto fail_unreserve;
		}

		/* We replace the fence in out_sync in vc4_queue_submit since
		 * the render job could execute immediately after that call.
		 * If it finishes before our ioctl processing resumes the
		 * render job fence could already have been freed.
		 */
	}

	/* Clear this out of the struct we'll be putting in the queue,
	 * since it's part of our stack.
	 */
	exec->args = NULL;

	ret = vc4_queue_submit(dev, exec, &exec_ctx, out_sync);

	/* The syncobj isn't part of the exec data and we need to free our
	 * reference even if job submission failed.
	 */
	if (out_sync)
		drm_syncobj_put(out_sync);

	if (ret)
		goto fail_unreserve;

	/* Return the seqno for our job. */
	args->seqno = vc4->emit_seqno;
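	/* Userspace can pass this seqno to the VC4_WAIT_SEQNO ioctl to wait
	 * for the job to complete.
	 */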
	/* Waiting for exec to finish would need to be done before
	 * unregistering V3D.
	 */
	WARN_ON(vc4->emit_seqno != vc4->finished_seqno);

	/* V3D should already have disabled its interrupt and cleared
	 * the overflow allocation registers.  Now free the object.
	 */
	if (vc4->bin_bo) {
		drm_gem_object_put(&vc4->bin_bo->base.base);
		vc4->bin_bo = NULL;
	}

	if (vc4->hang_state)
		vc4_free_hang_state(dev, vc4->hang_state);
}
	if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4))
		return -ENODEV;

	switch (args->madv) {
	case VC4_MADV_DONTNEED:
	case VC4_MADV_WILLNEED:
		break;
	default:
		return -EINVAL;
	}

	if (args->pad != 0)
		return -EINVAL;

	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!gem_obj) {
		DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
		return -ENOENT;
	}

	bo = to_vc4_bo(gem_obj);

	/* Only BOs exposed to userspace can be purged. */
	if (bo->madv == __VC4_MADV_NOTSUPP) {
		DRM_DEBUG("madvise not supported on this BO\n");
		ret = -EINVAL;
		goto out_put_gem;
	}

	/* Not sure it's safe to purge imported BOs. Let's just assume it's
	 * not until proven otherwise.
	 */
	if (gem_obj->import_attach) {
		DRM_DEBUG("madvise not supported on imported BOs\n");
		ret = -EINVAL;
		goto out_put_gem;
	}

	mutex_lock(&bo->madv_lock);

	if (args->madv == VC4_MADV_DONTNEED && bo->madv == VC4_MADV_WILLNEED &&
	    !refcount_read(&bo->usecnt)) {
		/* If the BO is about to be marked as purgeable, is not used
		 * and is not already purgeable or purged, add it to the
		 * purgeable list.
		 */
		vc4_bo_add_to_purgeable_pool(bo);
	} else if (args->madv == VC4_MADV_WILLNEED &&
		   bo->madv == VC4_MADV_DONTNEED &&
		   !refcount_read(&bo->usecnt)) {
		/* The BO has not been purged yet, just remove it from
		 * the purgeable list.
		 */
		vc4_bo_remove_from_purgeable_pool(bo);
	}

	/* Save the purged state. */
	args->retained = bo->madv != __VC4_MADV_PURGED;
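	/* retained == 0 tells userspace the backing pages were purged and
	 * the BO's contents must be re-initialized before it is used again.
	 */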
	/* Update internal madv state only if the bo was not purged. */
	if (bo->madv != __VC4_MADV_PURGED)
		bo->madv = args->madv;

	mutex_unlock(&bo->madv_lock);

	ret = 0;

out_put_gem:
	drm_gem_object_put(gem_obj);

	return ret;
}