/** * xe_drm_client_alloc() - Allocate drm client * @void: No arg * * Allocate drm client struct to track client memory against * same till client life. Call this API whenever new client * has opened xe device. * * Return: pointer to client struct or NULL if can't allocate
*/ struct xe_drm_client *xe_drm_client_alloc(void)
{ struct xe_drm_client *client;
client = kzalloc(sizeof(*client), GFP_KERNEL); if (!client) return NULL;
/** * __xe_drm_client_free() - Free client struct * @kref: The reference * * This frees client struct. Call this API when xe device is closed * by drm client. * * Return: void
*/ void __xe_drm_client_free(struct kref *kref)
{ struct xe_drm_client *client =
container_of(kref, typeof(*client), kref);
kfree(client);
}
#ifdef CONFIG_PROC_FS
/**
 * xe_drm_client_add_bo() - Add BO for tracking client mem usage
 * @client: The drm client ptr
 * @bo: The xe BO ptr
 *
 * Add all BO created by individual drm client by calling this function.
 * This helps in tracking client memory usage.
 *
 * Return: void
 */
void xe_drm_client_add_bo(struct xe_drm_client *client,
			  struct xe_bo *bo)
{
	/* A BO must not already be tracked by any client. */
	XE_WARN_ON(bo->client);
	XE_WARN_ON(!list_empty(&bo->client_link));

	/*
	 * FIX: the visible source stopped after the sanity checks — the BO
	 * was never actually recorded. Take a client reference for as long
	 * as the BO sits on the list, and link it under bos_lock.
	 */
	spin_lock(&client->bos_lock);
	bo->client = xe_drm_client_get(client);
	list_add_tail(&bo->client_link, &client->bos_list);
	spin_unlock(&client->bos_lock);
}
/**
 * xe_drm_client_remove_bo() - Remove BO for tracking client mem usage
 * @bo: The xe BO ptr
 *
 * Remove all BO removed by individual drm client by calling this function.
 * This helps in tracking client memory usage.
 *
 * Return: void
 */
void xe_drm_client_remove_bo(struct xe_bo *bo)
{
	struct xe_device *xe = ttm_to_xe_device(bo->ttm.bdev);
	struct xe_drm_client *client = bo->client;

	/*
	 * FIX: the original asserted on 'xef->xe', but no 'xef' is declared
	 * in this function — the device local is 'xe'.
	 */
	xe_assert(xe, !list_empty(&bo->client_link));

	/* The bo ref will prevent this bo from being removed from the list */
	spin_lock(&client->bos_lock);
	/*
	 * FIX: the visible source took bos_lock and returned without ever
	 * releasing it or unlinking the BO (lock leak + stale list entry).
	 * Unlink under the lock, then drop the reference taken by
	 * xe_drm_client_add_bo().
	 */
	list_del_init(&bo->client_link);
	spin_unlock(&client->bos_lock);

	xe_drm_client_put(client);
}
/*
 * NOTE(review): the lines below are an interior fragment of a larger
 * function whose signature, locals (xe, xef, hwe, fw_ref, capacity,
 * class, i, q) and tail are NOT visible in this chunk. The xa_for_each
 * loop is cut off mid-body and the final 'continue' belongs to a later
 * loop that is also missing. Recover the complete function before
 * changing anything here — this span cannot compile as-is.
 */
/* * RING_TIMESTAMP registers are inaccessible in VF mode. * Without drm-total-cycles-*, other keys provide little value. * Show all or none of the optional "run_ticks" keys in this case.
 */ if (IS_SRIOV_VF(xe)) return;
/* * Wait for any exec queue going away: their cycles will get updated on * context switch out, so wait for that to happen
 */
wait_var_event(&xef->exec_queue.pending_removal,
!atomic_read(&xef->exec_queue.pending_removal));
/* Hold a wakeref so the GT timestamp registers stay readable. */
xe_pm_runtime_get(xe); if (!force_wake_get_any_engine(xe, &hwe, &fw_ref)) {
xe_pm_runtime_put(xe); return;
}
/* Accumulate all the exec queues from this client */
mutex_lock(&xef->exec_queue.lock);
/* NOTE(review): loop body is truncated — the matching '}' and the
 * per-queue accumulation are missing from this chunk. */
xa_for_each(&xef->exec_queue.xa, i, q) {
xe_exec_queue_get(q);
mutex_unlock(&xef->exec_queue.lock);
/* * Engines may be fused off or not exposed to userspace. Don't * return anything if this entire class is not available
 */ if (!capacity[class]) continue;
/*
 * NOTE(review): stray non-code text (originally German, apparently pasted
 * from a website disclaimer) — translated and fenced in a comment so it no
 * longer breaks the translation unit; consider deleting entirely:
 *
 * "The information on this web page has been compiled carefully and to the
 * best of our knowledge. However, neither completeness, nor correctness,
 * nor quality of the information provided is guaranteed.
 * Note: the colored syntax highlighting and the measurement are still
 * experimental."
 */