// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright (c) 2009-2025 Broadcom. All Rights Reserved. The term
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
 *
 **************************************************************************/
/*
 * Helper macro to get dx_ctx_node if available otherwise print an error
 * message. This is for use in command verifier functions where, if
 * dx_ctx_node is not set, the command is invalid. Evaluates to
 * dx_ctx_node either way (i.e. NULL when unset), logging only in the
 * unset case.
 */
#define VMW_GET_CTX_NODE(__sw_context)					\
({									\
	__sw_context->dx_ctx_node ? __sw_context->dx_ctx_node : ({	\
		VMW_DEBUG_USER("SM context is not set at %s\n", __func__); \
		__sw_context->dx_ctx_node;				\
	});								\
})
/**
 * struct vmw_relocation - Buffer object relocation
 *
 * @head: List head for the command submission context's relocation list
 * @vbo: Non ref-counted pointer to buffer object
 * @mob_loc: Pointer to location for mob id to be modified
 * @location: Pointer to location for guest pointer to be modified
 *
 * @mob_loc and @location share storage: a relocation patches either a MOB
 * id or a guest pointer in the command stream, never both.
 */
struct vmw_relocation {
	struct list_head head;
	struct vmw_bo *vbo;
	union {
		SVGAMobId *mob_loc;
		SVGAGuestPtr *location;
	};
};
/**
 * enum vmw_resource_relocation_type - Relocation type for resources
 *
 * @vmw_res_rel_normal: Traditional relocation. The resource id in the
 * command stream is replaced with the actual id after validation.
 * @vmw_res_rel_nop: NOP relocation. The command is unconditionally replaced
 * with a NOP.
 * @vmw_res_rel_cond_nop: Conditional NOP relocation. If the resource id after
 * validation is -1, the command is replaced with a NOP. Otherwise no action.
 * @vmw_res_rel_max: Last value in the enum - used for error checking
 */
enum vmw_resource_relocation_type {
	vmw_res_rel_normal,
	vmw_res_rel_nop,
	vmw_res_rel_cond_nop,
	vmw_res_rel_max
};
/** * struct vmw_resource_relocation - Relocation info for resources * * @head: List head for the software context's relocation list. * @res: Non-ref-counted pointer to the resource. * @offset: Offset of single byte entries into the command buffer where the id * that needs fixup is located. * @rel_type: Type of relocation.
*/ struct vmw_resource_relocation { struct list_head head; conststruct vmw_resource *res;
u32 offset:29; enum vmw_resource_relocation_type rel_type:3;
};
/**
 * struct vmw_ctx_validation_info - Extra validation metadata for contexts
 *
 * @head: List head of context list
 * @ctx: The context resource
 * @cur: The context's persistent binding state
 * @staged: The binding state changes of this command buffer
 */
struct vmw_ctx_validation_info {
	struct list_head head;
	struct vmw_resource *ctx;
	struct vmw_ctx_binding_state *cur;
	struct vmw_ctx_binding_state *staged;
};
/**
 * struct vmw_cmd_entry - Describe a command for the verifier
 *
 * @func: Call-back to handle the command.
 * @user_allow: Whether allowed from the execbuf ioctl.
 * @gb_disable: Whether disabled if guest-backed objects are available.
 * @gb_enable: Whether enabled iff guest-backed objects are available.
 * @cmd_name: Name of the command.
 */
struct vmw_cmd_entry {
	int (*func) (struct vmw_private *, struct vmw_sw_context *,
		     SVGA3dCmdHeader *);
	bool user_allow;
	bool gb_disable;
	bool gb_enable;
	const char *cmd_name;
};
static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
					struct vmw_sw_context *sw_context,
					struct vmw_resource *ctx);
static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGAMobId *id,
				 struct vmw_bo **vmw_bo_p);

/**
 * vmw_ptr_diff - Compute the offset from a to b in bytes
 *
 * @a: A starting pointer.
 * @b: A pointer offset in the same address space.
 *
 * Returns: The offset in bytes between the two pointers.
 */
static size_t vmw_ptr_diff(void *a, void *b)
{
	return (unsigned long) b - (unsigned long) a;
}
/**
 * vmw_execbuf_bindings_commit - Commit modified binding state
 *
 * @sw_context: The command submission context
 * @backoff: Whether this is part of the error path and binding state changes
 * should be ignored
 */
static void vmw_execbuf_bindings_commit(struct vmw_sw_context *sw_context,
					bool backoff)
{
	struct vmw_ctx_validation_info *entry;

	list_for_each_entry(entry, &sw_context->ctx_list, head) {
		if (!backoff)
			vmw_binding_state_commit(entry->cur, entry->staged);

		/* The shared staged state is reusable; private ones are freed. */
		if (entry->staged != sw_context->staged_bindings)
			vmw_binding_state_free(entry->staged);
		else
			sw_context->staged_bindings_inuse = false;
	}

	/* List entries are freed with the validation context */
	INIT_LIST_HEAD(&sw_context->ctx_list);
}
/**
 * vmw_bind_dx_query_mob - Bind the DX query MOB if referenced
 *
 * @sw_context: The command submission context
 */
static void vmw_bind_dx_query_mob(struct vmw_sw_context *sw_context)
{
	if (sw_context->dx_query_mob)
		vmw_context_bind_dx_query(sw_context->dx_query_ctx,
					  sw_context->dx_query_mob);
}
/**
 * vmw_cmd_ctx_first_setup - Perform the setup needed when a context is added to
 * the validate list.
 *
 * @dev_priv: Pointer to the device private:
 * @sw_context: The command submission context
 * @res: Pointer to the resource
 * @node: The validation node holding the context resource metadata
 *
 * Returns: 0 on success, negative error code on failure.
 */
static int vmw_cmd_ctx_first_setup(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   struct vmw_resource *res,
				   struct vmw_ctx_validation_info *node)
{
	int ret;

	ret = vmw_resource_context_res_add(dev_priv, sw_context, res);
	if (unlikely(ret != 0))
		goto out_err;

	if (!sw_context->staged_bindings) {
		sw_context->staged_bindings = vmw_binding_state_alloc(dev_priv);
		if (IS_ERR(sw_context->staged_bindings)) {
			ret = PTR_ERR(sw_context->staged_bindings);
			sw_context->staged_bindings = NULL;
			goto out_err;
		}
	}

	/*
	 * Reuse the shared staged state for the first context in the batch;
	 * subsequent contexts get a private allocation.
	 */
	if (sw_context->staged_bindings_inuse) {
		node->staged = vmw_binding_state_alloc(dev_priv);
		if (IS_ERR(node->staged)) {
			ret = PTR_ERR(node->staged);
			node->staged = NULL;
			goto out_err;
		}
	} else {
		node->staged = sw_context->staged_bindings;
		sw_context->staged_bindings_inuse = true;
	}

	/*
	 * NOTE(review): tail restored from upstream vmwgfx_execbuf.c; the
	 * source text was truncated here — verify against the tree.
	 */
	node->ctx = res;
	node->cur = vmw_context_binding_state(res);
	list_add_tail(&node->head, &sw_context->ctx_list);

	return 0;

out_err:
	return ret;
}
/**
 * vmw_execbuf_res_size - calculate extra size for the resource validation node
 *
 * @dev_priv: Pointer to the device private struct.
 * @res_type: The resource type.
 *
 * Guest-backed contexts and DX contexts require extra size to store execbuf
 * private information in the validation node. Typically the binding manager
 * associated data structures.
 *
 * Returns: The extra size requirement based on resource type.
 */
static unsigned int vmw_execbuf_res_size(struct vmw_private *dev_priv,
					 enum vmw_res_type res_type)
{
	return (res_type == vmw_res_dx_context ||
		(res_type == vmw_res_context && dev_priv->has_mob)) ?
		sizeof(struct vmw_ctx_validation_info) : 0;
}
/**
 * vmw_execbuf_rcache_update - Update a resource-node cache entry
 *
 * @rcache: Pointer to the entry to update.
 * @res: Pointer to the resource.
 * @private: Pointer to the execbuf-private space in the resource validation
 * node.
 */
static void vmw_execbuf_rcache_update(struct vmw_res_cache_entry *rcache,
				      struct vmw_resource *res,
				      void *private)
{
	rcache->res = res;
	rcache->private = private;
	rcache->valid = 1;
	/* The user-space handle is cached separately, once known. */
	rcache->valid_handle = 0;
}
/**
 * vmw_execbuf_res_val_add - Add a resource to the validation list.
 *
 * @sw_context: Pointer to the software context.
 * @res: Unreferenced rcu-protected pointer to the resource.
 * @dirty: Whether to change dirty status.
 * @flags: specifies whether to use the context or not
 *
 * Returns: 0 on success. Negative error code on failure. Typical error codes
 * are %-EINVAL on inconsistency and %-ESRCH if the resource was doomed.
 */
static int vmw_execbuf_res_val_add(struct vmw_sw_context *sw_context,
				   struct vmw_resource *res,
				   u32 dirty,
				   u32 flags)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;
	enum vmw_res_type res_type = vmw_res_type(res);
	struct vmw_res_cache_entry *rcache;
	struct vmw_ctx_validation_info *ctx_info;
	bool first_usage;
	unsigned int priv_size;

	/* Fast path: resource already on the validation list. */
	rcache = &sw_context->res_cache[res_type];
	if (likely(rcache->valid && rcache->res == res)) {
		if (dirty)
			vmw_validation_res_set_dirty(sw_context->ctx,
						     rcache->private, dirty);
		return 0;
	}

	if ((flags & vmw_val_add_flag_noctx) != 0) {
		ret = vmw_validation_add_resource(sw_context->ctx, res, 0, dirty,
						  (void **)&ctx_info, NULL);
		if (ret)
			return ret;
	} else {
		priv_size = vmw_execbuf_res_size(dev_priv, res_type);
		ret = vmw_validation_add_resource(sw_context->ctx, res, priv_size,
						  dirty, (void **)&ctx_info,
						  &first_usage);
		if (ret)
			return ret;

		if (priv_size && first_usage) {
			ret = vmw_cmd_ctx_first_setup(dev_priv, sw_context, res,
						      ctx_info);
			if (ret) {
				VMW_DEBUG_USER("Failed first usage context setup.\n");
				return ret;
			}
		}
	}

	/*
	 * NOTE(review): tail restored from upstream vmwgfx_execbuf.c; the
	 * source text was truncated here — verify against the tree.
	 */
	vmw_execbuf_rcache_update(rcache, res, ctx_info);
	return 0;
}
/** * vmw_view_res_val_add - Add a view and the surface it's pointing to to the * validation list * * @sw_context: The software context holding the validation list. * @view: Pointer to the view resource. * * Returns 0 if success, negative error code otherwise.
*/ staticint vmw_view_res_val_add(struct vmw_sw_context *sw_context, struct vmw_resource *view)
{ int ret;
/* * First add the resource the view is pointing to, otherwise it may be * swapped out when the view is validated.
*/
ret = vmw_execbuf_res_val_add(sw_context, vmw_view_srf(view),
vmw_view_dirtying(view), vmw_val_add_flag_noctx); if (ret) return ret;
/** * vmw_view_id_val_add - Look up a view and add it and the surface it's pointing * to to the validation list. * * @sw_context: The software context holding the validation list. * @view_type: The view type to look up. * @id: view id of the view. * * The view is represented by a view id and the DX context it's created on, or * scheduled for creation on. If there is no DX context set, the function will * return an -EINVAL error pointer. * * Returns: Unreferenced pointer to the resource on success, negative error * pointer on failure.
*/ staticstruct vmw_resource *
vmw_view_id_val_add(struct vmw_sw_context *sw_context, enum vmw_view_type view_type, u32 id)
{ struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node; struct vmw_resource *view; int ret;
if (!ctx_node) return ERR_PTR(-EINVAL);
view = vmw_view_lookup(sw_context->man, view_type, id); if (IS_ERR(view)) return view;
ret = vmw_view_res_val_add(sw_context, view); if (ret) return ERR_PTR(ret);
return view;
}
/**
 * vmw_resource_context_res_add - Put resources previously bound to a context on
 * the validation list
 *
 * @dev_priv: Pointer to a device private structure
 * @sw_context: Pointer to a software context used for this command submission
 * @ctx: Pointer to the context resource
 *
 * This function puts all resources that were previously bound to @ctx on the
 * resource validation list. This is part of the context state reemission
 *
 * Returns: 0 on success, negative error code on failure.
 */
static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
					struct vmw_sw_context *sw_context,
					struct vmw_resource *ctx)
{
	struct list_head *binding_list;
	struct vmw_ctx_bindinfo *entry;
	int ret = 0;
	struct vmw_resource *res;
	u32 i;
	u32 cotable_max = has_sm5_context(ctx->dev_priv) ?
		SVGA_COTABLE_MAX : SVGA_COTABLE_DX10_MAX;

	/* Add all cotables to the validation list. */
	if (has_sm4_context(dev_priv) &&
	    vmw_res_type(ctx) == vmw_res_dx_context) {
		for (i = 0; i < cotable_max; ++i) {
			res = vmw_context_cotable(ctx, i);
			if (IS_ERR_OR_NULL(res))
				continue;

			ret = vmw_execbuf_res_val_add(sw_context, res,
						      VMW_RES_DIRTY_SET,
						      vmw_val_add_flag_noctx);
			if (unlikely(ret != 0))
				return ret;
		}
	}

	/* Add all resources bound to the context to the validation list */
	mutex_lock(&dev_priv->binding_mutex);
	binding_list = vmw_context_binding_list(ctx);

	list_for_each_entry(entry, binding_list, ctx_list) {
		if (vmw_res_type(entry->res) == vmw_res_view)
			ret = vmw_view_res_val_add(sw_context, entry->res);
		else
			ret = vmw_execbuf_res_val_add(sw_context, entry->res,
						      vmw_binding_dirtying(entry->bt),
						      vmw_val_add_flag_noctx);
		if (unlikely(ret != 0))
			break;
	}

	if (has_sm4_context(dev_priv) &&
	    vmw_res_type(ctx) == vmw_res_dx_context) {
		struct vmw_bo *dx_query_mob;

		dx_query_mob = vmw_context_get_dx_query_mob(ctx);
		if (dx_query_mob) {
			vmw_bo_placement_set(dx_query_mob,
					     VMW_BO_DOMAIN_MOB,
					     VMW_BO_DOMAIN_MOB);
			ret = vmw_validation_add_bo(sw_context->ctx,
						    dx_query_mob);
		}
	}

	/*
	 * NOTE(review): tail restored from upstream vmwgfx_execbuf.c; the
	 * source text was truncated here — verify against the tree.
	 */
	mutex_unlock(&dev_priv->binding_mutex);
	return ret;
}
/** * vmw_resource_relocation_add - Add a relocation to the relocation list * * @sw_context: Pointer to the software context. * @res: The resource. * @offset: Offset into the command buffer currently being parsed where the id * that needs fixup is located. Granularity is one byte. * @rel_type: Relocation type.
*/ staticint vmw_resource_relocation_add(struct vmw_sw_context *sw_context, conststruct vmw_resource *res, unsignedlong offset, enum vmw_resource_relocation_type
rel_type)
{ struct vmw_resource_relocation *rel;
rel = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*rel)); if (unlikely(!rel)) {
VMW_DEBUG_USER("Failed to allocate a resource relocation.\n"); return -ENOMEM;
}
/**
 * vmw_resource_relocations_free - Free all relocations on a list
 *
 * @list: Pointer to the head of the relocation list
 */
static void vmw_resource_relocations_free(struct list_head *list)
{
	/* Memory is validation context memory, so no need to free it */
	INIT_LIST_HEAD(list);
}
/**
 * vmw_resource_relocations_apply - Apply all relocations on a list
 *
 * @cb: Pointer to the start of the command buffer being patched. This need not
 * be the same buffer as the one being parsed when the relocation list was
 * built, but the contents must be the same modulo the resource ids.
 * @list: Pointer to the head of the relocation list.
 */
static void vmw_resource_relocations_apply(uint32_t *cb,
					   struct list_head *list)
{
	struct vmw_resource_relocation *rel;

	/* Validate the struct vmw_resource_relocation member size */
	BUILD_BUG_ON(SVGA_CB_MAX_SIZE >= (1 << 29));
	BUILD_BUG_ON(vmw_res_rel_max >= (1 << 3));

	/*
	 * NOTE(review): loop body restored from upstream vmwgfx_execbuf.c;
	 * the source text was truncated here — verify against the tree.
	 */
	list_for_each_entry(rel, list, head) {
		u32 *addr = (u32 *)((unsigned long) cb + rel->offset);

		switch (rel->rel_type) {
		case vmw_res_rel_normal:
			*addr = rel->res->id;
			break;
		case vmw_res_rel_nop:
			*addr = SVGA_3D_CMD_NOP;
			break;
		default:
			if (rel->res->id == -1)
				*addr = SVGA_3D_CMD_NOP;
			break;
		}
	}
}
/**
 * vmw_resources_reserve - Reserve all resources on the sw_context's resource
 * list.
 *
 * @sw_context: Pointer to the software context.
 *
 * Note that since vmware's command submission currently is protected by the
 * cmdbuf mutex, no fancy deadlock avoidance is required for resources, since
 * only a single thread at once will attempt this.
 */
static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
{
	int ret;

	ret = vmw_validation_res_reserve(sw_context->ctx, true);
	if (ret)
		return ret;

	if (sw_context->dx_query_mob) {
		struct vmw_bo *expected_dx_query_mob;

		/* The bound query MOB must match the one already on the ctx. */
		expected_dx_query_mob =
			vmw_context_get_dx_query_mob(sw_context->dx_query_ctx);
		if (expected_dx_query_mob &&
		    expected_dx_query_mob != sw_context->dx_query_mob) {
			ret = -EINVAL;
		}
	}

	return ret;
}
/**
 * vmw_cmd_res_check - Check that a resource is present and if so, put it on the
 * resource validate list unless it's already there.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: Pointer to the software context.
 * @res_type: Resource type.
 * @dirty: Whether to change dirty status.
 * @converter: User-space visible type specific information.
 * @id_loc: Pointer to the location in the command buffer currently being parsed
 * from where the user-space resource id handle is located.
 * @p_res: Pointer to pointer to resource validation node. Populated on
 * exit.
 */
static int
vmw_cmd_res_check(struct vmw_private *dev_priv,
		  struct vmw_sw_context *sw_context,
		  enum vmw_res_type res_type,
		  u32 dirty,
		  const struct vmw_user_resource_conv *converter,
		  uint32_t *id_loc,
		  struct vmw_resource **p_res)
{
	struct vmw_res_cache_entry *rcache = &sw_context->res_cache[res_type];
	struct vmw_resource *res;
	int ret = 0;
	bool needs_unref = false;

	if (p_res)
		*p_res = NULL;

	if (*id_loc == SVGA3D_INVALID_ID) {
		if (res_type == vmw_res_context) {
			VMW_DEBUG_USER("Illegal context invalid id.\n");
			return -EINVAL;
		}
		return 0;
	}

	if (likely(rcache->valid_handle && *id_loc == rcache->handle)) {
		res = rcache->res;
		if (dirty)
			vmw_validation_res_set_dirty(sw_context->ctx,
						     rcache->private, dirty);
	} else {
		unsigned int size = vmw_execbuf_res_size(dev_priv, res_type);

		ret = vmw_validation_preload_res(sw_context->ctx, size);
		if (ret)
			return ret;

		ret = vmw_user_resource_lookup_handle
			(dev_priv, sw_context->fp->tfile, *id_loc, converter, &res);
		if (ret != 0) {
			VMW_DEBUG_USER("Could not find/use resource 0x%08x.\n",
				       (unsigned int) *id_loc);
			return ret;
		}
		needs_unref = true;

		ret = vmw_execbuf_res_val_add(sw_context, res, dirty,
					      vmw_val_add_flag_none);
		if (unlikely(ret != 0))
			goto res_check_done;

		/*
		 * NOTE(review): everything below restored from upstream
		 * vmwgfx_execbuf.c; the source text was truncated here —
		 * verify against the tree.
		 */
		if (rcache->valid && rcache->res == res) {
			rcache->valid_handle = true;
			rcache->handle = *id_loc;
		}
	}

	ret = vmw_resource_relocation_add(sw_context, res,
					  vmw_ptr_diff(sw_context->buf_start,
						       id_loc),
					  vmw_res_rel_normal);
	if (p_res)
		*p_res = res;

res_check_done:
	if (needs_unref)
		vmw_resource_unreference(&res);

	return ret;
}
/**
 * vmw_rebind_contexts - Rebind all resources previously bound to referenced
 * contexts.
 *
 * @sw_context: Pointer to the software context.
 *
 * Rebind context binding points that have been scrubbed because of eviction.
 */
static int vmw_rebind_contexts(struct vmw_sw_context *sw_context)
{
	struct vmw_ctx_validation_info *val;
	int ret;

	list_for_each_entry(val, &sw_context->ctx_list, head) {
		ret = vmw_binding_rebind_all(val->cur);
		if (unlikely(ret != 0)) {
			/* -ERESTARTSYS is an expected interruption, not an error. */
			if (ret != -ERESTARTSYS)
				VMW_DEBUG_USER("Failed to rebind context.\n");
			return ret;
		}

		ret = vmw_rebind_all_dx_query(val->ctx);
		if (ret != 0) {
			VMW_DEBUG_USER("Failed to rebind queries.\n");
			return ret;
		}
	}

	return 0;
}
/**
 * vmw_view_bindings_add - Add an array of view bindings to a context binding
 * state tracker.
 *
 * @sw_context: The execbuf state used for this command.
 * @view_type: View type for the bindings.
 * @binding_type: Binding type for the bindings.
 * @shader_slot: The shader slot to user for the bindings.
 * @view_ids: Array of view ids to be bound.
 * @num_views: Number of view ids in @view_ids.
 * @first_slot: The binding slot to be used for the first view id in @view_ids.
 */
static int vmw_view_bindings_add(struct vmw_sw_context *sw_context,
				 enum vmw_view_type view_type,
				 enum vmw_ctx_binding_type binding_type,
				 uint32 shader_slot,
				 uint32 view_ids[], u32 num_views,
				 u32 first_slot)
{
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	u32 i;

	if (!ctx_node)
		return -EINVAL;

	/*
	 * NOTE(review): loop body restored from upstream vmwgfx_execbuf.c;
	 * the source text was truncated here — verify against the tree.
	 */
	for (i = 0; i < num_views; ++i) {
		struct vmw_ctx_bindinfo_view binding;
		struct vmw_resource *view = NULL;

		if (view_ids[i] != SVGA3D_INVALID_ID) {
			view = vmw_view_id_val_add(sw_context, view_type,
						   view_ids[i]);
			if (IS_ERR(view)) {
				VMW_DEBUG_USER("View not found.\n");
				return PTR_ERR(view);
			}
		}
		binding.bi.ctx = ctx_node->ctx;
		binding.bi.res = view;
		binding.bi.bt = binding_type;
		binding.shader_slot = shader_slot;
		binding.slot = first_slot + i;
		vmw_binding_add(ctx_node->staged, &binding.bi,
				shader_slot, binding.slot);
	}

	return 0;
}
/** * vmw_execbuf_info_from_res - Get the private validation metadata for a * recently validated resource * * @sw_context: Pointer to the command submission context * @res: The resource * * The resource pointed to by @res needs to be present in the command submission * context's resource cache and hence the last resource of that type to be * processed by the validation code. * * Return: a pointer to the private metadata of the resource, or NULL if it * wasn't found
*/ staticstruct vmw_ctx_validation_info *
vmw_execbuf_info_from_res(struct vmw_sw_context *sw_context, struct vmw_resource *res)
{ struct vmw_res_cache_entry *rcache =
&sw_context->res_cache[vmw_res_type(res)];
if (rcache->valid && rcache->res == res) return rcache->private;
/** * vmw_query_bo_switch_prepare - Prepare to switch pinned buffer for queries. * * @dev_priv: The device private structure. * @new_query_bo: The new buffer holding query results. * @sw_context: The software context used for this command submission. * * This function checks whether @new_query_bo is suitable for holding query * results, and if another buffer currently is pinned for query results. If so, * the function prepares the state of @sw_context for switching pinned buffers * after successful submission of the current command batch.
*/ staticint vmw_query_bo_switch_prepare(struct vmw_private *dev_priv, struct vmw_bo *new_query_bo, struct vmw_sw_context *sw_context)
{ struct vmw_res_cache_entry *ctx_entry =
&sw_context->res_cache[vmw_res_context]; int ret;
if (unlikely(new_query_bo != sw_context->cur_query_bo)) {
if (unlikely(PFN_UP(new_query_bo->tbo.resource->size) > 4)) {
VMW_DEBUG_USER("Query buffer too large.\n"); return -EINVAL;
}
if (unlikely(sw_context->cur_query_bo != NULL)) {
sw_context->needs_post_query_barrier = true;
vmw_bo_placement_set_default_accelerated(sw_context->cur_query_bo);
ret = vmw_validation_add_bo(sw_context->ctx,
sw_context->cur_query_bo); if (unlikely(ret != 0)) return ret;
}
sw_context->cur_query_bo = new_query_bo;
vmw_bo_placement_set_default_accelerated(dev_priv->dummy_query_bo);
ret = vmw_validation_add_bo(sw_context->ctx,
dev_priv->dummy_query_bo); if (unlikely(ret != 0)) return ret;
}
return 0;
}
/** * vmw_query_bo_switch_commit - Finalize switching pinned query buffer * * @dev_priv: The device private structure. * @sw_context: The software context used for this command submission batch. * * This function will check if we're switching query buffers, and will then, * issue a dummy occlusion query wait used as a query barrier. When the fence * object following that query wait has signaled, we are sure that all preceding * queries have finished, and the old query buffer can be unpinned. However, * since both the new query buffer and the old one are fenced with that fence, * we can do an asynchronus unpin now, and be sure that the old query buffer * won't be moved until the fence has signaled. * * As mentioned above, both the new - and old query buffers need to be fenced * using a sequence emitted *after* calling this function.
*/ staticvoid vmw_query_bo_switch_commit(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context)
{ /* * The validate list should still hold references to all * contexts here.
*/ if (sw_context->needs_post_query_barrier) { struct vmw_res_cache_entry *ctx_entry =
&sw_context->res_cache[vmw_res_context]; struct vmw_resource *ctx; int ret;
BUG_ON(!ctx_entry->valid);
ctx = ctx_entry->res;
ret = vmw_cmd_emit_dummy_query(dev_priv, ctx->id);
if (unlikely(ret != 0))
VMW_DEBUG_USER("Out of fifo space for dummy query.\n");
}
if (dev_priv->pinned_bo != sw_context->cur_query_bo) { if (dev_priv->pinned_bo) {
vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
vmw_bo_unreference(&dev_priv->pinned_bo);
}
if (!sw_context->needs_post_query_barrier) {
vmw_bo_pin_reserved(sw_context->cur_query_bo, true);
/* * We pin also the dummy_query_bo buffer so that we * don't need to validate it when emitting dummy queries * in context destroy paths.
*/ if (!dev_priv->dummy_query_bo_pinned) {
vmw_bo_pin_reserved(dev_priv->dummy_query_bo, true);
dev_priv->dummy_query_bo_pinned = true;
}
/**
 * vmw_translate_mob_ptr - Prepare to translate a user-space buffer handle
 * to a MOB id.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: The software context used for this command batch validation.
 * @id: Pointer to the user-space handle to be translated.
 * @vmw_bo_p: Points to a location that, on successful return will carry a
 * non-reference-counted pointer to the buffer object identified by the
 * user-space handle in @id.
 *
 * This function saves information needed to translate a user-space buffer
 * handle to a MOB id. The translation does not take place immediately, but
 * during a call to vmw_apply_relocations().
 *
 * This function builds a relocation list and a list of buffers to validate. The
 * former needs to be freed using either vmw_apply_relocations() or
 * vmw_free_relocations(). The latter needs to be freed using
 * vmw_clear_validations.
 */
static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGAMobId *id,
				 struct vmw_bo **vmw_bo_p)
{
	struct vmw_bo *vmw_bo, *tmp_bo;
	uint32_t handle = *id;
	struct vmw_relocation *reloc;
	int ret;

	vmw_validation_preload_bo(sw_context->ctx);
	ret = vmw_user_bo_lookup(sw_context->filp, handle, &vmw_bo);
	if (ret != 0) {
		drm_dbg(&dev_priv->drm, "Could not find or use MOB buffer.\n");
		/*
		 * Propagate the lookup error directly; vmw_bo is not an
		 * ERR_PTR here and may be uninitialized on failure.
		 */
		return ret;
	}
	vmw_bo_placement_set(vmw_bo, VMW_BO_DOMAIN_MOB, VMW_BO_DOMAIN_MOB);
	ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo);
	tmp_bo = vmw_bo;
	vmw_user_bo_unref(&tmp_bo);
	if (unlikely(ret != 0))
		return ret;

	reloc = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*reloc));
	if (!reloc)
		return -ENOMEM;

	/*
	 * NOTE(review): tail restored from upstream vmwgfx_execbuf.c; the
	 * source text was truncated here — verify against the tree.
	 */
	reloc->mob_loc = id;
	reloc->vbo = vmw_bo;

	*vmw_bo_p = vmw_bo;
	list_add_tail(&reloc->head, &sw_context->bo_relocations);

	return 0;
}
/**
 * vmw_translate_guest_ptr - Prepare to translate a user-space buffer handle
 * to a valid SVGAGuestPtr
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: The software context used for this command batch validation.
 * @ptr: Pointer to the user-space handle to be translated.
 * @vmw_bo_p: Points to a location that, on successful return will carry a
 * non-reference-counted pointer to the DMA buffer identified by the user-space
 * handle in @id.
 *
 * This function saves information needed to translate a user-space buffer
 * handle to a valid SVGAGuestPtr. The translation does not take place
 * immediately, but during a call to vmw_apply_relocations().
 *
 * This function builds a relocation list and a list of buffers to validate.
 * The former needs to be freed using either vmw_apply_relocations() or
 * vmw_free_relocations(). The latter needs to be freed using
 * vmw_clear_validations.
 */
static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGAGuestPtr *ptr,
				   struct vmw_bo **vmw_bo_p)
{
	struct vmw_bo *vmw_bo, *tmp_bo;
	uint32_t handle = ptr->gmrId;
	struct vmw_relocation *reloc;
	int ret;

	vmw_validation_preload_bo(sw_context->ctx);
	ret = vmw_user_bo_lookup(sw_context->filp, handle, &vmw_bo);
	if (ret != 0) {
		drm_dbg(&dev_priv->drm, "Could not find or use GMR region.\n");
		/*
		 * Propagate the lookup error directly; vmw_bo is not an
		 * ERR_PTR here and may be uninitialized on failure.
		 */
		return ret;
	}
	vmw_bo_placement_set(vmw_bo, VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM,
			     VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM);
	ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo);
	tmp_bo = vmw_bo;
	vmw_user_bo_unref(&tmp_bo);
	if (unlikely(ret != 0))
		return ret;

	reloc = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*reloc));
	if (!reloc)
		return -ENOMEM;

	/*
	 * NOTE(review): tail restored from upstream vmwgfx_execbuf.c; the
	 * source text was truncated here — verify against the tree.
	 */
	reloc->location = ptr;
	reloc->vbo = vmw_bo;

	*vmw_bo_p = vmw_bo;
	list_add_tail(&reloc->head, &sw_context->bo_relocations);

	return 0;
}
/**
 * vmw_cmd_dx_define_query - validate SVGA_3D_CMD_DX_DEFINE_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 *
 * This function adds the new query into the query COTABLE
 */
static int vmw_cmd_dx_define_query(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXDefineQuery);
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	struct vmw_resource *cotable_res;
	int ret;

	if (!ctx_node)
		return -EINVAL;

	cmd = container_of(header, typeof(*cmd), header);

	/* Reject query types outside the valid device range. */
	if (cmd->body.type < SVGA3D_QUERYTYPE_MIN ||
	    cmd->body.type >= SVGA3D_QUERYTYPE_MAX)
		return -EINVAL;

	cotable_res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_DXQUERY);
	if (IS_ERR_OR_NULL(cotable_res))
		return cotable_res ? PTR_ERR(cotable_res) : -EINVAL;
	ret = vmw_cotable_notify(cotable_res, cmd->body.queryId);

	return ret;
}
/**
 * vmw_cmd_dx_bind_query - validate SVGA_3D_CMD_DX_BIND_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 *
 * The query bind operation will eventually associate the query ID with its
 * backing MOB. In this function, we take the user mode MOB ID and use
 * vmw_translate_mob_ptr() to translate it to its kernel mode equivalent.
 */
static int vmw_cmd_dx_bind_query(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBindQuery);
	struct vmw_bo *vmw_bo;
	int ret;

	cmd = container_of(header, typeof(*cmd), header);

	/*
	 * Look up the buffer pointed to by q.mobid, put it on the relocation
	 * list so its kernel mode MOB ID can be filled in later
	 */
	ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->body.mobid,
				    &vmw_bo);
	if (ret)
		return ret;

	/*
	 * NOTE(review): tail restored from upstream vmwgfx_execbuf.c; the
	 * source text was truncated here — verify against the tree.
	 */
	sw_context->dx_query_mob = vmw_bo;
	sw_context->dx_query_ctx = sw_context->dx_ctx_node->ctx;

	return 0;
}
/**
 * vmw_cmd_end_gb_query - validate SVGA_3D_CMD_END_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context,
				SVGA3dCmdHeader *header)
{
	struct vmw_bo *vmw_bo;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdEndGBQuery);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->body.mobid,
				    &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);

	return ret;
}
/**
 * vmw_cmd_end_query - validate SVGA_3D_CMD_END_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_end_query(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	struct vmw_bo *vmw_bo;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdEndQuery);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	if (dev_priv->has_mob) {
		VMW_DECLARE_CMD_VAR(gb_cmd, SVGA3dCmdEndGBQuery);

		/*
		 * NOTE(review): guest-backed conversion body restored from
		 * upstream vmwgfx_execbuf.c; the source text was truncated
		 * here — verify against the tree.
		 */
		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

		gb_cmd.header.id = SVGA_3D_CMD_END_GB_QUERY;
		gb_cmd.header.size = cmd->header.size;
		gb_cmd.body.cid = cmd->body.cid;
		gb_cmd.body.mobid = SVGA3D_INVALID_ID;
		gb_cmd.body.offset = 0;

		memcpy(cmd, &gb_cmd, sizeof(*cmd));

		return vmw_cmd_end_gb_query(dev_priv, sw_context, header);
	}

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->body.guestResult, &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);

	return ret;
}
/**
 * vmw_cmd_wait_gb_query - validate SVGA_3D_CMD_WAIT_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	struct vmw_bo *vmw_bo;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdWaitForGBQuery);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	/* Only the MOB lookup/validation is needed; no query-bo switch. */
	ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->body.mobid,
				    &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	return 0;
}
/**
 * vmw_cmd_wait_query - validate SVGA_3D_CMD_WAIT_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
			      struct vmw_sw_context *sw_context,
			      SVGA3dCmdHeader *header)
{
	struct vmw_bo *vmw_bo;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdWaitForQuery);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	if (dev_priv->has_mob) {
		VMW_DECLARE_CMD_VAR(gb_cmd, SVGA3dCmdWaitForGBQuery);

		/*
		 * NOTE(review): everything from here on restored from
		 * upstream vmwgfx_execbuf.c; the source text was truncated
		 * here — verify against the tree.
		 */
		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

		gb_cmd.header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY;
		gb_cmd.header.size = cmd->header.size;
		gb_cmd.body.cid = cmd->body.cid;
		gb_cmd.body.mobid = SVGA3D_INVALID_ID;
		gb_cmd.body.offset = 0;

		memcpy(cmd, &gb_cmd, sizeof(*cmd));

		return vmw_cmd_wait_gb_query(dev_priv, sw_context, header);
	}

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->body.guestResult, &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	return 0;
}
/**
 * vmw_cmd_res_switch_backup - Utility function to handle backup buffer
 * switching
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @res: Pointer to the resource.
 * @buf_id: Pointer to the user-space backup buffer handle in the command
 * stream.
 * @backup_offset: Offset of backup into MOB.
 *
 * This function prepares for registering a switch of backup buffers in the
 * resource metadata just prior to unreserving.
 */
static int vmw_cmd_res_switch_backup(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     struct vmw_resource *res,
				     uint32_t *buf_id,
				     unsigned long backup_offset)
{
	struct vmw_bo *vbo;
	void *info;
	int ret;

	info = vmw_execbuf_info_from_res(sw_context, res);
	if (!info)
		return -EINVAL;

	ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &vbo);
	if (ret)
		return ret;

	/*
	 * NOTE(review): tail restored from upstream vmwgfx_execbuf.c; the
	 * source text was truncated here — verify against the tree.
	 */
	vmw_validation_res_switch_backup(sw_context->ctx, info, vbo,
					 backup_offset);
	return 0;
}
/**
 * vmw_cmd_switch_backup - Utility function to handle backup buffer switching
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @res_type: The resource type.
 * @converter: Information about user-space binding for this resource type.
 * @res_id: Pointer to the user-space resource handle in the command stream.
 * @buf_id: Pointer to the user-space backup buffer handle in the command
 * stream.
 * @backup_offset: Offset of backup into MOB.
 *
 * This function prepares for registering a switch of backup buffers in the
 * resource metadata just prior to unreserving. It's basically a wrapper around
 * vmw_cmd_res_switch_backup with a different interface.
 */
static int vmw_cmd_switch_backup(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 enum vmw_res_type res_type,
				 const struct vmw_user_resource_conv
				 *converter, uint32_t *res_id, uint32_t *buf_id,
				 unsigned long backup_offset)
{
	struct vmw_resource *res;
	int ret;

	ret = vmw_cmd_res_check(dev_priv, sw_context, res_type,
				VMW_RES_DIRTY_NONE, converter, res_id, &res);
	if (ret)
		return ret;

	/*
	 * NOTE(review): tail restored from upstream vmwgfx_execbuf.c; the
	 * source text was truncated here — verify against the tree.
	 */
	return vmw_cmd_res_switch_backup(dev_priv, sw_context, res, buf_id,
					 backup_offset);
}
/** * vmw_cmd_bind_gb_surface - Validate SVGA_3D_CMD_BIND_GB_SURFACE command * * @dev_priv: Pointer to a device private struct. * @sw_context: The software context being used for this batch. * @header: Pointer to the command header in the command stream.
*/ staticint vmw_cmd_bind_gb_surface(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBindGBSurface) =
container_of(header, typeof(*cmd), header);
/** * vmw_cmd_update_gb_image - Validate SVGA_3D_CMD_UPDATE_GB_IMAGE command * * @dev_priv: Pointer to a device private struct. * @sw_context: The software context being used for this batch. * @header: Pointer to the command header in the command stream.
*/ staticint vmw_cmd_update_gb_image(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdUpdateGBImage) =
container_of(header, typeof(*cmd), header);
/** * vmw_cmd_update_gb_surface - Validate SVGA_3D_CMD_UPDATE_GB_SURFACE command * * @dev_priv: Pointer to a device private struct. * @sw_context: The software context being used for this batch. * @header: Pointer to the command header in the command stream.
*/ staticint vmw_cmd_update_gb_surface(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdUpdateGBSurface) =
container_of(header, typeof(*cmd), header);
/** * vmw_cmd_readback_gb_image - Validate SVGA_3D_CMD_READBACK_GB_IMAGE command * * @dev_priv: Pointer to a device private struct. * @sw_context: The software context being used for this batch. * @header: Pointer to the command header in the command stream.
*/ staticint vmw_cmd_readback_gb_image(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdReadbackGBImage) =
container_of(header, typeof(*cmd), header);
/** * vmw_cmd_readback_gb_surface - Validate SVGA_3D_CMD_READBACK_GB_SURFACE * command * * @dev_priv: Pointer to a device private struct. * @sw_context: The software context being used for this batch. * @header: Pointer to the command header in the command stream.
*/ staticint vmw_cmd_readback_gb_surface(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdReadbackGBSurface) =
container_of(header, typeof(*cmd), header);
/** * vmw_cmd_invalidate_gb_image - Validate SVGA_3D_CMD_INVALIDATE_GB_IMAGE * command * * @dev_priv: Pointer to a device private struct. * @sw_context: The software context being used for this batch. * @header: Pointer to the command header in the command stream.
*/ staticint vmw_cmd_invalidate_gb_image(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdInvalidateGBImage) =
container_of(header, typeof(*cmd), header);
/** * vmw_cmd_invalidate_gb_surface - Validate SVGA_3D_CMD_INVALIDATE_GB_SURFACE * command * * @dev_priv: Pointer to a device private struct. * @sw_context: The software context being used for this batch. * @header: Pointer to the command header in the command stream.
*/ staticint vmw_cmd_invalidate_gb_surface(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdInvalidateGBSurface) =
container_of(header, typeof(*cmd), header);
/** * vmw_cmd_shader_define - Validate SVGA_3D_CMD_SHADER_DEFINE command * * @dev_priv: Pointer to a device private struct. * @sw_context: The software context being used for this batch. * @header: Pointer to the command header in the command stream.
*/ staticint vmw_cmd_shader_define(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDefineShader); int ret;
size_t size; struct vmw_resource *ctx;
cmd = container_of(header, typeof(*cmd), header);
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
VMW_RES_DIRTY_SET, user_context_converter,
&cmd->body.cid, &ctx); if (unlikely(ret != 0)) return ret;
/** * vmw_cmd_shader_destroy - Validate SVGA_3D_CMD_SHADER_DESTROY command * * @dev_priv: Pointer to a device private struct. * @sw_context: The software context being used for this batch. * @header: Pointer to the command header in the command stream.
*/ staticint vmw_cmd_shader_destroy(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDestroyShader); int ret; struct vmw_resource *ctx;
cmd = container_of(header, typeof(*cmd), header);
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
VMW_RES_DIRTY_SET, user_context_converter,
&cmd->body.cid, &ctx); if (unlikely(ret != 0)) return ret;
if (unlikely(!dev_priv->has_mob)) return 0;
ret = vmw_shader_remove(vmw_context_res_man(ctx), cmd->body.shid,
cmd->body.type, &sw_context->staged_cmd_res); if (unlikely(ret != 0)) return ret;
/** * vmw_cmd_set_shader - Validate SVGA_3D_CMD_SET_SHADER command * * @dev_priv: Pointer to a device private struct. * @sw_context: The software context being used for this batch. * @header: Pointer to the command header in the command stream.
*/ staticint vmw_cmd_set_shader(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetShader); struct vmw_ctx_bindinfo_shader binding; struct vmw_resource *ctx, *res = NULL; struct vmw_ctx_validation_info *ctx_info; int ret;
cmd = container_of(header, typeof(*cmd), header);
if (!vmw_shadertype_is_valid(VMW_SM_LEGACY, cmd->body.type)) {
VMW_DEBUG_USER("Illegal shader type %u.\n",
(unsignedint) cmd->body.type); return -EINVAL;
}
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
VMW_RES_DIRTY_SET, user_context_converter,
&cmd->body.cid, &ctx); if (unlikely(ret != 0)) return ret;
if (!dev_priv->has_mob) return 0;
if (cmd->body.shid != SVGA3D_INVALID_ID) { /* * This is the compat shader path - Per device guest-backed * shaders, but user-space thinks it's per context host- * backed shaders.
*/
res = vmw_shader_lookup(vmw_context_res_man(ctx),
cmd->body.shid, cmd->body.type); if (!IS_ERR(res)) {
ret = vmw_execbuf_res_val_add(sw_context, res,
VMW_RES_DIRTY_NONE,
vmw_val_add_flag_noctx); if (unlikely(ret != 0)) return ret;
ret = vmw_resource_relocation_add
(sw_context, res,
vmw_ptr_diff(sw_context->buf_start,
&cmd->body.shid),
vmw_res_rel_normal); if (unlikely(ret != 0)) return ret;
}
}
if (IS_ERR_OR_NULL(res)) {
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_shader,
VMW_RES_DIRTY_NONE,
user_shader_converter, &cmd->body.shid,
&res); if (unlikely(ret != 0)) return ret;
}
ctx_info = vmw_execbuf_info_from_res(sw_context, ctx); if (!ctx_info) return -EINVAL;
/**
 * vmw_cmd_set_shader_const - Validate SVGA_3D_CMD_SET_SHADER_CONST command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 *
 * Checks the context id referenced by the command and, on guest-backed
 * devices, rewrites the legacy command id in place to its MOB-backed
 * equivalent.
 */
static int vmw_cmd_set_shader_const(struct vmw_private *dev_priv,
				    struct vmw_sw_context *sw_context,
				    SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetShaderConst) =
		container_of(header, typeof(*cmd), header);
	int ret;

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				VMW_RES_DIRTY_SET, user_context_converter,
				&cmd->body.cid, NULL);
	if (ret != 0)
		return ret;

	/* Guest-backed devices handle shader constants through MOBs. */
	if (dev_priv->has_mob)
		header->id = SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE;

	return 0;
}
/** * vmw_cmd_bind_gb_shader - Validate SVGA_3D_CMD_BIND_GB_SHADER command * * @dev_priv: Pointer to a device private struct. * @sw_context: The software context being used for this batch. * @header: Pointer to the command header in the command stream.
*/ staticint vmw_cmd_bind_gb_shader(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBindGBShader) =
container_of(header, typeof(*cmd), header);
/** * vmw_cmd_dx_set_single_constant_buffer - Validate * SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER command. * * @dev_priv: Pointer to a device private struct. * @sw_context: The software context being used for this batch. * @header: Pointer to the command header in the command stream.
*/ staticint
vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetSingleConstantBuffer);
/** * vmw_cmd_dx_set_constant_buffer_offset - Validate * SVGA_3D_CMD_DX_SET_VS/PS/GS/HS/DS/CS_CONSTANT_BUFFER_OFFSET command. * * @dev_priv: Pointer to a device private struct. * @sw_context: The software context being used for this batch. * @header: Pointer to the command header in the command stream.
*/ staticint
vmw_cmd_dx_set_constant_buffer_offset(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetConstantBufferOffset);
/*
 * NOTE(review): the following German disclaimer text appears to be
 * accidental contamination from a web page (a generic "information
 * provided without warranty" notice) and is not part of the driver
 * source. It is preserved here, commented out, pending removal so the
 * file remains compilable:
 *
 * Die Informationen auf dieser Webseite wurden
 * nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder
 * Vollständigkeit, noch Richtigkeit, noch Qualität der bereit gestellten
 * Informationen zugesichert.
 * Bemerkung:
 * Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.
 */