/* * We're assigning this as it is needed if the shm is to be * registered. If this function returns OK then the caller expected * to call teedev_ctx_get() or clear shm->ctx in case it's not * needed any longer.
*/
shm->ctx = ctx;
rc = teedev->pool->ops->alloc(teedev->pool, shm, size, align); if (rc) {
ret = ERR_PTR(rc); goto err_kfree;
}
/**
 * tee_shm_alloc_user_buf() - Allocate shared memory for user space
 * @ctx:	Context that allocates the shared memory
 * @size:	Requested size of shared memory
 *
 * Memory allocated as user space shared memory is automatically freed when
 * the TEE file pointer is closed. The primary usage of this function is
 * when the TEE driver doesn't support registering ordinary user space
 * memory.
 *
 * @returns a pointer to 'struct tee_shm'
 */
struct tee_shm *tee_shm_alloc_user_buf(struct tee_context *ctx, size_t size)
{
	u32 flags = TEE_SHM_DYNAMIC | TEE_SHM_POOL;
	struct tee_device *teedev = ctx->teedev;
	struct tee_shm *shm;
	void *ret;
	int id;

	/*
	 * Pre-allocate an ID with a NULL placeholder; the slot is filled
	 * in with idr_replace() below only after the shm has been fully
	 * set up, so a concurrent lookup can never see a half-built shm.
	 */
	mutex_lock(&teedev->mutex);
	id = idr_alloc(&teedev->idr, NULL, 1, 0, GFP_KERNEL);
	mutex_unlock(&teedev->mutex);
	if (id < 0)
		return ERR_PTR(id);

	/*
	 * BUG FIX: the original used 'shm' without ever assigning it.
	 * Allocate the backing memory before publishing it in the IDR,
	 * and release the reserved ID if allocation fails.
	 */
	shm = shm_alloc_helper(ctx, size, PAGE_SIZE, flags, id);
	if (IS_ERR(shm)) {
		mutex_lock(&teedev->mutex);
		idr_remove(&teedev->idr, id);
		mutex_unlock(&teedev->mutex);
		return shm;
	}

	mutex_lock(&teedev->mutex);
	ret = idr_replace(&teedev->idr, shm, id);
	mutex_unlock(&teedev->mutex);
	if (IS_ERR(ret)) {
		/* shm->id was recorded by the helper; freeing drops the IDR entry. */
		tee_shm_free(shm);
		return ret;
	}

	return shm;
}
/** * tee_shm_alloc_kernel_buf() - Allocate shared memory for kernel buffer * @ctx: Context that allocates the shared memory * @size: Requested size of shared memory * * The returned memory registered in secure world and is suitable to be * passed as a memory buffer in parameter argument to * tee_client_invoke_func(). The memory allocated is later freed with a * call to tee_shm_free(). * * @returns a pointer to 'struct tee_shm'
*/ struct tee_shm *tee_shm_alloc_kernel_buf(struct tee_context *ctx, size_t size)
{
u32 flags = TEE_SHM_DYNAMIC | TEE_SHM_POOL;
/** * tee_shm_alloc_priv_buf() - Allocate shared memory for a privately shared * kernel buffer * @ctx: Context that allocates the shared memory * @size: Requested size of shared memory * * This function returns similar shared memory as * tee_shm_alloc_kernel_buf(), but with the difference that the memory * might not be registered in secure world in case the driver supports * passing memory not registered in advance. * * This function should normally only be used internally in the TEE * drivers. * * @returns a pointer to 'struct tee_shm'
*/ struct tee_shm *tee_shm_alloc_priv_buf(struct tee_context *ctx, size_t size)
{
u32 flags = TEE_SHM_PRIV | TEE_SHM_POOL;
/* * Ignore alignment since this is already going to be page aligned * and there's no need for any larger alignment.
*/
shm->kaddr = alloc_pages_exact(nr_pages * PAGE_SIZE,
GFP_KERNEL | __GFP_ZERO); if (!shm->kaddr) return -ENOMEM;
shm->pages = kcalloc(num_pages, sizeof(*shm->pages), GFP_KERNEL); if (!shm->pages) {
ret = ERR_PTR(-ENOMEM); goto err_free_shm;
}
len = iov_iter_extract_pages(iter, &shm->pages, LONG_MAX, num_pages, 0,
&off); if (unlikely(len <= 0)) {
ret = len ? ERR_PTR(len) : ERR_PTR(-ENOMEM); goto err_free_shm_pages;
} elseif (DIV_ROUND_UP(len + off, PAGE_SIZE) != num_pages) { /* * If we only got a few pages, update to release the * correct amount below.
*/
shm->num_pages = len / PAGE_SIZE;
ret = ERR_PTR(-ENOMEM); goto err_put_shm_pages;
}
/* * iov_iter_extract_kvec_pages does not get reference on the pages, * get a reference on them.
*/ if (iov_iter_is_kvec(iter))
shm_get_kernel_pages(shm->pages, num_pages);
mutex_lock(&teedev->mutex);
ret = idr_replace(&teedev->idr, shm, id);
mutex_unlock(&teedev->mutex); if (IS_ERR(ret)) {
tee_shm_free(shm); return ret;
}
return shm;
}
/** * tee_shm_register_kernel_buf() - Register kernel memory to be shared with * secure world * @ctx: Context that registers the shared memory * @addr: The buffer * @length: Length of the buffer * * @returns a pointer to 'struct tee_shm'
*/
/**
 * tee_shm_get_va() - Get virtual address of a shared memory plus an offset
 * @shm:	Shared memory handle
 * @offs:	Offset from start of this shared memory
 * @returns virtual address of the shared memory + offs if offs is within
 *	the bounds of this shared memory, else an ERR_PTR
 */
void *tee_shm_get_va(struct tee_shm *shm, size_t offs)
{
	/* Reject shms with no kernel mapping and out-of-bounds offsets alike. */
	if (!shm->kaddr || offs >= shm->size)
		return ERR_PTR(-EINVAL);

	return (char *)shm->kaddr + offs;
}
EXPORT_SYMBOL_GPL(tee_shm_get_va);
/**
 * tee_shm_get_pa() - Get physical address of a shared memory plus an offset
 * @shm:	Shared memory handle
 * @offs:	Offset from start of this shared memory
 * @pa:		Physical address to return
 * @returns 0 if offs is within the bounds of this shared memory, else an
 *	error code.
 */
int tee_shm_get_pa(struct tee_shm *shm, size_t offs, phys_addr_t *pa)
{
	if (offs < shm->size) {
		/* @pa is optional; callers may only want the bounds check. */
		if (pa)
			*pa = shm->paddr + offs;
		return 0;
	}

	return -EINVAL;
}
EXPORT_SYMBOL_GPL(tee_shm_get_pa);
/**
 * tee_shm_get_from_id() - Find shared memory object and increase reference
 * count
 * @ctx:	Context owning the shared memory
 * @id:		Id of shared memory object
 * @returns a pointer to 'struct tee_shm' on success or an ERR_PTR on failure
 */
struct tee_shm *tee_shm_get_from_id(struct tee_context *ctx, int id)
{
	struct tee_device *teedev;
	struct tee_shm *shm;

	if (!ctx)
		return ERR_PTR(-EINVAL);

	teedev = ctx->teedev;
	mutex_lock(&teedev->mutex);
	shm = idr_find(&teedev->idr, id);
	/*
	 * Any tee_shm present in the IDR is guaranteed to hold a refcount
	 * larger than 0 (tee_shm_put() removes it from the IDR, under this
	 * same mutex, before the count can reach 0), so refcount_inc() is
	 * safe here.
	 */
	if (shm && shm->ctx == ctx)
		refcount_inc(&shm->refcount);
	else
		shm = ERR_PTR(-EINVAL);
	mutex_unlock(&teedev->mutex);
	return shm;
}
EXPORT_SYMBOL_GPL(tee_shm_get_from_id);
if (!shm || !shm->ctx || !shm->ctx->teedev) return;
teedev = shm->ctx->teedev;
mutex_lock(&teedev->mutex); if (refcount_dec_and_test(&shm->refcount)) { /* * refcount has reached 0, we must now remove it from the * IDR before releasing the mutex. This will guarantee that * the refcount_inc() in tee_shm_get_from_id() never starts * from 0.
*/ if (shm->id >= 0)
idr_remove(&teedev->idr, shm->id);
do_release = true;
}
mutex_unlock(&teedev->mutex);
if (do_release)
tee_shm_release(teedev, shm);
}
EXPORT_SYMBOL_GPL(tee_shm_put);
Messung V0.5
¤ Dauer der Verarbeitung: 0.22 Sekunden
(vorverarbeitet)
¤
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.