/*
 * videobuf2-dma-sg.c - dma scatter/gather memory allocator for videobuf2
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Andrzej Pietrasiewicz <andrzejtp2010@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */
/*
 * Debug print helper: emits a KERN_DEBUG message prefixed with
 * "vb2-dma-sg: " when the module 'debug' level is at least @level.
 * Wrapped in do { } while (0) so it expands to a single statement.
 *
 * Fix: the line continuations were collapsed to mid-line "\ " sequences
 * (a backslash must immediately precede the newline); restored proper
 * multi-line macro form.
 */
#define dprintk(level, fmt, arg...)					\
	do {								\
		if (debug >= level)					\
			printk(KERN_DEBUG "vb2-dma-sg: " fmt, ## arg);	\
	} while (0)
/*
 * Per-buffer bookkeeping for the dma-sg allocator.
 *
 * NOTE(review): this definition is truncated in the visible chunk — any
 * remaining members and the closing brace lie outside this view.
 *
 * Fix: "unsignedint" was a fused token (missing space) that would not
 * compile; restored to "unsigned int".
 */
struct vb2_dma_sg_buf {
	struct device			*dev;
	void				*vaddr;
	struct page			**pages;
	struct frame_vector		*vec;
	int				offset;
	enum dma_data_direction		dma_dir;
	struct sg_table			sg_table;
	/*
	 * This will point to sg_table when used with the MMAP or USERPTR
	 * memory model, and to the dma_buf sglist when used with the
	 * DMABUF memory model.
	 */
	struct sg_table			*dma_sgt;
	size_t				size;
	unsigned int			num_pages;
	refcount_t			refcount;
	struct vb2_vmarea_handler	handler;
	/*
	 * NOTE: dma-sg allocates memory using the page allocator directly, so
	 * there is no memory consistency guarantee, hence dma-sg ignores DMA
	 * attributes passed from the upper layer.
	 */
	/*
	 * NOTE(review): header-less fragment of the MMAP-model allocation
	 * path; the function signature, local declarations and the fail_*
	 * error labels are outside this chunk.
	 */
	buf->pages = kvcalloc(buf->num_pages, sizeof(struct page *),
			      GFP_KERNEL);
	if (!buf->pages)
		goto fail_pages_array_alloc;
	/* presumably fills buf->pages with the backing pages — helper not
	 * visible here; TODO confirm */
	ret = vb2_dma_sg_alloc_compacted(buf, vb->vb2_queue->gfp_flags);
	if (ret)
		goto fail_pages_alloc;
	/* Build one scatterlist spanning all allocated pages */
	ret = sg_alloc_table_from_pages(buf->dma_sgt, buf->pages,
			buf->num_pages, 0, size, GFP_KERNEL);
	if (ret)
		goto fail_table_alloc;
	/* Prevent the device from being released while the buffer is used */
	buf->dev = get_device(dev);
	sgt = &buf->sg_table;
	/*
	 * No need to sync to the device, this will happen later when the
	 * prepare() memop is called.
	 */
	if (dma_map_sgtable(buf->dev, sgt, buf->dma_dir,
			DMA_ATTR_SKIP_CPU_SYNC))
		goto fail_map;
	/*
	 * NOTE(review): header-less fragment of the USERPTR acquire path;
	 * the function signature and the userptr_fail_* labels are outside
	 * this chunk.
	 */
	buf->pages = frame_vector_pages(vec);
	if (IS_ERR(buf->pages))
		goto userptr_fail_sgtable;
	buf->num_pages = frame_vector_count(vec);
	/* Wrap the pinned user pages in a scatterlist; note the user-supplied
	 * byte offset into the first page */
	if (sg_alloc_table_from_pages(buf->dma_sgt, buf->pages,
			buf->num_pages, buf->offset, size, 0))
		goto userptr_fail_sgtable;
	sgt = &buf->sg_table;
	/*
	 * No need to sync to the device, this will happen later when the
	 * prepare() memop is called.
	 */
	if (dma_map_sgtable(buf->dev, sgt, buf->dma_dir,
			DMA_ATTR_SKIP_CPU_SYNC))
		goto userptr_fail_map;
/*
 * @put_userptr: inform the allocator that a USERPTR buffer will no longer
 * be used
 *
 * Unmaps the buffer from the device, tears down the scatterlist and any
 * kernel vmap, marks device-written pages dirty, releases the pinned user
 * pages and frees the bookkeeping structure.
 *
 * Fix: "staticvoid" was a fused token (missing space) that would not
 * compile; restored to "static void".
 */
static void vb2_dma_sg_put_userptr(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	struct sg_table *sgt = &buf->sg_table;
	int i = buf->num_pages;

	dprintk(1, "%s: Releasing userspace buffer of %d pages\n",
		__func__, buf->num_pages);
	/* CPU sync was (or will be) handled by the finish() memop */
	dma_unmap_sgtable(buf->dev, sgt, buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
	if (buf->vaddr)
		vm_unmap_ram(buf->vaddr, buf->num_pages);
	sg_free_table(buf->dma_sgt);
	/*
	 * Pages the device may have written to must be marked dirty before
	 * the pin on them is dropped below.
	 */
	if (buf->dma_dir == DMA_FROM_DEVICE ||
	    buf->dma_dir == DMA_BIDIRECTIONAL)
		while (--i >= 0)
			set_page_dirty_lock(buf->pages[i]);
	vb2_destroy_framevec(buf->vec);
	kfree(buf);
}
	/*
	 * NOTE(review): header-less fragment of the dma-buf attach op (it
	 * returns plain -ENOMEM, so this is the int-returning attach
	 * callback); its signature, local declarations and tail are outside
	 * this chunk.
	 */
	attach = kzalloc(sizeof(*attach), GFP_KERNEL);
	if (!attach)
		return -ENOMEM;

	sgt = &attach->sgt;
	/* Copy the buf->base_sgt scatter list to the attachment, as we can't
	 * map the same scatter list to multiple attachments at the same time.
	 */
	ret = sg_alloc_table(sgt, buf->dma_sgt->orig_nents, GFP_KERNEL);
	if (ret) {
		kfree(attach);
		return -ENOMEM;
	}

	/* Entry-by-entry copy: same pages/lengths/offsets, fresh table */
	rd = buf->dma_sgt->sgl;
	wr = sgt->sgl;
	for (i = 0; i < sgt->orig_nents; ++i) {
		sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
		rd = sg_next(rd);
		wr = sg_next(wr);
	}
	/*
	 * NOTE(review): tail fragment of the dma-buf map op (returns a
	 * struct sg_table * / ERR_PTR); its signature and the start of its
	 * body are outside this chunk.
	 */
	/* release any previous cache */
	if (attach->dma_dir != DMA_NONE) {
		dma_unmap_sgtable(db_attach->dev, sgt, attach->dma_dir, 0);
		attach->dma_dir = DMA_NONE;
	}

	/* mapping to the client with new direction */
	if (dma_map_sgtable(db_attach->dev, sgt, dma_dir, 0)) {
		pr_err("failed to map scatterlist\n");
		return ERR_PTR(-EIO);
	}

	/* Remember the cached direction so the next map can reuse/release it */
	attach->dma_dir = dma_dir;

	return sgt;
}
/*
 * Counterpart of the map op.  The per-attachment mapping is cached (the
 * map path above releases "any previous cache" before remapping), so there
 * is nothing to undo here.
 *
 * Fix: "staticvoid" was a fused token (missing space) that would not
 * compile; restored to "static void".
 */
static void vb2_dma_sg_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
	struct sg_table *sgt, enum dma_data_direction dma_dir)
{
	/* nothing to be done here */
}
/*
 * Release callback for the exported dma-buf: drops the buffer reference
 * taken when the dma-buf was created.
 *
 * Fix: "staticvoid" was a fused token (missing space) that would not
 * compile; restored to "static void".
 */
static void vb2_dma_sg_dmabuf_ops_release(struct dma_buf *dbuf)
{
	/* drop reference obtained in vb2_dma_sg_get_dmabuf */
	vb2_dma_sg_put(dbuf->priv);
}
	/*
	 * NOTE(review): header-less fragment of the DMABUF pin/map path; the
	 * function signature and the code consuming 'sgt' afterwards are
	 * outside this chunk.
	 */
	if (WARN_ON(!buf->db_attach)) {
		pr_err("trying to pin a non attached buffer\n");
		return -EINVAL;
	}

	/* Already pinned: a non-NULL dma_sgt marks an existing mapping */
	if (WARN_ON(buf->dma_sgt)) {
		pr_err("dmabuf buffer is already pinned\n");
		return 0;
	}

	/* get the associated scatterlist for this buffer */
	sgt = dma_buf_map_attachment_unlocked(buf->db_attach, buf->dma_dir);
	if (IS_ERR(sgt)) {
		pr_err("Error getting dmabuf scatterlist\n");
		return -EINVAL;
	}
	/*
	 * NOTE(review): header-less fragment of the allocator-side dmabuf
	 * attach path (it returns ERR_PTR, so the enclosing function returns
	 * a pointer); its signature and tail are outside this chunk.
	 */
	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dev = dev;
	/* create attachment for the dmabuf with the user device */
	dba = dma_buf_attach(dbuf, buf->dev);
	if (IS_ERR(dba)) {
		pr_err("failed to attach dmabuf\n");
		/* dba is an ERR_PTR-encoded pointer, propagated as-is */
		kfree(buf);
		return dba;
	}
/*
 * NOTE(review): the following trailing text is extraneous website
 * boilerplate (a German content disclaimer) accidentally appended to this
 * source file by a faulty extraction; it is commented out here so the file
 * remains compilable and should simply be deleted.
 *
 * Translation: "The information on this website has been carefully
 * compiled to the best of our knowledge. However, neither completeness,
 * nor correctness, nor quality of the information provided is guaranteed.
 * Note: the colored syntax highlighting and the measurement are still
 * experimental."
 */