/** * genwqe_search_pin() - Search for the mapping for a userspace address * @cfile: Descriptor of opened file * @u_addr: User virtual address * @size: Size of buffer * @virt_addr: Virtual address to be updated * * Return: Pointer to the corresponding mapping NULL if not found
*/ staticstruct dma_mapping *genwqe_search_pin(struct genwqe_file *cfile, unsignedlong u_addr, unsignedint size, void **virt_addr)
{ unsignedlong flags; struct dma_mapping *m;
/** * __genwqe_search_mapping() - Search for the mapping for a userspace address * @cfile: descriptor of opened file * @u_addr: user virtual address * @size: size of buffer * @dma_addr: DMA address to be updated * @virt_addr: Virtual address to be updated * Return: Pointer to the corresponding mapping NULL if not found
*/ staticstruct dma_mapping *__genwqe_search_mapping(struct genwqe_file *cfile, unsignedlong u_addr, unsignedint size,
dma_addr_t *dma_addr, void **virt_addr)
{ unsignedlong flags; struct dma_mapping *m; struct pci_dev *pci_dev = cfile->cd->pci_dev;
/* * This is really a bug, because those things should * have been already tidied up. * * GENWQE_MAPPING_RAW should have been removed via mmunmap(). * GENWQE_MAPPING_SGL_TEMP should be removed by tidy up code.
*/
dev_err(&pci_dev->dev, "[%s] %d. cleanup mapping: u_vaddr=%p u_kaddr=%016lx dma_addr=%lx\n",
__func__, i++, dma_map->u_vaddr,
(unsignedlong)dma_map->k_vaddr,
(unsignedlong)dma_map->dma_addr);
if (dma_map->type == GENWQE_MAPPING_RAW) { /* we allocated this dynamically */
__genwqe_free_consistent(cd, dma_map->size,
dma_map->k_vaddr,
dma_map->dma_addr);
kfree(dma_map);
} elseif (dma_map->type == GENWQE_MAPPING_SGL_TEMP) { /* we use dma_map statically from the request */
genwqe_user_vunmap(cd, dma_map);
}
}
}
/* * This is not a bug, because a killed processed might * not call the unpin ioctl, which is supposed to free * the resources. * * Pinnings are dymically allocated and need to be * deleted.
*/
list_del_init(&dma_map->pin_list);
genwqe_user_vunmap(cd, dma_map);
kfree(dma_map);
}
}
/** * genwqe_kill_fasync() - Send signal to all processes with open GenWQE files * @cd: GenWQE device information * @sig: Signal to send out * * E.g. genwqe_send_signal(cd, SIGIO);
*/ staticint genwqe_kill_fasync(struct genwqe_dev *cd, int sig)
{ unsignedint files = 0; unsignedlong flags; struct genwqe_file *cfile;
spin_lock_init(&cfile->map_lock); /* list of raw memory allocations */
INIT_LIST_HEAD(&cfile->map_list);
spin_lock_init(&cfile->pin_lock); /* list of user pinned memory */
INIT_LIST_HEAD(&cfile->pin_list);
filp->private_data = cfile;
genwqe_add_file(cd, cfile); return 0;
}
/** * genwqe_fasync() - Setup process to receive SIGIO. * @fd: file descriptor * @filp: file handle * @mode: file mode * * Sending a signal is working as following: * * if (cdev->async_queue) * kill_fasync(&cdev->async_queue, SIGIO, POLL_IN); * * Some devices also implement asynchronous notification to indicate * when the device can be written; in this case, of course, * kill_fasync must be called with a mode of POLL_OUT.
*/ staticint genwqe_fasync(int fd, struct file *filp, int mode)
{ struct genwqe_file *cdev = (struct genwqe_file *)filp->private_data;
/**
 * genwqe_release() - file close
 * @inode: file system information
 * @filp: file handle
 *
 * Runs whenever an application closes its GenWQE file descriptor.
 * Tears down everything this open file still owns: leftover raw
 * mappings and pinnings, the async-notification registration, the
 * device's bookkeeping entry for this file, and the per-file
 * descriptor itself.
 *
 * Return: always 0
 */
static int genwqe_release(struct inode *inode, struct file *filp)
{
	struct genwqe_file *file = filp->private_data;
	struct genwqe_dev *card = file->cd;

	/*
	 * Both lists must be empty afterwards; anything still queued
	 * here was leaked by the application and is reclaimed now.
	 */
	genwqe_remove_mappings(file);
	genwqe_remove_pinnings(file);

	/* Deregister this filp from asynchronous (SIGIO) notification. */
	genwqe_fasync(-1, filp, 0);

	/*
	 * The device structure must outlive this file: remove our
	 * list entry first, otherwise it would dangle once the list
	 * head is reinstantiated.
	 */
	genwqe_del_file(card, file);
	kfree(file);
	return 0;
}
/** * genwqe_mmap() - Provide contignous buffers to userspace * @filp: File pointer (unused) * @vma: VMA area to map * * We use mmap() to allocate contignous buffers used for DMA * transfers. After the buffer is allocated we remap it to user-space * and remember a reference to our dma_mapping data structure, where * we store the associated DMA address and allocated size. * * When we receive a DDCB execution request with the ATS bits set to * plain buffer, we lookup our dma_mapping list to find the * corresponding DMA address for the associated user-space address.
*/ staticint genwqe_mmap(struct file *filp, struct vm_area_struct *vma)
{ int rc; unsignedlong pfn, vsize = vma->vm_end - vma->vm_start; struct genwqe_file *cfile = (struct genwqe_file *)filp->private_data; struct genwqe_dev *cd = cfile->cd; struct dma_mapping *dma_map;
if (vsize == 0) return -EINVAL;
if (get_order(vsize) > MAX_PAGE_ORDER) return -ENOMEM;
/* * We must be 4 byte aligned. Buffer must be 0 appened * to have defined values when calculating CRC.
*/
tocopy = min_t(size_t, load->size, FLASH_BLOCK);
blocks_to_flash = load->size / FLASH_BLOCK; while (load->size) { /* * We must be 4 byte aligned. Buffer must be 0 appened * to have defined values when calculating CRC.
*/
tocopy = min_t(size_t, load->size, FLASH_BLOCK);
/**
 * ddcb_cmd_cleanup() - Remove dynamically created fixup entries
 * @cfile: Descriptor of opened file
 * @req: DDCB work request
 *
 * Walks every fixup slot of the request and releases whatever was
 * set up for it: temporary user mappings are delisted and unmapped,
 * and any scatter-gather list is freed. Pinnings are deliberately
 * left untouched — they are owned by the unpin ioctl.
 *
 * Return: always 0
 */
static int ddcb_cmd_cleanup(struct genwqe_file *cfile, struct ddcb_requ *req)
{
	struct genwqe_dev *cd = cfile->cd;
	unsigned int slot;

	for (slot = 0; slot < DDCB_FIXUPS; slot++) {
		struct dma_mapping *map = &req->dma_mappings[slot];

		if (dma_mapping_used(map)) {
			__genwqe_del_mapping(cfile, map);
			genwqe_user_vunmap(cd, map);
		}
		if (req->sgls[slot].sgl)
			genwqe_free_sync_sgl(cd, &req->sgls[slot]);
	}
	return 0;
}
/** * ddcb_cmd_fixups() - Establish DMA fixups/sglists for user memory references * @cfile: Descriptor of opened file * @req: DDCB work request * * Before the DDCB gets executed we need to handle the fixups. We * replace the user-space addresses with DMA addresses or do * additional setup work e.g. generating a scatter-gather list which * is used to describe the memory referred to in the fixup.
*/ staticint ddcb_cmd_fixups(struct genwqe_file *cfile, struct ddcb_requ *req)
{ int rc; unsignedint asiv_offs, i; struct genwqe_dev *cd = cfile->cd; struct genwqe_ddcb_cmd *cmd = &req->cmd; struct dma_mapping *m;
for (i = 0, asiv_offs = 0x00; asiv_offs <= 0x58;
i++, asiv_offs += 0x08) {
case ATS_TYPE_DATA: break; /* nothing to do here */
case ATS_TYPE_FLAT_RDWR: case ATS_TYPE_FLAT_RD: {
u_addr = be64_to_cpu(*((__be64 *)&cmd->
asiv[asiv_offs]));
u_size = be32_to_cpu(*((__be32 *)&cmd->
asiv[asiv_offs + 0x08]));
/* * No data available. Ignore u_addr in this * case and set addr to 0. Hardware must not * fetch the buffer.
*/ if (u_size == 0x0) {
*((__be64 *)&cmd->asiv[asiv_offs]) =
cpu_to_be64(0x0); break;
}
m = __genwqe_search_mapping(cfile, u_addr, u_size,
&d_addr, NULL); if (m == NULL) {
rc = -EFAULT; goto err_out;
}
/* * No data available. Ignore u_addr in this * case and set addr to 0. Hardware must not * fetch the empty sgl.
*/ if (u_size == 0x0) {
*((__be64 *)&cmd->asiv[asiv_offs]) =
cpu_to_be64(0x0); break;
}
m = genwqe_search_pin(cfile, u_addr, u_size, NULL); if (m != NULL) {
page_offs = (u_addr -
(u64)m->u_vaddr)/PAGE_SIZE;
} else {
m = &req->dma_mappings[i];
genwqe_mapping_init(m,
GENWQE_MAPPING_SGL_TEMP);
if (ats_flags == ATS_TYPE_SGL_RD)
m->write = 0;
rc = genwqe_user_vmap(cd, m, (void *)u_addr,
u_size); if (rc != 0) goto err_out;
/* Copy back only the modifed fields. Do not copy ASIV
back since the copy got modified by the driver. */ if (copy_to_user((void __user *)arg, cmd, sizeof(*cmd) - DDCB_ASIV_LENGTH)) {
ddcb_requ_free(cmd); return -EFAULT;
}
/** * genwqe_device_create() - Create and configure genwqe char device * @cd: genwqe device descriptor * * This function must be called before we create any more genwqe * character devices, because it is allocating the major and minor * number which are supposed to be used by the client drivers.
*/ int genwqe_device_create(struct genwqe_dev *cd)
{ int rc; struct pci_dev *pci_dev = cd->pci_dev;
/* * Here starts the individual setup per client. It must * initialize its own cdev data structure with its own fops. * The appropriate devnum needs to be created. The ranges must * not overlap.
*/
rc = alloc_chrdev_region(&cd->devnum_genwqe, 0,
GENWQE_MAX_MINOR, GENWQE_DEVNAME); if (rc < 0) {
dev_err(&pci_dev->dev, "err: alloc_chrdev_region failed\n"); goto err_dev;
}
/* * Finally the device in /dev/... must be created. The rule is * to use card%d_clientname for each created device.
*/
cd->dev = device_create_with_groups(cd->class_genwqe,
&cd->pci_dev->dev,
cd->devnum_genwqe, cd,
genwqe_attribute_groups,
GENWQE_DEVNAME "%u_card",
cd->card_idx); if (IS_ERR(cd->dev)) {
rc = PTR_ERR(cd->dev); goto err_cdev;
}
dev_warn(&pci_dev->dev, "[%s] send SIGIO and wait ...\n", __func__);
rc = genwqe_kill_fasync(cd, SIGIO); if (rc > 0) { /* give kill_timeout seconds to close file descriptors ... */ for (i = 0; (i < GENWQE_KILL_TIMEOUT) &&
genwqe_open_files(cd); i++) {
dev_info(&pci_dev->dev, " %d sec ...", i);
cond_resched();
msleep(1000);
}
/* if no open files we can safely continue, else ... */ if (!genwqe_open_files(cd)) return 0;
dev_warn(&pci_dev->dev, "[%s] send SIGKILL and wait ...\n", __func__);
rc = genwqe_terminate(cd); if (rc) { /* Give kill_timout more seconds to end processes */ for (i = 0; (i < GENWQE_KILL_TIMEOUT) &&
genwqe_open_files(cd); i++) {
dev_warn(&pci_dev->dev, " %d sec ...", i);
cond_resched();
msleep(1000);
}
}
} return 0;
}
/** * genwqe_device_remove() - Remove genwqe's char device * @cd: GenWQE device information * * This function must be called after the client devices are removed * because it will free the major/minor number range for the genwqe * drivers. * * This function must be robust enough to be called twice.
*/ int genwqe_device_remove(struct genwqe_dev *cd)
{ int rc; struct pci_dev *pci_dev = cd->pci_dev;
if (!genwqe_device_initialized(cd)) return 1;
genwqe_inform_and_stop_processes(cd);
/* * We currently do wait until all filedescriptors are * closed. This leads to a problem when we abort the * application which will decrease this reference from * 1/unused to 0/illegal and not from 2/used 1/empty.
*/
rc = kref_read(&cd->cdev_genwqe.kobj.kref); if (rc != 1) {
dev_err(&pci_dev->dev, "[%s] err: cdev_genwqe...refcount=%d\n", __func__, rc);
panic("Fatal err: cannot free resources with pending references!");
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.