/* * Copyright (c) 2005 Topspin Communications. All rights reserved. * Copyright (c) 2005 Mellanox Technologies. All rights reserved. * Copyright (c) 2013 Cisco Systems. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE.
*/
/*
 * NOTE(review): this block is corrupted. The signature below is that of
 * usnic_uiom_get_pages() (pin userspace pages for an RDMA memory region),
 * and the first few statements (overflow/size/mlock checks, page_list
 * allocation) match that function. However, everything from the
 * "flags |= ..." line onward appears to be spliced in from a DIFFERENT
 * function (it matches usnic_uiom_map_sorted_intervals() in the upstream
 * usNIC driver): it references flags, interval_node, intervals, va,
 * va_start, pa_start, pa_end, pd and err, none of which are declared here.
 * The function's opening brace is also never closed — the final "}" below
 * only closes the list_for_each_entry loop. Keywords are whitespace-mangled
 * ("staticint", "unsignedlong", "unsignedint") — presumably an artifact of
 * whatever tool produced this copy. Restore from the upstream source rather
 * than attempting to repair in place. TODO: confirm against
 * drivers/infiniband/hw/usnic/usnic_uiom.c.
 */
staticint usnic_uiom_get_pages(unsignedlong addr, size_t size, int writable, int dmasync, struct usnic_uiom_reg *uiomr)
/* Locals for page pinning; several (sg, locked, lock_limit, cur_base,
 * npages, ret, off, mm) are declared but never used in the visible code —
 * their uses are presumably in the missing remainder of the function. */
{ struct list_head *chunk_list = &uiomr->chunk_list; unsignedint gup_flags = FOLL_LONGTERM; struct page **page_list; struct scatterlist *sg; struct usnic_uiom_chunk *chunk; unsignedlong locked; unsignedlong lock_limit; unsignedlong cur_base; unsignedlong npages; int ret; int off; int i;
dma_addr_t pa; struct mm_struct *mm;
/* * If the combination of the addr and size requested for this memory * region causes an integer overflow, return error.
*/ if (((addr + size) < addr) || PAGE_ALIGN(addr + size) < (addr + size)) return -EINVAL;
/* Reject empty regions. */
if (!size) return -EINVAL;
/* Caller must be allowed to lock memory (RLIMIT_MEMLOCK / capability). */
if (!can_do_mlock()) return -EPERM;
INIT_LIST_HEAD(chunk_list);
/* One page of scratch space to hold the struct page pointers returned by
 * the (not visible here) get_user_pages call. */
page_list = (struct page **) __get_free_page(GFP_KERNEL); if (!page_list) return -ENOMEM;
/*
 * NOTE(review): from here on the code belongs to a different function
 * (interval-to-IOMMU mapping, upstream usnic_uiom_map_sorted_intervals):
 * 'flags', 'pd', 'va', 'va_start', 'pa_start', 'pa_end', 'interval_node',
 * 'intervals' and 'err' are all undeclared in this scope.
 */
flags |= (uiomr->writable) ? IOMMU_WRITE : 0;
chunk = list_first_entry(&uiomr->chunk_list, struct usnic_uiom_chunk,
list);
/* Walk each sorted VA interval; for each, walk chunk scatterlist entries
 * and coalesce physically-contiguous page runs into single iommu_map calls. */
list_for_each_entry(interval_node, intervals, link) {
iter_chunk: for (i = 0; i < chunk->nents; i++, va += PAGE_SIZE) {
pa = sg_phys(&chunk->page_list[i]); if ((va >> PAGE_SHIFT) < interval_node->start) continue;
if ((va >> PAGE_SHIFT) == interval_node->start) { /* First page of the interval */
va_start = va;
pa_start = pa;
pa_end = pa;
}
/* va_start must have been initialized by the first-page branch above. */
WARN_ON(va_start == -EINVAL);
if ((pa_end + PAGE_SIZE != pa) &&
(pa != pa_start)) { /* PAs are not contiguous */
/* Flush the contiguous run accumulated so far. */
size = pa_end - pa_start + PAGE_SIZE;
usnic_dbg("va 0x%lx pa %pa size 0x%zx flags 0x%x",
va_start, &pa_start, size, flags);
err = iommu_map(pd->domain, va_start, pa_start,
size, flags, GFP_ATOMIC); if (err) {
usnic_err("Failed to map va 0x%lx pa %pa size 0x%zx with err %d\n",
va_start, &pa_start, size, err); goto err_out;
}
/* Start a new contiguous run at the current page. */
va_start = va;
pa_start = pa;
pa_end = pa;
}
if ((va >> PAGE_SHIFT) == interval_node->last) { /* Last page of the interval */
size = pa - pa_start + PAGE_SIZE;
usnic_dbg("va 0x%lx pa %pa size 0x%zx flags 0x%x\n",
va_start, &pa_start, size, flags);
err = iommu_map(pd->domain, va_start, pa_start,
size, flags, GFP_ATOMIC); if (err) {
usnic_err("Failed to map va 0x%lx pa %pa size 0x%zx with err %d\n",
va_start, &pa_start, size, err); goto err_out;
} break;
}
/* Extend the current contiguous run. */
if (pa != pa_start)
pa_end += PAGE_SIZE;
}
if (i == chunk->nents) { /* * Hit last entry of the chunk, * hence advance to next chunk
*/
chunk = list_first_entry(&chunk->list, struct usnic_uiom_chunk,
list); goto iter_chunk;
}
}
/* NOTE(review): function body ends here without a closing brace or return;
 * the remainder (error paths, success return) is missing from this copy. */
/*
 * NOTE(review): corrupted/incomplete block. The signature is that of
 * usnic_uiom_reg_get() (create and pin a userspace memory registration),
 * and the locals plus the "writable = 1" workaround match that function.
 * But the code from iommu_attach_device() onward references 'dev' and the
 * labels out_free_dev / out_detach_device, none of which exist here — it
 * appears spliced in from usnic_uiom_attach_dev() in the upstream driver.
 * The function is never closed with a brace; the rest of the body is
 * missing from this copy. Restore from upstream rather than patching.
 */
struct usnic_uiom_reg *usnic_uiom_reg_get(struct usnic_uiom_pd *pd, unsignedlong addr, size_t size, int writable, int dmasync)
{ struct usnic_uiom_reg *uiomr; unsignedlong va_base, vpn_start, vpn_last; unsignedlong npages; int offset, err;
LIST_HEAD(sorted_diff_intervals);
/* * Intel IOMMU map throws an error if a translation entry is * changed from read to write. This module may not unmap * and then remap the entry after fixing the permission * b/c this open up a small windows where hw DMA may page fault * Hence, make all entries to be writable.
*/
writable = 1;
/*
 * NOTE(review): the following lines belong to a different function
 * (device attach); 'dev' and the goto labels are undeclared here.
 */
err = iommu_attach_device(pd->domain, dev); if (err) goto out_free_dev;
/* usNIC requires cache-coherent DMA through the IOMMU. */
if (!device_iommu_capable(dev, IOMMU_CAP_CACHE_COHERENCY)) {
usnic_err("IOMMU of %s does not support cache coherency\n",
dev_name(dev));
err = -EINVAL; goto out_detach_device;
}
/* NOTE(review): function body truncated — no closing brace or return. */
/*
 * NOTE(review): the text below is extraneous German website boilerplate
 * that was captured along with the source (it is not part of the driver).
 * It is preserved here, wrapped as a comment so the file remains parseable.
 * English translation: "The information on this website has been carefully
 * compiled to the best of our knowledge. However, no guarantee is given of
 * the completeness, correctness, or quality of the information provided.
 * Note: the syntax highlighting and the measurement are still experimental."
 *
 * Original: Die Informationen auf dieser Webseite wurden nach bestem Wissen
 * sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch
 * Richtigkeit, noch Qualität der bereitgestellten Informationen zugesichert.
 * Bemerkung: Die farbliche Syntaxdarstellung und die Messung sind noch
 * experimentell.
 */