/*
 * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
static struct ib_umem *mlx4_get_umem_mr(struct ib_device *device, u64 start,
					u64 length, int access_flags)
{
	/*
	 * Force registering the memory as writable if the underlying pages
	 * are writable.  This is so rereg can change the access permissions
	 * from readable to writable without having to run through
	 * ib_umem_get again.
	 */
	if (!ib_access_writable(access_flags)) {
		unsigned long untagged_start = untagged_addr(start);
		struct vm_area_struct *vma;

		mmap_read_lock(current->mm);
		/*
		 * FIXME: Ideally this would iterate over all the vmas that
		 * cover the memory, but for now it requires a single vma to
		 * entirely cover the MR to support RO mappings.
		 */
		vma = find_vma(current->mm, untagged_start);
		if (vma && vma->vm_end >= untagged_start + length &&
		    vma->vm_start <= untagged_start) {
			if (vma->vm_flags & VM_WRITE)
				access_flags |= IB_ACCESS_LOCAL_WRITE;
		} else {
			access_flags |= IB_ACCESS_LOCAL_WRITE;
		}

		mmap_read_unlock(current->mm);
	}

	return ib_umem_get(device, start, length, access_flags);
}
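
/*
 * For reference (paraphrased from include/rdma/ib_verbs.h, not part of
 * this file): the ib_access_writable() test above returns true when any
 * write-capable flag is set, roughly
 *
 *	access_flags & (IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE |
 *			IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_MW_BIND)
 *
 * so a purely read-only registration is the only case that takes the
 * vma walk above.
 */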
struct ib_mr *mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags, u64 start,
				    u64 length, u64 virt_addr,
				    int mr_access_flags, struct ib_pd *pd,
				    struct ib_udata *udata)
{
	struct mlx4_ib_dev *dev = to_mdev(mr->device);
	struct mlx4_ib_mr *mmr = to_mmr(mr);
	struct mlx4_mpt_entry *mpt_entry;
	struct mlx4_mpt_entry **pmpt_entry = &mpt_entry;
	int err;

	/* Since we synchronize this call and mlx4_ib_dereg_mr via uverbs,
	 * we assume that the calls can't run concurrently. Otherwise, a
	 * race exists.
	 */
	err = mlx4_mr_hw_get_mpt(dev->dev, &mmr->mmr, &pmpt_entry);
	if (err)
		return ERR_PTR(err);

	if (flags & IB_MR_REREG_PD) {
		err = mlx4_mr_hw_change_pd(dev->dev, *pmpt_entry,
					   to_mpd(pd)->pdn);
		if (err)
			goto release_mpt_entry;
	}

	if (flags & IB_MR_REREG_ACCESS) {
		if (ib_access_writable(mr_access_flags) &&
		    !mmr->umem->writable) {
			err = -EPERM;
			goto release_mpt_entry;
		}

		err = mlx4_mr_hw_change_access(dev->dev, *pmpt_entry,
					       convert_access(mr_access_flags));
		if (err)
			goto release_mpt_entry;
	}

	/* If we couldn't transfer the MR to the HCA, just remember to
	 * return a failure. But dereg_mr will free the resources.
	 */
	err = mlx4_mr_hw_write_mpt(dev->dev, &mmr->mmr, pmpt_entry);
	if (!err && flags & IB_MR_REREG_ACCESS)
		mmr->mmr.access = mr_access_flags;

release_mpt_entry:
	mlx4_mr_hw_put_mpt(dev->dev, pmpt_entry);
	if (err)
		return ERR_PTR(err);
	return NULL;
}
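
/*
 * Note (added for clarity, describing the mlx4 core helpers used above):
 * rereg follows a read-modify-write bracket on the MPT (memory protection
 * table) entry:
 *
 *	mlx4_mr_hw_get_mpt()       - take ownership of the entry
 *	mlx4_mr_hw_change_pd() /
 *	mlx4_mr_hw_change_access() - edit the software copy
 *	mlx4_mr_hw_write_mpt()     - push the updated entry back to the HCA
 *	mlx4_mr_hw_put_mpt()       - release ownership
 *
 * which is why every error path funnels through release_mpt_entry.
 */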
static int
mlx4_alloc_priv_pages(struct ib_device *device,
		      struct mlx4_ib_mr *mr,
		      int max_pages)
{
	int ret;

	/* Ensure that size is aligned to DMA cacheline
	 * requirements.
	 * max_pages is limited to MLX4_MAX_FAST_REG_PAGES
	 * so page_map_size will never cross PAGE_SIZE.
	 */
	mr->page_map_size = roundup(max_pages * sizeof(u64),
				    MLX4_MR_PAGES_ALIGN);

	/* Prevent cross page boundary allocation. */
	mr->pages = (__be64 *)get_zeroed_page(GFP_KERNEL);
	if (!mr->pages)
		return -ENOMEM;

	mr->page_map = dma_map_single(device->dev.parent, mr->pages,
				      mr->page_map_size, DMA_TO_DEVICE);
	if (dma_mapping_error(device->dev.parent, mr->page_map)) {
		ret = -ENOMEM;
		goto err;
	}

	return 0;

err:
	free_page((unsigned long)mr->pages);
	return ret;
}
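
/*
 * Worked example of the sizing above (illustrative; assumes the mainline
 * values MLX4_MAX_FAST_REG_PAGES == 511 and MLX4_MR_PAGES_ALIGN == 64):
 *
 *	511 * sizeof(u64) = 4088 bytes
 *	roundup(4088, 64) = 4096
 *
 * i.e. even the largest fast-register page list fits in the single
 * zeroed page allocated above, so page_map_size never crosses PAGE_SIZE.
 */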