// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 */
#include <linux/libnvdimm.h>
#include"rxe.h" #include"rxe_loc.h"
/* Return a random 8 bit key value that is
 * different than the last_key. Set last_key to -1
 * if this is the first key for an MR or MW
 */
u8 rxe_get_next_key(u32 last_key)
{
u8 key;
do {
get_random_bytes(&key, 1);
} while (key == last_key);
return key;
}
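
/* The 8-bit value returned above is only the low "key" byte of the 32-bit
 * lkey/rkey; the upper bits select the MR itself and stay fixed, which is
 * why rxe_reg_fast_mr() below compares keys with the low byte masked off
 * ((mr->lkey & ~0xff) != (key & ~0xff)).
 */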
int mr_check_range(struct rxe_mr *mr, u64 iova, size_t length)
{
	switch (mr->ibmr.type) {
	case IB_MR_TYPE_DMA:
		return 0;

	case IB_MR_TYPE_USER:
	case IB_MR_TYPE_MEM_REG:
		if (iova < mr->ibmr.iova ||
		    iova + length > mr->ibmr.iova + mr->ibmr.length) {
			rxe_dbg_mr(mr, "iova/length out of range\n");
			return -EINVAL;
		}
		return 0;

	default:
		rxe_dbg_mr(mr, "mr type not supported\n");
		return -EINVAL;
}
}
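
/* Illustrative example (values are hypothetical, not from the driver): an MR
 * registered with ibmr.iova = 0x1000 and ibmr.length = 0x2000 covers
 * [0x1000, 0x3000); a request at iova = 0x2ff0 with length = 0x20 is rejected
 * by the check above because 0x2ff0 + 0x20 = 0x3010 lies past 0x3000.
 */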
	/* set ibmr->l/rkey and also copy into private l/rkey
	 * for user MRs these will always be the same
	 * for cases where caller 'owns' the key portion
	 * they may be different until REG_MR WQE is executed.
	 */
mr->lkey = mr->ibmr.lkey = key;
mr->rkey = mr->ibmr.rkey = key;
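
	/* The lkey is the handle used in local SGEs while the rkey is the
	 * value advertised to remote peers for RDMA/atomic access; keeping
	 * private copies in mr->lkey/mr->rkey next to the ibmr fields is what
	 * lets the two diverge temporarily for fast MRs, as noted above.
	 */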
	__sg_page_iter_start(&sg_iter, sgt->sgl, sgt->orig_nents, 0);
	if (!__sg_page_iter_next(&sg_iter))
		return 0;

do {
		xas_lock(&xas);
		while (true) {
page = sg_page_iter_page(&sg_iter);
if (persistent && !is_pmem_page(page)) {
rxe_dbg_mr(mr, "Page can't be persistent\n");
				xas_set_err(&xas, -EINVAL);
				break;
}
			xas_store(&xas, page);
			if (xas_error(&xas))
				break;

			xas_next(&xas);
			if (!__sg_page_iter_next(&sg_iter))
				break;
}
xas_unlock(&xas);
} while (xas_nomem(&xas, GFP_KERNEL));
return xas_error(&xas);
}
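
/* The xas_lock()/xas_nomem() loop above follows the usual XArray store
 * pattern: entries are stored under the lock, a store that fails for lack of
 * memory sets the xas error to -ENOMEM, xas_nomem() then allocates a node
 * with GFP_KERNEL outside the lock and the pass is retried; any other error
 * ends the loop and is returned to the caller.
 */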
int rxe_mr_init_user(struct rxe_dev *rxe, u64 start, u64 length,
		     int access, struct rxe_mr *mr)
{
	struct ib_umem *umem;
	int err;

rxe_mr_init(access, mr);
xa_init(&mr->page_list);
	umem = ib_umem_get(&rxe->ib_dev, start, length, access);
	if (IS_ERR(umem)) {
rxe_dbg_mr(mr, "Unable to pin memory region err = %d\n",
			   (int)PTR_ERR(umem));
		return PTR_ERR(umem);
}
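
	/* ib_umem_get() pins the user address range [start, start + length)
	 * and builds the scatter/gather table that the page-filling loop
	 * shown earlier presumably walks to populate mr->page_list.
	 */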
static int rxe_mr_alloc(struct rxe_mr *mr, int num_buf)
{
	XA_STATE(xas, &mr->page_list, 0);
	int i = 0;
	int err;

xa_init(&mr->page_list);
do {
		xas_lock(&xas);
		while (i != num_buf) {
			xas_store(&xas, XA_ZERO_ENTRY);
			if (xas_error(&xas))
				break;
xas_next(&xas);
i++;
}
xas_unlock(&xas);
} while (xas_nomem(&xas, GFP_KERNEL));
	err = xas_error(&xas);
	if (err)
		return err;

mr->num_buf = num_buf;
return 0;
}
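
/* rxe_mr_alloc() only reserves num_buf slots by storing XA_ZERO_ENTRY
 * placeholders (which read back as NULL through xa_load()); presumably the
 * real page pointers are stored later, when the fast MR is registered, so
 * the GFP_KERNEL node allocations all happen up front here.
 */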
int rxe_mr_init_fast(int max_pages, struct rxe_mr *mr)
{
	int err;

/* always allow remote access for FMRs */
rxe_mr_init(RXE_ACCESS_REMOTE, mr);
	err = rxe_mr_alloc(mr, max_pages);
	if (err)
		goto err1;

/* copy data in or out of a wqe, i.e. sg list
 * under the control of a dma descriptor
 */
int copy_data(struct rxe_pd *pd, int access, struct rxe_dma_info *dma,
	      void *addr, int length, enum rxe_mr_copy_dir dir)
{
	int bytes;
	struct rxe_sge *sge = &dma->sge[dma->cur_sge];
	int offset = dma->sge_offset;
	int resid = dma->resid;
	struct rxe_mr *mr = NULL;
	u64 iova;
	int err;

/* See IBA oA19-28 */
		err = mr_check_range(mr, iova, sizeof(value));
		if (unlikely(err)) {
			rxe_dbg_mr(mr, "iova out of range\n");
			return RESPST_ERR_RKEY_VIOLATION;
}
page_offset = rxe_mr_iova_to_page_offset(mr, iova);
index = rxe_mr_iova_to_index(mr, iova);
		page = xa_load(&mr->page_list, index);
		if (!page)
			return RESPST_ERR_RKEY_VIOLATION;
}
	/* See IBA A19.4.2 */
	if (unlikely(page_offset & 0x7)) {
		rxe_dbg_mr(mr, "misaligned address\n");
		return RESPST_ERR_MISALIGNED_ATOMIC;
}
	va = kmap_local_page(page);

	/* Do atomic write after all prior operations have completed */
smp_store_release(&va[page_offset >> 3], value);
kunmap_local(va);
return RESPST_NONE;
}
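
/* The atomic write above lands as a single aligned 64-bit store: va is
 * indexed as an array of u64 (page_offset >> 3) after the page_offset & 0x7
 * alignment check, and smp_store_release() orders the store after all prior
 * memory operations, as the comment above requires.
 */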
int advance_dma_data(struct rxe_dma_info *dma, unsigned int length)
{
	struct rxe_sge *sge = &dma->sge[dma->cur_sge];
	int offset = dma->sge_offset;
	int resid = dma->resid;

	while (length) {
		unsigned int bytes;

if (offset >= sge->length) {
sge++;
dma->cur_sge++;
			offset = 0;
			if (dma->cur_sge >= dma->num_sge)
				return -ENOSPC;
}
if (atomic_read(&mr->num_mw) > 0) {
rxe_dbg_mr(mr, "Attempt to invalidate an MR while bound to MWs\n");
		ret = -EINVAL;
		goto err_drop_ref;
}
if (unlikely(mr->ibmr.type != IB_MR_TYPE_MEM_REG)) {
rxe_dbg_mr(mr, "Type (%d) is wrong\n", mr->ibmr.type);
		ret = -EINVAL;
		goto err_drop_ref;
}
mr->state = RXE_MR_STATE_FREE;
ret = 0;
err_drop_ref:
rxe_put(mr);
err:
	return ret;
}
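
/* Invalidation only succeeds for a MEM_REG (fast) MR with no memory windows
 * still bound to it; on success the MR drops back to RXE_MR_STATE_FREE, so
 * it can be registered again with a REG_MR WQE as described below.
 */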
/* user can (re)register fast MR by executing a REG_MR WQE.
 * user is expected to hold a reference on the ib mr until the
 * WQE completes.
 *
 * Once a fast MR is created this is the only way to change the
 * private keys. It is the responsibility of the user to maintain
 * the ib mr keys in sync with rxe mr keys.
 */
int rxe_reg_fast_mr(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
{
	struct rxe_mr *mr = to_rmr(wqe->wr.wr.reg.mr);
u32 key = wqe->wr.wr.reg.key;
u32 access = wqe->wr.wr.reg.access;
	/* user can only register MR in free state */
	if (unlikely(mr->state != RXE_MR_STATE_FREE)) {
		rxe_dbg_mr(mr, "mr->lkey = 0x%x not free\n", mr->lkey);
		return -EINVAL;
}
	/* user can only register mr with qp in same protection domain */
	if (unlikely(qp->ibqp.pd != mr->ibmr.pd)) {
		rxe_dbg_mr(mr, "qp->pd and mr->pd don't match\n");
		return -EINVAL;
}
	/* user is only allowed to change key portion of l/rkey */
	if (unlikely((mr->lkey & ~0xff) != (key & ~0xff))) {
rxe_dbg_mr(mr, "key = 0x%x has wrong index mr->lkey = 0x%x\n",
			   key, mr->lkey);
		return -EINVAL;
}