/* The number of cache line to prefetch Until threshold state */ #define WQ_PREFETCH_MAX 2 /* The number of cache line to prefetch After threshold state */ #define WQ_PREFETCH_MIN 1 /* Threshold state */ #define WQ_PREFETCH_THRESHOLD 256
/* sizes of the SQ/RQ ctxt */ #define Q_CTXT_SIZE 48 #define CTXT_RSVD 240
/* If only one page, use 0-level CLA */ if (wq->num_q_pages == 1)
wq_block_pfn = HINIC_WQ_BLOCK_PFN(wq_page_addr); else
wq_block_pfn = HINIC_WQ_BLOCK_PFN(wq->block_paddr);
for (i = 0; i < wq->q_depth; i++) {
rq->cqe[i] = dma_alloc_coherent(&pdev->dev, sizeof(*rq->cqe[i]),
&rq->cqe_dma[i], GFP_KERNEL); if (!rq->cqe[i]) goto err_cqe_alloc;
}
err = alloc_rq_skb_arr(rq); if (err) {
dev_err(&pdev->dev, "Failed to allocate rq priv data\n"); return err;
}
err = alloc_rq_cqe(rq); if (err) {
dev_err(&pdev->dev, "Failed to allocate rq cqe\n"); goto err_alloc_rq_cqe;
}
/* HW requirements: Must be at least 32 bit */
pi_size = ALIGN(sizeof(*rq->pi_virt_addr), sizeof(u32));
rq->pi_virt_addr = dma_alloc_coherent(&pdev->dev, pi_size,
&rq->pi_dma_addr, GFP_KERNEL); if (!rq->pi_virt_addr) {
err = -ENOMEM; goto err_pi_virt;
}
/**
 * hinic_get_sq_free_wqebbs - return number of free wqebbs for use
 * @sq: send queue
 *
 * Return number of free wqebbs
 **/
int hinic_get_sq_free_wqebbs(struct hinic_sq *sq)
{
	/* one wqebb is reserved, so the usable count is delta - 1 */
	return atomic_read(&sq->wq->delta) - 1;
}
/** * hinic_get_rq_free_wqebbs - return number of free wqebbs for use * @rq: recv queue * * Return number of free wqebbs
**/ int hinic_get_rq_free_wqebbs(struct hinic_rq *rq)
{ struct hinic_wq *wq = rq->wq;
/* set MSS value */
*queue_info = HINIC_SQ_CTRL_CLEAR(*queue_info, QUEUE_INFO_MSS);
*queue_info |= HINIC_SQ_CTRL_SET(mss, QUEUE_INFO_MSS);
}
/**
 * hinic_sq_prepare_wqe - prepare wqe before insert to the queue
 * @sq: send queue
 * @sq_wqe: wqe to prepare
 * @sges: sges for use by the wqe for send for buf addresses
 * @nr_sges: number of sges
 **/
void hinic_sq_prepare_wqe(struct hinic_sq *sq, struct hinic_sq_wqe *sq_wqe,
			  struct hinic_sge *sges, int nr_sges)
{
	int sge_idx;

	/* fill in the control and task sections first */
	sq_prepare_ctrl(&sq_wqe->ctrl, nr_sges);
	sq_prepare_task(&sq_wqe->task);

	/* then copy the caller's sges into the buffer descriptors */
	for (sge_idx = 0; sge_idx < nr_sges; sge_idx++)
		sq_wqe->buf_descs[sge_idx].sge = sges[sge_idx];
}
/**
 * sq_prepare_db - prepare doorbell to write
 * @sq: send queue
 * @prod_idx: pi value for the doorbell
 * @cos: cos of the doorbell
 *
 * Return db value
 **/
static u32 sq_prepare_db(struct hinic_sq *sq, u16 prod_idx, unsigned int cos)
{
	struct hinic_qp *qp = container_of(sq, struct hinic_qp, sq);
	u8 hi_prod_idx = SQ_DB_PI_HI(SQ_MASKED_IDX(sq, prod_idx));

	/* Data should be written to HW in Big Endian Format */
	return cpu_to_be32(HINIC_SQ_DB_INFO_SET(hi_prod_idx, PI_HI) |
			   HINIC_SQ_DB_INFO_SET(HINIC_DB_SQ_TYPE, TYPE) |
			   HINIC_SQ_DB_INFO_SET(HINIC_DATA_PATH, PATH) |
			   HINIC_SQ_DB_INFO_SET(cos, COS) |
			   HINIC_SQ_DB_INFO_SET(qp->q_id, QID));
}
/** * hinic_sq_write_db- write doorbell * @sq: send queue * @prod_idx: pi value for the doorbell * @wqe_size: wqe size * @cos: cos of the wqe
**/ void hinic_sq_write_db(struct hinic_sq *sq, u16 prod_idx, unsignedint wqe_size, unsignedint cos)
{ struct hinic_wq *wq = sq->wq;
/* increment prod_idx to the next */
prod_idx += ALIGN(wqe_size, wq->wqebb_size) / wq->wqebb_size;
prod_idx = SQ_MASKED_IDX(sq, prod_idx);
/**
 * hinic_sq_get_wqe - get wqe ptr in the current pi and update the pi
 * @sq: sq to get wqe from
 * @wqe_size: wqe size
 * @prod_idx: returned pi
 *
 * Return wqe pointer, or NULL if no wqe is available
 **/
struct hinic_sq_wqe *hinic_sq_get_wqe(struct hinic_sq *sq,
				      unsigned int wqe_size, u16 *prod_idx)
{
	struct hinic_hw_wqe *hw_wqe = hinic_get_wqe(sq->wq, wqe_size,
						    prod_idx);

	if (IS_ERR(hw_wqe))
		return NULL;

	return &hw_wqe->sq_wqe;
}
/** * hinic_sq_return_wqe - return the wqe to the sq * @sq: send queue * @wqe_size: the size of the wqe
**/ void hinic_sq_return_wqe(struct hinic_sq *sq, unsignedint wqe_size)
{
hinic_return_wqe(sq->wq, wqe_size);
}
/**
 * hinic_sq_write_wqe - write the wqe to the sq
 * @sq: send queue
 * @prod_idx: pi of the wqe
 * @sq_wqe: the wqe to write
 * @skb: skb to save
 * @wqe_size: the size of the wqe
 **/
void hinic_sq_write_wqe(struct hinic_sq *sq, u16 prod_idx,
			struct hinic_sq_wqe *sq_wqe, struct sk_buff *skb,
			unsigned int wqe_size)
{
	struct hinic_hw_wqe *hw_wqe = (struct hinic_hw_wqe *)sq_wqe;

	/* remember the skb so it can be released on completion */
	sq->saved_skb[prod_idx] = skb;

	/* The data in the HW should be in Big Endian Format */
	hinic_cpu_to_be32(sq_wqe, wqe_size);

	hinic_write_wqe(sq->wq, hw_wqe, wqe_size);
}
/** * hinic_sq_read_wqebb - read wqe ptr in the current ci and update the ci, the * wqe only have one wqebb * @sq: send queue * @skb: return skb that was saved * @wqe_size: the wqe size ptr * @cons_idx: consumer index of the wqe * * Return wqe in ci position
**/ struct hinic_sq_wqe *hinic_sq_read_wqebb(struct hinic_sq *sq, struct sk_buff **skb, unsignedint *wqe_size, u16 *cons_idx)
{ struct hinic_hw_wqe *hw_wqe; struct hinic_sq_wqe *sq_wqe; struct hinic_sq_ctrl *ctrl; unsignedint buf_sect_len;
u32 ctrl_info;
/* read the ctrl section for getting wqe size */
hw_wqe = hinic_read_wqe(sq->wq, sizeof(*ctrl), cons_idx); if (IS_ERR(hw_wqe)) return NULL;
/** * hinic_sq_read_wqe - read wqe ptr in the current ci and update the ci * @sq: send queue * @skb: return skb that was saved * @wqe_size: the size of the wqe * @cons_idx: consumer index of the wqe * * Return wqe in ci position
**/ struct hinic_sq_wqe *hinic_sq_read_wqe(struct hinic_sq *sq, struct sk_buff **skb, unsignedint wqe_size, u16 *cons_idx)
{ struct hinic_hw_wqe *hw_wqe;
/** * hinic_sq_put_wqe - release the ci for new wqes * @sq: send queue * @wqe_size: the size of the wqe
**/ void hinic_sq_put_wqe(struct hinic_sq *sq, unsignedint wqe_size)
{
hinic_put_wqe(sq->wq, wqe_size);
}
/**
 * hinic_sq_get_sges - get sges from the wqe
 * @sq_wqe: wqe to get the sges from its buffer addresses
 * @sges: returned sges
 * @nr_sges: number sges to return
 **/
void hinic_sq_get_sges(struct hinic_sq_wqe *sq_wqe, struct hinic_sge *sges,
		       int nr_sges)
{
	int idx;

	/* copy at most HINIC_MAX_SQ_BUFDESCS descriptors */
	for (idx = 0; idx < nr_sges; idx++) {
		if (idx >= HINIC_MAX_SQ_BUFDESCS)
			break;

		sges[idx] = sq_wqe->buf_descs[idx].sge;
		/* sges are stored in HW (big endian) byte order */
		hinic_be32_to_cpu(&sges[idx], sizeof(sges[idx]));
	}
}
/**
 * hinic_rq_get_wqe - get wqe ptr in the current pi and update the pi
 * @rq: rq to get wqe from
 * @wqe_size: wqe size
 * @prod_idx: returned pi
 *
 * Return wqe pointer, or NULL if no wqe is available
 **/
struct hinic_rq_wqe *hinic_rq_get_wqe(struct hinic_rq *rq,
				      unsigned int wqe_size, u16 *prod_idx)
{
	struct hinic_hw_wqe *hw_wqe = hinic_get_wqe(rq->wq, wqe_size,
						    prod_idx);

	if (IS_ERR(hw_wqe))
		return NULL;

	return &hw_wqe->rq_wqe;
}
/** * hinic_rq_write_wqe - write the wqe to the rq * @rq: recv queue * @prod_idx: pi of the wqe * @rq_wqe: the wqe to write * @skb: skb to save
**/ void hinic_rq_write_wqe(struct hinic_rq *rq, u16 prod_idx, struct hinic_rq_wqe *rq_wqe, struct sk_buff *skb)
{ struct hinic_hw_wqe *hw_wqe = (struct hinic_hw_wqe *)rq_wqe;
rq->saved_skb[prod_idx] = skb;
/* The data in the HW should be in Big Endian Format */
hinic_cpu_to_be32(rq_wqe, sizeof(*rq_wqe));
/**
 * hinic_rq_read_wqe - read wqe ptr in the current ci and update the ci
 * @rq: recv queue
 * @wqe_size: the size of the wqe
 * @skb: return saved skb
 * @cons_idx: consumer index of the wqe
 *
 * Return wqe in ci position, or NULL if there is no completed wqe
 **/
struct hinic_rq_wqe *hinic_rq_read_wqe(struct hinic_rq *rq,
				       unsigned int wqe_size,
				       struct sk_buff **skb, u16 *cons_idx)
{
	struct hinic_hw_wqe *hw_wqe;
	struct hinic_rq_cqe *cqe;
	int rx_done;
	u32 status;

	hw_wqe = hinic_read_wqe(rq->wq, wqe_size, cons_idx);
	if (IS_ERR(hw_wqe))
		return NULL;

	cqe = rq->cqe[*cons_idx];

	/* cqe status is written by HW in big endian */
	status = be32_to_cpu(cqe->status);

	/* the wqe is only valid once HW has set the RXDONE flag */
	rx_done = HINIC_RQ_CQE_STATUS_GET(status, RXDONE);
	if (!rx_done)
		return NULL;

	*skb = rq->saved_skb[*cons_idx];

	return &hw_wqe->rq_wqe;
}
/** * hinic_rq_read_next_wqe - increment ci and read the wqe in ci position * @rq: recv queue * @wqe_size: the size of the wqe * @skb: return saved skb * @cons_idx: consumer index in the wq * * Return wqe in incremented ci position
**/ struct hinic_rq_wqe *hinic_rq_read_next_wqe(struct hinic_rq *rq, unsignedint wqe_size, struct sk_buff **skb,
u16 *cons_idx)
{ struct hinic_wq *wq = rq->wq; struct hinic_hw_wqe *hw_wqe; unsignedint num_wqebbs;
/**
 * hinic_rq_put_wqe - release the ci for new wqes
 * @rq: recv queue
 * @cons_idx: consumer index of the wqe
 * @wqe_size: the size of the wqe
 **/
void hinic_rq_put_wqe(struct hinic_rq *rq, u16 cons_idx,
		      unsigned int wqe_size)
{
	struct hinic_rq_cqe *cqe = rq->cqe[cons_idx];
	u32 status = be32_to_cpu(cqe->status);

	status = HINIC_RQ_CQE_STATUS_CLEAR(status, RXDONE);

	/* Rx WQE size is 1 WQEBB, no wq shadow */
	cqe->status = cpu_to_be32(status);

	wmb();		/* clear done flag */

	hinic_put_wqe(rq->wq, wqe_size);
}
/** * hinic_rq_get_sge - get sge from the wqe * @rq: recv queue * @rq_wqe: wqe to get the sge from its buf address * @cons_idx: consumer index * @sge: returned sge
**/ void hinic_rq_get_sge(struct hinic_rq *rq, struct hinic_rq_wqe *rq_wqe,
u16 cons_idx, struct hinic_sge *sge)
{ struct hinic_rq_cqe *cqe = rq->cqe[cons_idx];
u32 len = be32_to_cpu(cqe->len);
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.