/* * Work around 1: * In some situations the controller may latch a stale data address in a TRB, * in the following sequence: * 1. Controller reads a TRB that includes the data address * 2. Software updates TRBs, including the data address and Cycle bit * 3. Controller reads the TRB that includes the Cycle bit * 4. DMA runs with the stale data address * * To fix this problem, the driver needs to make the first TRB in a TD invalid. * After preparing all TRBs, the driver needs to check the position of DMA and, * if the DMA points to the first just-added TRB and the doorbell is 1, * then the driver must defer making this TRB valid. This TRB will be made * valid while adding the next TRB, but only if DMA is stopped or at a TRBERR * interrupt. *
*/
if (!ring->trbs) {
ring->trbs = dma_pool_alloc(pdev->eps_dma_pool,
GFP_DMA32 | GFP_ATOMIC,
&ring->dma); if (!ring->trbs) return -ENOMEM;
}
memset(ring->trbs, 0, TR_SEG_SIZE);
if (!pep->num) return 0;
/* Initialize the last TRB as Link TRB */
link_trb = (ring->trbs + (TRBS_PER_SEGMENT - 1));
link_trb->buffer = cpu_to_le32(TRB_BUFFER(ring->dma));
link_trb->control = cpu_to_le32(TRB_CYCLE | TRB_TYPE(TRB_LINK) |
TRB_TOGGLE);
return 0;
}
/* * Stalls and flushes selected endpoint. * Endpoint must be selected before invoking this function.
*/ staticvoid cdns2_ep_stall_flush(struct cdns2_endpoint *pep)
{ struct cdns2_device *pdev = pep->pdev; int val;
/*
 * Increment a TRB index.
 *
 * The index must never point at the last (Link) TRB in the transfer ring.
 * After incrementing, if it would land on the Link TRB, wrap around to the
 * beginning and invert the cycle state bit. The Link TRB always occupies
 * the last TRB entry.
 */
static void cdns2_ep_inc_trb(int *index, u8 *cs, int trb_in_seg)
{
	int next = *index + 1;

	/* Skip the terminating Link TRB: wrap and toggle the cycle state. */
	if (next == trb_in_seg - 1) {
		next = 0;
		*cs ^= 1;
	}

	*index = next;
}
/*
 * Enable/disable LPM (L1 link state).
 *
 * If the USBCS_LPMNYET bit is clear and the device receives an Extended
 * Token packet, the controller answers with an ACK handshake.
 * If the USBCS_LPMNYET bit is set and the device receives an Extended
 * Token packet, the controller answers with a NYET handshake.
 */
static void cdns2_enable_l1(struct cdns2_device *pdev, int enable)
{
	if (!enable) {
		/* Refuse L1 entry: answer Extended Token packets with NYET. */
		set_reg_bit_8(&pdev->usb_regs->usbcs, USBCS_LPMNYET);
		return;
	}

	/* Accept L1 entry and program the sleep-entry clock mode. */
	clear_reg_bit_8(&pdev->usb_regs->usbcs, USBCS_LPMNYET);
	writeb(LPMCLOCK_SLEEP_ENTRY, &pdev->usb_regs->lpmclock);
}
/* All TRBs have finished, clear the counter. */
preq->finished_trb = 0;
trace_cdns2_request_giveback(preq);
if (request->complete) {
spin_unlock(&pdev->lock);
usb_gadget_giveback_request(&pep->endpoint, request);
spin_lock(&pdev->lock);
}
if (request->buf == pdev->zlp_buf)
cdns2_gadget_ep_free_request(&pep->endpoint, request);
}
staticvoid cdns2_wa1_restore_cycle_bit(struct cdns2_endpoint *pep)
{ /* Work around for stale data address in TRB. */ if (pep->wa1_set) {
trace_cdns2_wa1(pep, "restore cycle bit");
/* Driver can't update LINK TRB if it is current processed. */ if (doorbell && dma_index == TRBS_PER_SEGMENT - 1) {
pep->ep_state |= EP_DEFERRED_DRDY; return -ENOBUFS;
}
/* Update cycle bit in Link TRB before starting DMA. */
link_trb = ring->trbs + (TRBS_PER_SEGMENT - 1);
/* * For TRs of size 2, enabling TRB_CHAIN for epXin causes * the DMA to get stuck at the LINK TRB. * On the other hand, removing TRB_CHAIN for longer TRs for * epXout causes the DMA to get stuck after handling the LINK TRB. * To eliminate this strange behavior, the driver sets the TRB_CHAIN * bit only for TR size > 2.
*/ if (pep->type == USB_ENDPOINT_XFER_ISOC || TRBS_PER_SEGMENT > 2)
ch_bit = TRB_CHAIN;
if (pep->type == USB_ENDPOINT_XFER_ISOC) { /* * To speed up DMA performance address should not exceed 4KB. * for high bandwidth transfer and driver will split * such buffer into two TRBs.
*/
num_trbs = DIV_ROUND_UP(len +
(addr & (TRB_MAX_ISO_BUFF_SIZE - 1)),
TRB_MAX_ISO_BUFF_SIZE);
if (pep->interval > 1)
num_trbs = pep->dir ? num_trbs * pep->interval : 1;
} elseif (pep->dir) { /* * One extra link trb for IN direction. * Sometimes DMA doesn't want advance to next TD and transfer * hangs. This extra Link TRB force DMA to advance to next TD.
*/
num_trbs++;
}
for_each_sg(req->sg, sg, req->num_sgs, i) {
len = sg_dma_len(sg);
num_trbs += cdns2_count_trbs(pep, sg_dma_address(sg), len);
len = min(len, full_len);
/* * For HS ISO transfer TRBs should not exceed max packet size. * When DMA is working, and data exceed max packet size then * some data will be read in single mode instead burst mode. * This behavior will drastically reduce the copying speed. * To avoid this we need one or two extra TRBs. * This issue occurs for UVC class with sg_supported = 1 * because buffers addresses are not aligned to 1024.
*/ if (pep->type == USB_ENDPOINT_XFER_ISOC) {
u8 temp;
trb_len += len;
temp = trb_len >> 10;
if (temp) { if (trb_len % 1024)
num_trbs = num_trbs + temp; else
num_trbs = num_trbs + temp - 1;
trb_len = trb_len - (temp << 10);
}
}
full_len -= len; if (full_len == 0) break;
}
return num_trbs;
}
/* * Function prepares the array with optimized AXI burst values for different * transfer lengths. The controller handles the final data which is less * than the AXI burst size as single-byte transactions. * e.g.: * Let's assume that the driver prepares a TRB with trb->length 700 and the burst size * is set to 128. In this case the controller will handle the first 512 bytes as a * single AXI transaction, but the next 188 bytes will be handled * as 47 separate AXI transactions. * The better solution is to use a burst size of 16, and then we will * have only 25 AXI transactions (10 * 64 + 15 * 4).
*/ staticvoid cdsn2_isoc_burst_opt(struct cdns2_device *pdev)
{ int axi_burst_option[] = {1, 2, 4, 8, 16, 32, 64, 128}; int best_burst; int array_size; int opt_burst; int trb_size; int i, j;
array_size = ARRAY_SIZE(axi_burst_option);
for (i = 0; i <= MAX_ISO_SIZE; i++) {
trb_size = i / 4;
best_burst = trb_size ? trb_size : 1;
staticvoid cdns2_ep_tx_isoc(struct cdns2_endpoint *pep, struct cdns2_request *preq, int num_trbs)
{ struct scatterlist *sg = NULL;
u32 remaining_packet_size = 0; struct cdns2_trb *trb; bool first_trb = true;
dma_addr_t trb_dma;
u32 trb_buff_len;
u32 block_length; int td_idx = 0; int split_size;
u32 full_len; int enqd_len; int sent_len; int sg_iter;
u32 control; int num_tds;
u32 length;
/* * For OUT direction 1 TD per interval is enough * because TRBs are not dumped by controller.
*/
num_tds = pep->dir ? pep->interval : 1;
split_size = preq->request.num_sgs ? 1024 : 3072;
/* * For IN direction driver has to set the IOC for * last TRB in last TD. * For OUT direction driver must set IOC and ISP * only for last TRB in each TDs.
*/ if (enqd_len + trb_buff_len >= full_len || !pep->dir)
control |= TRB_IOC | TRB_ISP;
/* * Don't give the first TRB to the hardware (by toggling * the cycle bit) until we've finished creating all the * other TRBs.
*/ if (first_trb) {
first_trb = false; if (pep->ring.pcs == 0)
control |= TRB_CYCLE;
} else {
control |= pep->ring.pcs;
}
if (enqd_len + trb_buff_len < full_len)
control |= TRB_CHAIN;
/* Point to next bad TRB. */
trb->buffer = cpu_to_le32(pep->ring.dma +
(ring->enqueue * TRB_SIZE));
trb->length = 0;
trb->control = cpu_to_le32(control); break;
}
/* * Don't give the first TRB to the hardware (by toggling * the cycle bit) until we've finished creating all the * other TRBs.
*/ if (sg_iter == 0)
control = control ^ TRB_CYCLE;
/* For last TRB in TD. */ if (sg_iter == (trbs_per_td - (pep->dir ? 2 : 1)))
control |= TRB_IOC; else
control |= TRB_CHAIN;
/* * The first packet after doorbell can be corrupted so, * driver prepares 0 length packet as first packet.
*/
buffer = pep->ring.dma + pep->ring.dequeue * TRB_SIZE;
hw_ccs = !!DMA_EP_STS_CCS(readl(&pdev->adma_regs->ep_sts));
/* * LINK TRB is used to force updating cycle bit in controller and * move to correct place in transfer ring.
*/
trb++;
trb->length = 0;
trb->buffer = cpu_to_le32(TRB_BUFFER(buffer));
trb->control = cpu_to_le32((hw_ccs ? TRB_CYCLE : 0) |
TRB_TYPE(TRB_LINK) | TRB_CHAIN);
if (hw_ccs != pep->ring.ccs)
trb->control |= cpu_to_le32(TRB_TOGGLE);
if (!pep->wa1_set && !(pep->ep_state & EP_STALLED) && !pep->skip) { if (pep->type == USB_ENDPOINT_XFER_ISOC) {
ret = cdns2_prepare_first_isoc_transfer(pdev, pep); if (ret) return 0;
}
cdns2_set_drdy(pdev, pep);
}
return 0;
}
/* Prepare and start transfer for all not started requests. */ staticint cdns2_start_all_request(struct cdns2_device *pdev, struct cdns2_endpoint *pep)
{ struct cdns2_request *preq; int ret;
while (!list_empty(&pep->deferred_list)) {
preq = cdns2_next_preq(&pep->deferred_list);
ret = cdns2_ep_run_transfer(pep, preq); if (ret) return ret;
/* * Check whether trb has been handled by DMA. * * Endpoint must be selected before invoking this function. * * Returns false if request has not been handled by DMA, else returns true. * * SR - start ring * ER - end ring * DQ = ring->dequeue - dequeue position * EQ = ring->enqueue - enqueue position * ST = preq->start_trb - index of first TRB in transfer ring * ET = preq->end_trb - index of last TRB in transfer ring * CI = current_index - index of processed TRB by DMA. * * As first step, we check if the TRB between the ST and ET. * Then, we check if cycle bit for index pep->dequeue * is correct. * * some rules: * 1. ring->dequeue never equals to current_index. * 2 ring->enqueue never exceed ring->dequeue * 3. exception: ring->enqueue == ring->dequeue * and ring->free_trbs is zero. * This case indicate that TR is full. * * At below two cases, the request have been handled. * Case 1 - ring->dequeue < current_index * SR ... EQ ... DQ ... CI ... ER * SR ... DQ ... CI ... EQ ... ER * * Case 2 - ring->dequeue > current_index * This situation takes place when CI go through the LINK TRB at the end of * transfer ring. * SR ... CI ... EQ ... DQ ... ER
*/ staticbool cdns2_trb_handled(struct cdns2_endpoint *pep, struct cdns2_request *preq)
{ struct cdns2_device *pdev = pep->pdev; struct cdns2_ring *ring; struct cdns2_trb *trb; int current_index = 0; int handled = 0; int doorbell;
/* * Only ISO transfer can use 2 entries outside the standard * Transfer Ring. First of them is used as zero length packet and the * second as LINK TRB.
*/ if (current_index >= TRBS_PER_SEGMENT) goto finish;
/* Current trb doesn't belong to this request. */ if (preq->start_trb < preq->end_trb) { if (ring->dequeue > preq->end_trb) goto finish;
if (ring->dequeue < preq->start_trb) goto finish;
}
/* * The TRB was changed as link TRB, and the request * was handled at ep_dequeue.
*/ while (TRB_FIELD_TO_TYPE(le32_to_cpu(trb->control)) == TRB_LINK &&
le32_to_cpu(trb->length)) {
trace_cdns2_complete_trb(pep, trb);
cdns2_ep_inc_deq(&pep->ring);
trb = pep->ring.trbs + pep->ring.dequeue;
}
/* * Re-select endpoint. It could be changed by other CPU * during handling usb_gadget_giveback_request.
*/
cdns2_select_ep(pdev, pep->endpoint.address);
while (cdns2_trb_handled(pep, preq)) {
preq->finished_trb++;
if (preq->finished_trb >= preq->num_of_trb)
request_handled = true;
/* * Sometimes ISO Error for mult=1 or mult=2 is not propagated on time * from USB module to DMA module. To protect against this driver * checks also the txcs/rxcs registers.
*/ if ((ep_sts_reg & DMA_EP_STS_ISOERR) || isoerror) {
clear_reg_bit_32(&pdev->adma_regs->ep_cfg, DMA_EP_CFG_ENABLE);
if (ep_sts_reg & DMA_EP_STS_TRBERR || pep->skip) { if (pep->ep_state & EP_STALL_PENDING &&
!(ep_sts_reg & DMA_EP_STS_DESCMIS))
cdns2_ep_stall_flush(pep);
/* * For isochronous transfer driver completes request on * IOC or on TRBERR. IOC appears only when device receive * OUT data packet. If host disable stream or lost some packet * then the only way to finish all queued transfer is to do it * on TRBERR event.
*/ if (pep->type == USB_ENDPOINT_XFER_ISOC && !pep->wa1_set) { if (!pep->dir)
clear_reg_bit_32(&pdev->adma_regs->ep_cfg,
DMA_EP_CFG_ENABLE);
/* * The USBIRQ_URESET is reported at the beginning of * reset signal. 100ms is enough time to finish reset * process. For high-speed reset procedure is completed * when controller detect HS mode.
*/ for (i = 0; i < 100; i++) {
mdelay(1);
speed = cdns2_get_speed(pdev); if (speed == USB_SPEED_HIGH) break;
}
if (pdev->gadget.speed == USB_SPEED_FULL) if (pep->type == USB_ENDPOINT_XFER_INT)
pep->interval = desc->bInterval;
if (pep->interval > ISO_MAX_INTERVAL &&
pep->type == USB_ENDPOINT_XFER_ISOC) {
dev_err(pdev->dev, "ISO period is limited to %d (current: %d)\n",
ISO_MAX_INTERVAL, pep->interval);
ret = -EINVAL; gotoexit;
}
/* * During ISO OUT traffic DMA reads Transfer Ring for the EP which has * never got doorbell. * This issue was detected only on simulation, but to avoid this issue * driver add protection against it. To fix it driver enable ISO OUT * endpoint before setting DRBL. This special treatment of ISO OUT * endpoints are recommended by controller specification.
*/ if (pep->type == USB_ENDPOINT_XFER_ISOC && !pep->dir)
enable = 0;
ret = cdns2_alloc_tr_segment(pep); if (ret) gotoexit;
ret = cdns2_ep_config(pep, enable); if (ret) {
cdns2_free_tr_segment(pep);
ret = -EINVAL; gotoexit;
}
/* * Driver needs some time before resetting the endpoint. * It needs to wait for the DBUSY bit to clear or for the timeout to expire. * 10us is enough time for the controller to stop the transfer.
*/
readl_poll_timeout_atomic(&pdev->adma_regs->ep_sts, val,
!(val & DMA_EP_STS_DBUSY), 1, 10);
writel(DMA_EP_CMD_EPRST, &pdev->adma_regs->ep_cmd);
/* Update ring only if removed request is on pending_req_list list. */ if (req_on_hw_ring && link_trb) { /* Stop DMA */
writel(DMA_EP_CMD_DFLUSH, &pep->pdev->adma_regs->ep_cmd);
if (ep_correct && !(pep->ep_state & EP_CLAIMED)) return pep;
}
return ERR_PTR(-ENOENT);
}
/* * Function used to recognize which endpoints will be used to optimize * on-chip memory usage.
*/ staticstruct
usb_ep *cdns2_gadget_match_ep(struct usb_gadget *gadget, struct usb_endpoint_descriptor *desc, struct usb_ss_ep_comp_descriptor *comp_desc)
{ struct cdns2_device *pdev = gadget_to_cdns2_device(gadget); struct cdns2_endpoint *pep; unsignedlong flags;
pep = cdns2_find_available_ep(pdev, desc); if (IS_ERR(pep)) {
dev_err(pdev->dev, "no available ep\n"); return NULL;
}
/* Frees the transfer ring segment of every software endpoint. */
static void cdns2_free_all_eps(struct cdns2_device *pdev)
{
	int ep_idx;

	for (ep_idx = 0; ep_idx < CDNS2_ENDPOINTS_NUM; ep_idx++)
		cdns2_free_tr_segment(&pdev->eps[ep_idx]);
}
/* Initializes software endpoints of gadget. */ staticint cdns2_init_eps(struct cdns2_device *pdev)
{ struct cdns2_endpoint *pep; int i;
for (i = 0; i < CDNS2_ENDPOINTS_NUM; i++) { bool direction = !(i & 1); /* Start from OUT endpoint. */
u8 epnum = ((i + 1) >> 1);
/* * Endpoints are being held in pdev->eps[] in form: * ep0, ep1out, ep1in ... ep15out, ep15in.
*/ if (!CDNS2_IF_EP_EXIST(pdev, epnum, direction)) continue;
pep = &pdev->eps[i];
pep->pdev = pdev;
pep->num = epnum; /* 0 for OUT, 1 for IN. */
pep->dir = direction ? USB_DIR_IN : USB_DIR_OUT;
pep->idx = i;
/* Ep0in and ep0out are represented by pdev->eps[0]. */ if (!epnum) { int ret;
/* * Driver assumes that each USBHS controller has at least * one IN and one OUT non control endpoint.
*/ if (!pdev->onchip_tx_buf && !pdev->onchip_rx_buf) {
ret = -EINVAL;
dev_err(pdev->dev, "Invalid on-chip memory configuration\n"); goto put_gadget;
}
if (!(pdev->eps_supported & ~0x00010001)) {
ret = -EINVAL;
dev_err(pdev->dev, "No hardware endpoints available\n"); goto put_gadget;
}
max_speed = usb_get_maximum_speed(pdev->dev);
switch (max_speed) { case USB_SPEED_FULL: case USB_SPEED_HIGH: break; default:
dev_err(pdev->dev, "invalid maximum_speed parameter %d\n",
max_speed);
fallthrough; case USB_SPEED_UNKNOWN:
max_speed = USB_SPEED_HIGH; break;
}
/*
 * Initializes the gadget side of the controller: sets the DMA mask,
 * precomputes ISO AXI burst options, starts the gadget and requests
 * the (possibly shared) interrupt line.
 *
 * Returns 0 on success or a negative error code.
 */
int cdns2_gadget_init(struct cdns2_device *pdev)
{
	int ret;

	/* Controller DMA is limited to 32-bit addresses. */
	ret = dma_set_mask_and_coherent(pdev->dev, DMA_BIT_MASK(32));
	if (ret) {
		dev_err(pdev->dev, "Failed to set dma mask: %d\n", ret);
		return ret;
	}

	pm_runtime_get_sync(pdev->dev);

	cdsn2_isoc_burst_opt(pdev);

	ret = cdns2_gadget_start(pdev);
	if (ret) {
		pm_runtime_put_sync(pdev->dev);
		return ret;
	}

	/*
	 * Because the interrupt line can be shared with other components in
	 * the driver, it can't use the IRQF_ONESHOT flag here.
	 */
	ret = devm_request_threaded_irq(pdev->dev, pdev->irq,
					cdns2_usb_irq_handler,
					cdns2_thread_irq_handler,
					IRQF_SHARED,
					dev_name(pdev->dev),
					pdev);
	if (ret) {
		cdns2_gadget_remove(pdev);
		return ret;
	}

	return 0;
}
Messung V0.5
¤ Dauer der Verarbeitung: 0.13 Sekunden
(vorverarbeitet)
¤
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.