/*
 * Work around 1:
 * In some situations the controller may fetch a stale data address from a TRB
 * in the following sequence:
 * 1. Controller reads a TRB that includes the data address
 * 2. Software updates TRBs, including the data address and Cycle bit
 * 3. Controller reads the TRB that includes the Cycle bit
 * 4. DMA runs with the stale data address
 *
 * To fix this problem, the driver needs to make the first TRB in a TD invalid.
 * After preparing all TRBs the driver needs to check the position of DMA and,
 * if the DMA points to the first just-added TRB and the doorbell is 1,
 * then the driver must defer making this TRB valid. This TRB will be made
 * valid while adding the next TRB, but only if DMA is stopped or at a TRBERR
 * interrupt.
 *
 * The issue has been fixed in the DEV_VER_V3 version of the controller.
 *
 * Work around 2:
 * The controller for OUT endpoints has shared on-chip buffers for all incoming
 * packets, including ep0out. It is a FIFO buffer, so packets must be handled
 * by DMA in the correct order. If the first packet in the buffer is not
 * handled, then the following packets directed to other endpoints and
 * functions will be blocked.
 * Additionally, the packets directed to one endpoint can block the entire
 * on-chip buffers. In this case transfers to other endpoints are also blocked.
 *
 * To resolve this issue, after raising the descriptor-missing interrupt the
 * driver prepares an internal usb_request object and uses it to arm the DMA
 * transfer.
 *
 * The problematic situation was observed when an endpoint had been enabled
 * but no usb_request was queued. The driver tries to detect such endpoints
 * and uses this workaround only for them.
 *
 * The driver uses a limited number of buffers. This number can be set by the
 * macro CDNS3_WA2_NUM_BUFFERS.
 *
 * Such a blocking situation was observed on the ACM gadget. For this function
 * the host sends OUT data packets, but the ACM function is not prepared for
 * them. This causes the buffer placed in on-chip memory to block transfers to
 * other endpoints.
 *
 * The issue has been fixed in the DEV_VER_V2 version of the controller.
 */
/**
 * cdns3_clear_register_bit - clear bit in given register.
 * @ptr: address of device controller register to be read and changed
 * @mask: bits requested to clear
 */
static void cdns3_clear_register_bit(void __iomem *ptr, u32 mask)
{
	/* read-modify-write: drop the requested bits, keep the rest */
	mask = readl(ptr) & ~mask;
	writel(mask, ptr);
}
/**
 * cdns3_set_register_bit - set bit in given register.
 * @ptr: address of device controller register to be read and changed
 * @mask: bits requested to set
 */
void cdns3_set_register_bit(void __iomem *ptr, u32 mask)
{
	/* OR the requested bits into the current register value */
	writel(readl(ptr) | mask, ptr);
}
/**
 * cdns3_ep_addr_to_index - converts endpoint address to
 * index of endpoint object in cdns3_device.eps[] container
 * @ep_addr: endpoint address for which endpoint object is required
 *
 * IN endpoints are stored in the upper half of the eps[] array.
 */
u8 cdns3_ep_addr_to_index(u8 ep_addr)
{
	u8 index = ep_addr & 0x7F;

	if (ep_addr & USB_DIR_IN)
		index += 16;

	return index;
}
staticint cdns3_get_dma_pos(struct cdns3_device *priv_dev, struct cdns3_endpoint *priv_ep)
{ int dma_index;
/**
 * cdns3_next_request - returns next request from list
 * @list: list containing requests
 *
 * Returns request or NULL if no requests in list
 */
struct usb_request *cdns3_next_request(struct list_head *list)
{
	return list_first_entry_or_null(list, struct usb_request, list);
}
/** * cdns3_next_align_buf - returns next buffer from list * @list: list containing buffers * * Returns buffer or NULL if no buffers in list
*/ staticstruct cdns3_aligned_buf *cdns3_next_align_buf(struct list_head *list)
{ return list_first_entry_or_null(list, struct cdns3_aligned_buf, list);
}
/** * cdns3_next_priv_request - returns next request from list * @list: list containing requests * * Returns request or NULL if no requests in list
*/ staticstruct cdns3_request *cdns3_next_priv_request(struct list_head *list)
{ return list_first_entry_or_null(list, struct cdns3_request, list);
}
/**
 * cdns3_get_tdl - gets current tdl for selected endpoint.
 * @priv_dev: extended gadget object
 *
 * Before calling this function the appropriate endpoint must
 * be selected by means of cdns3_select_ep function.
 *
 * Returns the transfer descriptor length (TDL); read from the ep_cmd
 * register on controllers before DEV_VER_V3, from ep_tdl otherwise.
 */
static int cdns3_get_tdl(struct cdns3_device *priv_dev)
{
	if (priv_dev->dev_ver < DEV_VER_V3)
		return EP_CMD_TDL_GET(readl(&priv_dev->regs->ep_cmd));
	else
		return readl(&priv_dev->regs->ep_tdl);
}
/**
 * cdns3_allocate_trb_pool - Allocates TRB's pool for selected endpoint
 * @priv_ep: endpoint object
 *
 * Function will return 0 on success or -ENOMEM on allocation error
 */
int cdns3_allocate_trb_pool(struct cdns3_endpoint *priv_ep)
{
	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
	int ring_size = TRB_RING_SIZE;
	int num_trbs = ring_size / TRB_SIZE;
	struct cdns3_trb *link_trb;

	/* A previously allocated ring that is too small must be freed first */
	if (priv_ep->trb_pool && priv_ep->alloc_ring_size < ring_size)
		cdns3_free_trb_pool(priv_ep);

	if (!priv_ep->trb_pool) {
		priv_ep->trb_pool = dma_pool_alloc(priv_dev->eps_dma_pool,
						   GFP_ATOMIC,
						   &priv_ep->trb_pool_dma);
		if (!priv_ep->trb_pool)
			return -ENOMEM;

		priv_ep->alloc_ring_size = ring_size;
	}

	memset(priv_ep->trb_pool, 0, ring_size);

	priv_ep->num_trbs = num_trbs;

	/* Endpoint 0 (num == 0) keeps no link TRB */
	if (!priv_ep->num)
		return 0;

	/* Initialize the last TRB as Link TRB */
	link_trb = priv_ep->trb_pool + (priv_ep->num_trbs - 1);

	if (priv_ep->use_streams) {
		/*
		 * For stream capable endpoints driver use single correct TRB.
		 * The last trb has zeroed cycle bit
		 */
		link_trb->control = 0;
	} else {
		link_trb->buffer = cpu_to_le32(TRB_BUFFER(priv_ep->trb_pool_dma));
		link_trb->control = cpu_to_le32(TRB_CYCLE | TRB_TYPE(TRB_LINK) |
						TRB_TOGGLE);
	}
	return 0;
}
/** * cdns3_ep_stall_flush - Stalls and flushes selected endpoint * @priv_ep: endpoint object * * Endpoint must be selected before call to this function
*/ staticvoid cdns3_ep_stall_flush(struct cdns3_endpoint *priv_ep)
{ struct cdns3_device *priv_dev = priv_ep->cdns3_dev; int val;
for (i = 0; i < CDNS3_ENDPOINTS_MAX_COUNT; i++) if (priv_dev->eps[i])
priv_dev->eps[i]->flags &= ~EP_CONFIGURED;
}
/**
 * cdns3_ep_inc_trb - increment a trb index.
 * @index: Pointer to the TRB index to increment.
 * @cs: Cycle state
 * @trb_in_seg: number of TRBs in segment
 *
 * The index should never point to the link TRB. After incrementing,
 * if it points to the link TRB, wrap around to the beginning and revert
 * the cycle state bit. The link TRB is always at the last TRB entry.
 */
static void cdns3_ep_inc_trb(int *index, u8 *cs, int trb_in_seg)
{
	(*index)++;
	if (*index == (trb_in_seg - 1)) {
		/* reached the link TRB: wrap to start and toggle cycle state */
		*index = 0;
		*cs ^= 1;
	}
}
/** * cdns3_allow_enable_l1 - enable/disable permits to transition to L1. * @priv_dev: Extended gadget object * @enable: Enable/disable permit to transition to L1. * * If bit USB_CONF_L1EN is set and device receive Extended Token packet, * then controller answer with ACK handshake. * If bit USB_CONF_L1DS is set and device receive Extended Token packet, * then controller answer with NYET handshake.
*/ void cdns3_allow_enable_l1(struct cdns3_device *priv_dev, int enable)
{ if (enable)
writel(USB_CONF_L1EN, &priv_dev->regs->usb_conf); else
writel(USB_CONF_L1DS, &priv_dev->regs->usb_conf);
}
/** * cdns3_start_all_request - add to ring all request not started * @priv_dev: Extended gadget object * @priv_ep: The endpoint for whom request will be started. * * Returns return ENOMEM if transfer ring i not enough TRBs to start * all requests.
*/ staticint cdns3_start_all_request(struct cdns3_device *priv_dev, struct cdns3_endpoint *priv_ep)
{ struct usb_request *request; int ret = 0;
u8 pending_empty = list_empty(&priv_ep->pending_req_list);
/* * If the last pending transfer is INTERNAL * OR streams are enabled for this endpoint * do NOT start new transfer till the last one is pending
*/ if (!pending_empty) { struct cdns3_request *priv_req;
/* * WA2: Set flag for all not ISOC OUT endpoints. If this flag is set * driver try to detect whether endpoint need additional internal * buffer for unblocking on-chip FIFO buffer. This flag will be cleared * if before first DESCMISS interrupt the DMA will be armed.
*/ #define cdns3_wa2_enable_detection(priv_dev, priv_ep, reg) do { \ if (!priv_ep->dir && priv_ep->type != USB_ENDPOINT_XFER_ISOC) { \
priv_ep->flags |= EP_QUIRK_EXTRA_BUF_DET; \
(reg) |= EP_STS_EN_DESCMISEN; \
} } while (0)
/* unmap the gadget request before copying data */
usb_gadget_unmap_request_by_dev(priv_dev->sysdev, req,
priv_ep->dir);
cdns3_wa2_descmiss_copy_data(priv_ep, req); if (!(priv_ep->flags & EP_QUIRK_END_TRANSFER) &&
req->length != req->actual) { /* wait for next part of transfer */ /* re-map the gadget request buffer*/
usb_gadget_map_request_by_dev(priv_dev->sysdev, req,
usb_endpoint_dir_in(priv_ep->endpoint.desc)); return NULL;
}
/* * If transfer was queued before DESCMISS appear than we * can disable handling of DESCMISS interrupt. Driver assumes that it * can disable special treatment for this endpoint.
*/ if (priv_ep->flags & EP_QUIRK_EXTRA_BUF_DET) {
u32 reg;
/* * DESCMISS transfer has been finished, so data will be * directly copied from internal allocated usb_request * objects.
*/ if (pending_empty && !descmiss_empty &&
!(priv_req->flags & REQUEST_INTERNAL)) {
cdns3_wa2_descmiss_copy_data(priv_ep,
&priv_req->request);
/** * cdns3_wa2_descmissing_packet - handles descriptor missing event. * @priv_ep: extended gadget object * * This function is used only for WA2. For more information see Work around 2 * description.
*/ staticvoid cdns3_wa2_descmissing_packet(struct cdns3_endpoint *priv_ep)
{ struct cdns3_request *priv_req; struct usb_request *request;
u8 pending_empty = list_empty(&priv_ep->pending_req_list);
/* check for pending transfer */ if (!pending_empty) {
trace_cdns3_wa2(priv_ep, "Ignoring Descriptor missing IRQ\n"); return;
}
/* if this field is still assigned it indicate that transfer related * with this request has not been finished yet. Driver in this * case simply allocate next request and assign flag REQUEST_INTERNAL_CH * flag to previous one. It will indicate that current request is * part of the previous one.
*/ if (priv_ep->descmis_req)
priv_ep->descmis_req->flags |= REQUEST_INTERNAL_CH;
/**
 * cdns3_gadget_giveback - call struct usb_request's ->complete callback
 * @priv_ep: The endpoint to whom the request belongs to
 * @priv_req: The request we're giving back
 * @status: completion code for the request
 *
 * Must be called with controller's lock held and interrupts disabled. This
 * function will unmap @req and call its ->complete() callback to notify upper
 * layers that it has completed.
 */
void cdns3_gadget_giveback(struct cdns3_endpoint *priv_ep,
			   struct cdns3_request *priv_req,
			   int status)
{
	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
	struct usb_request *request = &priv_req->request;

	list_del_init(&request->list);

	/* keep an already-recorded status; only overwrite if still in progress */
	if (request->status == -EINPROGRESS)
		request->status = status;

	if (likely(!(priv_req->flags & REQUEST_UNALIGNED)))
		usb_gadget_unmap_request_by_dev(priv_dev->sysdev, request,
						priv_ep->dir);

	if ((priv_req->flags & REQUEST_UNALIGNED) &&
	    priv_ep->dir == USB_DIR_OUT && !request->status) {
		/* Make DMA buffer CPU accessible */
		dma_sync_single_for_cpu(priv_dev->sysdev,
					priv_req->aligned_buf->dma,
					request->actual,
					priv_req->aligned_buf->dir);
		/* copy received data from the bounce buffer to the caller's buffer */
		memcpy(request->buf, priv_req->aligned_buf->buf,
		       request->actual);
	}

	priv_req->flags &= ~(REQUEST_PENDING | REQUEST_UNALIGNED);
	/* All TRBs have finished, clear the counter */
	priv_req->finished_trb = 0;
	trace_cdns3_gadget_giveback(priv_req);

	/* WA2 (see file header): older controllers route through the workaround */
	if (priv_dev->dev_ver < DEV_VER_V2) {
		request = cdns3_wa2_gadget_giveback(priv_dev, priv_ep,
						    priv_req);
		if (!request)
			return;
	}

	/*
	 * zlp request is appended by driver, needn't call
	 * usb_gadget_giveback_request() to notify gadget composite driver.
	 */
	if (request->complete && request->buf != priv_dev->zlp_buf) {
		/* drop the controller lock around the upper-layer callback */
		spin_unlock(&priv_dev->lock);
		usb_gadget_giveback_request(&priv_ep->endpoint,
					    request);
		spin_lock(&priv_dev->lock);
	}

	if (request->buf == priv_dev->zlp_buf)
		cdns3_gadget_ep_free_request(&priv_ep->endpoint, request);
}
staticvoid cdns3_wa1_restore_cycle_bit(struct cdns3_endpoint *priv_ep)
{ /* Work around for stale data address in TRB*/ if (priv_ep->wa1_set) {
trace_cdns3_wa1(priv_ep, "restore cycle bit");
/* * For DEV_VER_V2 controller version we have enabled * USB_CONF2_EN_TDL_TRB in DMULT configuration. * This enables TDL calculation based on TRB, hence setting TDL in TRB.
*/ if (priv_dev->dev_ver >= DEV_VER_V2) { if (priv_dev->gadget.speed == USB_SPEED_SUPER)
trb->length |= cpu_to_le32(TRB_TDL_SS_SIZE(tdl));
}
priv_req->flags |= REQUEST_PENDING;
trb->control = cpu_to_le32(control);
trace_cdns3_prepare_trb(priv_ep, priv_req->trb);
/* * Memory barrier - Cycle Bit must be set before trb->length and * trb->buffer fields.
*/
wmb();
/* always first element */
writel(EP_TRADDR_TRADDR(priv_ep->trb_pool_dma),
&priv_dev->regs->ep_traddr);
if (!(priv_ep->flags & EP_STALLED)) {
trace_cdns3_ring(priv_ep); /*clearing TRBERR and EP_STS_DESCMIS before seting DRDY*/
writel(EP_STS_TRBERR | EP_STS_DESCMIS, &priv_dev->regs->ep_sts);
priv_ep->prime_flag = false;
/* * Controller version DEV_VER_V2 tdl calculation * is based on TRB
*/
/* ISO transfer require each SOF have a TD, each TD include some TRBs */ if (priv_ep->type == USB_ENDPOINT_XFER_ISOC)
num_trb = priv_ep->interval * num_trb_req; else
num_trb = num_trb_req;
/* Driver can't update LINK TRB if it is current processed. */ if (doorbell && dma_index == priv_ep->num_trbs - 1) {
priv_ep->flags |= EP_DEFERRED_DRDY; return -ENOBUFS;
}
/*updating C bt in Link TRB before starting DMA*/
link_trb = priv_ep->trb_pool + (priv_ep->num_trbs - 1); /* * For TRs size equal 2 enabling TRB_CHAIN for epXin causes * that DMA stuck at the LINK TRB. * On the other hand, removing TRB_CHAIN for longer TRs for * epXout cause that DMA stuck after handling LINK TRB. * To eliminate this strange behavioral driver set TRB_CHAIN * bit only for TR size > 2.
*/ if (priv_ep->type == USB_ENDPOINT_XFER_ISOC ||
TRBS_PER_SEGMENT > 2)
ch_bit = TRB_CHAIN;
if (priv_ep->type == USB_ENDPOINT_XFER_ISOC) { /* * ISO require LINK TRB must be first one of TD. * Fill LINK TRBs for left trb space to simply software process logic.
*/ while (priv_ep->enqueue) {
*trb = *link_trb;
trace_cdns3_prepare_trb(priv_ep, trb);
if (priv_dev->dev_ver <= DEV_VER_V2)
togle_pcs = cdns3_wa1_update_guard(priv_ep, trb);
/* set incorrect Cycle Bit for first trb*/
control = priv_ep->pcs ? 0 : TRB_CYCLE;
trb->length = 0; if (priv_dev->dev_ver >= DEV_VER_V2) {
u16 td_size;
td_size = DIV_ROUND_UP(request->length,
priv_ep->endpoint.maxpacket); if (priv_dev->gadget.speed == USB_SPEED_SUPER)
trb->length = cpu_to_le32(TRB_TDL_SS_SIZE(td_size)); else
control |= TRB_TDL_HS_SIZE(td_size);
}
do {
u32 length;
if (!(sg_iter % num_trb_req) && sg_supported)
s = request->sg;
/* fill TRB */
control |= TRB_TYPE(TRB_NORMAL); if (sg_supported) {
trb->buffer = cpu_to_le32(TRB_BUFFER(sg_dma_address(s)));
length = sg_dma_len(s);
} else {
trb->buffer = cpu_to_le32(TRB_BUFFER(trb_dma));
length = request->length;
}
if (priv_ep->flags & EP_TDLCHK_EN)
total_tdl += DIV_ROUND_UP(length,
priv_ep->endpoint.maxpacket);
trb_burst = priv_ep->trb_burst_size;
/* * Supposed DMA cross 4k bounder problem should be fixed at DEV_VER_V2, but still * met problem when do ISO transfer if sg enabled. * * Data pattern likes below when sg enabled, package size is 1k and mult is 2 * [UVC Header(8B) ] [data(3k - 8)] ... * * The received data at offset 0xd000 will get 0xc000 data, len 0x70. Error happen * as below pattern: * 0xd000: wrong * 0xe000: wrong * 0xf000: correct * 0x10000: wrong * 0x11000: wrong * 0x12000: correct * ... * * But it is still unclear about why error have not happen below 0xd000, it should * cross 4k bounder. But anyway, the below code can fix this problem. * * To avoid DMA cross 4k bounder at ISO transfer, reduce burst len according to 16.
*/ if (priv_ep->type == USB_ENDPOINT_XFER_ISOC && priv_dev->dev_ver <= DEV_VER_V2) if (ALIGN_DOWN(trb->buffer, SZ_4K) !=
ALIGN_DOWN(trb->buffer + length, SZ_4K))
trb_burst = 16;
/* * first trb should be prepared as last to avoid processing * transfer to early
*/ if (sg_iter != 0)
control |= pcs;
if (priv_ep->type == USB_ENDPOINT_XFER_ISOC && !priv_ep->dir) {
control |= ioc | TRB_ISP;
} else { /* for last element in TD or in SG list */ if (sg_iter == (num_trb - 1) && sg_iter != 0)
control |= pcs | ioc | TRB_ISP;
}
if (sg_iter)
trb->control = cpu_to_le32(control); else
priv_req->trb->control = cpu_to_le32(control);
if (sg_supported) {
trb->control |= cpu_to_le32(TRB_ISP); /* Don't set chain bit for last TRB */ if ((sg_iter % num_trb_req) < num_trb_req - 1)
trb->control |= cpu_to_le32(TRB_CHAIN);
/* * Memory barrier - cycle bit must be set before other filds in trb.
*/
wmb();
/* give the TD to the consumer*/ if (togle_pcs)
trb->control = trb->control ^ cpu_to_le32(1);
if (priv_dev->dev_ver <= DEV_VER_V2)
cdns3_wa1_tray_restore_cycle_bit(priv_dev, priv_ep);
if (num_trb > 1) { int i = 0;
while (i < num_trb) {
trace_cdns3_prepare_trb(priv_ep, trb + i); if (trb + i == link_trb) {
trb = priv_ep->trb_pool;
num_trb = num_trb - i;
i = 0;
} else {
i++;
}
}
} else {
trace_cdns3_prepare_trb(priv_ep, priv_req->trb);
}
/* * Memory barrier - Cycle Bit must be set before trb->length and * trb->buffer fields.
*/
wmb();
/* * For DMULT mode we can set address to transfer ring only once after * enabling endpoint.
*/ if (priv_ep->flags & EP_UPDATE_EP_TRBADDR) { /* * Until SW is not ready to handle the OUT transfer the ISO OUT * Endpoint should be disabled (EP_CFG.ENABLE = 0). * EP_CFG_ENABLE must be set before updating ep_traddr.
*/ if (priv_ep->type == USB_ENDPOINT_XFER_ISOC && !priv_ep->dir &&
!(priv_ep->flags & EP_QUIRK_ISO_OUT_EN)) {
priv_ep->flags |= EP_QUIRK_ISO_OUT_EN;
cdns3_set_register_bit(&priv_dev->regs->ep_cfg,
EP_CFG_ENABLE);
}
/** * cdns3_trb_handled - check whether trb has been handled by DMA * * @priv_ep: extended endpoint object. * @priv_req: request object for checking * * Endpoint must be selected before invoking this function. * * Returns false if request has not been handled by DMA, else returns true. * * SR - start ring * ER - end ring * DQ = priv_ep->dequeue - dequeue position * EQ = priv_ep->enqueue - enqueue position * ST = priv_req->start_trb - index of first TRB in transfer ring * ET = priv_req->end_trb - index of last TRB in transfer ring * CI = current_index - index of processed TRB by DMA. * * As first step, we check if the TRB between the ST and ET. * Then, we check if cycle bit for index priv_ep->dequeue * is correct. * * some rules: * 1. priv_ep->dequeue never equals to current_index. * 2 priv_ep->enqueue never exceed priv_ep->dequeue * 3. exception: priv_ep->enqueue == priv_ep->dequeue * and priv_ep->free_trbs is zero. * This case indicate that TR is full. * * At below two cases, the request have been handled. * Case 1 - priv_ep->dequeue < current_index * SR ... EQ ... DQ ... CI ... ER * SR ... DQ ... CI ... EQ ... ER * * Case 2 - priv_ep->dequeue > current_index * This situation takes place when CI go through the LINK TRB at the end of * transfer ring. * SR ... CI ... EQ ... DQ ... ER
*/ staticbool cdns3_trb_handled(struct cdns3_endpoint *priv_ep, struct cdns3_request *priv_req)
{ struct cdns3_device *priv_dev = priv_ep->cdns3_dev; struct cdns3_trb *trb; int current_index = 0; int handled = 0; int doorbell;
while (!list_empty(&priv_ep->pending_req_list)) {
request = cdns3_next_request(&priv_ep->pending_req_list);
priv_req = to_cdns3_request(request);
trb = priv_ep->trb_pool + priv_ep->dequeue;
/* The TRB was changed as link TRB, and the request was handled at ep_dequeue */ while (TRB_FIELD_TO_TYPE(le32_to_cpu(trb->control)) == TRB_LINK) {
/* ISO ep_traddr may stop at LINK TRB */ if (priv_ep->dequeue == cdns3_get_dma_pos(priv_dev, priv_ep) &&
priv_ep->type == USB_ENDPOINT_XFER_ISOC) break;
if (!request->stream_id) { /* Re-select endpoint. It could be changed by other CPU * during handling usb_gadget_giveback_request.
*/
cdns3_select_ep(priv_dev, priv_ep->endpoint.address);
while (cdns3_trb_handled(priv_ep, priv_req)) {
priv_req->finished_trb++; if (priv_req->finished_trb >= priv_req->num_of_trb)
request_handled = true;
if (request_handled) { /* TRBs are duplicated by priv_ep->interval time for ISO IN */ if (priv_ep->type == USB_ENDPOINT_XFER_ISOC && priv_ep->dir)
request->actual /= priv_ep->interval;
if (priv_ep->type != USB_ENDPOINT_XFER_ISOC &&
TRBS_PER_SEGMENT == 2) break;
} else { /* Re-select endpoint. It could be changed by other CPU * during handling usb_gadget_giveback_request.
*/
cdns3_select_ep(priv_dev, priv_ep->endpoint.address);
/* * Continue the previous transfer: * There is some racing between ERDY and PRIME. The device send * ERDY and almost in the same time Host send PRIME. It cause * that host ignore the ERDY packet and driver has to send it * again.
*/ if (tdl && (dbusy || !EP_STS_BUFFEMPTY(ep_sts_reg) ||
EP_STS_HOSTPP(ep_sts_reg))) {
writel(EP_CMD_ERDY |
EP_CMD_ERDY_SID(priv_ep->last_stream_id),
&priv_dev->regs->ep_cmd);
ep_sts_reg &= ~(EP_STS_MD_EXIT | EP_STS_IOC);
} else {
priv_ep->prime_flag = true;
if (deferred_request && !pending_request) {
cdns3_start_all_request(priv_dev, priv_ep);
}
}
}
if (ep_sts_reg & EP_STS_TRBERR) { if (priv_ep->flags & EP_STALL_PENDING &&
!(ep_sts_reg & EP_STS_DESCMIS &&
priv_dev->dev_ver < DEV_VER_V2)) {
cdns3_ep_stall_flush(priv_ep);
}
/* * For isochronous transfer driver completes request on * IOC or on TRBERR. IOC appears only when device receive * OUT data packet. If host disable stream or lost some packet * then the only way to finish all queued transfer is to do it * on TRBERR event.
*/ if (priv_ep->type == USB_ENDPOINT_XFER_ISOC &&
!priv_ep->wa1_set) { if (!priv_ep->dir) {
u32 ep_cfg = readl(&priv_dev->regs->ep_cfg);
/* * MD_EXIT interrupt sets when stream capable endpoint exits * from MOVE DATA state of Bulk IN/OUT stream protocol state machine
*/ if (priv_ep->dir == USB_DIR_OUT && (ep_sts_reg & EP_STS_MD_EXIT) &&
(priv_ep->ep_sts_pending & EP_STS_IOT) && priv_ep->use_streams) {
priv_ep->ep_sts_pending = 0;
cdns3_transfer_completed(priv_dev, priv_ep);
}
/* * WA2: this condition should only be meet when * priv_ep->flags & EP_QUIRK_EXTRA_BUF_DET or * priv_ep->flags & EP_QUIRK_EXTRA_BUF_EN. * In other cases this interrupt will be disabled.
*/ if (ep_sts_reg & EP_STS_DESCMIS && priv_dev->dev_ver < DEV_VER_V2 &&
!(priv_ep->flags & EP_STALLED))
cdns3_wa2_descmissing_packet(priv_ep);
/**
 * cdns3_check_usb_interrupt_proceed - Processes interrupt related to device
 * @priv_dev: extended gadget object
 * @usb_ists: bitmap representation of device's reported interrupts
 * (usb_ists register value)
 */
static void cdns3_check_usb_interrupt_proceed(struct cdns3_device *priv_dev,
					      u32 usb_ists)
__must_hold(&priv_dev->lock)
{
	int speed = 0;

	trace_cdns3_usb_irq(priv_dev, usb_ists);

	if (usb_ists & USB_ISTS_L1ENTI) {
		/*
		 * WORKAROUND: CDNS3 controller has issue with hardware resuming
		 * from L1. To fix it, if any DMA transfer is pending driver
		 * must starts driving resume signal immediately.
		 */
		if (readl(&priv_dev->regs->drbl))
			__cdns3_gadget_wakeup(priv_dev);
	}

	/* L2/U3 entry: notify gadget driver of suspend */
	if (usb_ists & (USB_ISTS_L2ENTI | USB_ISTS_U3ENTI)) {
		if (priv_dev->gadget_driver &&
		    priv_dev->gadget_driver->suspend) {
			/* drop the lock around the gadget driver callback */
			spin_unlock(&priv_dev->lock);
			priv_dev->gadget_driver->suspend(&priv_dev->gadget);
			spin_lock(&priv_dev->lock);
		}
	}

	/* L2/U3 exit: notify gadget driver of resume */
	if (usb_ists & (USB_ISTS_L2EXTI | USB_ISTS_U3EXTI)) {
		if (priv_dev->gadget_driver &&
		    priv_dev->gadget_driver->resume) {
			spin_unlock(&priv_dev->lock);
			priv_dev->gadget_driver->resume(&priv_dev->gadget);
			spin_lock(&priv_dev->lock);
		}
	}

	/* reset */
	if (usb_ists & (USB_ISTS_UWRESI | USB_ISTS_UHRESI | USB_ISTS_U2RESI)) {
		if (priv_dev->gadget_driver) {
			spin_unlock(&priv_dev->lock);
			usb_gadget_udc_reset(&priv_dev->gadget,
					     priv_dev->gadget_driver);
			spin_lock(&priv_dev->lock);

			/* read again to check the actual speed */
			speed = cdns3_get_speed(priv_dev);
			priv_dev->gadget.speed = speed;
			cdns3_hw_reset_eps_config(priv_dev);
			cdns3_ep0_config(priv_dev);
		}
	}
}
/**
 * cdns3_device_irq_handler - interrupt handler for device part of controller
 *
 * @irq: irq number for cdns3 core device
 * @data: structure of cdns3
 *
 * Returns IRQ_HANDLED or IRQ_NONE
 */
static irqreturn_t cdns3_device_irq_handler(int irq, void *data)
{
	struct cdns3_device *priv_dev = data;
	struct cdns *cdns = dev_get_drvdata(priv_dev->dev);
	irqreturn_t ret = IRQ_NONE;
	u32 reg;

	/* nothing to do while the controller is in low-power mode */
	if (cdns->in_lpm)
		return ret;

	/* check USB device interrupt */
	reg = readl(&priv_dev->regs->usb_ists);
	if (reg) {
		/* After masking interrupts the new interrupts won't be
		 * reported in usb_ists/ep_ists. In order to not lose some
		 * of them driver disables only detected interrupts.
		 * They will be enabled ASAP after clearing source of
		 * interrupt. This an unusual behavior only applies to
		 * usb_ists register.
		 */
		reg = ~reg & readl(&priv_dev->regs->usb_ien);
		/* mask deferred interrupt. */
		writel(reg, &priv_dev->regs->usb_ien);
		ret = IRQ_WAKE_THREAD;
	}

	/* check endpoint interrupt */
	reg = readl(&priv_dev->regs->ep_ists);
	if (reg) {
		/* mask all endpoint interrupts; handling is deferred to the thread */
		writel(0, &priv_dev->regs->ep_ien);
		ret = IRQ_WAKE_THREAD;
	}

	return ret;
}
/** * cdns3_device_thread_irq_handler - interrupt handler for device part * of controller * * @irq: irq number for cdns3 core device * @data: structure of cdns3 * * Returns IRQ_HANDLED or IRQ_NONE
*/ static irqreturn_t cdns3_device_thread_irq_handler(int irq, void *data)
{ struct cdns3_device *priv_dev = data;
irqreturn_t ret = IRQ_NONE; unsignedlong flags; unsignedint bit; unsignedlong reg;
/** * cdns3_ep_onchip_buffer_reserve - Try to reserve onchip buf for EP * * The real reservation will occur during write to EP_CFG register, * this function is used to check if the 'size' reservation is allowed. * * @priv_dev: extended gadget object * @size: the size (KB) for EP would like to allocate * @is_in: endpoint direction * * Return 0 if the required size can met or negative value on failure
*/ staticint cdns3_ep_onchip_buffer_reserve(struct cdns3_device *priv_dev, int size, int is_in)
{ int remained;
/* 2KB are reserved for EP0*/
remained = priv_dev->onchip_buffers - priv_dev->onchip_used_size - 2;
if (is_in) { if (remained < size) return -EPERM;
priv_dev->onchip_used_size += size;
} else { int required;
/** * ALL OUT EPs are shared the same chunk onchip memory, so * driver checks if it already has assigned enough buffers
*/ if (priv_dev->out_mem_is_allocated >= size) return 0;
/* * In versions preceding DEV_VER_V2, for example, iMX8QM, there exit the bugs * in the DMA. These bugs occur when the trb_burst_size exceeds 16 and the * address is not aligned to 128 Bytes (which is a product of the 64-bit AXI * and AXI maximum burst length of 16 or 0xF+1, dma_axi_ctrl0[3:0]). This * results in data corruption when it crosses the 4K border. The corruption * specifically occurs from the position (4K - (address & 0x7F)) to 4K. * * So force trb_burst_size to 16 at such platform.
*/ if (priv_dev->dev_ver < DEV_VER_V2)
priv_ep->trb_burst_size = 16;
/* onchip buffer is only allocated before configuration */ if (!priv_dev->hw_configured_flag) {
ret = cdns3_ep_onchip_buffer_reserve(priv_dev, buffering + 1,
!!priv_ep->dir); if (ret) {
dev_err(priv_dev->dev, "onchip mem is full, ep is invalid\n"); return ret;
}
}
/* * Stream capable endpoints are handled by using ep_tdl * register. Other endpoints use TDL from TRB feature.
*/
cdns3_clear_register_bit(&priv_dev->regs->tdl_from_trb,
mask);
}
/* Enable Stream Bit TDL chk and SID chk */
ep_cfg |= EP_CFG_STREAM_EN | EP_CFG_TDL_CHK | EP_CFG_SID_CHK;
}
ep_cfg |= EP_CFG_MAXPKTSIZE(max_packet_size) |
EP_CFG_MULT(priv_ep->mult) | /* must match EP setting */
EP_CFG_BUFFERING(buffering) |
EP_CFG_MAXBURST(maxburst);
list_for_each_entry(ep, &priv_dev->gadget.ep_list, ep_list) { unsignedlong num; int ret; /* ep name pattern likes epXin or epXout */ char c[2] = {ep->name[2], '\0'};
ret = kstrtoul(c, 10, &num); if (ret) return ERR_PTR(ret);
priv_ep = ep_to_cdns3_ep(ep); if (cdns3_ep_dir_is_correct(desc, priv_ep)) { if (!(priv_ep->flags & EP_CLAIMED)) {
priv_ep->num = num; return priv_ep;
}
}
}
return ERR_PTR(-ENOENT);
}
/* * Cadence IP has one limitation that all endpoints must be configured * (Type & MaxPacketSize) before setting configuration through hardware * register, it means we can't change endpoints configuration after * set_configuration. * * This function set EP_CLAIMED flag which is added when the gadget driver * uses usb_ep_autoconfig to configure specific endpoint; * When the udc driver receives set_configurion request, * it goes through all claimed endpoints, and configure all endpoints * accordingly. * * At usb_ep_ops.enable/disable, we only enable and disable endpoint through * ep_cfg register which can be changed after set_configuration, and do * some software operation accordingly.
*/ staticstruct
usb_ep *cdns3_gadget_match_ep(struct usb_gadget *gadget, struct usb_endpoint_descriptor *desc, struct usb_ss_ep_comp_descriptor *comp_desc)
{ struct cdns3_device *priv_dev = gadget_to_cdns3_device(gadget); struct cdns3_endpoint *priv_ep; unsignedlong flags;
priv_ep = cdns3_find_available_ep(priv_dev, desc); if (IS_ERR(priv_ep)) {
dev_err(priv_dev->dev, "no available ep\n"); return NULL;
}
/** * cdns3_gadget_ep_free_request - Free memory occupied by request * @ep: endpoint object associated with request * @request: request to free memory
*/
--> --------------------
--> maximum size reached
--> --------------------
Messung V0.5
¤ Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.0.25Bemerkung:
(vorverarbeitet)
¤
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.