	/* In absence of a request, bail out, must have been dequeued */
	if (!req)
		return;

	/*
	 * Request not active, move on to processing the queue; the
	 * active request was probably dequeued.
	 */
	if (!req->active)
		goto next_chunk;

	/* Check if HW has moved on */
	if (VHUB_EP_DMA_RPTR(stat) != 0) {
		EPDBG(ep, "DMA read pointer not 0!\n");
		return;
	}

	/* No current DMA ongoing */
	req->active = false;

	/* Grab length out of HW */
	len = VHUB_EP_DMA_TX_SIZE(stat);

	/* If not using DMA, copy data out if needed */
	if (!req->req.dma && !ep->epn.is_in && len) {
		if (req->req.actual + len > req->req.length) {
			req->last_desc = 1;
			status = -EOVERFLOW;
			goto done;
		} else {
			memcpy(req->req.buf + req->req.actual, ep->buf, len);
		}
	}

	/* Adjust size */
	req->req.actual += len;

	/* Check for short packet */
	if (len < ep->ep.maxpacket)
		req->last_desc = 1;
 done:
	/* That's it? Complete the request and pick a new one */
	if (req->last_desc >= 0) {
		ast_vhub_done(ep, req, status);
		req = list_first_entry_or_null(&ep->queue,
					       struct ast_vhub_req, queue);

		/*
		 * Due to the lock being dropped inside "done", the next
		 * request could already be active, so check for that and
		 * bail if needed.
		 */
		if (!req || req->active)
			return;
	}
 next_chunk:
	ast_vhub_epn_kick(ep, req);
}
static inline unsigned int ast_vhub_count_free_descs(struct ast_vhub_ep *ep)
{
	/*
	 * d_next == d_last means the descriptor list is empty to the HW,
	 * thus we can only have AST_VHUB_DESCS_COUNT - 1 descriptors
	 * in the list.
	 */
	return (ep->epn.d_last + AST_VHUB_DESCS_COUNT - ep->epn.d_next - 1) &
		(AST_VHUB_DESCS_COUNT - 1);
}
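/*
 * Worked example (illustrative; AST_VHUB_DESCS_COUNT is 256 per the TODO
 * comment below, and must be a power of two for the mask arithmetic above
 * to work): with d_next == d_last the list is empty and 255 descriptors
 * are free; after queueing one descriptor (d_next == d_last + 1), 254
 * remain free.
 */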
		/* Calculate next chunk size */
		chunk = len - act;
		if (chunk <= ep->epn.chunk_max) {
			/*
			 * Is this the last packet? Because we can have up
			 * to 8 packets in a descriptor, we can't just
			 * compare "chunk" with ep.maxpacket. We have to
			 * see if it's a multiple of it to know whether we
			 * have to send a zero-length packet. Sadly that
			 * involves a modulo, which is a bit expensive, but
			 * probably still better than not doing it.
			 */
			if (!chunk || !req->req.zero ||
			    (chunk % ep->ep.maxpacket) != 0)
				req->last_desc = d_num;
		} else {
			chunk = ep->epn.chunk_max;
		}
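		/*
		 * Illustration of the test above (values assumed): with
		 * maxpacket == 512 and req->req.zero set, a final chunk of
		 * 1024 bytes is an exact multiple of maxpacket, so the
		 * request is not marked last here and a zero-length chunk
		 * will terminate it on a later pass; a final chunk of 300
		 * bytes is short and marks the request last right away.
		 */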
		/* Interrupt if end of request or no more descriptors */

		/*
		 * TODO: Be smarter about it; if we don't have enough
		 * descriptors, request an interrupt before the queue is
		 * empty, or so, in order to be able to populate more
		 * before the HW runs out. This isn't a problem at the
		 * moment as we use 256 descriptors and only put at most
		 * one request in the ring.
		 */
		desc->w1 = cpu_to_le32(VHUB_DSC1_IN_SET_LEN(chunk));
		if (req->last_desc >= 0 || !ast_vhub_count_free_descs(ep))
			desc->w1 |= cpu_to_le32(VHUB_DSC1_IN_INTERRUPT);
		if (is_last_desc) {
			/*
			 * Because we can only have one request at a time
			 * in our descriptor list in this implementation,
			 * d_last and ep->epn.d_last should now be equal.
			 */
			CHECK(ep, d_last == ep->epn.d_last,
			      "DMA read ptr mismatch %d vs %d\n",
			      d_last, ep->epn.d_last);

			/* Note: done will drop and re-acquire the lock */
			ast_vhub_done(ep, req, 0);
			req = list_first_entry_or_null(&ep->queue,
						       struct ast_vhub_req,
						       queue);
			break;
		}
	}
	/* More work? */
	if (req)
		ast_vhub_epn_kick_desc(ep, req);
}
	/* Endpoint enabled? */
	if (!ep->epn.enabled || !u_ep->desc || !ep->dev || !ep->d_idx ||
	    !ep->dev->enabled) {
		EPDBG(ep, "Enqueuing request on wrong or disabled EP\n");
		return -ESHUTDOWN;
	}
	/*
	 * Map the request for DMA if possible. For now, the rule for DMA
	 * is that:
	 *
	 * * For single-stage mode (no descriptors):
	 *
	 *   - The buffer is aligned to an 8-byte boundary (HW requirement)
	 *   - For an OUT endpoint, the request size is a multiple of the
	 *     EP packet size (otherwise the controller will DMA past the
	 *     end of the buffer if the host sends an overlong packet).
	 *
	 * * For descriptor mode (TX only for now): always.
	 *
	 * We could relax the latter by making the decision to use the
	 * bounce buffer based on the size of a given *segment* of the
	 * request rather than the whole request.
	 */
	if (ep->epn.desc_mode ||
	    ((((unsigned long)u_req->buf & 7) == 0) &&
	     (ep->epn.is_in || !(u_req->length & (u_ep->maxpacket - 1))))) {
		rc = usb_gadget_map_request_by_dev(&vhub->pdev->dev, u_req,
						   ep->epn.is_in);
		if (rc) {
			dev_warn(&vhub->pdev->dev,
				 "Request mapping failure %d\n", rc);
			return rc;
		}
	} else {
		u_req->dma = 0;
	}
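	/*
	 * Example (values assumed for illustration): on an OUT endpoint
	 * with maxpacket == 512, a 513-byte request, or any request whose
	 * buffer is not 8-byte aligned, fails the test above and is
	 * serviced through the endpoint bounce buffer (ep->buf, see the
	 * memcpy in the ACK path) instead of being DMA-mapped.
	 */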
	/* Add request to list and kick processing if empty */
	list_add_tail(&req->queue, &ep->queue);
	if (empty) {
		if (ep->epn.desc_mode)
			ast_vhub_epn_kick_desc(ep, req);
		else
			ast_vhub_epn_kick(ep, req);
	}
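	/*
	 * Note (inferred from the completion paths above): only a request
	 * that finds the queue empty needs this explicit kick; later
	 * requests are picked up and kicked from the ACK handlers once
	 * the request ahead of them completes.
	 */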
	spin_unlock_irqrestore(&vhub->lock, flags);

	/* Wait for it to complete */
	for (loops = 0; loops < 1000; loops++) {
		state = readl(ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);
		state = VHUB_EP_DMA_PROC_STATUS(state);
		if (state == EP_DMA_PROC_RX_IDLE ||
		    state == EP_DMA_PROC_TX_IDLE)
			break;
		udelay(1);
	}
	if (loops >= 1000)
		dev_warn(&ep->vhub->pdev->dev, "Timeout waiting for DMA\n");
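	/*
	 * Note: 1000 iterations of udelay(1) bounds the busy-wait above
	 * to roughly a millisecond before we give up and warn.
	 */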
	/* If we don't have to restart the endpoint, that's it */
	if (!restart_ep)
		return;

	/* Restart the endpoint */
	if (ep->epn.desc_mode) {
		/*
		 * Take out the descriptors by resetting the DMA read
		 * pointer to be equal to the CPU write pointer, which
		 * makes the list look empty to the HW (see
		 * ast_vhub_count_free_descs).
		 *
		 * Note: If we ever support creating descriptors for
		 * requests that aren't the head of the queue, we may have
		 * to do something more complex here, especially if the
		 * request being taken out is not the current head
		 * descriptor.
		 */
		reg = VHUB_EP_DMA_SET_RPTR(ep->epn.d_next) |
		      VHUB_EP_DMA_SET_CPU_WPTR(ep->epn.d_next);
		writel(reg, ep->epn.regs + AST_VHUB_EP_DESC_STATUS);

		/* Then turn it back on */
		writel(ep->epn.dma_conf,
		       ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);
	} else {
		/* Single mode: just turn it back on */
		writel(ep->epn.dma_conf,
		       ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);
	}
}
	/* Can we use DMA descriptor mode? */
	ep->epn.desc_mode = ep->epn.descs && ep->epn.is_in;
	if (ep->epn.desc_mode)
		memset(ep->epn.descs, 0, 8 * AST_VHUB_DESCS_COUNT);
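	/*
	 * The size above assumes each DMA descriptor is 8 bytes, i.e. two
	 * 32-bit words (cf. the desc->w1 accesses in the kick path).
	 */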
	/*
	 * The large-send function can send up to 8 packets from one
	 * descriptor, with a limit of 4095 bytes.
	 */
	ep->epn.chunk_max = ep->ep.maxpacket;
	if (ep->epn.is_in) {
		ep->epn.chunk_max <<= 3;
		while (ep->epn.chunk_max > 4095)
			ep->epn.chunk_max -= ep->ep.maxpacket;
	}
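	/*
	 * Worked example (illustrative): with maxpacket == 512,
	 * 512 << 3 == 4096 exceeds 4095, so one packet is trimmed off and
	 * chunk_max becomes 3584 (7 packets); with maxpacket == 64,
	 * 64 << 3 == 512 fits and chunk_max stays at 512 (8 packets).
	 */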
	switch (type) {
	case USB_ENDPOINT_XFER_CONTROL:
		EPDBG(ep, "Only one control endpoint\n");
		return -EINVAL;
	case USB_ENDPOINT_XFER_INT:
		ep_conf = VHUB_EP_CFG_SET_TYPE(EP_TYPE_INT);
		break;
	case USB_ENDPOINT_XFER_BULK:
		ep_conf = VHUB_EP_CFG_SET_TYPE(EP_TYPE_BULK);
		break;
	case USB_ENDPOINT_XFER_ISOC:
		ep_conf = VHUB_EP_CFG_SET_TYPE(EP_TYPE_ISO);
		ep->epn.is_iso = true;
		break;
	default:
		return -EINVAL;
	}
	/* Encode the rest of the EP config register */
	if (maxpacket < 1024)
		ep_conf |= VHUB_EP_CFG_SET_MAX_PKT(maxpacket);
	if (!ep->epn.is_in)
		ep_conf |= VHUB_EP_CFG_DIR_OUT;
	ep_conf |= VHUB_EP_CFG_SET_EP_NUM(usb_endpoint_num(desc));
	ep_conf |= VHUB_EP_CFG_ENABLE;
	ep_conf |= VHUB_EP_CFG_SET_DEV(dev->index + 1);
	EPVDBG(ep, "config=%08x\n", ep_conf);
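	/*
	 * For illustration: a bulk IN endpoint 2 with maxpacket == 512 on
	 * the device at index 0 ends up with type bulk, no DIR_OUT bit,
	 * EP number 2, device field 1, max packet 512 and the enable bit
	 * set. The "< 1024" test is presumably there because the max
	 * packet field cannot encode 1024 itself, so that value is left
	 * as zero (an assumption; check the register definition).
	 */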
	/* Find a free one (no device) */
	spin_lock_irqsave(&vhub->lock, flags);
	for (i = 0; i < vhub->max_epns; i++)
		if (vhub->epns[i].dev == NULL)
			break;
	if (i >= vhub->max_epns) {
		spin_unlock_irqrestore(&vhub->lock, flags);
		return NULL;
	}
	/* Set it up */
	ep = &vhub->epns[i];
	ep->dev = d;
	spin_unlock_irqrestore(&vhub->lock, flags);

	DDBG(d, "Allocating gen EP %d for addr %d\n", i, addr);
	INIT_LIST_HEAD(&ep->queue);
	ep->d_idx = addr;
	ep->vhub = vhub;
	ep->ep.ops = &ast_vhub_epn_ops;
	ep->ep.name = kasprintf(GFP_KERNEL, "ep%d", addr);
	d->epns[addr - 1] = ep;
	ep->epn.g_idx = i;
	ep->epn.regs = vhub->regs + 0x200 + (i * 0x10);
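	/*
	 * Note (derived from the line above): each generic endpoint owns
	 * a 0x10-byte register window starting at offset 0x200 from the
	 * vhub register base, so generic EP i lives at 0x200 + i * 0x10.
	 */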