/* * Call internal queue directly after dropping the lock. This is * safe to do as the reply is always the last thing done when * processing a SETUP packet, usually as a tail call
*/
spin_unlock(&ep->vhub->lock); if (ep->ep.ops->queue(&ep->ep, req, GFP_ATOMIC))
rc = std_req_stall; else
rc = std_req_data;
spin_lock(&ep->vhub->lock); return rc;
}
/*
 * Build a small EP0 reply from individual byte values passed as
 * variadic arguments, then send it.
 *
 * @ep:  endpoint 0 to reply on
 * @len: number of bytes to send; each byte is passed as a separate
 *       variadic argument (fetched as int, per C default argument
 *       promotions of char-sized values)
 *
 * Returns the result of ast_vhub_reply() (a std_req_* code).
 *
 * NOTE(review): @len is compared against an unsigned index, so callers
 * are assumed to pass small non-negative lengths that fit in the EP
 * buffer — TODO confirm against the ast_vhub_simple_reply() call sites.
 */
int __ast_vhub_simple_reply(struct ast_vhub_ep *ep, int len, ...)
{
	u8 *buffer = ep->buf;
	unsigned int i;
	va_list args;

	va_start(args, len);

	/* Copy data directly into EP buffer */
	for (i = 0; i < len; i++)
		buffer[i] = va_arg(args, int);
	va_end(args);

	/* req->buf NULL means data is already there */
	return ast_vhub_reply(ep, NULL, len);
}
/* * Check our state, cancel pending requests if needed * * Note: Under some circumstances, we can get a new setup * packet while waiting for the stall ack, just accept it. * * In any case, a SETUP packet in wrong state should have * reset the HW state machine, so let's just log, nuke * requests, move on.
*/ if (ep->ep0.state != ep0_state_token &&
ep->ep0.state != ep0_state_stall) {
EPDBG(ep, "wrong state\n");
ast_vhub_nuke(ep, -EIO);
}
/* Calculate next state for EP0 */
ep->ep0.state = ep0_state_data;
ep->ep0.dir_in = !!(crq.bRequestType & USB_DIR_IN);
/* If this is the vHub, we handle requests differently */
std_req_rc = std_req_driver; if (ep->dev == NULL) { if ((crq.bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD)
std_req_rc = ast_vhub_std_hub_request(ep, &crq); elseif ((crq.bRequestType & USB_TYPE_MASK) == USB_TYPE_CLASS)
std_req_rc = ast_vhub_class_hub_request(ep, &crq); else
std_req_rc = std_req_stall;
} elseif ((crq.bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD)
std_req_rc = ast_vhub_std_dev_request(ep, &crq);
/* Act upon result */ switch(std_req_rc) { case std_req_complete: goto complete; case std_req_stall: goto stall; case std_req_driver: break; case std_req_data: return;
}
/* Pass request up to the gadget driver */ if (WARN_ON(!ep->dev)) goto stall; if (ep->dev->driver) {
EPDBG(ep, "forwarding to gadget...\n");
spin_unlock(&ep->vhub->lock);
rc = ep->dev->driver->setup(&ep->dev->gadget, &crq);
spin_lock(&ep->vhub->lock);
EPDBG(ep, "driver returned %d\n", rc);
} else {
EPDBG(ep, "no gadget for request !\n");
} if (rc >= 0) return;
/* If this is a 0-length request, it's the gadget trying to * send a status on our behalf. We take it from here.
*/ if (req->req.length == 0)
req->last_desc = 1;
/* Are we done ? Complete request, otherwise wait for next interrupt */ if (req->last_desc >= 0) {
EPVDBG(ep, "complete send %d/%d\n",
req->req.actual, req->req.length);
ep->ep0.state = ep0_state_status;
writel(VHUB_EP0_RX_BUFF_RDY, ep->ep0.ctlstat);
ast_vhub_done(ep, req, 0); return;
}
/* * Next chunk cropped to max packet size. Also check if this * is the last packet
*/
chunk = req->req.length - req->req.actual; if (chunk > ep->ep.maxpacket)
chunk = ep->ep.maxpacket; elseif ((chunk < ep->ep.maxpacket) || !req->req.zero)
req->last_desc = 1;
/* * Copy data if any (internal requests already have data * in the EP buffer)
*/ if (chunk && req->req.buf)
memcpy(ep->buf, req->req.buf + req->req.actual, chunk);
/* Are we getting more than asked ? */ if (len > remain) {
EPDBG(ep, "receiving too much (ovf: %d) !\n",
len - remain);
len = remain;
rc = -EOVERFLOW;
}
/* Hardware return wrong data len */ if (len < ep->ep.maxpacket && len != remain) {
EPDBG(ep, "using expected data len instead\n");
len = remain;
}
switch(ep->ep0.state) { case ep0_state_token: /* There should be no request queued in that state... */ if (req) {
dev_warn(dev, "request present while in TOKEN state\n");
ast_vhub_nuke(ep, -EINVAL);
}
dev_warn(dev, "ack while in TOKEN state\n");
stall = true; break; case ep0_state_data: /* Check the state bits corresponding to our direction */ if ((ep->ep0.dir_in && (stat & VHUB_EP0_TX_BUFF_RDY)) ||
(!ep->ep0.dir_in && (stat & VHUB_EP0_RX_BUFF_RDY)) ||
(ep->ep0.dir_in != in_ack)) { /* In that case, ignore interrupt */
dev_warn(dev, "irq state mismatch"); break;
} /* * We are in data phase and there's no request, something is * wrong, stall
*/ if (!req) {
dev_warn(dev, "data phase, no request\n");
stall = true; break;
}
/* We have a request, handle data transfers */ if (ep->ep0.dir_in)
ast_vhub_ep0_do_send(ep, req); else
ast_vhub_ep0_do_receive(ep, req, VHUB_EP0_RX_LEN(stat)); return; case ep0_state_status: /* Nuke stale requests */ if (req) {
dev_warn(dev, "request present while in STATUS state\n");
ast_vhub_nuke(ep, -EINVAL);
}
/* * If the status phase completes with the wrong ack, stall * the endpoint just in case, to abort whatever the host * was doing.
*/ if (ep->ep0.dir_in == in_ack) {
dev_warn(dev, "status direction mismatch\n");
stall = true;
} break; case ep0_state_stall: /* * There shouldn't be any request left, but nuke just in case * otherwise the stale request will block subsequent ones
*/
ast_vhub_nuke(ep, -EIO); break;
}
/* Reset to token state or stall */ if (stall) {
writel(VHUB_EP0_CTRL_STALL, ep->ep0.ctlstat);
ep->ep0.state = ep0_state_stall;
} else
ep->ep0.state = ep0_state_token;
}
/* Not endpoint 0 ? */ if (WARN_ON(ep->d_idx != 0)) return -EINVAL;
/* Disabled device */ if (ep->dev && !ep->dev->enabled) return -ESHUTDOWN;
/* Data, no buffer and not internal ? */ if (u_req->length && !u_req->buf && !req->internal) {
dev_warn(dev, "Request with no buffer !\n"); return -EINVAL;
}
/* EP0 can only support a single request at a time */ if (!list_empty(&ep->queue) ||
ep->ep0.state == ep0_state_token ||
ep->ep0.state == ep0_state_stall) {
dev_warn(dev, "EP0: Request in wrong state\n");
EPVDBG(ep, "EP0: list_empty=%d state=%d\n",
list_empty(&ep->queue), ep->ep0.state);
spin_unlock_irqrestore(&vhub->lock, flags); return -EBUSY;
}
/* Add request to list and kick processing if empty */
list_add_tail(&req->queue, &ep->queue);
if (ep->ep0.dir_in) { /* IN request, send data */
ast_vhub_ep0_do_send(ep, req);
} elseif (u_req->length == 0) { /* 0-len request, send completion as rx */
EPVDBG(ep, "0-length rx completion\n");
ep->ep0.state = ep0_state_status;
writel(VHUB_EP0_TX_BUFF_RDY, ep->ep0.ctlstat);
ast_vhub_done(ep, req, 0);
} else { /* OUT request, start receiver */
ast_vhub_ep0_rx_prime(ep);
}
/* Only one request can be in the queue */
req = list_first_entry_or_null(&ep->queue, struct ast_vhub_req, queue);
/* Is it ours ? */ if (req && u_req == &req->req) {
EPVDBG(ep, "dequeue req @%p\n", req);
/* * We don't have to deal with "active" as all * DMAs go to the EP buffers, not the request.
*/
ast_vhub_done(ep, req, -ECONNRESET);
/* We do stall the EP to clean things up in HW */
writel(VHUB_EP0_CTRL_STALL, ep->ep0.ctlstat);
ep->ep0.state = ep0_state_status;
ep->ep0.dir_in = false;
rc = 0;
}
spin_unlock_irqrestore(&vhub->lock, flags); return rc;
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.