/* If device is detached then speed will be USB_SPEED_UNKNOWN.*/ return USB_SPEED_UNKNOWN;
}
/*
 * Given a port state, return a value that, if written back to the port
 * status and control register, would leave the port in that same state.
 *
 * Read Only (RO) bits must be preserved, as must read/write bits where
 * writing 0 clears and writing 1 sets (RWS).  For every other bit type
 * (RW1S, RW1CS, RW, and RZ) writing a '0' has no effect, so they are
 * simply masked out.
 */
u32 cdnsp_port_state_to_neutral(u32 state)
{
	/* Keep only the RO and RWS bits; all other bit classes ignore 0. */
	return state & (CDNSP_PORT_RO | CDNSP_PORT_RWS);
}
/**
 * cdnsp_find_next_ext_cap - Find the offset of the extended capabilities
 *                           with capability ID id.
 * @base: PCI MMIO registers base address.
 * @start: Address at which to start looking, (0 or HCC_PARAMS to start at
 *         beginning of list)
 * @id: Extended capability ID to search for.
 *
 * Returns the offset of the next matching extended capability structure,
 * or 0 when the list is exhausted or the device is inaccessible.
 * Some capabilities can occur several times,
 * e.g., the EXT_CAPS_PROTOCOL, and this provides a way to find them all.
 */
int cdnsp_find_next_ext_cap(void __iomem *base, u32 start, int id)
{
	u32 offset = start;
	u32 next;
	u32 val;

	/* A zero/HCC_PARAMS start means: begin at the head of the list. */
	if (!start || start == HCC_PARAMS_OFFSET) {
		val = readl(base + HCC_PARAMS_OFFSET);
		/* All-ones read means the device has gone away. */
		if (val == ~0)
			return 0;

		offset = HCC_EXT_CAPS(val) << 2;
		if (!offset)
			return 0;
	}

	do {
		val = readl(base + offset);
		if (val == ~0)
			return 0;

		/*
		 * Skip the capability at @start itself so repeated calls can
		 * walk past a previously found match.
		 */
		if (EXT_CAPS_ID(val) == id && offset != start)
			return offset;

		next = EXT_CAPS_NEXT(val);
		offset += next << 2;
	} while (next);

	/* End of the capability list reached without a match. */
	return 0;
}
/* * Force controller into halt state. * * Disable any IRQs and clear the run/stop bit. * Controller will complete any current and actively pipelined transactions, and * should halt within 16 ms of the run/stop bit being cleared. * Read controller Halted bit in the status register to see when the * controller is finished.
*/ int cdnsp_halt(struct cdnsp_device *pdev)
{ int ret;
u32 val;
cdnsp_quiesce(pdev);
ret = readl_poll_timeout_atomic(&pdev->op_regs->status, val,
val & STS_HALT, 1,
CDNSP_MAX_HALT_USEC); if (ret) {
dev_err(pdev->dev, "ERROR: Device halt failed\n"); return ret;
}
/* * Wait for the STS_HALT Status bit to be 0 to indicate the device is * running.
*/
ret = readl_poll_timeout_atomic(&pdev->op_regs->status, temp,
!(temp & STS_HALT), 1,
CDNSP_MAX_HALT_USEC); if (ret) {
pdev->cdnsp_state = CDNSP_STATE_DYING;
dev_err(pdev->dev, "ERROR: Controller run failed\n");
}
return ret;
}
/* * Reset a halted controller. * * This resets pipelines, timers, counters, state machines, etc. * Transactions will be terminated immediately, and operational registers * will be set to their defaults.
*/ int cdnsp_reset(struct cdnsp_device *pdev)
{
u32 command;
u32 temp; int ret;
temp = readl(&pdev->op_regs->status);
if (temp == ~(u32)0) {
dev_err(pdev->dev, "Device not accessible, reset failed.\n"); return -ENODEV;
}
if ((temp & STS_HALT) == 0) {
dev_err(pdev->dev, "Controller not halted, aborting reset.\n"); return -EINVAL;
}
/* * CDNSP cannot write any doorbells or operational registers other * than status until the "Controller Not Ready" flag is cleared.
*/
ret = readl_poll_timeout_atomic(&pdev->op_regs->status, temp,
!(temp & STS_CNR), 1,
10 * 1000);
if (ret) {
dev_err(pdev->dev, "ERROR: Controller not ready to work\n"); return ret;
}
dev_dbg(pdev->dev, "Controller ready to work");
return ret;
}
/*
 * cdnsp_get_endpoint_index - Find the index for an endpoint given its
 * descriptor. Use the return value to right shift 1 for the bitmask.
 *
 * Index = (epnum * 2) + direction - 1,
 * where direction = 0 for OUT, 1 for IN.
 * For control endpoints, the IN index is used (OUT index is unused), so
 * index = (epnum * 2) + direction - 1 = (epnum * 2) + 1 - 1 = (epnum * 2)
 */
static unsigned int
cdnsp_get_endpoint_index(const struct usb_endpoint_descriptor *desc)
{
	unsigned int index = (unsigned int)usb_endpoint_num(desc);

	/* Control endpoints collapse IN and OUT onto a single index. */
	if (usb_endpoint_xfer_control(desc))
		return index * 2;

	/* Non-control: apply the formula documented above. */
	return (index * 2) + (usb_endpoint_dir_in(desc) ? 1 : 0) - 1;
}
/*
 * Find the flag for this endpoint (for use in the control context). Use the
 * endpoint index to create a bitmask. The slot context is bit 0, endpoint 0
 * is bit 1, etc.
 */
static unsigned int
cdnsp_get_endpoint_flag(const struct usb_endpoint_descriptor *desc)
{
	/* +1 because bit 0 belongs to the slot context. */
	return 1 << (cdnsp_get_endpoint_index(desc) + 1);
}
int cdnsp_ep_enqueue(struct cdnsp_ep *pep, struct cdnsp_request *preq)
{ struct cdnsp_device *pdev = pep->pdev; struct usb_request *request; int ret;
ret = usb_gadget_map_request_by_dev(pdev->dev, request, pep->direction); if (ret) {
trace_cdnsp_request_enqueue_error(preq); return ret;
}
list_add_tail(&preq->list, &pep->pending_list);
trace_cdnsp_request_enqueue(preq);
switch (usb_endpoint_type(pep->endpoint.desc)) { case USB_ENDPOINT_XFER_CONTROL:
ret = cdnsp_queue_ctrl_tx(pdev, preq); break; case USB_ENDPOINT_XFER_BULK: case USB_ENDPOINT_XFER_INT:
ret = cdnsp_queue_bulk_tx(pdev, preq); break; case USB_ENDPOINT_XFER_ISOC:
ret = cdnsp_queue_isoc_tx(pdev, preq);
}
/*
 * Remove the request's TD from the endpoint ring. This may cause the
 * controller to stop USB transfers, potentially stopping in the middle of a
 * TRB buffer. The controller should pick up where it left off in the TD,
 * unless a Set Transfer Ring Dequeue Pointer is issued.
 *
 * The TRBs that make up the buffers for the canceled request will be
 * "removed" from the ring. Since the ring is a contiguous structure, they
 * can't be physically removed. Instead, there are two options:
 *
 *  1) If the controller is in the middle of processing the request to be
 *     canceled, we simply move the ring's dequeue pointer past those TRBs
 *     using the Set Transfer Ring Dequeue Pointer command. This will be
 *     the common case, when drivers timeout on the last submitted request
 *     and attempt to cancel.
 *
 *  2) If the controller is in the middle of a different TD, we turn the TRBs
 *     into a series of 1-TRB transfer no-op TDs. No-ops shouldn't be
 *     chained. The controller will need to invalidate any TRBs it has
 *     cached after the stop endpoint command.
 *
 *  3) The TD may have completed by the time the Stop Endpoint Command
 *     completes, so software needs to handle that case too.
 */
int cdnsp_ep_dequeue(struct cdnsp_ep *pep, struct cdnsp_request *preq)
{
	struct cdnsp_device *pdev = pep->pdev;
	int ret_stop = 0;
	int ret_rem;

	trace_cdnsp_request_dequeue(preq);

	/* Only a running endpoint needs an explicit Stop Endpoint command. */
	if (GET_EP_CTX_STATE(pep->out_ctx) == EP_STATE_RUNNING)
		ret_stop = cdnsp_cmd_stop_ep(pdev, pep);

	/*
	 * Fixed here: the previous body was a spliced fragment of a context
	 * zeroing routine (undeclared ctrl_ctx/slot_ctx/ep_ctx/i), never set
	 * ret_rem, and never returned.  Actually remove the request and
	 * report the first failure.
	 */
	ret_rem = cdnsp_remove_request(pdev, preq, pep);

	return ret_rem ? ret_rem : ret_stop;
}
/* Issue a configure endpoint command and wait for it to finish. */ staticint cdnsp_configure_endpoint(struct cdnsp_device *pdev)
{ int ret;
/* Queue the command TRB, ring the command doorbell, then wait. */
cdnsp_queue_configure_endpoint(pdev, pdev->cmd.in_ctx->dma);
cdnsp_ring_cmd_db(pdev);
ret = cdnsp_wait_for_cmd_compl(pdev); if (ret) {
dev_err(pdev->dev, "ERR: unexpected command completion code 0x%x.\n", ret); return -EINVAL;
}
/*
 * NOTE(review): everything from here down appears to be the interior of a
 * DIFFERENT function (a command-completion wait loop, presumably
 * cdnsp_wait_for_cmd_compl): cmd_deq_dma, cmd_trb, flags, event,
 * event_deq_seg, cycle_state and retry are not declared in this scope, the
 * braces in this region do not balance, and cdnsp_configure_endpoint is
 * missing its final "return ret;".  Also note the garbled "staticint" token
 * on the definition line.  Recover the original function split from
 * upstream before building — do not attempt to compile this as-is.
 */
cmd_deq_dma = cdnsp_trb_virt_to_dma(pdev->cmd_ring->deq_seg, cmd_trb); if (!cmd_deq_dma) return -EINVAL;
while (1) {
flags = le32_to_cpu(event->event_cmd.flags);
/* Check the owner of the TRB. */ if ((flags & TRB_CYCLE) != cycle_state) { /* * Give some extra time to get chance controller * to finish command before returning error code. * Checking CMD_RING_BUSY is not sufficient because * this bit is cleared to '0' when the Command * Descriptor has been executed by controller * and not when command completion event has * be added to event ring.
*/ if (retry--) {
udelay(20); continue;
}
return -EINVAL;
}
cmd_dma = le64_to_cpu(event->event_cmd.cmd_trb);
/* * Check whether the completion event is for last queued * command.
*/ if (TRB_FIELD_TO_TYPE(flags) != TRB_COMPLETION ||
cmd_dma != (u64)cmd_deq_dma) { if (!cdnsp_last_trb_on_seg(event_deq_seg, event)) {
event++; continue;
}
if (cdnsp_last_trb_on_ring(pdev->event_ring,
event_deq_seg, event))
cycle_state ^= 1;
/* NOTE(review): the advance of event_deq_seg/event and the closing of the
 * mismatch branch appear to be missing here — confirm against upstream. */
pdev->cmd.status = GET_COMP_CODE(le32_to_cpu(event->event_cmd.status)); if (pdev->cmd.status == COMP_SUCCESS) return 0;
return -pdev->cmd.status;
}
}
int cdnsp_halt_endpoint(struct cdnsp_device *pdev, struct cdnsp_ep *pep, int value)
{ int ret;
trace_cdnsp_ep_halt(value ? "Set" : "Clear");
ret = cdnsp_cmd_stop_ep(pdev, pep); if (ret) return ret;
if (value) { if (GET_EP_CTX_STATE(pep->out_ctx) == EP_STATE_STOPPED) {
cdnsp_queue_halt_endpoint(pdev, pep->idx);
cdnsp_ring_cmd_db(pdev);
ret = cdnsp_wait_for_cmd_compl(pdev);
}
/* Fix up Context Entries field. Minimum value is EP0 == BIT(1). */
slot_ctx = cdnsp_get_slot_ctx(&pdev->in_ctx); for (i = CDNSP_ENDPOINTS_NUM; i >= 1; i--) {
__le32 le32 = cpu_to_le32(BIT(i));
/*
 * This submits a Reset Device Command, which will set the device state to 0,
 * set the device address to 0, and disable all the endpoints except the
 * default control endpoint. The USB core should come back and call
 * cdnsp_setup_device(), and then re-set up the configuration.
 */
int cdnsp_reset_device(struct cdnsp_device *pdev)
{
	struct cdnsp_slot_ctx *slot_ctx;
	int slot_state;
	int ret, i;

	/* If device is not setup, there is no point in resetting it. */
	slot_ctx = cdnsp_get_slot_ctx(&pdev->out_ctx);
	slot_state = GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state));
	trace_cdnsp_reset_device(slot_ctx);

	/*
	 * During Reset Device command controller shall transition the
	 * endpoint ep0 to the Running State.
	 */
	pdev->eps[0].ep_state &= ~(EP_STOPPED | EP_HALTED);
	pdev->eps[0].ep_state |= EP_ENABLED;

	if (slot_state <= SLOT_STATE_DEFAULT)
		return 0;

	cdnsp_queue_reset_device(pdev);
	cdnsp_ring_cmd_db(pdev);
	ret = cdnsp_wait_for_cmd_compl(pdev);

	/*
	 * After Reset Device command all not default endpoints
	 * are in Disabled state.
	 */
	for (i = 1; i < CDNSP_ENDPOINTS_NUM; ++i)
		pdev->eps[i].ep_state |= EP_STOPPED | EP_UNCONFIGURED;

	trace_cdnsp_handle_cmd_reset_dev(slot_ctx);

	if (ret)
		dev_err(pdev->dev, "Reset device failed with error code %d",
			ret);

	return ret;
}
/*
 * Sets the MaxPStreams field and the Linear Stream Array field.
 * Sets the dequeue pointer to the stream context array.
 *
 * Fixed here: the definition line was garbled ("staticvoid"), which does
 * not compile.
 */
static void cdnsp_setup_streams_ep_input_ctx(struct cdnsp_device *pdev,
					     struct cdnsp_ep_ctx *ep_ctx,
					     struct cdnsp_stream_info *stream_info)
{
	u32 max_primary_streams;

	/*
	 * MaxPStreams is the number of stream context array entries, not the
	 * number we're actually using. Must be in 2^(MaxPstreams + 1) format.
	 * fls(0) = 0, fls(0x1) = 1, fls(0x10) = 2, fls(0x100) = 3, etc.
	 */
	max_primary_streams = fls(stream_info->num_stream_ctxs) - 2;
	ep_ctx->ep_info &= cpu_to_le32(~EP_MAXPSTREAMS_MASK);
	ep_ctx->ep_info |= cpu_to_le32(EP_MAXPSTREAMS(max_primary_streams) |
				       EP_HAS_LSA);
	ep_ctx->deq = cpu_to_le64(stream_info->ctx_array_dma);
}
/*
 * The drivers use this function to prepare a bulk endpoints to use streams.
 *
 * Don't allow the call to succeed if endpoint only supports one stream
 * (which means it doesn't support streams at all).
 *
 * NOTE(review): this function is truncated — the actual stream-info
 * allocation and the return statement are missing, and the declaration line
 * below contains garbled "unsignedint" tokens that will not compile.
 * Recover the remainder from upstream before building.
 */ int cdnsp_alloc_streams(struct cdnsp_device *pdev, struct cdnsp_ep *pep)
{ unsignedint num_streams = usb_ss_max_streams(pep->endpoint.comp_desc); unsignedint num_stream_ctxs; int ret;
if (num_streams == 0) return 0;
if (num_streams > STREAM_NUM_STREAMS) return -EINVAL;
/* * Add two to the number of streams requested to account for * stream 0 that is reserved for controller usage and one additional * for TASK SET FULL response.
 */
num_streams += 2;
/* The stream context array size must be a power of two */
num_stream_ctxs = roundup_pow_of_two(num_streams);
/* Enable the device slot by issuing an Enable Slot command. */
int cdnsp_enable_slot(struct cdnsp_device *pdev)
{ struct cdnsp_slot_ctx *slot_ctx; int slot_state; int ret;
/* If device is not setup, there is no point in resetting it */
slot_ctx = cdnsp_get_slot_ctx(&pdev->out_ctx);
slot_state = GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state));
/* Already enabled (slot not in the Disabled state): nothing to do. */
if (slot_state != SLOT_STATE_DISABLED) return 0;
cdnsp_queue_slot_control(pdev, TRB_ENABLE_SLOT);
cdnsp_ring_cmd_db(pdev);
ret = cdnsp_wait_for_cmd_compl(pdev); if (ret) goto show_trace;
/*
 * NOTE(review): the function is truncated here — the "show_trace" label,
 * the success path, and the final return are missing, so the goto above has
 * no target.  Recover the tail from upstream before building.
 */
/* * Issue an Address Device command with BSR=0 if setup is SETUP_CONTEXT_ONLY * or with BSR = 1 if set_address is SETUP_CONTEXT_ADDRESS.
 */ int cdnsp_setup_device(struct cdnsp_device *pdev, enum cdnsp_setup_dev setup)
{ struct cdnsp_input_control_ctx *ctrl_ctx; struct cdnsp_slot_ctx *slot_ctx; int dev_state = 0; int ret;
/* A zero slot id means the slot was never enabled. */
if (!pdev->slot_id) {
trace_cdnsp_slot_id("incorrect"); return -EINVAL;
}
/*
 * NOTE(review): the remainder of this function is a spliced fragment of a
 * different routine (it looks like the tail of a UDC start callback):
 * max_speed, driver, g and flags are not declared in this scope, the
 * declared locals ctrl_ctx/slot_ctx/dev_state are never used, and the
 * actual Address Device command sequence is missing.  Recover from
 * upstream before building.
 */
/* limit speed if necessary */
max_speed = min(driver->max_speed, g->max_speed);
ret = cdnsp_run(pdev, max_speed);
spin_unlock_irqrestore(&pdev->lock, flags);
return ret;
}
/* * Update Event Ring Dequeue Pointer: * - When all events have finished * - To avoid "Event Ring Full Error" condition
 */ void cdnsp_update_erst_dequeue(struct cdnsp_device *pdev, union cdnsp_trb *event_ring_deq,
u8 clear_ehb)
{
u64 temp_64;
dma_addr_t deq;
/*
 * NOTE(review): temp_64 is read-modified below without ever being loaded
 * from the interrupter's dequeue register first, and the final 64-bit
 * write-back of temp_64 is missing — the initial read and the trailing
 * write appear to have been lost.  Confirm against upstream.
 */
/* If necessary, update the HW's version of the event ring deq ptr. */ if (event_ring_deq != pdev->event_ring->dequeue) {
deq = cdnsp_trb_virt_to_dma(pdev->event_ring->deq_seg,
pdev->event_ring->dequeue);
temp_64 &= ERST_PTR_MASK;
temp_64 |= ((u64)deq & (u64)~ERST_PTR_MASK);
}
/* Clear the event handler busy flag (RW1C). */ if (clear_ehb)
temp_64 |= ERST_EHB; else
temp_64 &= ~ERST_EHB;
/*
 * NOTE(review): the lines below belong to a different routine (command
 * ring (re)initialization): seg, i and val_64 are not declared in this
 * scope.  This looks like two functions spliced together — recover the
 * original split from upstream.
 */
seg = pdev->cmd_ring->first_seg; for (i = 0; i < pdev->cmd_ring->num_segs; i++) {
memset(seg->trbs, 0, sizeof(union cdnsp_trb) * (TRBS_PER_SEGMENT - 1));
seg = seg->next;
}
/* Set the address in the Command Ring Control register. */
val_64 = cdnsp_read_64(&pdev->op_regs->cmd_ring);
val_64 = (val_64 & (u64)CMD_RING_RSVD_BITS) |
(pdev->cmd_ring->first_seg->dma & (u64)~CMD_RING_RSVD_BITS) |
pdev->cmd_ring->cycle_state;
cdnsp_write_64(val_64, &pdev->op_regs->cmd_ring);
}
/*
 * Drain all pending events from the event ring.
 *
 * NOTE(review): only the (garbled "staticvoid") definition line and local
 * declarations survive here — the entire body and closing brace are
 * missing.  Recover from upstream before building.
 */
staticvoid cdnsp_consume_all_events(struct cdnsp_device *pdev)
{ struct cdnsp_segment *event_deq_seg; union cdnsp_trb *event_ring_deq; union cdnsp_trb *event;
u32 cycle_bit;
/* * Stop controller. * This function is called by the gadget core when the driver is removed. * Disable slot, disable IRQs, and quiesce the controller.
 */ staticint cdnsp_gadget_udc_stop(struct usb_gadget *g)
{ struct cdnsp_device *pdev = gadget_to_cdnsp(g); unsignedlong flags;
/*
 * NOTE(review): the body below is a spliced fragment of an XBUF register
 * setup routine — reg, endpoints and pep are not declared here, and the
 * actual stop sequence (and the garbled "staticint"/"unsignedlong" tokens
 * above) must be repaired from upstream before building.
 */
/* Set to XBUF_TX_TAG_MASK_0 register. */
reg += XBUF_TX_CMD_OFFSET + (endpoints * 2 + 2) * sizeof(u32); /* Set reg to XBUF_TX_TAG_MASK_N related with this endpoint. */
reg += pep->number * sizeof(u32) * 2;
/*
 * Initialize the driver's endpoint array and register the endpoints with the
 * gadget framework.
 *
 * NOTE(review): the definition line is garbled ("staticint") and the
 * function is truncated inside the per-endpoint loop (the snprintf branch,
 * the non-ep0 path, and the function tail are missing).  Recover from
 * upstream before building.
 */
staticint cdnsp_gadget_init_endpoints(struct cdnsp_device *pdev)
{ int max_streams = HCC_MAX_PSA(pdev->hcc_params); struct cdnsp_ep *pep; int i;
INIT_LIST_HEAD(&pdev->gadget.ep_list);
if (max_streams < STREAM_LOG_STREAMS) {
dev_err(pdev->dev, "Stream size %d not supported\n",
max_streams); return -EINVAL;
}
/* Cap the advertised stream support at the driver's limit. */
max_streams = STREAM_LOG_STREAMS;
for (i = 0; i < CDNSP_ENDPOINTS_NUM; i++) { bool direction = !(i & 1); /* Start from OUT endpoint. */
u8 epnum = ((i + 1) >> 1);
if (!CDNSP_IF_EP_EXIST(pdev, epnum, direction)) continue;
pep = &pdev->eps[i];
pep->pdev = pdev;
pep->number = epnum;
pep->direction = direction; /* 0 for OUT, 1 for IN. */
/* * Ep0 is bidirectional, so ep0in and ep0out are represented by * pdev->eps[0]
 */ if (epnum == 0) {
snprintf(pep->name, sizeof(pep->name), "ep%d%s",
epnum, "BiDir");
/*
 * NOTE(review): the lines below are the interior of a controller setup
 * routine whose definition line and local declarations (ret, reg) are not
 * visible in this file — the fragment halts and resets the controller, then
 * configures DMA masks and a U1 clock-gating workaround.  Recover the
 * function header from upstream before building.
 */
/* * Override the APB timeout value to give the controller more time for * enabling UTMI clock and synchronizing APB and UTMI clock domains. * This fix is platform specific and is required to fixes issue with * reading incorrect value from PORTSC register after resuming * from L1 state.
 */
cdnsp_set_apb_timeout_value(pdev);
cdnsp_get_rev_cap(pdev);
/* Make sure the Device Controller is halted. */
ret = cdnsp_halt(pdev); if (ret) return ret;
/* Reset the internal controller memory state and registers. */
ret = cdnsp_reset(pdev); if (ret) return ret;
/* * Set dma_mask and coherent_dma_mask to 64-bits, * if controller supports 64-bit addressing.
 */ if (HCC_64BIT_ADDR(pdev->hcc_params) &&
!dma_set_mask(pdev->dev, DMA_BIT_MASK(64))) {
dev_dbg(pdev->dev, "Enabling 64-bit DMA addresses.\n");
dma_set_coherent_mask(pdev->dev, DMA_BIT_MASK(64));
} else { /* * This is to avoid error in cases where a 32-bit USB * controller is used on a 64-bit capable system.
 */
ret = dma_set_mask(pdev->dev, DMA_BIT_MASK(32)); if (ret) return ret;
/*
 * NOTE(review): the else-branch above is never closed before the lines
 * below; braces in this fragment do not balance — confirm the original
 * structure against upstream.
 */
/* * Software workaround for U1: after transition * to U1 the controller starts gating clock, and in some cases, * it causes that controller stack.
 */
reg = readl(&pdev->port3x_regs->mode_2);
reg &= ~CFG_3XPORT_U1_PIPE_CLK_GATE_EN;
writel(reg, &pdev->port3x_regs->mode_2);
return 0;
}
/*
 * Allocate and initialize the cdnsp device for the given cdns core.
 *
 * NOTE(review): the definition line is garbled ("staticint") and the
 * function is truncated after the allocation — the remaining setup and the
 * closing brace are missing.  Recover from upstream before building.
 */
staticint __cdnsp_gadget_init(struct cdns *cdns)
{ struct cdnsp_device *pdev;
u32 max_speed; int ret = -ENOMEM;
cdns_drd_gadget_on(cdns);
pdev = kzalloc(sizeof(*pdev), GFP_KERNEL); if (!pdev) return -ENOMEM;
/*
 * NOTE(review): the following is extraneous German webpage boilerplate that
 * was accidentally appended to this source file and should be removed.
 * Translation: "The information on this web page was carefully compiled to
 * the best of our knowledge. However, neither completeness, nor correctness,
 * nor quality of the provided information is guaranteed. Remark: the colored
 * syntax rendering and the measurement are still experimental."
 */