/* Total size of the AR (asynchronous receive) DMA buffer ring. */
#define AR_BUFFER_SIZE	(32*1024)
/* Number of pages needed to hold AR_BUFFER_SIZE bytes. */
#define AR_BUFFERS_MIN	DIV_ROUND_UP(AR_BUFFER_SIZE, PAGE_SIZE)
/* we need at least two pages for proper list management */
#define AR_BUFFERS	(AR_BUFFERS_MIN >= 2 ? AR_BUFFERS_MIN : 2)
/*
 * A buffer that contains a block of DMA-able coherent memory used for
 * storing a portion of a DMA descriptor program.
 */
struct descriptor_buffer {
	/* Links this buffer into a context's buffer_list (in-use head, free tail). */
	struct list_head list;
	/* Bus (DMA) address of the first descriptor slot in buffer[]. */
	dma_addr_t buffer_bus;
	/* Usable capacity of buffer[] in bytes. */
	size_t buffer_size;
	/* Bytes of buffer[] currently occupied by descriptors. */
	size_t used;
	/* Flexible array of descriptors stored in this coherent block. */
	struct descriptor buffer[];
};
/* * List of page-sized buffers for storing DMA descriptors. * Head of list contains buffers in use and tail of list contains * free buffers.
*/ struct list_head buffer_list;
/* * Pointer to a buffer inside buffer_list that contains the tail * end of the current DMA program.
*/ struct descriptor_buffer *buffer_tail;
/* * The descriptor containing the branch address of the first * descriptor that has not yet been filled by the device.
*/ struct descriptor *last;
/* * The last descriptor block in the DMA program. It contains the branch * address that must be updated upon appending a new descriptor.
*/ struct descriptor *prev; int prev_z;
__iomem char *registers; int node_id; int generation; int request_generation; /* for timestamping incoming requests */ unsigned quirks; unsignedint pri_req_max;
u32 bus_time; bool bus_time_running; bool is_root; bool csr_state_setclear_abdicate; int n_ir; int n_it; /* * Spinlock for accessing fw_ohci data. Never call out of * this driver with this lock held.
*/
spinlock_t lock;
// On PCI Express Root Complex in any type of AMD Ryzen machine, VIA VT6306/6307/6308 with Asmedia
// ASM1083/1085 brings an inconvenience that the read accesses to 'Isochronous Cycle Timer' register
// (at offset 0xf0 in PCI I/O space) often causes unexpected system reboot. The mechanism is not
// clear, since the read access to the other registers is enough safe; e.g. 'Node ID' register,
// while it is probable due to detection of any type of PCIe error.
#define QUIRK_REBOOT_BY_CYCLE_TIMER_READ	0x80000000
/* In case of multiple matches in ohci_quirks[], only the first one is used. */ staticconststruct { unsignedshort vendor, device, revision, flags;
} ohci_quirks[] = {
{PCI_VENDOR_ID_AL, PCI_ANY_ID, PCI_ANY_ID,
QUIRK_CYCLE_TIMER},
switch (tcode) { case TCODE_WRITE_QUADLET_REQUEST: case TCODE_READ_QUADLET_RESPONSE: case TCODE_CYCLE_START:
snprintf(specific, sizeof(specific), " = %08x",
be32_to_cpu((__force __be32)header[3])); break; case TCODE_WRITE_BLOCK_REQUEST: case TCODE_READ_BLOCK_REQUEST: case TCODE_READ_BLOCK_RESPONSE: case TCODE_LOCK_REQUEST: case TCODE_LOCK_RESPONSE:
snprintf(specific, sizeof(specific), " %x,%x",
async_header_get_data_length(header),
async_header_get_extended_tcode(header)); break; default:
specific[0] = '\0';
}
switch (tcode) { case TCODE_STREAM_DATA:
ohci_notice(ohci, "A%c %s, %s\n",
dir, evts[evt], tcodes[tcode]); break; case TCODE_LINK_INTERNAL:
ohci_notice(ohci, "A%c %s, PHY %08x %08x\n",
dir, evts[evt], header[1], header[2]); break; case TCODE_WRITE_QUADLET_REQUEST: case TCODE_WRITE_BLOCK_REQUEST: case TCODE_READ_QUADLET_REQUEST: case TCODE_READ_BLOCK_REQUEST: case TCODE_LOCK_REQUEST:
ohci_notice(ohci, "A%c spd %x tl %02x, %04x -> %04x, %s, %s, %012llx%s\n",
dir, speed, async_header_get_tlabel(header),
async_header_get_source(header), async_header_get_destination(header),
evts[evt], tcodes[tcode], async_header_get_offset(header), specific); break; default:
ohci_notice(ohci, "A%c spd %x tl %02x, %04x -> %04x, %s, %s%s\n",
dir, speed, async_header_get_tlabel(header),
async_header_get_source(header), async_header_get_destination(header),
evts[evt], tcodes[tcode], specific);
}
}
staticinlinevoid flush_writes(conststruct fw_ohci *ohci)
{ /* Do a dummy read to flush writes. */
reg_read(ohci, OHCI1394_Version);
}
/*
 * Beware!  read_phy_reg(), write_phy_reg(), update_phy_reg(), and
 * read_paged_phy_reg() require the caller to hold ohci->phy_reg_mutex.
 * In other words, only use ohci_read_phy_reg() and ohci_update_phy_reg()
 * directly.  Exceptions are intrinsically serialized contexts like pci_probe.
 *
 * Returns the PHY register value on success, -ENODEV if the card was
 * ejected (all-ones register read), or -EBUSY if the read never completed.
 */
static int read_phy_reg(struct fw_ohci *ohci, int addr)
{
	u32 val;
	int i;

	reg_write(ohci, OHCI1394_PhyControl, OHCI1394_PhyControl_Read(addr));
	/* Poll a few times immediately, then once per millisecond. */
	for (i = 0; i < 3 + 100; i++) {
		val = reg_read(ohci, OHCI1394_PhyControl);
		if (!~val)
			return -ENODEV; /* Card was ejected. */

		if (val & OHCI1394_PhyControl_ReadDone)
			return OHCI1394_PhyControl_ReadData(val);

		/*
		 * Try a few times without waiting.  Sleeping is necessary
		 * only when the link/PHY interface is busy.
		 */
		if (i >= 3)
			msleep(1);
	}
	ohci_err(ohci, "failed to read phy reg %d\n", addr);
	dump_stack();

	return -EBUSY;
}
/*
 * Write a PHY register and wait for the write to complete.
 * Caller must hold ohci->phy_reg_mutex (see read_phy_reg() comment).
 *
 * Returns 0 on success, -ENODEV if the card was ejected (all-ones
 * register read), or -EBUSY if the write never completed.
 */
static int write_phy_reg(const struct fw_ohci *ohci, int addr, u32 val)
{
	int i;

	reg_write(ohci, OHCI1394_PhyControl,
		  OHCI1394_PhyControl_Write(addr, val));
	/* Poll a few times immediately, then once per millisecond. */
	for (i = 0; i < 3 + 100; i++) {
		val = reg_read(ohci, OHCI1394_PhyControl);
		if (!~val)
			return -ENODEV; /* Card was ejected. */

		if (!(val & OHCI1394_PhyControl_WritePending))
			return 0;

		if (i >= 3)
			msleep(1);
	}
	ohci_err(ohci, "failed to write phy reg %d, val %u\n", addr, val);
	dump_stack();

	return -EBUSY;
}
staticint update_phy_reg(struct fw_ohci *ohci, int addr, int clear_bits, int set_bits)
{ int ret = read_phy_reg(ohci, addr); if (ret < 0) return ret;
/* * The interrupt status bits are cleared by writing a one bit. * Avoid clearing them unless explicitly requested in set_bits.
*/ if (addr == 5)
clear_bits |= PHY_INT_STATUS_BITS;
wmb(); /* finish init of new descriptors before branch_address update */
d = &ctx->descriptors[ctx->last_buffer_index];
d->branch_address |= cpu_to_le32(1);
/* * We search for the buffer that contains the last AR packet DMA data written * by the controller.
*/ staticunsignedint ar_search_last_active_buffer(struct ar_context *ctx, unsignedint *buffer_offset)
{ unsignedint i, next_i, last = ctx->last_buffer_index;
__le16 res_count, next_res_count;
i = ar_first_buffer_index(ctx);
res_count = READ_ONCE(ctx->descriptors[i].res_count);
/* A buffer that is not yet completely filled must be the last one. */ while (i != last && res_count == 0) {
/* Peek at the next descriptor. */
next_i = ar_next_buffer_index(i);
rmb(); /* read descriptors in order */
next_res_count = READ_ONCE(ctx->descriptors[next_i].res_count); /* * If the next descriptor is still empty, we must stop at this * descriptor.
*/ if (next_res_count == cpu_to_le16(PAGE_SIZE)) { /* * The exception is when the DMA data for one packet is * split over three buffers; in this case, the middle * buffer's descriptor might be never updated by the * controller and look still empty, and we have to peek * at the third one.
*/ if (MAX_AR_PACKET_SIZE > PAGE_SIZE && i != last) {
next_i = ar_next_buffer_index(next_i);
rmb();
next_res_count = READ_ONCE(ctx->descriptors[next_i].res_count); if (next_res_count != cpu_to_le16(PAGE_SIZE)) goto next_buffer_is_active;
}
break;
}
next_buffer_is_active:
i = next_i;
res_count = next_res_count;
}
/* * Several controllers, notably from NEC and VIA, forget to * write ack_complete status at PHY packet reception.
*/ if (evt == OHCI1394_evt_no_status && tcode == TCODE_LINK_INTERNAL)
p.ack = ACK_COMPLETE;
/* * The OHCI bus reset handler synthesizes a PHY packet with * the new generation number when a bus reset happens (see * section 8.4.2.3). This helps us determine when a request * was received and make sure we send the response in the same * generation. We only need this for requests; for responses * we use the unique tlabel for finding the matching * request. * * Alas some chips sometimes emit bus reset packets with a * wrong generation. We set the correct generation for these * at a slightly incorrect time (in bus_reset_work).
*/ if (evt == OHCI1394_evt_bus_reset) { if (!(ohci->quirks & QUIRK_RESET_PACKET))
ohci->request_generation = (p.header[2] >> 16) & 0xff;
} elseif (ctx == &ohci->ar_request_ctx) {
fw_core_handle_request(&ohci->card, &p);
} else {
fw_core_handle_response(&ohci->card, &p);
}
i = ar_first_buffer_index(ctx); while (i != end_buffer) {
dma_sync_single_for_device(ctx->ohci->card.device,
ar_buffer_bus(ctx, i),
PAGE_SIZE, DMA_FROM_DEVICE);
ar_context_link_page(ctx, i);
i = ar_next_buffer_index(i);
}
}
if (end_buffer_index < ar_first_buffer_index(ctx)) { // The filled part of the overall buffer wraps around; handle all packets up to the // buffer end here. If the last packet wraps around, its tail will be visible after // the buffer end because the buffer start pages are mapped there again. void *buffer_end = ctx->buffer + AR_BUFFERS * PAGE_SIZE;
p = handle_ar_packets(ctx, p, buffer_end); if (p < buffer_end) goto error; // adjust p to point back into the actual buffer
p -= AR_BUFFERS * PAGE_SIZE;
}
p = handle_ar_packets(ctx, p, end); if (p != end) { if (p > end)
ar_context_abort(ctx, "inconsistent descriptor"); goto error;
}
for (i = 0; i < AR_BUFFERS; i++) {
ctx->pages[i] = dma_alloc_pages(dev, PAGE_SIZE, &dma_addr,
DMA_FROM_DEVICE, GFP_KERNEL); if (!ctx->pages[i]) goto out_of_memory;
set_page_private(ctx->pages[i], dma_addr);
dma_sync_single_for_device(dev, dma_addr, PAGE_SIZE,
DMA_FROM_DEVICE);
}
for (i = 0; i < AR_BUFFERS; i++)
pages[i] = ctx->pages[i]; for (i = 0; i < AR_WRAPAROUND_PAGES; i++)
pages[AR_BUFFERS + i] = ctx->pages[i];
ctx->buffer = vmap(pages, ARRAY_SIZE(pages), VM_MAP, PAGE_KERNEL); if (!ctx->buffer) goto out_of_memory;
/* figure out which descriptor the branch address goes in */ if (z == 2 && branch == cpu_to_le16(DESCRIPTOR_BRANCH_ALWAYS)) return d; else return d + z - 1;
}
/* If the branch address points to a buffer outside of the
* current buffer, advance to the next buffer. */ if (address < desc->buffer_bus ||
address >= desc->buffer_bus + desc->used)
desc = list_entry(desc->list.next, struct descriptor_buffer, list);
d = desc->buffer + (address - desc->buffer_bus) / sizeof(*d);
last = find_branch_descriptor(d, z);
if (!ctx->callback(ctx, d, last)) break;
if (old_desc != desc) { // If we've advanced to the next buffer, move the previous buffer to the // free list.
old_desc->used = 0;
guard(spinlock_irqsave)(&ctx->ohci->lock);
list_move_tail(&old_desc->list, &ctx->buffer_list);
}
ctx->last = last;
}
}
/* * Allocate a new buffer and add it to the list of free buffers for this * context. Must be called with ohci->lock held.
*/ staticint context_add_buffer(struct context *ctx)
{ struct descriptor_buffer *desc;
dma_addr_t bus_addr; int offset;
/* * 16MB of descriptors should be far more than enough for any DMA * program. This will catch run-away userspace or DoS attacks.
*/ if (ctx->total_allocation >= 16*1024*1024) return -ENOMEM;
desc = dmam_alloc_coherent(ctx->ohci->card.device, PAGE_SIZE, &bus_addr, GFP_ATOMIC); if (!desc) return -ENOMEM;
offset = (void *)&desc->buffer - (void *)desc; /* * Some controllers, like JMicron ones, always issue 0x20-byte DMA reads * for descriptors, even 0x10-byte ones. This can cause page faults when * an IOMMU is in use and the oversized read crosses a page boundary. * Work around this by always leaving at least 0x10 bytes of padding.
*/
desc->buffer_size = PAGE_SIZE - offset - 0x10;
desc->buffer_bus = bus_addr + offset;
desc->used = 0;
/* * We put a dummy descriptor in the buffer that has a NULL * branch address and looks like it's been sent. That way we * have a descriptor to append DMA programs to.
*/
memset(ctx->buffer_tail->buffer, 0, sizeof(*ctx->buffer_tail->buffer));
ctx->buffer_tail->buffer->control = cpu_to_le16(DESCRIPTOR_OUTPUT_LAST);
ctx->buffer_tail->buffer->transfer_status = cpu_to_le16(0x8011);
ctx->buffer_tail->used += sizeof(*ctx->buffer_tail->buffer);
ctx->last = ctx->buffer_tail->buffer;
ctx->prev = ctx->buffer_tail->buffer;
ctx->prev_z = 1;
/* Must be called with ohci->lock held */ staticstruct descriptor *context_get_descriptors(struct context *ctx, int z, dma_addr_t *d_bus)
{ struct descriptor *d = NULL; struct descriptor_buffer *desc = ctx->buffer_tail;
if (z * sizeof(*d) > desc->buffer_size) return NULL;
if (z * sizeof(*d) > desc->buffer_size - desc->used) { /* No room for the descriptor in this buffer, so advance to the
* next one. */
if (desc->list.next == &ctx->buffer_list) { /* If there is no free buffer next in the list,
* allocate one. */ if (context_add_buffer(ctx) < 0) return NULL;
}
desc = list_entry(desc->list.next, struct descriptor_buffer, list);
ctx->buffer_tail = desc;
}
d = desc->buffer + desc->used / sizeof(*d);
memset(d, 0, z * sizeof(*d));
*d_bus = desc->buffer_bus + desc->used;
/* * VT6306 incorrectly checks only the single descriptor at the * CommandPtr when the wake bit is written, so if it's a * multi-descriptor block starting with an INPUT_MORE, put a copy of * the branch address in the first descriptor. * * Not doing this for transmit contexts since not sure how it interacts * with skip addresses.
*/ if (unlikely(ctx->ohci->quirks & QUIRK_IR_WAKE) &&
d_branch != ctx->prev &&
(ctx->prev->control & cpu_to_le16(DESCRIPTOR_CMD)) ==
cpu_to_le16(DESCRIPTOR_INPUT_MORE)) {
ctx->prev->branch_address = cpu_to_le32(d_bus | z);
}
/* * This function appends a packet to the DMA queue for transmission. * Must always be called with the ochi->lock held to ensure proper * generation handling and locking around packet queue manipulation.
*/ staticint at_context_queue_packet(struct at_context *ctx, struct fw_packet *packet)
{ struct context *context = &ctx->context; struct fw_ohci *ohci = context->ohci;
dma_addr_t d_bus, payload_bus; struct driver_data *driver_data; struct descriptor *d, *last;
__le32 *header; int z, tcode;
d = context_get_descriptors(context, 4, &d_bus); if (d == NULL) {
packet->ack = RCODE_SEND_ERROR; return -1;
}
tcode = async_header_get_tcode(packet->header);
header = (__le32 *) &d[1]; switch (tcode) { case TCODE_WRITE_QUADLET_REQUEST: case TCODE_WRITE_BLOCK_REQUEST: case TCODE_WRITE_RESPONSE: case TCODE_READ_QUADLET_REQUEST: case TCODE_READ_BLOCK_REQUEST: case TCODE_READ_QUADLET_RESPONSE: case TCODE_READ_BLOCK_RESPONSE: case TCODE_LOCK_REQUEST: case TCODE_LOCK_RESPONSE:
ohci1394_at_data_set_src_bus_id(header, false);
ohci1394_at_data_set_speed(header, packet->speed);
ohci1394_at_data_set_tlabel(header, async_header_get_tlabel(packet->header));
ohci1394_at_data_set_retry(header, async_header_get_retry(packet->header));
ohci1394_at_data_set_tcode(header, tcode);
/* FIXME: Document how the locking works. */ if (ohci->generation != packet->generation) { if (packet->payload_mapped)
dma_unmap_single(ohci->card.device, payload_bus,
packet->payload_length, DMA_TO_DEVICE);
packet->ack = RCODE_GENERATION; return -1;
}
context_append(context, d, z, 4 - z);
if (context->running)
reg_write(ohci, CONTROL_SET(context->regs), CONTEXT_WAKE); else
context_run(context, 0);
return 0;
}
staticvoid at_context_flush(struct at_context *ctx)
{ // Avoid dead lock due to programming mistake. if (WARN_ON_ONCE(current_work() == &ctx->work)) return;
case OHCI1394_evt_flushed: /* * The packet was flushed should give same error as * when we try to use a stale generation count.
*/
packet->ack = RCODE_GENERATION; break;
case OHCI1394_evt_missing_ack: if (READ_ONCE(ctx->flushing))
packet->ack = RCODE_GENERATION; else { /* * Using a valid (current) generation count, but the * node is not on the bus or not sending acks.
*/
packet->ack = RCODE_NO_ACK;
} break;
case ACK_COMPLETE + 0x10: case ACK_PENDING + 0x10: case ACK_BUSY_X + 0x10: case ACK_BUSY_A + 0x10: case ACK_BUSY_B + 0x10: case ACK_DATA_ERROR + 0x10: case ACK_TYPE_ERROR + 0x10:
packet->ack = evt - 0x10; break;
case OHCI1394_evt_no_status: if (READ_ONCE(ctx->flushing)) {
packet->ack = RCODE_GENERATION; break;
}
fallthrough;
i = csr - CSR_CONFIG_ROM; if (i + length > CONFIG_ROM_SIZE) {
fw_fill_response(&response, packet->header,
RCODE_ADDRESS_ERROR, NULL, 0);
} elseif (!tcode_is_read_request(tcode)) {
fw_fill_response(&response, packet->header,
RCODE_TYPE_ERROR, NULL, 0);
} else {
fw_fill_response(&response, packet->header, RCODE_COMPLETE,
(void *) ohci->config_rom + i, length);
}
// Timestamping on behalf of the hardware.
response.timestamp = cycle_time_to_ohci_tstamp(get_cycle_time(ohci));
fw_core_handle_response(&ohci->card, &response);
}
out: // Timestamping on behalf of the hardware.
response.timestamp = cycle_time_to_ohci_tstamp(get_cycle_time(ohci));
fw_core_handle_response(&ohci->card, &response);
}
/* * Some controllers exhibit one or more of the following bugs when updating the * iso cycle timer register: * - When the lowest six bits are wrapping around to zero, a read that happens * at the same time will return garbage in the lowest ten bits. * - When the cycleOffset field wraps around to zero, the cycleCount field is * not incremented for about 60 ns. * - Occasionally, the entire register reads zero. * * To catch these, we read the register three times and ensure that the * difference between each two consecutive reads is approximately the same, i.e. * less than twice the other. Furthermore, any negative difference indicates an * error. (A PCI read should take at least 20 ticks of the 24.576 MHz timer to * execute, so we have enough precision to compute the ratio of the differences.)
*/ static u32 get_cycle_time(struct fw_ohci *ohci)
{
u32 c0, c1, c2;
u32 t0, t1, t2;
s32 diff01, diff12; int i;
if (has_reboot_by_cycle_timer_read_quirk(ohci)) return 0;
/* * This function has to be called at least every 64 seconds. The bus_time * field stores not only the upper 25 bits of the BUS_TIME register but also * the most significant bit of the cycle timer in bit 6 so that we can detect * changes in this bit.
*/ static u32 update_bus_time(struct fw_ohci *ohci)
{
u32 cycle_time_seconds = get_cycle_time(ohci) >> 25;
/* * TI TSB82AA2B and TSB12LV26 do not receive the selfID of a locally * attached TSB41BA3D phy; see http://www.ti.com/litv/pdf/sllz059. * Construct the selfID from phy register contents.
*/ staticint find_and_insert_self_id(struct fw_ohci *ohci, int self_id_count)
{ int reg, i, pos, err; bool is_initiated_reset;
u32 self_id = 0;
// link active 1, speed 3, bridge 0, contender 1, more packets 0.
phy_packet_set_packet_identifier(&self_id, PHY_PACKET_PACKET_IDENTIFIER_SELF_ID);
phy_packet_self_id_zero_set_link_active(&self_id, true);
phy_packet_self_id_zero_set_scode(&self_id, SCODE_800);
phy_packet_self_id_zero_set_contender(&self_id, true);
reg = reg_read(ohci, OHCI1394_NodeID); if (!(reg & OHCI1394_NodeID_idValid)) {
ohci_notice(ohci, "node ID not valid, new bus reset in progress\n"); return -EBUSY;
}
phy_packet_self_id_set_phy_id(&self_id, reg & 0x3f);
/* * The count in the SelfIDCount register is the number of * bytes in the self ID receive buffer. Since we also receive * the inverted quadlets and a header quadlet, we shift one * bit extra to get the actual number of self IDs.
*/
self_id_count = ohci1394_self_id_count_get_size(reg) >> 1;
for (i = 1, j = 0; j < self_id_count; i += 2, j++) {
u32 id = cond_le32_to_cpu(ohci->self_id[i], has_be_header_quirk(ohci));
u32 id2 = cond_le32_to_cpu(ohci->self_id[i + 1], has_be_header_quirk(ohci));
if (id != ~id2) { /* * If the invalid data looks like a cycle start packet, * it's likely to be the result of the cycle master * having a wrong gap count. In this case, the self IDs * so far are valid and should be processed so that the * bus manager can then correct the gap count.
*/ if (id == 0xffff008f) {
ohci_notice(ohci, "ignoring spurious self IDs\n");
self_id_count = j; break;
}
if (ohci->quirks & QUIRK_TI_SLLZ059) {
self_id_count = find_and_insert_self_id(ohci, self_id_count); if (self_id_count < 0) {
ohci_notice(ohci, "could not construct local self ID\n"); return;
}
}
if (self_id_count == 0) {
ohci_notice(ohci, "no self IDs\n"); return;
}
rmb();
/* * Check the consistency of the self IDs we just read. The * problem we face is that a new bus reset can start while we * read out the self IDs from the DMA buffer. If this happens, * the DMA buffer will be overwritten with new self IDs and we * will read out inconsistent data. The OHCI specification * (section 11.2) recommends a technique similar to * linux/seqlock.h, where we remember the generation of the * self IDs in the buffer before reading them out and compare * it to the current generation after reading them out. If * the two generations match we know we have a consistent set * of self IDs.
*/
reg = reg_read(ohci, OHCI1394_SelfIDCount);
new_generation = ohci1394_self_id_count_get_generation(reg); if (new_generation != generation) {
ohci_notice(ohci, "new bus reset, discarding self ids\n"); return;
}
// FIXME: Document how the locking works.
scoped_guard(spinlock_irq, &ohci->lock) {
ohci->generation = -1; // prevent AT packet queueing
context_stop(&ohci->at_request_ctx.context);
context_stop(&ohci->at_response_ctx.context);
}
/* * Per OHCI 1.2 draft, clause 7.2.3.3, hardware may leave unsent * packets in the AT queues and software needs to drain them. * Some OHCI 1.1 controllers (JMicron) apparently require this too.
*/
at_context_flush(&ohci->at_request_ctx);
at_context_flush(&ohci->at_response_ctx);
if (ohci->quirks & QUIRK_RESET_PACKET)
ohci->request_generation = generation;
// This next bit is unrelated to the AT context stuff but we have to do it under the // spinlock also. If a new config rom was set up before this reset, the old one is // now no longer in use and we can free it. Update the config rom pointers to point // to the current config rom and clear the next_config_rom pointer so a new update // can take place. if (ohci->next_config_rom != NULL) { if (ohci->next_config_rom != ohci->config_rom) {
free_rom = ohci->config_rom;
free_rom_bus = ohci->config_rom_bus;
}
ohci->config_rom = ohci->next_config_rom;
ohci->config_rom_bus = ohci->next_config_rom_bus;
ohci->next_config_rom = NULL;
// Restore config_rom image and manually update config_rom registers. // Writing the header quadlet will indicate that the config rom is ready, // so we do that last.
reg_write(ohci, OHCI1394_BusOptions, be32_to_cpu(ohci->config_rom[2]));
ohci->config_rom[0] = ohci->next_header;
reg_write(ohci, OHCI1394_ConfigROMhdr, be32_to_cpu(ohci->next_header));
}
if (unlikely(param_debug > 0)) {
dev_notice_ratelimited(ohci->card.device, "The debug parameter is superseded by tracepoints events, and deprecated.");
}
/* * busReset and postedWriteErr events must not be cleared yet * (OHCI 1.1 clauses 7.2.3.2 and 13.2.8.1)
*/
reg_write(ohci, OHCI1394_IntEventClear,
event & ~(OHCI1394_busReset | OHCI1394_postedWriteErr));
trace_irqs(ohci->card.index, event);
log_irqs(ohci, event); // The flag is masked again at bus_reset_work() scheduled by selfID event. if (event & OHCI1394_busReset)
reg_write(ohci, OHCI1394_IntMaskClear, OHCI1394_busReset);
if (event & OHCI1394_selfIDComplete)
queue_work(selfid_workqueue, &ohci->bus_reset_work);
if (event & OHCI1394_RQPkt)
queue_work(ohci->card.async_wq, &ohci->ar_request_ctx.work);
if (event & OHCI1394_RSPkt)
queue_work(ohci->card.async_wq, &ohci->ar_response_ctx.work);
if (event & OHCI1394_reqTxComplete)
queue_work(ohci->card.async_wq, &ohci->at_request_ctx.work);
if (event & OHCI1394_respTxComplete)
queue_work(ohci->card.async_wq, &ohci->at_response_ctx.work);
while (iso_event) {
i = ffs(iso_event) - 1;
fw_iso_context_schedule_flush_completions(&ohci->it_context_list[i].base);
--> --------------------
--> maximum size reached
--> --------------------
Messung V0.5
¤ Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.0.22Bemerkung:
(vorverarbeitet)
¤
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.