/* Required number of TX DMA slots per TX frame.
 * This currently is 2, because we put the header and the ieee80211 frame
 * into separate slots. */
#define TX_SLOTS_PER_FRAME 2
/*
 * Allocate the coherent DMA memory backing a descriptor ring.
 * Returns 0 on success or -ENOMEM if the allocation fails.
 *
 * NOTE(review): this span is a garbled extraction. Partway through, the
 * text jumps from alloc_ringmemory() into the body of a different helper
 * (a TX-engine reset routine polling TXSTATUS). The missing lines must be
 * restored from a pristine copy; the surviving code is kept unchanged
 * below, only reflowed and commented.
 */
static int alloc_ringmemory(struct b43_dmaring *ring)
{
	/* The specs call for 4K buffers for 30- and 32-bit DMA with 4K
	 * alignment and 8K buffers for 64-bit DMA with 8K alignment.
	 * In practice we could use smaller buffers for the latter, but the
	 * alignment is really important because of the hardware bug. If bit
	 * 0x00001000 is used in DMA address, some hardware (like BCM4331)
	 * copies that bit into B43_DMA64_RXSTATUS and we get false values from
	 * B43_DMA64_RXSTATDPTR. Let's just use 8K buffers even if we don't use
	 * more than 256 slots for ring.
	 */
	u16 ring_mem_size = (ring->type == B43_DMA_64BIT) ?
		B43_DMA64_RINGMEMSIZE : B43_DMA32_RINGMEMSIZE;

	ring->descbase = dma_alloc_coherent(ring->dev->dev->dma_dev,
					    ring_mem_size, &(ring->dmabase),
					    GFP_KERNEL);
	if (!ring->descbase)
		return -ENOMEM;

	/* NOTE(review): extraction jump — from here on the code belongs to a
	 * TX DMA reset helper, not to alloc_ringmemory(). It polls the TX
	 * status register until the engine reports an idle-ish state. */
	for (i = 0; i < 10; i++) {
		offset = (type == B43_DMA_64BIT) ? B43_DMA64_TXSTATUS :
			 B43_DMA32_TXSTATUS;
		value = b43_read32(dev, mmio_base + offset);
		if (type == B43_DMA_64BIT) {
			value &= B43_DMA64_TXSTAT;
			if (value == B43_DMA64_TXSTAT_DISABLED ||
			    value == B43_DMA64_TXSTAT_IDLEWAIT ||
			    value == B43_DMA64_TXSTAT_STOPPED)
				break;
		} else {
			value &= B43_DMA32_TXSTATE;
			if (value == B43_DMA32_TXSTAT_DISABLED ||
			    value == B43_DMA32_TXSTAT_IDLEWAIT ||
			    value == B43_DMA32_TXSTAT_STOPPED)
				break;
		}
		msleep(1);
	}
	/* Write 0 to TXCTL to stop the engine, then poll until the status
	 * register reports DISABLED. */
	offset = (type == B43_DMA_64BIT) ? B43_DMA64_TXCTL : B43_DMA32_TXCTL;
	b43_write32(dev, mmio_base + offset, 0);
	for (i = 0; i < 10; i++) {
		offset = (type == B43_DMA_64BIT) ? B43_DMA64_TXSTATUS :
			 B43_DMA32_TXSTATUS;
		value = b43_read32(dev, mmio_base + offset);
		if (type == B43_DMA_64BIT) {
			value &= B43_DMA64_TXSTAT;
			if (value == B43_DMA64_TXSTAT_DISABLED) {
				i = -1; /* sentinel: reset observed */
				break;
			}
		} else {
			value &= B43_DMA32_TXSTATE;
			if (value == B43_DMA32_TXSTAT_DISABLED) {
				i = -1; /* sentinel: reset observed */
				break;
			}
		}
		msleep(1);
	}
	/* i stays >= 0 only if the poll loop above timed out. */
	if (i != -1) {
		b43err(dev->wl, "DMA TX reset timed out\n");
		return -ENODEV;
	}
	/* ensure the reset is completed. */
	msleep(1);

	return 0;
}
/* Check if a DMA mapping address is invalid. */
static bool b43_dma_mapping_error(struct b43_dmaring *ring,
				  dma_addr_t addr,
				  size_t buffersize, bool dma_to_device)
{
	if (unlikely(dma_mapping_error(ring->dev->dev->dma_dev, addr)))
		return true;

	/* Reject buffers that end beyond the addressable range of the
	 * ring's DMA type. */
	switch (ring->type) {
	case B43_DMA_30BIT:
		if ((u64)addr + buffersize > (1ULL << 30))
			goto address_error;
		break;
	case B43_DMA_32BIT:
		if ((u64)addr + buffersize > (1ULL << 32))
			goto address_error;
		break;
	case B43_DMA_64BIT:
		/* Currently we can't have addresses beyond
		 * 64bit in the kernel. */
		break;
	}

	/* The address is OK. */
	return false;

address_error:
	/* We can't support this address. Unmap it again. */
	unmap_descbuffer(ring, addr, buffersize, dma_to_device);
	/* NOTE(review): truncated in this extraction — the function's final
	 * "return true;" and closing brace are not visible here. */
/* Allocate the initial descbuffers.
 * This is used for an RX ring only.
 */
static int alloc_initial_descbuffers(struct b43_dmaring *ring)
{
	int i, err = -ENOMEM;
	struct b43_dmadesc_generic *desc;
	struct b43_dmadesc_meta *meta;

	/* Walk every slot, looking up its descriptor/meta pair. */
	for (i = 0; i < ring->nr_slots; i++) {
		desc = ring->ops->idx2desc(ring, i, &meta);
		/* NOTE(review): body truncated here in this extraction. */
/* Do initial setup of the DMA controller.
 * Reset the controller, write the ring busaddress
 * and switch the "enable" bit on.
 */
static int dmacontroller_setup(struct b43_dmaring *ring)
{
	int err = 0;
	u32 value;
	u32 addrext;
	bool parity = ring->dev->dma.parity;
	u32 addrlo;
	u32 addrhi;

	/* NOTE(review): extraction jump — the cleanup/free calls below
	 * belong to a ring-teardown routine, not to dmacontroller_setup();
	 * the actual setup body is missing from this extraction. */
	/* Device IRQs are disabled prior entering this function,
	 * so no need to take care of concurrency with rx handler stuff.
	 */
	dmacontroller_cleanup(ring);
	free_all_descbuffers(ring);
	free_ringmemory(ring);
/* Some hardware with 64-bit DMA seems to be bugged and looks for translation
 * bit in low address word instead of high one.
 */
static bool b43_dma_translation_in_low_word(struct b43_wldev *dev,
					    enum b43_dmatype type)
{
	/* Non-64-bit engines always use the low word. */
	if (type != B43_DMA_64BIT)
		return true;
	/* NOTE(review): the 64-bit case is missing from this extraction. */
/* Top-level DMA initialization for a wldev.
 * NOTE(review): only the DMA-mask setup survives in this extraction;
 * the ring-allocation part of the function is missing. */
int b43_dma_init(struct b43_wldev *dev)
{
	struct b43_dma *dma = &dev->dma;
	enum b43_dmatype type = b43_engine_type(dev);
	int err;

	/* Program the streaming and coherent DMA masks for the engine
	 * width; fail hard if the platform cannot satisfy them. */
	err = dma_set_mask_and_coherent(dev->dev->dma_dev, DMA_BIT_MASK(type));
	if (err) {
		b43err(dev->wl, "The machine/kernel does not support "
		       "the required %u-bit DMA mask\n", type);
		return err;
	}
/* Generate a cookie for the TX header. */
static u16 generate_cookie(struct b43_dmaring *ring, int slot)
{
	/* The upper 4 bits of the cookie carry the DMA controller ID,
	 * the lower 12 bits carry the slot number.
	 * A cookie must never be 0 (special value in the RX path) and
	 * never 0xFFFF (special value for multicast frames); biasing the
	 * ring index by one keeps the ID nibble nonzero. */
	u16 id_bits = (((u16)ring->index + 1) << 12);

	/* Slot numbers must fit into 12 bits. */
	B43_WARN_ON(slot & ~0x0FFF);

	return id_bits | (u16)slot;
}
/* Inspect a cookie and find out to which controller/slot it belongs. */
static struct b43_dmaring *parse_cookie(struct b43_wldev *dev, u16 cookie,
					int *slot)
{
	struct b43_dma *dma = &dev->dma;
	struct b43_dmaring *ring = NULL;

	/* The upper nibble selects the TX ring (inverse of
	 * generate_cookie()). */
	switch (cookie & 0xF000) {
	case 0x1000:
		ring = dma->tx_ring_AC_BK;
		break;
	case 0x2000:
		ring = dma->tx_ring_AC_BE;
		break;
	case 0x3000:
		ring = dma->tx_ring_AC_VI;
		break;
	case 0x4000:
		ring = dma->tx_ring_AC_VO;
		break;
	case 0x5000:
		ring = dma->tx_ring_mcast;
		break;
	}
	/* The lower 12 bits carry the slot index; validate both parts. */
	*slot = (cookie & 0x0FFF);
	if (unlikely(!ring || *slot < 0 || *slot >= ring->nr_slots)) {
		b43dbg(dev->wl, "TX-status contains "
		       "invalid cookie: 0x%04X\n", cookie);
		return NULL;
	}
	/* NOTE(review): truncated in this extraction — the final
	 * "return ring;" and closing brace are not visible here. */
/* NOTE(review): headless fragment — this is the tail of a TX-fragment
 * submit routine (dma_tx_fragment()-style); its signature and earlier
 * body are missing from this extraction. */
	/* Important note: If the number of used DMA slots per TX frame
	 * is changed here, the TX_SLOTS_PER_FRAME definition at the top of
	 * the file has to be updated, too!
	 */

	if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
		/* Tell the firmware about the cookie of the last
		 * mcast frame, so it can clear the more-data bit in it. */
		b43_shm_write16(ring->dev, B43_SHM_SHARED,
				B43_SHM_SH_MCASTCOOKIE, cookie);
	}
	/* Now transfer the whole frame. */
	wmb();
	ops->poke_tx(ring, next_slot(ring, slot));
	return 0;
/* Decide whether to fake a ring overflow for debug testing. */
static inline int should_inject_overflow(struct b43_dmaring *ring)
{
#ifdef CONFIG_B43_DEBUG
	if (unlikely(b43_debug(ring->dev, B43_DBG_DMAOVERFLOW))) {
		/* Check if we should inject another ringbuffer overflow
		 * to test handling of this situation in the stack. */
		unsigned long next_overflow;
		/* NOTE(review): body truncated here in this extraction. */
/* NOTE(review): headless fragment. The first part below is the interior
 * of a ring-selection helper (select_ring_by_priority()-style); the text
 * then jumps into the main TX submit path. Signatures and declarations
 * are missing from this extraction. */
	if (dev->qos_enabled) {
		/* 0 = highest priority */
		switch (queue_prio) {
		default:
			B43_WARN_ON(1);
			fallthrough;
		case 0:
			ring = dev->dma.tx_ring_AC_VO;
			break;
		case 1:
			ring = dev->dma.tx_ring_AC_VI;
			break;
		case 2:
			ring = dev->dma.tx_ring_AC_BE;
			break;
		case 3:
			ring = dev->dma.tx_ring_AC_BK;
			break;
		}
	} else
		ring = dev->dma.tx_ring_AC_BE;

	/* NOTE(review): extraction jump — from here on this is the TX
	 * submit path, not the ring-selection switch above. */
	hdr = (struct ieee80211_hdr *)skb->data;
	if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
		/* The multicast ring will be sent after the DTIM */
		ring = dev->dma.tx_ring_mcast;
		/* Set the more-data bit. Ucode will clear it on
		 * the last frame for us. */
		hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA);
	} else {
		/* Decide by priority where to put this frame. */
		ring = select_ring_by_priority(
			dev, skb_get_queue_mapping(skb));
	}

	B43_WARN_ON(!ring->tx);

	if (unlikely(ring->stopped)) {
		/* We get here only because of a bug in mac80211.
		 * Because of a race, one packet may be queued after
		 * the queue is stopped, thus we got called when we shouldn't.
		 * For now, just refuse the transmit. */
		if (b43_debug(dev, B43_DBG_DMAVERBOSE))
			b43err(dev->wl, "Packet after queue stopped\n");
		err = -ENOSPC;
		goto out;
	}

	if (WARN_ON(free_slots(ring) < TX_SLOTS_PER_FRAME)) {
		/* If we get here, we have a real error with the queue
		 * full, but queues not stopped. */
		b43err(dev->wl, "DMA queue overflow\n");
		err = -ENOSPC;
		goto out;
	}

	/* Assign the queue number to the ring (if not already done before)
	 * so TX status handling can use it. The queue to ring mapping is
	 * static, so we don't need to store it per frame. */
	ring->queue_prio = skb_get_queue_mapping(skb);

	err = dma_tx_fragment(ring, skb);
	if (unlikely(err == -ENOKEY)) {
		/* Drop this packet, as we don't have the encryption key
		 * anymore and must not transmit it unencrypted. */
		ieee80211_free_txskb(dev->wl->hw, skb);
		err = 0;
		goto out;
	}
	if (unlikely(err)) {
		b43err(dev->wl, "DMA tx mapping failure\n");
		goto out;
	}
	if ((free_slots(ring) < TX_SLOTS_PER_FRAME) ||
	    should_inject_overflow(ring)) {
		/* This TX ring is full. */
		unsigned int skb_mapping = skb_get_queue_mapping(skb);
		b43_stop_queue(dev, skb_mapping);
		dev->wl->tx_queue_stopped[skb_mapping] = true;
		ring->stopped = true;
		if (b43_debug(dev, B43_DBG_DMAVERBOSE)) {
			b43dbg(dev->wl, "Stopped TX ring %d\n", ring->index);
		}
	}
out:
/* NOTE(review): headless fragment of the TX-status handler; its signature
 * and local declarations are missing from this extraction. */
	ring = parse_cookie(dev, status->cookie, &slot);
	if (unlikely(!ring))
		return;
	B43_WARN_ON(!ring->tx);

	/* Sanity check: TX packets are processed in-order on one ring.
	 * Check if the slot deduced from the cookie really is the first
	 * used slot. */
	firstused = ring->current_slot - ring->used_slots + 1;
	if (firstused < 0)
		firstused = ring->nr_slots + firstused;

	skip = 0;
	if (unlikely(slot != firstused)) {
		/* This possibly is a firmware bug and will result in
		 * malfunction, memory leaks and/or stall of DMA functionality.
		 */
		if (slot == next_slot(ring, next_slot(ring, firstused))) {
			/* If a single header/data pair was missed, skip over
			 * the first two slots in an attempt to recover.
			 */
			slot = firstused;
			skip = 2;
			if (!err_out1) {
				/* Report the error once. */
				b43dbg(dev->wl,
				       "Skip on DMA ring %d slot %d.\n",
				       ring->index, slot);
				err_out1 = 1;
			}
		} else {
			/* More than a single header/data pair were missed.
			 * Report this error. If running with open-source
			 * firmware, then reset the controller to
			 * revive operation.
			 */
			b43dbg(dev->wl, "Out of order TX status report on DMA ring %d. Expected %d, but got %d\n",
			       ring->index, firstused, slot);
			if (dev->fw.opensource)
				b43_controller_restart(dev, "Out of order TX");
			return;
		}
	}

	ops = ring->ops;
	while (1) {
		B43_WARN_ON(slot < 0 || slot >= ring->nr_slots);
		/* get meta - ignore returned value */
		ops->idx2desc(ring, slot, &meta);

		if (b43_dma_ptr_is_poisoned(meta->skb)) {
			b43dbg(dev->wl, "Poisoned TX slot %d (first=%d) "
			       "on ring %d\n",
			       slot, firstused, ring->index);
			break;
		}

		if (meta->skb) {
			struct b43_private_tx_info *priv_info =
				b43_get_priv_tx_info(IEEE80211_SKB_CB(meta->skb));
			/* NOTE(review): text appears truncated here —
			 * priv_info is assigned but never consumed in this
			 * extraction; the unmap/free code is missing. */

		if (meta->is_last_fragment) {
			struct ieee80211_tx_info *info;

			if (unlikely(!meta->skb)) {
				/* This is a scatter-gather fragment of a frame,
				 * so the skb pointer must not be NULL.
				 */
				b43dbg(dev->wl, "TX status unexpected NULL skb "
				       "at slot %d (first=%d) on ring %d\n",
				       slot, firstused, ring->index);
				break;
			}

			info = IEEE80211_SKB_CB(meta->skb);

			/*
			 * Call back to inform the ieee80211 subsystem about
			 * the status of the transmission. When skipping over
			 * a missed TX status report, use a status structure
			 * filled with zeros to indicate that the frame was not
			 * sent (frame_count 0) and not acknowledged
			 */
			if (unlikely(skip))
				txstat = &fake;
			else
				txstat = status;
			/* NOTE(review): text appears truncated here — txstat
			 * is assigned but the code consuming it is not
			 * visible in this extraction. */

			/* skb will be freed by ieee80211_tx_status_skb().
			 * Poison our pointer. */
			meta->skb = B43_DMA_PTR_POISON;
		} else {
			/* No need to call free_descriptor_buffer here, as
			 * this is only the txhdr, which is not allocated.
			 */
			if (unlikely(meta->skb)) {
				b43dbg(dev->wl, "TX status unexpected non-NULL skb "
				       "at slot %d (first=%d) on ring %d\n",
				       slot, firstused, ring->index);
				break;
			}
		}

		/* Everything unmapped and free'd. So it's not used anymore. */
		ring->used_slots--;

		if (meta->is_last_fragment && !skip) {
			/* This is the last scatter-gather
			 * fragment of the frame. We are done. */
			break;
		}
		slot = next_slot(ring, slot);
		if (skip > 0)
			--skip;
	}
	if (ring->stopped) {
		B43_WARN_ON(free_slots(ring) < TX_SLOTS_PER_FRAME);
		ring->stopped = false;
	}

	if (dev->wl->tx_queue_stopped[ring->queue_prio]) {
		dev->wl->tx_queue_stopped[ring->queue_prio] = false;
	} else {
		/* If the driver queue is running wake the corresponding
		 * mac80211 queue. */
		b43_wake_queue(dev, ring->queue_prio);
		if (b43_debug(dev, B43_DBG_DMAVERBOSE)) {
			b43dbg(dev->wl, "Woke up TX ring %d\n", ring->index);
		}
	}
	/* Add work to the queue. */
	ieee80211_queue_work(dev->wl->hw, &dev->wl->tx_work);
}
/* NOTE(review): headless fragment of the per-slot RX handler; its
 * signature and buffer-setup code are missing from this extraction. */
	rxhdr = (struct b43_rxhdr_fw4 *)skb->data;
	len = le16_to_cpu(rxhdr->frame_len);
	if (len == 0) {
		int i = 0;
		/* Re-read the length a few times; drop the buffer if it
		 * stays zero. */
		do {
			udelay(2);
			barrier();
			len = le16_to_cpu(rxhdr->frame_len);
		} while (len == 0 && i++ < 5);
		if (unlikely(len == 0)) {
			dmaaddr = meta->dmaaddr;
			goto drop_recycle_buffer;
		}
	}
	if (unlikely(b43_rx_buffer_is_poisoned(ring, skb))) {
		/* Something went wrong with the DMA.
		 * The device did not touch the buffer and did not overwrite the poison. */
		b43dbg(ring->dev->wl, "DMA RX: Dropping poisoned buffer.\n");
		dmaaddr = meta->dmaaddr;
		goto drop_recycle_buffer;
	}
	if (unlikely(len + ring->frameoffset > ring->rx_buffersize)) {
		/* The data did not fit into one descriptor buffer
		 * and is split over multiple buffers.
		 * This should never happen, as we try to allocate buffers
		 * big enough. So simply ignore this packet.
		 */
		int cnt = 0;
		s32 tmp = len;
		/* NOTE(review): extraction truncation — cnt/tmp are declared
		 * but the multi-buffer drop loop using them is missing. */

	unmap_descbuffer(ring, dmaaddr, ring->rx_buffersize, 0);
	skb_put(skb, len + ring->frameoffset);
	skb_pull(skb, ring->frameoffset);
	b43_rx(ring->dev, skb, rxhdr);
drop:
	return;
drop_recycle_buffer:
	/* Poison and recycle the RX buffer. */
	b43_poison_rx_buffer(ring, skb);
	sync_descbuffer_for_device(ring, dmaaddr, ring->rx_buffersize);
}
/* Recover from a full RX ring.
 * The device has filled all buffers; drop all packets and let TCP
 * decrease speed. Decrementing the RX index by one slot lets the device
 * see all slots as free again.
 *
 * TODO: How to increase rx_drop in mac80211?
 */
void b43_dma_handle_rx_overflow(struct b43_dmaring *ring)
{
	int cur;

	/* This recovery only makes sense on an RX ring. */
	B43_WARN_ON(ring->tx);

	cur = ring->ops->get_current_rxslot(ring);
	ring->ops->set_current_rxslot(ring, prev_slot(ring, cur));
}
/* Drain the RX ring. */
void b43_dma_rx(struct b43_dmaring *ring)
{
	const struct b43_dma_ops *ops = ring->ops;
	int slot, current_slot;
	int used_slots = 0;
	/* NOTE(review): body truncated here in this extraction. */
/* Enable/Disable Direct FIFO Receive Mode (PIO) on a RX engine.
 * This is called from PIO code, so DMA structures are not available. */
void b43_dma_direct_fifo_rx(struct b43_wldev *dev,
			    unsigned int engine_index, bool enable)
{
	enum b43_dmatype type;
	u16 mmio_base;
	/* NOTE(review): body truncated here in this extraction. */
/*
 * NOTE(review): trailing non-code residue from the web page this file was
 * extracted from (a German disclaimer), preserved in translation as a
 * comment so it no longer sits as bare prose in a C file:
 * "The information on this web page was compiled carefully to the best of
 * our knowledge. However, neither completeness, correctness, nor quality
 * of the provided information is guaranteed.
 * Note: The color syntax highlighting and the measurement are still
 * experimental."
 */