bcm43xx_dma.c
err_destroy_tx0:
    bcm43xx_destroy_dmaring(dma->tx_ring0);
    dma->tx_ring0 = NULL;
no_dma:
#ifdef CONFIG_BCM43XX_PIO
    printk(KERN_WARNING PFX "DMA not supported on this device."
                            " Falling back to PIO.\n");
    bcm->__using_pio = 1;
    return -ENOSYS;
#else
    printk(KERN_ERR PFX "FATAL: DMA not supported and PIO not configured. "
                        "Please recompile the driver with PIO support.\n");
    return -ENODEV;
#endif /* CONFIG_BCM43XX_PIO */
}

/* Generate a cookie for the TX header. */
static u16 generate_cookie(struct bcm43xx_dmaring *ring,
                           int slot)
{
    u16 cookie = 0x1000;

    /* Use the upper 4 bits of the cookie as
     * DMA controller ID and store the slot number
     * in the lower 12 bits.
     * Note that the cookie must never be 0, as this
     * is a special value used in RX path.
     */
    switch (ring->index) {
    case 0:
        cookie = 0xA000;
        break;
    case 1:
        cookie = 0xB000;
        break;
    case 2:
        cookie = 0xC000;
        break;
    case 3:
        cookie = 0xD000;
        break;
    case 4:
        cookie = 0xE000;
        break;
    case 5:
        cookie = 0xF000;
        break;
    }
    assert(((u16)slot & 0xF000) == 0x0000);
    cookie |= (u16)slot;

    return cookie;
}

/* Inspect a cookie and find out to which controller/slot it belongs. */
static
struct bcm43xx_dmaring * parse_cookie(struct bcm43xx_private *bcm,
                                      u16 cookie, int *slot)
{
    struct bcm43xx_dma *dma = bcm43xx_current_dma(bcm);
    struct bcm43xx_dmaring *ring = NULL;

    switch (cookie & 0xF000) {
    case 0xA000:
        ring = dma->tx_ring0;
        break;
    case 0xB000:
        ring = dma->tx_ring1;
        break;
    case 0xC000:
        ring = dma->tx_ring2;
        break;
    case 0xD000:
        ring = dma->tx_ring3;
        break;
    case 0xE000:
        ring = dma->tx_ring4;
        break;
    case 0xF000:
        ring = dma->tx_ring5;
        break;
    default:
        assert(0);
    }
    *slot = (cookie & 0x0FFF);
    assert(*slot >= 0 && *slot < ring->nr_slots);

    return ring;
}

static void dmacontroller_poke_tx(struct bcm43xx_dmaring *ring,
                                  int slot)
{
    u16 offset;
    int descsize;

    /* Everything is ready to start. Buffers are DMA mapped and
     * associated with slots.
     * "slot" is the last slot of the new frame we want to transmit.
     * Close your seat belts now, please.
     */
    wmb();
    slot = next_slot(ring, slot);
    offset = (ring->dma64) ? BCM43xx_DMA64_TXINDEX : BCM43xx_DMA32_TXINDEX;
    descsize = (ring->dma64) ? sizeof(struct bcm43xx_dmadesc64)
                             : sizeof(struct bcm43xx_dmadesc32);
    bcm43xx_dma_write(ring, offset,
                      (u32)(slot * descsize));
}

static void dma_tx_fragment(struct bcm43xx_dmaring *ring,
                            struct sk_buff *skb,
                            u8 cur_frag)
{
    int slot;
    struct bcm43xx_dmadesc_generic *desc;
    struct bcm43xx_dmadesc_meta *meta;
    dma_addr_t dmaaddr;
    struct sk_buff *bounce_skb;

    assert(skb_shinfo(skb)->nr_frags == 0);

    slot = request_slot(ring);
    desc = bcm43xx_dma_idx2desc(ring, slot, &meta);

    /* Add a device specific TX header. */
    assert(skb_headroom(skb) >= sizeof(struct bcm43xx_txhdr));
    /* Reserve enough headroom for the device tx header. */
    __skb_push(skb, sizeof(struct bcm43xx_txhdr));
    /* Now calculate and add the tx header.
     * The tx header includes the PLCP header.
     */
    bcm43xx_generate_txhdr(ring->bcm,
                           (struct bcm43xx_txhdr *)skb->data,
                           skb->data + sizeof(struct bcm43xx_txhdr),
                           skb->len - sizeof(struct bcm43xx_txhdr),
                           (cur_frag == 0),
                           generate_cookie(ring, slot));

    dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
    if (dma_mapping_error(dmaaddr) ||
        dmaaddr + skb->len > ring->bcm->dma_mask) {
        /* chip cannot handle DMA to/from > 1GB, use bounce buffer (copied from b44 driver) */
        if (!dma_mapping_error(dmaaddr))
            unmap_descbuffer(ring, dmaaddr, skb->len, 1);
        bounce_skb = __dev_alloc_skb(skb->len, GFP_ATOMIC|GFP_DMA);
        if (!bounce_skb)
            return;
        dmaaddr = map_descbuffer(ring, bounce_skb->data, bounce_skb->len, 1);
        if (dma_mapping_error(dmaaddr) ||
            dmaaddr + skb->len > ring->bcm->dma_mask) {
            if (!dma_mapping_error(dmaaddr))
                unmap_descbuffer(ring, dmaaddr, skb->len, 1);
            dev_kfree_skb_any(bounce_skb);
            assert(0);
            return;
        }
        memcpy(skb_put(bounce_skb, skb->len), skb->data, skb->len);
        dev_kfree_skb_any(skb);
        skb = bounce_skb;
    }

    meta->skb = skb;
    meta->dmaaddr = dmaaddr;

    fill_descriptor(ring, desc, dmaaddr,
                    skb->len, 1, 1, 1);

    /* Now transfer the whole frame. */
    dmacontroller_poke_tx(ring, slot);
}

int bcm43xx_dma_tx(struct bcm43xx_private *bcm,
                   struct ieee80211_txb *txb)
{
    /* We just received a packet from the kernel network subsystem.
     * Add headers and DMA map the memory. Poke
     * the device to send the stuff.
     * Note that this is called from atomic context.
     */
    struct bcm43xx_dmaring *ring = bcm43xx_current_dma(bcm)->tx_ring1;
    u8 i;
    struct sk_buff *skb;

    assert(ring->tx);
    if (unlikely(free_slots(ring) < txb->nr_frags)) {
        /* The queue should be stopped,
         * if we are low on free slots.
         * If this ever triggers, we have to lower the suspend_mark.
         */
        dprintkl(KERN_ERR PFX "Out of DMA descriptor slots!\n");
        return -ENOMEM;
    }

    for (i = 0; i < txb->nr_frags; i++) {
        skb = txb->fragments[i];
        /* Take skb from ieee80211_txb_free */
        txb->fragments[i] = NULL;
        dma_tx_fragment(ring, skb, i);
    }
    ieee80211_txb_free(txb);

    return 0;
}

void bcm43xx_dma_handle_xmitstatus(struct bcm43xx_private *bcm,
                                   struct bcm43xx_xmitstatus *status)
{
    struct bcm43xx_dmaring *ring;
    struct bcm43xx_dmadesc_generic *desc;
    struct bcm43xx_dmadesc_meta *meta;
    int is_last_fragment;
    int slot;
    u32 tmp;

    ring = parse_cookie(bcm, status->cookie, &slot);
    assert(ring);
    assert(ring->tx);
    while (1) {
        assert(slot >= 0 && slot < ring->nr_slots);
        desc = bcm43xx_dma_idx2desc(ring, slot, &meta);

        if (ring->dma64) {
            tmp = le32_to_cpu(desc->dma64.control0);
            is_last_fragment = !!(tmp & BCM43xx_DMA64_DCTL0_FRAMEEND);
        } else {
            tmp = le32_to_cpu(desc->dma32.control);
            is_last_fragment = !!(tmp & BCM43xx_DMA32_DCTL_FRAMEEND);
        }
        unmap_descbuffer(ring, meta->dmaaddr, meta->skb->len, 1);
        free_descriptor_buffer(ring, meta, 1);
        /* Everything belonging to the slot is unmapped
         * and freed, so we can return it.
         */
        return_slot(ring, slot);

        if (is_last_fragment)
            break;
        slot = next_slot(ring, slot);
    }
    bcm->stats.last_tx = jiffies;
}

static void dma_rx(struct bcm43xx_dmaring *ring,
                   int *slot)
{
    struct bcm43xx_dmadesc_generic *desc;
    struct bcm43xx_dmadesc_meta *meta;
    struct bcm43xx_rxhdr *rxhdr;
    struct sk_buff *skb;
    u16 len;
    int err;
    dma_addr_t dmaaddr;

    desc = bcm43xx_dma_idx2desc(ring, *slot, &meta);

    sync_descbuffer_for_cpu(ring, meta->dmaaddr, ring->rx_buffersize);
    skb = meta->skb;

    if (ring->index == 3) {
        /* We received an xmit status.
         */
        struct bcm43xx_hwxmitstatus *hw = (struct bcm43xx_hwxmitstatus *)skb->data;
        struct bcm43xx_xmitstatus stat;
        int i = 0;

        stat.cookie = le16_to_cpu(hw->cookie);
        while (stat.cookie == 0) {
            if (unlikely(++i >= 10000)) {
                assert(0);
                break;
            }
            udelay(2);
            barrier();
            stat.cookie = le16_to_cpu(hw->cookie);
        }
        stat.flags = hw->flags;
        stat.cnt1 = hw->cnt1;
        stat.cnt2 = hw->cnt2;
        stat.seq = le16_to_cpu(hw->seq);
        stat.unknown = le16_to_cpu(hw->unknown);

        bcm43xx_debugfs_log_txstat(ring->bcm, &stat);
        bcm43xx_dma_handle_xmitstatus(ring->bcm, &stat);
        /* recycle the descriptor buffer. */
        sync_descbuffer_for_device(ring, meta->dmaaddr, ring->rx_buffersize);

        return;
    }
    rxhdr = (struct bcm43xx_rxhdr *)skb->data;
    len = le16_to_cpu(rxhdr->frame_length);
    if (len == 0) {
        int i = 0;

        do {
            udelay(2);
            barrier();
            len = le16_to_cpu(rxhdr->frame_length);
        } while (len == 0 && i++ < 5);
        if (unlikely(len == 0)) {
            /* recycle the descriptor buffer. */
            sync_descbuffer_for_device(ring, meta->dmaaddr,
                                       ring->rx_buffersize);
            goto drop;
        }
    }
    if (unlikely(len > ring->rx_buffersize)) {
        /* The data did not fit into one descriptor buffer
         * and is split over multiple buffers.
         * This should never happen, as we try to allocate buffers
         * big enough. So simply ignore this packet.
         */
        int cnt = 0;
        s32 tmp = len;

        while (1) {
            desc = bcm43xx_dma_idx2desc(ring, *slot, &meta);
            /* recycle the descriptor buffer. */
            sync_descbuffer_for_device(ring, meta->dmaaddr,
                                       ring->rx_buffersize);
            *slot = next_slot(ring, *slot);
            cnt++;
            tmp -= ring->rx_buffersize;
            if (tmp <= 0)
                break;
        }
        printkl(KERN_ERR PFX "DMA RX buffer too small "
                "(len: %u, buffer: %u, nr-dropped: %d)\n",
                len, ring->rx_buffersize, cnt);
        goto drop;
    }
    len -= IEEE80211_FCS_LEN;

    dmaaddr = meta->dmaaddr;
    err = setup_rx_descbuffer(ring, desc, meta, GFP_ATOMIC);
    if (unlikely(err)) {
        dprintkl(KERN_ERR PFX "DMA RX: setup_rx_descbuffer() failed\n");
        sync_descbuffer_for_device(ring, dmaaddr,
                                   ring->rx_buffersize);
        goto drop;
    }

    unmap_descbuffer(ring, dmaaddr, ring->rx_buffersize, 0);
    skb_put(skb, len + ring->frameoffset);
    skb_pull(skb, ring->frameoffset);

    err = bcm43xx_rx(ring->bcm, skb, rxhdr);
    if (err) {
        dev_kfree_skb_irq(skb);
        goto drop;
    }

drop:
    return;
}

void bcm43xx_dma_rx(struct bcm43xx_dmaring *ring)
{
    u32 status;
    u16 descptr;
    int slot, current_slot;
#ifdef CONFIG_BCM43XX_DEBUG
    int used_slots = 0;
#endif

    assert(!ring->tx);
    if (ring->dma64) {
        status = bcm43xx_dma_read(ring, BCM43xx_DMA64_RXSTATUS);
        descptr = (status & BCM43xx_DMA64_RXSTATDPTR);
        current_slot = descptr / sizeof(struct bcm43xx_dmadesc64);
    } else {
        status = bcm43xx_dma_read(ring, BCM43xx_DMA32_RXSTATUS);
        descptr = (status & BCM43xx_DMA32_RXDPTR);
        current_slot = descptr / sizeof(struct bcm43xx_dmadesc32);
    }
    assert(current_slot >= 0 && current_slot < ring->nr_slots);

    slot = ring->current_slot;
    for ( ; slot != current_slot; slot = next_slot(ring, slot)) {
        dma_rx(ring, &slot);
#ifdef CONFIG_BCM43XX_DEBUG
        if (++used_slots > ring->max_used_slots)
            ring->max_used_slots = used_slots;
#endif
    }
    if (ring->dma64) {
        bcm43xx_dma_write(ring, BCM43xx_DMA64_RXINDEX,
                          (u32)(slot * sizeof(struct bcm43xx_dmadesc64)));
    } else {
        bcm43xx_dma_write(ring, BCM43xx_DMA32_RXINDEX,
                          (u32)(slot * sizeof(struct bcm43xx_dmadesc32)));
    }
    ring->current_slot = slot;
}

void bcm43xx_dma_tx_suspend(struct bcm43xx_dmaring *ring)
{
    assert(ring->tx);
    bcm43xx_power_saving_ctl_bits(ring->bcm, -1, 1);
    if (ring->dma64) {
        bcm43xx_dma_write(ring, BCM43xx_DMA64_TXCTL,
                          bcm43xx_dma_read(ring, BCM43xx_DMA64_TXCTL)
                          | BCM43xx_DMA64_TXSUSPEND);
    } else {
        bcm43xx_dma_write(ring, BCM43xx_DMA32_TXCTL,
                          bcm43xx_dma_read(ring, BCM43xx_DMA32_TXCTL)
                          | BCM43xx_DMA32_TXSUSPEND);
    }
}

void bcm43xx_dma_tx_resume(struct bcm43xx_dmaring *ring)
{
    assert(ring->tx);
    if (ring->dma64) {
        bcm43xx_dma_write(ring, BCM43xx_DMA64_TXCTL,
                          bcm43xx_dma_read(ring, BCM43xx_DMA64_TXCTL)
                          & ~BCM43xx_DMA64_TXSUSPEND);
    } else {
        bcm43xx_dma_write(ring, BCM43xx_DMA32_TXCTL,
                          bcm43xx_dma_read(ring, BCM43xx_DMA32_TXCTL)
                          & ~BCM43xx_DMA32_TXSUSPEND);
    }
    bcm43xx_power_saving_ctl_bits(ring->bcm, -1, -1);
}
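
Note on the TX cookie scheme: generate_cookie() and parse_cookie() above pack the DMA ring identity into the upper 4 bits of a 16-bit value (0xA000..0xF000 for tx_ring0..tx_ring5) and the slot number into the lower 12 bits, so the xmit-status path can map a cookie back to the exact descriptor it belongs to. The following standalone sketch is not part of the driver; encode_cookie() and decode_cookie() are hypothetical helper names that mirror that bit layout with plain integers, purely to illustrate how the round trip works.

/* Illustrative sketch only, not driver code: same bit layout as
 * generate_cookie()/parse_cookie(), using plain integers. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static uint16_t encode_cookie(int ring_index, int slot)
{
    /* Ring 0..5 maps to 0xA000..0xF000; the slot must fit in 12 bits,
     * so the cookie is never 0 (0 is reserved in the RX path). */
    assert(ring_index >= 0 && ring_index <= 5);
    assert((slot & 0xF000) == 0);
    return (uint16_t)((0xA000 + (ring_index << 12)) | slot);
}

static void decode_cookie(uint16_t cookie, int *ring_index, int *slot)
{
    *ring_index = (cookie >> 12) - 0xA;    /* upper 4 bits -> ring */
    *slot = cookie & 0x0FFF;               /* lower 12 bits -> slot */
}

int main(void)
{
    int ring, slot;
    uint16_t cookie = encode_cookie(1, 42);    /* tx_ring1, slot 42 */

    decode_cookie(cookie, &ring, &slot);
    printf("cookie=0x%04X ring=%d slot=%d\n", cookie, ring, slot);
    return 0;
}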