?? bcm43xx_dma.c
字號:
/*
 * Broadcom BCM43xx wireless driver
 *
 * DMA ringbuffer and descriptor allocation/management
 *
 * Copyright (c) 2005, 2006 Michael Buesch <mbuesch@freenet.de>
 *
 * Some code in this file is derived from the b44.c driver
 * Copyright (C) 2002 David S. Miller
 * Copyright (C) Pekka Pietikainen
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, Inc., 51 Franklin Steet, Fifth Floor,
 * Boston, MA 02110-1301, USA.
 */

#include "bcm43xx.h"
#include "bcm43xx_dma.h"
#include "bcm43xx_main.h"
#include "bcm43xx_debugfs.h"
#include "bcm43xx_power.h"
#include "bcm43xx_xmit.h"

#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/skbuff.h>


/* Number of currently unused descriptor slots in the ring. */
static inline int free_slots(struct bcm43xx_dmaring *ring)
{
	return (ring->nr_slots - ring->used_slots);
}

/* Next slot index after @slot, wrapping at the end of the ring.
 * @slot may be -1 to get the first slot. */
static inline int next_slot(struct bcm43xx_dmaring *ring, int slot)
{
	assert(slot >= -1 && slot <= ring->nr_slots - 1);
	if (slot == ring->nr_slots - 1)
		return 0;
	return slot + 1;
}

/* Previous slot index before @slot, wrapping at the start of the ring. */
static inline int prev_slot(struct bcm43xx_dmaring *ring, int slot)
{
	assert(slot >= 0 && slot <= ring->nr_slots - 1);
	if (slot == 0)
		return ring->nr_slots - 1;
	return slot - 1;
}

/* Request a slot for usage.
 * TX rings only. Suspends the net queue when free slots drop
 * below the suspend watermark. Caller must ensure a slot is free. */
static inline
int request_slot(struct bcm43xx_dmaring *ring)
{
	int slot;

	assert(ring->tx);
	assert(!ring->suspended);
	assert(free_slots(ring) != 0);

	slot = next_slot(ring, ring->current_slot);
	ring->current_slot = slot;
	ring->used_slots++;

	/* Check the number of available slots and suspend TX,
	 * if we are running low on free slots. */
	if (unlikely(free_slots(ring) < ring->suspend_mark)) {
		netif_stop_queue(ring->bcm->net_dev);
		ring->suspended = 1;
	}
#ifdef CONFIG_BCM43XX_DEBUG
	if (ring->used_slots > ring->max_used_slots)
		ring->max_used_slots = ring->used_slots;
#endif /* CONFIG_BCM43XX_DEBUG */

	return slot;
}

/* Return a slot to the free slots.
 * TX rings only. Wakes the net queue again once enough slots
 * are free (resume watermark). */
static inline
void return_slot(struct bcm43xx_dmaring *ring, int slot)
{
	assert(ring->tx);

	ring->used_slots--;

	/* Check if TX is suspended and check if we have
	 * enough free slots to resume it again. */
	if (unlikely(ring->suspended)) {
		if (free_slots(ring) >= ring->resume_mark) {
			ring->suspended = 0;
			netif_wake_queue(ring->bcm->net_dev);
		}
	}
}

/* MMIO base offset of DMA controller @controller_idx,
 * for either the 64-bit or the 32-bit DMA engine. */
u16 bcm43xx_dmacontroller_base(int dma64bit, int controller_idx)
{
	static const u16 map64[] = {
		BCM43xx_MMIO_DMA64_BASE0,
		BCM43xx_MMIO_DMA64_BASE1,
		BCM43xx_MMIO_DMA64_BASE2,
		BCM43xx_MMIO_DMA64_BASE3,
		BCM43xx_MMIO_DMA64_BASE4,
		BCM43xx_MMIO_DMA64_BASE5,
	};
	static const u16 map32[] = {
		BCM43xx_MMIO_DMA32_BASE0,
		BCM43xx_MMIO_DMA32_BASE1,
		BCM43xx_MMIO_DMA32_BASE2,
		BCM43xx_MMIO_DMA32_BASE3,
		BCM43xx_MMIO_DMA32_BASE4,
		BCM43xx_MMIO_DMA32_BASE5,
	};

	if (dma64bit) {
		assert(controller_idx >= 0 &&
		       controller_idx < ARRAY_SIZE(map64));
		return map64[controller_idx];
	}
	assert(controller_idx >= 0 &&
	       controller_idx < ARRAY_SIZE(map32));
	return map32[controller_idx];
}

/* Map a descriptor buffer for DMA. Direction is chosen by @tx:
 * TODEVICE for TX buffers, FROMDEVICE for RX buffers. */
static inline
dma_addr_t map_descbuffer(struct bcm43xx_dmaring *ring,
			  unsigned char *buf,
			  size_t len,
			  int tx)
{
	dma_addr_t dmaaddr;
	int direction = PCI_DMA_FROMDEVICE;

	if (tx)
		direction = PCI_DMA_TODEVICE;

	dmaaddr = pci_map_single(ring->bcm->pci_dev,
				 buf, len,
				 direction);

	return dmaaddr;
}

/* Unmap a descriptor buffer. Must use the same direction
 * the buffer was mapped with in map_descbuffer(). */
static inline
void unmap_descbuffer(struct bcm43xx_dmaring *ring,
		      dma_addr_t addr,
		      size_t len,
		      int tx)
{
	if (tx) {
		pci_unmap_single(ring->bcm->pci_dev,
				 addr, len,
				 PCI_DMA_TODEVICE);
	} else {
		pci_unmap_single(ring->bcm->pci_dev,
				 addr, len,
				 PCI_DMA_FROMDEVICE);
	}
}

/* Hand an RX descriptor buffer to the CPU for reading. */
static inline
void sync_descbuffer_for_cpu(struct bcm43xx_dmaring *ring,
			     dma_addr_t addr,
			     size_t len)
{
	assert(!ring->tx);
	pci_dma_sync_single_for_cpu(ring->bcm->pci_dev,
				    addr, len, PCI_DMA_FROMDEVICE);
}

/* Hand an RX descriptor buffer back to the device.
 * FIX: this previously called pci_dma_sync_single_for_cpu() with
 * PCI_DMA_TODEVICE.  Giving a buffer back to the hardware requires
 * the ..._for_device() call, and RX buffers are mapped (and must be
 * synced) with PCI_DMA_FROMDEVICE — see map_descbuffer(). */
static inline
void sync_descbuffer_for_device(struct bcm43xx_dmaring *ring,
				dma_addr_t addr,
				size_t len)
{
	assert(!ring->tx);
	pci_dma_sync_single_for_device(ring->bcm->pci_dev,
				       addr, len, PCI_DMA_FROMDEVICE);
}

/* Unmap and free a descriptor buffer. */
static inline
void free_descriptor_buffer(struct bcm43xx_dmaring *ring,
			    struct bcm43xx_dmadesc_meta *meta,
			    int irq_context)
{
	assert(meta->skb);
	if (irq_context)
		dev_kfree_skb_irq(meta->skb);
	else
		dev_kfree_skb(meta->skb);
	meta->skb = NULL;
}

/* Allocate the coherent descriptor ring memory.
 * Falls back to kzalloc + streaming mapping if the coherent
 * allocator fails (e.g. GFP_DMA exhaustion).
 * Returns 0 on success, -ENOMEM on failure. */
static int alloc_ringmemory(struct bcm43xx_dmaring *ring)
{
	ring->descbase = pci_alloc_consistent(ring->bcm->pci_dev,
					      BCM43xx_DMA_RINGMEMSIZE,
					      &(ring->dmabase));
	if (!ring->descbase) {
		/* Allocation may have failed due to pci_alloc_consistent
		 * insisting on use of GFP_DMA, which is more restrictive
		 * than necessary... */
		struct dma_desc *rx_ring;
		dma_addr_t rx_ring_dma;

		rx_ring = kzalloc(BCM43xx_DMA_RINGMEMSIZE, GFP_KERNEL);
		if (!rx_ring)
			goto out_err;

		rx_ring_dma = pci_map_single(ring->bcm->pci_dev, rx_ring,
					     BCM43xx_DMA_RINGMEMSIZE,
					     PCI_DMA_BIDIRECTIONAL);

		if (pci_dma_mapping_error(rx_ring_dma) ||
		    rx_ring_dma + BCM43xx_DMA_RINGMEMSIZE > ring->bcm->dma_mask) {
			/* Sigh... the mapping landed above the device's
			 * DMA mask; unmap and retry once. */
			if (!pci_dma_mapping_error(rx_ring_dma))
				pci_unmap_single(ring->bcm->pci_dev,
						 rx_ring_dma,
						 BCM43xx_DMA_RINGMEMSIZE,
						 PCI_DMA_BIDIRECTIONAL);
			rx_ring_dma = pci_map_single(ring->bcm->pci_dev,
						     rx_ring,
						     BCM43xx_DMA_RINGMEMSIZE,
						     PCI_DMA_BIDIRECTIONAL);
			if (pci_dma_mapping_error(rx_ring_dma) ||
			    rx_ring_dma + BCM43xx_DMA_RINGMEMSIZE > ring->bcm->dma_mask) {
				assert(0);
				if (!pci_dma_mapping_error(rx_ring_dma))
					pci_unmap_single(ring->bcm->pci_dev,
							 rx_ring_dma,
							 BCM43xx_DMA_RINGMEMSIZE,
							 PCI_DMA_BIDIRECTIONAL);
				goto out_err;
			}
		}

		/* NOTE(review): when this fallback path is taken, descbase
		 * comes from kzalloc + pci_map_single, yet free_ringmemory()
		 * unconditionally calls dma_free_coherent() — verify the
		 * teardown path handles the fallback case elsewhere. */
		ring->descbase = rx_ring;
		ring->dmabase = rx_ring_dma;
	}
	memset(ring->descbase, 0, BCM43xx_DMA_RINGMEMSIZE);

	return 0;
out_err:
	printk(KERN_ERR PFX "DMA ringmemory allocation failed\n");
	return -ENOMEM;
}

/* Free the coherent descriptor ring memory. */
static void free_ringmemory(struct bcm43xx_dmaring *ring)
{
	struct device *dev = &(ring->bcm->pci_dev->dev);

	dma_free_coherent(dev, BCM43xx_DMA_RINGMEMSIZE,
			  ring->descbase, ring->dmabase);
}

/* Reset the RX DMA channel */
int bcm43xx_dmacontroller_rx_reset(struct bcm43xx_private *bcm,
				   u16 mmio_base, int dma64)
{
	int i;
	u32 value;
	u16 offset;

	offset = dma64 ? BCM43xx_DMA64_RXCTL : BCM43xx_DMA32_RXCTL;
	bcm43xx_write32(bcm, mmio_base + offset, 0);
	/* Poll until the controller reports the disabled state;
	 * i == -1 flags success. */
	for (i = 0; i < 1000; i++) {
		offset = dma64 ? BCM43xx_DMA64_RXSTATUS : BCM43xx_DMA32_RXSTATUS;
		value = bcm43xx_read32(bcm, mmio_base + offset);
		if (dma64) {
			value &= BCM43xx_DMA64_RXSTAT;
			if (value == BCM43xx_DMA64_RXSTAT_DISABLED) {
				i = -1;
				break;
			}
		} else {
			value &= BCM43xx_DMA32_RXSTATE;
			if (value == BCM43xx_DMA32_RXSTAT_DISABLED) {
				i = -1;
				break;
			}
		}
		udelay(10);
	}
	if (i != -1) {
		printk(KERN_ERR PFX "Error: Wait on DMA RX status timed out.\n");
		return -ENODEV;
	}

	return 0;
}

/* Reset the TX DMA channel */
int bcm43xx_dmacontroller_tx_reset(struct bcm43xx_private *bcm,
				   u16 mmio_base, int dma64)
{
	int i;
	u32 value;
	u16 offset;

	/* First wait for any in-flight transmission to settle. */
	for (i = 0; i < 1000; i++) {
		offset = dma64 ? BCM43xx_DMA64_TXSTATUS : BCM43xx_DMA32_TXSTATUS;
		value = bcm43xx_read32(bcm, mmio_base + offset);
		if (dma64) {
			value &= BCM43xx_DMA64_TXSTAT;
			if (value == BCM43xx_DMA64_TXSTAT_DISABLED ||
			    value == BCM43xx_DMA64_TXSTAT_IDLEWAIT ||
			    value == BCM43xx_DMA64_TXSTAT_STOPPED)
				break;
		} else {
			value &= BCM43xx_DMA32_TXSTATE;
			if (value == BCM43xx_DMA32_TXSTAT_DISABLED ||
			    value == BCM43xx_DMA32_TXSTAT_IDLEWAIT ||
			    value == BCM43xx_DMA32_TXSTAT_STOPPED)
				break;
		}
		udelay(10);
	}
	offset = dma64 ? BCM43xx_DMA64_TXCTL : BCM43xx_DMA32_TXCTL;
	bcm43xx_write32(bcm, mmio_base + offset, 0);
	/* Now poll until the controller reports the disabled state;
	 * i == -1 flags success. */
	for (i = 0; i < 1000; i++) {
		offset = dma64 ? BCM43xx_DMA64_TXSTATUS : BCM43xx_DMA32_TXSTATUS;
		value = bcm43xx_read32(bcm, mmio_base + offset);
		if (dma64) {
			value &= BCM43xx_DMA64_TXSTAT;
			if (value == BCM43xx_DMA64_TXSTAT_DISABLED) {
				i = -1;
				break;
			}
		} else {
			value &= BCM43xx_DMA32_TXSTATE;
			if (value == BCM43xx_DMA32_TXSTAT_DISABLED) {
				i = -1;
				break;
			}
		}
		udelay(10);
	}
	if (i != -1) {
		printk(KERN_ERR PFX "Error: Wait on DMA TX status timed out.\n");
		return -ENODEV;
	}
	/* ensure the reset is completed. */
	udelay(300);

	return 0;
}

/* Fill a hardware descriptor (32-bit or 64-bit layout, depending
 * on ring->dma64) with the buffer address and control flags. */
static void fill_descriptor(struct bcm43xx_dmaring *ring,
			    struct bcm43xx_dmadesc_generic *desc,
			    dma_addr_t dmaaddr,
			    u16 bufsize,
			    int start, int end, int irq)
{
	int slot;

	slot = bcm43xx_dma_desc2idx(ring, desc);
	assert(slot >= 0 && slot < ring->nr_slots);

	if (ring->dma64) {
		u32 ctl0 = 0, ctl1 = 0;
		u32 addrlo, addrhi;
		u32 addrext;

		addrlo = (u32)(dmaaddr & 0xFFFFFFFF);
		addrhi = (((u64)dmaaddr >> 32) & ~BCM43xx_DMA64_ROUTING);
		addrext = (((u64)dmaaddr >> 32) >> BCM43xx_DMA64_ROUTING_SHIFT);
		addrhi |= ring->routing;
		if (slot == ring->nr_slots - 1)
			ctl0 |= BCM43xx_DMA64_DCTL0_DTABLEEND;
		if (start)
			ctl0 |= BCM43xx_DMA64_DCTL0_FRAMESTART;
		if (end)
			ctl0 |= BCM43xx_DMA64_DCTL0_FRAMEEND;
		if (irq)
			ctl0 |= BCM43xx_DMA64_DCTL0_IRQ;
		ctl1 |= (bufsize - ring->frameoffset)
			& BCM43xx_DMA64_DCTL1_BYTECNT;
		ctl1 |= (addrext << BCM43xx_DMA64_DCTL1_ADDREXT_SHIFT)
			& BCM43xx_DMA64_DCTL1_ADDREXT_MASK;
		desc->dma64.control0 = cpu_to_le32(ctl0);
		desc->dma64.control1 = cpu_to_le32(ctl1);
		desc->dma64.address_low = cpu_to_le32(addrlo);
		desc->dma64.address_high = cpu_to_le32(addrhi);
	} else {
		u32 ctl;
		u32 addr;
		u32 addrext;

		addr = (u32)(dmaaddr & ~BCM43xx_DMA32_ROUTING);
		addrext = (u32)(dmaaddr & BCM43xx_DMA32_ROUTING)
			  >> BCM43xx_DMA32_ROUTING_SHIFT;
		addr |= ring->routing;
		ctl = (bufsize - ring->frameoffset)
		      & BCM43xx_DMA32_DCTL_BYTECNT;
		if (slot == ring->nr_slots - 1)
			ctl |= BCM43xx_DMA32_DCTL_DTABLEEND;
		if (start)
			ctl |= BCM43xx_DMA32_DCTL_FRAMESTART;
		if (end)
			ctl |= BCM43xx_DMA32_DCTL_FRAMEEND;
		if (irq)
			ctl |= BCM43xx_DMA32_DCTL_IRQ;
		ctl |= (addrext << BCM43xx_DMA32_DCTL_ADDREXT_SHIFT)
		       & BCM43xx_DMA32_DCTL_ADDREXT_MASK;
		desc->dma32.control = cpu_to_le32(ctl);
?? 快捷鍵說明
復制代碼
Ctrl + C
搜索代碼
Ctrl + F
全屏模式
F11
切換主題
Ctrl + Shift + D
顯示快捷鍵
?
增大字號
Ctrl + =
減小字號
Ctrl + -