
bcm43xx_dma.c

Wireless network card driver code with good reference value. It can be used directly under linux_2.6.21; on other platforms it can serve as a reference for porting.
Language: C
Page 1 of 3
err_destroy_tx0:
	bcm43xx_destroy_dmaring(dma->tx_ring0);
	dma->tx_ring0 = NULL;
no_dma:
#ifdef CONFIG_BCM43XX_PIO
	printk(KERN_WARNING PFX "DMA not supported on this device."
				" Falling back to PIO.\n");
	bcm->__using_pio = 1;
	return -ENOSYS;
#else
	printk(KERN_ERR PFX "FATAL: DMA not supported and PIO not configured. "
			    "Please recompile the driver with PIO support.\n");
	return -ENODEV;
#endif /* CONFIG_BCM43XX_PIO */
}

/* Generate a cookie for the TX header. */
static u16 generate_cookie(struct bcm43xx_dmaring *ring,
			   int slot)
{
	u16 cookie = 0x1000;

	/* Use the upper 4 bits of the cookie as
	 * DMA controller ID and store the slot number
	 * in the lower 12 bits.
	 * Note that the cookie must never be 0, as this
	 * is a special value used in RX path.
	 */
	switch (ring->index) {
	case 0:
		cookie = 0xA000;
		break;
	case 1:
		cookie = 0xB000;
		break;
	case 2:
		cookie = 0xC000;
		break;
	case 3:
		cookie = 0xD000;
		break;
	case 4:
		cookie = 0xE000;
		break;
	case 5:
		cookie = 0xF000;
		break;
	}
	assert(((u16)slot & 0xF000) == 0x0000);
	cookie |= (u16)slot;

	return cookie;
}

/* Inspect a cookie and find out to which controller/slot it belongs. */
static
struct bcm43xx_dmaring * parse_cookie(struct bcm43xx_private *bcm,
				      u16 cookie, int *slot)
{
	struct bcm43xx_dma *dma = bcm43xx_current_dma(bcm);
	struct bcm43xx_dmaring *ring = NULL;

	switch (cookie & 0xF000) {
	case 0xA000:
		ring = dma->tx_ring0;
		break;
	case 0xB000:
		ring = dma->tx_ring1;
		break;
	case 0xC000:
		ring = dma->tx_ring2;
		break;
	case 0xD000:
		ring = dma->tx_ring3;
		break;
	case 0xE000:
		ring = dma->tx_ring4;
		break;
	case 0xF000:
		ring = dma->tx_ring5;
		break;
	default:
		assert(0);
	}
	*slot = (cookie & 0x0FFF);
	assert(*slot >= 0 && *slot < ring->nr_slots);

	return ring;
}

static void dmacontroller_poke_tx(struct bcm43xx_dmaring *ring,
				  int slot)
{
	u16 offset;
	int descsize;

	/* Everything is ready to start. Buffers are DMA mapped and
	 * associated with slots.
	 * "slot" is the last slot of the new frame we want to transmit.
	 * Close your seat belts now, please.
	 */
	wmb();
	slot = next_slot(ring, slot);
	offset = (ring->dma64) ? BCM43xx_DMA64_TXINDEX : BCM43xx_DMA32_TXINDEX;
	descsize = (ring->dma64) ? sizeof(struct bcm43xx_dmadesc64)
		: sizeof(struct bcm43xx_dmadesc32);
	bcm43xx_dma_write(ring, offset,
			(u32)(slot * descsize));
}

static void dma_tx_fragment(struct bcm43xx_dmaring *ring,
			    struct sk_buff *skb,
			    u8 cur_frag)
{
	int slot;
	struct bcm43xx_dmadesc_generic *desc;
	struct bcm43xx_dmadesc_meta *meta;
	dma_addr_t dmaaddr;
	struct sk_buff *bounce_skb;

	assert(skb_shinfo(skb)->nr_frags == 0);

	slot = request_slot(ring);
	desc = bcm43xx_dma_idx2desc(ring, slot, &meta);

	/* Add a device specific TX header. */
	assert(skb_headroom(skb) >= sizeof(struct bcm43xx_txhdr));
	/* Reserve enough headroom for the device tx header. */
	__skb_push(skb, sizeof(struct bcm43xx_txhdr));
	/* Now calculate and add the tx header.
	 * The tx header includes the PLCP header.
	 */
	bcm43xx_generate_txhdr(ring->bcm,
			       (struct bcm43xx_txhdr *)skb->data,
			       skb->data + sizeof(struct bcm43xx_txhdr),
			       skb->len - sizeof(struct bcm43xx_txhdr),
			       (cur_frag == 0),
			       generate_cookie(ring, slot));

	dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
	if (dma_mapping_error(dmaaddr) || dmaaddr + skb->len > ring->bcm->dma_mask) {
		/* chip cannot handle DMA to/from > 1GB, use bounce buffer (copied from b44 driver) */
		if (!dma_mapping_error(dmaaddr))
			unmap_descbuffer(ring, dmaaddr, skb->len, 1);
		bounce_skb = __dev_alloc_skb(skb->len, GFP_ATOMIC|GFP_DMA);
		if (!bounce_skb)
			return;
		dmaaddr = map_descbuffer(ring, bounce_skb->data, bounce_skb->len, 1);
		if (dma_mapping_error(dmaaddr) || dmaaddr + skb->len > ring->bcm->dma_mask) {
			if (!dma_mapping_error(dmaaddr))
				unmap_descbuffer(ring, dmaaddr, skb->len, 1);
			dev_kfree_skb_any(bounce_skb);
			assert(0);
			return;
		}
		memcpy(skb_put(bounce_skb, skb->len), skb->data, skb->len);
		dev_kfree_skb_any(skb);
		skb = bounce_skb;
	}

	meta->skb = skb;
	meta->dmaaddr = dmaaddr;

	fill_descriptor(ring, desc, dmaaddr,
			skb->len, 1, 1, 1);

	/* Now transfer the whole frame. */
	dmacontroller_poke_tx(ring, slot);
}

int bcm43xx_dma_tx(struct bcm43xx_private *bcm,
		   struct ieee80211_txb *txb)
{
	/* We just received a packet from the kernel network subsystem.
	 * Add headers and DMA map the memory. Poke
	 * the device to send the stuff.
	 * Note that this is called from atomic context.
	 */
	struct bcm43xx_dmaring *ring = bcm43xx_current_dma(bcm)->tx_ring1;
	u8 i;
	struct sk_buff *skb;

	assert(ring->tx);
	if (unlikely(free_slots(ring) < txb->nr_frags)) {
		/* The queue should be stopped,
		 * if we are low on free slots.
		 * If this ever triggers, we have to lower the suspend_mark.
		 */
		dprintkl(KERN_ERR PFX "Out of DMA descriptor slots!\n");
		return -ENOMEM;
	}

	for (i = 0; i < txb->nr_frags; i++) {
		skb = txb->fragments[i];
		/* Take skb from ieee80211_txb_free */
		txb->fragments[i] = NULL;
		dma_tx_fragment(ring, skb, i);
	}
	ieee80211_txb_free(txb);

	return 0;
}

void bcm43xx_dma_handle_xmitstatus(struct bcm43xx_private *bcm,
				   struct bcm43xx_xmitstatus *status)
{
	struct bcm43xx_dmaring *ring;
	struct bcm43xx_dmadesc_generic *desc;
	struct bcm43xx_dmadesc_meta *meta;
	int is_last_fragment;
	int slot;
	u32 tmp;

	ring = parse_cookie(bcm, status->cookie, &slot);
	assert(ring);
	assert(ring->tx);
	while (1) {
		assert(slot >= 0 && slot < ring->nr_slots);
		desc = bcm43xx_dma_idx2desc(ring, slot, &meta);

		if (ring->dma64) {
			tmp = le32_to_cpu(desc->dma64.control0);
			is_last_fragment = !!(tmp & BCM43xx_DMA64_DCTL0_FRAMEEND);
		} else {
			tmp = le32_to_cpu(desc->dma32.control);
			is_last_fragment = !!(tmp & BCM43xx_DMA32_DCTL_FRAMEEND);
		}
		unmap_descbuffer(ring, meta->dmaaddr, meta->skb->len, 1);
		free_descriptor_buffer(ring, meta, 1);
		/* Everything belonging to the slot is unmapped
		 * and freed, so we can return it.
		 */
		return_slot(ring, slot);

		if (is_last_fragment)
			break;
		slot = next_slot(ring, slot);
	}
	bcm->stats.last_tx = jiffies;
}

static void dma_rx(struct bcm43xx_dmaring *ring,
		   int *slot)
{
	struct bcm43xx_dmadesc_generic *desc;
	struct bcm43xx_dmadesc_meta *meta;
	struct bcm43xx_rxhdr *rxhdr;
	struct sk_buff *skb;
	u16 len;
	int err;
	dma_addr_t dmaaddr;

	desc = bcm43xx_dma_idx2desc(ring, *slot, &meta);

	sync_descbuffer_for_cpu(ring, meta->dmaaddr, ring->rx_buffersize);
	skb = meta->skb;

	if (ring->index == 3) {
		/* We received an xmit status. */
		struct bcm43xx_hwxmitstatus *hw = (struct bcm43xx_hwxmitstatus *)skb->data;
		struct bcm43xx_xmitstatus stat;
		int i = 0;

		stat.cookie = le16_to_cpu(hw->cookie);
		while (stat.cookie == 0) {
			if (unlikely(++i >= 10000)) {
				assert(0);
				break;
			}
			udelay(2);
			barrier();
			stat.cookie = le16_to_cpu(hw->cookie);
		}
		stat.flags = hw->flags;
		stat.cnt1 = hw->cnt1;
		stat.cnt2 = hw->cnt2;
		stat.seq = le16_to_cpu(hw->seq);
		stat.unknown = le16_to_cpu(hw->unknown);

		bcm43xx_debugfs_log_txstat(ring->bcm, &stat);
		bcm43xx_dma_handle_xmitstatus(ring->bcm, &stat);
		/* recycle the descriptor buffer. */
		sync_descbuffer_for_device(ring, meta->dmaaddr, ring->rx_buffersize);

		return;
	}
	rxhdr = (struct bcm43xx_rxhdr *)skb->data;
	len = le16_to_cpu(rxhdr->frame_length);
	if (len == 0) {
		int i = 0;

		do {
			udelay(2);
			barrier();
			len = le16_to_cpu(rxhdr->frame_length);
		} while (len == 0 && i++ < 5);
		if (unlikely(len == 0)) {
			/* recycle the descriptor buffer. */
			sync_descbuffer_for_device(ring, meta->dmaaddr,
						   ring->rx_buffersize);
			goto drop;
		}
	}
	if (unlikely(len > ring->rx_buffersize)) {
		/* The data did not fit into one descriptor buffer
		 * and is split over multiple buffers.
		 * This should never happen, as we try to allocate buffers
		 * big enough. So simply ignore this packet.
		 */
		int cnt = 0;
		s32 tmp = len;

		while (1) {
			desc = bcm43xx_dma_idx2desc(ring, *slot, &meta);
			/* recycle the descriptor buffer. */
			sync_descbuffer_for_device(ring, meta->dmaaddr,
						   ring->rx_buffersize);
			*slot = next_slot(ring, *slot);
			cnt++;
			tmp -= ring->rx_buffersize;
			if (tmp <= 0)
				break;
		}
		printkl(KERN_ERR PFX "DMA RX buffer too small "
			"(len: %u, buffer: %u, nr-dropped: %d)\n",
			len, ring->rx_buffersize, cnt);
		goto drop;
	}
	len -= IEEE80211_FCS_LEN;

	dmaaddr = meta->dmaaddr;
	err = setup_rx_descbuffer(ring, desc, meta, GFP_ATOMIC);
	if (unlikely(err)) {
		dprintkl(KERN_ERR PFX "DMA RX: setup_rx_descbuffer() failed\n");
		sync_descbuffer_for_device(ring, dmaaddr,
					   ring->rx_buffersize);
		goto drop;
	}

	unmap_descbuffer(ring, dmaaddr, ring->rx_buffersize, 0);
	skb_put(skb, len + ring->frameoffset);
	skb_pull(skb, ring->frameoffset);

	err = bcm43xx_rx(ring->bcm, skb, rxhdr);
	if (err) {
		dev_kfree_skb_irq(skb);
		goto drop;
	}

drop:
	return;
}

void bcm43xx_dma_rx(struct bcm43xx_dmaring *ring)
{
	u32 status;
	u16 descptr;
	int slot, current_slot;
#ifdef CONFIG_BCM43XX_DEBUG
	int used_slots = 0;
#endif

	assert(!ring->tx);
	if (ring->dma64) {
		status = bcm43xx_dma_read(ring, BCM43xx_DMA64_RXSTATUS);
		descptr = (status & BCM43xx_DMA64_RXSTATDPTR);
		current_slot = descptr / sizeof(struct bcm43xx_dmadesc64);
	} else {
		status = bcm43xx_dma_read(ring, BCM43xx_DMA32_RXSTATUS);
		descptr = (status & BCM43xx_DMA32_RXDPTR);
		current_slot = descptr / sizeof(struct bcm43xx_dmadesc32);
	}
	assert(current_slot >= 0 && current_slot < ring->nr_slots);

	slot = ring->current_slot;
	for ( ; slot != current_slot; slot = next_slot(ring, slot)) {
		dma_rx(ring, &slot);
#ifdef CONFIG_BCM43XX_DEBUG
		if (++used_slots > ring->max_used_slots)
			ring->max_used_slots = used_slots;
#endif
	}
	if (ring->dma64) {
		bcm43xx_dma_write(ring, BCM43xx_DMA64_RXINDEX,
				(u32)(slot * sizeof(struct bcm43xx_dmadesc64)));
	} else {
		bcm43xx_dma_write(ring, BCM43xx_DMA32_RXINDEX,
				(u32)(slot * sizeof(struct bcm43xx_dmadesc32)));
	}
	ring->current_slot = slot;
}

void bcm43xx_dma_tx_suspend(struct bcm43xx_dmaring *ring)
{
	assert(ring->tx);
	bcm43xx_power_saving_ctl_bits(ring->bcm, -1, 1);
	if (ring->dma64) {
		bcm43xx_dma_write(ring, BCM43xx_DMA64_TXCTL,
				bcm43xx_dma_read(ring, BCM43xx_DMA64_TXCTL)
				| BCM43xx_DMA64_TXSUSPEND);
	} else {
		bcm43xx_dma_write(ring, BCM43xx_DMA32_TXCTL,
				bcm43xx_dma_read(ring, BCM43xx_DMA32_TXCTL)
				| BCM43xx_DMA32_TXSUSPEND);
	}
}

void bcm43xx_dma_tx_resume(struct bcm43xx_dmaring *ring)
{
	assert(ring->tx);
	if (ring->dma64) {
		bcm43xx_dma_write(ring, BCM43xx_DMA64_TXCTL,
				bcm43xx_dma_read(ring, BCM43xx_DMA64_TXCTL)
				& ~BCM43xx_DMA64_TXSUSPEND);
	} else {
		bcm43xx_dma_write(ring, BCM43xx_DMA32_TXCTL,
				bcm43xx_dma_read(ring, BCM43xx_DMA32_TXCTL)
				& ~BCM43xx_DMA32_TXSUSPEND);
	}
	bcm43xx_power_saving_ctl_bits(ring->bcm, -1, -1);
}
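
For reference, below is a minimal userspace sketch of the TX-status cookie scheme implemented by generate_cookie() and parse_cookie() above: the upper four bits of the 16-bit cookie select one of the six TX rings (0xA000 through 0xF000) and the lower twelve bits carry the descriptor slot, so a valid cookie is never 0 (0 is the special value the RX status path polls on). The tx_cookie_make/tx_cookie_parse names and the standalone main() are illustrative only and not part of the driver.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative helper (not from the driver): build a cookie from a ring
 * index (0..5) and a 12-bit slot number, mirroring generate_cookie(). */
static uint16_t tx_cookie_make(int ring_index, int slot)
{
	assert(ring_index >= 0 && ring_index <= 5);
	assert((slot & 0xF000) == 0);            /* slot must fit in 12 bits */
	return (uint16_t)(((0xA + ring_index) << 12) | slot);
}

/* Illustrative helper (not from the driver): recover ring index and slot
 * from a cookie, mirroring parse_cookie(). */
static void tx_cookie_parse(uint16_t cookie, int *ring_index, int *slot)
{
	*ring_index = (cookie >> 12) - 0xA;      /* 0xA000..0xF000 -> ring 0..5 */
	*slot = cookie & 0x0FFF;
}

int main(void)
{
	int ring, slot;
	uint16_t cookie = tx_cookie_make(3, 42); /* e.g. TX ring 3, slot 42 */

	tx_cookie_parse(cookie, &ring, &slot);
	printf("cookie=0x%04X ring=%d slot=%d\n", cookie, ring, slot);
	return 0;
}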
