

dmapool.c

Linux kernel memory-management module source code (the DMA pool allocator)
Language: C
/*
 * DMA Pool allocator
 *
 * Copyright 2001 David Brownell
 * Copyright 2007 Intel Corporation
 *   Author: Matthew Wilcox <willy@linux.intel.com>
 *
 * This software may be redistributed and/or modified under the terms of
 * the GNU General Public License ("GPL") version 2 as published by the
 * Free Software Foundation.
 *
 * This allocator returns small blocks of a given size which are DMA-able by
 * the given device.  It uses the dma_alloc_coherent page allocator to get
 * new pages, then splits them up into blocks of the required size.
 * Many older drivers still have their own code to do this.
 *
 * The current design of this allocator is fairly simple.  The pool is
 * represented by the 'struct dma_pool' which keeps a doubly-linked list of
 * allocated pages.  Each page in the page_list is split into blocks of at
 * least 'size' bytes.  Free blocks are tracked in an unsorted singly-linked
 * list of free blocks within the page.  Used blocks aren't tracked, but we
 * keep a count of how many are currently allocated from each page.
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/poison.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/wait.h>

#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB_DEBUG_ON)
#define DMAPOOL_DEBUG 1
#endif

struct dma_pool {		/* the pool */
	struct list_head page_list;
	spinlock_t lock;
	size_t size;
	struct device *dev;
	size_t allocation;
	size_t boundary;
	char name[32];
	wait_queue_head_t waitq;
	struct list_head pools;
};

struct dma_page {		/* cacheable header for 'allocation' bytes */
	struct list_head page_list;
	void *vaddr;
	dma_addr_t dma;
	unsigned int in_use;
	unsigned int offset;
};

#define	POOL_TIMEOUT_JIFFIES	((100 /* msec */ * HZ) / 1000)

static DEFINE_MUTEX(pools_lock);

static ssize_t
show_pools(struct device *dev, struct device_attribute *attr, char *buf)
{
	unsigned temp;
	unsigned size;
	char *next;
	struct dma_page *page;
	struct dma_pool *pool;

	next = buf;
	size = PAGE_SIZE;

	temp = scnprintf(next, size, "poolinfo - 0.1\n");
	size -= temp;
	next += temp;

	mutex_lock(&pools_lock);
	list_for_each_entry(pool, &dev->dma_pools, pools) {
		unsigned pages = 0;
		unsigned blocks = 0;

		list_for_each_entry(page, &pool->page_list, page_list) {
			pages++;
			blocks += page->in_use;
		}

		/* per-pool info, no real statistics yet */
		temp = scnprintf(next, size, "%-16s %4u %4Zu %4Zu %2u\n",
				 pool->name, blocks,
				 pages * (pool->allocation / pool->size),
				 pool->size, pages);
		size -= temp;
		next += temp;
	}
	mutex_unlock(&pools_lock);

	return PAGE_SIZE - size;
}

static DEVICE_ATTR(pools, S_IRUGO, show_pools, NULL);

/**
 * dma_pool_create - Creates a pool of consistent memory blocks, for dma.
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @boundary: returned blocks won't cross this power of two boundary
 * Context: !in_interrupt()
 *
 * Returns a dma allocation pool with the requested characteristics, or
 * null if one can't be created.  Given one of these pools, dma_pool_alloc()
 * may be used to allocate memory.  Such memory will all have "consistent"
 * DMA mappings, accessible by the device and its driver without using
 * cache flushing primitives.  The actual size of blocks allocated may be
 * larger than requested because of alignment.
 *
 * If @boundary is nonzero, objects returned from dma_pool_alloc() won't
 * cross that size boundary.  This is useful for devices which have
 * addressing restrictions on individual DMA transfers, such as not crossing
 * boundaries of 4KBytes.
 */
struct dma_pool *dma_pool_create(const char *name, struct device *dev,
				 size_t size, size_t align, size_t boundary)
{
	struct dma_pool *retval;
	size_t allocation;

	if (align == 0) {
		align = 1;
	} else if (align & (align - 1)) {
		return NULL;
	}

	if (size == 0) {
		return NULL;
	} else if (size < 4) {
		size = 4;
	}

	if ((size % align) != 0)
		size = ALIGN(size, align);

	allocation = max_t(size_t, size, PAGE_SIZE);

	if (!boundary) {
		boundary = allocation;
	} else if ((boundary < size) || (boundary & (boundary - 1))) {
		return NULL;
	}

	retval = kmalloc_node(sizeof(*retval), GFP_KERNEL, dev_to_node(dev));
	if (!retval)
		return retval;

	strlcpy(retval->name, name, sizeof(retval->name));

	retval->dev = dev;

	INIT_LIST_HEAD(&retval->page_list);
	spin_lock_init(&retval->lock);
	retval->size = size;
	retval->boundary = boundary;
	retval->allocation = allocation;
	init_waitqueue_head(&retval->waitq);

	if (dev) {
		int ret;

		mutex_lock(&pools_lock);
		if (list_empty(&dev->dma_pools))
			ret = device_create_file(dev, &dev_attr_pools);
		else
			ret = 0;
		/* note:  not currently insisting "name" be unique */
		if (!ret)
			list_add(&retval->pools, &dev->dma_pools);
		else {
			kfree(retval);
			retval = NULL;
		}
		mutex_unlock(&pools_lock);
	} else
		INIT_LIST_HEAD(&retval->pools);

	return retval;
}
EXPORT_SYMBOL(dma_pool_create);

static void pool_initialise_page(struct dma_pool *pool, struct dma_page *page)
{
	unsigned int offset = 0;
	unsigned int next_boundary = pool->boundary;

	do {
		unsigned int next = offset + pool->size;
		if (unlikely((next + pool->size) >= next_boundary)) {
			next = next_boundary;
			next_boundary += pool->boundary;
		}
		*(int *)(page->vaddr + offset) = next;
		offset = next;
	} while (offset < pool->allocation);
}

static struct dma_page *pool_alloc_page(struct dma_pool *pool, gfp_t mem_flags)
{
	struct dma_page *page;

	page = kmalloc(sizeof(*page), mem_flags);
	if (!page)
		return NULL;
	page->vaddr = dma_alloc_coherent(pool->dev, pool->allocation,
					 &page->dma, mem_flags);
	if (page->vaddr) {
#ifdef	DMAPOOL_DEBUG
		memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
		pool_initialise_page(pool, page);
		list_add(&page->page_list, &pool->page_list);
		page->in_use = 0;
		page->offset = 0;
	} else {
		kfree(page);
		page = NULL;
	}
	return page;
}

static inline int is_page_busy(struct dma_page *page)
{
	return page->in_use != 0;
}

static void pool_free_page(struct dma_pool *pool, struct dma_page *page)
{
	dma_addr_t dma = page->dma;

#ifdef	DMAPOOL_DEBUG
	memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
	dma_free_coherent(pool->dev, pool->allocation, page->vaddr, dma);
	list_del(&page->page_list);
	kfree(page);
}

/**
 * dma_pool_destroy - destroys a pool of dma memory blocks.
 * @pool: dma pool that will be destroyed
 * Context: !in_interrupt()
 *
 * Caller guarantees that no more memory from the pool is in use,
 * and that nothing will try to use the pool after this call.
 */
void dma_pool_destroy(struct dma_pool *pool)
{
	mutex_lock(&pools_lock);
	list_del(&pool->pools);
	if (pool->dev && list_empty(&pool->dev->dma_pools))
		device_remove_file(pool->dev, &dev_attr_pools);
	mutex_unlock(&pools_lock);

	while (!list_empty(&pool->page_list)) {
		struct dma_page *page;
		page = list_entry(pool->page_list.next,
				  struct dma_page, page_list);
		if (is_page_busy(page)) {
			if (pool->dev)
				dev_err(pool->dev,
					"dma_pool_destroy %s, %p busy\n",
					pool->name, page->vaddr);
			else
				printk(KERN_ERR
				       "dma_pool_destroy %s, %p busy\n",
				       pool->name, page->vaddr);
			/* leak the still-in-use consistent memory */
			list_del(&page->page_list);
			kfree(page);
		} else
			pool_free_page(pool, page);
	}

	kfree(pool);
}
EXPORT_SYMBOL(dma_pool_destroy);

/**
 * dma_pool_alloc - get a block of consistent memory
 * @pool: dma pool that will produce the block
 * @mem_flags: GFP_* bitmask
 * @handle: pointer to dma address of block
 *
 * This returns the kernel virtual address of a currently unused block,
 * and reports its dma address through the handle.
 * If such a memory block can't be allocated, %NULL is returned.
 */
void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
		     dma_addr_t *handle)
{
	unsigned long flags;
	struct dma_page *page;
	size_t offset;
	void *retval;

	spin_lock_irqsave(&pool->lock, flags);
 restart:
	list_for_each_entry(page, &pool->page_list, page_list) {
		if (page->offset < pool->allocation)
			goto ready;
	}
	page = pool_alloc_page(pool, GFP_ATOMIC);
	if (!page) {
		if (mem_flags & __GFP_WAIT) {
			DECLARE_WAITQUEUE(wait, current);

			__set_current_state(TASK_INTERRUPTIBLE);
			__add_wait_queue(&pool->waitq, &wait);
			spin_unlock_irqrestore(&pool->lock, flags);

			schedule_timeout(POOL_TIMEOUT_JIFFIES);

			spin_lock_irqsave(&pool->lock, flags);
			__remove_wait_queue(&pool->waitq, &wait);
			goto restart;
		}
		retval = NULL;
		goto done;
	}

 ready:
	page->in_use++;
	offset = page->offset;
	page->offset = *(int *)(page->vaddr + offset);
	retval = offset + page->vaddr;
	*handle = offset + page->dma;
#ifdef	DMAPOOL_DEBUG
	memset(retval, POOL_POISON_ALLOCATED, pool->size);
#endif
 done:
	spin_unlock_irqrestore(&pool->lock, flags);
	return retval;
}
EXPORT_SYMBOL(dma_pool_alloc);

static struct dma_page *pool_find_page(struct dma_pool *pool, dma_addr_t dma)
{
	unsigned long flags;
	struct dma_page *page;

	spin_lock_irqsave(&pool->lock, flags);
	list_for_each_entry(page, &pool->page_list, page_list) {
		if (dma < page->dma)
			continue;
		if (dma < (page->dma + pool->allocation))
			goto done;
	}
	page = NULL;
 done:
	spin_unlock_irqrestore(&pool->lock, flags);
	return page;
}

/**
 * dma_pool_free - put block back into dma pool
 * @pool: the dma pool holding the block
 * @vaddr: virtual address of block
 * @dma: dma address of block
 *
 * Caller promises neither device nor driver will again touch this block
 * unless it is first re-allocated.
 */
void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
{
	struct dma_page *page;
	unsigned long flags;
	unsigned int offset;

	page = pool_find_page(pool, dma);
	if (!page) {
		if (pool->dev)
			dev_err(pool->dev,
				"dma_pool_free %s, %p/%lx (bad dma)\n",
				pool->name, vaddr, (unsigned long)dma);
		else
			printk(KERN_ERR "dma_pool_free %s, %p/%lx (bad dma)\n",
			       pool->name, vaddr, (unsigned long)dma);
		return;
	}

	offset = vaddr - page->vaddr;
#ifdef	DMAPOOL_DEBUG
	if ((dma - page->dma) != offset) {
		if (pool->dev)
			dev_err(pool->dev,
				"dma_pool_free %s, %p (bad vaddr)/%Lx\n",
				pool->name, vaddr, (unsigned long long)dma);
		else
			printk(KERN_ERR
			       "dma_pool_free %s, %p (bad vaddr)/%Lx\n",
			       pool->name, vaddr, (unsigned long long)dma);
		return;
	}
	{
		unsigned int chain = page->offset;
		while (chain < pool->allocation) {
			if (chain != offset) {
				chain = *(int *)(page->vaddr + chain);
				continue;
			}
			if (pool->dev)
				dev_err(pool->dev, "dma_pool_free %s, dma %Lx "
					"already free\n", pool->name,
					(unsigned long long)dma);
			else
				printk(KERN_ERR "dma_pool_free %s, dma %Lx "
					"already free\n", pool->name,
					(unsigned long long)dma);
			return;
		}
	}
	memset(vaddr, POOL_POISON_FREED, pool->size);
#endif

	spin_lock_irqsave(&pool->lock, flags);
	page->in_use--;
	*(int *)vaddr = page->offset;
	page->offset = offset;
	if (waitqueue_active(&pool->waitq))
		wake_up_locked(&pool->waitq);
	/*
	 * Resist a temptation to do
	 *    if (!is_page_busy(page)) pool_free_page(pool, page);
	 * Better have a few empty pages hang around.
	 */
	spin_unlock_irqrestore(&pool->lock, flags);
}
EXPORT_SYMBOL(dma_pool_free);

/*
 * Managed DMA pool
 */
static void dmam_pool_release(struct device *dev, void *res)
{
	struct dma_pool *pool = *(struct dma_pool **)res;

	dma_pool_destroy(pool);
}

static int dmam_pool_match(struct device *dev, void *res, void *match_data)
{
	return *(struct dma_pool **)res == match_data;
}

/**
 * dmam_pool_create - Managed dma_pool_create()
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @allocation: returned blocks won't cross this boundary (or zero)
 *
 * Managed dma_pool_create().  DMA pool created with this function is
 * automatically destroyed on driver detach.
 */
struct dma_pool *dmam_pool_create(const char *name, struct device *dev,
				  size_t size, size_t align, size_t allocation)
{
	struct dma_pool **ptr, *pool;

	ptr = devres_alloc(dmam_pool_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	pool = *ptr = dma_pool_create(name, dev, size, align, allocation);
	if (pool)
		devres_add(dev, ptr);
	else
		devres_free(ptr);

	return pool;
}
EXPORT_SYMBOL(dmam_pool_create);

/**
 * dmam_pool_destroy - Managed dma_pool_destroy()
 * @pool: dma pool that will be destroyed
 *
 * Managed dma_pool_destroy().
 */
void dmam_pool_destroy(struct dma_pool *pool)
{
	struct device *dev = pool->dev;

	dma_pool_destroy(pool);
	WARN_ON(devres_destroy(dev, dmam_pool_release, dmam_pool_match, pool));
}
EXPORT_SYMBOL(dmam_pool_destroy);
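
Below is a minimal usage sketch (not part of dmapool.c) showing how a driver might call the API defined above. The structure my_desc_ctx, the functions my_driver_setup()/my_driver_teardown(), the pool name "my_descs", and the 64-byte/16-byte/4 KiB parameters are illustrative assumptions; only dma_pool_create(), dma_pool_alloc(), dma_pool_free(), and dma_pool_destroy() come from the source file itself.

/* Hypothetical driver-side usage sketch of the DMA pool API above. */
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/errno.h>
#include <linux/gfp.h>

struct my_desc_ctx {			/* hypothetical per-device state */
	struct dma_pool *desc_pool;
	void *desc;			/* CPU (virtual) address of one block */
	dma_addr_t desc_dma;		/* bus address the device will use */
};

static int my_driver_setup(struct device *dev, struct my_desc_ctx *ctx)
{
	/* 64-byte blocks, 16-byte aligned, never crossing a 4 KiB boundary */
	ctx->desc_pool = dma_pool_create("my_descs", dev, 64, 16, 4096);
	if (!ctx->desc_pool)
		return -ENOMEM;

	/* Take one coherent block; its DMA address is written to desc_dma. */
	ctx->desc = dma_pool_alloc(ctx->desc_pool, GFP_KERNEL, &ctx->desc_dma);
	if (!ctx->desc) {
		dma_pool_destroy(ctx->desc_pool);
		return -ENOMEM;
	}
	return 0;
}

static void my_driver_teardown(struct my_desc_ctx *ctx)
{
	/* Every outstanding block must go back before the pool is destroyed. */
	dma_pool_free(ctx->desc_pool, ctx->desc, ctx->desc_dma);
	dma_pool_destroy(ctx->desc_pool);
}

Drivers bound through the device model can call dmam_pool_create() instead of dma_pool_create(); as the managed-pool section above shows, such a pool is then torn down automatically on driver detach via devres.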

久久久久久久久97黄色工厂| 久久疯狂做爰流白浆xx| 国产精品网站在线播放| 日韩精品一区二区三区视频播放| 4438x亚洲最大成人网| 4438x亚洲最大成人网| 91麻豆精品国产自产在线| 91麻豆精品国产91久久久久久久久| 欧美日韩视频第一区| 欧美军同video69gay| 欧美精品久久久久久久多人混战| 欧美日韩综合色| 91精品国产综合久久精品app | 在线成人免费视频| 欧美挠脚心视频网站| 欧美一级视频精品观看| 精品欧美一区二区三区精品久久| 精品va天堂亚洲国产| 久久久精品黄色| 中文字幕一区二区三区视频| 亚洲视频香蕉人妖| 亚洲一区二区美女| 毛片一区二区三区| 国产ts人妖一区二区| a4yy欧美一区二区三区| 91极品视觉盛宴| 亚洲一二三区不卡| 午夜激情综合网| 经典三级视频一区| 福利视频网站一区二区三区| 97aⅴ精品视频一二三区| 91官网在线观看| 欧美一区二区播放| 日本一区免费视频| 一区二区三区四区视频精品免费 | 中文字幕高清不卡| 亚洲色图一区二区三区| 天堂av在线一区| 国产毛片精品视频| 91成人网在线| 精品捆绑美女sm三区| 国产精品视频一二三区 | 精品中文字幕一区二区小辣椒 | 欧美成人一区二区三区 | 福利电影一区二区| 欧美在线视频不卡| 日韩精品一区二区三区视频在线观看 | 亚洲综合区在线| 久99久精品视频免费观看| 懂色中文一区二区在线播放| 日本韩国欧美一区| www日韩大片| 亚洲乱码国产乱码精品精98午夜| 日韩**一区毛片| 不卡高清视频专区| 日韩一级高清毛片| 亚洲男人的天堂av| 久久91精品久久久久久秒播| 91免费版在线| 久久久一区二区| 日韩二区三区四区| 成人午夜电影小说| 日韩一级大片在线观看| 亚洲天天做日日做天天谢日日欢| 久草在线在线精品观看| 欧美专区日韩专区| 欧美激情资源网| 久久国产剧场电影| 欧美日韩国产成人在线免费| 国产精品免费人成网站| 美国十次了思思久久精品导航| 在线观看欧美日本| 中文字幕在线观看不卡视频| 麻豆精品精品国产自在97香蕉| 欧美最新大片在线看| 国产精品精品国产色婷婷| 蜜臀久久99精品久久久久宅男 | 日韩一卡二卡三卡四卡| 一区二区三区四区在线免费观看| 国产成人av一区二区三区在线观看| 8v天堂国产在线一区二区| 亚洲青青青在线视频| 国产精品白丝jk黑袜喷水| 91精品国产综合久久小美女| 亚洲综合在线观看视频| 高清不卡一二三区| 亚洲精品一区二区精华| 久久精品国产77777蜜臀| 欧美区在线观看| 亚洲国产精品影院| 欧亚洲嫩模精品一区三区| 亚洲欧美日韩综合aⅴ视频| 成人中文字幕在线| 国产日韩欧美制服另类| 国产一区二区精品久久99| 欧美xxxxx牲另类人与| 秋霞国产午夜精品免费视频| 欧美精品xxxxbbbb| 亚洲一区二区精品视频| 欧美天堂一区二区三区| 亚洲国产精品久久人人爱| 色婷婷久久久久swag精品| 亚洲免费观看视频| 在线免费精品视频| 亚洲一二三四区不卡| 欧亚一区二区三区| 亚洲mv在线观看| 欧美二区在线观看| 蜜臀99久久精品久久久久久软件 | 亚洲欧洲日韩女同| 一本一道波多野结衣一区二区| 亚洲精品免费视频| 欧美亚洲动漫另类| 午夜视频一区在线观看| 欧美精品tushy高清| 日韩成人一区二区三区在线观看| 欧美一级片在线观看| 美女www一区二区| www欧美成人18+| 成人精品免费视频| 一区二区不卡在线视频 午夜欧美不卡在| 91麻豆文化传媒在线观看| 亚洲一区成人在线| 日韩一区二区影院| 国产一区二区毛片| 国产精品国产三级国产普通话蜜臀 | 首页国产欧美久久| 精品国产乱码久久久久久久久 | 日韩三区在线观看| 九九精品视频在线看| 国产人成一区二区三区影院| 成人黄色在线网站| 亚洲黄一区二区三区| 欧美高清视频一二三区| 黄一区二区三区| 中文字幕一区二区三区精华液 | 亚洲欧美日韩综合aⅴ视频| 欧美系列亚洲系列| 卡一卡二国产精品| 中文一区在线播放| 欧美日韩你懂的| 国产自产高清不卡| 日韩一区日韩二区| 欧美顶级少妇做爰| 风间由美一区二区三区在线观看| 亚洲欧美色图小说| 日韩欧美成人一区| 91天堂素人约啪| 美女高潮久久久| 亚洲另类在线一区| 日韩亚洲电影在线| www.综合网.com| 六月丁香综合在线视频| 国产精品久久久久久久第一福利| 欧美片网站yy| 成人av电影在线观看| 午夜国产不卡在线观看视频| 国产午夜精品一区二区三区四区| 欧美曰成人黄网| 国产寡妇亲子伦一区二区| 亚洲电影视频在线| 欧美国产97人人爽人人喊| 欧美精品久久天天躁| 99国产欧美久久久精品| 久久精品72免费观看| 亚洲裸体xxx| 国产人伦精品一区二区| 91精品国产综合久久精品app| 不卡的av中国片| 精品在线免费视频| 亚洲一区二区高清| 中文字幕一区二区三区四区| 欧美精品一区二区在线观看| 欧美日韩另类一区| 91丨porny丨首页| 国v精品久久久网| 精彩视频一区二区| 日韩精品电影在线观看| 综合久久国产九一剧情麻豆| 久久老女人爱爱| 欧美大片日本大片免费观看| 91碰在线视频| av在线播放成人| 国产精品1区二区.| 蜜桃视频在线观看一区二区| 亚洲国产精品嫩草影院| 亚洲三级电影全部在线观看高清| 亚洲精品在线观看网站| 日韩丝袜情趣美女图片| 欧美日韩黄视频| 欧美三级日韩三级国产三级| av午夜一区麻豆| 粉嫩蜜臀av国产精品网站| 精品在线一区二区| 麻豆国产精品官网| 日韩电影在线看| 日本网站在线观看一区二区三区| 亚洲v日本v欧美v久久精品| 一区二区三区在线免费播放| 中文字幕一区二区三区在线不卡 |