slab.c
Source package: 嵌入式系統設計與實例開發源碼 (Embedded Systems Design and Development Examples)
Language: C
Page 1 of 4
/*
 * linux/mm/slab.c
 * Written by Mark Hemment, 1996/97.
 * (markhe@nextd.demon.co.uk)
 *
 * kmem_cache_destroy() + some cleanup - 1999 Andrea Arcangeli
 *
 * Major cleanup, different bufctl logic, per-cpu arrays
 *	(c) 2000 Manfred Spraul
 *
 * An implementation of the Slab Allocator as described in outline in;
 *	UNIX Internals: The New Frontiers by Uresh Vahalia
 *	Pub: Prentice Hall	ISBN 0-13-101908-2
 * or with a little more detail in;
 *	The Slab Allocator: An Object-Caching Kernel Memory Allocator
 *	Jeff Bonwick (Sun Microsystems).
 *	Presented at: USENIX Summer 1994 Technical Conference
 *
 *
 * The memory is organized in caches, one cache for each object type.
 * (e.g. inode_cache, dentry_cache, buffer_head, vm_area_struct)
 * Each cache consists of many slabs (they are small (usually one
 * page long) and always contiguous), and each slab contains multiple
 * initialized objects.
 *
 * Each cache can only support one memory type (GFP_DMA, GFP_HIGHMEM,
 * normal). If you need a special memory type, then you must create a new
 * cache for that memory type.
 *
 * In order to reduce fragmentation, the slabs are sorted in 3 groups:
 *   full slabs with 0 free objects
 *   partial slabs
 *   empty slabs with no allocated objects
 *
 * If partial slabs exist, then new allocations come from these slabs,
 * otherwise from empty slabs or new slabs are allocated.
 *
 * kmem_cache_destroy() CAN CRASH if you try to allocate from the cache
 * during kmem_cache_destroy(). The caller must prevent concurrent allocs.
 *
 * On SMP systems, each cache has a short per-cpu head array, most allocs
 * and frees go into that array, and if that array overflows, then 1/2
 * of the entries in the array are given back into the global cache.
 * This reduces the number of spinlock operations.
 *
 * The c_cpuarray may not be read with enabled local interrupts.
 *
 * SMP synchronization:
 *  constructors and destructors are called without any locking.
 *  Several members in kmem_cache_t and slab_t never change, they
 *	are accessed without any locking.
 *  The per-cpu arrays are never accessed from the wrong cpu, no locking.
 *  The non-constant members are protected with a per-cache irq spinlock.
 *
 * Further notes from the original documentation:
 *
 * 11 April '97.  Started multi-threading - markhe
 *	The global cache-chain is protected by the semaphore 'cache_chain_sem'.
 *	The sem is only needed when accessing/extending the cache-chain, which
 *	can never happen inside an interrupt (kmem_cache_create(),
 *	kmem_cache_shrink() and kmem_cache_reap()).
 *
 *	To prevent kmem_cache_shrink() trying to shrink a 'growing' cache (which
 *	may be sleeping and therefore not holding the semaphore/lock), the
 *	growing field is used.  This also prevents reaping from a cache.
 *
 *	At present, each engine can be growing a cache.  This should be blocked.
 *
 */

#include	<linux/config.h>
#include	<linux/slab.h>
#include	<linux/interrupt.h>
#include	<linux/init.h>
#include	<linux/compiler.h>
#include	<asm/uaccess.h>
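/*
 * Illustrative usage sketch (not from the original slab.c): how client code
 * is expected to drive the allocator described above - one cache per object
 * type, with objects drawn from and returned to that cache.  The names
 * my_obj_t, my_obj_cachep and the my_obj_* functions are hypothetical; the
 * calls themselves (kmem_cache_create/alloc/free/destroy) are the 2.4-era
 * entry points implemented in this file and declared in <linux/slab.h>.
 */
#if 0	/* sketch only - not compiled */
typedef struct my_obj_s {
	struct list_head	list;
	int			state;
} my_obj_t;

static kmem_cache_t *my_obj_cachep;

static int __init my_obj_setup(void)
{
	/* One cache for this object type; L1 aligned to avoid false sharing. */
	my_obj_cachep = kmem_cache_create("my_obj", sizeof(my_obj_t),
					  0, SLAB_HWCACHE_ALIGN, NULL, NULL);
	return my_obj_cachep ? 0 : -ENOMEM;
}

static my_obj_t *my_obj_get(void)
{
	/* Served from a partial slab if one exists, else a free/new slab. */
	return kmem_cache_alloc(my_obj_cachep, GFP_KERNEL);
}

static void my_obj_put(my_obj_t *objp)
{
	kmem_cache_free(my_obj_cachep, objp);
}

static void my_obj_teardown(void)
{
	/* The caller must guarantee no concurrent allocations (see above). */
	kmem_cache_destroy(my_obj_cachep);
}
#endif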
/*
 * DEBUG	- 1 for kmem_cache_create() to honour; SLAB_DEBUG_INITIAL,
 *		  SLAB_RED_ZONE & SLAB_POISON.
 *		  0 for faster, smaller code (especially in the critical paths).
 *
 * STATS	- 1 to collect stats for /proc/slabinfo.
 *		  0 for faster, smaller code (especially in the critical paths).
 *
 * FORCED_DEBUG	- 1 enables SLAB_RED_ZONE and SLAB_POISON (if possible)
 */
#ifdef CONFIG_DEBUG_SLAB
#define	DEBUG		1
#define	STATS		1
#define	FORCED_DEBUG	1
#else
#define	DEBUG		0
#define	STATS		0
#define	FORCED_DEBUG	0
#endif

/*
 * Parameters for kmem_cache_reap
 */
#define REAP_SCANLEN	10
#define REAP_PERFECT	10

/* Shouldn't this be in a header file somewhere? */
#define	BYTES_PER_WORD		sizeof(void *)

/* Legal flag mask for kmem_cache_create(). */
#if DEBUG
# define CREATE_MASK	(SLAB_DEBUG_INITIAL | SLAB_RED_ZONE | \
			 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
			 SLAB_NO_REAP | SLAB_CACHE_DMA | \
			 SLAB_MUST_HWCACHE_ALIGN)
#else
# define CREATE_MASK	(SLAB_HWCACHE_ALIGN | SLAB_NO_REAP | \
			 SLAB_CACHE_DMA | SLAB_MUST_HWCACHE_ALIGN)
#endif

/*
 * kmem_bufctl_t:
 *
 * Bufctl's are used for linking objs within a slab
 * linked offsets.
 *
 * This implementation relies on "struct page" for locating the cache &
 * slab an object belongs to.
 * This allows the bufctl structure to be small (one int), but limits
 * the number of objects a slab (not a cache) can contain when off-slab
 * bufctls are used. The limit is the size of the largest general cache
 * that does not use off-slab slabs.
 * For 32bit archs with 4 kB pages, this is 56.
 * This is not serious, as it is only for large objects, when it is unwise
 * to have too many per slab.
 * Note: This limit can be raised by introducing a general cache whose size
 * is less than 512 (PAGE_SIZE<<3), but greater than 256.
 */

#define BUFCTL_END 0xffffFFFF
#define	SLAB_LIMIT 0xffffFFFE
typedef unsigned int kmem_bufctl_t;

/* Max number of objs-per-slab for caches which use off-slab slabs.
 * Needed to avoid a possible looping condition in kmem_cache_grow().
 */
static unsigned long offslab_limit;

/*
 * slab_t
 *
 * Manages the objs in a slab. Placed either at the beginning of mem allocated
 * for a slab, or allocated from a general cache.
 * Slabs are chained into three lists: fully used, partial, fully free slabs.
 */
typedef struct slab_s {
	struct list_head	list;
	unsigned long		colouroff;
	void			*s_mem;		/* including colour offset */
	unsigned int		inuse;		/* num of objs active in slab */
	kmem_bufctl_t		free;
} slab_t;

#define slab_bufctl(slabp) \
	((kmem_bufctl_t *)(((slab_t*)slabp)+1))

/*
 * cpucache_t
 *
 * Per cpu structures
 * The limit is stored in the per-cpu structure to reduce the data cache
 * footprint.
 */
typedef struct cpucache_s {
	unsigned int avail;
	unsigned int limit;
} cpucache_t;

#define cc_entry(cpucache) \
	((void **)(((cpucache_t*)(cpucache))+1))
#define cc_data(cachep) \
	((cachep)->cpudata[smp_processor_id()])
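/*
 * Illustrative sketch (not from the original slab.c) of the layout that
 * cc_entry() assumes: each per-cpu block is a cpucache_t header followed
 * immediately by an array of up to 'limit' object pointers, and a fast-path
 * allocation hit simply pops the last pointer.  The helper name cc_alloc_hit
 * is hypothetical; the real fast path appears later in this file.
 */
#if 0	/* sketch only - not compiled */
static inline void *cc_alloc_hit(cpucache_t *cc)
{
	/* layout: [ avail | limit | entry[0] entry[1] ... entry[limit-1] ] */
	if (cc->avail)
		return cc_entry(cc)[--cc->avail];
	return NULL;	/* miss: refill from the cache's slab lists */
}
#endif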
/*
 * kmem_cache_t
 *
 * manages a cache.
 */

#define CACHE_NAMELEN	20	/* max name length for a slab cache */

struct kmem_cache_s {
/* 1) each alloc & free */
	/* full, partial first, then free */
	struct list_head	slabs_full;
	struct list_head	slabs_partial;
	struct list_head	slabs_free;
	unsigned int		objsize;
	unsigned int		flags;	/* constant flags */
	unsigned int		num;	/* # of objs per slab */
	spinlock_t		spinlock;
#ifdef CONFIG_SMP
	unsigned int		batchcount;
#endif

/* 2) slab additions /removals */
	/* order of pgs per slab (2^n) */
	unsigned int		gfporder;

	/* force GFP flags, e.g. GFP_DMA */
	unsigned int		gfpflags;

	size_t			colour;		/* cache colouring range */
	unsigned int		colour_off;	/* colour offset */
	unsigned int		colour_next;	/* cache colouring */
	kmem_cache_t		*slabp_cache;
	unsigned int		growing;
	unsigned int		dflags;		/* dynamic flags */

	/* constructor func */
	void (*ctor)(void *, kmem_cache_t *, unsigned long);

	/* de-constructor func */
	void (*dtor)(void *, kmem_cache_t *, unsigned long);

	unsigned long		failures;

/* 3) cache creation/removal */
	char			name[CACHE_NAMELEN];
	struct list_head	next;
#ifdef CONFIG_SMP
/* 4) per-cpu data */
	cpucache_t		*cpudata[NR_CPUS];
#endif
#if STATS
	unsigned long		num_active;
	unsigned long		num_allocations;
	unsigned long		high_mark;
	unsigned long		grown;
	unsigned long		reaped;
	unsigned long		errors;
#ifdef CONFIG_SMP
	atomic_t		allochit;
	atomic_t		allocmiss;
	atomic_t		freehit;
	atomic_t		freemiss;
#endif
#endif
};

/* internal c_flags */
#define	CFLGS_OFF_SLAB	0x010000UL	/* slab management in own cache */
#define	CFLGS_OPTIMIZE	0x020000UL	/* optimized slab lookup */

/* c_dflags (dynamic flags). Need to hold the spinlock to access this member */
#define	DFLGS_GROWN	0x000001UL	/* don't reap a recently grown */

#define	OFF_SLAB(x)	((x)->flags & CFLGS_OFF_SLAB)
#define	OPTIMIZE(x)	((x)->flags & CFLGS_OPTIMIZE)
#define	GROWN(x)	((x)->dflags & DFLGS_GROWN)

#if STATS
#define	STATS_INC_ACTIVE(x)	((x)->num_active++)
#define	STATS_DEC_ACTIVE(x)	((x)->num_active--)
#define	STATS_INC_ALLOCED(x)	((x)->num_allocations++)
#define	STATS_INC_GROWN(x)	((x)->grown++)
#define	STATS_INC_REAPED(x)	((x)->reaped++)
#define	STATS_SET_HIGH(x)	do { if ((x)->num_active > (x)->high_mark) \
					(x)->high_mark = (x)->num_active; \
				} while (0)
#define	STATS_INC_ERR(x)	((x)->errors++)
#else
#define	STATS_INC_ACTIVE(x)	do { } while (0)
#define	STATS_DEC_ACTIVE(x)	do { } while (0)
#define	STATS_INC_ALLOCED(x)	do { } while (0)
#define	STATS_INC_GROWN(x)	do { } while (0)
#define	STATS_INC_REAPED(x)	do { } while (0)
#define	STATS_SET_HIGH(x)	do { } while (0)
#define	STATS_INC_ERR(x)	do { } while (0)
#endif

#if STATS && defined(CONFIG_SMP)
#define STATS_INC_ALLOCHIT(x)	atomic_inc(&(x)->allochit)
#define STATS_INC_ALLOCMISS(x)	atomic_inc(&(x)->allocmiss)
#define STATS_INC_FREEHIT(x)	atomic_inc(&(x)->freehit)
#define STATS_INC_FREEMISS(x)	atomic_inc(&(x)->freemiss)
#else
#define STATS_INC_ALLOCHIT(x)	do { } while (0)
#define STATS_INC_ALLOCMISS(x)	do { } while (0)
#define STATS_INC_FREEHIT(x)	do { } while (0)
#define STATS_INC_FREEMISS(x)	do { } while (0)
#endif

#if DEBUG
/* Magic nums for obj red zoning.
 * Placed in the first word before and the first word after an obj.
 */
#define	RED_MAGIC1	0x5A2CF071UL	/* when obj is active */
#define	RED_MAGIC2	0x170FC2A5UL	/* when obj is inactive */

/* ...and for poisoning */
#define	POISON_BYTE	0x5a		/* byte value for poisoning */
#define	POISON_END	0xa5		/* end-byte of poisoning */
#endif

/* maximum size of an obj (in 2^order pages) */
#define	MAX_OBJ_ORDER	5	/* 32 pages */

/*
 * Do not go above this order unless 0 objects fit into the slab.
 */
#define	BREAK_GFP_ORDER_HI	2
#define	BREAK_GFP_ORDER_LO	1
static int slab_break_gfp_order = BREAK_GFP_ORDER_LO;

/*
 * Absolute limit for the gfp order
 */
#define	MAX_GFP_ORDER	5	/* 32 pages */


/* Macros for storing/retrieving the cachep and or slab from the
 * global 'mem_map'. These are used to find the slab an obj belongs to.
 * With kfree(), these are used to find the cache which an obj belongs to.
 */
#define	SET_PAGE_CACHE(pg,x)  ((pg)->list.next = (struct list_head *)(x))
#define	GET_PAGE_CACHE(pg)    ((kmem_cache_t *)(pg)->list.next)
#define	SET_PAGE_SLAB(pg,x)   ((pg)->list.prev = (struct list_head *)(x))
#define	GET_PAGE_SLAB(pg)     ((slab_t *)(pg)->list.prev)
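/*
 * Illustrative sketch (not from the original slab.c) of how the macros above
 * are used: every page backing a slab has its struct page annotated with the
 * owning cache and slab, so a bare object address can be mapped back to its
 * cache - which is how kfree(), later in this file, locates the right cache.
 * The helper name obj_to_cache is hypothetical.
 */
#if 0	/* sketch only - not compiled */
static inline kmem_cache_t *obj_to_cache(void *objp)
{
	struct page *page = virt_to_page(objp);

	return GET_PAGE_CACHE(page);
}
#endif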
/* Size description struct for general caches. */
typedef struct cache_sizes {
	size_t		 cs_size;
	kmem_cache_t	*cs_cachep;
	kmem_cache_t	*cs_dmacachep;
} cache_sizes_t;

static cache_sizes_t cache_sizes[] = {
#if PAGE_SIZE == 4096
	{    32,	NULL, NULL},
#endif
	{    64,	NULL, NULL},
	{   128,	NULL, NULL},
	{   256,	NULL, NULL},
	{   512,	NULL, NULL},
	{  1024,	NULL, NULL},
	{  2048,	NULL, NULL},
	{  4096,	NULL, NULL},
	{  8192,	NULL, NULL},
	{ 16384,	NULL, NULL},
	{ 32768,	NULL, NULL},
	{ 65536,	NULL, NULL},
	{131072,	NULL, NULL},
	{     0,	NULL, NULL}
};

/* internal cache of cache description objs */
static kmem_cache_t cache_cache = {
	slabs_full:	LIST_HEAD_INIT(cache_cache.slabs_full),
	slabs_partial:	LIST_HEAD_INIT(cache_cache.slabs_partial),
	slabs_free:	LIST_HEAD_INIT(cache_cache.slabs_free),
	objsize:	sizeof(kmem_cache_t),
	flags:		SLAB_NO_REAP,
	spinlock:	SPIN_LOCK_UNLOCKED,
	colour_off:	L1_CACHE_BYTES,
	name:		"kmem_cache",
};

/* Guard access to the cache-chain. */
static struct semaphore	cache_chain_sem;

/* Place maintainer for reaping. */
static kmem_cache_t *clock_searchp = &cache_cache;

#define cache_chain (cache_cache.next)

#ifdef CONFIG_SMP
/*
 * chicken and egg problem: delay the per-cpu array allocation
 * until the general caches are up.
 */
static int g_cpucache_up;

static void enable_cpucache (kmem_cache_t *cachep);
static void enable_all_cpucaches (void);
#endif

/* Calc the num objs, wastage, and bytes left over for a given slab size. */
static void kmem_cache_estimate (unsigned long gfporder, size_t size,
		 int flags, size_t *left_over, unsigned int *num)
{
	int i;
	size_t wastage = PAGE_SIZE<<gfporder;
	size_t extra = 0;
	size_t base = 0;

	if (!(flags & CFLGS_OFF_SLAB)) {
		base = sizeof(slab_t);
		extra = sizeof(kmem_bufctl_t);
	}
	i = 0;
	while (i*size + L1_CACHE_ALIGN(base+i*extra) <= wastage)
		i++;
	if (i > 0)
		i--;

	if (i > SLAB_LIMIT)
		i = SLAB_LIMIT;

	*num = i;
	wastage -= i*size;
	wastage -= L1_CACHE_ALIGN(base+i*extra);
	*left_over = wastage;
}

/* Initialisation - setup the `cache' cache. */
void __init kmem_cache_init(void)
{
	size_t left_over;

	init_MUTEX(&cache_chain_sem);
	INIT_LIST_HEAD(&cache_chain);

	kmem_cache_estimate(0, cache_cache.objsize, 0,
			&left_over, &cache_cache.num);
	if (!cache_cache.num)
		BUG();

	cache_cache.colour = left_over/cache_cache.colour_off;
	cache_cache.colour_next = 0;
}
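/*
 * Worked example (not from the original slab.c) of the arithmetic performed
 * by kmem_cache_estimate() above, assuming 4096-byte pages, 32-byte L1 cache
 * lines, 32-bit pointers (so sizeof(slab_t) == 24) and an on-slab cache of
 * 128-byte objects at gfporder 0:
 *
 *	num:       largest i with i*128 + L1_CACHE_ALIGN(24 + i*4) <= 4096,
 *	           i.e. 30 objects (30*128 + 160 = 4000; 31 would need 4128)
 *	left_over: 4096 - 30*128 - 160 = 96 bytes
 *
 * With colour_off == L1_CACHE_BYTES == 32, those 96 bytes give a colouring
 * range of 3, so successive slabs place their first object at offsets
 * 0, 32 and 64 before wrapping.
 */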
/* Initialisation - setup remaining internal and general caches.
 * Called after the gfp() functions have been enabled, and before smp_init().
 */
void __init kmem_cache_sizes_init(void)
{
	cache_sizes_t *sizes = cache_sizes;
	char name[20];
	/*
	 * Fragmentation resistance on low memory - only use bigger
	 * page orders on machines with more than 32MB of memory.
	 */
	if (num_physpages > (32 << 20) >> PAGE_SHIFT)
		slab_break_gfp_order = BREAK_GFP_ORDER_HI;
	do {
		/* For performance, all the general caches are L1 aligned.
		 * This should be particularly beneficial on SMP boxes, as it
		 * eliminates "false sharing".
		 * Note for systems short on memory removing the alignment will
		 * allow tighter packing of the smaller caches. */
		sprintf(name,"size-%Zd",sizes->cs_size);
		if (!(sizes->cs_cachep =
			kmem_cache_create(name, sizes->cs_size,
					0, SLAB_HWCACHE_ALIGN, NULL, NULL))) {
			BUG();
		}

		/* Inc off-slab bufctl limit until the ceiling is hit. */
		if (!(OFF_SLAB(sizes->cs_cachep))) {
			offslab_limit = sizes->cs_size-sizeof(slab_t);
			offslab_limit /= 2;
		}
		sprintf(name, "size-%Zd(DMA)",sizes->cs_size);
		sizes->cs_dmacachep = kmem_cache_create(name, sizes->cs_size, 0,
			      SLAB_CACHE_DMA|SLAB_HWCACHE_ALIGN, NULL, NULL);
		if (!sizes->cs_dmacachep)
			BUG();
		sizes++;
	} while (sizes->cs_size);
}

int __init kmem_cpucache_init(void)
{
#ifdef CONFIG_SMP
	g_cpucache_up = 1;
	enable_all_cpucaches();
#endif
	return 0;
}

__initcall(kmem_cpucache_init);


/* Interface to system's page allocator. No need to hold the cache-lock.
 */
static inline void * kmem_getpages (kmem_cache_t *cachep, unsigned long flags)
{
	void	*addr;

	/*
	 * If we requested dmaable memory, we will get it. Even if we
	 * did not request dmaable memory, we might get it, but that
	 * would be relatively rare and ignorable.
	 */
	flags |= cachep->gfpflags;
	addr = (void*) __get_free_pages(flags, cachep->gfporder);
	/* Assume that now we have the pages no one else can legally
	 * mess with the 'struct page's.
	 * However vm_scan() might try to test the structure to see if
	 * it is a named-page or buffer-page.  The members it tests are
	 * of no interest here.....
	 */
	return addr;
}

/* Interface to system's page release. */
static inline void kmem_freepages (kmem_cache_t *cachep, void *addr)
{
	unsigned long i = (1<<cachep->gfporder);
	struct page *page = virt_to_page(addr);

	/* free_pages() does not clear the type bit - we do that.
