slab.c

ARM Embedded System Design and Example Development — lab textbook (II) source code
Language: C
Page 1 of 4
/*
 * linux/mm/slab.c
 * Written by Mark Hemment, 1996/97.
 * (markhe@nextd.demon.co.uk)
 *
 * kmem_cache_destroy() + some cleanup - 1999 Andrea Arcangeli
 *
 * Major cleanup, different bufctl logic, per-cpu arrays
 *	(c) 2000 Manfred Spraul
 *
 * An implementation of the Slab Allocator as described in outline in;
 *	UNIX Internals: The New Frontiers by Uresh Vahalia
 *	Pub: Prentice Hall	ISBN 0-13-101908-2
 * or with a little more detail in;
 *	The Slab Allocator: An Object-Caching Kernel Memory Allocator
 *	Jeff Bonwick (Sun Microsystems).
 *	Presented at: USENIX Summer 1994 Technical Conference
 *
 *
 * The memory is organized in caches, one cache for each object type.
 * (e.g. inode_cache, dentry_cache, buffer_head, vm_area_struct)
 * Each cache consists of many slabs (they are small (usually one
 * page long) and always contiguous), and each slab contains multiple
 * initialized objects.
 *
 * Each cache can only support one memory type (GFP_DMA, GFP_HIGHMEM,
 * normal). If you need a special memory type, then you must create a new
 * cache for that memory type.
 *
 * In order to reduce fragmentation, the slabs are sorted in 3 groups:
 *   full slabs with 0 free objects
 *   partial slabs
 *   empty slabs with no allocated objects
 *
 * If partial slabs exist, then new allocations come from these slabs,
 * otherwise from empty slabs or new slabs are allocated.
 *
 * kmem_cache_destroy() CAN CRASH if you try to allocate from the cache
 * during kmem_cache_destroy(). The caller must prevent concurrent allocs.
 *
 * On SMP systems, each cache has a short per-cpu head array, most allocs
 * and frees go into that array, and if that array overflows, then 1/2
 * of the entries in the array are given back into the global cache.
 * This reduces the number of spinlock operations.
 *
 * The c_cpuarray may not be read with enabled local interrupts.
 *
 * SMP synchronization:
 *  constructors and destructors are called without any locking.
 *  Several members in kmem_cache_t and slab_t never change, they
 *	are accessed without any locking.
 *  The per-cpu arrays are never accessed from the wrong cpu, no locking.
 *  The non-constant members are protected with a per-cache irq spinlock.
 *
 * Further notes from the original documentation:
 *
 * 11 April '97.  Started multi-threading - markhe
 *	The global cache-chain is protected by the semaphore 'cache_chain_sem'.
 *	The sem is only needed when accessing/extending the cache-chain, which
 *	can never happen inside an interrupt (kmem_cache_create(),
 *	kmem_cache_shrink() and kmem_cache_reap()).
 *
 *	To prevent kmem_cache_shrink() trying to shrink a 'growing' cache (which
 *	may be sleeping and therefore not holding the semaphore/lock), the
 *	growing field is used.  This also prevents reaping from a cache.
 *
 *	At present, each engine can be growing a cache.  This should be blocked.
 *
 */

#include	<linux/config.h>
#include	<linux/slab.h>
#include	<linux/interrupt.h>
#include	<linux/init.h>
#include	<linux/compiler.h>
#include	<asm/uaccess.h>

/*
 * DEBUG	- 1 for kmem_cache_create() to honour; SLAB_DEBUG_INITIAL,
 *		  SLAB_RED_ZONE & SLAB_POISON.
 *		  0 for faster, smaller code (especially in the critical paths).
 *
 * STATS	- 1 to collect stats for /proc/slabinfo.
 *		  0 for faster, smaller code (especially in the critical paths).
 *
 * FORCED_DEBUG	- 1 enables SLAB_RED_ZONE and SLAB_POISON (if possible)
 */

#ifdef CONFIG_DEBUG_SLAB
#define	DEBUG		1
#define	STATS		1
#define	FORCED_DEBUG	1
#else
#define	DEBUG		0
#define	STATS		0
#define	FORCED_DEBUG	0
#endif

/*
 * Parameters for kmem_cache_reap
 */
#define REAP_SCANLEN	10
#define REAP_PERFECT	10

/* Shouldn't this be in a header file somewhere? */
#define	BYTES_PER_WORD		sizeof(void *)

/* Legal flag mask for kmem_cache_create(). */
#if DEBUG
# define CREATE_MASK	(SLAB_DEBUG_INITIAL | SLAB_RED_ZONE | \
			 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
			 SLAB_NO_REAP | SLAB_CACHE_DMA | \
			 SLAB_MUST_HWCACHE_ALIGN)
#else
# define CREATE_MASK	(SLAB_HWCACHE_ALIGN | SLAB_NO_REAP | \
			 SLAB_CACHE_DMA | SLAB_MUST_HWCACHE_ALIGN)
#endif

/*
 * kmem_bufctl_t:
 *
 * Bufctls are used for linking objs within a slab via linked offsets.
 *
 * This implementation relies on "struct page" for locating the cache &
 * slab an object belongs to.
 * This allows the bufctl structure to be small (one int), but limits
 * the number of objects a slab (not a cache) can contain when off-slab
 * bufctls are used. The limit is the size of the largest general cache
 * that does not use off-slab slabs.
 * For 32bit archs with 4 kB pages, this is 56.
 * This is not serious, as it is only for large objects, when it is unwise
 * to have too many per slab.
 * Note: This limit can be raised by introducing a general cache whose size
 * is less than 512 (PAGE_SIZE<<3), but greater than 256.
 */

#define BUFCTL_END	0xffffFFFF
#define	SLAB_LIMIT	0xffffFFFE
typedef unsigned int kmem_bufctl_t;

/* Max number of objs-per-slab for caches which use off-slab slabs.
 * Needed to avoid a possible looping condition in kmem_cache_grow().
 */
static unsigned long offslab_limit;

/*
 * slab_t
 *
 * Manages the objs in a slab. Placed either at the beginning of mem allocated
 * for a slab, or allocated from a general cache.
 * Slabs are chained into three lists: fully used, partial, fully free slabs.
 */
typedef struct slab_s {
	struct list_head	list;
	unsigned long		colouroff;
	void			*s_mem;		/* including colour offset */
	unsigned int		inuse;		/* num of objs active in slab */
	kmem_bufctl_t		free;
} slab_t;

#define slab_bufctl(slabp) \
	((kmem_bufctl_t *)(((slab_t*)slabp)+1))

/*
 * cpucache_t
 *
 * Per cpu structures
 * The limit is stored in the per-cpu structure to reduce the data cache
 * footprint.
 */
typedef struct cpucache_s {
	unsigned int avail;
	unsigned int limit;
} cpucache_t;

#define cc_entry(cpucache) \
	((void **)(((cpucache_t*)(cpucache))+1))
#define cc_data(cachep) \
	((cachep)->cpudata[smp_processor_id()])

/*
 * kmem_cache_t
 *
 * manages a cache.
 */

#define CACHE_NAMELEN	20	/* max name length for a slab cache */

struct kmem_cache_s {
/* 1) each alloc & free */
	/* full, partial first, then free */
	struct list_head	slabs_full;
	struct list_head	slabs_partial;
	struct list_head	slabs_free;
	unsigned int		objsize;
	unsigned int		flags;	/* constant flags */
	unsigned int		num;	/* # of objs per slab */
	spinlock_t		spinlock;
#ifdef CONFIG_SMP
	unsigned int		batchcount;
#endif

/* 2) slab additions /removals */
	/* order of pgs per slab (2^n) */
	unsigned int		gfporder;

	/* force GFP flags, e.g. GFP_DMA */
	unsigned int		gfpflags;

	size_t			colour;		/* cache colouring range */
	unsigned int		colour_off;	/* colour offset */
	unsigned int		colour_next;	/* cache colouring */
	kmem_cache_t		*slabp_cache;
	unsigned int		growing;
	unsigned int		dflags;		/* dynamic flags */

	/* constructor func */
	void (*ctor)(void *, kmem_cache_t *, unsigned long);

	/* de-constructor func */
	void (*dtor)(void *, kmem_cache_t *, unsigned long);

	unsigned long		failures;

/* 3) cache creation/removal */
	char			name[CACHE_NAMELEN];
	struct list_head	next;
#ifdef CONFIG_SMP
/* 4) per-cpu data */
	cpucache_t		*cpudata[NR_CPUS];
#endif
#if STATS
	unsigned long		num_active;
	unsigned long		num_allocations;
	unsigned long		high_mark;
	unsigned long		grown;
	unsigned long		reaped;
	unsigned long		errors;
#ifdef CONFIG_SMP
	atomic_t		allochit;
	atomic_t		allocmiss;
	atomic_t		freehit;
	atomic_t		freemiss;
#endif
#endif
};

/* internal c_flags */
#define	CFLGS_OFF_SLAB	0x010000UL	/* slab management in own cache */
#define	CFLGS_OPTIMIZE	0x020000UL	/* optimized slab lookup */

/* c_dflags (dynamic flags). Need to hold the spinlock to access this member */
#define	DFLGS_GROWN	0x000001UL	/* don't reap a recently grown */

#define	OFF_SLAB(x)	((x)->flags & CFLGS_OFF_SLAB)
#define	OPTIMIZE(x)	((x)->flags & CFLGS_OPTIMIZE)
#define	GROWN(x)	((x)->dflags & DFLGS_GROWN)

#if STATS
#define	STATS_INC_ACTIVE(x)	((x)->num_active++)
#define	STATS_DEC_ACTIVE(x)	((x)->num_active--)
#define	STATS_INC_ALLOCED(x)	((x)->num_allocations++)
#define	STATS_INC_GROWN(x)	((x)->grown++)
#define	STATS_INC_REAPED(x)	((x)->reaped++)
#define	STATS_SET_HIGH(x)	do { if ((x)->num_active > (x)->high_mark) \
					(x)->high_mark = (x)->num_active; \
				} while (0)
#define	STATS_INC_ERR(x)	((x)->errors++)
#else
#define	STATS_INC_ACTIVE(x)	do { } while (0)
#define	STATS_DEC_ACTIVE(x)	do { } while (0)
#define	STATS_INC_ALLOCED(x)	do { } while (0)
#define	STATS_INC_GROWN(x)	do { } while (0)
#define	STATS_INC_REAPED(x)	do { } while (0)
#define	STATS_SET_HIGH(x)	do { } while (0)
#define	STATS_INC_ERR(x)	do { } while (0)
#endif

#if STATS && defined(CONFIG_SMP)
#define STATS_INC_ALLOCHIT(x)	atomic_inc(&(x)->allochit)
#define STATS_INC_ALLOCMISS(x)	atomic_inc(&(x)->allocmiss)
#define STATS_INC_FREEHIT(x)	atomic_inc(&(x)->freehit)
#define STATS_INC_FREEMISS(x)	atomic_inc(&(x)->freemiss)
#else
#define STATS_INC_ALLOCHIT(x)	do { } while (0)
#define STATS_INC_ALLOCMISS(x)	do { } while (0)
#define STATS_INC_FREEHIT(x)	do { } while (0)
#define STATS_INC_FREEMISS(x)	do { } while (0)
#endif

#if DEBUG
/* Magic nums for obj red zoning.
 * Placed in the first word before and the first word after an obj.
 */
#define	RED_MAGIC1	0x5A2CF071UL	/* when obj is active */
#define	RED_MAGIC2	0x170FC2A5UL	/* when obj is inactive */

/* ...and for poisoning */
#define	POISON_BYTE	0x5a		/* byte value for poisoning */
#define	POISON_END	0xa5		/* end-byte of poisoning */
#endif

/* maximum size of an obj (in 2^order pages) */
#define	MAX_OBJ_ORDER	5	/* 32 pages */

/*
 * Do not go above this order unless 0 objects fit into the slab.
 */
#define	BREAK_GFP_ORDER_HI	2
#define	BREAK_GFP_ORDER_LO	1
static int slab_break_gfp_order = BREAK_GFP_ORDER_LO;

/*
 * Absolute limit for the gfp order
 */
#define	MAX_GFP_ORDER	5	/* 32 pages */

/* Macros for storing/retrieving the cachep and or slab from the
 * global 'mem_map'. These are used to find the slab an obj belongs to.
 * With kfree(), these are used to find the cache which an obj belongs to.
 */
#define	SET_PAGE_CACHE(pg,x)  ((pg)->list.next = (struct list_head *)(x))
#define	GET_PAGE_CACHE(pg)    ((kmem_cache_t *)(pg)->list.next)
#define	SET_PAGE_SLAB(pg,x)   ((pg)->list.prev = (struct list_head *)(x))
#define	GET_PAGE_SLAB(pg)     ((slab_t *)(pg)->list.prev)

/* Size description struct for general caches. */
typedef struct cache_sizes {
	size_t		 cs_size;
	kmem_cache_t	*cs_cachep;
	kmem_cache_t	*cs_dmacachep;
} cache_sizes_t;

static cache_sizes_t cache_sizes[] = {
#if PAGE_SIZE == 4096
	{    32,	NULL, NULL},
#endif
	{    64,	NULL, NULL},
	{   128,	NULL, NULL},
	{   256,	NULL, NULL},
	{   512,	NULL, NULL},
	{  1024,	NULL, NULL},
	{  2048,	NULL, NULL},
	{  4096,	NULL, NULL},
	{  8192,	NULL, NULL},
	{ 16384,	NULL, NULL},
	{ 32768,	NULL, NULL},
	{ 65536,	NULL, NULL},
	{131072,	NULL, NULL},
	{     0,	NULL, NULL}
};

/* internal cache of cache description objs */
static kmem_cache_t cache_cache = {
	slabs_full:	LIST_HEAD_INIT(cache_cache.slabs_full),
	slabs_partial:	LIST_HEAD_INIT(cache_cache.slabs_partial),
	slabs_free:	LIST_HEAD_INIT(cache_cache.slabs_free),
	objsize:	sizeof(kmem_cache_t),
	flags:		SLAB_NO_REAP,
	spinlock:	SPIN_LOCK_UNLOCKED,
	colour_off:	L1_CACHE_BYTES,
	name:		"kmem_cache",
};

/* Guard access to the cache-chain. */
static struct semaphore	cache_chain_sem;

/* Place maintainer for reaping. */
static kmem_cache_t *clock_searchp = &cache_cache;

#define cache_chain (cache_cache.next)

#ifdef CONFIG_SMP
/*
 * chicken and egg problem: delay the per-cpu array allocation
 * until the general caches are up.
 */
static int g_cpucache_up;

static void enable_cpucache (kmem_cache_t *cachep);
static void enable_all_cpucaches (void);
#endif

/* Calculate the num objs, wastage, and bytes left over for a given slab size. */
static void kmem_cache_estimate (unsigned long gfporder, size_t size,
		 int flags, size_t *left_over, unsigned int *num)
{
	int i;
	size_t wastage = PAGE_SIZE<<gfporder;
	size_t extra = 0;
	size_t base = 0;

	if (!(flags & CFLGS_OFF_SLAB)) {
		base = sizeof(slab_t);
		extra = sizeof(kmem_bufctl_t);
	}
	i = 0;
	while (i*size + L1_CACHE_ALIGN(base+i*extra) <= wastage)
		i++;
	if (i > 0)
		i--;

	if (i > SLAB_LIMIT)
		i = SLAB_LIMIT;

	*num = i;
	wastage -= i*size;
	wastage -= L1_CACHE_ALIGN(base+i*extra);
	*left_over = wastage;
}

/* Initialisation - setup the `cache' cache. */
void __init kmem_cache_init(void)
{
	size_t left_over;

	init_MUTEX(&cache_chain_sem);
	INIT_LIST_HEAD(&cache_chain);

	kmem_cache_estimate(0, cache_cache.objsize, 0,
			&left_over, &cache_cache.num);
	if (!cache_cache.num)
		BUG();

	cache_cache.colour = left_over/cache_cache.colour_off;
	cache_cache.colour_next = 0;
}

/* Initialisation - setup remaining internal and general caches.
 * Called after the gfp() functions have been enabled, and before smp_init().
 */
void __init kmem_cache_sizes_init(void)
{
	cache_sizes_t *sizes = cache_sizes;
	char name[20];
	/*
	 * Fragmentation resistance on low memory - only use bigger
	 * page orders on machines with more than 32MB of memory.
	 */
	if (num_physpages > (32 << 20) >> PAGE_SHIFT)
		slab_break_gfp_order = BREAK_GFP_ORDER_HI;
	do {
		/* For performance, all the general caches are L1 aligned.
		 * This should be particularly beneficial on SMP boxes, as it
		 * eliminates "false sharing".
		 * Note for systems short on memory removing the alignment will
		 * allow tighter packing of the smaller caches. */
		sprintf(name,"size-%Zd",sizes->cs_size);
		if (!(sizes->cs_cachep =
			kmem_cache_create(name, sizes->cs_size,
					0, SLAB_HWCACHE_ALIGN, NULL, NULL))) {
			BUG();
		}

		/* Inc off-slab bufctl limit until the ceiling is hit. */
		if (!(OFF_SLAB(sizes->cs_cachep))) {
			offslab_limit = sizes->cs_size-sizeof(slab_t);
			offslab_limit /= 2;
		}
		sprintf(name, "size-%Zd(DMA)",sizes->cs_size);
		sizes->cs_dmacachep = kmem_cache_create(name, sizes->cs_size, 0,
			      SLAB_CACHE_DMA|SLAB_HWCACHE_ALIGN, NULL, NULL);
		if (!sizes->cs_dmacachep)
			BUG();
		sizes++;
	} while (sizes->cs_size);
}

int __init kmem_cpucache_init(void)
{
#ifdef CONFIG_SMP
	g_cpucache_up = 1;
	enable_all_cpucaches();
#endif
	return 0;
}

__initcall(kmem_cpucache_init);

/* Interface to system's page allocator. No need to hold the cache-lock.
 */
static inline void * kmem_getpages (kmem_cache_t *cachep, unsigned long flags)
{
	void	*addr;
	/*
	 * If we requested dmaable memory, we will get it. Even if we
	 * did not request dmaable memory, we might get it, but that
	 * would be relatively rare and ignorable.
	 */
	flags |= cachep->gfpflags;
	addr = (void*) __get_free_pages(flags, cachep->gfporder);
	/* Assume that now we have the pages no one else can legally
	 * mess with the 'struct page's.
	 * However vm_scan() might try to test the structure to see if
	 * it is a named-page or buffer-page.  The members it tests are
	 * of no interest here.....
	 */
	return addr;
}

/* Interface to system's page release. */
static inline void kmem_freepages (kmem_cache_t *cachep, void *addr)
{
	unsigned long i = (1<<cachep->gfporder);
	struct page *page = virt_to_page(addr);

	/* free_pages() does not clear the type bit - we do that.
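The header comment at the top of the file describes the intended usage pattern: one cache per object type, created with kmem_cache_create(), serviced by kmem_cache_alloc()/kmem_cache_free(), and torn down with kmem_cache_destroy() only after all allocations have stopped. The following is a minimal, illustrative sketch (not part of slab.c) of how a 2.4-era kernel client would use that interface; the foo_t structure, foo_ctor() constructor and foo_cachep variable are hypothetical names invented for this example.

/*
 * Illustrative only -- not part of slab.c.  A hypothetical client of the
 * 2.4 slab interface; foo_t, foo_ctor and foo_cachep are invented names.
 */
#include <linux/slab.h>
#include <linux/errno.h>

typedef struct foo_s {
	int	state;
	char	tag[16];
} foo_t;

static kmem_cache_t *foo_cachep;

/* Constructors run without locking (see the SMP notes in the header). */
static void foo_ctor(void *obj, kmem_cache_t *cachep, unsigned long flags)
{
	foo_t *foo = obj;

	foo->state = 0;
	foo->tag[0] = '\0';
}

int foo_cache_setup(void)
{
	/* One cache per object type; hardware alignment avoids false sharing. */
	foo_cachep = kmem_cache_create("foo_cache", sizeof(foo_t), 0,
				       SLAB_HWCACHE_ALIGN, foo_ctor, NULL);
	if (!foo_cachep)
		return -ENOMEM;
	return 0;
}

void foo_cache_use(void)
{
	foo_t *foo = kmem_cache_alloc(foo_cachep, GFP_KERNEL);

	if (!foo)
		return;
	/* ... work with the constructed object ... */
	kmem_cache_free(foo_cachep, foo);
}

void foo_cache_teardown(void)
{
	/* Caller must guarantee no concurrent allocations (see header comment). */
	kmem_cache_destroy(foo_cachep);
}

SLAB_HWCACHE_ALIGN here mirrors what kmem_cache_sizes_init() above requests for the general size-N caches, for the same reason given in its comment: eliminating false sharing between objects that share a cache line.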
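kmem_cache_estimate() above packs as many objects as possible into a slab of 2^gfporder pages, reserving room for the on-slab slab_t header plus one kmem_bufctl_t per object, and reports the leftover bytes that become the cache-colouring range. The stand-alone, user-space program below mirrors that loop so the arithmetic can be checked by hand; the page size, cache-line size and structure sizes are assumptions for a 32-bit machine with 4 kB pages and 32-byte cache lines, not values taken from a real kernel build.

/* User-space mirror of the kmem_cache_estimate() loop, with assumed sizes. */
#include <stdio.h>

#define PAGE_SIZE	4096UL
#define L1_CACHE_BYTES	32UL
#define L1_CACHE_ALIGN(x) (((x) + L1_CACHE_BYTES - 1) & ~(L1_CACHE_BYTES - 1))

#define SIZEOF_SLAB_T	24UL	/* assumed: list_head + colouroff + s_mem + inuse + free */
#define SIZEOF_BUFCTL_T	4UL	/* kmem_bufctl_t is an unsigned int */

int main(void)
{
	unsigned long size = 256;		/* object size to test */
	unsigned long wastage = PAGE_SIZE << 0;	/* gfporder 0: one page */
	unsigned long base = SIZEOF_SLAB_T;	/* on-slab slab management */
	unsigned long extra = SIZEOF_BUFCTL_T;	/* one bufctl per object */
	unsigned long i = 0;

	/* Same loop as kmem_cache_estimate(): objects plus aligned management. */
	while (i * size + L1_CACHE_ALIGN(base + i * extra) <= wastage)
		i++;
	if (i > 0)
		i--;

	wastage -= i * size;
	wastage -= L1_CACHE_ALIGN(base + i * extra);

	printf("%lu objects of %lu bytes per slab, %lu bytes left over "
	       "(%lu colour offsets of %lu bytes)\n",
	       i, size, wastage, wastage / L1_CACHE_BYTES, L1_CACHE_BYTES);
	return 0;
}

With these assumptions a 256-byte object yields 15 objects per one-page slab and 160 leftover bytes, i.e. five 32-byte colour offsets over which successive slabs stagger their first object.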
