

slab.c

Source package: ARM Embedded System Design and Example Development, lab textbook source code (II)
Language: C
Page 1 of 4
	 * The pages have been unlinked from their cache-slab,
	 * but their 'struct page's might be accessed in
	 * vm_scan(). Shouldn't be a worry.
	 */
	while (i--) {
		PageClearSlab(page);
		page++;
	}
	free_pages((unsigned long)addr, cachep->gfporder);
}

#if DEBUG
static inline void kmem_poison_obj (kmem_cache_t *cachep, void *addr)
{
	int size = cachep->objsize;
	if (cachep->flags & SLAB_RED_ZONE) {
		addr += BYTES_PER_WORD;
		size -= 2*BYTES_PER_WORD;
	}
	memset(addr, POISON_BYTE, size);
	*(unsigned char *)(addr+size-1) = POISON_END;
}

static inline int kmem_check_poison_obj (kmem_cache_t *cachep, void *addr)
{
	int size = cachep->objsize;
	void *end;
	if (cachep->flags & SLAB_RED_ZONE) {
		addr += BYTES_PER_WORD;
		size -= 2*BYTES_PER_WORD;
	}
	end = memchr(addr, POISON_END, size);
	if (end != (addr+size-1))
		return 1;
	return 0;
}
#endif

/* Destroy all the objs in a slab, and release the mem back to the system.
 * Before calling the slab must have been unlinked from the cache.
 * The cache-lock is not held/needed.
 */
static void kmem_slab_destroy (kmem_cache_t *cachep, slab_t *slabp)
{
	if (cachep->dtor
#if DEBUG
		|| cachep->flags & (SLAB_POISON | SLAB_RED_ZONE)
#endif
	) {
		int i;
		for (i = 0; i < cachep->num; i++) {
			void* objp = slabp->s_mem+cachep->objsize*i;
#if DEBUG
			if (cachep->flags & SLAB_RED_ZONE) {
				if (*((unsigned long*)(objp)) != RED_MAGIC1)
					BUG();
				if (*((unsigned long*)(objp + cachep->objsize
						-BYTES_PER_WORD)) != RED_MAGIC1)
					BUG();
				objp += BYTES_PER_WORD;
			}
#endif
			if (cachep->dtor)
				(cachep->dtor)(objp, cachep, 0);
#if DEBUG
			if (cachep->flags & SLAB_RED_ZONE) {
				objp -= BYTES_PER_WORD;
			}
			if ((cachep->flags & SLAB_POISON)  &&
				kmem_check_poison_obj(cachep, objp))
				BUG();
#endif
		}
	}

	kmem_freepages(cachep, slabp->s_mem-slabp->colouroff);
	if (OFF_SLAB(cachep))
		kmem_cache_free(cachep->slabp_cache, slabp);
}

/**
 * kmem_cache_create - Create a cache.
 * @name: A string which is used in /proc/slabinfo to identify this cache.
 * @size: The size of objects to be created in this cache.
 * @offset: The offset to use within the page.
 * @flags: SLAB flags
 * @ctor: A constructor for the objects.
 * @dtor: A destructor for the objects.
 *
 * Returns a ptr to the cache on success, NULL on failure.
 * Cannot be called within a int, but can be interrupted.
 * The @ctor is run when new pages are allocated by the cache
 * and the @dtor is run before the pages are handed back.
 * The flags are
 *
 * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
 * to catch references to uninitialised memory.
 *
 * %SLAB_RED_ZONE - Insert `Red' zones around the allocated memory to check
 * for buffer overruns.
 *
 * %SLAB_NO_REAP - Don't automatically reap this cache when we're under
 * memory pressure.
 *
 * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
 * cacheline.  This can be beneficial if you're counting cycles as closely
 * as davem.
 */
kmem_cache_t *
kmem_cache_create (const char *name, size_t size, size_t offset,
	unsigned long flags, void (*ctor)(void*, kmem_cache_t *, unsigned long),
	void (*dtor)(void*, kmem_cache_t *, unsigned long))
{
	const char *func_nm = KERN_ERR "kmem_create: ";
	size_t left_over, align, slab_size;
	kmem_cache_t *cachep = NULL;

	/*
	 * Sanity checks... these are all serious usage bugs.
	 */
	if ((!name) ||
		((strlen(name) >= CACHE_NAMELEN - 1)) ||
		in_interrupt() ||
		(size < BYTES_PER_WORD) ||
		(size > (1<<MAX_OBJ_ORDER)*PAGE_SIZE) ||
		(dtor && !ctor) ||
		(offset < 0 || offset > size))
			BUG();

#if DEBUG
	if ((flags & SLAB_DEBUG_INITIAL) && !ctor) {
		/* No constructor, but inital state check requested */
		printk("%sNo con, but init state check requested - %s\n", func_nm, name);
		flags &= ~SLAB_DEBUG_INITIAL;
	}

	if ((flags & SLAB_POISON) && ctor) {
		/* request for poisoning, but we can't do that with a constructor */
		printk("%sPoisoning requested, but con given - %s\n", func_nm, name);
		flags &= ~SLAB_POISON;
	}
#if FORCED_DEBUG
	if ((size < (PAGE_SIZE>>3)) && !(flags & SLAB_MUST_HWCACHE_ALIGN))
		/*
		 * do not red zone large object, causes severe
		 * fragmentation.
		 */
		flags |= SLAB_RED_ZONE;
	if (!ctor)
		flags |= SLAB_POISON;
#endif
#endif

	/*
	 * Always checks flags, a caller might be expecting debug
	 * support which isn't available.
	 */
	if (flags & ~CREATE_MASK)
		BUG();

	/* Get cache's description obj. */
	cachep = (kmem_cache_t *) kmem_cache_alloc(&cache_cache, SLAB_KERNEL);
	if (!cachep)
		goto opps;
	memset(cachep, 0, sizeof(kmem_cache_t));

	/* Check that size is in terms of words.  This is needed to avoid
	 * unaligned accesses for some archs when redzoning is used, and makes
	 * sure any on-slab bufctl's are also correctly aligned.
	 */
	if (size & (BYTES_PER_WORD-1)) {
		size += (BYTES_PER_WORD-1);
		size &= ~(BYTES_PER_WORD-1);
		printk("%sForcing size word alignment - %s\n", func_nm, name);
	}

#if DEBUG
	if (flags & SLAB_RED_ZONE) {
		/*
		 * There is no point trying to honour cache alignment
		 * when redzoning.
		 */
		flags &= ~SLAB_HWCACHE_ALIGN;
		size += 2*BYTES_PER_WORD;	/* words for redzone */
	}
#endif
	align = BYTES_PER_WORD;
	if (flags & SLAB_HWCACHE_ALIGN)
		align = L1_CACHE_BYTES;

	/* Determine if the slab management is 'on' or 'off' slab. */
	if (size >= (PAGE_SIZE>>3))
		/*
		 * Size is large, assume best to place the slab management obj
		 * off-slab (should allow better packing of objs).
		 */
		flags |= CFLGS_OFF_SLAB;

	if (flags & SLAB_HWCACHE_ALIGN) {
		/* Need to adjust size so that objs are cache aligned. */
		/* Small obj size, can get at least two per cache line. */
		/* FIXME: only power of 2 supported, was better */
		while (size < align/2)
			align /= 2;
		size = (size+align-1)&(~(align-1));
	}

	/* Cal size (in pages) of slabs, and the num of objs per slab.
	 * This could be made much more intelligent.  For now, try to avoid
	 * using high page-orders for slabs.  When the gfp() funcs are more
	 * friendly towards high-order requests, this should be changed.
	 */
	do {
		unsigned int break_flag = 0;
cal_wastage:
		kmem_cache_estimate(cachep->gfporder, size, flags,
						&left_over, &cachep->num);
		if (break_flag)
			break;
		if (cachep->gfporder >= MAX_GFP_ORDER)
			break;
		if (!cachep->num)
			goto next;
		if (flags & CFLGS_OFF_SLAB && cachep->num > offslab_limit) {
			/* Oops, this num of objs will cause problems. */
			cachep->gfporder--;
			break_flag++;
			goto cal_wastage;
		}

		/*
		 * Large num of objs is good, but v. large slabs are currently
		 * bad for the gfp()s.
		 */
		if (cachep->gfporder >= slab_break_gfp_order)
			break;

		if ((left_over*8) <= (PAGE_SIZE<<cachep->gfporder))
			break;	/* Acceptable internal fragmentation. */
next:
		cachep->gfporder++;
	} while (1);

	if (!cachep->num) {
		printk("kmem_cache_create: couldn't create cache %s.\n", name);
		kmem_cache_free(&cache_cache, cachep);
		cachep = NULL;
		goto opps;
	}
	slab_size = L1_CACHE_ALIGN(cachep->num*sizeof(kmem_bufctl_t)+sizeof(slab_t));

	/*
	 * If the slab has been placed off-slab, and we have enough space then
	 * move it on-slab. This is at the expense of any extra colouring.
	 */
	if (flags & CFLGS_OFF_SLAB && left_over >= slab_size) {
		flags &= ~CFLGS_OFF_SLAB;
		left_over -= slab_size;
	}

	/* Offset must be a multiple of the alignment. */
	offset += (align-1);
	offset &= ~(align-1);
	if (!offset)
		offset = L1_CACHE_BYTES;
	cachep->colour_off = offset;
	cachep->colour = left_over/offset;

	/* init remaining fields */
	if (!cachep->gfporder && !(flags & CFLGS_OFF_SLAB))
		flags |= CFLGS_OPTIMIZE;

	cachep->flags = flags;
	cachep->gfpflags = 0;
	if (flags & SLAB_CACHE_DMA)
		cachep->gfpflags |= GFP_DMA;
	spin_lock_init(&cachep->spinlock);
	cachep->objsize = size;
	INIT_LIST_HEAD(&cachep->slabs_full);
	INIT_LIST_HEAD(&cachep->slabs_partial);
	INIT_LIST_HEAD(&cachep->slabs_free);

	if (flags & CFLGS_OFF_SLAB)
		cachep->slabp_cache = kmem_find_general_cachep(slab_size,0);
	cachep->ctor = ctor;
	cachep->dtor = dtor;
	/* Copy name over so we don't have problems with unloaded modules */
	strcpy(cachep->name, name);

#ifdef CONFIG_SMP
	if (g_cpucache_up)
		enable_cpucache(cachep);
#endif
	/* Need the semaphore to access the chain. */
	down(&cache_chain_sem);
	{
		struct list_head *p;

		list_for_each(p, &cache_chain) {
			kmem_cache_t *pc = list_entry(p, kmem_cache_t, next);

			/* The name field is constant - no lock needed. */
			if (!strcmp(pc->name, name))
				BUG();
		}
	}

	/* There is no reason to lock our new cache before we
	 * link it in - no one knows about it yet...
	 */
	list_add(&cachep->next, &cache_chain);
	up(&cache_chain_sem);
opps:
	return cachep;
}

#if DEBUG
/*
 * This check if the kmem_cache_t pointer is chained in the cache_cache
 * list. -arca
 */
static int is_chained_kmem_cache(kmem_cache_t * cachep)
{
	struct list_head *p;
	int ret = 0;

	/* Find the cache in the chain of caches. */
	down(&cache_chain_sem);
	list_for_each(p, &cache_chain) {
		if (p == &cachep->next) {
			ret = 1;
			break;
		}
	}
	up(&cache_chain_sem);

	return ret;
}
#else
#define is_chained_kmem_cache(x) 1
#endif

#ifdef CONFIG_SMP
/*
 * Waits for all CPUs to execute func().
 */
static void smp_call_function_all_cpus(void (*func) (void *arg), void *arg)
{
	local_irq_disable();
	func(arg);
	local_irq_enable();

	if (smp_call_function(func, arg, 1, 1))
		BUG();
}

typedef struct ccupdate_struct_s
{
	kmem_cache_t *cachep;
	cpucache_t *new[NR_CPUS];
} ccupdate_struct_t;

static void do_ccupdate_local(void *info)
{
	ccupdate_struct_t *new = (ccupdate_struct_t *)info;
	cpucache_t *old = cc_data(new->cachep);

	cc_data(new->cachep) = new->new[smp_processor_id()];
	new->new[smp_processor_id()] = old;
}

static void free_block (kmem_cache_t* cachep, void** objpp, int len);

static void drain_cpu_caches(kmem_cache_t *cachep)
{
	ccupdate_struct_t new;
	int i;

	memset(&new.new,0,sizeof(new.new));

	new.cachep = cachep;

	down(&cache_chain_sem);
	smp_call_function_all_cpus(do_ccupdate_local, (void *)&new);

	for (i = 0; i < smp_num_cpus; i++) {
		cpucache_t* ccold = new.new[cpu_logical_map(i)];
		if (!ccold || (ccold->avail == 0))
			continue;
		local_irq_disable();
		free_block(cachep, cc_entry(ccold), ccold->avail);
		local_irq_enable();
		ccold->avail = 0;
	}

	smp_call_function_all_cpus(do_ccupdate_local, (void *)&new);
	up(&cache_chain_sem);
}

#else
#define drain_cpu_caches(cachep)	do { } while (0)
#endif

static int __kmem_cache_shrink(kmem_cache_t *cachep)
{
	slab_t *slabp;
	int ret;

	drain_cpu_caches(cachep);

	spin_lock_irq(&cachep->spinlock);

	/* If the cache is growing, stop shrinking. */
	while (!cachep->growing) {
		struct list_head *p;

		p = cachep->slabs_free.prev;
		if (p == &cachep->slabs_free)
			break;

		slabp = list_entry(cachep->slabs_free.prev, slab_t, list);
#if DEBUG
		if (slabp->inuse)
			BUG();
#endif
		list_del(&slabp->list);

		spin_unlock_irq(&cachep->spinlock);
		kmem_slab_destroy(cachep, slabp);
		spin_lock_irq(&cachep->spinlock);
	}
	ret = !list_empty(&cachep->slabs_full) || !list_empty(&cachep->slabs_partial);
	spin_unlock_irq(&cachep->spinlock);
	return ret;
}

/**
 * kmem_cache_shrink - Shrink a cache.
 * @cachep: The cache to shrink.
 *
 * Releases as many slabs as possible for a cache.
 * To help debugging, a zero exit status indicates all slabs were released.
 */
int kmem_cache_shrink(kmem_cache_t *cachep)
{
	if (!cachep || in_interrupt() || !is_chained_kmem_cache(cachep))
		BUG();

	return __kmem_cache_shrink(cachep);
}

/**
 * kmem_cache_destroy - delete a cache
 * @cachep: the cache to destroy
 *
 * Remove a kmem_cache_t object from the slab cache.
 * Returns 0 on success.
 *
 * It is expected this function will be called by a module when it is
 * unloaded.  This will remove the cache completely, and avoid a duplicate
 * cache being allocated each time a module is loaded and unloaded, if the
 * module doesn't have persistent in-kernel storage across loads and unloads.
 *
 * The caller must guarantee that noone will allocate memory from the cache
 * during the kmem_cache_destroy().
 */
int kmem_cache_destroy (kmem_cache_t * cachep)
{
	if (!cachep || in_interrupt() || cachep->growing)
		BUG();

	/* Find the cache in the chain of caches. */
	down(&cache_chain_sem);
	/* the chain is never empty, cache_cache is never destroyed */
	if (clock_searchp == cachep)
		clock_searchp = list_entry(cachep->next.next,
						kmem_cache_t, next);
	list_del(&cachep->next);
	up(&cache_chain_sem);

	if (__kmem_cache_shrink(cachep)) {
		printk(KERN_ERR "kmem_cache_destroy: Can't free all objects %p\n",
		       cachep);
		down(&cache_chain_sem);
		list_add(&cachep->next,&cache_chain);
		up(&cache_chain_sem);
		return 1;
	}

#ifdef CONFIG_SMP
	{
		int i;
		for (i = 0; i < NR_CPUS; i++)
			kfree(cachep->cpudata[i]);
	}
#endif
	kmem_cache_free(&cache_cache, cachep);

	return 0;
}

/* Get the memory for a slab management obj. */
static inline slab_t * kmem_cache_slabmgmt (kmem_cache_t *cachep,
			void *objp, int colour_off, int local_flags)
{
	slab_t *slabp;

	if (OFF_SLAB(cachep)) {
		/* Slab management obj is off-slab. */
		slabp = kmem_cache_alloc(cachep->slabp_cache, local_flags);
		if (!slabp)
			return NULL;
	} else {
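The functions above make up the cache-management half of the 2.4-era slab API. As a reading aid, the fragment below is a minimal usage sketch and not part of slab.c: the struct my_obj type, the my_obj_ctor constructor and the "my_obj" cache name are hypothetical, while kmem_cache_create(), kmem_cache_alloc(), kmem_cache_free(), kmem_cache_destroy() and the SLAB_HWCACHE_ALIGN/SLAB_KERNEL flags are the interfaces documented in the listing itself.

/* Usage sketch: hypothetical client of the API above, 2.4-era kernel module. */
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/module.h>

struct my_obj {
	int state;
};

static kmem_cache_t *my_obj_cache;

/* Run once per object when the cache allocates a fresh slab page. */
static void my_obj_ctor(void *p, kmem_cache_t *cachep, unsigned long flags)
{
	((struct my_obj *)p)->state = 0;
}

static int __init my_obj_init(void)
{
	/* offset 0, hardware cache-line alignment, constructor but no destructor */
	my_obj_cache = kmem_cache_create("my_obj", sizeof(struct my_obj), 0,
					 SLAB_HWCACHE_ALIGN, my_obj_ctor, NULL);
	if (!my_obj_cache)
		return -ENOMEM;
	return 0;
}

static void __exit my_obj_exit(void)
{
	/* Every object must have been freed back to the cache before this point. */
	if (kmem_cache_destroy(my_obj_cache))
		printk(KERN_ERR "my_obj: cache still has live objects\n");
}

module_init(my_obj_init);
module_exit(my_obj_exit);

Individual objects would then come from kmem_cache_alloc(my_obj_cache, SLAB_KERNEL) and go back via kmem_cache_free(my_obj_cache, obj), exactly as cache_cache itself is used inside kmem_cache_create() above.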

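The sizing loop in kmem_cache_create() comes down to two pieces of arithmetic: round the object size up to the word size and, optionally, the L1 cache-line size, then grow the page order until the leftover space in a slab is at most one eighth of the slab ((left_over*8) <= (PAGE_SIZE << gfporder)). The standalone sketch below re-derives those numbers in user space; the 32-byte cache line, 4 KiB page and 100-byte object are assumptions chosen only for the example, and the per-slab management overhead (slab_t plus the bufctl array) that kmem_cache_estimate() accounts for, as well as the off-slab and maximum-order limits, are ignored here.

/* Standalone sketch of the slab sizing arithmetic (assumed constants, no slab_t/bufctl overhead). */
#include <stdio.h>

#define BYTES_PER_WORD	sizeof(void *)
#define L1_CACHE_BYTES	32UL		/* assumption for the example */
#define PAGE_SIZE	4096UL		/* assumption for the example */

int main(void)
{
	unsigned long size = 100;	/* requested object size (example) */
	unsigned long align = L1_CACHE_BYTES;
	unsigned long gfporder, num, left_over;

	/* word alignment, then cache-line alignment, as in kmem_cache_create() */
	size = (size + BYTES_PER_WORD - 1) & ~(BYTES_PER_WORD - 1);
	while (size < align / 2)	/* small objects: pack several per cache line */
		align /= 2;
	size = (size + align - 1) & ~(align - 1);	/* 100 bytes -> 128 bytes here */

	for (gfporder = 0; ; gfporder++) {
		unsigned long slab_bytes = PAGE_SIZE << gfporder;
		num = slab_bytes / size;
		left_over = slab_bytes - num * size;
		/* accept once internal fragmentation is at most 1/8 of the slab */
		if (left_over * 8 <= slab_bytes)
			break;
	}
	printf("objsize=%lu order=%lu objs/slab=%lu left_over=%lu\n",
	       size, gfporder, num, left_over);
	return 0;
}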