
hugetlb.c

The latest stable Linux kernel memory-management module source code (hugetlb)

Language: C

Page 1 of 5
/* (tail of vma_needs_reservation(); the start of the function precedes
 * this excerpt) */
        if (err < 0)
            return err;
        return 0;
    }
}

static void vma_commit_reservation(struct hstate *h,
            struct vm_area_struct *vma, unsigned long addr)
{
    struct address_space *mapping = vma->vm_file->f_mapping;
    struct inode *inode = mapping->host;

    if (vma->vm_flags & VM_SHARED) {
        pgoff_t idx = vma_hugecache_offset(h, vma, addr);
        region_add(&inode->i_mapping->private_list, idx, idx + 1);
    } else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
        pgoff_t idx = vma_hugecache_offset(h, vma, addr);
        struct resv_map *reservations = vma_resv_map(vma);

        /* Mark this page used in the map. */
        region_add(&reservations->regions, idx, idx + 1);
    }
}

static struct page *alloc_huge_page(struct vm_area_struct *vma,
                    unsigned long addr, int avoid_reserve)
{
    struct hstate *h = hstate_vma(vma);
    struct page *page;
    struct address_space *mapping = vma->vm_file->f_mapping;
    struct inode *inode = mapping->host;
    long chg;   /* signed: vma_needs_reservation() can return a negative errno */

    /*
     * Processes that did not create the mapping will have no reserves
     * and will not have accounted against quota. Check that the quota
     * can be made before satisfying the allocation.
     * MAP_NORESERVE mappings may also need pages and quota allocated
     * if no reserve mapping overlaps.
     */
    chg = vma_needs_reservation(h, vma, addr);
    if (chg < 0)
        return ERR_PTR(chg);
    if (chg)
        if (hugetlb_get_quota(inode->i_mapping, chg))
            return ERR_PTR(-ENOSPC);

    spin_lock(&hugetlb_lock);
    page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve);
    spin_unlock(&hugetlb_lock);

    if (!page) {
        page = alloc_buddy_huge_page(h, vma, addr);
        if (!page) {
            hugetlb_put_quota(inode->i_mapping, chg);
            return ERR_PTR(-VM_FAULT_OOM);
        }
    }

    set_page_refcounted(page);
    set_page_private(page, (unsigned long) mapping);

    vma_commit_reservation(h, vma, addr);

    return page;
}

int __weak alloc_bootmem_huge_page(struct hstate *h)
{
    struct huge_bootmem_page *m;
    int nr_nodes = nodes_weight(node_online_map);

    while (nr_nodes) {
        void *addr;

        addr = __alloc_bootmem_node_nopanic(
                NODE_DATA(h->hugetlb_next_nid),
                huge_page_size(h), huge_page_size(h), 0);

        if (addr) {
            /*
             * Use the beginning of the huge page to store the
             * huge_bootmem_page struct (until gather_bootmem
             * puts them into the mem_map).
             */
            m = addr;
            goto found;
        }
        hstate_next_node(h);
        nr_nodes--;
    }
    return 0;

found:
    BUG_ON((unsigned long)virt_to_phys(m) & (huge_page_size(h) - 1));
    /* Put them into a private list first because mem_map is not up yet */
    list_add(&m->list, &huge_boot_pages);
    m->hstate = h;
    return 1;
}

static void prep_compound_huge_page(struct page *page, int order)
{
    if (unlikely(order > (MAX_ORDER - 1)))
        prep_compound_gigantic_page(page, order);
    else
        prep_compound_page(page, order);
}

/* Put bootmem huge pages into the standard lists after mem_map is up */
static void __init gather_bootmem_prealloc(void)
{
    struct huge_bootmem_page *m;

    list_for_each_entry(m, &huge_boot_pages, list) {
        struct page *page = virt_to_page(m);
        struct hstate *h = m->hstate;

        __ClearPageReserved(page);
        WARN_ON(page_count(page) != 1);
        prep_compound_huge_page(page, h->order);
        prep_new_huge_page(h, page, page_to_nid(page));
    }
}

static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
{
    unsigned long i;

    for (i = 0; i < h->max_huge_pages; ++i) {
        if (h->order >= MAX_ORDER) {
            if (!alloc_bootmem_huge_page(h))
                break;
        } else if (!alloc_fresh_huge_page(h))
            break;
    }
    h->max_huge_pages = i;
}

static void __init hugetlb_init_hstates(void)
{
    struct hstate *h;

    for_each_hstate(h) {
        /* oversize hugepages were init'ed in early boot */
        if (h->order < MAX_ORDER)
            hugetlb_hstate_alloc_pages(h);
    }
}

static char * __init memfmt(char *buf, unsigned long n)
{
    if (n >= (1UL << 30))
        sprintf(buf, "%lu GB", n >> 30);
    else if (n >= (1UL << 20))
        sprintf(buf, "%lu MB", n >> 20);
    else
        sprintf(buf, "%lu KB", n >> 10);
    return buf;
}

static void __init report_hugepages(void)
{
    struct hstate *h;

    for_each_hstate(h) {
        char buf[32];

        printk(KERN_INFO "HugeTLB registered %s page size, "
                 "pre-allocated %ld pages\n",
            memfmt(buf, huge_page_size(h)),
            h->free_huge_pages);
    }
}

#ifdef CONFIG_HIGHMEM
static void try_to_free_low(struct hstate *h, unsigned long count)
{
    int i;

    if (h->order >= MAX_ORDER)
        return;

    for (i = 0; i < MAX_NUMNODES; ++i) {
        struct page *page, *next;
        struct list_head *freel = &h->hugepage_freelists[i];

        list_for_each_entry_safe(page, next, freel, lru) {
            if (count >= h->nr_huge_pages)
                return;
            if (PageHighMem(page))
                continue;
            list_del(&page->lru);
            update_and_free_page(h, page);
            h->free_huge_pages--;
            h->free_huge_pages_node[page_to_nid(page)]--;
        }
    }
}
#else
static inline void try_to_free_low(struct hstate *h, unsigned long count)
{
}
#endif

#define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages)
static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count)
{
    unsigned long min_count, ret;

    if (h->order >= MAX_ORDER)
        return h->max_huge_pages;

    /*
     * Increase the pool size.
     * First take pages out of surplus state.  Then make up the
     * remaining difference by allocating fresh huge pages.
     *
     * We might race with alloc_buddy_huge_page() here and be unable
     * to convert a surplus huge page to a normal huge page. That is
     * not critical, though, it just means the overall size of the
     * pool might be one hugepage larger than it needs to be, but
     * within all the constraints specified by the sysctls.
     */
    spin_lock(&hugetlb_lock);
    while (h->surplus_huge_pages && count > persistent_huge_pages(h)) {
        if (!adjust_pool_surplus(h, -1))
            break;
    }

    while (count > persistent_huge_pages(h)) {
        /*
         * If this allocation races such that we no longer need the
         * page, free_huge_page will handle it by freeing the page
         * and reducing the surplus.
         */
        spin_unlock(&hugetlb_lock);
        ret = alloc_fresh_huge_page(h);
        spin_lock(&hugetlb_lock);
        if (!ret)
            goto out;
    }

    /*
     * Decrease the pool size.
     * First return free pages to the buddy allocator (being careful
     * to keep enough around to satisfy reservations).  Then place
     * pages into surplus state as needed so the pool will shrink
     * to the desired size as pages become free.
     *
     * By placing pages into the surplus state independent of the
     * overcommit value, we are allowing the surplus pool size to
     * exceed overcommit. There are few sane options here. Since
     * alloc_buddy_huge_page() is checking the global counter,
     * though, we'll note that we're not allowed to exceed surplus
     * and won't grow the pool anywhere else. Not until one of the
     * sysctls are changed, or the surplus pages go out of use.
     */
    min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages;
    min_count = max(count, min_count);
    try_to_free_low(h, min_count);
    while (min_count < persistent_huge_pages(h)) {
        struct page *page = dequeue_huge_page(h);

        if (!page)
            break;
        update_and_free_page(h, page);
    }
    while (count < persistent_huge_pages(h)) {
        if (!adjust_pool_surplus(h, 1))
            break;
    }
out:
    ret = persistent_huge_pages(h);
    spin_unlock(&hugetlb_lock);
    return ret;
}

#define HSTATE_ATTR_RO(_name) \
    static struct kobj_attribute _name##_attr = __ATTR_RO(_name)

#define HSTATE_ATTR(_name) \
    static struct kobj_attribute _name##_attr = \
        __ATTR(_name, 0644, _name##_show, _name##_store)

static struct kobject *hugepages_kobj;
static struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];

static struct hstate *kobj_to_hstate(struct kobject *kobj)
{
    int i;

    for (i = 0; i < HUGE_MAX_HSTATE; i++)
        if (hstate_kobjs[i] == kobj)
            return &hstates[i];
    BUG();
    return NULL;
}

static ssize_t nr_hugepages_show(struct kobject *kobj,
                    struct kobj_attribute *attr, char *buf)
{
    struct hstate *h = kobj_to_hstate(kobj);

    return sprintf(buf, "%lu\n", h->nr_huge_pages);
}

static ssize_t nr_hugepages_store(struct kobject *kobj,
        struct kobj_attribute *attr, const char *buf, size_t count)
{
    int err;
    unsigned long input;
    struct hstate *h = kobj_to_hstate(kobj);

    err = strict_strtoul(buf, 10, &input);
    if (err)
        return 0;

    h->max_huge_pages = set_max_huge_pages(h, input);

    return count;
}
HSTATE_ATTR(nr_hugepages);

static ssize_t nr_overcommit_hugepages_show(struct kobject *kobj,
                    struct kobj_attribute *attr, char *buf)
{
    struct hstate *h = kobj_to_hstate(kobj);

    return sprintf(buf, "%lu\n", h->nr_overcommit_huge_pages);
}

static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj,
        struct kobj_attribute *attr, const char *buf, size_t count)
{
    int err;
    unsigned long input;
    struct hstate *h = kobj_to_hstate(kobj);

    err = strict_strtoul(buf, 10, &input);
    if (err)
        return 0;

    spin_lock(&hugetlb_lock);
    h->nr_overcommit_huge_pages = input;
    spin_unlock(&hugetlb_lock);

    return count;
}
HSTATE_ATTR(nr_overcommit_hugepages);

static ssize_t free_hugepages_show(struct kobject *kobj,
                    struct kobj_attribute *attr, char *buf)
{
    struct hstate *h = kobj_to_hstate(kobj);

    return sprintf(buf, "%lu\n", h->free_huge_pages);
}
HSTATE_ATTR_RO(free_hugepages);

static ssize_t resv_hugepages_show(struct kobject *kobj,
                    struct kobj_attribute *attr, char *buf)
{
    struct hstate *h = kobj_to_hstate(kobj);

    return sprintf(buf, "%lu\n", h->resv_huge_pages);
}
HSTATE_ATTR_RO(resv_hugepages);

static ssize_t surplus_hugepages_show(struct kobject *kobj,
                    struct kobj_attribute *attr, char *buf)
{
    struct hstate *h = kobj_to_hstate(kobj);

    return sprintf(buf, "%lu\n", h->surplus_huge_pages);
}
HSTATE_ATTR_RO(surplus_hugepages);

static struct attribute *hstate_attrs[] = {
    &nr_hugepages_attr.attr,
    &nr_overcommit_hugepages_attr.attr,
    &free_hugepages_attr.attr,
    &resv_hugepages_attr.attr,
    &surplus_hugepages_attr.attr,
    NULL,
};

static struct attribute_group hstate_attr_group = {
    .attrs = hstate_attrs,
};

static int __init hugetlb_sysfs_add_hstate(struct hstate *h)
{
    int retval;

    hstate_kobjs[h - hstates] = kobject_create_and_add(h->name,
                            hugepages_kobj);
    if (!hstate_kobjs[h - hstates])
        return -ENOMEM;

    retval = sysfs_create_group(hstate_kobjs[h - hstates],
                            &hstate_attr_group);
    if (retval)
        kobject_put(hstate_kobjs[h - hstates]);

    return retval;
}

static void __init hugetlb_sysfs_init(void)
{
    struct hstate *h;
    int err;

    hugepages_kobj = kobject_create_and_add("hugepages", mm_kobj);
    if (!hugepages_kobj)
        return;

    for_each_hstate(h) {
        err = hugetlb_sysfs_add_hstate(h);
        if (err)
            printk(KERN_ERR "Hugetlb: Unable to add hstate %s\n",
                                h->name);
    }
}

static void __exit hugetlb_exit(void)
{
    struct hstate *h;

    for_each_hstate(h) {
        kobject_put(hstate_kobjs[h - hstates]);
    }

    kobject_put(hugepages_kobj);
}
module_exit(hugetlb_exit);

static int __init hugetlb_init(void)
{
    /*
     * Some platforms decide whether they support huge pages at boot
     * time. On these, such as powerpc, HPAGE_SHIFT is set to 0 when
     * there is no such support.
     */
    if (HPAGE_SHIFT == 0)
        return 0;

    if (!size_to_hstate(default_hstate_size)) {
        default_hstate_size = HPAGE_SIZE;
        if (!size_to_hstate(default_hstate_size))
            hugetlb_add_hstate(HUGETLB_PAGE_ORDER);
    }
    default_hstate_idx = size_to_hstate(default_hstate_size) - hstates;
    if (default_hstate_max_huge_pages)
        default_hstate.max_huge_pages = default_hstate_max_huge_pages;

    hugetlb_init_hstates();

    gather_bootmem_prealloc();

    report_hugepages();

    hugetlb_sysfs_init();

    return 0;
}
module_init(hugetlb_init);

/* Should be called on processing a hugepagesz=... option */
void __init hugetlb_add_hstate(unsigned order)
{
    struct hstate *h;
    unsigned long i;

    if (size_to_hstate(PAGE_SIZE << order)) {
        printk(KERN_WARNING "hugepagesz= specified twice, ignoring\n");
        return;
    }
    BUG_ON(max_hstate >= HUGE_MAX_HSTATE);
    BUG_ON(order == 0);
    h = &hstates[max_hstate++];
    h->order = order;
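The kobject and attribute plumbing above publishes each hstate's counters under /sys/kernel/mm/hugepages/, one directory per hstate named after h->name (hugepages-2048kB for the x86 2 MB size). A minimal userspace sketch of driving that interface follows; the hugepages-2048kB directory name and the target pool size of 64 are illustrative assumptions, and writing nr_hugepages requires root.

/*
 * Sketch: resize the huge page pool through the sysfs files created by
 * hugetlb_sysfs_add_hstate() above, then read the counters back.
 * The hugepages-2048kB path assumes a 2 MB default hstate; adjust it
 * for the running kernel.
 */
#include <stdio.h>
#include <stdlib.h>

#define POOL "/sys/kernel/mm/hugepages/hugepages-2048kB/"

static unsigned long read_counter(const char *name)
{
    char path[256];
    unsigned long val = 0;
    FILE *f;

    snprintf(path, sizeof(path), POOL "%s", name);
    f = fopen(path, "r");
    if (!f) {
        perror(path);
        exit(EXIT_FAILURE);
    }
    if (fscanf(f, "%lu", &val) != 1)
        val = 0;
    fclose(f);
    return val;
}

int main(void)
{
    FILE *f = fopen(POOL "nr_hugepages", "w");

    if (!f) {
        perror(POOL "nr_hugepages");
        return 1;
    }
    /* Lands in nr_hugepages_store() -> set_max_huge_pages(). */
    fprintf(f, "64\n");
    fclose(f);

    printf("nr_hugepages:   %lu\n", read_counter("nr_hugepages"));
    printf("free_hugepages: %lu\n", read_counter("free_hugepages"));
    printf("surplus:        %lu\n", read_counter("surplus_hugepages"));
    return 0;
}

If the request cannot be fully satisfied, set_max_huge_pages() returns the number of persistent pages it actually reached, so nr_hugepages may read back smaller than the value written; nr_overcommit_hugepages bounds the extra surplus pages that alloc_buddy_huge_page() may create on demand.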
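At the other end, alloc_huge_page() near the top of this page runs when a task first touches a hugetlbfs-backed mapping: mmap() takes the reservation that vma_needs_reservation() later finds, and vma_commit_reservation() marks the page used once the fault succeeds. A hedged sketch of that user-visible sequence, assuming hugetlbfs is mounted at /mnt/huge (mount -t hugetlbfs none /mnt/huge) and a 2 MB huge page size:

/*
 * Sketch: fault one huge page in through hugetlbfs, exercising the
 * reservation path (vma_needs_reservation -> alloc_huge_page ->
 * vma_commit_reservation). The /mnt/huge mount point and the 2 MB
 * page size are assumptions; the pool must hold at least one free page.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

#define HPAGE_SZ (2UL * 1024 * 1024)    /* assumed default huge page size */

int main(void)
{
    int fd = open("/mnt/huge/demo", O_CREAT | O_RDWR, 0600);
    char *p;

    if (fd < 0) {
        perror("open /mnt/huge/demo");
        return 1;
    }
    /* mmap() reserves the page: resv_huge_pages rises. */
    p = mmap(NULL, HPAGE_SZ, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
    if (p == MAP_FAILED) {
        perror("mmap");
        close(fd);
        return 1;
    }
    /* First touch faults the page in via alloc_huge_page(). */
    memset(p, 0, HPAGE_SZ);
    printf("huge page mapped at %p\n", (void *)p);

    munmap(p, HPAGE_SZ);
    close(fd);
    unlink("/mnt/huge/demo");
    return 0;
}

Because the reservation is taken at mmap() time for this shared mapping, the later fault should be satisfiable from the reserve rather than dying with SIGBUS; MAP_NORESERVE, mentioned in the alloc_huge_page() comment, opts out of that guarantee.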
