亚洲欧美第一页_禁久久精品乱码_粉嫩av一区二区三区免费野_久草精品视频

? 歡迎來到蟲蟲下載站! | ?? 資源下載 ?? 資源專輯 ?? 關于我們
? 蟲蟲下載站

?? init.c

?? 一個2.4.21版本的嵌入式linux內核
?? C
字號:
/*
 *  linux/arch/x86_64/mm/init.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *  Copyright (C) 2000  Pavel Machek <pavel@suse.cz>
 *  Copyright (C) 2002  Andi Kleen <ak@suse.de>
 */

#include <linux/config.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/blk.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>

#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/apic.h>
#include <asm/tlb.h>
#include <asm/pda.h>
#include <asm/mmu_context.h>
#include <asm/proto.h>

/* Per-CPU TLB shootdown state used by the generic mmu_gather machinery. */
mmu_gather_t mmu_gathers[NR_CPUS];

/* Running count of pages handed to the page allocator; reported by si_meminfo(). */
static unsigned long totalram_pages;

/*
 * Shrink the per-CPU page-table quicklists (tracked in the PDA as
 * pgtable_cache_sz).  If the cache exceeds 'high', pop cached pgd/pmd/pte
 * pages off their quicklists and free them for real until the cache drops
 * to 'low'.  Returns the number of pages freed.
 *
 * Note the *_alloc_one_fast() calls only pop from the quicklist (guarded
 * by the read_pda() checks above them); *_free_slow() then releases the
 * page to the page allocator.
 */
int do_check_pgt_cache(int low, int high)
{
	int freed = 0;
	if (read_pda(pgtable_cache_sz) > high) {
		do {
			if (read_pda(pgd_quick)) {
				pgd_free_slow(pgd_alloc_one_fast());
				freed++;
			}
			if (read_pda(pmd_quick)) {
				pmd_free_slow(pmd_alloc_one_fast(NULL, 0));
				freed++;
			}
			if (read_pda(pte_quick)) {
				pte_free_slow(pte_alloc_one_fast(NULL, 0));
				freed++;
			}
		} while (read_pda(pgtable_cache_sz) > low);
	}
	return freed;
}

#ifndef CONFIG_DISCONTIGMEM
/*
 * NOTE: pagetable_init alloc all the fixmap pagetables contiguous on the
 * physical space so we can cache the place of the first one and move
 * around without checking the pgd every time.
 */

/*
 * Dump a summary of memory state to the console: free areas, swap, and a
 * walk of mem_map counting total/reserved/shared/swap-cached pages.
 * Debugging aid (typically called from SysRq / OOM paths).
 */
void show_mem(void)
{
	int i, total = 0, reserved = 0;
	int shared = 0, cached = 0;

	printk("Mem-info:\n");
	show_free_areas();
	printk("Free swap:       %6dkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
	i = max_mapnr;
	while (i-- > 0) {
		total++;
		if (PageReserved(mem_map+i))
			reserved++;
		else if (PageSwapCache(mem_map+i))
			cached++;
		else if (page_count(mem_map+i))
			/* count sharers beyond the first reference */
			shared += page_count(mem_map+i) - 1;
	}
	printk("%d pages of RAM\n", total);
	printk("%d reserved pages\n", reserved);
	printk("%d pages shared\n", shared);
	printk("%d pages swap cached\n", cached);
	printk("%ld pages in page table cache\n", read_pda(pgtable_cache_sz));
	show_buffers();
}
#endif

/* References to section boundaries */

extern char _text, _etext, _edata, __bss_start, _end;
extern char __init_begin, __init_end;

/* Set to 1 in mem_init() once the bootmem allocator has been retired;
 * selects the allocation strategy in spp_getpage(). */
int after_bootmem;

/*
 * Allocate one page for use as a page-table page.  Before mem_init()
 * finishes we must pull pages from the bootmem allocator; afterwards the
 * normal page allocator is usable.  Panics on failure (callers cannot
 * recover while building kernel mappings).
 */
static void *spp_getpage(void)
{
	void *ptr;
	if (after_bootmem)
		ptr = (void *) get_free_page(GFP_ATOMIC);
	else
		ptr = alloc_bootmem_low_pages(PAGE_SIZE);
	if (!ptr)
		panic("set_pte_phys: cannot allocate page data %s\n", after_bootmem?"after bootmem":"");
	return ptr;
}

/*
 * Install a single kernel PTE mapping vaddr -> phys with the given
 * protection, allocating any missing intermediate (pgd/pmd) levels on the
 * way down.  The PML4 entry must already exist (set up in head.S for the
 * fixmap region); if it does not, we only warn and bail out.
 *
 * The "PAGETABLE BUG" checks verify that the table we just linked in is
 * the one the offset macros now resolve to, i.e. the linkage took effect.
 */
static void set_pte_phys(unsigned long vaddr,
			 unsigned long phys, pgprot_t prot)
{
	pml4_t *level4;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;

	level4 = pml4_offset_k(vaddr);
	if (pml4_none(*level4)) {
		printk("PML4 FIXMAP MISSING, it should be setup in head.S!\n");
		return;
	}
	pgd = level3_offset_k(level4, vaddr);
	if (pgd_none(*pgd)) {
		pmd = (pmd_t *) spp_getpage();
		set_pgd(pgd, __pgd(__pa(pmd) | _KERNPG_TABLE | _PAGE_USER));
		if (pmd != pmd_offset(pgd, 0)) {
			printk("PAGETABLE BUG #01!\n");
			return;
		}
	}
	pmd = pmd_offset(pgd, vaddr);
	if (pmd_none(*pmd)) {
		pte = (pte_t *) spp_getpage();
		set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE | _PAGE_USER));
		if (pte != pte_offset(pmd, 0)) {
			printk("PAGETABLE BUG #02!\n");
			return;
		}
	}
	pte = pte_offset(pmd, vaddr);
	set_pte(pte, mk_pte_phys(phys, prot));

	/*
	 * It's enough to flush this one mapping.
	 * (PGE mappings get flushed as well)
	 */
	__flush_tlb_one(vaddr);
}

/*
 * Map a fixmap slot 'idx' to physical address 'phys' with protection
 * 'prot'.  Rejects indices beyond __end_of_fixed_addresses.
 */
void __set_fixmap (enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		printk("Invalid __set_fixmap\n");
		return;
	}
	set_pte_phys(address, phys, prot);
}

/* Boot-time PMDs (from head.S) used to temporarily window physical pages
 * while the direct mapping is being built. */
extern pmd_t temp_boot_pmds[];

/* Physical page-frame range [table_start, table_end) consumed so far for
 * the kernel direct-mapping page tables; table_end is bump-allocated by
 * alloc_low_page(). */
unsigned long __initdata table_start, table_end;

/*
 * Two temporary 2MB mapping windows at fixed virtual addresses (40MB and
 * 42MB), each backed by one of the boot PMDs above.  'allocated' marks a
 * window as in use.
 */
static struct temp_map {
	pmd_t *pmd;
	void  *address;
	int    allocated;
} temp_mappings[] __initdata = {
	{ &temp_boot_pmds[0], (void *)(40UL * 1024 * 1024) },
	{ &temp_boot_pmds[1], (void *)(42UL * 1024 * 1024) },
	{}
};

/*
 * Bump-allocate the next physical page for a page table (advancing
 * table_end) and map it through a free temporary window so it can be
 * written before the direct mapping exists.
 *
 * Returns the virtual address of the page inside the window; *index gets
 * the window slot (pass to unmap_low_page() when done) and *phys the
 * page's physical address.  Panics if page frames or windows run out.
 */
static __init void *alloc_low_page(int *index, unsigned long *phys)
{
	struct temp_map *ti;
	int i;
	unsigned long pfn = table_end++, paddr;
	void *adr;

	if (table_end >= end_pfn_map)
		panic("alloc_low_page: ran out of page mappings");

	/* find the first window not currently allocated */
	for (i = 0; temp_mappings[i].allocated; i++) {
		if (!temp_mappings[i].pmd)
			panic("alloc_low_page: ran out of temp mappings");
	}
	ti = &temp_mappings[i];
	/* map the 2MB-aligned region containing pfn as one large page */
	paddr = (pfn << PAGE_SHIFT) & PMD_MASK;
	set_pmd(ti->pmd, __pmd(paddr | _KERNPG_TABLE | _PAGE_PSE));
	ti->allocated = 1;
	__flush_tlb();
	/* offset of the page within the 2MB window */
	adr = ti->address + ((pfn << PAGE_SHIFT) & ~PMD_MASK);
	*index = i;
	*phys  = pfn * PAGE_SIZE;
	return adr;
}

/* Release temporary mapping window 'i' acquired by alloc_low_page(). */
static __init void unmap_low_page(int i)
{
	struct temp_map *ti = &temp_mappings[i];
	set_pmd(ti->pmd, __pmd(0));
	ti->allocated = 0;
}

/*
 * Fill one PGD-level page table with 2MB (PSE) mappings covering physical
 * addresses [address, end).  Entries past 'end' are cleared; ranges not
 * present in the e820 map get an empty pgd.  The pmd pages themselves are
 * obtained via alloc_low_page() and written through a temporary window.
 */
static void __init phys_pgd_init(pgd_t *pgd, unsigned long address, unsigned long end)
{
	long i, j;

	i = pgd_index(address);
	pgd = pgd + i;
	for (; i < PTRS_PER_PGD; pgd++, i++) {
		int map;
		unsigned long paddr, pmd_phys;
		pmd_t *pmd;

		paddr = (address & PML4_MASK) + i*PGDIR_SIZE;
		if (paddr >= end) {
			/* zero out the remainder of this pgd page */
			for (; i < PTRS_PER_PGD; i++, pgd++)
				set_pgd(pgd, __pgd(0));
			break;
		}

		if (!e820_mapped(paddr, paddr+PGDIR_SIZE, 0)) {
			/* hole in the physical map: leave unmapped */
			set_pgd(pgd, __pgd(0));
			continue;
		}

		pmd = alloc_low_page(&map, &pmd_phys);
		set_pgd(pgd, __pgd(pmd_phys | _KERNPG_TABLE));
		for (j = 0; j < PTRS_PER_PMD; pmd++, j++, paddr += PMD_SIZE) {
			unsigned long pe;

			if (paddr >= end) {
				for (; j < PTRS_PER_PMD; j++, pmd++)
					set_pmd(pmd, __pmd(0));
				break;
			}
			/* 2MB global kernel mapping; mask off features the
			 * CPU does not support (e.g. NX) */
			pe = _PAGE_PSE | _KERNPG_TABLE | _PAGE_NX | _PAGE_GLOBAL | paddr;
			pe &= __supported_pte_mask;
			set_pmd(pmd, __pmd(pe));
		}
		unmap_low_page(map);
	}
	__flush_tlb();
}

/* Setup the direct mapping of the physical memory at PAGE_OFFSET.
   This runs before bootmem is initialized and gets pages directly from the 
   physical memory. To access them they are temporarily mapped. */
void __init init_memory_mapping(void)
{
	unsigned long adr;
	unsigned long end;
	unsigned long next;
	unsigned long pgds, pmds, tables;

	end = end_pfn_map << PAGE_SHIFT;

	/* 
	 * Find space for the kernel direct mapping tables.
	 * Later we should allocate these tables in the local node of the memory
	 * mapped.  Unfortunately this is done currently before the nodes are 
	 * discovered.
	 */
	/* worst-case table footprint: 8 bytes per pgd and pmd entry */
	pgds = (end + PGDIR_SIZE - 1) >> PGDIR_SHIFT;
	pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
	tables = round_up(pgds*8, PAGE_SIZE) + round_up(pmds * 8, PAGE_SIZE);

	/* Direct mapping must currently fit below the kernel in the first MB.
	   This is because we have no way to tell the later passes to not reuse
	   the memory, until bootmem is initialised */
	/* Should limit MAXMEM for this */
	table_start = find_e820_area(/*0*/ 0x8000, __pa_symbol(&_text), tables);
	if (table_start == -1UL)
		panic("Cannot find space for the kernel page tables");

	table_start >>= PAGE_SHIFT;
	table_end = table_start;

	end += __PAGE_OFFSET; /* turn virtual */

	/* build one PML4 entry's worth of mappings per iteration */
	for (adr = PAGE_OFFSET; adr < end; adr = next) {
		int map;
		unsigned long pgd_phys;
		pgd_t *pgd = alloc_low_page(&map, &pgd_phys);
		next = adr + PML4_SIZE;
		if (next > end)
			next = end;
		phys_pgd_init(pgd, adr-PAGE_OFFSET, next-PAGE_OFFSET);
		set_pml4(init_level4_pgt + pml4_index(adr),
			 mk_kernel_pml4(pgd_phys, KERNPG_TABLE));
		unmap_low_page(map);
	}
	/* snapshot CR4 feature bits, then drop all stale translations */
	asm volatile("movq %%cr4,%0" : "=r" (mmu_cr4_features));
	__flush_tlb_all();
	printk("kernel direct mapping tables upto %lx @ %lx-%lx\n", end,
	       table_start<<PAGE_SHIFT,
	       table_end<<PAGE_SHIFT);
}

/*
 * Remove the identity (low-address) mapping from every CPU's level-4 page
 * table by clearing PML4 entry 0, then flush all TLBs.  Done once the low
 * mappings are no longer needed for boot (see comment in mem_init()).
 */
void __init zap_low_mappings (void)
{
	int i;
	for (i = 0; i < NR_CPUS; i++) {
		if (cpu_pda[i].level4_pgt)
			cpu_pda[i].level4_pgt[0] = 0;
	}
	flush_tlb_all();
}

#ifndef CONFIG_DISCONTIGMEM
/*
 * Set up the memory zones: everything below MAX_DMA_ADDRESS goes to
 * ZONE_DMA, the rest to ZONE_NORMAL (no ZONE_HIGHMEM on x86-64), and hand
 * the sizes to the generic zone allocator initialisation.
 */
void __init paging_init(void)
{
	unsigned long zones_size[MAX_NR_ZONES] = {0, 0, 0};
	unsigned int max_dma;

	max_dma = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT;

	if (end_pfn < max_dma)
		zones_size[ZONE_DMA] = end_pfn;
	else {
		zones_size[ZONE_DMA] = max_dma;
		zones_size[ZONE_NORMAL] = end_pfn - max_dma;
	}
	free_area_init(zones_size);
}

/*
 * Return 1 if page frame 'pagenr' lies inside an E820_RAM region, 0
 * otherwise.  Region bounds are converted to page frames conservatively
 * (start rounded up, end rounded down).
 */
static inline int page_is_ram (unsigned long pagenr)
{
	int i;

	for (i = 0; i < e820.nr_map; i++) {
		unsigned long addr, end;

		if (e820.map[i].type != E820_RAM)	/* not usable memory */
			continue;
		/*
		 *	!!!FIXME!!! Some BIOSen report areas as RAM that
		 *	are not. Notably the 640->1Mb area. We need a sanity
		 *	check here.
		 */
		addr = (e820.map[i].addr+PAGE_SIZE-1) >> PAGE_SHIFT;
		end = (e820.map[i].addr+e820.map[i].size) >> PAGE_SHIFT;
		if  ((pagenr >= addr) && (pagenr < end))
			return 1;
	}
	return 0;
}
#endif

/*
 * Final memory bring-up: release all bootmem pages to the page allocator,
 * count reserved pages, print the memory banner, and flip 'after_bootmem'
 * so later page-table allocations use the normal allocator.
 */
void __init mem_init(void)
{
	unsigned long codesize, reservedpages, datasize, initsize;
	unsigned long tmp;

	max_mapnr = end_pfn;
	num_physpages = end_pfn; /* XXX not true because of holes */
	high_memory = (void *) __va(end_pfn << PAGE_SHIFT);

	/* clear the zero-page */
	memset(empty_zero_page, 0, PAGE_SIZE);

	reservedpages = 0;

	/* this will put all low memory onto the freelists */
#ifdef CONFIG_DISCONTIGMEM
	totalram_pages += numa_free_all_bootmem();
	tmp = 0;	/* should count reserved pages here for all nodes */
#else
	if (!mem_map) BUG();

	totalram_pages += free_all_bootmem();

	for (tmp = 0; tmp < end_pfn; tmp++)
		/*
		 * Only count reserved RAM pages
		 */
		if (page_is_ram(tmp) && PageReserved(mem_map+tmp))
			reservedpages++;
#endif

	after_bootmem = 1;

	codesize =  (unsigned long) &_etext - (unsigned long) &_text;
	datasize =  (unsigned long) &_edata - (unsigned long) &_etext;
	initsize =  (unsigned long) &__init_end - (unsigned long) &__init_begin;

	printk("Memory: %luk/%luk available (%ldk kernel code, %ldk reserved, %ldk data, %ldk init)\n",
		(unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
		max_mapnr << (PAGE_SHIFT-10),
		codesize >> 10,
		reservedpages << (PAGE_SHIFT-10),
		datasize >> 10,
		initsize >> 10);

	/*
	 * Subtle. SMP is doing its boot stuff late (because it has to
	 * fork idle threads) - but it also needs low mappings for the
	 * protected-mode entry to work. We zap these entries only after
	 * the WP-bit has been tested.
	 */
#ifndef CONFIG_SMP
	zap_low_mappings();
#endif
}

/*
 * Map [address, address+len) into the kernel page tables on every CPU
 * using large (PSE) pages with protection 'prot'; 'address' is rounded
 * down to a large-page boundary first.  Missing PML4/PGD levels are
 * allocated with zeroed pages; the PML4 entry is replicated into each
 * CPU's level4_pgt.  Panics if a table page cannot be allocated.
 */
void __init __map_kernel_range(void *address, int len, pgprot_t prot)
{
	int i;
	void *end = address + len;
	BUG_ON((pgprot_val(prot) & _PAGE_PSE) == 0);
	address = (void *)((unsigned long)address & LARGE_PAGE_MASK);
	for (; address < end; address += LARGE_PAGE_SIZE) {
		pml4_t *pml4;
		pgd_t *pgd;
		pmd_t *pmd;

		pml4 = pml4_offset_k((unsigned long) address);
		if (pml4_none(*pml4)) {
			void *p = (void *)get_zeroed_page(GFP_KERNEL);
			if (!p) panic("Cannot map kernel range");
			/* mirror the new top-level entry into every CPU's
			 * private level-4 table */
			for (i = 0; i < smp_num_cpus; i++) {
				set_pml4((pml4_t *)(cpu_pda[i].level4_pgt) +
					 pml4_index((unsigned long)address),
					 mk_kernel_pml4(virt_to_phys(p),KERNPG_TABLE));
			}
		}
		pgd = pgd_offset_k((unsigned long)address);
		if (pgd_none(*pgd)) {
			void *p = (void *)get_zeroed_page(GFP_KERNEL);
			if (!p) panic("Cannot map kernel range");
			set_pgd(pgd, __mk_pgd(virt_to_phys(p), KERNPG_TABLE));
		}
		pmd = pmd_offset(pgd, (unsigned long) address);
		set_pmd(pmd, __mk_pmd(virt_to_phys(address), prot));
	}
	__flush_tlb_all();
}

/*
 * Return the pages of the .init sections to the page allocator: clear the
 * reserved bit, reset the refcount and free each page.  With
 * CONFIG_INIT_DEBUG the page is first filled with 0xcc (int3) to catch
 * late calls into freed init code.
 */
void free_initmem(void)
{
	void *addr;

	addr = (&__init_begin);
	for (; addr < (void *)(&__init_end); addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		set_page_count(virt_to_page(addr), 1);
#ifdef CONFIG_INIT_DEBUG
		memset((unsigned long)addr & ~(PAGE_SIZE-1), 0xcc, PAGE_SIZE);
#endif
		free_page((unsigned long)addr);
		totalram_pages++;
	}
	printk ("Freeing unused kernel memory: %luk freed\n", (&__init_end - &__init_begin) >> 10);
}

#ifdef CONFIG_BLK_DEV_INITRD
/*
 * Free the pages holding the initial ramdisk once it is no longer needed.
 * If the initrd overlaps the kernel image (start below _end) it is left
 * alone.
 */
void free_initrd_mem(unsigned long start, unsigned long end)
{
	if (start < (unsigned long)&_end)
		return;
	printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
	for (; start < end; start += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(start));
		set_page_count(virt_to_page(start), 1);
		free_page(start);
		totalram_pages++;
	}
}
#endif

/*
 * Fill in the sysinfo structure for sys_sysinfo(): total/free RAM, buffer
 * pages, and (always-zero on x86-64) highmem figures, in units of
 * PAGE_SIZE.
 */
void si_meminfo(struct sysinfo *val)
{
	val->totalram = totalram_pages;
	val->sharedram = 0;
	val->freeram = nr_free_pages();
	val->bufferram = atomic_read(&buffermem_pages);
	val->totalhigh = 0;
	val->freehigh = nr_free_highpages();
	val->mem_unit = PAGE_SIZE;
	return;
}

/*
 * Reserve a physical range with the bootmem allocator, dispatching to the
 * owning node's allocator on NUMA (DISCONTIGMEM) builds.
 */
void __init reserve_bootmem_generic(unsigned long phys, unsigned len)
{
	/* Should check here against the e820 map to avoid double free */
#ifdef CONFIG_DISCONTIGMEM
	reserve_bootmem_node(NODE_DATA(phys_to_nid(phys)), phys, len);
#else
	reserve_bootmem(phys, len);
#endif
}

/*
 * Counterpart to reserve_bootmem_generic(): release a physical range back
 * to the (per-node, on NUMA) bootmem allocator.
 */
void free_bootmem_generic(unsigned long phys, unsigned len)
{
#ifdef CONFIG_DISCONTIGMEM
	free_bootmem_node(NODE_DATA(phys_to_nid(phys)), phys, len);
#else
	free_bootmem(phys, len);
#endif
}

?? 快捷鍵說明

復制代碼 Ctrl + C
搜索代碼 Ctrl + F
全屏模式 F11
切換主題 Ctrl + Shift + D
顯示快捷鍵 ?
增大字號 Ctrl + =
減小字號 Ctrl + -
亚洲欧美第一页_禁久久精品乱码_粉嫩av一区二区三区免费野_久草精品视频
日本成人超碰在线观看| 91精品国产综合久久久蜜臀粉嫩| 欧美精品一区男女天堂| 麻豆精品一区二区| 欧美成人高清电影在线| 久久av老司机精品网站导航| 26uuu精品一区二区在线观看| 久久99精品一区二区三区三区| 欧美不卡视频一区| 国产精品自在欧美一区| 国产精品久久久久久久久动漫| 99久久er热在这里只有精品66| 一区二区不卡在线播放| 91精品国产综合久久久蜜臀粉嫩| 美国十次综合导航| 国产欧美精品在线观看| 色综合一个色综合亚洲| 香蕉av福利精品导航| 精品日本一线二线三线不卡| 成人综合日日夜夜| 亚洲国产精品久久艾草纯爱| 91精品国产色综合久久不卡电影| 国产精品一级黄| 亚洲美女在线国产| 日韩一区二区三区电影在线观看| 国产成人在线影院| 亚洲宅男天堂在线观看无病毒| 日韩欧美一级精品久久| 国产成人aaa| 亚洲国产裸拍裸体视频在线观看乱了| 5566中文字幕一区二区电影| 国产精品一卡二| 亚洲一二三四在线观看| 2欧美一区二区三区在线观看视频| 成人app软件下载大全免费| 无码av免费一区二区三区试看| 久久久精品中文字幕麻豆发布| 91免费视频网址| 精品综合久久久久久8888| 亚洲三级久久久| 亚洲精品一区二区三区四区高清| 99精品视频一区二区三区| 男女激情视频一区| 亚洲视频一区二区免费在线观看| 欧美一区二区视频在线观看2022| 成人动漫中文字幕| 奇米精品一区二区三区在线观看一| 国产精品久久三区| 亚洲精品一线二线三线| 欧美日韩黄色影视| 99精品热视频| 国产精品一级片在线观看| 天涯成人国产亚洲精品一区av| 国产精品久久毛片av大全日韩| 欧美成人精精品一区二区频| 欧美性猛交xxxx黑人交| 成人免费看的视频| 九一久久久久久| 天天综合网天天综合色| 亚洲人成网站色在线观看| www国产亚洲精品久久麻豆| 欧美在线观看视频一区二区| 成人精品一区二区三区四区| 久久国产精品色| 五月天一区二区| 亚洲高清视频中文字幕| 《视频一区视频二区| 国产女主播在线一区二区| 欧美成va人片在线观看| 欧美高清视频在线高清观看mv色露露十八 | 黄网站免费久久| 五月婷婷欧美视频| 亚洲午夜精品久久久久久久久| 国产精品的网站| 国产精品国产三级国产aⅴ原创| 久久综合色婷婷| 精品国精品自拍自在线| 日韩欧美一二三| 日韩欧美一二区| 欧美成人猛片aaaaaaa| 日韩一区二区免费高清| 日韩三级在线观看| 日韩精品一区二区三区在线观看| 欧美一级久久久久久久大片| 日韩精品一区二区三区在线观看| 欧美剧情片在线观看| 欧美日韩一区二区三区四区五区| 在线精品视频免费观看| 欧洲色大大久久| 欧美日韩在线亚洲一区蜜芽| 欧美日韩一区高清| 91精品国产91久久久久久最新毛片| 欧美夫妻性生活| 精品免费日韩av| 久久久久久97三级| 中文字幕一区二区三区精华液| 最新成人av在线| 一区二区三区鲁丝不卡| 午夜成人免费视频| 久久99精品国产.久久久久| 国产一区二区三区| hitomi一区二区三区精品| 色天使久久综合网天天| 欧美精品 国产精品| 久久久精品天堂| 自拍视频在线观看一区二区| 一区二区高清视频在线观看| 日韩成人午夜精品| 国产一区二区免费在线| 99久久99久久精品免费观看| 欧美人与禽zozo性伦| 日韩女优av电影在线观看| 日本一区二区三区国色天香 | 日韩精品91亚洲二区在线观看 | 欧美伦理电影网| 精品动漫一区二区三区在线观看| 国产精品无码永久免费888| 亚洲美女在线一区| 麻豆91精品91久久久的内涵| 国产成人午夜精品影院观看视频| 91在线一区二区| 日韩女优av电影| 亚洲免费av观看| 韩国精品一区二区| 91免费国产在线| 精品国产电影一区二区| 亚洲欧美区自拍先锋| 久久国产人妖系列| 色综合咪咪久久| 久久久久国产精品麻豆| 亚洲成av人片www| 岛国精品在线播放| 欧美一二三区精品| 一区二区三区在线影院| 国产原创一区二区三区| 欧美色电影在线| 国产精品美女久久福利网站| 麻豆精品一区二区综合av| 91香蕉视频mp4| 国产午夜精品福利| 日本中文字幕一区二区视频| 一本大道综合伊人精品热热| 久久久蜜桃精品| 久久精品国产久精国产| 欧美三级乱人伦电影| 国产精品久久综合| 国内精品久久久久影院薰衣草 | 欧美精品久久99久久在免费线 | 欧美国产精品v| 精品一区二区三区av| 欧美久久高跟鞋激| 亚洲女人****多毛耸耸8| 国产精品一区免费视频| 欧美大白屁股肥臀xxxxxx| 亚洲综合区在线| 91丨九色丨蝌蚪富婆spa| 国产亚洲一二三区| 久久99国产乱子伦精品免费| 
欧美丰满少妇xxxxx高潮对白| 亚洲精品欧美专区| 99re成人精品视频| 国产欧美日韩不卡| 国产福利一区二区| 久久久久久久综合日本| 激情另类小说区图片区视频区| 欧美一区二区三区成人| 日日摸夜夜添夜夜添亚洲女人| 欧美午夜不卡在线观看免费| 亚洲美女偷拍久久| 欧美巨大另类极品videosbest| 一区二区三区免费| 在线视频综合导航| 亚洲综合图片区| 在线精品视频一区二区三四| 一个色在线综合| 欧美日韩国产成人在线免费| 亚洲一区二区三区四区在线观看 | 亚洲国产欧美在线| 欧美体内she精高潮| 亚洲国产精品一区二区久久 | 国产调教视频一区| 国产69精品久久久久777| 国产婷婷色一区二区三区 | 国产精品久久久久久久蜜臀| 成人av影院在线| 综合久久一区二区三区| 色婷婷av一区二区三区软件 | 91一区在线观看| 伊人色综合久久天天人手人婷| 色偷偷88欧美精品久久久| 亚洲影视资源网| 3d动漫精品啪啪一区二区竹菊| 日本va欧美va精品发布| 2020日本不卡一区二区视频| 国产suv精品一区二区883| 亚洲三级在线免费| 777色狠狠一区二区三区| 国产一区二三区好的| 综合久久一区二区三区| 91精品国产综合久久福利|