
mm-armv.c

Linux development for the S3C2410; used when building the zImage kernel image that the 2410 boots.
Language: C
/*
 *  linux/arch/arm/mm/mm-armv.c
 *
 *  Copyright (C) 1998-2000 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  Page table sludge for ARM v3 and v4 processor architectures.
 */
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/bootmem.h>

#include <asm/hardware.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/setup.h>

#include <asm/mach/map.h>

/*
 * These are useful for identifying cache coherency
 * problems by allowing the cache or the cache and
 * writebuffer to be turned off.  (Note: the write
 * buffer should not be on and the cache off).
 */
static int __init nocache_setup(char *__unused)
{
        cr_alignment &= ~4;
        cr_no_alignment &= ~4;
        flush_cache_all();
        set_cr(cr_alignment);
        return 1;
}

static int __init nowrite_setup(char *__unused)
{
        cr_alignment &= ~(8|4);
        cr_no_alignment &= ~(8|4);
        flush_cache_all();
        set_cr(cr_alignment);
        return 1;
}

static int __init noalign_setup(char *__unused)
{
        cr_alignment &= ~2;
        cr_no_alignment &= ~2;
        set_cr(cr_alignment);
        return 1;
}

__setup("noalign", noalign_setup);
__setup("nocache", nocache_setup);
__setup("nowb", nowrite_setup);

#define FIRST_KERNEL_PGD_NR     (FIRST_USER_PGD_NR + USER_PTRS_PER_PGD)

#define clean_cache_area(start,size) \
        cpu_cache_clean_invalidate_range((unsigned long)start, ((unsigned long)start) + size, 0);

/*
 * need to get a 16k page for level 1
 */
pgd_t *get_pgd_slow(struct mm_struct *mm)
{
        pgd_t *new_pgd, *init_pgd;
        pmd_t *new_pmd, *init_pmd;
        pte_t *new_pte, *init_pte;

        new_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL, 2);
        if (!new_pgd)
                goto no_pgd;

        memzero(new_pgd, FIRST_KERNEL_PGD_NR * sizeof(pgd_t));

        init_pgd = pgd_offset_k(0);

        if (vectors_base() == 0) {
                init_pmd = pmd_offset(init_pgd, 0);
                init_pte = pte_offset(init_pmd, 0);

                /*
                 * This lock is here just to satisfy pmd_alloc and pte_lock
                 */
                spin_lock(&mm->page_table_lock);

                /*
                 * On ARM, first page must always be allocated since it
                 * contains the machine vectors.
                 */
                new_pmd = pmd_alloc(mm, new_pgd, 0);
                if (!new_pmd)
                        goto no_pmd;

                new_pte = pte_alloc(mm, new_pmd, 0);
                if (!new_pte)
                        goto no_pte;

                set_pte(new_pte, *init_pte);

                spin_unlock(&mm->page_table_lock);
        }

        /*
         * Copy over the kernel and IO PGD entries
         */
        memcpy(new_pgd + FIRST_KERNEL_PGD_NR, init_pgd + FIRST_KERNEL_PGD_NR,
               (PTRS_PER_PGD - FIRST_KERNEL_PGD_NR) * sizeof(pgd_t));

        /*
         * FIXME: this should not be necessary
         */
        clean_cache_area(new_pgd, PTRS_PER_PGD * sizeof(pgd_t));

        return new_pgd;

no_pte:
        spin_unlock(&mm->page_table_lock);
        pmd_free(new_pmd);
        free_pages((unsigned long)new_pgd, 2);
        return NULL;

no_pmd:
        spin_unlock(&mm->page_table_lock);
        free_pages((unsigned long)new_pgd, 2);
        return NULL;

no_pgd:
        return NULL;
}

void free_pgd_slow(pgd_t *pgd)
{
        pmd_t *pmd;
        pte_t *pte;

        if (!pgd)
                return;

        /* pgd is always present and good */
        pmd = (pmd_t *)pgd;
        if (pmd_none(*pmd))
                goto free;
        if (pmd_bad(*pmd)) {
                pmd_ERROR(*pmd);
                pmd_clear(pmd);
                goto free;
        }

        pte = pte_offset(pmd, 0);
        pmd_clear(pmd);
        pte_free(pte);
        pmd_free(pmd);

free:
        free_pages((unsigned long) pgd, 2);
}

/*
 * Create a SECTION PGD between VIRT and PHYS in domain
 * DOMAIN with protection PROT
 */
static inline void
alloc_init_section(unsigned long virt, unsigned long phys, int prot)
{
        pmd_t pmd;

        pmd_val(pmd) = phys | prot;

        set_pmd(pmd_offset(pgd_offset_k(virt), virt), pmd);
}

/*
 * Add a PAGE mapping between VIRT and PHYS in domain
 * DOMAIN with protection PROT.  Note that due to the
 * way we map the PTEs, we must allocate two PTE_SIZE'd
 * blocks - one for the Linux pte table, and one for
 * the hardware pte table.
 */
static inline void
alloc_init_page(unsigned long virt, unsigned long phys, int domain, int prot)
{
        pmd_t *pmdp;
        pte_t *ptep;

        pmdp = pmd_offset(pgd_offset_k(virt), virt);

        if (pmd_none(*pmdp)) {
                pte_t *ptep = alloc_bootmem_low_pages(2 * PTRS_PER_PTE *
                                                      sizeof(pte_t));

                ptep += PTRS_PER_PTE;

                set_pmd(pmdp, __mk_pmd(ptep, PMD_TYPE_TABLE | PMD_DOMAIN(domain)));
        }
        ptep = pte_offset(pmdp, virt);

        set_pte(ptep, mk_pte_phys(phys, __pgprot(prot)));
}
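
/*
 * Sizing note (added commentary; assumes the usual 2.4-era ARM two-level
 * values PTRS_PER_PTE == 256 and sizeof(pte_t) == 4): the bootmem
 * allocation in alloc_init_page() above is 2 * 256 * 4 = 2048 bytes.  As
 * the comment before the function says, one 1K half holds the Linux pte
 * table and the other the hardware pte table; ptep += PTRS_PER_PTE steps
 * between the two halves before the pmd entry is written.  The same 2K
 * size reappears in the (block & 2047) alignment check in
 * pte_cache_ctor() at the end of this file.
 */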

/*
 * Clear any PGD mapping.  On a two-level page table system,
 * the clearance is done by the middle-level functions (pmd)
 * rather than the top-level (pgd) functions.
 */
static inline void clear_mapping(unsigned long virt)
{
        pmd_clear(pmd_offset(pgd_offset_k(virt), virt));
}

/*
 * Create the page directory entries and any necessary
 * page tables for the mapping specified by `md'.  We
 * are able to cope here with varying sizes and address
 * offsets, and we take full advantage of sections.
 */
static void __init create_mapping(struct map_desc *md)
{
        unsigned long virt, length;
        int prot_sect, prot_pte;
        long off;

        if (md->prot_read && md->prot_write &&
            !md->cacheable && !md->bufferable) {
                printk(KERN_WARNING "Security risk: creating user "
                       "accessible mapping for 0x%08lx at 0x%08lx\n",
                       md->physical, md->virtual);
        }

        if (md->virtual != vectors_base() && md->virtual < PAGE_OFFSET) {
                printk(KERN_WARNING "MM: not creating mapping for "
                       "0x%08lx at 0x%08lx in user region\n",
                       md->physical, md->virtual);
        }

        prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
                   (md->prot_read  ? L_PTE_USER       : 0) |
                   (md->prot_write ? L_PTE_WRITE      : 0) |
                   (md->cacheable  ? L_PTE_CACHEABLE  : 0) |
                   (md->bufferable ? L_PTE_BUFFERABLE : 0);

        prot_sect = PMD_TYPE_SECT | PMD_DOMAIN(md->domain) |
                    (md->prot_read  ? PMD_SECT_AP_READ    : 0) |
                    (md->prot_write ? PMD_SECT_AP_WRITE   : 0) |
                    (md->cacheable  ? PMD_SECT_CACHEABLE  : 0) |
                    (md->bufferable ? PMD_SECT_BUFFERABLE : 0);

        virt   = md->virtual;
        off    = md->physical - virt;
        length = md->length;

        while ((virt & 0xfffff || (virt + off) & 0xfffff) && length >= PAGE_SIZE) {
                alloc_init_page(virt, virt + off, md->domain, prot_pte);

                virt   += PAGE_SIZE;
                length -= PAGE_SIZE;
        }

        while (length >= PGDIR_SIZE) {
                alloc_init_section(virt, virt + off, prot_sect);

                virt   += PGDIR_SIZE;
                length -= PGDIR_SIZE;
        }

        while (length >= PAGE_SIZE) {
                alloc_init_page(virt, virt + off, md->domain, prot_pte);

                virt   += PAGE_SIZE;
                length -= PAGE_SIZE;
        }
}

/*
 * In order to soft-boot, we need to insert a 1:1 mapping in place of
 * the user-mode pages.  This will then ensure that we have predictable
 * results when turning the mmu off
 */
void setup_mm_for_reboot(char mode)
{
        pgd_t *pgd;
        pmd_t pmd;
        int i;

        if (current->mm && current->mm->pgd)
                pgd = current->mm->pgd;
        else
                pgd = init_mm.pgd;

        for (i = 0; i < FIRST_USER_PGD_NR + USER_PTRS_PER_PGD; i++) {
                pmd_val(pmd) = (i << PGDIR_SHIFT) |
                        PMD_SECT_AP_WRITE | PMD_SECT_AP_READ |
                        PMD_TYPE_SECT;
                set_pmd(pmd_offset(pgd + i, i << PGDIR_SHIFT), pmd);
        }
}
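
/*
 * Added commentary on the loop above: on this two-level ARM scheme each
 * first-level entry maps a 1MB "section" (PGDIR_SHIFT is 20 here), so
 * entry i receives a section descriptor whose base is i << 20.  Virtual
 * megabyte i therefore maps straight onto physical megabyte i with
 * read/write permission across the whole user range, which is the
 * predictable 1:1 view the soft-boot path needs while the MMU is being
 * turned off.
 */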

/*
 * Setup initial mappings.  We use the page we allocated for zero page to hold
 * the mappings, which will get overwritten by the vectors in traps_init().
 * The mappings must be in virtual address order.
 */
void __init memtable_init(struct meminfo *mi)
{
        struct map_desc *init_maps, *p, *q;
        unsigned long address = 0;
        int i;

        init_maps = p = alloc_bootmem_low_pages(PAGE_SIZE);

        for (i = 0; i < mi->nr_banks; i++) {
                if (mi->bank[i].size == 0)
                        continue;

                p->physical   = mi->bank[i].start;
                p->virtual    = __phys_to_virt(p->physical);
                p->length     = mi->bank[i].size;
                p->domain     = DOMAIN_KERNEL;
                p->prot_read  = 0;
                p->prot_write = 1;
                p->cacheable  = 1;
                p->bufferable = 1;
                p ++;
        }

#ifdef FLUSH_BASE
        p->physical   = FLUSH_BASE_PHYS;
        p->virtual    = FLUSH_BASE;
        p->length     = PGDIR_SIZE;
        p->domain     = DOMAIN_KERNEL;
        p->prot_read  = 1;
        p->prot_write = 0;
        p->cacheable  = 1;
        p->bufferable = 1;
        p ++;
#endif

#ifdef FLUSH_BASE_MINICACHE
        p->physical   = FLUSH_BASE_PHYS + PGDIR_SIZE;
        p->virtual    = FLUSH_BASE_MINICACHE;
        p->length     = PGDIR_SIZE;
        p->domain     = DOMAIN_KERNEL;
        p->prot_read  = 1;
        p->prot_write = 0;
        p->cacheable  = 1;
        p->bufferable = 0;
        p ++;
#endif

        /*
         * Go through the initial mappings, but clear out any
         * pgdir entries that are not in the description.
         */
        q = init_maps;
        do {
                if (address < q->virtual || q == p) {
                        clear_mapping(address);
                        address += PGDIR_SIZE;
                } else {
                        create_mapping(q);

                        address = q->virtual + q->length;
                        address = (address + PGDIR_SIZE - 1) & PGDIR_MASK;

                        q ++;
                }
        } while (address != 0);

        /*
         * Create a mapping for the machine vectors at virtual address 0
         * or 0xffff0000.  We should always try the high mapping.
         */
        init_maps->physical   = virt_to_phys(init_maps);
        init_maps->virtual    = vectors_base();
        init_maps->length     = PAGE_SIZE;
        init_maps->domain     = DOMAIN_USER;
        init_maps->prot_read  = 0;
        init_maps->prot_write = 0;
        init_maps->cacheable  = 1;
        init_maps->bufferable = 1;

        create_mapping(init_maps);

        flush_cache_all();
}

/*
 * Create the architecture specific mappings
 */
void __init iotable_init(struct map_desc *io_desc)
{
        int i;

        for (i = 0; io_desc[i].last == 0; i++)
                create_mapping(io_desc + i);
}

static inline void free_memmap(int node, unsigned long start, unsigned long end)
{
        unsigned long pg, pgend;

        start = __phys_to_virt(start);
        end   = __phys_to_virt(end);

        pg    = PAGE_ALIGN((unsigned long)(virt_to_page(start)));
        pgend = ((unsigned long)(virt_to_page(end))) & PAGE_MASK;

        start = __virt_to_phys(pg);
        end   = __virt_to_phys(pgend);

        free_bootmem_node(NODE_DATA(node), start, end - start);
}

static inline void free_unused_memmap_node(int node, struct meminfo *mi)
{
        unsigned long bank_start, prev_bank_end = 0;
        unsigned int i;

        /*
         * [FIXME] This relies on each bank being in address order.  This
         * may not be the case, especially if the user has provided the
         * information on the command line.
         */
        for (i = 0; i < mi->nr_banks; i++) {
                if (mi->bank[i].size == 0 || mi->bank[i].node != node)
                        continue;

                bank_start = mi->bank[i].start & PAGE_MASK;

                /*
                 * If we had a previous bank, and there is a space
                 * between the current bank and the previous, free it.
                 */
                if (prev_bank_end && prev_bank_end != bank_start)
                        free_memmap(node, prev_bank_end, bank_start);

                prev_bank_end = PAGE_ALIGN(mi->bank[i].start +
                                           mi->bank[i].size);
        }
}

/*
 * The mem_map array can get very big.  Free
 * the unused area of the memory map.
 */
void __init create_memmap_holes(struct meminfo *mi)
{
        int node;

        for (node = 0; node < numnodes; node++)
                free_unused_memmap_node(node, mi);
}

/*
 * PTE table allocation cache.
 *
 * This is a move away from our custom 2K page allocator.  We now use the
 * slab cache to keep track of these objects.
 *
 * With this, it is questionable as to whether the PGT cache gains us
 * anything.  We may be better off dropping the PTE stuff from our PGT
 * cache implementation.
 */
kmem_cache_t *pte_cache;

/*
 * The constructor gets called for each object within the cache when the
 * cache page is created.  Note that if slab tries to misalign the blocks,
 * we BUG() loudly.
 */
static void pte_cache_ctor(void *pte, kmem_cache_t *cache, unsigned long flags)
{
        unsigned long block = (unsigned long)pte;

        if (block & 2047)
                BUG();

        memzero(pte, 2 * PTRS_PER_PTE * sizeof(pte_t));
        cpu_cache_clean_invalidate_range(block, block +
                        PTRS_PER_PTE * sizeof(pte_t), 0);
}

void __init pgtable_cache_init(void)
{
        pte_cache = kmem_cache_create("pte-cache",
                                2 * PTRS_PER_PTE * sizeof(pte_t), 0, 0,
                                pte_cache_ctor, NULL);
        if (!pte_cache)
                BUG();
}
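
For context on how this file gets used by board-support code: iotable_init() above walks a board-supplied array of struct map_desc entries until it reaches one whose last field is non-zero, passing each entry to create_mapping(). Below is a minimal sketch of such a table; the field names are taken from the assignments visible in memtable_init() and the .last test in iotable_init(), but the addresses, the DOMAIN_IO constant, and the example_* names are illustrative assumptions, not part of this file.

/* Hypothetical board-support sketch; not from mm-armv.c. */
static struct map_desc example_io_desc[] __initdata = {
        {
                .virtual    = 0xf0000000,       /* assumed VA for the device window */
                .physical   = 0x50000000,       /* assumed device PA */
                .length     = 0x00100000,       /* 1MB, so create_mapping() can use a section */
                .domain     = DOMAIN_IO,        /* assumed I/O domain constant */
                .prot_read  = 0,
                .prot_write = 1,
                .cacheable  = 0,                /* device registers: uncached */
                .bufferable = 0,                /* and unbuffered */
        },
        { .last = 1 },                          /* terminator tested by iotable_init() */
};

void __init example_map_io(void)
{
        iotable_init(example_io_desc);
}

Note that the sketch leaves prot_read clear: per the checks at the top of create_mapping(), a mapping that is user-readable and writable while uncached and unbuffered would trigger the "Security risk" warning.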
