

proc-xscale.s

Source code from the "ARM Embedded System Design and Example Development" lab textbook, part 2
(Page 1 of 2)

/*
 *  linux/arch/arm/mm/proc-xscale.S
 *
 *  Author:	Nicolas Pitre
 *  Created:	November 2000
 *  Copyright:	(C) 2000, 2001 MontaVista Software Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * MMU functions for the Intel XScale CPUs
 *
 * 2001 Aug 21:
 *	some contributions by Brett Gaines <brett.w.gaines@intel.com>
 *	Copyright 2001 by Intel Corp.
 *
 * 2001 Sep 08:
 *	Completely revisited, many important fixes
 *	Nicolas Pitre <nico@cam.org>
 */

#include <linux/config.h>
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/constants.h>
#include <asm/procinfo.h>
#include <asm/hardware.h>
#include <asm/proc/pgtable.h>

/*
 * Some knobs for cache allocation policy.
 * Allocate on write may or may not be beneficial depending on the memory
 * usage pattern of your main application.  Write through cache is definitely
 * a performance loss in most cases, but might be used for special purposes.
 */
#define PMD_CACHE_WRITE_ALLOCATE 1
#define PTE_CACHE_WRITE_ALLOCATE 1
#define CACHE_WRITE_THROUGH 0

/*
 * There are errata that say that dirty status bits in the cache may get
 * corrupted. The workaround significantly affects performance, and the bug
 * _might_ just not be that visible or critical to you, so it is configurable.
 * Let's hope a future core revision will tell us this was only a bad dream.
 * But in the mean time the risk and tradeoff is yours to decide....
 */
#ifdef CONFIG_XSCALE_CACHE_ERRATA
#undef CACHE_WRITE_THROUGH
#define CACHE_WRITE_THROUGH 1
#endif

/*
 * This is the maximum size of an area which will be flushed.  If the area
 * is larger than this, then we flush the whole cache.
 */
#define MAX_AREA_SIZE	32768

/*
 * the cache line size of the I and D cache
 */
#define CACHELINESIZE	32

/*
 * the size of the data cache
 */
#define CACHESIZE	32768

/*
 * and the page size
 */
#define PAGESIZE	4096

/*
 * Virtual address used to allocate the cache when flushed
 *
 * This must be an address range which is _never_ used.  It should
 * apparently have a mapping in the corresponding page table for
 * compatibility with future CPUs that _could_ require it.  For instance we
 * don't care.
 *
 * This must be aligned on a 2*CACHESIZE boundary.  The code alternates
 * between the 2 areas each time the clean_d_cache macro is used.
 * Without this the XScale core exhibits cache eviction problems and no one
 * knows why.
 *
 * Reminder: the vector table is located at 0xffff0000-0xffff0fff.
 */
#define CLEAN_ADDR	0xfffe0000

/*
 * This macro is used to wait for a CP15 write and is needed
 * when we have to ensure that the last operation to the co-pro
 * was completed before continuing with operation.
 */
	.macro	cpwait, rd
	mrc	p15, 0, \rd, c2, c0, 0		@ arbitrary read of cp15
	mov	\rd, \rd			@ wait for completion
	sub	pc, pc, #4			@ flush instruction pipeline
	.endm

	.macro	cpwait_ret, lr, rd
	mrc	p15, 0, \rd, c2, c0, 0		@ arbitrary read of cp15
	sub	pc, \lr, \rd, LSR #32		@ wait for completion and
						@ flush instruction pipeline
	.endm

#if !CACHE_WRITE_THROUGH

/*
 * This macro cleans the entire dcache using line allocate.
 * The main loop has been unrolled to reduce loop overhead.
 * rd and rs are two scratch registers.
 */
	.macro  clean_d_cache, rd, rs
	ldr	\rs, =clean_addr
	ldr	\rd, [\rs]
	eor	\rd, \rd, #CACHESIZE
	str	\rd, [\rs]
	add	\rs, \rd, #CACHESIZE
1:	mcr	p15, 0, \rd, c7, c2, 5		@ allocate D cache line
	add	\rd, \rd, #CACHELINESIZE
	mcr	p15, 0, \rd, c7, c2, 5		@ allocate D cache line
	add	\rd, \rd, #CACHELINESIZE
	mcr	p15, 0, \rd, c7, c2, 5		@ allocate D cache line
	add	\rd, \rd, #CACHELINESIZE
	mcr	p15, 0, \rd, c7, c2, 5		@ allocate D cache line
	add	\rd, \rd, #CACHELINESIZE
	teq	\rd, \rs
	bne	1b
	.endm

	.macro	clean_d_line, rd
	mcr	p15, 0, \rd, c7, c10, 1
	.endm

	.data
clean_addr:	.word	CLEAN_ADDR

#else

/*
 * If cache is write-through, there is no need to clean it.
 * Simply invalidating will do.
 */
	.macro  clean_d_cache, rd, rs
	mcr	p15, 0, \rd, c7, c6, 0
	.endm

	/* let's try to skip these needless operations at least within loops */
	.macro	clean_d_line, rd
	.endm

#endif

	.text

/*
 * cpu_xscale_data_abort()
 *
 * obtain information about current aborted instruction.
 * Note: we read user space.  This means we might cause a data
 * abort here if the I-TLB and D-TLB aren't seeing the same
 * picture.  Unfortunately, this does happen.  We live with it.
 *
 *  r2 = address of aborted instruction
 *  r3 = cpsr
 *
 * Returns:
 *  r0 = address of abort
 *  r1 != 0 if writing
 *  r3 = FSR
 *  r4 = corrupted
 */
	.align	5
ENTRY(cpu_xscale_data_abort)
	mrc	p15, 0, r3, c5, c0, 0		@ get FSR
	mrc	p15, 0, r0, c6, c0, 0		@ get FAR
	ldr	r1, [r2]			@ read aborted instruction
	and	r3, r3, #255
	tst	r1, r1, lsr #21			@ C = bit 20
	sbc	r1, r1, r1			@ r1 = C - 1
	mov	pc, lr

/*
 * cpu_xscale_check_bugs()
 */
ENTRY(cpu_xscale_check_bugs)
	mrs	ip, cpsr
	bic	ip, ip, #F_BIT
	msr	cpsr, ip
	mov	pc, lr

#ifndef CONFIG_XSCALE_CACHE_ERRATA

/*
 * cpu_xscale_proc_init()
 *
 * Nothing too exciting at the moment
 */
ENTRY(cpu_xscale_proc_init)
	mov	pc, lr

#else

/*
 * We enable the cache here, but we make sure all the status bits for dirty
 * lines are cleared as well (see PXA250 erratum #120).
 */
ENTRY(cpu_xscale_proc_init)
	@ enable data cache
	ldr	r0, cr_p
	ldmia	r0, {r1, r2}
	orr	r1, r1, #0x4
	orr	r2, r2, #0x4
	stmia	r0, {r1, r2}
	mcr	p15, 0, r1, c1, c0, 0
	cpwait	r0

	@ invalidate data cache
	mcr	p15, 0, r0, c7, c6, 0

	@ fill main cache with write-through lines
	bic	r0, pc, #0x1f
	add	r1, r0, #CACHESIZE
1:	ldr	r2, [r0], #32
	cmp	r0, r1
	bne	1b

	@ enable test feature to force all fills to the mini-cache
	mov	r1, #0x8
	mcr	p15, 0, r1, c15, c15, 3

	@ fill mini-cache with write-through lines (2kbytes, 64 lines)
	add	r1, r0, #2048
2:	ldr	r2, [r0], #32
	cmp	r0, r1
	bne	2b

	@ disable test feature to force all fills to the mini-cache
	mov	r1, #0x0
	mcr	p15, 0, r1, c15, c15, 3

	@ invalidate data cache again
	mcr	p15, 0, r1, c7, c6, 0
	mov	pc, lr

cr_p:	.long	SYMBOL_NAME(cr_alignment)

#endif

/*
 * cpu_xscale_proc_fin()
 */
ENTRY(cpu_xscale_proc_fin)
	str	lr, [sp, #-4]!
	mov	r0, #F_BIT|I_BIT|SVC_MODE
	msr	cpsr_c, r0
	mrc	p15, 0, r0, c1, c0, 0		@ ctrl register
	bic	r0, r0, #0x1800			@ ...IZ...........
	bic	r0, r0, #0x0006			@ .............CA.
	mcr	p15, 0, r0, c1, c0, 0		@ disable caches
	bl	cpu_xscale_cache_clean_invalidate_all	@ clean caches
	ldr	pc, [sp], #4

/*
 * cpu_xscale_reset(loc)
 *
 * Perform a soft reset of the system.  Put the CPU into the
 * same state as it would be if it had been reset, and branch
 * to what would be the reset vector.
 *
 * loc: location to jump to for soft reset
 */
	.align	5
ENTRY(cpu_xscale_reset)
	mov	r1, #F_BIT|I_BIT|SVC_MODE
	msr	cpsr_c, r1			@ reset CPSR
	mrc	p15, 0, r1, c1, c0, 0		@ ctrl register
	bic	r1, r1, #0x0086			@ ........B....CA.
	bic	r1, r1, #0x1900			@ ...IZ..S........
	mcr	p15, 0, r1, c1, c0, 0		@ ctrl register
	mcr	p15, 0, ip, c7, c7, 0		@ invalidate I,D caches & BTB
	bic	r1, r1, #0x0001			@ ...............M
	mcr	p15, 0, r1, c1, c0, 0		@ ctrl register
	@ CAUTION: MMU turned off from this point. We count on the pipeline
	@ already containing those two last instructions to survive.
	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
	mov	pc, r0

/*
 * cpu_xscale_do_idle(type)
 *
 * Cause the processor to idle
 *
 * type:
 *   0 = slow idle
 *   1 = fast idle
 *   2 = switch to slow processor clock
 *   3 = switch to fast processor clock
 *
 * For now we do nothing but go to idle mode for every case
 *
 * XScale supports clock switching, but using idle mode support
 * allows external hardware to react to system state changes.
 */
	.align	5
ENTRY(cpu_xscale_do_idle)
	mov	r0, #1
	mcr	p14, 0, r0, c7, c0, 0		@ Go to IDLE
	mov	pc, lr

/* ================================= CACHE ================================ */

/*
 * cpu_xscale_cache_clean_invalidate_all (void)
 *
 * clean and invalidate all cache lines
 *
 * Note:
 *  1. We should preserve r0 at all times.
 *  2. Even if this function implies cache "invalidation" by its name,
 *     we don't need to actually use explicit invalidation operations
 *     since the goal is to discard all valid references from the cache
 *     and the cleaning of it already has that effect.
 *  3. Because of 2 above and the fact that kernel space memory is always
 *     coherent across task switches there is no need to worry about
 *     inconsistencies due to interrupts, hence no irq disabling.
 */
	.align	5
ENTRY(cpu_xscale_cache_clean_invalidate_all)
	mov	r2, #1
cpu_xscale_cache_clean_invalidate_all_r2:
	clean_d_cache r0, r1
	teq	r2, #0
	mcrne	p15, 0, ip, c7, c5, 0		@ Invalidate I cache & BTB
	mcr	p15, 0, ip, c7, c10, 4		@ Drain Write (& Fill) Buffer
	mov	pc, lr

/*
 * cpu_xscale_cache_clean_invalidate_range(start, end, flags)
 *
 * clean and invalidate all cache lines associated with this area of memory
 *
 * start: Area start address
 * end:   Area end address
 * flags: nonzero for I cache as well
 */
	.align	5
ENTRY(cpu_xscale_cache_clean_invalidate_range)
	bic	r0, r0, #CACHELINESIZE - 1	@ round down to cache line
	sub	r3, r1, r0
	cmp	r3, #MAX_AREA_SIZE
	bhi	cpu_xscale_cache_clean_invalidate_all_r2
1:	clean_d_line r0				@ Clean D cache line
	mcr	p15, 0, r0, c7, c6, 1		@ Invalidate D cache line
	add	r0, r0, #CACHELINESIZE
	cmp	r0, r1
	blo	1b
	teq	r2, #0
	mcr	p15, 0, ip, c7, c10, 4		@ Drain Write (& Fill) Buffer
	moveq	pc, lr
	sub	r0, r0, r3
1:	mcr	p15, 0, r0, c7, c5, 1		@ Invalidate I cache line
	add	r0, r0, #CACHELINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, ip, c7, c5, 6		@ Invalidate BTB
	mov	pc, lr

/*
 * cpu_xscale_flush_ram_page(page)
 *
 * clean all cache lines associated with this memory page
 *
 * page: page to clean
 */
	.align	5
ENTRY(cpu_xscale_flush_ram_page)
#if !CACHE_WRITE_THROUGH
	mov	r1, #PAGESIZE
1:	mcr	p15, 0, r0, c7, c10, 1		@ Clean D cache line
	add	r0, r0, #CACHELINESIZE
	mcr	p15, 0, r0, c7, c10, 1		@ Clean D cache line
	add	r0, r0, #CACHELINESIZE
	subs	r1, r1, #2 * CACHELINESIZE
	bne	1b
#endif
	mcr	p15, 0, ip, c7, c10, 4		@ Drain Write (& Fill) Buffer
	mov	pc, lr

/* ================================ D-CACHE =============================== */

/*
 * cpu_xscale_dcache_invalidate_range(start, end)
 *
 * throw away all D-cached data in specified region without an obligation
 * to write them back.  Note however that on XScale we must clean all
 * entries also due to hardware errata (80200 A0 & A1 only).
 *
 * start: virtual start address
 * end:   virtual end address
 */
	.align	5
ENTRY(cpu_xscale_dcache_invalidate_range)
	mrc	p15, 0, r2, c0, c0, 0		@ Read part no.
	eor	r2, r2, #0x69000000
	eor	r2, r2, #0x00052000		@ 80200 XX part no.
	bics	r2, r2, #0x1			@ Clear LSB in revision field
	moveq	r2, #0
	beq	cpu_xscale_cache_clean_invalidate_range	@ An 80200 A0 or A1
	tst	r0, #CACHELINESIZE - 1
	mcrne	p15, 0, r0, c7, c10, 1		@ Clean D cache line
	tst	r1, #CACHELINESIZE - 1
	mcrne	p15, 0, r1, c7, c10, 1		@ Clean D cache line
	bic	r0, r0, #CACHELINESIZE - 1	@ round down to cache line
1:	mcr	p15, 0, r0, c7, c6, 1		@ Invalidate D cache line
	add	r0, r0, #CACHELINESIZE
	cmp	r0, r1
	blo	1b
	mov	pc, lr

/*
 * cpu_xscale_dcache_clean_range(start, end)
 *
 * For the specified virtual address range, ensure that all caches contain
 * clean data, such that peripheral accesses to the physical RAM fetch
 * correct data.
 *
 * start: virtual start address
 * end:   virtual end address
 */
	.align	5
ENTRY(cpu_xscale_dcache_clean_range)
#if !CACHE_WRITE_THROUGH
	bic	r0, r0, #CACHELINESIZE - 1
	sub	r2, r1, r0
	cmp	r2, #MAX_AREA_SIZE
	movhi	r2, #0
	bhi	cpu_xscale_cache_clean_invalidate_all_r2
1:	mcr	p15, 0, r0, c7, c10, 1		@ Clean D cache line
	add	r0, r0, #CACHELINESIZE
	mcr	p15, 0, r0, c7, c10, 1		@ Clean D cache line
	add	r0, r0, #CACHELINESIZE
	cmp	r0, r1
	blo	1b
#endif
	mcr	p15, 0, ip, c7, c10, 4		@ Drain Write (& Fill) Buffer
	mov	pc, lr

/*
 * cpu_xscale_dcache_clean_page(page)
 *
 * Cleans a single page of dcache so that if we have any future aliased
 * mappings, they will be consistent at the time that they are created.
 *
 * Note:
 *  1. we don't need to flush the write buffer in this case. [really? -Nico]
 *  2. we don't invalidate the entries since when we write the page
 *     out to disk, the entries may get reloaded into the cache.
 */
	.align	5
ENTRY(cpu_xscale_dcache_clean_page)
#if !CACHE_WRITE_THROUGH
	mov	r1, #PAGESIZE
1:	mcr	p15, 0, r0, c7, c10, 1		@ Clean D cache line
	add	r0, r0, #CACHELINESIZE
	mcr	p15, 0, r0, c7, c10, 1		@ Clean D cache line
	add	r0, r0, #CACHELINESIZE
	mcr	p15, 0, r0, c7, c10, 1		@ Clean D cache line
	add	r0, r0, #CACHELINESIZE
	mcr	p15, 0, r0, c7, c10, 1		@ Clean D cache line
	add	r0, r0, #CACHELINESIZE
	subs	r1, r1, #4 * CACHELINESIZE
	bne	1b
#endif
	mcr	p15, 0, ip, c7, c10, 4		@ Drain Write (& Fill) Buffer
	mov	pc, lr

/*
 * cpu_xscale_dcache_clean_entry(addr)
 *
 * Clean the specified entry of any caches such that the MMU
 * translation fetches will obtain correct data.
 *
 * addr: cache-unaligned virtual address
 */
	.align	5
ENTRY(cpu_xscale_dcache_clean_entry)
	mcr	p15, 0, r0, c7, c10, 1		@ Clean D cache line
	mcr	p15, 0, ip, c7, c10, 4		@ Drain Write (& Fill) Buffer
	mov	pc, lr

/* ================================ I-CACHE =============================== */

/*
 * cpu_xscale_icache_invalidate_range(start, end)
 *
 * invalidate a range of virtual addresses from the Icache
 *
 * start: virtual start address
 * end:   virtual end address
 *
 * Note: This is vaguely defined as supposed to bring the dcache and the
 *       icache in sync by the way this function is used.
 */
	.align	5
ENTRY(cpu_xscale_icache_invalidate_range)
	bic	r0, r0, #CACHELINESIZE - 1
1:	clean_d_line r0				@ Clean D cache line
	mcr	p15, 0, r0, c7, c5, 1		@ Invalidate I cache line
	add	r0, r0, #CACHELINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, ip, c7, c5, 6		@ Invalidate BTB
	mcr	p15, 0, ip, c7, c10, 4		@ Drain Write (& Fill) Buffer
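
A note on the clean_d_cache macro near the top of this listing: it does not walk the cache by set and way. Instead it issues line-allocate operations (mcr p15, 0, rd, c7, c2, 5) across a dedicated, never-used 32 KB virtual window, and it flips between the two halves of the 64 KB region at CLEAN_ADDR on every invocation, so each allocation evicts and writes back whatever dirty line previously occupied that slot. The following is a minimal C sketch of that bookkeeping, for illustration only; the constant names mirror the assembly, allocate_d_line is a hypothetical stand-in for the CP15 allocate operation, and nothing here is kernel API.

#include <stdint.h>
#include <stdio.h>

#define CACHESIZE      32768u       /* 32 KB data cache                      */
#define CACHELINESIZE  32u          /* 32-byte cache lines                   */
#define CLEAN_ADDR     0xfffe0000u  /* 2*CACHESIZE-aligned, otherwise unused */

/* Counterpart of the clean_addr word kept in the .data section. */
static uint32_t clean_addr = CLEAN_ADDR;

/* Hypothetical stand-in for "mcr p15, 0, va, c7, c2, 5" (allocate D cache line). */
static void allocate_d_line(uint32_t va) { (void)va; }

/*
 * Conceptual model of clean_d_cache: flip to the half of the 64 KB window
 * that was NOT used last time, then allocate one line every CACHELINESIZE
 * bytes; each allocation evicts (and therefore writes back) whatever dirty
 * data previously occupied that line.
 */
static void clean_d_cache_model(void)
{
	clean_addr ^= CACHESIZE;		/* alternate between the two halves */
	uint32_t end = clean_addr + CACHESIZE;

	for (uint32_t va = clean_addr; va != end; va += CACHELINESIZE)
		allocate_d_line(va);
}

int main(void)
{
	clean_d_cache_model();
	printf("first clean used window at  0x%08lx\n", (unsigned long)clean_addr);
	clean_d_cache_model();
	printf("second clean used window at 0x%08lx\n", (unsigned long)clean_addr);
	return 0;
}

Flipping halves means two consecutive full cleans never reuse the same virtual lines, which is what the comment above CLEAN_ADDR credits with avoiding the otherwise unexplained eviction problems.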
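
A second recurring pattern: cpu_xscale_cache_clean_invalidate_range and cpu_xscale_dcache_clean_range compare the size of the requested range against MAX_AREA_SIZE (equal to the 32 KB data cache size) and branch to the whole-cache routine when a line-by-line walk would touch at least as many lines as the cache holds. Below is a short C sketch of that decision logic, assuming hypothetical clean_and_invalidate_line/clean_and_invalidate_all helpers in place of the real CP15 sequences; it is illustrative only, not the kernel's implementation.

#include <stdint.h>
#include <stdio.h>

#define MAX_AREA_SIZE  32768u   /* flush threshold, equal to CACHESIZE */
#define CACHELINESIZE  32u      /* 32-byte cache lines                 */

/* Hypothetical stand-ins for the per-line and whole-cache CP15 sequences. */
static void clean_and_invalidate_line(uint32_t va) { (void)va; }
static void clean_and_invalidate_all(void) { puts("whole-cache clean + invalidate"); }

static void cache_clean_invalidate_range_model(uint32_t start, uint32_t end)
{
	start &= ~(uint32_t)(CACHELINESIZE - 1);	/* round down to a cache line */

	if (end - start > MAX_AREA_SIZE) {		/* large range: take the whole-cache path */
		clean_and_invalidate_all();
		return;
	}
	for (uint32_t va = start; va < end; va += CACHELINESIZE)
		clean_and_invalidate_line(va);		/* c7,c10,1 (clean) then c7,c6,1 (invalidate) */
}

int main(void)
{
	cache_clean_invalidate_range_model(0x8000, 0x8400);	/* 1 KB: per-line path      */
	cache_clean_invalidate_range_model(0x8000, 0x20000);	/* 96 KB: whole-cache path  */
	return 0;
}

The unsigned comparisons here correspond to the bhi (branch if higher) and blo (branch if lower) conditions used in the assembly.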
