cacheArchLib.c
*
* cacheArchClear - clear (flush and invalidate) entries from an ARM cache
*
* This routine clears some or all entries from the specified ARM cache.
*
* RETURNS: OK, or ERROR if the cache type is invalid or the cache control
* is not supported.
*
* NOMANUAL
*/

LOCAL STATUS cacheArchClear
    (
    CACHE_TYPE  cache,          /* cache to clear */
    void *      address,        /* address to clear */
    size_t      bytes           /* bytes to clear */
    )
    {
    if (cacheProbe (cache) != OK)
        return ERROR;                           /* invalid cache */

#if ((ARMCACHE == ARMCACHE_710A) || (ARMCACHE == ARMCACHE_720T) || \
     (ARMCACHE == ARMCACHE_740T))
    cacheDClearAll ();                          /* also drains write-buffer */
#endif

#if ((ARMCACHE == ARMCACHE_810) || (ARMCACHE == ARMCACHE_SA110) || \
     (ARMCACHE == ARMCACHE_SA1100) || (ARMCACHE == ARMCACHE_SA1500) || \
     (ARMCACHE == ARMCACHE_920T) || (ARMCACHE == ARMCACHE_926E) || \
     (ARMCACHE == ARMCACHE_940T) || (ARMCACHE == ARMCACHE_946E) || \
     (ARMCACHE == ARMCACHE_XSCALE) || (ARMCACHE == ARMCACHE_1020E) || \
     (ARMCACHE == ARMCACHE_1022E))
    if (cache == DATA_CACHE)
        if ((bytes == ENTIRE_CACHE) ||
#if (ARMCACHE == ARMCACHE_810)
            /*
             * On 810, to flush an individual address, we actually end up
             * flushing much more. If the address range corresponds to 8
             * segments or more, we might as well do the lot and be done with
             * it.
             */
            (bytes >= (_CACHE_ALIGN_SIZE * 8)))
#endif /* (ARMCACHE == ARMCACHE_810) */
#if (ARMCACHE == ARMCACHE_940T)
            /* similar arguments on 940T, for 4 segments or more */
            (bytes >= (_CACHE_ALIGN_SIZE * 4)))
#endif
#if ((ARMCACHE == ARMCACHE_SA110) || (ARMCACHE == ARMCACHE_SA1100) || \
     (ARMCACHE == ARMCACHE_SA1500) || (ARMCACHE == ARMCACHE_920T) || \
     (ARMCACHE == ARMCACHE_926E) || (ARMCACHE == ARMCACHE_946E) || \
     (ARMCACHE == ARMCACHE_XSCALE) || (ARMCACHE == ARMCACHE_1020E) || \
     (ARMCACHE == ARMCACHE_1022E))
            (bytes >= D_CACHE_SIZE))
#endif
            cacheDClearAll ();                  /* also drains write-buffer */
        else
            {
            bytes += (size_t) address;
            address = (void *) ((UINT) address & ~(_CACHE_ALIGN_SIZE - 1));
            do
                {
                cacheDClear (address);
                address = (void *) ((UINT) address + _CACHE_ALIGN_SIZE);
                } while ((size_t) address < bytes);

            cacheArchPipeFlush ();              /* drain write buffer */
            }
    else
        {
        /*
         * I-cache. Cache is effectively read-only, so flush is a null
         * operation, so only need to invalidate the cache.
         */

#if ((ARMCACHE == ARMCACHE_SA110) || \
     (ARMCACHE == ARMCACHE_SA1100) || (ARMCACHE == ARMCACHE_SA1500))
        cacheIInvalidateAll ();         /* Cannot clear individual lines */
#endif

#if ((ARMCACHE == ARMCACHE_920T) || (ARMCACHE == ARMCACHE_926E) || \
     (ARMCACHE == ARMCACHE_946E) || (ARMCACHE == ARMCACHE_XSCALE) || \
     (ARMCACHE == ARMCACHE_1020E) || (ARMCACHE == ARMCACHE_1022E))
        if ((bytes == ENTIRE_CACHE) || (bytes >= I_CACHE_SIZE))
            cacheIInvalidateAll ();
        else
            {
#if (ARMCACHE == ARMCACHE_XSCALE)
            cacheIInvalidateAll ();
#else
            bytes += (size_t) address;
            address = (void *) ((UINT) address & ~(_CACHE_ALIGN_SIZE - 1));
            do
                {
                cacheIInvalidate (address);
#if (ARMCACHE == ARMCACHE_XSCALE)
                btbInvalidate ();
#endif /* (ARMCACHE == ARMCACHE_XSCALE) */
                address = (void *) ((UINT) address + _CACHE_ALIGN_SIZE);
                } while ((size_t) address < bytes);
#endif /* (ARMCACHE == ARMCACHE_XSCALE) */
            }
#endif /* (ARMCACHE == ARMCACHE_920T,946E,XSCALE) */

#if (ARMCACHE == ARMCACHE_940T)
        if (bytes == ENTIRE_CACHE)
            cacheIInvalidateAll ();
        else if (bytes >= (_CACHE_ALIGN_SIZE * 4))
            cacheIInvalidateAll ();
        else
            {
            bytes += (size_t) address;
            address = (void *) ((UINT) address & ~(_CACHE_ALIGN_SIZE - 1));
            do
                {
                cacheIInvalidate (address);
                address = (void *) ((UINT) address + _CACHE_ALIGN_SIZE);
                } while ((size_t) address < bytes);
            }
#endif /* (ARMCACHE == ARMCACHE_940T) */

#if ARMCACHE_NEEDS_IMB
        if (bytes == ENTIRE_CACHE)
            cacheIMB ();                /* Execute IMB to flush Prefetch Unit */
        else
            cacheIMBRange (address, (INSTR *) ((UINT32)address + bytes));
#endif
        } /* endelse I-cache */
#endif /* ARMCACHE == ARMCACHE_810, SA*, 920T, 940T, 946E, XSCALE, 1020E,1022E */

    return OK;
    } /* cacheArchClear() */
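/*
* Usage sketch (illustrative only, not part of the original source): callers
* do not invoke cacheArchClear() directly; it is installed in the cacheLib
* function table and reached through the generic entry point cacheClear().
* A driver sharing a buffer with a DMA device might clear the covering
* D-cache lines before handing the buffer over (pBuf and nBytes are
* hypothetical names):
*
*     if (cacheClear (DATA_CACHE, pBuf, nBytes) != OK)
*         return ERROR;
*
* Passing ENTIRE_CACHE as the byte count clears the whole cache, as the
* routine above shows.
*/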
/*******************************************************************************
*
* cacheArchTextUpdate - synchronize the ARM instruction and data caches
*
* This routine flushes the ARM data cache and drains the write-buffer, if
* appropriate, and then invalidates the instruction cache. The instruction
* cache is forced to fetch code that may have been created via the data path.
*
* RETURNS: OK, or ERROR if the cache type is invalid or the cache control
* is not supported.
*
* NOMANUAL
*/

LOCAL STATUS cacheArchTextUpdate
    (
    void *      address,        /* virtual address */
    size_t      bytes           /* number of bytes to update */
    )
    {
#if (ARMCACHE == ARMCACHE_810)
    int oldLevel, stat;
#endif

#if ((ARMCACHE == ARMCACHE_710A) || (ARMCACHE == ARMCACHE_740T) || \
     (ARMCACHE == ARMCACHE_720T))
    cacheArchPipeFlush ();
    return cacheArchInvalidate (INSTRUCTION_CACHE, address, bytes);
#endif

#if (ARMCACHE == ARMCACHE_810)
    /*
     * There is an argument that all we need to do here is an IMB,
     * but play safe for the moment.
     *
     * 810 is a combined ID-cache: when invalidating the "I-cache" we
     * will be invalidating the ID-cache, so we must lock interrupts
     * between cleaning the cache and invalidating it.
     */

    oldLevel = cacheArchIntLock ();

    if (cacheArchFlush (DATA_CACHE, address, bytes) == OK)
        stat = cacheArchInvalidate (INSTRUCTION_CACHE, address, bytes);
    else
        stat = ERROR;

    intIFUnlock (oldLevel);

    return stat;
#endif /* (ARMCACHE == ARMCACHE_810) */

#if ((ARMCACHE == ARMCACHE_SA110) || (ARMCACHE == ARMCACHE_SA1100) || \
     (ARMCACHE == ARMCACHE_SA1500) || (ARMCACHE == ARMCACHE_920T) || \
     (ARMCACHE == ARMCACHE_926E) || (ARMCACHE == ARMCACHE_940T) || \
     (ARMCACHE == ARMCACHE_946E) || (ARMCACHE == ARMCACHE_XSCALE) || \
     (ARMCACHE == ARMCACHE_1020E) || (ARMCACHE == ARMCACHE_1022E))
    /* Harvard caches: should be able to invalidate the I-cache with impunity */

    if (cacheArchFlush (DATA_CACHE, address, bytes) == OK)
        return cacheArchInvalidate (INSTRUCTION_CACHE, address, bytes);
    else
        return ERROR;
#endif /* ARMCACHE = SA110,1100,1500,920T,940T,946E,XSCALE,1020E,1022E */
    } /* cacheArchTextUpdate() */
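/*
* Usage sketch (illustrative only, not part of the original source): code that
* generates or copies instructions through the data path, such as a loader or
* a breakpoint facility, reaches cacheArchTextUpdate() through the generic
* entry point cacheTextUpdate() once the new code is in place (pText, pSrc
* and textSize are hypothetical names):
*
*     bcopy (pSrc, pText, textSize);
*
*     if (cacheTextUpdate (pText, textSize) != OK)
*         return ERROR;
*/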
/*******************************************************************************
*
* cacheArchDmaMalloc - allocate a cache-safe buffer
*
* This routine attempts to return a pointer to a section of memory
* that will not experience cache coherency problems. This routine
* is only called when MMU support is available for cache control.
*
* INTERNAL
* The above comment about being called only when MMU support is available is
* present in all the other architectures. It is not clear that this is
* necessarily true.
*
* INTERNAL
* We check if the cache is actually on before allocating the memory. It
* is possible that the user wants Memory Management Unit (MMU)
* support but does not need caching.
*
* RETURNS: A pointer to a cache-safe buffer, or NULL.
*
* SEE ALSO: cacheArchDmaFree(), cacheDmaMalloc()
*
* NOMANUAL
*/

LOCAL void * cacheArchDmaMalloc
    (
    size_t      bytes           /* size of cache-safe buffer */
    )
    {
    void *      pBuf;
    int         pageSize;

    /*
     * This seems dangerous, as the buffer could be allocated and then the
     * cache could be switched on later. However, it is what the other
     * architectures do.
     */

    if (!cacheIsOn (DATA_CACHE))
        {
        /* If cache is off, just allocate buffer */

        return malloc (bytes);
        }

    if ((pageSize = VM_PAGE_SIZE_GET ()) == ERROR)
        return NULL;

    /* make sure bytes is a multiple of pageSize */

    bytes = ROUND_UP (bytes, pageSize);

#if (!ARM_HAS_MPU)
    if ((_func_valloc == NULL) ||
        ((pBuf = (void *)(* _func_valloc) (bytes)) == NULL))
        return NULL;
#else /* (!ARM_HAS_MPU) */
    /*
     * On MPUs, regions must be aligned with their size, which must be
     * a power of two and at least 4k in size.
     *
     * Round up to a power of two in size.
     */

    bytes = ROUND_UP (bytes, (1 << (ffsMsb (bytes) - 1)));

    if ((_func_memalign == NULL) ||
        ((pBuf = (void *)(* _func_memalign) (bytes, bytes)) == NULL))
        return NULL;
#endif /* (!ARM_HAS_MPU) */

    /*
     * Note that on MPUs we need to specify VM_STATE_VALID here, in
     * order that a new region will be created, if necessary, and that
     * that region will be marked as active, with appropriate access
     * rights. We should also free the allocated buffer and return NULL
     * if the VM_STATE_SET call fails. This fixes SPR #30697.
     */

    if (VM_STATE_SET (NULL, pBuf, bytes,
                      VM_STATE_MASK_CACHEABLE | VM_STATE_MASK_VALID,
                      VM_STATE_CACHEABLE_NOT | VM_STATE_VALID) != OK)
        {
        free (pBuf);
        return NULL;
        }

    return pBuf;
    } /* cacheArchDmaMalloc() */
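/*
* Usage sketch (illustrative only, not part of the original source): a driver
* obtains a cache-safe buffer through the generic entry point cacheDmaMalloc()
* and releases it with cacheDmaFree(), which reaches cacheArchDmaFree() below
* (pDmaBuf and BUF_SIZE are hypothetical names):
*
*     char * pDmaBuf = cacheDmaMalloc (BUF_SIZE);
*
*     if (pDmaBuf == NULL)
*         return ERROR;
*
*     ... use the buffer for DMA ...
*
*     cacheDmaFree (pDmaBuf);
*/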
/*******************************************************************************
*
* cacheArchDmaFree - free the buffer acquired by cacheArchDmaMalloc()
*
* This routine returns to the free memory pool a block of memory previously
* allocated with cacheArchDmaMalloc(). The buffer is marked cacheable.
*
* RETURNS: OK, or ERROR if cacheArchDmaMalloc() cannot be undone.
*
* SEE ALSO: cacheArchDmaMalloc(), cacheDmaFree()
*
* NOMANUAL
*/

LOCAL STATUS cacheArchDmaFree
    (
    void *      pBuf            /* ptr returned by cacheArchDmaMalloc() */
    )
    {
    BLOCK_HDR * pHdr;           /* pointer to block header */
    STATUS      status = OK;    /* return value */

    /* changed to vmLibInstalled or BaseLibInstalled fixes SPR #22407 */

    if (cacheIsOn (DATA_CACHE) &&
        (vmLibInfo.vmLibInstalled || vmLibInfo.vmBaseLibInstalled))
        {
        pHdr = BLOCK_TO_HDR (pBuf);

        status = VM_STATE_SET (NULL, pBuf,
                               (pHdr->nWords * 2) - sizeof (BLOCK_HDR),
                               VM_STATE_MASK_CACHEABLE, VM_STATE_CACHEABLE);
        }

    free (pBuf);                /* free buffer after its state has been modified */

    return status;
    } /* cacheArchDmaFree() */

/*******************************************************************************
*
* cacheProbe - test for the presence of a type of cache
*
* This routine returns status with regard to the presence of a particular
* type of cache.
*
* RETURNS: OK, or ERROR if the cache type is invalid or the cache control
* is not supported.
*
* CAVEATS
* On ARM710A/810/740T/720T we return present for both data and
* instruction cache as they have one mixed instruction and data cache.
*
*/

LOCAL STATUS cacheProbe
    (
    CACHE_TYPE  cache           /* cache to test */
    )
    {
    if ((cache == INSTRUCTION_CACHE) || (cache == DATA_CACHE))
        return OK;

    errno = S_cacheLib_INVALID_CACHE;           /* set errno */

    return ERROR;
    } /* cacheProbe() */

/*******************************************************************************
*
* cacheIsOn - boolean function to return state of cache
*
* This routine returns the state of the specified cache. The cache is
* assumed to exist.
*
* RETURNS: TRUE, if specified cache is enabled, FALSE otherwise.
*/

LOCAL BOOL cacheIsOn
    (
    CACHE_TYPE  cache           /* cache to examine state */
    )
    {
#if ((ARMCACHE == ARMCACHE_940T) || (ARMCACHE == ARMCACHE_946E))
    /* return whether we have been *asked* to enable the cache in question */

    if (cache == INSTRUCTION_CACHE)
        return (cacheArchState & MMUCR_I_ENABLE);
    else
        return (cacheArchState & MMUCR_C_ENABLE);
#elif ((ARMCACHE == ARMCACHE_920T) || (ARMCACHE == ARMCACHE_926E) || \
       (ARMCACHE == ARMCACHE_1020E) || (ARMCACHE == ARMCACHE_1022E))
    if (cache == INSTRUCTION_CACHE)
        {
        /* return whether actually enabled */

        return (mmuCrGet () & MMUCR_I_ENABLE);
        }
    else
        {
        /* return whether we have been asked to enable the D-cache */

        return ((cacheArchState & MMUCR_C_ENABLE) != 0);
        }
#else /* (ARMCACHE == ARMCACHE_940T,946E) */
#if ((ARMCACHE == ARMCACHE_SA110) || (ARMCACHE == ARMCACHE_SA1100) || \
     (ARMCACHE == ARMCACHE_SA1500) || (ARMCACHE == ARMCACHE_XSCALE))
    if (cache == INSTRUCTION_CACHE)
        {
        /* return whether actually enabled */

        return (mmuCrGet () & MMUCR_I_ENABLE);
        }
    else
#endif
        /* return whether we have been asked to enable the cache */

        return ((cacheArchState & MMUCR_C_ENABLE) != 0);
#endif /* (ARMCACHE == ARMCACHE_940T,946E) */
    }

/*******************************************************************************
*
* cacheMmuIsOn - boolean function to return state of MMU/MPU
*
* This routine returns the state of the MMU/MPU.
*
* RETURNS: TRUE if MMU/MPU enabled,
* FALSE if not enabled.
*/

LOCAL BOOL cacheMmuIsOn
    (
    void
    )
    {
    return ((mmuCrGet () & MMUCR_M_ENABLE) != 0);
    }

#endif /* (ARMCACHE != ARMCACHE_NONE) */