/* buffer.c - FAT file system (FATFS) buffer pool management */
if (cBufVolumes++ == 0) {
DWORD cbTotal;
PBYTE pbCarve = NULL;
if (cbufTotal == 0) {
SYSTEM_INFO si;
InitList((PDLINK)&dlBufMRU);
// Determine buffer size (and number of blocks per that size)
GetSystemInfo(&si);
#ifdef BIG_BUFFERS
cbBuf = si.dwPageSize;
// Make sure that a system with, say, 4K pages will still work
// with a device that has, say, 8K sectors.
if (cbBuf < pvol->v_pdsk->d_diActive.di_bytes_per_sect) {
cbBuf = pvol->v_pdsk->d_diActive.di_bytes_per_sect;
flBuf |= GBUF_LOCALALLOC;
}
#else
cbBuf = pvol->v_pdsk->d_diActive.di_bytes_per_sect;
if (cbBuf != si.dwPageSize)
flBuf |= GBUF_LOCALALLOC;
#endif
// Make sure the optimal buffer size for this volume is
// ALSO at least as large as our block size.
if (cbBuf < BLOCK_SIZE)
cbBuf = BLOCK_SIZE;
// Make sure buffer size is a multiple of the block size.
if ((cbBuf & BLOCK_OFFSET_MASK) != 0) {
DEBUGMSG(ZONE_INIT || ZONE_ERRORS,(DBGTEXT("FATFS!AllocBufferPool: cbBuf(%d) not a multiple of BLOCK_SIZE(%d), failing...\n"), cbBuf, BLOCK_SIZE));
goto error;
}
// Calculate the number of blocks per buffer.
cblkBuf = cbBuf >> BLOCK_LOG2;
// Calculate mask that converts a block into a buffer-granular block.
maskBufBlock = ~(cblkBuf-1);
// Make sure number of blocks per buffer is power of 2.
if ((cblkBuf & ~maskBufBlock) != 0) {
DEBUGMSG(ZONE_INIT || ZONE_ERRORS,(DBGTEXT("FATFS!AllocBufferPool: cblkBuf(%d) is not a power of two, failing...\n"), cblkBuf));
goto error;
}
// See if it makes sense to allocate a large chunk of virtual
// memory and carve it up (ie, if the total size of the buffer
// pool is a perfect number of pages).
ASSERT(pbBufCarve == NULL);
if ((cbTotal = DEF_BUFFERS * cbBuf) % si.dwPageSize == 0) {
pbBufCarve = VirtualAlloc(0,
cbTotal,
MEM_RESERVE|MEM_COMMIT,
//MEM_RESERVE|MEM_MAPPED,
PAGE_READWRITE
//PAGE_NOACCESS
);
if (pbBufCarve) {
DEBUGALLOC(cbTotal);
pbCarve = pbBufCarve;
}
}
}
while (cbufTotal < DEF_BUFFERS) {
PBUF pbuf;
if (!(pbuf = NewBuffer(cbBuf, &pbCarve)))
break;
cbufTotal++;
AddItem((PDLINK)&dlBufMRU, (PDLINK)&pbuf->b_dlink);
}
if (cbufTotal < MIN_BUFFERS) {
error:
FreeBufferPool(pvol);
LeaveCriticalSection(&csBuffers);
return FALSE;
}
// Since we have at least MIN_BUFFERS buffers now, calculate
// the number of threads our buffer pool can handle simultaneously.
// If DEMAND_PAGING is enabled, we reduce that number by 1 so that
// a Pagein thread can always assume there are adequate buffers.
#ifdef DEMAND_PAGING
cBufThreads = cBufThreadsOrig = cbufTotal / MIN_BUFFERS - 1;
#else
cBufThreads = cBufThreadsOrig = cbufTotal / MIN_BUFFERS;
#endif
}
}
// For all calls (first time or not), we must invalidate any buffers
// we retained for an unmounted volume that was subsequently *recycled*
// instead of *remounted*.
if (pvol->v_flags & VOLF_RECYCLED)
InvalidateBufferSet(pvol, TRUE);
LeaveCriticalSection(&csBuffers);
return TRUE;
}
/* FreeBufferPool - free all buffers in buffer pool
*
* ENTRY
* pvol - pointer to VOLUME currently being unmounted
*
* EXIT
* TRUE if volume can be freed, or FALSE if not (ie, there are still
* dirty buffers that could not be committed). Note that if the volume
* wasn't going to be freed to begin with, we always return FALSE.
*
* This module keeps track of the number of buffer clients (volumes),
* and only when the last client has called FreeBufferPool will the pool
* actually be freed (subject to all buffers being clean, of course).
*/
BOOL FreeBufferPool(PVOLUME pvol)
{
PBUF pbuf, pbufEnd;
// fFree is the running answer: cleared as soon as a dirty/held buffer
// belonging to this volume prevents a clean release.
BOOL fFree = TRUE;
// fFreeCarve: whether the single "carved" chunk (pbBufCarve) that backs
// multiple buffers may be released as a whole.
BOOL fFreeCarve = FALSE;
// Caller must already own the volume's critical section.
ASSERT(OWNCRITICALSECTION(&pvol->v_cs));
// If the current volume isn't even using the buffer pool (yet),
// then we're done.
if (!(pvol->v_flags & VOLF_BUFFERED))
return fFree;
EnterCriticalSection(&csBuffers);
// This is our last chance to commit all dirty buffers. With any
// luck, this will clean all the buffers, allowing us to free them all.
// That will be our assumption at least. If dirty buffers still remain
// for this volume, its DIRTY bit will get set again, below.
CommitVolumeBuffers(pvol);
pvol->v_flags &= ~VOLF_DIRTY;
// If we have buffers carved from a single chunk of memory, then
// quickly walk the entire list and determine if ALL the carved buffers
// can be freed. A single dirty or held carved buffer means that NONE
// of the carved buffers can be freed.
// dlBufMRU is the MRU-ordered doubly-linked buffer list; the list head
// itself acts as the end-of-list sentinel.
pbuf = dlBufMRU.pbufNext;
pbufEnd = (PBUF)&dlBufMRU;
if (pbBufCarve) {
// The carve chunk can only be released when this is the last client volume.
if (cBufVolumes == 1) {
fFreeCarve = TRUE;
// When the volume is CLOSING, buffer contents are discarded anyway,
// so the dirty/held scan is only needed when it is not closing.
if (!(pvol->v_flags & VOLF_CLOSING)) {
while (pbuf != pbufEnd) {
if ((pbuf->b_flags & BUF_DIRTY) || HeldBuffer(pbuf)) {
DEBUGMSG(TRUE,(DBGTEXT("FATFS!FreeBufferPool: dirty/held buffer for block %d!\n"), pbuf->b_blk));
if (pbuf->b_flags & BUF_CARVED) {
fFreeCarve = fFree = FALSE;
#ifndef DEBUG
break; // in debug builds, we like to see all the dirty buffers
#endif
}
}
pbuf = pbuf->b_dlink.pbufNext;
}
}
}
if (fFreeCarve) {
DEBUGFREE(DEF_BUFFERS * cbBuf);
VERIFYTRUE(VirtualFree(pbBufCarve, 0, MEM_RELEASE));
pbBufCarve = NULL;
}
}
// Now walk the buffer list again, freeing every buffer that we can.
pbuf = dlBufMRU.pbufNext;
while (pbuf != pbufEnd) {
BOOL fClean = !(pbuf->b_flags & BUF_DIRTY) && !HeldBuffer(pbuf);
if (pvol->v_flags & VOLF_CLOSING) {
// The only reason we hold the buffer is so that CleanBuffer
// won't generate a bogus assertion. We don't care about the
// data anymore, so it is otherwise pointless.
HoldBuffer(pbuf);
CleanBuffer(pbuf);
UnholdBuffer(pbuf);
fClean = TRUE;
}
if (cBufVolumes > 1) {
// If there are still other clients (ie, volumes), then all we
// want to do is remove any buffer references to this volume
// if it's being destroyed, so that we avoid false matches
// if another volume with the same address gets allocated later.
if (pbuf->b_pvol == pvol) {
if (fClean) {
pbuf->b_pvol = NULL;
} else {
// Buffer still dirty/held: the volume stays DIRTY and cannot be freed.
pvol->v_flags |= VOLF_DIRTY;
fFree = FALSE;
}
}
}
else {
ASSERT(pbuf->b_pvol == NULL || pbuf->b_pvol == pvol);
if (fClean) {
pbuf->b_pvol = NULL;
// The current buffer is clean. Now, if this is NOT a carved
// buffer, or it is but we can free all the carved buffers anyway,
// then free the current buffer.
if (!(pbuf->b_flags & BUF_CARVED) || fFreeCarve) {
cbufTotal--;
// Carved buffers share pbBufCarve (released above, if at all);
// only individually-allocated data blocks are freed here, using
// the allocator recorded in flBuf at allocation time.
if (!(pbuf->b_flags & BUF_CARVED)) {
DEBUGFREE(cbBuf);
if (flBuf & GBUF_LOCALALLOC)
VERIFYNULL(LocalFree((HLOCAL)pbuf->b_pdata));
else
VERIFYTRUE(VirtualFree(pbuf->b_pdata, 0, MEM_RELEASE));
}
RemoveItem((PDLINK)&pbuf->b_dlink);
DEBUGFREE(sizeof(BUF));
VERIFYNULL(LocalFree((HLOCAL)pbuf));
// RemoveItem/LocalFree invalidated our iterator; restart from the head.
pbuf = dlBufMRU.pbufNext;
continue;
}
}
else {
ASSERT(pbuf->b_pvol == pvol);
pvol->v_flags |= VOLF_DIRTY;
fFree = FALSE;
}
}
pbuf = pbuf->b_dlink.pbufNext;
}
//DEBUGMSG(cbufTotal, (DBGTEXT("FATFS!FreeBufferPool: %d buffers retained\n"), cbufTotal));
// If there were no dirty buffers for this volume, then we can remove
// its reference to buffer pool.
if (!(pvol->v_flags & VOLF_DIRTY)) {
pvol->v_flags &= ~VOLF_BUFFERED;
cBufVolumes--;
}
LeaveCriticalSection(&csBuffers);
return fFree;
}
/* ModifyBuffer - Prepare to dirty buffer
 *
 * ENTRY
 * pbuf - pointer to buffer (must be held by the caller)
 * pMod - pointer to modification
 * cbMod - length of modification (ZERO to prevent logging)
 *
 * EXIT
 * ERROR_SUCCESS if the buffer is allowed to be modified, otherwise
 * some error code (eg, ERROR_WRITE_PROTECT).
 *
 * NOTE(review): pMod/cbMod are accepted but not examined in this body;
 * presumably retained for a logging path — confirm against callers.
 */
DWORD ModifyBuffer(PBUF pbuf, PVOID pMod, int cbMod)
{
    ASSERTHELDBUFFER(pbuf);

    // A write-protected volume can never accept modifications.
    if (pbuf->b_pvol->v_flags & VOLF_READONLY)
        return ERROR_WRITE_PROTECT;

    // Otherwise simply mark the buffer dirty and let the caller proceed.
    DirtyBuffer(pbuf);
    return ERROR_SUCCESS;
}
/* DirtyBuffer - Dirty a buffer
 *
 * ENTRY
 * pbuf - pointer to BUF (must be held by the caller)
 *
 * EXIT
 * The buffer is marked DIRTY.
 */
void DirtyBuffer(PBUF pbuf)
{
    ASSERTHELDBUFFER(pbuf);
    // Record that this buffer's contents now differ from the medium.
    pbuf->b_flags = pbuf->b_flags | BUF_DIRTY;
}
/* DirtyBufferError - Dirty a buffer that cannot be cleaned
 *
 * ENTRY
 * pbuf - pointer to BUF (must be held by the caller)
 * pMod - pointer to modification (NULL if none)
 * cbMod - length of modification (ZERO if none)
 *
 * EXIT
 * The buffer is marked DIRTY, and any error is recorded.
 */
void DirtyBufferError(PBUF pbuf, PVOID pMod, int cbMod)
{
    ASSERTHELDBUFFER(pbuf);
    pbuf->b_flags |= BUF_DIRTY;

    // The error count and thread-capacity adjustment are shared state,
    // so they are serialized on csBuffers; only the first error on a
    // given buffer is counted (BUF_ERROR latches).
    EnterCriticalSection(&csBuffers);
    if ((pbuf->b_flags & BUF_ERROR) == 0) {
        // Every MIN_BUFFERS errored buffers removes one thread's worth
        // of capacity from the pool.
        if ((cbufError % MIN_BUFFERS) == 0)
            InterlockedDecrement(&cBufThreads);
        cbufError++;
        pbuf->b_flags |= BUF_ERROR;
    }
    LeaveCriticalSection(&csBuffers);
}
/* CommitBuffer - Commit a dirty buffer
*
* ENTRY
* pbuf - pointer to BUF
* fCS - TRUE if csBuffers is held, FALSE if not
*
* EXIT
* ERROR_SUCCESS (0) if successful, non-zero if not.
*
* NOTES
* Since we clear BUF_DIRTY before calling WriteVolume, it's important
* we do something to prevent FindBuffer from getting any bright ideas
* about reusing it before we have finished writing the original dirty
* contents. Furthermore, since we don't want to hold csBuffers across
* the potentially lengthy WriteVolume request, applying another hold to
* the buffer is really our only option.
*/
DWORD CommitBuffer(PBUF pbuf, BOOL fCS)
{
PBYTE pbMod = NULL;
int cbMod = 0;
DWORD dwError = ERROR_SUCCESS;
#ifdef UNDER_CE ASSERT(fCS == OWNCRITICALSECTION(&csBuffers));
/* NOTE(review): the remainder of CommitBuffer is truncated in this copy;
 * the text that originally followed here was web-viewer UI chrome
 * (copy/search/zoom shortcut labels), not source code, and was removed.
 * Recover the full CommitBuffer body from the original source tree. */