store_dir_diskd.c

    if (pid < 0)
        fatalf("execl: %s", Config.Program.diskd);
    fd_note(diskdinfo->rfd, "diskd -> squid health monitor");
    fd_note(diskdinfo->wfd, "squid -> diskd health monitor");
    commSetSelect(diskdinfo->rfd, COMM_SELECT_READ, diskdExited, NULL, 0);
    storeDiskdDirInitBitmap(sd);
    if (storeDiskdDirVerifyCacheDirs(sd) < 0)
        fatal(errmsg);
    storeDiskdDirOpenSwapLog(sd);
    storeDiskdDirRebuild(sd);
    if (!started_clean_event) {
        eventAdd("storeDirClean", storeDiskdDirCleanEvent, NULL, 15.0, 1);
        started_clean_event = 1;
    }
    (void) storeDirGetBlkSize(sd->path, &sd->fs.blksize);
    comm_quick_poll_required();
}

static void
storeDiskdStats(StoreEntry * sentry)
{
    storeAppendPrintf(sentry, "sent_count: %d\n", diskd_stats.sent_count);
    storeAppendPrintf(sentry, "recv_count: %d\n", diskd_stats.recv_count);
    storeAppendPrintf(sentry, "max_away: %d\n", diskd_stats.max_away);
    storeAppendPrintf(sentry, "max_shmuse: %d\n", diskd_stats.max_shmuse);
    storeAppendPrintf(sentry, "open_fail_queue_len: %d\n", diskd_stats.open_fail_queue_len);
    storeAppendPrintf(sentry, "block_queue_len: %d\n", diskd_stats.block_queue_len);
    diskd_stats.max_away = diskd_stats.max_shmuse = 0;
    storeAppendPrintf(sentry, "\n OPS SUCCESS FAIL\n");
    storeAppendPrintf(sentry, "%7s %9d %9d %7d\n",
        "open", diskd_stats.open.ops, diskd_stats.open.success, diskd_stats.open.fail);
    storeAppendPrintf(sentry, "%7s %9d %9d %7d\n",
        "create", diskd_stats.create.ops, diskd_stats.create.success, diskd_stats.create.fail);
    storeAppendPrintf(sentry, "%7s %9d %9d %7d\n",
        "close", diskd_stats.close.ops, diskd_stats.close.success, diskd_stats.close.fail);
    storeAppendPrintf(sentry, "%7s %9d %9d %7d\n",
        "unlink", diskd_stats.unlink.ops, diskd_stats.unlink.success, diskd_stats.unlink.fail);
    storeAppendPrintf(sentry, "%7s %9d %9d %7d\n",
        "read", diskd_stats.read.ops, diskd_stats.read.success, diskd_stats.read.fail);
    storeAppendPrintf(sentry, "%7s %9d %9d %7d\n",
        "write", diskd_stats.write.ops, diskd_stats.write.success, diskd_stats.write.fail);
}

/*
 * storeDiskdDirSync
 *
 * Sync any pending data. We just sit around and read the queue
 * until the data has finished writing.
 */
static void
storeDiskdDirSync(SwapDir * SD)
{
    static time_t lastmsg = 0;
    diskdinfo_t *diskdinfo = SD->fsdata;
    while (diskdinfo->away > 0) {
        if (squid_curtime > lastmsg) {
            debug(47, 1) ("storeDiskdDirSync: %d messages away\n", diskdinfo->away);
            lastmsg = squid_curtime;
        }
        storeDiskdDirCallback(SD);
    }
}

/*
 * storeDiskdDirCallback
 *
 * Handle callbacks. If we have more than magic2 requests away, we block
 * until the queue is below magic2. Otherwise, we simply return when we
 * don't get a message.
 */
int
storeDiskdDirCallback(SwapDir * SD)
{
    diomsg M;
    int x;
    diskdinfo_t *diskdinfo = SD->fsdata;
    int retval = 0;
    if (diskdinfo->away >= diskdinfo->magic2) {
        diskd_stats.block_queue_len++;
        retval = 1;
        /* We might not have anything to do, but our queue
         * is full.. */
    }
    if (diskd_stats.sent_count - diskd_stats.recv_count > diskd_stats.max_away) {
        diskd_stats.max_away = diskd_stats.sent_count - diskd_stats.recv_count;
    }
    while (1) {
        memset(&M, '\0', sizeof(M));
        x = msgrcv(diskdinfo->rmsgid, &M, msg_snd_rcv_sz, 0, IPC_NOWAIT);
        if (x < 0)
            break;
        else if (x != msg_snd_rcv_sz) {
            debug(79, 1) ("storeDiskdDirCallback: msgget returns %d\n", x);
            break;
        }
        diskd_stats.recv_count++;
        diskdinfo->away--;
        storeDiskdHandle(&M);
        retval = 1;             /* Return that we've actually done some work */
        if (M.shm_offset > -1)
            storeDiskdShmPut(SD, M.shm_offset);
    }
    return retval;
}
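
/*
 * Sketch added for this edit, not part of the original file: the
 * non-blocking drain pattern that storeDiskdDirSync() and
 * storeDiskdDirCallback() above rely on, reduced to plain SysV
 * message-queue calls.  The names example_msg and example_drain_queue
 * are hypothetical; msgrcv() and IPC_NOWAIT are the real primitives
 * the callback uses.  Includes are repeated so the sketch stands alone.
 */
#include <sys/types.h>
#include <sys/ipc.h>
#include <sys/msg.h>

struct example_msg {
    long mtype;                 /* SysV queues require a long message type first */
    int payload;                /* stand-in for the diomsg body */
};

static int
example_drain_queue(int msgid)
{
    struct example_msg m;
    int handled = 0;
    /* IPC_NOWAIT makes msgrcv() fail (errno ENOMSG) instead of blocking
     * when the queue is empty, so callers can poll without stalling. */
    while (msgrcv(msgid, &m, sizeof(m) - sizeof(long), 0, IPC_NOWAIT) >= 0)
        handled++;
    return handled;
}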

static void
storeDiskdDirRebuildComplete(RebuildState * rb)
{
    if (rb->log) {
        debug(47, 1) ("Done reading %s swaplog (%d entries)\n",
            rb->sd->path, rb->n_read);
        fclose(rb->log);
        rb->log = NULL;
    } else {
        debug(47, 1) ("Done scanning %s (%d entries)\n",
            rb->sd->path, rb->counts.scancount);
    }
    store_dirs_rebuilding--;
    storeDiskdDirCloseTmpSwapLog(rb->sd);
    storeRebuildComplete(&rb->counts);
    cbdataFree(rb);
}

static void
storeDiskdDirRebuildFromDirectory(void *data)
{
    RebuildState *rb = data;
    SwapDir *SD = rb->sd;
    LOCAL_ARRAY(char, hdr_buf, SM_PAGE_SIZE);
    StoreEntry *e = NULL;
    StoreEntry tmpe;
    cache_key key[MD5_DIGEST_CHARS];
    sfileno filn = 0;
    int count;
    int size;
    struct stat sb;
    int swap_hdr_len;
    int fd = -1;
    tlv *tlv_list;
    tlv *t;
    assert(rb != NULL);
    debug(20, 3) ("storeDiskdDirRebuildFromDirectory: DIR #%d\n", rb->sd->index);
    for (count = 0; count < rb->speed; count++) {
        assert(fd == -1);
        fd = storeDiskdDirGetNextFile(rb, &filn, &size);
        if (fd == -2) {
            storeDiskdDirRebuildComplete(rb);
            return;
        } else if (fd < 0) {
            continue;
        }
        assert(fd > -1);
        /* lets get file stats here */
        if (fstat(fd, &sb) < 0) {
            debug(20, 1) ("storeDiskdDirRebuildFromDirectory: fstat(FD %d): %s\n",
                fd, xstrerror());
            file_close(fd);
            store_open_disk_fd--;
            fd = -1;
            continue;
        }
        if ((++rb->counts.scancount & 0xFFFF) == 0)
            debug(20, 3) (" %s %7d files opened so far.\n",
                rb->sd->path, rb->counts.scancount);
        debug(20, 9) ("file_in: fd=%d %08X\n", fd, filn);
        statCounter.syscalls.disk.reads++;
        if (FD_READ_METHOD(fd, hdr_buf, SM_PAGE_SIZE) < 0) {
            debug(20, 1) ("storeDiskdDirRebuildFromDirectory: read(FD %d): %s\n",
                fd, xstrerror());
            file_close(fd);
            store_open_disk_fd--;
            fd = -1;
            continue;
        }
        file_close(fd);
        store_open_disk_fd--;
        fd = -1;
        swap_hdr_len = 0;
#if USE_TRUNCATE
        if (sb.st_size == 0)
            continue;
#endif
        tlv_list = storeSwapMetaUnpack(hdr_buf, &swap_hdr_len);
        if (tlv_list == NULL) {
            debug(20, 1) ("storeDiskdDirRebuildFromDirectory: failed to get meta data\n");
            /* XXX shouldn't this be a call to storeDiskdUnlink ? */
            storeDiskdDirUnlinkFile(SD, filn);
            continue;
        }
        debug(20, 3) ("storeDiskdDirRebuildFromDirectory: successful swap meta unpacking\n");
        memset(key, '\0', MD5_DIGEST_CHARS);
        memset(&tmpe, '\0', sizeof(StoreEntry));
        for (t = tlv_list; t; t = t->next) {
            switch (t->type) {
            case STORE_META_KEY:
                assert(t->length == MD5_DIGEST_CHARS);
                xmemcpy(key, t->value, MD5_DIGEST_CHARS);
                break;
#if SIZEOF_SQUID_FILE_SZ == SIZEOF_SIZE_T
            case STORE_META_STD:
                assert(t->length == STORE_HDR_METASIZE);
                xmemcpy(&tmpe.timestamp, t->value, STORE_HDR_METASIZE);
                break;
#else
            case STORE_META_STD_LFS:
                assert(t->length == STORE_HDR_METASIZE);
                xmemcpy(&tmpe.timestamp, t->value, STORE_HDR_METASIZE);
                break;
            case STORE_META_STD:
                assert(t->length == STORE_HDR_METASIZE_OLD);
                {
                    struct {
                        time_t timestamp;
                        time_t lastref;
                        time_t expires;
                        time_t lastmod;
                        size_t swap_file_sz;
                        u_short refcount;
                        u_short flags;
                    } *tmp = t->value;
                    assert(sizeof(*tmp) == STORE_HDR_METASIZE_OLD);
                    tmpe.timestamp = tmp->timestamp;
                    tmpe.lastref = tmp->lastref;
                    tmpe.expires = tmp->expires;
                    tmpe.lastmod = tmp->lastmod;
                    tmpe.swap_file_sz = tmp->swap_file_sz;
                    tmpe.refcount = tmp->refcount;
                    tmpe.flags = tmp->flags;
                }
                break;
#endif
            default:
                break;
            }
        }
        storeSwapTLVFree(tlv_list);
        tlv_list = NULL;
        if (storeKeyNull(key)) {
            debug(20, 1) ("storeDiskdDirRebuildFromDirectory: NULL key\n");
            storeDiskdDirUnlinkFile(SD, filn);
            continue;
        }
        tmpe.hash.key = key;
        /* check sizes */
        if (tmpe.swap_file_sz == 0) {
            tmpe.swap_file_sz = sb.st_size;
        } else if (tmpe.swap_file_sz == sb.st_size - swap_hdr_len) {
            tmpe.swap_file_sz = sb.st_size;
        } else if (tmpe.swap_file_sz != sb.st_size) {
            debug(20, 1) ("storeDiskdDirRebuildFromDirectory: SIZE MISMATCH %ld!=%ld\n",
                (long int) tmpe.swap_file_sz, (long int) sb.st_size);
            storeDiskdDirUnlinkFile(SD, filn);
            continue;
        }
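        /*
         * Note added for clarity (not in the original source), with
         * illustrative numbers: suppose the object file is 4234 bytes on
         * disk and the unpacked swap header is 138 bytes.  A recorded
         * swap_file_sz of 0 or 4096 (4234 - 138) is accepted and
         * normalized to 4234, and 4234 itself passes through unchanged;
         * any other value is treated as a size mismatch, in which case
         * the branch above unlinks the file and skips the entry.
         */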
        if (EBIT_TEST(tmpe.flags, KEY_PRIVATE)) {
            storeDiskdDirUnlinkFile(SD, filn);
            rb->counts.badflags++;
            continue;
        }
        e = storeGet(key);
        if (e && e->lastref >= tmpe.lastref) {
            /* key already exists, current entry is newer */
            /* keep old, ignore new */
            rb->counts.dupcount++;
            continue;
        } else if (NULL != e) {
            /* URL already exists, this swapfile not being used */
            /* junk old, load new */
            storeRelease(e);    /* release old entry */
            rb->counts.dupcount++;
        }
        rb->counts.objcount++;
        storeEntryDump(&tmpe, 5);
        e = storeDiskdDirAddDiskRestore(SD, key,
            filn,
            tmpe.swap_file_sz,
            tmpe.expires,
            tmpe.timestamp,
            tmpe.lastref,
            tmpe.lastmod,
            tmpe.refcount,      /* refcount */
            tmpe.flags,         /* flags */
            (int) rb->flags.clean);
        storeDirSwapLog(e, SWAP_LOG_ADD);
    }
    eventAdd("storeRebuild", storeDiskdDirRebuildFromDirectory, rb, 0.0, 1);
}

static void
storeDiskdDirRebuildFromSwapLog(void *data)
{
    RebuildState *rb = data;
    SwapDir *SD = rb->sd;
    StoreEntry *e = NULL;
    storeSwapLogData s;
    size_t ss = sizeof(storeSwapLogData);
    int count;
    int used;                   /* is swapfile already in use? */
    int disk_entry_newer;       /* is the log entry newer than current entry? */
    double x;
    assert(rb != NULL);
    /* load a number of objects per invocation */
    for (count = 0; count < rb->speed; count++) {
        if (fread(&s, ss, 1, rb->log) != 1) {
            storeDiskdDirRebuildComplete(rb);
            return;
        }
        rb->n_read++;
        /*
         * BC: during 2.4 development, we changed the way swap file
         * numbers are assigned and stored.  The high 16 bits used
         * to encode the SD index number.  There used to be a call
         * to storeDirProperFileno here that re-assigned the index
         * bits.  Now, for backwards compatibility, we just need
         * to mask it off.
         */
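        /*
         * Note added for clarity (not in the original source): the mask
         * below keeps only the low 24 bits.  With illustrative values, an
         * old-format entry logged as 0x01000ABC (directory index folded
         * into the high bits) becomes plain file number 0x00000ABC, while
         * a current-format value below 0x01000000 passes through unchanged.
         */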
        s.swap_filen &= 0x00FFFFFF;
        debug(20, 3) ("storeDiskdDirRebuildFromSwapLog: %s %s %08X\n",
            swap_log_op_str[(int) s.op],
            storeKeyText(s.key),
            s.swap_filen);
        if (s.op == SWAP_LOG_ADD) {
            /*
             * Here we have some special checks for large files.
             * I've been seeing a system crash followed by a reboot
             * that seems to corrupt the swap log.  Squid believes
             * that the disk holds some really large files.  It
             * complains about being over the high water mark
             * and proceeds to delete files as fast as it can.  To
             * prevent that, we call stat() on sufficiently large
             * files (>128KB) and reject those that are missing or
             * have the wrong size.
             */
            struct stat sb;
            char *p = storeDiskdDirFullPath(SD, s.swap_filen, NULL);
            if (s.swap_file_sz < (1 << 17)) {
                (void) 0;
            } else if (stat(p, &sb) < 0) {
                debug(47, 2) ("its missing!: %s\n", p);
                continue;
            } else if (sb.st_size != s.swap_file_sz) {
                debug(47, 2) ("size mismatch!: stat=%d, log=%d\n",
                    (int) sb.st_size, (int) s.swap_file_sz);
                continue;
            } else {
                debug(47, 2) ("big file (%d bytes) checks out\n",
                    (int) s.swap_file_sz);
            }
        } else if (s.op == SWAP_LOG_DEL) {
            /* Delete unless we already have a newer copy */
            if ((e = storeGet(s.key)) != NULL && s.lastref >= e->lastref) {
                /*
                 * Make sure we don't unlink the file, it might be
                 * in use by a subsequent entry.  Also note that
                 * we don't have to subtract from store_swap_size
                 * because adding to store_swap_size happens in
                 * the cleanup procedure.
                 */
                storeRecycle(e);
                rb->counts.cancelcount++;
            }
            continue;
        } else {
            x = log(++rb->counts.bad_log_op) / log(10.0);
            if (0.0 == x - (double) (int) x)
                debug(20, 1) ("WARNING: %d invalid swap log entries found\n",
                    rb->counts.bad_log_op);
            rb->counts.invalid++;
            continue;
        }
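        /*
         * Note added for clarity (not in the original source): the log10
         * test in the branch above acts as a rate limiter; the WARNING is
         * printed only when bad_log_op reaches an exact power of ten
         * (1, 10, 100, ...), so a heavily corrupted log cannot flood
         * cache.log.
         */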
        if ((++rb->counts.scancount & 0xFFF) == 0) {
            struct stat sb;
            if (0 == fstat(fileno(rb->log), &sb))
                storeRebuildProgress(SD->index,
                    (int) sb.st_size / ss, rb->n_read);
        }
        if (!storeDiskdDirValidFileno(SD, s.swap_filen, 0)) {
            rb->counts.invalid++;
            continue;
        }
        if (EBIT_TEST(s.flags, KEY_PRIVATE)) {
            rb->counts.badflags++;
            continue;
        }
        e = storeGet(s.key);
        used = storeDiskdDirMapBitTest(SD, s.swap_filen);
        /* If this URL already exists in the cache, does the swap log
         * appear to have a newer entry?  Compare 'lastref' from the
         * swap log to e->lastref. */
        disk_entry_newer = e ? (s.lastref > e->lastref ? 1 : 0) : 0;
        if (used && !disk_entry_newer) {
            /* log entry is old, ignore it */
            rb->counts.clashcount++;
            continue;
        } else if (used && e && e->swap_filen == s.swap_filen && e->swap_dirn == SD->index) {
            /* swapfile taken, same URL, newer, update meta */
            if (e->store_status == STORE_OK) {
                e->lastref = s.timestamp;
                e->timestamp = s.timestamp;
                e->expires = s.expires;
                e->lastmod = s.lastmod;
                e->flags = s.flags;
                e->refcount += s.refcount;
                storeDiskdDirUnrefObj(SD, e);
            } else {
                debug_trap("storeDiskdDirRebuildFromSwapLog: bad condition");
                debug(20, 1) ("\tSee %s:%d\n", __FILE__, __LINE__);
            }
            continue;
        } else if (used) {
            /* swapfile in use, not by this URL, log entry is newer */
            /* This is sorta bad: the log entry should NOT be newer at this
             * point.  If the log is dirty, the filesize check should have
             * caught this.  If the log is clean, there should never be a
             * newer entry. */
            debug(20, 1) ("WARNING: newer swaplog entry for dirno %d, fileno %08X\n",
                SD->index, s.swap_filen);
            /* I'm tempted to remove the swapfile here just to be safe,
             * but there is a bad race condition in the NOVM version if
             * the swapfile has recently been opened for writing, but
             * not yet opened for reading.  Because we can't map
             * swapfiles back to StoreEntrys, we don't know the state
             * of the entry using that file. */
            /* We'll assume the existing entry is valid, probably because
             * the swap file number got taken while we rebuild */
            rb->counts.clashcount++;
            continue;