亚洲欧美第一页_禁久久精品乱码_粉嫩av一区二区三区免费野_久草精品视频

? 歡迎來到蟲蟲下載站! | ?? 資源下載 ?? 資源專輯 ?? 關于我們
? 蟲蟲下載站

?? store_dir_diskd.c

?? 代理服務器 squid-2.6.STABLE16
?? C
?? 第 1 頁 / 共 5 頁
字號:
    /*
     * NOTE(review): this excerpt begins mid-function.  The opening of the
     * enclosing diskd init routine (which apparently spawns the diskd
     * helper process and sets 'pid' -- see the execl error text below)
     * lies above this chunk.
     */
    if (pid < 0)
	fatalf("execl: %s", Config.Program.diskd);
    fd_note(diskdinfo->rfd, "diskd -> squid health monitor");
    fd_note(diskdinfo->wfd, "squid -> diskd health monitor");
    commSetSelect(diskdinfo->rfd, COMM_SELECT_READ, diskdExited, NULL, 0);
    storeDiskdDirInitBitmap(sd);
    if (storeDiskdDirVerifyCacheDirs(sd) < 0)
	fatal(errmsg);
    storeDiskdDirOpenSwapLog(sd);
    storeDiskdDirRebuild(sd);
    /* schedule the periodic swap-directory clean event exactly once */
    if (!started_clean_event) {
	eventAdd("storeDirClean", storeDiskdDirCleanEvent, NULL, 15.0, 1);
	started_clean_event = 1;
    }
    (void) storeDirGetBlkSize(sd->path, &sd->fs.blksize);
    comm_quick_poll_required();
}

/*
 * storeDiskdStats
 *
 * Cache-manager statistics dump: appends the global diskd counters
 * (message traffic, queue depths, and per-operation OPS/SUCCESS/FAIL
 * tallies) to 'sentry'.  The high-water counters max_away and
 * max_shmuse are reset to 0 after being reported.
 */
static void
storeDiskdStats(StoreEntry * sentry)
{
    storeAppendPrintf(sentry, "sent_count: %d\n", diskd_stats.sent_count);
    storeAppendPrintf(sentry, "recv_count: %d\n", diskd_stats.recv_count);
    storeAppendPrintf(sentry, "max_away: %d\n", diskd_stats.max_away);
    storeAppendPrintf(sentry, "max_shmuse: %d\n", diskd_stats.max_shmuse);
    storeAppendPrintf(sentry, "open_fail_queue_len: %d\n", diskd_stats.open_fail_queue_len);
    storeAppendPrintf(sentry, "block_queue_len: %d\n", diskd_stats.block_queue_len);
    /* high-water marks are reported once, then restarted */
    diskd_stats.max_away = diskd_stats.max_shmuse = 0;
    storeAppendPrintf(sentry, "\n              OPS   SUCCESS    FAIL\n");
    storeAppendPrintf(sentry, "%7s %9d %9d %7d\n",
	"open", diskd_stats.open.ops, diskd_stats.open.success, diskd_stats.open.fail);
    storeAppendPrintf(sentry, "%7s %9d %9d %7d\n",
	"create", diskd_stats.create.ops, diskd_stats.create.success, diskd_stats.create.fail);
    storeAppendPrintf(sentry, "%7s %9d %9d %7d\n",
	"close", diskd_stats.close.ops, diskd_stats.close.success, diskd_stats.close.fail);
    storeAppendPrintf(sentry, "%7s %9d %9d %7d\n",
	"unlink", diskd_stats.unlink.ops, diskd_stats.unlink.success, diskd_stats.unlink.fail);
    storeAppendPrintf(sentry, "%7s %9d %9d %7d\n",
	"read", diskd_stats.read.ops, diskd_stats.read.success, diskd_stats.read.fail);
    storeAppendPrintf(sentry, "%7s %9d %9d %7d\n",
	"write", diskd_stats.write.ops, diskd_stats.write.success, diskd_stats.write.fail);
}

/*
 * storeDiskdDirSync
 *
 * Sync any pending data. We just sit around and read the queue
 * until the data has finished writing.
 */
static void
storeDiskdDirSync(SwapDir * SD)
{
    static time_t lastmsg = 0;
    diskdinfo_t *diskdinfo = SD->fsdata;
    /* drain outstanding diskd replies; log progress at most once per second */
    while (diskdinfo->away > 0) {
	if (squid_curtime > lastmsg) {
	    debug(47, 1) ("storeDiskdDirSync: %d messages away\n",
		diskdinfo->away);
	    lastmsg = squid_curtime;
	}
	storeDiskdDirCallback(SD);
    }
}

/*
 * storeDiskdDirCallback
 *
 * Handle callbacks. If we have more than magic2 requests away, we block
 * until the queue is below magic2. Otherwise, we simply return when we
 * don't get a message.
 *
 * Returns non-zero when some work was done (or when the queue is full),
 * zero otherwise.
 */
int
storeDiskdDirCallback(SwapDir * SD)
{
    diomsg M;
    int x;
    diskdinfo_t *diskdinfo = SD->fsdata;
    int retval = 0;
    if (diskdinfo->away >= diskdinfo->magic2) {
	diskd_stats.block_queue_len++;
	retval = 1;		/* We might not have anything to do, but our queue
				 * is full.. */
    }
    /* track the high-water mark of outstanding (sent - received) requests */
    if (diskd_stats.sent_count - diskd_stats.recv_count >
	diskd_stats.max_away) {
	diskd_stats.max_away = diskd_stats.sent_count - diskd_stats.recv_count;
    }
    /* non-blocking drain of the diskd reply queue */
    while (1) {
	memset(&M, '\0', sizeof(M));
	x = msgrcv(diskdinfo->rmsgid, &M, msg_snd_rcv_sz, 0, IPC_NOWAIT);
	if (x < 0)
	    break;
	else if (x != msg_snd_rcv_sz) {
	    /* NOTE(review): message text says "msgget" but the failing
	     * call above is msgrcv() */
	    debug(79, 1) ("storeDiskdDirCallback: msgget returns %d\n",
		x);
	    break;
	}
	diskd_stats.recv_count++;
	diskdinfo->away--;
	storeDiskdHandle(&M);
	retval = 1;		/* Return that we've actually done some work */
	/* release the shared-memory slot the reply was using, if any */
	if (M.shm_offset > -1)
	    storeDiskdShmPut(SD, M.shm_offset);
    }
    return retval;
}

/*
 * storeDiskdDirRebuildComplete
 *
 * Finish a rebuild pass for one cache_dir: report totals, close the
 * swap log stream when the rebuild came from a log (rb->log non-NULL),
 * and release the rebuild state.  (The function body continues past the
 * end of this excerpt chunk.)
 */
static void
storeDiskdDirRebuildComplete(RebuildState * rb)
{
    if (rb->log) {
	debug(47, 1) ("Done reading %s swaplog (%d entries)\n",
	    rb->sd->path, rb->n_read);
	fclose(rb->log);
	rb->log = NULL;
    } else {
	debug(47, 1) ("Done scanning %s (%d entries)\n",
	    rb->sd->path, rb->counts.scancount);
    }
    store_dirs_rebuilding--;
    storeDiskdDirCloseTmpSwapLog(rb->sd);
    storeRebuildComplete(&rb->counts);
    cbdataFree(rb);
}

/*
 * storeDiskdDirRebuildFromDirectory
 *
 * Scan-based rebuild step: reads swap files straight off the disk
 * (presumably used when no usable swap log is available -- confirm
 * against the rebuild dispatcher).  Each invocation processes up to
 * rb->speed files, then re-schedules itself via eventAdd() so the
 * rebuild proceeds incrementally.
 */
static void
storeDiskdDirRebuildFromDirectory(void *data)
{
    RebuildState *rb = data;
    SwapDir *SD = rb->sd;
    LOCAL_ARRAY(char, hdr_buf, SM_PAGE_SIZE);
    StoreEntry *e = NULL;
    StoreEntry tmpe;
    cache_key key[MD5_DIGEST_CHARS];
    sfileno filn = 0;
    int count;
    int size;
    struct stat sb;
    int swap_hdr_len;
    int fd = -1;
    tlv *tlv_list;
    tlv *t;
    assert(rb != NULL);
    debug(20, 3) ("storeDiskdDirRebuildFromDirectory: DIR #%d\n", rb->sd->index);
    for (count = 0; count < rb->speed; count++) {
	assert(fd == -1);
	/* -2 means no more files to scan; negative means skip this one */
	fd = storeDiskdDirGetNextFile(rb, &filn, &size);
	if (fd == -2) {
	    storeDiskdDirRebuildComplete(rb);
	    return;
	} else if (fd < 0) {
	    continue;
	}
	assert(fd > -1);
	/* lets get file stats here */
	if (fstat(fd, &sb) < 0) {
	    debug(20, 1) ("storeDiskdDirRebuildFromDirectory: fstat(FD %d): %s\n",
		fd, xstrerror());
	    file_close(fd);
	    store_open_disk_fd--;
	    fd = -1;
	    continue;
	}
	/* progress report every 65536 files scanned */
	if ((++rb->counts.scancount & 0xFFFF) == 0)
	    debug(20, 3) ("  %s %7d files opened so far.\n",
		rb->sd->path, rb->counts.scancount);
	debug(20, 9) ("file_in: fd=%d %08X\n", fd, filn);
	statCounter.syscalls.disk.reads++;
	/* read only the first page: enough to hold the swap meta header */
	if (FD_READ_METHOD(fd, hdr_buf, SM_PAGE_SIZE) < 0) {
	    debug(20, 1) ("storeDiskdDirRebuildFromDirectory: read(FD %d): %s\n",
		fd, xstrerror());
	    file_close(fd);
	    store_open_disk_fd--;
	    fd = -1;
	    continue;
	}
	file_close(fd);
	store_open_disk_fd--;
	fd = -1;
	swap_hdr_len = 0;
#if USE_TRUNCATE
	if (sb.st_size == 0)
	    continue;
#endif
	tlv_list = storeSwapMetaUnpack(hdr_buf, &swap_hdr_len);
	if (tlv_list == NULL) {
	    debug(20, 1) ("storeDiskdDirRebuildFromDirectory: failed to get meta data\n");
	    /* XXX shouldn't this be a call to storeDiskdUnlink ?
	     */
	    storeDiskdDirUnlinkFile(SD, filn);
	    continue;
	}
	debug(20, 3) ("storeDiskdDirRebuildFromDirectory: successful swap meta unpacking\n");
	memset(key, '\0', MD5_DIGEST_CHARS);
	memset(&tmpe, '\0', sizeof(StoreEntry));
	/* walk the unpacked TLV list, picking out the key and the
	 * standard metadata record */
	for (t = tlv_list; t; t = t->next) {
	    switch (t->type) {
	    case STORE_META_KEY:
		assert(t->length == MD5_DIGEST_CHARS);
		xmemcpy(key, t->value, MD5_DIGEST_CHARS);
		break;
#if SIZEOF_SQUID_FILE_SZ == SIZEOF_SIZE_T
	    case STORE_META_STD:
		assert(t->length == STORE_HDR_METASIZE);
		xmemcpy(&tmpe.timestamp, t->value, STORE_HDR_METASIZE);
		break;
#else
	    case STORE_META_STD_LFS:
		assert(t->length == STORE_HDR_METASIZE);
		xmemcpy(&tmpe.timestamp, t->value, STORE_HDR_METASIZE);
		break;
	    case STORE_META_STD:
		/* old-format (non-LFS) record: field-by-field copy through
		 * a layout-compatible local struct */
		assert(t->length == STORE_HDR_METASIZE_OLD);
		{
		    struct {
			time_t timestamp;
			time_t lastref;
			time_t expires;
			time_t lastmod;
			size_t swap_file_sz;
			u_short refcount;
			u_short flags;
		    } *tmp = t->value;
		    assert(sizeof(*tmp) == STORE_HDR_METASIZE_OLD);
		    tmpe.timestamp = tmp->timestamp;
		    tmpe.lastref = tmp->lastref;
		    tmpe.expires = tmp->expires;
		    tmpe.lastmod = tmp->lastmod;
		    tmpe.swap_file_sz = tmp->swap_file_sz;
		    tmpe.refcount = tmp->refcount;
		    tmpe.flags = tmp->flags;
		}
		break;
#endif
	    default:
		break;
	    }
	}
	storeSwapTLVFree(tlv_list);
	tlv_list = NULL;
	if (storeKeyNull(key)) {
	    debug(20, 1) ("storeDiskdDirRebuildFromDirectory: NULL key\n");
	    storeDiskdDirUnlinkFile(SD, filn);
	    continue;
	}
	tmpe.hash.key = key;
	/* check sizes */
	if (tmpe.swap_file_sz == 0) {
	    tmpe.swap_file_sz = sb.st_size;
	} else if (tmpe.swap_file_sz == sb.st_size - swap_hdr_len) {
	    tmpe.swap_file_sz = sb.st_size;
	} else if (tmpe.swap_file_sz != sb.st_size) {
	    debug(20, 1) ("storeDiskdDirRebuildFromDirectory: SIZE MISMATCH %ld!=%ld\n",
		(long int) tmpe.swap_file_sz, (long int) sb.st_size);
	    storeDiskdDirUnlinkFile(SD, filn);
	    continue;
	}
	/* private entries are never restored across restarts */
	if (EBIT_TEST(tmpe.flags, KEY_PRIVATE)) {
	    storeDiskdDirUnlinkFile(SD, filn);
	    rb->counts.badflags++;
	    continue;
	}
	e = storeGet(key);
	if (e && e->lastref >= tmpe.lastref) {
	    /* key already exists, current entry is newer */
	    /* keep old, ignore new */
	    rb->counts.dupcount++;
	    continue;
	} else if (NULL != e) {
	    /* URL already exists, this swapfile not being used */
	    /* junk old, load new */
	    storeRelease(e);	/* release old entry */
	    rb->counts.dupcount++;
	}
	rb->counts.objcount++;
	storeEntryDump(&tmpe, 5);
	e = storeDiskdDirAddDiskRestore(SD, key,
	    filn,
	    tmpe.swap_file_sz,
	    tmpe.expires,
	    tmpe.timestamp,
	    tmpe.lastref,
	    tmpe.lastmod,
	    tmpe.refcount,	/* refcount */
	    tmpe.flags,		/* flags */
	    (int) rb->flags.clean);
	storeDirSwapLog(e, SWAP_LOG_ADD);
    }
    /* reschedule ourselves for the next batch of rb->speed files */
    eventAdd("storeRebuild", storeDiskdDirRebuildFromDirectory, rb, 0.0, 1);
}

/*
 * storeDiskdDirRebuildFromSwapLog
 *
 * Log-based rebuild step: replays ADD/DEL records from the swap log
 * stream rb->log, up to rb->speed entries per invocation.  (The
 * function body continues past the end of this excerpt chunk.)
 */
static void
storeDiskdDirRebuildFromSwapLog(void *data)
{
    RebuildState *rb = data;
    SwapDir *SD = rb->sd;
    StoreEntry *e = NULL;
    storeSwapLogData s;
    size_t ss = sizeof(storeSwapLogData);
    int count;
    int used;			/* is swapfile already in use? */
    int disk_entry_newer;	/* is the log entry newer than current entry? */
    double x;
    assert(rb != NULL);
    /* load a number of objects per invocation */
    for (count = 0; count < rb->speed; count++) {
	/* short read or EOF ends the rebuild for this cache_dir */
	if (fread(&s, ss, 1, rb->log) != 1) {
	    storeDiskdDirRebuildComplete(rb);
	    return;
	}
	rb->n_read++;
	/*
	 * BC: during 2.4 development, we changed the way swap file
	 * numbers are assigned and stored.  The high 16 bits used
	 * to encode the SD index number.  There used to be a call
	 * to storeDirProperFileno here that re-assigned the index
	 * bits.  Now, for backwards compatibility, we just need
	 * to mask it off.
 */
	s.swap_filen &= 0x00FFFFFF;
	debug(20, 3) ("storeDiskdDirRebuildFromSwapLog: %s %s %08X\n",
	    swap_log_op_str[(int) s.op],
	    storeKeyText(s.key),
	    s.swap_filen);
	if (s.op == SWAP_LOG_ADD) {
	    /*
	     * Here we have some special checks for large files.
	     * I've been seeing a system crash followed by a reboot
	     * that seems to corrupt the swap log.  Squid believes
	     * that the disk holds some really large files.  It
	     * complains about using being over the high water mark
	     * and proceeds to delete files as fast as it can.  To
	     * prevent that, we call stat() on sufficiently large
	     * files (>128KB) and reject those that are missing or
	     * have the wrong size.
	     */
	    struct stat sb;
	    char *p = storeDiskdDirFullPath(SD, s.swap_filen, NULL);
	    /* 1 << 17 is the 128KB threshold described above */
	    if (s.swap_file_sz < (1 << 17)) {
		(void) 0;	/* small file: accept the log entry as-is */
	    } else if (stat(p, &sb) < 0) {
		debug(47, 2) ("its missing!: %s\n", p);
		continue;
	    } else if (sb.st_size != s.swap_file_sz) {
		debug(47, 2) ("size mismatch!: stat=%d, log=%d\n",
		    (int) sb.st_size, (int) s.swap_file_sz);
		continue;
	    } else {
		debug(47, 2) ("big file (%d bytes) checks out\n",
		    (int) s.swap_file_sz);
	    }
	} else if (s.op == SWAP_LOG_DEL) {
	    /* Delete unless we already have a newer copy */
	    if ((e = storeGet(s.key)) != NULL && s.lastref >= e->lastref) {
		/*
		 * Make sure we don't unlink the file, it might be
		 * in use by a subsequent entry.  Also note that
		 * we don't have to subtract from store_swap_size
		 * because adding to store_swap_size happens in
		 * the cleanup procedure.
		 */
		storeRecycle(e);
		rb->counts.cancelcount++;
	    }
	    continue;
	} else {
	    /* unknown op code: warn only when the bad-entry count hits an
	     * exact power of ten (x is its log10) */
	    x = log(++rb->counts.bad_log_op) / log(10.0);
	    if (0.0 == x - (double) (int) x)
		debug(20, 1) ("WARNING: %d invalid swap log entries found\n",
		    rb->counts.bad_log_op);
	    rb->counts.invalid++;
	    continue;
	}
	/* progress report every 4096 log entries scanned */
	if ((++rb->counts.scancount & 0xFFF) == 0) {
	    struct stat sb;
	    if (0 == fstat(fileno(rb->log), &sb))
		storeRebuildProgress(SD->index,
		    (int) sb.st_size / ss, rb->n_read);
	}
	if (!storeDiskdDirValidFileno(SD, s.swap_filen, 0)) {
	    rb->counts.invalid++;
	    continue;
	}
	/* private entries are never restored across restarts */
	if (EBIT_TEST(s.flags, KEY_PRIVATE)) {
	    rb->counts.badflags++;
	    continue;
	}
	e = storeGet(s.key);
	used = storeDiskdDirMapBitTest(SD, s.swap_filen);
	/* If this URL already exists in the cache, does the swap log
	 * appear to have a newer entry?  Compare 'lastref' from the
	 * swap log to e->lastref. */
	disk_entry_newer = e ? (s.lastref > e->lastref ? 1 : 0) : 0;
	if (used && !disk_entry_newer) {
	    /* log entry is old, ignore it */
	    rb->counts.clashcount++;
	    continue;
	} else if (used && e && e->swap_filen == s.swap_filen && e->swap_dirn == SD->index) {
	    /* swapfile taken, same URL, newer, update meta */
	    if (e->store_status == STORE_OK) {
		e->lastref = s.timestamp;
		e->timestamp = s.timestamp;
		e->expires = s.expires;
		e->lastmod = s.lastmod;
		e->flags = s.flags;
		e->refcount += s.refcount;
		storeDiskdDirUnrefObj(SD, e);
	    } else {
		debug_trap("storeDiskdDirRebuildFromSwapLog: bad condition");
		debug(20, 1) ("\tSee %s:%d\n", __FILE__, __LINE__);
	    }
	    continue;
	} else if (used) {
	    /* swapfile in use, not by this URL, log entry is newer */
	    /* This is sorta bad: the log entry should NOT be newer at this
	     * point.  If the log is dirty, the filesize check should have
	     * caught this.  If the log is clean, there should never be a
	     * newer entry.
	     */
	    debug(20, 1) ("WARNING: newer swaplog entry for dirno %d, fileno %08X\n",
		SD->index, s.swap_filen);
	    /* I'm tempted to remove the swapfile here just to be safe,
	     * but there is a bad race condition in the NOVM version if
	     * the swapfile has recently been opened for writing, but
	     * not yet opened for reading.  Because we can't map
	     * swapfiles back to StoreEntrys, we don't know the state
	     * of the entry using that file.  */
	    /* We'll assume the existing entry is valid, probably because
	     * the swap file number got taken while we rebuild */
	    rb->counts.clashcount++;
	    continue;

?? 快捷鍵說明

復制代碼 Ctrl + C
搜索代碼 Ctrl + F
全屏模式 F11
切換主題 Ctrl + Shift + D
顯示快捷鍵 ?
增大字號 Ctrl + =
減小字號 Ctrl + -
亚洲欧美第一页_禁久久精品乱码_粉嫩av一区二区三区免费野_久草精品视频
国产精品一二二区| 99视频有精品| 一区二区三区在线免费| 宅男在线国产精品| 91在线你懂得| 黄色成人免费在线| 婷婷国产v国产偷v亚洲高清| 欧美国产亚洲另类动漫| 欧美一区二区三区免费大片| 91蝌蚪国产九色| 国产夫妻精品视频| 久久国产精品免费| 无吗不卡中文字幕| 一区二区日韩电影| 亚洲视频狠狠干| 中文字幕精品一区二区精品绿巨人| 91精品婷婷国产综合久久| 一本久久精品一区二区| 国产91精品欧美| 麻豆精品一二三| 午夜精品久久久久久不卡8050| 亚洲欧洲国产日韩| 中文字幕的久久| 国产欧美日韩综合精品一区二区| 欧美日本在线看| 欧美天堂一区二区三区| 91女厕偷拍女厕偷拍高清| 成人午夜av电影| 成人理论电影网| 成人免费观看视频| 国产成人精品在线看| 国内成人精品2018免费看| 久久综合综合久久综合| 91香蕉视频mp4| 91免费在线播放| 91亚洲精品久久久蜜桃| 成人av网站在线观看免费| 国产成人精品免费| 成人av午夜影院| 色婷婷久久久久swag精品| 日本国产一区二区| 在线观看视频一区二区欧美日韩| 91麻豆免费看| 在线欧美日韩精品| 欧美日韩免费观看一区二区三区| 欧美视频中文一区二区三区在线观看| 一本色道久久综合亚洲91| 在线观看亚洲专区| 欧美人妖巨大在线| 日韩免费视频线观看| 久久久久久久一区| 中文字幕 久热精品 视频在线 | 欧美激情一区二区三区不卡 | 亚洲在线中文字幕| 亚洲成av人片在www色猫咪| 亚洲bt欧美bt精品| 久久精品久久99精品久久| 极品尤物av久久免费看| 国产成人综合在线| 色天使色偷偷av一区二区| 欧美日韩国产高清一区二区| 欧美一区二区黄| 亚洲国产精品av| 亚洲一区二区影院| 久久激情综合网| 国产精品1区2区| 91香蕉视频在线| 欧美一区二区私人影院日本| 久久久久久久网| 中文字幕日韩精品一区| 亚洲一区二区三区四区五区黄| 日韩av电影免费观看高清完整版在线观看| 久久精品国产一区二区三区免费看| 国产麻豆91精品| 在线精品视频小说1| 日韩视频免费观看高清完整版 | 五月天欧美精品| 国产一区二区三区黄视频| 91在线视频播放| 日韩美女视频一区二区在线观看| 国产精品青草综合久久久久99| 一区二区不卡在线播放| 久久草av在线| 91福利视频在线| 精品剧情v国产在线观看在线| 国产精品欧美久久久久无广告 | 国产乱一区二区| 日本丶国产丶欧美色综合| 精品少妇一区二区三区在线播放| 中文一区在线播放| 人人精品人人爱| 97国产一区二区| 日韩欧美国产三级电影视频| 亚洲日本va午夜在线影院| 美女视频黄 久久| 日本乱人伦一区| 国产亚洲一本大道中文在线| 午夜精品福利久久久| 成人免费视频播放| 日韩精品中文字幕一区| 一区二区三区日韩欧美| 精品影视av免费| 欧美日韩国产经典色站一区二区三区| 国产精品日韩成人| 国内久久精品视频| 欧美电影在哪看比较好| 亚洲免费观看高清完整版在线观看| 老司机一区二区| 在线不卡的av| 亚洲国产视频网站| gogo大胆日本视频一区| 26uuu成人网一区二区三区| 视频在线观看一区| 在线观看国产精品网站| 亚洲欧美在线视频观看| 国产91精品久久久久久久网曝门 | 日本一区二区三区四区在线视频| 蜜臀久久99精品久久久久久9| 91电影在线观看| 日韩美女久久久| 99视频一区二区三区| 国产精品国产自产拍高清av王其| 韩国av一区二区| 欧美精品一区二区三区久久久| 日本最新不卡在线| 欧美精品在线视频| 婷婷成人综合网| 欧美一区二区三区喷汁尤物| 日韩avvvv在线播放| 欧美一区二区私人影院日本| 日韩av中文字幕一区二区三区| 欧美视频一区在线| 亚洲一卡二卡三卡四卡无卡久久 | 中文字幕av资源一区| 国产精品一卡二| 久久久99精品免费观看不卡| 国内偷窥港台综合视频在线播放| 精品欧美一区二区久久| 精品一区二区三区免费| 精品日韩欧美在线| 国产福利视频一区二区三区| 国产三级精品视频| 成人黄色免费短视频| 亚洲欧洲日韩一区二区三区| 色婷婷亚洲综合| 亚洲bt欧美bt精品777| 欧美一区二区三区日韩| 国内一区二区视频| 国产精品视频线看| 欧美制服丝袜第一页| 亚洲国产va精品久久久不卡综合| 欧美日韩免费电影| 精品一区二区影视| 国产精品女人毛片| 欧美在线短视频| 久久精品国产精品亚洲综合| 精品国产乱码久久久久久久久 | 国产成人福利片| 亚洲图片欧美激情| 欧美日韩一区 二区 三区 久久精品| 日韩精品电影在线| 国产喷白浆一区二区三区| 成人av在线看| 亚欧色一区w666天堂| 精品99一区二区三区| 
99久久免费精品高清特色大片| 一区二区三区高清在线| 欧美一区二区观看视频| 丁香六月久久综合狠狠色| 东方aⅴ免费观看久久av| 亚洲欧美国产三级| 91精品国产手机| 成人久久视频在线观看| 亚洲成人综合网站| 久久精品一区二区| 欧美亚洲国产一区二区三区va | 欧美男女性生活在线直播观看| 美女mm1313爽爽久久久蜜臀| 国产精品视频yy9299一区| 欧美日韩亚洲高清一区二区| 国产呦萝稀缺另类资源| 亚洲在线免费播放| 久久品道一品道久久精品| 欧美色综合网站| 国产成人精品影院| 日韩av中文在线观看| 国产精品盗摄一区二区三区| 欧美一级电影网站| 色狠狠桃花综合| 国产成人在线视频播放| 五月天婷婷综合| 亚洲女同ⅹxx女同tv| 337p粉嫩大胆噜噜噜噜噜91av| 日本高清不卡视频| 成人午夜电影网站| 国内久久婷婷综合| 青青草成人在线观看| 一区二区三区日韩在线观看| 国产精品久久看| 久久久久久99久久久精品网站|