亚洲欧美第一页_禁久久精品乱码_粉嫩av一区二区三区免费野_久草精品视频

? 歡迎來到蟲蟲下載站! | ?? 資源下載 ?? 資源專輯 ?? 關于我們
? 蟲蟲下載站

?? fetch.cc

?? linux下基于c++的處理器仿真平臺。具有處理器流水線
?? CC
?? 第 1 頁 / 共 4 頁
字號:
    // cross to the next one
    Addr blockBaseAddr = icacheBlockAlignPC(xc->regs.pc);
    do {
#if FULL_SYSTEM
        // do PC-based annotations for the *next* PC here, now that
        // we've updated the PC.  This lets us magically transition to
        // a totally different instruction with zero overhead (e.g.,
        // if the annotation modifies pc).
        if (!xc->spec_mode) {
            Addr oldpc;
            do {
                oldpc = xc->regs.pc;
                system->pcEventQueue.service(xc);
            } while (oldpc != xc->regs.pc);
        }
#endif
        // Fetch a single instruction; r = (instruction, fault).
        pair<DynInst *, Fault> r = fetchOneInst(thread_number);
        DynInst *inst = r.first;
        Fault fault = r.second;

        if (inst != NULL)
            num_fetched++;

        // inst == NULL signals failure to fetch for some reason (like
        // refusal to fetch a speculative uncached instruction)
        if (fault != No_Fault || inst == NULL) {
            if (fault != No_Fault) {
                fetch_fault_count[thread_number]++;
            }
            // false => caller should stop fetching lines this cycle
            return make_pair(num_fetched, false);
        }

        // Redirect fetch to the predicted target of this instruction.
        xc->regs.pc = inst->Pred_PC;

        // if we're entering the asynchronous interrupt handler, mark
        // the first instruction as "serializing" to flush the ROB
        // before dispatching it.  Otherwise we're likely to
        // underestimate the overhead of entering the handler.
        if (entering_interrupt) {
            inst->serializing_inst = true;
            entering_interrupt = false;	// just flag first one
        }

        /*
         *  Now, figure out if we need to stop fetching...
         */

        // did we exceed the per-cycle instruction limit?
        if (num_fetched >= max_to_fetch)
            return make_pair(num_fetched, false);

        // is the fetch queue full?  (per-thread queue when the
        // multithreaded front-end is enabled, shared queue otherwise)
        if ((mt_frontend && ifq[thread_number].num_total() == ifq_size) ||
            (!mt_frontend && ifq[0].num_total() == ifq_size)) {
            floss_state.fetch_end_cause[thread_number] = FLOSS_FETCH_QFULL;
            return make_pair(num_fetched, false);
        }

        if (inst->isControl()) {
            branch_cnt++;
            fetched_branch[thread_number]++;

            /*  if we've exceeded our branch count, then we're  */
            /*  done...                                         */
            if (branch_cnt >= fetch_branches) {
                floss_state.fetch_end_cause[thread_number] = FLOSS_FETCH_BRANCH_LIMIT;
                return make_pair(num_fetched, false);
            } else if (inst->Pred_PC != inst->PC + sizeof(MachInst)) {
                /*  otherwise...                                      */
                /*  if this is a predicted-taken branch, discontinue  */
                /*  getting instructions from this block, move on to  */
                /*  the next one.                                     */
                // true => caller may keep fetching (from the new line)
                return make_pair(num_fetched, true);
            }
        }

        // did we fall through to the next cache line?
    } while (icacheBlockAlignPC(xc->regs.pc) == blockBaseAddr);

    return make_pair(num_fetched, true);
}

// For debugging purposes
static Addr uncompressedBlockAddress = 0;

/**
 * Do fetch for one thread.
 *
 * Fetches up to lines_to_fetch cache lines (via fetchOneLine()),
 * initiating one I-cache access per distinct block touched, and
 * records per-thread fetch statistics and fetch-loss causes.
 *
 * @param thread_number Thread ID to fetch from.
 * @param max_to_fetch Maximum number of instructions to fetch.
 * @return Number of instructions fetched.
 */
int
FullCPU::fetchOneThread(int thread_number, int max_to_fetch)
{
    SpecExecContext *xc = thread[thread_number];
    int fetched_this_thread = 0;
    int branch_cnt = 0;

    // Track fetched blocks so we don't fetch the same one twice in
    // the same cycle.
    // (This is relatively expensive... we should find a way to do
    // without it -- Steve)
    std::set<Addr> fetchedAddresses;

#if FULL_SYSTEM
    bool entering_interrupt = false;

    // Check for interrupts here.  We may want to do this sooner in
    // SMT full system (up in fetch(), before we do the thread
    // selection), but for a single-threaded processor it should be OK
    // here.
    if (!xc->spec_mode && checkInterrupts && check_interrupts() &&
        !xc->inPalMode()) {
        int ipl = 0;
        int summary = 0;
        checkInterrupts = false;
        IntReg *ipr = xc->regs.ipr;

        // Scan software interrupt request register (SIRR) for pending
        // software interrupts; highest level seen wins.
        if (xc->regs.ipr[AlphaISA::IPR_SIRR]) {
            for (int i = AlphaISA::INTLEVEL_SOFTWARE_MIN;
                 i < AlphaISA::INTLEVEL_SOFTWARE_MAX; i++) {
                if (ipr[AlphaISA::IPR_SIRR] & (ULL(1) << i)) {
                    // See table 4-19 of 21164 hardware reference
                    ipl = (i - AlphaISA::INTLEVEL_SOFTWARE_MIN) + 1;
                    summary |= (ULL(1) << i);
                }
            }
        }

        // Scan external (hardware) interrupt sources the same way.
        uint64_t interrupts = xc->cpu->intr_status();
        for (int i = AlphaISA::INTLEVEL_EXTERNAL_MIN;
            i < AlphaISA::INTLEVEL_EXTERNAL_MAX; i++) {
            if (interrupts & (ULL(1) << i)) {
                // See table 4-19 of 21164 hardware reference
                ipl = i;
                summary |= (ULL(1) << i);
            }
        }

        if (ipr[AlphaISA::IPR_ASTRR])
            panic("asynchronous traps not implemented\n");

        // Deliver the interrupt only if it exceeds the current
        // interrupt priority level (IPLR).
        if (ipl && ipl > xc->regs.ipr[AlphaISA::IPR_IPLR]) {
            ipr[AlphaISA::IPR_ISR] = summary;
            ipr[AlphaISA::IPR_INTID] = ipl;
            xc->ev5_trap(Interrupt_Fault);
            entering_interrupt = true;
            DPRINTF(Flow, "Interrupt! IPLR=%d ipl=%d summary=%x\n",
                    ipr[AlphaISA::IPR_IPLR], ipl, summary);
        }
    }
#else
    const bool entering_interrupt = false;
#endif

    // Fetch up to the maximum number of lines per cycle allowed
    for (int fetchedLines = 0; fetchedLines < lines_to_fetch; ++fetchedLines) {

        /* is this a bogus text address? (can happen on mis-spec path) */
        if (!xc->validInstAddr(xc->regs.pc)) {
            floss_state.fetch_end_cause[thread_number] = FLOSS_FETCH_INVALID_PC;
            break;
        }

        // remember index & seq. number of first inst in this line for
        // cache fetch later
        int first_inst_index = icache_output_buffer[thread_number]->tail;
        InstSeqNum first_inst_seq_num = next_fetch_seq;

        uncompressedBlockAddress = xc->regs.pc;

        /*  Mask lower bits to get block starting address         */
        Addr blockAddress = icacheBlockAlignPC(xc->regs.pc);

#if FULL_SYSTEM
        // Latch PAL mode now: xc->regs.pc may change inside
        // fetchOneLine() below, but the cache request must use the
        // mode of the line we are fetching.
        bool pal_pc = xc->inPalMode();
#endif

        // r = (instructions fetched from this line, keep-fetching flag)
        pair<int, bool> r = fetchOneLine(thread_number,
                                         max_to_fetch - fetched_this_thread,
                                         branch_cnt,
                                         entering_interrupt);
        int fetched_this_line = r.first;
        bool keep_fetching = r.second;

        fetched_this_thread += fetched_this_line;

        /*
         *  Fetch the entire cache block containing the instruction
         *  at "start_address"
         */
        // NOTE(review): nothing visible here inserts blockAddress into
        // fetchedAddresses, so the duplicate-block filter looks
        // ineffective — confirm against the full file.
        if (fetched_this_line > 0
            && (fetchedAddresses.find(blockAddress) ==
                fetchedAddresses.end())) {
            MemAccessResult mem_access_result;

            assert(!icacheInterface->isBlocked());

            // Build the I-cache read request for the whole block.
            MemReqPtr req = new MemReq(blockAddress, xc,
                                     icache_block_size);
            req->flags |= INST_READ;
            req->cmd = Read;
            req->asid = thread[thread_number]->getInstAsid();
            req->thread_num = thread_number;
            req->time = curTick;
            req->data = new uint8_t[req->size];
            req->xc = xc;
            req->pc = xc->regs.pc;

            // Completion event releases the buffered instructions when
            // the cache access finishes.
            Event *ev = new FetchCompleteEvent(this,
                                               thread_number,
                                               first_inst_index,
                                               fetched_this_line,
                                               first_inst_seq_num,
                                               req);
            req->completionEvent = ev;

#if FULL_SYSTEM
            // ugly hack!
            if (pal_pc)
                req->paddr = req->vaddr;
            else
                req->paddr = vtophys(xc, blockAddress);

            req->paddr &= EV5::PAddrImplMask;
#else
            Fault fetch_fault = xc->translateInstReq(req);
            if (fetch_fault != No_Fault)
                fatal("Bad translation on instruction fetch, vaddr = 0x%x",
                      req->vaddr);
#endif

            mem_access_result = icacheInterface->access(req);

            if (mem_access_result != MA_HIT) {
                /* if we missed in the I-cache, stop fetching after this
                   block.   */
                floss_state.fetch_end_cause[thread_number] = FLOSS_FETCH_IMISS;
                floss_state.fetch_mem_result[thread_number] =
                    mem_access_result;
                break;
            }
        }

        if (!keep_fetching)
            break;

        /*
         * fetch_branches == 0, fetch one cache line per thread
         */
        if (fetch_branches == 0) {
            floss_state.fetch_end_cause[thread_number] = FLOSS_FETCH_BRANCH_LIMIT;
            break;
        }
    }

    if (fetched_this_thread) {
        thread_info[thread_number].last_fetch = curTick;
    }

    /*
     *  STATISTICS  (per-thread)
     */
    fetch_nisn_dist_[thread_number].sample(fetched_this_thread);
    fetched_inst[thread_number] += fetched_this_thread;
    thread_info[thread_number].fetch_counter += fetched_this_thread;

    return fetched_this_thread;
}

/*****************************************************************************/
/* fetch up as many instructions as one branch prediction and one cache line */
/* access will support without overflowing the IFETCH -> DISPATCH QUEUE      */
/*                                                                           */
/*  This function calls choose_next_thread() to determine which thread will  */
/*  fetch next.                                                              */
/*       => choose_next_thread() calls the individual policy routines        */
/*          based on the setting of "fetch_policy"                           */
/*                                                                           */
/*****************************************************************************/
void
FullCPU::fetch()
{
    int fetched_this_cycle = 0;
    int fetched_this_thread;
    int ports_used = 0;
    // NOTE(review): variable-length array — a GCC extension, not
    // standard C++.
    int thread_fetched[number_of_threads];

    /*
     *  Reset the number of instrs fetched for each thread
     */
    icache_ports_used_last_fetch = 0;
    for (int i = 0; i < number_of_threads; i++) {
        thread_fetched[i] = 0;

#if 0
        if (curTick > 10000 && thread_info[i].last_fetch < curTick - 2000) {
            stringstream s;
            s << "Thread " << i << " hasn't fetched since cycle " <<
                thread_info[i].last_fetch << ends;
            exitNow(s.str(), 1);
        }
#endif
    }

    /* always update icounts... we use them for bias adjustment even
     * if we don't need them for scheduling this cycle */
    update_icounts();

    /*
     * For each thread, set/clear the thread_info[].blocked flag.
     * If set, also set floss_state.fetch_end_cause[] to indicate why.
     */
    for (int thread_number = 0; thread_number < number_of_threads;
         thread_number++) {
        ExecContext *xc = thread[thread_number];

        /* assume the worst until proven otherwise */
        thread_info[thread_number].blocked = true;

        /* Unless we fetch a full fetch_width of instructions, this
         * should get set to indicate why we didn't */
        floss_state.fetch_end_cause[thread_number] = FLOSS_FETCH_NONE;

        //
        //  Now: check all the reasons we could be blocked... if none of
        //       them are true, then mark as not blocked
        //
        //
        if (!thread_info[thread_number].active)
            continue;

        if (xc->status() != ExecContext::Active) {
#if FULL_SYSTEM
            // A suspended context with a pending interrupt gets woken
            // up so it can take the interrupt.
            if (xc->status() == ExecContext::Suspended && check_interrupts()) {
                xc->activate();
            } else
#endif // FULL_SYSTEM
            {
                continue;
            }
        }

        //
        //  The case where the IFQ is full, but all slots are reserved
        //  (ie. no real instructions present) indicates a cache miss.
        //  This will be detected and handled later.
        //
        int flag = 0;
        if (mt_frontend) {
            FetchQueue &q = ifq[thread_number];
            if (q.num_total() == q.size && q.num_reserved < q.num_total()) {
                floss_state.fetch_end_cause[thread_number] = FLOSS_FETCH_QFULL;
                flag = 1;
            }
        } else {
            //
            //  For the non-MT case...
            //
            FetchQueue &q = ifq[0];
            if (q.num_total() == ifq_size && q.num_reserved < q.num_total()) {
                floss_state.fetch_end_cause[thread_number] = FLOSS_FETCH_QFULL;
                if (thread_number == 0)
                    flag = 1;   // First time through, we collect stats...
                else
                    continue;   // After that, we just keep going...
            }
        }

        if (flag) {
            //
            //  We can't fetch for this thread... record queue-full
            //  occupancy statistics for every thread, then move on.
            //
            for (int i = 0; i < number_of_threads; ++i) {
                unsigned c = IQNumInstructions(i);
                qfull_iq_occupancy[i] += c;
                qfull_rob_occupancy[i] += ROB.num_thread(i);
                qfull_iq_occ_dist_[i].sample(c);
                qfull_rob_occ_dist_[i].sample(ROB.num_thread(i));
            }
            continue;
        }

        if (fetch_stall[thread_number] != 0) {
            /* fetch loss cause for this thread is fid_cause value */
            floss_state.fetch_end_cause[thread_number] =
                fid_cause[thread_number];
            continue;
        }

        if (fetch_fault_count[thread_number] != 0) {
            // pending faults...
            floss_state.fetch_end_cause[thread_number] = 
                FLOSS_FETCH_FAULT_FLUSH;
            continue;
        }

        /* if icache_output_buffer is still full (due to icache miss,
           or multi-cycle hit) then stall */
        if (icache_output_buffer[thread_number]->free_slots() < fetch_width) {
            floss_state.fetch_end_cause[thread_number] = FLOSS_FETCH_IMISS;
            floss_state.fetch_mem_result[thread_number] = MA_CACHE_MISS;
            continue;
        }

        thread_info[thread_number].blocked = false;
    }

    /*
     *  We need to block threads that have been assigned zero priority
     *  Check for all blocked while we're at it...
     */
    bool all_threads_blocked = true;
    for (int i = 0; i < number_of_threads; i++) {
        if (thread_info[i].priority == 0)
            thread_info[i].blocked = true;

        if (!thread_info[i].blocked)
            all_threads_blocked = false;
    }

    if (all_threads_blocked) {
        flossRecord(&floss_state, thread_fetched);
        fetch_idle_cycles++;
        //	check_counters();
        return;
    }

    /*  Add our static biases into the current icounts                     */
    /*  ==> these will be removed after the choose_next_thread() function  */
    for (int i = 0; i < number_of_threads; i++)
        thread_info[i].current_icount += static_icount_bias[i];

    /*
     *  This function takes the contents of thread_info[] into account
     *  and may change fetch_list[].blocked
     */
    choose_next_thread(fetch_list);

    /*  Remove our static biases from the current icounts  */
    for (int i = 0; i < number_of_threads; i++)
        thread_info[i].current_icount -= static_icount_bias[i];

    //
    //  Assert blocked flag for threads with active ROB or IQ caps
    //
    for (int i = 0; i < number_of_threads; i++) {
        int thread_number = fetch_list[i].thread_number;

        /*  Handle IQ and ROB caps  */
        if (iq_cap_active[thread_number] || rob_cap_active[thread_number])
            fetch_list[i].blocked = true;
    }

    /*
     *  Are all threads blocked?
     *  => Need to check again, because the fetch policy may block a thread
     *
     *  scan by fetch_list[] index to find threads not blocked by cache miss
     *  or by fetch policy
     */
    all_threads_blocked = true;

?? 快捷鍵說明

復制代碼 Ctrl + C
搜索代碼 Ctrl + F
全屏模式 F11
切換主題 Ctrl + Shift + D
顯示快捷鍵 ?
增大字號 Ctrl + =
減小字號 Ctrl + -
亚洲欧美第一页_禁久久精品乱码_粉嫩av一区二区三区免费野_久草精品视频
日韩写真欧美这视频| 波多野结衣的一区二区三区| 欧美一区二区福利在线| 日韩电影在线免费观看| 日韩精品自拍偷拍| 国产综合一区二区| 国产香蕉久久精品综合网| 国产成人一区在线| 中文字幕在线不卡| 在线观看日韩av先锋影音电影院| 亚洲情趣在线观看| 91国偷自产一区二区三区成为亚洲经典| 一区二区三区在线免费视频| 制服视频三区第一页精品| 久久99精品久久久久婷婷| 欧美激情综合网| 日本韩国欧美三级| 美女在线观看视频一区二区| 久久综合九色综合97婷婷| 成人a区在线观看| 亚洲国产精品一区二区www | 国产成人av一区二区三区在线 | 性感美女极品91精品| 日韩欧美中文字幕制服| 99视频精品全部免费在线| 亚洲一区二区三区在线看| 欧美哺乳videos| 99精品视频在线免费观看| 日欧美一区二区| 国产婷婷色一区二区三区| 色婷婷综合久久久久中文| 裸体一区二区三区| 亚洲视频电影在线| 欧美不卡视频一区| 91免费视频网址| 韩国av一区二区三区四区| 亚洲精品成人天堂一二三| 久久伊99综合婷婷久久伊| 在线免费观看视频一区| 国产乱人伦偷精品视频免下载| 亚洲一区在线观看免费观看电影高清| 欧美精品一区在线观看| 欧美亚洲国产怡红院影院| 国产成人av一区二区三区在线观看| 亚洲成av人片观看| 亚洲欧洲精品成人久久奇米网| 日韩视频中午一区| 欧美伊人久久久久久久久影院| 国产一区二区福利视频| 日韩精品91亚洲二区在线观看| 欧美aaaaa成人免费观看视频| 久久精品亚洲精品国产欧美| 欧美无砖砖区免费| 色综合中文综合网| 亚洲精品日产精品乱码不卡| 久久久99精品免费观看| 欧美成人激情免费网| 欧美日韩国产经典色站一区二区三区| 成人激情视频网站| 国产精品一区不卡| 精品一区免费av| 喷白浆一区二区| 五月天欧美精品| 一区二区三区加勒比av| 亚洲色图.com| 中文字幕一区二区不卡| 欧美国产一区二区在线观看| 国产偷国产偷精品高清尤物| 久久夜色精品国产欧美乱极品| 欧美一区二区三区白人| 欧美高清激情brazzers| 欧美色网站导航| 欧美日韩在线播| 色94色欧美sute亚洲线路二| 91小视频免费看| 不卡电影一区二区三区| www.日韩av| 91麻豆swag| 欧美自拍偷拍一区| 欧美三级视频在线播放| 欧美片在线播放| 欧美人狂配大交3d怪物一区| 在线成人小视频| 日韩三级免费观看| 精品久久一区二区| 久久久久九九视频| 久久久国产精品麻豆| 欧美国产日韩在线观看| 国产精品久久毛片| 亚洲男人的天堂一区二区| 一区二区三区毛片| 日韩av电影免费观看高清完整版 | 亚洲成a人片在线不卡一二三区| 亚洲一区二区综合| 亚洲成av人片一区二区三区| 奇米在线7777在线精品| 黑人巨大精品欧美黑白配亚洲| 韩国v欧美v日本v亚洲v| 成人一区二区三区中文字幕| 91视频国产资源| 69p69国产精品| 亚洲精品一区二区三区在线观看 | 成人免费毛片高清视频| 色综合久久88色综合天天6| 欧美性生活久久| 日韩欧美美女一区二区三区| 国产欧美一区二区三区鸳鸯浴 | 国内精品久久久久影院一蜜桃| 国产酒店精品激情| 色综合一区二区三区| 91精品国产免费| 欧美经典三级视频一区二区三区| 亚洲色图欧美激情| 免费高清在线视频一区·| 国产69精品久久777的优势| 欧美亚洲高清一区二区三区不卡| 777xxx欧美| 欧美激情一区二区三区蜜桃视频| 亚洲综合视频在线| 国模套图日韩精品一区二区| 99re成人精品视频| 日韩欧美高清一区| 国产精品白丝在线| 免费高清在线一区| 色婷婷激情综合| 精品理论电影在线| 亚洲国产综合91精品麻豆| 国产成人福利片| 欧美美女一区二区| 中文字幕色av一区二区三区| 成a人片亚洲日本久久| 欧美老人xxxx18| 综合欧美一区二区三区| 久久成人18免费观看| 欧美性猛交xxxxxxxx| 国产亚洲人成网站| 蜜臀a∨国产成人精品| 色欧美日韩亚洲| 欧美极品美女视频| 九九视频精品免费| 欧美性大战久久久| 综合在线观看色| 高清国产一区二区| 日韩精品自拍偷拍| 五月天欧美精品| 色噜噜狠狠色综合中国| 国产精品天干天干在线综合| 毛片不卡一区二区| 884aa四虎影成人精品一区| 亚洲欧美电影一区二区| 成人性色生活片| 国产香蕉久久精品综合网| 精品一区二区三区在线播放视频| 欧美日韩三级一区二区| 一区二区三区久久| 91麻豆国产福利在线观看| 国产精品久线观看视频| 国产suv精品一区二区883| 日韩精品一区在线观看| 日本不卡高清视频| 欧美精品xxxxbbbb| 亚洲www啪成人一区二区麻豆| 
色爱区综合激月婷婷| 一区二区三区在线视频免费| 色综合天天综合在线视频| 中文字幕中文字幕在线一区| av亚洲精华国产精华精华| 中文字幕一区二区日韩精品绯色| 成人h动漫精品一区二区| 国产精品久久777777| 99久久国产综合色|国产精品| 日本一区二区免费在线| 国产成人精品aa毛片| 国产精品久久久久影院亚瑟 | 国产精品乱人伦中文| 成人小视频在线| 亚洲日本免费电影| 26uuu国产电影一区二区| 国产一区二区三区观看| 日本一区二区电影| 99在线热播精品免费| 一区二区三区在线播放| 欧美丝袜自拍制服另类| 日本午夜精品一区二区三区电影 | 亚洲黄色尤物视频| 欧美亚洲国产怡红院影院| 午夜不卡av免费| 欧美r级在线观看| 懂色av一区二区夜夜嗨| 亚洲欧美区自拍先锋| 欧美日韩免费在线视频| 看电视剧不卡顿的网站| 国产欧美久久久精品影院| 91热门视频在线观看| 日韩不卡手机在线v区| 2020国产成人综合网| av亚洲精华国产精华| 日日夜夜精品视频天天综合网| 精品国产百合女同互慰| 福利一区二区在线观看|