exec.c
            break;
        p = p1 + 1;
    }
    return mask;
}

void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    if(env->intercept & INTERCEPT_SVM_MASK) {
        /* most probably the virtual machine should not
           be shut down but rather caught by the VMM */
        vmexit(SVM_EXIT_SHUTDOWN, 0);
    }
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    if (logfile) {
        fprintf(logfile, "qemu: fatal: ");
        vfprintf(logfile, fmt, ap2);
        fprintf(logfile, "\n");
#ifdef TARGET_I386
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
        cpu_dump_state(env, logfile, fprintf, 0);
#endif
        fflush(logfile);
        fclose(logfile);
    }
    va_end(ap2);
    va_end(ap);
    abort();
}

CPUState *cpu_copy(CPUState *env)
{
    CPUState *new_env = cpu_init(env->cpu_model_str);
    /* preserve chaining and index */
    CPUState *next_cpu = new_env->next_cpu;
    int cpu_index = new_env->cpu_index;
    memcpy(new_env, env, sizeof(CPUState));
    new_env->next_cpu = next_cpu;
    new_env->cpu_index = cpu_index;
    return new_env;
}

#if !defined(CONFIG_USER_ONLY)

/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        env->tlb_table[0][i].addr_read = -1;
        env->tlb_table[0][i].addr_write = -1;
        env->tlb_table[0][i].addr_code = -1;
        env->tlb_table[1][i].addr_read = -1;
        env->tlb_table[1][i].addr_write = -1;
        env->tlb_table[1][i].addr_code = -1;
#if (NB_MMU_MODES >= 3)
        env->tlb_table[2][i].addr_read = -1;
        env->tlb_table[2][i].addr_write = -1;
        env->tlb_table[2][i].addr_code = -1;
#if (NB_MMU_MODES == 4)
        env->tlb_table[3][i].addr_read = -1;
        env->tlb_table[3][i].addr_write = -1;
        env->tlb_table[3][i].addr_code = -1;
#endif
#endif
    }

    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));

#if !defined(CONFIG_SOFTMMU)
    munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
#endif
#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush(env, flush_global);
    }
#endif
    tlb_flush_count++;
}

static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        tlb_entry->addr_read = -1;
        tlb_entry->addr_write = -1;
        tlb_entry->addr_code = -1;
    }
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int i;
    TranslationBlock *tb;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_flush_entry(&env->tlb_table[0][i], addr);
    tlb_flush_entry(&env->tlb_table[1][i], addr);
#if (NB_MMU_MODES >= 3)
    tlb_flush_entry(&env->tlb_table[2][i], addr);
#if (NB_MMU_MODES == 4)
    tlb_flush_entry(&env->tlb_table[3][i], addr);
#endif
#endif

    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page.  */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));

    i = tb_jmp_cache_hash_page(addr);
    memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));

#if !defined(CONFIG_SOFTMMU)
    if (addr < MMAP_AREA_END)
        munmap((void *)addr, TARGET_PAGE_SIZE);
#endif
#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush_page(env, addr);
    }
#endif
}

/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,
                                    CODE_DIRTY_FLAG);
}

/* update the TLB so that writes in physical page 'phys_addr' are
   no longer tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr)
{
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
}

static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    unsigned long addr;
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
        }
    }
}

void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    CPUState *env;
    unsigned long length, start1;
    int i, mask, len;
    uint8_t *p;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    len = length >> TARGET_PAGE_BITS;
#ifdef USE_KQEMU
    /* XXX: should not depend on cpu context */
    env = first_cpu;
    if (env->kqemu_enabled) {
        ram_addr_t addr;
        addr = start;
        for(i = 0; i < len; i++) {
            kqemu_set_notdirty(env, addr);
            addr += TARGET_PAGE_SIZE;
        }
    }
#endif
    mask = ~dirty_flags;
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
    for(i = 0; i < len; i++)
        p[i] &= mask;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = start + (unsigned long)phys_ram_base;
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
#if (NB_MMU_MODES >= 3)
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
#if (NB_MMU_MODES == 4)
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
#endif
#endif
    }

#if !defined(CONFIG_SOFTMMU)
    /* XXX: this is expensive */
    {
        VirtPageDesc *p;
        int j;
        target_ulong addr;

        for(i = 0; i < L1_SIZE; i++) {
            p = l1_virt_map[i];
            if (p) {
                addr = i << (TARGET_PAGE_BITS + L2_BITS);
                for(j = 0; j < L2_SIZE; j++) {
                    if (p->valid_tag == virt_valid_tag &&
                        p->phys_addr >= start && p->phys_addr < end &&
                        (p->prot & PROT_WRITE)) {
                        if (addr < MMAP_AREA_END) {
                            mprotect((void *)addr, TARGET_PAGE_SIZE,
                                     p->prot & ~PROT_WRITE);
                        }
                    }
                    addr += TARGET_PAGE_SIZE;
                    p++;
                }
            }
        }
    }
#endif
}

static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
{
    ram_addr_t ram_addr;

    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
            tlb_entry->addend - (unsigned long)phys_ram_base;
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
            tlb_entry->addr_write |= IO_MEM_NOTDIRTY;
        }
    }
}

/* update the TLB according to the current state of the dirty bits */
void cpu_tlb_update_dirty(CPUState *env)
{
    int i;
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[0][i]);
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[1][i]);
#if (NB_MMU_MODES >= 3)
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[2][i]);
#if (NB_MMU_MODES == 4)
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[3][i]);
#endif
#endif
}

static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
                                  unsigned long start)
{
    unsigned long addr;
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if (addr == start) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_RAM;
        }
    }
}

/* update the TLB corresponding to virtual page vaddr and phys addr
   addr so that it is no longer dirty */
static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
    int i;

    addr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_set_dirty1(&env->tlb_table[0][i], addr);
    tlb_set_dirty1(&env->tlb_table[1][i], addr);
#if (NB_MMU_MODES >= 3)
    tlb_set_dirty1(&env->tlb_table[2][i], addr);
#if (NB_MMU_MODES == 4)
    tlb_set_dirty1(&env->tlb_table[3][i], addr);
#endif
#endif
}

/* add a new TLB entry. At most one entry for a given virtual address
   is permitted. Return 0 if OK or 2 if the page could not be mapped
   (can only happen in non SOFTMMU mode for I/O pages or pages
   conflicting with the host address space). */
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int mmu_idx, int is_softmmu)
{
    PhysPageDesc *p;
    unsigned long pd;
    unsigned int index;
    target_ulong address;
    target_phys_addr_t addend;
    int ret;
    CPUTLBEntry *te;
    int i;

    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
           vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
#endif

    ret = 0;
#if !defined(CONFIG_SOFTMMU)
    if (is_softmmu)
#endif
    {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
            /* IO memory case */
            address = vaddr | pd;
            addend = paddr;
        } else {
            /* standard memory */
            address = vaddr;
            addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
        }

        /* Make accesses to pages with watchpoints go via the
           watchpoint trap routines.  */
        for (i = 0; i < env->nb_watchpoints; i++) {
            if (vaddr == (env->watchpoint[i].vaddr & TARGET_PAGE_MASK)) {
                if (address & ~TARGET_PAGE_MASK) {
                    env->watchpoint[i].addend = 0;
                    address = vaddr | io_mem_watch;
                } else {
                    env->watchpoint[i].addend = pd - paddr +
                        (unsigned long) phys_ram_base;
                    /* TODO: Figure out how to make read watchpoints coexist
                       with code.  */
                    pd = (pd & TARGET_PAGE_MASK) | io_mem_watch | IO_MEM_ROMD;
                }
            }
        }

        index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
        addend -= vaddr;
        te = &env->tlb_table[mmu_idx][index];
        te->addend = addend;
        if (prot & PAGE_READ) {
            te->addr_read = address;
        } else {
            te->addr_read = -1;
        }
        if (prot & PAGE_EXEC) {
            te->addr_code = address;
        } else {
            te->addr_code = -1;
        }
        if (prot & PAGE_WRITE) {
            if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
                (pd & IO_MEM_ROMD)) {
                /* write access calls the I/O callback */
                te->addr_write = vaddr |
                    (pd & ~(TARGET_PAGE_MASK | IO_MEM_ROMD));
            } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                       !cpu_physical_memory_is_dirty(pd)) {
                te->addr_write = vaddr | IO_MEM_NOTDIRTY;
            } else {
                te->addr_write = address;
            }
        } else {
            te->addr_write = -1;
        }
    }
#if !defined(CONFIG_SOFTMMU)
    else {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO access: no mapping is done as it will be handled by the
               soft MMU */
            if (!(env->hflags & HF_SOFTMMU_MASK))
                ret = 2;
        } else {
            void *map_addr;

            if (vaddr >= MMAP_AREA_END) {
                ret = 2;
            } else {
                if (prot & PROT_WRITE) {
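
/* Context sketch, not part of the exec.c listing above: the dirty-memory
   tracking in cpu_physical_memory_reset_dirty(), tlb_update_dirty() and
   tlb_set_page_exec() relies on the byte-per-page phys_ram_dirty array and
   on a few small helpers that live outside this file (cpu-all.h in this era
   of the tree). The definitions below are a minimal sketch of those
   companions under that assumption; treat the exact forms as illustrative
   rather than quoted. */

static inline int cpu_physical_memory_is_dirty(ram_addr_t addr)
{
    /* a page counts as dirty only when every flag bit in its byte is set */
    return phys_ram_dirty[addr >> TARGET_PAGE_BITS] == 0xff;
}

static inline int cpu_physical_memory_get_dirty(ram_addr_t addr,
                                                int dirty_flags)
{
    /* test individual flags, e.g. CODE_DIRTY_FLAG for self-modifying-code
       detection */
    return phys_ram_dirty[addr >> TARGET_PAGE_BITS] & dirty_flags;
}

static inline void cpu_physical_memory_set_dirty(ram_addr_t addr)
{
    /* mark the page dirty for all consumers; cpu_physical_memory_reset_dirty()
       above clears selected flags and re-arms the IO_MEM_NOTDIRTY TLB entries
       so the next write faults back into the slow path */
    phys_ram_dirty[addr >> TARGET_PAGE_BITS] = 0xff;
}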