exec.c
                    if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
#if defined(TARGET_HAS_SMC) || 1
                        first_tb ||
#endif
                        ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                         !cpu_physical_memory_is_dirty(pd))) {
                        /* ROM: we do as if code was inside */
                        /* if code is present, we only map as read only and save the
                           original mapping */
                        VirtPageDesc *vp;

                        vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);
                        vp->phys_addr = pd;
                        vp->prot = prot;
                        vp->valid_tag = virt_valid_tag;
                        prot &= ~PAGE_WRITE;
                    }
                }
                map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
                                MAP_SHARED | MAP_FIXED, phys_ram_fd,
                                (pd & TARGET_PAGE_MASK));
                if (map_addr == MAP_FAILED) {
                    cpu_abort(env, "mmap failed when mapped physical address 0x%08x to virtual address 0x%08x\n",
                              paddr, vaddr);
                }
            }
        }
    }
#endif
    return ret;
}

/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong addr, unsigned long pc, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    VirtPageDesc *vp;

#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x\n", addr);
#endif
    addr &= TARGET_PAGE_MASK;

    /* if it is not mapped, no need to worry here */
    if (addr >= MMAP_AREA_END)
        return 0;
    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
    if (!vp)
        return 0;
    /* NOTE: in this case, validate_tag is _not_ tested as it
       validates only the code TLB */
    if (vp->valid_tag != virt_valid_tag)
        return 0;
    if (!(vp->prot & PAGE_WRITE))
        return 0;
#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
           addr, vp->phys_addr, vp->prot);
#endif
    if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
        cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
                  (unsigned long)addr, vp->prot);
    /* set the dirty bit */
    phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
    /* flush the code inside */
    tb_invalidate_phys_page(vp->phys_addr, pc, puc);
    return 1;
#else
    return 0;
#endif
}

#else

void tlb_flush(CPUState *env, int flush_global)
{
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
}

int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int mmu_idx, int is_softmmu)
{
    return 0;
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    unsigned long start, end;
    int i, j, prot, prot1;
    PageDesc *p;

    fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    start = -1;
    end = -1;
    prot = 0;
    for(i = 0; i <= L1_SIZE; i++) {
        if (i < L1_SIZE)
            p = l1_map[i];
        else
            p = NULL;
        for(j = 0; j < L2_SIZE; j++) {
            if (!p)
                prot1 = 0;
            else
                prot1 = p[j].flags;
            if (prot1 != prot) {
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
                if (start != -1) {
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
                            start, end, end - start,
                            prot & PAGE_READ ? 'r' : '-',
                            prot & PAGE_WRITE ? 'w' : '-',
                            prot & PAGE_EXEC ? 'x' : '-');
                }
                if (prot1 != 0)
                    start = end;
                else
                    start = -1;
                prot = prot1;
            }
            if (!p)
                break;
        }
    }
}

int page_get_flags(target_ulong address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p)
        return 0;
    return p->flags;
}
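/* [Annotation, not part of exec.c] A minimal usage sketch of the two query
   helpers above: page_dump() writes the guest mapping table to a stdio
   stream, and page_get_flags() returns the PAGE_* bits for a guest address
   (0 if the page is unmapped). The helper name debug_dump_guest_maps is
   hypothetical and only exists for illustration. */
static void debug_dump_guest_maps(target_ulong probe_addr)
{
    int flags;

    page_dump(stderr);                  /* print the start/end/size/prot table */
    flags = page_get_flags(probe_addr); /* 0 means the page is not mapped */
    if (flags & PAGE_EXEC)
        fprintf(stderr, "guest page " TARGET_FMT_lx " is executable\n",
                probe_addr & TARGET_PAGE_MASK);
}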
/* modify the flags of a page and invalidate the code if necessary.
   The flag PAGE_WRITE_ORG is set automatically depending on PAGE_WRITE */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    PageDesc *p;
    target_ulong addr;

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    if (flags & PAGE_WRITE)
        flags |= PAGE_WRITE_ORG;
    spin_lock(&tb_lock);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
        /* if the write protection is set, then we invalidate the code
           inside */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
    spin_unlock(&tb_lock);
}

int page_check_range(target_ulong start, target_ulong len, int flags)
{
    PageDesc *p;
    target_ulong end;
    target_ulong addr;

    end = TARGET_PAGE_ALIGN(start + len); /* must do before we lose bits in the next step */
    start = start & TARGET_PAGE_MASK;

    if (end < start)
        /* we've wrapped around */
        return -1;
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if (!p)
            return -1;
        if (!(p->flags & PAGE_VALID))
            return -1;

        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
            return -1;
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG))
                return -1;
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0, NULL))
                    return -1;
            }
            return 0;
        }
    }
    return 0;
}

/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
{
    unsigned int page_index, prot, pindex;
    PageDesc *p, *p1;
    target_ulong host_start, host_end, addr;

    host_start = address & qemu_host_page_mask;
    page_index = host_start >> TARGET_PAGE_BITS;
    p1 = page_find(page_index);
    if (!p1)
        return 0;
    host_end = host_start + qemu_host_page_size;
    p = p1;
    prot = 0;
    for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
        prot |= p->flags;
        p++;
    }
    /* if the page was really writable, then we change its
       protection back to writable */
    if (prot & PAGE_WRITE_ORG) {
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
        if (!(p1[pindex].flags & PAGE_WRITE)) {
            mprotect((void *)g2h(host_start), qemu_host_page_size,
                     (prot & PAGE_BITS) | PAGE_WRITE);
            p1[pindex].flags |= PAGE_WRITE;
            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(address, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(address);
#endif
            return 1;
        }
    }
    return 0;
}

static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
}
#endif /* defined(CONFIG_USER_ONLY) */

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             int memory);
static void *subpage_init (target_phys_addr_t base, uint32_t *phys,
                           int orig_memory);

#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
                      need_subpage)                                     \
    do {                                                                \
        if (addr > start_addr)                                          \
            start_addr2 = 0;                                            \
        else {                                                          \
            start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
            if (start_addr2 > 0)                                        \
                need_subpage = 1;                                       \
        }                                                               \
                                                                        \
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
            end_addr2 = TARGET_PAGE_SIZE - 1;                           \
        else {                                                          \
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
            if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
                need_subpage = 1;                                       \
        }                                                               \
    } while (0)
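/* [Annotation, not part of exec.c] Worked example of CHECK_SUBPAGE, assuming
   4 KB target pages (TARGET_PAGE_SIZE == 0x1000). Registering orig_size =
   0x800 bytes at start_addr = 0x1200 leaves the page at addr == 0x1000 only
   partially covered, so the macro reports the sub-page offsets and sets
   need_subpage. The function name check_subpage_example is made up for the
   illustration; the macro also reads orig_size from the enclosing scope,
   exactly as cpu_register_physical_memory below does. */
static void check_subpage_example(void)
{
    target_phys_addr_t addr = 0x1000, start_addr = 0x1200, end_addr = 0x1a00;
    target_phys_addr_t start_addr2, end_addr2;
    unsigned long orig_size = 0x800;
    int need_subpage = 0;

    CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
                  need_subpage);
    /* here start_addr2 == 0x200, end_addr2 == 0x9ff, need_subpage == 1 */
}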
/* register physical memory. 'size' must be a multiple of the target
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page */
void cpu_register_physical_memory(target_phys_addr_t start_addr,
                                  unsigned long size,
                                  unsigned long phys_offset)
{
    target_phys_addr_t addr, end_addr;
    PhysPageDesc *p;
    CPUState *env;
    unsigned long orig_size = size;
    void *subpage;

    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + (target_phys_addr_t)size;
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
        p = phys_page_find(addr >> TARGET_PAGE_BITS);
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
            unsigned long orig_memory = p->phys_offset;
            target_phys_addr_t start_addr2, end_addr2;
            int need_subpage = 0;

            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
                          need_subpage);
            if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
                if (!(orig_memory & IO_MEM_SUBPAGE)) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, orig_memory);
                } else {
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
                                            >> IO_MEM_SHIFT];
                }
                subpage_register(subpage, start_addr2, end_addr2, phys_offset);
            } else {
                p->phys_offset = phys_offset;
                if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                    (phys_offset & IO_MEM_ROMD))
                    phys_offset += TARGET_PAGE_SIZE;
            }
        } else {
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
            p->phys_offset = phys_offset;
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                (phys_offset & IO_MEM_ROMD))
                phys_offset += TARGET_PAGE_SIZE;
            else {
                target_phys_addr_t start_addr2, end_addr2;
                int need_subpage = 0;

                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
                              end_addr2, need_subpage);

                if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, IO_MEM_UNASSIGNED);
                    subpage_register(subpage, start_addr2, end_addr2,
                                     phys_offset);
                }
            }
        }
    }

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}
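/* [Annotation, not part of exec.c] A caller-side sketch of how a board model
   typically uses the function above: carve guest RAM out of the emulator's
   RAM pool with qemu_ram_alloc() (defined further down in this file) and map
   it at a fixed guest-physical base. RAM_BASE, RAM_SIZE and the function name
   board_map_main_ram_example are made up for the example; IO_MEM_RAM marks
   the region as ordinary RAM. */
static void board_map_main_ram_example(void)
{
    const target_phys_addr_t RAM_BASE = 0x00000000;
    const unsigned long RAM_SIZE = 64 * 1024 * 1024;
    ram_addr_t ram_offset;

    ram_offset = qemu_ram_alloc(RAM_SIZE);   /* offset into phys_ram_base */
    cpu_register_physical_memory(RAM_BASE, RAM_SIZE,
                                 ram_offset | IO_MEM_RAM);
}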
/* XXX: temporary until new memory mapping API */
uint32_t cpu_get_physical_page_desc(target_phys_addr_t addr)
{
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return IO_MEM_UNASSIGNED;
    return p->phys_offset;
}

/* XXX: better than nothing */
ram_addr_t qemu_ram_alloc(unsigned int size)
{
    ram_addr_t addr;
    if ((phys_ram_alloc_offset + size) >= phys_ram_size) {
        fprintf(stderr, "Not enough memory (requested_size = %u, max memory = %d)\n",
                size, phys_ram_size);
        abort();
    }
    addr = phys_ram_alloc_offset;
    phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
    return addr;
}

void qemu_ram_free(ram_addr_t addr)
{
}

static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#ifdef TARGET_SPARC
    do_unassigned_access(addr, 0, 0, 0);
#elif TARGET_CRIS
    do_unassigned_access(addr, 0, 0, 0);
#endif
    return 0;
}

static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#ifdef TARGET_SPARC
    do_unassigned_access(addr, 1, 0, 0);
#elif TARGET_CRIS
    do_unassigned_access(addr, 1, 0, 0);
#endif
}

static CPUReadMemoryFunc *unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readb,
    unassigned_mem_readb,
};

static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writeb,
    unassigned_mem_writeb,
};

static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 1);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stb_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove