multi.c
        map = sh_map_domain_page(mfn);
        ASSERT(map != NULL);
        dst = map + ((unsigned long)dst & (PAGE_SIZE - 1));
    }

    for ( i = 0; i < entries; i++ )
        safe_write_entry(dst++, src++);

    if ( map != NULL ) sh_unmap_domain_page(map);
}

static inline int
perms_strictly_increased(u32 old_flags, u32 new_flags)
/* Given the flags of two entries, are the new flags a strict
 * increase in rights over the old ones? */
{
    u32 of = old_flags & (_PAGE_PRESENT|_PAGE_RW|_PAGE_USER|_PAGE_NX);
    u32 nf = new_flags & (_PAGE_PRESENT|_PAGE_RW|_PAGE_USER|_PAGE_NX);
    /* Flip the NX bit, since it's the only one that decreases rights;
     * we calculate as if it were an "X" bit. */
    of ^= _PAGE_NX_BIT;
    nf ^= _PAGE_NX_BIT;
    /* If the changed bits are all set in the new flags, then rights strictly
     * increased between old and new. */
    return ((of | (of ^ nf)) == nf);
}

static int inline
shadow_get_page_from_l1e(shadow_l1e_t sl1e, struct domain *d)
{
    int res;
    mfn_t mfn;
    struct domain *owner;

    ASSERT(!sh_l1e_is_magic(sl1e));

    if ( !shadow_mode_refcounts(d) )
        return 1;

    res = get_page_from_l1e(sl1e, d);

    // If a privileged domain is attempting to install a map of a page it does
    // not own, we let it succeed anyway.
    //
    if ( unlikely(!res) &&
         !shadow_mode_translate(d) &&
         mfn_valid(mfn = shadow_l1e_get_mfn(sl1e)) &&
         (owner = page_get_owner(mfn_to_page(mfn))) &&
         (d != owner) &&
         IS_PRIV_FOR(d, owner) )
    {
        res = get_page_from_l1e(sl1e, owner);
        SHADOW_PRINTK("privileged domain %d installs map of mfn %05lx "
                      "which is owned by domain %d: %s\n",
                      d->domain_id, mfn_x(mfn), owner->domain_id,
                      res ? "success" : "failed");
    }

    if ( unlikely(!res) )
    {
        perfc_incr(shadow_get_page_fail);
        SHADOW_PRINTK("failed: l1e=" SH_PRI_pte "\n");
    }

    return res;
}

static void inline
shadow_put_page_from_l1e(shadow_l1e_t sl1e, struct domain *d)
{
    if ( !shadow_mode_refcounts(d) )
        return;

    put_page_from_l1e(sl1e, d);
}

#if GUEST_PAGING_LEVELS >= 4
static int shadow_set_l4e(struct vcpu *v,
                          shadow_l4e_t *sl4e,
                          shadow_l4e_t new_sl4e,
                          mfn_t sl4mfn)
{
    int flags = 0, ok;
    shadow_l4e_t old_sl4e;
    paddr_t paddr;
    ASSERT(sl4e != NULL);
    old_sl4e = *sl4e;

    if ( old_sl4e.l4 == new_sl4e.l4 ) return 0; /* Nothing to do */

    paddr = ((((paddr_t)mfn_x(sl4mfn)) << PAGE_SHIFT)
             | (((unsigned long)sl4e) & ~PAGE_MASK));

    if ( shadow_l4e_get_flags(new_sl4e) & _PAGE_PRESENT )
    {
        /* About to install a new reference */
        mfn_t sl3mfn = shadow_l4e_get_mfn(new_sl4e);
        ok = sh_get_ref(v, sl3mfn, paddr);
        /* Are we pinning l3 shadows to handle weird linux behaviour? */
        if ( sh_type_is_pinnable(v, SH_type_l3_64_shadow) )
            ok |= sh_pin(v, sl3mfn);
        if ( !ok )
        {
            domain_crash(v->domain);
            return SHADOW_SET_ERROR;
        }
#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC )
        shadow_resync_all(v, 0);
#endif
    }

    /* Write the new entry */
    shadow_write_entries(sl4e, &new_sl4e, 1, sl4mfn);
    flags |= SHADOW_SET_CHANGED;

    if ( shadow_l4e_get_flags(old_sl4e) & _PAGE_PRESENT )
    {
        /* We lost a reference to an old mfn. */
        mfn_t osl3mfn = shadow_l4e_get_mfn(old_sl4e);
        if ( (mfn_x(osl3mfn) != mfn_x(shadow_l4e_get_mfn(new_sl4e)))
             || !perms_strictly_increased(shadow_l4e_get_flags(old_sl4e),
                                          shadow_l4e_get_flags(new_sl4e)) )
        {
            flags |= SHADOW_SET_FLUSH;
        }
        sh_put_ref(v, osl3mfn, paddr);
    }
    return flags;
}
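/* shadow_set_l3e() and shadow_set_l2e() below follow the same pattern as
 * shadow_set_l4e() above: take a reference on the new target shadow before
 * anything is written, write the entry with shadow_write_entries(), and
 * report SHADOW_SET_CHANGED.  SHADOW_SET_FLUSH is added when a present old
 * entry is replaced by one that points at a different mfn or does not
 * strictly increase access rights, so the caller knows stale TLB entries may
 * need flushing; a failed reference crashes the domain and returns
 * SHADOW_SET_ERROR. */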
static int shadow_set_l3e(struct vcpu *v,
                          shadow_l3e_t *sl3e,
                          shadow_l3e_t new_sl3e,
                          mfn_t sl3mfn)
{
    int flags = 0;
    shadow_l3e_t old_sl3e;
    paddr_t paddr;
    ASSERT(sl3e != NULL);
    old_sl3e = *sl3e;

    if ( old_sl3e.l3 == new_sl3e.l3 ) return 0; /* Nothing to do */

    paddr = ((((paddr_t)mfn_x(sl3mfn)) << PAGE_SHIFT)
             | (((unsigned long)sl3e) & ~PAGE_MASK));

    if ( shadow_l3e_get_flags(new_sl3e) & _PAGE_PRESENT )
    {
        /* About to install a new reference */
        if ( !sh_get_ref(v, shadow_l3e_get_mfn(new_sl3e), paddr) )
        {
            domain_crash(v->domain);
            return SHADOW_SET_ERROR;
        }
#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC )
        shadow_resync_all(v, 0);
#endif
    }

    /* Write the new entry */
    shadow_write_entries(sl3e, &new_sl3e, 1, sl3mfn);
    flags |= SHADOW_SET_CHANGED;

    if ( shadow_l3e_get_flags(old_sl3e) & _PAGE_PRESENT )
    {
        /* We lost a reference to an old mfn. */
        mfn_t osl2mfn = shadow_l3e_get_mfn(old_sl3e);
        if ( (mfn_x(osl2mfn) != mfn_x(shadow_l3e_get_mfn(new_sl3e))) ||
             !perms_strictly_increased(shadow_l3e_get_flags(old_sl3e),
                                       shadow_l3e_get_flags(new_sl3e)) )
        {
            flags |= SHADOW_SET_FLUSH;
        }
        sh_put_ref(v, osl2mfn, paddr);
    }
    return flags;
}
#endif /* GUEST_PAGING_LEVELS >= 4 */

static int shadow_set_l2e(struct vcpu *v,
                          shadow_l2e_t *sl2e,
                          shadow_l2e_t new_sl2e,
                          mfn_t sl2mfn)
{
    int flags = 0;
    shadow_l2e_t old_sl2e;
    paddr_t paddr;

#if GUEST_PAGING_LEVELS == 2
    /* In 2-on-3 we work with pairs of l2es pointing at two-page
     * shadows.  Reference counting and up-pointers track from the first
     * page of the shadow to the first l2e, so make sure that we're
     * working with those:
     * Align the pointer down so it's pointing at the first of the pair */
    sl2e = (shadow_l2e_t *)((unsigned long)sl2e & ~(sizeof(shadow_l2e_t)));
    /* Align the mfn of the shadow entry too */
    new_sl2e.l2 &= ~(1<<PAGE_SHIFT);
#endif

    ASSERT(sl2e != NULL);
    old_sl2e = *sl2e;

    if ( old_sl2e.l2 == new_sl2e.l2 ) return 0; /* Nothing to do */

    paddr = ((((paddr_t)mfn_x(sl2mfn)) << PAGE_SHIFT)
             | (((unsigned long)sl2e) & ~PAGE_MASK));

    if ( shadow_l2e_get_flags(new_sl2e) & _PAGE_PRESENT )
    {
        mfn_t sl1mfn = shadow_l2e_get_mfn(new_sl2e);

        /* About to install a new reference */
        if ( !sh_get_ref(v, sl1mfn, paddr) )
        {
            domain_crash(v->domain);
            return SHADOW_SET_ERROR;
        }
#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
        {
            struct shadow_page_info *sp = mfn_to_shadow_page(sl1mfn);
            mfn_t gl1mfn = _mfn(sp->backpointer);

            /* If the shadow is a fl1 then the backpointer contains
               the GFN instead of the GMFN, and it's definitely not
               OOS. */
            if ( (sp->type != SH_type_fl1_shadow) && mfn_valid(gl1mfn)
                 && mfn_is_out_of_sync(gl1mfn) )
                sh_resync(v, gl1mfn);
        }
#endif
    }

    /* Write the new entry */
#if GUEST_PAGING_LEVELS == 2
    {
        shadow_l2e_t pair[2] = { new_sl2e, new_sl2e };
        /* The l1 shadow is two pages long and needs to be pointed to by
         * two adjacent l1es.  The pair have the same flags, but point
         * at odd and even MFNs */
        ASSERT(!(pair[0].l2 & (1<<PAGE_SHIFT)));
        pair[1].l2 |= (1<<PAGE_SHIFT);
        shadow_write_entries(sl2e, &pair, 2, sl2mfn);
    }
#else /* normal case */
    shadow_write_entries(sl2e, &new_sl2e, 1, sl2mfn);
#endif
    flags |= SHADOW_SET_CHANGED;

    if ( shadow_l2e_get_flags(old_sl2e) & _PAGE_PRESENT )
    {
        /* We lost a reference to an old mfn. */
        mfn_t osl1mfn = shadow_l2e_get_mfn(old_sl2e);
        if ( (mfn_x(osl1mfn) != mfn_x(shadow_l2e_get_mfn(new_sl2e))) ||
             !perms_strictly_increased(shadow_l2e_get_flags(old_sl2e),
                                       shadow_l2e_get_flags(new_sl2e)) )
        {
            flags |= SHADOW_SET_FLUSH;
        }
        sh_put_ref(v, osl1mfn, paddr);
    }
    return flags;
}
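/* The two helpers below support dirty-VRAM tracking for the range described
 * by d->dirty_vram.  shadow_vram_get_l1e() records in dirty_vram->sl1ma[]
 * the machine address of the shadow l1 entry that maps each tracked guest
 * frame; shadow_vram_put_l1e() is called when such a mapping is dropped and
 * sets the frame's bit in dirty_vram->dirty_bitmap[] if the mapping was (or
 * may have been) dirty. */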
static inline void shadow_vram_get_l1e(shadow_l1e_t new_sl1e,
                                       shadow_l1e_t *sl1e,
                                       mfn_t sl1mfn,
                                       struct domain *d)
{
    mfn_t mfn;
    unsigned long gfn;

    if ( !d->dirty_vram ) return;

    mfn = shadow_l1e_get_mfn(new_sl1e);

    if ( !mfn_valid(mfn) ) return; /* m2p for mmio_direct may not exist */

    gfn = mfn_to_gfn(d, mfn);

    if ( (gfn >= d->dirty_vram->begin_pfn) && (gfn < d->dirty_vram->end_pfn) )
    {
        unsigned long i = gfn - d->dirty_vram->begin_pfn;
        struct page_info *page = mfn_to_page(mfn);
        u32 count_info = page->u.inuse.type_info & PGT_count_mask;

        if ( count_info == 1 )
            /* Initial guest reference, record it */
            d->dirty_vram->sl1ma[i] = pfn_to_paddr(mfn_x(sl1mfn))
                | ((unsigned long)sl1e & ~PAGE_MASK);
    }
}

static inline void shadow_vram_put_l1e(shadow_l1e_t old_sl1e,
                                       shadow_l1e_t *sl1e,
                                       mfn_t sl1mfn,
                                       struct domain *d)
{
    mfn_t mfn;
    unsigned long gfn;

    if ( !d->dirty_vram ) return;

    mfn = shadow_l1e_get_mfn(old_sl1e);

    if ( !mfn_valid(mfn) ) return;

    gfn = mfn_to_gfn(d, mfn);

    if ( (gfn >= d->dirty_vram->begin_pfn) && (gfn < d->dirty_vram->end_pfn) )
    {
        unsigned long i = gfn - d->dirty_vram->begin_pfn;
        struct page_info *page = mfn_to_page(mfn);
        u32 count_info = page->u.inuse.type_info & PGT_count_mask;
        int dirty = 0;
        paddr_t sl1ma = pfn_to_paddr(mfn_x(sl1mfn))
            | ((unsigned long)sl1e & ~PAGE_MASK);

        if ( count_info == 1 )
        {
            /* Last reference */
            if ( d->dirty_vram->sl1ma[i] == INVALID_PADDR )
            {
                /* We didn't know it was that one, let's say it is dirty */
                dirty = 1;
            }
            else
            {
                ASSERT(d->dirty_vram->sl1ma[i] == sl1ma);
                d->dirty_vram->sl1ma[i] = INVALID_PADDR;
                if ( shadow_l1e_get_flags(old_sl1e) & _PAGE_DIRTY )
                    dirty = 1;
            }
        }
        else
        {
            /* We had more than one reference, just consider the page dirty. */
            dirty = 1;
            /* Check that it's not the one we recorded. */
            if ( d->dirty_vram->sl1ma[i] == sl1ma )
            {
                /* Too bad, we remembered the wrong one... */
                d->dirty_vram->sl1ma[i] = INVALID_PADDR;
            }
            else
            {
                /* Ok, our recorded sl1e is still pointing to this page, let's
                 * just hope it will remain. */
            }
        }
        if ( dirty )
        {
            d->dirty_vram->dirty_bitmap[i / 8] |= 1 << (i % 8);
            d->dirty_vram->last_dirty = NOW();
        }
    }
}

static int shadow_set_l1e(struct vcpu *v,
                          shadow_l1e_t *sl1e,
                          shadow_l1e_t new_sl1e,
                          mfn_t sl1mfn)
{
    int flags = 0;
    struct domain *d = v->domain;