亚洲欧美第一页_禁久久精品乱码_粉嫩av一区二区三区免费野_久草精品视频

? 歡迎來到蟲蟲下載站! | ?? 資源下載 ?? 資源專輯 ?? 關(guān)于我們
? 蟲蟲下載站

?? multi.c

?? xen虛擬機(jī)源代碼安裝包
?? C
?? 第 1 頁 / 共 5 頁
字號(hào):
    return (u32)((unsigned long)ptr & ~PAGE_MASK) / sizeof(guest_l1e_t);
}

/* Translate a guest L1 index into the matching shadow L1 index.
 * For 2-level guests a guest L1 page can span more than one shadow page,
 * so *smfn is first advanced to the shadow page that covers guest_index;
 * the within-page index is then returned.  For all other guest levels the
 * mapping is the identity. */
static u32
shadow_l1_index(mfn_t *smfn, u32 guest_index)
{
#if (GUEST_PAGING_LEVELS == 2)
    /* Advance to the shadow page holding this entry. */
    *smfn = _mfn(mfn_x(*smfn) +
                 (guest_index / SHADOW_L1_PAGETABLE_ENTRIES));
    return (guest_index % SHADOW_L1_PAGETABLE_ENTRIES);
#else
    return guest_index;
#endif
}

/* Translate a guest L2 index into the matching shadow L2 index.
 * As above, but each 2-level guest entry is shadowed by TWO shadow
 * entries, so the returned index is that of the first of the pair. */
static u32
shadow_l2_index(mfn_t *smfn, u32 guest_index)
{
#if (GUEST_PAGING_LEVELS == 2)
    // Because we use 2 shadow l2 entries for each guest entry, the number of
    // guest entries per shadow page is SHADOW_L2_PAGETABLE_ENTRIES/2
    //
    *smfn = _mfn(mfn_x(*smfn) +
                 (guest_index / (SHADOW_L2_PAGETABLE_ENTRIES / 2)));
    // We multiply by two to get the index of the first of the two entries
    // used to shadow the specified guest entry.
    return (guest_index % (SHADOW_L2_PAGETABLE_ENTRIES / 2)) * 2;
#else
    return guest_index;
#endif
}

#if GUEST_PAGING_LEVELS >= 4

/* L3 shadow index: identity mapping (smfn is not adjusted). */
static u32
shadow_l3_index(mfn_t *smfn, u32 guest_index)
{
    return guest_index;
}

/* L4 shadow index: identity mapping (smfn is not adjusted). */
static u32
shadow_l4_index(mfn_t *smfn, u32 guest_index)
{
    return guest_index;
}

#endif // GUEST_PAGING_LEVELS >= 4

/**************************************************************************/
/* Function which computes shadow entries from their corresponding guest
 * entries.  This is the "heart" of the shadow code. It operates using
 * level-1 shadow types, but handles all levels of entry.
 * Don't call it directly, but use the four wrappers below.
*/
/* Build the shadow entry for one guest pagetable entry.
 * guest_intpte is the raw guest entry; target_mfn is the machine frame it
 * maps (may be INVALID for prefetches / MMIO - see checks below); the
 * result is written through shadow_entry_ptr.  level, ft and p2mt describe
 * the pagetable level, the kind of fetch, and the p2m type of the target. */
static always_inline void
_sh_propagate(struct vcpu *v,
              guest_intpte_t guest_intpte,
              mfn_t target_mfn,
              void *shadow_entry_ptr,
              int level,
              fetch_type_t ft,
              p2m_type_t p2mt)
{
    guest_l1e_t guest_entry = { guest_intpte };
    shadow_l1e_t *sp = shadow_entry_ptr;
    struct domain *d = v->domain;
    gfn_t target_gfn = guest_l1e_get_gfn(guest_entry);
    u32 pass_thru_flags;
    u32 gflags, sflags;

    /* We don't shadow PAE l3s */
    ASSERT(GUEST_PAGING_LEVELS > 3 || level != 3);

    /* Check there's something for the shadows to map to */
    if ( !p2m_is_valid(p2mt) )
    {
        *sp = shadow_l1e_empty();
        goto done;
    }

    gflags = guest_l1e_get_flags(guest_entry);

    if ( unlikely(!(gflags & _PAGE_PRESENT)) )
    {
        /* If a guest l1 entry is not present, shadow with the magic
         * guest-not-present entry. */
        if ( level == 1 )
            *sp = sh_l1e_gnp();
        else
            *sp = shadow_l1e_empty();
        goto done;
    }

    if ( level == 1 && p2mt == p2m_mmio_dm )
    {
        /* Guest l1e maps emulated MMIO space */
        *sp = sh_l1e_mmio(target_gfn, gflags);
        if ( !d->arch.paging.shadow.has_fast_mmio_entries )
            d->arch.paging.shadow.has_fast_mmio_entries = 1;
        goto done;
    }

    // Must have a valid target_mfn unless this is a prefetch or an l1
    // pointing at MMIO space.  In the case of a prefetch, an invalid
    // mfn means that we can not usefully shadow anything, and so we
    // return early.
    //
    if ( !mfn_valid(target_mfn)
         && !(level == 1 && (!shadow_mode_refcounts(d)
                             || p2mt == p2m_mmio_direct)) )
    {
        ASSERT((ft == ft_prefetch));
        *sp = shadow_l1e_empty();
        goto done;
    }

    // Propagate bits from the guest to the shadow.
    // Some of these may be overwritten, below.
    // Since we know the guest's PRESENT bit is set, we also set the shadow's
    // SHADOW_PRESENT bit.
    //
    pass_thru_flags = (_PAGE_ACCESSED | _PAGE_USER |
                       _PAGE_RW | _PAGE_PRESENT);
    if ( guest_supports_nx(v) )
        pass_thru_flags |= _PAGE_NX_BIT;
    /* NOTE(review): for non-refcounted invalid-mfn mappings the guest's
     * caching bits are also passed straight through. */
    if ( !shadow_mode_refcounts(d) && !mfn_valid(target_mfn) )
        pass_thru_flags |= _PAGE_PAT | _PAGE_PCD | _PAGE_PWT;
    sflags = gflags & pass_thru_flags;

    /*
     * For HVM domains with direct access to MMIO areas, set the correct
     * caching attributes in the shadows to match what was asked for.
     */
    if ( (level == 1) && is_hvm_domain(d) && has_arch_pdevs(d) &&
         !is_xen_heap_mfn(mfn_x(target_mfn)) )
    {
        unsigned int type;
        /* Pinned cache attribute takes priority, then domain-wide UC mode,
         * otherwise derive PAT flags from the guest entry and addresses. */
        if ( hvm_get_mem_pinned_cacheattr(d, gfn_x(target_gfn), &type) )
            sflags |= pat_type_2_pte_flags(type);
        else if ( d->arch.hvm_domain.is_in_uc_mode )
            sflags |= pat_type_2_pte_flags(PAT_TYPE_UNCACHABLE);
        else
            sflags |= get_pat_flags(v,
                                    gflags,
                                    gfn_to_paddr(target_gfn),
                                    ((paddr_t)mfn_x(target_mfn)) << PAGE_SHIFT);
    }

    // Set the A&D bits for higher level shadows.
    // Higher level entries do not, strictly speaking, have dirty bits, but
    // since we use shadow linear tables, each of these entries may, at some
    // point in time, also serve as a shadow L1 entry.
    // By setting both the A&D bits in each of these, we eliminate the burden
    // on the hardware to update these bits on initial accesses.
    //
    if ( (level > 1) && !((SHADOW_PAGING_LEVELS == 3) && (level == 3)) )
        sflags |= _PAGE_ACCESSED | _PAGE_DIRTY;

    // If the A or D bit has not yet been set in the guest, then we must
    // prevent the corresponding kind of access.
    //
    if ( unlikely(!(gflags & _PAGE_ACCESSED)) )
        sflags &= ~_PAGE_PRESENT;

    /* D bits exist in L1es and PSE L2es */
    if ( unlikely(((level == 1) ||
                   ((level == 2) &&
                    (gflags & _PAGE_PSE) &&
                    guest_supports_superpages(v)))
                  && !(gflags & _PAGE_DIRTY)) )
        sflags &= ~_PAGE_RW;

    // shadow_mode_log_dirty support
    //
    // Only allow the guest write access to a page a) on a demand fault,
    // or b) if the page is already marked as dirty.
    //
    // (We handle log-dirty entirely inside the shadow code, without using the
    // p2m_ram_logdirty p2m type: only HAP uses that.)
    if ( unlikely((level == 1) && shadow_mode_log_dirty(d)) )
    {
        if ( mfn_valid(target_mfn) ) {
            if ( ft & FETCH_TYPE_WRITE )
                paging_mark_dirty(d, mfn_x(target_mfn));
            else if ( !sh_mfn_is_dirty(d, target_mfn) )
                sflags &= ~_PAGE_RW;
        }
    }

    /* Dirty-VRAM tracking: same demand-fault idea as log-dirty, applied
     * only to the tracked pfn range while last_dirty is unset (-1). */
    if ( unlikely((level == 1) && d->dirty_vram
            && d->dirty_vram->last_dirty == -1
            && gfn_x(target_gfn) >= d->dirty_vram->begin_pfn
            && gfn_x(target_gfn) < d->dirty_vram->end_pfn) )
    {
        if ( ft & FETCH_TYPE_WRITE )
            d->dirty_vram->last_dirty = NOW();
        else
            sflags &= ~_PAGE_RW;
    }

    /* Read-only memory */
    if ( p2mt == p2m_ram_ro )
        sflags &= ~_PAGE_RW;

    // protect guest page tables
    //
    if ( unlikely((level == 1)
                  && sh_mfn_is_a_page_table(target_mfn)
#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC )
                  /* Unless the page is out of sync and the guest is
                     writing to it. */
                  && !(mfn_oos_may_write(target_mfn)
                       && (ft == ft_demand_write))
#endif /* OOS */
                  ) )
    {
        if ( shadow_mode_trap_reads(d) )
        {
            // if we are trapping both reads & writes, then mark this page
            // as not present...
            //
            sflags &= ~_PAGE_PRESENT;
        }
        else
        {
            // otherwise, just prevent any writes...
            //
            sflags &= ~_PAGE_RW;
        }
    }

    // PV guests in 64-bit mode use two different page tables for user vs
    // supervisor permissions, making the guest's _PAGE_USER bit irrelevant.
    // It is always shadowed as present...
    if ( (GUEST_PAGING_LEVELS == 4) && !is_pv_32on64_domain(d)
         && !is_hvm_domain(d) )
    {
        sflags |= _PAGE_USER;
    }

    *sp = shadow_l1e_from_mfn(target_mfn, sflags);

 done:
    SHADOW_DEBUG(PROPAGATE,
                 "%s level %u guest %" SH_PRI_gpte " shadow %" SH_PRI_pte "\n",
                 fetch_type_names[ft], level, guest_entry.l1, sp->l1);
}

/* These four wrappers give us a little bit of type-safety back around
 * the use of void-* pointers and intpte types in _sh_propagate(), and
 * allow the compiler to optimize out some level checks. */

#if GUEST_PAGING_LEVELS >= 4
/* Propagate a guest L4 entry into the shadow L4 entry *sl4e. */
static void
l4e_propagate_from_guest(struct vcpu *v,
                         guest_l4e_t gl4e,
                         mfn_t sl3mfn,
                         shadow_l4e_t *sl4e,
                         fetch_type_t ft)
{
    _sh_propagate(v, gl4e.l4, sl3mfn, sl4e, 4, ft, p2m_ram_rw);
}

/* Propagate a guest L3 entry into the shadow L3 entry *sl3e. */
static void
l3e_propagate_from_guest(struct vcpu *v,
                         guest_l3e_t gl3e,
                         mfn_t sl2mfn,
                         shadow_l3e_t *sl3e,
                         fetch_type_t ft)
{
    _sh_propagate(v, gl3e.l3, sl2mfn, sl3e, 3, ft, p2m_ram_rw);
}
#endif // GUEST_PAGING_LEVELS >= 4

/* Propagate a guest L2 entry into the shadow L2 entry *sl2e. */
static void
l2e_propagate_from_guest(struct vcpu *v,
                         guest_l2e_t gl2e,
                         mfn_t sl1mfn,
                         shadow_l2e_t *sl2e,
                         fetch_type_t ft)
{
    _sh_propagate(v, gl2e.l2, sl1mfn, sl2e, 2, ft, p2m_ram_rw);
}

/* Propagate a guest L1 entry into the shadow L1 entry *sl1e; the only
 * wrapper that takes an explicit p2m type. */
static void
l1e_propagate_from_guest(struct vcpu *v,
                         guest_l1e_t gl1e,
                         mfn_t gmfn,
 shadow_l1e_t *sl1e,
                         fetch_type_t ft,
                         p2m_type_t p2mt)
{
    _sh_propagate(v, gl1e.l1, gmfn, sl1e, 1, ft, p2mt);
}

/**************************************************************************/
/* These functions update shadow entries (and do bookkeeping on the shadow
 * tables they are in).  It is intended that they are the only
 * functions which ever write (non-zero) data onto a shadow page.
 */

static inline void safe_write_entry(void *dst, void *src)
/* Copy one PTE safely when processors might be running on the
 * destination pagetable.   This does *not* give safety against
 * concurrent writes (that's what the shadow lock is for), just
 * stops the hardware picking up partially written entries. */
{
    /* dst is volatile: the stores below must reach memory in exactly
     * this order, since other CPUs may be walking this pagetable. */
    volatile unsigned long *d = dst;
    unsigned long *s = src;
    /* Entries must be naturally aligned for the single-word write to be
     * atomic. */
    ASSERT(!((unsigned long) d & (sizeof (shadow_l1e_t) - 1)));
#if CONFIG_PAGING_LEVELS == 3
    /* In PAE mode, pagetable entries are larger
     * than machine words, so won't get written atomically.  We need to make
     * sure any other cpu running on these shadows doesn't see a
     * half-written entry.  Do this by marking the entry not-present first,
     * then writing the high word before the low word. */
    BUILD_BUG_ON(sizeof (shadow_l1e_t) != 2 * sizeof (unsigned long));
    d[0] = 0;
    d[1] = s[1];
    d[0] = s[0];
#else
    /* In 64-bit, sizeof(pte) == sizeof(ulong) == 1 word,
     * which will be an atomic write, since the entry is aligned. */
    BUILD_BUG_ON(sizeof (shadow_l1e_t) != sizeof (unsigned long));
    *d = *s;
#endif
}

static inline void shadow_write_entries(void *d, void *s, int entries, mfn_t mfn)
/* This function does the actual writes to shadow pages.
 * It must not be called directly, since it doesn't do the bookkeeping
 * that shadow_set_l*e() functions do.
 */
{
    shadow_l1e_t *dst = d;
    shadow_l1e_t *src = s;
    void *map = NULL;
    int i;

    /* Because we mirror access rights at all levels in the shadow, an
     * l2 (or higher) entry with the RW bit cleared will leave us with
     * no write access through the linear map.
     * We detect that by writing to the shadow with copy_to_user() and
     * using map_domain_page() to get a writeable mapping if we need to. */
    /* Probe: a zero-length-style self-copy through the user-copy path;
     * non-zero return means the linear mapping is not writeable here. */
    if ( __copy_to_user(d, d, sizeof (unsigned long)) != 0 )
    {
        perfc_incr(shadow_linear_map_failed);

?? 快捷鍵說明

復(fù)制代碼 Ctrl + C
搜索代碼 Ctrl + F
全屏模式 F11
切換主題 Ctrl + Shift + D
顯示快捷鍵 ?
增大字號(hào) Ctrl + =
減小字號(hào) Ctrl + -
亚洲欧美第一页_禁久久精品乱码_粉嫩av一区二区三区免费野_久草精品视频
精品亚洲国产成人av制服丝袜| 久久天堂av综合合色蜜桃网| 国产精一品亚洲二区在线视频| 亚洲成人在线观看视频| 亚洲另类春色校园小说| 亚洲综合男人的天堂| 一区二区三区高清| 亚洲地区一二三色| 奇米色一区二区| 国产麻豆视频一区| 丁香婷婷深情五月亚洲| 国产黄色成人av| 99精品国产91久久久久久| 99久久精品国产毛片| 色一情一伦一子一伦一区| 在线欧美日韩精品| 日韩一区二区三区观看| 中文字幕精品在线不卡| 亚洲欧美在线另类| 亚洲成人免费在线观看| 国产视频视频一区| 一区二区三区欧美| 蜜桃视频第一区免费观看| 国产乱码精品1区2区3区| www.爱久久.com| 欧美日韩国产123区| 久久色中文字幕| 一区二区三区在线视频观看| 亚洲夂夂婷婷色拍ww47| 精品一区二区在线视频| 丁香婷婷综合网| 欧美一区二区三区免费| 国产精品视频免费看| 亚洲成人一区在线| 国产不卡高清在线观看视频| 色综合一区二区三区| 欧美精三区欧美精三区| 国产婷婷色一区二区三区在线| 亚洲色图视频网| 奇米色777欧美一区二区| 高清成人免费视频| 在线成人免费视频| 国产精品视频九色porn| 日本免费新一区视频| 成人性生交大片免费看视频在线| 欧美日韩1区2区| 国产精品毛片久久久久久| 日日摸夜夜添夜夜添亚洲女人| 粉嫩av亚洲一区二区图片| 69堂国产成人免费视频| 国产精品女人毛片| 久久精品国产亚洲aⅴ| 在线观看一区不卡| 国产三级精品视频| 久久91精品久久久久久秒播 | 91精品福利在线| 精品国产露脸精彩对白| 香蕉成人伊视频在线观看| 日本久久一区二区| 国产精品大尺度| 国产在线国偷精品免费看| 3atv一区二区三区| 亚洲国产一区二区三区青草影视| 风间由美性色一区二区三区| 欧美tk—视频vk| 免费成人美女在线观看| 欧美一区二区三区日韩| 亚洲一级电影视频| 91性感美女视频| 亚洲欧洲成人精品av97| 成人h版在线观看| 国产日韩欧美综合在线| 国产成人av自拍| 国产日韩欧美综合一区| 国产不卡视频在线观看| 国产精品水嫩水嫩| proumb性欧美在线观看| 国产精品午夜在线观看| www.av精品| 亚洲激情欧美激情| 在线亚洲一区二区| 一二三四区精品视频| 欧美中文字幕不卡| 日韩制服丝袜先锋影音| 欧美一区二区久久| 激情综合色播五月| 中文在线免费一区三区高中清不卡| 激情文学综合网| 国产精品网站导航| 在线免费观看不卡av| 国产偷v国产偷v亚洲高清| 91在线观看高清| 偷拍日韩校园综合在线| 精品视频一区二区三区免费| 无码av中文一区二区三区桃花岛| 精品国一区二区三区| 成人一区二区在线观看| 一区二区三区在线观看动漫| 欧美精品日韩一本| 狠狠色伊人亚洲综合成人| 欧美一区二区人人喊爽| 国产剧情av麻豆香蕉精品| 综合色中文字幕| 欧美电影一区二区| 国产在线精品免费av| 亚洲免费在线视频| 日韩欧美视频一区| 波多野结衣中文字幕一区| 亚洲一区二区三区视频在线 | 欧美日韩情趣电影| 国产精品中文字幕一区二区三区| 中文字幕色av一区二区三区| 在线播放视频一区| 高清国产一区二区| 日韩二区三区四区| 国产精品美女一区二区在线观看| 欧美日本乱大交xxxxx| 国产传媒欧美日韩成人| 亚洲成人一区在线| 国产欧美日韩久久| 欧美一级午夜免费电影| 99久久婷婷国产综合精品| 蜜臀久久久久久久| 亚洲成人综合网站| 中文字幕欧美一| 337p粉嫩大胆色噜噜噜噜亚洲| 欧美综合视频在线观看| 成人国产在线观看| 国产一区二区主播在线| 午夜免费久久看| 亚洲精品免费电影| 中文字幕在线观看一区二区| 欧美精品一区二区三区一线天视频| 在线视频国内一区二区| 91在线你懂得| 99国产麻豆精品| 成人国产在线观看| 成人美女在线视频| 国产尤物一区二区| 精品一区二区免费| 精品中文字幕一区二区小辣椒| 亚洲午夜羞羞片| 亚洲曰韩产成在线| 一区二区三区免费网站| 国产精品对白交换视频| 国产精品视频看| 国产精品入口麻豆原神| 国产人伦精品一区二区| 国产偷国产偷精品高清尤物 | 秋霞电影一区二区| 亚洲成a天堂v人片| 亚洲成人精品一区| 午夜日韩在线电影| 免费视频最近日韩| 国内精品免费在线观看| 精品一区二区三区香蕉蜜桃| 日本欧美一区二区三区乱码| 石原莉奈在线亚洲二区| 午夜视频久久久久久| 视频一区视频二区中文| 三级欧美韩日大片在线看| 三级久久三级久久久| 蜜臀av亚洲一区中文字幕| 黄网站免费久久| 国产精品久久久久久久浪潮网站| 欧美激情一区二区三区| **欧美大码日韩| 亚洲国产欧美日韩另类综合| 亚洲成av人片在线观看无码| 
日韩高清在线观看| 韩国精品免费视频| 成人在线一区二区三区| 日本韩国欧美三级| 欧美日韩高清影院| www国产成人| 国产精品家庭影院| 亚洲综合免费观看高清在线观看| 一区二区三区在线观看网站| 亚洲精品国产a久久久久久| 亚洲第一成人在线| 韩国v欧美v日本v亚洲v| 成人av网站在线| 91精品国产高清一区二区三区蜜臀| 精品国产不卡一区二区三区| 中文字幕制服丝袜一区二区三区| 亚洲成a人片在线不卡一二三区| 免费欧美在线视频| 北条麻妃一区二区三区| 欧美一级高清片| 日韩理论电影院| 久久99深爱久久99精品| 91视视频在线观看入口直接观看www | 亚洲va中文字幕| 国内一区二区视频| 色哟哟国产精品免费观看| 欧美一区二区三区思思人| ...中文天堂在线一区| 久久精品国产亚洲高清剧情介绍| 色激情天天射综合网| 久久久国产一区二区三区四区小说|