domain.c

xen 3.2.2 source code
C
Page 1 of 4
        rc = -EFAULT;
        if ( copy_to_guest(arg, &cpu_id, 1) )
            break;

        rc = 0;
        break;
    }

    default:
        rc = -ENOSYS;
        break;
    }

    return rc;
}

#ifdef __x86_64__

#define loadsegment(seg,value) ({               \
    int __r = 1;                                \
    asm volatile (                              \
        "1: movl %k1,%%" #seg "\n2:\n"          \
        ".section .fixup,\"ax\"\n"              \
        "3: xorl %k0,%k0\n"                     \
        "   movl %k0,%%" #seg "\n"              \
        "   jmp 2b\n"                           \
        ".previous\n"                           \
        ".section __ex_table,\"a\"\n"           \
        "   .align 8\n"                         \
        "   .quad 1b,3b\n"                      \
        ".previous"                             \
        : "=r" (__r) : "r" (value), "0" (__r) );\
    __r; })

/*
 * save_segments() writes a mask of segments which are dirty (non-zero),
 * allowing load_segments() to avoid some expensive segment loads and
 * MSR writes.
 */
static DEFINE_PER_CPU(unsigned int, dirty_segment_mask);
#define DIRTY_DS           0x01
#define DIRTY_ES           0x02
#define DIRTY_FS           0x04
#define DIRTY_GS           0x08
#define DIRTY_FS_BASE      0x10
#define DIRTY_GS_BASE_USER 0x20

static void load_segments(struct vcpu *n)
{
    struct vcpu_guest_context *nctxt = &n->arch.guest_context;
    int all_segs_okay = 1;
    unsigned int dirty_segment_mask, cpu = smp_processor_id();

    /* Load and clear the dirty segment mask. */
    dirty_segment_mask = per_cpu(dirty_segment_mask, cpu);
    per_cpu(dirty_segment_mask, cpu) = 0;

    /* Either selector != 0 ==> reload. */
    if ( unlikely((dirty_segment_mask & DIRTY_DS) | nctxt->user_regs.ds) )
        all_segs_okay &= loadsegment(ds, nctxt->user_regs.ds);

    /* Either selector != 0 ==> reload. */
    if ( unlikely((dirty_segment_mask & DIRTY_ES) | nctxt->user_regs.es) )
        all_segs_okay &= loadsegment(es, nctxt->user_regs.es);

    /*
     * Either selector != 0 ==> reload.
     * Also reload to reset FS_BASE if it was non-zero.
     */
    if ( unlikely((dirty_segment_mask & (DIRTY_FS | DIRTY_FS_BASE)) |
                  nctxt->user_regs.fs) )
        all_segs_okay &= loadsegment(fs, nctxt->user_regs.fs);

    /*
     * Either selector != 0 ==> reload.
     * Also reload to reset GS_BASE if it was non-zero.
     */
    if ( unlikely((dirty_segment_mask & (DIRTY_GS | DIRTY_GS_BASE_USER)) |
                  nctxt->user_regs.gs) )
    {
        /* Reset GS_BASE with user %gs? */
        if ( (dirty_segment_mask & DIRTY_GS) || !nctxt->gs_base_user )
            all_segs_okay &= loadsegment(gs, nctxt->user_regs.gs);
    }

    if ( !is_pv_32on64_domain(n->domain) )
    {
        /* This can only be non-zero if selector is NULL. */
        if ( nctxt->fs_base )
            wrmsr(MSR_FS_BASE,
                  nctxt->fs_base,
                  nctxt->fs_base>>32);

        /* Most kernels have non-zero GS base, so don't bother testing. */
        /* (This is also a serialising instruction, avoiding AMD erratum #88.) */
        wrmsr(MSR_SHADOW_GS_BASE,
              nctxt->gs_base_kernel,
              nctxt->gs_base_kernel>>32);

        /* This can only be non-zero if selector is NULL. */
        if ( nctxt->gs_base_user )
            wrmsr(MSR_GS_BASE,
                  nctxt->gs_base_user,
                  nctxt->gs_base_user>>32);

        /* If in kernel mode then switch the GS bases around. */
        if ( (n->arch.flags & TF_kernel_mode) )
            asm volatile ( "swapgs" );
    }

    if ( unlikely(!all_segs_okay) )
    {
        struct cpu_user_regs *regs = guest_cpu_user_regs();
        unsigned long *rsp =
            (n->arch.flags & TF_kernel_mode) ?
            (unsigned long *)regs->rsp :
            (unsigned long *)nctxt->kernel_sp;
        unsigned long cs_and_mask, rflags;

        if ( is_pv_32on64_domain(n->domain) )
        {
            unsigned int *esp = ring_1(regs) ?
                                (unsigned int *)regs->rsp :
                                (unsigned int *)nctxt->kernel_sp;
            unsigned int cs_and_mask, eflags;
            int ret = 0;

            /* CS longword also contains full evtchn_upcall_mask. */
            cs_and_mask = (unsigned short)regs->cs |
                ((unsigned int)vcpu_info(n, evtchn_upcall_mask) << 16);
            /* Fold upcall mask into RFLAGS.IF. */
            eflags  = regs->_eflags & ~X86_EFLAGS_IF;
            eflags |= !vcpu_info(n, evtchn_upcall_mask) << 9;

            if ( !ring_1(regs) )
            {
                ret  = put_user(regs->ss,       esp-1);
                ret |= put_user(regs->_esp,     esp-2);
                esp -= 2;
            }

            if ( ret |
                 put_user(eflags,              esp-1) |
                 put_user(cs_and_mask,         esp-2) |
                 put_user(regs->_eip,          esp-3) |
                 put_user(nctxt->user_regs.gs, esp-4) |
                 put_user(nctxt->user_regs.fs, esp-5) |
                 put_user(nctxt->user_regs.es, esp-6) |
                 put_user(nctxt->user_regs.ds, esp-7) )
            {
                gdprintk(XENLOG_ERR, "Error while creating compat "
                         "failsafe callback frame.\n");
                domain_crash(n->domain);
            }

            if ( test_bit(_VGCF_failsafe_disables_events,
                          &n->arch.guest_context.flags) )
                vcpu_info(n, evtchn_upcall_mask) = 1;

            regs->entry_vector  = TRAP_syscall;
            regs->_eflags      &= 0xFFFCBEFFUL;
            regs->ss            = FLAT_COMPAT_KERNEL_SS;
            regs->_esp          = (unsigned long)(esp-7);
            regs->cs            = FLAT_COMPAT_KERNEL_CS;
            regs->_eip          = nctxt->failsafe_callback_eip;
            return;
        }

        if ( !(n->arch.flags & TF_kernel_mode) )
            toggle_guest_mode(n);
        else
            regs->cs &= ~3;

        /* CS longword also contains full evtchn_upcall_mask. */
        cs_and_mask = (unsigned long)regs->cs |
            ((unsigned long)vcpu_info(n, evtchn_upcall_mask) << 32);

        /* Fold upcall mask into RFLAGS.IF. */
        rflags  = regs->rflags & ~X86_EFLAGS_IF;
        rflags |= !vcpu_info(n, evtchn_upcall_mask) << 9;

        if ( put_user(regs->ss,            rsp- 1) |
             put_user(regs->rsp,           rsp- 2) |
             put_user(rflags,              rsp- 3) |
             put_user(cs_and_mask,         rsp- 4) |
             put_user(regs->rip,           rsp- 5) |
             put_user(nctxt->user_regs.gs, rsp- 6) |
             put_user(nctxt->user_regs.fs, rsp- 7) |
             put_user(nctxt->user_regs.es, rsp- 8) |
             put_user(nctxt->user_regs.ds, rsp- 9) |
             put_user(regs->r11,           rsp-10) |
             put_user(regs->rcx,           rsp-11) )
        {
            gdprintk(XENLOG_ERR, "Error while creating failsafe "
                    "callback frame.\n");
            domain_crash(n->domain);
        }

        if ( test_bit(_VGCF_failsafe_disables_events,
                      &n->arch.guest_context.flags) )
            vcpu_info(n, evtchn_upcall_mask) = 1;

        regs->entry_vector  = TRAP_syscall;
        regs->rflags       &= ~(X86_EFLAGS_AC|X86_EFLAGS_VM|X86_EFLAGS_RF|
                                X86_EFLAGS_NT|X86_EFLAGS_TF);
        regs->ss            = FLAT_KERNEL_SS;
        regs->rsp           = (unsigned long)(rsp-11);
        regs->cs            = FLAT_KERNEL_CS;
        regs->rip           = nctxt->failsafe_callback_eip;
    }
}

static void save_segments(struct vcpu *v)
{
    struct vcpu_guest_context *ctxt = &v->arch.guest_context;
    struct cpu_user_regs      *regs = &ctxt->user_regs;
    unsigned int dirty_segment_mask = 0;

    regs->ds = read_segment_register(ds);
    regs->es = read_segment_register(es);
    regs->fs = read_segment_register(fs);
    regs->gs = read_segment_register(gs);

    if ( regs->ds )
        dirty_segment_mask |= DIRTY_DS;

    if ( regs->es )
        dirty_segment_mask |= DIRTY_ES;

    if ( regs->fs || is_pv_32on64_domain(v->domain) )
    {
        dirty_segment_mask |= DIRTY_FS;
        ctxt->fs_base = 0; /* != 0 selector kills fs_base */
    }
    else if ( ctxt->fs_base )
    {
        dirty_segment_mask |= DIRTY_FS_BASE;
    }

    if ( regs->gs || is_pv_32on64_domain(v->domain) )
    {
        dirty_segment_mask |= DIRTY_GS;
        ctxt->gs_base_user = 0; /* != 0 selector kills gs_base_user */
    }
    else if ( ctxt->gs_base_user )
    {
        dirty_segment_mask |= DIRTY_GS_BASE_USER;
    }

    this_cpu(dirty_segment_mask) = dirty_segment_mask;
}

#define switch_kernel_stack(v) ((void)0)

#elif defined(__i386__)

#define load_segments(n) ((void)0)
#define save_segments(p) ((void)0)

static inline void switch_kernel_stack(struct vcpu *v)
{
    struct tss_struct *tss = &init_tss[smp_processor_id()];
    tss->esp1 = v->arch.guest_context.kernel_sp;
    tss->ss1  = v->arch.guest_context.kernel_ss;
}

#endif /* __i386__ */

static void paravirt_ctxt_switch_from(struct vcpu *v)
{
    save_segments(v);

    /*
     * Disable debug breakpoints. We do this aggressively because if we switch
     * to an HVM guest we may load DR0-DR3 with values that can cause #DE
     * inside Xen, before we get a chance to reload DR7, and this cannot always
     * safely be handled.
     */
    if ( unlikely(v->arch.guest_context.debugreg[7] & DR7_ACTIVE_MASK) )
        write_debugreg(7, 0);
}

static void paravirt_ctxt_switch_to(struct vcpu *v)
{
    unsigned long cr4;

    set_int80_direct_trap(v);
    switch_kernel_stack(v);

    cr4 = pv_guest_cr4_to_real_cr4(v->arch.guest_context.ctrlreg[4]);
    if ( unlikely(cr4 != read_cr4()) )
        write_cr4(cr4);

    if ( unlikely(v->arch.guest_context.debugreg[7] & DR7_ACTIVE_MASK) )
    {
        write_debugreg(0, v->arch.guest_context.debugreg[0]);
        write_debugreg(1, v->arch.guest_context.debugreg[1]);
        write_debugreg(2, v->arch.guest_context.debugreg[2]);
        write_debugreg(3, v->arch.guest_context.debugreg[3]);
        write_debugreg(6, v->arch.guest_context.debugreg[6]);
        write_debugreg(7, v->arch.guest_context.debugreg[7]);
    }
}

static void __context_switch(void)
{
    struct cpu_user_regs *stack_regs = guest_cpu_user_regs();
    unsigned int          cpu = smp_processor_id();
    struct vcpu          *p = per_cpu(curr_vcpu, cpu);
    struct vcpu          *n = current;

    ASSERT(p != n);
    ASSERT(cpus_empty(n->vcpu_dirty_cpumask));

    if ( !is_idle_vcpu(p) )
    {
        memcpy(&p->arch.guest_context.user_regs,
               stack_regs,
               CTXT_SWITCH_STACK_BYTES);
        unlazy_fpu(p);
        p->arch.ctxt_switch_from(p);
    }

    if ( !is_idle_vcpu(n) )
    {
        memcpy(stack_regs,
               &n->arch.guest_context.user_regs,
               CTXT_SWITCH_STACK_BYTES);
        n->arch.ctxt_switch_to(n);
    }

    if ( p->domain != n->domain )
        cpu_set(cpu, n->domain->domain_dirty_cpumask);
    cpu_set(cpu, n->vcpu_dirty_cpumask);

    write_ptbase(n);

    if ( p->vcpu_id != n->vcpu_id )
    {
        char gdt_load[10];
        *(unsigned short *)(&gdt_load[0]) = LAST_RESERVED_GDT_BYTE;
        *(unsigned long  *)(&gdt_load[2]) = GDT_VIRT_START(n);
        asm volatile ( "lgdt %0" : "=m" (gdt_load) );
    }

    if ( p->domain != n->domain )
        cpu_clear(cpu, p->domain->domain_dirty_cpumask);
    cpu_clear(cpu, p->vcpu_dirty_cpumask);

    per_cpu(curr_vcpu, cpu) = n;
}

void context_switch(struct vcpu *prev, struct vcpu *next)
{
    unsigned int cpu = smp_processor_id();
    cpumask_t dirty_mask = next->vcpu_dirty_cpumask;

    ASSERT(local_irq_is_enabled());

    /* Allow at most one CPU at a time to be dirty. */
    ASSERT(cpus_weight(dirty_mask) <= 1);
    if ( unlikely(!cpu_isset(cpu, dirty_mask) && !cpus_empty(dirty_mask)) )
    {
        /* Other cpus call __sync_lazy_execstate from flush ipi handler. */
        if ( !cpus_empty(next->vcpu_dirty_cpumask) )
            flush_tlb_mask(next->vcpu_dirty_cpumask);
    }

    local_irq_disable();

    if ( is_hvm_vcpu(prev) && !list_empty(&prev->arch.hvm_vcpu.tm_list) )
        pt_save_timer(prev);

    set_current(next);

    if ( (per_cpu(curr_vcpu, cpu) == next) || is_idle_vcpu(next) )
    {
        local_irq_enable();
    }
    else
    {
        __context_switch();

#ifdef CONFIG_COMPAT
        if ( !is_hvm_vcpu(next) &&
             (is_idle_vcpu(prev) ||
              is_hvm_vcpu(prev) ||
              is_pv_32on64_vcpu(prev) != is_pv_32on64_vcpu(next)) )
        {
            uint64_t efer = read_efer();
            if ( !(efer & EFER_SCE) )
                write_efer(efer | EFER_SCE);
            flush_tlb_one_local(GDT_VIRT_START(next) +
                                FIRST_RESERVED_GDT_BYTE);
        }
#endif

        /* Re-enable interrupts before restoring state which may fault. */
        local_irq_enable();

        if ( !is_hvm_vcpu(next) )
        {
            load_LDT(next);
            load_segments(next);
        }
    }

    context_saved(prev);

    /* Update per-VCPU guest runstate shared memory area (if registered). */
    if ( !guest_handle_is_null(runstate_guest(next)) )
    {
        if ( !is_pv_32on64_domain(next->domain) )
            __copy_to_guest(runstate_guest(next), &next->runstate, 1);
#ifdef CONFIG_COMPAT
        else
        {
            struct compat_vcpu_runstate_info info;

            XLAT_vcpu_runstate_info(&info, &next->runstate);
            __copy_to_guest(next->runstate_guest.compat, &info, 1);
        }
#endif
    }

    schedule_tail(next);
    BUG();
}

void continue_running(struct vcpu *same)
{
    schedule_tail(same);
    BUG();
}

int __sync_lazy_execstate(void)
{
    unsigned long flags;
    int switch_required;

    local_irq_save(flags);

    switch_required = (this_cpu(curr_vcpu) != current);

    if ( switch_required )
    {
        ASSERT(current == idle_vcpu[smp_processor_id()]);
        __context_switch();
    }

    local_irq_restore(flags);

    return switch_required;
}

void sync_vcpu_execstate(struct vcpu *v)
{
    if ( cpu_isset(smp_processor_id(), v->vcpu_dirty_cpumask) )
        (void)__sync_lazy_execstate();

    /* Other cpus call __sync_lazy_execstate from flush ipi handler. */
    flush_tlb_mask(v->vcpu_dirty_cpumask);
}

struct migrate_info {
    long (*func)(void *data);
    void *data;
    void (*saved_schedule_tail)(struct vcpu *);
    cpumask_t saved_affinity;
};

static void continue_hypercall_on_cpu_helper(struct vcpu *v)
{
    struct cpu_user_regs *regs = guest_cpu_user_regs();
    struct migrate_info *info = v->arch.continue_info;
    cpumask_t mask = info->saved_affinity;
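
The save_segments()/load_segments() pair in the listing above avoids redundant selector reloads by recording, per CPU, which segment registers the outgoing vCPU left non-zero, and only touching a register if it was dirtied or the incoming vCPU wants a non-zero selector. The following is a minimal user-space C sketch of that bookkeeping only, not Xen code: the struct, function names, and selector values are hypothetical, and the real code additionally handles the FS/GS base MSRs, the failsafe callback, and the 32-on-64 compat case.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for the per-vCPU saved selector state. */
struct seg_state {
    uint16_t ds, es, fs, gs;
};

#define DIRTY_DS 0x01
#define DIRTY_ES 0x02
#define DIRTY_FS 0x04
#define DIRTY_GS 0x08

/*
 * Model of save_segments(): record which selectors were non-zero at
 * save time.  A zero selector needs no reload later unless the next
 * context explicitly wants a non-zero value.
 */
static unsigned int save_mask(const struct seg_state *s)
{
    unsigned int mask = 0;
    if ( s->ds ) mask |= DIRTY_DS;
    if ( s->es ) mask |= DIRTY_ES;
    if ( s->fs ) mask |= DIRTY_FS;
    if ( s->gs ) mask |= DIRTY_GS;
    return mask;
}

/*
 * Model of load_segments(): a (simulated) segment register is touched
 * only if the previous context dirtied it or the incoming context
 * wants a non-zero selector -- the "either selector != 0 ==> reload"
 * rule from the listing above.
 */
static unsigned int loads_needed(unsigned int dirty, const struct seg_state *next)
{
    unsigned int todo = 0;
    if ( (dirty & DIRTY_DS) | next->ds ) todo |= DIRTY_DS;
    if ( (dirty & DIRTY_ES) | next->es ) todo |= DIRTY_ES;
    if ( (dirty & DIRTY_FS) | next->fs ) todo |= DIRTY_FS;
    if ( (dirty & DIRTY_GS) | next->gs ) todo |= DIRTY_GS;
    return todo;
}

int main(void)
{
    /* Hypothetical selector values for two successive contexts. */
    struct seg_state prev = { .ds = 0, .es = 0, .fs = 0x63, .gs = 0 };
    struct seg_state next = { .ds = 0, .es = 0, .fs = 0,    .gs = 0x6b };

    unsigned int dirty = save_mask(&prev);
    unsigned int todo  = loads_needed(dirty, &next);

    /* Only %fs (dirtied by prev) and %gs (wanted by next) get reloaded. */
    printf("dirty mask 0x%x, reloads needed 0x%x\n", dirty, todo);
    return 0;
}

Built with any C99 compiler, the sketch reports a dirty mask of 0x4 and a reload mask of 0xc: %ds and %es are skipped entirely, which is the saving the per-CPU dirty_segment_mask buys on the real context-switch path.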
