svm.c
	err = kvm_vcpu_init(&svm->vcpu, kvm, id);
	if (err)
		goto free_svm;

	if (irqchip_in_kernel(kvm)) {
		err = kvm_create_lapic(&svm->vcpu);
		if (err < 0)
			goto free_svm;
	}

	page = alloc_page(GFP_KERNEL);
	if (!page) {
		err = -ENOMEM;
		goto uninit;
	}

	svm->vmcb = page_address(page);
	clear_page(svm->vmcb);
	svm->vmcb_pa = page_to_pfn(page) << PAGE_SHIFT;
	svm->asid_generation = 0;
	memset(svm->db_regs, 0, sizeof(svm->db_regs));
	init_vmcb(svm->vmcb);

	fx_init(&svm->vcpu);
	svm->vcpu.fpu_active = 1;
	svm->vcpu.apic_base = 0xfee00000 | MSR_IA32_APICBASE_ENABLE;
	if (svm->vcpu.vcpu_id == 0)
		svm->vcpu.apic_base |= MSR_IA32_APICBASE_BSP;

	return &svm->vcpu;

uninit:
	kvm_vcpu_uninit(&svm->vcpu);
free_svm:
	kmem_cache_free(kvm_vcpu_cache, svm);
out:
	return ERR_PTR(err);
}

static void svm_free_vcpu(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	__free_page(pfn_to_page(svm->vmcb_pa >> PAGE_SHIFT));
	kvm_vcpu_uninit(vcpu);
	kmem_cache_free(kvm_vcpu_cache, svm);
}

static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	int i;

	if (unlikely(cpu != vcpu->cpu)) {
		u64 tsc_this, delta;

		/*
		 * Make sure that the guest sees a monotonically
		 * increasing TSC.
		 */
		rdtscll(tsc_this);
		delta = vcpu->host_tsc - tsc_this;
		svm->vmcb->control.tsc_offset += delta;
		vcpu->cpu = cpu;
		kvm_migrate_apic_timer(vcpu);
	}

	for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
		rdmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
}

static void svm_vcpu_put(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	int i;

	for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
		wrmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);

	rdtscll(vcpu->host_tsc);
	kvm_put_guest_fpu(vcpu);
}

static void svm_vcpu_decache(struct kvm_vcpu *vcpu)
{
}

static void svm_cache_regs(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	vcpu->regs[VCPU_REGS_RAX] = svm->vmcb->save.rax;
	vcpu->regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
	vcpu->rip = svm->vmcb->save.rip;
}

static void svm_decache_regs(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->vmcb->save.rax = vcpu->regs[VCPU_REGS_RAX];
	svm->vmcb->save.rsp = vcpu->regs[VCPU_REGS_RSP];
	svm->vmcb->save.rip = vcpu->rip;
}

static unsigned long svm_get_rflags(struct kvm_vcpu *vcpu)
{
	return to_svm(vcpu)->vmcb->save.rflags;
}

static void svm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
{
	to_svm(vcpu)->vmcb->save.rflags = rflags;
}

static struct vmcb_seg *svm_seg(struct kvm_vcpu *vcpu, int seg)
{
	struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save;

	switch (seg) {
	case VCPU_SREG_CS: return &save->cs;
	case VCPU_SREG_DS: return &save->ds;
	case VCPU_SREG_ES: return &save->es;
	case VCPU_SREG_FS: return &save->fs;
	case VCPU_SREG_GS: return &save->gs;
	case VCPU_SREG_SS: return &save->ss;
	case VCPU_SREG_TR: return &save->tr;
	case VCPU_SREG_LDTR: return &save->ldtr;
	}
	BUG();
	return NULL;
}

static u64 svm_get_segment_base(struct kvm_vcpu *vcpu, int seg)
{
	struct vmcb_seg *s = svm_seg(vcpu, seg);

	return s->base;
}

static void svm_get_segment(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg)
{
	struct vmcb_seg *s = svm_seg(vcpu, seg);

	var->base = s->base;
	var->limit = s->limit;
	var->selector = s->selector;
	var->type = s->attrib & SVM_SELECTOR_TYPE_MASK;
	var->s = (s->attrib >> SVM_SELECTOR_S_SHIFT) & 1;
	var->dpl = (s->attrib >> SVM_SELECTOR_DPL_SHIFT) & 3;
	var->present = (s->attrib >> SVM_SELECTOR_P_SHIFT) & 1;
	var->avl = (s->attrib >> SVM_SELECTOR_AVL_SHIFT) & 1;
	var->l = (s->attrib >> SVM_SELECTOR_L_SHIFT) & 1;
	var->db = (s->attrib >> SVM_SELECTOR_DB_SHIFT) & 1;
	var->g = (s->attrib >> SVM_SELECTOR_G_SHIFT) & 1;
	var->unusable = !var->present;
}
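/*
 * Note: the VMCB keeps segment attributes in the packed SVM "attrib"
 * format, so svm_get_segment() above and svm_set_segment() below
 * translate field by field between that format and the generic
 * struct kvm_segment (type, S, DPL, P, AVL, L, D/B, G).
 */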
static void svm_get_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	dt->limit = svm->vmcb->save.idtr.limit;
	dt->base = svm->vmcb->save.idtr.base;
}

static void svm_set_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->vmcb->save.idtr.limit = dt->limit;
	svm->vmcb->save.idtr.base = dt->base;
}

static void svm_get_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	dt->limit = svm->vmcb->save.gdtr.limit;
	dt->base = svm->vmcb->save.gdtr.base;
}

static void svm_set_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->vmcb->save.gdtr.limit = dt->limit;
	svm->vmcb->save.gdtr.base = dt->base;
}

static void svm_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
{
}

static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
	struct vcpu_svm *svm = to_svm(vcpu);

#ifdef CONFIG_X86_64
	if (vcpu->shadow_efer & KVM_EFER_LME) {
		if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
			vcpu->shadow_efer |= KVM_EFER_LMA;
			svm->vmcb->save.efer |= KVM_EFER_LMA | KVM_EFER_LME;
		}

		if (is_paging(vcpu) && !(cr0 & X86_CR0_PG)) {
			vcpu->shadow_efer &= ~KVM_EFER_LMA;
			svm->vmcb->save.efer &= ~(KVM_EFER_LMA | KVM_EFER_LME);
		}
	}
#endif
	if ((vcpu->cr0 & X86_CR0_TS) && !(cr0 & X86_CR0_TS)) {
		svm->vmcb->control.intercept_exceptions &= ~(1 << NM_VECTOR);
		vcpu->fpu_active = 1;
	}

	vcpu->cr0 = cr0;
	cr0 |= X86_CR0_PG | X86_CR0_WP;
	cr0 &= ~(X86_CR0_CD | X86_CR0_NW);
	svm->vmcb->save.cr0 = cr0;
}

static void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
	vcpu->cr4 = cr4;
	to_svm(vcpu)->vmcb->save.cr4 = cr4 | X86_CR4_PAE;
}

static void svm_set_segment(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb_seg *s = svm_seg(vcpu, seg);

	s->base = var->base;
	s->limit = var->limit;
	s->selector = var->selector;
	if (var->unusable)
		s->attrib = 0;
	else {
		s->attrib = (var->type & SVM_SELECTOR_TYPE_MASK);
		s->attrib |= (var->s & 1) << SVM_SELECTOR_S_SHIFT;
		s->attrib |= (var->dpl & 3) << SVM_SELECTOR_DPL_SHIFT;
		s->attrib |= (var->present & 1) << SVM_SELECTOR_P_SHIFT;
		s->attrib |= (var->avl & 1) << SVM_SELECTOR_AVL_SHIFT;
		s->attrib |= (var->l & 1) << SVM_SELECTOR_L_SHIFT;
		s->attrib |= (var->db & 1) << SVM_SELECTOR_DB_SHIFT;
		s->attrib |= (var->g & 1) << SVM_SELECTOR_G_SHIFT;
	}
	if (seg == VCPU_SREG_CS)
		svm->vmcb->save.cpl
			= (svm->vmcb->save.cs.attrib
			   >> SVM_SELECTOR_DPL_SHIFT) & 3;
}

/* FIXME:

	svm(vcpu)->vmcb->control.int_ctl &= ~V_TPR_MASK;
	svm(vcpu)->vmcb->control.int_ctl |= (sregs->cr8 & V_TPR_MASK);

*/

static int svm_guest_debug(struct kvm_vcpu *vcpu, struct kvm_debug_guest *dbg)
{
	return -EOPNOTSUPP;
}

static int svm_get_irq(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u32 exit_int_info = svm->vmcb->control.exit_int_info;

	if (is_external_interrupt(exit_int_info))
		return exit_int_info & SVM_EVTINJ_VEC_MASK;
	return -1;
}

static void load_host_msrs(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
	wrmsrl(MSR_GS_BASE, to_svm(vcpu)->host_gs_base);
#endif
}

static void save_host_msrs(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
	rdmsrl(MSR_GS_BASE, to_svm(vcpu)->host_gs_base);
#endif
}

static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *svm_data)
{
	if (svm_data->next_asid > svm_data->max_asid) {
		++svm_data->asid_generation;
		svm_data->next_asid = 1;
		svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID;
	}

	svm->vcpu.cpu = svm_data->cpu;
	svm->asid_generation = svm_data->asid_generation;
	svm->vmcb->control.asid = svm_data->next_asid++;
}
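/*
 * new_asid() above hands each VMCB a fresh ASID from the per-CPU pool.
 * When next_asid overflows max_asid, the generation counter is bumped,
 * numbering restarts at 1, and the next VMRUN is asked to flush the
 * whole TLB (TLB_CONTROL_FLUSH_ALL_ASID), so stale translations tagged
 * with an ASID from the previous generation cannot be reused.
 */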
static unsigned long svm_get_dr(struct kvm_vcpu *vcpu, int dr)
{
	return to_svm(vcpu)->db_regs[dr];
}

static void svm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long value,
		       int *exception)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	*exception = 0;

	if (svm->vmcb->save.dr7 & DR7_GD_MASK) {
		svm->vmcb->save.dr7 &= ~DR7_GD_MASK;
		svm->vmcb->save.dr6 |= DR6_BD_MASK;
		*exception = DB_VECTOR;
		return;
	}

	switch (dr) {
	case 0 ... 3:
		svm->db_regs[dr] = value;
		return;
	case 4 ... 5:
		if (vcpu->cr4 & X86_CR4_DE) {
			*exception = UD_VECTOR;
			return;
		}
		/* fall through */
	case 7: {
		if (value & ~((1ULL << 32) - 1)) {
			*exception = GP_VECTOR;
			return;
		}
		svm->vmcb->save.dr7 = value;
		return;
	}
	default:
		printk(KERN_DEBUG "%s: unexpected dr %u\n",
		       __FUNCTION__, dr);
		*exception = UD_VECTOR;
		return;
	}
}

static int pf_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	u32 exit_int_info = svm->vmcb->control.exit_int_info;
	struct kvm *kvm = svm->vcpu.kvm;
	u64 fault_address;
	u32 error_code;
	enum emulation_result er;
	int r;

	if (!irqchip_in_kernel(kvm) &&
	    is_external_interrupt(exit_int_info))
		push_irq(&svm->vcpu, exit_int_info & SVM_EVTINJ_VEC_MASK);

	mutex_lock(&kvm->lock);

	fault_address = svm->vmcb->control.exit_info_2;
	error_code = svm->vmcb->control.exit_info_1;
	r = kvm_mmu_page_fault(&svm->vcpu, fault_address, error_code);
	if (r < 0) {
		mutex_unlock(&kvm->lock);
		return r;
	}
	if (!r) {
		mutex_unlock(&kvm->lock);
		return 1;
	}
	er = emulate_instruction(&svm->vcpu, kvm_run, fault_address,
				 error_code);
	mutex_unlock(&kvm->lock);

	switch (er) {
	case EMULATE_DONE:
		return 1;
	case EMULATE_DO_MMIO:
		++svm->vcpu.stat.mmio_exits;
		return 0;
	case EMULATE_FAIL:
		kvm_report_emulation_failure(&svm->vcpu, "pagetable");
		break;
	default:
		BUG();
	}

	kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
	return 0;
}

static int nm_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	svm->vmcb->control.intercept_exceptions &= ~(1 << NM_VECTOR);
	if (!(svm->vcpu.cr0 & X86_CR0_TS))
		svm->vmcb->save.cr0 &= ~X86_CR0_TS;
	svm->vcpu.fpu_active = 1;

	return 1;
}

static int shutdown_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	/*
	 * VMCB is undefined after a SHUTDOWN intercept
	 * so reinitialize it.
	 */
	clear_page(svm->vmcb);
	init_vmcb(svm->vmcb);

	kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
	return 0;
}
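/*
 * For the IOIO intercept handled next, exit_info_1 carries the decoded
 * instruction: bit 0 is the direction (IN vs. OUT), bit 2 the string
 * flag, bit 3 the REP prefix, bits 4-6 the operand size, and bits 16-31
 * the port number; exit_info_2 holds the rip of the next instruction.
 */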
static int io_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	u32 io_info = svm->vmcb->control.exit_info_1; /* address size bug? */
	int size, down, in, string, rep;
	unsigned port;

	++svm->vcpu.stat.io_exits;

	svm->next_rip = svm->vmcb->control.exit_info_2;

	string = (io_info & SVM_IOIO_STR_MASK) != 0;

	if (string) {
		if (emulate_instruction(&svm->vcpu,
					kvm_run, 0, 0) == EMULATE_DO_MMIO)
			return 0;
		return 1;
	}

	in = (io_info & SVM_IOIO_TYPE_MASK) != 0;
	port = io_info >> 16;
	size = (io_info & SVM_IOIO_SIZE_MASK) >> SVM_IOIO_SIZE_SHIFT;
	rep = (io_info & SVM_IOIO_REP_MASK) != 0;
	down = (svm->vmcb->save.rflags & X86_EFLAGS_DF) != 0;

	return kvm_emulate_pio(&svm->vcpu, kvm_run, in, size, port);
}

static int nop_on_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	return 1;
}

static int halt_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	svm->next_rip = svm->vmcb->save.rip + 1;
	skip_emulated_instruction(&svm->vcpu);
	return kvm_emulate_halt(&svm->vcpu);
}

static int vmmcall_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	svm->next_rip = svm->vmcb->save.rip + 3;
	skip_emulated_instruction(&svm->vcpu);
	return kvm_hypercall(&svm->vcpu, kvm_run);
}

static int invalid_op_interception(struct vcpu_svm *svm,
				   struct kvm_run *kvm_run)
{
	inject_ud(&svm->vcpu);
	return 1;
}

static int task_switch_interception(struct vcpu_svm *svm,
				    struct kvm_run *kvm_run)
{
	pr_unimpl(&svm->vcpu, "%s: task switch is unsupported\n",
		  __FUNCTION__);
	kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
	return 0;
}

static int cpuid_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	svm->next_rip = svm->vmcb->save.rip + 2;
	kvm_emulate_cpuid(&svm->vcpu);
	return 1;
}

static int emulate_on_interception(struct vcpu_svm *svm,
				   struct kvm_run *kvm_run)
{
	if (emulate_instruction(&svm->vcpu, NULL, 0, 0) != EMULATE_DONE)
		pr_unimpl(&svm->vcpu, "%s: failed\n", __FUNCTION__);
	return 1;
}

static int svm_get_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 *data)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	switch (ecx) {
	case MSR_IA32_TIME_STAMP_COUNTER: {
		u64 tsc;

		rdtscll(tsc);
		*data = svm->vmcb->control.tsc_offset + tsc;
		break;
	}
	case MSR_K6_STAR:
		*data = svm->vmcb->save.star;
		break;
#ifdef CONFIG_X86_64
	case MSR_LSTAR:
		*data = svm->vmcb->save.lstar;
		break;
	case MSR_CSTAR:
		*data = svm->vmcb->save.cstar;
		break;
	case MSR_KERNEL_GS_BASE:
		*data = svm->vmcb->save.kernel_gs_base;
		break;
	case MSR_SYSCALL_MASK:
		*data = svm->vmcb->save.sfmask;
		break;
#endif
	case MSR_IA32_SYSENTER_CS:
		*data = svm->vmcb->save.sysenter_cs;
		break;
	case MSR_IA32_SYSENTER_EIP:
		*data = svm->vmcb->save.sysenter_eip;
		break;
	case MSR_IA32_SYSENTER_ESP:
		*data = svm->vmcb->save.sysenter_esp;
		break;
	default:
		return kvm_get_msr_common(vcpu, ecx, data);
	}
	return 0;
}

static int rdmsr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	u32 ecx = svm->vcpu.regs[VCPU_REGS_RCX];
	u64 data;

	if (svm_get_msr(&svm->vcpu, ecx, &data))
		svm_inject_gp(&svm->vcpu, 0);
	else {
		svm->vmcb->save.rax = data & 0xffffffff;
		svm->vcpu.regs[VCPU_REGS_RDX] = data >> 32;
		svm->next_rip = svm->vmcb->save.rip + 2;
		skip_emulated_instruction(&svm->vcpu);
	}
	return 1;
}

static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	switch (ecx) {
	case MSR_IA32_TIME_STAMP_COUNTER: {
		u64 tsc;

		rdtscll(tsc);
		svm->vmcb->control.tsc_offset = data - tsc;
		break;
	}
	case MSR_K6_STAR:
		svm->vmcb->save.star = data;
		break;
#ifdef CONFIG_X86_64
	case MSR_LSTAR:
		svm->vmcb->save.lstar = data;
		break;
	case MSR_CSTAR:
		svm->vmcb->save.cstar = data;
		break;
	case MSR_KERNEL_GS_BASE:
		svm->vmcb->save.kernel_gs_base = data;
		break;
	case MSR_SYSCALL_MASK:
		svm->vmcb->save.sfmask = data;
		break;
#endif
	case MSR_IA32_SYSENTER_CS:
		svm->vmcb->save.sysenter_cs = data;
		break;
	case MSR_IA32_SYSENTER_EIP:
		svm->vmcb->save.sysenter_eip = data;
		break;
	case MSR_IA32_SYSENTER_ESP:
		svm->vmcb->save.sysenter_esp = data;
		break;
	default:
		return kvm_set_msr_common(vcpu, ecx, data);
	}
	return 0;
}