/*
 * svm.c — KVM AMD SVM (Secure Virtual Machine) support.
 *
 * NOTE(review): this file was pasted from a code-sharing website; the
 * top of the file (includes, helpers, and the head of svm_set_msr())
 * lies outside this fragment.
 */
/*
 * NOTE(review): tail of svm_set_msr() — the switch header and earlier
 * cases are above this fragment.  The visible cases store the SYSENTER
 * MSRs into the VMCB save area; anything unhandled falls back to
 * kvm_set_msr_common().
 */
		svm->vmcb->save.sysenter_cs = data;
		break;
	case MSR_IA32_SYSENTER_EIP:
		svm->vmcb->save.sysenter_eip = data;
		break;
	case MSR_IA32_SYSENTER_ESP:
		svm->vmcb->save.sysenter_esp = data;
		break;
	default:
		return kvm_set_msr_common(vcpu, ecx, data);
	}
	return 0;
}

/*
 * Handle a WRMSR intercept.  The MSR index is in guest ECX and the
 * value in guest EDX:EAX (guest EAX is held in the VMCB save area;
 * "& -1u" masks each half to its low 32 bits).  On failure inject
 * #GP(0); otherwise advance rip past the 2-byte WRMSR opcode.
 * Returns 1: resume the guest.
 */
static int wrmsr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	u32 ecx = svm->vcpu.regs[VCPU_REGS_RCX];
	u64 data = (svm->vmcb->save.rax & -1u)
		| ((u64)(svm->vcpu.regs[VCPU_REGS_RDX] & -1u) << 32);
	svm->next_rip = svm->vmcb->save.rip + 2;
	if (svm_set_msr(&svm->vcpu, ecx, data))
		svm_inject_gp(&svm->vcpu, 0);
	else
		skip_emulated_instruction(&svm->vcpu);
	return 1;
}

/*
 * Dispatch an MSR intercept: exit_info_1 is non-zero for WRMSR and
 * zero for RDMSR.
 */
static int msr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	if (svm->vmcb->control.exit_info_1)
		return wrmsr_interception(svm, kvm_run);
	else
		return rdmsr_interception(svm, kvm_run);
}

/*
 * The guest opened its interrupt window (VINTR intercept fired): drop
 * the one-shot VINTR intercept and the pending virtual interrupt
 * request, then decide whether to exit to userspace.
 */
static int interrupt_window_interception(struct vcpu_svm *svm,
					 struct kvm_run *kvm_run)
{
	svm->vmcb->control.intercept &= ~(1ULL << INTERCEPT_VINTR);
	svm->vmcb->control.int_ctl &= ~V_IRQ_MASK;
	/*
	 * If the user space waits to inject interrupts, exit as soon as
	 * possible
	 */
	if (kvm_run->request_interrupt_window &&
	    !svm->vcpu.irq_summary) {
		++svm->vcpu.stat.irq_window_exits;
		kvm_run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
		return 0;
	}
	return 1;
}

/*
 * Exit-code -> handler dispatch table.  Sparse entries are NULL and
 * are reported as KVM_EXIT_UNKNOWN by handle_exit().
 * NOTE(review): no entries exist for DR4/DR6 read/write intercepts —
 * presumably those exits are never enabled; confirm against the
 * intercept setup elsewhere in this file.
 */
static int (*svm_exit_handlers[])(struct vcpu_svm *svm,
				  struct kvm_run *kvm_run) = {
	[SVM_EXIT_READ_CR0] = emulate_on_interception,
	[SVM_EXIT_READ_CR3] = emulate_on_interception,
	[SVM_EXIT_READ_CR4] = emulate_on_interception,
	/* for now: */
	[SVM_EXIT_WRITE_CR0] = emulate_on_interception,
	[SVM_EXIT_WRITE_CR3] = emulate_on_interception,
	[SVM_EXIT_WRITE_CR4] = emulate_on_interception,
	[SVM_EXIT_READ_DR0] = emulate_on_interception,
	[SVM_EXIT_READ_DR1] = emulate_on_interception,
	[SVM_EXIT_READ_DR2] = emulate_on_interception,
	[SVM_EXIT_READ_DR3] = emulate_on_interception,
	[SVM_EXIT_WRITE_DR0] = emulate_on_interception,
	[SVM_EXIT_WRITE_DR1] = emulate_on_interception,
	[SVM_EXIT_WRITE_DR2] = emulate_on_interception,
	[SVM_EXIT_WRITE_DR3] = emulate_on_interception,
	[SVM_EXIT_WRITE_DR5] = emulate_on_interception,
	[SVM_EXIT_WRITE_DR7] = emulate_on_interception,
	[SVM_EXIT_EXCP_BASE + PF_VECTOR] = pf_interception,
	[SVM_EXIT_EXCP_BASE + NM_VECTOR] = nm_interception,
	[SVM_EXIT_INTR] = nop_on_interception,
	[SVM_EXIT_NMI] = nop_on_interception,
	[SVM_EXIT_SMI] = nop_on_interception,
	[SVM_EXIT_INIT] = nop_on_interception,
	[SVM_EXIT_VINTR] = interrupt_window_interception,
	/* [SVM_EXIT_CR0_SEL_WRITE] = emulate_on_interception, */
	[SVM_EXIT_CPUID] = cpuid_interception,
	[SVM_EXIT_INVD] = emulate_on_interception,
	[SVM_EXIT_HLT] = halt_interception,
	[SVM_EXIT_INVLPG] = emulate_on_interception,
	[SVM_EXIT_INVLPGA] = invalid_op_interception,
	[SVM_EXIT_IOIO] = io_interception,
	[SVM_EXIT_MSR] = msr_interception,
	[SVM_EXIT_TASK_SWITCH] = task_switch_interception,
	[SVM_EXIT_SHUTDOWN] = shutdown_interception,
	[SVM_EXIT_VMRUN] = invalid_op_interception,
	[SVM_EXIT_VMMCALL] = vmmcall_interception,
	[SVM_EXIT_VMLOAD] = invalid_op_interception,
	[SVM_EXIT_VMSAVE] = invalid_op_interception,
	[SVM_EXIT_STGI] = invalid_op_interception,
	[SVM_EXIT_CLGI] = invalid_op_interception,
	[SVM_EXIT_SKINIT] = invalid_op_interception,
	[SVM_EXIT_WBINVD] = emulate_on_interception,
	[SVM_EXIT_MONITOR] = invalid_op_interception,
	[SVM_EXIT_MWAIT] = invalid_op_interception,
};

/*
 * Top-level #VMEXIT handler: requeue any undelivered virtual
 * interrupt, report hardware entry failures (SVM_EXIT_ERR) and unknown
 * exit codes to userspace, otherwise dispatch through
 * svm_exit_handlers[].  Returns 0 to exit to userspace, the handler's
 * return value otherwise.
 */
static int handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u32 exit_code = svm->vmcb->control.exit_code;

	kvm_reput_irq(svm);

	if (svm->vmcb->control.exit_code == SVM_EXIT_ERR) {
		kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
		kvm_run->fail_entry.hardware_entry_failure_reason
			= svm->vmcb->control.exit_code;
		return 0;
	}

	/* NOTE(review): "exit_ini_info" typo is in the original log string. */
	if (is_external_interrupt(svm->vmcb->control.exit_int_info) &&
	    exit_code != SVM_EXIT_EXCP_BASE + PF_VECTOR)
		printk(KERN_ERR "%s: unexpected exit_ini_info 0x%x "
		       "exit_code 0x%x\n",
		       __FUNCTION__, svm->vmcb->control.exit_int_info,
		       exit_code);

	if (exit_code >= ARRAY_SIZE(svm_exit_handlers)
	    || svm_exit_handlers[exit_code] == 0) {
		kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
		kvm_run->hw.hardware_exit_reason = exit_code;
		return 0;
	}

	return svm_exit_handlers[exit_code](svm, kvm_run);
}

/*
 * Mark this cpu's TSS descriptor available again and reload TR —
 * needed after the world switch so load_TR_desc() does not fault on a
 * busy descriptor.
 */
static void reload_tss(struct kvm_vcpu *vcpu)
{
	int cpu = raw_smp_processor_id();

	struct svm_cpu_data *svm_data = per_cpu(svm_data, cpu);
	svm_data->tss_desc->type = 9; /* available 32/64-bit TSS */
	load_TR_desc();
}

/*
 * Per-VMRUN preparation: default to no TLB control, and allocate a new
 * ASID if the vcpu migrated to a different cpu or its ASID generation
 * went stale.
 */
static void pre_svm_run(struct vcpu_svm *svm)
{
	int cpu = raw_smp_processor_id();

	struct svm_cpu_data *svm_data = per_cpu(svm_data, cpu);

	svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;
	if (svm->vcpu.cpu != cpu ||
	    svm->asid_generation != svm_data->asid_generation)
		new_asid(svm, svm_data);
}

/*
 * Program a virtual interrupt into the VMCB: set V_IRQ with the given
 * vector and force the priority field to 0xf (the commented-out
 * expression shows the "real" priority that is deliberately not used).
 */
static inline void svm_inject_irq(struct vcpu_svm *svm, int irq)
{
	struct vmcb_control_area *control;

	control = &svm->vmcb->control;
	control->int_vector = irq;
	control->int_ctl &= ~V_INTR_PRIO_MASK;
	control->int_ctl |= V_IRQ_MASK |
		((/*control->int_vector >> 4*/ 0xf) << V_INTR_PRIO_SHIFT);
}

/* kvm_x86_ops.set_irq: inject an interrupt into the guest. */
static void svm_set_irq(struct kvm_vcpu *vcpu, int irq)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm_inject_irq(svm, irq);
}

/*
 * Decide what interrupt, if any, to deliver on the next entry
 * (in-kernel irqchip path):
 *  - first re-inject an interrupt that was cut short by the #VMEXIT
 *    (exit_int_info valid with type 0, i.e. external interrupt);
 *  - bail if a virtual interrupt is already pending or none is ready;
 *  - if the guest cannot take it now (IF clear, interrupt shadow, or
 *    an event already queued), arm the VINTR intercept so we get a
 *    window notification instead;
 *  - otherwise pop the interrupt from the PIC/APIC and inject it.
 */
static void svm_intr_assist(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb *vmcb = svm->vmcb;
	int intr_vector = -1;

	kvm_inject_pending_timer_irqs(vcpu);
	if ((vmcb->control.exit_int_info & SVM_EVTINJ_VALID) &&
	    ((vmcb->control.exit_int_info & SVM_EVTINJ_TYPE_MASK) == 0)) {
		intr_vector = vmcb->control.exit_int_info &
			      SVM_EVTINJ_VEC_MASK;
		vmcb->control.exit_int_info = 0;
		svm_inject_irq(svm, intr_vector);
		return;
	}

	if (vmcb->control.int_ctl & V_IRQ_MASK)
		return;

	if (!kvm_cpu_has_interrupt(vcpu))
		return;

	if (!(vmcb->save.rflags & X86_EFLAGS_IF) ||
	    (vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK) ||
	    (vmcb->control.event_inj & SVM_EVTINJ_VALID)) {
		/* unable to deliver irq, set pending irq */
		vmcb->control.intercept |= (1ULL << INTERCEPT_VINTR);
		svm_inject_irq(svm, 0x0);
		return;
	}
	/* Okay, we can deliver the interrupt: grab it and update PIC state. */
	intr_vector = kvm_cpu_get_interrupt(vcpu);
	svm_inject_irq(svm, intr_vector);
	kvm_timer_intr_post(vcpu, intr_vector);
}

/*
 * After a #VMEXIT: if a virtual interrupt was left undelivered and the
 * irqchip lives in userspace, push its vector back onto the vcpu's
 * pending-irq bitmap; also recompute interrupt_window_open from the
 * interrupt-shadow state.
 */
static void kvm_reput_irq(struct vcpu_svm *svm)
{
	struct vmcb_control_area *control = &svm->vmcb->control;

	if ((control->int_ctl & V_IRQ_MASK)
	    && !irqchip_in_kernel(svm->vcpu.kvm)) {
		control->int_ctl &= ~V_IRQ_MASK;
		push_irq(&svm->vcpu, control->int_vector);
	}

	svm->vcpu.interrupt_window_open =
		!(control->int_state & SVM_INTERRUPT_SHADOW_MASK);
}

/*
 * Userspace-irqchip path: pull the lowest pending vector out of the
 * vcpu's irq_pending bitmap (clearing the summary bit when a word
 * empties) and inject it.
 */
static void svm_do_inject_vector(struct vcpu_svm *svm)
{
	struct kvm_vcpu *vcpu = &svm->vcpu;
	int word_index = __ffs(vcpu->irq_summary);
	int bit_index = __ffs(vcpu->irq_pending[word_index]);
	int irq = word_index * BITS_PER_LONG + bit_index;

	clear_bit(bit_index, &vcpu->irq_pending[word_index]);
	if (!vcpu->irq_pending[word_index])
		clear_bit(word_index, &vcpu->irq_summary);
	svm_inject_irq(svm, irq);
}

/*
 * Userspace-irqchip counterpart of svm_intr_assist(): inject a pending
 * vector if the window is open, otherwise arm (or disarm) the VINTR
 * intercept so we are told when the window opens.
 */
static void do_interrupt_requests(struct kvm_vcpu *vcpu,
				  struct kvm_run *kvm_run)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb_control_area *control = &svm->vmcb->control;

	svm->vcpu.interrupt_window_open =
		(!(control->int_state & SVM_INTERRUPT_SHADOW_MASK) &&
		 (svm->vmcb->save.rflags & X86_EFLAGS_IF));

	if (svm->vcpu.interrupt_window_open && svm->vcpu.irq_summary)
		/*
		 * If interrupts enabled, and not blocked by sti or mov ss. Good.
		 */
		svm_do_inject_vector(svm);

	/*
	 * Interrupts blocked.  Wait for unblock.
	 */
	if (!svm->vcpu.interrupt_window_open &&
	    (svm->vcpu.irq_summary || kvm_run->request_interrupt_window)) {
		control->intercept |= 1ULL << INTERCEPT_VINTR;
	} else
		control->intercept &= ~(1ULL << INTERCEPT_VINTR);
}

/* Save host DR0-DR3 into db_regs[0..3]. */
static void save_db_regs(unsigned long *db_regs)
{
	asm volatile ("mov %%dr0, %0" : "=r"(db_regs[0]));
	asm volatile ("mov %%dr1, %0" : "=r"(db_regs[1]));
	asm volatile ("mov %%dr2, %0" : "=r"(db_regs[2]));
	asm volatile ("mov %%dr3, %0" : "=r"(db_regs[3]));
}

/* Load DR0-DR3 from db_regs[0..3]. */
static void load_db_regs(unsigned long *db_regs)
{
	asm volatile ("mov %0, %%dr0" : : "r"(db_regs[0]));
	asm volatile ("mov %0, %%dr1" : : "r"(db_regs[1]));
	asm volatile ("mov %0, %%dr2" : : "r"(db_regs[2]));
	asm volatile ("mov %0, %%dr3" : : "r"(db_regs[3]));
}

/* TLB flush on SVM == allocate a fresh ASID for this vcpu. */
static void svm_flush_tlb(struct kvm_vcpu *vcpu)
{
	force_new_asid(vcpu);
}

/* Nothing to prepare: all host state is handled inside svm_vcpu_run(). */
static void svm_prepare_guest_switch(struct kvm_vcpu *vcpu)
{
}

/*
 * The world switch.  Saves host segment/debug/MSR/CR2 state, runs the
 * guest via VMLOAD/VMRUN/VMSAVE with GIF clear, then restores host
 * state.  The inline asm spills/reloads all callee-visible GPRs
 * through svm->vcpu.regs[]; RAX is used to pass the VMCB physical
 * address (input constraint "a").  Guest debug registers are swapped
 * in only when the guest's DR7 enables breakpoints.
 * NOTE(review): interrupts are enabled around VMRUN while GIF is clear
 * (clgi/stgi bracket the switch), so no physical interrupt is taken
 * until after stgi().
 */
static void svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u16 fs_selector;
	u16 gs_selector;
	u16 ldt_selector;

	pre_svm_run(svm);

	save_host_msrs(vcpu);
	fs_selector = read_fs();
	gs_selector = read_gs();
	ldt_selector = read_ldt();
	svm->host_cr2 = kvm_read_cr2();
	svm->host_dr6 = read_dr6();
	svm->host_dr7 = read_dr7();
	svm->vmcb->save.cr2 = vcpu->cr2;

	if (svm->vmcb->save.dr7 & 0xff) {
		write_dr7(0);
		save_db_regs(svm->host_db_regs);
		load_db_regs(svm->db_regs);
	}

	clgi();

	local_irq_enable();

	asm volatile (
#ifdef CONFIG_X86_64
		"push %%rbx; push %%rcx; push %%rdx;"
		"push %%rsi; push %%rdi; push %%rbp;"
		"push %%r8; push %%r9; push %%r10; push %%r11;"
		"push %%r12; push %%r13; push %%r14; push %%r15;"
#else
		"push %%ebx; push %%ecx; push %%edx;"
		"push %%esi; push %%edi; push %%ebp;"
#endif

#ifdef CONFIG_X86_64
		"mov %c[rbx](%[svm]), %%rbx \n\t"
		"mov %c[rcx](%[svm]), %%rcx \n\t"
		"mov %c[rdx](%[svm]), %%rdx \n\t"
		"mov %c[rsi](%[svm]), %%rsi \n\t"
		"mov %c[rdi](%[svm]), %%rdi \n\t"
		"mov %c[rbp](%[svm]), %%rbp \n\t"
		"mov %c[r8](%[svm]), %%r8 \n\t"
		"mov %c[r9](%[svm]), %%r9 \n\t"
		"mov %c[r10](%[svm]), %%r10 \n\t"
		"mov %c[r11](%[svm]), %%r11 \n\t"
		"mov %c[r12](%[svm]), %%r12 \n\t"
		"mov %c[r13](%[svm]), %%r13 \n\t"
		"mov %c[r14](%[svm]), %%r14 \n\t"
		"mov %c[r15](%[svm]), %%r15 \n\t"
#else
		"mov %c[rbx](%[svm]), %%ebx \n\t"
		"mov %c[rcx](%[svm]), %%ecx \n\t"
		"mov %c[rdx](%[svm]), %%edx \n\t"
		"mov %c[rsi](%[svm]), %%esi \n\t"
		"mov %c[rdi](%[svm]), %%edi \n\t"
		"mov %c[rbp](%[svm]), %%ebp \n\t"
#endif

#ifdef CONFIG_X86_64
		/* Enter guest mode */
		"push %%rax \n\t"
		"mov %c[vmcb](%[svm]), %%rax \n\t"
		SVM_VMLOAD "\n\t"
		SVM_VMRUN "\n\t"
		SVM_VMSAVE "\n\t"
		"pop %%rax \n\t"
#else
		/* Enter guest mode */
		"push %%eax \n\t"
		"mov %c[vmcb](%[svm]), %%eax \n\t"
		SVM_VMLOAD "\n\t"
		SVM_VMRUN "\n\t"
		SVM_VMSAVE "\n\t"
		"pop %%eax \n\t"
#endif

		/* Save guest registers, load host registers */
#ifdef CONFIG_X86_64
		"mov %%rbx, %c[rbx](%[svm]) \n\t"
		"mov %%rcx, %c[rcx](%[svm]) \n\t"
		"mov %%rdx, %c[rdx](%[svm]) \n\t"
		"mov %%rsi, %c[rsi](%[svm]) \n\t"
		"mov %%rdi, %c[rdi](%[svm]) \n\t"
		"mov %%rbp, %c[rbp](%[svm]) \n\t"
		"mov %%r8, %c[r8](%[svm]) \n\t"
		"mov %%r9, %c[r9](%[svm]) \n\t"
		"mov %%r10, %c[r10](%[svm]) \n\t"
		"mov %%r11, %c[r11](%[svm]) \n\t"
		"mov %%r12, %c[r12](%[svm]) \n\t"
		"mov %%r13, %c[r13](%[svm]) \n\t"
		"mov %%r14, %c[r14](%[svm]) \n\t"
		"mov %%r15, %c[r15](%[svm]) \n\t"

		"pop %%r15; pop %%r14; pop %%r13; pop %%r12;"
		"pop %%r11; pop %%r10; pop %%r9; pop %%r8;"
		"pop %%rbp; pop %%rdi; pop %%rsi;"
		"pop %%rdx; pop %%rcx; pop %%rbx; \n\t"
#else
		"mov %%ebx, %c[rbx](%[svm]) \n\t"
		"mov %%ecx, %c[rcx](%[svm]) \n\t"
		"mov %%edx, %c[rdx](%[svm]) \n\t"
		"mov %%esi, %c[rsi](%[svm]) \n\t"
		"mov %%edi, %c[rdi](%[svm]) \n\t"
		"mov %%ebp, %c[rbp](%[svm]) \n\t"

		"pop %%ebp; pop %%edi; pop %%esi;"
		"pop %%edx; pop %%ecx; pop %%ebx; \n\t"
#endif
		:
		: [svm]"a"(svm),
		  [vmcb]"i"(offsetof(struct vcpu_svm, vmcb_pa)),
		  [rbx]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_RBX])),
		  [rcx]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_RCX])),
		  [rdx]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_RDX])),
		  [rsi]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_RSI])),
		  [rdi]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_RDI])),
		  [rbp]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_RBP]))
#ifdef CONFIG_X86_64
		  , [r8]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_R8])),
		  [r9]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_R9])),
		  [r10]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_R10])),
		  [r11]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_R11])),
		  [r12]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_R12])),
		  [r13]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_R13])),
		  [r14]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_R14])),
		  [r15]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_R15]))
#endif
		: "cc", "memory");

	if ((svm->vmcb->save.dr7 & 0xff))
		load_db_regs(svm->host_db_regs);

	vcpu->cr2 = svm->vmcb->save.cr2;

	write_dr6(svm->host_dr6);
	write_dr7(svm->host_dr7);
	kvm_write_cr2(svm->host_cr2);

	load_fs(fs_selector);
	load_gs(gs_selector);
	load_ldt(ldt_selector);
	load_host_msrs(vcpu);

	reload_tss(vcpu);

	local_irq_disable();

	stgi();

	svm->next_rip = 0;
}

/*
 * Load a new guest CR3 and flush the TLB (new ASID).  If the FPU is
 * currently active for the guest, re-arm the #NM intercept and set
 * CR0.TS so the next FPU use traps again after the address-space
 * change.
 */
static void svm_set_cr3(struct kvm_vcpu *vcpu, unsigned long root)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->vmcb->save.cr3 = root;
	force_new_asid(vcpu);

	if (vcpu->fpu_active) {
		svm->vmcb->control.intercept_exceptions |= (1 << NM_VECTOR);
		svm->vmcb->save.cr0 |= X86_CR0_TS;
		vcpu->fpu_active = 0;
	}
}

/*
 * Inject a page fault into the guest.  If the #VMEXIT itself was a
 * page fault, escalate to a double fault (#DF) instead; otherwise set
 * the guest's CR2 and queue #PF with the given error code.
 */
static void svm_inject_page_fault(struct kvm_vcpu *vcpu,
				  unsigned long addr,
				  uint32_t err_code)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	uint32_t exit_int_info = svm->vmcb->control.exit_int_info;

	++vcpu->stat.pf_guest;

	if (is_page_fault(exit_int_info)) {
		svm->vmcb->control.event_inj_err = 0;
		svm->vmcb->control.event_inj = SVM_EVTINJ_VALID |
					       SVM_EVTINJ_VALID_ERR |
					       SVM_EVTINJ_TYPE_EXEPT |
					       DF_VECTOR;
		return;
	}
	vcpu->cr2 = addr;
	svm->vmcb->save.cr2 = addr;
	svm->vmcb->control.event_inj = SVM_EVTINJ_VALID |
				       SVM_EVTINJ_VALID_ERR |
				       SVM_EVTINJ_TYPE_EXEPT |
				       PF_VECTOR;
	svm->vmcb->control.event_inj_err = err_code;
}

/* Returns 1 if the BIOS disabled SVM via the VM_CR MSR lock bit. */
static int is_disabled(void)
{
	u64 vm_cr;

	rdmsrl(MSR_VM_CR, vm_cr);
	if (vm_cr & (1 << SVM_VM_CR_SVM_DISABLE))
		return 1;

	return 0;
}

/* Write the SVM hypercall sequence: VMMCALL (0f 01 d9) then RET (c3). */
static void
svm_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
{
	/*
	 * Patch in the VMMCALL instruction:
	 */
	hypercall[0] = 0x0f;
	hypercall[1] = 0x01;
	hypercall[2] = 0xd9;
	hypercall[3] = 0xc3;
}

/* Per-cpu compatibility probe: always reports success (writes 0). */
static void svm_check_processor_compat(void *rtn)
{
	*(int *)rtn = 0;
}

/* SVM implementation of the arch-neutral kvm_x86_ops interface. */
static struct kvm_x86_ops svm_x86_ops = {
	.cpu_has_kvm_support = has_svm,
	.disabled_by_bios = is_disabled,
	.hardware_setup = svm_hardware_setup,
	.hardware_unsetup = svm_hardware_unsetup,
	.check_processor_compatibility = svm_check_processor_compat,
	.hardware_enable = svm_hardware_enable,
	.hardware_disable = svm_hardware_disable,

	.vcpu_create = svm_create_vcpu,
	.vcpu_free = svm_free_vcpu,
	.vcpu_reset = svm_vcpu_reset,

	.prepare_guest_switch = svm_prepare_guest_switch,
	.vcpu_load = svm_vcpu_load,
	.vcpu_put = svm_vcpu_put,
	.vcpu_decache = svm_vcpu_decache,

	.set_guest_debug = svm_guest_debug,
	.get_msr = svm_get_msr,
	.set_msr = svm_set_msr,
	.get_segment_base = svm_get_segment_base,
	.get_segment = svm_get_segment,
	.set_segment = svm_set_segment,
	.get_cs_db_l_bits = kvm_get_cs_db_l_bits,
	.decache_cr4_guest_bits = svm_decache_cr4_guest_bits,
	.set_cr0 = svm_set_cr0,
	.set_cr3 = svm_set_cr3,
	.set_cr4 = svm_set_cr4,
	.set_efer = svm_set_efer,
	.get_idt = svm_get_idt,
	.set_idt = svm_set_idt,
	.get_gdt = svm_get_gdt,
	.set_gdt = svm_set_gdt,
	.get_dr = svm_get_dr,
	.set_dr = svm_set_dr,
	.cache_regs = svm_cache_regs,
	.decache_regs = svm_decache_regs,
	.get_rflags = svm_get_rflags,
	.set_rflags = svm_set_rflags,

	.tlb_flush = svm_flush_tlb,
	.inject_page_fault = svm_inject_page_fault,

	.inject_gp = svm_inject_gp,

	.run = svm_vcpu_run,
	.handle_exit = handle_exit,
	.skip_emulated_instruction = skip_emulated_instruction,
	.patch_hypercall = svm_patch_hypercall,
	.get_irq = svm_get_irq,
	.set_irq = svm_set_irq,
	.inject_pending_irq = svm_intr_assist,
	.inject_pending_vectors = do_interrupt_requests,
};

/* Module entry: register the SVM backend with the KVM core. */
static int __init svm_init(void)
{
	return kvm_init_x86(&svm_x86_ops, sizeof(struct vcpu_svm),
			    THIS_MODULE);
}

/* Module exit: unregister from the KVM core. */
static void __exit svm_exit(void)
{
	kvm_exit_x86();
}

module_init(svm_init)
module_exit(svm_exit)
/*
 * NOTE(review): trailing paste artifact — keyboard-shortcut help text
 * from the code-sharing website this fragment was copied from, kept
 * here (translated) only so no content is lost:
 *   copy code: Ctrl+C, search code: Ctrl+F, fullscreen: F11,
 *   switch theme: Ctrl+Shift+D, show shortcuts: ?,
 *   increase font size: Ctrl+=, decrease font size: Ctrl+-
 * Not part of the original kernel source.
 */