亚洲欧美第一页_禁久久精品乱码_粉嫩av一区二区三区免费野_久草精品视频

? 歡迎來到蟲蟲下載站! | ?? 資源下載 ?? 資源專輯 ?? 關於我們
? 蟲蟲下載站

?? vmx.c

?? linux 內(nèi)核源代碼
?? C
?? 第 1 頁 / 共 5 頁
字號:
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include "kvm.h"
#include "x86_emulate.h"
#include "irq.h"
#include "vmx.h"
#include "segment_descriptor.h"

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/sched.h>

#include <asm/io.h>
#include <asm/desc.h>

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

/*
 * In-memory layout of a VMCS region as mandated by the hardware: the
 * revision id and abort indicator, followed by opaque, CPU-owned data.
 */
struct vmcs {
	u32 revision_id;
	u32 abort;
	char data[0];	/* hardware-formatted VMCS data follows the header */
};

/*
 * Per-vcpu VMX state wrapping the generic kvm_vcpu.  Tracks the vcpu's
 * VMCS, whether it has been VMLAUNCHed, the guest/host MSR save areas,
 * and which host segment/LDT selectors must be restored on vcpu_put.
 */
struct vcpu_vmx {
	struct kvm_vcpu       vcpu;
	int                   launched;	/* nonzero once VMLAUNCH has succeeded */
	u8                    fail;
	struct kvm_msr_entry *guest_msrs;
	struct kvm_msr_entry *host_msrs;
	int                   nmsrs;		/* total entries in the msr arrays */
	int                   save_nmsrs;	/* entries switched on entry/exit */
	int                   msr_offset_efer;	/* index of MSR_EFER in the arrays */
#ifdef CONFIG_X86_64
	int                   msr_offset_kernel_gs_base;
#endif
	struct vmcs          *vmcs;
	/* Host segment state captured by vmx_save_host_state(). */
	struct {
		int           loaded;
		u16           fs_sel, gs_sel, ldt_sel;
		int           gs_ldt_reload_needed;
		int           fs_reload_needed;
	}host_state;
};

/* Recover the containing vcpu_vmx from the embedded kvm_vcpu. */
static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
{
	return container_of(vcpu, struct vcpu_vmx, vcpu);
}

static int init_rmode_tss(struct kvm *kvm);

/* Per-cpu VMXON region and the VMCS currently loaded on each cpu. */
static DEFINE_PER_CPU(struct vmcs *, vmxarea);
static DEFINE_PER_CPU(struct vmcs *, current_vmcs);

/* I/O permission bitmaps (2 pages covering ports 0x0000-0xffff). */
static struct page *vmx_io_bitmap_a;
static struct page *vmx_io_bitmap_b;

/* EFER bits that may differ between host and guest and need switching. */
#define EFER_SAVE_RESTORE_BITS ((u64)EFER_SCE)

/*
 * VMX capabilities probed from the capability MSRs at module init;
 * used when allocating VMCSs and programming the execution controls.
 */
static struct vmcs_config {
	int size;
	int order;
	u32 revision_id;
	u32 pin_based_exec_ctrl;
	u32 cpu_based_exec_ctrl;
	u32 vmexit_ctrl;
	u32 vmentry_ctrl;
} vmcs_config;

/* Expand to the four VMCS field encodings that describe one guest segment. */
#define VMX_SEGMENT_FIELD(seg)					\
	[VCPU_SREG_##seg] = {                                   \
		.selector = GUEST_##seg##_SELECTOR,		\
		.base = GUEST_##seg##_BASE,		   	\
		.limit = GUEST_##seg##_LIMIT,		   	\
		.ar_bytes = GUEST_##seg##_AR_BYTES,	   	\
	}

/* Map VCPU_SREG_* indices to the VMCS fields of each guest segment. */
static struct kvm_vmx_segment_field {
	unsigned selector;
	unsigned base;
	unsigned limit;
	unsigned ar_bytes;
} kvm_vmx_segment_fields[] = {
	VMX_SEGMENT_FIELD(CS),
	VMX_SEGMENT_FIELD(DS),
	VMX_SEGMENT_FIELD(ES),
	VMX_SEGMENT_FIELD(FS),
	VMX_SEGMENT_FIELD(GS),
	VMX_SEGMENT_FIELD(SS),
	VMX_SEGMENT_FIELD(TR),
	VMX_SEGMENT_FIELD(LDTR),
};

/*
 * Keep MSR_K6_STAR at the end, as setup_msrs() will try to optimize it
 * away by decrementing the array size.
 */
static const u32 vmx_msr_index[] = {
#ifdef CONFIG_X86_64
	MSR_SYSCALL_MASK, MSR_LSTAR, MSR_CSTAR, MSR_KERNEL_GS_BASE,
#endif
	MSR_EFER, MSR_K6_STAR,
};
#define NR_VMX_MSR ARRAY_SIZE(vmx_msr_index)

/* Write n saved MSR values from e[] back into the CPU's MSRs. */
static void load_msrs(struct kvm_msr_entry *e, int n)
{
	int i;

	for (i = 0; i < n; ++i)
		wrmsrl(e[i].index, e[i].data);
}

/* Read n MSRs from the CPU into the save area e[]. */
static void save_msrs(struct kvm_msr_entry *e, int n)
{
	int i;

	for (i = 0; i < n; ++i)
		rdmsrl(e[i].index, e[i].data);
}

/* Extract from a saved EFER value only the bits we switch on entry/exit. */
static inline u64 msr_efer_save_restore_bits(struct kvm_msr_entry msr)
{
	return (u64)msr.data & EFER_SAVE_RESTORE_BITS;
}

/*
 * True when host and guest disagree on the switched EFER bits, i.e. a
 * transition EFER must be loaded around guest entry.
 */
static inline int msr_efer_need_save_restore(struct vcpu_vmx *vmx)
{
	int efer_offset = vmx->msr_offset_efer;
	return msr_efer_save_restore_bits(vmx->host_msrs[efer_offset]) !=
		msr_efer_save_restore_bits(vmx->guest_msrs[efer_offset]);
}

/* Does this exit interruption-info field describe a page fault (#PF)? */
static inline int is_page_fault(u32 intr_info)
{
	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
			     INTR_INFO_VALID_MASK)) ==
		(INTR_TYPE_EXCEPTION | PF_VECTOR | INTR_INFO_VALID_MASK);
}

/* Does this exit interruption-info field describe a device-not-available (#NM)? */
static inline int is_no_device(u32 intr_info)
{
	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
			     INTR_INFO_VALID_MASK)) ==
		(INTR_TYPE_EXCEPTION | NM_VECTOR | INTR_INFO_VALID_MASK);
}

/* Does this exit interruption-info field describe an external interrupt? */
static inline int is_external_interrupt(u32 intr_info)
{
	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
		== (INTR_TYPE_EXT_INTR | INTR_INFO_VALID_MASK);
}

/* Did hardware advertise (and did we enable) the TPR-shadow execution control? */
static inline int cpu_has_vmx_tpr_shadow(void)
{
	return (vmcs_config.cpu_based_exec_ctrl & CPU_BASED_TPR_SHADOW);
}

/* Use the TPR shadow only when supported and the irqchip is in-kernel. */
static inline int vm_need_tpr_shadow(struct kvm *kvm)
{
	return ((cpu_has_vmx_tpr_shadow()) && (irqchip_in_kernel(kvm)));
}

/*
 * Linear search for msr in the guest MSR save area; returns its index,
 * or -1 if it is not switched for this vcpu.
 */
static int __find_msr_index(struct vcpu_vmx *vmx, u32 msr)
{
	int i;

	for (i = 0; i < vmx->nmsrs; ++i)
		if (vmx->guest_msrs[i].index == msr)
			return i;
	return -1;
}

/* Like __find_msr_index(), but returns the entry itself (NULL if absent). */
static struct kvm_msr_entry *find_msr_entry(struct vcpu_vmx *vmx, u32 msr)
{
	int i;

	i = __find_msr_index(vmx, msr);
	if (i >= 0)
		return &vmx->guest_msrs[i];
	return NULL;
}

/*
 * VMCLEAR the given VMCS: flush its cached state to memory and mark it
 * inactive so it can be VMPTRLDed on another cpu.
 */
static void vmcs_clear(struct vmcs *vmcs)
{
	u64 phys_addr = __pa(vmcs);
	u8 error;

	/* setna captures CF/ZF, which VMCLEAR uses to report failure */
	asm volatile (ASM_VMX_VMCLEAR_RAX "; setna %0"
		      : "=g"(error) : "a"(&phys_addr), "m"(phys_addr)
		      : "cc", "memory");
	if (error)
		printk(KERN_ERR "kvm: vmclear fail: %p/%llx\n",
		       vmcs, phys_addr);
}

/*
 * Runs on the cpu that last used vmx's VMCS (possibly via IPI): clear the
 * VMCS there, drop the per-cpu current_vmcs cache, and record that cpu's
 * TSC so vmx_vcpu_load() can keep the guest TSC monotonic across cpus.
 */
static void __vcpu_clear(void *arg)
{
	struct vcpu_vmx *vmx = arg;
	int cpu = raw_smp_processor_id();

	if (vmx->vcpu.cpu == cpu)
		vmcs_clear(vmx->vmcs);
	if (per_cpu(current_vmcs, cpu) == vmx->vmcs)
		per_cpu(current_vmcs, cpu) = NULL;
	rdtscll(vmx->vcpu.host_tsc);
}

/*
 * Detach vmx's VMCS from whichever cpu holds it, cross-calling that cpu
 * if it is not the current one.  Forces a full VMLAUNCH next entry.
 */
static void vcpu_clear(struct vcpu_vmx *vmx)
{
	if (vmx->vcpu.cpu != raw_smp_processor_id() && vmx->vcpu.cpu != -1)
		smp_call_function_single(vmx->vcpu.cpu, __vcpu_clear,
					 vmx, 0, 1);
	else
		__vcpu_clear(vmx);
	vmx->launched = 0;
}

/* VMREAD a field from the current VMCS. */
static unsigned long vmcs_readl(unsigned long field)
{
	unsigned long value;

	asm volatile (ASM_VMX_VMREAD_RDX_RAX
		      : "=a"(value) : "d"(field) : "cc");
	return value;
}

static u16 vmcs_read16(unsigned long field)
{
	return vmcs_readl(field);
}

static u32 vmcs_read32(unsigned long field)
{
	return vmcs_readl(field);
}

static u64 vmcs_read64(unsigned long field)
{
#ifdef CONFIG_X86_64
	return vmcs_readl(field);
#else
	/* on 32-bit, a 64-bit field is read as two adjacent 32-bit halves */
	return vmcs_readl(field) | ((u64)vmcs_readl(field+1) << 32);
#endif
}

/* Report a failed VMWRITE, including the VM-instruction error code. */
static noinline void vmwrite_error(unsigned long field, unsigned long value)
{
	printk(KERN_ERR "vmwrite error: reg %lx value %lx (err %d)\n",
	       field, value, vmcs_read32(VM_INSTRUCTION_ERROR));
	dump_stack();
}

/* VMWRITE a field of the current VMCS, logging on failure. */
static void vmcs_writel(unsigned long field, unsigned long value)
{
	u8 error;

	asm volatile (ASM_VMX_VMWRITE_RAX_RDX "; setna %0"
		       : "=q"(error) : "a"(value), "d"(field) : "cc" );
	if (unlikely(error))
		vmwrite_error(field, value);
}

static void vmcs_write16(unsigned long field, u16 value)
{
	vmcs_writel(field, value);
}

static void vmcs_write32(unsigned long field, u32 value)
{
	vmcs_writel(field, value);
}

static void vmcs_write64(unsigned long field, u64 value)
{
#ifdef CONFIG_X86_64
	vmcs_writel(field, value);
#else
	/* 32-bit: write the low half, then the high half at field+1 */
	vmcs_writel(field, value);
	asm volatile ("");	/* barrier: keep the two writes ordered */
	vmcs_writel(field+1, value >> 32);
#endif
}

/* Read-modify-write helpers for individual bits of a VMCS field. */
static void vmcs_clear_bits(unsigned long field, u32 mask)
{
	vmcs_writel(field, vmcs_readl(field) & ~mask);
}

static void vmcs_set_bits(unsigned long field, u32 mask)
{
	vmcs_writel(field, vmcs_readl(field) | mask);
}

/*
 * Recompute which guest exceptions cause a vmexit: always #PF; #NM while
 * the guest FPU is lazily unloaded; #DB when guest debugging is active;
 * and everything while in (emulated) real mode.
 */
static void update_exception_bitmap(struct kvm_vcpu *vcpu)
{
	u32 eb;

	eb = 1u << PF_VECTOR;
	if (!vcpu->fpu_active)
		eb |= 1u << NM_VECTOR;
	if (vcpu->guest_debug.enabled)
		eb |= 1u << 1;	/* #DB */
	if (vcpu->rmode.active)
		eb = ~0;	/* trap every exception in real mode */
	vmcs_write32(EXCEPTION_BITMAP, eb);
}

/*
 * Re-mark the host TSS descriptor as available and reload TR.  A vmexit
 * leaves the descriptor typed "busy", so LTR would fault otherwise.
 */
static void reload_tss(void)
{
#ifndef CONFIG_X86_64
	/*
	 * VT restores TR but not its size.  Useless.
	 */
	struct descriptor_table gdt;
	struct segment_descriptor *descs;

	get_gdt(&gdt);
	descs = (void *)gdt.base;
	descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
	load_TR_desc();
#endif
}

/*
 * Install a transition EFER for guest execution: the host EFER with the
 * switched bits (EFER_SCE) replaced by the guest's values.
 */
static void load_transition_efer(struct vcpu_vmx *vmx)
{
	u64 trans_efer;
	int efer_offset = vmx->msr_offset_efer;

	trans_efer = vmx->host_msrs[efer_offset].data;
	trans_efer &= ~EFER_SAVE_RESTORE_BITS;
	trans_efer |= msr_efer_save_restore_bits(vmx->guest_msrs[efer_offset]);
	wrmsrl(MSR_EFER, trans_efer);
	vmx->vcpu.stat.efer_reload++;
}

/*
 * Snapshot the host segment/LDT selectors into host_state and load the
 * guest MSRs, just before entering the guest.  Idempotent until the
 * matching vmx_load_host_state().
 */
static void vmx_save_host_state(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (vmx->host_state.loaded)
		return;

	vmx->host_state.loaded = 1;
	/*
	 * Set host fs and gs selectors.  Unfortunately, 22.2.3 does not
	 * allow segment selectors with cpl > 0 or ti == 1.
	 */
	vmx->host_state.ldt_sel = read_ldt();
	vmx->host_state.gs_ldt_reload_needed = vmx->host_state.ldt_sel;
	vmx->host_state.fs_sel = read_fs();
	if (!(vmx->host_state.fs_sel & 7)) {
		/* RPL 0, GDT selector: the VMCS may restore it directly */
		vmcs_write16(HOST_FS_SELECTOR, vmx->host_state.fs_sel);
		vmx->host_state.fs_reload_needed = 0;
	} else {
		vmcs_write16(HOST_FS_SELECTOR, 0);
		vmx->host_state.fs_reload_needed = 1;
	}
	vmx->host_state.gs_sel = read_gs();
	if (!(vmx->host_state.gs_sel & 7))
		vmcs_write16(HOST_GS_SELECTOR, vmx->host_state.gs_sel);
	else {
		vmcs_write16(HOST_GS_SELECTOR, 0);
		vmx->host_state.gs_ldt_reload_needed = 1;
	}

#ifdef CONFIG_X86_64
	vmcs_writel(HOST_FS_BASE, read_msr(MSR_FS_BASE));
	vmcs_writel(HOST_GS_BASE, read_msr(MSR_GS_BASE));
#else
	vmcs_writel(HOST_FS_BASE, segment_base(vmx->host_state.fs_sel));
	vmcs_writel(HOST_GS_BASE, segment_base(vmx->host_state.gs_sel));
#endif

#ifdef CONFIG_X86_64
	if (is_long_mode(&vmx->vcpu)) {
		save_msrs(vmx->host_msrs +
			  vmx->msr_offset_kernel_gs_base, 1);
	}
#endif
	load_msrs(vmx->guest_msrs, vmx->save_nmsrs);
	if (msr_efer_need_save_restore(vmx))
		load_transition_efer(vmx);
}

/*
 * Undo vmx_save_host_state(): restore the host selectors, LDT, TSS and
 * MSRs that guest execution clobbered.  No-op if host state isn't loaded.
 */
static void vmx_load_host_state(struct vcpu_vmx *vmx)
{
	unsigned long flags;

	if (!vmx->host_state.loaded)
		return;

	vmx->host_state.loaded = 0;
	if (vmx->host_state.fs_reload_needed)
		load_fs(vmx->host_state.fs_sel);
	if (vmx->host_state.gs_ldt_reload_needed) {
		load_ldt(vmx->host_state.ldt_sel);
		/*
		 * If we have to reload gs, we must take care to
		 * preserve our gs base.
		 */
		local_irq_save(flags);
		load_gs(vmx->host_state.gs_sel);
#ifdef CONFIG_X86_64
		/* load_gs clobbers MSR_GS_BASE; restore it from the VMCS */
		wrmsrl(MSR_GS_BASE, vmcs_readl(HOST_GS_BASE));
#endif
		local_irq_restore(flags);
	}
	reload_tss();
	save_msrs(vmx->guest_msrs, vmx->save_nmsrs);
	load_msrs(vmx->host_msrs, vmx->save_nmsrs);
	if (msr_efer_need_save_restore(vmx))
		load_msrs(vmx->host_msrs + vmx->msr_offset_efer, 1);
}

/*
 * Switches to specified vcpu, until a matching vcpu_put(), but assumes
 * vcpu mutex is already taken.
 */
static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	u64 phys_addr = __pa(vmx->vmcs);
	u64 tsc_this, delta;

	/* Migrating cpus: clear the VMCS on its old cpu first. */
	if (vcpu->cpu != cpu) {
		vcpu_clear(vmx);
		kvm_migrate_apic_timer(vcpu);
	}

	/* Make this vcpu's VMCS current on this cpu via VMPTRLD. */
	if (per_cpu(current_vmcs, cpu) != vmx->vmcs) {
		u8 error;

		per_cpu(current_vmcs, cpu) = vmx->vmcs;
		asm volatile (ASM_VMX_VMPTRLD_RAX "; setna %0"
			      : "=g"(error) : "a"(&phys_addr), "m"(phys_addr)
			      : "cc");
		if (error)
			printk(KERN_ERR "kvm: vmptrld %p/%llx fail\n",
			       vmx->vmcs, phys_addr);
	}

	if (vcpu->cpu != cpu) {
		struct descriptor_table dt;
		unsigned long sysenter_esp;

		vcpu->cpu = cpu;
		/*
		 * Linux uses per-cpu TSS and GDT, so set these when switching
		 * processors.
		 */
		vmcs_writel(HOST_TR_BASE, read_tr_base()); /* 22.2.4 */
		get_gdt(&dt);
		vmcs_writel(HOST_GDTR_BASE, dt.base);   /* 22.2.4 */

		rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
		vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */

		/*
		 * Make sure the time stamp counter is monotonous.
		 */
		rdtscll(tsc_this);
		/* host_tsc was sampled on the old cpu by __vcpu_clear() */
		delta = vcpu->host_tsc - tsc_this;
		vmcs_write64(TSC_OFFSET, vmcs_read64(TSC_OFFSET) + delta);
	}
}

/* Counterpart of vmx_vcpu_load(): restore host state and the host FPU. */
static void vmx_vcpu_put(struct kvm_vcpu *vcpu)
{
	vmx_load_host_state(to_vmx(vcpu));
	kvm_put_guest_fpu(vcpu);
}

/*
 * Give the guest direct FPU access: mirror the guest's real CR0.TS into
 * GUEST_CR0 and stop intercepting #NM.
 */
static void vmx_fpu_activate(struct kvm_vcpu *vcpu)
{
	if (vcpu->fpu_active)
		return;
	vcpu->fpu_active = 1;
	vmcs_clear_bits(GUEST_CR0, X86_CR0_TS);
	if (vcpu->cr0 & X86_CR0_TS)
		vmcs_set_bits(GUEST_CR0, X86_CR0_TS);
	update_exception_bitmap(vcpu);
}

/*
 * Lazily take the FPU away from the guest: force CR0.TS so the next FPU
 * use traps as #NM (now intercepted via the exception bitmap).
 */
static void vmx_fpu_deactivate(struct kvm_vcpu *vcpu)
{
	if (!vcpu->fpu_active)
		return;
	vcpu->fpu_active = 0;
	vmcs_set_bits(GUEST_CR0, X86_CR0_TS);
	update_exception_bitmap(vcpu);
}

/* NOTE(review): body continues on the next page of this listing. */
static void vmx_vcpu_decache(struct kvm_vcpu *vcpu)

?? 快捷鍵說明

復(fù)制代碼 Ctrl + C
搜索代碼 Ctrl + F
全屏模式 F11
切換主題 Ctrl + Shift + D
顯示快捷鍵 ?
增大字號 Ctrl + =
減小字號 Ctrl + -
亚洲欧美第一页_禁久久精品乱码_粉嫩av一区二区三区免费野_久草精品视频
激情六月婷婷久久| 97久久精品人人澡人人爽| 亚洲美女淫视频| 亚洲精品一区二区三区在线观看| 岛国av在线一区| 日日夜夜精品视频天天综合网| 欧美国产一区二区在线观看| 欧美三区在线视频| 99视频超级精品| 国产精品自在欧美一区| 丝袜亚洲精品中文字幕一区| 18欧美亚洲精品| 欧美成人欧美edvon| 欧美人伦禁忌dvd放荡欲情| 99综合电影在线视频| 国产一区二区三区在线观看免费| 亚洲制服丝袜在线| **网站欧美大片在线观看| 久久女同互慰一区二区三区| 欧美美女网站色| 91搞黄在线观看| 91视视频在线直接观看在线看网页在线看| 免费国产亚洲视频| 日韩国产在线观看| 性欧美疯狂xxxxbbbb| 亚洲精品高清在线观看| 国产精品大尺度| 中文成人av在线| 国产午夜精品一区二区三区视频 | 国模无码大尺度一区二区三区| 夜夜嗨av一区二区三区| 自拍视频在线观看一区二区| 国产女人aaa级久久久级| 精品免费国产一区二区三区四区| 欧美一区二区三区系列电影| 欧美亚洲综合在线| 欧美日韩一区二区不卡| 91福利小视频| 欧美自拍丝袜亚洲| 欧美视频三区在线播放| 欧美少妇性性性| 欧美日韩中文字幕一区| 欧美日韩一卡二卡三卡| 欧美日韩午夜精品| 欧美一区二区精品在线| 欧美一卡在线观看| 久久影视一区二区| 国产午夜精品福利| 国产精品丝袜在线| 亚洲视频一区二区在线观看| 亚洲色图.com| 亚洲乱码中文字幕| 一个色在线综合| 亚洲成人av电影| 麻豆精品在线看| 狠狠色丁香婷婷综合久久片| 国产麻豆9l精品三级站| 大尺度一区二区| 欧美午夜片在线看| 日韩一区和二区| 久久久精品2019中文字幕之3| 欧美国产日韩精品免费观看| 亚洲视频一区二区在线| 亚洲成人中文在线| 精品一区在线看| 成人激情免费网站| 在线观看日韩av先锋影音电影院| 欧美亚洲综合在线| 日韩欧美国产三级| 国产精品网友自拍| 亚洲一区在线免费观看| 男女视频一区二区| 从欧美一区二区三区| 欧美色图激情小说| 精品人在线二区三区| 国产精品女主播在线观看| 亚洲综合图片区| 国产精品综合视频| 欧美精品一二三| 日韩免费看网站| 亚洲欧美中日韩| 五月开心婷婷久久| 成人自拍视频在线观看| 欧美日韩在线一区二区| 久久久久久亚洲综合| 亚洲在线视频网站| 国产精品一区在线观看你懂的| 91福利在线免费观看| 久久综合久久综合九色| 亚洲成人手机在线| 成人精品免费网站| 日韩一区和二区| 亚洲一区自拍偷拍| 成人黄色免费短视频| 欧美不卡一区二区三区四区| 一区二区三区免费网站| 国产精品一区免费视频| 欧美男女性生活在线直播观看| 国产欧美一区二区精品性色| 人妖欧美一区二区| 91精彩视频在线观看| 国产色综合一区| 麻豆国产欧美日韩综合精品二区| 91网上在线视频| 久久精品网站免费观看| 日韩黄色片在线观看| 一本大道久久a久久综合| 国产婷婷色一区二区三区在线| 日韩中文字幕av电影| 色偷偷成人一区二区三区91| 国产视频一区二区三区在线观看| 美女性感视频久久| 欧美三级韩国三级日本三斤| 1000精品久久久久久久久| 精品一区二区三区不卡| 欧美挠脚心视频网站| 亚洲激情图片一区| 99精品国产99久久久久久白柏| 久久久久9999亚洲精品| 毛片av一区二区三区| 欧美日韩久久久一区| 依依成人精品视频| 99精品视频在线观看| 国产精品欧美经典| 处破女av一区二区| 国产精品无码永久免费888| 国产不卡高清在线观看视频| 久久久久久久国产精品影院| 国产一区二区影院| 久久九九久精品国产免费直播| 蜜桃精品视频在线观看| 日韩欧美国产一二三区| 美女视频一区在线观看| 精品少妇一区二区三区在线视频| 美女免费视频一区二区| 欧美一卡二卡在线观看| 久久丁香综合五月国产三级网站| 这里只有精品电影| 蜜臀99久久精品久久久久久软件| 欧美亚洲日本国产| 日韩avvvv在线播放| 欧美一区二区三区四区高清| 免费成人小视频| 精品国产99国产精品| 国产精品一区二区不卡| 国产精品久久久久精k8| 99精品国产91久久久久久 | 日韩av一级片| 欧美一区二区三区四区五区 | 色国产综合视频| 亚洲综合色视频| 91精品国产综合久久精品图片 | 91天堂素人约啪| 亚洲亚洲人成综合网络| 国产午夜精品在线观看| 成人黄色av网站在线| 亚洲三级在线播放| 欧美群妇大交群中文字幕| 美女网站视频久久| 国产精品久久久久久久久久久免费看| 成人激情黄色小说| 午夜精品久久久久久久99水蜜桃 | 久久精品国产77777蜜臀| 久久久www成人免费无遮挡大片| 高清成人免费视频| 
亚洲国产一区二区三区青草影视| 欧美一级精品大片| 成人18精品视频| 亚洲成人黄色小说| 久久久久久久久久久久久夜| av电影天堂一区二区在线| 亚洲影视在线观看| 久久婷婷一区二区三区| 91视视频在线观看入口直接观看www | 中文乱码免费一区二区| 在线观看不卡一区| 精品一区二区三区av| 亚洲欧洲制服丝袜| 日韩欧美中文字幕精品| 波多野结衣亚洲一区| 日韩成人一区二区三区在线观看| 欧美激情综合五月色丁香| 欧美日韩精品一二三区| 国产在线国偷精品免费看| 亚洲精品va在线观看| 2024国产精品视频| 欧美亚洲综合色| 国产福利一区二区三区视频 | 中文字幕av资源一区| 欧美日本在线看| 成人综合在线视频| 免费精品视频最新在线| 亚洲久草在线视频| 国产日韩av一区| 日韩一区二区电影| 在线亚洲一区观看| 成人午夜免费电影| 经典一区二区三区| 天堂一区二区在线| 亚洲欧美日韩系列|