mtrr.c
    {
        phys_base = ((uint64_t*)m->var_ranges)[seg*2];
        phys_mask = ((uint64_t*)m->var_ranges)[seg*2 + 1];
        if ( phys_mask & (1 << MTRR_PHYSMASK_VALID_BIT) )
        {
            if ( ((uint64_t) pa & phys_mask) >> MTRR_PHYSMASK_SHIFT ==
                 (phys_base & phys_mask) >> MTRR_PHYSMASK_SHIFT )
            {
                if ( unlikely(m->overlapped) )
                {
                    overlap_mtrr |= 1 << (phys_base & MTRR_PHYSBASE_TYPE_MASK);
                    overlap_mtrr_pos = phys_base & MTRR_PHYSBASE_TYPE_MASK;
                }
                else
                {
                    /* If no overlap, return the found one */
                    return (phys_base & MTRR_PHYSBASE_TYPE_MASK);
                }
            }
        }
    }

    /* Overlapped or not found. */
    if ( unlikely(overlap_mtrr == 0) )
        return m->def_type;

    if ( likely(!(overlap_mtrr & ~( ((uint8_t)1) << overlap_mtrr_pos ))) )
        /* Covers both a single variable-range match and
         * two or more identical matches.
         */
        return overlap_mtrr_pos;

    if ( overlap_mtrr & 0x1 )
        /* Two or more matches, one is UC. */
        return MTRR_TYPE_UNCACHABLE;

    if ( !(overlap_mtrr & 0xaf) )
        /* Two or more matches, WT and WB. */
        return MTRR_TYPE_WRTHROUGH;

    /* Behaviour is undefined, but return the last overlapped type. */
    return overlap_mtrr_pos;
}

/*
 * Return the memory type from PAT.
 * NOTE: valid only when paging is enabled.
 *       Only 4K page PTEs are supported now.
 */
static uint8_t page_pat_type(uint64_t pat_cr, uint32_t pte_flags)
{
    int32_t pat_entry;

    /* PCD/PWT -> bit 1/0 of PAT entry */
    pat_entry = ( pte_flags >> 3 ) & 0x3;
    /* PAT bit as bit 2 of PAT entry */
    if ( pte_flags & _PAGE_PAT )
        pat_entry |= 4;

    return (uint8_t)pat_cr_2_paf(pat_cr, pat_entry);
}

/*
 * Effective memory type for a leaf page.
 */
static uint8_t effective_mm_type(struct mtrr_state *m,
                                 uint64_t pat,
                                 paddr_t gpa,
                                 uint32_t pte_flags)
{
    uint8_t mtrr_mtype, pat_value, effective;

    mtrr_mtype = get_mtrr_type(m, gpa);
    pat_value = page_pat_type(pat, pte_flags);
    effective = mm_type_tbl[mtrr_mtype][pat_value];

    return effective;
}

static void init_mtrr_epat_tbl(void)
{
    int32_t i, j;

    /* Set the default value to an invalid type, just for conflict checking. */
    memset(&mtrr_epat_tbl, INVALID_MEM_TYPE, sizeof(mtrr_epat_tbl));

    for ( i = 0; i < MTRR_NUM_TYPES; i++ )
    {
        for ( j = 0; j < PAT_TYPE_NUMS; j++ )
        {
            int32_t tmp = mm_type_tbl[i][j];
            if ( (tmp >= 0) && (tmp < MEMORY_NUM_TYPES) )
                mtrr_epat_tbl[i][tmp] = j;
        }
    }
}

uint32_t get_pat_flags(struct vcpu *v,
                       uint32_t gl1e_flags,
                       paddr_t gpaddr,
                       paddr_t spaddr)
{
    uint8_t guest_eff_mm_type;
    uint8_t shadow_mtrr_type;
    uint8_t pat_entry_value;
    uint64_t pat = v->arch.hvm_vcpu.pat_cr;
    struct mtrr_state *g = &v->arch.hvm_vcpu.mtrr;

    /* 1. Get the effective memory type of the guest physical address,
     *    using the pair of guest MTRR and PAT.
     */
    guest_eff_mm_type = effective_mm_type(g, pat, gpaddr, gl1e_flags);
    /* 2. Get the memory type of the host physical address, using MTRR. */
    shadow_mtrr_type = get_mtrr_type(&mtrr_state, spaddr);

    /* 3. Find the memory type in PAT, using the host MTRR memory type
     *    and the guest effective memory type.
     */
    pat_entry_value = mtrr_epat_tbl[shadow_mtrr_type][guest_eff_mm_type];
    /* If a conflict occurs (e.g. the host MTRR type is UC but the guest
     * memory type is WB), set UC as the effective memory type.  Returning
     * PAT_TYPE_UNCACHABLE here always makes the effective memory type UC.
     */
    if ( pat_entry_value == INVALID_MEM_TYPE )
    {
        gdprintk(XENLOG_WARNING,
                 "Conflict occurs for a given guest l1e flags:%x "
                 "at %"PRIx64" (the effective mm type:%d), "
                 "because the host mtrr type is:%d\n",
                 gl1e_flags, (uint64_t)gpaddr, guest_eff_mm_type,
                 shadow_mtrr_type);
        pat_entry_value = PAT_TYPE_UNCACHABLE;
    }
    /* 4. Get the pte flags. */
    return pat_type_2_pte_flags(pat_entry_value);
}
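/*
 * mtrr_epat_tbl[] is the inverse of mm_type_tbl[]: indexed by the host
 * MTRR type of the machine frame and the memory type the guest expects
 * to observe, it yields the PAT memory type that, combined with that
 * host MTRR type, reproduces the guest-visible type;
 * pat_type_2_pte_flags() then converts that type into the corresponding
 * pte cacheability bits.  Some combinations are impossible (for example,
 * a host MTRR type of UC can never combine to a guest-effective WB);
 * those slots stay INVALID_MEM_TYPE in init_mtrr_epat_tbl(), and
 * get_pat_flags() falls back to UC as shown above.
 */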
/* Helper functions for setting MTRR/PAT. */
bool_t pat_msr_set(uint64_t *pat, uint64_t msr_content)
{
    uint8_t *value = (uint8_t*)&msr_content;
    int32_t i;

    if ( *pat != msr_content )
    {
        for ( i = 0; i < 8; i++ )
            if ( unlikely(!(value[i] == 0 || value[i] == 1 ||
                            value[i] == 4 || value[i] == 5 ||
                            value[i] == 6 || value[i] == 7)) )
                return 0;

        *pat = msr_content;
    }

    return 1;
}

bool_t mtrr_def_type_msr_set(struct mtrr_state *m, uint64_t msr_content)
{
    uint8_t def_type = msr_content & 0xff;
    uint8_t enabled = (msr_content >> 10) & 0x3;

    if ( unlikely(!(def_type == 0 || def_type == 1 || def_type == 4 ||
                    def_type == 5 || def_type == 6)) )
    {
        HVM_DBG_LOG(DBG_LEVEL_MSR, "invalid MTRR def type:%x\n", def_type);
        return 0;
    }

    if ( unlikely(msr_content && (msr_content & ~0xcffUL)) )
    {
        HVM_DBG_LOG(DBG_LEVEL_MSR, "invalid msr content:%"PRIx64"\n",
                    msr_content);
        return 0;
    }

    m->enabled = enabled;
    m->def_type = def_type;

    return 1;
}

bool_t mtrr_fix_range_msr_set(struct mtrr_state *m, uint32_t row,
                              uint64_t msr_content)
{
    uint64_t *fixed_range_base = (uint64_t *)m->fixed_ranges;

    if ( fixed_range_base[row] != msr_content )
    {
        uint8_t *range = (uint8_t*)&msr_content;
        int32_t i, type;

        for ( i = 0; i < 8; i++ )
        {
            type = range[i];
            if ( unlikely(!(type == 0 || type == 1 ||
                            type == 4 || type == 5 || type == 6)) )
                return 0;
        }

        fixed_range_base[row] = msr_content;
    }

    return 1;
}

bool_t mtrr_var_range_msr_set(struct mtrr_state *m, uint32_t msr,
                              uint64_t msr_content)
{
    uint32_t index;
    uint64_t msr_mask;
    uint64_t *var_range_base = (uint64_t*)m->var_ranges;

    index = msr - MSR_IA32_MTRR_PHYSBASE0;

    if ( var_range_base[index] != msr_content )
    {
        uint32_t type = msr_content & 0xff;

        msr_mask = (index & 1) ? phys_mask_msr_mask : phys_base_msr_mask;

        if ( unlikely(!(type == 0 || type == 1 ||
                        type == 4 || type == 5 || type == 6)) )
            return 0;

        if ( unlikely(msr_content && (msr_content & msr_mask)) )
        {
            HVM_DBG_LOG(DBG_LEVEL_MSR, "invalid msr content:%"PRIx64"\n",
                        msr_content);
            return 0;
        }

        var_range_base[index] = msr_content;
    }

    m->overlapped = is_var_mtrr_overlapped(m);

    return 1;
}

bool_t mtrr_pat_not_equal(struct vcpu *vd, struct vcpu *vs)
{
    struct mtrr_state *md = &vd->arch.hvm_vcpu.mtrr;
    struct mtrr_state *ms = &vs->arch.hvm_vcpu.mtrr;
    int32_t res;
    uint8_t num_var_ranges = (uint8_t)md->mtrr_cap;

    /* Test fixed ranges. */
    res = memcmp(md->fixed_ranges, ms->fixed_ranges,
                 NUM_FIXED_RANGES*sizeof(mtrr_type));
    if ( res )
        return 1;

    /* Test var ranges. */
    res = memcmp(md->var_ranges, ms->var_ranges,
                 num_var_ranges*sizeof(struct mtrr_var_range));
    if ( res )
        return 1;

    /* Test default type MSR. */
    if ( (md->def_type != ms->def_type) &&
         (md->enabled != ms->enabled) )
        return 1;

    /* Test PAT. */
    if ( vd->arch.hvm_vcpu.pat_cr != vs->arch.hvm_vcpu.pat_cr )
        return 1;

    return 0;
}
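/*
 * The literal type values checked in the MSR setters above are the
 * architectural memory type encodings: 0 = UC, 1 = WC, 4 = WT, 5 = WP,
 * 6 = WB; 2 and 3 are reserved.  PAT additionally allows 7 (UC-), which
 * is why pat_msr_set() accepts one more value than the MTRR setters.
 */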
void hvm_init_cacheattr_region_list(
    struct domain *d)
{
    INIT_LIST_HEAD(&d->arch.hvm_domain.pinned_cacheattr_ranges);
}

void hvm_destroy_cacheattr_region_list(
    struct domain *d)
{
    struct list_head *head = &d->arch.hvm_domain.pinned_cacheattr_ranges;
    struct hvm_mem_pinned_cacheattr_range *range;

    while ( !list_empty(head) )
    {
        range = list_entry(head->next,
                           struct hvm_mem_pinned_cacheattr_range,
                           list);
        list_del(&range->list);
        xfree(range);
    }
}

int32_t hvm_get_mem_pinned_cacheattr(
    struct domain *d,
    uint64_t guest_fn,
    uint32_t *type)
{
    struct hvm_mem_pinned_cacheattr_range *range;

    *type = 0;

    if ( !is_hvm_domain(d) )
        return 0;

    list_for_each_entry_rcu ( range,
                              &d->arch.hvm_domain.pinned_cacheattr_ranges,
                              list )
    {
        if ( (guest_fn >= range->start) &&
             (guest_fn <= range->end) )
        {
            *type = range->type;
            return 1;
        }
    }

    return 0;
}

int32_t hvm_set_mem_pinned_cacheattr(
    struct domain *d,
    uint64_t gfn_start,
    uint64_t gfn_end,
    uint32_t type)
{
    struct hvm_mem_pinned_cacheattr_range *range;

    if ( !((type == PAT_TYPE_UNCACHABLE) ||
           (type == PAT_TYPE_WRCOMB) ||
           (type == PAT_TYPE_WRTHROUGH) ||
           (type == PAT_TYPE_WRPROT) ||
           (type == PAT_TYPE_WRBACK) ||
           (type == PAT_TYPE_UC_MINUS)) ||
         !is_hvm_domain(d) )
        return -EINVAL;

    range = xmalloc(struct hvm_mem_pinned_cacheattr_range);
    if ( range == NULL )
        return -ENOMEM;

    memset(range, 0, sizeof(*range));
    range->start = gfn_start;
    range->end = gfn_end;
    range->type = type;

    list_add_rcu(&range->list, &d->arch.hvm_domain.pinned_cacheattr_ranges);

    return 0;
}

static int hvm_save_mtrr_msr(struct domain *d, hvm_domain_context_t *h)
{
    int i;
    struct vcpu *v;
    struct hvm_hw_mtrr hw_mtrr;
    struct mtrr_state *mtrr_state;

    /* save mtrr & pat */
    for_each_vcpu(d, v)
    {
        mtrr_state = &v->arch.hvm_vcpu.mtrr;

        hw_mtrr.msr_pat_cr = v->arch.hvm_vcpu.pat_cr;

        hw_mtrr.msr_mtrr_def_type = mtrr_state->def_type
                                    | (mtrr_state->enabled << 10);
        hw_mtrr.msr_mtrr_cap = mtrr_state->mtrr_cap;

        for ( i = 0; i < MTRR_VCNT; i++ )
        {
            /* save physbase */
            hw_mtrr.msr_mtrr_var[i*2] =
                ((uint64_t*)mtrr_state->var_ranges)[i*2];
            /* save physmask */
            hw_mtrr.msr_mtrr_var[i*2+1] =
                ((uint64_t*)mtrr_state->var_ranges)[i*2+1];
        }

        for ( i = 0; i < NUM_FIXED_MSR; i++ )
            hw_mtrr.msr_mtrr_fixed[i] =
                ((uint64_t*)mtrr_state->fixed_ranges)[i];

        if ( hvm_save_entry(MTRR, v->vcpu_id, h, &hw_mtrr) != 0 )
            return 1;
    }

    return 0;
}

static int hvm_load_mtrr_msr(struct domain *d, hvm_domain_context_t *h)
{
    int vcpuid, i;
    struct vcpu *v;
    struct mtrr_state *mtrr_state;
    struct hvm_hw_mtrr hw_mtrr;

    vcpuid = hvm_load_instance(h);
    if ( vcpuid > MAX_VIRT_CPUS || (v = d->vcpu[vcpuid]) == NULL )
    {
        gdprintk(XENLOG_ERR, "HVM restore: domain has no vcpu %u\n", vcpuid);
        return -EINVAL;
    }

    if ( hvm_load_entry(MTRR, h, &hw_mtrr) != 0 )
        return -EINVAL;

    mtrr_state = &v->arch.hvm_vcpu.mtrr;

    pat_msr_set(&v->arch.hvm_vcpu.pat_cr, hw_mtrr.msr_pat_cr);

    mtrr_state->mtrr_cap = hw_mtrr.msr_mtrr_cap;

    for ( i = 0; i < NUM_FIXED_MSR; i++ )
        mtrr_fix_range_msr_set(mtrr_state, i, hw_mtrr.msr_mtrr_fixed[i]);

    for ( i = 0; i < MTRR_VCNT; i++ )
    {
        mtrr_var_range_msr_set(mtrr_state,
                               MTRRphysBase_MSR(i),
                               hw_mtrr.msr_mtrr_var[i*2]);
        mtrr_var_range_msr_set(mtrr_state,
                               MTRRphysMask_MSR(i),
                               hw_mtrr.msr_mtrr_var[i*2+1]);
    }

    mtrr_def_type_msr_set(mtrr_state, hw_mtrr.msr_mtrr_def_type);

    v->arch.hvm_vcpu.mtrr.is_initialized = 1;

    return 0;
}

HVM_REGISTER_SAVE_RESTORE(MTRR, hvm_save_mtrr_msr, hvm_load_mtrr_msr,
                          1, HVMSR_PER_VCPU);
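For reference, the variable-range match test that get_mtrr_type() applies to each PHYSBASE/PHYSMASK pair can be exercised on its own. The sketch below is not part of mtrr.c: var_range_matches() and the register values are illustrative, and it assumes the usual IA32_MTRR_PHYSBASEn/PHYSMASKn layout (base and mask in bits 12 and up, valid bit 11 of the mask, type in the low byte of the base) with 40 physical address bits.

/* Standalone sketch of the variable-range MTRR match test; build with:
 *   cc -o mtrr_match mtrr_match.c
 */
#include <stdint.h>
#include <stdio.h>

#define MTRR_PHYSMASK_SHIFT      12
#define MTRR_PHYSMASK_VALID_BIT  11

/* Return 1 when 'pa' lies inside the range described by a hypothetical
 * PHYSBASE/PHYSMASK pair, mirroring the comparison in get_mtrr_type(). */
static int var_range_matches(uint64_t phys_base, uint64_t phys_mask,
                             uint64_t pa)
{
    if ( !(phys_mask & (1ULL << MTRR_PHYSMASK_VALID_BIT)) )
        return 0;                              /* range not enabled */
    return ((pa & phys_mask) >> MTRR_PHYSMASK_SHIFT) ==
           ((phys_base & phys_mask) >> MTRR_PHYSMASK_SHIFT);
}

int main(void)
{
    /* Illustrative values: a 256MiB WB range at physical address 0. */
    uint64_t base = 0x0000000000000006ULL;  /* base 0x0, type 6 (WB)      */
    uint64_t mask = 0x000000FFF0000800ULL;  /* 256MiB mask, valid bit set */

    printf("0x08000000 matches: %d\n",
           var_range_matches(base, mask, 0x08000000ULL));   /* inside  -> 1 */
    printf("0x10000000 matches: %d\n",
           var_range_matches(base, mask, 0x10000000ULL));   /* outside -> 0 */
    return 0;
}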