ia64/xen-unstable

xen/arch/x86/hvm/mtrr.c @ 16197:b3fa9b58a102

hvm, vt-d: Add memory cache-attribute pinning domctl for HVM
guests. Use this to pin virtual framebuffer VRAM as attribute WB, even
if the guest tries to map it with other attributes.
Signed-off-by: Disheng Su <disheng.su@intel.com>
author Keir Fraser <keir@xensource.com>
date Tue Oct 23 14:38:47 2007 +0100 (2007-10-23)
parents 3e7c86602c70
children 2b11cb52f6a0
/*
 * mtrr.c: MTRR/PAT virtualization
 *
 * Copyright (c) 2007, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 */
#include <public/hvm/e820.h>
#include <xen/types.h>
#include <asm/e820.h>
#include <asm/paging.h>
#include <asm/p2m.h>
#include <xen/domain_page.h>
#include <stdbool.h>
#include <asm/mtrr.h>
#include <asm/hvm/support.h>
#include <asm/hvm/cacheattr.h>
/* Xen holds the native MTRR MSRs */
extern struct mtrr_state mtrr_state;

static u64 phys_base_msr_mask;
static u64 phys_mask_msr_mask;
static u32 size_or_mask;
static u32 size_and_mask;

static void init_pat_entry_tbl(u64 pat);
static void init_mtrr_epat_tbl(void);
static unsigned char get_mtrr_type(struct mtrr_state *m, paddr_t pa);

/* get page attribute fields (PAn) from PAT MSR */
#define pat_cr_2_paf(pat_cr,n)  ((((u64)pat_cr) >> ((n)<<3)) & 0xff)
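
/*
 * Example (illustrative, not from the original source): pat_cr_2_paf(pat, 3)
 * extracts bits 24-31 of the PAT MSR image, i.e. the PA3 memory type; with
 * the power-on default PAT value 0x0007040600070406 it evaluates to
 * 0x00 (UC).
 */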
/* pat entry to PTE flags (PAT, PCD, PWT bits) */
static unsigned char pat_entry_2_pte_flags[8] = {
    0,           _PAGE_PWT,
    _PAGE_PCD,   _PAGE_PCD | _PAGE_PWT,
    _PAGE_PAT,   _PAGE_PAT | _PAGE_PWT,
    _PAGE_PAT | _PAGE_PCD, _PAGE_PAT | _PAGE_PCD | _PAGE_PWT };
/* effective mm type lookup table, according to MTRR and PAT */
static u8 mm_type_tbl[MTRR_NUM_TYPES][PAT_TYPE_NUMS] = {
/********PAT(UC,WC,RS,RS,WT,WP,WB,UC-)*/
/* RS means reserved type(2,3), and type is hardcoded here */
 /*MTRR(UC):(UC,WC,RS,RS,UC,UC,UC,UC)*/
            {0, 1, 2, 2, 0, 0, 0, 0},
 /*MTRR(WC):(UC,WC,RS,RS,UC,UC,WC,WC)*/
            {0, 1, 2, 2, 0, 0, 1, 1},
 /*MTRR(RS):(RS,RS,RS,RS,RS,RS,RS,RS)*/
            {2, 2, 2, 2, 2, 2, 2, 2},
 /*MTRR(RS):(RS,RS,RS,RS,RS,RS,RS,RS)*/
            {2, 2, 2, 2, 2, 2, 2, 2},
 /*MTRR(WT):(UC,WC,RS,RS,WT,WP,WT,UC)*/
            {0, 1, 2, 2, 4, 5, 4, 0},
 /*MTRR(WP):(UC,WC,RS,RS,WT,WP,WP,WC)*/
            {0, 1, 2, 2, 4, 5, 5, 1},
 /*MTRR(WB):(UC,WC,RS,RS,WT,WP,WB,UC)*/
            {0, 1, 2, 2, 4, 5, 6, 0}
};

/* reverse lookup table, to find a pat type according to MTRR and effective
 * memory type. This table is dynamically generated.
 */
static u8 mtrr_epat_tbl[MTRR_NUM_TYPES][MEMORY_NUM_TYPES];

/* lookup table for PAT entry of a given PAT value in host pat */
static u8 pat_entry_tbl[PAT_TYPE_NUMS];
static void get_mtrr_range(uint64_t base_msr, uint64_t mask_msr,
                           uint64_t *base, uint64_t *end)
{
    uint32_t mask_lo = (uint32_t)mask_msr;
    uint32_t mask_hi = (uint32_t)(mask_msr >> 32);
    uint32_t base_lo = (uint32_t)base_msr;
    uint32_t base_hi = (uint32_t)(base_msr >> 32);
    uint32_t size;

    if ( (mask_lo & 0x800) == 0 )
    {
        /* Invalid (i.e. free) range */
        *base = 0;
        *end = 0;
        return;
    }

    /* Work out the shifted address mask. */
    mask_lo = (size_or_mask | (mask_hi << (32 - PAGE_SHIFT)) |
               (mask_lo >> PAGE_SHIFT));

    /* This works correctly if size is a power of two (a contiguous range). */
    size = -mask_lo;
    *base = base_hi << (32 - PAGE_SHIFT) | base_lo >> PAGE_SHIFT;
    *end = *base + size - 1;
}
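
/*
 * Worked example (illustrative, assuming a 36-bit physical address width,
 * i.e. size_or_mask == 0xff000000): for base_msr == 0x6 (base 0, type WB)
 * and mask_msr == 0xff0000800 (valid bit set), the shifted mask becomes
 * 0xffff0000, so size == 0x10000 pages and the decoded range is frames
 * 0x0 through 0xffff, i.e. the 256MB region starting at 0.
 */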
bool_t is_var_mtrr_overlapped(struct mtrr_state *m)
{
    int seg, i;
    uint64_t phys_base, phys_mask, phys_base_pre, phys_mask_pre;
    uint64_t base_pre, end_pre, base, end;
    uint8_t num_var_ranges = (u8)m->mtrr_cap;

    for ( i = 0; i < num_var_ranges; i++ )
    {
        phys_base_pre = ((u64*)m->var_ranges)[i*2];
        phys_mask_pre = ((u64*)m->var_ranges)[i*2 + 1];

        get_mtrr_range(phys_base_pre, phys_mask_pre,
                       &base_pre, &end_pre);

        for ( seg = i + 1; seg < num_var_ranges; seg ++ )
        {
            phys_base = ((u64*)m->var_ranges)[seg*2];
            phys_mask = ((u64*)m->var_ranges)[seg*2 + 1];

            get_mtrr_range(phys_base, phys_mask,
                           &base, &end);

            if ( ((base_pre != end_pre) && (base != end))
                 || ((base >= base_pre) && (base <= end_pre))
                 || ((end >= base_pre) && (end <= end_pre))
                 || ((base_pre >= base) && (base_pre <= end))
                 || ((end_pre >= base) && (end_pre <= end)) )
            {
                /* MTRR is overlapped. */
                return 1;
            }
        }
    }
    return 0;
}
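
/*
 * Note: the first clause of the test above is true whenever both decoded
 * ranges span more than one page, so any pair of such valid ranges is
 * reported as overlapped; the pairwise bound checks only decide the cases
 * involving single-page or disabled ranges.
 */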
/* reserved mtrr for guest OS */
#define RESERVED_MTRR 2
#define MTRRphysBase_MSR(reg) (0x200 + 2 * (reg))
#define MTRRphysMask_MSR(reg) (0x200 + 2 * (reg) + 1)
bool mtrr_var_range_msr_set(struct mtrr_state *m, u32 msr, u64 msr_content);
bool mtrr_def_type_msr_set(struct mtrr_state *m, u64 msr_content);
bool mtrr_fix_range_msr_set(struct mtrr_state *m, int row, u64 msr_content);
static void set_var_mtrr(unsigned int reg, struct mtrr_state *m,
                         unsigned int base, unsigned int size,
                         unsigned int type)
{
    struct mtrr_var_range *vr;

    vr = &m->var_ranges[reg];

    if ( size == 0 )
    {
        /* The invalid bit is kept in the mask, so we simply clear the
         * relevant mask register to disable a range.
         */
        mtrr_var_range_msr_set(m, MTRRphysMask_MSR(reg), 0);
    }
    else
    {
        vr->base_lo = base << PAGE_SHIFT | type;
        vr->base_hi = (base & size_and_mask) >> (32 - PAGE_SHIFT);
        vr->mask_lo = -size << PAGE_SHIFT | 0x800;
        vr->mask_hi = (-size & size_and_mask) >> (32 - PAGE_SHIFT);

        mtrr_var_range_msr_set(m, MTRRphysBase_MSR(reg), *(unsigned long *)vr);
        mtrr_var_range_msr_set(m, MTRRphysMask_MSR(reg),
                               *((unsigned long *)vr + 1));
    }
}
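
/*
 * Worked example (illustrative, assuming a 36-bit physical address width,
 * i.e. size_and_mask == 0x00f00000): set_var_mtrr(reg, m, 0x100, 0x100,
 * MTRR_TYPE_WRBACK) encodes a 1MB WB range at 1MB as
 * PhysBase == 0x0000000000100006 and PhysMask == 0x0000000ffff00800.
 */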
/* From Intel Vol. III Section 10.11.4, the Range Size and Base Alignment
 * have the following requirements:
 * 1. The range size must be 2^N bytes for N >= 12 (i.e. 4KB minimum).
 * 2. The base address must be 2^N aligned, where N is the same as in the
 *    previous requirement. So an 8K range must be 8K aligned, not 4K aligned.
 */
static unsigned int range_to_mtrr(unsigned int reg, struct mtrr_state *m,
    unsigned int range_startk, unsigned int range_sizek, unsigned char type)
{
    if ( !range_sizek || (reg >= ((m->mtrr_cap & 0xff) - RESERVED_MTRR)) )
        return reg;

    while ( range_sizek )
    {
        unsigned int max_align, align, sizek;

        max_align = (range_startk == 0) ? 32 : ffs(range_startk);
        align = min_t(unsigned int, fls(range_sizek), max_align);
        sizek = 1 << (align - 1);

        set_var_mtrr(reg++, m, range_startk, sizek, type);

        range_startk += sizek;
        range_sizek  -= sizek;

        if ( reg >= ((m->mtrr_cap & 0xff) - RESERVED_MTRR) )
            break;
    }

    return reg;
}
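
/*
 * Worked example (illustrative; note that, despite the "k" suffix, the
 * caller below passes page frames): a 768MB range starting at 0
 * (range_startk == 0, range_sizek == 0x30000 frames) is not a power of
 * two, so the loop above emits a 512MB chunk at 0 followed by a 256MB
 * chunk at 512MB, consuming two variable-range registers.
 */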
static void setup_fixed_mtrrs(struct vcpu *v)
{
    uint64_t content;
    int i;
    struct mtrr_state *m = &v->arch.hvm_vcpu.mtrr;

    /* 1. Map (0~A0000) as WB */
    content = 0x0606060606060606ull;
    mtrr_fix_range_msr_set(m, 0, content);
    mtrr_fix_range_msr_set(m, 1, content);
    /* 2. Map VRAM(A0000~C0000) as WC */
    content = 0x0101010101010101;
    mtrr_fix_range_msr_set(m, 2, content);
    /* 3. Map (C0000~100000) as UC */
    for ( i = 3; i < 11; i++)
        mtrr_fix_range_msr_set(m, i, 0);
}
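
/*
 * For reference: each fixed-range row above packs eight one-byte memory
 * types into one 64-bit MSR.  Row 0 is MTRRfix64K_00000 (0-0x7ffff),
 * row 1 is MTRRfix16K_80000 (0x80000-0x9ffff), row 2 is MTRRfix16K_A0000
 * (0xa0000-0xbffff), and rows 3-10 are MTRRfix4K_C0000..MTRRfix4K_F8000
 * (0xc0000-0xfffff); hence 0x0606... marks a whole row WB and 0x0101...
 * marks it WC.
 */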
static void setup_var_mtrrs(struct vcpu *v)
{
    p2m_type_t p2m;
    unsigned long e820_mfn;
    char *p = NULL;
    unsigned char nr = 0;
    int i;
    unsigned int reg = 0;
    unsigned long size = 0;
    unsigned long addr = 0;
    struct e820entry *e820_table;

    e820_mfn = mfn_x(gfn_to_mfn(v->domain,
                                HVM_E820_PAGE >> PAGE_SHIFT, &p2m));

    p = (char *)map_domain_page(e820_mfn);

    nr = *(unsigned char*)(p + HVM_E820_NR_OFFSET);
    e820_table = (struct e820entry*)(p + HVM_E820_OFFSET);
    /* search E820 table, set MTRR for RAM */
    for ( i = 0; i < nr; i++)
    {
        if ( (e820_table[i].addr >= 0x100000) &&
             (e820_table[i].type == E820_RAM) )
        {
            if ( e820_table[i].addr == 0x100000 )
            {
                size = e820_table[i].size + 0x100000 + PAGE_SIZE * 3;
                addr = 0;
            }
            else
            {
                /* Larger than 4G */
                size = e820_table[i].size;
                addr = e820_table[i].addr;
            }

            reg = range_to_mtrr(reg, &v->arch.hvm_vcpu.mtrr,
                                addr >> PAGE_SHIFT, size >> PAGE_SHIFT,
                                MTRR_TYPE_WRBACK);
        }
    }
}
void init_mtrr_in_hyper(struct vcpu *v)
{
    /* TODO: MTRR should be initialized by the BIOS or elsewhere;
     * as a workaround, do it here.
     */
    if ( v->arch.hvm_vcpu.mtrr.is_initialized )
        return;

    setup_fixed_mtrrs(v);
    setup_var_mtrrs(v);
    /* enable mtrr */
    mtrr_def_type_msr_set(&v->arch.hvm_vcpu.mtrr, 0xc00);

    v->arch.hvm_vcpu.mtrr.is_initialized = 1;
}
static int reset_mtrr(struct mtrr_state *m)
{
    m->var_ranges = xmalloc_array(struct mtrr_var_range, MTRR_VCNT);
    if ( m->var_ranges == NULL )
        return -ENOMEM;
    memset(m->var_ranges, 0, MTRR_VCNT * sizeof(struct mtrr_var_range));
    memset(m->fixed_ranges, 0, sizeof(m->fixed_ranges));
    m->enabled = 0;
    m->def_type = 0;                      /* mtrr is disabled */
    m->mtrr_cap = (0x5 << 8) | MTRR_VCNT; /* wc, fix enabled, and vcnt = 8 */
    m->overlapped = 0;
    return 0;
}
/* init global variables for MTRR and PAT */
void global_init_mtrr_pat(void)
{
    extern u64 host_pat;
    u32 phys_addr;

    init_mtrr_epat_tbl();
    init_pat_entry_tbl(host_pat);
    /* Get max physical address, set some global variable */
    if ( cpuid_eax(0x80000000) < 0x80000008 )
        phys_addr = 36;
    else
        phys_addr = cpuid_eax(0x80000008);

    phys_base_msr_mask = ~((((u64)1) << phys_addr) - 1) | 0xf00UL;
    phys_mask_msr_mask = ~((((u64)1) << phys_addr) - 1) | 0x7ffUL;

    size_or_mask = ~((1 << (phys_addr - PAGE_SHIFT)) - 1);
    size_and_mask = ~size_or_mask & 0xfff00000;
}
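
/*
 * Example (illustrative): for phys_addr == 36 the code above yields
 * size_or_mask == 0xff000000 and size_and_mask == 0x00f00000, the values
 * assumed by the worked examples in the comments earlier in this file.
 */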
static void init_pat_entry_tbl(u64 pat)
{
    int i, j;

    memset(&pat_entry_tbl, INVALID_MEM_TYPE,
           PAT_TYPE_NUMS * sizeof(pat_entry_tbl[0]));

    for ( i = 0; i < PAT_TYPE_NUMS; i++ )
    {
        for ( j = 0; j < PAT_TYPE_NUMS; j++ )
        {
            if ( pat_cr_2_paf(pat, j) == i )
            {
                pat_entry_tbl[i] = j;
                break;
            }
        }
    }
}
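
/*
 * Example (illustrative; a hypothetical host PAT, not necessarily Xen's):
 * if host_pat were the power-on default 0x0007040600070406 (PA0=WB, PA1=WT,
 * PA2=UC-, PA3=UC, PA4-PA7 repeating), the loop above would give
 * pat_entry_tbl[PAT_TYPE_WRBACK] = 0, pat_entry_tbl[PAT_TYPE_WRTHROUGH] = 1,
 * pat_entry_tbl[PAT_TYPE_UC_MINUS] = 2 and pat_entry_tbl[PAT_TYPE_UNCACHABLE]
 * = 3, while PAT_TYPE_WRCOMB and PAT_TYPE_WRPROT would remain
 * INVALID_MEM_TYPE because no entry of that PAT holds WC or WP.
 */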
unsigned char pat_type_2_pte_flags(unsigned char pat_type)
{
    int pat_entry = pat_entry_tbl[pat_type];

    /* INVALID_MEM_TYPE means no PAT entry was found in the host PAT for
     * the given pat_type. This cannot happen if the host PAT covers all
     * the PAT types.
     */
    if ( likely(pat_entry != INVALID_MEM_TYPE) )
        return pat_entry_2_pte_flags[pat_entry];

    return pat_entry_2_pte_flags[pat_entry_tbl[PAT_TYPE_UNCACHABLE]];
}
int reset_vmsr(struct mtrr_state *m, u64 *pat_ptr)
{
    int rc;

    rc = reset_mtrr(m);
    if ( rc != 0 )
        return rc;

    *pat_ptr = ( (u64)PAT_TYPE_WRBACK) |               /* PAT0: WB */
               ( (u64)PAT_TYPE_WRTHROUGH << 8 ) |      /* PAT1: WT */
               ( (u64)PAT_TYPE_UC_MINUS << 16 ) |      /* PAT2: UC- */
               ( (u64)PAT_TYPE_UNCACHABLE << 24 ) |    /* PAT3: UC */
               ( (u64)PAT_TYPE_WRBACK << 32 ) |        /* PAT4: WB */
               ( (u64)PAT_TYPE_WRTHROUGH << 40 ) |     /* PAT5: WT */
               ( (u64)PAT_TYPE_UC_MINUS << 48 ) |      /* PAT6: UC- */
               ( (u64)PAT_TYPE_UNCACHABLE << 56 );     /* PAT7: UC */

    return 0;
}
/*
 * Get MTRR memory type for physical address pa.
 */
static unsigned char get_mtrr_type(struct mtrr_state *m, paddr_t pa)
{
    int addr, seg, index;
    u8 overlap_mtrr = 0;
    u8 overlap_mtrr_pos = 0;
    u64 phys_base;
    u64 phys_mask;
    u8 num_var_ranges = m->mtrr_cap & 0xff;

    if ( unlikely(!(m->enabled & 0x2)) )
        return MTRR_TYPE_UNCACHABLE;

    if ( (pa < 0x100000) && (m->enabled & 1) )
    {
        /* Fixed-range MTRRs take effect here */
        addr = (unsigned int) pa;
        if ( addr < 0x80000 )
        {
            seg = (addr >> 16);
            return m->fixed_ranges[seg];
        }
        else if ( addr < 0xc0000 )
        {
            seg = (addr - 0x80000) >> 14;
            index = (seg >> 3) + 1;
            seg &= 7;            /* select 0-7 segments */
            return m->fixed_ranges[index*8 + seg];
        }
        else
        {
            /* 0xC0000 --- 0x100000 */
            seg = (addr - 0xc0000) >> 12;
            index = (seg >> 3) + 3;
            seg &= 7;            /* select 0-7 segments */
            return m->fixed_ranges[index*8 + seg];
        }
    }

    /* Match with variable MTRRs. */
    for ( seg = 0; seg < num_var_ranges; seg++ )
    {
        phys_base = ((u64*)m->var_ranges)[seg*2];
        phys_mask = ((u64*)m->var_ranges)[seg*2 + 1];
        if ( phys_mask & (1 << MTRR_PHYSMASK_VALID_BIT) )
        {
            if ( ((u64) pa & phys_mask) >> MTRR_PHYSMASK_SHIFT ==
                 (phys_base & phys_mask) >> MTRR_PHYSMASK_SHIFT )
            {
                if ( unlikely(m->overlapped) )
                {
                    overlap_mtrr |= 1 << (phys_base & MTRR_PHYSBASE_TYPE_MASK);
                    overlap_mtrr_pos = phys_base & MTRR_PHYSBASE_TYPE_MASK;
                }
                else
                {
                    /* If no overlap, return the found one */
                    return (phys_base & MTRR_PHYSBASE_TYPE_MASK);
                }
            }
        }
    }

    /* Overlapped or not found. */
    if ( unlikely(overlap_mtrr == 0) )
        return m->def_type;

    if ( likely(!(overlap_mtrr & ~( ((u8)1) << overlap_mtrr_pos ))) )
        /* Covers both the case of a single variable-range match and of
         * two or more identical matches.
         */
        return overlap_mtrr_pos;

    if ( overlap_mtrr & 0x1 )
        /* Two or more matches, one of which is UC. */
        return MTRR_TYPE_UNCACHABLE;

    if ( !(overlap_mtrr & 0xaf) )
        /* Two or more matches, all of them WT or WB. */
        return MTRR_TYPE_WRTHROUGH;

    /* Behaviour is undefined, but return the last overlapped type. */
    return overlap_mtrr_pos;
}
/*
 * Return the memory type from PAT.
 * NOTE: valid only when paging is enabled.
 *       Only 4K page PTE is supported now.
 */
static unsigned char page_pat_type(u64 pat_cr, unsigned long pte_flags)
{
    int pat_entry;

    /* PCD/PWT -> bit 1/0 of PAT entry */
    pat_entry = ( pte_flags >> 3 ) & 0x3;
    /* PAT bit as bit 2 of PAT entry */
    if ( pte_flags & _PAGE_PAT )
        pat_entry |= 4;

    return (unsigned char)pat_cr_2_paf(pat_cr, pat_entry);
}
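
/*
 * Example (illustrative): for a 4K PTE with _PAGE_PCD and _PAGE_PAT set
 * and _PAGE_PWT clear, (pte_flags >> 3) & 0x3 gives 2 and the PAT bit
 * adds 4, so pat_entry == 6 and the PA6 field of pat_cr is returned.
 */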
/*
 * Effective memory type for leaf page.
 */
static u8 effective_mm_type(
    struct mtrr_state *m,
    u64 pat,
    paddr_t gpa,
    unsigned long pte_flags)
{
    unsigned char mtrr_mtype, pat_value, effective;

    mtrr_mtype = get_mtrr_type(m, gpa);

    pat_value = page_pat_type(pat, pte_flags);

    effective = mm_type_tbl[mtrr_mtype][pat_value];

    return effective;
}
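
/*
 * Example (illustrative): a guest MTRR type of WB (6) combined with a
 * guest PAT type of WT (4) yields mm_type_tbl[6][4] == 4, i.e. an
 * effective memory type of WT.
 */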
static void init_mtrr_epat_tbl(void)
{
    int i, j;
    /* set default value to an invalid type, just for checking conflict */
    memset(&mtrr_epat_tbl, INVALID_MEM_TYPE, sizeof(mtrr_epat_tbl));

    for ( i = 0; i < MTRR_NUM_TYPES; i++ )
    {
        for ( j = 0; j < PAT_TYPE_NUMS; j++ )
        {
            int tmp = mm_type_tbl[i][j];
            if ( (tmp >= 0) && (tmp < MEMORY_NUM_TYPES) )
                mtrr_epat_tbl[i][tmp] = j;
        }
    }
}
u32 get_pat_flags(struct vcpu *v,
                  u32 gl1e_flags,
                  paddr_t gpaddr,
                  paddr_t spaddr)
{
    u8 guest_eff_mm_type;
    u8 shadow_mtrr_type;
    u8 pat_entry_value;
    u64 pat = v->arch.hvm_vcpu.pat_cr;
    struct mtrr_state *g = &v->arch.hvm_vcpu.mtrr;

    /* 1. Get the effective memory type of the guest physical address,
     * using the guest MTRR and PAT pair.
     */
    guest_eff_mm_type = effective_mm_type(g, pat, gpaddr, gl1e_flags);
    /* 2. Get the memory type of the host physical address, using MTRR */
    shadow_mtrr_type = get_mtrr_type(&mtrr_state, spaddr);

    /* 3. Find the memory type in PAT, using the host MTRR memory type
     * and the guest effective memory type.
     */
    pat_entry_value = mtrr_epat_tbl[shadow_mtrr_type][guest_eff_mm_type];
    /* If a conflict occurs (e.g. the host MTRR type is UC but the guest
     * memory type is WB), set UC as the effective memory type. Returning
     * PAT_TYPE_UNCACHABLE here always makes the effective memory type UC.
     */
    if ( pat_entry_value == INVALID_MEM_TYPE )
    {
        gdprintk(XENLOG_WARNING,
                 "Conflict occurs for a given guest l1e flags:%x "
                 "at %"PRIx64" (the effective mm type:%d), "
                 "because the host mtrr type is:%d\n",
                 gl1e_flags, (uint64_t)gpaddr, guest_eff_mm_type,
                 shadow_mtrr_type);
        pat_entry_value = PAT_TYPE_UNCACHABLE;
    }
    /* 4. Get the pte flags */
    return pat_type_2_pte_flags(pat_entry_value);
}
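
/*
 * Example (illustrative): if the guest's MTRR/PAT pair makes a page WC
 * (guest_eff_mm_type == 1) while the host MTRR type of the machine frame
 * is WB (6), then mtrr_epat_tbl[6][1] == PAT_TYPE_WRCOMB and the returned
 * flags are the host PAT's PTE encoding for WC.
 */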
/* Helper functions for setting mtrr/pat */
bool pat_msr_set(u64 *pat, u64 msr_content)
{
    u8 *value = (u8*)&msr_content;
    int i;

    if ( *pat != msr_content )
    {
        for ( i = 0; i < 8; i++ )
            if ( unlikely(!(value[i] == 0 || value[i] == 1 ||
                            value[i] == 4 || value[i] == 5 ||
                            value[i] == 6 || value[i] == 7)) )
                return 0;

        *pat = msr_content;
    }

    return 1;
}
bool mtrr_def_type_msr_set(struct mtrr_state *m, u64 msr_content)
{
    u8 def_type = msr_content & 0xff;
    u8 enabled = (msr_content >> 10) & 0x3;

    if ( unlikely(!(def_type == 0 || def_type == 1 || def_type == 4 ||
                    def_type == 5 || def_type == 6)) )
    {
         HVM_DBG_LOG(DBG_LEVEL_MSR, "invalid MTRR def type:%x\n", def_type);
         return 0;
    }

    if ( unlikely(msr_content && (msr_content & ~0xcffUL)) )
    {
         HVM_DBG_LOG(DBG_LEVEL_MSR, "invalid msr content:%"PRIx64"\n",
                     msr_content);
         return 0;
    }

    m->enabled = enabled;
    m->def_type = def_type;

    return 1;
}
bool mtrr_fix_range_msr_set(struct mtrr_state *m, int row, u64 msr_content)
{
    u64 *fixed_range_base = (u64 *)m->fixed_ranges;

    if ( fixed_range_base[row] != msr_content )
    {
        u8 *range = (u8*)&msr_content;
        int i, type;

        for ( i = 0; i < 8; i++ )
        {
            type = range[i];
            if ( unlikely(!(type == 0 || type == 1 ||
                            type == 4 || type == 5 || type == 6)) )
                return 0;
        }

        fixed_range_base[row] = msr_content;
    }

    return 1;
}
bool mtrr_var_range_msr_set(struct mtrr_state *m, u32 msr, u64 msr_content)
{
    u32 index;
    u64 msr_mask;
    u64 *var_range_base = (u64*)m->var_ranges;

    index = msr - MSR_IA32_MTRR_PHYSBASE0;

    if ( var_range_base[index] != msr_content )
    {
        u32 type = msr_content & 0xff;

        msr_mask = (index & 1) ? phys_mask_msr_mask : phys_base_msr_mask;

        if ( unlikely(!(type == 0 || type == 1 ||
                        type == 4 || type == 5 || type == 6)) )
            return 0;

        if ( unlikely(msr_content && (msr_content & msr_mask)) )
        {
            HVM_DBG_LOG(DBG_LEVEL_MSR, "invalid msr content:%"PRIx64"\n",
                        msr_content);
            return 0;
        }

        var_range_base[index] = msr_content;
    }

    m->overlapped = is_var_mtrr_overlapped(m);

    return 1;
}
bool_t mtrr_pat_not_equal(struct vcpu *vd, struct vcpu *vs)
{
    struct mtrr_state *md = &vd->arch.hvm_vcpu.mtrr;
    struct mtrr_state *ms = &vs->arch.hvm_vcpu.mtrr;
    int res;
    u8 num_var_ranges = (u8)md->mtrr_cap;

    /* Test fixed ranges. */
    res = memcmp(md->fixed_ranges, ms->fixed_ranges,
                 NUM_FIXED_RANGES*sizeof(mtrr_type));
    if ( res )
        return 1;

    /* Test var ranges. */
    res = memcmp(md->var_ranges, ms->var_ranges,
                 num_var_ranges*sizeof(struct mtrr_var_range));
    if ( res )
        return 1;

    /* Test default type MSR. */
    if ( (md->def_type != ms->def_type)
         && (md->enabled != ms->enabled) )
        return 1;

    /* Test PAT. */
    if ( vd->arch.hvm_vcpu.pat_cr != vs->arch.hvm_vcpu.pat_cr )
        return 1;

    return 0;
}
void hvm_init_cacheattr_region_list(
    struct domain *d)
{
    INIT_LIST_HEAD(&d->arch.hvm_domain.pinned_cacheattr_ranges);
}

void hvm_destroy_cacheattr_region_list(
    struct domain *d)
{
    struct list_head *head = &d->arch.hvm_domain.pinned_cacheattr_ranges;
    struct hvm_mem_pinned_cacheattr_range *range;

    while ( !list_empty(head) )
    {
        range = list_entry(head->next,
                           struct hvm_mem_pinned_cacheattr_range,
                           list);
        list_del(&range->list);
        xfree(range);
    }
}
int hvm_get_mem_pinned_cacheattr(
    struct domain *d,
    unsigned long guest_fn,
    unsigned int *type)
{
    struct hvm_mem_pinned_cacheattr_range *range;

    *type = 0;

    if ( !is_hvm_domain(d) )
        return 0;

    list_for_each_entry_rcu ( range,
                              &d->arch.hvm_domain.pinned_cacheattr_ranges,
                              list )
    {
        if ( (guest_fn >= range->start) && (guest_fn <= range->end) )
        {
            *type = range->type;
            return 1;
        }
    }

    return 0;
}
int hvm_set_mem_pinned_cacheattr(
    struct domain *d,
    unsigned long gfn_start,
    unsigned long gfn_end,
    unsigned int type)
{
    struct hvm_mem_pinned_cacheattr_range *range;

    if ( !((type == PAT_TYPE_UNCACHABLE) ||
           (type == PAT_TYPE_WRCOMB) ||
           (type == PAT_TYPE_WRTHROUGH) ||
           (type == PAT_TYPE_WRPROT) ||
           (type == PAT_TYPE_WRBACK) ||
           (type == PAT_TYPE_UC_MINUS)) ||
         !is_hvm_domain(d) )
        return -EINVAL;

    range = xmalloc(struct hvm_mem_pinned_cacheattr_range);
    if ( range == NULL )
        return -ENOMEM;

    memset(range, 0, sizeof(*range));

    range->start = gfn_start;
    range->end = gfn_end;
    range->type = type;

    list_add_rcu(&range->list, &d->arch.hvm_domain.pinned_cacheattr_ranges);

    return 0;
}
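
/*
 * Usage sketch (illustrative only; the caller, domain pointer and GFN range
 * below are assumptions, not part of this file): the cache-attribute pinning
 * domctl added by this changeset presumably reaches
 * hvm_set_mem_pinned_cacheattr() to pin, e.g., a guest framebuffer as
 * write-back:
 *
 *     int rc = hvm_set_mem_pinned_cacheattr(d, vram_gfn_start,
 *                                           vram_gfn_end, PAT_TYPE_WRBACK);
 *     if ( rc )
 *         return rc;    /* -EINVAL or -ENOMEM */
 */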