ia64/xen-unstable

view xen/arch/x86/hvm/mtrr.c @ 19237:07e65892fc8e

[VTD] Utilise the snoop control capability in shadow with VT-d code

We now compute the shadow PAT index in leaf page entries as follows:
1) No VT-d device assigned: leave the shadow PAT index as WB, already
handled in the shadow code.
2) Directly assigned MMIO area: let the shadow code compute the shadow
PAT from gMTRR=UC and the gPAT value.
3) Snoop control enabled: set the shadow PAT index to WB.
4) Snoop control disabled: let the shadow code compute the shadow
PAT from gMTRR and gPAT, already handled in the shadow code.

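The EPT side of this policy is epte_get_entry_emt() at the bottom of this
file; a minimal sketch of its decision (names as used there; setting *igmt
tells the caller to ignore the guest memory type):

    /* Sketch of the snoop-control policy in epte_get_entry_emt(). */
    if ( !iommu_enabled )            /* 1) no VT-d device assigned */
    {
        *igmt = 1;
        return MTRR_TYPE_WRBACK;
    }
    if ( direct_mmio )               /* 2) directly assigned MMIO area */
        return MTRR_TYPE_UNCACHABLE;
    if ( iommu_snoop )               /* 3) snoop control enabled */
    {
        *igmt = 1;
        return MTRR_TYPE_WRBACK;
    }
    /* 4) no snoop control: combine guest and host MTRR types. */
    gmtrr_mtype = get_mtrr_type(&v->arch.hvm_vcpu.mtrr, gfn << PAGE_SHIFT);
    hmtrr_mtype = get_mtrr_type(&mtrr_state, mfn << PAGE_SHIFT);
    return (gmtrr_mtype <= hmtrr_mtype) ? gmtrr_mtype : hmtrr_mtype;
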
Signed-off-by: Xin, Xiaohui <xiaohui.xin@intel.com>
author Keir Fraser <keir.fraser@citrix.com>
date Fri Feb 20 11:11:40 2009 +0000 (2009-02-20)
parents 175a425e9b55
children b671d568115f
line source
/*
 * mtrr.c: MTRR/PAT virtualization
 *
 * Copyright (c) 2007, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 */

#include <public/hvm/e820.h>
#include <xen/types.h>
#include <asm/e820.h>
#include <asm/paging.h>
#include <asm/p2m.h>
#include <xen/domain_page.h>
#include <asm/mtrr.h>
#include <asm/hvm/support.h>
#include <asm/hvm/cacheattr.h>

extern struct mtrr_state mtrr_state;

static uint64_t phys_base_msr_mask;
static uint64_t phys_mask_msr_mask;
static uint32_t size_or_mask;
static uint32_t size_and_mask;

/* Get page attribute fields (PAn) from PAT MSR. */
#define pat_cr_2_paf(pat_cr,n)  ((((uint64_t)pat_cr) >> ((n)<<3)) & 0xff)

/* PAT entry to PTE flags (PAT, PCD, PWT bits). */
static uint8_t pat_entry_2_pte_flags[8] = {
    0, _PAGE_PWT,
    _PAGE_PCD, _PAGE_PCD | _PAGE_PWT,
    _PAGE_PAT, _PAGE_PAT | _PAGE_PWT,
    _PAGE_PAT | _PAGE_PCD, _PAGE_PAT | _PAGE_PCD | _PAGE_PWT };

/* Effective mm type lookup table, according to MTRR and PAT. */
static uint8_t mm_type_tbl[MTRR_NUM_TYPES][PAT_TYPE_NUMS] = {
    /********PAT(UC,WC,RS,RS,WT,WP,WB,UC-)*/
    /* RS means reserved type(2,3), and type is hardcoded here */
    /*MTRR(UC):(UC,WC,RS,RS,UC,UC,UC,UC)*/
    {0, 1, 2, 2, 0, 0, 0, 0},
    /*MTRR(WC):(UC,WC,RS,RS,UC,UC,WC,WC)*/
    {0, 1, 2, 2, 0, 0, 1, 1},
    /*MTRR(RS):(RS,RS,RS,RS,RS,RS,RS,RS)*/
    {2, 2, 2, 2, 2, 2, 2, 2},
    /*MTRR(RS):(RS,RS,RS,RS,RS,RS,RS,RS)*/
    {2, 2, 2, 2, 2, 2, 2, 2},
    /*MTRR(WT):(UC,WC,RS,RS,WT,WP,WT,UC)*/
    {0, 1, 2, 2, 4, 5, 4, 0},
    /*MTRR(WP):(UC,WC,RS,RS,WT,WP,WP,WC)*/
    {0, 1, 2, 2, 4, 5, 5, 1},
    /*MTRR(WB):(UC,WC,RS,RS,WT,WP,WB,UC)*/
    {0, 1, 2, 2, 4, 5, 6, 0}
};

/*
 * Reverse lookup table, to find a pat type according to MTRR and effective
 * memory type. This table is dynamically generated.
 */
static uint8_t mtrr_epat_tbl[MTRR_NUM_TYPES][MEMORY_NUM_TYPES];

/* Lookup table for PAT entry of a given PAT value in host PAT. */
static uint8_t pat_entry_tbl[PAT_TYPE_NUMS];

static void get_mtrr_range(uint64_t base_msr, uint64_t mask_msr,
                           uint64_t *base, uint64_t *end)
{
    uint32_t mask_lo = (uint32_t)mask_msr;
    uint32_t mask_hi = (uint32_t)(mask_msr >> 32);
    uint32_t base_lo = (uint32_t)base_msr;
    uint32_t base_hi = (uint32_t)(base_msr >> 32);
    uint32_t size;

    if ( (mask_lo & 0x800) == 0 )
    {
        /* Invalid (i.e. free) range */
        *base = 0;
        *end = 0;
        return;
    }

    /* Work out the shifted address mask. */
    mask_lo = (size_or_mask | (mask_hi << (32 - PAGE_SHIFT)) |
               (mask_lo >> PAGE_SHIFT));

    /* This works correctly if size is a power of two (a contiguous range). */
    size = -mask_lo;
    *base = base_hi << (32 - PAGE_SHIFT) | base_lo >> PAGE_SHIFT;
    *end = *base + size - 1;
}

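/*
 * Report whether the variable-range MTRRs must be treated as overlapping.
 * The check is conservative: any two valid ranges that each span more than
 * one page, or ranges whose endpoints fall inside one another, are reported
 * as overlapping.
 */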
bool_t is_var_mtrr_overlapped(struct mtrr_state *m)
{
    int32_t seg, i;
    uint64_t phys_base, phys_mask, phys_base_pre, phys_mask_pre;
    uint64_t base_pre, end_pre, base, end;
    uint8_t num_var_ranges = (uint8_t)m->mtrr_cap;

    for ( i = 0; i < num_var_ranges; i++ )
    {
        phys_base_pre = ((uint64_t*)m->var_ranges)[i*2];
        phys_mask_pre = ((uint64_t*)m->var_ranges)[i*2 + 1];

        get_mtrr_range(phys_base_pre, phys_mask_pre,
                       &base_pre, &end_pre);

        for ( seg = i + 1; seg < num_var_ranges; seg ++ )
        {
            phys_base = ((uint64_t*)m->var_ranges)[seg*2];
            phys_mask = ((uint64_t*)m->var_ranges)[seg*2 + 1];

            get_mtrr_range(phys_base, phys_mask,
                           &base, &end);

            if ( ((base_pre != end_pre) && (base != end))
                 || ((base >= base_pre) && (base <= end_pre))
                 || ((end >= base_pre) && (end <= end_pre))
                 || ((base_pre >= base) && (base_pre <= end))
                 || ((end_pre >= base) && (end_pre <= end)) )
            {
                /* MTRR is overlapped. */
                return 1;
            }
        }
    }
    return 0;
}

#define MTRR_PHYSMASK_VALID_BIT  11
#define MTRR_PHYSMASK_SHIFT      12

#define MTRR_PHYSBASE_TYPE_MASK  0xff   /* lowest 8 bits */
#define MTRR_PHYSBASE_SHIFT      12
#define MTRR_VCNT                8

#define MTRRphysBase_MSR(reg) (0x200 + 2 * (reg))
#define MTRRphysMask_MSR(reg) (0x200 + 2 * (reg) + 1)
bool_t mtrr_var_range_msr_set(struct mtrr_state *m, uint32_t msr,
                              uint64_t msr_content);
bool_t mtrr_fix_range_msr_set(struct mtrr_state *m, uint32_t row,
                              uint64_t msr_content);

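/*
 * Build the dynamic lookup tables at boot: mtrr_epat_tbl (MTRR type x
 * effective type -> PAT type), pat_entry_tbl (PAT type -> entry index in
 * the host PAT), and the MSR masks derived from the physical address width.
 */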
static int hvm_mtrr_pat_init(void)
{
    extern uint64_t host_pat;
    unsigned int i, j, phys_addr;

    memset(&mtrr_epat_tbl, INVALID_MEM_TYPE, sizeof(mtrr_epat_tbl));
    for ( i = 0; i < MTRR_NUM_TYPES; i++ )
    {
        for ( j = 0; j < PAT_TYPE_NUMS; j++ )
        {
            int32_t tmp = mm_type_tbl[i][j];
            if ( (tmp >= 0) && (tmp < MEMORY_NUM_TYPES) )
                mtrr_epat_tbl[i][tmp] = j;
        }
    }

    memset(&pat_entry_tbl, INVALID_MEM_TYPE,
           PAT_TYPE_NUMS * sizeof(pat_entry_tbl[0]));
    for ( i = 0; i < PAT_TYPE_NUMS; i++ )
    {
        for ( j = 0; j < PAT_TYPE_NUMS; j++ )
        {
            if ( pat_cr_2_paf(host_pat, j) == i )
            {
                pat_entry_tbl[i] = j;
                break;
            }
        }
    }

    phys_addr = 36;
    if ( cpuid_eax(0x80000000) >= 0x80000008 )
        phys_addr = (uint8_t)cpuid_eax(0x80000008);

    phys_base_msr_mask = ~((((uint64_t)1) << phys_addr) - 1) | 0xf00UL;
    phys_mask_msr_mask = ~((((uint64_t)1) << phys_addr) - 1) | 0x7ffUL;

    size_or_mask = ~((1 << (phys_addr - PAGE_SHIFT)) - 1);
    size_and_mask = ~size_or_mask & 0xfff00000;

    return 0;
}
__initcall(hvm_mtrr_pat_init);

uint8_t pat_type_2_pte_flags(uint8_t pat_type)
{
    int32_t pat_entry = pat_entry_tbl[pat_type];

    /* INVALID_MEM_TYPE means that no entry in the host PAT matches the
     * given pat_type. This cannot happen if the host PAT covers all the
     * PAT types.
     */
    if ( likely(pat_entry != INVALID_MEM_TYPE) )
        return pat_entry_2_pte_flags[pat_entry];

    return pat_entry_2_pte_flags[pat_entry_tbl[PAT_TYPE_UNCACHABLE]];
}

int hvm_vcpu_cacheattr_init(struct vcpu *v)
{
    struct mtrr_state *m = &v->arch.hvm_vcpu.mtrr;

    memset(m, 0, sizeof(*m));

    m->var_ranges = xmalloc_array(struct mtrr_var_range, MTRR_VCNT);
    if ( m->var_ranges == NULL )
        return -ENOMEM;
    memset(m->var_ranges, 0, MTRR_VCNT * sizeof(struct mtrr_var_range));

    m->mtrr_cap = (1u << 10) | (1u << 8) | MTRR_VCNT;

    v->arch.hvm_vcpu.pat_cr =
        ((uint64_t)PAT_TYPE_WRBACK) |               /* PAT0: WB */
        ((uint64_t)PAT_TYPE_WRTHROUGH << 8) |       /* PAT1: WT */
        ((uint64_t)PAT_TYPE_UC_MINUS << 16) |       /* PAT2: UC- */
        ((uint64_t)PAT_TYPE_UNCACHABLE << 24) |     /* PAT3: UC */
        ((uint64_t)PAT_TYPE_WRBACK << 32) |         /* PAT4: WB */
        ((uint64_t)PAT_TYPE_WRTHROUGH << 40) |      /* PAT5: WT */
        ((uint64_t)PAT_TYPE_UC_MINUS << 48) |       /* PAT6: UC- */
        ((uint64_t)PAT_TYPE_UNCACHABLE << 56);      /* PAT7: UC */

    return 0;
}

void hvm_vcpu_cacheattr_destroy(struct vcpu *v)
{
    xfree(v->arch.hvm_vcpu.mtrr.var_ranges);
}

/*
 * Get MTRR memory type for physical address pa.
 */
static uint8_t get_mtrr_type(struct mtrr_state *m, paddr_t pa)
{
    int32_t addr, seg, index;
    uint8_t overlap_mtrr = 0;
    uint8_t overlap_mtrr_pos = 0;
    uint64_t phys_base;
    uint64_t phys_mask;
    uint8_t num_var_ranges = m->mtrr_cap & 0xff;

    if ( unlikely(!(m->enabled & 0x2)) )
        return MTRR_TYPE_UNCACHABLE;

    if ( (pa < 0x100000) && (m->enabled & 1) )
    {
        /* Fixed-range MTRRs take effect. */
        addr = (uint32_t) pa;
        if ( addr < 0x80000 )
        {
            seg = (addr >> 16);
            return m->fixed_ranges[seg];
        }
        else if ( addr < 0xc0000 )
        {
            seg = (addr - 0x80000) >> 14;
            index = (seg >> 3) + 1;
            seg &= 7;            /* select 0-7 segments */
            return m->fixed_ranges[index*8 + seg];
        }
        else
        {
            /* 0xC0000 --- 0x100000 */
            seg = (addr - 0xc0000) >> 12;
            index = (seg >> 3) + 3;
            seg &= 7;            /* select 0-7 segments */
            return m->fixed_ranges[index*8 + seg];
        }
    }

    /* Match with variable MTRRs. */
    for ( seg = 0; seg < num_var_ranges; seg++ )
    {
        phys_base = ((uint64_t*)m->var_ranges)[seg*2];
        phys_mask = ((uint64_t*)m->var_ranges)[seg*2 + 1];
        if ( phys_mask & (1 << MTRR_PHYSMASK_VALID_BIT) )
        {
            if ( ((uint64_t) pa & phys_mask) >> MTRR_PHYSMASK_SHIFT ==
                 (phys_base & phys_mask) >> MTRR_PHYSMASK_SHIFT )
            {
                if ( unlikely(m->overlapped) )
                {
                    overlap_mtrr |= 1 << (phys_base & MTRR_PHYSBASE_TYPE_MASK);
                    overlap_mtrr_pos = phys_base & MTRR_PHYSBASE_TYPE_MASK;
                }
                else
                {
                    /* If no overlap, return the found one */
                    return (phys_base & MTRR_PHYSBASE_TYPE_MASK);
                }
            }
        }
    }

    /* Overlapped or not found. */
    if ( unlikely(overlap_mtrr == 0) )
        return m->def_type;

    if ( likely(!(overlap_mtrr & ~( ((uint8_t)1) << overlap_mtrr_pos ))) )
        /* Covers both a single matching variable range and two or more
         * matches of identical type.
         */
        return overlap_mtrr_pos;

    if ( overlap_mtrr & 0x1 )
        /* Two or more match, one is UC. */
        return MTRR_TYPE_UNCACHABLE;

    if ( !(overlap_mtrr & 0xaf) )
        /* Two or more match, WT and WB. */
        return MTRR_TYPE_WRTHROUGH;

    /* Behaviour is undefined, but return the last overlapped type. */
    return overlap_mtrr_pos;
}

/*
 * Return the memory type from PAT.
 * NOTE: valid only when paging is enabled.
 *       Only 4K page PTE is supported now.
 */
static uint8_t page_pat_type(uint64_t pat_cr, uint32_t pte_flags)
{
    int32_t pat_entry;

    /* PCD/PWT -> bit 1/0 of PAT entry */
    pat_entry = ( pte_flags >> 3 ) & 0x3;
    /* PAT bit as bit 2 of PAT entry */
    if ( pte_flags & _PAGE_PAT )
        pat_entry |= 4;

    return (uint8_t)pat_cr_2_paf(pat_cr, pat_entry);
}

/*
 * Effective memory type for leaf page.
 */
static uint8_t effective_mm_type(struct mtrr_state *m,
                                 uint64_t pat,
                                 paddr_t gpa,
                                 uint32_t pte_flags,
                                 uint8_t gmtrr_mtype)
{
    uint8_t mtrr_mtype, pat_value, effective;

    /* If get_pat_flags() gives a dedicated MTRR type,
     * just use it.
     */
    if ( gmtrr_mtype == NO_HARDCODE_MEM_TYPE )
        mtrr_mtype = get_mtrr_type(m, gpa);
    else
        mtrr_mtype = gmtrr_mtype;

    pat_value = page_pat_type(pat, pte_flags);

    effective = mm_type_tbl[mtrr_mtype][pat_value];

    return effective;
}

uint32_t get_pat_flags(struct vcpu *v,
                       uint32_t gl1e_flags,
                       paddr_t gpaddr,
                       paddr_t spaddr,
                       uint8_t gmtrr_mtype)
{
    uint8_t guest_eff_mm_type;
    uint8_t shadow_mtrr_type;
    uint8_t pat_entry_value;
    uint64_t pat = v->arch.hvm_vcpu.pat_cr;
    struct mtrr_state *g = &v->arch.hvm_vcpu.mtrr;

    /* 1. Get the effective memory type of the guest physical address,
     * from the pair of guest MTRR and PAT.
     */
    guest_eff_mm_type = effective_mm_type(g, pat, gpaddr,
                                          gl1e_flags, gmtrr_mtype);
    /* 2. Get the memory type of the host physical address, from MTRR. */
    shadow_mtrr_type = get_mtrr_type(&mtrr_state, spaddr);

    /* 3. Find the memory type in PAT, from the host MTRR memory type
     * and the guest effective memory type.
     */
    pat_entry_value = mtrr_epat_tbl[shadow_mtrr_type][guest_eff_mm_type];
    /* If a conflict occurs (e.g. host MTRR is UC, guest memory type is
     * WB), set UC as the effective memory type. Here, returning
     * PAT_TYPE_UNCACHABLE will always set the effective memory type to UC.
     */
    if ( pat_entry_value == INVALID_MEM_TYPE )
    {
        struct domain *d = v->domain;
        p2m_type_t p2mt;
        gfn_to_mfn(d, paddr_to_pfn(gpaddr), &p2mt);
        if (p2m_is_ram(p2mt))
            gdprintk(XENLOG_WARNING,
                     "Conflict occurs for a given guest l1e flags:%x "
                     "at %"PRIx64" (the effective mm type:%d), "
                     "because the host mtrr type is:%d\n",
                     gl1e_flags, (uint64_t)gpaddr, guest_eff_mm_type,
                     shadow_mtrr_type);
        pat_entry_value = PAT_TYPE_UNCACHABLE;
    }
    /* 4. Get the pte flags. */
    return pat_type_2_pte_flags(pat_entry_value);
}

/* Helper functions for setting MTRR/PAT. */
bool_t pat_msr_set(uint64_t *pat, uint64_t msr_content)
{
    uint8_t *value = (uint8_t*)&msr_content;
    int32_t i;

    if ( *pat != msr_content )
    {
        for ( i = 0; i < 8; i++ )
            if ( unlikely(!(value[i] == 0 || value[i] == 1 ||
                            value[i] == 4 || value[i] == 5 ||
                            value[i] == 6 || value[i] == 7)) )
                return 0;

        *pat = msr_content;
    }

    return 1;
}

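/*
 * Validate and store IA32_MTRR_DEF_TYPE: the default type must be UC, WC,
 * WT, WP or WB, and only the type field and the E/FE enable bits may be set.
 */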
bool_t mtrr_def_type_msr_set(struct mtrr_state *m, uint64_t msr_content)
{
    uint8_t def_type = msr_content & 0xff;
    uint8_t enabled = (msr_content >> 10) & 0x3;

    if ( unlikely(!(def_type == 0 || def_type == 1 || def_type == 4 ||
                    def_type == 5 || def_type == 6)) )
    {
        HVM_DBG_LOG(DBG_LEVEL_MSR, "invalid MTRR def type:%x\n", def_type);
        return 0;
    }

    if ( unlikely(msr_content && (msr_content & ~0xcffUL)) )
    {
        HVM_DBG_LOG(DBG_LEVEL_MSR, "invalid msr content:%"PRIx64"\n",
                    msr_content);
        return 0;
    }

    m->enabled = enabled;
    m->def_type = def_type;

    return 1;
}

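/*
 * Validate and store one fixed-range MTRR MSR: each of its eight type
 * bytes must be UC, WC, WT, WP or WB.
 */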
bool_t mtrr_fix_range_msr_set(struct mtrr_state *m, uint32_t row,
                              uint64_t msr_content)
{
    uint64_t *fixed_range_base = (uint64_t *)m->fixed_ranges;

    if ( fixed_range_base[row] != msr_content )
    {
        uint8_t *range = (uint8_t*)&msr_content;
        int32_t i, type;

        for ( i = 0; i < 8; i++ )
        {
            type = range[i];
            if ( unlikely(!(type == 0 || type == 1 ||
                            type == 4 || type == 5 || type == 6)) )
                return 0;
        }

        fixed_range_base[row] = msr_content;
    }

    return 1;
}

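/*
 * Validate and store one variable-range MTRR MSR (PHYSBASEn or PHYSMASKn),
 * then recompute whether the variable ranges overlap.
 */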
bool_t mtrr_var_range_msr_set(struct mtrr_state *m, uint32_t msr,
                              uint64_t msr_content)
{
    uint32_t index;
    uint64_t msr_mask;
    uint64_t *var_range_base = (uint64_t*)m->var_ranges;

    index = msr - MSR_IA32_MTRR_PHYSBASE0;

    if ( var_range_base[index] != msr_content )
    {
        uint32_t type = msr_content & 0xff;

        msr_mask = (index & 1) ? phys_mask_msr_mask : phys_base_msr_mask;

        if ( unlikely(!(type == 0 || type == 1 ||
                        type == 4 || type == 5 || type == 6)) )
            return 0;

        if ( unlikely(msr_content && (msr_content & msr_mask)) )
        {
            HVM_DBG_LOG(DBG_LEVEL_MSR, "invalid msr content:%"PRIx64"\n",
                        msr_content);
            return 0;
        }

        var_range_base[index] = msr_content;
    }

    m->overlapped = is_var_mtrr_overlapped(m);

    return 1;
}

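/* Compare the MTRR/PAT state of two vcpus; returns 1 if they differ. */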
bool_t mtrr_pat_not_equal(struct vcpu *vd, struct vcpu *vs)
{
    struct mtrr_state *md = &vd->arch.hvm_vcpu.mtrr;
    struct mtrr_state *ms = &vs->arch.hvm_vcpu.mtrr;
    int32_t res;
    uint8_t num_var_ranges = (uint8_t)md->mtrr_cap;

    /* Test fixed ranges. */
    res = memcmp(md->fixed_ranges, ms->fixed_ranges,
                 NUM_FIXED_RANGES*sizeof(mtrr_type));
    if ( res )
        return 1;

    /* Test var ranges. */
    res = memcmp(md->var_ranges, ms->var_ranges,
                 num_var_ranges*sizeof(struct mtrr_var_range));
    if ( res )
        return 1;

    /* Test default type MSR. */
    if ( (md->def_type != ms->def_type)
         && (md->enabled != ms->enabled) )
        return 1;

    /* Test PAT. */
    if ( vd->arch.hvm_vcpu.pat_cr != vs->arch.hvm_vcpu.pat_cr )
        return 1;

    return 0;
}

void hvm_init_cacheattr_region_list(
    struct domain *d)
{
    INIT_LIST_HEAD(&d->arch.hvm_domain.pinned_cacheattr_ranges);
}

void hvm_destroy_cacheattr_region_list(
    struct domain *d)
{
    struct list_head *head = &d->arch.hvm_domain.pinned_cacheattr_ranges;
    struct hvm_mem_pinned_cacheattr_range *range;

    while ( !list_empty(head) )
    {
        range = list_entry(head->next,
                           struct hvm_mem_pinned_cacheattr_range,
                           list);
        list_del(&range->list);
        xfree(range);
    }
}

int32_t hvm_get_mem_pinned_cacheattr(
    struct domain *d,
    uint64_t guest_fn,
    uint32_t *type)
{
    struct hvm_mem_pinned_cacheattr_range *range;

    *type = 0;

    if ( !is_hvm_domain(d) )
        return 0;

    list_for_each_entry_rcu ( range,
                              &d->arch.hvm_domain.pinned_cacheattr_ranges,
                              list )
    {
        if ( (guest_fn >= range->start) && (guest_fn <= range->end) )
        {
            *type = range->type;
            return 1;
        }
    }

    return 0;
}

int32_t hvm_set_mem_pinned_cacheattr(
    struct domain *d,
    uint64_t gfn_start,
    uint64_t gfn_end,
    uint32_t type)
{
    struct hvm_mem_pinned_cacheattr_range *range;

    if ( !((type == PAT_TYPE_UNCACHABLE) ||
           (type == PAT_TYPE_WRCOMB) ||
           (type == PAT_TYPE_WRTHROUGH) ||
           (type == PAT_TYPE_WRPROT) ||
           (type == PAT_TYPE_WRBACK) ||
           (type == PAT_TYPE_UC_MINUS)) ||
         !is_hvm_domain(d) )
        return -EINVAL;

    range = xmalloc(struct hvm_mem_pinned_cacheattr_range);
    if ( range == NULL )
        return -ENOMEM;

    memset(range, 0, sizeof(*range));

    range->start = gfn_start;
    range->end = gfn_end;
    range->type = type;

    list_add_rcu(&range->list, &d->arch.hvm_domain.pinned_cacheattr_ranges);

    return 0;
}

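/* Save each vcpu's MTRR and PAT MSR state into the HVM save record. */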
static int hvm_save_mtrr_msr(struct domain *d, hvm_domain_context_t *h)
{
    int i;
    struct vcpu *v;
    struct hvm_hw_mtrr hw_mtrr;
    struct mtrr_state *mtrr_state;
    /* save mtrr&pat */
    for_each_vcpu(d, v)
    {
        mtrr_state = &v->arch.hvm_vcpu.mtrr;

        hw_mtrr.msr_pat_cr = v->arch.hvm_vcpu.pat_cr;

        hw_mtrr.msr_mtrr_def_type = mtrr_state->def_type
                                    | (mtrr_state->enabled << 10);
        hw_mtrr.msr_mtrr_cap = mtrr_state->mtrr_cap;

        for ( i = 0; i < MTRR_VCNT; i++ )
        {
            /* save physbase */
            hw_mtrr.msr_mtrr_var[i*2] =
                ((uint64_t*)mtrr_state->var_ranges)[i*2];
            /* save physmask */
            hw_mtrr.msr_mtrr_var[i*2+1] =
                ((uint64_t*)mtrr_state->var_ranges)[i*2+1];
        }

        for ( i = 0; i < NUM_FIXED_MSR; i++ )
            hw_mtrr.msr_mtrr_fixed[i] =
                ((uint64_t*)mtrr_state->fixed_ranges)[i];

        if ( hvm_save_entry(MTRR, v->vcpu_id, h, &hw_mtrr) != 0 )
            return 1;
    }
    return 0;
}

static int hvm_load_mtrr_msr(struct domain *d, hvm_domain_context_t *h)
{
    int vcpuid, i;
    struct vcpu *v;
    struct mtrr_state *mtrr_state;
    struct hvm_hw_mtrr hw_mtrr;

    vcpuid = hvm_load_instance(h);
    if ( vcpuid > MAX_VIRT_CPUS || (v = d->vcpu[vcpuid]) == NULL )
    {
        gdprintk(XENLOG_ERR, "HVM restore: domain has no vcpu %u\n", vcpuid);
        return -EINVAL;
    }

    if ( hvm_load_entry(MTRR, h, &hw_mtrr) != 0 )
        return -EINVAL;

    mtrr_state = &v->arch.hvm_vcpu.mtrr;

    pat_msr_set(&v->arch.hvm_vcpu.pat_cr, hw_mtrr.msr_pat_cr);

    mtrr_state->mtrr_cap = hw_mtrr.msr_mtrr_cap;

    for ( i = 0; i < NUM_FIXED_MSR; i++ )
        mtrr_fix_range_msr_set(mtrr_state, i, hw_mtrr.msr_mtrr_fixed[i]);

    for ( i = 0; i < MTRR_VCNT; i++ )
    {
        mtrr_var_range_msr_set(mtrr_state,
                               MTRRphysBase_MSR(i), hw_mtrr.msr_mtrr_var[i*2]);
        mtrr_var_range_msr_set(mtrr_state,
                               MTRRphysMask_MSR(i), hw_mtrr.msr_mtrr_var[i*2+1]);
    }

    mtrr_def_type_msr_set(mtrr_state, hw_mtrr.msr_mtrr_def_type);

    return 0;
}

HVM_REGISTER_SAVE_RESTORE(MTRR, hvm_save_mtrr_msr, hvm_load_mtrr_msr,
                          1, HVMSR_PER_VCPU);

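/*
 * Compute the effective memory type (EMT) for an EPT entry, following the
 * policy in the changeset message: force WB and ignore the guest memory
 * type (*igmt = 1) when no IOMMU is in use or when snoop control is
 * available; force UC for directly assigned MMIO; otherwise combine the
 * guest and host MTRR types.
 */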
uint8_t epte_get_entry_emt(
    struct domain *d, unsigned long gfn,
    unsigned long mfn, uint8_t *igmt, int direct_mmio)
{
    uint8_t gmtrr_mtype, hmtrr_mtype;
    uint32_t type;
    struct vcpu *v = current;

    *igmt = 0;

    if ( (current->domain != d) && ((v = d->vcpu[0]) == NULL) )
        return MTRR_TYPE_WRBACK;

    if ( !v->domain->arch.hvm_domain.params[HVM_PARAM_IDENT_PT] )
        return MTRR_TYPE_WRBACK;

    if ( (v == current) && v->domain->arch.hvm_domain.is_in_uc_mode )
        return MTRR_TYPE_UNCACHABLE;

    if ( !mfn_valid(mfn) )
        return MTRR_TYPE_UNCACHABLE;

    if ( hvm_get_mem_pinned_cacheattr(d, gfn, &type) )
        return type;

    if ( !iommu_enabled )
    {
        *igmt = 1;
        return MTRR_TYPE_WRBACK;
    }

    if ( direct_mmio )
        return MTRR_TYPE_UNCACHABLE;

    if ( iommu_snoop )
    {
        *igmt = 1;
        return MTRR_TYPE_WRBACK;
    }

    gmtrr_mtype = get_mtrr_type(&v->arch.hvm_vcpu.mtrr, (gfn << PAGE_SHIFT));
    hmtrr_mtype = get_mtrr_type(&mtrr_state, (mfn << PAGE_SHIFT));
    return ((gmtrr_mtype <= hmtrr_mtype) ? gmtrr_mtype : hmtrr_mtype);
}