_PAGE_PAT | _PAGE_PCD, _PAGE_PAT | _PAGE_PCD | _PAGE_PWT };
/* Effective mm type lookup table, according to MTRR and PAT. */
-static const uint8_t mm_type_tbl[MTRR_NUM_TYPES][PAT_TYPE_NUMS] = {
+static const uint8_t mm_type_tbl[MTRR_NUM_TYPES][X86_NUM_MT] = {
#define RS MEMORY_NUM_TYPES
#define UC MTRR_TYPE_UNCACHABLE
#define WB MTRR_TYPE_WRBACK
};
/* Lookup table for PAT entry of a given PAT value in host PAT. */
-static uint8_t __read_mostly pat_entry_tbl[PAT_TYPE_NUMS] =
- { [0 ... PAT_TYPE_NUMS-1] = INVALID_MEM_TYPE };
+static uint8_t __read_mostly pat_entry_tbl[X86_NUM_MT] =
+ { [0 ... X86_NUM_MT - 1] = INVALID_MEM_TYPE };
static int __init cf_check hvm_mtrr_pat_init(void)
{
for ( i = 0; i < MTRR_NUM_TYPES; i++ )
{
- for ( j = 0; j < PAT_TYPE_NUMS; j++ )
+ for ( j = 0; j < X86_NUM_MT; j++ )
{
unsigned int tmp = mm_type_tbl[i][j];
}
}
- for ( i = 0; i < PAT_TYPE_NUMS; i++ )
+ for ( i = 0; i < X86_NUM_MT; i++ )
{
- for ( j = 0; j < PAT_TYPE_NUMS; j++ )
+ for ( j = 0; j < X86_NUM_MT; j++ )
{
if ( pat_cr_2_paf(XEN_MSR_PAT, j) == i )
{
* given pat_type. If host PAT covers all the PAT types, it can't happen.
*/
if ( unlikely(pat_entry == INVALID_MEM_TYPE) )
- pat_entry = pat_entry_tbl[PAT_TYPE_UNCACHABLE];
+ pat_entry = pat_entry_tbl[X86_MT_UC];
return pat_entry_2_pte_flags[pat_entry];
}
m->mtrr_cap = (1u << 10) | (1u << 8) | num_var_ranges;
v->arch.hvm.pat_cr =
- ((uint64_t)PAT_TYPE_WRBACK) | /* PAT0: WB */
- ((uint64_t)PAT_TYPE_WRTHROUGH << 8) | /* PAT1: WT */
- ((uint64_t)PAT_TYPE_UC_MINUS << 16) | /* PAT2: UC- */
- ((uint64_t)PAT_TYPE_UNCACHABLE << 24) | /* PAT3: UC */
- ((uint64_t)PAT_TYPE_WRBACK << 32) | /* PAT4: WB */
- ((uint64_t)PAT_TYPE_WRTHROUGH << 40) | /* PAT5: WT */
- ((uint64_t)PAT_TYPE_UC_MINUS << 48) | /* PAT6: UC- */
- ((uint64_t)PAT_TYPE_UNCACHABLE << 56); /* PAT7: UC */
+ ((uint64_t)X86_MT_WB) | /* PAT0: WB */
+ ((uint64_t)X86_MT_WT << 8) | /* PAT1: WT */
+ ((uint64_t)X86_MT_UCM << 16) | /* PAT2: UC- */
+ ((uint64_t)X86_MT_UC << 24) | /* PAT3: UC */
+ ((uint64_t)X86_MT_WB << 32) | /* PAT4: WB */
+ ((uint64_t)X86_MT_WT << 40) | /* PAT5: WT */
+ ((uint64_t)X86_MT_UCM << 48) | /* PAT6: UC- */
+ ((uint64_t)X86_MT_UC << 56); /* PAT7: UC */
if ( is_hardware_domain(v->domain) )
{
*/
pat_entry_value = mtrr_epat_tbl[shadow_mtrr_type][guest_eff_mm_type];
-/* If conflit occurs(e.g host MTRR is UC, guest memory type is
+/* If a conflict occurs (e.g. host MTRR is UC, guest memory type is
- * WB),set UC as effective memory. Here, returning PAT_TYPE_UNCACHABLE will
+ * WB), set UC as effective memory. Here, returning X86_MT_UC will
* always set effective memory as UC.
*/
if ( pat_entry_value == INVALID_MEM_TYPE )
"because the host mtrr type is:%d\n",
gl1e_flags, (uint64_t)gpaddr, guest_eff_mm_type,
shadow_mtrr_type);
- pat_entry_value = PAT_TYPE_UNCACHABLE;
+ pat_entry_value = X86_MT_UC;
}
/* 4. Get the pte flags */
return pat_type_2_pte_flags(pat_entry_value);
p2m_memory_type_changed(d);
switch ( type )
{
- case PAT_TYPE_UC_MINUS:
+ case X86_MT_UCM:
/*
* For EPT we can also avoid the flush in this case;
* see epte_get_entry_emt().
*/
if ( hap_enabled(d) && cpu_has_vmx )
- case PAT_TYPE_UNCACHABLE:
+ case X86_MT_UC:
break;
/* fall through */
default:
rcu_read_unlock(&pinned_cacheattr_rcu_lock);
return -ENOENT;
- case PAT_TYPE_UC_MINUS:
- case PAT_TYPE_UNCACHABLE:
- case PAT_TYPE_WRBACK:
- case PAT_TYPE_WRCOMB:
- case PAT_TYPE_WRPROT:
- case PAT_TYPE_WRTHROUGH:
+ case X86_MT_UCM:
+ case X86_MT_UC:
+ case X86_MT_WB:
+ case X86_MT_WC:
+ case X86_MT_WP:
+ case X86_MT_WT:
break;
default:
list_add_rcu(&range->list, &d->arch.hvm.pinned_cacheattr_ranges);
p2m_memory_type_changed(d);
- if ( type != PAT_TYPE_WRBACK )
+ if ( type != X86_MT_WB )
flush_all(FLUSH_CACHE);
return 0;
* memory type are all UC.
*/
u64 uc_pat =
- ((uint64_t)PAT_TYPE_UNCACHABLE) | /* PAT0 */
- ((uint64_t)PAT_TYPE_UNCACHABLE << 8) | /* PAT1 */
- ((uint64_t)PAT_TYPE_UNCACHABLE << 16) | /* PAT2 */
- ((uint64_t)PAT_TYPE_UNCACHABLE << 24) | /* PAT3 */
- ((uint64_t)PAT_TYPE_UNCACHABLE << 32) | /* PAT4 */
- ((uint64_t)PAT_TYPE_UNCACHABLE << 40) | /* PAT5 */
- ((uint64_t)PAT_TYPE_UNCACHABLE << 48) | /* PAT6 */
- ((uint64_t)PAT_TYPE_UNCACHABLE << 56); /* PAT7 */
+ ((uint64_t)X86_MT_UC) | /* PAT0 */
+ ((uint64_t)X86_MT_UC << 8) | /* PAT1 */
+ ((uint64_t)X86_MT_UC << 16) | /* PAT2 */
+ ((uint64_t)X86_MT_UC << 24) | /* PAT3 */
+ ((uint64_t)X86_MT_UC << 32) | /* PAT4 */
+ ((uint64_t)X86_MT_UC << 40) | /* PAT5 */
+ ((uint64_t)X86_MT_UC << 48) | /* PAT6 */
+ ((uint64_t)X86_MT_UC << 56); /* PAT7 */
vmx_get_guest_pat(v, pat);
vmx_set_guest_pat(v, uc_pat);