ia64/xen-unstable
changeset 16093:0d7d6804af22
x86: AMD Fam10/11 adjustments
Signed-off-by: Jan Beulich <jbeulich@novell.com>
Sync up affected files with 2.6.23-rc9.
Signed-off-by: Keir Fraser <keir@xensource.com>
--- a/xen/arch/x86/apic.c	Thu Oct 11 10:21:55 2007 +0100
+++ b/xen/arch/x86/apic.c	Thu Oct 11 12:11:54 2007 +0100
@@ -737,7 +737,7 @@ static int __init detect_init_APIC (void
     switch (boot_cpu_data.x86_vendor) {
     case X86_VENDOR_AMD:
         if ((boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model > 1) ||
-            (boot_cpu_data.x86 == 15))
+            (boot_cpu_data.x86 >= 15 && boot_cpu_data.x86 <= 17))
             break;
         goto no_apic;
     case X86_VENDOR_INTEL:
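Note: families 15 through 17 are K8, Fam10h (0x10 == 16) and Fam11h (0x11 == 17). These values only appear after combining the base and extended family fields of CPUID leaf 1; a minimal sketch of that standard decoding (illustrative helper, not code from this patch):

#include <stdint.h>

/* Derive the effective AMD family from CPUID leaf 1 EAX: base family
 * is bits 11:8; when it reads 0xf, the extended family in bits 27:20
 * is added on top, so Fam10h/Fam11h show up as 0x10 and 0x11. */
static unsigned int effective_family(uint32_t cpuid1_eax)
{
    unsigned int family = (cpuid1_eax >> 8) & 0xf;

    if (family == 0xf)
        family += (cpuid1_eax >> 20) & 0xff;
    return family;
}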
--- a/xen/arch/x86/cpu/amd.c	Thu Oct 11 10:21:55 2007 +0100
+++ b/xen/arch/x86/cpu/amd.c	Thu Oct 11 12:11:54 2007 +0100
@@ -100,6 +100,8 @@ static void disable_c1_ramping(void)
     }
 }
 
+int force_mwait __cpuinitdata;
+
 static void __init init_amd(struct cpuinfo_x86 *c)
 {
     u32 l, h;
@@ -182,10 +184,7 @@ static void __init init_amd(struct cpuin
             f_vide();
             rdtscl(d2);
             d = d2-d;
-
-            /* Knock these two lines out if it debugs out ok */
-            printk(KERN_INFO "AMD K6 stepping B detected - ");
-            /* -- cut here -- */
+
             if (d > 20*K6_BUG_LOOP)
                 printk("system stability may be impaired when more than 32 MB are used.\n");
             else
@@ -279,6 +278,9 @@ static void __init init_amd(struct cpuin
 
     switch (c->x86) {
     case 15:
+    /* Use K8 tuning for Fam10h and Fam11h */
+    case 0x10:
+    case 0x11:
         set_bit(X86_FEATURE_K8, c->x86_capability);
         break;
     case 6:
@@ -305,8 +307,6 @@ static void __init init_amd(struct cpuin
 
     if (cpuid_eax(0x80000000) >= 0x80000008) {
         c->x86_max_cores = (cpuid_ecx(0x80000008) & 0xff) + 1;
-        if (c->x86_max_cores & (c->x86_max_cores - 1))
-            c->x86_max_cores = 1;
     }
 
     if (cpuid_eax(0x80000000) >= 0x80000007) {
@@ -317,15 +317,17 @@ static void __init init_amd(struct cpuin
 
 #ifdef CONFIG_X86_HT
     /*
-     * On a AMD dual core setup the lower bits of the APIC id
-     * distingush the cores.  Assumes number of cores is a power
-     * of two.
+     * On a AMD multi core setup the lower bits of the APIC id
+     * distingush the cores.
      */
     if (c->x86_max_cores > 1) {
         int cpu = smp_processor_id();
-        unsigned bits = 0;
-        while ((1 << bits) < c->x86_max_cores)
-            bits++;
+        unsigned bits = (cpuid_ecx(0x80000008) >> 12) & 0xf;
+
+        if (bits == 0) {
+            while ((1 << bits) < c->x86_max_cores)
+                bits++;
+        }
         cpu_core_id[cpu] = phys_proc_id[cpu] & ((1<<bits)-1);
         phys_proc_id[cpu] >>= bits;
         printk(KERN_INFO "CPU %d(%d) -> Core %d\n",
@@ -333,6 +335,13 @@ static void __init init_amd(struct cpuin
     }
 #endif
 
+    if (c->x86 == 0x10 && !force_mwait)
+        clear_bit(X86_FEATURE_MWAIT, c->x86_capability);
+
+    /* K6s reports MCEs but don't actually have all the MSRs */
+    if (c->x86 < 6)
+        clear_bit(X86_FEATURE_MCE, c->x86_capability);
+
     /* Prevent TSC drift in non single-processor, single-core platforms. */
     if ((smp_processor_id() == 1) && c1_ramping_may_cause_clock_drift(c))
         disable_c1_ramping();
@@ -340,7 +349,7 @@ static void __init init_amd(struct cpuin
     start_svm(c);
 }
 
-static unsigned int amd_size_cache(struct cpuinfo_x86 * c, unsigned int size)
+static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 * c, unsigned int size)
 {
     /* AMD errata T13 (order #21922) */
     if ((c->x86 == 6)) {
@@ -353,7 +362,7 @@ static unsigned int amd_size_cache(struc
     return size;
 }
 
-static struct cpu_dev amd_cpu_dev __initdata = {
+static struct cpu_dev amd_cpu_dev __cpuinitdata = {
     .c_vendor = "AMD",
     .c_ident = { "AuthenticAMD" },
     .c_models = {
@@ -378,5 +387,3 @@ int __init amd_init_cpu(void)
     cpu_devs[X86_VENDOR_AMD] = &amd_cpu_dev;
     return 0;
 }
-
-//early_arch_initcall(amd_init_cpu);
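Note: the topology change reads the APIC-ID core-bit width from CPUID 0x80000008 ECX[15:12] (ApicIdCoreIdSize) instead of assuming a power-of-two core count, falling back to bit-counting only when the field is zero. The same derivation in isolation (illustrative values, not the patch's code):

#include <stdint.h>
#include <stdio.h>

/* Split an initial APIC ID into package and core IDs the way the
 * patched init_amd() does: ECX[15:12] of CPUID 0x80000008 gives the
 * number of low APIC-ID bits selecting the core; if zero, count the
 * bits needed for the core count instead. */
static void split_apic_id(uint32_t apic_id, uint32_t cpuid_80000008_ecx)
{
    unsigned int max_cores = (cpuid_80000008_ecx & 0xff) + 1;
    unsigned int bits = (cpuid_80000008_ecx >> 12) & 0xf;

    if (bits == 0)
        while ((1u << bits) < max_cores)
            bits++;

    printf("core %u, package %u\n",
           apic_id & ((1u << bits) - 1), apic_id >> bits);
}

int main(void)
{
    split_apic_id(0x0b, 0x3003); /* assumed: 4 cores, ApicIdCoreIdSize=3 */
    return 0;
}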
--- a/xen/arch/x86/cpu/mcheck/mce.c	Thu Oct 11 10:21:55 2007 +0100
+++ b/xen/arch/x86/cpu/mcheck/mce.c	Thu Oct 11 12:11:54 2007 +0100
@@ -17,6 +17,8 @@
 int mce_disabled = 0;
 int nr_mce_banks;
 
+EXPORT_SYMBOL_GPL(nr_mce_banks); /* non-fatal.o */
+
 /* Handle unconfigured int18 (should never happen) */
 static fastcall void unexpected_machine_check(struct cpu_user_regs * regs, long error_code)
 {
@@ -34,8 +36,7 @@ void mcheck_init(struct cpuinfo_x86 *c)
 
     switch (c->x86_vendor) {
     case X86_VENDOR_AMD:
-        if (c->x86==6 || c->x86==15)
-            amd_mcheck_init(c);
+        amd_mcheck_init(c);
         break;
 
     case X86_VENDOR_INTEL:
@@ -61,16 +62,28 @@ void mcheck_init(struct cpuinfo_x86 *c)
     }
 }
 
-static int __init mcheck_disable(char *str)
+static unsigned long old_cr4 __initdata;
+
+void __init stop_mce(void)
+{
+    old_cr4 = read_cr4();
+    clear_in_cr4(X86_CR4_MCE);
+}
+
+void __init restart_mce(void)
+{
+    if (old_cr4 & X86_CR4_MCE)
+        set_in_cr4(X86_CR4_MCE);
+}
+
+static void __init mcheck_disable(char *str)
 {
     mce_disabled = 1;
-    return 0;
 }
 
-static int __init mcheck_enable(char *str)
+static void __init mcheck_enable(char *str)
 {
     mce_disabled = -1;
-    return 0;
 }
 
 custom_param("nomce", mcheck_disable);
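Note: stop_mce()/restart_mce() gate machine-check exception delivery with CR4.MCE, and the restore path only re-enables it if it was on to begin with. A user-space sketch of the same save/clear/conditionally-restore pattern (a plain variable stands in for the real control register; the bit position matches x86's CR4.MCE):

#include <stdint.h>

#define X86_CR4_MCE (1u << 6)   /* CR4 bit 6 gates #MC delivery */

static uint32_t fake_cr4 = X86_CR4_MCE; /* stand-in for the real CR4 */
static uint32_t old_cr4;

/* Save the current value, then mask machine-check exceptions. */
static void stop_mce_sketch(void)
{
    old_cr4 = fake_cr4;
    fake_cr4 &= ~X86_CR4_MCE;
}

/* Re-enable #MC only if it was enabled when we stopped it. */
static void restart_mce_sketch(void)
{
    if (old_cr4 & X86_CR4_MCE)
        fake_cr4 |= X86_CR4_MCE;
}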
--- a/xen/arch/x86/cpu/mtrr/amd.c	Thu Oct 11 10:21:55 2007 +0100
+++ b/xen/arch/x86/cpu/mtrr/amd.c	Thu Oct 11 12:11:54 2007 +0100
@@ -7,7 +7,7 @@
 
 static void
 amd_get_mtrr(unsigned int reg, unsigned long *base,
-             unsigned int *size, mtrr_type * type)
+             unsigned long *size, mtrr_type * type)
 {
     unsigned long low, high;
 
--- a/xen/arch/x86/cpu/mtrr/cyrix.c	Thu Oct 11 10:21:55 2007 +0100
+++ b/xen/arch/x86/cpu/mtrr/cyrix.c	Thu Oct 11 12:11:54 2007 +0100
@@ -9,7 +9,7 @@ int arr3_protected;
 
 static void
 cyrix_get_arr(unsigned int reg, unsigned long *base,
-              unsigned int *size, mtrr_type * type)
+              unsigned long *size, mtrr_type * type)
 {
     unsigned long flags;
     unsigned char arr, ccr3, rcr, shift;
@@ -77,7 +77,7 @@ cyrix_get_arr(unsigned int reg, unsigned
 }
 
 static int
-cyrix_get_free_region(unsigned long base, unsigned long size)
+cyrix_get_free_region(unsigned long base, unsigned long size, int replace_reg)
 /*  [SUMMARY] Get a free ARR.
     <base> The starting (base) address of the region.
     <size> The size (in bytes) of the region.
@@ -86,9 +86,24 @@ cyrix_get_free_region(unsigned long base
 {
     int i;
     mtrr_type ltype;
-    unsigned long lbase;
-    unsigned int lsize;
+    unsigned long lbase, lsize;
 
+    switch (replace_reg) {
+    case 7:
+        if (size < 0x40)
+            break;
+    case 6:
+    case 5:
+    case 4:
+        return replace_reg;
+    case 3:
+        if (arr3_protected)
+            break;
+    case 2:
+    case 1:
+    case 0:
+        return replace_reg;
+    }
     /* If we are to set up a region >32M then look at ARR7 immediately */
     if (size > 0x2000) {
         cyrix_get_arr(7, &lbase, &lsize, &ltype);
@@ -121,7 +136,7 @@ static void prepare_set(void)
     /*  Save value of CR4 and clear Page Global Enable (bit 7)  */
     if ( cpu_has_pge ) {
         cr4 = read_cr4();
-        write_cr4(cr4 & (unsigned char) ~(1 << 7));
+        write_cr4(cr4 & ~X86_CR4_PGE);
     }
 
     /*  Disable and flush caches. Note that wbinvd flushes the TLBs as
@@ -214,16 +229,16 @@ static void cyrix_set_arr(unsigned int r
 
 typedef struct {
     unsigned long base;
-    unsigned int size;
+    unsigned long size;
     mtrr_type type;
 } arr_state_t;
 
-static arr_state_t arr_state[8] __devinitdata = {
+static arr_state_t arr_state[8] = {
     {0UL, 0UL, 0UL}, {0UL, 0UL, 0UL}, {0UL, 0UL, 0UL}, {0UL, 0UL, 0UL},
     {0UL, 0UL, 0UL}, {0UL, 0UL, 0UL}, {0UL, 0UL, 0UL}, {0UL, 0UL, 0UL}
 };
 
-static unsigned char ccr_state[7] __devinitdata = { 0, 0, 0, 0, 0, 0, 0 };
+static unsigned char ccr_state[7] = { 0, 0, 0, 0, 0, 0, 0 };
 
 static void cyrix_set_all(void)
 {
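Note: the new replace_reg switch deliberately falls through: ARR7 is only accepted as a replacement target for large regions (size >= 0x40), and ARR3 only when it is not reserved (arr3_protected). A standalone sketch mirroring the same selection logic (illustrative helper, same constraints as the patch):

/* Pick a Cyrix ARR to reuse, or -1 so the caller scans for a free one. */
static int pick_replace_arr(int replace_reg, unsigned long size,
                            int arr3_is_protected)
{
    switch (replace_reg) {
    case 7:
        if (size < 0x40)
            break;          /* ARR7 reserved for large regions */
        /* fall through */
    case 6: case 5: case 4:
        return replace_reg;
    case 3:
        if (arr3_is_protected)
            break;          /* ARR3 reserved (e.g. for SMM), keep searching */
        /* fall through */
    case 2: case 1: case 0:
        return replace_reg;
    }
    return -1;              /* fall back to the free-region scan */
}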
--- a/xen/arch/x86/cpu/mtrr/generic.c	Thu Oct 11 10:21:55 2007 +0100
+++ b/xen/arch/x86/cpu/mtrr/generic.c	Thu Oct 11 12:11:54 2007 +0100
@@ -15,21 +15,34 @@ struct mtrr_state {
     struct mtrr_var_range *var_ranges;
     mtrr_type fixed_ranges[NUM_FIXED_RANGES];
     unsigned char enabled;
+    unsigned char have_fixed;
     mtrr_type def_type;
 };
 
+struct fixed_range_block {
+    int base_msr; /* start address of an MTRR block */
+    int ranges;   /* number of MTRRs in this block */
+};
+
+static struct fixed_range_block fixed_range_blocks[] = {
+    { MTRRfix64K_00000_MSR, 1 }, /* one 64k MTRR */
+    { MTRRfix16K_80000_MSR, 2 }, /* two 16k MTRRs */
+    { MTRRfix4K_C0000_MSR,  8 }, /* eight 4k MTRRs */
+    {}
+};
+
 static unsigned long smp_changes_mask;
 static struct mtrr_state mtrr_state = {};
 
 /*  Get the MSR pair relating to a var range  */
-static void __init
+static void
 get_mtrr_var_range(unsigned int index, struct mtrr_var_range *vr)
 {
     rdmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi);
     rdmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi);
 }
 
-static void __init
+static void
 get_fixed_ranges(mtrr_type * frs)
 {
     unsigned int *p = (unsigned int *) frs;
@@ -43,6 +56,12 @@ get_fixed_ranges(mtrr_type * frs)
         rdmsr(MTRRfix4K_C0000_MSR + i, p[6 + i * 2], p[7 + i * 2]);
 }
 
+void mtrr_save_fixed_ranges(void *info)
+{
+    if (cpu_has_mtrr)
+        get_fixed_ranges(mtrr_state.fixed_ranges);
+}
+
 /*  Grab all of the MTRR state for this CPU into *state  */
 void __init get_mtrr_state(void)
 {
@@ -58,9 +77,13 @@ void __init get_mtrr_state(void)
     }
     vrs = mtrr_state.var_ranges;
 
+    rdmsr(MTRRcap_MSR, lo, dummy);
+    mtrr_state.have_fixed = (lo >> 8) & 1;
+
     for (i = 0; i < num_var_ranges; i++)
         get_mtrr_var_range(i, &vrs[i]);
-    get_fixed_ranges(mtrr_state.fixed_ranges);
+    if (mtrr_state.have_fixed)
+        get_fixed_ranges(mtrr_state.fixed_ranges);
 
     rdmsr(MTRRdefType_MSR, lo, dummy);
     mtrr_state.def_type = (lo & 0xff);
@@ -95,7 +118,45 @@ void mtrr_wrmsr(unsigned msr, unsigned a
            smp_processor_id(), msr, a, b);
 }
 
-int generic_get_free_region(unsigned long base, unsigned long size)
+/**
+ * Enable and allow read/write of extended fixed-range MTRR bits on K8 CPUs
+ * see AMD publication no. 24593, chapter 3.2.1 for more information
+ */
+static inline void k8_enable_fixed_iorrs(void)
+{
+    unsigned lo, hi;
+
+    rdmsr(MSR_K8_SYSCFG, lo, hi);
+    mtrr_wrmsr(MSR_K8_SYSCFG, lo
+               | K8_MTRRFIXRANGE_DRAM_ENABLE
+               | K8_MTRRFIXRANGE_DRAM_MODIFY, hi);
+}
+
+/**
+ * Checks and updates an fixed-range MTRR if it differs from the value it
+ * should have. If K8 extenstions are wanted, update the K8 SYSCFG MSR also.
+ * see AMD publication no. 24593, chapter 7.8.1, page 233 for more information
+ * \param msr MSR address of the MTTR which should be checked and updated
+ * \param changed pointer which indicates whether the MTRR needed to be changed
+ * \param msrwords pointer to the MSR values which the MSR should have
+ */
+static void set_fixed_range(int msr, int * changed, unsigned int * msrwords)
+{
+    unsigned lo, hi;
+
+    rdmsr(msr, lo, hi);
+
+    if (lo != msrwords[0] || hi != msrwords[1]) {
+        if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
+            boot_cpu_data.x86 == 15 &&
+            ((msrwords[0] | msrwords[1]) & K8_MTRR_RDMEM_WRMEM_MASK))
+            k8_enable_fixed_iorrs();
+        mtrr_wrmsr(msr, msrwords[0], msrwords[1]);
+        *changed = TRUE;
+    }
+}
+
+int generic_get_free_region(unsigned long base, unsigned long size, int replace_reg)
 /*  [SUMMARY] Get a free MTRR.
     <base> The starting (base) address of the region.
     <size> The size (in bytes) of the region.
@@ -104,10 +165,11 @@ int generic_get_free_region(unsigned lon
 {
     int i, max;
     mtrr_type ltype;
-    unsigned long lbase;
-    unsigned lsize;
+    unsigned long lbase, lsize;
 
     max = num_var_ranges;
+    if (replace_reg >= 0 && replace_reg < max)
+        return replace_reg;
     for (i = 0; i < max; ++i) {
         mtrr_if->get(i, &lbase, &lsize, &ltype);
         if (lsize == 0)
@@ -117,7 +179,7 @@ int generic_get_free_region(unsigned lon
 }
 
 static void generic_get_mtrr(unsigned int reg, unsigned long *base,
-                             unsigned int *size, mtrr_type * type)
+                             unsigned long *size, mtrr_type *type)
 {
     unsigned int mask_lo, mask_hi, base_lo, base_hi;
 
@@ -143,36 +205,21 @@ static void generic_get_mtrr(unsigned in
     *type = base_lo & 0xff;
 }
 
+/**
+ * Checks and updates the fixed-range MTRRs if they differ from the saved set
+ * \param frs pointer to fixed-range MTRR values, saved by get_fixed_ranges()
+ */
 static int set_fixed_ranges(mtrr_type * frs)
 {
-    unsigned int *p = (unsigned int *) frs;
+    unsigned long long *saved = (unsigned long long *) frs;
     int changed = FALSE;
-    int i;
-    unsigned int lo, hi;
-
-    rdmsr(MTRRfix64K_00000_MSR, lo, hi);
-    if (p[0] != lo || p[1] != hi) {
-        mtrr_wrmsr(MTRRfix64K_00000_MSR, p[0], p[1]);
-        changed = TRUE;
-    }
+    int block=-1, range;
 
-    for (i = 0; i < 2; i++) {
-        rdmsr(MTRRfix16K_80000_MSR + i, lo, hi);
-        if (p[2 + i * 2] != lo || p[3 + i * 2] != hi) {
-            mtrr_wrmsr(MTRRfix16K_80000_MSR + i, p[2 + i * 2],
-                       p[3 + i * 2]);
-            changed = TRUE;
-        }
-    }
+    while (fixed_range_blocks[++block].ranges)
+        for (range=0; range < fixed_range_blocks[block].ranges; range++)
+            set_fixed_range(fixed_range_blocks[block].base_msr + range,
+                            &changed, (unsigned int *) saved++);
 
-    for (i = 0; i < 8; i++) {
-        rdmsr(MTRRfix4K_C0000_MSR + i, lo, hi);
-        if (p[6 + i * 2] != lo || p[7 + i * 2] != hi) {
-            mtrr_wrmsr(MTRRfix4K_C0000_MSR + i, p[6 + i * 2],
-                       p[7 + i * 2]);
-            changed = TRUE;
-        }
-    }
     return changed;
 }
 
@@ -202,7 +249,9 @@ static int set_mtrr_var_ranges(unsigned
     return changed;
 }
 
-static unsigned long set_mtrr_state(u32 deftype_lo, u32 deftype_hi)
+static u32 deftype_lo, deftype_hi;
+
+static unsigned long set_mtrr_state(void)
 /*  [SUMMARY] Set the MTRR state for this CPU.
     <state> The MTRR state information to read.
     <ctxt> Some relevant CPU context.
@@ -217,14 +266,14 @@ static unsigned long set_mtrr_state(u32
         if (set_mtrr_var_ranges(i, &mtrr_state.var_ranges[i]))
             change_mask |= MTRR_CHANGE_MASK_VARIABLE;
 
-    if (set_fixed_ranges(mtrr_state.fixed_ranges))
+    if (mtrr_state.have_fixed && set_fixed_ranges(mtrr_state.fixed_ranges))
         change_mask |= MTRR_CHANGE_MASK_FIXED;
 
     /*  Set_mtrr_restore restores the old value of MTRRdefType,
         so to set it we fiddle with the saved value  */
     if ((deftype_lo & 0xff) != mtrr_state.def_type
         || ((deftype_lo & 0xc00) >> 10) != mtrr_state.enabled) {
-        deftype_lo |= (mtrr_state.def_type | mtrr_state.enabled << 10);
+        deftype_lo = (deftype_lo & ~0xcff) | mtrr_state.def_type | (mtrr_state.enabled << 10);
         change_mask |= MTRR_CHANGE_MASK_DEFTYPE;
     }
 
@@ -233,7 +282,6 @@ static unsigned long set_mtrr_state(u32
 
 
 static unsigned long cr4 = 0;
-static u32 deftype_lo, deftype_hi;
 static DEFINE_SPINLOCK(set_atomicity_lock);
 
 /*
@@ -271,7 +319,7 @@ static void prepare_set(void)
     rdmsr(MTRRdefType_MSR, deftype_lo, deftype_hi);
 
     /*  Disable MTRRs, and set the default type to uncached  */
-    mtrr_wrmsr(MTRRdefType_MSR, deftype_lo & 0xf300UL, deftype_hi);
+    mtrr_wrmsr(MTRRdefType_MSR, deftype_lo & ~0xcff, deftype_hi);
 }
 
 static void post_set(void)
@@ -300,7 +348,7 @@ static void generic_set_all(void)
     prepare_set();
 
     /*  Actually set the state  */
-    mask = set_mtrr_state(deftype_lo,deftype_hi);
+    mask = set_mtrr_state();
 
     post_set();
     local_irq_restore(flags);
@@ -366,7 +414,7 @@ int generic_validate_add_page(unsigned l
             printk(KERN_WARNING "mtrr: base(0x%lx000) is not 4 MiB aligned\n", base);
             return -EINVAL;
         }
-        if (!(base + size < 0x70000000 || base > 0x7003FFFF) &&
+        if (!(base + size < 0x70000 || base > 0x7003F) &&
             (type == MTRR_TYPE_WRCOMB
              || type == MTRR_TYPE_WRBACK)) {
             printk(KERN_WARNING "mtrr: writable mtrr between 0x70000000 and 0x7003FFFF may hang the CPU.\n");
@@ -374,11 +422,6 @@ int generic_validate_add_page(unsigned l
         }
     }
 
-    if (base + size < 0x100) {
-        printk(KERN_WARNING "mtrr: cannot set region below 1 MiB (0x%lx000,0x%lx000)\n",
-               base, size);
-        return -EINVAL;
-    }
     /*  Check upper bits of base and last are equal and lower bits are 0
         for base and 1 for last  */
     last = base + size - 1;
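Note: fixed_range_blocks[] encodes the standard x86 fixed-range MTRR layout, eleven MSRs holding eight type bytes each: one 64K MSR covering 0-512K, two 16K MSRs covering 512K-768K, and eight 4K MSRs covering 768K-1M. A small sketch walking the same table and printing the span each MSR covers (the MSR numbers and block shape come from this patch; the walk itself is illustrative):

#include <stdio.h>

struct frb { int base_msr; int ranges; unsigned long bytes_per_msr; };

/* Same three blocks as the patch, annotated with the memory each MSR
 * covers (8 sub-ranges per MSR, one type byte per sub-range). */
static const struct frb blocks[] = {
    { 0x250, 1, 8 * 64 * 1024 }, /* MTRRfix64K_00000: 0x00000-0x7ffff */
    { 0x258, 2, 8 * 16 * 1024 }, /* MTRRfix16K_8xxxx: 0x80000-0xbffff */
    { 0x268, 8, 8 *  4 * 1024 }, /* MTRRfix4K_Cxxxx:  0xc0000-0xfffff */
    {}
};

int main(void)
{
    unsigned long addr = 0;
    for (int b = 0; blocks[b].ranges; b++)
        for (int r = 0; r < blocks[b].ranges; r++) {
            printf("MSR 0x%x: 0x%05lx-0x%05lx\n", blocks[b].base_msr + r,
                   addr, addr + blocks[b].bytes_per_msr - 1);
            addr += blocks[b].bytes_per_msr;
        }
    return 0; /* loop ends with addr == 0x100000: fixed ranges span 0-1MB */
}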
--- a/xen/arch/x86/cpu/mtrr/main.c	Thu Oct 11 10:21:55 2007 +0100
+++ b/xen/arch/x86/cpu/mtrr/main.c	Thu Oct 11 12:11:54 2007 +0100
@@ -43,19 +43,21 @@
 #include "mtrr.h"
 
 /* No blocking mutexes in Xen. Spin instead. */
-#define DECLARE_MUTEX(_m) DEFINE_SPINLOCK(_m)
-#define down(_m) spin_lock(_m)
-#define up(_m) spin_unlock(_m)
+#define DEFINE_MUTEX(_m) DEFINE_SPINLOCK(_m)
+#define mutex_lock(_m) spin_lock(_m)
+#define mutex_unlock(_m) spin_unlock(_m)
 #define lock_cpu_hotplug() ((void)0)
 #define unlock_cpu_hotplug() ((void)0)
 #define dump_stack() ((void)0)
+#define get_cpu() smp_processor_id()
+#define put_cpu() do {} while(0)
 
 u32 num_var_ranges = 0;
 
 unsigned int *usage_table;
-static DECLARE_MUTEX(mtrr_sem);
+static DEFINE_MUTEX(mtrr_mutex);
 
-u32 size_or_mask, size_and_mask;
+u64 size_or_mask, size_and_mask;
 
 static struct mtrr_ops * mtrr_ops[X86_VENDOR_NUM] = {};
 
@@ -70,7 +72,7 @@ extern int arr3_protected;
 #define arr3_protected 0
 #endif
 
-static char *mtrr_strings[MTRR_NUM_TYPES] =
+static const char *mtrr_strings[MTRR_NUM_TYPES] =
 {
     "uncachable",      /* 0 */
     "write-combining", /* 1 */
@@ -81,7 +83,7 @@ static char *mtrr_strings[MTRR_NUM_TYPES
     "write-back",      /* 6 */
 };
 
-char *mtrr_attrib_to_str(int x)
+const char *mtrr_attrib_to_str(int x)
 {
     return (x <= 6) ? mtrr_strings[x] : "?";
 }
@@ -167,6 +169,13 @@ static void ipi_handler(void *info)
 
 #endif
 
+static inline int types_compatible(mtrr_type type1, mtrr_type type2) {
+    return type1 == MTRR_TYPE_UNCACHABLE ||
+           type2 == MTRR_TYPE_UNCACHABLE ||
+           (type1 == MTRR_TYPE_WRTHROUGH && type2 == MTRR_TYPE_WRBACK) ||
+           (type1 == MTRR_TYPE_WRBACK && type2 == MTRR_TYPE_WRTHROUGH);
+}
+
 /**
  * set_mtrr - update mtrrs on all processors
  * @reg: mtrr in question
@@ -217,6 +226,8 @@ static void set_mtrr(unsigned int reg, u
     data.smp_size = size;
     data.smp_type = type;
     atomic_set(&data.count, num_booting_cpus() - 1);
+    /* make sure data.count is visible before unleashing other CPUs */
+    smp_wmb();
     atomic_set(&data.gate,0);
 
     /*  Start the ball rolling on other CPUs  */
@@ -230,6 +241,7 @@ static void set_mtrr(unsigned int reg, u
 
     /* ok, reset count and toggle gate */
     atomic_set(&data.count, num_booting_cpus() - 1);
+    smp_wmb();
     atomic_set(&data.gate,1);
 
     /* do our MTRR business */
@@ -248,6 +260,7 @@ static void set_mtrr(unsigned int reg, u
         cpu_relax();
 
     atomic_set(&data.count, num_booting_cpus() - 1);
+    smp_wmb();
     atomic_set(&data.gate,0);
 
     /*
@@ -262,8 +275,8 @@ static void set_mtrr(unsigned int reg, u
 
 /**
  * mtrr_add_page - Add a memory type region
- * @base: Physical base address of region in pages (4 KB)
- * @size: Physical size of region in pages (4 KB)
+ * @base: Physical base address of region in pages (in units of 4 kB!)
+ * @size: Physical size of region in pages (4 kB)
  * @type: Type of MTRR desired
  * @increment: If this is true do usage counting on the region
 
@@ -299,11 +312,9 @@ static void set_mtrr(unsigned int reg, u
 int mtrr_add_page(unsigned long base, unsigned long size,
                   unsigned int type, char increment)
 {
-    int i;
+    int i, replace, error;
     mtrr_type ltype;
-    unsigned long lbase;
-    unsigned int lsize;
-    int error;
+    unsigned long lbase, lsize;
 
     if (!mtrr_if)
         return -ENXIO;
@@ -323,34 +334,47 @@ int mtrr_add_page(unsigned long base, un
         return -ENOSYS;
     }
 
+    if (!size) {
+        printk(KERN_WARNING "mtrr: zero sized request\n");
+        return -EINVAL;
+    }
+
     if (base & size_or_mask || size & size_or_mask) {
         printk(KERN_WARNING "mtrr: base or size exceeds the MTRR width\n");
         return -EINVAL;
     }
 
     error = -EINVAL;
+    replace = -1;
 
     /* No CPU hotplug when we change MTRR entries */
     lock_cpu_hotplug();
     /*  Search for existing MTRR  */
-    down(&mtrr_sem);
+    mutex_lock(&mtrr_mutex);
     for (i = 0; i < num_var_ranges; ++i) {
         mtrr_if->get(i, &lbase, &lsize, &ltype);
-        if (base >= lbase + lsize)
-            continue;
-        if ((base < lbase) && (base + size <= lbase))
+        if (!lsize || base > lbase + lsize - 1 || base + size - 1 < lbase)
             continue;
         /*  At this point we know there is some kind of overlap/enclosure  */
-        if ((base < lbase) || (base + size > lbase + lsize)) {
+        if (base < lbase || base + size - 1 > lbase + lsize - 1) {
+            if (base <= lbase && base + size - 1 >= lbase + lsize - 1) {
+                /*  New region encloses an existing region  */
+                if (type == ltype) {
+                    replace = replace == -1 ? i : -2;
+                    continue;
+                }
+                else if (types_compatible(type, ltype))
+                    continue;
+            }
             printk(KERN_WARNING
                    "mtrr: 0x%lx000,0x%lx000 overlaps existing"
-                   " 0x%lx000,0x%x000\n", base, size, lbase,
+                   " 0x%lx000,0x%lx000\n", base, size, lbase,
                    lsize);
             goto out;
         }
         /*  New region is enclosed by an existing region  */
         if (ltype != type) {
-            if (type == MTRR_TYPE_UNCACHABLE)
+            if (types_compatible(type, ltype))
                 continue;
             printk (KERN_WARNING "mtrr: type mismatch for %lx000,%lx000 old: %s new: %s\n",
                     base, size, mtrr_attrib_to_str(ltype),
@@ -363,15 +387,23 @@ int mtrr_add_page(unsigned long base, un
         goto out;
     }
     /*  Search for an empty MTRR  */
-    i = mtrr_if->get_free_region(base, size);
+    i = mtrr_if->get_free_region(base, size, replace);
     if (i >= 0) {
         set_mtrr(i, base, size, type);
-        usage_table[i] = 1;
+        if (likely(replace < 0))
+            usage_table[i] = 1;
+        else {
+            usage_table[i] = usage_table[replace] + !!increment;
+            if (unlikely(replace != i)) {
+                set_mtrr(replace, 0, 0, 0);
+                usage_table[replace] = 0;
+            }
+        }
     } else
         printk(KERN_INFO "mtrr: no more MTRRs available\n");
     error = i;
  out:
-    up(&mtrr_sem);
+    mutex_unlock(&mtrr_mutex);
     unlock_cpu_hotplug();
     return error;
 }
@@ -454,8 +486,7 @@ int mtrr_del_page(int reg, unsigned long
 {
     int i, max;
     mtrr_type ltype;
-    unsigned long lbase;
-    unsigned int lsize;
+    unsigned long lbase, lsize;
     int error = -EINVAL;
 
     if (!mtrr_if)
@@ -464,7 +495,7 @@ int mtrr_del_page(int reg, unsigned long
     max = num_var_ranges;
     /* No CPU hotplug when we change MTRR entries */
     lock_cpu_hotplug();
-    down(&mtrr_sem);
+    mutex_lock(&mtrr_mutex);
     if (reg < 0) {
         /*  Search for existing MTRR  */
         for (i = 0; i < max; ++i) {
@@ -503,7 +534,7 @@ int mtrr_del_page(int reg, unsigned long
     set_mtrr(reg, 0, 0, 0);
     error = reg;
  out:
-    up(&mtrr_sem);
+    mutex_unlock(&mtrr_mutex);
     unlock_cpu_hotplug();
     return error;
 }
@@ -554,7 +585,7 @@ static void __init init_ifs(void)
 struct mtrr_value {
     mtrr_type ltype;
     unsigned long lbase;
-    unsigned int lsize;
+    unsigned long lsize;
 };
 
 /**
@@ -587,8 +618,8 @@ void __init mtrr_bp_init(void)
              boot_cpu_data.x86_mask == 0x4))
            phys_addr = 36;
 
-        size_or_mask = ~((1 << (phys_addr - PAGE_SHIFT)) - 1);
-        size_and_mask = ~size_or_mask & 0xfff00000;
+        size_or_mask = ~((1ULL << (phys_addr - PAGE_SHIFT)) - 1);
+        size_and_mask = ~size_or_mask & 0xfffff00000ULL;
     } else if (boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR &&
                boot_cpu_data.x86 == 6) {
         /* VIA C* family have Intel style MTRRs, but
@@ -635,7 +666,7 @@ void mtrr_ap_init(void)
     if (!mtrr_if || !use_intel())
         return;
     /*
-     * Ideally we should hold mtrr_sem here to avoid mtrr entries changed,
+     * Ideally we should hold mtrr_mutex here to avoid mtrr entries changed,
      * but this routine will be called in cpu boot time, holding the lock
     * breaks it. This routine is called in two cases: 1.very earily time
     * of software resume, when there absolutely isn't mtrr entry changes;
@@ -649,6 +680,20 @@ void mtrr_ap_init(void)
     local_irq_restore(flags);
 }
 
+/**
+ * Save current fixed-range MTRR state of the BSP
+ */
+void mtrr_save_state(void)
+{
+    int cpu = get_cpu();
+
+    if (cpu == 0)
+        mtrr_save_fixed_ranges(NULL);
+    else
+        on_selected_cpus(cpumask_of_cpu(0), mtrr_save_fixed_ranges, NULL, 1, 1);
+    put_cpu();
+}
+
 static int __init mtrr_init_finialize(void)
 {
     if (!mtrr_if)
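Note: the rewritten overlap test in mtrr_add_page() compares inclusive end addresses (base + size - 1), so a region ending exactly at the top of the address space cannot wrap to 0 and be mistaken for "no overlap". A sketch demonstrating the failure mode the old open-ended form had (hypothetical page numbers, 32-bit arithmetic for illustration):

#include <stdint.h>
#include <stdio.h>

/* New-style test: lbase + lsize is never computed, so an existing
 * region touching the top of the space cannot wrap past zero. */
static int overlaps(uint32_t base, uint32_t size,
                    uint32_t lbase, uint32_t lsize)
{
    return lsize && base <= lbase + lsize - 1 && base + size - 1 >= lbase;
}

int main(void)
{
    /* Existing region covering the last 0x1000 pages of a 32-bit
     * page-number space: the old "base >= lbase + lsize" test saw
     * lbase + lsize wrap to 0 and reported no overlap. */
    uint32_t lbase = 0xfffff000u, lsize = 0x1000u;
    printf("overlap: %d\n", overlaps(0xfffff800u, 0x100u, lbase, lsize));
    return 0;
}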
--- a/xen/arch/x86/cpu/mtrr/mtrr.h	Thu Oct 11 10:21:55 2007 +0100
+++ b/xen/arch/x86/cpu/mtrr/mtrr.h	Thu Oct 11 12:11:54 2007 +0100
@@ -43,15 +43,16 @@ struct mtrr_ops {
     void (*set_all)(void);
 
     void (*get)(unsigned int reg, unsigned long *base,
-                unsigned int *size, mtrr_type * type);
-    int (*get_free_region) (unsigned long base, unsigned long size);
-
+                unsigned long *size, mtrr_type * type);
+    int (*get_free_region)(unsigned long base, unsigned long size,
+                           int replace_reg);
     int (*validate_add_page)(unsigned long base, unsigned long size,
                              unsigned int type);
     int (*have_wrcomb)(void);
 };
 
-extern int generic_get_free_region(unsigned long base, unsigned long size);
+extern int generic_get_free_region(unsigned long base, unsigned long size,
+                                   int replace_reg);
 extern int generic_validate_add_page(unsigned long base, unsigned long size,
                                      unsigned int type);
 
@@ -62,17 +63,17 @@ extern int positive_have_wrcomb(void);
 /* library functions for processor-specific routines */
 struct set_mtrr_context {
     unsigned long flags;
-    unsigned long deftype_lo;
-    unsigned long deftype_hi;
     unsigned long cr4val;
-    unsigned long ccr3;
+    u32 deftype_lo;
+    u32 deftype_hi;
+    u32 ccr3;
 };
 
 struct mtrr_var_range {
-    unsigned long base_lo;
-    unsigned long base_hi;
-    unsigned long mask_lo;
-    unsigned long mask_hi;
+    u32 base_lo;
+    u32 base_hi;
+    u32 mask_lo;
+    u32 mask_hi;
 };
 
 void set_mtrr_done(struct set_mtrr_context *ctxt);
@@ -83,7 +84,7 @@ void get_mtrr_state(void);
 
 extern void set_mtrr_ops(struct mtrr_ops * ops);
 
-extern u32 size_or_mask, size_and_mask;
+extern u64 size_or_mask, size_and_mask;
 extern struct mtrr_ops * mtrr_if;
 
 #define is_cpu(vnd) (mtrr_if && mtrr_if->vendor == X86_VENDOR_##vnd)
@@ -92,6 +93,6 @@ extern struct mtrr_ops * mtrr_if;
 extern unsigned int num_var_ranges;
 
 void mtrr_state_warn(void);
-char *mtrr_attrib_to_str(int x);
+const char *mtrr_attrib_to_str(int x);
 void mtrr_wrmsr(unsigned, unsigned, unsigned);
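Note: widening size_or_mask/size_and_mask to u64 matters because the masks are built from page numbers shifted by PAGE_SHIFT; with Fam10h's 48-bit physical addressing the shift count (48 - 12 = 36) no longer fits a u32 shift. A sketch of the mtrr_bp_init() computation with that width (illustrative program, same expressions as the patch):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
    /* phys_addr = 48 as on Fam10h: the page-number mask needs bit 36
     * and above, which a u32 cannot represent. */
    unsigned int phys_addr = 48;
    uint64_t size_or_mask  = ~((1ULL << (phys_addr - PAGE_SHIFT)) - 1);
    uint64_t size_and_mask = ~size_or_mask & 0xfffff00000ULL;

    printf("size_or_mask  = 0x%016llx\n", (unsigned long long)size_or_mask);
    printf("size_and_mask = 0x%016llx\n", (unsigned long long)size_and_mask);
    return 0;
}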
--- a/xen/arch/x86/hvm/svm/svm.c	Thu Oct 11 10:21:55 2007 +0100
+++ b/xen/arch/x86/hvm/svm/svm.c	Thu Oct 11 12:11:54 2007 +0100
@@ -120,7 +120,7 @@ static enum handler_return long_mode_do_
             return HNDL_exception_raised;
         break;
 
-    case MSR_K8_MC4_MISC: /* Threshold register */
+    case MSR_IA32_MC4_MISC: /* Threshold register */
         /*
          * MCA/MCE: Threshold register is reported to be locked, so we ignore
          * all write accesses. This behaviour matches real HW, so guests should
@@ -1776,7 +1776,7 @@ static void svm_do_msr_access(
     if (vmcb->exitinfo1 == 0)
     {
         switch (ecx) {
-        case MSR_IA32_TIME_STAMP_COUNTER:
+        case MSR_IA32_TSC:
             msr_content = hvm_get_guest_time(v);
             break;
 
@@ -1788,7 +1788,7 @@ static void svm_do_msr_access(
             msr_content = v->arch.hvm_vcpu.guest_efer;
             break;
 
-        case MSR_K8_MC4_MISC: /* Threshold register */
+        case MSR_IA32_MC4_MISC: /* Threshold register */
             /*
              * MCA/MCE: We report that the threshold register is unavailable
              * for OS use (locked by the BIOS).
@@ -1812,11 +1812,11 @@ static void svm_do_msr_access(
         case MSR_IA32_MCG_CAP:
         case MSR_IA32_MCG_STATUS:
         case MSR_IA32_MC0_STATUS:
-        case MSR_K8_MC1_STATUS:
-        case MSR_K8_MC2_STATUS:
-        case MSR_K8_MC3_STATUS:
-        case MSR_K8_MC4_STATUS:
-        case MSR_K8_MC5_STATUS:
+        case MSR_IA32_MC1_STATUS:
+        case MSR_IA32_MC2_STATUS:
+        case MSR_IA32_MC3_STATUS:
+        case MSR_IA32_MC4_STATUS:
+        case MSR_IA32_MC5_STATUS:
             /* No point in letting the guest see real MCEs */
             msr_content = 0;
             break;
@@ -1850,7 +1850,7 @@ static void svm_do_msr_access(
 
         switch (ecx)
         {
-        case MSR_IA32_TIME_STAMP_COUNTER:
+        case MSR_IA32_TSC:
             hvm_set_guest_time(v, msr_content);
             pt_reset(v);
             break;
--- a/xen/arch/x86/hvm/vmx/vmx.c	Thu Oct 11 10:21:55 2007 +0100
+++ b/xen/arch/x86/hvm/vmx/vmx.c	Thu Oct 11 12:11:54 2007 +0100
@@ -2247,7 +2247,7 @@ static int vmx_do_msr_read(struct cpu_us
 
     switch ( ecx )
     {
-    case MSR_IA32_TIME_STAMP_COUNTER:
+    case MSR_IA32_TSC:
         msr_content = hvm_get_guest_time(v);
         break;
     case MSR_IA32_SYSENTER_CS:
@@ -2267,11 +2267,11 @@ static int vmx_do_msr_read(struct cpu_us
     case MSR_IA32_MCG_CAP:
     case MSR_IA32_MCG_STATUS:
     case MSR_IA32_MC0_STATUS:
-    case MSR_K8_MC1_STATUS:
-    case MSR_K8_MC2_STATUS:
-    case MSR_K8_MC3_STATUS:
-    case MSR_K8_MC4_STATUS:
-    case MSR_K8_MC5_STATUS:
+    case MSR_IA32_MC1_STATUS:
+    case MSR_IA32_MC2_STATUS:
+    case MSR_IA32_MC3_STATUS:
+    case MSR_IA32_MC4_STATUS:
+    case MSR_IA32_MC5_STATUS:
         /* No point in letting the guest see real MCEs */
         msr_content = 0;
         break;
@@ -2387,7 +2387,7 @@ static int vmx_do_msr_write(struct cpu_u
 
     switch ( ecx )
     {
-    case MSR_IA32_TIME_STAMP_COUNTER:
+    case MSR_IA32_TSC:
         hvm_set_guest_time(v, msr_content);
         pt_reset(v);
         break;
--- a/xen/arch/x86/nmi.c	Thu Oct 11 10:21:55 2007 +0100
+++ b/xen/arch/x86/nmi.c	Thu Oct 11 12:11:54 2007 +0100
@@ -314,9 +314,14 @@ void __pminit setup_apic_nmi_watchdog(vo
 
     switch (boot_cpu_data.x86_vendor) {
     case X86_VENDOR_AMD:
-        if (boot_cpu_data.x86 != 6 && boot_cpu_data.x86 != 15)
+        switch (boot_cpu_data.x86) {
+        case 6:
+        case 15 ... 17:
+            setup_k7_watchdog();
+            break;
+        default:
             return;
-        setup_k7_watchdog();
+        }
         break;
     case X86_VENDOR_INTEL:
         switch (boot_cpu_data.x86) {
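Note: the "case 15 ... 17:" form is a GCC case-range extension covering an inclusive range of values; a portable equivalent would list each case explicitly. Minimal demonstration (illustrative helper only):

#include <stdio.h>

static const char *watchdog_class(unsigned int family)
{
    switch (family) {
    case 6:
        return "K7";
    case 15 ... 17:   /* GCC extension: K8, Fam10h, Fam11h */
        return "K8-class";
    default:
        return "unsupported";
    }
}

int main(void)
{
    printf("%s\n", watchdog_class(16));
    return 0;
}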
--- a/xen/arch/x86/oprofile/nmi_int.c	Thu Oct 11 10:21:55 2007 +0100
+++ b/xen/arch/x86/oprofile/nmi_int.c	Thu Oct 11 12:11:54 2007 +0100
@@ -348,6 +348,14 @@ static int __init nmi_init(void)
                give user space an consistent name. */
             cpu_type = "x86-64/hammer";
             break;
+        case 0x10:
+            model = &op_athlon_spec;
+            cpu_type = "x86-64/family10";
+            break;
+        case 0x11:
+            model = &op_athlon_spec;
+            cpu_type = "x86-64/family11";
+            break;
         }
         break;
--- a/xen/arch/x86/oprofile/op_model_athlon.c	Thu Oct 11 10:21:55 2007 +0100
+++ b/xen/arch/x86/oprofile/op_model_athlon.c	Thu Oct 11 12:11:54 2007 +0100
@@ -34,12 +34,15 @@
 #define CTRL_WRITE(l,h,msrs,c) do {wrmsr(msrs->controls[(c)].addr, (l), (h));} while (0)
 #define CTRL_SET_ACTIVE(n) (n |= (1<<22))
 #define CTRL_SET_INACTIVE(n) (n &= ~(1<<22))
-#define CTRL_CLEAR(x) (x &= (1<<21))
+#define CTRL_CLEAR(lo, hi) (lo &= (1<<21), hi = 0)
 #define CTRL_SET_ENABLE(val) (val |= 1<<20)
 #define CTRL_SET_USR(val,u) (val |= ((u & 1) << 16))
 #define CTRL_SET_KERN(val,k) (val |= ((k & 1) << 17))
-#define CTRL_SET_UM(val, m) (val |= (m << 8))
-#define CTRL_SET_EVENT(val, e) (val |= e)
+#define CTRL_SET_UM(val, m) (val |= ((m & 0xff) << 8))
+#define CTRL_SET_EVENT_LOW(val, e) (val |= (e & 0xff))
+#define CTRL_SET_EVENT_HIGH(val, e) (val |= ((e >> 8) & 0xf))
+#define CTRL_SET_HOST_ONLY(val, h) (val |= ((h & 1) << 9))
+#define CTRL_SET_GUEST_ONLY(val, h) (val |= ((h & 1) << 8))
 
 static unsigned long reset_value[NUM_COUNTERS];
 
@@ -72,7 +75,7 @@ static void athlon_setup_ctrs(struct op_
     /* clear all counters */
     for (i = 0 ; i < NUM_CONTROLS; ++i) {
         CTRL_READ(low, high, msrs, i);
-        CTRL_CLEAR(low);
+        CTRL_CLEAR(low, high);
         CTRL_WRITE(low, high, msrs, i);
     }
 
@@ -89,12 +92,15 @@ static void athlon_setup_ctrs(struct op_
         CTR_WRITE(counter_config[i].count, msrs, i);
 
         CTRL_READ(low, high, msrs, i);
-        CTRL_CLEAR(low);
+        CTRL_CLEAR(low, high);
         CTRL_SET_ENABLE(low);
         CTRL_SET_USR(low, counter_config[i].user);
         CTRL_SET_KERN(low, counter_config[i].kernel);
         CTRL_SET_UM(low, counter_config[i].unit_mask);
-        CTRL_SET_EVENT(low, counter_config[i].event);
+        CTRL_SET_EVENT_LOW(low, counter_config[i].event);
+        CTRL_SET_EVENT_HIGH(high, counter_config[i].event);
+        CTRL_SET_HOST_ONLY(high, 0);
+        CTRL_SET_GUEST_ONLY(high, 0);
         CTRL_WRITE(low, high, msrs, i);
     } else {
         reset_value[i] = 0;
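Note: Fam10h widened the performance-event select field to 12 bits, which is why CTRL_SET_EVENT splits in two: EvtSel[7:0] stays in the low MSR word, EvtSel[11:8] lands in bits 3:0 of the high word (MSR bits 35:32), alongside the new HostOnly/GuestOnly bits at MSR bits 41:40. A sketch composing a full event-select value the way these macros do (0x1d4 is just an illustrative event code):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    unsigned int event = 0x1d4, unit_mask = 0x0f;
    uint32_t low = 0, high = 0;

    low  |= 1u << 22;            /* EN: counter enabled */
    low  |= 1u << 20;            /* INT: APIC interrupt on overflow */
    low  |= 1u << 17;            /* OS: count kernel mode */
    low  |= 1u << 16;            /* USR: count user mode */
    low  |= (unit_mask & 0xff) << 8;
    low  |= event & 0xff;        /* EvtSel[7:0] */
    high |= (event >> 8) & 0xf;  /* EvtSel[11:8] -> MSR bits 35:32 */
    /* HostOnly/GuestOnly (high bits 9/8) left clear: count everywhere */

    printf("PerfEvtSel = 0x%08x%08x\n", high, low);
    return 0;
}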
--- a/xen/arch/x86/platform_hypercall.c	Thu Oct 11 10:21:55 2007 +0100
+++ b/xen/arch/x86/platform_hypercall.c	Thu Oct 11 12:11:54 2007 +0100
@@ -121,8 +121,7 @@ ret_t do_platform_op(XEN_GUEST_HANDLE(xe
 
     case XENPF_read_memtype:
     {
-        unsigned long mfn;
-        unsigned int nr_mfns;
+        unsigned long mfn, nr_mfns;
         mtrr_type type;
 
         ret = xsm_memtype(op->cmd);
--- a/xen/arch/x86/smpboot.c	Thu Oct 11 10:21:55 2007 +0100
+++ b/xen/arch/x86/smpboot.c	Thu Oct 11 12:11:54 2007 +0100
@@ -50,6 +50,7 @@
 #include <asm/div64.h>
 #include <asm/flushtlb.h>
 #include <asm/msr.h>
+#include <asm/mtrr.h>
 #include <mach_apic.h>
 #include <mach_wakecpu.h>
 #include <smpboot_hooks.h>
@@ -820,6 +821,12 @@ static int __devinit do_boot_cpu(int api
     unsigned short nmi_high = 0, nmi_low = 0;
     struct vcpu *v;
 
+    /*
+     * Save current MTRR state in case it was changed since early boot
+     * (e.g. by the ACPI SMI) to initialize new CPUs with MTRRs in sync:
+     */
+    mtrr_save_state();
+
     ++cpucount;
 
     booting_cpu = cpu;
--- a/xen/arch/x86/traps.c	Thu Oct 11 10:21:55 2007 +0100
+++ b/xen/arch/x86/traps.c	Thu Oct 11 12:11:54 2007 +0100
@@ -1728,8 +1728,8 @@ static int emulate_privileged_op(struct
             v->arch.guest_context.gs_base_user = res;
             break;
 #endif
-        case MSR_K8_FIDVID_STATUS:
-        case MSR_K8_FIDVID_CTL:
+        case MSR_K7_FID_VID_STATUS:
+        case MSR_K7_FID_VID_CTL:
             if ( (cpufreq_controller != FREQCTL_dom0_kernel) ||
                  (boot_cpu_data.x86_vendor != X86_VENDOR_AMD) ||
                  wrmsr_safe(regs->ecx, eax, edx) )
@@ -1770,8 +1770,8 @@ static int emulate_privileged_op(struct
             regs->edx = v->arch.guest_context.gs_base_user >> 32;
             break;
 #endif
-        case MSR_K8_FIDVID_CTL:
-        case MSR_K8_FIDVID_STATUS:
+        case MSR_K7_FID_VID_CTL:
+        case MSR_K7_FID_VID_STATUS:
             if ( (cpufreq_controller != FREQCTL_dom0_kernel) ||
                  (boot_cpu_data.x86_vendor != X86_VENDOR_AMD) ||
                  rdmsr_safe(regs->ecx, regs->eax, regs->edx) )
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/asm-x86/msr-index.h	Thu Oct 11 12:11:54 2007 +0100
@@ -0,0 +1,334 @@
+#ifndef __ASM_MSR_INDEX_H
+#define __ASM_MSR_INDEX_H
+
+/* CPU model specific register (MSR) numbers */
+
+/* x86-64 specific MSRs */
+#define MSR_EFER 0xc0000080 /* extended feature register */
+#define MSR_STAR 0xc0000081 /* legacy mode SYSCALL target */
+#define MSR_LSTAR 0xc0000082 /* long mode SYSCALL target */
+#define MSR_CSTAR 0xc0000083 /* compat mode SYSCALL target */
+#define MSR_SYSCALL_MASK 0xc0000084 /* EFLAGS mask for syscall */
+#define MSR_FS_BASE 0xc0000100 /* 64bit FS base */
+#define MSR_GS_BASE 0xc0000101 /* 64bit GS base */
+#define MSR_SHADOW_GS_BASE 0xc0000102 /* SwapGS GS shadow */
+
+/* EFER bits: */
+#define _EFER_SCE 0  /* SYSCALL/SYSRET */
+#define _EFER_LME 8  /* Long mode enable */
+#define _EFER_LMA 10 /* Long mode active (read-only) */
+#define _EFER_NX 11  /* No execute enable */
+#define _EFER_SVME 12
+
+#define EFER_SCE (1<<_EFER_SCE)
+#define EFER_LME (1<<_EFER_LME)
+#define EFER_LMA (1<<_EFER_LMA)
+#define EFER_NX (1<<_EFER_NX)
+#define EFER_SVME (1<<_EFER_SVME)
+
+/* Intel MSRs. Some also available on other CPUs */
+#define MSR_IA32_PERFCTR0 0x000000c1
+#define MSR_IA32_PERFCTR1 0x000000c2
+#define MSR_FSB_FREQ 0x000000cd
+
+#define MSR_MTRRcap 0x000000fe
+#define MSR_IA32_BBL_CR_CTL 0x00000119
+
+#define MSR_IA32_SYSENTER_CS 0x00000174
+#define MSR_IA32_SYSENTER_ESP 0x00000175
+#define MSR_IA32_SYSENTER_EIP 0x00000176
+
+#define MSR_IA32_MCG_CAP 0x00000179
+#define MSR_IA32_MCG_STATUS 0x0000017a
+#define MSR_IA32_MCG_CTL 0x0000017b
+
+#define MSR_IA32_PEBS_ENABLE 0x000003f1
+#define MSR_IA32_DS_AREA 0x00000600
+#define MSR_IA32_PERF_CAPABILITIES 0x00000345
+
+#define MSR_MTRRfix64K_00000 0x00000250
+#define MSR_MTRRfix16K_80000 0x00000258
+#define MSR_MTRRfix16K_A0000 0x00000259
+#define MSR_MTRRfix4K_C0000 0x00000268
+#define MSR_MTRRfix4K_C8000 0x00000269
+#define MSR_MTRRfix4K_D0000 0x0000026a
+#define MSR_MTRRfix4K_D8000 0x0000026b
+#define MSR_MTRRfix4K_E0000 0x0000026c
+#define MSR_MTRRfix4K_E8000 0x0000026d
+#define MSR_MTRRfix4K_F0000 0x0000026e
+#define MSR_MTRRfix4K_F8000 0x0000026f
+#define MSR_MTRRdefType 0x000002ff
+
+#define MSR_IA32_DEBUGCTLMSR 0x000001d9
+#define MSR_IA32_LASTBRANCHFROMIP 0x000001db
+#define MSR_IA32_LASTBRANCHTOIP 0x000001dc
+#define MSR_IA32_LASTINTFROMIP 0x000001dd
+#define MSR_IA32_LASTINTTOIP 0x000001de
+
+#define MSR_IA32_MC0_CTL 0x00000400
+#define MSR_IA32_MC0_STATUS 0x00000401
+#define MSR_IA32_MC0_ADDR 0x00000402
+#define MSR_IA32_MC0_MISC 0x00000403
+
+#define MSR_IA32_MC1_CTL 0x00000404
+#define MSR_IA32_MC1_STATUS 0x00000405
+#define MSR_IA32_MC1_ADDR 0x00000406
+#define MSR_IA32_MC1_MISC 0x00000407
+
+#define MSR_IA32_MC2_CTL 0x00000408
+#define MSR_IA32_MC2_STATUS 0x00000409
+#define MSR_IA32_MC2_ADDR 0x0000040A
+#define MSR_IA32_MC2_MISC 0x0000040B
+
+#define MSR_IA32_MC3_CTL 0x0000040C
+#define MSR_IA32_MC3_STATUS 0x0000040D
+#define MSR_IA32_MC3_ADDR 0x0000040E
+#define MSR_IA32_MC3_MISC 0x0000040F
+
+#define MSR_IA32_MC4_CTL 0x00000410
+#define MSR_IA32_MC4_STATUS 0x00000411
+#define MSR_IA32_MC4_ADDR 0x00000412
+#define MSR_IA32_MC4_MISC 0x00000413
+
+#define MSR_IA32_MC5_CTL 0x00000414
+#define MSR_IA32_MC5_STATUS 0x00000415
+#define MSR_IA32_MC5_ADDR 0x00000416
+#define MSR_IA32_MC5_MISC 0x00000417
+
+#define MSR_P6_PERFCTR0 0x000000c1
+#define MSR_P6_PERFCTR1 0x000000c2
+#define MSR_P6_EVNTSEL0 0x00000186
+#define MSR_P6_EVNTSEL1 0x00000187
+
+/* MSRs & bits used for VMX enabling */
+#define MSR_IA32_VMX_BASIC 0x480
+#define MSR_IA32_VMX_PINBASED_CTLS 0x481
+#define MSR_IA32_VMX_PROCBASED_CTLS 0x482
+#define MSR_IA32_VMX_EXIT_CTLS 0x483
+#define MSR_IA32_VMX_ENTRY_CTLS 0x484
+#define MSR_IA32_VMX_MISC 0x485
+#define MSR_IA32_VMX_CR0_FIXED0 0x486
+#define MSR_IA32_VMX_CR0_FIXED1 0x487
+#define MSR_IA32_VMX_CR4_FIXED0 0x488
+#define MSR_IA32_VMX_CR4_FIXED1 0x489
+#define MSR_IA32_VMX_PROCBASED_CTLS2 0x48b
+#define IA32_FEATURE_CONTROL_MSR 0x3a
+#define IA32_FEATURE_CONTROL_MSR_LOCK 0x0001
+#define IA32_FEATURE_CONTROL_MSR_ENABLE_VMXON_INSIDE_SMX 0x0002
+#define IA32_FEATURE_CONTROL_MSR_ENABLE_VMXON_OUTSIDE_SMX 0x0004
+#define IA32_FEATURE_CONTROL_MSR_SENTER_PARAM_CTL 0x7f00
+#define IA32_FEATURE_CONTROL_MSR_ENABLE_SENTER 0x8000
+
+/* K7/K8 MSRs. Not complete. See the architecture manual for a more
+   complete list. */
+#define MSR_K7_EVNTSEL0 0xc0010000
+#define MSR_K7_PERFCTR0 0xc0010004
+#define MSR_K7_EVNTSEL1 0xc0010001
+#define MSR_K7_PERFCTR1 0xc0010005
+#define MSR_K7_EVNTSEL2 0xc0010002
+#define MSR_K7_PERFCTR2 0xc0010006
+#define MSR_K7_EVNTSEL3 0xc0010003
+#define MSR_K7_PERFCTR3 0xc0010007
+#define MSR_K8_TOP_MEM1 0xc001001a
+#define MSR_K7_CLK_CTL 0xc001001b
+#define MSR_K8_TOP_MEM2 0xc001001d
+#define MSR_K8_SYSCFG 0xc0010010
+
+#define K8_MTRRFIXRANGE_DRAM_ENABLE 0x00040000 /* MtrrFixDramEn bit */
+#define K8_MTRRFIXRANGE_DRAM_MODIFY 0x00080000 /* MtrrFixDramModEn bit */
+#define K8_MTRR_RDMEM_WRMEM_MASK 0x18181818 /* Mask: RdMem|WrMem */
+
+#define MSR_K7_HWCR 0xc0010015
+#define MSR_K8_HWCR 0xc0010015
+#define MSR_K7_FID_VID_CTL 0xc0010041
+#define MSR_K7_FID_VID_STATUS 0xc0010042
+#define MSR_K8_ENABLE_C1E 0xc0010055
+#define MSR_K8_VM_CR 0xC0010114
+#define MSR_K8_VM_HSAVE_PA 0xC0010117
+
+/* MSR_K8_VM_CR bits: */
+#define _K8_VMCR_SVME_DISABLE 4
+#define K8_VMCR_SVME_DISABLE (1 << _K8_VMCR_SVME_DISABLE)
+
+/* K6 MSRs */
+#define MSR_K6_EFER 0xc0000080
+#define MSR_K6_STAR 0xc0000081
+#define MSR_K6_WHCR 0xc0000082
+#define MSR_K6_UWCCR 0xc0000085
+#define MSR_K6_EPMR 0xc0000086
+#define MSR_K6_PSOR 0xc0000087
+#define MSR_K6_PFIR 0xc0000088
+
+/* Centaur-Hauls/IDT defined MSRs. */
+#define MSR_IDT_FCR1 0x00000107
+#define MSR_IDT_FCR2 0x00000108
+#define MSR_IDT_FCR3 0x00000109
+#define MSR_IDT_FCR4 0x0000010a
+
+#define MSR_IDT_MCR0 0x00000110
+#define MSR_IDT_MCR1 0x00000111
+#define MSR_IDT_MCR2 0x00000112
+#define MSR_IDT_MCR3 0x00000113
+#define MSR_IDT_MCR4 0x00000114
+#define MSR_IDT_MCR5 0x00000115
+#define MSR_IDT_MCR6 0x00000116
+#define MSR_IDT_MCR7 0x00000117
+#define MSR_IDT_MCR_CTRL 0x00000120
+
+/* VIA Cyrix defined MSRs*/
+#define MSR_VIA_FCR 0x00001107
+#define MSR_VIA_LONGHAUL 0x0000110a
+#define MSR_VIA_RNG 0x0000110b
+#define MSR_VIA_BCR2 0x00001147
+
+/* Transmeta defined MSRs */
+#define MSR_TMTA_LONGRUN_CTRL 0x80868010
+#define MSR_TMTA_LONGRUN_FLAGS 0x80868011
+#define MSR_TMTA_LRTI_READOUT 0x80868018
+#define MSR_TMTA_LRTI_VOLT_MHZ 0x8086801a
+
+/* Intel defined MSRs. */
+#define MSR_IA32_P5_MC_ADDR 0x00000000
+#define MSR_IA32_P5_MC_TYPE 0x00000001
+#define MSR_IA32_TSC 0x00000010
+#define MSR_IA32_PLATFORM_ID 0x00000017
+#define MSR_IA32_EBL_CR_POWERON 0x0000002a
+#define MSR_IA32_EBC_FREQUENCY_ID 0x0000002c
+
+#define MSR_IA32_APICBASE 0x0000001b
+#define MSR_IA32_APICBASE_BSP (1<<8)
+#define MSR_IA32_APICBASE_ENABLE (1<<11)
+#define MSR_IA32_APICBASE_BASE (0xfffff<<12)
+
+#define MSR_IA32_UCODE_WRITE 0x00000079
+#define MSR_IA32_UCODE_REV 0x0000008b
+
+#define MSR_IA32_PERF_STATUS 0x00000198
+#define MSR_IA32_PERF_CTL 0x00000199
+
+#define MSR_IA32_MPERF 0x000000e7
+#define MSR_IA32_APERF 0x000000e8
+
+#define MSR_IA32_THERM_CONTROL 0x0000019a
+#define MSR_IA32_THERM_INTERRUPT 0x0000019b
+#define MSR_IA32_THERM_STATUS 0x0000019c
+#define MSR_IA32_MISC_ENABLE 0x000001a0
+#define MSR_IA32_MISC_ENABLE_PERF_AVAIL (1<<7)
+#define MSR_IA32_MISC_ENABLE_BTS_UNAVAIL (1<<11)
+#define MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL (1<<12)
+
+/* Intel Model 6 */
+#define MSR_P6_EVNTSEL0 0x00000186
+#define MSR_P6_EVNTSEL1 0x00000187
+
+/* P4/Xeon+ specific */
+#define MSR_IA32_MCG_EAX 0x00000180
+#define MSR_IA32_MCG_EBX 0x00000181
+#define MSR_IA32_MCG_ECX 0x00000182
+#define MSR_IA32_MCG_EDX 0x00000183
+#define MSR_IA32_MCG_ESI 0x00000184
+#define MSR_IA32_MCG_EDI 0x00000185
+#define MSR_IA32_MCG_EBP 0x00000186
+#define MSR_IA32_MCG_ESP 0x00000187
+#define MSR_IA32_MCG_EFLAGS 0x00000188
+#define MSR_IA32_MCG_EIP 0x00000189
+#define MSR_IA32_MCG_RESERVED 0x0000018a
+
+/* Pentium IV performance counter MSRs */
+#define MSR_P4_BPU_PERFCTR0 0x00000300
+#define MSR_P4_BPU_PERFCTR1 0x00000301
+#define MSR_P4_BPU_PERFCTR2 0x00000302
+#define MSR_P4_BPU_PERFCTR3 0x00000303
+#define MSR_P4_MS_PERFCTR0 0x00000304
+#define MSR_P4_MS_PERFCTR1 0x00000305
+#define MSR_P4_MS_PERFCTR2 0x00000306
+#define MSR_P4_MS_PERFCTR3 0x00000307
+#define MSR_P4_FLAME_PERFCTR0 0x00000308
+#define MSR_P4_FLAME_PERFCTR1 0x00000309
+#define MSR_P4_FLAME_PERFCTR2 0x0000030a
+#define MSR_P4_FLAME_PERFCTR3 0x0000030b
+#define MSR_P4_IQ_PERFCTR0 0x0000030c
+#define MSR_P4_IQ_PERFCTR1 0x0000030d
+#define MSR_P4_IQ_PERFCTR2 0x0000030e
+#define MSR_P4_IQ_PERFCTR3 0x0000030f
+#define MSR_P4_IQ_PERFCTR4 0x00000310
+#define MSR_P4_IQ_PERFCTR5 0x00000311
+#define MSR_P4_BPU_CCCR0 0x00000360
+#define MSR_P4_BPU_CCCR1 0x00000361
+#define MSR_P4_BPU_CCCR2 0x00000362
+#define MSR_P4_BPU_CCCR3 0x00000363
+#define MSR_P4_MS_CCCR0 0x00000364
+#define MSR_P4_MS_CCCR1 0x00000365
+#define MSR_P4_MS_CCCR2 0x00000366
+#define MSR_P4_MS_CCCR3 0x00000367
+#define MSR_P4_FLAME_CCCR0 0x00000368
+#define MSR_P4_FLAME_CCCR1 0x00000369
+#define MSR_P4_FLAME_CCCR2 0x0000036a
+#define MSR_P4_FLAME_CCCR3 0x0000036b
+#define MSR_P4_IQ_CCCR0 0x0000036c
+#define MSR_P4_IQ_CCCR1 0x0000036d
+#define MSR_P4_IQ_CCCR2 0x0000036e
+#define MSR_P4_IQ_CCCR3 0x0000036f
+#define MSR_P4_IQ_CCCR4 0x00000370
+#define MSR_P4_IQ_CCCR5 0x00000371
+#define MSR_P4_ALF_ESCR0 0x000003ca
+#define MSR_P4_ALF_ESCR1 0x000003cb
+#define MSR_P4_BPU_ESCR0 0x000003b2
+#define MSR_P4_BPU_ESCR1 0x000003b3
+#define MSR_P4_BSU_ESCR0 0x000003a0
+#define MSR_P4_BSU_ESCR1 0x000003a1
+#define MSR_P4_CRU_ESCR0 0x000003b8
+#define MSR_P4_CRU_ESCR1 0x000003b9
+#define MSR_P4_CRU_ESCR2 0x000003cc
+#define MSR_P4_CRU_ESCR3 0x000003cd
+#define MSR_P4_CRU_ESCR4 0x000003e0
+#define MSR_P4_CRU_ESCR5 0x000003e1
+#define MSR_P4_DAC_ESCR0 0x000003a8
+#define MSR_P4_DAC_ESCR1 0x000003a9
+#define MSR_P4_FIRM_ESCR0 0x000003a4
+#define MSR_P4_FIRM_ESCR1 0x000003a5
+#define MSR_P4_FLAME_ESCR0 0x000003a6
+#define MSR_P4_FLAME_ESCR1 0x000003a7
+#define MSR_P4_FSB_ESCR0 0x000003a2
+#define MSR_P4_FSB_ESCR1 0x000003a3
+#define MSR_P4_IQ_ESCR0 0x000003ba
+#define MSR_P4_IQ_ESCR1 0x000003bb
+#define MSR_P4_IS_ESCR0 0x000003b4
+#define MSR_P4_IS_ESCR1 0x000003b5
+#define MSR_P4_ITLB_ESCR0 0x000003b6
+#define MSR_P4_ITLB_ESCR1 0x000003b7
+#define MSR_P4_IX_ESCR0 0x000003c8
+#define MSR_P4_IX_ESCR1 0x000003c9
+#define MSR_P4_MOB_ESCR0 0x000003aa
+#define MSR_P4_MOB_ESCR1 0x000003ab
+#define MSR_P4_MS_ESCR0 0x000003c0
+#define MSR_P4_MS_ESCR1 0x000003c1
+#define MSR_P4_PMH_ESCR0 0x000003ac
+#define MSR_P4_PMH_ESCR1 0x000003ad
+#define MSR_P4_RAT_ESCR0 0x000003bc
+#define MSR_P4_RAT_ESCR1 0x000003bd
+#define MSR_P4_SAAT_ESCR0 0x000003ae
+#define MSR_P4_SAAT_ESCR1 0x000003af
+#define MSR_P4_SSU_ESCR0 0x000003be
+#define MSR_P4_SSU_ESCR1 0x000003bf /* guess: not in manual */
+
+#define MSR_P4_TBPU_ESCR0 0x000003c2
+#define MSR_P4_TBPU_ESCR1 0x000003c3
+#define MSR_P4_TC_ESCR0 0x000003c4
+#define MSR_P4_TC_ESCR1 0x000003c5
+#define MSR_P4_U2L_ESCR0 0x000003b0
+#define MSR_P4_U2L_ESCR1 0x000003b1
+
+/* Intel Core-based CPU performance counters */
+#define MSR_CORE_PERF_FIXED_CTR0 0x00000309
+#define MSR_CORE_PERF_FIXED_CTR1 0x0000030a
+#define MSR_CORE_PERF_FIXED_CTR2 0x0000030b
+#define MSR_CORE_PERF_FIXED_CTR_CTRL 0x0000038d
+#define MSR_CORE_PERF_GLOBAL_STATUS 0x0000038e
+#define MSR_CORE_PERF_GLOBAL_CTRL 0x0000038f
+#define MSR_CORE_PERF_GLOBAL_OVF_CTRL 0x00000390
+
+/* Geode defined MSRs */
+#define MSR_GEODE_BUSCONT_CONF0 0x00001900
+
+#endif /* __ASM_MSR_INDEX_H */
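Note: MSR_MTRRcap (0xfe) introduced above is what the MTRR code's new have_fixed check decodes: VCNT in bits 7:0, fixed-range support in bit 8, write-combining support in bit 10. A sketch of that decoding with an assumed raw value standing in for a real rdmsr:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t mtrrcap = 0x508; /* assumed: 8 variable ranges, FIX=1, WC=1 */

    printf("variable ranges: %u\n", (unsigned)(mtrrcap & 0xff));
    printf("fixed ranges:    %s\n", (mtrrcap >> 8) & 1 ? "yes" : "no");
    printf("write-combining: %s\n", (mtrrcap >> 10) & 1 ? "yes" : "no");
    return 0;
}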
18.1 --- a/xen/include/asm-x86/msr.h Thu Oct 11 10:21:55 2007 +0100 18.2 +++ b/xen/include/asm-x86/msr.h Thu Oct 11 12:11:54 2007 +0100 18.3 @@ -1,6 +1,8 @@ 18.4 #ifndef __ASM_MSR_H 18.5 #define __ASM_MSR_H 18.6 18.7 +#include "msr-index.h" 18.8 + 18.9 #ifndef __ASSEMBLY__ 18.10 18.11 #include <xen/smp.h> 18.12 @@ -87,70 +89,6 @@ static inline void wrmsrl(unsigned int m 18.13 : "=a" (low), "=d" (high) \ 18.14 : "c" (counter)) 18.15 18.16 -#endif /* !__ASSEMBLY__ */ 18.17 - 18.18 -/* symbolic names for some interesting MSRs */ 18.19 -/* Intel defined MSRs. */ 18.20 -#define MSR_IA32_P5_MC_ADDR 0 18.21 -#define MSR_IA32_P5_MC_TYPE 1 18.22 -#define MSR_IA32_TIME_STAMP_COUNTER 0x10 18.23 -#define MSR_IA32_PLATFORM_ID 0x17 18.24 -#define MSR_IA32_EBL_CR_POWERON 0x2a 18.25 -#define MSR_IA32_EBC_FREQUENCY_ID 0x2c 18.26 - 18.27 -#define MSR_IA32_APICBASE 0x1b 18.28 -#define MSR_IA32_APICBASE_BSP (1<<8) 18.29 -#define MSR_IA32_APICBASE_ENABLE (1<<11) 18.30 -#define MSR_IA32_APICBASE_BASE (0xfffff<<12) 18.31 - 18.32 -#define MSR_IA32_UCODE_WRITE 0x79 18.33 -#define MSR_IA32_UCODE_REV 0x8b 18.34 - 18.35 -#define MSR_P6_PERFCTR0 0xc1 18.36 -#define MSR_P6_PERFCTR1 0xc2 18.37 - 18.38 -/* MSRs & bits used for VMX enabling */ 18.39 -#define MSR_IA32_VMX_BASIC 0x480 18.40 -#define MSR_IA32_VMX_PINBASED_CTLS 0x481 18.41 -#define MSR_IA32_VMX_PROCBASED_CTLS 0x482 18.42 -#define MSR_IA32_VMX_EXIT_CTLS 0x483 18.43 -#define MSR_IA32_VMX_ENTRY_CTLS 0x484 18.44 -#define MSR_IA32_VMX_MISC 0x485 18.45 -#define MSR_IA32_VMX_CR0_FIXED0 0x486 18.46 -#define MSR_IA32_VMX_CR0_FIXED1 0x487 18.47 -#define MSR_IA32_VMX_CR4_FIXED0 0x488 18.48 -#define MSR_IA32_VMX_CR4_FIXED1 0x489 18.49 -#define MSR_IA32_VMX_PROCBASED_CTLS2 0x48b 18.50 -#define IA32_FEATURE_CONTROL_MSR 0x3a 18.51 -#define IA32_FEATURE_CONTROL_MSR_LOCK 0x0001 18.52 -#define IA32_FEATURE_CONTROL_MSR_ENABLE_VMXON_INSIDE_SMX 0x0002 18.53 -#define IA32_FEATURE_CONTROL_MSR_ENABLE_VMXON_OUTSIDE_SMX 0x0004 18.54 -#define IA32_FEATURE_CONTROL_MSR_SENTER_PARAM_CTL 0x7f00 18.55 -#define IA32_FEATURE_CONTROL_MSR_ENABLE_SENTER 0x8000 18.56 - 18.57 -/* AMD/K8 specific MSRs */ 18.58 -#define MSR_EFER 0xc0000080 /* extended feature register */ 18.59 -#define MSR_STAR 0xc0000081 /* legacy mode SYSCALL target */ 18.60 -#define MSR_LSTAR 0xc0000082 /* long mode SYSCALL target */ 18.61 -#define MSR_CSTAR 0xc0000083 /* compatibility mode SYSCALL target */ 18.62 -#define MSR_SYSCALL_MASK 0xc0000084 /* EFLAGS mask for syscall */ 18.63 -#define MSR_FS_BASE 0xc0000100 /* 64bit FS base */ 18.64 -#define MSR_GS_BASE 0xc0000101 /* 64bit GS base */ 18.65 -#define MSR_SHADOW_GS_BASE 0xc0000102 /* SwapGS GS shadow */ 18.66 -/* EFER bits: */ 18.67 -#define _EFER_SCE 0 /* SYSCALL/SYSRET */ 18.68 -#define _EFER_LME 8 /* Long mode enable */ 18.69 -#define _EFER_LMA 10 /* Long mode active (read-only) */ 18.70 -#define _EFER_NX 11 /* No execute enable */ 18.71 -#define _EFER_SVME 12 18.72 - 18.73 -#define EFER_SCE (1<<_EFER_SCE) 18.74 -#define EFER_LME (1<<_EFER_LME) 18.75 -#define EFER_LMA (1<<_EFER_LMA) 18.76 -#define EFER_NX (1<<_EFER_NX) 18.77 -#define EFER_SVME (1<<_EFER_SVME) 18.78 - 18.79 -#ifndef __ASSEMBLY__ 18.80 18.81 DECLARE_PER_CPU(__u64, efer); 18.82 18.83 @@ -167,232 +105,6 @@ static inline void write_efer(__u64 val) 18.84 wrmsrl(MSR_EFER, val); 18.85 } 18.86 18.87 -#endif 18.88 - 18.89 -/* Intel MSRs. 
Some also available on other CPUs */
18.90 -#define MSR_IA32_PLATFORM_ID 0x17
18.91 -
18.92 -#define MSR_MTRRcap 0x0fe
18.93 -#define MSR_IA32_BBL_CR_CTL 0x119
18.94 -
18.95 -#define MSR_IA32_SYSENTER_CS 0x174
18.96 -#define MSR_IA32_SYSENTER_ESP 0x175
18.97 -#define MSR_IA32_SYSENTER_EIP 0x176
18.98 -
18.99 -#define MSR_IA32_MCG_CAP 0x179
18.100 -#define MSR_IA32_MCG_STATUS 0x17a
18.101 -#define MSR_IA32_MCG_CTL 0x17b
18.102 -
18.103 -/* P4/Xeon+ specific */
18.104 -#define MSR_IA32_MCG_EAX 0x180
18.105 -#define MSR_IA32_MCG_EBX 0x181
18.106 -#define MSR_IA32_MCG_ECX 0x182
18.107 -#define MSR_IA32_MCG_EDX 0x183
18.108 -#define MSR_IA32_MCG_ESI 0x184
18.109 -#define MSR_IA32_MCG_EDI 0x185
18.110 -#define MSR_IA32_MCG_EBP 0x186
18.111 -#define MSR_IA32_MCG_ESP 0x187
18.112 -#define MSR_IA32_MCG_EFLAGS 0x188
18.113 -#define MSR_IA32_MCG_EIP 0x189
18.114 -#define MSR_IA32_MCG_RESERVED 0x18A
18.115 -
18.116 -#define MSR_P6_EVNTSEL0 0x186
18.117 -#define MSR_P6_EVNTSEL1 0x187
18.118 -
18.119 -#define MSR_IA32_PERF_STATUS 0x198
18.120 -#define MSR_IA32_PERF_CTL 0x199
18.121 -
18.122 -#define MSR_IA32_THERM_CONTROL 0x19a
18.123 -#define MSR_IA32_THERM_INTERRUPT 0x19b
18.124 -#define MSR_IA32_THERM_STATUS 0x19c
18.125 -#define MSR_IA32_MISC_ENABLE 0x1a0
18.126 -
18.127 -#define MSR_IA32_MISC_ENABLE_PERF_AVAIL (1<<7)
18.128 -#define MSR_IA32_MISC_ENABLE_BTS_UNAVAIL (1<<11)
18.129 -#define MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL (1<<12)
18.130 -
18.131 -#define MSR_IA32_DEBUGCTLMSR 0x1d9
18.132 -#define MSR_IA32_LASTBRANCHFROMIP 0x1db
18.133 -#define MSR_IA32_LASTBRANCHTOIP 0x1dc
18.134 -#define MSR_IA32_LASTINTFROMIP 0x1dd
18.135 -#define MSR_IA32_LASTINTTOIP 0x1de
18.136 -
18.137 -#define MSR_IA32_MC0_CTL 0x400
18.138 -#define MSR_IA32_MC0_STATUS 0x401
18.139 -#define MSR_IA32_MC0_ADDR 0x402
18.140 -#define MSR_IA32_MC0_MISC 0x403
18.141 -
18.142 -/* K8 Machine Check MSRs */
18.143 -#define MSR_K8_MC1_CTL 0x404
18.144 -#define MSR_K8_MC1_STATUS 0x405
18.145 -#define MSR_K8_MC1_ADDR 0x406
18.146 -#define MSR_K8_MC1_MISC 0x407
18.147 -
18.148 -#define MSR_K8_MC2_CTL 0x408
18.149 -#define MSR_K8_MC2_STATUS 0x409
18.150 -#define MSR_K8_MC2_ADDR 0x40A
18.151 -#define MSR_K8_MC2_MISC 0x40B
18.152 -
18.153 -#define MSR_K8_MC3_CTL 0x40C
18.154 -#define MSR_K8_MC3_STATUS 0x40D
18.155 -#define MSR_K8_MC3_ADDR 0x40E
18.156 -#define MSR_K8_MC3_MISC 0x40F
18.157 -
18.158 -#define MSR_K8_MC4_CTL 0x410
18.159 -#define MSR_K8_MC4_STATUS 0x411
18.160 -#define MSR_K8_MC4_ADDR 0x412
18.161 -#define MSR_K8_MC4_MISC 0x413
18.162 -
18.163 -#define MSR_K8_MC5_CTL 0x414
18.164 -#define MSR_K8_MC5_STATUS 0x415
18.165 -#define MSR_K8_MC5_ADDR 0x416
18.166 -#define MSR_K8_MC5_MISC 0x417
18.167 -
18.168 -/* Pentium IV performance counter MSRs */
18.169 -#define MSR_P4_BPU_PERFCTR0 0x300
18.170 -#define MSR_P4_BPU_PERFCTR1 0x301
18.171 -#define MSR_P4_BPU_PERFCTR2 0x302
18.172 -#define MSR_P4_BPU_PERFCTR3 0x303
18.173 -#define MSR_P4_MS_PERFCTR0 0x304
18.174 -#define MSR_P4_MS_PERFCTR1 0x305
18.175 -#define MSR_P4_MS_PERFCTR2 0x306
18.176 -#define MSR_P4_MS_PERFCTR3 0x307
18.177 -#define MSR_P4_FLAME_PERFCTR0 0x308
18.178 -#define MSR_P4_FLAME_PERFCTR1 0x309
18.179 -#define MSR_P4_FLAME_PERFCTR2 0x30a
18.180 -#define MSR_P4_FLAME_PERFCTR3 0x30b
18.181 -#define MSR_P4_IQ_PERFCTR0 0x30c
18.182 -#define MSR_P4_IQ_PERFCTR1 0x30d
18.183 -#define MSR_P4_IQ_PERFCTR2 0x30e
18.184 -#define MSR_P4_IQ_PERFCTR3 0x30f
18.185 -#define MSR_P4_IQ_PERFCTR4 0x310
18.186 -#define MSR_P4_IQ_PERFCTR5 0x311
18.187 -#define MSR_P4_BPU_CCCR0 0x360
18.188 -#define MSR_P4_BPU_CCCR1 0x361
18.189 -#define MSR_P4_BPU_CCCR2 0x362
18.190 -#define MSR_P4_BPU_CCCR3 0x363
18.191 -#define MSR_P4_MS_CCCR0 0x364
18.192 -#define MSR_P4_MS_CCCR1 0x365
18.193 -#define MSR_P4_MS_CCCR2 0x366
18.194 -#define MSR_P4_MS_CCCR3 0x367
18.195 -#define MSR_P4_FLAME_CCCR0 0x368
18.196 -#define MSR_P4_FLAME_CCCR1 0x369
18.197 -#define MSR_P4_FLAME_CCCR2 0x36a
18.198 -#define MSR_P4_FLAME_CCCR3 0x36b
18.199 -#define MSR_P4_IQ_CCCR0 0x36c
18.200 -#define MSR_P4_IQ_CCCR1 0x36d
18.201 -#define MSR_P4_IQ_CCCR2 0x36e
18.202 -#define MSR_P4_IQ_CCCR3 0x36f
18.203 -#define MSR_P4_IQ_CCCR4 0x370
18.204 -#define MSR_P4_IQ_CCCR5 0x371
18.205 -#define MSR_P4_ALF_ESCR0 0x3ca
18.206 -#define MSR_P4_ALF_ESCR1 0x3cb
18.207 -#define MSR_P4_BPU_ESCR0 0x3b2
18.208 -#define MSR_P4_BPU_ESCR1 0x3b3
18.209 -#define MSR_P4_BSU_ESCR0 0x3a0
18.210 -#define MSR_P4_BSU_ESCR1 0x3a1
18.211 -#define MSR_P4_CRU_ESCR0 0x3b8
18.212 -#define MSR_P4_CRU_ESCR1 0x3b9
18.213 -#define MSR_P4_CRU_ESCR2 0x3cc
18.214 -#define MSR_P4_CRU_ESCR3 0x3cd
18.215 -#define MSR_P4_CRU_ESCR4 0x3e0
18.216 -#define MSR_P4_CRU_ESCR5 0x3e1
18.217 -#define MSR_P4_DAC_ESCR0 0x3a8
18.218 -#define MSR_P4_DAC_ESCR1 0x3a9
18.219 -#define MSR_P4_FIRM_ESCR0 0x3a4
18.220 -#define MSR_P4_FIRM_ESCR1 0x3a5
18.221 -#define MSR_P4_FLAME_ESCR0 0x3a6
18.222 -#define MSR_P4_FLAME_ESCR1 0x3a7
18.223 -#define MSR_P4_FSB_ESCR0 0x3a2
18.224 -#define MSR_P4_FSB_ESCR1 0x3a3
18.225 -#define MSR_P4_IQ_ESCR0 0x3ba
18.226 -#define MSR_P4_IQ_ESCR1 0x3bb
18.227 -#define MSR_P4_IS_ESCR0 0x3b4
18.228 -#define MSR_P4_IS_ESCR1 0x3b5
18.229 -#define MSR_P4_ITLB_ESCR0 0x3b6
18.230 -#define MSR_P4_ITLB_ESCR1 0x3b7
18.231 -#define MSR_P4_IX_ESCR0 0x3c8
18.232 -#define MSR_P4_IX_ESCR1 0x3c9
18.233 -#define MSR_P4_MOB_ESCR0 0x3aa
18.234 -#define MSR_P4_MOB_ESCR1 0x3ab
18.235 -#define MSR_P4_MS_ESCR0 0x3c0
18.236 -#define MSR_P4_MS_ESCR1 0x3c1
18.237 -#define MSR_P4_PMH_ESCR0 0x3ac
18.238 -#define MSR_P4_PMH_ESCR1 0x3ad
18.239 -#define MSR_P4_RAT_ESCR0 0x3bc
18.240 -#define MSR_P4_RAT_ESCR1 0x3bd
18.241 -#define MSR_P4_SAAT_ESCR0 0x3ae
18.242 -#define MSR_P4_SAAT_ESCR1 0x3af
18.243 -#define MSR_P4_SSU_ESCR0 0x3be
18.244 -#define MSR_P4_SSU_ESCR1 0x3bf /* guess: not defined in manual */
18.245 -#define MSR_P4_TBPU_ESCR0 0x3c2
18.246 -#define MSR_P4_TBPU_ESCR1 0x3c3
18.247 -#define MSR_P4_TC_ESCR0 0x3c4
18.248 -#define MSR_P4_TC_ESCR1 0x3c5
18.249 -#define MSR_P4_U2L_ESCR0 0x3b0
18.250 -#define MSR_P4_U2L_ESCR1 0x3b1
18.251 -
18.252 -#define MSR_K6_EFER 0xC0000080
18.253 -#define MSR_K6_STAR 0xC0000081
18.254 -#define MSR_K6_WHCR 0xC0000082
18.255 -#define MSR_K6_UWCCR 0xC0000085
18.256 -#define MSR_K6_EPMR 0xC0000086
18.257 -#define MSR_K6_PSOR 0xC0000087
18.258 -#define MSR_K6_PFIR 0xC0000088
18.259 -
18.260 -#define MSR_K7_EVNTSEL0 0xC0010000
18.261 -#define MSR_K7_EVNTSEL1 0xC0010001
18.262 -#define MSR_K7_EVNTSEL2 0xC0010002
18.263 -#define MSR_K7_EVNTSEL3 0xC0010003
18.264 -#define MSR_K7_PERFCTR0 0xC0010004
18.265 -#define MSR_K7_PERFCTR1 0xC0010005
18.266 -#define MSR_K7_PERFCTR2 0xC0010006
18.267 -#define MSR_K7_PERFCTR3 0xC0010007
18.268 -#define MSR_K7_HWCR 0xC0010015
18.269 -#define MSR_K7_CLK_CTL 0xC001001b
18.270 -#define MSR_K7_FID_VID_CTL 0xC0010041
18.271 -#define MSR_K7_FID_VID_STATUS 0xC0010042
18.272 -
18.273 -#define MSR_K8_TOP_MEM1 0xC001001A
18.274 -#define MSR_K8_TOP_MEM2 0xC001001D
18.275 -#define MSR_K8_SYSCFG 0xC0010010
18.276 -#define MSR_K8_HWCR 0xC0010015
18.277 -#define MSR_K8_VM_CR 0xC0010114
18.278 -#define MSR_K8_VM_HSAVE_PA 0xC0010117
18.279 -
18.280 -#define MSR_K8_FIDVID_CTL 0xC0010041
18.281 -#define MSR_K8_FIDVID_STATUS 0xC0010042
18.282 -
18.283 -/* MSR_K8_VM_CR bits: */
18.284 -#define _K8_VMCR_SVME_DISABLE 4
18.285 -#define K8_VMCR_SVME_DISABLE (1 << _K8_VMCR_SVME_DISABLE)
18.286 -
18.287 -/* Centaur-Hauls/IDT defined MSRs. */
18.288 -#define MSR_IDT_FCR1 0x107
18.289 -#define MSR_IDT_FCR2 0x108
18.290 -#define MSR_IDT_FCR3 0x109
18.291 -#define MSR_IDT_FCR4 0x10a
18.292 -
18.293 -#define MSR_IDT_MCR0 0x110
18.294 -#define MSR_IDT_MCR1 0x111
18.295 -#define MSR_IDT_MCR2 0x112
18.296 -#define MSR_IDT_MCR3 0x113
18.297 -#define MSR_IDT_MCR4 0x114
18.298 -#define MSR_IDT_MCR5 0x115
18.299 -#define MSR_IDT_MCR6 0x116
18.300 -#define MSR_IDT_MCR7 0x117
18.301 -#define MSR_IDT_MCR_CTRL 0x120
18.302 -
18.303 -/* VIA Cyrix defined MSRs*/
18.304 -#define MSR_VIA_FCR 0x1107
18.305 -#define MSR_VIA_LONGHAUL 0x110a
18.306 -#define MSR_VIA_RNG 0x110b
18.307 -#define MSR_VIA_BCR2 0x1147
18.308 -
18.309 -/* Transmeta defined MSRs */
18.310 -#define MSR_TMTA_LONGRUN_CTRL 0x80868010
18.311 -#define MSR_TMTA_LONGRUN_FLAGS 0x80868011
18.312 -#define MSR_TMTA_LRTI_READOUT 0x80868018
18.313 -#define MSR_TMTA_LRTI_VOLT_MHZ 0x8086801a
18.314 +#endif /* !__ASSEMBLY__ */
18.315 
18.316 #endif /* __ASM_MSR_H */
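The block removed above is the old private copy of the architectural and vendor MSR lists; since this changeset is a sync with 2.6.23-rc9, the removal presumably reflects a restructuring of the header rather than the constants vanishing from the tree. The AMD SVM constants in particular are still consumed in the usual way. A minimal sketch, assuming the constants remain visible via <asm/msr.h>; the helper name svm_disabled_by_bios() is invented for illustration and is not part of this changeset:

    #include <asm/msr.h>

    /* Check whether the BIOS has locked out SVM via VM_CR.SVME_DISABLE. */
    static int svm_disabled_by_bios(void)
    {
        uint64_t vm_cr;

        rdmsrl(MSR_K8_VM_CR, vm_cr);             /* MSR 0xC0010114 */
        return !!(vm_cr & K8_VMCR_SVME_DISABLE); /* bit 4: SVME disable */
    }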
19.1 --- a/xen/include/asm-x86/mtrr.h Thu Oct 11 10:21:55 2007 +0100
19.2 +++ b/xen/include/asm-x86/mtrr.h Thu Oct 11 12:11:54 2007 +0100
19.3 @@ -11,6 +11,8 @@
19.4  #define MTRR_TYPE_WRBACK 6
19.5  #define MTRR_NUM_TYPES 7
19.6 
19.7 +extern void mtrr_save_fixed_ranges(void *);
19.8 +extern void mtrr_save_state(void);
19.9  extern int mtrr_add(unsigned long base, unsigned long size,
19.10      unsigned int type, char increment);
19.11  extern int mtrr_add_page(unsigned long base, unsigned long size,
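The two new declarations expose the MTRR state-save path. A hedged sketch of the expected call pattern; the hook name cpu_bringup_prepare() is invented here, and the guess that the void * parameter exists so mtrr_save_fixed_ranges() can double as an IPI-style callback is an inference from its signature, not something this diff states:

    #include <asm/mtrr.h>

    /* Hypothetical bringup hook: snapshot the current CPU's MTRR setup so
     * a newly started CPU can be programmed to match. */
    static void cpu_bringup_prepare(void)
    {
        mtrr_save_state();
    }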
20.1 --- a/xen/include/asm-x86/processor.h Thu Oct 11 10:21:55 2007 +0100
20.2 +++ b/xen/include/asm-x86/processor.h Thu Oct 11 12:11:54 2007 +0100
20.3 @@ -342,6 +342,12 @@ static always_inline void set_in_cr4 (un
20.4      write_cr4(mmu_cr4_features);
20.5  }
20.6 
20.7 +static always_inline void clear_in_cr4 (unsigned long mask)
20.8 +{
20.9 +    mmu_cr4_features &= ~mask;
20.10 +    write_cr4(mmu_cr4_features);
20.11 +}
20.12 +
20.13  /*
20.14   * NSC/Cyrix CPU configuration register indexes
20.15   */
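clear_in_cr4() is the inverse of the set_in_cr4() visible in the hunk's context: both update the cached mmu_cr4_features before writing CR4, so a CPU initialised later from the cached value cannot resurrect a bit that was cleared. Illustration only, assuming X86_CR4_TSD is among the CR4 bit masks already defined in this header:

    set_in_cr4(X86_CR4_TSD);     /* make RDTSC privileged */
    /* ... */
    clear_in_cr4(X86_CR4_TSD);   /* permit RDTSC from all rings again */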
21.1 --- a/xen/include/xen/config.h Thu Oct 11 10:21:55 2007 +0100
21.2 +++ b/xen/include/xen/config.h Thu Oct 11 12:11:54 2007 +0100
21.3 @@ -10,6 +10,7 @@
21.4  #include <asm/config.h>
21.5 
21.6  #define EXPORT_SYMBOL(var)
21.7 +#define EXPORT_SYMBOL_GPL(var)
21.8  #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
21.9 
21.10  /*
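Xen stubs out the Linux symbol-export macros, and the freshly synced 2.6.23-rc9 files evidently use the _GPL variant, hence the new empty definition alongside the existing EXPORT_SYMBOL stub. Imported code can then keep its annotations verbatim; for example (whether mtrr_save_state is actually exported this way in the synced source is an assumption):

    void mtrr_save_state(void)
    {
        /* ... */
    }
    EXPORT_SYMBOL_GPL(mtrr_save_state);   /* expands to nothing under Xen */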