ia64/xen-unstable

changeset 8846:b9b411b50587

Upgrade arch/x86/cpu/* files to their equivalents in
linux-2.6.16-rc2/arch/i386/kernel/cpu/*.

Also add kernel taint tracking, and include that
information, along with Xen release info, in our crash dumps.
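
The taint tracking mentioned above is, in essence, a global bitmask: the new
machine-check handlers set a flag via add_taint(TAINT_MACHINE_CHECK) and the
crash-dump path reports the accumulated flags together with the release
information. A minimal sketch, assuming illustrative bit values and output
format (the actual implementation is in xen/common/kernel.c and
xen/include/xen/lib.h, part of this changeset but not shown below):

	/* Minimal sketch of taint tracking; the bit value and dump format are
	 * illustrative assumptions, not the actual Xen definitions. */
	#define TAINT_MACHINE_CHECK (1u << 0)

	static unsigned int tainted;

	void add_taint(unsigned int flag)
	{
		tainted |= flag;
	}

	/* A crash dump can then report the accumulated taint, e.g.: */
	void show_taint(void)
	{
		printk("Xen is %stainted%s\n", tainted ? "" : "not ",
		       (tainted & TAINT_MACHINE_CHECK) ? " (machine check)" : "");
	}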

Signed-off-by: Keir Fraser <keir@xensource.com>
author kaf24@firebug.cl.cam.ac.uk
date Tue Feb 14 16:23:43 2006 +0100 (2006-02-14)
parents fcc833cbaf82
children 07a892f12609
files xen/arch/x86/Makefile xen/arch/x86/apic.c xen/arch/x86/cpu/amd.c xen/arch/x86/cpu/common.c xen/arch/x86/cpu/intel.c xen/arch/x86/cpu/mcheck/k7.c xen/arch/x86/cpu/mcheck/mce.c xen/arch/x86/cpu/mcheck/mce.h xen/arch/x86/cpu/mcheck/non-fatal.c xen/arch/x86/cpu/mcheck/p4.c xen/arch/x86/cpu/mcheck/p5.c xen/arch/x86/cpu/mcheck/p6.c xen/arch/x86/cpu/mcheck/winchip.c xen/arch/x86/cpu/mtrr/amd.c xen/arch/x86/cpu/mtrr/centaur.c xen/arch/x86/cpu/mtrr/cyrix.c xen/arch/x86/cpu/mtrr/generic.c xen/arch/x86/cpu/mtrr/main.c xen/arch/x86/cpu/mtrr/mtrr.h xen/arch/x86/cpu/mtrr/state.c xen/arch/x86/dom0_ops.c xen/arch/x86/hvm/svm/svm.c xen/arch/x86/hvm/vmx/vmx.c xen/arch/x86/i8259.c xen/arch/x86/setup.c xen/arch/x86/smp.c xen/arch/x86/smpboot.c xen/arch/x86/traps.c xen/arch/x86/x86_32/traps.c xen/arch/x86/x86_64/mm.c xen/arch/x86/x86_64/traps.c xen/common/kernel.c xen/include/asm-x86/bitops.h xen/include/asm-x86/config.h xen/include/asm-x86/cpufeature.h xen/include/asm-x86/msr.h xen/include/asm-x86/processor.h xen/include/asm-x86/x86_32/asm_defns.h xen/include/asm-x86/x86_64/asm_defns.h xen/include/xen/bitops.h xen/include/xen/init.h xen/include/xen/lib.h
line diff
     1.1 --- a/xen/arch/x86/Makefile	Mon Feb 13 17:41:23 2006 +0100
     1.2 +++ b/xen/arch/x86/Makefile	Tue Feb 14 16:23:43 2006 +0100
     1.3 @@ -4,9 +4,10 @@ include $(BASEDIR)/Rules.mk
     1.4  OBJS += $(patsubst %.S,%.o,$(wildcard $(TARGET_SUBARCH)/*.S))
     1.5  OBJS += $(patsubst %.c,%.o,$(wildcard $(TARGET_SUBARCH)/*.c))
     1.6  OBJS += $(patsubst %.c,%.o,$(wildcard acpi/*.c))
     1.7 -OBJS += $(patsubst %.c,%.o,$(wildcard mtrr/*.c))
     1.8  OBJS += $(patsubst %.c,%.o,$(wildcard genapic/*.c))
     1.9  OBJS += $(patsubst %.c,%.o,$(wildcard cpu/*.c))
    1.10 +OBJS += $(patsubst %.c,%.o,$(wildcard cpu/mcheck/*.c))
    1.11 +OBJS += $(patsubst %.c,%.o,$(wildcard cpu/mtrr/*.c))
    1.12  OBJS += $(patsubst %.c,%.o,$(wildcard hvm/*.c))
    1.13  OBJS += $(patsubst %.c,%.o,$(wildcard hvm/vmx/*.c))
    1.14  OBJS += $(patsubst %.S,%.o,$(wildcard hvm/vmx/$(TARGET_SUBARCH)/*.S))
     2.1 --- a/xen/arch/x86/apic.c	Mon Feb 13 17:41:23 2006 +0100
     2.2 +++ b/xen/arch/x86/apic.c	Tue Feb 14 16:23:43 2006 +0100
     2.3 @@ -927,7 +927,7 @@ int reprogram_timer(s_time_t timeout)
     2.4      return 1;
     2.5  }
     2.6  
     2.7 -void smp_apic_timer_interrupt(struct cpu_user_regs * regs)
     2.8 +fastcall void smp_apic_timer_interrupt(struct cpu_user_regs * regs)
     2.9  {
    2.10      ack_APIC_irq();
    2.11      perfc_incrc(apic_timer);
    2.12 @@ -937,7 +937,7 @@ void smp_apic_timer_interrupt(struct cpu
    2.13  /*
    2.14   * This interrupt should _never_ happen with our APIC/SMP architecture
    2.15   */
    2.16 -asmlinkage void smp_spurious_interrupt(struct cpu_user_regs *regs)
    2.17 +fastcall void smp_spurious_interrupt(struct cpu_user_regs *regs)
    2.18  {
    2.19      unsigned long v;
    2.20  
    2.21 @@ -959,7 +959,7 @@ asmlinkage void smp_spurious_interrupt(s
    2.22   * This interrupt should never happen with our APIC/SMP architecture
    2.23   */
    2.24  
    2.25 -asmlinkage void smp_error_interrupt(struct cpu_user_regs *regs)
    2.26 +fastcall void smp_error_interrupt(struct cpu_user_regs *regs)
    2.27  {
    2.28      unsigned long v, v1;
    2.29  
     3.1 --- a/xen/arch/x86/cpu/amd.c	Mon Feb 13 17:41:23 2006 +0100
     3.2 +++ b/xen/arch/x86/cpu/amd.c	Tue Feb 14 16:23:43 2006 +0100
     3.3 @@ -48,6 +48,22 @@ static void __init init_amd(struct cpuin
     3.4  	int mbytes = num_physpages >> (20-PAGE_SHIFT);
     3.5  	int r;
     3.6  
     3.7 +#ifdef CONFIG_SMP
     3.8 +	unsigned long long value;
     3.9 +
    3.10 +	/* Disable TLB flush filter by setting HWCR.FFDIS on K8
    3.11 +	 * bit 6 of msr C001_0015
    3.12 +	 *
    3.13 +	 * Errata 63 for SH-B3 steppings
    3.14 +	 * Errata 122 for all steppings (F+ have it disabled by default)
    3.15 +	 */
    3.16 +	if (c->x86 == 15) {
    3.17 +		rdmsrl(MSR_K7_HWCR, value);
    3.18 +		value |= 1 << 6;
    3.19 +		wrmsrl(MSR_K7_HWCR, value);
    3.20 +	}
    3.21 +#endif
    3.22 +
    3.23  	/*
    3.24  	 *	FIXME: We should handle the K5 here. Set up the write
    3.25  	 *	range and also turn on MSR 83 bits 4 and 31 (write alloc,
    3.26 @@ -165,8 +181,13 @@ static void __init init_amd(struct cpuin
    3.27  					set_bit(X86_FEATURE_K6_MTRR, c->x86_capability);
    3.28  				break;
    3.29  			}
    3.30 +
    3.31 +			if (c->x86_model == 10) {
    3.32 +				/* AMD Geode LX is model 10 */
    3.33 +				/* placeholder for any needed mods */
    3.34 +				break;
    3.35 +			}
    3.36  			break;
    3.37 -
    3.38  		case 6: /* An Athlon/Duron */
    3.39   
    3.40  			/* Bit 15 of Athlon specific MSR 15, needs to be 0
    3.41 @@ -225,9 +246,15 @@ static void __init init_amd(struct cpuin
    3.42  	display_cacheinfo(c);
    3.43  
    3.44  	if (cpuid_eax(0x80000000) >= 0x80000008) {
    3.45 -		c->x86_num_cores = (cpuid_ecx(0x80000008) & 0xff) + 1;
    3.46 -		if (c->x86_num_cores & (c->x86_num_cores - 1))
    3.47 -			c->x86_num_cores = 1;
    3.48 +		c->x86_max_cores = (cpuid_ecx(0x80000008) & 0xff) + 1;
    3.49 +		if (c->x86_max_cores & (c->x86_max_cores - 1))
    3.50 +			c->x86_max_cores = 1;
    3.51 +	}
    3.52 +
    3.53 +	if (cpuid_eax(0x80000000) >= 0x80000007) {
    3.54 +		c->x86_power = cpuid_edx(0x80000007);
    3.55 +		if (c->x86_power & (1<<8))
    3.56 +			set_bit(X86_FEATURE_CONSTANT_TSC, c->x86_capability);
    3.57  	}
    3.58  
    3.59  #ifdef CONFIG_X86_HT
    3.60 @@ -236,15 +263,15 @@ static void __init init_amd(struct cpuin
    3.61  	 * distingush the cores.  Assumes number of cores is a power
    3.62  	 * of two.
    3.63  	 */
    3.64 -	if (c->x86_num_cores > 1) {
    3.65 +	if (c->x86_max_cores > 1) {
    3.66  		int cpu = smp_processor_id();
    3.67  		unsigned bits = 0;
    3.68 -		while ((1 << bits) < c->x86_num_cores)
    3.69 +		while ((1 << bits) < c->x86_max_cores)
    3.70  			bits++;
    3.71  		cpu_core_id[cpu] = phys_proc_id[cpu] & ((1<<bits)-1);
    3.72  		phys_proc_id[cpu] >>= bits;
    3.73  		printk(KERN_INFO "CPU %d(%d) -> Core %d\n",
    3.74 -		       cpu, c->x86_num_cores, cpu_core_id[cpu]);
    3.75 +		       cpu, c->x86_max_cores, cpu_core_id[cpu]);
    3.76  	}
    3.77  #endif
    3.78  
     4.1 --- a/xen/arch/x86/cpu/common.c	Mon Feb 13 17:41:23 2006 +0100
     4.2 +++ b/xen/arch/x86/cpu/common.c	Tue Feb 14 16:23:43 2006 +0100
     4.3 @@ -17,14 +17,12 @@
     4.4  #define tsc_disable 0
     4.5  #define disable_pse 0
     4.6  
     4.7 -static int cachesize_override __initdata = -1;
     4.8 -static int disable_x86_fxsr __initdata = 0;
     4.9 -static int disable_x86_serial_nr __initdata = 1;
    4.10 +static int cachesize_override __devinitdata = -1;
    4.11 +static int disable_x86_fxsr __devinitdata = 0;
    4.12 +static int disable_x86_serial_nr __devinitdata = 0;
    4.13  
    4.14  struct cpu_dev * cpu_devs[X86_VENDOR_NUM] = {};
    4.15  
    4.16 -extern void mcheck_init(struct cpuinfo_x86 *c);
    4.17 -
    4.18  static void default_init(struct cpuinfo_x86 * c)
    4.19  {
    4.20  	/* Not much we can do here... */
    4.21 @@ -43,7 +41,9 @@ static struct cpu_dev default_cpu = {
    4.22  };
    4.23  static struct cpu_dev * this_cpu = &default_cpu;
    4.24  
    4.25 -int __init get_model_name(struct cpuinfo_x86 *c)
    4.26 +integer_param("cachesize", cachesize_override);
    4.27 +
    4.28 +int __devinit get_model_name(struct cpuinfo_x86 *c)
    4.29  {
    4.30  	unsigned int *v;
    4.31  	char *p, *q;
    4.32 @@ -73,7 +73,7 @@ int __init get_model_name(struct cpuinfo
    4.33  }
    4.34  
    4.35  
    4.36 -void __init display_cacheinfo(struct cpuinfo_x86 *c)
    4.37 +void __devinit display_cacheinfo(struct cpuinfo_x86 *c)
    4.38  {
    4.39  	unsigned int n, dummy, ecx, edx, l2size;
    4.40  
    4.41 @@ -114,7 +114,7 @@ void __init display_cacheinfo(struct cpu
    4.42  /* in particular, if CPUID levels 0x80000002..4 are supported, this isn't used */
    4.43  
    4.44  /* Look up CPU names by table lookup. */
    4.45 -static char __init *table_lookup_model(struct cpuinfo_x86 *c)
    4.46 +static char __devinit *table_lookup_model(struct cpuinfo_x86 *c)
    4.47  {
    4.48  	struct cpu_model_info *info;
    4.49  
    4.50 @@ -135,7 +135,7 @@ static char __init *table_lookup_model(s
    4.51  }
    4.52  
    4.53  
    4.54 -void __init get_cpu_vendor(struct cpuinfo_x86 *c, int early)
    4.55 +static void __devinit get_cpu_vendor(struct cpuinfo_x86 *c, int early)
    4.56  {
    4.57  	char *v = c->x86_vendor_id;
    4.58  	int i;
    4.59 @@ -155,12 +155,7 @@ void __init get_cpu_vendor(struct cpuinf
    4.60  }
    4.61  
    4.62  
    4.63 -static int __init x86_fxsr_setup(char * s)
    4.64 -{
    4.65 -	disable_x86_fxsr = 1;
    4.66 -	return 1;
    4.67 -}
    4.68 -__setup("nofxsr", x86_fxsr_setup);
    4.69 +boolean_param("nofxsr", disable_x86_fxsr);
    4.70  
    4.71  
    4.72  /* Standard macro to see if a specific flag is changeable */
    4.73 @@ -186,14 +181,17 @@ static inline int flag_is_changeable_p(u
    4.74  
    4.75  
    4.76  /* Probe for the CPUID instruction */
    4.77 -static int __init have_cpuid_p(void)
    4.78 +static int __devinit have_cpuid_p(void)
    4.79  {
    4.80  	return flag_is_changeable_p(X86_EFLAGS_ID);
    4.81  }
    4.82  
    4.83  /* Do minimum CPU detection early.
    4.84     Fields really needed: vendor, cpuid_level, family, model, mask, cache alignment.
    4.85 -   The others are not touched to avoid unwanted side effects. */
    4.86 +   The others are not touched to avoid unwanted side effects.
    4.87 +
    4.88 +   WARNING: this function is only called on the BP.  Don't add code here
    4.89 +   that is supposed to run on all CPUs. */
    4.90  static void __init early_cpu_detect(void)
    4.91  {
    4.92  	struct cpuinfo_x86 *c = &boot_cpu_data;
    4.93 @@ -217,24 +215,18 @@ static void __init early_cpu_detect(void
    4.94  		cpuid(0x00000001, &tfms, &misc, &junk, &cap0);
    4.95  		c->x86 = (tfms >> 8) & 15;
    4.96  		c->x86_model = (tfms >> 4) & 15;
    4.97 -		if (c->x86 == 0xf) {
    4.98 +		if (c->x86 == 0xf)
    4.99  			c->x86 += (tfms >> 20) & 0xff;
   4.100 +		if (c->x86 >= 0x6)
   4.101  			c->x86_model += ((tfms >> 16) & 0xF) << 4;
   4.102 -		}
   4.103  		c->x86_mask = tfms & 15;
   4.104  		if (cap0 & (1<<19))
   4.105  			c->x86_cache_alignment = ((misc >> 8) & 0xff) * 8;
   4.106  		c->x86_capability[0] = cap0; /* Added for Xen bootstrap */
   4.107  	}
   4.108 -
   4.109 -	early_intel_workaround(c);
   4.110 -
   4.111 -#ifdef CONFIG_X86_HT
   4.112 -	phys_proc_id[smp_processor_id()] = (cpuid_ebx(1) >> 24) & 0xff;
   4.113 -#endif
   4.114  }
   4.115  
   4.116 -void __init generic_identify(struct cpuinfo_x86 * c)
   4.117 +void __devinit generic_identify(struct cpuinfo_x86 * c)
   4.118  {
   4.119  	u32 tfms, xlvl;
   4.120  	int junk;
   4.121 @@ -279,9 +271,15 @@ void __init generic_identify(struct cpui
   4.122  				get_model_name(c); /* Default name */
   4.123  		}
   4.124  	}
   4.125 +
   4.126 +	early_intel_workaround(c);
   4.127 +
   4.128 +#ifdef CONFIG_X86_HT
   4.129 +	phys_proc_id[smp_processor_id()] = (cpuid_ebx(1) >> 24) & 0xff;
   4.130 +#endif
   4.131  }
   4.132  
   4.133 -static void __init squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
   4.134 +static void __devinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
   4.135  {
   4.136  	if (cpu_has(c, X86_FEATURE_PN) && disable_x86_serial_nr ) {
   4.137  		/* Disable processor serial number */
   4.138 @@ -297,19 +295,14 @@ static void __init squash_the_stupid_ser
   4.139  	}
   4.140  }
   4.141  
   4.142 -static int __init x86_serial_nr_setup(char *s)
   4.143 -{
   4.144 -	disable_x86_serial_nr = 0;
   4.145 -	return 1;
   4.146 -}
   4.147 -__setup("serialnumber", x86_serial_nr_setup);
   4.148 +boolean_param("noserialnumber", disable_x86_serial_nr);
   4.149  
   4.150  
   4.151  
   4.152  /*
   4.153   * This does the hard work of actually picking apart the CPU stuff...
   4.154   */
   4.155 -void __init identify_cpu(struct cpuinfo_x86 *c)
   4.156 +void __devinit identify_cpu(struct cpuinfo_x86 *c)
   4.157  {
   4.158  	int i;
   4.159  
   4.160 @@ -319,7 +312,7 @@ void __init identify_cpu(struct cpuinfo_
   4.161  	c->x86_model = c->x86_mask = 0;	/* So far unknown... */
   4.162  	c->x86_vendor_id[0] = '\0'; /* Unset */
   4.163  	c->x86_model_id[0] = '\0';  /* Unset */
   4.164 -	c->x86_num_cores = 1;
   4.165 +	c->x86_max_cores = 1;
   4.166  	memset(&c->x86_capability, 0, sizeof c->x86_capability);
   4.167  
   4.168  	if (!have_cpuid_p()) {
   4.169 @@ -342,6 +335,7 @@ void __init identify_cpu(struct cpuinfo_
   4.170  
   4.171  	if (this_cpu->c_identify) {
   4.172  		this_cpu->c_identify(c);
   4.173 +
   4.174  #ifdef NOISY_CAPS
   4.175  		printk(KERN_DEBUG "CPU: After vendor identify, caps:");
   4.176  		for (i = 0; i < NCAPINTS; i++)
   4.177 @@ -397,12 +391,14 @@ void __init identify_cpu(struct cpuinfo_
   4.178  	}
   4.179  
   4.180  	/* Now the feature flags better reflect actual CPU features! */
   4.181 +
   4.182  #ifdef NOISY_CAPS
   4.183  	printk(KERN_DEBUG "CPU: After all inits, caps:");
   4.184  	for (i = 0; i < NCAPINTS; i++)
   4.185  		printk(" %08x", c->x86_capability[i]);
   4.186  	printk("\n");
   4.187  #endif
   4.188 +
   4.189  	/*
   4.190  	 * On SMP, boot_cpu_data holds the common feature set between
   4.191  	 * all CPUs; so make sure that we indicate which features are
   4.192 @@ -416,68 +412,69 @@ void __init identify_cpu(struct cpuinfo_
   4.193  	}
   4.194  
   4.195  	/* Init Machine Check Exception if available. */
   4.196 -#ifdef CONFIG_X86_MCE
   4.197  	mcheck_init(c);
   4.198 +
   4.199 +#if 0
   4.200 +	if (c == &boot_cpu_data)
   4.201 +		sysenter_setup();
   4.202 +	enable_sep_cpu();
   4.203  #endif
   4.204 +
   4.205 +	if (c == &boot_cpu_data)
   4.206 +		mtrr_bp_init();
   4.207 +	else
   4.208 +		mtrr_ap_init();
   4.209  }
   4.210  
   4.211  #ifdef CONFIG_X86_HT
   4.212 -void __init detect_ht(struct cpuinfo_x86 *c)
   4.213 +void __devinit detect_ht(struct cpuinfo_x86 *c)
   4.214  {
   4.215  	u32 	eax, ebx, ecx, edx;
   4.216 -	int 	index_msb, tmp;
   4.217 +	int 	index_msb, core_bits;
   4.218  	int 	cpu = smp_processor_id();
   4.219  
   4.220 +	cpuid(1, &eax, &ebx, &ecx, &edx);
   4.221 +
   4.222 +	c->apicid = phys_pkg_id((ebx >> 24) & 0xFF, 0);
   4.223 +
   4.224  	if (!cpu_has(c, X86_FEATURE_HT) || cpu_has(c, X86_FEATURE_CMP_LEGACY))
   4.225  		return;
   4.226  
   4.227 -	cpuid(1, &eax, &ebx, &ecx, &edx);
   4.228  	smp_num_siblings = (ebx & 0xff0000) >> 16;
   4.229  
   4.230  	if (smp_num_siblings == 1) {
   4.231  		printk(KERN_INFO  "CPU: Hyper-Threading is disabled\n");
   4.232  	} else if (smp_num_siblings > 1 ) {
   4.233 -		index_msb = 31;
   4.234  
   4.235  		if (smp_num_siblings > NR_CPUS) {
   4.236  			printk(KERN_WARNING "CPU: Unsupported number of the siblings %d", smp_num_siblings);
   4.237  			smp_num_siblings = 1;
   4.238  			return;
   4.239  		}
   4.240 -		tmp = smp_num_siblings;
   4.241 -		while ((tmp & 0x80000000 ) == 0) {
   4.242 -			tmp <<=1 ;
   4.243 -			index_msb--;
   4.244 -		}
   4.245 -		if (smp_num_siblings & (smp_num_siblings - 1))
   4.246 -			index_msb++;
   4.247 +
   4.248 +		index_msb = get_count_order(smp_num_siblings);
   4.249  		phys_proc_id[cpu] = phys_pkg_id((ebx >> 24) & 0xFF, index_msb);
   4.250  
   4.251  		printk(KERN_INFO  "CPU: Physical Processor ID: %d\n",
   4.252  		       phys_proc_id[cpu]);
   4.253  
   4.254 -		smp_num_siblings = smp_num_siblings / c->x86_num_cores;
   4.255 +		smp_num_siblings = smp_num_siblings / c->x86_max_cores;
   4.256  
   4.257 -		tmp = smp_num_siblings;
   4.258 -		index_msb = 31;
   4.259 -		while ((tmp & 0x80000000) == 0) {
   4.260 -			tmp <<=1 ;
   4.261 -			index_msb--;
   4.262 -		}
   4.263 +		index_msb = get_count_order(smp_num_siblings) ;
   4.264  
   4.265 -		if (smp_num_siblings & (smp_num_siblings - 1))
   4.266 -			index_msb++;
   4.267 +		core_bits = get_count_order(c->x86_max_cores);
   4.268  
   4.269 -		cpu_core_id[cpu] = phys_pkg_id((ebx >> 24) & 0xFF, index_msb);
   4.270 +		cpu_core_id[cpu] = phys_pkg_id((ebx >> 24) & 0xFF, index_msb) &
   4.271 +					       ((1 << core_bits) - 1);
   4.272  
   4.273 -		if (c->x86_num_cores > 1)
   4.274 +		if (c->x86_max_cores > 1)
   4.275  			printk(KERN_INFO  "CPU: Processor Core ID: %d\n",
   4.276  			       cpu_core_id[cpu]);
   4.277  	}
   4.278  }
   4.279  #endif
   4.280  
   4.281 -void __init print_cpu_info(struct cpuinfo_x86 *c)
   4.282 +void __devinit print_cpu_info(struct cpuinfo_x86 *c)
   4.283  {
   4.284  	char *vendor = NULL;
   4.285  
   4.286 @@ -500,7 +497,7 @@ void __init print_cpu_info(struct cpuinf
   4.287  		printk("\n");
   4.288  }
   4.289  
   4.290 -cpumask_t cpu_initialized __initdata = CPU_MASK_NONE;
   4.291 +cpumask_t cpu_initialized __devinitdata = CPU_MASK_NONE;
   4.292  
   4.293  /* This is hacky. :)
   4.294   * We're emulating future behavior.
   4.295 @@ -537,7 +534,7 @@ void __init early_cpu_init(void)
   4.296   * and IDT. We reload them nevertheless, this function acts as a
   4.297   * 'CPU state barrier', nothing should get across.
   4.298   */
   4.299 -void __init cpu_init (void)
   4.300 +void __devinit cpu_init(void)
   4.301  {
   4.302  	int cpu = smp_processor_id();
   4.303  	struct tss_struct *t = &init_tss[cpu];
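
For reference, the rewritten detect_ht() above replaces the open-coded
most-significant-bit loops with get_count_order(). A minimal sketch of that
helper follows (the real definition comes from the bitops headers this
changeset also touches; this is the conventional implementation, shown only
as an aid to reading the diff):

	/* Sketch of get_count_order(): the smallest order such that
	 * (1 << order) >= count, used above to derive index_msb and core_bits. */
	static inline int get_count_order(unsigned int count)
	{
		int order = fls(count) - 1;   /* index of the highest set bit */

		if (count & (count - 1))      /* not a power of two: round up */
			order++;
		return order;
	}
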
     5.1 --- a/xen/arch/x86/cpu/intel.c	Mon Feb 13 17:41:23 2006 +0100
     5.2 +++ b/xen/arch/x86/cpu/intel.c	Tue Feb 14 16:23:43 2006 +0100
     5.3 @@ -22,10 +22,10 @@ extern int trap_init_f00f_bug(void);
     5.4  /*
     5.5   * Alignment at which movsl is preferred for bulk memory copies.
     5.6   */
     5.7 -struct movsl_mask movsl_mask;
     5.8 +struct movsl_mask movsl_mask __read_mostly;
     5.9  #endif
    5.10  
    5.11 -void __init early_intel_workaround(struct cpuinfo_x86 *c)
    5.12 +void __devinit early_intel_workaround(struct cpuinfo_x86 *c)
    5.13  {
    5.14  	if (c->x86_vendor != X86_VENDOR_INTEL)
    5.15  		return;
    5.16 @@ -40,7 +40,7 @@ void __init early_intel_workaround(struc
    5.17   *	This is called before we do cpu ident work
    5.18   */
    5.19   
    5.20 -int __init ppro_with_ram_bug(void)
    5.21 +int __devinit ppro_with_ram_bug(void)
    5.22  {
    5.23  	/* Uses data from early_cpu_detect now */
    5.24  	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
    5.25 @@ -58,7 +58,7 @@ int __init ppro_with_ram_bug(void)
    5.26   * P4 Xeon errata 037 workaround.
    5.27   * Hardware prefetcher may cause stale data to be loaded into the cache.
    5.28   */
    5.29 -static void __init Intel_errata_workarounds(struct cpuinfo_x86 *c)
    5.30 +static void __devinit Intel_errata_workarounds(struct cpuinfo_x86 *c)
    5.31  {
    5.32  	unsigned long lo, hi;
    5.33  
    5.34 @@ -77,25 +77,22 @@ static void __init Intel_errata_workarou
    5.35  /*
    5.36   * find out the number of processor cores on the die
    5.37   */
    5.38 -static int __init num_cpu_cores(struct cpuinfo_x86 *c)
    5.39 +static int __devinit num_cpu_cores(struct cpuinfo_x86 *c)
    5.40  {
    5.41 -	unsigned int eax;
    5.42 +	unsigned int eax, ebx, ecx, edx;
    5.43  
    5.44  	if (c->cpuid_level < 4)
    5.45  		return 1;
    5.46  
    5.47 -	__asm__("cpuid"
    5.48 -		: "=a" (eax)
    5.49 -		: "0" (4), "c" (0)
    5.50 -		: "bx", "dx");
    5.51 -
    5.52 +	/* Intel has a non-standard dependency on %ecx for this CPUID level. */
    5.53 +	cpuid_count(4, 0, &eax, &ebx, &ecx, &edx);
    5.54  	if (eax & 0x1f)
    5.55  		return ((eax >> 26) + 1);
    5.56  	else
    5.57  		return 1;
    5.58  }
    5.59  
    5.60 -static void __init init_intel(struct cpuinfo_x86 *c)
    5.61 +static void __devinit init_intel(struct cpuinfo_x86 *c)
    5.62  {
    5.63  	unsigned int l2 = 0;
    5.64  	char *p = NULL;
    5.65 @@ -157,7 +154,7 @@ static void __init init_intel(struct cpu
    5.66  	if ( p )
    5.67  		strcpy(c->x86_model_id, p);
    5.68  	
    5.69 -	c->x86_num_cores = num_cpu_cores(c);
    5.70 +	c->x86_max_cores = num_cpu_cores(c);
    5.71  
    5.72  	detect_ht(c);
    5.73  
    5.74 @@ -182,10 +179,13 @@ static void __init init_intel(struct cpu
    5.75  	}
    5.76  #endif
    5.77  
    5.78 -	if (c->x86 == 15) 
    5.79 +	if (c->x86 == 15)
    5.80  		set_bit(X86_FEATURE_P4, c->x86_capability);
    5.81  	if (c->x86 == 6) 
    5.82  		set_bit(X86_FEATURE_P3, c->x86_capability);
    5.83 +	if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
    5.84 +		(c->x86 == 0x6 && c->x86_model >= 0x0e))
    5.85 +		set_bit(X86_FEATURE_CONSTANT_TSC, c->x86_capability);
    5.86  
    5.87  	start_vmx();
    5.88  }
    5.89 @@ -203,7 +203,7 @@ static unsigned int intel_size_cache(str
    5.90  	return size;
    5.91  }
    5.92  
    5.93 -static struct cpu_dev intel_cpu_dev __initdata = {
    5.94 +static struct cpu_dev intel_cpu_dev __devinitdata = {
    5.95  	.c_vendor	= "Intel",
    5.96  	.c_ident 	= { "GenuineIntel" },
    5.97  	.c_models = {
     6.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     6.2 +++ b/xen/arch/x86/cpu/mcheck/k7.c	Tue Feb 14 16:23:43 2006 +0100
     6.3 @@ -0,0 +1,95 @@
     6.4 +/*
     6.5 + * Athlon/Hammer specific Machine Check Exception Reporting
     6.6 + * (C) Copyright 2002 Dave Jones <davej@codemonkey.org.uk>
     6.7 + */
     6.8 +
     6.9 +#include <xen/init.h>
    6.10 +#include <xen/types.h>
    6.11 +#include <xen/kernel.h>
    6.12 +#include <xen/config.h>
    6.13 +#include <xen/smp.h>
    6.14 +
    6.15 +#include <asm/processor.h> 
    6.16 +#include <asm/system.h>
    6.17 +#include <asm/msr.h>
    6.18 +
    6.19 +#include "mce.h"
    6.20 +
    6.21 +/* Machine Check Handler For AMD Athlon/Duron */
    6.22 +static fastcall void k7_machine_check(struct cpu_user_regs * regs, long error_code)
    6.23 +{
    6.24 +	int recover=1;
    6.25 +	u32 alow, ahigh, high, low;
    6.26 +	u32 mcgstl, mcgsth;
    6.27 +	int i;
    6.28 +
    6.29 +	rdmsr (MSR_IA32_MCG_STATUS, mcgstl, mcgsth);
    6.30 +	if (mcgstl & (1<<0))	/* Recoverable ? */
    6.31 +		recover=0;
    6.32 +
    6.33 +	printk (KERN_EMERG "CPU %d: Machine Check Exception: %08x%08x\n",
    6.34 +		smp_processor_id(), mcgsth, mcgstl);
    6.35 +
    6.36 +	for (i=1; i<nr_mce_banks; i++) {
    6.37 +		rdmsr (MSR_IA32_MC0_STATUS+i*4,low, high);
    6.38 +		if (high&(1<<31)) {
    6.39 +			if (high & (1<<29))
    6.40 +				recover |= 1;
    6.41 +			if (high & (1<<25))
    6.42 +				recover |= 2;
    6.43 +			printk (KERN_EMERG "Bank %d: %08x%08x", i, high, low);
    6.44 +			high &= ~(1<<31);
    6.45 +			if (high & (1<<27)) {
    6.46 +				rdmsr (MSR_IA32_MC0_MISC+i*4, alow, ahigh);
    6.47 +				printk ("[%08x%08x]", ahigh, alow);
    6.48 +			}
    6.49 +			if (high & (1<<26)) {
    6.50 +				rdmsr (MSR_IA32_MC0_ADDR+i*4, alow, ahigh);
    6.51 +				printk (" at %08x%08x", ahigh, alow);
    6.52 +			}
    6.53 +			printk ("\n");
    6.54 +			/* Clear it */
    6.55 +			wrmsr (MSR_IA32_MC0_STATUS+i*4, 0UL, 0UL);
    6.56 +			/* Serialize */
    6.57 +			wmb();
    6.58 +			add_taint(TAINT_MACHINE_CHECK);
    6.59 +		}
    6.60 +	}
    6.61 +
    6.62 +	if (recover&2)
    6.63 +		panic ("CPU context corrupt");
    6.64 +	if (recover&1)
    6.65 +		panic ("Unable to continue");
    6.66 +	printk (KERN_EMERG "Attempting to continue.\n");
    6.67 +	mcgstl &= ~(1<<2);
    6.68 +	wrmsr (MSR_IA32_MCG_STATUS,mcgstl, mcgsth);
    6.69 +}
    6.70 +
    6.71 +
    6.72 +/* AMD K7 machine check is Intel like */
    6.73 +void amd_mcheck_init(struct cpuinfo_x86 *c)
    6.74 +{
    6.75 +	u32 l, h;
    6.76 +	int i;
    6.77 +
    6.78 +	machine_check_vector = k7_machine_check;
    6.79 +	wmb();
    6.80 +
    6.81 +	printk (KERN_INFO "Intel machine check architecture supported.\n");
    6.82 +	rdmsr (MSR_IA32_MCG_CAP, l, h);
    6.83 +	if (l & (1<<8))	/* Control register present ? */
    6.84 +		wrmsr (MSR_IA32_MCG_CTL, 0xffffffff, 0xffffffff);
    6.85 +	nr_mce_banks = l & 0xff;
    6.86 +
    6.87 +	/* Clear status for MC index 0 separately, we don't touch CTL,
    6.88 +	 * as some Athlons cause spurious MCEs when its enabled. */
    6.89 +	wrmsr (MSR_IA32_MC0_STATUS, 0x0, 0x0);
    6.90 +	for (i=1; i<nr_mce_banks; i++) {
    6.91 +		wrmsr (MSR_IA32_MC0_CTL+4*i, 0xffffffff, 0xffffffff);
    6.92 +		wrmsr (MSR_IA32_MC0_STATUS+4*i, 0x0, 0x0);
    6.93 +	}
    6.94 +
    6.95 +	set_in_cr4 (X86_CR4_MCE);
    6.96 +	printk (KERN_INFO "Intel machine check reporting enabled on CPU#%d.\n",
    6.97 +		smp_processor_id());
    6.98 +}
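
The bank decoding in k7_machine_check() above, and in the p4/p6 handlers
later in this changeset, tests individual bits of the high dword of each
64-bit IA32_MCi_STATUS MSR. As an aid to reading those tests, the relevant
architectural bits are:

	/* High dword of IA32_MCi_STATUS as tested above (architectural bits):
	 *   bit 31  VAL    - bank holds valid error information
	 *   bit 29  UC     - uncorrected error            -> recover |= 1
	 *   bit 27  MISCV  - IA32_MCi_MISC is valid
	 *   bit 26  ADDRV  - IA32_MCi_ADDR holds an address
	 *   bit 25  PCC    - processor context corrupt    -> recover |= 2
	 */
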
     7.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     7.2 +++ b/xen/arch/x86/cpu/mcheck/mce.c	Tue Feb 14 16:23:43 2006 +0100
     7.3 @@ -0,0 +1,73 @@
     7.4 +/*
     7.5 + * mce.c - x86 Machine Check Exception Reporting
     7.6 + * (c) 2002 Alan Cox <alan@redhat.com>, Dave Jones <davej@codemonkey.org.uk>
     7.7 + */
     7.8 +
     7.9 +#include <xen/init.h>
    7.10 +#include <xen/types.h>
    7.11 +#include <xen/kernel.h>
    7.12 +#include <xen/config.h>
    7.13 +#include <xen/smp.h>
    7.14 +
    7.15 +#include <asm/processor.h> 
    7.16 +#include <asm/system.h>
    7.17 +
    7.18 +#include "mce.h"
    7.19 +
    7.20 +int mce_disabled = 0;
    7.21 +int nr_mce_banks;
    7.22 +
    7.23 +/* Handle unconfigured int18 (should never happen) */
    7.24 +static fastcall void unexpected_machine_check(struct cpu_user_regs * regs, long error_code)
    7.25 +{	
    7.26 +	printk(KERN_ERR "CPU#%d: Unexpected int18 (Machine Check).\n", smp_processor_id());
    7.27 +}
    7.28 +
    7.29 +/* Call the installed machine check handler for this CPU setup. */
    7.30 +void fastcall (*machine_check_vector)(struct cpu_user_regs *, long error_code) = unexpected_machine_check;
    7.31 +
    7.32 +/* This has to be run for each processor */
    7.33 +void mcheck_init(struct cpuinfo_x86 *c)
    7.34 +{
    7.35 +	if (mce_disabled==1)
    7.36 +		return;
    7.37 +
    7.38 +	switch (c->x86_vendor) {
    7.39 +		case X86_VENDOR_AMD:
    7.40 +			if (c->x86==6 || c->x86==15)
    7.41 +				amd_mcheck_init(c);
    7.42 +			break;
    7.43 +
    7.44 +		case X86_VENDOR_INTEL:
    7.45 +			if (c->x86==5)
    7.46 +				intel_p5_mcheck_init(c);
    7.47 +			if (c->x86==6)
    7.48 +				intel_p6_mcheck_init(c);
    7.49 +			if (c->x86==15)
    7.50 +				intel_p4_mcheck_init(c);
    7.51 +			break;
    7.52 +
    7.53 +		case X86_VENDOR_CENTAUR:
    7.54 +			if (c->x86==5)
    7.55 +				winchip_mcheck_init(c);
    7.56 +			break;
    7.57 +
    7.58 +		default:
    7.59 +			break;
    7.60 +	}
    7.61 +}
    7.62 +
    7.63 +static int __init mcheck_disable(char *str)
    7.64 +{
    7.65 +	mce_disabled = 1;
    7.66 +	return 0;
    7.67 +}
    7.68 +
    7.69 +static int __init mcheck_enable(char *str)
    7.70 +{
    7.71 +	mce_disabled = -1;
    7.72 +	return 0;
    7.73 +}
    7.74 +
    7.75 +__setup("nomce", mcheck_disable);
    7.76 +__setup("mce", mcheck_enable);
     8.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     8.2 +++ b/xen/arch/x86/cpu/mcheck/mce.h	Tue Feb 14 16:23:43 2006 +0100
     8.3 @@ -0,0 +1,14 @@
     8.4 +#include <xen/init.h>
     8.5 +
     8.6 +void amd_mcheck_init(struct cpuinfo_x86 *c);
     8.7 +void intel_p4_mcheck_init(struct cpuinfo_x86 *c);
     8.8 +void intel_p5_mcheck_init(struct cpuinfo_x86 *c);
     8.9 +void intel_p6_mcheck_init(struct cpuinfo_x86 *c);
    8.10 +void winchip_mcheck_init(struct cpuinfo_x86 *c);
    8.11 +
    8.12 +/* Call the installed machine check handler for this CPU setup. */
    8.13 +extern fastcall void (*machine_check_vector)(struct cpu_user_regs *, long error_code);
    8.14 +
    8.15 +extern int mce_disabled __initdata;
    8.16 +extern int nr_mce_banks;
    8.17 +
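
machine_check_vector, declared above, is the hook through which these
per-vendor handlers are reached: mcheck_init() installs the vendor routine
and the #MC trap path (in xen/arch/x86/traps.c, which this changeset also
touches but which is not shown in this excerpt) calls whatever is installed.
A purely hypothetical sketch of such a call site, with the entry-point name
assumed rather than taken from the real sources:

	/* Hypothetical sketch only -- the real #MC entry point lives in
	 * xen/arch/x86/traps.c and may differ in name and linkage. */
	fastcall void do_machine_check(struct cpu_user_regs *regs)
	{
		machine_check_vector(regs, 0 /* vector 18 pushes no error code */);
	}
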
     9.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     9.2 +++ b/xen/arch/x86/cpu/mcheck/non-fatal.c	Tue Feb 14 16:23:43 2006 +0100
     9.3 @@ -0,0 +1,86 @@
     9.4 +/*
     9.5 + * Non Fatal Machine Check Exception Reporting
     9.6 + *
     9.7 + * (C) Copyright 2002 Dave Jones. <davej@codemonkey.org.uk>
     9.8 + *
     9.9 + * This file contains routines to check for non-fatal MCEs every 15s
    9.10 + *
    9.11 + */
    9.12 +
    9.13 +#include <xen/config.h>
    9.14 +#include <xen/init.h>
    9.15 +#include <xen/types.h>
    9.16 +#include <xen/kernel.h>
    9.17 +#include <xen/smp.h>
    9.18 +#include <xen/timer.h>
    9.19 +#include <xen/errno.h>
    9.20 +#include <asm/processor.h> 
    9.21 +#include <asm/system.h>
    9.22 +#include <asm/msr.h>
    9.23 +
    9.24 +#include "mce.h"
    9.25 +
    9.26 +static int firstbank;
    9.27 +static struct timer mce_timer;
    9.28 +
    9.29 +#define MCE_PERIOD MILLISECS(15000)
    9.30 +
    9.31 +static void mce_checkregs (void *info)
    9.32 +{
    9.33 +	u32 low, high;
    9.34 +	int i;
    9.35 +
    9.36 +	for (i=firstbank; i<nr_mce_banks; i++) {
    9.37 +		rdmsr (MSR_IA32_MC0_STATUS+i*4, low, high);
    9.38 +
    9.39 +		if (high & (1<<31)) {
    9.40 +			printk(KERN_INFO "MCE: The hardware reports a non "
    9.41 +				"fatal, correctable incident occurred on "
    9.42 +				"CPU %d.\n",
    9.43 +				smp_processor_id());
    9.44 +			printk (KERN_INFO "Bank %d: %08x%08x\n", i, high, low);
    9.45 +
    9.46 +			/* Scrub the error so we don't pick it up in MCE_RATE seconds time. */
    9.47 +			wrmsr (MSR_IA32_MC0_STATUS+i*4, 0UL, 0UL);
    9.48 +
    9.49 +			/* Serialize */
    9.50 +			wmb();
    9.51 +			add_taint(TAINT_MACHINE_CHECK);
    9.52 +		}
    9.53 +	}
    9.54 +}
    9.55 +
    9.56 +static void mce_work_fn(void *data)
    9.57 +{ 
    9.58 +	on_each_cpu(mce_checkregs, NULL, 1, 1);
    9.59 +	set_timer(&mce_timer, NOW() + MCE_PERIOD);
    9.60 +}
    9.61 +
    9.62 +static int __init init_nonfatal_mce_checker(void)
    9.63 +{
    9.64 +	struct cpuinfo_x86 *c = &boot_cpu_data;
    9.65 +
    9.66 +	/* Check for MCE support */
    9.67 +	if (!cpu_has(c, X86_FEATURE_MCE))
    9.68 +		return -ENODEV;
    9.69 +
    9.70 +	/* Check for PPro style MCA */
    9.71 +	if (!cpu_has(c, X86_FEATURE_MCA))
    9.72 +		return -ENODEV;
    9.73 +
    9.74 +	/* Some Athlons misbehave when we frob bank 0 */
    9.75 +	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
    9.76 +		boot_cpu_data.x86 == 6)
    9.77 +			firstbank = 1;
    9.78 +	else
    9.79 +			firstbank = 0;
    9.80 +
    9.81 +	/*
    9.82 +	 * Check for non-fatal errors every MCE_RATE s
    9.83 +	 */
    9.84 +	init_timer(&mce_timer, mce_work_fn, NULL, 0);
    9.85 +	set_timer(&mce_timer, NOW() + MCE_PERIOD);
    9.86 +	printk(KERN_INFO "Machine check exception polling timer started.\n");
    9.87 +	return 0;
    9.88 +}
    9.89 +__initcall(init_nonfatal_mce_checker);
    10.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    10.2 +++ b/xen/arch/x86/cpu/mcheck/p4.c	Tue Feb 14 16:23:43 2006 +0100
    10.3 @@ -0,0 +1,270 @@
    10.4 +/*
    10.5 + * P4 specific Machine Check Exception Reporting
    10.6 + */
    10.7 +
    10.8 +#include <xen/init.h>
    10.9 +#include <xen/types.h>
   10.10 +#include <xen/kernel.h>
   10.11 +#include <xen/config.h>
   10.12 +#include <xen/smp.h>
   10.13 +#include <xen/irq.h>
   10.14 +#include <xen/time.h>
   10.15 +#include <asm/processor.h> 
   10.16 +#include <asm/system.h>
   10.17 +#include <asm/msr.h>
   10.18 +#include <asm/apic.h>
   10.19 +
   10.20 +#include "mce.h"
   10.21 +
   10.22 +/* as supported by the P4/Xeon family */
   10.23 +struct intel_mce_extended_msrs {
   10.24 +	u32 eax;
   10.25 +	u32 ebx;
   10.26 +	u32 ecx;
   10.27 +	u32 edx;
   10.28 +	u32 esi;
   10.29 +	u32 edi;
   10.30 +	u32 ebp;
   10.31 +	u32 esp;
   10.32 +	u32 eflags;
   10.33 +	u32 eip;
   10.34 +	/* u32 *reserved[]; */
   10.35 +};
   10.36 +
   10.37 +static int mce_num_extended_msrs = 0;
   10.38 +
   10.39 +
   10.40 +#ifdef CONFIG_X86_MCE_P4THERMAL
   10.41 +static void unexpected_thermal_interrupt(struct cpu_user_regs *regs)
   10.42 +{	
   10.43 +	printk(KERN_ERR "CPU%d: Unexpected LVT TMR interrupt!\n",
   10.44 +			smp_processor_id());
   10.45 +	add_taint(TAINT_MACHINE_CHECK);
   10.46 +}
   10.47 +
   10.48 +/* P4/Xeon Thermal transition interrupt handler */
   10.49 +static void intel_thermal_interrupt(struct cpu_user_regs *regs)
   10.50 +{
   10.51 +	u32 l, h;
   10.52 +	unsigned int cpu = smp_processor_id();
   10.53 +	static s_time_t next[NR_CPUS];
   10.54 +
   10.55 +	ack_APIC_irq();
   10.56 +
   10.57 +	if (NOW() > next[cpu])
   10.58 +		return;
   10.59 +
   10.60 +	next[cpu] = NOW() + MILLISECS(5000);
   10.61 +	rdmsr(MSR_IA32_THERM_STATUS, l, h);
   10.62 +	if (l & 0x1) {
   10.63 +		printk(KERN_EMERG "CPU%d: Temperature above threshold\n", cpu);
   10.64 +		printk(KERN_EMERG "CPU%d: Running in modulated clock mode\n",
   10.65 +				cpu);
   10.66 +		add_taint(TAINT_MACHINE_CHECK);
   10.67 +	} else {
   10.68 +		printk(KERN_INFO "CPU%d: Temperature/speed normal\n", cpu);
   10.69 +	}
   10.70 +}
   10.71 +
   10.72 +/* Thermal interrupt handler for this CPU setup */
   10.73 +static void (*vendor_thermal_interrupt)(struct cpu_user_regs *regs) = unexpected_thermal_interrupt;
   10.74 +
   10.75 +fastcall void smp_thermal_interrupt(struct cpu_user_regs *regs)
   10.76 +{
   10.77 +	irq_enter(smp_processor_id());
   10.78 +	vendor_thermal_interrupt(regs);
   10.79 +	irq_exit(smp_processor_id());
   10.80 +}
   10.81 +
   10.82 +/* P4/Xeon Thermal regulation detect and init */
   10.83 +static void intel_init_thermal(struct cpuinfo_x86 *c)
   10.84 +{
   10.85 +	u32 l, h;
   10.86 +	unsigned int cpu = smp_processor_id();
   10.87 +
   10.88 +	/* Thermal monitoring */
   10.89 +	if (!cpu_has(c, X86_FEATURE_ACPI))
   10.90 +		return;	/* -ENODEV */
   10.91 +
   10.92 +	/* Clock modulation */
   10.93 +	if (!cpu_has(c, X86_FEATURE_ACC))
   10.94 +		return;	/* -ENODEV */
   10.95 +
   10.96 +	/* first check if its enabled already, in which case there might
   10.97 +	 * be some SMM goo which handles it, so we can't even put a handler
   10.98 +	 * since it might be delivered via SMI already -zwanem.
   10.99 +	 */
  10.100 +	rdmsr (MSR_IA32_MISC_ENABLE, l, h);
  10.101 +	h = apic_read(APIC_LVTTHMR);
  10.102 +	if ((l & (1<<3)) && (h & APIC_DM_SMI)) {
  10.103 +		printk(KERN_DEBUG "CPU%d: Thermal monitoring handled by SMI\n",
  10.104 +				cpu);
  10.105 +		return; /* -EBUSY */
  10.106 +	}
  10.107 +
  10.108 +	/* check whether a vector already exists, temporarily masked? */	
  10.109 +	if (h & APIC_VECTOR_MASK) {
  10.110 +		printk(KERN_DEBUG "CPU%d: Thermal LVT vector (%#x) already "
  10.111 +				"installed\n",
  10.112 +			cpu, (h & APIC_VECTOR_MASK));
  10.113 +		return; /* -EBUSY */
  10.114 +	}
  10.115 +
  10.116 +	/* The temperature transition interrupt handler setup */
  10.117 +	h = THERMAL_APIC_VECTOR;		/* our delivery vector */
  10.118 +	h |= (APIC_DM_FIXED | APIC_LVT_MASKED);	/* we'll mask till we're ready */
  10.119 +	apic_write_around(APIC_LVTTHMR, h);
  10.120 +
  10.121 +	rdmsr (MSR_IA32_THERM_INTERRUPT, l, h);
  10.122 +	wrmsr (MSR_IA32_THERM_INTERRUPT, l | 0x03 , h);
  10.123 +
  10.124 +	/* ok we're good to go... */
  10.125 +	vendor_thermal_interrupt = intel_thermal_interrupt;
  10.126 +	
  10.127 +	rdmsr (MSR_IA32_MISC_ENABLE, l, h);
  10.128 +	wrmsr (MSR_IA32_MISC_ENABLE, l | (1<<3), h);
  10.129 +	
  10.130 +	l = apic_read (APIC_LVTTHMR);
  10.131 +	apic_write_around (APIC_LVTTHMR, l & ~APIC_LVT_MASKED);
  10.132 +	printk (KERN_INFO "CPU%d: Thermal monitoring enabled\n", cpu);
  10.133 +	return;
  10.134 +}
  10.135 +#endif /* CONFIG_X86_MCE_P4THERMAL */
  10.136 +
  10.137 +
  10.138 +/* P4/Xeon Extended MCE MSR retrieval, return 0 if unsupported */
  10.139 +static inline int intel_get_extended_msrs(struct intel_mce_extended_msrs *r)
  10.140 +{
  10.141 +	u32 h;
  10.142 +
  10.143 +	if (mce_num_extended_msrs == 0)
  10.144 +		goto done;
  10.145 +
  10.146 +	rdmsr (MSR_IA32_MCG_EAX, r->eax, h);
  10.147 +	rdmsr (MSR_IA32_MCG_EBX, r->ebx, h);
  10.148 +	rdmsr (MSR_IA32_MCG_ECX, r->ecx, h);
  10.149 +	rdmsr (MSR_IA32_MCG_EDX, r->edx, h);
  10.150 +	rdmsr (MSR_IA32_MCG_ESI, r->esi, h);
  10.151 +	rdmsr (MSR_IA32_MCG_EDI, r->edi, h);
  10.152 +	rdmsr (MSR_IA32_MCG_EBP, r->ebp, h);
  10.153 +	rdmsr (MSR_IA32_MCG_ESP, r->esp, h);
  10.154 +	rdmsr (MSR_IA32_MCG_EFLAGS, r->eflags, h);
  10.155 +	rdmsr (MSR_IA32_MCG_EIP, r->eip, h);
  10.156 +
  10.157 +	/* can we rely on kmalloc to do a dynamic
  10.158 +	 * allocation for the reserved registers?
  10.159 +	 */
  10.160 +done:
  10.161 +	return mce_num_extended_msrs;
  10.162 +}
  10.163 +
  10.164 +static fastcall void intel_machine_check(struct cpu_user_regs * regs, long error_code)
  10.165 +{
  10.166 +	int recover=1;
  10.167 +	u32 alow, ahigh, high, low;
  10.168 +	u32 mcgstl, mcgsth;
  10.169 +	int i;
  10.170 +	struct intel_mce_extended_msrs dbg;
  10.171 +
  10.172 +	rdmsr (MSR_IA32_MCG_STATUS, mcgstl, mcgsth);
  10.173 +	if (mcgstl & (1<<0))	/* Recoverable ? */
  10.174 +		recover=0;
  10.175 +
  10.176 +	printk (KERN_EMERG "CPU %d: Machine Check Exception: %08x%08x\n",
  10.177 +		smp_processor_id(), mcgsth, mcgstl);
  10.178 +
  10.179 +	if (intel_get_extended_msrs(&dbg)) {
  10.180 +		printk (KERN_DEBUG "CPU %d: EIP: %08x EFLAGS: %08x\n",
  10.181 +			smp_processor_id(), dbg.eip, dbg.eflags);
  10.182 +		printk (KERN_DEBUG "\teax: %08x ebx: %08x ecx: %08x edx: %08x\n",
  10.183 +			dbg.eax, dbg.ebx, dbg.ecx, dbg.edx);
  10.184 +		printk (KERN_DEBUG "\tesi: %08x edi: %08x ebp: %08x esp: %08x\n",
  10.185 +			dbg.esi, dbg.edi, dbg.ebp, dbg.esp);
  10.186 +	}
  10.187 +
  10.188 +	for (i=0; i<nr_mce_banks; i++) {
  10.189 +		rdmsr (MSR_IA32_MC0_STATUS+i*4,low, high);
  10.190 +		if (high & (1<<31)) {
  10.191 +			if (high & (1<<29))
  10.192 +				recover |= 1;
  10.193 +			if (high & (1<<25))
  10.194 +				recover |= 2;
  10.195 +			printk (KERN_EMERG "Bank %d: %08x%08x", i, high, low);
  10.196 +			high &= ~(1<<31);
  10.197 +			if (high & (1<<27)) {
  10.198 +				rdmsr (MSR_IA32_MC0_MISC+i*4, alow, ahigh);
  10.199 +				printk ("[%08x%08x]", ahigh, alow);
  10.200 +			}
  10.201 +			if (high & (1<<26)) {
  10.202 +				rdmsr (MSR_IA32_MC0_ADDR+i*4, alow, ahigh);
  10.203 +				printk (" at %08x%08x", ahigh, alow);
  10.204 +			}
  10.205 +			printk ("\n");
  10.206 +		}
  10.207 +	}
  10.208 +
  10.209 +	if (recover & 2)
  10.210 +		panic ("CPU context corrupt");
  10.211 +	if (recover & 1)
  10.212 +		panic ("Unable to continue");
  10.213 +
  10.214 +	printk(KERN_EMERG "Attempting to continue.\n");
  10.215 +	/* 
  10.216 +	 * Do not clear the MSR_IA32_MCi_STATUS if the error is not 
  10.217 +	 * recoverable/continuable.This will allow BIOS to look at the MSRs
  10.218 +	 * for errors if the OS could not log the error.
  10.219 +	 */
  10.220 +	for (i=0; i<nr_mce_banks; i++) {
  10.221 +		u32 msr;
  10.222 +		msr = MSR_IA32_MC0_STATUS+i*4;
  10.223 +		rdmsr (msr, low, high);
  10.224 +		if (high&(1<<31)) {
  10.225 +			/* Clear it */
  10.226 +			wrmsr(msr, 0UL, 0UL);
  10.227 +			/* Serialize */
  10.228 +			wmb();
  10.229 +			add_taint(TAINT_MACHINE_CHECK);
  10.230 +		}
  10.231 +	}
  10.232 +	mcgstl &= ~(1<<2);
  10.233 +	wrmsr (MSR_IA32_MCG_STATUS,mcgstl, mcgsth);
  10.234 +}
  10.235 +
  10.236 +
  10.237 +void intel_p4_mcheck_init(struct cpuinfo_x86 *c)
  10.238 +{
  10.239 +	u32 l, h;
  10.240 +	int i;
  10.241 +	
  10.242 +	machine_check_vector = intel_machine_check;
  10.243 +	wmb();
  10.244 +
  10.245 +	printk (KERN_INFO "Intel machine check architecture supported.\n");
  10.246 +	rdmsr (MSR_IA32_MCG_CAP, l, h);
  10.247 +	if (l & (1<<8))	/* Control register present ? */
  10.248 +		wrmsr (MSR_IA32_MCG_CTL, 0xffffffff, 0xffffffff);
  10.249 +	nr_mce_banks = l & 0xff;
  10.250 +
  10.251 +	for (i=0; i<nr_mce_banks; i++) {
  10.252 +		wrmsr (MSR_IA32_MC0_CTL+4*i, 0xffffffff, 0xffffffff);
  10.253 +		wrmsr (MSR_IA32_MC0_STATUS+4*i, 0x0, 0x0);
  10.254 +	}
  10.255 +
  10.256 +	set_in_cr4 (X86_CR4_MCE);
  10.257 +	printk (KERN_INFO "Intel machine check reporting enabled on CPU#%d.\n",
  10.258 +		smp_processor_id());
  10.259 +
  10.260 +	/* Check for P4/Xeon extended MCE MSRs */
  10.261 +	rdmsr (MSR_IA32_MCG_CAP, l, h);
  10.262 +	if (l & (1<<9))	{/* MCG_EXT_P */
  10.263 +		mce_num_extended_msrs = (l >> 16) & 0xff;
  10.264 +		printk (KERN_INFO "CPU%d: Intel P4/Xeon Extended MCE MSRs (%d)"
  10.265 +				" available\n",
  10.266 +			smp_processor_id(), mce_num_extended_msrs);
  10.267 +
  10.268 +#ifdef CONFIG_X86_MCE_P4THERMAL
  10.269 +		/* Check for P4/Xeon Thermal monitor */
  10.270 +		intel_init_thermal(c);
  10.271 +#endif
  10.272 +	}
  10.273 +}
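
intel_p4_mcheck_init() above probes IA32_MCG_CAP twice: once for the bank
count and control register, and once for the P4/Xeon extended state MSRs.
The capability bits it relies on are:

	/* IA32_MCG_CAP fields used by intel_p4_mcheck_init() above:
	 *   bits 7:0    Count       - number of reporting banks (nr_mce_banks)
	 *   bit  8      MCG_CTL_P   - IA32_MCG_CTL register is present
	 *   bit  9      MCG_EXT_P   - extended state registers are present
	 *   bits 23:16  MCG_EXT_CNT - number of extended registers
	 */
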
    11.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    11.2 +++ b/xen/arch/x86/cpu/mcheck/p5.c	Tue Feb 14 16:23:43 2006 +0100
    11.3 @@ -0,0 +1,52 @@
    11.4 +/*
    11.5 + * P5 specific Machine Check Exception Reporting
    11.6 + * (C) Copyright 2002 Alan Cox <alan@redhat.com>
    11.7 + */
    11.8 +
    11.9 +#include <xen/init.h>
   11.10 +#include <xen/types.h>
   11.11 +#include <xen/kernel.h>
   11.12 +#include <xen/smp.h>
   11.13 +
   11.14 +#include <asm/processor.h> 
   11.15 +#include <asm/system.h>
   11.16 +#include <asm/msr.h>
   11.17 +
   11.18 +#include "mce.h"
   11.19 +
   11.20 +/* Machine check handler for Pentium class Intel */
   11.21 +static fastcall void pentium_machine_check(struct cpu_user_regs * regs, long error_code)
   11.22 +{
   11.23 +	u32 loaddr, hi, lotype;
   11.24 +	rdmsr(MSR_IA32_P5_MC_ADDR, loaddr, hi);
   11.25 +	rdmsr(MSR_IA32_P5_MC_TYPE, lotype, hi);
   11.26 +	printk(KERN_EMERG "CPU#%d: Machine Check Exception:  0x%8X (type 0x%8X).\n", smp_processor_id(), loaddr, lotype);
   11.27 +	if(lotype&(1<<5))
   11.28 +		printk(KERN_EMERG "CPU#%d: Possible thermal failure (CPU on fire ?).\n", smp_processor_id());
   11.29 +	add_taint(TAINT_MACHINE_CHECK);
   11.30 +}
   11.31 +
   11.32 +/* Set up machine check reporting for processors with Intel style MCE */
   11.33 +void intel_p5_mcheck_init(struct cpuinfo_x86 *c)
   11.34 +{
   11.35 +	u32 l, h;
   11.36 +	
   11.37 +	/*Check for MCE support */
   11.38 +	if( !cpu_has(c, X86_FEATURE_MCE) )
   11.39 +		return;	
   11.40 +
   11.41 +	/* Default P5 to off as its often misconnected */
   11.42 +	if(mce_disabled != -1)
   11.43 +		return;
   11.44 +	machine_check_vector = pentium_machine_check;
   11.45 +	wmb();
   11.46 +
   11.47 +	/* Read registers before enabling */
   11.48 +	rdmsr(MSR_IA32_P5_MC_ADDR, l, h);
   11.49 +	rdmsr(MSR_IA32_P5_MC_TYPE, l, h);
   11.50 +	printk(KERN_INFO "Intel old style machine check architecture supported.\n");
   11.51 +
   11.52 + 	/* Enable MCE */
   11.53 +	set_in_cr4(X86_CR4_MCE);
   11.54 +	printk(KERN_INFO "Intel old style machine check reporting enabled on CPU#%d.\n", smp_processor_id());
   11.55 +}
    12.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    12.2 +++ b/xen/arch/x86/cpu/mcheck/p6.c	Tue Feb 14 16:23:43 2006 +0100
    12.3 @@ -0,0 +1,118 @@
    12.4 +/*
    12.5 + * P6 specific Machine Check Exception Reporting
    12.6 + * (C) Copyright 2002 Alan Cox <alan@redhat.com>
    12.7 + */
    12.8 +
    12.9 +#include <xen/init.h>
   12.10 +#include <xen/types.h>
   12.11 +#include <xen/kernel.h>
   12.12 +#include <xen/smp.h>
   12.13 +
   12.14 +#include <asm/processor.h> 
   12.15 +#include <asm/system.h>
   12.16 +#include <asm/msr.h>
   12.17 +
   12.18 +#include "mce.h"
   12.19 +
   12.20 +/* Machine Check Handler For PII/PIII */
   12.21 +static fastcall void intel_machine_check(struct cpu_user_regs * regs, long error_code)
   12.22 +{
   12.23 +	int recover=1;
   12.24 +	u32 alow, ahigh, high, low;
   12.25 +	u32 mcgstl, mcgsth;
   12.26 +	int i;
   12.27 +
   12.28 +	rdmsr (MSR_IA32_MCG_STATUS, mcgstl, mcgsth);
   12.29 +	if (mcgstl & (1<<0))	/* Recoverable ? */
   12.30 +		recover=0;
   12.31 +
   12.32 +	printk (KERN_EMERG "CPU %d: Machine Check Exception: %08x%08x\n",
   12.33 +		smp_processor_id(), mcgsth, mcgstl);
   12.34 +
   12.35 +	for (i=0; i<nr_mce_banks; i++) {
   12.36 +		rdmsr (MSR_IA32_MC0_STATUS+i*4,low, high);
   12.37 +		if (high & (1<<31)) {
   12.38 +			if (high & (1<<29))
   12.39 +				recover |= 1;
   12.40 +			if (high & (1<<25))
   12.41 +				recover |= 2;
   12.42 +			printk (KERN_EMERG "Bank %d: %08x%08x", i, high, low);
   12.43 +			high &= ~(1<<31);
   12.44 +			if (high & (1<<27)) {
   12.45 +				rdmsr (MSR_IA32_MC0_MISC+i*4, alow, ahigh);
   12.46 +				printk ("[%08x%08x]", ahigh, alow);
   12.47 +			}
   12.48 +			if (high & (1<<26)) {
   12.49 +				rdmsr (MSR_IA32_MC0_ADDR+i*4, alow, ahigh);
   12.50 +				printk (" at %08x%08x", ahigh, alow);
   12.51 +			}
   12.52 +			printk ("\n");
   12.53 +		}
   12.54 +	}
   12.55 +
   12.56 +	if (recover & 2)
   12.57 +		panic ("CPU context corrupt");
   12.58 +	if (recover & 1)
   12.59 +		panic ("Unable to continue");
   12.60 +
   12.61 +	printk (KERN_EMERG "Attempting to continue.\n");
   12.62 +	/* 
   12.63 +	 * Do not clear the MSR_IA32_MCi_STATUS if the error is not 
   12.64 +	 * recoverable/continuable.This will allow BIOS to look at the MSRs
   12.65 +	 * for errors if the OS could not log the error.
   12.66 +	 */
   12.67 +	for (i=0; i<nr_mce_banks; i++) {
   12.68 +		unsigned int msr;
   12.69 +		msr = MSR_IA32_MC0_STATUS+i*4;
   12.70 +		rdmsr (msr,low, high);
   12.71 +		if (high & (1<<31)) {
   12.72 +			/* Clear it */
   12.73 +			wrmsr (msr, 0UL, 0UL);
   12.74 +			/* Serialize */
   12.75 +			wmb();
   12.76 +			add_taint(TAINT_MACHINE_CHECK);
   12.77 +		}
   12.78 +	}
   12.79 +	mcgstl &= ~(1<<2);
   12.80 +	wrmsr (MSR_IA32_MCG_STATUS,mcgstl, mcgsth);
   12.81 +}
   12.82 +
   12.83 +/* Set up machine check reporting for processors with Intel style MCE */
   12.84 +void intel_p6_mcheck_init(struct cpuinfo_x86 *c)
   12.85 +{
   12.86 +	u32 l, h;
   12.87 +	int i;
   12.88 +	
   12.89 +	/* Check for MCE support */
   12.90 +	if (!cpu_has(c, X86_FEATURE_MCE))
   12.91 +		return;
   12.92 +
   12.93 +	/* Check for PPro style MCA */
   12.94 + 	if (!cpu_has(c, X86_FEATURE_MCA))
   12.95 +		return;
   12.96 +
   12.97 +	/* Ok machine check is available */
   12.98 +	machine_check_vector = intel_machine_check;
   12.99 +	wmb();
  12.100 +
  12.101 +	printk (KERN_INFO "Intel machine check architecture supported.\n");
  12.102 +	rdmsr (MSR_IA32_MCG_CAP, l, h);
  12.103 +	if (l & (1<<8))	/* Control register present ? */
  12.104 +		wrmsr(MSR_IA32_MCG_CTL, 0xffffffff, 0xffffffff);
  12.105 +	nr_mce_banks = l & 0xff;
  12.106 +
  12.107 +	/*
  12.108 +	 * Following the example in IA-32 SDM Vol 3:
  12.109 +	 * - MC0_CTL should not be written
  12.110 +	 * - Status registers on all banks should be cleared on reset
  12.111 +	 */
  12.112 +	for (i=1; i<nr_mce_banks; i++)
  12.113 +		wrmsr (MSR_IA32_MC0_CTL+4*i, 0xffffffff, 0xffffffff);
  12.114 +
  12.115 +	for (i=0; i<nr_mce_banks; i++)
  12.116 +		wrmsr (MSR_IA32_MC0_STATUS+4*i, 0x0, 0x0);
  12.117 +
  12.118 +	set_in_cr4 (X86_CR4_MCE);
  12.119 +	printk (KERN_INFO "Intel machine check reporting enabled on CPU#%d.\n",
  12.120 +		smp_processor_id());
  12.121 +}
    13.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    13.2 +++ b/xen/arch/x86/cpu/mcheck/winchip.c	Tue Feb 14 16:23:43 2006 +0100
    13.3 @@ -0,0 +1,37 @@
    13.4 +/*
    13.5 + * IDT Winchip specific Machine Check Exception Reporting
    13.6 + * (C) Copyright 2002 Alan Cox <alan@redhat.com>
    13.7 + */
    13.8 +
    13.9 +#include <xen/config.h>
   13.10 +#include <xen/init.h>
   13.11 +#include <xen/lib.h>
   13.12 +#include <xen/types.h>
   13.13 +#include <xen/kernel.h>
   13.14 +
   13.15 +#include <asm/processor.h> 
   13.16 +#include <asm/system.h>
   13.17 +#include <asm/msr.h>
   13.18 +
   13.19 +#include "mce.h"
   13.20 +
   13.21 +/* Machine check handler for WinChip C6 */
   13.22 +static fastcall void winchip_machine_check(struct cpu_user_regs * regs, long error_code)
   13.23 +{
   13.24 +	printk(KERN_EMERG "CPU0: Machine Check Exception.\n");
   13.25 +	add_taint(TAINT_MACHINE_CHECK);
   13.26 +}
   13.27 +
   13.28 +/* Set up machine check reporting on the Winchip C6 series */
   13.29 +void winchip_mcheck_init(struct cpuinfo_x86 *c)
   13.30 +{
   13.31 +	u32 lo, hi;
   13.32 +	machine_check_vector = winchip_machine_check;
   13.33 +	wmb();
   13.34 +	rdmsr(MSR_IDT_FCR1, lo, hi);
   13.35 +	lo|= (1<<2);	/* Enable EIERRINT (int 18 MCE) */
   13.36 +	lo&= ~(1<<4);	/* Enable MCE */
   13.37 +	wrmsr(MSR_IDT_FCR1, lo, hi);
   13.38 +	set_in_cr4(X86_CR4_MCE);
   13.39 +	printk(KERN_INFO "Winchip machine check reporting enabled on CPU#0.\n");
   13.40 +}
    14.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    14.2 +++ b/xen/arch/x86/cpu/mtrr/amd.c	Tue Feb 14 16:23:43 2006 +0100
    14.3 @@ -0,0 +1,121 @@
    14.4 +#include <xen/init.h>
    14.5 +#include <xen/mm.h>
    14.6 +#include <asm/mtrr.h>
    14.7 +#include <asm/msr.h>
    14.8 +
    14.9 +#include "mtrr.h"
   14.10 +
   14.11 +static void
   14.12 +amd_get_mtrr(unsigned int reg, unsigned long *base,
   14.13 +	     unsigned int *size, mtrr_type * type)
   14.14 +{
   14.15 +	unsigned long low, high;
   14.16 +
   14.17 +	rdmsr(MSR_K6_UWCCR, low, high);
   14.18 +	/*  Upper dword is region 1, lower is region 0  */
   14.19 +	if (reg == 1)
   14.20 +		low = high;
   14.21 +	/*  The base masks off on the right alignment  */
   14.22 +	*base = (low & 0xFFFE0000) >> PAGE_SHIFT;
   14.23 +	*type = 0;
   14.24 +	if (low & 1)
   14.25 +		*type = MTRR_TYPE_UNCACHABLE;
   14.26 +	if (low & 2)
   14.27 +		*type = MTRR_TYPE_WRCOMB;
   14.28 +	if (!(low & 3)) {
   14.29 +		*size = 0;
   14.30 +		return;
   14.31 +	}
   14.32 +	/*
   14.33 +	 *  This needs a little explaining. The size is stored as an
   14.34 +	 *  inverted mask of bits of 128K granularity 15 bits long offset
   14.35 +	 *  2 bits
   14.36 +	 *
   14.37 +	 *  So to get a size we do invert the mask and add 1 to the lowest
   14.38 +	 *  mask bit (4 as its 2 bits in). This gives us a size we then shift
   14.39 +	 *  to turn into 128K blocks
   14.40 +	 *
   14.41 +	 *  eg              111 1111 1111 1100      is 512K
   14.42 +	 *
   14.43 +	 *  invert          000 0000 0000 0011
   14.44 +	 *  +1              000 0000 0000 0100
   14.45 +	 *  *128K   ...
   14.46 +	 */
   14.47 +	low = (~low) & 0x1FFFC;
   14.48 +	*size = (low + 4) << (15 - PAGE_SHIFT);
   14.49 +	return;
   14.50 +}
   14.51 +
   14.52 +static void amd_set_mtrr(unsigned int reg, unsigned long base,
   14.53 +			 unsigned long size, mtrr_type type)
   14.54 +/*  [SUMMARY] Set variable MTRR register on the local CPU.
   14.55 +    <reg> The register to set.
   14.56 +    <base> The base address of the region.
   14.57 +    <size> The size of the region. If this is 0 the region is disabled.
   14.58 +    <type> The type of the region.
   14.59 +    <do_safe> If TRUE, do the change safely. If FALSE, safety measures should
   14.60 +    be done externally.
   14.61 +    [RETURNS] Nothing.
   14.62 +*/
   14.63 +{
   14.64 +	u32 regs[2];
   14.65 +
   14.66 +	/*
   14.67 +	 *  Low is MTRR0 , High MTRR 1
   14.68 +	 */
   14.69 +	rdmsr(MSR_K6_UWCCR, regs[0], regs[1]);
   14.70 +	/*
   14.71 +	 *  Blank to disable
   14.72 +	 */
   14.73 +	if (size == 0)
   14.74 +		regs[reg] = 0;
   14.75 +	else
   14.76 +		/* Set the register to the base, the type (off by one) and an
   14.77 +		   inverted bitmask of the size The size is the only odd
   14.78 +		   bit. We are fed say 512K We invert this and we get 111 1111
   14.79 +		   1111 1011 but if you subtract one and invert you get the   
   14.80 +		   desired 111 1111 1111 1100 mask
   14.81 +
   14.82 +		   But ~(x - 1) == ~x + 1 == -x. Two's complement rocks!  */
   14.83 +		regs[reg] = (-size >> (15 - PAGE_SHIFT) & 0x0001FFFC)
   14.84 +		    | (base << PAGE_SHIFT) | (type + 1);
   14.85 +
   14.86 +	/*
   14.87 +	 *  The writeback rule is quite specific. See the manual. Its
   14.88 +	 *  disable local interrupts, write back the cache, set the mtrr
   14.89 +	 */
   14.90 +	wbinvd();
   14.91 +	wrmsr(MSR_K6_UWCCR, regs[0], regs[1]);
   14.92 +}
   14.93 +
   14.94 +static int amd_validate_add_page(unsigned long base, unsigned long size, unsigned int type)
   14.95 +{
   14.96 +	/* Apply the K6 block alignment and size rules
   14.97 +	   In order
   14.98 +	   o Uncached or gathering only
   14.99 +	   o 128K or bigger block
  14.100 +	   o Power of 2 block
  14.101 +	   o base suitably aligned to the power
  14.102 +	*/
  14.103 +	if (type > MTRR_TYPE_WRCOMB || size < (1 << (17 - PAGE_SHIFT))
  14.104 +	    || (size & ~(size - 1)) - size || (base & (size - 1)))
  14.105 +		return -EINVAL;
  14.106 +	return 0;
  14.107 +}
  14.108 +
  14.109 +static struct mtrr_ops amd_mtrr_ops = {
  14.110 +	.vendor            = X86_VENDOR_AMD,
  14.111 +	.set               = amd_set_mtrr,
  14.112 +	.get               = amd_get_mtrr,
  14.113 +	.get_free_region   = generic_get_free_region,
  14.114 +	.validate_add_page = amd_validate_add_page,
  14.115 +	.have_wrcomb       = positive_have_wrcomb,
  14.116 +};
  14.117 +
  14.118 +int __init amd_init_mtrr(void)
  14.119 +{
  14.120 +	set_mtrr_ops(&amd_mtrr_ops);
  14.121 +	return 0;
  14.122 +}
  14.123 +
  14.124 +//arch_initcall(amd_mtrr_init);
    15.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    15.2 +++ b/xen/arch/x86/cpu/mtrr/centaur.c	Tue Feb 14 16:23:43 2006 +0100
    15.3 @@ -0,0 +1,223 @@
    15.4 +#include <xen/init.h>
    15.5 +#include <xen/mm.h>
    15.6 +#include <asm/mtrr.h>
    15.7 +#include <asm/msr.h>
    15.8 +#include "mtrr.h"
    15.9 +
   15.10 +static struct {
   15.11 +	unsigned long high;
   15.12 +	unsigned long low;
   15.13 +} centaur_mcr[8];
   15.14 +
   15.15 +static u8 centaur_mcr_reserved;
   15.16 +static u8 centaur_mcr_type;	/* 0 for winchip, 1 for winchip2 */
   15.17 +
   15.18 +/*
   15.19 + *	Report boot time MCR setups 
   15.20 + */
   15.21 +
   15.22 +static int
   15.23 +centaur_get_free_region(unsigned long base, unsigned long size)
   15.24 +/*  [SUMMARY] Get a free MTRR.
   15.25 +    <base> The starting (base) address of the region.
   15.26 +    <size> The size (in bytes) of the region.
   15.27 +    [RETURNS] The index of the region on success, else -1 on error.
   15.28 +*/
   15.29 +{
   15.30 +	int i, max;
   15.31 +	mtrr_type ltype;
   15.32 +	unsigned long lbase;
   15.33 +	unsigned int lsize;
   15.34 +
   15.35 +	max = num_var_ranges;
   15.36 +	for (i = 0; i < max; ++i) {
   15.37 +		if (centaur_mcr_reserved & (1 << i))
   15.38 +			continue;
   15.39 +		mtrr_if->get(i, &lbase, &lsize, &ltype);
   15.40 +		if (lsize == 0)
   15.41 +			return i;
   15.42 +	}
   15.43 +	return -ENOSPC;
   15.44 +}
   15.45 +
   15.46 +void
   15.47 +mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi)
   15.48 +{
   15.49 +	centaur_mcr[mcr].low = lo;
   15.50 +	centaur_mcr[mcr].high = hi;
   15.51 +}
   15.52 +
   15.53 +static void
   15.54 +centaur_get_mcr(unsigned int reg, unsigned long *base,
   15.55 +		unsigned int *size, mtrr_type * type)
   15.56 +{
   15.57 +	*base = centaur_mcr[reg].high >> PAGE_SHIFT;
   15.58 +	*size = -(centaur_mcr[reg].low & 0xfffff000) >> PAGE_SHIFT;
   15.59 +	*type = MTRR_TYPE_WRCOMB;	/*  If it is there, it is write-combining  */
   15.60 +	if (centaur_mcr_type == 1 && ((centaur_mcr[reg].low & 31) & 2))
   15.61 +		*type = MTRR_TYPE_UNCACHABLE;
   15.62 +	if (centaur_mcr_type == 1 && (centaur_mcr[reg].low & 31) == 25)
   15.63 +		*type = MTRR_TYPE_WRBACK;
   15.64 +	if (centaur_mcr_type == 0 && (centaur_mcr[reg].low & 31) == 31)
   15.65 +		*type = MTRR_TYPE_WRBACK;
   15.66 +
   15.67 +}
   15.68 +
   15.69 +static void centaur_set_mcr(unsigned int reg, unsigned long base,
   15.70 +			    unsigned long size, mtrr_type type)
   15.71 +{
   15.72 +	unsigned long low, high;
   15.73 +
   15.74 +	if (size == 0) {
   15.75 +		/*  Disable  */
   15.76 +		high = low = 0;
   15.77 +	} else {
   15.78 +		high = base << PAGE_SHIFT;
   15.79 +		if (centaur_mcr_type == 0)
   15.80 +			low = -size << PAGE_SHIFT | 0x1f;	/* only support write-combining... */
   15.81 +		else {
   15.82 +			if (type == MTRR_TYPE_UNCACHABLE)
   15.83 +				low = -size << PAGE_SHIFT | 0x02;	/* NC */
   15.84 +			else
   15.85 +				low = -size << PAGE_SHIFT | 0x09;	/* WWO,WC */
   15.86 +		}
   15.87 +	}
   15.88 +	centaur_mcr[reg].high = high;
   15.89 +	centaur_mcr[reg].low = low;
   15.90 +	wrmsr(MSR_IDT_MCR0 + reg, low, high);
   15.91 +}
   15.92 +
   15.93 +#if 0
   15.94 +/*
   15.95 + *	Initialise the later (saner) Winchip MCR variant. In this version
   15.96 + *	the BIOS can pass us the registers it has used (but not their values)
   15.97 + *	and the control register is read/write
   15.98 + */
   15.99 +
  15.100 +static void __init
  15.101 +centaur_mcr1_init(void)
  15.102 +{
  15.103 +	unsigned i;
  15.104 +	u32 lo, hi;
  15.105 +
  15.106 +	/* Unfortunately, MCR's are read-only, so there is no way to
  15.107 +	 * find out what the bios might have done.
  15.108 +	 */
  15.109 +
  15.110 +	rdmsr(MSR_IDT_MCR_CTRL, lo, hi);
  15.111 +	if (((lo >> 17) & 7) == 1) {	/* Type 1 Winchip2 MCR */
  15.112 +		lo &= ~0x1C0;	/* clear key */
  15.113 +		lo |= 0x040;	/* set key to 1 */
  15.114 +		wrmsr(MSR_IDT_MCR_CTRL, lo, hi);	/* unlock MCR */
  15.115 +	}
  15.116 +
  15.117 +	centaur_mcr_type = 1;
  15.118 +
  15.119 +	/*
  15.120 +	 *  Clear any unconfigured MCR's.
  15.121 +	 */
  15.122 +
  15.123 +	for (i = 0; i < 8; ++i) {
  15.124 +		if (centaur_mcr[i].high == 0 && centaur_mcr[i].low == 0) {
  15.125 +			if (!(lo & (1 << (9 + i))))
  15.126 +				wrmsr(MSR_IDT_MCR0 + i, 0, 0);
  15.127 +			else
  15.128 +				/*
  15.129 +				 *      If the BIOS set up an MCR we cannot see it
  15.130 +				 *      but we don't wish to obliterate it
  15.131 +				 */
  15.132 +				centaur_mcr_reserved |= (1 << i);
  15.133 +		}
  15.134 +	}
  15.135 +	/*  
  15.136 +	 *  Throw the main write-combining switch... 
  15.137 +	 *  However if OOSTORE is enabled then people have already done far
  15.138 +	 *  cleverer things and we should behave. 
  15.139 +	 */
  15.140 +
  15.141 +	lo |= 15;		/* Write combine enables */
  15.142 +	wrmsr(MSR_IDT_MCR_CTRL, lo, hi);
  15.143 +}
  15.144 +
  15.145 +/*
  15.146 + *	Initialise the original WinChip, which has read-only MCR registers,
  15.147 + *	no used-register bitmask for the BIOS to pass on, and a write-only control
  15.148 + */
  15.149 +
  15.150 +static void __init
  15.151 +centaur_mcr0_init(void)
  15.152 +{
  15.153 +	unsigned i;
  15.154 +
  15.155 +	/* Unfortunately, MCR's are read-only, so there is no way to
  15.156 +	 * find out what the bios might have done.
  15.157 +	 */
  15.158 +
  15.159 +	/* Clear any unconfigured MCR's.
  15.160 +	 * This way we are sure that the centaur_mcr array contains the actual
  15.161 +	 * values. The disadvantage is that any BIOS tweaks are thus undone.
  15.162 +	 *
  15.163 +	 */
  15.164 +	for (i = 0; i < 8; ++i) {
  15.165 +		if (centaur_mcr[i].high == 0 && centaur_mcr[i].low == 0)
  15.166 +			wrmsr(MSR_IDT_MCR0 + i, 0, 0);
  15.167 +	}
  15.168 +
  15.169 +	wrmsr(MSR_IDT_MCR_CTRL, 0x01F0001F, 0);	/* Write only */
  15.170 +}
  15.171 +
  15.172 +/*
  15.173 + *	Initialise Winchip series MCR registers
  15.174 + */
  15.175 +
  15.176 +static void __init
  15.177 +centaur_mcr_init(void)
  15.178 +{
  15.179 +	struct set_mtrr_context ctxt;
  15.180 +
  15.181 +	set_mtrr_prepare_save(&ctxt);
  15.182 +	set_mtrr_cache_disable(&ctxt);
  15.183 +
  15.184 +	if (boot_cpu_data.x86_model == 4)
  15.185 +		centaur_mcr0_init();
  15.186 +	else if (boot_cpu_data.x86_model == 8 || boot_cpu_data.x86_model == 9)
  15.187 +		centaur_mcr1_init();
  15.188 +
  15.189 +	set_mtrr_done(&ctxt);
  15.190 +}
  15.191 +#endif
  15.192 +
  15.193 +static int centaur_validate_add_page(unsigned long base, 
  15.194 +				     unsigned long size, unsigned int type)
  15.195 +{
  15.196 +	/*
  15.197 +	 *  FIXME: Winchip2 supports uncached
  15.198 +	 */
  15.199 +	if (type != MTRR_TYPE_WRCOMB && 
  15.200 +	    (centaur_mcr_type == 0 || type != MTRR_TYPE_UNCACHABLE)) {
  15.201 +		printk(KERN_WARNING
  15.202 +		       "mtrr: only write-combining%s supported\n",
  15.203 +		       centaur_mcr_type ? " and uncacheable are"
  15.204 +		       : " is");
  15.205 +		return -EINVAL;
  15.206 +	}
  15.207 +	return 0;
  15.208 +}
  15.209 +
  15.210 +static struct mtrr_ops centaur_mtrr_ops = {
  15.211 +	.vendor            = X86_VENDOR_CENTAUR,
  15.212 +//	.init              = centaur_mcr_init,
  15.213 +	.set               = centaur_set_mcr,
  15.214 +	.get               = centaur_get_mcr,
  15.215 +	.get_free_region   = centaur_get_free_region,
  15.216 +	.validate_add_page = centaur_validate_add_page,
  15.217 +	.have_wrcomb       = positive_have_wrcomb,
  15.218 +};
  15.219 +
  15.220 +int __init centaur_init_mtrr(void)
  15.221 +{
  15.222 +	set_mtrr_ops(&centaur_mtrr_ops);
  15.223 +	return 0;
  15.224 +}
  15.225 +
  15.226 +//arch_initcall(centaur_init_mtrr);
    16.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    16.2 +++ b/xen/arch/x86/cpu/mtrr/cyrix.c	Tue Feb 14 16:23:43 2006 +0100
    16.3 @@ -0,0 +1,364 @@
    16.4 +#include <xen/init.h>
    16.5 +#include <xen/mm.h>
    16.6 +#include <asm/mtrr.h>
    16.7 +#include <asm/msr.h>
    16.8 +#include <asm/io.h>
    16.9 +#include "mtrr.h"
   16.10 +
   16.11 +int arr3_protected;
   16.12 +
   16.13 +static void
   16.14 +cyrix_get_arr(unsigned int reg, unsigned long *base,
   16.15 +	      unsigned int *size, mtrr_type * type)
   16.16 +{
   16.17 +	unsigned long flags;
   16.18 +	unsigned char arr, ccr3, rcr, shift;
   16.19 +
   16.20 +	arr = CX86_ARR_BASE + (reg << 1) + reg;	/* avoid multiplication by 3 */
   16.21 +
   16.22 +	/* Save flags and disable interrupts */
   16.23 +	local_irq_save(flags);
   16.24 +
   16.25 +	ccr3 = getCx86(CX86_CCR3);
   16.26 +	setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10);	/* enable MAPEN */
   16.27 +	((unsigned char *) base)[3] = getCx86(arr);
   16.28 +	((unsigned char *) base)[2] = getCx86(arr + 1);
   16.29 +	((unsigned char *) base)[1] = getCx86(arr + 2);
   16.30 +	rcr = getCx86(CX86_RCR_BASE + reg);
   16.31 +	setCx86(CX86_CCR3, ccr3);	/* disable MAPEN */
   16.32 +
   15.33 +	/* Enable interrupts if they were enabled previously */
   16.34 +	local_irq_restore(flags);
   16.35 +	shift = ((unsigned char *) base)[1] & 0x0f;
   16.36 +	*base >>= PAGE_SHIFT;
   16.37 +
   16.38 +	/* Power of two, at least 4K on ARR0-ARR6, 256K on ARR7
   16.39 +	 * Note: shift==0xf means 4G, this is unsupported.
   16.40 +	 */
   16.41 +	if (shift)
   16.42 +		*size = (reg < 7 ? 0x1UL : 0x40UL) << (shift - 1);
   16.43 +	else
   16.44 +		*size = 0;
   16.45 +
   16.46 +	/* Bit 0 is Cache Enable on ARR7, Cache Disable on ARR0-ARR6 */
   16.47 +	if (reg < 7) {
   16.48 +		switch (rcr) {
   16.49 +		case 1:
   16.50 +			*type = MTRR_TYPE_UNCACHABLE;
   16.51 +			break;
   16.52 +		case 8:
   16.53 +			*type = MTRR_TYPE_WRBACK;
   16.54 +			break;
   16.55 +		case 9:
   16.56 +			*type = MTRR_TYPE_WRCOMB;
   16.57 +			break;
   16.58 +		case 24:
   16.59 +		default:
   16.60 +			*type = MTRR_TYPE_WRTHROUGH;
   16.61 +			break;
   16.62 +		}
   16.63 +	} else {
   16.64 +		switch (rcr) {
   16.65 +		case 0:
   16.66 +			*type = MTRR_TYPE_UNCACHABLE;
   16.67 +			break;
   16.68 +		case 8:
   16.69 +			*type = MTRR_TYPE_WRCOMB;
   16.70 +			break;
   16.71 +		case 9:
   16.72 +			*type = MTRR_TYPE_WRBACK;
   16.73 +			break;
   16.74 +		case 25:
   16.75 +		default:
   16.76 +			*type = MTRR_TYPE_WRTHROUGH;
   16.77 +			break;
   16.78 +		}
   16.79 +	}
   16.80 +}
   16.81 +
   16.82 +static int
   16.83 +cyrix_get_free_region(unsigned long base, unsigned long size)
   16.84 +/*  [SUMMARY] Get a free ARR.
   16.85 +    <base> The starting (base) address of the region.
   16.86 +    <size> The size (in bytes) of the region.
   16.87 +    [RETURNS] The index of the region on success, else -ENOSPC if none are free.
   16.88 +*/
   16.89 +{
   16.90 +	int i;
   16.91 +	mtrr_type ltype;
   16.92 +	unsigned long lbase;
   16.93 +	unsigned int  lsize;
   16.94 +
   16.95 +	/* If we are to set up a region >32M then look at ARR7 immediately */
   16.96 +	if (size > 0x2000) {
   16.97 +		cyrix_get_arr(7, &lbase, &lsize, &ltype);
   16.98 +		if (lsize == 0)
   16.99 +			return 7;
  16.100 +		/*  Else try ARR0-ARR6 first  */
  16.101 +	} else {
  16.102 +		for (i = 0; i < 7; i++) {
  16.103 +			cyrix_get_arr(i, &lbase, &lsize, &ltype);
  16.104 +			if ((i == 3) && arr3_protected)
  16.105 +				continue;
  16.106 +			if (lsize == 0)
  16.107 +				return i;
  16.108 +		}
  16.109 +		/* ARR0-ARR6 aren't free; try ARR7, but its size must be at least 256K */
  16.110 +		cyrix_get_arr(i, &lbase, &lsize, &ltype);
  16.111 +		if ((lsize == 0) && (size >= 0x40))
  16.112 +			return i;
  16.113 +	}
  16.114 +	return -ENOSPC;
  16.115 +}
  16.116 +
  16.117 +static u32 cr4 = 0;
  16.118 +static u32 ccr3;
  16.119 +
  16.120 +static void prepare_set(void)
  16.121 +{
  16.122 +	u32 cr0;
  16.123 +
  16.124 +	/*  Save value of CR4 and clear Page Global Enable (bit 7)  */
  16.125 +	if ( cpu_has_pge ) {
  16.126 +		cr4 = read_cr4();
  16.127 +		write_cr4(cr4 & (unsigned char) ~(1 << 7));
  16.128 +	}
  16.129 +
  16.130 +	/*  Disable and flush caches. Note that wbinvd flushes the TLBs as
  16.131 +	    a side-effect  */
  16.132 +	cr0 = read_cr0() | 0x40000000;
  16.133 +	wbinvd();
  16.134 +	write_cr0(cr0);
  16.135 +	wbinvd();
  16.136 +
  16.137 +	/* Cyrix ARRs - everything else was excluded at the top */
  16.138 +	ccr3 = getCx86(CX86_CCR3);
  16.139 +
  16.140 +	/* Enable MAPEN so the ARRs are accessible */
  16.141 +	setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10);
  16.142 +
  16.143 +}
  16.144 +
  16.145 +static void post_set(void)
  16.146 +{
  16.147 +	/*  Flush caches and TLBs  */
  16.148 +	wbinvd();
  16.149 +
  16.150 +	/* Cyrix ARRs - everything else was excluded at the top */
  16.151 +	setCx86(CX86_CCR3, ccr3);
  16.152 +		
  16.153 +	/*  Enable caches  */
  16.154 +	write_cr0(read_cr0() & 0xbfffffff);
  16.155 +
  16.156 +	/*  Restore value of CR4  */
  16.157 +	if ( cpu_has_pge )
  16.158 +		write_cr4(cr4);
  16.159 +}
  16.160 +
  16.161 +static void cyrix_set_arr(unsigned int reg, unsigned long base,
  16.162 +			  unsigned long size, mtrr_type type)
  16.163 +{
  16.164 +	unsigned char arr, arr_type, arr_size;
  16.165 +
  16.166 +	arr = CX86_ARR_BASE + (reg << 1) + reg;	/* avoid multiplication by 3 */
  16.167 +
  16.168 +	/* count down from 32M (ARR0-ARR6) or from 2G (ARR7) */
  16.169 +	if (reg >= 7)
  16.170 +		size >>= 6;
  16.171 +
  16.172 +	size &= 0x7fff;		/* make sure arr_size <= 14 */
  16.173 +	for (arr_size = 0; size; arr_size++, size >>= 1) ;
  16.174 +
  16.175 +	if (reg < 7) {
  16.176 +		switch (type) {
  16.177 +		case MTRR_TYPE_UNCACHABLE:
  16.178 +			arr_type = 1;
  16.179 +			break;
  16.180 +		case MTRR_TYPE_WRCOMB:
  16.181 +			arr_type = 9;
  16.182 +			break;
  16.183 +		case MTRR_TYPE_WRTHROUGH:
  16.184 +			arr_type = 24;
  16.185 +			break;
  16.186 +		default:
  16.187 +			arr_type = 8;
  16.188 +			break;
  16.189 +		}
  16.190 +	} else {
  16.191 +		switch (type) {
  16.192 +		case MTRR_TYPE_UNCACHABLE:
  16.193 +			arr_type = 0;
  16.194 +			break;
  16.195 +		case MTRR_TYPE_WRCOMB:
  16.196 +			arr_type = 8;
  16.197 +			break;
  16.198 +		case MTRR_TYPE_WRTHROUGH:
  16.199 +			arr_type = 25;
  16.200 +			break;
  16.201 +		default:
  16.202 +			arr_type = 9;
  16.203 +			break;
  16.204 +		}
  16.205 +	}
  16.206 +
  16.207 +	prepare_set();
  16.208 +
  16.209 +	base <<= PAGE_SHIFT;
  16.210 +	setCx86(arr, ((unsigned char *) &base)[3]);
  16.211 +	setCx86(arr + 1, ((unsigned char *) &base)[2]);
  16.212 +	setCx86(arr + 2, (((unsigned char *) &base)[1]) | arr_size);
  16.213 +	setCx86(CX86_RCR_BASE + reg, arr_type);
  16.214 +
  16.215 +	post_set();
  16.216 +}
  16.217 +
  16.218 +typedef struct {
  16.219 +	unsigned long base;
  16.220 +	unsigned int size;
  16.221 +	mtrr_type type;
  16.222 +} arr_state_t;
  16.223 +
  16.224 +static arr_state_t arr_state[8] __devinitdata = {
  16.225 +	{0UL, 0UL, 0UL}, {0UL, 0UL, 0UL}, {0UL, 0UL, 0UL}, {0UL, 0UL, 0UL},
  16.226 +	{0UL, 0UL, 0UL}, {0UL, 0UL, 0UL}, {0UL, 0UL, 0UL}, {0UL, 0UL, 0UL}
  16.227 +};
  16.228 +
  16.229 +static unsigned char ccr_state[7] __devinitdata = { 0, 0, 0, 0, 0, 0, 0 };
  16.230 +
  16.231 +static void cyrix_set_all(void)
  16.232 +{
  16.233 +	int i;
  16.234 +
  16.235 +	prepare_set();
  16.236 +
  16.237 +	/* the CCRs are not contiguous */
  16.238 +	for (i = 0; i < 4; i++)
  16.239 +		setCx86(CX86_CCR0 + i, ccr_state[i]);
  16.240 +	for (; i < 7; i++)
  16.241 +		setCx86(CX86_CCR4 + i, ccr_state[i]);
  16.242 +	for (i = 0; i < 8; i++)
  16.243 +		cyrix_set_arr(i, arr_state[i].base, 
  16.244 +			      arr_state[i].size, arr_state[i].type);
  16.245 +
  16.246 +	post_set();
  16.247 +}
  16.248 +
  16.249 +#if 0
  16.250 +/*
  16.251 + * On Cyrix 6x86(MX) and M II the ARR3 is special: it has connection
  16.252 + * with the SMM (System Management Mode) mode. So we need the following:
  16.253 + * Check whether SMI_LOCK (CCR3 bit 0) is set
  16.254 + *   if it is set, write a warning message: ARR3 cannot be changed!
  16.255 + *     (it cannot be changed until the next processor reset)
  16.256 + *   if it is reset, then we can change it, set all the needed bits:
  16.257 + *   - disable access to SMM memory through ARR3 range (CCR1 bit 7 reset)
  16.258 + *   - disable access to SMM memory (CCR1 bit 2 reset)
  16.259 + *   - disable SMM mode (CCR1 bit 1 reset)
  16.260 + *   - disable write protection of ARR3 (CCR6 bit 1 reset)
  16.261 + *   - (maybe) disable ARR3
  16.262 + * Just to be sure, we enable ARR usage by the processor (CCR5 bit 5 set)
  16.263 + */
  16.264 +static void __init
  16.265 +cyrix_arr_init(void)
  16.266 +{
  16.267 +	struct set_mtrr_context ctxt;
  16.268 +	unsigned char ccr[7];
  16.269 +	int ccrc[7] = { 0, 0, 0, 0, 0, 0, 0 };
  16.270 +#ifdef CONFIG_SMP
  16.271 +	int i;
  16.272 +#endif
  16.273 +
  16.274 +	/* flush cache and enable MAPEN */
  16.275 +	set_mtrr_prepare_save(&ctxt);
  16.276 +	set_mtrr_cache_disable(&ctxt);
  16.277 +
  16.278 +	/* Save all CCRs locally */
  16.279 +	ccr[0] = getCx86(CX86_CCR0);
  16.280 +	ccr[1] = getCx86(CX86_CCR1);
  16.281 +	ccr[2] = getCx86(CX86_CCR2);
  16.282 +	ccr[3] = ctxt.ccr3;
  16.283 +	ccr[4] = getCx86(CX86_CCR4);
  16.284 +	ccr[5] = getCx86(CX86_CCR5);
  16.285 +	ccr[6] = getCx86(CX86_CCR6);
  16.286 +
  16.287 +	if (ccr[3] & 1) {
  16.288 +		ccrc[3] = 1;
  16.289 +		arr3_protected = 1;
  16.290 +	} else {
  16.291 +		/* Disable SMM mode (bit 1), access to SMM memory (bit 2) and
  16.292 +		 * access to SMM memory through ARR3 (bit 7).
  16.293 +		 */
  16.294 +		if (ccr[1] & 0x80) {
  16.295 +			ccr[1] &= 0x7f;
  16.296 +			ccrc[1] |= 0x80;
  16.297 +		}
  16.298 +		if (ccr[1] & 0x04) {
  16.299 +			ccr[1] &= 0xfb;
  16.300 +			ccrc[1] |= 0x04;
  16.301 +		}
  16.302 +		if (ccr[1] & 0x02) {
  16.303 +			ccr[1] &= 0xfd;
  16.304 +			ccrc[1] |= 0x02;
  16.305 +		}
  16.306 +		arr3_protected = 0;
  16.307 +		if (ccr[6] & 0x02) {
  16.308 +			ccr[6] &= 0xfd;
  16.309 +			ccrc[6] = 1;	/* Disable write protection of ARR3 */
  16.310 +			setCx86(CX86_CCR6, ccr[6]);
  16.311 +		}
  16.312 +		/* Disable ARR3. This is safe now that we disabled SMM. */
  16.313 +		/* cyrix_set_arr_up (3, 0, 0, 0, FALSE); */
  16.314 +	}
  16.315 +	/* If we changed CCR1 in memory, change it in the processor, too. */
  16.316 +	if (ccrc[1])
  16.317 +		setCx86(CX86_CCR1, ccr[1]);
  16.318 +
  16.319 +	/* Enable ARR usage by the processor */
  16.320 +	if (!(ccr[5] & 0x20)) {
  16.321 +		ccr[5] |= 0x20;
  16.322 +		ccrc[5] = 1;
  16.323 +		setCx86(CX86_CCR5, ccr[5]);
  16.324 +	}
  16.325 +#ifdef CONFIG_SMP
  16.326 +	for (i = 0; i < 7; i++)
  16.327 +		ccr_state[i] = ccr[i];
  16.328 +	for (i = 0; i < 8; i++)
  16.329 +		cyrix_get_arr(i,
  16.330 +			      &arr_state[i].base, &arr_state[i].size,
  16.331 +			      &arr_state[i].type);
  16.332 +#endif
  16.333 +
  16.334 +	set_mtrr_done(&ctxt);	/* flush cache and disable MAPEN */
  16.335 +
  16.336 +	if (ccrc[5])
  16.337 +		printk(KERN_INFO "mtrr: ARR usage was not enabled, enabled manually\n");
  16.338 +	if (ccrc[3])
  16.339 +		printk(KERN_INFO "mtrr: ARR3 cannot be changed\n");
  16.340 +/*
  16.341 +    if ( ccrc[1] & 0x80) printk ("mtrr: SMM memory access through ARR3 disabled\n");
  16.342 +    if ( ccrc[1] & 0x04) printk ("mtrr: SMM memory access disabled\n");
  16.343 +    if ( ccrc[1] & 0x02) printk ("mtrr: SMM mode disabled\n");
  16.344 +*/
  16.345 +	if (ccrc[6])
  16.346 +		printk(KERN_INFO "mtrr: ARR3 was write protected, unprotected\n");
  16.347 +}
  16.348 +#endif
  16.349 +
  16.350 +static struct mtrr_ops cyrix_mtrr_ops = {
  16.351 +	.vendor            = X86_VENDOR_CYRIX,
  16.352 +//	.init              = cyrix_arr_init,
  16.353 +	.set_all	   = cyrix_set_all,
  16.354 +	.set               = cyrix_set_arr,
  16.355 +	.get               = cyrix_get_arr,
  16.356 +	.get_free_region   = cyrix_get_free_region,
  16.357 +	.validate_add_page = generic_validate_add_page,
  16.358 +	.have_wrcomb       = positive_have_wrcomb,
  16.359 +};
  16.360 +
  16.361 +int __init cyrix_init_mtrr(void)
  16.362 +{
  16.363 +	set_mtrr_ops(&cyrix_mtrr_ops);
  16.364 +	return 0;
  16.365 +}
  16.366 +
  16.367 +//arch_initcall(cyrix_init_mtrr);
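
As an illustrative sketch of the ARR size encoding used by cyrix_set_arr() and
cyrix_get_arr() above (page-granular sizes assumed, as after mtrr_add_page()):

	/* A 512 KiB region on ARR0-ARR6 is 0x80 pages. */
	unsigned long size = 0x80;
	unsigned char arr_size;
	for (arr_size = 0; size; arr_size++, size >>= 1) ;
	/* arr_size is now 8; cyrix_get_arr() maps shift 8 back to
	   0x1UL << (8 - 1) == 0x80 pages, so set and get round-trip. */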
    17.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    17.2 +++ b/xen/arch/x86/cpu/mtrr/generic.c	Tue Feb 14 16:23:43 2006 +0100
    17.3 @@ -0,0 +1,418 @@
    17.4 +/* This only handles 32-bit MTRRs on 32-bit hosts. This is strictly wrong
    17.5 +   because MTRRs can span up to 40 bits (36 bits on most modern x86) */
    17.6 +#include <xen/lib.h>
    17.7 +#include <xen/init.h>
    17.8 +#include <xen/mm.h>
    17.9 +#include <asm/flushtlb.h>
   17.10 +#include <asm/io.h>
   17.11 +#include <asm/mtrr.h>
   17.12 +#include <asm/msr.h>
   17.13 +#include <asm/system.h>
   17.14 +#include <asm/cpufeature.h>
   17.15 +#include "mtrr.h"
   17.16 +
   17.17 +struct mtrr_state {
   17.18 +	struct mtrr_var_range *var_ranges;
   17.19 +	mtrr_type fixed_ranges[NUM_FIXED_RANGES];
   17.20 +	unsigned char enabled;
   17.21 +	mtrr_type def_type;
   17.22 +};
   17.23 +
   17.24 +static unsigned long smp_changes_mask;
   17.25 +static struct mtrr_state mtrr_state = {};
   17.26 +
   17.27 +/*  Get the MSR pair relating to a var range  */
   17.28 +static void __init
   17.29 +get_mtrr_var_range(unsigned int index, struct mtrr_var_range *vr)
   17.30 +{
   17.31 +	rdmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi);
   17.32 +	rdmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi);
   17.33 +}
   17.34 +
   17.35 +static void __init
   17.36 +get_fixed_ranges(mtrr_type * frs)
   17.37 +{
   17.38 +	unsigned int *p = (unsigned int *) frs;
   17.39 +	int i;
   17.40 +
   17.41 +	rdmsr(MTRRfix64K_00000_MSR, p[0], p[1]);
   17.42 +
   17.43 +	for (i = 0; i < 2; i++)
   17.44 +		rdmsr(MTRRfix16K_80000_MSR + i, p[2 + i * 2], p[3 + i * 2]);
   17.45 +	for (i = 0; i < 8; i++)
   17.46 +		rdmsr(MTRRfix4K_C0000_MSR + i, p[6 + i * 2], p[7 + i * 2]);
   17.47 +}
   17.48 +
   17.49 +/*  Grab all of the MTRR state for this CPU into *state  */
   17.50 +void __init get_mtrr_state(void)
   17.51 +{
   17.52 +	unsigned int i;
   17.53 +	struct mtrr_var_range *vrs;
   17.54 +	unsigned lo, dummy;
   17.55 +
   17.56 +	if (!mtrr_state.var_ranges) {
   17.57 +		mtrr_state.var_ranges = xmalloc_array(struct mtrr_var_range,
   17.58 +						  num_var_ranges);
   17.59 +		if (!mtrr_state.var_ranges)
   17.60 +			return;
   17.61 +	} 
   17.62 +	vrs = mtrr_state.var_ranges;
   17.63 +
   17.64 +	for (i = 0; i < num_var_ranges; i++)
   17.65 +		get_mtrr_var_range(i, &vrs[i]);
   17.66 +	get_fixed_ranges(mtrr_state.fixed_ranges);
   17.67 +
   17.68 +	rdmsr(MTRRdefType_MSR, lo, dummy);
   17.69 +	mtrr_state.def_type = (lo & 0xff);
   17.70 +	mtrr_state.enabled = (lo & 0xc00) >> 10;
   17.71 +}
   17.72 +
   17.73 +/*  Some BIOSes are broken and don't set all MTRRs the same!  */
   17.74 +void __init mtrr_state_warn(void)
   17.75 +{
   17.76 +	unsigned long mask = smp_changes_mask;
   17.77 +
   17.78 +	if (!mask)
   17.79 +		return;
   17.80 +	if (mask & MTRR_CHANGE_MASK_FIXED)
   17.81 +		printk(KERN_WARNING "mtrr: your CPUs had inconsistent fixed MTRR settings\n");
   17.82 +	if (mask & MTRR_CHANGE_MASK_VARIABLE)
   17.83 +		printk(KERN_WARNING "mtrr: your CPUs had inconsistent variable MTRR settings\n");
   17.84 +	if (mask & MTRR_CHANGE_MASK_DEFTYPE)
   17.85 +		printk(KERN_WARNING "mtrr: your CPUs had inconsistent MTRRdefType settings\n");
   17.86 +	printk(KERN_INFO "mtrr: probably your BIOS does not set up all CPUs.\n");
   17.87 +	printk(KERN_INFO "mtrr: corrected configuration.\n");
   17.88 +}
   17.89 +
   17.90 +/* We don't attempt to pass an error back to MTRR users, because in some
   17.91 +   cases handling it would be quite complicated and probably not worth it;
   17.92 +   the best error handling is simply to ignore the failure. */
   17.93 +void mtrr_wrmsr(unsigned msr, unsigned a, unsigned b)
   17.94 +{
   17.95 +	if (wrmsr_safe(msr, a, b) < 0)
   17.96 +		printk(KERN_ERR
   17.97 +			"MTRR: CPU %u: Writing MSR %x to %x:%x failed\n",
   17.98 +			smp_processor_id(), msr, a, b);
   17.99 +}
  17.100 +
  17.101 +int generic_get_free_region(unsigned long base, unsigned long size)
  17.102 +/*  [SUMMARY] Get a free MTRR.
  17.103 +    <base> The starting (base) address of the region.
  17.104 +    <size> The size (in bytes) of the region.
  17.105 +    [RETURNS] The index of the region on success, else -ENOSPC if none are free.
  17.106 +*/
  17.107 +{
  17.108 +	int i, max;
  17.109 +	mtrr_type ltype;
  17.110 +	unsigned long lbase;
  17.111 +	unsigned lsize;
  17.112 +
  17.113 +	max = num_var_ranges;
  17.114 +	for (i = 0; i < max; ++i) {
  17.115 +		mtrr_if->get(i, &lbase, &lsize, &ltype);
  17.116 +		if (lsize == 0)
  17.117 +			return i;
  17.118 +	}
  17.119 +	return -ENOSPC;
  17.120 +}
  17.121 +
  17.122 +static void generic_get_mtrr(unsigned int reg, unsigned long *base,
  17.123 +			     unsigned int *size, mtrr_type * type)
  17.124 +{
  17.125 +	unsigned int mask_lo, mask_hi, base_lo, base_hi;
  17.126 +
  17.127 +	rdmsr(MTRRphysMask_MSR(reg), mask_lo, mask_hi);
  17.128 +	if ((mask_lo & 0x800) == 0) {
  17.129 +		/*  Invalid (i.e. free) range  */
  17.130 +		*base = 0;
  17.131 +		*size = 0;
  17.132 +		*type = 0;
  17.133 +		return;
  17.134 +	}
  17.135 +
  17.136 +	rdmsr(MTRRphysBase_MSR(reg), base_lo, base_hi);
  17.137 +
  17.138 +	/* Work out the shifted address mask. */
  17.139 +	mask_lo = size_or_mask | mask_hi << (32 - PAGE_SHIFT)
  17.140 +	    | mask_lo >> PAGE_SHIFT;
  17.141 +
  17.142 +	/* This works correctly if size is a power of two, i.e. a
  17.143 +	   contiguous range. */
  17.144 +	*size = -mask_lo;
  17.145 +	*base = base_hi << (32 - PAGE_SHIFT) | base_lo >> PAGE_SHIFT;
  17.146 +	*type = base_lo & 0xff;
  17.147 +}
  17.148 +
  17.149 +static int set_fixed_ranges(mtrr_type * frs)
  17.150 +{
  17.151 +	unsigned int *p = (unsigned int *) frs;
  17.152 +	int changed = FALSE;
  17.153 +	int i;
  17.154 +	unsigned int lo, hi;
  17.155 +
  17.156 +	rdmsr(MTRRfix64K_00000_MSR, lo, hi);
  17.157 +	if (p[0] != lo || p[1] != hi) {
  17.158 +		mtrr_wrmsr(MTRRfix64K_00000_MSR, p[0], p[1]);
  17.159 +		changed = TRUE;
  17.160 +	}
  17.161 +
  17.162 +	for (i = 0; i < 2; i++) {
  17.163 +		rdmsr(MTRRfix16K_80000_MSR + i, lo, hi);
  17.164 +		if (p[2 + i * 2] != lo || p[3 + i * 2] != hi) {
  17.165 +			mtrr_wrmsr(MTRRfix16K_80000_MSR + i, p[2 + i * 2],
  17.166 +			      p[3 + i * 2]);
  17.167 +			changed = TRUE;
  17.168 +		}
  17.169 +	}
  17.170 +
  17.171 +	for (i = 0; i < 8; i++) {
  17.172 +		rdmsr(MTRRfix4K_C0000_MSR + i, lo, hi);
  17.173 +		if (p[6 + i * 2] != lo || p[7 + i * 2] != hi) {
  17.174 +			mtrr_wrmsr(MTRRfix4K_C0000_MSR + i, p[6 + i * 2],
  17.175 +			      p[7 + i * 2]);
  17.176 +			changed = TRUE;
  17.177 +		}
  17.178 +	}
  17.179 +	return changed;
  17.180 +}
  17.181 +
  17.182 +/*  Set the MSR pair relating to a var range. Returns TRUE if
  17.183 +    changes are made  */
  17.184 +static int set_mtrr_var_ranges(unsigned int index, struct mtrr_var_range *vr)
  17.185 +{
  17.186 +	unsigned int lo, hi;
  17.187 +	int changed = FALSE;
  17.188 +
  17.189 +	rdmsr(MTRRphysBase_MSR(index), lo, hi);
  17.190 +	if ((vr->base_lo & 0xfffff0ffUL) != (lo & 0xfffff0ffUL)
  17.191 +	    || (vr->base_hi & (size_and_mask >> (32 - PAGE_SHIFT))) !=
  17.192 +		(hi & (size_and_mask >> (32 - PAGE_SHIFT)))) {
  17.193 +		mtrr_wrmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi);
  17.194 +		changed = TRUE;
  17.195 +	}
  17.196 +
  17.197 +	rdmsr(MTRRphysMask_MSR(index), lo, hi);
  17.198 +
  17.199 +	if ((vr->mask_lo & 0xfffff800UL) != (lo & 0xfffff800UL)
  17.200 +	    || (vr->mask_hi & (size_and_mask >> (32 - PAGE_SHIFT))) !=
  17.201 +		(hi & (size_and_mask >> (32 - PAGE_SHIFT)))) {
  17.202 +		mtrr_wrmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi);
  17.203 +		changed = TRUE;
  17.204 +	}
  17.205 +	return changed;
  17.206 +}
  17.207 +
  17.208 +static unsigned long set_mtrr_state(u32 deftype_lo, u32 deftype_hi)
  17.209 +/*  [SUMMARY] Set the MTRR state for this CPU.
  17.210 +    <deftype_lo> The low 32 bits of the saved MTRRdefType MSR value.
  17.211 +    <deftype_hi> The high 32 bits of the saved MTRRdefType MSR value.
  17.212 +    [NOTE] The CPU must already be in a safe state for MTRR changes.
  17.213 +    [RETURNS] 0 if no changes made, else a mask indicating what was changed.
  17.214 +*/
  17.215 +{
  17.216 +	unsigned int i;
  17.217 +	unsigned long change_mask = 0;
  17.218 +
  17.219 +	for (i = 0; i < num_var_ranges; i++)
  17.220 +		if (set_mtrr_var_ranges(i, &mtrr_state.var_ranges[i]))
  17.221 +			change_mask |= MTRR_CHANGE_MASK_VARIABLE;
  17.222 +
  17.223 +	if (set_fixed_ranges(mtrr_state.fixed_ranges))
  17.224 +		change_mask |= MTRR_CHANGE_MASK_FIXED;
  17.225 +
  17.226 +	/*  Set_mtrr_restore restores the old value of MTRRdefType,
  17.227 +	   so to set it we fiddle with the saved value  */
  17.228 +	if ((deftype_lo & 0xff) != mtrr_state.def_type
  17.229 +	    || ((deftype_lo & 0xc00) >> 10) != mtrr_state.enabled) {
  17.230 +		deftype_lo |= (mtrr_state.def_type | mtrr_state.enabled << 10);
  17.231 +		change_mask |= MTRR_CHANGE_MASK_DEFTYPE;
  17.232 +	}
  17.233 +
  17.234 +	return change_mask;
  17.235 +}
  17.236 +
  17.237 +
  17.238 +static unsigned long cr4 = 0;
  17.239 +static u32 deftype_lo, deftype_hi;
  17.240 +static DEFINE_SPINLOCK(set_atomicity_lock);
  17.241 +
  17.242 +/*
  17.243 + * Since we are disabling the cache, don't allow any interrupts: they
  17.244 + * would run extremely slowly and would only increase the pain.  The caller must
  17.245 + * ensure that local interrupts are disabled and are reenabled after post_set()
  17.246 + * has been called.
  17.247 + */
  17.248 +
  17.249 +static void prepare_set(void)
  17.250 +{
  17.251 +	unsigned long cr0;
  17.252 +
  17.253 +	/*  Note that this is not ideal, since the cache is only flushed/disabled
  17.254 +	   for this CPU while the MTRRs are changed, but changing this requires
  17.255 +	   more invasive changes to the way the kernel boots  */
  17.256 +
  17.257 +	spin_lock(&set_atomicity_lock);
  17.258 +
  17.259 +	/*  Enter the no-fill (CD=1, NW=0) cache mode and flush caches. */
  17.260 +	cr0 = read_cr0() | 0x40000000;	/* set CD flag */
  17.261 +	write_cr0(cr0);
  17.262 +	wbinvd();
  17.263 +
  17.264 +	/*  Save value of CR4 and clear Page Global Enable (bit 7)  */
  17.265 +	if ( cpu_has_pge ) {
  17.266 +		cr4 = read_cr4();
  17.267 +		write_cr4(cr4 & ~X86_CR4_PGE);
  17.268 +	}
  17.269 +
  17.270 +	/* Flush all TLBs via a mov %cr3, %reg; mov %reg, %cr3 */
  17.271 +	local_flush_tlb();
  17.272 +
  17.273 +	/*  Save MTRR state */
  17.274 +	rdmsr(MTRRdefType_MSR, deftype_lo, deftype_hi);
  17.275 +
  17.276 +	/*  Disable MTRRs, and set the default type to uncached  */
  17.277 +	mtrr_wrmsr(MTRRdefType_MSR, deftype_lo & 0xf300UL, deftype_hi);
  17.278 +}
  17.279 +
  17.280 +static void post_set(void)
  17.281 +{
  17.282 +	/*  Flush TLBs (no need to flush caches - they are disabled)  */
  17.283 +	local_flush_tlb();
  17.284 +
  17.285 +	/* Intel (P6) standard MTRRs */
  17.286 +	mtrr_wrmsr(MTRRdefType_MSR, deftype_lo, deftype_hi);
  17.287 +		
  17.288 +	/*  Enable caches  */
  17.289 +	write_cr0(read_cr0() & 0xbfffffff);
  17.290 +
  17.291 +	/*  Restore value of CR4  */
  17.292 +	if ( cpu_has_pge )
  17.293 +		write_cr4(cr4);
  17.294 +	spin_unlock(&set_atomicity_lock);
  17.295 +}
  17.296 +
  17.297 +static void generic_set_all(void)
  17.298 +{
  17.299 +	unsigned long mask, count;
  17.300 +	unsigned long flags;
  17.301 +
  17.302 +	local_irq_save(flags);
  17.303 +	prepare_set();
  17.304 +
  17.305 +	/* Actually set the state */
  17.306 +	mask = set_mtrr_state(deftype_lo,deftype_hi);
  17.307 +
  17.308 +	post_set();
  17.309 +	local_irq_restore(flags);
  17.310 +
  17.311 +	/*  Use the atomic bitops to update the global mask  */
  17.312 +	for (count = 0; count < sizeof mask * 8; ++count) {
  17.313 +		if (mask & 0x01)
  17.314 +			set_bit(count, &smp_changes_mask);
  17.315 +		mask >>= 1;
  17.316 +	}
  17.317 +	
  17.318 +}
  17.319 +
  17.320 +static void generic_set_mtrr(unsigned int reg, unsigned long base,
  17.321 +			     unsigned long size, mtrr_type type)
  17.322 +/*  [SUMMARY] Set variable MTRR register on the local CPU.
  17.323 +    <reg> The register to set.
  17.324 +    <base> The base address of the region.
  17.325 +    <size> The size of the region. If this is 0 the region is disabled.
  17.326 +    <type> The type of the region.
  17.327 +    [NOTE] Interrupts are disabled and caches are flushed/re-enabled around
  17.328 +    the update via prepare_set()/post_set().
  17.329 +    [RETURNS] Nothing.
  17.330 +*/
  17.331 +{
  17.332 +	unsigned long flags;
  17.333 +	struct mtrr_var_range *vr;
  17.334 +
  17.335 +	vr = &mtrr_state.var_ranges[reg];
  17.336 +
  17.337 +	local_irq_save(flags);
  17.338 +	prepare_set();
  17.339 +
  17.340 +	if (size == 0) {
  17.341 +		/* The invalid bit is kept in the mask, so we simply clear the
  17.342 +		   relevant mask register to disable a range. */
  17.343 +		mtrr_wrmsr(MTRRphysMask_MSR(reg), 0, 0);
  17.344 +		memset(vr, 0, sizeof(struct mtrr_var_range));
  17.345 +	} else {
  17.346 +		vr->base_lo = base << PAGE_SHIFT | type;
  17.347 +		vr->base_hi = (base & size_and_mask) >> (32 - PAGE_SHIFT);
  17.348 +		vr->mask_lo = -size << PAGE_SHIFT | 0x800;
  17.349 +		vr->mask_hi = (-size & size_and_mask) >> (32 - PAGE_SHIFT);
  17.350 +
  17.351 +		mtrr_wrmsr(MTRRphysBase_MSR(reg), vr->base_lo, vr->base_hi);
  17.352 +		mtrr_wrmsr(MTRRphysMask_MSR(reg), vr->mask_lo, vr->mask_hi);
  17.353 +	}
  17.354 +
  17.355 +	post_set();
  17.356 +	local_irq_restore(flags);
  17.357 +}
  17.358 +
  17.359 +int generic_validate_add_page(unsigned long base, unsigned long size, unsigned int type)
  17.360 +{
  17.361 +	unsigned long lbase, last;
  17.362 +
  17.363 +	/*  For Intel PPro stepping <= 7, must be 4 MiB aligned 
  17.364 +	    and not touch 0x70000000->0x7003FFFF */
  17.365 +	if (is_cpu(INTEL) && boot_cpu_data.x86 == 6 &&
  17.366 +	    boot_cpu_data.x86_model == 1 &&
  17.367 +	    boot_cpu_data.x86_mask <= 7) {
  17.368 +		if (base & ((1 << (22 - PAGE_SHIFT)) - 1)) {
  17.369 +			printk(KERN_WARNING "mtrr: base(0x%lx000) is not 4 MiB aligned\n", base);
  17.370 +			return -EINVAL;
  17.371 +		}
  17.372 +		if (!(base + size < 0x70000000 || base > 0x7003FFFF) &&
  17.373 +		    (type == MTRR_TYPE_WRCOMB
  17.374 +		     || type == MTRR_TYPE_WRBACK)) {
  17.375 +			printk(KERN_WARNING "mtrr: writable mtrr between 0x70000000 and 0x7003FFFF may hang the CPU.\n");
  17.376 +			return -EINVAL;
  17.377 +		}
  17.378 +	}
  17.379 +
  17.380 +	if (base + size < 0x100) {
  17.381 +		printk(KERN_WARNING "mtrr: cannot set region below 1 MiB (0x%lx000,0x%lx000)\n",
  17.382 +		       base, size);
  17.383 +		return -EINVAL;
  17.384 +	}
  17.385 +	/*  Check upper bits of base and last are equal and lower bits are 0
  17.386 +	    for base and 1 for last  */
  17.387 +	last = base + size - 1;
  17.388 +	for (lbase = base; !(lbase & 1) && (last & 1);
  17.389 +	     lbase = lbase >> 1, last = last >> 1) ;
  17.390 +	if (lbase != last) {
  17.391 +		printk(KERN_WARNING "mtrr: base(0x%lx000) is not aligned on a size(0x%lx000) boundary\n",
  17.392 +		       base, size);
  17.393 +		return -EINVAL;
  17.394 +	}
  17.395 +	return 0;
  17.396 +}
  17.397 +
  17.398 +
  17.399 +static int generic_have_wrcomb(void)
  17.400 +{
  17.401 +	unsigned long config, dummy;
  17.402 +	rdmsr(MTRRcap_MSR, config, dummy);
  17.403 +	return (config & (1 << 10));
  17.404 +}
  17.405 +
  17.406 +int positive_have_wrcomb(void)
  17.407 +{
  17.408 +	return 1;
  17.409 +}
  17.410 +
  17.411 +/* generic structure...
  17.412 + */
  17.413 +struct mtrr_ops generic_mtrr_ops = {
  17.414 +	.use_intel_if      = 1,
  17.415 +	.set_all	   = generic_set_all,
  17.416 +	.get               = generic_get_mtrr,
  17.417 +	.get_free_region   = generic_get_free_region,
  17.418 +	.set               = generic_set_mtrr,
  17.419 +	.validate_add_page = generic_validate_add_page,
  17.420 +	.have_wrcomb       = generic_have_wrcomb,
  17.421 +};
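
As a worked example of the mask arithmetic in generic_get_mtrr(), assuming a
36-bit physical address width (size_or_mask = 0xff000000) and 4 KiB pages
(PAGE_SHIFT = 12):

	/* A valid 64 MiB variable range: MTRRphysMask holds mask_hi = 0xf,
	   mask_lo = 0xfc000800 (valid bit 0x800 set). */
	unsigned int mask_hi = 0xf, mask_lo = 0xfc000800;
	mask_lo = 0xff000000 | mask_hi << (32 - PAGE_SHIFT) | mask_lo >> PAGE_SHIFT;
	/* mask_lo == 0xffffc000, so *size = -mask_lo == 0x4000 pages == 64 MiB,
	   as expected for a power-of-two region. */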
    18.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    18.2 +++ b/xen/arch/x86/cpu/mtrr/main.c	Tue Feb 14 16:23:43 2006 +0100
    18.3 @@ -0,0 +1,661 @@
    18.4 +/*  Generic MTRR (Memory Type Range Register) driver.
    18.5 +
    18.6 +    Copyright (C) 1997-2000  Richard Gooch
    18.7 +    Copyright (c) 2002	     Patrick Mochel
    18.8 +
    18.9 +    This library is free software; you can redistribute it and/or
   18.10 +    modify it under the terms of the GNU Library General Public
   18.11 +    License as published by the Free Software Foundation; either
   18.12 +    version 2 of the License, or (at your option) any later version.
   18.13 +
   18.14 +    This library is distributed in the hope that it will be useful,
   18.15 +    but WITHOUT ANY WARRANTY; without even the implied warranty of
   18.16 +    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   18.17 +    Library General Public License for more details.
   18.18 +
   18.19 +    You should have received a copy of the GNU Library General Public
   18.20 +    License along with this library; if not, write to the Free
   18.21 +    Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
   18.22 +
   18.23 +    Richard Gooch may be reached by email at  rgooch@atnf.csiro.au
   18.24 +    The postal address is:
   18.25 +      Richard Gooch, c/o ATNF, P. O. Box 76, Epping, N.S.W., 2121, Australia.
   18.26 +
   18.27 +    Source: "Pentium Pro Family Developer's Manual, Volume 3:
   18.28 +    Operating System Writer's Guide" (Intel document number 242692),
   18.29 +    section 11.11.7
   18.30 +
   18.31 +    This was cleaned and made readable by Patrick Mochel <mochel@osdl.org> 
   18.32 +    on 6-7 March 2002. 
   18.33 +    Source: Intel Architecture Software Developers Manual, Volume 3: 
   18.34 +    System Programming Guide; Section 9.11. (1997 edition - PPro).
   18.35 +*/
   18.36 +
   18.37 +#include <xen/config.h>
   18.38 +#include <xen/init.h>
   18.39 +#include <xen/lib.h>
   18.40 +#include <xen/smp.h>
   18.41 +#include <xen/spinlock.h>
   18.42 +#include <asm/mtrr.h>
   18.43 +#include <asm/uaccess.h>
   18.44 +#include <asm/processor.h>
   18.45 +#include <asm/msr.h>
   18.46 +#include "mtrr.h"
   18.47 +
   18.48 +/* No blocking mutexes in Xen. Spin instead. */
   18.49 +#define DECLARE_MUTEX(_m) spinlock_t _m = SPIN_LOCK_UNLOCKED
   18.50 +#define down(_m) spin_lock(_m)
   18.51 +#define up(_m) spin_unlock(_m)
   18.52 +#define lock_cpu_hotplug() ((void)0)
   18.53 +#define unlock_cpu_hotplug() ((void)0)
   18.54 +#define dump_stack() ((void)0)
   18.55 +
   18.56 +u32 num_var_ranges = 0;
   18.57 +
   18.58 +unsigned int *usage_table;
   18.59 +static DECLARE_MUTEX(mtrr_sem);
   18.60 +
   18.61 +u32 size_or_mask, size_and_mask;
   18.62 +
   18.63 +static struct mtrr_ops * mtrr_ops[X86_VENDOR_NUM] = {};
   18.64 +
   18.65 +struct mtrr_ops * mtrr_if = NULL;
   18.66 +
   18.67 +static void set_mtrr(unsigned int reg, unsigned long base,
   18.68 +		     unsigned long size, mtrr_type type);
   18.69 +
   18.70 +extern int arr3_protected;
   18.71 +
   18.72 +static char *mtrr_strings[MTRR_NUM_TYPES] =
   18.73 +{
   18.74 +    "uncachable",               /* 0 */
   18.75 +    "write-combining",          /* 1 */
   18.76 +    "?",                        /* 2 */
   18.77 +    "?",                        /* 3 */
   18.78 +    "write-through",            /* 4 */
   18.79 +    "write-protect",            /* 5 */
   18.80 +    "write-back",               /* 6 */
   18.81 +};
   18.82 +
   18.83 +char *mtrr_attrib_to_str(int x)
   18.84 +{
   18.85 +	return (x <= 6) ? mtrr_strings[x] : "?";
   18.86 +}
   18.87 +
   18.88 +void set_mtrr_ops(struct mtrr_ops * ops)
   18.89 +{
   18.90 +	if (ops->vendor && ops->vendor < X86_VENDOR_NUM)
   18.91 +		mtrr_ops[ops->vendor] = ops;
   18.92 +}
   18.93 +
   18.94 +/*  Returns non-zero if we have the write-combining memory type  */
   18.95 +static int have_wrcomb(void)
   18.96 +{
   18.97 +	return (mtrr_if->have_wrcomb ? mtrr_if->have_wrcomb() : 0);
   18.98 +}
   18.99 +
  18.100 +/*  This function sets num_var_ranges to the number of variable MTRRs  */
  18.101 +static void __init set_num_var_ranges(void)
  18.102 +{
  18.103 +	unsigned long config = 0, dummy;
  18.104 +
  18.105 +	if (use_intel()) {
  18.106 +		rdmsr(MTRRcap_MSR, config, dummy);
  18.107 +	} else if (is_cpu(AMD))
  18.108 +		config = 2;
  18.109 +	else if (is_cpu(CYRIX) || is_cpu(CENTAUR))
  18.110 +		config = 8;
  18.111 +	num_var_ranges = config & 0xff;
  18.112 +}
  18.113 +
  18.114 +static void __init init_table(void)
  18.115 +{
  18.116 +	int i, max;
  18.117 +
  18.118 +	max = num_var_ranges;
  18.119 +	if ((usage_table = xmalloc_array(unsigned int, max)) == NULL) {
  18.120 +		printk(KERN_ERR "mtrr: could not allocate\n");
  18.121 +		return;
  18.122 +	}
  18.123 +	for (i = 0; i < max; i++)
  18.124 +		usage_table[i] = 1;
  18.125 +}
  18.126 +
  18.127 +struct set_mtrr_data {
  18.128 +	atomic_t	count;
  18.129 +	atomic_t	gate;
  18.130 +	unsigned long	smp_base;
  18.131 +	unsigned long	smp_size;
  18.132 +	unsigned int	smp_reg;
  18.133 +	mtrr_type	smp_type;
  18.134 +};
  18.135 +
  18.136 +#ifdef CONFIG_SMP
  18.137 +
  18.138 +static void ipi_handler(void *info)
  18.139 +/*  [SUMMARY] Synchronisation handler. Executed by "other" CPUs.
  18.140 +    [RETURNS] Nothing.
  18.141 +*/
  18.142 +{
  18.143 +	struct set_mtrr_data *data = info;
  18.144 +	unsigned long flags;
  18.145 +
  18.146 +	local_irq_save(flags);
  18.147 +
  18.148 +	atomic_dec(&data->count);
  18.149 +	while(!atomic_read(&data->gate))
  18.150 +		cpu_relax();
  18.151 +
  18.152 +	/*  The master has cleared me to execute  */
  18.153 +	if (data->smp_reg != ~0U) 
  18.154 +		mtrr_if->set(data->smp_reg, data->smp_base, 
  18.155 +			     data->smp_size, data->smp_type);
  18.156 +	else
  18.157 +		mtrr_if->set_all();
  18.158 +
  18.159 +	atomic_dec(&data->count);
  18.160 +	while(atomic_read(&data->gate))
  18.161 +		cpu_relax();
  18.162 +
  18.163 +	atomic_dec(&data->count);
  18.164 +	local_irq_restore(flags);
  18.165 +}
  18.166 +
  18.167 +#endif
  18.168 +
  18.169 +/**
  18.170 + * set_mtrr - update mtrrs on all processors
  18.171 + * @reg:	mtrr in question
  18.172 + * @base:	mtrr base
  18.173 + * @size:	mtrr size
  18.174 + * @type:	mtrr type
  18.175 + *
  18.176 + * This is kinda tricky, but fortunately, Intel spelled it out for us cleanly:
  18.177 + * 
  18.178 + * 1. Send IPI to do the following:
  18.179 + * 2. Disable Interrupts
  18.180 + * 3. Wait for all procs to do so 
  18.181 + * 4. Enter no-fill cache mode
  18.182 + * 5. Flush caches
  18.183 + * 6. Clear PGE bit
  18.184 + * 7. Flush all TLBs
  18.185 + * 8. Disable all range registers
  18.186 + * 9. Update the MTRRs
  18.187 + * 10. Enable all range registers
  18.188 + * 11. Flush all TLBs and caches again
  18.189 + * 12. Enter normal cache mode and reenable caching
  18.190 + * 13. Set PGE 
  18.191 + * 14. Wait for buddies to catch up
  18.192 + * 15. Enable interrupts.
  18.193 + * 
  18.194 + * What does that mean for us? Well, first we set data.count to the number
  18.195 + * of CPUs. As each CPU disables interrupts, it'll decrement it once. We wait
  18.196 + * until it hits 0 and proceed. We set the data.gate flag and reset data.count.
  18.197 + * Meanwhile, they are waiting for that flag to be set. Once it's set, each 
  18.198 + * CPU goes through the transition of updating MTRRs. The CPU vendors may each do it 
  18.199 + * differently, so we call mtrr_if->set() callback and let them take care of it.
  18.200 + * When they're done, they again decrement data->count and wait for data.gate to 
  18.201 + * be reset. 
  18.202 + * When we finish, we wait for data.count to hit 0 and toggle the data.gate flag.
  18.203 + * Everyone then enables interrupts and we all continue on.
  18.204 + *
  18.205 + * Note that the mechanism is the same for UP systems, too; all the SMP stuff
  18.206 + * becomes nops.
  18.207 + */
  18.208 +static void set_mtrr(unsigned int reg, unsigned long base,
  18.209 +		     unsigned long size, mtrr_type type)
  18.210 +{
  18.211 +	struct set_mtrr_data data;
  18.212 +	unsigned long flags;
  18.213 +
  18.214 +	data.smp_reg = reg;
  18.215 +	data.smp_base = base;
  18.216 +	data.smp_size = size;
  18.217 +	data.smp_type = type;
  18.218 +	atomic_set(&data.count, num_booting_cpus() - 1);
  18.219 +	atomic_set(&data.gate,0);
  18.220 +
  18.221 +	/*  Start the ball rolling on other CPUs  */
  18.222 +	if (smp_call_function(ipi_handler, &data, 1, 0) != 0)
  18.223 +		panic("mtrr: timed out waiting for other CPUs\n");
  18.224 +
  18.225 +	local_irq_save(flags);
  18.226 +
  18.227 +	while(atomic_read(&data.count))
  18.228 +		cpu_relax();
  18.229 +
  18.230 +	/* ok, reset count and toggle gate */
  18.231 +	atomic_set(&data.count, num_booting_cpus() - 1);
  18.232 +	atomic_set(&data.gate,1);
  18.233 +
  18.234 +	/* do our MTRR business */
  18.235 +
  18.236 +	/* HACK!
  18.237 +	 * We use this same function to initialize the mtrrs on boot.
  18.238 +	 * The state of the boot cpu's mtrrs has been saved, and we want
  18.239 +	 * to replicate across all the APs. 
  18.240 +	 * If we're doing that @reg is set to something special...
  18.241 +	 */
  18.242 +	if (reg != ~0U) 
  18.243 +		mtrr_if->set(reg,base,size,type);
  18.244 +
  18.245 +	/* wait for the others */
  18.246 +	while(atomic_read(&data.count))
  18.247 +		cpu_relax();
  18.248 +
  18.249 +	atomic_set(&data.count, num_booting_cpus() - 1);
  18.250 +	atomic_set(&data.gate,0);
  18.251 +
  18.252 +	/*
  18.253 +	 * Wait here for everyone to have seen the gate change
  18.254 +	 * So we're the last ones to touch 'data'
  18.255 +	 */
  18.256 +	while(atomic_read(&data.count))
  18.257 +		cpu_relax();
  18.258 +
  18.259 +	local_irq_restore(flags);
  18.260 +}
  18.261 +
  18.262 +/**
  18.263 + *	mtrr_add_page - Add a memory type region
  18.264 + *	@base: Physical base address of region in pages (4 KB)
  18.265 + *	@size: Physical size of region in pages (4 KB)
  18.266 + *	@type: Type of MTRR desired
  18.267 + *	@increment: If this is true do usage counting on the region
  18.268 + *
  18.269 + *	Memory type region registers control the caching on newer Intel and
  18.270 + *	non Intel processors. This function allows drivers to request that
  18.271 + *	an MTRR be added. The details and hardware specifics of each processor's
  18.272 + *	implementation are hidden from the caller, but nevertheless the 
  18.273 + *	caller should expect to need to provide a power of two size on an
  18.274 + *	equivalent power of two boundary.
  18.275 + *
  18.276 + *	If the region cannot be added either because all regions are in use
  18.277 + *	or the CPU cannot support it a negative value is returned. On success
  18.278 + *	the register number for this entry is returned, but should be treated
  18.279 + *	as a cookie only.
  18.280 + *
  18.281 + *	On a multiprocessor machine the changes are made to all processors.
  18.282 + *	This is required on x86 by the Intel processors.
  18.283 + *
  18.284 + *	The available types are
  18.285 + *
  18.286 + *	%MTRR_TYPE_UNCACHABLE	-	No caching
  18.287 + *
  18.288 + *	%MTRR_TYPE_WRBACK	-	Write data back in bursts whenever
  18.289 + *
  18.290 + *	%MTRR_TYPE_WRCOMB	-	Write data back soon but allow bursts
  18.291 + *
  18.292 + *	%MTRR_TYPE_WRTHROUGH	-	Cache reads but not writes
  18.293 + *
  18.294 + *	BUGS: Needs a quiet flag for the cases where drivers do not mind
  18.295 + *	failures and do not wish system log messages to be sent.
  18.296 + */
  18.297 +
  18.298 +int mtrr_add_page(unsigned long base, unsigned long size, 
  18.299 +		  unsigned int type, char increment)
  18.300 +{
  18.301 +	int i;
  18.302 +	mtrr_type ltype;
  18.303 +	unsigned long lbase;
  18.304 +	unsigned int lsize;
  18.305 +	int error;
  18.306 +
  18.307 +	if (!mtrr_if)
  18.308 +		return -ENXIO;
  18.309 +		
  18.310 +	if ((error = mtrr_if->validate_add_page(base,size,type)))
  18.311 +		return error;
  18.312 +
  18.313 +	if (type >= MTRR_NUM_TYPES) {
  18.314 +		printk(KERN_WARNING "mtrr: type: %u invalid\n", type);
  18.315 +		return -EINVAL;
  18.316 +	}
  18.317 +
  18.318 +	/*  If the type is WC, check that this processor supports it  */
  18.319 +	if ((type == MTRR_TYPE_WRCOMB) && !have_wrcomb()) {
  18.320 +		printk(KERN_WARNING
  18.321 +		       "mtrr: your processor doesn't support write-combining\n");
  18.322 +		return -ENOSYS;
  18.323 +	}
  18.324 +
  18.325 +	if (base & size_or_mask || size & size_or_mask) {
  18.326 +		printk(KERN_WARNING "mtrr: base or size exceeds the MTRR width\n");
  18.327 +		return -EINVAL;
  18.328 +	}
  18.329 +
  18.330 +	error = -EINVAL;
  18.331 +
  18.332 +	/* No CPU hotplug when we change MTRR entries */
  18.333 +	lock_cpu_hotplug();
  18.334 +	/*  Search for existing MTRR  */
  18.335 +	down(&mtrr_sem);
  18.336 +	for (i = 0; i < num_var_ranges; ++i) {
  18.337 +		mtrr_if->get(i, &lbase, &lsize, &ltype);
  18.338 +		if (base >= lbase + lsize)
  18.339 +			continue;
  18.340 +		if ((base < lbase) && (base + size <= lbase))
  18.341 +			continue;
  18.342 +		/*  At this point we know there is some kind of overlap/enclosure  */
  18.343 +		if ((base < lbase) || (base + size > lbase + lsize)) {
  18.344 +			printk(KERN_WARNING
  18.345 +			       "mtrr: 0x%lx000,0x%lx000 overlaps existing"
  18.346 +			       " 0x%lx000,0x%x000\n", base, size, lbase,
  18.347 +			       lsize);
  18.348 +			goto out;
  18.349 +		}
  18.350 +		/*  New region is enclosed by an existing region  */
  18.351 +		if (ltype != type) {
  18.352 +			if (type == MTRR_TYPE_UNCACHABLE)
  18.353 +				continue;
  18.354 +			printk (KERN_WARNING "mtrr: type mismatch for %lx000,%lx000 old: %s new: %s\n",
  18.355 +			     base, size, mtrr_attrib_to_str(ltype),
  18.356 +			     mtrr_attrib_to_str(type));
  18.357 +			goto out;
  18.358 +		}
  18.359 +		if (increment)
  18.360 +			++usage_table[i];
  18.361 +		error = i;
  18.362 +		goto out;
  18.363 +	}
  18.364 +	/*  Search for an empty MTRR  */
  18.365 +	i = mtrr_if->get_free_region(base, size);
  18.366 +	if (i >= 0) {
  18.367 +		set_mtrr(i, base, size, type);
  18.368 +		usage_table[i] = 1;
  18.369 +	} else
  18.370 +		printk(KERN_INFO "mtrr: no more MTRRs available\n");
  18.371 +	error = i;
  18.372 + out:
  18.373 +	up(&mtrr_sem);
  18.374 +	unlock_cpu_hotplug();
  18.375 +	return error;
  18.376 +}
  18.377 +
  18.378 +static int mtrr_check(unsigned long base, unsigned long size)
  18.379 +{
  18.380 +	if ((base & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1))) {
  18.381 +		printk(KERN_WARNING
  18.382 +			"mtrr: size and base must be multiples of 4 kiB\n");
  18.383 +		printk(KERN_DEBUG
  18.384 +			"mtrr: size: 0x%lx  base: 0x%lx\n", size, base);
  18.385 +		dump_stack();
  18.386 +		return -1;
  18.387 +	}
  18.388 +	return 0;
  18.389 +}
  18.390 +
  18.391 +/**
  18.392 + *	mtrr_add - Add a memory type region
  18.393 + *	@base: Physical base address of region
  18.394 + *	@size: Physical size of region
  18.395 + *	@type: Type of MTRR desired
  18.396 + *	@increment: If this is true do usage counting on the region
  18.397 + *
  18.398 + *	Memory type region registers control the caching on newer Intel and
  18.399 + *	non Intel processors. This function allows drivers to request that
  18.400 + *	an MTRR be added. The details and hardware specifics of each processor's
  18.401 + *	implementation are hidden from the caller, but nevertheless the 
  18.402 + *	caller should expect to need to provide a power of two size on an
  18.403 + *	equivalent power of two boundary.
  18.404 + *
  18.405 + *	If the region cannot be added either because all regions are in use
  18.406 + *	or the CPU cannot support it a negative value is returned. On success
  18.407 + *	the register number for this entry is returned, but should be treated
  18.408 + *	as a cookie only.
  18.409 + *
  18.410 + *	On a multiprocessor machine the changes are made to all processors.
  18.411 + *	This is required on x86 by the Intel processors.
  18.412 + *
  18.413 + *	The available types are
  18.414 + *
  18.415 + *	%MTRR_TYPE_UNCACHABLE	-	No caching
  18.416 + *
  18.417 + *	%MTRR_TYPE_WRBACK	-	Write data back in bursts whenever
  18.418 + *
  18.419 + *	%MTRR_TYPE_WRCOMB	-	Write data back soon but allow bursts
  18.420 + *
  18.421 + *	%MTRR_TYPE_WRTHROUGH	-	Cache reads but not writes
  18.422 + *
  18.423 + *	BUGS: Needs a quiet flag for the cases where drivers do not mind
  18.424 + *	failures and do not wish system log messages to be sent.
  18.425 + */
  18.426 +
  18.427 +int
  18.428 +mtrr_add(unsigned long base, unsigned long size, unsigned int type,
  18.429 +	 char increment)
  18.430 +{
  18.431 +	if (mtrr_check(base, size))
  18.432 +		return -EINVAL;
  18.433 +	return mtrr_add_page(base >> PAGE_SHIFT, size >> PAGE_SHIFT, type,
  18.434 +			     increment);
  18.435 +}
  18.436 +
  18.437 +/**
  18.438 + *	mtrr_del_page - delete a memory type region
  18.439 + *	@reg: Register returned by mtrr_add
  18.440 + *	@base: Physical base address
  18.441 + *	@size: Size of region
  18.442 + *
  18.443 + *	If register is supplied then base and size are ignored. This is
  18.444 + *	how drivers should call it.
  18.445 + *
  18.446 + *	Releases an MTRR region. If the usage count drops to zero the 
  18.447 + *	register is freed and the region returns to default state.
  18.448 + *	On success the register is returned, on failure a negative error
  18.449 + *	code.
  18.450 + */
  18.451 +
  18.452 +int mtrr_del_page(int reg, unsigned long base, unsigned long size)
  18.453 +{
  18.454 +	int i, max;
  18.455 +	mtrr_type ltype;
  18.456 +	unsigned long lbase;
  18.457 +	unsigned int lsize;
  18.458 +	int error = -EINVAL;
  18.459 +
  18.460 +	if (!mtrr_if)
  18.461 +		return -ENXIO;
  18.462 +
  18.463 +	max = num_var_ranges;
  18.464 +	/* No CPU hotplug when we change MTRR entries */
  18.465 +	lock_cpu_hotplug();
  18.466 +	down(&mtrr_sem);
  18.467 +	if (reg < 0) {
  18.468 +		/*  Search for existing MTRR  */
  18.469 +		for (i = 0; i < max; ++i) {
  18.470 +			mtrr_if->get(i, &lbase, &lsize, &ltype);
  18.471 +			if (lbase == base && lsize == size) {
  18.472 +				reg = i;
  18.473 +				break;
  18.474 +			}
  18.475 +		}
  18.476 +		if (reg < 0) {
  18.477 +			printk(KERN_DEBUG "mtrr: no MTRR for %lx000,%lx000 found\n", base,
  18.478 +			       size);
  18.479 +			goto out;
  18.480 +		}
  18.481 +	}
  18.482 +	if (reg >= max) {
  18.483 +		printk(KERN_WARNING "mtrr: register: %d too big\n", reg);
  18.484 +		goto out;
  18.485 +	}
  18.486 +	if (is_cpu(CYRIX) && !use_intel()) {
  18.487 +		if ((reg == 3) && arr3_protected) {
  18.488 +			printk(KERN_WARNING "mtrr: ARR3 cannot be changed\n");
  18.489 +			goto out;
  18.490 +		}
  18.491 +	}
  18.492 +	mtrr_if->get(reg, &lbase, &lsize, &ltype);
  18.493 +	if (lsize < 1) {
  18.494 +		printk(KERN_WARNING "mtrr: MTRR %d not used\n", reg);
  18.495 +		goto out;
  18.496 +	}
  18.497 +	if (usage_table[reg] < 1) {
  18.498 +		printk(KERN_WARNING "mtrr: reg: %d has count=0\n", reg);
  18.499 +		goto out;
  18.500 +	}
  18.501 +	if (--usage_table[reg] < 1)
  18.502 +		set_mtrr(reg, 0, 0, 0);
  18.503 +	error = reg;
  18.504 + out:
  18.505 +	up(&mtrr_sem);
  18.506 +	unlock_cpu_hotplug();
  18.507 +	return error;
  18.508 +}
  18.509 +/**
  18.510 + *	mtrr_del - delete a memory type region
  18.511 + *	@reg: Register returned by mtrr_add
  18.512 + *	@base: Physical base address
  18.513 + *	@size: Size of region
  18.514 + *
  18.515 + *	If register is supplied then base and size are ignored. This is
  18.516 + *	how drivers should call it.
  18.517 + *
  18.518 + *	Releases an MTRR region. If the usage count drops to zero the 
  18.519 + *	register is freed and the region returns to default state.
  18.520 + *	On success the register is returned, on failure a negative error
  18.521 + *	code.
  18.522 + */
  18.523 +
  18.524 +int
  18.525 +mtrr_del(int reg, unsigned long base, unsigned long size)
  18.526 +{
  18.527 +	if (mtrr_check(base, size))
  18.528 +		return -EINVAL;
  18.529 +	return mtrr_del_page(reg, base >> PAGE_SHIFT, size >> PAGE_SHIFT);
  18.530 +}
  18.531 +
  18.532 +EXPORT_SYMBOL(mtrr_add);
  18.533 +EXPORT_SYMBOL(mtrr_del);
  18.534 +
  18.535 +/* HACK ALERT!
  18.536 + * These should be called implicitly, but we can't do that until all the
  18.537 + * initcall stuff is done...
  18.538 + */
  18.539 +extern void amd_init_mtrr(void);
  18.540 +extern void cyrix_init_mtrr(void);
  18.541 +extern void centaur_init_mtrr(void);
  18.542 +
  18.543 +static void __init init_ifs(void)
  18.544 +{
  18.545 +	amd_init_mtrr();
  18.546 +	cyrix_init_mtrr();
  18.547 +	centaur_init_mtrr();
  18.548 +}
  18.549 +
  18.550 +/* The suspend/resume methods are only for CPUs without MTRRs. CPUs using the
  18.551 + * generic MTRR driver don't require this.
  18.552 + */
  18.553 +struct mtrr_value {
  18.554 +	mtrr_type	ltype;
  18.555 +	unsigned long	lbase;
  18.556 +	unsigned int	lsize;
  18.557 +};
  18.558 +
  18.559 +/**
  18.560 + * mtrr_bp_init - initialize mtrrs on the boot CPU
  18.561 + *
  18.562 + * This needs to be called early; before any of the other CPUs are 
  18.563 + * initialized (i.e. before smp_init()).
  18.564 + * 
  18.565 + */
  18.566 +void __init mtrr_bp_init(void)
  18.567 +{
  18.568 +	init_ifs();
  18.569 +
  18.570 +	if (cpu_has_mtrr) {
  18.571 +		mtrr_if = &generic_mtrr_ops;
  18.572 +		size_or_mask = 0xff000000;	/* 36 bits */
  18.573 +		size_and_mask = 0x00f00000;
  18.574 +
  18.575 +		/* This is an AMD-specific MSR, but we assume (hope?) that
  18.576 +		   Intel will implement it too when they extend the address
  18.577 +		   bus of the Xeon. */
  18.578 +		if (cpuid_eax(0x80000000) >= 0x80000008) {
  18.579 +			u32 phys_addr;
  18.580 +			phys_addr = cpuid_eax(0x80000008) & 0xff;
  18.581 +			/* CPUID workaround for Intel 0F33/0F34 CPU */
  18.582 +			if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
  18.583 +			    boot_cpu_data.x86 == 0xF &&
  18.584 +			    boot_cpu_data.x86_model == 0x3 &&
  18.585 +			    (boot_cpu_data.x86_mask == 0x3 ||
  18.586 +			     boot_cpu_data.x86_mask == 0x4))
  18.587 +				phys_addr = 36;
  18.588 +
  18.589 +			size_or_mask = ~((1 << (phys_addr - PAGE_SHIFT)) - 1);
  18.590 +			size_and_mask = ~size_or_mask & 0xfff00000;
  18.591 +		} else if (boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR &&
  18.592 +			   boot_cpu_data.x86 == 6) {
  18.593 +			/* VIA C* family have Intel style MTRRs, but
  18.594 +			   don't support PAE */
  18.595 +			size_or_mask = 0xfff00000;	/* 32 bits */
  18.596 +			size_and_mask = 0;
  18.597 +		}
  18.598 +	} else {
  18.599 +		switch (boot_cpu_data.x86_vendor) {
  18.600 +		case X86_VENDOR_AMD:
  18.601 +			if (cpu_has_k6_mtrr) {
  18.602 +				/* Pre-Athlon (K6) AMD CPU MTRRs */
  18.603 +				mtrr_if = mtrr_ops[X86_VENDOR_AMD];
  18.604 +				size_or_mask = 0xfff00000;	/* 32 bits */
  18.605 +				size_and_mask = 0;
  18.606 +			}
  18.607 +			break;
  18.608 +		case X86_VENDOR_CENTAUR:
  18.609 +			if (cpu_has_centaur_mcr) {
  18.610 +				mtrr_if = mtrr_ops[X86_VENDOR_CENTAUR];
  18.611 +				size_or_mask = 0xfff00000;	/* 32 bits */
  18.612 +				size_and_mask = 0;
  18.613 +			}
  18.614 +			break;
  18.615 +		case X86_VENDOR_CYRIX:
  18.616 +			if (cpu_has_cyrix_arr) {
  18.617 +				mtrr_if = mtrr_ops[X86_VENDOR_CYRIX];
  18.618 +				size_or_mask = 0xfff00000;	/* 32 bits */
  18.619 +				size_and_mask = 0;
  18.620 +			}
  18.621 +			break;
  18.622 +		default:
  18.623 +			break;
  18.624 +		}
  18.625 +	}
  18.626 +
  18.627 +	if (mtrr_if) {
  18.628 +		set_num_var_ranges();
  18.629 +		init_table();
  18.630 +		if (use_intel())
  18.631 +			get_mtrr_state();
  18.632 +	}
  18.633 +}
  18.634 +
  18.635 +void mtrr_ap_init(void)
  18.636 +{
  18.637 +	unsigned long flags;
  18.638 +
  18.639 +	if (!mtrr_if || !use_intel())
  18.640 +		return;
  18.641 +	/*
  18.642 +	 * Ideally we should hold mtrr_sem here to avoid MTRR entries being
  18.643 +	 * changed, but this routine is called at CPU boot time and holding
  18.644 +	 * the lock would break that. It is called in two cases: 1. very early
  18.645 +	 * during software resume, when no MTRR entries can change; 2. at CPU
  18.646 +	 * hot-add time, where we let mtrr_add/del_page hold the cpuhotplug
  18.647 +	 * lock to prevent MTRR entry changes.
  18.648 +	 */
  18.649 +	local_irq_save(flags);
  18.650 +
  18.651 +	mtrr_if->set_all();
  18.652 +
  18.653 +	local_irq_restore(flags);
  18.654 +}
  18.655 +
  18.656 +static int __init mtrr_init_finialize(void)
  18.657 +{
  18.658 +	if (!mtrr_if)
  18.659 +		return 0;
  18.660 +	if (use_intel())
  18.661 +		mtrr_state_warn();
  18.662 +	return 0;
  18.663 +}
  18.664 +__initcall(mtrr_init_finialize);
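
(Illustrative aside, not part of the changeset: the mask arithmetic in mtrr_bp_init() above collapses to the hard-coded "36 bits" defaults when CPUID leaf 0x80000008 reports a 36-bit physical address width. A minimal standalone C check, assuming PAGE_SHIFT is 12 as on x86:)

#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
	unsigned int phys_addr = 36;	/* address width in bits, per CPUID 0x80000008 */
	unsigned int size_or_mask  = ~((1u << (phys_addr - PAGE_SHIFT)) - 1);
	unsigned int size_and_mask = ~size_or_mask & 0xfff00000;

	/* Prints "or=ff000000 and=00f00000" -- exactly the defaults assigned
	   above before the CPUID-derived width is taken into account. */
	printf("or=%08x and=%08x\n", size_or_mask, size_and_mask);
	return 0;
}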
    19.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    19.2 +++ b/xen/arch/x86/cpu/mtrr/mtrr.h	Tue Feb 14 16:23:43 2006 +0100
    19.3 @@ -0,0 +1,97 @@
    19.4 +/*
    19.5 + * local mtrr defines.
    19.6 + */
    19.7 +
    19.8 +#ifndef TRUE
    19.9 +#define TRUE  1
   19.10 +#define FALSE 0
   19.11 +#endif
   19.12 +
   19.13 +#define MTRRcap_MSR     0x0fe
   19.14 +#define MTRRdefType_MSR 0x2ff
   19.15 +
   19.16 +#define MTRRphysBase_MSR(reg) (0x200 + 2 * (reg))
   19.17 +#define MTRRphysMask_MSR(reg) (0x200 + 2 * (reg) + 1)
   19.18 +
   19.19 +#define NUM_FIXED_RANGES 88
   19.20 +#define MTRRfix64K_00000_MSR 0x250
   19.21 +#define MTRRfix16K_80000_MSR 0x258
   19.22 +#define MTRRfix16K_A0000_MSR 0x259
   19.23 +#define MTRRfix4K_C0000_MSR 0x268
   19.24 +#define MTRRfix4K_C8000_MSR 0x269
   19.25 +#define MTRRfix4K_D0000_MSR 0x26a
   19.26 +#define MTRRfix4K_D8000_MSR 0x26b
   19.27 +#define MTRRfix4K_E0000_MSR 0x26c
   19.28 +#define MTRRfix4K_E8000_MSR 0x26d
   19.29 +#define MTRRfix4K_F0000_MSR 0x26e
   19.30 +#define MTRRfix4K_F8000_MSR 0x26f
   19.31 +
   19.32 +#define MTRR_CHANGE_MASK_FIXED     0x01
   19.33 +#define MTRR_CHANGE_MASK_VARIABLE  0x02
   19.34 +#define MTRR_CHANGE_MASK_DEFTYPE   0x04
   19.35 +
   19.36 +/* In the Intel processor's MTRR interface, the MTRR type is always held in
   19.37 +   an 8 bit field: */
   19.38 +typedef u8 mtrr_type;
   19.39 +
   19.40 +struct mtrr_ops {
   19.41 +	u32	vendor;
   19.42 +	u32	use_intel_if;
   19.43 +//	void	(*init)(void);
   19.44 +	void	(*set)(unsigned int reg, unsigned long base,
   19.45 +		       unsigned long size, mtrr_type type);
   19.46 +	void	(*set_all)(void);
   19.47 +
   19.48 +	void	(*get)(unsigned int reg, unsigned long *base,
   19.49 +		       unsigned int *size, mtrr_type * type);
   19.50 +	int	(*get_free_region) (unsigned long base, unsigned long size);
   19.51 +
   19.52 +	int	(*validate_add_page)(unsigned long base, unsigned long size,
   19.53 +				     unsigned int type);
   19.54 +	int	(*have_wrcomb)(void);
   19.55 +};
   19.56 +
   19.57 +extern int generic_get_free_region(unsigned long base, unsigned long size);
   19.58 +extern int generic_validate_add_page(unsigned long base, unsigned long size,
   19.59 +				     unsigned int type);
   19.60 +
   19.61 +extern struct mtrr_ops generic_mtrr_ops;
   19.62 +
   19.63 +extern int positive_have_wrcomb(void);
   19.64 +
   19.65 +/* library functions for processor-specific routines */
   19.66 +struct set_mtrr_context {
   19.67 +	unsigned long flags;
   19.68 +	unsigned long deftype_lo;
   19.69 +	unsigned long deftype_hi;
   19.70 +	unsigned long cr4val;
   19.71 +	unsigned long ccr3;
   19.72 +};
   19.73 +
   19.74 +struct mtrr_var_range {
   19.75 +	unsigned long base_lo;
   19.76 +	unsigned long base_hi;
   19.77 +	unsigned long mask_lo;
   19.78 +	unsigned long mask_hi;
   19.79 +};
   19.80 +
   19.81 +void set_mtrr_done(struct set_mtrr_context *ctxt);
   19.82 +void set_mtrr_cache_disable(struct set_mtrr_context *ctxt);
   19.83 +void set_mtrr_prepare_save(struct set_mtrr_context *ctxt);
   19.84 +
   19.85 +void get_mtrr_state(void);
   19.86 +
   19.87 +extern void set_mtrr_ops(struct mtrr_ops * ops);
   19.88 +
   19.89 +extern u32 size_or_mask, size_and_mask;
   19.90 +extern struct mtrr_ops * mtrr_if;
   19.91 +
   19.92 +#define is_cpu(vnd)	(mtrr_if && mtrr_if->vendor == X86_VENDOR_##vnd)
   19.93 +#define use_intel()	(mtrr_if && mtrr_if->use_intel_if == 1)
   19.94 +
   19.95 +extern unsigned int num_var_ranges;
   19.96 +
   19.97 +void mtrr_state_warn(void);
   19.98 +char *mtrr_attrib_to_str(int x);
   19.99 +void mtrr_wrmsr(unsigned, unsigned, unsigned);
  19.100 +
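
(Illustrative aside, not part of the changeset: the MTRRphysBase_MSR()/MTRRphysMask_MSR() macros above lay the variable-range MTRRs out as consecutive base/mask MSR pairs starting at 0x200. A small standalone C sketch that prints the mapping:)

#include <stdio.h>

#define MTRRphysBase_MSR(reg) (0x200 + 2 * (reg))
#define MTRRphysMask_MSR(reg) (0x200 + 2 * (reg) + 1)

int main(void)
{
	int reg;

	/* reg 0 -> MSRs 0x200/0x201, reg 1 -> 0x202/0x203, ..., reg 7 -> 0x20e/0x20f */
	for (reg = 0; reg < 8; reg++)
		printf("var range %d: base MSR %#x, mask MSR %#x\n",
		       reg, MTRRphysBase_MSR(reg), MTRRphysMask_MSR(reg));
	return 0;
}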
    20.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    20.2 +++ b/xen/arch/x86/cpu/mtrr/state.c	Tue Feb 14 16:23:43 2006 +0100
    20.3 @@ -0,0 +1,78 @@
    20.4 +#include <xen/mm.h>
    20.5 +#include <xen/init.h>
    20.6 +#include <asm/io.h>
    20.7 +#include <asm/mtrr.h>
    20.8 +#include <asm/msr.h>
    20.9 +#include "mtrr.h"
   20.10 +
   20.11 +
   20.12 +/*  Put the processor into a state where MTRRs can be safely set  */
   20.13 +void set_mtrr_prepare_save(struct set_mtrr_context *ctxt)
   20.14 +{
   20.15 +	unsigned int cr0;
   20.16 +
   20.17 +	/*  Disable interrupts locally  */
   20.18 +	local_irq_save(ctxt->flags);
   20.19 +
   20.20 +	if (use_intel() || is_cpu(CYRIX)) {
   20.21 +
   20.22 +		/*  Save value of CR4 and clear Page Global Enable (bit 7)  */
   20.23 +		if ( cpu_has_pge ) {
   20.24 +			ctxt->cr4val = read_cr4();
   20.25 +			write_cr4(ctxt->cr4val & (unsigned char) ~(1 << 7));
   20.26 +		}
   20.27 +
   20.28 +		/*  Disable and flush caches. Note that wbinvd flushes the TLBs as
   20.29 +		    a side-effect  */
   20.30 +		cr0 = read_cr0() | 0x40000000;
   20.31 +		wbinvd();
   20.32 +		write_cr0(cr0);
   20.33 +		wbinvd();
   20.34 +
   20.35 +		if (use_intel())
   20.36 +			/*  Save MTRR state */
   20.37 +			rdmsr(MTRRdefType_MSR, ctxt->deftype_lo, ctxt->deftype_hi);
   20.38 +		else
   20.39 +			/* Cyrix ARRs - everything else was excluded at the top */
   20.40 +			ctxt->ccr3 = getCx86(CX86_CCR3);
   20.41 +	}
   20.42 +}
   20.43 +
   20.44 +void set_mtrr_cache_disable(struct set_mtrr_context *ctxt)
   20.45 +{
   20.46 +	if (use_intel()) 
   20.47 +		/*  Disable MTRRs, and set the default type to uncached  */
   20.48 +		mtrr_wrmsr(MTRRdefType_MSR, ctxt->deftype_lo & 0xf300UL,
   20.49 +		      ctxt->deftype_hi);
   20.50 +	else if (is_cpu(CYRIX))
   20.51 +		/* Cyrix ARRs - everything else was excluded at the top */
   20.52 +		setCx86(CX86_CCR3, (ctxt->ccr3 & 0x0f) | 0x10);
   20.53 +}
   20.54 +
   20.55 +/*  Restore the processor after a set_mtrr_prepare  */
   20.56 +void set_mtrr_done(struct set_mtrr_context *ctxt)
   20.57 +{
   20.58 +	if (use_intel() || is_cpu(CYRIX)) {
   20.59 +
   20.60 +		/*  Flush caches and TLBs  */
   20.61 +		wbinvd();
   20.62 +
   20.63 +		/*  Restore MTRRdefType  */
   20.64 +		if (use_intel())
   20.65 +			/* Intel (P6) standard MTRRs */
   20.66 +			mtrr_wrmsr(MTRRdefType_MSR, ctxt->deftype_lo, ctxt->deftype_hi);
   20.67 +		else
   20.68 +			/* Cyrix ARRs - everything else was excluded at the top */
   20.69 +			setCx86(CX86_CCR3, ctxt->ccr3);
   20.70 +		
   20.71 +		/*  Enable caches  */
   20.72 +		write_cr0(read_cr0() & 0xbfffffff);
   20.73 +
   20.74 +		/*  Restore value of CR4  */
   20.75 +		if ( cpu_has_pge )
   20.76 +			write_cr4(ctxt->cr4val);
   20.77 +	}
   20.78 +	/*  Re-enable interrupts locally (if enabled previously)  */
   20.79 +	local_irq_restore(ctxt->flags);
   20.80 +}
   20.81 +
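
(Illustrative aside, not part of the changeset: the three helpers above are intended to bracket vendor-specific MTRR/ARR reprogramming, as the Cyrix and Centaur init paths removed later in this changeset do. A hypothetical caller, with the actual register writes elided:)

#include <xen/init.h>
#include "mtrr.h"

/* Hypothetical example only -- not a function in this changeset. */
static void __init reprogram_ranges_example(void)
{
	struct set_mtrr_context ctxt;

	set_mtrr_prepare_save(&ctxt);	/* IRQs off, caches flushed and disabled, old state saved */
	set_mtrr_cache_disable(&ctxt);	/* MTRRs (or Cyrix ARRs) disabled, default type uncached */

	/* ... program the new range registers here ... */

	set_mtrr_done(&ctxt);		/* restore MTRR/CCR3 state, re-enable caches, CR4, IRQs */
}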
    21.1 --- a/xen/arch/x86/dom0_ops.c	Mon Feb 13 17:41:23 2006 +0100
    21.2 +++ b/xen/arch/x86/dom0_ops.c	Tue Feb 14 16:23:43 2006 +0100
    21.3 @@ -26,7 +26,7 @@
    21.4  #include <public/sched_ctl.h>
    21.5  
    21.6  #include <asm/mtrr.h>
    21.7 -#include "mtrr/mtrr.h"
    21.8 +#include "cpu/mtrr/mtrr.h"
    21.9  
   21.10  #define TRC_DOM0OP_ENTER_BASE  0x00020000
   21.11  #define TRC_DOM0OP_LEAVE_BASE  0x00030000
   21.12 @@ -39,13 +39,13 @@ static unsigned long msr_hi;
   21.13  static void write_msr_for(void *unused)
   21.14  {
   21.15      if ( ((1 << smp_processor_id()) & msr_cpu_mask) )
   21.16 -        (void)wrmsr_user(msr_addr, msr_lo, msr_hi);
   21.17 +        (void)wrmsr_safe(msr_addr, msr_lo, msr_hi);
   21.18  }
   21.19  
   21.20  static void read_msr_for(void *unused)
   21.21  {
   21.22      if ( ((1 << smp_processor_id()) & msr_cpu_mask) )
   21.23 -        (void)rdmsr_user(msr_addr, msr_lo, msr_hi);
   21.24 +        (void)rdmsr_safe(msr_addr, msr_lo, msr_hi);
   21.25  }
   21.26  
   21.27  long arch_do_dom0_op(struct dom0_op *op, struct dom0_op *u_dom0_op)
   21.28 @@ -182,7 +182,7 @@ long arch_do_dom0_op(struct dom0_op *op,
   21.29          dom0_physinfo_t *pi = &op->u.physinfo;
   21.30  
   21.31          pi->threads_per_core = smp_num_siblings;
   21.32 -        pi->cores_per_socket = boot_cpu_data.x86_num_cores;
   21.33 +        pi->cores_per_socket = boot_cpu_data.x86_max_cores;
   21.34          pi->sockets_per_node = 
   21.35              num_online_cpus() / (pi->threads_per_core * pi->cores_per_socket);
   21.36          pi->nr_nodes         = 1;
    22.1 --- a/xen/arch/x86/hvm/svm/svm.c	Mon Feb 13 17:41:23 2006 +0100
    22.2 +++ b/xen/arch/x86/hvm/svm/svm.c	Tue Feb 14 16:23:43 2006 +0100
    22.3 @@ -69,8 +69,6 @@ extern void do_nmi(struct cpu_user_regs 
    22.4  extern int inst_copy_from_guest(unsigned char *buf, unsigned long guest_eip,
    22.5                                  int inst_len);
    22.6  extern asmlinkage void do_IRQ(struct cpu_user_regs *);
    22.7 -extern void smp_apic_timer_interrupt(struct cpu_user_regs *);
    22.8 -extern void timer_interrupt(int, void *, struct cpu_user_regs *);
    22.9  extern void send_pio_req(struct cpu_user_regs *regs, unsigned long port,
   22.10         unsigned long count, int size, long value, int dir, int pvalid);
   22.11  extern int svm_instrlen(struct cpu_user_regs *regs, int mode);
   22.12 @@ -1761,7 +1759,7 @@ static inline void svm_do_msr_access(str
   22.13          default:
   22.14              if (long_mode_do_msr_read(regs))
   22.15                  goto done;
   22.16 -            rdmsr_user(regs->ecx, regs->eax, regs->edx);
   22.17 +            rdmsr_safe(regs->ecx, regs->eax, regs->edx);
   22.18              break;
   22.19          }
   22.20      }
    23.1 --- a/xen/arch/x86/hvm/vmx/vmx.c	Mon Feb 13 17:41:23 2006 +0100
    23.2 +++ b/xen/arch/x86/hvm/vmx/vmx.c	Tue Feb 14 16:23:43 2006 +0100
    23.3 @@ -1568,7 +1568,7 @@ static inline void vmx_do_msr_read(struc
    23.4      default:
    23.5          if(long_mode_do_msr_read(regs))
    23.6              return;
    23.7 -        rdmsr_user(regs->ecx, regs->eax, regs->edx);
    23.8 +        rdmsr_safe(regs->ecx, regs->eax, regs->edx);
    23.9          break;
   23.10      }
   23.11  
   23.12 @@ -1658,13 +1658,12 @@ static inline void vmx_vmexit_do_extint(
   23.13      int error;
   23.14  
   23.15      asmlinkage void do_IRQ(struct cpu_user_regs *);
   23.16 -    void smp_apic_timer_interrupt(struct cpu_user_regs *);
   23.17 -    void timer_interrupt(int, void *, struct cpu_user_regs *);
   23.18 -    void smp_event_check_interrupt(void);
   23.19 -    void smp_invalidate_interrupt(void);
   23.20 -    void smp_call_function_interrupt(void);
   23.21 -    void smp_spurious_interrupt(struct cpu_user_regs *regs);
   23.22 -    void smp_error_interrupt(struct cpu_user_regs *regs);
   23.23 +    fastcall void smp_apic_timer_interrupt(struct cpu_user_regs *);
   23.24 +    fastcall void smp_event_check_interrupt(void);
   23.25 +    fastcall void smp_invalidate_interrupt(void);
   23.26 +    fastcall void smp_call_function_interrupt(void);
   23.27 +    fastcall void smp_spurious_interrupt(struct cpu_user_regs *regs);
   23.28 +    fastcall void smp_error_interrupt(struct cpu_user_regs *regs);
   23.29  
   23.30      if ((error = __vmread(VM_EXIT_INTR_INFO, &vector))
   23.31          && !(vector & INTR_INFO_VALID_MASK))
    24.1 --- a/xen/arch/x86/i8259.c	Mon Feb 13 17:41:23 2006 +0100
    24.2 +++ b/xen/arch/x86/i8259.c	Tue Feb 14 16:23:43 2006 +0100
    24.3 @@ -68,9 +68,10 @@ BUILD_SMP_INTERRUPT(call_function_interr
    24.4   * overflow. Linux uses the local APIC timer interrupt to get
    24.5   * a much simpler SMP time architecture:
    24.6   */
    24.7 -BUILD_SMP_TIMER_INTERRUPT(apic_timer_interrupt,LOCAL_TIMER_VECTOR)
    24.8 +BUILD_SMP_INTERRUPT(apic_timer_interrupt,LOCAL_TIMER_VECTOR)
    24.9  BUILD_SMP_INTERRUPT(error_interrupt,ERROR_APIC_VECTOR)
   24.10  BUILD_SMP_INTERRUPT(spurious_interrupt,SPURIOUS_APIC_VECTOR)
   24.11 +BUILD_SMP_INTERRUPT(thermal_interrupt,THERMAL_APIC_VECTOR)
   24.12  
   24.13  #define IRQ(x,y) \
   24.14      IRQ##x##y##_interrupt
   24.15 @@ -391,6 +392,7 @@ void __init init_IRQ(void)
   24.16      /* IPI vectors for APIC spurious and error interrupts. */
   24.17      set_intr_gate(SPURIOUS_APIC_VECTOR, spurious_interrupt);
   24.18      set_intr_gate(ERROR_APIC_VECTOR, error_interrupt);
   24.19 +    set_intr_gate(THERMAL_APIC_VECTOR, thermal_interrupt);
   24.20  
   24.21      /* Set the clock to HZ Hz */
   24.22  #define CLOCK_TICK_RATE 1193180 /* crystal freq (Hz) */
    25.1 --- a/xen/arch/x86/mtrr/amd.c	Mon Feb 13 17:41:23 2006 +0100
    25.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    25.3 @@ -1,121 +0,0 @@
    25.4 -#include <xen/init.h>
    25.5 -#include <xen/mm.h>
    25.6 -#include <asm/mtrr.h>
    25.7 -#include <asm/msr.h>
    25.8 -
    25.9 -#include "mtrr.h"
   25.10 -
   25.11 -static void
   25.12 -amd_get_mtrr(unsigned int reg, unsigned long *base,
   25.13 -	     unsigned int *size, mtrr_type * type)
   25.14 -{
   25.15 -	unsigned long low, high;
   25.16 -
   25.17 -	rdmsr(MSR_K6_UWCCR, low, high);
   25.18 -	/*  Upper dword is region 1, lower is region 0  */
   25.19 -	if (reg == 1)
   25.20 -		low = high;
   25.21 -	/*  The base masks off on the right alignment  */
   25.22 -	*base = (low & 0xFFFE0000) >> PAGE_SHIFT;
   25.23 -	*type = 0;
   25.24 -	if (low & 1)
   25.25 -		*type = MTRR_TYPE_UNCACHABLE;
   25.26 -	if (low & 2)
   25.27 -		*type = MTRR_TYPE_WRCOMB;
   25.28 -	if (!(low & 3)) {
   25.29 -		*size = 0;
   25.30 -		return;
   25.31 -	}
   25.32 -	/*
   25.33 -	 *  This needs a little explaining. The size is stored as an
   25.34 -	 *  inverted mask of bits of 128K granularity 15 bits long offset
   25.35 -	 *  2 bits
   25.36 -	 *
   25.37 -	 *  So to get a size we do invert the mask and add 1 to the lowest
   25.38 -	 *  mask bit (4 as its 2 bits in). This gives us a size we then shift
   25.39 -	 *  to turn into 128K blocks
   25.40 -	 *
   25.41 -	 *  eg              111 1111 1111 1100      is 512K
   25.42 -	 *
   25.43 -	 *  invert          000 0000 0000 0011
   25.44 -	 *  +1              000 0000 0000 0100
   25.45 -	 *  *128K   ...
   25.46 -	 */
   25.47 -	low = (~low) & 0x1FFFC;
   25.48 -	*size = (low + 4) << (15 - PAGE_SHIFT);
   25.49 -	return;
   25.50 -}
   25.51 -
   25.52 -static void amd_set_mtrr(unsigned int reg, unsigned long base,
   25.53 -			 unsigned long size, mtrr_type type)
   25.54 -/*  [SUMMARY] Set variable MTRR register on the local CPU.
   25.55 -    <reg> The register to set.
   25.56 -    <base> The base address of the region.
   25.57 -    <size> The size of the region. If this is 0 the region is disabled.
   25.58 -    <type> The type of the region.
   25.59 -    <do_safe> If TRUE, do the change safely. If FALSE, safety measures should
   25.60 -    be done externally.
   25.61 -    [RETURNS] Nothing.
   25.62 -*/
   25.63 -{
   25.64 -	u32 regs[2];
   25.65 -
   25.66 -	/*
   25.67 -	 *  Low is MTRR0 , High MTRR 1
   25.68 -	 */
   25.69 -	rdmsr(MSR_K6_UWCCR, regs[0], regs[1]);
   25.70 -	/*
   25.71 -	 *  Blank to disable
   25.72 -	 */
   25.73 -	if (size == 0)
   25.74 -		regs[reg] = 0;
   25.75 -	else
   25.76 -		/* Set the register to the base, the type (off by one) and an
   25.77 -		   inverted bitmask of the size The size is the only odd
   25.78 -		   bit. We are fed say 512K We invert this and we get 111 1111
   25.79 -		   1111 1011 but if you subtract one and invert you get the   
   25.80 -		   desired 111 1111 1111 1100 mask
   25.81 -
   25.82 -		   But ~(x - 1) == ~x + 1 == -x. Two's complement rocks!  */
   25.83 -		regs[reg] = (-size >> (15 - PAGE_SHIFT) & 0x0001FFFC)
   25.84 -		    | (base << PAGE_SHIFT) | (type + 1);
   25.85 -
   25.86 -	/*
   25.87 -	 *  The writeback rule is quite specific. See the manual. Its
   25.88 -	 *  disable local interrupts, write back the cache, set the mtrr
   25.89 -	 */
   25.90 -	wbinvd();
   25.91 -	wrmsr(MSR_K6_UWCCR, regs[0], regs[1]);
   25.92 -}
   25.93 -
   25.94 -static int amd_validate_add_page(unsigned long base, unsigned long size, unsigned int type)
   25.95 -{
   25.96 -	/* Apply the K6 block alignment and size rules
   25.97 -	   In order
   25.98 -	   o Uncached or gathering only
   25.99 -	   o 128K or bigger block
  25.100 -	   o Power of 2 block
  25.101 -	   o base suitably aligned to the power
  25.102 -	*/
  25.103 -	if (type > MTRR_TYPE_WRCOMB || size < (1 << (17 - PAGE_SHIFT))
  25.104 -	    || (size & ~(size - 1)) - size || (base & (size - 1)))
  25.105 -		return -EINVAL;
  25.106 -	return 0;
  25.107 -}
  25.108 -
  25.109 -static struct mtrr_ops amd_mtrr_ops = {
  25.110 -	.vendor            = X86_VENDOR_AMD,
  25.111 -	.set               = amd_set_mtrr,
  25.112 -	.get               = amd_get_mtrr,
  25.113 -	.get_free_region   = generic_get_free_region,
  25.114 -	.validate_add_page = amd_validate_add_page,
  25.115 -	.have_wrcomb       = positive_have_wrcomb,
  25.116 -};
  25.117 -
  25.118 -int __init amd_init_mtrr(void)
  25.119 -{
  25.120 -	set_mtrr_ops(&amd_mtrr_ops);
  25.121 -	return 0;
  25.122 -}
  25.123 -
  25.124 -//arch_initcall(amd_mtrr_init);
    26.1 --- a/xen/arch/x86/mtrr/centaur.c	Mon Feb 13 17:41:23 2006 +0100
    26.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    26.3 @@ -1,220 +0,0 @@
    26.4 -#include <xen/init.h>
    26.5 -#include <xen/mm.h>
    26.6 -#include <asm/mtrr.h>
    26.7 -#include <asm/msr.h>
    26.8 -#include "mtrr.h"
    26.9 -
   26.10 -static struct {
   26.11 -	unsigned long high;
   26.12 -	unsigned long low;
   26.13 -} centaur_mcr[8];
   26.14 -
   26.15 -static u8 centaur_mcr_reserved;
   26.16 -static u8 centaur_mcr_type;	/* 0 for winchip, 1 for winchip2 */
   26.17 -
   26.18 -/*
   26.19 - *	Report boot time MCR setups 
   26.20 - */
   26.21 -
   26.22 -static int
   26.23 -centaur_get_free_region(unsigned long base, unsigned long size)
   26.24 -/*  [SUMMARY] Get a free MTRR.
   26.25 -    <base> The starting (base) address of the region.
   26.26 -    <size> The size (in bytes) of the region.
   26.27 -    [RETURNS] The index of the region on success, else -1 on error.
   26.28 -*/
   26.29 -{
   26.30 -	int i, max;
   26.31 -	mtrr_type ltype;
   26.32 -	unsigned long lbase;
   26.33 -	unsigned int lsize;
   26.34 -
   26.35 -	max = num_var_ranges;
   26.36 -	for (i = 0; i < max; ++i) {
   26.37 -		if (centaur_mcr_reserved & (1 << i))
   26.38 -			continue;
   26.39 -		mtrr_if->get(i, &lbase, &lsize, &ltype);
   26.40 -		if (lsize == 0)
   26.41 -			return i;
   26.42 -	}
   26.43 -	return -ENOSPC;
   26.44 -}
   26.45 -
   26.46 -void
   26.47 -mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi)
   26.48 -{
   26.49 -	centaur_mcr[mcr].low = lo;
   26.50 -	centaur_mcr[mcr].high = hi;
   26.51 -}
   26.52 -
   26.53 -static void
   26.54 -centaur_get_mcr(unsigned int reg, unsigned long *base,
   26.55 -		unsigned int *size, mtrr_type * type)
   26.56 -{
   26.57 -	*base = centaur_mcr[reg].high >> PAGE_SHIFT;
   26.58 -	*size = -(centaur_mcr[reg].low & 0xfffff000) >> PAGE_SHIFT;
   26.59 -	*type = MTRR_TYPE_WRCOMB;	/*  If it is there, it is write-combining  */
   26.60 -	if (centaur_mcr_type == 1 && ((centaur_mcr[reg].low & 31) & 2))
   26.61 -		*type = MTRR_TYPE_UNCACHABLE;
   26.62 -	if (centaur_mcr_type == 1 && (centaur_mcr[reg].low & 31) == 25)
   26.63 -		*type = MTRR_TYPE_WRBACK;
   26.64 -	if (centaur_mcr_type == 0 && (centaur_mcr[reg].low & 31) == 31)
   26.65 -		*type = MTRR_TYPE_WRBACK;
   26.66 -
   26.67 -}
   26.68 -
   26.69 -static void centaur_set_mcr(unsigned int reg, unsigned long base,
   26.70 -			    unsigned long size, mtrr_type type)
   26.71 -{
   26.72 -	unsigned long low, high;
   26.73 -
   26.74 -	if (size == 0) {
   26.75 -		/*  Disable  */
   26.76 -		high = low = 0;
   26.77 -	} else {
   26.78 -		high = base << PAGE_SHIFT;
   26.79 -		if (centaur_mcr_type == 0)
   26.80 -			low = -size << PAGE_SHIFT | 0x1f;	/* only support write-combining... */
   26.81 -		else {
   26.82 -			if (type == MTRR_TYPE_UNCACHABLE)
   26.83 -				low = -size << PAGE_SHIFT | 0x02;	/* NC */
   26.84 -			else
   26.85 -				low = -size << PAGE_SHIFT | 0x09;	/* WWO,WC */
   26.86 -		}
   26.87 -	}
   26.88 -	centaur_mcr[reg].high = high;
   26.89 -	centaur_mcr[reg].low = low;
   26.90 -	wrmsr(MSR_IDT_MCR0 + reg, low, high);
   26.91 -}
   26.92 -/*
   26.93 - *	Initialise the later (saner) Winchip MCR variant. In this version
   26.94 - *	the BIOS can pass us the registers it has used (but not their values)
   26.95 - *	and the control register is read/write
   26.96 - */
   26.97 -
   26.98 -static void __init
   26.99 -centaur_mcr1_init(void)
  26.100 -{
  26.101 -	unsigned i;
  26.102 -	u32 lo, hi;
  26.103 -
  26.104 -	/* Unfortunately, MCR's are read-only, so there is no way to
  26.105 -	 * find out what the bios might have done.
  26.106 -	 */
  26.107 -
  26.108 -	rdmsr(MSR_IDT_MCR_CTRL, lo, hi);
  26.109 -	if (((lo >> 17) & 7) == 1) {	/* Type 1 Winchip2 MCR */
  26.110 -		lo &= ~0x1C0;	/* clear key */
  26.111 -		lo |= 0x040;	/* set key to 1 */
  26.112 -		wrmsr(MSR_IDT_MCR_CTRL, lo, hi);	/* unlock MCR */
  26.113 -	}
  26.114 -
  26.115 -	centaur_mcr_type = 1;
  26.116 -
  26.117 -	/*
  26.118 -	 *  Clear any unconfigured MCR's.
  26.119 -	 */
  26.120 -
  26.121 -	for (i = 0; i < 8; ++i) {
  26.122 -		if (centaur_mcr[i].high == 0 && centaur_mcr[i].low == 0) {
  26.123 -			if (!(lo & (1 << (9 + i))))
  26.124 -				wrmsr(MSR_IDT_MCR0 + i, 0, 0);
  26.125 -			else
  26.126 -				/*
  26.127 -				 *      If the BIOS set up an MCR we cannot see it
  26.128 -				 *      but we don't wish to obliterate it
  26.129 -				 */
  26.130 -				centaur_mcr_reserved |= (1 << i);
  26.131 -		}
  26.132 -	}
  26.133 -	/*  
  26.134 -	 *  Throw the main write-combining switch... 
  26.135 -	 *  However if OOSTORE is enabled then people have already done far
  26.136 -	 *  cleverer things and we should behave. 
  26.137 -	 */
  26.138 -
  26.139 -	lo |= 15;		/* Write combine enables */
  26.140 -	wrmsr(MSR_IDT_MCR_CTRL, lo, hi);
  26.141 -}
  26.142 -
  26.143 -/*
  26.144 - *	Initialise the original winchip with read only MCR registers
  26.145 - *	no used bitmask for the BIOS to pass on and write only control
  26.146 - */
  26.147 -
  26.148 -static void __init
  26.149 -centaur_mcr0_init(void)
  26.150 -{
  26.151 -	unsigned i;
  26.152 -
  26.153 -	/* Unfortunately, MCR's are read-only, so there is no way to
  26.154 -	 * find out what the bios might have done.
  26.155 -	 */
  26.156 -
  26.157 -	/* Clear any unconfigured MCR's.
  26.158 -	 * This way we are sure that the centaur_mcr array contains the actual
  26.159 -	 * values. The disadvantage is that any BIOS tweaks are thus undone.
  26.160 -	 *
  26.161 -	 */
  26.162 -	for (i = 0; i < 8; ++i) {
  26.163 -		if (centaur_mcr[i].high == 0 && centaur_mcr[i].low == 0)
  26.164 -			wrmsr(MSR_IDT_MCR0 + i, 0, 0);
  26.165 -	}
  26.166 -
  26.167 -	wrmsr(MSR_IDT_MCR_CTRL, 0x01F0001F, 0);	/* Write only */
  26.168 -}
  26.169 -
  26.170 -/*
  26.171 - *	Initialise Winchip series MCR registers
  26.172 - */
  26.173 -
  26.174 -static void __init
  26.175 -centaur_mcr_init(void)
  26.176 -{
  26.177 -	struct set_mtrr_context ctxt;
  26.178 -
  26.179 -	set_mtrr_prepare_save(&ctxt);
  26.180 -	set_mtrr_cache_disable(&ctxt);
  26.181 -
  26.182 -	if (boot_cpu_data.x86_model == 4)
  26.183 -		centaur_mcr0_init();
  26.184 -	else if (boot_cpu_data.x86_model == 8 || boot_cpu_data.x86_model == 9)
  26.185 -		centaur_mcr1_init();
  26.186 -
  26.187 -	set_mtrr_done(&ctxt);
  26.188 -}
  26.189 -
  26.190 -static int centaur_validate_add_page(unsigned long base, 
  26.191 -				     unsigned long size, unsigned int type)
  26.192 -{
  26.193 -	/*
  26.194 -	 *  FIXME: Winchip2 supports uncached
  26.195 -	 */
  26.196 -	if (type != MTRR_TYPE_WRCOMB && 
  26.197 -	    (centaur_mcr_type == 0 || type != MTRR_TYPE_UNCACHABLE)) {
  26.198 -		printk(KERN_WARNING
  26.199 -		       "mtrr: only write-combining%s supported\n",
  26.200 -		       centaur_mcr_type ? " and uncacheable are"
  26.201 -		       : " is");
  26.202 -		return -EINVAL;
  26.203 -	}
  26.204 -	return 0;
  26.205 -}
  26.206 -
  26.207 -static struct mtrr_ops centaur_mtrr_ops = {
  26.208 -	.vendor            = X86_VENDOR_CENTAUR,
  26.209 -	.init              = centaur_mcr_init,
  26.210 -	.set               = centaur_set_mcr,
  26.211 -	.get               = centaur_get_mcr,
  26.212 -	.get_free_region   = centaur_get_free_region,
  26.213 -	.validate_add_page = centaur_validate_add_page,
  26.214 -	.have_wrcomb       = positive_have_wrcomb,
  26.215 -};
  26.216 -
  26.217 -int __init centaur_init_mtrr(void)
  26.218 -{
  26.219 -	set_mtrr_ops(&centaur_mtrr_ops);
  26.220 -	return 0;
  26.221 -}
  26.222 -
  26.223 -//arch_initcall(centaur_init_mtrr);
    27.1 --- a/xen/arch/x86/mtrr/cyrix.c	Mon Feb 13 17:41:23 2006 +0100
    27.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    27.3 @@ -1,362 +0,0 @@
    27.4 -#include <xen/init.h>
    27.5 -#include <xen/mm.h>
    27.6 -#include <asm/mtrr.h>
    27.7 -#include <asm/msr.h>
    27.8 -#include <asm/io.h>
    27.9 -#include "mtrr.h"
   27.10 -
   27.11 -int arr3_protected;
   27.12 -
   27.13 -static void
   27.14 -cyrix_get_arr(unsigned int reg, unsigned long *base,
   27.15 -	      unsigned int *size, mtrr_type * type)
   27.16 -{
   27.17 -	unsigned long flags;
   27.18 -	unsigned char arr, ccr3, rcr, shift;
   27.19 -
   27.20 -	arr = CX86_ARR_BASE + (reg << 1) + reg;	/* avoid multiplication by 3 */
   27.21 -
   27.22 -	/* Save flags and disable interrupts */
   27.23 -	local_irq_save(flags);
   27.24 -
   27.25 -	ccr3 = getCx86(CX86_CCR3);
   27.26 -	setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10);	/* enable MAPEN */
   27.27 -	((unsigned char *) base)[3] = getCx86(arr);
   27.28 -	((unsigned char *) base)[2] = getCx86(arr + 1);
   27.29 -	((unsigned char *) base)[1] = getCx86(arr + 2);
   27.30 -	rcr = getCx86(CX86_RCR_BASE + reg);
   27.31 -	setCx86(CX86_CCR3, ccr3);	/* disable MAPEN */
   27.32 -
   27.33 -	/* Enable interrupts if it was enabled previously */
   27.34 -	local_irq_restore(flags);
   27.35 -	shift = ((unsigned char *) base)[1] & 0x0f;
   27.36 -	*base >>= PAGE_SHIFT;
   27.37 -
   27.38 -	/* Power of two, at least 4K on ARR0-ARR6, 256K on ARR7
   27.39 -	 * Note: shift==0xf means 4G, this is unsupported.
   27.40 -	 */
   27.41 -	if (shift)
   27.42 -		*size = (reg < 7 ? 0x1UL : 0x40UL) << (shift - 1);
   27.43 -	else
   27.44 -		*size = 0;
   27.45 -
   27.46 -	/* Bit 0 is Cache Enable on ARR7, Cache Disable on ARR0-ARR6 */
   27.47 -	if (reg < 7) {
   27.48 -		switch (rcr) {
   27.49 -		case 1:
   27.50 -			*type = MTRR_TYPE_UNCACHABLE;
   27.51 -			break;
   27.52 -		case 8:
   27.53 -			*type = MTRR_TYPE_WRBACK;
   27.54 -			break;
   27.55 -		case 9:
   27.56 -			*type = MTRR_TYPE_WRCOMB;
   27.57 -			break;
   27.58 -		case 24:
   27.59 -		default:
   27.60 -			*type = MTRR_TYPE_WRTHROUGH;
   27.61 -			break;
   27.62 -		}
   27.63 -	} else {
   27.64 -		switch (rcr) {
   27.65 -		case 0:
   27.66 -			*type = MTRR_TYPE_UNCACHABLE;
   27.67 -			break;
   27.68 -		case 8:
   27.69 -			*type = MTRR_TYPE_WRCOMB;
   27.70 -			break;
   27.71 -		case 9:
   27.72 -			*type = MTRR_TYPE_WRBACK;
   27.73 -			break;
   27.74 -		case 25:
   27.75 -		default:
   27.76 -			*type = MTRR_TYPE_WRTHROUGH;
   27.77 -			break;
   27.78 -		}
   27.79 -	}
   27.80 -}
   27.81 -
   27.82 -static int
   27.83 -cyrix_get_free_region(unsigned long base, unsigned long size)
   27.84 -/*  [SUMMARY] Get a free ARR.
   27.85 -    <base> The starting (base) address of the region.
   27.86 -    <size> The size (in bytes) of the region.
   27.87 -    [RETURNS] The index of the region on success, else -1 on error.
   27.88 -*/
   27.89 -{
   27.90 -	int i;
   27.91 -	mtrr_type ltype;
   27.92 -	unsigned long lbase;
   27.93 -	unsigned int  lsize;
   27.94 -
   27.95 -	/* If we are to set up a region >32M then look at ARR7 immediately */
   27.96 -	if (size > 0x2000) {
   27.97 -		cyrix_get_arr(7, &lbase, &lsize, &ltype);
   27.98 -		if (lsize == 0)
   27.99 -			return 7;
  27.100 -		/*  Else try ARR0-ARR6 first  */
  27.101 -	} else {
  27.102 -		for (i = 0; i < 7; i++) {
  27.103 -			cyrix_get_arr(i, &lbase, &lsize, &ltype);
  27.104 -			if ((i == 3) && arr3_protected)
  27.105 -				continue;
  27.106 -			if (lsize == 0)
  27.107 -				return i;
  27.108 -		}
  27.109 -		/* ARR0-ARR6 isn't free, try ARR7 but its size must be at least 256K */
  27.110 -		cyrix_get_arr(i, &lbase, &lsize, &ltype);
  27.111 -		if ((lsize == 0) && (size >= 0x40))
  27.112 -			return i;
  27.113 -	}
  27.114 -	return -ENOSPC;
  27.115 -}
  27.116 -
  27.117 -static u32 cr4 = 0;
  27.118 -static u32 ccr3;
  27.119 -
  27.120 -static void prepare_set(void)
  27.121 -{
  27.122 -	u32 cr0;
  27.123 -
  27.124 -	/*  Save value of CR4 and clear Page Global Enable (bit 7)  */
  27.125 -	if ( cpu_has_pge ) {
  27.126 -		cr4 = read_cr4();
  27.127 -		write_cr4(cr4 & (unsigned char) ~(1 << 7));
  27.128 -	}
  27.129 -
  27.130 -	/*  Disable and flush caches. Note that wbinvd flushes the TLBs as
  27.131 -	    a side-effect  */
  27.132 -	cr0 = read_cr0() | 0x40000000;
  27.133 -	wbinvd();
  27.134 -	write_cr0(cr0);
  27.135 -	wbinvd();
  27.136 -
  27.137 -	/* Cyrix ARRs - everything else were excluded at the top */
  27.138 -	ccr3 = getCx86(CX86_CCR3);
  27.139 -
  27.140 -	/* Cyrix ARRs - everything else were excluded at the top */
  27.141 -	setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10);
  27.142 -
  27.143 -}
  27.144 -
  27.145 -static void post_set(void)
  27.146 -{
  27.147 -	/*  Flush caches and TLBs  */
  27.148 -	wbinvd();
  27.149 -
  27.150 -	/* Cyrix ARRs - everything else was excluded at the top */
  27.151 -	setCx86(CX86_CCR3, ccr3);
  27.152 -		
  27.153 -	/*  Enable caches  */
  27.154 -	write_cr0(read_cr0() & 0xbfffffff);
  27.155 -
  27.156 -	/*  Restore value of CR4  */
  27.157 -	if ( cpu_has_pge )
  27.158 -		write_cr4(cr4);
  27.159 -}
  27.160 -
  27.161 -static void cyrix_set_arr(unsigned int reg, unsigned long base,
  27.162 -			  unsigned long size, mtrr_type type)
  27.163 -{
  27.164 -	unsigned char arr, arr_type, arr_size;
  27.165 -
  27.166 -	arr = CX86_ARR_BASE + (reg << 1) + reg;	/* avoid multiplication by 3 */
  27.167 -
  27.168 -	/* count down from 32M (ARR0-ARR6) or from 2G (ARR7) */
  27.169 -	if (reg >= 7)
  27.170 -		size >>= 6;
  27.171 -
  27.172 -	size &= 0x7fff;		/* make sure arr_size <= 14 */
  27.173 -	for (arr_size = 0; size; arr_size++, size >>= 1) ;
  27.174 -
  27.175 -	if (reg < 7) {
  27.176 -		switch (type) {
  27.177 -		case MTRR_TYPE_UNCACHABLE:
  27.178 -			arr_type = 1;
  27.179 -			break;
  27.180 -		case MTRR_TYPE_WRCOMB:
  27.181 -			arr_type = 9;
  27.182 -			break;
  27.183 -		case MTRR_TYPE_WRTHROUGH:
  27.184 -			arr_type = 24;
  27.185 -			break;
  27.186 -		default:
  27.187 -			arr_type = 8;
  27.188 -			break;
  27.189 -		}
  27.190 -	} else {
  27.191 -		switch (type) {
  27.192 -		case MTRR_TYPE_UNCACHABLE:
  27.193 -			arr_type = 0;
  27.194 -			break;
  27.195 -		case MTRR_TYPE_WRCOMB:
  27.196 -			arr_type = 8;
  27.197 -			break;
  27.198 -		case MTRR_TYPE_WRTHROUGH:
  27.199 -			arr_type = 25;
  27.200 -			break;
  27.201 -		default:
  27.202 -			arr_type = 9;
  27.203 -			break;
  27.204 -		}
  27.205 -	}
  27.206 -
  27.207 -	prepare_set();
  27.208 -
  27.209 -	base <<= PAGE_SHIFT;
  27.210 -	setCx86(arr, ((unsigned char *) &base)[3]);
  27.211 -	setCx86(arr + 1, ((unsigned char *) &base)[2]);
  27.212 -	setCx86(arr + 2, (((unsigned char *) &base)[1]) | arr_size);
  27.213 -	setCx86(CX86_RCR_BASE + reg, arr_type);
  27.214 -
  27.215 -	post_set();
  27.216 -}
  27.217 -
  27.218 -typedef struct {
  27.219 -	unsigned long base;
  27.220 -	unsigned int size;
  27.221 -	mtrr_type type;
  27.222 -} arr_state_t;
  27.223 -
  27.224 -arr_state_t arr_state[8] __initdata = {
  27.225 -	{0UL, 0UL, 0UL}, {0UL, 0UL, 0UL}, {0UL, 0UL, 0UL}, {0UL, 0UL, 0UL},
  27.226 -	{0UL, 0UL, 0UL}, {0UL, 0UL, 0UL}, {0UL, 0UL, 0UL}, {0UL, 0UL, 0UL}
  27.227 -};
  27.228 -
  27.229 -unsigned char ccr_state[7] __initdata = { 0, 0, 0, 0, 0, 0, 0 };
  27.230 -
  27.231 -static void cyrix_set_all(void)
  27.232 -{
  27.233 -	int i;
  27.234 -
  27.235 -	prepare_set();
  27.236 -
  27.237 -	/* the CCRs are not contiguous */
  27.238 -	for (i = 0; i < 4; i++)
  27.239 -		setCx86(CX86_CCR0 + i, ccr_state[i]);
  27.240 -	for (; i < 7; i++)
  27.241 -		setCx86(CX86_CCR4 + i, ccr_state[i]);
  27.242 -	for (i = 0; i < 8; i++)
  27.243 -		cyrix_set_arr(i, arr_state[i].base, 
  27.244 -			      arr_state[i].size, arr_state[i].type);
  27.245 -
  27.246 -	post_set();
  27.247 -}
  27.248 -
  27.249 -/*
  27.250 - * On Cyrix 6x86(MX) and M II the ARR3 is special: it has connection
  27.251 - * with the SMM (System Management Mode) mode. So we need the following:
  27.252 - * Check whether SMI_LOCK (CCR3 bit 0) is set
  27.253 - *   if it is set, write a warning message: ARR3 cannot be changed!
  27.254 - *     (it cannot be changed until the next processor reset)
  27.255 - *   if it is reset, then we can change it, set all the needed bits:
  27.256 - *   - disable access to SMM memory through ARR3 range (CCR1 bit 7 reset)
  27.257 - *   - disable access to SMM memory (CCR1 bit 2 reset)
  27.258 - *   - disable SMM mode (CCR1 bit 1 reset)
  27.259 - *   - disable write protection of ARR3 (CCR6 bit 1 reset)
  27.260 - *   - (maybe) disable ARR3
  27.261 - * Just to be sure, we enable ARR usage by the processor (CCR5 bit 5 set)
  27.262 - */
  27.263 -static void __init
  27.264 -cyrix_arr_init(void)
  27.265 -{
  27.266 -	struct set_mtrr_context ctxt;
  27.267 -	unsigned char ccr[7];
  27.268 -	int ccrc[7] = { 0, 0, 0, 0, 0, 0, 0 };
  27.269 -#ifdef CONFIG_SMP
  27.270 -	int i;
  27.271 -#endif
  27.272 -
  27.273 -	/* flush cache and enable MAPEN */
  27.274 -	set_mtrr_prepare_save(&ctxt);
  27.275 -	set_mtrr_cache_disable(&ctxt);
  27.276 -
  27.277 -	/* Save all CCRs locally */
  27.278 -	ccr[0] = getCx86(CX86_CCR0);
  27.279 -	ccr[1] = getCx86(CX86_CCR1);
  27.280 -	ccr[2] = getCx86(CX86_CCR2);
  27.281 -	ccr[3] = ctxt.ccr3;
  27.282 -	ccr[4] = getCx86(CX86_CCR4);
  27.283 -	ccr[5] = getCx86(CX86_CCR5);
  27.284 -	ccr[6] = getCx86(CX86_CCR6);
  27.285 -
  27.286 -	if (ccr[3] & 1) {
  27.287 -		ccrc[3] = 1;
  27.288 -		arr3_protected = 1;
  27.289 -	} else {
  27.290 -		/* Disable SMM mode (bit 1), access to SMM memory (bit 2) and
  27.291 -		 * access to SMM memory through ARR3 (bit 7).
  27.292 -		 */
  27.293 -		if (ccr[1] & 0x80) {
  27.294 -			ccr[1] &= 0x7f;
  27.295 -			ccrc[1] |= 0x80;
  27.296 -		}
  27.297 -		if (ccr[1] & 0x04) {
  27.298 -			ccr[1] &= 0xfb;
  27.299 -			ccrc[1] |= 0x04;
  27.300 -		}
  27.301 -		if (ccr[1] & 0x02) {
  27.302 -			ccr[1] &= 0xfd;
  27.303 -			ccrc[1] |= 0x02;
  27.304 -		}
  27.305 -		arr3_protected = 0;
  27.306 -		if (ccr[6] & 0x02) {
  27.307 -			ccr[6] &= 0xfd;
  27.308 -			ccrc[6] = 1;	/* Disable write protection of ARR3 */
  27.309 -			setCx86(CX86_CCR6, ccr[6]);
  27.310 -		}
  27.311 -		/* Disable ARR3. This is safe now that we disabled SMM. */
  27.312 -		/* cyrix_set_arr_up (3, 0, 0, 0, FALSE); */
  27.313 -	}
  27.314 -	/* If we changed CCR1 in memory, change it in the processor, too. */
  27.315 -	if (ccrc[1])
  27.316 -		setCx86(CX86_CCR1, ccr[1]);
  27.317 -
  27.318 -	/* Enable ARR usage by the processor */
  27.319 -	if (!(ccr[5] & 0x20)) {
  27.320 -		ccr[5] |= 0x20;
  27.321 -		ccrc[5] = 1;
  27.322 -		setCx86(CX86_CCR5, ccr[5]);
  27.323 -	}
  27.324 -#ifdef CONFIG_SMP
  27.325 -	for (i = 0; i < 7; i++)
  27.326 -		ccr_state[i] = ccr[i];
  27.327 -	for (i = 0; i < 8; i++)
  27.328 -		cyrix_get_arr(i,
  27.329 -			      &arr_state[i].base, &arr_state[i].size,
  27.330 -			      &arr_state[i].type);
  27.331 -#endif
  27.332 -
  27.333 -	set_mtrr_done(&ctxt);	/* flush cache and disable MAPEN */
  27.334 -
  27.335 -	if (ccrc[5])
  27.336 -		printk(KERN_INFO "mtrr: ARR usage was not enabled, enabled manually\n");
  27.337 -	if (ccrc[3])
  27.338 -		printk(KERN_INFO "mtrr: ARR3 cannot be changed\n");
  27.339 -/*
  27.340 -    if ( ccrc[1] & 0x80) printk ("mtrr: SMM memory access through ARR3 disabled\n");
  27.341 -    if ( ccrc[1] & 0x04) printk ("mtrr: SMM memory access disabled\n");
  27.342 -    if ( ccrc[1] & 0x02) printk ("mtrr: SMM mode disabled\n");
  27.343 -*/
  27.344 -	if (ccrc[6])
  27.345 -		printk(KERN_INFO "mtrr: ARR3 was write protected, unprotected\n");
  27.346 -}
  27.347 -
  27.348 -static struct mtrr_ops cyrix_mtrr_ops = {
  27.349 -	.vendor            = X86_VENDOR_CYRIX,
  27.350 -	.init              = cyrix_arr_init,
  27.351 -	.set_all	   = cyrix_set_all,
  27.352 -	.set               = cyrix_set_arr,
  27.353 -	.get               = cyrix_get_arr,
  27.354 -	.get_free_region   = cyrix_get_free_region,
  27.355 -	.validate_add_page = generic_validate_add_page,
  27.356 -	.have_wrcomb       = positive_have_wrcomb,
  27.357 -};
  27.358 -
  27.359 -int __init cyrix_init_mtrr(void)
  27.360 -{
  27.361 -	set_mtrr_ops(&cyrix_mtrr_ops);
  27.362 -	return 0;
  27.363 -}
  27.364 -
  27.365 -//arch_initcall(cyrix_init_mtrr);
    28.1 --- a/xen/arch/x86/mtrr/generic.c	Mon Feb 13 17:41:23 2006 +0100
    28.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    28.3 @@ -1,407 +0,0 @@
    28.4 -/* This only handles 32bit MTRR on 32bit hosts. This is strictly wrong
    28.5 -   because MTRRs can span upto 40 bits (36bits on most modern x86) */ 
    28.6 -#include <xen/lib.h>
    28.7 -#include <xen/init.h>
    28.8 -#include <xen/mm.h>
    28.9 -#include <asm/flushtlb.h>
   28.10 -#include <asm/io.h>
   28.11 -#include <asm/mtrr.h>
   28.12 -#include <asm/msr.h>
   28.13 -#include <asm/system.h>
   28.14 -#include <asm/cpufeature.h>
   28.15 -#include "mtrr.h"
   28.16 -
   28.17 -struct mtrr_state {
   28.18 -	struct mtrr_var_range *var_ranges;
   28.19 -	mtrr_type fixed_ranges[NUM_FIXED_RANGES];
   28.20 -	unsigned char enabled;
   28.21 -	mtrr_type def_type;
   28.22 -};
   28.23 -
   28.24 -static unsigned long smp_changes_mask;
   28.25 -struct mtrr_state mtrr_state = {};
   28.26 -
   28.27 -
   28.28 -/*  Get the MSR pair relating to a var range  */
   28.29 -static void __init
   28.30 -get_mtrr_var_range(unsigned int index, struct mtrr_var_range *vr)
   28.31 -{
   28.32 -	rdmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi);
   28.33 -	rdmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi);
   28.34 -}
   28.35 -
   28.36 -static void __init
   28.37 -get_fixed_ranges(mtrr_type * frs)
   28.38 -{
   28.39 -	unsigned int *p = (unsigned int *) frs;
   28.40 -	int i;
   28.41 -
   28.42 -	rdmsr(MTRRfix64K_00000_MSR, p[0], p[1]);
   28.43 -
   28.44 -	for (i = 0; i < 2; i++)
   28.45 -		rdmsr(MTRRfix16K_80000_MSR + i, p[2 + i * 2], p[3 + i * 2]);
   28.46 -	for (i = 0; i < 8; i++)
   28.47 -		rdmsr(MTRRfix4K_C0000_MSR + i, p[6 + i * 2], p[7 + i * 2]);
   28.48 -}
   28.49 -
   28.50 -/*  Grab all of the MTRR state for this CPU into *state  */
   28.51 -void __init get_mtrr_state(void)
   28.52 -{
   28.53 -	unsigned int i;
   28.54 -	struct mtrr_var_range *vrs;
   28.55 -	unsigned lo, dummy;
   28.56 -
   28.57 -	if (!mtrr_state.var_ranges) {
   28.58 -		mtrr_state.var_ranges = xmalloc_array(struct mtrr_var_range,
   28.59 -						  num_var_ranges);
   28.60 -		if (!mtrr_state.var_ranges)
   28.61 -			return;
   28.62 -	} 
   28.63 -	vrs = mtrr_state.var_ranges;
   28.64 -
   28.65 -	for (i = 0; i < num_var_ranges; i++)
   28.66 -		get_mtrr_var_range(i, &vrs[i]);
   28.67 -	get_fixed_ranges(mtrr_state.fixed_ranges);
   28.68 -
   28.69 -	rdmsr(MTRRdefType_MSR, lo, dummy);
   28.70 -	mtrr_state.def_type = (lo & 0xff);
   28.71 -	mtrr_state.enabled = (lo & 0xc00) >> 10;
   28.72 -}
   28.73 -
   28.74 -/*  Free resources associated with a struct mtrr_state  */
   28.75 -void __init finalize_mtrr_state(void)
   28.76 -{
   28.77 -	xfree(mtrr_state.var_ranges);
   28.78 -	mtrr_state.var_ranges = NULL;
   28.79 -}
   28.80 -
   28.81 -/*  Some BIOS's are fucked and don't set all MTRRs the same!  */
   28.82 -void __init mtrr_state_warn(void)
   28.83 -{
   28.84 -	unsigned long mask = smp_changes_mask;
   28.85 -
   28.86 -	if (!mask)
   28.87 -		return;
   28.88 -	if (mask & MTRR_CHANGE_MASK_FIXED)
   28.89 -		printk(KERN_WARNING "mtrr: your CPUs had inconsistent fixed MTRR settings\n");
   28.90 -	if (mask & MTRR_CHANGE_MASK_VARIABLE)
   28.91 -		printk(KERN_WARNING "mtrr: your CPUs had inconsistent variable MTRR settings\n");
   28.92 -	if (mask & MTRR_CHANGE_MASK_DEFTYPE)
   28.93 -		printk(KERN_WARNING "mtrr: your CPUs had inconsistent MTRRdefType settings\n");
   28.94 -	printk(KERN_INFO "mtrr: probably your BIOS does not setup all CPUs.\n");
   28.95 -	printk(KERN_INFO "mtrr: corrected configuration.\n");
   28.96 -}
   28.97 -
   28.98 -
   28.99 -int generic_get_free_region(unsigned long base, unsigned long size)
  28.100 -/*  [SUMMARY] Get a free MTRR.
  28.101 -    <base> The starting (base) address of the region.
  28.102 -    <size> The size (in bytes) of the region.
  28.103 -    [RETURNS] The index of the region on success, else -1 on error.
  28.104 -*/
  28.105 -{
  28.106 -	int i, max;
  28.107 -	mtrr_type ltype;
  28.108 -	unsigned long lbase;
  28.109 -	unsigned lsize;
  28.110 -
  28.111 -	max = num_var_ranges;
  28.112 -	for (i = 0; i < max; ++i) {
  28.113 -		mtrr_if->get(i, &lbase, &lsize, &ltype);
  28.114 -		if (lsize == 0)
  28.115 -			return i;
  28.116 -	}
  28.117 -	return -ENOSPC;
  28.118 -}
  28.119 -
  28.120 -void generic_get_mtrr(unsigned int reg, unsigned long *base,
  28.121 -		      unsigned int *size, mtrr_type * type)
  28.122 -{
  28.123 -	unsigned int mask_lo, mask_hi, base_lo, base_hi;
  28.124 -
  28.125 -	rdmsr(MTRRphysMask_MSR(reg), mask_lo, mask_hi);
  28.126 -	if ((mask_lo & 0x800) == 0) {
  28.127 -		/*  Invalid (i.e. free) range  */
  28.128 -		*base = 0;
  28.129 -		*size = 0;
  28.130 -		*type = 0;
  28.131 -		return;
  28.132 -	}
  28.133 -
  28.134 -	rdmsr(MTRRphysBase_MSR(reg), base_lo, base_hi);
  28.135 -
  28.136 -	/* Work out the shifted address mask. */
  28.137 -	mask_lo = size_or_mask | mask_hi << (32 - PAGE_SHIFT)
  28.138 -	    | mask_lo >> PAGE_SHIFT;
  28.139 -
  28.140 -	/* This works correctly if size is a power of two, i.e. a
  28.141 -	   contiguous range. */
  28.142 -	*size = -mask_lo;
  28.143 -	*base = base_hi << (32 - PAGE_SHIFT) | base_lo >> PAGE_SHIFT;
  28.144 -	*type = base_lo & 0xff;
  28.145 -}
  28.146 -
  28.147 -static int set_fixed_ranges(mtrr_type * frs)
  28.148 -{
  28.149 -	unsigned int *p = (unsigned int *) frs;
  28.150 -	int changed = FALSE;
  28.151 -	int i;
  28.152 -	unsigned int lo, hi;
  28.153 -
  28.154 -	rdmsr(MTRRfix64K_00000_MSR, lo, hi);
  28.155 -	if (p[0] != lo || p[1] != hi) {
  28.156 -		wrmsr(MTRRfix64K_00000_MSR, p[0], p[1]);
  28.157 -		changed = TRUE;
  28.158 -	}
  28.159 -
  28.160 -	for (i = 0; i < 2; i++) {
  28.161 -		rdmsr(MTRRfix16K_80000_MSR + i, lo, hi);
  28.162 -		if (p[2 + i * 2] != lo || p[3 + i * 2] != hi) {
  28.163 -			wrmsr(MTRRfix16K_80000_MSR + i, p[2 + i * 2],
  28.164 -			      p[3 + i * 2]);
  28.165 -			changed = TRUE;
  28.166 -		}
  28.167 -	}
  28.168 -
  28.169 -	for (i = 0; i < 8; i++) {
  28.170 -		rdmsr(MTRRfix4K_C0000_MSR + i, lo, hi);
  28.171 -		if (p[6 + i * 2] != lo || p[7 + i * 2] != hi) {
  28.172 -			wrmsr(MTRRfix4K_C0000_MSR + i, p[6 + i * 2],
  28.173 -			      p[7 + i * 2]);
  28.174 -			changed = TRUE;
  28.175 -		}
  28.176 -	}
  28.177 -	return changed;
  28.178 -}
  28.179 -
  28.180 -/*  Set the MSR pair relating to a var range. Returns TRUE if
  28.181 -    changes are made  */
  28.182 -static int set_mtrr_var_ranges(unsigned int index, struct mtrr_var_range *vr)
  28.183 -{
  28.184 -	unsigned int lo, hi;
  28.185 -	int changed = FALSE;
  28.186 -
  28.187 -	rdmsr(MTRRphysBase_MSR(index), lo, hi);
  28.188 -	if ((vr->base_lo & 0xfffff0ffUL) != (lo & 0xfffff0ffUL)
  28.189 -	    || (vr->base_hi & 0xfUL) != (hi & 0xfUL)) {
  28.190 -		wrmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi);
  28.191 -		changed = TRUE;
  28.192 -	}
  28.193 -
  28.194 -	rdmsr(MTRRphysMask_MSR(index), lo, hi);
  28.195 -
  28.196 -	if ((vr->mask_lo & 0xfffff800UL) != (lo & 0xfffff800UL)
  28.197 -	    || (vr->mask_hi & 0xfUL) != (hi & 0xfUL)) {
  28.198 -		wrmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi);
  28.199 -		changed = TRUE;
  28.200 -	}
  28.201 -	return changed;
  28.202 -}
  28.203 -
  28.204 -static unsigned long set_mtrr_state(u32 deftype_lo, u32 deftype_hi)
  28.205 -/*  [SUMMARY] Set the MTRR state for this CPU.
  28.206 -    <state> The MTRR state information to read.
  28.207 -    <ctxt> Some relevant CPU context.
  28.208 -    [NOTE] The CPU must already be in a safe state for MTRR changes.
  28.209 -    [RETURNS] 0 if no changes made, else a mask indication what was changed.
  28.210 -*/
  28.211 -{
  28.212 -	unsigned int i;
  28.213 -	unsigned long change_mask = 0;
  28.214 -
  28.215 -	for (i = 0; i < num_var_ranges; i++)
  28.216 -		if (set_mtrr_var_ranges(i, &mtrr_state.var_ranges[i]))
  28.217 -			change_mask |= MTRR_CHANGE_MASK_VARIABLE;
  28.218 -
  28.219 -	if (set_fixed_ranges(mtrr_state.fixed_ranges))
  28.220 -		change_mask |= MTRR_CHANGE_MASK_FIXED;
  28.221 -
  28.222 -	/*  Set_mtrr_restore restores the old value of MTRRdefType,
  28.223 -	   so to set it we fiddle with the saved value  */
  28.224 -	if ((deftype_lo & 0xff) != mtrr_state.def_type
  28.225 -	    || ((deftype_lo & 0xc00) >> 10) != mtrr_state.enabled) {
  28.226 -		deftype_lo |= (mtrr_state.def_type | mtrr_state.enabled << 10);
  28.227 -		change_mask |= MTRR_CHANGE_MASK_DEFTYPE;
  28.228 -	}
  28.229 -
  28.230 -	return change_mask;
  28.231 -}
  28.232 -
  28.233 -
  28.234 -static unsigned long cr4 = 0;
  28.235 -static u32 deftype_lo, deftype_hi;
  28.236 -static spinlock_t set_atomicity_lock = SPIN_LOCK_UNLOCKED;
  28.237 -
  28.238 -/*
  28.239 - * Since we are disabling the cache don't allow any interrupts - they
  28.240 - * would run extremely slow and would only increase the pain.  The caller must
  28.241 - * ensure that local interrupts are disabled and are reenabled after post_set()
  28.242 - * has been called.
  28.243 - */
  28.244 -
  28.245 -static void prepare_set(void)
  28.246 -{
  28.247 -	unsigned long cr0;
  28.248 -
  28.249 -	/*  Note that this is not ideal, since the cache is only flushed/disabled
  28.250 -	   for this CPU while the MTRRs are changed, but changing this requires
  28.251 -	   more invasive changes to the way the kernel boots  */
  28.252 -
  28.253 -	spin_lock(&set_atomicity_lock);
  28.254 -
  28.255 -	/*  Enter the no-fill (CD=1, NW=0) cache mode and flush caches. */
  28.256 -	cr0 = read_cr0() | 0x40000000;	/* set CD flag */
  28.257 -	write_cr0(cr0);
  28.258 -	wbinvd();
  28.259 -
  28.260 -	/*  Save value of CR4 and clear Page Global Enable (bit 7)  */
  28.261 -	if ( cpu_has_pge ) {
  28.262 -		cr4 = read_cr4();
  28.263 -		write_cr4(cr4 & ~X86_CR4_PGE);
  28.264 -	}
  28.265 -
  28.266 -	/* Flush all TLBs via a mov %cr3, %reg; mov %reg, %cr3 */
  28.267 -	local_flush_tlb();
  28.268 -
  28.269 -	/*  Save MTRR state */
  28.270 -	rdmsr(MTRRdefType_MSR, deftype_lo, deftype_hi);
  28.271 -
  28.272 -	/*  Disable MTRRs, and set the default type to uncached  */
  28.273 -	wrmsr(MTRRdefType_MSR, deftype_lo & 0xf300UL, deftype_hi);
  28.274 -}
  28.275 -
  28.276 -static void post_set(void)
  28.277 -{
  28.278 -	/*  Flush TLBs (no need to flush caches - they are disabled)  */
  28.279 -	local_flush_tlb();
  28.280 -
  28.281 -	/* Intel (P6) standard MTRRs */
  28.282 -	wrmsr(MTRRdefType_MSR, deftype_lo, deftype_hi);
  28.283 -		
  28.284 -	/*  Enable caches  */
  28.285 -	write_cr0(read_cr0() & 0xbfffffff);
  28.286 -
  28.287 -	/*  Restore value of CR4  */
  28.288 -	if ( cpu_has_pge )
  28.289 -		write_cr4(cr4);
  28.290 -	spin_unlock(&set_atomicity_lock);
  28.291 -}
  28.292 -
  28.293 -static void generic_set_all(void)
  28.294 -{
  28.295 -	unsigned long mask, count;
  28.296 -	unsigned long flags;
  28.297 -
  28.298 -	local_irq_save(flags);
  28.299 -	prepare_set();
  28.300 -
  28.301 -	/* Actually set the state */
  28.302 -	mask = set_mtrr_state(deftype_lo,deftype_hi);
  28.303 -
  28.304 -	post_set();
  28.305 -	local_irq_restore(flags);
  28.306 -
  28.307 -	/*  Use the atomic bitops to update the global mask  */
  28.308 -	for (count = 0; count < sizeof mask * 8; ++count) {
  28.309 -		if (mask & 0x01)
  28.310 -			set_bit(count, &smp_changes_mask);
  28.311 -		mask >>= 1;
  28.312 -	}
  28.313 -	
  28.314 -}
  28.315 -
  28.316 -static void generic_set_mtrr(unsigned int reg, unsigned long base,
  28.317 -			     unsigned long size, mtrr_type type)
  28.318 -/*  [SUMMARY] Set variable MTRR register on the local CPU.
  28.319 -    <reg> The register to set.
  28.320 -    <base> The base address of the region.
  28.321 -    <size> The size of the region. If this is 0 the region is disabled.
  28.322 -    <type> The type of the region.
  28.323 -    <do_safe> If TRUE, do the change safely. If FALSE, safety measures should
  28.324 -    be done externally.
  28.325 -    [RETURNS] Nothing.
  28.326 -*/
  28.327 -{
  28.328 -	unsigned long flags;
  28.329 -
  28.330 -	local_irq_save(flags);
  28.331 -	prepare_set();
  28.332 -
  28.333 -	if (size == 0) {
  28.334 -		/* The invalid bit is kept in the mask, so we simply clear the
  28.335 -		   relevant mask register to disable a range. */
  28.336 -		wrmsr(MTRRphysMask_MSR(reg), 0, 0);
  28.337 -	} else {
  28.338 -		wrmsr(MTRRphysBase_MSR(reg), base << PAGE_SHIFT | type,
  28.339 -		      (base & size_and_mask) >> (32 - PAGE_SHIFT));
  28.340 -		wrmsr(MTRRphysMask_MSR(reg), -size << PAGE_SHIFT | 0x800,
  28.341 -		      (-size & size_and_mask) >> (32 - PAGE_SHIFT));
  28.342 -	}
  28.343 -
  28.344 -	post_set();
  28.345 -	local_irq_restore(flags);
  28.346 -}
  28.347 -
  28.348 -int generic_validate_add_page(unsigned long base, unsigned long size, unsigned int type)
  28.349 -{
  28.350 -	unsigned long lbase, last;
  28.351 -
  28.352 -	/*  For Intel PPro stepping <= 7, must be 4 MiB aligned 
  28.353 -	    and not touch 0x70000000->0x7003FFFF */
  28.354 -	if (is_cpu(INTEL) && boot_cpu_data.x86 == 6 &&
  28.355 -	    boot_cpu_data.x86_model == 1 &&
  28.356 -	    boot_cpu_data.x86_mask <= 7) {
  28.357 -		if (base & ((1 << (22 - PAGE_SHIFT)) - 1)) {
  28.358 -			printk(KERN_WARNING "mtrr: base(0x%lx000) is not 4 MiB aligned\n", base);
  28.359 -			return -EINVAL;
  28.360 -		}
  28.361 -		if (!(base + size < 0x70000000 || base > 0x7003FFFF) &&
  28.362 -		    (type == MTRR_TYPE_WRCOMB
  28.363 -		     || type == MTRR_TYPE_WRBACK)) {
  28.364 -			printk(KERN_WARNING "mtrr: writable mtrr between 0x70000000 and 0x7003FFFF may hang the CPU.\n");
  28.365 -			return -EINVAL;
  28.366 -		}
  28.367 -	}
  28.368 -
  28.369 -	if (base + size < 0x100) {
  28.370 -		printk(KERN_WARNING "mtrr: cannot set region below 1 MiB (0x%lx000,0x%lx000)\n",
  28.371 -		       base, size);
  28.372 -		return -EINVAL;
  28.373 -	}
  28.374 -	/*  Check upper bits of base and last are equal and lower bits are 0
  28.375 -	    for base and 1 for last  */
  28.376 -	last = base + size - 1;
  28.377 -	for (lbase = base; !(lbase & 1) && (last & 1);
  28.378 -	     lbase = lbase >> 1, last = last >> 1) ;
  28.379 -	if (lbase != last) {
  28.380 -		printk(KERN_WARNING "mtrr: base(0x%lx000) is not aligned on a size(0x%lx000) boundary\n",
  28.381 -		       base, size);
  28.382 -		return -EINVAL;
  28.383 -	}
  28.384 -	return 0;
  28.385 -}
  28.386 -
  28.387 -
  28.388 -int generic_have_wrcomb(void)
  28.389 -{
  28.390 -	unsigned long config, dummy;
  28.391 -	rdmsr(MTRRcap_MSR, config, dummy);
  28.392 -	return (config & (1 << 10));
  28.393 -}
  28.394 -
  28.395 -int positive_have_wrcomb(void)
  28.396 -{
  28.397 -	return 1;
  28.398 -}
  28.399 -
  28.400 -/* generic structure...
  28.401 - */
  28.402 -struct mtrr_ops generic_mtrr_ops = {
  28.403 -	.use_intel_if      = 1,
  28.404 -	.set_all	   = generic_set_all,
  28.405 -	.get               = generic_get_mtrr,
  28.406 -	.get_free_region   = generic_get_free_region,
  28.407 -	.set               = generic_set_mtrr,
  28.408 -	.validate_add_page = generic_validate_add_page,
  28.409 -	.have_wrcomb       = generic_have_wrcomb,
  28.410 -};
    29.1 --- a/xen/arch/x86/mtrr/main.c	Mon Feb 13 17:41:23 2006 +0100
    29.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    29.3 @@ -1,647 +0,0 @@
    29.4 -/*  Generic MTRR (Memory Type Range Register) driver.
    29.5 -
    29.6 -    Copyright (C) 1997-2000  Richard Gooch
    29.7 -    Copyright (c) 2002	     Patrick Mochel
    29.8 -
    29.9 -    This library is free software; you can redistribute it and/or
   29.10 -    modify it under the terms of the GNU Library General Public
   29.11 -    License as published by the Free Software Foundation; either
   29.12 -    version 2 of the License, or (at your option) any later version.
   29.13 -
   29.14 -    This library is distributed in the hope that it will be useful,
   29.15 -    but WITHOUT ANY WARRANTY; without even the implied warranty of
   29.16 -    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   29.17 -    Library General Public License for more details.
   29.18 -
   29.19 -    You should have received a copy of the GNU Library General Public
   29.20 -    License along with this library; if not, write to the Free
   29.21 -    Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
   29.22 -
   29.23 -    Richard Gooch may be reached by email at  rgooch@atnf.csiro.au
   29.24 -    The postal address is:
   29.25 -      Richard Gooch, c/o ATNF, P. O. Box 76, Epping, N.S.W., 2121, Australia.
   29.26 -
   29.27 -    Source: "Pentium Pro Family Developer's Manual, Volume 3:
   29.28 -    Operating System Writer's Guide" (Intel document number 242692),
   29.29 -    section 11.11.7
   29.30 -
   29.31 -    This was cleaned and made readable by Patrick Mochel <mochel@osdl.org> 
   29.32 -    on 6-7 March 2002. 
   29.33 -    Source: Intel Architecture Software Developers Manual, Volume 3: 
   29.34 -    System Programming Guide; Section 9.11. (1997 edition - PPro).
   29.35 -*/
   29.36 -
   29.37 -#include <xen/config.h>
   29.38 -#include <xen/init.h>
   29.39 -#include <xen/lib.h>
   29.40 -#include <xen/smp.h>
   29.41 -#include <xen/spinlock.h>
   29.42 -#include <asm/mtrr.h>
   29.43 -#include <asm/uaccess.h>
   29.44 -#include <asm/processor.h>
   29.45 -#include <asm/msr.h>
   29.46 -#include "mtrr.h"
   29.47 -
   29.48 -#define MTRR_VERSION            "2.0 (20020519)"
   29.49 -
   29.50 -/* No blocking mutexes in Xen. Spin instead. */
   29.51 -#define DECLARE_MUTEX(_m) spinlock_t _m = SPIN_LOCK_UNLOCKED
   29.52 -#define down(_m) spin_lock(_m)
   29.53 -#define up(_m) spin_unlock(_m)
   29.54 -
   29.55 -u32 num_var_ranges = 0;
   29.56 -
   29.57 -unsigned int *usage_table;
   29.58 -static DECLARE_MUTEX(main_lock);
   29.59 -
   29.60 -u32 size_or_mask, size_and_mask;
   29.61 -
   29.62 -static struct mtrr_ops * mtrr_ops[X86_VENDOR_NUM] = {};
   29.63 -
   29.64 -struct mtrr_ops * mtrr_if = NULL;
   29.65 -
   29.66 -__initdata char *mtrr_if_name[] = {
   29.67 -    "none", "Intel", "AMD K6", "Cyrix ARR", "Centaur MCR"
   29.68 -};
   29.69 -
   29.70 -static void set_mtrr(unsigned int reg, unsigned long base,
   29.71 -		     unsigned long size, mtrr_type type);
   29.72 -
   29.73 -extern int arr3_protected;
   29.74 -
   29.75 -static char *mtrr_strings[MTRR_NUM_TYPES] =
   29.76 -{
   29.77 -    "uncachable",               /* 0 */
   29.78 -    "write-combining",          /* 1 */
   29.79 -    "?",                        /* 2 */
   29.80 -    "?",                        /* 3 */
   29.81 -    "write-through",            /* 4 */
   29.82 -    "write-protect",            /* 5 */
   29.83 -    "write-back",               /* 6 */
   29.84 -};
   29.85 -
   29.86 -char *mtrr_attrib_to_str(int x)
   29.87 -{
   29.88 -	return (x <= 6) ? mtrr_strings[x] : "?";
   29.89 -}
   29.90 -
   29.91 -void set_mtrr_ops(struct mtrr_ops * ops)
   29.92 -{
   29.93 -	if (ops->vendor && ops->vendor < X86_VENDOR_NUM)
   29.94 -		mtrr_ops[ops->vendor] = ops;
   29.95 -}
   29.96 -
   29.97 -/*  Returns non-zero if we have the write-combining memory type  */
   29.98 -static int have_wrcomb(void)
   29.99 -{
  29.100 -	return (mtrr_if->have_wrcomb ? mtrr_if->have_wrcomb() : 0);
  29.101 -}
  29.102 -
  29.103 -/*  This function returns the number of variable MTRRs  */
  29.104 -void __init set_num_var_ranges(void)
  29.105 -{
  29.106 -	unsigned long config = 0, dummy;
  29.107 -
  29.108 -	if (use_intel()) {
  29.109 -		rdmsr(MTRRcap_MSR, config, dummy);
  29.110 -	} else if (is_cpu(AMD))
  29.111 -		config = 2;
  29.112 -	else if (is_cpu(CYRIX) || is_cpu(CENTAUR))
  29.113 -		config = 8;
  29.114 -	num_var_ranges = config & 0xff;
  29.115 -}
  29.116 -
  29.117 -static void __init init_table(void)
  29.118 -{
  29.119 -	int i, max;
  29.120 -
  29.121 -	max = num_var_ranges;
  29.122 -	if ((usage_table = xmalloc_array(unsigned int, max)) == NULL) {
  29.123 -		printk(KERN_ERR "mtrr: could not allocate\n");
  29.124 -		return;
  29.125 -	}
  29.126 -	for (i = 0; i < max; i++)
  29.127 -		usage_table[i] = 1;
  29.128 -}
  29.129 -
  29.130 -struct set_mtrr_data {
  29.131 -	atomic_t	count;
  29.132 -	atomic_t	gate;
  29.133 -	unsigned long	smp_base;
  29.134 -	unsigned long	smp_size;
  29.135 -	unsigned int	smp_reg;
  29.136 -	mtrr_type	smp_type;
  29.137 -};
  29.138 -
  29.139 -#ifdef CONFIG_SMP
  29.140 -
  29.141 -static void ipi_handler(void *info)
  29.142 -/*  [SUMMARY] Synchronisation handler. Executed by "other" CPUs.
  29.143 -    [RETURNS] Nothing.
  29.144 -*/
  29.145 -{
  29.146 -	struct set_mtrr_data *data = info;
  29.147 -	unsigned long flags;
  29.148 -
  29.149 -	local_irq_save(flags);
  29.150 -
  29.151 -	atomic_dec(&data->count);
  29.152 -	while(!atomic_read(&data->gate))
  29.153 -		cpu_relax();
  29.154 -
  29.155 -	/*  The master has cleared me to execute  */
  29.156 -	if (data->smp_reg != ~0U) 
  29.157 -		mtrr_if->set(data->smp_reg, data->smp_base, 
  29.158 -			     data->smp_size, data->smp_type);
  29.159 -	else
  29.160 -		mtrr_if->set_all();
  29.161 -
  29.162 -	atomic_dec(&data->count);
  29.163 -	while(atomic_read(&data->gate))
  29.164 -		cpu_relax();
  29.165 -
  29.166 -	atomic_dec(&data->count);
  29.167 -	local_irq_restore(flags);
  29.168 -}
  29.169 -
  29.170 -#endif
  29.171 -
  29.172 -/**
  29.173 - * set_mtrr - update mtrrs on all processors
  29.174 - * @reg:	mtrr in question
  29.175 - * @base:	mtrr base
  29.176 - * @size:	mtrr size
  29.177 - * @type:	mtrr type
  29.178 - *
  29.179 - * This is kinda tricky, but fortunately, Intel spelled it out for us cleanly:
  29.180 - * 
  29.181 - * 1. Send IPI to do the following:
  29.182 - * 2. Disable Interrupts
  29.183 - * 3. Wait for all procs to do so 
  29.184 - * 4. Enter no-fill cache mode
  29.185 - * 5. Flush caches
  29.186 - * 6. Clear PGE bit
  29.187 - * 7. Flush all TLBs
  29.188 - * 8. Disable all range registers
  29.189 - * 9. Update the MTRRs
  29.190 - * 10. Enable all range registers
  29.191 - * 11. Flush all TLBs and caches again
  29.192 - * 12. Enter normal cache mode and reenable caching
  29.193 - * 13. Set PGE 
  29.194 - * 14. Wait for buddies to catch up
  29.195 - * 15. Enable interrupts.
  29.196 - * 
  29.197 - * What does that mean for us? Well, first we set data.count to the number
  29.198 - * of CPUs. As each CPU disables interrupts, it'll decrement it once. We wait
  29.199 - * until it hits 0 and proceed. We set the data.gate flag and reset data.count.
  29.200 - * Meanwhile, they are waiting for that flag to be set. Once it's set, each 
  29.201 - * CPU goes through the transition of updating MTRRs. The CPU vendors may each do it 
  29.202 - * differently, so we call mtrr_if->set() callback and let them take care of it.
  29.203 - * When they're done, they again decrement data->count and wait for data.gate to 
  29.204 - * be reset. 
  29.205 - * When we finish, we wait for data.count to hit 0 and toggle the data.gate flag.
  29.206 - * Everyone then enables interrupts and we all continue on.
  29.207 - *
  29.208 - * Note that the mechanism is the same for UP systems, too; all the SMP stuff
  29.209 - * becomes nops.
  29.210 - */
  29.211 -static void set_mtrr(unsigned int reg, unsigned long base,
  29.212 -		     unsigned long size, mtrr_type type)
  29.213 -{
  29.214 -	struct set_mtrr_data data;
  29.215 -	unsigned long flags;
  29.216 -
  29.217 -	data.smp_reg = reg;
  29.218 -	data.smp_base = base;
  29.219 -	data.smp_size = size;
  29.220 -	data.smp_type = type;
  29.221 -	atomic_set(&data.count, num_booting_cpus() - 1);
  29.222 -	atomic_set(&data.gate,0);
  29.223 -
  29.224 -	/*  Start the ball rolling on other CPUs  */
  29.225 -	if (smp_call_function(ipi_handler, &data, 1, 0) != 0)
  29.226 -		panic("mtrr: timed out waiting for other CPUs\n");
  29.227 -
  29.228 -	local_irq_save(flags);
  29.229 -
  29.230 -	while(atomic_read(&data.count))
  29.231 -		cpu_relax();
  29.232 -
  29.233 -	/* ok, reset count and toggle gate */
  29.234 -	atomic_set(&data.count, num_booting_cpus() - 1);
  29.235 -	atomic_set(&data.gate,1);
  29.236 -
  29.237 -	/* do our MTRR business */
  29.238 -
  29.239 -	/* HACK!
  29.240 -	 * We use this same function to initialize the mtrrs on boot.
  29.241 -	 * The state of the boot cpu's mtrrs has been saved, and we want
  29.242 -	 * to replicate across all the APs. 
  29.243 -	 * If we're doing that @reg is set to something special...
  29.244 -	 */
  29.245 -	if (reg != ~0U) 
  29.246 -		mtrr_if->set(reg,base,size,type);
  29.247 -
  29.248 -	/* wait for the others */
  29.249 -	while(atomic_read(&data.count))
  29.250 -		cpu_relax();
  29.251 -
  29.252 -	atomic_set(&data.count, num_booting_cpus() - 1);
  29.253 -	atomic_set(&data.gate,0);
  29.254 -
  29.255 -	/*
  29.256 -	 * Wait here for everyone to have seen the gate change
  29.257 -	 * So we're the last ones to touch 'data'
  29.258 -	 */
  29.259 -	while(atomic_read(&data.count))
  29.260 -		cpu_relax();
  29.261 -
  29.262 -	local_irq_restore(flags);
  29.263 -}
  29.264 -
  29.265 -/**
  29.266 - *	mtrr_add_page - Add a memory type region
  29.267 - *	@base: Physical base address of region in pages (4 KB)
  29.268 - *	@size: Physical size of region in pages (4 KB)
  29.269 - *	@type: Type of MTRR desired
  29.270 - *	@increment: If this is true do usage counting on the region
  29.271 - *
  29.272 - *	Memory type region registers control the caching on newer Intel and
  29.273 - *	non Intel processors. This function allows drivers to request an
  29.274 - *	MTRR is added. The details and hardware specifics of each processor's
  29.275 - *	implementation are hidden from the caller, but nevertheless the 
  29.276 - *	caller should expect to need to provide a power of two size on an
  29.277 - *	equivalent power of two boundary.
  29.278 - *
  29.279 - *	If the region cannot be added either because all regions are in use
  29.280 - *	or the CPU cannot support it a negative value is returned. On success
  29.281 - *	the register number for this entry is returned, but should be treated
  29.282 - *	as a cookie only.
  29.283 - *
  29.284 - *	On a multiprocessor machine the changes are made to all processors.
  29.285 - *	This is required on x86 by the Intel processors.
  29.286 - *
  29.287 - *	The available types are
  29.288 - *
  29.289 - *	%MTRR_TYPE_UNCACHABLE	-	No caching
  29.290 - *
  29.291 - *	%MTRR_TYPE_WRBACK	-	Write data back in bursts whenever
  29.292 - *
  29.293 - *	%MTRR_TYPE_WRCOMB	-	Write data back soon but allow bursts
  29.294 - *
  29.295 - *	%MTRR_TYPE_WRTHROUGH	-	Cache reads but not writes
  29.296 - *
  29.297 - *	BUGS: Needs a quiet flag for the cases where drivers do not mind
  29.298 - *	failures and do not wish system log messages to be sent.
  29.299 - */
  29.300 -
  29.301 -int mtrr_add_page(unsigned long base, unsigned long size, 
  29.302 -		  unsigned int type, char increment)
  29.303 -{
  29.304 -	int i;
  29.305 -	mtrr_type ltype;
  29.306 -	unsigned long lbase;
  29.307 -	unsigned int lsize;
  29.308 -	int error;
  29.309 -
  29.310 -	if (!mtrr_if)
  29.311 -		return -ENXIO;
  29.312 -		
  29.313 -	if ((error = mtrr_if->validate_add_page(base,size,type)))
  29.314 -		return error;
  29.315 -
  29.316 -	if (type >= MTRR_NUM_TYPES) {
  29.317 -		printk(KERN_WARNING "mtrr: type: %u invalid\n", type);
  29.318 -		return -EINVAL;
  29.319 -	}
  29.320 -
  29.321 -	/*  If the type is WC, check that this processor supports it  */
  29.322 -	if ((type == MTRR_TYPE_WRCOMB) && !have_wrcomb()) {
  29.323 -		printk(KERN_WARNING
  29.324 -		       "mtrr: your processor doesn't support write-combining\n");
  29.325 -		return -ENOSYS;
  29.326 -	}
  29.327 -
  29.328 -	if (base & size_or_mask || size & size_or_mask) {
  29.329 -		printk(KERN_WARNING "mtrr: base or size exceeds the MTRR width\n");
  29.330 -		return -EINVAL;
  29.331 -	}
  29.332 -
  29.333 -	error = -EINVAL;
  29.334 -
  29.335 -	/*  Search for existing MTRR  */
  29.336 -	down(&main_lock);
  29.337 -	for (i = 0; i < num_var_ranges; ++i) {
  29.338 -		mtrr_if->get(i, &lbase, &lsize, &ltype);
  29.339 -		if (base >= lbase + lsize)
  29.340 -			continue;
  29.341 -		if ((base < lbase) && (base + size <= lbase))
  29.342 -			continue;
  29.343 -		/*  At this point we know there is some kind of overlap/enclosure  */
  29.344 -		if ((base < lbase) || (base + size > lbase + lsize)) {
  29.345 -			printk(KERN_WARNING
  29.346 -			       "mtrr: 0x%lx000,0x%lx000 overlaps existing"
  29.347 -			       " 0x%lx000,0x%x000\n", base, size, lbase,
  29.348 -			       lsize);
  29.349 -			goto out;
  29.350 -		}
  29.351 -		/*  New region is enclosed by an existing region  */
  29.352 -		if (ltype != type) {
  29.353 -			if (type == MTRR_TYPE_UNCACHABLE)
  29.354 -				continue;
  29.355 -			printk (KERN_WARNING "mtrr: type mismatch for %lx000,%lx000 old: %s new: %s\n",
  29.356 -			     base, size, mtrr_attrib_to_str(ltype),
  29.357 -			     mtrr_attrib_to_str(type));
  29.358 -			goto out;
  29.359 -		}
  29.360 -		if (increment)
  29.361 -			++usage_table[i];
  29.362 -		error = i;
  29.363 -		goto out;
  29.364 -	}
  29.365 -	/*  Search for an empty MTRR  */
  29.366 -	i = mtrr_if->get_free_region(base, size);
  29.367 -	if (i >= 0) {
  29.368 -		set_mtrr(i, base, size, type);
  29.369 -		usage_table[i] = 1;
  29.370 -	} else
  29.371 -		printk(KERN_INFO "mtrr: no more MTRRs available\n");
  29.372 -	error = i;
  29.373 - out:
  29.374 -	up(&main_lock);
  29.375 -	return error;
  29.376 -}
  29.377 -
  29.378 -/**
  29.379 - *	mtrr_add - Add a memory type region
  29.380 - *	@base: Physical base address of region
  29.381 - *	@size: Physical size of region
  29.382 - *	@type: Type of MTRR desired
  29.383 - *	@increment: If this is true do usage counting on the region
  29.384 - *
  29.385 - *	Memory type region registers control the caching on newer Intel and
  29.386 - *	non Intel processors. This function allows drivers to request an
  29.387 - *	MTRR is added. The details and hardware specifics of each processor's
  29.388 - *	implementation are hidden from the caller, but nevertheless the 
  29.389 - *	caller should expect to need to provide a power of two size on an
  29.390 - *	equivalent power of two boundary.
  29.391 - *
  29.392 - *	If the region cannot be added either because all regions are in use
  29.393 - *	or the CPU cannot support it a negative value is returned. On success
  29.394 - *	the register number for this entry is returned, but should be treated
  29.395 - *	as a cookie only.
  29.396 - *
  29.397 - *	On a multiprocessor machine the changes are made to all processors.
  29.398 - *	This is required on x86 by the Intel processors.
  29.399 - *
  29.400 - *	The available types are
  29.401 - *
  29.402 - *	%MTRR_TYPE_UNCACHABLE	-	No caching
  29.403 - *
  29.404 - *	%MTRR_TYPE_WRBACK	-	Write data back in bursts whenever
  29.405 - *
  29.406 - *	%MTRR_TYPE_WRCOMB	-	Write data back soon but allow bursts
  29.407 - *
  29.408 - *	%MTRR_TYPE_WRTHROUGH	-	Cache reads but not writes
  29.409 - *
  29.410 - *	BUGS: Needs a quiet flag for the cases where drivers do not mind
  29.411 - *	failures and do not wish system log messages to be sent.
  29.412 - */
  29.413 -
  29.414 -int
  29.415 -mtrr_add(unsigned long base, unsigned long size, unsigned int type,
  29.416 -	 char increment)
  29.417 -{
  29.418 -	if ((base & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1))) {
  29.419 -		printk(KERN_WARNING "mtrr: size and base must be multiples of 4 kiB\n");
  29.420 -		printk(KERN_DEBUG "mtrr: size: 0x%lx  base: 0x%lx\n", size, base);
  29.421 -		return -EINVAL;
  29.422 -	}
  29.423 -	return mtrr_add_page(base >> PAGE_SHIFT, size >> PAGE_SHIFT, type,
  29.424 -			     increment);
  29.425 -}
  29.426 -
  29.427 -/**
  29.428 - *	mtrr_del_page - delete a memory type region
  29.429 - *	@reg: Register returned by mtrr_add
  29.430 - *	@base: Physical base address
  29.431 - *	@size: Size of region
  29.432 - *
  29.433 - *	If register is supplied then base and size are ignored. This is
  29.434 - *	how drivers should call it.
  29.435 - *
  29.436 - *	Releases an MTRR region. If the usage count drops to zero the 
  29.437 - *	register is freed and the region returns to default state.
  29.438 - *	On success the register is returned, on failure a negative error
  29.439 - *	code.
  29.440 - */
  29.441 -
  29.442 -int mtrr_del_page(int reg, unsigned long base, unsigned long size)
  29.443 -{
  29.444 -	int i, max;
  29.445 -	mtrr_type ltype;
  29.446 -	unsigned long lbase;
  29.447 -	unsigned int lsize;
  29.448 -	int error = -EINVAL;
  29.449 -
  29.450 -	if (!mtrr_if)
  29.451 -		return -ENXIO;
  29.452 -
  29.453 -	max = num_var_ranges;
  29.454 -	down(&main_lock);
  29.455 -	if (reg < 0) {
  29.456 -		/*  Search for existing MTRR  */
  29.457 -		for (i = 0; i < max; ++i) {
  29.458 -			mtrr_if->get(i, &lbase, &lsize, &ltype);
  29.459 -			if (lbase == base && lsize == size) {
  29.460 -				reg = i;
  29.461 -				break;
  29.462 -			}
  29.463 -		}
  29.464 -		if (reg < 0) {
  29.465 -			printk(KERN_DEBUG "mtrr: no MTRR for %lx000,%lx000 found\n", base,
  29.466 -			       size);
  29.467 -			goto out;
  29.468 -		}
  29.469 -	}
  29.470 -	if (reg >= max) {
  29.471 -		printk(KERN_WARNING "mtrr: register: %d too big\n", reg);
  29.472 -		goto out;
  29.473 -	}
  29.474 -	if (is_cpu(CYRIX) && !use_intel()) {
  29.475 -		if ((reg == 3) && arr3_protected) {
  29.476 -			printk(KERN_WARNING "mtrr: ARR3 cannot be changed\n");
  29.477 -			goto out;
  29.478 -		}
  29.479 -	}
  29.480 -	mtrr_if->get(reg, &lbase, &lsize, &ltype);
  29.481 -	if (lsize < 1) {
  29.482 -		printk(KERN_WARNING "mtrr: MTRR %d not used\n", reg);
  29.483 -		goto out;
  29.484 -	}
  29.485 -	if (usage_table[reg] < 1) {
  29.486 -		printk(KERN_WARNING "mtrr: reg: %d has count=0\n", reg);
  29.487 -		goto out;
  29.488 -	}
  29.489 -	if (--usage_table[reg] < 1)
  29.490 -		set_mtrr(reg, 0, 0, 0);
  29.491 -	error = reg;
  29.492 - out:
  29.493 -	up(&main_lock);
  29.494 -	return error;
  29.495 -}
  29.496 -/**
  29.497 - *	mtrr_del - delete a memory type region
  29.498 - *	@reg: Register returned by mtrr_add
  29.499 - *	@base: Physical base address
  29.500 - *	@size: Size of region
  29.501 - *
  29.502 - *	If register is supplied then base and size are ignored. This is
  29.503 - *	how drivers should call it.
  29.504 - *
  29.505 - *	Releases an MTRR region. If the usage count drops to zero the 
  29.506 - *	register is freed and the region returns to default state.
  29.507 - *	On success the register is returned, on failure a negative error
  29.508 - *	code.
  29.509 - */
  29.510 -
  29.511 -int
  29.512 -mtrr_del(int reg, unsigned long base, unsigned long size)
  29.513 -{
  29.514 -	if ((base & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1))) {
  29.515 -		printk(KERN_INFO "mtrr: size and base must be multiples of 4 kiB\n");
  29.516 -		printk(KERN_DEBUG "mtrr: size: 0x%lx  base: 0x%lx\n", size, base);
  29.517 -		return -EINVAL;
  29.518 -	}
  29.519 -	return mtrr_del_page(reg, base >> PAGE_SHIFT, size >> PAGE_SHIFT);
  29.520 -}
  29.521 -
  29.522 -EXPORT_SYMBOL(mtrr_add);
  29.523 -EXPORT_SYMBOL(mtrr_del);
  29.524 -
  29.525 -/* HACK ALERT!
  29.526 - * These should be called implicitly, but we can't yet until all the initcall
  29.527 - * stuff is done...
  29.528 - */
  29.529 -extern void amd_init_mtrr(void);
  29.530 -extern void cyrix_init_mtrr(void);
  29.531 -extern void centaur_init_mtrr(void);
  29.532 -
  29.533 -static void __init init_ifs(void)
  29.534 -{
  29.535 -	amd_init_mtrr();
  29.536 -	cyrix_init_mtrr();
  29.537 -	centaur_init_mtrr();
  29.538 -}
  29.539 -
  29.540 -static void __init init_other_cpus(void)
  29.541 -{
  29.542 -	if (use_intel())
  29.543 -		get_mtrr_state();
  29.544 -
  29.545 -	/* bring up the other processors */
  29.546 -	set_mtrr(~0U,0,0,0);
  29.547 -
  29.548 -	if (use_intel()) {
  29.549 -		finalize_mtrr_state();
  29.550 -		mtrr_state_warn();
  29.551 -	}
  29.552 -}
  29.553 -
  29.554 -
  29.555 -struct mtrr_value {
  29.556 -	mtrr_type	ltype;
  29.557 -	unsigned long	lbase;
  29.558 -	unsigned int	lsize;
  29.559 -};
  29.560 -
  29.561 -/**
  29.562 - * mtrr_init - initialize mtrrs on the boot CPU
  29.563 - *
  29.564 - * This needs to be called early; before any of the other CPUs are 
  29.565 - * initialized (i.e. before smp_init()).
  29.566 - * 
  29.567 - */
  29.568 -static int __init mtrr_init(void)
  29.569 -{
  29.570 -	init_ifs();
  29.571 -
  29.572 -	if (cpu_has_mtrr) {
  29.573 -		mtrr_if = &generic_mtrr_ops;
  29.574 -		size_or_mask = 0xff000000;	/* 36 bits */
  29.575 -		size_and_mask = 0x00f00000;
  29.576 -			
  29.577 -		switch (boot_cpu_data.x86_vendor) {
  29.578 -		case X86_VENDOR_AMD:
  29.579 -			/* The original Athlon docs said that
  29.580 -			   total addressable memory is 44 bits wide.
  29.581 -			   It was not really clear whether its MTRRs
  29.582 -			   follow this or not. (Read: 44 or 36 bits).
  29.583 -			   However, "x86-64_overview.pdf" explicitly
  29.584 -			   states that "previous implementations support
  29.585 -			   36 bit MTRRs" and also provides a way to
  29.586 -			   query the width (in bits) of the physical
  29.587 -			   addressable memory on the Hammer family.
  29.588 -			 */
  29.589 -			if (boot_cpu_data.x86 == 15
  29.590 -			    && (cpuid_eax(0x80000000) >= 0x80000008)) {
  29.591 -				u32 phys_addr;
  29.592 -				phys_addr = cpuid_eax(0x80000008) & 0xff;
  29.593 -				size_or_mask =
  29.594 -				    ~((1 << (phys_addr - PAGE_SHIFT)) - 1);
  29.595 -				size_and_mask = ~size_or_mask & 0xfff00000;
  29.596 -			}
  29.597 -			/* Athlon MTRRs use an Intel-compatible interface for 
  29.598 -			 * getting and setting */
  29.599 -			break;
  29.600 -		case X86_VENDOR_CENTAUR:
  29.601 -			if (boot_cpu_data.x86 == 6) {
  29.602 -				/* VIA Cyrix family have Intel style MTRRs, but don't support PAE */
  29.603 -				size_or_mask = 0xfff00000;	/* 32 bits */
  29.604 -				size_and_mask = 0;
  29.605 -			}
  29.606 -			break;
  29.607 -		
  29.608 -		default:
  29.609 -			break;
  29.610 -		}
  29.611 -	} else {
  29.612 -		switch (boot_cpu_data.x86_vendor) {
  29.613 -		case X86_VENDOR_AMD:
  29.614 -			if (cpu_has_k6_mtrr) {
  29.615 -				/* Pre-Athlon (K6) AMD CPU MTRRs */
  29.616 -				mtrr_if = mtrr_ops[X86_VENDOR_AMD];
  29.617 -				size_or_mask = 0xfff00000;	/* 32 bits */
  29.618 -				size_and_mask = 0;
  29.619 -			}
  29.620 -			break;
  29.621 -		case X86_VENDOR_CENTAUR:
  29.622 -			if (cpu_has_centaur_mcr) {
  29.623 -				mtrr_if = mtrr_ops[X86_VENDOR_CENTAUR];
  29.624 -				size_or_mask = 0xfff00000;	/* 32 bits */
  29.625 -				size_and_mask = 0;
  29.626 -			}
  29.627 -			break;
  29.628 -		case X86_VENDOR_CYRIX:
  29.629 -			if (cpu_has_cyrix_arr) {
  29.630 -				mtrr_if = mtrr_ops[X86_VENDOR_CYRIX];
  29.631 -				size_or_mask = 0xfff00000;	/* 32 bits */
  29.632 -				size_and_mask = 0;
  29.633 -			}
  29.634 -			break;
  29.635 -		default:
  29.636 -			break;
  29.637 -		}
  29.638 -	}
  29.639 -	printk(KERN_INFO "mtrr: v%s\n",MTRR_VERSION);
  29.640 -
  29.641 -	if (mtrr_if) {
  29.642 -		set_num_var_ranges();
  29.643 -		init_table();
  29.644 -		init_other_cpus();
  29.645 -		return 0;
  29.646 -	}
  29.647 -	return -ENXIO;
  29.648 -}
  29.649 -
  29.650 -__initcall(mtrr_init);
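
Editorial aside (not part of the changeset): the set_mtrr() comment above describes a two-counter rendezvous, and a user-space model can make the handshake easier to follow. This is a minimal sketch only: NCPUS threads stand in for CPUs and do_local_update() stands in for mtrr_if->set()/set_all(); none of these names exist in the Xen tree.

/* Standalone model of the count/gate rendezvous used by set_mtrr().
 * Build with: cc -pthread rendezvous.c */
#include <stdatomic.h>
#include <pthread.h>
#include <stdio.h>

#define NCPUS 4

static atomic_int count;
static atomic_int gate;

static void do_local_update(int cpu)      /* stands in for mtrr_if->set()/set_all() */
{
    printf("cpu%d: updating MTRRs\n", cpu);
}

static void *secondary(void *arg)         /* stands in for ipi_handler() */
{
    int cpu = (int)(long)arg;

    atomic_fetch_sub(&count, 1);          /* "interrupts disabled" */
    while (!atomic_load(&gate))
        ;                                 /* wait for the master to open the gate */

    do_local_update(cpu);

    atomic_fetch_sub(&count, 1);          /* update done */
    while (atomic_load(&gate))
        ;                                 /* wait for the gate to close again */

    atomic_fetch_sub(&count, 1);          /* acknowledge; master may now free 'data' */
    return NULL;
}

int main(void)                            /* stands in for set_mtrr() on the master */
{
    pthread_t t[NCPUS - 1];
    long i;

    atomic_store(&count, NCPUS - 1);
    atomic_store(&gate, 0);
    for (i = 0; i < NCPUS - 1; i++)
        pthread_create(&t[i], NULL, secondary, (void *)(i + 1));

    while (atomic_load(&count)) ;         /* all secondaries parked with IRQs off */
    atomic_store(&count, NCPUS - 1);
    atomic_store(&gate, 1);               /* release them */

    do_local_update(0);                   /* master updates its own MTRRs */

    while (atomic_load(&count)) ;         /* everyone has updated */
    atomic_store(&count, NCPUS - 1);
    atomic_store(&gate, 0);

    while (atomic_load(&count)) ;         /* everyone has seen the gate close */
    for (i = 0; i < NCPUS - 1; i++)
        pthread_join(t[i], NULL);
    return 0;
}
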
    30.1 --- a/xen/arch/x86/mtrr/mtrr.h	Mon Feb 13 17:41:23 2006 +0100
    30.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    30.3 @@ -1,99 +0,0 @@
    30.4 -/*
    30.5 - * local mtrr defines.
    30.6 - */
    30.7 -
    30.8 -#ifndef TRUE
    30.9 -#define TRUE  1
   30.10 -#define FALSE 0
   30.11 -#endif
   30.12 -
   30.13 -#define MTRRcap_MSR     0x0fe
   30.14 -#define MTRRdefType_MSR 0x2ff
   30.15 -
   30.16 -#define MTRRphysBase_MSR(reg) (0x200 + 2 * (reg))
   30.17 -#define MTRRphysMask_MSR(reg) (0x200 + 2 * (reg) + 1)
   30.18 -
   30.19 -#define NUM_FIXED_RANGES 88
   30.20 -#define MTRRfix64K_00000_MSR 0x250
   30.21 -#define MTRRfix16K_80000_MSR 0x258
   30.22 -#define MTRRfix16K_A0000_MSR 0x259
   30.23 -#define MTRRfix4K_C0000_MSR 0x268
   30.24 -#define MTRRfix4K_C8000_MSR 0x269
   30.25 -#define MTRRfix4K_D0000_MSR 0x26a
   30.26 -#define MTRRfix4K_D8000_MSR 0x26b
   30.27 -#define MTRRfix4K_E0000_MSR 0x26c
   30.28 -#define MTRRfix4K_E8000_MSR 0x26d
   30.29 -#define MTRRfix4K_F0000_MSR 0x26e
   30.30 -#define MTRRfix4K_F8000_MSR 0x26f
   30.31 -
   30.32 -#define MTRR_CHANGE_MASK_FIXED     0x01
   30.33 -#define MTRR_CHANGE_MASK_VARIABLE  0x02
   30.34 -#define MTRR_CHANGE_MASK_DEFTYPE   0x04
   30.35 -
   30.36 -/* In the Intel processor's MTRR interface, the MTRR type is always held in
   30.37 -   an 8 bit field: */
   30.38 -typedef u8 mtrr_type;
   30.39 -
   30.40 -struct mtrr_ops {
   30.41 -	u32	vendor;
   30.42 -	u32	use_intel_if;
   30.43 -	void	(*init)(void);
   30.44 -	void	(*set)(unsigned int reg, unsigned long base,
   30.45 -		       unsigned long size, mtrr_type type);
   30.46 -	void	(*set_all)(void);
   30.47 -
   30.48 -	void	(*get)(unsigned int reg, unsigned long *base,
   30.49 -		       unsigned int *size, mtrr_type * type);
   30.50 -	int	(*get_free_region) (unsigned long base, unsigned long size);
   30.51 -
   30.52 -	int	(*validate_add_page)(unsigned long base, unsigned long size,
   30.53 -				     unsigned int type);
   30.54 -	int	(*have_wrcomb)(void);
   30.55 -};
   30.56 -
   30.57 -extern int generic_get_free_region(unsigned long base, unsigned long size);
   30.58 -extern int generic_validate_add_page(unsigned long base, unsigned long size,
   30.59 -				     unsigned int type);
   30.60 -
   30.61 -extern struct mtrr_ops generic_mtrr_ops;
   30.62 -
   30.63 -extern int generic_have_wrcomb(void);
   30.64 -extern int positive_have_wrcomb(void);
   30.65 -
   30.66 -/* library functions for processor-specific routines */
   30.67 -struct set_mtrr_context {
   30.68 -	unsigned long flags;
   30.69 -	unsigned long deftype_lo;
   30.70 -	unsigned long deftype_hi;
   30.71 -	unsigned long cr4val;
   30.72 -	unsigned long ccr3;
   30.73 -};
   30.74 -
   30.75 -struct mtrr_var_range {
   30.76 -	unsigned long base_lo;
   30.77 -	unsigned long base_hi;
   30.78 -	unsigned long mask_lo;
   30.79 -	unsigned long mask_hi;
   30.80 -};
   30.81 -
   30.82 -void set_mtrr_done(struct set_mtrr_context *ctxt);
   30.83 -void set_mtrr_cache_disable(struct set_mtrr_context *ctxt);
   30.84 -void set_mtrr_prepare_save(struct set_mtrr_context *ctxt);
   30.85 -
   30.86 -void get_mtrr_state(void);
   30.87 -
   30.88 -extern void set_mtrr_ops(struct mtrr_ops * ops);
   30.89 -
   30.90 -extern u32 size_or_mask, size_and_mask;
   30.91 -extern struct mtrr_ops * mtrr_if;
   30.92 -
   30.93 -#define is_cpu(vnd)	(mtrr_if && mtrr_if->vendor == X86_VENDOR_##vnd)
   30.94 -#define use_intel()	(mtrr_if && mtrr_if->use_intel_if == 1)
   30.95 -
   30.96 -extern unsigned int num_var_ranges;
   30.97 -
   30.98 -void finalize_mtrr_state(void);
   30.99 -void mtrr_state_warn(void);
  30.100 -char *mtrr_attrib_to_str(int x);
  30.101 -
  30.102 -extern char * mtrr_if_name[];
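
Editorial aside (not part of the changeset): the mtrr_ops table above is consumed by per-vendor back-ends. The following is a hedged sketch of how such a back-end plugs in, assuming the declarations from this header; the amd_* stubs are illustrative stand-ins for the real cpu/mtrr/amd.c driver.

/* Illustrative vendor back-end wiring itself into the dispatch table. */
static void amd_get_mtrr(unsigned int reg, unsigned long *base,
                         unsigned int *size, mtrr_type *type)
{
    /* ... read the vendor-specific range registers ... */
}

static void amd_set_mtrr(unsigned int reg, unsigned long base,
                         unsigned long size, mtrr_type type)
{
    /* ... program the vendor-specific range registers ... */
}

static int amd_validate_add_page(unsigned long base, unsigned long size,
                                 unsigned int type)
{
    return 0;   /* ... reject unsupported alignments/types here ... */
}

static struct mtrr_ops amd_mtrr_ops = {
    .vendor            = X86_VENDOR_AMD,
    .set               = amd_set_mtrr,
    .get               = amd_get_mtrr,
    .get_free_region   = generic_get_free_region,
    .validate_add_page = amd_validate_add_page,
    .have_wrcomb       = positive_have_wrcomb,
};

void __init amd_init_mtrr(void)
{
    set_mtrr_ops(&amd_mtrr_ops);   /* mtrr_init() later selects it by CPU vendor */
}
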
    31.1 --- a/xen/arch/x86/mtrr/state.c	Mon Feb 13 17:41:23 2006 +0100
    31.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    31.3 @@ -1,78 +0,0 @@
    31.4 -#include <xen/mm.h>
    31.5 -#include <xen/init.h>
    31.6 -#include <asm/io.h>
    31.7 -#include <asm/mtrr.h>
    31.8 -#include <asm/msr.h>
    31.9 -#include "mtrr.h"
   31.10 -
   31.11 -
   31.12 -/*  Put the processor into a state where MTRRs can be safely set  */
   31.13 -void set_mtrr_prepare_save(struct set_mtrr_context *ctxt)
   31.14 -{
   31.15 -	unsigned int cr0;
   31.16 -
   31.17 -	/*  Disable interrupts locally  */
   31.18 -	local_irq_save(ctxt->flags);
   31.19 -
   31.20 -	if (use_intel() || is_cpu(CYRIX)) {
   31.21 -
   31.22 -		/*  Save value of CR4 and clear Page Global Enable (bit 7)  */
   31.23 -		if ( cpu_has_pge ) {
   31.24 -			ctxt->cr4val = read_cr4();
   31.25 -			write_cr4(ctxt->cr4val & (unsigned char) ~(1 << 7));
   31.26 -		}
   31.27 -
   31.28 -		/*  Disable and flush caches. Note that wbinvd flushes the TLBs as
   31.29 -		    a side-effect  */
   31.30 -		cr0 = read_cr0() | 0x40000000;
   31.31 -		wbinvd();
   31.32 -		write_cr0(cr0);
   31.33 -		wbinvd();
   31.34 -
   31.35 -		if (use_intel())
   31.36 -			/*  Save MTRR state */
   31.37 -			rdmsr(MTRRdefType_MSR, ctxt->deftype_lo, ctxt->deftype_hi);
   31.38 -		else
   31.39 -			/* Cyrix ARRs - everything else were excluded at the top */
   31.40 -			ctxt->ccr3 = getCx86(CX86_CCR3);
   31.41 -	}
   31.42 -}
   31.43 -
   31.44 -void set_mtrr_cache_disable(struct set_mtrr_context *ctxt)
   31.45 -{
   31.46 -	if (use_intel()) 
   31.47 -		/*  Disable MTRRs, and set the default type to uncached  */
   31.48 -		wrmsr(MTRRdefType_MSR, ctxt->deftype_lo & 0xf300UL,
   31.49 -		      ctxt->deftype_hi);
   31.50 -	else if (is_cpu(CYRIX))
   31.51 -		/* Cyrix ARRs - everything else were excluded at the top */
   31.52 -		setCx86(CX86_CCR3, (ctxt->ccr3 & 0x0f) | 0x10);
   31.53 -}
   31.54 -
   31.55 -/*  Restore the processor after a set_mtrr_prepare  */
   31.56 -void set_mtrr_done(struct set_mtrr_context *ctxt)
   31.57 -{
   31.58 -	if (use_intel() || is_cpu(CYRIX)) {
   31.59 -
   31.60 -		/*  Flush caches and TLBs  */
   31.61 -		wbinvd();
   31.62 -
   31.63 -		/*  Restore MTRRdefType  */
   31.64 -		if (use_intel())
   31.65 -			/* Intel (P6) standard MTRRs */
   31.66 -			wrmsr(MTRRdefType_MSR, ctxt->deftype_lo, ctxt->deftype_hi);
   31.67 -		else
   31.68 -			/* Cyrix ARRs - everything else was excluded at the top */
   31.69 -			setCx86(CX86_CCR3, ctxt->ccr3);
   31.70 -		
   31.71 -		/*  Enable caches  */
   31.72 -		write_cr0(read_cr0() & 0xbfffffff);
   31.73 -
   31.74 -		/*  Restore value of CR4  */
   31.75 -		if ( cpu_has_pge )
   31.76 -			write_cr4(ctxt->cr4val);
   31.77 -	}
   31.78 -	/*  Re-enable interrupts locally (if enabled previously)  */
   31.79 -	local_irq_restore(ctxt->flags);
   31.80 -}
   31.81 -
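
Editorial aside (not part of the changeset): the three helpers above are meant to bracket any rewrite of the range registers. A minimal sketch of the expected calling sequence, assuming the declarations from mtrr.h; example_set_range() is an illustrative name only.

static void example_set_range(void)
{
    struct set_mtrr_context ctxt;

    set_mtrr_prepare_save(&ctxt);   /* IRQs off, caches flushed/disabled, state saved */
    set_mtrr_cache_disable(&ctxt);  /* MTRRs (or Cyrix ARRs) globally disabled */

    /* ... write MTRRphysBase_MSR/MTRRphysMask_MSR or ARR registers here ... */

    set_mtrr_done(&ctxt);           /* restore deftype/CCR3, re-enable caches and IRQs */
}
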
    32.1 --- a/xen/arch/x86/setup.c	Mon Feb 13 17:41:23 2006 +0100
    32.2 +++ b/xen/arch/x86/setup.c	Tue Feb 14 16:23:43 2006 +0100
    32.3 @@ -440,7 +440,7 @@ void __init __start_xen(multiboot_info_t
    32.4      {
    32.5          max_cpus = 0;
    32.6          smp_num_siblings = 1;
    32.7 -        boot_cpu_data.x86_num_cores = 1;
    32.8 +        boot_cpu_data.x86_max_cores = 1;
    32.9      }
   32.10  
   32.11      smp_prepare_cpus(max_cpus);
    33.1 --- a/xen/arch/x86/smp.c	Mon Feb 13 17:41:23 2006 +0100
    33.2 +++ b/xen/arch/x86/smp.c	Tue Feb 14 16:23:43 2006 +0100
    33.3 @@ -188,7 +188,7 @@ static spinlock_t flush_lock = SPIN_LOCK
    33.4  static cpumask_t flush_cpumask;
    33.5  static unsigned long flush_va;
    33.6  
    33.7 -asmlinkage void smp_invalidate_interrupt(void)
    33.8 +fastcall void smp_invalidate_interrupt(void)
    33.9  {
   33.10      ack_APIC_irq();
   33.11      perfc_incrc(ipis);
   33.12 @@ -339,13 +339,13 @@ void smp_send_stop(void)
   33.13      local_irq_enable();
   33.14  }
   33.15  
   33.16 -asmlinkage void smp_event_check_interrupt(void)
   33.17 +fastcall void smp_event_check_interrupt(struct cpu_user_regs *regs)
   33.18  {
   33.19      ack_APIC_irq();
   33.20      perfc_incrc(ipis);
   33.21  }
   33.22  
   33.23 -asmlinkage void smp_call_function_interrupt(void)
   33.24 +fastcall void smp_call_function_interrupt(struct cpu_user_regs *regs)
   33.25  {
   33.26      void (*func)(void *info) = call_data->func;
   33.27      void *info = call_data->info;
    34.1 --- a/xen/arch/x86/smpboot.c	Mon Feb 13 17:41:23 2006 +0100
    34.2 +++ b/xen/arch/x86/smpboot.c	Tue Feb 14 16:23:43 2006 +0100
    34.3 @@ -53,8 +53,6 @@
    34.4  static int _foo;
    34.5  #define set_kernel_exec(x,y) (_foo=0)
    34.6  #define alloc_bootmem_low_pages(x) __va(0x90000) /* trampoline address */
    34.7 -int tainted;
    34.8 -#define TAINT_UNSAFE_SMP 0
    34.9  
   34.10  /* Set if we find a B stepping CPU */
   34.11  static int __initdata smp_b_stepping;
   34.12 @@ -1110,7 +1108,7 @@ static void __init smp_boot_cpus(unsigne
   34.13  			smp_num_siblings = siblings;
   34.14  		}
   34.15  
   34.16 -		if (c->x86_num_cores > 1) {
   34.17 +		if (c->x86_max_cores > 1) {
   34.18  			for (i = 0; i < NR_CPUS; i++) {
   34.19  				if (!cpu_isset(i, cpu_callout_map))
   34.20  					continue;
    35.1 --- a/xen/arch/x86/traps.c	Mon Feb 13 17:41:23 2006 +0100
    35.2 +++ b/xen/arch/x86/traps.c	Tue Feb 14 16:23:43 2006 +0100
    35.3 @@ -981,26 +981,26 @@ static int emulate_privileged_op(struct 
    35.4          {
    35.5  #ifdef CONFIG_X86_64
    35.6          case MSR_FS_BASE:
    35.7 -            if ( wrmsr_user(MSR_FS_BASE, regs->eax, regs->edx) )
    35.8 +            if ( wrmsr_safe(MSR_FS_BASE, regs->eax, regs->edx) )
    35.9                  goto fail;
   35.10              v->arch.guest_context.fs_base =
   35.11                  ((u64)regs->edx << 32) | regs->eax;
   35.12              break;
   35.13          case MSR_GS_BASE:
   35.14 -            if ( wrmsr_user(MSR_GS_BASE, regs->eax, regs->edx) )
   35.15 +            if ( wrmsr_safe(MSR_GS_BASE, regs->eax, regs->edx) )
   35.16                  goto fail;
   35.17              v->arch.guest_context.gs_base_kernel =
   35.18                  ((u64)regs->edx << 32) | regs->eax;
   35.19              break;
   35.20          case MSR_SHADOW_GS_BASE:
   35.21 -            if ( wrmsr_user(MSR_SHADOW_GS_BASE, regs->eax, regs->edx) )
   35.22 +            if ( wrmsr_safe(MSR_SHADOW_GS_BASE, regs->eax, regs->edx) )
   35.23                  goto fail;
   35.24              v->arch.guest_context.gs_base_user =
   35.25                  ((u64)regs->edx << 32) | regs->eax;
   35.26              break;
   35.27  #endif
   35.28          default:
   35.29 -            if ( (rdmsr_user(regs->ecx, l, h) != 0) ||
   35.30 +            if ( (rdmsr_safe(regs->ecx, l, h) != 0) ||
   35.31                   (regs->ecx != MSR_EFER) ||
   35.32                   (regs->eax != l) || (regs->edx != h) )
   35.33                  DPRINTK("Domain attempted WRMSR %p from "
   35.34 @@ -1028,13 +1028,13 @@ static int emulate_privileged_op(struct 
   35.35              break;
   35.36  #endif
   35.37          case MSR_EFER:
   35.38 -            if ( rdmsr_user(regs->ecx, regs->eax, regs->edx) )
   35.39 +            if ( rdmsr_safe(regs->ecx, regs->eax, regs->edx) )
   35.40                  goto fail;
   35.41              break;
   35.42          default:
   35.43              DPRINTK("Domain attempted RDMSR %p.\n", _p(regs->ecx));
   35.44              /* Everyone can read the MSR space. */
   35.45 -            if ( rdmsr_user(regs->ecx, regs->eax, regs->edx) )
   35.46 +            if ( rdmsr_safe(regs->ecx, regs->eax, regs->edx) )
   35.47                  goto fail;
   35.48              break;
   35.49          }
    36.1 --- a/xen/arch/x86/x86_32/traps.c	Mon Feb 13 17:41:23 2006 +0100
    36.2 +++ b/xen/arch/x86/x86_32/traps.c	Tue Feb 14 16:23:43 2006 +0100
    36.3 @@ -1,5 +1,6 @@
    36.4  
    36.5  #include <xen/config.h>
    36.6 +#include <xen/compile.h>
    36.7  #include <xen/domain_page.h>
    36.8  #include <xen/init.h>
    36.9  #include <xen/sched.h>
   36.10 @@ -20,6 +21,7 @@ void show_registers(struct cpu_user_regs
   36.11  {
   36.12      struct cpu_user_regs fault_regs = *regs;
   36.13      unsigned long fault_crs[8];
   36.14 +    char taint_str[TAINT_STRING_MAX_LEN];
   36.15      const char *context;
   36.16  
   36.17      if ( HVM_DOMAIN(current) && GUEST_MODE(regs) )
   36.18 @@ -49,6 +51,9 @@ void show_registers(struct cpu_user_regs
   36.19          fault_crs[3] = read_cr3();
   36.20      }
   36.21  
   36.22 +    printk("----[ Xen-%d.%d%s    %s ]----\n",
   36.23 +           XEN_VERSION, XEN_SUBVERSION, XEN_EXTRAVERSION,
   36.24 +           print_tainted(taint_str));
   36.25      printk("CPU:    %d\nEIP:    %04x:[<%08x>]",
   36.26             smp_processor_id(), fault_regs.cs, fault_regs.eip);
   36.27      if ( !GUEST_MODE(regs) )
   36.28 @@ -201,11 +206,11 @@ unsigned long do_iret(void)
   36.29  }
   36.30  
   36.31  BUILD_SMP_INTERRUPT(deferred_nmi, TRAP_deferred_nmi)
   36.32 -asmlinkage void smp_deferred_nmi(struct cpu_user_regs regs)
   36.33 +fastcall void smp_deferred_nmi(struct cpu_user_regs *regs)
   36.34  {
   36.35      asmlinkage void do_nmi(struct cpu_user_regs *);
   36.36      ack_APIC_irq();
   36.37 -    do_nmi(&regs);
   36.38 +    do_nmi(regs);
   36.39  }
   36.40  
   36.41  void __init percpu_traps_init(void)
    37.1 --- a/xen/arch/x86/x86_64/mm.c	Mon Feb 13 17:41:23 2006 +0100
    37.2 +++ b/xen/arch/x86/x86_64/mm.c	Tue Feb 14 16:23:43 2006 +0100
    37.3 @@ -243,21 +243,21 @@ long do_set_segment_base(unsigned int wh
    37.4      switch ( which )
    37.5      {
    37.6      case SEGBASE_FS:
    37.7 -        if ( wrmsr_user(MSR_FS_BASE, base, base>>32) )
    37.8 +        if ( wrmsr_safe(MSR_FS_BASE, base, base>>32) )
    37.9              ret = -EFAULT;
   37.10          else
   37.11              v->arch.guest_context.fs_base = base;
   37.12          break;
   37.13  
   37.14      case SEGBASE_GS_USER:
   37.15 -        if ( wrmsr_user(MSR_SHADOW_GS_BASE, base, base>>32) )
   37.16 +        if ( wrmsr_safe(MSR_SHADOW_GS_BASE, base, base>>32) )
   37.17              ret = -EFAULT;
   37.18          else
   37.19              v->arch.guest_context.gs_base_user = base;
   37.20          break;
   37.21  
   37.22      case SEGBASE_GS_KERNEL:
   37.23 -        if ( wrmsr_user(MSR_GS_BASE, base, base>>32) )
   37.24 +        if ( wrmsr_safe(MSR_GS_BASE, base, base>>32) )
   37.25              ret = -EFAULT;
   37.26          else
   37.27              v->arch.guest_context.gs_base_kernel = base;
    38.1 --- a/xen/arch/x86/x86_64/traps.c	Mon Feb 13 17:41:23 2006 +0100
    38.2 +++ b/xen/arch/x86/x86_64/traps.c	Tue Feb 14 16:23:43 2006 +0100
    38.3 @@ -1,5 +1,6 @@
    38.4  
    38.5  #include <xen/config.h>
    38.6 +#include <xen/compile.h>
    38.7  #include <xen/init.h>
    38.8  #include <xen/sched.h>
    38.9  #include <xen/lib.h>
   38.10 @@ -20,6 +21,7 @@ void show_registers(struct cpu_user_regs
   38.11  {
   38.12      struct cpu_user_regs fault_regs = *regs;
   38.13      unsigned long fault_crs[8];
   38.14 +    char taint_str[TAINT_STRING_MAX_LEN];
   38.15      const char *context;
   38.16  
   38.17      if ( HVM_DOMAIN(current) && GUEST_MODE(regs) )
   38.18 @@ -35,6 +37,9 @@ void show_registers(struct cpu_user_regs
   38.19          fault_crs[3] = read_cr3();
   38.20      }
   38.21  
   38.22 +    printk("----[ Xen-%d.%d%s    %s ]----\n",
   38.23 +           XEN_VERSION, XEN_SUBVERSION, XEN_EXTRAVERSION,
   38.24 +           print_tainted(taint_str));
   38.25      printk("CPU:    %d\nRIP:    %04x:[<%016lx>]",
   38.26             smp_processor_id(), fault_regs.cs, fault_regs.rip);
   38.27      if ( !GUEST_MODE(regs) )
    39.1 --- a/xen/common/kernel.c	Mon Feb 13 17:41:23 2006 +0100
    39.2 +++ b/xen/common/kernel.c	Tue Feb 14 16:23:43 2006 +0100
    39.3 @@ -14,6 +14,8 @@
    39.4  #include <public/nmi.h>
    39.5  #include <public/version.h>
    39.6  
    39.7 +int tainted;
    39.8 +
    39.9  void cmdline_parse(char *cmdline)
   39.10  {
   39.11      char opt[100], *optval, *p = cmdline, *q;
   39.12 @@ -78,6 +80,37 @@ void cmdline_parse(char *cmdline)
   39.13      }
   39.14  }
   39.15  
   39.16 +/**
   39.17 + *      print_tainted - return a string to represent the kernel taint state.
   39.18 + *
   39.19 + *  'S' - SMP with CPUs not designed for SMP.
   39.20 + *  'M' - Machine had a machine check experience.
   39.21 + *  'B' - System has hit bad_page.
   39.22 + *
    39.23 + *      The string is overwritten by the next call to print_tainted().
   39.24 + */
   39.25 +char *print_tainted(char *str)
   39.26 +{
   39.27 +    if ( tainted )
   39.28 +    {
   39.29 +        snprintf(str, TAINT_STRING_MAX_LEN, "Tainted: %c%c%c",
   39.30 +                 tainted & TAINT_UNSAFE_SMP ? 'S' : ' ',
   39.31 +                 tainted & TAINT_MACHINE_CHECK ? 'M' : ' ',
   39.32 +                 tainted & TAINT_BAD_PAGE ? 'B' : ' ');
   39.33 +    }
   39.34 +    else
   39.35 +    {
   39.36 +        snprintf(str, TAINT_STRING_MAX_LEN, "Not tainted");
   39.37 +    }
   39.38 +
   39.39 +    return str;
   39.40 +}
   39.41 +
   39.42 +void add_taint(unsigned flag)
   39.43 +{
   39.44 +    tainted |= flag;
   39.45 +}
   39.46 +
   39.47  /*
   39.48   * Simple hypercalls.
   39.49   */
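
Editorial aside (not part of the changeset): a standalone model of how the new taint tracking reads in a crash dump. The version banner text below is illustrative; in the hypervisor the taint bit would be set by, e.g., the machine-check handler and printed by show_registers().

#include <stdio.h>

#define TAINT_UNSAFE_SMP     (1<<0)
#define TAINT_MACHINE_CHECK  (1<<1)
#define TAINT_BAD_PAGE       (1<<2)
#define TAINT_STRING_MAX_LEN 20

static int tainted;

static void add_taint(unsigned flag) { tainted |= flag; }

static char *print_tainted(char *str)
{
    if (tainted)
        snprintf(str, TAINT_STRING_MAX_LEN, "Tainted: %c%c%c",
                 tainted & TAINT_UNSAFE_SMP    ? 'S' : ' ',
                 tainted & TAINT_MACHINE_CHECK ? 'M' : ' ',
                 tainted & TAINT_BAD_PAGE      ? 'B' : ' ');
    else
        snprintf(str, TAINT_STRING_MAX_LEN, "Not tainted");
    return str;
}

int main(void)
{
    char buf[TAINT_STRING_MAX_LEN];

    printf("----[ Xen-3.0-unstable    %s ]----\n", print_tainted(buf));
    add_taint(TAINT_MACHINE_CHECK);              /* as the MCE path would */
    printf("----[ Xen-3.0-unstable    %s ]----\n", print_tainted(buf));
    return 0;   /* prints "Not tainted", then "Tainted:  M " */
}
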
    40.1 --- a/xen/include/asm-x86/bitops.h	Mon Feb 13 17:41:23 2006 +0100
    40.2 +++ b/xen/include/asm-x86/bitops.h	Tue Feb 14 16:23:43 2006 +0100
    40.3 @@ -322,6 +322,58 @@ static __inline__ unsigned int find_firs
    40.4  }
    40.5  
    40.6  /**
    40.7 + * ffz - find first zero in word.
    40.8 + * @word: The word to search
    40.9 + *
   40.10 + * Undefined if no zero exists, so code should check against ~0UL first.
   40.11 + */
   40.12 +static inline unsigned long ffz(unsigned long word)
   40.13 +{
   40.14 +	__asm__("bsf %1,%0"
   40.15 +		:"=r" (word)
   40.16 +		:"r" (~word));
   40.17 +	return word;
   40.18 +}
   40.19 +
   40.20 +#define fls64(x)   generic_fls64(x)
   40.21 +
   40.22 +/**
   40.23 + * ffs - find first bit set
   40.24 + * @x: the word to search
   40.25 + *
   40.26 + * This is defined the same way as
   40.27 + * the libc and compiler builtin ffs routines, therefore
   40.28 + * differs in spirit from the above ffz (man ffs).
   40.29 + */
   40.30 +static inline int ffs(int x)
   40.31 +{
   40.32 +	int r;
   40.33 +
   40.34 +	__asm__("bsfl %1,%0\n\t"
   40.35 +		"jnz 1f\n\t"
   40.36 +		"movl $-1,%0\n"
   40.37 +		"1:" : "=r" (r) : "rm" (x));
   40.38 +	return r+1;
   40.39 +}
   40.40 +
   40.41 +/**
   40.42 + * fls - find last bit set
   40.43 + * @x: the word to search
   40.44 + *
   40.45 + * This is defined the same way as ffs.
   40.46 + */
   40.47 +static inline int fls(int x)
   40.48 +{
   40.49 +	int r;
   40.50 +
   40.51 +	__asm__("bsrl %1,%0\n\t"
   40.52 +		"jnz 1f\n\t"
   40.53 +		"movl $-1,%0\n"
   40.54 +		"1:" : "=r" (r) : "rm" (x));
   40.55 +	return r+1;
   40.56 +}
   40.57 +
   40.58 +/**
   40.59   * hweightN - returns the hamming weight of a N-bit word
   40.60   * @x: the word to weigh
   40.61   *
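
Editorial aside (not part of the changeset): the return conventions of the helpers added above differ and are easy to trip over. Below is a small standalone check of the same conventions, using the libc ffs() and GCC builtins in place of the inline assembly.

#include <assert.h>
#include <strings.h>    /* libc ffs(), same convention as the helper above */

int main(void)
{
    assert(ffs(0) == 0);                       /* ffs: 1-based, 0 when no bit set */
    assert(ffs(0x8) == 4);
    assert((32 - __builtin_clz(0x8)) == 4);    /* fls(0x8) == 4; fls(0) == 0 */
    assert(__builtin_ctzl(~0x7UL) == 3);       /* ffz(0x7) == 3, 0-based; undefined for ~0UL */
    return 0;
}
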
    41.1 --- a/xen/include/asm-x86/config.h	Mon Feb 13 17:41:23 2006 +0100
    41.2 +++ b/xen/include/asm-x86/config.h	Tue Feb 14 16:23:43 2006 +0100
    41.3 @@ -23,6 +23,7 @@
    41.4  #define CONFIG_X86_GOOD_APIC 1
    41.5  #define CONFIG_X86_IO_APIC 1
    41.6  #define CONFIG_HPET_TIMER 1
    41.7 +#define CONFIG_X86_MCE_P4THERMAL 1
    41.8  
    41.9  /* Intel P4 currently has largest cache line (L2 line size is 128 bytes). */
   41.10  #define CONFIG_X86_L1_CACHE_SHIFT 7
    42.1 --- a/xen/include/asm-x86/cpufeature.h	Mon Feb 13 17:41:23 2006 +0100
    42.2 +++ b/xen/include/asm-x86/cpufeature.h	Tue Feb 14 16:23:43 2006 +0100
    42.3 @@ -69,6 +69,7 @@
    42.4  #define X86_FEATURE_K7		(3*32+ 5) /* Athlon */
    42.5  #define X86_FEATURE_P3		(3*32+ 6) /* P3 */
    42.6  #define X86_FEATURE_P4		(3*32+ 7) /* P4 */
    42.7 +#define X86_FEATURE_CONSTANT_TSC (3*32+ 8) /* TSC ticks at a constant rate */
    42.8  
    42.9  /* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
   42.10  #define X86_FEATURE_XMM3	(4*32+ 0) /* Streaming SIMD Extensions-3 */
   42.11 @@ -88,9 +89,9 @@
   42.12  #define X86_FEATURE_XCRYPT_EN	(5*32+ 7) /* on-CPU crypto enabled */
   42.13  
   42.14  /* More extended AMD flags: CPUID level 0x80000001, ecx, word 6 */
   42.15 -#define X86_FEATURE_LAHF_LM	(5*32+ 0) /* LAHF/SAHF in long mode */
   42.16 -#define X86_FEATURE_CMP_LEGACY	(5*32+ 1) /* If yes HyperThreading not valid */
   42.17 -#define X86_FEATURE_SVME        (5*32+ 2) /* Secure Virtual Machine */
   42.18 +#define X86_FEATURE_LAHF_LM	(6*32+ 0) /* LAHF/SAHF in long mode */
   42.19 +#define X86_FEATURE_CMP_LEGACY	(6*32+ 1) /* If yes HyperThreading not valid */
   42.20 +#define X86_FEATURE_SVME        (6*32+ 2) /* Secure Virtual Machine */
   42.21  
   42.22  #define cpu_has(c, bit)		test_bit(bit, (c)->x86_capability)
   42.23  #define boot_cpu_has(bit)	test_bit(bit, boot_cpu_data.x86_capability)
    43.1 --- a/xen/include/asm-x86/msr.h	Mon Feb 13 17:41:23 2006 +0100
    43.2 +++ b/xen/include/asm-x86/msr.h	Tue Feb 14 16:23:43 2006 +0100
    43.3 @@ -28,34 +28,36 @@ static inline void wrmsrl(unsigned int m
    43.4          wrmsr(msr, lo, hi);
    43.5  }
    43.6  
    43.7 -#define rdmsr_user(msr,val1,val2) ({\
    43.8 +/* rdmsr with exception handling */
    43.9 +#define rdmsr_safe(msr,val1,val2) ({\
   43.10      int _rc; \
   43.11      __asm__ __volatile__( \
   43.12          "1: rdmsr\n2:\n" \
   43.13          ".section .fixup,\"ax\"\n" \
   43.14 -        "3: movl $1,%2\n; jmp 2b\n" \
   43.15 +        "3: movl %5,%2\n; jmp 2b\n" \
   43.16          ".previous\n" \
   43.17          ".section __ex_table,\"a\"\n" \
   43.18          "   "__FIXUP_ALIGN"\n" \
   43.19          "   "__FIXUP_WORD" 1b,3b\n" \
   43.20          ".previous\n" \
   43.21          : "=a" (val1), "=d" (val2), "=&r" (_rc) \
   43.22 -        : "c" (msr), "2" (0)); \
   43.23 +        : "c" (msr), "2" (0), "i" (-EFAULT)); \
   43.24      _rc; })
   43.25  
   43.26 -#define wrmsr_user(msr,val1,val2) ({\
   43.27 +/* wrmsr with exception handling */
   43.28 +#define wrmsr_safe(msr,val1,val2) ({\
   43.29      int _rc; \
   43.30      __asm__ __volatile__( \
   43.31          "1: wrmsr\n2:\n" \
   43.32          ".section .fixup,\"ax\"\n" \
   43.33 -        "3: movl $1,%0\n; jmp 2b\n" \
   43.34 +        "3: movl %5,%0\n; jmp 2b\n" \
   43.35          ".previous\n" \
   43.36          ".section __ex_table,\"a\"\n" \
   43.37          "   "__FIXUP_ALIGN"\n" \
   43.38          "   "__FIXUP_WORD" 1b,3b\n" \
   43.39          ".previous\n" \
   43.40          : "=&r" (_rc) \
   43.41 -        : "c" (msr), "a" (val1), "d" (val2), "0" (0)); \
   43.42 +        : "c" (msr), "a" (val1), "d" (val2), "0" (0), "i" (-EFAULT)); \
   43.43      _rc; })
   43.44  
   43.45  #define rdtsc(low,high) \
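
Editorial aside (not part of the changeset): the renamed accessors return 0 on success and -EFAULT when the MSR access faults and the fixup entry fires, which is what lets emulate_privileged_op() above probe arbitrary MSRs safely. A hedged fragment of the calling pattern, assuming Xen's u32/u64 types and -EFAULT; probe_msr() is an illustrative helper, not changeset code.

static int probe_msr(unsigned int msr, u64 *val)
{
    u32 lo, hi;

    if ( rdmsr_safe(msr, lo, hi) )      /* a #GP becomes -EFAULT, not a crash */
        return -EFAULT;
    *val = ((u64)hi << 32) | lo;
    return 0;
}
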
    44.1 --- a/xen/include/asm-x86/processor.h	Mon Feb 13 17:41:23 2006 +0100
    44.2 +++ b/xen/include/asm-x86/processor.h	Tue Feb 14 16:23:43 2006 +0100
    44.3 @@ -147,24 +147,28 @@ struct vcpu;
    44.4  #endif
    44.5  
    44.6  struct cpuinfo_x86 {
    44.7 -	__u8 x86;		/* CPU family */
    44.8 -	__u8 x86_vendor;	/* CPU vendor */
    44.9 -	__u8 x86_model;
   44.10 -	__u8 x86_mask;
   44.11 -	char wp_works_ok;	/* It doesn't on 386's */
   44.12 -	char hlt_works_ok;	/* Problems on some 486Dx4's and old 386's */
   44.13 -	char hard_math;
   44.14 -	char rfu;
   44.15 +    __u8 x86;		/* CPU family */
   44.16 +    __u8 x86_vendor;	/* CPU vendor */
   44.17 +    __u8 x86_model;
   44.18 +    __u8 x86_mask;
   44.19 +    char wp_works_ok;	/* It doesn't on 386's */
   44.20 +    char hlt_works_ok;	/* Problems on some 486Dx4's and old 386's */
   44.21 +    char hard_math;
   44.22 +    char rfu;
   44.23      int  cpuid_level;	/* Maximum supported CPUID level, -1=no CPUID */
   44.24 -	unsigned int x86_capability[NCAPINTS];
   44.25 -	char x86_vendor_id[16];
   44.26 -	char x86_model_id[64];
   44.27 -	int  x86_cache_size;  /* in KB - valid for CPUS which support this call  */
   44.28 -	int  x86_cache_alignment;	/* In bytes */
   44.29 -	int	 fdiv_bug;
   44.30 -	int	 f00f_bug;
   44.31 -	int	 coma_bug;
   44.32 -	unsigned char x86_num_cores;
   44.33 +    unsigned int x86_capability[NCAPINTS];
   44.34 +    char x86_vendor_id[16];
   44.35 +    char x86_model_id[64];
   44.36 +    int  x86_cache_size;  /* in KB - valid for CPUS which support this call  */
   44.37 +    int  x86_cache_alignment;	/* In bytes */
   44.38 +    char fdiv_bug;
   44.39 +    char f00f_bug;
   44.40 +    char coma_bug;
   44.41 +    char pad0;
   44.42 +    int  x86_power;
   44.43 +    unsigned char x86_max_cores; /* cpuid returned max cores value */
   44.44 +    unsigned char booted_cores; /* number of cores as seen by OS */
   44.45 +    unsigned char apicid;
   44.46  } __cacheline_aligned;
   44.47  
   44.48  /*
   44.49 @@ -208,6 +212,18 @@ static always_inline void detect_ht(stru
   44.50                "=d" (*(int *)(_edx))             \
   44.51              : "0" (_op), "2" (0))
   44.52  
   44.53 +/* Some CPUID calls want 'count' to be placed in ecx */
   44.54 +static inline void cpuid_count(int op, int count, int *eax, int *ebx, int *ecx,
   44.55 +	       	int *edx)
   44.56 +{
   44.57 +	__asm__("cpuid"
   44.58 +		: "=a" (*eax),
   44.59 +		  "=b" (*ebx),
   44.60 +		  "=c" (*ecx),
   44.61 +		  "=d" (*edx)
   44.62 +		: "0" (op), "c" (count));
   44.63 +}
   44.64 +
   44.65  /*
   44.66   * CPUID functions returning a single datum
   44.67   */
   44.68 @@ -502,6 +518,11 @@ void show_registers(struct cpu_user_regs
   44.69  void show_page_walk(unsigned long addr);
   44.70  asmlinkage void fatal_trap(int trapnr, struct cpu_user_regs *regs);
   44.71  
   44.72 +extern void mtrr_ap_init(void);
   44.73 +extern void mtrr_bp_init(void);
   44.74 +
   44.75 +extern void mcheck_init(struct cpuinfo_x86 *c);
   44.76 +
   44.77  #endif /* !__ASSEMBLY__ */
   44.78  
   44.79  #endif /* __ASM_X86_PROCESSOR_H */
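
Editorial aside (not part of the changeset): cpuid_count() exists because some leaves take a sub-index in ECX that the plain cpuid() macro cannot supply. A hedged sketch of the kind of use it gets when deriving x86_max_cores on Intel (leaf 4, deterministic cache parameters); max_cores_per_package() is an illustrative name only.

static unsigned int max_cores_per_package(void)
{
    int eax, ebx, ecx, edx;

    cpuid_count(4, 0, &eax, &ebx, &ecx, &edx);        /* sub-leaf 0 of leaf 4 */
    if ( !(eax & 0x1f) )
        return 1;                                     /* leaf not implemented */
    return (((unsigned int)eax >> 26) & 0x3f) + 1;    /* EAX[31:26] = max core IDs - 1 */
}
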
    45.1 --- a/xen/include/asm-x86/x86_32/asm_defns.h	Mon Feb 13 17:41:23 2006 +0100
    45.2 +++ b/xen/include/asm-x86/x86_32/asm_defns.h	Tue Feb 14 16:23:43 2006 +0100
    45.3 @@ -61,23 +61,14 @@ asmlinkage void x(void);                
    45.4      STR(x) ":\n\t"                              \
    45.5      "pushl $"#v"<<16\n\t"                       \
    45.6      STR(SAVE_ALL(a))                            \
    45.7 -    "call "STR(smp_##x)"\n\t"                   \
    45.8 -    "jmp ret_from_intr\n");
    45.9 -
   45.10 -#define BUILD_SMP_TIMER_INTERRUPT(x,v) XBUILD_SMP_TIMER_INTERRUPT(x,v)
   45.11 -#define XBUILD_SMP_TIMER_INTERRUPT(x,v)         \
   45.12 -asmlinkage void x(struct cpu_user_regs * regs); \
   45.13 -__asm__(                                        \
   45.14 -"\n"__ALIGN_STR"\n"                             \
   45.15 -STR(x) ":\n\t"                                  \
   45.16 -    "pushl $"#v"<<16\n\t"                       \
   45.17 -    STR(SAVE_ALL(a))                            \
   45.18      "movl %esp,%eax\n\t"                        \
   45.19      "pushl %eax\n\t"                            \
   45.20      "call "STR(smp_##x)"\n\t"                   \
   45.21      "addl $4,%esp\n\t"                          \
   45.22      "jmp ret_from_intr\n");
   45.23  
   45.24 +#define BUILD_SMP_TIMER_INTERRUPT(x,v) BUILD_SMP_INTERRUPT(x,v)
   45.25 +
   45.26  #define BUILD_COMMON_IRQ()                      \
   45.27  __asm__(                                        \
   45.28      "\n" __ALIGN_STR"\n"                        \
    46.1 --- a/xen/include/asm-x86/x86_64/asm_defns.h	Mon Feb 13 17:41:23 2006 +0100
    46.2 +++ b/xen/include/asm-x86/x86_64/asm_defns.h	Tue Feb 14 16:23:43 2006 +0100
    46.3 @@ -69,21 +69,11 @@ asmlinkage void x(void);                
    46.4      "pushq $0\n\t"                              \
    46.5      "movl $"#v",4(%rsp)\n\t"                    \
    46.6      STR(SAVE_ALL)                               \
    46.7 +    "movq %rsp,%rdi\n\t"                        \
    46.8      "callq "STR(smp_##x)"\n\t"                  \
    46.9      "jmp ret_from_intr\n");
   46.10  
   46.11 -#define BUILD_SMP_TIMER_INTERRUPT(x,v) XBUILD_SMP_TIMER_INTERRUPT(x,v)
   46.12 -#define XBUILD_SMP_TIMER_INTERRUPT(x,v)         \
   46.13 -asmlinkage void x(struct cpu_user_regs * regs); \
   46.14 -__asm__(                                        \
   46.15 -"\n"__ALIGN_STR"\n"                             \
   46.16 -STR(x) ":\n\t"                                  \
   46.17 -    "pushq $0\n\t"                              \
   46.18 -    "movl $"#v",4(%rsp)\n\t"                    \
   46.19 -    STR(SAVE_ALL)                               \
   46.20 -    "movq %rsp,%rdi\n\t"                        \
   46.21 -    "callq "STR(smp_##x)"\n\t"                  \
   46.22 -    "jmp ret_from_intr\n");
   46.23 +#define BUILD_SMP_TIMER_INTERRUPT(x,v) BUILD_SMP_INTERRUPT(x,v)
   46.24  
   46.25  #define BUILD_COMMON_IRQ()                      \
   46.26  __asm__(                                        \
    47.1 --- a/xen/include/xen/bitops.h	Mon Feb 13 17:41:23 2006 +0100
    47.2 +++ b/xen/include/xen/bitops.h	Tue Feb 14 16:23:43 2006 +0100
    47.3 @@ -76,6 +76,33 @@ static __inline__ int generic_fls(int x)
    47.4   */
    47.5  #include <asm/bitops.h>
    47.6  
    47.7 +
    47.8 +static inline int generic_fls64(__u64 x)
    47.9 +{
   47.10 +    __u32 h = x >> 32;
   47.11 +    if (h)
    47.12 +        return fls(h) + 32;
   47.13 +    return fls(x);
   47.14 +}
   47.15 +
   47.16 +static __inline__ int get_bitmask_order(unsigned int count)
   47.17 +{
   47.18 +    int order;
   47.19 +    
   47.20 +    order = fls(count);
   47.21 +    return order;   /* We could be slightly more clever with -1 here... */
   47.22 +}
   47.23 +
   47.24 +static __inline__ int get_count_order(unsigned int count)
   47.25 +{
   47.26 +    int order;
   47.27 +
   47.28 +    order = fls(count) - 1;
   47.29 +    if (count & (count - 1))
   47.30 +        order++;
   47.31 +    return order;
   47.32 +}
   47.33 +
   47.34  /*
   47.35   * hweightN: returns the hamming weight (i.e. the number
   47.36   * of bits set) of a N-bit word
   47.37 @@ -126,4 +153,26 @@ static inline unsigned long hweight_long
   47.38      return sizeof(w) == 4 ? generic_hweight32(w) : generic_hweight64(w);
   47.39  }
   47.40  
   47.41 +/*
   47.42 + * rol32 - rotate a 32-bit value left
   47.43 + *
   47.44 + * @word: value to rotate
   47.45 + * @shift: bits to roll
   47.46 + */
   47.47 +static inline __u32 rol32(__u32 word, unsigned int shift)
   47.48 +{
   47.49 +    return (word << shift) | (word >> (32 - shift));
   47.50 +}
   47.51 +
   47.52 +/*
   47.53 + * ror32 - rotate a 32-bit value right
   47.54 + *
   47.55 + * @word: value to rotate
   47.56 + * @shift: bits to roll
   47.57 + */
   47.58 +static inline __u32 ror32(__u32 word, unsigned int shift)
   47.59 +{
   47.60 +    return (word >> shift) | (word << (32 - shift));
   47.61 +}
   47.62 +
   47.63  #endif
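
Editorial aside (not part of the changeset): a quick check of the rotate helpers added above. Note that, as written, they rely on 0 < shift < 32; a shift of 0 (or 32) would make the complementary shift undefined in C.

#include <assert.h>
#include <stdint.h>

static uint32_t rol32_model(uint32_t word, unsigned int shift)
{
    return (word << shift) | (word >> (32 - shift));   /* same expression as above */
}

int main(void)
{
    assert(rol32_model(0x80000001u, 4) == 0x00000018u);
    return 0;
}
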
    48.1 --- a/xen/include/xen/init.h	Mon Feb 13 17:41:23 2006 +0100
    48.2 +++ b/xen/include/xen/init.h	Tue Feb 14 16:23:43 2006 +0100
    48.3 @@ -100,4 +100,6 @@ extern struct kernel_param __setup_start
    48.4  #define __devexitdata __exitdata
    48.5  #endif
    48.6  
    48.7 +#define fastcall
    48.8 +
    48.9  #endif /* _LINUX_INIT_H */
    49.1 --- a/xen/include/xen/lib.h	Mon Feb 13 17:41:23 2006 +0100
    49.2 +++ b/xen/include/xen/lib.h	Tue Feb 14 16:23:43 2006 +0100
    49.3 @@ -75,4 +75,12 @@ unsigned long long simple_strtoull(
    49.4  
    49.5  unsigned long long parse_size_and_unit(char *s);
    49.6  
    49.7 +#define TAINT_UNSAFE_SMP                (1<<0)
    49.8 +#define TAINT_MACHINE_CHECK             (1<<1)
    49.9 +#define TAINT_BAD_PAGE                  (1<<2)
   49.10 +extern int tainted;
   49.11 +#define TAINT_STRING_MAX_LEN            20
   49.12 +extern char *print_tainted(char *str);
   49.13 +extern void add_taint(unsigned);
   49.14 +
   49.15  #endif /* __LIB_H__ */