ia64/xen-unstable

changeset 5167:41a5c6143c83

bitkeeper revision 1.1564 (4295ecb2jzOPE0em5dg6Hu_4rbzFCg)

Port CPU setup code from Linux 2.6.
Signed-off-by: Keir Fraser <keir@xensource.com>
author kaf24@firebug.cl.cam.ac.uk
date Thu May 26 15:35:14 2005 +0000 (2005-05-26)
parents d867c2bbd27e
children 0e347f03a68c 1f630bfd83f5
files .rootkeys xen/arch/x86/Makefile xen/arch/x86/cpu/amd.c xen/arch/x86/cpu/centaur.c xen/arch/x86/cpu/common.c xen/arch/x86/cpu/cpu.h xen/arch/x86/cpu/cyrix.c xen/arch/x86/cpu/intel.c xen/arch/x86/cpu/intel_cacheinfo.c xen/arch/x86/cpu/rise.c xen/arch/x86/cpu/transmeta.c xen/arch/x86/setup.c xen/include/asm-x86/config.h xen/include/asm-x86/cpufeature.h xen/include/asm-x86/msr.h xen/include/asm-x86/processor.h
line diff
     1.1 --- a/.rootkeys	Thu May 26 15:11:08 2005 +0000
     1.2 +++ b/.rootkeys	Thu May 26 15:35:14 2005 +0000
     1.3 @@ -1132,6 +1132,15 @@ 3ddb79c4yGZ7_22QAFFwPzqP4NSHwA xen/arch/
     1.4  3ddb79bcSC_LvnmFlX-T5iTgaR0SKg xen/arch/x86/boot/x86_32.S
     1.5  40e42bdbNu4MjI750THP_8J1S-Sa0g xen/arch/x86/boot/x86_64.S
     1.6  4107c15e-VmEcLsE-7JCXZaabI8C7A xen/arch/x86/cdb.c
     1.7 +4295ecb1Ynez_TseZvDdjD7PzVMDiw xen/arch/x86/cpu/amd.c
     1.8 +4295ecb1KPPNny26nBEJzK4pAG-KXQ xen/arch/x86/cpu/centaur.c
     1.9 +4295ecb1QnJx9cbqCJQ1o4TTFQL5Vg xen/arch/x86/cpu/common.c
    1.10 +4295ecb1ZIJLN5uklV1xompN7DN1WQ xen/arch/x86/cpu/cpu.h
    1.11 +4295ecb1g6Ye-zy_oXVQQaKw4AtDmw xen/arch/x86/cpu/cyrix.c
    1.12 +4295ecb1MOdQxXznHu3g-p5DzhMv8g xen/arch/x86/cpu/intel.c
    1.13 +4295ecb1LsW7ov9JOtRP8euvJKbgbQ xen/arch/x86/cpu/intel_cacheinfo.c
    1.14 +4295ecb1AeClyruqwLz-xDthMZ5eoA xen/arch/x86/cpu/rise.c
    1.15 +4295ecb1GO92quFeyoVz2LsPQcFuHg xen/arch/x86/cpu/transmeta.c
    1.16  3ddb79bcUrk2EIaM5VsT6wUudH1kkg xen/arch/x86/delay.c
    1.17  4294b5ee34eGSh5YNDKMSxBIOycluw xen/arch/x86/dmi_scan.c
    1.18  40e34414WiQO4h2m3tcpaCPn7SyYyg xen/arch/x86/dom0_ops.c
     2.1 --- a/xen/arch/x86/Makefile	Thu May 26 15:11:08 2005 +0000
     2.2 +++ b/xen/arch/x86/Makefile	Thu May 26 15:35:14 2005 +0000
     2.3 @@ -6,6 +6,14 @@ OBJS += $(patsubst %.c,%.o,$(wildcard $(
     2.4  OBJS += $(patsubst %.c,%.o,$(wildcard acpi/*.c))
     2.5  OBJS += $(patsubst %.c,%.o,$(wildcard mtrr/*.c))
     2.6  OBJS += $(patsubst %.c,%.o,$(wildcard genapic/*.c))
     2.7 +OBJS += $(patsubst %.c,%.o,$(wildcard cpu/*.c))
     2.8 +
     2.9 +ifeq ($(TARGET_SUBARCH),x86_64) 
    2.10 +OBJS := $(subst cpu/centaur.o,,$(OBJS))
    2.11 +OBJS := $(subst cpu/cyrix.o,,$(OBJS))
    2.12 +OBJS := $(subst cpu/rise.o,,$(OBJS))
    2.13 +OBJS := $(subst cpu/transmeta.o,,$(OBJS))
    2.14 +endif
    2.15  
    2.16  OBJS := $(subst $(TARGET_SUBARCH)/asm-offsets.o,,$(OBJS))
    2.17  
    2.18 @@ -38,6 +46,7 @@ clean:
    2.19  	rm -f mtrr/*.o mtrr/*~ mtrr/core
    2.20  	rm -f acpi/*.o acpi/*~ acpi/core
    2.21  	rm -f genapic/*.o genapic/*~ genapic/core
    2.22 +	rm -f cpu/*.o cpu/*~ cpu/core
    2.23  
    2.24  delete-unfresh-files:
    2.25  	# nothing
     3.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     3.2 +++ b/xen/arch/x86/cpu/amd.c	Thu May 26 15:35:14 2005 +0000
     3.3 @@ -0,0 +1,254 @@
     3.4 +#include <xen/config.h>
     3.5 +#include <xen/init.h>
     3.6 +#include <xen/bitops.h>
     3.7 +#include <xen/mm.h>
     3.8 +#include <xen/smp.h>
     3.9 +#include <asm/io.h>
    3.10 +#include <asm/msr.h>
    3.11 +#include <asm/processor.h>
    3.12 +
    3.13 +#include "cpu.h"
    3.14 +
    3.15 +#define num_physpages 0
    3.16 +
    3.17 +/*
    3.18 + *	B step AMD K6 before B 9730xxxx have hardware bugs that can cause
    3.19 + *	misexecution of code under Linux. Owners of such processors should
    3.20 + *	contact AMD for precise details and a CPU swap.
    3.21 + *
    3.22 + *	See	http://www.multimania.com/poulot/k6bug.html
    3.23 + *		http://www.amd.com/K6/k6docs/revgd.html
    3.24 + *
    3.25 + *	The following test is erm.. interesting. AMD neglected to up
    3.26 + *	the chip setting when fixing the bug but they also tweaked some
    3.27 + *	performance at the same time..
    3.28 + */
    3.29 + 
    3.30 +extern void vide(void);
    3.31 +__asm__(".align 4\nvide: ret");
    3.32 +
    3.33 +static void __init init_amd(struct cpuinfo_x86 *c)
    3.34 +{
    3.35 +	u32 l, h;
    3.36 +	int mbytes = num_physpages >> (20-PAGE_SHIFT);
    3.37 +	int r;
    3.38 +
    3.39 +	/*
    3.40 +	 *	FIXME: We should handle the K5 here. Set up the write
    3.41 +	 *	range and also turn on MSR 83 bits 4 and 31 (write alloc,
    3.42 +	 *	no bus pipeline)
    3.43 +	 */
    3.44 +
    3.45 +	/* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
    3.46 +	   3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */
    3.47 +	clear_bit(0*32+31, c->x86_capability);
    3.48 +	
    3.49 +	r = get_model_name(c);
    3.50 +
    3.51 +	switch(c->x86)
    3.52 +	{
    3.53 +		case 4:
    3.54 +		/*
    3.55 +		 * General Systems BIOSen alias the cpu frequency registers
     3.56 +		 * of the Elan at 0x000df000. Unfortunately, one of the Linux
    3.57 +		 * drivers subsequently pokes it, and changes the CPU speed.
    3.58 +		 * Workaround : Remove the unneeded alias.
    3.59 +		 */
    3.60 +#define CBAR		(0xfffc) /* Configuration Base Address  (32-bit) */
    3.61 +#define CBAR_ENB	(0x80000000)
    3.62 +#define CBAR_KEY	(0X000000CB)
    3.63 +			if (c->x86_model==9 || c->x86_model == 10) {
    3.64 +				if (inl (CBAR) & CBAR_ENB)
    3.65 +					outl (0 | CBAR_KEY, CBAR);
    3.66 +			}
    3.67 +			break;
    3.68 +		case 5:
    3.69 +			if( c->x86_model < 6 )
    3.70 +			{
    3.71 +				/* Based on AMD doc 20734R - June 2000 */
    3.72 +				if ( c->x86_model == 0 ) {
    3.73 +					clear_bit(X86_FEATURE_APIC, c->x86_capability);
    3.74 +					set_bit(X86_FEATURE_PGE, c->x86_capability);
    3.75 +				}
    3.76 +				break;
    3.77 +			}
    3.78 +			
    3.79 +			if ( c->x86_model == 6 && c->x86_mask == 1 ) {
    3.80 +				const int K6_BUG_LOOP = 1000000;
    3.81 +				int n;
    3.82 +				void (*f_vide)(void);
    3.83 +				unsigned long d, d2;
    3.84 +				
    3.85 +				printk(KERN_INFO "AMD K6 stepping B detected - ");
    3.86 +				
    3.87 +				/*
    3.88 +				 * It looks like AMD fixed the 2.6.2 bug and improved indirect 
    3.89 +				 * calls at the same time.
    3.90 +				 */
    3.91 +
    3.92 +				n = K6_BUG_LOOP;
    3.93 +				f_vide = vide;
    3.94 +				rdtscl(d);
    3.95 +				while (n--) 
    3.96 +					f_vide();
    3.97 +				rdtscl(d2);
    3.98 +				d = d2-d;
    3.99 +				
   3.100 +				/* Knock these two lines out if it debugs out ok */
   3.101 +				printk(KERN_INFO "AMD K6 stepping B detected - ");
   3.102 +				/* -- cut here -- */
   3.103 +				if (d > 20*K6_BUG_LOOP) 
   3.104 +					printk("system stability may be impaired when more than 32 MB are used.\n");
   3.105 +				else 
   3.106 +					printk("probably OK (after B9730xxxx).\n");
   3.107 +				printk(KERN_INFO "Please see http://membres.lycos.fr/poulot/k6bug.html\n");
   3.108 +			}
   3.109 +
   3.110 +			/* K6 with old style WHCR */
   3.111 +			if (c->x86_model < 8 ||
   3.112 +			   (c->x86_model== 8 && c->x86_mask < 8)) {
   3.113 +				/* We can only write allocate on the low 508Mb */
   3.114 +				if(mbytes>508)
   3.115 +					mbytes=508;
   3.116 +
   3.117 +				rdmsr(MSR_K6_WHCR, l, h);
   3.118 +				if ((l&0x0000FFFF)==0) {
   3.119 +					unsigned long flags;
   3.120 +					l=(1<<0)|((mbytes/4)<<1);
   3.121 +					local_irq_save(flags);
   3.122 +					wbinvd();
   3.123 +					wrmsr(MSR_K6_WHCR, l, h);
   3.124 +					local_irq_restore(flags);
   3.125 +					printk(KERN_INFO "Enabling old style K6 write allocation for %d Mb\n",
   3.126 +						mbytes);
   3.127 +				}
   3.128 +				break;
   3.129 +			}
   3.130 +
   3.131 +			if ((c->x86_model == 8 && c->x86_mask >7) ||
   3.132 +			     c->x86_model == 9 || c->x86_model == 13) {
   3.133 +				/* The more serious chips .. */
   3.134 +
   3.135 +				if(mbytes>4092)
   3.136 +					mbytes=4092;
   3.137 +
   3.138 +				rdmsr(MSR_K6_WHCR, l, h);
   3.139 +				if ((l&0xFFFF0000)==0) {
   3.140 +					unsigned long flags;
   3.141 +					l=((mbytes>>2)<<22)|(1<<16);
   3.142 +					local_irq_save(flags);
   3.143 +					wbinvd();
   3.144 +					wrmsr(MSR_K6_WHCR, l, h);
   3.145 +					local_irq_restore(flags);
   3.146 +					printk(KERN_INFO "Enabling new style K6 write allocation for %d Mb\n",
   3.147 +						mbytes);
   3.148 +				}
   3.149 +
   3.150 +				/*  Set MTRR capability flag if appropriate */
   3.151 +				if (c->x86_model == 13 || c->x86_model == 9 ||
   3.152 +				   (c->x86_model == 8 && c->x86_mask >= 8))
   3.153 +					set_bit(X86_FEATURE_K6_MTRR, c->x86_capability);
   3.154 +				break;
   3.155 +			}
   3.156 +			break;
   3.157 +
   3.158 +		case 6: /* An Athlon/Duron */
   3.159 + 
    3.160 +			/* Bit 15 of the Athlon-specific MSR 15 needs to be 0
    3.161 + 			 * to enable SSE on Palomino/Morgan/Barton CPUs.
   3.162 +			 * If the BIOS didn't enable it already, enable it here.
   3.163 +			 */
   3.164 +			if (c->x86_model >= 6 && c->x86_model <= 10) {
   3.165 +				if (!cpu_has(c, X86_FEATURE_XMM)) {
   3.166 +					printk(KERN_INFO "Enabling disabled K7/SSE Support.\n");
   3.167 +					rdmsr(MSR_K7_HWCR, l, h);
   3.168 +					l &= ~0x00008000;
   3.169 +					wrmsr(MSR_K7_HWCR, l, h);
   3.170 +					set_bit(X86_FEATURE_XMM, c->x86_capability);
   3.171 +				}
   3.172 +			}
   3.173 +
   3.174 +			/* It's been determined by AMD that Athlons since model 8 stepping 1
    3.175 +			 * are more robust with CLK_CTL set to 200xxxxx instead of 600xxxxx,
    3.176 +			 * as per AMD technical note 27212 0.2.
   3.177 +			 */
   3.178 +			if ((c->x86_model == 8 && c->x86_mask>=1) || (c->x86_model > 8)) {
   3.179 +				rdmsr(MSR_K7_CLK_CTL, l, h);
   3.180 +				if ((l & 0xfff00000) != 0x20000000) {
   3.181 +					printk ("CPU: CLK_CTL MSR was %x. Reprogramming to %x\n", l,
   3.182 +						((l & 0x000fffff)|0x20000000));
   3.183 +					wrmsr(MSR_K7_CLK_CTL, (l & 0x000fffff)|0x20000000, h);
   3.184 +				}
   3.185 +			}
   3.186 +			break;
   3.187 +	}
   3.188 +
   3.189 +	switch (c->x86) {
   3.190 +	case 15:
   3.191 +		set_bit(X86_FEATURE_K8, c->x86_capability);
   3.192 +		break;
   3.193 +	case 6:
   3.194 +		set_bit(X86_FEATURE_K7, c->x86_capability); 
   3.195 +		break;
   3.196 +	}
   3.197 +
   3.198 +	display_cacheinfo(c);
   3.199 +	detect_ht(c);
   3.200 +
   3.201 +#ifdef CONFIG_X86_HT
   3.202 +	/* AMD dual core looks like HT but isn't really. Hide it from the
   3.203 +	   scheduler. This works around problems with the domain scheduler.
   3.204 +	   Also probably gives slightly better scheduling and disables
   3.205 +	   SMT nice which is harmful on dual core.
   3.206 +	   TBD tune the domain scheduler for dual core. */
   3.207 +	if (cpu_has(c, X86_FEATURE_CMP_LEGACY))
   3.208 +		smp_num_siblings = 1;
   3.209 +#endif
   3.210 +
   3.211 +	if (cpuid_eax(0x80000000) >= 0x80000008) {
   3.212 +		c->x86_num_cores = (cpuid_ecx(0x80000008) & 0xff) + 1;
   3.213 +		if (c->x86_num_cores & (c->x86_num_cores - 1))
   3.214 +			c->x86_num_cores = 1;
   3.215 +	}
   3.216 +}
   3.217 +
   3.218 +static unsigned int amd_size_cache(struct cpuinfo_x86 * c, unsigned int size)
   3.219 +{
   3.220 +	/* AMD errata T13 (order #21922) */
   3.221 +	if ((c->x86 == 6)) {
   3.222 +		if (c->x86_model == 3 && c->x86_mask == 0)	/* Duron Rev A0 */
   3.223 +			size = 64;
   3.224 +		if (c->x86_model == 4 &&
   3.225 +		    (c->x86_mask==0 || c->x86_mask==1))	/* Tbird rev A1/A2 */
   3.226 +			size = 256;
   3.227 +	}
   3.228 +	return size;
   3.229 +}
   3.230 +
   3.231 +static struct cpu_dev amd_cpu_dev __initdata = {
   3.232 +	.c_vendor	= "AMD",
   3.233 +	.c_ident 	= { "AuthenticAMD" },
   3.234 +	.c_models = {
   3.235 +		{ .vendor = X86_VENDOR_AMD, .family = 4, .model_names =
   3.236 +		  {
   3.237 +			  [3] = "486 DX/2",
   3.238 +			  [7] = "486 DX/2-WB",
   3.239 +			  [8] = "486 DX/4", 
   3.240 +			  [9] = "486 DX/4-WB", 
   3.241 +			  [14] = "Am5x86-WT",
   3.242 +			  [15] = "Am5x86-WB" 
   3.243 +		  }
   3.244 +		},
   3.245 +	},
   3.246 +	.c_init		= init_amd,
   3.247 +	.c_identify	= generic_identify,
   3.248 +	.c_size_cache	= amd_size_cache,
   3.249 +};
   3.250 +
   3.251 +int __init amd_init_cpu(void)
   3.252 +{
   3.253 +	cpu_devs[X86_VENDOR_AMD] = &amd_cpu_dev;
   3.254 +	return 0;
   3.255 +}
   3.256 +
   3.257 +//early_arch_initcall(amd_init_cpu);
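
The WHCR writes in init_amd() above pack the write-allocation limit into the MSR in two layouts, depending on the K6 model/stepping. A minimal standalone sketch of just that encoding, derived from the code in this diff (the helper names are illustrative, nothing here is part of the changeset):

	/* Sketch of the K6 WHCR encodings used by init_amd() above.
	 * Old-style WHCR (before model 8 stepping 8): bit 0 enables write
	 * allocation, bits 1+ hold the limit in 4MB units, and only the
	 * low 508MB can be covered. New-style WHCR: bit 16 enables, bits
	 * 22+ hold the limit in 4MB units, up to 4092MB. */
	#include <stdio.h>

	static unsigned int k6_whcr_old(unsigned int mbytes)
	{
		if (mbytes > 508)
			mbytes = 508;	/* old WHCR covers only the low 508MB */
		return (1u << 0) | ((mbytes / 4) << 1);
	}

	static unsigned int k6_whcr_new(unsigned int mbytes)
	{
		if (mbytes > 4092)
			mbytes = 4092;
		return ((mbytes >> 2) << 22) | (1u << 16);
	}

	int main(void)
	{
		printf("old WHCR low word for 128MB: %#x\n", k6_whcr_old(128));
		printf("new WHCR low word for 128MB: %#x\n", k6_whcr_new(128));
		return 0;
	}
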
     4.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     4.2 +++ b/xen/arch/x86/cpu/centaur.c	Thu May 26 15:35:14 2005 +0000
     4.3 @@ -0,0 +1,477 @@
     4.4 +#include <xen/config.h>
     4.5 +#include <xen/lib.h>
     4.6 +#include <xen/init.h>
     4.7 +#include <xen/bitops.h>
     4.8 +#include <asm/processor.h>
     4.9 +#include <asm/msr.h>
    4.10 +#include <asm/e820.h>
    4.11 +#include "cpu.h"
    4.12 +
    4.13 +#ifdef CONFIG_X86_OOSTORE
    4.14 +
    4.15 +static u32 __init power2(u32 x)
    4.16 +{
    4.17 +	u32 s=1;
    4.18 +	while(s<=x)
    4.19 +		s<<=1;
    4.20 +	return s>>=1;
    4.21 +}
    4.22 +
    4.23 +
    4.24 +/*
    4.25 + *	Set up an actual MCR
    4.26 + */
    4.27 + 
    4.28 +static void __init centaur_mcr_insert(int reg, u32 base, u32 size, int key)
    4.29 +{
    4.30 +	u32 lo, hi;
    4.31 +	
    4.32 +	hi = base & ~0xFFF;
    4.33 +	lo = ~(size-1);		/* Size is a power of 2 so this makes a mask */
    4.34 +	lo &= ~0xFFF;		/* Remove the ctrl value bits */
    4.35 +	lo |= key;		/* Attribute we wish to set */
    4.36 +	wrmsr(reg+MSR_IDT_MCR0, lo, hi);
    4.37 +	mtrr_centaur_report_mcr(reg, lo, hi);	/* Tell the mtrr driver */
    4.38 +}
    4.39 +
    4.40 +/*
    4.41 + *	Figure what we can cover with MCR's
    4.42 + *
    4.43 + *	Shortcut: We know you can't put 4Gig of RAM on a winchip
    4.44 + */
    4.45 +
    4.46 +static u32 __init ramtop(void)		/* 16388 */
    4.47 +{
    4.48 +	int i;
    4.49 +	u32 top = 0;
    4.50 +	u32 clip = 0xFFFFFFFFUL;
    4.51 +	
    4.52 +	for (i = 0; i < e820.nr_map; i++) {
    4.53 +		unsigned long start, end;
    4.54 +
    4.55 +		if (e820.map[i].addr > 0xFFFFFFFFUL)
    4.56 +			continue;
    4.57 +		/*
     4.58 +		 *	Don't MCR over reserved space. Ignore the ISA hole;
     4.59 +		 *	we frob around that catastrophe already.
    4.60 +		 */
    4.61 +		 			
    4.62 +		if (e820.map[i].type == E820_RESERVED)
    4.63 +		{
    4.64 +			if(e820.map[i].addr >= 0x100000UL && e820.map[i].addr < clip)
    4.65 +				clip = e820.map[i].addr;
    4.66 +			continue;
    4.67 +		}
    4.68 +		start = e820.map[i].addr;
    4.69 +		end = e820.map[i].addr + e820.map[i].size;
    4.70 +		if (start >= end)
    4.71 +			continue;
    4.72 +		if (end > top)
    4.73 +			top = end;
    4.74 +	}
    4.75 +	/* Everything below 'top' should be RAM except for the ISA hole.
    4.76 +	   Because of the limited MCR's we want to map NV/ACPI into our
    4.77 +	   MCR range for gunk in RAM 
    4.78 +	   
    4.79 +	   Clip might cause us to MCR insufficient RAM but that is an
    4.80 +	   acceptable failure mode and should only bite obscure boxes with
    4.81 +	   a VESA hole at 15Mb
    4.82 +	   
     4.83 +	   The second case where clip sometimes kicks in is when the EBDA
     4.84 +	   is marked as reserved. Again we fail safe with reasonable results.
    4.85 +	*/
    4.86 +	
    4.87 +	if(top>clip)
    4.88 +		top=clip;
    4.89 +		
    4.90 +	return top;
    4.91 +}
    4.92 +
    4.93 +/*
    4.94 + *	Compute a set of MCR's to give maximum coverage
    4.95 + */
    4.96 +
    4.97 +static int __init centaur_mcr_compute(int nr, int key)
    4.98 +{
    4.99 +	u32 mem = ramtop();
   4.100 +	u32 root = power2(mem);
   4.101 +	u32 base = root;
   4.102 +	u32 top = root;
   4.103 +	u32 floor = 0;
   4.104 +	int ct = 0;
   4.105 +	
   4.106 +	while(ct<nr)
   4.107 +	{
   4.108 +		u32 fspace = 0;
   4.109 +
   4.110 +		/*
   4.111 +		 *	Find the largest block we will fill going upwards
   4.112 +		 */
   4.113 +
   4.114 +		u32 high = power2(mem-top);	
   4.115 +
   4.116 +		/*
   4.117 +		 *	Find the largest block we will fill going downwards
   4.118 +		 */
   4.119 +
   4.120 +		u32 low = base/2;
   4.121 +
   4.122 +		/*
   4.123 +		 *	Don't fill below 1Mb going downwards as there
   4.124 +		 *	is an ISA hole in the way.
   4.125 +		 */		
   4.126 +		 
   4.127 +		if(base <= 1024*1024)
   4.128 +			low = 0;
   4.129 +			
   4.130 +		/*
   4.131 +		 *	See how much space we could cover by filling below
   4.132 +		 *	the ISA hole
   4.133 +		 */
   4.134 +		 
   4.135 +		if(floor == 0)
   4.136 +			fspace = 512*1024;
   4.137 +		else if(floor ==512*1024)
   4.138 +			fspace = 128*1024;
   4.139 +
   4.140 +		/* And forget ROM space */
   4.141 +		
   4.142 +		/*
   4.143 +		 *	Now install the largest coverage we get
   4.144 +		 */
   4.145 +		 
   4.146 +		if(fspace > high && fspace > low)
   4.147 +		{
   4.148 +			centaur_mcr_insert(ct, floor, fspace, key);
   4.149 +			floor += fspace;
   4.150 +		}
   4.151 +		else if(high > low)
   4.152 +		{
   4.153 +			centaur_mcr_insert(ct, top, high, key);
   4.154 +			top += high;
   4.155 +		}
   4.156 +		else if(low > 0)
   4.157 +		{
   4.158 +			base -= low;
   4.159 +			centaur_mcr_insert(ct, base, low, key);
   4.160 +		}
   4.161 +		else break;
   4.162 +		ct++;
   4.163 +	}
   4.164 +	/*
   4.165 +	 *	We loaded ct values. We now need to set the mask. The caller
   4.166 +	 *	must do this bit.
   4.167 +	 */
   4.168 +	 
   4.169 +	return ct;
   4.170 +}
   4.171 +
   4.172 +static void __init centaur_create_optimal_mcr(void)
   4.173 +{
   4.174 +	int i;
   4.175 +	/*
   4.176 +	 *	Allocate up to 6 mcrs to mark as much of ram as possible
   4.177 +	 *	as write combining and weak write ordered.
   4.178 +	 *
   4.179 +	 *	To experiment with: Linux never uses stack operations for 
   4.180 +	 *	mmio spaces so we could globally enable stack operation wc
   4.181 +	 *
   4.182 +	 *	Load the registers with type 31 - full write combining, all
   4.183 +	 *	writes weakly ordered.
   4.184 +	 */
   4.185 +	int used = centaur_mcr_compute(6, 31);
   4.186 +
   4.187 +	/*
   4.188 +	 *	Wipe unused MCRs
   4.189 +	 */
   4.190 +	 
   4.191 +	for(i=used;i<8;i++)
   4.192 +		wrmsr(MSR_IDT_MCR0+i, 0, 0);
   4.193 +}
   4.194 +
   4.195 +static void __init winchip2_create_optimal_mcr(void)
   4.196 +{
   4.197 +	u32 lo, hi;
   4.198 +	int i;
   4.199 +
   4.200 +	/*
   4.201 +	 *	Allocate up to 6 mcrs to mark as much of ram as possible
   4.202 +	 *	as write combining, weak store ordered.
   4.203 +	 *
   4.204 +	 *	Load the registers with type 25
   4.205 +	 *		8	-	weak write ordering
   4.206 +	 *		16	-	weak read ordering
   4.207 +	 *		1	-	write combining
   4.208 +	 */
   4.209 +
   4.210 +	int used = centaur_mcr_compute(6, 25);
   4.211 +	
   4.212 +	/*
   4.213 +	 *	Mark the registers we are using.
   4.214 +	 */
   4.215 +	 
   4.216 +	rdmsr(MSR_IDT_MCR_CTRL, lo, hi);
   4.217 +	for(i=0;i<used;i++)
   4.218 +		lo|=1<<(9+i);
   4.219 +	wrmsr(MSR_IDT_MCR_CTRL, lo, hi);
   4.220 +	
   4.221 +	/*
   4.222 +	 *	Wipe unused MCRs
   4.223 +	 */
   4.224 +	 
   4.225 +	for(i=used;i<8;i++)
   4.226 +		wrmsr(MSR_IDT_MCR0+i, 0, 0);
   4.227 +}
   4.228 +
   4.229 +/*
   4.230 + *	Handle the MCR key on the Winchip 2.
   4.231 + */
   4.232 +
   4.233 +static void __init winchip2_unprotect_mcr(void)
   4.234 +{
   4.235 +	u32 lo, hi;
   4.236 +	u32 key;
   4.237 +	
   4.238 +	rdmsr(MSR_IDT_MCR_CTRL, lo, hi);
   4.239 +	lo&=~0x1C0;	/* blank bits 8-6 */
   4.240 +	key = (lo>>17) & 7;
   4.241 +	lo |= key<<6;	/* replace with unlock key */
   4.242 +	wrmsr(MSR_IDT_MCR_CTRL, lo, hi);
   4.243 +}
   4.244 +
   4.245 +static void __init winchip2_protect_mcr(void)
   4.246 +{
   4.247 +	u32 lo, hi;
   4.248 +	
   4.249 +	rdmsr(MSR_IDT_MCR_CTRL, lo, hi);
   4.250 +	lo&=~0x1C0;	/* blank bits 8-6 */
   4.251 +	wrmsr(MSR_IDT_MCR_CTRL, lo, hi);
   4.252 +}
   4.253 +#endif /* CONFIG_X86_OOSTORE */
   4.254 +
   4.255 +#define ACE_PRESENT	(1 << 6)
   4.256 +#define ACE_ENABLED	(1 << 7)
   4.257 +#define ACE_FCR		(1 << 28)	/* MSR_VIA_FCR */
   4.258 +
   4.259 +#define RNG_PRESENT	(1 << 2)
   4.260 +#define RNG_ENABLED	(1 << 3)
   4.261 +#define RNG_ENABLE	(1 << 6)	/* MSR_VIA_RNG */
   4.262 +
   4.263 +static void __init init_c3(struct cpuinfo_x86 *c)
   4.264 +{
   4.265 +	u32  lo, hi;
   4.266 +
   4.267 +	/* Test for Centaur Extended Feature Flags presence */
   4.268 +	if (cpuid_eax(0xC0000000) >= 0xC0000001) {
   4.269 +		u32 tmp = cpuid_edx(0xC0000001);
   4.270 +
   4.271 +		/* enable ACE unit, if present and disabled */
   4.272 +		if ((tmp & (ACE_PRESENT | ACE_ENABLED)) == ACE_PRESENT) {
   4.273 +			rdmsr (MSR_VIA_FCR, lo, hi);
   4.274 +			lo |= ACE_FCR;		/* enable ACE unit */
   4.275 +			wrmsr (MSR_VIA_FCR, lo, hi);
   4.276 +			printk(KERN_INFO "CPU: Enabled ACE h/w crypto\n");
   4.277 +		}
   4.278 +
   4.279 +		/* enable RNG unit, if present and disabled */
   4.280 +		if ((tmp & (RNG_PRESENT | RNG_ENABLED)) == RNG_PRESENT) {
   4.281 +			rdmsr (MSR_VIA_RNG, lo, hi);
   4.282 +			lo |= RNG_ENABLE;	/* enable RNG unit */
   4.283 +			wrmsr (MSR_VIA_RNG, lo, hi);
   4.284 +			printk(KERN_INFO "CPU: Enabled h/w RNG\n");
   4.285 +		}
   4.286 +
   4.287 +		/* store Centaur Extended Feature Flags as
   4.288 +		 * word 5 of the CPU capability bit array
   4.289 +		 */
   4.290 +		c->x86_capability[5] = cpuid_edx(0xC0000001);
   4.291 +	}
   4.292 +
    4.293 +	/* Cyrix III family needs CX8 & PGE explicitly enabled. */
   4.294 +	if (c->x86_model >=6 && c->x86_model <= 9) {
   4.295 +		rdmsr (MSR_VIA_FCR, lo, hi);
   4.296 +		lo |= (1<<1 | 1<<7);
   4.297 +		wrmsr (MSR_VIA_FCR, lo, hi);
   4.298 +		set_bit(X86_FEATURE_CX8, c->x86_capability);
   4.299 +	}
   4.300 +
    4.301 +	/* Before Nehemiah, the C3s had 3DNow! */
   4.302 +	if (c->x86_model >=6 && c->x86_model <9)
   4.303 +		set_bit(X86_FEATURE_3DNOW, c->x86_capability);
   4.304 +
   4.305 +	get_model_name(c);
   4.306 +	display_cacheinfo(c);
   4.307 +}
   4.308 +
   4.309 +static void __init init_centaur(struct cpuinfo_x86 *c)
   4.310 +{
   4.311 +	enum {
   4.312 +		ECX8=1<<1,
   4.313 +		EIERRINT=1<<2,
   4.314 +		DPM=1<<3,
   4.315 +		DMCE=1<<4,
   4.316 +		DSTPCLK=1<<5,
   4.317 +		ELINEAR=1<<6,
   4.318 +		DSMC=1<<7,
   4.319 +		DTLOCK=1<<8,
   4.320 +		EDCTLB=1<<8,
   4.321 +		EMMX=1<<9,
   4.322 +		DPDC=1<<11,
   4.323 +		EBRPRED=1<<12,
   4.324 +		DIC=1<<13,
   4.325 +		DDC=1<<14,
   4.326 +		DNA=1<<15,
   4.327 +		ERETSTK=1<<16,
   4.328 +		E2MMX=1<<19,
   4.329 +		EAMD3D=1<<20,
   4.330 +	};
   4.331 +
   4.332 +	char *name;
   4.333 +	u32  fcr_set=0;
   4.334 +	u32  fcr_clr=0;
   4.335 +	u32  lo,hi,newlo;
   4.336 +	u32  aa,bb,cc,dd;
   4.337 +
   4.338 +	/* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
   4.339 +	   3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */
   4.340 +	clear_bit(0*32+31, c->x86_capability);
   4.341 +
   4.342 +	switch (c->x86) {
   4.343 +
   4.344 +		case 5:
   4.345 +			switch(c->x86_model) {
   4.346 +			case 4:
   4.347 +				name="C6";
   4.348 +				fcr_set=ECX8|DSMC|EDCTLB|EMMX|ERETSTK;
   4.349 +				fcr_clr=DPDC;
   4.350 +				printk(KERN_NOTICE "Disabling bugged TSC.\n");
   4.351 +				clear_bit(X86_FEATURE_TSC, c->x86_capability);
   4.352 +#ifdef CONFIG_X86_OOSTORE
   4.353 +				centaur_create_optimal_mcr();
   4.354 +				/* Enable
   4.355 +					write combining on non-stack, non-string
   4.356 +					write combining on string, all types
   4.357 +					weak write ordering 
   4.358 +					
   4.359 +				   The C6 original lacks weak read order 
   4.360 +				   
   4.361 +				   Note 0x120 is write only on Winchip 1 */
   4.362 +				   
   4.363 +				wrmsr(MSR_IDT_MCR_CTRL, 0x01F0001F, 0);
   4.364 +#endif				
   4.365 +				break;
   4.366 +			case 8:
   4.367 +				switch(c->x86_mask) {
   4.368 +				default:
   4.369 +					name="2";
   4.370 +					break;
   4.371 +				case 7 ... 9:
   4.372 +					name="2A";
   4.373 +					break;
   4.374 +				case 10 ... 15:
   4.375 +					name="2B";
   4.376 +					break;
   4.377 +				}
   4.378 +				fcr_set=ECX8|DSMC|DTLOCK|EMMX|EBRPRED|ERETSTK|E2MMX|EAMD3D;
   4.379 +				fcr_clr=DPDC;
   4.380 +#ifdef CONFIG_X86_OOSTORE
   4.381 +				winchip2_unprotect_mcr();
   4.382 +				winchip2_create_optimal_mcr();
   4.383 +				rdmsr(MSR_IDT_MCR_CTRL, lo, hi);
   4.384 +				/* Enable
   4.385 +					write combining on non-stack, non-string
   4.386 +					write combining on string, all types
   4.387 +					weak write ordering 
   4.388 +				*/
   4.389 +				lo|=31;				
   4.390 +				wrmsr(MSR_IDT_MCR_CTRL, lo, hi);
   4.391 +				winchip2_protect_mcr();
   4.392 +#endif
   4.393 +				break;
   4.394 +			case 9:
   4.395 +				name="3";
   4.396 +				fcr_set=ECX8|DSMC|DTLOCK|EMMX|EBRPRED|ERETSTK|E2MMX|EAMD3D;
   4.397 +				fcr_clr=DPDC;
   4.398 +#ifdef CONFIG_X86_OOSTORE
   4.399 +				winchip2_unprotect_mcr();
   4.400 +				winchip2_create_optimal_mcr();
   4.401 +				rdmsr(MSR_IDT_MCR_CTRL, lo, hi);
   4.402 +				/* Enable
   4.403 +					write combining on non-stack, non-string
   4.404 +					write combining on string, all types
   4.405 +					weak write ordering 
   4.406 +				*/
   4.407 +				lo|=31;				
   4.408 +				wrmsr(MSR_IDT_MCR_CTRL, lo, hi);
   4.409 +				winchip2_protect_mcr();
   4.410 +#endif
   4.411 +				break;
   4.412 +			case 10:
   4.413 +				name="4";
   4.414 +				/* no info on the WC4 yet */
   4.415 +				break;
   4.416 +			default:
   4.417 +				name="??";
   4.418 +			}
   4.419 +
   4.420 +			rdmsr(MSR_IDT_FCR1, lo, hi);
   4.421 +			newlo=(lo|fcr_set) & (~fcr_clr);
   4.422 +
   4.423 +			if (newlo!=lo) {
   4.424 +				printk(KERN_INFO "Centaur FCR was 0x%X now 0x%X\n", lo, newlo );
   4.425 +				wrmsr(MSR_IDT_FCR1, newlo, hi );
   4.426 +			} else {
   4.427 +				printk(KERN_INFO "Centaur FCR is 0x%X\n",lo);
   4.428 +			}
   4.429 +			/* Emulate MTRRs using Centaur's MCR. */
   4.430 +			set_bit(X86_FEATURE_CENTAUR_MCR, c->x86_capability);
   4.431 +			/* Report CX8 */
   4.432 +			set_bit(X86_FEATURE_CX8, c->x86_capability);
   4.433 +			/* Set 3DNow! on Winchip 2 and above. */
   4.434 +			if (c->x86_model >=8)
   4.435 +				set_bit(X86_FEATURE_3DNOW, c->x86_capability);
   4.436 +			/* See if we can find out some more. */
   4.437 +			if ( cpuid_eax(0x80000000) >= 0x80000005 ) {
   4.438 +				/* Yes, we can. */
   4.439 +				cpuid(0x80000005,&aa,&bb,&cc,&dd);
   4.440 +				/* Add L1 data and code cache sizes. */
   4.441 +				c->x86_cache_size = (cc>>24)+(dd>>24);
   4.442 +			}
   4.443 +			sprintf( c->x86_model_id, "WinChip %s", name );
   4.444 +			break;
   4.445 +
   4.446 +		case 6:
   4.447 +			init_c3(c);
   4.448 +			break;
   4.449 +	}
   4.450 +}
   4.451 +
   4.452 +static unsigned int centaur_size_cache(struct cpuinfo_x86 * c, unsigned int size)
   4.453 +{
   4.454 +	/* VIA C3 CPUs (670-68F) need further shifting. */
   4.455 +	if ((c->x86 == 6) && ((c->x86_model == 7) || (c->x86_model == 8)))
   4.456 +		size >>= 8;
   4.457 +
   4.458 +	/* VIA also screwed up Nehemiah stepping 1, and made
   4.459 +	   it return '65KB' instead of '64KB'
   4.460 +	   - Note, it seems this may only be in engineering samples. */
   4.461 +	if ((c->x86==6) && (c->x86_model==9) && (c->x86_mask==1) && (size==65))
   4.462 +		size -=1;
   4.463 +
   4.464 +	return size;
   4.465 +}
   4.466 +
   4.467 +static struct cpu_dev centaur_cpu_dev __initdata = {
   4.468 +	.c_vendor	= "Centaur",
   4.469 +	.c_ident	= { "CentaurHauls" },
   4.470 +	.c_init		= init_centaur,
   4.471 +	.c_size_cache	= centaur_size_cache,
   4.472 +};
   4.473 +
   4.474 +int __init centaur_init_cpu(void)
   4.475 +{
   4.476 +	cpu_devs[X86_VENDOR_CENTAUR] = &centaur_cpu_dev;
   4.477 +	return 0;
   4.478 +}
   4.479 +
   4.480 +//early_arch_initcall(centaur_init_cpu);
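
The MCR setup above hinges on two pieces: power2() rounds a size down to a power of two, and centaur_mcr_insert() turns a (base, size, key) triple into the lo/hi MSR pair, relying on the power-of-two size to form an address mask. A self-contained sketch of that arithmetic (function names here are illustrative, not from the changeset):

	/* Sketch of the MCR encoding used by centaur_mcr_insert() above. */
	#include <stdio.h>
	#include <stdint.h>

	static uint32_t power2(uint32_t x)	/* largest power of two <= x */
	{
		uint32_t s = 1;
		while (s <= x)
			s <<= 1;
		return s >> 1;
	}

	static void mcr_encode(uint32_t base, uint32_t size, uint32_t key,
			       uint32_t *lo, uint32_t *hi)
	{
		*hi = base & ~0xFFFu;	/* physical base address */
		*lo = ~(size - 1);	/* power-of-two size -> address mask */
		*lo &= ~0xFFFu;		/* clear the control-value bits */
		*lo |= key;		/* attribute key, e.g. 31 = WC, weak ordering */
	}

	int main(void)
	{
		uint32_t lo, hi;
		mcr_encode(0, 64u << 20, 31, &lo, &hi);	/* map 64MB at 0 */
		printf("MCR lo=%#x hi=%#x\n", lo, hi);
		printf("power2 of 100MB = %uMB\n", power2(100u << 20) >> 20);
		return 0;
	}
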
     5.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     5.2 +++ b/xen/arch/x86/cpu/common.c	Thu May 26 15:35:14 2005 +0000
     5.3 @@ -0,0 +1,579 @@
     5.4 +#include <xen/config.h>
     5.5 +#include <xen/init.h>
     5.6 +#include <xen/string.h>
     5.7 +#include <xen/delay.h>
     5.8 +#include <xen/smp.h>
     5.9 +#include <asm/processor.h>
    5.10 +#include <asm/i387.h>
    5.11 +#include <asm/msr.h>
    5.12 +#include <asm/io.h>
    5.13 +#include <asm/mpspec.h>
    5.14 +#include <asm/apic.h>
    5.15 +#include <mach_apic.h>
    5.16 +
    5.17 +#include "cpu.h"
    5.18 +
    5.19 +#define tsc_disable 0
    5.20 +#define disable_pse 0
    5.21 +
    5.22 +static int cachesize_override __initdata = -1;
    5.23 +static int disable_x86_fxsr __initdata = 0;
    5.24 +static int disable_x86_serial_nr __initdata = 1;
    5.25 +
    5.26 +struct cpu_dev * cpu_devs[X86_VENDOR_NUM] = {};
    5.27 +
    5.28 +extern void mcheck_init(struct cpuinfo_x86 *c);
    5.29 +
    5.30 +static void default_init(struct cpuinfo_x86 * c)
    5.31 +{
    5.32 +	/* Not much we can do here... */
    5.33 +	/* Check if at least it has cpuid */
    5.34 +	if (c->cpuid_level == -1) {
    5.35 +		/* No cpuid. It must be an ancient CPU */
    5.36 +		if (c->x86 == 4)
    5.37 +			strcpy(c->x86_model_id, "486");
    5.38 +		else if (c->x86 == 3)
    5.39 +			strcpy(c->x86_model_id, "386");
    5.40 +	}
    5.41 +}
    5.42 +
    5.43 +static struct cpu_dev default_cpu = {
    5.44 +	.c_init	= default_init,
    5.45 +};
    5.46 +static struct cpu_dev * this_cpu = &default_cpu;
    5.47 +
    5.48 +int __init get_model_name(struct cpuinfo_x86 *c)
    5.49 +{
    5.50 +	unsigned int *v;
    5.51 +	char *p, *q;
    5.52 +
    5.53 +	if (cpuid_eax(0x80000000) < 0x80000004)
    5.54 +		return 0;
    5.55 +
    5.56 +	v = (unsigned int *) c->x86_model_id;
    5.57 +	cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
    5.58 +	cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
    5.59 +	cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
    5.60 +	c->x86_model_id[48] = 0;
    5.61 +
    5.62 +	/* Intel chips right-justify this string for some dumb reason;
    5.63 +	   undo that brain damage */
    5.64 +	p = q = &c->x86_model_id[0];
    5.65 +	while ( *p == ' ' )
    5.66 +	     p++;
    5.67 +	if ( p != q ) {
    5.68 +	     while ( *p )
    5.69 +		  *q++ = *p++;
    5.70 +	     while ( q <= &c->x86_model_id[48] )
    5.71 +		  *q++ = '\0';	/* Zero-pad the rest */
    5.72 +	}
    5.73 +
    5.74 +	return 1;
    5.75 +}
    5.76 +
    5.77 +
    5.78 +void __init display_cacheinfo(struct cpuinfo_x86 *c)
    5.79 +{
    5.80 +	unsigned int n, dummy, ecx, edx, l2size;
    5.81 +
    5.82 +	n = cpuid_eax(0x80000000);
    5.83 +
    5.84 +	if (n >= 0x80000005) {
    5.85 +		cpuid(0x80000005, &dummy, &dummy, &ecx, &edx);
    5.86 +		printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n",
    5.87 +			edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
    5.88 +		c->x86_cache_size=(ecx>>24)+(edx>>24);	
    5.89 +	}
    5.90 +
    5.91 +	if (n < 0x80000006)	/* Some chips just have a large L1. */
    5.92 +		return;
    5.93 +
    5.94 +	ecx = cpuid_ecx(0x80000006);
    5.95 +	l2size = ecx >> 16;
    5.96 +	
    5.97 +	/* do processor-specific cache resizing */
    5.98 +	if (this_cpu->c_size_cache)
    5.99 +		l2size = this_cpu->c_size_cache(c,l2size);
   5.100 +
   5.101 +	/* Allow user to override all this if necessary. */
   5.102 +	if (cachesize_override != -1)
   5.103 +		l2size = cachesize_override;
   5.104 +
   5.105 +	if ( l2size == 0 )
   5.106 +		return;		/* Again, no L2 cache is possible */
   5.107 +
   5.108 +	c->x86_cache_size = l2size;
   5.109 +
   5.110 +	printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
   5.111 +	       l2size, ecx & 0xFF);
   5.112 +}
   5.113 +
   5.114 +/* Naming convention should be: <Name> [(<Codename>)] */
    5.115 +/* This table is only used if init_<vendor>() below doesn't set the model name; */
   5.116 +/* in particular, if CPUID levels 0x80000002..4 are supported, this isn't used */
   5.117 +
   5.118 +/* Look up CPU names by table lookup. */
   5.119 +static char __init *table_lookup_model(struct cpuinfo_x86 *c)
   5.120 +{
   5.121 +	struct cpu_model_info *info;
   5.122 +
   5.123 +	if ( c->x86_model >= 16 )
   5.124 +		return NULL;	/* Range check */
   5.125 +
   5.126 +	if (!this_cpu)
   5.127 +		return NULL;
   5.128 +
   5.129 +	info = this_cpu->c_models;
   5.130 +
   5.131 +	while (info && info->family) {
   5.132 +		if (info->family == c->x86)
   5.133 +			return info->model_names[c->x86_model];
   5.134 +		info++;
   5.135 +	}
   5.136 +	return NULL;		/* Not found */
   5.137 +}
   5.138 +
   5.139 +
   5.140 +void __init get_cpu_vendor(struct cpuinfo_x86 *c, int early)
   5.141 +{
   5.142 +	char *v = c->x86_vendor_id;
   5.143 +	int i;
   5.144 +
   5.145 +	for (i = 0; i < X86_VENDOR_NUM; i++) {
   5.146 +		if (cpu_devs[i]) {
   5.147 +			if (!strcmp(v,cpu_devs[i]->c_ident[0]) ||
   5.148 +			    (cpu_devs[i]->c_ident[1] && 
   5.149 +			     !strcmp(v,cpu_devs[i]->c_ident[1]))) {
   5.150 +				c->x86_vendor = i;
   5.151 +				if (!early)
   5.152 +					this_cpu = cpu_devs[i];
   5.153 +				break;
   5.154 +			}
   5.155 +		}
   5.156 +	}
   5.157 +}
   5.158 +
   5.159 +
   5.160 +static int __init x86_fxsr_setup(char * s)
   5.161 +{
   5.162 +	disable_x86_fxsr = 1;
   5.163 +	return 1;
   5.164 +}
   5.165 +__setup("nofxsr", x86_fxsr_setup);
   5.166 +
   5.167 +
   5.168 +/* Standard macro to see if a specific flag is changeable */
   5.169 +static inline int flag_is_changeable_p(unsigned long flag)
   5.170 +{
   5.171 +	unsigned long f1, f2;
   5.172 +
   5.173 +	asm("pushf\n\t"
   5.174 +	    "pushf\n\t"
   5.175 +	    "pop %0\n\t"
   5.176 +	    "mov %0,%1\n\t"
   5.177 +	    "xor %2,%0\n\t"
   5.178 +	    "push %0\n\t"
   5.179 +	    "popf\n\t"
   5.180 +	    "pushf\n\t"
   5.181 +	    "pop %0\n\t"
   5.182 +	    "popf\n\t"
   5.183 +	    : "=&r" (f1), "=&r" (f2)
   5.184 +	    : "ir" (flag));
   5.185 +
   5.186 +	return ((f1^f2) & flag) != 0;
   5.187 +}
   5.188 +
   5.189 +
   5.190 +/* Probe for the CPUID instruction */
   5.191 +int __init have_cpuid_p(void)
   5.192 +{
   5.193 +	return flag_is_changeable_p(X86_EFLAGS_ID);
   5.194 +}
   5.195 +
   5.196 +/* Do minimum CPU detection early.
   5.197 +   Fields really needed: vendor, cpuid_level, family, model, mask, cache alignment.
   5.198 +   The others are not touched to avoid unwanted side effects. */
   5.199 +void __init early_cpu_detect(void)
   5.200 +{
   5.201 +	struct cpuinfo_x86 *c = &boot_cpu_data;
   5.202 +
   5.203 +	c->x86_cache_alignment = 32;
   5.204 +
   5.205 +	if (!have_cpuid_p())
   5.206 +		return;
   5.207 +
   5.208 +	/* Get vendor name */
   5.209 +	cpuid(0x00000000, &c->cpuid_level,
   5.210 +	      (int *)&c->x86_vendor_id[0],
   5.211 +	      (int *)&c->x86_vendor_id[8],
   5.212 +	      (int *)&c->x86_vendor_id[4]);
   5.213 +
   5.214 +	get_cpu_vendor(c, 1);
   5.215 +
   5.216 +	c->x86 = 4;
   5.217 +	if (c->cpuid_level >= 0x00000001) {
   5.218 +		u32 junk, tfms, cap0, misc;
   5.219 +		cpuid(0x00000001, &tfms, &misc, &junk, &cap0);
   5.220 +		c->x86 = (tfms >> 8) & 15;
   5.221 +		c->x86_model = (tfms >> 4) & 15;
   5.222 +		if (c->x86 == 0xf) {
   5.223 +			c->x86 += (tfms >> 20) & 0xff;
   5.224 +			c->x86_model += ((tfms >> 16) & 0xF) << 4;
   5.225 +		}
   5.226 +		c->x86_mask = tfms & 15;
   5.227 +		if (cap0 & (1<<19))
   5.228 +			c->x86_cache_alignment = ((misc >> 8) & 0xff) * 8;
   5.229 +	}
   5.230 +
   5.231 +	early_intel_workaround(c);
   5.232 +}
   5.233 +
   5.234 +void __init generic_identify(struct cpuinfo_x86 * c)
   5.235 +{
   5.236 +	u32 tfms, xlvl;
   5.237 +	int junk;
   5.238 +
   5.239 +	if (have_cpuid_p()) {
   5.240 +		/* Get vendor name */
   5.241 +		cpuid(0x00000000, &c->cpuid_level,
   5.242 +		      (int *)&c->x86_vendor_id[0],
   5.243 +		      (int *)&c->x86_vendor_id[8],
   5.244 +		      (int *)&c->x86_vendor_id[4]);
   5.245 +		
   5.246 +		get_cpu_vendor(c, 0);
   5.247 +		/* Initialize the standard set of capabilities */
   5.248 +		/* Note that the vendor-specific code below might override */
   5.249 +	
   5.250 +		/* Intel-defined flags: level 0x00000001 */
   5.251 +		if ( c->cpuid_level >= 0x00000001 ) {
   5.252 +			u32 capability, excap;
   5.253 +			cpuid(0x00000001, &tfms, &junk, &excap, &capability);
   5.254 +			c->x86_capability[0] = capability;
   5.255 +			c->x86_capability[4] = excap;
   5.256 +			c->x86 = (tfms >> 8) & 15;
   5.257 +			c->x86_model = (tfms >> 4) & 15;
   5.258 +			if (c->x86 == 0xf) {
   5.259 +				c->x86 += (tfms >> 20) & 0xff;
   5.260 +				c->x86_model += ((tfms >> 16) & 0xF) << 4;
   5.261 +			} 
   5.262 +			c->x86_mask = tfms & 15;
   5.263 +		} else {
   5.264 +			/* Have CPUID level 0 only - unheard of */
   5.265 +			c->x86 = 4;
   5.266 +		}
   5.267 +
   5.268 +		/* AMD-defined flags: level 0x80000001 */
   5.269 +		xlvl = cpuid_eax(0x80000000);
   5.270 +		if ( (xlvl & 0xffff0000) == 0x80000000 ) {
   5.271 +			if ( xlvl >= 0x80000001 ) {
   5.272 +				c->x86_capability[1] = cpuid_edx(0x80000001);
   5.273 +				c->x86_capability[6] = cpuid_ecx(0x80000001);
   5.274 +			}
   5.275 +			if ( xlvl >= 0x80000004 )
   5.276 +				get_model_name(c); /* Default name */
   5.277 +		}
   5.278 +	}
   5.279 +}
   5.280 +
   5.281 +static void __init squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
   5.282 +{
   5.283 +	if (cpu_has(c, X86_FEATURE_PN) && disable_x86_serial_nr ) {
   5.284 +		/* Disable processor serial number */
   5.285 +		unsigned long lo,hi;
   5.286 +		rdmsr(MSR_IA32_BBL_CR_CTL,lo,hi);
   5.287 +		lo |= 0x200000;
   5.288 +		wrmsr(MSR_IA32_BBL_CR_CTL,lo,hi);
   5.289 +		printk(KERN_NOTICE "CPU serial number disabled.\n");
   5.290 +		clear_bit(X86_FEATURE_PN, c->x86_capability);
   5.291 +
   5.292 +		/* Disabling the serial number may affect the cpuid level */
   5.293 +		c->cpuid_level = cpuid_eax(0);
   5.294 +	}
   5.295 +}
   5.296 +
   5.297 +static int __init x86_serial_nr_setup(char *s)
   5.298 +{
   5.299 +	disable_x86_serial_nr = 0;
   5.300 +	return 1;
   5.301 +}
   5.302 +__setup("serialnumber", x86_serial_nr_setup);
   5.303 +
   5.304 +
   5.305 +
   5.306 +/*
   5.307 + * This does the hard work of actually picking apart the CPU stuff...
   5.308 + */
   5.309 +void __init identify_cpu(struct cpuinfo_x86 *c)
   5.310 +{
   5.311 +	int i;
   5.312 +
   5.313 +	c->x86_cache_size = -1;
   5.314 +	c->x86_vendor = X86_VENDOR_UNKNOWN;
   5.315 +	c->cpuid_level = -1;	/* CPUID not detected */
   5.316 +	c->x86_model = c->x86_mask = 0;	/* So far unknown... */
   5.317 +	c->x86_vendor_id[0] = '\0'; /* Unset */
   5.318 +	c->x86_model_id[0] = '\0';  /* Unset */
   5.319 +	c->x86_num_cores = 1;
   5.320 +	memset(&c->x86_capability, 0, sizeof c->x86_capability);
   5.321 +
   5.322 +	if (!have_cpuid_p()) {
   5.323 +		/* First of all, decide if this is a 486 or higher */
   5.324 +		/* It's a 486 if we can modify the AC flag */
   5.325 +		if ( flag_is_changeable_p(X86_EFLAGS_AC) )
   5.326 +			c->x86 = 4;
   5.327 +		else
   5.328 +			c->x86 = 3;
   5.329 +	}
   5.330 +
   5.331 +	generic_identify(c);
   5.332 +
   5.333 +#ifdef NOISY_CAPS
   5.334 +	printk(KERN_DEBUG "CPU: After generic identify, caps:");
   5.335 +	for (i = 0; i < NCAPINTS; i++)
   5.336 +		printk(" %08lx", c->x86_capability[i]);
   5.337 +	printk("\n");
   5.338 +#endif
   5.339 +
   5.340 +	if (this_cpu->c_identify) {
   5.341 +		this_cpu->c_identify(c);
   5.342 +#ifdef NOISY_CAPS
   5.343 +		printk(KERN_DEBUG "CPU: After vendor identify, caps:");
   5.344 +		for (i = 0; i < NCAPINTS; i++)
   5.345 +			printk(" %08lx", c->x86_capability[i]);
   5.346 +		printk("\n");
   5.347 +#endif
   5.348 +	}
   5.349 +
   5.350 +	/*
   5.351 +	 * Vendor-specific initialization.  In this section we
   5.352 +	 * canonicalize the feature flags, meaning if there are
   5.353 +	 * features a certain CPU supports which CPUID doesn't
   5.354 +	 * tell us, CPUID claiming incorrect flags, or other bugs,
   5.355 +	 * we handle them here.
   5.356 +	 *
   5.357 +	 * At the end of this section, c->x86_capability better
   5.358 +	 * indicate the features this CPU genuinely supports!
   5.359 +	 */
   5.360 +	if (this_cpu->c_init)
   5.361 +		this_cpu->c_init(c);
   5.362 +
   5.363 +	/* Disable the PN if appropriate */
   5.364 +	squash_the_stupid_serial_number(c);
   5.365 +
   5.366 +	/*
   5.367 +	 * The vendor-specific functions might have changed features.  Now
   5.368 +	 * we do "generic changes."
   5.369 +	 */
   5.370 +
   5.371 +	/* TSC disabled? */
   5.372 +	if ( tsc_disable )
   5.373 +		clear_bit(X86_FEATURE_TSC, c->x86_capability);
   5.374 +
   5.375 +	/* FXSR disabled? */
   5.376 +	if (disable_x86_fxsr) {
   5.377 +		clear_bit(X86_FEATURE_FXSR, c->x86_capability);
   5.378 +		clear_bit(X86_FEATURE_XMM, c->x86_capability);
   5.379 +	}
   5.380 +
   5.381 +	if (disable_pse)
   5.382 +		clear_bit(X86_FEATURE_PSE, c->x86_capability);
   5.383 +
   5.384 +	/* If the model name is still unset, do table lookup. */
   5.385 +	if ( !c->x86_model_id[0] ) {
   5.386 +		char *p;
   5.387 +		p = table_lookup_model(c);
   5.388 +		if ( p )
   5.389 +			strcpy(c->x86_model_id, p);
   5.390 +		else
   5.391 +			/* Last resort... */
   5.392 +			sprintf(c->x86_model_id, "%02x/%02x",
   5.393 +				c->x86_vendor, c->x86_model);
   5.394 +	}
   5.395 +
   5.396 +	/* Now the feature flags better reflect actual CPU features! */
   5.397 +#ifdef NOISY_CAPS
   5.398 +	printk(KERN_DEBUG "CPU: After all inits, caps:");
   5.399 +	for (i = 0; i < NCAPINTS; i++)
   5.400 +		printk(" %08lx", c->x86_capability[i]);
   5.401 +	printk("\n");
   5.402 +#endif
   5.403 +	/*
   5.404 +	 * On SMP, boot_cpu_data holds the common feature set between
   5.405 +	 * all CPUs; so make sure that we indicate which features are
   5.406 +	 * common between the CPUs.  The first time this routine gets
   5.407 +	 * executed, c == &boot_cpu_data.
   5.408 +	 */
   5.409 +	if ( c != &boot_cpu_data ) {
   5.410 +		/* AND the already accumulated flags with these */
   5.411 +		for ( i = 0 ; i < NCAPINTS ; i++ )
   5.412 +			boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
   5.413 +	}
   5.414 +
   5.415 +	/* Init Machine Check Exception if available. */
   5.416 +#ifdef CONFIG_X86_MCE
   5.417 +	mcheck_init(c);
   5.418 +#endif
   5.419 +}
   5.420 +/*
   5.421 + *	Perform early boot up checks for a valid TSC. See arch/i386/kernel/time.c
   5.422 + */
   5.423 + 
   5.424 +void __init dodgy_tsc(void)
   5.425 +{
   5.426 +	if (( boot_cpu_data.x86_vendor == X86_VENDOR_CYRIX ) ||
   5.427 +	    ( boot_cpu_data.x86_vendor == X86_VENDOR_NSC   ))
   5.428 +		cpu_devs[X86_VENDOR_CYRIX]->c_init(&boot_cpu_data);
   5.429 +}
   5.430 +
   5.431 +#ifdef CONFIG_X86_HT
   5.432 +void __init detect_ht(struct cpuinfo_x86 *c)
   5.433 +{
   5.434 +	u32 	eax, ebx, ecx, edx;
   5.435 +	int 	index_lsb, index_msb, tmp;
   5.436 +	int 	cpu = smp_processor_id();
   5.437 +
   5.438 +	if (!cpu_has(c, X86_FEATURE_HT))
   5.439 +		return;
   5.440 +
   5.441 +	cpuid(1, &eax, &ebx, &ecx, &edx);
   5.442 +	smp_num_siblings = (ebx & 0xff0000) >> 16;
   5.443 +
   5.444 +	if (smp_num_siblings == 1) {
   5.445 +		printk(KERN_INFO  "CPU: Hyper-Threading is disabled\n");
   5.446 +	} else if (smp_num_siblings > 1 ) {
   5.447 +		index_lsb = 0;
   5.448 +		index_msb = 31;
   5.449 +
   5.450 +		if (smp_num_siblings > NR_CPUS) {
    5.451 +			printk(KERN_WARNING "CPU: Unsupported number of siblings %d\n", smp_num_siblings);
   5.452 +			smp_num_siblings = 1;
   5.453 +			return;
   5.454 +		}
   5.455 +		tmp = smp_num_siblings;
   5.456 +		while ((tmp & 1) == 0) {
   5.457 +			tmp >>=1 ;
   5.458 +			index_lsb++;
   5.459 +		}
   5.460 +		tmp = smp_num_siblings;
   5.461 +		while ((tmp & 0x80000000 ) == 0) {
   5.462 +			tmp <<=1 ;
   5.463 +			index_msb--;
   5.464 +		}
   5.465 +		if (index_lsb != index_msb )
   5.466 +			index_msb++;
   5.467 +		phys_proc_id[cpu] = phys_pkg_id((ebx >> 24) & 0xFF, index_msb);
   5.468 +
   5.469 +		printk(KERN_INFO  "CPU: Physical Processor ID: %d\n",
   5.470 +		       phys_proc_id[cpu]);
   5.471 +	}
   5.472 +}
   5.473 +#endif
   5.474 +
   5.475 +void __init print_cpu_info(struct cpuinfo_x86 *c)
   5.476 +{
   5.477 +	char *vendor = NULL;
   5.478 +
   5.479 +	if (c->x86_vendor < X86_VENDOR_NUM)
   5.480 +		vendor = this_cpu->c_vendor;
   5.481 +	else if (c->cpuid_level >= 0)
   5.482 +		vendor = c->x86_vendor_id;
   5.483 +
   5.484 +	if (vendor && strncmp(c->x86_model_id, vendor, strlen(vendor)))
   5.485 +		printk("%s ", vendor);
   5.486 +
   5.487 +	if (!c->x86_model_id[0])
   5.488 +		printk("%d86", c->x86);
   5.489 +	else
   5.490 +		printk("%s", c->x86_model_id);
   5.491 +
   5.492 +	if (c->x86_mask || c->cpuid_level >= 0) 
   5.493 +		printk(" stepping %02x\n", c->x86_mask);
   5.494 +	else
   5.495 +		printk("\n");
   5.496 +}
   5.497 +
   5.498 +cpumask_t cpu_initialized __initdata = CPU_MASK_NONE;
   5.499 +
   5.500 +/* This is hacky. :)
   5.501 + * We're emulating future behavior.
   5.502 + * In the future, the cpu-specific init functions will be called implicitly
   5.503 + * via the magic of initcalls.
   5.504 + * They will insert themselves into the cpu_devs structure.
   5.505 + * Then, when cpu_init() is called, we can just iterate over that array.
   5.506 + */
   5.507 +
   5.508 +extern int intel_cpu_init(void);
   5.509 +extern int cyrix_init_cpu(void);
   5.510 +extern int nsc_init_cpu(void);
   5.511 +extern int amd_init_cpu(void);
   5.512 +extern int centaur_init_cpu(void);
   5.513 +extern int transmeta_init_cpu(void);
   5.514 +extern int rise_init_cpu(void);
   5.515 +void early_cpu_detect(void);
   5.516 +
   5.517 +void __init early_cpu_init(void)
   5.518 +{
   5.519 +	intel_cpu_init();
   5.520 +	amd_init_cpu();
   5.521 +#ifdef CONFIG_X86_32
   5.522 +	cyrix_init_cpu();
   5.523 +	nsc_init_cpu();
   5.524 +	centaur_init_cpu();
   5.525 +	transmeta_init_cpu();
   5.526 +	rise_init_cpu();
   5.527 +#endif
   5.528 +	early_cpu_detect();
   5.529 +}
   5.530 +/*
   5.531 + * cpu_init() initializes state that is per-CPU. Some data is already
   5.532 + * initialized (naturally) in the bootstrap process, such as the GDT
    5.533 + * and IDT. We reload them nevertheless; this function acts as a
    5.534 + * 'CPU state barrier': nothing should get across.
   5.535 + */
   5.536 +void __init cpu_init (void)
   5.537 +{
   5.538 +	int cpu = smp_processor_id();
   5.539 +	struct tss_struct *t = &init_tss[cpu];
   5.540 +	char gdt_load[10];
   5.541 +
   5.542 +	if (cpu_test_and_set(cpu, cpu_initialized)) {
   5.543 +		printk(KERN_WARNING "CPU#%d already initialized!\n", cpu);
   5.544 +		for (;;) local_irq_enable();
   5.545 +	}
   5.546 +	printk(KERN_INFO "Initializing CPU#%d\n", cpu);
   5.547 +
   5.548 +	if (cpu_has_vme || cpu_has_tsc || cpu_has_de)
   5.549 +		clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
   5.550 +
   5.551 +	*(unsigned short *)(&gdt_load[0]) = LAST_RESERVED_GDT_BYTE;
   5.552 +	*(unsigned long  *)(&gdt_load[2]) = GDT_VIRT_START(current);
   5.553 +	__asm__ __volatile__ ( "lgdt %0" : "=m" (gdt_load) );
   5.554 +
   5.555 +	/* No nested task. */
   5.556 +	__asm__("pushf ; andw $0xbfff,(%"__OP"sp) ; popf");
   5.557 +
   5.558 +	/* Ensure FPU gets initialised for each domain. */
   5.559 +	stts();
   5.560 +
   5.561 +	/* Set up and load the per-CPU TSS and LDT. */
   5.562 +	t->bitmap = IOBMP_INVALID_OFFSET;
   5.563 +#if defined(CONFIG_X86_32)
   5.564 +	t->ss0  = __HYPERVISOR_DS;
   5.565 +	t->esp0 = get_stack_bottom();
   5.566 +#elif defined(CONFIG_X86_64)
   5.567 +	/* Bottom-of-stack must be 16-byte aligned! */
   5.568 +	BUG_ON((get_stack_bottom() & 15) != 0);
   5.569 +	t->rsp0 = get_stack_bottom();
   5.570 +#endif
   5.571 +	set_tss_desc(cpu,t);
   5.572 +	load_TR(cpu);
   5.573 +	__asm__ __volatile__ ( "lldt %%ax" : : "a" (0) );
   5.574 +
   5.575 +	/* Clear all 6 debug registers: */
   5.576 +#define CD(register) __asm__("mov %0,%%db" #register ::"r"(0UL) );
   5.577 +	CD(0); CD(1); CD(2); CD(3); /* no db4 and db5 */; CD(6); CD(7);
   5.578 +#undef CD
   5.579 +
   5.580 +	/* Install correct page table. */
   5.581 +	write_ptbase(current);
   5.582 +}
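
early_cpu_detect() and generic_identify() above both decode CPUID leaf 1 EAX ("tfms") the same way: family and model are 4-bit fields, and family 0xf spills over into the extended family/model bits. A standalone sketch of just that decoding (the example signature is illustrative; nothing here is part of the changeset):

	/* Sketch of the family/model/stepping decode used above. */
	#include <stdio.h>
	#include <stdint.h>

	static void decode_tfms(uint32_t tfms, unsigned int *family,
				unsigned int *model, unsigned int *stepping)
	{
		*family = (tfms >> 8) & 15;
		*model  = (tfms >> 4) & 15;
		if (*family == 0xf) {	/* extended fields apply */
			*family += (tfms >> 20) & 0xff;
			*model  += ((tfms >> 16) & 0xf) << 4;
		}
		*stepping = tfms & 15;
	}

	int main(void)
	{
		unsigned int f, m, s;
		decode_tfms(0x00000f29, &f, &m, &s);	/* e.g. a P4-era signature */
		printf("family %u, model %u, stepping %u\n", f, m, s);
		return 0;
	}
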
     6.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     6.2 +++ b/xen/arch/x86/cpu/cpu.h	Thu May 26 15:35:14 2005 +0000
     6.3 @@ -0,0 +1,31 @@
     6.4 +
     6.5 +struct cpu_model_info {
     6.6 +	int vendor;
     6.7 +	int family;
     6.8 +	char *model_names[16];
     6.9 +};
    6.10 +
    6.11 +/* attempt to consolidate cpu attributes */
    6.12 +struct cpu_dev {
    6.13 +	char	* c_vendor;
    6.14 +
    6.15 +	/* some have two possibilities for cpuid string */
    6.16 +	char	* c_ident[2];	
    6.17 +
    6.18 +	struct		cpu_model_info c_models[4];
    6.19 +
    6.20 +	void		(*c_init)(struct cpuinfo_x86 * c);
    6.21 +	void		(*c_identify)(struct cpuinfo_x86 * c);
    6.22 +	unsigned int	(*c_size_cache)(struct cpuinfo_x86 * c, unsigned int size);
    6.23 +};
    6.24 +
    6.25 +extern struct cpu_dev * cpu_devs [X86_VENDOR_NUM];
    6.26 +
    6.27 +extern int get_model_name(struct cpuinfo_x86 *c);
    6.28 +extern void display_cacheinfo(struct cpuinfo_x86 *c);
    6.29 +
    6.30 +extern void generic_identify(struct cpuinfo_x86 * c);
    6.31 +extern int have_cpuid_p(void);
    6.32 +
    6.33 +extern void early_intel_workaround(struct cpuinfo_x86 *c);
    6.34 +
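
cpu.h above defines the vendor vtable this changeset revolves around: each vendor file fills in a cpu_dev and registers it in cpu_devs[] from its *_init_cpu() hook, and identify_cpu() in common.c dispatches through c_identify/c_init/c_size_cache. A toy sketch of the registration-and-dispatch pattern (stand-in types only, not the actual Xen structures):

	#include <stdio.h>

	struct cpu;			/* stand-in for struct cpuinfo_x86 */
	struct cpu_dev {
		const char *c_vendor;
		void (*c_init)(struct cpu *c);
	};

	#define VENDOR_NUM 8
	static struct cpu_dev *cpu_devs[VENDOR_NUM];

	static void init_demo(struct cpu *c) { (void)c; puts("demo c_init ran"); }
	static struct cpu_dev demo_dev = { .c_vendor = "Demo", .c_init = init_demo };

	static int demo_init_cpu(void)	/* mirrors amd_init_cpu() et al. */
	{
		cpu_devs[0] = &demo_dev;
		return 0;
	}

	int main(void)
	{
		demo_init_cpu();	/* early_cpu_init() calls each vendor hook */
		if (cpu_devs[0] && cpu_devs[0]->c_init)
			cpu_devs[0]->c_init(NULL);	/* identify_cpu() dispatch */
		return 0;
	}
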
     7.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     7.2 +++ b/xen/arch/x86/cpu/cyrix.c	Thu May 26 15:35:14 2005 +0000
     7.3 @@ -0,0 +1,400 @@
     7.4 +#include <xen/config.h>
     7.5 +#include <xen/init.h>
     7.6 +#include <xen/irq.h>
     7.7 +#include <xen/bitops.h>
     7.8 +#include <xen/delay.h>
     7.9 +#include <asm/io.h>
    7.10 +#include <asm/processor.h>
    7.11 +
    7.12 +#include "cpu.h"
    7.13 +
    7.14 +/*
     7.15 + * Read NSC/Cyrix DEVID registers (DIR) to get more detailed info about the CPU.
    7.16 + */
    7.17 +void __init do_cyrix_devid(unsigned char *dir0, unsigned char *dir1)
    7.18 +{
    7.19 +	unsigned char ccr2, ccr3;
    7.20 +	unsigned long flags;
    7.21 +	
    7.22 +	/* we test for DEVID by checking whether CCR3 is writable */
    7.23 +	local_irq_save(flags);
    7.24 +	ccr3 = getCx86(CX86_CCR3);
    7.25 +	setCx86(CX86_CCR3, ccr3 ^ 0x80);
    7.26 +	getCx86(0xc0);   /* dummy to change bus */
    7.27 +
    7.28 +	if (getCx86(CX86_CCR3) == ccr3) {       /* no DEVID regs. */
    7.29 +		ccr2 = getCx86(CX86_CCR2);
    7.30 +		setCx86(CX86_CCR2, ccr2 ^ 0x04);
    7.31 +		getCx86(0xc0);  /* dummy */
    7.32 +
    7.33 +		if (getCx86(CX86_CCR2) == ccr2) /* old Cx486SLC/DLC */
    7.34 +			*dir0 = 0xfd;
    7.35 +		else {                          /* Cx486S A step */
    7.36 +			setCx86(CX86_CCR2, ccr2);
    7.37 +			*dir0 = 0xfe;
    7.38 +		}
    7.39 +	}
    7.40 +	else {
    7.41 +		setCx86(CX86_CCR3, ccr3);  /* restore CCR3 */
    7.42 +
    7.43 +		/* read DIR0 and DIR1 CPU registers */
    7.44 +		*dir0 = getCx86(CX86_DIR0);
    7.45 +		*dir1 = getCx86(CX86_DIR1);
    7.46 +	}
    7.47 +	local_irq_restore(flags);
    7.48 +}
    7.49 +
    7.50 +/*
    7.51 + * Cx86_dir0_msb is a HACK needed by check_cx686_cpuid/slop in bugs.h in
    7.52 + * order to identify the Cyrix CPU model after we're out of setup.c
    7.53 + *
     7.54 + * Actually, since bugs.h doesn't even reference this, perhaps someone should
    7.55 + * fix the documentation ???
    7.56 + */
    7.57 +static unsigned char Cx86_dir0_msb __initdata = 0;
    7.58 +
    7.59 +static char Cx86_model[][9] __initdata = {
    7.60 +	"Cx486", "Cx486", "5x86 ", "6x86", "MediaGX ", "6x86MX ",
    7.61 +	"M II ", "Unknown"
    7.62 +};
    7.63 +static char Cx486_name[][5] __initdata = {
    7.64 +	"SLC", "DLC", "SLC2", "DLC2", "SRx", "DRx",
    7.65 +	"SRx2", "DRx2"
    7.66 +};
    7.67 +static char Cx486S_name[][4] __initdata = {
    7.68 +	"S", "S2", "Se", "S2e"
    7.69 +};
    7.70 +static char Cx486D_name[][4] __initdata = {
    7.71 +	"DX", "DX2", "?", "?", "?", "DX4"
    7.72 +};
    7.73 +static char Cx86_cb[] __initdata = "?.5x Core/Bus Clock";
    7.74 +static char cyrix_model_mult1[] __initdata = "12??43";
    7.75 +static char cyrix_model_mult2[] __initdata = "12233445";
    7.76 +
    7.77 +/*
    7.78 + * Reset the slow-loop (SLOP) bit on the 686(L) which is set by some old
    7.79 + * BIOSes for compatibility with DOS games.  This makes the udelay loop
    7.80 + * work correctly, and improves performance.
    7.81 + *
    7.82 + * FIXME: our newer udelay uses the tsc. We don't need to frob with SLOP
    7.83 + */
    7.84 +
    7.85 +static void __init check_cx686_slop(struct cpuinfo_x86 *c)
    7.86 +{
    7.87 +	unsigned long flags;
    7.88 +	
    7.89 +	if (Cx86_dir0_msb == 3) {
    7.90 +		unsigned char ccr3, ccr5;
    7.91 +
    7.92 +		local_irq_save(flags);
    7.93 +		ccr3 = getCx86(CX86_CCR3);
    7.94 +		setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN  */
    7.95 +		ccr5 = getCx86(CX86_CCR5);
    7.96 +		if (ccr5 & 2)
    7.97 +			setCx86(CX86_CCR5, ccr5 & 0xfd);  /* reset SLOP */
    7.98 +		setCx86(CX86_CCR3, ccr3);                 /* disable MAPEN */
    7.99 +		local_irq_restore(flags);
   7.100 +	}
   7.101 +}
   7.102 +
   7.103 +
   7.104 +static void __init set_cx86_reorder(void)
   7.105 +{
   7.106 +	u8 ccr3;
   7.107 +
   7.108 +	printk(KERN_INFO "Enable Memory access reorder on Cyrix/NSC processor.\n");
   7.109 +	ccr3 = getCx86(CX86_CCR3);
   7.110 +	setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN  */
   7.111 +
   7.112 +	/* Load/Store Serialize to mem access disable (=reorder it)  */
   7.113 +	setCx86(CX86_PCR0, getCx86(CX86_PCR0) & ~0x80);
   7.114 +	/* set load/store serialize from 1GB to 4GB */
   7.115 +	ccr3 |= 0xe0;
   7.116 +	setCx86(CX86_CCR3, ccr3);
   7.117 +}
   7.118 +
   7.119 +static void __init set_cx86_memwb(void)
   7.120 +{
   7.121 +	u32 cr0;
   7.122 +
   7.123 +	printk(KERN_INFO "Enable Memory-Write-back mode on Cyrix/NSC processor.\n");
   7.124 +
   7.125 +	/* CCR2 bit 2: unlock NW bit */
   7.126 +	setCx86(CX86_CCR2, getCx86(CX86_CCR2) & ~0x04);
   7.127 +	/* set 'Not Write-through' */
   7.128 +	cr0 = 0x20000000;
   7.129 +	__asm__("movl %%cr0,%%eax\n\t"
   7.130 +		"orl %0,%%eax\n\t"
   7.131 +		"movl %%eax,%%cr0\n"
   7.132 +		: : "r" (cr0)
   7.133 +		:"ax");
   7.134 +	/* CCR2 bit 2: lock NW bit and set WT1 */
   7.135 +	setCx86(CX86_CCR2, getCx86(CX86_CCR2) | 0x14 );
   7.136 +}
   7.137 +
   7.138 +static void __init set_cx86_inc(void)
   7.139 +{
   7.140 +	unsigned char ccr3;
   7.141 +
   7.142 +	printk(KERN_INFO "Enable Incrementor on Cyrix/NSC processor.\n");
   7.143 +
   7.144 +	ccr3 = getCx86(CX86_CCR3);
   7.145 +	setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN  */
   7.146 +	/* PCR1 -- Performance Control */
   7.147 +	/* Incrementor on, whatever that is */
   7.148 +	setCx86(CX86_PCR1, getCx86(CX86_PCR1) | 0x02);
   7.149 +	/* PCR0 -- Performance Control */
   7.150 +	/* Incrementor Margin 10 */
   7.151 +	setCx86(CX86_PCR0, getCx86(CX86_PCR0) | 0x04); 
   7.152 +	setCx86(CX86_CCR3, ccr3);	/* disable MAPEN */
   7.153 +}
   7.154 +
   7.155 +/*
   7.156 + *	Configure later MediaGX and/or Geode processor.
   7.157 + */
   7.158 +
   7.159 +static void __init geode_configure(void)
   7.160 +{
   7.161 +	unsigned long flags;
   7.162 +	u8 ccr3, ccr4;
   7.163 +	local_irq_save(flags);
   7.164 +
   7.165 +	/* Suspend on halt power saving and enable #SUSP pin */
   7.166 +	setCx86(CX86_CCR2, getCx86(CX86_CCR2) | 0x88);
   7.167 +
   7.168 +	ccr3 = getCx86(CX86_CCR3);
   7.169 +	setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10);	/* Enable */
   7.170 +	
   7.171 +	ccr4 = getCx86(CX86_CCR4);
   7.172 +	ccr4 |= 0x38;		/* FPU fast, DTE cache, Mem bypass */
   7.173 +	
   7.174 +	setCx86(CX86_CCR3, ccr3);
   7.175 +	
   7.176 +	set_cx86_memwb();
   7.177 +	set_cx86_reorder();	
   7.178 +	set_cx86_inc();
   7.179 +	
   7.180 +	local_irq_restore(flags);
   7.181 +}
   7.182 +
   7.183 +
   7.184 +static void __init init_cyrix(struct cpuinfo_x86 *c)
   7.185 +{
   7.186 +	unsigned char dir0, dir0_msn, dir0_lsn, dir1 = 0;
   7.187 +	char *buf = c->x86_model_id;
   7.188 +	const char *p = NULL;
   7.189 +
   7.190 +	/* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
   7.191 +	   3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */
   7.192 +	clear_bit(0*32+31, c->x86_capability);
   7.193 +
   7.194 +	/* Cyrix used bit 24 in extended (AMD) CPUID for Cyrix MMX extensions */
   7.195 +	if ( test_bit(1*32+24, c->x86_capability) ) {
   7.196 +		clear_bit(1*32+24, c->x86_capability);
   7.197 +		set_bit(X86_FEATURE_CXMMX, c->x86_capability);
   7.198 +	}
   7.199 +
   7.200 +	do_cyrix_devid(&dir0, &dir1);
   7.201 +
   7.202 +	check_cx686_slop(c);
   7.203 +
   7.204 +	Cx86_dir0_msb = dir0_msn = dir0 >> 4; /* identifies CPU "family"   */
   7.205 +	dir0_lsn = dir0 & 0xf;                /* model or clock multiplier */
   7.206 +
   7.207 +	/* common case step number/rev -- exceptions handled below */
   7.208 +	c->x86_model = (dir1 >> 4) + 1;
   7.209 +	c->x86_mask = dir1 & 0xf;
   7.210 +
   7.211 +	/* Now cook; the original recipe is by Channing Corn, from Cyrix.
   7.212 +	 * We do the same thing for each generation: we work out
   7.213 +	 * the model, multiplier and stepping.  Black magic included,
   7.214 +	 * to make the silicon step/rev numbers match the printed ones.
   7.215 +	 */
   7.216 +	 
   7.217 +	switch (dir0_msn) {
   7.218 +		unsigned char tmp;
   7.219 +
   7.220 +	case 0: /* Cx486SLC/DLC/SRx/DRx */
   7.221 +		p = Cx486_name[dir0_lsn & 7];
   7.222 +		break;
   7.223 +
   7.224 +	case 1: /* Cx486S/DX/DX2/DX4 */
   7.225 +		p = (dir0_lsn & 8) ? Cx486D_name[dir0_lsn & 5]
   7.226 +			: Cx486S_name[dir0_lsn & 3];
   7.227 +		break;
   7.228 +
   7.229 +	case 2: /* 5x86 */
   7.230 +		Cx86_cb[2] = cyrix_model_mult1[dir0_lsn & 5];
   7.231 +		p = Cx86_cb+2;
   7.232 +		break;
   7.233 +
   7.234 +	case 3: /* 6x86/6x86L */
   7.235 +		Cx86_cb[1] = ' ';
   7.236 +		Cx86_cb[2] = cyrix_model_mult1[dir0_lsn & 5];
   7.237 +		if (dir1 > 0x21) { /* 686L */
   7.238 +			Cx86_cb[0] = 'L';
   7.239 +			p = Cx86_cb;
   7.240 +			(c->x86_model)++;
   7.241 +		} else             /* 686 */
   7.242 +			p = Cx86_cb+1;
   7.243 +		/* Emulate MTRRs using Cyrix's ARRs. */
   7.244 +		set_bit(X86_FEATURE_CYRIX_ARR, c->x86_capability);
   7.245 +		/* 6x86's contain this bug */
   7.246 +		c->coma_bug = 1;
   7.247 +		break;
   7.248 +
   7.249 +	case 4: /* MediaGX/GXm or Geode GXM/GXLV/GX1 */
    7.250 +		c->x86_cache_size = 16;	/* Yep, 16K integrated cache, that's it */
   7.251 + 
    7.252 +		/* GXm supports extended cpuid levels a la AMD */
   7.253 +		if (c->cpuid_level == 2) {
   7.254 +			/* Enable cxMMX extensions (GX1 Datasheet 54) */
   7.255 +			setCx86(CX86_CCR7, getCx86(CX86_CCR7)|1);
   7.256 +			
   7.257 +			/* GXlv/GXm/GX1 */
   7.258 +			if((dir1 >= 0x50 && dir1 <= 0x54) || dir1 >= 0x63)
   7.259 +				geode_configure();
   7.260 +			get_model_name(c);  /* get CPU marketing name */
   7.261 +			return;
   7.262 +		}
   7.263 +		else {  /* MediaGX */
   7.264 +			Cx86_cb[2] = (dir0_lsn & 1) ? '3' : '4';
   7.265 +			p = Cx86_cb+2;
   7.266 +			c->x86_model = (dir1 & 0x20) ? 1 : 2;
   7.267 +		}
   7.268 +		break;
   7.269 +
    7.270 +	case 5: /* 6x86MX/M II */
   7.271 +		if (dir1 > 7)
   7.272 +		{
   7.273 +			dir0_msn++;  /* M II */
   7.274 +			/* Enable MMX extensions (App note 108) */
   7.275 +			setCx86(CX86_CCR7, getCx86(CX86_CCR7)|1);
   7.276 +		}
   7.277 +		else
   7.278 +		{
   7.279 +			c->coma_bug = 1;      /* 6x86MX, it has the bug. */
   7.280 +		}
   7.281 +		tmp = (!(dir0_lsn & 7) || dir0_lsn & 1) ? 2 : 0;
   7.282 +		Cx86_cb[tmp] = cyrix_model_mult2[dir0_lsn & 7];
   7.283 +		p = Cx86_cb+tmp;
    7.284 +		if (((dir1 & 0x0f) > 4) || ((dir1 & 0xf0) == 0x20))
   7.285 +			(c->x86_model)++;
   7.286 +		/* Emulate MTRRs using Cyrix's ARRs. */
   7.287 +		set_bit(X86_FEATURE_CYRIX_ARR, c->x86_capability);
   7.288 +		break;
   7.289 +
   7.290 +	case 0xf:  /* Cyrix 486 without DEVID registers */
   7.291 +		switch (dir0_lsn) {
   7.292 +		case 0xd:  /* either a 486SLC or DLC w/o DEVID */
   7.293 +			dir0_msn = 0;
   7.294 +			p = Cx486_name[(c->hard_math) ? 1 : 0];
   7.295 +			break;
   7.296 +
   7.297 +		case 0xe:  /* a 486S A step */
   7.298 +			dir0_msn = 0;
   7.299 +			p = Cx486S_name[0];
   7.300 +			break;
   7.301 +		}
   7.302 +		break;
   7.303 +
   7.304 +	default:  /* unknown (shouldn't happen, we know everyone ;-) */
   7.305 +		dir0_msn = 7;
   7.306 +		break;
   7.307 +	}
   7.308 +	strcpy(buf, Cx86_model[dir0_msn & 7]);
   7.309 +	if (p) strcat(buf, p);
   7.310 +	return;
   7.311 +}
   7.312 +
   7.313 +/*
   7.314 + * Cyrix CPUs without cpuid or with cpuid not yet enabled can be detected
   7.315 + * by the fact that they preserve the flags across the division of 5/2.
   7.316 + * PII and PPro exhibit this behavior too, but they have cpuid available.
   7.317 + */
   7.318 + 
   7.319 +/*
   7.320 + * Perform the Cyrix 5/2 test. A Cyrix won't change
   7.321 + * the flags, while other 486 chips will.
   7.322 + */
   7.323 +static inline int test_cyrix_52div(void)
   7.324 +{
   7.325 +	unsigned int test;
   7.326 +
   7.327 +	__asm__ __volatile__(
   7.328 +	     "sahf\n\t"		/* clear flags (%eax = 0x0005) */
   7.329 +	     "div %b2\n\t"	/* divide 5 by 2 */
   7.330 +	     "lahf"		/* store flags into %ah */
   7.331 +	     : "=a" (test)
   7.332 +	     : "0" (5), "q" (2)
   7.333 +	     : "cc");
   7.334 +
   7.335 +	/* AH is 0x02 on Cyrix after the divide.. */
   7.336 +	return (unsigned char) (test >> 8) == 0x02;
   7.337 +}
   7.338 +
   7.339 +static void cyrix_identify(struct cpuinfo_x86 * c)
   7.340 +{
   7.341 +	/* Detect Cyrix with disabled CPUID */
   7.342 +	if ( c->x86 == 4 && test_cyrix_52div() ) {
   7.343 +		unsigned char dir0, dir1;
   7.344 +		
   7.345 +		strcpy(c->x86_vendor_id, "CyrixInstead");
    7.346 +		c->x86_vendor = X86_VENDOR_CYRIX;
    7.347 +
    7.348 +		/* Actually enable cpuid on the older cyrix */
    7.349 +
    7.350 +		/* Retrieve CPU revisions */
    7.351 +
    7.352 +		do_cyrix_devid(&dir0, &dir1);
    7.353 +
    7.354 +		dir0 >>= 4;
    7.355 +
    7.356 +		/* Check it is an affected model */
    7.357 +
    7.358 +		if (dir0 == 5 || dir0 == 3)
    7.359 +		{
   7.360 +			unsigned char ccr3, ccr4;
   7.361 +			unsigned long flags;
   7.362 +			printk(KERN_INFO "Enabling CPUID on Cyrix processor.\n");
   7.363 +			local_irq_save(flags);
   7.364 +			ccr3 = getCx86(CX86_CCR3);
   7.365 +			setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN  */
   7.366 +			ccr4 = getCx86(CX86_CCR4);
   7.367 +			setCx86(CX86_CCR4, ccr4 | 0x80);          /* enable cpuid  */
   7.368 +			setCx86(CX86_CCR3, ccr3);                 /* disable MAPEN */
   7.369 +			local_irq_restore(flags);
   7.370 +		}
   7.371 +	}
   7.372 +	generic_identify(c);
   7.373 +}
   7.374 +
   7.375 +static struct cpu_dev cyrix_cpu_dev __initdata = {
   7.376 +	.c_vendor	= "Cyrix",
   7.377 +	.c_ident 	= { "CyrixInstead" },
   7.378 +	.c_init		= init_cyrix,
   7.379 +	.c_identify	= cyrix_identify,
   7.380 +};
   7.381 +
   7.382 +int __init cyrix_init_cpu(void)
   7.383 +{
   7.384 +	cpu_devs[X86_VENDOR_CYRIX] = &cyrix_cpu_dev;
   7.385 +	return 0;
   7.386 +}
   7.387 +
   7.388 +//early_arch_initcall(cyrix_init_cpu);
   7.389 +
   7.390 +static struct cpu_dev nsc_cpu_dev __initdata = {
   7.391 +	.c_vendor	= "NSC",
   7.392 +	.c_ident 	= { "Geode by NSC" },
   7.393 +	.c_init		= init_cyrix,
   7.394 +	.c_identify	= generic_identify,
   7.395 +};
   7.396 +
   7.397 +int __init nsc_init_cpu(void)
   7.398 +{
   7.399 +	cpu_devs[X86_VENDOR_NSC] = &nsc_cpu_dev;
   7.400 +	return 0;
   7.401 +}
   7.402 +
   7.403 +//early_arch_initcall(nsc_init_cpu);
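
[Editorial sketch, not part of the patch: the getCx86()/setCx86() helpers used throughout cyrix.c follow the Cyrix indexed-register protocol, selecting a configuration register via port 0x22 and transferring its value via port 0x23, as the setCx86() macro in the processor.h hunk below also shows. The sketch_ names are hypothetical stand-ins.]

	/* Hypothetical stand-ins for getCx86()/setCx86(); assumes <asm/io.h>. */
	static inline unsigned char sketch_getCx86(unsigned char reg)
	{
		outb(reg, 0x22);	/* select the Cyrix configuration register */
		return inb(0x23);	/* read its current value */
	}

	static inline void sketch_setCx86(unsigned char reg, unsigned char data)
	{
		outb(reg, 0x22);	/* select the Cyrix configuration register */
		outb(data, 0x23);	/* write the new value */
	}
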
     8.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     8.2 +++ b/xen/arch/x86/cpu/intel.c	Thu May 26 15:35:14 2005 +0000
     8.3 @@ -0,0 +1,244 @@
     8.4 +#include <xen/config.h>
     8.5 +#include <xen/init.h>
     8.6 +#include <xen/kernel.h>
     8.7 +#include <xen/string.h>
     8.8 +#include <xen/bitops.h>
     8.9 +#include <xen/smp.h>
    8.10 +#include <asm/processor.h>
    8.11 +#include <asm/msr.h>
    8.12 +#include <asm/uaccess.h>
    8.13 +#include <asm/mpspec.h>
    8.14 +#include <asm/apic.h>
    8.15 +#include <mach_apic.h>
    8.16 +
    8.17 +#include "cpu.h"
    8.18 +
    8.19 +#define select_idle_routine(x) ((void)0)
    8.20 +
    8.21 +extern int trap_init_f00f_bug(void);
    8.22 +
    8.23 +#ifdef CONFIG_X86_INTEL_USERCOPY
    8.24 +/*
    8.25 + * Alignment at which movsl is preferred for bulk memory copies.
    8.26 + */
    8.27 +struct movsl_mask movsl_mask;
    8.28 +#endif
    8.29 +
    8.30 +void __init early_intel_workaround(struct cpuinfo_x86 *c)
    8.31 +{
    8.32 +	if (c->x86_vendor != X86_VENDOR_INTEL)
    8.33 +		return;
    8.34 +	/* Netburst reports 64 bytes clflush size, but does IO in 128 bytes */
    8.35 +	if (c->x86 == 15 && c->x86_cache_alignment == 64)
    8.36 +		c->x86_cache_alignment = 128;
    8.37 +}
    8.38 +
    8.39 +/*
    8.40 + *	Early probe support logic for ppro memory erratum #50
    8.41 + *
    8.42 + *	This is called before we do cpu ident work
    8.43 + */
    8.44 + 
    8.45 +int __init ppro_with_ram_bug(void)
    8.46 +{
    8.47 +	/* Uses data from early_cpu_detect now */
    8.48 +	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
    8.49 +	    boot_cpu_data.x86 == 6 &&
    8.50 +	    boot_cpu_data.x86_model == 1 &&
    8.51 +	    boot_cpu_data.x86_mask < 8) {
    8.52 +		printk(KERN_INFO "Pentium Pro with Errata#50 detected. Taking evasive action.\n");
    8.53 +		return 1;
    8.54 +	}
    8.55 +	return 0;
    8.56 +}
    8.57 +	
    8.58 +
    8.59 +/*
    8.60 + * P4 Xeon errata 037 workaround.
    8.61 + * Hardware prefetcher may cause stale data to be loaded into the cache.
    8.62 + */
    8.63 +static void __init Intel_errata_workarounds(struct cpuinfo_x86 *c)
    8.64 +{
    8.65 +	unsigned long lo, hi;
    8.66 +
    8.67 +	if ((c->x86 == 15) && (c->x86_model == 1) && (c->x86_mask == 1)) {
    8.68 +		rdmsr (MSR_IA32_MISC_ENABLE, lo, hi);
    8.69 +		if ((lo & (1<<9)) == 0) {
    8.70 +			printk (KERN_INFO "CPU: C0 stepping P4 Xeon detected.\n");
    8.71 +			printk (KERN_INFO "CPU: Disabling hardware prefetching (Errata 037)\n");
    8.72 +			lo |= (1<<9);	/* Disable hw prefetching */
    8.73 +			wrmsr (MSR_IA32_MISC_ENABLE, lo, hi);
    8.74 +		}
    8.75 +	}
    8.76 +}
    8.77 +
    8.78 +
    8.79 +static void __init init_intel(struct cpuinfo_x86 *c)
    8.80 +{
    8.81 +	unsigned int l2 = 0;
    8.82 +	char *p = NULL;
    8.83 +
    8.84 +#ifdef CONFIG_X86_F00F_BUG
    8.85 +	/*
    8.86 +	 * All current models of Pentium and Pentium with MMX technology CPUs
    8.87 +	 * have the F0 0F bug, which lets nonprivileged users lock up the system.
    8.88 +	 * Note that the workaround only should be initialized once...
    8.89 +	 */
    8.90 +	c->f00f_bug = 0;
    8.91 +	if ( c->x86 == 5 ) {
    8.92 +		static int f00f_workaround_enabled = 0;
    8.93 +
    8.94 +		c->f00f_bug = 1;
    8.95 +		if ( !f00f_workaround_enabled ) {
    8.96 +			trap_init_f00f_bug();
    8.97 +			printk(KERN_NOTICE "Intel Pentium with F0 0F bug - workaround enabled.\n");
    8.98 +			f00f_workaround_enabled = 1;
    8.99 +		}
   8.100 +	}
   8.101 +#endif
   8.102 +
   8.103 +	select_idle_routine(c);
   8.104 +	l2 = init_intel_cacheinfo(c);
   8.105 +
   8.106 +	/* SEP CPUID bug: Pentium Pro reports SEP but doesn't have it until model 3 mask 3 */
   8.107 +	if ((c->x86<<8 | c->x86_model<<4 | c->x86_mask) < 0x633)
   8.108 +		clear_bit(X86_FEATURE_SEP, c->x86_capability);
   8.109 +
   8.110 +	/* Names for the Pentium II/Celeron processors 
   8.111 +	   detectable only by also checking the cache size.
   8.112 +	   Dixon is NOT a Celeron. */
   8.113 +	if (c->x86 == 6) {
   8.114 +		switch (c->x86_model) {
   8.115 +		case 5:
   8.116 +			if (c->x86_mask == 0) {
   8.117 +				if (l2 == 0)
   8.118 +					p = "Celeron (Covington)";
   8.119 +				else if (l2 == 256)
   8.120 +					p = "Mobile Pentium II (Dixon)";
   8.121 +			}
   8.122 +			break;
   8.123 +			
   8.124 +		case 6:
   8.125 +			if (l2 == 128)
   8.126 +				p = "Celeron (Mendocino)";
   8.127 +			else if (c->x86_mask == 0 || c->x86_mask == 5)
   8.128 +				p = "Celeron-A";
   8.129 +			break;
   8.130 +			
   8.131 +		case 8:
   8.132 +			if (l2 == 128)
   8.133 +				p = "Celeron (Coppermine)";
   8.134 +			break;
   8.135 +		}
   8.136 +	}
   8.137 +
   8.138 +	if ( p )
   8.139 +		strcpy(c->x86_model_id, p);
   8.140 +	
   8.141 +	detect_ht(c);
   8.142 +
   8.143 +	/* Work around errata */
   8.144 +	Intel_errata_workarounds(c);
   8.145 +
   8.146 +#ifdef CONFIG_X86_INTEL_USERCOPY
   8.147 +	/*
   8.148 +	 * Set up the preferred alignment for movsl bulk memory moves
   8.149 +	 */
   8.150 +	switch (c->x86) {
   8.151 +	case 4:		/* 486: untested */
   8.152 +		break;
   8.153 +	case 5:		/* Old Pentia: untested */
   8.154 +		break;
   8.155 +	case 6:		/* PII/PIII only like movsl with 8-byte alignment */
   8.156 +		movsl_mask.mask = 7;
   8.157 +		break;
   8.158 +	case 15:	/* P4 is OK down to 8-byte alignment */
   8.159 +		movsl_mask.mask = 7;
   8.160 +		break;
   8.161 +	}
   8.162 +#endif
   8.163 +
   8.164 +	if (c->x86 == 15) 
   8.165 +		set_bit(X86_FEATURE_P4, c->x86_capability);
   8.166 +	if (c->x86 == 6) 
   8.167 +		set_bit(X86_FEATURE_P3, c->x86_capability);
   8.168 +}
   8.169 +
   8.170 +
   8.171 +static unsigned int intel_size_cache(struct cpuinfo_x86 * c, unsigned int size)
   8.172 +{
   8.173 +	/* Intel PIII Tualatin. This comes in two flavours.
   8.174 +	 * One has 256kb of cache, the other 512. We have no way
   8.175 +	 * to determine which, so we use a boottime override
   8.176 +	 * for the 512kb model, and assume 256 otherwise.
   8.177 +	 */
   8.178 +	if ((c->x86 == 6) && (c->x86_model == 11) && (size == 0))
   8.179 +		size = 256;
   8.180 +	return size;
   8.181 +}
   8.182 +
   8.183 +static struct cpu_dev intel_cpu_dev __initdata = {
   8.184 +	.c_vendor	= "Intel",
   8.185 +	.c_ident 	= { "GenuineIntel" },
   8.186 +	.c_models = {
   8.187 +		{ .vendor = X86_VENDOR_INTEL, .family = 4, .model_names = 
   8.188 +		  { 
   8.189 +			  [0] = "486 DX-25/33", 
   8.190 +			  [1] = "486 DX-50", 
   8.191 +			  [2] = "486 SX", 
   8.192 +			  [3] = "486 DX/2", 
   8.193 +			  [4] = "486 SL", 
   8.194 +			  [5] = "486 SX/2", 
   8.195 +			  [7] = "486 DX/2-WB", 
   8.196 +			  [8] = "486 DX/4", 
   8.197 +			  [9] = "486 DX/4-WB"
   8.198 +		  }
   8.199 +		},
   8.200 +		{ .vendor = X86_VENDOR_INTEL, .family = 5, .model_names =
   8.201 +		  { 
   8.202 +			  [0] = "Pentium 60/66 A-step", 
   8.203 +			  [1] = "Pentium 60/66", 
   8.204 +			  [2] = "Pentium 75 - 200",
   8.205 +			  [3] = "OverDrive PODP5V83", 
   8.206 +			  [4] = "Pentium MMX",
   8.207 +			  [7] = "Mobile Pentium 75 - 200", 
   8.208 +			  [8] = "Mobile Pentium MMX"
   8.209 +		  }
   8.210 +		},
   8.211 +		{ .vendor = X86_VENDOR_INTEL, .family = 6, .model_names =
   8.212 +		  { 
   8.213 +			  [0] = "Pentium Pro A-step",
   8.214 +			  [1] = "Pentium Pro", 
   8.215 +			  [3] = "Pentium II (Klamath)", 
   8.216 +			  [4] = "Pentium II (Deschutes)", 
   8.217 +			  [5] = "Pentium II (Deschutes)", 
   8.218 +			  [6] = "Mobile Pentium II",
   8.219 +			  [7] = "Pentium III (Katmai)", 
   8.220 +			  [8] = "Pentium III (Coppermine)", 
   8.221 +			  [10] = "Pentium III (Cascades)",
   8.222 +			  [11] = "Pentium III (Tualatin)",
   8.223 +		  }
   8.224 +		},
   8.225 +		{ .vendor = X86_VENDOR_INTEL, .family = 15, .model_names =
   8.226 +		  {
   8.227 +			  [0] = "Pentium 4 (Unknown)",
   8.228 +			  [1] = "Pentium 4 (Willamette)",
   8.229 +			  [2] = "Pentium 4 (Northwood)",
   8.230 +			  [4] = "Pentium 4 (Foster)",
   8.231 +			  [5] = "Pentium 4 (Foster)",
   8.232 +		  }
   8.233 +		},
   8.234 +	},
   8.235 +	.c_init		= init_intel,
   8.236 +	.c_identify	= generic_identify,
   8.237 +	.c_size_cache	= intel_size_cache,
   8.238 +};
   8.239 +
   8.240 +__init int intel_cpu_init(void)
   8.241 +{
   8.242 +	cpu_devs[X86_VENDOR_INTEL] = &intel_cpu_dev;
   8.243 +	return 0;
   8.244 +}
   8.245 +
   8.246 +// arch_initcall(intel_cpu_init);
   8.247 +
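
[Editorial sketch with a hypothetical CPU: how the packed-signature SEP check in init_intel() above behaves. Family, model and stepping are packed into one integer so a single comparison covers every CPU older than model 3 stepping 3.]

	unsigned int sig = (6 << 8) | (1 << 4) | 7;	/* Pentium Pro, stepping 7 */
	/* sig == 0x617, and 0x617 < 0x633, so X86_FEATURE_SEP is cleared;
	 * family 6, model 3, stepping 3 packs to exactly 0x633 and keeps SEP. */
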
     9.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     9.2 +++ b/xen/arch/x86/cpu/intel_cacheinfo.c	Thu May 26 15:35:14 2005 +0000
     9.3 @@ -0,0 +1,142 @@
     9.4 +#include <xen/config.h>
     9.5 +#include <xen/init.h>
     9.6 +#include <xen/lib.h>
     9.7 +#include <asm/processor.h>
     9.8 +
     9.9 +#define LVL_1_INST	1
    9.10 +#define LVL_1_DATA	2
    9.11 +#define LVL_2		3
    9.12 +#define LVL_3		4
    9.13 +#define LVL_TRACE	5
    9.14 +
    9.15 +struct _cache_table
    9.16 +{
    9.17 +	unsigned char descriptor;
    9.18 +	char cache_type;
    9.19 +	short size;
    9.20 +};
    9.21 +
    9.22 +/* all the cache descriptor types we care about (no TLB or trace cache entries) */
    9.23 +static struct _cache_table cache_table[] __initdata =
    9.24 +{
    9.25 +	{ 0x06, LVL_1_INST, 8 },	/* 4-way set assoc, 32 byte line size */
    9.26 +	{ 0x08, LVL_1_INST, 16 },	/* 4-way set assoc, 32 byte line size */
    9.27 +	{ 0x0a, LVL_1_DATA, 8 },	/* 2 way set assoc, 32 byte line size */
    9.28 +	{ 0x0c, LVL_1_DATA, 16 },	/* 4-way set assoc, 32 byte line size */
    9.29 +	{ 0x22, LVL_3,      512 },	/* 4-way set assoc, sectored cache, 64 byte line size */
    9.30 +	{ 0x23, LVL_3,      1024 },	/* 8-way set assoc, sectored cache, 64 byte line size */
    9.31 +	{ 0x25, LVL_3,      2048 },	/* 8-way set assoc, sectored cache, 64 byte line size */
    9.32 +	{ 0x29, LVL_3,      4096 },	/* 8-way set assoc, sectored cache, 64 byte line size */
    9.33 +	{ 0x2c, LVL_1_DATA, 32 },	/* 8-way set assoc, 64 byte line size */
    9.34 +	{ 0x30, LVL_1_INST, 32 },	/* 8-way set assoc, 64 byte line size */
    9.35 +	{ 0x39, LVL_2,      128 },	/* 4-way set assoc, sectored cache, 64 byte line size */
    9.36 +	{ 0x3b, LVL_2,      128 },	/* 2-way set assoc, sectored cache, 64 byte line size */
    9.37 +	{ 0x3c, LVL_2,      256 },	/* 4-way set assoc, sectored cache, 64 byte line size */
    9.38 +	{ 0x41, LVL_2,      128 },	/* 4-way set assoc, 32 byte line size */
    9.39 +	{ 0x42, LVL_2,      256 },	/* 4-way set assoc, 32 byte line size */
    9.40 +	{ 0x43, LVL_2,      512 },	/* 4-way set assoc, 32 byte line size */
    9.41 +	{ 0x44, LVL_2,      1024 },	/* 4-way set assoc, 32 byte line size */
    9.42 +	{ 0x45, LVL_2,      2048 },	/* 4-way set assoc, 32 byte line size */
    9.43 +	{ 0x60, LVL_1_DATA, 16 },	/* 8-way set assoc, sectored cache, 64 byte line size */
    9.44 +	{ 0x66, LVL_1_DATA, 8 },	/* 4-way set assoc, sectored cache, 64 byte line size */
    9.45 +	{ 0x67, LVL_1_DATA, 16 },	/* 4-way set assoc, sectored cache, 64 byte line size */
    9.46 +	{ 0x68, LVL_1_DATA, 32 },	/* 4-way set assoc, sectored cache, 64 byte line size */
    9.47 +	{ 0x70, LVL_TRACE,  12 },	/* 8-way set assoc */
    9.48 +	{ 0x71, LVL_TRACE,  16 },	/* 8-way set assoc */
    9.49 +	{ 0x72, LVL_TRACE,  32 },	/* 8-way set assoc */
    9.50 +	{ 0x78, LVL_2,    1024 },	/* 4-way set assoc, 64 byte line size */
    9.51 +	{ 0x79, LVL_2,     128 },	/* 8-way set assoc, sectored cache, 64 byte line size */
    9.52 +	{ 0x7a, LVL_2,     256 },	/* 8-way set assoc, sectored cache, 64 byte line size */
    9.53 +	{ 0x7b, LVL_2,     512 },	/* 8-way set assoc, sectored cache, 64 byte line size */
    9.54 +	{ 0x7c, LVL_2,    1024 },	/* 8-way set assoc, sectored cache, 64 byte line size */
    9.55 +	{ 0x7d, LVL_2,    2048 },	/* 8-way set assoc, 64 byte line size */
    9.56 +	{ 0x7f, LVL_2,     512 },	/* 2-way set assoc, 64 byte line size */
    9.57 +	{ 0x82, LVL_2,     256 },	/* 8-way set assoc, 32 byte line size */
    9.58 +	{ 0x83, LVL_2,     512 },	/* 8-way set assoc, 32 byte line size */
    9.59 +	{ 0x84, LVL_2,    1024 },	/* 8-way set assoc, 32 byte line size */
    9.60 +	{ 0x85, LVL_2,    2048 },	/* 8-way set assoc, 32 byte line size */
    9.61 +	{ 0x86, LVL_2,     512 },	/* 4-way set assoc, 64 byte line size */
    9.62 +	{ 0x87, LVL_2,    1024 },	/* 8-way set assoc, 64 byte line size */
    9.63 +	{ 0x00, 0, 0}
    9.64 +};
    9.65 +
    9.66 +unsigned int __init init_intel_cacheinfo(struct cpuinfo_x86 *c)
    9.67 +{
    9.68 +	unsigned int trace = 0, l1i = 0, l1d = 0, l2 = 0, l3 = 0; /* Cache sizes */
    9.69 +
    9.70 +	if (c->cpuid_level > 1) {
    9.71 +		/* supports eax=2  call */
    9.72 +		int i, j, n;
    9.73 +		int regs[4];
    9.74 +		unsigned char *dp = (unsigned char *)regs;
    9.75 +
    9.76 +		/* Number of times to iterate */
    9.77 +		n = cpuid_eax(2) & 0xFF;
    9.78 +
    9.79 +		for ( i = 0 ; i < n ; i++ ) {
    9.80 +			cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]);
    9.81 +
    9.82 +			/* If bit 31 is set, this is an unknown format */
    9.83 +			for ( j = 0 ; j < 3 ; j++ ) {
    9.84 +				if ( regs[j] < 0 ) regs[j] = 0;
    9.85 +			}
    9.86 +
    9.87 +			/* Byte 0 is level count, not a descriptor */
    9.88 +			for ( j = 1 ; j < 16 ; j++ ) {
    9.89 +				unsigned char des = dp[j];
    9.90 +				unsigned char k = 0;
    9.91 +
    9.92 +				/* look up this descriptor in the table */
    9.93 +				while (cache_table[k].descriptor != 0)
    9.94 +				{
    9.95 +					if (cache_table[k].descriptor == des) {
    9.96 +						switch (cache_table[k].cache_type) {
    9.97 +						case LVL_1_INST:
    9.98 +							l1i += cache_table[k].size;
    9.99 +							break;
   9.100 +						case LVL_1_DATA:
   9.101 +							l1d += cache_table[k].size;
   9.102 +							break;
   9.103 +						case LVL_2:
   9.104 +							l2 += cache_table[k].size;
   9.105 +							break;
   9.106 +						case LVL_3:
   9.107 +							l3 += cache_table[k].size;
   9.108 +							break;
   9.109 +						case LVL_TRACE:
   9.110 +							trace += cache_table[k].size;
   9.111 +							break;
   9.112 +						}
   9.113 +
   9.114 +						break;
   9.115 +					}
   9.116 +
   9.117 +					k++;
   9.118 +				}
   9.119 +			}
   9.120 +		}
   9.121 +
   9.122 +		if ( trace )
   9.123 +			printk (KERN_INFO "CPU: Trace cache: %dK uops", trace);
   9.124 +		else if ( l1i )
   9.125 +			printk (KERN_INFO "CPU: L1 I cache: %dK", l1i);
   9.126 +		if ( l1d )
   9.127 +			printk(", L1 D cache: %dK\n", l1d);
   9.128 +		else
   9.129 +			printk("\n");
   9.130 +		if ( l2 )
   9.131 +			printk(KERN_INFO "CPU: L2 cache: %dK\n", l2);
   9.132 +		if ( l3 )
   9.133 +			printk(KERN_INFO "CPU: L3 cache: %dK\n", l3);
   9.134 +
   9.135 +		/*
   9.136 +		 * This assumes the L3 cache is shared; it typically lives in
   9.137 +		 * the northbridge.  The L1 caches are included by the L2
   9.138 +		 * cache, and so should not be included for the purpose of
   9.139 +		 * SMP switching weights.
   9.140 +		 */
   9.141 +		c->x86_cache_size = l2 ? l2 : (l1i+l1d);
   9.142 +	}
   9.143 +
   9.144 +	return l2;
   9.145 +}
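
[Editorial sketch, hypothetical values: one CPUID leaf-2 descriptor byte (say 0x43, taken from an output register whose bit 31 is clear) resolved through a cache_table-style lookup, mirroring the inner loop of init_intel_cacheinfo() above.]

	#include <stdio.h>

	struct entry { unsigned char descriptor; const char *level; short size; };

	static const struct entry table[] = {
		{ 0x2c, "L1 D", 32 },
		{ 0x43, "L2",  512 },
		{ 0x00, 0, 0 }			/* terminator, as in cache_table[] */
	};

	int main(void)
	{
		unsigned char des = 0x43;	/* one of the 15 descriptor bytes */
		int k;
		for (k = 0; table[k].descriptor != 0; k++)
			if (table[k].descriptor == des)
				printf("%s cache: %dK\n", table[k].level, table[k].size);
		return 0;
	}
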
    10.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    10.2 +++ b/xen/arch/x86/cpu/rise.c	Thu May 26 15:35:14 2005 +0000
    10.3 @@ -0,0 +1,54 @@
    10.4 +#include <xen/config.h>
    10.5 +#include <xen/lib.h>
    10.6 +#include <xen/init.h>
    10.7 +#include <xen/bitops.h>
    10.8 +#include <asm/processor.h>
    10.9 +
   10.10 +#include "cpu.h"
   10.11 +
   10.12 +static void __init init_rise(struct cpuinfo_x86 *c)
   10.13 +{
   10.14 +	printk("CPU: Rise iDragon");
   10.15 +	if (c->x86_model > 2)
   10.16 +		printk(" II");
   10.17 +	printk("\n");
   10.18 +
   10.19 +	/* Unhide possibly hidden capability flags
   10.20 +	   The mp6 iDragon family don't have MSRs.
   10.21 +	   We switch on extra features with this cpuid weirdness: */
   10.22 +	__asm__ (
   10.23 +		"movl $0x6363452a, %%eax\n\t"
   10.24 +		"movl $0x3231206c, %%ecx\n\t"
   10.25 +		"movl $0x2a32313a, %%edx\n\t"
   10.26 +		"cpuid\n\t"
   10.27 +		"movl $0x63634523, %%eax\n\t"
   10.28 +		"movl $0x32315f6c, %%ecx\n\t"
   10.29 +		"movl $0x2333313a, %%edx\n\t"
   10.30 +		"cpuid\n\t" : : : "eax", "ebx", "ecx", "edx"
   10.31 +	);
   10.32 +	set_bit(X86_FEATURE_CX8, c->x86_capability);
   10.33 +}
   10.34 +
   10.35 +static struct cpu_dev rise_cpu_dev __initdata = {
   10.36 +	.c_vendor	= "Rise",
   10.37 +	.c_ident	= { "RiseRiseRise" },
   10.38 +	.c_models = {
   10.39 +		{ .vendor = X86_VENDOR_RISE, .family = 5, .model_names = 
   10.40 +		  { 
   10.41 +			  [0] = "iDragon", 
   10.42 +			  [2] = "iDragon", 
   10.43 +			  [8] = "iDragon II", 
   10.44 +			  [9] = "iDragon II"
   10.45 +		  }
   10.46 +		},
   10.47 +	},
   10.48 +	.c_init		= init_rise,
   10.49 +};
   10.50 +
   10.51 +int __init rise_init_cpu(void)
   10.52 +{
   10.53 +	cpu_devs[X86_VENDOR_RISE] = &rise_cpu_dev;
   10.54 +	return 0;
   10.55 +}
   10.56 +
   10.57 +//early_arch_initcall(rise_init_cpu);
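
[Editorial sketch: each vendor file registers a struct cpu_dev in the cpu_devs[] array; the new cpu/common.c, not shown in this part of the patch, presumably dispatches through that table roughly like this.]

	struct cpu_dev *this_cpu = cpu_devs[c->x86_vendor];

	if (this_cpu && this_cpu->c_identify)
		this_cpu->c_identify(c);	/* e.g. cyrix_identify() */
	if (this_cpu && this_cpu->c_init)
		this_cpu->c_init(c);		/* e.g. init_rise() */
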
    11.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    11.2 +++ b/xen/arch/x86/cpu/transmeta.c	Thu May 26 15:35:14 2005 +0000
    11.3 @@ -0,0 +1,108 @@
    11.4 +#include <xen/config.h>
    11.5 +#include <xen/lib.h>
    11.6 +#include <xen/init.h>
    11.7 +#include <asm/processor.h>
    11.8 +#include <asm/msr.h>
    11.9 +#include "cpu.h"
   11.10 +
   11.11 +static void __init init_transmeta(struct cpuinfo_x86 *c)
   11.12 +{
   11.13 +	unsigned int cap_mask, uk, max, dummy;
   11.14 +	unsigned int cms_rev1, cms_rev2;
   11.15 +	unsigned int cpu_rev, cpu_freq, cpu_flags, new_cpu_rev;
   11.16 +	char cpu_info[65];
   11.17 +
   11.18 +	get_model_name(c);	/* Same as AMD/Cyrix */
   11.19 +	display_cacheinfo(c);
   11.20 +
   11.21 +	/* Print CMS and CPU revision */
   11.22 +	max = cpuid_eax(0x80860000);
   11.23 +	cpu_rev = 0;
   11.24 +	if ( max >= 0x80860001 ) {
   11.25 +		cpuid(0x80860001, &dummy, &cpu_rev, &cpu_freq, &cpu_flags); 
   11.26 +		if (cpu_rev != 0x02000000) {
   11.27 +			printk(KERN_INFO "CPU: Processor revision %u.%u.%u.%u, %u MHz\n",
   11.28 +				(cpu_rev >> 24) & 0xff,
   11.29 +				(cpu_rev >> 16) & 0xff,
   11.30 +				(cpu_rev >> 8) & 0xff,
   11.31 +				cpu_rev & 0xff,
   11.32 +				cpu_freq);
   11.33 +		}
   11.34 +	}
   11.35 +	if ( max >= 0x80860002 ) {
   11.36 +		cpuid(0x80860002, &new_cpu_rev, &cms_rev1, &cms_rev2, &dummy);
   11.37 +		if (cpu_rev == 0x02000000) {
   11.38 +			printk(KERN_INFO "CPU: Processor revision %08X, %u MHz\n",
   11.39 +				new_cpu_rev, cpu_freq);
   11.40 +		}
   11.41 +		printk(KERN_INFO "CPU: Code Morphing Software revision %u.%u.%u-%u-%u\n",
   11.42 +		       (cms_rev1 >> 24) & 0xff,
   11.43 +		       (cms_rev1 >> 16) & 0xff,
   11.44 +		       (cms_rev1 >> 8) & 0xff,
   11.45 +		       cms_rev1 & 0xff,
   11.46 +		       cms_rev2);
   11.47 +	}
   11.48 +	if ( max >= 0x80860006 ) {
   11.49 +		cpuid(0x80860003,
   11.50 +		      (void *)&cpu_info[0],
   11.51 +		      (void *)&cpu_info[4],
   11.52 +		      (void *)&cpu_info[8],
   11.53 +		      (void *)&cpu_info[12]);
   11.54 +		cpuid(0x80860004,
   11.55 +		      (void *)&cpu_info[16],
   11.56 +		      (void *)&cpu_info[20],
   11.57 +		      (void *)&cpu_info[24],
   11.58 +		      (void *)&cpu_info[28]);
   11.59 +		cpuid(0x80860005,
   11.60 +		      (void *)&cpu_info[32],
   11.61 +		      (void *)&cpu_info[36],
   11.62 +		      (void *)&cpu_info[40],
   11.63 +		      (void *)&cpu_info[44]);
   11.64 +		cpuid(0x80860006,
   11.65 +		      (void *)&cpu_info[48],
   11.66 +		      (void *)&cpu_info[52],
   11.67 +		      (void *)&cpu_info[56],
   11.68 +		      (void *)&cpu_info[60]);
   11.69 +		cpu_info[64] = '\0';
   11.70 +		printk(KERN_INFO "CPU: %s\n", cpu_info);
   11.71 +	}
   11.72 +
   11.73 +	/* Unhide possibly hidden capability flags */
   11.74 +	rdmsr(0x80860004, cap_mask, uk);
   11.75 +	wrmsr(0x80860004, ~0, uk);
   11.76 +	c->x86_capability[0] = cpuid_edx(0x00000001);
   11.77 +	wrmsr(0x80860004, cap_mask, uk);
   11.78 +	
   11.79 +	/* If we can run i686 user-space code, call us an i686 (bit masks, not bit numbers) */
   11.80 +#define USER686 ((1 << X86_FEATURE_TSC)|(1 << X86_FEATURE_CX8)|(1 << X86_FEATURE_CMOV))
   11.81 +	if ( c->x86 == 5 && (c->x86_capability[0] & USER686) == USER686 )
   11.82 +		c->x86 = 6;
   11.83 +}
   11.84 +
   11.85 +static void transmeta_identify(struct cpuinfo_x86 * c)
   11.86 +{
   11.87 +	u32 xlvl;
   11.88 +	generic_identify(c);
   11.89 +
   11.90 +	/* Transmeta-defined flags: level 0x80860001 */
   11.91 +	xlvl = cpuid_eax(0x80860000);
   11.92 +	if ( (xlvl & 0xffff0000) == 0x80860000 ) {
   11.93 +		if (  xlvl >= 0x80860001 )
   11.94 +			c->x86_capability[2] = cpuid_edx(0x80860001);
   11.95 +	}
   11.96 +}
   11.97 +
   11.98 +static struct cpu_dev transmeta_cpu_dev __initdata = {
   11.99 +	.c_vendor	= "Transmeta",
  11.100 +	.c_ident	= { "GenuineTMx86", "TransmetaCPU" },
  11.101 +	.c_init		= init_transmeta,
  11.102 +	.c_identify	= transmeta_identify,
  11.103 +};
  11.104 +
  11.105 +int __init transmeta_init_cpu(void)
  11.106 +{
  11.107 +	cpu_devs[X86_VENDOR_TRANSMETA] = &transmeta_cpu_dev;
  11.108 +	return 0;
  11.109 +}
  11.110 +
  11.111 +//early_arch_initcall(transmeta_init_cpu);
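
[Editorial sketch: the (void *) casts in init_transmeta() work because CPUID leaves 0x80860003..0x80860006 each return 16 ASCII bytes in EAX/EBX/ECX/EDX, so storing the raw registers rebuilds the 64-character string. The helper below is a hypothetical loop-form equivalent.]

	static void sketch_get_tm_string(char buf[65])
	{
		unsigned int leaf, i = 0;

		for (leaf = 0x80860003; leaf <= 0x80860006; leaf++, i += 16)
			cpuid(leaf, (unsigned int *)&buf[i],
				    (unsigned int *)&buf[i + 4],
				    (unsigned int *)&buf[i + 8],
				    (unsigned int *)&buf[i + 12]);
		buf[64] = '\0';
	}
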
    12.1 --- a/xen/arch/x86/setup.c	Thu May 26 15:11:08 2005 +0000
    12.2 +++ b/xen/arch/x86/setup.c	Thu May 26 15:35:14 2005 +0000
    12.3 @@ -82,7 +82,8 @@ extern void init_IRQ(void);
    12.4  extern void trap_init(void);
    12.5  extern void time_init(void);
    12.6  extern void ac_timer_init(void);
    12.7 -extern void initialize_keytable();
    12.8 +extern void initialize_keytable(void);
    12.9 +extern void early_cpu_init(void);
   12.10  
   12.11  extern unsigned long cpu0_stack[];
   12.12  
   12.13 @@ -101,256 +102,6 @@ int acpi_disabled;
   12.14  
   12.15  int logical_proc_id[NR_CPUS];
   12.16  
   12.17 -/* Standard macro to see if a specific flag is changeable. */
   12.18 -static inline int flag_is_changeable_p(unsigned long flag)
   12.19 -{
   12.20 -    unsigned long f1, f2;
   12.21 -
   12.22 -    asm("pushf\n\t"
   12.23 -        "pushf\n\t"
   12.24 -        "pop %0\n\t"
   12.25 -        "mov %0,%1\n\t"
   12.26 -        "xor %2,%0\n\t"
   12.27 -        "push %0\n\t"
   12.28 -        "popf\n\t"
   12.29 -        "pushf\n\t"
   12.30 -        "pop %0\n\t"
   12.31 -        "popf\n\t"
   12.32 -        : "=&r" (f1), "=&r" (f2)
   12.33 -        : "ir" (flag));
   12.34 -
   12.35 -    return ((f1^f2) & flag) != 0;
   12.36 -}
   12.37 -
   12.38 -/* Probe for the CPUID instruction */
   12.39 -static int __init have_cpuid_p(void)
   12.40 -{
   12.41 -    return flag_is_changeable_p(X86_EFLAGS_ID);
   12.42 -}
   12.43 -
   12.44 -void __init get_cpu_vendor(struct cpuinfo_x86 *c)
   12.45 -{
   12.46 -    char *v = c->x86_vendor_id;
   12.47 -
   12.48 -    if (!strcmp(v, "GenuineIntel"))
   12.49 -        c->x86_vendor = X86_VENDOR_INTEL;
   12.50 -    else if (!strcmp(v, "AuthenticAMD"))
   12.51 -        c->x86_vendor = X86_VENDOR_AMD;
   12.52 -    else if (!strcmp(v, "CyrixInstead"))
   12.53 -        c->x86_vendor = X86_VENDOR_CYRIX;
   12.54 -    else if (!strcmp(v, "UMC UMC UMC "))
   12.55 -        c->x86_vendor = X86_VENDOR_UMC;
   12.56 -    else if (!strcmp(v, "CentaurHauls"))
   12.57 -        c->x86_vendor = X86_VENDOR_CENTAUR;
   12.58 -    else if (!strcmp(v, "NexGenDriven"))
   12.59 -        c->x86_vendor = X86_VENDOR_NEXGEN;
   12.60 -    else if (!strcmp(v, "RiseRiseRise"))
   12.61 -        c->x86_vendor = X86_VENDOR_RISE;
   12.62 -    else if (!strcmp(v, "GenuineTMx86") ||
   12.63 -             !strcmp(v, "TransmetaCPU"))
   12.64 -        c->x86_vendor = X86_VENDOR_TRANSMETA;
   12.65 -    else
   12.66 -        c->x86_vendor = X86_VENDOR_UNKNOWN;
   12.67 -}
   12.68 -
   12.69 -static void __init init_intel(struct cpuinfo_x86 *c)
   12.70 -{
   12.71 -    /* SEP CPUID bug: Pentium Pro reports SEP but doesn't have it */
   12.72 -    if ( c->x86 == 6 && c->x86_model < 3 && c->x86_mask < 3 )
   12.73 -        clear_bit(X86_FEATURE_SEP, &c->x86_capability);
   12.74 -
   12.75 -    if ( test_bit(X86_FEATURE_HT, &c->x86_capability) )
   12.76 -    {
   12.77 -        u32     eax, ebx, ecx, edx;
   12.78 -        int     initial_apic_id, siblings, cpu = smp_processor_id();
   12.79 -
   12.80 -        cpuid(1, &eax, &ebx, &ecx, &edx);
   12.81 -        ht_per_core = siblings = (ebx & 0xff0000) >> 16;
   12.82 -
   12.83 -        if ( opt_noht )
   12.84 -            clear_bit(X86_FEATURE_HT, &c->x86_capability[0]);
   12.85 -
   12.86 -        if ( siblings <= 1 )
   12.87 -        {
   12.88 -            printk(KERN_INFO  "CPU#%d: Hyper-Threading is disabled\n", cpu);
   12.89 -        } 
   12.90 -        else if ( siblings > 2 )
   12.91 -        {
   12.92 -            panic("We don't support more than two logical CPUs per package!");
   12.93 -        }
   12.94 -        else
   12.95 -        {
   12.96 -            initial_apic_id = ebx >> 24 & 0xff;
   12.97 -            phys_proc_id[cpu]    = initial_apic_id >> 1;
   12.98 -            logical_proc_id[cpu] = initial_apic_id & 1;
   12.99 -            printk(KERN_INFO  "CPU#%d: Physical ID: %d, Logical ID: %d\n",
  12.100 -                   cpu, phys_proc_id[cpu], logical_proc_id[cpu]);
  12.101 -        }
  12.102 -    }
  12.103 -
  12.104 -#ifdef CONFIG_VMX
  12.105 -    start_vmx();
  12.106 -#endif
  12.107 -
  12.108 -}
  12.109 -
  12.110 -static void __init init_amd(struct cpuinfo_x86 *c)
  12.111 -{
  12.112 -    /* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
  12.113 -       3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */
  12.114 -    clear_bit(0*32+31, &c->x86_capability);
  12.115 -	
  12.116 -    switch(c->x86)
  12.117 -    {
  12.118 -    case 5:
  12.119 -        panic("AMD K6 is not supported.\n");
  12.120 -    case 6:	/* An Athlon/Duron. We can trust the BIOS probably */
  12.121 -        break;		
  12.122 -    }
  12.123 -}
  12.124 -
  12.125 -/*
  12.126 - * This does the hard work of actually picking apart the CPU stuff...
  12.127 - */
  12.128 -void __init identify_cpu(struct cpuinfo_x86 *c)
  12.129 -{
  12.130 -    int i, cpu = smp_processor_id();
  12.131 -    u32 xlvl, tfms, junk;
  12.132 -
  12.133 -    phys_proc_id[cpu]    = cpu;
  12.134 -    logical_proc_id[cpu] = 0;
  12.135 -
  12.136 -    c->x86_vendor = X86_VENDOR_UNKNOWN;
  12.137 -    c->cpuid_level = -1;	/* CPUID not detected */
  12.138 -    c->x86_model = c->x86_mask = 0;	/* So far unknown... */
  12.139 -    c->x86_vendor_id[0] = '\0'; /* Unset */
  12.140 -    memset(&c->x86_capability, 0, sizeof c->x86_capability);
  12.141 -
  12.142 -    if ( !have_cpuid_p() )
  12.143 -        panic("Ancient processors not supported\n");
  12.144 -
  12.145 -    /* Get vendor name */
  12.146 -    cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
  12.147 -          (unsigned int *)&c->x86_vendor_id[0],
  12.148 -          (unsigned int *)&c->x86_vendor_id[8],
  12.149 -          (unsigned int *)&c->x86_vendor_id[4]);
  12.150 -
  12.151 -    get_cpu_vendor(c);
  12.152 -		
  12.153 -    if ( c->cpuid_level == 0 )
  12.154 -        panic("Decrepit CPUID not supported\n");
  12.155 -
  12.156 -    cpuid(0x00000001, &tfms, &junk, &junk,
  12.157 -          &c->x86_capability[0]);
  12.158 -    c->x86 = (tfms >> 8) & 15;
  12.159 -    c->x86_model = (tfms >> 4) & 15;
  12.160 -    c->x86_mask = tfms & 15;
  12.161 -
  12.162 -    /* AMD-defined flags: level 0x80000001 */
  12.163 -    xlvl = cpuid_eax(0x80000000);
  12.164 -    if ( (xlvl & 0xffff0000) == 0x80000000 ) {
  12.165 -        if ( xlvl >= 0x80000001 )
  12.166 -            c->x86_capability[1] = cpuid_edx(0x80000001);
  12.167 -    }
  12.168 -
  12.169 -    /* Transmeta-defined flags: level 0x80860001 */
  12.170 -    xlvl = cpuid_eax(0x80860000);
  12.171 -    if ( (xlvl & 0xffff0000) == 0x80860000 ) {
  12.172 -        if (  xlvl >= 0x80860001 )
  12.173 -            c->x86_capability[2] = cpuid_edx(0x80860001);
  12.174 -    }
  12.175 -
  12.176 -    printk("CPU%d: Before vendor init, caps: %08x %08x %08x, vendor = %d\n",
  12.177 -           smp_processor_id(),
  12.178 -           c->x86_capability[0],
  12.179 -           c->x86_capability[1],
  12.180 -           c->x86_capability[2],
  12.181 -           c->x86_vendor);
  12.182 -
  12.183 -    switch ( c->x86_vendor ) {
  12.184 -    case X86_VENDOR_INTEL:
  12.185 -        init_intel(c);
  12.186 -        break;
  12.187 -    case X86_VENDOR_AMD:
  12.188 -        init_amd(c);
  12.189 -        break;
  12.190 -    case X86_VENDOR_UNKNOWN:  /* Connectix Virtual PC reports this */
  12.191 -	break;
  12.192 -    case X86_VENDOR_CENTAUR:
  12.193 -        break;
  12.194 -    default:
  12.195 -        printk("Unknown CPU identifier (%d): continuing anyway, "
  12.196 -               "but might fail.\n", c->x86_vendor);
  12.197 -    }
  12.198 -	
  12.199 -    printk("CPU caps: %08x %08x %08x %08x\n",
  12.200 -           c->x86_capability[0],
  12.201 -           c->x86_capability[1],
  12.202 -           c->x86_capability[2],
  12.203 -           c->x86_capability[3]);
  12.204 -
  12.205 -    /*
  12.206 -     * On SMP, boot_cpu_data holds the common feature set between
  12.207 -     * all CPUs; so make sure that we indicate which features are
  12.208 -     * common between the CPUs.  The first time this routine gets
  12.209 -     * executed, c == &boot_cpu_data.
  12.210 -     */
  12.211 -    if ( c != &boot_cpu_data ) {
  12.212 -        /* AND the already accumulated flags with these */
  12.213 -        for ( i = 0 ; i < NCAPINTS ; i++ )
  12.214 -            boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
  12.215 -    }
  12.216 -}
  12.217 -
  12.218 -void __init print_cpu_info(struct cpuinfo_x86 *c)
  12.219 -{
  12.220 -    printk("booted.\n");
  12.221 -}
  12.222 -
  12.223 -unsigned long cpu_initialized;
  12.224 -void __init cpu_init(void)
  12.225 -{
  12.226 -    int nr = smp_processor_id();
  12.227 -    struct tss_struct *t = &init_tss[nr];
  12.228 -    char gdt_load[10];
  12.229 -
  12.230 -    if ( test_and_set_bit(nr, &cpu_initialized) )
  12.231 -        panic("CPU#%d already initialized!!!\n", nr);
  12.232 -    printk("Initializing CPU#%d\n", nr);
  12.233 -
  12.234 -    *(unsigned short *)(&gdt_load[0]) = LAST_RESERVED_GDT_BYTE;
  12.235 -    *(unsigned long  *)(&gdt_load[2]) = GDT_VIRT_START(current);
  12.236 -    __asm__ __volatile__ ( "lgdt %0" : "=m" (gdt_load) );
  12.237 -
  12.238 -    /* No nested task. */
  12.239 -    __asm__ __volatile__ ( "pushf ; andw $0xbfff,(%"__OP"sp) ; popf" );
  12.240 -
  12.241 -    /* Ensure FPU gets initialised for each domain. */
  12.242 -    stts();
  12.243 -
  12.244 -    /* Set up and load the per-CPU TSS and LDT. */
  12.245 -    t->bitmap = IOBMP_INVALID_OFFSET;
  12.246 -#if defined(CONFIG_X86_32)
  12.247 -    t->ss0  = __HYPERVISOR_DS;
  12.248 -    t->esp0 = get_stack_bottom();
  12.249 -#elif defined(CONFIG_X86_64)
  12.250 -    /* Bottom-of-stack must be 16-byte aligned or CPU will force it! :-o */
  12.251 -    BUG_ON((get_stack_bottom() & 15) != 0);
  12.252 -    t->rsp0 = get_stack_bottom();
  12.253 -#endif
  12.254 -    set_tss_desc(nr,t);
  12.255 -    load_TR(nr);
  12.256 -    __asm__ __volatile__ ( "lldt %%ax" : : "a" (0) );
  12.257 -
  12.258 -    /* Clear all 6 debug registers. */
  12.259 -#define CD(register) __asm__ ( "mov %0,%%db" #register : : "r" (0UL) );
  12.260 -    CD(0); CD(1); CD(2); CD(3); /* no db4 and db5 */; CD(6); CD(7);
  12.261 -#undef CD
  12.262 -
  12.263 -    /* Install correct page table. */
  12.264 -    write_ptbase(current);
  12.265 -}
  12.266 -
  12.267  int acpi_force;
  12.268  char acpi_param[10] = "";
  12.269  static void parse_acpi_param(char *s)
  12.270 @@ -415,6 +166,7 @@ static void __init start_of_day(void)
  12.271          virt_to_phys(gdt_table) >> PAGE_SHIFT, 1, PAGE_HYPERVISOR);
  12.272  
  12.273      /* Process CPU type information. */
  12.274 +    early_cpu_init();
  12.275      identify_cpu(&boot_cpu_data);
  12.276      if ( cpu_has_fxsr )
  12.277          set_in_cr4(X86_CR4_OSFXSR);
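
[Editorial sketch, hypothetical EBX value: the hyperthreading decode deleted above reappears behind the detect_ht() hook declared in the processor.h hunk below. A worked example of the bit-field extraction:]

	unsigned int ebx = 0x01020800;		/* hypothetical CPUID(1).EBX */
	int siblings = (ebx & 0xff0000) >> 16;	/* 2 logical CPUs per package */
	int apic_id  = (ebx >> 24) & 0xff;	/* initial APIC ID = 1 */
	int phys     = apic_id >> 1;		/* physical package 0 */
	int logical  = apic_id & 1;		/* second thread in that package */
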
    13.1 --- a/xen/include/asm-x86/config.h	Thu May 26 15:11:08 2005 +0000
    13.2 +++ b/xen/include/asm-x86/config.h	Thu May 26 15:35:14 2005 +0000
    13.3 @@ -7,11 +7,10 @@
    13.4  #ifndef __X86_CONFIG_H__
    13.5  #define __X86_CONFIG_H__
    13.6  
    13.7 +#define CONFIG_X86 1
    13.8 +#define CONFIG_X86_HT 1
    13.9 +#define CONFIG_SHADOW 1
   13.10  #define CONFIG_VMX 1
   13.11 -
   13.12 -#define CONFIG_X86 1
   13.13 -#define CONFIG_SHADOW 1
   13.14 -
   13.15  #define CONFIG_SMP 1
   13.16  #define CONFIG_X86_LOCAL_APIC 1
   13.17  #define CONFIG_X86_GOOD_APIC 1
    14.1 --- a/xen/include/asm-x86/cpufeature.h	Thu May 26 15:11:08 2005 +0000
    14.2 +++ b/xen/include/asm-x86/cpufeature.h	Thu May 26 15:35:14 2005 +0000
    14.3 @@ -4,13 +4,12 @@
    14.4   * Defines x86 CPU feature bits
    14.5   */
    14.6  
    14.7 -#ifndef __ASM_X86_CPUFEATURE_H
    14.8 -#define __ASM_X86_CPUFEATURE_H
    14.9 +#ifndef __ASM_I386_CPUFEATURE_H
   14.10 +#define __ASM_I386_CPUFEATURE_H
   14.11  
   14.12 -/* Sample usage: CPU_FEATURE_P(cpu.x86_capability, FPU) */
   14.13 -#define CPU_FEATURE_P(CAP, FEATURE) test_bit(CAP, X86_FEATURE_##FEATURE)
   14.14 +#include <xen/bitops.h>
   14.15  
   14.16 -#define NCAPINTS	6	/* Currently we have 6 32-bit words worth of info */
   14.17 +#define NCAPINTS	7	/* N 32-bit words worth of info */
   14.18  
   14.19  /* Intel-defined CPU features, CPUID level 0x00000001 (edx), word 0 */
   14.20  #define X86_FEATURE_FPU		(0*32+ 0) /* Onboard FPU */
   14.21 @@ -48,7 +47,7 @@
   14.22  /* Don't duplicate feature flags which are redundant with Intel! */
   14.23  #define X86_FEATURE_SYSCALL	(1*32+11) /* SYSCALL/SYSRET */
   14.24  #define X86_FEATURE_MP		(1*32+19) /* MP Capable. */
   14.25 -#define X86_FEATURE_NX		(1*32+20) /* No-Execute Bit. */
   14.26 +#define X86_FEATURE_NX		(1*32+20) /* Execute Disable */
   14.27  #define X86_FEATURE_MMXEXT	(1*32+22) /* AMD MMX extensions */
   14.28  #define X86_FEATURE_LM		(1*32+29) /* Long Mode (x86-64) */
   14.29  #define X86_FEATURE_3DNOWEXT	(1*32+30) /* AMD 3DNow! extensions */
   14.30 @@ -72,13 +71,25 @@
   14.31  #define X86_FEATURE_P4		(3*32+ 7) /* P4 */
   14.32  
   14.33  /* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
   14.34 +#define X86_FEATURE_XMM3	(4*32+ 0) /* Streaming SIMD Extensions-3 */
   14.35  #define X86_FEATURE_MWAIT	(4*32+ 3) /* Monitor/Mwait support */
   14.36 +#define X86_FEATURE_DSCPL	(4*32+ 4) /* CPL Qualified Debug Store */
   14.37  #define X86_FEATURE_VMXE	(4*32+ 5) /* Virtual Machine Extensions */
   14.38  #define X86_FEATURE_EST		(4*32+ 7) /* Enhanced SpeedStep */
   14.39 +#define X86_FEATURE_TM2		(4*32+ 8) /* Thermal Monitor 2 */
   14.40 +#define X86_FEATURE_CID		(4*32+10) /* Context ID */
   14.41 +#define X86_FEATURE_CX16        (4*32+13) /* CMPXCHG16B */
   14.42 +#define X86_FEATURE_XTPR	(4*32+14) /* Send Task Priority Messages */
   14.43  
   14.44  /* VIA/Cyrix/Centaur-defined CPU features, CPUID level 0xC0000001, word 5 */
   14.45  #define X86_FEATURE_XSTORE	(5*32+ 2) /* on-CPU RNG present (xstore insn) */
   14.46 +#define X86_FEATURE_XSTORE_EN	(5*32+ 3) /* on-CPU RNG enabled */
   14.47 +#define X86_FEATURE_XCRYPT	(5*32+ 6) /* on-CPU crypto (xcrypt insn) */
   14.48 +#define X86_FEATURE_XCRYPT_EN	(5*32+ 7) /* on-CPU crypto enabled */
   14.49  
   14.50 +/* More extended AMD flags: CPUID level 0x80000001, ecx, word 6 */
   14.51 +#define X86_FEATURE_LAHF_LM	(6*32+ 0) /* LAHF/SAHF in long mode */
   14.52 +#define X86_FEATURE_CMP_LEGACY	(6*32+ 1) /* If set, HyperThreading is not valid */
   14.53  
   14.54  #define cpu_has(c, bit)		test_bit(bit, (c)->x86_capability)
   14.55  #define boot_cpu_has(bit)	test_bit(bit, boot_cpu_data.x86_capability)
   14.56 @@ -90,13 +101,14 @@
   14.57  #define cpu_has_tsc		boot_cpu_has(X86_FEATURE_TSC)
   14.58  #define cpu_has_pae		boot_cpu_has(X86_FEATURE_PAE)
   14.59  #define cpu_has_pge		boot_cpu_has(X86_FEATURE_PGE)
   14.60 -#define cpu_has_sse2		boot_cpu_has(X86_FEATURE_XMM2)
   14.61  #define cpu_has_apic		boot_cpu_has(X86_FEATURE_APIC)
   14.62  #define cpu_has_sep		boot_cpu_has(X86_FEATURE_SEP)
   14.63  #define cpu_has_mtrr		boot_cpu_has(X86_FEATURE_MTRR)
   14.64  #define cpu_has_mmx		boot_cpu_has(X86_FEATURE_MMX)
   14.65  #define cpu_has_fxsr		boot_cpu_has(X86_FEATURE_FXSR)
   14.66  #define cpu_has_xmm		boot_cpu_has(X86_FEATURE_XMM)
   14.67 +#define cpu_has_xmm2		boot_cpu_has(X86_FEATURE_XMM2)
   14.68 +#define cpu_has_xmm3		boot_cpu_has(X86_FEATURE_XMM3)
   14.69  #define cpu_has_ht		boot_cpu_has(X86_FEATURE_HT)
   14.70  #define cpu_has_mp		boot_cpu_has(X86_FEATURE_MP)
   14.71  #define cpu_has_nx		boot_cpu_has(X86_FEATURE_NX)
   14.72 @@ -104,5 +116,15 @@
   14.73  #define cpu_has_cyrix_arr	boot_cpu_has(X86_FEATURE_CYRIX_ARR)
   14.74  #define cpu_has_centaur_mcr	boot_cpu_has(X86_FEATURE_CENTAUR_MCR)
   14.75  #define cpu_has_xstore		boot_cpu_has(X86_FEATURE_XSTORE)
   14.76 +#define cpu_has_xstore_enabled	boot_cpu_has(X86_FEATURE_XSTORE_EN)
   14.77 +#define cpu_has_xcrypt		boot_cpu_has(X86_FEATURE_XCRYPT)
   14.78 +#define cpu_has_xcrypt_enabled	boot_cpu_has(X86_FEATURE_XCRYPT_EN)
   14.79  
   14.80 -#endif /* __ASM_X86_CPUFEATURE_H */
   14.81 +#endif /* __ASM_I386_CPUFEATURE_H */
   14.82 +
   14.83 +/* 
   14.84 + * Local Variables:
   14.85 + * mode:c
   14.86 + * comment-column:42
   14.87 + * End:
   14.88 + */
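
[Editorial sketch: how the (word*32 + bit) feature encoding above is consumed. X86_FEATURE_NX is (1*32+20), i.e. bit 20 of x86_capability[1], the CPUID 0x80000001 EDX word; test_bit() performs the word/bit split.]

	if (boot_cpu_has(X86_FEATURE_NX))	/* test_bit(52, ...x86_capability) */
		printk("Execute Disable is available\n");
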
    15.1 --- a/xen/include/asm-x86/msr.h	Thu May 26 15:11:08 2005 +0000
    15.2 +++ b/xen/include/asm-x86/msr.h	Thu May 26 15:35:14 2005 +0000
    15.3 @@ -79,6 +79,17 @@
    15.4  #define MSR_IA32_PLATFORM_ID		0x17
    15.5  #define MSR_IA32_EBL_CR_POWERON		0x2a
    15.6  
    15.7 +#define MSR_IA32_APICBASE		0x1b
    15.8 +#define MSR_IA32_APICBASE_BSP		(1<<8)
    15.9 +#define MSR_IA32_APICBASE_ENABLE	(1<<11)
   15.10 +#define MSR_IA32_APICBASE_BASE		(0xfffff<<12)
   15.11 +
   15.12 +#define MSR_IA32_UCODE_WRITE		0x79
   15.13 +#define MSR_IA32_UCODE_REV		0x8b
   15.14 +
   15.15 +#define MSR_P6_PERFCTR0      0xc1
   15.16 +#define MSR_P6_PERFCTR1      0xc2
   15.17 +
   15.18  /* MSRs & bits used for VMX enabling */
   15.19  #define MSR_IA32_VMX_BASIC_MSR                  0x480
   15.20  #define IA32_FEATURE_CONTROL_MSR                0x3a
   15.21 @@ -108,9 +119,6 @@
   15.22  /* Intel MSRs. Some also available on other CPUs */
   15.23  #define MSR_IA32_PLATFORM_ID	0x17
   15.24  
   15.25 -#define MSR_IA32_PERFCTR0      0xc1
   15.26 -#define MSR_IA32_PERFCTR1      0xc2
   15.27 -
   15.28  #define MSR_MTRRcap		0x0fe
   15.29  #define MSR_IA32_BBL_CR_CTL        0x119
   15.30  
   15.31 @@ -122,9 +130,6 @@
   15.32  #define MSR_IA32_MCG_STATUS        0x17a
   15.33  #define MSR_IA32_MCG_CTL       0x17b
   15.34  
   15.35 -#define MSR_IA32_EVNTSEL0      0x186
   15.36 -#define MSR_IA32_EVNTSEL1      0x187
   15.37 -
   15.38  #define MSR_MTRRfix64K_00000	0x250
   15.39  #define MSR_MTRRfix16K_80000	0x258
   15.40  #define MSR_MTRRfix16K_A0000	0x259
   15.41 @@ -145,14 +150,6 @@
   15.42  
   15.43  #define MSR_IA32_DS_AREA	0x600
   15.44  
   15.45 -#define MSR_IA32_APICBASE		0x1b
   15.46 -#define MSR_IA32_APICBASE_BSP		(1<<8)
   15.47 -#define MSR_IA32_APICBASE_ENABLE	(1<<11)
   15.48 -#define MSR_IA32_APICBASE_BASE		(0xfffff<<12)
   15.49 -
   15.50 -#define MSR_IA32_UCODE_WRITE		0x79
   15.51 -#define MSR_IA32_UCODE_REV		0x8b
   15.52 -
   15.53  #define MSR_IA32_BBL_CR_CTL		0x119
   15.54  
   15.55  #define MSR_IA32_MCG_CAP		0x179
   15.56 @@ -237,6 +234,7 @@
   15.57  /* VIA Cyrix defined MSRs*/
   15.58  #define MSR_VIA_FCR			0x1107
   15.59  #define MSR_VIA_LONGHAUL		0x110a
   15.60 +#define MSR_VIA_RNG			0x110b
   15.61  #define MSR_VIA_BCR2			0x1147
   15.62  
   15.63  /* Transmeta defined MSRs */
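
[Editorial sketch: consuming the MSR_IA32_APICBASE fields moved up in this hunk, with the two-u32 rdmsr() form used elsewhere in the patch.]

	unsigned long lo, hi;

	rdmsr(MSR_IA32_APICBASE, lo, hi);
	if (lo & MSR_IA32_APICBASE_ENABLE)
		printk("local APIC enabled, base %08lx\n",
		       lo & MSR_IA32_APICBASE_BASE);
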
    16.1 --- a/xen/include/asm-x86/processor.h	Thu May 26 15:11:08 2005 +0000
    16.2 +++ b/xen/include/asm-x86/processor.h	Thu May 26 15:35:14 2005 +0000
    16.3 @@ -26,8 +26,7 @@
    16.4  #define X86_VENDOR_RISE 6
    16.5  #define X86_VENDOR_TRANSMETA 7
    16.6  #define X86_VENDOR_NSC 8
    16.7 -#define X86_VENDOR_SIS 9
    16.8 -#define X86_VENDOR_NUM 10
    16.9 +#define X86_VENDOR_NUM 9
   16.10  #define X86_VENDOR_UNKNOWN 0xff
   16.11  
   16.12  /*
   16.13 @@ -146,23 +145,26 @@ struct exec_domain;
   16.14    ({ void *pc; __asm__("movl $1f,%0\n1:":"=g" (pc)); pc; })
   16.15  #endif
   16.16  
   16.17 -/*
   16.18 - *  CPU type and hardware bug flags. Kept separately for each CPU.
   16.19 - *  Members of this structure are referenced in head.S, so think twice
   16.20 - *  before touching them. [mj]
   16.21 - */
   16.22 -
   16.23  struct cpuinfo_x86 {
   16.24 -    __u8    x86;            /* CPU family */
   16.25 -    __u8    x86_vendor;     /* CPU vendor */
   16.26 -    __u8    x86_model;
   16.27 -    __u8    x86_mask;
   16.28 -    int     cpuid_level;    /* Maximum supported CPUID level, -1=no CPUID */
   16.29 -    __u32   x86_capability[NCAPINTS];
   16.30 -    char    x86_vendor_id[16];
   16.31 -    int     x86_cache_size;  /* in KB - for CPUS that support this call  */
   16.32 -    int	    x86_clflush_size;
   16.33 -    int	    x86_tlbsize;     /* number of 4K pages in DTLB/ITLB combined */
   16.34 +	__u8	x86;		/* CPU family */
   16.35 +	__u8	x86_vendor;	/* CPU vendor */
   16.36 +	__u8	x86_model;
   16.37 +	__u8	x86_mask;
   16.38 +	char	wp_works_ok;	/* It doesn't on 386's */
   16.39 +	char	hlt_works_ok;	/* Problems on some 486Dx4's and old 386's */
   16.40 +	char	hard_math;
   16.41 +	char	rfu;
   16.42 +	int	cpuid_level;	/* Maximum supported CPUID level, -1=no CPUID */
   16.43 +	unsigned long	x86_capability[NCAPINTS];
   16.44 +	char	x86_vendor_id[16];
   16.45 +	char	x86_model_id[64];
   16.46 +	int 	x86_cache_size;  /* in KB - valid for CPUs which support this
   16.47 +				    call  */
   16.48 +	int 	x86_cache_alignment;	/* In bytes */
   16.49 +	int	fdiv_bug;
   16.50 +	int	f00f_bug;
   16.51 +	int	coma_bug;
   16.52 +	unsigned char x86_num_cores;
   16.53  } __cacheline_aligned;
   16.54  
   16.55  /*
   16.56 @@ -179,15 +181,23 @@ extern struct cpuinfo_x86 cpu_data[];
   16.57  #define current_cpu_data boot_cpu_data
   16.58  #endif
   16.59  
   16.60 -extern  int phys_proc_id[NR_CPUS];
   16.61 -extern char ignore_irq13;
   16.62 +extern int phys_proc_id[NR_CPUS];
   16.63  
   16.64  extern void identify_cpu(struct cpuinfo_x86 *);
   16.65  extern void print_cpu_info(struct cpuinfo_x86 *);
   16.66 +extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
   16.67  extern void dodgy_tsc(void);
   16.68  
   16.69 +#ifdef CONFIG_X86_HT
   16.70 +extern void detect_ht(struct cpuinfo_x86 *c);
   16.71 +#else
   16.72 +static inline void detect_ht(struct cpuinfo_x86 *c) {}
   16.73 +#endif
   16.74 +
   16.75  /*
   16.76   * Generic CPUID function
   16.77 + * clear %ecx, since some CPUs (e.g. the Cyrix MII) neither set nor clear
   16.78 + * %ecx, which would leave stale register contents to be returned.
   16.79   */
   16.80  static inline void cpuid(
   16.81      int op, unsigned int *eax, unsigned int *ebx,
   16.82 @@ -195,10 +205,10 @@ static inline void cpuid(
   16.83  {
   16.84      __asm__("cpuid"
   16.85              : "=a" (*eax),
   16.86 -            "=b" (*ebx),
   16.87 -            "=c" (*ecx),
   16.88 -            "=d" (*edx)
   16.89 -            : "0" (op));
   16.90 +              "=b" (*ebx),
   16.91 +              "=c" (*ecx),
   16.92 +              "=d" (*edx)
   16.93 +            : "0" (op), "2" (0));
   16.94  }
   16.95  
   16.96  /*
   16.97 @@ -327,6 +337,23 @@ static inline void clear_in_cr4 (unsigne
   16.98  	outb((data), 0x23); \
   16.99  } while (0)
  16.100  
  16.101 +static inline void __monitor(const void *eax, unsigned long ecx,
  16.102 +		unsigned long edx)
  16.103 +{
  16.104 +	/* "monitor %eax,%ecx,%edx;" */
  16.105 +	asm volatile(
  16.106 +		".byte 0x0f,0x01,0xc8;"
  16.107 +		: :"a" (eax), "c" (ecx), "d"(edx));
  16.108 +}
  16.109 +
  16.110 +static inline void __mwait(unsigned long eax, unsigned long ecx)
  16.111 +{
  16.112 +	/* "mwait %eax,%ecx;" */
  16.113 +	asm volatile(
  16.114 +		".byte 0x0f,0x01,0xc9;"
  16.115 +		: :"a" (eax), "c" (ecx));
  16.116 +}
  16.117 +
  16.118  #define IOBMP_BYTES             8192
  16.119  #define IOBMP_INVALID_OFFSET    0x8000
  16.120
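
[Editorial sketch: __monitor()/__mwait() above are hand-assembled with .byte sequences, presumably because contemporary assemblers lacked the mnemonics. The usual idle pattern they enable, gated on X86_FEATURE_MWAIT in practice, looks roughly like this.]

	static inline void sketch_mwait_idle(void *monitor_line)
	{
		__monitor(monitor_line, 0, 0);	/* arm the monitor on this address */
		__mwait(0, 0);			/* halt until that line is written */
	}
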