ia64/xen-unstable

changeset 3195:a31bdfb8d4ea

bitkeeper revision 1.1159.183.29 (41aaf596qooupGDNnx14Mo2HjOCaSA)

Merge scramble.cl.cam.ac.uk:/auto/groups/xeno-xenod/BK/xen-2.0-testing.bk
into scramble.cl.cam.ac.uk:/auto/groups/xeno-xenod/BK/xen-unstable.bk
author kaf24@scramble.cl.cam.ac.uk
date Mon Nov 29 10:10:30 2004 +0000 (2004-11-29)
parents 2e33efd5b908 9c26a1d2b093
children 2e64fe4ea360
files .rootkeys xen/arch/x86/Makefile xen/arch/x86/mtrr/amd.c xen/arch/x86/mtrr/centaur.c xen/arch/x86/mtrr/cyrix.c xen/arch/x86/mtrr/generic.c xen/arch/x86/mtrr/main.c xen/arch/x86/mtrr/mtrr.h xen/arch/x86/mtrr/state.c xen/include/asm-x86/mtrr.h xen/include/asm-x86/processor.h
line diff
     1.1 --- a/.rootkeys	Mon Nov 29 09:19:52 2004 +0000
     1.2 +++ b/.rootkeys	Mon Nov 29 10:10:30 2004 +0000
     1.3 @@ -664,6 +664,13 @@ 3ddb79bcKIkRR0kqWaJhe5VUDkMdxg xen/arch/
     1.4  3ddb79bdqfIcjkz_h9Hvtp8Tk_19Zw xen/arch/x86/irq.c
     1.5  40ec29ffuOa1ZvmJHzFKyZn4k_RcXg xen/arch/x86/memory.c
     1.6  3ddb79bdS4UeWWXDH-FaBKqcpMFcnw xen/arch/x86/mpparse.c
     1.7 +41aaf566Z4sTDgJ77eEg0TzzQ1ka6Q xen/arch/x86/mtrr/amd.c
     1.8 +41aaf566TOpOBXT00wwQGUh20f1rlA xen/arch/x86/mtrr/centaur.c
     1.9 +41aaf566yhr0zKYnGVSOQpkWMM0Kiw xen/arch/x86/mtrr/cyrix.c
    1.10 +41aaf567t3hFKsyfEFoy3KAnB-bj8w xen/arch/x86/mtrr/generic.c
    1.11 +41aaf567tqrKGSTDK8OVeAbpeoccPw xen/arch/x86/mtrr/main.c
    1.12 +41aaf567a36esU-rUK7twPiv-yTFyw xen/arch/x86/mtrr/mtrr.h
    1.13 +41aaf567DcTL6pqVtLZJI5cSryyA1A xen/arch/x86/mtrr/state.c
    1.14  3f12cff65EV3qOG2j37Qm0ShgvXGRw xen/arch/x86/nmi.c
    1.15  3ddb79bdHe6_Uij4-glW91vInNtBYQ xen/arch/x86/pci-irq.c
    1.16  3ddb79bcZ_2FxINljqNSkqa17ISyJw xen/arch/x86/pci-pc.c
    1.17 @@ -799,6 +806,7 @@ 3ddb79c3I98vWcQR8xEo34JMJ4Ahyw xen/inclu
    1.18  40ec25fd7cSvbP7Biw91zaU_g0xsEQ xen/include/asm-x86/mm.h
    1.19  3ddb79c3n_UbPuxlkNxvvLycClIkxA xen/include/asm-x86/mpspec.h
    1.20  3ddb79c2wa0dA_LGigxOelSGbJ284Q xen/include/asm-x86/msr.h
    1.21 +41aaf567Mi3OishhvrCtET1y-mxQBg xen/include/asm-x86/mtrr.h
    1.22  41a61536MFhNalgbVmYGXAhQsPTZNw xen/include/asm-x86/multicall.h
    1.23  3ddb79c3xjYnrv5t3VqYlR4tNEOl4Q xen/include/asm-x86/page.h
    1.24  3ddb79c3ysKUbxZuwKBRK3WXU2TlEg xen/include/asm-x86/pci.h
     2.1 --- a/xen/arch/x86/Makefile	Mon Nov 29 09:19:52 2004 +0000
     2.2 +++ b/xen/arch/x86/Makefile	Mon Nov 29 10:10:30 2004 +0000
     2.3 @@ -8,6 +8,7 @@ endif
     2.4  
     2.5  OBJS += $(patsubst %.S,%.o,$(wildcard $(TARGET_SUBARCH)/*.S))
     2.6  OBJS += $(patsubst %.c,%.o,$(wildcard $(TARGET_SUBARCH)/*.c))
     2.7 +OBJS += $(patsubst %.c,%.o,$(wildcard mtrr/*.c))
     2.8  
     2.9  OBJS := $(subst $(TARGET_SUBARCH)/asm-offsets.o,,$(OBJS))
    2.10  
     3.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     3.2 +++ b/xen/arch/x86/mtrr/amd.c	Mon Nov 29 10:10:30 2004 +0000
     3.3 @@ -0,0 +1,121 @@
     3.4 +#include <xen/init.h>
     3.5 +#include <xen/mm.h>
     3.6 +#include <asm/mtrr.h>
     3.7 +#include <asm/msr.h>
     3.8 +
     3.9 +#include "mtrr.h"
    3.10 +
    3.11 +static void
    3.12 +amd_get_mtrr(unsigned int reg, unsigned long *base,
    3.13 +	     unsigned int *size, mtrr_type * type)
    3.14 +{
    3.15 +	unsigned long low, high;
    3.16 +
    3.17 +	rdmsr(MSR_K6_UWCCR, low, high);
    3.18 +	/*  Upper dword is region 1, lower is region 0  */
    3.19 +	if (reg == 1)
    3.20 +		low = high;
     3.21 +	/*  The base lives in bits 31:17; mask it off and shift to pages  */
    3.22 +	*base = (low & 0xFFFE0000) >> PAGE_SHIFT;
    3.23 +	*type = 0;
    3.24 +	if (low & 1)
    3.25 +		*type = MTRR_TYPE_UNCACHABLE;
    3.26 +	if (low & 2)
    3.27 +		*type = MTRR_TYPE_WRCOMB;
    3.28 +	if (!(low & 3)) {
    3.29 +		*size = 0;
    3.30 +		return;
    3.31 +	}
     3.32 +	/*
     3.33 +	 *  This needs a little explaining. The size is stored as an
     3.34 +	 *  inverted mask of bits of 128K granularity, 15 bits long,
     3.35 +	 *  offset by 2 bits.
     3.36 +	 *
     3.37 +	 *  So to get a size we invert the mask and add 1 to the lowest
     3.38 +	 *  mask bit (4, as it's 2 bits in). This gives us a size we then
     3.39 +	 *  shift to turn into 128K blocks.
     3.40 +	 *
     3.41 +	 *  e.g.            111 1111 1111 1100      is 512K
     3.42 +	 *
     3.43 +	 *  invert          000 0000 0000 0011
     3.44 +	 *  +1              000 0000 0000 0100
     3.45 +	 *  *128K   ...
     3.46 +	 */
    3.47 +	low = (~low) & 0x1FFFC;
    3.48 +	*size = (low + 4) << (15 - PAGE_SHIFT);
    3.49 +	return;
    3.50 +}
    3.51 +
    3.52 +static void amd_set_mtrr(unsigned int reg, unsigned long base,
    3.53 +			 unsigned long size, mtrr_type type)
     3.54 +/*  [SUMMARY] Set variable MTRR register on the local CPU.
     3.55 +    <reg> The register to set.
     3.56 +    <base> The base address of the region.
     3.57 +    <size> The size of the region. If this is 0 the region is disabled.
     3.58 +    <type> The type of the region.
     3.59 +    [NOTE] The <do_safe> flag of the Linux original is gone; the caller
     3.60 +    (the set_mtrr() rendezvous in main.c) provides the safety measures.
     3.61 +    [RETURNS] Nothing.
     3.62 +*/
    3.63 +{
    3.64 +	u32 regs[2];
    3.65 +
     3.66 +	/*
     3.67 +	 *  Low dword is MTRR0, high dword is MTRR1
     3.68 +	 */
    3.69 +	rdmsr(MSR_K6_UWCCR, regs[0], regs[1]);
    3.70 +	/*
    3.71 +	 *  Blank to disable
    3.72 +	 */
    3.73 +	if (size == 0)
    3.74 +		regs[reg] = 0;
    3.75 +	else
     3.76 +		/* Set the register to the base, the type (off by one) and an
     3.77 +		   inverted bitmask of the size. The size is the only odd
     3.78 +		   bit. We are fed, say, 512K. Inverting this gives 111 1111
     3.79 +		   1111 1011, but if you subtract one and invert you get the
     3.80 +		   desired 111 1111 1111 1100 mask.
     3.81 +
     3.82 +		   But ~(x - 1) == ~x + 1 == -x. Two's complement rocks!  */
    3.83 +		regs[reg] = (-size >> (15 - PAGE_SHIFT) & 0x0001FFFC)
    3.84 +		    | (base << PAGE_SHIFT) | (type + 1);
    3.85 +
     3.86 +	/*
     3.87 +	 *  The writeback rule is quite specific. See the manual. It is:
     3.88 +	 *  disable local interrupts, write back the cache, set the MTRR.
     3.89 +	 */
    3.90 +	wbinvd();
    3.91 +	wrmsr(MSR_K6_UWCCR, regs[0], regs[1]);
    3.92 +}
    3.93 +
    3.94 +static int amd_validate_add_page(unsigned long base, unsigned long size, unsigned int type)
    3.95 +{
    3.96 +	/* Apply the K6 block alignment and size rules
    3.97 +	   In order
    3.98 +	   o Uncached or gathering only
    3.99 +	   o 128K or bigger block
   3.100 +	   o Power of 2 block
   3.101 +	   o base suitably aligned to the power
   3.102 +	*/
   3.103 +	if (type > MTRR_TYPE_WRCOMB || size < (1 << (17 - PAGE_SHIFT))
   3.104 +	    || (size & ~(size - 1)) - size || (base & (size - 1)))
   3.105 +		return -EINVAL;
   3.106 +	return 0;
   3.107 +}
   3.108 +
   3.109 +static struct mtrr_ops amd_mtrr_ops = {
   3.110 +	.vendor            = X86_VENDOR_AMD,
   3.111 +	.set               = amd_set_mtrr,
   3.112 +	.get               = amd_get_mtrr,
   3.113 +	.get_free_region   = generic_get_free_region,
   3.114 +	.validate_add_page = amd_validate_add_page,
   3.115 +	.have_wrcomb       = positive_have_wrcomb,
   3.116 +};
   3.117 +
   3.118 +int __init amd_init_mtrr(void)
   3.119 +{
   3.120 +	set_mtrr_ops(&amd_mtrr_ops);
   3.121 +	return 0;
   3.122 +}
   3.123 +
   3.124 +//arch_initcall(amd_mtrr_init);
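
A note on the size encoding implemented by amd_get_mtrr()/amd_set_mtrr() above: it is easy to sanity-check with a round trip. The following is a minimal standalone sketch, assuming 4K pages (PAGE_SHIFT == 12); the k6_* helper names are illustrative and not part of the patch:

    #include <assert.h>
    #include <stdint.h>

    #define PAGE_SHIFT 12

    /* Encode a size in pages as the UWCCR inverted mask (bits 16:2),
     * as amd_set_mtrr() does: -size scaled to 128K units. */
    static uint32_t k6_encode_size(uint32_t size_pages)
    {
        return (-size_pages >> (15 - PAGE_SHIFT)) & 0x0001FFFC;
    }

    /* Decode it back, as amd_get_mtrr() does: invert the mask, add the
     * lowest mask bit (4), and scale from 128K units back to pages. */
    static uint32_t k6_decode_size(uint32_t mask)
    {
        mask = (~mask) & 0x1FFFC;
        return (mask + 4) << (15 - PAGE_SHIFT);
    }

    int main(void)
    {
        /* 512K is 128 4K pages; encode and decode must agree. */
        assert(k6_decode_size(k6_encode_size(128)) == 128);
        return 0;
    }
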
     4.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     4.2 +++ b/xen/arch/x86/mtrr/centaur.c	Mon Nov 29 10:10:30 2004 +0000
     4.3 @@ -0,0 +1,220 @@
     4.4 +#include <xen/init.h>
     4.5 +#include <xen/mm.h>
     4.6 +#include <asm/mtrr.h>
     4.7 +#include <asm/msr.h>
     4.8 +#include "mtrr.h"
     4.9 +
    4.10 +static struct {
    4.11 +	unsigned long high;
    4.12 +	unsigned long low;
    4.13 +} centaur_mcr[8];
    4.14 +
    4.15 +static u8 centaur_mcr_reserved;
    4.16 +static u8 centaur_mcr_type;	/* 0 for winchip, 1 for winchip2 */
    4.17 +
    4.18 +/*
    4.19 + *	Report boot time MCR setups 
    4.20 + */
    4.21 +
    4.22 +static int
    4.23 +centaur_get_free_region(unsigned long base, unsigned long size)
    4.24 +/*  [SUMMARY] Get a free MTRR.
    4.25 +    <base> The starting (base) address of the region.
    4.26 +    <size> The size (in bytes) of the region.
     4.27 +    [RETURNS] The index of a free region on success, else -ENOSPC on error.
    4.28 +*/
    4.29 +{
    4.30 +	int i, max;
    4.31 +	mtrr_type ltype;
    4.32 +	unsigned long lbase;
    4.33 +	unsigned int lsize;
    4.34 +
    4.35 +	max = num_var_ranges;
    4.36 +	for (i = 0; i < max; ++i) {
    4.37 +		if (centaur_mcr_reserved & (1 << i))
    4.38 +			continue;
    4.39 +		mtrr_if->get(i, &lbase, &lsize, &ltype);
    4.40 +		if (lsize == 0)
    4.41 +			return i;
    4.42 +	}
    4.43 +	return -ENOSPC;
    4.44 +}
    4.45 +
    4.46 +void
    4.47 +mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi)
    4.48 +{
    4.49 +	centaur_mcr[mcr].low = lo;
    4.50 +	centaur_mcr[mcr].high = hi;
    4.51 +}
    4.52 +
    4.53 +static void
    4.54 +centaur_get_mcr(unsigned int reg, unsigned long *base,
    4.55 +		unsigned int *size, mtrr_type * type)
    4.56 +{
    4.57 +	*base = centaur_mcr[reg].high >> PAGE_SHIFT;
    4.58 +	*size = -(centaur_mcr[reg].low & 0xfffff000) >> PAGE_SHIFT;
    4.59 +	*type = MTRR_TYPE_WRCOMB;	/*  If it is there, it is write-combining  */
    4.60 +	if (centaur_mcr_type == 1 && ((centaur_mcr[reg].low & 31) & 2))
    4.61 +		*type = MTRR_TYPE_UNCACHABLE;
    4.62 +	if (centaur_mcr_type == 1 && (centaur_mcr[reg].low & 31) == 25)
    4.63 +		*type = MTRR_TYPE_WRBACK;
    4.64 +	if (centaur_mcr_type == 0 && (centaur_mcr[reg].low & 31) == 31)
    4.65 +		*type = MTRR_TYPE_WRBACK;
    4.66 +
    4.67 +}
    4.68 +
    4.69 +static void centaur_set_mcr(unsigned int reg, unsigned long base,
    4.70 +			    unsigned long size, mtrr_type type)
    4.71 +{
    4.72 +	unsigned long low, high;
    4.73 +
    4.74 +	if (size == 0) {
    4.75 +		/*  Disable  */
    4.76 +		high = low = 0;
    4.77 +	} else {
    4.78 +		high = base << PAGE_SHIFT;
    4.79 +		if (centaur_mcr_type == 0)
    4.80 +			low = -size << PAGE_SHIFT | 0x1f;	/* only support write-combining... */
    4.81 +		else {
    4.82 +			if (type == MTRR_TYPE_UNCACHABLE)
    4.83 +				low = -size << PAGE_SHIFT | 0x02;	/* NC */
    4.84 +			else
    4.85 +				low = -size << PAGE_SHIFT | 0x09;	/* WWO,WC */
    4.86 +		}
    4.87 +	}
    4.88 +	centaur_mcr[reg].high = high;
    4.89 +	centaur_mcr[reg].low = low;
    4.90 +	wrmsr(MSR_IDT_MCR0 + reg, low, high);
    4.91 +}
    4.92 +/*
    4.93 + *	Initialise the later (saner) Winchip MCR variant. In this version
    4.94 + *	the BIOS can pass us the registers it has used (but not their values)
    4.95 + *	and the control register is read/write
    4.96 + */
    4.97 +
    4.98 +static void __init
    4.99 +centaur_mcr1_init(void)
   4.100 +{
   4.101 +	unsigned i;
   4.102 +	u32 lo, hi;
   4.103 +
   4.104 +	/* Unfortunately, MCR's are read-only, so there is no way to
   4.105 +	 * find out what the bios might have done.
   4.106 +	 */
   4.107 +
   4.108 +	rdmsr(MSR_IDT_MCR_CTRL, lo, hi);
   4.109 +	if (((lo >> 17) & 7) == 1) {	/* Type 1 Winchip2 MCR */
   4.110 +		lo &= ~0x1C0;	/* clear key */
   4.111 +		lo |= 0x040;	/* set key to 1 */
   4.112 +		wrmsr(MSR_IDT_MCR_CTRL, lo, hi);	/* unlock MCR */
   4.113 +	}
   4.114 +
   4.115 +	centaur_mcr_type = 1;
   4.116 +
   4.117 +	/*
   4.118 +	 *  Clear any unconfigured MCR's.
   4.119 +	 */
   4.120 +
   4.121 +	for (i = 0; i < 8; ++i) {
   4.122 +		if (centaur_mcr[i].high == 0 && centaur_mcr[i].low == 0) {
   4.123 +			if (!(lo & (1 << (9 + i))))
   4.124 +				wrmsr(MSR_IDT_MCR0 + i, 0, 0);
   4.125 +			else
   4.126 +				/*
   4.127 +				 *      If the BIOS set up an MCR we cannot see it
   4.128 +				 *      but we don't wish to obliterate it
   4.129 +				 */
   4.130 +				centaur_mcr_reserved |= (1 << i);
   4.131 +		}
   4.132 +	}
   4.133 +	/*  
   4.134 +	 *  Throw the main write-combining switch... 
   4.135 +	 *  However if OOSTORE is enabled then people have already done far
   4.136 +	 *  cleverer things and we should behave. 
   4.137 +	 */
   4.138 +
   4.139 +	lo |= 15;		/* Write combine enables */
   4.140 +	wrmsr(MSR_IDT_MCR_CTRL, lo, hi);
   4.141 +}
   4.142 +
   4.143 +/*
     4.144 + *	Initialise the original WinChip, which has read-only MCR registers,
     4.145 + *	no "used" bitmask for the BIOS to pass on, and a write-only control register
   4.146 + */
   4.147 +
   4.148 +static void __init
   4.149 +centaur_mcr0_init(void)
   4.150 +{
   4.151 +	unsigned i;
   4.152 +
   4.153 +	/* Unfortunately, MCR's are read-only, so there is no way to
   4.154 +	 * find out what the bios might have done.
   4.155 +	 */
   4.156 +
   4.157 +	/* Clear any unconfigured MCR's.
   4.158 +	 * This way we are sure that the centaur_mcr array contains the actual
   4.159 +	 * values. The disadvantage is that any BIOS tweaks are thus undone.
   4.160 +	 *
   4.161 +	 */
   4.162 +	for (i = 0; i < 8; ++i) {
   4.163 +		if (centaur_mcr[i].high == 0 && centaur_mcr[i].low == 0)
   4.164 +			wrmsr(MSR_IDT_MCR0 + i, 0, 0);
   4.165 +	}
   4.166 +
   4.167 +	wrmsr(MSR_IDT_MCR_CTRL, 0x01F0001F, 0);	/* Write only */
   4.168 +}
   4.169 +
   4.170 +/*
   4.171 + *	Initialise Winchip series MCR registers
   4.172 + */
   4.173 +
   4.174 +static void __init
   4.175 +centaur_mcr_init(void)
   4.176 +{
   4.177 +	struct set_mtrr_context ctxt;
   4.178 +
   4.179 +	set_mtrr_prepare_save(&ctxt);
   4.180 +	set_mtrr_cache_disable(&ctxt);
   4.181 +
   4.182 +	if (boot_cpu_data.x86_model == 4)
   4.183 +		centaur_mcr0_init();
   4.184 +	else if (boot_cpu_data.x86_model == 8 || boot_cpu_data.x86_model == 9)
   4.185 +		centaur_mcr1_init();
   4.186 +
   4.187 +	set_mtrr_done(&ctxt);
   4.188 +}
   4.189 +
   4.190 +static int centaur_validate_add_page(unsigned long base, 
   4.191 +				     unsigned long size, unsigned int type)
   4.192 +{
   4.193 +	/*
   4.194 +	 *  FIXME: Winchip2 supports uncached
   4.195 +	 */
   4.196 +	if (type != MTRR_TYPE_WRCOMB && 
   4.197 +	    (centaur_mcr_type == 0 || type != MTRR_TYPE_UNCACHABLE)) {
   4.198 +		printk(KERN_WARNING
   4.199 +		       "mtrr: only write-combining%s supported\n",
   4.200 +		       centaur_mcr_type ? " and uncacheable are"
   4.201 +		       : " is");
   4.202 +		return -EINVAL;
   4.203 +	}
   4.204 +	return 0;
   4.205 +}
   4.206 +
   4.207 +static struct mtrr_ops centaur_mtrr_ops = {
   4.208 +	.vendor            = X86_VENDOR_CENTAUR,
   4.209 +	.init              = centaur_mcr_init,
   4.210 +	.set               = centaur_set_mcr,
   4.211 +	.get               = centaur_get_mcr,
   4.212 +	.get_free_region   = centaur_get_free_region,
   4.213 +	.validate_add_page = centaur_validate_add_page,
   4.214 +	.have_wrcomb       = positive_have_wrcomb,
   4.215 +};
   4.216 +
   4.217 +int __init centaur_init_mtrr(void)
   4.218 +{
   4.219 +	set_mtrr_ops(&centaur_mtrr_ops);
   4.220 +	return 0;
   4.221 +}
   4.222 +
   4.223 +//arch_initcall(centaur_init_mtrr);
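
A note on the MCR layout handled above: centaur_set_mcr() packs the region base into the high dword and an inverted size mask plus type/enable bits into the low dword. Below is a standalone sketch of that packing for a type-0 (original WinChip) write-combining region, mirroring the expressions in centaur_set_mcr()/centaur_get_mcr(); 4K pages and the mcr_* names are assumptions for illustration:

    #include <assert.h>
    #include <stdint.h>

    #define PAGE_SHIFT 12

    struct mcr { uint32_t high, low; };

    /* Pack base and size (both in pages) as centaur_set_mcr() does for a
     * centaur_mcr_type == 0 part: -size as a byte address forms a mask
     * clearing the low bits, with the type/enable bits 0x1f OR'd in. */
    static struct mcr mcr_pack_wc(uint32_t base, uint32_t size)
    {
        struct mcr m;
        m.high = base << PAGE_SHIFT;
        m.low  = (-size << PAGE_SHIFT) | 0x1f;
        return m;
    }

    /* Recover the size in pages, as centaur_get_mcr() does. */
    static uint32_t mcr_unpack_size(struct mcr m)
    {
        return -(m.low & 0xfffff000) >> PAGE_SHIFT;
    }

    int main(void)
    {
        /* A 256K (0x40 page) region based at 1M (0x100 pages). */
        assert(mcr_unpack_size(mcr_pack_wc(0x100, 0x40)) == 0x40);
        return 0;
    }
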
     5.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     5.2 +++ b/xen/arch/x86/mtrr/cyrix.c	Mon Nov 29 10:10:30 2004 +0000
     5.3 @@ -0,0 +1,362 @@
     5.4 +#include <xen/init.h>
     5.5 +#include <xen/mm.h>
     5.6 +#include <asm/mtrr.h>
     5.7 +#include <asm/msr.h>
     5.8 +#include <asm/io.h>
     5.9 +#include "mtrr.h"
    5.10 +
    5.11 +int arr3_protected;
    5.12 +
    5.13 +static void
    5.14 +cyrix_get_arr(unsigned int reg, unsigned long *base,
    5.15 +	      unsigned int *size, mtrr_type * type)
    5.16 +{
    5.17 +	unsigned long flags;
    5.18 +	unsigned char arr, ccr3, rcr, shift;
    5.19 +
    5.20 +	arr = CX86_ARR_BASE + (reg << 1) + reg;	/* avoid multiplication by 3 */
    5.21 +
    5.22 +	/* Save flags and disable interrupts */
    5.23 +	local_irq_save(flags);
    5.24 +
    5.25 +	ccr3 = getCx86(CX86_CCR3);
    5.26 +	setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10);	/* enable MAPEN */
    5.27 +	((unsigned char *) base)[3] = getCx86(arr);
    5.28 +	((unsigned char *) base)[2] = getCx86(arr + 1);
    5.29 +	((unsigned char *) base)[1] = getCx86(arr + 2);
    5.30 +	rcr = getCx86(CX86_RCR_BASE + reg);
    5.31 +	setCx86(CX86_CCR3, ccr3);	/* disable MAPEN */
    5.32 +
     5.33 +	/* Re-enable interrupts if they were enabled previously */
    5.34 +	local_irq_restore(flags);
    5.35 +	shift = ((unsigned char *) base)[1] & 0x0f;
    5.36 +	*base >>= PAGE_SHIFT;
    5.37 +
    5.38 +	/* Power of two, at least 4K on ARR0-ARR6, 256K on ARR7
    5.39 +	 * Note: shift==0xf means 4G, this is unsupported.
    5.40 +	 */
    5.41 +	if (shift)
    5.42 +		*size = (reg < 7 ? 0x1UL : 0x40UL) << (shift - 1);
    5.43 +	else
    5.44 +		*size = 0;
    5.45 +
    5.46 +	/* Bit 0 is Cache Enable on ARR7, Cache Disable on ARR0-ARR6 */
    5.47 +	if (reg < 7) {
    5.48 +		switch (rcr) {
    5.49 +		case 1:
    5.50 +			*type = MTRR_TYPE_UNCACHABLE;
    5.51 +			break;
    5.52 +		case 8:
    5.53 +			*type = MTRR_TYPE_WRBACK;
    5.54 +			break;
    5.55 +		case 9:
    5.56 +			*type = MTRR_TYPE_WRCOMB;
    5.57 +			break;
    5.58 +		case 24:
    5.59 +		default:
    5.60 +			*type = MTRR_TYPE_WRTHROUGH;
    5.61 +			break;
    5.62 +		}
    5.63 +	} else {
    5.64 +		switch (rcr) {
    5.65 +		case 0:
    5.66 +			*type = MTRR_TYPE_UNCACHABLE;
    5.67 +			break;
    5.68 +		case 8:
    5.69 +			*type = MTRR_TYPE_WRCOMB;
    5.70 +			break;
    5.71 +		case 9:
    5.72 +			*type = MTRR_TYPE_WRBACK;
    5.73 +			break;
    5.74 +		case 25:
    5.75 +		default:
    5.76 +			*type = MTRR_TYPE_WRTHROUGH;
    5.77 +			break;
    5.78 +		}
    5.79 +	}
    5.80 +}
    5.81 +
    5.82 +static int
    5.83 +cyrix_get_free_region(unsigned long base, unsigned long size)
    5.84 +/*  [SUMMARY] Get a free ARR.
    5.85 +    <base> The starting (base) address of the region.
    5.86 +    <size> The size (in bytes) of the region.
     5.87 +    [RETURNS] The index of a free region on success, else -ENOSPC on error.
    5.88 +*/
    5.89 +{
    5.90 +	int i;
    5.91 +	mtrr_type ltype;
    5.92 +	unsigned long lbase;
    5.93 +	unsigned int  lsize;
    5.94 +
    5.95 +	/* If we are to set up a region >32M then look at ARR7 immediately */
    5.96 +	if (size > 0x2000) {
    5.97 +		cyrix_get_arr(7, &lbase, &lsize, &ltype);
    5.98 +		if (lsize == 0)
    5.99 +			return 7;
   5.100 +		/*  Else try ARR0-ARR6 first  */
   5.101 +	} else {
   5.102 +		for (i = 0; i < 7; i++) {
   5.103 +			cyrix_get_arr(i, &lbase, &lsize, &ltype);
   5.104 +			if ((i == 3) && arr3_protected)
   5.105 +				continue;
   5.106 +			if (lsize == 0)
   5.107 +				return i;
   5.108 +		}
    5.109 +		/* If ARR0-ARR6 aren't free, try ARR7, but its size must be at least 256K */
   5.110 +		cyrix_get_arr(i, &lbase, &lsize, &ltype);
   5.111 +		if ((lsize == 0) && (size >= 0x40))
   5.112 +			return i;
   5.113 +	}
   5.114 +	return -ENOSPC;
   5.115 +}
   5.116 +
   5.117 +static u32 cr4 = 0;
   5.118 +static u32 ccr3;
   5.119 +
   5.120 +static void prepare_set(void)
   5.121 +{
   5.122 +	u32 cr0;
   5.123 +
   5.124 +	/*  Save value of CR4 and clear Page Global Enable (bit 7)  */
   5.125 +	if ( cpu_has_pge ) {
   5.126 +		cr4 = read_cr4();
   5.127 +		write_cr4(cr4 & (unsigned char) ~(1 << 7));
   5.128 +	}
   5.129 +
   5.130 +	/*  Disable and flush caches. Note that wbinvd flushes the TLBs as
   5.131 +	    a side-effect  */
   5.132 +	cr0 = read_cr0() | 0x40000000;
   5.133 +	wbinvd();
   5.134 +	write_cr0(cr0);
   5.135 +	wbinvd();
   5.136 +
    5.137 +	/* Cyrix ARRs - everything else was excluded at the top; save CCR3 */
    5.138 +	ccr3 = getCx86(CX86_CCR3);
    5.139 +
    5.140 +	/* Enable MAPEN so the ARR registers are accessible */
    5.141 +	setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10);
   5.142 +
   5.143 +}
   5.144 +
   5.145 +static void post_set(void)
   5.146 +{
   5.147 +	/*  Flush caches and TLBs  */
   5.148 +	wbinvd();
   5.149 +
   5.150 +	/* Cyrix ARRs - everything else was excluded at the top */
   5.151 +	setCx86(CX86_CCR3, ccr3);
   5.152 +		
   5.153 +	/*  Enable caches  */
   5.154 +	write_cr0(read_cr0() & 0xbfffffff);
   5.155 +
   5.156 +	/*  Restore value of CR4  */
   5.157 +	if ( cpu_has_pge )
   5.158 +		write_cr4(cr4);
   5.159 +}
   5.160 +
   5.161 +static void cyrix_set_arr(unsigned int reg, unsigned long base,
   5.162 +			  unsigned long size, mtrr_type type)
   5.163 +{
   5.164 +	unsigned char arr, arr_type, arr_size;
   5.165 +
   5.166 +	arr = CX86_ARR_BASE + (reg << 1) + reg;	/* avoid multiplication by 3 */
   5.167 +
   5.168 +	/* count down from 32M (ARR0-ARR6) or from 2G (ARR7) */
   5.169 +	if (reg >= 7)
   5.170 +		size >>= 6;
   5.171 +
   5.172 +	size &= 0x7fff;		/* make sure arr_size <= 14 */
   5.173 +	for (arr_size = 0; size; arr_size++, size >>= 1) ;
   5.174 +
   5.175 +	if (reg < 7) {
   5.176 +		switch (type) {
   5.177 +		case MTRR_TYPE_UNCACHABLE:
   5.178 +			arr_type = 1;
   5.179 +			break;
   5.180 +		case MTRR_TYPE_WRCOMB:
   5.181 +			arr_type = 9;
   5.182 +			break;
   5.183 +		case MTRR_TYPE_WRTHROUGH:
   5.184 +			arr_type = 24;
   5.185 +			break;
   5.186 +		default:
   5.187 +			arr_type = 8;
   5.188 +			break;
   5.189 +		}
   5.190 +	} else {
   5.191 +		switch (type) {
   5.192 +		case MTRR_TYPE_UNCACHABLE:
   5.193 +			arr_type = 0;
   5.194 +			break;
   5.195 +		case MTRR_TYPE_WRCOMB:
   5.196 +			arr_type = 8;
   5.197 +			break;
   5.198 +		case MTRR_TYPE_WRTHROUGH:
   5.199 +			arr_type = 25;
   5.200 +			break;
   5.201 +		default:
   5.202 +			arr_type = 9;
   5.203 +			break;
   5.204 +		}
   5.205 +	}
   5.206 +
   5.207 +	prepare_set();
   5.208 +
   5.209 +	base <<= PAGE_SHIFT;
   5.210 +	setCx86(arr, ((unsigned char *) &base)[3]);
   5.211 +	setCx86(arr + 1, ((unsigned char *) &base)[2]);
   5.212 +	setCx86(arr + 2, (((unsigned char *) &base)[1]) | arr_size);
   5.213 +	setCx86(CX86_RCR_BASE + reg, arr_type);
   5.214 +
   5.215 +	post_set();
   5.216 +}
   5.217 +
   5.218 +typedef struct {
   5.219 +	unsigned long base;
   5.220 +	unsigned int size;
   5.221 +	mtrr_type type;
   5.222 +} arr_state_t;
   5.223 +
   5.224 +arr_state_t arr_state[8] __initdata = {
   5.225 +	{0UL, 0UL, 0UL}, {0UL, 0UL, 0UL}, {0UL, 0UL, 0UL}, {0UL, 0UL, 0UL},
   5.226 +	{0UL, 0UL, 0UL}, {0UL, 0UL, 0UL}, {0UL, 0UL, 0UL}, {0UL, 0UL, 0UL}
   5.227 +};
   5.228 +
   5.229 +unsigned char ccr_state[7] __initdata = { 0, 0, 0, 0, 0, 0, 0 };
   5.230 +
   5.231 +static void cyrix_set_all(void)
   5.232 +{
   5.233 +	int i;
   5.234 +
   5.235 +	prepare_set();
   5.236 +
   5.237 +	/* the CCRs are not contiguous */
   5.238 +	for (i = 0; i < 4; i++)
   5.239 +		setCx86(CX86_CCR0 + i, ccr_state[i]);
   5.240 +	for (; i < 7; i++)
   5.241 +		setCx86(CX86_CCR4 + i, ccr_state[i]);
   5.242 +	for (i = 0; i < 8; i++)
   5.243 +		cyrix_set_arr(i, arr_state[i].base, 
   5.244 +			      arr_state[i].size, arr_state[i].type);
   5.245 +
   5.246 +	post_set();
   5.247 +}
   5.248 +
   5.249 +/*
    5.250 + * On Cyrix 6x86(MX) and M II the ARR3 is special: it is tied to
    5.251 + * SMM (System Management Mode). So we need the following:
   5.252 + * Check whether SMI_LOCK (CCR3 bit 0) is set
   5.253 + *   if it is set, write a warning message: ARR3 cannot be changed!
   5.254 + *     (it cannot be changed until the next processor reset)
   5.255 + *   if it is reset, then we can change it, set all the needed bits:
   5.256 + *   - disable access to SMM memory through ARR3 range (CCR1 bit 7 reset)
   5.257 + *   - disable access to SMM memory (CCR1 bit 2 reset)
   5.258 + *   - disable SMM mode (CCR1 bit 1 reset)
   5.259 + *   - disable write protection of ARR3 (CCR6 bit 1 reset)
   5.260 + *   - (maybe) disable ARR3
   5.261 + * Just to be sure, we enable ARR usage by the processor (CCR5 bit 5 set)
   5.262 + */
   5.263 +static void __init
   5.264 +cyrix_arr_init(void)
   5.265 +{
   5.266 +	struct set_mtrr_context ctxt;
   5.267 +	unsigned char ccr[7];
   5.268 +	int ccrc[7] = { 0, 0, 0, 0, 0, 0, 0 };
   5.269 +#ifdef CONFIG_SMP
   5.270 +	int i;
   5.271 +#endif
   5.272 +
   5.273 +	/* flush cache and enable MAPEN */
   5.274 +	set_mtrr_prepare_save(&ctxt);
   5.275 +	set_mtrr_cache_disable(&ctxt);
   5.276 +
   5.277 +	/* Save all CCRs locally */
   5.278 +	ccr[0] = getCx86(CX86_CCR0);
   5.279 +	ccr[1] = getCx86(CX86_CCR1);
   5.280 +	ccr[2] = getCx86(CX86_CCR2);
   5.281 +	ccr[3] = ctxt.ccr3;
   5.282 +	ccr[4] = getCx86(CX86_CCR4);
   5.283 +	ccr[5] = getCx86(CX86_CCR5);
   5.284 +	ccr[6] = getCx86(CX86_CCR6);
   5.285 +
   5.286 +	if (ccr[3] & 1) {
   5.287 +		ccrc[3] = 1;
   5.288 +		arr3_protected = 1;
   5.289 +	} else {
   5.290 +		/* Disable SMM mode (bit 1), access to SMM memory (bit 2) and
   5.291 +		 * access to SMM memory through ARR3 (bit 7).
   5.292 +		 */
   5.293 +		if (ccr[1] & 0x80) {
   5.294 +			ccr[1] &= 0x7f;
   5.295 +			ccrc[1] |= 0x80;
   5.296 +		}
   5.297 +		if (ccr[1] & 0x04) {
   5.298 +			ccr[1] &= 0xfb;
   5.299 +			ccrc[1] |= 0x04;
   5.300 +		}
   5.301 +		if (ccr[1] & 0x02) {
   5.302 +			ccr[1] &= 0xfd;
   5.303 +			ccrc[1] |= 0x02;
   5.304 +		}
   5.305 +		arr3_protected = 0;
   5.306 +		if (ccr[6] & 0x02) {
   5.307 +			ccr[6] &= 0xfd;
   5.308 +			ccrc[6] = 1;	/* Disable write protection of ARR3 */
   5.309 +			setCx86(CX86_CCR6, ccr[6]);
   5.310 +		}
   5.311 +		/* Disable ARR3. This is safe now that we disabled SMM. */
   5.312 +		/* cyrix_set_arr_up (3, 0, 0, 0, FALSE); */
   5.313 +	}
   5.314 +	/* If we changed CCR1 in memory, change it in the processor, too. */
   5.315 +	if (ccrc[1])
   5.316 +		setCx86(CX86_CCR1, ccr[1]);
   5.317 +
   5.318 +	/* Enable ARR usage by the processor */
   5.319 +	if (!(ccr[5] & 0x20)) {
   5.320 +		ccr[5] |= 0x20;
   5.321 +		ccrc[5] = 1;
   5.322 +		setCx86(CX86_CCR5, ccr[5]);
   5.323 +	}
   5.324 +#ifdef CONFIG_SMP
   5.325 +	for (i = 0; i < 7; i++)
   5.326 +		ccr_state[i] = ccr[i];
   5.327 +	for (i = 0; i < 8; i++)
   5.328 +		cyrix_get_arr(i,
   5.329 +			      &arr_state[i].base, &arr_state[i].size,
   5.330 +			      &arr_state[i].type);
   5.331 +#endif
   5.332 +
   5.333 +	set_mtrr_done(&ctxt);	/* flush cache and disable MAPEN */
   5.334 +
   5.335 +	if (ccrc[5])
   5.336 +		printk(KERN_INFO "mtrr: ARR usage was not enabled, enabled manually\n");
   5.337 +	if (ccrc[3])
   5.338 +		printk(KERN_INFO "mtrr: ARR3 cannot be changed\n");
   5.339 +/*
   5.340 +    if ( ccrc[1] & 0x80) printk ("mtrr: SMM memory access through ARR3 disabled\n");
   5.341 +    if ( ccrc[1] & 0x04) printk ("mtrr: SMM memory access disabled\n");
   5.342 +    if ( ccrc[1] & 0x02) printk ("mtrr: SMM mode disabled\n");
   5.343 +*/
   5.344 +	if (ccrc[6])
   5.345 +		printk(KERN_INFO "mtrr: ARR3 was write protected, unprotected\n");
   5.346 +}
   5.347 +
   5.348 +static struct mtrr_ops cyrix_mtrr_ops = {
   5.349 +	.vendor            = X86_VENDOR_CYRIX,
   5.350 +	.init              = cyrix_arr_init,
   5.351 +	.set_all	   = cyrix_set_all,
   5.352 +	.set               = cyrix_set_arr,
   5.353 +	.get               = cyrix_get_arr,
   5.354 +	.get_free_region   = cyrix_get_free_region,
   5.355 +	.validate_add_page = generic_validate_add_page,
   5.356 +	.have_wrcomb       = positive_have_wrcomb,
   5.357 +};
   5.358 +
   5.359 +int __init cyrix_init_mtrr(void)
   5.360 +{
   5.361 +	set_mtrr_ops(&cyrix_mtrr_ops);
   5.362 +	return 0;
   5.363 +}
   5.364 +
   5.365 +//arch_initcall(cyrix_init_mtrr);
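
A note on the ARR size field computed by cyrix_set_arr() above: it is a 4-bit shift count, the position of the highest set bit plus one, with ARR7 counted in 256K units rather than 4K pages. A standalone sketch of encode and decode, exact for the power-of-two sizes the driver deals in (the arr_* names are illustrative):

    #include <assert.h>
    #include <stdint.h>

    /* Derive the 4-bit size field as cyrix_set_arr() does. */
    static uint8_t arr_size_field(unsigned int reg, uint32_t size_pages)
    {
        uint8_t field;

        if (reg >= 7)
            size_pages >>= 6;      /* ARR7 counts in 256K units */
        size_pages &= 0x7fff;      /* keep the field <= 15 */
        for (field = 0; size_pages; field++, size_pages >>= 1)
            ;
        return field;
    }

    /* Decode the field back to pages, as cyrix_get_arr() does. */
    static uint32_t arr_size_pages(unsigned int reg, uint8_t field)
    {
        if (field == 0)
            return 0;              /* disabled region */
        return (reg < 7 ? 0x1u : 0x40u) << (field - 1);
    }

    int main(void)
    {
        /* 256K (0x40 pages) encodes as 7 on ARR0 but 1 on ARR7. */
        assert(arr_size_field(0, 0x40) == 7 && arr_size_pages(0, 7) == 0x40);
        assert(arr_size_field(7, 0x40) == 1 && arr_size_pages(7, 1) == 0x40);
        return 0;
    }
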
     6.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     6.2 +++ b/xen/arch/x86/mtrr/generic.c	Mon Nov 29 10:10:30 2004 +0000
     6.3 @@ -0,0 +1,394 @@
      6.4 +/* This only handles 32-bit MTRRs on 32-bit hosts. This is strictly wrong
      6.5 +   because MTRRs can span up to 40 bits (36 bits on most modern x86) */
     6.6 +#include <xen/init.h>
     6.7 +#include <xen/slab.h>
     6.8 +#include <xen/mm.h>
     6.9 +#include <asm/io.h>
    6.10 +#include <asm/mtrr.h>
    6.11 +#include <asm/msr.h>
    6.12 +#include <asm/system.h>
    6.13 +#include <asm/cpufeature.h>
    6.14 +//#include <asm/tlbflush.h>
    6.15 +#include "mtrr.h"
    6.16 +
    6.17 +struct mtrr_state {
    6.18 +	struct mtrr_var_range *var_ranges;
    6.19 +	mtrr_type fixed_ranges[NUM_FIXED_RANGES];
    6.20 +	unsigned char enabled;
    6.21 +	mtrr_type def_type;
    6.22 +};
    6.23 +
    6.24 +static unsigned long smp_changes_mask;
    6.25 +struct mtrr_state mtrr_state = {};
    6.26 +
    6.27 +
    6.28 +/*  Get the MSR pair relating to a var range  */
    6.29 +static void __init
    6.30 +get_mtrr_var_range(unsigned int index, struct mtrr_var_range *vr)
    6.31 +{
    6.32 +	rdmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi);
    6.33 +	rdmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi);
    6.34 +}
    6.35 +
    6.36 +static void __init
    6.37 +get_fixed_ranges(mtrr_type * frs)
    6.38 +{
    6.39 +	unsigned int *p = (unsigned int *) frs;
    6.40 +	int i;
    6.41 +
    6.42 +	rdmsr(MTRRfix64K_00000_MSR, p[0], p[1]);
    6.43 +
    6.44 +	for (i = 0; i < 2; i++)
    6.45 +		rdmsr(MTRRfix16K_80000_MSR + i, p[2 + i * 2], p[3 + i * 2]);
    6.46 +	for (i = 0; i < 8; i++)
    6.47 +		rdmsr(MTRRfix4K_C0000_MSR + i, p[6 + i * 2], p[7 + i * 2]);
    6.48 +}
    6.49 +
    6.50 +/*  Grab all of the MTRR state for this CPU into *state  */
    6.51 +void __init get_mtrr_state(void)
    6.52 +{
    6.53 +	unsigned int i;
    6.54 +	struct mtrr_var_range *vrs;
    6.55 +	unsigned lo, dummy;
    6.56 +
    6.57 +	if (!mtrr_state.var_ranges) {
    6.58 +		mtrr_state.var_ranges = xmalloc(num_var_ranges * sizeof (struct mtrr_var_range));
    6.59 +		if (!mtrr_state.var_ranges)
    6.60 +			return;
    6.61 +	} 
    6.62 +	vrs = mtrr_state.var_ranges;
    6.63 +
    6.64 +	for (i = 0; i < num_var_ranges; i++)
    6.65 +		get_mtrr_var_range(i, &vrs[i]);
    6.66 +	get_fixed_ranges(mtrr_state.fixed_ranges);
    6.67 +
    6.68 +	rdmsr(MTRRdefType_MSR, lo, dummy);
    6.69 +	mtrr_state.def_type = (lo & 0xff);
    6.70 +	mtrr_state.enabled = (lo & 0xc00) >> 10;
    6.71 +}
    6.72 +
    6.73 +/*  Free resources associated with a struct mtrr_state  */
    6.74 +void __init finalize_mtrr_state(void)
    6.75 +{
    6.76 +	if (mtrr_state.var_ranges)
    6.77 +		xfree(mtrr_state.var_ranges);
    6.78 +	mtrr_state.var_ranges = NULL;
    6.79 +}
    6.80 +
     6.81 +/*  Some BIOSes are broken and don't set all MTRRs the same!  */
    6.82 +void __init mtrr_state_warn(void)
    6.83 +{
    6.84 +	unsigned long mask = smp_changes_mask;
    6.85 +
    6.86 +	if (!mask)
    6.87 +		return;
    6.88 +	if (mask & MTRR_CHANGE_MASK_FIXED)
    6.89 +		printk(KERN_WARNING "mtrr: your CPUs had inconsistent fixed MTRR settings\n");
    6.90 +	if (mask & MTRR_CHANGE_MASK_VARIABLE)
    6.91 +		printk(KERN_WARNING "mtrr: your CPUs had inconsistent variable MTRR settings\n");
    6.92 +	if (mask & MTRR_CHANGE_MASK_DEFTYPE)
    6.93 +		printk(KERN_WARNING "mtrr: your CPUs had inconsistent MTRRdefType settings\n");
     6.94 +	printk(KERN_INFO "mtrr: probably your BIOS does not set up all CPUs.\n");
    6.95 +	printk(KERN_INFO "mtrr: corrected configuration.\n");
    6.96 +}
    6.97 +
    6.98 +
    6.99 +int generic_get_free_region(unsigned long base, unsigned long size)
   6.100 +/*  [SUMMARY] Get a free MTRR.
   6.101 +    <base> The starting (base) address of the region.
   6.102 +    <size> The size (in bytes) of the region.
    6.103 +    [RETURNS] The index of a free region on success, else -ENOSPC on error.
   6.104 +*/
   6.105 +{
   6.106 +	int i, max;
   6.107 +	mtrr_type ltype;
   6.108 +	unsigned long lbase;
   6.109 +	unsigned lsize;
   6.110 +
   6.111 +	max = num_var_ranges;
   6.112 +	for (i = 0; i < max; ++i) {
   6.113 +		mtrr_if->get(i, &lbase, &lsize, &ltype);
   6.114 +		if (lsize == 0)
   6.115 +			return i;
   6.116 +	}
   6.117 +	return -ENOSPC;
   6.118 +}
   6.119 +
   6.120 +void generic_get_mtrr(unsigned int reg, unsigned long *base,
   6.121 +		      unsigned int *size, mtrr_type * type)
   6.122 +{
   6.123 +	unsigned int mask_lo, mask_hi, base_lo, base_hi;
   6.124 +
   6.125 +	rdmsr(MTRRphysMask_MSR(reg), mask_lo, mask_hi);
   6.126 +	if ((mask_lo & 0x800) == 0) {
   6.127 +		/*  Invalid (i.e. free) range  */
   6.128 +		*base = 0;
   6.129 +		*size = 0;
   6.130 +		*type = 0;
   6.131 +		return;
   6.132 +	}
   6.133 +
   6.134 +	rdmsr(MTRRphysBase_MSR(reg), base_lo, base_hi);
   6.135 +
   6.136 +	/* Work out the shifted address mask. */
   6.137 +	mask_lo = size_or_mask | mask_hi << (32 - PAGE_SHIFT)
   6.138 +	    | mask_lo >> PAGE_SHIFT;
   6.139 +
   6.140 +	/* This works correctly if size is a power of two, i.e. a
   6.141 +	   contiguous range. */
   6.142 +	*size = -mask_lo;
   6.143 +	*base = base_hi << (32 - PAGE_SHIFT) | base_lo >> PAGE_SHIFT;
   6.144 +	*type = base_lo & 0xff;
   6.145 +}
   6.146 +
   6.147 +static int set_fixed_ranges(mtrr_type * frs)
   6.148 +{
   6.149 +	unsigned int *p = (unsigned int *) frs;
   6.150 +	int changed = FALSE;
   6.151 +	int i;
   6.152 +	unsigned int lo, hi;
   6.153 +
   6.154 +	rdmsr(MTRRfix64K_00000_MSR, lo, hi);
   6.155 +	if (p[0] != lo || p[1] != hi) {
   6.156 +		wrmsr(MTRRfix64K_00000_MSR, p[0], p[1]);
   6.157 +		changed = TRUE;
   6.158 +	}
   6.159 +
   6.160 +	for (i = 0; i < 2; i++) {
   6.161 +		rdmsr(MTRRfix16K_80000_MSR + i, lo, hi);
   6.162 +		if (p[2 + i * 2] != lo || p[3 + i * 2] != hi) {
   6.163 +			wrmsr(MTRRfix16K_80000_MSR + i, p[2 + i * 2],
   6.164 +			      p[3 + i * 2]);
   6.165 +			changed = TRUE;
   6.166 +		}
   6.167 +	}
   6.168 +
   6.169 +	for (i = 0; i < 8; i++) {
   6.170 +		rdmsr(MTRRfix4K_C0000_MSR + i, lo, hi);
   6.171 +		if (p[6 + i * 2] != lo || p[7 + i * 2] != hi) {
   6.172 +			wrmsr(MTRRfix4K_C0000_MSR + i, p[6 + i * 2],
   6.173 +			      p[7 + i * 2]);
   6.174 +			changed = TRUE;
   6.175 +		}
   6.176 +	}
   6.177 +	return changed;
   6.178 +}
   6.179 +
   6.180 +/*  Set the MSR pair relating to a var range. Returns TRUE if
   6.181 +    changes are made  */
   6.182 +static int set_mtrr_var_ranges(unsigned int index, struct mtrr_var_range *vr)
   6.183 +{
   6.184 +	unsigned int lo, hi;
   6.185 +	int changed = FALSE;
   6.186 +
   6.187 +	rdmsr(MTRRphysBase_MSR(index), lo, hi);
   6.188 +	if ((vr->base_lo & 0xfffff0ffUL) != (lo & 0xfffff0ffUL)
   6.189 +	    || (vr->base_hi & 0xfUL) != (hi & 0xfUL)) {
   6.190 +		wrmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi);
   6.191 +		changed = TRUE;
   6.192 +	}
   6.193 +
   6.194 +	rdmsr(MTRRphysMask_MSR(index), lo, hi);
   6.195 +
   6.196 +	if ((vr->mask_lo & 0xfffff800UL) != (lo & 0xfffff800UL)
   6.197 +	    || (vr->mask_hi & 0xfUL) != (hi & 0xfUL)) {
   6.198 +		wrmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi);
   6.199 +		changed = TRUE;
   6.200 +	}
   6.201 +	return changed;
   6.202 +}
   6.203 +
   6.204 +static unsigned long set_mtrr_state(u32 deftype_lo, u32 deftype_hi)
    6.205 +/*  [SUMMARY] Set the MTRR state for this CPU from the saved mtrr_state.
    6.206 +    <deftype_lo> Low dword of the saved MTRRdefType MSR value.
    6.207 +    <deftype_hi> High dword of the saved MTRRdefType MSR value.
    6.208 +    [NOTE] The CPU must already be in a safe state for MTRR changes.
    6.209 +    [RETURNS] 0 if no changes made, else a mask indicating what was changed.
   6.210 +*/
   6.211 +{
   6.212 +	unsigned int i;
   6.213 +	unsigned long change_mask = 0;
   6.214 +
   6.215 +	for (i = 0; i < num_var_ranges; i++)
   6.216 +		if (set_mtrr_var_ranges(i, &mtrr_state.var_ranges[i]))
   6.217 +			change_mask |= MTRR_CHANGE_MASK_VARIABLE;
   6.218 +
   6.219 +	if (set_fixed_ranges(mtrr_state.fixed_ranges))
   6.220 +		change_mask |= MTRR_CHANGE_MASK_FIXED;
   6.221 +
   6.222 +	/*  Set_mtrr_restore restores the old value of MTRRdefType,
   6.223 +	   so to set it we fiddle with the saved value  */
   6.224 +	if ((deftype_lo & 0xff) != mtrr_state.def_type
   6.225 +	    || ((deftype_lo & 0xc00) >> 10) != mtrr_state.enabled) {
   6.226 +		deftype_lo |= (mtrr_state.def_type | mtrr_state.enabled << 10);
   6.227 +		change_mask |= MTRR_CHANGE_MASK_DEFTYPE;
   6.228 +	}
   6.229 +
   6.230 +	return change_mask;
   6.231 +}
   6.232 +
   6.233 +
   6.234 +static unsigned long cr4 = 0;
   6.235 +static u32 deftype_lo, deftype_hi;
   6.236 +static spinlock_t set_atomicity_lock = SPIN_LOCK_UNLOCKED;
   6.237 +
   6.238 +static void prepare_set(void)
   6.239 +{
   6.240 +	unsigned long cr0;
   6.241 +
   6.242 +	/*  Note that this is not ideal, since the cache is only flushed/disabled
   6.243 +	   for this CPU while the MTRRs are changed, but changing this requires
   6.244 +	   more invasive changes to the way the kernel boots  */
   6.245 +	spin_lock(&set_atomicity_lock);
   6.246 +
   6.247 +	/*  Enter the no-fill (CD=1, NW=0) cache mode and flush caches. */
   6.248 +	cr0 = read_cr0() | 0x40000000;	/* set CD flag */
   6.249 +	wbinvd();
   6.250 +	write_cr0(cr0);
   6.251 +	wbinvd();
   6.252 +
   6.253 +	/*  Save value of CR4 and clear Page Global Enable (bit 7)  */
   6.254 +	if ( cpu_has_pge ) {
   6.255 +		cr4 = read_cr4();
   6.256 +		write_cr4(cr4 & (unsigned char) ~(1 << 7));
   6.257 +	}
   6.258 +
   6.259 +	/* Flush all TLBs via a mov %cr3, %reg; mov %reg, %cr3 */
   6.260 +	__flush_tlb();
   6.261 +
   6.262 +	/*  Save MTRR state */
   6.263 +	rdmsr(MTRRdefType_MSR, deftype_lo, deftype_hi);
   6.264 +
   6.265 +	/*  Disable MTRRs, and set the default type to uncached  */
   6.266 +	wrmsr(MTRRdefType_MSR, deftype_lo & 0xf300UL, deftype_hi);
   6.267 +}
   6.268 +
   6.269 +static void post_set(void)
   6.270 +{
   6.271 +	/*  Flush caches and TLBs  */
   6.272 +	wbinvd();
   6.273 +	__flush_tlb();
   6.274 +
   6.275 +	/* Intel (P6) standard MTRRs */
   6.276 +	wrmsr(MTRRdefType_MSR, deftype_lo, deftype_hi);
   6.277 +		
   6.278 +	/*  Enable caches  */
   6.279 +	write_cr0(read_cr0() & 0xbfffffff);
   6.280 +
   6.281 +	/*  Restore value of CR4  */
   6.282 +	if ( cpu_has_pge )
   6.283 +		write_cr4(cr4);
   6.284 +	spin_unlock(&set_atomicity_lock);
   6.285 +}
   6.286 +
   6.287 +static void generic_set_all(void)
   6.288 +{
   6.289 +	unsigned long mask, count;
   6.290 +
   6.291 +	prepare_set();
   6.292 +
   6.293 +	/* Actually set the state */
   6.294 +	mask = set_mtrr_state(deftype_lo,deftype_hi);
   6.295 +
   6.296 +	post_set();
   6.297 +
   6.298 +	/*  Use the atomic bitops to update the global mask  */
   6.299 +	for (count = 0; count < sizeof mask * 8; ++count) {
   6.300 +		if (mask & 0x01)
   6.301 +			set_bit(count, &smp_changes_mask);
   6.302 +		mask >>= 1;
   6.303 +	}
   6.304 +	
   6.305 +}
   6.306 +
   6.307 +static void generic_set_mtrr(unsigned int reg, unsigned long base,
   6.308 +			     unsigned long size, mtrr_type type)
   6.309 +/*  [SUMMARY] Set variable MTRR register on the local CPU.
   6.310 +    <reg> The register to set.
   6.311 +    <base> The base address of the region.
   6.312 +    <size> The size of the region. If this is 0 the region is disabled.
   6.313 +    <type> The type of the region.
    6.314 +    [NOTE] The <do_safe> flag of the Linux original is gone; the change
    6.315 +    is always bracketed by prepare_set()/post_set() here.
   6.316 +    [RETURNS] Nothing.
   6.317 +*/
   6.318 +{
   6.319 +	prepare_set();
   6.320 +
   6.321 +	if (size == 0) {
   6.322 +		/* The invalid bit is kept in the mask, so we simply clear the
   6.323 +		   relevant mask register to disable a range. */
   6.324 +		wrmsr(MTRRphysMask_MSR(reg), 0, 0);
   6.325 +	} else {
   6.326 +		wrmsr(MTRRphysBase_MSR(reg), base << PAGE_SHIFT | type,
   6.327 +		      (base & size_and_mask) >> (32 - PAGE_SHIFT));
   6.328 +		wrmsr(MTRRphysMask_MSR(reg), -size << PAGE_SHIFT | 0x800,
   6.329 +		      (-size & size_and_mask) >> (32 - PAGE_SHIFT));
   6.330 +	}
   6.331 +
   6.332 +	post_set();
   6.333 +}
   6.334 +
   6.335 +int generic_validate_add_page(unsigned long base, unsigned long size, unsigned int type)
   6.336 +{
   6.337 +	unsigned long lbase, last;
   6.338 +
   6.339 +	/*  For Intel PPro stepping <= 7, must be 4 MiB aligned 
   6.340 +	    and not touch 0x70000000->0x7003FFFF */
   6.341 +	if (is_cpu(INTEL) && boot_cpu_data.x86 == 6 &&
   6.342 +	    boot_cpu_data.x86_model == 1 &&
   6.343 +	    boot_cpu_data.x86_mask <= 7) {
   6.344 +		if (base & ((1 << (22 - PAGE_SHIFT)) - 1)) {
   6.345 +			printk(KERN_WARNING "mtrr: base(0x%lx000) is not 4 MiB aligned\n", base);
   6.346 +			return -EINVAL;
   6.347 +		}
   6.348 +		if (!(base + size < 0x70000000 || base > 0x7003FFFF) &&
   6.349 +		    (type == MTRR_TYPE_WRCOMB
   6.350 +		     || type == MTRR_TYPE_WRBACK)) {
   6.351 +			printk(KERN_WARNING "mtrr: writable mtrr between 0x70000000 and 0x7003FFFF may hang the CPU.\n");
   6.352 +			return -EINVAL;
   6.353 +		}
   6.354 +	}
   6.355 +
   6.356 +	if (base + size < 0x100) {
   6.357 +		printk(KERN_WARNING "mtrr: cannot set region below 1 MiB (0x%lx000,0x%lx000)\n",
   6.358 +		       base, size);
   6.359 +		return -EINVAL;
   6.360 +	}
   6.361 +	/*  Check upper bits of base and last are equal and lower bits are 0
   6.362 +	    for base and 1 for last  */
   6.363 +	last = base + size - 1;
   6.364 +	for (lbase = base; !(lbase & 1) && (last & 1);
   6.365 +	     lbase = lbase >> 1, last = last >> 1) ;
   6.366 +	if (lbase != last) {
   6.367 +		printk(KERN_WARNING "mtrr: base(0x%lx000) is not aligned on a size(0x%lx000) boundary\n",
   6.368 +		       base, size);
   6.369 +		return -EINVAL;
   6.370 +	}
   6.371 +	return 0;
   6.372 +}
   6.373 +
   6.374 +
   6.375 +int generic_have_wrcomb(void)
   6.376 +{
   6.377 +	unsigned long config, dummy;
   6.378 +	rdmsr(MTRRcap_MSR, config, dummy);
   6.379 +	return (config & (1 << 10));
   6.380 +}
   6.381 +
   6.382 +int positive_have_wrcomb(void)
   6.383 +{
   6.384 +	return 1;
   6.385 +}
   6.386 +
    6.387 +/* Generic (Intel-style) MTRR operations
    6.388 + */
   6.389 +struct mtrr_ops generic_mtrr_ops = {
   6.390 +	.use_intel_if      = 1,
   6.391 +	.set_all	   = generic_set_all,
   6.392 +	.get               = generic_get_mtrr,
   6.393 +	.get_free_region   = generic_get_free_region,
   6.394 +	.set               = generic_set_mtrr,
   6.395 +	.validate_add_page = generic_validate_add_page,
   6.396 +	.have_wrcomb       = generic_have_wrcomb,
   6.397 +};
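
A note on the mask arithmetic in generic_get_mtrr() above: once the PhysMask value is shifted to page granularity and the bits above the machine's MTRR width are forced on (the role of size_or_mask), the mask of a contiguous power-of-two region equals -size, so size = -mask. A standalone sketch of that arithmetic, assuming 36-bit physical addressing (so the forced-on bits over 32-bit page numbers are 0xff000000):

    #include <assert.h>
    #include <stdint.h>

    #define PAGE_SHIFT   12
    /* 36-bit physical => 24-bit page numbers; force bits 31:24 on,
     * playing the role of size_or_mask above. */
    #define SIZE_OR_MASK 0xff000000u

    /* Turn a PhysMask MSR pair into a size in pages, as
     * generic_get_mtrr() does (the valid bit 0x800 shifts away). */
    static uint32_t mtrr_mask_to_pages(uint32_t mask_lo, uint32_t mask_hi)
    {
        uint32_t mask = SIZE_OR_MASK
            | mask_hi << (32 - PAGE_SHIFT)
            | mask_lo >> PAGE_SHIFT;
        return -mask;   /* valid only for contiguous (power-of-two) ranges */
    }

    int main(void)
    {
        /* A 128MB region: PhysMask is 0xF.F8000800 including the valid
         * bit, and 128MB is 0x8000 4K pages. */
        assert(mtrr_mask_to_pages(0xf8000800u, 0xfu) == 0x8000);
        return 0;
    }
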
     7.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     7.2 +++ b/xen/arch/x86/mtrr/main.c	Mon Nov 29 10:10:30 2004 +0000
     7.3 @@ -0,0 +1,674 @@
     7.4 +/*  Generic MTRR (Memory Type Range Register) driver.
     7.5 +
     7.6 +    Copyright (C) 1997-2000  Richard Gooch
     7.7 +    Copyright (c) 2002	     Patrick Mochel
     7.8 +
     7.9 +    This library is free software; you can redistribute it and/or
    7.10 +    modify it under the terms of the GNU Library General Public
    7.11 +    License as published by the Free Software Foundation; either
    7.12 +    version 2 of the License, or (at your option) any later version.
    7.13 +
    7.14 +    This library is distributed in the hope that it will be useful,
    7.15 +    but WITHOUT ANY WARRANTY; without even the implied warranty of
    7.16 +    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
    7.17 +    Library General Public License for more details.
    7.18 +
    7.19 +    You should have received a copy of the GNU Library General Public
    7.20 +    License along with this library; if not, write to the Free
    7.21 +    Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
    7.22 +
    7.23 +    Richard Gooch may be reached by email at  rgooch@atnf.csiro.au
    7.24 +    The postal address is:
    7.25 +      Richard Gooch, c/o ATNF, P. O. Box 76, Epping, N.S.W., 2121, Australia.
    7.26 +
    7.27 +    Source: "Pentium Pro Family Developer's Manual, Volume 3:
    7.28 +    Operating System Writer's Guide" (Intel document number 242692),
    7.29 +    section 11.11.7
    7.30 +
    7.31 +    This was cleaned and made readable by Patrick Mochel <mochel@osdl.org> 
    7.32 +    on 6-7 March 2002. 
    7.33 +    Source: Intel Architecture Software Developers Manual, Volume 3: 
    7.34 +    System Programming Guide; Section 9.11. (1997 edition - PPro).
    7.35 +*/
    7.36 +
    7.37 +#include <xen/config.h>
    7.38 +#include <xen/init.h>
    7.39 +#include <xen/pci.h>
    7.40 +#include <xen/smp.h>
    7.41 +#include <asm/mtrr.h>
    7.42 +#include <asm/uaccess.h>
    7.43 +#include <asm/processor.h>
    7.44 +#include <asm/msr.h>
    7.45 +#include "mtrr.h"
    7.46 +
    7.47 +#define MTRR_VERSION            "2.0 (20020519)"
    7.48 +
    7.49 +/* No blocking mutexes in Xen. Spin instead. */
    7.50 +#define DECLARE_MUTEX(_m) spinlock_t _m = SPIN_LOCK_UNLOCKED
    7.51 +#define down(_m) spin_lock(_m)
    7.52 +#define up(_m) spin_unlock(_m)
    7.53 +
    7.54 +#define num_booting_cpus() smp_num_cpus
    7.55 +
    7.56 +u32 num_var_ranges = 0;
    7.57 +
    7.58 +unsigned int *usage_table;
    7.59 +static DECLARE_MUTEX(main_lock);
    7.60 +
    7.61 +u32 size_or_mask, size_and_mask;
    7.62 +
    7.63 +static struct mtrr_ops * mtrr_ops[X86_VENDOR_NUM] = {};
    7.64 +
    7.65 +struct mtrr_ops * mtrr_if = NULL;
    7.66 +
    7.67 +__initdata char *mtrr_if_name[] = {
    7.68 +    "none", "Intel", "AMD K6", "Cyrix ARR", "Centaur MCR"
    7.69 +};
    7.70 +
    7.71 +static void set_mtrr(unsigned int reg, unsigned long base,
    7.72 +		     unsigned long size, mtrr_type type);
    7.73 +
    7.74 +extern int arr3_protected;
    7.75 +
    7.76 +static char *mtrr_strings[MTRR_NUM_TYPES] =
    7.77 +{
    7.78 +    "uncachable",               /* 0 */
    7.79 +    "write-combining",          /* 1 */
    7.80 +    "?",                        /* 2 */
    7.81 +    "?",                        /* 3 */
    7.82 +    "write-through",            /* 4 */
    7.83 +    "write-protect",            /* 5 */
    7.84 +    "write-back",               /* 6 */
    7.85 +};
    7.86 +
    7.87 +char *mtrr_attrib_to_str(int x)
    7.88 +{
    7.89 +	return (x <= 6) ? mtrr_strings[x] : "?";
    7.90 +}
    7.91 +
    7.92 +void set_mtrr_ops(struct mtrr_ops * ops)
    7.93 +{
    7.94 +	if (ops->vendor && ops->vendor < X86_VENDOR_NUM)
    7.95 +		mtrr_ops[ops->vendor] = ops;
    7.96 +}
    7.97 +
    7.98 +/*  Returns non-zero if we have the write-combining memory type  */
    7.99 +static int have_wrcomb(void)
   7.100 +{
   7.101 +	struct pci_dev *dev;
   7.102 +	
   7.103 +	if ((dev = pci_find_class(PCI_CLASS_BRIDGE_HOST << 8, NULL)) != NULL) {
    7.104 +		/* ServerWorks LE chipsets have problems with write-combining.
    7.105 +		   Don't allow it and leave room for other chipsets to be tagged */
   7.106 +		if (dev->vendor == PCI_VENDOR_ID_SERVERWORKS &&
   7.107 +		    dev->device == PCI_DEVICE_ID_SERVERWORKS_LE) {
   7.108 +			printk(KERN_INFO "mtrr: Serverworks LE detected. Write-combining disabled.\n");
   7.109 +			return 0;
   7.110 +		}
    7.111 +		/* Intel 450NX errata #23: non-ascending cacheline evictions to
    7.112 +		   write-combining memory may result in data corruption */
   7.113 +		if (dev->vendor == PCI_VENDOR_ID_INTEL &&
   7.114 +		    dev->device == PCI_DEVICE_ID_INTEL_82451NX)
   7.115 +		{
   7.116 +			printk(KERN_INFO "mtrr: Intel 450NX MMC detected. Write-combining disabled.\n");
   7.117 +			return 0;
   7.118 +		}
   7.119 +	}		
   7.120 +	return (mtrr_if->have_wrcomb ? mtrr_if->have_wrcomb() : 0);
   7.121 +}
   7.122 +
    7.123 +/*  This function determines the number of variable MTRRs  */
   7.124 +void __init set_num_var_ranges(void)
   7.125 +{
   7.126 +	unsigned long config = 0, dummy;
   7.127 +
   7.128 +	if (use_intel()) {
   7.129 +		rdmsr(MTRRcap_MSR, config, dummy);
   7.130 +	} else if (is_cpu(AMD))
   7.131 +		config = 2;
   7.132 +	else if (is_cpu(CYRIX) || is_cpu(CENTAUR))
   7.133 +		config = 8;
   7.134 +	num_var_ranges = config & 0xff;
   7.135 +}
   7.136 +
   7.137 +static void __init init_table(void)
   7.138 +{
   7.139 +	int i, max;
   7.140 +
   7.141 +	max = num_var_ranges;
   7.142 +	if ((usage_table = xmalloc(max * sizeof *usage_table))
   7.143 +	    == NULL) {
   7.144 +		printk(KERN_ERR "mtrr: could not allocate\n");
   7.145 +		return;
   7.146 +	}
   7.147 +	for (i = 0; i < max; i++)
   7.148 +		usage_table[i] = 1;
   7.149 +}
   7.150 +
   7.151 +struct set_mtrr_data {
   7.152 +	atomic_t	count;
   7.153 +	atomic_t	gate;
   7.154 +	unsigned long	smp_base;
   7.155 +	unsigned long	smp_size;
   7.156 +	unsigned int	smp_reg;
   7.157 +	mtrr_type	smp_type;
   7.158 +};
   7.159 +
   7.160 +#ifdef CONFIG_SMP
   7.161 +
   7.162 +static void ipi_handler(void *info)
   7.163 +/*  [SUMMARY] Synchronisation handler. Executed by "other" CPUs.
   7.164 +    [RETURNS] Nothing.
   7.165 +*/
   7.166 +{
   7.167 +	struct set_mtrr_data *data = info;
   7.168 +	unsigned long flags;
   7.169 +
   7.170 +	local_irq_save(flags);
   7.171 +
   7.172 +	atomic_dec(&data->count);
   7.173 +	while(!atomic_read(&data->gate)) {
   7.174 +		cpu_relax();
   7.175 +		barrier();
   7.176 +	}
   7.177 +
   7.178 +	/*  The master has cleared me to execute  */
   7.179 +	if (data->smp_reg != ~0U) 
   7.180 +		mtrr_if->set(data->smp_reg, data->smp_base, 
   7.181 +			     data->smp_size, data->smp_type);
   7.182 +	else
   7.183 +		mtrr_if->set_all();
   7.184 +
   7.185 +	atomic_dec(&data->count);
   7.186 +	while(atomic_read(&data->gate)) {
   7.187 +		cpu_relax();
   7.188 +		barrier();
   7.189 +	}
   7.190 +	atomic_dec(&data->count);
   7.191 +	local_irq_restore(flags);
   7.192 +}
   7.193 +
   7.194 +#endif
   7.195 +
   7.196 +/**
   7.197 + * set_mtrr - update mtrrs on all processors
   7.198 + * @reg:	mtrr in question
   7.199 + * @base:	mtrr base
   7.200 + * @size:	mtrr size
   7.201 + * @type:	mtrr type
   7.202 + *
   7.203 + * This is kinda tricky, but fortunately, Intel spelled it out for us cleanly:
   7.204 + * 
   7.205 + * 1. Send IPI to do the following:
   7.206 + * 2. Disable Interrupts
   7.207 + * 3. Wait for all procs to do so 
   7.208 + * 4. Enter no-fill cache mode
   7.209 + * 5. Flush caches
   7.210 + * 6. Clear PGE bit
   7.211 + * 7. Flush all TLBs
   7.212 + * 8. Disable all range registers
   7.213 + * 9. Update the MTRRs
   7.214 + * 10. Enable all range registers
   7.215 + * 11. Flush all TLBs and caches again
   7.216 + * 12. Enter normal cache mode and reenable caching
   7.217 + * 13. Set PGE 
   7.218 + * 14. Wait for buddies to catch up
   7.219 + * 15. Enable interrupts.
   7.220 + * 
   7.221 + * What does that mean for us? Well, first we set data.count to the number
   7.222 + * of CPUs. As each CPU disables interrupts, it'll decrement it once. We wait
   7.223 + * until it hits 0 and proceed. We set the data.gate flag and reset data.count.
   7.224 + * Meanwhile, they are waiting for that flag to be set. Once it's set, each 
    7.225 + * CPU goes through the transition of updating MTRRs. The CPU vendors may each do it
    7.226 + * differently, so we call the vendor's mtrr_if->set() callback and let it take care of it.
   7.227 + * When they're done, they again decrement data->count and wait for data.gate to 
   7.228 + * be reset. 
   7.229 + * When we finish, we wait for data.count to hit 0 and toggle the data.gate flag.
   7.230 + * Everyone then enables interrupts and we all continue on.
   7.231 + *
   7.232 + * Note that the mechanism is the same for UP systems, too; all the SMP stuff
   7.233 + * becomes nops.
   7.234 + */
   7.235 +static void set_mtrr(unsigned int reg, unsigned long base,
   7.236 +		     unsigned long size, mtrr_type type)
   7.237 +{
   7.238 +	struct set_mtrr_data data;
   7.239 +	unsigned long flags;
   7.240 +
   7.241 +	data.smp_reg = reg;
   7.242 +	data.smp_base = base;
   7.243 +	data.smp_size = size;
   7.244 +	data.smp_type = type;
   7.245 +	atomic_set(&data.count, num_booting_cpus() - 1);
   7.246 +	atomic_set(&data.gate,0);
   7.247 +
   7.248 +	/*  Start the ball rolling on other CPUs  */
   7.249 +	if (smp_call_function(ipi_handler, &data, 1, 0) != 0)
   7.250 +		panic("mtrr: timed out waiting for other CPUs\n");
   7.251 +
   7.252 +	local_irq_save(flags);
   7.253 +
   7.254 +	while(atomic_read(&data.count)) {
   7.255 +		cpu_relax();
   7.256 +		barrier();
   7.257 +	}
   7.258 +	/* ok, reset count and toggle gate */
   7.259 +	atomic_set(&data.count, num_booting_cpus() - 1);
   7.260 +	atomic_set(&data.gate,1);
   7.261 +
   7.262 +	/* do our MTRR business */
   7.263 +
   7.264 +	/* HACK!
   7.265 +	 * We use this same function to initialize the mtrrs on boot.
   7.266 +	 * The state of the boot cpu's mtrrs has been saved, and we want
   7.267 +	 * to replicate across all the APs. 
   7.268 +	 * If we're doing that @reg is set to something special...
   7.269 +	 */
   7.270 +	if (reg != ~0U) 
   7.271 +		mtrr_if->set(reg,base,size,type);
   7.272 +
   7.273 +	/* wait for the others */
   7.274 +	while(atomic_read(&data.count)) {
   7.275 +		cpu_relax();
   7.276 +		barrier();
   7.277 +	}
   7.278 +	atomic_set(&data.count, num_booting_cpus() - 1);
   7.279 +	atomic_set(&data.gate,0);
   7.280 +
   7.281 +	/*
    7.282 +	 * Wait here for everyone to have seen the gate change,
    7.283 +	 * so we're the last ones to touch 'data'
   7.284 +	 */
   7.285 +	while(atomic_read(&data.count)) {
   7.286 +		cpu_relax();
   7.287 +		barrier();
   7.288 +	}
   7.289 +	local_irq_restore(flags);
   7.290 +}
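
Stripped of the MTRR-specific work, the count/gate protocol described above is a two-phase rendezvous. Below is a condensed restatement of the master's side of one phase, using the same helpers as the surrounding code (illustrative only; the authoritative logic is set_mtrr() and ipi_handler() themselves):

    /* Master's side of one rendezvous phase: wait for every other CPU
     * to check in by decrementing count, then re-arm the counter and
     * release them all at once by flipping the gate. */
    static void rendezvous_phase(struct set_mtrr_data *data, int gate_value)
    {
        while (atomic_read(&data->count)) {
            cpu_relax();
            barrier();
        }
        atomic_set(&data->count, num_booting_cpus() - 1);
        atomic_set(&data->gate, gate_value);
    }

set_mtrr() runs this phase twice (gate 1 to let the other CPUs program their MTRRs, gate 0 to let them re-enable interrupts) and then waits on count once more, so that it is the last CPU to touch the on-stack data.
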
   7.291 +
   7.292 +/**
   7.293 + *	mtrr_add_page - Add a memory type region
   7.294 + *	@base: Physical base address of region in pages (4 KB)
   7.295 + *	@size: Physical size of region in pages (4 KB)
   7.296 + *	@type: Type of MTRR desired
   7.297 + *	@increment: If this is true do usage counting on the region
   7.298 + *
   7.299 + *	Memory type region registers control the caching on newer Intel and
    7.300 + *	non-Intel processors. This function allows drivers to request that an
    7.301 + *	MTRR be added. The details and hardware specifics of each processor's
   7.302 + *	implementation are hidden from the caller, but nevertheless the 
   7.303 + *	caller should expect to need to provide a power of two size on an
   7.304 + *	equivalent power of two boundary.
   7.305 + *
   7.306 + *	If the region cannot be added either because all regions are in use
   7.307 + *	or the CPU cannot support it a negative value is returned. On success
   7.308 + *	the register number for this entry is returned, but should be treated
   7.309 + *	as a cookie only.
   7.310 + *
   7.311 + *	On a multiprocessor machine the changes are made to all processors.
   7.312 + *	This is required on x86 by the Intel processors.
   7.313 + *
   7.314 + *	The available types are
   7.315 + *
   7.316 + *	%MTRR_TYPE_UNCACHABLE	-	No caching
   7.317 + *
   7.318 + *	%MTRR_TYPE_WRBACK	-	Write data back in bursts whenever
   7.319 + *
   7.320 + *	%MTRR_TYPE_WRCOMB	-	Write data back soon but allow bursts
   7.321 + *
   7.322 + *	%MTRR_TYPE_WRTHROUGH	-	Cache reads but not writes
   7.323 + *
   7.324 + *	BUGS: Needs a quiet flag for the cases where drivers do not mind
   7.325 + *	failures and do not wish system log messages to be sent.
   7.326 + */
   7.327 +
   7.328 +int mtrr_add_page(unsigned long base, unsigned long size, 
   7.329 +		  unsigned int type, char increment)
   7.330 +{
   7.331 +	int i;
   7.332 +	mtrr_type ltype;
   7.333 +	unsigned long lbase;
   7.334 +	unsigned int lsize;
   7.335 +	int error;
   7.336 +
   7.337 +	if (!mtrr_if)
   7.338 +		return -ENXIO;
   7.339 +		
   7.340 +	if ((error = mtrr_if->validate_add_page(base,size,type)))
   7.341 +		return error;
   7.342 +
   7.343 +	if (type >= MTRR_NUM_TYPES) {
   7.344 +		printk(KERN_WARNING "mtrr: type: %u invalid\n", type);
   7.345 +		return -EINVAL;
   7.346 +	}
   7.347 +
   7.348 +	/*  If the type is WC, check that this processor supports it  */
   7.349 +	if ((type == MTRR_TYPE_WRCOMB) && !have_wrcomb()) {
   7.350 +		printk(KERN_WARNING
   7.351 +		       "mtrr: your processor doesn't support write-combining\n");
   7.352 +		return -ENOSYS;
   7.353 +	}
   7.354 +
   7.355 +	if (base & size_or_mask || size & size_or_mask) {
   7.356 +		printk(KERN_WARNING "mtrr: base or size exceeds the MTRR width\n");
   7.357 +		return -EINVAL;
   7.358 +	}
   7.359 +
   7.360 +	error = -EINVAL;
   7.361 +
   7.362 +	/*  Search for existing MTRR  */
   7.363 +	down(&main_lock);
   7.364 +	for (i = 0; i < num_var_ranges; ++i) {
   7.365 +		mtrr_if->get(i, &lbase, &lsize, &ltype);
   7.366 +		if (base >= lbase + lsize)
   7.367 +			continue;
   7.368 +		if ((base < lbase) && (base + size <= lbase))
   7.369 +			continue;
   7.370 +		/*  At this point we know there is some kind of overlap/enclosure  */
   7.371 +		if ((base < lbase) || (base + size > lbase + lsize)) {
   7.372 +			printk(KERN_WARNING
   7.373 +			       "mtrr: 0x%lx000,0x%lx000 overlaps existing"
   7.374 +			       " 0x%lx000,0x%x000\n", base, size, lbase,
   7.375 +			       lsize);
   7.376 +			goto out;
   7.377 +		}
   7.378 +		/*  New region is enclosed by an existing region  */
   7.379 +		if (ltype != type) {
   7.380 +			if (type == MTRR_TYPE_UNCACHABLE)
   7.381 +				continue;
    7.382 +			printk(KERN_WARNING "mtrr: type mismatch for %lx000,%lx000 old: %s new: %s\n",
    7.383 +			       base, size, mtrr_attrib_to_str(ltype),
    7.384 +			       mtrr_attrib_to_str(type));
   7.385 +			goto out;
   7.386 +		}
   7.387 +		if (increment)
   7.388 +			++usage_table[i];
   7.389 +		error = i;
   7.390 +		goto out;
   7.391 +	}
   7.392 +	/*  Search for an empty MTRR  */
   7.393 +	i = mtrr_if->get_free_region(base, size);
   7.394 +	if (i >= 0) {
   7.395 +		set_mtrr(i, base, size, type);
   7.396 +		usage_table[i] = 1;
   7.397 +	} else
   7.398 +		printk(KERN_INFO "mtrr: no more MTRRs available\n");
   7.399 +	error = i;
   7.400 + out:
   7.401 +	up(&main_lock);
   7.402 +	return error;
   7.403 +}
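
For reference, a minimal usage sketch of the page-granular interface (not part of the changeset; the aperture address, size, and helper name are hypothetical):

	static int example_map_aperture(void)
	{
		/* 4 MB at physical 0xf8000000, expressed in 4 KB pages */
		return mtrr_add_page(0xf8000000UL >> PAGE_SHIFT,
				     0x400000UL >> PAGE_SHIFT,
				     MTRR_TYPE_WRCOMB, 1 /* count usage */);
	}

The return value, when non-negative, is the register cookie described above.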
   7.404 +
   7.405 +/**
   7.406 + *	mtrr_add - Add a memory type region
   7.407 + *	@base: Physical base address of region
   7.408 + *	@size: Physical size of region
   7.409 + *	@type: Type of MTRR desired
   7.410 + *	@increment: If this is true do usage counting on the region
   7.411 + *
    7.412 + *	Memory type region registers control the caching on newer Intel and
    7.413 + *	non-Intel processors. This function allows drivers to request that an
    7.414 + *	MTRR be added. The details and hardware specifics of each processor's
    7.415 + *	implementation are hidden from the caller, but nevertheless the
    7.416 + *	caller should expect to provide a power-of-two size aligned on an
    7.417 + *	equivalent power-of-two boundary.
   7.418 + *
    7.419 + *	If the region cannot be added, either because all regions are in use
    7.420 + *	or the CPU cannot support it, a negative value is returned. On success
    7.421 + *	the register number for this entry is returned, but it should be
    7.422 + *	treated as an opaque cookie only.
   7.423 + *
   7.424 + *	On a multiprocessor machine the changes are made to all processors.
   7.425 + *	This is required on x86 by the Intel processors.
   7.426 + *
   7.427 + *	The available types are
   7.428 + *
   7.429 + *	%MTRR_TYPE_UNCACHABLE	-	No caching
   7.430 + *
    7.431 + *	%MTRR_TYPE_WRBACK	-	Write data back in bursts whenever possible
   7.432 + *
   7.433 + *	%MTRR_TYPE_WRCOMB	-	Write data back soon but allow bursts
   7.434 + *
   7.435 + *	%MTRR_TYPE_WRTHROUGH	-	Cache reads but not writes
   7.436 + *
   7.437 + *	BUGS: Needs a quiet flag for the cases where drivers do not mind
   7.438 + *	failures and do not wish system log messages to be sent.
   7.439 + */
   7.440 +
   7.441 +int
   7.442 +mtrr_add(unsigned long base, unsigned long size, unsigned int type,
   7.443 +	 char increment)
   7.444 +{
   7.445 +	if ((base & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1))) {
   7.446 +		printk(KERN_WARNING "mtrr: size and base must be multiples of 4 kiB\n");
   7.447 +		printk(KERN_DEBUG "mtrr: size: 0x%lx  base: 0x%lx\n", size, base);
   7.448 +		return -EINVAL;
   7.449 +	}
   7.450 +	return mtrr_add_page(base >> PAGE_SHIFT, size >> PAGE_SHIFT, type,
   7.451 +			     increment);
   7.452 +}
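
A corresponding hedged sketch for the byte-granular wrapper (the framebuffer address is made up; note the size is a power of two on a matching boundary, as the documentation above requires):

	static int example_fb_wc(void)
	{
		/* 16 MB framebuffer at 0xe0000000, 4 KB-aligned */
		return mtrr_add(0xe0000000UL, 0x1000000UL,
				MTRR_TYPE_WRCOMB, 1);
	}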
   7.453 +
   7.454 +/**
   7.455 + *	mtrr_del_page - delete a memory type region
   7.456 + *	@reg: Register returned by mtrr_add
   7.457 + *	@base: Physical base address
   7.458 + *	@size: Size of region
   7.459 + *
    7.460 + *	If a register number is supplied, base and size are ignored; this is
    7.461 + *	how drivers should call it.
    7.462 + *
    7.463 + *	Releases an MTRR region. If the usage count drops to zero the
    7.464 + *	register is freed and the region returns to its default state.
    7.465 + *	On success the register number is returned; on failure, a negative
    7.466 + *	error code is returned.
   7.467 + */
   7.468 +
   7.469 +int mtrr_del_page(int reg, unsigned long base, unsigned long size)
   7.470 +{
   7.471 +	int i, max;
   7.472 +	mtrr_type ltype;
   7.473 +	unsigned long lbase;
   7.474 +	unsigned int lsize;
   7.475 +	int error = -EINVAL;
   7.476 +
   7.477 +	if (!mtrr_if)
   7.478 +		return -ENXIO;
   7.479 +
   7.480 +	max = num_var_ranges;
   7.481 +	down(&main_lock);
   7.482 +	if (reg < 0) {
   7.483 +		/*  Search for existing MTRR  */
   7.484 +		for (i = 0; i < max; ++i) {
   7.485 +			mtrr_if->get(i, &lbase, &lsize, &ltype);
   7.486 +			if (lbase == base && lsize == size) {
   7.487 +				reg = i;
   7.488 +				break;
   7.489 +			}
   7.490 +		}
   7.491 +		if (reg < 0) {
   7.492 +			printk(KERN_DEBUG "mtrr: no MTRR for %lx000,%lx000 found\n", base,
   7.493 +			       size);
   7.494 +			goto out;
   7.495 +		}
   7.496 +	}
   7.497 +	if (reg >= max) {
   7.498 +		printk(KERN_WARNING "mtrr: register: %d too big\n", reg);
   7.499 +		goto out;
   7.500 +	}
   7.501 +	if (is_cpu(CYRIX) && !use_intel()) {
   7.502 +		if ((reg == 3) && arr3_protected) {
   7.503 +			printk(KERN_WARNING "mtrr: ARR3 cannot be changed\n");
   7.504 +			goto out;
   7.505 +		}
   7.506 +	}
   7.507 +	mtrr_if->get(reg, &lbase, &lsize, &ltype);
   7.508 +	if (lsize < 1) {
   7.509 +		printk(KERN_WARNING "mtrr: MTRR %d not used\n", reg);
   7.510 +		goto out;
   7.511 +	}
   7.512 +	if (usage_table[reg] < 1) {
   7.513 +		printk(KERN_WARNING "mtrr: reg: %d has count=0\n", reg);
   7.514 +		goto out;
   7.515 +	}
   7.516 +	if (--usage_table[reg] < 1)
   7.517 +		set_mtrr(reg, 0, 0, 0);
   7.518 +	error = reg;
   7.519 + out:
   7.520 +	up(&main_lock);
   7.521 +	return error;
   7.522 +}
   7.523 +/**
   7.524 + *	mtrr_del - delete a memory type region
   7.525 + *	@reg: Register returned by mtrr_add
   7.526 + *	@base: Physical base address
   7.527 + *	@size: Size of region
   7.528 + *
    7.529 + *	If a register number is supplied, base and size are ignored; this is
    7.530 + *	how drivers should call it.
    7.531 + *
    7.532 + *	Releases an MTRR region. If the usage count drops to zero the
    7.533 + *	register is freed and the region returns to its default state.
    7.534 + *	On success the register number is returned; on failure, a negative
    7.535 + *	error code is returned.
   7.536 + */
   7.537 +
   7.538 +int
   7.539 +mtrr_del(int reg, unsigned long base, unsigned long size)
   7.540 +{
   7.541 +	if ((base & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1))) {
    7.542 +		printk(KERN_WARNING "mtrr: size and base must be multiples of 4 kiB\n");
   7.543 +		printk(KERN_DEBUG "mtrr: size: 0x%lx  base: 0x%lx\n", size, base);
   7.544 +		return -EINVAL;
   7.545 +	}
   7.546 +	return mtrr_del_page(reg, base >> PAGE_SHIFT, size >> PAGE_SHIFT);
   7.547 +}
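
The matching release path, again only a sketch (example_release is hypothetical): when the cookie from mtrr_add() is passed, base and size are ignored, so zeros suffice:

	static void example_release(int cookie)
	{
		if (cookie >= 0)
			mtrr_del(cookie, 0, 0);
	}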
   7.548 +
   7.549 +EXPORT_SYMBOL(mtrr_add);
   7.550 +EXPORT_SYMBOL(mtrr_del);
   7.551 +
   7.552 +/* HACK ALERT!
   7.553 + * These should be called implicitly, but we can't yet until all the initcall
   7.554 + * stuff is done...
   7.555 + */
   7.556 +extern void amd_init_mtrr(void);
   7.557 +extern void cyrix_init_mtrr(void);
   7.558 +extern void centaur_init_mtrr(void);
   7.559 +
   7.560 +static void __init init_ifs(void)
   7.561 +{
   7.562 +	amd_init_mtrr();
   7.563 +	cyrix_init_mtrr();
   7.564 +	centaur_init_mtrr();
   7.565 +}
   7.566 +
   7.567 +static void __init init_other_cpus(void)
   7.568 +{
   7.569 +	if (use_intel())
   7.570 +		get_mtrr_state();
   7.571 +
   7.572 +	/* bring up the other processors */
   7.573 +	set_mtrr(~0U,0,0,0);
   7.574 +
   7.575 +	if (use_intel()) {
   7.576 +		finalize_mtrr_state();
   7.577 +		mtrr_state_warn();
   7.578 +	}
   7.579 +}
   7.580 +
   7.581 +
   7.582 +struct mtrr_value {
   7.583 +	mtrr_type	ltype;
   7.584 +	unsigned long	lbase;
   7.585 +	unsigned int	lsize;
   7.586 +};
   7.587 +
   7.588 +/**
   7.589 + * mtrr_init - initialize mtrrs on the boot CPU
   7.590 + *
    7.591 + * This needs to be called early, before any of the other CPUs are
   7.592 + * initialized (i.e. before smp_init()).
   7.593 + * 
   7.594 + */
   7.595 +static int __init mtrr_init(void)
   7.596 +{
   7.597 +	init_ifs();
   7.598 +
   7.599 +	if (cpu_has_mtrr) {
   7.600 +		mtrr_if = &generic_mtrr_ops;
   7.601 +		size_or_mask = 0xff000000;	/* 36 bits */
   7.602 +		size_and_mask = 0x00f00000;
   7.603 +			
   7.604 +		switch (boot_cpu_data.x86_vendor) {
   7.605 +		case X86_VENDOR_AMD:
   7.606 +			/* The original Athlon docs said that
   7.607 +			   total addressable memory is 44 bits wide.
   7.608 +			   It was not really clear whether its MTRRs
   7.609 +			   follow this or not. (Read: 44 or 36 bits).
   7.610 +			   However, "x86-64_overview.pdf" explicitly
   7.611 +			   states that "previous implementations support
   7.612 +			   36 bit MTRRs" and also provides a way to
   7.613 +			   query the width (in bits) of the physical
   7.614 +			   addressable memory on the Hammer family.
   7.615 +			 */
   7.616 +			if (boot_cpu_data.x86 == 15
   7.617 +			    && (cpuid_eax(0x80000000) >= 0x80000008)) {
   7.618 +				u32 phys_addr;
   7.619 +				phys_addr = cpuid_eax(0x80000008) & 0xff;
   7.620 +				size_or_mask =
   7.621 +				    ~((1 << (phys_addr - PAGE_SHIFT)) - 1);
   7.622 +				size_and_mask = ~size_or_mask & 0xfff00000;
   7.623 +			}
   7.624 +			/* Athlon MTRRs use an Intel-compatible interface for 
   7.625 +			 * getting and setting */
   7.626 +			break;
   7.627 +		case X86_VENDOR_CENTAUR:
   7.628 +			if (boot_cpu_data.x86 == 6) {
    7.629 +				/* VIA C3 (Centaur family 6) has Intel-style MTRRs, but doesn't support PAE */
   7.630 +				size_or_mask = 0xfff00000;	/* 32 bits */
   7.631 +				size_and_mask = 0;
   7.632 +			}
   7.633 +			break;
   7.634 +		
   7.635 +		default:
   7.636 +			break;
   7.637 +		}
   7.638 +	} else {
   7.639 +		switch (boot_cpu_data.x86_vendor) {
   7.640 +		case X86_VENDOR_AMD:
   7.641 +			if (cpu_has_k6_mtrr) {
   7.642 +				/* Pre-Athlon (K6) AMD CPU MTRRs */
   7.643 +				mtrr_if = mtrr_ops[X86_VENDOR_AMD];
   7.644 +				size_or_mask = 0xfff00000;	/* 32 bits */
   7.645 +				size_and_mask = 0;
   7.646 +			}
   7.647 +			break;
   7.648 +		case X86_VENDOR_CENTAUR:
   7.649 +			if (cpu_has_centaur_mcr) {
   7.650 +				mtrr_if = mtrr_ops[X86_VENDOR_CENTAUR];
   7.651 +				size_or_mask = 0xfff00000;	/* 32 bits */
   7.652 +				size_and_mask = 0;
   7.653 +			}
   7.654 +			break;
   7.655 +		case X86_VENDOR_CYRIX:
   7.656 +			if (cpu_has_cyrix_arr) {
   7.657 +				mtrr_if = mtrr_ops[X86_VENDOR_CYRIX];
   7.658 +				size_or_mask = 0xfff00000;	/* 32 bits */
   7.659 +				size_and_mask = 0;
   7.660 +			}
   7.661 +			break;
   7.662 +		default:
   7.663 +			break;
   7.664 +		}
   7.665 +	}
   7.666 +	printk(KERN_INFO "mtrr: v%s\n",MTRR_VERSION);
   7.667 +
   7.668 +	if (mtrr_if) {
   7.669 +		set_num_var_ranges();
   7.670 +		init_table();
   7.671 +		init_other_cpus();
   7.672 +		return 0;
   7.673 +	}
   7.674 +	return -ENXIO;
   7.675 +}
   7.676 +
   7.677 +__initcall(mtrr_init);
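
To make the width-mask computation in mtrr_init() concrete, a worked example (not from the changeset) for a hypothetical CPU reporting a 40-bit physical address width via CPUID 0x80000008:

	/* phys_addr = 40, PAGE_SHIFT = 12:
	 *   size_or_mask  = ~((1 << (40 - 12)) - 1) = ~0x0fffffff = 0xf0000000
	 *   size_and_mask = ~0xf0000000 & 0xfff00000 = 0x0ff00000
	 * Any page-frame number with bits set in size_or_mask is then rejected
	 * by the "base or size exceeds the MTRR width" check in mtrr_add_page(). */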
     8.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     8.2 +++ b/xen/arch/x86/mtrr/mtrr.h	Mon Nov 29 10:10:30 2004 +0000
     8.3 @@ -0,0 +1,99 @@
     8.4 +/*
     8.5 + * local mtrr defines.
     8.6 + */
     8.7 +
     8.8 +#ifndef TRUE
     8.9 +#define TRUE  1
    8.10 +#define FALSE 0
    8.11 +#endif
    8.12 +
    8.13 +#define MTRRcap_MSR     0x0fe
    8.14 +#define MTRRdefType_MSR 0x2ff
    8.15 +
    8.16 +#define MTRRphysBase_MSR(reg) (0x200 + 2 * (reg))
    8.17 +#define MTRRphysMask_MSR(reg) (0x200 + 2 * (reg) + 1)
    8.18 +
    8.19 +#define NUM_FIXED_RANGES 88
    8.20 +#define MTRRfix64K_00000_MSR 0x250
    8.21 +#define MTRRfix16K_80000_MSR 0x258
    8.22 +#define MTRRfix16K_A0000_MSR 0x259
    8.23 +#define MTRRfix4K_C0000_MSR 0x268
    8.24 +#define MTRRfix4K_C8000_MSR 0x269
    8.25 +#define MTRRfix4K_D0000_MSR 0x26a
    8.26 +#define MTRRfix4K_D8000_MSR 0x26b
    8.27 +#define MTRRfix4K_E0000_MSR 0x26c
    8.28 +#define MTRRfix4K_E8000_MSR 0x26d
    8.29 +#define MTRRfix4K_F0000_MSR 0x26e
    8.30 +#define MTRRfix4K_F8000_MSR 0x26f
    8.31 +
    8.32 +#define MTRR_CHANGE_MASK_FIXED     0x01
    8.33 +#define MTRR_CHANGE_MASK_VARIABLE  0x02
    8.34 +#define MTRR_CHANGE_MASK_DEFTYPE   0x04
    8.35 +
    8.36 +/* In the Intel processor's MTRR interface, the MTRR type is always held in
     8.37 +   an 8-bit field: */
    8.38 +typedef u8 mtrr_type;
    8.39 +
    8.40 +struct mtrr_ops {
    8.41 +	u32	vendor;
    8.42 +	u32	use_intel_if;
    8.43 +	void	(*init)(void);
    8.44 +	void	(*set)(unsigned int reg, unsigned long base,
    8.45 +		       unsigned long size, mtrr_type type);
    8.46 +	void	(*set_all)(void);
    8.47 +
    8.48 +	void	(*get)(unsigned int reg, unsigned long *base,
    8.49 +		       unsigned int *size, mtrr_type * type);
    8.50 +	int	(*get_free_region) (unsigned long base, unsigned long size);
    8.51 +
    8.52 +	int	(*validate_add_page)(unsigned long base, unsigned long size,
    8.53 +				     unsigned int type);
    8.54 +	int	(*have_wrcomb)(void);
    8.55 +};
    8.56 +
    8.57 +extern int generic_get_free_region(unsigned long base, unsigned long size);
    8.58 +extern int generic_validate_add_page(unsigned long base, unsigned long size,
    8.59 +				     unsigned int type);
    8.60 +
    8.61 +extern struct mtrr_ops generic_mtrr_ops;
    8.62 +
    8.63 +extern int generic_have_wrcomb(void);
    8.64 +extern int positive_have_wrcomb(void);
    8.65 +
    8.66 +/* library functions for processor-specific routines */
    8.67 +struct set_mtrr_context {
    8.68 +	unsigned long flags;
    8.69 +	unsigned long deftype_lo;
    8.70 +	unsigned long deftype_hi;
    8.71 +	unsigned long cr4val;
    8.72 +	unsigned long ccr3;
    8.73 +};
    8.74 +
    8.75 +struct mtrr_var_range {
    8.76 +	unsigned long base_lo;
    8.77 +	unsigned long base_hi;
    8.78 +	unsigned long mask_lo;
    8.79 +	unsigned long mask_hi;
    8.80 +};
    8.81 +
    8.82 +void set_mtrr_done(struct set_mtrr_context *ctxt);
    8.83 +void set_mtrr_cache_disable(struct set_mtrr_context *ctxt);
    8.84 +void set_mtrr_prepare_save(struct set_mtrr_context *ctxt);
    8.85 +
    8.86 +void get_mtrr_state(void);
    8.87 +
    8.88 +extern void set_mtrr_ops(struct mtrr_ops * ops);
    8.89 +
    8.90 +extern u32 size_or_mask, size_and_mask;
    8.91 +extern struct mtrr_ops * mtrr_if;
    8.92 +
    8.93 +#define is_cpu(vnd)	(mtrr_if && mtrr_if->vendor == X86_VENDOR_##vnd)
    8.94 +#define use_intel()	(mtrr_if && mtrr_if->use_intel_if == 1)
    8.95 +
    8.96 +extern unsigned int num_var_ranges;
    8.97 +
    8.98 +void finalize_mtrr_state(void);
    8.99 +void mtrr_state_warn(void);
   8.100 +char *mtrr_attrib_to_str(int x);
   8.101 +
   8.102 +extern char * mtrr_if_name[];
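
For orientation, a hedged sketch of how a vendor back-end plugs into this ops table; every example_* name is hypothetical, while the generic_*/positive_* helpers and set_mtrr_ops() are the ones declared above:

	static void example_set(unsigned int reg, unsigned long base,
				unsigned long size, mtrr_type type)
	{
		/* write this vendor's range registers for slot 'reg' */
	}

	static void example_get(unsigned int reg, unsigned long *base,
				unsigned int *size, mtrr_type *type)
	{
		*base = 0; *size = 0; *type = 0;	/* stub: slot unused */
	}

	static struct mtrr_ops example_mtrr_ops = {
		.vendor            = X86_VENDOR_SIS,	/* placeholder; must be < X86_VENDOR_NUM */
		.use_intel_if      = 0,
		.set               = example_set,
		.get               = example_get,
		.get_free_region   = generic_get_free_region,
		.validate_add_page = generic_validate_add_page,
		.have_wrcomb       = positive_have_wrcomb,
	};

	void __init example_init_mtrr(void)
	{
		set_mtrr_ops(&example_mtrr_ops);
	}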
     9.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     9.2 +++ b/xen/arch/x86/mtrr/state.c	Mon Nov 29 10:10:30 2004 +0000
     9.3 @@ -0,0 +1,78 @@
     9.4 +#include <xen/mm.h>
     9.5 +#include <xen/init.h>
     9.6 +#include <asm/io.h>
     9.7 +#include <asm/mtrr.h>
     9.8 +#include <asm/msr.h>
     9.9 +#include "mtrr.h"
    9.10 +
    9.11 +
    9.12 +/*  Put the processor into a state where MTRRs can be safely set  */
    9.13 +void set_mtrr_prepare_save(struct set_mtrr_context *ctxt)
    9.14 +{
    9.15 +	unsigned int cr0;
    9.16 +
    9.17 +	/*  Disable interrupts locally  */
    9.18 +	local_irq_save(ctxt->flags);
    9.19 +
    9.20 +	if (use_intel() || is_cpu(CYRIX)) {
    9.21 +
    9.22 +		/*  Save value of CR4 and clear Page Global Enable (bit 7)  */
    9.23 +		if ( cpu_has_pge ) {
    9.24 +			ctxt->cr4val = read_cr4();
     9.25 +			write_cr4(ctxt->cr4val & ~(1UL << 7)); /* clear PGE only */
    9.26 +		}
    9.27 +
     9.28 +		/*  Disable and flush caches; note that the CR4 write above has
     9.29 +		    already flushed the TLBs where PGE is supported  */
    9.30 +		cr0 = read_cr0() | 0x40000000;
    9.31 +		wbinvd();
    9.32 +		write_cr0(cr0);
    9.33 +		wbinvd();
    9.34 +
    9.35 +		if (use_intel())
    9.36 +			/*  Save MTRR state */
    9.37 +			rdmsr(MTRRdefType_MSR, ctxt->deftype_lo, ctxt->deftype_hi);
    9.38 +		else
     9.39 +			/* Cyrix ARRs - everything else was excluded at the top */
    9.40 +			ctxt->ccr3 = getCx86(CX86_CCR3);
    9.41 +	}
    9.42 +}
    9.43 +
    9.44 +void set_mtrr_cache_disable(struct set_mtrr_context *ctxt)
    9.45 +{
    9.46 +	if (use_intel()) 
    9.47 +		/*  Disable MTRRs, and set the default type to uncached  */
    9.48 +		wrmsr(MTRRdefType_MSR, ctxt->deftype_lo & 0xf300UL,
    9.49 +		      ctxt->deftype_hi);
    9.50 +	else if (is_cpu(CYRIX))
     9.51 +		/* Cyrix ARRs - everything else was excluded at the top */
    9.52 +		setCx86(CX86_CCR3, (ctxt->ccr3 & 0x0f) | 0x10);
    9.53 +}
    9.54 +
    9.55 +/*  Restore the processor after a set_mtrr_prepare  */
    9.56 +void set_mtrr_done(struct set_mtrr_context *ctxt)
    9.57 +{
    9.58 +	if (use_intel() || is_cpu(CYRIX)) {
    9.59 +
     9.60 +		/*  Flush caches; the CR4 restore below flushes TLBs where PGE is set  */
    9.61 +		wbinvd();
    9.62 +
    9.63 +		/*  Restore MTRRdefType  */
    9.64 +		if (use_intel())
    9.65 +			/* Intel (P6) standard MTRRs */
    9.66 +			wrmsr(MTRRdefType_MSR, ctxt->deftype_lo, ctxt->deftype_hi);
    9.67 +		else
    9.68 +			/* Cyrix ARRs - everything else was excluded at the top */
    9.69 +			setCx86(CX86_CCR3, ctxt->ccr3);
    9.70 +		
    9.71 +		/*  Enable caches  */
    9.72 +		write_cr0(read_cr0() & 0xbfffffff);
    9.73 +
    9.74 +		/*  Restore value of CR4  */
    9.75 +		if ( cpu_has_pge )
    9.76 +			write_cr4(ctxt->cr4val);
    9.77 +	}
    9.78 +	/*  Re-enable interrupts locally (if enabled previously)  */
    9.79 +	local_irq_restore(ctxt->flags);
    9.80 +}
    9.81 +
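
These three helpers form a bracketed protocol around any MTRR update. A hedged sketch of the calling sequence (example_program_one_range is hypothetical; the real callers are the per-vendor set routines):

	static void example_program_one_range(unsigned int reg, unsigned long base,
					      unsigned long size, mtrr_type type)
	{
		struct set_mtrr_context ctxt;

		set_mtrr_prepare_save(&ctxt);	/* irqs off, caches flushed/disabled */
		set_mtrr_cache_disable(&ctxt);	/* MTRRs off (or Cyrix ARRs unlocked) */
		if (mtrr_if)
			mtrr_if->set(reg, base, size, type);
		set_mtrr_done(&ctxt);		/* caches, CR4 and irqs restored */
	}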
    10.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    10.2 +++ b/xen/include/asm-x86/mtrr.h	Mon Nov 29 10:10:30 2004 +0000
    10.3 @@ -0,0 +1,22 @@
    10.4 +#ifndef __ASM_X86_MTRR_H__
    10.5 +#define __ASM_X86_MTRR_H__
    10.6 +
    10.7 +#include <xen/config.h>
    10.8 +
    10.9 +/* These are the region types. They match the architectural specification. */
   10.10 +#define MTRR_TYPE_UNCACHABLE 0
   10.11 +#define MTRR_TYPE_WRCOMB     1
   10.12 +#define MTRR_TYPE_WRTHROUGH  4
   10.13 +#define MTRR_TYPE_WRPROT     5
   10.14 +#define MTRR_TYPE_WRBACK     6
   10.15 +#define MTRR_NUM_TYPES       7
   10.16 +
   10.17 +extern int mtrr_add(unsigned long base, unsigned long size,
   10.18 +                    unsigned int type, char increment);
   10.19 +extern int mtrr_add_page(unsigned long base, unsigned long size,
   10.20 +                         unsigned int type, char increment);
   10.21 +extern int mtrr_del(int reg, unsigned long base, unsigned long size);
   10.22 +extern int mtrr_del_page(int reg, unsigned long base, unsigned long size);
   10.23 +extern void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi);
   10.24 +
   10.25 +#endif /* __ASM_X86_MTRR_H__ */
    11.1 --- a/xen/include/asm-x86/processor.h	Mon Nov 29 09:19:52 2004 +0000
    11.2 +++ b/xen/include/asm-x86/processor.h	Mon Nov 29 10:10:30 2004 +0000
    11.3 @@ -32,6 +32,7 @@
    11.4  #define X86_VENDOR_TRANSMETA 7
    11.5  #define X86_VENDOR_NSC 8
    11.6  #define X86_VENDOR_SIS 9
    11.7 +#define X86_VENDOR_NUM 10
    11.8  #define X86_VENDOR_UNKNOWN 0xff
    11.9  
   11.10  /*
   11.11 @@ -255,6 +256,16 @@ static inline unsigned int cpuid_edx(uns
   11.12  #define write_cr0(x) \
   11.13  	__asm__("mov"__OS" %0,%%cr0": :"r" (x));
   11.14  
   11.15 +#define read_cr4() ({ \
   11.16 +	unsigned int __dummy; \
   11.17 +	__asm__( \
   11.18 +		"movl %%cr4,%0\n\t" \
   11.19 +		:"=r" (__dummy)); \
   11.20 +	__dummy; \
   11.21 +})
   11.22 +
   11.23 +#define write_cr4(x) \
   11.24 +	__asm__("movl %0,%%cr4": :"r" (x));
   11.25  
   11.26  /*
   11.27   * Save the cr4 feature set we're using (ie
   11.28 @@ -284,6 +295,37 @@ static inline void clear_in_cr4 (unsigne
   11.29              :"ax");
   11.30  }
   11.31  
   11.32 +/*
   11.33 + *      NSC/Cyrix CPU configuration register indexes
   11.34 + */
   11.35 +
   11.36 +#define CX86_PCR0 0x20
   11.37 +#define CX86_GCR  0xb8
   11.38 +#define CX86_CCR0 0xc0
   11.39 +#define CX86_CCR1 0xc1
   11.40 +#define CX86_CCR2 0xc2
   11.41 +#define CX86_CCR3 0xc3
   11.42 +#define CX86_CCR4 0xe8
   11.43 +#define CX86_CCR5 0xe9
   11.44 +#define CX86_CCR6 0xea
   11.45 +#define CX86_CCR7 0xeb
   11.46 +#define CX86_PCR1 0xf0
   11.47 +#define CX86_DIR0 0xfe
   11.48 +#define CX86_DIR1 0xff
   11.49 +#define CX86_ARR_BASE 0xc4
   11.50 +#define CX86_RCR_BASE 0xdc
   11.51 +
   11.52 +/*
   11.53 + *      NSC/Cyrix CPU indexed register access macros
   11.54 + */
   11.55 +
   11.56 +#define getCx86(reg) ({ outb((reg), 0x22); inb(0x23); })
   11.57 +
   11.58 +#define setCx86(reg, data) do { \
   11.59 +	outb((reg), 0x22); \
   11.60 +	outb((data), 0x23); \
   11.61 +} while (0)
   11.62 +
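
A usage sketch for these accessors, mirroring what state.c does above (the helper name is made up): set MAPEN (CCR3 bit 4) so the ARR/RCR registers become accessible, and let the caller restore the saved value to re-lock them:

	static unsigned char example_cyrix_unlock_arrs(void)
	{
		unsigned char ccr3 = getCx86(CX86_CCR3);
		setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10);	/* MAPEN = 1 */
		return ccr3;	/* restore with setCx86(CX86_CCR3, saved) */
	}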
   11.63  #define IOBMP_BYTES             8192
   11.64  #define IOBMP_BYTES_PER_SELBIT  (IOBMP_BYTES / 64)
   11.65  #define IOBMP_BITS_PER_SELBIT   (IOBMP_BYTES_PER_SELBIT * 8)