ia64/xen-unstable

changeset 3203:c23dd7ec1f54

bitkeeper revision 1.1159.1.472 (41ab49ab0UJYMrnMqSqKOFhhX2ypGw)

Merge arcadians.cl.cam.ac.uk:/auto/groups/xeno-xenod/BK/xen-unstable.bk
into arcadians.cl.cam.ac.uk:/auto/groups/xeno/users/cl349/BK/xen.bk-smp
author cl349@arcadians.cl.cam.ac.uk
date Mon Nov 29 16:09:15 2004 +0000 (2004-11-29)
parents ddbbd48e3254 2e64fe4ea360
children 17888d1a1210
files .rootkeys buildconfigs/Rules.mk docs/src/user.tex linux-2.4.28-xen-sparse/arch/xen/kernel/ldt.c linux-2.4.28-xen-sparse/include/asm-xen/pgtable.h linux-2.6.9-xen-sparse/arch/xen/configs/xen0_defconfig linux-2.6.9-xen-sparse/arch/xen/i386/Kconfig linux-2.6.9-xen-sparse/arch/xen/i386/kernel/cpu/Makefile linux-2.6.9-xen-sparse/arch/xen/i386/kernel/cpu/mtrr/Makefile linux-2.6.9-xen-sparse/arch/xen/i386/kernel/cpu/mtrr/main.c linux-2.6.9-xen-sparse/arch/xen/i386/kernel/ldt.c linux-2.6.9-xen-sparse/arch/xen/i386/kernel/setup.c linux-2.6.9-xen-sparse/arch/xen/i386/mm/init.c linux-2.6.9-xen-sparse/arch/xen/i386/mm/ioremap.c linux-2.6.9-xen-sparse/arch/xen/i386/mm/pgtable.c linux-2.6.9-xen-sparse/drivers/xen/blkfront/blkfront.c linux-2.6.9-xen-sparse/drivers/xen/netfront/netfront.c linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/io.h linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/mach-xen/setup_arch_post.h linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/pgtable.h netbsd-2.0-xen-sparse/nbconfig-xen netbsd-2.0-xen-sparse/nbmake-xen patches/linux-2.6.9/agpgart.patch patches/linux-2.6.9/drm.patch patches/linux-2.6.9/nettel.patch xen/arch/x86/Makefile xen/arch/x86/dom0_ops.c xen/arch/x86/mtrr/amd.c xen/arch/x86/mtrr/centaur.c xen/arch/x86/mtrr/cyrix.c xen/arch/x86/mtrr/generic.c xen/arch/x86/mtrr/main.c xen/arch/x86/mtrr/mtrr.h xen/arch/x86/mtrr/state.c xen/include/asm-x86/mtrr.h xen/include/asm-x86/processor.h xen/include/public/dom0_ops.h
line diff
     1.1 --- a/.rootkeys	Mon Nov 29 16:03:16 2004 +0000
     1.2 +++ b/.rootkeys	Mon Nov 29 16:09:15 2004 +0000
     1.3 @@ -138,6 +138,8 @@ 40f56238u2CJdXNpjsZgHBxeVyY-2g linux-2.6
     1.4  40f56238eczveJ86k_4hNxCLRQIF-g linux-2.6.9-xen-sparse/arch/xen/i386/kernel/Makefile
     1.5  40f56238rXVTJQKbBuXXLH52qEArcg linux-2.6.9-xen-sparse/arch/xen/i386/kernel/cpu/Makefile
     1.6  40f562385s4lr6Zg92gExe7UQ4A76Q linux-2.6.9-xen-sparse/arch/xen/i386/kernel/cpu/common.c
     1.7 +41ab440bnpxZdWShZrGgM9pPaz5rmA linux-2.6.9-xen-sparse/arch/xen/i386/kernel/cpu/mtrr/Makefile
     1.8 +41ab440bBKWz-aEOEojU4PAMXe3Ppg linux-2.6.9-xen-sparse/arch/xen/i386/kernel/cpu/mtrr/main.c
     1.9  40f56238XDtHSijkAFlbv1PT8Bhw_Q linux-2.6.9-xen-sparse/arch/xen/i386/kernel/entry.S
    1.10  40f56238bnvciAuyzAiMkdzGErYt1A linux-2.6.9-xen-sparse/arch/xen/i386/kernel/head.S
    1.11  40f58a0d31M2EkuPbG94ns_nOi0PVA linux-2.6.9-xen-sparse/arch/xen/i386/kernel/i386_ksyms.c
    1.12 @@ -670,6 +672,13 @@ 3ddb79bcKIkRR0kqWaJhe5VUDkMdxg xen/arch/
    1.13  3ddb79bdqfIcjkz_h9Hvtp8Tk_19Zw xen/arch/x86/irq.c
    1.14  40ec29ffuOa1ZvmJHzFKyZn4k_RcXg xen/arch/x86/memory.c
    1.15  3ddb79bdS4UeWWXDH-FaBKqcpMFcnw xen/arch/x86/mpparse.c
    1.16 +41aaf566Z4sTDgJ77eEg0TzzQ1ka6Q xen/arch/x86/mtrr/amd.c
    1.17 +41aaf566TOpOBXT00wwQGUh20f1rlA xen/arch/x86/mtrr/centaur.c
    1.18 +41aaf566yhr0zKYnGVSOQpkWMM0Kiw xen/arch/x86/mtrr/cyrix.c
    1.19 +41aaf567t3hFKsyfEFoy3KAnB-bj8w xen/arch/x86/mtrr/generic.c
    1.20 +41aaf567tqrKGSTDK8OVeAbpeoccPw xen/arch/x86/mtrr/main.c
    1.21 +41aaf567a36esU-rUK7twPiv-yTFyw xen/arch/x86/mtrr/mtrr.h
    1.22 +41aaf567DcTL6pqVtLZJI5cSryyA1A xen/arch/x86/mtrr/state.c
    1.23  3f12cff65EV3qOG2j37Qm0ShgvXGRw xen/arch/x86/nmi.c
    1.24  3ddb79bdHe6_Uij4-glW91vInNtBYQ xen/arch/x86/pci-irq.c
    1.25  3ddb79bcZ_2FxINljqNSkqa17ISyJw xen/arch/x86/pci-pc.c
    1.26 @@ -805,6 +814,7 @@ 3ddb79c3I98vWcQR8xEo34JMJ4Ahyw xen/inclu
    1.27  40ec25fd7cSvbP7Biw91zaU_g0xsEQ xen/include/asm-x86/mm.h
    1.28  3ddb79c3n_UbPuxlkNxvvLycClIkxA xen/include/asm-x86/mpspec.h
    1.29  3ddb79c2wa0dA_LGigxOelSGbJ284Q xen/include/asm-x86/msr.h
    1.30 +41aaf567Mi3OishhvrCtET1y-mxQBg xen/include/asm-x86/mtrr.h
    1.31  41a61536MFhNalgbVmYGXAhQsPTZNw xen/include/asm-x86/multicall.h
    1.32  3ddb79c3xjYnrv5t3VqYlR4tNEOl4Q xen/include/asm-x86/page.h
    1.33  3ddb79c3ysKUbxZuwKBRK3WXU2TlEg xen/include/asm-x86/pci.h
     2.1 --- a/buildconfigs/Rules.mk	Mon Nov 29 16:03:16 2004 +0000
     2.2 +++ b/buildconfigs/Rules.mk	Mon Nov 29 16:09:15 2004 +0000
     2.3 @@ -48,6 +48,9 @@ pristine-%: %.tar.bz2
     2.4  	mv tmp-$(@F)/* $@
     2.5  	touch $@ # update timestamp to avoid rebuild
     2.6  	@rm -rf tmp-$(@F)
     2.7 +	[ -d patches/$* ] && \
     2.8 +	  for i in patches/$*/*.patch ; do ( cd $@ ; patch -p1 <../$$i ) ; done || \
     2.9 +	  true
    2.10  
    2.11  %-build:
    2.12  	$(MAKE) -f buildconfigs/mk.$* build
     3.1 --- a/docs/src/user.tex	Mon Nov 29 16:03:16 2004 +0000
     3.2 +++ b/docs/src/user.tex	Mon Nov 29 16:09:15 2004 +0000
     3.3 @@ -707,7 +707,9 @@ culminating in a login prompt.
     3.4  Ttylinux is a very small Linux distribution, designed to require very
     3.5  few resources.  We will use it as a concrete example of how to start a
     3.6  Xen domain.  Most users will probably want to install a full-featured
     3.7 -distribution once they have mastered the basics.
     3.8 +distribution once they have mastered the basics\footnote{ttylinux is
     3.9 +maintained by Pascal Schmidt. You can download source packages from
    3.10 +the distribution's home page: {\tt http://www.minimalinux.org/ttylinux/}}.
    3.11  
    3.12  \begin{enumerate}
    3.13  \item Download and extract the ttylinux disk image from the Files
     4.1 --- a/linux-2.4.28-xen-sparse/arch/xen/kernel/ldt.c	Mon Nov 29 16:03:16 2004 +0000
     4.2 +++ b/linux-2.4.28-xen-sparse/arch/xen/kernel/ldt.c	Mon Nov 29 16:09:15 2004 +0000
     4.3 @@ -179,7 +179,7 @@ static int write_ldt(void * ptr, unsigne
     4.4  {
     4.5  	struct mm_struct * mm = current->mm;
     4.6  	__u32 entry_1, entry_2, *lp;
     4.7 -	unsigned long phys_lp;
     4.8 +	unsigned long mach_lp;
     4.9  	int error;
    4.10  	struct modify_ldt_ldt_s ldt_info;
    4.11  
    4.12 @@ -208,7 +208,7 @@ static int write_ldt(void * ptr, unsigne
    4.13  	}
    4.14  
    4.15  	lp = (__u32 *) ((ldt_info.entry_number << 3) + (char *) mm->context.ldt);
    4.16 -	phys_lp = arbitrary_virt_to_phys(lp);
    4.17 +	mach_lp = arbitrary_virt_to_machine(lp);
    4.18  
    4.19     	/* Allow LDTs to be cleared by the user. */
    4.20     	if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
    4.21 @@ -241,7 +241,7 @@ static int write_ldt(void * ptr, unsigne
    4.22  
    4.23  	/* Install the new entry ...  */
    4.24  install:
    4.25 -	error = HYPERVISOR_update_descriptor(phys_lp, entry_1, entry_2);
    4.26 +	error = HYPERVISOR_update_descriptor(mach_lp, entry_1, entry_2);
    4.27  
    4.28  out_unlock:
    4.29  	up(&mm->context.sem);
     5.1 --- a/linux-2.4.28-xen-sparse/include/asm-xen/pgtable.h	Mon Nov 29 16:03:16 2004 +0000
     5.2 +++ b/linux-2.4.28-xen-sparse/include/asm-xen/pgtable.h	Mon Nov 29 16:09:15 2004 +0000
     5.3 @@ -350,7 +350,7 @@ static inline void make_pages_writable(v
     5.4      }
     5.5  }
     5.6  
     5.7 -static inline unsigned long arbitrary_virt_to_phys(void *va)
     5.8 +static inline unsigned long arbitrary_virt_to_machine(void *va)
     5.9  {
    5.10      pgd_t *pgd = pgd_offset_k((unsigned long)va);
    5.11      pmd_t *pmd = pmd_offset(pgd, (unsigned long)va);
     6.1 --- a/linux-2.6.9-xen-sparse/arch/xen/configs/xen0_defconfig	Mon Nov 29 16:03:16 2004 +0000
     6.2 +++ b/linux-2.6.9-xen-sparse/arch/xen/configs/xen0_defconfig	Mon Nov 29 16:09:15 2004 +0000
     6.3 @@ -112,6 +112,7 @@ CONFIG_X86_CPUID=y
     6.4  # CONFIG_EDD is not set
     6.5  CONFIG_NOHIGHMEM=y
     6.6  # CONFIG_HIGHMEM4G is not set
     6.7 +CONFIG_MTRR=y
     6.8  CONFIG_HAVE_DEC_LOCK=y
     6.9  # CONFIG_REGPARM is not set
    6.10  
     7.1 --- a/linux-2.6.9-xen-sparse/arch/xen/i386/Kconfig	Mon Nov 29 16:03:16 2004 +0000
     7.2 +++ b/linux-2.6.9-xen-sparse/arch/xen/i386/Kconfig	Mon Nov 29 16:09:15 2004 +0000
     7.3 @@ -561,6 +561,11 @@ config HAVE_ARCH_BOOTMEM_NODE
     7.4  #	  low memory.  Setting this option will put user-space page table
     7.5  #	  entries in high memory.
     7.6  
     7.7 +config MTRR
     7.8 +	bool
     7.9 +	depends on XEN_PRIVILEGED_GUEST
    7.10 +	default y
    7.11 +
    7.12  #config MTRR
    7.13  #	 bool "MTRR (Memory Type Range Register) support"
    7.14  #	 ---help---
     8.1 --- a/linux-2.6.9-xen-sparse/arch/xen/i386/kernel/cpu/Makefile	Mon Nov 29 16:03:16 2004 +0000
     8.2 +++ b/linux-2.6.9-xen-sparse/arch/xen/i386/kernel/cpu/Makefile	Mon Nov 29 16:09:15 2004 +0000
     8.3 @@ -18,7 +18,7 @@ c-obj-y	+=	umc.o
     8.4  
     8.5  #obj-$(CONFIG_X86_MCE)	+=	../../../../i386/kernel/cpu/mcheck/
     8.6  
     8.7 -#obj-$(CONFIG_MTRR)	+= 	../../../../i386/kernel/cpu/mtrr/
     8.8 +obj-$(CONFIG_MTRR)	+= 	mtrr/
     8.9  #obj-$(CONFIG_CPU_FREQ)	+=	../../../../i386/kernel/cpu/cpufreq/
    8.10  
    8.11  c-link	:=
     9.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     9.2 +++ b/linux-2.6.9-xen-sparse/arch/xen/i386/kernel/cpu/mtrr/Makefile	Mon Nov 29 16:09:15 2004 +0000
     9.3 @@ -0,0 +1,16 @@
     9.4 +obj-y	:= main.o
     9.5 +c-obj-y	:= if.o
     9.6 +
     9.7 +c-link	:=
     9.8 +
     9.9 +$(patsubst %.o,$(obj)/%.c,$(c-obj-y) $(c-link)): $(obj)/mtrr.h
    9.10 +	@ln -fsn $(srctree)/arch/i386/kernel/cpu/mtrr/$(notdir $@) $@
    9.11 +
    9.12 +$(patsubst %.o,$(obj)/%.c,$(obj-y)): $(obj)/mtrr.h
    9.13 +
    9.14 +$(obj)/mtrr.h:
    9.15 +	@ln -fsn $(srctree)/arch/i386/kernel/cpu/mtrr/mtrr.h $@
    9.16 +
    9.17 +obj-y	+= $(c-obj-y)
    9.18 +
    9.19 +clean-files += $(patsubst %.o,%.c,$(c-obj-y) $(c-obj-) $(c-link))
    10.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    10.2 +++ b/linux-2.6.9-xen-sparse/arch/xen/i386/kernel/cpu/mtrr/main.c	Mon Nov 29 16:09:15 2004 +0000
    10.3 @@ -0,0 +1,165 @@
    10.4 +#include <linux/init.h>
    10.5 +#include <linux/proc_fs.h>
    10.6 +#include <linux/ctype.h>
    10.7 +#include <linux/module.h>
    10.8 +#include <linux/seq_file.h>
    10.9 +#include <asm/uaccess.h>
   10.10 +
   10.11 +#include <asm/mtrr.h>
   10.12 +#include "mtrr.h"
   10.13 +
   10.14 +void generic_get_mtrr(unsigned int reg, unsigned long *base,
   10.15 +		      unsigned int *size, mtrr_type * type)
   10.16 +{
   10.17 +	dom0_op_t op;
   10.18 +
   10.19 +	op.cmd = DOM0_READ_MEMTYPE;
   10.20 +	op.u.read_memtype.reg = reg;
   10.21 +	(void)HYPERVISOR_dom0_op(&op);
   10.22 +
   10.23 +	*size = op.u.read_memtype.nr_pfns;
   10.24 +	*base = op.u.read_memtype.pfn;
   10.25 +	*type = op.u.read_memtype.type;
   10.26 +}
   10.27 +
   10.28 +struct mtrr_ops generic_mtrr_ops = {
   10.29 +	.use_intel_if      = 1,
   10.30 +	.get               = generic_get_mtrr,
   10.31 +};
   10.32 +
   10.33 +struct mtrr_ops *mtrr_if = &generic_mtrr_ops;
   10.34 +unsigned int num_var_ranges;
   10.35 +unsigned int *usage_table;
   10.36 +
   10.37 +void __init set_num_var_ranges(void)
   10.38 +{
   10.39 +	dom0_op_t op;
   10.40 +
   10.41 +	for (num_var_ranges = 0; ; num_var_ranges++) {
   10.42 +		op.cmd = DOM0_READ_MEMTYPE;
   10.43 +		op.u.read_memtype.reg = num_var_ranges;
   10.44 +		if (HYPERVISOR_dom0_op(&op) != 0)
   10.45 +			break;
   10.46 +	}
   10.47 +}
   10.48 +
   10.49 +static void __init init_table(void)
   10.50 +{
   10.51 +	int i, max;
   10.52 +
   10.53 +	max = num_var_ranges;
   10.54 +	if ((usage_table = kmalloc(max * sizeof *usage_table, GFP_KERNEL))
   10.55 +	    == NULL) {
   10.56 +		printk(KERN_ERR "mtrr: could not allocate\n");
   10.57 +		return;
   10.58 +	}
   10.59 +	for (i = 0; i < max; i++)
   10.60 +		usage_table[i] = 0;
   10.61 +}
   10.62 +
   10.63 +int mtrr_add_page(unsigned long base, unsigned long size, 
   10.64 +		  unsigned int type, char increment)
   10.65 +{
   10.66 +	int error;
   10.67 +	dom0_op_t op;
   10.68 +
   10.69 +	op.cmd = DOM0_ADD_MEMTYPE;
   10.70 +	op.u.add_memtype.pfn     = base;
   10.71 +	op.u.add_memtype.nr_pfns = size;
   10.72 +	op.u.add_memtype.type    = type;
   10.73 +	if ((error = HYPERVISOR_dom0_op(&op)))
   10.74 +		return error;
   10.75 +
   10.76 +	if (increment)
   10.77 +		++usage_table[op.u.add_memtype.reg];
   10.78 +
   10.79 +	return op.u.add_memtype.reg;
   10.80 +}
   10.81 +
   10.82 +int
   10.83 +mtrr_add(unsigned long base, unsigned long size, unsigned int type,
   10.84 +	 char increment)
   10.85 +{
   10.86 +	if ((base & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1))) {
   10.87 +		printk(KERN_WARNING "mtrr: size and base must be multiples of 4 kiB\n");
   10.88 +		printk(KERN_DEBUG "mtrr: size: 0x%lx  base: 0x%lx\n", size, base);
   10.89 +		return -EINVAL;
   10.90 +	}
   10.91 +	return mtrr_add_page(base >> PAGE_SHIFT, size >> PAGE_SHIFT, type,
   10.92 +			     increment);
   10.93 +}
   10.94 +
   10.95 +int mtrr_del_page(int reg, unsigned long base, unsigned long size)
   10.96 +{
   10.97 +	int i, max;
   10.98 +	mtrr_type ltype;
   10.99 +	unsigned long lbase;
  10.100 +	unsigned int lsize;
  10.101 +	int error = -EINVAL;
  10.102 +	dom0_op_t op;
  10.103 +
  10.104 +	max = num_var_ranges;
  10.105 +	if (reg < 0) {
  10.106 +		/*  Search for existing MTRR  */
  10.107 +		for (i = 0; i < max; ++i) {
  10.108 +			mtrr_if->get(i, &lbase, &lsize, &ltype);
  10.109 +			if (lbase == base && lsize == size) {
  10.110 +				reg = i;
  10.111 +				break;
  10.112 +			}
  10.113 +		}
  10.114 +		if (reg < 0) {
  10.115 +			printk(KERN_DEBUG "mtrr: no MTRR for %lx000,%lx000 found\n", base,
  10.116 +			       size);
  10.117 +			goto out;
  10.118 +		}
  10.119 +	}
  10.120 +	if (usage_table[reg] < 1) {
  10.121 +		printk(KERN_WARNING "mtrr: reg: %d has count=0\n", reg);
  10.122 +		goto out;
  10.123 +	}
  10.124 +	if (--usage_table[reg] < 1) {
  10.125 +		op.cmd = DOM0_DEL_MEMTYPE;
  10.126 +		op.u.del_memtype.handle = 0;
  10.127 +		op.u.add_memtype.reg    = reg;
  10.128 +		(void)HYPERVISOR_dom0_op(&op);
  10.129 +	}
  10.130 +	error = reg;
  10.131 + out:
  10.132 +	return error;
  10.133 +}
  10.134 +
  10.135 +int
  10.136 +mtrr_del(int reg, unsigned long base, unsigned long size)
  10.137 +{
  10.138 +	if ((base & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1))) {
  10.139 +		printk(KERN_INFO "mtrr: size and base must be multiples of 4 kiB\n");
  10.140 +		printk(KERN_DEBUG "mtrr: size: 0x%lx  base: 0x%lx\n", size, base);
  10.141 +		return -EINVAL;
  10.142 +	}
  10.143 +	return mtrr_del_page(reg, base >> PAGE_SHIFT, size >> PAGE_SHIFT);
  10.144 +}
  10.145 +
  10.146 +EXPORT_SYMBOL(mtrr_add);
  10.147 +EXPORT_SYMBOL(mtrr_del);
  10.148 +
  10.149 +static int __init mtrr_init(void)
  10.150 +{
  10.151 +	struct cpuinfo_x86 *c = &boot_cpu_data;
  10.152 +
  10.153 +	if (!(xen_start_info.flags & SIF_PRIVILEGED))
  10.154 +		return -ENODEV;
  10.155 +
  10.156 +	if ((!cpu_has(c, X86_FEATURE_MTRR)) &&
  10.157 +	    (!cpu_has(c, X86_FEATURE_K6_MTRR)) &&
  10.158 +	    (!cpu_has(c, X86_FEATURE_CYRIX_ARR)) &&
  10.159 +	    (!cpu_has(c, X86_FEATURE_CENTAUR_MCR)))
  10.160 +		return -ENODEV;
  10.161 +
  10.162 +	set_num_var_ranges();
  10.163 +	init_table();
  10.164 +
  10.165 +	return 0;
  10.166 +}
  10.167 +
  10.168 +subsys_initcall(mtrr_init);
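Note on the hunk above: the new main.c keeps the stock Linux mtrr_add()/mtrr_del() entry points but forwards the actual range manipulation to Xen via DOM0_ADD_MEMTYPE/DOM0_DEL_MEMTYPE. A minimal sketch (not part of the changeset) of how a dom0 driver would use this interface for the usual framebuffer write-combining case; the helper names, base and size below are illustrative only:

    #include <asm/mtrr.h>      /* mtrr_add(), mtrr_del(), MTRR_TYPE_WRCOMB */

    static int fb_mtrr = -1;   /* MTRR register index returned by Xen, or -errno */

    static void fb_enable_write_combining(unsigned long base, unsigned long size)
    {
        /* base and size must be multiples of PAGE_SIZE or mtrr_add() returns
         * -EINVAL; on success this ends up as a DOM0_ADD_MEMTYPE hypercall. */
        fb_mtrr = mtrr_add(base, size, MTRR_TYPE_WRCOMB, 1);
    }

    static void fb_disable_write_combining(unsigned long base, unsigned long size)
    {
        /* drops the usage count; the final release issues DOM0_DEL_MEMTYPE */
        if (fb_mtrr >= 0)
            mtrr_del(fb_mtrr, base, size);
    }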
    11.1 --- a/linux-2.6.9-xen-sparse/arch/xen/i386/kernel/ldt.c	Mon Nov 29 16:03:16 2004 +0000
    11.2 +++ b/linux-2.6.9-xen-sparse/arch/xen/i386/kernel/ldt.c	Mon Nov 29 16:09:15 2004 +0000
    11.3 @@ -193,7 +193,7 @@ static int write_ldt(void __user * ptr, 
    11.4  {
    11.5  	struct mm_struct * mm = current->mm;
    11.6  	__u32 entry_1, entry_2, *lp;
    11.7 -	unsigned long phys_lp;
    11.8 +	unsigned long mach_lp;
    11.9  	int error;
   11.10  	struct user_desc ldt_info;
   11.11  
   11.12 @@ -222,7 +222,7 @@ static int write_ldt(void __user * ptr, 
   11.13  	}
   11.14  
   11.15  	lp = (__u32 *) ((ldt_info.entry_number << 3) + (char *) mm->context.ldt);
   11.16 -	phys_lp = arbitrary_virt_to_phys(lp);
   11.17 +	mach_lp = arbitrary_virt_to_machine(lp);
   11.18  
   11.19     	/* Allow LDTs to be cleared by the user. */
   11.20     	if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
   11.21 @@ -240,7 +240,7 @@ static int write_ldt(void __user * ptr, 
   11.22  
   11.23  	/* Install the new entry ...  */
   11.24  install:
   11.25 -	error = HYPERVISOR_update_descriptor(phys_lp, entry_1, entry_2);
   11.26 +	error = HYPERVISOR_update_descriptor(mach_lp, entry_1, entry_2);
   11.27  
   11.28  out_unlock:
   11.29  	up(&mm->context.sem);
    12.1 --- a/linux-2.6.9-xen-sparse/arch/xen/i386/kernel/setup.c	Mon Nov 29 16:03:16 2004 +0000
    12.2 +++ b/linux-2.6.9-xen-sparse/arch/xen/i386/kernel/setup.c	Mon Nov 29 16:09:15 2004 +0000
    12.3 @@ -1390,10 +1390,11 @@ void __init setup_arch(char **cmdline_p)
    12.4  	init_mm.end_data = (unsigned long) _edata;
    12.5  	init_mm.brk = (PFN_UP(__pa(xen_start_info.pt_base)) + xen_start_info.nr_pt_frames) << PAGE_SHIFT;
    12.6  
    12.7 -	code_resource.start = virt_to_phys(_text);
    12.8 -	code_resource.end = virt_to_phys(_etext)-1;
    12.9 -	data_resource.start = virt_to_phys(_etext);
   12.10 -	data_resource.end = virt_to_phys(_edata)-1;
   12.11 +	/* XEN: This is nonsense: kernel may not even be contiguous in RAM. */
   12.12 +	/*code_resource.start = virt_to_phys(_text);*/
   12.13 +	/*code_resource.end = virt_to_phys(_etext)-1;*/
   12.14 +	/*data_resource.start = virt_to_phys(_etext);*/
   12.15 +	/*data_resource.end = virt_to_phys(_edata)-1;*/
   12.16  
   12.17  	parse_cmdline_early(cmdline_p);
   12.18  
    13.1 --- a/linux-2.6.9-xen-sparse/arch/xen/i386/mm/init.c	Mon Nov 29 16:03:16 2004 +0000
    13.2 +++ b/linux-2.6.9-xen-sparse/arch/xen/i386/mm/init.c	Mon Nov 29 16:09:15 2004 +0000
    13.3 @@ -435,17 +435,24 @@ void zap_low_mappings (void)
    13.4  void __init zone_sizes_init(void)
    13.5  {
    13.6  	unsigned long zones_size[MAX_NR_ZONES] = {0, 0, 0};
    13.7 -	unsigned int max_dma, high, low;
    13.8 +	unsigned int /*max_dma,*/ high, low;
    13.9  	
   13.10 -	max_dma = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT;
   13.11 +	/*
   13.12 +	 * XEN: Our notion of "DMA memory" is fake when running over Xen.
   13.13 +	 * We simply put all RAM in the DMA zone so that those drivers which
   13.14 +	 * needlessly specify GFP_DMA do not get starved of RAM unnecessarily.
   13.15 +	 * Those drivers that *do* require lowmem are screwed anyway when
   13.16 +	 * running over Xen!
   13.17 +	 */
   13.18 +	/*max_dma = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT;*/
   13.19  	low = max_low_pfn;
   13.20  	high = highend_pfn;
   13.21  	
   13.22 -	if (low < max_dma)
   13.23 +	/*if (low < max_dma)*/
   13.24  		zones_size[ZONE_DMA] = low;
   13.25 -	else {
   13.26 -		zones_size[ZONE_DMA] = max_dma;
   13.27 -		zones_size[ZONE_NORMAL] = low - max_dma;
   13.28 +	/*else*/ {
   13.29 +		/*zones_size[ZONE_DMA] = max_dma;*/
   13.30 +		/*zones_size[ZONE_NORMAL] = low - max_dma;*/
   13.31  #ifdef CONFIG_HIGHMEM
   13.32  		zones_size[ZONE_HIGHMEM] = high - low;
   13.33  #endif
    14.1 --- a/linux-2.6.9-xen-sparse/arch/xen/i386/mm/ioremap.c	Mon Nov 29 16:03:16 2004 +0000
    14.2 +++ b/linux-2.6.9-xen-sparse/arch/xen/i386/mm/ioremap.c	Mon Nov 29 16:09:15 2004 +0000
    14.3 @@ -36,6 +36,20 @@ void __init bt_iounmap(void *addr, unsig
    14.4  
    14.5  #else
    14.6  
    14.7 +/*
    14.8 + * Does @address reside within a non-highmem page that is local to this virtual
    14.9 + * machine (i.e., not an I/O page, nor a memory page belonging to another VM).
   14.10 + * See the comment that accompanies pte_pfn() in pgtable-2level.h to understand
   14.11 + * why this works.
   14.12 + */
   14.13 +static inline int is_local_lowmem(unsigned long address)
   14.14 +{
   14.15 +	extern unsigned long max_low_pfn;
   14.16 +	unsigned long mfn = address >> PAGE_SHIFT;
   14.17 +	unsigned long pfn = mfn_to_pfn(mfn);
   14.18 +	return ((pfn < max_low_pfn) && (pfn_to_mfn(pfn) == mfn));
   14.19 +}
   14.20 +
   14.21  static inline void remap_area_pte(pte_t * pte, unsigned long address, unsigned long size,
   14.22  	unsigned long phys_addr, unsigned long flags)
   14.23  {
   14.24 @@ -140,19 +154,18 @@ void __iomem * __ioremap(unsigned long p
   14.25  	if (!size || last_addr < phys_addr)
   14.26  		return NULL;
   14.27  
   14.28 -        if (phys_addr >= 0x0 && last_addr < 0x100000)
   14.29 -                return isa_bus_to_virt(phys_addr);
   14.30 -
   14.31 +#ifdef CONFIG_XEN_PRIVILEGED_GUEST
   14.32  	/*
   14.33  	 * Don't remap the low PCI/ISA area, it's always mapped..
   14.34  	 */
   14.35 -	if (phys_addr >= 0xA0000 && last_addr < 0x100000)
   14.36 -		return (void __iomem *) phys_to_virt(phys_addr);
   14.37 +	if (phys_addr >= 0x0 && last_addr < 0x100000)
   14.38 +		return isa_bus_to_virt(phys_addr);
   14.39 +#endif
   14.40  
   14.41  	/*
   14.42  	 * Don't allow anybody to remap normal RAM that we're using..
   14.43  	 */
   14.44 -	if (machine_to_phys(phys_addr) < virt_to_phys(high_memory)) {
   14.45 +	if (is_local_lowmem(phys_addr)) {
   14.46  		char *t_addr, *t_end;
   14.47  		struct page *page;
   14.48  
   14.49 @@ -219,7 +232,7 @@ void __iomem *ioremap_nocache (unsigned 
   14.50  	/* Guaranteed to be > phys_addr, as per __ioremap() */
   14.51  	last_addr = phys_addr + size - 1;
   14.52  
   14.53 -	if (machine_to_phys(last_addr) < virt_to_phys(high_memory)) { 
   14.54 +	if (is_local_lowmem(last_addr)) { 
   14.55  		struct page *ppage = virt_to_page(bus_to_virt(phys_addr));
   14.56  		unsigned long npages;
   14.57  
   14.58 @@ -256,7 +269,7 @@ void iounmap(volatile void __iomem *addr
   14.59  		return;
   14.60  	} 
   14.61  
   14.62 -	if (p->flags && machine_to_phys(p->phys_addr) < virt_to_phys(high_memory)) { 
   14.63 +	if (p->flags && is_local_lowmem(p->phys_addr)) { 
   14.64  		change_page_attr(virt_to_page(bus_to_virt(p->phys_addr)),
   14.65  				 p->size >> PAGE_SHIFT,
   14.66  				 PAGE_KERNEL); 				 
   14.67 @@ -276,14 +289,13 @@ void __init *bt_ioremap(unsigned long ph
   14.68  	if (!size || last_addr < phys_addr)
   14.69  		return NULL;
   14.70  
   14.71 -        if (phys_addr >= 0x0 && last_addr < 0x100000)
   14.72 -                return isa_bus_to_virt(phys_addr);
   14.73 -
   14.74 +#ifdef CONFIG_XEN_PRIVILEGED_GUEST
   14.75  	/*
   14.76  	 * Don't remap the low PCI/ISA area, it's always mapped..
   14.77  	 */
   14.78 -	if (phys_addr >= 0xA0000 && last_addr < 0x100000)
   14.79 -		return phys_to_virt(phys_addr);
   14.80 +	if (phys_addr >= 0x0 && last_addr < 0x100000)
   14.81 +		return isa_bus_to_virt(phys_addr);
   14.82 +#endif
   14.83  
   14.84  	/*
   14.85  	 * Mappings have to be page-aligned
    15.1 --- a/linux-2.6.9-xen-sparse/arch/xen/i386/mm/pgtable.c	Mon Nov 29 16:03:16 2004 +0000
    15.2 +++ b/linux-2.6.9-xen-sparse/arch/xen/i386/mm/pgtable.c	Mon Nov 29 16:09:15 2004 +0000
    15.3 @@ -195,7 +195,7 @@ void pte_ctor(void *pte, kmem_cache_t *c
    15.4  
    15.5  	clear_page(pte);
    15.6  	make_page_readonly(pte);
    15.7 -	queue_pte_pin(virt_to_phys(pte));
    15.8 +	queue_pte_pin(__pa(pte));
    15.9  	flush_page_update_queue();
   15.10  }
   15.11  
   15.12 @@ -204,7 +204,7 @@ void pte_dtor(void *pte, kmem_cache_t *c
   15.13  	struct page *page = virt_to_page(pte);
   15.14  	ClearPageForeign(page);
   15.15  
   15.16 -	queue_pte_unpin(virt_to_phys(pte));
   15.17 +	queue_pte_unpin(__pa(pte));
   15.18  	make_page_writable(pte);
   15.19  	flush_page_update_queue();
   15.20  }
    16.1 --- a/linux-2.6.9-xen-sparse/drivers/xen/blkfront/blkfront.c	Mon Nov 29 16:03:16 2004 +0000
    16.2 +++ b/linux-2.6.9-xen-sparse/drivers/xen/blkfront/blkfront.c	Mon Nov 29 16:09:15 2004 +0000
    16.3 @@ -783,7 +783,7 @@ static int blkif_queue_request(unsigned 
    16.4                                 unsigned short  nr_sectors,
    16.5                                 kdev_t          device)
    16.6  {
    16.7 -    unsigned long       buffer_ma = phys_to_machine(virt_to_phys(buffer)); 
    16.8 +    unsigned long       buffer_ma = virt_to_bus(buffer);
    16.9      unsigned long       xid;
   16.10      struct gendisk     *gd;
   16.11      blkif_request_t    *req;
    17.1 --- a/linux-2.6.9-xen-sparse/drivers/xen/netfront/netfront.c	Mon Nov 29 16:03:16 2004 +0000
    17.2 +++ b/linux-2.6.9-xen-sparse/drivers/xen/netfront/netfront.c	Mon Nov 29 16:09:15 2004 +0000
    17.3 @@ -383,7 +383,7 @@ static void network_alloc_rx_buffers(str
    17.4          rx_pfn_array[i] = virt_to_machine(skb->head) >> PAGE_SHIFT;
    17.5  
    17.6  	/* Remove this page from pseudo phys map before passing back to Xen. */
    17.7 -	phys_to_machine_mapping[virt_to_phys(skb->head) >> PAGE_SHIFT] 
    17.8 +	phys_to_machine_mapping[__pa(skb->head) >> PAGE_SHIFT] 
    17.9  	    = INVALID_P2M_ENTRY;
   17.10  
   17.11          rx_mcl[i].op = __HYPERVISOR_update_va_mapping;
    18.1 --- a/linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/io.h	Mon Nov 29 16:03:16 2004 +0000
    18.2 +++ b/linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/io.h	Mon Nov 29 16:09:15 2004 +0000
    18.3 @@ -146,8 +146,8 @@ extern void bt_iounmap(void *addr, unsig
    18.4   *
    18.5   * Allow them on x86 for legacy drivers, though.
    18.6   */
    18.7 -#define virt_to_bus(_x) phys_to_machine(virt_to_phys(_x))
    18.8 -#define bus_to_virt(_x) phys_to_virt(machine_to_phys(_x))
    18.9 +#define virt_to_bus(_x) phys_to_machine(__pa(_x))
   18.10 +#define bus_to_virt(_x) __va(machine_to_phys(_x))
   18.11  
   18.12  /*
   18.13   * readX/writeX() are used to access memory mapped devices. On some
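Note on the io.h hunk above: virt_to_bus()/bus_to_virt() are redefined so that "bus" addresses handed to devices are machine addresses rather than pseudo-physical ones. A minimal sketch of the translation chain, assuming the usual Xen-sparse pfn_to_mfn() helper used elsewhere in this changeset; example_virt_to_bus() and its argument are hypothetical and only valid for directly-mapped kernel memory:

    /* returns the machine ("bus") address of a directly-mapped buffer */
    static unsigned long example_virt_to_bus(void *buf)
    {
        unsigned long pa  = __pa(buf);                    /* pseudo-physical */
        unsigned long mfn = pfn_to_mfn(pa >> PAGE_SHIFT); /* machine frame */
        return (mfn << PAGE_SHIFT) | (pa & ~PAGE_MASK);   /* == virt_to_bus(buf) */
    }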
    19.1 --- a/linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/mach-xen/setup_arch_post.h	Mon Nov 29 16:03:16 2004 +0000
    19.2 +++ b/linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/mach-xen/setup_arch_post.h	Mon Nov 29 16:09:15 2004 +0000
    19.3 @@ -28,8 +28,9 @@ void __init machine_specific_modify_cpu_
    19.4  	clear_bit(X86_FEATURE_DE, c->x86_capability);
    19.5  	clear_bit(X86_FEATURE_PSE, c->x86_capability);
    19.6  	clear_bit(X86_FEATURE_PGE, c->x86_capability);
    19.7 -	clear_bit(X86_FEATURE_MTRR, c->x86_capability);
    19.8  	clear_bit(X86_FEATURE_FXSR, c->x86_capability);
    19.9 +	if (!(xen_start_info.flags & SIF_PRIVILEGED))
   19.10 +		clear_bit(X86_FEATURE_MTRR, c->x86_capability);
   19.11  }
   19.12  
   19.13  extern void hypervisor_callback(void);
    20.1 --- a/linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/pgtable.h	Mon Nov 29 16:03:16 2004 +0000
    20.2 +++ b/linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/pgtable.h	Mon Nov 29 16:09:15 2004 +0000
    20.3 @@ -472,7 +472,7 @@ void make_page_writable(void *va);
    20.4  void make_pages_readonly(void *va, unsigned int nr);
    20.5  void make_pages_writable(void *va, unsigned int nr);
    20.6  
    20.7 -static inline unsigned long arbitrary_virt_to_phys(void *va)
    20.8 +static inline unsigned long arbitrary_virt_to_machine(void *va)
    20.9  {
   20.10  	pgd_t *pgd = pgd_offset_k((unsigned long)va);
   20.11  	pmd_t *pmd = pmd_offset(pgd, (unsigned long)va);
    21.1 --- a/netbsd-2.0-xen-sparse/nbconfig-xen	Mon Nov 29 16:03:16 2004 +0000
    21.2 +++ b/netbsd-2.0-xen-sparse/nbconfig-xen	Mon Nov 29 16:09:15 2004 +0000
    21.3 @@ -2,11 +2,17 @@
    21.4  #
    21.5  
    21.6  : ${HOS:=$(uname -s | tr /A-Z/ /a-z/)}
    21.7 -: ${HARCH:=$(uname -i 2>/dev/null || echo i386)}
    21.8 +: ${HARCH:=$(uname -m 2>/dev/null || echo i386)}
    21.9  : ${NETBSD_RELEASE:=$(basename $(cd $(dirname $0) && pwd) | sed 's/netbsd-\([0-9]\+\.[0-9]\+\).*/\1/')}
   21.10  : ${NETBSD_VERSION:=$(basename $(cd $(dirname $0) && pwd) | sed 's/netbsd-\([0-9]\+\.[0-9]\+.*\)-xen.*/\1/')}
   21.11  : ${TOPDIR:=$(cd $(dirname $0) && pwd | sed 's/\(netbsd-[0-9]\+\.[0-9]\+.*-xen[^/]*\)/\1/')}
   21.12  
   21.13 +case "$HARCH" in
   21.14 +i586|i686)
   21.15 +  HARCH=i386
   21.16 +  ;;
   21.17 +esac
   21.18 +
   21.19  TOOLDIR="$TOPDIR/../netbsd-${NETBSD_RELEASE}-tools/$HOS-$HARCH"; export TOOLDIR
   21.20  
   21.21  CONF="$1"
    22.1 --- a/netbsd-2.0-xen-sparse/nbmake-xen	Mon Nov 29 16:03:16 2004 +0000
    22.2 +++ b/netbsd-2.0-xen-sparse/nbmake-xen	Mon Nov 29 16:09:15 2004 +0000
    22.3 @@ -4,11 +4,17 @@
    22.4  #
    22.5  
    22.6  : ${HOS:=$(uname -s | tr /A-Z/ /a-z/)}
    22.7 -: ${HARCH:=$(uname -i 2>/dev/null || echo i386)}
    22.8 +: ${HARCH:=$(uname -m 2>/dev/null || echo i386)}
    22.9  : ${NETBSD_RELEASE:=$(basename $(cd $(dirname $0) && pwd) | sed 's/netbsd-\([0-9]\+\.[0-9]\+\).*/\1/')}
   22.10  : ${NETBSD_VERSION:=$(basename $(cd $(dirname $0) && pwd) | sed 's/netbsd-\([0-9]\+\.[0-9]\+.*\)-xen.*/\1/')}
   22.11  : ${TOPDIR:=$(cd $(dirname $0) && pwd | sed 's/\(netbsd-[0-9]\+\.[0-9]\+.*-xen[^/]*\)/\1/')}
   22.12  
   22.13 +case "$HARCH" in
   22.14 +i586|i686)
   22.15 +  HARCH=i386
   22.16 +  ;;
   22.17 +esac
   22.18 +
   22.19  NETBSDSRCDIR="$TOPDIR"; export NETBSDSRCDIR
   22.20  DESTDIR="$TOPDIR/root"; export DESTDIR
   22.21  unset MAKEOBJDIRPREFIX
    23.1 --- a/patches/linux-2.6.9/agpgart.patch	Mon Nov 29 16:03:16 2004 +0000
    23.2 +++ b/patches/linux-2.6.9/agpgart.patch	Mon Nov 29 16:09:15 2004 +0000
    23.3 @@ -1,5 +1,6 @@
    23.4 ---- linux-2.6.8.1/drivers/char/agp/ali-agp.c	2004-08-14 11:55:35.000000000 +0100
    23.5 -+++ linux-2.6.8.1-xen0/drivers/char/agp/ali-agp.c	2004-09-05 05:55:58.876495340 +0100
    23.6 +diff -ur linux-2.6.9/drivers/char/agp/ali-agp.c linux-2.6.9-new/drivers/char/agp/ali-agp.c
    23.7 +--- linux-2.6.9/drivers/char/agp/ali-agp.c	2004-10-18 22:54:38.000000000 +0100
    23.8 ++++ linux-2.6.9-new/drivers/char/agp/ali-agp.c	2004-11-28 19:32:03.000000000 +0000
    23.9  @@ -150,7 +150,7 @@
   23.10   	pci_read_config_dword(agp_bridge->dev, ALI_CACHE_FLUSH_CTRL, &temp);
   23.11   	pci_write_config_dword(agp_bridge->dev, ALI_CACHE_FLUSH_CTRL,
   23.12 @@ -18,8 +19,9 @@
   23.13   	agp_generic_destroy_page(addr);
   23.14   }
   23.15   
   23.16 ---- linux-2.6.8.1/drivers/char/agp/amd-k7-agp.c	2004-08-14 11:56:24.000000000 +0100
   23.17 -+++ linux-2.6.8.1-xen0/drivers/char/agp/amd-k7-agp.c	2004-09-05 05:55:58.877495108 +0100
   23.18 +diff -ur linux-2.6.9/drivers/char/agp/amd-k7-agp.c linux-2.6.9-new/drivers/char/agp/amd-k7-agp.c
   23.19 +--- linux-2.6.9/drivers/char/agp/amd-k7-agp.c	2004-10-18 22:55:36.000000000 +0100
   23.20 ++++ linux-2.6.9-new/drivers/char/agp/amd-k7-agp.c	2004-11-28 19:36:39.000000000 +0000
   23.21  @@ -43,7 +43,7 @@
   23.22   
   23.23   	SetPageReserved(virt_to_page(page_map->real));
   23.24 @@ -32,23 +34,24 @@
   23.25  @@ -152,7 +152,7 @@
   23.26   
   23.27   	agp_bridge->gatt_table_real = (u32 *)page_dir.real;
   23.28 - 	agp_bridge->gatt_table = (u32 *)page_dir.remapped;
   23.29 + 	agp_bridge->gatt_table = (u32 __iomem *)page_dir.remapped;
   23.30  -	agp_bridge->gatt_bus_addr = virt_to_phys(page_dir.real);
   23.31  +	agp_bridge->gatt_bus_addr = virt_to_bus(page_dir.real);
   23.32   
   23.33   	/* Get the address for the gart region.
   23.34   	 * This is a bus address even on the alpha, b/c its
   23.35 -@@ -166,7 +166,7 @@
   23.36 +@@ -165,7 +165,7 @@
   23.37 + 
   23.38   	/* Calculate the agp offset */
   23.39   	for (i = 0; i < value->num_entries / 1024; i++, addr += 0x00400000) {
   23.40 - 		page_dir.remapped[GET_PAGE_DIR_OFF(addr)] =
   23.41 --			virt_to_phys(amd_irongate_private.gatt_pages[i]->real);
   23.42 -+			virt_to_bus(amd_irongate_private.gatt_pages[i]->real);
   23.43 - 		page_dir.remapped[GET_PAGE_DIR_OFF(addr)] |= 0x00000001;
   23.44 +-		writel(virt_to_phys(amd_irongate_private.gatt_pages[i]->real) | 1,
   23.45 ++		writel(virt_to_bus(amd_irongate_private.gatt_pages[i]->real) | 1,
   23.46 + 			page_dir.remapped+GET_PAGE_DIR_OFF(addr));
   23.47   	}
   23.48   
   23.49 ---- linux-2.6.8.1/drivers/char/agp/amd64-agp.c	2004-08-14 11:55:47.000000000 +0100
   23.50 -+++ linux-2.6.8.1-xen0/drivers/char/agp/amd64-agp.c	2004-09-05 05:55:58.877495108 +0100
   23.51 +diff -ur linux-2.6.9/drivers/char/agp/amd64-agp.c linux-2.6.9-new/drivers/char/agp/amd64-agp.c
   23.52 +--- linux-2.6.9/drivers/char/agp/amd64-agp.c	2004-10-18 22:54:38.000000000 +0100
   23.53 ++++ linux-2.6.9-new/drivers/char/agp/amd64-agp.c	2004-11-28 19:32:03.000000000 +0000
   23.54  @@ -212,7 +212,7 @@
   23.55   
   23.56   static int amd_8151_configure(void)
   23.57 @@ -67,8 +70,9 @@
   23.58   			   amd64_aperture_sizes[bridge->aperture_size_idx].size);
   23.59   	agp_remove_bridge(bridge);
   23.60   	agp_put_bridge(bridge);
   23.61 ---- linux-2.6.8.1/drivers/char/agp/ati-agp.c	2004-08-14 11:55:48.000000000 +0100
   23.62 -+++ linux-2.6.8.1-xen0/drivers/char/agp/ati-agp.c	2004-09-05 05:55:58.877495108 +0100
   23.63 +diff -ur linux-2.6.9/drivers/char/agp/ati-agp.c linux-2.6.9-new/drivers/char/agp/ati-agp.c
   23.64 +--- linux-2.6.9/drivers/char/agp/ati-agp.c	2004-10-18 22:54:40.000000000 +0100
   23.65 ++++ linux-2.6.9-new/drivers/char/agp/ati-agp.c	2004-11-28 19:32:03.000000000 +0000
   23.66  @@ -64,7 +64,7 @@
   23.67   
   23.68   	/* CACHE_FLUSH(); */
   23.69 @@ -78,8 +82,9 @@
   23.70   					    PAGE_SIZE);
   23.71   	if (page_map->remapped == NULL || err) {
   23.72   		ClearPageReserved(virt_to_page(page_map->real));
   23.73 ---- linux-2.6.8.1/drivers/char/agp/backend.c	2004-08-14 11:55:47.000000000 +0100
   23.74 -+++ linux-2.6.8.1-xen0/drivers/char/agp/backend.c	2004-09-05 05:55:58.878494876 +0100
   23.75 +diff -ur linux-2.6.9/drivers/char/agp/backend.c linux-2.6.9-new/drivers/char/agp/backend.c
   23.76 +--- linux-2.6.9/drivers/char/agp/backend.c	2004-10-18 22:54:39.000000000 +0100
   23.77 ++++ linux-2.6.9-new/drivers/char/agp/backend.c	2004-11-28 19:32:03.000000000 +0000
   23.78  @@ -142,7 +142,7 @@
   23.79   			return -ENOMEM;
   23.80   		}
   23.81 @@ -107,9 +112,22 @@
   23.82   }
   23.83   
   23.84   static const drm_agp_t drm_agp = {
   23.85 ---- linux-2.6.8.1/drivers/char/agp/generic.c	2004-08-14 11:55:10.000000000 +0100
   23.86 -+++ linux-2.6.8.1-xen0/drivers/char/agp/generic.c	2004-09-05 05:55:58.879494644 +0100
   23.87 -@@ -127,7 +127,7 @@
   23.88 +diff -ur linux-2.6.9/drivers/char/agp/efficeon-agp.c linux-2.6.9-new/drivers/char/agp/efficeon-agp.c
   23.89 +--- linux-2.6.9/drivers/char/agp/efficeon-agp.c	2004-10-18 22:53:06.000000000 +0100
   23.90 ++++ linux-2.6.9-new/drivers/char/agp/efficeon-agp.c	2004-11-28 19:38:28.000000000 +0000
   23.91 +@@ -219,7 +219,7 @@
   23.92 + 
   23.93 + 		efficeon_private.l1_table[index] = page;
   23.94 + 
   23.95 +-		value = __pa(page) | pati | present | index;
   23.96 ++		value = virt_to_bus(page) | pati | present | index;
   23.97 + 
   23.98 + 		pci_write_config_dword(agp_bridge->dev,
   23.99 + 			EFFICEON_ATTPAGE, value);
  23.100 +diff -ur linux-2.6.9/drivers/char/agp/generic.c linux-2.6.9-new/drivers/char/agp/generic.c
  23.101 +--- linux-2.6.9/drivers/char/agp/generic.c	2004-10-18 22:53:50.000000000 +0100
  23.102 ++++ linux-2.6.9-new/drivers/char/agp/generic.c	2004-11-28 19:32:03.000000000 +0000
  23.103 +@@ -128,7 +128,7 @@
  23.104   	}
  23.105   	if (curr->page_count != 0) {
  23.106   		for (i = 0; i < curr->page_count; i++) {
  23.107 @@ -118,7 +136,7 @@
  23.108   		}
  23.109   	}
  23.110   	agp_free_key(curr->key);
  23.111 -@@ -181,7 +181,7 @@
  23.112 +@@ -182,7 +182,7 @@
  23.113   			return NULL;
  23.114   		}
  23.115   		new->memory[i] =
  23.116 @@ -127,7 +145,7 @@
  23.117   		new->page_count++;
  23.118   	}
  23.119   
  23.120 -@@ -636,6 +636,7 @@
  23.121 +@@ -637,6 +637,7 @@
  23.122   	int i;
  23.123   	void *temp;
  23.124   	struct page *page;
  23.125 @@ -135,7 +153,7 @@
  23.126   
  23.127   	/* The generic routines can't handle 2 level gatt's */
  23.128   	if (agp_bridge->driver->size_type == LVL2_APER_SIZE)
  23.129 -@@ -674,8 +675,10 @@
  23.130 +@@ -675,8 +676,10 @@
  23.131   				break;
  23.132   			}
  23.133   
  23.134 @@ -148,7 +166,7 @@
  23.135   
  23.136   			if (table == NULL) {
  23.137   				i++;
  23.138 -@@ -706,7 +709,9 @@
  23.139 +@@ -707,7 +710,9 @@
  23.140   		size = ((struct aper_size_info_fixed *) temp)->size;
  23.141   		page_order = ((struct aper_size_info_fixed *) temp)->page_order;
  23.142   		num_entries = ((struct aper_size_info_fixed *) temp)->num_entries;
  23.143 @@ -159,7 +177,7 @@
  23.144   	}
  23.145   
  23.146   	if (table == NULL)
  23.147 -@@ -721,7 +726,7 @@
  23.148 +@@ -722,7 +727,7 @@
  23.149   	agp_gatt_table = (void *)table;
  23.150   
  23.151   	agp_bridge->driver->cache_flush();
  23.152 @@ -168,7 +186,7 @@
  23.153   					(PAGE_SIZE * (1 << page_order)));
  23.154   	agp_bridge->driver->cache_flush();
  23.155   
  23.156 -@@ -729,11 +734,12 @@
  23.157 +@@ -730,11 +735,12 @@
  23.158   		for (page = virt_to_page(table); page <= virt_to_page(table_end); page++)
  23.159   			ClearPageReserved(page);
  23.160   
  23.161 @@ -183,7 +201,7 @@
  23.162   
  23.163   	/* AK: bogus, should encode addresses > 4GB */
  23.164   	for (i = 0; i < num_entries; i++)
  23.165 -@@ -785,7 +791,8 @@
  23.166 +@@ -786,7 +792,8 @@
  23.167   	for (page = virt_to_page(table); page <= virt_to_page(table_end); page++)
  23.168   		ClearPageReserved(page);
  23.169   
  23.170 @@ -193,8 +211,9 @@
  23.171   
  23.172   	agp_gatt_table = NULL;
  23.173   	agp_bridge->gatt_table = NULL;
  23.174 ---- linux-2.6.8.1/drivers/char/agp/hp-agp.c	2004-08-14 11:55:59.000000000 +0100
  23.175 -+++ linux-2.6.8.1-xen0/drivers/char/agp/hp-agp.c	2004-09-05 05:55:58.879494644 +0100
  23.176 +diff -ur linux-2.6.9/drivers/char/agp/hp-agp.c linux-2.6.9-new/drivers/char/agp/hp-agp.c
  23.177 +--- linux-2.6.9/drivers/char/agp/hp-agp.c	2004-10-18 22:54:55.000000000 +0100
  23.178 ++++ linux-2.6.9-new/drivers/char/agp/hp-agp.c	2004-11-28 19:32:03.000000000 +0000
  23.179  @@ -110,7 +110,7 @@
  23.180   	hp->gart_size = HP_ZX1_GART_SIZE;
  23.181   	hp->gatt_entries = hp->gart_size / hp->io_page_size;
  23.182 @@ -213,8 +232,9 @@
  23.183   		OUTREG64(hp->ioc_regs, HP_ZX1_TCNFG, hp->io_tlb_ps);
  23.184   		OUTREG64(hp->ioc_regs, HP_ZX1_IMASK, ~(HP_ZX1_IOVA_SIZE - 1));
  23.185   		OUTREG64(hp->ioc_regs, HP_ZX1_IBASE, hp->iova_base | 0x1);
  23.186 ---- linux-2.6.8.1/drivers/char/agp/i460-agp.c	2004-08-14 11:55:34.000000000 +0100
  23.187 -+++ linux-2.6.8.1-xen0/drivers/char/agp/i460-agp.c	2004-09-05 05:55:58.879494644 +0100
  23.188 +diff -ur linux-2.6.9/drivers/char/agp/i460-agp.c linux-2.6.9-new/drivers/char/agp/i460-agp.c
  23.189 +--- linux-2.6.9/drivers/char/agp/i460-agp.c	2004-10-18 22:54:32.000000000 +0100
  23.190 ++++ linux-2.6.9-new/drivers/char/agp/i460-agp.c	2004-11-28 19:32:03.000000000 +0000
  23.191  @@ -371,7 +371,7 @@
  23.192   	}
  23.193   	memset(lp->alloced_map, 0, map_size);
  23.194 @@ -233,8 +253,9 @@
  23.195   	atomic_sub(I460_KPAGES_PER_IOPAGE, &agp_bridge->current_memory_agp);
  23.196   }
  23.197   
  23.198 ---- linux-2.6.8.1/drivers/char/agp/intel-agp.c	2004-08-14 11:55:32.000000000 +0100
  23.199 -+++ linux-2.6.8.1-xen0/drivers/char/agp/intel-agp.c	2004-09-05 05:55:58.880494412 +0100
  23.200 +diff -ur linux-2.6.9/drivers/char/agp/intel-agp.c linux-2.6.9-new/drivers/char/agp/intel-agp.c
  23.201 +--- linux-2.6.9/drivers/char/agp/intel-agp.c	2004-10-18 22:54:08.000000000 +0100
  23.202 ++++ linux-2.6.9-new/drivers/char/agp/intel-agp.c	2004-11-28 19:37:22.000000000 +0000
  23.203  @@ -285,7 +285,7 @@
  23.204   	if (new == NULL)
  23.205   		return NULL;
  23.206 @@ -251,14 +272,15 @@
  23.207  -			i8xx_destroy_pages(phys_to_virt(curr->memory[0]));
  23.208  +			i8xx_destroy_pages(bus_to_virt(curr->memory[0]));
  23.209   		else
  23.210 - 	   		agp_bridge->driver->agp_destroy_page(
  23.211 + 			agp_bridge->driver->agp_destroy_page(
  23.212  -				 phys_to_virt(curr->memory[0]));
  23.213  +				 bus_to_virt(curr->memory[0]));
  23.214   		vfree(curr->memory);
  23.215   	}
  23.216   	kfree(curr);
  23.217 ---- linux-2.6.8.1/drivers/char/agp/intel-mch-agp.c	2004-08-14 11:54:49.000000000 +0100
  23.218 -+++ linux-2.6.8.1-xen0/drivers/char/agp/intel-mch-agp.c	2004-09-05 05:55:58.880494412 +0100
  23.219 +diff -ur linux-2.6.9/drivers/char/agp/intel-mch-agp.c linux-2.6.9-new/drivers/char/agp/intel-mch-agp.c
  23.220 +--- linux-2.6.9/drivers/char/agp/intel-mch-agp.c	2004-10-18 22:53:13.000000000 +0100
  23.221 ++++ linux-2.6.9-new/drivers/char/agp/intel-mch-agp.c	2004-11-28 19:32:03.000000000 +0000
  23.222  @@ -51,7 +51,7 @@
  23.223   	if (new == NULL)
  23.224   		return NULL;
  23.225 @@ -277,8 +299,9 @@
  23.226   		vfree(curr->memory);
  23.227   	}
  23.228   	kfree(curr);
  23.229 ---- linux-2.6.8.1/drivers/char/agp/sworks-agp.c	2004-08-14 11:55:10.000000000 +0100
  23.230 -+++ linux-2.6.8.1-xen0/drivers/char/agp/sworks-agp.c	2004-09-05 05:55:58.881494180 +0100
  23.231 +diff -ur linux-2.6.9/drivers/char/agp/sworks-agp.c linux-2.6.9-new/drivers/char/agp/sworks-agp.c
  23.232 +--- linux-2.6.9/drivers/char/agp/sworks-agp.c	2004-10-18 22:53:50.000000000 +0100
  23.233 ++++ linux-2.6.9-new/drivers/char/agp/sworks-agp.c	2004-11-28 19:38:02.000000000 +0000
  23.234  @@ -51,7 +51,7 @@
  23.235   	}
  23.236   	SetPageReserved(virt_to_page(page_map->real));
  23.237 @@ -288,35 +311,36 @@
  23.238   					    PAGE_SIZE);
  23.239   	if (page_map->remapped == NULL) {
  23.240   		ClearPageReserved(virt_to_page(page_map->real));
  23.241 -@@ -164,7 +164,7 @@
  23.242 +@@ -162,7 +162,7 @@
  23.243 + 	/* Create a fake scratch directory */
  23.244   	for(i = 0; i < 1024; i++) {
  23.245 - 		serverworks_private.scratch_dir.remapped[i] = (unsigned long) agp_bridge->scratch_page;
  23.246 - 		page_dir.remapped[i] =
  23.247 --			virt_to_phys(serverworks_private.scratch_dir.real);
  23.248 -+			virt_to_bus(serverworks_private.scratch_dir.real);
  23.249 - 		page_dir.remapped[i] |= 0x00000001;
  23.250 + 		writel(agp_bridge->scratch_page, serverworks_private.scratch_dir.remapped+i);
  23.251 +-		writel(virt_to_phys(serverworks_private.scratch_dir.real) | 1, page_dir.remapped+i);
  23.252 ++		writel(virt_to_bus(serverworks_private.scratch_dir.real) | 1, page_dir.remapped+i);
  23.253   	}
  23.254   
  23.255 -@@ -177,7 +177,7 @@
  23.256 + 	retval = serverworks_create_gatt_pages(value->num_entries / 1024);
  23.257 +@@ -174,7 +174,7 @@
  23.258   
  23.259   	agp_bridge->gatt_table_real = (u32 *)page_dir.real;
  23.260 - 	agp_bridge->gatt_table = (u32 *)page_dir.remapped;
  23.261 + 	agp_bridge->gatt_table = (u32 __iomem *)page_dir.remapped;
  23.262  -	agp_bridge->gatt_bus_addr = virt_to_phys(page_dir.real);
  23.263  +	agp_bridge->gatt_bus_addr = virt_to_bus(page_dir.real);
  23.264   
  23.265   	/* Get the address for the gart region.
  23.266   	 * This is a bus address even on the alpha, b/c its
  23.267 -@@ -191,7 +191,7 @@
  23.268 +@@ -187,7 +187,7 @@
  23.269 + 	/* Calculate the agp offset */	
  23.270   
  23.271 - 	for(i = 0; i < value->num_entries / 1024; i++) {
  23.272 - 		page_dir.remapped[i] =
  23.273 --			virt_to_phys(serverworks_private.gatt_pages[i]->real);
  23.274 -+			virt_to_bus(serverworks_private.gatt_pages[i]->real);
  23.275 - 		page_dir.remapped[i] |= 0x00000001;
  23.276 - 	}
  23.277 + 	for(i = 0; i < value->num_entries / 1024; i++)
  23.278 +-		writel(virt_to_phys(serverworks_private.gatt_pages[i]->real)|1, page_dir.remapped+i);
  23.279 ++		writel(virt_to_bus(serverworks_private.gatt_pages[i]->real)|1, page_dir.remapped+i);
  23.280   
  23.281 ---- linux-2.6.8.1/drivers/char/agp/uninorth-agp.c	2004-08-14 11:55:32.000000000 +0100
  23.282 -+++ linux-2.6.8.1-xen0/drivers/char/agp/uninorth-agp.c	2004-09-05 05:55:58.881494180 +0100
  23.283 + 	return 0;
  23.284 + }
  23.285 +diff -ur linux-2.6.9/drivers/char/agp/uninorth-agp.c linux-2.6.9-new/drivers/char/agp/uninorth-agp.c
  23.286 +--- linux-2.6.9/drivers/char/agp/uninorth-agp.c	2004-10-18 22:54:29.000000000 +0100
  23.287 ++++ linux-2.6.9-new/drivers/char/agp/uninorth-agp.c	2004-11-28 19:32:03.000000000 +0000
  23.288  @@ -200,7 +200,7 @@
  23.289   
  23.290   	agp_bridge->gatt_table_real = (u32 *) table;
  23.291 @@ -326,8 +350,9 @@
  23.292   
  23.293   	for (i = 0; i < num_entries; i++) {
  23.294   		agp_bridge->gatt_table[i] =
  23.295 ---- linux-2.6.8.1/include/asm-i386/agp.h	2004-08-14 11:54:47.000000000 +0100
  23.296 -+++ linux-2.6.8.1-xen0/include/asm-i386/agp.h	2004-09-05 05:57:26.040268956 +0100
  23.297 +diff -ur linux-2.6.9/include/asm-i386/agp.h linux-2.6.9-new/include/asm-i386/agp.h
  23.298 +--- linux-2.6.9/include/asm-i386/agp.h	2004-10-18 22:53:06.000000000 +0100
  23.299 ++++ linux-2.6.9-new/include/asm-i386/agp.h	2004-11-28 19:32:03.000000000 +0000
  23.300  @@ -3,6 +3,7 @@
  23.301   
  23.302   #include <asm/pgtable.h>
    24.1 --- a/patches/linux-2.6.9/drm.patch	Mon Nov 29 16:03:16 2004 +0000
    24.2 +++ b/patches/linux-2.6.9/drm.patch	Mon Nov 29 16:09:15 2004 +0000
    24.3 @@ -1,5 +1,6 @@
    24.4 ---- linux-2.6.8.1/drivers/char/drm/ati_pcigart.h	2004-08-14 11:56:14.000000000 +0100
    24.5 -+++ linux-2.6.8.1-xen0/drivers/char/drm/ati_pcigart.h	2004-09-05 06:14:51.751782846 +0100
    24.6 +diff -ur linux-2.6.9/drivers/char/drm/ati_pcigart.h linux-2.6.9-new/drivers/char/drm/ati_pcigart.h
    24.7 +--- linux-2.6.9/drivers/char/drm/ati_pcigart.h	2004-10-18 22:55:07.000000000 +0100
    24.8 ++++ linux-2.6.9-new/drivers/char/drm/ati_pcigart.h	2004-11-28 19:42:41.000000000 +0000
    24.9  @@ -158,7 +158,7 @@
   24.10   	ret = 1;
   24.11   
    25.1 --- a/patches/linux-2.6.9/nettel.patch	Mon Nov 29 16:03:16 2004 +0000
    25.2 +++ b/patches/linux-2.6.9/nettel.patch	Mon Nov 29 16:09:15 2004 +0000
    25.3 @@ -1,6 +1,6 @@
    25.4 -diff -ru linux-2.6.8.1/drivers/mtd/maps/nettel.c linux-2.6.8.1-xen0/drivers/mtd/maps/nettel.c
    25.5 ---- linux-2.6.8.1/drivers/mtd/maps/nettel.c	2004-10-21 15:59:29.000000000 -0600
    25.6 -+++ linux-2.6.8.1-xen0/drivers/mtd/maps/nettel.c	2004-10-22 09:12:17.000000000 -0600
    25.7 +diff -ur linux-2.6.9/drivers/mtd/maps/nettel.c linux-2.6.9-new/drivers/mtd/maps/nettel.c
    25.8 +--- linux-2.6.9/drivers/mtd/maps/nettel.c	2004-10-18 22:53:44.000000000 +0100
    25.9 ++++ linux-2.6.9-new/drivers/mtd/maps/nettel.c	2004-11-28 19:45:35.000000000 +0000
   25.10  @@ -270,7 +270,7 @@
   25.11   	maxsize = AMD_WINDOW_MAXSIZE;
   25.12   
    26.1 --- a/xen/arch/x86/Makefile	Mon Nov 29 16:03:16 2004 +0000
    26.2 +++ b/xen/arch/x86/Makefile	Mon Nov 29 16:09:15 2004 +0000
    26.3 @@ -8,6 +8,7 @@ endif
    26.4  
    26.5  OBJS += $(patsubst %.S,%.o,$(wildcard $(TARGET_SUBARCH)/*.S))
    26.6  OBJS += $(patsubst %.c,%.o,$(wildcard $(TARGET_SUBARCH)/*.c))
    26.7 +OBJS += $(patsubst %.c,%.o,$(wildcard mtrr/*.c))
    26.8  
    26.9  OBJS := $(subst $(TARGET_SUBARCH)/asm-offsets.o,,$(OBJS))
   26.10  
    27.1 --- a/xen/arch/x86/dom0_ops.c	Mon Nov 29 16:03:16 2004 +0000
    27.2 +++ b/xen/arch/x86/dom0_ops.c	Mon Nov 29 16:09:15 2004 +0000
    27.3 @@ -21,6 +21,9 @@
    27.4  #include <asm/shadow.h>
    27.5  #include <public/sched_ctl.h>
    27.6  
    27.7 +#include <asm/mtrr.h>
    27.8 +#include "mtrr/mtrr.h"
    27.9 +
   27.10  #define TRC_DOM0OP_ENTER_BASE  0x00020000
   27.11  #define TRC_DOM0OP_LEAVE_BASE  0x00030000
   27.12  
   27.13 @@ -93,6 +96,40 @@ long arch_do_dom0_op(dom0_op_t *op, dom0
   27.14      }
   27.15      break;
   27.16  
   27.17 +    case DOM0_ADD_MEMTYPE:
   27.18 +    {
   27.19 +        ret = mtrr_add_page(
   27.20 +            op->u.add_memtype.pfn,
   27.21 +            op->u.add_memtype.nr_pfns,
   27.22 +            op->u.add_memtype.type,
   27.23 +            1);
   27.24 +    }
   27.25 +    break;
   27.26 +
   27.27 +    case DOM0_DEL_MEMTYPE:
   27.28 +    {
   27.29 +        ret = mtrr_del_page(op->u.del_memtype.reg, 0, 0);
   27.30 +    }
   27.31 +    break;
   27.32 +
   27.33 +    case DOM0_READ_MEMTYPE:
   27.34 +    {
   27.35 +        unsigned long pfn;
   27.36 +        unsigned int  nr_pfns;
   27.37 +        mtrr_type     type;
   27.38 +
   27.39 +        ret = -EINVAL;
   27.40 +        if ( op->u.read_memtype.reg < num_var_ranges )
   27.41 +        {
   27.42 +            mtrr_if->get(op->u.read_memtype.reg, &pfn, &nr_pfns, &type);
   27.43 +            (void)__put_user(pfn, &u_dom0_op->u.read_memtype.pfn);
   27.44 +            (void)__put_user(nr_pfns, &u_dom0_op->u.read_memtype.nr_pfns);
   27.45 +            (void)__put_user(type, &u_dom0_op->u.read_memtype.type);
   27.46 +            ret = 0;
   27.47 +        }
   27.48 +    }
   27.49 +    break;
   27.50 +
   27.51      default:
   27.52          ret = -ENOSYS;
   27.53  
    28.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    28.2 +++ b/xen/arch/x86/mtrr/amd.c	Mon Nov 29 16:09:15 2004 +0000
    28.3 @@ -0,0 +1,121 @@
    28.4 +#include <xen/init.h>
    28.5 +#include <xen/mm.h>
    28.6 +#include <asm/mtrr.h>
    28.7 +#include <asm/msr.h>
    28.8 +
    28.9 +#include "mtrr.h"
   28.10 +
   28.11 +static void
   28.12 +amd_get_mtrr(unsigned int reg, unsigned long *base,
   28.13 +	     unsigned int *size, mtrr_type * type)
   28.14 +{
   28.15 +	unsigned long low, high;
   28.16 +
   28.17 +	rdmsr(MSR_K6_UWCCR, low, high);
   28.18 +	/*  Upper dword is region 1, lower is region 0  */
   28.19 +	if (reg == 1)
   28.20 +		low = high;
   28.21 +	/*  The base masks off on the right alignment  */
   28.22 +	*base = (low & 0xFFFE0000) >> PAGE_SHIFT;
   28.23 +	*type = 0;
   28.24 +	if (low & 1)
   28.25 +		*type = MTRR_TYPE_UNCACHABLE;
   28.26 +	if (low & 2)
   28.27 +		*type = MTRR_TYPE_WRCOMB;
   28.28 +	if (!(low & 3)) {
   28.29 +		*size = 0;
   28.30 +		return;
   28.31 +	}
   28.32 +	/*
   28.33 +	 *  This needs a little explaining. The size is stored as an
   28.34 +	 *  inverted mask of bits of 128K granularity 15 bits long offset
   28.35 +	 *  2 bits
   28.36 +	 *
   28.37 +	 *  So to get a size we do invert the mask and add 1 to the lowest
   28.38 +	 *  mask bit (4 as its 2 bits in). This gives us a size we then shift
   28.39 +	 *  to turn into 128K blocks
   28.40 +	 *
   28.41 +	 *  eg              111 1111 1111 1100      is 512K
   28.42 +	 *
   28.43 +	 *  invert          000 0000 0000 0011
   28.44 +	 *  +1              000 0000 0000 0100
   28.45 +	 *  *128K   ...
   28.46 +	 */
   28.47 +	low = (~low) & 0x1FFFC;
   28.48 +	*size = (low + 4) << (15 - PAGE_SHIFT);
   28.49 +	return;
   28.50 +}
   28.51 +
   28.52 +static void amd_set_mtrr(unsigned int reg, unsigned long base,
   28.53 +			 unsigned long size, mtrr_type type)
   28.54 +/*  [SUMMARY] Set variable MTRR register on the local CPU.
   28.55 +    <reg> The register to set.
   28.56 +    <base> The base address of the region.
   28.57 +    <size> The size of the region. If this is 0 the region is disabled.
   28.58 +    <type> The type of the region.
   28.59 +    <do_safe> If TRUE, do the change safely. If FALSE, safety measures should
   28.60 +    be done externally.
   28.61 +    [RETURNS] Nothing.
   28.62 +*/
   28.63 +{
   28.64 +	u32 regs[2];
   28.65 +
   28.66 +	/*
   28.67 +	 *  Low is MTRR0 , High MTRR 1
   28.68 +	 */
   28.69 +	rdmsr(MSR_K6_UWCCR, regs[0], regs[1]);
   28.70 +	/*
   28.71 +	 *  Blank to disable
   28.72 +	 */
   28.73 +	if (size == 0)
   28.74 +		regs[reg] = 0;
   28.75 +	else
   28.76 +		/* Set the register to the base, the type (off by one) and an
   28.77 +		   inverted bitmask of the size The size is the only odd
   28.78 +		   bit. We are fed say 512K We invert this and we get 111 1111
   28.79 +		   1111 1011 but if you subtract one and invert you get the   
   28.80 +		   desired 111 1111 1111 1100 mask
   28.81 +
   28.82 +		   But ~(x - 1) == ~x + 1 == -x. Two's complement rocks!  */
   28.83 +		regs[reg] = (-size >> (15 - PAGE_SHIFT) & 0x0001FFFC)
   28.84 +		    | (base << PAGE_SHIFT) | (type + 1);
   28.85 +
   28.86 +	/*
   28.87 +	 *  The writeback rule is quite specific. See the manual. Its
   28.88 +	 *  disable local interrupts, write back the cache, set the mtrr
   28.89 +	 */
   28.90 +	wbinvd();
   28.91 +	wrmsr(MSR_K6_UWCCR, regs[0], regs[1]);
   28.92 +}
   28.93 +
   28.94 +static int amd_validate_add_page(unsigned long base, unsigned long size, unsigned int type)
   28.95 +{
   28.96 +	/* Apply the K6 block alignment and size rules
   28.97 +	   In order
   28.98 +	   o Uncached or gathering only
   28.99 +	   o 128K or bigger block
  28.100 +	   o Power of 2 block
  28.101 +	   o base suitably aligned to the power
  28.102 +	*/
  28.103 +	if (type > MTRR_TYPE_WRCOMB || size < (1 << (17 - PAGE_SHIFT))
  28.104 +	    || (size & ~(size - 1)) - size || (base & (size - 1)))
  28.105 +		return -EINVAL;
  28.106 +	return 0;
  28.107 +}
  28.108 +
  28.109 +static struct mtrr_ops amd_mtrr_ops = {
  28.110 +	.vendor            = X86_VENDOR_AMD,
  28.111 +	.set               = amd_set_mtrr,
  28.112 +	.get               = amd_get_mtrr,
  28.113 +	.get_free_region   = generic_get_free_region,
  28.114 +	.validate_add_page = amd_validate_add_page,
  28.115 +	.have_wrcomb       = positive_have_wrcomb,
  28.116 +};
  28.117 +
  28.118 +int __init amd_init_mtrr(void)
  28.119 +{
  28.120 +	set_mtrr_ops(&amd_mtrr_ops);
  28.121 +	return 0;
  28.122 +}
  28.123 +
  28.124 +//arch_initcall(amd_mtrr_init);
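The size encoding described in the amd_get_mtrr()/amd_set_mtrr() comments above can be checked with a small worked example. A standalone sketch (not part of the changeset), assuming PAGE_SHIFT == 12, that encodes a 512 KiB region with the same expression as amd_set_mtrr() and decodes it back with the steps from amd_get_mtrr():

    #include <stdio.h>

    #define PAGE_SHIFT 12

    int main(void)
    {
        unsigned long size = 128;   /* pages: 512 KiB / 4 KiB */
        unsigned long enc, dec;

        /* encode: inverted 128K-granularity mask in bits 2..16, as in
         * amd_set_mtrr() (base and type bits omitted) */
        enc = (-size >> (15 - PAGE_SHIFT)) & 0x0001FFFC;     /* 0x1FFF0 */

        /* decode: invert, add the lowest mask bit (4), shift back to pages,
         * as in amd_get_mtrr() */
        dec = ((~enc & 0x1FFFC) + 4) << (15 - PAGE_SHIFT);   /* 128 pages */

        printf("mask=%#lx size=%lu pages\n", enc, dec);
        return 0;
    }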
    29.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    29.2 +++ b/xen/arch/x86/mtrr/centaur.c	Mon Nov 29 16:09:15 2004 +0000
    29.3 @@ -0,0 +1,220 @@
    29.4 +#include <xen/init.h>
    29.5 +#include <xen/mm.h>
    29.6 +#include <asm/mtrr.h>
    29.7 +#include <asm/msr.h>
    29.8 +#include "mtrr.h"
    29.9 +
   29.10 +static struct {
   29.11 +	unsigned long high;
   29.12 +	unsigned long low;
   29.13 +} centaur_mcr[8];
   29.14 +
   29.15 +static u8 centaur_mcr_reserved;
   29.16 +static u8 centaur_mcr_type;	/* 0 for winchip, 1 for winchip2 */
   29.17 +
   29.18 +/*
   29.19 + *	Report boot time MCR setups 
   29.20 + */
   29.21 +
   29.22 +static int
   29.23 +centaur_get_free_region(unsigned long base, unsigned long size)
   29.24 +/*  [SUMMARY] Get a free MTRR.
   29.25 +    <base> The starting (base) address of the region.
   29.26 +    <size> The size (in bytes) of the region.
   29.27 +    [RETURNS] The index of the region on success, else -1 on error.
   29.28 +*/
   29.29 +{
   29.30 +	int i, max;
   29.31 +	mtrr_type ltype;
   29.32 +	unsigned long lbase;
   29.33 +	unsigned int lsize;
   29.34 +
   29.35 +	max = num_var_ranges;
   29.36 +	for (i = 0; i < max; ++i) {
   29.37 +		if (centaur_mcr_reserved & (1 << i))
   29.38 +			continue;
   29.39 +		mtrr_if->get(i, &lbase, &lsize, &ltype);
   29.40 +		if (lsize == 0)
   29.41 +			return i;
   29.42 +	}
   29.43 +	return -ENOSPC;
   29.44 +}
   29.45 +
   29.46 +void
   29.47 +mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi)
   29.48 +{
   29.49 +	centaur_mcr[mcr].low = lo;
   29.50 +	centaur_mcr[mcr].high = hi;
   29.51 +}
   29.52 +
   29.53 +static void
   29.54 +centaur_get_mcr(unsigned int reg, unsigned long *base,
   29.55 +		unsigned int *size, mtrr_type * type)
   29.56 +{
   29.57 +	*base = centaur_mcr[reg].high >> PAGE_SHIFT;
   29.58 +	*size = -(centaur_mcr[reg].low & 0xfffff000) >> PAGE_SHIFT;
   29.59 +	*type = MTRR_TYPE_WRCOMB;	/*  If it is there, it is write-combining  */
   29.60 +	if (centaur_mcr_type == 1 && ((centaur_mcr[reg].low & 31) & 2))
   29.61 +		*type = MTRR_TYPE_UNCACHABLE;
   29.62 +	if (centaur_mcr_type == 1 && (centaur_mcr[reg].low & 31) == 25)
   29.63 +		*type = MTRR_TYPE_WRBACK;
   29.64 +	if (centaur_mcr_type == 0 && (centaur_mcr[reg].low & 31) == 31)
   29.65 +		*type = MTRR_TYPE_WRBACK;
   29.66 +
   29.67 +}
   29.68 +
   29.69 +static void centaur_set_mcr(unsigned int reg, unsigned long base,
   29.70 +			    unsigned long size, mtrr_type type)
   29.71 +{
   29.72 +	unsigned long low, high;
   29.73 +
   29.74 +	if (size == 0) {
   29.75 +		/*  Disable  */
   29.76 +		high = low = 0;
   29.77 +	} else {
   29.78 +		high = base << PAGE_SHIFT;
   29.79 +		if (centaur_mcr_type == 0)
   29.80 +			low = -size << PAGE_SHIFT | 0x1f;	/* only support write-combining... */
   29.81 +		else {
   29.82 +			if (type == MTRR_TYPE_UNCACHABLE)
   29.83 +				low = -size << PAGE_SHIFT | 0x02;	/* NC */
   29.84 +			else
   29.85 +				low = -size << PAGE_SHIFT | 0x09;	/* WWO,WC */
   29.86 +		}
   29.87 +	}
   29.88 +	centaur_mcr[reg].high = high;
   29.89 +	centaur_mcr[reg].low = low;
   29.90 +	wrmsr(MSR_IDT_MCR0 + reg, low, high);
   29.91 +}
   29.92 +/*
   29.93 + *	Initialise the later (saner) Winchip MCR variant. In this version
   29.94 + *	the BIOS can pass us the registers it has used (but not their values)
   29.95 + *	and the control register is read/write
   29.96 + */
   29.97 +
   29.98 +static void __init
   29.99 +centaur_mcr1_init(void)
  29.100 +{
  29.101 +	unsigned i;
  29.102 +	u32 lo, hi;
  29.103 +
   29.104 +	/* The MCR values themselves cannot be read back, so we cannot see what
   29.105 +	 * the BIOS programmed; the control register only tells us which MCRs are in use.
   29.106 +	 */
  29.107 +
  29.108 +	rdmsr(MSR_IDT_MCR_CTRL, lo, hi);
  29.109 +	if (((lo >> 17) & 7) == 1) {	/* Type 1 Winchip2 MCR */
  29.110 +		lo &= ~0x1C0;	/* clear key */
  29.111 +		lo |= 0x040;	/* set key to 1 */
  29.112 +		wrmsr(MSR_IDT_MCR_CTRL, lo, hi);	/* unlock MCR */
  29.113 +	}
  29.114 +
  29.115 +	centaur_mcr_type = 1;
  29.116 +
  29.117 +	/*
  29.118 +	 *  Clear any unconfigured MCR's.
  29.119 +	 */
  29.120 +
  29.121 +	for (i = 0; i < 8; ++i) {
  29.122 +		if (centaur_mcr[i].high == 0 && centaur_mcr[i].low == 0) {
  29.123 +			if (!(lo & (1 << (9 + i))))
  29.124 +				wrmsr(MSR_IDT_MCR0 + i, 0, 0);
  29.125 +			else
  29.126 +				/*
  29.127 +				 *      If the BIOS set up an MCR we cannot see it
  29.128 +				 *      but we don't wish to obliterate it
  29.129 +				 */
  29.130 +				centaur_mcr_reserved |= (1 << i);
  29.131 +		}
  29.132 +	}
  29.133 +	/*  
  29.134 +	 *  Throw the main write-combining switch... 
  29.135 +	 *  However if OOSTORE is enabled then people have already done far
  29.136 +	 *  cleverer things and we should behave. 
  29.137 +	 */
  29.138 +
  29.139 +	lo |= 15;		/* Write combine enables */
  29.140 +	wrmsr(MSR_IDT_MCR_CTRL, lo, hi);
  29.141 +}
  29.142 +
  29.143 +/*
   29.144 + *	Initialise the original WinChip, which has read-only MCR registers,
   29.145 + *	no used-register bitmask for the BIOS to pass on, and a write-only control register
  29.146 + */
  29.147 +
  29.148 +static void __init
  29.149 +centaur_mcr0_init(void)
  29.150 +{
  29.151 +	unsigned i;
  29.152 +
  29.153 +	/* Unfortunately, MCR's are read-only, so there is no way to
  29.154 +	 * find out what the bios might have done.
  29.155 +	 */
  29.156 +
  29.157 +	/* Clear any unconfigured MCR's.
  29.158 +	 * This way we are sure that the centaur_mcr array contains the actual
  29.159 +	 * values. The disadvantage is that any BIOS tweaks are thus undone.
  29.160 +	 *
  29.161 +	 */
  29.162 +	for (i = 0; i < 8; ++i) {
  29.163 +		if (centaur_mcr[i].high == 0 && centaur_mcr[i].low == 0)
  29.164 +			wrmsr(MSR_IDT_MCR0 + i, 0, 0);
  29.165 +	}
  29.166 +
  29.167 +	wrmsr(MSR_IDT_MCR_CTRL, 0x01F0001F, 0);	/* Write only */
  29.168 +}
  29.169 +
  29.170 +/*
  29.171 + *	Initialise Winchip series MCR registers
  29.172 + */
  29.173 +
  29.174 +static void __init
  29.175 +centaur_mcr_init(void)
  29.176 +{
  29.177 +	struct set_mtrr_context ctxt;
  29.178 +
  29.179 +	set_mtrr_prepare_save(&ctxt);
  29.180 +	set_mtrr_cache_disable(&ctxt);
  29.181 +
  29.182 +	if (boot_cpu_data.x86_model == 4)
  29.183 +		centaur_mcr0_init();
  29.184 +	else if (boot_cpu_data.x86_model == 8 || boot_cpu_data.x86_model == 9)
  29.185 +		centaur_mcr1_init();
  29.186 +
  29.187 +	set_mtrr_done(&ctxt);
  29.188 +}
  29.189 +
  29.190 +static int centaur_validate_add_page(unsigned long base, 
  29.191 +				     unsigned long size, unsigned int type)
  29.192 +{
  29.193 +	/*
  29.194 +	 *  FIXME: Winchip2 supports uncached
  29.195 +	 */
  29.196 +	if (type != MTRR_TYPE_WRCOMB && 
  29.197 +	    (centaur_mcr_type == 0 || type != MTRR_TYPE_UNCACHABLE)) {
  29.198 +		printk(KERN_WARNING
  29.199 +		       "mtrr: only write-combining%s supported\n",
  29.200 +		       centaur_mcr_type ? " and uncacheable are"
  29.201 +		       : " is");
  29.202 +		return -EINVAL;
  29.203 +	}
  29.204 +	return 0;
  29.205 +}
  29.206 +
  29.207 +static struct mtrr_ops centaur_mtrr_ops = {
  29.208 +	.vendor            = X86_VENDOR_CENTAUR,
  29.209 +	.init              = centaur_mcr_init,
  29.210 +	.set               = centaur_set_mcr,
  29.211 +	.get               = centaur_get_mcr,
  29.212 +	.get_free_region   = centaur_get_free_region,
  29.213 +	.validate_add_page = centaur_validate_add_page,
  29.214 +	.have_wrcomb       = positive_have_wrcomb,
  29.215 +};
  29.216 +
  29.217 +int __init centaur_init_mtrr(void)
  29.218 +{
  29.219 +	set_mtrr_ops(&centaur_mtrr_ops);
  29.220 +	return 0;
  29.221 +}
  29.222 +
  29.223 +//arch_initcall(centaur_init_mtrr);
    30.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    30.2 +++ b/xen/arch/x86/mtrr/cyrix.c	Mon Nov 29 16:09:15 2004 +0000
    30.3 @@ -0,0 +1,362 @@
    30.4 +#include <xen/init.h>
    30.5 +#include <xen/mm.h>
    30.6 +#include <asm/mtrr.h>
    30.7 +#include <asm/msr.h>
    30.8 +#include <asm/io.h>
    30.9 +#include "mtrr.h"
   30.10 +
   30.11 +int arr3_protected;
   30.12 +
   30.13 +static void
   30.14 +cyrix_get_arr(unsigned int reg, unsigned long *base,
   30.15 +	      unsigned int *size, mtrr_type * type)
   30.16 +{
   30.17 +	unsigned long flags;
   30.18 +	unsigned char arr, ccr3, rcr, shift;
   30.19 +
   30.20 +	arr = CX86_ARR_BASE + (reg << 1) + reg;	/* avoid multiplication by 3 */
   30.21 +
   30.22 +	/* Save flags and disable interrupts */
   30.23 +	local_irq_save(flags);
   30.24 +
   30.25 +	ccr3 = getCx86(CX86_CCR3);
   30.26 +	setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10);	/* enable MAPEN */
   30.27 +	((unsigned char *) base)[3] = getCx86(arr);
   30.28 +	((unsigned char *) base)[2] = getCx86(arr + 1);
   30.29 +	((unsigned char *) base)[1] = getCx86(arr + 2);
   30.30 +	rcr = getCx86(CX86_RCR_BASE + reg);
   30.31 +	setCx86(CX86_CCR3, ccr3);	/* disable MAPEN */
   30.32 +
   30.33 +	/* Enable interrupts if it was enabled previously */
   30.34 +	local_irq_restore(flags);
   30.35 +	shift = ((unsigned char *) base)[1] & 0x0f;
   30.36 +	*base >>= PAGE_SHIFT;
   30.37 +
   30.38 +	/* Power of two, at least 4K on ARR0-ARR6, 256K on ARR7
   30.39 +	 * Note: shift==0xf means 4G, this is unsupported.
   30.40 +	 */
   30.41 +	if (shift)
   30.42 +		*size = (reg < 7 ? 0x1UL : 0x40UL) << (shift - 1);
   30.43 +	else
   30.44 +		*size = 0;
   30.45 +
   30.46 +	/* Bit 0 is Cache Enable on ARR7, Cache Disable on ARR0-ARR6 */
   30.47 +	if (reg < 7) {
   30.48 +		switch (rcr) {
   30.49 +		case 1:
   30.50 +			*type = MTRR_TYPE_UNCACHABLE;
   30.51 +			break;
   30.52 +		case 8:
   30.53 +			*type = MTRR_TYPE_WRBACK;
   30.54 +			break;
   30.55 +		case 9:
   30.56 +			*type = MTRR_TYPE_WRCOMB;
   30.57 +			break;
   30.58 +		case 24:
   30.59 +		default:
   30.60 +			*type = MTRR_TYPE_WRTHROUGH;
   30.61 +			break;
   30.62 +		}
   30.63 +	} else {
   30.64 +		switch (rcr) {
   30.65 +		case 0:
   30.66 +			*type = MTRR_TYPE_UNCACHABLE;
   30.67 +			break;
   30.68 +		case 8:
   30.69 +			*type = MTRR_TYPE_WRCOMB;
   30.70 +			break;
   30.71 +		case 9:
   30.72 +			*type = MTRR_TYPE_WRBACK;
   30.73 +			break;
   30.74 +		case 25:
   30.75 +		default:
   30.76 +			*type = MTRR_TYPE_WRTHROUGH;
   30.77 +			break;
   30.78 +		}
   30.79 +	}
   30.80 +}
   30.81 +
   30.82 +static int
   30.83 +cyrix_get_free_region(unsigned long base, unsigned long size)
   30.84 +/*  [SUMMARY] Get a free ARR.
   30.85 +    <base> The starting (base) address of the region.
   30.86 +    <size> The size (in bytes) of the region.
    30.87 +    [RETURNS] The index of a free region on success, else -ENOSPC if none is available.
   30.88 +*/
   30.89 +{
   30.90 +	int i;
   30.91 +	mtrr_type ltype;
   30.92 +	unsigned long lbase;
   30.93 +	unsigned int  lsize;
   30.94 +
   30.95 +	/* If we are to set up a region >32M then look at ARR7 immediately */
   30.96 +	if (size > 0x2000) {
   30.97 +		cyrix_get_arr(7, &lbase, &lsize, &ltype);
   30.98 +		if (lsize == 0)
   30.99 +			return 7;
  30.100 +		/*  Else try ARR0-ARR6 first  */
  30.101 +	} else {
  30.102 +		for (i = 0; i < 7; i++) {
  30.103 +			cyrix_get_arr(i, &lbase, &lsize, &ltype);
  30.104 +			if ((i == 3) && arr3_protected)
  30.105 +				continue;
  30.106 +			if (lsize == 0)
  30.107 +				return i;
  30.108 +		}
   30.109 +		/* ARR0-ARR6 aren't free; try ARR7, but its size must be at least 256K */
  30.110 +		cyrix_get_arr(i, &lbase, &lsize, &ltype);
  30.111 +		if ((lsize == 0) && (size >= 0x40))
  30.112 +			return i;
  30.113 +	}
  30.114 +	return -ENOSPC;
  30.115 +}
  30.116 +
  30.117 +static u32 cr4 = 0;
  30.118 +static u32 ccr3;
  30.119 +
  30.120 +static void prepare_set(void)
  30.121 +{
  30.122 +	u32 cr0;
  30.123 +
  30.124 +	/*  Save value of CR4 and clear Page Global Enable (bit 7)  */
  30.125 +	if ( cpu_has_pge ) {
  30.126 +		cr4 = read_cr4();
  30.127 +		write_cr4(cr4 & (unsigned char) ~(1 << 7));
  30.128 +	}
  30.129 +
  30.130 +	/*  Disable and flush caches. Note that wbinvd flushes the TLBs as
  30.131 +	    a side-effect  */
  30.132 +	cr0 = read_cr0() | 0x40000000;
  30.133 +	wbinvd();
  30.134 +	write_cr0(cr0);
  30.135 +	wbinvd();
  30.136 +
   30.137 +	/* Cyrix ARRs - everything else was excluded at the top. Save CCR3. */
  30.138 +	ccr3 = getCx86(CX86_CCR3);
  30.139 +
   30.140 +	/* Enable MAPEN so that the ARR registers can be accessed */
  30.141 +	setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10);
  30.142 +
  30.143 +}
  30.144 +
  30.145 +static void post_set(void)
  30.146 +{
  30.147 +	/*  Flush caches and TLBs  */
  30.148 +	wbinvd();
  30.149 +
  30.150 +	/* Cyrix ARRs - everything else was excluded at the top */
  30.151 +	setCx86(CX86_CCR3, ccr3);
  30.152 +		
  30.153 +	/*  Enable caches  */
  30.154 +	write_cr0(read_cr0() & 0xbfffffff);
  30.155 +
  30.156 +	/*  Restore value of CR4  */
  30.157 +	if ( cpu_has_pge )
  30.158 +		write_cr4(cr4);
  30.159 +}
  30.160 +
  30.161 +static void cyrix_set_arr(unsigned int reg, unsigned long base,
  30.162 +			  unsigned long size, mtrr_type type)
  30.163 +{
  30.164 +	unsigned char arr, arr_type, arr_size;
  30.165 +
  30.166 +	arr = CX86_ARR_BASE + (reg << 1) + reg;	/* avoid multiplication by 3 */
  30.167 +
  30.168 +	/* count down from 32M (ARR0-ARR6) or from 2G (ARR7) */
  30.169 +	if (reg >= 7)
  30.170 +		size >>= 6;
  30.171 +
  30.172 +	size &= 0x7fff;		/* make sure arr_size <= 14 */
  30.173 +	for (arr_size = 0; size; arr_size++, size >>= 1) ;
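+	/* arr_size is now the Cyrix log2 size encoding: 1 = 4K, 2 = 8K, ...
+	 * for ARR0-ARR6, and 1 = 256K, 2 = 512K, ... for ARR7. */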
  30.174 +
  30.175 +	if (reg < 7) {
  30.176 +		switch (type) {
  30.177 +		case MTRR_TYPE_UNCACHABLE:
  30.178 +			arr_type = 1;
  30.179 +			break;
  30.180 +		case MTRR_TYPE_WRCOMB:
  30.181 +			arr_type = 9;
  30.182 +			break;
  30.183 +		case MTRR_TYPE_WRTHROUGH:
  30.184 +			arr_type = 24;
  30.185 +			break;
  30.186 +		default:
  30.187 +			arr_type = 8;
  30.188 +			break;
  30.189 +		}
  30.190 +	} else {
  30.191 +		switch (type) {
  30.192 +		case MTRR_TYPE_UNCACHABLE:
  30.193 +			arr_type = 0;
  30.194 +			break;
  30.195 +		case MTRR_TYPE_WRCOMB:
  30.196 +			arr_type = 8;
  30.197 +			break;
  30.198 +		case MTRR_TYPE_WRTHROUGH:
  30.199 +			arr_type = 25;
  30.200 +			break;
  30.201 +		default:
  30.202 +			arr_type = 9;
  30.203 +			break;
  30.204 +		}
  30.205 +	}
  30.206 +
  30.207 +	prepare_set();
  30.208 +
  30.209 +	base <<= PAGE_SHIFT;
  30.210 +	setCx86(arr, ((unsigned char *) &base)[3]);
  30.211 +	setCx86(arr + 1, ((unsigned char *) &base)[2]);
  30.212 +	setCx86(arr + 2, (((unsigned char *) &base)[1]) | arr_size);
  30.213 +	setCx86(CX86_RCR_BASE + reg, arr_type);
  30.214 +
  30.215 +	post_set();
  30.216 +}
  30.217 +
  30.218 +typedef struct {
  30.219 +	unsigned long base;
  30.220 +	unsigned int size;
  30.221 +	mtrr_type type;
  30.222 +} arr_state_t;
  30.223 +
  30.224 +arr_state_t arr_state[8] __initdata = {
  30.225 +	{0UL, 0UL, 0UL}, {0UL, 0UL, 0UL}, {0UL, 0UL, 0UL}, {0UL, 0UL, 0UL},
  30.226 +	{0UL, 0UL, 0UL}, {0UL, 0UL, 0UL}, {0UL, 0UL, 0UL}, {0UL, 0UL, 0UL}
  30.227 +};
  30.228 +
  30.229 +unsigned char ccr_state[7] __initdata = { 0, 0, 0, 0, 0, 0, 0 };
  30.230 +
  30.231 +static void cyrix_set_all(void)
  30.232 +{
  30.233 +	int i;
  30.234 +
  30.235 +	prepare_set();
  30.236 +
  30.237 +	/* the CCRs are not contiguous */
  30.238 +	for (i = 0; i < 4; i++)
  30.239 +		setCx86(CX86_CCR0 + i, ccr_state[i]);
  30.240 +	for (; i < 7; i++)
  30.241 +		setCx86(CX86_CCR4 + i, ccr_state[i]);
  30.242 +	for (i = 0; i < 8; i++)
  30.243 +		cyrix_set_arr(i, arr_state[i].base, 
  30.244 +			      arr_state[i].size, arr_state[i].type);
  30.245 +
  30.246 +	post_set();
  30.247 +}
  30.248 +
  30.249 +/*
   30.250 + * On Cyrix 6x86(MX) and M II the ARR3 is special: it is tied to
   30.251 + * SMM (System Management Mode). So we need the following:
  30.252 + * Check whether SMI_LOCK (CCR3 bit 0) is set
  30.253 + *   if it is set, write a warning message: ARR3 cannot be changed!
  30.254 + *     (it cannot be changed until the next processor reset)
  30.255 + *   if it is reset, then we can change it, set all the needed bits:
  30.256 + *   - disable access to SMM memory through ARR3 range (CCR1 bit 7 reset)
  30.257 + *   - disable access to SMM memory (CCR1 bit 2 reset)
  30.258 + *   - disable SMM mode (CCR1 bit 1 reset)
  30.259 + *   - disable write protection of ARR3 (CCR6 bit 1 reset)
  30.260 + *   - (maybe) disable ARR3
  30.261 + * Just to be sure, we enable ARR usage by the processor (CCR5 bit 5 set)
  30.262 + */
  30.263 +static void __init
  30.264 +cyrix_arr_init(void)
  30.265 +{
  30.266 +	struct set_mtrr_context ctxt;
  30.267 +	unsigned char ccr[7];
  30.268 +	int ccrc[7] = { 0, 0, 0, 0, 0, 0, 0 };
  30.269 +#ifdef CONFIG_SMP
  30.270 +	int i;
  30.271 +#endif
  30.272 +
  30.273 +	/* flush cache and enable MAPEN */
  30.274 +	set_mtrr_prepare_save(&ctxt);
  30.275 +	set_mtrr_cache_disable(&ctxt);
  30.276 +
  30.277 +	/* Save all CCRs locally */
  30.278 +	ccr[0] = getCx86(CX86_CCR0);
  30.279 +	ccr[1] = getCx86(CX86_CCR1);
  30.280 +	ccr[2] = getCx86(CX86_CCR2);
  30.281 +	ccr[3] = ctxt.ccr3;
  30.282 +	ccr[4] = getCx86(CX86_CCR4);
  30.283 +	ccr[5] = getCx86(CX86_CCR5);
  30.284 +	ccr[6] = getCx86(CX86_CCR6);
  30.285 +
  30.286 +	if (ccr[3] & 1) {
  30.287 +		ccrc[3] = 1;
  30.288 +		arr3_protected = 1;
  30.289 +	} else {
  30.290 +		/* Disable SMM mode (bit 1), access to SMM memory (bit 2) and
  30.291 +		 * access to SMM memory through ARR3 (bit 7).
  30.292 +		 */
  30.293 +		if (ccr[1] & 0x80) {
  30.294 +			ccr[1] &= 0x7f;
  30.295 +			ccrc[1] |= 0x80;
  30.296 +		}
  30.297 +		if (ccr[1] & 0x04) {
  30.298 +			ccr[1] &= 0xfb;
  30.299 +			ccrc[1] |= 0x04;
  30.300 +		}
  30.301 +		if (ccr[1] & 0x02) {
  30.302 +			ccr[1] &= 0xfd;
  30.303 +			ccrc[1] |= 0x02;
  30.304 +		}
  30.305 +		arr3_protected = 0;
  30.306 +		if (ccr[6] & 0x02) {
  30.307 +			ccr[6] &= 0xfd;
  30.308 +			ccrc[6] = 1;	/* Disable write protection of ARR3 */
  30.309 +			setCx86(CX86_CCR6, ccr[6]);
  30.310 +		}
  30.311 +		/* Disable ARR3. This is safe now that we disabled SMM. */
  30.312 +		/* cyrix_set_arr_up (3, 0, 0, 0, FALSE); */
  30.313 +	}
  30.314 +	/* If we changed CCR1 in memory, change it in the processor, too. */
  30.315 +	if (ccrc[1])
  30.316 +		setCx86(CX86_CCR1, ccr[1]);
  30.317 +
  30.318 +	/* Enable ARR usage by the processor */
  30.319 +	if (!(ccr[5] & 0x20)) {
  30.320 +		ccr[5] |= 0x20;
  30.321 +		ccrc[5] = 1;
  30.322 +		setCx86(CX86_CCR5, ccr[5]);
  30.323 +	}
  30.324 +#ifdef CONFIG_SMP
  30.325 +	for (i = 0; i < 7; i++)
  30.326 +		ccr_state[i] = ccr[i];
  30.327 +	for (i = 0; i < 8; i++)
  30.328 +		cyrix_get_arr(i,
  30.329 +			      &arr_state[i].base, &arr_state[i].size,
  30.330 +			      &arr_state[i].type);
  30.331 +#endif
  30.332 +
  30.333 +	set_mtrr_done(&ctxt);	/* flush cache and disable MAPEN */
  30.334 +
  30.335 +	if (ccrc[5])
  30.336 +		printk(KERN_INFO "mtrr: ARR usage was not enabled, enabled manually\n");
  30.337 +	if (ccrc[3])
  30.338 +		printk(KERN_INFO "mtrr: ARR3 cannot be changed\n");
  30.339 +/*
  30.340 +    if ( ccrc[1] & 0x80) printk ("mtrr: SMM memory access through ARR3 disabled\n");
  30.341 +    if ( ccrc[1] & 0x04) printk ("mtrr: SMM memory access disabled\n");
  30.342 +    if ( ccrc[1] & 0x02) printk ("mtrr: SMM mode disabled\n");
  30.343 +*/
  30.344 +	if (ccrc[6])
  30.345 +		printk(KERN_INFO "mtrr: ARR3 was write protected, unprotected\n");
  30.346 +}
  30.347 +
  30.348 +static struct mtrr_ops cyrix_mtrr_ops = {
  30.349 +	.vendor            = X86_VENDOR_CYRIX,
  30.350 +	.init              = cyrix_arr_init,
  30.351 +	.set_all	   = cyrix_set_all,
  30.352 +	.set               = cyrix_set_arr,
  30.353 +	.get               = cyrix_get_arr,
  30.354 +	.get_free_region   = cyrix_get_free_region,
  30.355 +	.validate_add_page = generic_validate_add_page,
  30.356 +	.have_wrcomb       = positive_have_wrcomb,
  30.357 +};
  30.358 +
  30.359 +int __init cyrix_init_mtrr(void)
  30.360 +{
  30.361 +	set_mtrr_ops(&cyrix_mtrr_ops);
  30.362 +	return 0;
  30.363 +}
  30.364 +
  30.365 +//arch_initcall(cyrix_init_mtrr);
    31.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    31.2 +++ b/xen/arch/x86/mtrr/generic.c	Mon Nov 29 16:09:15 2004 +0000
    31.3 @@ -0,0 +1,394 @@
    31.4 +/* This only handles 32bit MTRR on 32bit hosts. This is strictly wrong
     31.5 +   because MTRRs can span up to 40 bits (36 bits on most modern x86) */ 
    31.6 +#include <xen/init.h>
    31.7 +#include <xen/slab.h>
    31.8 +#include <xen/mm.h>
    31.9 +#include <asm/io.h>
   31.10 +#include <asm/mtrr.h>
   31.11 +#include <asm/msr.h>
   31.12 +#include <asm/system.h>
   31.13 +#include <asm/cpufeature.h>
   31.14 +//#include <asm/tlbflush.h>
   31.15 +#include "mtrr.h"
   31.16 +
   31.17 +struct mtrr_state {
   31.18 +	struct mtrr_var_range *var_ranges;
   31.19 +	mtrr_type fixed_ranges[NUM_FIXED_RANGES];
   31.20 +	unsigned char enabled;
   31.21 +	mtrr_type def_type;
   31.22 +};
   31.23 +
   31.24 +static unsigned long smp_changes_mask;
   31.25 +struct mtrr_state mtrr_state = {};
   31.26 +
   31.27 +
   31.28 +/*  Get the MSR pair relating to a var range  */
   31.29 +static void __init
   31.30 +get_mtrr_var_range(unsigned int index, struct mtrr_var_range *vr)
   31.31 +{
   31.32 +	rdmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi);
   31.33 +	rdmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi);
   31.34 +}
   31.35 +
   31.36 +static void __init
   31.37 +get_fixed_ranges(mtrr_type * frs)
   31.38 +{
   31.39 +	unsigned int *p = (unsigned int *) frs;
   31.40 +	int i;
   31.41 +
   31.42 +	rdmsr(MTRRfix64K_00000_MSR, p[0], p[1]);
   31.43 +
   31.44 +	for (i = 0; i < 2; i++)
   31.45 +		rdmsr(MTRRfix16K_80000_MSR + i, p[2 + i * 2], p[3 + i * 2]);
   31.46 +	for (i = 0; i < 8; i++)
   31.47 +		rdmsr(MTRRfix4K_C0000_MSR + i, p[6 + i * 2], p[7 + i * 2]);
   31.48 +}
   31.49 +
    31.50 +/*  Grab all of the MTRR state for this CPU into the global mtrr_state  */
   31.51 +void __init get_mtrr_state(void)
   31.52 +{
   31.53 +	unsigned int i;
   31.54 +	struct mtrr_var_range *vrs;
   31.55 +	unsigned lo, dummy;
   31.56 +
   31.57 +	if (!mtrr_state.var_ranges) {
   31.58 +		mtrr_state.var_ranges = xmalloc(num_var_ranges * sizeof (struct mtrr_var_range));
   31.59 +		if (!mtrr_state.var_ranges)
   31.60 +			return;
   31.61 +	} 
   31.62 +	vrs = mtrr_state.var_ranges;
   31.63 +
   31.64 +	for (i = 0; i < num_var_ranges; i++)
   31.65 +		get_mtrr_var_range(i, &vrs[i]);
   31.66 +	get_fixed_ranges(mtrr_state.fixed_ranges);
   31.67 +
   31.68 +	rdmsr(MTRRdefType_MSR, lo, dummy);
   31.69 +	mtrr_state.def_type = (lo & 0xff);
   31.70 +	mtrr_state.enabled = (lo & 0xc00) >> 10;
   31.71 +}
   31.72 +
   31.73 +/*  Free resources associated with a struct mtrr_state  */
   31.74 +void __init finalize_mtrr_state(void)
   31.75 +{
   31.76 +	if (mtrr_state.var_ranges)
   31.77 +		xfree(mtrr_state.var_ranges);
   31.78 +	mtrr_state.var_ranges = NULL;
   31.79 +}
   31.80 +
    31.81 +/*  Some broken BIOSes don't set all MTRRs the same!  */
   31.82 +void __init mtrr_state_warn(void)
   31.83 +{
   31.84 +	unsigned long mask = smp_changes_mask;
   31.85 +
   31.86 +	if (!mask)
   31.87 +		return;
   31.88 +	if (mask & MTRR_CHANGE_MASK_FIXED)
   31.89 +		printk(KERN_WARNING "mtrr: your CPUs had inconsistent fixed MTRR settings\n");
   31.90 +	if (mask & MTRR_CHANGE_MASK_VARIABLE)
   31.91 +		printk(KERN_WARNING "mtrr: your CPUs had inconsistent variable MTRR settings\n");
   31.92 +	if (mask & MTRR_CHANGE_MASK_DEFTYPE)
   31.93 +		printk(KERN_WARNING "mtrr: your CPUs had inconsistent MTRRdefType settings\n");
    31.94 +	printk(KERN_INFO "mtrr: probably your BIOS does not set up all CPUs.\n");
   31.95 +	printk(KERN_INFO "mtrr: corrected configuration.\n");
   31.96 +}
   31.97 +
   31.98 +
   31.99 +int generic_get_free_region(unsigned long base, unsigned long size)
  31.100 +/*  [SUMMARY] Get a free MTRR.
  31.101 +    <base> The starting (base) address of the region.
  31.102 +    <size> The size (in bytes) of the region.
   31.103 +    [RETURNS] The index of a free region on success, else -ENOSPC if none is available.
  31.104 +*/
  31.105 +{
  31.106 +	int i, max;
  31.107 +	mtrr_type ltype;
  31.108 +	unsigned long lbase;
  31.109 +	unsigned lsize;
  31.110 +
  31.111 +	max = num_var_ranges;
  31.112 +	for (i = 0; i < max; ++i) {
  31.113 +		mtrr_if->get(i, &lbase, &lsize, &ltype);
  31.114 +		if (lsize == 0)
  31.115 +			return i;
  31.116 +	}
  31.117 +	return -ENOSPC;
  31.118 +}
  31.119 +
  31.120 +void generic_get_mtrr(unsigned int reg, unsigned long *base,
  31.121 +		      unsigned int *size, mtrr_type * type)
  31.122 +{
  31.123 +	unsigned int mask_lo, mask_hi, base_lo, base_hi;
  31.124 +
  31.125 +	rdmsr(MTRRphysMask_MSR(reg), mask_lo, mask_hi);
  31.126 +	if ((mask_lo & 0x800) == 0) {
  31.127 +		/*  Invalid (i.e. free) range  */
  31.128 +		*base = 0;
  31.129 +		*size = 0;
  31.130 +		*type = 0;
  31.131 +		return;
  31.132 +	}
  31.133 +
  31.134 +	rdmsr(MTRRphysBase_MSR(reg), base_lo, base_hi);
  31.135 +
  31.136 +	/* Work out the shifted address mask. */
  31.137 +	mask_lo = size_or_mask | mask_hi << (32 - PAGE_SHIFT)
  31.138 +	    | mask_lo >> PAGE_SHIFT;
  31.139 +
  31.140 +	/* This works correctly if size is a power of two, i.e. a
  31.141 +	   contiguous range. */
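+	/* For example, with the 36-bit size_or_mask a 64MB range ends up
+	 * with mask_lo == 0xffffc000 here, giving *size == 0x4000 pages. */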
  31.142 +	*size = -mask_lo;
  31.143 +	*base = base_hi << (32 - PAGE_SHIFT) | base_lo >> PAGE_SHIFT;
  31.144 +	*type = base_lo & 0xff;
  31.145 +}
  31.146 +
  31.147 +static int set_fixed_ranges(mtrr_type * frs)
  31.148 +{
  31.149 +	unsigned int *p = (unsigned int *) frs;
  31.150 +	int changed = FALSE;
  31.151 +	int i;
  31.152 +	unsigned int lo, hi;
  31.153 +
  31.154 +	rdmsr(MTRRfix64K_00000_MSR, lo, hi);
  31.155 +	if (p[0] != lo || p[1] != hi) {
  31.156 +		wrmsr(MTRRfix64K_00000_MSR, p[0], p[1]);
  31.157 +		changed = TRUE;
  31.158 +	}
  31.159 +
  31.160 +	for (i = 0; i < 2; i++) {
  31.161 +		rdmsr(MTRRfix16K_80000_MSR + i, lo, hi);
  31.162 +		if (p[2 + i * 2] != lo || p[3 + i * 2] != hi) {
  31.163 +			wrmsr(MTRRfix16K_80000_MSR + i, p[2 + i * 2],
  31.164 +			      p[3 + i * 2]);
  31.165 +			changed = TRUE;
  31.166 +		}
  31.167 +	}
  31.168 +
  31.169 +	for (i = 0; i < 8; i++) {
  31.170 +		rdmsr(MTRRfix4K_C0000_MSR + i, lo, hi);
  31.171 +		if (p[6 + i * 2] != lo || p[7 + i * 2] != hi) {
  31.172 +			wrmsr(MTRRfix4K_C0000_MSR + i, p[6 + i * 2],
  31.173 +			      p[7 + i * 2]);
  31.174 +			changed = TRUE;
  31.175 +		}
  31.176 +	}
  31.177 +	return changed;
  31.178 +}
  31.179 +
  31.180 +/*  Set the MSR pair relating to a var range. Returns TRUE if
  31.181 +    changes are made  */
  31.182 +static int set_mtrr_var_ranges(unsigned int index, struct mtrr_var_range *vr)
  31.183 +{
  31.184 +	unsigned int lo, hi;
  31.185 +	int changed = FALSE;
  31.186 +
  31.187 +	rdmsr(MTRRphysBase_MSR(index), lo, hi);
  31.188 +	if ((vr->base_lo & 0xfffff0ffUL) != (lo & 0xfffff0ffUL)
  31.189 +	    || (vr->base_hi & 0xfUL) != (hi & 0xfUL)) {
  31.190 +		wrmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi);
  31.191 +		changed = TRUE;
  31.192 +	}
  31.193 +
  31.194 +	rdmsr(MTRRphysMask_MSR(index), lo, hi);
  31.195 +
  31.196 +	if ((vr->mask_lo & 0xfffff800UL) != (lo & 0xfffff800UL)
  31.197 +	    || (vr->mask_hi & 0xfUL) != (hi & 0xfUL)) {
  31.198 +		wrmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi);
  31.199 +		changed = TRUE;
  31.200 +	}
  31.201 +	return changed;
  31.202 +}
  31.203 +
  31.204 +static unsigned long set_mtrr_state(u32 deftype_lo, u32 deftype_hi)
  31.205 +/*  [SUMMARY] Set the MTRR state for this CPU.
   31.206 +    <deftype_lo> The saved low word of the MTRRdefType MSR.
   31.207 +    <deftype_hi> The saved high word of the MTRRdefType MSR.
   31.208 +    [NOTE] The CPU must already be in a safe state for MTRR changes.
   31.209 +    [RETURNS] 0 if no changes were made, else a mask indicating what was changed.
  31.210 +*/
  31.211 +{
  31.212 +	unsigned int i;
  31.213 +	unsigned long change_mask = 0;
  31.214 +
  31.215 +	for (i = 0; i < num_var_ranges; i++)
  31.216 +		if (set_mtrr_var_ranges(i, &mtrr_state.var_ranges[i]))
  31.217 +			change_mask |= MTRR_CHANGE_MASK_VARIABLE;
  31.218 +
  31.219 +	if (set_fixed_ranges(mtrr_state.fixed_ranges))
  31.220 +		change_mask |= MTRR_CHANGE_MASK_FIXED;
  31.221 +
   31.222 +	/*  post_set() restores the old value of MTRRdefType,
   31.223 +	   so to set it we fiddle with the saved value  */
  31.224 +	if ((deftype_lo & 0xff) != mtrr_state.def_type
  31.225 +	    || ((deftype_lo & 0xc00) >> 10) != mtrr_state.enabled) {
  31.226 +		deftype_lo |= (mtrr_state.def_type | mtrr_state.enabled << 10);
  31.227 +		change_mask |= MTRR_CHANGE_MASK_DEFTYPE;
  31.228 +	}
  31.229 +
  31.230 +	return change_mask;
  31.231 +}
  31.232 +
  31.233 +
  31.234 +static unsigned long cr4 = 0;
  31.235 +static u32 deftype_lo, deftype_hi;
  31.236 +static spinlock_t set_atomicity_lock = SPIN_LOCK_UNLOCKED;
  31.237 +
  31.238 +static void prepare_set(void)
  31.239 +{
  31.240 +	unsigned long cr0;
  31.241 +
  31.242 +	/*  Note that this is not ideal, since the cache is only flushed/disabled
  31.243 +	   for this CPU while the MTRRs are changed, but changing this requires
  31.244 +	   more invasive changes to the way the kernel boots  */
  31.245 +	spin_lock(&set_atomicity_lock);
  31.246 +
  31.247 +	/*  Enter the no-fill (CD=1, NW=0) cache mode and flush caches. */
  31.248 +	cr0 = read_cr0() | 0x40000000;	/* set CD flag */
  31.249 +	wbinvd();
  31.250 +	write_cr0(cr0);
  31.251 +	wbinvd();
  31.252 +
  31.253 +	/*  Save value of CR4 and clear Page Global Enable (bit 7)  */
  31.254 +	if ( cpu_has_pge ) {
  31.255 +		cr4 = read_cr4();
  31.256 +		write_cr4(cr4 & (unsigned char) ~(1 << 7));
  31.257 +	}
  31.258 +
  31.259 +	/* Flush all TLBs via a mov %cr3, %reg; mov %reg, %cr3 */
  31.260 +	__flush_tlb();
  31.261 +
  31.262 +	/*  Save MTRR state */
  31.263 +	rdmsr(MTRRdefType_MSR, deftype_lo, deftype_hi);
  31.264 +
  31.265 +	/*  Disable MTRRs, and set the default type to uncached  */
  31.266 +	wrmsr(MTRRdefType_MSR, deftype_lo & 0xf300UL, deftype_hi);
  31.267 +}
  31.268 +
  31.269 +static void post_set(void)
  31.270 +{
  31.271 +	/*  Flush caches and TLBs  */
  31.272 +	wbinvd();
  31.273 +	__flush_tlb();
  31.274 +
  31.275 +	/* Intel (P6) standard MTRRs */
  31.276 +	wrmsr(MTRRdefType_MSR, deftype_lo, deftype_hi);
  31.277 +		
  31.278 +	/*  Enable caches  */
  31.279 +	write_cr0(read_cr0() & 0xbfffffff);
  31.280 +
  31.281 +	/*  Restore value of CR4  */
  31.282 +	if ( cpu_has_pge )
  31.283 +		write_cr4(cr4);
  31.284 +	spin_unlock(&set_atomicity_lock);
  31.285 +}
  31.286 +
  31.287 +static void generic_set_all(void)
  31.288 +{
  31.289 +	unsigned long mask, count;
  31.290 +
  31.291 +	prepare_set();
  31.292 +
  31.293 +	/* Actually set the state */
  31.294 +	mask = set_mtrr_state(deftype_lo,deftype_hi);
  31.295 +
  31.296 +	post_set();
  31.297 +
  31.298 +	/*  Use the atomic bitops to update the global mask  */
  31.299 +	for (count = 0; count < sizeof mask * 8; ++count) {
  31.300 +		if (mask & 0x01)
  31.301 +			set_bit(count, &smp_changes_mask);
  31.302 +		mask >>= 1;
  31.303 +	}
  31.304 +	
  31.305 +}
  31.306 +
  31.307 +static void generic_set_mtrr(unsigned int reg, unsigned long base,
  31.308 +			     unsigned long size, mtrr_type type)
  31.309 +/*  [SUMMARY] Set variable MTRR register on the local CPU.
  31.310 +    <reg> The register to set.
  31.311 +    <base> The base address of the region.
  31.312 +    <size> The size of the region. If this is 0 the region is disabled.
  31.313 +    <type> The type of the region.
   31.314 +    [NOTE] <base> and <size> are in units of 4 KiB pages.
  31.316 +    [RETURNS] Nothing.
  31.317 +*/
  31.318 +{
  31.319 +	prepare_set();
  31.320 +
  31.321 +	if (size == 0) {
  31.322 +		/* The invalid bit is kept in the mask, so we simply clear the
  31.323 +		   relevant mask register to disable a range. */
  31.324 +		wrmsr(MTRRphysMask_MSR(reg), 0, 0);
  31.325 +	} else {
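+		/* base and size are in pages: PhysBase gets the byte address
+		 * plus the memory type, PhysMask gets -(size in bytes), i.e.
+		 * ~(size - 1), with the valid bit (0x800) set. */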
  31.326 +		wrmsr(MTRRphysBase_MSR(reg), base << PAGE_SHIFT | type,
  31.327 +		      (base & size_and_mask) >> (32 - PAGE_SHIFT));
  31.328 +		wrmsr(MTRRphysMask_MSR(reg), -size << PAGE_SHIFT | 0x800,
  31.329 +		      (-size & size_and_mask) >> (32 - PAGE_SHIFT));
  31.330 +	}
  31.331 +
  31.332 +	post_set();
  31.333 +}
  31.334 +
  31.335 +int generic_validate_add_page(unsigned long base, unsigned long size, unsigned int type)
  31.336 +{
  31.337 +	unsigned long lbase, last;
  31.338 +
  31.339 +	/*  For Intel PPro stepping <= 7, must be 4 MiB aligned 
  31.340 +	    and not touch 0x70000000->0x7003FFFF */
  31.341 +	if (is_cpu(INTEL) && boot_cpu_data.x86 == 6 &&
  31.342 +	    boot_cpu_data.x86_model == 1 &&
  31.343 +	    boot_cpu_data.x86_mask <= 7) {
  31.344 +		if (base & ((1 << (22 - PAGE_SHIFT)) - 1)) {
  31.345 +			printk(KERN_WARNING "mtrr: base(0x%lx000) is not 4 MiB aligned\n", base);
  31.346 +			return -EINVAL;
  31.347 +		}
  31.348 +		if (!(base + size < 0x70000000 || base > 0x7003FFFF) &&
  31.349 +		    (type == MTRR_TYPE_WRCOMB
  31.350 +		     || type == MTRR_TYPE_WRBACK)) {
  31.351 +			printk(KERN_WARNING "mtrr: writable mtrr between 0x70000000 and 0x7003FFFF may hang the CPU.\n");
  31.352 +			return -EINVAL;
  31.353 +		}
  31.354 +	}
  31.355 +
  31.356 +	if (base + size < 0x100) {
  31.357 +		printk(KERN_WARNING "mtrr: cannot set region below 1 MiB (0x%lx000,0x%lx000)\n",
  31.358 +		       base, size);
  31.359 +		return -EINVAL;
  31.360 +	}
  31.361 +	/*  Check upper bits of base and last are equal and lower bits are 0
  31.362 +	    for base and 1 for last  */
  31.363 +	last = base + size - 1;
  31.364 +	for (lbase = base; !(lbase & 1) && (last & 1);
  31.365 +	     lbase = lbase >> 1, last = last >> 1) ;
  31.366 +	if (lbase != last) {
  31.367 +		printk(KERN_WARNING "mtrr: base(0x%lx000) is not aligned on a size(0x%lx000) boundary\n",
  31.368 +		       base, size);
  31.369 +		return -EINVAL;
  31.370 +	}
  31.371 +	return 0;
  31.372 +}
  31.373 +
  31.374 +
  31.375 +int generic_have_wrcomb(void)
  31.376 +{
  31.377 +	unsigned long config, dummy;
  31.378 +	rdmsr(MTRRcap_MSR, config, dummy);
  31.379 +	return (config & (1 << 10));
  31.380 +}
  31.381 +
  31.382 +int positive_have_wrcomb(void)
  31.383 +{
  31.384 +	return 1;
  31.385 +}
  31.386 +
   31.387 +/* Generic (Intel-compatible) MTRR operations.
  31.388 + */
  31.389 +struct mtrr_ops generic_mtrr_ops = {
  31.390 +	.use_intel_if      = 1,
  31.391 +	.set_all	   = generic_set_all,
  31.392 +	.get               = generic_get_mtrr,
  31.393 +	.get_free_region   = generic_get_free_region,
  31.394 +	.set               = generic_set_mtrr,
  31.395 +	.validate_add_page = generic_validate_add_page,
  31.396 +	.have_wrcomb       = generic_have_wrcomb,
  31.397 +};
    32.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    32.2 +++ b/xen/arch/x86/mtrr/main.c	Mon Nov 29 16:09:15 2004 +0000
    32.3 @@ -0,0 +1,674 @@
    32.4 +/*  Generic MTRR (Memory Type Range Register) driver.
    32.5 +
    32.6 +    Copyright (C) 1997-2000  Richard Gooch
    32.7 +    Copyright (c) 2002	     Patrick Mochel
    32.8 +
    32.9 +    This library is free software; you can redistribute it and/or
   32.10 +    modify it under the terms of the GNU Library General Public
   32.11 +    License as published by the Free Software Foundation; either
   32.12 +    version 2 of the License, or (at your option) any later version.
   32.13 +
   32.14 +    This library is distributed in the hope that it will be useful,
   32.15 +    but WITHOUT ANY WARRANTY; without even the implied warranty of
   32.16 +    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   32.17 +    Library General Public License for more details.
   32.18 +
   32.19 +    You should have received a copy of the GNU Library General Public
   32.20 +    License along with this library; if not, write to the Free
   32.21 +    Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
   32.22 +
   32.23 +    Richard Gooch may be reached by email at  rgooch@atnf.csiro.au
   32.24 +    The postal address is:
   32.25 +      Richard Gooch, c/o ATNF, P. O. Box 76, Epping, N.S.W., 2121, Australia.
   32.26 +
   32.27 +    Source: "Pentium Pro Family Developer's Manual, Volume 3:
   32.28 +    Operating System Writer's Guide" (Intel document number 242692),
   32.29 +    section 11.11.7
   32.30 +
   32.31 +    This was cleaned and made readable by Patrick Mochel <mochel@osdl.org> 
   32.32 +    on 6-7 March 2002. 
   32.33 +    Source: Intel Architecture Software Developers Manual, Volume 3: 
   32.34 +    System Programming Guide; Section 9.11. (1997 edition - PPro).
   32.35 +*/
   32.36 +
   32.37 +#include <xen/config.h>
   32.38 +#include <xen/init.h>
   32.39 +#include <xen/pci.h>
   32.40 +#include <xen/smp.h>
   32.41 +#include <asm/mtrr.h>
   32.42 +#include <asm/uaccess.h>
   32.43 +#include <asm/processor.h>
   32.44 +#include <asm/msr.h>
   32.45 +#include "mtrr.h"
   32.46 +
   32.47 +#define MTRR_VERSION            "2.0 (20020519)"
   32.48 +
   32.49 +/* No blocking mutexes in Xen. Spin instead. */
   32.50 +#define DECLARE_MUTEX(_m) spinlock_t _m = SPIN_LOCK_UNLOCKED
   32.51 +#define down(_m) spin_lock(_m)
   32.52 +#define up(_m) spin_unlock(_m)
   32.53 +
   32.54 +#define num_booting_cpus() smp_num_cpus
   32.55 +
   32.56 +u32 num_var_ranges = 0;
   32.57 +
   32.58 +unsigned int *usage_table;
   32.59 +static DECLARE_MUTEX(main_lock);
   32.60 +
   32.61 +u32 size_or_mask, size_and_mask;
   32.62 +
   32.63 +static struct mtrr_ops * mtrr_ops[X86_VENDOR_NUM] = {};
   32.64 +
   32.65 +struct mtrr_ops * mtrr_if = NULL;
   32.66 +
   32.67 +__initdata char *mtrr_if_name[] = {
   32.68 +    "none", "Intel", "AMD K6", "Cyrix ARR", "Centaur MCR"
   32.69 +};
   32.70 +
   32.71 +static void set_mtrr(unsigned int reg, unsigned long base,
   32.72 +		     unsigned long size, mtrr_type type);
   32.73 +
   32.74 +extern int arr3_protected;
   32.75 +
   32.76 +static char *mtrr_strings[MTRR_NUM_TYPES] =
   32.77 +{
   32.78 +    "uncachable",               /* 0 */
   32.79 +    "write-combining",          /* 1 */
   32.80 +    "?",                        /* 2 */
   32.81 +    "?",                        /* 3 */
   32.82 +    "write-through",            /* 4 */
   32.83 +    "write-protect",            /* 5 */
   32.84 +    "write-back",               /* 6 */
   32.85 +};
   32.86 +
   32.87 +char *mtrr_attrib_to_str(int x)
   32.88 +{
   32.89 +	return (x <= 6) ? mtrr_strings[x] : "?";
   32.90 +}
   32.91 +
   32.92 +void set_mtrr_ops(struct mtrr_ops * ops)
   32.93 +{
   32.94 +	if (ops->vendor && ops->vendor < X86_VENDOR_NUM)
   32.95 +		mtrr_ops[ops->vendor] = ops;
   32.96 +}
   32.97 +
   32.98 +/*  Returns non-zero if we have the write-combining memory type  */
   32.99 +static int have_wrcomb(void)
  32.100 +{
  32.101 +	struct pci_dev *dev;
  32.102 +	
  32.103 +	if ((dev = pci_find_class(PCI_CLASS_BRIDGE_HOST << 8, NULL)) != NULL) {
   32.104 +		/* ServerWorks LE chipsets have problems with write-combining.
   32.105 +		   Don't allow it and leave room for other chipsets to be tagged. */
  32.106 +		if (dev->vendor == PCI_VENDOR_ID_SERVERWORKS &&
  32.107 +		    dev->device == PCI_DEVICE_ID_SERVERWORKS_LE) {
  32.108 +			printk(KERN_INFO "mtrr: Serverworks LE detected. Write-combining disabled.\n");
  32.109 +			return 0;
  32.110 +		}
   32.111 +		/* Intel 450NX errata #23. Non-ascending cacheline evictions to
   32.112 +		   write-combining memory may result in data corruption */
  32.113 +		if (dev->vendor == PCI_VENDOR_ID_INTEL &&
  32.114 +		    dev->device == PCI_DEVICE_ID_INTEL_82451NX)
  32.115 +		{
  32.116 +			printk(KERN_INFO "mtrr: Intel 450NX MMC detected. Write-combining disabled.\n");
  32.117 +			return 0;
  32.118 +		}
  32.119 +	}		
  32.120 +	return (mtrr_if->have_wrcomb ? mtrr_if->have_wrcomb() : 0);
  32.121 +}
  32.122 +
   32.123 +/*  This function determines the number of variable MTRRs and stores it in num_var_ranges  */
  32.124 +void __init set_num_var_ranges(void)
  32.125 +{
  32.126 +	unsigned long config = 0, dummy;
  32.127 +
  32.128 +	if (use_intel()) {
  32.129 +		rdmsr(MTRRcap_MSR, config, dummy);
  32.130 +	} else if (is_cpu(AMD))
  32.131 +		config = 2;
  32.132 +	else if (is_cpu(CYRIX) || is_cpu(CENTAUR))
  32.133 +		config = 8;
  32.134 +	num_var_ranges = config & 0xff;
  32.135 +}
  32.136 +
  32.137 +static void __init init_table(void)
  32.138 +{
  32.139 +	int i, max;
  32.140 +
  32.141 +	max = num_var_ranges;
  32.142 +	if ((usage_table = xmalloc(max * sizeof *usage_table))
  32.143 +	    == NULL) {
  32.144 +		printk(KERN_ERR "mtrr: could not allocate\n");
  32.145 +		return;
  32.146 +	}
  32.147 +	for (i = 0; i < max; i++)
  32.148 +		usage_table[i] = 1;
  32.149 +}
  32.150 +
  32.151 +struct set_mtrr_data {
  32.152 +	atomic_t	count;
  32.153 +	atomic_t	gate;
  32.154 +	unsigned long	smp_base;
  32.155 +	unsigned long	smp_size;
  32.156 +	unsigned int	smp_reg;
  32.157 +	mtrr_type	smp_type;
  32.158 +};
  32.159 +
  32.160 +#ifdef CONFIG_SMP
  32.161 +
  32.162 +static void ipi_handler(void *info)
  32.163 +/*  [SUMMARY] Synchronisation handler. Executed by "other" CPUs.
  32.164 +    [RETURNS] Nothing.
  32.165 +*/
  32.166 +{
  32.167 +	struct set_mtrr_data *data = info;
  32.168 +	unsigned long flags;
  32.169 +
  32.170 +	local_irq_save(flags);
  32.171 +
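+	/* Rendezvous with the master CPU in set_mtrr(): each decrement of
+	 * data->count marks the end of a phase (interrupts disabled, MTRRs
+	 * written, gate change observed). */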
  32.172 +	atomic_dec(&data->count);
  32.173 +	while(!atomic_read(&data->gate)) {
  32.174 +		cpu_relax();
  32.175 +		barrier();
  32.176 +	}
  32.177 +
  32.178 +	/*  The master has cleared me to execute  */
  32.179 +	if (data->smp_reg != ~0U) 
  32.180 +		mtrr_if->set(data->smp_reg, data->smp_base, 
  32.181 +			     data->smp_size, data->smp_type);
  32.182 +	else
  32.183 +		mtrr_if->set_all();
  32.184 +
  32.185 +	atomic_dec(&data->count);
  32.186 +	while(atomic_read(&data->gate)) {
  32.187 +		cpu_relax();
  32.188 +		barrier();
  32.189 +	}
  32.190 +	atomic_dec(&data->count);
  32.191 +	local_irq_restore(flags);
  32.192 +}
  32.193 +
  32.194 +#endif
  32.195 +
  32.196 +/**
  32.197 + * set_mtrr - update mtrrs on all processors
  32.198 + * @reg:	mtrr in question
  32.199 + * @base:	mtrr base
  32.200 + * @size:	mtrr size
  32.201 + * @type:	mtrr type
  32.202 + *
  32.203 + * This is kinda tricky, but fortunately, Intel spelled it out for us cleanly:
  32.204 + * 
  32.205 + * 1. Send IPI to do the following:
  32.206 + * 2. Disable Interrupts
  32.207 + * 3. Wait for all procs to do so 
  32.208 + * 4. Enter no-fill cache mode
  32.209 + * 5. Flush caches
  32.210 + * 6. Clear PGE bit
  32.211 + * 7. Flush all TLBs
  32.212 + * 8. Disable all range registers
  32.213 + * 9. Update the MTRRs
  32.214 + * 10. Enable all range registers
  32.215 + * 11. Flush all TLBs and caches again
  32.216 + * 12. Enter normal cache mode and reenable caching
  32.217 + * 13. Set PGE 
  32.218 + * 14. Wait for buddies to catch up
  32.219 + * 15. Enable interrupts.
  32.220 + * 
  32.221 + * What does that mean for us? Well, first we set data.count to the number
  32.222 + * of CPUs. As each CPU disables interrupts, it'll decrement it once. We wait
  32.223 + * until it hits 0 and proceed. We set the data.gate flag and reset data.count.
  32.224 + * Meanwhile, they are waiting for that flag to be set. Once it's set, each 
  32.225 + * CPU goes through the transition of updating MTRRs. The CPU vendors may each do it 
   32.226 + * differently, so we call the mtrr_if->set() callback and let each driver take care of it.
   32.227 + * When they're done, they again decrement data.count and wait for data.gate to 
  32.228 + * be reset. 
  32.229 + * When we finish, we wait for data.count to hit 0 and toggle the data.gate flag.
  32.230 + * Everyone then enables interrupts and we all continue on.
  32.231 + *
  32.232 + * Note that the mechanism is the same for UP systems, too; all the SMP stuff
  32.233 + * becomes nops.
  32.234 + */
  32.235 +static void set_mtrr(unsigned int reg, unsigned long base,
  32.236 +		     unsigned long size, mtrr_type type)
  32.237 +{
  32.238 +	struct set_mtrr_data data;
  32.239 +	unsigned long flags;
  32.240 +
  32.241 +	data.smp_reg = reg;
  32.242 +	data.smp_base = base;
  32.243 +	data.smp_size = size;
  32.244 +	data.smp_type = type;
  32.245 +	atomic_set(&data.count, num_booting_cpus() - 1);
  32.246 +	atomic_set(&data.gate,0);
  32.247 +
  32.248 +	/*  Start the ball rolling on other CPUs  */
  32.249 +	if (smp_call_function(ipi_handler, &data, 1, 0) != 0)
  32.250 +		panic("mtrr: timed out waiting for other CPUs\n");
  32.251 +
  32.252 +	local_irq_save(flags);
  32.253 +
  32.254 +	while(atomic_read(&data.count)) {
  32.255 +		cpu_relax();
  32.256 +		barrier();
  32.257 +	}
  32.258 +	/* ok, reset count and toggle gate */
  32.259 +	atomic_set(&data.count, num_booting_cpus() - 1);
  32.260 +	atomic_set(&data.gate,1);
  32.261 +
  32.262 +	/* do our MTRR business */
  32.263 +
  32.264 +	/* HACK!
   32.265 +	 * We use this same function to initialize the MTRRs at boot.
   32.266 +	 * The state of the boot CPU's MTRRs has been saved, and we want
   32.267 +	 * to replicate it across all the APs. 
   32.268 +	 * If we're doing that, @reg is set to ~0U (see init_other_cpus()).
  32.269 +	 */
  32.270 +	if (reg != ~0U) 
  32.271 +		mtrr_if->set(reg,base,size,type);
  32.272 +
  32.273 +	/* wait for the others */
  32.274 +	while(atomic_read(&data.count)) {
  32.275 +		cpu_relax();
  32.276 +		barrier();
  32.277 +	}
  32.278 +	atomic_set(&data.count, num_booting_cpus() - 1);
  32.279 +	atomic_set(&data.gate,0);
  32.280 +
  32.281 +	/*
   32.282 +	 * Wait here for everyone to have seen the gate change,
   32.283 +	 * so we're the last ones to touch 'data'.
  32.284 +	 */
  32.285 +	while(atomic_read(&data.count)) {
  32.286 +		cpu_relax();
  32.287 +		barrier();
  32.288 +	}
  32.289 +	local_irq_restore(flags);
  32.290 +}
  32.291 +
  32.292 +/**
  32.293 + *	mtrr_add_page - Add a memory type region
  32.294 + *	@base: Physical base address of region in pages (4 KB)
  32.295 + *	@size: Physical size of region in pages (4 KB)
  32.296 + *	@type: Type of MTRR desired
  32.297 + *	@increment: If this is true do usage counting on the region
  32.298 + *
  32.299 + *	Memory type region registers control the caching on newer Intel and
   32.300 + *	non-Intel processors. This function allows drivers to request that an
   32.301 + *	MTRR be added. The details and hardware specifics of each processor's
  32.302 + *	implementation are hidden from the caller, but nevertheless the 
  32.303 + *	caller should expect to need to provide a power of two size on an
  32.304 + *	equivalent power of two boundary.
  32.305 + *
   32.306 + *	If the region cannot be added, either because all regions are in use
   32.307 + *	or the CPU cannot support it, a negative value is returned. On success
  32.308 + *	the register number for this entry is returned, but should be treated
  32.309 + *	as a cookie only.
  32.310 + *
  32.311 + *	On a multiprocessor machine the changes are made to all processors.
  32.312 + *	This is required on x86 by the Intel processors.
  32.313 + *
  32.314 + *	The available types are
  32.315 + *
  32.316 + *	%MTRR_TYPE_UNCACHABLE	-	No caching
  32.317 + *
   32.318 + *	%MTRR_TYPE_WRBACK	-	Write data back in bursts whenever possible
  32.319 + *
  32.320 + *	%MTRR_TYPE_WRCOMB	-	Write data back soon but allow bursts
  32.321 + *
  32.322 + *	%MTRR_TYPE_WRTHROUGH	-	Cache reads but not writes
  32.323 + *
  32.324 + *	BUGS: Needs a quiet flag for the cases where drivers do not mind
  32.325 + *	failures and do not wish system log messages to be sent.
  32.326 + */
  32.327 +
  32.328 +int mtrr_add_page(unsigned long base, unsigned long size, 
  32.329 +		  unsigned int type, char increment)
  32.330 +{
  32.331 +	int i;
  32.332 +	mtrr_type ltype;
  32.333 +	unsigned long lbase;
  32.334 +	unsigned int lsize;
  32.335 +	int error;
  32.336 +
  32.337 +	if (!mtrr_if)
  32.338 +		return -ENXIO;
  32.339 +		
  32.340 +	if ((error = mtrr_if->validate_add_page(base,size,type)))
  32.341 +		return error;
  32.342 +
  32.343 +	if (type >= MTRR_NUM_TYPES) {
  32.344 +		printk(KERN_WARNING "mtrr: type: %u invalid\n", type);
  32.345 +		return -EINVAL;
  32.346 +	}
  32.347 +
  32.348 +	/*  If the type is WC, check that this processor supports it  */
  32.349 +	if ((type == MTRR_TYPE_WRCOMB) && !have_wrcomb()) {
  32.350 +		printk(KERN_WARNING
  32.351 +		       "mtrr: your processor doesn't support write-combining\n");
  32.352 +		return -ENOSYS;
  32.353 +	}
  32.354 +
  32.355 +	if (base & size_or_mask || size & size_or_mask) {
  32.356 +		printk(KERN_WARNING "mtrr: base or size exceeds the MTRR width\n");
  32.357 +		return -EINVAL;
  32.358 +	}
  32.359 +
  32.360 +	error = -EINVAL;
  32.361 +
  32.362 +	/*  Search for existing MTRR  */
  32.363 +	down(&main_lock);
  32.364 +	for (i = 0; i < num_var_ranges; ++i) {
  32.365 +		mtrr_if->get(i, &lbase, &lsize, &ltype);
  32.366 +		if (base >= lbase + lsize)
  32.367 +			continue;
  32.368 +		if ((base < lbase) && (base + size <= lbase))
  32.369 +			continue;
  32.370 +		/*  At this point we know there is some kind of overlap/enclosure  */
  32.371 +		if ((base < lbase) || (base + size > lbase + lsize)) {
  32.372 +			printk(KERN_WARNING
  32.373 +			       "mtrr: 0x%lx000,0x%lx000 overlaps existing"
  32.374 +			       " 0x%lx000,0x%x000\n", base, size, lbase,
  32.375 +			       lsize);
  32.376 +			goto out;
  32.377 +		}
  32.378 +		/*  New region is enclosed by an existing region  */
  32.379 +		if (ltype != type) {
  32.380 +			if (type == MTRR_TYPE_UNCACHABLE)
  32.381 +				continue;
  32.382 +			printk (KERN_WARNING "mtrr: type mismatch for %lx000,%lx000 old: %s new: %s\n",
  32.383 +			     base, size, mtrr_attrib_to_str(ltype),
  32.384 +			     mtrr_attrib_to_str(type));
  32.385 +			goto out;
  32.386 +		}
  32.387 +		if (increment)
  32.388 +			++usage_table[i];
  32.389 +		error = i;
  32.390 +		goto out;
  32.391 +	}
  32.392 +	/*  Search for an empty MTRR  */
  32.393 +	i = mtrr_if->get_free_region(base, size);
  32.394 +	if (i >= 0) {
  32.395 +		set_mtrr(i, base, size, type);
  32.396 +		usage_table[i] = 1;
  32.397 +	} else
  32.398 +		printk(KERN_INFO "mtrr: no more MTRRs available\n");
  32.399 +	error = i;
  32.400 + out:
  32.401 +	up(&main_lock);
  32.402 +	return error;
  32.403 +}
  32.404 +
  32.405 +/**
  32.406 + *	mtrr_add - Add a memory type region
  32.407 + *	@base: Physical base address of region
  32.408 + *	@size: Physical size of region
  32.409 + *	@type: Type of MTRR desired
  32.410 + *	@increment: If this is true do usage counting on the region
  32.411 + *
  32.412 + *	Memory type region registers control the caching on newer Intel and
   32.413 + *	non-Intel processors. This function allows drivers to request that an
   32.414 + *	MTRR be added. The details and hardware specifics of each processor's
  32.415 + *	implementation are hidden from the caller, but nevertheless the 
  32.416 + *	caller should expect to need to provide a power of two size on an
  32.417 + *	equivalent power of two boundary.
  32.418 + *
   32.419 + *	If the region cannot be added, either because all regions are in use
   32.420 + *	or the CPU cannot support it, a negative value is returned. On success
  32.421 + *	the register number for this entry is returned, but should be treated
  32.422 + *	as a cookie only.
  32.423 + *
  32.424 + *	On a multiprocessor machine the changes are made to all processors.
  32.425 + *	This is required on x86 by the Intel processors.
  32.426 + *
  32.427 + *	The available types are
  32.428 + *
  32.429 + *	%MTRR_TYPE_UNCACHABLE	-	No caching
  32.430 + *
   32.431 + *	%MTRR_TYPE_WRBACK	-	Write data back in bursts whenever possible
  32.432 + *
  32.433 + *	%MTRR_TYPE_WRCOMB	-	Write data back soon but allow bursts
  32.434 + *
  32.435 + *	%MTRR_TYPE_WRTHROUGH	-	Cache reads but not writes
  32.436 + *
  32.437 + *	BUGS: Needs a quiet flag for the cases where drivers do not mind
  32.438 + *	failures and do not wish system log messages to be sent.
  32.439 + */
  32.440 +
  32.441 +int
  32.442 +mtrr_add(unsigned long base, unsigned long size, unsigned int type,
  32.443 +	 char increment)
  32.444 +{
  32.445 +	if ((base & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1))) {
  32.446 +		printk(KERN_WARNING "mtrr: size and base must be multiples of 4 kiB\n");
  32.447 +		printk(KERN_DEBUG "mtrr: size: 0x%lx  base: 0x%lx\n", size, base);
  32.448 +		return -EINVAL;
  32.449 +	}
  32.450 +	return mtrr_add_page(base >> PAGE_SHIFT, size >> PAGE_SHIFT, type,
  32.451 +			     increment);
  32.452 +}
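+
+/* A typical (hypothetical) caller might map a framebuffer write-combined
+ * and release the region again when done; fb_base and fb_size are
+ * illustrative names only and must be page-aligned:
+ *
+ *	int reg = mtrr_add(fb_base, fb_size, MTRR_TYPE_WRCOMB, 1);
+ *	if (reg >= 0) {
+ *		... use the write-combined mapping ...
+ *		mtrr_del(reg, fb_base, fb_size);
+ *	}
+ */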
  32.453 +
  32.454 +/**
  32.455 + *	mtrr_del_page - delete a memory type region
  32.456 + *	@reg: Register returned by mtrr_add
  32.457 + *	@base: Physical base address
  32.458 + *	@size: Size of region
  32.459 + *
  32.460 + *	If register is supplied then base and size are ignored. This is
  32.461 + *	how drivers should call it.
  32.462 + *
  32.463 + *	Releases an MTRR region. If the usage count drops to zero the 
  32.464 + *	register is freed and the region returns to default state.
   32.465 + *	On success the register number is returned; on failure a negative
   32.466 + *	error code is returned.
  32.467 + */
  32.468 +
  32.469 +int mtrr_del_page(int reg, unsigned long base, unsigned long size)
  32.470 +{
  32.471 +	int i, max;
  32.472 +	mtrr_type ltype;
  32.473 +	unsigned long lbase;
  32.474 +	unsigned int lsize;
  32.475 +	int error = -EINVAL;
  32.476 +
  32.477 +	if (!mtrr_if)
  32.478 +		return -ENXIO;
  32.479 +
  32.480 +	max = num_var_ranges;
  32.481 +	down(&main_lock);
  32.482 +	if (reg < 0) {
  32.483 +		/*  Search for existing MTRR  */
  32.484 +		for (i = 0; i < max; ++i) {
  32.485 +			mtrr_if->get(i, &lbase, &lsize, &ltype);
  32.486 +			if (lbase == base && lsize == size) {
  32.487 +				reg = i;
  32.488 +				break;
  32.489 +			}
  32.490 +		}
  32.491 +		if (reg < 0) {
  32.492 +			printk(KERN_DEBUG "mtrr: no MTRR for %lx000,%lx000 found\n", base,
  32.493 +			       size);
  32.494 +			goto out;
  32.495 +		}
  32.496 +	}
  32.497 +	if (reg >= max) {
  32.498 +		printk(KERN_WARNING "mtrr: register: %d too big\n", reg);
  32.499 +		goto out;
  32.500 +	}
  32.501 +	if (is_cpu(CYRIX) && !use_intel()) {
  32.502 +		if ((reg == 3) && arr3_protected) {
  32.503 +			printk(KERN_WARNING "mtrr: ARR3 cannot be changed\n");
  32.504 +			goto out;
  32.505 +		}
  32.506 +	}
  32.507 +	mtrr_if->get(reg, &lbase, &lsize, &ltype);
  32.508 +	if (lsize < 1) {
  32.509 +		printk(KERN_WARNING "mtrr: MTRR %d not used\n", reg);
  32.510 +		goto out;
  32.511 +	}
  32.512 +	if (usage_table[reg] < 1) {
  32.513 +		printk(KERN_WARNING "mtrr: reg: %d has count=0\n", reg);
  32.514 +		goto out;
  32.515 +	}
  32.516 +	if (--usage_table[reg] < 1)
  32.517 +		set_mtrr(reg, 0, 0, 0);
  32.518 +	error = reg;
  32.519 + out:
  32.520 +	up(&main_lock);
  32.521 +	return error;
  32.522 +}
  32.523 +/**
  32.524 + *	mtrr_del - delete a memory type region
  32.525 + *	@reg: Register returned by mtrr_add
  32.526 + *	@base: Physical base address
  32.527 + *	@size: Size of region
  32.528 + *
  32.529 + *	If register is supplied then base and size are ignored. This is
  32.530 + *	how drivers should call it.
  32.531 + *
  32.532 + *	Releases an MTRR region. If the usage count drops to zero the 
  32.533 + *	register is freed and the region returns to default state.
   32.534 + *	On success the register number is returned; on failure a negative
   32.535 + *	error code is returned.
  32.536 + */
  32.537 +
  32.538 +int
  32.539 +mtrr_del(int reg, unsigned long base, unsigned long size)
  32.540 +{
  32.541 +	if ((base & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1))) {
  32.542 +		printk(KERN_INFO "mtrr: size and base must be multiples of 4 kiB\n");
  32.543 +		printk(KERN_DEBUG "mtrr: size: 0x%lx  base: 0x%lx\n", size, base);
  32.544 +		return -EINVAL;
  32.545 +	}
  32.546 +	return mtrr_del_page(reg, base >> PAGE_SHIFT, size >> PAGE_SHIFT);
  32.547 +}
  32.548 +
  32.549 +EXPORT_SYMBOL(mtrr_add);
  32.550 +EXPORT_SYMBOL(mtrr_del);
  32.551 +
  32.552 +/* HACK ALERT!
   32.553 + * These should be called implicitly, but we can't do that until all the initcall
  32.554 + * stuff is done...
  32.555 + */
  32.556 +extern void amd_init_mtrr(void);
  32.557 +extern void cyrix_init_mtrr(void);
  32.558 +extern void centaur_init_mtrr(void);
  32.559 +
  32.560 +static void __init init_ifs(void)
  32.561 +{
  32.562 +	amd_init_mtrr();
  32.563 +	cyrix_init_mtrr();
  32.564 +	centaur_init_mtrr();
  32.565 +}
  32.566 +
  32.567 +static void __init init_other_cpus(void)
  32.568 +{
  32.569 +	if (use_intel())
  32.570 +		get_mtrr_state();
  32.571 +
  32.572 +	/* bring up the other processors */
  32.573 +	set_mtrr(~0U,0,0,0);
  32.574 +
  32.575 +	if (use_intel()) {
  32.576 +		finalize_mtrr_state();
  32.577 +		mtrr_state_warn();
  32.578 +	}
  32.579 +}
  32.580 +
  32.581 +
  32.582 +struct mtrr_value {
  32.583 +	mtrr_type	ltype;
  32.584 +	unsigned long	lbase;
  32.585 +	unsigned int	lsize;
  32.586 +};
  32.587 +
  32.588 +/**
  32.589 + * mtrr_init - initialize mtrrs on the boot CPU
  32.590 + *
  32.591 + * This needs to be called early; before any of the other CPUs are 
  32.592 + * initialized (i.e. before smp_init()).
  32.593 + * 
  32.594 + */
  32.595 +static int __init mtrr_init(void)
  32.596 +{
  32.597 +	init_ifs();
  32.598 +
  32.599 +	if (cpu_has_mtrr) {
  32.600 +		mtrr_if = &generic_mtrr_ops;
  32.601 +		size_or_mask = 0xff000000;	/* 36 bits */
  32.602 +		size_and_mask = 0x00f00000;
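+		/* Both masks are in 4K page units: size_or_mask has bits set
+		 * above the assumed physical address width, size_and_mask
+		 * selects the page-number bits that belong in the high word
+		 * of the PhysBase/PhysMask MSRs. */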
  32.603 +			
  32.604 +		switch (boot_cpu_data.x86_vendor) {
  32.605 +		case X86_VENDOR_AMD:
  32.606 +			/* The original Athlon docs said that
  32.607 +			   total addressable memory is 44 bits wide.
  32.608 +			   It was not really clear whether its MTRRs
  32.609 +			   follow this or not. (Read: 44 or 36 bits).
  32.610 +			   However, "x86-64_overview.pdf" explicitly
  32.611 +			   states that "previous implementations support
  32.612 +			   36 bit MTRRs" and also provides a way to
  32.613 +			   query the width (in bits) of the physical
  32.614 +			   addressable memory on the Hammer family.
  32.615 +			 */
  32.616 +			if (boot_cpu_data.x86 == 15
  32.617 +			    && (cpuid_eax(0x80000000) >= 0x80000008)) {
  32.618 +				u32 phys_addr;
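+				/* CPUID 0x80000008 EAX[7:0] reports the
+				 * physical address width in bits; rebuild
+				 * the page-unit masks for that width. */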
  32.619 +				phys_addr = cpuid_eax(0x80000008) & 0xff;
  32.620 +				size_or_mask =
  32.621 +				    ~((1 << (phys_addr - PAGE_SHIFT)) - 1);
  32.622 +				size_and_mask = ~size_or_mask & 0xfff00000;
  32.623 +			}
  32.624 +			/* Athlon MTRRs use an Intel-compatible interface for 
  32.625 +			 * getting and setting */
  32.626 +			break;
  32.627 +		case X86_VENDOR_CENTAUR:
  32.628 +			if (boot_cpu_data.x86 == 6) {
   32.629 +				/* The VIA Cyrix family has Intel-style MTRRs, but doesn't support PAE */
  32.630 +				size_or_mask = 0xfff00000;	/* 32 bits */
  32.631 +				size_and_mask = 0;
  32.632 +			}
  32.633 +			break;
  32.634 +		
  32.635 +		default:
  32.636 +			break;
  32.637 +		}
  32.638 +	} else {
  32.639 +		switch (boot_cpu_data.x86_vendor) {
  32.640 +		case X86_VENDOR_AMD:
  32.641 +			if (cpu_has_k6_mtrr) {
  32.642 +				/* Pre-Athlon (K6) AMD CPU MTRRs */
  32.643 +				mtrr_if = mtrr_ops[X86_VENDOR_AMD];
  32.644 +				size_or_mask = 0xfff00000;	/* 32 bits */
  32.645 +				size_and_mask = 0;
  32.646 +			}
  32.647 +			break;
  32.648 +		case X86_VENDOR_CENTAUR:
  32.649 +			if (cpu_has_centaur_mcr) {
  32.650 +				mtrr_if = mtrr_ops[X86_VENDOR_CENTAUR];
  32.651 +				size_or_mask = 0xfff00000;	/* 32 bits */
  32.652 +				size_and_mask = 0;
  32.653 +			}
  32.654 +			break;
  32.655 +		case X86_VENDOR_CYRIX:
  32.656 +			if (cpu_has_cyrix_arr) {
  32.657 +				mtrr_if = mtrr_ops[X86_VENDOR_CYRIX];
  32.658 +				size_or_mask = 0xfff00000;	/* 32 bits */
  32.659 +				size_and_mask = 0;
  32.660 +			}
  32.661 +			break;
  32.662 +		default:
  32.663 +			break;
  32.664 +		}
  32.665 +	}
  32.666 +	printk(KERN_INFO "mtrr: v%s\n",MTRR_VERSION);
  32.667 +
  32.668 +	if (mtrr_if) {
  32.669 +		set_num_var_ranges();
  32.670 +		init_table();
  32.671 +		init_other_cpus();
  32.672 +		return 0;
  32.673 +	}
  32.674 +	return -ENXIO;
  32.675 +}
  32.676 +
  32.677 +__initcall(mtrr_init);
    33.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    33.2 +++ b/xen/arch/x86/mtrr/mtrr.h	Mon Nov 29 16:09:15 2004 +0000
    33.3 @@ -0,0 +1,99 @@
    33.4 +/*
    33.5 + * Local MTRR definitions.
    33.6 + */
    33.7 +
    33.8 +#ifndef TRUE
    33.9 +#define TRUE  1
   33.10 +#define FALSE 0
   33.11 +#endif
   33.12 +
   33.13 +#define MTRRcap_MSR     0x0fe
   33.14 +#define MTRRdefType_MSR 0x2ff
   33.15 +
   33.16 +#define MTRRphysBase_MSR(reg) (0x200 + 2 * (reg))
   33.17 +#define MTRRphysMask_MSR(reg) (0x200 + 2 * (reg) + 1)
   33.18 +
   33.19 +#define NUM_FIXED_RANGES 88
   33.20 +#define MTRRfix64K_00000_MSR 0x250
   33.21 +#define MTRRfix16K_80000_MSR 0x258
   33.22 +#define MTRRfix16K_A0000_MSR 0x259
   33.23 +#define MTRRfix4K_C0000_MSR 0x268
   33.24 +#define MTRRfix4K_C8000_MSR 0x269
   33.25 +#define MTRRfix4K_D0000_MSR 0x26a
   33.26 +#define MTRRfix4K_D8000_MSR 0x26b
   33.27 +#define MTRRfix4K_E0000_MSR 0x26c
   33.28 +#define MTRRfix4K_E8000_MSR 0x26d
   33.29 +#define MTRRfix4K_F0000_MSR 0x26e
   33.30 +#define MTRRfix4K_F8000_MSR 0x26f
   33.31 +
   33.32 +#define MTRR_CHANGE_MASK_FIXED     0x01
   33.33 +#define MTRR_CHANGE_MASK_VARIABLE  0x02
   33.34 +#define MTRR_CHANGE_MASK_DEFTYPE   0x04
   33.35 +
   33.36 +/* In the Intel processor's MTRR interface, the MTRR type is always held in
   33.37 +   an 8-bit field: */
   33.38 +typedef u8 mtrr_type;
   33.39 +
   33.40 +struct mtrr_ops {
   33.41 +	u32	vendor;
   33.42 +	u32	use_intel_if;
   33.43 +	void	(*init)(void);
   33.44 +	void	(*set)(unsigned int reg, unsigned long base,
   33.45 +		       unsigned long size, mtrr_type type);
   33.46 +	void	(*set_all)(void);
   33.47 +
   33.48 +	void	(*get)(unsigned int reg, unsigned long *base,
   33.49 +		       unsigned int *size, mtrr_type *type);
   33.50 +	int	(*get_free_region)(unsigned long base, unsigned long size);
   33.51 +
   33.52 +	int	(*validate_add_page)(unsigned long base, unsigned long size,
   33.53 +				     unsigned int type);
   33.54 +	int	(*have_wrcomb)(void);
   33.55 +};
   33.56 +
   33.57 +extern int generic_get_free_region(unsigned long base, unsigned long size);
   33.58 +extern int generic_validate_add_page(unsigned long base, unsigned long size,
   33.59 +				     unsigned int type);
   33.60 +
   33.61 +extern struct mtrr_ops generic_mtrr_ops;
   33.62 +
   33.63 +extern int generic_have_wrcomb(void);
   33.64 +extern int positive_have_wrcomb(void);
   33.65 +
   33.66 +/* library functions for processor-specific routines */
   33.67 +struct set_mtrr_context {
   33.68 +	unsigned long flags;
   33.69 +	unsigned long deftype_lo;
   33.70 +	unsigned long deftype_hi;
   33.71 +	unsigned long cr4val;
   33.72 +	unsigned long ccr3;
   33.73 +};
   33.74 +
   33.75 +struct mtrr_var_range {
   33.76 +	unsigned long base_lo;
   33.77 +	unsigned long base_hi;
   33.78 +	unsigned long mask_lo;
   33.79 +	unsigned long mask_hi;
   33.80 +};
   33.81 +
   33.82 +void set_mtrr_done(struct set_mtrr_context *ctxt);
   33.83 +void set_mtrr_cache_disable(struct set_mtrr_context *ctxt);
   33.84 +void set_mtrr_prepare_save(struct set_mtrr_context *ctxt);
   33.85 +
   33.86 +void get_mtrr_state(void);
   33.87 +
   33.88 +extern void set_mtrr_ops(struct mtrr_ops * ops);
   33.89 +
   33.90 +extern u32 size_or_mask, size_and_mask;
   33.91 +extern struct mtrr_ops * mtrr_if;
   33.92 +
   33.93 +#define is_cpu(vnd)	(mtrr_if && mtrr_if->vendor == X86_VENDOR_##vnd)
   33.94 +#define use_intel()	(mtrr_if && mtrr_if->use_intel_if == 1)
   33.95 +
   33.96 +extern unsigned int num_var_ranges;
   33.97 +
   33.98 +void finalize_mtrr_state(void);
   33.99 +void mtrr_state_warn(void);
  33.100 +char *mtrr_attrib_to_str(int x);
  33.101 +
  33.102 +extern char * mtrr_if_name[];
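
The MTRRphysBase_MSR()/MTRRphysMask_MSR() macros in the header above encode the interleaved base/mask MSR layout used for the variable ranges (0x200, 0x201, 0x202, ...). The small program below is illustration only, not part of the changeset; it prints the numbering using the same macros.

    /* Illustration only: prints the base/mask MSR pair for each variable
     * MTRR register, using the same macros as xen/arch/x86/mtrr/mtrr.h. */
    #include <stdio.h>

    #define MTRRphysBase_MSR(reg) (0x200 + 2 * (reg))
    #define MTRRphysMask_MSR(reg) (0x200 + 2 * (reg) + 1)

    int main(void)
    {
        /* Eight variable ranges is typical; the real count comes from MTRRcap. */
        for (int reg = 0; reg < 8; reg++)
            printf("var MTRR %d: base MSR %#x, mask MSR %#x\n",
                   reg, MTRRphysBase_MSR(reg), MTRRphysMask_MSR(reg));
        return 0;
    }
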
    34.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    34.2 +++ b/xen/arch/x86/mtrr/state.c	Mon Nov 29 16:09:15 2004 +0000
    34.3 @@ -0,0 +1,78 @@
    34.4 +#include <xen/mm.h>
    34.5 +#include <xen/init.h>
    34.6 +#include <asm/io.h>
    34.7 +#include <asm/mtrr.h>
    34.8 +#include <asm/msr.h>
    34.9 +#include "mtrr.h"
   34.10 +
   34.11 +
   34.12 +/*  Put the processor into a state where MTRRs can be safely set  */
   34.13 +void set_mtrr_prepare_save(struct set_mtrr_context *ctxt)
   34.14 +{
   34.15 +	unsigned int cr0;
   34.16 +
   34.17 +	/*  Disable interrupts locally  */
   34.18 +	local_irq_save(ctxt->flags);
   34.19 +
   34.20 +	if (use_intel() || is_cpu(CYRIX)) {
   34.21 +
   34.22 +		/*  Save value of CR4 and clear Page Global Enable (bit 7)  */
   34.23 +		if ( cpu_has_pge ) {
   34.24 +			ctxt->cr4val = read_cr4();
   34.25 +			write_cr4(ctxt->cr4val & (unsigned char) ~(1 << 7));
   34.26 +		}
   34.27 +
   34.28 +		/*  Disable and flush caches. Note that wbinvd flushes the TLBs as
   34.29 +		    a side-effect  */
   34.30 +		cr0 = read_cr0() | 0x40000000;
   34.31 +		wbinvd();
   34.32 +		write_cr0(cr0);
   34.33 +		wbinvd();
   34.34 +
   34.35 +		if (use_intel())
   34.36 +			/*  Save MTRR state */
   34.37 +			rdmsr(MTRRdefType_MSR, ctxt->deftype_lo, ctxt->deftype_hi);
   34.38 +		else
   34.39 +			/* Cyrix ARRs - everything else was excluded at the top */
   34.40 +			ctxt->ccr3 = getCx86(CX86_CCR3);
   34.41 +	}
   34.42 +}
   34.43 +
   34.44 +void set_mtrr_cache_disable(struct set_mtrr_context *ctxt)
   34.45 +{
   34.46 +	if (use_intel()) 
   34.47 +		/*  Disable MTRRs, and set the default type to uncached  */
   34.48 +		wrmsr(MTRRdefType_MSR, ctxt->deftype_lo & 0xf300UL,
   34.49 +		      ctxt->deftype_hi);
   34.50 +	else if (is_cpu(CYRIX))
   34.51 +		/* Cyrix ARRs - everything else was excluded at the top */
   34.52 +		setCx86(CX86_CCR3, (ctxt->ccr3 & 0x0f) | 0x10);
   34.53 +}
   34.54 +
   34.55 +/*  Restore the processor after a set_mtrr_prepare  */
   34.56 +void set_mtrr_done(struct set_mtrr_context *ctxt)
   34.57 +{
   34.58 +	if (use_intel() || is_cpu(CYRIX)) {
   34.59 +
   34.60 +		/*  Flush caches and TLBs  */
   34.61 +		wbinvd();
   34.62 +
   34.63 +		/*  Restore MTRRdefType  */
   34.64 +		if (use_intel())
   34.65 +			/* Intel (P6) standard MTRRs */
   34.66 +			wrmsr(MTRRdefType_MSR, ctxt->deftype_lo, ctxt->deftype_hi);
   34.67 +		else
   34.68 +			/* Cyrix ARRs - everything else was excluded at the top */
   34.69 +			setCx86(CX86_CCR3, ctxt->ccr3);
   34.70 +		
   34.71 +		/*  Enable caches  */
   34.72 +		write_cr0(read_cr0() & 0xbfffffff);
   34.73 +
   34.74 +		/*  Restore value of CR4  */
   34.75 +		if ( cpu_has_pge )
   34.76 +			write_cr4(ctxt->cr4val);
   34.77 +	}
   34.78 +	/*  Re-enable interrupts locally (if enabled previously)  */
   34.79 +	local_irq_restore(ctxt->flags);
   34.80 +}
   34.81 +
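
The three helpers in xen/arch/x86/mtrr/state.c above are meant to bracket any MTRR reprogramming: save state and disable caches, turn MTRRs off, write the new setting, then restore. The sketch below shows that intended calling sequence for a vendor `set' hook; it is illustrative only, compiles only inside the Xen tree, and my_program_range() is a hypothetical stand-in for the vendor-specific MSR/ARR writes.

    /* Illustrative sketch (not part of the changeset) of how a vendor `set'
     * hook is expected to use the helpers above. */
    #include "mtrr.h"

    static void my_program_range(unsigned int reg, unsigned long base,
                                 unsigned long size, mtrr_type type)
    {
        /* Vendor-specific MSR/ARR writes would go here. */
    }

    static void example_set(unsigned int reg, unsigned long base,
                            unsigned long size, mtrr_type type)
    {
        struct set_mtrr_context ctxt;

        set_mtrr_prepare_save(&ctxt);    /* IRQs off, caches flushed, state saved  */
        set_mtrr_cache_disable(&ctxt);   /* MTRRs off, default type uncached       */
        my_program_range(reg, base, size, type);
        set_mtrr_done(&ctxt);            /* restore deftype/CCR3, caches, CR4, IRQs */
    }
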
    35.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    35.2 +++ b/xen/include/asm-x86/mtrr.h	Mon Nov 29 16:09:15 2004 +0000
    35.3 @@ -0,0 +1,22 @@
    35.4 +#ifndef __ASM_X86_MTRR_H__
    35.5 +#define __ASM_X86_MTRR_H__
    35.6 +
    35.7 +#include <xen/config.h>
    35.8 +
    35.9 +/* These are the region types. They match the architectural specification. */
   35.10 +#define MTRR_TYPE_UNCACHABLE 0
   35.11 +#define MTRR_TYPE_WRCOMB     1
   35.12 +#define MTRR_TYPE_WRTHROUGH  4
   35.13 +#define MTRR_TYPE_WRPROT     5
   35.14 +#define MTRR_TYPE_WRBACK     6
   35.15 +#define MTRR_NUM_TYPES       7
   35.16 +
   35.17 +extern int mtrr_add(unsigned long base, unsigned long size,
   35.18 +                    unsigned int type, char increment);
   35.19 +extern int mtrr_add_page(unsigned long base, unsigned long size,
   35.20 +                         unsigned int type, char increment);
   35.21 +extern int mtrr_del(int reg, unsigned long base, unsigned long size);
   35.22 +extern int mtrr_del_page(int reg, unsigned long base, unsigned long size);
   35.23 +extern void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi);
   35.24 +
   35.25 +#endif /* __ASM_X86_MTRR_H__ */
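
The public interface declared above mirrors the Linux one. Assuming the usual semantics (mtrr_add() returns the allocated register number on success, or a negative error code), a caller inside Xen might mark a hypothetical frame-buffer aperture write-combining as sketched below; the base address, size and function name are illustrative only.

    /* Illustration only; base/size are made-up values. */
    #include <asm/mtrr.h>

    static int example_enable_wc(void)
    {
        int reg;

        /* 16MB aperture at 0xf8000000; the final `1' bumps the usage count. */
        reg = mtrr_add(0xf8000000UL, 0x1000000UL, MTRR_TYPE_WRCOMB, 1);
        if (reg < 0)
            return reg;        /* no free variable range, or invalid arguments */

        /* ... region is now write-combining ... */

        return mtrr_del(reg, 0xf8000000UL, 0x1000000UL);
    }
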
    36.1 --- a/xen/include/asm-x86/processor.h	Mon Nov 29 16:03:16 2004 +0000
    36.2 +++ b/xen/include/asm-x86/processor.h	Mon Nov 29 16:09:15 2004 +0000
    36.3 @@ -32,6 +32,7 @@
    36.4  #define X86_VENDOR_TRANSMETA 7
    36.5  #define X86_VENDOR_NSC 8
    36.6  #define X86_VENDOR_SIS 9
    36.7 +#define X86_VENDOR_NUM 10
    36.8  #define X86_VENDOR_UNKNOWN 0xff
    36.9  
   36.10  /*
   36.11 @@ -256,6 +257,16 @@ static inline unsigned int cpuid_edx(uns
   36.12  #define write_cr0(x) \
   36.13  	__asm__("mov"__OS" %0,%%cr0": :"r" (x));
   36.14  
   36.15 +#define read_cr4() ({ \
   36.16 +	unsigned int __dummy; \
   36.17 +	__asm__( \
   36.18 +		"movl %%cr4,%0\n\t" \
   36.19 +		:"=r" (__dummy)); \
   36.20 +	__dummy; \
   36.21 +})
   36.22 +
   36.23 +#define write_cr4(x) \
   36.24 +	__asm__("movl %0,%%cr4": :"r" (x));
   36.25  
   36.26  /*
   36.27   * Save the cr4 feature set we're using (ie
   36.28 @@ -285,6 +296,37 @@ static inline void clear_in_cr4 (unsigne
   36.29              :"ax");
   36.30  }
   36.31  
   36.32 +/*
   36.33 + *      NSC/Cyrix CPU configuration register indexes
   36.34 + */
   36.35 +
   36.36 +#define CX86_PCR0 0x20
   36.37 +#define CX86_GCR  0xb8
   36.38 +#define CX86_CCR0 0xc0
   36.39 +#define CX86_CCR1 0xc1
   36.40 +#define CX86_CCR2 0xc2
   36.41 +#define CX86_CCR3 0xc3
   36.42 +#define CX86_CCR4 0xe8
   36.43 +#define CX86_CCR5 0xe9
   36.44 +#define CX86_CCR6 0xea
   36.45 +#define CX86_CCR7 0xeb
   36.46 +#define CX86_PCR1 0xf0
   36.47 +#define CX86_DIR0 0xfe
   36.48 +#define CX86_DIR1 0xff
   36.49 +#define CX86_ARR_BASE 0xc4
   36.50 +#define CX86_RCR_BASE 0xdc
   36.51 +
   36.52 +/*
   36.53 + *      NSC/Cyrix CPU indexed register access macros
   36.54 + */
   36.55 +
   36.56 +#define getCx86(reg) ({ outb((reg), 0x22); inb(0x23); })
   36.57 +
   36.58 +#define setCx86(reg, data) do { \
   36.59 +	outb((reg), 0x22); \
   36.60 +	outb((data), 0x23); \
   36.61 +} while (0)
   36.62 +
   36.63  #define IOBMP_BYTES             8192
   36.64  #define IOBMP_BYTES_PER_SELBIT  (IOBMP_BYTES / 64)
   36.65  #define IOBMP_BITS_PER_SELBIT   (IOBMP_BYTES_PER_SELBIT * 8)
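
The getCx86()/setCx86() macros added above access the NSC/Cyrix configuration registers indirectly through I/O ports 0x22/0x23. The sketch below shows the CCR3/MAPEN toggle that xen/arch/x86/mtrr/state.c performs around ARR updates; it is illustrative only and assumes <asm/io.h> for outb()/inb().

    /* Illustrative sketch (not from the changeset): save CCR3, set MAPEN to
     * expose the configuration registers, then restore CCR3 afterwards. */
    #include <asm/io.h>
    #include <asm/processor.h>

    static void example_ccr3_toggle(void)
    {
        unsigned char ccr3 = getCx86(CX86_CCR3);      /* save CCR3 */

        setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10);     /* enable MAPEN */
        /* ... program ARR/RCR registers here ... */
        setCx86(CX86_CCR3, ccr3);                     /* restore CCR3 */
    }
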
    37.1 --- a/xen/include/public/dom0_ops.h	Mon Nov 29 16:03:16 2004 +0000
    37.2 +++ b/xen/include/public/dom0_ops.h	Mon Nov 29 16:09:15 2004 +0000
    37.3 @@ -336,6 +336,56 @@ typedef struct {
    37.4      u32          type;                /*  8: vm_assist cmd */
    37.5  } PACKED dom0_setdomainvmassist_t; /* 12 bytes */
    37.6  
    37.7 +/*
    37.8 + * Request memory range (@pfn, @pfn+@nr_pfns-1) to have type @type.
    37.9 + * On x86, @type is an architecture-defined MTRR memory type.
   37.10 + * On success, returns the MTRR that was used (@reg) and a handle that can
   37.11 + * be passed to DOM0_DEL_MEMTYPE to accurately tear down the new setting.
   37.12 + * (x86-specific).
   37.13 + */
   37.14 +#define DOM0_ADD_MEMTYPE         31
   37.15 +typedef struct {
   37.16 +    /* IN variables. */
   37.17 +    memory_t pfn;                     /*  0 */
   37.18 +    MEMORY_PADDING;
   37.19 +    memory_t nr_pfns;                 /*  8 */
   37.20 +    MEMORY_PADDING;
   37.21 +    u32      type;                    /* 16 */
   37.22 +    u32      __pad0;
   37.23 +    /* OUT variables. */
   37.24 +    u32      handle;                  /* 24 */
   37.25 +    u32      reg;                     /* 28 */
   37.26 +} PACKED dom0_add_memtype_t; /* 32 bytes */
   37.27 +
   37.28 +/*
   37.29 + * Tear down an existing memory-range type. If @handle is remembered then it
   37.30 + * should be passed in to accurately tear down the correct setting (in case
   37.31 + * of overlapping memory regions with differing types). If it is not known
   37.32 + * then @handle should be set to zero. In all cases @reg must be set.
   37.33 + * (x86-specific).
   37.34 + */
   37.35 +#define DOM0_DEL_MEMTYPE         32
   37.36 +typedef struct {
   37.37 +    /* IN variables. */
   37.38 +    u32      handle;                  /*  0 */
   37.39 +    u32      reg;                     /*  4 */
   37.40 +} PACKED dom0_del_memtype_t; /* 8 bytes */
   37.41 +
   37.42 +/* Read current type of an MTRR (x86-specific). */
   37.43 +#define DOM0_READ_MEMTYPE        33
   37.44 +typedef struct {
   37.45 +    /* IN variables. */
   37.46 +    u32      reg;                     /*  0 */
   37.47 +    u32      __pad0;
   37.48 +    /* OUT variables. */
   37.49 +    memory_t pfn;                     /*  8 */
   37.50 +    MEMORY_PADDING;
   37.51 +    memory_t nr_pfns;                 /* 16 */
   37.52 +    MEMORY_PADDING;
   37.53 +    u32      type;                    /* 24 */
   37.54 +    u32      __pad1;
   37.55 +} PACKED dom0_read_memtype_t; /* 32 bytes */
   37.56 +
   37.57  typedef struct {
   37.58      u32 cmd;                          /* 0 */
   37.59      u32 interface_version;            /* 4 */ /* DOM0_INTERFACE_VERSION */
   37.60 @@ -366,6 +416,9 @@ typedef struct {
   37.61  	dom0_setdomainmaxmem_t   setdomainmaxmem;
   37.62  	dom0_getpageframeinfo2_t getpageframeinfo2;
   37.63  	dom0_setdomainvmassist_t setdomainvmassist;
   37.64 +	dom0_add_memtype_t       add_memtype;
   37.65 +	dom0_del_memtype_t       del_memtype;
   37.66 +	dom0_read_memtype_t      read_memtype;
   37.67      } PACKED u;
   37.68  } PACKED dom0_op_t; /* 80 bytes */
   37.69
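
The three new sub-ops above are driven from dom0 via a dom0_op hypercall. The fragment below sketches how a management tool might fill in DOM0_ADD_MEMTYPE; it is not part of the changeset, do_dom0_op() is a stand-in for whatever hypercall wrapper the caller uses, and the include path and pfn values are illustrative.

    /* Illustrative only: request write-combining for a hypothetical range.
     * do_dom0_op() stands in for the caller's hypercall wrapper. */
    #include <string.h>
    #include <public/dom0_ops.h>              /* include path is illustrative */

    extern int do_dom0_op(dom0_op_t *op);     /* stand-in hypercall wrapper */

    static int example_add_wc(memory_t pfn, memory_t nr_pfns)
    {
        dom0_op_t op;

        memset(&op, 0, sizeof(op));
        op.cmd = DOM0_ADD_MEMTYPE;
        op.interface_version = DOM0_INTERFACE_VERSION;
        op.u.add_memtype.pfn     = pfn;
        op.u.add_memtype.nr_pfns = nr_pfns;
        op.u.add_memtype.type    = 1;         /* MTRR_TYPE_WRCOMB */

        if (do_dom0_op(&op) != 0)
            return -1;

        /* op.u.add_memtype.reg and .handle come back for DOM0_DEL_MEMTYPE. */
        return (int)op.u.add_memtype.reg;
    }
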