ia64/xen-unstable

changeset 7733:40fc727dd1c0

Changes so that Xen can be compiled with gcc 4.0.2 (by Tristan Gingold):
* functions must be declared before being called.
* a cast or a conditional expression is not an lvalue (see the sketch after this list).
* a function cannot be declared static and then redeclared extern (or the reverse).
* gcc 4.0.2 misses a range optimization (ia64_setreg_unknown_kr).
* ia64_ksyms is not used (removed from the Makefile).
* (added by Dan M: since it is now modified, move gfp.h from linux to linux-xen)
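For reference, a minimal sketch (not part of the changeset) of the constructs gcc 4.0.2 rejects and the equivalent forms this patch switches to; all identifiers in it (example_t, carve_entry, carve_from_end) are hypothetical:

    /* Declare before use: calling carve_entry() without this forward
     * declaration would be an implicit declaration, which the stricter
     * build now rejects (cf. the write_ipi declaration added to mmio.c). */
    typedef struct { int field; } example_t;

    static example_t *carve_entry(void *cursor);

    example_t *carve_from_end(void *base, unsigned long size)
    {
        return carve_entry((char *)base + size);
    }

    /* Keep the linkage a function was first declared with: declaring it
     * static here and extern elsewhere (or the reverse) is an error. */
    static example_t *carve_entry(void *cursor)
    {
        example_t *p;

        /* Rejected by gcc 4.0.2, because the result of a cast is not an
         * lvalue:
         *     p = --((example_t *)cursor);
         * Accepted equivalent, as done in vmmu.c: step the cursor back,
         * then assign it. */
        cursor = (char *)cursor - sizeof (example_t);
        p = cursor;
        return p;
    }
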
author djm@kirby.fc.hp.com
date Wed Nov 16 17:45:36 2005 -0600 (2005-11-16)
parents 36cea432bbed
children 8c42a46de7f8
files xen/arch/ia64/Makefile xen/arch/ia64/vmx/mmio.c xen/arch/ia64/vmx/vmmu.c xen/arch/ia64/xen/vcpu.c xen/include/asm-ia64/linux-xen/linux/README.origin xen/include/asm-ia64/linux-xen/linux/gfp.h xen/include/asm-ia64/linux-xen/linux/interrupt.h xen/include/asm-ia64/linux/README.origin xen/include/asm-ia64/mm.h xen/include/asm-ia64/vmmu.h xen/include/asm-ia64/vmx_vcpu.h
line diff
     1.1 --- a/xen/arch/ia64/Makefile	Wed Nov 16 16:59:41 2005 -0600
     1.2 +++ b/xen/arch/ia64/Makefile	Wed Nov 16 17:45:36 2005 -0600
     1.3 @@ -2,7 +2,7 @@ include $(BASEDIR)/Rules.mk
     1.4  
     1.5  VPATH = xen vmx linux linux-xen
     1.6  
     1.7 -OBJS = xensetup.o setup.o time.o irq.o ia64_ksyms.o process.o smp.o \
     1.8 +OBJS = xensetup.o setup.o time.o irq.o process.o smp.o \
     1.9  	xenmisc.o acpi.o hypercall.o \
    1.10  	machvec.o dom0_ops.o domain.o hpsimserial.o pcdp.o \
    1.11  	idle0_task.o pal.o hpsim.o efi.o efi_stub.o ivt.o mm_contig.o \
    1.12 @@ -10,7 +10,7 @@ OBJS = xensetup.o setup.o time.o irq.o i
    1.13  	extable.o linuxextable.o sort.o xenirq.o xentime.o \
    1.14  	regionreg.o entry.o unaligned.o privop.o vcpu.o \
    1.15  	irq_ia64.o irq_lsapic.o vhpt.o xenasm.o hyperprivop.o dom_fw.o \
    1.16 -	grant_table.o sn_console.o
    1.17 +	grant_table.o sn_console.o # ia64_ksyms.o 
    1.18  
    1.19  OBJS += vmx_init.o vmx_virt.o vmx_vcpu.o vmx_process.o vmx_vsa.o vmx_ivt.o\
    1.20  	vmx_phy_mode.o vmx_utility.o vmx_interrupt.o vmx_entry.o vmmu.o \
    1.21 @@ -45,7 +45,7 @@ asm-offsets.s: asm-offsets.c $(BASEDIR)/
    1.22  
    1.23  asm-xsi-offsets.s: asm-xsi-offsets.c 
    1.24  	$(CC) $(CFLAGS) -S -o $@ $<
    1.25 -	
    1.26 +
    1.27  $(BASEDIR)/include/asm-ia64/asm-xsi-offsets.h: asm-xsi-offsets.s
    1.28  	@(set -e; \
    1.29  	  echo "/*"; \
     2.1 --- a/xen/arch/ia64/vmx/mmio.c	Wed Nov 16 16:59:41 2005 -0600
     2.2 +++ b/xen/arch/ia64/vmx/mmio.c	Wed Nov 16 17:45:36 2005 -0600
     2.3 @@ -49,6 +49,8 @@ struct mmio_list *lookup_mmio(u64 gpa, s
     2.4  #define PIB_OFST_INTA           0x1E0000
     2.5  #define PIB_OFST_XTP            0x1E0008
     2.6  
     2.7 +static int write_ipi (VCPU *vcpu, uint64_t addr, uint64_t value);
     2.8 +
     2.9  static void pib_write(VCPU *vcpu, void *src, uint64_t pib_off, size_t s, int ma)
    2.10  {
    2.11      switch (pib_off) {
     3.1 --- a/xen/arch/ia64/vmx/vmmu.c	Wed Nov 16 16:59:41 2005 -0600
     3.2 +++ b/xen/arch/ia64/vmx/vmmu.c	Wed Nov 16 17:45:36 2005 -0600
     3.3 @@ -157,11 +157,13 @@ static thash_cb_t *init_domain_vhpt(stru
     3.4      printk("Allocate domain vhpt at 0x%lx\n", (u64)vbase);
     3.5      memset(vbase, 0, VCPU_TLB_SIZE);
     3.6      vcur = (void*)((u64)vbase + VCPU_TLB_SIZE);
     3.7 -    vhpt = --((thash_cb_t*)vcur);
     3.8 +    vcur -= sizeof (thash_cb_t);
     3.9 +    vhpt = vcur;
    3.10      vhpt->ht = THASH_VHPT;
    3.11      vhpt->vcpu = d;
    3.12      vhpt->hash_func = machine_thash;
    3.13 -    vs = --((vhpt_special *)vcur);
     3.14 +    vcur -= sizeof (vhpt_special);
    3.15 +    vs = vcur;
    3.16  
    3.17      /* Setup guest pta */
    3.18      pta_value.val = 0;
    3.19 @@ -199,10 +201,12 @@ thash_cb_t *init_domain_tlb(struct vcpu 
    3.20      printk("Allocate domain tlb at 0x%lx\n", (u64)vbase);
    3.21      memset(vbase, 0, VCPU_TLB_SIZE);
    3.22      vcur = (void*)((u64)vbase + VCPU_TLB_SIZE);
    3.23 -    tlb = --((thash_cb_t*)vcur);
    3.24 +    vcur -= sizeof (thash_cb_t);
    3.25 +    tlb = vcur;
    3.26      tlb->ht = THASH_TLB;
    3.27      tlb->vcpu = d;
    3.28 -    ts = --((tlb_special_t *)vcur);
    3.29 +    vcur -= sizeof (tlb_special_t);
    3.30 +    ts = vcur;
    3.31      tlb->ts = ts;
    3.32      tlb->ts->vhpt = init_domain_vhpt(d);
    3.33      tlb->hash_func = machine_thash;
     4.1 --- a/xen/arch/ia64/xen/vcpu.c	Wed Nov 16 16:59:41 2005 -0600
     4.2 +++ b/xen/arch/ia64/xen/vcpu.c	Wed Nov 16 17:45:36 2005 -0600
     4.3 @@ -147,6 +147,9 @@ void vcpu_load_kernel_regs(VCPU *vcpu)
     4.4  	ia64_set_kr(7, VCPU(vcpu, krs[7]));
     4.5  }
     4.6  
     4.7 +/* GCC 4.0.2 seems not to be able to suppress this call!.  */
     4.8 +#define ia64_setreg_unknown_kr() return IA64_ILLOP_FAULT
     4.9 +
    4.10  IA64FAULT vcpu_set_ar(VCPU *vcpu, UINT64 reg, UINT64 val)
    4.11  {
    4.12  	if (reg == 44) return (vcpu_set_itc(vcpu,val));
     5.1 --- a/xen/include/asm-ia64/linux-xen/linux/README.origin	Wed Nov 16 16:59:41 2005 -0600
     5.2 +++ b/xen/include/asm-ia64/linux-xen/linux/README.origin	Wed Nov 16 17:45:36 2005 -0600
     5.3 @@ -6,5 +6,6 @@
     5.4  # easily updated to future versions of the corresponding Linux files.
     5.5  
     5.6  cpumask.h 		-> linux/include/linux/cpumask.h
     5.7 +gfp.h	 		-> linux/include/linux/gfp.h
     5.8  hardirq.h 		-> linux/include/linux/hardirq.h
     5.9  interrupt.h 		-> linux/include/linux/interrupt.h
     6.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     6.2 +++ b/xen/include/asm-ia64/linux-xen/linux/gfp.h	Wed Nov 16 17:45:36 2005 -0600
     6.3 @@ -0,0 +1,145 @@
     6.4 +#ifndef __LINUX_GFP_H
     6.5 +#define __LINUX_GFP_H
     6.6 +
     6.7 +#ifdef XEN
     6.8 +#include <asm/bitops.h>
     6.9 +#endif
    6.10 +#include <linux/mmzone.h>
    6.11 +#include <linux/stddef.h>
    6.12 +#include <linux/linkage.h>
    6.13 +#include <linux/config.h>
    6.14 +
    6.15 +struct vm_area_struct;
    6.16 +
    6.17 +/*
    6.18 + * GFP bitmasks..
    6.19 + */
    6.20 +/* Zone modifiers in GFP_ZONEMASK (see linux/mmzone.h - low two bits) */
    6.21 +#define __GFP_DMA	0x01u
    6.22 +#define __GFP_HIGHMEM	0x02u
    6.23 +
    6.24 +/*
    6.25 + * Action modifiers - doesn't change the zoning
    6.26 + *
    6.27 + * __GFP_REPEAT: Try hard to allocate the memory, but the allocation attempt
    6.28 + * _might_ fail.  This depends upon the particular VM implementation.
    6.29 + *
    6.30 + * __GFP_NOFAIL: The VM implementation _must_ retry infinitely: the caller
    6.31 + * cannot handle allocation failures.
    6.32 + *
    6.33 + * __GFP_NORETRY: The VM implementation must not retry indefinitely.
    6.34 + */
    6.35 +#define __GFP_WAIT	0x10u	/* Can wait and reschedule? */
    6.36 +#define __GFP_HIGH	0x20u	/* Should access emergency pools? */
    6.37 +#define __GFP_IO	0x40u	/* Can start physical IO? */
    6.38 +#define __GFP_FS	0x80u	/* Can call down to low-level FS? */
    6.39 +#define __GFP_COLD	0x100u	/* Cache-cold page required */
    6.40 +#define __GFP_NOWARN	0x200u	/* Suppress page allocation failure warning */
    6.41 +#define __GFP_REPEAT	0x400u	/* Retry the allocation.  Might fail */
    6.42 +#define __GFP_NOFAIL	0x800u	/* Retry for ever.  Cannot fail */
    6.43 +#define __GFP_NORETRY	0x1000u	/* Do not retry.  Might fail */
    6.44 +#define __GFP_NO_GROW	0x2000u	/* Slab internal usage */
    6.45 +#define __GFP_COMP	0x4000u	/* Add compound page metadata */
    6.46 +#define __GFP_ZERO	0x8000u	/* Return zeroed page on success */
    6.47 +#define __GFP_NOMEMALLOC 0x10000u /* Don't use emergency reserves */
    6.48 +#define __GFP_NORECLAIM  0x20000u /* No realy zone reclaim during allocation */
    6.49 +
    6.50 +#define __GFP_BITS_SHIFT 20	/* Room for 20 __GFP_FOO bits */
    6.51 +#define __GFP_BITS_MASK ((1 << __GFP_BITS_SHIFT) - 1)
    6.52 +
    6.53 +/* if you forget to add the bitmask here kernel will crash, period */
    6.54 +#define GFP_LEVEL_MASK (__GFP_WAIT|__GFP_HIGH|__GFP_IO|__GFP_FS| \
    6.55 +			__GFP_COLD|__GFP_NOWARN|__GFP_REPEAT| \
    6.56 +			__GFP_NOFAIL|__GFP_NORETRY|__GFP_NO_GROW|__GFP_COMP| \
    6.57 +			__GFP_NOMEMALLOC|__GFP_NORECLAIM)
    6.58 +
    6.59 +#define GFP_ATOMIC	(__GFP_HIGH)
    6.60 +#define GFP_NOIO	(__GFP_WAIT)
    6.61 +#define GFP_NOFS	(__GFP_WAIT | __GFP_IO)
    6.62 +#define GFP_KERNEL	(__GFP_WAIT | __GFP_IO | __GFP_FS)
    6.63 +#define GFP_USER	(__GFP_WAIT | __GFP_IO | __GFP_FS)
    6.64 +#define GFP_HIGHUSER	(__GFP_WAIT | __GFP_IO | __GFP_FS | __GFP_HIGHMEM)
    6.65 +
    6.66 +/* Flag - indicates that the buffer will be suitable for DMA.  Ignored on some
    6.67 +   platforms, used as appropriate on others */
    6.68 +
    6.69 +#define GFP_DMA		__GFP_DMA
    6.70 +
    6.71 +
    6.72 +/*
    6.73 + * There is only one page-allocator function, and two main namespaces to
    6.74 + * it. The alloc_page*() variants return 'struct page *' and as such
    6.75 + * can allocate highmem pages, the *get*page*() variants return
    6.76 + * virtual kernel addresses to the allocated page(s).
    6.77 + */
    6.78 +
    6.79 +/*
    6.80 + * We get the zone list from the current node and the gfp_mask.
    6.81 + * This zone list contains a maximum of MAXNODES*MAX_NR_ZONES zones.
    6.82 + *
    6.83 + * For the normal case of non-DISCONTIGMEM systems the NODE_DATA() gets
    6.84 + * optimized to &contig_page_data at compile-time.
    6.85 + */
    6.86 +
    6.87 +#ifndef HAVE_ARCH_FREE_PAGE
    6.88 +static inline void arch_free_page(struct page *page, int order) { }
    6.89 +#endif
    6.90 +
    6.91 +extern struct page *
    6.92 +FASTCALL(__alloc_pages(unsigned int, unsigned int, struct zonelist *));
    6.93 +
    6.94 +static inline struct page *alloc_pages_node(int nid, unsigned int __nocast gfp_mask,
    6.95 +						unsigned int order)
    6.96 +{
    6.97 +	if (unlikely(order >= MAX_ORDER))
    6.98 +		return NULL;
    6.99 +
   6.100 +	return __alloc_pages(gfp_mask, order,
   6.101 +		NODE_DATA(nid)->node_zonelists + (gfp_mask & GFP_ZONEMASK));
   6.102 +}
   6.103 +
   6.104 +#ifdef CONFIG_NUMA
   6.105 +extern struct page *alloc_pages_current(unsigned int __nocast gfp_mask, unsigned order);
   6.106 +
   6.107 +static inline struct page *
   6.108 +alloc_pages(unsigned int __nocast gfp_mask, unsigned int order)
   6.109 +{
   6.110 +	if (unlikely(order >= MAX_ORDER))
   6.111 +		return NULL;
   6.112 +
   6.113 +	return alloc_pages_current(gfp_mask, order);
   6.114 +}
   6.115 +extern struct page *alloc_page_vma(unsigned __nocast gfp_mask,
   6.116 +			struct vm_area_struct *vma, unsigned long addr);
   6.117 +#else
   6.118 +#define alloc_pages(gfp_mask, order) \
   6.119 +		alloc_pages_node(numa_node_id(), gfp_mask, order)
   6.120 +#define alloc_page_vma(gfp_mask, vma, addr) alloc_pages(gfp_mask, 0)
   6.121 +#endif
   6.122 +#define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0)
   6.123 +
   6.124 +extern unsigned long FASTCALL(__get_free_pages(unsigned int __nocast gfp_mask, unsigned int order));
   6.125 +extern unsigned long FASTCALL(get_zeroed_page(unsigned int __nocast gfp_mask));
   6.126 +
   6.127 +#define __get_free_page(gfp_mask) \
   6.128 +		__get_free_pages((gfp_mask),0)
   6.129 +
   6.130 +#define __get_dma_pages(gfp_mask, order) \
   6.131 +		__get_free_pages((gfp_mask) | GFP_DMA,(order))
   6.132 +
   6.133 +extern void FASTCALL(__free_pages(struct page *page, unsigned int order));
   6.134 +extern void FASTCALL(free_pages(unsigned long addr, unsigned int order));
   6.135 +extern void FASTCALL(free_hot_page(struct page *page));
   6.136 +extern void FASTCALL(free_cold_page(struct page *page));
   6.137 +
   6.138 +#define __free_page(page) __free_pages((page), 0)
   6.139 +#define free_page(addr) free_pages((addr),0)
   6.140 +
   6.141 +void page_alloc_init(void);
   6.142 +#ifdef CONFIG_NUMA
   6.143 +void drain_remote_pages(void);
   6.144 +#else
   6.145 +static inline void drain_remote_pages(void) { };
   6.146 +#endif
   6.147 +
   6.148 +#endif /* __LINUX_GFP_H */
     7.1 --- a/xen/include/asm-ia64/linux-xen/linux/interrupt.h	Wed Nov 16 16:59:41 2005 -0600
     7.2 +++ b/xen/include/asm-ia64/linux-xen/linux/interrupt.h	Wed Nov 16 17:45:36 2005 -0600
     7.3 @@ -131,7 +131,9 @@ extern void open_softirq(int nr, void (*
     7.4  extern void softirq_init(void);
     7.5  #define __raise_softirq_irqoff(nr) do { local_softirq_pending() |= 1UL << (nr); } while (0)
     7.6  extern void FASTCALL(raise_softirq_irqoff(unsigned int nr));
     7.7 +#ifndef XEN
     7.8  extern void FASTCALL(raise_softirq(unsigned int nr));
     7.9 +#endif
    7.10  
    7.11  
    7.12  /* Tasklets --- multithreaded analogue of BHs.
     8.1 --- a/xen/include/asm-ia64/linux/README.origin	Wed Nov 16 16:59:41 2005 -0600
     8.2 +++ b/xen/include/asm-ia64/linux/README.origin	Wed Nov 16 17:45:36 2005 -0600
     8.3 @@ -10,7 +10,6 @@ bitops.h		->linux/include/linux/bitops.h
     8.4  dma-mapping.h		->linux/include/linux/dma-mapping.h
     8.5  efi.h			->linux/include/linux/efi.h
     8.6  err.h			->linux/include/linux/err.h
     8.7 -gfp.h			->linux/include/linux/gfp.h
     8.8  initrd.h		->linux/include/linux/initrd.h
     8.9  jiffies.h		->linux/include/linux/jiffies.h
    8.10  kmalloc_sizes.h		->linux/include/linux/kmalloc_sizes.h
     9.1 --- a/xen/include/asm-ia64/linux/gfp.h	Wed Nov 16 16:59:41 2005 -0600
     9.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
     9.3 @@ -1,142 +0,0 @@
     9.4 -#ifndef __LINUX_GFP_H
     9.5 -#define __LINUX_GFP_H
     9.6 -
     9.7 -#include <linux/mmzone.h>
     9.8 -#include <linux/stddef.h>
     9.9 -#include <linux/linkage.h>
    9.10 -#include <linux/config.h>
    9.11 -
    9.12 -struct vm_area_struct;
    9.13 -
    9.14 -/*
    9.15 - * GFP bitmasks..
    9.16 - */
    9.17 -/* Zone modifiers in GFP_ZONEMASK (see linux/mmzone.h - low two bits) */
    9.18 -#define __GFP_DMA	0x01u
    9.19 -#define __GFP_HIGHMEM	0x02u
    9.20 -
    9.21 -/*
    9.22 - * Action modifiers - doesn't change the zoning
    9.23 - *
    9.24 - * __GFP_REPEAT: Try hard to allocate the memory, but the allocation attempt
    9.25 - * _might_ fail.  This depends upon the particular VM implementation.
    9.26 - *
    9.27 - * __GFP_NOFAIL: The VM implementation _must_ retry infinitely: the caller
    9.28 - * cannot handle allocation failures.
    9.29 - *
    9.30 - * __GFP_NORETRY: The VM implementation must not retry indefinitely.
    9.31 - */
    9.32 -#define __GFP_WAIT	0x10u	/* Can wait and reschedule? */
    9.33 -#define __GFP_HIGH	0x20u	/* Should access emergency pools? */
    9.34 -#define __GFP_IO	0x40u	/* Can start physical IO? */
    9.35 -#define __GFP_FS	0x80u	/* Can call down to low-level FS? */
    9.36 -#define __GFP_COLD	0x100u	/* Cache-cold page required */
    9.37 -#define __GFP_NOWARN	0x200u	/* Suppress page allocation failure warning */
    9.38 -#define __GFP_REPEAT	0x400u	/* Retry the allocation.  Might fail */
    9.39 -#define __GFP_NOFAIL	0x800u	/* Retry for ever.  Cannot fail */
    9.40 -#define __GFP_NORETRY	0x1000u	/* Do not retry.  Might fail */
    9.41 -#define __GFP_NO_GROW	0x2000u	/* Slab internal usage */
    9.42 -#define __GFP_COMP	0x4000u	/* Add compound page metadata */
    9.43 -#define __GFP_ZERO	0x8000u	/* Return zeroed page on success */
    9.44 -#define __GFP_NOMEMALLOC 0x10000u /* Don't use emergency reserves */
    9.45 -#define __GFP_NORECLAIM  0x20000u /* No realy zone reclaim during allocation */
    9.46 -
    9.47 -#define __GFP_BITS_SHIFT 20	/* Room for 20 __GFP_FOO bits */
    9.48 -#define __GFP_BITS_MASK ((1 << __GFP_BITS_SHIFT) - 1)
    9.49 -
    9.50 -/* if you forget to add the bitmask here kernel will crash, period */
    9.51 -#define GFP_LEVEL_MASK (__GFP_WAIT|__GFP_HIGH|__GFP_IO|__GFP_FS| \
    9.52 -			__GFP_COLD|__GFP_NOWARN|__GFP_REPEAT| \
    9.53 -			__GFP_NOFAIL|__GFP_NORETRY|__GFP_NO_GROW|__GFP_COMP| \
    9.54 -			__GFP_NOMEMALLOC|__GFP_NORECLAIM)
    9.55 -
    9.56 -#define GFP_ATOMIC	(__GFP_HIGH)
    9.57 -#define GFP_NOIO	(__GFP_WAIT)
    9.58 -#define GFP_NOFS	(__GFP_WAIT | __GFP_IO)
    9.59 -#define GFP_KERNEL	(__GFP_WAIT | __GFP_IO | __GFP_FS)
    9.60 -#define GFP_USER	(__GFP_WAIT | __GFP_IO | __GFP_FS)
    9.61 -#define GFP_HIGHUSER	(__GFP_WAIT | __GFP_IO | __GFP_FS | __GFP_HIGHMEM)
    9.62 -
    9.63 -/* Flag - indicates that the buffer will be suitable for DMA.  Ignored on some
    9.64 -   platforms, used as appropriate on others */
    9.65 -
    9.66 -#define GFP_DMA		__GFP_DMA
    9.67 -
    9.68 -
    9.69 -/*
    9.70 - * There is only one page-allocator function, and two main namespaces to
    9.71 - * it. The alloc_page*() variants return 'struct page *' and as such
    9.72 - * can allocate highmem pages, the *get*page*() variants return
    9.73 - * virtual kernel addresses to the allocated page(s).
    9.74 - */
    9.75 -
    9.76 -/*
    9.77 - * We get the zone list from the current node and the gfp_mask.
    9.78 - * This zone list contains a maximum of MAXNODES*MAX_NR_ZONES zones.
    9.79 - *
    9.80 - * For the normal case of non-DISCONTIGMEM systems the NODE_DATA() gets
    9.81 - * optimized to &contig_page_data at compile-time.
    9.82 - */
    9.83 -
    9.84 -#ifndef HAVE_ARCH_FREE_PAGE
    9.85 -static inline void arch_free_page(struct page *page, int order) { }
    9.86 -#endif
    9.87 -
    9.88 -extern struct page *
    9.89 -FASTCALL(__alloc_pages(unsigned int, unsigned int, struct zonelist *));
    9.90 -
    9.91 -static inline struct page *alloc_pages_node(int nid, unsigned int __nocast gfp_mask,
    9.92 -						unsigned int order)
    9.93 -{
    9.94 -	if (unlikely(order >= MAX_ORDER))
    9.95 -		return NULL;
    9.96 -
    9.97 -	return __alloc_pages(gfp_mask, order,
    9.98 -		NODE_DATA(nid)->node_zonelists + (gfp_mask & GFP_ZONEMASK));
    9.99 -}
   9.100 -
   9.101 -#ifdef CONFIG_NUMA
   9.102 -extern struct page *alloc_pages_current(unsigned int __nocast gfp_mask, unsigned order);
   9.103 -
   9.104 -static inline struct page *
   9.105 -alloc_pages(unsigned int __nocast gfp_mask, unsigned int order)
   9.106 -{
   9.107 -	if (unlikely(order >= MAX_ORDER))
   9.108 -		return NULL;
   9.109 -
   9.110 -	return alloc_pages_current(gfp_mask, order);
   9.111 -}
   9.112 -extern struct page *alloc_page_vma(unsigned __nocast gfp_mask,
   9.113 -			struct vm_area_struct *vma, unsigned long addr);
   9.114 -#else
   9.115 -#define alloc_pages(gfp_mask, order) \
   9.116 -		alloc_pages_node(numa_node_id(), gfp_mask, order)
   9.117 -#define alloc_page_vma(gfp_mask, vma, addr) alloc_pages(gfp_mask, 0)
   9.118 -#endif
   9.119 -#define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0)
   9.120 -
   9.121 -extern unsigned long FASTCALL(__get_free_pages(unsigned int __nocast gfp_mask, unsigned int order));
   9.122 -extern unsigned long FASTCALL(get_zeroed_page(unsigned int __nocast gfp_mask));
   9.123 -
   9.124 -#define __get_free_page(gfp_mask) \
   9.125 -		__get_free_pages((gfp_mask),0)
   9.126 -
   9.127 -#define __get_dma_pages(gfp_mask, order) \
   9.128 -		__get_free_pages((gfp_mask) | GFP_DMA,(order))
   9.129 -
   9.130 -extern void FASTCALL(__free_pages(struct page *page, unsigned int order));
   9.131 -extern void FASTCALL(free_pages(unsigned long addr, unsigned int order));
   9.132 -extern void FASTCALL(free_hot_page(struct page *page));
   9.133 -extern void FASTCALL(free_cold_page(struct page *page));
   9.134 -
   9.135 -#define __free_page(page) __free_pages((page), 0)
   9.136 -#define free_page(addr) free_pages((addr),0)
   9.137 -
   9.138 -void page_alloc_init(void);
   9.139 -#ifdef CONFIG_NUMA
   9.140 -void drain_remote_pages(void);
   9.141 -#else
   9.142 -static inline void drain_remote_pages(void) { };
   9.143 -#endif
   9.144 -
   9.145 -#endif /* __LINUX_GFP_H */
    10.1 --- a/xen/include/asm-ia64/mm.h	Wed Nov 16 16:59:41 2005 -0600
    10.2 +++ b/xen/include/asm-ia64/mm.h	Wed Nov 16 17:45:36 2005 -0600
    10.3 @@ -3,7 +3,7 @@
    10.4  
    10.5  #include <xen/config.h>
    10.6  #ifdef LINUX_2_6
    10.7 -#include <xen/gfp.h>
    10.8 +#include <linux/gfp.h>
    10.9  #endif
   10.10  #include <xen/list.h>
   10.11  #include <xen/spinlock.h>
    11.1 --- a/xen/include/asm-ia64/vmmu.h	Wed Nov 16 16:59:41 2005 -0600
    11.2 +++ b/xen/include/asm-ia64/vmmu.h	Wed Nov 16 17:45:36 2005 -0600
    11.3 @@ -222,7 +222,10 @@ typedef struct thash_cb {
    11.4  #define ITR(hcb,id)             ((hcb)->ts->itr[id])
    11.5  #define DTR(hcb,id)             ((hcb)->ts->dtr[id])
    11.6  #define INVALIDATE_HASH(hcb,hash)           {   \
    11.7 -           INVALID_ENTRY(hcb, hash) = 1;        \
    11.8 +           if ((hcb)->ht==THASH_TLB)            \
    11.9 +             INVALID_TLB(hash) = 1;             \
   11.10 +           else                                 \
   11.11 +             INVALID_VHPT(hash) = 1;            \
   11.12             hash->next = NULL; }
   11.13  
   11.14  #define PURGABLE_ENTRY(hcb,en)  1
    12.1 --- a/xen/include/asm-ia64/vmx_vcpu.h	Wed Nov 16 16:59:41 2005 -0600
    12.2 +++ b/xen/include/asm-ia64/vmx_vcpu.h	Wed Nov 16 17:45:36 2005 -0600
    12.3 @@ -71,7 +71,9 @@ extern thash_cb_t *vmx_vcpu_get_vtlb(VCP
    12.4  extern thash_cb_t *vmx_vcpu_get_vhpt(VCPU *vcpu);
    12.5  extern ia64_rr vmx_vcpu_rr(VCPU *vcpu,UINT64 vadr);
    12.6  extern IA64FAULT vmx_vcpu_set_rr(VCPU *vcpu, UINT64 reg, UINT64 val);
    12.7 +#if 0
    12.8  extern IA64FAULT vmx_vcpu_get_rr(VCPU *vcpu, UINT64 reg, UINT64 *pval);
    12.9 +#endif
   12.10  extern IA64FAULT vmx_vcpu_get_pkr(VCPU *vcpu, UINT64 reg, UINT64 *pval);
   12.11  IA64FAULT vmx_vcpu_set_pkr(VCPU *vcpu, UINT64 reg, UINT64 val);
   12.12  extern IA64FAULT vmx_vcpu_itc_i(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa);