ia64/xen-unstable

changeset 6457:d34925e4144b

Still more cleanup and moving to 2.6.13 base
author djm@kirby.fc.hp.com
date Thu Sep 01 11:09:27 2005 -0600 (2005-09-01)
parents 23217792aa3b
children 3ca4ca7a9cc2
files xen/arch/ia64/hpsimserial.c xen/arch/ia64/linux-xen/README.origin xen/arch/ia64/linux-xen/hpsim_ssc.h xen/arch/ia64/process.c xen/arch/ia64/xenmisc.c xen/include/asm-ia64/config.h xen/include/asm-ia64/linux-xen/asm/gcc_intrin.h xen/include/asm-ia64/linux-xen/asm/ia64regs.h xen/include/asm-ia64/linux-xen/asm/io.h xen/include/asm-ia64/linux-xen/asm/kregs.h xen/include/asm-ia64/linux-xen/asm/mca_asm.h xen/include/asm-ia64/linux-xen/asm/page.h xen/include/asm-ia64/linux-xen/asm/pgalloc.h xen/include/asm-ia64/linux-xen/asm/processor.h xen/include/asm-ia64/linux-xen/asm/spinlock.h xen/include/asm-ia64/linux-xen/asm/system.h xen/include/asm-ia64/linux-xen/asm/tlbflush.h xen/include/asm-ia64/linux-xen/asm/types.h xen/include/asm-ia64/linux-xen/asm/uaccess.h xen/include/asm-ia64/linux-xen/linux/cpumask.h xen/include/asm-ia64/linux-xen/linux/hardirq.h xen/include/asm-ia64/linux-xen/linux/interrupt.h xen/include/asm-ia64/xengcc_intrin.h xen/include/asm-ia64/xenia64regs.h xen/include/asm-ia64/xenkregs.h xen/include/asm-ia64/xenpage.h xen/include/asm-ia64/xenprocessor.h xen/include/asm-ia64/xenspinlock.h xen/include/asm-ia64/xensystem.h xen/include/asm-ia64/xentypes.h
line diff
     1.1 --- a/xen/arch/ia64/hpsimserial.c	Wed Aug 31 17:21:24 2005 -0600
     1.2 +++ b/xen/arch/ia64/hpsimserial.c	Thu Sep 01 11:09:27 2005 -0600
     1.3 @@ -8,7 +8,7 @@
     1.4  #include <linux/config.h>
     1.5  #include <xen/sched.h>
     1.6  #include <xen/serial.h>
     1.7 -#include <asm/hpsim_ssc.h>
     1.8 +#include "hpsim_ssc.h"
     1.9  
    1.10  static void hp_ski_putc(struct serial_port *port, char c)
    1.11  {
     2.1 --- a/xen/arch/ia64/linux-xen/README.origin	Wed Aug 31 17:21:24 2005 -0600
     2.2 +++ b/xen/arch/ia64/linux-xen/README.origin	Thu Sep 01 11:09:27 2005 -0600
     2.3 @@ -7,6 +7,7 @@ to future versions of the corresponding 
     2.4  efi.c		-> linux/arch/ia64/kernel/efi.c
     2.5  entry.h		-> linux/arch/ia64/kernel/entry.h
     2.6  entry.S		-> linux/arch/ia64/kernel/entry.S
     2.7 +hpsim_ssc.h	-> linux/arch/ia64/hp/sim/hpsim_ssc.h
     2.8  irq_ia64.c	-> linux/arch/ia64/kernel/irq_ia64.c
     2.9  minstate.h	-> linux/arch/ia64/kernel/minstate.h
    2.10  mm_contig.c	-> linux/arch/ia64/mm/contig.c
     3.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     3.2 +++ b/xen/arch/ia64/linux-xen/hpsim_ssc.h	Thu Sep 01 11:09:27 2005 -0600
     3.3 @@ -0,0 +1,55 @@
     3.4 +/*
     3.5 + * Platform dependent support for HP simulator.
     3.6 + *
     3.7 + * Copyright (C) 1998, 1999 Hewlett-Packard Co
     3.8 + * Copyright (C) 1998, 1999 David Mosberger-Tang <davidm@hpl.hp.com>
     3.9 + * Copyright (C) 1999 Vijay Chander <vijay@engr.sgi.com>
    3.10 + */
    3.11 +#ifndef _IA64_PLATFORM_HPSIM_SSC_H
    3.12 +#define _IA64_PLATFORM_HPSIM_SSC_H
    3.13 +
    3.14 +/* Simulator system calls: */
    3.15 +
    3.16 +#define SSC_CONSOLE_INIT		20
    3.17 +#define SSC_GETCHAR			21
    3.18 +#define SSC_PUTCHAR			31
    3.19 +#define SSC_CONNECT_INTERRUPT		58
    3.20 +#define SSC_GENERATE_INTERRUPT		59
    3.21 +#define SSC_SET_PERIODIC_INTERRUPT	60
    3.22 +#define SSC_GET_RTC			65
    3.23 +#define SSC_EXIT			66
    3.24 +#define SSC_LOAD_SYMBOLS		69
    3.25 +#define SSC_GET_TOD			74
    3.26 +#define SSC_CTL_TRACE			76
    3.27 +
    3.28 +#define SSC_NETDEV_PROBE		100
    3.29 +#define SSC_NETDEV_SEND			101
    3.30 +#define SSC_NETDEV_RECV			102
    3.31 +#define SSC_NETDEV_ATTACH		103
    3.32 +#define SSC_NETDEV_DETACH		104
    3.33 +
    3.34 +/*
    3.35 + * Simulator system call.
    3.36 + */
    3.37 +extern long ia64_ssc (long arg0, long arg1, long arg2, long arg3, int nr);
    3.38 +
    3.39 +#ifdef XEN
    3.40 +/* Note: These are declared in linux/arch/ia64/hp/sim/simscsi.c but belong
    3.41 + * in linux/include/asm-ia64/hpsim_ssc.h, hence their addition here */
    3.42 +#define SSC_OPEN			50
    3.43 +#define SSC_CLOSE			51
    3.44 +#define SSC_READ			52
    3.45 +#define SSC_WRITE			53
    3.46 +#define SSC_GET_COMPLETION		54
    3.47 +#define SSC_WAIT_COMPLETION		55
    3.48 +
    3.49 +#define SSC_WRITE_ACCESS		2
    3.50 +#define SSC_READ_ACCESS			1
    3.51 +
    3.52 +struct ssc_disk_req {
    3.53 +	unsigned long addr;
    3.54 +	unsigned long len;
    3.55 +};
    3.56 +#endif
    3.57 +
    3.58 +#endif /* _IA64_PLATFORM_HPSIM_SSC_H */
     4.1 --- a/xen/arch/ia64/process.c	Wed Aug 31 17:21:24 2005 -0600
     4.2 +++ b/xen/arch/ia64/process.c	Thu Sep 01 11:09:27 2005 -0600
     4.3 @@ -28,8 +28,8 @@
     4.4  #include <asm/privop.h>
     4.5  #include <asm/vcpu.h>
     4.6  #include <asm/ia64_int.h>
     4.7 -#include <asm/hpsim_ssc.h>
     4.8  #include <asm/dom_fw.h>
     4.9 +#include "hpsim_ssc.h"
    4.10  
    4.11  extern unsigned long vcpu_get_itir_on_fault(struct vcpu *, UINT64);
    4.12  extern struct ia64_sal_retval pal_emulator_static(UINT64);
     5.1 --- a/xen/arch/ia64/xenmisc.c	Wed Aug 31 17:21:24 2005 -0600
     5.2 +++ b/xen/arch/ia64/xenmisc.c	Thu Sep 01 11:09:27 2005 -0600
     5.3 @@ -177,6 +177,34 @@ void free_page_type(struct pfn_info *pag
     5.4  }
     5.5  
     5.6  ///////////////////////////////
     5.7 +//// misc memory stuff
     5.8 +///////////////////////////////
     5.9 +
    5.10 +unsigned long __get_free_pages(unsigned int mask, unsigned int order)
    5.11 +{
    5.12 +	void *p = alloc_xenheap_pages(order);
    5.13 +
    5.14 +	memset(p,0,PAGE_SIZE<<order);
    5.15 +	return (unsigned long)p;
    5.16 +}
    5.17 +
    5.18 +void __free_pages(struct page *page, unsigned int order)
    5.19 +{
    5.20 +	if (order) BUG();
    5.21 +	free_xenheap_page(page);
    5.22 +}
    5.23 +
    5.24 +void *pgtable_quicklist_alloc(void)
    5.25 +{
    5.26 +	return alloc_xenheap_pages(0);
    5.27 +}
    5.28 +
    5.29 +void pgtable_quicklist_free(void *pgtable_entry)
    5.30 +{
    5.31 +	free_xenheap_page(pgtable_entry);
    5.32 +}
    5.33 +
    5.34 +///////////////////////////////
    5.35  // from arch/ia64/traps.c
    5.36  ///////////////////////////////
    5.37  
     6.1 --- a/xen/include/asm-ia64/config.h	Wed Aug 31 17:21:24 2005 -0600
     6.2 +++ b/xen/include/asm-ia64/config.h	Thu Sep 01 11:09:27 2005 -0600
     6.3 @@ -168,7 +168,9 @@ struct page;
     6.4  #define ____cacheline_aligned_in_smp
     6.5  #define ____cacheline_maxaligned_in_smp
     6.6  
     6.7 +#ifndef __ASSEMBLY__
     6.8  #include "asm/types.h"	// for u64
     6.9 +#endif
    6.10  
    6.11  // warning: unless search_extable is declared, the return value gets
    6.12  // truncated to 32-bits, causing a very strange error in privop handling
     7.1 --- a/xen/include/asm-ia64/linux-xen/asm/gcc_intrin.h	Wed Aug 31 17:21:24 2005 -0600
     7.2 +++ b/xen/include/asm-ia64/linux-xen/asm/gcc_intrin.h	Thu Sep 01 11:09:27 2005 -0600
     7.3 @@ -133,13 +133,17 @@ register unsigned long ia64_r13 asm ("r1
     7.4  	ia64_intri_res;								\
     7.5  })
     7.6  
     7.7 -#define ia64_popcnt(x)						\
     7.8 -({								\
     7.9 +#if __GNUC__ >= 4 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)
    7.10 +# define ia64_popcnt(x)		__builtin_popcountl(x)
    7.11 +#else
    7.12 +# define ia64_popcnt(x)						\
    7.13 +  ({								\
    7.14  	__u64 ia64_intri_res;					\
    7.15  	asm ("popcnt %0=%1" : "=r" (ia64_intri_res) : "r" (x));	\
    7.16  								\
    7.17  	ia64_intri_res;						\
    7.18 -})
    7.19 +  })
    7.20 +#endif
    7.21  
    7.22  #define ia64_getf_exp(x)					\
    7.23  ({								\
    7.24 @@ -368,66 +372,6 @@ register unsigned long ia64_r13 asm ("r1
    7.25  #define ia64_mf()	asm volatile ("mf" ::: "memory")
    7.26  #define ia64_mfa()	asm volatile ("mf.a" ::: "memory")
    7.27  
    7.28 -#ifdef CONFIG_VTI
    7.29 -/*
    7.30 - * Flushrs instruction stream.
    7.31 - */
    7.32 -#define ia64_flushrs() asm volatile ("flushrs;;":::"memory")
    7.33 -
    7.34 -#define ia64_loadrs() asm volatile ("loadrs;;":::"memory")
    7.35 -
    7.36 -#define ia64_get_rsc()                          \
    7.37 -({                                  \
    7.38 -    unsigned long val;                     \
    7.39 -    asm volatile ("mov %0=ar.rsc;;" : "=r"(val) :: "memory");  \
    7.40 -    val;                               \
    7.41 -})
    7.42 -
    7.43 -#define ia64_set_rsc(val)                       \
    7.44 -    asm volatile ("mov ar.rsc=%0;;" :: "r"(val) : "memory")
    7.45 -
    7.46 -#define ia64_get_bspstore()     \
    7.47 -({                                  \
    7.48 -    unsigned long val;                     \
    7.49 -    asm volatile ("mov %0=ar.bspstore;;" : "=r"(val) :: "memory");  \
    7.50 -    val;                               \
    7.51 -})
    7.52 -
    7.53 -#define ia64_set_bspstore(val)                       \
    7.54 -    asm volatile ("mov ar.bspstore=%0;;" :: "r"(val) : "memory")
    7.55 -
    7.56 -#define ia64_get_rnat()     \
    7.57 -({                                  \
    7.58 -    unsigned long val;                     \
    7.59 -    asm volatile ("mov %0=ar.rnat;" : "=r"(val) :: "memory");  \
    7.60 -    val;                               \
    7.61 -})
    7.62 -
    7.63 -#define ia64_set_rnat(val)                       \
    7.64 -    asm volatile ("mov ar.rnat=%0;;" :: "r"(val) : "memory")
    7.65 -
    7.66 -#define ia64_ttag(addr)							\
    7.67 -({										\
    7.68 -	__u64 ia64_intri_res;							\
    7.69 -	asm volatile ("ttag %0=%1" : "=r"(ia64_intri_res) : "r" (addr));	\
    7.70 -	ia64_intri_res;								\
    7.71 -})
    7.72 -
    7.73 -#define ia64_get_dcr()                          \
    7.74 -({                                      \
    7.75 -    __u64 result;                               \
    7.76 -    asm volatile ("mov %0=cr.dcr" : "=r"(result) : );           \
    7.77 -    result;                                 \
    7.78 -})
    7.79 -
    7.80 -#define ia64_set_dcr(val)                           \
    7.81 -({                                      \
    7.82 -    asm volatile ("mov cr.dcr=%0" :: "r"(val) );            \
    7.83 -})
    7.84 -
    7.85 -#endif // CONFIG_VTI
    7.86 -
    7.87 -
    7.88  #define ia64_invala() asm volatile ("invala" ::: "memory")
    7.89  
    7.90  #define ia64_thash(addr)							\
    7.91 @@ -654,4 +598,8 @@ do {								\
    7.92  		      :: "r"((x)) : "p6", "p7", "memory");	\
    7.93  } while (0)
    7.94  
    7.95 +#ifdef XEN
    7.96 +#include <asm/xengcc_intrin.h>
    7.97 +#endif
    7.98 +
    7.99  #endif /* _ASM_IA64_GCC_INTRIN_H */
     8.1 --- a/xen/include/asm-ia64/linux-xen/asm/hpsim_ssc.h	Wed Aug 31 17:21:24 2005 -0600
     8.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
     8.3 @@ -1,55 +0,0 @@
     8.4 -/*
     8.5 - * Platform dependent support for HP simulator.
     8.6 - *
     8.7 - * Copyright (C) 1998, 1999 Hewlett-Packard Co
     8.8 - * Copyright (C) 1998, 1999 David Mosberger-Tang <davidm@hpl.hp.com>
     8.9 - * Copyright (C) 1999 Vijay Chander <vijay@engr.sgi.com>
    8.10 - */
    8.11 -#ifndef _IA64_PLATFORM_HPSIM_SSC_H
    8.12 -#define _IA64_PLATFORM_HPSIM_SSC_H
    8.13 -
    8.14 -/* Simulator system calls: */
    8.15 -
    8.16 -#define SSC_CONSOLE_INIT		20
    8.17 -#define SSC_GETCHAR			21
    8.18 -#define SSC_PUTCHAR			31
    8.19 -#define SSC_CONNECT_INTERRUPT		58
    8.20 -#define SSC_GENERATE_INTERRUPT		59
    8.21 -#define SSC_SET_PERIODIC_INTERRUPT	60
    8.22 -#define SSC_GET_RTC			65
    8.23 -#define SSC_EXIT			66
    8.24 -#define SSC_LOAD_SYMBOLS		69
    8.25 -#define SSC_GET_TOD			74
    8.26 -#define SSC_CTL_TRACE			76
    8.27 -
    8.28 -#define SSC_NETDEV_PROBE		100
    8.29 -#define SSC_NETDEV_SEND			101
    8.30 -#define SSC_NETDEV_RECV			102
    8.31 -#define SSC_NETDEV_ATTACH		103
    8.32 -#define SSC_NETDEV_DETACH		104
    8.33 -
    8.34 -/*
    8.35 - * Simulator system call.
    8.36 - */
    8.37 -extern long ia64_ssc (long arg0, long arg1, long arg2, long arg3, int nr);
    8.38 -
    8.39 -#ifdef XEN
    8.40 -/* Note: These are declared in linux/arch/ia64/hp/sim/simscsi.c but belong
    8.41 - * in linux/include/asm-ia64/hpsim_ssc.h, hence their addition here */
    8.42 -#define SSC_OPEN			50
    8.43 -#define SSC_CLOSE			51
    8.44 -#define SSC_READ			52
    8.45 -#define SSC_WRITE			53
    8.46 -#define SSC_GET_COMPLETION		54
    8.47 -#define SSC_WAIT_COMPLETION		55
    8.48 -
    8.49 -#define SSC_WRITE_ACCESS		2
    8.50 -#define SSC_READ_ACCESS			1
    8.51 -
    8.52 -struct ssc_disk_req {
    8.53 -	unsigned long addr;
    8.54 -	unsigned long len;
    8.55 -};
    8.56 -#endif
    8.57 -
    8.58 -#endif /* _IA64_PLATFORM_HPSIM_SSC_H */
     9.1 --- a/xen/include/asm-ia64/linux-xen/asm/ia64regs.h	Wed Aug 31 17:21:24 2005 -0600
     9.2 +++ b/xen/include/asm-ia64/linux-xen/asm/ia64regs.h	Thu Sep 01 11:09:27 2005 -0600
     9.3 @@ -87,35 +87,6 @@
     9.4  #define _IA64_REG_CR_LRR0	4176
     9.5  #define _IA64_REG_CR_LRR1	4177
     9.6  
     9.7 -#ifdef  CONFIG_VTI
     9.8 -#define IA64_REG_CR_DCR   0
     9.9 -#define IA64_REG_CR_ITM   1
    9.10 -#define IA64_REG_CR_IVA   2
    9.11 -#define IA64_REG_CR_PTA   8
    9.12 -#define IA64_REG_CR_IPSR  16
    9.13 -#define IA64_REG_CR_ISR   17
    9.14 -#define IA64_REG_CR_IIP   19
    9.15 -#define IA64_REG_CR_IFA   20
    9.16 -#define IA64_REG_CR_ITIR  21
    9.17 -#define IA64_REG_CR_IIPA  22
    9.18 -#define IA64_REG_CR_IFS   23
    9.19 -#define IA64_REG_CR_IIM   24
    9.20 -#define IA64_REG_CR_IHA   25
    9.21 -#define IA64_REG_CR_LID   64
    9.22 -#define IA64_REG_CR_IVR   65
    9.23 -#define IA64_REG_CR_TPR   66
    9.24 -#define IA64_REG_CR_EOI   67
    9.25 -#define IA64_REG_CR_IRR0  68
    9.26 -#define IA64_REG_CR_IRR1  69
    9.27 -#define IA64_REG_CR_IRR2  70
    9.28 -#define IA64_REG_CR_IRR3  71
    9.29 -#define IA64_REG_CR_ITV   72
    9.30 -#define IA64_REG_CR_PMV   73
    9.31 -#define IA64_REG_CR_CMCV  74
    9.32 -#define IA64_REG_CR_LRR0  80
    9.33 -#define IA64_REG_CR_LRR1  81
    9.34 -#endif  //  CONFIG_VTI
    9.35 -
    9.36  /* Indirect Registers for getindreg() and setindreg() */
    9.37  
    9.38  #define _IA64_REG_INDR_CPUID	9000	/* getindreg only */
    9.39 @@ -126,4 +97,8 @@
    9.40  #define _IA64_REG_INDR_PMD	9005
    9.41  #define _IA64_REG_INDR_RR	9006
    9.42  
    9.43 +#ifdef XEN
    9.44 +#include <asm/xenia64regs.h>
    9.45 +#endif
    9.46 +
    9.47  #endif /* _ASM_IA64_IA64REGS_H */
    10.1 --- a/xen/include/asm-ia64/linux-xen/asm/io.h	Wed Aug 31 17:21:24 2005 -0600
    10.2 +++ b/xen/include/asm-ia64/linux-xen/asm/io.h	Thu Sep 01 11:09:27 2005 -0600
    10.3 @@ -124,14 +124,6 @@ static inline void ___ia64_mmiowb(void)
    10.4  	ia64_mfa();
    10.5  }
    10.6  
    10.7 -static inline const unsigned long
    10.8 -__ia64_get_io_port_base (void)
    10.9 -{
   10.10 -	extern unsigned long ia64_iobase;
   10.11 -
   10.12 -	return ia64_iobase;
   10.13 -}
   10.14 -
   10.15  static inline void*
   10.16  __ia64_mk_io_addr (unsigned long port)
   10.17  {
    11.1 --- a/xen/include/asm-ia64/linux-xen/asm/kregs.h	Wed Aug 31 17:21:24 2005 -0600
    11.2 +++ b/xen/include/asm-ia64/linux-xen/asm/kregs.h	Thu Sep 01 11:09:27 2005 -0600
    11.3 @@ -29,21 +29,8 @@
    11.4   */
    11.5  #define IA64_TR_KERNEL		0	/* itr0, dtr0: maps kernel image (code & data) */
    11.6  #define IA64_TR_PALCODE		1	/* itr1: maps PALcode as required by EFI */
    11.7 -#ifdef CONFIG_VTI
    11.8 -#define IA64_TR_XEN_IN_DOM	6	/* itr6, dtr6: Double mapping for xen image in domain space */
    11.9 -#endif // CONFIG_VTI
   11.10  #define IA64_TR_PERCPU_DATA	1	/* dtr1: percpu data */
   11.11  #define IA64_TR_CURRENT_STACK	2	/* dtr2: maps kernel's memory- & register-stacks */
   11.12 -#ifdef XEN
   11.13 -#define IA64_TR_SHARED_INFO	3	/* dtr3: page shared with domain */
   11.14 -#define	IA64_TR_VHPT		4	/* dtr4: vhpt */
   11.15 -#define IA64_TR_ARCH_INFO      5
   11.16 -#ifdef CONFIG_VTI
   11.17 -#define IA64_TR_VHPT_IN_DOM	5	/* dtr5: Double mapping for vhpt table in domain space */
   11.18 -#define IA64_TR_RR7_SWITCH_STUB	7	/* dtr7: mapping for rr7 switch stub */
   11.19 -#define IA64_TEMP_PHYSICAL	8	/* itr8, dtr8: temp mapping for guest physical memory 256M */
   11.20 -#endif // CONFIG_VTI
   11.21 -#endif
   11.22  
   11.23  /* Processor status register bits: */
   11.24  #define IA64_PSR_BE_BIT		1
   11.25 @@ -79,9 +66,6 @@
   11.26  #define IA64_PSR_ED_BIT		43
   11.27  #define IA64_PSR_BN_BIT		44
   11.28  #define IA64_PSR_IA_BIT		45
   11.29 -#ifdef CONFIG_VTI
   11.30 -#define IA64_PSR_VM_BIT		46
   11.31 -#endif // CONFIG_VTI
   11.32  
   11.33  /* A mask of PSR bits that we generally don't want to inherit across a clone2() or an
   11.34     execve().  Only list flags here that need to be cleared/set for BOTH clone2() and
   11.35 @@ -123,9 +107,6 @@
   11.36  #define IA64_PSR_ED	(__IA64_UL(1) << IA64_PSR_ED_BIT)
   11.37  #define IA64_PSR_BN	(__IA64_UL(1) << IA64_PSR_BN_BIT)
   11.38  #define IA64_PSR_IA	(__IA64_UL(1) << IA64_PSR_IA_BIT)
   11.39 -#ifdef CONFIG_VTI
   11.40 -#define IA64_PSR_VM	(__IA64_UL(1) << IA64_PSR_VM_BIT)
   11.41 -#endif // CONFIG_VTI
   11.42  
   11.43  /* User mask bits: */
   11.44  #define IA64_PSR_UM	(IA64_PSR_BE | IA64_PSR_UP | IA64_PSR_AC | IA64_PSR_MFL | IA64_PSR_MFH)
   11.45 @@ -180,20 +161,7 @@
   11.46  #define IA64_ISR_CODE_PROBEF	5
   11.47  
   11.48  #ifdef XEN
   11.49 -/* Interruption Function State */
   11.50 -#define IA64_IFS_V_BIT		63
   11.51 -#define IA64_IFS_V	(__IA64_UL(1) << IA64_IFS_V_BIT)
   11.52 -
   11.53 -/* Page Table Address */
   11.54 -#define IA64_PTA_VE_BIT 0
   11.55 -#define IA64_PTA_SIZE_BIT 2
   11.56 -#define IA64_PTA_VF_BIT 8
   11.57 -#define IA64_PTA_BASE_BIT 15
   11.58 -
   11.59 -#define IA64_PTA_VE     (__IA64_UL(1) << IA64_PTA_VE_BIT)
   11.60 -#define IA64_PTA_SIZE   (__IA64_UL(0x3f) << IA64_PTA_SIZE_BIT)
   11.61 -#define IA64_PTA_VF     (__IA64_UL(1) << IA64_PTA_VF_BIT)
   11.62 -#define IA64_PTA_BASE   (__IA64_UL(0) - ((__IA64_UL(1) << IA64_PTA_BASE_BIT)))
   11.63 +#include <asm/xenkregs.h>
   11.64  #endif
   11.65  
   11.66  #endif /* _ASM_IA64_kREGS_H */
    12.1 --- a/xen/include/asm-ia64/linux-xen/asm/mca_asm.h	Wed Aug 31 17:21:24 2005 -0600
    12.2 +++ b/xen/include/asm-ia64/linux-xen/asm/mca_asm.h	Thu Sep 01 11:09:27 2005 -0600
    12.3 @@ -29,10 +29,10 @@
    12.4  #ifdef XEN
    12.5  #define INST_VA_TO_PA(addr)							\
    12.6  	dep	addr	= 0, addr, 60, 4
    12.7 -#else // XEN
    12.8 +#else
    12.9  #define INST_VA_TO_PA(addr)							\
   12.10  	dep	addr	= 0, addr, 61, 3
   12.11 -#endif // XEN
   12.12 +#endif
   12.13  /*
   12.14   * This macro converts a data virtual address to a physical address
   12.15   * Right now for simulation purposes the virtual addresses are
   12.16 @@ -51,15 +51,19 @@
   12.17  #define DATA_PA_TO_VA(addr,temp)							\
   12.18  	mov	temp	= 0xf	;;							\
   12.19  	dep	addr	= temp, addr, 60, 4
   12.20 -#else // XEN
   12.21 +#else
   12.22  #define DATA_PA_TO_VA(addr,temp)							\
   12.23  	mov	temp	= 0x7	;;							\
   12.24  	dep	addr	= temp, addr, 61, 3
   12.25 -#endif // XEN
   12.26 +#endif
   12.27  
   12.28 +#ifdef XEN
   12.29 +//FIXME LATER
   12.30 +#else
   12.31  #define GET_THIS_PADDR(reg, var)		\
   12.32  	mov	reg = IA64_KR(PER_CPU_DATA);;	\
   12.33          addl	reg = THIS_CPU(var), reg
   12.34 +#endif
   12.35  
   12.36  /*
   12.37   * This macro jumps to the instruction at the given virtual address
    13.1 --- a/xen/include/asm-ia64/linux-xen/asm/page.h	Wed Aug 31 17:21:24 2005 -0600
    13.2 +++ b/xen/include/asm-ia64/linux-xen/asm/page.h	Thu Sep 01 11:09:27 2005 -0600
    13.3 @@ -32,7 +32,6 @@
    13.4  #define PAGE_ALIGN(addr)	(((addr) + PAGE_SIZE - 1) & PAGE_MASK)
    13.5  
    13.6  #define PERCPU_PAGE_SHIFT	16	/* log2() of max. size of per-CPU area */
    13.7 -
    13.8  #define PERCPU_PAGE_SIZE	(__IA64_UL_CONST(1) << PERCPU_PAGE_SHIFT)
    13.9  
   13.10  #define RGN_MAP_LIMIT	((1UL << (4*PAGE_SHIFT - 12)) - PAGE_SIZE)	/* per region addr limit */
   13.11 @@ -96,15 +95,9 @@ extern int ia64_pfn_valid (unsigned long
   13.12  #endif
   13.13  
   13.14  #ifndef CONFIG_DISCONTIGMEM
   13.15 -#ifdef XEN
   13.16 -# define pfn_valid(pfn)		(0)
   13.17 -# define page_to_pfn(_page)	((unsigned long)((_page) - frame_table))
   13.18 -# define pfn_to_page(_pfn)	(frame_table + (_pfn))
   13.19 -#else
   13.20  # define pfn_valid(pfn)		(((pfn) < max_mapnr) && ia64_pfn_valid(pfn))
   13.21  # define page_to_pfn(page)	((unsigned long) (page - mem_map))
   13.22  # define pfn_to_page(pfn)	(mem_map + (pfn))
   13.23 -#endif
   13.24  #else
   13.25  extern struct page *vmem_map;
   13.26  extern unsigned long max_low_pfn;
   13.27 @@ -116,11 +109,6 @@ extern unsigned long max_low_pfn;
   13.28  #define page_to_phys(page)	(page_to_pfn(page) << PAGE_SHIFT)
   13.29  #define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
   13.30  
   13.31 -#ifdef XEN
   13.32 -#define page_to_virt(_page)	phys_to_virt(page_to_phys(_page))
   13.33 -#define phys_to_page(kaddr)	pfn_to_page(((kaddr) >> PAGE_SHIFT))
   13.34 -#endif
   13.35 -
   13.36  typedef union ia64_va {
   13.37  	struct {
   13.38  		unsigned long off : 61;		/* intra-region offset */
   13.39 @@ -136,23 +124,8 @@ typedef union ia64_va {
   13.40   * expressed in this way to ensure they result in a single "dep"
   13.41   * instruction.
   13.42   */
   13.43 -#ifdef XEN
   13.44 -typedef union xen_va {
   13.45 -	struct {
   13.46 -		unsigned long off : 60;
   13.47 -		unsigned long reg : 4;
   13.48 -	} f;
   13.49 -	unsigned long l;
   13.50 -	void *p;
   13.51 -} xen_va;
   13.52 -
   13.53 -// xen/drivers/console.c uses __va in a declaration (should be fixed!)
   13.54 -#define __pa(x)		({xen_va _v; _v.l = (long) (x); _v.f.reg = 0; _v.l;})
   13.55 -#define __va(x)		({xen_va _v; _v.l = (long) (x); _v.f.reg = -1; _v.p;})
   13.56 -#else
   13.57  #define __pa(x)		({ia64_va _v; _v.l = (long) (x); _v.f.reg = 0; _v.l;})
   13.58  #define __va(x)		({ia64_va _v; _v.l = (long) (x); _v.f.reg = -1; _v.p;})
   13.59 -#endif
   13.60  
   13.61  #define REGION_NUMBER(x)	({ia64_va _v; _v.l = (long) (x); _v.f.reg;})
   13.62  #define REGION_OFFSET(x)	({ia64_va _v; _v.l = (long) (x); _v.f.off;})
   13.63 @@ -164,9 +137,9 @@ typedef union xen_va {
   13.64  # define htlbpage_to_page(x)	(((unsigned long) REGION_NUMBER(x) << 61)			\
   13.65  				 | (REGION_OFFSET(x) >> (HPAGE_SHIFT-PAGE_SHIFT)))
   13.66  # define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)
   13.67 -# define is_hugepage_only_range(addr, len)		\
   13.68 +# define is_hugepage_only_range(mm, addr, len)		\
   13.69  	 (REGION_NUMBER(addr) == REGION_HPAGE &&	\
   13.70 -	  REGION_NUMBER((addr)+(len)) == REGION_HPAGE)
   13.71 +	  REGION_NUMBER((addr)+(len)-1) == REGION_HPAGE)
   13.72  extern unsigned int hpage_shift;
   13.73  #endif
   13.74  
   13.75 @@ -224,15 +197,15 @@ get_order (unsigned long size)
   13.76  # define __pgprot(x)	(x)
   13.77  #endif /* !STRICT_MM_TYPECHECKS */
   13.78  
   13.79 -#ifdef XEN
   13.80 -#define PAGE_OFFSET			__IA64_UL_CONST(0xf000000000000000)
   13.81 -#else
   13.82  #define PAGE_OFFSET			__IA64_UL_CONST(0xe000000000000000)
   13.83 -#endif
   13.84  
   13.85  #define VM_DATA_DEFAULT_FLAGS		(VM_READ | VM_WRITE |					\
   13.86  					 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC |		\
   13.87  					 (((current->personality & READ_IMPLIES_EXEC) != 0)	\
   13.88  					  ? VM_EXEC : 0))
   13.89  
   13.90 +#ifdef XEN
   13.91 +#include <asm/xenpage.h>
   13.92 +#endif
   13.93 +
   13.94  #endif /* _ASM_IA64_PAGE_H */
    14.1 --- a/xen/include/asm-ia64/linux-xen/asm/pgalloc.h	Wed Aug 31 17:21:24 2005 -0600
    14.2 +++ b/xen/include/asm-ia64/linux-xen/asm/pgalloc.h	Thu Sep 01 11:09:27 2005 -0600
    14.3 @@ -21,176 +21,127 @@
    14.4  #include <linux/threads.h>
    14.5  
    14.6  #include <asm/mmu_context.h>
    14.7 -#include <asm/processor.h>
    14.8  
    14.9 -/*
   14.10 - * Very stupidly, we used to get new pgd's and pmd's, init their contents
   14.11 - * to point to the NULL versions of the next level page table, later on
   14.12 - * completely re-init them the same way, then free them up.  This wasted
   14.13 - * a lot of work and caused unnecessary memory traffic.  How broken...
   14.14 - * We fix this by caching them.
   14.15 - */
   14.16 -#define pgd_quicklist		(local_cpu_data->pgd_quick)
   14.17 -#define pmd_quicklist		(local_cpu_data->pmd_quick)
   14.18 -#define pgtable_cache_size	(local_cpu_data->pgtable_cache_sz)
   14.19 +#ifndef XEN
   14.20 +DECLARE_PER_CPU(unsigned long *, __pgtable_quicklist);
   14.21 +#define pgtable_quicklist __ia64_per_cpu_var(__pgtable_quicklist)
   14.22 +DECLARE_PER_CPU(long, __pgtable_quicklist_size);
   14.23 +#define pgtable_quicklist_size __ia64_per_cpu_var(__pgtable_quicklist_size)
   14.24  
   14.25 -static inline pgd_t*
   14.26 -pgd_alloc_one_fast (struct mm_struct *mm)
   14.27 +static inline long pgtable_quicklist_total_size(void)
   14.28  {
   14.29 -	unsigned long *ret = NULL;
   14.30 -
   14.31 -	preempt_disable();
   14.32 +	long ql_size = 0;
   14.33 +	int cpuid;
   14.34  
   14.35 -	ret = pgd_quicklist;
   14.36 -	if (likely(ret != NULL)) {
   14.37 -		pgd_quicklist = (unsigned long *)(*ret);
   14.38 -		ret[0] = 0;
   14.39 -		--pgtable_cache_size;
   14.40 -	} else
   14.41 -		ret = NULL;
   14.42 -
   14.43 -	preempt_enable();
   14.44 -
   14.45 -	return (pgd_t *) ret;
   14.46 +	for_each_online_cpu(cpuid) {
   14.47 +		ql_size += per_cpu(__pgtable_quicklist_size, cpuid);
   14.48 +	}
   14.49 +	return ql_size;
   14.50  }
   14.51  
   14.52 -static inline pgd_t*
   14.53 -pgd_alloc (struct mm_struct *mm)
   14.54 -{
   14.55 -	/* the VM system never calls pgd_alloc_one_fast(), so we do it here. */
   14.56 -	pgd_t *pgd = pgd_alloc_one_fast(mm);
   14.57 -
   14.58 -	if (unlikely(pgd == NULL)) {
   14.59 -#ifdef XEN
   14.60 -		pgd = (pgd_t *)alloc_xenheap_page();
   14.61 -		memset(pgd,0,PAGE_SIZE);
   14.62 -#else
   14.63 -		pgd = (pgd_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
   14.64 -#endif
   14.65 -	}
   14.66 -	return pgd;
   14.67 -}
   14.68 -
   14.69 -static inline void
   14.70 -pgd_free (pgd_t *pgd)
   14.71 -{
   14.72 -	preempt_disable();
   14.73 -	*(unsigned long *)pgd = (unsigned long) pgd_quicklist;
   14.74 -	pgd_quicklist = (unsigned long *) pgd;
   14.75 -	++pgtable_cache_size;
   14.76 -	preempt_enable();
   14.77 -}
   14.78 -
   14.79 -static inline void
   14.80 -pud_populate (struct mm_struct *mm, pud_t *pud_entry, pmd_t *pmd)
   14.81 -{
   14.82 -	pud_val(*pud_entry) = __pa(pmd);
   14.83 -}
   14.84 -
   14.85 -static inline pmd_t*
   14.86 -pmd_alloc_one_fast (struct mm_struct *mm, unsigned long addr)
   14.87 +static inline void *pgtable_quicklist_alloc(void)
   14.88  {
   14.89  	unsigned long *ret = NULL;
   14.90  
   14.91  	preempt_disable();
   14.92  
   14.93 -	ret = (unsigned long *)pmd_quicklist;
   14.94 +	ret = pgtable_quicklist;
   14.95  	if (likely(ret != NULL)) {
   14.96 -		pmd_quicklist = (unsigned long *)(*ret);
   14.97 +		pgtable_quicklist = (unsigned long *)(*ret);
   14.98  		ret[0] = 0;
   14.99 -		--pgtable_cache_size;
  14.100 +		--pgtable_quicklist_size;
  14.101 +		preempt_enable();
  14.102 +	} else {
  14.103 +		preempt_enable();
  14.104 +		ret = (unsigned long *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
  14.105  	}
  14.106  
  14.107 -	preempt_enable();
  14.108 -
  14.109 -	return (pmd_t *)ret;
  14.110 +	return ret;
  14.111  }
  14.112  
  14.113 -static inline pmd_t*
  14.114 -pmd_alloc_one (struct mm_struct *mm, unsigned long addr)
  14.115 +static inline void pgtable_quicklist_free(void *pgtable_entry)
  14.116  {
  14.117 -#ifdef XEN
  14.118 -	pmd_t *pmd = (pmd_t *)alloc_xenheap_page();
  14.119 -	memset(pmd,0,PAGE_SIZE);
  14.120 -#else
  14.121 -	pmd_t *pmd = (pmd_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
  14.122 +#ifdef CONFIG_NUMA
  14.123 +	unsigned long nid = page_to_nid(virt_to_page(pgtable_entry));
  14.124 +
  14.125 +	if (unlikely(nid != numa_node_id())) {
  14.126 +		free_page((unsigned long)pgtable_entry);
  14.127 +		return;
  14.128 +	}
  14.129  #endif
  14.130  
  14.131 -	return pmd;
  14.132 +	preempt_disable();
  14.133 +	*(unsigned long *)pgtable_entry = (unsigned long)pgtable_quicklist;
  14.134 +	pgtable_quicklist = (unsigned long *)pgtable_entry;
  14.135 +	++pgtable_quicklist_size;
  14.136 +	preempt_enable();
  14.137 +}
  14.138 +#endif
  14.139 +
  14.140 +static inline pgd_t *pgd_alloc(struct mm_struct *mm)
  14.141 +{
  14.142 +	return pgtable_quicklist_alloc();
  14.143 +}
  14.144 +
  14.145 +static inline void pgd_free(pgd_t * pgd)
  14.146 +{
  14.147 +	pgtable_quicklist_free(pgd);
  14.148  }
  14.149  
  14.150  static inline void
  14.151 -pmd_free (pmd_t *pmd)
  14.152 +pud_populate(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
  14.153 +{
  14.154 +	pud_val(*pud_entry) = __pa(pmd);
  14.155 +}
  14.156 +
  14.157 +static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
  14.158  {
  14.159 -	preempt_disable();
  14.160 -	*(unsigned long *)pmd = (unsigned long) pmd_quicklist;
  14.161 -	pmd_quicklist = (unsigned long *) pmd;
  14.162 -	++pgtable_cache_size;
  14.163 -	preempt_enable();
  14.164 +	return pgtable_quicklist_alloc();
  14.165 +}
  14.166 +
  14.167 +static inline void pmd_free(pmd_t * pmd)
  14.168 +{
  14.169 +	pgtable_quicklist_free(pmd);
  14.170  }
  14.171  
  14.172  #define __pmd_free_tlb(tlb, pmd)	pmd_free(pmd)
  14.173  
  14.174  static inline void
  14.175 -pmd_populate (struct mm_struct *mm, pmd_t *pmd_entry, struct page *pte)
  14.176 +pmd_populate(struct mm_struct *mm, pmd_t * pmd_entry, struct page *pte)
  14.177  {
  14.178  	pmd_val(*pmd_entry) = page_to_phys(pte);
  14.179  }
  14.180  
  14.181  static inline void
  14.182 -pmd_populate_kernel (struct mm_struct *mm, pmd_t *pmd_entry, pte_t *pte)
  14.183 +pmd_populate_kernel(struct mm_struct *mm, pmd_t * pmd_entry, pte_t * pte)
  14.184  {
  14.185  	pmd_val(*pmd_entry) = __pa(pte);
  14.186  }
  14.187  
  14.188 -static inline struct page *
  14.189 -pte_alloc_one (struct mm_struct *mm, unsigned long addr)
  14.190 +static inline struct page *pte_alloc_one(struct mm_struct *mm,
  14.191 +					 unsigned long addr)
  14.192  {
  14.193 -#ifdef XEN
  14.194 -	struct page *pte = alloc_xenheap_page();
  14.195 -	memset(pte,0,PAGE_SIZE);
  14.196 -#else
  14.197 -	struct page *pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
  14.198 -#endif
  14.199 -
  14.200 -	return pte;
  14.201 +	return virt_to_page(pgtable_quicklist_alloc());
  14.202  }
  14.203  
  14.204 -static inline pte_t *
  14.205 -pte_alloc_one_kernel (struct mm_struct *mm, unsigned long addr)
  14.206 +static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
  14.207 +					  unsigned long addr)
  14.208  {
  14.209 -#ifdef XEN
  14.210 -	pte_t *pte = (pte_t *)alloc_xenheap_page();
  14.211 -	memset(pte,0,PAGE_SIZE);
  14.212 -#else
  14.213 -	pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
  14.214 -#endif
  14.215 -
  14.216 -	return pte;
  14.217 +	return pgtable_quicklist_alloc();
  14.218  }
  14.219  
  14.220 -static inline void
  14.221 -pte_free (struct page *pte)
  14.222 +static inline void pte_free(struct page *pte)
  14.223  {
  14.224 -#ifdef XEN
  14.225 -	free_xenheap_page(pte);
  14.226 -#else
  14.227 -	__free_page(pte);
  14.228 -#endif
  14.229 +	pgtable_quicklist_free(page_address(pte));
  14.230  }
  14.231  
  14.232 -static inline void
  14.233 -pte_free_kernel (pte_t *pte)
  14.234 +static inline void pte_free_kernel(pte_t * pte)
  14.235  {
  14.236 -#ifdef XEN
  14.237 -	free_xenheap_page((unsigned long) pte);
  14.238 -#else
  14.239 -	free_page((unsigned long) pte);
  14.240 -#endif
  14.241 +	pgtable_quicklist_free(pte);
  14.242  }
  14.243  
  14.244 -#define __pte_free_tlb(tlb, pte)	tlb_remove_page((tlb), (pte))
  14.245 +#define __pte_free_tlb(tlb, pte)	pte_free(pte)
  14.246  
  14.247 -extern void check_pgt_cache (void);
  14.248 +extern void check_pgt_cache(void);
  14.249  
  14.250 -#endif /* _ASM_IA64_PGALLOC_H */
  14.251 +#endif				/* _ASM_IA64_PGALLOC_H */
    15.1 --- a/xen/include/asm-ia64/linux-xen/asm/processor.h	Wed Aug 31 17:21:24 2005 -0600
    15.2 +++ b/xen/include/asm-ia64/linux-xen/asm/processor.h	Thu Sep 01 11:09:27 2005 -0600
    15.3 @@ -43,14 +43,6 @@
    15.4  #define TASK_SIZE		(current->thread.task_size)
    15.5  
    15.6  /*
    15.7 - * MM_VM_SIZE(mm) gives the maximum address (plus 1) which may contain a mapping for
    15.8 - * address-space MM.  Note that with 32-bit tasks, this is still DEFAULT_TASK_SIZE,
    15.9 - * because the kernel may have installed helper-mappings above TASK_SIZE.  For example,
   15.10 - * for x86 emulation, the LDT and GDT are mapped above TASK_SIZE.
   15.11 - */
   15.12 -#define MM_VM_SIZE(mm)		DEFAULT_TASK_SIZE
   15.13 -
   15.14 -/*
   15.15   * This decides where the kernel will search for a free chunk of vm
   15.16   * space during mmap's.
   15.17   */
   15.18 @@ -94,11 +86,10 @@
   15.19  #ifdef CONFIG_NUMA
   15.20  #include <asm/nodedata.h>
   15.21  #endif
   15.22 +
   15.23  #ifdef XEN
   15.24  #include <asm/xenprocessor.h>
   15.25 -#endif
   15.26 -
   15.27 -#ifndef XEN
   15.28 +#else
   15.29  /* like above but expressed as bitfields for more efficient access: */
   15.30  struct ia64_psr {
   15.31  	__u64 reserved0 : 1;
   15.32 @@ -150,9 +141,6 @@ struct cpuinfo_ia64 {
   15.33  	__u64 nsec_per_cyc;	/* (1000000000<<IA64_NSEC_PER_CYC_SHIFT)/itc_freq */
   15.34  	__u64 unimpl_va_mask;	/* mask of unimplemented virtual address bits (from PAL) */
   15.35  	__u64 unimpl_pa_mask;	/* mask of unimplemented physical address bits (from PAL) */
   15.36 -	__u64 *pgd_quick;
   15.37 -	__u64 *pmd_quick;
   15.38 -	__u64 pgtable_cache_sz;
   15.39  	__u64 itc_freq;		/* frequency of ITC counter */
   15.40  	__u64 proc_freq;	/* frequency of processor */
   15.41  	__u64 cyc_per_usec;	/* itc_freq/1000000 */
   15.42 @@ -190,22 +178,6 @@ struct cpuinfo_ia64 {
   15.43  
   15.44  DECLARE_PER_CPU(struct cpuinfo_ia64, cpu_info);
   15.45  
   15.46 -typedef union {
   15.47 -	struct {
   15.48 -		__u64 kr0;
   15.49 -		__u64 kr1;
   15.50 -		__u64 kr2;
   15.51 -		__u64 kr3;
   15.52 -		__u64 kr4;
   15.53 -		__u64 kr5;
   15.54 -		__u64 kr6;
   15.55 -		__u64 kr7;
   15.56 -	};
   15.57 -	__u64 _kr[8];
   15.58 -} cpu_kr_ia64_t;
   15.59 -
   15.60 -DECLARE_PER_CPU(cpu_kr_ia64_t, cpu_kr);
   15.61 -
   15.62  /*
   15.63   * The "local" data variable.  It refers to the per-CPU data of the currently executing
   15.64   * CPU, much like "current" points to the per-task data of the currently executing task.
   15.65 @@ -435,7 +407,10 @@ extern void ia64_setreg_unknown_kr (void
   15.66   * task_struct at this point.
   15.67   */
   15.68  
   15.69 -/* Return TRUE if task T owns the fph partition of the CPU we're running on. */
   15.70 +/*
   15.71 + * Return TRUE if task T owns the fph partition of the CPU we're running on.
   15.72 + * Must be called from code that has preemption disabled.
   15.73 + */
   15.74  #ifndef XEN
   15.75  #define ia64_is_local_fpu_owner(t)								\
   15.76  ({												\
   15.77 @@ -445,7 +420,10 @@ extern void ia64_setreg_unknown_kr (void
   15.78  })
   15.79  #endif
   15.80  
   15.81 -/* Mark task T as owning the fph partition of the CPU we're running on. */
   15.82 +/*
   15.83 + * Mark task T as owning the fph partition of the CPU we're running on.
   15.84 + * Must be called from code that has preemption disabled.
   15.85 + */
   15.86  #define ia64_set_local_fpu_owner(t) do {						\
   15.87  	struct task_struct *__ia64_slfo_task = (t);					\
   15.88  	__ia64_slfo_task->thread.last_fph_cpu = smp_processor_id();			\
    16.1 --- a/xen/include/asm-ia64/linux-xen/asm/spinlock.h	Wed Aug 31 17:21:24 2005 -0600
    16.2 +++ b/xen/include/asm-ia64/linux-xen/asm/spinlock.h	Thu Sep 01 11:09:27 2005 -0600
    16.3 @@ -120,35 +120,6 @@ do {											\
    16.4  #define _raw_spin_trylock(x)	(cmpxchg_acq(&(x)->lock, 0, 1) == 0)
    16.5  #define spin_unlock_wait(x)	do { barrier(); } while ((x)->lock)
    16.6  
    16.7 -#ifdef XEN
    16.8 -/*
    16.9 - * spin_[un]lock_recursive(): Use these forms when the lock can (safely!) be
   16.10 - * reentered recursively on the same CPU. All critical regions that may form
   16.11 - * part of a recursively-nested set must be protected by these forms. If there
   16.12 - * are any critical regions that cannot form part of such a set, they can use
   16.13 - * standard spin_[un]lock().
   16.14 - */
   16.15 -#define _raw_spin_lock_recursive(_lock)            \
   16.16 -    do {                                           \
   16.17 -        int cpu = smp_processor_id();              \
   16.18 -        if ( likely((_lock)->recurse_cpu != cpu) ) \
   16.19 -        {                                          \
   16.20 -            spin_lock(_lock);                      \
   16.21 -            (_lock)->recurse_cpu = cpu;            \
   16.22 -        }                                          \
   16.23 -        (_lock)->recurse_cnt++;                    \
   16.24 -    } while ( 0 )
   16.25 -
   16.26 -#define _raw_spin_unlock_recursive(_lock)          \
   16.27 -    do {                                           \
   16.28 -        if ( likely(--(_lock)->recurse_cnt == 0) ) \
   16.29 -        {                                          \
   16.30 -            (_lock)->recurse_cpu = -1;             \
   16.31 -            spin_unlock(_lock);                    \
   16.32 -        }                                          \
   16.33 -    } while ( 0 )
   16.34 -#endif
   16.35 -
   16.36  typedef struct {
   16.37  	volatile unsigned int read_counter	: 31;
   16.38  	volatile unsigned int write_lock	:  1;
   16.39 @@ -238,4 +209,7 @@ do {										\
   16.40  	clear_bit(31, (x));								\
   16.41  })
   16.42  
   16.43 +#ifdef XEN
   16.44 +#include <asm/xenspinlock.h>
   16.45 +#endif
   16.46  #endif /*  _ASM_IA64_SPINLOCK_H */
    17.1 --- a/xen/include/asm-ia64/linux-xen/asm/system.h	Wed Aug 31 17:21:24 2005 -0600
    17.2 +++ b/xen/include/asm-ia64/linux-xen/asm/system.h	Thu Sep 01 11:09:27 2005 -0600
    17.3 @@ -18,19 +18,14 @@
    17.4  #include <asm/page.h>
    17.5  #include <asm/pal.h>
    17.6  #include <asm/percpu.h>
    17.7 -#ifdef XEN
    17.8 -#include <asm/xensystem.h>
    17.9 -#endif
   17.10  
   17.11  #define GATE_ADDR		__IA64_UL_CONST(0xa000000000000000)
   17.12  /*
   17.13   * 0xa000000000000000+2*PERCPU_PAGE_SIZE
   17.14   * - 0xa000000000000000+3*PERCPU_PAGE_SIZE remain unmapped (guard page)
   17.15   */
   17.16 -#ifndef XEN
   17.17  #define KERNEL_START		 __IA64_UL_CONST(0xa000000100000000)
   17.18  #define PERCPU_ADDR		(-PERCPU_PAGE_SIZE)
   17.19 -#endif
   17.20  
   17.21  #ifndef __ASSEMBLY__
   17.22  
   17.23 @@ -188,8 +183,6 @@ do {								\
   17.24  
   17.25  #ifdef __KERNEL__
   17.26  
   17.27 -#define prepare_to_switch()    do { } while(0)
   17.28 -
   17.29  #ifdef CONFIG_IA32_SUPPORT
   17.30  # define IS_IA32_PROCESS(regs)	(ia64_psr(regs)->is != 0)
   17.31  #else
   17.32 @@ -223,7 +216,6 @@ extern void ia64_load_extra (struct task
   17.33  # define PERFMON_IS_SYSWIDE() (0)
   17.34  #endif
   17.35  
   17.36 -#ifndef XEN
   17.37  #define IA64_HAS_EXTRA_STATE(t)							\
   17.38  	((t)->thread.flags & (IA64_THREAD_DBG_VALID|IA64_THREAD_PM_VALID)	\
   17.39  	 || IS_IA32_PROCESS(ia64_task_regs(t)) || PERFMON_IS_SYSWIDE())
   17.40 @@ -236,7 +228,6 @@ extern void ia64_load_extra (struct task
   17.41  	ia64_psr(ia64_task_regs(next))->dfh = !ia64_is_local_fpu_owner(next);			 \
   17.42  	(last) = ia64_switch_to((next));							 \
   17.43  } while (0)
   17.44 -#endif 
   17.45  
   17.46  #ifdef CONFIG_SMP
   17.47  /*
   17.48 @@ -247,9 +238,9 @@ extern void ia64_load_extra (struct task
   17.49   */
   17.50  # define switch_to(prev,next,last) do {						\
   17.51  	if (ia64_psr(ia64_task_regs(prev))->mfh && ia64_is_local_fpu_owner(prev)) {				\
   17.52 -		/* ia64_psr(ia64_task_regs(prev))->mfh = 0; */			\
   17.53 -		/* (prev)->thread.flags |= IA64_THREAD_FPH_VALID; */			\
   17.54 -		/* __ia64_save_fpu((prev)->thread.fph); */				\
   17.55 +		ia64_psr(ia64_task_regs(prev))->mfh = 0;			\
   17.56 +		(prev)->thread.flags |= IA64_THREAD_FPH_VALID;			\
   17.57 +		__ia64_save_fpu((prev)->thread.fph);				\
   17.58  	}									\
   17.59  	__switch_to(prev, next, last);						\
   17.60  } while (0)
   17.61 @@ -281,19 +272,20 @@ extern void ia64_load_extra (struct task
   17.62   * of that CPU which will not be released, because there we wait for the
   17.63   * tasklist_lock to become available.
   17.64   */
   17.65 -#define prepare_arch_switch(rq, next)		\
   17.66 -do {						\
   17.67 -	spin_lock(&(next)->switch_lock);	\
   17.68 -	spin_unlock(&(rq)->lock);		\
   17.69 -} while (0)
   17.70 -#define finish_arch_switch(rq, prev)	spin_unlock_irq(&(prev)->switch_lock)
   17.71 -#define task_running(rq, p) 		((rq)->curr == (p) || spin_is_locked(&(p)->switch_lock))
   17.72 +#define __ARCH_WANT_UNLOCKED_CTXSW
   17.73  
   17.74  #define ia64_platform_is(x) (strcmp(x, platform_name) == 0)
   17.75  
   17.76  void cpu_idle_wait(void);
   17.77 +
   17.78 +#define arch_align_stack(x) (x)
   17.79 +
   17.80  #endif /* __KERNEL__ */
   17.81  
   17.82  #endif /* __ASSEMBLY__ */
   17.83  
   17.84 +#ifdef XEN
   17.85 +#include <asm/xensystem.h>
   17.86 +#endif
   17.87 +
   17.88  #endif /* _ASM_IA64_SYSTEM_H */
    18.1 --- a/xen/include/asm-ia64/linux-xen/asm/tlbflush.h	Wed Aug 31 17:21:24 2005 -0600
    18.2 +++ b/xen/include/asm-ia64/linux-xen/asm/tlbflush.h	Thu Sep 01 11:09:27 2005 -0600
    18.3 @@ -37,6 +37,7 @@ static inline void
    18.4  local_finish_flush_tlb_mm (struct mm_struct *mm)
    18.5  {
    18.6  #ifndef XEN
    18.7 +// FIXME SMP?
    18.8  	if (mm == current->active_mm)
    18.9  		activate_context(mm);
   18.10  #endif
   18.11 @@ -54,6 +55,7 @@ flush_tlb_mm (struct mm_struct *mm)
   18.12  		return;
   18.13  
   18.14  #ifndef XEN
   18.15 +// FIXME SMP?
   18.16  	mm->context = 0;
   18.17  #endif
   18.18  
   18.19 @@ -81,6 +83,7 @@ flush_tlb_page (struct vm_area_struct *v
   18.20  	if (vma->vm_mm == current->active_mm)
   18.21  		ia64_ptcl(addr, (PAGE_SHIFT << 2));
   18.22  #ifndef XEN
   18.23 +// FIXME SMP?
   18.24  	else
   18.25  		vma->vm_mm->context = 0;
   18.26  #endif
    19.1 --- a/xen/include/asm-ia64/linux-xen/asm/types.h	Wed Aug 31 17:21:24 2005 -0600
    19.2 +++ b/xen/include/asm-ia64/linux-xen/asm/types.h	Thu Sep 01 11:09:27 2005 -0600
    19.3 @@ -1,12 +1,5 @@
    19.4  #ifndef _ASM_IA64_TYPES_H
    19.5  #define _ASM_IA64_TYPES_H
    19.6 -#ifdef XEN
    19.7 -#ifndef __ASSEMBLY__
    19.8 -typedef unsigned long ssize_t;
    19.9 -typedef unsigned long size_t;
   19.10 -typedef long long loff_t;
   19.11 -#endif
   19.12 -#endif
   19.13  
   19.14  /*
   19.15   * This file is never included by application software unless explicitly requested (e.g.,
   19.16 @@ -68,28 +61,6 @@ typedef __u32 u32;
   19.17  typedef __s64 s64;
   19.18  typedef __u64 u64;
   19.19  
   19.20 -#ifdef XEN
   19.21 -/*
   19.22 - * Below are truly Linux-specific types that should never collide with
   19.23 - * any application/library that wants linux/types.h.
   19.24 - */
   19.25 -
   19.26 -#ifdef __CHECKER__
   19.27 -#define __bitwise __attribute__((bitwise))
   19.28 -#else
   19.29 -#define __bitwise
   19.30 -#endif
   19.31 -
   19.32 -typedef __u16 __bitwise __le16;
   19.33 -typedef __u16 __bitwise __be16;
   19.34 -typedef __u32 __bitwise __le32;
   19.35 -typedef __u32 __bitwise __be32;
   19.36 -#if defined(__GNUC__) && !defined(__STRICT_ANSI__)
   19.37 -typedef __u64 __bitwise __le64;
   19.38 -typedef __u64 __bitwise __be64;
   19.39 -#endif
   19.40 -#endif
   19.41 -
   19.42  #define BITS_PER_LONG 64
   19.43  
   19.44  /* DMA addresses are 64-bits wide, in general.  */
   19.45 @@ -101,4 +72,8 @@ typedef unsigned short kmem_bufctl_t;
   19.46  # endif /* __KERNEL__ */
   19.47  #endif /* !__ASSEMBLY__ */
   19.48  
   19.49 +#ifdef XEN
   19.50 +#include <asm/xentypes.h>
   19.51 +#endif
   19.52 +
   19.53  #endif /* _ASM_IA64_TYPES_H */
    20.1 --- a/xen/include/asm-ia64/linux-xen/asm/uaccess.h	Wed Aug 31 17:21:24 2005 -0600
    20.2 +++ b/xen/include/asm-ia64/linux-xen/asm/uaccess.h	Thu Sep 01 11:09:27 2005 -0600
    20.3 @@ -32,16 +32,15 @@
    20.4   *	David Mosberger-Tang <davidm@hpl.hp.com>
    20.5   */
    20.6  
    20.7 -#ifdef CONFIG_VTI
    20.8 -#include <asm/vmx_uaccess.h>
    20.9 -#else // CONFIG_VTI
   20.10 -
   20.11  #include <linux/compiler.h>
   20.12  #include <linux/errno.h>
   20.13  #include <linux/sched.h>
   20.14 +#include <linux/page-flags.h>
   20.15 +#include <linux/mm.h>
   20.16  
   20.17  #include <asm/intrinsics.h>
   20.18  #include <asm/pgtable.h>
   20.19 +#include <asm/io.h>
   20.20  
   20.21  /*
   20.22   * For historical reasons, the following macros are grossly misnamed:
   20.23 @@ -65,7 +64,6 @@
   20.24   * point inside the virtually mapped linear page table.
   20.25   */
   20.26  #ifdef XEN
   20.27 -/* VT-i reserves bit 60 for the VMM; guest addresses have bit 60 = bit 59 */
   20.28  #define IS_VMM_ADDRESS(addr) ((((addr) >> 60) ^ ((addr) >> 59)) & 1)
   20.29  #define __access_ok(addr, size, segment) (!IS_VMM_ADDRESS((unsigned long)(addr)))
   20.30  #else
   20.31 @@ -79,7 +77,8 @@
   20.32  #endif
   20.33  #define access_ok(type, addr, size)	__access_ok((addr), (size), get_fs())
   20.34  
   20.35 -static inline int
   20.36 +/* this function will go away soon - use access_ok() instead */
   20.37 +static inline int __deprecated
   20.38  verify_area (int type, const void __user *addr, unsigned long size)
   20.39  {
   20.40  	return access_ok(type, addr, size) ? 0 : -EFAULT;
   20.41 @@ -353,7 +352,6 @@ extern unsigned long __strnlen_user (con
   20.42  	__su_ret;						\
   20.43  })
   20.44  
   20.45 -#endif // CONFIG_VTI
   20.46  /* Generic code can't deal with the location-relative format that we use for compactness.  */
   20.47  #define ARCH_HAS_SORT_EXTABLE
   20.48  #define ARCH_HAS_SEARCH_EXTABLE
   20.49 @@ -378,4 +376,40 @@ ia64_done_with_exception (struct pt_regs
   20.50  	return 0;
   20.51  }
   20.52  
   20.53 +#ifndef XEN
   20.54 +#define ARCH_HAS_TRANSLATE_MEM_PTR	1
   20.55 +static __inline__ char *
   20.56 +xlate_dev_mem_ptr (unsigned long p)
   20.57 +{
   20.58 +	struct page *page;
   20.59 +	char * ptr;
   20.60 +
   20.61 +	page = pfn_to_page(p >> PAGE_SHIFT);
   20.62 +	if (PageUncached(page))
   20.63 +		ptr = (char *)p + __IA64_UNCACHED_OFFSET;
   20.64 +	else
   20.65 +		ptr = __va(p);
   20.66 +
   20.67 +	return ptr;
   20.68 +}
   20.69 +
   20.70 +/*
   20.71 + * Convert a virtual cached kernel memory pointer to an uncached pointer
   20.72 + */
   20.73 +static __inline__ char *
   20.74 +xlate_dev_kmem_ptr (char * p)
   20.75 +{
   20.76 +	struct page *page;
   20.77 +	char * ptr;
   20.78 +
   20.79 +	page = virt_to_page((unsigned long)p >> PAGE_SHIFT);
   20.80 +	if (PageUncached(page))
   20.81 +		ptr = (char *)__pa(p) + __IA64_UNCACHED_OFFSET;
   20.82 +	else
   20.83 +		ptr = p;
   20.84 +
   20.85 +	return ptr;
   20.86 +}
   20.87 +#endif
   20.88 +
   20.89  #endif /* _ASM_IA64_UACCESS_H */
    21.1 --- a/xen/include/asm-ia64/linux-xen/linux/cpumask.h	Wed Aug 31 17:21:24 2005 -0600
    21.2 +++ b/xen/include/asm-ia64/linux-xen/linux/cpumask.h	Thu Sep 01 11:09:27 2005 -0600
    21.3 @@ -10,6 +10,8 @@
    21.4   *
    21.5   * For details of cpumask_scnprintf() and cpumask_parse(),
    21.6   * see bitmap_scnprintf() and bitmap_parse() in lib/bitmap.c.
    21.7 + * For details of cpulist_scnprintf() and cpulist_parse(), see
    21.8 + * bitmap_scnlistprintf() and bitmap_parselist(), also in bitmap.c.
    21.9   *
   21.10   * The available cpumask operations are:
   21.11   *
   21.12 @@ -46,6 +48,8 @@
   21.13   *
   21.14   * int cpumask_scnprintf(buf, len, mask) Format cpumask for printing
   21.15   * int cpumask_parse(ubuf, ulen, mask)	Parse ascii string as cpumask
   21.16 + * int cpulist_scnprintf(buf, len, mask) Format cpumask as list for printing
   21.17 + * int cpulist_parse(buf, map)		Parse ascii string as cpulist
   21.18   *
   21.19   * for_each_cpu_mask(cpu, mask)		for-loop cpu over mask
   21.20   *
   21.21 @@ -268,14 +272,28 @@ static inline int __cpumask_scnprintf(ch
   21.22  	return bitmap_scnprintf(buf, len, srcp->bits, nbits);
   21.23  }
   21.24  
   21.25 -#define cpumask_parse(ubuf, ulen, src) \
   21.26 -			__cpumask_parse((ubuf), (ulen), &(src), NR_CPUS)
   21.27 +#define cpumask_parse(ubuf, ulen, dst) \
   21.28 +			__cpumask_parse((ubuf), (ulen), &(dst), NR_CPUS)
   21.29  static inline int __cpumask_parse(const char __user *buf, int len,
   21.30  					cpumask_t *dstp, int nbits)
   21.31  {
   21.32  	return bitmap_parse(buf, len, dstp->bits, nbits);
   21.33  }
   21.34  
   21.35 +#define cpulist_scnprintf(buf, len, src) \
   21.36 +			__cpulist_scnprintf((buf), (len), &(src), NR_CPUS)
   21.37 +static inline int __cpulist_scnprintf(char *buf, int len,
   21.38 +					const cpumask_t *srcp, int nbits)
   21.39 +{
   21.40 +	return bitmap_scnlistprintf(buf, len, srcp->bits, nbits);
   21.41 +}
   21.42 +
   21.43 +#define cpulist_parse(buf, dst) __cpulist_parse((buf), &(dst), NR_CPUS)
   21.44 +static inline int __cpulist_parse(const char *buf, cpumask_t *dstp, int nbits)
   21.45 +{
   21.46 +	return bitmap_parselist(buf, dstp->bits, nbits);
   21.47 +}
   21.48 +
   21.49  #if NR_CPUS > 1
   21.50  #define for_each_cpu_mask(cpu, mask)		\
   21.51  	for ((cpu) = first_cpu(mask);		\
    22.1 --- a/xen/include/asm-ia64/linux-xen/linux/hardirq.h	Wed Aug 31 17:21:24 2005 -0600
    22.2 +++ b/xen/include/asm-ia64/linux-xen/linux/hardirq.h	Thu Sep 01 11:09:27 2005 -0600
    22.3 @@ -2,6 +2,7 @@
    22.4  #define LINUX_HARDIRQ_H
    22.5  
    22.6  #include <linux/config.h>
    22.7 +#include <linux/preempt.h>
    22.8  #include <linux/smp_lock.h>
    22.9  #include <asm/hardirq.h>
   22.10  #include <asm/system.h>
   22.11 @@ -43,13 +44,19 @@
   22.12  #define __IRQ_MASK(x)	((1UL << (x))-1)
   22.13  
   22.14  #define PREEMPT_MASK	(__IRQ_MASK(PREEMPT_BITS) << PREEMPT_SHIFT)
   22.15 +#define SOFTIRQ_MASK	(__IRQ_MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT)
   22.16  #define HARDIRQ_MASK	(__IRQ_MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT)
   22.17 -#define SOFTIRQ_MASK	(__IRQ_MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT)
   22.18  
   22.19  #define PREEMPT_OFFSET	(1UL << PREEMPT_SHIFT)
   22.20  #define SOFTIRQ_OFFSET	(1UL << SOFTIRQ_SHIFT)
   22.21  #define HARDIRQ_OFFSET	(1UL << HARDIRQ_SHIFT)
   22.22  
   22.23 +#if PREEMPT_ACTIVE < (1 << (HARDIRQ_SHIFT + HARDIRQ_BITS))
   22.24 +#ifndef XEN
   22.25 +#error PREEMPT_ACTIVE is too low!
   22.26 +#endif
   22.27 +#endif
   22.28 +
   22.29  #define hardirq_count()	(preempt_count() & HARDIRQ_MASK)
   22.30  #define softirq_count()	(preempt_count() & SOFTIRQ_MASK)
   22.31  #define irq_count()	(preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK))
   22.32 @@ -60,10 +67,10 @@
   22.33   */
   22.34  #define in_irq()		(hardirq_count())
   22.35  #define in_softirq()		(softirq_count())
   22.36 -#ifndef XEN
   22.37 +#ifdef XEN
   22.38 +#define in_interrupt()		0 		// FIXME SMP LATER
   22.39 +#else
   22.40  #define in_interrupt()		(irq_count())
   22.41 -#else
   22.42 -#define in_interrupt()		0		// FIXME LATER
   22.43  #endif
   22.44  
   22.45  #if defined(CONFIG_PREEMPT) && !defined(CONFIG_PREEMPT_BKL)
    23.1 --- a/xen/include/asm-ia64/linux-xen/linux/interrupt.h	Wed Aug 31 17:21:24 2005 -0600
    23.2 +++ b/xen/include/asm-ia64/linux-xen/linux/interrupt.h	Thu Sep 01 11:09:27 2005 -0600
    23.3 @@ -123,7 +123,9 @@ struct softirq_action
    23.4  };
    23.5  
    23.6  asmlinkage void do_softirq(void);
    23.7 -//extern void open_softirq(int nr, void (*action)(struct softirq_action*), void *data);
    23.8 +#ifndef XEN
    23.9 +extern void open_softirq(int nr, void (*action)(struct softirq_action*), void *data);
   23.10 +#endif
   23.11  extern void softirq_init(void);
   23.12  #define __raise_softirq_irqoff(nr) do { local_softirq_pending() |= 1UL << (nr); } while (0)
   23.13  extern void FASTCALL(raise_softirq_irqoff(unsigned int nr));
    24.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    24.2 +++ b/xen/include/asm-ia64/xengcc_intrin.h	Thu Sep 01 11:09:27 2005 -0600
    24.3 @@ -0,0 +1,59 @@
    24.4 +#ifndef _ASM_IA64_XENGCC_INTRIN_H
    24.5 +#define _ASM_IA64_XENGCC_INTRIN_H
    24.6 +/*
    24.7 + * Flushrs instruction stream.
    24.8 + */
    24.9 +#define ia64_flushrs() asm volatile ("flushrs;;":::"memory")
   24.10 +
   24.11 +#define ia64_loadrs() asm volatile ("loadrs;;":::"memory")
   24.12 +
   24.13 +#define ia64_get_rsc()                          \
   24.14 +({                                  \
   24.15 +    unsigned long val;                     \
   24.16 +    asm volatile ("mov %0=ar.rsc;;" : "=r"(val) :: "memory");  \
   24.17 +    val;                               \
   24.18 +})
   24.19 +
   24.20 +#define ia64_set_rsc(val)                       \
   24.21 +    asm volatile ("mov ar.rsc=%0;;" :: "r"(val) : "memory")
   24.22 +
   24.23 +#define ia64_get_bspstore()     \
   24.24 +({                                  \
   24.25 +    unsigned long val;                     \
   24.26 +    asm volatile ("mov %0=ar.bspstore;;" : "=r"(val) :: "memory");  \
   24.27 +    val;                               \
   24.28 +})
   24.29 +
   24.30 +#define ia64_set_bspstore(val)                       \
   24.31 +    asm volatile ("mov ar.bspstore=%0;;" :: "r"(val) : "memory")
   24.32 +
   24.33 +#define ia64_get_rnat()     \
   24.34 +({                                  \
   24.35 +    unsigned long val;                     \
   24.36 +    asm volatile ("mov %0=ar.rnat;" : "=r"(val) :: "memory");  \
   24.37 +    val;                               \
   24.38 +})
   24.39 +
   24.40 +#define ia64_set_rnat(val)                       \
   24.41 +    asm volatile ("mov ar.rnat=%0;;" :: "r"(val) : "memory")
   24.42 +
   24.43 +#define ia64_ttag(addr)							\
   24.44 +({										\
   24.45 +	__u64 ia64_intri_res;							\
   24.46 +	asm volatile ("ttag %0=%1" : "=r"(ia64_intri_res) : "r" (addr));	\
   24.47 +	ia64_intri_res;								\
   24.48 +})
   24.49 +
   24.50 +#define ia64_get_dcr()                          \
   24.51 +({                                      \
   24.52 +    __u64 result;                               \
   24.53 +    asm volatile ("mov %0=cr.dcr" : "=r"(result) : );           \
   24.54 +    result;                                 \
   24.55 +})
   24.56 +
   24.57 +#define ia64_set_dcr(val)                           \
   24.58 +({                                      \
   24.59 +    asm volatile ("mov cr.dcr=%0" :: "r"(val) );            \
   24.60 +})
   24.61 +
   24.62 +#endif /* _ASM_IA64_XENGCC_INTRIN_H */
    25.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    25.2 +++ b/xen/include/asm-ia64/xenia64regs.h	Thu Sep 01 11:09:27 2005 -0600
    25.3 @@ -0,0 +1,31 @@
    25.4 +#ifndef _ASM_IA64_XENIA64REGS_H
    25.5 +#define _ASM_IA64_XENIA64REGS_H
    25.6 +
    25.7 +#define IA64_REG_CR_DCR   0
    25.8 +#define IA64_REG_CR_ITM   1
    25.9 +#define IA64_REG_CR_IVA   2
   25.10 +#define IA64_REG_CR_PTA   8
   25.11 +#define IA64_REG_CR_IPSR  16
   25.12 +#define IA64_REG_CR_ISR   17
   25.13 +#define IA64_REG_CR_IIP   19
   25.14 +#define IA64_REG_CR_IFA   20
   25.15 +#define IA64_REG_CR_ITIR  21
   25.16 +#define IA64_REG_CR_IIPA  22
   25.17 +#define IA64_REG_CR_IFS   23
   25.18 +#define IA64_REG_CR_IIM   24
   25.19 +#define IA64_REG_CR_IHA   25
   25.20 +#define IA64_REG_CR_LID   64
   25.21 +#define IA64_REG_CR_IVR   65
   25.22 +#define IA64_REG_CR_TPR   66
   25.23 +#define IA64_REG_CR_EOI   67
   25.24 +#define IA64_REG_CR_IRR0  68
   25.25 +#define IA64_REG_CR_IRR1  69
   25.26 +#define IA64_REG_CR_IRR2  70
   25.27 +#define IA64_REG_CR_IRR3  71
   25.28 +#define IA64_REG_CR_ITV   72
   25.29 +#define IA64_REG_CR_PMV   73
   25.30 +#define IA64_REG_CR_CMCV  74
   25.31 +#define IA64_REG_CR_LRR0  80
   25.32 +#define IA64_REG_CR_LRR1  81
   25.33 +
   25.34 +#endif /* _ASM_IA64_XENIA64REGS_H */
    26.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    26.2 +++ b/xen/include/asm-ia64/xenkregs.h	Thu Sep 01 11:09:27 2005 -0600
    26.3 @@ -0,0 +1,37 @@
    26.4 +#ifndef _ASM_IA64_XENKREGS_H
    26.5 +#define _ASM_IA64_XENKREGS_H
    26.6 +
    26.7 +/*
    26.8 + * Translation registers:
    26.9 + */
   26.10 +#define IA64_TR_SHARED_INFO	3	/* dtr3: page shared with domain */
   26.11 +#define	IA64_TR_VHPT		4	/* dtr4: vhpt */
   26.12 +#define IA64_TR_ARCH_INFO      5
   26.13 +
   26.14 +#ifdef CONFIG_VTI
   26.15 +#define IA64_TR_VHPT_IN_DOM	5	/* dtr5: Double mapping for vhpt table in domain space */
   26.16 +#define IA64_TR_XEN_IN_DOM	6	/* itr6, dtr6: Double mapping for xen image in domain space */
   26.17 +#define IA64_TR_RR7_SWITCH_STUB	7	/* dtr7: mapping for rr7 switch stub */
   26.18 +#define IA64_TEMP_PHYSICAL	8	/* itr8, dtr8: temp mapping for guest physical memory 256M */
   26.19 +#endif // CONFIG_VTI
   26.20 +
   26.21 +/* Processor status register bits: */
   26.22 +#define IA64_PSR_VM_BIT		46
   26.23 +#define IA64_PSR_VM	(__IA64_UL(1) << IA64_PSR_VM_BIT)
   26.24 +
   26.25 +/* Interruption Function State */
   26.26 +#define IA64_IFS_V_BIT		63
   26.27 +#define IA64_IFS_V	(__IA64_UL(1) << IA64_IFS_V_BIT)
   26.28 +
   26.29 +/* Page Table Address */
   26.30 +#define IA64_PTA_VE_BIT 0
   26.31 +#define IA64_PTA_SIZE_BIT 2
   26.32 +#define IA64_PTA_VF_BIT 8
   26.33 +#define IA64_PTA_BASE_BIT 15
   26.34 +
   26.35 +#define IA64_PTA_VE     (__IA64_UL(1) << IA64_PTA_VE_BIT)
   26.36 +#define IA64_PTA_SIZE   (__IA64_UL(0x3f) << IA64_PTA_SIZE_BIT)
   26.37 +#define IA64_PTA_VF     (__IA64_UL(1) << IA64_PTA_VF_BIT)
   26.38 +#define IA64_PTA_BASE   (__IA64_UL(0) - ((__IA64_UL(1) << IA64_PTA_BASE_BIT)))
   26.39 +
   26.40 +#endif /* _ASM_IA64_XENKREGS_H */
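
The PTA field macros encode cr.pta: bit 0 enables the VHPT walker, bits 2-7 hold the table size as a power of two, bit 8 selects the long (hashed) format, and bits 15-63 hold the base. A hedged sketch of composing a value from them (hypothetical helper; it relies on __IA64_UL just as the header itself does):

	/* Hypothetical sketch: build a cr.pta value for a short-format VHPT of
	 * 2^log2_size bytes based at vhpt_base (which must be suitably aligned). */
	static inline unsigned long make_pta(unsigned long vhpt_base,
	                                     unsigned long log2_size)
	{
		return (vhpt_base & IA64_PTA_BASE)
		     | ((log2_size << IA64_PTA_SIZE_BIT) & IA64_PTA_SIZE)
		     | IA64_PTA_VE;	/* walker enabled, VF left clear */
	}
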
    27.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    27.2 +++ b/xen/include/asm-ia64/xenpage.h	Thu Sep 01 11:09:27 2005 -0600
    27.3 @@ -0,0 +1,42 @@
    27.4 +#ifndef _ASM_IA64_XENPAGE_H
    27.5 +#define _ASM_IA64_XENPAGE_H
    27.6 +
    27.7 +#ifdef CONFIG_DISCONTIGMEM
    27.8 +#error "xenpage.h: page macros need to be defined for CONFIG_DISCONTIGMEM"
    27.9 +#endif
   27.10 +
   27.11 +#undef pfn_valid
   27.12 +#undef page_to_pfn
   27.13 +#undef pfn_to_page
   27.14 +# define pfn_valid(pfn)		(0)
   27.15 +# define page_to_pfn(_page)	((unsigned long) ((_page) - frame_table))
   27.16 +# define pfn_to_page(_pfn)	(frame_table + (_pfn))
   27.17 +
   27.18 +#undef page_to_phys
   27.19 +#undef virt_to_page
   27.20 +#define page_to_phys(page)	(page_to_pfn(page) << PAGE_SHIFT)
   27.21 +#define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
   27.22 +
   27.23 +#define page_to_virt(_page)	phys_to_virt(page_to_phys(_page))
   27.24 +#define phys_to_page(kaddr)	pfn_to_page(((kaddr) >> PAGE_SHIFT))
   27.25 +
   27.26 +#ifndef __ASSEMBLY__
   27.27 +typedef union xen_va {
   27.28 +	struct {
   27.29 +		unsigned long off : 60;
   27.30 +		unsigned long reg : 4;
   27.31 +	} f;
   27.32 +	unsigned long l;
   27.33 +	void *p;
   27.34 +} xen_va;
   27.35 +#endif
   27.36 +
   27.37 +#undef __pa
   27.38 +#undef __va
   27.39 +#define __pa(x)		({xen_va _v; _v.l = (long) (x); _v.f.reg = 0; _v.l;})
   27.40 +#define __va(x)		({xen_va _v; _v.l = (long) (x); _v.f.reg = -1; _v.p;})
   27.41 +
   27.42 +#undef PAGE_OFFSET
   27.43 +#define PAGE_OFFSET	__IA64_UL_CONST(0xf000000000000000)
   27.44 +
   27.45 +#endif /* _ASM_IA64_XENPAGE_H */
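
With the xen_va union, __pa() simply clears the top four address bits and __va() forces them to 0xf, i.e. the identity map rooted at PAGE_OFFSET. A hedged illustration of the round trip (hypothetical function, not part of the changeset):

	/* Hypothetical sketch: machine address -> Xen virtual address and back. */
	static inline void *maddr_to_xen_virt(unsigned long maddr)
	{
		void *v = __va(maddr);		/* 0xf000... | maddr  */
		unsigned long back = __pa(v);	/* top 4 bits cleared */

		return back == maddr ? v : NULL;	/* lossless for sub-2^60 addresses */
	}
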
    28.1 --- a/xen/include/asm-ia64/xenprocessor.h	Wed Aug 31 17:21:24 2005 -0600
    28.2 +++ b/xen/include/asm-ia64/xenprocessor.h	Thu Sep 01 11:09:27 2005 -0600
    28.3 @@ -213,4 +213,20 @@ enum {
    28.4          ret;                            \
    28.5  })
    28.6  
    28.7 +typedef union {
    28.8 +	struct {
    28.9 +		__u64 kr0;
   28.10 +		__u64 kr1;
   28.11 +		__u64 kr2;
   28.12 +		__u64 kr3;
   28.13 +		__u64 kr4;
   28.14 +		__u64 kr5;
   28.15 +		__u64 kr6;
   28.16 +		__u64 kr7;
   28.17 +	};
   28.18 +	__u64 _kr[8];
   28.19 +} cpu_kr_ia64_t;
   28.20 +
   28.21 +DECLARE_PER_CPU(cpu_kr_ia64_t, cpu_kr);
   28.22 +
   28.23  #endif // _ASM_IA64_XENPROCESSOR_H
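
cpu_kr_ia64_t gives both named and array views of the eight ar.k* kernel registers, and cpu_kr is a per-CPU copy of them. A hedged sketch of how it might be filled (hypothetical helper; assumes the usual ia64_get_kr() and __get_cpu_var() helpers from the Linux-derived headers are in scope):

	/* Hypothetical sketch: snapshot this CPU's kernel registers into the
	 * per-CPU cpu_kr area, using the array view of the union. */
	static inline void snapshot_cpu_kr(void)
	{
		cpu_kr_ia64_t *kr = &__get_cpu_var(cpu_kr);
		int i;

		for (i = 0; i < 8; i++)
			kr->_kr[i] = ia64_get_kr(i);	/* ar.k0 .. ar.k7 */
	}
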
    29.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    29.2 +++ b/xen/include/asm-ia64/xenspinlock.h	Thu Sep 01 11:09:27 2005 -0600
    29.3 @@ -0,0 +1,30 @@
    29.4 +#ifndef _ASM_IA64_XENSPINLOCK_H
    29.5 +#define _ASM_IA64_XENSPINLOCK_H
    29.6 +
    29.7 +/*
    29.8 + * spin_[un]lock_recursive(): Use these forms when the lock can (safely!) be
    29.9 + * reentered recursively on the same CPU. All critical regions that may form
   29.10 + * part of a recursively-nested set must be protected by these forms. If there
   29.11 + * are any critical regions that cannot form part of such a set, they can use
   29.12 + * standard spin_[un]lock().
   29.13 + */
   29.14 +#define _raw_spin_lock_recursive(_lock)            \
   29.15 +    do {                                           \
   29.16 +        int cpu = smp_processor_id();              \
   29.17 +        if ( likely((_lock)->recurse_cpu != cpu) ) \
   29.18 +        {                                          \
   29.19 +            spin_lock(_lock);                      \
   29.20 +            (_lock)->recurse_cpu = cpu;            \
   29.21 +        }                                          \
   29.22 +        (_lock)->recurse_cnt++;                    \
   29.23 +    } while ( 0 )
   29.24 +
   29.25 +#define _raw_spin_unlock_recursive(_lock)          \
   29.26 +    do {                                           \
   29.27 +        if ( likely(--(_lock)->recurse_cnt == 0) ) \
   29.28 +        {                                          \
   29.29 +            (_lock)->recurse_cpu = -1;             \
   29.30 +            spin_unlock(_lock);                    \
   29.31 +        }                                          \
   29.32 +    } while ( 0 )
   29.33 +#endif /*  _ASM_IA64_XENSPINLOCK_H */
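
These forms assume the lock structure carries recurse_cpu (initialised to -1) and recurse_cnt fields alongside the base lock. A hedged usage sketch (hypothetical function, not part of the changeset) showing why only the outermost unlock releases the lock:

	/* Hypothetical sketch: nested acquisition on one CPU. */
	static void recursive_lock_example(spinlock_t *lock)
	{
		_raw_spin_lock_recursive(lock);		/* takes the lock, cnt = 1    */
		_raw_spin_lock_recursive(lock);		/* same CPU: cnt = 2 only     */
		/* ... critical section that may be re-entered ... */
		_raw_spin_unlock_recursive(lock);	/* cnt 2 -> 1, still held     */
		_raw_spin_unlock_recursive(lock);	/* cnt 1 -> 0, really unlocks */
	}
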
    30.1 --- a/xen/include/asm-ia64/xensystem.h	Wed Aug 31 17:21:24 2005 -0600
    30.2 +++ b/xen/include/asm-ia64/xensystem.h	Thu Sep 01 11:09:27 2005 -0600
    30.3 @@ -22,7 +22,9 @@
    30.4  #endif // CONFIG_VTI
    30.5  
    30.6  #define XEN_START_ADDR		 0xf000000000000000
    30.7 +#undef KERNEL_START
    30.8  #define KERNEL_START		 0xf000000004000000
    30.9 +#undef PERCPU_ADDR
   30.10  #define PERCPU_ADDR		 0xf100000000000000-PERCPU_PAGE_SIZE
   30.11  #define SHAREDINFO_ADDR		 0xf100000000000000
   30.12  #define VHPT_ADDR		 0xf200000000000000
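
With these constants, the per-CPU page sits immediately below the shared-info mapping in region 7. As a hedged illustration (hypothetical check, not part of the changeset), the classic negative-array-size trick could encode that layout assumption at compile time:

	/* Hypothetical sketch: fails to compile if the layout assumption breaks. */
	typedef char percpu_is_just_below_sharedinfo
		[(PERCPU_ADDR + PERCPU_PAGE_SIZE == SHAREDINFO_ADDR) ? 1 : -1];
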
   30.13 @@ -31,8 +33,10 @@
   30.14  
   30.15  #ifndef __ASSEMBLY__
   30.16  
   30.17 +#undef IA64_HAS_EXTRA_STATE
   30.18  #define IA64_HAS_EXTRA_STATE(t) 0
   30.19  
   30.20 +#undef __switch_to
   30.21  #ifdef CONFIG_VTI
   30.22  extern struct task_struct *vmx_ia64_switch_to (void *next_task);
   30.23  #define __switch_to(prev,next,last) do {	\
    31.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    31.2 +++ b/xen/include/asm-ia64/xentypes.h	Thu Sep 01 11:09:27 2005 -0600
    31.3 @@ -0,0 +1,29 @@
    31.4 +#ifndef _ASM_IA64_XENTYPES_H
    31.5 +#define _ASM_IA64_XENTYPES_H
    31.6 +
    31.7 +#ifndef __ASSEMBLY__
    31.8 +typedef long ssize_t;		/* signed, unlike size_t */
    31.9 +typedef unsigned long size_t;
   31.10 +typedef long long loff_t;
   31.11 +
   31.12 +#ifdef __KERNEL__
   31.13 +/* These lines are taken from linux/types.h; they belong in xen/types.h. */
   31.14 +#ifdef __CHECKER__
   31.15 +#define __bitwise __attribute__((bitwise))
   31.16 +#else
   31.17 +#define __bitwise
   31.18 +#endif
   31.19 +
   31.20 +typedef __u16 __bitwise __le16;
   31.21 +typedef __u16 __bitwise __be16;
   31.22 +typedef __u32 __bitwise __le32;
   31.23 +typedef __u32 __bitwise __be32;
   31.24 +#if defined(__GNUC__) && !defined(__STRICT_ANSI__)
   31.25 +typedef __u64 __bitwise __le64;
   31.26 +typedef __u64 __bitwise __be64;
   31.27 +#endif
   31.28 +
   31.29 +# endif /* __KERNEL__ */
   31.30 +#endif /* !__ASSEMBLY__ */
   31.31 +
   31.32 +#endif /* _ASM_IA64_XENTYPES_H */
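
The __le*/__be* typedefs mirror linux/types.h: __bitwise is invisible to gcc but lets sparse (__CHECKER__) flag endian mix-ups. A hedged illustration (hypothetical function, not part of the changeset):

	/* Hypothetical sketch: on little-endian ia64 the conversion is the
	 * identity, but sparse would still want an explicit cast (or a
	 * le32_to_cpu()-style helper) rather than a silent assignment. */
	static inline __u32 example_le32_to_cpu(__le32 v)
	{
		return (__u32) v;
	}
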