direct-io.hg

changeset 6837:4e4f1db8ea94

More updating to linux 2.6.13 sources
author djm@kirby.fc.hp.com
date Wed Aug 31 16:55:04 2005 -0600 (2005-08-31)
parents b7276814008c
children 23217792aa3b
files xen/arch/ia64/Makefile xen/arch/ia64/linux-xen/README.origin xen/arch/ia64/linux-xen/efi.c xen/arch/ia64/linux-xen/entry.S xen/arch/ia64/linux-xen/entry.h xen/arch/ia64/linux-xen/head.S xen/arch/ia64/linux-xen/irq_ia64.c xen/arch/ia64/linux-xen/minstate.h xen/arch/ia64/linux-xen/mm_contig.c xen/arch/ia64/linux-xen/unaligned.c xen/arch/ia64/linux/README.origin xen/arch/ia64/vmx_irq_ia64.c
line diff
     1.1 --- a/xen/arch/ia64/Makefile	Wed Aug 31 14:32:27 2005 -0600
     1.2 +++ b/xen/arch/ia64/Makefile	Wed Aug 31 16:55:04 2005 -0600
     1.3 @@ -23,7 +23,8 @@ OBJS += vmx_init.o
     1.4  ifeq ($(CONFIG_VTI),y)
     1.5  OBJS += vmx_virt.o vmx_vcpu.o vmx_process.o vmx_vsa.o vmx_ivt.o\
     1.6  	vmx_phy_mode.o vmx_utility.o vmx_interrupt.o vmx_entry.o vmmu.o \
     1.7 -	vtlb.o mmio.o vlsapic.o vmx_hypercall.o mm.o vmx_support.o pal_emul.o
     1.8 +	vtlb.o mmio.o vlsapic.o vmx_hypercall.o mm.o vmx_support.o \
     1.9 +	pal_emul.o vmx_irq_ia64.o
    1.10  endif
    1.11  
    1.12  # files from xen/arch/ia64/linux/lib (linux/arch/ia64/lib)
     2.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     2.2 +++ b/xen/arch/ia64/linux-xen/README.origin	Wed Aug 31 16:55:04 2005 -0600
     2.3 @@ -0,0 +1,21 @@
     2.4 +Source files in this directory are near-identical copies of linux-2.6.13 files:
     2.5 +
     2.6 +NOTE: ALL changes to these files should be clearly marked (e.g. with
     2.7 +#ifdef XEN or XEN in a comment) so that they can be easily updated
     2.8 +to future versions of the corresponding Linux files.
     2.9 +
    2.10 +efi.c		-> linux/arch/ia64/kernel/efi.c
    2.11 +entry.h		-> linux/arch/ia64/kernel/entry.h
    2.12 +entry.S		-> linux/arch/ia64/kernel/entry.S
    2.13 +irq_ia64.c	-> linux/arch/ia64/kernel/irq_ia64.c
    2.14 +minstate.h	-> linux/arch/ia64/kernel/minstate.h
    2.15 +mm_contig.c	-> linux/arch/ia64/mm/contig.c
    2.16 +pal.S		-> linux/arch/ia64/kernel/pal.S
    2.17 +sal.c		-> linux/arch/ia64/kernel/sal.c
    2.18 +setup.c		-> linux/arch/ia64/kernel/setup.c
    2.19 +smp.c		-> linux/arch/ia64/kernel/smp.c
    2.20 +smpboot.c	-> linux/arch/ia64/kernel/smpboot.c
    2.21 +sort.c		-> linux/lib/sort.c
    2.22 +time.c		-> linux/arch/ia64/kernel/time.c
    2.23 +tlb.c		-> linux/arch/ia64/mm/tlb.c
    2.24 +unaligned.c	-> linux/arch/ia64/kernel/unaligned.c
     3.1 --- a/xen/arch/ia64/linux-xen/efi.c	Wed Aug 31 14:32:27 2005 -0600
     3.2 +++ b/xen/arch/ia64/linux-xen/efi.c	Wed Aug 31 16:55:04 2005 -0600
     3.3 @@ -420,6 +420,38 @@ efi_memmap_walk (efi_freemem_callback_t 
     3.4  }
     3.5  
     3.6  /*
     3.7 + * Walk the EFI memory map to pull out leftover pages in the lower
     3.8 + * memory regions which do not end up in the regular memory map and
     3.9 + * stick them into the uncached allocator
    3.10 + *
    3.11 + * The regular walk function is significantly more complex than the
    3.12 + * uncached walk which means it really doesn't make sense to try and
    3.13 + * merge the two.
    3.14 + */
    3.15 +void __init
    3.16 +efi_memmap_walk_uc (efi_freemem_callback_t callback)
    3.17 +{
    3.18 +	void *efi_map_start, *efi_map_end, *p;
    3.19 +	efi_memory_desc_t *md;
    3.20 +	u64 efi_desc_size, start, end;
    3.21 +
    3.22 +	efi_map_start = __va(ia64_boot_param->efi_memmap);
    3.23 +	efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
    3.24 +	efi_desc_size = ia64_boot_param->efi_memdesc_size;
    3.25 +
    3.26 +	for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
    3.27 +		md = p;
    3.28 +		if (md->attribute == EFI_MEMORY_UC) {
    3.29 +			start = PAGE_ALIGN(md->phys_addr);
    3.30 +			end = PAGE_ALIGN((md->phys_addr+(md->num_pages << EFI_PAGE_SHIFT)) & PAGE_MASK);
    3.31 +			if ((*callback)(start, end, NULL) < 0)
    3.32 +				return;
    3.33 +		}
    3.34 +	}
    3.35 +}
    3.36 +
    3.37 +
    3.38 +/*
    3.39   * Look for the PAL_CODE region reported by EFI and map it using an
    3.40   * ITR to enable safe PAL calls in virtual mode.  See IA-64 Processor
    3.41   * Abstraction Layer chapter 11 in ADAG
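
For orientation: the new efi_memmap_walk_uc() hands each EFI_MEMORY_UC range to an efi_freemem_callback_t. Below is a minimal consumer sketch; the callback name is hypothetical and the parameter types are taken from the u64 locals above (the real typedef may use unsigned long):

	/* Hypothetical consumer: receives each uncached range [start, end).
	 * A negative return stops the walk early, matching the
	 * "(*callback)(start, end, NULL) < 0" check in the walker above. */
	static int __init collect_uc_range(u64 start, u64 end, void *arg)
	{
		if (end <= start)
			return 0;	/* range rounded away by PAGE_ALIGN, skip it */
		/* feed [start, end) to an uncached-page allocator here */
		return 0;		/* keep walking */
	}

	/* at boot: */
	efi_memmap_walk_uc(collect_uc_range);
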
     4.1 --- a/xen/arch/ia64/linux-xen/entry.S	Wed Aug 31 14:32:27 2005 -0600
     4.2 +++ b/xen/arch/ia64/linux-xen/entry.S	Wed Aug 31 16:55:04 2005 -0600
     4.3 @@ -175,7 +175,7 @@ GLOBAL_ENTRY(sys_clone)
     4.4  	mov rp=loc0
     4.5  	br.ret.sptk.many rp
     4.6  END(sys_clone)
     4.7 -#endif /* !XEN */
     4.8 +#endif
     4.9  
    4.10  /*
    4.11   * prev_task <- ia64_switch_to(struct task_struct *next)
    4.12 @@ -191,12 +191,14 @@ GLOBAL_ENTRY(ia64_switch_to)
    4.13  
    4.14  	adds r22=IA64_TASK_THREAD_KSP_OFFSET,r13
    4.15  	movl r25=init_task
    4.16 +#ifdef XEN
    4.17  	movl r27=THIS_CPU(cpu_kr)+IA64_KR_CURRENT_STACK_OFFSET;;
    4.18  	ld8 r27=[r27]
    4.19  	adds r21=IA64_TASK_THREAD_KSP_OFFSET,in0
    4.20 -#ifdef XEN
    4.21  	dep r20=0,in0,60,4		// physical address of "next"
    4.22  #else
    4.23 +	mov r27=IA64_KR(CURRENT_STACK)
    4.24 +	adds r21=IA64_TASK_THREAD_KSP_OFFSET,in0
    4.25  	dep r20=0,in0,61,3		// physical address of "next"
    4.26  #endif
    4.27  	;;
    4.28 @@ -215,8 +217,12 @@ GLOBAL_ENTRY(ia64_switch_to)
    4.29  	;;
    4.30  (p6)	srlz.d
    4.31  	ld8 sp=[r21]			// load kernel stack pointer of new task
    4.32 +#ifdef XEN
    4.33  	movl r8=THIS_CPU(cpu_kr)+IA64_KR_CURRENT_OFFSET;;
    4.34  	st8 [r8]=in0
    4.35 +#else
    4.36 +	mov IA64_KR(CURRENT)=in0	// update "current" application register
    4.37 +#endif
    4.38  	mov r8=r13			// return pointer to previously running task
    4.39  	mov r13=in0			// set "current" pointer
    4.40  	;;
    4.41 @@ -250,8 +256,14 @@ GLOBAL_ENTRY(ia64_switch_to)
    4.42  	mov cr.ifa=in0			// VA of next task...
    4.43  	;;
    4.44  	mov r25=IA64_TR_CURRENT_STACK
    4.45 +#ifdef XEN
    4.46  	movl r8=THIS_CPU(cpu_kr)+IA64_KR_CURRENT_STACK_OFFSET;;
    4.47  	st8 [r8]=r26
    4.48 +	
    4.49 +#else
    4.50 +	mov IA64_KR(CURRENT_STACK)=r26	// remember last page we mapped...
    4.51 +#endif
    4.52 +	;;
    4.53  	itr.d dtr[r25]=r23		// wire in new mapping...
    4.54  	br.cond.sptk .done
    4.55  END(ia64_switch_to)
    4.56 @@ -494,18 +506,6 @@ ENTRY(load_switch_stack)
    4.57  END(load_switch_stack)
    4.58  
    4.59  #ifndef XEN
    4.60 -GLOBAL_ENTRY(__ia64_syscall)
    4.61 -	.regstk 6,0,0,0
    4.62 -	mov r15=in5				// put syscall number in place
    4.63 -	break __BREAK_SYSCALL
    4.64 -	movl r2=errno
    4.65 -	cmp.eq p6,p7=-1,r10
    4.66 -	;;
    4.67 -(p6)	st4 [r2]=r8
    4.68 -(p6)	mov r8=-1
    4.69 -	br.ret.sptk.many rp
    4.70 -END(__ia64_syscall)
    4.71 -
    4.72  GLOBAL_ENTRY(execve)
    4.73  	mov r15=__NR_execve			// put syscall number in place
    4.74  	break __BREAK_SYSCALL
    4.75 @@ -672,7 +672,7 @@ END(ia64_ret_from_syscall)
    4.76   *	      r8-r11: restored (syscall return value(s))
    4.77   *		 r12: restored (user-level stack pointer)
    4.78   *		 r13: restored (user-level thread pointer)
    4.79 - *		 r14: cleared
    4.80 + *		 r14: set to __kernel_syscall_via_epc
    4.81   *		 r15: restored (syscall #)
    4.82   *	     r16-r17: cleared
    4.83   *		 r18: user-level b6
    4.84 @@ -693,7 +693,7 @@ END(ia64_ret_from_syscall)
    4.85   *		  pr: restored (user-level pr)
    4.86   *		  b0: restored (user-level rp)
    4.87   *	          b6: restored
    4.88 - *		  b7: cleared
    4.89 + *		  b7: set to __kernel_syscall_via_epc
    4.90   *	     ar.unat: restored (user-level ar.unat)
    4.91   *	      ar.pfs: restored (user-level ar.pfs)
    4.92   *	      ar.rsc: restored (user-level ar.rsc)
    4.93 @@ -743,15 +743,15 @@ ENTRY(ia64_leave_syscall)
    4.94  (p6)	ld4 r31=[r18]				// load current_thread_info()->flags
    4.95  #endif
    4.96  	ld8 r19=[r2],PT(B6)-PT(LOADRS)		// load ar.rsc value for "loadrs"
    4.97 -	mov b7=r0		// clear b7
    4.98 +	nop.i 0
    4.99  	;;
   4.100 -	ld8 r23=[r3],PT(R11)-PT(AR_BSPSTORE)	// load ar.bspstore (may be garbage)
   4.101 +	mov r16=ar.bsp				// M2  get existing backing store pointer
   4.102  	ld8 r18=[r2],PT(R9)-PT(B6)		// load b6
   4.103  #ifndef XEN
   4.104  (p6)	and r15=TIF_WORK_MASK,r31		// any work other than TIF_SYSCALL_TRACE?
   4.105  #endif
   4.106  	;;
   4.107 -	mov r16=ar.bsp				// M2  get existing backing store pointer
   4.108 +	ld8 r23=[r3],PT(R11)-PT(AR_BSPSTORE)	// load ar.bspstore (may be garbage)
   4.109  #ifndef XEN
   4.110  (p6)	cmp4.ne.unc p6,p0=r15, r0		// any special work pending?
   4.111  (p6)	br.cond.spnt .work_pending_syscall
   4.112 @@ -760,63 +760,74 @@ ENTRY(ia64_leave_syscall)
   4.113  	// start restoring the state saved on the kernel stack (struct pt_regs):
   4.114  	ld8 r9=[r2],PT(CR_IPSR)-PT(R9)
   4.115  	ld8 r11=[r3],PT(CR_IIP)-PT(R11)
   4.116 -	mov f6=f0		// clear f6
   4.117 +(pNonSys) break 0		//      bug check: we shouldn't be here if pNonSys is TRUE!
   4.118  	;;
   4.119  	invala			// M0|1 invalidate ALAT
   4.120 -	rsm psr.i | psr.ic	// M2 initiate turning off of interrupt and interruption collection
   4.121 -	mov f9=f0		// clear f9
   4.122 +	rsm psr.i | psr.ic	// M2   turn off interrupts and interruption collection
   4.123 +	cmp.eq p9,p0=r0,r0	// A    set p9 to indicate that we should restore cr.ifs
   4.124  
   4.125 -	ld8 r29=[r2],16		// load cr.ipsr
   4.126 -	ld8 r28=[r3],16			// load cr.iip
   4.127 -	mov f8=f0		// clear f8
   4.128 +	ld8 r29=[r2],16		// M0|1 load cr.ipsr
   4.129 +	ld8 r28=[r3],16		// M0|1 load cr.iip
   4.130 +	mov r22=r0		// A    clear r22
   4.131  	;;
   4.132  	ld8 r30=[r2],16		// M0|1 load cr.ifs
   4.133 -	mov.m ar.ssd=r0		// M2 clear ar.ssd
   4.134 -	cmp.eq p9,p0=r0,r0	// set p9 to indicate that we should restore cr.ifs
   4.135 -	;;
   4.136  	ld8 r25=[r3],16		// M0|1 load ar.unat
   4.137 -	mov.m ar.csd=r0		// M2 clear ar.csd
   4.138 -	mov r22=r0		// clear r22
   4.139 +(pUStk) add r14=IA64_TASK_THREAD_ON_USTACK_OFFSET,r13
   4.140  	;;
   4.141  	ld8 r26=[r2],PT(B0)-PT(AR_PFS)	// M0|1 load ar.pfs
   4.142 -(pKStk)	mov r22=psr		// M2 read PSR now that interrupts are disabled
   4.143 -	mov f10=f0		// clear f10
   4.144 +(pKStk)	mov r22=psr			// M2   read PSR now that interrupts are disabled
   4.145 +	nop 0
   4.146 +	;;
   4.147 +	ld8 r21=[r2],PT(AR_RNAT)-PT(B0) // M0|1 load b0
   4.148 +	ld8 r27=[r3],PT(PR)-PT(AR_RSC)	// M0|1 load ar.rsc
   4.149 +	mov f6=f0			// F    clear f6
   4.150  	;;
   4.151 -	ld8 r21=[r2],PT(AR_RNAT)-PT(B0) // load b0
   4.152 -	ld8 r27=[r3],PT(PR)-PT(AR_RSC)	// load ar.rsc
   4.153 -	mov f11=f0		// clear f11
   4.154 +	ld8 r24=[r2],PT(AR_FPSR)-PT(AR_RNAT)	// M0|1 load ar.rnat (may be garbage)
   4.155 +	ld8 r31=[r3],PT(R1)-PT(PR)		// M0|1 load predicates
   4.156 +	mov f7=f0				// F    clear f7
   4.157  	;;
   4.158 -	ld8 r24=[r2],PT(AR_FPSR)-PT(AR_RNAT)	// load ar.rnat (may be garbage)
   4.159 -	ld8 r31=[r3],PT(R1)-PT(PR)		// load predicates
   4.160 -(pUStk) add r14=IA64_TASK_THREAD_ON_USTACK_OFFSET,r13
   4.161 +	ld8 r20=[r2],PT(R12)-PT(AR_FPSR)	// M0|1 load ar.fpsr
   4.162 +	ld8.fill r1=[r3],16			// M0|1 load r1
   4.163 +(pUStk) mov r17=1				// A
   4.164 +	;;
   4.165 +(pUStk) st1 [r14]=r17				// M2|3
   4.166 +	ld8.fill r13=[r3],16			// M0|1
   4.167 +	mov f8=f0				// F    clear f8
   4.168  	;;
   4.169 -	ld8 r20=[r2],PT(R12)-PT(AR_FPSR)	// load ar.fpsr
   4.170 -	ld8.fill r1=[r3],16	// load r1
   4.171 -(pUStk) mov r17=1
   4.172 +	ld8.fill r12=[r2]			// M0|1 restore r12 (sp)
   4.173 +	ld8.fill r15=[r3]			// M0|1 restore r15
   4.174 +	mov b6=r18				// I0   restore b6
   4.175 +
   4.176 +#ifdef XEN
   4.177 +	movl r17=THIS_CPU(ia64_phys_stacked_size_p8)    // A
   4.178 +#else
   4.179 +	addl r17=THIS_CPU(ia64_phys_stacked_size_p8),r0 // A
   4.180 +#endif
   4.181 +	mov f9=f0					// F    clear f9
   4.182 +(pKStk) br.cond.dpnt.many skip_rbs_switch		// B
   4.183 +
   4.184 +	srlz.d				// M0   ensure interruption collection is off (for cover)
   4.185 +	shr.u r18=r19,16		// I0|1 get byte size of existing "dirty" partition
   4.186 +	cover				// B    add current frame into dirty partition & set cr.ifs
   4.187  	;;
   4.188 -	srlz.d			// M0  ensure interruption collection is off
   4.189 -	ld8.fill r13=[r3],16
   4.190 -	mov f7=f0		// clear f7
   4.191 -	;;
   4.192 -	ld8.fill r12=[r2]	// restore r12 (sp)
   4.193 -	ld8.fill r15=[r3]	// restore r15
   4.194 +(pUStk) ld4 r17=[r17]			// M0|1 r17 = cpu_data->phys_stacked_size_p8
   4.195 +	mov r19=ar.bsp			// M2   get new backing store pointer
   4.196 +	mov f10=f0			// F    clear f10
   4.197 +
   4.198 +	nop.m 0
   4.199  #ifdef XEN
   4.200 -	movl r3=THIS_CPU(ia64_phys_stacked_size_p8)
   4.201 +	mov r14=r0
   4.202  #else
   4.203 -	addl r3=THIS_CPU(ia64_phys_stacked_size_p8),r0
   4.204 +	movl r14=__kernel_syscall_via_epc // X
   4.205  #endif
   4.206  	;;
   4.207 -(pUStk)	ld4 r3=[r3]		// r3 = cpu_data->phys_stacked_size_p8
   4.208 -(pUStk) st1 [r14]=r17
   4.209 -	mov b6=r18		// I0  restore b6
   4.210 -	;;
   4.211 -	mov r14=r0		// clear r14
   4.212 -	shr.u r18=r19,16	// I0|1 get byte size of existing "dirty" partition
   4.213 -(pKStk) br.cond.dpnt.many skip_rbs_switch
   4.214 +	mov.m ar.csd=r0			// M2   clear ar.csd
   4.215 +	mov.m ar.ccv=r0			// M2   clear ar.ccv
   4.216 +	mov b7=r14			// I0   clear b7 (hint with __kernel_syscall_via_epc)
   4.217  
   4.218 -	mov.m ar.ccv=r0		// clear ar.ccv
   4.219 -(pNonSys) br.cond.dpnt.many dont_preserve_current_frame
   4.220 -	br.cond.sptk.many rbs_switch
   4.221 +	mov.m ar.ssd=r0			// M2   clear ar.ssd
   4.222 +	mov f11=f0			// F    clear f11
   4.223 +	br.cond.sptk.many rbs_switch	// B
   4.224  END(ia64_leave_syscall)
   4.225  
   4.226  #ifdef CONFIG_IA32_SUPPORT
   4.227 @@ -829,7 +840,7 @@ GLOBAL_ENTRY(ia64_ret_from_ia32_execve)
   4.228  	st8.spill [r2]=r8	// store return value in slot for r8 and set unat bit
   4.229  	.mem.offset 8,0
   4.230  	st8.spill [r3]=r0	// clear error indication in slot for r10 and set unat bit
   4.231 -END(ia64_ret_from_ia32_execve_syscall)
   4.232 +END(ia64_ret_from_ia32_execve)
   4.233  	// fall through
   4.234  #endif /* CONFIG_IA32_SUPPORT */
   4.235  GLOBAL_ENTRY(ia64_leave_kernel)
   4.236 @@ -884,11 +895,15 @@ GLOBAL_ENTRY(ia64_leave_kernel)
   4.237  	ld8 r28=[r2],8		// load b6
   4.238  	adds r29=PT(R24)+16,r12
   4.239  
   4.240 +#ifdef XEN
   4.241  	ld8.fill r16=[r3]
   4.242 +	adds r3=PT(AR_CSD)-PT(R16),r3
   4.243 +#else
   4.244 +	ld8.fill r16=[r3],PT(AR_CSD)-PT(R16)
   4.245 +#endif
   4.246  	adds r30=PT(AR_CCV)+16,r12
   4.247  (p6)	and r19=TIF_WORK_MASK,r31		// any work other than TIF_SYSCALL_TRACE?
   4.248  	;;
   4.249 -	adds r3=PT(AR_CSD)-PT(R16),r3
   4.250  	ld8.fill r24=[r29]
   4.251  	ld8 r15=[r30]		// load ar.ccv
   4.252  (p6)	cmp4.ne.unc p6,p0=r19, r0		// any special work pending?
   4.253 @@ -944,14 +959,18 @@ GLOBAL_ENTRY(ia64_leave_kernel)
   4.254  	ldf.fill f7=[r2],PT(F11)-PT(F7)
   4.255  	ldf.fill f8=[r3],32
   4.256  	;;
   4.257 -	srlz.i			// ensure interruption collection is off
   4.258 +	srlz.d	// ensure that inter. collection is off (VHPT is don't care, since text is pinned)
   4.259  	mov ar.ccv=r15
   4.260  	;;
   4.261  	ldf.fill f11=[r2]
   4.262  	bsw.0			// switch back to bank 0 (no stop bit required beforehand...)
   4.263  	;;
   4.264 +#ifdef XEN
   4.265  (pUStk) movl r18=THIS_CPU(cpu_kr)+IA64_KR_CURRENT_OFFSET;;
   4.266  (pUStk) ld8 r18=[r18]
   4.267 +#else
   4.268 +(pUStk)	mov r18=IA64_KR(CURRENT)// M2 (12 cycle read latency)
   4.269 +#endif
   4.270  	adds r16=PT(CR_IPSR)+16,r12
   4.271  	adds r17=PT(CR_IIP)+16,r12
   4.272  
   4.273 @@ -1009,11 +1028,10 @@ GLOBAL_ENTRY(ia64_leave_kernel)
   4.274  	 * NOTE: alloc, loadrs, and cover can't be predicated.
   4.275  	 */
   4.276  (pNonSys) br.cond.dpnt dont_preserve_current_frame
   4.277 -
   4.278 -rbs_switch:
   4.279  	cover				// add current frame into dirty partition and set cr.ifs
   4.280  	;;
   4.281  	mov r19=ar.bsp			// get new backing store pointer
   4.282 +rbs_switch:
   4.283  	sub r16=r16,r18			// krbs = old bsp - size of dirty partition
   4.284  	cmp.ne p9,p0=r0,r0		// clear p9 to skip restore of cr.ifs
   4.285  	;;
   4.286 @@ -1088,14 +1106,14 @@ rse_clear_invalid:
   4.287  	mov loc5=0
   4.288  	mov loc6=0
   4.289  	mov loc7=0
   4.290 -(pRecurse) br.call.sptk.few b0=rse_clear_invalid
   4.291 +(pRecurse) br.call.dptk.few b0=rse_clear_invalid
   4.292  	;;
   4.293  	mov loc8=0
   4.294  	mov loc9=0
   4.295  	cmp.ne pReturn,p0=r0,in1	// if recursion count != 0, we need to do a br.ret
   4.296  	mov loc10=0
   4.297  	mov loc11=0
   4.298 -(pReturn) br.ret.sptk.many b0
   4.299 +(pReturn) br.ret.dptk.many b0
   4.300  #endif /* !CONFIG_ITANIUM */
   4.301  #	undef pRecurse
   4.302  #	undef pReturn
   4.303 @@ -1249,7 +1267,7 @@ ENTRY(notify_resume_user)
   4.304  	;;
   4.305  (pNonSys) mov out2=0				// out2==0 => not a syscall
   4.306  	.fframe 16
   4.307 -	.spillpsp ar.unat, 16			// (note that offset is relative to psp+0x10!)
   4.308 +	.spillsp ar.unat, 16
   4.309  	st8 [sp]=r9,-16				// allocate space for ar.unat and save it
   4.310  	st8 [out1]=loc1,-8			// save ar.pfs, out1=&sigscratch
   4.311  	.body
   4.312 @@ -1275,7 +1293,7 @@ GLOBAL_ENTRY(sys_rt_sigsuspend)
   4.313  	adds out2=8,sp				// out2=&sigscratch->ar_pfs
   4.314  	;;
   4.315  	.fframe 16
   4.316 -	.spillpsp ar.unat, 16			// (note that offset is relative to psp+0x10!)
   4.317 +	.spillsp ar.unat, 16
   4.318  	st8 [sp]=r9,-16				// allocate space for ar.unat and save it
   4.319  	st8 [out2]=loc1,-8			// save ar.pfs, out2=&sigscratch
   4.320  	.body
   4.321 @@ -1322,7 +1340,7 @@ ENTRY(sys_rt_sigreturn)
   4.322   	stf.spill [r17]=f11
   4.323  	adds out0=16,sp				// out0 = &sigscratch
   4.324  	br.call.sptk.many rp=ia64_rt_sigreturn
   4.325 -.ret19:	.restore sp 0
   4.326 +.ret19:	.restore sp,0
   4.327  	adds sp=16,sp
   4.328  	;;
   4.329  	ld8 r9=[sp]				// load new ar.unat
   4.330 @@ -1486,7 +1504,7 @@ sys_call_table:
   4.331  	data8 sys_msgrcv
   4.332  	data8 sys_msgctl
   4.333  	data8 sys_shmget
   4.334 -	data8 ia64_shmat
   4.335 +	data8 sys_shmat
   4.336  	data8 sys_shmdt				// 1115
   4.337  	data8 sys_shmctl
   4.338  	data8 sys_syslog
   4.339 @@ -1646,12 +1664,12 @@ sys_call_table:
   4.340  	data8 sys_add_key
   4.341  	data8 sys_request_key
   4.342  	data8 sys_keyctl
   4.343 -	data8 sys_ni_syscall
   4.344 -	data8 sys_ni_syscall			// 1275
   4.345 +	data8 sys_ioprio_set
   4.346 +	data8 sys_ioprio_get			// 1275
   4.347  	data8 sys_ni_syscall
   4.348 -	data8 sys_ni_syscall
   4.349 -	data8 sys_ni_syscall
   4.350 -	data8 sys_ni_syscall
   4.351 +	data8 sys_inotify_init
   4.352 +	data8 sys_inotify_add_watch
   4.353 +	data8 sys_inotify_rm_watch
   4.354  
   4.355  	.org sys_call_table + 8*NR_syscalls	// guard against failures to increase NR_syscalls
   4.356  #endif
     5.1 --- a/xen/arch/ia64/linux-xen/entry.h	Wed Aug 31 14:32:27 2005 -0600
     5.2 +++ b/xen/arch/ia64/linux-xen/entry.h	Wed Aug 31 16:55:04 2005 -0600
     5.3 @@ -7,12 +7,6 @@
     5.4  #define PRED_LEAVE_SYSCALL	1 /* TRUE iff leave from syscall */
     5.5  #define PRED_KERNEL_STACK	2 /* returning to kernel-stacks? */
     5.6  #define PRED_USER_STACK		3 /* returning to user-stacks? */
     5.7 -#ifdef CONFIG_VTI
     5.8 -#define PRED_EMUL		2 /* Need to save r4-r7 for inst emulation */
     5.9 -#define PRED_NON_EMUL		3 /* No need to save r4-r7 for normal path */
    5.10 -#define PRED_BN0		6 /* Guest is in bank 0 */
    5.11 -#define PRED_BN1		7 /* Guest is in bank 1 */
    5.12 -#endif // CONFIG_VTI
    5.13  #define PRED_SYSCALL		4 /* inside a system call? */
    5.14  #define PRED_NON_SYSCALL	5 /* complement of PRED_SYSCALL */
    5.15  
    5.16 @@ -23,21 +17,26 @@
    5.17  # define pLvSys		PASTE(p,PRED_LEAVE_SYSCALL)
    5.18  # define pKStk		PASTE(p,PRED_KERNEL_STACK)
    5.19  # define pUStk		PASTE(p,PRED_USER_STACK)
    5.20 -#ifdef CONFIG_VTI
    5.21 -# define pEml		PASTE(p,PRED_EMUL)
    5.22 -# define pNonEml	PASTE(p,PRED_NON_EMUL)
    5.23 -# define pBN0		PASTE(p,PRED_BN0)
    5.24 -# define pBN1		PASTE(p,PRED_BN1)
    5.25 -#endif // CONFIG_VTI
    5.26  # define pSys		PASTE(p,PRED_SYSCALL)
    5.27  # define pNonSys	PASTE(p,PRED_NON_SYSCALL)
    5.28  #endif
    5.29  
    5.30  #define PT(f)		(IA64_PT_REGS_##f##_OFFSET)
    5.31  #define SW(f)		(IA64_SWITCH_STACK_##f##_OFFSET)
    5.32 +
    5.33 +#ifdef XEN
    5.34  #ifdef CONFIG_VTI
    5.35 +#define PRED_EMUL		2 /* Need to save r4-r7 for inst emulation */
    5.36 +#define PRED_NON_EMUL		3 /* No need to save r4-r7 for normal path */
    5.37 +#define PRED_BN0		6 /* Guest is in bank 0 */
    5.38 +#define PRED_BN1		7 /* Guest is in bank 1 */
    5.39 +# define pEml		PASTE(p,PRED_EMUL)
    5.40 +# define pNonEml	PASTE(p,PRED_NON_EMUL)
    5.41 +# define pBN0		PASTE(p,PRED_BN0)
    5.42 +# define pBN1		PASTE(p,PRED_BN1)
    5.43  #define VPD(f)      (VPD_##f##_START_OFFSET)
    5.44  #endif // CONFIG_VTI
    5.45 +#endif
    5.46  
    5.47  #define PT_REGS_SAVES(off)			\
    5.48  	.unwabi 3, 'i';				\
    5.49 @@ -75,7 +74,7 @@
    5.50  	.spillsp @priunat,SW(AR_UNAT)+16+(off);					\
    5.51  	.spillsp ar.rnat,SW(AR_RNAT)+16+(off);					\
    5.52  	.spillsp ar.bspstore,SW(AR_BSPSTORE)+16+(off);				\
    5.53 -	.spillsp pr,SW(PR)+16+(off))
    5.54 +	.spillsp pr,SW(PR)+16+(off)
    5.55  
    5.56  #define DO_SAVE_SWITCH_STACK			\
    5.57  	movl r28=1f;				\
     6.1 --- a/xen/arch/ia64/linux-xen/head.S	Wed Aug 31 14:32:27 2005 -0600
     6.2 +++ b/xen/arch/ia64/linux-xen/head.S	Wed Aug 31 16:55:04 2005 -0600
     6.3 @@ -15,6 +15,8 @@
     6.4   * Copyright (C) 1999 Don Dugger <Don.Dugger@intel.com>
     6.5   * Copyright (C) 2002 Fenghua Yu <fenghua.yu@intel.com>
     6.6   *   -Optimize __ia64_save_fpu() and __ia64_load_fpu() for Itanium 2.
     6.7 + * Copyright (C) 2004 Ashok Raj <ashok.raj@intel.com>
     6.8 + *   Support for CPU Hotplug
     6.9   */
    6.10  
    6.11  #include <linux/config.h>
    6.12 @@ -29,6 +31,146 @@
    6.13  #include <asm/processor.h>
    6.14  #include <asm/ptrace.h>
    6.15  #include <asm/system.h>
    6.16 +#include <asm/mca_asm.h>
    6.17 +
    6.18 +#ifdef CONFIG_HOTPLUG_CPU
    6.19 +#define SAL_PSR_BITS_TO_SET				\
    6.20 +	(IA64_PSR_AC | IA64_PSR_BN | IA64_PSR_MFH | IA64_PSR_MFL)
    6.21 +
    6.22 +#define SAVE_FROM_REG(src, ptr, dest)	\
    6.23 +	mov dest=src;;						\
    6.24 +	st8 [ptr]=dest,0x08
    6.25 +
    6.26 +#define RESTORE_REG(reg, ptr, _tmp)		\
    6.27 +	ld8 _tmp=[ptr],0x08;;				\
    6.28 +	mov reg=_tmp
    6.29 +
    6.30 +#define SAVE_BREAK_REGS(ptr, _idx, _breg, _dest)\
    6.31 +	mov ar.lc=IA64_NUM_DBG_REGS-1;; 			\
    6.32 +	mov _idx=0;; 								\
    6.33 +1: 												\
    6.34 +	SAVE_FROM_REG(_breg[_idx], ptr, _dest);;	\
    6.35 +	add _idx=1,_idx;;							\
    6.36 +	br.cloop.sptk.many 1b
    6.37 +
    6.38 +#define RESTORE_BREAK_REGS(ptr, _idx, _breg, _tmp, _lbl)\
    6.39 +	mov ar.lc=IA64_NUM_DBG_REGS-1;;			\
    6.40 +	mov _idx=0;;							\
    6.41 +_lbl:  RESTORE_REG(_breg[_idx], ptr, _tmp);;	\
    6.42 +	add _idx=1, _idx;;						\
    6.43 +	br.cloop.sptk.many _lbl
    6.44 +
    6.45 +#define SAVE_ONE_RR(num, _reg, _tmp) \
    6.46 +	movl _tmp=(num<<61);;	\
    6.47 +	mov _reg=rr[_tmp]
    6.48 +
    6.49 +#define SAVE_REGION_REGS(_tmp, _r0, _r1, _r2, _r3, _r4, _r5, _r6, _r7) \
    6.50 +	SAVE_ONE_RR(0,_r0, _tmp);; \
    6.51 +	SAVE_ONE_RR(1,_r1, _tmp);; \
    6.52 +	SAVE_ONE_RR(2,_r2, _tmp);; \
    6.53 +	SAVE_ONE_RR(3,_r3, _tmp);; \
    6.54 +	SAVE_ONE_RR(4,_r4, _tmp);; \
    6.55 +	SAVE_ONE_RR(5,_r5, _tmp);; \
    6.56 +	SAVE_ONE_RR(6,_r6, _tmp);; \
    6.57 +	SAVE_ONE_RR(7,_r7, _tmp);;
    6.58 +
    6.59 +#define STORE_REGION_REGS(ptr, _r0, _r1, _r2, _r3, _r4, _r5, _r6, _r7) \
    6.60 +	st8 [ptr]=_r0, 8;; \
    6.61 +	st8 [ptr]=_r1, 8;; \
    6.62 +	st8 [ptr]=_r2, 8;; \
    6.63 +	st8 [ptr]=_r3, 8;; \
    6.64 +	st8 [ptr]=_r4, 8;; \
    6.65 +	st8 [ptr]=_r5, 8;; \
    6.66 +	st8 [ptr]=_r6, 8;; \
    6.67 +	st8 [ptr]=_r7, 8;;
    6.68 +
    6.69 +#define RESTORE_REGION_REGS(ptr, _idx1, _idx2, _tmp) \
    6.70 +	mov		ar.lc=0x08-1;;						\
    6.71 +	movl	_idx1=0x00;;						\
    6.72 +RestRR:											\
    6.73 +	dep.z	_idx2=_idx1,61,3;;					\
    6.74 +	ld8		_tmp=[ptr],8;;						\
    6.75 +	mov		rr[_idx2]=_tmp;;					\
    6.76 +	srlz.d;;									\
    6.77 +	add		_idx1=1,_idx1;;						\
    6.78 +	br.cloop.sptk.few	RestRR
    6.79 +
    6.80 +#define SET_AREA_FOR_BOOTING_CPU(reg1, reg2) \
    6.81 +	movl reg1=sal_state_for_booting_cpu;;	\
    6.82 +	ld8 reg2=[reg1];;
    6.83 +
    6.84 +/*
    6.85 + * Adjust region registers saved before starting to save
    6.86 + * break regs and rest of the states that need to be preserved.
    6.87 + */
    6.88 +#define SAL_TO_OS_BOOT_HANDOFF_STATE_SAVE(_reg1,_reg2,_pred)  \
    6.89 +	SAVE_FROM_REG(b0,_reg1,_reg2);;						\
    6.90 +	SAVE_FROM_REG(b1,_reg1,_reg2);;						\
    6.91 +	SAVE_FROM_REG(b2,_reg1,_reg2);;						\
    6.92 +	SAVE_FROM_REG(b3,_reg1,_reg2);;						\
    6.93 +	SAVE_FROM_REG(b4,_reg1,_reg2);;						\
    6.94 +	SAVE_FROM_REG(b5,_reg1,_reg2);;						\
    6.95 +	st8 [_reg1]=r1,0x08;;								\
    6.96 +	st8 [_reg1]=r12,0x08;;								\
    6.97 +	st8 [_reg1]=r13,0x08;;								\
    6.98 +	SAVE_FROM_REG(ar.fpsr,_reg1,_reg2);;				\
    6.99 +	SAVE_FROM_REG(ar.pfs,_reg1,_reg2);;					\
   6.100 +	SAVE_FROM_REG(ar.rnat,_reg1,_reg2);;				\
   6.101 +	SAVE_FROM_REG(ar.unat,_reg1,_reg2);;				\
   6.102 +	SAVE_FROM_REG(ar.bspstore,_reg1,_reg2);;			\
   6.103 +	SAVE_FROM_REG(cr.dcr,_reg1,_reg2);;					\
   6.104 +	SAVE_FROM_REG(cr.iva,_reg1,_reg2);;					\
   6.105 +	SAVE_FROM_REG(cr.pta,_reg1,_reg2);;					\
   6.106 +	SAVE_FROM_REG(cr.itv,_reg1,_reg2);;					\
   6.107 +	SAVE_FROM_REG(cr.pmv,_reg1,_reg2);;					\
   6.108 +	SAVE_FROM_REG(cr.cmcv,_reg1,_reg2);;				\
   6.109 +	SAVE_FROM_REG(cr.lrr0,_reg1,_reg2);;				\
   6.110 +	SAVE_FROM_REG(cr.lrr1,_reg1,_reg2);;				\
   6.111 +	st8 [_reg1]=r4,0x08;;								\
   6.112 +	st8 [_reg1]=r5,0x08;;								\
   6.113 +	st8 [_reg1]=r6,0x08;;								\
   6.114 +	st8 [_reg1]=r7,0x08;;								\
   6.115 +	st8 [_reg1]=_pred,0x08;;							\
   6.116 +	SAVE_FROM_REG(ar.lc, _reg1, _reg2);;				\
   6.117 +	stf.spill.nta [_reg1]=f2,16;;						\
   6.118 +	stf.spill.nta [_reg1]=f3,16;;						\
   6.119 +	stf.spill.nta [_reg1]=f4,16;;						\
   6.120 +	stf.spill.nta [_reg1]=f5,16;;						\
   6.121 +	stf.spill.nta [_reg1]=f16,16;;						\
   6.122 +	stf.spill.nta [_reg1]=f17,16;;						\
   6.123 +	stf.spill.nta [_reg1]=f18,16;;						\
   6.124 +	stf.spill.nta [_reg1]=f19,16;;						\
   6.125 +	stf.spill.nta [_reg1]=f20,16;;						\
   6.126 +	stf.spill.nta [_reg1]=f21,16;;						\
   6.127 +	stf.spill.nta [_reg1]=f22,16;;						\
   6.128 +	stf.spill.nta [_reg1]=f23,16;;						\
   6.129 +	stf.spill.nta [_reg1]=f24,16;;						\
   6.130 +	stf.spill.nta [_reg1]=f25,16;;						\
   6.131 +	stf.spill.nta [_reg1]=f26,16;;						\
   6.132 +	stf.spill.nta [_reg1]=f27,16;;						\
   6.133 +	stf.spill.nta [_reg1]=f28,16;;						\
   6.134 +	stf.spill.nta [_reg1]=f29,16;;						\
   6.135 +	stf.spill.nta [_reg1]=f30,16;;						\
   6.136 +	stf.spill.nta [_reg1]=f31,16;;
   6.137 +
   6.138 +#else
   6.139 +#define SET_AREA_FOR_BOOTING_CPU(a1, a2)
   6.140 +#define SAL_TO_OS_BOOT_HANDOFF_STATE_SAVE(a1,a2, a3)
   6.141 +#define SAVE_REGION_REGS(_tmp, _r0, _r1, _r2, _r3, _r4, _r5, _r6, _r7)
   6.142 +#define STORE_REGION_REGS(ptr, _r0, _r1, _r2, _r3, _r4, _r5, _r6, _r7)
   6.143 +#endif
   6.144 +
   6.145 +#ifdef XEN
   6.146 +#define SET_ONE_RR(num, pgsize, _tmp1, _tmp2, vhpt) \
   6.147 +	movl _tmp1=(num << 61);;	\
   6.148 +	movl _tmp2=((ia64_rid(IA64_REGION_ID_KERNEL, (num<<61)) << 8) | (pgsize << 2) | vhpt);; \
   6.149 +	mov rr[_tmp1]=_tmp2
   6.150 +#else
   6.151 +#define SET_ONE_RR(num, pgsize, _tmp1, _tmp2, vhpt) \
   6.152 +	movl _tmp1=(num << 61);;	\
   6.153 +	mov _tmp2=((ia64_rid(IA64_REGION_ID_KERNEL, (num<<61)) << 8) | (pgsize << 2) | vhpt);; \
   6.154 +	mov rr[_tmp1]=_tmp2
   6.155 +#endif
   6.156  
   6.157  	.section __special_page_section,"ax"
   6.158  
   6.159 @@ -64,6 +206,12 @@ start_ap:
   6.160  	srlz.i
   6.161  	;;
   6.162  	/*
   6.163 +	 * Save the region registers, predicate before they get clobbered
   6.164 +	 */
   6.165 +	SAVE_REGION_REGS(r2, r8,r9,r10,r11,r12,r13,r14,r15);
   6.166 +	mov r25=pr;;
   6.167 +
   6.168 +	/*
   6.169  	 * Initialize kernel region registers:
   6.170  	 *	rr[0]: VHPT enabled, page size = PAGE_SHIFT
   6.171  	 *	rr[1]: VHPT enabled, page size = PAGE_SHIFT
   6.172 @@ -76,32 +224,14 @@ start_ap:
   6.173  	 * We initialize all of them to prevent inadvertently assuming
   6.174  	 * something about the state of address translation early in boot.
   6.175  	 */
   6.176 -	movl r6=((ia64_rid(IA64_REGION_ID_KERNEL, (0<<61)) << 8) | (PAGE_SHIFT << 2) | 1)
   6.177 -	movl r7=(0<<61)
   6.178 -	movl r8=((ia64_rid(IA64_REGION_ID_KERNEL, (1<<61)) << 8) | (PAGE_SHIFT << 2) | 1)
   6.179 -	movl r9=(1<<61)
   6.180 -	movl r10=((ia64_rid(IA64_REGION_ID_KERNEL, (2<<61)) << 8) | (PAGE_SHIFT << 2) | 1)
   6.181 -	movl r11=(2<<61)
   6.182 -	movl r12=((ia64_rid(IA64_REGION_ID_KERNEL, (3<<61)) << 8) | (PAGE_SHIFT << 2) | 1)
   6.183 -	movl r13=(3<<61)
   6.184 -	movl r14=((ia64_rid(IA64_REGION_ID_KERNEL, (4<<61)) << 8) | (PAGE_SHIFT << 2) | 1)
   6.185 -	movl r15=(4<<61)
   6.186 -	movl r16=((ia64_rid(IA64_REGION_ID_KERNEL, (5<<61)) << 8) | (PAGE_SHIFT << 2) | 1)
   6.187 -	movl r17=(5<<61)
   6.188 -	movl r18=((ia64_rid(IA64_REGION_ID_KERNEL, (6<<61)) << 8) | (IA64_GRANULE_SHIFT << 2))
   6.189 -	movl r19=(6<<61)
   6.190 -	movl r20=((ia64_rid(IA64_REGION_ID_KERNEL, (7<<61)) << 8) | (IA64_GRANULE_SHIFT << 2))
   6.191 -	movl r21=(7<<61)
   6.192 -	;;
   6.193 -	mov rr[r7]=r6
   6.194 -	mov rr[r9]=r8
   6.195 -	mov rr[r11]=r10
   6.196 -	mov rr[r13]=r12
   6.197 -	mov rr[r15]=r14
   6.198 -	mov rr[r17]=r16
   6.199 -	mov rr[r19]=r18
   6.200 -	mov rr[r21]=r20
   6.201 -	;;
   6.202 +	SET_ONE_RR(0, PAGE_SHIFT, r2, r16, 1);;
   6.203 +	SET_ONE_RR(1, PAGE_SHIFT, r2, r16, 1);;
   6.204 +	SET_ONE_RR(2, PAGE_SHIFT, r2, r16, 1);;
   6.205 +	SET_ONE_RR(3, PAGE_SHIFT, r2, r16, 1);;
   6.206 +	SET_ONE_RR(4, PAGE_SHIFT, r2, r16, 1);;
   6.207 +	SET_ONE_RR(5, PAGE_SHIFT, r2, r16, 1);;
   6.208 +	SET_ONE_RR(6, IA64_GRANULE_SHIFT, r2, r16, 0);;
   6.209 +	SET_ONE_RR(7, IA64_GRANULE_SHIFT, r2, r16, 0);;
   6.210  	/*
   6.211  	 * Now pin mappings into the TLB for kernel text and data
   6.212  	 */
   6.213 @@ -129,13 +259,13 @@ start_ap:
   6.214  	/*
   6.215  	 * Switch into virtual mode:
   6.216  	 */
   6.217 -#ifdef CONFIG_VTI
   6.218 -	movl r16=(IA64_PSR_IT|IA64_PSR_IC|IA64_PSR_DT|IA64_PSR_RT|IA64_PSR_DFH \
   6.219 +#if defined(XEN) && defined(CONFIG_VTI)
   6.220 +	movl r16=(IA64_PSR_IT|IA64_PSR_IC|IA64_PSR_DT|IA64_PSR_RT|IA64_PSR_DFH\
   6.221  		  |IA64_PSR_DI)
   6.222 -#else // CONFIG_VTI
   6.223 +#else
   6.224  	movl r16=(IA64_PSR_IT|IA64_PSR_IC|IA64_PSR_DT|IA64_PSR_RT|IA64_PSR_DFH|IA64_PSR_BN \
   6.225  		  |IA64_PSR_DI)
   6.226 -#endif // CONFIG_VTI
   6.227 +#endif
   6.228  	;;
   6.229  	mov cr.ipsr=r16
   6.230  	movl r17=1f
   6.231 @@ -147,12 +277,18 @@ start_ap:
   6.232  	;;
   6.233  1:	// now we are in virtual mode
   6.234  
   6.235 +	SET_AREA_FOR_BOOTING_CPU(r2, r16);
   6.236 +
   6.237 +	STORE_REGION_REGS(r16, r8,r9,r10,r11,r12,r13,r14,r15);
   6.238 +	SAL_TO_OS_BOOT_HANDOFF_STATE_SAVE(r16,r17,r25)
   6.239 +	;;
   6.240 +
   6.241  	// set IVT entry point---can't access I/O ports without it
   6.242 -#ifdef CONFIG_VTI
   6.243 -    movl r3=vmx_ia64_ivt
   6.244 -#else // CONFIG_VTI
   6.245 +#if defined(XEN) && defined(CONFIG_VTI)
   6.246 +	movl r3=vmx_ia64_ivt
   6.247 +#else
   6.248  	movl r3=ia64_ivt
   6.249 -#endif // CONFIG_VTI
   6.250 +#endif
   6.251  	;;
   6.252  	mov cr.iva=r3
   6.253  	movl r2=FPSR_DEFAULT
   6.254 @@ -220,23 +356,24 @@ 1:	// now we are in virtual mode
   6.255  
   6.256  .load_current:
   6.257  	// load the "current" pointer (r13) and ar.k6 with the current task
   6.258 -#ifdef CONFIG_VTI
   6.259 -	mov r21=r2		// virtual address
   6.260 +#if defined(XEN) && defined(CONFIG_VTI)
   6.261 +	mov r21=r2
   6.262  	;;
   6.263  	bsw.1
   6.264  	;;
   6.265 -#else // CONFIG_VTI
   6.266 -	mov IA64_KR(CURRENT)=r2
   6.267 +#else
   6.268 +	mov IA64_KR(CURRENT)=r2		// virtual address
   6.269  	mov IA64_KR(CURRENT_STACK)=r16
   6.270 -#endif // CONFIG_VTI
   6.271 +#endif
   6.272  	mov r13=r2
   6.273  	/*
   6.274 -	 * Reserve space at the top of the stack for "struct pt_regs".  Kernel threads
   6.275 -	 * don't store interesting values in that structure, but the space still needs
   6.276 -	 * to be there because time-critical stuff such as the context switching can
   6.277 -	 * be implemented more efficiently (for example, __switch_to()
   6.278 +	 * Reserve space at the top of the stack for "struct pt_regs".  Kernel
   6.279 +	 * threads don't store interesting values in that structure, but the space
   6.280 +	 * still needs to be there because time-critical stuff such as the context
   6.281 +	 * switching can be implemented more efficiently (for example, __switch_to()
   6.282  	 * always sets the psr.dfh bit of the task it is switching to).
   6.283  	 */
   6.284 +
   6.285  	addl r12=IA64_STK_OFFSET-IA64_PT_REGS_SIZE-16,r2
   6.286  	addl r2=IA64_RBS_OFFSET,r2	// initialize the RSE
   6.287  	mov ar.rsc=0		// place RSE in enforced lazy mode
   6.288 @@ -278,9 +415,13 @@ 1:	// now we are in virtual mode
   6.289  	br.call.sptk.many b0=console_print
   6.290  
   6.291  self:	hint @pause
   6.292 +#ifdef XEN
   6.293  	;;
   6.294  	br.sptk.many self		// endless loop
   6.295  	;;
   6.296 +#else
   6.297 +	br.sptk.many self		// endless loop
   6.298 +#endif
   6.299  END(_start)
   6.300  
   6.301  GLOBAL_ENTRY(ia64_save_debug_regs)
   6.302 @@ -1023,4 +1164,98 @@ END(ia64_spinlock_contention)
   6.303  
   6.304  #endif
   6.305  
   6.306 +#ifdef CONFIG_HOTPLUG_CPU
   6.307 +GLOBAL_ENTRY(ia64_jump_to_sal)
   6.308 +	alloc r16=ar.pfs,1,0,0,0;;
   6.309 +	rsm psr.i  | psr.ic
   6.310 +{
   6.311 +	flushrs
   6.312 +	srlz.i
   6.313 +}
   6.314 +	tpa r25=in0
   6.315 +	movl r18=tlb_purge_done;;
   6.316 +	DATA_VA_TO_PA(r18);;
   6.317 +	mov b1=r18 	// Return location
   6.318 +	movl r18=ia64_do_tlb_purge;;
   6.319 +	DATA_VA_TO_PA(r18);;
   6.320 +	mov b2=r18 	// doing tlb_flush work
   6.321 +	mov ar.rsc=0  // Put RSE  in enforced lazy, LE mode
   6.322 +	movl r17=1f;;
   6.323 +	DATA_VA_TO_PA(r17);;
   6.324 +	mov cr.iip=r17
   6.325 +	movl r16=SAL_PSR_BITS_TO_SET;;
   6.326 +	mov cr.ipsr=r16
   6.327 +	mov cr.ifs=r0;;
   6.328 +	rfi;;
   6.329 +1:
   6.330 +	/*
   6.331 +	 * Invalidate all TLB data/inst
   6.332 +	 */
   6.333 +	br.sptk.many b2;; // jump to tlb purge code
   6.334 +
   6.335 +tlb_purge_done:
   6.336 +	RESTORE_REGION_REGS(r25, r17,r18,r19);;
   6.337 +	RESTORE_REG(b0, r25, r17);;
   6.338 +	RESTORE_REG(b1, r25, r17);;
   6.339 +	RESTORE_REG(b2, r25, r17);;
   6.340 +	RESTORE_REG(b3, r25, r17);;
   6.341 +	RESTORE_REG(b4, r25, r17);;
   6.342 +	RESTORE_REG(b5, r25, r17);;
   6.343 +	ld8 r1=[r25],0x08;;
   6.344 +	ld8 r12=[r25],0x08;;
   6.345 +	ld8 r13=[r25],0x08;;
   6.346 +	RESTORE_REG(ar.fpsr, r25, r17);;
   6.347 +	RESTORE_REG(ar.pfs, r25, r17);;
   6.348 +	RESTORE_REG(ar.rnat, r25, r17);;
   6.349 +	RESTORE_REG(ar.unat, r25, r17);;
   6.350 +	RESTORE_REG(ar.bspstore, r25, r17);;
   6.351 +	RESTORE_REG(cr.dcr, r25, r17);;
   6.352 +	RESTORE_REG(cr.iva, r25, r17);;
   6.353 +	RESTORE_REG(cr.pta, r25, r17);;
   6.354 +	RESTORE_REG(cr.itv, r25, r17);;
   6.355 +	RESTORE_REG(cr.pmv, r25, r17);;
   6.356 +	RESTORE_REG(cr.cmcv, r25, r17);;
   6.357 +	RESTORE_REG(cr.lrr0, r25, r17);;
   6.358 +	RESTORE_REG(cr.lrr1, r25, r17);;
   6.359 +	ld8 r4=[r25],0x08;;
   6.360 +	ld8 r5=[r25],0x08;;
   6.361 +	ld8 r6=[r25],0x08;;
   6.362 +	ld8 r7=[r25],0x08;;
   6.363 +	ld8 r17=[r25],0x08;;
   6.364 +	mov pr=r17,-1;;
   6.365 +	RESTORE_REG(ar.lc, r25, r17);;
   6.366 +	/*
   6.367 +	 * Now Restore floating point regs
   6.368 +	 */
   6.369 +	ldf.fill.nta f2=[r25],16;;
   6.370 +	ldf.fill.nta f3=[r25],16;;
   6.371 +	ldf.fill.nta f4=[r25],16;;
   6.372 +	ldf.fill.nta f5=[r25],16;;
   6.373 +	ldf.fill.nta f16=[r25],16;;
   6.374 +	ldf.fill.nta f17=[r25],16;;
   6.375 +	ldf.fill.nta f18=[r25],16;;
   6.376 +	ldf.fill.nta f19=[r25],16;;
   6.377 +	ldf.fill.nta f20=[r25],16;;
   6.378 +	ldf.fill.nta f21=[r25],16;;
   6.379 +	ldf.fill.nta f22=[r25],16;;
   6.380 +	ldf.fill.nta f23=[r25],16;;
   6.381 +	ldf.fill.nta f24=[r25],16;;
   6.382 +	ldf.fill.nta f25=[r25],16;;
   6.383 +	ldf.fill.nta f26=[r25],16;;
   6.384 +	ldf.fill.nta f27=[r25],16;;
   6.385 +	ldf.fill.nta f28=[r25],16;;
   6.386 +	ldf.fill.nta f29=[r25],16;;
   6.387 +	ldf.fill.nta f30=[r25],16;;
   6.388 +	ldf.fill.nta f31=[r25],16;;
   6.389 +
   6.390 +	/*
   6.391 +	 * Now that we have done all the register restores
   6.392 +	 * we are now ready for the big DIVE to SAL Land
   6.393 +	 */
   6.394 +	ssm psr.ic;;
   6.395 +	srlz.d;;
   6.396 +	br.ret.sptk.many b0;;
   6.397 +END(ia64_jump_to_sal)
   6.398 +#endif /* CONFIG_HOTPLUG_CPU */
   6.399 +
   6.400  #endif /* CONFIG_SMP */
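
A note on SET_ONE_RR, used throughout the new region-register setup above: the value written into rr[num] packs the kernel region ID into bits 8 and up, the page-size shift into bits 2-7, and the VHPT-enable flag into bit 0. In rough C (make_rr() is illustrative, not a function in this tree):

	/* Illustrative only: the region-register value composed by
	 * SET_ONE_RR, following the rid<<8 | pgshift<<2 | vhpt layout
	 * visible in the macro body. */
	static inline unsigned long make_rr(unsigned long rid,
					    unsigned long pgshift,
					    unsigned long vhpt)
	{
		return (rid << 8) | (pgshift << 2) | (vhpt & 1);
	}

The only difference between the XEN and non-XEN variants of the macro is movl versus mov when loading the composed constant, presumably because Xen's region IDs can push the value past mov's small-immediate range.
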
     7.1 --- a/xen/arch/ia64/linux-xen/irq_ia64.c	Wed Aug 31 14:32:27 2005 -0600
     7.2 +++ b/xen/arch/ia64/linux-xen/irq_ia64.c	Wed Aug 31 16:55:04 2005 -0600
     7.3 @@ -70,8 +70,7 @@ assign_irq_vector (int irq)
     7.4  	pos = find_first_zero_bit(ia64_vector_mask, IA64_NUM_DEVICE_VECTORS);
     7.5  	vector = IA64_FIRST_DEVICE_VECTOR + pos;
     7.6  	if (vector > IA64_LAST_DEVICE_VECTOR)
     7.7 -		/* XXX could look for sharable vectors instead of panic'ing... */
     7.8 -		panic("assign_irq_vector: out of interrupt vectors!");
     7.9 +		return -ENOSPC;
    7.10  	if (test_and_set_bit(pos, ia64_vector_mask))
    7.11  		goto again;
    7.12  	return vector;
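
With the panic gone, assign_irq_vector() now reports vector exhaustion to its callers, which must check for a negative result. A sketch of such a caller, assuming a hypothetical setup_device_irq():

	/* Sketch: cope with vector exhaustion instead of panicking. */
	static int setup_device_irq(unsigned int irq)
	{
		int vector = assign_irq_vector(irq);

		if (vector < 0)
			return vector;	/* -ENOSPC: no free device vectors */
		/* ... bind the irq's handler to this vector ... */
		return 0;
	}
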
    7.13 @@ -173,103 +172,6 @@ ia64_handle_irq (ia64_vector vector, str
    7.14  	irq_exit();
    7.15  }
    7.16  
    7.17 -#ifdef  CONFIG_VTI
    7.18 -#define vmx_irq_enter()		\
    7.19 -	add_preempt_count(HARDIRQ_OFFSET);
    7.20 -
    7.21 -/* Now softirq will be checked when leaving hypervisor, or else
    7.22 - * scheduler irq will be executed too early.
    7.23 - */
    7.24 -#define vmx_irq_exit(void)	\
    7.25 -	sub_preempt_count(HARDIRQ_OFFSET);
    7.26 -/*
    7.27 - * That's where the IVT branches when we get an external
    7.28 - * interrupt. This branches to the correct hardware IRQ handler via
    7.29 - * function ptr.
    7.30 - */
    7.31 -void
    7.32 -vmx_ia64_handle_irq (ia64_vector vector, struct pt_regs *regs)
    7.33 -{
    7.34 -	unsigned long saved_tpr;
    7.35 -	int	wake_dom0 = 0;
    7.36 -
    7.37 -
    7.38 -#if IRQ_DEBUG
    7.39 -	{
    7.40 -		unsigned long bsp, sp;
    7.41 -
    7.42 -		/*
    7.43 -		 * Note: if the interrupt happened while executing in
    7.44 -		 * the context switch routine (ia64_switch_to), we may
    7.45 -		 * get a spurious stack overflow here.  This is
    7.46 -		 * because the register and the memory stack are not
    7.47 -		 * switched atomically.
    7.48 -		 */
    7.49 -		bsp = ia64_getreg(_IA64_REG_AR_BSP);
    7.50 -		sp = ia64_getreg(_IA64_REG_AR_SP);
    7.51 -
    7.52 -		if ((sp - bsp) < 1024) {
    7.53 -			static unsigned char count;
    7.54 -			static long last_time;
    7.55 -
    7.56 -			if (jiffies - last_time > 5*HZ)
    7.57 -				count = 0;
    7.58 -			if (++count < 5) {
    7.59 -				last_time = jiffies;
    7.60 -				printk("ia64_handle_irq: DANGER: less than "
    7.61 -				       "1KB of free stack space!!\n"
    7.62 -				       "(bsp=0x%lx, sp=%lx)\n", bsp, sp);
    7.63 -			}
    7.64 -		}
    7.65 -	}
    7.66 -#endif /* IRQ_DEBUG */
    7.67 -
    7.68 -	/*
    7.69 -	 * Always set TPR to limit maximum interrupt nesting depth to
    7.70 -	 * 16 (without this, it would be ~240, which could easily lead
    7.71 -	 * to kernel stack overflows).
    7.72 -	 */
    7.73 -	vmx_irq_enter();
    7.74 -	saved_tpr = ia64_getreg(_IA64_REG_CR_TPR);
    7.75 -	ia64_srlz_d();
    7.76 -	while (vector != IA64_SPURIOUS_INT_VECTOR) {
    7.77 -	    if (!IS_RESCHEDULE(vector)) {
    7.78 -		ia64_setreg(_IA64_REG_CR_TPR, vector);
    7.79 -		ia64_srlz_d();
    7.80 -
    7.81 -		if (vector != IA64_TIMER_VECTOR) {
    7.82 -			/* FIXME: Leave IRQ re-route later */
    7.83 -			vmx_vcpu_pend_interrupt(dom0->vcpu[0],vector);
    7.84 -			wake_dom0 = 1;
    7.85 -		}
    7.86 -		else {	// FIXME: Handle Timer only now
    7.87 -			__do_IRQ(local_vector_to_irq(vector), regs);
    7.88 -		}
    7.89 -		
    7.90 -		/*
    7.91 -		 * Disable interrupts and send EOI:
    7.92 -		 */
    7.93 -		local_irq_disable();
    7.94 -		ia64_setreg(_IA64_REG_CR_TPR, saved_tpr);
    7.95 -	    }
    7.96 -	    else {
    7.97 -                printf("Oops: RESCHEDULE IPI absorbed by HV\n");
    7.98 -            }
    7.99 -	    ia64_eoi();
   7.100 -	    vector = ia64_get_ivr();
   7.101 -	}
   7.102 -	/*
   7.103 -	 * This must be done *after* the ia64_eoi().  For example, the keyboard softirq
   7.104 -	 * handler needs to be able to wait for further keyboard interrupts, which can't
   7.105 -	 * come through until ia64_eoi() has been done.
   7.106 -	 */
   7.107 -	vmx_irq_exit();
   7.108 -	if ( wake_dom0 && current != dom0 ) 
   7.109 -		vcpu_wake(dom0->vcpu[0]);
   7.110 -}
   7.111 -#endif
   7.112 -
   7.113 -
   7.114  #ifdef CONFIG_HOTPLUG_CPU
   7.115  /*
    7.115   * This function emulates interrupt processing when a cpu is about to be
     8.1 --- a/xen/arch/ia64/linux-xen/minstate.h	Wed Aug 31 14:32:27 2005 -0600
     8.2 +++ b/xen/arch/ia64/linux-xen/minstate.h	Wed Aug 31 16:55:04 2005 -0600
     8.3 @@ -26,7 +26,7 @@
     8.4  (pKStk) addl r1=-IA64_PT_REGS_SIZE,r1;			/* if in kernel mode, use sp (r12) */	\
     8.5  	;;											\
     8.6  (pUStk)	mov r18=ar.bsp;										\
     8.7 -(pUStk)	mov ar.rsc=0x3;		/* set eager mode, pl 0, little-endian, loadrs=0 */		\
     8.8 +(pUStk)	mov ar.rsc=0x3;		/* set eager mode, pl 0, little-endian, loadrs=0 */
     8.9  
    8.10  #define MINSTATE_END_SAVE_MIN_VIRT								\
    8.11  	bsw.1;			/* switch back to bank 1 (must be last in insn group) */	\
    8.12 @@ -41,7 +41,7 @@
    8.13  (pKStk) addl r3=THIS_CPU(ia64_mca_data),r3;;							\
    8.14  (pKStk) ld8 r3 = [r3];;										\
    8.15  (pKStk) addl r3=IA64_MCA_CPU_INIT_STACK_OFFSET,r3;;						\
    8.16 -(pKStk) addl sp=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r3;						\
    8.17 +(pKStk) addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r3;						\
    8.18  (pUStk)	mov ar.rsc=0;		/* set enforced lazy mode, pl 0, little-endian, loadrs=0 */	\
    8.19  (pUStk)	addl r22=IA64_RBS_OFFSET,r1;		/* compute base of register backing store */	\
    8.20  	;;											\
    8.21 @@ -50,7 +50,6 @@
    8.22  (pUStk)	mov r23=ar.bspstore;				/* save ar.bspstore */			\
    8.23  (pUStk)	dep r22=-1,r22,61,3;			/* compute kernel virtual addr of RBS */	\
    8.24  	;;											\
    8.25 -(pKStk) addl r1=-IA64_PT_REGS_SIZE,r1;		/* if in kernel mode, use sp (r12) */		\
    8.26  (pUStk)	mov ar.bspstore=r22;			/* switch to kernel RBS */			\
    8.27  	;;											\
    8.28  (pUStk)	mov r18=ar.bsp;										\
    8.29 @@ -61,9 +60,13 @@
    8.30  	;;
    8.31  
    8.32  #ifdef MINSTATE_VIRT
    8.33 -# define MINSTATE_GET_CURRENT(reg)	\
    8.34 -		movl reg=THIS_CPU(cpu_kr)+IA64_KR_CURRENT_OFFSET;;\
    8.35 -		ld8 reg=[reg]
    8.36 +#ifdef XEN
    8.37 +# define MINSTATE_GET_CURRENT(reg)					\
    8.38 +               movl reg=THIS_CPU(cpu_kr)+IA64_KR_CURRENT_OFFSET;;	\
    8.39 +               ld8 reg=[reg]
    8.40 +#else
    8.41 +# define MINSTATE_GET_CURRENT(reg)	mov reg=IA64_KR(CURRENT)
    8.42 +#endif
    8.43  # define MINSTATE_START_SAVE_MIN	MINSTATE_START_SAVE_MIN_VIRT
    8.44  # define MINSTATE_END_SAVE_MIN		MINSTATE_END_SAVE_MIN_VIRT
    8.45  #endif
    8.46 @@ -172,8 +175,8 @@
    8.47  	;;											\
    8.48  .mem.offset 0,0; st8.spill [r16]=r13,16;							\
    8.49  .mem.offset 8,0; st8.spill [r17]=r21,16;	/* save ar.fpsr */				\
    8.50 -	movl r13=THIS_CPU(cpu_kr)+IA64_KR_CURRENT_OFFSET;;					\
    8.51 -	ld8 r13=[r13];			/* establish 'current' */				\
    8.52 +	/* XEN mov r13=IA64_KR(CURRENT);	/* establish `current' */				\
    8.53 +	MINSTATE_GET_CURRENT(r13);		/* XEN establish `current' */				\
    8.54  	;;											\
    8.55  .mem.offset 0,0; st8.spill [r16]=r15,16;							\
    8.56  .mem.offset 8,0; st8.spill [r17]=r14,16;							\
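
The MINSTATE_GET_CURRENT() split above captures the basic Xen/Linux divergence in this file: native Linux reads "current" straight from the ar.k6 kernel register, whereas this Xen tree keeps the kernel-register values in a per-CPU cpu_kr area and performs a memory load. Roughly, in C (every name here is conceptual, mirroring the THIS_CPU(cpu_kr)+IA64_KR_CURRENT_OFFSET address formed in the macro; none are real declarations):

	/* Conceptual only: Xen's one-extra-load path for "current",
	 * versus Linux's single mov from the ar.k6 application register. */
	struct task_struct *xen_current(void)
	{
		unsigned long *slot = (unsigned long *)
			(this_cpu_addr(cpu_kr) + IA64_KR_CURRENT_OFFSET);
		return (struct task_struct *)*slot;
	}
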
     9.1 --- a/xen/arch/ia64/linux-xen/mm_contig.c	Wed Aug 31 14:32:27 2005 -0600
     9.2 +++ b/xen/arch/ia64/linux-xen/mm_contig.c	Wed Aug 31 16:55:04 2005 -0600
     9.3 @@ -62,7 +62,8 @@ show_mem (void)
     9.4  	printk("%d reserved pages\n", reserved);
     9.5  	printk("%d pages shared\n", shared);
     9.6  	printk("%d pages swap cached\n", cached);
     9.7 -	printk("%ld pages in page table cache\n", pgtable_cache_size);
     9.8 +	printk("%ld pages in page table cache\n",
     9.9 +		pgtable_quicklist_total_size());
    9.10  }
    9.11  #endif
    9.12  
    9.13 @@ -290,7 +291,7 @@ paging_init (void)
    9.14  		vmem_map = (struct page *) vmalloc_end;
    9.15  		efi_memmap_walk(create_mem_map_page_table, NULL);
    9.16  
    9.17 -		mem_map = contig_page_data.node_mem_map = vmem_map;
    9.18 +		NODE_DATA(0)->node_mem_map = vmem_map;
    9.19  		free_area_init_node(0, &contig_page_data, zones_size,
    9.20  				    0, zholes_size);
    9.21  
    9.22 @@ -307,4 +308,4 @@ paging_init (void)
    9.23  #endif /* !CONFIG_VIRTUAL_MEM_MAP */
    9.24  	zero_page_memmap_ptr = virt_to_page(ia64_imva(empty_zero_page));
    9.25  }
    9.26 -#endif /* !CONFIG_XEN */
    9.27 +#endif
    10.1 --- a/xen/arch/ia64/linux-xen/unaligned.c	Wed Aug 31 14:32:27 2005 -0600
    10.2 +++ b/xen/arch/ia64/linux-xen/unaligned.c	Wed Aug 31 16:55:04 2005 -0600
    10.3 @@ -201,7 +201,7 @@ static u16 gr_info[32]={
    10.4  
    10.5  	RPT(r1), RPT(r2), RPT(r3),
    10.6  
    10.7 -#ifdef  CONFIG_VTI
    10.8 +#if defined(XEN) && defined(CONFIG_VTI)
    10.9  	RPT(r4), RPT(r5), RPT(r6), RPT(r7),
   10.10  #else   //CONFIG_VTI
   10.11  	RSW(r4), RSW(r5), RSW(r6), RSW(r7),
   10.12 @@ -295,7 +295,7 @@ rotate_reg (unsigned long sor, unsigned 
   10.13  	return reg;
   10.14  }
   10.15  
   10.16 -#ifdef CONFIG_VTI
   10.17 +#if defined(XEN) && defined(CONFIG_VTI)
   10.18  static void
   10.19  set_rse_reg (struct pt_regs *regs, unsigned long r1, unsigned long val, unsigned long nat)
   10.20  {
   10.21 @@ -359,56 +359,6 @@ set_rse_reg (struct pt_regs *regs, unsig
   10.22      }
   10.23      ia64_set_rsc(old_rsc);
   10.24  }
   10.25 -
   10.26 -
   10.27 -static void
   10.28 -get_rse_reg (struct pt_regs *regs, unsigned long r1, unsigned long *val, unsigned long *nat)
   10.29 -{
   10.30 -	struct switch_stack *sw = (struct switch_stack *) regs - 1;
   10.31 -	unsigned long *bsp, *addr, *rnat_addr, *ubs_end, *bspstore;
   10.32 -	unsigned long *kbs = (void *) current + IA64_RBS_OFFSET;
   10.33 -	unsigned long rnats, nat_mask;
   10.34 -	unsigned long on_kbs;
   10.35 -    unsigned long old_rsc, new_rsc;
   10.36 -	long sof = (regs->cr_ifs) & 0x7f;
   10.37 -	long sor = 8 * ((regs->cr_ifs >> 14) & 0xf);
   10.38 -	long rrb_gr = (regs->cr_ifs >> 18) & 0x7f;
   10.39 -	long ridx = r1 - 32;
   10.40 -
   10.41 -	if (ridx >= sof) {
   10.42 -		/* read of out-of-frame register returns an undefined value; 0 in our case.  */
   10.43 -		DPRINT("ignoring read from r%lu; only %lu registers are allocated!\n", r1, sof);
   10.44 -		panic("wrong stack register number");
   10.45 -	}
   10.46 -
   10.47 -	if (ridx < sor)
   10.48 -		ridx = rotate_reg(sor, rrb_gr, ridx);
   10.49 -
   10.50 -    old_rsc=ia64_get_rsc();
   10.51 -    new_rsc=old_rsc&(~(0x3));
   10.52 -    ia64_set_rsc(new_rsc);
   10.53 -
   10.54 -    bspstore = ia64_get_bspstore();
   10.55 -    bsp =kbs + (regs->loadrs >> 19); //16+3;
   10.56 -
   10.57 -	addr = ia64_rse_skip_regs(bsp, -sof + ridx);
   10.58 -    nat_mask = 1UL << ia64_rse_slot_num(addr);
   10.59 -	rnat_addr = ia64_rse_rnat_addr(addr);
   10.60 -
   10.61 -    if(addr >= bspstore){
   10.62 -
   10.63 -        ia64_flushrs ();
   10.64 -        ia64_mf ();
   10.65 -        bspstore = ia64_get_bspstore();
   10.66 -    }
   10.67 -	*val=*addr;
   10.68 -    if(bspstore < rnat_addr){
   10.69 -        *nat=!!(ia64_get_rnat()&nat_mask);
   10.70 -    }else{
   10.71 -        *nat = !!((*rnat_addr)&nat_mask);
   10.72 -    }
   10.73 -    ia64_set_rsc(old_rsc);
   10.74 -}
   10.75  #else // CONFIG_VTI
   10.76  static void
   10.77  set_rse_reg (struct pt_regs *regs, unsigned long r1, unsigned long val, int nat)
   10.78 @@ -590,7 +540,7 @@ setreg (unsigned long regnum, unsigned l
   10.79  		unat = &sw->ar_unat;
   10.80  	} else {
   10.81  		addr = (unsigned long)regs;
   10.82 -#ifdef CONFIG_VTI
   10.83 +#if defined(XEN) && defined(CONFIG_VTI)
   10.84  		unat = &regs->eml_unat;
   10.85  #else //CONFIG_VTI
   10.86  		unat = &sw->caller_unat;
   10.87 @@ -780,7 +730,7 @@ getreg (unsigned long regnum, unsigned l
   10.88  		unat = &sw->ar_unat;
   10.89  	} else {
   10.90  		addr = (unsigned long)regs;
   10.91 -#ifdef  CONFIG_VTI
   10.92 +#if defined(XEN) && defined(CONFIG_VTI)
   10.93  		unat = &regs->eml_unat;;
   10.94  #else   //CONFIG_VTI
   10.95  		unat = &sw->caller_unat;
   10.96 @@ -1527,6 +1477,10 @@ printk("ia64_handle_unaligned: called, n
   10.97  	 *		- ldX.spill
   10.98  	 *		- stX.spill
   10.99  	 *	Reason: RNATs are based on addresses
  10.100 +	 *		- ld16
  10.101 +	 *		- st16
  10.102 +	 *	Reason: ld16 and st16 are supposed to occur in a single
  10.103 +	 *		memory op
  10.104  	 *
  10.105  	 *	synchronization:
  10.106  	 *		- cmpxchg
  10.107 @@ -1548,6 +1502,10 @@ printk("ia64_handle_unaligned: called, n
  10.108  	switch (opcode) {
  10.109  	      case LDS_OP:
  10.110  	      case LDSA_OP:
  10.111 +		if (u.insn.x)
  10.112 +			/* oops, really a semaphore op (cmpxchg, etc) */
  10.113 +			goto failure;
  10.114 +		/* no break */
  10.115  	      case LDS_IMM_OP:
  10.116  	      case LDSA_IMM_OP:
  10.117  	      case LDFS_OP:
  10.118 @@ -1572,6 +1530,10 @@ printk("ia64_handle_unaligned: called, n
  10.119  	      case LDCCLR_OP:
  10.120  	      case LDCNC_OP:
  10.121  	      case LDCCLRACQ_OP:
  10.122 +		if (u.insn.x)
  10.123 +			/* oops, really a semaphore op (cmpxchg, etc) */
  10.124 +			goto failure;
  10.125 +		/* no break */
  10.126  	      case LD_IMM_OP:
  10.127  	      case LDA_IMM_OP:
  10.128  	      case LDBIAS_IMM_OP:
  10.129 @@ -1584,6 +1546,10 @@ printk("ia64_handle_unaligned: called, n
  10.130  
  10.131  	      case ST_OP:
  10.132  	      case STREL_OP:
  10.133 +		if (u.insn.x)
  10.134 +			/* oops, really a semaphore op (cmpxchg, etc) */
  10.135 +			goto failure;
  10.136 +		/* no break */
  10.137  	      case ST_IMM_OP:
  10.138  	      case STREL_IMM_OP:
  10.139  		ret = emulate_store_int(ifa, u.insn, regs);
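
The three identical "if (u.insn.x) goto failure" guards added above close one decoding hole: within those load/store major opcodes, x=1 selects the semaphore forms (cmpxchg, xchg, fetchadd) and ld16/st16, which must execute as a single atomic memory operation and so cannot be emulated piecewise. Condensed, the test the patch open-codes before each fall-through is (is_atomic_form() is a hypothetical helper):

	/* Sketch: any x=1 encoding here is an indivisible memory op, so
	 * unaligned emulation refuses it and lets the fault surface
	 * rather than silently splitting the access. */
	static inline int is_atomic_form(load_store_t insn)
	{
		return insn.x != 0;
	}
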
    11.1 --- a/xen/arch/ia64/linux/README.origin	Wed Aug 31 14:32:27 2005 -0600
    11.2 +++ b/xen/arch/ia64/linux/README.origin	Wed Aug 31 16:55:04 2005 -0600
    11.3 @@ -1,5 +1,9 @@
    11.4  Source files in this directory are identical copies of linux-2.6.13 files:
    11.5  
    11.6 +NOTE: DO NOT commit changes to these files!   If a file
    11.7 +needs to be changed, move it to ../linux-xen and follow
    11.8 +the instructions in the README there.
    11.9 +
   11.10  cmdline.c		-> linux/lib/cmdline.c
   11.11  efi_stub.S		-> linux/arch/ia64/efi_stub.S
   11.12  extable.c		-> linux/arch/ia64/mm/extable.c
    12.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    12.2 +++ b/xen/arch/ia64/vmx_irq_ia64.c	Wed Aug 31 16:55:04 2005 -0600
    12.3 @@ -0,0 +1,127 @@
    12.4 +#include <linux/config.h>
    12.5 +#include <linux/module.h>
    12.6 +
    12.7 +#include <linux/jiffies.h>
    12.8 +#include <linux/errno.h>
    12.9 +#include <linux/init.h>
   12.10 +#include <linux/interrupt.h>
   12.11 +#include <linux/ioport.h>
   12.12 +#include <linux/kernel_stat.h>
   12.13 +#include <linux/slab.h>
   12.14 +#include <linux/ptrace.h>
   12.15 +#include <linux/random.h>	/* for rand_initialize_irq() */
   12.16 +#include <linux/signal.h>
   12.17 +#include <linux/smp.h>
   12.18 +#include <linux/smp_lock.h>
   12.19 +#include <linux/threads.h>
   12.20 +#include <linux/bitops.h>
   12.21 +
   12.22 +#include <asm/delay.h>
   12.23 +#include <asm/intrinsics.h>
   12.24 +#include <asm/io.h>
   12.25 +#include <asm/hw_irq.h>
   12.26 +#include <asm/machvec.h>
   12.27 +#include <asm/pgtable.h>
   12.28 +#include <asm/system.h>
   12.29 +
   12.30 +#ifdef CONFIG_PERFMON
   12.31 +# include <asm/perfmon.h>
   12.32 +#endif
   12.33 +
   12.34 +#define IRQ_DEBUG	0
   12.35 +
   12.36 +#ifdef  CONFIG_VTI
   12.37 +#define vmx_irq_enter()		\
   12.38 +	add_preempt_count(HARDIRQ_OFFSET);
   12.39 +
   12.40 +/* Now softirq will be checked when leaving hypervisor, or else
   12.41 + * scheduler irq will be executed too early.
   12.42 + */
   12.43 +#define vmx_irq_exit(void)	\
   12.44 +	sub_preempt_count(HARDIRQ_OFFSET);
   12.45 +/*
   12.46 + * That's where the IVT branches when we get an external
   12.47 + * interrupt. This branches to the correct hardware IRQ handler via
   12.48 + * function ptr.
   12.49 + */
   12.50 +void
   12.51 +vmx_ia64_handle_irq (ia64_vector vector, struct pt_regs *regs)
   12.52 +{
   12.53 +	unsigned long saved_tpr;
   12.54 +	int	wake_dom0 = 0;
   12.55 +
   12.56 +
   12.57 +#if IRQ_DEBUG
   12.58 +	{
   12.59 +		unsigned long bsp, sp;
   12.60 +
   12.61 +		/*
   12.62 +		 * Note: if the interrupt happened while executing in
   12.63 +		 * the context switch routine (ia64_switch_to), we may
   12.64 +		 * get a spurious stack overflow here.  This is
   12.65 +		 * because the register and the memory stack are not
   12.66 +		 * switched atomically.
   12.67 +		 */
   12.68 +		bsp = ia64_getreg(_IA64_REG_AR_BSP);
   12.69 +		sp = ia64_getreg(_IA64_REG_AR_SP);
   12.70 +
   12.71 +		if ((sp - bsp) < 1024) {
   12.72 +			static unsigned char count;
   12.73 +			static long last_time;
   12.74 +
   12.75 +			if (jiffies - last_time > 5*HZ)
   12.76 +				count = 0;
   12.77 +			if (++count < 5) {
   12.78 +				last_time = jiffies;
   12.79 +				printk("ia64_handle_irq: DANGER: less than "
   12.80 +				       "1KB of free stack space!!\n"
   12.81 +				       "(bsp=0x%lx, sp=%lx)\n", bsp, sp);
   12.82 +			}
   12.83 +		}
   12.84 +	}
   12.85 +#endif /* IRQ_DEBUG */
   12.86 +
   12.87 +	/*
   12.88 +	 * Always set TPR to limit maximum interrupt nesting depth to
   12.89 +	 * 16 (without this, it would be ~240, which could easily lead
   12.90 +	 * to kernel stack overflows).
   12.91 +	 */
   12.92 +	vmx_irq_enter();
   12.93 +	saved_tpr = ia64_getreg(_IA64_REG_CR_TPR);
   12.94 +	ia64_srlz_d();
   12.95 +	while (vector != IA64_SPURIOUS_INT_VECTOR) {
   12.96 +	    if (!IS_RESCHEDULE(vector)) {
   12.97 +		ia64_setreg(_IA64_REG_CR_TPR, vector);
   12.98 +		ia64_srlz_d();
   12.99 +
  12.100 +		if (vector != IA64_TIMER_VECTOR) {
  12.101 +			/* FIXME: Leave IRQ re-route later */
  12.102 +			vmx_vcpu_pend_interrupt(dom0->vcpu[0],vector);
  12.103 +			wake_dom0 = 1;
  12.104 +		}
  12.105 +		else {	// FIXME: Handle Timer only now
  12.106 +			__do_IRQ(local_vector_to_irq(vector), regs);
  12.107 +		}
  12.108 +		
  12.109 +		/*
  12.110 +		 * Disable interrupts and send EOI:
  12.111 +		 */
  12.112 +		local_irq_disable();
  12.113 +		ia64_setreg(_IA64_REG_CR_TPR, saved_tpr);
  12.114 +	    }
  12.115 +	    else {
  12.116 +                printf("Oops: RESCHEDULE IPI absorbed by HV\n");
  12.117 +            }
  12.118 +	    ia64_eoi();
  12.119 +	    vector = ia64_get_ivr();
  12.120 +	}
  12.121 +	/*
  12.122 +	 * This must be done *after* the ia64_eoi().  For example, the keyboard softirq
  12.123 +	 * handler needs to be able to wait for further keyboard interrupts, which can't
  12.124 +	 * come through until ia64_eoi() has been done.
  12.125 +	 */
  12.126 +	vmx_irq_exit();
  12.127 +	if ( wake_dom0 && current != dom0 ) 
  12.128 +		vcpu_wake(dom0->vcpu[0]);
  12.129 +}
  12.130 +#endif