ia64/xen-unstable

changeset 15151:7476a0ea8ee4

[IA64] Cleanup ivt.S

xen/ivt.S is full of #ifndef XEN conditional parts. However, they are not
maintained, and there is no reason to keep them in sync with the Linux ivt.S,
which has diverged considerably. To make the code more readable, I remove the
conditional parts.

Signed-off-by: Tristan Gingold <tgingold@free.fr>
author Alex Williamson <alex.williamson@hp.com>
date Thu May 31 11:37:38 2007 -0600 (2007-05-31)
parents b1b80a14d023
children 33c3dbc8ab3d
files xen/arch/ia64/xen/ivt.S
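
The cleanup collapses each #ifdef XEN / #else / #endif block to its Xen branch and drops
the unused Linux-only branch. As an illustration (taken from the ikey_miss hunk in the
diff below), a typical entry goes from:

	ENTRY(ikey_miss)
		DBG_FAULT(6)
	#ifdef XEN
		FAULT_OR_REFLECT(6)
	#else
		FAULT(6)
	#endif
	END(ikey_miss)

to:

	ENTRY(ikey_miss)
		DBG_FAULT(6)
		FAULT_OR_REFLECT(6)
	END(ikey_miss)
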
line diff
     1.1 --- a/xen/arch/ia64/xen/ivt.S	Thu May 31 11:25:46 2007 -0600
     1.2 +++ b/xen/arch/ia64/xen/ivt.S	Thu May 31 11:37:38 2007 -0600
     1.3 @@ -1,9 +1,7 @@
     1.4 -#ifdef XEN
     1.5  #include <asm/debugger.h>
     1.6  #include <asm/vhpt.h>
     1.7  #include <public/arch-ia64.h>
     1.8  #include <asm/config.h>
     1.9 -#endif
    1.10  /*
    1.11   * arch/ia64/kernel/ivt.S
    1.12   *
    1.13 @@ -58,11 +56,7 @@
    1.14  #include <asm/system.h>
    1.15  #include <asm/thread_info.h>
    1.16  #include <asm/unistd.h>
    1.17 -#ifdef XEN
    1.18  #include <xen/errno.h>
    1.19 -#else
    1.20 -#include <asm/errno.h>
    1.21 -#endif
    1.22  
    1.23  #if 1
    1.24  # define PSR_DEFAULT_BITS	psr.ac
    1.25 @@ -110,144 +104,7 @@ ia64_ivt:
    1.26  // 0x0000 Entry 0 (size 64 bundles) VHPT Translation (8,20,47)
    1.27  ENTRY(vhpt_miss)
    1.28  	DBG_FAULT(0)
    1.29 -#ifdef XEN
    1.30  	FAULT(0)
    1.31 -#else
    1.32 -	/*
    1.33 -	 * The VHPT vector is invoked when the TLB entry for the virtual
    1.34 -	 * page table is missing.  This happens only as a result of a 
    1.35 -	 * previous (the "original") TLB miss, which may either be caused
    1.36 -	 * by an instruction fetch or a data access (or non-access).
    1.37 -	 *
    1.38 -	 * What we do here is normal TLB miss handing for the _original_ 
    1.39 -	 * miss, followed by inserting the TLB entry for the virtual page
    1.40 -	 * table page that the VHPT walker was attempting to access.  The
    1.41 -	 * latter gets inserted as long as both L1 and L2 have valid 
    1.42 -	 * mappings for the faulting address.  The TLB entry for the 
    1.43 -	 * original miss gets inserted only if the L3 entry indicates
    1.44 -	 * that the page is present.
    1.45 -	 *
    1.46 -	 * do_page_fault gets invoked in the following cases:
    1.47 -	 *	- the faulting virtual address uses unimplemented address bits
    1.48 -	 *	- the faulting virtual address has no L1, L2, or L3 mapping
    1.49 -	 */
    1.50 -	mov r16=cr.ifa			// get address that caused the TLB miss
    1.51 -#ifdef CONFIG_HUGETLB_PAGE
    1.52 -	movl r18=PAGE_SHIFT
    1.53 -	mov r25=cr.itir
    1.54 -#endif
    1.55 -	;;
    1.56 -	rsm psr.dt			// use physical addressing for data
    1.57 -	mov r31=pr			// save the predicate registers
    1.58 -	mov r19=IA64_KR(PT_BASE)	// get page table base address
    1.59 -	shl r21=r16,3			// shift bit 60 into sign bit
    1.60 -	shr.u r17=r16,61		// get the region number into r17
    1.61 -	;;
    1.62 -	shr r22=r21,3
    1.63 -#ifdef CONFIG_HUGETLB_PAGE
    1.64 -	extr.u r26=r25,2,6
    1.65 -	;;
    1.66 -	cmp.ne p8,p0=r18,r26
    1.67 -	sub r27=r26,r18
    1.68 -	;;
    1.69 -(p8)	dep r25=r18,r25,2,6
    1.70 -(p8)	shr r22=r22,r27
    1.71 -#endif
    1.72 -	;;
    1.73 -	cmp.eq p6,p7=5,r17		// is IFA pointing into to region 5?
    1.74 -	shr.u r18=r22,PGDIR_SHIFT	// get bits 33-63 of faulting address
    1.75 -	;;
    1.76 -(p7)	dep r17=r17,r19,(PAGE_SHIFT-3),3  // put region number bits in place
    1.77 -
    1.78 -	srlz.d
    1.79 -	LOAD_PHYSICAL(p6, r19, swapper_pg_dir)	// region 5 is rooted at 
    1.80 -						//   swapper_pg_dir
    1.81 -
    1.82 -	.pred.rel "mutex", p6, p7
    1.83 -(p6)	shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT
    1.84 -(p7)	shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT-3
    1.85 -	;;
    1.86 -(p6)	dep r17=r18,r19,3,(PAGE_SHIFT-3)	// r17=PTA + IFA(33,42)*8
    1.87 -(p7)	dep r17=r18,r17,3,(PAGE_SHIFT-6)	// r17=PTA + 
    1.88 -						//     (((IFA(61,63) << 7) |
    1.89 -						//      IFA(33,39))*8)
    1.90 -	cmp.eq p7,p6=0,r21			// unused address bits all zero?
    1.91 -	shr.u r18=r22,PMD_SHIFT			// shift L2 index into position
    1.92 -	;;
    1.93 -	ld8 r17=[r17]				// fetch the L1 entry (may be 0)
    1.94 -	;;
    1.95 -(p7)	cmp.eq p6,p7=r17,r0			// was L1 entry NULL?
    1.96 -	dep r17=r18,r17,3,(PAGE_SHIFT-3)	// compute address of L2 page
    1.97 -						//   table entry
    1.98 -	;;
    1.99 -(p7)	ld8 r20=[r17]				// fetch the L2 entry (may be 0)
   1.100 -	shr.u r19=r22,PAGE_SHIFT		// shift L3 index into position
   1.101 -	;;
   1.102 -(p7)	cmp.eq.or.andcm p6,p7=r20,r0		// was L2 entry NULL?
   1.103 -	dep r21=r19,r20,3,(PAGE_SHIFT-3)	// compute address of L3 page
   1.104 -						//   table entry
   1.105 -	;;
   1.106 -(p7)	ld8 r18=[r21]				// read the L3 PTE
   1.107 -	mov r19=cr.isr				// cr.isr bit 0 tells us if
   1.108 -						//   this is an insn miss
   1.109 -	;;
   1.110 -(p7)	tbit.z p6,p7=r18,_PAGE_P_BIT		// page present bit cleared?
   1.111 -	mov r22=cr.iha				// get the VHPT address that
   1.112 -						//   caused the TLB miss
   1.113 -	;;					// avoid RAW on p7
   1.114 -(p7)	tbit.nz.unc p10,p11=r19,32		// is it an instruction TLB
   1.115 -						//   miss?
   1.116 -	dep r23=0,r20,0,PAGE_SHIFT		// clear low bits to get page
   1.117 -						//   address
   1.118 -	;;
   1.119 -(p10)	itc.i r18				// insert the instruction TLB
   1.120 -						//   entry
   1.121 -(p11)	itc.d r18				// insert the data TLB entry
   1.122 -(p6)	br.cond.spnt.many page_fault		// handle bad address/page not
   1.123 -						//   present (page fault)
   1.124 -	mov cr.ifa=r22
   1.125 -
   1.126 -#ifdef CONFIG_HUGETLB_PAGE
   1.127 -(p8)	mov cr.itir=r25				// change to default page-size
   1.128 -						//   for VHPT
   1.129 -#endif
   1.130 -
   1.131 -	/*
   1.132 -	 * Now compute and insert the TLB entry for the virtual page table.
   1.133 -	 * We never execute in a page table page so there is no need to set
   1.134 -	 * the exception deferral bit.
   1.135 -	 */
   1.136 -	adds r24=__DIRTY_BITS_NO_ED|_PAGE_PL_0|_PAGE_AR_RW,r23
   1.137 -	;;
   1.138 -(p7)	itc.d r24
   1.139 -	;;
   1.140 -#ifdef CONFIG_SMP
   1.141 -	/*
   1.142 -	 * Tell the assemblers dependency-violation checker that the above
   1.143 -	 * "itc" instructions cannot possibly affect the following loads:
   1.144 -	 */
   1.145 -	dv_serialize_data
   1.146 -
   1.147 -	/*
   1.148 -	 * Re-check L2 and L3 pagetable.  If they changed, we may have 
   1.149 -	 * received a ptc.g between reading the pagetable and the "itc".
   1.150 -	 * If so, flush the entry we inserted and retry.
   1.151 -	 */
   1.152 -	ld8 r25=[r21]				// read L3 PTE again
   1.153 -	ld8 r26=[r17]				// read L2 entry again
   1.154 -	;;
   1.155 -	cmp.ne p6,p7=r26,r20			// did L2 entry change
   1.156 -	mov r27=PAGE_SHIFT<<2
   1.157 -	;;
   1.158 -(p6)	ptc.l r22,r27				// purge PTE page translation
   1.159 -(p7)	cmp.ne.or.andcm p6,p7=r25,r18		// did L3 PTE change
   1.160 -	;;
   1.161 -(p6)	ptc.l r16,r27				// purge translation
   1.162 -#endif
   1.163 -
   1.164 -	mov pr=r31,-1				// restore predicate registers
   1.165 -	rfi
   1.166 -#endif
   1.167  END(vhpt_miss)
   1.168  
   1.169  	.org ia64_ivt+0x400
   1.170 @@ -255,7 +112,6 @@ END(vhpt_miss)
   1.171  // 0x0400 Entry 1 (size 64 bundles) ITLB (21)
   1.172  ENTRY(itlb_miss)
   1.173  	DBG_FAULT(1)
   1.174 -#ifdef XEN
   1.175  	mov r16 = cr.ifa
   1.176  	mov r31 = pr
   1.177  	;;
   1.178 @@ -274,7 +130,6 @@ ENTRY(itlb_miss)
   1.179  	br.cond.sptk fast_tlb_miss_reflect
   1.180  	;;
   1.181  #endif
   1.182 -#endif
   1.183  	/*
   1.184  	 * The ITLB handler accesses the L3 PTE via the virtually mapped linear
   1.185  	 * page table.  If a nested TLB miss occurs, we switch into physical
   1.186 @@ -320,7 +175,6 @@ END(itlb_miss)
   1.187  // 0x0800 Entry 2 (size 64 bundles) DTLB (9,48)
   1.188  ENTRY(dtlb_miss)
   1.189  	DBG_FAULT(2)
   1.190 -#ifdef XEN
   1.191  	mov r16=cr.ifa			// get virtual address
   1.192  	mov r31=pr
   1.193  	;;
   1.194 @@ -394,17 +248,6 @@ 2:
   1.195  	;;
   1.196  #endif
   1.197  	mov r29=b0				// save b0
   1.198 -#else	
   1.199 -	/*
   1.200 -	 * The DTLB handler accesses the L3 PTE via the virtually mapped linear
   1.201 -	 * page table.  If a nested TLB miss occurs, we switch into physical
   1.202 -	 * mode, walk the page table, and then re-execute the L3 PTE read
   1.203 -	 * and go on normally after that.
   1.204 -	 */
   1.205 -	mov r16=cr.ifa				// get virtual address
   1.206 -	mov r29=b0				// save b0
   1.207 -	mov r31=pr				// save predicates
   1.208 -#endif
   1.209  dtlb_fault:
   1.210  	mov r17=cr.iha				// get virtual address of L3 PTE
   1.211  	movl r30=1f				// load nested fault 
   1.212 @@ -441,7 +284,6 @@ END(dtlb_miss)
   1.213  // 0x0c00 Entry 3 (size 64 bundles) Alt ITLB (19)
   1.214  ENTRY(alt_itlb_miss)
   1.215  	DBG_FAULT(3)
   1.216 -#ifdef XEN
   1.217  	mov r16=cr.ifa		// get address that caused the TLB miss
   1.218  	mov r31=pr
   1.219  	;;
   1.220 @@ -450,14 +292,6 @@ late_alt_itlb_miss:
   1.221  	movl r17=PAGE_KERNEL
   1.222  	movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
   1.223  	;;
   1.224 -#else
   1.225 -	mov r16=cr.ifa		// get address that caused the TLB miss
   1.226 -	movl r17=PAGE_KERNEL
   1.227 -	mov r21=cr.ipsr
   1.228 -	movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
   1.229 -	mov r31=pr
   1.230 -	;;
   1.231 -#endif
   1.232  #ifdef CONFIG_DISABLE_VHPT
   1.233  	shr.u r22=r16,61		// get the region number into r21
   1.234  	;;
   1.235 @@ -471,15 +305,9 @@ late_alt_itlb_miss:
   1.236  #endif
   1.237  	extr.u r23=r21,IA64_PSR_CPL0_BIT,2	// extract psr.cpl
   1.238  	and r19=r19,r16		// clear ed, reserved bits, and PTE control bits
   1.239 -#ifdef XEN
   1.240  	shr.u r18=r16,55	// move address bit 59 to bit 4
   1.241  	;;
   1.242  	and r18=0x10,r18	// bit 4=address-bit(59)
   1.243 -#else
   1.244 -	shr.u r18=r16,57	// move address bit 61 to bit 4
   1.245 -	;;
   1.246 -	andcm r18=0x10,r18	// bit 4=~address-bit(61)
   1.247 -#endif
   1.248  	cmp.ne p8,p0=r0,r23	// psr.cpl != 0?
   1.249  	or r19=r17,r19		// insert PTE control bits into r19
   1.250  	;;
   1.251 @@ -497,7 +325,6 @@ END(alt_itlb_miss)
   1.252  // 0x1000 Entry 4 (size 64 bundles) Alt DTLB (7,46)
   1.253  ENTRY(alt_dtlb_miss)
   1.254  	DBG_FAULT(4)
   1.255 -#ifdef XEN
   1.256  	mov r16=cr.ifa		// get address that caused the TLB miss
   1.257  	mov r31=pr
   1.258  	;;
   1.259 @@ -507,7 +334,6 @@ late_alt_dtlb_miss:
   1.260  	mov r21=cr.ipsr
   1.261  	movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
   1.262  	;;
   1.263 -#endif
   1.264  #ifdef CONFIG_DISABLE_VHPT
   1.265  	shr.u r22=r16,61			// get the region into r22
   1.266  	;;
   1.267 @@ -522,25 +348,15 @@ late_alt_dtlb_miss:
   1.268  	extr.u r23=r21,IA64_PSR_CPL0_BIT,2	// extract psr.cpl
   1.269  	and r22=IA64_ISR_CODE_MASK,r20		// get the isr.code field
   1.270  	tbit.nz p6,p7=r20,IA64_ISR_SP_BIT	// is speculation bit on?
   1.271 -#ifdef XEN
   1.272  	shr.u r18=r16,55			// move address bit 59 to bit 4
   1.273  	and r19=r19,r16				// clear ed, reserved bits, and
   1.274  						//   PTE control bits
   1.275  	tbit.nz p9,p0=r20,IA64_ISR_NA_BIT	// is non-access bit on?
   1.276  	;;
   1.277  	and r18=0x10,r18	// bit 4=address-bit(59)
   1.278 -#else
   1.279 -	shr.u r18=r16,57			// move address bit 61 to bit 4
   1.280 -	and r19=r19,r16				// clear ed, reserved bits, and
   1.281 -						//   PTE control bits
   1.282 -	tbit.nz p9,p0=r20,IA64_ISR_NA_BIT	// is non-access bit on?
   1.283 -	;;
   1.284 -	andcm r18=0x10,r18	// bit 4=~address-bit(61)
   1.285 -#endif
   1.286  	cmp.ne p8,p0=r0,r23
   1.287  (p9)	cmp.eq.or.andcm p6,p7=IA64_ISR_CODE_LFETCH,r22	// check isr.code field
   1.288  (p8)	br.cond.spnt page_fault
   1.289 -#ifdef XEN
   1.290  	;;
   1.291  #ifdef CONFIG_VIRTUAL_FRAME_TABLE
   1.292  	shr r22=r16,56	 	// Test for the address of virtual frame_table
   1.293 @@ -560,8 +376,6 @@ late_alt_dtlb_miss:
   1.294  (p8)	br.cond.sptk page_fault
   1.295  	;;
   1.296  1:
   1.297 -#endif
   1.298 -
   1.299  	dep r21=-1,r21,IA64_PSR_ED_BIT,1
   1.300  	or r19=r19,r17		// insert PTE control bits into r19
   1.301  	;;
   1.302 @@ -644,76 +458,9 @@ END(ia64_frametable_probe)
   1.303  // 0x1400 Entry 5 (size 64 bundles) Data nested TLB (6,45)
   1.304  ENTRY(nested_dtlb_miss)
   1.305  	DBG_FAULT(5)
   1.306 -#ifdef XEN
   1.307  	mov b0=r30
   1.308  	br.sptk.many b0			// return to the continuation point
   1.309  	;;
   1.310 -#else
   1.311 -	/*
   1.312 -	 * In the absence of kernel bugs, we get here when the virtually
   1.313 -	 * mapped linear page table is accessed non-speculatively (e.g.,
   1.314 -	 * in the Dirty-bit, Instruction Access-bit, or Data Access-bit 
   1.315 -	 * faults).  If the DTLB entry for the virtual page table is missing,
   1.316 -	 * a nested TLB miss fault is triggered and control is transferred 
   1.317 -	 * to this point.  When this happens, we lookup the pte for the
   1.318 -	 * faulting address by walking the page table in physical mode
   1.319 -	 * and return to the continuation point passed in register r30
   1.320 -	 * (or call page_fault if the address is not mapped).
   1.321 -	 *
   1.322 -	 * Input:	r16:	faulting address
   1.323 -	 *		r29:	saved b0
   1.324 -	 *		r30:	continuation address
   1.325 -	 *		r31:	saved pr
   1.326 -	 *
   1.327 -	 * Output:	r17:	physical address of L3 PTE of faulting address
   1.328 -	 *		r29:	saved b0
   1.329 -	 *		r30:	continuation address
   1.330 -	 *		r31:	saved pr
   1.331 -	 *
   1.332 -	 * Clobbered:	b0, r18, r19, r21, psr.dt (cleared)
   1.333 -	 */
   1.334 -	rsm psr.dt			// switch to using physical data 
   1.335 -					//   addressing
   1.336 -	mov r19=IA64_KR(PT_BASE)	// get the page table base address
   1.337 -	shl r21=r16,3			// shift bit 60 into sign bit
   1.338 -	;;
   1.339 -	shr.u r17=r16,61		// get the region number into r17
   1.340 -	;;
   1.341 -	cmp.eq p6,p7=5,r17		// is faulting address in region 5?
   1.342 -	shr.u r18=r16,PGDIR_SHIFT	// get bits 33-63 of faulting address
   1.343 -	;;
   1.344 -(p7)	dep r17=r17,r19,(PAGE_SHIFT-3),3  // put region number bits in place
   1.345 -
   1.346 -	srlz.d
   1.347 -	LOAD_PHYSICAL(p6, r19, swapper_pg_dir)	// region 5 is rooted at 
   1.348 -						//   swapper_pg_dir
   1.349 -
   1.350 -	.pred.rel "mutex", p6, p7
   1.351 -(p6)	shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT
   1.352 -(p7)	shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT-3
   1.353 -	;;
   1.354 -(p6)	dep r17=r18,r19,3,(PAGE_SHIFT-3)  // r17=PTA + IFA(33,42)*8
   1.355 -(p7)	dep r17=r18,r17,3,(PAGE_SHIFT-6)  // r17=PTA + (((IFA(61,63) << 7) |
   1.356 -					  //            IFA(33,39))*8)
   1.357 -	cmp.eq p7,p6=0,r21		// unused address bits all zeroes?
   1.358 -	shr.u r18=r16,PMD_SHIFT		// shift L2 index into position
   1.359 -	;;
   1.360 -	ld8 r17=[r17]			// fetch the L1 entry (may be 0)
   1.361 -	;;
   1.362 -(p7)	cmp.eq p6,p7=r17,r0		// was L1 entry NULL?
   1.363 -	dep r17=r18,r17,3,(PAGE_SHIFT-3)  // compute address of L2 page table
   1.364 -					  //   entry
   1.365 -	;;
   1.366 -(p7)	ld8 r17=[r17]			// fetch the L2 entry (may be 0)
   1.367 -	shr.u r19=r16,PAGE_SHIFT	// shift L3 index into position
   1.368 -	;;
   1.369 -(p7)	cmp.eq.or.andcm p6,p7=r17,r0	// was L2 entry NULL?
   1.370 -	dep r17=r19,r17,3,(PAGE_SHIFT-3)  // compute address of L3 page table
   1.371 -					  //   entry
   1.372 -(p6)	br.cond.spnt page_fault
   1.373 -	mov b0=r30
   1.374 -	br.sptk.many b0			// return to continuation point
   1.375 -#endif
   1.376  END(nested_dtlb_miss)
   1.377  
   1.378  	.org ia64_ivt+0x1800
   1.379 @@ -721,36 +468,22 @@ END(nested_dtlb_miss)
   1.380  // 0x1800 Entry 6 (size 64 bundles) Instruction Key Miss (24)
   1.381  ENTRY(ikey_miss)
   1.382  	DBG_FAULT(6)
   1.383 -#ifdef XEN
   1.384  	FAULT_OR_REFLECT(6)
   1.385 -#else
   1.386 -	FAULT(6)
   1.387 -#endif
   1.388  END(ikey_miss)
   1.389  
   1.390  	//----------------------------------------------------------------
   1.391  	// call do_page_fault (predicates are in r31, psr.dt may be off, 
   1.392  	// r16 is faulting address)
   1.393 -#ifdef XEN
   1.394  GLOBAL_ENTRY(page_fault)
   1.395 -#else
   1.396 -ENTRY(page_fault)
   1.397 -#endif
   1.398  	ssm psr.dt
   1.399  	;;
   1.400  	srlz.i
   1.401  	;;
   1.402  	SAVE_MIN_WITH_COVER
   1.403 -#ifdef XEN
   1.404  	alloc r15=ar.pfs,0,0,4,0
   1.405  	mov out0=cr.ifa
   1.406  	mov out1=cr.isr
   1.407  	mov out3=cr.itir
   1.408 -#else
   1.409 -	alloc r15=ar.pfs,0,0,3,0
   1.410 -	mov out0=cr.ifa
   1.411 -	mov out1=cr.isr
   1.412 -#endif
   1.413  	adds r3=8,r2			// set up second base pointer
   1.414  	;;
   1.415  	ssm psr.ic | PSR_DEFAULT_BITS
   1.416 @@ -773,11 +506,7 @@ END(page_fault)
   1.417  // 0x1c00 Entry 7 (size 64 bundles) Data Key Miss (12,51)
   1.418  ENTRY(dkey_miss)
   1.419  	DBG_FAULT(7)
   1.420 -#ifdef XEN
   1.421  	FAULT_OR_REFLECT(7)
   1.422 -#else
   1.423 -	FAULT(7)
   1.424 -#endif
   1.425  END(dkey_miss)
   1.426  
   1.427  	.org ia64_ivt+0x2000
   1.428 @@ -785,7 +514,6 @@ END(dkey_miss)
   1.429  // 0x2000 Entry 8 (size 64 bundles) Dirty-bit (54)
   1.430  ENTRY(dirty_bit)
   1.431  	DBG_FAULT(8)
   1.432 -#ifdef XEN
   1.433  	mov r20=cr.ipsr
   1.434  	mov r31=pr
   1.435  	;;
   1.436 @@ -830,67 +558,6 @@ ENTRY(dirty_bit)
   1.437  	;;
   1.438  	mov rp=r14
   1.439  	br.call.sptk.many b6=ia64_shadow_fault
   1.440 -#else
   1.441 -	/*
   1.442 -	 * What we do here is to simply turn on the dirty bit in the PTE.
   1.443 -	 * We need to update both the page-table and the TLB entry.  To 
   1.444 -	 * efficiently access the PTE, we address it through the virtual
   1.445 -	 * page table.  Most likely, the TLB entry for the relevant virtual
   1.446 -	 * page table page is still present in the TLB so we can normally 
   1.447 -	 * do this without additional TLB misses.  In case the necessary 
   1.448 -	 * virtual page table TLB entry isn't present, we take a nested 
   1.449 -	 * TLB miss hit where we look up the physical address of the L3
   1.450 -	 * PTE and then continue at label 1 below.
   1.451 -	 */
   1.452 -	mov r16=cr.ifa			// get the address that caused the 
   1.453 -					//   fault
   1.454 -	movl r30=1f			// load continuation point in case 
   1.455 -					//   of nested fault
   1.456 -	;;
   1.457 -	thash r17=r16			// compute virtual address of L3 PTE
   1.458 -	mov r29=b0			// save b0 in case of nested fault
   1.459 -	mov r31=pr			// save pr
   1.460 -#ifdef CONFIG_SMP
   1.461 -	mov r28=ar.ccv			// save ar.ccv
   1.462 -	;;
   1.463 -1:	ld8 r18=[r17]
   1.464 -	;;				// avoid RAW on r18
   1.465 -	mov ar.ccv=r18			// set compare value for cmpxchg
   1.466 -	or r25=_PAGE_D|_PAGE_A,r18	// set the dirty and accessed bits
   1.467 -	;;
   1.468 -	cmpxchg8.acq r26=[r17],r25,ar.ccv
   1.469 -	mov r24=PAGE_SHIFT<<2
   1.470 -	;;
   1.471 -	cmp.eq p6,p7=r26,r18
   1.472 -	;;
   1.473 -(p6)	itc.d r25			// install updated PTE
   1.474 -	;;
   1.475 -	/*
   1.476 -	 * Tell the assemblers dependency-violation checker that the above
   1.477 -	 * "itc" instructions cannot possibly affect the following loads:
   1.478 -	 */
   1.479 -	dv_serialize_data
   1.480 -
   1.481 -	ld8 r18=[r17]			// read PTE again
   1.482 -	;;
   1.483 -	cmp.eq p6,p7=r18,r25		// is it same as the newly installed
   1.484 -	;;
   1.485 -(p7)	ptc.l r16,r24
   1.486 -	mov b0=r29			// restore b0
   1.487 -	mov ar.ccv=r28
   1.488 -#else
   1.489 -	;;
   1.490 -1:	ld8 r18=[r17]
   1.491 -	;;				// avoid RAW on r18
   1.492 -	or r18=_PAGE_D|_PAGE_A,r18	// set the dirty and accessed bits
   1.493 -	mov b0=r29			// restore b0
   1.494 -	;;
   1.495 -	st8 [r17]=r18			// store back updated PTE
   1.496 -	itc.d r18			// install updated PTE
   1.497 -#endif
   1.498 -	mov pr=r31,-1			// restore pr
   1.499 -	rfi
   1.500 -#endif
   1.501  END(dirty_bit)
   1.502  
   1.503  	.org ia64_ivt+0x2400
   1.504 @@ -898,75 +565,12 @@ END(dirty_bit)
   1.505  // 0x2400 Entry 9 (size 64 bundles) Instruction Access-bit (27)
   1.506  ENTRY(iaccess_bit)
   1.507  	DBG_FAULT(9)
   1.508 -#ifdef XEN
   1.509  	mov r16=cr.isr
   1.510  	mov r17=cr.ifa
   1.511  	mov r31=pr
   1.512  	mov r19=9
   1.513  	mov r20=0x2400
   1.514  	br.sptk.many fast_access_reflect;;
   1.515 -#else
   1.516 -	// Like Entry 8, except for instruction access
   1.517 -	mov r16=cr.ifa			// get the address that caused the
   1.518 -					//   fault
   1.519 -	movl r30=1f			// load continuation point in case 
   1.520 -					//   of nested fault
   1.521 -	mov r31=pr			// save predicates
   1.522 -#ifdef CONFIG_ITANIUM
   1.523 -	/*
   1.524 -	 * Erratum 10 (IFA may contain incorrect address) has "NoFix" status.
   1.525 -	 */
   1.526 -	mov r17=cr.ipsr
   1.527 -	;;
   1.528 -	mov r18=cr.iip
   1.529 -	tbit.z p6,p0=r17,IA64_PSR_IS_BIT  // IA64 instruction set?
   1.530 -	;;
   1.531 -(p6)	mov r16=r18			// if so, use cr.iip instead of cr.ifa
   1.532 -#endif /* CONFIG_ITANIUM */
   1.533 -	;;
   1.534 -	thash r17=r16			// compute virtual address of L3 PTE
   1.535 -	mov r29=b0			// save b0 in case of nested fault)
   1.536 -#ifdef CONFIG_SMP
   1.537 -	mov r28=ar.ccv			// save ar.ccv
   1.538 -	;;
   1.539 -1:	ld8 r18=[r17]
   1.540 -	;;
   1.541 -	mov ar.ccv=r18			// set compare value for cmpxchg
   1.542 -	or r25=_PAGE_A,r18		// set the accessed bit
   1.543 -	;;
   1.544 -	cmpxchg8.acq r26=[r17],r25,ar.ccv
   1.545 -	mov r24=PAGE_SHIFT<<2
   1.546 -	;;
   1.547 -	cmp.eq p6,p7=r26,r18
   1.548 -	;;
   1.549 -(p6)	itc.i r25			// install updated PTE
   1.550 -	;;
   1.551 -	/*
   1.552 -	 * Tell the assemblers dependency-violation checker that the above
   1.553 -	 * "itc" instructions cannot possibly affect the following loads:
   1.554 -	 */
   1.555 -	dv_serialize_data
   1.556 -
   1.557 -	ld8 r18=[r17]			// read PTE again
   1.558 -	;;
   1.559 -	cmp.eq p6,p7=r18,r25		// is it same as the newly installed
   1.560 -	;;
   1.561 -(p7)	ptc.l r16,r24
   1.562 -	mov b0=r29			// restore b0
   1.563 -	mov ar.ccv=r28
   1.564 -#else /* !CONFIG_SMP */
   1.565 -	;;
   1.566 -1:	ld8 r18=[r17]
   1.567 -	;;
   1.568 -	or r18=_PAGE_A,r18		// set the accessed bit
   1.569 -	mov b0=r29			// restore b0
   1.570 -	;;
   1.571 -	st8 [r17]=r18			// store back updated PTE
   1.572 -	itc.i r18			// install updated PTE
   1.573 -#endif /* !CONFIG_SMP */
   1.574 -	mov pr=r31,-1
   1.575 -	rfi
   1.576 -#endif
   1.577  END(iaccess_bit)
   1.578  
   1.579  	.org ia64_ivt+0x2800
   1.580 @@ -974,7 +578,6 @@ END(iaccess_bit)
   1.581  // 0x2800 Entry 10 (size 64 bundles) Data Access-bit (15,55)
   1.582  ENTRY(daccess_bit)
   1.583  	DBG_FAULT(10)
   1.584 -#ifdef XEN
   1.585  	mov r16=cr.isr
   1.586  	mov r17=cr.ifa
   1.587  	mov r31=pr
   1.588 @@ -982,55 +585,6 @@ ENTRY(daccess_bit)
   1.589  	mov r20=0x2800
   1.590  	br.sptk.many fast_access_reflect
   1.591  	;;
   1.592 -#else
   1.593 -	// Like Entry 8, except for data access
   1.594 -	mov r16=cr.ifa			// get the address that caused the
   1.595 -					//   fault
   1.596 -	movl r30=1f			// load continuation point in case
   1.597 -					//   of nested fault
   1.598 -	;;
   1.599 -	thash r17=r16			// compute virtual address of L3 PTE
   1.600 -	mov r31=pr
   1.601 -	mov r29=b0			// save b0 in case of nested fault)
   1.602 -#ifdef CONFIG_SMP
   1.603 -	mov r28=ar.ccv			// save ar.ccv
   1.604 -	;;
   1.605 -1:	ld8 r18=[r17]
   1.606 -	;;				// avoid RAW on r18
   1.607 -	mov ar.ccv=r18			// set compare value for cmpxchg
   1.608 -	or r25=_PAGE_A,r18		// set the dirty bit
   1.609 -	;;
   1.610 -	cmpxchg8.acq r26=[r17],r25,ar.ccv
   1.611 -	mov r24=PAGE_SHIFT<<2
   1.612 -	;;
   1.613 -	cmp.eq p6,p7=r26,r18
   1.614 -	;;
   1.615 -(p6)	itc.d r25			// install updated PTE
   1.616 -	/*
   1.617 -	 * Tell the assemblers dependency-violation checker that the above
   1.618 -	 * "itc" instructions cannot possibly affect the following loads:
   1.619 -	 */
   1.620 -	dv_serialize_data
   1.621 -	;;
   1.622 -	ld8 r18=[r17]			// read PTE again
   1.623 -	;;
   1.624 -	cmp.eq p6,p7=r18,r25		// is it same as the newly installed
   1.625 -	;;
   1.626 -(p7)	ptc.l r16,r24
   1.627 -	mov ar.ccv=r28
   1.628 -#else
   1.629 -	;;
   1.630 -1:	ld8 r18=[r17]
   1.631 -	;;				// avoid RAW on r18
   1.632 -	or r18=_PAGE_A,r18		// set the accessed bit
   1.633 -	;;
   1.634 -	st8 [r17]=r18			// store back updated PTE
   1.635 -	itc.d r18			// install updated PTE
   1.636 -#endif
   1.637 -	mov b0=r29			// restore b0
   1.638 -	mov pr=r31,-1
   1.639 -	rfi
   1.640 -#endif
   1.641  END(daccess_bit)
   1.642  
   1.643  	.org ia64_ivt+0x2c00
   1.644 @@ -1307,7 +861,6 @@ ENTRY(interrupt)
   1.645  	DBG_FAULT(12)
   1.646  	mov r31=pr		// prepare to save predicates
   1.647  	;;
   1.648 -#ifdef XEN
   1.649  	mov r30=cr.ivr		// pass cr.ivr as first arg
   1.650  	// FIXME: this is a hack... use cpuinfo.ksoftirqd because its
   1.651  	// not used anywhere else and we need a place to stash ivr and
   1.652 @@ -1326,7 +879,6 @@ ENTRY(interrupt)
   1.653  	;;
   1.654  slow_interrupt:
   1.655  	mov rp=r29;;
   1.656 -#endif
   1.657  	SAVE_MIN_WITH_COVER	// uses r31; defines r2 and r3
   1.658  	ssm psr.ic | PSR_DEFAULT_BITS
   1.659  	;;
   1.660 @@ -1336,17 +888,9 @@ slow_interrupt:
   1.661  	SAVE_REST
   1.662  	;;
   1.663  	alloc r14=ar.pfs,0,0,2,0 // must be first in an insn group
   1.664 -#ifdef XEN
   1.665  	movl out0=THIS_CPU(cpu_info)+IA64_CPUINFO_KSOFTIRQD_OFFSET;;
   1.666  	ld8 out0=[out0];;
   1.667 -#else
   1.668 -	mov out0=cr.ivr		// pass cr.ivr as first arg
   1.669 -#endif
   1.670  	add out1=16,sp		// pass pointer to pt_regs as second arg
   1.671 -#ifndef XEN
   1.672 -	;;
   1.673 -	srlz.d			// make sure we see the effect of cr.ivr
   1.674 -#endif
   1.675  	movl r14=ia64_leave_kernel
   1.676  	;;
   1.677  	mov rp=r14
   1.678 @@ -1359,7 +903,6 @@ END(interrupt)
   1.679  	DBG_FAULT(13)
   1.680  	FAULT(13)
   1.681  
   1.682 -#ifdef XEN
   1.683  	// There is no particular reason for this code to be here, other
   1.684  	// than that there happens to be space here that would go unused 
   1.685  	// otherwise.  If this fault ever gets "unreserved", simply move
   1.686 @@ -1389,7 +932,6 @@ dispatch_break_fault_post_save:
   1.687  //	br.sptk.many ia64_prepare_handle_break	// TODO: why commented out?
   1.688      	br.call.sptk.many b6=ia64_handle_break
   1.689  END(dispatch_break_fault)
   1.690 -#endif
   1.691  
   1.692  	.org ia64_ivt+0x3800
   1.693  //////////////////////////////////////////////////////////////////////////
   1.694 @@ -1397,7 +939,6 @@ END(dispatch_break_fault)
   1.695  	DBG_FAULT(14)
   1.696  	FAULT(14)
   1.697  
   1.698 -#ifdef XEN
   1.699      // this code segment is from 2.6.16.13
   1.700      
   1.701  	/*
   1.702 @@ -1539,152 +1080,6 @@ GLOBAL_ENTRY(ia64_syscall_setup)
   1.703  	br.ret.sptk.many b7
   1.704  END(ia64_syscall_setup)
   1.705  
   1.706 -
   1.707 -#else    
   1.708 -	/*
   1.709 -	 * There is no particular reason for this code to be here, other 
   1.710 -	 * than that there happens to be space here that would go unused 
   1.711 -	 * otherwise.  If this fault ever gets "unreserved", simply move
   1.712 -	 * the following code to a more suitable spot...
   1.713 -	 *
   1.714 -	 * ia64_syscall_setup() is a separate subroutine so that it can
   1.715 -	 *	allocate stacked registers so it can safely demine any
   1.716 -	 *	potential NaT values from the input registers.
   1.717 -	 *
   1.718 -	 * On entry:
   1.719 -	 *	- executing on bank 0 or bank 1 register set (doesn't matter)
   1.720 -	 *	-  r1: stack pointer
   1.721 -	 *	-  r2: current task pointer
   1.722 -	 *	-  r3: preserved
   1.723 -	 *	- r11: original contents (saved ar.pfs to be saved)
   1.724 -	 *	- r12: original contents (sp to be saved)
   1.725 -	 *	- r13: original contents (tp to be saved)
   1.726 -	 *	- r15: original contents (syscall # to be saved)
   1.727 -	 *	- r18: saved bsp (after switching to kernel stack)
   1.728 -	 *	- r19: saved b6
   1.729 -	 *	- r20: saved r1 (gp)
   1.730 -	 *	- r21: saved ar.fpsr
   1.731 -	 *	- r22: kernel's register backing store base (krbs_base)
   1.732 -	 *	- r23: saved ar.bspstore
   1.733 -	 *	- r24: saved ar.rnat
   1.734 -	 *	- r25: saved ar.unat
   1.735 -	 *	- r26: saved ar.pfs
   1.736 -	 *	- r27: saved ar.rsc
   1.737 -	 *	- r28: saved cr.iip
   1.738 -	 *	- r29: saved cr.ipsr
   1.739 -	 *	- r31: saved pr
   1.740 -	 *	-  b0: original contents (to be saved)
   1.741 -	 * On exit:
   1.742 -	 *	- executing on bank 1 registers
   1.743 -	 *	- psr.ic enabled, interrupts restored
   1.744 -	 *	-  p10: TRUE if syscall is invoked with more than 8 out
   1.745 -	 *		registers or r15's Nat is true
   1.746 -	 *	-  r1: kernel's gp
   1.747 -	 *	-  r3: preserved (same as on entry)
   1.748 -	 *	-  r8: -EINVAL if p10 is true
   1.749 -	 *	- r12: points to kernel stack
   1.750 -	 *	- r13: points to current task
   1.751 -	 *	- p15: TRUE if interrupts need to be re-enabled
   1.752 -	 *	- ar.fpsr: set to kernel settings
   1.753 -	 */
   1.754 -GLOBAL_ENTRY(ia64_syscall_setup)
   1.755 -#ifndef XEN
   1.756 -#if PT(B6) != 0
   1.757 -# error This code assumes that b6 is the first field in pt_regs.
   1.758 -#endif
   1.759 -#endif
   1.760 -	st8 [r1]=r19			// save b6
   1.761 -	add r16=PT(CR_IPSR),r1		// initialize first base pointer
   1.762 -	add r17=PT(R11),r1		// initialize second base pointer
   1.763 -	;;
   1.764 -	alloc r19=ar.pfs,8,0,0,0	// ensure in0-in7 are writable
   1.765 -	st8 [r16]=r29,PT(AR_PFS)-PT(CR_IPSR)	// save cr.ipsr
   1.766 -	tnat.nz p8,p0=in0
   1.767 -
   1.768 -	st8.spill [r17]=r11,PT(CR_IIP)-PT(R11)	// save r11
   1.769 -	tnat.nz p9,p0=in1
   1.770 -(pKStk)	mov r18=r0				// make sure r18 isn't NaT
   1.771 -	;;
   1.772 -
   1.773 -	st8 [r16]=r26,PT(CR_IFS)-PT(AR_PFS)	// save ar.pfs
   1.774 -	st8 [r17]=r28,PT(AR_UNAT)-PT(CR_IIP)	// save cr.iip
   1.775 -	mov r28=b0				// save b0 (2 cyc)
   1.776 -	;;
   1.777 -
   1.778 -	st8 [r17]=r25,PT(AR_RSC)-PT(AR_UNAT)	// save ar.unat
   1.779 -	dep r19=0,r19,38,26			// clear all bits but 0..37 [I0]
   1.780 -(p8)	mov in0=-1
   1.781 -	;;
   1.782 -
   1.783 -	st8 [r16]=r19,PT(AR_RNAT)-PT(CR_IFS)	// store ar.pfs.pfm in cr.ifs
   1.784 -	extr.u r11=r19,7,7	// I0		// get sol of ar.pfs
   1.785 -	and r8=0x7f,r19		// A		// get sof of ar.pfs
   1.786 -
   1.787 -	st8 [r17]=r27,PT(AR_BSPSTORE)-PT(AR_RSC)// save ar.rsc
   1.788 -	tbit.nz p15,p0=r29,IA64_PSR_I_BIT // I0
   1.789 -(p9)	mov in1=-1
   1.790 -	;;
   1.791 -
   1.792 -(pUStk) sub r18=r18,r22				// r18=RSE.ndirty*8
   1.793 -	tnat.nz p10,p0=in2
   1.794 -	add r11=8,r11
   1.795 -	;;
   1.796 -(pKStk) adds r16=PT(PR)-PT(AR_RNAT),r16		// skip over ar_rnat field
   1.797 -(pKStk) adds r17=PT(B0)-PT(AR_BSPSTORE),r17	// skip over ar_bspstore field
   1.798 -	tnat.nz p11,p0=in3
   1.799 -	;;
   1.800 -(p10)	mov in2=-1
   1.801 -	tnat.nz p12,p0=in4			// [I0]
   1.802 -(p11)	mov in3=-1
   1.803 -	;;
   1.804 -(pUStk) st8 [r16]=r24,PT(PR)-PT(AR_RNAT)	// save ar.rnat
   1.805 -(pUStk) st8 [r17]=r23,PT(B0)-PT(AR_BSPSTORE)	// save ar.bspstore
   1.806 -	shl r18=r18,16				// compute ar.rsc to be used
   1.807 -						//   for "loadrs"
   1.808 -	;;
   1.809 -	st8 [r16]=r31,PT(LOADRS)-PT(PR)		// save predicates
   1.810 -	st8 [r17]=r28,PT(R1)-PT(B0)		// save b0
   1.811 -	tnat.nz p13,p0=in5			// [I0]
   1.812 -	;;
   1.813 -	st8 [r16]=r18,PT(R12)-PT(LOADRS)	// save ar.rsc value for
   1.814 -						//   "loadrs"
   1.815 -	st8.spill [r17]=r20,PT(R13)-PT(R1)	// save original r1
   1.816 -(p12)	mov in4=-1
   1.817 -	;;
   1.818 -
   1.819 -.mem.offset 0,0; st8.spill [r16]=r12,PT(AR_FPSR)-PT(R12)	// save r12
   1.820 -.mem.offset 8,0; st8.spill [r17]=r13,PT(R15)-PT(R13)		// save r13
   1.821 -(p13)	mov in5=-1
   1.822 -	;;
   1.823 -	st8 [r16]=r21,PT(R8)-PT(AR_FPSR)	// save ar.fpsr
   1.824 -	tnat.nz p14,p0=in6
   1.825 -	cmp.lt p10,p9=r11,r8	// frame size can't be more than local+8
   1.826 -	;;
   1.827 -	stf8 [r16]=f1		// ensure pt_regs.r8 != 0 
   1.828 -				//   (see handle_syscall_error)
   1.829 -(p9)	tnat.nz p10,p0=r15
   1.830 -	adds r12=-16,r1		// switch to kernel memory stack (with 16 
   1.831 -				//   bytes of scratch)
   1.832 -
   1.833 -	st8.spill [r17]=r15	// save r15
   1.834 -	tnat.nz p8,p0=in7
   1.835 -	nop.i 0
   1.836 -
   1.837 -	mov r13=r2		// establish `current'
   1.838 -	movl r1=__gp		// establish kernel global pointer
   1.839 -	;;
   1.840 -(p14)	mov in6=-1
   1.841 -(p8)	mov in7=-1
   1.842 -	nop.i 0
   1.843 -
   1.844 -	cmp.eq pSys,pNonSys=r0,r0	// set pSys=1, pNonSys=0
   1.845 -	movl r17=FPSR_DEFAULT
   1.846 -	;;
   1.847 -	mov.m ar.fpsr=r17		// set ar.fpsr to kernel default value
   1.848 -(p10)	mov r8=-EINVAL
   1.849 -	br.ret.sptk.many b7
   1.850 -END(ia64_syscall_setup)
   1.851 -#endif /* XEN */
   1.852  	
   1.853  	.org ia64_ivt+0x3c00
   1.854  //////////////////////////////////////////////////////////////////////////
   1.855 @@ -1692,47 +1087,6 @@ END(ia64_syscall_setup)
   1.856  	DBG_FAULT(15)
   1.857  	FAULT(15)
   1.858  
   1.859 -#ifndef XEN
   1.860 -	/*
   1.861 -	 * Squatting in this space ...
   1.862 -	 *
   1.863 -	 * This special case dispatcher for illegal operation faults 
   1.864 -	 * allows preserved registers to be modified through a callback
   1.865 -	 * function (asm only) that is handed back from the fault handler
   1.866 -	 * in r8.  Up to three arguments can be passed to the callback
   1.867 -	 * function by returning an aggregate with the callback as its 
   1.868 -	 * first element, followed by the arguments.
   1.869 -	 */
   1.870 -ENTRY(dispatch_illegal_op_fault)
   1.871 -	SAVE_MIN_WITH_COVER
   1.872 -	ssm psr.ic | PSR_DEFAULT_BITS
   1.873 -	;;
   1.874 -	srlz.i		// guarantee that interruption collection is on
   1.875 -	;;
   1.876 -(p15)	ssm psr.i	// restore psr.i
   1.877 -	adds r3=8,r2	// set up second base pointer for SAVE_REST
   1.878 -	;;
   1.879 -	alloc r14=ar.pfs,0,0,1,0	// must be first in insn group
   1.880 -	mov out0=ar.ec
   1.881 -	;;
   1.882 -	SAVE_REST
   1.883 -	;;
   1.884 -	br.call.sptk.many rp=ia64_illegal_op_fault
   1.885 -.ret0:	;;
   1.886 -	alloc r14=ar.pfs,0,0,3,0	// must be first in insn group
   1.887 -	mov out0=r9
   1.888 -	mov out1=r10
   1.889 -	mov out2=r11
   1.890 -	movl r15=ia64_leave_kernel
   1.891 -	;;
   1.892 -	mov rp=r15
   1.893 -	mov b6=r8
   1.894 -	;;
   1.895 -	cmp.ne p6,p0=0,r8
   1.896 -(p6)	br.call.dpnt.many b6=b6		// call returns to ia64_leave_kernel
   1.897 -	br.sptk.many ia64_leave_kernel
   1.898 -END(dispatch_illegal_op_fault)
   1.899 -#endif
   1.900  
   1.901  	.org ia64_ivt+0x4000
   1.902  //////////////////////////////////////////////////////////////////////////
   1.903 @@ -1740,7 +1094,6 @@ END(dispatch_illegal_op_fault)
   1.904  	DBG_FAULT(16)
   1.905  	FAULT(16)
   1.906  
   1.907 -#ifdef XEN
   1.908  	// There is no particular reason for this code to be here, other
   1.909  	// than that there happens to be space here that would go unused 
   1.910  	// otherwise.  If this fault ever gets "unreserved", simply move
   1.911 @@ -1771,7 +1124,6 @@ ENTRY(dispatch_privop_fault)
   1.912  //	br.sptk.many ia64_prepare_handle_privop  // TODO: why commented out?
   1.913       	br.call.sptk.many b6=ia64_handle_privop
   1.914  END(dispatch_privop_fault)
   1.915 -#endif
   1.916  
   1.917  
   1.918  	.org ia64_ivt+0x4400
   1.919 @@ -1780,34 +1132,6 @@ END(dispatch_privop_fault)
   1.920  	DBG_FAULT(17)
   1.921  	FAULT(17)
   1.922  
   1.923 -#ifndef XEN
   1.924 -ENTRY(non_syscall)
   1.925 -	SAVE_MIN_WITH_COVER
   1.926 -
   1.927 -	// There is no particular reason for this code to be here, other
   1.928 -	// than that there happens to be space here that would go unused 
   1.929 -	// otherwise.  If this fault ever gets "unreserved", simply move
   1.930 -	// the following code to a more suitable spot...
   1.931 -
   1.932 -	alloc r14=ar.pfs,0,0,2,0
   1.933 -	mov out0=cr.iim
   1.934 -	add out1=16,sp
   1.935 -	adds r3=8,r2		// set up second base pointer for SAVE_REST
   1.936 -
   1.937 -	ssm psr.ic | PSR_DEFAULT_BITS
   1.938 -	;;
   1.939 -	srlz.i			// guarantee that interruption collection is on
   1.940 -	;;
   1.941 -(p15)	ssm psr.i		// restore psr.i
   1.942 -	movl r15=ia64_leave_kernel
   1.943 -	;;
   1.944 -	SAVE_REST
   1.945 -	mov rp=r15
   1.946 -	;;
   1.947 -	br.call.sptk.many b6=ia64_bad_break	// avoid WAW on CFM and 
   1.948 -						//   ignore return addr
   1.949 -END(non_syscall)
   1.950 -#endif
   1.951  
   1.952  	.org ia64_ivt+0x4800
   1.953  //////////////////////////////////////////////////////////////////////////
   1.954 @@ -1815,37 +1139,6 @@ END(non_syscall)
   1.955  	DBG_FAULT(18)
   1.956  	FAULT(18)
   1.957  
   1.958 -#ifndef XEN
   1.959 -	/*
   1.960 -	 * There is no particular reason for this code to be here, other
   1.961 -	 * than that there happens to be space here that would go unused 
   1.962 -	 * otherwise.  If this fault ever gets "unreserved", simply move
   1.963 -	 * the following code to a more suitable spot...
   1.964 -	 */
   1.965 -ENTRY(dispatch_unaligned_handler)
   1.966 -	SAVE_MIN_WITH_COVER
   1.967 -	;;
   1.968 -	alloc r14=ar.pfs,0,0,2,0	// now it's safe (must be first in
   1.969 -					//   insn group!)
   1.970 -	mov out0=cr.ifa
   1.971 -	adds out1=16,sp
   1.972 -
   1.973 -	ssm psr.ic | PSR_DEFAULT_BITS
   1.974 -	;;
   1.975 -	srlz.i				// guarantee that interruption 
   1.976 -					//   collection is on
   1.977 -	;;
   1.978 -(p15)	ssm psr.i			// restore psr.i
   1.979 -	adds r3=8,r2			// set up second base pointer
   1.980 -	;;
   1.981 -	SAVE_REST
   1.982 -	movl r14=ia64_leave_kernel
   1.983 -	;;
   1.984 -	mov rp=r14
   1.985 -//	br.sptk.many ia64_prepare_handle_unaligned // TODO: why commented out?
   1.986 -    	br.call.sptk.many b6=ia64_handle_unaligned
   1.987 -END(dispatch_unaligned_handler)
   1.988 -#endif
   1.989  
   1.990  	.org ia64_ivt+0x4c00
   1.991  //////////////////////////////////////////////////////////////////////////
   1.992 @@ -1900,24 +1193,7 @@ END(dispatch_to_fault_handler)
   1.993  // 0x5000 Entry 20 (size 16 bundles) Page Not Present (10,22,49)
   1.994  ENTRY(page_not_present)
   1.995  	DBG_FAULT(20)
   1.996 -#ifdef XEN
   1.997  	FAULT_OR_REFLECT(20)
   1.998 -#else
   1.999 -	mov r16=cr.ifa
  1.1000 -	rsm psr.dt
  1.1001 -	/*
  1.1002 -	 * The Linux page fault handler doesn't expect non-present pages
  1.1003 -	 * to be in the TLB.  Flush the existing entry now, so we meet 
  1.1004 -	 * that expectation.
  1.1005 -	 */
  1.1006 -	mov r17=PAGE_SHIFT<<2
  1.1007 -	;;
  1.1008 -	ptc.l r16,r17
  1.1009 -	;;
  1.1010 -	mov r31=pr
  1.1011 -	srlz.d
  1.1012 -	br.sptk.many page_fault
  1.1013 -#endif
  1.1014  END(page_not_present)
  1.1015  
  1.1016  	.org ia64_ivt+0x5100
  1.1017 @@ -1925,16 +1201,7 @@ END(page_not_present)
  1.1018  // 0x5100 Entry 21 (size 16 bundles) Key Permission (13,25,52)
  1.1019  ENTRY(key_permission)
  1.1020  	DBG_FAULT(21)
  1.1021 -#ifdef XEN
  1.1022  	FAULT_OR_REFLECT(21)
  1.1023 -#else
  1.1024 -	mov r16=cr.ifa
  1.1025 -	rsm psr.dt
  1.1026 -	mov r31=pr
  1.1027 -	;;
  1.1028 -	srlz.d
  1.1029 -	br.sptk.many page_fault
  1.1030 -#endif
  1.1031  END(key_permission)
  1.1032  
  1.1033  	.org ia64_ivt+0x5200
  1.1034 @@ -1942,16 +1209,7 @@ END(key_permission)
  1.1035  // 0x5200 Entry 22 (size 16 bundles) Instruction Access Rights (26)
  1.1036  ENTRY(iaccess_rights)
  1.1037  	DBG_FAULT(22)
  1.1038 -#ifdef XEN
  1.1039  	FAULT_OR_REFLECT(22)
  1.1040 -#else
  1.1041 -	mov r16=cr.ifa
  1.1042 -	rsm psr.dt
  1.1043 -	mov r31=pr
  1.1044 -	;;
  1.1045 -	srlz.d
  1.1046 -	br.sptk.many page_fault
  1.1047 -#endif
  1.1048  END(iaccess_rights)
  1.1049  
  1.1050  	.org ia64_ivt+0x5300
  1.1051 @@ -1959,7 +1217,6 @@ END(iaccess_rights)
  1.1052  // 0x5300 Entry 23 (size 16 bundles) Data Access Rights (14,53)
  1.1053  ENTRY(daccess_rights)
  1.1054  	DBG_FAULT(23)
  1.1055 -#ifdef XEN
  1.1056  	mov r31=pr
  1.1057  	;;
  1.1058  	mov r16=cr.isr
  1.1059 @@ -1968,14 +1225,6 @@ ENTRY(daccess_rights)
  1.1060  	movl r20=0x5300
  1.1061  	br.sptk.many fast_access_reflect
  1.1062  	;;
  1.1063 -#else
  1.1064 -	mov r16=cr.ifa
  1.1065 -	rsm psr.dt
  1.1066 -	mov r31=pr
  1.1067 -	;;
  1.1068 -	srlz.d
  1.1069 -	br.sptk.many page_fault
  1.1070 -#endif
  1.1071  END(daccess_rights)
  1.1072  
  1.1073  	.org ia64_ivt+0x5400
  1.1074 @@ -1986,15 +1235,10 @@ ENTRY(general_exception)
  1.1075  	mov r16=cr.isr
  1.1076  	mov r31=pr
  1.1077  	;;
  1.1078 -#ifdef XEN
  1.1079  	cmp4.ge p6,p0=0x20,r16
  1.1080  (p6)	br.sptk.many dispatch_privop_fault
  1.1081  	;;
  1.1082  	FAULT_OR_REFLECT(24)
  1.1083 -#else
  1.1084 -	cmp4.eq p6,p0=0,r16
  1.1085 -(p6)	br.sptk.many dispatch_illegal_op_fault
  1.1086 -#endif
  1.1087  	;;
  1.1088  	mov r19=24		// fault number
  1.1089  	br.sptk.many dispatch_to_fault_handler
  1.1090 @@ -2005,7 +1249,6 @@ END(general_exception)
  1.1091  // 0x5500 Entry 25 (size 16 bundles) Disabled FP-Register (35)
  1.1092  ENTRY(disabled_fp_reg)
  1.1093  	DBG_FAULT(25)
  1.1094 -#ifdef XEN
  1.1095  #if 0				// TODO: can this be removed?
  1.1096  	mov r20=pr
  1.1097  	movl r16=0x2000000000000000
  1.1098 @@ -2028,7 +1271,6 @@ ENTRY(disabled_fp_reg)
  1.1099  //floating_panic:		// TODO: can this be removed?
  1.1100  //	br.sptk.many floating_panic
  1.1101  	;;
  1.1102 -#endif
  1.1103  	rsm psr.dfh		// ensure we can access fph
  1.1104  	;;
  1.1105  	srlz.d
  1.1106 @@ -2054,42 +1296,8 @@ END(nat_consumption)
  1.1107  // 0x5700 Entry 27 (size 16 bundles) Speculation (40)
  1.1108  ENTRY(speculation_vector)
  1.1109  	DBG_FAULT(27)
  1.1110 -#ifdef XEN
  1.1111  	// this probably need not reflect...
  1.1112  	FAULT_OR_REFLECT(27)
  1.1113 -#else
  1.1114 -	/*
  1.1115 -	 * A [f]chk.[as] instruction needs to take the branch to the
  1.1116 -	 * recovery code but this part of the architecture is not 
  1.1117 -	 * implemented in hardware on some CPUs, such as Itanium.  Thus,
  1.1118 -	 * in general we need to emulate the behavior.  IIM contains the
  1.1119 -	 * relative target (not yet sign extended).  So after sign extending 
  1.1120 -	 * it we simply add it to IIP.  We also need to reset the EI field
  1.1121 -	 * of the IPSR to zero, i.e., the slot to restart into.
  1.1122 -	 *
  1.1123 -	 * cr.imm contains zero_ext(imm21)
  1.1124 -	 */
  1.1125 -	mov r18=cr.iim
  1.1126 -	;;
  1.1127 -	mov r17=cr.iip
  1.1128 -	shl r18=r18,43			// put sign bit in position (43=64-21)
  1.1129 -	;;
  1.1130 -
  1.1131 -	mov r16=cr.ipsr
  1.1132 -	shr r18=r18,39			// sign extend (39=43-4)
  1.1133 -	;;
  1.1134 -
  1.1135 -	add r17=r17,r18			// now add the offset
  1.1136 -	;;
  1.1137 -	mov cr.iip=r17
  1.1138 -	dep r16=0,r16,41,2		// clear EI
  1.1139 -	;;
  1.1140 -
  1.1141 -	mov cr.ipsr=r16
  1.1142 -	;;
  1.1143 -
  1.1144 -	rfi				// and go back
  1.1145 -#endif
  1.1146  END(speculation_vector)
  1.1147  
  1.1148  	.org ia64_ivt+0x5800
  1.1149 @@ -2115,14 +1323,7 @@ END(debug_vector)
  1.1150  // 0x5a00 Entry 30 (size 16 bundles) Unaligned Reference (57)
  1.1151  ENTRY(unaligned_access)
  1.1152  	DBG_FAULT(30)
  1.1153 -#ifdef XEN
  1.1154  	FAULT_OR_REFLECT(30)
  1.1155 -#else
  1.1156 -	mov r16=cr.ipsr
  1.1157 -	mov r31=pr		// prepare to save predicates
  1.1158 -	;;
  1.1159 -	br.sptk.many dispatch_unaligned_handler
  1.1160 -#endif
  1.1161  END(unaligned_access)
  1.1162  
  1.1163  	.org ia64_ivt+0x5b00
  1.1164 @@ -2130,11 +1331,7 @@ END(unaligned_access)
  1.1165  // 0x5b00 Entry 31 (size 16 bundles) Unsupported Data Reference (57)
  1.1166  ENTRY(unsupported_data_reference)
  1.1167  	DBG_FAULT(31)
  1.1168 -#ifdef XEN
  1.1169  	FAULT_OR_REFLECT(31)
  1.1170 -#else
  1.1171 -	FAULT(31)
  1.1172 -#endif
  1.1173  END(unsupported_data_reference)
  1.1174  
  1.1175  	.org ia64_ivt+0x5c00
  1.1176 @@ -2142,11 +1339,7 @@ END(unsupported_data_reference)
  1.1177  // 0x5c00 Entry 32 (size 16 bundles) Floating-Point Fault (64)
  1.1178  ENTRY(floating_point_fault)
  1.1179  	DBG_FAULT(32)
  1.1180 -#ifdef XEN
  1.1181  	FAULT_OR_REFLECT(32)
  1.1182 -#else
  1.1183 -	FAULT(32)
  1.1184 -#endif
  1.1185  END(floating_point_fault)
  1.1186  
  1.1187  	.org ia64_ivt+0x5d00
  1.1188 @@ -2154,11 +1347,7 @@ END(floating_point_fault)
  1.1189  // 0x5d00 Entry 33 (size 16 bundles) Floating Point Trap (66)
  1.1190  ENTRY(floating_point_trap)
  1.1191  	DBG_FAULT(33)
  1.1192 -#ifdef XEN
  1.1193  	FAULT_OR_REFLECT(33)
  1.1194 -#else
  1.1195 -	FAULT(33)
  1.1196 -#endif
  1.1197  END(floating_point_trap)
  1.1198  
  1.1199  	.org ia64_ivt+0x5e00
  1.1200 @@ -2166,11 +1355,7 @@ END(floating_point_trap)
  1.1201  // 0x5e00 Entry 34 (size 16 bundles) Lower Privilege Transfer Trap (66)
  1.1202  ENTRY(lower_privilege_trap)
  1.1203  	DBG_FAULT(34)
  1.1204 -#ifdef XEN
  1.1205  	FAULT_OR_REFLECT(34)
  1.1206 -#else
  1.1207 -	FAULT(34)
  1.1208 -#endif
  1.1209  END(lower_privilege_trap)
  1.1210  
  1.1211  	.org ia64_ivt+0x5f00
  1.1212 @@ -2178,11 +1363,7 @@ END(lower_privilege_trap)
  1.1213  // 0x5f00 Entry 35 (size 16 bundles) Taken Branch Trap (68)
  1.1214  ENTRY(taken_branch_trap)
  1.1215  	DBG_FAULT(35)
  1.1216 -#ifdef XEN
  1.1217  	FAULT_OR_REFLECT(35)
  1.1218 -#else
  1.1219 -	FAULT(35)
  1.1220 -#endif
  1.1221  END(taken_branch_trap)
  1.1222  
  1.1223  	.org ia64_ivt+0x6000
  1.1224 @@ -2190,11 +1371,7 @@ END(taken_branch_trap)
  1.1225  // 0x6000 Entry 36 (size 16 bundles) Single Step Trap (69)
  1.1226  ENTRY(single_step_trap)
  1.1227  	DBG_FAULT(36)
  1.1228 -#ifdef XEN
  1.1229  	FAULT_OR_REFLECT(36)
  1.1230 -#else
  1.1231 -	FAULT(36)
  1.1232 -#endif
  1.1233  END(single_step_trap)
  1.1234  
  1.1235  	.org ia64_ivt+0x6100
  1.1236 @@ -2252,11 +1429,7 @@ END(single_step_trap)
  1.1237  //						       73,75,76,77)
  1.1238  ENTRY(ia32_exception)
  1.1239  	DBG_FAULT(45)
  1.1240 -#ifdef XEN
  1.1241  	FAULT_OR_REFLECT(45)
  1.1242 -#else
  1.1243 -	FAULT(45)
  1.1244 -#endif
  1.1245  END(ia32_exception)
  1.1246  
  1.1247  	.org ia64_ivt+0x6a00
  1.1248 @@ -2264,33 +1437,7 @@ END(ia32_exception)
  1.1249  // 0x6a00 Entry 46 (size 16 bundles) IA-32 Intercept  (30,31,59,70,71)
  1.1250  ENTRY(ia32_intercept)
  1.1251  	DBG_FAULT(46)
  1.1252 -#ifdef XEN
  1.1253  	FAULT_OR_REFLECT(46)
  1.1254 -#else
  1.1255 -#ifdef	CONFIG_IA32_SUPPORT
  1.1256 -	mov r31=pr
  1.1257 -	mov r16=cr.isr
  1.1258 -	;;
  1.1259 -	extr.u r17=r16,16,8	// get ISR.code
  1.1260 -	mov r18=ar.eflag
  1.1261 -	mov r19=cr.iim		// old eflag value
  1.1262 -	;;
  1.1263 -	cmp.ne p6,p0=2,r17
  1.1264 -(p6)	br.cond.spnt 1f		// not a system flag fault
  1.1265 -	xor r16=r18,r19
  1.1266 -	;;
  1.1267 -	extr.u r17=r16,18,1	// get the eflags.ac bit
  1.1268 -	;;
  1.1269 -	cmp.eq p6,p0=0,r17
  1.1270 -(p6)	br.cond.spnt 1f		// eflags.ac bit didn't change
  1.1271 -	;;
  1.1272 -	mov pr=r31,-1		// restore predicate registers
  1.1273 -	rfi
  1.1274 -
  1.1275 -1:
  1.1276 -#endif	// CONFIG_IA32_SUPPORT
  1.1277 -	FAULT(46)
  1.1278 -#endif
  1.1279  END(ia32_intercept)
  1.1280  
  1.1281  	.org ia64_ivt+0x6b00
  1.1282 @@ -2298,16 +1445,7 @@ END(ia32_intercept)
  1.1283  // 0x6b00 Entry 47 (size 16 bundles) IA-32 Interrupt  (74)
  1.1284  ENTRY(ia32_interrupt)
  1.1285  	DBG_FAULT(47)
  1.1286 -#ifdef XEN
  1.1287  	FAULT_OR_REFLECT(47)
  1.1288 -#else
  1.1289 -#ifdef CONFIG_IA32_SUPPORT
  1.1290 -	mov r31=pr
  1.1291 -	br.sptk.many dispatch_to_ia32_handler
  1.1292 -#else
  1.1293 -	FAULT(47)
  1.1294 -#endif
  1.1295 -#endif
  1.1296  END(ia32_interrupt)
  1.1297  
  1.1298  	.org ia64_ivt+0x6c00
  1.1299 @@ -2430,7 +1568,6 @@ END(ia32_interrupt)
  1.1300  	DBG_FAULT(67)
  1.1301  	FAULT(67)
  1.1302  
  1.1303 -#ifdef XEN
  1.1304  	.org ia64_ivt+0x8000
  1.1305  GLOBAL_ENTRY(dispatch_reflection)
  1.1306  	/*
  1.1307 @@ -2472,93 +1609,3 @@ GLOBAL_ENTRY(dispatch_slow_hyperprivop)
  1.1308  	;;
  1.1309  	br.sptk.many dispatch_break_fault_post_save
  1.1310  END(dispatch_slow_hyperprivop)
  1.1311 -#endif
  1.1312 -
  1.1313 -#ifdef CONFIG_IA32_SUPPORT
  1.1314 -
  1.1315 -	/*
  1.1316 -	 * There is no particular reason for this code to be here, other 
  1.1317 -	 * than that there happens to be space here that would go unused 
  1.1318 -	 * otherwise.  If this fault ever gets "unreserved", simply move
  1.1319 -	 * the following code to a more suitable spot...
  1.1320 -	 */
  1.1321 -
  1.1322 -	// IA32 interrupt entry point
  1.1323 -
  1.1324 -ENTRY(dispatch_to_ia32_handler)
  1.1325 -	SAVE_MIN
  1.1326 -	;;
  1.1327 -	mov r14=cr.isr
  1.1328 -	ssm psr.ic | PSR_DEFAULT_BITS
  1.1329 -	;;
  1.1330 -	srlz.i			// guarantee that interruption collection is on
  1.1331 -	;;
  1.1332 -(p15)	ssm psr.i
  1.1333 -	adds r3=8,r2		// Base pointer for SAVE_REST
  1.1334 -	;;
  1.1335 -	SAVE_REST
  1.1336 -	;;
  1.1337 -	mov r15=0x80
  1.1338 -	shr r14=r14,16		// Get interrupt number
  1.1339 -	;;
  1.1340 -	cmp.ne p6,p0=r14,r15
  1.1341 -(p6)	br.call.dpnt.many b6=non_ia32_syscall
  1.1342 -
  1.1343 -	adds r14=IA64_PT_REGS_R8_OFFSET + 16,sp	// 16 byte hole per SW 
  1.1344 -						//   conventions
  1.1345 -	adds r15=IA64_PT_REGS_R1_OFFSET + 16,sp
  1.1346 -	;;
  1.1347 -	cmp.eq pSys,pNonSys=r0,r0 // set pSys=1, pNonSys=0
  1.1348 -	ld8 r8=[r14]		// get r8
  1.1349 -	;;
  1.1350 -	st8 [r15]=r8		// save original EAX in r1 (IA32 procs 
  1.1351 -				//   don't use the GP)
  1.1352 -	;;
  1.1353 -	alloc r15=ar.pfs,0,0,6,0	// must be first in an insn group
  1.1354 -	;;
  1.1355 -	ld4 r8=[r14],8		// r8 == eax (syscall number)
  1.1356 -	mov r15=IA32_NR_syscalls
  1.1357 -	;;
  1.1358 -	cmp.ltu.unc p6,p7=r8,r15
  1.1359 -	ld4 out1=[r14],8	// r9 == ecx
  1.1360 -	;;
  1.1361 -	ld4 out2=[r14],8	// r10 == edx
  1.1362 -	;;
  1.1363 -	ld4 out0=[r14]		// r11 == ebx
  1.1364 -	adds r14=(IA64_PT_REGS_R13_OFFSET) + 16,sp
  1.1365 -	;;
  1.1366 -	ld4 out5=[r14],PT(R14)-PT(R13)	// r13 == ebp
  1.1367 -	;;
  1.1368 -	ld4 out3=[r14],PT(R15)-PT(R14)	// r14 == esi
  1.1369 -	adds r2=TI_FLAGS+IA64_TASK_SIZE,r13
  1.1370 -	;;
  1.1371 -	ld4 out4=[r14]		// r15 == edi
  1.1372 -	movl r16=ia32_syscall_table
  1.1373 -	;;
  1.1374 -(p6)	shladd r16=r8,3,r16	// force ni_syscall if not valid syscall number
  1.1375 -	ld4 r2=[r2]		// r2 = current_thread_info()->flags
  1.1376 -	;;
  1.1377 -	ld8 r16=[r16]
  1.1378 -	and r2=_TIF_SYSCALL_TRACEAUDIT,r2	// mask trace or audit
  1.1379 -	;;
  1.1380 -	mov b6=r16
  1.1381 -	movl r15=ia32_ret_from_syscall
  1.1382 -	cmp.eq p8,p0=r2,r0
  1.1383 -	;;
  1.1384 -	mov rp=r15
  1.1385 -(p8)	br.call.sptk.many b6=b6
  1.1386 -	br.cond.sptk ia32_trace_syscall
  1.1387 -
  1.1388 -non_ia32_syscall:
  1.1389 -	alloc r15=ar.pfs,0,0,2,0
  1.1390 -	mov out0=r14				// interrupt #
  1.1391 -	add out1=16,sp				// pointer to pt_regs
  1.1392 -	;;					// avoid WAW on CFM
  1.1393 -	br.call.sptk.many rp=ia32_bad_interrupt
  1.1394 -.ret1:	movl r15=ia64_leave_kernel
  1.1395 -	;;
  1.1396 -	mov rp=r15
  1.1397 -	br.ret.sptk.many rp
  1.1398 -END(dispatch_to_ia32_handler)
  1.1399 -
  1.1400 -#endif /* CONFIG_IA32_SUPPORT */