ia64/xen-unstable

changeset 5601:cef83293394d

bitkeeper revision 1.1726.1.7 (42c194ee68Qy_Egi118UCoJdFnkGPQ)

Add bank switch for hyper_rfi hyperprivop
Signed-off-by: Dan Magenheimer <dan.magenheimer@hp.com>
author:   djm@kirby.fc.hp.com
date:     Tue Jun 28 18:20:30 2005 +0000 (2005-06-28)
parents:  dca57ff5d1c1
children: 2d8651ec10b9
files:    xen/arch/ia64/hyperprivop.S xen/arch/ia64/privop.c
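
Note on the change: the fast rfi path (just_do_rfi) now performs the guest's virtual bank-1 switch itself when a domain does hyper_rfi while still on virtual bank 0, instead of punting such guests to dispatch_break_fault (the old "do it the slow way" banknum check is now #if 0'd out). The new code marks the shared-page bank number as 1 and reloads r16-r31 from the bank-1 save area with bsw.1/ld8.fill; ar.unat handling is still a FIXME. A loose C sketch of that logic follows, with illustrative struct and field names standing in for the real Xen/ia64 shared-page layout (XSI_BANKNUM / XSI_BANK1 offsets), not actual Xen source:

    #include <stdint.h>

    /* Illustrative stand-ins only; the real layout lives in the Xen/ia64
     * shared privileged-register page. */
    struct shared_regs {
        uint32_t banknum;        /* virtual bank the guest believes is live */
        uint64_t bank1_regs[16]; /* saved virtual bank-1 values of r16..r31 */
    };

    /* Stand-in for the physical banked registers seen after bsw.1. */
    static uint64_t phys_bank1[16];

    /* Conceptually what the new just_do_rfi code does before the rfi. */
    static void rfi_bank_switch(struct shared_regs *s)
    {
        if (s->banknum != 0)
            return;              /* guest already switched to bank 1 itself */
        s->banknum = 1;          /* virtual bank 1 is now in effect */
        for (int i = 0; i < 16; i++)
            phys_bank1[i] = s->bank1_regs[i];  /* the bsw.1 + ld8.fill loop */
        /* ar.unat is not handled yet; the assembly marks this FIXME */
    }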
line diff
     1.1 --- a/xen/arch/ia64/hyperprivop.S	Thu Jun 23 18:59:01 2005 +0000
     1.2 +++ b/xen/arch/ia64/hyperprivop.S	Tue Jun 28 18:20:30 2005 +0000
     1.3 @@ -282,20 +282,20 @@ GLOBAL_ENTRY(fast_tick_reflect)
     1.4  #endif
     1.5  	mov r28=IA64_TIMER_VECTOR;;
     1.6  	cmp.ne p6,p0=r28,r30
     1.7 -(p6)	br.cond.sptk.many rp;;
     1.8 +(p6)	br.cond.spnt.few rp;;
     1.9  	movl r20=(PERCPU_ADDR)+IA64_CPUINFO_ITM_NEXT_OFFSET;;
    1.10  	ld8 r21=[r20];;
    1.11  	mov r27=ar.itc;;
    1.12  	cmp.ltu p6,p0=r21,r27
    1.13 -(p6)	br.cond.sptk.many rp;;
    1.14 +(p6)	br.cond.spnt.few rp;;
    1.15  	mov r17=cr.ipsr;;
    1.16  	// slow path if: ipsr.be==1, ipsr.pp==1
    1.17  	extr.u r21=r17,IA64_PSR_BE_BIT,1 ;;
    1.18  	cmp.ne p6,p0=r21,r0
    1.19 -(p6)	br.cond.sptk.many rp;;
    1.20 +(p6)	br.cond.spnt.few rp;;
    1.21  	extr.u r21=r17,IA64_PSR_PP_BIT,1 ;;
    1.22  	cmp.ne p6,p0=r21,r0
    1.23 -(p6)	br.cond.sptk.many rp;;
    1.24 +(p6)	br.cond.spnt.few rp;;
    1.25  #ifdef FAST_REFLECT_CNT
    1.26  	movl r20=fast_reflect_count+((0x3000>>8)*8);;
    1.27  	ld8 r21=[r20];;
    1.28 @@ -309,9 +309,9 @@ GLOBAL_ENTRY(fast_tick_reflect)
    1.29  	adds r20=XSI_ITV_OFS-XSI_PSR_IC_OFS,r18 ;;
    1.30  	ld8 r20=[r20];;
    1.31  	cmp.eq p6,p0=r20,r0	// if cr.itv==0 done
    1.32 -(p6)	br.cond.sptk.many fast_tick_reflect_done;;
    1.33 +(p6)	br.cond.spnt.few fast_tick_reflect_done;;
    1.34  	tbit.nz p6,p0=r20,16;;	// check itv.m (discard) bit
    1.35 -(p6)	br.cond.sptk.many fast_tick_reflect_done;;
    1.36 +(p6)	br.cond.spnt.few fast_tick_reflect_done;;
    1.37  	extr.u r27=r20,0,6	// r27 has low 6 bits of itv.vector
    1.38  	extr.u r26=r20,6,2;;	// r26 has irr index of itv.vector
    1.39  	mov r19=IA64_KR(CURRENT);;
    1.40 @@ -320,7 +320,7 @@ GLOBAL_ENTRY(fast_tick_reflect)
    1.41  	ld8 r24=[r22];;
    1.42  	ld8 r23=[r23];;
    1.43  	cmp.eq p6,p0=r23,r24	// skip if this tick already delivered
    1.44 -(p6)	br.cond.sptk.many fast_tick_reflect_done;;
    1.45 +(p6)	br.cond.spnt.few fast_tick_reflect_done;;
    1.46  	// set irr bit
    1.47  	adds r21=IA64_VCPU_IRR0_OFFSET,r19;
    1.48  	shl r26=r26,3;;
    1.49 @@ -337,20 +337,19 @@ GLOBAL_ENTRY(fast_tick_reflect)
    1.50  	// if interrupted at pl0, we're done
    1.51  	extr.u r16=r17,IA64_PSR_CPL0_BIT,2;;
    1.52  	cmp.eq p6,p0=r16,r0;;
    1.53 -(p6)	br.cond.sptk.many fast_tick_reflect_done;;
    1.54 -	// now deliver to iva+0x3000
    1.55 +(p6)	br.cond.spnt.few fast_tick_reflect_done;;
    1.56 +	// if guest vpsr.i is off, we're done
    1.57 +	adds r21=XSI_PSR_I_OFS-XSI_PSR_IC_OFS,r18 ;;
    1.58 +	ld4 r21=[r21];;
    1.59 +	cmp.eq p6,p0=r21,r0
    1.60 +(p6)	br.cond.spnt.few fast_tick_reflect_done;;
    1.61 +
    1.62 +	// OK, we have a clock tick to deliver to the active domain!
    1.63 +	// so deliver to iva+0x3000
    1.64  	//	r17 == cr.ipsr
    1.65  	//	r18 == XSI_PSR_IC
    1.66  	//	r19 == IA64_KR(CURRENT)
    1.67  	//	r31 == pr
    1.68 -
    1.69 -	// if guest vpsr.i is off, we're done
    1.70 -	adds r21=XSI_PSR_I_OFS-XSI_PSR_IC_OFS,r18 ;;
    1.71 -	ld4 r21=[r21];;
    1.72 -	cmp.eq p6,p0=r21,r0
    1.73 -(p6)	br.cond.sptk.many fast_tick_reflect_done;;
    1.74 -
    1.75 -	// OK, we have a clock tick to deliver to the active domain!
    1.76  	mov r16=cr.isr;;
    1.77  	mov r29=cr.iip;;
    1.78  	adds r21=XSI_IIP_OFS-XSI_PSR_IC_OFS,r18 ;;
    1.79 @@ -456,21 +455,21 @@ GLOBAL_ENTRY(fast_break_reflect)
    1.80  	mov r29=cr.iip;;
    1.81  	extr.u r21=r30,IA64_PSR_BE_BIT,1 ;;
    1.82  	cmp.ne p7,p0=r21,r0 ;;
    1.83 -(p7)	br.sptk.many dispatch_break_fault ;;
    1.84 +(p7)	br.spnt.few dispatch_break_fault ;;
    1.85  	extr.u r21=r30,IA64_PSR_PP_BIT,1 ;;
    1.86  	cmp.ne p7,p0=r21,r0 ;;
    1.87 -(p7)	br.sptk.many dispatch_break_fault ;;
    1.88 +(p7)	br.spnt.few dispatch_break_fault ;;
    1.89  #if 1 /* special handling in case running on simulator */
    1.90  	movl r20=first_break;;
    1.91  	ld4 r23=[r20];;
    1.92  	movl r21=0x80001;
    1.93  	movl r22=0x80002;;
    1.94  	cmp.ne p7,p0=r23,r0;;
    1.95 -(p7)	br.sptk.many dispatch_break_fault ;;
    1.96 +(p7)	br.spnt.few dispatch_break_fault ;;
    1.97  	cmp.eq p7,p0=r21,r17;
    1.98 -(p7)	br.sptk.many dispatch_break_fault ;;
    1.99 +(p7)	br.spnt.few dispatch_break_fault ;;
   1.100  	cmp.eq p7,p0=r22,r17;
   1.101 -(p7)	br.sptk.many dispatch_break_fault ;;
   1.102 +(p7)	br.spnt.few dispatch_break_fault ;;
   1.103  #endif
   1.104  #ifdef FAST_REFLECT_CNT
   1.105  	movl r20=fast_reflect_count+((0x2c00>>8)*8);;
   1.106 @@ -579,24 +578,26 @@ 1:
   1.107  	extr.u r22=r21,IA64_PSR_BE_BIT,1 ;;
   1.108  	// if turning on psr.be, give up for now and do it the slow way
   1.109  	cmp.ne p7,p0=r22,r0
   1.110 -(p7)	br.sptk.many dispatch_break_fault ;;
   1.111 +(p7)	br.spnt.few dispatch_break_fault ;;
   1.112  	// if (!(vpsr.dt && vpsr.rt && vpsr.it)), do it the slow way
   1.113  	movl r20=(IA64_PSR_DT|IA64_PSR_RT|IA64_PSR_IT);;
   1.114  	and r22=r20,r21
   1.115  	;;
   1.116  	cmp.ne p7,p0=r22,r20
   1.117 -(p7)	br.sptk.many dispatch_break_fault ;;
   1.118 +(p7)	br.spnt.few dispatch_break_fault ;;
   1.119  	// if was in metaphys mode, do it the slow way (FIXME later?)
   1.120  	adds r20=XSI_METAPHYS_OFS-XSI_PSR_IC_OFS,r18 ;;
   1.121  	ld4 r20=[r20];;
   1.122  	cmp.ne p7,p0=r20,r0
   1.123 -(p7)	br.sptk.many dispatch_break_fault ;;
   1.124 +(p7)	br.spnt.few dispatch_break_fault ;;
   1.125  	// if domain hasn't already done virtual bank switch
   1.126  	//  do it the slow way (FIXME later?)
   1.127 +#if 0
   1.128  	adds r20=XSI_BANKNUM_OFS-XSI_PSR_IC_OFS,r18 ;;
   1.129  	ld4 r20=[r20];;
   1.130  	cmp.eq p7,p0=r20,r0
   1.131 -(p7)	br.sptk.many dispatch_break_fault ;;
   1.132 +(p7)	br.spnt.few dispatch_break_fault ;;
   1.133 +#endif
   1.134  	// validate vcr.iip, if in Xen range, do it the slow way
   1.135  	adds r20=XSI_IIP_OFS-XSI_PSR_IC_OFS,r18 ;;
   1.136  	ld8 r22=[r20];;
   1.137 @@ -604,7 +605,11 @@ 1:
   1.138  	movl r24=XEN_VIRT_SPACE_HIGH ;;
   1.139  	cmp.ltu p0,p7=r22,r23 ;;	// if !(iip<low) &&
   1.140  (p7)	cmp.geu p0,p7=r22,r24 ;;	//    !(iip>=high)
   1.141 -(p7)	br.sptk.many dispatch_break_fault ;;
   1.142 +(p7)	br.spnt.few dispatch_break_fault ;;
   1.143 +#ifndef RFI_TO_INTERRUPT
   1.144 +	cmp.ne p6,p0=r30,r0
   1.145 +(p6)	br.cond.spnt.few dispatch_break_fault ;;
   1.146 +#endif
   1.147  
   1.148  1:	// OK now, let's do an rfi.
   1.149  #ifdef FAST_HYPERPRIVOP_CNT
   1.150 @@ -613,9 +618,12 @@ 1:	// OK now, let's do an rfi.
   1.151  	adds r23=1,r23;;
   1.152  	st8 [r20]=r23;;
   1.153  #endif
   1.154 +#ifdef RFI_TO_INTERRUPT
   1.155 +	// maybe do an immediate interrupt delivery?
   1.156  	cmp.ne p6,p0=r30,r0
   1.157 -(p6)	br.cond.sptk.many check_extint;
   1.158 -	;;
   1.159 +(p6)	br.cond.spnt.few rfi_check_extint;;
   1.160 +#endif
   1.161 +
   1.162  just_do_rfi:
   1.163  	// r18=&vpsr.i|vpsr.ic, r21==vpsr, r22=vcr.iip
   1.164  	mov cr.iip=r22;;
   1.165 @@ -643,43 +651,75 @@ just_do_rfi:
   1.166  	or r21=r21,r20
   1.167  	;;
   1.168  	mov cr.ipsr=r21
   1.169 -	mov pr=r31,-1
   1.170 +	adds r20=XSI_BANKNUM_OFS-XSI_PSR_IC_OFS,r18 ;;
   1.171 +	ld4 r21=[r20];;
   1.172 +	cmp.ne p7,p0=r21,r0	// domain already did "bank 1 switch?"
   1.173 +(p7)	br.cond.spnt.few 1f;
   1.174 +	// OK, now all set to go except for switch to virtual bank1
   1.175 +	mov r22=1;; st4 [r20]=r22;
   1.176 +	mov r30=r2; mov r29=r3;;
   1.177 +	adds r2=XSI_BANK1_OFS-XSI_PSR_IC_OFS,r18;
   1.178 +	adds r3=(XSI_BANK1_OFS+8)-XSI_PSR_IC_OFS,r18;;
   1.179 +	bsw.1;;
   1.180 +	// FIXME: need to handle ar.unat!
   1.181 +	.mem.offset 0,0; ld8.fill r16=[r2],16 ;
   1.182 +	.mem.offset 8,0; ld8.fill r17=[r3],16 ;;
   1.183 +	.mem.offset 0,0; ld8.fill r18=[r2],16 ;
   1.184 +	.mem.offset 0,0; ld8.fill r19=[r3],16 ;;
   1.185 +	.mem.offset 8,0; ld8.fill r20=[r2],16 ;
   1.186 +	.mem.offset 8,0; ld8.fill r21=[r3],16 ;;
   1.187 +	.mem.offset 8,0; ld8.fill r22=[r2],16 ;
   1.188 +	.mem.offset 8,0; ld8.fill r23=[r3],16 ;;
   1.189 +	.mem.offset 8,0; ld8.fill r24=[r2],16 ;
   1.190 +	.mem.offset 8,0; ld8.fill r25=[r3],16 ;;
   1.191 +	.mem.offset 8,0; ld8.fill r26=[r2],16 ;
   1.192 +	.mem.offset 8,0; ld8.fill r27=[r3],16 ;;
   1.193 +	.mem.offset 8,0; ld8.fill r28=[r2],16 ;
   1.194 +	.mem.offset 8,0; ld8.fill r29=[r3],16 ;;
   1.195 +	.mem.offset 8,0; ld8.fill r30=[r2],16 ;
   1.196 +	.mem.offset 8,0; ld8.fill r31=[r3],16 ;;
   1.197 +	bsw.0 ;;
   1.198 +	mov r2=r30; mov r3=r29;;
   1.199 +1:	mov pr=r31,-1
   1.200  	;;
   1.201  	rfi
   1.202  	;;
   1.203  
   1.204 -check_extint:
   1.205 -	br.sptk.many dispatch_break_fault ;;
   1.206 +#ifdef RFI_TO_INTERRUPT
   1.207 +GLOBAL_ENTRY(rfi_check_extint)
   1.208 +	//br.sptk.many dispatch_break_fault ;;
   1.209  
   1.210  	// r18=&vpsr.i|vpsr.ic, r21==vpsr, r22=vcr.iip
   1.211 +	// make sure none of these get trashed in case going to just_do_rfi
   1.212  	mov r30=IA64_KR(CURRENT);;
   1.213  	adds r24=IA64_VCPU_INSVC3_OFFSET,r30;;
   1.214  	mov r25=192
   1.215 -	adds r22=IA64_VCPU_IRR3_OFFSET,r30;;
   1.216 -	ld8 r23=[r22];;
   1.217 +	adds r16=IA64_VCPU_IRR3_OFFSET,r30;;
   1.218 +	ld8 r23=[r16];;
   1.219  	cmp.eq p6,p0=r23,r0;;
   1.220 -(p6)	adds r22=-8,r22;;
   1.221 +(p6)	adds r16=-8,r16;;
   1.222  (p6)	adds r24=-8,r24;;
   1.223  (p6)	adds r25=-64,r25;;
   1.224 -(p6)	ld8 r23=[r22];;
   1.225 +(p6)	ld8 r23=[r16];;
   1.226  (p6)	cmp.eq p6,p0=r23,r0;;
   1.227 -(p6)	adds r22=-8,r22;;
   1.228 +(p6)	adds r16=-8,r16;;
   1.229  (p6)	adds r24=-8,r24;;
   1.230  (p6)	adds r25=-64,r25;;
   1.231 -(p6)	ld8 r23=[r22];;
   1.232 +(p6)	ld8 r23=[r16];;
   1.233  (p6)	cmp.eq p6,p0=r23,r0;;
   1.234 -(p6)	adds r22=-8,r22;;
   1.235 +(p6)	adds r16=-8,r16;;
   1.236  (p6)	adds r24=-8,r24;;
   1.237  (p6)	adds r25=-64,r25;;
   1.238 -(p6)	ld8 r23=[r22];;
   1.239 +(p6)	ld8 r23=[r16];;
   1.240  (p6)	cmp.eq p6,p0=r23,r0;;
   1.241  	cmp.eq p6,p0=r23,r0
   1.242 -(p6)	br.cond.sptk.many 1f;	// this is actually an error
   1.243 -	// r22 points to non-zero element of irr, r23 has value
   1.244 +(p6)	br.cond.spnt.few just_do_rfi;	// this is actually an error
   1.245 +	// r16 points to non-zero element of irr, r23 has value
   1.246  	// r24 points to corr element of insvc, r25 has elt*64
   1.247  	ld8 r26=[r24];;
   1.248  	cmp.geu p6,p0=r26,r23
   1.249 -(p6)	br.cond.spnt.many 1f;
   1.250 +(p6)	br.cond.spnt.many just_do_rfi;
   1.251 +
   1.252  	// not masked by insvc, get vector number
   1.253  	shr.u r26=r23,1;;
   1.254  	or r26=r23,r26;;
   1.255 @@ -706,21 +746,109 @@ check_extint:
   1.256  	ld8 r20=[r20] ;;
   1.257  	extr.u r28=r20,16,1
   1.258  	extr.u r29=r20,4,4 ;;
   1.259 -	cmp.ne p6,p0=r28,r0	// if tpr.mmi is set, return SPURIOUS
   1.260 -(p6)	br.cond.sptk.many 1f;
   1.261 +	cmp.ne p6,p0=r28,r0	// if tpr.mmi is set, just rfi
   1.262 +(p6)	br.cond.spnt.few just_do_rfi;;
   1.263  	shl r29=r29,4;;
   1.264  	adds r29=15,r29;;
   1.265 -	cmp.ge p6,p0=r29,r26
   1.266 -(p6)	br.cond.sptk.many 1f;
   1.267 -	// OK, have an unmasked vector to process/return
   1.268 -	ld8 r25=[r24];;
   1.269 -	or r25=r25,r27;;
   1.270 -	st8 [r24]=r25;;
   1.271 -	ld8 r25=[r22];;
   1.272 -	andcm r25=r25,r27;;
   1.273 -	st8 [r22]=r25;;
   1.274 -	mov r8=r26;;
   1.275 -	// not done yet
   1.276 +	cmp.ge p6,p0=r29,r26	// if tpr masks interrupt, just rfi
   1.277 +(p6)	br.cond.spnt.few just_do_rfi;;
   1.278 +
   1.279 +// this doesn't work yet (dies early after getting to user mode)
   1.280 +// but happens relatively infrequently, so fix it later.
   1.281 +// NOTE that these will be counted incorrectly for now (for privcnt output)
   1.282 +GLOBAL_ENTRY(rfi_with_interrupt)
   1.283 +#if 1
   1.284 +	br.sptk.many dispatch_break_fault ;;
   1.285 +#endif
   1.286 +
   1.287 +	// OK, have an unmasked vector, so deliver extint to vcr.iva+0x3000
   1.288 +	//	r18 == XSI_PSR_IC
   1.289 +	//	r21 == vipsr (ipsr in shared_mem)
   1.290 +	//	r30 == IA64_KR(CURRENT)
   1.291 +	//	r31 == pr
   1.292 +	mov r17=cr.ipsr;;
   1.293 +	mov r16=cr.isr;;
   1.294 +	// set shared_mem isr
   1.295 +	extr.u r16=r16,38,1;;	// grab cr.isr.ir bit
   1.296 +	dep r16=r16,r0,38,1 ;;	// insert into cr.isr (rest of bits zero)
   1.297 +	extr.u r20=r21,41,2 ;;	// get v(!)psr.ri
   1.298 +	dep r16=r20,r16,41,2 ;; // deposit cr.isr.ei
   1.299 +	adds r22=XSI_ISR_OFS-XSI_PSR_IC_OFS,r18 ;; 
   1.300 +	st8 [r22]=r16 ;;
   1.301 +	// set cr.ipsr (make sure cpl==2!)
   1.302 +	mov r29=r17 ;;
   1.303 +	movl r28=DELIVER_PSR_SET;;
   1.304 +	movl r27=~(DELIVER_PSR_CLR|IA64_PSR_CPL0);;
   1.305 +	or r29=r29,r28;;
   1.306 +	and r29=r29,r27;;
   1.307 +	mov cr.ipsr=r29;;
   1.308 +	// v.ipsr and v.iip are already set (and v.iip validated) as rfi target
   1.309 +	// set shared_mem interrupt_delivery_enabled to 0
   1.310 +	// set shared_mem interrupt_collection_enabled to 0
   1.311 +	st8 [r18]=r0;;
   1.312 +	// cover and set shared_mem precover_ifs to cr.ifs
   1.313 +	// set shared_mem ifs and incomplete_regframe to 0
   1.314 +#if 0
   1.315 +	cover ;;
   1.316 +	mov r20=cr.ifs;;
   1.317 +	adds r22=XSI_INCOMPL_REG_OFS-XSI_PSR_IC_OFS,r18 ;;
   1.318 +	st4 [r22]=r0 ;;
   1.319 +	adds r22=XSI_IFS_OFS-XSI_PSR_IC_OFS,r18 ;;
   1.320 +	st8 [r22]=r0 ;;
   1.321 +	adds r22=XSI_PRECOVER_IFS_OFS-XSI_PSR_IC_OFS,r18 ;;
   1.322 +	st8 [r22]=r20 ;;
   1.323 +	// leave cr.ifs alone for later rfi
   1.324 +#else
   1.325 +	adds r22=XSI_INCOMPL_REG_OFS-XSI_PSR_IC_OFS,r18 ;;
   1.326 +	st4 [r22]=r0 ;;
   1.327 +	adds r22=XSI_IFS_OFS-XSI_PSR_IC_OFS,r18 ;;
   1.328 +	ld8 r20=[r22];;
   1.329 +	st8 [r22]=r0 ;;
   1.330 +	adds r22=XSI_PRECOVER_IFS_OFS-XSI_PSR_IC_OFS,r18 ;;
   1.331 +	st8 [r22]=r20 ;;
   1.332 +#endif
   1.333 +	// set iip to go to domain IVA break instruction vector
   1.334 +	adds r22=IA64_VCPU_IVA_OFFSET,r30;;
   1.335 +	ld8 r23=[r22];;
   1.336 +	movl r24=0x3000;;
   1.337 +	add r24=r24,r23;;
   1.338 +	mov cr.iip=r24;;
   1.339 +#if 0
   1.340 +	// OK, now all set to go except for switch to virtual bank0
   1.341 +	mov r30=r2; mov r29=r3;;
   1.342 +	adds r2=XSI_BANK1_OFS-XSI_PSR_IC_OFS,r18;
   1.343 +	adds r3=(XSI_BANK1_OFS+8)-XSI_PSR_IC_OFS,r18;;
   1.344 +	bsw.1;;
   1.345 +	// FIXME: need to handle ar.unat!
   1.346 +	.mem.offset 0,0; st8.spill [r2]=r16,16;
   1.347 +	.mem.offset 8,0; st8.spill [r3]=r17,16 ;;
   1.348 +	.mem.offset 0,0; st8.spill [r2]=r18,16;
   1.349 +	.mem.offset 8,0; st8.spill [r3]=r19,16 ;;
   1.350 +	.mem.offset 0,0; st8.spill [r2]=r20,16;
   1.351 +	.mem.offset 8,0; st8.spill [r3]=r21,16 ;;
   1.352 +	.mem.offset 0,0; st8.spill [r2]=r22,16;
   1.353 +	.mem.offset 8,0; st8.spill [r3]=r23,16 ;;
   1.354 +	.mem.offset 0,0; st8.spill [r2]=r24,16;
   1.355 +	.mem.offset 8,0; st8.spill [r3]=r25,16 ;;
   1.356 +	.mem.offset 0,0; st8.spill [r2]=r26,16;
   1.357 +	.mem.offset 8,0; st8.spill [r3]=r27,16 ;;
   1.358 +	.mem.offset 0,0; st8.spill [r2]=r28,16;
   1.359 +	.mem.offset 8,0; st8.spill [r3]=r29,16 ;;
   1.360 +	.mem.offset 0,0; st8.spill [r2]=r30,16;
   1.361 +	.mem.offset 8,0; st8.spill [r3]=r31,16 ;;
   1.362 +	movl r31=XSI_IPSR;;
   1.363 +	bsw.0 ;;
   1.364 +	mov r2=r30; mov r3=r29;;
   1.365 +#else
   1.366 +	bsw.1;;
   1.367 +	movl r31=XSI_IPSR;;
   1.368 +	bsw.0 ;;
   1.369 +#endif
   1.370 +	adds r20=XSI_BANKNUM_OFS-XSI_PSR_IC_OFS,r18 ;;
   1.371 +	st4 [r20]=r0 ;;
   1.372 +	mov pr=r31,-1 ;;
   1.373 +	rfi
   1.374 +#endif // RFI_TO_INTERRUPT
   1.375  
   1.376  ENTRY(hyper_cover)
   1.377  #ifdef FAST_HYPERPRIVOP_CNT
   1.378 @@ -919,7 +1047,7 @@ ENTRY(hyper_get_ivr)
   1.379  (p6)	ld8 r23=[r22];;
   1.380  (p6)	cmp.eq p6,p0=r23,r0;;
   1.381  	cmp.eq p6,p0=r23,r0
   1.382 -(p6)	br.cond.sptk.many 1f;	// this is actually an error
   1.383 +(p6)	br.cond.spnt.few 1f;	// this is actually an error
   1.384  	// r22 points to non-zero element of irr, r23 has value
   1.385  	// r24 points to corr element of insvc, r25 has elt*64
   1.386  	ld8 r26=[r24];;
   1.387 @@ -952,11 +1080,11 @@ ENTRY(hyper_get_ivr)
   1.388  	extr.u r28=r20,16,1
   1.389  	extr.u r29=r20,4,4 ;;
   1.390  	cmp.ne p6,p0=r28,r0	// if tpr.mmi is set, return SPURIOUS
   1.391 -(p6)	br.cond.sptk.many 1f;
   1.392 +(p6)	br.cond.spnt.few 1f;
   1.393  	shl r29=r29,4;;
   1.394  	adds r29=15,r29;;
   1.395  	cmp.ge p6,p0=r29,r26
   1.396 -(p6)	br.cond.sptk.many 1f;
   1.397 +(p6)	br.cond.spnt.few 1f;
   1.398  	// OK, have an unmasked vector to process/return
   1.399  	ld8 r25=[r24];;
   1.400  	or r25=r25,r27;;
   1.401 @@ -1016,7 +1144,7 @@ ENTRY(hyper_eoi)
   1.402  (p6)	ld8 r23=[r22];;
   1.403  (p6)	cmp.eq p6,p0=r23,r0;;
   1.404  	cmp.eq p6,p0=r23,r0
   1.405 -(p6)	br.cond.sptk.many 1f;	// this is actually an error
   1.406 +(p6)	br.cond.spnt.few 1f;	// this is actually an error
   1.407  	// r22 points to non-zero element of insvc, r23 has value
   1.408  	shr.u r24=r23,1;;
   1.409  	or r24=r23,r24;;
   1.410 @@ -1146,7 +1274,7 @@ ENTRY(hyper_set_rr)
   1.411  	adds r24=IA64_VCPU_META_SAVED_RR0_OFFSET,r20;;
   1.412  	add r22=r26,r22;;
   1.413  	cmp.geu p6,p0=r22,r23	// if r9.rid + starting_rid >= ending_rid
   1.414 -(p6)	br.cond.sptk.many 1f;	// this is an error, but just ignore/return
   1.415 +(p6)	br.cond.spnt.few 1f;	// this is an error, but just ignore/return
   1.416  	// r21=starting_rid
   1.417  	adds r20=XSI_RR0_OFS-XSI_PSR_IC_OFS,r18 ;;
   1.418  	shl r25=r25,3;;
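
For reference, the pending-interrupt scan that the new rfi_check_extint code above performs (walk irr3 down to irr0 for the highest pending vector, then fall back to just_do_rfi if it is masked by an in-service interrupt or by tpr) reads roughly like the following C sketch. The helper is hypothetical, not Xen source, and it rounds off details such as the exact highest-bit computation in the assembly:

    #include <stdint.h>

    /* Hypothetical helper mirroring the rfi_check_extint scan: returns the
     * pending external-interrupt vector to deliver, or -1 meaning "fall
     * through to just_do_rfi". */
    static int rfi_pending_vector(const uint64_t irr[4],
                                  const uint64_t insvc[4], uint64_t tpr)
    {
        for (int w = 3; w >= 0; w--) {       /* irr3 (vectors 192..255) first */
            if (irr[w] == 0)
                continue;                    /* nothing pending in this word */
            if (insvc[w] >= irr[w])
                return -1;                   /* masked by an in-service interrupt */
            int vec = w * 64 + 63 - __builtin_clzll(irr[w]);
            if (tpr & (1UL << 16))
                return -1;                   /* tpr.mmi set: all interrupts masked */
            if (vec <= (int)(((tpr >> 4) & 0xf) * 16 + 15))
                return -1;                   /* masked by tpr.mic */
            return vec;                      /* deliver via rfi_with_interrupt */
        }
        return -1;                           /* empty irr (the "error" case) */
    }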
     2.1 --- a/xen/arch/ia64/privop.c	Thu Jun 23 18:59:01 2005 +0000
     2.2 +++ b/xen/arch/ia64/privop.c	Tue Jun 28 18:20:30 2005 +0000
     2.3 @@ -693,8 +693,8 @@ priv_handle_op(VCPU *vcpu, REGS *regs, i
     2.4  		break;
     2.5  	}
     2.6          //printf("We who are about do die salute you\n");
     2.7 -	printf("handle_op: can't handle privop at 0x%lx (op=0x%016lx) slot %d (type=%d)\n",
     2.8 -		 iip, (UINT64)inst.inst, slot, slot_type);
     2.9 +	printf("handle_op: can't handle privop at 0x%lx (op=0x%016lx) slot %d (type=%d), ipsr=%p\n",
    2.10 +		 iip, (UINT64)inst.inst, slot, slot_type, ipsr);
    2.11          //printf("vtop(0x%lx)==0x%lx\n", iip, tr_vtop(iip));
    2.12          //thread_mozambique("privop fault\n");
    2.13  	return (IA64_ILLOP_FAULT);
    2.14 @@ -734,6 +734,8 @@ priv_emulate(VCPU *vcpu, REGS *regs, UIN
    2.15  		// update iip/ipsr to point to the next instruction
    2.16  		(void)vcpu_increment_iip(vcpu);
    2.17  	}
    2.18 +	if (fault == IA64_ILLOP_FAULT)
    2.19 +		printf("priv_emulate: priv_handle_op fails, isr=%p\n",isr);
    2.20  	return fault;
    2.21  }
    2.22