ia64/xen-unstable

changeset 15667:77c87416fbd0

[IA64] Set rr0 to rr4 hyperprivop

Implement a set_rr0_to_rr4 hyperprivop that updates rr0 through rr4
in a single call, reducing the number of hyperprivops issued on each
Linux guest context switch.

Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
author Alex Williamson <alex.williamson@hp.com>
date Wed Aug 01 09:40:58 2007 -0600 (2007-08-01)
parents 039f2ccb1e38
children d83c9d87ede4
files xen/arch/ia64/xen/hyperprivop.S xen/arch/ia64/xen/privop.c xen/arch/ia64/xen/vcpu.c xen/include/asm-ia64/vcpu.h xen/include/public/arch-ia64.h
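
Note: the calling convention visible in the handler below (the break
immediate selects the hyperprivop, and r8-r11 plus r14 carry the five
new region register values) suggests roughly the following guest-side
wrapper. This is an illustrative sketch only, not code from this
changeset: the function name is hypothetical, and it assumes GCC's
IA64 register-variable extension and that the public header defining
HYPERPRIVOP_SET_RR0_TO_RR4 is on the include path.

	/* Hypothetical guest-side wrapper for the new hyperprivop. */
	static inline void
	xen_set_rr0_to_rr4(unsigned long val0, unsigned long val1,
			   unsigned long val2, unsigned long val3,
			   unsigned long val4)
	{
		register unsigned long r8 asm ("r8") = val0;	/* rr0 */
		register unsigned long r9 asm ("r9") = val1;	/* rr1 */
		register unsigned long r10 asm ("r10") = val2;	/* rr2 */
		register unsigned long r11 asm ("r11") = val3;	/* rr3 */
		register unsigned long r14 asm ("r14") = val4;	/* rr4 */

		/* A hyperprivop is a break whose immediate is the opcode;
		 * Xen reads it back from cr.iim in fast_hyperprivop. */
		asm volatile ("break %5"
			      : "+r" (r8), "+r" (r9), "+r" (r10),
				"+r" (r11), "+r" (r14)
			      : "i" (HYPERPRIVOP_SET_RR0_TO_RR4)
			      : "memory");
	}
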
line diff
     1.1 --- a/xen/arch/ia64/xen/hyperprivop.S	Tue Jul 31 10:30:40 2007 -0600
     1.2 +++ b/xen/arch/ia64/xen/hyperprivop.S	Wed Aug 01 09:40:58 2007 -0600
     1.3 @@ -41,6 +41,7 @@
     1.4  # define FAST_SSM_I
     1.5  # define FAST_PTC_GA
     1.6  # undef RFI_TO_INTERRUPT // not working yet
     1.7 +# define FAST_SET_RR0_TO_RR4
     1.8  #endif
     1.9  
    1.10  #ifdef CONFIG_SMP
    1.11 @@ -76,7 +77,7 @@
    1.12  //	r16 == cr.isr
    1.13  //	r17 == cr.iim
    1.14  //	r18 == XSI_PSR_IC_OFS
    1.15 -//	r19 == vpsr.ic
    1.16 +//	r19 == ipsr.cpl
    1.17  //	r31 == pr
    1.18  GLOBAL_ENTRY(fast_hyperprivop)
    1.19  	// HYPERPRIVOP_SSM_I?
    1.20 @@ -108,62 +109,67 @@ GLOBAL_ENTRY(fast_hyperprivop)
    1.21  	;;
    1.22  
    1.23  	// HYPERPRIVOP_COVER?
    1.24 -	cmp.eq p7,p6=HYPERPRIVOP_COVER,r17
    1.25 +	cmp.eq p7,p0=HYPERPRIVOP_COVER,r17
    1.26  (p7)	br.sptk.many hyper_cover
    1.27  	;;
    1.28  
    1.29  	// HYPERPRIVOP_SSM_DT?
    1.30 -	cmp.eq p7,p6=HYPERPRIVOP_SSM_DT,r17
    1.31 +	cmp.eq p7,p0=HYPERPRIVOP_SSM_DT,r17
    1.32  (p7)	br.sptk.many hyper_ssm_dt
    1.33  	;;
    1.34  
    1.35  	// HYPERPRIVOP_RSM_DT?
    1.36 -	cmp.eq p7,p6=HYPERPRIVOP_RSM_DT,r17
    1.37 +	cmp.eq p7,p0=HYPERPRIVOP_RSM_DT,r17
    1.38  (p7)	br.sptk.many hyper_rsm_dt
    1.39  	;;
    1.40  
    1.41  	// HYPERPRIVOP_SET_ITM?
    1.42 -	cmp.eq p7,p6=HYPERPRIVOP_SET_ITM,r17
    1.43 +	cmp.eq p7,p0=HYPERPRIVOP_SET_ITM,r17
    1.44  (p7)	br.sptk.many hyper_set_itm
    1.45  	;;
    1.46  
    1.47 +	// HYPERPRIVOP_SET_RR0_TO_RR4?
    1.48 +	cmp.eq p7,p0=HYPERPRIVOP_SET_RR0_TO_RR4,r17
    1.49 +(p7)	br.sptk.many hyper_set_rr0_to_rr4
    1.50 +	;;
    1.51 +
    1.52  	// HYPERPRIVOP_SET_RR?
    1.53 -	cmp.eq p7,p6=HYPERPRIVOP_SET_RR,r17
    1.54 +	cmp.eq p7,p0=HYPERPRIVOP_SET_RR,r17
    1.55  (p7)	br.sptk.many hyper_set_rr
    1.56  	;;
    1.57  
    1.58  	// HYPERPRIVOP_GET_RR?
    1.59 -	cmp.eq p7,p6=HYPERPRIVOP_GET_RR,r17
    1.60 +	cmp.eq p7,p0=HYPERPRIVOP_GET_RR,r17
    1.61  (p7)	br.sptk.many hyper_get_rr
    1.62  	;;
    1.63  
    1.64  	// HYPERPRIVOP_GET_PSR?
    1.65 -	cmp.eq p7,p6=HYPERPRIVOP_GET_PSR,r17
    1.66 +	cmp.eq p7,p0=HYPERPRIVOP_GET_PSR,r17
    1.67  (p7)	br.sptk.many hyper_get_psr
    1.68  	;;
    1.69  
    1.70  	// HYPERPRIVOP_PTC_GA?
    1.71 -	cmp.eq p7,p6=HYPERPRIVOP_PTC_GA,r17
    1.72 +	cmp.eq p7,p0=HYPERPRIVOP_PTC_GA,r17
    1.73  (p7)	br.sptk.many hyper_ptc_ga
    1.74  	;;
    1.75  
    1.76  	// HYPERPRIVOP_ITC_D?
    1.77 -	cmp.eq p7,p6=HYPERPRIVOP_ITC_D,r17
    1.78 +	cmp.eq p7,p0=HYPERPRIVOP_ITC_D,r17
    1.79  (p7)	br.sptk.many hyper_itc_d
    1.80  	;;
    1.81  
    1.82  	// HYPERPRIVOP_ITC_I?
    1.83 -	cmp.eq p7,p6=HYPERPRIVOP_ITC_I,r17
    1.84 +	cmp.eq p7,p0=HYPERPRIVOP_ITC_I,r17
    1.85  (p7)	br.sptk.many hyper_itc_i
    1.86  	;;
    1.87  
    1.88  	// HYPERPRIVOP_THASH?
    1.89 -	cmp.eq p7,p6=HYPERPRIVOP_THASH,r17
    1.90 +	cmp.eq p7,p0=HYPERPRIVOP_THASH,r17
    1.91  (p7)	br.sptk.many hyper_thash
    1.92  	;;
    1.93  
    1.94  	// HYPERPRIVOP_SET_KR?
    1.95 -	cmp.eq p7,p6=HYPERPRIVOP_SET_KR,r17
    1.96 +	cmp.eq p7,p0=HYPERPRIVOP_SET_KR,r17
    1.97  (p7)	br.sptk.many hyper_set_kr
    1.98  	;;
    1.99  
   1.100 @@ -208,7 +214,7 @@ END(fast_hyperprivop)
   1.101  //	r16 == cr.isr
   1.102  //	r17 == cr.iim
   1.103  //	r18 == XSI_PSR_IC
   1.104 -//	r19 == vpsr.ic 
   1.105 +//	r19 == ipsr.cpl 
   1.106  //	r31 == pr
   1.107  ENTRY(hyper_ssm_i)
   1.108  #ifndef FAST_SSM_I
   1.109 @@ -545,7 +551,7 @@ END(fast_tick_reflect)
   1.110  //	r16 == cr.isr
   1.111  //	r17 == cr.iim
   1.112  //	r18 == XSI_PSR_IC
   1.113 -//	r19 == vpsr.ic
   1.114 +//	r19 == ipsr.cpl
   1.115  //	r31 == pr
   1.116  GLOBAL_ENTRY(fast_break_reflect)
   1.117  #ifndef FAST_BREAK // see beginning of file
   1.118 @@ -1644,6 +1650,244 @@ 1:	mov r24=cr.ipsr
   1.119  	;;
   1.120  END(hyper_set_rr)
   1.121  
   1.122 +// r8  = val0
   1.123 +// r9  = val1
   1.124 +// r10 = val2
   1.125 +// r11 = val3
   1.126 +// r14 = val4
   1.127 +// mov  rr[0x0000000000000000UL] = r8
   1.128 +// mov  rr[0x2000000000000000UL] = r9
   1.129 +// mov  rr[0x4000000000000000UL] = r10
   1.130 +// mov  rr[0x6000000000000000UL] = r11
   1.131 +// mov  rr[0x8000000000000000UL] = r14
   1.132 +ENTRY(hyper_set_rr0_to_rr4)
   1.133 +#ifndef FAST_SET_RR0_TO_RR4
   1.134 +	br.spnt.few dispatch_break_fault ;;
   1.135 +#endif
   1.136 +#ifdef FAST_HYPERPRIVOP_CNT
   1.137 +	movl r20=FAST_HYPERPRIVOP_PERFC(HYPERPRIVOP_SET_RR0_TO_RR4);;
   1.138 +	ld4 r21=[r20];;
   1.139 +	adds r21=1,r21;;
   1.140 +	st4 [r20]=r21;;
   1.141 +#endif
   1.142 +	movl r17=THIS_CPU(cpu_kr)+IA64_KR_CURRENT_OFFSET;;
   1.143 +	ld8 r17=[r17];;
   1.144 +
   1.145 +	adds r21=IA64_VCPU_STARTING_RID_OFFSET,r17
   1.146 +	adds r25=IA64_VCPU_ENDING_RID_OFFSET,r17
   1.147 +	;; 
   1.148 +	ld4 r22=[r21] // r22 = current->starting_rid
   1.149 +	extr.u r26=r8,8,24	// r26 = r8.rid
   1.150 +	extr.u r27=r9,8,24	// r27 = r9.rid
   1.151 +	ld4 r23=[r25] // r23 = current->ending_rid
   1.152 +	extr.u r28=r10,8,24	// r28 = r10.rid
   1.153 +	extr.u r29=r11,8,24	// r29 = r11.rid
   1.154 +	adds r24=IA64_VCPU_META_SAVED_RR0_OFFSET,r17
   1.155 +	extr.u r30=r14,8,24	// r30 = r14.rid
   1.156 +	;; 
   1.157 +	add r16=r26,r22
   1.158 +	add r17=r27,r22
   1.159 +	add r19=r28,r22
   1.160 +	add r20=r29,r22
   1.161 +	add r21=r30,r22	
   1.162 +	;; 
   1.163 +	cmp.geu p6,p0=r16,r23	// if r8.rid + starting_rid >= ending_rid
   1.164 +	cmp.geu p7,p0=r17,r23	// if r9.rid + starting_rid >= ending_rid
   1.165 +	cmp.geu p8,p0=r19,r23	// if r10.rid + starting_rid >= ending_rid
   1.166 +(p6)	br.cond.spnt.few 1f	// this is an error, but just ignore/return
   1.167 +(p7)	br.cond.spnt.few 1f	// this is an error, but just ignore/return
   1.168 +	cmp.geu p9,p0=r20,r23	// if r11.rid + starting_rid >= ending_rid
   1.169 +(p8)	br.cond.spnt.few 1f	// this is an error, but just ignore/return
   1.170 +(p9)	br.cond.spnt.few 1f	// this is an error, but just ignore/return
   1.171 +	cmp.geu p10,p0=r21,r23	// if r14.rid + starting_rid >= ending_rid
   1.172 +(p10)	br.cond.spnt.few 1f	// this is an error, but just ignore/return
   1.173 +	
   1.174 +	mov r25=1
   1.175 +	adds r22=XSI_RR0_OFS-XSI_PSR_IC_OFS,r18
   1.176 +	;;
   1.177 +	shl r30=r25,61	// r30 = 0x2000000000000000
   1.178 +
   1.179 +#if 0
   1.180 +	// simple plain version
   1.181 +	// rr0
   1.182 +	st8 [r22]=r8, 8 // current->rrs[0] = r8
   1.183 +
   1.184 +	mov r26=0	// r26=0x0000000000000000
   1.185 +	extr.u r27=r16,0,8
   1.186 +	extr.u r28=r16,8,8
   1.187 +	extr.u r29=r16,16,8;;
   1.188 +	dep.z r23=PAGE_SHIFT,2,6;;
   1.189 +	dep r23=-1,r23,0,1;;	// mangling is swapping bytes 1 & 3
   1.190 +	dep r23=r27,r23,24,8;;
   1.191 +	dep r23=r28,r23,16,8;;
   1.192 +	dep r23=r29,r23,8,8;; 
   1.193 +	st8 [r24]=r23		// save for metaphysical
   1.194 +	mov rr[r26]=r23
   1.195 +	dv_serialize_data
   1.196 +
   1.197 +	// rr1
   1.198 +	st8 [r22]=r9, 8 // current->rrs[1] = r9
   1.199 +	add r26=r26,r30	// r26 = 0x2000000000000000
   1.200 +	extr.u r27=r17,0,8
   1.201 +	extr.u r28=r17,8,8
   1.202 +	extr.u r29=r17,16,8;;
   1.203 +	dep.z r23=PAGE_SHIFT,2,6;;
   1.204 +	dep r23=-1,r23,0,1;;	// mangling is swapping bytes 1 & 3
   1.205 +	dep r23=r27,r23,24,8;;
   1.206 +	dep r23=r28,r23,16,8;;
   1.207 +	dep r23=r29,r23,8,8;; 
   1.208 +	mov rr[r26]=r23
   1.209 +	dv_serialize_data
   1.210 +
   1.211 +	// rr2
   1.212 +	st8 [r22]=r10, 8 // current->rrs[2] = r10
   1.213 +	add r26=r26,r30	// r26 = 0x4000000000000000
   1.214 +	extr.u r27=r19,0,8
   1.215 +	extr.u r28=r19,8,8
   1.216 +	extr.u r29=r19,16,8;;
   1.217 +	dep.z r23=PAGE_SHIFT,2,6;;
   1.218 +	dep r23=-1,r23,0,1;;	// mangling is swapping bytes 1 & 3
   1.219 +	dep r23=r27,r23,24,8;;
   1.220 +	dep r23=r28,r23,16,8;;
   1.221 +	dep r23=r29,r23,8,8;; 
   1.222 +	mov rr[r26]=r23
   1.223 +	dv_serialize_data
   1.224 +
   1.225 +	// rr3
   1.226 +	st8 [r22]=r11, 8 // current->rrs[3] = r11
   1.227 +
   1.228 +	add r26=r26,r30	// r26 = 0x6000000000000000
   1.229 +	extr.u r27=r20,0,8
   1.230 +	extr.u r28=r20,8,8
   1.231 +	extr.u r29=r20,16,8;;
   1.232 +	dep.z r23=PAGE_SHIFT,2,6;;
   1.233 +	dep r23=-1,r23,0,1;;	// mangling is swapping bytes 1 & 3
   1.234 +	dep r23=r27,r23,24,8;;
   1.235 +	dep r23=r28,r23,16,8;;
   1.236 +	dep r23=r29,r23,8,8;; 
   1.237 +	mov rr[r26]=r23
   1.238 +	dv_serialize_data
   1.239 +	
   1.240 +	// rr4
   1.241 +	st8 [r22]=r14 // current->rrs[4] = r14
   1.242 +
   1.243 +	add r26=r26,r30	// r26 = 0x8000000000000000
   1.244 +	extr.u r27=r21,0,8
   1.245 +	extr.u r28=r21,8,8
   1.246 +	extr.u r29=r21,16,8;;
   1.247 +	dep.z r23=PAGE_SHIFT,2,6;;
   1.248 +	dep r23=-1,r23,0,1;;	// mangling is swapping bytes 1 & 3
   1.249 +	dep r23=r27,r23,24,8;;
   1.250 +	dep r23=r28,r23,16,8;;
   1.251 +	dep r23=r29,r23,8,8;; 
   1.252 +	mov rr[r26]=r23
   1.253 +	dv_serialize_data
   1.254 +#else
   1.255 +	// shuffled version
   1.256 +	// rr0
   1.257 +	// uses r27, r28, r29 for mangling
   1.258 +	//      r23           for mangled value
   1.259 +	st8 [r22]=r8, 8 // current->rrs[0] = r8
   1.260 +	mov r26=0	// r26=0x0000000000000000
   1.261 +	extr.u r27=r16,0,8
   1.262 +	extr.u r28=r16,8,8
   1.263 +	extr.u r29=r16,16,8
   1.264 +	dep.z r23=PAGE_SHIFT,2,6;;
   1.265 +	dep r23=-1,r23,0,1;;	// mangling is swapping bytes 1 & 3
   1.266 +	extr.u r25=r17,0,8
   1.267 +	dep r23=r27,r23,24,8;;
   1.268 +	dep r23=r28,r23,16,8;;
   1.269 +	dep r23=r29,r23,8,8;; 
   1.270 +	st8 [r24]=r23		// save for metaphysical
   1.271 +	mov rr[r26]=r23
   1.272 +	dv_serialize_data
   1.273 +
    1.274 +	// r16, r24, r25 are usable.
   1.275 +	// rr1
   1.276 +	// uses r25, r28, r29 for mangling
   1.277 +	//      r23           for mangled value
   1.278 +	extr.u r28=r17,8,8
   1.279 +	st8 [r22]=r9, 8 // current->rrs[1] = r9
   1.280 +	extr.u r29=r17,16,8 ;; 
   1.281 +	dep.z r23=PAGE_SHIFT,2,6;;
   1.282 +	add r26=r26,r30	// r26 = 0x2000000000000000
   1.283 +	dep r23=-1,r23,0,1;;	// mangling is swapping bytes 1 & 3
   1.284 +	extr.u r24=r19,8,8
   1.285 +	extr.u r16=r19,0,8
   1.286 +	dep r23=r25,r23,24,8;;
   1.287 +	dep r23=r28,r23,16,8;;
   1.288 +	dep r23=r29,r23,8,8;; 
   1.289 +	mov rr[r26]=r23
   1.290 +	dv_serialize_data
   1.291 +
    1.292 +	// r16, r17, r24, r25 are usable
   1.293 +	// rr2
   1.294 +	// uses r16, r24, r29 for mangling
   1.295 +	//      r17           for mangled value
   1.296 +	extr.u r29=r19,16,8
   1.297 +	extr.u r27=r20,0,8
   1.298 +	st8 [r22]=r10, 8 // current->rrs[2] = r10
   1.299 +	dep.z r17=PAGE_SHIFT,2,6;;
   1.300 +	add r26=r26,r30	// r26 = 0x4000000000000000	
   1.301 +	dep r17=-1,r17,0,1;;	// mangling is swapping bytes 1 & 3
   1.302 +	dep r17=r16,r17,24,8;;
   1.303 +	dep r17=r24,r17,16,8;;
   1.304 +	dep r17=r29,r17,8,8;; 
   1.305 +	mov rr[r26]=r17
   1.306 +	dv_serialize_data
   1.307 +
    1.308 +	// r16, r17, r19, r24, r25 are usable
   1.309 +	// rr3
   1.310 +	// uses r27, r28, r29 for mangling
   1.311 +	//      r23           for mangled value
   1.312 +	extr.u r28=r20,8,8
   1.313 +	extr.u r29=r20,16,8
   1.314 +	st8 [r22]=r11, 8 // current->rrs[3] = r11
   1.315 +	extr.u r16=r21,0,8
   1.316 +	dep.z r23=PAGE_SHIFT,2,6;;
   1.317 +	add r26=r26,r30	// r26 = 0x6000000000000000
   1.318 +	dep r23=-1,r23,0,1;;	// mangling is swapping bytes 1 & 3
   1.319 +	dep r23=r27,r23,24,8;;
   1.320 +	dep r23=r28,r23,16,8;;
   1.321 +	dep r23=r29,r23,8,8;; 
   1.322 +	mov rr[r26]=r23
   1.323 +	dv_serialize_data
   1.324 +	
    1.325 +	// r16, r17, r19, r20, r24, r25 are usable
   1.326 +	// rr4
   1.327 +	// uses r16, r17, r24 for mangling
   1.328 +	//      r25           for mangled value
   1.329 +	extr.u r17=r21,8,8
   1.330 +	extr.u r24=r21,16,8
   1.331 +	st8 [r22]=r14 // current->rrs[4] = r14
   1.332 +	dep.z r25=PAGE_SHIFT,2,6;;
   1.333 +	add r26=r26,r30	// r26 = 0x8000000000000000
   1.334 +	dep r25=-1,r25,0,1;;	// mangling is swapping bytes 1 & 3
   1.335 +	dep r25=r16,r25,24,8;;
   1.336 +	dep r25=r17,r25,16,8;;
   1.337 +	dep r25=r24,r25,8,8;; 
   1.338 +	mov rr[r26]=r25
   1.339 +	dv_serialize_data
   1.340 +#endif
   1.341 +
   1.342 +	// done, mosey on back
   1.343 +1:	mov r24=cr.ipsr
   1.344 +	mov r25=cr.iip;;
   1.345 +	extr.u r26=r24,41,2 ;;
   1.346 +	cmp.eq p6,p7=2,r26 ;;
   1.347 +(p6)	mov r26=0
   1.348 +(p6)	adds r25=16,r25
   1.349 +(p7)	adds r26=1,r26
   1.350 +	;;
   1.351 +	dep r24=r26,r24,41,2
   1.352 +	;;
   1.353 +	mov cr.ipsr=r24
   1.354 +	mov cr.iip=r25
   1.355 +	mov pr=r31,-1 ;;
   1.356 +	rfi
   1.357 +	;;
   1.358 +END(hyper_set_rr0_to_rr4)
   1.359 +
   1.360  ENTRY(hyper_set_kr)
   1.361  	extr.u r25=r8,3,61;;
   1.362  	cmp.ne p7,p0=r0,r25	// if kr# > 7, go slow way
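
Note: the dep/extr choreography in hyper_set_rr0_to_rr4 is dense, so
here is an illustrative C model of what the fast path computes (a
sketch only, not code from this changeset; the function names are
hypothetical). The region register layout is ve (bit 0), ps (bits
2-7) and rid (bits 8-31); "mangling is swapping bytes 1 & 3" means
the low and high RID bytes trade places within the rr value, as in
Xen's vmMangleRID().

	/* Build a machine rr value: ve = 1, ps = page_shift, RID bytes
	 * swapped -- the C shape of the dep.z/dep sequence above. */
	static unsigned long mangle_rr(unsigned long rid,
				       unsigned long page_shift)
	{
		unsigned long rr;

		rr  = page_shift << 2;		 /* dep.z r23=PAGE_SHIFT,2,6 */
		rr |= 1;			 /* dep r23=-1,r23,0,1 (ve)  */
		rr |= (rid & 0xff) << 24;	 /* RID byte 0 -> rr byte 3  */
		rr |= ((rid >> 8) & 0xff) << 16; /* RID byte 1 stays put     */
		rr |= ((rid >> 16) & 0xff) << 8; /* RID byte 2 -> rr byte 1  */
		return rr;
	}

	/* Whole-operation model: offset each guest RID by starting_rid
	 * and validate all five against ending_rid before touching
	 * anything, exactly as the predicated compares do above. */
	static int set_rr0_to_rr4_model(unsigned long rr[5],
					const unsigned long val[5],
					unsigned long starting_rid,
					unsigned long ending_rid,
					unsigned long page_shift)
	{
		unsigned long rid[5];
		int i;

		for (i = 0; i < 5; i++) {
			rid[i] = ((val[i] >> 8) & 0xffffffUL) + starting_rid;
			if (rid[i] >= ending_rid)
				return 0; /* an error, but just ignore/return */
		}
		for (i = 0; i < 5; i++)
			rr[i] = mangle_rr(rid[i], page_shift);
		return 1;
	}
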
     2.1 --- a/xen/arch/ia64/xen/privop.c	Tue Jul 31 10:30:40 2007 -0600
     2.2 +++ b/xen/arch/ia64/xen/privop.c	Wed Aug 01 09:40:58 2007 -0600
     2.3 @@ -895,6 +895,10 @@ int ia64_hyperprivop(unsigned long iim, 
     2.4  		vcpu_get_psr_masked(v, &val);
     2.5  		regs->r8 = val;
     2.6  		return 1;
     2.7 +	case HYPERPRIVOP_SET_RR0_TO_RR4:
     2.8 +		vcpu_set_rr0_to_rr4(v, regs->r8, regs->r9, regs->r10,
     2.9 +				    regs->r11, regs->r14);
    2.10 +		return 1;
    2.11  	}
    2.12  	return 0;
    2.13  }
     3.1 --- a/xen/arch/ia64/xen/vcpu.c	Tue Jul 31 10:30:40 2007 -0600
     3.2 +++ b/xen/arch/ia64/xen/vcpu.c	Wed Aug 01 09:40:58 2007 -0600
     3.3 @@ -2107,6 +2107,30 @@ IA64FAULT vcpu_get_rr(VCPU * vcpu, u64 r
     3.4  	return IA64_NO_FAULT;
     3.5  }
     3.6  
     3.7 +IA64FAULT vcpu_set_rr0_to_rr4(VCPU * vcpu, u64 val0, u64 val1, u64 val2,
     3.8 +			      u64 val3, u64 val4)
     3.9 +{
    3.10 +	u64 reg0 = 0x0000000000000000UL;
    3.11 +	u64 reg1 = 0x2000000000000000UL;
    3.12 +	u64 reg2 = 0x4000000000000000UL;
    3.13 +	u64 reg3 = 0x6000000000000000UL;
    3.14 +	u64 reg4 = 0x8000000000000000UL;
    3.15 +
    3.16 +	PSCB(vcpu, rrs)[reg0 >> 61] = val0;
    3.17 +	PSCB(vcpu, rrs)[reg1 >> 61] = val1;
    3.18 +	PSCB(vcpu, rrs)[reg2 >> 61] = val2;
    3.19 +	PSCB(vcpu, rrs)[reg3 >> 61] = val3;
    3.20 +	PSCB(vcpu, rrs)[reg4 >> 61] = val4;
    3.21 +	if (vcpu == current) {
    3.22 +		set_one_rr(reg0, val0);
    3.23 +		set_one_rr(reg1, val1);
    3.24 +		set_one_rr(reg2, val2);
    3.25 +		set_one_rr(reg3, val3);
    3.26 +		set_one_rr(reg4, val4);
    3.27 +	}
    3.28 +	return IA64_NO_FAULT;
    3.29 +}
    3.30 +
    3.31  /**************************************************************************
    3.32   VCPU protection key register access routines
    3.33  **************************************************************************/
     4.1 --- a/xen/include/asm-ia64/vcpu.h	Tue Jul 31 10:30:40 2007 -0600
     4.2 +++ b/xen/include/asm-ia64/vcpu.h	Wed Aug 01 09:40:58 2007 -0600
     4.3 @@ -124,6 +124,8 @@ extern IA64FAULT vcpu_bsw1(VCPU * vcpu);
     4.4  extern IA64FAULT vcpu_set_rr(VCPU * vcpu, u64 reg, u64 val);
     4.5  extern IA64FAULT vcpu_get_rr(VCPU * vcpu, u64 reg, u64 * pval);
     4.6  extern IA64FAULT vcpu_get_rr_ve(VCPU * vcpu, u64 vadr);
     4.7 +extern IA64FAULT vcpu_set_rr0_to_rr4(VCPU * vcpu, u64 val0, u64 val1,
     4.8 +				     u64 val2, u64 val3, u64 val4);
     4.9  /* protection key registers */
    4.10  extern void vcpu_pkr_load_regs(VCPU * vcpu);
    4.11  static inline int vcpu_pkr_in_use(VCPU * vcpu)
     5.1 --- a/xen/include/public/arch-ia64.h	Tue Jul 31 10:30:40 2007 -0600
     5.2 +++ b/xen/include/public/arch-ia64.h	Wed Aug 01 09:40:58 2007 -0600
     5.3 @@ -543,7 +543,8 @@ struct xen_ia64_boot_param {
     5.4  #define HYPERPRIVOP_SET_EFLAG		(HYPERPRIVOP_START + 0x16)
     5.5  #define HYPERPRIVOP_RSM_BE		(HYPERPRIVOP_START + 0x17)
     5.6  #define HYPERPRIVOP_GET_PSR		(HYPERPRIVOP_START + 0x18)
     5.7 -#define HYPERPRIVOP_MAX			(0x19)
     5.8 +#define HYPERPRIVOP_SET_RR0_TO_RR4	(HYPERPRIVOP_START + 0x19)
     5.9 +#define HYPERPRIVOP_MAX			(0x1a)
    5.10  
    5.11  /* Fast and light hypercalls.  */
    5.12  #define __HYPERVISOR_ia64_fast_eoi	__HYPERVISOR_arch_1
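
Note: the epilogue at local label 1: in hyper_set_rr0_to_rr4 (shared
boilerplate across the fast hyperprivops) steps the guest past the
break that raised the fault. IA64 packs three instruction slots per
16-byte bundle, indexed by ipsr.ri (bits 41-42). A rough C rendering
of that extr.u/dep sequence, with hypothetical names, purely for
illustration:

	/* Advance the guest instruction pointer one slot, wrapping
	 * from slot 2 to slot 0 of the next bundle. */
	static void skip_break(unsigned long *ipsr, unsigned long *iip)
	{
		unsigned long ri = (*ipsr >> 41) & 3; /* extr.u r26=r24,41,2 */

		if (ri == 2) {		/* last slot: go to next bundle */
			ri = 0;
			*iip += 16;
		} else {
			ri++;
		}
		/* dep r24=r26,r24,41,2 */
		*ipsr = (*ipsr & ~(3UL << 41)) | (ri << 41);
	}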