ia64/xen-unstable: xen/arch/ia64/vmx/optvfault.S @ 16623:9152cf7f5b82

[IA64] Fix vmx_asm_thash typo

Signed-off-by: Kouya Shimura <kouya@jp.fujitsu.com>
author   Alex Williamson <alex.williamson@hp.com>
date     Fri Dec 14 10:26:06 2007 -0700
parents  359484cee7d9
children 2900e4dacaa7

/*
 * arch/ia64/vmx/optvfault.S
 * optimized virtualization fault handlers
 *
 * Copyright (C) 2006 Intel Co
 * Xuefei Xu (Anthony Xu) <anthony.xu@intel.com>
 */

#include <linux/config.h>
#include <asm/config.h>
#include <asm/pgtable.h>
#include <asm/asmmacro.h>
#include <asm/kregs.h>
#include <asm/offsets.h>
#include <asm/percpu.h>
#include <asm/processor.h>
#include <asm/vmx_vpd.h>
#include <asm/vmx_pal_vsa.h>
#include <asm/asm-offsets.h>
#include <asm-ia64/vmx_mm_def.h>
#include <asm-ia64/vmx_phy_mode.h>

#define ACCE_MOV_FROM_AR
#define ACCE_MOV_FROM_RR
#define ACCE_MOV_TO_RR
#define ACCE_RSM
#define ACCE_SSM
#define ACCE_MOV_TO_PSR
#define ACCE_THASH

// Inputs are: r21 (= current), r24 (= cause), r25 (= insn), r31 (= saved pr)

// mov r1=ar3 (only itc is virtualized)
GLOBAL_ENTRY(vmx_asm_mov_from_ar)
#ifndef ACCE_MOV_FROM_AR
    br.many vmx_virtualization_fault_back
#endif
    add r18=VCPU_VTM_OFFSET_OFS,r21
    add r16=VCPU_VTM_LAST_ITC_OFS,r21
    extr.u r17=r25,6,7
    ;;
    ld8 r18=[r18]
    mov r19=ar.itc
    mov r24=b0
    ;;
    ld8 r16=[r16]
    add r19=r19,r18
    movl r20=asm_mov_to_reg
    ;;
    adds r30=vmx_resume_to_guest-asm_mov_to_reg,r20
    shladd r17=r17,4,r20
    cmp.gtu p6,p0=r16,r19
    ;;
    (p6) mov r19=r16
    mov b0=r17
    br.sptk.few b0
    ;;
END(vmx_asm_mov_from_ar)
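
// The fast path above reads the virtualized ITC.  Roughly, as an
// illustrative C-like sketch (field names follow the *_OFS offsets used
// above):
//     guest_itc = ar.itc + vtm_offset;        // per-vcpu ITC offset
//     if (guest_itc < vtm_last_itc)
//         guest_itc = vtm_last_itc;           // keep the guest ITC monotonic
//     GR[r1] = guest_itc;                     // via the asm_mov_to_reg table
// The destination register number (r1) comes from bits 6..12 of the
// faulting instruction held in r25.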

// mov r1=rr[r3]
GLOBAL_ENTRY(vmx_asm_mov_from_rr)
#ifndef ACCE_MOV_FROM_RR
    br.many vmx_virtualization_fault_back
#endif
    extr.u r16=r25,20,7
    extr.u r17=r25,6,7
    movl r20=asm_mov_from_reg
    ;;
    adds r30=vmx_asm_mov_from_rr_back_1-asm_mov_from_reg,r20
    shladd r16=r16,4,r20
    mov r24=b0
    ;;
    add r27=VCPU_VRR0_OFS,r21
    mov b0=r16
    br.many b0
    ;;
vmx_asm_mov_from_rr_back_1:
    adds r30=vmx_resume_to_guest-asm_mov_from_reg,r20
    adds r22=asm_mov_to_reg-asm_mov_from_reg,r20
    shr.u r26=r19,61
    ;;
    shladd r17=r17,4,r22
    shladd r27=r26,3,r27
    ;;
    ld8 r19=[r27]
    mov b0=r17
    br.many b0
END(vmx_asm_mov_from_rr)
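
// The emulation above returns the guest's virtual region register.  As an
// illustrative C-like sketch (vrr[] is the per-vcpu shadow array at
// VCPU_VRR0_OFS, also written by vmx_asm_mov_to_rr below):
//     GR[r1] = vcpu->arch.arch_vmx.vrr[ GR[r3] >> 61 ];
// Each MOV_FROM_REG/MOV_TO_REG table entry is a single 16-byte bundle,
// hence the "shladd rX=reg,4,table_base" address computations.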

// mov rr[r3]=r2
GLOBAL_ENTRY(vmx_asm_mov_to_rr)
#ifndef ACCE_MOV_TO_RR
    br.many vmx_virtualization_fault_back
#endif
    add r22=IA64_VCPU_DOMAIN_OFFSET,r21
    extr.u r16=r25,20,7 // r3
    extr.u r17=r25,13,7 // r2
    ;;
    ld8 r22=[r22] // Get domain
    movl r20=asm_mov_from_reg
    ;;
    adds r30=vmx_asm_mov_to_rr_back_1-asm_mov_from_reg,r20
    shladd r16=r16,4,r20 // get r3
    mov r18=b0 // save b0
    ;;
    add r27=VCPU_VRR0_OFS,r21
    mov b0=r16
    br.many b0
    ;;
vmx_asm_mov_to_rr_back_1:
    adds r30=vmx_asm_mov_to_rr_back_2-asm_mov_from_reg,r20
    shr.u r23=r19,61 // get RR #
    shladd r17=r17,4,r20 // get r2
    ;;
    // if rr7, go back
    cmp.eq p6,p0=7,r23
    mov b0=r18 // restore b0
    (p6) br.cond.dpnt.many vmx_virtualization_fault_back
    ;;
    mov r28=r19 // save r3
    mov b0=r17
    br.many b0
vmx_asm_mov_to_rr_back_2:
    adds r30=vmx_resume_to_guest-asm_mov_from_reg,r20
    shladd r27=r23,3,r27 // address of VRR
    add r22=IA64_DOMAIN_RID_BITS_OFFSET,r22
    ;;
    ld1 r22=[r22] // Load rid_bits from domain
    mov b0=r18 // restore b0
    adds r16=IA64_VCPU_STARTING_RID_OFFSET,r21
    ;;
    ld4 r16=[r16] // load starting_rid
    extr.u r17=r19,8,24 // Extract RID
    ;;
    shr r17=r17,r22 // Shift out used bits
    shl r16=r16,8
    ;;
    add r20=r19,r16
    cmp.ne p6,p0=0,r17 // If reserved RID bits are set, fall back to C.
    (p6) br.cond.dpnt.many vmx_virtualization_fault_back
    ;; // mangle rid bytes 1 and 3
    extr.u r16=r20,8,8
    extr.u r17=r20,24,8
    mov r24=r18 // saved b0 for resume
    ;;
    extr.u r18=r20,2,6 // page size
    dep r20=r16,r20,24,8
    mov b0=r30
    ;;
    dep r20=r17,r20,8,8
    ;; // set ve to 1
    dep r20=-1,r20,0,1
    // If ps > PAGE_SHIFT, use PAGE_SHIFT
    cmp.lt p6,p0=PAGE_SHIFT,r18
    ;;
    (p6) mov r18=PAGE_SHIFT
    ;;
    (p6) dep r20=r18,r20,2,6
    ;;
    st8 [r27]=r19 // Write to vrr.
    // Write to save_rr if rr=0 or rr=4.
    cmp.eq p6,p0=0,r23
    ;;
    cmp.eq.or p6,p0=4,r23
    ;;
    adds r16=IA64_VCPU_MMU_MODE_OFFSET,r21
    (p6) adds r17=IA64_VCPU_META_SAVED_RR0_OFFSET,r21
    ;;
    ld1 r16=[r16]
    cmp.eq p7,p0=r0,r0
    (p6) shladd r17=r23,1,r17
    ;;
    (p6) st8 [r17]=r20
    (p6) cmp.eq p7,p0=VMX_MMU_VIRTUAL,r16 // Set physical rr if in virt mode
    ;;
    (p7) mov rr[r28]=r20
    br.many b0
END(vmx_asm_mov_to_rr)
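
// For a region register the guest is allowed to touch on the fast path,
// the sequence above is roughly (illustrative C-like sketch):
//     vrr[reg] = val;                            // guest-visible value
//     phys = val + (starting_rid << 8);          // offset into the host RID space
//     swap bytes 1 and 3 of the RID in phys;     // RID mangling
//     phys.ve = 1;
//     if (phys.ps > PAGE_SHIFT) phys.ps = PAGE_SHIFT;
//     if (reg != 0 && reg != 4)
//         machine rr[GR[r3]] = phys;
//     else {
//         metaphysical saved rr0/rr4 slot = phys;
//         if (mmu_mode == VMX_MMU_VIRTUAL) machine rr[GR[r3]] = phys;
//     }
// rr7, and values whose RID exceeds the domain's rid_bits, are punted to
// the C handler via vmx_virtualization_fault_back.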

// rsm
GLOBAL_ENTRY(vmx_asm_rsm)
#ifndef ACCE_RSM
    br.many vmx_virtualization_fault_back
#endif
    add r16=IA64_VPD_BASE_OFFSET,r21
    extr.u r26=r25,6,21 // Imm21
    extr.u r27=r25,31,2 // I2d
    ;;
    ld8 r16=[r16]
    extr.u r28=r25,36,1 // I
    dep r26=r27,r26,21,2
    ;;
    add r17=VPD_VPSR_START_OFFSET,r16
    add r22=IA64_VCPU_MMU_MODE_OFFSET,r21
    // r26 is imm24
    dep r26=r28,r26,23,1
    ;;
    ld8 r18=[r17]
    movl r28=IA64_PSR_IC+IA64_PSR_I+IA64_PSR_DT+IA64_PSR_SI
    ld1 r23=[r22]
    sub r27=-1,r26 // ~r26
    mov r24=b0
    ;;
    mov r20=cr.ipsr
    or r28=r27,r28 // Keep IC,I,DT,SI
    and r19=r18,r27 // Update vpsr
    ;;
    st8 [r17]=r19
    and r20=r20,r28 // Update ipsr
    adds r27=IA64_VCPU_FP_PSR_OFFSET,r21
    ;;
    ld8 r27=[r27]
    ;;
    tbit.nz p8,p0=r27,IA64_PSR_DFH_BIT
    ;;
    (p8) dep r20=-1,r20,IA64_PSR_DFH_BIT,1 // Keep dfh
    ;;
    mov cr.ipsr=r20
    cmp.ne p6,p0=VMX_MMU_VIRTUAL,r23
    ;;
    tbit.z.or p6,p0=r26,IA64_PSR_DT_BIT
    (p6) br.dptk vmx_resume_to_guest // DT not cleared or already in phy mode
    ;;
    // Switch to meta physical mode D.
    add r26=IA64_VCPU_META_RID_D_OFFSET,r21
    mov r23=VMX_MMU_PHY_D
    ;;
    ld8 r26=[r26]
    st1 [r22]=r23
    dep.z r28=4,61,3
    ;;
    mov rr[r0]=r26
    ;;
    mov rr[r28]=r26
    ;;
    srlz.d
    br.many vmx_resume_to_guest
END(vmx_asm_rsm)
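
// The rsm fast path first reassembles the 24-bit immediate from the three
// instruction fields, then clears bits, roughly (illustrative sketch):
//     imm24 = imm21(bits 6..26) | i2d(bits 31..32) << 21 | i(bit 36) << 23;
//     vpsr &= ~imm24;                          // virtual PSR in the VPD
//     ipsr &= ~imm24 | (IC | I | DT | SI);     // never clear these in the real PSR
//     if (fp_psr.dfh) ipsr.dfh = 1;            // dfh tracks the saved fp_psr
// If the guest cleared psr.dt while in virtual MMU mode, the vcpu drops to
// metaphysical mode D: mmu_mode becomes VMX_MMU_PHY_D and the metaphysical
// RID is loaded into rr[0] and rr[4].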

// ssm
GLOBAL_ENTRY(vmx_asm_ssm)
#ifndef ACCE_SSM
    br.many vmx_virtualization_fault_back
#endif
    add r16=IA64_VPD_BASE_OFFSET,r21
    extr.u r26=r25,6,21
    extr.u r27=r25,31,2
    ;;
    ld8 r16=[r16]
    extr.u r28=r25,36,1
    dep r26=r27,r26,21,2
    ;; // r26 is imm24
    add r27=VPD_VPSR_START_OFFSET,r16
    dep r26=r28,r26,23,1
    ;; // r19 will be the updated vpsr
    ld8 r29=[r27]
    mov r24=b0
    ;;
    add r22=IA64_VCPU_MMU_MODE_OFFSET,r21
    mov r20=cr.ipsr
    or r19=r29,r26
    ;;
    ld1 r23=[r22] // mmu_mode
    st8 [r27]=r19 // vpsr
    or r20=r20,r26
    ;;
    mov cr.ipsr=r20
    movl r28=IA64_PSR_DT+IA64_PSR_RT+IA64_PSR_IT
    ;;
    and r19=r28,r19
    cmp.eq p6,p0=VMX_MMU_VIRTUAL,r23
    ;;
    cmp.ne.or p6,p0=r28,r19 // (vpsr & (it+dt+rt)) != (it+dt+rt), i.e. stay in phy
    (p6) br.dptk vmx_asm_ssm_1
    ;;
    add r26=IA64_VCPU_META_SAVED_RR0_OFFSET,r21
    add r27=IA64_VCPU_META_SAVED_RR0_OFFSET+8,r21
    mov r23=VMX_MMU_VIRTUAL
    ;;
    ld8 r26=[r26]
    ld8 r27=[r27]
    st1 [r22]=r23
    dep.z r28=4,61,3
    ;;
    mov rr[r0]=r26
    ;;
    mov rr[r28]=r27
    ;;
    srlz.d
    ;;
vmx_asm_ssm_1:
    tbit.nz p6,p0=r29,IA64_PSR_I_BIT
    ;;
    tbit.z.or p6,p0=r19,IA64_PSR_I_BIT
    (p6) br.dptk vmx_resume_to_guest
    ;;
    add r29=VPD_VTPR_START_OFFSET,r16
    add r30=VPD_VHPI_START_OFFSET,r16
    ;;
    ld8 r29=[r29]
    ld8 r30=[r30]
    ;;
    extr.u r17=r29,4,4
    extr.u r18=r29,16,1
    ;;
    dep r17=r18,r17,4,1
    ;;
    cmp.gt p6,p0=r30,r17
    (p6) br.dpnt.few vmx_asm_dispatch_vexirq
    br.many vmx_resume_to_guest
END(vmx_asm_ssm)
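
// After setting the requested PSR bits, the tail of vmx_asm_ssm decides
// whether enabling interrupts should deliver a pending one.  Roughly
// (illustrative sketch of the vtpr/vhpi comparison above):
//     threshold = vtpr.mic | (vtpr.mmi << 4);   // vtpr bits 4..7 and bit 16
//     if (guest just enabled psr.i && vhpi > threshold)
//         vmx_asm_dispatch_vexirq();            // inject the external interrupt
//     else
//         vmx_resume_to_guest();
// If ssm set all of psr.{it,dt,rt} while the vcpu was in a metaphysical
// mode, the saved rr0/rr4 values are restored first and mmu_mode switches
// back to VMX_MMU_VIRTUAL.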

// mov psr.l=r2
GLOBAL_ENTRY(vmx_asm_mov_to_psr)
#ifndef ACCE_MOV_TO_PSR
    br.many vmx_virtualization_fault_back
#endif
    add r16=IA64_VPD_BASE_OFFSET,r21
    extr.u r26=r25,13,7 // r2
    ;;
    ld8 r16=[r16]
    movl r20=asm_mov_from_reg
    ;;
    adds r30=vmx_asm_mov_to_psr_back-asm_mov_from_reg,r20
    shladd r26=r26,4,r20
    mov r24=b0
    ;;
    add r27=VPD_VPSR_START_OFFSET,r16
    mov b0=r26
    br.many b0
    ;;
vmx_asm_mov_to_psr_back:
    ld8 r17=[r27] // vpsr
    add r22=IA64_VCPU_MMU_MODE_OFFSET,r21
    dep r19=0,r19,32,32 // Clear bits 32-63
    ;;
    ld1 r23=[r22] // mmu_mode
    dep r18=0,r17,0,32
    ;;
    or r30=r18,r19
    movl r28=IA64_PSR_DT+IA64_PSR_RT+IA64_PSR_IT
    ;;
    st8 [r27]=r30 // set vpsr
    and r27=r28,r30
    and r29=r28,r17
    ;;
    cmp.eq p5,p0=r29,r27 // (old_vpsr & (dt+rt+it)) == (new_vpsr & (dt+rt+it))
    cmp.eq p6,p7=r28,r27 // (new_vpsr & (dt+rt+it)) == (dt+rt+it)
    (p5) br.many vmx_asm_mov_to_psr_1 // no change
    ;;
    // virtual to physical D
    (p7) add r26=IA64_VCPU_META_RID_D_OFFSET,r21
    (p7) add r27=IA64_VCPU_META_RID_D_OFFSET,r21
    (p7) mov r23=VMX_MMU_PHY_D
    ;;
    // physical to virtual
    (p6) add r26=IA64_VCPU_META_SAVED_RR0_OFFSET,r21
    (p6) add r27=IA64_VCPU_META_SAVED_RR0_OFFSET+8,r21
    (p6) mov r23=VMX_MMU_VIRTUAL
    ;;
    ld8 r26=[r26]
    ld8 r27=[r27]
    st1 [r22]=r23
    dep.z r28=4,61,3
    ;;
    mov rr[r0]=r26
    ;;
    mov rr[r28]=r27
    ;;
    srlz.d
    ;;
vmx_asm_mov_to_psr_1:
    mov r20=cr.ipsr
    movl r28=IA64_PSR_IC+IA64_PSR_I+IA64_PSR_DT+IA64_PSR_SI+IA64_PSR_RT
    ;;
    or r19=r19,r28
    dep r20=0,r20,0,32
    ;;
    add r20=r19,r20
    mov b0=r24
    ;;
    adds r27=IA64_VCPU_FP_PSR_OFFSET,r21
    ;;
    ld8 r27=[r27]
    ;;
    tbit.nz p8,p0=r27,IA64_PSR_DFH_BIT
    ;;
    (p8) dep r20=-1,r20,IA64_PSR_DFH_BIT,1
    ;;
    mov cr.ipsr=r20
    cmp.ne p6,p0=r0,r0
    ;;
    tbit.nz.or p6,p0=r17,IA64_PSR_I_BIT
    tbit.z.or p6,p0=r30,IA64_PSR_I_BIT
    (p6) br.dpnt.few vmx_resume_to_guest
    ;;
    add r29=VPD_VTPR_START_OFFSET,r16
    add r30=VPD_VHPI_START_OFFSET,r16
    ;;
    ld8 r29=[r29]
    ld8 r30=[r30]
    ;;
    extr.u r17=r29,4,4
    extr.u r18=r29,16,1
    ;;
    dep r17=r18,r17,4,1
    ;;
    cmp.gt p6,p0=r30,r17
    (p6) br.dpnt.few vmx_asm_dispatch_vexirq
    br.many vmx_resume_to_guest
END(vmx_asm_mov_to_psr)
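
// mov psr.l only replaces the low 32 bits of the PSR; the fast path above
// merges them roughly as (illustrative sketch):
//     new_vpsr = (old_vpsr & ~0xffffffffUL) | (GR[r2] & 0xffffffffUL);
//     ipsr     = (ipsr & ~0xffffffffUL)
//              + ((GR[r2] & 0xffffffffUL) | IC | I | DT | SI | RT);
// A transition of psr.{it,dt,rt} between all-set and not-all-set triggers
// the same metaphysical/virtual region-register switch as rsm/ssm, and the
// closing vtpr/vhpi comparison decides whether a pending external
// interrupt must be injected.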

ENTRY(vmx_asm_dispatch_vexirq)
    // increment iip
    mov r16=cr.ipsr
    ;;
    extr.u r17=r16,IA64_PSR_RI_BIT,2
    tbit.nz p6,p7=r16,IA64_PSR_RI_BIT+1
    ;;
    (p6) mov r18=cr.iip
    (p6) mov r17=r0
    (p7) add r17=1,r17
    ;;
    (p6) add r18=0x10,r18
    dep r16=r17,r16,IA64_PSR_RI_BIT,2
    ;;
    (p6) mov cr.iip=r18
    mov cr.ipsr=r16
    br.many vmx_dispatch_vexirq
END(vmx_asm_dispatch_vexirq)
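
// Advancing past the emulated instruction means stepping ipsr.ri through
// the three slots of a bundle, roughly (illustrative sketch):
//     if (ipsr.ri == 2) { iip += 16; ipsr.ri = 0; }   // last slot: next bundle
//     else              { ipsr.ri++; }
// The same slot arithmetic appears again in vmx_resume_to_guest below.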

// thash r1=r3
// TODO: add support when pta.vf = 1
GLOBAL_ENTRY(vmx_asm_thash)
#ifndef ACCE_THASH
    br.many vmx_virtualization_fault_back
#endif
    extr.u r17=r25,20,7 // get r3 from opcode in r25
    extr.u r18=r25,6,7 // get r1 from opcode in r25
    movl r20=asm_mov_from_reg
    ;;
    adds r30=vmx_asm_thash_back1-asm_mov_from_reg,r20
    shladd r17=r17,4,r20 // get addr of MOVE_FROM_REG(r17)
    adds r16=IA64_VPD_BASE_OFFSET,r21 // get vcpu.arch.priveregs
    mov r24=b0 // save b0
    ;;
    ld8 r16=[r16] // get VPD addr
    mov b0=r17
    br.many b0 // r19 return value
    ;;
vmx_asm_thash_back1:
    shr.u r23=r19,61 // get RR number
    adds r25=VCPU_VRR0_OFS,r21 // get vcpu->arch.arch_vmx.vrr[0]'s addr
    adds r16=IA64_VPD_VPTA_OFFSET,r16 // get virtual pta
    ;;
    shladd r27=r23,3,r25 // get vcpu->arch.arch_vmx.vrr[r23]'s addr
    ld8 r17=[r16] // get virtual PTA
    mov r26=1
    ;;
    extr.u r29=r17,2,6 // get pta.size
    ld8 r25=[r27] // get vcpu->arch.arch_vmx.vrr[r23]'s value
    ;;
    // Fall back to C if VF (long format) is set
    tbit.nz p6,p0=r17,8
    mov b0=r24
    (p6) br.cond.dpnt.many vmx_virtualization_fault_back
    extr.u r25=r25,2,6 // get rr.ps
    shl r22=r26,r29 // 1UL << pta.size
    ;;
    shr.u r23=r19,r25 // vaddr >> rr.ps
    adds r26=3,r29 // pta.size + 3
    shl r27=r17,3 // pta << 3
    ;;
    shl r23=r23,3 // (vaddr >> rr.ps) << 3
    shr.u r27=r27,r26 // (pta << 3) >> (pta.size + 3)
    movl r16=VRN_MASK
    ;;
    adds r22=-1,r22 // (1UL << pta.size) - 1
    shl r27=r27,r29 // ((pta << 3) >> (pta.size + 3)) << pta.size
    and r19=r19,r16 // vaddr & VRN_MASK
    ;;
    and r22=r22,r23 // vhpt_offset
    or r19=r19,r27 // (vaddr & VRN_MASK) | (((pta << 3) >> (pta.size + 3)) << pta.size)
    adds r26=asm_mov_to_reg-asm_mov_from_reg,r20
    ;;
    or r19=r19,r22 // calc pval
    shladd r17=r18,4,r26
    adds r30=vmx_resume_to_guest-asm_mov_from_reg,r20
    ;;
    mov b0=r17
    br.many b0
END(vmx_asm_thash)
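
// With pta.vf = 0 (short-format VHPT), the hashed address computed above
// is, as an illustrative C-like sketch of the same arithmetic:
//     vhpt_off = ((vaddr >> rr.ps) << 3) & ((1UL << pta.size) - 1);
//     pval     = (vaddr & VRN_MASK)
//              | (((pta << 3) >> (pta.size + 3)) << pta.size)
//              | vhpt_off;
//     GR[r1]   = pval;
// rr.ps comes from the guest's vrr[vaddr >> 61] and pta.size from the
// virtual PTA in the VPD; the long-format case (pta.vf = 1) still goes
// through the C handler.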

#define MOV_TO_REG0 \
{; \
    nop.b 0x0; \
    nop.b 0x0; \
    nop.b 0x0; \
    ;; \
};

#define MOV_TO_REG(n) \
{; \
    mov r##n##=r19; \
    mov b0=r30; \
    br.sptk.many b0; \
    ;; \
};

#define MOV_FROM_REG(n) \
{; \
    mov r19=r##n##; \
    mov b0=r30; \
    br.sptk.many b0; \
    ;; \
};

#define MOV_TO_BANK0_REG(n) \
ENTRY_MIN_ALIGN(asm_mov_to_bank0_reg##n##); \
{; \
    mov r26=r2; \
    mov r2=r19; \
    bsw.1; \
    ;; \
}; \
{; \
    mov r##n##=r2; \
    nop.b 0x0; \
    bsw.0; \
    ;; \
}; \
{; \
    mov r2=r26; \
    mov b0=r30; \
    br.sptk.many b0; \
    ;; \
}; \
END(asm_mov_to_bank0_reg##n##)

#define MOV_FROM_BANK0_REG(n) \
ENTRY_MIN_ALIGN(asm_mov_from_bank0_reg##n##); \
{; \
    mov r26=r2; \
    nop.b 0x0; \
    bsw.1; \
    ;; \
}; \
{; \
    mov r2=r##n##; \
    nop.b 0x0; \
    bsw.0; \
    ;; \
}; \
{; \
    mov r19=r2; \
    mov r2=r26; \
    mov b0=r30; \
}; \
{; \
    nop.b 0x0; \
    nop.b 0x0; \
    br.sptk.many b0; \
    ;; \
}; \
END(asm_mov_from_bank0_reg##n##)

#define JMP_TO_MOV_TO_BANK0_REG(n) \
{; \
    nop.b 0x0; \
    nop.b 0x0; \
    br.sptk.many asm_mov_to_bank0_reg##n##; \
    ;; \
}

#define JMP_TO_MOV_FROM_BANK0_REG(n) \
{; \
    nop.b 0x0; \
    nop.b 0x0; \
    br.sptk.many asm_mov_from_bank0_reg##n##; \
    ;; \
}
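
// The register-access tables below are arrays of fixed-size 16-byte
// bundles, so a GR number turns into an entry address with
// "shladd rX=reg,4,table_base", as done by the handlers above.  The slots
// for r16-r31 only branch to the asm_mov_{to,from}_bank0_reg## stubs:
// those registers are banked, so the stubs switch to the other register
// bank with bsw.1 to reach the guest's copies and switch back with bsw.0,
// using r2 as a scratch register that is restored afterwards.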

MOV_FROM_BANK0_REG(16)
MOV_FROM_BANK0_REG(17)
MOV_FROM_BANK0_REG(18)
MOV_FROM_BANK0_REG(19)
MOV_FROM_BANK0_REG(20)
MOV_FROM_BANK0_REG(21)
MOV_FROM_BANK0_REG(22)
MOV_FROM_BANK0_REG(23)
MOV_FROM_BANK0_REG(24)
MOV_FROM_BANK0_REG(25)
MOV_FROM_BANK0_REG(26)
MOV_FROM_BANK0_REG(27)
MOV_FROM_BANK0_REG(28)
MOV_FROM_BANK0_REG(29)
MOV_FROM_BANK0_REG(30)
MOV_FROM_BANK0_REG(31)

// mov from reg table
// r19: value, r30: return address
// r26 may be destroyed
ENTRY(asm_mov_from_reg)
    MOV_FROM_REG(0)
    MOV_FROM_REG(1)
    MOV_FROM_REG(2)
    MOV_FROM_REG(3)
    MOV_FROM_REG(4)
    MOV_FROM_REG(5)
    MOV_FROM_REG(6)
    MOV_FROM_REG(7)
    MOV_FROM_REG(8)
    MOV_FROM_REG(9)
    MOV_FROM_REG(10)
    MOV_FROM_REG(11)
    MOV_FROM_REG(12)
    MOV_FROM_REG(13)
    MOV_FROM_REG(14)
    MOV_FROM_REG(15)
    JMP_TO_MOV_FROM_BANK0_REG(16)
    JMP_TO_MOV_FROM_BANK0_REG(17)
    JMP_TO_MOV_FROM_BANK0_REG(18)
    JMP_TO_MOV_FROM_BANK0_REG(19)
    JMP_TO_MOV_FROM_BANK0_REG(20)
    JMP_TO_MOV_FROM_BANK0_REG(21)
    JMP_TO_MOV_FROM_BANK0_REG(22)
    JMP_TO_MOV_FROM_BANK0_REG(23)
    JMP_TO_MOV_FROM_BANK0_REG(24)
    JMP_TO_MOV_FROM_BANK0_REG(25)
    JMP_TO_MOV_FROM_BANK0_REG(26)
    JMP_TO_MOV_FROM_BANK0_REG(27)
    JMP_TO_MOV_FROM_BANK0_REG(28)
    JMP_TO_MOV_FROM_BANK0_REG(29)
    JMP_TO_MOV_FROM_BANK0_REG(30)
    JMP_TO_MOV_FROM_BANK0_REG(31)
    MOV_FROM_REG(32)
    MOV_FROM_REG(33)
    MOV_FROM_REG(34)
    MOV_FROM_REG(35)
    MOV_FROM_REG(36)
    MOV_FROM_REG(37)
    MOV_FROM_REG(38)
    MOV_FROM_REG(39)
    MOV_FROM_REG(40)
    MOV_FROM_REG(41)
    MOV_FROM_REG(42)
    MOV_FROM_REG(43)
    MOV_FROM_REG(44)
    MOV_FROM_REG(45)
    MOV_FROM_REG(46)
    MOV_FROM_REG(47)
    MOV_FROM_REG(48)
    MOV_FROM_REG(49)
    MOV_FROM_REG(50)
    MOV_FROM_REG(51)
    MOV_FROM_REG(52)
    MOV_FROM_REG(53)
    MOV_FROM_REG(54)
    MOV_FROM_REG(55)
    MOV_FROM_REG(56)
    MOV_FROM_REG(57)
    MOV_FROM_REG(58)
    MOV_FROM_REG(59)
    MOV_FROM_REG(60)
    MOV_FROM_REG(61)
    MOV_FROM_REG(62)
    MOV_FROM_REG(63)
    MOV_FROM_REG(64)
    MOV_FROM_REG(65)
    MOV_FROM_REG(66)
    MOV_FROM_REG(67)
    MOV_FROM_REG(68)
    MOV_FROM_REG(69)
    MOV_FROM_REG(70)
    MOV_FROM_REG(71)
    MOV_FROM_REG(72)
    MOV_FROM_REG(73)
    MOV_FROM_REG(74)
    MOV_FROM_REG(75)
    MOV_FROM_REG(76)
    MOV_FROM_REG(77)
    MOV_FROM_REG(78)
    MOV_FROM_REG(79)
    MOV_FROM_REG(80)
    MOV_FROM_REG(81)
    MOV_FROM_REG(82)
    MOV_FROM_REG(83)
    MOV_FROM_REG(84)
    MOV_FROM_REG(85)
    MOV_FROM_REG(86)
    MOV_FROM_REG(87)
    MOV_FROM_REG(88)
    MOV_FROM_REG(89)
    MOV_FROM_REG(90)
    MOV_FROM_REG(91)
    MOV_FROM_REG(92)
    MOV_FROM_REG(93)
    MOV_FROM_REG(94)
    MOV_FROM_REG(95)
    MOV_FROM_REG(96)
    MOV_FROM_REG(97)
    MOV_FROM_REG(98)
    MOV_FROM_REG(99)
    MOV_FROM_REG(100)
    MOV_FROM_REG(101)
    MOV_FROM_REG(102)
    MOV_FROM_REG(103)
    MOV_FROM_REG(104)
    MOV_FROM_REG(105)
    MOV_FROM_REG(106)
    MOV_FROM_REG(107)
    MOV_FROM_REG(108)
    MOV_FROM_REG(109)
    MOV_FROM_REG(110)
    MOV_FROM_REG(111)
    MOV_FROM_REG(112)
    MOV_FROM_REG(113)
    MOV_FROM_REG(114)
    MOV_FROM_REG(115)
    MOV_FROM_REG(116)
    MOV_FROM_REG(117)
    MOV_FROM_REG(118)
    MOV_FROM_REG(119)
    MOV_FROM_REG(120)
    MOV_FROM_REG(121)
    MOV_FROM_REG(122)
    MOV_FROM_REG(123)
    MOV_FROM_REG(124)
    MOV_FROM_REG(125)
    MOV_FROM_REG(126)
    MOV_FROM_REG(127)
END(asm_mov_from_reg)

/* must be in bank 0
 * parameter:
 * r31: pr
 * r24: b0
 */
ENTRY(vmx_resume_to_guest)
    mov r16=cr.ipsr
    movl r20=__vsa_base
    ;;
    ld8 r20=[r20]
    adds r19=IA64_VPD_BASE_OFFSET,r21
    ;;
    ld8 r25=[r19]
    extr.u r17=r16,IA64_PSR_RI_BIT,2
    tbit.nz p6,p7=r16,IA64_PSR_RI_BIT+1
    ;;
    (p6) mov r18=cr.iip
    (p6) mov r17=r0
    ;;
    (p6) add r18=0x10,r18
    (p7) add r17=1,r17
    ;;
    (p6) mov cr.iip=r18
    dep r16=r17,r16,IA64_PSR_RI_BIT,2
    ;;
    mov cr.ipsr=r16
    adds r19=VPD_VPSR_START_OFFSET,r25
    add r28=PAL_VPS_RESUME_NORMAL,r20
    add r29=PAL_VPS_RESUME_HANDLER,r20
    ;;
    ld8 r19=[r19]
    mov b0=r29
    cmp.ne p6,p7=r0,r0
    ;;
    tbit.z p6,p7=r19,IA64_PSR_IC_BIT // p7 = vpsr.ic
    ;;
    (p6) ld8 r26=[r25]
    (p7) mov b0=r28
    mov pr=r31,-2
    br.sptk.many b0 // call the PAL VPS service
    ;;
END(vmx_resume_to_guest)
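
// The resume path bumps iip/ipsr.ri past the emulated instruction (same
// slot arithmetic as vmx_asm_dispatch_vexirq) and then re-enters the guest
// through the PAL virtualization services, roughly (illustrative sketch):
//     if (vpsr.ic)
//         PAL_VPS_RESUME_NORMAL(...);
//     else
//         PAL_VPS_RESUME_HANDLER(...);   // r26 preloaded from the VPD here
// __vsa_base holds the base of the VPS service table; r28 and r29 are the
// two entry points computed from it.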

MOV_TO_BANK0_REG(16)
MOV_TO_BANK0_REG(17)
MOV_TO_BANK0_REG(18)
MOV_TO_BANK0_REG(19)
MOV_TO_BANK0_REG(20)
MOV_TO_BANK0_REG(21)
MOV_TO_BANK0_REG(22)
MOV_TO_BANK0_REG(23)
MOV_TO_BANK0_REG(24)
MOV_TO_BANK0_REG(25)
MOV_TO_BANK0_REG(26)
MOV_TO_BANK0_REG(27)
MOV_TO_BANK0_REG(28)
MOV_TO_BANK0_REG(29)
MOV_TO_BANK0_REG(30)
MOV_TO_BANK0_REG(31)

// mov to reg table
// r19: value, r30: return address
ENTRY(asm_mov_to_reg)
    MOV_TO_REG0
    MOV_TO_REG(1)
    MOV_TO_REG(2)
    MOV_TO_REG(3)
    MOV_TO_REG(4)
    MOV_TO_REG(5)
    MOV_TO_REG(6)
    MOV_TO_REG(7)
    MOV_TO_REG(8)
    MOV_TO_REG(9)
    MOV_TO_REG(10)
    MOV_TO_REG(11)
    MOV_TO_REG(12)
    MOV_TO_REG(13)
    MOV_TO_REG(14)
    MOV_TO_REG(15)
    JMP_TO_MOV_TO_BANK0_REG(16)
    JMP_TO_MOV_TO_BANK0_REG(17)
    JMP_TO_MOV_TO_BANK0_REG(18)
    JMP_TO_MOV_TO_BANK0_REG(19)
    JMP_TO_MOV_TO_BANK0_REG(20)
    JMP_TO_MOV_TO_BANK0_REG(21)
    JMP_TO_MOV_TO_BANK0_REG(22)
    JMP_TO_MOV_TO_BANK0_REG(23)
    JMP_TO_MOV_TO_BANK0_REG(24)
    JMP_TO_MOV_TO_BANK0_REG(25)
    JMP_TO_MOV_TO_BANK0_REG(26)
    JMP_TO_MOV_TO_BANK0_REG(27)
    JMP_TO_MOV_TO_BANK0_REG(28)
    JMP_TO_MOV_TO_BANK0_REG(29)
    JMP_TO_MOV_TO_BANK0_REG(30)
    JMP_TO_MOV_TO_BANK0_REG(31)
    MOV_TO_REG(32)
    MOV_TO_REG(33)
    MOV_TO_REG(34)
    MOV_TO_REG(35)
    MOV_TO_REG(36)
    MOV_TO_REG(37)
    MOV_TO_REG(38)
    MOV_TO_REG(39)
    MOV_TO_REG(40)
    MOV_TO_REG(41)
    MOV_TO_REG(42)
    MOV_TO_REG(43)
    MOV_TO_REG(44)
    MOV_TO_REG(45)
    MOV_TO_REG(46)
    MOV_TO_REG(47)
    MOV_TO_REG(48)
    MOV_TO_REG(49)
    MOV_TO_REG(50)
    MOV_TO_REG(51)
    MOV_TO_REG(52)
    MOV_TO_REG(53)
    MOV_TO_REG(54)
    MOV_TO_REG(55)
    MOV_TO_REG(56)
    MOV_TO_REG(57)
    MOV_TO_REG(58)
    MOV_TO_REG(59)
    MOV_TO_REG(60)
    MOV_TO_REG(61)
    MOV_TO_REG(62)
    MOV_TO_REG(63)
    MOV_TO_REG(64)
    MOV_TO_REG(65)
    MOV_TO_REG(66)
    MOV_TO_REG(67)
    MOV_TO_REG(68)
    MOV_TO_REG(69)
    MOV_TO_REG(70)
    MOV_TO_REG(71)
    MOV_TO_REG(72)
    MOV_TO_REG(73)
    MOV_TO_REG(74)
    MOV_TO_REG(75)
    MOV_TO_REG(76)
    MOV_TO_REG(77)
    MOV_TO_REG(78)
    MOV_TO_REG(79)
    MOV_TO_REG(80)
    MOV_TO_REG(81)
    MOV_TO_REG(82)
    MOV_TO_REG(83)
    MOV_TO_REG(84)
    MOV_TO_REG(85)
    MOV_TO_REG(86)
    MOV_TO_REG(87)
    MOV_TO_REG(88)
    MOV_TO_REG(89)
    MOV_TO_REG(90)
    MOV_TO_REG(91)
    MOV_TO_REG(92)
    MOV_TO_REG(93)
    MOV_TO_REG(94)
    MOV_TO_REG(95)
    MOV_TO_REG(96)
    MOV_TO_REG(97)
    MOV_TO_REG(98)
    MOV_TO_REG(99)
    MOV_TO_REG(100)
    MOV_TO_REG(101)
    MOV_TO_REG(102)
    MOV_TO_REG(103)
    MOV_TO_REG(104)
    MOV_TO_REG(105)
    MOV_TO_REG(106)
    MOV_TO_REG(107)
    MOV_TO_REG(108)
    MOV_TO_REG(109)
    MOV_TO_REG(110)
    MOV_TO_REG(111)
    MOV_TO_REG(112)
    MOV_TO_REG(113)
    MOV_TO_REG(114)
    MOV_TO_REG(115)
    MOV_TO_REG(116)
    MOV_TO_REG(117)
    MOV_TO_REG(118)
    MOV_TO_REG(119)
    MOV_TO_REG(120)
    MOV_TO_REG(121)
    MOV_TO_REG(122)
    MOV_TO_REG(123)
    MOV_TO_REG(124)
    MOV_TO_REG(125)
    MOV_TO_REG(126)
    MOV_TO_REG(127)
END(asm_mov_to_reg)