xen/arch/ia64/vmx/optvfault.S @ 16177:98ac6d05aed2 (ia64/xen-unstable)

[IA64] Enable switch to PHY_D mmu mode

Last patch for PHY_D mmu mode.

Signed-off-by: Tristan Gingold <tgingold@free.fr>
Author: Alex Williamson <alex.williamson@hp.com>
Date:   Sun Oct 21 15:58:00 2007 -0600

/*
 * arch/ia64/vmx/optvfault.S
 * optimize virtualization fault handler
 *
 * Copyright (C) 2006 Intel Co
 *  Xuefei Xu (Anthony Xu) <anthony.xu@intel.com>
 */

#include <linux/config.h>
#include <asm/asmmacro.h>
#include <asm/kregs.h>
#include <asm/offsets.h>
#include <asm/percpu.h>
#include <asm/processor.h>
#include <asm/vmx_vpd.h>
#include <asm/vmx_pal_vsa.h>
#include <asm/asm-offsets.h>
#include <asm-ia64/vmx_mm_def.h>
#include <asm-ia64/vmx_phy_mode.h>

#define ACCE_MOV_FROM_AR
#define ACCE_MOV_FROM_RR
#define ACCE_MOV_TO_RR
#define ACCE_RSM
#define ACCE_SSM
#define ACCE_MOV_TO_PSR
#define ACCE_THASH

// mov r1=ar3 (only itc is virtualized)
GLOBAL_ENTRY(vmx_asm_mov_from_ar)
#ifndef ACCE_MOV_FROM_AR
    br.many vmx_virtualization_fault_back
#endif
    add r18=VCPU_VTM_OFFSET_OFS,r21
    add r16=VCPU_VTM_LAST_ITC_OFS,r21
    extr.u r17=r25,6,7
    ;;
    ld8 r18=[r18]
    mov r19=ar.itc
    mov r24=b0
    ;;
    ld8 r16=[r16]
    add r19=r19,r18
    movl r20=asm_mov_to_reg
    ;;
    adds r30=vmx_resume_to_guest-asm_mov_to_reg,r20
    shladd r17=r17,4,r20
    cmp.gtu p6,p0=r16,r19
    ;;
    (p6) mov r19=r16
    mov b0=r17
    br.sptk.few b0
    ;;
END(vmx_asm_mov_from_ar)
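
// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original file): what the fast path
// above computes for a guest "mov rX=ar.itc".  The field names mirror the
// VCPU_VTM_* offsets used above; the helper name guest_read_itc() is
// hypothetical.  Note the fast path only reads last_itc, it does not update
// it.
//
//   typedef unsigned long long u64;
//
//   struct vtm {
//       u64 vtm_offset;   /* guest ITC = host ITC + offset        */
//       u64 last_itc;     /* last value handed back to the guest  */
//   };
//
//   static u64 guest_read_itc(const struct vtm *vtm, u64 host_itc)
//   {
//       u64 itc = host_itc + vtm->vtm_offset;
//       /* never let the virtual ITC appear to go backwards */
//       if (itc < vtm->last_itc)
//           itc = vtm->last_itc;
//       return itc;
//   }
// ---------------------------------------------------------------------------
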
// mov r1=rr[r3]
GLOBAL_ENTRY(vmx_asm_mov_from_rr)
#ifndef ACCE_MOV_FROM_RR
    br.many vmx_virtualization_fault_back
#endif
    extr.u r16=r25,20,7
    extr.u r17=r25,6,7
    movl r20=asm_mov_from_reg
    ;;
    adds r30=vmx_asm_mov_from_rr_back_1-asm_mov_from_reg,r20
    shladd r16=r16,4,r20
    mov r24=b0
    ;;
    add r27=VCPU_VRR0_OFS,r21
    mov b0=r16
    br.many b0
    ;;
vmx_asm_mov_from_rr_back_1:
    adds r30=vmx_resume_to_guest-asm_mov_from_reg,r20
    adds r22=asm_mov_to_reg-asm_mov_from_reg,r20
    shr.u r26=r19,61
    ;;
    shladd r17=r17,4,r22
    shladd r27=r26,3,r27
    ;;
    ld8 r19=[r27]
    mov b0=r17
    br.many b0
END(vmx_asm_mov_from_rr)

// mov rr[r3]=r2
GLOBAL_ENTRY(vmx_asm_mov_to_rr)
#ifndef ACCE_MOV_TO_RR
    br.many vmx_virtualization_fault_back
#endif
    extr.u r16=r25,20,7
    extr.u r17=r25,13,7
    movl r20=asm_mov_from_reg
    ;;
    adds r30=vmx_asm_mov_to_rr_back_1-asm_mov_from_reg,r20
    shladd r16=r16,4,r20
    mov r22=b0
    ;;
    add r27=VCPU_VRR0_OFS,r21
    mov b0=r16
    br.many b0
    ;;
vmx_asm_mov_to_rr_back_1:
    adds r30=vmx_asm_mov_to_rr_back_2-asm_mov_from_reg,r20
    shr.u r23=r19,61
    shladd r17=r17,4,r20
    ;;
    // if rr7, go back
    cmp.eq p6,p0=7,r23
    mov b0=r22
    (p6) br.cond.dpnt.many vmx_virtualization_fault_back
    ;;
    mov r28=r19
    mov b0=r17
    br.many b0
vmx_asm_mov_to_rr_back_2:
    adds r30=vmx_resume_to_guest-asm_mov_from_reg,r20
    shladd r27=r23,3,r27
    ;; // +starting_rid
    st8 [r27]=r19
    mov b0=r30
    ;;
    adds r16=IA64_VCPU_STARTING_RID_OFFSET,r21
    ;;
    ld4 r16=[r16]
    ;;
    shl r16=r16,8
    ;;
    add r19=r19,r16
    ;; // mangling rid 1 and 3
    extr.u r16=r19,8,8
    extr.u r17=r19,24,8
    extr.u r18=r19,2,6 // page size
    ;;
    dep r19=r16,r19,24,8
    ;;
    dep r19=r17,r19,8,8
    ;; // set ve 1
    dep r19=-1,r19,0,1
    cmp.lt p6,p0=14,r18
    ;;
    (p6) mov r18=14
    ;;
    (p6) dep r19=r18,r19,2,6
    ;;
    cmp.eq p6,p0=0,r23
    ;;
    cmp.eq.or p6,p0=4,r23
    ;;
    adds r16=IA64_VCPU_MMU_MODE_OFFSET,r21
    (p6) adds r17=IA64_VCPU_META_SAVED_RR0_OFFSET,r21
    ;;
    ld1 r16=[r16]
    cmp.eq p7,p0=r0,r0
    (p6) shladd r17=r23,1,r17
    ;;
    (p6) st8 [r17]=r19
    (p6) cmp.eq p7,p0=VMX_MMU_VIRTUAL,r16 // Set physical rr if in virt mode
    ;;
    (p7) mov rr[r28]=r19
    mov r24=r22
    br.many b0
END(vmx_asm_mov_to_rr)
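
// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original file): the RID mangling and
// page-size clamping performed above on the value written to rr[r3].  The
// unmangled guest value is stored into vrr[] first; the mangled value is what
// reaches the hardware region register, and only when the vcpu is in virtual
// MMU mode.  The helper name vmx_mangle_rrval() is hypothetical; the bit
// layout follows the ia64 region register format (ve: bit 0, ps: bits 2-7,
// rid: bits 8-31).
//
//   typedef unsigned long long u64;
//
//   static u64 vmx_mangle_rrval(u64 rrval, u64 starting_rid)
//   {
//       u64 b1, b3, ps;
//
//       rrval += starting_rid << 8;            /* offset the guest rid      */
//       b1 = (rrval >> 8) & 0xff;              /* rid byte 1                */
//       b3 = (rrval >> 24) & 0xff;             /* rid byte 3                */
//       rrval = (rrval & ~(0xffULL << 24)) | (b1 << 24);
//       rrval = (rrval & ~(0xffULL << 8))  | (b3 << 8);    /* swap them     */
//       rrval |= 1;                            /* ve = 1                    */
//       ps = (rrval >> 2) & 0x3f;
//       if (ps > 14)                           /* clamp the page size field */
//           rrval = (rrval & ~(0x3fULL << 2)) | (14ULL << 2);
//       return rrval;
//   }
// ---------------------------------------------------------------------------
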
// rsm
GLOBAL_ENTRY(vmx_asm_rsm)
#ifndef ACCE_RSM
    br.many vmx_virtualization_fault_back
#endif
    add r16=IA64_VPD_BASE_OFFSET,r21
    extr.u r26=r25,6,21 // Imm21
    extr.u r27=r25,31,2 // I2d
    ;;
    ld8 r16=[r16]
    extr.u r28=r25,36,1 // I
    dep r26=r27,r26,21,2
    ;;
    add r17=VPD_VPSR_START_OFFSET,r16
    add r22=IA64_VCPU_MMU_MODE_OFFSET,r21
    // r26 is imm24
    dep r26=r28,r26,23,1
    ;;
    ld8 r18=[r17]
    movl r28=IA64_PSR_IC+IA64_PSR_I+IA64_PSR_DT+IA64_PSR_SI
    ld1 r23=[r22]
    sub r27=-1,r26 // ~r26
    mov r24=b0
    ;;
    mov r20=cr.ipsr
    or r28=r27,r28 // Keep IC,I,DT,SI
    and r19=r18,r27 // Update vpsr
    ;;
    st8 [r17]=r19
    and r20=r20,r28 // Update ipsr
    adds r27=IA64_VCPU_FP_PSR_OFFSET,r21
    ;;
    ld8 r27=[r27]
    ;;
    tbit.nz p8,p0=r27,IA64_PSR_DFH_BIT
    ;;
    (p8) dep r20=-1,r20,IA64_PSR_DFH_BIT,1 // Keep dfh
    ;;
    mov cr.ipsr=r20
    cmp.ne p6,p0=VMX_MMU_VIRTUAL,r23
    ;;
    tbit.z.or p6,p0=r26,IA64_PSR_DT_BIT
    (p6) br.dptk vmx_resume_to_guest // DT not cleared or already in phy mode
    ;;
    // Switch to meta physical mode D.
    add r26=IA64_VCPU_META_RID_D_OFFSET,r21
    mov r23=VMX_MMU_PHY_D
    ;;
    ld8 r26=[r26]
    st1 [r22]=r23
    dep.z r28=4,61,3
    ;;
    mov rr[r0]=r26
    ;;
    mov rr[r28]=r26
    ;;
    srlz.d
    br.many vmx_resume_to_guest
END(vmx_asm_rsm)
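
// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original file): how the 24-bit
// immediate of rsm/ssm is reassembled above from the instruction image held
// in r25 (imm21 at bits 6-26, i2d at bits 31-32, i at bit 36).  The helper
// name rsm_ssm_imm24() is hypothetical.
//
//   typedef unsigned long long u64;
//
//   static u64 rsm_ssm_imm24(u64 inst)
//   {
//       u64 imm21 = (inst >> 6)  & 0x1fffff;  /* imm21: bits 6..26  */
//       u64 i2d   = (inst >> 31) & 0x3;       /* i2d:   bits 31..32 */
//       u64 i     = (inst >> 36) & 0x1;       /* i:     bit  36     */
//
//       return imm21 | (i2d << 21) | (i << 23);
//   }
// ---------------------------------------------------------------------------
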
// ssm
GLOBAL_ENTRY(vmx_asm_ssm)
#ifndef ACCE_SSM
    br.many vmx_virtualization_fault_back
#endif
    add r16=IA64_VPD_BASE_OFFSET,r21
    extr.u r26=r25,6,21
    extr.u r27=r25,31,2
    ;;
    ld8 r16=[r16]
    extr.u r28=r25,36,1
    dep r26=r27,r26,21,2
    ;; // r26 is imm24
    add r27=VPD_VPSR_START_OFFSET,r16
    dep r26=r28,r26,23,1
    ;; // r19 vpsr
    ld8 r29=[r27]
    mov r24=b0
    ;;
    add r22=IA64_VCPU_MMU_MODE_OFFSET,r21
    mov r20=cr.ipsr
    or r19=r29,r26
    ;;
    ld1 r23=[r22] // mmu_mode
    st8 [r27]=r19 // vpsr
    or r20=r20,r26
    ;;
    mov cr.ipsr=r20
    movl r28=IA64_PSR_DT+IA64_PSR_RT+IA64_PSR_IT
    ;;
    and r19=r28,r19
    cmp.eq p6,p0=VMX_MMU_VIRTUAL,r23
    ;;
    cmp.ne.or p6,p0=r28,r19 // (vpsr & (it+dt+rt)) != (it+dt+rt), i.e. stay in phy mode
    (p6) br.dptk vmx_asm_ssm_1
    ;;
    add r26=IA64_VCPU_META_SAVED_RR0_OFFSET,r21
    add r27=IA64_VCPU_META_SAVED_RR0_OFFSET+8,r21
    mov r23=VMX_MMU_VIRTUAL
    ;;
    ld8 r26=[r26]
    ld8 r27=[r27]
    st1 [r22]=r23
    dep.z r28=4,61,3
    ;;
    mov rr[r0]=r26
    ;;
    mov rr[r28]=r27
    ;;
    srlz.d
    ;;
vmx_asm_ssm_1:
    tbit.nz p6,p0=r29,IA64_PSR_I_BIT
    ;;
    tbit.z.or p6,p0=r19,IA64_PSR_I_BIT
    (p6) br.dptk vmx_resume_to_guest
    ;;
    add r29=VPD_VTPR_START_OFFSET,r16
    add r30=VPD_VHPI_START_OFFSET,r16
    ;;
    ld8 r29=[r29]
    ld8 r30=[r30]
    ;;
    extr.u r17=r29,4,4
    extr.u r18=r29,16,1
    ;;
    dep r17=r18,r17,4,1
    ;;
    cmp.gt p6,p0=r30,r17
    (p6) br.dpnt.few vmx_asm_dispatch_vexirq
    br.many vmx_resume_to_guest
END(vmx_asm_ssm)
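
// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original file): the check above (run
// only when vpsr.i becomes enabled) that decides whether a pending virtual
// external interrupt must be dispatched.  vtpr.mic is bits 4-7, vtpr.mmi is
// bit 16; the helper name vexirq_pending() is hypothetical.
//
//   typedef unsigned long long u64;
//
//   static int vexirq_pending(u64 vtpr, u64 vhpi)
//   {
//       u64 mic = (vtpr >> 4)  & 0xf;   /* masked interrupt class  */
//       u64 mmi = (vtpr >> 16) & 0x1;   /* mask maskable interrupt */
//       u64 threshold = mic | (mmi << 4);
//
//       return vhpi > threshold;        /* dispatch vexirq if true */
//   }
// ---------------------------------------------------------------------------
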
// mov psr.l=r2
GLOBAL_ENTRY(vmx_asm_mov_to_psr)
#ifndef ACCE_MOV_TO_PSR
    br.many vmx_virtualization_fault_back
#endif
    add r16=IA64_VPD_BASE_OFFSET,r21
    extr.u r26=r25,13,7 // r2
    ;;
    ld8 r16=[r16]
    movl r20=asm_mov_from_reg
    ;;
    adds r30=vmx_asm_mov_to_psr_back-asm_mov_from_reg,r20
    shladd r26=r26,4,r20
    mov r24=b0
    ;;
    add r27=VPD_VPSR_START_OFFSET,r16
    mov b0=r26
    br.many b0
    ;;
vmx_asm_mov_to_psr_back:
    ld8 r17=[r27] // vpsr
    add r22=IA64_VCPU_MMU_MODE_OFFSET,r21
    dep r19=0,r19,32,32 // Clear bits 32-63
    ;;
    ld1 r23=[r22] // mmu_mode
    dep r18=0,r17,0,32
    ;;
    or r30=r18,r19
    movl r28=IA64_PSR_DT+IA64_PSR_RT+IA64_PSR_IT
    ;;
    st8 [r27]=r30 // set vpsr
    and r27=r28,r30
    and r29=r28,r17
    ;;
    cmp.eq p5,p0=r29,r27 // (old_vpsr & (dt+rt+it)) == (new_vpsr & (dt+rt+it))
    cmp.eq p6,p7=r28,r27 // (new_vpsr & (dt+rt+it)) == (dt+rt+it)
    (p5) br.many vmx_asm_mov_to_psr_1 // no change
    ;;
    // virtual to physical D
    (p7) add r26=IA64_VCPU_META_RID_D_OFFSET,r21
    (p7) add r27=IA64_VCPU_META_RID_D_OFFSET,r21
    (p7) mov r23=VMX_MMU_PHY_D
    ;;
    // physical to virtual
    (p6) add r26=IA64_VCPU_META_SAVED_RR0_OFFSET,r21
    (p6) add r27=IA64_VCPU_META_SAVED_RR0_OFFSET+8,r21
    (p6) mov r23=VMX_MMU_VIRTUAL
    ;;
    ld8 r26=[r26]
    ld8 r27=[r27]
    st1 [r22]=r23
    dep.z r28=4,61,3
    ;;
    mov rr[r0]=r26
    ;;
    mov rr[r28]=r27
    ;;
    srlz.d
    ;;
vmx_asm_mov_to_psr_1:
    mov r20=cr.ipsr
    movl r28=IA64_PSR_IC+IA64_PSR_I+IA64_PSR_DT+IA64_PSR_SI+IA64_PSR_RT
    ;;
    or r19=r19,r28
    dep r20=0,r20,0,32
    ;;
    add r20=r19,r20
    mov b0=r24
    ;;
    adds r27=IA64_VCPU_FP_PSR_OFFSET,r21
    ;;
    ld8 r27=[r27]
    ;;
    tbit.nz p8,p0=r27,IA64_PSR_DFH_BIT
    ;;
    (p8) dep r20=-1,r20,IA64_PSR_DFH_BIT,1
    ;;
    mov cr.ipsr=r20
    cmp.ne p6,p0=r0,r0
    ;;
    tbit.nz.or p6,p0=r17,IA64_PSR_I_BIT
    tbit.z.or p6,p0=r30,IA64_PSR_I_BIT
    (p6) br.dpnt.few vmx_resume_to_guest
    ;;
    add r29=VPD_VTPR_START_OFFSET,r16
    add r30=VPD_VHPI_START_OFFSET,r16
    ;;
    ld8 r29=[r29]
    ld8 r30=[r30]
    ;;
    extr.u r17=r29,4,4
    extr.u r18=r29,16,1
    ;;
    dep r17=r18,r17,4,1
    ;;
    cmp.gt p6,p0=r30,r17
    (p6) br.dpnt.few vmx_asm_dispatch_vexirq
    br.many vmx_resume_to_guest
END(vmx_asm_mov_to_psr)

ENTRY(vmx_asm_dispatch_vexirq)
    // increment iip
    mov r16=cr.ipsr
    ;;
    extr.u r17=r16,IA64_PSR_RI_BIT,2
    tbit.nz p6,p7=r16,IA64_PSR_RI_BIT+1
    ;;
    (p6) mov r18=cr.iip
    (p6) mov r17=r0
    (p7) add r17=1,r17
    ;;
    (p6) add r18=0x10,r18
    dep r16=r17,r16,IA64_PSR_RI_BIT,2
    ;;
    (p6) mov cr.iip=r18
    mov cr.ipsr=r16
    br.many vmx_dispatch_vexirq
END(vmx_asm_dispatch_vexirq)
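
// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original file): the "increment iip"
// logic above (also used by vmx_resume_to_guest) that advances the guest to
// the next instruction slot.  ipsr.ri holds the slot number (0..2) and an
// ia64 bundle is 16 bytes.  The helper name advance_guest_ip() is
// hypothetical.
//
//   typedef unsigned long long u64;
//
//   static void advance_guest_ip(u64 *iip, u64 *ri)
//   {
//       if (*ri == 2) {          /* last slot: move to the next bundle */
//           *ri = 0;
//           *iip += 0x10;
//       } else {                 /* otherwise just bump the slot       */
//           *ri += 1;
//       }
//   }
// ---------------------------------------------------------------------------
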
// thash
// TODO: add support when pta.vf = 1
GLOBAL_ENTRY(vmx_asm_thash)
#ifndef ACCE_THASH
    br.many vmx_virtualization_fault_back
#endif
    extr.u r17=r25,20,7 // get r3 from opcode in r25
    extr.u r18=r25,6,7 // get r1 from opcode in r25
    movl r20=asm_mov_from_reg
    ;;
    adds r30=vmx_asm_thash_back1-asm_mov_from_reg,r20
    shladd r17=r17,4,r20 // get addr of MOVE_FROM_REG(r17)
    adds r16=IA64_VPD_BASE_OFFSET,r21 // get vcpu.arch.priveregs
    ;;
    mov r24=b0
    ;;
    ld8 r16=[r16] // get VPD addr
    mov b0=r17
    br.many b0 // r19 return value
    ;;
vmx_asm_thash_back1:
    shr.u r23=r19,61 // get RR number
    adds r25=VCPU_VRR0_OFS,r21 // get vcpu->arch.arch_vmx.vrr[0]'s addr
    adds r16=IA64_VPD_VPTA_OFFSET,r16 // get virtual pta
    ;;
    shladd r27=r23,3,r25 // get vcpu->arch.arch_vmx.vrr[r23]'s addr
    ld8 r17=[r16] // get virtual PTA
    mov r26=1
    ;;
    extr.u r29=r17,2,6 // get pta.size
    ld8 r25=[r27] // get vcpu->arch.arch_vmx.vrr[r23]'s value
    ;;
    extr.u r25=r25,2,6 // get rr.ps
    shl r22=r26,r29 // 1UL << pta.size
    ;;
    shr.u r23=r19,r25 // vaddr >> rr.ps
    adds r26=3,r29 // pta.size + 3
    shl r27=r17,3 // pta << 3
    ;;
    shl r23=r23,3 // (vaddr >> rr.ps) << 3
    shr.u r27=r27,r26 // (pta << 3) >> (pta.size+3)
    movl r16=VRN_MASK
    ;;
    adds r22=-1,r22 // (1UL << pta.size) - 1
    shl r27=r27,r29 // ((pta<<3)>>(pta.size+3))<<pta.size
    and r19=r19,r16 // vaddr & VRN_MASK
    ;;
    and r22=r22,r23 // vhpt_offset
    or r19=r19,r27 // (vaddr & VRN_MASK) | (((pta<<3)>>(pta.size+3))<<pta.size)
    adds r26=asm_mov_to_reg-asm_mov_from_reg,r20
    ;;
    or r19=r19,r22 // calc pval
    shladd r17=r18,4,r26
    adds r30=vmx_resume_to_guest-asm_mov_from_reg,r20
    ;;
    mov b0=r17
    br.many b0
END(vmx_asm_thash)
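
// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original file): the short-format VHPT
// hash computed above for the guest thash, expressed in C.  guest_thash() is
// a hypothetical helper name; VRN_MASK is assumed to be the usual
// top-three-bits (virtual region number) mask used above.
//
//   typedef unsigned long long u64;
//
//   #define VRN_MASK 0xe000000000000000ULL   /* assumed value of VRN_MASK */
//
//   static u64 guest_thash(u64 pta, u64 rr_ps, u64 vaddr)
//   {
//       u64 pta_size    = (pta >> 2) & 0x3f;
//       u64 vhpt_offset = ((vaddr >> rr_ps) << 3) & ((1ULL << pta_size) - 1);
//       u64 vhpt_base   = (vaddr & VRN_MASK)
//                       | (((pta << 3) >> (pta_size + 3)) << pta_size);
//       return vhpt_base | vhpt_offset;      /* "pval" returned in r19 */
//   }
// ---------------------------------------------------------------------------
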
#define MOV_TO_REG0 \
{; \
    nop.b 0x0; \
    nop.b 0x0; \
    nop.b 0x0; \
    ;; \
};

#define MOV_TO_REG(n) \
{; \
    mov r##n##=r19; \
    mov b0=r30; \
    br.sptk.many b0; \
    ;; \
};

#define MOV_FROM_REG(n) \
{; \
    mov r19=r##n##; \
    mov b0=r30; \
    br.sptk.many b0; \
    ;; \
};

#define MOV_TO_BANK0_REG(n) \
ENTRY_MIN_ALIGN(asm_mov_to_bank0_reg##n##); \
{; \
    mov r26=r2; \
    mov r2=r19; \
    bsw.1; \
    ;; \
}; \
{; \
    mov r##n##=r2; \
    nop.b 0x0; \
    bsw.0; \
    ;; \
}; \
{; \
    mov r2=r26; \
    mov b0=r30; \
    br.sptk.many b0; \
    ;; \
}; \
END(asm_mov_to_bank0_reg##n##)

#define MOV_FROM_BANK0_REG(n) \
ENTRY_MIN_ALIGN(asm_mov_from_bank0_reg##n##); \
{; \
    mov r26=r2; \
    nop.b 0x0; \
    bsw.1; \
    ;; \
}; \
{; \
    mov r2=r##n##; \
    nop.b 0x0; \
    bsw.0; \
    ;; \
}; \
{; \
    mov r19=r2; \
    mov r2=r26; \
    mov b0=r30; \
}; \
{; \
    nop.b 0x0; \
    nop.b 0x0; \
    br.sptk.many b0; \
    ;; \
}; \
END(asm_mov_from_bank0_reg##n##)

#define JMP_TO_MOV_TO_BANK0_REG(n) \
{; \
    nop.b 0x0; \
    nop.b 0x0; \
    br.sptk.many asm_mov_to_bank0_reg##n##; \
    ;; \
}

#define JMP_TO_MOV_FROM_BANK0_REG(n) \
{; \
    nop.b 0x0; \
    nop.b 0x0; \
    br.sptk.many asm_mov_from_bank0_reg##n##; \
    ;; \
}
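
// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original file): each MOV_TO_REG /
// MOV_FROM_REG / JMP_TO_* expansion is a single 16-byte bundle, so the fast
// paths above reach the handler for general register N by plain address
// arithmetic ("shladd rX=rN,4,r20").  Banked registers r16-r31 need the
// bsw.1/bsw.0 detour, hence the JMP_TO_* stubs that branch to the
// MOV_*_BANK0_REG entries.  The helper name reg_handler_addr() is
// hypothetical.
//
//   typedef unsigned long long u64;
//
//   /* base is asm_mov_from_reg or asm_mov_to_reg; regnum is 0..127 */
//   static u64 reg_handler_addr(u64 base, unsigned int regnum)
//   {
//       return base + ((u64)regnum << 4);   /* one 16-byte bundle per entry */
//   }
// ---------------------------------------------------------------------------
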
MOV_FROM_BANK0_REG(16)
MOV_FROM_BANK0_REG(17)
MOV_FROM_BANK0_REG(18)
MOV_FROM_BANK0_REG(19)
MOV_FROM_BANK0_REG(20)
MOV_FROM_BANK0_REG(21)
MOV_FROM_BANK0_REG(22)
MOV_FROM_BANK0_REG(23)
MOV_FROM_BANK0_REG(24)
MOV_FROM_BANK0_REG(25)
MOV_FROM_BANK0_REG(26)
MOV_FROM_BANK0_REG(27)
MOV_FROM_BANK0_REG(28)
MOV_FROM_BANK0_REG(29)
MOV_FROM_BANK0_REG(30)
MOV_FROM_BANK0_REG(31)

// mov from reg table
// r19: value, r30: return address
ENTRY(asm_mov_from_reg)
    MOV_FROM_REG(0)
    MOV_FROM_REG(1)
    MOV_FROM_REG(2)
    MOV_FROM_REG(3)
    MOV_FROM_REG(4)
    MOV_FROM_REG(5)
    MOV_FROM_REG(6)
    MOV_FROM_REG(7)
    MOV_FROM_REG(8)
    MOV_FROM_REG(9)
    MOV_FROM_REG(10)
    MOV_FROM_REG(11)
    MOV_FROM_REG(12)
    MOV_FROM_REG(13)
    MOV_FROM_REG(14)
    MOV_FROM_REG(15)
    JMP_TO_MOV_FROM_BANK0_REG(16)
    JMP_TO_MOV_FROM_BANK0_REG(17)
    JMP_TO_MOV_FROM_BANK0_REG(18)
    JMP_TO_MOV_FROM_BANK0_REG(19)
    JMP_TO_MOV_FROM_BANK0_REG(20)
    JMP_TO_MOV_FROM_BANK0_REG(21)
    JMP_TO_MOV_FROM_BANK0_REG(22)
    JMP_TO_MOV_FROM_BANK0_REG(23)
    JMP_TO_MOV_FROM_BANK0_REG(24)
    JMP_TO_MOV_FROM_BANK0_REG(25)
    JMP_TO_MOV_FROM_BANK0_REG(26)
    JMP_TO_MOV_FROM_BANK0_REG(27)
    JMP_TO_MOV_FROM_BANK0_REG(28)
    JMP_TO_MOV_FROM_BANK0_REG(29)
    JMP_TO_MOV_FROM_BANK0_REG(30)
    JMP_TO_MOV_FROM_BANK0_REG(31)
    MOV_FROM_REG(32)
    MOV_FROM_REG(33)
    MOV_FROM_REG(34)
    MOV_FROM_REG(35)
    MOV_FROM_REG(36)
    MOV_FROM_REG(37)
    MOV_FROM_REG(38)
    MOV_FROM_REG(39)
    MOV_FROM_REG(40)
    MOV_FROM_REG(41)
    MOV_FROM_REG(42)
    MOV_FROM_REG(43)
    MOV_FROM_REG(44)
    MOV_FROM_REG(45)
    MOV_FROM_REG(46)
    MOV_FROM_REG(47)
    MOV_FROM_REG(48)
    MOV_FROM_REG(49)
    MOV_FROM_REG(50)
    MOV_FROM_REG(51)
    MOV_FROM_REG(52)
    MOV_FROM_REG(53)
    MOV_FROM_REG(54)
    MOV_FROM_REG(55)
    MOV_FROM_REG(56)
    MOV_FROM_REG(57)
    MOV_FROM_REG(58)
    MOV_FROM_REG(59)
    MOV_FROM_REG(60)
    MOV_FROM_REG(61)
    MOV_FROM_REG(62)
    MOV_FROM_REG(63)
    MOV_FROM_REG(64)
    MOV_FROM_REG(65)
    MOV_FROM_REG(66)
    MOV_FROM_REG(67)
    MOV_FROM_REG(68)
    MOV_FROM_REG(69)
    MOV_FROM_REG(70)
    MOV_FROM_REG(71)
    MOV_FROM_REG(72)
    MOV_FROM_REG(73)
    MOV_FROM_REG(74)
    MOV_FROM_REG(75)
    MOV_FROM_REG(76)
    MOV_FROM_REG(77)
    MOV_FROM_REG(78)
    MOV_FROM_REG(79)
    MOV_FROM_REG(80)
    MOV_FROM_REG(81)
    MOV_FROM_REG(82)
    MOV_FROM_REG(83)
    MOV_FROM_REG(84)
    MOV_FROM_REG(85)
    MOV_FROM_REG(86)
    MOV_FROM_REG(87)
    MOV_FROM_REG(88)
    MOV_FROM_REG(89)
    MOV_FROM_REG(90)
    MOV_FROM_REG(91)
    MOV_FROM_REG(92)
    MOV_FROM_REG(93)
    MOV_FROM_REG(94)
    MOV_FROM_REG(95)
    MOV_FROM_REG(96)
    MOV_FROM_REG(97)
    MOV_FROM_REG(98)
    MOV_FROM_REG(99)
    MOV_FROM_REG(100)
    MOV_FROM_REG(101)
    MOV_FROM_REG(102)
    MOV_FROM_REG(103)
    MOV_FROM_REG(104)
    MOV_FROM_REG(105)
    MOV_FROM_REG(106)
    MOV_FROM_REG(107)
    MOV_FROM_REG(108)
    MOV_FROM_REG(109)
    MOV_FROM_REG(110)
    MOV_FROM_REG(111)
    MOV_FROM_REG(112)
    MOV_FROM_REG(113)
    MOV_FROM_REG(114)
    MOV_FROM_REG(115)
    MOV_FROM_REG(116)
    MOV_FROM_REG(117)
    MOV_FROM_REG(118)
    MOV_FROM_REG(119)
    MOV_FROM_REG(120)
    MOV_FROM_REG(121)
    MOV_FROM_REG(122)
    MOV_FROM_REG(123)
    MOV_FROM_REG(124)
    MOV_FROM_REG(125)
    MOV_FROM_REG(126)
    MOV_FROM_REG(127)
END(asm_mov_from_reg)

/* must be in bank 0
 * parameter:
 * r31: pr
 * r24: b0
 */
ENTRY(vmx_resume_to_guest)
    mov r16=cr.ipsr
    movl r20=__vsa_base
    ;;
    ld8 r20=[r20]
    adds r19=IA64_VPD_BASE_OFFSET,r21
    ;;
    ld8 r25=[r19]
    extr.u r17=r16,IA64_PSR_RI_BIT,2
    tbit.nz p6,p7=r16,IA64_PSR_RI_BIT+1
    ;;
    (p6) mov r18=cr.iip
    (p6) mov r17=r0
    ;;
    (p6) add r18=0x10,r18
    (p7) add r17=1,r17
    ;;
    (p6) mov cr.iip=r18
    dep r16=r17,r16,IA64_PSR_RI_BIT,2
    ;;
    mov cr.ipsr=r16
    adds r19=VPD_VPSR_START_OFFSET,r25
    add r28=PAL_VPS_RESUME_NORMAL,r20
    add r29=PAL_VPS_RESUME_HANDLER,r20
    ;;
    ld8 r19=[r19]
    mov b0=r29
    cmp.ne p6,p7=r0,r0
    ;;
    tbit.z p6,p7=r19,IA64_PSR_IC_BIT // p7=vpsr.ic
    ;;
    (p6) ld8 r26=[r25]
    (p7) mov b0=r28
    mov pr=r31,-2
    br.sptk.many b0 // call pal service
    ;;
END(vmx_resume_to_guest)
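
// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original file): how the branch target
// above is selected.  With vpsr.ic set, the fast path resumes through
// PAL_VPS_RESUME_NORMAL; with vpsr.ic clear it goes through
// PAL_VPS_RESUME_HANDLER (r26 is preloaded with the first word of the VPD for
// that entry point).  The PAL_VPS_RESUME_* offsets are the same constants
// used above; select_resume_entry() is a hypothetical helper name and assumes
// those definitions are in scope.
//
//   typedef unsigned long long u64;
//
//   static u64 select_resume_entry(u64 vsa_base, u64 vpsr)
//   {
//       if (vpsr & (1ULL << IA64_PSR_IC_BIT))
//           return vsa_base + PAL_VPS_RESUME_NORMAL;
//       return vsa_base + PAL_VPS_RESUME_HANDLER;
//   }
// ---------------------------------------------------------------------------
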
MOV_TO_BANK0_REG(16)
MOV_TO_BANK0_REG(17)
MOV_TO_BANK0_REG(18)
MOV_TO_BANK0_REG(19)
MOV_TO_BANK0_REG(20)
MOV_TO_BANK0_REG(21)
MOV_TO_BANK0_REG(22)
MOV_TO_BANK0_REG(23)
MOV_TO_BANK0_REG(24)
MOV_TO_BANK0_REG(25)
MOV_TO_BANK0_REG(26)
MOV_TO_BANK0_REG(27)
MOV_TO_BANK0_REG(28)
MOV_TO_BANK0_REG(29)
MOV_TO_BANK0_REG(30)
MOV_TO_BANK0_REG(31)

// mov to reg table
// r19: value, r30: return address
ENTRY(asm_mov_to_reg)
    MOV_TO_REG0
    MOV_TO_REG(1)
    MOV_TO_REG(2)
    MOV_TO_REG(3)
    MOV_TO_REG(4)
    MOV_TO_REG(5)
    MOV_TO_REG(6)
    MOV_TO_REG(7)
    MOV_TO_REG(8)
    MOV_TO_REG(9)
    MOV_TO_REG(10)
    MOV_TO_REG(11)
    MOV_TO_REG(12)
    MOV_TO_REG(13)
    MOV_TO_REG(14)
    MOV_TO_REG(15)
    JMP_TO_MOV_TO_BANK0_REG(16)
    JMP_TO_MOV_TO_BANK0_REG(17)
    JMP_TO_MOV_TO_BANK0_REG(18)
    JMP_TO_MOV_TO_BANK0_REG(19)
    JMP_TO_MOV_TO_BANK0_REG(20)
    JMP_TO_MOV_TO_BANK0_REG(21)
    JMP_TO_MOV_TO_BANK0_REG(22)
    JMP_TO_MOV_TO_BANK0_REG(23)
    JMP_TO_MOV_TO_BANK0_REG(24)
    JMP_TO_MOV_TO_BANK0_REG(25)
    JMP_TO_MOV_TO_BANK0_REG(26)
    JMP_TO_MOV_TO_BANK0_REG(27)
    JMP_TO_MOV_TO_BANK0_REG(28)
    JMP_TO_MOV_TO_BANK0_REG(29)
    JMP_TO_MOV_TO_BANK0_REG(30)
    JMP_TO_MOV_TO_BANK0_REG(31)
    MOV_TO_REG(32)
    MOV_TO_REG(33)
    MOV_TO_REG(34)
    MOV_TO_REG(35)
    MOV_TO_REG(36)
    MOV_TO_REG(37)
    MOV_TO_REG(38)
    MOV_TO_REG(39)
    MOV_TO_REG(40)
    MOV_TO_REG(41)
    MOV_TO_REG(42)
    MOV_TO_REG(43)
    MOV_TO_REG(44)
    MOV_TO_REG(45)
    MOV_TO_REG(46)
    MOV_TO_REG(47)
    MOV_TO_REG(48)
    MOV_TO_REG(49)
    MOV_TO_REG(50)
    MOV_TO_REG(51)
    MOV_TO_REG(52)
    MOV_TO_REG(53)
    MOV_TO_REG(54)
    MOV_TO_REG(55)
    MOV_TO_REG(56)
    MOV_TO_REG(57)
    MOV_TO_REG(58)
    MOV_TO_REG(59)
    MOV_TO_REG(60)
    MOV_TO_REG(61)
    MOV_TO_REG(62)
    MOV_TO_REG(63)
    MOV_TO_REG(64)
    MOV_TO_REG(65)
    MOV_TO_REG(66)
    MOV_TO_REG(67)
    MOV_TO_REG(68)
    MOV_TO_REG(69)
    MOV_TO_REG(70)
    MOV_TO_REG(71)
    MOV_TO_REG(72)
    MOV_TO_REG(73)
    MOV_TO_REG(74)
    MOV_TO_REG(75)
    MOV_TO_REG(76)
    MOV_TO_REG(77)
    MOV_TO_REG(78)
    MOV_TO_REG(79)
    MOV_TO_REG(80)
    MOV_TO_REG(81)
    MOV_TO_REG(82)
    MOV_TO_REG(83)
    MOV_TO_REG(84)
    MOV_TO_REG(85)
    MOV_TO_REG(86)
    MOV_TO_REG(87)
    MOV_TO_REG(88)
    MOV_TO_REG(89)
    MOV_TO_REG(90)
    MOV_TO_REG(91)
    MOV_TO_REG(92)
    MOV_TO_REG(93)
    MOV_TO_REG(94)
    MOV_TO_REG(95)
    MOV_TO_REG(96)
    MOV_TO_REG(97)
    MOV_TO_REG(98)
    MOV_TO_REG(99)
    MOV_TO_REG(100)
    MOV_TO_REG(101)
    MOV_TO_REG(102)
    MOV_TO_REG(103)
    MOV_TO_REG(104)
    MOV_TO_REG(105)
    MOV_TO_REG(106)
    MOV_TO_REG(107)
    MOV_TO_REG(108)
    MOV_TO_REG(109)
    MOV_TO_REG(110)
    MOV_TO_REG(111)
    MOV_TO_REG(112)
    MOV_TO_REG(113)
    MOV_TO_REG(114)
    MOV_TO_REG(115)
    MOV_TO_REG(116)
    MOV_TO_REG(117)
    MOV_TO_REG(118)
    MOV_TO_REG(119)
    MOV_TO_REG(120)
    MOV_TO_REG(121)
    MOV_TO_REG(122)
    MOV_TO_REG(123)
    MOV_TO_REG(124)
    MOV_TO_REG(125)
    MOV_TO_REG(126)
    MOV_TO_REG(127)
END(asm_mov_to_reg)