xen/arch/ia64/vmx/vmx_entry.S @ 10520:c4b68afe97d3 (ia64/xen-unstable)

[IA64] make PAL_VPS_RESUME_HANDLER comply with VTI-spec

Signed-off-by: Anthony Xu <anthony.xu@intel.com>
Author: awilliam@xenbuild.aw
Date:   Wed Jun 28 07:51:52 2006 -0600
/* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
/*
 * vmx_entry.S:
 * Copyright (c) 2005, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Xuefei Xu (Anthony Xu) (anthony.xu@intel.com)
 * Kun Tian (Kevin Tian) (kevin.tian@intel.com)
 */

#ifndef VCPU_TLB_SHIFT
#define VCPU_TLB_SHIFT 22
#endif
#include <linux/config.h>
#include <asm/asmmacro.h>
#include <asm/cache.h>
#include <asm/kregs.h>
#include <asm/offsets.h>
#include <asm/pgtable.h>
#include <asm/percpu.h>
#include <asm/processor.h>
#include <asm/thread_info.h>
#include <asm/unistd.h>
#include <asm/vhpt.h>
#include <asm/vmmu.h>
#include "vmx_minstate.h"
GLOBAL_ENTRY(ia64_leave_nested)
    rsm psr.i
    ;;
    adds r21=PT(PR)+16,r12
    ;;
    lfetch [r21],PT(CR_IPSR)-PT(PR)
    adds r2=PT(B6)+16,r12
    adds r3=PT(R16)+16,r12
    ;;
    lfetch [r21]
    ld8 r28=[r2],8    // load b6
    adds r29=PT(R24)+16,r12

    ld8.fill r16=[r3]
    adds r3=PT(AR_CSD)-PT(R16),r3
    adds r30=PT(AR_CCV)+16,r12
    ;;
    ld8.fill r24=[r29]
    ld8 r15=[r30]    // load ar.ccv
    ;;
    ld8 r29=[r2],16    // load b7
    ld8 r30=[r3],16    // load ar.csd
    ;;
    ld8 r31=[r2],16    // load ar.ssd
    ld8.fill r8=[r3],16
    ;;
    ld8.fill r9=[r2],16
    ld8.fill r10=[r3],PT(R17)-PT(R10)
    ;;
    ld8.fill r11=[r2],PT(R18)-PT(R11)
    ld8.fill r17=[r3],16
    ;;
    ld8.fill r18=[r2],16
    ld8.fill r19=[r3],16
    ;;
    ld8.fill r20=[r2],16
    ld8.fill r21=[r3],16
    mov ar.csd=r30
    mov ar.ssd=r31
    ;;
    rsm psr.i | psr.ic    // initiate turning off of interrupt and interruption collection
    invala    // invalidate ALAT
    ;;
    ld8.fill r22=[r2],24
    ld8.fill r23=[r3],24
    mov b6=r28
    ;;
    ld8.fill r25=[r2],16
    ld8.fill r26=[r3],16
    mov b7=r29
    ;;
    ld8.fill r27=[r2],16
    ld8.fill r28=[r3],16
    ;;
    ld8.fill r29=[r2],16
    ld8.fill r30=[r3],24
    ;;
    ld8.fill r31=[r2],PT(F9)-PT(R31)
    adds r3=PT(F10)-PT(F6),r3
    ;;
    ldf.fill f9=[r2],PT(F6)-PT(F9)
    ldf.fill f10=[r3],PT(F8)-PT(F10)
    ;;
    ldf.fill f6=[r2],PT(F7)-PT(F6)
    ;;
    ldf.fill f7=[r2],PT(F11)-PT(F7)
    ldf.fill f8=[r3],32
    ;;
    srlz.i    // ensure interruption collection is off
    mov ar.ccv=r15
    ;;
    bsw.0    // switch back to bank 0 (no stop bit required beforehand...)
    ;;
    ldf.fill f11=[r2]
    // mov r18=r13
    // mov r21=r13
    adds r16=PT(CR_IPSR)+16,r12
    adds r17=PT(CR_IIP)+16,r12
    ;;
    ld8 r29=[r16],16    // load cr.ipsr
    ld8 r28=[r17],16    // load cr.iip
    ;;
    ld8 r30=[r16],16    // load cr.ifs
    ld8 r25=[r17],16    // load ar.unat
    ;;
    ld8 r26=[r16],16    // load ar.pfs
    ld8 r27=[r17],16    // load ar.rsc
    cmp.eq p9,p0=r0,r0    // set p9 to indicate that we should restore cr.ifs
    ;;
    ld8 r24=[r16],16    // load ar.rnat (may be garbage)
    ld8 r23=[r17],16    // load ar.bspstore (may be garbage)
    ;;
    ld8 r31=[r16],16    // load predicates
    ld8 r22=[r17],16    // load b0
    ;;
    ld8 r19=[r16],16    // load ar.rsc value for "loadrs"
    ld8.fill r1=[r17],16    // load r1
    ;;
    ld8.fill r12=[r16],16
    ld8.fill r13=[r17],16
    ;;
    ld8 r20=[r16],16    // ar.fpsr
    ld8.fill r15=[r17],16
    ;;
    ld8.fill r14=[r16],16
    ld8.fill r2=[r17]
    ;;
    ld8.fill r3=[r16]
    ;;
    mov r16=ar.bsp    // get existing backing store pointer
    ;;
    mov b0=r22
    mov ar.pfs=r26
    mov cr.ifs=r30
    mov cr.ipsr=r29
    mov ar.fpsr=r20
    mov cr.iip=r28
    ;;
    mov ar.rsc=r27
    mov ar.unat=r25
    mov pr=r31,-1
    rfi
END(ia64_leave_nested)

GLOBAL_ENTRY(ia64_leave_hypervisor)
    PT_REGS_UNWIND_INFO(0)
    /*
     * work.need_resched etc. mustn't get changed by this CPU before it returns to
     * user- or fsys-mode, hence we disable interrupts early on:
     */
    rsm psr.i
    ;;
    alloc loc0=ar.pfs,0,1,1,0
    adds out0=16,r12
    adds r7 = PT(EML_UNAT)+16,r12
    ;;
    ld8 r7 = [r7]
    br.call.sptk.many b0=leave_hypervisor_tail
    ;;
    mov ar.pfs=loc0
    mov ar.unat=r7
    adds r20=PT(PR)+16,r12
    ;;
    lfetch [r20],PT(CR_IPSR)-PT(PR)
    adds r2 = PT(B6)+16,r12
    adds r3 = PT(B7)+16,r12
    ;;
    lfetch [r20]
    ;;
    ld8 r24=[r2],16    /* B6 */
    ld8 r25=[r3],16    /* B7 */
    ;;
    ld8 r26=[r2],16    /* ar_csd */
    ld8 r27=[r3],16    /* ar_ssd */
    mov b6 = r24
    ;;
    ld8.fill r8=[r2],16
    ld8.fill r9=[r3],16
    mov b7 = r25
    ;;
    mov ar.csd = r26
    mov ar.ssd = r27
    ;;
    ld8.fill r10=[r2],PT(R15)-PT(R10)
    ld8.fill r11=[r3],PT(R14)-PT(R11)
    ;;
    ld8.fill r15=[r2],PT(R16)-PT(R15)
    ld8.fill r14=[r3],PT(R17)-PT(R14)
    ;;
    ld8.fill r16=[r2],16
    ld8.fill r17=[r3],16
    ;;
    ld8.fill r18=[r2],16
    ld8.fill r19=[r3],16
    ;;
    ld8.fill r20=[r2],16
    ld8.fill r21=[r3],16
    ;;
    ld8.fill r22=[r2],16
    ld8.fill r23=[r3],16
    ;;
    ld8.fill r24=[r2],16
    ld8.fill r25=[r3],16
    ;;
    ld8.fill r26=[r2],16
    ld8.fill r27=[r3],16
    ;;
    ld8.fill r28=[r2],16
    ld8.fill r29=[r3],16
    ;;
    ld8.fill r30=[r2],PT(F6)-PT(R30)
    ld8.fill r31=[r3],PT(F7)-PT(R31)
    ;;
    rsm psr.i | psr.ic    // initiate turning off of interrupt and interruption collection
    invala    // invalidate ALAT
    ;;
    ldf.fill f6=[r2],32
    ldf.fill f7=[r3],32
    ;;
    ldf.fill f8=[r2],32
    ldf.fill f9=[r3],32
    ;;
    ldf.fill f10=[r2],32
    ldf.fill f11=[r3],24
    ;;
    ld8.fill r4=[r2],16    // load r4
    ld8.fill r5=[r3],16    // load r5
    ;;
    ld8.fill r6=[r2]    // load r6
    ld8.fill r7=[r3]    // load r7
    ;;
    srlz.i    // ensure interruption collection is off
    ;;
    bsw.0
    ;;
    adds r16 = PT(CR_IPSR)+16,r12
    adds r17 = PT(CR_IIP)+16,r12
    mov r21=r13    // get current
    ;;
    ld8 r31=[r16],16    // load cr.ipsr
    ld8 r30=[r17],16    // load cr.iip
    ;;
    ld8 r29=[r16],16    // load cr.ifs
    ld8 r28=[r17],16    // load ar.unat
    ;;
    ld8 r27=[r16],16    // load ar.pfs
    ld8 r26=[r17],16    // load ar.rsc
    ;;
    ld8 r25=[r16],16    // load ar.rnat
    ld8 r24=[r17],16    // load ar.bspstore
    ;;
    ld8 r23=[r16],16    // load predicates
    ld8 r22=[r17],16    // load b0
    ;;
    ld8 r20=[r16],16    // load ar.rsc value for "loadrs"
    ld8.fill r1=[r17],16    // load r1
    ;;
    ld8.fill r12=[r16],16    // load r12
    ld8.fill r13=[r17],PT(R2)-PT(R13)    // load r13
    ;;
    ld8 r19=[r16],PT(R3)-PT(AR_FPSR)    // load ar_fpsr
    ld8.fill r2=[r17],PT(AR_CCV)-PT(R2)    // load r2
    ;;
    ld8.fill r3=[r16]    // load r3
    ld8 r18=[r17],PT(RFI_PFS)-PT(AR_CCV)    // load ar_ccv
    ;;
    mov ar.fpsr=r19
    mov ar.ccv=r18
    ;;
    //rbs_switch

    shr.u r18=r20,16
    ;;
    movl r19= THIS_CPU(ia64_phys_stacked_size_p8)
    ;;
    ld4 r19=[r19]

vmx_dont_preserve_current_frame:
/*
 * To prevent leaking bits between the hypervisor and guest domain,
 * we must clear the stacked registers in the "invalid" partition here.
 * Not pretty, but at least it's fast (3.34 registers/cycle on Itanium,
 * 5 registers/cycle on McKinley).
 */
# define pRecurse p6
# define pReturn p7
# define Nregs 14
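// The invalid part of the register stack is cleared by recursion: each call
// to vmx_rse_clear_invalid below allocates a fresh frame, zeroes its local
// registers, and recurses (pRecurse) while more than Nregs*8 bytes of the
// dirty partition remain; pReturn then unwinds the recursion.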
    alloc loc0=ar.pfs,2,Nregs-2,2,0
    shr.u loc1=r18,9    // RNaTslots <= floor(dirtySize / (64*8))
    sub r19=r19,r18    // r19 = (physStackedSize + 8) - dirtySize
    ;;
    mov ar.rsc=r20    // load ar.rsc to be used for "loadrs"
    shladd in0=loc1,3,r19
    mov in1=0
    ;;
    TEXT_ALIGN(32)
vmx_rse_clear_invalid:
    alloc loc0=ar.pfs,2,Nregs-2,2,0
    cmp.lt pRecurse,p0=Nregs*8,in0    // if more than Nregs regs left to clear, (re)curse
    add out0=-Nregs*8,in0
    add out1=1,in1    // increment recursion count
    mov loc1=0
    mov loc2=0
    ;;
    mov loc3=0
    mov loc4=0
    mov loc5=0
    mov loc6=0
    mov loc7=0
(pRecurse) br.call.dptk.few b0=vmx_rse_clear_invalid
    ;;
    mov loc8=0
    mov loc9=0
    cmp.ne pReturn,p0=r0,in1    // if recursion count != 0, we need to do a br.ret
    mov loc10=0
    mov loc11=0
(pReturn) br.ret.dptk.many b0

# undef pRecurse
# undef pReturn

// loadrs has already been shifted
    alloc r16=ar.pfs,0,0,0,0    // drop current register frame
    ;;
    loadrs
    ;;
    mov ar.bspstore=r24
    ;;
    ld8 r24=[r17]    // load rfi_pfs
    mov ar.unat=r28
    mov ar.rnat=r25
    mov ar.rsc=r26
    ;;
    mov cr.ipsr=r31
    mov cr.iip=r30
    mov cr.ifs=r29
    cmp.ne p6,p0=r24,r0
(p6) br.sptk vmx_dorfirfi
    ;;
vmx_dorfirfi_back:
    mov ar.pfs=r27
    adds r18=IA64_VPD_BASE_OFFSET,r21
    ;;
    ld8 r18=[r18]    // vpd
    adds r17=IA64_VCPU_ISR_OFFSET,r21
    ;;
    ld8 r17=[r17]
    adds r19=VPD(VPSR),r18
    ;;
    ld8 r19=[r19]    // vpsr
    //vsa_sync_write_start
    movl r20=__vsa_base
    ;;
    ld8 r20=[r20]    // read entry point
    mov r25=r18
    ;;
    add r16=PAL_VPS_SYNC_WRITE,r20
    movl r24=switch_rr7    // calculate return address
    ;;
    mov b0=r16
    br.cond.sptk b0    // call the service
    ;;
END(ia64_leave_hypervisor)
switch_rr7:
    // fall through
GLOBAL_ENTRY(ia64_vmm_entry)
    /*
     * must be at bank 0
     * parameter:
     *  r17: cr.isr
     *  r18: vpd
     *  r19: vpsr
     *  r20: __vsa_base
     *  r22: b0
     *  r23: predicate
     */
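    // Resume-entry selection (VT-i spec): if vpsr.ic is set, resume the guest
    // through PAL_VPS_RESUME_NORMAL; otherwise inspect cr.isr.ir and use
    // PAL_VPS_RESUME_NORMAL when it is set, PAL_VPS_RESUME_HANDLER when clear.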
    mov r24=r22
    mov r25=r18
    tbit.nz p1,p2 = r19,IA64_PSR_IC_BIT    // p1=vpsr.ic
    ;;
(p1) add r29=PAL_VPS_RESUME_NORMAL,r20
(p1) br.sptk.many ia64_vmm_entry_out
    ;;
    tbit.nz p1,p2 = r17,IA64_ISR_IR_BIT    // p1=cr.isr.ir
    ;;
(p1) add r29=PAL_VPS_RESUME_NORMAL,r20
(p2) add r29=PAL_VPS_RESUME_HANDLER,r20
(p2) ld8 r26=[r25]
    ;;
ia64_vmm_entry_out:
    mov pr=r23,-2
    mov b0=r29
    ;;
    br.cond.sptk b0    // call pal service
END(ia64_vmm_entry)

// r24: rfi_pfs
// r17: address of rfi_pfs
GLOBAL_ENTRY(vmx_dorfirfi)
    mov r16=ar.ec
    movl r20 = vmx_dorfirfi_back
    ;;
    // clean rfi_pfs
    st8 [r17]=r0
    mov b0=r20
    // pfs.pec=ar.ec
    dep r24 = r16, r24, 52, 6
    ;;
    mov ar.pfs=r24
    ;;
    br.ret.sptk b0
    ;;
END(vmx_dorfirfi)

#ifdef XEN_DBL_MAPPING /* will be removed */

#define VMX_PURGE_RR7 0
#define VMX_INSERT_RR7 1
/*
 * in0: old rr7
 * in1: virtual address of xen image
 * in2: virtual address of vhpt table
 */
GLOBAL_ENTRY(vmx_purge_double_mapping)
    alloc loc1 = ar.pfs,5,9,0,0
    mov loc0 = rp
    movl r8 = 1f
    ;;
    movl loc4 = KERNEL_TR_PAGE_SHIFT
    movl loc5 = VCPU_TLB_SHIFT
    mov loc6 = psr
    movl loc7 = XEN_RR7_SWITCH_STUB
    mov loc8 = (1<<VMX_PURGE_RR7)
    ;;
    srlz.i
    ;;
    rsm psr.i | psr.ic
    ;;
    srlz.i
    ;;
    mov ar.rsc = 0
    mov b6 = loc7
    mov rp = r8
    ;;
    br.sptk b6
1:
    mov ar.rsc = 3
    mov rp = loc0
    ;;
    mov psr.l = loc6
    ;;
    srlz.i
    ;;
    br.ret.sptk rp
END(vmx_purge_double_mapping)

/*
 * in0: new rr7
 * in1: virtual address of xen image
 * in2: virtual address of vhpt table
 * in3: pte entry of xen image
 * in4: pte entry of vhpt table
 */
GLOBAL_ENTRY(vmx_insert_double_mapping)
    alloc loc1 = ar.pfs,5,9,0,0
    mov loc0 = rp
    movl loc2 = IA64_TR_XEN_IN_DOM    // TR number for xen image
    ;;
    movl loc3 = IA64_TR_VHPT_IN_DOM    // TR number for vhpt table
    movl r8 = 1f
    movl loc4 = KERNEL_TR_PAGE_SHIFT
    ;;
    movl loc5 = VCPU_TLB_SHIFT
    mov loc6 = psr
    movl loc7 = XEN_RR7_SWITCH_STUB
    ;;
    srlz.i
    ;;
    rsm psr.i | psr.ic
    mov loc8 = (1<<VMX_INSERT_RR7)
    ;;
    srlz.i
    ;;
    mov ar.rsc = 0
    mov b6 = loc7
    mov rp = r8
    ;;
    br.sptk b6
1:
    mov ar.rsc = 3
    mov rp = loc0
    ;;
    mov psr.l = loc6
    ;;
    srlz.i
    ;;
    br.ret.sptk rp
END(vmx_insert_double_mapping)

.align PAGE_SIZE
/*
 * Stub to add double mapping for new domain, which shouldn't
 * access any memory when active. Before reaching this point,
 * both psr.i/ic is cleared and rse is set in lazy mode.
 *
 * in0: new rr7
 * in1: virtual address of xen image
 * in2: virtual address of vhpt table
 * in3: pte entry of xen image
 * in4: pte entry of vhpt table
 * loc2: TR number for xen image
 * loc3: TR number for vhpt table
 * loc4: page size for xen image
 * loc5: page size of vhpt table
 * loc7: free to use
 * loc8: purge or insert
 * r8: will contain old rid value
 */
GLOBAL_ENTRY(vmx_switch_rr7)
    movl loc7 = (7<<61)
    dep.z loc4 = loc4, 2, 6
    dep.z loc5 = loc5, 2, 6
    ;;
    tbit.nz p6,p7=loc8, VMX_INSERT_RR7
    mov r8 = rr[loc7]
    ;;
    mov rr[loc7] = in0
(p6) mov cr.ifa = in1
(p6) mov cr.itir = loc4
    ;;
    srlz.i
    ;;
(p6) itr.i itr[loc2] = in3
(p7) ptr.i in1, loc4
    ;;
(p6) itr.d dtr[loc2] = in3
(p7) ptr.d in1, loc4
    ;;
    srlz.i
    ;;
(p6) mov cr.ifa = in2
(p6) mov cr.itir = loc5
    ;;
(p6) itr.d dtr[loc3] = in4
(p7) ptr.d in2, loc5
    ;;
    srlz.i
    ;;
    mov rr[loc7] = r8
    ;;
    srlz.i
    br.sptk rp
END(vmx_switch_rr7)
.align PAGE_SIZE

#else
/*
 * in0: new rr7
 * in1: virtual address of shared_info
 * in2: virtual address of shared_arch_info (VPD)
 * in3: virtual address of guest_vhpt
 * in4: virtual address of pal code segment
 * r8: will contain old rid value
 */

#define PSR_BITS_TO_CLEAR \
    (IA64_PSR_I | IA64_PSR_IT | IA64_PSR_DT | IA64_PSR_DB | IA64_PSR_RT | \
     IA64_PSR_DD | IA64_PSR_SS | IA64_PSR_RI | IA64_PSR_ED | \
     IA64_PSR_DFL | IA64_PSR_DFH)
#define PSR_BITS_TO_SET IA64_PSR_BN

//extern void vmx_switch_rr7(unsigned long rid, void *shared_info, void *shared_arch_info, void *guest_vhpt, void *pal_vaddr);
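// vmx_switch_rr7 below drops to physical mode, installs the new rr7 value,
// re-pins the translations for the Xen image, per-cpu data, the guest VHPT
// and the PAL code section, and then returns to virtual mode with the
// caller's psr and RSE configuration restored.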

GLOBAL_ENTRY(vmx_switch_rr7)
    // not sure this unwind statement is correct...
    .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(1)
    alloc loc1 = ar.pfs, 5, 9, 0, 0
1: {
    mov r28 = in0    // copy procedure index
    mov r8 = ip      // save ip to compute branch
    mov loc0 = rp    // save rp
    };;
    .body
    movl loc2=PERCPU_ADDR
    ;;
    tpa loc2 = loc2    // get physical address of per-cpu data
    ;;
    dep loc3 = 0,in1,60,4    // get physical address of shared_info
    dep loc4 = 0,in2,60,4    // get physical address of shared_arch_info
    dep loc5 = 0,in3,60,4    // get physical address of guest_vhpt
    dep loc6 = 0,in4,60,4    // get physical address of pal code
    ;;
    mov loc7 = psr    // save psr
    ;;
    mov loc8 = ar.rsc    // save RSE configuration
    ;;
    mov ar.rsc = 0    // put RSE in enforced lazy, LE mode
    movl r16=PSR_BITS_TO_CLEAR
    movl r17=PSR_BITS_TO_SET
    ;;
    or loc7 = loc7,r17    // add in psr the bits to set
    ;;
    andcm r16=loc7,r16    // removes bits to clear from psr
    br.call.sptk.many rp=ia64_switch_mode_phys
1:
    // now in physical mode with psr.i/ic off so do rr7 switch
    dep r16=-1,r0,61,3
    ;;
    mov rr[r16]=in0
    srlz.d
    ;;
    rsm 0x6000
    ;;
    srlz.d

    // re-pin mappings for kernel text and data
    mov r18=KERNEL_TR_PAGE_SHIFT<<2
    movl r17=KERNEL_START
    ;;
    ptr.i r17,r18
    ptr.d r17,r18
    ;;
    mov cr.itir=r18
    mov cr.ifa=r17
    mov r16=IA64_TR_KERNEL
    //mov r3=ip
    movl r25 = PAGE_KERNEL
    ;;
    dep r2=0,r3,0,KERNEL_TR_PAGE_SHIFT
    ;;
    or r18=r2,r25
    ;;
    srlz.i
    ;;
    itr.i itr[r16]=r18
    ;;
    itr.d dtr[r16]=r18
    ;;

    // re-pin mappings for per-cpu data

    movl r22 = PERCPU_ADDR
    ;;
    mov r24=IA64_TR_PERCPU_DATA
    or loc2 = r25,loc2    // construct PA | page properties
    mov r23=PERCPU_PAGE_SHIFT<<2
    ;;
    ptr.d r22,r23
    ;;
    mov cr.itir=r23
    mov cr.ifa=r22
    ;;
    itr.d dtr[r24]=loc2    // wire in new mapping...
    ;;

    // re-pin mappings for guest_vhpt

    mov r24=IA64_TR_PERVP_VHPT
    movl r25=PAGE_KERNEL
    ;;
    or loc5 = r25,loc5    // construct PA | page properties
    mov r23 = VCPU_VHPT_SHIFT <<2
    ;;
    ptr.d in3,r23
    ;;
    mov cr.itir=r23
    mov cr.ifa=in3
    ;;
    itr.d dtr[r24]=loc5    // wire in new mapping...
    ;;

    // re-pin mappings for PAL code section

    mov r24=IA64_TR_PALCODE
    or loc6 = r25,loc6    // construct PA | page properties
    mov r23 = IA64_GRANULE_SHIFT<<2
    ;;
    ptr.i in4,r23
    ;;
    mov cr.itir=r23
    mov cr.ifa=in4
    ;;
    itr.i itr[r24]=loc6    // wire in new mapping...
    ;;

    // done, switch back to virtual and return
    mov r16=loc7    // r16 = original psr
    br.call.sptk.many rp=ia64_switch_mode_virt    // return to virtual mode
    mov ar.pfs = loc1
    mov rp = loc0
    ;;
    mov ar.rsc=loc8    // restore RSE configuration
    srlz.d    // serialize restoration of psr.l
    br.ret.sptk.many rp
END(vmx_switch_rr7)
#endif