ia64/xen-unstable

view xen/arch/ia64/vmx/vmx_entry.S @ 8370:2d5c57be196d

Remove some unused VTI code segments
Signed-off-by: Anthony Xu <anthony.xu@intel.com>

author   djm@kirby.fc.hp.com
date     Thu Dec 15 16:10:22 2005 -0600 (2005-12-15)
parents  760f5e85c706
children cfe20f41f043
/* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
/*
 * vmx_entry.S:
 * Copyright (c) 2005, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Xuefei Xu (Anthony Xu) (anthony.xu@intel.com)
 * Kun Tian (Kevin Tian) (kevin.tian@intel.com)
 */

#ifndef VCPU_TLB_SHIFT
#define VCPU_TLB_SHIFT 22
#endif
#include <linux/config.h>
#include <asm/asmmacro.h>
#include <asm/cache.h>
#include <asm/kregs.h>
#include <asm/offsets.h>
#include <asm/pgtable.h>
#include <asm/percpu.h>
#include <asm/processor.h>
#include <asm/thread_info.h>
#include <asm/unistd.h>
#include <asm/vhpt.h>
#include "vmx_minstate.h"

/*
 * prev_task <- vmx_ia64_switch_to(struct task_struct *next)
 * With Ingo's new scheduler, interrupts are disabled when this routine gets
 * called. The code starting at .map relies on this. The rest of the code
 * doesn't care about the interrupt masking status.
 *
 * Since we allocate the domain stack in the xenheap, there is no need to map
 * the new domain's stack: the whole xenheap is already mapped by a TR. The
 * other job of vmx_ia64_switch_to is to switch to bank0 and update the
 * "current" pointer.
 */
GLOBAL_ENTRY(vmx_ia64_switch_to)
    .prologue
    alloc r16=ar.pfs,1,0,0,0
    DO_SAVE_SWITCH_STACK
    .body

    bsw.0               // switch to bank0, because bank0 r21 is the current pointer
    ;;
    adds r22=IA64_TASK_THREAD_KSP_OFFSET,r13
    movl r25=init_task
    adds r26=IA64_TASK_THREAD_KSP_OFFSET,in0
    ;;
    st8 [r22]=sp        // save kernel stack pointer of old task
    ;;
    /*
     * A TR already maps this task's page, so we can skip mapping it again.
     */
    ld8 sp=[r26]        // load kernel stack pointer of new task
    mov r21=in0         // update "current" application register
    mov r8=r13          // return pointer to previously running task
    mov r13=in0         // set "current" pointer
    ;;
    bsw.1
    ;;
    DO_LOAD_SWITCH_STACK

#ifdef CONFIG_SMP
    sync.i              // ensure "fc"s done by this CPU are visible on other CPUs
#endif
    br.ret.sptk.many rp // boogie on out in new context
END(vmx_ia64_switch_to)
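/*
 * Illustrative sketch, not part of the original file: how a C-level
 * scheduler path might invoke this primitive, following the convention
 * described in the comment above (it returns the previously running
 * task). The wrapper name below is hypothetical.
 *
 *   extern struct task_struct *vmx_ia64_switch_to(struct task_struct *next);
 *
 *   static inline struct task_struct *do_switch(struct task_struct *next)
 *   {
 *       // interrupts must already be disabled here, per the comment above
 *       return vmx_ia64_switch_to(next);
 *   }
 */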
GLOBAL_ENTRY(ia64_leave_nested)
    rsm psr.i
    ;;
    adds r21=PT(PR)+16,r12
    ;;
    lfetch [r21],PT(CR_IPSR)-PT(PR)
    adds r2=PT(B6)+16,r12
    adds r3=PT(R16)+16,r12
    ;;
    lfetch [r21]
    ld8 r28=[r2],8      // load b6
    adds r29=PT(R24)+16,r12

    ld8.fill r16=[r3]
    adds r3=PT(AR_CSD)-PT(R16),r3
    adds r30=PT(AR_CCV)+16,r12
    ;;
    ld8.fill r24=[r29]
    ld8 r15=[r30]       // load ar.ccv
    ;;
    ld8 r29=[r2],16     // load b7
    ld8 r30=[r3],16     // load ar.csd
    ;;
    ld8 r31=[r2],16     // load ar.ssd
    ld8.fill r8=[r3],16
    ;;
    ld8.fill r9=[r2],16
    ld8.fill r10=[r3],PT(R17)-PT(R10)
    ;;
    ld8.fill r11=[r2],PT(R18)-PT(R11)
    ld8.fill r17=[r3],16
    ;;
    ld8.fill r18=[r2],16
    ld8.fill r19=[r3],16
    ;;
    ld8.fill r20=[r2],16
    ld8.fill r21=[r3],16
    mov ar.csd=r30
    mov ar.ssd=r31
    ;;
    rsm psr.i | psr.ic  // initiate turning off of interrupt and interruption collection
    invala              // invalidate ALAT
    ;;
    ld8.fill r22=[r2],24
    ld8.fill r23=[r3],24
    mov b6=r28
    ;;
    ld8.fill r25=[r2],16
    ld8.fill r26=[r3],16
    mov b7=r29
    ;;
    ld8.fill r27=[r2],16
    ld8.fill r28=[r3],16
    ;;
    ld8.fill r29=[r2],16
    ld8.fill r30=[r3],24
    ;;
    ld8.fill r31=[r2],PT(F9)-PT(R31)
    adds r3=PT(F10)-PT(F6),r3
    ;;
    ldf.fill f9=[r2],PT(F6)-PT(F9)
    ldf.fill f10=[r3],PT(F8)-PT(F10)
    ;;
    ldf.fill f6=[r2],PT(F7)-PT(F6)
    ;;
    ldf.fill f7=[r2],PT(F11)-PT(F7)
    ldf.fill f8=[r3],32
    ;;
    srlz.i              // ensure interruption collection is off
    mov ar.ccv=r15
    ;;
    bsw.0               // switch back to bank 0 (no stop bit required beforehand...)
    ;;
    ldf.fill f11=[r2]
//  mov r18=r13
//  mov r21=r13
    adds r16=PT(CR_IPSR)+16,r12
    adds r17=PT(CR_IIP)+16,r12
    ;;
    ld8 r29=[r16],16    // load cr.ipsr
    ld8 r28=[r17],16    // load cr.iip
    ;;
    ld8 r30=[r16],16    // load cr.ifs
    ld8 r25=[r17],16    // load ar.unat
    ;;
    ld8 r26=[r16],16    // load ar.pfs
    ld8 r27=[r17],16    // load ar.rsc
    cmp.eq p9,p0=r0,r0  // set p9 to indicate that we should restore cr.ifs
    ;;
    ld8 r24=[r16],16    // load ar.rnat (may be garbage)
    ld8 r23=[r17],16    // load ar.bspstore (may be garbage)
    ;;
    ld8 r31=[r16],16    // load predicates
    ld8 r22=[r17],16    // load b0
    ;;
    ld8 r19=[r16],16    // load ar.rsc value for "loadrs"
    ld8.fill r1=[r17],16 // load r1
    ;;
    ld8.fill r12=[r16],16
    ld8.fill r13=[r17],16
    ;;
    ld8 r20=[r16],16    // ar.fpsr
    ld8.fill r15=[r17],16
    ;;
    ld8.fill r14=[r16],16
    ld8.fill r2=[r17]
    ;;
    ld8.fill r3=[r16]
    ;;
    mov r16=ar.bsp      // get existing backing store pointer
    ;;
    mov b0=r22
    mov ar.pfs=r26
    mov cr.ifs=r30
    mov cr.ipsr=r29
    mov ar.fpsr=r20
    mov cr.iip=r28
    ;;
    mov ar.rsc=r27
    mov ar.unat=r25
    mov pr=r31,-1
    rfi
END(ia64_leave_nested)
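/*
 * A note on the PT() offsets used throughout this file: in Linux/ia64,
 * from which this code derives, PT() expands to a pt_regs field offset
 * generated at build time, roughly (a sketch, assuming Xen keeps the
 * Linux convention):
 *
 *   #define PT(f)  (IA64_PT_REGS_##f##_OFFSET)
 *
 * so e.g. "adds r2=PT(B6)+16,r12" computes the stack address of
 * pt_regs->b6 (r12 plus the 16-byte scratch area preceding pt_regs).
 */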
GLOBAL_ENTRY(ia64_leave_hypervisor)
    PT_REGS_UNWIND_INFO(0)
    /*
     * work.need_resched etc. mustn't get changed by this CPU before it
     * returns to user- or fsys-mode, hence we disable interrupts early on:
     */
    rsm psr.i
    ;;
    alloc loc0=ar.pfs,0,1,1,0
    adds out0=16,r12
    adds r7=PT(EML_UNAT)+16,r12
    ;;
    ld8 r7=[r7]
    br.call.sptk.many b0=leave_hypervisor_tail
    ;;
    mov ar.pfs=loc0
    mov ar.unat=r7
    adds r20=PT(PR)+16,r12
    ;;
    lfetch [r20],PT(CR_IPSR)-PT(PR)
    adds r2=PT(B6)+16,r12
    adds r3=PT(B7)+16,r12
    ;;
    lfetch [r20]
    ;;
    ld8 r24=[r2],16     /* B6 */
    ld8 r25=[r3],16     /* B7 */
    ;;
    ld8 r26=[r2],16     /* ar_csd */
    ld8 r27=[r3],16     /* ar_ssd */
    mov b6=r24
    ;;
    ld8.fill r8=[r2],16
    ld8.fill r9=[r3],16
    mov b7=r25
    ;;
    mov ar.csd=r26
    mov ar.ssd=r27
    ;;
    ld8.fill r10=[r2],PT(R15)-PT(R10)
    ld8.fill r11=[r3],PT(R14)-PT(R11)
    ;;
    ld8.fill r15=[r2],PT(R16)-PT(R15)
    ld8.fill r14=[r3],PT(R17)-PT(R14)
    ;;
    ld8.fill r16=[r2],16
    ld8.fill r17=[r3],16
    ;;
    ld8.fill r18=[r2],16
    ld8.fill r19=[r3],16
    ;;
    ld8.fill r20=[r2],16
    ld8.fill r21=[r3],16
    ;;
    ld8.fill r22=[r2],16
    ld8.fill r23=[r3],16
    ;;
    ld8.fill r24=[r2],16
    ld8.fill r25=[r3],16
    ;;
    ld8.fill r26=[r2],16
    ld8.fill r27=[r3],16
    ;;
    ld8.fill r28=[r2],16
    ld8.fill r29=[r3],16
    ;;
    ld8.fill r30=[r2],PT(F6)-PT(R30)
    ld8.fill r31=[r3],PT(F7)-PT(R31)
    ;;
    rsm psr.i | psr.ic  // initiate turning off of interrupt and interruption collection
    invala              // invalidate ALAT
    ;;
    ldf.fill f6=[r2],32
    ldf.fill f7=[r3],32
    ;;
    ldf.fill f8=[r2],32
    ldf.fill f9=[r3],32
    ;;
    ldf.fill f10=[r2],32
    ldf.fill f11=[r3],24
    ;;
    ld8.fill r4=[r2],16 // load r4
    ld8.fill r5=[r3],16 // load r5
    ;;
    ld8.fill r6=[r2]    // load r6
    ld8.fill r7=[r3]    // load r7
    ;;
    srlz.i              // ensure interruption collection is off
    ;;
    bsw.0
    ;;
    adds r16=PT(CR_IPSR)+16,r12
    adds r17=PT(CR_IIP)+16,r12
    mov r21=r13         // get current
    ;;
    ld8 r31=[r16],16    // load cr.ipsr
    ld8 r30=[r17],16    // load cr.iip
    ;;
    ld8 r29=[r16],16    // load cr.ifs
    ld8 r28=[r17],16    // load ar.unat
    ;;
    ld8 r27=[r16],16    // load ar.pfs
    ld8 r26=[r17],16    // load ar.rsc
    ;;
    ld8 r25=[r16],16    // load ar.rnat
    ld8 r24=[r17],16    // load ar.bspstore
    ;;
    ld8 r23=[r16],16    // load predicates
    ld8 r22=[r17],16    // load b0
    ;;
    ld8 r20=[r16],16    // load ar.rsc value for "loadrs"
    ld8.fill r1=[r17],16 // load r1
    ;;
    ld8.fill r12=[r16],16 // load r12
    ld8.fill r13=[r17],PT(R2)-PT(R13) // load r13
    ;;
    ld8 r19=[r16],PT(R3)-PT(AR_FPSR) // load ar_fpsr
    ld8.fill r2=[r17],PT(AR_CCV)-PT(R2) // load r2
    ;;
    ld8.fill r3=[r16]   // load r3
    ld8 r18=[r17],PT(RFI_PFS)-PT(AR_CCV) // load ar_ccv
    ;;
    mov ar.fpsr=r19
    mov ar.ccv=r18
    ;;
    //rbs_switch
    // loadrs has already been shifted
    alloc r16=ar.pfs,0,0,0,0 // drop current register frame
    ;;
    mov ar.rsc=r20
    ;;
    loadrs
    ;;
    mov ar.bspstore=r24
    ;;
    ld8 r24=[r17]       // load rfi_pfs
    mov ar.unat=r28
    mov ar.rnat=r25
    mov ar.rsc=r26
    ;;
    mov cr.ipsr=r31
    mov cr.iip=r30
    mov cr.ifs=r29
    cmp.ne p6,p0=r24,r0
    (p6)br.sptk vmx_dorfirfi
    ;;
vmx_dorfirfi_back:
    mov ar.pfs=r27
    adds r18=IA64_VPD_BASE_OFFSET,r21
    ;;
    ld8 r18=[r18]       // vpd
    ;;
    adds r19=VPD(VPSR),r18
    ;;
    ld8 r19=[r19]       // vpsr
    //vsa_sync_write_start
    movl r20=__vsa_base
    ;;
    ld8 r20=[r20]       // read entry point
    mov r25=r18
    ;;
    add r16=PAL_VPS_SYNC_WRITE,r20
    movl r24=switch_rr7 // calculate return address
    ;;
    mov b0=r16
    br.cond.sptk b0     // call the service
    ;;
switch_rr7:
// fall through
GLOBAL_ENTRY(ia64_vmm_entry)
    /*
     * must be at bank 0
     * parameter:
     * r18: vpd
     * r19: vpsr
     * r20: __vsa_base
     * r22: b0
     * r23: predicate
     */
    mov r24=r22
    mov r25=r18
    tbit.nz p1,p2=r19,IA64_PSR_IC_BIT // p1=vpsr.ic
    ;;
    (p1)add r29=PAL_VPS_RESUME_NORMAL,r20
    (p2)add r29=PAL_VPS_RESUME_HANDLER,r20
    ;;
    mov pr=r23,-2
    mov b0=r29
    ;;
    br.cond.sptk b0     // call pal service
END(ia64_leave_hypervisor)
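/*
 * Illustrative sketch, not part of the original file: the entry-point
 * selection performed by ia64_vmm_entry above, expressed in C. Depending
 * on the guest's vpsr.ic bit, execution resumes through one of two PAL
 * VPS services, both located at fixed offsets from __vsa_base:
 *
 *   unsigned long entry = (vpsr & (1UL << IA64_PSR_IC_BIT))
 *                       ? vsa_base + PAL_VPS_RESUME_NORMAL
 *                       : vsa_base + PAL_VPS_RESUME_HANDLER;
 *   // branch to entry with r18 = vpd and the predicates restored from r23
 */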
//r24 rfi_pfs
//r17 address of rfi_pfs
GLOBAL_ENTRY(vmx_dorfirfi)
    mov r16=ar.ec
    movl r20=vmx_dorfirfi_back
    ;;
    // clean rfi_pfs
    st8 [r17]=r0
    mov b0=r20
    // pfs.pec=ar.ec
    dep r24=r16,r24,52,6
    ;;
    mov ar.pfs=r24
    ;;
    br.ret.sptk b0
    ;;
END(vmx_dorfirfi)
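/*
 * Illustrative sketch, not part of the original file: the "dep" above
 * implements the "pfs.pec=ar.ec" comment by depositing the 6-bit epilogue
 * count into bits 52..57 of the saved pfs, i.e. roughly:
 *
 *   pfs = (pfs & ~(0x3fUL << 52)) | ((ec & 0x3fUL) << 52);
 */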
#ifdef XEN_DBL_MAPPING  /* will be removed */

#define VMX_PURGE_RR7   0
#define VMX_INSERT_RR7  1
/*
 * in0: old rr7
 * in1: virtual address of xen image
 * in2: virtual address of vhpt table
 */
GLOBAL_ENTRY(vmx_purge_double_mapping)
    alloc loc1=ar.pfs,5,9,0,0
    mov loc0=rp
    movl r8=1f
    ;;
    movl loc4=KERNEL_TR_PAGE_SHIFT
    movl loc5=VCPU_TLB_SHIFT
    mov loc6=psr
    movl loc7=XEN_RR7_SWITCH_STUB
    mov loc8=(1<<VMX_PURGE_RR7)
    ;;
    srlz.i
    ;;
    rsm psr.i | psr.ic
    ;;
    srlz.i
    ;;
    mov ar.rsc=0
    mov b6=loc7
    mov rp=r8
    ;;
    br.sptk b6
1:
    mov ar.rsc=3
    mov rp=loc0
    ;;
    mov psr.l=loc6
    ;;
    srlz.i
    ;;
    br.ret.sptk rp
END(vmx_purge_double_mapping)
/*
 * in0: new rr7
 * in1: virtual address of xen image
 * in2: virtual address of vhpt table
 * in3: pte entry of xen image
 * in4: pte entry of vhpt table
 */
GLOBAL_ENTRY(vmx_insert_double_mapping)
    alloc loc1=ar.pfs,5,9,0,0
    mov loc0=rp
    movl loc2=IA64_TR_XEN_IN_DOM    // TR number for xen image
    ;;
    movl loc3=IA64_TR_VHPT_IN_DOM   // TR number for vhpt table
    movl r8=1f
    movl loc4=KERNEL_TR_PAGE_SHIFT
    ;;
    movl loc5=VCPU_TLB_SHIFT
    mov loc6=psr
    movl loc7=XEN_RR7_SWITCH_STUB
    ;;
    srlz.i
    ;;
    rsm psr.i | psr.ic
    mov loc8=(1<<VMX_INSERT_RR7)
    ;;
    srlz.i
    ;;
    mov ar.rsc=0
    mov b6=loc7
    mov rp=r8
    ;;
    br.sptk b6
1:
    mov ar.rsc=3
    mov rp=loc0
    ;;
    mov psr.l=loc6
    ;;
    srlz.i
    ;;
    br.ret.sptk rp
END(vmx_insert_double_mapping)
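/*
 * Illustrative sketch, not part of the original file: the C-level
 * signatures implied by the in0..in4 parameter comments above (the
 * argument names are illustrative; only the register convention comes
 * from the source):
 *
 *   extern void vmx_purge_double_mapping(unsigned long old_rr7,
 *                                        unsigned long xen_vaddr,
 *                                        unsigned long vhpt_vaddr);
 *   extern void vmx_insert_double_mapping(unsigned long new_rr7,
 *                                         unsigned long xen_vaddr,
 *                                         unsigned long vhpt_vaddr,
 *                                         unsigned long xen_pte,
 *                                         unsigned long vhpt_pte);
 */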
.align PAGE_SIZE
/*
 * Stub to add double mapping for new domain, which shouldn't
 * access any memory when active. Before reaching this point,
 * both psr.i and psr.ic are cleared and the RSE is in enforced
 * lazy mode.
 *
 * in0: new rr7
 * in1: virtual address of xen image
 * in2: virtual address of vhpt table
 * in3: pte entry of xen image
 * in4: pte entry of vhpt table
 * loc2: TR number for xen image
 * loc3: TR number for vhpt table
 * loc4: page size for xen image
 * loc5: page size of vhpt table
 * loc7: free to use
 * loc8: purge or insert
 * r8: will contain old rid value
 */
GLOBAL_ENTRY(vmx_switch_rr7)
    movl loc7=(7<<61)
    dep.z loc4=loc4,2,6
    dep.z loc5=loc5,2,6
    ;;
    tbit.nz p6,p7=loc8,VMX_INSERT_RR7
    mov r8=rr[loc7]
    ;;
    mov rr[loc7]=in0
    (p6)mov cr.ifa=in1
    (p6)mov cr.itir=loc4
    ;;
    srlz.i
    ;;
    (p6)itr.i itr[loc2]=in3
    (p7)ptr.i in1,loc4
    ;;
    (p6)itr.d dtr[loc2]=in3
    (p7)ptr.d in1,loc4
    ;;
    srlz.i
    ;;
    (p6)mov cr.ifa=in2
    (p6)mov cr.itir=loc5
    ;;
    (p6)itr.d dtr[loc3]=in4
    (p7)ptr.d in2,loc5
    ;;
    srlz.i
    ;;
    mov rr[loc7]=r8
    ;;
    srlz.i
    br.sptk rp
END(vmx_switch_rr7)
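/*
 * Illustrative sketch, not part of the original file: the
 * "dep.z loc4=loc4,2,6" above builds an itir value by placing the 6-bit
 * page-size field at bit 2, i.e. roughly:
 *
 *   itir = (page_shift & 0x3fUL) << 2;   // e.g. KERNEL_TR_PAGE_SHIFT
 */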
.align PAGE_SIZE

#else
/*
 * in0: new rr7
 * in1: virtual address of shared_info
 * in2: virtual address of shared_arch_info (VPD)
 * in3: virtual address of guest_vhpt
 * in4: virtual address of pal code segment
 * r8: will contain old rid value
 */

#define PSR_BITS_TO_CLEAR \
    (IA64_PSR_I | IA64_PSR_IT | IA64_PSR_DT | IA64_PSR_DB | IA64_PSR_RT | \
     IA64_PSR_DD | IA64_PSR_SS | IA64_PSR_RI | IA64_PSR_ED | \
     IA64_PSR_DFL | IA64_PSR_DFH)
#define PSR_BITS_TO_SET IA64_PSR_BN
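/*
 * Illustrative sketch, not part of the original file: how these masks are
 * combined below (see the or/andcm pair ahead of the call to
 * ia64_switch_mode_phys):
 *
 *   new_psr = (psr | PSR_BITS_TO_SET) & ~PSR_BITS_TO_CLEAR;
 */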
//extern void vmx_switch_rr7(unsigned long rid, void *shared_info, void *shared_arch_info, void *guest_vhpt, void *pal_vaddr);
GLOBAL_ENTRY(vmx_switch_rr7)
    // not sure this unwind statement is correct...
    .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(1)
    alloc loc1=ar.pfs,5,9,0,0
1:  {
    mov r28=in0         // copy procedure index
    mov r8=ip           // save ip to compute branch
    mov loc0=rp         // save rp
    };;
    .body
    movl loc2=PERCPU_ADDR
    ;;
    tpa loc2=loc2       // get physical address of per cpu data
    ;;
    dep loc3=0,in1,60,4 // get physical address of shared_info
    dep loc4=0,in2,60,4 // get physical address of shared_arch_info
    dep loc5=0,in3,60,4 // get physical address of guest_vhpt
    dep loc6=0,in4,60,4 // get physical address of pal code
    ;;
    mov loc7=psr        // save psr
    ;;
    mov loc8=ar.rsc     // save RSE configuration
    ;;
    mov ar.rsc=0        // put RSE in enforced lazy, LE mode
    movl r16=PSR_BITS_TO_CLEAR
    movl r17=PSR_BITS_TO_SET
    ;;
    or loc7=loc7,r17    // add in psr the bits to set
    ;;
    andcm r16=loc7,r16  // removes bits to clear from psr
    br.call.sptk.many rp=ia64_switch_mode_phys
1:
    // now in physical mode with psr.i/ic off so do rr7 switch
    dep r16=-1,r0,61,3
    ;;
    mov rr[r16]=in0
    srlz.d
    ;;
    rsm 0x6000
    ;;
    srlz.d

    // re-pin mappings for kernel text and data
    mov r18=KERNEL_TR_PAGE_SHIFT<<2
    movl r17=KERNEL_START
    ;;
    ptr.i r17,r18
    ptr.d r17,r18
    ;;
    mov cr.itir=r18
    mov cr.ifa=r17
    mov r16=IA64_TR_KERNEL
    //mov r3=ip
    movl r25=PAGE_KERNEL
    ;;
    dep r2=0,r3,0,KERNEL_TR_PAGE_SHIFT
    ;;
    or r18=r2,r25
    ;;
    srlz.i
    ;;
    itr.i itr[r16]=r18
    ;;
    itr.d dtr[r16]=r18
    ;;

    // re-pin mappings for per-cpu data
    movl r22=PERCPU_ADDR
    ;;
    mov r24=IA64_TR_PERCPU_DATA
    or loc2=r25,loc2    // construct PA | page properties
    mov r23=PERCPU_PAGE_SHIFT<<2
    ;;
    ptr.d r22,r23
    ;;
    mov cr.itir=r23
    mov cr.ifa=r22
    ;;
    itr.d dtr[r24]=loc2 // wire in new mapping...
    ;;

#if 0
    // re-pin mappings for shared_info
    mov r24=IA64_TR_SHARED_INFO
    movl r25=__pgprot(__DIRTY_BITS | _PAGE_PL_2 | _PAGE_AR_RW)
    ;;
    or loc3=r25,loc3    // construct PA | page properties
    mov r23=PAGE_SHIFT<<2
    ;;
    ptr.d in1,r23
    ;;
    mov cr.itir=r23
    mov cr.ifa=in1
    ;;
    itr.d dtr[r24]=loc3 // wire in new mapping...
    ;;
    // re-pin mappings for shared_arch_info
    mov r24=IA64_TR_ARCH_INFO
    or loc4=r25,loc4    // construct PA | page properties
    mov r23=PAGE_SHIFT<<2
    ;;
    ptr.d in2,r23
    ;;
    mov cr.itir=r23
    mov cr.ifa=in2
    ;;
    itr.d dtr[r24]=loc4 // wire in new mapping...
    ;;
#endif

    // re-pin mappings for guest_vhpt
    mov r24=IA64_TR_PERVP_VHPT
    movl r25=PAGE_KERNEL
    ;;
    or loc5=r25,loc5    // construct PA | page properties
    mov r23=IA64_GRANULE_SHIFT<<2
    ;;
    ptr.d in3,r23
    ;;
    mov cr.itir=r23
    mov cr.ifa=in3
    ;;
    itr.d dtr[r24]=loc5 // wire in new mapping...
    ;;

    // re-pin mappings for PAL code section
    mov r24=IA64_TR_PALCODE
    or loc6=r25,loc6    // construct PA | page properties
    mov r23=IA64_GRANULE_SHIFT<<2
    ;;
    ptr.i in4,r23
    ;;
    mov cr.itir=r23
    mov cr.ifa=in4
    ;;
    itr.i itr[r24]=loc6 // wire in new mapping...
    ;;

    // done, switch back to virtual and return
    mov r16=loc7        // r16 = original psr
    br.call.sptk.many rp=ia64_switch_mode_virt // return to virtual mode
    mov ar.pfs=loc1
    mov rp=loc0
    ;;
    mov ar.rsc=loc8     // restore RSE configuration
    srlz.d              // serialize restoration of psr.l
    br.ret.sptk.many rp
END(vmx_switch_rr7)
#endif
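/*
 * Illustrative sketch, not part of the original file: a hypothetical call
 * site for the prototype commented above, switching region 7 and
 * re-pinning the translations listed in the parameter comment (argument
 * names are illustrative):
 *
 *   vmx_switch_rr7(new_rid, shared_info_va, vpd_va, guest_vhpt_va,
 *                  pal_code_va);
 */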