ia64/xen-unstable

view xen/arch/ia64/vmx/vmx_entry.S @ 9770:ced37bea0647

[IA64] FPH enabling + cleanup

Move contents of switch_to macro from xensystem.h to context_switch function.
Initialize FPU on all processors. FPH is always enabled in Xen.
Speed up context-switch (a little bit!) by not enabling/disabling FPH.
Cleanup (unused functions/variables/fields, debug printfs...)
vmx_ia64_switch_to removed (was unused).

Signed-off-by: Tristan Gingold <tristan.gingold@bull.net>
author awilliam@xenbuild.aw
date Tue Apr 25 22:35:41 2006 -0600 (2006-04-25)
parents cfe20f41f043
children 70b7d520bda4
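
The cleanup described above touches C code (the context_switch function) that lives outside this file; in C terms the speed-up reads roughly like the sketch below. This is a minimal illustration only: the layout of struct vcpu and the names fph_state, vcpu_used_fph, ia64_save_fpu and ia64_load_fpu are assumed stand-ins, not identifiers from this tree.

    /* With FPH enabled once per CPU at boot, the context switch no longer
     * toggles PSR.dfh around every switch; it only moves the high FP
     * partition (f32-f127) for vCPUs that actually used it. */
    struct fph_state { unsigned long f[96 * 2]; }; /* f32-f127, 16 bytes each */
    struct vcpu { struct fph_state fph; };         /* assumed minimal layout  */

    int  vcpu_used_fph(const struct vcpu *v);      /* assumed: tests PSR.mfh  */
    void ia64_save_fpu(struct fph_state *s);       /* assumed: stf.spill loop */
    void ia64_load_fpu(const struct fph_state *s); /* assumed: ldf.fill loop  */

    static void fph_context_switch(struct vcpu *prev, struct vcpu *next)
    {
        if (vcpu_used_fph(prev))
            ia64_save_fpu(&prev->fph);   /* spill outgoing high FP state */
        if (vcpu_used_fph(next))
            ia64_load_fpu(&next->fph);   /* fill incoming high FP state  */
        /* no psr.dfh enable/disable here: FPH stays enabled in Xen */
    }
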
/* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
/*
 * vmx_entry.S:
 * Copyright (c) 2005, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Xuefei Xu (Anthony Xu) (anthony.xu@intel.com)
 * Kun Tian (Kevin Tian) (kevin.tian@intel.com)
 */

#ifndef VCPU_TLB_SHIFT
#define VCPU_TLB_SHIFT 22
#endif
#include <linux/config.h>
#include <asm/asmmacro.h>
#include <asm/cache.h>
#include <asm/kregs.h>
#include <asm/offsets.h>
#include <asm/pgtable.h>
#include <asm/percpu.h>
#include <asm/processor.h>
#include <asm/thread_info.h>
#include <asm/unistd.h>
#include <asm/vhpt.h>
#include <asm/vmmu.h>
#include "vmx_minstate.h"
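
/*
 * ia64_leave_nested: return path for an interruption taken while already
 * running in the VMM.  Restores the pt_regs frame pointed to by r12,
 * turns off psr.i/ic, switches back to bank 0 and returns via rfi.
 */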
GLOBAL_ENTRY(ia64_leave_nested)
    rsm psr.i
    ;;
    adds r21=PT(PR)+16,r12
    ;;
    lfetch [r21],PT(CR_IPSR)-PT(PR)
    adds r2=PT(B6)+16,r12
    adds r3=PT(R16)+16,r12
    ;;
    lfetch [r21]
    ld8 r28=[r2],8 // load b6
    adds r29=PT(R24)+16,r12

    ld8.fill r16=[r3]
    adds r3=PT(AR_CSD)-PT(R16),r3
    adds r30=PT(AR_CCV)+16,r12
    ;;
    ld8.fill r24=[r29]
    ld8 r15=[r30] // load ar.ccv
    ;;
    ld8 r29=[r2],16 // load b7
    ld8 r30=[r3],16 // load ar.csd
    ;;
    ld8 r31=[r2],16 // load ar.ssd
    ld8.fill r8=[r3],16
    ;;
    ld8.fill r9=[r2],16
    ld8.fill r10=[r3],PT(R17)-PT(R10)
    ;;
    ld8.fill r11=[r2],PT(R18)-PT(R11)
    ld8.fill r17=[r3],16
    ;;
    ld8.fill r18=[r2],16
    ld8.fill r19=[r3],16
    ;;
    ld8.fill r20=[r2],16
    ld8.fill r21=[r3],16
    mov ar.csd=r30
    mov ar.ssd=r31
    ;;
    rsm psr.i | psr.ic // initiate turning off of interrupt and interruption collection
    invala // invalidate ALAT
    ;;
    ld8.fill r22=[r2],24
    ld8.fill r23=[r3],24
    mov b6=r28
    ;;
    ld8.fill r25=[r2],16
    ld8.fill r26=[r3],16
    mov b7=r29
    ;;
    ld8.fill r27=[r2],16
    ld8.fill r28=[r3],16
    ;;
    ld8.fill r29=[r2],16
    ld8.fill r30=[r3],24
    ;;
    ld8.fill r31=[r2],PT(F9)-PT(R31)
    adds r3=PT(F10)-PT(F6),r3
    ;;
    ldf.fill f9=[r2],PT(F6)-PT(F9)
    ldf.fill f10=[r3],PT(F8)-PT(F10)
    ;;
    ldf.fill f6=[r2],PT(F7)-PT(F6)
    ;;
    ldf.fill f7=[r2],PT(F11)-PT(F7)
    ldf.fill f8=[r3],32
    ;;
    srlz.i // ensure interruption collection is off
    mov ar.ccv=r15
    ;;
    bsw.0 // switch back to bank 0 (no stop bit required beforehand...)
    ;;
    ldf.fill f11=[r2]
    // mov r18=r13
    // mov r21=r13
    adds r16=PT(CR_IPSR)+16,r12
    adds r17=PT(CR_IIP)+16,r12
    ;;
    ld8 r29=[r16],16 // load cr.ipsr
    ld8 r28=[r17],16 // load cr.iip
    ;;
    ld8 r30=[r16],16 // load cr.ifs
    ld8 r25=[r17],16 // load ar.unat
    ;;
    ld8 r26=[r16],16 // load ar.pfs
    ld8 r27=[r17],16 // load ar.rsc
    cmp.eq p9,p0=r0,r0 // set p9 to indicate that we should restore cr.ifs
    ;;
    ld8 r24=[r16],16 // load ar.rnat (may be garbage)
    ld8 r23=[r17],16 // load ar.bspstore (may be garbage)
    ;;
    ld8 r31=[r16],16 // load predicates
    ld8 r22=[r17],16 // load b0
    ;;
    ld8 r19=[r16],16 // load ar.rsc value for "loadrs"
    ld8.fill r1=[r17],16 // load r1
    ;;
    ld8.fill r12=[r16],16
    ld8.fill r13=[r17],16
    ;;
    ld8 r20=[r16],16 // ar.fpsr
    ld8.fill r15=[r17],16
    ;;
    ld8.fill r14=[r16],16
    ld8.fill r2=[r17]
    ;;
    ld8.fill r3=[r16]
    ;;
    mov r16=ar.bsp // get existing backing store pointer
    ;;
    mov b0=r22
    mov ar.pfs=r26
    mov cr.ifs=r30
    mov cr.ipsr=r29
    mov ar.fpsr=r20
    mov cr.iip=r28
    ;;
    mov ar.rsc=r27
    mov ar.unat=r25
    mov pr=r31,-1
    rfi
END(ia64_leave_nested)
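
/*
 * ia64_leave_hypervisor: return from the VMM to the guest.  Calls
 * leave_hypervisor_tail, restores the full pt_regs frame (including
 * f6-f11 and the preserved r4-r7), then resumes the guest through the
 * PAL virtualization services via ia64_vmm_entry.
 */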
GLOBAL_ENTRY(ia64_leave_hypervisor)
    PT_REGS_UNWIND_INFO(0)
    /*
     * work.need_resched etc. mustn't get changed by this CPU before it returns to
     * user- or fsys-mode, hence we disable interrupts early on:
     */
    rsm psr.i
    ;;
    alloc loc0=ar.pfs,0,1,1,0
    adds out0=16,r12
    adds r7 = PT(EML_UNAT)+16,r12
    ;;
    ld8 r7 = [r7]
    br.call.sptk.many b0=leave_hypervisor_tail
    ;;
    mov ar.pfs=loc0
    mov ar.unat=r7
    adds r20=PT(PR)+16,r12
    ;;
    lfetch [r20],PT(CR_IPSR)-PT(PR)
    adds r2 = PT(B6)+16,r12
    adds r3 = PT(B7)+16,r12
    ;;
    lfetch [r20]
    ;;
    ld8 r24=[r2],16 /* B6 */
    ld8 r25=[r3],16 /* B7 */
    ;;
    ld8 r26=[r2],16 /* ar_csd */
    ld8 r27=[r3],16 /* ar_ssd */
    mov b6 = r24
    ;;
    ld8.fill r8=[r2],16
    ld8.fill r9=[r3],16
    mov b7 = r25
    ;;
    mov ar.csd = r26
    mov ar.ssd = r27
    ;;
    ld8.fill r10=[r2],PT(R15)-PT(R10)
    ld8.fill r11=[r3],PT(R14)-PT(R11)
    ;;
    ld8.fill r15=[r2],PT(R16)-PT(R15)
    ld8.fill r14=[r3],PT(R17)-PT(R14)
    ;;
    ld8.fill r16=[r2],16
    ld8.fill r17=[r3],16
    ;;
    ld8.fill r18=[r2],16
    ld8.fill r19=[r3],16
    ;;
    ld8.fill r20=[r2],16
    ld8.fill r21=[r3],16
    ;;
    ld8.fill r22=[r2],16
    ld8.fill r23=[r3],16
    ;;
    ld8.fill r24=[r2],16
    ld8.fill r25=[r3],16
    ;;
    ld8.fill r26=[r2],16
    ld8.fill r27=[r3],16
    ;;
    ld8.fill r28=[r2],16
    ld8.fill r29=[r3],16
    ;;
    ld8.fill r30=[r2],PT(F6)-PT(R30)
    ld8.fill r31=[r3],PT(F7)-PT(R31)
    ;;
    rsm psr.i | psr.ic // initiate turning off of interrupt and interruption collection
    invala // invalidate ALAT
    ;;
    ldf.fill f6=[r2],32
    ldf.fill f7=[r3],32
    ;;
    ldf.fill f8=[r2],32
    ldf.fill f9=[r3],32
    ;;
    ldf.fill f10=[r2],32
    ldf.fill f11=[r3],24
    ;;
    ld8.fill r4=[r2],16 // load r4
    ld8.fill r5=[r3],16 // load r5
    ;;
    ld8.fill r6=[r2] // load r6
    ld8.fill r7=[r3] // load r7
    ;;
    srlz.i // ensure interruption collection is off
    ;;
    bsw.0
    ;;
    adds r16 = PT(CR_IPSR)+16,r12
    adds r17 = PT(CR_IIP)+16,r12
    mov r21=r13 // get current
    ;;
    ld8 r31=[r16],16 // load cr.ipsr
    ld8 r30=[r17],16 // load cr.iip
    ;;
    ld8 r29=[r16],16 // load cr.ifs
    ld8 r28=[r17],16 // load ar.unat
    ;;
    ld8 r27=[r16],16 // load ar.pfs
    ld8 r26=[r17],16 // load ar.rsc
    ;;
    ld8 r25=[r16],16 // load ar.rnat
    ld8 r24=[r17],16 // load ar.bspstore
    ;;
    ld8 r23=[r16],16 // load predicates
    ld8 r22=[r17],16 // load b0
    ;;
    ld8 r20=[r16],16 // load ar.rsc value for "loadrs"
    ld8.fill r1=[r17],16 // load r1
    ;;
    ld8.fill r12=[r16],16 // load r12
    ld8.fill r13=[r17],PT(R2)-PT(R13) // load r13
    ;;
    ld8 r19=[r16],PT(R3)-PT(AR_FPSR) // load ar_fpsr
    ld8.fill r2=[r17],PT(AR_CCV)-PT(R2) // load r2
    ;;
    ld8.fill r3=[r16] // load r3
    ld8 r18=[r17],PT(RFI_PFS)-PT(AR_CCV) // load ar_ccv
    ;;
    mov ar.fpsr=r19
    mov ar.ccv=r18
    ;;
    //rbs_switch
    // loadrs has already been shifted
    alloc r16=ar.pfs,0,0,0,0 // drop current register frame
    ;;
    mov ar.rsc=r20
    ;;
    loadrs
    ;;
    mov ar.bspstore=r24
    ;;
    ld8 r24=[r17] // load rfi_pfs
    mov ar.unat=r28
    mov ar.rnat=r25
    mov ar.rsc=r26
    ;;
    mov cr.ipsr=r31
    mov cr.iip=r30
    mov cr.ifs=r29
    cmp.ne p6,p0=r24,r0
(p6) br.sptk vmx_dorfirfi
    ;;
vmx_dorfirfi_back:
    mov ar.pfs=r27
    adds r18=IA64_VPD_BASE_OFFSET,r21
    ;;
    ld8 r18=[r18] // vpd
    ;;
    adds r19=VPD(VPSR),r18
    ;;
    ld8 r19=[r19] // vpsr
    //vsa_sync_write_start
    movl r20=__vsa_base
    ;;
    ld8 r20=[r20] // read entry point
    mov r25=r18
    ;;
    add r16=PAL_VPS_SYNC_WRITE,r20
    movl r24=switch_rr7 // calculate return address
    ;;
    mov b0=r16
    br.cond.sptk b0 // call the service
    ;;
switch_rr7:
    // fall through
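/*
 * ia64_vmm_entry: resume guest execution through the PAL
 * virtualization services -- VPS_RESUME_NORMAL when vpsr.ic is set,
 * VPS_RESUME_HANDLER otherwise.
 */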
GLOBAL_ENTRY(ia64_vmm_entry)
    /*
     * must be at bank 0
     * parameter:
     * r18: vpd
     * r19: vpsr
     * r20: __vsa_base
     * r22: b0
     * r23: predicate
     */
    mov r24=r22
    mov r25=r18
    tbit.nz p1,p2 = r19,IA64_PSR_IC_BIT // p1=vpsr.ic
    ;;
(p1) add r29=PAL_VPS_RESUME_NORMAL,r20
(p2) add r29=PAL_VPS_RESUME_HANDLER,r20
    ;;
    mov pr=r23,-2
    mov b0=r29
    ;;
    br.cond.sptk b0 // call pal service
END(ia64_leave_hypervisor)
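
/*
 * vmx_dorfirfi: taken from ia64_leave_hypervisor when rfi_pfs is
 * non-zero.  Clears rfi_pfs, merges the current ar.ec into the saved
 * pfs value (pfs.pec, bits 52-57) and installs it in ar.pfs before
 * branching back to vmx_dorfirfi_back.
 */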
// r24: rfi_pfs
// r17: address of rfi_pfs
GLOBAL_ENTRY(vmx_dorfirfi)
    mov r16=ar.ec
    movl r20 = vmx_dorfirfi_back
    ;;
    // clean rfi_pfs
    st8 [r17]=r0
    mov b0=r20
    // pfs.pec=ar.ec
    dep r24 = r16, r24, 52, 6
    ;;
    mov ar.pfs=r24
    ;;
    br.ret.sptk b0
    ;;
END(vmx_dorfirfi)

#ifdef XEN_DBL_MAPPING /* will be removed */

#define VMX_PURGE_RR7 0
#define VMX_INSERT_RR7 1
/*
 * in0: old rr7
 * in1: virtual address of xen image
 * in2: virtual address of vhpt table
 */
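// Purge path: branch through the stub at XEN_RR7_SWITCH_STUB with
// psr.i/ic off and the RSE in enforced lazy mode, then restore psr
// and return.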
GLOBAL_ENTRY(vmx_purge_double_mapping)
    alloc loc1 = ar.pfs,5,9,0,0
    mov loc0 = rp
    movl r8 = 1f
    ;;
    movl loc4 = KERNEL_TR_PAGE_SHIFT
    movl loc5 = VCPU_TLB_SHIFT
    mov loc6 = psr
    movl loc7 = XEN_RR7_SWITCH_STUB
    mov loc8 = (1<<VMX_PURGE_RR7)
    ;;
    srlz.i
    ;;
    rsm psr.i | psr.ic
    ;;
    srlz.i
    ;;
    mov ar.rsc = 0
    mov b6 = loc7
    mov rp = r8
    ;;
    br.sptk b6
1:
    mov ar.rsc = 3
    mov rp = loc0
    ;;
    mov psr.l = loc6
    ;;
    srlz.i
    ;;
    br.ret.sptk rp
END(vmx_purge_double_mapping)

/*
 * in0: new rr7
 * in1: virtual address of xen image
 * in2: virtual address of vhpt table
 * in3: pte entry of xen image
 * in4: pte entry of vhpt table
 */
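// Insert path: same stub call as the purge variant above, but with
// loc8 requesting VMX_INSERT_RR7 so vmx_switch_rr7 wires in the new
// translations.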
GLOBAL_ENTRY(vmx_insert_double_mapping)
    alloc loc1 = ar.pfs,5,9,0,0
    mov loc0 = rp
    movl loc2 = IA64_TR_XEN_IN_DOM // TR number for xen image
    ;;
    movl loc3 = IA64_TR_VHPT_IN_DOM // TR number for vhpt table
    movl r8 = 1f
    movl loc4 = KERNEL_TR_PAGE_SHIFT
    ;;
    movl loc5 = VCPU_TLB_SHIFT
    mov loc6 = psr
    movl loc7 = XEN_RR7_SWITCH_STUB
    ;;
    srlz.i
    ;;
    rsm psr.i | psr.ic
    mov loc8 = (1<<VMX_INSERT_RR7)
    ;;
    srlz.i
    ;;
    mov ar.rsc = 0
    mov b6 = loc7
    mov rp = r8
    ;;
    br.sptk b6
1:
    mov ar.rsc = 3
    mov rp = loc0
    ;;
    mov psr.l = loc6
    ;;
    srlz.i
    ;;
    br.ret.sptk rp
END(vmx_insert_double_mapping)

.align PAGE_SIZE
/*
 * Stub to add double mapping for new domain, which shouldn't
 * access any memory when active. Before reaching this point,
 * both psr.i and psr.ic are cleared and the RSE is in enforced
 * lazy mode.
 *
 * in0: new rr7
 * in1: virtual address of xen image
 * in2: virtual address of vhpt table
 * in3: pte entry of xen image
 * in4: pte entry of vhpt table
 * loc2: TR number for xen image
 * loc3: TR number for vhpt table
 * loc4: page size for xen image
 * loc5: page size of vhpt table
 * loc7: free to use
 * loc8: purge or insert
 * r8: will contain old rid value
 */
GLOBAL_ENTRY(vmx_switch_rr7)
    movl loc7 = (7<<61)
    dep.z loc4 = loc4, 2, 6
    dep.z loc5 = loc5, 2, 6
    ;;
    tbit.nz p6,p7=loc8, VMX_INSERT_RR7
    mov r8 = rr[loc7]
    ;;
    mov rr[loc7] = in0
(p6) mov cr.ifa = in1
(p6) mov cr.itir = loc4
    ;;
    srlz.i
    ;;
(p6) itr.i itr[loc2] = in3
(p7) ptr.i in1, loc4
    ;;
(p6) itr.d dtr[loc2] = in3
(p7) ptr.d in1, loc4
    ;;
    srlz.i
    ;;
(p6) mov cr.ifa = in2
(p6) mov cr.itir = loc5
    ;;
(p6) itr.d dtr[loc3] = in4
(p7) ptr.d in2, loc5
    ;;
    srlz.i
    ;;
    mov rr[loc7] = r8
    ;;
    srlz.i
    br.sptk rp
END(vmx_switch_rr7)
.align PAGE_SIZE

#else
/*
 * in0: new rr7
 * in1: virtual address of shared_info
 * in2: virtual address of shared_arch_info (VPD)
 * in3: virtual address of guest_vhpt
 * in4: virtual address of pal code segment
 * r8: will contain old rid value
 */

#define PSR_BITS_TO_CLEAR \
    (IA64_PSR_I | IA64_PSR_IT | IA64_PSR_DT | IA64_PSR_DB | IA64_PSR_RT | \
     IA64_PSR_DD | IA64_PSR_SS | IA64_PSR_RI | IA64_PSR_ED | \
     IA64_PSR_DFL | IA64_PSR_DFH)
#define PSR_BITS_TO_SET IA64_PSR_BN

//extern void vmx_switch_rr7(unsigned long rid, void *shared_info, void *shared_arch_info, void *guest_vhpt, void *pal_vaddr);
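
/*
 * Flow: save psr/ar.rsc, switch to physical mode via
 * ia64_switch_mode_phys, install the new rid in rr7, re-pin the
 * translation registers for the Xen image, the per-cpu data, the
 * guest VHPT and the PAL code, then return to virtual mode via
 * ia64_switch_mode_virt.
 */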
GLOBAL_ENTRY(vmx_switch_rr7)
    // not sure this unwind statement is correct...
    .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(1)
    alloc loc1 = ar.pfs, 5, 9, 0, 0
1:  {
    mov r28 = in0 // copy procedure index
    mov r8 = ip // save ip to compute branch
    mov loc0 = rp // save rp
    };;
    .body
    movl loc2=PERCPU_ADDR
    ;;
    tpa loc2 = loc2 // get physical address of per cpu data
    ;;
    dep loc3 = 0,in1,60,4 // get physical address of shared_info
    dep loc4 = 0,in2,60,4 // get physical address of shared_arch_info
    dep loc5 = 0,in3,60,4 // get physical address of guest_vhpt
    dep loc6 = 0,in4,60,4 // get physical address of pal code
    ;;
    mov loc7 = psr // save psr
    ;;
    mov loc8 = ar.rsc // save RSE configuration
    ;;
    mov ar.rsc = 0 // put RSE in enforced lazy, LE mode
    movl r16=PSR_BITS_TO_CLEAR
    movl r17=PSR_BITS_TO_SET
    ;;
    or loc7 = loc7,r17 // add in psr the bits to set
    ;;
    andcm r16=loc7,r16 // removes bits to clear from psr
    br.call.sptk.many rp=ia64_switch_mode_phys
1:
    // now in physical mode with psr.i/ic off so do rr7 switch
    dep r16=-1,r0,61,3
    ;;
    mov rr[r16]=in0
    srlz.d
    ;;
    rsm 0x6000
    ;;
    srlz.d

    // re-pin mappings for kernel text and data
    mov r18=KERNEL_TR_PAGE_SHIFT<<2
    movl r17=KERNEL_START
    ;;
    ptr.i r17,r18
    ptr.d r17,r18
    ;;
    mov cr.itir=r18
    mov cr.ifa=r17
    mov r16=IA64_TR_KERNEL
    //mov r3=ip
    movl r25 = PAGE_KERNEL
    ;;
    dep r2=0,r3,0,KERNEL_TR_PAGE_SHIFT
    ;;
    or r18=r2,r25
    ;;
    srlz.i
    ;;
    itr.i itr[r16]=r18
    ;;
    itr.d dtr[r16]=r18
    ;;

    // re-pin mappings for per-cpu data
    movl r22 = PERCPU_ADDR
    ;;
    mov r24=IA64_TR_PERCPU_DATA
    or loc2 = r25,loc2 // construct PA | page properties
    mov r23=PERCPU_PAGE_SHIFT<<2
    ;;
    ptr.d r22,r23
    ;;
    mov cr.itir=r23
    mov cr.ifa=r22
    ;;
    itr.d dtr[r24]=loc2 // wire in new mapping...
    ;;

#if 0
    // re-pin mappings for shared_info
    mov r24=IA64_TR_SHARED_INFO
    movl r25=__pgprot(__DIRTY_BITS | _PAGE_PL_2 | _PAGE_AR_RW)
    ;;
    or loc3 = r25,loc3 // construct PA | page properties
    mov r23 = PAGE_SHIFT<<2
    ;;
    ptr.d in1,r23
    ;;
    mov cr.itir=r23
    mov cr.ifa=in1
    ;;
    itr.d dtr[r24]=loc3 // wire in new mapping...
    ;;
    // re-pin mappings for shared_arch_info
    mov r24=IA64_TR_ARCH_INFO
    or loc4 = r25,loc4 // construct PA | page properties
    mov r23 = PAGE_SHIFT<<2
    ;;
    ptr.d in2,r23
    ;;
    mov cr.itir=r23
    mov cr.ifa=in2
    ;;
    itr.d dtr[r24]=loc4 // wire in new mapping...
    ;;
#endif

    // re-pin mappings for guest_vhpt
    mov r24=IA64_TR_PERVP_VHPT
    movl r25=PAGE_KERNEL
    ;;
    or loc5 = r25,loc5 // construct PA | page properties
    mov r23 = VCPU_VHPT_SHIFT<<2
    ;;
    ptr.d in3,r23
    ;;
    mov cr.itir=r23
    mov cr.ifa=in3
    ;;
    itr.d dtr[r24]=loc5 // wire in new mapping...
    ;;

    // re-pin mappings for PAL code section
    mov r24=IA64_TR_PALCODE
    or loc6 = r25,loc6 // construct PA | page properties
    mov r23 = IA64_GRANULE_SHIFT<<2
    ;;
    ptr.i in4,r23
    ;;
    mov cr.itir=r23
    mov cr.ifa=in4
    ;;
    itr.i itr[r24]=loc6 // wire in new mapping...
    ;;

    // done, switch back to virtual and return
    mov r16=loc7 // r16 = original psr
    br.call.sptk.many rp=ia64_switch_mode_virt // return to virtual mode
    mov ar.pfs = loc1
    mov rp = loc0
    ;;
    mov ar.rsc=loc8 // restore RSE configuration
    srlz.d // serialize restoration of psr.l
    br.ret.sptk.many rp
END(vmx_switch_rr7)
#endif