ia64/xen-unstable

xen/arch/ia64/vmx/vmx_entry.S @ 9861:70b7d520bda4

[IA64] Fix RSE issue in VTI-domain

Signed-off-by: Anthony Xu <anthony.xu@intel.com>
author awilliam@xenbuild.aw
date Mon May 08 12:49:53 2006 -0600 (2006-05-08)
parents ced37bea0647
children 11b7dc3529b9
/* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
/*
 * vmx_entry.S:
 * Copyright (c) 2005, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Xuefei Xu (Anthony Xu) (anthony.xu@intel.com)
 * Kun Tian (Kevin Tian) (kevin.tian@intel.com)
 */

#ifndef VCPU_TLB_SHIFT
#define VCPU_TLB_SHIFT 22
#endif
#include <linux/config.h>
#include <asm/asmmacro.h>
#include <asm/cache.h>
#include <asm/kregs.h>
#include <asm/offsets.h>
#include <asm/pgtable.h>
#include <asm/percpu.h>
#include <asm/processor.h>
#include <asm/thread_info.h>
#include <asm/unistd.h>
#include <asm/vhpt.h>
#include <asm/vmmu.h>
#include "vmx_minstate.h"

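/*
 * ia64_leave_nested: restore the pt_regs frame at [r12+16] and return
 * via rfi.  This path leaves an interruption that was taken while
 * already running inside the VMM (a nested interruption), so no
 * register-backing-store switch is needed.
 */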
GLOBAL_ENTRY(ia64_leave_nested)
rsm psr.i
;;
adds r21=PT(PR)+16,r12
;;
lfetch [r21],PT(CR_IPSR)-PT(PR)
adds r2=PT(B6)+16,r12
adds r3=PT(R16)+16,r12
;;
lfetch [r21]
ld8 r28=[r2],8 // load b6
adds r29=PT(R24)+16,r12

ld8.fill r16=[r3]
adds r3=PT(AR_CSD)-PT(R16),r3
adds r30=PT(AR_CCV)+16,r12
;;
ld8.fill r24=[r29]
ld8 r15=[r30] // load ar.ccv
;;
ld8 r29=[r2],16 // load b7
ld8 r30=[r3],16 // load ar.csd
;;
ld8 r31=[r2],16 // load ar.ssd
ld8.fill r8=[r3],16
;;
ld8.fill r9=[r2],16
ld8.fill r10=[r3],PT(R17)-PT(R10)
;;
ld8.fill r11=[r2],PT(R18)-PT(R11)
ld8.fill r17=[r3],16
;;
ld8.fill r18=[r2],16
ld8.fill r19=[r3],16
;;
ld8.fill r20=[r2],16
ld8.fill r21=[r3],16
mov ar.csd=r30
mov ar.ssd=r31
;;
rsm psr.i | psr.ic // initiate turning off interrupts and interruption collection
invala // invalidate ALAT
;;
ld8.fill r22=[r2],24
ld8.fill r23=[r3],24
mov b6=r28
;;
ld8.fill r25=[r2],16
ld8.fill r26=[r3],16
mov b7=r29
;;
ld8.fill r27=[r2],16
ld8.fill r28=[r3],16
;;
ld8.fill r29=[r2],16
ld8.fill r30=[r3],24
;;
ld8.fill r31=[r2],PT(F9)-PT(R31)
adds r3=PT(F10)-PT(F6),r3
;;
ldf.fill f9=[r2],PT(F6)-PT(F9)
ldf.fill f10=[r3],PT(F8)-PT(F10)
;;
ldf.fill f6=[r2],PT(F7)-PT(F6)
;;
ldf.fill f7=[r2],PT(F11)-PT(F7)
ldf.fill f8=[r3],32
;;
srlz.i // ensure interruption collection is off
mov ar.ccv=r15
;;
bsw.0 // switch back to bank 0 (no stop bit required beforehand...)
;;
ldf.fill f11=[r2]
// mov r18=r13
// mov r21=r13
adds r16=PT(CR_IPSR)+16,r12
adds r17=PT(CR_IIP)+16,r12
;;
ld8 r29=[r16],16 // load cr.ipsr
ld8 r28=[r17],16 // load cr.iip
;;
ld8 r30=[r16],16 // load cr.ifs
ld8 r25=[r17],16 // load ar.unat
;;
ld8 r26=[r16],16 // load ar.pfs
ld8 r27=[r17],16 // load ar.rsc
cmp.eq p9,p0=r0,r0 // set p9 to indicate that we should restore cr.ifs
;;
ld8 r24=[r16],16 // load ar.rnat (may be garbage)
ld8 r23=[r17],16 // load ar.bspstore (may be garbage)
;;
ld8 r31=[r16],16 // load predicates
ld8 r22=[r17],16 // load b0
;;
ld8 r19=[r16],16 // load ar.rsc value for "loadrs"
ld8.fill r1=[r17],16 // load r1
;;
ld8.fill r12=[r16],16
ld8.fill r13=[r17],16
;;
ld8 r20=[r16],16 // ar.fpsr
ld8.fill r15=[r17],16
;;
ld8.fill r14=[r16],16
ld8.fill r2=[r17]
;;
ld8.fill r3=[r16]
;;
mov r16=ar.bsp // get existing backing store pointer
;;
mov b0=r22
mov ar.pfs=r26
mov cr.ifs=r30
mov cr.ipsr=r29
mov ar.fpsr=r20
mov cr.iip=r28
;;
mov ar.rsc=r27
mov ar.unat=r25
mov pr=r31,-1
rfi
END(ia64_leave_nested)

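/*
 * ia64_leave_hypervisor: return from Xen to a VTI guest.  It first
 * calls leave_hypervisor_tail() to finish pending event work, then
 * restores the guest's pt_regs frame, switches the register backing
 * store back to the guest, and enters the guest through the PAL VPS
 * resume services (see ia64_vmm_entry below).
 */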
GLOBAL_ENTRY(ia64_leave_hypervisor)
PT_REGS_UNWIND_INFO(0)
/*
 * work.need_resched etc. mustn't get changed by this CPU before it returns to
 * user- or fsys-mode, hence we disable interrupts early on:
 */
rsm psr.i
;;
alloc loc0=ar.pfs,0,1,1,0
adds out0=16,r12
adds r7 = PT(EML_UNAT)+16,r12
;;
ld8 r7 = [r7]
br.call.sptk.many b0=leave_hypervisor_tail
;;
mov ar.pfs=loc0
mov ar.unat=r7
adds r20=PT(PR)+16,r12
;;
lfetch [r20],PT(CR_IPSR)-PT(PR)
adds r2 = PT(B6)+16,r12
adds r3 = PT(B7)+16,r12
;;
lfetch [r20]
;;
ld8 r24=[r2],16 /* B6 */
ld8 r25=[r3],16 /* B7 */
;;
ld8 r26=[r2],16 /* ar_csd */
ld8 r27=[r3],16 /* ar_ssd */
mov b6 = r24
;;
ld8.fill r8=[r2],16
ld8.fill r9=[r3],16
mov b7 = r25
;;
mov ar.csd = r26
mov ar.ssd = r27
;;
ld8.fill r10=[r2],PT(R15)-PT(R10)
ld8.fill r11=[r3],PT(R14)-PT(R11)
;;
ld8.fill r15=[r2],PT(R16)-PT(R15)
ld8.fill r14=[r3],PT(R17)-PT(R14)
;;
ld8.fill r16=[r2],16
ld8.fill r17=[r3],16
;;
ld8.fill r18=[r2],16
ld8.fill r19=[r3],16
;;
ld8.fill r20=[r2],16
ld8.fill r21=[r3],16
;;
ld8.fill r22=[r2],16
ld8.fill r23=[r3],16
;;
ld8.fill r24=[r2],16
ld8.fill r25=[r3],16
;;
ld8.fill r26=[r2],16
ld8.fill r27=[r3],16
;;
ld8.fill r28=[r2],16
ld8.fill r29=[r3],16
;;
ld8.fill r30=[r2],PT(F6)-PT(R30)
ld8.fill r31=[r3],PT(F7)-PT(R31)
;;
rsm psr.i | psr.ic // initiate turning off interrupts and interruption collection
invala // invalidate ALAT
;;
ldf.fill f6=[r2],32
ldf.fill f7=[r3],32
;;
ldf.fill f8=[r2],32
ldf.fill f9=[r3],32
;;
ldf.fill f10=[r2],32
ldf.fill f11=[r3],24
;;
ld8.fill r4=[r2],16 //load r4
ld8.fill r5=[r3],16 //load r5
;;
ld8.fill r6=[r2] //load r6
ld8.fill r7=[r3] //load r7
;;
srlz.i // ensure interruption collection is off
;;
bsw.0
;;
adds r16 = PT(CR_IPSR)+16,r12
adds r17 = PT(CR_IIP)+16,r12
mov r21=r13 // get current
;;
ld8 r31=[r16],16 // load cr.ipsr
ld8 r30=[r17],16 // load cr.iip
;;
ld8 r29=[r16],16 // load cr.ifs
ld8 r28=[r17],16 // load ar.unat
;;
ld8 r27=[r16],16 // load ar.pfs
ld8 r26=[r17],16 // load ar.rsc
;;
ld8 r25=[r16],16 // load ar.rnat
ld8 r24=[r17],16 // load ar.bspstore
;;
ld8 r23=[r16],16 // load predicates
ld8 r22=[r17],16 // load b0
;;
ld8 r20=[r16],16 // load ar.rsc value for "loadrs"
ld8.fill r1=[r17],16 //load r1
;;
ld8.fill r12=[r16],16 //load r12
ld8.fill r13=[r17],PT(R2)-PT(R13) //load r13
;;
ld8 r19=[r16],PT(R3)-PT(AR_FPSR) //load ar_fpsr
ld8.fill r2=[r17],PT(AR_CCV)-PT(R2) //load r2
;;
ld8.fill r3=[r16] //load r3
ld8 r18=[r17],PT(RFI_PFS)-PT(AR_CCV) //load ar_ccv
;;
mov ar.fpsr=r19
mov ar.ccv=r18
;;
//rbs_switch
// loadrs has already been shifted
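// Switch the RSE back onto the guest's backing store: drop the
// current register frame, install the saved ar.rsc value (whose
// loadrs field was pre-computed) and execute loadrs so the dirty
// stacked registers are reloaded from the guest's backing store,
// then restore ar.bspstore, ar.rnat and the original ar.rsc.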
alloc r16=ar.pfs,0,0,0,0 // drop current register frame
;;
mov ar.rsc=r20
;;
loadrs
;;
mov ar.bspstore=r24
;;
ld8 r24=[r17] //load rfi_pfs
mov ar.unat=r28
mov ar.rnat=r25
mov ar.rsc=r26
;;
mov cr.ipsr=r31
mov cr.iip=r30
mov cr.ifs=r29
cmp.ne p6,p0=r24,r0
(p6)br.sptk vmx_dorfirfi
;;
vmx_dorfirfi_back:
mov ar.pfs=r27
adds r18=IA64_VPD_BASE_OFFSET,r21
;;
ld8 r18=[r18] //vpd
adds r17=IA64_VCPU_ISR_OFFSET,r21
;;
ld8 r17=[r17]
adds r19=VPD(VPSR),r18
;;
ld8 r19=[r19] //vpsr
//vsa_sync_write_start
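// __vsa_base holds the entry point of the PAL virtualization
// services; each service (SYNC_WRITE, RESUME_NORMAL, ...) is
// reached at a fixed offset from it.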
movl r20=__vsa_base
;;
ld8 r20=[r20] // read entry point
mov r25=r18
;;
add r16=PAL_VPS_SYNC_WRITE,r20
movl r24=switch_rr7 // calculate return address
;;
mov b0=r16
br.cond.sptk b0 // call the service
;;
switch_rr7:
// fall through
GLOBAL_ENTRY(ia64_vmm_entry)
/*
 * must be at bank 0
 * parameter:
 * r17:cr.isr
 * r18:vpd
 * r19:vpsr
 * r20:__vsa_base
 * r22:b0
 * r23:predicate
 */
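// If the guest's vpsr.ic is set, resume it directly through
// PAL_VPS_RESUME_NORMAL; otherwise pick between RESUME_NORMAL and
// RESUME_HANDLER based on cr.isr.ir.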
mov r24=r22
mov r25=r18
tbit.nz p1,p2 = r19,IA64_PSR_IC_BIT // p1=vpsr.ic
;;
(p1) add r29=PAL_VPS_RESUME_NORMAL,r20
(p1) br.sptk.many ia64_vmm_entry_out
;;
tbit.nz p1,p2 = r17,IA64_ISR_IR_BIT //p1=cr.isr.ir
;;
(p1) add r29=PAL_VPS_RESUME_NORMAL,r20
(p2) add r29=PAL_VPS_RESUME_HANDLER,r20
;;
ia64_vmm_entry_out:
mov pr=r23,-2
mov b0=r29
;;
br.cond.sptk b0 // call pal service
END(ia64_leave_hypervisor)

//r24 rfi_pfs
//r17 address of rfi_pfs
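// vmx_dorfirfi: reached when a non-zero rfi_pfs was saved for this
// vcpu.  It clears rfi_pfs, merges the current ar.ec into the saved
// pfs value and installs the result as ar.pfs before returning to
// vmx_dorfirfi_back above.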
GLOBAL_ENTRY(vmx_dorfirfi)
mov r16=ar.ec
movl r20 = vmx_dorfirfi_back
;;
// clean rfi_pfs
st8 [r17]=r0
mov b0=r20
// pfs.pec=ar.ec
dep r24 = r16, r24, 52, 6
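// (pfs.pec occupies bits 52..57, so the 6-bit ar.ec value is
// deposited there)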
;;
mov ar.pfs=r24
;;
br.ret.sptk b0
;;
END(vmx_dorfirfi)

#ifdef XEN_DBL_MAPPING /* will be removed */

#define VMX_PURGE_RR7 0
#define VMX_INSERT_RR7 1
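// Callers pass (1<<VMX_PURGE_RR7) or (1<<VMX_INSERT_RR7) in loc8;
// vmx_switch_rr7 tests the VMX_INSERT_RR7 bit to choose between
// inserting and purging the double mapping.
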
/*
 * in0: old rr7
 * in1: virtual address of xen image
 * in2: virtual address of vhpt table
 */
GLOBAL_ENTRY(vmx_purge_double_mapping)
alloc loc1 = ar.pfs,5,9,0,0
mov loc0 = rp
movl r8 = 1f
;;
movl loc4 = KERNEL_TR_PAGE_SHIFT
movl loc5 = VCPU_TLB_SHIFT
mov loc6 = psr
movl loc7 = XEN_RR7_SWITCH_STUB
mov loc8 = (1<<VMX_PURGE_RR7)
;;
srlz.i
;;
rsm psr.i | psr.ic
;;
srlz.i
;;
mov ar.rsc = 0
mov b6 = loc7
mov rp = r8
;;
br.sptk b6
1:
mov ar.rsc = 3
mov rp = loc0
;;
mov psr.l = loc6
;;
srlz.i
;;
br.ret.sptk rp
END(vmx_purge_double_mapping)

/*
 * in0: new rr7
 * in1: virtual address of xen image
 * in2: virtual address of vhpt table
 * in3: pte entry of xen image
 * in4: pte entry of vhpt table
 */
GLOBAL_ENTRY(vmx_insert_double_mapping)
alloc loc1 = ar.pfs,5,9,0,0
mov loc0 = rp
movl loc2 = IA64_TR_XEN_IN_DOM // TR number for xen image
;;
movl loc3 = IA64_TR_VHPT_IN_DOM // TR number for vhpt table
movl r8 = 1f
movl loc4 = KERNEL_TR_PAGE_SHIFT
;;
movl loc5 = VCPU_TLB_SHIFT
mov loc6 = psr
movl loc7 = XEN_RR7_SWITCH_STUB
;;
srlz.i
;;
rsm psr.i | psr.ic
mov loc8 = (1<<VMX_INSERT_RR7)
;;
srlz.i
;;
mov ar.rsc = 0
mov b6 = loc7
mov rp = r8
;;
br.sptk b6
1:
mov ar.rsc = 3
mov rp = loc0
;;
mov psr.l = loc6
;;
srlz.i
;;
br.ret.sptk rp
END(vmx_insert_double_mapping)

.align PAGE_SIZE
/*
 * Stub to add double mapping for new domain, which shouldn't
 * access any memory when active.  Before reaching this point,
 * both psr.i and psr.ic are cleared and the RSE is in enforced
 * lazy mode.
 *
 * in0: new rr7
 * in1: virtual address of xen image
 * in2: virtual address of vhpt table
 * in3: pte entry of xen image
 * in4: pte entry of vhpt table
 * loc2: TR number for xen image
 * loc3: TR number for vhpt table
 * loc4: page size for xen image
 * loc5: page size of vhpt table
 * loc7: free to use
 * loc8: purge or insert
 * r8: will contain old rid value
 */
GLOBAL_ENTRY(vmx_switch_rr7)
movl loc7 = (7<<61)
dep.z loc4 = loc4, 2, 6
dep.z loc5 = loc5, 2, 6
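// (the page-size values are shifted into bits 7:2, where itir.ps
// lives)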
;;
tbit.nz p6,p7=loc8, VMX_INSERT_RR7
mov r8 = rr[loc7]
;;
mov rr[loc7] = in0
(p6)mov cr.ifa = in1
(p6)mov cr.itir = loc4
;;
srlz.i
;;
(p6)itr.i itr[loc2] = in3
(p7)ptr.i in1, loc4
;;
(p6)itr.d dtr[loc2] = in3
(p7)ptr.d in1, loc4
;;
srlz.i
;;
(p6)mov cr.ifa = in2
(p6)mov cr.itir = loc5
;;
(p6)itr.d dtr[loc3] = in4
(p7)ptr.d in2, loc5
;;
srlz.i
;;
mov rr[loc7] = r8
;;
srlz.i
br.sptk rp
END(vmx_switch_rr7)
.align PAGE_SIZE

#else
/*
 * in0: new rr7
 * in1: virtual address of shared_info
 * in2: virtual address of shared_arch_info (VPD)
 * in3: virtual address of guest_vhpt
 * in4: virtual address of pal code segment
 * r8: will contain old rid value
 */

#define PSR_BITS_TO_CLEAR \
 (IA64_PSR_I | IA64_PSR_IT | IA64_PSR_DT | IA64_PSR_DB | IA64_PSR_RT | \
 IA64_PSR_DD | IA64_PSR_SS | IA64_PSR_RI | IA64_PSR_ED | \
 IA64_PSR_DFL | IA64_PSR_DFH)
#define PSR_BITS_TO_SET IA64_PSR_BN
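
// For the physical-mode rr7 switch, the interrupt, translation,
// debug and single-step PSR bits are cleared and only the
// register-bank bit (psr.bn) is forced back on.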
// extern void vmx_switch_rr7(unsigned long rid, void *shared_info,
//                            void *shared_arch_info, void *guest_vhpt,
//                            void *pal_vaddr);

GLOBAL_ENTRY(vmx_switch_rr7)
// not sure this unwind statement is correct...
.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(1)
alloc loc1 = ar.pfs, 5, 9, 0, 0
1: {
mov r28 = in0 // copy procedure index
mov r8 = ip // save ip to compute branch
mov loc0 = rp // save rp
};;
.body
movl loc2=PERCPU_ADDR
;;
tpa loc2 = loc2 // get physical address of per-cpu data
;;
dep loc3 = 0,in1,60,4 // get physical address of shared_info
dep loc4 = 0,in2,60,4 // get physical address of shared_arch_info
dep loc5 = 0,in3,60,4 // get physical address of guest_vhpt
dep loc6 = 0,in4,60,4 // get physical address of pal code
;;
mov loc7 = psr // save psr
;;
mov loc8 = ar.rsc // save RSE configuration
;;
mov ar.rsc = 0 // put RSE in enforced lazy, LE mode
movl r16=PSR_BITS_TO_CLEAR
movl r17=PSR_BITS_TO_SET
;;
or loc7 = loc7,r17 // add in psr the bits to set
;;
andcm r16=loc7,r16 // removes bits to clear from psr
br.call.sptk.many rp=ia64_switch_mode_phys
1:
// now in physical mode with psr.i/ic off so do rr7 switch
dep r16=-1,r0,61,3
;;
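// r16 = 0xe000000000000000, the base address of region 7
// (virtual address bits 63:61 select the region register)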
mov rr[r16]=in0
srlz.d
;;
rsm 0x6000 // keep psr.ic and psr.i (bits 13 and 14) clear
;;
srlz.d

// re-pin mappings for kernel text and data
mov r18=KERNEL_TR_PAGE_SHIFT<<2
movl r17=KERNEL_START
;;
ptr.i r17,r18
ptr.d r17,r18
;;
mov cr.itir=r18
mov cr.ifa=r17
mov r16=IA64_TR_KERNEL
//mov r3=ip
movl r25 = PAGE_KERNEL
;;
dep r2=0,r3,0,KERNEL_TR_PAGE_SHIFT
;;
or r18=r2,r25
;;
srlz.i
;;
itr.i itr[r16]=r18
;;
itr.d dtr[r16]=r18
;;

// re-pin mappings for per-cpu data

movl r22 = PERCPU_ADDR
;;
mov r24=IA64_TR_PERCPU_DATA
or loc2 = r25,loc2 // construct PA | page properties
mov r23=PERCPU_PAGE_SHIFT<<2
;;
ptr.d r22,r23
;;
mov cr.itir=r23
mov cr.ifa=r22
;;
itr.d dtr[r24]=loc2 // wire in new mapping...
;;

#if 0
// re-pin mappings for shared_info

mov r24=IA64_TR_SHARED_INFO
movl r25=__pgprot(__DIRTY_BITS | _PAGE_PL_2 | _PAGE_AR_RW)
;;
or loc3 = r25,loc3 // construct PA | page properties
mov r23 = PAGE_SHIFT<<2
;;
ptr.d in1,r23
;;
mov cr.itir=r23
mov cr.ifa=in1
;;
itr.d dtr[r24]=loc3 // wire in new mapping...
;;
// re-pin mappings for shared_arch_info

mov r24=IA64_TR_ARCH_INFO
or loc4 = r25,loc4 // construct PA | page properties
mov r23 = PAGE_SHIFT<<2
;;
ptr.d in2,r23
;;
mov cr.itir=r23
mov cr.ifa=in2
;;
itr.d dtr[r24]=loc4 // wire in new mapping...
;;
#endif

// re-pin mappings for guest_vhpt

mov r24=IA64_TR_PERVP_VHPT
movl r25=PAGE_KERNEL
;;
or loc5 = r25,loc5 // construct PA | page properties
mov r23 = VCPU_VHPT_SHIFT<<2
;;
ptr.d in3,r23
;;
mov cr.itir=r23
mov cr.ifa=in3
;;
itr.d dtr[r24]=loc5 // wire in new mapping...
;;

// re-pin mappings for PAL code section

mov r24=IA64_TR_PALCODE
or loc6 = r25,loc6 // construct PA | page properties
mov r23 = IA64_GRANULE_SHIFT<<2
;;
ptr.i in4,r23
;;
mov cr.itir=r23
mov cr.ifa=in4
;;
itr.i itr[r24]=loc6 // wire in new mapping...
;;

// done, switch back to virtual and return
mov r16=loc7 // r16 = original psr
br.call.sptk.many rp=ia64_switch_mode_virt // return to virtual mode
mov ar.pfs = loc1
mov rp = loc0
;;
mov ar.rsc=loc8 // restore RSE configuration
srlz.d // serialize restoration of psr.l
br.ret.sptk.many rp
END(vmx_switch_rr7)
#endif