ia64/xen-unstable: xen/arch/ia64/vmx/vmx_entry.S @ 9011:cfe20f41f043

[IA64] VTI: updated vtlb, support non-contiguous memory on VTI domains

Previously a VTI domain supported only contiguous memory;
this patch makes VTI domains support non-contiguous memory.

Signed-off-by: Anthony Xu <anthony.xu@intel.com>
Author: awilliam@xenbuild.aw
Date:   Wed Mar 01 08:29:00 2006 -0700
/* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
/*
 * vmx_entry.S:
 * Copyright (c) 2005, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Xuefei Xu (Anthony Xu) (anthony.xu@intel.com)
 * Kun Tian (Kevin Tian) (kevin.tian@intel.com)
 */

#ifndef VCPU_TLB_SHIFT
#define VCPU_TLB_SHIFT 22
#endif

#include <linux/config.h>
#include <asm/asmmacro.h>
#include <asm/cache.h>
#include <asm/kregs.h>
#include <asm/offsets.h>
#include <asm/pgtable.h>
#include <asm/percpu.h>
#include <asm/processor.h>
#include <asm/thread_info.h>
#include <asm/unistd.h>
#include <asm/vhpt.h>
#include <asm/vmmu.h>
#include "vmx_minstate.h"

/*
 * prev_task <- vmx_ia64_switch_to(struct task_struct *next)
 * With Ingo's new scheduler, interrupts are disabled when this routine gets
 * called.  The code starting at .map relies on this.  The rest of the code
 * doesn't care about the interrupt masking status.
 *
 * Since we allocate domain stacks in the xenheap, there is no need to map the
 * new domain's stack: the entire xenheap is mapped by a TR.  The other job of
 * vmx_ia64_switch_to is to switch to bank0 and update the current pointer.
 */
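
/*
 * A rough C-level sketch of the routine below (hedged: "thread.ksp" matches
 * the IA64_TASK_THREAD_KSP_OFFSET actually used; the rest is illustrative
 * pseudocode, not the real Xen API):
 *
 *   struct task_struct *vmx_ia64_switch_to(struct task_struct *next)
 *   {
 *       struct task_struct *prev = current;
 *       prev->thread.ksp = sp;    // save old task's kernel stack pointer
 *       sp = next->thread.ksp;    // no remap needed: xenheap is TR-mapped
 *       current = next;           // updates bank0 r21 and r13
 *       return prev;
 *   }
 */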
GLOBAL_ENTRY(vmx_ia64_switch_to)
    .prologue
    alloc r16=ar.pfs,1,0,0,0
    DO_SAVE_SWITCH_STACK
    .body

    bsw.0   // Switch to bank0, because bank0 r21 is the current pointer
    ;;
    adds r22=IA64_TASK_THREAD_KSP_OFFSET,r13
    movl r25=init_task
    adds r26=IA64_TASK_THREAD_KSP_OFFSET,in0
    ;;
    st8 [r22]=sp        // save kernel stack pointer of old task
    ;;
    /*
     * A TR already maps this task's stack; we can skip mapping it again.
     */
    ld8 sp=[r26]        // load kernel stack pointer of new task
    mov r21=in0         // update "current" application register
    mov r8=r13          // return pointer to previously running task
    mov r13=in0         // set "current" pointer
    ;;
    bsw.1
    ;;
    DO_LOAD_SWITCH_STACK

#ifdef CONFIG_SMP
    sync.i              // ensure "fc"s done by this CPU are visible on other CPUs
#endif
    br.ret.sptk.many rp // boogie on out in new context
END(vmx_ia64_switch_to)
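
/*
 * ia64_leave_nested: return from a nested (hypervisor-context) trap.  A
 * hedged summary of the code below: it restores the pt_regs frame saved at
 * entry (scratch GRs, b6/b7, ar.csd/ssd/ccv, the FP scratch registers, then,
 * in bank 0, cr.ipsr/iip/ifs and the remaining ar/pr state) and returns with
 * rfi.  Unlike ia64_leave_hypervisor below, it performs no loadrs-based
 * switch of the register backing store.
 */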
GLOBAL_ENTRY(ia64_leave_nested)
    rsm psr.i
    ;;
    adds r21=PT(PR)+16,r12
    ;;
    lfetch [r21],PT(CR_IPSR)-PT(PR)
    adds r2=PT(B6)+16,r12
    adds r3=PT(R16)+16,r12
    ;;
    lfetch [r21]
    ld8 r28=[r2],8      // load b6
    adds r29=PT(R24)+16,r12

    ld8.fill r16=[r3]
    adds r3=PT(AR_CSD)-PT(R16),r3
    adds r30=PT(AR_CCV)+16,r12
    ;;
    ld8.fill r24=[r29]
    ld8 r15=[r30]       // load ar.ccv
    ;;
    ld8 r29=[r2],16     // load b7
    ld8 r30=[r3],16     // load ar.csd
    ;;
    ld8 r31=[r2],16     // load ar.ssd
    ld8.fill r8=[r3],16
    ;;
    ld8.fill r9=[r2],16
    ld8.fill r10=[r3],PT(R17)-PT(R10)
    ;;
    ld8.fill r11=[r2],PT(R18)-PT(R11)
    ld8.fill r17=[r3],16
    ;;
    ld8.fill r18=[r2],16
    ld8.fill r19=[r3],16
    ;;
    ld8.fill r20=[r2],16
    ld8.fill r21=[r3],16
    mov ar.csd=r30
    mov ar.ssd=r31
    ;;
    rsm psr.i | psr.ic  // initiate turning off interrupts and interruption collection
    invala              // invalidate ALAT
    ;;
    ld8.fill r22=[r2],24
    ld8.fill r23=[r3],24
    mov b6=r28
    ;;
    ld8.fill r25=[r2],16
    ld8.fill r26=[r3],16
    mov b7=r29
    ;;
    ld8.fill r27=[r2],16
    ld8.fill r28=[r3],16
    ;;
    ld8.fill r29=[r2],16
    ld8.fill r30=[r3],24
    ;;
    ld8.fill r31=[r2],PT(F9)-PT(R31)
    adds r3=PT(F10)-PT(F6),r3
    ;;
    ldf.fill f9=[r2],PT(F6)-PT(F9)
    ldf.fill f10=[r3],PT(F8)-PT(F10)
    ;;
    ldf.fill f6=[r2],PT(F7)-PT(F6)
    ;;
    ldf.fill f7=[r2],PT(F11)-PT(F7)
    ldf.fill f8=[r3],32
    ;;
    srlz.i              // ensure interruption collection is off
    mov ar.ccv=r15
    ;;
    bsw.0               // switch back to bank 0 (no stop bit required beforehand...)
    ;;
    ldf.fill f11=[r2]
// mov r18=r13
// mov r21=r13
    adds r16=PT(CR_IPSR)+16,r12
    adds r17=PT(CR_IIP)+16,r12
    ;;
    ld8 r29=[r16],16    // load cr.ipsr
    ld8 r28=[r17],16    // load cr.iip
    ;;
    ld8 r30=[r16],16    // load cr.ifs
    ld8 r25=[r17],16    // load ar.unat
    ;;
    ld8 r26=[r16],16    // load ar.pfs
    ld8 r27=[r17],16    // load ar.rsc
    cmp.eq p9,p0=r0,r0  // set p9 to indicate that we should restore cr.ifs
    ;;
    ld8 r24=[r16],16    // load ar.rnat (may be garbage)
    ld8 r23=[r17],16    // load ar.bspstore (may be garbage)
    ;;
    ld8 r31=[r16],16    // load predicates
    ld8 r22=[r17],16    // load b0
    ;;
    ld8 r19=[r16],16    // load ar.rsc value for "loadrs"
    ld8.fill r1=[r17],16 // load r1
    ;;
    ld8.fill r12=[r16],16
    ld8.fill r13=[r17],16
    ;;
    ld8 r20=[r16],16    // ar.fpsr
    ld8.fill r15=[r17],16
    ;;
    ld8.fill r14=[r16],16
    ld8.fill r2=[r17]
    ;;
    ld8.fill r3=[r16]
    ;;
    mov r16=ar.bsp      // get existing backing store pointer
    ;;
    mov b0=r22
    mov ar.pfs=r26
    mov cr.ifs=r30
    mov cr.ipsr=r29
    mov ar.fpsr=r20
    mov cr.iip=r28
    ;;
    mov ar.rsc=r27
    mov ar.unat=r25
    mov pr=r31,-1
    rfi
END(ia64_leave_nested)
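
/*
 * ia64_leave_hypervisor: return from the hypervisor to a VTI guest.  In
 * outline (grounded in the code below): call leave_hypervisor_tail, restore
 * the full pt_regs frame (including r4-r7 and the FP scratch registers),
 * switch the register backing store back with "loadrs", optionally detour
 * through vmx_dorfirfi when a saved rfi_pfs is pending, then sync the VPD
 * via the PAL_VPS_SYNC_WRITE service and resume the guest through
 * ia64_vmm_entry.
 */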
GLOBAL_ENTRY(ia64_leave_hypervisor)
    PT_REGS_UNWIND_INFO(0)
    /*
     * work.need_resched etc. mustn't get changed by this CPU before it returns to
     * user- or fsys-mode, hence we disable interrupts early on:
     */
    rsm psr.i
    ;;
    alloc loc0=ar.pfs,0,1,1,0
    adds out0=16,r12
    adds r7=PT(EML_UNAT)+16,r12
    ;;
    ld8 r7=[r7]
    br.call.sptk.many b0=leave_hypervisor_tail
    ;;
    mov ar.pfs=loc0
    mov ar.unat=r7
    adds r20=PT(PR)+16,r12
    ;;
    lfetch [r20],PT(CR_IPSR)-PT(PR)
    adds r2=PT(B6)+16,r12
    adds r3=PT(B7)+16,r12
    ;;
    lfetch [r20]
    ;;
    ld8 r24=[r2],16     /* B6 */
    ld8 r25=[r3],16     /* B7 */
    ;;
    ld8 r26=[r2],16     /* ar_csd */
    ld8 r27=[r3],16     /* ar_ssd */
    mov b6=r24
    ;;
    ld8.fill r8=[r2],16
    ld8.fill r9=[r3],16
    mov b7=r25
    ;;
    mov ar.csd=r26
    mov ar.ssd=r27
    ;;
    ld8.fill r10=[r2],PT(R15)-PT(R10)
    ld8.fill r11=[r3],PT(R14)-PT(R11)
    ;;
    ld8.fill r15=[r2],PT(R16)-PT(R15)
    ld8.fill r14=[r3],PT(R17)-PT(R14)
    ;;
    ld8.fill r16=[r2],16
    ld8.fill r17=[r3],16
    ;;
    ld8.fill r18=[r2],16
    ld8.fill r19=[r3],16
    ;;
    ld8.fill r20=[r2],16
    ld8.fill r21=[r3],16
    ;;
    ld8.fill r22=[r2],16
    ld8.fill r23=[r3],16
    ;;
    ld8.fill r24=[r2],16
    ld8.fill r25=[r3],16
    ;;
    ld8.fill r26=[r2],16
    ld8.fill r27=[r3],16
    ;;
    ld8.fill r28=[r2],16
    ld8.fill r29=[r3],16
    ;;
    ld8.fill r30=[r2],PT(F6)-PT(R30)
    ld8.fill r31=[r3],PT(F7)-PT(R31)
    ;;
    rsm psr.i | psr.ic  // initiate turning off interrupts and interruption collection
    invala              // invalidate ALAT
    ;;
    ldf.fill f6=[r2],32
    ldf.fill f7=[r3],32
    ;;
    ldf.fill f8=[r2],32
    ldf.fill f9=[r3],32
    ;;
    ldf.fill f10=[r2],32
    ldf.fill f11=[r3],24
    ;;
    ld8.fill r4=[r2],16     // load r4
    ld8.fill r5=[r3],16     // load r5
    ;;
    ld8.fill r6=[r2]        // load r6
    ld8.fill r7=[r3]        // load r7
    ;;
    srlz.i                  // ensure interruption collection is off
    ;;
    bsw.0
    ;;
    adds r16=PT(CR_IPSR)+16,r12
    adds r17=PT(CR_IIP)+16,r12
    mov r21=r13             // get current
    ;;
    ld8 r31=[r16],16        // load cr.ipsr
    ld8 r30=[r17],16        // load cr.iip
    ;;
    ld8 r29=[r16],16        // load cr.ifs
    ld8 r28=[r17],16        // load ar.unat
    ;;
    ld8 r27=[r16],16        // load ar.pfs
    ld8 r26=[r17],16        // load ar.rsc
    ;;
    ld8 r25=[r16],16        // load ar.rnat
    ld8 r24=[r17],16        // load ar.bspstore
    ;;
    ld8 r23=[r16],16        // load predicates
    ld8 r22=[r17],16        // load b0
    ;;
    ld8 r20=[r16],16        // load ar.rsc value for "loadrs"
    ld8.fill r1=[r17],16    // load r1
    ;;
    ld8.fill r12=[r16],16   // load r12
    ld8.fill r13=[r17],PT(R2)-PT(R13) // load r13
    ;;
    ld8 r19=[r16],PT(R3)-PT(AR_FPSR) // load ar_fpsr
    ld8.fill r2=[r17],PT(AR_CCV)-PT(R2) // load r2
    ;;
    ld8.fill r3=[r16]       // load r3
    ld8 r18=[r17],PT(RFI_PFS)-PT(AR_CCV) // load ar_ccv
    ;;
    mov ar.fpsr=r19
    mov ar.ccv=r18
    ;;
// rbs_switch
    // loadrs has already been shifted
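    // RSE switch (a brief gloss, hedged): r20 holds the saved ar.rsc image
    // whose upper bits double as the "loadrs" field.  Writing it to ar.rsc
    // and issuing "loadrs" reloads the guest's dirty stacked registers from
    // the backing store, after which ar.bspstore/ar.rnat can be restored.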
    alloc r16=ar.pfs,0,0,0,0    // drop current register frame
    ;;
    mov ar.rsc=r20
    ;;
    loadrs
    ;;
    mov ar.bspstore=r24
    ;;
    ld8 r24=[r17]       // load rfi_pfs
    mov ar.unat=r28
    mov ar.rnat=r25
    mov ar.rsc=r26
    ;;
    mov cr.ipsr=r31
    mov cr.iip=r30
    mov cr.ifs=r29
    cmp.ne p6,p0=r24,r0
(p6) br.sptk vmx_dorfirfi
    ;;
vmx_dorfirfi_back:
    mov ar.pfs=r27
    adds r18=IA64_VPD_BASE_OFFSET,r21
    ;;
    ld8 r18=[r18]       // vpd
    ;;
    adds r19=VPD(VPSR),r18
    ;;
    ld8 r19=[r19]       // vpsr
// vsa_sync_write_start
    movl r20=__vsa_base
    ;;
    ld8 r20=[r20]       // read entry point
    mov r25=r18
    ;;
    add r16=PAL_VPS_SYNC_WRITE,r20
    movl r24=switch_rr7 // calculate return address
    ;;
    mov b0=r16
    br.cond.sptk b0     // call the service
    ;;
switch_rr7:
    // fall through
GLOBAL_ENTRY(ia64_vmm_entry)
    /*
     * must be at bank 0
     * parameter:
     * r18: vpd
     * r19: vpsr
     * r20: __vsa_base
     * r22: b0
     * r23: predicate
     */
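    /*
     * In C-like pseudocode (a hedged sketch of the dispatch below; the
     * function-pointer cast is illustrative, not the actual PAL calling
     * convention):
     *
     *   entry = vsa_base + ((vpsr & IA64_PSR_IC) ? PAL_VPS_RESUME_NORMAL
     *                                            : PAL_VPS_RESUME_HANDLER);
     *   ((void (*)(void *vpd))entry)(vpd);   // does not return here
     */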
    mov r24=r22
    mov r25=r18
    tbit.nz p1,p2=r19,IA64_PSR_IC_BIT   // p1=vpsr.ic
    ;;
(p1) add r29=PAL_VPS_RESUME_NORMAL,r20
(p2) add r29=PAL_VPS_RESUME_HANDLER,r20
    ;;
    mov pr=r23,-2
    mov b0=r29
    ;;
    br.cond.sptk b0     // call pal service
END(ia64_leave_hypervisor)

// r24: rfi_pfs
// r17: address of rfi_pfs
GLOBAL_ENTRY(vmx_dorfirfi)
    mov r16=ar.ec
    movl r20=vmx_dorfirfi_back
    ;;
    // clean rfi_pfs
    st8 [r17]=r0
    mov b0=r20
    // pfs.pec=ar.ec
    dep r24=r16,r24,52,6
    ;;
    mov ar.pfs=r24
    ;;
    br.ret.sptk b0
    ;;
END(vmx_dorfirfi)
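
// vmx_dorfirfi (a hedged note): this stub appears to support guest rfi
// emulation.  When pt_regs.rfi_pfs is non-zero, it deposits the current
// ar.ec into the pec field (bits 52-57) of the saved pfs image, loads the
// result into ar.pfs, clears rfi_pfs, and returns to vmx_dorfirfi_back, so
// the register frame matches what the guest expects after its rfi.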
#ifdef XEN_DBL_MAPPING  /* will be removed */

#define VMX_PURGE_RR7   0
#define VMX_INSERT_RR7  1
/*
 * in0: old rr7
 * in1: virtual address of xen image
 * in2: virtual address of vhpt table
 */
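// Calling convention note (hedged, inferred from the code below): the actual
// rr7 switch runs out of a stub at XEN_RR7_SWITCH_STUB, since rr7 maps Xen
// itself and cannot be changed from code it maps.  The caller loads the stub
// address into b6, points rp at the local label "1:", disables psr.i/ic, puts
// the RSE into enforced lazy mode (ar.rsc=0), and branches; the stub returns
// through rp, after which psr and ar.rsc are restored.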
GLOBAL_ENTRY(vmx_purge_double_mapping)
    alloc loc1=ar.pfs,5,9,0,0
    mov loc0=rp
    movl r8=1f
    ;;
    movl loc4=KERNEL_TR_PAGE_SHIFT
    movl loc5=VCPU_TLB_SHIFT
    mov loc6=psr
    movl loc7=XEN_RR7_SWITCH_STUB
    mov loc8=(1<<VMX_PURGE_RR7)
    ;;
    srlz.i
    ;;
    rsm psr.i | psr.ic
    ;;
    srlz.i
    ;;
    mov ar.rsc=0
    mov b6=loc7
    mov rp=r8
    ;;
    br.sptk b6
1:
    mov ar.rsc=3
    mov rp=loc0
    ;;
    mov psr.l=loc6
    ;;
    srlz.i
    ;;
    br.ret.sptk rp
END(vmx_purge_double_mapping)

/*
 * in0: new rr7
 * in1: virtual address of xen image
 * in2: virtual address of vhpt table
 * in3: pte entry of xen image
 * in4: pte entry of vhpt table
 */
GLOBAL_ENTRY(vmx_insert_double_mapping)
    alloc loc1=ar.pfs,5,9,0,0
    mov loc0=rp
    movl loc2=IA64_TR_XEN_IN_DOM    // TR number for xen image
    ;;
    movl loc3=IA64_TR_VHPT_IN_DOM   // TR number for vhpt table
    movl r8=1f
    movl loc4=KERNEL_TR_PAGE_SHIFT
    ;;
    movl loc5=VCPU_TLB_SHIFT
    mov loc6=psr
    movl loc7=XEN_RR7_SWITCH_STUB
    ;;
    srlz.i
    ;;
    rsm psr.i | psr.ic
    mov loc8=(1<<VMX_INSERT_RR7)
    ;;
    srlz.i
    ;;
    mov ar.rsc=0
    mov b6=loc7
    mov rp=r8
    ;;
    br.sptk b6
1:
    mov ar.rsc=3
    mov rp=loc0
    ;;
    mov psr.l=loc6
    ;;
    srlz.i
    ;;
    br.ret.sptk rp
END(vmx_insert_double_mapping)

    .align PAGE_SIZE
/*
 * Stub to add the double mapping for a new domain; it must not access any
 * memory while active.  Before reaching this point, psr.i and psr.ic are
 * both cleared and the RSE is in enforced lazy mode.
 *
 * in0: new rr7
 * in1: virtual address of xen image
 * in2: virtual address of vhpt table
 * in3: pte entry of xen image
 * in4: pte entry of vhpt table
 * loc2: TR number for xen image
 * loc3: TR number for vhpt table
 * loc4: page size for xen image
 * loc5: page size of vhpt table
 * loc7: free to use
 * loc8: purge or insert
 * r8: will contain old rid value
 */
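/*
 * Flow of the stub, in C-like pseudocode (a hedged sketch; set_rr/get_rr/
 * itr/ptr stand for the raw instructions, not real helpers):
 *
 *   old = get_rr(7 << 61);
 *   set_rr(7 << 61, new_rr7);        // rid under which to purge/insert
 *   if (insert) { itr(xen_image); itr(vhpt); }
 *   else        { ptr(xen_image); ptr(vhpt); }
 *   set_rr(7 << 61, old);            // restore caller's rid
 *   return old;                      // left in r8
 */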
GLOBAL_ENTRY(vmx_switch_rr7)
    movl loc7=(7<<61)
    dep.z loc4=loc4,2,6
    dep.z loc5=loc5,2,6
    ;;
    tbit.nz p6,p7=loc8,VMX_INSERT_RR7
    mov r8=rr[loc7]
    ;;
    mov rr[loc7]=in0
(p6) mov cr.ifa=in1
(p6) mov cr.itir=loc4
    ;;
    srlz.i
    ;;
(p6) itr.i itr[loc2]=in3
(p7) ptr.i in1,loc4
    ;;
(p6) itr.d dtr[loc2]=in3
(p7) ptr.d in1,loc4
    ;;
    srlz.i
    ;;
(p6) mov cr.ifa=in2
(p6) mov cr.itir=loc5
    ;;
(p6) itr.d dtr[loc3]=in4
(p7) ptr.d in2,loc5
    ;;
    srlz.i
    ;;
    mov rr[loc7]=r8
    ;;
    srlz.i
    br.sptk rp
END(vmx_switch_rr7)

    .align PAGE_SIZE

#else
/*
 * in0: new rr7
 * in1: virtual address of shared_info
 * in2: virtual address of shared_arch_info (VPD)
 * in3: virtual address of guest_vhpt
 * in4: virtual address of pal code segment
 * r8: will contain old rid value
 */

#define PSR_BITS_TO_CLEAR \
    (IA64_PSR_I | IA64_PSR_IT | IA64_PSR_DT | IA64_PSR_DB | IA64_PSR_RT | \
     IA64_PSR_DD | IA64_PSR_SS | IA64_PSR_RI | IA64_PSR_ED | \
     IA64_PSR_DFL | IA64_PSR_DFH)
#define PSR_BITS_TO_SET IA64_PSR_BN

// extern void vmx_switch_rr7(unsigned long rid, void *shared_info,
//                            void *shared_arch_info, void *guest_vhpt,
//                            void *pal_vaddr);
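
/*
 * Why the physical-mode dance below (hedged summary): rr7 maps Xen's own
 * text and data, so it cannot safely be rewritten while translation is on.
 * The routine therefore saves psr, forces the register-bank bit
 * (PSR_BITS_TO_SET), clears the translation/interrupt bits
 * (PSR_BITS_TO_CLEAR), drops to physical mode via ia64_switch_mode_phys,
 * installs the new rid in rr7, re-pins the translation registers that live
 * in region 7, and returns to virtual mode via ia64_switch_mode_virt.
 */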
GLOBAL_ENTRY(vmx_switch_rr7)
    // not sure this unwind statement is correct...
    .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(1)
    alloc loc1=ar.pfs,5,9,0,0
1: {
    mov r28=in0     // copy procedure index
    mov r8=ip       // save ip to compute branch
    mov loc0=rp     // save rp
   };;
    .body
    movl loc2=PERCPU_ADDR
    ;;
    tpa loc2=loc2       // get physical address of per-cpu data
    ;;
    dep loc3=0,in1,60,4 // get physical address of shared_info
    dep loc4=0,in2,60,4 // get physical address of shared_arch_info
    dep loc5=0,in3,60,4 // get physical address of guest_vhpt
    dep loc6=0,in4,60,4 // get physical address of pal code
    ;;
    mov loc7=psr        // save psr
    ;;
    mov loc8=ar.rsc     // save RSE configuration
    ;;
    mov ar.rsc=0        // put RSE in enforced lazy, LE mode
    movl r16=PSR_BITS_TO_CLEAR
    movl r17=PSR_BITS_TO_SET
    ;;
    or loc7=loc7,r17    // add in psr the bits to set
    ;;
    andcm r16=loc7,r16  // remove bits to clear from psr
    br.call.sptk.many rp=ia64_switch_mode_phys
1:
    // now in physical mode with psr.i/ic off so do rr7 switch
    dep r16=-1,r0,61,3
    ;;
    mov rr[r16]=in0
    srlz.d
    ;;
    rsm 0x6000
    ;;
    srlz.d

    // re-pin mappings for kernel text and data
    mov r18=KERNEL_TR_PAGE_SHIFT<<2
    movl r17=KERNEL_START
    ;;
    ptr.i r17,r18
    ptr.d r17,r18
    ;;
    mov cr.itir=r18
    mov cr.ifa=r17
    mov r16=IA64_TR_KERNEL
    //mov r3=ip
    movl r25=PAGE_KERNEL
    ;;
    dep r2=0,r3,0,KERNEL_TR_PAGE_SHIFT
    ;;
    or r18=r2,r25
    ;;
    srlz.i
    ;;
    itr.i itr[r16]=r18
    ;;
    itr.d dtr[r16]=r18
    ;;
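    // The re-pin pattern (hedged gloss, repeated for each mapping below):
    // purge any stale entries with ptr.i/ptr.d, program cr.itir (page size)
    // and cr.ifa (virtual address), serialize, then itr.i/itr.d the new
    // translation into a fixed TR slot.  The same sequence follows for
    // per-cpu data, the guest VHPT, and the PAL code section.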

    // re-pin mappings for per-cpu data
    movl r22=PERCPU_ADDR
    ;;
    mov r24=IA64_TR_PERCPU_DATA
    or loc2=r25,loc2    // construct PA | page properties
    mov r23=PERCPU_PAGE_SHIFT<<2
    ;;
    ptr.d r22,r23
    ;;
    mov cr.itir=r23
    mov cr.ifa=r22
    ;;
    itr.d dtr[r24]=loc2 // wire in new mapping...
    ;;

#if 0
    // re-pin mappings for shared_info
    mov r24=IA64_TR_SHARED_INFO
    movl r25=__pgprot(__DIRTY_BITS | _PAGE_PL_2 | _PAGE_AR_RW)
    ;;
    or loc3=r25,loc3    // construct PA | page properties
    mov r23=PAGE_SHIFT<<2
    ;;
    ptr.d in1,r23
    ;;
    mov cr.itir=r23
    mov cr.ifa=in1
    ;;
    itr.d dtr[r24]=loc3 // wire in new mapping...
    ;;
    // re-pin mappings for shared_arch_info
    mov r24=IA64_TR_ARCH_INFO
    or loc4=r25,loc4    // construct PA | page properties
    mov r23=PAGE_SHIFT<<2
    ;;
    ptr.d in2,r23
    ;;
    mov cr.itir=r23
    mov cr.ifa=in2
    ;;
    itr.d dtr[r24]=loc4 // wire in new mapping...
    ;;
#endif

    // re-pin mappings for guest_vhpt
    mov r24=IA64_TR_PERVP_VHPT
    movl r25=PAGE_KERNEL
    ;;
    or loc5=r25,loc5    // construct PA | page properties
    mov r23=VCPU_VHPT_SHIFT<<2
    ;;
    ptr.d in3,r23
    ;;
    mov cr.itir=r23
    mov cr.ifa=in3
    ;;
    itr.d dtr[r24]=loc5 // wire in new mapping...
    ;;

    // re-pin mappings for PAL code section
    mov r24=IA64_TR_PALCODE
    or loc6=r25,loc6    // construct PA | page properties
    mov r23=IA64_GRANULE_SHIFT<<2
    ;;
    ptr.i in4,r23
    ;;
    mov cr.itir=r23
    mov cr.ifa=in4
    ;;
    itr.i itr[r24]=loc6 // wire in new mapping...
    ;;

    // done, switch back to virtual and return
    mov r16=loc7        // r16 = original psr
    br.call.sptk.many rp=ia64_switch_mode_virt // return to virtual mode
    mov ar.pfs=loc1
    mov rp=loc0
    ;;
    mov ar.rsc=loc8     // restore RSE configuration
    srlz.d              // serialize restoration of psr.l
    br.ret.sptk.many rp
END(vmx_switch_rr7)
#endif