direct-io.hg

view xen/arch/ia64/vmx/vmx_ivt.S @ 12411:622bb65e2011

[IA64] Accelerate RSM, SSM and MOV_TO_PSR

Signed-off-by: Anthony Xu <anthony.xu@intel.com>
author awilliam@xenbuild.aw
date Sun Oct 29 11:18:17 2006 -0700 (2006-10-29)
parents 9e9d8696fb55
children 51be39239c47
line source
1 /*
2 * arch/ia64/kernel/vmx_ivt.S
3 *
4 * Copyright (C) 1998-2001, 2003 Hewlett-Packard Co
5 * Stephane Eranian <eranian@hpl.hp.com>
6 * David Mosberger <davidm@hpl.hp.com>
7 * Copyright (C) 2000, 2002-2003 Intel Co
8 * Asit Mallick <asit.k.mallick@intel.com>
9 * Suresh Siddha <suresh.b.siddha@intel.com>
10 * Kenneth Chen <kenneth.w.chen@intel.com>
11 * Fenghua Yu <fenghua.yu@intel.com>
12 *
13 *
14 * 00/08/23 Asit Mallick <asit.k.mallick@intel.com> TLB handling for SMP
15 * 00/12/20 David Mosberger-Tang <davidm@hpl.hp.com> DTLB/ITLB handler now uses virtual PT.
16 *
17 * 05/3/20 Xuefei Xu (Anthony Xu) (anthony.xu@intel.com)
18 * Supporting Intel virtualization architecture
19 *
20 */
22 /*
23 * This file defines the interruption vector table used by the CPU.
24 * It does not include one entry per possible cause of interruption.
25 *
26 * The first 20 entries of the table contain 64 bundles each while the
27 * remaining 48 entries contain only 16 bundles each.
28 *
29 * The 64 bundles are used to allow inlining the whole handler for critical
30 * interruptions like TLB misses.
31 *
32 * For each entry, the comment is as follows:
33 *
34 * // 0x1c00 Entry 7 (size 64 bundles) Data Key Miss (12,51)
35 * entry offset ----/ / / / /
36 * entry number ---------/ / / /
37 * size of the entry -------------/ / /
38 * vector name -------------------------------------/ /
39 * interruptions triggering this vector ----------------------/
40 *
41 * The table is 32KB in size and must be aligned on 32KB boundary.
42 * (The CPU ignores the 15 lower bits of the address)
43 *
44 * Table is based upon EAS2.6 (Oct 1999)
45 */
47 #include <linux/config.h>
49 #include <asm/asmmacro.h>
50 #include <asm/break.h>
51 #include <asm/ia32.h>
52 #include <asm/kregs.h>
53 #include <asm/offsets.h>
54 #include <asm/pgtable.h>
55 #include <asm/processor.h>
56 #include <asm/ptrace.h>
57 #include <asm/system.h>
58 #include <asm/thread_info.h>
59 #include <asm/unistd.h>
60 #include <asm/vhpt.h>
61 #include <asm/virt_event.h>
63 #ifdef VTI_DEBUG
64 /*
65 * This lets you track the last eight faults that occurred on the CPU. Make sure ar.k2 isn't
66 * needed for something else before enabling this...
67 */
/*
 * VMX_DBG_FAULT(i): append one record to a per-CPU fault log anchored at r21.
 * Each record is 4 x 8 bytes: cr.iip, cr.ipsr, cr.ifa, vector number i.
 * The current write offset lives at IVT_CUR_OFS(r21); records start at
 * IVT_DBG_OFS(r21).  The offset is advanced by 32 and wrapped by masking
 * with 0xfe0.  Clobbers r16-r20, r22, r23.
 * (No // comments may appear inside the macro body: the trailing
 * backslash continuations would be swallowed.)
 */
68 #define VMX_DBG_FAULT(i) \
69 add r16=IVT_CUR_OFS,r21; \
70 add r17=IVT_DBG_OFS,r21;; \
71 ld8 r18=[r16];; \
72 add r17=r18,r17; \
73 mov r19=cr.iip; \
74 mov r20=cr.ipsr; \
75 mov r22=cr.ifa; \
76 mov r23=i;; \
77 st8 [r17]=r19,8; \
78 add r18=32,r18;; \
79 st8 [r17]=r20,8; \
80 mov r19=0xfe0;; \
81 st8 [r17]=r22,8; \
82 and r18=r19,r18;; \
83 st8 [r17]=r23; \
84 st8 [r16]=r18;; \
85 //# define VMX_DBG_FAULT(i) mov r16=ar.k2;; shl r16=r16,8;; add r16=(i),r16;;mov ar.k2=r16
86 #else
87 # define VMX_DBG_FAULT(i)
88 #endif
90 #include "vmx_minstate.h"
92 #define MINSTATE_VIRT /* needed by minstate.h */
93 #include "minstate.h"
/*
 * VMX_FAULT(n): default body for vector n.  Defines the label vmx_fault_n
 * (so other entries can branch to it) and hands control to the common
 * dispatch_to_fault_handler path.
 */
96 #define VMX_FAULT(n) \
97 vmx_fault_##n:; \
98 br.sptk.many dispatch_to_fault_handler; \
99 ;; \
/*
 * VMX_REFLECT(n): if the interruption occurred in guest mode (cr.ipsr.vm
 * set -> p7), reflect it to the guest via vmx_dispatch_reflection with
 * r19 = vector number and r31 = saved predicates; otherwise fall through
 * into the plain VMX_FAULT(n) path.
 */
102 #define VMX_REFLECT(n) \
103 mov r31=pr; \
104 mov r19=n; /* prepare to save predicates */ \
105 mov r29=cr.ipsr; \
106 ;; \
107 tbit.z p6,p7=r29,IA64_PSR_VM_BIT; \
108 (p7)br.sptk.many vmx_dispatch_reflection; \
109 VMX_FAULT(n); \
// vmx_panic: spin forever on a branch-to-self.  Never returns; the CPU is
// deliberately parked here.
112 GLOBAL_ENTRY(vmx_panic)
113 br.sptk.many vmx_panic
114 ;;
115 END(vmx_panic)
121 .section .text.ivt,"ax"
123 .align 32768 // align on 32KB boundary
124 .global vmx_ia64_ivt
125 vmx_ia64_ivt:
126 /////////////////////////////////////////////////////////////////////////////////////////
127 // 0x0000 Entry 0 (size 64 bundles) VHPT Translation (8,20,47)
// Not handled inline: VHPT translation faults go straight to the common
// fault handler.
128 ENTRY(vmx_vhpt_miss)
129 VMX_DBG_FAULT(0)
130 VMX_FAULT(0)
131 END(vmx_vhpt_miss)
133 .org vmx_ia64_ivt+0x400
134 /////////////////////////////////////////////////////////////////////////////////////////
135 // 0x0400 Entry 1 (size 64 bundles) ITLB (21)
// Guest ITLB miss.  If the miss happened with cr.ipsr.vm clear (p6), take
// the alternate identity-mapping path.  Otherwise hash the faulting address
// (thash/ttag) and walk the VHPT collision chain; on a tag hit the matching
// entry is exchanged with the chain head and inserted with itc.i, then the
// guest is re-entered through ia64_vmm_entry.  On a miss, dispatch to C via
// vmx_dispatch_itlb_miss with r19 = vector 1.
136 ENTRY(vmx_itlb_miss)
137 VMX_DBG_FAULT(1)
138 mov r31 = pr
139 mov r29=cr.ipsr;
140 ;;
141 tbit.z p6,p7=r29,IA64_PSR_VM_BIT;
142 (p6) br.sptk vmx_alt_itlb_miss_1
143 //(p6) br.sptk vmx_fault_1
144 mov r16 = cr.ifa
145 ;;
146 thash r17 = r16
147 ttag r20 = r16
148 ;;
// r18 = head of the hash line, r28 = head's tag address, r17 walks the chain.
149 mov r18 = r17
150 adds r28 = VLE_TITAG_OFFSET,r17
151 adds r19 = VLE_CCHAIN_OFFSET, r17
152 ;;
153 ld8 r17 = [r19]
154 ;;
// Chain walk: stop on NULL link (miss) or on tag match (r22 == r20).
155 vmx_itlb_loop:
156 cmp.eq p6,p0 = r0, r17
157 (p6)br vmx_itlb_out
158 ;;
159 adds r16 = VLE_TITAG_OFFSET, r17
160 adds r19 = VLE_CCHAIN_OFFSET, r17
161 ;;
162 ld8 r22 = [r16]
163 ld8 r23 = [r19]
164 ;;
165 lfetch [r23]
166 cmp.eq p6,p7 = r20, r22
167 ;;
168 (p7)mov r17 = r23;
169 (p7)br.sptk vmx_itlb_loop
170 ;;
// Hit: swap tag/itir and data words between the matching entry and the
// chain head (move-to-front), then insert the translation.
171 ld8 r25 = [r17]
172 ld8 r27 = [r18]
173 ld8 r29 = [r28]
174 ;;
175 st8 [r16] = r29, VLE_ITIR_OFFSET - VLE_TITAG_OFFSET
176 st8 [r28] = r22, VLE_ITIR_OFFSET - VLE_TITAG_OFFSET
177 extr.u r19 = r27, 56, 4
178 ;;
179 ld8 r29 = [r16]
180 ld8 r22 = [r28]
181 dep r27 = r0, r27, 56, 4
182 dep r25 = r19, r25, 56, 4
183 ;;
184 st8 [r16] = r22
185 st8 [r28] = r29
186 st8 [r18] = r25
187 st8 [r17] = r27
188 ;;
189 itc.i r25
190 dv_serialize_data
// Reload what the guest re-entry path uses: r18 = VPD base, r19 = guest
// VPSR, r20 = __vsa_base, r22 = saved b0, r23 = saved predicates
// (as ia64_vmm_entry expects -- defined elsewhere).
191 mov r17=cr.isr
192 mov r23=r31
193 mov r22=b0
194 adds r16=IA64_VPD_BASE_OFFSET,r21
195 ;;
196 ld8 r18=[r16]
197 ;;
198 adds r19=VPD(VPSR),r18
199 movl r20=__vsa_base
200 ;;
201 ld8 r19=[r19]
202 ld8 r20=[r20]
203 ;;
204 br.sptk ia64_vmm_entry
205 ;;
// Chain exhausted: no VHPT entry -- go to the C miss handler.
206 vmx_itlb_out:
207 mov r19 = 1
208 br.sptk vmx_dispatch_itlb_miss
209 VMX_FAULT(1);
210 END(vmx_itlb_miss)
212 .org vmx_ia64_ivt+0x0800
213 /////////////////////////////////////////////////////////////////////////////////////////
214 // 0x0800 Entry 2 (size 64 bundles) DTLB (9,48)
// Guest DTLB miss.  Mirrors vmx_itlb_miss exactly, except the insert is
// itc.d and a miss dispatches with r19 = vector 2.
215 ENTRY(vmx_dtlb_miss)
216 VMX_DBG_FAULT(2)
217 mov r31 = pr
218 mov r29=cr.ipsr;
219 ;;
220 tbit.z p6,p7=r29,IA64_PSR_VM_BIT;
221 (p6)br.sptk vmx_alt_dtlb_miss_1
222 mov r16 = cr.ifa
223 ;;
224 thash r17 = r16
225 ttag r20 = r16
226 ;;
// r18 = head of the hash line, r28 = head's tag address, r17 walks the chain.
227 mov r18 = r17
228 adds r28 = VLE_TITAG_OFFSET,r17
229 adds r19 = VLE_CCHAIN_OFFSET, r17
230 ;;
231 ld8 r17 = [r19]
232 ;;
// Chain walk: stop on NULL link (miss) or on tag match (r22 == r20).
233 vmx_dtlb_loop:
234 cmp.eq p6,p0 = r0, r17
235 (p6)br vmx_dtlb_out
236 ;;
237 adds r16 = VLE_TITAG_OFFSET, r17
238 adds r19 = VLE_CCHAIN_OFFSET, r17
239 ;;
240 ld8 r22 = [r16]
241 ld8 r23 = [r19]
242 ;;
243 lfetch [r23]
244 cmp.eq p6,p7 = r20, r22
245 ;;
246 (p7)mov r17 = r23;
247 (p7)br.sptk vmx_dtlb_loop
248 ;;
// Hit: move the matching entry to the front of the chain, then insert.
249 ld8 r25 = [r17]
250 ld8 r27 = [r18]
251 ld8 r29 = [r28]
252 ;;
253 st8 [r16] = r29, VLE_ITIR_OFFSET - VLE_TITAG_OFFSET
254 st8 [r28] = r22, VLE_ITIR_OFFSET - VLE_TITAG_OFFSET
255 extr.u r19 = r27, 56, 4
256 ;;
257 ld8 r29 = [r16]
258 ld8 r22 = [r28]
259 dep r27 = r0, r27, 56, 4
260 dep r25 = r19, r25, 56, 4
261 ;;
262 st8 [r16] = r22
263 st8 [r28] = r29
264 st8 [r18] = r25
265 st8 [r17] = r27
266 ;;
267 itc.d r25
268 dv_serialize_data
// Reload guest re-entry state (VPD base, VPSR, __vsa_base, saved b0/pr)
// and return to the guest via ia64_vmm_entry.
269 mov r17=cr.isr
270 mov r23=r31
271 mov r22=b0
272 adds r16=IA64_VPD_BASE_OFFSET,r21
273 ;;
274 ld8 r18=[r16]
275 ;;
276 adds r19=VPD(VPSR),r18
277 movl r20=__vsa_base
278 ;;
279 ld8 r19=[r19]
280 ld8 r20=[r20]
281 ;;
282 br.sptk ia64_vmm_entry
283 ;;
// Chain exhausted: no VHPT entry -- go to the C miss handler.
284 vmx_dtlb_out:
285 mov r19 = 2
286 br.sptk vmx_dispatch_dtlb_miss
287 VMX_FAULT(2);
288 END(vmx_dtlb_miss)
290 .org vmx_ia64_ivt+0x0c00
291 /////////////////////////////////////////////////////////////////////////////////////////
292 // 0x0c00 Entry 3 (size 64 bundles) Alt ITLB (19)
// Alternate ITLB miss: identity-map the faulting address with PAGE_KERNEL
// attributes and a granule-sized itir.  Only valid for addresses with bit
// 63 set and outside guest mode; anything else becomes vmx_fault_3.
293 ENTRY(vmx_alt_itlb_miss)
294 VMX_DBG_FAULT(3)
295 mov r31 = pr
296 mov r29=cr.ipsr;
297 ;;
298 tbit.z p6,p7=r29,IA64_PSR_VM_BIT;
299 (p7)br.spnt vmx_fault_3
300 vmx_alt_itlb_miss_1:
301 mov r16=cr.ifa // get address that caused the TLB miss
302 ;;
303 tbit.z p6,p7=r16,63
304 (p6)br.spnt vmx_fault_3
305 ;;
306 movl r17=PAGE_KERNEL
307 mov r24=cr.ipsr
308 movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
309 ;;
310 and r19=r19,r16 // clear ed, reserved bits, and PTE control bits
311 shr.u r18=r16,55 // move address bit 59 to bit 4
312 ;;
313 and r18=0x10,r18 // bit 4=address-bit(61)
// NOTE(review): the two comments above disagree (bit 59 vs bit 61); a shift
// by 55 lands address bit 59 in bit 4 -- confirm which bit is intended.
314 or r19=r17,r19 // insert PTE control bits into r19
315 ;;
316 movl r20=IA64_GRANULE_SHIFT<<2
317 or r19=r19,r18 // set bit 4 (uncached) if the access was to region 6
318 ;;
319 mov cr.itir=r20
320 ;;
321 itc.i r19 // insert the TLB entry
322 mov pr=r31,-1
323 rfi
324 VMX_FAULT(3);
325 END(vmx_alt_itlb_miss)
328 .org vmx_ia64_ivt+0x1000
329 /////////////////////////////////////////////////////////////////////////////////////////
330 // 0x1000 Entry 4 (size 64 bundles) Alt DTLB (7,46)
// Alternate DTLB miss: like alt ITLB, identity-map the address (bit 63 set,
// non-guest context only).  Extra handling: virtual frame-table addresses
// are redirected to frametable_miss, and speculative non-access references
// (isr.sp with isr.code == LFETCH) get cr.ipsr.ed set instead of a
// translation (p6 path), so the speculative load just deferred-faults.
331 ENTRY(vmx_alt_dtlb_miss)
332 VMX_DBG_FAULT(4)
333 mov r31=pr
334 mov r29=cr.ipsr;
335 ;;
336 tbit.z p6,p7=r29,IA64_PSR_VM_BIT;
337 (p7)br.spnt vmx_fault_4
338 vmx_alt_dtlb_miss_1:
339 mov r16=cr.ifa // get address that caused the TLB miss
340 ;;
341 #ifdef CONFIG_VIRTUAL_FRAME_TABLE
342 // Test for the address of virtual frame_table
343 shr r22=r16,56;;
344 cmp.eq p8,p0=((VIRT_FRAME_TABLE_ADDR>>56)&0xff)-0x100,r22
345 (p8)br.cond.sptk frametable_miss ;;
346 #endif
347 tbit.z p6,p7=r16,63
348 (p6)br.spnt vmx_fault_4
349 ;;
350 movl r17=PAGE_KERNEL
351 mov r20=cr.isr
352 movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
353 mov r24=cr.ipsr
354 ;;
355 and r22=IA64_ISR_CODE_MASK,r20 // get the isr.code field
356 tbit.nz p6,p7=r20,IA64_ISR_SP_BIT // is speculation bit on?
357 shr.u r18=r16,55 // move address bit 59 to bit 4
358 and r19=r19,r16 // clear ed, reserved bits, and PTE control bits
359 tbit.nz p9,p0=r20,IA64_ISR_NA_BIT // is non-access bit on?
360 ;;
361 and r18=0x10,r18 // bit 4=address-bit(61)
362 (p9)cmp.eq.or.andcm p6,p7=IA64_ISR_CODE_LFETCH,r22 // check isr.code field
363 dep r24=-1,r24,IA64_PSR_ED_BIT,1
364 or r19=r19,r17 // insert PTE control bits into r19
365 ;;
366 or r19=r19,r18 // set bit 4 (uncached) if the access was to region 6
367 (p6)mov cr.ipsr=r24
368 movl r20=IA64_GRANULE_SHIFT<<2
369 ;;
370 mov cr.itir=r20
371 ;;
372 (p7)itc.d r19 // insert the TLB entry
373 mov pr=r31,-1
374 rfi
375 VMX_FAULT(4);
376 END(vmx_alt_dtlb_miss)
378 .org vmx_ia64_ivt+0x1400
379 /////////////////////////////////////////////////////////////////////////////////////////
380 // 0x1400 Entry 5 (size 64 bundles) Data nested TLB (6,45)
// Entry 5 is not handled inline; entries 6-10 are reflected straight back
// to the guest via VMX_REFLECT (or fault if not in guest mode).
381 ENTRY(vmx_nested_dtlb_miss)
382 VMX_DBG_FAULT(5)
383 VMX_FAULT(5)
384 END(vmx_nested_dtlb_miss)
386 .org vmx_ia64_ivt+0x1800
387 /////////////////////////////////////////////////////////////////////////////////////////
388 // 0x1800 Entry 6 (size 64 bundles) Instruction Key Miss (24)
389 ENTRY(vmx_ikey_miss)
390 VMX_DBG_FAULT(6)
391 VMX_REFLECT(6)
392 END(vmx_ikey_miss)
394 .org vmx_ia64_ivt+0x1c00
395 /////////////////////////////////////////////////////////////////////////////////////////
396 // 0x1c00 Entry 7 (size 64 bundles) Data Key Miss (12,51)
397 ENTRY(vmx_dkey_miss)
398 VMX_DBG_FAULT(7)
399 VMX_REFLECT(7)
400 END(vmx_dkey_miss)
402 .org vmx_ia64_ivt+0x2000
403 /////////////////////////////////////////////////////////////////////////////////////////
404 // 0x2000 Entry 8 (size 64 bundles) Dirty-bit (54)
405 ENTRY(vmx_dirty_bit)
406 VMX_DBG_FAULT(8)
407 VMX_REFLECT(8)
408 END(vmx_dirty_bit)
410 .org vmx_ia64_ivt+0x2400
411 /////////////////////////////////////////////////////////////////////////////////////////
412 // 0x2400 Entry 9 (size 64 bundles) Instruction Access-bit (27)
413 ENTRY(vmx_iaccess_bit)
414 VMX_DBG_FAULT(9)
415 VMX_REFLECT(9)
416 END(vmx_iaccess_bit)
418 .org vmx_ia64_ivt+0x2800
419 /////////////////////////////////////////////////////////////////////////////////////////
420 // 0x2800 Entry 10 (size 64 bundles) Data Access-bit (15,55)
421 ENTRY(vmx_daccess_bit)
422 VMX_DBG_FAULT(10)
423 VMX_REFLECT(10)
424 END(vmx_daccess_bit)
426 .org vmx_ia64_ivt+0x2c00
427 /////////////////////////////////////////////////////////////////////////////////////////
428 // 0x2c00 Entry 11 (size 64 bundles) Break instruction (33)
// break instruction: r19 = vector 11, r30 = cr.iim (break immediate).
// Everything is funnelled to vmx_dispatch_break_fault, which calls
// vmx_ia64_handle_break in C.
429 ENTRY(vmx_break_fault)
430 VMX_DBG_FAULT(11)
431 mov r31=pr
432 mov r19=11
433 mov r30=cr.iim
434 ;;
435 #ifdef VTI_DEBUG
436 // break 0 is already handled in vmx_ia64_handle_break.
437 cmp.eq p6,p7=r30,r0
438 (p6) br.sptk vmx_fault_11
439 ;;
440 #endif
441 br.sptk.many vmx_dispatch_break_fault
442 ;;
443 VMX_FAULT(11);
444 END(vmx_break_fault)
446 .org vmx_ia64_ivt+0x3000
447 /////////////////////////////////////////////////////////////////////////////////////////
448 // 0x3000 Entry 12 (size 64 bundles) External Interrupt (4)
// External interrupt.  If it arrived in guest mode (cr.ipsr.vm set -> p7)
// hand off to vmx_dispatch_interrupt.  Otherwise build a pt_regs frame on
// the stack inline (open-coded min-state save), turn psr.ic back on, and
// call ia64_handle_irq(cr.ivr, pt_regs*), returning via ia64_leave_nested.
// p15 remembers whether psr.i was set so interrupts can be re-enabled.
449 ENTRY(vmx_interrupt)
450 // VMX_DBG_FAULT(12)
451 mov r31=pr // prepare to save predicates
452 mov r19=12
453 mov r29=cr.ipsr
454 ;;
455 tbit.z p6,p7=r29,IA64_PSR_VM_BIT
456 tbit.z p0,p15=r29,IA64_PSR_I_BIT
457 ;;
458 (p7) br.sptk vmx_dispatch_interrupt
459 ;;
// Host-context path: save minimal machine state by hand.
460 mov r27=ar.rsc /* M */
461 mov r20=r1 /* A */
462 mov r25=ar.unat /* M */
463 mov r26=ar.pfs /* I */
464 mov r28=cr.iip /* M */
465 cover /* B (or nothing) */
466 ;;
467 mov r1=sp
468 ;;
469 invala /* M */
470 mov r30=cr.ifs
471 ;;
472 addl r1=-IA64_PT_REGS_SIZE,r1
473 ;;
474 adds r17=2*L1_CACHE_BYTES,r1 /* really: biggest cache-line size */
475 adds r16=PT(CR_IPSR),r1
476 ;;
477 lfetch.fault.excl.nt1 [r17],L1_CACHE_BYTES
478 st8 [r16]=r29 /* save cr.ipsr */
479 ;;
480 lfetch.fault.excl.nt1 [r17]
481 mov r29=b0
482 ;;
483 adds r16=PT(R8),r1 /* initialize first base pointer */
484 adds r17=PT(R9),r1 /* initialize second base pointer */
485 mov r18=r0 /* make sure r18 isn't NaT */
486 ;;
487 .mem.offset 0,0; st8.spill [r16]=r8,16
488 .mem.offset 8,0; st8.spill [r17]=r9,16
489 ;;
490 .mem.offset 0,0; st8.spill [r16]=r10,24
491 .mem.offset 8,0; st8.spill [r17]=r11,24
492 ;;
493 st8 [r16]=r28,16 /* save cr.iip */
494 st8 [r17]=r30,16 /* save cr.ifs */
495 mov r8=ar.fpsr /* M */
496 mov r9=ar.csd
497 mov r10=ar.ssd
498 movl r11=FPSR_DEFAULT /* L-unit */
499 ;;
500 st8 [r16]=r25,16 /* save ar.unat */
501 st8 [r17]=r26,16 /* save ar.pfs */
502 shl r18=r18,16 /* compute ar.rsc to be used for "loadrs" */
503 ;;
504 st8 [r16]=r27,16 /* save ar.rsc */
505 adds r17=16,r17 /* skip over ar_rnat field */
506 ;;
507 st8 [r17]=r31,16 /* save predicates */
508 adds r16=16,r16 /* skip over ar_bspstore field */
509 ;;
510 st8 [r16]=r29,16 /* save b0 */
511 st8 [r17]=r18,16 /* save ar.rsc value for "loadrs" */
512 ;;
513 .mem.offset 0,0; st8.spill [r16]=r20,16 /* save original r1 */
514 .mem.offset 8,0; st8.spill [r17]=r12,16
515 adds r12=-16,r1 /* switch to kernel memory stack (with 16 bytes of scratch) */
516 ;;
517 .mem.offset 0,0; st8.spill [r16]=r13,16
518 .mem.offset 8,0; st8.spill [r17]=r8,16 /* save ar.fpsr */
519 MINSTATE_GET_CURRENT(r13)
520 ;;
521 .mem.offset 0,0; st8.spill [r16]=r15,16
522 .mem.offset 8,0; st8.spill [r17]=r14,16
523 dep r14=-1,r0,60,4
524 ;;
525 .mem.offset 0,0; st8.spill [r16]=r2,16
526 .mem.offset 8,0; st8.spill [r17]=r3,16
527 adds r2=IA64_PT_REGS_R16_OFFSET,r1
528 ;;
529 mov r8=ar.ccv
530 movl r1=__gp /* establish kernel global pointer */
531 ;; \
532 bsw.1
533 ;;
534 alloc r14=ar.pfs,0,0,2,0 // must be first in an insn group
535 mov out0=cr.ivr // pass cr.ivr as first arg
536 add out1=16,sp // pass pointer to pt_regs as second arg
538 ssm psr.ic
539 ;;
540 srlz.i
541 ;;
542 (p15) ssm psr.i
543 adds r3=8,r2 // set up second base pointer for SAVE_REST
544 srlz.i // ensure everybody knows psr.ic is back on
545 ;;
// Save the remaining scratch state (r16-r31, branch regs, FP scratch,
// ar.ccv/csd/ssd) into the pt_regs frame.
546 .mem.offset 0,0; st8.spill [r2]=r16,16
547 .mem.offset 8,0; st8.spill [r3]=r17,16
548 ;;
549 .mem.offset 0,0; st8.spill [r2]=r18,16
550 .mem.offset 8,0; st8.spill [r3]=r19,16
551 ;;
552 .mem.offset 0,0; st8.spill [r2]=r20,16
553 .mem.offset 8,0; st8.spill [r3]=r21,16
554 mov r18=b6
555 ;;
556 .mem.offset 0,0; st8.spill [r2]=r22,16
557 .mem.offset 8,0; st8.spill [r3]=r23,16
558 mov r19=b7
559 ;;
560 .mem.offset 0,0; st8.spill [r2]=r24,16
561 .mem.offset 8,0; st8.spill [r3]=r25,16
562 ;;
563 .mem.offset 0,0; st8.spill [r2]=r26,16
564 .mem.offset 8,0; st8.spill [r3]=r27,16
565 ;;
566 .mem.offset 0,0; st8.spill [r2]=r28,16
567 .mem.offset 8,0; st8.spill [r3]=r29,16
568 ;;
569 .mem.offset 0,0; st8.spill [r2]=r30,16
570 .mem.offset 8,0; st8.spill [r3]=r31,32
571 ;;
572 mov ar.fpsr=r11 /* M-unit */
573 st8 [r2]=r8,8 /* ar.ccv */
574 adds r24=PT(B6)-PT(F7),r3
575 ;;
576 stf.spill [r2]=f6,32
577 stf.spill [r3]=f7,32
578 ;;
579 stf.spill [r2]=f8,32
580 stf.spill [r3]=f9,32
581 ;;
582 stf.spill [r2]=f10
583 stf.spill [r3]=f11
584 adds r25=PT(B7)-PT(F11),r3
585 ;;
586 st8 [r24]=r18,16 /* b6 */
587 st8 [r25]=r19,16 /* b7 */
588 ;;
589 st8 [r24]=r9 /* ar.csd */
590 st8 [r25]=r10 /* ar.ssd */
591 ;;
592 srlz.d // make sure we see the effect of cr.ivr
593 movl r14=ia64_leave_nested
594 ;;
595 mov rp=r14
596 br.call.sptk.many b6=ia64_handle_irq
597 ;;
598 END(vmx_interrupt)
600 .org vmx_ia64_ivt+0x3400
601 /////////////////////////////////////////////////////////////////////////////////////////
602 // 0x3400 Entry 13 (size 64 bundles) Reserved
// Repurposed reserved slot: virtual external interrupt injection.
// r19 = vector 13, r31 = saved predicates; handled by vmx_dispatch_vexirq.
603 ENTRY(vmx_virtual_exirq)
604 VMX_DBG_FAULT(13)
605 mov r31=pr
606 mov r19=13
607 br.sptk vmx_dispatch_vexirq
608 END(vmx_virtual_exirq)
610 .org vmx_ia64_ivt+0x3800
611 /////////////////////////////////////////////////////////////////////////////////////////
612 // 0x3800 Entry 14 (size 64 bundles) Reserved
// Reserved vectors 14-19: log (when VTI_DEBUG) and go to the common fault
// handler via VMX_FAULT.
613 VMX_DBG_FAULT(14)
614 VMX_FAULT(14)
617 .org vmx_ia64_ivt+0x3c00
618 /////////////////////////////////////////////////////////////////////////////////////////
619 // 0x3c00 Entry 15 (size 64 bundles) Reserved
620 VMX_DBG_FAULT(15)
621 VMX_FAULT(15)
624 .org vmx_ia64_ivt+0x4000
625 /////////////////////////////////////////////////////////////////////////////////////////
626 // 0x4000 Entry 16 (size 64 bundles) Reserved
627 VMX_DBG_FAULT(16)
628 VMX_FAULT(16)
630 .org vmx_ia64_ivt+0x4400
631 /////////////////////////////////////////////////////////////////////////////////////////
632 // 0x4400 Entry 17 (size 64 bundles) Reserved
633 VMX_DBG_FAULT(17)
634 VMX_FAULT(17)
636 .org vmx_ia64_ivt+0x4800
637 /////////////////////////////////////////////////////////////////////////////////////////
638 // 0x4800 Entry 18 (size 64 bundles) Reserved
639 VMX_DBG_FAULT(18)
640 VMX_FAULT(18)
642 .org vmx_ia64_ivt+0x4c00
643 /////////////////////////////////////////////////////////////////////////////////////////
644 // 0x4c00 Entry 19 (size 64 bundles) Reserved
645 VMX_DBG_FAULT(19)
646 VMX_FAULT(19)
648 .org vmx_ia64_ivt+0x5000
649 /////////////////////////////////////////////////////////////////////////////////////////
650 // 0x5000 Entry 20 (size 16 bundles) Page Not Present
// From here on each entry is only 16 bundles.  Entries 20-27 are reflected
// back to the guest via VMX_REFLECT (or fault if not in guest mode).
651 ENTRY(vmx_page_not_present)
652 VMX_DBG_FAULT(20)
653 VMX_REFLECT(20)
654 END(vmx_page_not_present)
656 .org vmx_ia64_ivt+0x5100
657 /////////////////////////////////////////////////////////////////////////////////////////
658 // 0x5100 Entry 21 (size 16 bundles) Key Permission vector
659 ENTRY(vmx_key_permission)
660 VMX_DBG_FAULT(21)
661 VMX_REFLECT(21)
662 END(vmx_key_permission)
664 .org vmx_ia64_ivt+0x5200
665 /////////////////////////////////////////////////////////////////////////////////////////
666 // 0x5200 Entry 22 (size 16 bundles) Instruction Access Rights (26)
667 ENTRY(vmx_iaccess_rights)
668 VMX_DBG_FAULT(22)
669 VMX_REFLECT(22)
670 END(vmx_iaccess_rights)
672 .org vmx_ia64_ivt+0x5300
673 /////////////////////////////////////////////////////////////////////////////////////////
674 // 0x5300 Entry 23 (size 16 bundles) Data Access Rights (14,53)
675 ENTRY(vmx_daccess_rights)
676 VMX_DBG_FAULT(23)
677 VMX_REFLECT(23)
678 END(vmx_daccess_rights)
680 .org vmx_ia64_ivt+0x5400
681 /////////////////////////////////////////////////////////////////////////////////////////
682 // 0x5400 Entry 24 (size 16 bundles) General Exception (5,32,34,36,38,39)
683 ENTRY(vmx_general_exception)
684 VMX_DBG_FAULT(24)
685 VMX_REFLECT(24)
686 // VMX_FAULT(24)
687 END(vmx_general_exception)
689 .org vmx_ia64_ivt+0x5500
690 /////////////////////////////////////////////////////////////////////////////////////////
691 // 0x5500 Entry 25 (size 16 bundles) Disabled FP-Register (35)
692 ENTRY(vmx_disabled_fp_reg)
693 VMX_DBG_FAULT(25)
694 VMX_REFLECT(25)
695 END(vmx_disabled_fp_reg)
697 .org vmx_ia64_ivt+0x5600
698 /////////////////////////////////////////////////////////////////////////////////////////
699 // 0x5600 Entry 26 (size 16 bundles) Nat Consumption (11,23,37,50)
700 ENTRY(vmx_nat_consumption)
701 VMX_DBG_FAULT(26)
702 VMX_REFLECT(26)
703 END(vmx_nat_consumption)
705 .org vmx_ia64_ivt+0x5700
706 /////////////////////////////////////////////////////////////////////////////////////////
707 // 0x5700 Entry 27 (size 16 bundles) Speculation (40)
708 ENTRY(vmx_speculation_vector)
709 VMX_DBG_FAULT(27)
710 VMX_REFLECT(27)
711 END(vmx_speculation_vector)
713 .org vmx_ia64_ivt+0x5800
714 /////////////////////////////////////////////////////////////////////////////////////////
715 // 0x5800 Entry 28 (size 16 bundles) Reserved
716 VMX_DBG_FAULT(28)
717 VMX_FAULT(28)
719 .org vmx_ia64_ivt+0x5900
720 /////////////////////////////////////////////////////////////////////////////////////////
721 // 0x5900 Entry 29 (size 16 bundles) Debug (16,28,56)
// Note: debug faults are NOT reflected to the guest -- they go to the
// common fault handler.
722 ENTRY(vmx_debug_vector)
723 VMX_DBG_FAULT(29)
724 VMX_FAULT(29)
725 END(vmx_debug_vector)
727 .org vmx_ia64_ivt+0x5a00
728 /////////////////////////////////////////////////////////////////////////////////////////
729 // 0x5a00 Entry 30 (size 16 bundles) Unaligned Reference (57)
// Entries 30-36: all reflected back to the guest via VMX_REFLECT.
730 ENTRY(vmx_unaligned_access)
731 VMX_DBG_FAULT(30)
732 VMX_REFLECT(30)
733 END(vmx_unaligned_access)
735 .org vmx_ia64_ivt+0x5b00
736 /////////////////////////////////////////////////////////////////////////////////////////
737 // 0x5b00 Entry 31 (size 16 bundles) Unsupported Data Reference (57)
738 ENTRY(vmx_unsupported_data_reference)
739 VMX_DBG_FAULT(31)
740 VMX_REFLECT(31)
741 END(vmx_unsupported_data_reference)
743 .org vmx_ia64_ivt+0x5c00
744 /////////////////////////////////////////////////////////////////////////////////////////
745 // 0x5c00 Entry 32 (size 16 bundles) Floating-Point Fault (64)
746 ENTRY(vmx_floating_point_fault)
747 VMX_DBG_FAULT(32)
748 VMX_REFLECT(32)
749 END(vmx_floating_point_fault)
751 .org vmx_ia64_ivt+0x5d00
752 /////////////////////////////////////////////////////////////////////////////////////////
753 // 0x5d00 Entry 33 (size 16 bundles) Floating Point Trap (66)
754 ENTRY(vmx_floating_point_trap)
755 VMX_DBG_FAULT(33)
756 VMX_REFLECT(33)
757 END(vmx_floating_point_trap)
759 .org vmx_ia64_ivt+0x5e00
760 /////////////////////////////////////////////////////////////////////////////////////////
761 // 0x5e00 Entry 34 (size 16 bundles) Lower Privilege Transfer Trap (66)
762 ENTRY(vmx_lower_privilege_trap)
763 VMX_DBG_FAULT(34)
764 VMX_REFLECT(34)
765 END(vmx_lower_privilege_trap)
767 .org vmx_ia64_ivt+0x5f00
768 /////////////////////////////////////////////////////////////////////////////////////////
769 // 0x5f00 Entry 35 (size 16 bundles) Taken Branch Trap (68)
770 ENTRY(vmx_taken_branch_trap)
771 VMX_DBG_FAULT(35)
772 VMX_REFLECT(35)
773 END(vmx_taken_branch_trap)
775 .org vmx_ia64_ivt+0x6000
776 /////////////////////////////////////////////////////////////////////////////////////////
777 // 0x6000 Entry 36 (size 16 bundles) Single Step Trap (69)
778 ENTRY(vmx_single_step_trap)
779 VMX_DBG_FAULT(36)
780 VMX_REFLECT(36)
781 END(vmx_single_step_trap)
783 .global vmx_virtualization_fault_back
784 .org vmx_ia64_ivt+0x6100
785 /////////////////////////////////////////////////////////////////////////////////////////
786 // 0x6100 Entry 37 (size 16 bundles) Virtualization Fault
// Virtualization fault.  r24 = event cause, r25 = opcode (assumed set
// before entry -- presumably by the min-state/entry path; confirm in
// vmx_minstate.h).  Frequent privileged ops are accelerated in asm
// (mov from ar/rr, mov to rr, rsm, ssm, mov to psr); those handlers branch
// back to vmx_virtualization_fault_back when they give up.  Everything
// else records cause/opcode in the vcpu and goes to C emulation.  For RFI
// only: if the virtual IFS valid bit (vifs bit 63) is set, the current
// register frame is discarded (zero-size alloc) before dispatching.
787 ENTRY(vmx_virtualization_fault)
788 // VMX_DBG_FAULT(37)
789 mov r31=pr
790 ;;
791 cmp.eq p6,p0=EVENT_MOV_FROM_AR,r24
792 cmp.eq p7,p0=EVENT_MOV_FROM_RR,r24
793 cmp.eq p8,p0=EVENT_MOV_TO_RR,r24
794 cmp.eq p9,p0=EVENT_RSM,r24
795 cmp.eq p10,p0=EVENT_SSM,r24
796 cmp.eq p11,p0=EVENT_MOV_TO_PSR,r24
797 (p6) br.dptk.many vmx_asm_mov_from_ar
798 (p7) br.dptk.many vmx_asm_mov_from_rr
799 (p8) br.dptk.many vmx_asm_mov_to_rr
800 (p9) br.dptk.many vmx_asm_rsm
801 (p10) br.dptk.many vmx_asm_ssm
802 (p11) br.dptk.many vmx_asm_mov_to_psr
803 ;;
804 vmx_virtualization_fault_back:
805 mov r19=37
806 adds r16 = IA64_VCPU_CAUSE_OFFSET,r21
807 adds r17 = IA64_VCPU_OPCODE_OFFSET,r21
808 ;;
809 st8 [r16] = r24
810 st8 [r17] = r25
811 ;;
812 cmp.ne p6,p0=EVENT_RFI, r24
813 (p6) br.sptk vmx_dispatch_virtualization_fault
814 ;;
// RFI path: read vifs from the VPD.
815 adds r18=IA64_VPD_BASE_OFFSET,r21
816 ;;
817 ld8 r18=[r18]
818 ;;
819 adds r18=IA64_VPD_VIFS_OFFSET,r18
820 ;;
821 ld8 r18=[r18]
822 ;;
823 tbit.z p6,p0=r18,63
824 (p6) br.sptk vmx_dispatch_virtualization_fault
825 ;;
826 // if vifs.v=1, discard the current register frame (zero-size alloc)
827 alloc r18=ar.pfs,0,0,0,0
828 br.sptk vmx_dispatch_virtualization_fault
829 END(vmx_virtualization_fault)
831 .org vmx_ia64_ivt+0x6200
832 /////////////////////////////////////////////////////////////////////////////////////////
833 // 0x6200 Entry 38 (size 16 bundles) Reserved
// Reserved vectors 38-44: log and go to the common fault handler.
834 VMX_DBG_FAULT(38)
835 VMX_FAULT(38)
837 .org vmx_ia64_ivt+0x6300
838 /////////////////////////////////////////////////////////////////////////////////////////
839 // 0x6300 Entry 39 (size 16 bundles) Reserved
840 VMX_DBG_FAULT(39)
841 VMX_FAULT(39)
843 .org vmx_ia64_ivt+0x6400
844 /////////////////////////////////////////////////////////////////////////////////////////
845 // 0x6400 Entry 40 (size 16 bundles) Reserved
846 VMX_DBG_FAULT(40)
847 VMX_FAULT(40)
849 .org vmx_ia64_ivt+0x6500
850 /////////////////////////////////////////////////////////////////////////////////////////
851 // 0x6500 Entry 41 (size 16 bundles) Reserved
852 VMX_DBG_FAULT(41)
853 VMX_FAULT(41)
855 .org vmx_ia64_ivt+0x6600
856 /////////////////////////////////////////////////////////////////////////////////////////
857 // 0x6600 Entry 42 (size 16 bundles) Reserved
858 VMX_DBG_FAULT(42)
859 VMX_FAULT(42)
861 .org vmx_ia64_ivt+0x6700
862 /////////////////////////////////////////////////////////////////////////////////////////
863 // 0x6700 Entry 43 (size 16 bundles) Reserved
864 VMX_DBG_FAULT(43)
865 VMX_FAULT(43)
867 .org vmx_ia64_ivt+0x6800
868 /////////////////////////////////////////////////////////////////////////////////////////
869 // 0x6800 Entry 44 (size 16 bundles) Reserved
870 VMX_DBG_FAULT(44)
871 VMX_FAULT(44)
873 .org vmx_ia64_ivt+0x6900
874 /////////////////////////////////////////////////////////////////////////////////////////
875 // 0x6900 Entry 45 (size 16 bundles) IA-32 Exception (17,18,29,41,42,43,44,58,60,61,62,72,73,75,76,77)
// IA-32 vectors are not supported by the VMX path: common fault handler.
876 ENTRY(vmx_ia32_exception)
877 VMX_DBG_FAULT(45)
878 VMX_FAULT(45)
879 END(vmx_ia32_exception)
881 .org vmx_ia64_ivt+0x6a00
882 /////////////////////////////////////////////////////////////////////////////////////////
883 // 0x6a00 Entry 46 (size 16 bundles) IA-32 Intercept (30,31,59,70,71)
884 ENTRY(vmx_ia32_intercept)
885 VMX_DBG_FAULT(46)
886 VMX_FAULT(46)
887 END(vmx_ia32_intercept)
889 .org vmx_ia64_ivt+0x6b00
890 /////////////////////////////////////////////////////////////////////////////////////////
891 // 0x6b00 Entry 47 (size 16 bundles) IA-32 Interrupt (74)
892 ENTRY(vmx_ia32_interrupt)
893 VMX_DBG_FAULT(47)
894 VMX_FAULT(47)
895 END(vmx_ia32_interrupt)
897 .org vmx_ia64_ivt+0x6c00
898 /////////////////////////////////////////////////////////////////////////////////////////
899 // 0x6c00 Entry 48 (size 16 bundles) Reserved
// Reserved vectors 48-67: log and go to the common fault handler.
900 VMX_DBG_FAULT(48)
901 VMX_FAULT(48)
903 .org vmx_ia64_ivt+0x6d00
904 /////////////////////////////////////////////////////////////////////////////////////////
905 // 0x6d00 Entry 49 (size 16 bundles) Reserved
906 VMX_DBG_FAULT(49)
907 VMX_FAULT(49)
909 .org vmx_ia64_ivt+0x6e00
910 /////////////////////////////////////////////////////////////////////////////////////////
911 // 0x6e00 Entry 50 (size 16 bundles) Reserved
912 VMX_DBG_FAULT(50)
913 VMX_FAULT(50)
915 .org vmx_ia64_ivt+0x6f00
916 /////////////////////////////////////////////////////////////////////////////////////////
917 // 0x6f00 Entry 51 (size 16 bundles) Reserved
918 VMX_DBG_FAULT(51)
919 VMX_FAULT(51)
921 .org vmx_ia64_ivt+0x7000
922 /////////////////////////////////////////////////////////////////////////////////////////
923 // 0x7000 Entry 52 (size 16 bundles) Reserved
924 VMX_DBG_FAULT(52)
925 VMX_FAULT(52)
927 .org vmx_ia64_ivt+0x7100
928 /////////////////////////////////////////////////////////////////////////////////////////
929 // 0x7100 Entry 53 (size 16 bundles) Reserved
930 VMX_DBG_FAULT(53)
931 VMX_FAULT(53)
933 .org vmx_ia64_ivt+0x7200
934 /////////////////////////////////////////////////////////////////////////////////////////
935 // 0x7200 Entry 54 (size 16 bundles) Reserved
936 VMX_DBG_FAULT(54)
937 VMX_FAULT(54)
939 .org vmx_ia64_ivt+0x7300
940 /////////////////////////////////////////////////////////////////////////////////////////
941 // 0x7300 Entry 55 (size 16 bundles) Reserved
942 VMX_DBG_FAULT(55)
943 VMX_FAULT(55)
945 .org vmx_ia64_ivt+0x7400
946 /////////////////////////////////////////////////////////////////////////////////////////
947 // 0x7400 Entry 56 (size 16 bundles) Reserved
948 VMX_DBG_FAULT(56)
949 VMX_FAULT(56)
951 .org vmx_ia64_ivt+0x7500
952 /////////////////////////////////////////////////////////////////////////////////////////
953 // 0x7500 Entry 57 (size 16 bundles) Reserved
954 VMX_DBG_FAULT(57)
955 VMX_FAULT(57)
957 .org vmx_ia64_ivt+0x7600
958 /////////////////////////////////////////////////////////////////////////////////////////
959 // 0x7600 Entry 58 (size 16 bundles) Reserved
960 VMX_DBG_FAULT(58)
961 VMX_FAULT(58)
963 .org vmx_ia64_ivt+0x7700
964 /////////////////////////////////////////////////////////////////////////////////////////
965 // 0x7700 Entry 59 (size 16 bundles) Reserved
966 VMX_DBG_FAULT(59)
967 VMX_FAULT(59)
969 .org vmx_ia64_ivt+0x7800
970 /////////////////////////////////////////////////////////////////////////////////////////
971 // 0x7800 Entry 60 (size 16 bundles) Reserved
972 VMX_DBG_FAULT(60)
973 VMX_FAULT(60)
975 .org vmx_ia64_ivt+0x7900
976 /////////////////////////////////////////////////////////////////////////////////////////
977 // 0x7900 Entry 61 (size 16 bundles) Reserved
978 VMX_DBG_FAULT(61)
979 VMX_FAULT(61)
981 .org vmx_ia64_ivt+0x7a00
982 /////////////////////////////////////////////////////////////////////////////////////////
983 // 0x7a00 Entry 62 (size 16 bundles) Reserved
984 VMX_DBG_FAULT(62)
985 VMX_FAULT(62)
987 .org vmx_ia64_ivt+0x7b00
988 /////////////////////////////////////////////////////////////////////////////////////////
989 // 0x7b00 Entry 63 (size 16 bundles) Reserved
990 VMX_DBG_FAULT(63)
991 VMX_FAULT(63)
993 .org vmx_ia64_ivt+0x7c00
994 /////////////////////////////////////////////////////////////////////////////////////////
995 // 0x7c00 Entry 64 (size 16 bundles) Reserved
996 VMX_DBG_FAULT(64)
997 VMX_FAULT(64)
999 .org vmx_ia64_ivt+0x7d00
1000 /////////////////////////////////////////////////////////////////////////////////////////
1001 // 0x7d00 Entry 65 (size 16 bundles) Reserved
1002 VMX_DBG_FAULT(65)
1003 VMX_FAULT(65)
1005 .org vmx_ia64_ivt+0x7e00
1006 /////////////////////////////////////////////////////////////////////////////////////////
1007 // 0x7e00 Entry 66 (size 16 bundles) Reserved
1008 VMX_DBG_FAULT(66)
1009 VMX_FAULT(66)
1011 .org vmx_ia64_ivt+0x7f00
1012 /////////////////////////////////////////////////////////////////////////////////////////
1013 // 0x7f00 Entry 67 (size 16 bundles) Reserved
1014 VMX_DBG_FAULT(67)
1015 VMX_FAULT(67)
1017 .org vmx_ia64_ivt+0x8000
1018 // There is no particular reason for this code to be here, other than that
1019 // there happens to be space here that would go unused otherwise. If this
1020 // fault ever gets "unreserved", simply move the following code to a more
1021 // suitable spot...
// Saves min-state, re-enables psr.ic, then calls
// vmx_reflect_interruption(ifa, isr, iim, r15, regs) and returns through
// ia64_leave_hypervisor.
1024 ENTRY(vmx_dispatch_reflection)
1025 /*
1026 * Input:
1027 * psr.ic: off
1028 * r19: intr type (offset into ivt, see ia64_int.h)
1029 * r31: contains saved predicates (pr)
1030 */
1031 VMX_SAVE_MIN_WITH_COVER_R19
1032 alloc r14=ar.pfs,0,0,5,0
1033 mov out0=cr.ifa
1034 mov out1=cr.isr
1035 mov out2=cr.iim
1036 mov out3=r15
1037 adds r3=8,r2 // set up second base pointer
1038 ;;
1039 ssm psr.ic
1040 ;;
1041 srlz.i // guarantee that interruption collection is on
1042 ;;
1043 (p15) ssm psr.i // restore psr.i
1044 movl r14=ia64_leave_hypervisor
1045 ;;
1046 VMX_SAVE_REST
1047 mov rp=r14
1048 ;;
1049 adds out4=16,r12 // out4 = pt_regs pointer
1050 br.call.sptk.many b6=vmx_reflect_interruption
1051 END(vmx_dispatch_reflection)
// Saves min-state plus the extra state (VMX_SAVE_EXTRA), then calls
// vmx_emulate(vcpu, regs), returning via ia64_leave_hypervisor_prepare.
1053 ENTRY(vmx_dispatch_virtualization_fault)
1054 VMX_SAVE_MIN_WITH_COVER_R19
1055 ;;
1056 alloc r14=ar.pfs,0,0,2,0 // now it's safe (must be first in insn group!)
1057 mov out0=r13 //vcpu
1058 adds r3=8,r2 // set up second base pointer
1059 ;;
1060 ssm psr.ic
1061 ;;
1062 srlz.i // guarantee that interruption collection is on
1063 ;;
1064 (p15) ssm psr.i // restore psr.i
1065 movl r14=ia64_leave_hypervisor_prepare
1066 ;;
1067 VMX_SAVE_REST
1068 VMX_SAVE_EXTRA
1069 mov rp=r14
1070 ;;
1071 adds out1=16,sp //regs
1072 br.call.sptk.many b6=vmx_emulate
1073 END(vmx_dispatch_virtualization_fault)
// Virtual external interrupt dispatch: saves min-state, then calls
// vmx_vexirq(vcpu) and returns via ia64_leave_hypervisor.
1076 GLOBAL_ENTRY(vmx_dispatch_vexirq)
1077 VMX_SAVE_MIN_WITH_COVER_R19
1078 alloc r14=ar.pfs,0,0,1,0
1079 mov out0=r13
1081 ssm psr.ic
1082 ;;
1083 srlz.i // guarantee that interruption collection is on
1084 ;;
1085 (p15) ssm psr.i // restore psr.i
1086 adds r3=8,r2 // set up second base pointer
1087 ;;
1088 VMX_SAVE_REST
1089 movl r14=ia64_leave_hypervisor
1090 ;;
1091 mov rp=r14
1092 br.call.sptk.many b6=vmx_vexirq
1093 END(vmx_dispatch_vexirq)
// ITLB miss fallback: calls vmx_hpw_miss(ifa, r15, regs) in C and returns
// via ia64_leave_hypervisor.
1095 ENTRY(vmx_dispatch_itlb_miss)
1096 VMX_SAVE_MIN_WITH_COVER_R19
1097 alloc r14=ar.pfs,0,0,3,0
1098 mov out0=cr.ifa
1099 mov out1=r15
1100 adds r3=8,r2 // set up second base pointer
1101 ;;
1102 ssm psr.ic
1103 ;;
1104 srlz.i // guarantee that interruption collection is on
1105 ;;
1106 (p15) ssm psr.i // restore psr.i
1107 movl r14=ia64_leave_hypervisor
1108 ;;
1109 VMX_SAVE_REST
1110 mov rp=r14
1111 ;;
1112 adds out2=16,r12 // out2 = pt_regs pointer
1113 br.call.sptk.many b6=vmx_hpw_miss
1114 END(vmx_dispatch_itlb_miss)
// DTLB miss fallback: like the ITLB variant but also saves the extra state
// and returns via ia64_leave_hypervisor_prepare.  Calls
// vmx_hpw_miss(ifa, r15, regs).
1116 ENTRY(vmx_dispatch_dtlb_miss)
1117 VMX_SAVE_MIN_WITH_COVER_R19
1118 alloc r14=ar.pfs,0,0,3,0
1119 mov out0=cr.ifa
1120 mov out1=r15
1121 adds r3=8,r2 // set up second base pointer
1122 ;;
1123 ssm psr.ic
1124 ;;
1125 srlz.i // guarantee that interruption collection is on
1126 ;;
1127 (p15) ssm psr.i // restore psr.i
1128 movl r14=ia64_leave_hypervisor_prepare
1129 ;;
1130 VMX_SAVE_REST
1131 VMX_SAVE_EXTRA
1132 mov rp=r14
1133 ;;
1134 adds out2=16,r12 // out2 = pt_regs pointer
1135 br.call.sptk.many b6=vmx_hpw_miss
1136 END(vmx_dispatch_dtlb_miss)
// break dispatch: calls vmx_ia64_handle_break(ifa, regs, isr, iim) and
// returns via ia64_leave_hypervisor.
1138 ENTRY(vmx_dispatch_break_fault)
1139 VMX_SAVE_MIN_WITH_COVER_R19
1140 ;;
1141 ;;
1142 alloc r14=ar.pfs,0,0,4,0 // now it's safe (must be first in insn group!)
1143 mov out0=cr.ifa
1144 mov out2=cr.isr // FIXME: pity to make this slow access twice
1145 mov out3=cr.iim // FIXME: pity to make this slow access twice
1146 adds r3=8,r2 // set up second base pointer
1147 ;;
1148 ssm psr.ic
1149 ;;
1150 srlz.i // guarantee that interruption collection is on
1151 ;;
1152 (p15)ssm psr.i // restore psr.i
1153 movl r14=ia64_leave_hypervisor
1154 ;;
1155 VMX_SAVE_REST
1156 mov rp=r14
1157 ;;
1158 adds out1=16,sp // out1 = pt_regs pointer
1159 br.call.sptk.many b6=vmx_ia64_handle_break
1160 ;;
1161 END(vmx_dispatch_break_fault)
// Guest-context external interrupt: saves min-state and calls
// ia64_handle_irq(cr.ivr, regs), returning via ia64_leave_hypervisor.
1164 ENTRY(vmx_dispatch_interrupt)
1165 VMX_SAVE_MIN_WITH_COVER_R19 // uses r31; defines r2 and r3
1166 ;;
1167 alloc r14=ar.pfs,0,0,2,0 // must be first in an insn group
1168 mov out0=cr.ivr // pass cr.ivr as first arg
1169 adds r3=8,r2 // set up second base pointer for SAVE_REST
1170 ;;
1171 ssm psr.ic
1172 ;;
1173 srlz.i
1174 ;;
1175 (p15) ssm psr.i
1176 movl r14=ia64_leave_hypervisor
1177 ;;
1178 VMX_SAVE_REST
1179 mov rp=r14
1180 ;;
1181 add out1=16,sp // pass pointer to pt_regs as second arg
1182 br.call.sptk.many b6=ia64_handle_irq
1183 END(vmx_dispatch_interrupt)