ia64/xen-unstable

xen/arch/ia64/xen/ivt.S @ 15325:855fe0bf6590

[IA64] Change virtual address of XEN UC identity area.

This slightly simplifies the code and makes flexible map possible.

Signed-off-by: Tristan Gingold <tgingold@free.fr>
author Alex Williamson <alex.williamson@hp.com>
date Tue Jun 12 15:20:06 2007 -0600 (2007-06-12)
line source
1 #include <asm/debugger.h>
2 #include <asm/vhpt.h>
3 #include <public/arch-ia64.h>
4 #include <asm/config.h>
5 /*
6 * arch/ia64/kernel/ivt.S
7 *
8 * Copyright (C) 1998-2001, 2003 Hewlett-Packard Co
9 * Stephane Eranian <eranian@hpl.hp.com>
10 * David Mosberger <davidm@hpl.hp.com>
11 * Copyright (C) 2000, 2002-2003 Intel Co
12 * Asit Mallick <asit.k.mallick@intel.com>
13 * Suresh Siddha <suresh.b.siddha@intel.com>
14 * Kenneth Chen <kenneth.w.chen@intel.com>
15 * Fenghua Yu <fenghua.yu@intel.com>
16 *
17 * 00/08/23 Asit Mallick <asit.k.mallick@intel.com> TLB handling for SMP
18 * 00/12/20 David Mosberger-Tang <davidm@hpl.hp.com> DTLB/ITLB handler now
19 * uses virtual PT.
20 */
21 /*
22 * This file defines the interruption vector table used by the CPU.
23 * It does not include one entry per possible cause of interruption.
24 *
25 * The first 20 entries of the table contain 64 bundles each while the
26 * remaining 48 entries contain only 16 bundles each.
27 *
28 * The 64 bundles are used to allow inlining the whole handler for critical
29 * interruptions like TLB misses.
30 *
31 * For each entry, the comment is as follows:
32 *
33 * // 0x1c00 Entry 7 (size 64 bundles) Data Key Miss (12,51)
34 * entry offset ----/ / / / /
35 * entry number ---------/ / / /
36 * size of the entry -------------/ / /
37 * vector name -------------------------------------/ /
38 * interruptions triggering this vector ----------------------/
39 *
40 * The table is 32KB in size and must be aligned on 32KB boundary.
41 * (The CPU ignores the 15 lower bits of the address)
42 *
43 * Table is based upon EAS2.6 (Oct 1999)
44 */
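/*
 * As a quick check of the layout described above: a bundle is 16 bytes, so
 * a 64-bundle entry occupies 0x400 bytes and a 16-bundle entry 0x100 bytes.
 * The 20 long entries therefore cover offsets 0x0000-0x4fff and the 48
 * short entries cover 0x5000-0x7fff; e.g. entry 7 (Data Key Miss) sits at
 * 7 * 0x400 = 0x1c00, matching the ".org ia64_ivt+0x1c00" directive below.
 */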
46 #include <linux/config.h>
48 #include <asm/asmmacro.h>
49 #include <asm/break.h>
50 #include <asm/ia32.h>
51 #include <asm/kregs.h>
52 #include <asm/offsets.h>
53 #include <asm/pgtable.h>
54 #include <asm/processor.h>
55 #include <asm/ptrace.h>
56 #include <asm/system.h>
57 #include <asm/thread_info.h>
58 #include <asm/unistd.h>
59 #include <xen/errno.h>
61 #if 1
62 # define PSR_DEFAULT_BITS psr.ac
63 #else
64 # define PSR_DEFAULT_BITS 0
65 #endif
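/*
 * PSR_DEFAULT_BITS is OR'd in together with psr.ic by the
 * "ssm psr.ic | PSR_DEFAULT_BITS" sequences in the handlers below, so with
 * the "#if 1" setting each handler also re-enables alignment checking
 * (psr.ac) when it turns interruption collection back on.
 */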
67 #if 0
68 /*
69 * This lets you track the last eight faults that occurred on the CPU.
70 * Make sure ar.k2 isn't needed for something else before enabling this...
71 */
72 # define DBG_FAULT(i) \
73 mov r16=ar.k2;; \
74 shl r16=r16,8;; \
75 add r16=(i),r16;; \
76 mov ar.k2=r16
77 #else
78 # define DBG_FAULT(i)
79 #endif
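/*
 * When the debug variant of DBG_FAULT is enabled, ar.k2 behaves as a small
 * shift register: each DBG_FAULT(i) shifts the previous value left by 8
 * bits and adds the vector number, so ar.k2 holds the last eight vectors
 * taken, with the most recent one in the least significant byte.
 */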
81 #define MINSTATE_VIRT /* needed by minstate.h */
82 #include "minstate.h"
84 #define FAULT(n) \
85 mov r19=n; /* prepare to save predicates */ \
86 mov r31=pr; \
87 br.sptk.many dispatch_to_fault_handler
89 #define FAULT_OR_REFLECT(n) \
90 mov r20=cr.ipsr; \
91 mov r19=n; /* prepare to save predicates */ \
92 mov r31=pr;; \
93 extr.u r20=r20,IA64_PSR_CPL0_BIT,2;; \
94 cmp.ne p6,p0=r0,r20; /* cpl != 0? */ \
95 (p6) br.dptk.many dispatch_reflection; \
96 br.sptk.few dispatch_to_fault_handler
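/*
 * FAULT(n) always enters Xen's own fault handler, whereas FAULT_OR_REFLECT(n)
 * first checks the privilege level saved in cr.ipsr: a fault raised at
 * cpl != 0 (i.e. in guest context rather than in Xen itself) is reflected
 * back to the guest through dispatch_reflection, and only cpl 0 faults are
 * treated as hypervisor faults.
 */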
98 .section .text.ivt,"ax"
100 .align 32768 // align on 32KB boundary
101 .global ia64_ivt
102 ia64_ivt:
103 //////////////////////////////////////////////////////////////////////////
104 // 0x0000 Entry 0 (size 64 bundles) VHPT Translation (8,20,47)
105 ENTRY(vhpt_miss)
106 DBG_FAULT(0)
107 FAULT(0)
108 END(vhpt_miss)
110 .org ia64_ivt+0x400
111 //////////////////////////////////////////////////////////////////////////
112 // 0x0400 Entry 1 (size 64 bundles) ITLB (21)
113 ENTRY(itlb_miss)
114 DBG_FAULT(1)
115 mov r16 = cr.ifa
116 mov r31 = pr
117 ;;
118 extr.u r17=r16,59,5
119 ;;
120 /* If address belongs to VMM, go to alt tlb handler */
121 cmp.eq p6,p0=0x1e,r17
122 (p6) br.cond.spnt late_alt_itlb_miss
123 br.cond.sptk fast_tlb_miss_reflect
124 ;;
125 END(itlb_miss)
127 .org ia64_ivt+0x0800
128 //////////////////////////////////////////////////////////////////////////
129 // 0x0800 Entry 2 (size 64 bundles) DTLB (9,48)
130 ENTRY(dtlb_miss)
131 DBG_FAULT(2)
132 mov r16=cr.ifa // get virtual address
133 mov r31=pr
134 ;;
135 extr.u r17=r16,59,5
136 ;;
137 /* If address belongs to VMM, go to alt tlb handler */
138 cmp.eq p6,p0=0x1e,r17
139 (p6) br.cond.spnt late_alt_dtlb_miss
140 br.cond.sptk fast_tlb_miss_reflect
141 ;;
142 END(dtlb_miss)
144 .org ia64_ivt+0x0c00
145 //////////////////////////////////////////////////////////////////////////
146 // 0x0c00 Entry 3 (size 64 bundles) Alt ITLB (19)
147 ENTRY(alt_itlb_miss)
148 DBG_FAULT(3)
149 mov r16=cr.ifa // get address that caused the TLB miss
150 mov r31=pr
151 ;;
152 late_alt_itlb_miss:
153 mov r21=cr.ipsr
154 movl r17=PAGE_KERNEL
155 movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
156 ;;
157 extr.u r23=r21,IA64_PSR_CPL0_BIT,2 // extract psr.cpl
158 and r19=r19,r16 // clear ed, reserved bits, and PTE ctrl bits
159 extr.u r18=r16,XEN_VIRT_UC_BIT,1 // extract UC bit
160 ;;
161 cmp.ne p8,p0=r0,r23 // psr.cpl != 0?
162 or r19=r17,r19 // insert PTE control bits into r19
163 ;;
164 dep r19=r18,r19,4,1 // set bit 4 (uncached) if access to UC area.
165 (p8) br.cond.spnt page_fault
166 ;;
167 itc.i r19 // insert the TLB entry
168 mov pr=r31,-1
169 rfi
170 END(alt_itlb_miss)
172 .org ia64_ivt+0x1000
173 //////////////////////////////////////////////////////////////////////////
174 // 0x1000 Entry 4 (size 64 bundles) Alt DTLB (7,46)
175 ENTRY(alt_dtlb_miss)
176 DBG_FAULT(4)
177 mov r16=cr.ifa // get address that caused the TLB miss
178 mov r31=pr
179 ;;
180 late_alt_dtlb_miss:
181 mov r20=cr.isr
182 movl r17=PAGE_KERNEL
183 mov r21=cr.ipsr
184 movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
185 ;;
186 extr.u r23=r21,IA64_PSR_CPL0_BIT,2 // extract psr.cpl
187 and r22=IA64_ISR_CODE_MASK,r20 // get the isr.code field
188 tbit.nz p6,p7=r20,IA64_ISR_SP_BIT // is speculation bit on?
189 extr.u r18=r16,XEN_VIRT_UC_BIT,1 // extract UC bit
190 and r19=r19,r16 // clear ed, reserved bits, and
191 // PTE control bits
192 tbit.nz p9,p0=r20,IA64_ISR_NA_BIT // is non-access bit on?
193 ;;
194 cmp.ne p8,p0=r0,r23
195 (p9) cmp.eq.or.andcm p6,p7=IA64_ISR_CODE_LFETCH,r22 // check isr.code field
196 (p8) br.cond.spnt page_fault
197 ;;
198 #ifdef CONFIG_VIRTUAL_FRAME_TABLE
199 shr r22=r16,56 // Test for the address of virtual frame_table
200 ;;
201 cmp.eq p8,p0=((VIRT_FRAME_TABLE_ADDR>>56)&0xff)-0x100,r22
202 (p8) br.cond.sptk frametable_miss ;;
203 #endif
204 // If it is not a Xen address, handle it via page_fault.
205 extr.u r22=r16,59,5
206 ;;
207 cmp.ne p8,p0=0x1e,r22
208 (p8) br.cond.sptk page_fault
209 ;;
210 dep r21=-1,r21,IA64_PSR_ED_BIT,1
211 or r19=r19,r17 // insert PTE control bits into r19
212 ;;
213 dep r19=r18,r19,4,1 // set bit 4 (uncached) if access to UC area
214 (p6) mov cr.ipsr=r21
215 ;;
216 (p7) itc.d r19 // insert the TLB entry
217 mov pr=r31,-1
218 rfi
219 END(alt_dtlb_miss)
221 #ifdef CONFIG_VIRTUAL_FRAME_TABLE
222 GLOBAL_ENTRY(frametable_miss)
223 rsm psr.dt // switch to using physical data addressing
224 movl r24=(frametable_pg_dir-PAGE_OFFSET) // r24=__pa(frametable_pg_dir)
225 ;;
226 srlz.d
227 extr.u r17=r16,PGDIR_SHIFT,(PAGE_SHIFT-3)
228 ;;
229 shladd r24=r17,3,r24 // r24=&pgd[pgd_offset(addr)]
230 ;;
231 ld8 r24=[r24] // r24=pgd[pgd_offset(addr)]
232 extr.u r18=r16,PMD_SHIFT,(PAGE_SHIFT-3) // r18=pmd_offset
233 ;;
234 cmp.eq p6,p7=0,r24 // pgd present?
235 shladd r24=r18,3,r24 // r24=&pmd[pmd_offset(addr)]
236 ;;
237 (p7) ld8 r24=[r24] // r24=pmd[pmd_offset(addr)]
238 extr.u r19=r16,PAGE_SHIFT,(PAGE_SHIFT-3)// r19=pte_offset
239 (p6) br.spnt.few frametable_fault
240 ;;
241 cmp.eq p6,p7=0,r24 // pmd present?
242 shladd r24=r19,3,r24 // r24=&pte[pte_offset(addr)]
243 ;;
244 (p7) ld8 r24=[r24] // r24=pte[pte_offset(addr)]
245 mov r25=0x700|(PAGE_SHIFT<<2) // key=7
246 (p6) br.spnt.few frametable_fault
247 ;;
248 mov cr.itir=r25
249 ssm psr.dt // switch to using virtual data addressing
250 tbit.z p6,p7=r24,_PAGE_P_BIT // pte present?
251 ;;
252 (p7) itc.d r24 // install updated PTE
253 (p6) br.spnt.few frametable_fault // page present bit cleared?
254 ;;
255 mov pr=r31,-1 // restore predicate registers
256 rfi
257 END(frametable_miss)
259 ENTRY(frametable_fault)
260 ssm psr.dt // switch to using virtual data addressing
261 mov r18=cr.iip
262 movl r19=ia64_frametable_probe
263 ;;
264 cmp.eq p6,p7=r18,r19 // is faulting address ia64_frametable_probe?
265 mov r8=0 // assumes that 'probe.r' uses r8
266 dep r21=-1,r21,IA64_PSR_RI_BIT+1,1 // return to next instruction in
267 // bundle 2
268 ;;
269 (p6) mov cr.ipsr=r21
270 mov r19=4 // FAULT(4)
271 (p7) br.spnt.few dispatch_to_fault_handler
272 ;;
273 mov pr=r31,-1
274 rfi
275 END(frametable_fault)
277 GLOBAL_ENTRY(ia64_frametable_probe)
278 {
279 probe.r r8=r32,0 // destination register must be r8
280 nop.f 0x0
281 br.ret.sptk.many b0 // this instruction must be in bundle 2
282 }
283 END(ia64_frametable_probe)
284 #endif /* CONFIG_VIRTUAL_FRAME_TABLE */
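/*
 * Taken together, the three routines above let Xen probe whether a virtual
 * frame_table address is actually backed: ia64_frametable_probe issues a
 * probe.r whose result lands in r8.  If only the translation is missing,
 * frametable_miss walks frametable_pg_dir and inserts the PTE; if no page
 * exists at some level, frametable_fault forces r8 to 0 and adjusts cr.ipsr
 * so the rfi resumes at the br.ret slot of the probe bundle, making the
 * probe simply appear to fail.
 */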
286 .org ia64_ivt+0x1400
287 /////////////////////////////////////////////////////////////////////////////////////////
288 // 0x1400 Entry 5 (size 64 bundles) Data nested TLB (6,45)
289 ENTRY(nested_dtlb_miss)
290 DBG_FAULT(5)
291 mov b0=r30
292 br.sptk.many b0 // return to the continuation point
293 ;;
294 END(nested_dtlb_miss)
296 GLOBAL_ENTRY(dispatch_reflection)
297 /*
298 * Input:
299 * psr.ic: off
300 * r19: intr type (offset into ivt, see ia64_int.h)
301 * r31: contains saved predicates (pr)
302 */
303 SAVE_MIN_WITH_COVER_R19
304 alloc r14=ar.pfs,0,0,5,0
305 mov out4=r15
306 mov out0=cr.ifa
307 adds out1=16,sp
308 mov out2=cr.isr
309 mov out3=cr.iim
310 // mov out3=cr.itir // TODO: why commented out?
312 ssm psr.ic | PSR_DEFAULT_BITS
313 ;;
314 srlz.i // guarantee that interruption
315 // collection is on
316 ;;
317 (p15) ssm psr.i // restore psr.i
318 adds r3=8,r2 // set up second base pointer
319 ;;
320 SAVE_REST
321 movl r14=ia64_leave_kernel
322 ;;
323 mov rp=r14
324 // br.sptk.many ia64_prepare_handle_reflection // TODO: why commented out?
325 br.call.sptk.many b6=ia64_handle_reflection
326 END(dispatch_reflection)
328 .org ia64_ivt+0x1800
329 //////////////////////////////////////////////////////////////////////////
330 // 0x1800 Entry 6 (size 64 bundles) Instruction Key Miss (24)
331 ENTRY(ikey_miss)
332 DBG_FAULT(6)
333 FAULT_OR_REFLECT(6)
334 END(ikey_miss)
336 //----------------------------------------------------------------
337 // call do_page_fault (predicates are in r31, psr.dt may be off,
338 // r16 is faulting address)
339 GLOBAL_ENTRY(page_fault)
340 ssm psr.dt
341 ;;
342 srlz.i
343 ;;
344 SAVE_MIN_WITH_COVER
345 alloc r15=ar.pfs,0,0,4,0
346 mov out0=cr.ifa
347 mov out1=cr.isr
348 mov out3=cr.itir
349 adds r3=8,r2 // set up second base pointer
350 ;;
351 ssm psr.ic | PSR_DEFAULT_BITS
352 ;;
353 srlz.i // guarantee that interruption
354 // collection is on
355 ;;
356 (p15) ssm psr.i // restore psr.i
357 movl r14=ia64_leave_kernel
358 ;;
359 SAVE_REST
360 mov rp=r14
361 ;;
362 adds out2=16,r12 // out2 = pointer to pt_regs
363 br.call.sptk.many b6=ia64_do_page_fault // ignore return address
364 END(page_fault)
366 .org ia64_ivt+0x1c00
367 //////////////////////////////////////////////////////////////////////////
368 // 0x1c00 Entry 7 (size 64 bundles) Data Key Miss (12,51)
369 ENTRY(dkey_miss)
370 DBG_FAULT(7)
371 FAULT_OR_REFLECT(7)
372 END(dkey_miss)
375 #define SAVE_MIN_COVER_DONE DO_SAVE_MIN(,mov r30=cr.ifs,)
377 // same as dispatch_break_fault except cover has already been done
378 GLOBAL_ENTRY(dispatch_slow_hyperprivop)
379 SAVE_MIN_COVER_DONE
380 ;;
381 br.sptk.many dispatch_break_fault_post_save
382 END(dispatch_slow_hyperprivop)
384 .org ia64_ivt+0x2000
385 //////////////////////////////////////////////////////////////////////////
386 // 0x2000 Entry 8 (size 64 bundles) Dirty-bit (54)
387 ENTRY(dirty_bit)
388 DBG_FAULT(8)
389 mov r20=cr.ipsr
390 mov r31=pr
391 ;;
392 extr.u r20=r20,IA64_PSR_CPL0_BIT,2
393 ;;
394 mov r19=8 // prepare to save predicates
395 cmp.eq p6,p0=r0,r20 // cpl == 0?
396 (p6) br.sptk.few dispatch_to_fault_handler
397 // If shadow mode is not enabled, reflect the fault.
398 movl r22=THIS_CPU(cpu_kr)+IA64_KR_CURRENT_OFFSET
399 ;;
400 ld8 r22=[r22]
401 ;;
402 add r22=IA64_VCPU_DOMAIN_OFFSET,r22
403 ;;
404 ld8 r22=[r22] // read domain
405 ;;
406 add r22=IA64_DOMAIN_SHADOW_BITMAP_OFFSET,r22
407 ;;
408 ld8 r22=[r22]
409 ;;
410 cmp.eq p6,p0=r0,r22 // !shadow_bitmap ?
411 (p6) br.dptk.many dispatch_reflection
413 SAVE_MIN_WITH_COVER
414 alloc r14=ar.pfs,0,0,4,0
415 mov out0=cr.ifa
416 mov out1=cr.itir
417 mov out2=cr.isr
418 adds out3=16,sp
420 ssm psr.ic | PSR_DEFAULT_BITS
421 ;;
422 srlz.i // guarantee that interruption
423 // collection is on
424 ;;
425 (p15) ssm psr.i // restore psr.i
426 adds r3=8,r2 // set up second base pointer
427 ;;
428 SAVE_REST
429 movl r14=ia64_leave_kernel
430 ;;
431 mov rp=r14
432 br.call.sptk.many b6=ia64_shadow_fault
433 END(dirty_bit)
435 .org ia64_ivt+0x2400
436 //////////////////////////////////////////////////////////////////////////
437 // 0x2400 Entry 9 (size 64 bundles) Instruction Access-bit (27)
438 ENTRY(iaccess_bit)
439 DBG_FAULT(9)
440 mov r16=cr.isr
441 mov r17=cr.ifa
442 mov r31=pr
443 mov r19=9
444 mov r20=0x2400
445 br.sptk.many fast_access_reflect;;
446 END(iaccess_bit)
448 .org ia64_ivt+0x2800
449 //////////////////////////////////////////////////////////////////////////
450 // 0x2800 Entry 10 (size 64 bundles) Data Access-bit (15,55)
451 ENTRY(daccess_bit)
452 DBG_FAULT(10)
453 mov r16=cr.isr
454 mov r17=cr.ifa
455 mov r31=pr
456 mov r19=10
457 mov r20=0x2800
458 br.sptk.many fast_access_reflect
459 ;;
460 END(daccess_bit)
462 .org ia64_ivt+0x2c00
463 //////////////////////////////////////////////////////////////////////////
464 // 0x2c00 Entry 11 (size 64 bundles) Break instruction (33)
465 ENTRY(break_fault)
466 /*
467 * The streamlined system call entry/exit paths only save/restore
468 * the initial part of pt_regs. This implies that the callers of
469 * system-calls must adhere to the normal procedure calling
470 * conventions.
471 *
472 * Registers to be saved & restored:
473 * CR registers: cr.ipsr, cr.iip, cr.ifs
474 * AR registers: ar.unat, ar.pfs, ar.rsc, ar.rnat, ar.bspstore,
475 * ar.fpsr
476 * others: pr, b0, b6, loadrs, r1, r11, r12, r13, r15
477 * Registers to be restored only:
478 * r8-r11: output value from the system call.
479 *
480 * During system call exit, scratch registers (including r15) are
481 * modified/cleared to prevent leaking bits from kernel to user
482 * level.
483 */
484 DBG_FAULT(11)
485 mov r16=cr.isr
486 mov r17=cr.iim
487 mov r31=pr
488 ;;
489 cmp.eq p7,p0=r17,r0
490 (p7) br.spnt.few dispatch_break_fault
491 ;;
492 #ifdef CRASH_DEBUG
493 // A panic can occur before domain0 is created. In such cases,
494 // referencing XSI_PSR_IC causes nested_dtlb_miss.
495 movl r18=CDB_BREAK_NUM
496 ;;
497 cmp.eq p7,p0=r17,r18
498 ;;
499 (p7) br.spnt.few dispatch_break_fault
500 ;;
501 #endif
502 movl r18=THIS_CPU(current_psr_ic_addr)
503 ;;
504 ld8 r18=[r18]
505 ;;
506 #ifdef CONFIG_PRIVIFY
507 // pseudo-covers are replaced by break.b which (unfortunately) always
508 // clears iim.
509 cmp.eq p7,p0=r0,r17
510 (p7) br.spnt.many dispatch_privop_fault
511 ;;
512 #endif
513 // if (ipsr.cpl == 2 && (iim - HYPERPRIVOP_START) < HYPERPRIVOP_MAX)
514 // this is a hyperprivop. A hyperprivop is hand-coded assembly with
515 // psr.ic off which means it can make no calls, cannot use r1-r15,
516 // and it can have no memory accesses unless they are to pinned
517 // addresses!
518 mov r19= cr.ipsr
519 mov r20=HYPERPRIVOP_START
520 mov r21=HYPERPRIVOP_MAX
521 ;;
522 sub r20=r17,r20
523 extr.u r19=r19,IA64_PSR_CPL0_BIT,2 // extract cpl field from cr.ipsr
524 ;;
525 cmp.gtu p7,p0=r21,r20
526 ;;
527 cmp.eq.and p7,p0=2,r19 // ipsr.cpl==2
528 (p7) br.sptk.many fast_hyperprivop
529 ;;
530 movl r22=THIS_CPU(cpu_kr)+IA64_KR_CURRENT_OFFSET
531 ;;
532 ld8 r22 = [r22]
533 ;;
534 adds r23=IA64_VCPU_BREAKIMM_OFFSET,r22
535 ;;
536 ld4 r23=[r23];;
537 cmp4.eq p6,p0=r23,r17;; // Xen-reserved breakimm?
538 cmp.eq.and p6,p0=2,r19
539 (p6) br.spnt.many fast_hypercall
540 ;;
541 br.sptk.many fast_break_reflect
542 ;;
545 fast_hypercall:
546 shr r25=r2,8;;
547 cmp.ne p7,p0=r0,r25
548 (p7) br.spnt.few dispatch_break_fault
549 ;;
550 // fall through
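// On this path the hypercall number is in r2 (copied to r15 below); numbers
// of 0x100 or more are not handled here and bounce back to
// dispatch_break_fault, while the rest fall through to the streamlined setup
// code, which range-checks r15 against NR_hypercalls and indexes
// ia64_hypercall_table to find the handler.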
553 /*
554 * The streamlined system call entry/exit paths only save/restore the initial part
555 * of pt_regs. This implies that the callers of system-calls must adhere to the
556 * normal procedure calling conventions.
557 *
558 * Registers to be saved & restored:
559 * CR registers: cr.ipsr, cr.iip, cr.ifs
560 * AR registers: ar.unat, ar.pfs, ar.rsc, ar.rnat, ar.bspstore, ar.fpsr
561 * others: pr, b0, b6, loadrs, r1, r11, r12, r13, r15
562 * Registers to be restored only:
563 * r8-r11: output value from the system call.
564 *
565 * During system call exit, scratch registers (including r15) are modified/cleared
566 * to prevent leaking bits from kernel to user level.
567 */
569 // DBG_FAULT(11)
570 // mov.m r16=IA64_KR(CURRENT) // M2 r16 <- current task (12 cyc)
571 mov r16=r22
572 mov r29=cr.ipsr // M2 (12 cyc)
573 // mov r31=pr // I0 (2 cyc)
574 mov r15=r2
576 // mov r17=cr.iim // M2 (2 cyc)
577 mov.m r27=ar.rsc // M2 (12 cyc)
578 // mov r18=__IA64_BREAK_SYSCALL // A
580 mov.m ar.rsc=0 // M2
581 mov.m r21=ar.fpsr // M2 (12 cyc)
582 mov r19=b6 // I0 (2 cyc)
583 ;;
584 mov.m r23=ar.bspstore // M2 (12 cyc)
585 mov.m r24=ar.rnat // M2 (5 cyc)
586 mov.i r26=ar.pfs // I0 (2 cyc)
588 invala // M0|1
589 nop.m 0 // M
590 mov r20=r1 // A save r1
592 nop.m 0
593 // movl r30=sys_call_table // X
594 movl r30=ia64_hypercall_table // X
596 mov r28=cr.iip // M2 (2 cyc)
597 // cmp.eq p0,p7=r18,r17 // I0 is this a system call?
598 //(p7) br.cond.spnt non_syscall // B no ->
599 //
600 // From this point on, we are definitely on the syscall-path
601 // and we can use (non-banked) scratch registers.
602 //
603 ///////////////////////////////////////////////////////////////////////
604 mov r1=r16 // A move task-pointer to "addl"-addressable reg
605 mov r2=r16 // A setup r2 for ia64_syscall_setup
606 // add r9=TI_FLAGS+IA64_TASK_SIZE,r16 // A r9 = &current_thread_info()->flags
608 adds r16=IA64_TASK_THREAD_ON_USTACK_OFFSET,r16
609 // adds r15=-1024,r15 // A subtract 1024 from syscall number
610 // mov r3=NR_syscalls - 1
611 mov r3=NR_hypercalls - 1
612 ;;
613 ld1.bias r17=[r16] // M0|1 r17 = current->thread.on_ustack flag
614 // ld4 r9=[r9] // M0|1 r9 = current_thread_info()->flags
615 mov r9=r0 // force flags = 0
616 extr.u r8=r29,41,2 // I0 extract ei field from cr.ipsr
618 shladd r30=r15,3,r30 // A r30 = sys_call_table + 8*(syscall-1024)
619 addl r22=IA64_RBS_OFFSET,r1 // A compute base of RBS
620 cmp.leu p6,p7=r15,r3 // A syscall number in range?
621 ;;
623 lfetch.fault.excl.nt1 [r22] // M0|1 prefetch RBS
624 (p6) ld8 r30=[r30] // M0|1 load address of syscall entry point
625 tnat.nz.or p7,p0=r15 // I0 is syscall nr a NaT?
627 mov.m ar.bspstore=r22 // M2 switch to kernel RBS
628 cmp.eq p8,p9=2,r8 // A isr.ei==2?
629 ;;
631 (p8) mov r8=0 // A clear ei to 0
632 //(p7) movl r30=sys_ni_syscall // X
633 (p7) movl r30=do_ni_hypercall // X
635 (p8) adds r28=16,r28 // A switch cr.iip to next bundle
636 (p9) adds r8=1,r8 // A increment ei to next slot
637 nop.i 0
638 ;;
640 mov.m r25=ar.unat // M2 (5 cyc)
641 dep r29=r8,r29,41,2 // I0 insert new ei into cr.ipsr
642 // adds r15=1024,r15 // A restore original syscall number
643 //
644 // If any of the above loads miss in L1D, we'll stall here until
645 // the data arrives.
646 //
647 ///////////////////////////////////////////////////////////////////////
648 st1 [r16]=r0 // M2|3 clear current->thread.on_ustack flag
649 mov b6=r30 // I0 setup syscall handler branch reg early
650 cmp.eq pKStk,pUStk=r0,r17 // A were we on kernel stacks already?
652 // and r9=_TIF_SYSCALL_TRACEAUDIT,r9 // A mask trace or audit
653 mov r18=ar.bsp // M2 (12 cyc)
654 ;;
655 (pUStk) addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r1 // A compute base of memory stack
656 // cmp.eq p14,p0=r9,r0 // A are syscalls being traced/audited?
657 br.call.sptk.many b7=ia64_syscall_setup // B
658 1:
659 mov ar.rsc=0x3 // M2 set eager mode, pl 0, LE, loadrs=0
660 nop 0
661 bsw.1 // B (6 cyc) regs are saved, switch to bank 1
662 ;;
664 ssm psr.ic | PSR_DEFAULT_BITS // M2 now it's safe to re-enable intr.-collection
665 // movl r3=ia64_ret_from_syscall // X
666 ;;
668 srlz.i // M0 ensure interruption collection is on
669 // mov rp=r3 // I0 set the real return addr
670 //(p10) br.cond.spnt.many ia64_ret_from_syscall // B return if bad call-frame or r15 is a NaT
671 (p15) ssm psr.i // M2 restore psr.i
672 //(p14) br.call.sptk.many b6=b6 // B invoke syscall handler (ignore return addr)
673 // br.call.sptk.many b6=b6 // B invoke syscall handler (ignore return addr)
674 br.call.sptk.many b0=b6 // B invoke syscall handler (ignore return addr)
675 // br.cond.spnt.many ia64_trace_syscall // B do syscall-tracing thingamagic
676 ;;
677 adds r2=PT(R8)+16,r12
678 ;;
679 st8 [r2]=r8
680 ;;
681 br.call.sptk.many b0=do_softirq
682 ;;
683 //restore hypercall argument if continuation
684 adds r2=IA64_VCPU_HYPERCALL_CONTINUATION_OFS,r13
685 ;;
686 ld1 r20=[r2]
687 ;;
688 st1 [r2]=r0
689 ;;
690 cmp.ne p6,p0=r20,r0
691 ;;
692 (p6) adds r2=PT(R16)+16,r12
693 (p6) adds r3=PT(R17)+16,r12
694 ;;
695 (p6) ld8 r32=[r2],16
696 (p6) ld8 r33=[r3],16
697 ;;
698 (p6) ld8 r34=[r2],16
699 (p6) ld8 r35=[r3],16
700 ;;
701 (p6) ld8 r36=[r2],16
702 ;;
703 //save ar.bsp before cover
704 mov r16=ar.bsp
705 add r2=PT(R14)+16,r12
706 ;;
707 st8 [r2]=r16
708 ;;
709 rsm psr.i|psr.ic
710 ;;
711 srlz.i
712 ;;
713 cover
714 ;;
715 mov r20=cr.ifs
716 adds r2=PT(CR_IFS)+16,r12
717 ;;
718 st8 [r2]=r20
719 ;;
720 br.call.sptk.many b0=reflect_event
721 ;;
722 adds r2=PT(R14)+16,r12
723 adds r3=PT(R8)+16,r12
724 ;;
725 //r16 contains ar.bsp before cover
726 ld8 r16=[r2]
727 ld8 r8=[r3]
728 ;;
729 br.sptk.many ia64_ret_from_syscall
730 ;;
731 END(break_fault)
733 .org ia64_ivt+0x3000
734 //////////////////////////////////////////////////////////////////////////
735 // 0x3000 Entry 12 (size 64 bundles) External Interrupt (4)
736 ENTRY(interrupt)
737 DBG_FAULT(12)
738 mov r31=pr // prepare to save predicates
739 ;;
740 mov r30=cr.ivr // pass cr.ivr as first arg
741 // FIXME: this is a hack... use cpuinfo.ksoftirqd because it's
742 // not used anywhere else and we need a place to stash ivr and
743 // there are no registers available unused by SAVE_MIN/REST
744 movl r29=THIS_CPU(cpu_info)+IA64_CPUINFO_KSOFTIRQD_OFFSET
745 ;;
746 st8 [r29]=r30
747 ;;
748 movl r28=slow_interrupt
749 ;;
750 mov r29=rp
751 ;;
752 mov rp=r28
753 ;;
754 br.cond.sptk.many fast_tick_reflect
755 ;;
756 slow_interrupt:
757 mov rp=r29;;
758 SAVE_MIN_WITH_COVER // uses r31; defines r2 and r3
759 ssm psr.ic | PSR_DEFAULT_BITS
760 ;;
761 adds r3=8,r2 // set up second base pointer for SAVE_REST
762 srlz.i // ensure everybody knows psr.ic is back on
763 ;;
764 SAVE_REST
765 ;;
766 alloc r14=ar.pfs,0,0,2,0 // must be first in an insn group
767 movl out0=THIS_CPU(cpu_info)+IA64_CPUINFO_KSOFTIRQD_OFFSET;;
768 ld8 out0=[out0];;
769 add out1=16,sp // pass pointer to pt_regs as second arg
770 movl r14=ia64_leave_kernel
771 ;;
772 mov rp=r14
773 br.call.sptk.many b6=ia64_handle_irq
774 END(interrupt)
776 .org ia64_ivt+0x3400
777 //////////////////////////////////////////////////////////////////////////
778 // 0x3400 Entry 13 (size 64 bundles) Reserved
779 DBG_FAULT(13)
780 FAULT(13)
782 // There is no particular reason for this code to be here, other
783 // than that there happens to be space here that would go unused
784 // otherwise. If this fault ever gets "unreserved", simply move
785 // the following code to a more suitable spot...
787 GLOBAL_ENTRY(dispatch_break_fault)
788 SAVE_MIN_WITH_COVER
789 ;;
790 dispatch_break_fault_post_save:
791 alloc r14=ar.pfs,0,0,4,0 // now it's safe (must be first in insn group!)
792 mov out0=cr.ifa
793 adds out1=16,sp
794 mov out2=cr.isr // FIXME: pity to make this slow access twice
795 mov out3=cr.iim // FIXME: pity to make this slow access twice
797 ssm psr.ic | PSR_DEFAULT_BITS
798 ;;
799 srlz.i // guarantee that interruption collection is on
800 ;;
801 (p15) ssm psr.i // restore psr.i
802 adds r3=8,r2 // set up second base pointer
803 ;;
804 SAVE_REST
805 movl r14=ia64_leave_kernel
806 ;;
807 mov rp=r14
808 // br.sptk.many ia64_prepare_handle_break // TODO: why commented out?
809 br.call.sptk.many b6=ia64_handle_break
810 END(dispatch_break_fault)
812 .org ia64_ivt+0x3800
813 //////////////////////////////////////////////////////////////////////////
814 // 0x3800 Entry 14 (size 64 bundles) Reserved
815 DBG_FAULT(14)
816 FAULT(14)
818 // this code segment is from 2.6.16.13
820 /*
821 * There is no particular reason for this code to be here, other than that
822 * there happens to be space here that would go unused otherwise. If this
823 * fault ever gets "unreserved", simply move the following code to a more
824 * suitable spot...
825 *
826 * ia64_syscall_setup() is a separate subroutine so that it can
827 * allocate stacked registers so it can safely demine any
828 * potential NaT values from the input registers.
829 *
830 * On entry:
831 * - executing on bank 0 or bank 1 register set (doesn't matter)
832 * - r1: stack pointer
833 * - r2: current task pointer
834 * - r3: preserved
835 * - r11: original contents (saved ar.pfs to be saved)
836 * - r12: original contents (sp to be saved)
837 * - r13: original contents (tp to be saved)
838 * - r15: original contents (syscall # to be saved)
839 * - r18: saved bsp (after switching to kernel stack)
840 * - r19: saved b6
841 * - r20: saved r1 (gp)
842 * - r21: saved ar.fpsr
843 * - r22: kernel's register backing store base (krbs_base)
844 * - r23: saved ar.bspstore
845 * - r24: saved ar.rnat
846 * - r25: saved ar.unat
847 * - r26: saved ar.pfs
848 * - r27: saved ar.rsc
849 * - r28: saved cr.iip
850 * - r29: saved cr.ipsr
851 * - r31: saved pr
852 * - b0: original contents (to be saved)
853 * On exit:
854 * - p10: TRUE if syscall is invoked with more than 8 out
855 * registers or r15's Nat is true
856 * - r1: kernel's gp
857 * - r3: preserved (same as on entry)
858 * - r8: -EINVAL if p10 is true
859 * - r12: points to kernel stack
860 * - r13: points to current task
861 * - r14: preserved (same as on entry)
862 * - p13: preserved
863 * - p15: TRUE if interrupts need to be re-enabled
864 * - ar.fpsr: set to kernel settings
865 * - b6: preserved (same as on entry)
866 */
867 GLOBAL_ENTRY(ia64_syscall_setup)
868 #if PT(B6) != 0
869 # error This code assumes that b6 is the first field in pt_regs.
870 #endif
871 st8 [r1]=r19 // save b6
872 add r16=PT(CR_IPSR),r1 // initialize first base pointer
873 add r17=PT(R11),r1 // initialize second base pointer
874 ;;
875 alloc r19=ar.pfs,8,0,0,0 // ensure in0-in7 are writable
876 st8 [r16]=r29,PT(AR_PFS)-PT(CR_IPSR) // save cr.ipsr
877 tnat.nz p8,p0=in0
879 st8.spill [r17]=r11,PT(CR_IIP)-PT(R11) // save r11
880 tnat.nz p9,p0=in1
881 (pKStk) mov r18=r0 // make sure r18 isn't NaT
882 ;;
884 st8 [r16]=r26,PT(CR_IFS)-PT(AR_PFS) // save ar.pfs
885 st8 [r17]=r28,PT(AR_UNAT)-PT(CR_IIP) // save cr.iip
886 mov r28=b0 // save b0 (2 cyc)
887 ;;
889 st8 [r17]=r25,PT(AR_RSC)-PT(AR_UNAT) // save ar.unat
890 dep r19=0,r19,38,26 // clear all bits but 0..37 [I0]
891 (p8) mov in0=-1
892 ;;
894 st8 [r16]=r19,PT(AR_RNAT)-PT(CR_IFS) // store ar.pfs.pfm in cr.ifs
895 extr.u r11=r19,7,7 // I0 // get sol of ar.pfs
896 and r8=0x7f,r19 // A // get sof of ar.pfs
898 st8 [r17]=r27,PT(AR_BSPSTORE)-PT(AR_RSC)// save ar.rsc
899 tbit.nz p15,p0=r29,IA64_PSR_I_BIT // I0
900 (p9) mov in1=-1
901 ;;
903 (pUStk) sub r18=r18,r22 // r18=RSE.ndirty*8
904 tnat.nz p10,p0=in2
905 add r11=8,r11
906 ;;
907 (pKStk) adds r16=PT(PR)-PT(AR_RNAT),r16 // skip over ar_rnat field
908 (pKStk) adds r17=PT(B0)-PT(AR_BSPSTORE),r17 // skip over ar_bspstore field
909 tnat.nz p11,p0=in3
910 ;;
911 (p10) mov in2=-1
912 tnat.nz p12,p0=in4 // [I0]
913 (p11) mov in3=-1
914 ;;
915 (pUStk) st8 [r16]=r24,PT(PR)-PT(AR_RNAT) // save ar.rnat
916 (pUStk) st8 [r17]=r23,PT(B0)-PT(AR_BSPSTORE) // save ar.bspstore
917 shl r18=r18,16 // compute ar.rsc to be used for "loadrs"
918 ;;
919 st8 [r16]=r31,PT(LOADRS)-PT(PR) // save predicates
920 st8 [r17]=r28,PT(R1)-PT(B0) // save b0
921 tnat.nz p13,p0=in5 // [I0]
922 ;;
923 st8 [r16]=r18,PT(R12)-PT(LOADRS) // save ar.rsc value for "loadrs"
924 st8.spill [r17]=r20,PT(R13)-PT(R1) // save original r1
925 (p12) mov in4=-1
926 ;;
928 .mem.offset 0,0; st8.spill [r16]=r12,PT(AR_FPSR)-PT(R12) // save r12
929 .mem.offset 8,0; st8.spill [r17]=r13,PT(R15)-PT(R13) // save r13
930 (p13) mov in5=-1
931 ;;
932 st8 [r16]=r21,PT(R8)-PT(AR_FPSR) // save ar.fpsr
933 tnat.nz p13,p0=in6
934 cmp.lt p10,p9=r11,r8 // frame size can't be more than local+8
935 ;;
936 mov r8=1
937 (p9) tnat.nz p10,p0=r15
938 adds r12=-16,r1 // switch to kernel memory stack (with 16 bytes of scratch)
940 st8.spill [r17]=r15 // save r15
941 tnat.nz p8,p0=in7
942 nop.i 0
944 mov r13=r2 // establish `current'
945 movl r1=__gp // establish kernel global pointer
946 ;;
947 st8 [r16]=r8 // ensure pt_regs.r8 != 0 (see handle_syscall_error)
948 (p13) mov in6=-1
949 (p8) mov in7=-1
951 cmp.eq pSys,pNonSys=r0,r0 // set pSys=1, pNonSys=0
952 movl r17=FPSR_DEFAULT
953 ;;
954 mov.m ar.fpsr=r17 // set ar.fpsr to kernel default value
955 (p10) mov r8=-EINVAL
956 br.ret.sptk.many b7
957 END(ia64_syscall_setup)
960 .org ia64_ivt+0x3c00
961 //////////////////////////////////////////////////////////////////////////
962 // 0x3c00 Entry 15 (size 64 bundles) Reserved
963 DBG_FAULT(15)
964 FAULT(15)
967 .org ia64_ivt+0x4000
968 //////////////////////////////////////////////////////////////////////////
969 // 0x4000 Entry 16 (size 64 bundles) Reserved
970 DBG_FAULT(16)
971 FAULT(16)
973 // There is no particular reason for this code to be here, other
974 // than that there happens to be space here that would go unused
975 // otherwise. If this fault ever gets "unreserved", simply move
976 // the following code to a more suitable spot...
978 ENTRY(dispatch_privop_fault)
979 SAVE_MIN_WITH_COVER
980 ;;
981 alloc r14=ar.pfs,0,0,4,0 // now it's safe (must be first in
982 // insn group!)
983 mov out0=cr.ifa
984 adds out1=16,sp
985 mov out2=cr.isr // FIXME: pity to make this slow access twice
986 mov out3=cr.itir
988 ssm psr.ic | PSR_DEFAULT_BITS
989 ;;
990 srlz.i // guarantee that interruption
991 // collection is on
992 ;;
993 (p15) ssm psr.i // restore psr.i
994 adds r3=8,r2 // set up second base pointer
995 ;;
996 SAVE_REST
997 movl r14=ia64_leave_kernel
998 ;;
999 mov rp=r14
1000 // br.sptk.many ia64_prepare_handle_privop // TODO: why commented out?
1001 br.call.sptk.many b6=ia64_handle_privop
1002 END(dispatch_privop_fault)
1005 .org ia64_ivt+0x4400
1006 //////////////////////////////////////////////////////////////////////////
1007 // 0x4400 Entry 17 (size 64 bundles) Reserved
1008 DBG_FAULT(17)
1009 FAULT(17)
1012 .org ia64_ivt+0x4800
1013 //////////////////////////////////////////////////////////////////////////
1014 // 0x4800 Entry 18 (size 64 bundles) Reserved
1015 DBG_FAULT(18)
1016 FAULT(18)
1019 .org ia64_ivt+0x4c00
1020 //////////////////////////////////////////////////////////////////////////
1021 // 0x4c00 Entry 19 (size 64 bundles) Reserved
1022 DBG_FAULT(19)
1023 FAULT(19)
1025 /*
1026 * There is no particular reason for this code to be here, other
1027 * than that there happens to be space here that would go unused
1028 * otherwise. If this fault ever gets "unreserved", simply move
1029 * the following code to a more suitable spot...
1030 */
1032 GLOBAL_ENTRY(dispatch_to_fault_handler)
1033 /*
1034 * Input:
1035 * psr.ic: off
1036 * r19: fault vector number (e.g., 24 for General Exception)
1037 * r31: contains saved predicates (pr)
1038 */
1039 SAVE_MIN_WITH_COVER_R19
1040 alloc r14=ar.pfs,0,0,5,0
1041 mov out0=r15
1042 mov out1=cr.isr
1043 mov out2=cr.ifa
1044 mov out3=cr.iim
1045 mov out4=cr.itir
1046 ;;
1047 ssm psr.ic | PSR_DEFAULT_BITS
1048 ;;
1049 srlz.i // guarantee that interruption
1050 // collection is on
1051 ;;
1052 (p15) ssm psr.i // restore psr.i
1053 adds r3=8,r2 // set up second base pointer for
1054 // SAVE_REST
1055 ;;
1056 SAVE_REST
1057 movl r14=ia64_leave_kernel
1058 ;;
1059 mov rp=r14
1060 br.call.sptk.many b6=ia64_fault
1061 END(dispatch_to_fault_handler)
1063 //
1064 // --- End of long entries, Beginning of short entries
1065 //
1067 .org ia64_ivt+0x5000
1068 //////////////////////////////////////////////////////////////////////////
1069 // 0x5000 Entry 20 (size 16 bundles) Page Not Present (10,22,49)
1070 ENTRY(page_not_present)
1071 DBG_FAULT(20)
1072 FAULT_OR_REFLECT(20)
1073 END(page_not_present)
1075 .org ia64_ivt+0x5100
1076 //////////////////////////////////////////////////////////////////////////
1077 // 0x5100 Entry 21 (size 16 bundles) Key Permission (13,25,52)
1078 ENTRY(key_permission)
1079 DBG_FAULT(21)
1080 FAULT_OR_REFLECT(21)
1081 END(key_permission)
1083 .org ia64_ivt+0x5200
1084 //////////////////////////////////////////////////////////////////////////
1085 // 0x5200 Entry 22 (size 16 bundles) Instruction Access Rights (26)
1086 ENTRY(iaccess_rights)
1087 DBG_FAULT(22)
1088 FAULT_OR_REFLECT(22)
1089 END(iaccess_rights)
1091 .org ia64_ivt+0x5300
1092 //////////////////////////////////////////////////////////////////////////
1093 // 0x5300 Entry 23 (size 16 bundles) Data Access Rights (14,53)
1094 ENTRY(daccess_rights)
1095 DBG_FAULT(23)
1096 mov r31=pr
1097 ;;
1098 mov r16=cr.isr
1099 mov r17=cr.ifa
1100 mov r19=23
1101 movl r20=0x5300
1102 br.sptk.many fast_access_reflect
1103 ;;
1104 END(daccess_rights)
1106 .org ia64_ivt+0x5400
1107 //////////////////////////////////////////////////////////////////////////
1108 // 0x5400 Entry 24 (size 16 bundles) General Exception (5,32,34,36,38,39)
1109 ENTRY(general_exception)
1110 DBG_FAULT(24)
1111 mov r16=cr.isr
1112 mov r31=pr
1113 ;;
1114 cmp4.ge p6,p0=0x20,r16
1115 (p6) br.sptk.many dispatch_privop_fault
1116 ;;
1117 FAULT_OR_REFLECT(24)
1118 ;;
1119 mov r19=24 // fault number
1120 br.sptk.many dispatch_to_fault_handler
1121 END(general_exception)
1123 .org ia64_ivt+0x5500
1124 //////////////////////////////////////////////////////////////////////////
1125 // 0x5500 Entry 25 (size 16 bundles) Disabled FP-Register (35)
1126 ENTRY(disabled_fp_reg)
1127 DBG_FAULT(25)
1128 #if 0 // TODO: can this be removed?
1129 mov r20=pr
1130 movl r16=0x2000000000000000
1131 movl r17=0x2000000000176b60
1132 mov r18=cr.iip
1133 mov r19=rr[r16]
1134 movl r22=0xe95d0439
1135 ;;
1136 mov pr=r0,-1
1137 ;;
1138 cmp.eq p6,p7=r22,r19
1139 ;;
1140 (p6) cmp.eq p8,p9=r17,r18
1141 (p8) br.sptk.few floating_panic
1142 ;;
1143 mov pr=r20,-1
1144 ;;
1145 #endif
1146 FAULT_OR_REFLECT(25)
1147 //floating_panic: // TODO: can this be removed?
1148 // br.sptk.many floating_panic
1149 ;;
1150 rsm psr.dfh // ensure we can access fph
1151 ;;
1152 srlz.d
1153 mov r31=pr
1154 mov r19=25
1155 br.sptk.many dispatch_to_fault_handler
1156 END(disabled_fp_reg)
1158 .org ia64_ivt+0x5600
1159 //////////////////////////////////////////////////////////////////////////
1160 // 0x5600 Entry 26 (size 16 bundles) Nat Consumption (11,23,37,50)
1161 ENTRY(nat_consumption)
1162 DBG_FAULT(26)
1163 FAULT_OR_REFLECT(26)
1164 END(nat_consumption)
1166 .org ia64_ivt+0x5700
1167 //////////////////////////////////////////////////////////////////////////
1168 // 0x5700 Entry 27 (size 16 bundles) Speculation (40)
1169 ENTRY(speculation_vector)
1170 DBG_FAULT(27)
1171 // this probably need not reflect...
1172 FAULT_OR_REFLECT(27)
1173 END(speculation_vector)
1175 .org ia64_ivt+0x5800
1176 //////////////////////////////////////////////////////////////////////////
1177 // 0x5800 Entry 28 (size 16 bundles) Reserved
1178 DBG_FAULT(28)
1179 FAULT(28)
1181 .org ia64_ivt+0x5900
1182 //////////////////////////////////////////////////////////////////////////
1183 // 0x5900 Entry 29 (size 16 bundles) Debug (16,28,56)
1184 ENTRY(debug_vector)
1185 DBG_FAULT(29)
1186 #ifdef XEN
1187 FAULT_OR_REFLECT(29)
1188 #else
1189 FAULT(29)
1190 #endif
1191 END(debug_vector)
1193 .org ia64_ivt+0x5a00
1194 //////////////////////////////////////////////////////////////////////////
1195 // 0x5a00 Entry 30 (size 16 bundles) Unaligned Reference (57)
1196 ENTRY(unaligned_access)
1197 DBG_FAULT(30)
1198 FAULT_OR_REFLECT(30)
1199 END(unaligned_access)
1201 .org ia64_ivt+0x5b00
1202 //////////////////////////////////////////////////////////////////////////
1203 // 0x5b00 Entry 31 (size 16 bundles) Unsupported Data Reference (57)
1204 ENTRY(unsupported_data_reference)
1205 DBG_FAULT(31)
1206 FAULT_OR_REFLECT(31)
1207 END(unsupported_data_reference)
1209 .org ia64_ivt+0x5c00
1210 //////////////////////////////////////////////////////////////////////////
1211 // 0x5c00 Entry 32 (size 16 bundles) Floating-Point Fault (64)
1212 ENTRY(floating_point_fault)
1213 DBG_FAULT(32)
1214 FAULT_OR_REFLECT(32)
1215 END(floating_point_fault)
1217 .org ia64_ivt+0x5d00
1218 //////////////////////////////////////////////////////////////////////////
1219 // 0x5d00 Entry 33 (size 16 bundles) Floating Point Trap (66)
1220 ENTRY(floating_point_trap)
1221 DBG_FAULT(33)
1222 FAULT_OR_REFLECT(33)
1223 END(floating_point_trap)
1225 .org ia64_ivt+0x5e00
1226 //////////////////////////////////////////////////////////////////////////
1227 // 0x5e00 Entry 34 (size 16 bundles) Lower Privilege Transfer Trap (66)
1228 ENTRY(lower_privilege_trap)
1229 DBG_FAULT(34)
1230 FAULT_OR_REFLECT(34)
1231 END(lower_privilege_trap)
1233 .org ia64_ivt+0x5f00
1234 //////////////////////////////////////////////////////////////////////////
1235 // 0x5f00 Entry 35 (size 16 bundles) Taken Branch Trap (68)
1236 ENTRY(taken_branch_trap)
1237 DBG_FAULT(35)
1238 FAULT_OR_REFLECT(35)
1239 END(taken_branch_trap)
1241 .org ia64_ivt+0x6000
1242 //////////////////////////////////////////////////////////////////////////
1243 // 0x6000 Entry 36 (size 16 bundles) Single Step Trap (69)
1244 ENTRY(single_step_trap)
1245 DBG_FAULT(36)
1246 FAULT_OR_REFLECT(36)
1247 END(single_step_trap)
1249 .org ia64_ivt+0x6100
1250 //////////////////////////////////////////////////////////////////////////
1251 // 0x6100 Entry 37 (size 16 bundles) Reserved
1252 DBG_FAULT(37)
1253 FAULT(37)
1255 .org ia64_ivt+0x6200
1256 //////////////////////////////////////////////////////////////////////////
1257 // 0x6200 Entry 38 (size 16 bundles) Reserved
1258 DBG_FAULT(38)
1259 FAULT(38)
1261 .org ia64_ivt+0x6300
1262 //////////////////////////////////////////////////////////////////////////
1263 // 0x6300 Entry 39 (size 16 bundles) Reserved
1264 DBG_FAULT(39)
1265 FAULT(39)
1267 .org ia64_ivt+0x6400
1268 //////////////////////////////////////////////////////////////////////////
1269 // 0x6400 Entry 40 (size 16 bundles) Reserved
1270 DBG_FAULT(40)
1271 FAULT(40)
1273 .org ia64_ivt+0x6500
1274 //////////////////////////////////////////////////////////////////////////
1275 // 0x6500 Entry 41 (size 16 bundles) Reserved
1276 DBG_FAULT(41)
1277 FAULT(41)
1279 .org ia64_ivt+0x6600
1280 //////////////////////////////////////////////////////////////////////////
1281 // 0x6600 Entry 42 (size 16 bundles) Reserved
1282 DBG_FAULT(42)
1283 FAULT(42)
1285 .org ia64_ivt+0x6700
1286 //////////////////////////////////////////////////////////////////////////
1287 // 0x6700 Entry 43 (size 16 bundles) Reserved
1288 DBG_FAULT(43)
1289 FAULT(43)
1291 .org ia64_ivt+0x6800
1292 //////////////////////////////////////////////////////////////////////////
1293 // 0x6800 Entry 44 (size 16 bundles) Reserved
1294 DBG_FAULT(44)
1295 FAULT(44)
1297 .org ia64_ivt+0x6900
1298 //////////////////////////////////////////////////////////////////////////
1299 // 0x6900 Entry 45 (size 16 bundles) IA-32 Exception (17,18,29,41,42,43,
1300 // 44,58,60,61,62,72,
1301 // 73,75,76,77)
1302 ENTRY(ia32_exception)
1303 DBG_FAULT(45)
1304 FAULT_OR_REFLECT(45)
1305 END(ia32_exception)
1307 .org ia64_ivt+0x6a00
1308 //////////////////////////////////////////////////////////////////////////
1309 // 0x6a00 Entry 46 (size 16 bundles) IA-32 Intercept (30,31,59,70,71)
1310 ENTRY(ia32_intercept)
1311 DBG_FAULT(46)
1312 FAULT_OR_REFLECT(46)
1313 END(ia32_intercept)
1315 .org ia64_ivt+0x6b00
1316 //////////////////////////////////////////////////////////////////////////
1317 // 0x6b00 Entry 47 (size 16 bundles) IA-32 Interrupt (74)
1318 ENTRY(ia32_interrupt)
1319 DBG_FAULT(47)
1320 FAULT_OR_REFLECT(47)
1321 END(ia32_interrupt)
1323 .org ia64_ivt+0x6c00
1324 //////////////////////////////////////////////////////////////////////////
1325 // 0x6c00 Entry 48 (size 16 bundles) Reserved
1326 DBG_FAULT(48)
1327 FAULT(48)
1329 .org ia64_ivt+0x6d00
1330 //////////////////////////////////////////////////////////////////////////
1331 // 0x6d00 Entry 49 (size 16 bundles) Reserved
1332 DBG_FAULT(49)
1333 FAULT(49)
1335 .org ia64_ivt+0x6e00
1336 //////////////////////////////////////////////////////////////////////////
1337 // 0x6e00 Entry 50 (size 16 bundles) Reserved
1338 DBG_FAULT(50)
1339 FAULT(50)
1341 .org ia64_ivt+0x6f00
1342 //////////////////////////////////////////////////////////////////////////
1343 // 0x6f00 Entry 51 (size 16 bundles) Reserved
1344 DBG_FAULT(51)
1345 FAULT(51)
1347 .org ia64_ivt+0x7000
1348 //////////////////////////////////////////////////////////////////////////
1349 // 0x7000 Entry 52 (size 16 bundles) Reserved
1350 DBG_FAULT(52)
1351 FAULT(52)
1353 .org ia64_ivt+0x7100
1354 //////////////////////////////////////////////////////////////////////////
1355 // 0x7100 Entry 53 (size 16 bundles) Reserved
1356 DBG_FAULT(53)
1357 FAULT(53)
1359 .org ia64_ivt+0x7200
1360 //////////////////////////////////////////////////////////////////////////
1361 // 0x7200 Entry 54 (size 16 bundles) Reserved
1362 DBG_FAULT(54)
1363 FAULT(54)
1365 .org ia64_ivt+0x7300
1366 //////////////////////////////////////////////////////////////////////////
1367 // 0x7300 Entry 55 (size 16 bundles) Reserved
1368 DBG_FAULT(55)
1369 FAULT(55)
1371 .org ia64_ivt+0x7400
1372 //////////////////////////////////////////////////////////////////////////
1373 // 0x7400 Entry 56 (size 16 bundles) Reserved
1374 DBG_FAULT(56)
1375 FAULT(56)
1377 .org ia64_ivt+0x7500
1378 //////////////////////////////////////////////////////////////////////////
1379 // 0x7500 Entry 57 (size 16 bundles) Reserved
1380 DBG_FAULT(57)
1381 FAULT(57)
1383 .org ia64_ivt+0x7600
1384 //////////////////////////////////////////////////////////////////////////
1385 // 0x7600 Entry 58 (size 16 bundles) Reserved
1386 DBG_FAULT(58)
1387 FAULT(58)
1389 .org ia64_ivt+0x7700
1390 //////////////////////////////////////////////////////////////////////////
1391 // 0x7700 Entry 59 (size 16 bundles) Reserved
1392 DBG_FAULT(59)
1393 FAULT(59)
1395 .org ia64_ivt+0x7800
1396 //////////////////////////////////////////////////////////////////////////
1397 // 0x7800 Entry 60 (size 16 bundles) Reserved
1398 DBG_FAULT(60)
1399 FAULT(60)
1401 .org ia64_ivt+0x7900
1402 //////////////////////////////////////////////////////////////////////////
1403 // 0x7900 Entry 61 (size 16 bundles) Reserved
1404 DBG_FAULT(61)
1405 FAULT(61)
1407 .org ia64_ivt+0x7a00
1408 //////////////////////////////////////////////////////////////////////////
1409 // 0x7a00 Entry 62 (size 16 bundles) Reserved
1410 DBG_FAULT(62)
1411 FAULT(62)
1413 .org ia64_ivt+0x7b00
1414 //////////////////////////////////////////////////////////////////////////
1415 // 0x7b00 Entry 63 (size 16 bundles) Reserved
1416 DBG_FAULT(63)
1417 FAULT(63)
1419 .org ia64_ivt+0x7c00
1420 //////////////////////////////////////////////////////////////////////////
1421 // 0x7c00 Entry 64 (size 16 bundles) Reserved
1422 DBG_FAULT(64)
1423 FAULT(64)
1425 .org ia64_ivt+0x7d00
1426 //////////////////////////////////////////////////////////////////////////
1427 // 0x7d00 Entry 65 (size 16 bundles) Reserved
1428 DBG_FAULT(65)
1429 FAULT(65)
1431 .org ia64_ivt+0x7e00
1432 //////////////////////////////////////////////////////////////////////////
1433 // 0x7e00 Entry 66 (size 16 bundles) Reserved
1434 DBG_FAULT(66)
1435 FAULT(66)
1437 .org ia64_ivt+0x7f00
1438 //////////////////////////////////////////////////////////////////////////
1439 // 0x7f00 Entry 67 (size 16 bundles) Reserved
1440 DBG_FAULT(67)
1441 FAULT(67)
1443 .org ia64_ivt+0x8000