ia64/xen-unstable

xen/arch/ia64/xen/ivt.S @ 16110:e120054bf0ac

[IA64] vti domain save/restore: add unwind directive to break fault handler

Add unwind directives to the fast_hypercall path.
The fast_hypercall path calls functions (the hypercall handler, do_softirq())
and may block, yet it has no unwind information, so stack unwinding
fails there. Add the necessary unwind directives.

Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
author Alex Williamson <alex.williamson@hp.com>
date Fri Oct 12 14:30:30 2007 -0600 (2007-10-12)
parents fdd298b75fb5
children ecdc7b5f650b
line source
1 #include <asm/debugger.h>
2 #include <asm/vhpt.h>
3 #include <public/arch-ia64.h>
4 #include <asm/config.h>
5 /*
6 * arch/ia64/kernel/ivt.S
7 *
8 * Copyright (C) 1998-2001, 2003 Hewlett-Packard Co
9 * Stephane Eranian <eranian@hpl.hp.com>
10 * David Mosberger <davidm@hpl.hp.com>
11 * Copyright (C) 2000, 2002-2003 Intel Co
12 * Asit Mallick <asit.k.mallick@intel.com>
13 * Suresh Siddha <suresh.b.siddha@intel.com>
14 * Kenneth Chen <kenneth.w.chen@intel.com>
15 * Fenghua Yu <fenghua.yu@intel.com>
16 *
17 * 00/08/23 Asit Mallick <asit.k.mallick@intel.com> TLB handling for SMP
18 * 00/12/20 David Mosberger-Tang <davidm@hpl.hp.com> DTLB/ITLB handler now
19 * uses virtual PT.
20 */
21 /*
22 * This file defines the interruption vector table used by the CPU.
23 * It does not include one entry per possible cause of interruption.
24 *
25 * The first 20 entries of the table contain 64 bundles each while the
26 * remaining 48 entries contain only 16 bundles each.
27 *
28 * The 64 bundles are used to allow inlining the whole handler for critical
29 * interruptions like TLB misses.
30 *
31 * For each entry, the comment is as follows:
32 *
33 * // 0x1c00 Entry 7 (size 64 bundles) Data Key Miss (12,51)
34 * entry offset ----/ / / / /
35 * entry number ---------/ / / /
36 * size of the entry -------------/ / /
37 * vector name -------------------------------------/ /
38 * interruptions triggering this vector ----------------------/
39 *
40 * The table is 32KB in size and must be aligned on 32KB boundary.
41 * (The CPU ignores the 15 lower bits of the address)
42 *
43 * Table is based upon EAS2.6 (Oct 1999)
44 */
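(Aside, not part of ivt.S: the entry offsets implied by this layout can be checked with a small C sketch. An IA-64 bundle is 16 bytes, so the long entries occupy 0x400 bytes each and the short ones 0x100 bytes each.)

    #include <assert.h>
    #include <stdio.h>

    /* Offset of IVT entry n: 20 long entries (64 bundles), then 48 short ones. */
    static unsigned long ivt_entry_offset(unsigned int n)
    {
        return (n < 20) ? n * 0x400UL : 0x5000UL + (n - 20) * 0x100UL;
    }

    int main(void)
    {
        assert(ivt_entry_offset(7)  == 0x1c00);  /* Data Key Miss, as in the example above */
        assert(ivt_entry_offset(20) == 0x5000);  /* first short entry */
        assert(ivt_entry_offset(67) == 0x7f00);  /* last entry */
        printf("table size = 0x%lx\n", ivt_entry_offset(67) + 0x100);  /* 0x8000 = 32KB */
        return 0;
    }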
46 #include <linux/config.h>
48 #include <asm/asmmacro.h>
49 #include <asm/break.h>
50 #include <asm/ia32.h>
51 #include <asm/kregs.h>
52 #include <asm/offsets.h>
53 #include <asm/pgtable.h>
54 #include <asm/processor.h>
55 #include <asm/ptrace.h>
56 #include <asm/system.h>
57 #include <asm/thread_info.h>
58 #include <asm/unistd.h>
59 #include <xen/errno.h>
61 #if 1
62 # define PSR_DEFAULT_BITS psr.ac
63 #else
64 # define PSR_DEFAULT_BITS 0
65 #endif
67 #if 0
68 /*
69 * This lets you track the last eight faults that occurred on the CPU.
70 * Make sure ar.k2 isn't needed for something else before enabling this...
71 */
72 # define DBG_FAULT(i) \
73 mov r16=ar.k2;; \
74 shl r16=r16,8;; \
75 add r16=(i),r16;; \
76 mov ar.k2=r16
77 #else
78 # define DBG_FAULT(i)
79 #endif
81 #define MINSTATE_VIRT /* needed by minstate.h */
82 #include "minstate.h"
84 #define FAULT(n) \
85 mov r19=n; /* prepare to save predicates */ \
86 mov r31=pr; \
87 br.sptk.many dispatch_to_fault_handler
89 #define FAULT_OR_REFLECT(n) \
90 mov r20=cr.ipsr; \
91 mov r19=n; /* prepare to save predicates */ \
92 mov r31=pr;; \
93 extr.u r20=r20,IA64_PSR_CPL0_BIT,2;; \
94 cmp.ne p6,p0=r0,r20; /* cpl != 0?*/ \
95 (p6) br.dptk.many dispatch_reflection; \
96 br.sptk.few dispatch_to_fault_handler
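(Aside, not part of ivt.S: a minimal C sketch of the decision FAULT_OR_REFLECT(n) encodes, assuming IA64_PSR_CPL0_BIT is 32, i.e. psr.cpl occupies bits 32-33. Faults taken while the guest runs (cpl != 0) are reflected to the guest; faults from Xen itself go to the fault handler.)

    #define IA64_PSR_CPL0_BIT 32          /* assumed: psr.cpl is bits 32-33 */

    enum fault_target { TO_FAULT_HANDLER, TO_REFLECTION };

    static enum fault_target fault_or_reflect(unsigned long ipsr)
    {
        unsigned int cpl = (ipsr >> IA64_PSR_CPL0_BIT) & 0x3;  /* extr.u r20=r20,IA64_PSR_CPL0_BIT,2 */
        return (cpl != 0) ? TO_REFLECTION : TO_FAULT_HANDLER;  /* (p6) br dispatch_reflection */
    }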
98 .section .text.ivt,"ax"
100 .align 32768 // align on 32KB boundary
101 .global ia64_ivt
102 ia64_ivt:
103 //////////////////////////////////////////////////////////////////////////
104 // 0x0000 Entry 0 (size 64 bundles) VHPT Translation (8,20,47)
105 ENTRY(vhpt_miss)
106 DBG_FAULT(0)
107 FAULT(0)
108 END(vhpt_miss)
110 .org ia64_ivt+0x400
111 //////////////////////////////////////////////////////////////////////////
112 // 0x0400 Entry 1 (size 64 bundles) ITLB (21)
113 ENTRY(itlb_miss)
114 DBG_FAULT(1)
115 mov r16 = cr.ifa
116 mov r31 = pr
117 ;;
118 extr.u r17=r16,59,5
119 ;;
120 /* If address belongs to VMM, go to alt tlb handler */
121 cmp.eq p6,p0=0x1e,r17
122 (p6) br.cond.spnt late_alt_itlb_miss
123 br.cond.sptk fast_tlb_miss_reflect
124 ;;
125 END(itlb_miss)
127 .org ia64_ivt+0x0800
128 //////////////////////////////////////////////////////////////////////////
129 // 0x0800 Entry 2 (size 64 bundles) DTLB (9,48)
130 ENTRY(dtlb_miss)
131 DBG_FAULT(2)
132 mov r16=cr.ifa // get virtual address
133 mov r31=pr
134 ;;
135 extr.u r17=r16,59,5
136 ;;
137 /* If address belongs to VMM, go to alt tlb handler */
138 cmp.eq p6,p0=0x1e,r17
139 (p6) br.cond.spnt late_alt_dtlb_miss
140 br.cond.sptk fast_tlb_miss_reflect
141 ;;
142 END(dtlb_miss)
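(Aside, not part of ivt.S: both itlb_miss and dtlb_miss apply the same test before choosing a path, sketched here in C. The top five bits (63..59) of the faulting address are compared with 0x1e; a match means the address belongs to the VMM and goes to the identity-mapping "alt" handler, anything else is reflected via fast_tlb_miss_reflect.)

    #include <stdbool.h>
    #include <stdint.h>

    static bool is_vmm_address(uint64_t ifa)
    {
        return ((ifa >> 59) & 0x1f) == 0x1e;   /* extr.u r17=r16,59,5 ; cmp.eq 0x1e,r17 */
    }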
144 .org ia64_ivt+0x0c00
145 //////////////////////////////////////////////////////////////////////////
146 // 0x0c00 Entry 3 (size 64 bundles) Alt ITLB (19)
147 ENTRY(alt_itlb_miss)
148 DBG_FAULT(3)
149 mov r16=cr.ifa // get address that caused the TLB miss
150 mov r31=pr
151 ;;
152 late_alt_itlb_miss:
153 mov r21=cr.ipsr
154 movl r17=PAGE_KERNEL
155 movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
156 ;;
157 mov r20=cr.itir
158 extr.u r23=r21,IA64_PSR_CPL0_BIT,2 // extract psr.cpl
159 and r19=r19,r16 // clear ed, reserved bits, and PTE ctrl bits
160 extr.u r18=r16,XEN_VIRT_UC_BIT,1 // extract UC bit
161 ;;
162 cmp.ne p8,p0=r0,r23 // psr.cpl != 0?
163 or r19=r17,r19 // insert PTE control bits into r19
164 dep r20=0,r20,IA64_ITIR_KEY,IA64_ITIR_KEY_LEN // clear the key
165 ;;
166 dep r19=r18,r19,4,1 // set bit 4 (uncached) if access to UC area.
167 mov cr.itir=r20 // set itir with cleared key
168 (p8) br.cond.spnt page_fault
169 ;;
170 itc.i r19 // insert the TLB entry
171 mov pr=r31,-1
172 rfi
173 END(alt_itlb_miss)
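(Aside, not part of ivt.S: a C sketch of the translation late_alt_itlb_miss inserts for a VMM address, with PAGE_KERNEL, IA64_MAX_PHYS_BITS and XEN_VIRT_UC_BIT taken as parameters rather than from the real headers. The address is identity-mapped: its physical-address bits form the PTE's page frame, PAGE_KERNEL supplies the control bits, and bit 4 of the memory-attribute field is set when the address lies in the uncacheable area.)

    #include <stdint.h>

    static uint64_t alt_tlb_pte(uint64_t ifa, uint64_t page_kernel,
                                unsigned int max_phys_bits, unsigned int uc_bit)
    {
        uint64_t mask = ((1ULL << max_phys_bits) - 1) & ~0xfffULL;  /* movl r19=... ; and r19=r19,r16 */
        uint64_t pte  = (ifa & mask) | page_kernel;                 /* or  r19=r17,r19 */
        uint64_t uc   = (ifa >> uc_bit) & 1;                        /* extr.u r18=r16,XEN_VIRT_UC_BIT,1 */
        pte = (pte & ~(1ULL << 4)) | (uc << 4);                     /* dep r19=r18,r19,4,1 */
        return pte;                                                 /* itc.i r19 installs this */
    }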
175 .org ia64_ivt+0x1000
176 //////////////////////////////////////////////////////////////////////////
177 // 0x1000 Entry 4 (size 64 bundles) Alt DTLB (7,46)
178 ENTRY(alt_dtlb_miss)
179 DBG_FAULT(4)
180 mov r16=cr.ifa // get address that caused the TLB miss
181 mov r31=pr
182 ;;
183 late_alt_dtlb_miss:
184 mov r20=cr.isr
185 movl r17=PAGE_KERNEL
186 mov r21=cr.ipsr
187 movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
188 ;;
189 extr.u r23=r21,IA64_PSR_CPL0_BIT,2 // extract psr.cpl
190 and r22=IA64_ISR_CODE_MASK,r20 // get the isr.code field
191 tbit.nz p6,p7=r20,IA64_ISR_SP_BIT // is speculation bit on?
192 extr.u r18=r16,XEN_VIRT_UC_BIT,1 // extract UC bit
193 and r19=r19,r16 // clear ed, reserved bits, and
194 // PTE control bits
195 tbit.nz p9,p0=r20,IA64_ISR_NA_BIT // is non-access bit on?
196 ;;
197 cmp.ne p8,p0=r0,r23
198 (p9) cmp.eq.or.andcm p6,p7=IA64_ISR_CODE_LFETCH,r22 // check isr.code field
199 (p8) br.cond.spnt page_fault
200 ;;
201 mov r20=cr.itir
202 #ifdef CONFIG_VIRTUAL_FRAME_TABLE
203 shr r22=r16,56 // Test for the address of virtual frame_table
204 ;;
205 cmp.eq p8,p0=((VIRT_FRAME_TABLE_ADDR>>56)&0xff)-0x100,r22
206 (p8) br.cond.sptk frametable_miss ;;
207 #endif
208 // If it is not a Xen address, handle it via page_fault.
209 extr.u r22=r16,59,5
210 ;;
211 dep r20=0,r20,IA64_ITIR_KEY,IA64_ITIR_KEY_LEN // clear the key
212 cmp.ne p8,p0=0x1e,r22
213 (p8) br.cond.sptk page_fault
214 ;;
215 dep r21=-1,r21,IA64_PSR_ED_BIT,1
216 or r19=r19,r17 // insert PTE control bits into r19
217 mov cr.itir=r20 // set itir with cleared key
218 ;;
219 dep r19=r18,r19,4,1 // set bit 4 (uncached) if access to UC area
220 (p6) mov cr.ipsr=r21
221 ;;
222 (p7) itc.d r19 // insert the TLB entry
223 mov pr=r31,-1
224 rfi
225 END(alt_dtlb_miss)
227 #ifdef CONFIG_VIRTUAL_FRAME_TABLE
228 GLOBAL_ENTRY(frametable_miss)
229 rsm psr.dt // switch to using physical data addressing
230 movl r24=(frametable_pg_dir-PAGE_OFFSET) // r24=__pa(frametable_pg_dir)
231 ;;
232 srlz.d
233 extr.u r17=r16,PGDIR_SHIFT,(PAGE_SHIFT-3)
234 ;;
235 shladd r24=r17,3,r24 // r24=&pgd[pgd_offset(addr)]
236 ;;
237 ld8 r24=[r24] // r24=pgd[pgd_offset(addr)]
238 extr.u r18=r16,PMD_SHIFT,(PAGE_SHIFT-3) // r18=pmd_offset
239 ;;
240 cmp.eq p6,p7=0,r24 // pgd present?
241 shladd r24=r18,3,r24 // r24=&pmd[pmd_offset(addr)]
242 ;;
243 (p7) ld8 r24=[r24] // r24=pmd[pmd_offset(addr)]
244 extr.u r19=r16,PAGE_SHIFT,(PAGE_SHIFT-3)// r19=pte_offset
245 (p6) br.spnt.few frametable_fault
246 ;;
247 cmp.eq p6,p7=0,r24 // pmd present?
248 shladd r24=r19,3,r24 // r24=&pte[pte_offset(addr)]
249 ;;
250 (p7) ld8 r24=[r24] // r24=pte[pte_offset(addr)]
251 mov r25=(PAGE_SHIFT<<IA64_ITIR_PS)
252 (p6) br.spnt.few frametable_fault
253 ;;
254 mov cr.itir=r25
255 ssm psr.dt // switch to using virtual data addressing
256 tbit.z p6,p7=r24,_PAGE_P_BIT // pte present?
257 ;;
258 (p7) itc.d r24 // install updated PTE
259 (p6) br.spnt.few frametable_fault // page present bit cleared?
260 ;;
261 mov pr=r31,-1 // restore predicate registers
262 rfi
263 END(frametable_miss)
265 ENTRY(frametable_fault)
266 ssm psr.dt // switch to using virtual data addressing
267 mov r18=cr.iip
268 movl r19=ia64_frametable_probe
269 ;;
270 cmp.eq p6,p7=r18,r19 // is faulting address ia64_frametable_probe?
271 mov r8=0 // assumes that 'probe.r' uses r8
272 dep r21=-1,r21,IA64_PSR_RI_BIT+1,1 // return to next instruction in
273 // bundle 2
274 ;;
275 (p6) mov cr.ipsr=r21
276 mov r19=4 // FAULT(4)
277 (p7) br.spnt.few dispatch_to_fault_handler
278 ;;
279 mov pr=r31,-1
280 rfi
281 END(frametable_fault)
283 GLOBAL_ENTRY(ia64_frametable_probe)
284 {
285 probe.r r8=r32,0 // destination register must be r8
286 nop.f 0x0
287 br.ret.sptk.many b0 // this instruction must be in bundle 2
288 }
289 END(ia64_frametable_probe)
290 #endif /* CONFIG_VIRTUAL_FRAME_TABLE */
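(Aside, not part of ivt.S: the walk frametable_miss performs, sketched in C. The real code runs with psr.dt off, so the pgd/pmd values it loads are physical addresses; the sketch glosses over the physical/virtual distinction and assumes the present bit of a PTE is bit 0, as _PAGE_P_BIT is in the real headers. A zero entry at any level, or a non-present PTE, corresponds to the branches to frametable_fault.)

    #include <stdint.h>

    static int frametable_lookup(const uint64_t *pgd_base, uint64_t addr,
                                 unsigned int page_shift, unsigned int pmd_shift,
                                 unsigned int pgdir_shift, uint64_t *pte_out)
    {
        uint64_t idx_mask = (1ULL << (page_shift - 3)) - 1;   /* PAGE_SIZE/8 entries per level */

        uint64_t pgd = pgd_base[(addr >> pgdir_shift) & idx_mask];
        if (pgd == 0)
            return -1;                                        /* (p6) br frametable_fault */
        uint64_t pmd = ((const uint64_t *)pgd)[(addr >> pmd_shift) & idx_mask];
        if (pmd == 0)
            return -1;
        uint64_t pte = ((const uint64_t *)pmd)[(addr >> page_shift) & idx_mask];
        if (!(pte & 1))                                       /* _PAGE_P_BIT assumed to be bit 0 */
            return -1;
        *pte_out = pte;                                       /* itc.d r24 installs this PTE */
        return 0;
    }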
292 .org ia64_ivt+0x1400
293 //////////////////////////////////////////////////////////////////////////
294 // 0x1400 Entry 5 (size 64 bundles) Data nested TLB (6,45)
295 ENTRY(nested_dtlb_miss)
296 DBG_FAULT(5)
297 mov b0=r30
298 br.sptk.many b0 // return to the continuation point
299 ;;
300 END(nested_dtlb_miss)
302 GLOBAL_ENTRY(dispatch_reflection)
303 /*
304 * Input:
305 * psr.ic: off
306 * r19: intr type (offset into ivt, see ia64_int.h)
307 * r31: contains saved predicates (pr)
308 */
309 SAVE_MIN_WITH_COVER_R19
310 alloc r14=ar.pfs,0,0,5,0
311 mov out4=r15
312 mov out0=cr.ifa
313 adds out1=16,sp
314 mov out2=cr.isr
315 mov out3=cr.iim
317 ssm psr.ic | PSR_DEFAULT_BITS
318 ;;
319 srlz.i // guarantee that interruption
320 // collection is on
321 ;;
322 (p15) ssm psr.i // restore psr.i
323 adds r3=8,r2 // set up second base pointer
324 ;;
325 SAVE_REST
326 movl r14=ia64_leave_kernel
327 ;;
328 mov rp=r14
329 // br.sptk.many ia64_prepare_handle_reflection // TODO: why commented out?
330 br.call.sptk.many b6=ia64_handle_reflection
331 END(dispatch_reflection)
333 .org ia64_ivt+0x1800
334 //////////////////////////////////////////////////////////////////////////
335 // 0x1800 Entry 6 (size 64 bundles) Instruction Key Miss (24)
336 ENTRY(ikey_miss)
337 DBG_FAULT(6)
338 FAULT_OR_REFLECT(6)
339 END(ikey_miss)
341 //----------------------------------------------------------------
342 // call do_page_fault (predicates are in r31, psr.dt may be off,
343 // r16 is faulting address)
344 GLOBAL_ENTRY(page_fault)
345 ssm psr.dt
346 ;;
347 srlz.i
348 ;;
349 SAVE_MIN_WITH_COVER
350 alloc r15=ar.pfs,0,0,4,0
351 mov out0=cr.ifa
352 mov out1=cr.isr
353 mov out3=cr.itir
354 adds r3=8,r2 // set up second base pointer
355 ;;
356 ssm psr.ic | PSR_DEFAULT_BITS
357 ;;
358 srlz.i // guarantee that interruption
359 // collection is on
360 ;;
361 (p15) ssm psr.i // restore psr.i
362 movl r14=ia64_leave_kernel
363 ;;
364 SAVE_REST
365 mov rp=r14
366 ;;
367 adds out2=16,r12 // out2 = pointer to pt_regs
368 br.call.sptk.many b6=ia64_do_page_fault // ignore return address
369 END(page_fault)
371 .org ia64_ivt+0x1c00
372 //////////////////////////////////////////////////////////////////////////
373 // 0x1c00 Entry 7 (size 64 bundles) Data Key Miss (12,51)
374 ENTRY(dkey_miss)
375 DBG_FAULT(7)
376 FAULT_OR_REFLECT(7)
377 END(dkey_miss)
379 .org ia64_ivt+0x2000
380 //////////////////////////////////////////////////////////////////////////
381 // 0x2000 Entry 8 (size 64 bundles) Dirty-bit (54)
382 ENTRY(dirty_bit)
383 DBG_FAULT(8)
384 mov r20=cr.ipsr
385 mov r31=pr
386 ;;
387 extr.u r20=r20,IA64_PSR_CPL0_BIT,2
388 ;;
389 mov r19=8 // prepare to save predicates
390 cmp.eq p6,p0=r0,r20 // cpl == 0?
391 (p6) br.sptk.few dispatch_to_fault_handler
392 // If shadow mode is not enabled, reflect the fault.
393 movl r22=THIS_CPU(cpu_kr)+IA64_KR_CURRENT_OFFSET
394 ;;
395 ld8 r22=[r22]
396 ;;
397 add r22=IA64_VCPU_DOMAIN_OFFSET,r22
398 ;;
399 ld8 r22=[r22] // read domain
400 ;;
401 add r22=IA64_DOMAIN_SHADOW_BITMAP_OFFSET,r22
402 ;;
403 ld8 r22=[r22]
404 ;;
405 cmp.eq p6,p0=r0,r22 // !shadow_bitmap ?
406 (p6) br.dptk.many dispatch_reflection
408 SAVE_MIN_WITH_COVER
409 alloc r14=ar.pfs,0,0,4,0
410 mov out0=cr.ifa
411 mov out1=cr.itir
412 mov out2=cr.isr
413 adds out3=16,sp
415 ssm psr.ic | PSR_DEFAULT_BITS
416 ;;
417 srlz.i // guarantee that interruption
418 // collection is on
419 ;;
420 (p15) ssm psr.i // restore psr.i
421 adds r3=8,r2 // set up second base pointer
422 ;;
423 SAVE_REST
424 movl r14=ia64_leave_kernel
425 ;;
426 mov rp=r14
427 br.call.sptk.many b6=ia64_shadow_fault
428 END(dirty_bit)
430 .org ia64_ivt+0x2400
431 //////////////////////////////////////////////////////////////////////////
432 // 0x2400 Entry 9 (size 64 bundles) Instruction Access-bit (27)
433 ENTRY(iaccess_bit)
434 DBG_FAULT(9)
435 mov r16=cr.isr
436 mov r17=cr.ifa
437 mov r31=pr
438 mov r19=9
439 mov r20=0x2400
440 br.sptk.many fast_access_reflect;;
441 END(iaccess_bit)
443 .org ia64_ivt+0x2800
444 //////////////////////////////////////////////////////////////////////////
445 // 0x2800 Entry 10 (size 64 bundles) Data Access-bit (15,55)
446 ENTRY(daccess_bit)
447 DBG_FAULT(10)
448 mov r16=cr.isr
449 mov r17=cr.ifa
450 mov r31=pr
451 mov r19=10
452 mov r20=0x2800
453 br.sptk.many fast_access_reflect
454 ;;
455 END(daccess_bit)
457 .org ia64_ivt+0x2c00
458 //////////////////////////////////////////////////////////////////////////
459 // 0x2c00 Entry 11 (size 64 bundles) Break instruction (33)
460 ENTRY(break_fault)
461 .body
462 /*
463 * The streamlined system call entry/exit paths only save/restore
464 * the initial part of pt_regs. This implies that the callers of
465 * system-calls must adhere to the normal procedure calling
466 * conventions.
467 *
468 * Registers to be saved & restored:
469 * CR registers: cr.ipsr, cr.iip, cr.ifs
470 * AR registers: ar.unat, ar.pfs, ar.rsc, ar.rnat, ar.bspstore,
471 * ar.fpsr
472 * others: pr, b0, b6, loadrs, r1, r11, r12, r13, r15
473 * Registers to be restored only:
474 * r8-r11: output value from the system call.
475 *
476 * During system call exit, scratch registers (including r15) are
477 * modified/cleared to prevent leaking bits from kernel to user
478 * level.
479 */
480 DBG_FAULT(11)
481 mov r16=cr.isr
482 mov r17=cr.iim
483 mov r31=pr
484 ;;
485 cmp.eq p7,p0=r17,r0
486 (p7) br.spnt.few dispatch_break_fault
487 ;;
488 #ifdef CRASH_DEBUG
489 // A panic can occur before domain0 is created. In such cases,
490 // referencing XSI_PSR_IC causes nested_dtlb_miss.
491 movl r18=CDB_BREAK_NUM
492 ;;
493 cmp.eq p7,p0=r17,r18
494 ;;
495 (p7) br.spnt.few dispatch_break_fault
496 ;;
497 #endif
498 movl r18=THIS_CPU(current_psr_ic_addr)
499 ;;
500 ld8 r18=[r18]
501 ;;
502 #ifdef CONFIG_PRIVIFY
503 // pseudo-covers are replaced by break.b which (unfortunately) always
504 // clears iim.
505 cmp.eq p7,p0=r0,r17
506 (p7) br.spnt.many dispatch_privop_fault
507 ;;
508 #endif
509 // if (ipsr.cpl == CONFIG_CPL0_EMUL &&
510 // (iim - HYPERPRIVOP_START) < HYPERPRIVOP_MAX)
511 // this is a hyperprivop. A hyperprivop is hand-coded assembly with
512 // psr.ic off which means it can make no calls, cannot use r1-r15,
513 // and it can have no memory accesses unless they are to pinned
514 // addresses!
515 mov r19= cr.ipsr
516 mov r20=HYPERPRIVOP_START
517 mov r21=HYPERPRIVOP_MAX
518 ;;
519 sub r20=r17,r20
520 extr.u r19=r19,IA64_PSR_CPL0_BIT,2 // extract cpl field from cr.ipsr
521 ;;
522 cmp.gtu p7,p0=r21,r20
523 ;;
524 cmp.eq.and p7,p0=CONFIG_CPL0_EMUL,r19 // ipsr.cpl==CONFIG_CPL0_EMUL
525 (p7) br.sptk.many fast_hyperprivop
526 ;;
527 movl r22=THIS_CPU(cpu_kr)+IA64_KR_CURRENT_OFFSET
528 ;;
529 ld8 r22 = [r22]
530 ;;
531 adds r23=IA64_VCPU_BREAKIMM_OFFSET,r22
532 ;;
533 ld4 r23=[r23];;
534 cmp4.eq p6,p0=r23,r17;; // Xen-reserved breakimm?
535 cmp.eq.and p6,p0=CONFIG_CPL0_EMUL,r19
536 (p6) br.spnt.many fast_hypercall
537 ;;
538 br.sptk.many fast_break_reflect
539 ;;
542 fast_hypercall:
543 shr r25=r2,8;;
544 cmp.ne p7,p0=r0,r25
545 (p7) br.spnt.few dispatch_break_fault
546 ;;
547 // fall through
550 /*
551 * The streamlined system call entry/exit paths only save/restore the initial part
552 * of pt_regs. This implies that the callers of system-calls must adhere to the
553 * normal procedure calling conventions.
554 *
555 * Registers to be saved & restored:
556 * CR registers: cr.ipsr, cr.iip, cr.ifs
557 * AR registers: ar.unat, ar.pfs, ar.rsc, ar.rnat, ar.bspstore, ar.fpsr
558 * others: pr, b0, b6, loadrs, r1, r11, r12, r13, r15
559 * Registers to be restored only:
560 * r8-r11: output value from the system call.
561 *
562 * During system call exit, scratch registers (including r15) are modified/cleared
563 * to prevent leaking bits from kernel to user level.
564 */
566 // DBG_FAULT(11)
567 // mov.m r16=IA64_KR(CURRENT) // M2 r16 <- current task (12 cyc)
568 mov r16=r22
569 mov r29=cr.ipsr // M2 (12 cyc)
570 // mov r31=pr // I0 (2 cyc)
571 mov r15=r2
573 // mov r17=cr.iim // M2 (2 cyc)
574 mov.m r27=ar.rsc // M2 (12 cyc)
575 // mov r18=__IA64_BREAK_SYSCALL // A
577 mov.m ar.rsc=0 // M2
578 mov.m r21=ar.fpsr // M2 (12 cyc)
579 mov r19=b6 // I0 (2 cyc)
580 ;;
581 mov.m r23=ar.bspstore // M2 (12 cyc)
582 mov.m r24=ar.rnat // M2 (5 cyc)
583 mov.i r26=ar.pfs // I0 (2 cyc)
585 invala // M0|1
586 nop.m 0 // M
587 mov r20=r1 // A save r1
589 nop.m 0
590 // movl r30=sys_call_table // X
591 movl r30=ia64_hypercall_table // X
593 mov r28=cr.iip // M2 (2 cyc)
594 // cmp.eq p0,p7=r18,r17 // I0 is this a system call?
595 //(p7) br.cond.spnt non_syscall // B no ->
596 //
597 // From this point on, we are definitely on the syscall-path
598 // and we can use (non-banked) scratch registers.
599 //
600 ///////////////////////////////////////////////////////////////////////
601 mov r1=r16 // A move task-pointer to "addl"-addressable reg
602 mov r2=r16 // A setup r2 for ia64_syscall_setup
603 // add r9=TI_FLAGS+IA64_TASK_SIZE,r16 // A r9 = &current_thread_info()->flags
605 adds r16=IA64_TASK_THREAD_ON_USTACK_OFFSET,r16
606 // adds r15=-1024,r15 // A subtract 1024 from syscall number
607 // mov r3=NR_syscalls - 1
608 mov r3=NR_hypercalls - 1
609 ;;
610 ld1.bias r17=[r16] // M0|1 r17 = current->thread.on_ustack flag
611 // ld4 r9=[r9] // M0|1 r9 = current_thread_info()->flags
612 mov r9=r0 // force flags = 0
613 extr.u r8=r29,41,2 // I0 extract ei field from cr.ipsr
615 shladd r30=r15,3,r30 // A r30 = ia64_hypercall_table + 8*hypercall#
616 addl r22=IA64_RBS_OFFSET,r1 // A compute base of RBS
617 cmp.leu p6,p7=r15,r3 // A syscall number in range?
618 ;;
620 lfetch.fault.excl.nt1 [r22] // M0|1 prefetch RBS
621 (p6) ld8 r30=[r30] // M0|1 load address of syscall entry point
622 tnat.nz.or p7,p0=r15 // I0 is syscall nr a NaT?
624 mov.m ar.bspstore=r22 // M2 switch to kernel RBS
625 cmp.eq p8,p9=2,r8 // A ipsr.ei==2?
626 ;;
628 (p8) mov r8=0 // A clear ei to 0
629 //(p7) movl r30=sys_ni_syscall // X
630 (p7) movl r30=do_ni_hypercall // X
632 (p8) adds r28=16,r28 // A switch cr.iip to next bundle
633 (p9) adds r8=1,r8 // A increment ei to next slot
634 nop.i 0
635 ;;
637 mov.m r25=ar.unat // M2 (5 cyc)
638 dep r29=r8,r29,41,2 // I0 insert new ei into cr.ipsr
639 // adds r15=1024,r15 // A restore original syscall number
640 //
641 // If any of the above loads miss in L1D, we'll stall here until
642 // the data arrives.
643 //
644 ///////////////////////////////////////////////////////////////////////
645 st1 [r16]=r0 // M2|3 clear current->thread.on_ustack flag
646 mov b6=r30 // I0 setup syscall handler branch reg early
647 cmp.eq pKStk,pUStk=r0,r17 // A were we on kernel stacks already?
649 // and r9=_TIF_SYSCALL_TRACEAUDIT,r9 // A mask trace or audit
650 mov r18=ar.bsp // M2 (12 cyc)
651 ;;
652 (pUStk) addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r1 // A compute base of memory stack
653 // cmp.eq p14,p0=r9,r0 // A are syscalls being traced/audited?
654 br.call.sptk.many b7=ia64_syscall_setup // B
655 1:
656 mov ar.rsc=0x3 // M2 set eager mode, pl 0, LE, loadrs=0
657 nop 0
658 bsw.1 // B (6 cyc) regs are saved, switch to bank 1
659 ;;
661 PT_REGS_UNWIND_INFO(0)
662 ssm psr.ic | PSR_DEFAULT_BITS // M2 now it's safe to re-enable intr.-collection
663 // movl r3=ia64_ret_from_syscall // X
664 ;;
666 srlz.i // M0 ensure interruption collection is on
667 // mov rp=r3 // I0 set the real return addr
668 //(p10) br.cond.spnt.many ia64_ret_from_syscall // B return if bad call-frame or r15 is a NaT
669 (p15) ssm psr.i // M2 restore psr.i
670 //(p14) br.call.sptk.many b6=b6 // B invoke syscall-handler (ignore return addr)
671 // br.call.sptk.many b6=b6 // B invoke syscall-handler (ignore return addr)
672 br.call.sptk.many b0=b6 // B invoke syscall-handler (ignore return addr)
673 // br.cond.spnt.many ia64_trace_syscall // B do syscall-tracing thingamagic
674 ;;
675 adds r2=PT(R8)+16,r12
676 ;;
677 st8 [r2]=r8
678 ;;
679 br.call.sptk.many b0=do_softirq
680 ;;
681 //restore hypercall argument if continuation
682 adds r2=IA64_VCPU_HYPERCALL_CONTINUATION_OFS,r13
683 ;;
684 ld1 r20=[r2]
685 ;;
686 st1 [r2]=r0
687 ;;
688 cmp.ne p6,p0=r20,r0
689 ;;
690 (p6) adds r2=PT(R16)+16,r12
691 (p6) adds r3=PT(R17)+16,r12
692 ;;
693 (p6) ld8 r32=[r2],16
694 (p6) ld8 r33=[r3],16
695 ;;
696 (p6) ld8 r34=[r2],16
697 (p6) ld8 r35=[r3],16
698 ;;
699 (p6) ld8 r36=[r2],16
700 ;;
701 //save ar.bsp before cover
702 mov r16=ar.bsp
703 add r2=PT(R14)+16,r12
704 ;;
705 st8 [r2]=r16
706 ;;
707 rsm psr.i|psr.ic
708 ;;
709 srlz.i
710 ;;
711 cover
712 ;;
713 mov r20=cr.ifs
714 adds r2=PT(CR_IFS)+16,r12
715 ;;
716 st8 [r2]=r20
717 ;;
718 br.call.sptk.many b0=reflect_event
719 ;;
720 adds r2=PT(R14)+16,r12
721 adds r3=PT(R8)+16,r12
722 ;;
723 //r16 contains ar.bsp before cover
724 ld8 r16=[r2]
725 ld8 r8=[r3]
726 ;;
727 br.sptk.many ia64_ret_from_syscall
728 ;;
729 END(break_fault)
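(Aside, not part of ivt.S: the dispatch decision break_fault makes, in rough C. CONFIG_CPL0_EMUL, HYPERPRIVOP_START/HYPERPRIVOP_MAX and the per-vcpu breakimm keep their meanings from the code above; the CRASH_DEBUG and CONFIG_PRIVIFY special cases are left out.)

    #include <stdint.h>

    enum break_path {
        DISPATCH_BREAK_FAULT,   /* slow path via ia64_handle_break */
        FAST_HYPERPRIVOP,
        FAST_HYPERCALL,
        FAST_BREAK_REFLECT,
    };

    static enum break_path classify_break(uint64_t iim, unsigned int cpl,
                                          uint64_t hypercall_nr,   /* r2 on entry */
                                          uint64_t breakimm,
                                          uint64_t hp_start, uint64_t hp_max,
                                          unsigned int cpl0_emul)
    {
        if (iim == 0)                                     /* cmp.eq p7,p0=r17,r0 */
            return DISPATCH_BREAK_FAULT;
        if (cpl == cpl0_emul && iim - hp_start < hp_max)  /* cmp.gtu / cmp.eq.and */
            return FAST_HYPERPRIVOP;
        if (cpl == cpl0_emul && iim == breakimm)
            /* fast_hypercall bails out to the slow path when the
             * hypercall number is >= 0x100 (shr r25=r2,8). */
            return (hypercall_nr >> 8) ? DISPATCH_BREAK_FAULT : FAST_HYPERCALL;
        return FAST_BREAK_REFLECT;
    }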
731 .org ia64_ivt+0x3000
732 //////////////////////////////////////////////////////////////////////////
733 // 0x3000 Entry 12 (size 64 bundles) External Interrupt (4)
734 ENTRY(interrupt)
735 DBG_FAULT(12)
736 mov r31=pr // prepare to save predicates
737 mov r30=cr.ivr // pass cr.ivr as first arg
738 // FIXME: this is a hack... use cpuinfo.ksoftirqd because it's
739 // not used anywhere else and we need a place to stash ivr, and
740 // there are no registers available unused by SAVE_MIN/REST
741 movl r29=THIS_CPU(cpu_info)+IA64_CPUINFO_KSOFTIRQD_OFFSET
742 ;;
743 st8 [r29]=r30
744 movl r28=slow_interrupt
745 ;;
746 mov r29=rp
747 ;;
748 mov rp=r28
749 ;;
750 br.cond.sptk.many fast_tick_reflect
751 ;;
752 slow_interrupt:
753 mov rp=r29;;
754 SAVE_MIN_WITH_COVER // uses r31; defines r2 and r3
755 ssm psr.ic | PSR_DEFAULT_BITS
756 ;;
757 adds r3=8,r2 // set up second base pointer for SAVE_REST
758 srlz.i // ensure everybody knows psr.ic is back on
759 ;;
760 SAVE_REST
761 ;;
762 alloc r14=ar.pfs,0,0,2,0 // must be first in an insn group
763 movl out0=THIS_CPU(cpu_info)+IA64_CPUINFO_KSOFTIRQD_OFFSET;;
764 ld8 out0=[out0];;
765 add out1=16,sp // pass pointer to pt_regs as second arg
766 movl r14=ia64_leave_kernel
767 ;;
768 mov rp=r14
769 br.call.sptk.many b6=ia64_handle_irq
770 END(interrupt)
772 .org ia64_ivt+0x3400
773 //////////////////////////////////////////////////////////////////////////
774 // 0x3400 Entry 13 (size 64 bundles) Reserved
775 DBG_FAULT(13)
776 FAULT(13)
778 // There is no particular reason for this code to be here, other
779 // than that there happens to be space here that would go unused
780 // otherwise. If this fault ever gets "unreserved", simply move
781 // the following code to a more suitable spot...
783 GLOBAL_ENTRY(dispatch_break_fault)
784 SAVE_MIN_WITH_COVER
785 ;;
786 dispatch_break_fault_post_save:
787 alloc r14=ar.pfs,0,0,4,0 // now it's safe (must be first in insn group!)
788 mov out0=cr.ifa
789 adds out1=16,sp
790 mov out2=cr.isr // FIXME: pity to make this slow access twice
791 mov out3=cr.iim // FIXME: pity to make this slow access twice
793 ssm psr.ic | PSR_DEFAULT_BITS
794 ;;
795 srlz.i // guarantee that interruption collection is on
796 ;;
797 (p15) ssm psr.i // restore psr.i
798 adds r3=8,r2 // set up second base pointer
799 ;;
800 SAVE_REST
801 movl r14=ia64_leave_kernel
802 ;;
803 mov rp=r14
804 br.call.sptk.many b6=ia64_handle_break
805 END(dispatch_break_fault)
807 .org ia64_ivt+0x3800
808 //////////////////////////////////////////////////////////////////////////
809 // 0x3800 Entry 14 (size 64 bundles) Reserved
810 DBG_FAULT(14)
811 FAULT(14)
813 // this code segment is from 2.6.16.13
815 /*
816 * There is no particular reason for this code to be here, other than that
817 * there happens to be space here that would go unused otherwise. If this
818 * fault ever gets "unreserved", simply move the following code to a more
819 * suitable spot...
820 *
821 * ia64_syscall_setup() is a separate subroutine so that it can
822 * allocate stacked registers so it can safely demine any
823 * potential NaT values from the input registers.
824 *
825 * On entry:
826 * - executing on bank 0 or bank 1 register set (doesn't matter)
827 * - r1: stack pointer
828 * - r2: current task pointer
829 * - r3: preserved
830 * - r11: original contents (saved ar.pfs to be saved)
831 * - r12: original contents (sp to be saved)
832 * - r13: original contents (tp to be saved)
833 * - r15: original contents (syscall # to be saved)
834 * - r18: saved bsp (after switching to kernel stack)
835 * - r19: saved b6
836 * - r20: saved r1 (gp)
837 * - r21: saved ar.fpsr
838 * - r22: kernel's register backing store base (krbs_base)
839 * - r23: saved ar.bspstore
840 * - r24: saved ar.rnat
841 * - r25: saved ar.unat
842 * - r26: saved ar.pfs
843 * - r27: saved ar.rsc
844 * - r28: saved cr.iip
845 * - r29: saved cr.ipsr
846 * - r31: saved pr
847 * - b0: original contents (to be saved)
848 * On exit:
849 * - p10: TRUE if syscall is invoked with more than 8 out
850 * registers or r15's Nat is true
851 * - r1: kernel's gp
852 * - r3: preserved (same as on entry)
853 * - r8: -EINVAL if p10 is true
854 * - r12: points to kernel stack
855 * - r13: points to current task
856 * - r14: preserved (same as on entry)
857 * - p13: preserved
858 * - p15: TRUE if interrupts need to be re-enabled
859 * - ar.fpsr: set to kernel settings
860 * - b6: preserved (same as on entry)
861 */
862 GLOBAL_ENTRY(ia64_syscall_setup)
863 #if PT(B6) != 0
864 # error This code assumes that b6 is the first field in pt_regs.
865 #endif
866 st8 [r1]=r19 // save b6
867 add r16=PT(CR_IPSR),r1 // initialize first base pointer
868 add r17=PT(R11),r1 // initialize second base pointer
869 ;;
870 alloc r19=ar.pfs,8,0,0,0 // ensure in0-in7 are writable
871 st8 [r16]=r29,PT(AR_PFS)-PT(CR_IPSR) // save cr.ipsr
872 tnat.nz p8,p0=in0
874 st8.spill [r17]=r11,PT(CR_IIP)-PT(R11) // save r11
875 tnat.nz p9,p0=in1
876 (pKStk) mov r18=r0 // make sure r18 isn't NaT
877 ;;
879 st8 [r16]=r26,PT(CR_IFS)-PT(AR_PFS) // save ar.pfs
880 st8 [r17]=r28,PT(AR_UNAT)-PT(CR_IIP) // save cr.iip
881 mov r28=b0 // save b0 (2 cyc)
882 ;;
884 st8 [r17]=r25,PT(AR_RSC)-PT(AR_UNAT) // save ar.unat
885 dep r19=0,r19,38,26 // clear all bits but 0..37 [I0]
886 (p8) mov in0=-1
887 ;;
889 st8 [r16]=r19,PT(AR_RNAT)-PT(CR_IFS) // store ar.pfs.pfm in cr.ifs
890 extr.u r11=r19,7,7 // I0 // get sol of ar.pfs
891 and r8=0x7f,r19 // A // get sof of ar.pfs
893 st8 [r17]=r27,PT(AR_BSPSTORE)-PT(AR_RSC)// save ar.rsc
894 tbit.nz p15,p0=r29,IA64_PSR_I_BIT // I0
895 (p9) mov in1=-1
896 ;;
898 (pUStk) sub r18=r18,r22 // r18=RSE.ndirty*8
899 tnat.nz p10,p0=in2
900 add r11=8,r11
901 ;;
902 (pKStk) adds r16=PT(PR)-PT(AR_RNAT),r16 // skip over ar_rnat field
903 (pKStk) adds r17=PT(B0)-PT(AR_BSPSTORE),r17 // skip over ar_bspstore field
904 tnat.nz p11,p0=in3
905 ;;
906 (p10) mov in2=-1
907 tnat.nz p12,p0=in4 // [I0]
908 (p11) mov in3=-1
909 ;;
910 (pUStk) st8 [r16]=r24,PT(PR)-PT(AR_RNAT) // save ar.rnat
911 (pUStk) st8 [r17]=r23,PT(B0)-PT(AR_BSPSTORE) // save ar.bspstore
912 shl r18=r18,16 // compute ar.rsc to be used for "loadrs"
913 ;;
914 st8 [r16]=r31,PT(LOADRS)-PT(PR) // save predicates
915 st8 [r17]=r28,PT(R1)-PT(B0) // save b0
916 tnat.nz p13,p0=in5 // [I0]
917 ;;
918 st8 [r16]=r18,PT(R12)-PT(LOADRS) // save ar.rsc value for "loadrs"
919 st8.spill [r17]=r20,PT(R13)-PT(R1) // save original r1
920 (p12) mov in4=-1
921 ;;
923 .mem.offset 0,0; st8.spill [r16]=r12,PT(AR_FPSR)-PT(R12) // save r12
924 .mem.offset 8,0; st8.spill [r17]=r13,PT(R15)-PT(R13) // save r13
925 (p13) mov in5=-1
926 ;;
927 st8 [r16]=r21,PT(R8)-PT(AR_FPSR) // save ar.fpsr
928 tnat.nz p13,p0=in6
929 cmp.lt p10,p9=r11,r8 // frame size can't be more than local+8
930 ;;
931 mov r8=1
932 (p9) tnat.nz p10,p0=r15
933 adds r12=-16,r1 // switch to kernel memory stack (with 16 bytes of scratch)
935 st8.spill [r17]=r15 // save r15
936 tnat.nz p8,p0=in7
937 nop.i 0
939 mov r13=r2 // establish `current'
940 movl r1=__gp // establish kernel global pointer
941 ;;
942 st8 [r16]=r8 // ensure pt_regs.r8 != 0 (see handle_syscall_error)
943 (p13) mov in6=-1
944 (p8) mov in7=-1
946 cmp.eq pSys,pNonSys=r0,r0 // set pSys=1, pNonSys=0
947 movl r17=FPSR_DEFAULT
948 ;;
949 mov.m ar.fpsr=r17 // set ar.fpsr to kernel default value
950 (p10) mov r8=-EINVAL
951 br.ret.sptk.many b7
952 END(ia64_syscall_setup)
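(Aside, not part of ivt.S: how ia64_syscall_setup decodes ar.pfs to validate the caller's register frame, in C. sof is the size of frame and sol the size of locals, both taken from the pfm field; the p10/-EINVAL path above fires when the call passes more than eight output registers, i.e. sof - sol > 8, or when r15 is a NaT.)

    #include <stdint.h>

    static int frame_too_large(uint64_t ar_pfs)
    {
        unsigned int sof = ar_pfs & 0x7f;         /* and    r8=0x7f,r19 */
        unsigned int sol = (ar_pfs >> 7) & 0x7f;  /* extr.u r11=r19,7,7 */
        return sol + 8 < sof;                     /* add r11=8,r11 ; cmp.lt p10,p9=r11,r8 */
    }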
955 .org ia64_ivt+0x3c00
956 //////////////////////////////////////////////////////////////////////////
957 // 0x3c00 Entry 15 (size 64 bundles) Reserved
958 DBG_FAULT(15)
959 FAULT(15)
962 .org ia64_ivt+0x4000
963 //////////////////////////////////////////////////////////////////////////
964 // 0x4000 Entry 16 (size 64 bundles) Reserved
965 DBG_FAULT(16)
966 FAULT(16)
968 // There is no particular reason for this code to be here, other
969 // than that there happens to be space here that would go unused
970 // otherwise. If this fault ever gets "unreserved", simply move
971 // the following code to a more suitable spot...
973 ENTRY(dispatch_privop_fault)
974 SAVE_MIN_WITH_COVER
975 ;;
976 alloc r14=ar.pfs,0,0,4,0 // now it's safe (must be first in
977 // insn group!)
978 mov out0=cr.ifa
979 adds out1=16,sp
980 mov out2=cr.isr // FIXME: pity to make this slow access twice
981 mov out3=cr.itir
983 ssm psr.ic | PSR_DEFAULT_BITS
984 ;;
985 srlz.i // guarantee that interruption
986 // collection is on
987 ;;
988 (p15) ssm psr.i // restore psr.i
989 adds r3=8,r2 // set up second base pointer
990 ;;
991 SAVE_REST
992 movl r14=ia64_leave_kernel
993 ;;
994 mov rp=r14
995 br.call.sptk.many b6=ia64_handle_privop
996 END(dispatch_privop_fault)
999 .org ia64_ivt+0x4400
1000 //////////////////////////////////////////////////////////////////////////
1001 // 0x4400 Entry 17 (size 64 bundles) Reserved
1002 DBG_FAULT(17)
1003 FAULT(17)
1006 .org ia64_ivt+0x4800
1007 //////////////////////////////////////////////////////////////////////////
1008 // 0x4800 Entry 18 (size 64 bundles) Reserved
1009 DBG_FAULT(18)
1010 FAULT(18)
1013 .org ia64_ivt+0x4c00
1014 //////////////////////////////////////////////////////////////////////////
1015 // 0x4c00 Entry 19 (size 64 bundles) Reserved
1016 DBG_FAULT(19)
1017 FAULT(19)
1019 /*
1020 * There is no particular reason for this code to be here, other
1021 * than that there happens to be space here that would go unused
1022 * otherwise. If this fault ever gets "unreserved", simply move
1023 * the following code to a more suitable spot...
1024 */
1026 GLOBAL_ENTRY(dispatch_to_fault_handler)
1027 /*
1028 * Input:
1029 * psr.ic: off
1030 * r19: fault vector number (e.g., 24 for General Exception)
1031 * r31: contains saved predicates (pr)
1032 */
1033 SAVE_MIN_WITH_COVER_R19
1034 alloc r14=ar.pfs,0,0,5,0
1035 mov out0=r15
1036 mov out1=cr.isr
1037 mov out2=cr.ifa
1038 mov out3=cr.iim
1039 mov out4=cr.itir
1040 ;;
1041 ssm psr.ic | PSR_DEFAULT_BITS
1042 ;;
1043 srlz.i // guarantee that interruption
1044 // collection is on
1045 ;;
1046 (p15) ssm psr.i // restore psr.i
1047 adds r3=8,r2 // set up second base pointer for
1048 // SAVE_REST
1049 ;;
1050 SAVE_REST
1051 movl r14=ia64_leave_kernel
1052 ;;
1053 mov rp=r14
1054 br.call.sptk.many b6=ia64_fault
1055 END(dispatch_to_fault_handler)
1057 //
1058 // --- End of long entries, Beginning of short entries
1059 //
1061 .org ia64_ivt+0x5000
1062 //////////////////////////////////////////////////////////////////////////
1063 // 0x5000 Entry 20 (size 16 bundles) Page Not Present (10,22,49)
1064 ENTRY(page_not_present)
1065 DBG_FAULT(20)
1066 FAULT_OR_REFLECT(20)
1067 END(page_not_present)
1069 .org ia64_ivt+0x5100
1070 //////////////////////////////////////////////////////////////////////////
1071 // 0x5100 Entry 21 (size 16 bundles) Key Permission (13,25,52)
1072 ENTRY(key_permission)
1073 DBG_FAULT(21)
1074 FAULT_OR_REFLECT(21)
1075 END(key_permission)
1077 .org ia64_ivt+0x5200
1078 //////////////////////////////////////////////////////////////////////////
1079 // 0x5200 Entry 22 (size 16 bundles) Instruction Access Rights (26)
1080 ENTRY(iaccess_rights)
1081 DBG_FAULT(22)
1082 FAULT_OR_REFLECT(22)
1083 END(iaccess_rights)
1085 .org ia64_ivt+0x5300
1086 //////////////////////////////////////////////////////////////////////////
1087 // 0x5300 Entry 23 (size 16 bundles) Data Access Rights (14,53)
1088 ENTRY(daccess_rights)
1089 DBG_FAULT(23)
1090 mov r31=pr
1091 mov r16=cr.isr
1092 mov r17=cr.ifa
1093 mov r19=23
1094 mov r20=0x5300
1095 br.sptk.many fast_access_reflect
1096 ;;
1097 END(daccess_rights)
1099 .org ia64_ivt+0x5400
1100 //////////////////////////////////////////////////////////////////////////
1101 // 0x5400 Entry 24 (size 16 bundles) General Exception (5,32,34,36,38,39)
1102 ENTRY(general_exception)
1103 DBG_FAULT(24)
1104 mov r16=cr.isr
1105 mov r31=pr
1106 ;;
1107 cmp4.ge p6,p0=0x20,r16
1108 (p6) br.sptk.many dispatch_privop_fault
1109 ;;
1110 FAULT_OR_REFLECT(24)
1111 END(general_exception)
1113 .org ia64_ivt+0x5500
1114 //////////////////////////////////////////////////////////////////////////
1115 // 0x5500 Entry 25 (size 16 bundles) Disabled FP-Register (35)
1116 ENTRY(disabled_fp_reg)
1117 DBG_FAULT(25)
1118 FAULT_OR_REFLECT(25)
1119 END(disabled_fp_reg)
1121 .org ia64_ivt+0x5600
1122 //////////////////////////////////////////////////////////////////////////
1123 // 0x5600 Entry 26 (size 16 bundles) Nat Consumption (11,23,37,50)
1124 ENTRY(nat_consumption)
1125 DBG_FAULT(26)
1126 FAULT_OR_REFLECT(26)
1127 END(nat_consumption)
1129 .org ia64_ivt+0x5700
1130 //////////////////////////////////////////////////////////////////////////
1131 // 0x5700 Entry 27 (size 16 bundles) Speculation (40)
1132 ENTRY(speculation_vector)
1133 DBG_FAULT(27)
1134 // this probably need not reflect...
1135 FAULT_OR_REFLECT(27)
1136 END(speculation_vector)
1138 .org ia64_ivt+0x5800
1139 //////////////////////////////////////////////////////////////////////////
1140 // 0x5800 Entry 28 (size 16 bundles) Reserved
1141 DBG_FAULT(28)
1142 FAULT(28)
1144 .org ia64_ivt+0x5900
1145 //////////////////////////////////////////////////////////////////////////
1146 // 0x5900 Entry 29 (size 16 bundles) Debug (16,28,56)
1147 ENTRY(debug_vector)
1148 DBG_FAULT(29)
1149 FAULT_OR_REFLECT(29)
1150 END(debug_vector)
1152 .org ia64_ivt+0x5a00
1153 //////////////////////////////////////////////////////////////////////////
1154 // 0x5a00 Entry 30 (size 16 bundles) Unaligned Reference (57)
1155 ENTRY(unaligned_access)
1156 DBG_FAULT(30)
1157 FAULT_OR_REFLECT(30)
1158 END(unaligned_access)
1160 .org ia64_ivt+0x5b00
1161 //////////////////////////////////////////////////////////////////////////
1162 // 0x5b00 Entry 31 (size 16 bundles) Unsupported Data Reference (57)
1163 ENTRY(unsupported_data_reference)
1164 DBG_FAULT(31)
1165 FAULT_OR_REFLECT(31)
1166 END(unsupported_data_reference)
1168 .org ia64_ivt+0x5c00
1169 //////////////////////////////////////////////////////////////////////////
1170 // 0x5c00 Entry 32 (size 16 bundles) Floating-Point Fault (64)
1171 ENTRY(floating_point_fault)
1172 DBG_FAULT(32)
1173 FAULT_OR_REFLECT(32)
1174 END(floating_point_fault)
1176 .org ia64_ivt+0x5d00
1177 //////////////////////////////////////////////////////////////////////////
1178 // 0x5d00 Entry 33 (size 16 bundles) Floating Point Trap (66)
1179 ENTRY(floating_point_trap)
1180 DBG_FAULT(33)
1181 FAULT_OR_REFLECT(33)
1182 END(floating_point_trap)
1184 .org ia64_ivt+0x5e00
1185 //////////////////////////////////////////////////////////////////////////
1186 // 0x5e00 Entry 34 (size 16 bundles) Lower Privilege Transfer Trap (66)
1187 ENTRY(lower_privilege_trap)
1188 DBG_FAULT(34)
1189 FAULT_OR_REFLECT(34)
1190 END(lower_privilege_trap)
1192 .org ia64_ivt+0x5f00
1193 //////////////////////////////////////////////////////////////////////////
1194 // 0x5f00 Entry 35 (size 16 bundles) Taken Branch Trap (68)
1195 ENTRY(taken_branch_trap)
1196 DBG_FAULT(35)
1197 FAULT_OR_REFLECT(35)
1198 END(taken_branch_trap)
1200 .org ia64_ivt+0x6000
1201 //////////////////////////////////////////////////////////////////////////
1202 // 0x6000 Entry 36 (size 16 bundles) Single Step Trap (69)
1203 ENTRY(single_step_trap)
1204 DBG_FAULT(36)
1205 FAULT_OR_REFLECT(36)
1206 END(single_step_trap)
1208 .org ia64_ivt+0x6100
1209 //////////////////////////////////////////////////////////////////////////
1210 // 0x6100 Entry 37 (size 16 bundles) Reserved
1211 DBG_FAULT(37)
1212 FAULT(37)
1214 .org ia64_ivt+0x6200
1215 //////////////////////////////////////////////////////////////////////////
1216 // 0x6200 Entry 38 (size 16 bundles) Reserved
1217 DBG_FAULT(38)
1218 FAULT(38)
1220 .org ia64_ivt+0x6300
1221 //////////////////////////////////////////////////////////////////////////
1222 // 0x6300 Entry 39 (size 16 bundles) Reserved
1223 DBG_FAULT(39)
1224 FAULT(39)
1226 .org ia64_ivt+0x6400
1227 //////////////////////////////////////////////////////////////////////////
1228 // 0x6400 Entry 40 (size 16 bundles) Reserved
1229 DBG_FAULT(40)
1230 FAULT(40)
1232 .org ia64_ivt+0x6500
1233 //////////////////////////////////////////////////////////////////////////
1234 // 0x6500 Entry 41 (size 16 bundles) Reserved
1235 DBG_FAULT(41)
1236 FAULT(41)
1238 .org ia64_ivt+0x6600
1239 //////////////////////////////////////////////////////////////////////////
1240 // 0x6600 Entry 42 (size 16 bundles) Reserved
1241 DBG_FAULT(42)
1242 FAULT(42)
1244 .org ia64_ivt+0x6700
1245 //////////////////////////////////////////////////////////////////////////
1246 // 0x6700 Entry 43 (size 16 bundles) Reserved
1247 DBG_FAULT(43)
1248 FAULT(43)
1250 .org ia64_ivt+0x6800
1251 //////////////////////////////////////////////////////////////////////////
1252 // 0x6800 Entry 44 (size 16 bundles) Reserved
1253 DBG_FAULT(44)
1254 FAULT(44)
1256 .org ia64_ivt+0x6900
1257 //////////////////////////////////////////////////////////////////////////
1258 // 0x6900 Entry 45 (size 16 bundles) IA-32 Exception (17,18,29,41,42,43,
1259 // 44,58,60,61,62,72,
1260 // 73,75,76,77)
1261 ENTRY(ia32_exception)
1262 DBG_FAULT(45)
1263 FAULT_OR_REFLECT(45)
1264 END(ia32_exception)
1266 .org ia64_ivt+0x6a00
1267 //////////////////////////////////////////////////////////////////////////
1268 // 0x6a00 Entry 46 (size 16 bundles) IA-32 Intercept (30,31,59,70,71)
1269 ENTRY(ia32_intercept)
1270 DBG_FAULT(46)
1271 FAULT_OR_REFLECT(46)
1272 END(ia32_intercept)
1274 .org ia64_ivt+0x6b00
1275 //////////////////////////////////////////////////////////////////////////
1276 // 0x6b00 Entry 47 (size 16 bundles) IA-32 Interrupt (74)
1277 ENTRY(ia32_interrupt)
1278 DBG_FAULT(47)
1279 FAULT_OR_REFLECT(47)
1280 END(ia32_interrupt)
1282 .org ia64_ivt+0x6c00
1283 //////////////////////////////////////////////////////////////////////////
1284 // 0x6c00 Entry 48 (size 16 bundles) Reserved
1285 DBG_FAULT(48)
1286 FAULT(48)
1288 .org ia64_ivt+0x6d00
1289 //////////////////////////////////////////////////////////////////////////
1290 // 0x6d00 Entry 49 (size 16 bundles) Reserved
1291 DBG_FAULT(49)
1292 FAULT(49)
1294 .org ia64_ivt+0x6e00
1295 //////////////////////////////////////////////////////////////////////////
1296 // 0x6e00 Entry 50 (size 16 bundles) Reserved
1297 DBG_FAULT(50)
1298 FAULT(50)
1300 .org ia64_ivt+0x6f00
1301 //////////////////////////////////////////////////////////////////////////
1302 // 0x6f00 Entry 51 (size 16 bundles) Reserved
1303 DBG_FAULT(51)
1304 FAULT(51)
1306 .org ia64_ivt+0x7000
1307 //////////////////////////////////////////////////////////////////////////
1308 // 0x7000 Entry 52 (size 16 bundles) Reserved
1309 DBG_FAULT(52)
1310 FAULT(52)
1312 .org ia64_ivt+0x7100
1313 //////////////////////////////////////////////////////////////////////////
1314 // 0x7100 Entry 53 (size 16 bundles) Reserved
1315 DBG_FAULT(53)
1316 FAULT(53)
1318 .org ia64_ivt+0x7200
1319 //////////////////////////////////////////////////////////////////////////
1320 // 0x7200 Entry 54 (size 16 bundles) Reserved
1321 DBG_FAULT(54)
1322 FAULT(54)
1324 .org ia64_ivt+0x7300
1325 //////////////////////////////////////////////////////////////////////////
1326 // 0x7300 Entry 55 (size 16 bundles) Reserved
1327 DBG_FAULT(55)
1328 FAULT(55)
1330 .org ia64_ivt+0x7400
1331 //////////////////////////////////////////////////////////////////////////
1332 // 0x7400 Entry 56 (size 16 bundles) Reserved
1333 DBG_FAULT(56)
1334 FAULT(56)
1336 .org ia64_ivt+0x7500
1337 //////////////////////////////////////////////////////////////////////////
1338 // 0x7500 Entry 57 (size 16 bundles) Reserved
1339 DBG_FAULT(57)
1340 FAULT(57)
1342 .org ia64_ivt+0x7600
1343 //////////////////////////////////////////////////////////////////////////
1344 // 0x7600 Entry 58 (size 16 bundles) Reserved
1345 DBG_FAULT(58)
1346 FAULT(58)
1348 .org ia64_ivt+0x7700
1349 //////////////////////////////////////////////////////////////////////////
1350 // 0x7700 Entry 59 (size 16 bundles) Reserved
1351 DBG_FAULT(59)
1352 FAULT(59)
1354 .org ia64_ivt+0x7800
1355 //////////////////////////////////////////////////////////////////////////
1356 // 0x7800 Entry 60 (size 16 bundles) Reserved
1357 DBG_FAULT(60)
1358 FAULT(60)
1360 .org ia64_ivt+0x7900
1361 //////////////////////////////////////////////////////////////////////////
1362 // 0x7900 Entry 61 (size 16 bundles) Reserved
1363 DBG_FAULT(61)
1364 FAULT(61)
1366 .org ia64_ivt+0x7a00
1367 //////////////////////////////////////////////////////////////////////////
1368 // 0x7a00 Entry 62 (size 16 bundles) Reserved
1369 DBG_FAULT(62)
1370 FAULT(62)
1372 .org ia64_ivt+0x7b00
1373 //////////////////////////////////////////////////////////////////////////
1374 // 0x7b00 Entry 63 (size 16 bundles) Reserved
1375 DBG_FAULT(63)
1376 FAULT(63)
1378 .org ia64_ivt+0x7c00
1379 //////////////////////////////////////////////////////////////////////////
1380 // 0x7c00 Entry 64 (size 16 bundles) Reserved
1381 DBG_FAULT(64)
1382 FAULT(64)
1384 .org ia64_ivt+0x7d00
1385 //////////////////////////////////////////////////////////////////////////
1386 // 0x7d00 Entry 65 (size 16 bundles) Reserved
1387 DBG_FAULT(65)
1388 FAULT(65)
1390 .org ia64_ivt+0x7e00
1391 //////////////////////////////////////////////////////////////////////////
1392 // 0x7e00 Entry 66 (size 16 bundles) Reserved
1393 DBG_FAULT(66)
1394 FAULT(66)
1396 .org ia64_ivt+0x7f00
1397 //////////////////////////////////////////////////////////////////////////
1398 // 0x7f00 Entry 67 (size 16 bundles) Reserved
1399 DBG_FAULT(67)
1400 FAULT(67)
1402 .org ia64_ivt+0x8000