
view xen/arch/ia64/xen/ivt.S @ 13082:cf23494af72c

[IA64] Reflect general exceptions

General exceptions in the domU with isr.code > 0x20 are now reflected
to the domU.

Signed-off-by: Dietmar Hahn <dietmar.hahn@fujitsu-siemens.com>
author awilliam@xenbuild2.aw
date Mon Dec 18 09:48:23 2006 -0700 (2006-12-18)
parents fa584e5d17b6
children 99ff540b0efe
1 #ifdef XEN
2 #include <asm/debugger.h>
3 #include <asm/vhpt.h>
4 #endif
5 /*
6 * arch/ia64/kernel/ivt.S
7 *
8 * Copyright (C) 1998-2001, 2003 Hewlett-Packard Co
9 * Stephane Eranian <eranian@hpl.hp.com>
10 * David Mosberger <davidm@hpl.hp.com>
11 * Copyright (C) 2000, 2002-2003 Intel Co
12 * Asit Mallick <asit.k.mallick@intel.com>
13 * Suresh Siddha <suresh.b.siddha@intel.com>
14 * Kenneth Chen <kenneth.w.chen@intel.com>
15 * Fenghua Yu <fenghua.yu@intel.com>
16 *
17 * 00/08/23 Asit Mallick <asit.k.mallick@intel.com> TLB handling for SMP
18 * 00/12/20 David Mosberger-Tang <davidm@hpl.hp.com> DTLB/ITLB handler now
19 * uses virtual PT.
20 */
21 /*
22 * This file defines the interruption vector table used by the CPU.
23 * It does not include one entry per possible cause of interruption.
24 *
25 * The first 20 entries of the table contain 64 bundles each while the
26 * remaining 48 entries contain only 16 bundles each.
27 *
28 * The 64 bundles are used to allow inlining the whole handler for critical
29 * interruptions like TLB misses.
30 *
31 * For each entry, the comment is as follows:
32 *
33 *    // 0x1c00 Entry 7 (size 64 bundles) Data Key Miss (12,51)
34 *    entry offset ----/     /         /                  /  /
35 *    entry number ---------/         /                  /  /
36 *    size of the entry -------------/                  /  /
37 *    vector name -------------------------------------/  /
38 *    interruptions triggering this vector ----------------/
39 *
40 * The table is 32KB in size and must be aligned on 32KB boundary.
41 * (The CPU ignores the 15 lower bits of the address)
42 *
43 * Table is based upon EAS2.6 (Oct 1999)
44 */
46 #include <linux/config.h>
48 #include <asm/asmmacro.h>
49 #include <asm/break.h>
50 #include <asm/ia32.h>
51 #include <asm/kregs.h>
52 #include <asm/offsets.h>
53 #include <asm/pgtable.h>
54 #include <asm/processor.h>
55 #include <asm/ptrace.h>
56 #include <asm/system.h>
57 #include <asm/thread_info.h>
58 #include <asm/unistd.h>
59 #ifdef XEN
60 #include <xen/errno.h>
61 #else
62 #include <asm/errno.h>
63 #endif
65 #if 1
66 # define PSR_DEFAULT_BITS psr.ac
67 #else
68 # define PSR_DEFAULT_BITS 0
69 #endif
71 #if 0
72 /*
73 * This lets you track the last eight faults that occurred on the CPU.
74 * Make sure ar.k2 isn't needed for something else before enabling this...
75 */
76 # define DBG_FAULT(i) \
77 mov r16=ar.k2;; \
78 shl r16=r16,8;; \
79 add r16=(i),r16;; \
80 mov ar.k2=r16
81 #else
82 # define DBG_FAULT(i)
83 #endif
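// (When enabled, each fault shifts ar.k2 left by 8 bits and adds the
// vector number, so ar.k2 holds the last eight vector numbers, one per
// byte, with the most recent in the low byte.)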
85 #define MINSTATE_VIRT /* needed by minstate.h */
86 #include "minstate.h"
88 #define FAULT(n) \
89 mov r19=n; /* prepare to save predicates */ \
90 mov r31=pr; \
91 br.sptk.many dispatch_to_fault_handler
93 #define FAULT_OR_REFLECT(n) \
94 mov r20=cr.ipsr; \
95 mov r19=n; /* prepare to save predicates */ \
96 mov r31=pr;; \
97 extr.u r20=r20,IA64_PSR_CPL0_BIT,2;; \
98 cmp.ne p6,p0=r0,r20; /* cpl != 0? */ \
99 (p6) br.dptk.many dispatch_reflection; \
100 br.sptk.few dispatch_to_fault_handler
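// Rough C sketch of the FAULT_OR_REFLECT decision above (illustrative
// only; the helper names are the dispatch labels in this file):
//
//	cpl = (cr_ipsr >> IA64_PSR_CPL0_BIT) & 3;
//	if (cpl != 0)
//		dispatch_reflection(n);		/* fault came from the guest */
//	else
//		dispatch_to_fault_handler(n);	/* Xen's own fault */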
102 .section .text.ivt,"ax"
104 .align 32768 // align on 32KB boundary
105 .global ia64_ivt
106 ia64_ivt:
107 //////////////////////////////////////////////////////////////////////////
108 // 0x0000 Entry 0 (size 64 bundles) VHPT Translation (8,20,47)
109 ENTRY(vhpt_miss)
110 DBG_FAULT(0)
111 #ifdef XEN
112 FAULT(0)
113 #else
114 /*
115 * The VHPT vector is invoked when the TLB entry for the virtual
116 * page table is missing. This happens only as a result of a
117 * previous (the "original") TLB miss, which may either be caused
118 * by an instruction fetch or a data access (or non-access).
119 *
120 * What we do here is normal TLB miss handling for the _original_
121 * miss, followed by inserting the TLB entry for the virtual page
122 * table page that the VHPT walker was attempting to access. The
123 * latter gets inserted as long as both L1 and L2 have valid
124 * mappings for the faulting address. The TLB entry for the
125 * original miss gets inserted only if the L3 entry indicates
126 * that the page is present.
127 *
128 * do_page_fault gets invoked in the following cases:
129 * - the faulting virtual address uses unimplemented address bits
130 * - the faulting virtual address has no L1, L2, or L3 mapping
131 */
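// Illustrative C-level sketch of the walk performed below (assumed
// helper names; the region-5 vs. swapper_pg_dir base selection and the
// VHPT re-insert are done in the assembly itself):
//
//	pgd = pgd_base + pgd_index(ifa);	/* L1 lookup */
//	if (*pgd == 0) goto page_fault;
//	pmd = page(*pgd) + pmd_index(ifa);	/* L2 lookup */
//	if (*pmd == 0) goto page_fault;
//	pte = page(*pmd) + pte_index(ifa);	/* L3 lookup */
//	if (!(*pte & _PAGE_P)) goto page_fault;
//	itc(*pte);				/* insert original translation */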
132 mov r16=cr.ifa // get address that caused the TLB miss
133 #ifdef CONFIG_HUGETLB_PAGE
134 movl r18=PAGE_SHIFT
135 mov r25=cr.itir
136 #endif
137 ;;
138 rsm psr.dt // use physical addressing for data
139 mov r31=pr // save the predicate registers
140 mov r19=IA64_KR(PT_BASE) // get page table base address
141 shl r21=r16,3 // shift bit 60 into sign bit
142 shr.u r17=r16,61 // get the region number into r17
143 ;;
144 shr r22=r21,3
145 #ifdef CONFIG_HUGETLB_PAGE
146 extr.u r26=r25,2,6
147 ;;
148 cmp.ne p8,p0=r18,r26
149 sub r27=r26,r18
150 ;;
151 (p8) dep r25=r18,r25,2,6
152 (p8) shr r22=r22,r27
153 #endif
154 ;;
155 cmp.eq p6,p7=5,r17 // is IFA pointing into region 5?
156 shr.u r18=r22,PGDIR_SHIFT // get bits 33-63 of faulting address
157 ;;
158 (p7) dep r17=r17,r19,(PAGE_SHIFT-3),3 // put region number bits in place
160 srlz.d
161 LOAD_PHYSICAL(p6, r19, swapper_pg_dir) // region 5 is rooted at
162 // swapper_pg_dir
164 .pred.rel "mutex", p6, p7
165 (p6) shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT
166 (p7) shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT-3
167 ;;
168 (p6) dep r17=r18,r19,3,(PAGE_SHIFT-3) // r17=PTA + IFA(33,42)*8
169 (p7) dep r17=r18,r17,3,(PAGE_SHIFT-6) // r17=PTA +
170 // (((IFA(61,63) << 7) |
171 // IFA(33,39))*8)
172 cmp.eq p7,p6=0,r21 // unused address bits all zero?
173 shr.u r18=r22,PMD_SHIFT // shift L2 index into position
174 ;;
175 ld8 r17=[r17] // fetch the L1 entry (may be 0)
176 ;;
177 (p7) cmp.eq p6,p7=r17,r0 // was L1 entry NULL?
178 dep r17=r18,r17,3,(PAGE_SHIFT-3) // compute address of L2 page
179 // table entry
180 ;;
181 (p7) ld8 r20=[r17] // fetch the L2 entry (may be 0)
182 shr.u r19=r22,PAGE_SHIFT // shift L3 index into position
183 ;;
184 (p7) cmp.eq.or.andcm p6,p7=r20,r0 // was L2 entry NULL?
185 dep r21=r19,r20,3,(PAGE_SHIFT-3) // compute address of L3 page
186 // table entry
187 ;;
188 (p7) ld8 r18=[r21] // read the L3 PTE
189 mov r19=cr.isr // cr.isr bit 0 tells us if
190 // this is an insn miss
191 ;;
192 (p7) tbit.z p6,p7=r18,_PAGE_P_BIT // page present bit cleared?
193 mov r22=cr.iha // get the VHPT address that
194 // caused the TLB miss
195 ;; // avoid RAW on p7
196 (p7) tbit.nz.unc p10,p11=r19,32 // is it an instruction TLB
197 // miss?
198 dep r23=0,r20,0,PAGE_SHIFT // clear low bits to get page
199 // address
200 ;;
201 (p10) itc.i r18 // insert the instruction TLB
202 // entry
203 (p11) itc.d r18 // insert the data TLB entry
204 (p6) br.cond.spnt.many page_fault // handle bad address/page not
205 // present (page fault)
206 mov cr.ifa=r22
208 #ifdef CONFIG_HUGETLB_PAGE
209 (p8) mov cr.itir=r25 // change to default page-size
210 // for VHPT
211 #endif
213 /*
214 * Now compute and insert the TLB entry for the virtual page table.
215 * We never execute in a page table page so there is no need to set
216 * the exception deferral bit.
217 */
218 adds r24=__DIRTY_BITS_NO_ED|_PAGE_PL_0|_PAGE_AR_RW,r23
219 ;;
220 (p7) itc.d r24
221 ;;
222 #ifdef CONFIG_SMP
223 /*
224 * Tell the assembler's dependency-violation checker that the above
225 * "itc" instructions cannot possibly affect the following loads:
226 */
227 dv_serialize_data
229 /*
230 * Re-check the L2 and L3 pagetable entries. If they changed, we may have
231 * received a ptc.g between reading the pagetable and the "itc".
232 * If so, flush the entry we inserted and retry.
233 */
234 ld8 r25=[r21] // read L3 PTE again
235 ld8 r26=[r17] // read L2 entry again
236 ;;
237 cmp.ne p6,p7=r26,r20 // did L2 entry change
238 mov r27=PAGE_SHIFT<<2
239 ;;
240 (p6) ptc.l r22,r27 // purge PTE page translation
241 (p7) cmp.ne.or.andcm p6,p7=r25,r18 // did L3 PTE change
242 ;;
243 (p6) ptc.l r16,r27 // purge translation
244 #endif
246 mov pr=r31,-1 // restore predicate registers
247 rfi
248 #endif
249 END(vhpt_miss)
251 .org ia64_ivt+0x400
252 //////////////////////////////////////////////////////////////////////////
253 // 0x0400 Entry 1 (size 64 bundles) ITLB (21)
254 ENTRY(itlb_miss)
255 DBG_FAULT(1)
256 #ifdef XEN
257 mov r16 = cr.ifa
258 mov r31 = pr
259 ;;
260 extr.u r17=r16,59,5
261 ;;
262 /* If address belongs to VMM, go to alt tlb handler */
263 cmp.eq p6,p0=0x1e,r17
264 (p6) br.cond.spnt late_alt_itlb_miss
265 ;;
266 cmp.eq p6,p0=0x1d,r17
267 (p6) br.cond.spnt late_alt_itlb_miss
268 ;;
269 mov pr = r31, 0x1ffff
270 ;;
271 #ifdef VHPT_GLOBAL
272 br.cond.sptk fast_tlb_miss_reflect
273 ;;
274 #endif
275 #endif
276 /*
277 * The ITLB handler accesses the L3 PTE via the virtually mapped linear
278 * page table. If a nested TLB miss occurs, we switch into physical
279 * mode, walk the page table, and then re-execute the L3 PTE read
280 * and go on normally after that.
281 */
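// (Illustrative: cr.iha holds thash(ifa), which with the linear page
// table layout is the virtual address of ifa's L3 PTE, so a single ld8
// fetches the PTE directly; if that load itself misses, the nested TLB
// miss handler at entry 5 runs and returns to label 1 below.)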
282 mov r16=cr.ifa // get virtual address
283 mov r29=b0 // save b0
284 mov r31=pr // save predicates
285 .itlb_fault:
286 mov r17=cr.iha // get virtual address of L3 PTE
287 movl r30=1f // load nested fault
288 // continuation point
289 ;;
290 1: ld8 r18=[r17] // read L3 PTE
291 ;;
292 mov b0=r29
293 tbit.z p6,p0=r18,_PAGE_P_BIT // page present bit cleared?
294 (p6) br.cond.spnt page_fault
295 ;;
296 itc.i r18
297 ;;
298 #ifdef CONFIG_SMP
299 /*
300 * Tell the assembler's dependency-violation checker that the above
301 * "itc" instructions cannot possibly affect the following loads:
302 */
303 dv_serialize_data
305 ld8 r19=[r17] // read L3 PTE again and see if same
306 mov r20=PAGE_SHIFT<<2 // setup page size for purge
307 ;;
308 cmp.ne p7,p0=r18,r19
309 ;;
310 (p7) ptc.l r16,r20
311 #endif
312 mov pr=r31,-1
313 rfi
314 END(itlb_miss)
316 .org ia64_ivt+0x0800
317 //////////////////////////////////////////////////////////////////////////
318 // 0x0800 Entry 2 (size 64 bundles) DTLB (9,48)
319 ENTRY(dtlb_miss)
320 DBG_FAULT(2)
321 #ifdef XEN
322 mov r16=cr.ifa // get virtual address
323 mov r31=pr
324 ;;
325 extr.u r17=r16,59,5
326 ;;
327 cmp.eq p6,p0=0x1e,r17 // if the address belongs to VMM, go
328 // to the alternate tlb handler
329 (p6) br.cond.spnt late_alt_dtlb_miss
330 ;;
331 cmp.eq p6,p0=0x1d,r17
332 (p6) br.cond.spnt late_alt_dtlb_miss
333 ;;
334 #if VHPT_ENABLED
335 mov r30=cr.ipsr // XXX TODO optimization
336 mov r28=cr.iip
337 mov r17=cr.isr
338 ;;
340 extr.u r18 = r30, IA64_PSR_CPL0_BIT, 2 // extract psr.cpl
341 ;;
342 cmp.ne p6, p0 = r0, r18 // cpl == 0?
343 (p6) br.cond.sptk 2f
345 tbit.nz p7,p0=r17,IA64_ISR_SP_BIT // is speculation bit on?
346 ;;
347 (p7) br.cond.spnt 2f
349 // Is the faulting iip in the vmm area?
350 // -- check bits [59:58]
351 // -- if 00 or 11: guest
352 // -- if 01 or 10: vmm
353 extr.u r19 = r28, 58, 2
354 ;;
355 cmp.eq p10, p0 = 0x0, r19
356 (p10) br.cond.sptk 2f
357 cmp.eq p11, p0 = 0x3, r19
358 (p11) br.cond.sptk 2f
360 // Is the faulting address in the identity mapping area?
361 // It must be either 0xf000... or 0xe8000...
362 extr.u r20 = r16, 59, 5
363 ;;
364 cmp.eq p12, p0 = 0x1e, r20 // (0xf0 >> 3) = 0x1e
365 (p12) br.cond.spnt 1f
366 cmp.eq p0, p13 = 0x1d, r20 // (0xe8 >> 3) = 0x1d
367 (p13) br.cond.sptk 2f
369 1:
370 movl r24=PAGE_KERNEL // xen identity mapping area.
371 movl r25=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
372 ;;
373 shr.u r26=r16,55 // move address bit 59 to bit 4
374 and r25=r25,r16 // clear ed, reserved bits, and PTE control bits
375 ;;
376 and r26=0x10,r26 // bit 4=address-bit(59)
377 ;;
378 or r25=r25,r24 // insert PTE control bits into r25
379 ;;
380 or r25=r25,r26 // set bit 4 (uncached) if the access was to
381 // region 6
382 ;;
383 itc.d r25 // insert the TLB entry
384 mov pr=r31,-1
385 rfi
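// (Bit-59 trick above, illustratively: 0xf000... >> 55 leaves bit 4
// clear, while 0xe8000... >> 55 leaves bit 4 set, so OR-ing the 0x10
// mask into the PTE selects the uncacheable attribute exactly for the
// 0xe8... alias of the identity mapping.)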
387 2:
388 #endif
389 #ifdef VHPT_GLOBAL
390 // br.cond.sptk page_fault
391 br.cond.sptk fast_tlb_miss_reflect
392 ;;
393 #endif
394 mov r29=b0 // save b0
395 #else
396 /*
397 * The DTLB handler accesses the L3 PTE via the virtually mapped linear
398 * page table. If a nested TLB miss occurs, we switch into physical
399 * mode, walk the page table, and then re-execute the L3 PTE read
400 * and go on normally after that.
401 */
402 mov r16=cr.ifa // get virtual address
403 mov r29=b0 // save b0
404 mov r31=pr // save predicates
405 #endif
406 dtlb_fault:
407 mov r17=cr.iha // get virtual address of L3 PTE
408 movl r30=1f // load nested fault
409 // continuation point
410 ;;
411 1: ld8 r18=[r17] // read L3 PTE
412 ;;
413 mov b0=r29
414 tbit.z p6,p0=r18,_PAGE_P_BIT // page present bit cleared?
415 (p6) br.cond.spnt page_fault
416 ;;
417 itc.d r18
418 ;;
419 #ifdef CONFIG_SMP
420 /*
421 * Tell the assembler's dependency-violation checker that the above
422 * "itc" instructions cannot possibly affect the following loads:
423 */
424 dv_serialize_data
426 ld8 r19=[r17] // read L3 PTE again and see if same
427 mov r20=PAGE_SHIFT<<2 // setup page size for purge
428 ;;
429 cmp.ne p7,p0=r18,r19
430 ;;
431 (p7) ptc.l r16,r20
432 #endif
433 mov pr=r31,-1
434 rfi
435 END(dtlb_miss)
437 .org ia64_ivt+0x0c00
438 //////////////////////////////////////////////////////////////////////////
439 // 0x0c00 Entry 3 (size 64 bundles) Alt ITLB (19)
440 ENTRY(alt_itlb_miss)
441 DBG_FAULT(3)
442 #ifdef XEN
443 mov r16=cr.ifa // get address that caused the TLB miss
444 mov r31=pr
445 ;;
446 late_alt_itlb_miss:
447 mov r21=cr.ipsr
448 movl r17=PAGE_KERNEL
449 movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
450 ;;
451 #else
452 mov r16=cr.ifa // get address that caused the TLB miss
453 movl r17=PAGE_KERNEL
454 mov r21=cr.ipsr
455 movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
456 mov r31=pr
457 ;;
458 #endif
459 #ifdef CONFIG_DISABLE_VHPT
460 shr.u r22=r16,61 // get the region number into r22
461 ;;
462 cmp.gt p8,p0=6,r22 // user mode
463 ;;
464 (p8) thash r17=r16
465 ;;
466 (p8) mov cr.iha=r17
467 (p8) mov r29=b0 // save b0
468 (p8) br.cond.dptk .itlb_fault
469 #endif
470 extr.u r23=r21,IA64_PSR_CPL0_BIT,2 // extract psr.cpl
471 and r19=r19,r16 // clear ed, reserved bits, and PTE control bits
472 #ifdef XEN
473 shr.u r18=r16,55 // move address bit 59 to bit 4
474 ;;
475 and r18=0x10,r18 // bit 4=address-bit(59)
476 #else
477 shr.u r18=r16,57 // move address bit 61 to bit 4
478 ;;
479 andcm r18=0x10,r18 // bit 4=~address-bit(61)
480 #endif
481 cmp.ne p8,p0=r0,r23 // psr.cpl != 0?
482 or r19=r17,r19 // insert PTE control bits into r19
483 ;;
484 or r19=r19,r18 // set bit 4 (uncached) if the access was to
485 // region 6
486 (p8) br.cond.spnt page_fault
487 ;;
488 itc.i r19 // insert the TLB entry
489 mov pr=r31,-1
490 rfi
491 END(alt_itlb_miss)
493 .org ia64_ivt+0x1000
494 //////////////////////////////////////////////////////////////////////////
495 // 0x1000 Entry 4 (size 64 bundles) Alt DTLB (7,46)
496 ENTRY(alt_dtlb_miss)
497 DBG_FAULT(4)
498 #ifdef XEN
499 mov r16=cr.ifa // get address that caused the TLB miss
500 mov r31=pr
501 ;;
502 late_alt_dtlb_miss:
503 mov r20=cr.isr
504 movl r17=PAGE_KERNEL
505 mov r21=cr.ipsr
506 movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
507 ;;
508 #endif
509 #ifdef CONFIG_DISABLE_VHPT
510 shr.u r22=r16,61 // get the region into r22
511 ;;
512 cmp.gt p8,p0=6,r22 // access to region 0-5
513 ;;
514 (p8) thash r17=r16
515 ;;
516 (p8) mov cr.iha=r17
517 (p8) mov r29=b0 // save b0
518 (p8) br.cond.dptk dtlb_fault
519 #endif
520 extr.u r23=r21,IA64_PSR_CPL0_BIT,2 // extract psr.cpl
521 and r22=IA64_ISR_CODE_MASK,r20 // get the isr.code field
522 tbit.nz p6,p7=r20,IA64_ISR_SP_BIT // is speculation bit on?
523 #ifdef XEN
524 shr.u r18=r16,55 // move address bit 59 to bit 4
525 and r19=r19,r16 // clear ed, reserved bits, and
526 // PTE control bits
527 tbit.nz p9,p0=r20,IA64_ISR_NA_BIT // is non-access bit on?
528 ;;
529 and r18=0x10,r18 // bit 4=address-bit(59)
530 #else
531 shr.u r18=r16,57 // move address bit 61 to bit 4
532 and r19=r19,r16 // clear ed, reserved bits, and
533 // PTE control bits
534 tbit.nz p9,p0=r20,IA64_ISR_NA_BIT // is non-access bit on?
535 ;;
536 andcm r18=0x10,r18 // bit 4=~address-bit(61)
537 #endif
538 cmp.ne p8,p0=r0,r23
539 (p9) cmp.eq.or.andcm p6,p7=IA64_ISR_CODE_LFETCH,r22 // check isr.code field
540 (p8) br.cond.spnt page_fault
541 #ifdef XEN
542 ;;
543 #ifdef CONFIG_VIRTUAL_FRAME_TABLE
544 shr r22=r16,56 // Test for the address of virtual frame_table
545 ;;
546 cmp.eq p8,p0=((VIRT_FRAME_TABLE_ADDR>>56)&0xff)-0x100,r22
547 (p8) br.cond.sptk frametable_miss ;;
548 #endif
549 // If it is not a Xen address, handle it via page_fault.
550 // Note that 0xf000 (cached) and 0xe800 (uncached) addresses
551 // should be OK.
552 extr.u r22=r16,59,5
553 ;;
554 cmp.eq p8,p0=0x1e,r22
555 (p8) br.cond.spnt 1f
556 ;;
557 cmp.ne p8,p0=0x1d,r22
558 (p8) br.cond.sptk page_fault
559 ;;
560 1:
561 #endif
563 dep r21=-1,r21,IA64_PSR_ED_BIT,1
564 or r19=r19,r17 // insert PTE control bits into r19
565 ;;
566 or r19=r19,r18 // set bit 4 (uncached) if the access was to
567 // region 6
568 (p6) mov cr.ipsr=r21
569 ;;
570 (p7) itc.d r19 // insert the TLB entry
571 mov pr=r31,-1
572 rfi
573 END(alt_dtlb_miss)
575 #ifdef CONFIG_VIRTUAL_FRAME_TABLE
576 GLOBAL_ENTRY(frametable_miss)
577 rsm psr.dt // switch to using physical data addressing
578 movl r24=(frametable_pg_dir-PAGE_OFFSET) // r24=__pa(frametable_pg_dir)
579 ;;
580 srlz.d
581 extr.u r17=r16,PGDIR_SHIFT,(PAGE_SHIFT-3)
582 ;;
583 shladd r24=r17,3,r24 // r24=&pgd[pgd_offset(addr)]
584 ;;
585 ld8 r24=[r24] // r24=pgd[pgd_offset(addr)]
586 extr.u r18=r16,PMD_SHIFT,(PAGE_SHIFT-3) // r18=pmd_offset
587 ;;
588 cmp.eq p6,p7=0,r24 // pgd present?
589 shladd r24=r18,3,r24 // r24=&pmd[pmd_offset(addr)]
590 ;;
591 (p7) ld8 r24=[r24] // r24=pmd[pmd_offset(addr)]
592 extr.u r19=r16,PAGE_SHIFT,(PAGE_SHIFT-3)// r19=pte_offset
593 (p6) br.spnt.few frametable_fault
594 ;;
595 cmp.eq p6,p7=0,r24 // pmd present?
596 shladd r24=r19,3,r24 // r24=&pte[pte_offset(addr)]
597 ;;
598 (p7) ld8 r24=[r24] // r24=pte[pte_offset(addr)]
599 mov r25=0x700|(PAGE_SHIFT<<2) // key=7
600 (p6) br.spnt.few frametable_fault
601 ;;
602 mov cr.itir=r25
603 ssm psr.dt // switch to using virtual data addressing
604 tbit.z p6,p7=r24,_PAGE_P_BIT // pte present?
605 ;;
606 (p7) itc.d r24 // install updated PTE
607 (p6) br.spnt.few frametable_fault // page present bit cleared?
608 ;;
609 mov pr=r31,-1 // restore predicate registers
610 rfi
611 END(frametable_miss)
613 ENTRY(frametable_fault)
614 ssm psr.dt // switch to using virtual data addressing
615 mov r18=cr.iip
616 movl r19=ia64_frametable_probe
617 ;;
618 cmp.eq p6,p7=r18,r19 // is the faulting address ia64_frametable_probe?
619 mov r8=0 // assumes that 'probe.r' uses r8
620 dep r21=-1,r21,IA64_PSR_RI_BIT+1,1 // return to next instruction in
621 // bundle 2
622 ;;
623 (p6) mov cr.ipsr=r21
624 mov r19=4 // FAULT(4)
625 (p7) br.spnt.few dispatch_to_fault_handler
626 ;;
627 mov pr=r31,-1
628 rfi
629 END(frametable_fault)
631 GLOBAL_ENTRY(ia64_frametable_probe)
632 {
633 probe.r r8=r32,0 // destination register must be r8
634 nop.f 0x0
635 br.ret.sptk.many b0 // this instruction must be in bundle 2
636 }
637 END(ia64_frametable_probe)
638 #endif /* CONFIG_VIRTUAL_FRAME_TABLE */
640 .org ia64_ivt+0x1400
641 //////////////////////////////////////////////////////////////////////////
642 // 0x1400 Entry 5 (size 64 bundles) Data nested TLB (6,45)
643 ENTRY(nested_dtlb_miss)
644 DBG_FAULT(5)
645 #ifdef XEN
646 mov b0=r30
647 br.sptk.many b0 // return to the continuation point
648 ;;
649 #else
650 /*
651 * In the absence of kernel bugs, we get here when the virtually
652 * mapped linear page table is accessed non-speculatively (e.g.,
653 * in the Dirty-bit, Instruction Access-bit, or Data Access-bit
654 * faults). If the DTLB entry for the virtual page table is missing,
655 * a nested TLB miss fault is triggered and control is transferred
656 * to this point. When this happens, we lookup the pte for the
657 * faulting address by walking the page table in physical mode
658 * and return to the continuation point passed in register r30
659 * (or call page_fault if the address is not mapped).
660 *
661 * Input: r16: faulting address
662 * r29: saved b0
663 * r30: continuation address
664 * r31: saved pr
665 *
666 * Output: r17: physical address of L3 PTE of faulting address
667 * r29: saved b0
668 * r30: continuation address
669 * r31: saved pr
670 *
671 * Clobbered: b0, r18, r19, r21, psr.dt (cleared)
672 */
673 rsm psr.dt // switch to using physical data
674 // addressing
675 mov r19=IA64_KR(PT_BASE) // get the page table base address
676 shl r21=r16,3 // shift bit 60 into sign bit
677 ;;
678 shr.u r17=r16,61 // get the region number into r17
679 ;;
680 cmp.eq p6,p7=5,r17 // is faulting address in region 5?
681 shr.u r18=r16,PGDIR_SHIFT // get bits 33-63 of faulting address
682 ;;
683 (p7) dep r17=r17,r19,(PAGE_SHIFT-3),3 // put region number bits in place
685 srlz.d
686 LOAD_PHYSICAL(p6, r19, swapper_pg_dir) // region 5 is rooted at
687 // swapper_pg_dir
689 .pred.rel "mutex", p6, p7
690 (p6) shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT
691 (p7) shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT-3
692 ;;
693 (p6) dep r17=r18,r19,3,(PAGE_SHIFT-3) // r17=PTA + IFA(33,42)*8
694 (p7) dep r17=r18,r17,3,(PAGE_SHIFT-6) // r17=PTA + (((IFA(61,63) << 7) |
695 // IFA(33,39))*8)
696 cmp.eq p7,p6=0,r21 // unused address bits all zeroes?
697 shr.u r18=r16,PMD_SHIFT // shift L2 index into position
698 ;;
699 ld8 r17=[r17] // fetch the L1 entry (may be 0)
700 ;;
701 (p7) cmp.eq p6,p7=r17,r0 // was L1 entry NULL?
702 dep r17=r18,r17,3,(PAGE_SHIFT-3) // compute address of L2 page table
703 // entry
704 ;;
705 (p7) ld8 r17=[r17] // fetch the L2 entry (may be 0)
706 shr.u r19=r16,PAGE_SHIFT // shift L3 index into position
707 ;;
708 (p7) cmp.eq.or.andcm p6,p7=r17,r0 // was L2 entry NULL?
709 dep r17=r19,r17,3,(PAGE_SHIFT-3) // compute address of L3 page table
710 // entry
711 (p6) br.cond.spnt page_fault
712 mov b0=r30
713 br.sptk.many b0 // return to continuation point
714 #endif
715 END(nested_dtlb_miss)
717 .org ia64_ivt+0x1800
718 //////////////////////////////////////////////////////////////////////////
719 // 0x1800 Entry 6 (size 64 bundles) Instruction Key Miss (24)
720 ENTRY(ikey_miss)
721 DBG_FAULT(6)
722 #ifdef XEN
723 FAULT_OR_REFLECT(6)
724 #else
725 FAULT(6)
726 #endif
727 END(ikey_miss)
729 //----------------------------------------------------------------
730 // call do_page_fault (predicates are in r31, psr.dt may be off,
731 // r16 is faulting address)
732 #ifdef XEN
733 GLOBAL_ENTRY(page_fault)
734 #else
735 ENTRY(page_fault)
736 #endif
737 ssm psr.dt
738 ;;
739 srlz.i
740 ;;
741 SAVE_MIN_WITH_COVER
742 #ifdef XEN
743 alloc r15=ar.pfs,0,0,4,0
744 mov out0=cr.ifa
745 mov out1=cr.isr
746 mov out3=cr.itir
747 #else
748 alloc r15=ar.pfs,0,0,3,0
749 mov out0=cr.ifa
750 mov out1=cr.isr
751 #endif
752 adds r3=8,r2 // set up second base pointer
753 ;;
754 ssm psr.ic | PSR_DEFAULT_BITS
755 ;;
756 srlz.i // guarantee that interruption
757 // collection is on
758 ;;
759 (p15) ssm psr.i // restore psr.i
760 movl r14=ia64_leave_kernel
761 ;;
762 SAVE_REST
763 mov rp=r14
764 ;;
765 adds out2=16,r12 // out2 = pointer to pt_regs
766 br.call.sptk.many b6=ia64_do_page_fault // ignore return address
767 END(page_fault)
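// The four out registers set up above imply a handler of roughly this
// shape (assumed prototype, for illustration only):
//	void ia64_do_page_fault(unsigned long ifa, unsigned long isr,
//				struct pt_regs *regs, unsigned long itir);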
769 .org ia64_ivt+0x1c00
770 //////////////////////////////////////////////////////////////////////////
771 // 0x1c00 Entry 7 (size 64 bundles) Data Key Miss (12,51)
772 ENTRY(dkey_miss)
773 DBG_FAULT(7)
774 #ifdef XEN
775 FAULT_OR_REFLECT(7)
776 #else
777 FAULT(7)
778 #endif
779 END(dkey_miss)
781 .org ia64_ivt+0x2000
782 //////////////////////////////////////////////////////////////////////////
783 // 0x2000 Entry 8 (size 64 bundles) Dirty-bit (54)
784 ENTRY(dirty_bit)
785 DBG_FAULT(8)
786 #ifdef XEN
787 mov r20=cr.ipsr
788 mov r31=pr
789 ;;
790 extr.u r20=r20,IA64_PSR_CPL0_BIT,2
791 ;;
792 mov r19=8 // prepare to save predicates
793 cmp.eq p6,p0=r0,r20 // cpl == 0?
794 (p6) br.sptk.few dispatch_to_fault_handler
795 // If shadow mode is not enabled, reflect the fault.
796 movl r22=THIS_CPU(cpu_kr)+IA64_KR_CURRENT_OFFSET
797 ;;
798 ld8 r22=[r22]
799 ;;
800 add r22=IA64_VCPU_DOMAIN_OFFSET,r22
801 ;;
802 ld8 r22=[r22] // read domain
803 ;;
804 add r22=IA64_DOMAIN_SHADOW_BITMAP_OFFSET,r22
805 ;;
806 ld8 r22=[r22]
807 ;;
808 cmp.eq p6,p0=r0,r22 // !shadow_bitmap ?
809 (p6) br.dptk.many dispatch_reflection
811 SAVE_MIN_WITH_COVER
812 alloc r14=ar.pfs,0,0,4,0
813 mov out0=cr.ifa
814 mov out1=cr.itir
815 mov out2=cr.isr
816 adds out3=16,sp
818 ssm psr.ic | PSR_DEFAULT_BITS
819 ;;
820 srlz.i // guarantee that interruption
821 // collection is on
822 ;;
823 (p15) ssm psr.i // restore psr.i
824 adds r3=8,r2 // set up second base pointer
825 ;;
826 SAVE_REST
827 movl r14=ia64_leave_kernel
828 ;;
829 mov rp=r14
830 br.call.sptk.many b6=ia64_shadow_fault
831 #else
832 /*
833 * What we do here is to simply turn on the dirty bit in the PTE.
834 * We need to update both the page-table and the TLB entry. To
835 * efficiently access the PTE, we address it through the virtual
836 * page table. Most likely, the TLB entry for the relevant virtual
837 * page table page is still present in the TLB so we can normally
838 * do this without additional TLB misses. In case the necessary
839 * virtual page table TLB entry isn't present, we take a nested
840 * TLB miss hit where we look up the physical address of the L3
841 * PTE and then continue at label 1 below.
842 */
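// Illustrative C equivalent of the SMP path below (assumed helpers):
//
//	old_pte = *pte;
//	new_pte = old_pte | _PAGE_D | _PAGE_A;
//	if (cmpxchg(pte, old_pte, new_pte) == old_pte)
//		itc_d(new_pte);			/* install in the TLB */
//	if (*pte != new_pte)			/* raced with a ptc.g */
//		ptc_l(ifa, PAGE_SHIFT << 2);	/* purge what we inserted */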
843 mov r16=cr.ifa // get the address that caused the
844 // fault
845 movl r30=1f // load continuation point in case
846 // of nested fault
847 ;;
848 thash r17=r16 // compute virtual address of L3 PTE
849 mov r29=b0 // save b0 in case of nested fault
850 mov r31=pr // save pr
851 #ifdef CONFIG_SMP
852 mov r28=ar.ccv // save ar.ccv
853 ;;
854 1: ld8 r18=[r17]
855 ;; // avoid RAW on r18
856 mov ar.ccv=r18 // set compare value for cmpxchg
857 or r25=_PAGE_D|_PAGE_A,r18 // set the dirty and accessed bits
858 ;;
859 cmpxchg8.acq r26=[r17],r25,ar.ccv
860 mov r24=PAGE_SHIFT<<2
861 ;;
862 cmp.eq p6,p7=r26,r18
863 ;;
864 (p6) itc.d r25 // install updated PTE
865 ;;
866 /*
867 * Tell the assembler's dependency-violation checker that the above
868 * "itc" instructions cannot possibly affect the following loads:
869 */
870 dv_serialize_data
872 ld8 r18=[r17] // read PTE again
873 ;;
874 cmp.eq p6,p7=r18,r25 // is it the same as the newly installed PTE?
875 ;;
876 (p7) ptc.l r16,r24
877 mov b0=r29 // restore b0
878 mov ar.ccv=r28
879 #else
880 ;;
881 1: ld8 r18=[r17]
882 ;; // avoid RAW on r18
883 or r18=_PAGE_D|_PAGE_A,r18 // set the dirty and accessed bits
884 mov b0=r29 // restore b0
885 ;;
886 st8 [r17]=r18 // store back updated PTE
887 itc.d r18 // install updated PTE
888 #endif
889 mov pr=r31,-1 // restore pr
890 rfi
891 #endif
892 END(dirty_bit)
894 .org ia64_ivt+0x2400
895 //////////////////////////////////////////////////////////////////////////
896 // 0x2400 Entry 9 (size 64 bundles) Instruction Access-bit (27)
897 ENTRY(iaccess_bit)
898 DBG_FAULT(9)
899 #ifdef XEN
900 mov r16=cr.isr
901 mov r17=cr.ifa
902 mov r31=pr
903 mov r19=9
904 mov r20=0x2400
905 br.sptk.many fast_access_reflect;;
906 #else
907 // Like Entry 8, except for instruction access
908 mov r16=cr.ifa // get the address that caused the
909 // fault
910 movl r30=1f // load continuation point in case
911 // of nested fault
912 mov r31=pr // save predicates
913 #ifdef CONFIG_ITANIUM
914 /*
915 * Erratum 10 (IFA may contain incorrect address) has "NoFix" status.
916 */
917 mov r17=cr.ipsr
918 ;;
919 mov r18=cr.iip
920 tbit.z p6,p0=r17,IA64_PSR_IS_BIT // IA64 instruction set?
921 ;;
922 (p6) mov r16=r18 // if so, use cr.iip instead of cr.ifa
923 #endif /* CONFIG_ITANIUM */
924 ;;
925 thash r17=r16 // compute virtual address of L3 PTE
926 mov r29=b0 // save b0 in case of nested fault
927 #ifdef CONFIG_SMP
928 mov r28=ar.ccv // save ar.ccv
929 ;;
930 1: ld8 r18=[r17]
931 ;;
932 mov ar.ccv=r18 // set compare value for cmpxchg
933 or r25=_PAGE_A,r18 // set the accessed bit
934 ;;
935 cmpxchg8.acq r26=[r17],r25,ar.ccv
936 mov r24=PAGE_SHIFT<<2
937 ;;
938 cmp.eq p6,p7=r26,r18
939 ;;
940 (p6) itc.i r25 // install updated PTE
941 ;;
942 /*
943 * Tell the assembler's dependency-violation checker that the above
944 * "itc" instructions cannot possibly affect the following loads:
945 */
946 dv_serialize_data
948 ld8 r18=[r17] // read PTE again
949 ;;
950 cmp.eq p6,p7=r18,r25 // is it the same as the newly installed PTE?
951 ;;
952 (p7) ptc.l r16,r24
953 mov b0=r29 // restore b0
954 mov ar.ccv=r28
955 #else /* !CONFIG_SMP */
956 ;;
957 1: ld8 r18=[r17]
958 ;;
959 or r18=_PAGE_A,r18 // set the accessed bit
960 mov b0=r29 // restore b0
961 ;;
962 st8 [r17]=r18 // store back updated PTE
963 itc.i r18 // install updated PTE
964 #endif /* !CONFIG_SMP */
965 mov pr=r31,-1
966 rfi
967 #endif
968 END(iaccess_bit)
970 .org ia64_ivt+0x2800
971 //////////////////////////////////////////////////////////////////////////
972 // 0x2800 Entry 10 (size 64 bundles) Data Access-bit (15,55)
973 ENTRY(daccess_bit)
974 DBG_FAULT(10)
975 #ifdef XEN
976 mov r16=cr.isr
977 mov r17=cr.ifa
978 mov r31=pr
979 mov r19=10
980 mov r20=0x2800
981 br.sptk.many fast_access_reflect
982 ;;
983 #else
984 // Like Entry 8, except for data access
985 mov r16=cr.ifa // get the address that caused the
986 // fault
987 movl r30=1f // load continuation point in case
988 // of nested fault
989 ;;
990 thash r17=r16 // compute virtual address of L3 PTE
991 mov r31=pr
992 mov r29=b0 // save b0 in case of nested fault
993 #ifdef CONFIG_SMP
994 mov r28=ar.ccv // save ar.ccv
995 ;;
996 1: ld8 r18=[r17]
997 ;; // avoid RAW on r18
998 mov ar.ccv=r18 // set compare value for cmpxchg
999 or r25=_PAGE_A,r18 // set the accessed bit
1000 ;;
1001 cmpxchg8.acq r26=[r17],r25,ar.ccv
1002 mov r24=PAGE_SHIFT<<2
1003 ;;
1004 cmp.eq p6,p7=r26,r18
1005 ;;
1006 (p6) itc.d r25 // install updated PTE
1007 /*
1008 * Tell the assembler's dependency-violation checker that the above
1009 * "itc" instructions cannot possibly affect the following loads:
1010 */
1011 dv_serialize_data
1012 ;;
1013 ld8 r18=[r17] // read PTE again
1014 ;;
1015 cmp.eq p6,p7=r18,r25 // is it the same as the newly installed PTE?
1016 ;;
1017 (p7) ptc.l r16,r24
1018 mov ar.ccv=r28
1019 #else
1020 ;;
1021 1: ld8 r18=[r17]
1022 ;; // avoid RAW on r18
1023 or r18=_PAGE_A,r18 // set the accessed bit
1024 ;;
1025 st8 [r17]=r18 // store back updated PTE
1026 itc.d r18 // install updated PTE
1027 #endif
1028 mov b0=r29 // restore b0
1029 mov pr=r31,-1
1030 rfi
1031 #endif
1032 END(daccess_bit)
1034 .org ia64_ivt+0x2c00
1035 //////////////////////////////////////////////////////////////////////////
1036 // 0x2c00 Entry 11 (size 64 bundles) Break instruction (33)
1037 ENTRY(break_fault)
1038 /*
1039 * The streamlined system call entry/exit paths only save/restore
1040 * the initial part of pt_regs. This implies that the callers of
1041 * system-calls must adhere to the normal procedure calling
1042 * conventions.
1044 * Registers to be saved & restored:
1045 * CR registers: cr.ipsr, cr.iip, cr.ifs
1046 * AR registers: ar.unat, ar.pfs, ar.rsc, ar.rnat, ar.bspstore,
1047 * ar.fpsr
1048 * others: pr, b0, b6, loadrs, r1, r11, r12, r13, r15
1049 * Registers to be restored only:
1050 * r8-r11: output value from the system call.
1052 * During system call exit, scratch registers (including r15) are
1053 * modified/cleared to prevent leaking bits from kernel to user
1054 * level.
1055 */
1056 DBG_FAULT(11)
1057 #ifdef XEN
1058 mov r16=cr.isr
1059 mov r17=cr.iim
1060 mov r31=pr
1061 ;;
1062 cmp.eq p7,p0=r17,r0
1063 (p7) br.spnt.few dispatch_break_fault
1064 ;;
1065 #ifdef CRASH_DEBUG
1066 // A panic can occur before domain0 is created. In such cases,
1067 // referencing XSI_PSR_IC causes nested_dtlb_miss.
1068 movl r18=CDB_BREAK_NUM
1069 ;;
1070 cmp.eq p7,p0=r17,r18
1071 ;;
1072 (p7) br.spnt.few dispatch_break_fault
1073 ;;
1074 #endif
1075 movl r18=THIS_CPU(current_psr_ic_addr)
1076 ;;
1077 ld8 r18=[r18]
1078 ;;
1079 ld4 r19=[r18]
1080 ;;
1081 cmp.eq p7,p0=r0,r17 // is this a pseudo-cover?
1082 (p7) br.spnt.many dispatch_privop_fault
1083 ;;
1084 // If vpsr.ic is off, we have a hyperprivop. A hyperprivop is
1085 // hand-coded assembly with psr.ic off which means it can make
1086 // no calls, cannot use r1-r15, and it can have no memory accesses
1087 // unless they are to pinned addresses!
1088 cmp4.eq p7,p0=r0,r19
1089 (p7) br.sptk.many fast_hyperprivop
1090 ;;
1091 movl r22=THIS_CPU(cpu_kr)+IA64_KR_CURRENT_OFFSET
1092 ;;
1093 ld8 r22 = [r22]
1094 ;;
1095 adds r22=IA64_VCPU_BREAKIMM_OFFSET,r22
1096 ;;
1097 ld4 r23=[r22];;
1098 cmp4.eq p6,p7=r23,r17 // Xen-reserved breakimm?
1099 (p6) br.spnt.many dispatch_break_fault
1100 ;;
1101 br.sptk.many fast_break_reflect
1102 ;;
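// Decision summary for the XEN break path above (illustrative):
//	cr.iim == 0				-> dispatch_break_fault
//	(CRASH_DEBUG) cr.iim == CDB_BREAK_NUM	-> dispatch_break_fault
//	vpsr.ic == 0 (hyperprivop)		-> fast_hyperprivop
//	cr.iim == vcpu's breakimm (hypercall)	-> dispatch_break_fault
//	anything else				-> fast_break_reflect (to guest)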
1103 #else /* !XEN */
1104 movl r16=THIS_CPU(cpu_kr)+IA64_KR_CURRENT_OFFSET;;
1105 ld8 r16=[r16]
1106 mov r17=cr.iim
1107 mov r18=__IA64_BREAK_SYSCALL
1108 mov r21=ar.fpsr
1109 mov r29=cr.ipsr
1110 mov r19=b6
1111 mov r25=ar.unat
1112 mov r27=ar.rsc
1113 mov r26=ar.pfs
1114 mov r28=cr.iip
1115 #ifndef XEN
1116 mov r31=pr // prepare to save predicates
1117 #endif
1118 mov r20=r1
1119 ;;
1120 adds r16=IA64_TASK_THREAD_ON_USTACK_OFFSET,r16
1121 cmp.eq p0,p7=r18,r17 // is this a system call?
1122 // (p7 <- false, if so)
1123 (p7) br.cond.spnt non_syscall
1124 ;;
1125 ld1 r17=[r16] // load current->thread.on_ustack flag
1126 st1 [r16]=r0 // clear current->thread.on_ustack flag
1127 add r1=-IA64_TASK_THREAD_ON_USTACK_OFFSET,r16
1128 // set r1 for
1129 // MINSTATE_START_SAVE_MIN_VIRT
1130 ;;
1131 invala
1133 /* adjust return address so we skip over the break instruction: */
1135 extr.u r8=r29,41,2 // extract ei field from cr.ipsr
1136 ;;
1137 cmp.eq p6,p7=2,r8 // ipsr.ei==2?
1138 mov r2=r1 // setup r2 for ia64_syscall_setup
1139 ;;
1140 (p6) mov r8=0 // clear ei to 0
1141 (p6) adds r28=16,r28 // switch cr.iip to next bundle
1142 // cr.ipsr.ei wrapped
1143 (p7) adds r8=1,r8 // increment ei to next slot
1144 ;;
1145 cmp.eq pKStk,pUStk=r0,r17 // are we in kernel mode already?
1146 dep r29=r8,r29,41,2 // insert new ei into cr.ipsr
1147 ;;
1149 // switch from user to kernel RBS:
1150 MINSTATE_START_SAVE_MIN_VIRT
1151 br.call.sptk.many b7=ia64_syscall_setup
1152 ;;
1153 MINSTATE_END_SAVE_MIN_VIRT // switch to bank 1
1154 ssm psr.ic | PSR_DEFAULT_BITS
1155 ;;
1156 srlz.i // guarantee that interruption
1157 // collection is on
1158 mov r3=NR_syscalls - 1
1159 ;;
1160 (p15) ssm psr.i // restore psr.i
1161 // p10==true means there are more than 8 out registers or r15's NaT is set
1162 (p10) br.cond.spnt.many ia64_ret_from_syscall
1163 ;;
1164 movl r16=sys_call_table
1166 adds r15=-1024,r15 // r15 contains the syscall number --
1167 // subtract 1024 from it
1168 movl r2=ia64_ret_from_syscall
1169 ;;
1170 shladd r20=r15,3,r16 // r20 = sys_call_table +
1171 // 8*(syscall-1024)
1172 cmp.leu p6,p7=r15,r3 // (syscall > 0 && syscall < 1024 +
1173 // NR_syscalls) ?
1174 mov rp=r2 // set the real return addr
1175 ;;
1176 (p6) ld8 r20=[r20] // load address of syscall entry point
1177 (p7) movl r20=sys_ni_syscall
1179 add r2=TI_FLAGS+IA64_TASK_SIZE,r13
1180 ;;
1181 ld4 r2=[r2] // r2 = current_thread_info()->flags
1182 ;;
1183 and r2=_TIF_SYSCALL_TRACEAUDIT,r2 // mask trace or audit
1184 ;;
1185 cmp.eq p8,p0=r2,r0
1186 mov b6=r20
1187 ;;
1188 (p8) br.call.sptk.many b6=b6 // ignore this return addr
1189 br.cond.sptk ia64_trace_syscall
1190 // NOT REACHED
1191 #endif
1192 END(break_fault)
1194 .org ia64_ivt+0x3000
1195 //////////////////////////////////////////////////////////////////////////
1196 // 0x3000 Entry 12 (size 64 bundles) External Interrupt (4)
1197 ENTRY(interrupt)
1198 DBG_FAULT(12)
1199 mov r31=pr // prepare to save predicates
1200 ;;
1201 #ifdef XEN
1202 mov r30=cr.ivr // pass cr.ivr as first arg
1203 // FIXME: this is a hack... use cpuinfo.ksoftirqd because it's
1204 // not used anywhere else and we need a place to stash ivr, and
1205 // there are no registers available unused by SAVE_MIN/REST
1206 movl r29=THIS_CPU(cpu_info)+IA64_CPUINFO_KSOFTIRQD_OFFSET
1207 ;;
1208 st8 [r29]=r30
1209 ;;
1210 movl r28=slow_interrupt
1211 ;;
1212 mov r29=rp
1213 ;;
1214 mov rp=r28
1215 ;;
1216 br.cond.sptk.many fast_tick_reflect
1217 ;;
1218 slow_interrupt:
1219 mov rp=r29;;
1220 #endif
1221 SAVE_MIN_WITH_COVER // uses r31; defines r2 and r3
1222 ssm psr.ic | PSR_DEFAULT_BITS
1223 ;;
1224 adds r3=8,r2 // set up second base pointer for SAVE_REST
1225 srlz.i // ensure everybody knows psr.ic is back on
1226 ;;
1227 SAVE_REST
1228 ;;
1229 alloc r14=ar.pfs,0,0,2,0 // must be first in an insn group
1230 #ifdef XEN
1231 movl out0=THIS_CPU(cpu_info)+IA64_CPUINFO_KSOFTIRQD_OFFSET;;
1232 ld8 out0=[out0];;
1233 #else
1234 mov out0=cr.ivr // pass cr.ivr as first arg
1235 #endif
1236 add out1=16,sp // pass pointer to pt_regs as second arg
1237 #ifndef XEN
1238 ;;
1239 srlz.d // make sure we see the effect of cr.ivr
1240 #endif
1241 movl r14=ia64_leave_kernel
1242 ;;
1243 mov rp=r14
1244 br.call.sptk.many b6=ia64_handle_irq
1245 END(interrupt)
1247 .org ia64_ivt+0x3400
1248 //////////////////////////////////////////////////////////////////////////
1249 // 0x3400 Entry 13 (size 64 bundles) Reserved
1250 DBG_FAULT(13)
1251 FAULT(13)
1253 #ifdef XEN
1254 // There is no particular reason for this code to be here, other
1255 // than that there happens to be space here that would go unused
1256 // otherwise. If this fault ever gets "unreserved", simply move
1257 // the following code to a more suitable spot...
1259 GLOBAL_ENTRY(dispatch_break_fault)
1260 SAVE_MIN_WITH_COVER
1261 ;;
1262 dispatch_break_fault_post_save:
1263 alloc r14=ar.pfs,0,0,4,0 // now it's safe (must be first in insn group!)
1264 mov out0=cr.ifa
1265 adds out1=16,sp
1266 mov out2=cr.isr // FIXME: pity to make this slow access twice
1267 mov out3=cr.iim // FIXME: pity to make this slow access twice
1269 ssm psr.ic | PSR_DEFAULT_BITS
1270 ;;
1271 srlz.i // guarantee that interruption collection is on
1272 ;;
1273 (p15) ssm psr.i // restore psr.i
1274 adds r3=8,r2 // set up second base pointer
1275 ;;
1276 SAVE_REST
1277 movl r14=ia64_leave_kernel
1278 ;;
1279 mov rp=r14
1280 // br.sptk.many ia64_prepare_handle_break // TODO: why commented out?
1281 br.call.sptk.many b6=ia64_handle_break
1282 END(dispatch_break_fault)
1283 #endif
1285 .org ia64_ivt+0x3800
1286 //////////////////////////////////////////////////////////////////////////
1287 // 0x3800 Entry 14 (size 64 bundles) Reserved
1288 DBG_FAULT(14)
1289 FAULT(14)
1291 #ifndef XEN
1292 /*
1293 * There is no particular reason for this code to be here, other
1294 * than that there happens to be space here that would go unused
1295 * otherwise. If this fault ever gets "unreserved", simply move
1296 * the following code to a more suitable spot...
1298 * ia64_syscall_setup() is a separate subroutine so that it can
1299 * allocate stacked registers, letting it safely demine any
1300 * potential NaT values from the input registers.
1302 * On entry:
1303 * - executing on bank 0 or bank 1 register set (doesn't matter)
1304 * - r1: stack pointer
1305 * - r2: current task pointer
1306 * - r3: preserved
1307 * - r11: original contents (saved ar.pfs to be saved)
1308 * - r12: original contents (sp to be saved)
1309 * - r13: original contents (tp to be saved)
1310 * - r15: original contents (syscall # to be saved)
1311 * - r18: saved bsp (after switching to kernel stack)
1312 * - r19: saved b6
1313 * - r20: saved r1 (gp)
1314 * - r21: saved ar.fpsr
1315 * - r22: kernel's register backing store base (krbs_base)
1316 * - r23: saved ar.bspstore
1317 * - r24: saved ar.rnat
1318 * - r25: saved ar.unat
1319 * - r26: saved ar.pfs
1320 * - r27: saved ar.rsc
1321 * - r28: saved cr.iip
1322 * - r29: saved cr.ipsr
1323 * - r31: saved pr
1324 * - b0: original contents (to be saved)
1325 * On exit:
1326 * - executing on bank 1 registers
1327 * - psr.ic enabled, interrupts restored
1328 * - p10: TRUE if syscall is invoked with more than 8 out
1329 * registers or r15's Nat is true
1330 * - r1: kernel's gp
1331 * - r3: preserved (same as on entry)
1332 * - r8: -EINVAL if p10 is true
1333 * - r12: points to kernel stack
1334 * - r13: points to current task
1335 * - p15: TRUE if interrupts need to be re-enabled
1336 * - ar.fpsr: set to kernel settings
1337 */
1338 GLOBAL_ENTRY(ia64_syscall_setup)
1339 #ifndef XEN
1340 #if PT(B6) != 0
1341 # error This code assumes that b6 is the first field in pt_regs.
1342 #endif
1343 #endif
1344 st8 [r1]=r19 // save b6
1345 add r16=PT(CR_IPSR),r1 // initialize first base pointer
1346 add r17=PT(R11),r1 // initialize second base pointer
1347 ;;
1348 alloc r19=ar.pfs,8,0,0,0 // ensure in0-in7 are writable
1349 st8 [r16]=r29,PT(AR_PFS)-PT(CR_IPSR) // save cr.ipsr
1350 tnat.nz p8,p0=in0
1352 st8.spill [r17]=r11,PT(CR_IIP)-PT(R11) // save r11
1353 tnat.nz p9,p0=in1
1354 (pKStk) mov r18=r0 // make sure r18 isn't NaT
1355 ;;
1357 st8 [r16]=r26,PT(CR_IFS)-PT(AR_PFS) // save ar.pfs
1358 st8 [r17]=r28,PT(AR_UNAT)-PT(CR_IIP) // save cr.iip
1359 mov r28=b0 // save b0 (2 cyc)
1360 ;;
1362 st8 [r17]=r25,PT(AR_RSC)-PT(AR_UNAT) // save ar.unat
1363 dep r19=0,r19,38,26 // clear all bits but 0..37 [I0]
1364 (p8) mov in0=-1
1365 ;;
1367 st8 [r16]=r19,PT(AR_RNAT)-PT(CR_IFS) // store ar.pfs.pfm in cr.ifs
1368 extr.u r11=r19,7,7 // I0 // get sol of ar.pfs
1369 and r8=0x7f,r19 // A // get sof of ar.pfs
1371 st8 [r17]=r27,PT(AR_BSPSTORE)-PT(AR_RSC)// save ar.rsc
1372 tbit.nz p15,p0=r29,IA64_PSR_I_BIT // I0
1373 (p9) mov in1=-1
1374 ;;
1376 (pUStk) sub r18=r18,r22 // r18=RSE.ndirty*8
1377 tnat.nz p10,p0=in2
1378 add r11=8,r11
1379 ;;
1380 (pKStk) adds r16=PT(PR)-PT(AR_RNAT),r16 // skip over ar_rnat field
1381 (pKStk) adds r17=PT(B0)-PT(AR_BSPSTORE),r17 // skip over ar_bspstore field
1382 tnat.nz p11,p0=in3
1383 ;;
1384 (p10) mov in2=-1
1385 tnat.nz p12,p0=in4 // [I0]
1386 (p11) mov in3=-1
1387 ;;
1388 (pUStk) st8 [r16]=r24,PT(PR)-PT(AR_RNAT) // save ar.rnat
1389 (pUStk) st8 [r17]=r23,PT(B0)-PT(AR_BSPSTORE) // save ar.bspstore
1390 shl r18=r18,16 // compute ar.rsc to be used
1391 // for "loadrs"
1392 ;;
1393 st8 [r16]=r31,PT(LOADRS)-PT(PR) // save predicates
1394 st8 [r17]=r28,PT(R1)-PT(B0) // save b0
1395 tnat.nz p13,p0=in5 // [I0]
1396 ;;
1397 st8 [r16]=r18,PT(R12)-PT(LOADRS) // save ar.rsc value for
1398 // "loadrs"
1399 st8.spill [r17]=r20,PT(R13)-PT(R1) // save original r1
1400 (p12) mov in4=-1
1401 ;;
1403 .mem.offset 0,0; st8.spill [r16]=r12,PT(AR_FPSR)-PT(R12) // save r12
1404 .mem.offset 8,0; st8.spill [r17]=r13,PT(R15)-PT(R13) // save r13
1405 (p13) mov in5=-1
1406 ;;
1407 st8 [r16]=r21,PT(R8)-PT(AR_FPSR) // save ar.fpsr
1408 tnat.nz p14,p0=in6
1409 cmp.lt p10,p9=r11,r8 // frame size can't be more than local+8
1410 ;;
1411 stf8 [r16]=f1 // ensure pt_regs.r8 != 0
1412 // (see handle_syscall_error)
1413 (p9) tnat.nz p10,p0=r15
1414 adds r12=-16,r1 // switch to kernel memory stack (with 16
1415 // bytes of scratch)
1417 st8.spill [r17]=r15 // save r15
1418 tnat.nz p8,p0=in7
1419 nop.i 0
1421 mov r13=r2 // establish `current'
1422 movl r1=__gp // establish kernel global pointer
1423 ;;
1424 (p14) mov in6=-1
1425 (p8) mov in7=-1
1426 nop.i 0
1428 cmp.eq pSys,pNonSys=r0,r0 // set pSys=1, pNonSys=0
1429 movl r17=FPSR_DEFAULT
1430 ;;
1431 mov.m ar.fpsr=r17 // set ar.fpsr to kernel default value
1432 (p10) mov r8=-EINVAL
1433 br.ret.sptk.many b7
1434 END(ia64_syscall_setup)
1435 #endif /* XEN */
1437 .org ia64_ivt+0x3c00
1438 //////////////////////////////////////////////////////////////////////////
1439 // 0x3c00 Entry 15 (size 64 bundles) Reserved
1440 DBG_FAULT(15)
1441 FAULT(15)
1443 #ifndef XEN
1444 /*
1445 * Squatting in this space ...
1447 * This special case dispatcher for illegal operation faults
1448 * allows preserved registers to be modified through a callback
1449 * function (asm only) that is handed back from the fault handler
1450 * in r8. Up to three arguments can be passed to the callback
1451 * function by returning an aggregate with the callback as its
1452 * first element, followed by the arguments.
1453 */
1454 ENTRY(dispatch_illegal_op_fault)
1455 SAVE_MIN_WITH_COVER
1456 ssm psr.ic | PSR_DEFAULT_BITS
1457 ;;
1458 srlz.i // guarantee that interruption collection is on
1459 ;;
1460 (p15) ssm psr.i // restore psr.i
1461 adds r3=8,r2 // set up second base pointer for SAVE_REST
1462 ;;
1463 alloc r14=ar.pfs,0,0,1,0 // must be first in insn group
1464 mov out0=ar.ec
1465 ;;
1466 SAVE_REST
1467 ;;
1468 br.call.sptk.many rp=ia64_illegal_op_fault
1469 .ret0: ;;
1470 alloc r14=ar.pfs,0,0,3,0 // must be first in insn group
1471 mov out0=r9
1472 mov out1=r10
1473 mov out2=r11
1474 movl r15=ia64_leave_kernel
1475 ;;
1476 mov rp=r15
1477 mov b6=r8
1478 ;;
1479 cmp.ne p6,p0=0,r8
1480 (p6) br.call.dpnt.many b6=b6 // call returns to ia64_leave_kernel
1481 br.sptk.many ia64_leave_kernel
1482 END(dispatch_illegal_op_fault)
1483 #endif
1485 .org ia64_ivt+0x4000
1486 //////////////////////////////////////////////////////////////////////////
1487 // 0x4000 Entry 16 (size 64 bundles) Reserved
1488 DBG_FAULT(16)
1489 FAULT(16)
1491 #ifdef XEN
1492 // There is no particular reason for this code to be here, other
1493 // than that there happens to be space here that would go unused
1494 // otherwise. If this fault ever gets "unreserved", simply move
1495 // the following code to a more suitable spot...
1497 ENTRY(dispatch_privop_fault)
1498 SAVE_MIN_WITH_COVER
1499 ;;
1500 alloc r14=ar.pfs,0,0,4,0 // now it's safe (must be first in
1501 // insn group!)
1502 mov out0=cr.ifa
1503 adds out1=16,sp
1504 mov out2=cr.isr // FIXME: pity to make this slow access twice
1505 mov out3=cr.itir
1507 ssm psr.ic | PSR_DEFAULT_BITS
1508 ;;
1509 srlz.i // guarantee that interruption
1510 // collection is on
1511 ;;
1512 (p15) ssm psr.i // restore psr.i
1513 adds r3=8,r2 // set up second base pointer
1514 ;;
1515 SAVE_REST
1516 movl r14=ia64_leave_kernel
1517 ;;
1518 mov rp=r14
1519 // br.sptk.many ia64_prepare_handle_privop // TODO: why commented out?
1520 br.call.sptk.many b6=ia64_handle_privop
1521 END(dispatch_privop_fault)
1522 #endif
1525 .org ia64_ivt+0x4400
1526 //////////////////////////////////////////////////////////////////////////
1527 // 0x4400 Entry 17 (size 64 bundles) Reserved
1528 DBG_FAULT(17)
1529 FAULT(17)
1531 #ifndef XEN
1532 ENTRY(non_syscall)
1533 SAVE_MIN_WITH_COVER
1535 // There is no particular reason for this code to be here, other
1536 // than that there happens to be space here that would go unused
1537 // otherwise. If this fault ever gets "unreserved", simply move
1538 // the following code to a more suitable spot...
1540 alloc r14=ar.pfs,0,0,2,0
1541 mov out0=cr.iim
1542 add out1=16,sp
1543 adds r3=8,r2 // set up second base pointer for SAVE_REST
1545 ssm psr.ic | PSR_DEFAULT_BITS
1546 ;;
1547 srlz.i // guarantee that interruption collection is on
1548 ;;
1549 (p15) ssm psr.i // restore psr.i
1550 movl r15=ia64_leave_kernel
1551 ;;
1552 SAVE_REST
1553 mov rp=r15
1554 ;;
1555 br.call.sptk.many b6=ia64_bad_break // avoid WAW on CFM and
1556 // ignore return addr
1557 END(non_syscall)
1558 #endif
1560 .org ia64_ivt+0x4800
1561 //////////////////////////////////////////////////////////////////////////
1562 // 0x4800 Entry 18 (size 64 bundles) Reserved
1563 DBG_FAULT(18)
1564 FAULT(18)
1566 #ifndef XEN
1567 /*
1568 * There is no particular reason for this code to be here, other
1569 * than that there happens to be space here that would go unused
1570 * otherwise. If this fault ever gets "unreserved", simply move
1571 * the following code to a more suitable spot...
1572 */
1573 ENTRY(dispatch_unaligned_handler)
1574 SAVE_MIN_WITH_COVER
1575 ;;
1576 alloc r14=ar.pfs,0,0,2,0 // now it's safe (must be first in
1577 // insn group!)
1578 mov out0=cr.ifa
1579 adds out1=16,sp
1581 ssm psr.ic | PSR_DEFAULT_BITS
1582 ;;
1583 srlz.i // guarantee that interruption
1584 // collection is on
1585 ;;
1586 (p15) ssm psr.i // restore psr.i
1587 adds r3=8,r2 // set up second base pointer
1588 ;;
1589 SAVE_REST
1590 movl r14=ia64_leave_kernel
1591 ;;
1592 mov rp=r14
1593 // br.sptk.many ia64_prepare_handle_unaligned // TODO: why commented out?
1594 br.call.sptk.many b6=ia64_handle_unaligned
1595 END(dispatch_unaligned_handler)
1596 #endif
1598 .org ia64_ivt+0x4c00
1599 //////////////////////////////////////////////////////////////////////////
1600 // 0x4c00 Entry 19 (size 64 bundles) Reserved
1601 DBG_FAULT(19)
1602 FAULT(19)
1604 /*
1605 * There is no particular reason for this code to be here, other
1606 * than that there happens to be space here that would go unused
1607 * otherwise. If this fault ever gets "unreserved", simply move
1608 * the following code to a more suitable spot...
1609 */
1611 GLOBAL_ENTRY(dispatch_to_fault_handler)
1612 /*
1613 * Input:
1614 * psr.ic: off
1615 * r19: fault vector number (e.g., 24 for General Exception)
1616 * r31: contains saved predicates (pr)
1617 */
1618 SAVE_MIN_WITH_COVER_R19
1619 alloc r14=ar.pfs,0,0,5,0
1620 mov out0=r15
1621 mov out1=cr.isr
1622 mov out2=cr.ifa
1623 mov out3=cr.iim
1624 mov out4=cr.itir
1625 ;;
1626 ssm psr.ic | PSR_DEFAULT_BITS
1627 ;;
1628 srlz.i // guarantee that interruption
1629 // collection is on
1630 ;;
1631 (p15) ssm psr.i // restore psr.i
1632 adds r3=8,r2 // set up second base pointer for
1633 // SAVE_REST
1634 ;;
1635 SAVE_REST
1636 movl r14=ia64_leave_kernel
1637 ;;
1638 mov rp=r14
1639 br.call.sptk.many b6=ia64_fault
1640 END(dispatch_to_fault_handler)
1642 //
1643 // --- End of long entries, Beginning of short entries
1644 //
1646 .org ia64_ivt+0x5000
1647 //////////////////////////////////////////////////////////////////////////
1648 // 0x5000 Entry 20 (size 16 bundles) Page Not Present (10,22,49)
1649 ENTRY(page_not_present)
1650 DBG_FAULT(20)
1651 #ifdef XEN
1652 FAULT_OR_REFLECT(20)
1653 #else
1654 mov r16=cr.ifa
1655 rsm psr.dt
1656 /*
1657 * The Linux page fault handler doesn't expect non-present pages
1658 * to be in the TLB. Flush the existing entry now, so we meet
1659 * that expectation.
1660 */
1661 mov r17=PAGE_SHIFT<<2
1662 ;;
1663 ptc.l r16,r17
1664 ;;
1665 mov r31=pr
1666 srlz.d
1667 br.sptk.many page_fault
1668 #endif
1669 END(page_not_present)
1671 .org ia64_ivt+0x5100
1672 //////////////////////////////////////////////////////////////////////////
1673 // 0x5100 Entry 21 (size 16 bundles) Key Permission (13,25,52)
1674 ENTRY(key_permission)
1675 DBG_FAULT(21)
1676 #ifdef XEN
1677 FAULT_OR_REFLECT(21)
1678 #else
1679 mov r16=cr.ifa
1680 rsm psr.dt
1681 mov r31=pr
1682 ;;
1683 srlz.d
1684 br.sptk.many page_fault
1685 #endif
1686 END(key_permission)
1688 .org ia64_ivt+0x5200
1689 //////////////////////////////////////////////////////////////////////////
1690 // 0x5200 Entry 22 (size 16 bundles) Instruction Access Rights (26)
1691 ENTRY(iaccess_rights)
1692 DBG_FAULT(22)
1693 #ifdef XEN
1694 FAULT_OR_REFLECT(22)
1695 #else
1696 mov r16=cr.ifa
1697 rsm psr.dt
1698 mov r31=pr
1699 ;;
1700 srlz.d
1701 br.sptk.many page_fault
1702 #endif
1703 END(iaccess_rights)
1705 .org ia64_ivt+0x5300
1706 //////////////////////////////////////////////////////////////////////////
1707 // 0x5300 Entry 23 (size 16 bundles) Data Access Rights (14,53)
1708 ENTRY(daccess_rights)
1709 DBG_FAULT(23)
1710 #ifdef XEN
1711 mov r31=pr
1712 ;;
1713 mov r16=cr.isr
1714 mov r17=cr.ifa
1715 mov r19=23
1716 movl r20=0x5300
1717 br.sptk.many fast_access_reflect
1718 ;;
1719 #else
1720 mov r16=cr.ifa
1721 rsm psr.dt
1722 mov r31=pr
1723 ;;
1724 srlz.d
1725 br.sptk.many page_fault
1726 #endif
1727 END(daccess_rights)
1729 .org ia64_ivt+0x5400
1730 //////////////////////////////////////////////////////////////////////////
1731 // 0x5400 Entry 24 (size 16 bundles) General Exception (5,32,34,36,38,39)
1732 ENTRY(general_exception)
1733 DBG_FAULT(24)
1734 mov r16=cr.isr
1735 mov r31=pr
1736 ;;
1737 #ifdef XEN
1738 cmp4.ge p6,p0=0x20,r16
1739 (p6) br.sptk.many dispatch_privop_fault
1740 ;;
1741 FAULT_OR_REFLECT(24)
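// Per the changeset description above: isr values up to 0x20 go to
// Xen's privileged-op emulation, while larger codes take the
// FAULT_OR_REFLECT(24) path and are reflected to the domU when the
// exception came from non-zero cpl.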
1742 #else
1743 cmp4.eq p6,p0=0,r16
1744 (p6) br.sptk.many dispatch_illegal_op_fault
1745 #endif
1746 ;;
1747 mov r19=24 // fault number
1748 br.sptk.many dispatch_to_fault_handler
1749 END(general_exception)
1751 .org ia64_ivt+0x5500
1752 //////////////////////////////////////////////////////////////////////////
1753 // 0x5500 Entry 25 (size 16 bundles) Disabled FP-Register (35)
1754 ENTRY(disabled_fp_reg)
1755 DBG_FAULT(25)
1756 #ifdef XEN
1757 #if 0 // TODO: can this be removed?
1758 mov r20=pr
1759 movl r16=0x2000000000000000
1760 movl r17=0x2000000000176b60
1761 mov r18=cr.iip
1762 mov r19=rr[r16]
1763 movl r22=0xe95d0439
1764 ;;
1765 mov pr=r0,-1
1766 ;;
1767 cmp.eq p6,p7=r22,r19
1768 ;;
1769 (p6) cmp.eq p8,p9=r17,r18
1770 (p8) br.sptk.few floating_panic
1771 ;;
1772 mov pr=r20,-1
1773 ;;
1774 #endif
1775 FAULT_OR_REFLECT(25)
1776 //floating_panic: // TODO: can this be removed?
1777 // br.sptk.many floating_panic
1778 ;;
1779 #endif
1780 rsm psr.dfh // ensure we can access fph
1781 ;;
1782 srlz.d
1783 mov r31=pr
1784 mov r19=25
1785 br.sptk.many dispatch_to_fault_handler
1786 END(disabled_fp_reg)
1788 .org ia64_ivt+0x5600
1789 //////////////////////////////////////////////////////////////////////////
1790 // 0x5600 Entry 26 (size 16 bundles) Nat Consumption (11,23,37,50)
1791 ENTRY(nat_consumption)
1792 DBG_FAULT(26)
1793 #ifdef XEN
1794 FAULT_OR_REFLECT(26)
1795 #else
1796 FAULT(26)
1797 #endif
1798 END(nat_consumption)
1800 .org ia64_ivt+0x5700
1801 //////////////////////////////////////////////////////////////////////////
1802 // 0x5700 Entry 27 (size 16 bundles) Speculation (40)
1803 ENTRY(speculation_vector)
1804 DBG_FAULT(27)
1805 #ifdef XEN
1806 // this probably need not reflect...
1807 FAULT_OR_REFLECT(27)
1808 #else
1809 /*
1810 * A [f]chk.[as] instruction needs to take the branch to the
1811 * recovery code but this part of the architecture is not
1812 * implemented in hardware on some CPUs, such as Itanium. Thus,
1813 * in general we need to emulate the behavior. IIM contains the
1814 * relative target (not yet sign extended). So after sign extending
1815 * it we simply add it to IIP. We also need to reset the EI field
1816 * of the IPSR to zero, i.e., the slot to restart into.
1818 * cr.iim contains zero_ext(imm21)
1819 */
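// Worked example (illustrative): imm21 = 0x1FFFFF encodes -1. Shifted
// left by 43, the sign bit lands in bit 63; the arithmetic shift right
// by 39 then yields 0xFFFFFFFFFFFFFFF0 = -16, i.e. one 16-byte bundle
// backwards from cr.iip.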
1820 mov r18=cr.iim
1821 ;;
1822 mov r17=cr.iip
1823 shl r18=r18,43 // put sign bit in position (43=64-21)
1824 ;;
1826 mov r16=cr.ipsr
1827 shr r18=r18,39 // sign extend (39=43-4)
1828 ;;
1830 add r17=r17,r18 // now add the offset
1831 ;;
1832 mov cr.iip=r17
1833 dep r16=0,r16,41,2 // clear EI
1834 ;;
1836 mov cr.ipsr=r16
1837 ;;
1839 rfi // and go back
1840 #endif
1841 END(speculation_vector)
1843 .org ia64_ivt+0x5800
1844 //////////////////////////////////////////////////////////////////////////
1845 // 0x5800 Entry 28 (size 16 bundles) Reserved
1846 DBG_FAULT(28)
1847 FAULT(28)
1849 .org ia64_ivt+0x5900
1850 //////////////////////////////////////////////////////////////////////////
1851 // 0x5900 Entry 29 (size 16 bundles) Debug (16,28,56)
1852 ENTRY(debug_vector)
1853 DBG_FAULT(29)
1854 #ifdef XEN
1855 FAULT_OR_REFLECT(29)
1856 #else
1857 FAULT(29)
1858 #endif
1859 END(debug_vector)
1861 .org ia64_ivt+0x5a00
1862 //////////////////////////////////////////////////////////////////////////
1863 // 0x5a00 Entry 30 (size 16 bundles) Unaligned Reference (57)
1864 ENTRY(unaligned_access)
1865 DBG_FAULT(30)
1866 #ifdef XEN
1867 FAULT_OR_REFLECT(30)
1868 #else
1869 mov r16=cr.ipsr
1870 mov r31=pr // prepare to save predicates
1871 ;;
1872 br.sptk.many dispatch_unaligned_handler
1873 #endif
1874 END(unaligned_access)
	.org ia64_ivt+0x5b00
//////////////////////////////////////////////////////////////////////////
// 0x5b00 Entry 31 (size 16 bundles) Unsupported Data Reference (57)
ENTRY(unsupported_data_reference)
	DBG_FAULT(31)
#ifdef XEN
	FAULT_OR_REFLECT(31)
#else
	FAULT(31)
#endif
END(unsupported_data_reference)

	.org ia64_ivt+0x5c00
//////////////////////////////////////////////////////////////////////////
// 0x5c00 Entry 32 (size 16 bundles) Floating-Point Fault (64)
ENTRY(floating_point_fault)
	DBG_FAULT(32)
#ifdef XEN
	FAULT_OR_REFLECT(32)
#else
	FAULT(32)
#endif
END(floating_point_fault)

	.org ia64_ivt+0x5d00
//////////////////////////////////////////////////////////////////////////
// 0x5d00 Entry 33 (size 16 bundles) Floating Point Trap (66)
ENTRY(floating_point_trap)
	DBG_FAULT(33)
#ifdef XEN
	FAULT_OR_REFLECT(33)
#else
	FAULT(33)
#endif
END(floating_point_trap)

	.org ia64_ivt+0x5e00
//////////////////////////////////////////////////////////////////////////
// 0x5e00 Entry 34 (size 16 bundles) Lower Privilege Transfer Trap (66)
ENTRY(lower_privilege_trap)
	DBG_FAULT(34)
#ifdef XEN
	FAULT_OR_REFLECT(34)
#else
	FAULT(34)
#endif
END(lower_privilege_trap)

	.org ia64_ivt+0x5f00
//////////////////////////////////////////////////////////////////////////
// 0x5f00 Entry 35 (size 16 bundles) Taken Branch Trap (68)
ENTRY(taken_branch_trap)
	DBG_FAULT(35)
#ifdef XEN
	FAULT_OR_REFLECT(35)
#else
	FAULT(35)
#endif
END(taken_branch_trap)

	.org ia64_ivt+0x6000
//////////////////////////////////////////////////////////////////////////
// 0x6000 Entry 36 (size 16 bundles) Single Step Trap (69)
ENTRY(single_step_trap)
	DBG_FAULT(36)
#ifdef XEN
	FAULT_OR_REFLECT(36)
#else
	FAULT(36)
#endif
END(single_step_trap)

	.org ia64_ivt+0x6100
//////////////////////////////////////////////////////////////////////////
// 0x6100 Entry 37 (size 16 bundles) Reserved
	DBG_FAULT(37)
	FAULT(37)

	.org ia64_ivt+0x6200
//////////////////////////////////////////////////////////////////////////
// 0x6200 Entry 38 (size 16 bundles) Reserved
	DBG_FAULT(38)
	FAULT(38)

	.org ia64_ivt+0x6300
//////////////////////////////////////////////////////////////////////////
// 0x6300 Entry 39 (size 16 bundles) Reserved
	DBG_FAULT(39)
	FAULT(39)

	.org ia64_ivt+0x6400
//////////////////////////////////////////////////////////////////////////
// 0x6400 Entry 40 (size 16 bundles) Reserved
	DBG_FAULT(40)
	FAULT(40)

	.org ia64_ivt+0x6500
//////////////////////////////////////////////////////////////////////////
// 0x6500 Entry 41 (size 16 bundles) Reserved
	DBG_FAULT(41)
	FAULT(41)

	.org ia64_ivt+0x6600
//////////////////////////////////////////////////////////////////////////
// 0x6600 Entry 42 (size 16 bundles) Reserved
	DBG_FAULT(42)
	FAULT(42)

	.org ia64_ivt+0x6700
//////////////////////////////////////////////////////////////////////////
// 0x6700 Entry 43 (size 16 bundles) Reserved
	DBG_FAULT(43)
	FAULT(43)

	.org ia64_ivt+0x6800
//////////////////////////////////////////////////////////////////////////
// 0x6800 Entry 44 (size 16 bundles) Reserved
	DBG_FAULT(44)
	FAULT(44)

	.org ia64_ivt+0x6900
//////////////////////////////////////////////////////////////////////////
// 0x6900 Entry 45 (size 16 bundles) IA-32 Exception (17,18,29,41,42,43,
//					44,58,60,61,62,72,
//					73,75,76,77)
ENTRY(ia32_exception)
	DBG_FAULT(45)
#ifdef XEN
	FAULT_OR_REFLECT(45)
#else
	FAULT(45)
#endif
END(ia32_exception)

	.org ia64_ivt+0x6a00
//////////////////////////////////////////////////////////////////////////
// 0x6a00 Entry 46 (size 16 bundles) IA-32 Intercept (30,31,59,70,71)
ENTRY(ia32_intercept)
	DBG_FAULT(46)
#ifdef XEN
	FAULT_OR_REFLECT(46)
#else
#ifdef CONFIG_IA32_SUPPORT
	mov r31=pr
	mov r16=cr.isr
	;;
	extr.u r17=r16,16,8	// get ISR.code
	mov r18=ar.eflag
	mov r19=cr.iim		// old eflag value
	;;
	cmp.ne p6,p0=2,r17
(p6)	br.cond.spnt 1f		// not a system flag fault
	xor r16=r18,r19
	;;
	extr.u r17=r16,18,1	// get the eflags.ac bit
	;;
	cmp.eq p6,p0=0,r17
(p6)	br.cond.spnt 1f		// eflags.ac bit didn't change
	;;
	mov pr=r31,-1		// restore predicate registers
	rfi

1:
#endif	// CONFIG_IA32_SUPPORT
	FAULT(46)
#endif
END(ia32_intercept)

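/*
 * The non-Xen path above special-cases one IA-32 intercept in
 * software: a system flag fault (ISR.code == 2) whose only effect was
 * toggling eflags.ac can simply resume, since ar.eflag already holds
 * the new value.  A hedged C sketch of that test (the helper name is
 * hypothetical):
 *
 *	// new_eflag = ar.eflag, old_eflag = cr.iim, bit 18 = eflags.ac
 *	int ac_toggle_intercept(unsigned long isr_code,
 *				unsigned long new_eflag, unsigned long old_eflag)
 *	{
 *		if (isr_code != 2)
 *			return 0;	// not a system flag fault -> FAULT(46)
 *		return ((new_eflag ^ old_eflag) >> 18) & 1;	// ac changed -> rfi
 *	}
 */
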
	.org ia64_ivt+0x6b00
//////////////////////////////////////////////////////////////////////////
// 0x6b00 Entry 47 (size 16 bundles) IA-32 Interrupt (74)
ENTRY(ia32_interrupt)
	DBG_FAULT(47)
#ifdef XEN
	FAULT_OR_REFLECT(47)
#else
#ifdef CONFIG_IA32_SUPPORT
	mov r31=pr
	br.sptk.many dispatch_to_ia32_handler
#else
	FAULT(47)
#endif
#endif
END(ia32_interrupt)

	.org ia64_ivt+0x6c00
//////////////////////////////////////////////////////////////////////////
// 0x6c00 Entry 48 (size 16 bundles) Reserved
	DBG_FAULT(48)
	FAULT(48)

	.org ia64_ivt+0x6d00
//////////////////////////////////////////////////////////////////////////
// 0x6d00 Entry 49 (size 16 bundles) Reserved
	DBG_FAULT(49)
	FAULT(49)

	.org ia64_ivt+0x6e00
//////////////////////////////////////////////////////////////////////////
// 0x6e00 Entry 50 (size 16 bundles) Reserved
	DBG_FAULT(50)
	FAULT(50)

	.org ia64_ivt+0x6f00
//////////////////////////////////////////////////////////////////////////
// 0x6f00 Entry 51 (size 16 bundles) Reserved
	DBG_FAULT(51)
	FAULT(51)

	.org ia64_ivt+0x7000
//////////////////////////////////////////////////////////////////////////
// 0x7000 Entry 52 (size 16 bundles) Reserved
	DBG_FAULT(52)
	FAULT(52)

	.org ia64_ivt+0x7100
//////////////////////////////////////////////////////////////////////////
// 0x7100 Entry 53 (size 16 bundles) Reserved
	DBG_FAULT(53)
	FAULT(53)

	.org ia64_ivt+0x7200
//////////////////////////////////////////////////////////////////////////
// 0x7200 Entry 54 (size 16 bundles) Reserved
	DBG_FAULT(54)
	FAULT(54)

	.org ia64_ivt+0x7300
//////////////////////////////////////////////////////////////////////////
// 0x7300 Entry 55 (size 16 bundles) Reserved
	DBG_FAULT(55)
	FAULT(55)

	.org ia64_ivt+0x7400
//////////////////////////////////////////////////////////////////////////
// 0x7400 Entry 56 (size 16 bundles) Reserved
	DBG_FAULT(56)
	FAULT(56)

	.org ia64_ivt+0x7500
//////////////////////////////////////////////////////////////////////////
// 0x7500 Entry 57 (size 16 bundles) Reserved
	DBG_FAULT(57)
	FAULT(57)

	.org ia64_ivt+0x7600
//////////////////////////////////////////////////////////////////////////
// 0x7600 Entry 58 (size 16 bundles) Reserved
	DBG_FAULT(58)
	FAULT(58)

	.org ia64_ivt+0x7700
//////////////////////////////////////////////////////////////////////////
// 0x7700 Entry 59 (size 16 bundles) Reserved
	DBG_FAULT(59)
	FAULT(59)

	.org ia64_ivt+0x7800
//////////////////////////////////////////////////////////////////////////
// 0x7800 Entry 60 (size 16 bundles) Reserved
	DBG_FAULT(60)
	FAULT(60)

	.org ia64_ivt+0x7900
//////////////////////////////////////////////////////////////////////////
// 0x7900 Entry 61 (size 16 bundles) Reserved
	DBG_FAULT(61)
	FAULT(61)

	.org ia64_ivt+0x7a00
//////////////////////////////////////////////////////////////////////////
// 0x7a00 Entry 62 (size 16 bundles) Reserved
	DBG_FAULT(62)
	FAULT(62)

	.org ia64_ivt+0x7b00
//////////////////////////////////////////////////////////////////////////
// 0x7b00 Entry 63 (size 16 bundles) Reserved
	DBG_FAULT(63)
	FAULT(63)

	.org ia64_ivt+0x7c00
//////////////////////////////////////////////////////////////////////////
// 0x7c00 Entry 64 (size 16 bundles) Reserved
	DBG_FAULT(64)
	FAULT(64)

	.org ia64_ivt+0x7d00
//////////////////////////////////////////////////////////////////////////
// 0x7d00 Entry 65 (size 16 bundles) Reserved
	DBG_FAULT(65)
	FAULT(65)

	.org ia64_ivt+0x7e00
//////////////////////////////////////////////////////////////////////////
// 0x7e00 Entry 66 (size 16 bundles) Reserved
	DBG_FAULT(66)
	FAULT(66)

	.org ia64_ivt+0x7f00
//////////////////////////////////////////////////////////////////////////
// 0x7f00 Entry 67 (size 16 bundles) Reserved
	DBG_FAULT(67)
	FAULT(67)

#ifdef XEN
	.org ia64_ivt+0x8000
GLOBAL_ENTRY(dispatch_reflection)
	/*
	 * Input:
	 *	psr.ic:	off
	 *	r19:	intr type (offset into ivt, see ia64_int.h)
	 *	r31:	contains saved predicates (pr)
	 */
	SAVE_MIN_WITH_COVER_R19
	alloc r14=ar.pfs,0,0,5,0
	mov out4=r15
	mov out0=cr.ifa
	adds out1=16,sp
	mov out2=cr.isr
	mov out3=cr.iim
//	mov out3=cr.itir	// TODO: why commented out?

	ssm psr.ic | PSR_DEFAULT_BITS
	;;
	srlz.i			// guarantee that interruption
				// collection is on
	;;
(p15)	ssm psr.i		// restore psr.i
	adds r3=8,r2		// set up second base pointer
	;;
	SAVE_REST
	movl r14=ia64_leave_kernel
	;;
	mov rp=r14
//	br.sptk.many ia64_prepare_handle_reflection	// TODO: why commented out?
	br.call.sptk.many b6=ia64_handle_reflection
END(dispatch_reflection)

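/*
 * Roughly, the out0..out4 setup above hands the C-level handler these
 * arguments (a sketch only; the authoritative prototype lives in the
 * C sources, not in this file):
 *
 *	void ia64_handle_reflection(unsigned long ifa,		// cr.ifa
 *				    struct pt_regs *regs,	// sp + 16
 *				    unsigned long isr,		// cr.isr
 *				    unsigned long iim,		// cr.iim
 *				    unsigned long vector);	// r15
 */
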
#define SAVE_MIN_COVER_DONE	DO_SAVE_MIN(,mov r30=cr.ifs,)

// same as dispatch_break_fault except cover has already been done
GLOBAL_ENTRY(dispatch_slow_hyperprivop)
	SAVE_MIN_COVER_DONE
	;;
	br.sptk.many dispatch_break_fault_post_save
END(dispatch_slow_hyperprivop)
#endif

#ifdef CONFIG_IA32_SUPPORT

	/*
	 * There is no particular reason for this code to be here, other
	 * than that there happens to be space here that would go unused
	 * otherwise.  If this fault ever gets "unreserved", simply move
	 * the following code to a more suitable spot...
	 */

	// IA32 interrupt entry point

ENTRY(dispatch_to_ia32_handler)
	SAVE_MIN
	;;
	mov r14=cr.isr
	ssm psr.ic | PSR_DEFAULT_BITS
	;;
	srlz.i			// guarantee that interruption collection is on
	;;
(p15)	ssm psr.i
	adds r3=8,r2		// Base pointer for SAVE_REST
	;;
	SAVE_REST
	;;
	mov r15=0x80
	shr r14=r14,16		// Get interrupt number
	;;
	cmp.ne p6,p0=r14,r15
(p6)	br.call.dpnt.many b6=non_ia32_syscall

	adds r14=IA64_PT_REGS_R8_OFFSET + 16,sp	// 16 byte hole per SW
						// conventions
	adds r15=IA64_PT_REGS_R1_OFFSET + 16,sp
	;;
	cmp.eq pSys,pNonSys=r0,r0	// set pSys=1, pNonSys=0
	ld8 r8=[r14]		// get r8
	;;
	st8 [r15]=r8		// save original EAX in r1 (IA32 procs
				// don't use the GP)
	;;
	alloc r15=ar.pfs,0,0,6,0	// must be first in an insn group
	;;
	ld4 r8=[r14],8		// r8 == eax (syscall number)
	mov r15=IA32_NR_syscalls
	;;
	cmp.ltu.unc p6,p7=r8,r15
	ld4 out1=[r14],8	// r9 == ecx
	;;
	ld4 out2=[r14],8	// r10 == edx
	;;
	ld4 out0=[r14]		// r11 == ebx
	adds r14=(IA64_PT_REGS_R13_OFFSET) + 16,sp
	;;
	ld4 out5=[r14],PT(R14)-PT(R13)	// r13 == ebp
	;;
	ld4 out3=[r14],PT(R15)-PT(R14)	// r14 == esi
	adds r2=TI_FLAGS+IA64_TASK_SIZE,r13
	;;
	ld4 out4=[r14]		// r15 == edi
	movl r16=ia32_syscall_table
	;;
(p6)	shladd r16=r8,3,r16	// force ni_syscall if not valid syscall number
	ld4 r2=[r2]		// r2 = current_thread_info()->flags
	;;
	ld8 r16=[r16]
	and r2=_TIF_SYSCALL_TRACEAUDIT,r2	// mask trace or audit
	;;
	mov b6=r16
	movl r15=ia32_ret_from_syscall
	cmp.eq p8,p0=r2,r0
	;;
	mov rp=r15
(p8)	br.call.sptk.many b6=b6
	br.cond.sptk ia32_trace_syscall

non_ia32_syscall:
	alloc r15=ar.pfs,0,0,2,0
	mov out0=r14		// interrupt #
	add out1=16,sp		// pointer to pt_regs
	;;			// avoid WAW on CFM
	br.call.sptk.many rp=ia32_bad_interrupt
.ret1:	movl r15=ia64_leave_kernel
	;;
	mov rp=r15
	br.ret.sptk.many rp
END(dispatch_to_ia32_handler)

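/*
 * In C terms, the syscall dispatch above is roughly the following
 * sketch (the helper is hypothetical; ia32_syscall_table and
 * IA32_NR_syscalls are the real symbols used above):
 *
 *	// eax (r8) selects the entry; out-of-range numbers are forced
 *	// to slot 0 of the table, i.e. the ni_syscall stub.
 *	typedef long (*ia32_fn)(long ebx, long ecx, long edx,
 *				long esi, long edi, long ebp);
 *	long ia32_dispatch(unsigned long eax, long ebx, long ecx, long edx,
 *			   long esi, long edi, long ebp)
 *	{
 *		extern ia32_fn ia32_syscall_table[];
 *		ia32_fn fn = ia32_syscall_table[eax < IA32_NR_syscalls ? eax : 0];
 *		return fn(ebx, ecx, edx, esi, edi, ebp);
 *	}
 */
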
#endif /* CONFIG_IA32_SUPPORT */