xen/arch/ia64/xen/ivt.S @ 10786:86e5d8458c08

[IA64] live migration

Shadow mode and live migration.

Virtualize Dirty bit.

Signed-off-by: Tristan Gingold <tristan.gingold@bull.net>
author awilliam@xenbuild.aw
date Wed Jul 26 09:36:36 2006 -0600 (2006-07-26)
parents 70ee75d5c12c
children 48d7d00e69e5
1 #ifdef XEN
2 #include <asm/debugger.h>
3 #endif
4 /*
5 * arch/ia64/kernel/ivt.S
6 *
7 * Copyright (C) 1998-2001, 2003 Hewlett-Packard Co
8 * Stephane Eranian <eranian@hpl.hp.com>
9 * David Mosberger <davidm@hpl.hp.com>
10 * Copyright (C) 2000, 2002-2003 Intel Co
11 * Asit Mallick <asit.k.mallick@intel.com>
12 * Suresh Siddha <suresh.b.siddha@intel.com>
13 * Kenneth Chen <kenneth.w.chen@intel.com>
14 * Fenghua Yu <fenghua.yu@intel.com>
15 *
16 * 00/08/23 Asit Mallick <asit.k.mallick@intel.com> TLB handling for SMP
17 * 00/12/20 David Mosberger-Tang <davidm@hpl.hp.com> DTLB/ITLB handler now uses virtual PT.
18 */
19 /*
20 * This file defines the interruption vector table used by the CPU.
21 * It does not include one entry per possible cause of interruption.
22 *
23 * The first 20 entries of the table contain 64 bundles each while the
24 * remaining 48 entries contain only 16 bundles each.
25 *
26 * The 64 bundles are used to allow inlining the whole handler for critical
27 * interruptions like TLB misses.
28 *
29 * For each entry, the comment is as follows:
30 *
31 * // 0x1c00 Entry 7 (size 64 bundles) Data Key Miss (12,51)
32 * entry offset ----/ / / / /
33 * entry number ---------/ / / /
34 * size of the entry -------------/ / /
35 * vector name -------------------------------------/ /
36 * interruptions triggering this vector ----------------------/
37 *
38 * The table is 32KB in size and must be aligned on 32KB boundary.
39 * (The CPU ignores the 15 lower bits of the address)
40 *
41 * Table is based upon EAS2.6 (Oct 1999)
42 */
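
(Reading aid, not part of the source: the entry offsets implied by the layout described above can be computed as below. A small C sketch, assuming the standard 16-byte IA-64 bundle size.)

#include <assert.h>
#include <stdint.h>

/* Offset of an IVT entry from the table base: the first 20 entries
 * are 64 bundles (64 * 16 = 0x400 bytes) each, the remaining 48 are
 * 16 bundles (0x100 bytes) each. */
static uint32_t ivt_entry_offset(unsigned entry)
{
    return entry < 20 ? entry * 0x400
                      : 20 * 0x400 + (entry - 20) * 0x100;
}

int main(void)
{
    assert(ivt_entry_offset(7)  == 0x1c00);  /* Data Key Miss, as in the example above */
    assert(ivt_entry_offset(20) == 0x5000);  /* Page Not Present */
    assert(ivt_entry_offset(67) == 0x7f00);  /* last entry; table ends at 0x8000 = 32KB */
    return 0;
}
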
44 #include <linux/config.h>
46 #include <asm/asmmacro.h>
47 #include <asm/break.h>
48 #include <asm/ia32.h>
49 #include <asm/kregs.h>
50 #include <asm/offsets.h>
51 #include <asm/pgtable.h>
52 #include <asm/processor.h>
53 #include <asm/ptrace.h>
54 #include <asm/system.h>
55 #include <asm/thread_info.h>
56 #include <asm/unistd.h>
57 #ifdef XEN
58 #include <xen/errno.h>
59 #else
60 #include <asm/errno.h>
61 #endif
63 #if 1
64 # define PSR_DEFAULT_BITS psr.ac
65 #else
66 # define PSR_DEFAULT_BITS 0
67 #endif
69 #if 0
70 /*
71 * This lets you track the last eight faults that occurred on the CPU. Make sure ar.k2 isn't
72 * needed for something else before enabling this...
73 */
74 # define DBG_FAULT(i) mov r16=ar.k2;; shl r16=r16,8;; add r16=(i),r16;; mov ar.k2=r16
75 #else
76 # define DBG_FAULT(i)
77 #endif
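
(The disabled DBG_FAULT(i) macro keeps a one-byte-per-fault history in ar.k2, newest fault in the low byte. A C model of the same ring, for illustration only:)

#include <stdint.h>
#include <stdio.h>

/* Model of DBG_FAULT(i): shift the 8-byte history left one byte and
 * record the new vector number in the low byte. */
static uint64_t dbg_fault(uint64_t k2, uint8_t vector)
{
    return (k2 << 8) | vector;
}

int main(void)
{
    uint64_t k2 = 0;
    k2 = dbg_fault(k2, 2);    /* DTLB miss   */
    k2 = dbg_fault(k2, 5);    /* nested DTLB */
    printf("fault history: %016llx\n", (unsigned long long)k2);
    return 0;
}
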
79 #define MINSTATE_VIRT /* needed by minstate.h */
80 #include "minstate.h"
82 #define FAULT(n) \
83 mov r19=n; /* prepare to save predicates */ \
84 mov r31=pr; \
85 br.sptk.many dispatch_to_fault_handler
87 #define FAULT_OR_REFLECT(n) \
88 mov r20=cr.ipsr; \
89 mov r19=n; /* prepare to save predicates */ \
90 mov r31=pr;; \
91 extr.u r20=r20,IA64_PSR_CPL0_BIT,2;; \
92 cmp.ne p6,p0=r0,r20; /* cpl != 0?*/ \
93 (p6) br.dptk.many dispatch_reflection; \
94 br.sptk.few dispatch_to_fault_handler
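
(FAULT_OR_REFLECT boils down to a privilege-level test: faults taken while the guest was running, cpl != 0, are reflected back to it; faults at cpl 0, i.e. inside Xen itself, go to the hypervisor fault handler. A minimal C restatement of that decision; the names here are illustrative, not Xen APIs:)

/* Decision made by FAULT_OR_REFLECT(n). */
enum fault_path { DISPATCH_TO_FAULT_HANDLER, DISPATCH_REFLECTION };

static enum fault_path fault_or_reflect(unsigned cpl)
{
    return cpl != 0 ? DISPATCH_REFLECTION : DISPATCH_TO_FAULT_HANDLER;
}
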
96 .section .text.ivt,"ax"
98 .align 32768 // align on 32KB boundary
99 .global ia64_ivt
100 ia64_ivt:
101 /////////////////////////////////////////////////////////////////////////////////////////
102 // 0x0000 Entry 0 (size 64 bundles) VHPT Translation (8,20,47)
103 ENTRY(vhpt_miss)
104 DBG_FAULT(0)
105 #ifdef XEN
106 FAULT(0)
107 #else
108 /*
109 * The VHPT vector is invoked when the TLB entry for the virtual page table
110 * is missing. This happens only as a result of a previous
111 * (the "original") TLB miss, which may either be caused by an instruction
112 * fetch or a data access (or non-access).
113 *
114 * What we do here is normal TLB miss handling for the _original_ miss, followed
115 * by inserting the TLB entry for the virtual page table page that the VHPT
116 * walker was attempting to access. The latter gets inserted as long
117 * as both L1 and L2 have valid mappings for the faulting address.
118 * The TLB entry for the original miss gets inserted only if
119 * the L3 entry indicates that the page is present.
120 *
121 * do_page_fault gets invoked in the following cases:
122 * - the faulting virtual address uses unimplemented address bits
123 * - the faulting virtual address has no L1, L2, or L3 mapping
124 */
125 mov r16=cr.ifa // get address that caused the TLB miss
126 #ifdef CONFIG_HUGETLB_PAGE
127 movl r18=PAGE_SHIFT
128 mov r25=cr.itir
129 #endif
130 ;;
131 rsm psr.dt // use physical addressing for data
132 mov r31=pr // save the predicate registers
133 mov r19=IA64_KR(PT_BASE) // get page table base address
134 shl r21=r16,3 // shift bit 60 into sign bit
135 shr.u r17=r16,61 // get the region number into r17
136 ;;
137 shr r22=r21,3
138 #ifdef CONFIG_HUGETLB_PAGE
139 extr.u r26=r25,2,6
140 ;;
141 cmp.ne p8,p0=r18,r26
142 sub r27=r26,r18
143 ;;
144 (p8) dep r25=r18,r25,2,6
145 (p8) shr r22=r22,r27
146 #endif
147 ;;
148 cmp.eq p6,p7=5,r17 // is IFA pointing into region 5?
149 shr.u r18=r22,PGDIR_SHIFT // get bits 33-63 of the faulting address
150 ;;
151 (p7) dep r17=r17,r19,(PAGE_SHIFT-3),3 // put region number bits in place
153 srlz.d
154 LOAD_PHYSICAL(p6, r19, swapper_pg_dir) // region 5 is rooted at swapper_pg_dir
156 .pred.rel "mutex", p6, p7
157 (p6) shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT
158 (p7) shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT-3
159 ;;
160 (p6) dep r17=r18,r19,3,(PAGE_SHIFT-3) // r17=PTA + IFA(33,42)*8
161 (p7) dep r17=r18,r17,3,(PAGE_SHIFT-6) // r17=PTA + (((IFA(61,63) << 7) | IFA(33,39))*8)
162 cmp.eq p7,p6=0,r21 // unused address bits all zeroes?
163 shr.u r18=r22,PMD_SHIFT // shift L2 index into position
164 ;;
165 ld8 r17=[r17] // fetch the L1 entry (may be 0)
166 ;;
167 (p7) cmp.eq p6,p7=r17,r0 // was L1 entry NULL?
168 dep r17=r18,r17,3,(PAGE_SHIFT-3) // compute address of L2 page table entry
169 ;;
170 (p7) ld8 r20=[r17] // fetch the L2 entry (may be 0)
171 shr.u r19=r22,PAGE_SHIFT // shift L3 index into position
172 ;;
173 (p7) cmp.eq.or.andcm p6,p7=r20,r0 // was L2 entry NULL?
174 dep r21=r19,r20,3,(PAGE_SHIFT-3) // compute address of L3 page table entry
175 ;;
176 (p7) ld8 r18=[r21] // read the L3 PTE
177 mov r19=cr.isr // cr.isr bit 0 tells us if this is an insn miss
178 ;;
179 (p7) tbit.z p6,p7=r18,_PAGE_P_BIT // page present bit cleared?
180 mov r22=cr.iha // get the VHPT address that caused the TLB miss
181 ;; // avoid RAW on p7
182 (p7) tbit.nz.unc p10,p11=r19,32 // is it an instruction TLB miss?
183 dep r23=0,r20,0,PAGE_SHIFT // clear low bits to get page address
184 ;;
185 (p10) itc.i r18 // insert the instruction TLB entry
186 (p11) itc.d r18 // insert the data TLB entry
187 (p6) br.cond.spnt.many page_fault // handle bad address/page not present (page fault)
188 mov cr.ifa=r22
190 #ifdef CONFIG_HUGETLB_PAGE
191 (p8) mov cr.itir=r25 // change to default page-size for VHPT
192 #endif
194 /*
195 * Now compute and insert the TLB entry for the virtual page table. We never
196 * execute in a page table page so there is no need to set the exception deferral
197 * bit.
198 */
199 adds r24=__DIRTY_BITS_NO_ED|_PAGE_PL_0|_PAGE_AR_RW,r23
200 ;;
201 (p7) itc.d r24
202 ;;
203 #ifdef CONFIG_SMP
204 /*
205 * Tell the assembler's dependency-violation checker that the above "itc" instructions
206 * cannot possibly affect the following loads:
207 */
208 dv_serialize_data
210 /*
211 * Re-check L2 and L3 pagetable. If they changed, we may have received a ptc.g
212 * between reading the pagetable and the "itc". If so, flush the entry we
213 * inserted and retry.
214 */
215 ld8 r25=[r21] // read L3 PTE again
216 ld8 r26=[r17] // read L2 entry again
217 ;;
218 cmp.ne p6,p7=r26,r20 // did L2 entry change
219 mov r27=PAGE_SHIFT<<2
220 ;;
221 (p6) ptc.l r22,r27 // purge PTE page translation
222 (p7) cmp.ne.or.andcm p6,p7=r25,r18 // did L3 PTE change
223 ;;
224 (p6) ptc.l r16,r27 // purge translation
225 #endif
227 mov pr=r31,-1 // restore predicate registers
228 rfi
229 #endif
230 END(vhpt_miss)
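
(The Linux-side walk above visits three levels, giving up on a NULL L1/L2 entry or a non-present L3 PTE. A simplified C model, assuming 16KB pages and 8-byte entries, and ignoring region handling and the unimplemented-address check:)

#include <stddef.h>
#include <stdint.h>

#define PAGE_SHIFT 14                        /* assumes 16KB pages */
#define NPTRS (1ULL << (PAGE_SHIFT - 3))     /* 8-byte entries per level */

/* Returns the address of the L3 PTE, or NULL with *fault set when the
 * walk fails (the asm branches to page_fault in that case). */
static uint64_t *walk(uint64_t *l1_base, uint64_t addr, int *fault)
{
    size_t l1 = (addr >> (PAGE_SHIFT + 2 * (PAGE_SHIFT - 3))) & (NPTRS - 1);
    size_t l2 = (addr >> (PAGE_SHIFT + (PAGE_SHIFT - 3))) & (NPTRS - 1);
    size_t l3 = (addr >> PAGE_SHIFT) & (NPTRS - 1);

    uint64_t *l2_base = (uint64_t *)(uintptr_t)l1_base[l1]; /* L1 entry (may be 0) */
    if (!l2_base) { *fault = 1; return NULL; }
    uint64_t *l3_base = (uint64_t *)(uintptr_t)l2_base[l2]; /* L2 entry (may be 0) */
    if (!l3_base) { *fault = 1; return NULL; }
    *fault = !(l3_base[l3] & 1);             /* _PAGE_P_BIT is bit 0 */
    return &l3_base[l3];
}
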
232 .org ia64_ivt+0x400
233 /////////////////////////////////////////////////////////////////////////////////////////
234 // 0x0400 Entry 1 (size 64 bundles) ITLB (21)
235 ENTRY(itlb_miss)
236 DBG_FAULT(1)
237 #ifdef XEN
238 mov r16 = cr.ifa
239 mov r31 = pr
240 ;;
241 extr.u r17=r16,59,5
242 ;;
243 /* If address belongs to VMM, go to alt tlb handler */
244 cmp.eq p6,p0=0x1e,r17
245 (p6) br.cond.spnt late_alt_itlb_miss
246 ;;
247 cmp.eq p6,p0=0x1d,r17
248 (p6) br.cond.spnt late_alt_itlb_miss
249 ;;
250 mov pr = r31, 0x1ffff
251 ;;
252 #ifdef VHPT_GLOBAL
253 br.cond.sptk fast_tlb_miss_reflect
254 ;;
255 #endif
256 #endif
257 /*
258 * The ITLB handler accesses the L3 PTE via the virtually mapped linear
259 * page table. If a nested TLB miss occurs, we switch into physical
260 * mode, walk the page table, and then re-execute the L3 PTE read
261 * and go on normally after that.
262 */
263 mov r16=cr.ifa // get virtual address
264 mov r29=b0 // save b0
265 mov r31=pr // save predicates
266 .itlb_fault:
267 mov r17=cr.iha // get virtual address of L3 PTE
268 movl r30=1f // load nested fault continuation point
269 ;;
270 1: ld8 r18=[r17] // read L3 PTE
271 ;;
272 mov b0=r29
273 tbit.z p6,p0=r18,_PAGE_P_BIT // page present bit cleared?
274 (p6) br.cond.spnt page_fault
275 ;;
276 itc.i r18
277 ;;
278 #ifdef CONFIG_SMP
279 /*
280 * Tell the assembler's dependency-violation checker that the above "itc" instructions
281 * cannot possibly affect the following loads:
282 */
283 dv_serialize_data
285 ld8 r19=[r17] // read L3 PTE again and see if same
286 mov r20=PAGE_SHIFT<<2 // setup page size for purge
287 ;;
288 cmp.ne p7,p0=r18,r19
289 ;;
290 (p7) ptc.l r16,r20
291 #endif
292 mov pr=r31,-1
293 rfi
294 END(itlb_miss)
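
(The CONFIG_SMP tail of this handler guards against a racing ptc.g: a remote purge may change the PTE between the load and the insert, so the PTE is re-read and the just-inserted translation purged if it no longer matches. A C sketch of the pattern; hw_insert_tlb/hw_purge_tlb are assumptions of this sketch standing in for itc/ptc.l, not real APIs:)

#include <stdint.h>

extern void hw_insert_tlb(uint64_t vaddr, uint64_t pte);
extern void hw_purge_tlb(uint64_t vaddr);

static void insert_checked(volatile uint64_t *pte_p, uint64_t vaddr)
{
    uint64_t pte = *pte_p;            /* ld8: read L3 PTE            */
    hw_insert_tlb(vaddr, pte);        /* itc.i / itc.d               */
    if (*pte_p != pte)                /* read again: did it change?  */
        hw_purge_tlb(vaddr);          /* ptc.l: flush the stale entry */
}
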
296 .org ia64_ivt+0x0800
297 /////////////////////////////////////////////////////////////////////////////////////////
298 // 0x0800 Entry 2 (size 64 bundles) DTLB (9,48)
299 ENTRY(dtlb_miss)
300 DBG_FAULT(2)
301 #ifdef XEN
302 mov r16=cr.ifa // get virtual address
303 mov r31=pr
304 ;;
305 extr.u r17=r16,59,5
306 ;;
307 /* If address belongs to VMM, go to alt tlb handler */
308 cmp.eq p6,p0=0x1e,r17
309 (p6) br.cond.spnt late_alt_dtlb_miss
310 ;;
311 cmp.eq p6,p0=0x1d,r17
312 (p6) br.cond.spnt late_alt_dtlb_miss
313 ;;
314 #if VHPT_ENABLED
315 // XXX TODO optimization
316 mov r30=cr.ipsr
317 mov r28=cr.iip
318 mov r17=cr.isr
319 ;;
321 extr.u r18 = r30, IA64_PSR_CPL0_BIT, 2 // extract psr.cpl
322 ;;
323 cmp.ne p6, p0 = r0, r18 // cpl != 0?
324 (p6) br.cond.sptk 2f
326 // is speculation bit on?
327 tbit.nz p7,p0=r17,IA64_ISR_SP_BIT
328 ;;
329 (p7) br.cond.spnt 2f
331 // Is the faulting iip in the vmm area?
332 // check [59:58] bit
333 // 00, 11: guest
334 // 01, 10: vmm
335 extr.u r19 = r28, 58, 2
336 ;;
337 cmp.eq p10, p0 = 0x0, r19
338 (p10) br.cond.sptk 2f
339 cmp.eq p11, p0 = 0x3, r19
340 (p11) br.cond.sptk 2f
342 // Is the faulting address in the identity mapping area?
343 // 0xf000... or 0xe8000...
344 extr.u r20 = r16, 59, 5
345 ;;
346 cmp.eq p12, p0 = 0x1e, r20 // (0xf0 >> 3) = 0x1e
347 (p12) br.cond.spnt 1f
348 cmp.eq p0, p13 = 0x1d, r20 // (0xe8 >> 3) = 0x1d
349 (p13) br.cond.sptk 2f
351 1:
352 // Xen identity mapping area.
353 movl r24=PAGE_KERNEL
354 movl r25=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
355 ;;
356 shr.u r26=r16,55 // move address bit 59 to bit 4
357 and r25=r25,r16 // clear ed, reserved bits, and PTE control bits
358 ;;
359 and r26=0x10,r26 // bit 4=address-bit(59)
360 ;;
361 or r25=r25,r24 // insert PTE control bits into r25
362 ;;
363 or r25=r25,r26 // set bit 4 (uncached) if the access was to region 6
364 ;;
365 itc.d r25 // insert the TLB entry
366 mov pr=r31,-1
367 rfi
369 2:
370 #endif
371 #ifdef VHPT_GLOBAL
372 // br.cond.sptk page_fault
373 br.cond.sptk fast_tlb_miss_reflect
374 ;;
375 #endif
376 mov r29=b0 // save b0
377 #else
378 /*
379 * The DTLB handler accesses the L3 PTE via the virtually mapped linear
380 * page table. If a nested TLB miss occurs, we switch into physical
381 * mode, walk the page table, and then re-execute the L3 PTE read
382 * and go on normally after that.
383 */
384 mov r16=cr.ifa // get virtual address
385 mov r29=b0 // save b0
386 mov r31=pr // save predicates
387 #endif
388 dtlb_fault:
389 mov r17=cr.iha // get virtual address of L3 PTE
390 movl r30=1f // load nested fault continuation point
391 ;;
392 1: ld8 r18=[r17] // read L3 PTE
393 ;;
394 mov b0=r29
395 tbit.z p6,p0=r18,_PAGE_P_BIT // page present bit cleared?
396 (p6) br.cond.spnt page_fault
397 ;;
398 itc.d r18
399 ;;
400 #ifdef CONFIG_SMP
401 /*
402 * Tell the assembler's dependency-violation checker that the above "itc" instructions
403 * cannot possibly affect the following loads:
404 */
405 dv_serialize_data
407 ld8 r19=[r17] // read L3 PTE again and see if same
408 mov r20=PAGE_SHIFT<<2 // setup page size for purge
409 ;;
410 cmp.ne p7,p0=r18,r19
411 ;;
412 (p7) ptc.l r16,r20
413 #endif
414 mov pr=r31,-1
415 rfi
416 END(dtlb_miss)
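
(The vmm-vs-guest test on the faulting iip in the Xen path above is just a two-bit check on bits [59:58]: 00 and 11 are guest ranges, 01 and 10 belong to the VMM. Restated in C:)

#include <stdbool.h>
#include <stdint.h>

static bool iip_in_vmm(uint64_t iip)
{
    unsigned r = (iip >> 58) & 3;
    return r == 1 || r == 2;    /* 0x0 and 0x3 are guest */
}
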
418 .org ia64_ivt+0x0c00
419 /////////////////////////////////////////////////////////////////////////////////////////
420 // 0x0c00 Entry 3 (size 64 bundles) Alt ITLB (19)
421 ENTRY(alt_itlb_miss)
422 DBG_FAULT(3)
423 #ifdef XEN
424 mov r16=cr.ifa // get address that caused the TLB miss
425 mov r31=pr
426 ;;
427 late_alt_itlb_miss:
428 mov r21=cr.ipsr
429 movl r17=PAGE_KERNEL
430 movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
431 ;;
432 #else
433 mov r16=cr.ifa // get address that caused the TLB miss
434 movl r17=PAGE_KERNEL
435 mov r21=cr.ipsr
436 movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
437 mov r31=pr
438 ;;
439 #endif
440 #ifdef CONFIG_DISABLE_VHPT
441 shr.u r22=r16,61 // get the region number into r22
442 ;;
443 cmp.gt p8,p0=6,r22 // user mode
444 ;;
445 (p8) thash r17=r16
446 ;;
447 (p8) mov cr.iha=r17
448 (p8) mov r29=b0 // save b0
449 (p8) br.cond.dptk .itlb_fault
450 #endif
451 extr.u r23=r21,IA64_PSR_CPL0_BIT,2 // extract psr.cpl
452 and r19=r19,r16 // clear ed, reserved bits, and PTE control bits
453 #ifdef XEN
454 shr.u r18=r16,55 // move address bit 59 to bit 4
455 ;;
456 and r18=0x10,r18 // bit 4=address-bit(59)
457 #else
458 shr.u r18=r16,57 // move address bit 61 to bit 4
459 ;;
460 andcm r18=0x10,r18 // bit 4=~address-bit(61)
461 #endif
462 cmp.ne p8,p0=r0,r23 // psr.cpl != 0?
463 or r19=r17,r19 // insert PTE control bits into r19
464 ;;
465 or r19=r19,r18 // set bit 4 (uncached) if the access was to region 6
466 (p8) br.cond.spnt page_fault
467 ;;
468 itc.i r19 // insert the TLB entry
469 mov pr=r31,-1
470 rfi
471 END(alt_itlb_miss)
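
(The Xen alt-miss paths build an identity-mapping PTE directly from the faulting address: keep the physical page bits, add the kernel control bits, and set attribute bit 4 (uncacheable) when address bit 59 is set, i.e. for the 0xe8... uncached window as opposed to 0xf0... cached. A C sketch; the PAGE_KERNEL value is an assumed illustrative constant:)

#include <stdint.h>

#define IA64_MAX_PHYS_BITS 50
#define PAGE_KERNEL 0x661ULL   /* assumed: P | D | A | PL0 | AR_RWX | ma=WB */

static uint64_t ident_pte(uint64_t ifa)
{
    uint64_t pte = ifa & (((1ULL << IA64_MAX_PHYS_BITS) - 1) & ~0xfffULL);
    pte |= PAGE_KERNEL;          /* insert PTE control bits        */
    pte |= (ifa >> 55) & 0x10;   /* address bit 59 -> PTE bit 4    */
    return pte;
}
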
473 .org ia64_ivt+0x1000
474 /////////////////////////////////////////////////////////////////////////////////////////
475 // 0x1000 Entry 4 (size 64 bundles) Alt DTLB (7,46)
476 ENTRY(alt_dtlb_miss)
477 DBG_FAULT(4)
478 #ifdef XEN
479 mov r16=cr.ifa // get address that caused the TLB miss
480 mov r31=pr
481 ;;
482 late_alt_dtlb_miss:
483 mov r20=cr.isr
484 movl r17=PAGE_KERNEL
485 mov r21=cr.ipsr
486 movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
487 ;;
488 #endif
489 #ifdef CONFIG_DISABLE_VHPT
490 shr.u r22=r16,61 // get the region into r22
491 ;;
492 cmp.gt p8,p0=6,r22 // access to region 0-5
493 ;;
494 (p8) thash r17=r16
495 ;;
496 (p8) mov cr.iha=r17
497 (p8) mov r29=b0 // save b0
498 (p8) br.cond.dptk dtlb_fault
499 #endif
500 extr.u r23=r21,IA64_PSR_CPL0_BIT,2 // extract psr.cpl
501 and r22=IA64_ISR_CODE_MASK,r20 // get the isr.code field
502 tbit.nz p6,p7=r20,IA64_ISR_SP_BIT // is speculation bit on?
503 #ifdef XEN
504 shr.u r18=r16,55 // move address bit 59 to bit 4
505 and r19=r19,r16 // clear ed, reserved bits, and PTE control bits
506 tbit.nz p9,p0=r20,IA64_ISR_NA_BIT // is non-access bit on?
507 ;;
508 and r18=0x10,r18 // bit 4=address-bit(59)
509 #else
510 shr.u r18=r16,57 // move address bit 61 to bit 4
511 and r19=r19,r16 // clear ed, reserved bits, and PTE control bits
512 tbit.nz p9,p0=r20,IA64_ISR_NA_BIT // is non-access bit on?
513 ;;
514 andcm r18=0x10,r18 // bit 4=~address-bit(61)
515 #endif
516 cmp.ne p8,p0=r0,r23
517 (p9) cmp.eq.or.andcm p6,p7=IA64_ISR_CODE_LFETCH,r22 // check isr.code field
518 (p8) br.cond.spnt page_fault
519 #ifdef XEN
520 ;;
521 #ifdef CONFIG_VIRTUAL_FRAME_TABLE
522 // Test for the address of virtual frame_table
523 shr r22=r16,56;;
524 cmp.eq p8,p0=((VIRT_FRAME_TABLE_ADDR>>56)&0xff)-0x100,r22
525 (p8) br.cond.sptk frametable_miss ;;
526 #endif
527 // Test for Xen address, if not handle via page_fault
528 // note that 0xf000 (cached) and 0xe800 (uncached) addresses
529 // should be OK.
530 extr.u r22=r16,59,5;;
531 cmp.eq p8,p0=0x1e,r22
532 (p8) br.cond.spnt 1f;;
533 cmp.ne p8,p0=0x1d,r22
534 (p8) br.cond.sptk page_fault ;;
535 1:
536 #endif
538 dep r21=-1,r21,IA64_PSR_ED_BIT,1
539 or r19=r19,r17 // insert PTE control bits into r19
540 ;;
541 or r19=r19,r18 // set bit 4 (uncached) if the access was to region 6
542 (p6) mov cr.ipsr=r21
543 ;;
544 (p7) itc.d r19 // insert the TLB entry
545 mov pr=r31,-1
546 rfi
547 END(alt_dtlb_miss)
548 #ifdef CONFIG_VIRTUAL_FRAME_TABLE
549 GLOBAL_ENTRY(frametable_miss)
550 rsm psr.dt // switch to using physical data addressing
551 movl r24=(frametable_pg_dir-PAGE_OFFSET) // r24=__pa(frametable_pg_dir)
552 ;;
553 srlz.d
554 extr.u r17=r16,PGDIR_SHIFT,(PAGE_SHIFT-3)
555 ;;
556 shladd r24=r17,3,r24 // r24=&pgd[pgd_offset(addr)]
557 ;;
558 ld8 r24=[r24] // r24=pgd[pgd_offset(addr)]
559 extr.u r18=r16,PMD_SHIFT,(PAGE_SHIFT-3) // r18=pmd_offset
560 ;;
561 cmp.eq p6,p7=0,r24 // pgd present?
562 shladd r24=r18,3,r24 // r24=&pmd[pmd_offset(addr)]
563 ;;
564 (p7) ld8 r24=[r24] // r24=pmd[pmd_offset(addr)]
565 extr.u r19=r16,PAGE_SHIFT,(PAGE_SHIFT-3)// r19=pte_offset
566 (p6) br.spnt.few frametable_fault
567 ;;
568 cmp.eq p6,p7=0,r24 // pmd present?
569 shladd r24=r19,3,r24 // r24=&pte[pte_offset(addr)]
570 ;;
571 (p7) ld8 r24=[r24] // r24=pte[pte_offset(addr)]
572 mov r25=0x700|(PAGE_SHIFT<<2) // key=7
573 (p6) br.spnt.few frametable_fault
574 ;;
575 mov cr.itir=r25
576 ssm psr.dt // switch to using virtual data addressing
577 tbit.z p6,p7=r24,_PAGE_P_BIT // pte present?
578 ;;
579 (p7) itc.d r24 // install updated PTE
580 (p6) br.spnt.few frametable_fault // page present bit cleared?
581 ;;
582 mov pr=r31,-1 // restore predicate registers
583 rfi
584 END(frametable_miss)
585 ENTRY(frametable_fault)
586 ssm psr.dt // switch to using virtual data addressing
587 mov r18=cr.iip
588 movl r19=ia64_frametable_probe
589 ;;
590 cmp.eq p6,p7=r18,r19 // is the faulting address ia64_frametable_probe?
591 mov r8=0 // assumes that 'probe.r' uses r8
592 dep r21=-1,r21,IA64_PSR_RI_BIT+1,1 // return to next instruction (slot 2 of the bundle)
593 ;;
594 (p6) mov cr.ipsr=r21
595 mov r19=4 // FAULT(4)
596 (p7) br.spnt.few dispatch_to_fault_handler
597 ;;
598 mov pr=r31,-1
599 rfi
600 END(frametable_fault)
601 GLOBAL_ENTRY(ia64_frametable_probe)
602 {
603 probe.r r8=r32,0 // destination register must be r8
604 nop.f 0x0
605 br.ret.sptk.many b0 // this instruction must be in slot 2
606 }
607 END(ia64_frametable_probe)
608 #endif /* CONFIG_VIRTUAL_FRAME_TABLE */
610 .org ia64_ivt+0x1400
611 /////////////////////////////////////////////////////////////////////////////////////////
612 // 0x1400 Entry 5 (size 64 bundles) Data nested TLB (6,45)
613 ENTRY(nested_dtlb_miss)
614 DBG_FAULT(5)
615 #ifdef XEN
616 mov b0=r30
617 br.sptk.many b0 // return to continuation point
618 ;;
619 #else
620 /*
621 * In the absence of kernel bugs, we get here when the virtually mapped linear
622 * page table is accessed non-speculatively (e.g., in the Dirty-bit, Instruction
623 * Access-bit, or Data Access-bit faults). If the DTLB entry for the virtual page
624 * table is missing, a nested TLB miss fault is triggered and control is
625 * transferred to this point. When this happens, we lookup the pte for the
626 * faulting address by walking the page table in physical mode and return to the
627 * continuation point passed in register r30 (or call page_fault if the address is
628 * not mapped).
629 *
630 * Input: r16: faulting address
631 * r29: saved b0
632 * r30: continuation address
633 * r31: saved pr
634 *
635 * Output: r17: physical address of L3 PTE of faulting address
636 * r29: saved b0
637 * r30: continuation address
638 * r31: saved pr
639 *
640 * Clobbered: b0, r18, r19, r21, psr.dt (cleared)
641 */
642 rsm psr.dt // switch to using physical data addressing
643 mov r19=IA64_KR(PT_BASE) // get the page table base address
644 shl r21=r16,3 // shift bit 60 into sign bit
645 ;;
646 shr.u r17=r16,61 // get the region number into r17
647 ;;
648 cmp.eq p6,p7=5,r17 // is faulting address in region 5?
649 shr.u r18=r16,PGDIR_SHIFT // get bits 33-63 of faulting address
650 ;;
651 (p7) dep r17=r17,r19,(PAGE_SHIFT-3),3 // put region number bits in place
653 srlz.d
654 LOAD_PHYSICAL(p6, r19, swapper_pg_dir) // region 5 is rooted at swapper_pg_dir
656 .pred.rel "mutex", p6, p7
657 (p6) shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT
658 (p7) shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT-3
659 ;;
660 (p6) dep r17=r18,r19,3,(PAGE_SHIFT-3) // r17=PTA + IFA(33,42)*8
661 (p7) dep r17=r18,r17,3,(PAGE_SHIFT-6) // r17=PTA + (((IFA(61,63) << 7) | IFA(33,39))*8)
662 cmp.eq p7,p6=0,r21 // unused address bits all zeroes?
663 shr.u r18=r16,PMD_SHIFT // shift L2 index into position
664 ;;
665 ld8 r17=[r17] // fetch the L1 entry (may be 0)
666 ;;
667 (p7) cmp.eq p6,p7=r17,r0 // was L1 entry NULL?
668 dep r17=r18,r17,3,(PAGE_SHIFT-3) // compute address of L2 page table entry
669 ;;
670 (p7) ld8 r17=[r17] // fetch the L2 entry (may be 0)
671 shr.u r19=r16,PAGE_SHIFT // shift L3 index into position
672 ;;
673 (p7) cmp.eq.or.andcm p6,p7=r17,r0 // was L2 entry NULL?
674 dep r17=r19,r17,3,(PAGE_SHIFT-3) // compute address of L3 page table entry
675 (p6) br.cond.spnt page_fault
676 mov b0=r30
677 br.sptk.many b0 // return to continuation point
678 #endif
679 END(nested_dtlb_miss)
681 .org ia64_ivt+0x1800
682 /////////////////////////////////////////////////////////////////////////////////////////
683 // 0x1800 Entry 6 (size 64 bundles) Instruction Key Miss (24)
684 ENTRY(ikey_miss)
685 DBG_FAULT(6)
686 #ifdef XEN
687 FAULT_OR_REFLECT(6)
688 #else
689 FAULT(6)
690 #endif
691 END(ikey_miss)
693 //-----------------------------------------------------------------------------------
694 // call do_page_fault (predicates are in r31, psr.dt may be off, r16 is faulting address)
695 #ifdef XEN
696 GLOBAL_ENTRY(page_fault)
697 #else
698 ENTRY(page_fault)
699 #endif
700 ssm psr.dt
701 ;;
702 srlz.i
703 ;;
704 SAVE_MIN_WITH_COVER
705 #ifdef XEN
706 alloc r15=ar.pfs,0,0,4,0
707 mov out0=cr.ifa
708 mov out1=cr.isr
709 mov out3=cr.itir
710 #else
711 alloc r15=ar.pfs,0,0,3,0
712 mov out0=cr.ifa
713 mov out1=cr.isr
714 #endif
715 adds r3=8,r2 // set up second base pointer
716 ;;
717 ssm psr.ic | PSR_DEFAULT_BITS
718 ;;
719 srlz.i // guarantee that interruption collection is on
720 ;;
721 (p15) ssm psr.i // restore psr.i
722 movl r14=ia64_leave_kernel
723 ;;
724 SAVE_REST
725 mov rp=r14
726 ;;
727 adds out2=16,r12 // out2 = pointer to pt_regs
728 br.call.sptk.many b6=ia64_do_page_fault // ignore return address
729 END(page_fault)
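
(From the out0..out3 setup above, the shape of the C handler can be inferred; a hedged prototype as a reading aid only, since the authoritative declaration lives elsewhere in the tree:)

struct pt_regs;
void ia64_do_page_fault(unsigned long ifa, unsigned long isr,
                        struct pt_regs *regs, unsigned long itir);
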
731 .org ia64_ivt+0x1c00
732 /////////////////////////////////////////////////////////////////////////////////////////
733 // 0x1c00 Entry 7 (size 64 bundles) Data Key Miss (12,51)
734 ENTRY(dkey_miss)
735 DBG_FAULT(7)
736 #ifdef XEN
737 FAULT_OR_REFLECT(7)
738 #else
739 FAULT(7)
740 #endif
741 END(dkey_miss)
743 .org ia64_ivt+0x2000
744 /////////////////////////////////////////////////////////////////////////////////////////
745 // 0x2000 Entry 8 (size 64 bundles) Dirty-bit (54)
746 ENTRY(dirty_bit)
747 DBG_FAULT(8)
748 #ifdef XEN
749 mov r20=cr.ipsr
750 mov r31=pr;;
751 extr.u r20=r20,IA64_PSR_CPL0_BIT,2;;
752 mov r19=8 /* prepare to save predicates */
753 cmp.eq p6,p0=r0,r20 /* cpl == 0?*/
754 (p6) br.sptk.few dispatch_to_fault_handler
755 /* If shadow mode is not enabled, reflect the fault. */
756 movl r22=THIS_CPU(cpu_kr)+IA64_KR_CURRENT_OFFSET
757 ;;
758 ld8 r22=[r22]
759 ;;
760 add r22=IA64_VCPU_DOMAIN_OFFSET,r22
761 ;;
762 /* Read domain. */
763 ld8 r22=[r22]
764 ;;
765 add r22=IA64_DOMAIN_SHADOW_BITMAP_OFFSET,r22
766 ;;
767 ld8 r22=[r22]
768 ;;
769 cmp.eq p6,p0=r0,r22 /* !shadow_bitmap ?*/
770 (p6) br.dptk.many dispatch_reflection
772 SAVE_MIN_WITH_COVER
773 alloc r14=ar.pfs,0,0,4,0
774 mov out0=cr.ifa
775 mov out1=cr.itir
776 mov out2=cr.isr
777 adds out3=16,sp
779 ssm psr.ic | PSR_DEFAULT_BITS
780 ;;
781 srlz.i // guarantee that interruption collection is on
782 ;;
783 (p15) ssm psr.i // restore psr.i
784 adds r3=8,r2 // set up second base pointer
785 ;;
786 SAVE_REST
787 movl r14=ia64_leave_kernel
788 ;;
789 mov rp=r14
790 br.call.sptk.many b6=ia64_shadow_fault
791 #else
792 /*
793 * What we do here is to simply turn on the dirty bit in the PTE. We need to
794 * update both the page-table and the TLB entry. To efficiently access the PTE,
795 * we address it through the virtual page table. Most likely, the TLB entry for
796 * the relevant virtual page table page is still present in the TLB so we can
797 * normally do this without additional TLB misses. In case the necessary virtual
798 * page table TLB entry isn't present, we take a nested TLB miss hit where we look
799 * up the physical address of the L3 PTE and then continue at label 1 below.
800 */
801 mov r16=cr.ifa // get the address that caused the fault
802 movl r30=1f // load continuation point in case of nested fault
803 ;;
804 thash r17=r16 // compute virtual address of L3 PTE
805 mov r29=b0 // save b0 in case of nested fault
806 mov r31=pr // save pr
807 #ifdef CONFIG_SMP
808 mov r28=ar.ccv // save ar.ccv
809 ;;
810 1: ld8 r18=[r17]
811 ;; // avoid RAW on r18
812 mov ar.ccv=r18 // set compare value for cmpxchg
813 or r25=_PAGE_D|_PAGE_A,r18 // set the dirty and accessed bits
814 ;;
815 cmpxchg8.acq r26=[r17],r25,ar.ccv
816 mov r24=PAGE_SHIFT<<2
817 ;;
818 cmp.eq p6,p7=r26,r18
819 ;;
820 (p6) itc.d r25 // install updated PTE
821 ;;
822 /*
823 * Tell the assembler's dependency-violation checker that the above "itc" instructions
824 * cannot possibly affect the following loads:
825 */
826 dv_serialize_data
828 ld8 r18=[r17] // read PTE again
829 ;;
830 cmp.eq p6,p7=r18,r25 // is it same as the newly installed
831 ;;
832 (p7) ptc.l r16,r24
833 mov b0=r29 // restore b0
834 mov ar.ccv=r28
835 #else
836 ;;
837 1: ld8 r18=[r17]
838 ;; // avoid RAW on r18
839 or r18=_PAGE_D|_PAGE_A,r18 // set the dirty and accessed bits
840 mov b0=r29 // restore b0
841 ;;
842 st8 [r17]=r18 // store back updated PTE
843 itc.d r18 // install updated PTE
844 #endif
845 mov pr=r31,-1 // restore pr
846 rfi
847 #endif
848 END(dirty_bit)
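
(The SMP dirty-bit update on the non-Xen branch is a single compare-and-swap with no retry loop: on cmpxchg failure the handler skips the insert and rfi's, letting the fault recur. A C11 sketch of the memory half, with the itc.d insert omitted; bit positions are the ia64 _PAGE_A/_PAGE_D values:)

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define _PAGE_A (1ULL << 5)   /* accessed bit */
#define _PAGE_D (1ULL << 6)   /* dirty bit */

/* Returns true when the updated PTE was published and may be itc.d'd. */
static bool set_dirty_once(_Atomic uint64_t *pte_p)
{
    uint64_t old = atomic_load(pte_p);           /* ld8                */
    uint64_t new = old | _PAGE_D | _PAGE_A;      /* or r25=...         */
    /* cmpxchg8.acq: publish only if the PTE is still what we read */
    return atomic_compare_exchange_strong(pte_p, &old, new);
}
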
850 .org ia64_ivt+0x2400
851 /////////////////////////////////////////////////////////////////////////////////////////
852 // 0x2400 Entry 9 (size 64 bundles) Instruction Access-bit (27)
853 ENTRY(iaccess_bit)
854 DBG_FAULT(9)
855 #ifdef XEN
856 mov r16=cr.isr
857 mov r17=cr.ifa
858 mov r31=pr
859 mov r19=9
860 mov r20=0x2400
861 br.sptk.many fast_access_reflect;;
862 #else
863 // Like Entry 8, except for instruction access
864 mov r16=cr.ifa // get the address that caused the fault
865 movl r30=1f // load continuation point in case of nested fault
866 mov r31=pr // save predicates
867 #ifdef CONFIG_ITANIUM
868 /*
869 * Erratum 10 (IFA may contain incorrect address) has "NoFix" status.
870 */
871 mov r17=cr.ipsr
872 ;;
873 mov r18=cr.iip
874 tbit.z p6,p0=r17,IA64_PSR_IS_BIT // IA64 instruction set?
875 ;;
876 (p6) mov r16=r18 // if so, use cr.iip instead of cr.ifa
877 #endif /* CONFIG_ITANIUM */
878 ;;
879 thash r17=r16 // compute virtual address of L3 PTE
880 mov r29=b0 // save b0 in case of nested fault
881 #ifdef CONFIG_SMP
882 mov r28=ar.ccv // save ar.ccv
883 ;;
884 1: ld8 r18=[r17]
885 ;;
886 mov ar.ccv=r18 // set compare value for cmpxchg
887 or r25=_PAGE_A,r18 // set the accessed bit
888 ;;
889 cmpxchg8.acq r26=[r17],r25,ar.ccv
890 mov r24=PAGE_SHIFT<<2
891 ;;
892 cmp.eq p6,p7=r26,r18
893 ;;
894 (p6) itc.i r25 // install updated PTE
895 ;;
896 /*
897 * Tell the assembler's dependency-violation checker that the above "itc" instructions
898 * cannot possibly affect the following loads:
899 */
900 dv_serialize_data
902 ld8 r18=[r17] // read PTE again
903 ;;
904 cmp.eq p6,p7=r18,r25 // is it same as the newly installed
905 ;;
906 (p7) ptc.l r16,r24
907 mov b0=r29 // restore b0
908 mov ar.ccv=r28
909 #else /* !CONFIG_SMP */
910 ;;
911 1: ld8 r18=[r17]
912 ;;
913 or r18=_PAGE_A,r18 // set the accessed bit
914 mov b0=r29 // restore b0
915 ;;
916 st8 [r17]=r18 // store back updated PTE
917 itc.i r18 // install updated PTE
918 #endif /* !CONFIG_SMP */
919 mov pr=r31,-1
920 rfi
921 #endif
922 END(iaccess_bit)
924 .org ia64_ivt+0x2800
925 /////////////////////////////////////////////////////////////////////////////////////////
926 // 0x2800 Entry 10 (size 64 bundles) Data Access-bit (15,55)
927 ENTRY(daccess_bit)
928 DBG_FAULT(10)
929 #ifdef XEN
930 mov r16=cr.isr
931 mov r17=cr.ifa
932 mov r31=pr
933 mov r19=10
934 mov r20=0x2800
935 br.sptk.many fast_access_reflect;;
936 #else
937 // Like Entry 8, except for data access
938 mov r16=cr.ifa // get the address that caused the fault
939 movl r30=1f // load continuation point in case of nested fault
940 ;;
941 thash r17=r16 // compute virtual address of L3 PTE
942 mov r31=pr
943 mov r29=b0 // save b0 in case of nested fault
944 #ifdef CONFIG_SMP
945 mov r28=ar.ccv // save ar.ccv
946 ;;
947 1: ld8 r18=[r17]
948 ;; // avoid RAW on r18
949 mov ar.ccv=r18 // set compare value for cmpxchg
950 or r25=_PAGE_A,r18 // set the accessed bit
951 ;;
952 cmpxchg8.acq r26=[r17],r25,ar.ccv
953 mov r24=PAGE_SHIFT<<2
954 ;;
955 cmp.eq p6,p7=r26,r18
956 ;;
957 (p6) itc.d r25 // install updated PTE
958 /*
959 * Tell the assembler's dependency-violation checker that the above "itc" instructions
960 * cannot possibly affect the following loads:
961 */
962 dv_serialize_data
963 ;;
964 ld8 r18=[r17] // read PTE again
965 ;;
966 cmp.eq p6,p7=r18,r25 // is it same as the newly installed
967 ;;
968 (p7) ptc.l r16,r24
969 mov ar.ccv=r28
970 #else
971 ;;
972 1: ld8 r18=[r17]
973 ;; // avoid RAW on r18
974 or r18=_PAGE_A,r18 // set the accessed bit
975 ;;
976 st8 [r17]=r18 // store back updated PTE
977 itc.d r18 // install updated PTE
978 #endif
979 mov b0=r29 // restore b0
980 mov pr=r31,-1
981 rfi
982 #endif
983 END(daccess_bit)
985 .org ia64_ivt+0x2c00
986 /////////////////////////////////////////////////////////////////////////////////////////
987 // 0x2c00 Entry 11 (size 64 bundles) Break instruction (33)
988 ENTRY(break_fault)
989 /*
990 * The streamlined system call entry/exit paths only save/restore the initial part
991 * of pt_regs. This implies that the callers of system-calls must adhere to the
992 * normal procedure calling conventions.
993 *
994 * Registers to be saved & restored:
995 * CR registers: cr.ipsr, cr.iip, cr.ifs
996 * AR registers: ar.unat, ar.pfs, ar.rsc, ar.rnat, ar.bspstore, ar.fpsr
997 * others: pr, b0, b6, loadrs, r1, r11, r12, r13, r15
998 * Registers to be restored only:
999 * r8-r11: output value from the system call.
1001 * During system call exit, scratch registers (including r15) are modified/cleared
1002 * to prevent leaking bits from kernel to user level.
1003 */
1004 DBG_FAULT(11)
1005 #ifdef XEN
1006 mov r16=cr.isr
1007 mov r17=cr.iim
1008 mov r31=pr
1009 ;;
1010 cmp.eq p7,p0=r17,r0
1011 (p7) br.spnt.few dispatch_break_fault ;;
1012 #ifdef CRASH_DEBUG
1013 // A panic can occur before domain0 is created;
1014 // in that case, referencing XSI_PSR_IC causes a nested_dtlb_miss.
1015 movl r18=CDB_BREAK_NUM ;;
1016 cmp.eq p7,p0=r17,r18 ;;
1017 (p7) br.spnt.few dispatch_break_fault ;;
1018 #endif
1019 movl r18=THIS_CPU(current_psr_ic_addr)
1020 ;;
1021 ld8 r18=[r18]
1022 ;;
1023 ld4 r19=[r18]
1024 ;;
1025 cmp.eq p7,p0=r0,r17 // is this a pseudo-cover?
1026 (p7) br.spnt.many dispatch_privop_fault
1027 ;;
1028 // if vpsr.ic is off, we have a hyperprivop
1029 // A hyperprivop is hand-coded assembly with psr.ic off
1030 // which means no calls, no use of r1-r15 and no memory accesses
1031 // except to pinned addresses!
1032 cmp4.eq p7,p0=r0,r19
1033 (p7) br.sptk.many fast_hyperprivop
1034 ;;
1035 movl r22=THIS_CPU(cpu_kr)+IA64_KR_CURRENT_OFFSET;;
1036 ld8 r22 = [r22]
1037 ;;
1038 adds r22=IA64_VCPU_BREAKIMM_OFFSET,r22;;
1039 ld4 r23=[r22];;
1040 cmp4.eq p6,p7=r23,r17 // Xen-reserved breakimm?
1041 (p6) br.spnt.many dispatch_break_fault
1042 ;;
1043 br.sptk.many fast_break_reflect
1044 ;;
1045 #else /* !XEN */
1046 movl r16=THIS_CPU(cpu_kr)+IA64_KR_CURRENT_OFFSET;;
1047 ld8 r16=[r16]
1048 mov r17=cr.iim
1049 mov r18=__IA64_BREAK_SYSCALL
1050 mov r21=ar.fpsr
1051 mov r29=cr.ipsr
1052 mov r19=b6
1053 mov r25=ar.unat
1054 mov r27=ar.rsc
1055 mov r26=ar.pfs
1056 mov r28=cr.iip
1057 #ifndef XEN
1058 mov r31=pr // prepare to save predicates
1059 #endif
1060 mov r20=r1
1061 ;;
1062 adds r16=IA64_TASK_THREAD_ON_USTACK_OFFSET,r16
1063 cmp.eq p0,p7=r18,r17 // is this a system call? (p7 <- false, if so)
1064 (p7) br.cond.spnt non_syscall
1065 ;;
1066 ld1 r17=[r16] // load current->thread.on_ustack flag
1067 st1 [r16]=r0 // clear current->thread.on_ustack flag
1068 add r1=-IA64_TASK_THREAD_ON_USTACK_OFFSET,r16 // set r1 for MINSTATE_START_SAVE_MIN_VIRT
1069 ;;
1070 invala
1072 /* adjust return address so we skip over the break instruction: */
1074 extr.u r8=r29,41,2 // extract ei field from cr.ipsr
1075 ;;
1076 cmp.eq p6,p7=2,r8 // ipsr.ei==2?
1077 mov r2=r1 // setup r2 for ia64_syscall_setup
1078 ;;
1079 (p6) mov r8=0 // clear ei to 0
1080 (p6) adds r28=16,r28 // switch cr.iip to next bundle (cr.ipsr.ei wrapped)
1081 (p7) adds r8=1,r8 // increment ei to next slot
1082 ;;
1083 cmp.eq pKStk,pUStk=r0,r17 // are we in kernel mode already?
1084 dep r29=r8,r29,41,2 // insert new ei into cr.ipsr
1085 ;;
1087 // switch from user to kernel RBS:
1088 MINSTATE_START_SAVE_MIN_VIRT
1089 br.call.sptk.many b7=ia64_syscall_setup
1090 ;;
1091 MINSTATE_END_SAVE_MIN_VIRT // switch to bank 1
1092 ssm psr.ic | PSR_DEFAULT_BITS
1093 ;;
1094 srlz.i // guarantee that interruption collection is on
1095 mov r3=NR_syscalls - 1
1096 ;;
1097 (p15) ssm psr.i // restore psr.i
1098 // p10==true means out registers are more than 8 or r15's Nat is true
1099 (p10) br.cond.spnt.many ia64_ret_from_syscall
1100 ;;
1101 movl r16=sys_call_table
1103 adds r15=-1024,r15 // r15 contains the syscall number---subtract 1024
1104 movl r2=ia64_ret_from_syscall
1105 ;;
1106 shladd r20=r15,3,r16 // r20 = sys_call_table + 8*(syscall-1024)
1107 cmp.leu p6,p7=r15,r3 // (syscall > 0 && syscall < 1024 + NR_syscalls) ?
1108 mov rp=r2 // set the real return addr
1109 ;;
1110 (p6) ld8 r20=[r20] // load address of syscall entry point
1111 (p7) movl r20=sys_ni_syscall
1113 add r2=TI_FLAGS+IA64_TASK_SIZE,r13
1114 ;;
1115 ld4 r2=[r2] // r2 = current_thread_info()->flags
1116 ;;
1117 and r2=_TIF_SYSCALL_TRACEAUDIT,r2 // mask trace or audit
1118 ;;
1119 cmp.eq p8,p0=r2,r0
1120 mov b6=r20
1121 ;;
1122 (p8) br.call.sptk.many b6=b6 // ignore this return addr
1123 br.cond.sptk ia64_trace_syscall
1124 // NOT REACHED
1125 #endif
1126 END(break_fault)
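
(The syscall-number screening on the non-Xen path relies on one unsigned compare after the 1024 bias is removed; an out-of-range number falls through to sys_ni_syscall. A C restatement, with the NR_syscalls value illustrative:)

#define NR_syscalls 1024
typedef long (*syscall_fn)(void);
extern syscall_fn sys_call_table[NR_syscalls];
extern long sys_ni_syscall(void);

static syscall_fn pick_syscall(unsigned long r15)
{
    unsigned long nr = r15 - 1024;       /* adds r15=-1024,r15          */
    /* cmp.leu: the unsigned compare also rejects numbers below 1024,
     * which underflow to a huge value */
    return nr <= NR_syscalls - 1 ? sys_call_table[nr]
                                 : (syscall_fn)sys_ni_syscall;
}
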
1128 .org ia64_ivt+0x3000
1129 /////////////////////////////////////////////////////////////////////////////////////////
1130 // 0x3000 Entry 12 (size 64 bundles) External Interrupt (4)
1131 ENTRY(interrupt)
1132 DBG_FAULT(12)
1133 mov r31=pr // prepare to save predicates
1134 ;;
1135 #ifdef XEN
1136 mov r30=cr.ivr // pass cr.ivr as first arg
1137 // FIXME: this is a hack... use cpuinfo.ksoftirqd because it's
1138 // not used anywhere else and we need a place to stash ivr, and
1139 // there are no registers available unused by SAVE_MIN/REST
1140 movl r29=THIS_CPU(cpu_info)+IA64_CPUINFO_KSOFTIRQD_OFFSET;;
1141 st8 [r29]=r30;;
1142 movl r28=slow_interrupt;;
1143 mov r29=rp;;
1144 mov rp=r28;;
1145 br.cond.sptk.many fast_tick_reflect
1146 ;;
1147 slow_interrupt:
1148 mov rp=r29;;
1149 #endif
1150 SAVE_MIN_WITH_COVER // uses r31; defines r2 and r3
1151 ssm psr.ic | PSR_DEFAULT_BITS
1152 ;;
1153 adds r3=8,r2 // set up second base pointer for SAVE_REST
1154 srlz.i // ensure everybody knows psr.ic is back on
1155 ;;
1156 SAVE_REST
1157 ;;
1158 alloc r14=ar.pfs,0,0,2,0 // must be first in an insn group
1159 #ifdef XEN
1160 movl out0=THIS_CPU(cpu_info)+IA64_CPUINFO_KSOFTIRQD_OFFSET;;
1161 ld8 out0=[out0];;
1162 #else
1163 mov out0=cr.ivr // pass cr.ivr as first arg
1164 #endif
1165 add out1=16,sp // pass pointer to pt_regs as second arg
1166 #ifndef XEN
1167 ;;
1168 srlz.d // make sure we see the effect of cr.ivr
1169 #endif
1170 movl r14=ia64_leave_kernel
1171 ;;
1172 mov rp=r14
1173 br.call.sptk.many b6=ia64_handle_irq
1174 END(interrupt)
1176 .org ia64_ivt+0x3400
1177 /////////////////////////////////////////////////////////////////////////////////////////
1178 // 0x3400 Entry 13 (size 64 bundles) Reserved
1179 DBG_FAULT(13)
1180 FAULT(13)
1182 #ifdef XEN
1183 // There is no particular reason for this code to be here, other than that
1184 // there happens to be space here that would go unused otherwise. If this
1185 // fault ever gets "unreserved", simply move the following code to a more
1186 // suitable spot...
1188 GLOBAL_ENTRY(dispatch_break_fault)
1189 SAVE_MIN_WITH_COVER
1190 ;;
1191 dispatch_break_fault_post_save:
1192 alloc r14=ar.pfs,0,0,4,0 // now it's safe (must be first in insn group!)
1193 mov out0=cr.ifa
1194 adds out1=16,sp
1195 mov out2=cr.isr // FIXME: pity to make this slow access twice
1196 mov out3=cr.iim // FIXME: pity to make this slow access twice
1198 ssm psr.ic | PSR_DEFAULT_BITS
1199 ;;
1200 srlz.i // guarantee that interruption collection is on
1201 ;;
1202 (p15) ssm psr.i // restore psr.i
1203 adds r3=8,r2 // set up second base pointer
1204 ;;
1205 SAVE_REST
1206 movl r14=ia64_leave_kernel
1207 ;;
1208 mov rp=r14
1209 // br.sptk.many ia64_prepare_handle_break
1210 br.call.sptk.many b6=ia64_handle_break
1211 END(dispatch_break_fault)
1212 #endif
1214 .org ia64_ivt+0x3800
1215 /////////////////////////////////////////////////////////////////////////////////////////
1216 // 0x3800 Entry 14 (size 64 bundles) Reserved
1217 DBG_FAULT(14)
1218 FAULT(14)
1220 #ifndef XEN
1221 /*
1222 * There is no particular reason for this code to be here, other than that
1223 * there happens to be space here that would go unused otherwise. If this
1224 * fault ever gets "unreserved", simply move the following code to a more
1225 * suitable spot...
1227 * ia64_syscall_setup() is a separate subroutine so that it can
1228 * allocate stacked registers so it can safely demine any
1229 * potential NaT values from the input registers.
1231 * On entry:
1232 * - executing on bank 0 or bank 1 register set (doesn't matter)
1233 * - r1: stack pointer
1234 * - r2: current task pointer
1235 * - r3: preserved
1236 * - r11: original contents (saved ar.pfs to be saved)
1237 * - r12: original contents (sp to be saved)
1238 * - r13: original contents (tp to be saved)
1239 * - r15: original contents (syscall # to be saved)
1240 * - r18: saved bsp (after switching to kernel stack)
1241 * - r19: saved b6
1242 * - r20: saved r1 (gp)
1243 * - r21: saved ar.fpsr
1244 * - r22: kernel's register backing store base (krbs_base)
1245 * - r23: saved ar.bspstore
1246 * - r24: saved ar.rnat
1247 * - r25: saved ar.unat
1248 * - r26: saved ar.pfs
1249 * - r27: saved ar.rsc
1250 * - r28: saved cr.iip
1251 * - r29: saved cr.ipsr
1252 * - r31: saved pr
1253 * - b0: original contents (to be saved)
1254 * On exit:
1255 * - executing on bank 1 registers
1256 * - psr.ic enabled, interrupts restored
1257 * - p10: TRUE if syscall is invoked with more than 8 out
1258 * registers or r15's Nat is true
1259 * - r1: kernel's gp
1260 * - r3: preserved (same as on entry)
1261 * - r8: -EINVAL if p10 is true
1262 * - r12: points to kernel stack
1263 * - r13: points to current task
1264 * - p15: TRUE if interrupts need to be re-enabled
1265 * - ar.fpsr: set to kernel settings
1266 */
1267 GLOBAL_ENTRY(ia64_syscall_setup)
1268 #ifndef XEN
1269 #if PT(B6) != 0
1270 # error This code assumes that b6 is the first field in pt_regs.
1271 #endif
1272 #endif
1273 st8 [r1]=r19 // save b6
1274 add r16=PT(CR_IPSR),r1 // initialize first base pointer
1275 add r17=PT(R11),r1 // initialize second base pointer
1276 ;;
1277 alloc r19=ar.pfs,8,0,0,0 // ensure in0-in7 are writable
1278 st8 [r16]=r29,PT(AR_PFS)-PT(CR_IPSR) // save cr.ipsr
1279 tnat.nz p8,p0=in0
1281 st8.spill [r17]=r11,PT(CR_IIP)-PT(R11) // save r11
1282 tnat.nz p9,p0=in1
1283 (pKStk) mov r18=r0 // make sure r18 isn't NaT
1284 ;;
1286 st8 [r16]=r26,PT(CR_IFS)-PT(AR_PFS) // save ar.pfs
1287 st8 [r17]=r28,PT(AR_UNAT)-PT(CR_IIP) // save cr.iip
1288 mov r28=b0 // save b0 (2 cyc)
1289 ;;
1291 st8 [r17]=r25,PT(AR_RSC)-PT(AR_UNAT) // save ar.unat
1292 dep r19=0,r19,38,26 // clear all bits but 0..37 [I0]
1293 (p8) mov in0=-1
1294 ;;
1296 st8 [r16]=r19,PT(AR_RNAT)-PT(CR_IFS) // store ar.pfs.pfm in cr.ifs
1297 extr.u r11=r19,7,7 // I0 // get sol of ar.pfs
1298 and r8=0x7f,r19 // A // get sof of ar.pfs
1300 st8 [r17]=r27,PT(AR_BSPSTORE)-PT(AR_RSC)// save ar.rsc
1301 tbit.nz p15,p0=r29,IA64_PSR_I_BIT // I0
1302 (p9) mov in1=-1
1303 ;;
1305 (pUStk) sub r18=r18,r22 // r18=RSE.ndirty*8
1306 tnat.nz p10,p0=in2
1307 add r11=8,r11
1308 ;;
1309 (pKStk) adds r16=PT(PR)-PT(AR_RNAT),r16 // skip over ar_rnat field
1310 (pKStk) adds r17=PT(B0)-PT(AR_BSPSTORE),r17 // skip over ar_bspstore field
1311 tnat.nz p11,p0=in3
1312 ;;
1313 (p10) mov in2=-1
1314 tnat.nz p12,p0=in4 // [I0]
1315 (p11) mov in3=-1
1316 ;;
1317 (pUStk) st8 [r16]=r24,PT(PR)-PT(AR_RNAT) // save ar.rnat
1318 (pUStk) st8 [r17]=r23,PT(B0)-PT(AR_BSPSTORE) // save ar.bspstore
1319 shl r18=r18,16 // compute ar.rsc to be used for "loadrs"
1320 ;;
1321 st8 [r16]=r31,PT(LOADRS)-PT(PR) // save predicates
1322 st8 [r17]=r28,PT(R1)-PT(B0) // save b0
1323 tnat.nz p13,p0=in5 // [I0]
1324 ;;
1325 st8 [r16]=r18,PT(R12)-PT(LOADRS) // save ar.rsc value for "loadrs"
1326 st8.spill [r17]=r20,PT(R13)-PT(R1) // save original r1
1327 (p12) mov in4=-1
1328 ;;
1330 .mem.offset 0,0; st8.spill [r16]=r12,PT(AR_FPSR)-PT(R12) // save r12
1331 .mem.offset 8,0; st8.spill [r17]=r13,PT(R15)-PT(R13) // save r13
1332 (p13) mov in5=-1
1333 ;;
1334 st8 [r16]=r21,PT(R8)-PT(AR_FPSR) // save ar.fpsr
1335 tnat.nz p14,p0=in6
1336 cmp.lt p10,p9=r11,r8 // frame size can't be more than local+8
1337 ;;
1338 stf8 [r16]=f1 // ensure pt_regs.r8 != 0 (see handle_syscall_error)
1339 (p9) tnat.nz p10,p0=r15
1340 adds r12=-16,r1 // switch to kernel memory stack (with 16 bytes of scratch)
1342 st8.spill [r17]=r15 // save r15
1343 tnat.nz p8,p0=in7
1344 nop.i 0
1346 mov r13=r2 // establish `current'
1347 movl r1=__gp // establish kernel global pointer
1348 ;;
1349 (p14) mov in6=-1
1350 (p8) mov in7=-1
1351 nop.i 0
1353 cmp.eq pSys,pNonSys=r0,r0 // set pSys=1, pNonSys=0
1354 movl r17=FPSR_DEFAULT
1355 ;;
1356 mov.m ar.fpsr=r17 // set ar.fpsr to kernel default value
1357 (p10) mov r8=-EINVAL
1358 br.ret.sptk.many b7
1359 END(ia64_syscall_setup)
1360 #endif /* XEN */
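
(ia64_syscall_setup's frame-size check decodes ar.pfs: sof, size of frame, in bits 0..6 and sol, size of locals, in bits 7..13. A syscall may pass at most 8 output registers, so sof > sol + 8 yields -EINVAL via p10. In C:)

#include <stdbool.h>
#include <stdint.h>

static bool too_many_outputs(uint64_t pfs)
{
    unsigned sof = pfs & 0x7f;          /* and r8=0x7f,r19      */
    unsigned sol = (pfs >> 7) & 0x7f;   /* extr.u r11=r19,7,7   */
    return sol + 8 < sof;               /* cmp.lt p10,p9=r11,r8 */
}
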
1362 .org ia64_ivt+0x3c00
1363 /////////////////////////////////////////////////////////////////////////////////////////
1364 // 0x3c00 Entry 15 (size 64 bundles) Reserved
1365 DBG_FAULT(15)
1366 FAULT(15)
1368 #ifndef XEN
1369 /*
1370 * Squatting in this space ...
1372 * This special case dispatcher for illegal operation faults allows preserved
1373 * registers to be modified through a callback function (asm only) that is handed
1374 * back from the fault handler in r8. Up to three arguments can be passed to the
1375 * callback function by returning an aggregate with the callback as its first
1376 * element, followed by the arguments.
1377 */
1378 ENTRY(dispatch_illegal_op_fault)
1379 SAVE_MIN_WITH_COVER
1380 ssm psr.ic | PSR_DEFAULT_BITS
1381 ;;
1382 srlz.i // guarantee that interruption collection is on
1383 ;;
1384 (p15) ssm psr.i // restore psr.i
1385 adds r3=8,r2 // set up second base pointer for SAVE_REST
1386 ;;
1387 alloc r14=ar.pfs,0,0,1,0 // must be first in insn group
1388 mov out0=ar.ec
1389 ;;
1390 SAVE_REST
1391 ;;
1392 br.call.sptk.many rp=ia64_illegal_op_fault
1393 .ret0: ;;
1394 alloc r14=ar.pfs,0,0,3,0 // must be first in insn group
1395 mov out0=r9
1396 mov out1=r10
1397 mov out2=r11
1398 movl r15=ia64_leave_kernel
1399 ;;
1400 mov rp=r15
1401 mov b6=r8
1402 ;;
1403 cmp.ne p6,p0=0,r8
1404 (p6) br.call.dpnt.many b6=b6 // call returns to ia64_leave_kernel
1405 br.sptk.many ia64_leave_kernel
1406 END(dispatch_illegal_op_fault)
1407 #endif
1409 .org ia64_ivt+0x4000
1410 /////////////////////////////////////////////////////////////////////////////////////////
1411 // 0x4000 Entry 16 (size 64 bundles) Reserved
1412 DBG_FAULT(16)
1413 FAULT(16)
1415 #ifdef XEN
1416 // There is no particular reason for this code to be here, other than that
1417 // there happens to be space here that would go unused otherwise. If this
1418 // fault ever gets "unreserved", simply move the following code to a more
1419 // suitable spot...
1421 ENTRY(dispatch_privop_fault)
1422 SAVE_MIN_WITH_COVER
1423 ;;
1424 alloc r14=ar.pfs,0,0,4,0 // now it's safe (must be first in insn group!)
1425 mov out0=cr.ifa
1426 adds out1=16,sp
1427 mov out2=cr.isr // FIXME: pity to make this slow access twice
1428 mov out3=cr.itir
1430 ssm psr.ic | PSR_DEFAULT_BITS
1431 ;;
1432 srlz.i // guarantee that interruption collection is on
1433 ;;
1434 (p15) ssm psr.i // restore psr.i
1435 adds r3=8,r2 // set up second base pointer
1436 ;;
1437 SAVE_REST
1438 movl r14=ia64_leave_kernel
1439 ;;
1440 mov rp=r14
1441 // br.sptk.many ia64_prepare_handle_privop
1442 br.call.sptk.many b6=ia64_handle_privop
1443 END(dispatch_privop_fault)
1444 #endif
1447 .org ia64_ivt+0x4400
1448 /////////////////////////////////////////////////////////////////////////////////////////
1449 // 0x4400 Entry 17 (size 64 bundles) Reserved
1450 DBG_FAULT(17)
1451 FAULT(17)
1453 #ifndef XEN
1454 ENTRY(non_syscall)
1455 SAVE_MIN_WITH_COVER
1457 // There is no particular reason for this code to be here, other than that
1458 // there happens to be space here that would go unused otherwise. If this
1459 // fault ever gets "unreserved", simply move the following code to a more
1460 // suitable spot...
1462 alloc r14=ar.pfs,0,0,2,0
1463 mov out0=cr.iim
1464 add out1=16,sp
1465 adds r3=8,r2 // set up second base pointer for SAVE_REST
1467 ssm psr.ic | PSR_DEFAULT_BITS
1468 ;;
1469 srlz.i // guarantee that interruption collection is on
1470 ;;
1471 (p15) ssm psr.i // restore psr.i
1472 movl r15=ia64_leave_kernel
1473 ;;
1474 SAVE_REST
1475 mov rp=r15
1476 ;;
1477 br.call.sptk.many b6=ia64_bad_break // avoid WAW on CFM and ignore return addr
1478 END(non_syscall)
1479 #endif
1481 .org ia64_ivt+0x4800
1482 /////////////////////////////////////////////////////////////////////////////////////////
1483 // 0x4800 Entry 18 (size 64 bundles) Reserved
1484 DBG_FAULT(18)
1485 FAULT(18)
1487 #ifndef XEN
1488 /*
1489 * There is no particular reason for this code to be here, other than that
1490 * there happens to be space here that would go unused otherwise. If this
1491 * fault ever gets "unreserved", simply move the following code to a more
1492 * suitable spot...
1493 */
1494 ENTRY(dispatch_unaligned_handler)
1495 SAVE_MIN_WITH_COVER
1496 ;;
1497 alloc r14=ar.pfs,0,0,2,0 // now it's safe (must be first in insn group!)
1498 mov out0=cr.ifa
1499 adds out1=16,sp
1501 ssm psr.ic | PSR_DEFAULT_BITS
1502 ;;
1503 srlz.i // guarantee that interruption collection is on
1504 ;;
1505 (p15) ssm psr.i // restore psr.i
1506 adds r3=8,r2 // set up second base pointer
1507 ;;
1508 SAVE_REST
1509 movl r14=ia64_leave_kernel
1510 ;;
1511 mov rp=r14
1512 // br.sptk.many ia64_prepare_handle_unaligned
1513 br.call.sptk.many b6=ia64_handle_unaligned
1514 END(dispatch_unaligned_handler)
1515 #endif
1517 .org ia64_ivt+0x4c00
1518 /////////////////////////////////////////////////////////////////////////////////////////
1519 // 0x4c00 Entry 19 (size 64 bundles) Reserved
1520 DBG_FAULT(19)
1521 FAULT(19)
1523 /*
1524 * There is no particular reason for this code to be here, other than that
1525 * there happens to be space here that would go unused otherwise. If this
1526 * fault ever gets "unreserved", simply move the following code to a more
1527 * suitable spot...
1528 */
1530 GLOBAL_ENTRY(dispatch_to_fault_handler)
1531 /*
1532 * Input:
1533 * psr.ic: off
1534 * r19: fault vector number (e.g., 24 for General Exception)
1535 * r31: contains saved predicates (pr)
1536 */
1537 SAVE_MIN_WITH_COVER_R19
1538 alloc r14=ar.pfs,0,0,5,0
1539 mov out0=r15
1540 mov out1=cr.isr
1541 mov out2=cr.ifa
1542 mov out3=cr.iim
1543 mov out4=cr.itir
1544 ;;
1545 ssm psr.ic | PSR_DEFAULT_BITS
1546 ;;
1547 srlz.i // guarantee that interruption collection is on
1548 ;;
1549 (p15) ssm psr.i // restore psr.i
1550 adds r3=8,r2 // set up second base pointer for SAVE_REST
1551 ;;
1552 SAVE_REST
1553 movl r14=ia64_leave_kernel
1554 ;;
1555 mov rp=r14
1556 br.call.sptk.many b6=ia64_fault
1557 END(dispatch_to_fault_handler)
1559 //
1560 // --- End of long entries, Beginning of short entries
1561 //
1563 .org ia64_ivt+0x5000
1564 /////////////////////////////////////////////////////////////////////////////////////////
1565 // 0x5000 Entry 20 (size 16 bundles) Page Not Present (10,22,49)
1566 ENTRY(page_not_present)
1567 DBG_FAULT(20)
1568 #ifdef XEN
1569 FAULT_OR_REFLECT(20)
1570 #else
1571 mov r16=cr.ifa
1572 rsm psr.dt
1573 /*
1574 * The Linux page fault handler doesn't expect non-present pages to be in
1575 * the TLB. Flush the existing entry now, so we meet that expectation.
1576 */
1577 mov r17=PAGE_SHIFT<<2
1578 ;;
1579 ptc.l r16,r17
1580 ;;
1581 mov r31=pr
1582 srlz.d
1583 br.sptk.many page_fault
1584 #endif
1585 END(page_not_present)
1587 .org ia64_ivt+0x5100
1588 /////////////////////////////////////////////////////////////////////////////////////////
1589 // 0x5100 Entry 21 (size 16 bundles) Key Permission (13,25,52)
1590 ENTRY(key_permission)
1591 DBG_FAULT(21)
1592 #ifdef XEN
1593 FAULT_OR_REFLECT(21)
1594 #else
1595 mov r16=cr.ifa
1596 rsm psr.dt
1597 mov r31=pr
1598 ;;
1599 srlz.d
1600 br.sptk.many page_fault
1601 #endif
1602 END(key_permission)
1604 .org ia64_ivt+0x5200
1605 /////////////////////////////////////////////////////////////////////////////////////////
1606 // 0x5200 Entry 22 (size 16 bundles) Instruction Access Rights (26)
1607 ENTRY(iaccess_rights)
1608 DBG_FAULT(22)
1609 #ifdef XEN
1610 FAULT_OR_REFLECT(22)
1611 #else
1612 mov r16=cr.ifa
1613 rsm psr.dt
1614 mov r31=pr
1615 ;;
1616 srlz.d
1617 br.sptk.many page_fault
1618 #endif
1619 END(iaccess_rights)
1621 .org ia64_ivt+0x5300
1622 /////////////////////////////////////////////////////////////////////////////////////////
1623 // 0x5300 Entry 23 (size 16 bundles) Data Access Rights (14,53)
1624 ENTRY(daccess_rights)
1625 DBG_FAULT(23)
1626 #ifdef XEN
1627 mov r31=pr;
1628 mov r16=cr.isr
1629 mov r17=cr.ifa
1630 mov r19=23
1631 movl r20=0x5300
1632 br.sptk.many fast_access_reflect;;
1633 #else
1634 mov r16=cr.ifa
1635 rsm psr.dt
1636 mov r31=pr
1637 ;;
1638 srlz.d
1639 br.sptk.many page_fault
1640 #endif
1641 END(daccess_rights)
1643 .org ia64_ivt+0x5400
1644 /////////////////////////////////////////////////////////////////////////////////////////
1645 // 0x5400 Entry 24 (size 16 bundles) General Exception (5,32,34,36,38,39)
1646 ENTRY(general_exception)
1647 DBG_FAULT(24)
1648 mov r16=cr.isr
1649 mov r31=pr
1650 ;;
1651 #ifdef XEN
1652 cmp4.ge p6,p0=0x20,r16
1653 (p6) br.sptk.many dispatch_privop_fault
1654 #else
1655 cmp4.eq p6,p0=0,r16
1656 (p6) br.sptk.many dispatch_illegal_op_fault
1657 #endif
1658 ;;
1659 mov r19=24 // fault number
1660 br.sptk.many dispatch_to_fault_handler
1661 END(general_exception)
1663 .org ia64_ivt+0x5500
1664 /////////////////////////////////////////////////////////////////////////////////////////
1665 // 0x5500 Entry 25 (size 16 bundles) Disabled FP-Register (35)
1666 ENTRY(disabled_fp_reg)
1667 DBG_FAULT(25)
1668 #ifdef XEN
1669 #if 0
1670 mov r20=pr
1671 movl r16=0x2000000000000000
1672 movl r17=0x2000000000176b60
1673 mov r18=cr.iip
1674 mov r19=rr[r16]
1675 movl r22=0xe95d0439
1676 ;;
1677 mov pr=r0,-1
1678 ;;
1679 cmp.eq p6,p7=r22,r19
1680 ;;
1681 (p6) cmp.eq p8,p9=r17,r18
1682 (p8) br.sptk.few floating_panic
1683 ;;
1684 mov pr=r20,-1
1685 ;;
1686 #endif
1687 FAULT_OR_REFLECT(25)
1688 //floating_panic:
1689 // br.sptk.many floating_panic
1690 ;;
1691 #endif
1692 rsm psr.dfh // ensure we can access fph
1693 ;;
1694 srlz.d
1695 mov r31=pr
1696 mov r19=25
1697 br.sptk.many dispatch_to_fault_handler
1698 END(disabled_fp_reg)
1700 .org ia64_ivt+0x5600
1701 /////////////////////////////////////////////////////////////////////////////////////////
1702 // 0x5600 Entry 26 (size 16 bundles) Nat Consumption (11,23,37,50)
1703 ENTRY(nat_consumption)
1704 DBG_FAULT(26)
1705 #ifdef XEN
1706 FAULT_OR_REFLECT(26)
1707 #else
1708 FAULT(26)
1709 #endif
1710 END(nat_consumption)
1712 .org ia64_ivt+0x5700
1713 /////////////////////////////////////////////////////////////////////////////////////////
1714 // 0x5700 Entry 27 (size 16 bundles) Speculation (40)
1715 ENTRY(speculation_vector)
1716 DBG_FAULT(27)
1717 #ifdef XEN
1718 // this probably need not reflect...
1719 FAULT_OR_REFLECT(27)
1720 #else
1721 /*
1722 * A [f]chk.[as] instruction needs to take the branch to the recovery code but
1723 * this part of the architecture is not implemented in hardware on some CPUs, such
1724 * as Itanium. Thus, in general we need to emulate the behavior. IIM contains
1725 * the relative target (not yet sign extended). So after sign extending it we
1726 * simply add it to IIP. We also need to reset the EI field of the IPSR to zero,
1727 * i.e., the slot to restart into.
1729 * cr.iim contains zero_ext(imm21)
1730 */
1731 mov r18=cr.iim
1732 ;;
1733 mov r17=cr.iip
1734 shl r18=r18,43 // put sign bit in position (43=64-21)
1735 ;;
1737 mov r16=cr.ipsr
1738 shr r18=r18,39 // sign extend (39=43-4)
1739 ;;
1741 add r17=r17,r18 // now add the offset
1742 ;;
1743 mov cr.iip=r17
1744 dep r16=0,r16,41,2 // clear EI
1745 ;;
1747 mov cr.ipsr=r16
1748 ;;
1750 rfi // and go back
1751 #endif
1752 END(speculation_vector)
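
(The non-Xen emulation above is a sign-extension plus an IP-relative add: shifting the 21-bit immediate left by 43 and arithmetically right by 39 yields the sign-extended, 16-byte-scaled bundle offset. A C sketch, assuming arithmetic right shift of signed values as on gcc/clang:)

#include <stdint.h>

static void emulate_chk_branch(uint64_t *iip, uint64_t *ipsr, uint64_t iim)
{
    int64_t off = ((int64_t)(iim << 43)) >> 39;   /* shl 43, then shr 39 */
    *iip += (uint64_t)off;                        /* add to cr.iip       */
    *ipsr &= ~(3ULL << 41);                       /* clear ipsr.ei: restart at slot 0 */
    /* rfi then resumes at the recovery target */
}
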
1754 .org ia64_ivt+0x5800
1755 /////////////////////////////////////////////////////////////////////////////////////////
1756 // 0x5800 Entry 28 (size 16 bundles) Reserved
1757 DBG_FAULT(28)
1758 FAULT(28)
1760 .org ia64_ivt+0x5900
1761 /////////////////////////////////////////////////////////////////////////////////////////
1762 // 0x5900 Entry 29 (size 16 bundles) Debug (16,28,56)
1763 ENTRY(debug_vector)
1764 DBG_FAULT(29)
1765 #ifdef XEN
1766 FAULT_OR_REFLECT(29)
1767 #else
1768 FAULT(29)
1769 #endif
1770 END(debug_vector)
1772 .org ia64_ivt+0x5a00
1773 /////////////////////////////////////////////////////////////////////////////////////////
1774 // 0x5a00 Entry 30 (size 16 bundles) Unaligned Reference (57)
1775 ENTRY(unaligned_access)
1776 DBG_FAULT(30)
1777 #ifdef XEN
1778 FAULT_OR_REFLECT(30)
1779 #else
1780 mov r16=cr.ipsr
1781 mov r31=pr // prepare to save predicates
1782 ;;
1783 br.sptk.many dispatch_unaligned_handler
1784 #endif
1785 END(unaligned_access)
1787 .org ia64_ivt+0x5b00
1788 /////////////////////////////////////////////////////////////////////////////////////////
1789 // 0x5b00 Entry 31 (size 16 bundles) Unsupported Data Reference (57)
1790 ENTRY(unsupported_data_reference)
1791 DBG_FAULT(31)
1792 #ifdef XEN
1793 FAULT_OR_REFLECT(31)
1794 #else
1795 FAULT(31)
1796 #endif
1797 END(unsupported_data_reference)
1799 .org ia64_ivt+0x5c00
1800 /////////////////////////////////////////////////////////////////////////////////////////
1801 // 0x5c00 Entry 32 (size 16 bundles) Floating-Point Fault (64)
1802 ENTRY(floating_point_fault)
1803 DBG_FAULT(32)
1804 #ifdef XEN
1805 FAULT_OR_REFLECT(32)
1806 #else
1807 FAULT(32)
1808 #endif
1809 END(floating_point_fault)
1811 .org ia64_ivt+0x5d00
1812 /////////////////////////////////////////////////////////////////////////////////////////
1813 // 0x5d00 Entry 33 (size 16 bundles) Floating Point Trap (66)
1814 ENTRY(floating_point_trap)
1815 DBG_FAULT(33)
1816 #ifdef XEN
1817 FAULT_OR_REFLECT(33)
1818 #else
1819 FAULT(33)
1820 #endif
1821 END(floating_point_trap)
1823 .org ia64_ivt+0x5e00
1824 /////////////////////////////////////////////////////////////////////////////////////////
1825 // 0x5e00 Entry 34 (size 16 bundles) Lower Privilege Transfer Trap (66)
1826 ENTRY(lower_privilege_trap)
1827 DBG_FAULT(34)
1828 #ifdef XEN
1829 FAULT_OR_REFLECT(34)
1830 #else
1831 FAULT(34)
1832 #endif
1833 END(lower_privilege_trap)
1835 .org ia64_ivt+0x5f00
1836 /////////////////////////////////////////////////////////////////////////////////////////
1837 // 0x5f00 Entry 35 (size 16 bundles) Taken Branch Trap (68)
1838 ENTRY(taken_branch_trap)
1839 DBG_FAULT(35)
1840 #ifdef XEN
1841 FAULT_OR_REFLECT(35)
1842 #else
1843 FAULT(35)
1844 #endif
1845 END(taken_branch_trap)
1847 .org ia64_ivt+0x6000
1848 /////////////////////////////////////////////////////////////////////////////////////////
1849 // 0x6000 Entry 36 (size 16 bundles) Single Step Trap (69)
1850 ENTRY(single_step_trap)
1851 DBG_FAULT(36)
1852 #ifdef XEN
1853 FAULT_OR_REFLECT(36)
1854 #else
1855 FAULT(36)
1856 #endif
1857 END(single_step_trap)
1859 .org ia64_ivt+0x6100
1860 /////////////////////////////////////////////////////////////////////////////////////////
1861 // 0x6100 Entry 37 (size 16 bundles) Reserved
1862 DBG_FAULT(37)
1863 FAULT(37)
1865 .org ia64_ivt+0x6200
1866 /////////////////////////////////////////////////////////////////////////////////////////
1867 // 0x6200 Entry 38 (size 16 bundles) Reserved
1868 DBG_FAULT(38)
1869 FAULT(38)
1871 .org ia64_ivt+0x6300
1872 /////////////////////////////////////////////////////////////////////////////////////////
1873 // 0x6300 Entry 39 (size 16 bundles) Reserved
1874 DBG_FAULT(39)
1875 FAULT(39)
1877 .org ia64_ivt+0x6400
1878 /////////////////////////////////////////////////////////////////////////////////////////
1879 // 0x6400 Entry 40 (size 16 bundles) Reserved
1880 DBG_FAULT(40)
1881 FAULT(40)
1883 .org ia64_ivt+0x6500
1884 /////////////////////////////////////////////////////////////////////////////////////////
1885 // 0x6500 Entry 41 (size 16 bundles) Reserved
1886 DBG_FAULT(41)
1887 FAULT(41)
1889 .org ia64_ivt+0x6600
1890 /////////////////////////////////////////////////////////////////////////////////////////
1891 // 0x6600 Entry 42 (size 16 bundles) Reserved
1892 DBG_FAULT(42)
1893 FAULT(42)
1895 .org ia64_ivt+0x6700
1896 /////////////////////////////////////////////////////////////////////////////////////////
1897 // 0x6700 Entry 43 (size 16 bundles) Reserved
1898 DBG_FAULT(43)
1899 FAULT(43)
1901 .org ia64_ivt+0x6800
1902 /////////////////////////////////////////////////////////////////////////////////////////
1903 // 0x6800 Entry 44 (size 16 bundles) Reserved
1904 DBG_FAULT(44)
1905 FAULT(44)
1907 .org ia64_ivt+0x6900
1908 /////////////////////////////////////////////////////////////////////////////////////////
1909 // 0x6900 Entry 45 (size 16 bundles) IA-32 Exception (17,18,29,41,42,43,44,58,60,61,62,72,73,75,76,77)
1910 ENTRY(ia32_exception)
1911 DBG_FAULT(45)
1912 #ifdef XEN
1913 FAULT_OR_REFLECT(45)
1914 #else
1915 FAULT(45)
1916 #endif
1917 END(ia32_exception)
1919 .org ia64_ivt+0x6a00
1920 /////////////////////////////////////////////////////////////////////////////////////////
1921 // 0x6a00 Entry 46 (size 16 bundles) IA-32 Intercept (30,31,59,70,71)
1922 ENTRY(ia32_intercept)
1923 DBG_FAULT(46)
1924 #ifdef XEN
1925 FAULT_OR_REFLECT(46)
1926 #else
1927 #ifdef CONFIG_IA32_SUPPORT
1928 mov r31=pr
1929 mov r16=cr.isr
1930 ;;
1931 extr.u r17=r16,16,8 // get ISR.code
1932 mov r18=ar.eflag
1933 mov r19=cr.iim // old eflag value
1934 ;;
1935 cmp.ne p6,p0=2,r17
1936 (p6) br.cond.spnt 1f // not a system flag fault
1937 xor r16=r18,r19
1938 ;;
1939 extr.u r17=r16,18,1 // get the eflags.ac bit
1940 ;;
1941 cmp.eq p6,p0=0,r17
1942 (p6) br.cond.spnt 1f // eflags.ac bit didn't change
1943 ;;
1944 mov pr=r31,-1 // restore predicate registers
1945 rfi
1947 1:
1948 #endif // CONFIG_IA32_SUPPORT
1949 FAULT(46)
1950 #endif
1951 END(ia32_intercept)
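/*
 * A minimal C sketch of the non-Xen fast path above (illustrative only;
 * the helper name is hypothetical). The intercept is resumed with rfi when
 * it is a system flag fault (ISR.code == 2) and the eflags.ac bit (bit 18)
 * is what changed; everything else falls through to FAULT(46).
 *
 *	static inline int
 *	resume_system_flag_intercept(unsigned long isr,
 *	                             unsigned long new_eflag,  // ar.eflag
 *	                             unsigned long old_eflag)  // cr.iim
 *	{
 *		if (((isr >> 16) & 0xff) != 2)
 *			return 0;                       // not a system flag fault
 *		return ((new_eflag ^ old_eflag) >> 18) & 1; // did ac change?
 *	}
 */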
1953 .org ia64_ivt+0x6b00
1954 /////////////////////////////////////////////////////////////////////////////////////////
1955 // 0x6b00 Entry 47 (size 16 bundles) IA-32 Interrupt (74)
1956 ENTRY(ia32_interrupt)
1957 DBG_FAULT(47)
1958 #ifdef XEN
1959 FAULT_OR_REFLECT(47)
1960 #else
1961 #ifdef CONFIG_IA32_SUPPORT
1962 mov r31=pr
1963 br.sptk.many dispatch_to_ia32_handler
1964 #else
1965 FAULT(47)
1966 #endif
1967 #endif
1968 END(ia32_interrupt)
1970 .org ia64_ivt+0x6c00
1971 /////////////////////////////////////////////////////////////////////////////////////////
1972 // 0x6c00 Entry 48 (size 16 bundles) Reserved
1973 DBG_FAULT(48)
1974 FAULT(48)
1976 .org ia64_ivt+0x6d00
1977 /////////////////////////////////////////////////////////////////////////////////////////
1978 // 0x6d00 Entry 49 (size 16 bundles) Reserved
1979 DBG_FAULT(49)
1980 FAULT(49)
1982 .org ia64_ivt+0x6e00
1983 /////////////////////////////////////////////////////////////////////////////////////////
1984 // 0x6e00 Entry 50 (size 16 bundles) Reserved
1985 DBG_FAULT(50)
1986 FAULT(50)
1988 .org ia64_ivt+0x6f00
1989 /////////////////////////////////////////////////////////////////////////////////////////
1990 // 0x6f00 Entry 51 (size 16 bundles) Reserved
1991 DBG_FAULT(51)
1992 FAULT(51)
1994 .org ia64_ivt+0x7000
1995 /////////////////////////////////////////////////////////////////////////////////////////
1996 // 0x7000 Entry 52 (size 16 bundles) Reserved
1997 DBG_FAULT(52)
1998 FAULT(52)
2000 .org ia64_ivt+0x7100
2001 /////////////////////////////////////////////////////////////////////////////////////////
2002 // 0x7100 Entry 53 (size 16 bundles) Reserved
2003 DBG_FAULT(53)
2004 FAULT(53)
2006 .org ia64_ivt+0x7200
2007 /////////////////////////////////////////////////////////////////////////////////////////
2008 // 0x7200 Entry 54 (size 16 bundles) Reserved
2009 DBG_FAULT(54)
2010 FAULT(54)
2012 .org ia64_ivt+0x7300
2013 /////////////////////////////////////////////////////////////////////////////////////////
2014 // 0x7300 Entry 55 (size 16 bundles) Reserved
2015 DBG_FAULT(55)
2016 FAULT(55)
2018 .org ia64_ivt+0x7400
2019 /////////////////////////////////////////////////////////////////////////////////////////
2020 // 0x7400 Entry 56 (size 16 bundles) Reserved
2021 DBG_FAULT(56)
2022 FAULT(56)
2024 .org ia64_ivt+0x7500
2025 /////////////////////////////////////////////////////////////////////////////////////////
2026 // 0x7500 Entry 57 (size 16 bundles) Reserved
2027 DBG_FAULT(57)
2028 FAULT(57)
2030 .org ia64_ivt+0x7600
2031 /////////////////////////////////////////////////////////////////////////////////////////
2032 // 0x7600 Entry 58 (size 16 bundles) Reserved
2033 DBG_FAULT(58)
2034 FAULT(58)
2036 .org ia64_ivt+0x7700
2037 /////////////////////////////////////////////////////////////////////////////////////////
2038 // 0x7700 Entry 59 (size 16 bundles) Reserved
2039 DBG_FAULT(59)
2040 FAULT(59)
2042 .org ia64_ivt+0x7800
2043 /////////////////////////////////////////////////////////////////////////////////////////
2044 // 0x7800 Entry 60 (size 16 bundles) Reserved
2045 DBG_FAULT(60)
2046 FAULT(60)
2048 .org ia64_ivt+0x7900
2049 /////////////////////////////////////////////////////////////////////////////////////////
2050 // 0x7900 Entry 61 (size 16 bundles) Reserved
2051 DBG_FAULT(61)
2052 FAULT(61)
2054 .org ia64_ivt+0x7a00
2055 /////////////////////////////////////////////////////////////////////////////////////////
2056 // 0x7a00 Entry 62 (size 16 bundles) Reserved
2057 DBG_FAULT(62)
2058 FAULT(62)
2060 .org ia64_ivt+0x7b00
2061 /////////////////////////////////////////////////////////////////////////////////////////
2062 // 0x7b00 Entry 63 (size 16 bundles) Reserved
2063 DBG_FAULT(63)
2064 FAULT(63)
2066 .org ia64_ivt+0x7c00
2067 /////////////////////////////////////////////////////////////////////////////////////////
2068 // 0x7c00 Entry 64 (size 16 bundles) Reserved
2069 DBG_FAULT(64)
2070 FAULT(64)
2072 .org ia64_ivt+0x7d00
2073 /////////////////////////////////////////////////////////////////////////////////////////
2074 // 0x7d00 Entry 65 (size 16 bundles) Reserved
2075 DBG_FAULT(65)
2076 FAULT(65)
2078 .org ia64_ivt+0x7e00
2079 /////////////////////////////////////////////////////////////////////////////////////////
2080 // 0x7e00 Entry 66 (size 16 bundles) Reserved
2081 DBG_FAULT(66)
2082 FAULT(66)
2084 .org ia64_ivt+0x7f00
2085 /////////////////////////////////////////////////////////////////////////////////////////
2086 // 0x7f00 Entry 67 (size 16 bundles) Reserved
2087 DBG_FAULT(67)
2088 FAULT(67)
2090 #ifdef XEN
2091 .org ia64_ivt+0x8000
2092 GLOBAL_ENTRY(dispatch_reflection)
2093 /*
2094 * Input:
2095 * psr.ic: off
2096 * r19: intr type (offset into ivt, see ia64_int.h)
2097 * r31: contains saved predicates (pr)
2098 */
2099 SAVE_MIN_WITH_COVER_R19
2100 alloc r14=ar.pfs,0,0,5,0
2101 mov out4=r15
2102 mov out0=cr.ifa
2103 adds out1=16,sp
2104 mov out2=cr.isr
2105 mov out3=cr.iim
2106 // mov out3=cr.itir
2108 ssm psr.ic | PSR_DEFAULT_BITS
2109 ;;
2110 srlz.i // guarantee that interruption collection is on
2111 ;;
2112 (p15) ssm psr.i // restore psr.i
2113 adds r3=8,r2 // set up second base pointer
2114 ;;
2115 SAVE_REST
2116 movl r14=ia64_leave_kernel
2117 ;;
2118 mov rp=r14
2119 // br.sptk.many ia64_prepare_handle_reflection
2120 br.call.sptk.many b6=ia64_handle_reflection
2121 END(dispatch_reflection)
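/*
 * The out registers set up above correspond to a C call of roughly this
 * shape (a sketch only; the exact prototype lives in the Xen ia64 C
 * sources):
 *
 *	void ia64_handle_reflection(unsigned long ifa,     // out0 = cr.ifa
 *	                            struct pt_regs *regs,  // out1 = sp + 16
 *	                            unsigned long isr,     // out2 = cr.isr
 *	                            unsigned long iim,     // out3 = cr.iim
 *	                            unsigned long vector); // out4 = r15
 */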
2123 #define SAVE_MIN_COVER_DONE DO_SAVE_MIN(,mov r30=cr.ifs,)
2125 // Same as dispatch_break_fault, except that the cover has already been done.
2126 GLOBAL_ENTRY(dispatch_slow_hyperprivop)
2127 SAVE_MIN_COVER_DONE
2128 ;;
2129 br.sptk.many dispatch_break_fault_post_save
2130 END(dispatch_slow_hyperprivop)
2131 #endif
2133 #ifdef CONFIG_IA32_SUPPORT
2135 /*
2136 * There is no particular reason for this code to be here, other than that
2137 * there happens to be space here that would go unused otherwise. If this
2138 * fault ever gets "unreserved", simply move the following code to a more
2139 * suitable spot...
2140 */
2142 // IA32 interrupt entry point
2144 ENTRY(dispatch_to_ia32_handler)
2145 SAVE_MIN
2146 ;;
2147 mov r14=cr.isr
2148 ssm psr.ic | PSR_DEFAULT_BITS
2149 ;;
2150 srlz.i // guarantee that interruption collection is on
2151 ;;
2152 (p15) ssm psr.i
2153 adds r3=8,r2 // Base pointer for SAVE_REST
2154 ;;
2155 SAVE_REST
2156 ;;
2157 mov r15=0x80
2158 shr r14=r14,16 // Get interrupt number
2159 ;;
2160 cmp.ne p6,p0=r14,r15
2161 (p6) br.call.dpnt.many b6=non_ia32_syscall
2163 adds r14=IA64_PT_REGS_R8_OFFSET + 16,sp // 16 byte hole per SW conventions
2164 adds r15=IA64_PT_REGS_R1_OFFSET + 16,sp
2165 ;;
2166 cmp.eq pSys,pNonSys=r0,r0 // set pSys=1, pNonSys=0
2167 ld8 r8=[r14] // get r8
2168 ;;
2169 st8 [r15]=r8 // save original EAX in r1 (IA32 procs don't use the GP)
2170 ;;
2171 alloc r15=ar.pfs,0,0,6,0 // must be first in an insn group
2172 ;;
2173 ld4 r8=[r14],8 // r8 == eax (syscall number)
2174 mov r15=IA32_NR_syscalls
2175 ;;
2176 cmp.ltu.unc p6,p7=r8,r15
2177 ld4 out1=[r14],8 // r9 == ecx
2178 ;;
2179 ld4 out2=[r14],8 // r10 == edx
2180 ;;
2181 ld4 out0=[r14] // r11 == ebx
2182 adds r14=(IA64_PT_REGS_R13_OFFSET) + 16,sp
2183 ;;
2184 ld4 out5=[r14],PT(R14)-PT(R13) // r13 == ebp
2185 ;;
2186 ld4 out3=[r14],PT(R15)-PT(R14) // r14 == esi
2187 adds r2=TI_FLAGS+IA64_TASK_SIZE,r13
2188 ;;
2189 ld4 out4=[r14] // r15 == edi
2190 movl r16=ia32_syscall_table
2191 ;;
2192 (p6) shladd r16=r8,3,r16 // valid number: index the table; else r16 stays at entry 0 (ni_syscall)
2193 ld4 r2=[r2] // r2 = current_thread_info()->flags
2194 ;;
2195 ld8 r16=[r16]
2196 and r2=_TIF_SYSCALL_TRACEAUDIT,r2 // mask trace or audit
2197 ;;
2198 mov b6=r16
2199 movl r15=ia32_ret_from_syscall
2200 cmp.eq p8,p0=r2,r0
2201 ;;
2202 mov rp=r15
2203 (p8) br.call.sptk.many b6=b6
2204 br.cond.sptk ia32_trace_syscall
2206 non_ia32_syscall:
2207 alloc r15=ar.pfs,0,0,2,0
2208 mov out0=r14 // interrupt #
2209 add out1=16,sp // pointer to pt_regs
2210 ;; // avoid WAW on CFM
2211 br.call.sptk.many rp=ia32_bad_interrupt
2212 .ret1: movl r15=ia64_leave_kernel
2213 ;;
2214 mov rp=r15
2215 br.ret.sptk.many rp
2216 END(dispatch_to_ia32_handler)
2218 #endif /* CONFIG_IA32_SUPPORT */
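/*
 * A minimal C sketch of the syscall selection above (illustrative only;
 * the helper name is hypothetical). EAX picks the handler; out-of-range
 * numbers are left pointing at entry 0 of ia32_syscall_table, which the
 * comment above treats as ni_syscall. Traced or audited tasks then take
 * ia32_trace_syscall instead of calling the handler directly.
 *
 *	static inline void *
 *	ia32_pick_syscall(unsigned long eax)
 *	{
 *		extern void *ia32_syscall_table[];  // entry 0: ni_syscall
 *		return (eax < IA32_NR_syscalls) ? ia32_syscall_table[eax]
 *		                                : ia32_syscall_table[0];
 *	}
 */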