xen/arch/ia64/xen/ivt.S @ 9768:63af1c14fa18 (ia64/xen-unstable)

[IA64] missed chunk of Kevin's hypercall cleanup patch

Missed this chunk of Kevin's patch when merging with the dom0vp changes.

Signed-off-by: Kevin Tian <kevin.tian@intel.com>
author awilliam@xenbuild.aw
date Tue Apr 25 22:30:07 2006 -0600 (2006-04-25)
parents 5ee12273119c
children c3506e73b63e

#ifdef XEN
//#define CONFIG_DISABLE_VHPT	// FIXME: change when VHPT is enabled??
// these are all hacked out for now as the entire IVT
// will eventually be replaced... just want to use it
// for startup code to handle TLB misses
//#define ia64_leave_kernel 0
//#define ia64_ret_from_syscall 0
//#define ia64_handle_irq 0
//#define ia64_fault 0
#define ia64_illegal_op_fault 0
#define ia64_prepare_handle_unaligned 0
#define ia64_bad_break 0
#define ia64_trace_syscall 0
#define sys_call_table 0
#define sys_ni_syscall 0
#include <asm/vhpt.h>
#include <asm/debugger.h>
#endif
/*
 * arch/ia64/kernel/ivt.S
 *
 * Copyright (C) 1998-2001, 2003 Hewlett-Packard Co
 *	Stephane Eranian <eranian@hpl.hp.com>
 *	David Mosberger <davidm@hpl.hp.com>
 * Copyright (C) 2000, 2002-2003 Intel Co
 *	Asit Mallick <asit.k.mallick@intel.com>
 *	Suresh Siddha <suresh.b.siddha@intel.com>
 *	Kenneth Chen <kenneth.w.chen@intel.com>
 *	Fenghua Yu <fenghua.yu@intel.com>
 *
 * 00/08/23 Asit Mallick <asit.k.mallick@intel.com> TLB handling for SMP
 * 00/12/20 David Mosberger-Tang <davidm@hpl.hp.com> DTLB/ITLB handler now uses virtual PT.
 */
/*
 * This file defines the interruption vector table used by the CPU.
 * It does not include one entry per possible cause of interruption.
 *
 * The first 20 entries of the table contain 64 bundles each while the
 * remaining 48 entries contain only 16 bundles each.
 *
 * The 64 bundles are used to allow inlining the whole handler for critical
 * interruptions like TLB misses.
 *
 * For each entry, the comment is as follows:
 *
 *	// 0x1c00 Entry 7 (size 64 bundles) Data Key Miss (12,51)
 *	entry offset ----/     /         /                  /  /
 *	entry number ---------/         /                  /  /
 *	size of the entry -------------/                  /  /
 *	vector name -------------------------------------/  /
 *	interruptions triggering this vector ----------------/
 *
 * The table is 32KB in size and must be aligned on 32KB boundary.
 * (The CPU ignores the 15 lower bits of the address)
 *
 * Table is based upon EAS2.6 (Oct 1999)
 */
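
/*
 * Offset arithmetic implied by the layout above, as an illustrative C
 * sketch (not part of this file): the first 20 entries are 64 bundles
 * (1KB) apart, the remaining 48 are 16 bundles (256B) apart, giving
 * 20*1024 + 48*256 = 32768 bytes in total.
 *
 *	unsigned long ivt_entry_offset(int n)	// hypothetical helper
 *	{
 *		return (n < 20) ? n * 0x400UL
 *				: 0x5000UL + (n - 20) * 0x100UL;
 *	}
 *
 * e.g. ivt_entry_offset(7) == 0x1c00, matching the sample comment above
 * and the .org directives below.
 */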

#include <linux/config.h>

#include <asm/asmmacro.h>
#include <asm/break.h>
#include <asm/ia32.h>
#include <asm/kregs.h>
#include <asm/offsets.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/system.h>
#include <asm/thread_info.h>
#include <asm/unistd.h>
#ifdef XEN
#include <xen/errno.h>
#else
#include <asm/errno.h>
#endif

#if 1
# define PSR_DEFAULT_BITS	psr.ac
#else
# define PSR_DEFAULT_BITS	0
#endif

#if 0
  /*
   * This lets you track the last eight faults that occurred on the CPU.  Make sure ar.k2 isn't
   * needed for something else before enabling this...
   */
# define DBG_FAULT(i)	mov r16=ar.k2;; shl r16=r16,8;; add r16=(i),r16;; mov ar.k2=r16
#else
# define DBG_FAULT(i)
#endif
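
/*
 * When the #if above is enabled, DBG_FAULT keeps the vector numbers of
 * the last eight faults in ar.k2 as a byte-wide shift register.  A
 * hedged C sketch of decoding that history (how ar.k2 is read out is
 * platform-specific and assumed here):
 *
 *	void decode_fault_history(unsigned long k2)
 *	{
 *		for (int i = 0; i < 8; i++, k2 >>= 8)	// low byte = most recent
 *			printk("fault[-%d] = vector %lu\n", i, k2 & 0xff);
 *	}
 */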

#define MINSTATE_VIRT	/* needed by minstate.h */
#include "minstate.h"

#define FAULT(n)						\
	mov r31=pr;						\
	mov r19=n;;	/* prepare to save predicates */	\
	br.sptk.many dispatch_to_fault_handler

#ifdef XEN
#define REFLECT(n)						\
	mov r31=pr;						\
	mov r19=n;;	/* prepare to save predicates */	\
	br.sptk.many dispatch_reflection
#endif
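
/*
 * In brief: FAULT(n) funnels vector n into Xen's own handler via
 * dispatch_to_fault_handler, while the Xen-only REFLECT(n) hands the
 * interruption back to the current guest through dispatch_reflection,
 * so the guest kernel sees it as if it had taken the fault natively.
 */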

	.section .text.ivt,"ax"

	.align 32768	// align on 32KB boundary
	.global ia64_ivt
ia64_ivt:
/////////////////////////////////////////////////////////////////////////////////////////
// 0x0000 Entry 0 (size 64 bundles) VHPT Translation (8,20,47)
ENTRY(vhpt_miss)
	DBG_FAULT(0)
#ifdef XEN
	FAULT(0)
#else
	/*
	 * The VHPT vector is invoked when the TLB entry for the virtual page table
	 * is missing.  This happens only as a result of a previous
	 * (the "original") TLB miss, which may either be caused by an instruction
	 * fetch or a data access (or non-access).
	 *
	 * What we do here is normal TLB miss handling for the _original_ miss, followed
	 * by inserting the TLB entry for the virtual page table page that the VHPT
	 * walker was attempting to access.  The latter gets inserted as long
	 * as both L1 and L2 have valid mappings for the faulting address.
	 * The TLB entry for the original miss gets inserted only if
	 * the L3 entry indicates that the page is present.
	 *
	 * do_page_fault gets invoked in the following cases:
	 *	- the faulting virtual address uses unimplemented address bits
	 *	- the faulting virtual address has no L1, L2, or L3 mapping
	 */
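	/*
	 * The walk below is the assembly form of a standard three-level
	 * lookup.  A hedged C sketch of the same logic (helper names and
	 * the PTRS_MASK index mask are illustrative, not the kernel's
	 * exact definitions):
	 *
	 *	unsigned long *walk(unsigned long *pgd_base, unsigned long addr)
	 *	{
	 *		unsigned long *pgd, *pmd, *pte;
	 *
	 *		pgd = pgd_base + ((addr >> PGDIR_SHIFT) & PTRS_MASK);
	 *		if (*pgd == 0)
	 *			return NULL;	// L1 NULL -> page_fault
	 *		pmd = (unsigned long *)*pgd + ((addr >> PMD_SHIFT) & PTRS_MASK);
	 *		if (*pmd == 0)
	 *			return NULL;	// L2 NULL -> page_fault
	 *		pte = (unsigned long *)*pmd + ((addr >> PAGE_SHIFT) & PTRS_MASK);
	 *		return pte;		// insert only if _PAGE_P is set
	 *	}
	 */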
	mov r16=cr.ifa			// get address that caused the TLB miss
#ifdef CONFIG_HUGETLB_PAGE
	movl r18=PAGE_SHIFT
	mov r25=cr.itir
#endif
	;;
	rsm psr.dt			// use physical addressing for data
	mov r31=pr			// save the predicate registers
	mov r19=IA64_KR(PT_BASE)	// get page table base address
	shl r21=r16,3			// shift bit 60 into sign bit
	shr.u r17=r16,61		// get the region number into r17
	;;
	shr r22=r21,3
#ifdef CONFIG_HUGETLB_PAGE
	extr.u r26=r25,2,6
	;;
	cmp.ne p8,p0=r18,r26
	sub r27=r26,r18
	;;
(p8)	dep r25=r18,r25,2,6
(p8)	shr r22=r22,r27
#endif
	;;
	cmp.eq p6,p7=5,r17		// is IFA pointing into region 5?
	shr.u r18=r22,PGDIR_SHIFT	// get bits 33-63 of the faulting address
	;;
(p7)	dep r17=r17,r19,(PAGE_SHIFT-3),3	// put region number bits in place

	srlz.d
	LOAD_PHYSICAL(p6, r19, swapper_pg_dir)	// region 5 is rooted at swapper_pg_dir

	.pred.rel "mutex", p6, p7
(p6)	shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT
(p7)	shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT-3
	;;
(p6)	dep r17=r18,r19,3,(PAGE_SHIFT-3)	// r17=PTA + IFA(33,42)*8
(p7)	dep r17=r18,r17,3,(PAGE_SHIFT-6)	// r17=PTA + (((IFA(61,63) << 7) | IFA(33,39))*8)
	cmp.eq p7,p6=0,r21		// unused address bits all zeroes?
	shr.u r18=r22,PMD_SHIFT		// shift L2 index into position
	;;
	ld8 r17=[r17]			// fetch the L1 entry (may be 0)
	;;
(p7)	cmp.eq p6,p7=r17,r0		// was L1 entry NULL?
	dep r17=r18,r17,3,(PAGE_SHIFT-3)	// compute address of L2 page table entry
	;;
(p7)	ld8 r20=[r17]			// fetch the L2 entry (may be 0)
	shr.u r19=r22,PAGE_SHIFT	// shift L3 index into position
	;;
(p7)	cmp.eq.or.andcm p6,p7=r20,r0	// was L2 entry NULL?
	dep r21=r19,r20,3,(PAGE_SHIFT-3)	// compute address of L3 page table entry
	;;
(p7)	ld8 r18=[r21]			// read the L3 PTE
	mov r19=cr.isr			// cr.isr bit 0 tells us if this is an insn miss
	;;
(p7)	tbit.z p6,p7=r18,_PAGE_P_BIT	// page present bit cleared?
	mov r22=cr.iha			// get the VHPT address that caused the TLB miss
	;;				// avoid RAW on p7
(p7)	tbit.nz.unc p10,p11=r19,32	// is it an instruction TLB miss?
	dep r23=0,r20,0,PAGE_SHIFT	// clear low bits to get page address
	;;
(p10)	itc.i r18			// insert the instruction TLB entry
(p11)	itc.d r18			// insert the data TLB entry
(p6)	br.cond.spnt.many page_fault	// handle bad address/page not present (page fault)
	mov cr.ifa=r22

#ifdef CONFIG_HUGETLB_PAGE
(p8)	mov cr.itir=r25			// change to default page-size for VHPT
#endif

	/*
	 * Now compute and insert the TLB entry for the virtual page table.  We never
	 * execute in a page table page so there is no need to set the exception deferral
	 * bit.
	 */
	adds r24=__DIRTY_BITS_NO_ED|_PAGE_PL_0|_PAGE_AR_RW,r23
	;;
(p7)	itc.d r24
	;;
#ifdef CONFIG_SMP
	/*
	 * Tell the assembler's dependency-violation checker that the above "itc" instructions
	 * cannot possibly affect the following loads:
	 */
	dv_serialize_data

	/*
	 * Re-check L2 and L3 pagetable.  If they changed, we may have received a ptc.g
	 * between reading the pagetable and the "itc".  If so, flush the entry we
	 * inserted and retry.
	 */
	ld8 r25=[r21]			// read L3 PTE again
	ld8 r26=[r17]			// read L2 entry again
	;;
	cmp.ne p6,p7=r26,r20		// did L2 entry change
	mov r27=PAGE_SHIFT<<2
	;;
(p6)	ptc.l r22,r27			// purge PTE page translation
(p7)	cmp.ne.or.andcm p6,p7=r25,r18	// did L3 PTE change
	;;
(p6)	ptc.l r16,r27			// purge translation
#endif

	mov pr=r31,-1			// restore predicate registers
	rfi
#endif
END(vhpt_miss)

	.org ia64_ivt+0x400
/////////////////////////////////////////////////////////////////////////////////////////
// 0x0400 Entry 1 (size 64 bundles) ITLB (21)
ENTRY(itlb_miss)
	DBG_FAULT(1)
#ifdef XEN
	mov r31=pr
	mov r16=cr.ifa
	;;
	extr.u r17=r16,59,5
	;;
	/* If address belongs to VMM, go to alt tlb handler */
	cmp.eq p6,p0=0x1e,r17		// (0xf0 >> 3) = 0x1e
(p6)	br.cond.spnt late_alt_itlb_miss
	;;
	cmp.eq p6,p0=0x1d,r17		// (0xe8 >> 3) = 0x1d
(p6)	br.cond.spnt late_alt_itlb_miss
	;;
	mov pr=r31,0x1ffff
	;;
#ifdef VHPT_GLOBAL
	br.cond.sptk fast_tlb_miss_reflect
	;;
#endif
#endif
	/*
	 * The ITLB handler accesses the L3 PTE via the virtually mapped linear
	 * page table.  If a nested TLB miss occurs, we switch into physical
	 * mode, walk the page table, and then re-execute the L3 PTE read
	 * and go on normally after that.
	 */
	mov r16=cr.ifa			// get virtual address
	mov r29=b0			// save b0
	mov r31=pr			// save predicates
.itlb_fault:
	mov r17=cr.iha			// get virtual address of L3 PTE
	movl r30=1f			// load nested fault continuation point
	;;
1:	ld8 r18=[r17]			// read L3 PTE
	;;
	mov b0=r29
	tbit.z p6,p0=r18,_PAGE_P_BIT	// page present bit cleared?
(p6)	br.cond.spnt page_fault
	;;
	itc.i r18
	;;
#ifdef CONFIG_SMP
	/*
	 * Tell the assembler's dependency-violation checker that the above "itc" instruction
	 * cannot possibly affect the following loads:
	 */
	dv_serialize_data

	ld8 r19=[r17]			// read L3 PTE again and see if same
	mov r20=PAGE_SHIFT<<2		// setup page size for purge
	;;
	cmp.ne p7,p0=r18,r19
	;;
(p7)	ptc.l r16,r20
#endif
	mov pr=r31,-1
	rfi
END(itlb_miss)

	.org ia64_ivt+0x0800
/////////////////////////////////////////////////////////////////////////////////////////
// 0x0800 Entry 2 (size 64 bundles) DTLB (9,48)
ENTRY(dtlb_miss)
	DBG_FAULT(2)
#ifdef XEN
	mov r31=pr
	mov r16=cr.ifa			// get virtual address
	;;
	extr.u r17=r16,59,5
	;;
	/* If address belongs to VMM, go to alt tlb handler */
	cmp.eq p6,p0=0x1e,r17
(p6)	br.cond.spnt late_alt_dtlb_miss
	;;
	cmp.eq p6,p0=0x1d,r17
(p6)	br.cond.spnt late_alt_dtlb_miss
	;;
#if VHPT_ENABLED
	// XXX TODO optimization
	mov r30=cr.ipsr
	mov r28=cr.iip
	mov r17=cr.isr
	;;

	extr.u r18=r30,IA64_PSR_CPL0_BIT,2	// extract psr.cpl
	;;
	cmp.ne p6,p0=r0,r18		// cpl != 0?
(p6)	br.cond.sptk 2f

	// is speculation bit on?
	tbit.nz p7,p0=r17,IA64_ISR_SP_BIT
	;;
(p7)	br.cond.spnt 2f

	// Is the faulted iip in the vmm area?
	// check bits [59:58]
	// 00, 11: guest
	// 01, 10: vmm
	extr.u r19=r28,58,2
	;;
	cmp.eq p10,p0=0x0,r19
(p10)	br.cond.sptk 2f
	cmp.eq p11,p0=0x3,r19
(p11)	br.cond.sptk 2f

	// Is the faulted address in the identity mapping area?
	// 0xf000... or 0xe8000...
	extr.u r20=r16,59,5
	;;
	cmp.eq p12,p0=0x1e,r20		// (0xf0 >> 3) = 0x1e
(p12)	br.cond.spnt 1f
	cmp.eq p0,p13=0x1d,r20		// (0xe8 >> 3) = 0x1d
(p13)	br.cond.sptk 2f

1:
	// xen identity mapping area.
	movl r24=PAGE_KERNEL
	movl r25=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
	;;
	shr.u r26=r16,55		// move address bit 59 to bit 4
	and r25=r25,r16			// clear ed, reserved bits, and PTE control bits
	;;
	and r26=0x10,r26		// bit 4=address-bit(59)
	;;
	or r25=r25,r24			// insert PTE control bits into r25
	;;
	or r25=r25,r26			// set bit 4 (uncached) if the access was to the 0xe8 space
	;;
	itc.d r25			// insert the TLB entry
	mov pr=r31,-1
	rfi
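
	/*
	 * The fast path above builds an identity-mapped PTE directly from
	 * the faulting address.  A hedged C sketch of the computation
	 * (mask and flag names follow the code above):
	 *
	 *	unsigned long ident_pte(unsigned long addr)
	 *	{
	 *		unsigned long mask = ((1UL << IA64_MAX_PHYS_BITS) - 1) & ~0xfffUL;
	 *		unsigned long pte = (addr & mask) | PAGE_KERNEL;
	 *
	 *		// address bit 59 distinguishes the 0xe8 (uncached)
	 *		// space; moved to PTE bit 4 it selects the UC attribute
	 *		pte |= (addr >> 55) & 0x10;
	 *		return pte;
	 *	}
	 */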
2:
#endif
#ifdef VHPT_GLOBAL
//	br.cond.sptk page_fault
	br.cond.sptk fast_tlb_miss_reflect
	;;
#endif
	mov r29=b0			// save b0
#else
	/*
	 * The DTLB handler accesses the L3 PTE via the virtually mapped linear
	 * page table.  If a nested TLB miss occurs, we switch into physical
	 * mode, walk the page table, and then re-execute the L3 PTE read
	 * and go on normally after that.
	 */
	mov r16=cr.ifa			// get virtual address
	mov r29=b0			// save b0
	mov r31=pr			// save predicates
#endif
dtlb_fault:
	mov r17=cr.iha			// get virtual address of L3 PTE
	movl r30=1f			// load nested fault continuation point
	;;
1:	ld8 r18=[r17]			// read L3 PTE
	;;
	mov b0=r29
	tbit.z p6,p0=r18,_PAGE_P_BIT	// page present bit cleared?
(p6)	br.cond.spnt page_fault
	;;
	itc.d r18
	;;
#ifdef CONFIG_SMP
	/*
	 * Tell the assembler's dependency-violation checker that the above "itc" instruction
	 * cannot possibly affect the following loads:
	 */
	dv_serialize_data

	ld8 r19=[r17]			// read L3 PTE again and see if same
	mov r20=PAGE_SHIFT<<2		// setup page size for purge
	;;
	cmp.ne p7,p0=r18,r19
	;;
(p7)	ptc.l r16,r20
#endif
	mov pr=r31,-1
	rfi
END(dtlb_miss)

	.org ia64_ivt+0x0c00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x0c00 Entry 3 (size 64 bundles) Alt ITLB (19)
ENTRY(alt_itlb_miss)
	DBG_FAULT(3)
#ifdef XEN
	mov r31=pr
	mov r16=cr.ifa			// get address that caused the TLB miss
	;;
late_alt_itlb_miss:
	movl r17=PAGE_KERNEL
	mov r21=cr.ipsr
	movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
	;;
#else
	mov r16=cr.ifa			// get address that caused the TLB miss
	movl r17=PAGE_KERNEL
	mov r21=cr.ipsr
	movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
	mov r31=pr
	;;
#endif
#ifdef CONFIG_DISABLE_VHPT
	shr.u r22=r16,61		// get the region number into r22
	;;
	cmp.gt p8,p0=6,r22		// user mode
	;;
(p8)	thash r17=r16
	;;
(p8)	mov cr.iha=r17
(p8)	mov r29=b0			// save b0
(p8)	br.cond.dptk .itlb_fault
#endif
	extr.u r23=r21,IA64_PSR_CPL0_BIT,2	// extract psr.cpl
	and r19=r19,r16			// clear ed, reserved bits, and PTE control bits
#ifdef XEN
	shr.u r18=r16,55		// move address bit 59 to bit 4
	;;
	and r18=0x10,r18		// bit 4=address-bit(59)
#else
	shr.u r18=r16,57		// move address bit 61 to bit 4
	;;
	andcm r18=0x10,r18		// bit 4=~address-bit(61)
#endif
	cmp.ne p8,p0=r0,r23		// psr.cpl != 0?
	or r19=r17,r19			// insert PTE control bits into r19
	;;
	or r19=r19,r18			// set bit 4 (uncached) if the access was to region 6
(p8)	br.cond.spnt page_fault
	;;
	itc.i r19			// insert the TLB entry
	mov pr=r31,-1
	rfi
END(alt_itlb_miss)

	.org ia64_ivt+0x1000
/////////////////////////////////////////////////////////////////////////////////////////
// 0x1000 Entry 4 (size 64 bundles) Alt DTLB (7,46)
ENTRY(alt_dtlb_miss)
	DBG_FAULT(4)
#ifdef XEN
	mov r31=pr
	mov r16=cr.ifa			// get address that caused the TLB miss
	;;
late_alt_dtlb_miss:
	movl r17=PAGE_KERNEL
	mov r20=cr.isr
	movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
	mov r21=cr.ipsr
	;;
#else
#endif
#ifdef CONFIG_DISABLE_VHPT
	shr.u r22=r16,61		// get the region number into r22
	;;
	cmp.gt p8,p0=6,r22		// access to region 0-5
	;;
(p8)	thash r17=r16
	;;
(p8)	mov cr.iha=r17
(p8)	mov r29=b0			// save b0
(p8)	br.cond.dptk dtlb_fault
#endif
	extr.u r23=r21,IA64_PSR_CPL0_BIT,2	// extract psr.cpl
	and r22=IA64_ISR_CODE_MASK,r20	// get the isr.code field
	tbit.nz p6,p7=r20,IA64_ISR_SP_BIT	// is speculation bit on?
#ifdef XEN
	shr.u r18=r16,55		// move address bit 59 to bit 4
	and r19=r19,r16			// clear ed, reserved bits, and PTE control bits
	tbit.nz p9,p0=r20,IA64_ISR_NA_BIT	// is non-access bit on?
	;;
	and r18=0x10,r18		// bit 4=address-bit(59)
#else
	shr.u r18=r16,57		// move address bit 61 to bit 4
	and r19=r19,r16			// clear ed, reserved bits, and PTE control bits
	tbit.nz p9,p0=r20,IA64_ISR_NA_BIT	// is non-access bit on?
	;;
	andcm r18=0x10,r18		// bit 4=~address-bit(61)
#endif
	cmp.ne p8,p0=r0,r23
(p9)	cmp.eq.or.andcm p6,p7=IA64_ISR_CODE_LFETCH,r22	// check isr.code field
(p8)	br.cond.spnt page_fault
#ifdef XEN
	;;
#ifdef CONFIG_VIRTUAL_FRAME_TABLE
	// Test for the address of the virtual frame_table
	shr r22=r16,56;;
	cmp.eq p8,p0=((VIRT_FRAME_TABLE_ADDR>>56)&0xff)-0x100,r22
(p8)	br.cond.sptk frametable_miss ;;
#endif
	// Test for Xen address; if not, handle via page_fault.
	// Note that 0xf000 (cached) and 0xe800 (uncached) addresses
	// should be OK.
	extr.u r22=r16,59,5;;
	cmp.eq p8,p0=0x1e,r22
(p8)	br.cond.spnt 1f;;
	cmp.ne p8,p0=0x1d,r22
(p8)	br.cond.sptk page_fault ;;
1:
#endif

	dep r21=-1,r21,IA64_PSR_ED_BIT,1
	or r19=r19,r17			// insert PTE control bits into r19
	;;
	or r19=r19,r18			// set bit 4 (uncached) if the access was to region 6
(p6)	mov cr.ipsr=r21
	;;
(p7)	itc.d r19			// insert the TLB entry
	mov pr=r31,-1
	rfi
END(alt_dtlb_miss)
#ifdef CONFIG_VIRTUAL_FRAME_TABLE
GLOBAL_ENTRY(frametable_miss)
	rsm psr.dt			// switch to using physical data addressing
	movl r24=(frametable_pg_dir-PAGE_OFFSET)	// r24=__pa(frametable_pg_dir)
	;;
	srlz.d
	extr.u r17=r16,PGDIR_SHIFT,(PAGE_SHIFT-3)
	;;
	shladd r24=r17,3,r24		// r24=&pgd[pgd_offset(addr)]
	;;
	ld8 r24=[r24]			// r24=pgd[pgd_offset(addr)]
	extr.u r18=r16,PMD_SHIFT,(PAGE_SHIFT-3)	// r18=pmd_offset
	;;
	cmp.eq p6,p7=0,r24		// pgd present?
	shladd r24=r18,3,r24		// r24=&pmd[pmd_offset(addr)]
	;;
(p7)	ld8 r24=[r24]			// r24=pmd[pmd_offset(addr)]
	extr.u r19=r16,PAGE_SHIFT,(PAGE_SHIFT-3)	// r19=pte_offset
(p6)	br.spnt.few frametable_fault
	;;
	cmp.eq p6,p7=0,r24		// pmd present?
	shladd r24=r19,3,r24		// r24=&pte[pte_offset(addr)]
	;;
(p7)	ld8 r24=[r24]			// r24=pte[pte_offset(addr)]
	mov r25=0x700|(_PAGE_SIZE_16K<<2)	// key=7
(p6)	br.spnt.few frametable_fault
	;;
	mov cr.itir=r25
	ssm psr.dt			// switch to using virtual data addressing
	tbit.z p6,p7=r24,_PAGE_P_BIT	// pte present?
	;;
(p7)	itc.d r24			// install updated PTE
(p6)	br.spnt.few frametable_fault	// page present bit cleared?
	;;
	mov pr=r31,-1			// restore predicate registers
	rfi
END(frametable_miss)
ENTRY(frametable_fault)
	ssm psr.dt			// switch to using virtual data addressing
	mov r18=cr.iip
	movl r19=ia64_frametable_probe
	;;
	cmp.eq p6,p7=r18,r19		// is faulting address ia64_frametable_probe?
	mov r8=0			// assumes that 'probe.r' uses r8
	dep r21=-1,r21,IA64_PSR_RI_BIT+1,1	// return to next instruction in bundle 2
	;;
(p6)	mov cr.ipsr=r21
	mov r19=4			// FAULT(4)
(p7)	br.spnt.few dispatch_to_fault_handler
	;;
	mov pr=r31,-1
	rfi
END(frametable_fault)
GLOBAL_ENTRY(ia64_frametable_probe)
	probe.r	r8=r32,0		// destination register must be r8
	nop.f 0x0
	br.ret.sptk.many b0		// this instruction must be in bundle 2
END(ia64_frametable_probe)
#endif /* CONFIG_VIRTUAL_FRAME_TABLE */
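
/*
 * frametable_fault cooperates with ia64_frametable_probe above: a miss
 * raised by the probe.r in that stub is answered by forcing r8=0 and
 * advancing past the probe, so the probe simply reports "unmapped".  A
 * hedged C view of the resulting convention (the caller shown is
 * illustrative, not code from this tree):
 *
 *	extern unsigned long ia64_frametable_probe(unsigned long addr);
 *
 *	int frame_is_mapped(unsigned long addr)
 *	{
 *		return ia64_frametable_probe(addr) != 0;	// 0 only on the fault path
 *	}
 */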

	.org ia64_ivt+0x1400
/////////////////////////////////////////////////////////////////////////////////////////
// 0x1400 Entry 5 (size 64 bundles) Data nested TLB (6,45)
ENTRY(nested_dtlb_miss)
	DBG_FAULT(5)
#ifdef XEN
	mov b0=r30
	br.sptk.many b0			// return to continuation point
	;;
#else
	/*
	 * In the absence of kernel bugs, we get here when the virtually mapped linear
	 * page table is accessed non-speculatively (e.g., in the Dirty-bit, Instruction
	 * Access-bit, or Data Access-bit faults).  If the DTLB entry for the virtual page
	 * table is missing, a nested TLB miss fault is triggered and control is
	 * transferred to this point.  When this happens, we lookup the pte for the
	 * faulting address by walking the page table in physical mode and return to the
	 * continuation point passed in register r30 (or call page_fault if the address is
	 * not mapped).
	 *
	 * Input:	r16:	faulting address
	 *		r29:	saved b0
	 *		r30:	continuation address
	 *		r31:	saved pr
	 *
	 * Output:	r17:	physical address of L3 PTE of faulting address
	 *		r29:	saved b0
	 *		r30:	continuation address
	 *		r31:	saved pr
	 *
	 * Clobbered:	b0, r18, r19, r21, psr.dt (cleared)
	 */
	rsm psr.dt			// switch to using physical data addressing
	mov r19=IA64_KR(PT_BASE)	// get the page table base address
	shl r21=r16,3			// shift bit 60 into sign bit
	;;
	shr.u r17=r16,61		// get the region number into r17
	;;
	cmp.eq p6,p7=5,r17		// is faulting address in region 5?
	shr.u r18=r16,PGDIR_SHIFT	// get bits 33-63 of faulting address
	;;
(p7)	dep r17=r17,r19,(PAGE_SHIFT-3),3	// put region number bits in place

	srlz.d
	LOAD_PHYSICAL(p6, r19, swapper_pg_dir)	// region 5 is rooted at swapper_pg_dir

	.pred.rel "mutex", p6, p7
(p6)	shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT
(p7)	shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT-3
	;;
(p6)	dep r17=r18,r19,3,(PAGE_SHIFT-3)	// r17=PTA + IFA(33,42)*8
(p7)	dep r17=r18,r17,3,(PAGE_SHIFT-6)	// r17=PTA + (((IFA(61,63) << 7) | IFA(33,39))*8)
	cmp.eq p7,p6=0,r21		// unused address bits all zeroes?
	shr.u r18=r16,PMD_SHIFT		// shift L2 index into position
	;;
	ld8 r17=[r17]			// fetch the L1 entry (may be 0)
	;;
(p7)	cmp.eq p6,p7=r17,r0		// was L1 entry NULL?
	dep r17=r18,r17,3,(PAGE_SHIFT-3)	// compute address of L2 page table entry
	;;
(p7)	ld8 r17=[r17]			// fetch the L2 entry (may be 0)
	shr.u r19=r16,PAGE_SHIFT	// shift L3 index into position
	;;
(p7)	cmp.eq.or.andcm p6,p7=r17,r0	// was L2 entry NULL?
	dep r17=r19,r17,3,(PAGE_SHIFT-3)	// compute address of L3 page table entry
(p6)	br.cond.spnt page_fault
	mov b0=r30
	br.sptk.many b0			// return to continuation point
#endif
END(nested_dtlb_miss)

	.org ia64_ivt+0x1800
/////////////////////////////////////////////////////////////////////////////////////////
// 0x1800 Entry 6 (size 64 bundles) Instruction Key Miss (24)
ENTRY(ikey_miss)
	DBG_FAULT(6)
#ifdef XEN
	REFLECT(6)
#endif
	FAULT(6)
END(ikey_miss)

	//-----------------------------------------------------------------------------------
	// call do_page_fault (predicates are in r31, psr.dt may be off, r16 is faulting address)
#ifdef XEN
GLOBAL_ENTRY(page_fault)
#else
ENTRY(page_fault)
#endif
	ssm psr.dt
	;;
	srlz.i
	;;
	SAVE_MIN_WITH_COVER
#ifdef XEN
	alloc r15=ar.pfs,0,0,4,0
	mov out0=cr.ifa
	mov out1=cr.isr
	mov out3=cr.itir
#else
	alloc r15=ar.pfs,0,0,3,0
	mov out0=cr.ifa
	mov out1=cr.isr
#endif
	adds r3=8,r2			// set up second base pointer
	;;
	ssm psr.ic | PSR_DEFAULT_BITS
	;;
	srlz.i				// guarantee that interruption collection is on
	;;
(p15)	ssm psr.i			// restore psr.i
	movl r14=ia64_leave_kernel
	;;
	SAVE_REST
	mov rp=r14
	;;
	adds out2=16,r12		// out2 = pointer to pt_regs
	br.call.sptk.many b6=ia64_do_page_fault	// ignore return address
END(page_fault)

	.org ia64_ivt+0x1c00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x1c00 Entry 7 (size 64 bundles) Data Key Miss (12,51)
ENTRY(dkey_miss)
	DBG_FAULT(7)
#ifdef XEN
	REFLECT(7)
#endif
	FAULT(7)
END(dkey_miss)

	.org ia64_ivt+0x2000
/////////////////////////////////////////////////////////////////////////////////////////
// 0x2000 Entry 8 (size 64 bundles) Dirty-bit (54)
ENTRY(dirty_bit)
	DBG_FAULT(8)
#ifdef XEN
	REFLECT(8)
#endif
	/*
	 * What we do here is to simply turn on the dirty bit in the PTE.  We need to
	 * update both the page-table and the TLB entry.  To efficiently access the PTE,
	 * we address it through the virtual page table.  Most likely, the TLB entry for
	 * the relevant virtual page table page is still present in the TLB so we can
	 * normally do this without additional TLB misses.  In case the necessary virtual
	 * page table TLB entry isn't present, we take a nested TLB miss hit where we look
	 * up the physical address of the L3 PTE and then continue at label 1 below.
	 */
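	/*
	 * On SMP the update must be atomic with respect to a concurrent
	 * ptc.g.  A hedged C sketch of the protocol implemented below
	 * (cmpxchg/itc_d/ptc_l stand in for the actual instructions):
	 *
	 *	void set_dirty(unsigned long *pte, unsigned long vaddr)
	 *	{
	 *		unsigned long old = *pte;
	 *		unsigned long new = old | _PAGE_D | _PAGE_A;
	 *
	 *		if (cmpxchg(pte, old, new) == old)
	 *			itc_d(new);			// install updated PTE
	 *		if (*pte != new)			// raced with a ptc.g?
	 *			ptc_l(vaddr, PAGE_SHIFT << 2);	// purge; let it refault
	 *	}
	 */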
	mov r16=cr.ifa			// get the address that caused the fault
	movl r30=1f			// load continuation point in case of nested fault
	;;
	thash r17=r16			// compute virtual address of L3 PTE
	mov r29=b0			// save b0 in case of nested fault
	mov r31=pr			// save pr
#ifdef CONFIG_SMP
	mov r28=ar.ccv			// save ar.ccv
	;;
1:	ld8 r18=[r17]
	;;				// avoid RAW on r18
	mov ar.ccv=r18			// set compare value for cmpxchg
	or r25=_PAGE_D|_PAGE_A,r18	// set the dirty and accessed bits
	;;
	cmpxchg8.acq r26=[r17],r25,ar.ccv
	mov r24=PAGE_SHIFT<<2
	;;
	cmp.eq p6,p7=r26,r18
	;;
(p6)	itc.d r25			// install updated PTE
	;;
	/*
	 * Tell the assembler's dependency-violation checker that the above "itc" instruction
	 * cannot possibly affect the following loads:
	 */
	dv_serialize_data

	ld8 r18=[r17]			// read PTE again
	;;
	cmp.eq p6,p7=r18,r25		// is it same as the newly installed
	;;
(p7)	ptc.l r16,r24
	mov b0=r29			// restore b0
	mov ar.ccv=r28
#else
	;;
1:	ld8 r18=[r17]
	;;				// avoid RAW on r18
	or r18=_PAGE_D|_PAGE_A,r18	// set the dirty and accessed bits
	mov b0=r29			// restore b0
	;;
	st8 [r17]=r18			// store back updated PTE
	itc.d r18			// install updated PTE
#endif
	mov pr=r31,-1			// restore pr
	rfi
END(dirty_bit)

	.org ia64_ivt+0x2400
/////////////////////////////////////////////////////////////////////////////////////////
// 0x2400 Entry 9 (size 64 bundles) Instruction Access-bit (27)
ENTRY(iaccess_bit)
	DBG_FAULT(9)
#ifdef XEN
	mov r31=pr
	mov r16=cr.isr
	mov r17=cr.ifa
	mov r19=9
	movl r20=0x2400
	br.sptk.many fast_access_reflect;;
#endif
	// Like Entry 8, except for instruction access
	mov r16=cr.ifa			// get the address that caused the fault
	movl r30=1f			// load continuation point in case of nested fault
	mov r31=pr			// save predicates
#ifdef CONFIG_ITANIUM
	/*
	 * Erratum 10 (IFA may contain incorrect address) has "NoFix" status.
	 */
	mov r17=cr.ipsr
	;;
	mov r18=cr.iip
	tbit.z p6,p0=r17,IA64_PSR_IS_BIT	// IA64 instruction set?
	;;
(p6)	mov r16=r18			// if so, use cr.iip instead of cr.ifa
#endif /* CONFIG_ITANIUM */
	;;
	thash r17=r16			// compute virtual address of L3 PTE
	mov r29=b0			// save b0 in case of nested fault
#ifdef CONFIG_SMP
	mov r28=ar.ccv			// save ar.ccv
	;;
1:	ld8 r18=[r17]
	;;
	mov ar.ccv=r18			// set compare value for cmpxchg
	or r25=_PAGE_A,r18		// set the accessed bit
	;;
	cmpxchg8.acq r26=[r17],r25,ar.ccv
	mov r24=PAGE_SHIFT<<2
	;;
	cmp.eq p6,p7=r26,r18
	;;
(p6)	itc.i r25			// install updated PTE
	;;
	/*
	 * Tell the assembler's dependency-violation checker that the above "itc" instruction
	 * cannot possibly affect the following loads:
	 */
	dv_serialize_data

	ld8 r18=[r17]			// read PTE again
	;;
	cmp.eq p6,p7=r18,r25		// is it same as the newly installed
	;;
(p7)	ptc.l r16,r24
	mov b0=r29			// restore b0
	mov ar.ccv=r28
#else /* !CONFIG_SMP */
	;;
1:	ld8 r18=[r17]
	;;
	or r18=_PAGE_A,r18		// set the accessed bit
	mov b0=r29			// restore b0
	;;
	st8 [r17]=r18			// store back updated PTE
	itc.i r18			// install updated PTE
#endif /* !CONFIG_SMP */
	mov pr=r31,-1
	rfi
END(iaccess_bit)

	.org ia64_ivt+0x2800
/////////////////////////////////////////////////////////////////////////////////////////
// 0x2800 Entry 10 (size 64 bundles) Data Access-bit (15,55)
ENTRY(daccess_bit)
	DBG_FAULT(10)
#ifdef XEN
	mov r31=pr
	mov r16=cr.isr
	mov r17=cr.ifa
	mov r19=10
	movl r20=0x2800
	br.sptk.many fast_access_reflect;;
#endif
	// Like Entry 8, except for data access
	mov r16=cr.ifa			// get the address that caused the fault
	movl r30=1f			// load continuation point in case of nested fault
	;;
	thash r17=r16			// compute virtual address of L3 PTE
	mov r31=pr
	mov r29=b0			// save b0 in case of nested fault
#ifdef CONFIG_SMP
	mov r28=ar.ccv			// save ar.ccv
	;;
1:	ld8 r18=[r17]
	;;				// avoid RAW on r18
	mov ar.ccv=r18			// set compare value for cmpxchg
	or r25=_PAGE_A,r18		// set the accessed bit
	;;
	cmpxchg8.acq r26=[r17],r25,ar.ccv
	mov r24=PAGE_SHIFT<<2
	;;
	cmp.eq p6,p7=r26,r18
	;;
(p6)	itc.d r25			// install updated PTE
	/*
	 * Tell the assembler's dependency-violation checker that the above "itc" instruction
	 * cannot possibly affect the following loads:
	 */
	dv_serialize_data
	;;
	ld8 r18=[r17]			// read PTE again
	;;
	cmp.eq p6,p7=r18,r25		// is it same as the newly installed
	;;
(p7)	ptc.l r16,r24
	mov ar.ccv=r28
#else
	;;
1:	ld8 r18=[r17]
	;;				// avoid RAW on r18
	or r18=_PAGE_A,r18		// set the accessed bit
	;;
	st8 [r17]=r18			// store back updated PTE
	itc.d r18			// install updated PTE
#endif
	mov b0=r29			// restore b0
	mov pr=r31,-1
	rfi
END(daccess_bit)

	.org ia64_ivt+0x2c00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x2c00 Entry 11 (size 64 bundles) Break instruction (33)
ENTRY(break_fault)
	/*
	 * The streamlined system call entry/exit paths only save/restore the initial part
	 * of pt_regs.  This implies that the callers of system-calls must adhere to the
	 * normal procedure calling conventions.
	 *
	 *   Registers to be saved & restored:
	 *	CR registers: cr.ipsr, cr.iip, cr.ifs
	 *	AR registers: ar.unat, ar.pfs, ar.rsc, ar.rnat, ar.bspstore, ar.fpsr
	 *	others: pr, b0, b6, loadrs, r1, r11, r12, r13, r15
	 *   Registers to be restored only:
	 *	r8-r11: output value from the system call.
	 *
	 * During system call exit, scratch registers (including r15) are modified/cleared
	 * to prevent leaking bits from kernel to user level.
	 */
	DBG_FAULT(11)
#ifdef XEN
	mov r16=cr.isr
	mov r17=cr.iim
	mov r31=pr
	;;
	cmp.eq p7,p0=r17,r0
(p7)	br.spnt.few dispatch_break_fault ;;
#ifdef CRASH_DEBUG
	// A panic can occur before domain0 is created.  In such cases,
	// referencing XSI_PSR_IC causes nested_dtlb_miss.
	movl r18=CDB_BREAK_NUM ;;
	cmp.eq p7,p0=r17,r18 ;;
(p7)	br.spnt.few dispatch_break_fault ;;
#endif
	movl r18=XSI_PSR_IC
	;;
	ld4 r19=[r18]
	;;
	cmp.eq p7,p0=r0,r17		// is this a pseudo-cover?
(p7)	br.spnt.many dispatch_privop_fault
	;;
	// If vpsr.ic is off, we have a hyperprivop.
	// A hyperprivop is hand-coded assembly with psr.ic off
	// which means no calls, no use of r1-r15 and no memory accesses
	// except to pinned addresses!
	cmp4.eq p7,p0=r0,r19
(p7)	br.sptk.many fast_hyperprivop
	;;
	movl r22=THIS_CPU(cpu_kr)+IA64_KR_CURRENT_OFFSET;;
	ld8 r22=[r22]
	;;
	adds r22=IA64_VCPU_BREAKIMM_OFFSET,r22;;
	ld4 r23=[r22];;
	cmp4.eq p6,p7=r23,r17		// Xen-reserved breakimm?
(p6)	br.spnt.many dispatch_break_fault
	;;
	br.sptk.many fast_break_reflect
	;;
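
	/*
	 * Decision tree implemented by the Xen block above, as a hedged C
	 * sketch.  (The second iim==0 test in the code, the pseudo-cover
	 * check, is shadowed by the first branch and omitted here.)
	 *
	 *	if (iim == 0)				// includes pseudo-cover
	 *		dispatch_break_fault();
	 *	else if (!vpsr_ic)			// hand-coded hyperprivop
	 *		fast_hyperprivop();
	 *	else if (iim == vcpu->breakimm)		// Xen-reserved breakimm
	 *		dispatch_break_fault();
	 *	else
	 *		fast_break_reflect();		// reflect break to guest
	 */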
#endif
	movl r16=THIS_CPU(cpu_kr)+IA64_KR_CURRENT_OFFSET;;
	ld8 r16=[r16]
	mov r17=cr.iim
	mov r18=__IA64_BREAK_SYSCALL
	mov r21=ar.fpsr
	mov r29=cr.ipsr
	mov r19=b6
	mov r25=ar.unat
	mov r27=ar.rsc
	mov r26=ar.pfs
	mov r28=cr.iip
#ifndef XEN
	mov r31=pr			// prepare to save predicates
#endif
	mov r20=r1
	;;
	adds r16=IA64_TASK_THREAD_ON_USTACK_OFFSET,r16
	cmp.eq p0,p7=r18,r17		// is this a system call? (p7 <- false, if so)
(p7)	br.cond.spnt non_syscall
	;;
	ld1 r17=[r16]			// load current->thread.on_ustack flag
	st1 [r16]=r0			// clear current->thread.on_ustack flag
	add r1=-IA64_TASK_THREAD_ON_USTACK_OFFSET,r16	// set r1 for MINSTATE_START_SAVE_MIN_VIRT
	;;
	invala

	/* adjust return address so we skip over the break instruction: */

	extr.u r8=r29,41,2		// extract ei field from cr.ipsr
	;;
	cmp.eq p6,p7=2,r8		// ipsr.ei==2?
	mov r2=r1			// setup r2 for ia64_syscall_setup
	;;
(p6)	mov r8=0			// clear ei to 0
(p6)	adds r28=16,r28			// switch cr.iip to next bundle (cr.ipsr.ei wrapped)
(p7)	adds r8=1,r8			// increment ei to next slot
	;;
	cmp.eq pKStk,pUStk=r0,r17	// are we in kernel mode already?
	dep r29=r8,r29,41,2		// insert new ei into cr.ipsr
	;;

	// switch from user to kernel RBS:
	MINSTATE_START_SAVE_MIN_VIRT
	br.call.sptk.many b7=ia64_syscall_setup
	;;
	MINSTATE_END_SAVE_MIN_VIRT	// switch to bank 1
	ssm psr.ic | PSR_DEFAULT_BITS
	;;
	srlz.i				// guarantee that interruption collection is on
	mov r3=NR_syscalls - 1
	;;
(p15)	ssm psr.i			// restore psr.i
	// p10==true means out registers are more than 8 or r15's Nat is true
(p10)	br.cond.spnt.many ia64_ret_from_syscall
	;;
	movl r16=sys_call_table

	adds r15=-1024,r15		// r15 contains the syscall number---subtract 1024
	movl r2=ia64_ret_from_syscall
	;;
	shladd r20=r15,3,r16		// r20 = sys_call_table + 8*(syscall-1024)
	cmp.leu p6,p7=r15,r3		// (syscall > 0 && syscall < 1024 + NR_syscalls) ?
	mov rp=r2			// set the real return addr
	;;
(p6)	ld8 r20=[r20]			// load address of syscall entry point
(p7)	movl r20=sys_ni_syscall

	add r2=TI_FLAGS+IA64_TASK_SIZE,r13
	;;
	ld4 r2=[r2]			// r2 = current_thread_info()->flags
	;;
	and r2=_TIF_SYSCALL_TRACEAUDIT,r2	// mask trace or audit
	;;
	cmp.eq p8,p0=r2,r0
	mov b6=r20
	;;
(p8)	br.call.sptk.many b6=b6		// ignore this return addr
	br.cond.sptk ia64_trace_syscall
	// NOT REACHED
END(break_fault)

	.org ia64_ivt+0x3000
/////////////////////////////////////////////////////////////////////////////////////////
// 0x3000 Entry 12 (size 64 bundles) External Interrupt (4)
ENTRY(interrupt)
	DBG_FAULT(12)
	mov r31=pr			// prepare to save predicates
	;;
#ifdef XEN
	mov r30=cr.ivr			// pass cr.ivr as first arg
	// FIXME: this is a hack... we use cpuinfo.ksoftirqd because it is
	// not used anywhere else, we need a place to stash ivr, and
	// there are no registers left unused by SAVE_MIN/REST
	movl r29=THIS_CPU(cpu_info)+IA64_CPUINFO_KSOFTIRQD_OFFSET;;
	st8 [r29]=r30;;
	movl r28=slow_interrupt;;
	mov r29=rp;;
	mov rp=r28;;
	br.cond.sptk.many fast_tick_reflect
	;;
slow_interrupt:
	mov rp=r29;;
#endif
	SAVE_MIN_WITH_COVER		// uses r31; defines r2 and r3
	ssm psr.ic | PSR_DEFAULT_BITS
	;;
	adds r3=8,r2			// set up second base pointer for SAVE_REST
	srlz.i				// ensure everybody knows psr.ic is back on
	;;
	SAVE_REST
	;;
	alloc r14=ar.pfs,0,0,2,0	// must be first in an insn group
#ifdef XEN
	movl out0=THIS_CPU(cpu_info)+IA64_CPUINFO_KSOFTIRQD_OFFSET;;
	ld8 out0=[out0];;
#else
	mov out0=cr.ivr			// pass cr.ivr as first arg
#endif
	add out1=16,sp			// pass pointer to pt_regs as second arg
#ifndef XEN
	;;
	srlz.d				// make sure we see the effect of cr.ivr
#endif
	movl r14=ia64_leave_kernel
	;;
	mov rp=r14
	br.call.sptk.many b6=ia64_handle_irq
END(interrupt)

	.org ia64_ivt+0x3400
/////////////////////////////////////////////////////////////////////////////////////////
// 0x3400 Entry 13 (size 64 bundles) Reserved
	DBG_FAULT(13)
	FAULT(13)

#ifdef XEN
	// There is no particular reason for this code to be here, other than that
	// there happens to be space here that would go unused otherwise.  If this
	// fault ever gets "unreserved", simply move the following code to a more
	// suitable spot...

GLOBAL_ENTRY(dispatch_break_fault)
	SAVE_MIN_WITH_COVER
	;;
dispatch_break_fault_post_save:
	alloc r14=ar.pfs,0,0,4,0	// now it's safe (must be first in insn group!)
	mov out0=cr.ifa
	adds out1=16,sp
	mov out2=cr.isr			// FIXME: pity to make this slow access twice
	mov out3=cr.iim			// FIXME: pity to make this slow access twice

	ssm psr.ic | PSR_DEFAULT_BITS
	;;
	srlz.i				// guarantee that interruption collection is on
	;;
(p15)	ssm psr.i			// restore psr.i
	adds r3=8,r2			// set up second base pointer
	;;
	SAVE_REST
	movl r14=ia64_leave_kernel
	;;
	mov rp=r14
//	br.sptk.many ia64_prepare_handle_break
	br.call.sptk.many b6=ia64_handle_break
END(dispatch_break_fault)
#endif

	.org ia64_ivt+0x3800
/////////////////////////////////////////////////////////////////////////////////////////
// 0x3800 Entry 14 (size 64 bundles) Reserved
	DBG_FAULT(14)
	FAULT(14)

	/*
	 * There is no particular reason for this code to be here, other than that
	 * there happens to be space here that would go unused otherwise.  If this
	 * fault ever gets "unreserved", simply move the following code to a more
	 * suitable spot...
	 *
	 * ia64_syscall_setup() is a separate subroutine so that it can
	 *	allocate stacked registers so it can safely demine any
	 *	potential NaT values from the input registers.
	 *
	 * On entry:
	 *	- executing on bank 0 or bank 1 register set (doesn't matter)
	 *	- r1: stack pointer
	 *	- r2: current task pointer
	 *	- r3: preserved
	 *	- r11: original contents (saved ar.pfs to be saved)
	 *	- r12: original contents (sp to be saved)
	 *	- r13: original contents (tp to be saved)
	 *	- r15: original contents (syscall # to be saved)
	 *	- r18: saved bsp (after switching to kernel stack)
	 *	- r19: saved b6
	 *	- r20: saved r1 (gp)
	 *	- r21: saved ar.fpsr
	 *	- r22: kernel's register backing store base (krbs_base)
	 *	- r23: saved ar.bspstore
	 *	- r24: saved ar.rnat
	 *	- r25: saved ar.unat
	 *	- r26: saved ar.pfs
	 *	- r27: saved ar.rsc
	 *	- r28: saved cr.iip
	 *	- r29: saved cr.ipsr
	 *	- r31: saved pr
	 *	- b0: original contents (to be saved)
	 * On exit:
	 *	- executing on bank 1 registers
	 *	- psr.ic enabled, interrupts restored
	 *	- p10: TRUE if syscall is invoked with more than 8 out
	 *	  registers or r15's Nat is true
	 *	- r1: kernel's gp
	 *	- r3: preserved (same as on entry)
	 *	- r8: -EINVAL if p10 is true
	 *	- r12: points to kernel stack
	 *	- r13: points to current task
	 *	- p15: TRUE if interrupts need to be re-enabled
	 *	- ar.fpsr: set to kernel settings
	 */
GLOBAL_ENTRY(ia64_syscall_setup)
#ifndef XEN
#if PT(B6) != 0
# error This code assumes that b6 is the first field in pt_regs.
#endif
#endif
	st8 [r1]=r19			// save b6
	add r16=PT(CR_IPSR),r1		// initialize first base pointer
	add r17=PT(R11),r1		// initialize second base pointer
	;;
	alloc r19=ar.pfs,8,0,0,0	// ensure in0-in7 are writable
	st8 [r16]=r29,PT(AR_PFS)-PT(CR_IPSR)	// save cr.ipsr
	tnat.nz p8,p0=in0

	st8.spill [r17]=r11,PT(CR_IIP)-PT(R11)	// save r11
	tnat.nz p9,p0=in1
(pKStk)	mov r18=r0			// make sure r18 isn't NaT
	;;

	st8 [r16]=r26,PT(CR_IFS)-PT(AR_PFS)	// save ar.pfs
	st8 [r17]=r28,PT(AR_UNAT)-PT(CR_IIP)	// save cr.iip
	mov r28=b0			// save b0 (2 cyc)
	;;

	st8 [r17]=r25,PT(AR_RSC)-PT(AR_UNAT)	// save ar.unat
	dep r19=0,r19,38,26		// clear all bits but 0..37 [I0]
(p8)	mov in0=-1
	;;

	st8 [r16]=r19,PT(AR_RNAT)-PT(CR_IFS)	// store ar.pfs.pfm in cr.ifs
	extr.u r11=r19,7,7		// I0	// get sol of ar.pfs
	and r8=0x7f,r19			// A	// get sof of ar.pfs

	st8 [r17]=r27,PT(AR_BSPSTORE)-PT(AR_RSC)	// save ar.rsc
	tbit.nz p15,p0=r29,IA64_PSR_I_BIT	// I0
(p9)	mov in1=-1
	;;

(pUStk)	sub r18=r18,r22			// r18=RSE.ndirty*8
	tnat.nz p10,p0=in2
	add r11=8,r11
	;;
(pKStk)	adds r16=PT(PR)-PT(AR_RNAT),r16	// skip over ar_rnat field
(pKStk)	adds r17=PT(B0)-PT(AR_BSPSTORE),r17	// skip over ar_bspstore field
	tnat.nz p11,p0=in3
	;;
(p10)	mov in2=-1
	tnat.nz p12,p0=in4		// [I0]
(p11)	mov in3=-1
	;;
(pUStk)	st8 [r16]=r24,PT(PR)-PT(AR_RNAT)	// save ar.rnat
(pUStk)	st8 [r17]=r23,PT(B0)-PT(AR_BSPSTORE)	// save ar.bspstore
	shl r18=r18,16			// compute ar.rsc to be used for "loadrs"
	;;
	st8 [r16]=r31,PT(LOADRS)-PT(PR)	// save predicates
	st8 [r17]=r28,PT(R1)-PT(B0)	// save b0
	tnat.nz p13,p0=in5		// [I0]
	;;
	st8 [r16]=r18,PT(R12)-PT(LOADRS)	// save ar.rsc value for "loadrs"
	st8.spill [r17]=r20,PT(R13)-PT(R1)	// save original r1
(p12)	mov in4=-1
	;;

.mem.offset 0,0; st8.spill [r16]=r12,PT(AR_FPSR)-PT(R12)	// save r12
.mem.offset 8,0; st8.spill [r17]=r13,PT(R15)-PT(R13)		// save r13
(p13)	mov in5=-1
	;;
	st8 [r16]=r21,PT(R8)-PT(AR_FPSR)	// save ar.fpsr
	tnat.nz p14,p0=in6
	cmp.lt p10,p9=r11,r8		// frame size can't be more than local+8
	;;
	stf8 [r16]=f1			// ensure pt_regs.r8 != 0 (see handle_syscall_error)
(p9)	tnat.nz p10,p0=r15
	adds r12=-16,r1			// switch to kernel memory stack (with 16 bytes of scratch)

	st8.spill [r17]=r15		// save r15
	tnat.nz p8,p0=in7
	nop.i 0

	mov r13=r2			// establish `current'
	movl r1=__gp			// establish kernel global pointer
	;;
(p14)	mov in6=-1
(p8)	mov in7=-1
	nop.i 0

	cmp.eq pSys,pNonSys=r0,r0	// set pSys=1, pNonSys=0
	movl r17=FPSR_DEFAULT
	;;
	mov.m ar.fpsr=r17		// set ar.fpsr to kernel default value
(p10)	mov r8=-EINVAL
	br.ret.sptk.many b7
END(ia64_syscall_setup)

	.org ia64_ivt+0x3c00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x3c00 Entry 15 (size 64 bundles) Reserved
	DBG_FAULT(15)
	FAULT(15)

	/*
	 * Squatting in this space ...
	 *
	 * This special case dispatcher for illegal operation faults allows preserved
	 * registers to be modified through a callback function (asm only) that is handed
	 * back from the fault handler in r8.  Up to three arguments can be passed to the
	 * callback function by returning an aggregate with the callback as its first
	 * element, followed by the arguments.
	 */
ENTRY(dispatch_illegal_op_fault)
	SAVE_MIN_WITH_COVER
	ssm psr.ic | PSR_DEFAULT_BITS
	;;
	srlz.i				// guarantee that interruption collection is on
	;;
(p15)	ssm psr.i			// restore psr.i
	adds r3=8,r2			// set up second base pointer for SAVE_REST
	;;
	alloc r14=ar.pfs,0,0,1,0	// must be first in insn group
	mov out0=ar.ec
	;;
	SAVE_REST
	;;
	br.call.sptk.many rp=ia64_illegal_op_fault
.ret0:	;;
	alloc r14=ar.pfs,0,0,3,0	// must be first in insn group
	mov out0=r9
	mov out1=r10
	mov out2=r11
	movl r15=ia64_leave_kernel
	;;
	mov rp=r15
	mov b6=r8
	;;
	cmp.ne p6,p0=0,r8
(p6)	br.call.dpnt.many b6=b6		// call returns to ia64_leave_kernel
	br.sptk.many ia64_leave_kernel
END(dispatch_illegal_op_fault)

	.org ia64_ivt+0x4000
/////////////////////////////////////////////////////////////////////////////////////////
// 0x4000 Entry 16 (size 64 bundles) Reserved
	DBG_FAULT(16)
	FAULT(16)

#ifdef XEN
	// There is no particular reason for this code to be here, other than that
	// there happens to be space here that would go unused otherwise.  If this
	// fault ever gets "unreserved", simply move the following code to a more
	// suitable spot...

ENTRY(dispatch_privop_fault)
	SAVE_MIN_WITH_COVER
	;;
	alloc r14=ar.pfs,0,0,4,0	// now it's safe (must be first in insn group!)
	mov out0=cr.ifa
	adds out1=16,sp
	mov out2=cr.isr			// FIXME: pity to make this slow access twice
	mov out3=cr.itir

	ssm psr.ic | PSR_DEFAULT_BITS
	;;
	srlz.i				// guarantee that interruption collection is on
	;;
(p15)	ssm psr.i			// restore psr.i
	adds r3=8,r2			// set up second base pointer
	;;
	SAVE_REST
	movl r14=ia64_leave_kernel
	;;
	mov rp=r14
//	br.sptk.many ia64_prepare_handle_privop
	br.call.sptk.many b6=ia64_handle_privop
END(dispatch_privop_fault)
#endif

	.org ia64_ivt+0x4400
/////////////////////////////////////////////////////////////////////////////////////////
// 0x4400 Entry 17 (size 64 bundles) Reserved
	DBG_FAULT(17)
	FAULT(17)

ENTRY(non_syscall)
	SAVE_MIN_WITH_COVER

	// There is no particular reason for this code to be here, other than that
	// there happens to be space here that would go unused otherwise.  If this
	// fault ever gets "unreserved", simply move the following code to a more
	// suitable spot...

	alloc r14=ar.pfs,0,0,2,0
	mov out0=cr.iim
	add out1=16,sp
	adds r3=8,r2			// set up second base pointer for SAVE_REST

	ssm psr.ic | PSR_DEFAULT_BITS
	;;
	srlz.i				// guarantee that interruption collection is on
	;;
(p15)	ssm psr.i			// restore psr.i
	movl r15=ia64_leave_kernel
	;;
	SAVE_REST
	mov rp=r15
	;;
	br.call.sptk.many b6=ia64_bad_break	// avoid WAW on CFM and ignore return addr
END(non_syscall)

	.org ia64_ivt+0x4800
/////////////////////////////////////////////////////////////////////////////////////////
// 0x4800 Entry 18 (size 64 bundles) Reserved
	DBG_FAULT(18)
	FAULT(18)

	/*
	 * There is no particular reason for this code to be here, other than that
	 * there happens to be space here that would go unused otherwise.  If this
	 * fault ever gets "unreserved", simply move the following code to a more
	 * suitable spot...
	 */
ENTRY(dispatch_unaligned_handler)
	SAVE_MIN_WITH_COVER
	;;
	alloc r14=ar.pfs,0,0,2,0	// now it's safe (must be first in insn group!)
	mov out0=cr.ifa
	adds out1=16,sp

	ssm psr.ic | PSR_DEFAULT_BITS
	;;
	srlz.i				// guarantee that interruption collection is on
	;;
(p15)	ssm psr.i			// restore psr.i
	adds r3=8,r2			// set up second base pointer
	;;
	SAVE_REST
	movl r14=ia64_leave_kernel
	;;
	mov rp=r14
//	br.sptk.many ia64_prepare_handle_unaligned
	br.call.sptk.many b6=ia64_handle_unaligned
END(dispatch_unaligned_handler)

	.org ia64_ivt+0x4c00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x4c00 Entry 19 (size 64 bundles) Reserved
	DBG_FAULT(19)
	FAULT(19)

	/*
	 * There is no particular reason for this code to be here, other than that
	 * there happens to be space here that would go unused otherwise.  If this
	 * fault ever gets "unreserved", simply move the following code to a more
	 * suitable spot...
	 */
ENTRY(dispatch_to_fault_handler)
	/*
	 * Input:
	 *	psr.ic:	off
	 *	r19:	fault vector number (e.g., 24 for General Exception)
	 *	r31:	contains saved predicates (pr)
	 */
	SAVE_MIN_WITH_COVER_R19
	alloc r14=ar.pfs,0,0,5,0
	mov out0=r15
	mov out1=cr.isr
	mov out2=cr.ifa
	mov out3=cr.iim
	mov out4=cr.itir
	;;
	ssm psr.ic | PSR_DEFAULT_BITS
	;;
	srlz.i				// guarantee that interruption collection is on
	;;
(p15)	ssm psr.i			// restore psr.i
	adds r3=8,r2			// set up second base pointer for SAVE_REST
	;;
	SAVE_REST
	movl r14=ia64_leave_kernel
	;;
	mov rp=r14
	br.call.sptk.many b6=ia64_fault
END(dispatch_to_fault_handler)

//
// --- End of long entries, Beginning of short entries
//

	.org ia64_ivt+0x5000
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5000 Entry 20 (size 16 bundles) Page Not Present (10,22,49)
ENTRY(page_not_present)
	DBG_FAULT(20)
#ifdef XEN
	REFLECT(20)
#endif
	mov r16=cr.ifa
	rsm psr.dt
	/*
	 * The Linux page fault handler doesn't expect non-present pages to be in
	 * the TLB.  Flush the existing entry now, so we meet that expectation.
	 */
	mov r17=PAGE_SHIFT<<2
	;;
	ptc.l r16,r17
	;;
	mov r31=pr
	srlz.d
	br.sptk.many page_fault
END(page_not_present)

	.org ia64_ivt+0x5100
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5100 Entry 21 (size 16 bundles) Key Permission (13,25,52)
ENTRY(key_permission)
	DBG_FAULT(21)
#ifdef XEN
	REFLECT(21)
#endif
	mov r16=cr.ifa
	rsm psr.dt
	mov r31=pr
	;;
	srlz.d
	br.sptk.many page_fault
END(key_permission)

	.org ia64_ivt+0x5200
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5200 Entry 22 (size 16 bundles) Instruction Access Rights (26)
ENTRY(iaccess_rights)
	DBG_FAULT(22)
#ifdef XEN
	REFLECT(22)
#endif
	mov r16=cr.ifa
	rsm psr.dt
	mov r31=pr
	;;
	srlz.d
	br.sptk.many page_fault
END(iaccess_rights)

	.org ia64_ivt+0x5300
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5300 Entry 23 (size 16 bundles) Data Access Rights (14,53)
ENTRY(daccess_rights)
	DBG_FAULT(23)
#ifdef XEN
	mov r31=pr
	mov r16=cr.isr
	mov r17=cr.ifa
	mov r19=23
	movl r20=0x5300
	br.sptk.many fast_access_reflect;;
#endif
	mov r16=cr.ifa
	rsm psr.dt
	mov r31=pr
	;;
	srlz.d
	br.sptk.many page_fault
END(daccess_rights)

	.org ia64_ivt+0x5400
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5400 Entry 24 (size 16 bundles) General Exception (5,32,34,36,38,39)
ENTRY(general_exception)
	DBG_FAULT(24)
	mov r16=cr.isr
	mov r31=pr
	;;
#ifdef XEN
	cmp4.ge p6,p0=0x20,r16
(p6)	br.sptk.many dispatch_privop_fault
#else
	cmp4.eq p6,p0=0,r16
(p6)	br.sptk.many dispatch_illegal_op_fault
#endif
	;;
	mov r19=24			// fault number
	br.sptk.many dispatch_to_fault_handler
END(general_exception)

	.org ia64_ivt+0x5500
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5500 Entry 25 (size 16 bundles) Disabled FP-Register (35)
ENTRY(disabled_fp_reg)
	DBG_FAULT(25)
#ifdef XEN
#if 0
	mov r20=pr
	movl r16=0x2000000000000000
	movl r17=0x2000000000176b60
	mov r18=cr.iip
	mov r19=rr[r16]
	movl r22=0xe95d0439
	;;
	mov pr=r0,-1
	;;
	cmp.eq p6,p7=r22,r19
	;;
(p6)	cmp.eq p8,p9=r17,r18
(p8)	br.sptk.few floating_panic
	;;
	mov pr=r20,-1
	;;
#endif
	REFLECT(25)
//floating_panic:
//	br.sptk.many floating_panic
	;;
#endif
	rsm psr.dfh			// ensure we can access fph
	;;
	srlz.d
	mov r31=pr
	mov r19=25
	br.sptk.many dispatch_to_fault_handler
END(disabled_fp_reg)

	.org ia64_ivt+0x5600
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5600 Entry 26 (size 16 bundles) Nat Consumption (11,23,37,50)
ENTRY(nat_consumption)
	DBG_FAULT(26)
#ifdef XEN
	REFLECT(26)
#endif
	FAULT(26)
END(nat_consumption)

	.org ia64_ivt+0x5700
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5700 Entry 27 (size 16 bundles) Speculation (40)
ENTRY(speculation_vector)
	DBG_FAULT(27)
#ifdef XEN
	// this probably need not reflect...
	REFLECT(27)
#endif
	/*
	 * A [f]chk.[as] instruction needs to take the branch to the recovery code but
	 * this part of the architecture is not implemented in hardware on some CPUs, such
	 * as Itanium.  Thus, in general we need to emulate the behavior.  IIM contains
	 * the relative target (not yet sign extended).  So after sign extending it we
	 * simply add it to IIP.  We also need to reset the EI field of the IPSR to zero,
	 * i.e., the slot to restart into.
	 *
	 * cr.iim contains zero_ext(imm21)
	 */
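	/*
	 * The shift pair below is a sign-extend-and-scale in disguise.  A
	 * hedged C sketch of the IIP adjustment:
	 *
	 *	// imm21 arrives zero-extended in cr.iim; branch targets are
	 *	// bundle-aligned, so the offset is imm21 << 4, sign-extended
	 *	long offset = ((long)(iim << 43)) >> 39;	// 43=64-21, 39=43-4
	 *	iip += offset;
	 *	ipsr &= ~(3UL << 41);				// clear ipsr.ei
	 */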
1683 mov r18=cr.iim
1684 ;;
1685 mov r17=cr.iip
1686 shl r18=r18,43 // put sign bit in position (43=64-21)
1687 ;;
1689 mov r16=cr.ipsr
1690 shr r18=r18,39 // sign extend (39=43-4)
1691 ;;
1693 add r17=r17,r18 // now add the offset
1694 ;;
1695 mov cr.iip=r17
1696 dep r16=0,r16,41,2 // clear EI
1697 ;;
1699 mov cr.ipsr=r16
1700 ;;
1702 rfi // and go back
1703 END(speculation_vector)
1705 .org ia64_ivt+0x5800
1706 /////////////////////////////////////////////////////////////////////////////////////////
1707 // 0x5800 Entry 28 (size 16 bundles) Reserved
1708 DBG_FAULT(28)
1709 FAULT(28)
1711 .org ia64_ivt+0x5900
1712 /////////////////////////////////////////////////////////////////////////////////////////
1713 // 0x5900 Entry 29 (size 16 bundles) Debug (16,28,56)
1714 ENTRY(debug_vector)
1715 DBG_FAULT(29)
1716 #ifdef XEN
1717 REFLECT(29)
1718 #endif
	FAULT(29)
END(debug_vector)

	.org ia64_ivt+0x5a00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5a00 Entry 30 (size 16 bundles) Unaligned Reference (57)
ENTRY(unaligned_access)
	DBG_FAULT(30)
#ifdef XEN
	REFLECT(30)
#endif
	mov r16=cr.ipsr
	mov r31=pr		// prepare to save predicates
	;;
	br.sptk.many dispatch_unaligned_handler
END(unaligned_access)

	.org ia64_ivt+0x5b00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5b00 Entry 31 (size 16 bundles) Unsupported Data Reference (57)
ENTRY(unsupported_data_reference)
	DBG_FAULT(31)
#ifdef XEN
	REFLECT(31)
#endif
	FAULT(31)
END(unsupported_data_reference)

	.org ia64_ivt+0x5c00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5c00 Entry 32 (size 16 bundles) Floating-Point Fault (64)
ENTRY(floating_point_fault)
	DBG_FAULT(32)
#ifdef XEN
	REFLECT(32)
#endif
	FAULT(32)
END(floating_point_fault)

	.org ia64_ivt+0x5d00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5d00 Entry 33 (size 16 bundles) Floating Point Trap (66)
ENTRY(floating_point_trap)
	DBG_FAULT(33)
#ifdef XEN
	REFLECT(33)
#endif
	FAULT(33)
END(floating_point_trap)

	.org ia64_ivt+0x5e00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5e00 Entry 34 (size 16 bundles) Lower Privilege Transfer Trap (66)
ENTRY(lower_privilege_trap)
	DBG_FAULT(34)
#ifdef XEN
	REFLECT(34)
#endif
	FAULT(34)
END(lower_privilege_trap)

	.org ia64_ivt+0x5f00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5f00 Entry 35 (size 16 bundles) Taken Branch Trap (68)
ENTRY(taken_branch_trap)
	DBG_FAULT(35)
#ifdef XEN
	REFLECT(35)
#endif
	FAULT(35)
END(taken_branch_trap)

	.org ia64_ivt+0x6000
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6000 Entry 36 (size 16 bundles) Single Step Trap (69)
ENTRY(single_step_trap)
	DBG_FAULT(36)
#ifdef XEN
	REFLECT(36)
#endif
	FAULT(36)
END(single_step_trap)

	.org ia64_ivt+0x6100
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6100 Entry 37 (size 16 bundles) Reserved
	DBG_FAULT(37)
	FAULT(37)

	.org ia64_ivt+0x6200
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6200 Entry 38 (size 16 bundles) Reserved
	DBG_FAULT(38)
	FAULT(38)

	.org ia64_ivt+0x6300
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6300 Entry 39 (size 16 bundles) Reserved
	DBG_FAULT(39)
	FAULT(39)

	.org ia64_ivt+0x6400
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6400 Entry 40 (size 16 bundles) Reserved
	DBG_FAULT(40)
	FAULT(40)

	.org ia64_ivt+0x6500
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6500 Entry 41 (size 16 bundles) Reserved
	DBG_FAULT(41)
	FAULT(41)

	.org ia64_ivt+0x6600
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6600 Entry 42 (size 16 bundles) Reserved
	DBG_FAULT(42)
	FAULT(42)

	.org ia64_ivt+0x6700
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6700 Entry 43 (size 16 bundles) Reserved
	DBG_FAULT(43)
	FAULT(43)

	.org ia64_ivt+0x6800
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6800 Entry 44 (size 16 bundles) Reserved
	DBG_FAULT(44)
	FAULT(44)

	.org ia64_ivt+0x6900
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6900 Entry 45 (size 16 bundles) IA-32 Exception (17,18,29,41,42,43,44,58,60,61,62,72,73,75,76,77)
ENTRY(ia32_exception)
	DBG_FAULT(45)
#ifdef XEN
	REFLECT(45)
#endif
	FAULT(45)
END(ia32_exception)

	.org ia64_ivt+0x6a00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6a00 Entry 46 (size 16 bundles) IA-32 Intercept (30,31,59,70,71)
ENTRY(ia32_intercept)
	DBG_FAULT(46)
#ifdef XEN
	REFLECT(46)
#endif
#ifdef CONFIG_IA32_SUPPORT
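	// ISR.code == 2 marks an IA-32 "system flag" intercept, raised when
	// IA-32 code modifies one of the eflags system bits.  The old eflags
	// value (cr.iim) is compared against the current ar.eflag: a change
	// confined to the eflags.ac bit is harmless, so the handler simply
	// resumes with rfi; any other case falls through to FAULT(46).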
	mov r31=pr
	mov r16=cr.isr
	;;
	extr.u r17=r16,16,8	// get ISR.code
	mov r18=ar.eflag
	mov r19=cr.iim		// old eflag value
	;;
	cmp.ne p6,p0=2,r17
(p6)	br.cond.spnt 1f		// not a system flag fault
	xor r16=r18,r19
	;;
	extr.u r17=r16,18,1	// get the eflags.ac bit
	;;
	cmp.eq p6,p0=0,r17
(p6)	br.cond.spnt 1f		// eflags.ac bit didn't change
	;;
	mov pr=r31,-1		// restore predicate registers
	rfi

1:
#endif	// CONFIG_IA32_SUPPORT
	FAULT(46)
END(ia32_intercept)

	.org ia64_ivt+0x6b00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6b00 Entry 47 (size 16 bundles) IA-32 Interrupt (74)
ENTRY(ia32_interrupt)
	DBG_FAULT(47)
#ifdef XEN
	REFLECT(47)
#endif
#ifdef CONFIG_IA32_SUPPORT
	mov r31=pr
	br.sptk.many dispatch_to_ia32_handler
#else
	FAULT(47)
#endif
END(ia32_interrupt)

	.org ia64_ivt+0x6c00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6c00 Entry 48 (size 16 bundles) Reserved
	DBG_FAULT(48)
	FAULT(48)

	.org ia64_ivt+0x6d00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6d00 Entry 49 (size 16 bundles) Reserved
	DBG_FAULT(49)
	FAULT(49)

	.org ia64_ivt+0x6e00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6e00 Entry 50 (size 16 bundles) Reserved
	DBG_FAULT(50)
	FAULT(50)

	.org ia64_ivt+0x6f00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6f00 Entry 51 (size 16 bundles) Reserved
	DBG_FAULT(51)
	FAULT(51)

	.org ia64_ivt+0x7000
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7000 Entry 52 (size 16 bundles) Reserved
	DBG_FAULT(52)
	FAULT(52)

	.org ia64_ivt+0x7100
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7100 Entry 53 (size 16 bundles) Reserved
	DBG_FAULT(53)
	FAULT(53)

	.org ia64_ivt+0x7200
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7200 Entry 54 (size 16 bundles) Reserved
	DBG_FAULT(54)
	FAULT(54)

	.org ia64_ivt+0x7300
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7300 Entry 55 (size 16 bundles) Reserved
	DBG_FAULT(55)
	FAULT(55)

	.org ia64_ivt+0x7400
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7400 Entry 56 (size 16 bundles) Reserved
	DBG_FAULT(56)
	FAULT(56)

	.org ia64_ivt+0x7500
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7500 Entry 57 (size 16 bundles) Reserved
	DBG_FAULT(57)
	FAULT(57)

	.org ia64_ivt+0x7600
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7600 Entry 58 (size 16 bundles) Reserved
	DBG_FAULT(58)
	FAULT(58)

	.org ia64_ivt+0x7700
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7700 Entry 59 (size 16 bundles) Reserved
	DBG_FAULT(59)
	FAULT(59)

	.org ia64_ivt+0x7800
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7800 Entry 60 (size 16 bundles) Reserved
	DBG_FAULT(60)
	FAULT(60)

	.org ia64_ivt+0x7900
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7900 Entry 61 (size 16 bundles) Reserved
	DBG_FAULT(61)
	FAULT(61)

	.org ia64_ivt+0x7a00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7a00 Entry 62 (size 16 bundles) Reserved
	DBG_FAULT(62)
	FAULT(62)

	.org ia64_ivt+0x7b00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7b00 Entry 63 (size 16 bundles) Reserved
	DBG_FAULT(63)
	FAULT(63)

	.org ia64_ivt+0x7c00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7c00 Entry 64 (size 16 bundles) Reserved
	DBG_FAULT(64)
	FAULT(64)

	.org ia64_ivt+0x7d00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7d00 Entry 65 (size 16 bundles) Reserved
	DBG_FAULT(65)
	FAULT(65)

	.org ia64_ivt+0x7e00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7e00 Entry 66 (size 16 bundles) Reserved
	DBG_FAULT(66)
	FAULT(66)

	.org ia64_ivt+0x7f00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7f00 Entry 67 (size 16 bundles) Reserved
	DBG_FAULT(67)
	FAULT(67)

#ifdef XEN
	.org ia64_ivt+0x8000
GLOBAL_ENTRY(dispatch_reflection)
	/*
	 * Input:
	 *	psr.ic:	off
	 *	r19:	intr type (offset into ivt, see ia64_int.h)
	 *	r31:	contains saved predicates (pr)
	 */
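	// SAVE_MIN_WITH_COVER_R19 is the DO_SAVE_MIN variant (see
	// minstate.h) that also copies r19 into r15 during the state save,
	// so out4 below carries the vector number.  The five outputs
	// correspond to the arguments of
	// ia64_handle_reflection(ifa, regs, isr, iim, vector).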
	SAVE_MIN_WITH_COVER_R19
	alloc r14=ar.pfs,0,0,5,0
	mov out4=r15
	mov out0=cr.ifa
	adds out1=16,sp
	mov out2=cr.isr
	mov out3=cr.iim
//	mov out3=cr.itir

	ssm psr.ic | PSR_DEFAULT_BITS
	;;
	srlz.i			// guarantee that interruption collection is on
	;;
(p15)	ssm psr.i		// restore psr.i
	adds r3=8,r2		// set up second base pointer
	;;
	SAVE_REST
	movl r14=ia64_leave_kernel
	;;
	mov rp=r14
//	br.sptk.many ia64_prepare_handle_reflection
	br.call.sptk.many b6=ia64_handle_reflection
END(dispatch_reflection)

#define SAVE_MIN_COVER_DONE	DO_SAVE_MIN(,mov r30=cr.ifs,)

// same as dispatch_break_fault except cover has already been done
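// (hence the empty COVER argument in SAVE_MIN_COVER_DONE above: issuing
// cover a second time would overwrite the cr.ifs value captured when the
// hyperprivop entry covered the interrupted frame)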
GLOBAL_ENTRY(dispatch_slow_hyperprivop)
	SAVE_MIN_COVER_DONE
	;;
	br.sptk.many dispatch_break_fault_post_save
END(dispatch_slow_hyperprivop)
#endif

#ifdef CONFIG_IA32_SUPPORT

	/*
	 * There is no particular reason for this code to be here, other than that
	 * there happens to be space here that would go unused otherwise.  If this
	 * fault ever gets "unreserved", simply move the following code to a more
	 * suitable spot...
	 */

	// IA32 interrupt entry point
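	// cr.isr bits 16 and up hold the interrupting IA-32 vector; 0x80 is
	// the Linux int 0x80 system call gate, and anything else is routed
	// to ia32_bad_interrupt via non_ia32_syscall below.
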
ENTRY(dispatch_to_ia32_handler)
	SAVE_MIN
	;;
	mov r14=cr.isr
	ssm psr.ic | PSR_DEFAULT_BITS
	;;
	srlz.i			// guarantee that interruption collection is on
	;;
(p15)	ssm psr.i
	adds r3=8,r2		// Base pointer for SAVE_REST
	;;
	SAVE_REST
	;;
	mov r15=0x80
	shr r14=r14,16		// Get interrupt number
	;;
	cmp.ne p6,p0=r14,r15
(p6)	br.call.dpnt.many b6=non_ia32_syscall

	adds r14=IA64_PT_REGS_R8_OFFSET + 16,sp	// 16 byte hole per SW conventions
	adds r15=IA64_PT_REGS_R1_OFFSET + 16,sp
	;;
	cmp.eq pSys,pNonSys=r0,r0	// set pSys=1, pNonSys=0
	ld8 r8=[r14]		// get r8
	;;
	st8 [r15]=r8		// save original EAX in r1 (IA32 procs don't use the GP)
	;;
	alloc r15=ar.pfs,0,0,6,0	// must be first in an insn group
	;;
	ld4 r8=[r14],8		// r8 == eax (syscall number)
	mov r15=IA32_NR_syscalls
	;;
	cmp.ltu.unc p6,p7=r8,r15
	ld4 out1=[r14],8	// r9 == ecx
	;;
	ld4 out2=[r14],8	// r10 == edx
	;;
	ld4 out0=[r14]		// r11 == ebx
	adds r14=(IA64_PT_REGS_R13_OFFSET) + 16,sp
	;;
	ld4 out5=[r14],PT(R14)-PT(R13)	// r13 == ebp
	;;
	ld4 out3=[r14],PT(R15)-PT(R14)	// r14 == esi
	adds r2=TI_FLAGS+IA64_TASK_SIZE,r13
	;;
	ld4 out4=[r14]		// r15 == edi
	movl r16=ia32_syscall_table
	;;
(p6)	shladd r16=r8,3,r16	// force ni_syscall if not valid syscall number
	ld4 r2=[r2]		// r2 = current_thread_info()->flags
	;;
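	// (if p6 was clear, the shladd above was skipped and r16 still
	// points at the base of ia32_syscall_table; its entry 0 is
	// sys_ni_syscall, which is how out-of-range syscall numbers are
	// rejected)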
	ld8 r16=[r16]
	and r2=_TIF_SYSCALL_TRACEAUDIT,r2	// mask trace or audit
	;;
	mov b6=r16
	movl r15=ia32_ret_from_syscall
	cmp.eq p8,p0=r2,r0
	;;
	mov rp=r15
(p8)	br.call.sptk.many b6=b6
	br.cond.sptk ia32_trace_syscall

non_ia32_syscall:
	alloc r15=ar.pfs,0,0,2,0
	mov out0=r14		// interrupt #
	add out1=16,sp		// pointer to pt_regs
	;;			// avoid WAW on CFM
	br.call.sptk.many rp=ia32_bad_interrupt
.ret1:	movl r15=ia64_leave_kernel
	;;
	mov rp=r15
	br.ret.sptk.many rp
END(dispatch_to_ia32_handler)

#endif /* CONFIG_IA32_SUPPORT */