ia64/xen-unstable: xen/arch/ia64/ivt.S @ 5987:5f1ed597f107

Ensure percpu data area not used before the TR is set.
author:   fred@xuni-t01.sc.intel.com
date:     Tue Aug 23 18:43:18 2005 -0800
parents:  97675c2dbb40
children: 8799d14bef77

#ifdef XEN
//#define CONFIG_DISABLE_VHPT	// FIXME: change when VHPT is enabled??
// these are all hacked out for now as the entire IVT
// will eventually be replaced... just want to use it
// for startup code to handle TLB misses
//#define ia64_leave_kernel 0
//#define ia64_ret_from_syscall 0
//#define ia64_handle_irq 0
//#define ia64_fault 0
#define ia64_illegal_op_fault 0
#define ia64_prepare_handle_unaligned 0
#define ia64_bad_break 0
#define ia64_trace_syscall 0
#define sys_call_table 0
#define sys_ni_syscall 0
#include <asm/vhpt.h>
#endif
/*
 * arch/ia64/kernel/ivt.S
 *
 * Copyright (C) 1998-2001, 2003 Hewlett-Packard Co
 *	Stephane Eranian <eranian@hpl.hp.com>
 *	David Mosberger <davidm@hpl.hp.com>
 * Copyright (C) 2000, 2002-2003 Intel Co
 *	Asit Mallick <asit.k.mallick@intel.com>
 *	Suresh Siddha <suresh.b.siddha@intel.com>
 *	Kenneth Chen <kenneth.w.chen@intel.com>
 *	Fenghua Yu <fenghua.yu@intel.com>
 *
 * 00/08/23 Asit Mallick <asit.k.mallick@intel.com> TLB handling for SMP
 * 00/12/20 David Mosberger-Tang <davidm@hpl.hp.com> DTLB/ITLB handler now uses virtual PT.
 */
/*
 * This file defines the interruption vector table used by the CPU.
 * It does not include one entry per possible cause of interruption.
 *
 * The first 20 entries of the table contain 64 bundles each while the
 * remaining 48 entries contain only 16 bundles each.
 *
 * The 64 bundles are used to allow inlining the whole handler for critical
 * interruptions like TLB misses.
 *
 * For each entry, the comment is as follows:
 *
 *		// 0x1c00 Entry 7 (size 64 bundles) Data Key Miss (12,51)
 *		entry offset ----/     /         /                  /          /
 *		entry number ---------/         /                  /          /
 *		size of the entry -------------/                  /          /
 *		vector name -------------------------------------/          /
 *		interruptions triggering this vector ----------------------/
 *
 * The table is 32KB in size and must be aligned on 32KB boundary.
 * (The CPU ignores the 15 lower bits of the address)
 *
 * Table is based upon EAS2.6 (Oct 1999)
 */
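/*
 * Size check (annotation, not in the original file): an IA-64 bundle is
 * 16 bytes, so the layout above accounts for the full table:
 *
 *	20 entries * 64 bundles * 16 bytes = 20480 bytes
 *	48 entries * 16 bundles * 16 bytes = 12288 bytes
 *	                                     -----------
 *	                                     32768 bytes = 32KB
 *
 * which is exactly the alignment that the ".align 32768" directive
 * below demands.
 */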
#include <linux/config.h>

#include <asm/asmmacro.h>
#include <asm/break.h>
#include <asm/ia32.h>
#include <asm/kregs.h>
#include <asm/offsets.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/system.h>
#include <asm/thread_info.h>
#include <asm/unistd.h>
#include <asm/errno.h>

#if 1
# define PSR_DEFAULT_BITS	psr.ac
#else
# define PSR_DEFAULT_BITS	0
#endif

#if 0
/*
 * This lets you track the last eight faults that occurred on the CPU. Make sure ar.k2 isn't
 * needed for something else before enabling this...
 */
# define DBG_FAULT(i)	mov r16=ar.k2;; shl r16=r16,8;; add r16=(i),r16;; mov ar.k2=r16
#else
# define DBG_FAULT(i)
#endif

#define MINSTATE_VIRT	/* needed by minstate.h */
#include "minstate.h"

#define FAULT(n)								\
	mov r31=pr;								\
	mov r19=n;;		/* prepare to save predicates */		\
	br.sptk.many dispatch_to_fault_handler

#ifdef XEN
#define REFLECT(n)								\
	mov r31=pr;								\
	mov r19=n;;		/* prepare to save predicates */		\
	br.sptk.many dispatch_reflection
#endif

	.section .text.ivt,"ax"

	.align 32768	// align on 32KB boundary
	.global ia64_ivt
ia64_ivt:
/////////////////////////////////////////////////////////////////////////////////////////
// 0x0000 Entry 0 (size 64 bundles) VHPT Translation (8,20,47)
ENTRY(vhpt_miss)
	DBG_FAULT(0)
	/*
	 * The VHPT vector is invoked when the TLB entry for the virtual page table
	 * is missing. This happens only as a result of a previous
	 * (the "original") TLB miss, which may either be caused by an instruction
	 * fetch or a data access (or non-access).
	 *
	 * What we do here is normal TLB miss handling for the _original_ miss, followed
	 * by inserting the TLB entry for the virtual page table page that the VHPT
	 * walker was attempting to access. The latter gets inserted as long
	 * as both L1 and L2 have valid mappings for the faulting address.
	 * The TLB entry for the original miss gets inserted only if
	 * the L3 entry indicates that the page is present.
	 *
	 * do_page_fault gets invoked in the following cases:
	 *	- the faulting virtual address uses unimplemented address bits
	 *	- the faulting virtual address has no L1, L2, or L3 mapping
	 */
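	/*
	 * Annotation (not in the original source): in C, the three-level walk
	 * performed below is roughly the sketch here. The helper names
	 * (pgd_index() and friends, page_addr()) are illustrative stand-ins
	 * for the PGDIR_SHIFT/PMD_SHIFT/PAGE_SHIFT shift-and-mask arithmetic
	 * done in assembly, not the exact kernel macros:
	 *
	 *	unsigned long *walk(unsigned long *l1_base, unsigned long addr)
	 *	{
	 *		unsigned long *l1, *l2, *l3;
	 *
	 *		l1 = l1_base + pgd_index(addr);
	 *		if (*l1 == 0)
	 *			return NULL;		// -> page_fault
	 *		l2 = (unsigned long *)page_addr(*l1) + pmd_index(addr);
	 *		if (*l2 == 0)
	 *			return NULL;		// -> page_fault
	 *		l3 = (unsigned long *)page_addr(*l2) + pte_index(addr);
	 *		return (*l3 & _PAGE_P) ? l3 : NULL;	// present?
	 *	}
	 */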
	mov r16=cr.ifa				// get address that caused the TLB miss
#ifdef CONFIG_HUGETLB_PAGE
	movl r18=PAGE_SHIFT
	mov r25=cr.itir
#endif
	;;
	rsm psr.dt				// use physical addressing for data
	mov r31=pr				// save the predicate registers
#ifdef XEN
	movl r19=THIS_CPU(cpu_kr)+IA64_KR_PT_BASE_OFFSET;;
#else
	mov r19=IA64_KR(PT_BASE)		// get page table base address
#endif
	shl r21=r16,3				// shift bit 60 into sign bit
	shr.u r17=r16,61			// get the region number into r17
	;;
	shr r22=r21,3
#ifdef CONFIG_HUGETLB_PAGE
	extr.u r26=r25,2,6
	;;
	cmp.ne p8,p0=r18,r26
	sub r27=r26,r18
	;;
(p8)	dep r25=r18,r25,2,6
(p8)	shr r22=r22,r27
#endif
	;;
	cmp.eq p6,p7=5,r17			// is IFA pointing into region 5?
	shr.u r18=r22,PGDIR_SHIFT		// get bits 33-63 of the faulting address
	;;
(p7)	dep r17=r17,r19,(PAGE_SHIFT-3),3	// put region number bits in place

	srlz.d
	LOAD_PHYSICAL(p6, r19, swapper_pg_dir)	// region 5 is rooted at swapper_pg_dir

	.pred.rel "mutex", p6, p7
(p6)	shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT
(p7)	shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT-3
	;;
(p6)	dep r17=r18,r19,3,(PAGE_SHIFT-3)	// r17=PTA + IFA(33,42)*8
(p7)	dep r17=r18,r17,3,(PAGE_SHIFT-6)	// r17=PTA + (((IFA(61,63) << 7) | IFA(33,39))*8)
	cmp.eq p7,p6=0,r21			// unused address bits all zeroes?
	shr.u r18=r22,PMD_SHIFT			// shift L2 index into position
	;;
	ld8 r17=[r17]				// fetch the L1 entry (may be 0)
	;;
(p7)	cmp.eq p6,p7=r17,r0			// was L1 entry NULL?
	dep r17=r18,r17,3,(PAGE_SHIFT-3)	// compute address of L2 page table entry
	;;
(p7)	ld8 r20=[r17]				// fetch the L2 entry (may be 0)
	shr.u r19=r22,PAGE_SHIFT		// shift L3 index into position
	;;
(p7)	cmp.eq.or.andcm p6,p7=r20,r0		// was L2 entry NULL?
	dep r21=r19,r20,3,(PAGE_SHIFT-3)	// compute address of L3 page table entry
	;;
(p7)	ld8 r18=[r21]				// read the L3 PTE
	mov r19=cr.isr				// cr.isr bit 0 tells us if this is an insn miss
	;;
(p7)	tbit.z p6,p7=r18,_PAGE_P_BIT		// page present bit cleared?
	mov r22=cr.iha				// get the VHPT address that caused the TLB miss
	;;					// avoid RAW on p7
(p7)	tbit.nz.unc p10,p11=r19,32		// is it an instruction TLB miss?
	dep r23=0,r20,0,PAGE_SHIFT		// clear low bits to get page address
	;;
(p10)	itc.i r18				// insert the instruction TLB entry
(p11)	itc.d r18				// insert the data TLB entry
(p6)	br.cond.spnt.many page_fault		// handle bad address/page not present (page fault)
	mov cr.ifa=r22

#ifdef CONFIG_HUGETLB_PAGE
(p8)	mov cr.itir=r25				// change to default page-size for VHPT
#endif

	/*
	 * Now compute and insert the TLB entry for the virtual page table. We never
	 * execute in a page table page so there is no need to set the exception deferral
	 * bit.
	 */
	adds r24=__DIRTY_BITS_NO_ED|_PAGE_PL_0|_PAGE_AR_RW,r23
	;;
(p7)	itc.d r24
	;;
#ifdef CONFIG_SMP
	/*
	 * Tell the assembler's dependency-violation checker that the above "itc" instructions
	 * cannot possibly affect the following loads:
	 */
	dv_serialize_data

	/*
	 * Re-check L2 and L3 pagetable. If they changed, we may have received a ptc.g
	 * between reading the pagetable and the "itc". If so, flush the entry we
	 * inserted and retry.
	 */
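	/*
	 * Annotation (not in the original source): in C terms the recheck
	 * below is roughly the following, with purge() standing in for the
	 * predicated ptc.l instructions:
	 *
	 *	if (*l2_ptr != l2_seen) {		// cmp.ne p6,p7
	 *		purge(vhpt_addr);		// PTE-page translation
	 *		purge(fault_addr);		// original translation
	 *	} else if (*l3_ptr != l3_seen) {	// cmp.ne.or.andcm
	 *		purge(fault_addr);
	 *	}
	 *
	 * The "retry" then happens naturally: rfi re-executes the faulting
	 * access, which simply misses again if its translation was purged.
	 */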
	ld8 r25=[r21]				// read L3 PTE again
	ld8 r26=[r17]				// read L2 entry again
	;;
	cmp.ne p6,p7=r26,r20			// did L2 entry change?
	mov r27=PAGE_SHIFT<<2
	;;
(p6)	ptc.l r22,r27				// purge PTE page translation
(p7)	cmp.ne.or.andcm p6,p7=r25,r18		// did L3 PTE change?
	;;
(p6)	ptc.l r16,r27				// purge translation
#endif

	mov pr=r31,-1				// restore predicate registers
	rfi
END(vhpt_miss)

	.org ia64_ivt+0x400
/////////////////////////////////////////////////////////////////////////////////////////
// 0x0400 Entry 1 (size 64 bundles) ITLB (21)
ENTRY(itlb_miss)
	DBG_FAULT(1)
#ifdef XEN
	VHPT_CCHAIN_LOOKUP(itlb_miss,i)
#ifdef VHPT_GLOBAL
	br.cond.sptk page_fault
	;;
#endif
#endif
	/*
	 * The ITLB handler accesses the L3 PTE via the virtually mapped linear
	 * page table. If a nested TLB miss occurs, we switch into physical
	 * mode, walk the page table, and then re-execute the L3 PTE read
	 * and go on normally after that.
	 */
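	/*
	 * Annotation (not in the original source): "virtually mapped linear
	 * page table" means the L3 PTE for a faulting address can itself be
	 * loaded through a fixed virtual window; cr.iha holds the hardware-
	 * computed address of that entry. The fast path is roughly:
	 *
	 *	pte = *(unsigned long *)iha;	// may nest -> nested_dtlb_miss
	 *	if (!(pte & _PAGE_P))
	 *		page_fault();
	 *	else
	 *		itc_insert(pte);	// itc.i here, itc.d in dtlb_miss
	 *
	 * where itc_insert() is shorthand for the translation-cache insert
	 * instruction, not a real function.
	 */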
	mov r16=cr.ifa				// get virtual address
	mov r29=b0				// save b0
	mov r31=pr				// save predicates
.itlb_fault:
	mov r17=cr.iha				// get virtual address of L3 PTE
	movl r30=1f				// load nested fault continuation point
	;;
1:	ld8 r18=[r17]				// read L3 PTE
	;;
	mov b0=r29
	tbit.z p6,p0=r18,_PAGE_P_BIT		// page present bit cleared?
(p6)	br.cond.spnt page_fault
	;;
	itc.i r18
	;;
#ifdef CONFIG_SMP
	/*
	 * Tell the assembler's dependency-violation checker that the above "itc" instructions
	 * cannot possibly affect the following loads:
	 */
	dv_serialize_data

	ld8 r19=[r17]				// read L3 PTE again and see if same
	mov r20=PAGE_SHIFT<<2			// setup page size for purge
	;;
	cmp.ne p7,p0=r18,r19
	;;
(p7)	ptc.l r16,r20
#endif
	mov pr=r31,-1
	rfi
END(itlb_miss)

	.org ia64_ivt+0x0800
/////////////////////////////////////////////////////////////////////////////////////////
// 0x0800 Entry 2 (size 64 bundles) DTLB (9,48)
ENTRY(dtlb_miss)
	DBG_FAULT(2)
#ifdef XEN
	VHPT_CCHAIN_LOOKUP(dtlb_miss,d)
#ifdef VHPT_GLOBAL
	br.cond.sptk page_fault
	;;
#endif
#endif
	/*
	 * The DTLB handler accesses the L3 PTE via the virtually mapped linear
	 * page table. If a nested TLB miss occurs, we switch into physical
	 * mode, walk the page table, and then re-execute the L3 PTE read
	 * and go on normally after that.
	 */
	mov r16=cr.ifa				// get virtual address
	mov r29=b0				// save b0
	mov r31=pr				// save predicates
dtlb_fault:
	mov r17=cr.iha				// get virtual address of L3 PTE
	movl r30=1f				// load nested fault continuation point
	;;
1:	ld8 r18=[r17]				// read L3 PTE
	;;
	mov b0=r29
	tbit.z p6,p0=r18,_PAGE_P_BIT		// page present bit cleared?
(p6)	br.cond.spnt page_fault
	;;
	itc.d r18
	;;
#ifdef CONFIG_SMP
	/*
	 * Tell the assembler's dependency-violation checker that the above "itc" instructions
	 * cannot possibly affect the following loads:
	 */
	dv_serialize_data

	ld8 r19=[r17]				// read L3 PTE again and see if same
	mov r20=PAGE_SHIFT<<2			// setup page size for purge
	;;
	cmp.ne p7,p0=r18,r19
	;;
(p7)	ptc.l r16,r20
#endif
	mov pr=r31,-1
	rfi
END(dtlb_miss)

	.org ia64_ivt+0x0c00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x0c00 Entry 3 (size 64 bundles) Alt ITLB (19)
ENTRY(alt_itlb_miss)
	DBG_FAULT(3)
#ifdef XEN
//#ifdef VHPT_GLOBAL
//	VHPT_CCHAIN_LOOKUP(alt_itlb_miss,i)
//	br.cond.sptk page_fault
//	;;
//#endif
#endif
#ifdef XEN
	mov r31=pr
	mov r16=cr.ifa				// get address that caused the TLB miss
	;;
late_alt_itlb_miss:
	movl r17=PAGE_KERNEL
	mov r21=cr.ipsr
	movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
	;;
#else
	mov r16=cr.ifa				// get address that caused the TLB miss
	movl r17=PAGE_KERNEL
	mov r21=cr.ipsr
	movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
	mov r31=pr
	;;
#endif
#ifdef CONFIG_DISABLE_VHPT
	shr.u r22=r16,61			// get the region number into r22
	;;
	cmp.gt p8,p0=6,r22			// user mode
	;;
(p8)	thash r17=r16
	;;
(p8)	mov cr.iha=r17
(p8)	mov r29=b0				// save b0
(p8)	br.cond.dptk .itlb_fault
#endif
	extr.u r23=r21,IA64_PSR_CPL0_BIT,2	// extract psr.cpl
	and r19=r19,r16				// clear ed, reserved bits, and PTE control bits
#ifdef XEN
	shr.u r18=r16,55			// move address bit 59 to bit 4
	;;
	and r18=0x10,r18			// bit 4=address-bit(59)
#else
	shr.u r18=r16,57			// move address bit 61 to bit 4
	;;
	andcm r18=0x10,r18			// bit 4=~address-bit(61)
#endif
	cmp.ne p8,p0=r0,r23			// psr.cpl != 0?
	or r19=r17,r19				// insert PTE control bits into r19
	;;
	or r19=r19,r18				// set bit 4 (uncached) if the access was to region 6
(p8)	br.cond.spnt page_fault
	;;
	itc.i r19				// insert the TLB entry
	mov pr=r31,-1
	rfi
END(alt_itlb_miss)

	.org ia64_ivt+0x1000
/////////////////////////////////////////////////////////////////////////////////////////
// 0x1000 Entry 4 (size 64 bundles) Alt DTLB (7,46)
ENTRY(alt_dtlb_miss)
	DBG_FAULT(4)
#ifdef XEN
//#ifdef VHPT_GLOBAL
//	VHPT_CCHAIN_LOOKUP(alt_dtlb_miss,d)
//	br.cond.sptk page_fault
//	;;
//#endif
#endif
#ifdef XEN
	mov r31=pr
	mov r16=cr.ifa				// get address that caused the TLB miss
	;;
late_alt_dtlb_miss:
	movl r17=PAGE_KERNEL
	mov r20=cr.isr
	movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
	mov r21=cr.ipsr
	;;
#else
#endif
#ifdef CONFIG_DISABLE_VHPT
	shr.u r22=r16,61			// get the region number into r22
	;;
	cmp.gt p8,p0=6,r22			// access to region 0-5
	;;
(p8)	thash r17=r16
	;;
(p8)	mov cr.iha=r17
(p8)	mov r29=b0				// save b0
(p8)	br.cond.dptk dtlb_fault
#endif
	extr.u r23=r21,IA64_PSR_CPL0_BIT,2	// extract psr.cpl
	and r22=IA64_ISR_CODE_MASK,r20		// get the isr.code field
	tbit.nz p6,p7=r20,IA64_ISR_SP_BIT	// is speculation bit on?
#ifdef XEN
	shr.u r18=r16,55			// move address bit 59 to bit 4
	and r19=r19,r16				// clear ed, reserved bits, and PTE control bits
	tbit.nz p9,p0=r20,IA64_ISR_NA_BIT	// is non-access bit on?
	;;
	and r18=0x10,r18			// bit 4=address-bit(59)
#else
	shr.u r18=r16,57			// move address bit 61 to bit 4
	and r19=r19,r16				// clear ed, reserved bits, and PTE control bits
	tbit.nz p9,p0=r20,IA64_ISR_NA_BIT	// is non-access bit on?
	;;
	andcm r18=0x10,r18			// bit 4=~address-bit(61)
#endif
	cmp.ne p8,p0=r0,r23
(p9)	cmp.eq.or.andcm p6,p7=IA64_ISR_CODE_LFETCH,r22	// check isr.code field
(p8)	br.cond.spnt page_fault
#ifdef XEN
	;;
	// Test for Xen address; if not, handle via page_fault.
	// note that 0xf000 (cached) and 0xe800 (uncached) addresses
	// should be OK.
	extr.u r22=r16,59,5;;
	cmp.eq p8,p0=0x1e,r22
(p8)	br.cond.spnt 1f;;
	cmp.ne p8,p0=0x1d,r22
(p8)	br.cond.sptk page_fault ;;
1:
#endif

	dep r21=-1,r21,IA64_PSR_ED_BIT,1
	or r19=r19,r17				// insert PTE control bits into r19
	;;
	or r19=r19,r18				// set bit 4 (uncached) if the access was to region 6
(p6)	mov cr.ipsr=r21
	;;
(p7)	itc.d r19				// insert the TLB entry
	mov pr=r31,-1
	rfi
END(alt_dtlb_miss)

	.org ia64_ivt+0x1400
/////////////////////////////////////////////////////////////////////////////////////////
// 0x1400 Entry 5 (size 64 bundles) Data nested TLB (6,45)
ENTRY(nested_dtlb_miss)
	/*
	 * In the absence of kernel bugs, we get here when the virtually mapped linear
	 * page table is accessed non-speculatively (e.g., in the Dirty-bit, Instruction
	 * Access-bit, or Data Access-bit faults). If the DTLB entry for the virtual page
	 * table is missing, a nested TLB miss fault is triggered and control is
	 * transferred to this point. When this happens, we look up the pte for the
	 * faulting address by walking the page table in physical mode and return to the
	 * continuation point passed in register r30 (or call page_fault if the address is
	 * not mapped).
	 *
	 * Input:	r16:	faulting address
	 *		r29:	saved b0
	 *		r30:	continuation address
	 *		r31:	saved pr
	 *
	 * Output:	r17:	physical address of L3 PTE of faulting address
	 *		r29:	saved b0
	 *		r30:	continuation address
	 *		r31:	saved pr
	 *
	 * Clobbered:	b0, r18, r19, r21, psr.dt (cleared)
	 */
	rsm psr.dt				// switch to using physical data addressing
#ifdef XEN
	movl r19=THIS_CPU(cpu_kr)+IA64_KR_PT_BASE_OFFSET;;
#else
	mov r19=IA64_KR(PT_BASE)		// get the page table base address
#endif
	shl r21=r16,3				// shift bit 60 into sign bit
	;;
	shr.u r17=r16,61			// get the region number into r17
	;;
	cmp.eq p6,p7=5,r17			// is faulting address in region 5?
	shr.u r18=r16,PGDIR_SHIFT		// get bits 33-63 of faulting address
	;;
(p7)	dep r17=r17,r19,(PAGE_SHIFT-3),3	// put region number bits in place

	srlz.d
	LOAD_PHYSICAL(p6, r19, swapper_pg_dir)	// region 5 is rooted at swapper_pg_dir

	.pred.rel "mutex", p6, p7
(p6)	shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT
(p7)	shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT-3
	;;
(p6)	dep r17=r18,r19,3,(PAGE_SHIFT-3)	// r17=PTA + IFA(33,42)*8
(p7)	dep r17=r18,r17,3,(PAGE_SHIFT-6)	// r17=PTA + (((IFA(61,63) << 7) | IFA(33,39))*8)
	cmp.eq p7,p6=0,r21			// unused address bits all zeroes?
	shr.u r18=r16,PMD_SHIFT			// shift L2 index into position
	;;
	ld8 r17=[r17]				// fetch the L1 entry (may be 0)
	;;
(p7)	cmp.eq p6,p7=r17,r0			// was L1 entry NULL?
	dep r17=r18,r17,3,(PAGE_SHIFT-3)	// compute address of L2 page table entry
	;;
(p7)	ld8 r17=[r17]				// fetch the L2 entry (may be 0)
	shr.u r19=r16,PAGE_SHIFT		// shift L3 index into position
	;;
(p7)	cmp.eq.or.andcm p6,p7=r17,r0		// was L2 entry NULL?
	dep r17=r19,r17,3,(PAGE_SHIFT-3)	// compute address of L3 page table entry
(p6)	br.cond.spnt page_fault
	mov b0=r30
	br.sptk.many b0				// return to continuation point
END(nested_dtlb_miss)

	.org ia64_ivt+0x1800
/////////////////////////////////////////////////////////////////////////////////////////
// 0x1800 Entry 6 (size 64 bundles) Instruction Key Miss (24)
ENTRY(ikey_miss)
#ifdef XEN
	REFLECT(6)
#endif
	DBG_FAULT(6)
	FAULT(6)
END(ikey_miss)

	//-----------------------------------------------------------------------------------
	// call do_page_fault (predicates are in r31, psr.dt may be off, r16 is faulting address)
ENTRY(page_fault)
	ssm psr.dt
	;;
	srlz.i
	;;
	SAVE_MIN_WITH_COVER
#ifdef XEN
	alloc r15=ar.pfs,0,0,4,0
	mov out0=cr.ifa
	mov out1=cr.isr
	mov out3=cr.itir
#else
	alloc r15=ar.pfs,0,0,3,0
	mov out0=cr.ifa
	mov out1=cr.isr
#endif
	adds r3=8,r2				// set up second base pointer
	;;
	ssm psr.ic | PSR_DEFAULT_BITS
	;;
	srlz.i					// guarantee that interruption collection is on
	;;
(p15)	ssm psr.i				// restore psr.i
	movl r14=ia64_leave_kernel
	;;
	SAVE_REST
	mov rp=r14
	;;
	adds out2=16,r12			// out2 = pointer to pt_regs
	br.call.sptk.many b6=ia64_do_page_fault	// ignore return address
END(page_fault)

	.org ia64_ivt+0x1c00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x1c00 Entry 7 (size 64 bundles) Data Key Miss (12,51)
ENTRY(dkey_miss)
#ifdef XEN
	REFLECT(7)
#endif
	DBG_FAULT(7)
	FAULT(7)
END(dkey_miss)

	.org ia64_ivt+0x2000
/////////////////////////////////////////////////////////////////////////////////////////
// 0x2000 Entry 8 (size 64 bundles) Dirty-bit (54)
ENTRY(dirty_bit)
#ifdef XEN
	REFLECT(8)
#endif
	DBG_FAULT(8)
	/*
	 * What we do here is to simply turn on the dirty bit in the PTE. We need to
	 * update both the page-table and the TLB entry. To efficiently access the PTE,
	 * we address it through the virtual page table. Most likely, the TLB entry for
	 * the relevant virtual page table page is still present in the TLB so we can
	 * normally do this without additional TLB misses. In case the necessary virtual
	 * page table TLB entry isn't present, we take a nested TLB miss hit where we look
	 * up the physical address of the L3 PTE and then continue at label 1 below.
	 */
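	/*
	 * Annotation (not in the original source): on SMP the update below is
	 * an atomic read-modify-write through ar.ccv/cmpxchg8.acq. In C, with
	 * a hypothetical cmpxchg() that returns the old value:
	 *
	 *	old = *pte;
	 *	new = old | _PAGE_D | _PAGE_A;
	 *	if (cmpxchg(pte, old, new) == old)
	 *		itc_d(new);		// install updated PTE
	 *	// on failure there is no loop: the handler purges any stale
	 *	// entry, returns via rfi, and the access simply re-faults
	 *
	 * The UP variant skips the atomicity and does a plain load/or/store.
	 */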
	mov r16=cr.ifa				// get the address that caused the fault
	movl r30=1f				// load continuation point in case of nested fault
	;;
	thash r17=r16				// compute virtual address of L3 PTE
	mov r29=b0				// save b0 in case of nested fault
	mov r31=pr				// save pr
#ifdef CONFIG_SMP
	mov r28=ar.ccv				// save ar.ccv
	;;
1:	ld8 r18=[r17]
	;;					// avoid RAW on r18
	mov ar.ccv=r18				// set compare value for cmpxchg
	or r25=_PAGE_D|_PAGE_A,r18		// set the dirty and accessed bits
	;;
	cmpxchg8.acq r26=[r17],r25,ar.ccv
	mov r24=PAGE_SHIFT<<2
	;;
	cmp.eq p6,p7=r26,r18
	;;
(p6)	itc.d r25				// install updated PTE
	;;
	/*
	 * Tell the assembler's dependency-violation checker that the above "itc" instructions
	 * cannot possibly affect the following loads:
	 */
	dv_serialize_data

	ld8 r18=[r17]				// read PTE again
	;;
	cmp.eq p6,p7=r18,r25			// is it the same as the newly installed one?
	;;
(p7)	ptc.l r16,r24
	mov b0=r29				// restore b0
	mov ar.ccv=r28
#else
	;;
1:	ld8 r18=[r17]
	;;					// avoid RAW on r18
	or r18=_PAGE_D|_PAGE_A,r18		// set the dirty and accessed bits
	mov b0=r29				// restore b0
	;;
	st8 [r17]=r18				// store back updated PTE
	itc.d r18				// install updated PTE
#endif
	mov pr=r31,-1				// restore pr
	rfi
END(dirty_bit)

	.org ia64_ivt+0x2400
/////////////////////////////////////////////////////////////////////////////////////////
// 0x2400 Entry 9 (size 64 bundles) Instruction Access-bit (27)
ENTRY(iaccess_bit)
#ifdef XEN
	mov r31=pr
	mov r16=cr.isr
	mov r17=cr.ifa
	mov r19=9
	movl r20=0x2400
	br.sptk.many fast_access_reflect;;
#endif
	DBG_FAULT(9)
	// Like Entry 8, except for instruction access
	mov r16=cr.ifa				// get the address that caused the fault
	movl r30=1f				// load continuation point in case of nested fault
	mov r31=pr				// save predicates
#ifdef CONFIG_ITANIUM
	/*
	 * Erratum 10 (IFA may contain incorrect address) has "NoFix" status.
	 */
	mov r17=cr.ipsr
	;;
	mov r18=cr.iip
	tbit.z p6,p0=r17,IA64_PSR_IS_BIT	// IA64 instruction set?
	;;
(p6)	mov r16=r18				// if so, use cr.iip instead of cr.ifa
#endif /* CONFIG_ITANIUM */
	;;
	thash r17=r16				// compute virtual address of L3 PTE
	mov r29=b0				// save b0 in case of nested fault
#ifdef CONFIG_SMP
	mov r28=ar.ccv				// save ar.ccv
	;;
1:	ld8 r18=[r17]
	;;
	mov ar.ccv=r18				// set compare value for cmpxchg
	or r25=_PAGE_A,r18			// set the accessed bit
	;;
	cmpxchg8.acq r26=[r17],r25,ar.ccv
	mov r24=PAGE_SHIFT<<2
	;;
	cmp.eq p6,p7=r26,r18
	;;
(p6)	itc.i r25				// install updated PTE
	;;
	/*
	 * Tell the assembler's dependency-violation checker that the above "itc" instructions
	 * cannot possibly affect the following loads:
	 */
	dv_serialize_data

	ld8 r18=[r17]				// read PTE again
	;;
	cmp.eq p6,p7=r18,r25			// is it the same as the newly installed one?
	;;
(p7)	ptc.l r16,r24
	mov b0=r29				// restore b0
	mov ar.ccv=r28
#else /* !CONFIG_SMP */
	;;
1:	ld8 r18=[r17]
	;;
	or r18=_PAGE_A,r18			// set the accessed bit
	mov b0=r29				// restore b0
	;;
	st8 [r17]=r18				// store back updated PTE
	itc.i r18				// install updated PTE
#endif /* !CONFIG_SMP */
	mov pr=r31,-1
	rfi
END(iaccess_bit)

	.org ia64_ivt+0x2800
/////////////////////////////////////////////////////////////////////////////////////////
// 0x2800 Entry 10 (size 64 bundles) Data Access-bit (15,55)
ENTRY(daccess_bit)
#ifdef XEN
	mov r31=pr
	mov r16=cr.isr
	mov r17=cr.ifa
	mov r19=10
	movl r20=0x2800
	br.sptk.many fast_access_reflect;;
#endif
	DBG_FAULT(10)
	// Like Entry 8, except for data access
	mov r16=cr.ifa				// get the address that caused the fault
	movl r30=1f				// load continuation point in case of nested fault
	;;
	thash r17=r16				// compute virtual address of L3 PTE
	mov r31=pr
	mov r29=b0				// save b0 in case of nested fault
#ifdef CONFIG_SMP
	mov r28=ar.ccv				// save ar.ccv
	;;
1:	ld8 r18=[r17]
	;;					// avoid RAW on r18
	mov ar.ccv=r18				// set compare value for cmpxchg
	or r25=_PAGE_A,r18			// set the accessed bit
	;;
	cmpxchg8.acq r26=[r17],r25,ar.ccv
	mov r24=PAGE_SHIFT<<2
	;;
	cmp.eq p6,p7=r26,r18
	;;
(p6)	itc.d r25				// install updated PTE
	/*
	 * Tell the assembler's dependency-violation checker that the above "itc" instructions
	 * cannot possibly affect the following loads:
	 */
	dv_serialize_data
	;;
	ld8 r18=[r17]				// read PTE again
	;;
	cmp.eq p6,p7=r18,r25			// is it the same as the newly installed one?
	;;
(p7)	ptc.l r16,r24
	mov ar.ccv=r28
#else
	;;
1:	ld8 r18=[r17]
	;;					// avoid RAW on r18
	or r18=_PAGE_A,r18			// set the accessed bit
	;;
	st8 [r17]=r18				// store back updated PTE
	itc.d r18				// install updated PTE
#endif
	mov b0=r29				// restore b0
	mov pr=r31,-1
	rfi
END(daccess_bit)

	.org ia64_ivt+0x2c00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x2c00 Entry 11 (size 64 bundles) Break instruction (33)
ENTRY(break_fault)
	/*
	 * The streamlined system call entry/exit paths only save/restore the initial part
	 * of pt_regs. This implies that the callers of system-calls must adhere to the
	 * normal procedure calling conventions.
	 *
	 *   Registers to be saved & restored:
	 *	CR registers: cr.ipsr, cr.iip, cr.ifs
	 *	AR registers: ar.unat, ar.pfs, ar.rsc, ar.rnat, ar.bspstore, ar.fpsr
	 *	others: pr, b0, b6, loadrs, r1, r11, r12, r13, r15
	 *   Registers to be restored only:
	 *	r8-r11: output value from the system call.
	 *
	 * During system call exit, scratch registers (including r15) are modified/cleared
	 * to prevent leaking bits from kernel to user level.
	 */
	DBG_FAULT(11)
#ifdef XEN
	mov r16=cr.isr
	mov r17=cr.iim
	mov r31=pr
	;;
	movl r18=XSI_PSR_IC
	;;
	ld8 r19=[r18]
	;;
	cmp.eq p7,p0=r0,r17			// is this a pseudo-cover?
(p7)	br.spnt.many dispatch_privop_fault
	;;
	// if vpsr.ic is off, we have a hyperprivop
	// A hyperprivop is hand-coded assembly with psr.ic off
	// which means no calls, no use of r1-r15 and no memory accesses
	// except to pinned addresses!
	cmp4.eq p7,p0=r0,r19
(p7)	br.sptk.many fast_hyperprivop
	;;
	movl r22=THIS_CPU(cpu_kr)+IA64_KR_CURRENT_OFFSET;;
	ld8 r22=[r22]
	;;
	adds r22=IA64_VCPU_BREAKIMM_OFFSET,r22;;
	ld4 r23=[r22];;
	cmp4.eq p6,p7=r23,r17			// Xen-reserved breakimm?
(p6)	br.spnt.many dispatch_break_fault
	;;
	br.sptk.many fast_break_reflect
	;;
#endif
	movl r16=THIS_CPU(cpu_kr)+IA64_KR_CURRENT_OFFSET;;
	ld8 r16=[r16]
	mov r17=cr.iim
	mov r18=__IA64_BREAK_SYSCALL
	mov r21=ar.fpsr
	mov r29=cr.ipsr
	mov r19=b6
	mov r25=ar.unat
	mov r27=ar.rsc
	mov r26=ar.pfs
	mov r28=cr.iip
#ifndef XEN
	mov r31=pr				// prepare to save predicates
#endif
	mov r20=r1
	;;
	adds r16=IA64_TASK_THREAD_ON_USTACK_OFFSET,r16
	cmp.eq p0,p7=r18,r17			// is this a system call? (p7 <- false, if so)
(p7)	br.cond.spnt non_syscall
	;;
	ld1 r17=[r16]				// load current->thread.on_ustack flag
	st1 [r16]=r0				// clear current->thread.on_ustack flag
	add r1=-IA64_TASK_THREAD_ON_USTACK_OFFSET,r16	// set r1 for MINSTATE_START_SAVE_MIN_VIRT
	;;
	invala

	/* adjust return address so we skip over the break instruction: */

	extr.u r8=r29,41,2			// extract ei field from cr.ipsr
	;;
	cmp.eq p6,p7=2,r8			// isr.ei==2?
	mov r2=r1				// setup r2 for ia64_syscall_setup
	;;
(p6)	mov r8=0				// clear ei to 0
(p6)	adds r28=16,r28				// switch cr.iip to next bundle (cr.ipsr.ei wrapped)
(p7)	adds r8=1,r8				// increment ei to next slot
	;;
	cmp.eq pKStk,pUStk=r0,r17		// are we in kernel mode already?
	dep r29=r8,r29,41,2			// insert new ei into cr.ipsr
	;;

	// switch from user to kernel RBS:
	MINSTATE_START_SAVE_MIN_VIRT
	br.call.sptk.many b7=ia64_syscall_setup
	;;
	MINSTATE_END_SAVE_MIN_VIRT		// switch to bank 1
	ssm psr.ic | PSR_DEFAULT_BITS
	;;
	srlz.i					// guarantee that interruption collection is on
	mov r3=NR_syscalls - 1
	;;
(p15)	ssm psr.i				// restore psr.i
	// p10==true means out registers are more than 8 or r15's Nat is true
(p10)	br.cond.spnt.many ia64_ret_from_syscall
	;;
	movl r16=sys_call_table

	adds r15=-1024,r15			// r15 contains the syscall number---subtract 1024
	movl r2=ia64_ret_from_syscall
	;;
	shladd r20=r15,3,r16			// r20 = sys_call_table + 8*(syscall-1024)
	cmp.leu p6,p7=r15,r3			// (syscall > 0 && syscall < 1024 + NR_syscalls) ?
	mov rp=r2				// set the real return addr
	;;
(p6)	ld8 r20=[r20]				// load address of syscall entry point
(p7)	movl r20=sys_ni_syscall

	add r2=TI_FLAGS+IA64_TASK_SIZE,r13
	;;
	ld4 r2=[r2]				// r2 = current_thread_info()->flags
	;;
	and r2=_TIF_SYSCALL_TRACEAUDIT,r2	// mask trace or audit
	;;
	cmp.eq p8,p0=r2,r0
	mov b6=r20
	;;
(p8)	br.call.sptk.many b6=b6			// ignore this return addr
	br.cond.sptk ia64_trace_syscall
	// NOT REACHED
END(break_fault)

	.org ia64_ivt+0x3000
/////////////////////////////////////////////////////////////////////////////////////////
// 0x3000 Entry 12 (size 64 bundles) External Interrupt (4)
ENTRY(interrupt)
	DBG_FAULT(12)
	mov r31=pr				// prepare to save predicates
	;;
#ifdef XEN
	mov r30=cr.ivr				// pass cr.ivr as first arg
	// FIXME: this is a hack... use cpuinfo.ksoftirqd because it's
	// not used anywhere else and we need a place to stash ivr and
	// there are no registers available unused by SAVE_MIN/REST
	movl r29=THIS_CPU(cpu_info)+IA64_CPUINFO_KSOFTIRQD_OFFSET;;
	st8 [r29]=r30;;
	movl r28=slow_interrupt;;
	mov r29=rp;;
	mov rp=r28;;
	br.cond.sptk.many fast_tick_reflect
	;;
slow_interrupt:
	mov rp=r29;;
#endif
	SAVE_MIN_WITH_COVER			// uses r31; defines r2 and r3
	ssm psr.ic | PSR_DEFAULT_BITS
	;;
	adds r3=8,r2				// set up second base pointer for SAVE_REST
	srlz.i					// ensure everybody knows psr.ic is back on
	;;
	SAVE_REST
	;;
	alloc r14=ar.pfs,0,0,2,0		// must be first in an insn group
#ifdef XEN
	movl out0=THIS_CPU(cpu_info)+IA64_CPUINFO_KSOFTIRQD_OFFSET;;
	ld8 out0=[out0];;
#else
	mov out0=cr.ivr				// pass cr.ivr as first arg
#endif
	add out1=16,sp				// pass pointer to pt_regs as second arg
	;;
	srlz.d					// make sure we see the effect of cr.ivr
	movl r14=ia64_leave_kernel
	;;
	mov rp=r14
	br.call.sptk.many b6=ia64_handle_irq
END(interrupt)

	.org ia64_ivt+0x3400
/////////////////////////////////////////////////////////////////////////////////////////
// 0x3400 Entry 13 (size 64 bundles) Reserved
	DBG_FAULT(13)
	FAULT(13)

#ifdef XEN
	// There is no particular reason for this code to be here, other than that
	// there happens to be space here that would go unused otherwise. If this
	// fault ever gets "unreserved", simply move the following code to a more
	// suitable spot...

GLOBAL_ENTRY(dispatch_break_fault)
	SAVE_MIN_WITH_COVER
	;;
dispatch_break_fault_post_save:
	alloc r14=ar.pfs,0,0,4,0		// now it's safe (must be first in insn group!)
	mov out0=cr.ifa
	adds out1=16,sp
	mov out2=cr.isr				// FIXME: pity to make this slow access twice
	mov out3=cr.iim				// FIXME: pity to make this slow access twice

	ssm psr.ic | PSR_DEFAULT_BITS
	;;
	srlz.i					// guarantee that interruption collection is on
	;;
(p15)	ssm psr.i				// restore psr.i
	adds r3=8,r2				// set up second base pointer
	;;
	SAVE_REST
	movl r14=ia64_leave_kernel
	;;
	mov rp=r14
	br.sptk.many ia64_prepare_handle_break
END(dispatch_break_fault)
#endif

	.org ia64_ivt+0x3800
/////////////////////////////////////////////////////////////////////////////////////////
// 0x3800 Entry 14 (size 64 bundles) Reserved
	DBG_FAULT(14)
	FAULT(14)

	/*
	 * There is no particular reason for this code to be here, other than that
	 * there happens to be space here that would go unused otherwise. If this
	 * fault ever gets "unreserved", simply move the following code to a more
	 * suitable spot...
	 *
	 * ia64_syscall_setup() is a separate subroutine so that it can
	 *	allocate stacked registers so it can safely demine any
	 *	potential NaT values from the input registers.
	 *
	 * On entry:
	 *	- executing on bank 0 or bank 1 register set (doesn't matter)
	 *	- r1: stack pointer
	 *	- r2: current task pointer
	 *	- r3: preserved
	 *	- r11: original contents (saved ar.pfs to be saved)
	 *	- r12: original contents (sp to be saved)
	 *	- r13: original contents (tp to be saved)
	 *	- r15: original contents (syscall # to be saved)
	 *	- r18: saved bsp (after switching to kernel stack)
	 *	- r19: saved b6
	 *	- r20: saved r1 (gp)
	 *	- r21: saved ar.fpsr
	 *	- r22: kernel's register backing store base (krbs_base)
	 *	- r23: saved ar.bspstore
	 *	- r24: saved ar.rnat
	 *	- r25: saved ar.unat
	 *	- r26: saved ar.pfs
	 *	- r27: saved ar.rsc
	 *	- r28: saved cr.iip
	 *	- r29: saved cr.ipsr
	 *	- r31: saved pr
	 *	- b0: original contents (to be saved)
	 * On exit:
	 *	- executing on bank 1 registers
	 *	- psr.ic enabled, interrupts restored
	 *	- p10: TRUE if syscall is invoked with more than 8 out
	 *	  registers or r15's Nat is true
	 *	- r1: kernel's gp
	 *	- r3: preserved (same as on entry)
	 *	- r8: -EINVAL if p10 is true
	 *	- r12: points to kernel stack
	 *	- r13: points to current task
	 *	- p15: TRUE if interrupts need to be re-enabled
	 *	- ar.fpsr: set to kernel settings
	 */
GLOBAL_ENTRY(ia64_syscall_setup)
#ifndef XEN
#if PT(B6) != 0
# error This code assumes that b6 is the first field in pt_regs.
#endif
#endif
	st8 [r1]=r19				// save b6
	add r16=PT(CR_IPSR),r1			// initialize first base pointer
	add r17=PT(R11),r1			// initialize second base pointer
	;;
	alloc r19=ar.pfs,8,0,0,0		// ensure in0-in7 are writable
	st8 [r16]=r29,PT(AR_PFS)-PT(CR_IPSR)	// save cr.ipsr
	tnat.nz p8,p0=in0

	st8.spill [r17]=r11,PT(CR_IIP)-PT(R11)	// save r11
	tnat.nz p9,p0=in1
(pKStk)	mov r18=r0				// make sure r18 isn't NaT
	;;

	st8 [r16]=r26,PT(CR_IFS)-PT(AR_PFS)	// save ar.pfs
	st8 [r17]=r28,PT(AR_UNAT)-PT(CR_IIP)	// save cr.iip
	mov r28=b0				// save b0 (2 cyc)
	;;

	st8 [r17]=r25,PT(AR_RSC)-PT(AR_UNAT)	// save ar.unat
	dep r19=0,r19,38,26			// clear all bits but 0..37 [I0]
(p8)	mov in0=-1
	;;

	st8 [r16]=r19,PT(AR_RNAT)-PT(CR_IFS)	// store ar.pfs.pfm in cr.ifs
	extr.u r11=r19,7,7	// I0		// get sol of ar.pfs
	and r8=0x7f,r19		// A		// get sof of ar.pfs

	st8 [r17]=r27,PT(AR_BSPSTORE)-PT(AR_RSC)// save ar.rsc
	tbit.nz p15,p0=r29,IA64_PSR_I_BIT // I0
(p9)	mov in1=-1
	;;

(pUStk)	sub r18=r18,r22				// r18=RSE.ndirty*8
	tnat.nz p10,p0=in2
	add r11=8,r11
	;;
(pKStk)	adds r16=PT(PR)-PT(AR_RNAT),r16		// skip over ar_rnat field
(pKStk)	adds r17=PT(B0)-PT(AR_BSPSTORE),r17	// skip over ar_bspstore field
	tnat.nz p11,p0=in3
	;;
(p10)	mov in2=-1
	tnat.nz p12,p0=in4			// [I0]
(p11)	mov in3=-1
	;;
(pUStk)	st8 [r16]=r24,PT(PR)-PT(AR_RNAT)	// save ar.rnat
(pUStk)	st8 [r17]=r23,PT(B0)-PT(AR_BSPSTORE)	// save ar.bspstore
	shl r18=r18,16				// compute ar.rsc to be used for "loadrs"
	;;
	st8 [r16]=r31,PT(LOADRS)-PT(PR)		// save predicates
	st8 [r17]=r28,PT(R1)-PT(B0)		// save b0
	tnat.nz p13,p0=in5			// [I0]
	;;
	st8 [r16]=r18,PT(R12)-PT(LOADRS)	// save ar.rsc value for "loadrs"
	st8.spill [r17]=r20,PT(R13)-PT(R1)	// save original r1
(p12)	mov in4=-1
	;;

.mem.offset 0,0; st8.spill [r16]=r12,PT(AR_FPSR)-PT(R12)	// save r12
.mem.offset 8,0; st8.spill [r17]=r13,PT(R15)-PT(R13)		// save r13
(p13)	mov in5=-1
	;;
	st8 [r16]=r21,PT(R8)-PT(AR_FPSR)	// save ar.fpsr
	tnat.nz p14,p0=in6
	cmp.lt p10,p9=r11,r8			// frame size can't be more than local+8
	;;
	stf8 [r16]=f1				// ensure pt_regs.r8 != 0 (see handle_syscall_error)
(p9)	tnat.nz p10,p0=r15
	adds r12=-16,r1				// switch to kernel memory stack (with 16 bytes of scratch)

	st8.spill [r17]=r15			// save r15
	tnat.nz p8,p0=in7
	nop.i 0

	mov r13=r2				// establish `current'
	movl r1=__gp				// establish kernel global pointer
	;;
(p14)	mov in6=-1
(p8)	mov in7=-1
	nop.i 0

	cmp.eq pSys,pNonSys=r0,r0		// set pSys=1, pNonSys=0
	movl r17=FPSR_DEFAULT
	;;
	mov.m ar.fpsr=r17			// set ar.fpsr to kernel default value
(p10)	mov r8=-EINVAL
	br.ret.sptk.many b7
END(ia64_syscall_setup)

	.org ia64_ivt+0x3c00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x3c00 Entry 15 (size 64 bundles) Reserved
	DBG_FAULT(15)
	FAULT(15)

	/*
	 * Squatting in this space ...
	 *
	 * This special case dispatcher for illegal operation faults allows preserved
	 * registers to be modified through a callback function (asm only) that is handed
	 * back from the fault handler in r8. Up to three arguments can be passed to the
	 * callback function by returning an aggregate with the callback as its first
	 * element, followed by the arguments.
	 */
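	/*
	 * Annotation (not in the original source): per the ia64 software
	 * conventions, a small aggregate is returned in r8-r11, so the C
	 * equivalent of "callback plus up to three arguments" is a struct
	 * like this hypothetical one:
	 *
	 *	struct illegal_op_return {
	 *		void (*callback)(long, long, long);	// -> r8
	 *		long arg0, arg1, arg2;			// -> r9-r11
	 *	};
	 *
	 * which is why the code below moves r9-r11 into out0-out2 and
	 * branches to b6=r8 when the callback is non-NULL.
	 */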
ENTRY(dispatch_illegal_op_fault)
	SAVE_MIN_WITH_COVER
	ssm psr.ic | PSR_DEFAULT_BITS
	;;
	srlz.i					// guarantee that interruption collection is on
	;;
(p15)	ssm psr.i				// restore psr.i
	adds r3=8,r2				// set up second base pointer for SAVE_REST
	;;
	alloc r14=ar.pfs,0,0,1,0		// must be first in insn group
	mov out0=ar.ec
	;;
	SAVE_REST
	;;
	br.call.sptk.many rp=ia64_illegal_op_fault
.ret0:	;;
	alloc r14=ar.pfs,0,0,3,0		// must be first in insn group
	mov out0=r9
	mov out1=r10
	mov out2=r11
	movl r15=ia64_leave_kernel
	;;
	mov rp=r15
	mov b6=r8
	;;
	cmp.ne p6,p0=0,r8
(p6)	br.call.dpnt.many b6=b6			// call returns to ia64_leave_kernel
	br.sptk.many ia64_leave_kernel
END(dispatch_illegal_op_fault)

	.org ia64_ivt+0x4000
/////////////////////////////////////////////////////////////////////////////////////////
// 0x4000 Entry 16 (size 64 bundles) Reserved
	DBG_FAULT(16)
	FAULT(16)

#ifdef XEN
	// There is no particular reason for this code to be here, other than that
	// there happens to be space here that would go unused otherwise. If this
	// fault ever gets "unreserved", simply move the following code to a more
	// suitable spot...

ENTRY(dispatch_privop_fault)
	SAVE_MIN_WITH_COVER
	;;
	alloc r14=ar.pfs,0,0,4,0		// now it's safe (must be first in insn group!)
	mov out0=cr.ifa
	adds out1=16,sp
	mov out2=cr.isr				// FIXME: pity to make this slow access twice
	mov out3=cr.itir

	ssm psr.ic | PSR_DEFAULT_BITS
	;;
	srlz.i					// guarantee that interruption collection is on
	;;
(p15)	ssm psr.i				// restore psr.i
	adds r3=8,r2				// set up second base pointer
	;;
	SAVE_REST
	movl r14=ia64_leave_kernel
	;;
	mov rp=r14
	br.sptk.many ia64_prepare_handle_privop
END(dispatch_privop_fault)
#endif

	.org ia64_ivt+0x4400
/////////////////////////////////////////////////////////////////////////////////////////
// 0x4400 Entry 17 (size 64 bundles) Reserved
	DBG_FAULT(17)
	FAULT(17)

ENTRY(non_syscall)
	SAVE_MIN_WITH_COVER

	// There is no particular reason for this code to be here, other than that
	// there happens to be space here that would go unused otherwise. If this
	// fault ever gets "unreserved", simply move the following code to a more
	// suitable spot...

	alloc r14=ar.pfs,0,0,2,0
	mov out0=cr.iim
	add out1=16,sp
	adds r3=8,r2				// set up second base pointer for SAVE_REST

	ssm psr.ic | PSR_DEFAULT_BITS
	;;
	srlz.i					// guarantee that interruption collection is on
	;;
(p15)	ssm psr.i				// restore psr.i
	movl r15=ia64_leave_kernel
	;;
	SAVE_REST
	mov rp=r15
	;;
	br.call.sptk.many b6=ia64_bad_break	// avoid WAW on CFM and ignore return addr
END(non_syscall)

	.org ia64_ivt+0x4800
/////////////////////////////////////////////////////////////////////////////////////////
// 0x4800 Entry 18 (size 64 bundles) Reserved
	DBG_FAULT(18)
	FAULT(18)

	/*
	 * There is no particular reason for this code to be here, other than that
	 * there happens to be space here that would go unused otherwise. If this
	 * fault ever gets "unreserved", simply move the following code to a more
	 * suitable spot...
	 */

ENTRY(dispatch_unaligned_handler)
	SAVE_MIN_WITH_COVER
	;;
	alloc r14=ar.pfs,0,0,2,0		// now it's safe (must be first in insn group!)
	mov out0=cr.ifa
	adds out1=16,sp

	ssm psr.ic | PSR_DEFAULT_BITS
	;;
	srlz.i					// guarantee that interruption collection is on
	;;
(p15)	ssm psr.i				// restore psr.i
	adds r3=8,r2				// set up second base pointer
	;;
	SAVE_REST
	movl r14=ia64_leave_kernel
	;;
	mov rp=r14
	br.sptk.many ia64_prepare_handle_unaligned
END(dispatch_unaligned_handler)

	.org ia64_ivt+0x4c00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x4c00 Entry 19 (size 64 bundles) Reserved
	DBG_FAULT(19)
	FAULT(19)

	/*
	 * There is no particular reason for this code to be here, other than that
	 * there happens to be space here that would go unused otherwise. If this
	 * fault ever gets "unreserved", simply move the following code to a more
	 * suitable spot...
	 */

ENTRY(dispatch_to_fault_handler)
	/*
	 * Input:
	 *	psr.ic:	off
	 *	r19:	fault vector number (e.g., 24 for General Exception)
	 *	r31:	contains saved predicates (pr)
	 */
	SAVE_MIN_WITH_COVER_R19
	alloc r14=ar.pfs,0,0,5,0
	mov out0=r15
	mov out1=cr.isr
	mov out2=cr.ifa
	mov out3=cr.iim
	mov out4=cr.itir
	;;
	ssm psr.ic | PSR_DEFAULT_BITS
	;;
	srlz.i					// guarantee that interruption collection is on
	;;
(p15)	ssm psr.i				// restore psr.i
	adds r3=8,r2				// set up second base pointer for SAVE_REST
	;;
	SAVE_REST
	movl r14=ia64_leave_kernel
	;;
	mov rp=r14
	br.call.sptk.many b6=ia64_fault
END(dispatch_to_fault_handler)

	//
	// --- End of long entries, Beginning of short entries
	//

	.org ia64_ivt+0x5000
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5000 Entry 20 (size 16 bundles) Page Not Present (10,22,49)
ENTRY(page_not_present)
#ifdef XEN
	REFLECT(20)
#endif
	DBG_FAULT(20)
	mov r16=cr.ifa
	rsm psr.dt
	/*
	 * The Linux page fault handler doesn't expect non-present pages to be in
	 * the TLB. Flush the existing entry now, so we meet that expectation.
	 */
	mov r17=PAGE_SHIFT<<2
	;;
	ptc.l r16,r17
	;;
	mov r31=pr
	srlz.d
	br.sptk.many page_fault
END(page_not_present)

	.org ia64_ivt+0x5100
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5100 Entry 21 (size 16 bundles) Key Permission (13,25,52)
ENTRY(key_permission)
#ifdef XEN
	REFLECT(21)
#endif
	DBG_FAULT(21)
	mov r16=cr.ifa
	rsm psr.dt
	mov r31=pr
	;;
	srlz.d
	br.sptk.many page_fault
END(key_permission)

	.org ia64_ivt+0x5200
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5200 Entry 22 (size 16 bundles) Instruction Access Rights (26)
ENTRY(iaccess_rights)
#ifdef XEN
	REFLECT(22)
#endif
	DBG_FAULT(22)
	mov r16=cr.ifa
	rsm psr.dt
	mov r31=pr
	;;
	srlz.d
	br.sptk.many page_fault
END(iaccess_rights)

	.org ia64_ivt+0x5300
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5300 Entry 23 (size 16 bundles) Data Access Rights (14,53)
ENTRY(daccess_rights)
#ifdef XEN
	mov r31=pr
	mov r16=cr.isr
	mov r17=cr.ifa
	mov r19=23
	movl r20=0x5300
	br.sptk.many fast_access_reflect;;
#endif
	DBG_FAULT(23)
	mov r16=cr.ifa
	rsm psr.dt
	mov r31=pr
	;;
	srlz.d
	br.sptk.many page_fault
END(daccess_rights)

	.org ia64_ivt+0x5400
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5400 Entry 24 (size 16 bundles) General Exception (5,32,34,36,38,39)
ENTRY(general_exception)
	DBG_FAULT(24)
	mov r16=cr.isr
	mov r31=pr
	;;
#ifdef XEN
	cmp4.ge p6,p0=0x20,r16
(p6)	br.sptk.many dispatch_privop_fault
#else
	cmp4.eq p6,p0=0,r16
(p6)	br.sptk.many dispatch_illegal_op_fault
#endif
	;;
	mov r19=24				// fault number
	br.sptk.many dispatch_to_fault_handler
END(general_exception)

	.org ia64_ivt+0x5500
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5500 Entry 25 (size 16 bundles) Disabled FP-Register (35)
ENTRY(disabled_fp_reg)
#ifdef XEN
	REFLECT(25)
#endif
	DBG_FAULT(25)
	rsm psr.dfh				// ensure we can access fph
	;;
	srlz.d
	mov r31=pr
	mov r19=25
	br.sptk.many dispatch_to_fault_handler
END(disabled_fp_reg)

	.org ia64_ivt+0x5600
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5600 Entry 26 (size 16 bundles) Nat Consumption (11,23,37,50)
ENTRY(nat_consumption)
#ifdef XEN
	REFLECT(26)
#endif
	DBG_FAULT(26)
	FAULT(26)
END(nat_consumption)

	.org ia64_ivt+0x5700
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5700 Entry 27 (size 16 bundles) Speculation (40)
ENTRY(speculation_vector)
#ifdef XEN
	// this probably need not reflect...
	REFLECT(27)
#endif
	DBG_FAULT(27)
	/*
	 * A [f]chk.[as] instruction needs to take the branch to the recovery code but
	 * this part of the architecture is not implemented in hardware on some CPUs, such
	 * as Itanium. Thus, in general we need to emulate the behavior. IIM contains
	 * the relative target (not yet sign extended). So after sign extending it we
	 * simply add it to IIP. We also need to reset the EI field of the IPSR to zero,
	 * i.e., the slot to restart into.
	 *
	 * cr.iim contains zero_ext(imm21)
	 */
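	/*
	 * Annotation (not in the original source): the shl/shr pair below is
	 * plain arithmetic sign extension plus a conversion from the 21-bit
	 * immediate to a 16-byte bundle offset. In C:
	 *
	 *	long off = ((long)(iim << 43)) >> 39;	// sign-extend imm21, then *16
	 *	iip += off;				// recovery-code target
	 *	ipsr &= ~(3UL << 41);			// clear ei: restart in slot 0
	 */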
	mov r18=cr.iim
	;;
	mov r17=cr.iip
	shl r18=r18,43				// put sign bit in position (43=64-21)
	;;

	mov r16=cr.ipsr
	shr r18=r18,39				// sign extend (39=43-4)
	;;

	add r17=r17,r18				// now add the offset
	;;
	mov cr.iip=r17
	dep r16=0,r16,41,2			// clear EI
	;;

	mov cr.ipsr=r16
	;;

	rfi					// and go back
END(speculation_vector)

	.org ia64_ivt+0x5800
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5800 Entry 28 (size 16 bundles) Reserved
	DBG_FAULT(28)
	FAULT(28)

	.org ia64_ivt+0x5900
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5900 Entry 29 (size 16 bundles) Debug (16,28,56)
ENTRY(debug_vector)
#ifdef XEN
	REFLECT(29)
#endif
	DBG_FAULT(29)
	FAULT(29)
END(debug_vector)

	.org ia64_ivt+0x5a00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5a00 Entry 30 (size 16 bundles) Unaligned Reference (57)
ENTRY(unaligned_access)
#ifdef XEN
	REFLECT(30)
#endif
	DBG_FAULT(30)
	mov r16=cr.ipsr
	mov r31=pr				// prepare to save predicates
	;;
	br.sptk.many dispatch_unaligned_handler
END(unaligned_access)

	.org ia64_ivt+0x5b00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5b00 Entry 31 (size 16 bundles) Unsupported Data Reference (57)
ENTRY(unsupported_data_reference)
#ifdef XEN
	REFLECT(31)
#endif
	DBG_FAULT(31)
	FAULT(31)
END(unsupported_data_reference)

	.org ia64_ivt+0x5c00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5c00 Entry 32 (size 16 bundles) Floating-Point Fault (64)
ENTRY(floating_point_fault)
#ifdef XEN
	REFLECT(32)
#endif
	DBG_FAULT(32)
	FAULT(32)
END(floating_point_fault)

	.org ia64_ivt+0x5d00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5d00 Entry 33 (size 16 bundles) Floating Point Trap (66)
ENTRY(floating_point_trap)
#ifdef XEN
	REFLECT(33)
#endif
	DBG_FAULT(33)
	FAULT(33)
END(floating_point_trap)

	.org ia64_ivt+0x5e00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5e00 Entry 34 (size 16 bundles) Lower Privilege Transfer Trap (66)
ENTRY(lower_privilege_trap)
#ifdef XEN
	REFLECT(34)
#endif
	DBG_FAULT(34)
	FAULT(34)
END(lower_privilege_trap)

	.org ia64_ivt+0x5f00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5f00 Entry 35 (size 16 bundles) Taken Branch Trap (68)
ENTRY(taken_branch_trap)
#ifdef XEN
	REFLECT(35)
#endif
	DBG_FAULT(35)
	FAULT(35)
END(taken_branch_trap)

	.org ia64_ivt+0x6000
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6000 Entry 36 (size 16 bundles) Single Step Trap (69)
ENTRY(single_step_trap)
#ifdef XEN
	REFLECT(36)
#endif
	DBG_FAULT(36)
	FAULT(36)
END(single_step_trap)

	.org ia64_ivt+0x6100
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6100 Entry 37 (size 16 bundles) Reserved
	DBG_FAULT(37)
	FAULT(37)

	.org ia64_ivt+0x6200
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6200 Entry 38 (size 16 bundles) Reserved
	DBG_FAULT(38)
	FAULT(38)

	.org ia64_ivt+0x6300
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6300 Entry 39 (size 16 bundles) Reserved
	DBG_FAULT(39)
	FAULT(39)

	.org ia64_ivt+0x6400
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6400 Entry 40 (size 16 bundles) Reserved
	DBG_FAULT(40)
	FAULT(40)

	.org ia64_ivt+0x6500
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6500 Entry 41 (size 16 bundles) Reserved
	DBG_FAULT(41)
	FAULT(41)

	.org ia64_ivt+0x6600
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6600 Entry 42 (size 16 bundles) Reserved
	DBG_FAULT(42)
	FAULT(42)

	.org ia64_ivt+0x6700
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6700 Entry 43 (size 16 bundles) Reserved
	DBG_FAULT(43)
	FAULT(43)

	.org ia64_ivt+0x6800
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6800 Entry 44 (size 16 bundles) Reserved
	DBG_FAULT(44)
	FAULT(44)

	.org ia64_ivt+0x6900
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6900 Entry 45 (size 16 bundles) IA-32 Exception (17,18,29,41,42,43,44,58,60,61,62,72,73,75,76,77)
ENTRY(ia32_exception)
#ifdef XEN
	REFLECT(45)
#endif
	DBG_FAULT(45)
	FAULT(45)
END(ia32_exception)

	.org ia64_ivt+0x6a00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6a00 Entry 46 (size 16 bundles) IA-32 Intercept (30,31,59,70,71)
ENTRY(ia32_intercept)
#ifdef XEN
	REFLECT(46)
#endif
	DBG_FAULT(46)
#ifdef CONFIG_IA32_SUPPORT
	mov r31=pr
	mov r16=cr.isr
	;;
	extr.u r17=r16,16,8			// get ISR.code
	mov r18=ar.eflag
	mov r19=cr.iim				// old eflag value
	;;
	cmp.ne p6,p0=2,r17
(p6)	br.cond.spnt 1f				// not a system flag fault
	xor r16=r18,r19
	;;
	extr.u r17=r16,18,1			// get the eflags.ac bit
	;;
	cmp.eq p6,p0=0,r17
(p6)	br.cond.spnt 1f				// eflags.ac bit didn't change
	;;
	mov pr=r31,-1				// restore predicate registers
	rfi

1:
#endif	// CONFIG_IA32_SUPPORT
	FAULT(46)
END(ia32_intercept)

	.org ia64_ivt+0x6b00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6b00 Entry 47 (size 16 bundles) IA-32 Interrupt (74)
ENTRY(ia32_interrupt)
#ifdef XEN
	REFLECT(47)
#endif
	DBG_FAULT(47)
#ifdef CONFIG_IA32_SUPPORT
	mov r31=pr
	br.sptk.many dispatch_to_ia32_handler
#else
	FAULT(47)
#endif
END(ia32_interrupt)

	.org ia64_ivt+0x6c00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6c00 Entry 48 (size 16 bundles) Reserved
	DBG_FAULT(48)
	FAULT(48)

	.org ia64_ivt+0x6d00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6d00 Entry 49 (size 16 bundles) Reserved
	DBG_FAULT(49)
	FAULT(49)

	.org ia64_ivt+0x6e00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6e00 Entry 50 (size 16 bundles) Reserved
	DBG_FAULT(50)
	FAULT(50)

	.org ia64_ivt+0x6f00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6f00 Entry 51 (size 16 bundles) Reserved
	DBG_FAULT(51)
	FAULT(51)

	.org ia64_ivt+0x7000
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7000 Entry 52 (size 16 bundles) Reserved
	DBG_FAULT(52)
	FAULT(52)

	.org ia64_ivt+0x7100
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7100 Entry 53 (size 16 bundles) Reserved
	DBG_FAULT(53)
	FAULT(53)

	.org ia64_ivt+0x7200
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7200 Entry 54 (size 16 bundles) Reserved
	DBG_FAULT(54)
	FAULT(54)

	.org ia64_ivt+0x7300
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7300 Entry 55 (size 16 bundles) Reserved
	DBG_FAULT(55)
	FAULT(55)

	.org ia64_ivt+0x7400
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7400 Entry 56 (size 16 bundles) Reserved
	DBG_FAULT(56)
	FAULT(56)

	.org ia64_ivt+0x7500
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7500 Entry 57 (size 16 bundles) Reserved
	DBG_FAULT(57)
	FAULT(57)

	.org ia64_ivt+0x7600
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7600 Entry 58 (size 16 bundles) Reserved
	DBG_FAULT(58)
	FAULT(58)

	.org ia64_ivt+0x7700
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7700 Entry 59 (size 16 bundles) Reserved
	DBG_FAULT(59)
	FAULT(59)

	.org ia64_ivt+0x7800
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7800 Entry 60 (size 16 bundles) Reserved
	DBG_FAULT(60)
	FAULT(60)

	.org ia64_ivt+0x7900
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7900 Entry 61 (size 16 bundles) Reserved
	DBG_FAULT(61)
	FAULT(61)

	.org ia64_ivt+0x7a00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7a00 Entry 62 (size 16 bundles) Reserved
	DBG_FAULT(62)
	FAULT(62)

	.org ia64_ivt+0x7b00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7b00 Entry 63 (size 16 bundles) Reserved
	DBG_FAULT(63)
	FAULT(63)

	.org ia64_ivt+0x7c00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7c00 Entry 64 (size 16 bundles) Reserved
	DBG_FAULT(64)
	FAULT(64)

	.org ia64_ivt+0x7d00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7d00 Entry 65 (size 16 bundles) Reserved
	DBG_FAULT(65)
	FAULT(65)

	.org ia64_ivt+0x7e00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7e00 Entry 66 (size 16 bundles) Reserved
	DBG_FAULT(66)
	FAULT(66)

	.org ia64_ivt+0x7f00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7f00 Entry 67 (size 16 bundles) Reserved
	DBG_FAULT(67)
	FAULT(67)

#ifdef XEN
	.org ia64_ivt+0x8000
GLOBAL_ENTRY(dispatch_reflection)
	/*
	 * Input:
	 *	psr.ic:	off
	 *	r19:	intr type (offset into ivt, see ia64_int.h)
	 *	r31:	contains saved predicates (pr)
	 */
	SAVE_MIN_WITH_COVER_R19
	alloc r14=ar.pfs,0,0,5,0
	mov out4=r15
	mov out0=cr.ifa
	adds out1=16,sp
	mov out2=cr.isr
	mov out3=cr.iim
//	mov out3=cr.itir

	ssm psr.ic | PSR_DEFAULT_BITS
	;;
	srlz.i					// guarantee that interruption collection is on
	;;
(p15)	ssm psr.i				// restore psr.i
	adds r3=8,r2				// set up second base pointer
	;;
	SAVE_REST
	movl r14=ia64_leave_kernel
	;;
	mov rp=r14
	br.sptk.many ia64_prepare_handle_reflection
END(dispatch_reflection)

#define SAVE_MIN_COVER_DONE	DO_SAVE_MIN(,mov r30=cr.ifs,)

// same as dispatch_break_fault except cover has already been done
GLOBAL_ENTRY(dispatch_slow_hyperprivop)
	SAVE_MIN_COVER_DONE
	;;
	br.sptk.many dispatch_break_fault_post_save
END(dispatch_slow_hyperprivop)
#endif

#ifdef CONFIG_IA32_SUPPORT

	/*
	 * There is no particular reason for this code to be here, other than that
	 * there happens to be space here that would go unused otherwise. If this
	 * fault ever gets "unreserved", simply move the following code to a more
	 * suitable spot...
	 */

	// IA32 interrupt entry point

ENTRY(dispatch_to_ia32_handler)
	SAVE_MIN
	;;
	mov r14=cr.isr
	ssm psr.ic | PSR_DEFAULT_BITS
	;;
	srlz.i					// guarantee that interruption collection is on
	;;
(p15)	ssm psr.i
	adds r3=8,r2				// Base pointer for SAVE_REST
	;;
	SAVE_REST
	;;
	mov r15=0x80
	shr r14=r14,16				// Get interrupt number
	;;
	cmp.ne p6,p0=r14,r15
(p6)	br.call.dpnt.many b6=non_ia32_syscall

	adds r14=IA64_PT_REGS_R8_OFFSET + 16,sp	// 16 byte hole per SW conventions
	adds r15=IA64_PT_REGS_R1_OFFSET + 16,sp
	;;
	cmp.eq pSys,pNonSys=r0,r0		// set pSys=1, pNonSys=0
	ld8 r8=[r14]				// get r8
	;;
	st8 [r15]=r8				// save original EAX in r1 (IA32 procs don't use the GP)
	;;
	alloc r15=ar.pfs,0,0,6,0		// must be first in an insn group
	;;
	ld4 r8=[r14],8				// r8 == eax (syscall number)
	mov r15=IA32_NR_syscalls
	;;
	cmp.ltu.unc p6,p7=r8,r15
	ld4 out1=[r14],8			// r9 == ecx
	;;
	ld4 out2=[r14],8			// r10 == edx
	;;
	ld4 out0=[r14]				// r11 == ebx
	adds r14=(IA64_PT_REGS_R13_OFFSET) + 16,sp
	;;
	ld4 out5=[r14],PT(R14)-PT(R13)		// r13 == ebp
	;;
	ld4 out3=[r14],PT(R15)-PT(R14)		// r14 == esi
	adds r2=TI_FLAGS+IA64_TASK_SIZE,r13
	;;
	ld4 out4=[r14]				// r15 == edi
	movl r16=ia32_syscall_table
	;;
(p6)	shladd r16=r8,3,r16			// force ni_syscall if not valid syscall number
	ld4 r2=[r2]				// r2 = current_thread_info()->flags
	;;
	ld8 r16=[r16]
	and r2=_TIF_SYSCALL_TRACEAUDIT,r2	// mask trace or audit
	;;
	mov b6=r16
	movl r15=ia32_ret_from_syscall
	cmp.eq p8,p0=r2,r0
	;;
	mov rp=r15
(p8)	br.call.sptk.many b6=b6
	br.cond.sptk ia32_trace_syscall

non_ia32_syscall:
	alloc r15=ar.pfs,0,0,2,0
	mov out0=r14				// interrupt #
	add out1=16,sp				// pointer to pt_regs
	;;					// avoid WAW on CFM
	br.call.sptk.many rp=ia32_bad_interrupt
.ret1:	movl r15=ia64_leave_kernel
	;;
	mov rp=r15
	br.ret.sptk.many rp
END(dispatch_to_ia32_handler)

#endif /* CONFIG_IA32_SUPPORT */