ia64/xen-unstable

xen/arch/ia64/xen/ivt.S @ 9748:2f86b84d0483

[IA64] more cleanup in vhpt.h

VHPT_CCHAIN_LOOKUP removed, body is now inlined in ivt.S
vhpt_insert() is now written in C.
Cleanup within vhpt.c/.h

Signed-off-by: Tristan Gingold <tristan.gingold@bull.net>
author awilliam@xenbuild.aw
date Fri Apr 21 09:06:38 2006 -0600 (2006-04-21)
parents bdb08c9ef3d1
children 5ee12273119c
2 #ifdef XEN
3 //#define CONFIG_DISABLE_VHPT // FIXME: change when VHPT is enabled??
4 // these are all hacked out for now as the entire IVT
5 // will eventually be replaced... just want to use it
6 // for startup code to handle TLB misses
7 //#define ia64_leave_kernel 0
8 //#define ia64_ret_from_syscall 0
9 //#define ia64_handle_irq 0
10 //#define ia64_fault 0
11 #define ia64_illegal_op_fault 0
12 #define ia64_prepare_handle_unaligned 0
13 #define ia64_bad_break 0
14 #define ia64_trace_syscall 0
15 #define sys_call_table 0
16 #define sys_ni_syscall 0
17 #include <asm/vhpt.h>
18 #include <asm/debugger.h>
19 #endif
20 /*
21 * arch/ia64/kernel/ivt.S
22 *
23 * Copyright (C) 1998-2001, 2003 Hewlett-Packard Co
24 * Stephane Eranian <eranian@hpl.hp.com>
25 * David Mosberger <davidm@hpl.hp.com>
26 * Copyright (C) 2000, 2002-2003 Intel Co
27 * Asit Mallick <asit.k.mallick@intel.com>
28 * Suresh Siddha <suresh.b.siddha@intel.com>
29 * Kenneth Chen <kenneth.w.chen@intel.com>
30 * Fenghua Yu <fenghua.yu@intel.com>
31 *
32 * 00/08/23 Asit Mallick <asit.k.mallick@intel.com> TLB handling for SMP
33 * 00/12/20 David Mosberger-Tang <davidm@hpl.hp.com> DTLB/ITLB handler now uses virtual PT.
34 */
35 /*
36 * This file defines the interruption vector table used by the CPU.
37 * It does not include one entry per possible cause of interruption.
38 *
39 * The first 20 entries of the table contain 64 bundles each while the
40 * remaining 48 entries contain only 16 bundles each.
41 *
42 * The 64 bundles are used to allow inlining the whole handler for critical
43 * interruptions like TLB misses.
44 *
45 * For each entry, the comment is as follows:
46 *
47 * // 0x1c00 Entry 7 (size 64 bundles) Data Key Miss (12,51)
48 * entry offset ----/ / / / /
49 * entry number ---------/ / / /
50 * size of the entry -------------/ / /
51 * vector name -------------------------------------/ /
52 * interruptions triggering this vector ----------------------/
53 *
54 * The table is 32KB in size and must be aligned on 32KB boundary.
55 * (The CPU ignores the 15 lower bits of the address)
56 *
57 * Table is based upon EAS2.6 (Oct 1999)
58 */
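The layout above reduces to a few lines of C. This is an illustrative sketch, not code from the tree: entries 0-19 are 64 bundles (0x400 bytes) apart, entries 20-67 are 16 bundles (0x100 bytes) apart, and a bundle is 16 bytes.

/* sketch: byte offset of IVT entry n, per the layout described above */
static unsigned long ivt_entry_offset(unsigned int n)
{
        return (n < 20) ? n * 64 * 16                   /* long entries  */
                        : 0x5000 + (n - 20) * 16 * 16;  /* short entries */
}
/* e.g. ivt_entry_offset(7) == 0x1c00, matching "0x1c00 Entry 7" above;
   ivt_entry_offset(67) + 0x100 == 0x8000, the 32KB table size. */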
60 #include <linux/config.h>
62 #include <asm/asmmacro.h>
63 #include <asm/break.h>
64 #include <asm/ia32.h>
65 #include <asm/kregs.h>
66 #include <asm/offsets.h>
67 #include <asm/pgtable.h>
68 #include <asm/processor.h>
69 #include <asm/ptrace.h>
70 #include <asm/system.h>
71 #include <asm/thread_info.h>
72 #include <asm/unistd.h>
73 #ifdef XEN
74 #include <xen/errno.h>
75 #else
76 #include <asm/errno.h>
77 #endif
79 #if 1
80 # define PSR_DEFAULT_BITS psr.ac
81 #else
82 # define PSR_DEFAULT_BITS 0
83 #endif
85 #if 0
86 /*
87 * This lets you track the last eight faults that occurred on the CPU. Make sure ar.k2 isn't
88 * needed for something else before enabling this...
89 */
90 # define DBG_FAULT(i) mov r16=ar.k2;; shl r16=r16,8;; add r16=(i),r16;;mov ar.k2=r16
91 #else
92 # define DBG_FAULT(i)
93 #endif
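When the tracking variant of DBG_FAULT is enabled, ar.k2 acts as an eight-entry ring of vector numbers, one per byte, with the most recent fault in the low byte. A sketch of how such a value could be decoded (the helper name is illustrative):

/* sketch: unpack the last eight fault vectors recorded in ar.k2 */
static void decode_fault_ring(unsigned long k2, unsigned char out[8])
{
        int i;
        for (i = 0; i < 8; i++) {
                out[i] = k2 & 0xff;     /* out[0] = most recent vector */
                k2 >>= 8;
        }
}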
95 #define MINSTATE_VIRT /* needed by minstate.h */
96 #include "minstate.h"
98 #define FAULT(n) \
99 mov r31=pr; \
100 mov r19=n;; /* prepare to save predicates */ \
101 br.sptk.many dispatch_to_fault_handler
103 #ifdef XEN
104 #define REFLECT(n) \
105 mov r31=pr; \
106 mov r19=n;; /* prepare to save predicates */ \
107 br.sptk.many dispatch_reflection
108 #endif
110 .section .text.ivt,"ax"
112 .align 32768 // align on 32KB boundary
113 .global ia64_ivt
114 ia64_ivt:
115 /////////////////////////////////////////////////////////////////////////////////////////
116 // 0x0000 Entry 0 (size 64 bundles) VHPT Translation (8,20,47)
117 ENTRY(vhpt_miss)
118 DBG_FAULT(0)
119 #ifdef XEN
120 FAULT(0)
121 #else
122 /*
123 * The VHPT vector is invoked when the TLB entry for the virtual page table
124 * is missing. This happens only as a result of a previous
125 * (the "original") TLB miss, which may either be caused by an instruction
126 * fetch or a data access (or non-access).
127 *
128 * What we do here is normal TLB miss handling for the _original_ miss, followed
129 * by inserting the TLB entry for the virtual page table page that the VHPT
130 * walker was attempting to access. The latter gets inserted as long
131 * as both L1 and L2 have valid mappings for the faulting address.
132 * The TLB entry for the original miss gets inserted only if
133 * the L3 entry indicates that the page is present.
134 *
135 * do_page_fault gets invoked in the following cases:
136 * - the faulting virtual address uses unimplemented address bits
137 * - the faulting virtual address has no L1, L2, or L3 mapping
138 */
139 mov r16=cr.ifa // get address that caused the TLB miss
140 #ifdef CONFIG_HUGETLB_PAGE
141 movl r18=PAGE_SHIFT
142 mov r25=cr.itir
143 #endif
144 ;;
145 rsm psr.dt // use physical addressing for data
146 mov r31=pr // save the predicate registers
147 mov r19=IA64_KR(PT_BASE) // get page table base address
148 shl r21=r16,3 // shift bit 60 into sign bit
149 shr.u r17=r16,61 // get the region number into r17
150 ;;
151 shr r22=r21,3
152 #ifdef CONFIG_HUGETLB_PAGE
153 extr.u r26=r25,2,6
154 ;;
155 cmp.ne p8,p0=r18,r26
156 sub r27=r26,r18
157 ;;
158 (p8) dep r25=r18,r25,2,6
159 (p8) shr r22=r22,r27
160 #endif
161 ;;
162 cmp.eq p6,p7=5,r17 // is IFA pointing into region 5?
163 shr.u r18=r22,PGDIR_SHIFT // get bits 33-63 of the faulting address
164 ;;
165 (p7) dep r17=r17,r19,(PAGE_SHIFT-3),3 // put region number bits in place
167 srlz.d
168 LOAD_PHYSICAL(p6, r19, swapper_pg_dir) // region 5 is rooted at swapper_pg_dir
170 .pred.rel "mutex", p6, p7
171 (p6) shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT
172 (p7) shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT-3
173 ;;
174 (p6) dep r17=r18,r19,3,(PAGE_SHIFT-3) // r17=PTA + IFA(33,42)*8
175 (p7) dep r17=r18,r17,3,(PAGE_SHIFT-6) // r17=PTA + (((IFA(61,63) << 7) | IFA(33,39))*8)
176 cmp.eq p7,p6=0,r21 // unused address bits all zeroes?
177 shr.u r18=r22,PMD_SHIFT // shift L2 index into position
178 ;;
179 ld8 r17=[r17] // fetch the L1 entry (may be 0)
180 ;;
181 (p7) cmp.eq p6,p7=r17,r0 // was L1 entry NULL?
182 dep r17=r18,r17,3,(PAGE_SHIFT-3) // compute address of L2 page table entry
183 ;;
184 (p7) ld8 r20=[r17] // fetch the L2 entry (may be 0)
185 shr.u r19=r22,PAGE_SHIFT // shift L3 index into position
186 ;;
187 (p7) cmp.eq.or.andcm p6,p7=r20,r0 // was L2 entry NULL?
188 dep r21=r19,r20,3,(PAGE_SHIFT-3) // compute address of L3 page table entry
189 ;;
190 (p7) ld8 r18=[r21] // read the L3 PTE
191 mov r19=cr.isr // cr.isr bit 0 tells us if this is an insn miss
192 ;;
193 (p7) tbit.z p6,p7=r18,_PAGE_P_BIT // page present bit cleared?
194 mov r22=cr.iha // get the VHPT address that caused the TLB miss
195 ;; // avoid RAW on p7
196 (p7) tbit.nz.unc p10,p11=r19,32 // is it an instruction TLB miss?
197 dep r23=0,r20,0,PAGE_SHIFT // clear low bits to get page address
198 ;;
199 (p10) itc.i r18 // insert the instruction TLB entry
200 (p11) itc.d r18 // insert the data TLB entry
201 (p6) br.cond.spnt.many page_fault // handle bad address/page not present (page fault)
202 mov cr.ifa=r22
204 #ifdef CONFIG_HUGETLB_PAGE
205 (p8) mov cr.itir=r25 // change to default page-size for VHPT
206 #endif
208 /*
209 * Now compute and insert the TLB entry for the virtual page table. We never
210 * execute in a page table page so there is no need to set the exception deferral
211 * bit.
212 */
213 adds r24=__DIRTY_BITS_NO_ED|_PAGE_PL_0|_PAGE_AR_RW,r23
214 ;;
215 (p7) itc.d r24
216 ;;
217 #ifdef CONFIG_SMP
218 /*
219 * Tell the assembler's dependency-violation checker that the above "itc" instructions
220 * cannot possibly affect the following loads:
221 */
222 dv_serialize_data
224 /*
225 * Re-check L2 and L3 pagetable. If they changed, we may have received a ptc.g
226 * between reading the pagetable and the "itc". If so, flush the entry we
227 * inserted and retry.
228 */
229 ld8 r25=[r21] // read L3 PTE again
230 ld8 r26=[r17] // read L2 entry again
231 ;;
232 cmp.ne p6,p7=r26,r20 // did L2 entry change
233 mov r27=PAGE_SHIFT<<2
234 ;;
235 (p6) ptc.l r22,r27 // purge PTE page translation
236 (p7) cmp.ne.or.andcm p6,p7=r25,r18 // did L3 PTE change
237 ;;
238 (p6) ptc.l r16,r27 // purge translation
239 #endif
241 mov pr=r31,-1 // restore predicate registers
242 rfi
243 #endif
244 END(vhpt_miss)
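The comment at the top of vhpt_miss describes a three-level walk (L1/L2/L3). A minimal C sketch of that control flow, under illustrative assumptions (16KB pages, present bit at bit 0; the real constants come from <asm/pgtable.h>):

/* sketch only: each level holds 8-byte entries; a zero entry or a clear
   present bit aborts the walk, which is when the handler branches to
   page_fault */
#include <stdint.h>

#define SK_PAGE_SHIFT   14                      /* assumption: 16KB pages */
#define SK_PTRS         (1UL << (SK_PAGE_SHIFT - 3))
#define SK_PMD_SHIFT    (SK_PAGE_SHIFT + (SK_PAGE_SHIFT - 3))
#define SK_PGDIR_SHIFT  (SK_PMD_SHIFT + (SK_PAGE_SHIFT - 3))
#define SK_PAGE_P       0x1UL                   /* assumption: _PAGE_P */

static uint64_t *sk_next(uint64_t entry, uint64_t addr, unsigned shift)
{
        uint64_t *table = (uint64_t *)(uintptr_t)(entry & ~(SK_PTRS * 8 - 1));
        return &table[(addr >> shift) & (SK_PTRS - 1)];
}

/* returns the PTE to insert with itc.i/itc.d, or 0 to take page_fault */
static uint64_t sk_walk(uint64_t *pgd, uint64_t addr)
{
        uint64_t l1 = pgd[(addr >> SK_PGDIR_SHIFT) & (SK_PTRS - 1)];
        uint64_t l2, l3;
        if (l1 == 0)
                return 0;                       /* L1 entry NULL */
        l2 = *sk_next(l1, addr, SK_PMD_SHIFT);
        if (l2 == 0)
                return 0;                       /* L2 entry NULL */
        l3 = *sk_next(l2, addr, SK_PAGE_SHIFT);
        return (l3 & SK_PAGE_P) ? l3 : 0;       /* present bit check */
}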
246 .org ia64_ivt+0x400
247 /////////////////////////////////////////////////////////////////////////////////////////
248 // 0x0400 Entry 1 (size 64 bundles) ITLB (21)
249 ENTRY(itlb_miss)
250 DBG_FAULT(1)
251 #ifdef XEN
252 mov r31 = pr
253 mov r16 = cr.ifa
254 ;;
255 extr.u r17=r16,59,5
256 ;;
257 /* If address belongs to VMM, go to alt tlb handler */
258 cmp.eq p6,p0=0x1e,r17
259 (p6) br.cond.spnt late_alt_itlb_miss
260 ;;
261 cmp.eq p6,p0=0x1d,r17
262 (p6) br.cond.spnt late_alt_itlb_miss
263 ;;
264 mov pr = r31, 0x1ffff
265 ;;
266 #ifdef VHPT_GLOBAL
267 br.cond.sptk fast_tlb_miss_reflect
268 ;;
269 #endif
270 #endif
271 /*
272 * The ITLB handler accesses the L3 PTE via the virtually mapped linear
273 * page table. If a nested TLB miss occurs, we switch into physical
274 * mode, walk the page table, and then re-execute the L3 PTE read
275 * and go on normally after that.
276 */
277 mov r16=cr.ifa // get virtual address
278 mov r29=b0 // save b0
279 mov r31=pr // save predicates
280 .itlb_fault:
281 mov r17=cr.iha // get virtual address of L3 PTE
282 movl r30=1f // load nested fault continuation point
283 ;;
284 1: ld8 r18=[r17] // read L3 PTE
285 ;;
286 mov b0=r29
287 tbit.z p6,p0=r18,_PAGE_P_BIT // page present bit cleared?
288 (p6) br.cond.spnt page_fault
289 ;;
290 itc.i r18
291 ;;
292 #ifdef CONFIG_SMP
293 /*
294 * Tell the assembler's dependency-violation checker that the above "itc" instructions
295 * cannot possibly affect the following loads:
296 */
297 dv_serialize_data
299 ld8 r19=[r17] // read L3 PTE again and see if same
300 mov r20=PAGE_SHIFT<<2 // setup page size for purge
301 ;;
302 cmp.ne p7,p0=r18,r19
303 ;;
304 (p7) ptc.l r16,r20
305 #endif
306 mov pr=r31,-1
307 rfi
308 END(itlb_miss)
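The Xen prologue above routes VMM addresses straight to the alternate handler by testing the top five bits of the faulting address. In C terms (a sketch; the constants follow the comments later in this file: 0xf0>>3 == 0x1e for the cached window, 0xe8>>3 == 0x1d for the uncached one):

/* sketch: equivalent of "extr.u r17=r16,59,5" plus the two compares */
static int is_vmm_address(unsigned long va)
{
        unsigned top5 = (unsigned)(va >> 59);
        return top5 == 0x1e || top5 == 0x1d;    /* 0xf000... or 0xe800... */
}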
310 .org ia64_ivt+0x0800
311 /////////////////////////////////////////////////////////////////////////////////////////
312 // 0x0800 Entry 2 (size 64 bundles) DTLB (9,48)
313 ENTRY(dtlb_miss)
314 DBG_FAULT(2)
315 #ifdef XEN
316 mov r31=pr
317 mov r16=cr.ifa // get virtual address
318 ;;
319 extr.u r17=r16,59,5
320 ;;
321 /* If address belongs to VMM, go to alt tlb handler */
322 cmp.eq p6,p0=0x1e,r17
323 (p6) br.cond.spnt late_alt_dtlb_miss
324 ;;
325 cmp.eq p6,p0=0x1d,r17
326 (p6) br.cond.spnt late_alt_dtlb_miss
327 ;;
328 #if VHPT_ENABLED
329 // XXX TODO optimization
330 mov r30=cr.ipsr
331 mov r28=cr.iip
332 mov r17=cr.isr
333 ;;
335 extr.u r18 = r30, IA64_PSR_CPL0_BIT, 2 // extract psr.cpl
336 ;;
337 cmp.ne p6, p0 = r0, r18 // cpl != 0?
338 (p6) br.cond.sptk 2f
340 // is speculation bit on?
341 tbit.nz p7,p0=r17,IA64_ISR_SP_BIT
342 ;;
343 (p7) br.cond.spnt 2f
345 // Is the faulted iip in vmm area?
347 // check bits [59:58]
347 // 00, 11: guest
348 // 01, 10: vmm
349 extr.u r19 = r28, 58, 2
350 ;;
351 cmp.eq p10, p0 = 0x0, r19
352 (p10) br.cond.sptk 2f
353 cmp.eq p11, p0 = 0x3, r19
354 (p11) br.cond.sptk 2f
356 // Is the faulted address in the identity mapping area?
357 // 0xf000... or 0xe8000...
358 extr.u r20 = r16, 59, 5
359 ;;
360 cmp.eq p12, p0 = 0x1e, r20 // (0xf0 >> 3) = 0x1e
361 (p12) br.cond.spnt 1f
362 cmp.eq p0, p13 = 0x1d, r20 // (0xe8 >> 3) = 0x1d
363 (p13) br.cond.sptk 2f
365 1:
366 // xen identity mapping area.
367 movl r24=PAGE_KERNEL
368 movl r25=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
369 ;;
370 shr.u r26=r16,55 // move address bit 59 to bit 4
371 and r25=r25,r16 // clear ed, reserved bits, and PTE control bits
372 ;;
373 and r26=0x10,r26 // bit 4=address-bit(59)
374 ;;
375 or r25=r25,r24 // insert PTE control bits into r25
376 ;;
377 or r25=r25,r26 // set bit 4 (uncached) if the access was to region 6
378 ;;
379 itc.d r25 // insert the TLB entry
380 mov pr=r31,-1
381 rfi
383 2:
384 #endif
385 #ifdef VHPT_GLOBAL
386 // br.cond.sptk page_fault
387 br.cond.sptk fast_tlb_miss_reflect
388 ;;
389 #endif
390 mov r29=b0 // save b0
391 #else
392 /*
393 * The DTLB handler accesses the L3 PTE via the virtually mapped linear
394 * page table. If a nested TLB miss occurs, we switch into physical
395 * mode, walk the page table, and then re-execute the L3 PTE read
396 * and go on normally after that.
397 */
398 mov r16=cr.ifa // get virtual address
399 mov r29=b0 // save b0
400 mov r31=pr // save predicates
401 #endif
402 dtlb_fault:
403 mov r17=cr.iha // get virtual address of L3 PTE
404 movl r30=1f // load nested fault continuation point
405 ;;
406 1: ld8 r18=[r17] // read L3 PTE
407 ;;
408 mov b0=r29
409 tbit.z p6,p0=r18,_PAGE_P_BIT // page present bit cleared?
410 (p6) br.cond.spnt page_fault
411 ;;
412 itc.d r18
413 ;;
414 #ifdef CONFIG_SMP
415 /*
416 * Tell the assembler's dependency-violation checker that the above "itc" instructions
417 * cannot possibly affect the following loads:
418 */
419 dv_serialize_data
421 ld8 r19=[r17] // read L3 PTE again and see if same
422 mov r20=PAGE_SHIFT<<2 // setup page size for purge
423 ;;
424 cmp.ne p7,p0=r18,r19
425 ;;
426 (p7) ptc.l r16,r20
427 #endif
428 mov pr=r31,-1
429 rfi
430 END(dtlb_miss)
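The identity-mapping fast path in dtlb_miss builds the PTE it inserts from the faulting address itself. A hedged C sketch of that construction (SK_MAX_PHYS_BITS and the SK_PAGE_KERNEL bit pattern are illustrative placeholders for IA64_MAX_PHYS_BITS and PAGE_KERNEL):

/* sketch: the PTE handed to itc.d on the xen identity-mapping path */
#define SK_MAX_PHYS_BITS 50                     /* assumption */
#define SK_PAGE_KERNEL   0x7b1UL                /* assumption */

static unsigned long identity_pte(unsigned long va)
{
        unsigned long pa = va & (((1UL << SK_MAX_PHYS_BITS) - 1) & ~0xfffUL);
        unsigned long ma = (va >> 55) & 0x10;   /* address bit 59 -> bit 4:
                                                   uncached for 0xe8... */
        return pa | SK_PAGE_KERNEL | ma;
}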
432 .org ia64_ivt+0x0c00
433 /////////////////////////////////////////////////////////////////////////////////////////
434 // 0x0c00 Entry 3 (size 64 bundles) Alt ITLB (19)
435 ENTRY(alt_itlb_miss)
436 DBG_FAULT(3)
437 #ifdef XEN
438 mov r31=pr
439 mov r16=cr.ifa // get address that caused the TLB miss
440 ;;
441 late_alt_itlb_miss:
442 movl r17=PAGE_KERNEL
443 mov r21=cr.ipsr
444 movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
445 ;;
446 #else
447 mov r16=cr.ifa // get address that caused the TLB miss
448 movl r17=PAGE_KERNEL
449 mov r21=cr.ipsr
450 movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
451 mov r31=pr
452 ;;
453 #endif
454 #ifdef CONFIG_DISABLE_VHPT
455 shr.u r22=r16,61 // get the region number into r22
456 ;;
457 cmp.gt p8,p0=6,r22 // user mode
458 ;;
459 (p8) thash r17=r16
460 ;;
461 (p8) mov cr.iha=r17
462 (p8) mov r29=b0 // save b0
463 (p8) br.cond.dptk .itlb_fault
464 #endif
465 extr.u r23=r21,IA64_PSR_CPL0_BIT,2 // extract psr.cpl
466 and r19=r19,r16 // clear ed, reserved bits, and PTE control bits
467 #ifdef XEN
468 shr.u r18=r16,55 // move address bit 59 to bit 4
469 ;;
470 and r18=0x10,r18 // bit 4=address-bit(59)
471 #else
472 shr.u r18=r16,57 // move address bit 61 to bit 4
473 ;;
474 andcm r18=0x10,r18 // bit 4=~address-bit(61)
475 #endif
476 cmp.ne p8,p0=r0,r23 // psr.cpl != 0?
477 or r19=r17,r19 // insert PTE control bits into r19
478 ;;
479 or r19=r19,r18 // set bit 4 (uncached) if the access was to region 6
480 (p8) br.cond.spnt page_fault
481 #ifdef XEN
482 FORCE_CRASH
483 #endif
484 ;;
485 itc.i r19 // insert the TLB entry
486 mov pr=r31,-1
487 rfi
488 END(alt_itlb_miss)
490 .org ia64_ivt+0x1000
491 /////////////////////////////////////////////////////////////////////////////////////////
492 // 0x1000 Entry 4 (size 64 bundles) Alt DTLB (7,46)
493 ENTRY(alt_dtlb_miss)
494 DBG_FAULT(4)
495 #ifdef XEN
496 mov r31=pr
497 mov r16=cr.ifa // get address that caused the TLB miss
498 ;;
499 late_alt_dtlb_miss:
500 movl r17=PAGE_KERNEL
501 mov r20=cr.isr
502 movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
503 mov r21=cr.ipsr
504 ;;
505 #else
506 #endif
507 #ifdef CONFIG_DISABLE_VHPT
508 shr.u r22=r16,61 // get the region number into r22
509 ;;
510 cmp.gt p8,p0=6,r22 // access to region 0-5
511 ;;
512 (p8) thash r17=r16
513 ;;
514 (p8) mov cr.iha=r17
515 (p8) mov r29=b0 // save b0
516 (p8) br.cond.dptk dtlb_fault
517 #endif
518 extr.u r23=r21,IA64_PSR_CPL0_BIT,2 // extract psr.cpl
519 and r22=IA64_ISR_CODE_MASK,r20 // get the isr.code field
520 tbit.nz p6,p7=r20,IA64_ISR_SP_BIT // is speculation bit on?
521 #ifdef XEN
522 shr.u r18=r16,55 // move address bit 59 to bit 4
523 and r19=r19,r16 // clear ed, reserved bits, and PTE control bits
524 tbit.nz p9,p0=r20,IA64_ISR_NA_BIT // is non-access bit on?
525 ;;
526 and r18=0x10,r18 // bit 4=address-bit(59)
527 #else
528 shr.u r18=r16,57 // move address bit 61 to bit 4
529 and r19=r19,r16 // clear ed, reserved bits, and PTE control bits
530 tbit.nz p9,p0=r20,IA64_ISR_NA_BIT // is non-access bit on?
531 ;;
532 andcm r18=0x10,r18 // bit 4=~address-bit(61)
533 #endif
534 cmp.ne p8,p0=r0,r23
535 (p9) cmp.eq.or.andcm p6,p7=IA64_ISR_CODE_LFETCH,r22 // check isr.code field
536 (p8) br.cond.spnt page_fault
537 #ifdef XEN
538 ;;
539 #ifdef CONFIG_VIRTUAL_FRAME_TABLE
540 // Test for the address of virtual frame_table
541 shr r22=r16,56;;
542 cmp.eq p8,p0=((VIRT_FRAME_TABLE_ADDR>>56)&0xff)-0x100,r22
543 (p8) br.cond.sptk frametable_miss ;;
544 #endif
545 // Test for Xen address, if not handle via page_fault
546 // note that 0xf000 (cached) and 0xe800 (uncached) addresses
547 // should be OK.
548 extr.u r22=r16,59,5;;
549 cmp.eq p8,p0=0x1e,r22
550 (p8) br.cond.spnt 1f;;
551 cmp.ne p8,p0=0x1d,r22
552 (p8) br.cond.sptk page_fault ;;
553 1:
554 #endif
556 dep r21=-1,r21,IA64_PSR_ED_BIT,1
557 or r19=r19,r17 // insert PTE control bits into r19
558 ;;
559 or r19=r19,r18 // set bit 4 (uncached) if the access was to region 6
560 (p6) mov cr.ipsr=r21
561 ;;
562 (p7) itc.d r19 // insert the TLB entry
563 mov pr=r31,-1
564 rfi
565 END(alt_dtlb_miss)
566 #ifdef CONFIG_VIRTUAL_FRAME_TABLE
567 GLOBAL_ENTRY(frametable_miss)
568 rsm psr.dt // switch to using physical data addressing
569 movl r24=(frametable_pg_dir-PAGE_OFFSET) // r24=__pa(frametable_pg_dir)
570 ;;
571 srlz.d
572 extr.u r17=r16,PGDIR_SHIFT,(PAGE_SHIFT-3)
573 ;;
574 shladd r24=r17,3,r24 // r24=&pgd[pgd_offset(addr)]
575 ;;
576 ld8 r24=[r24] // r24=pgd[pgd_offset(addr)]
577 extr.u r18=r16,PMD_SHIFT,(PAGE_SHIFT-3) // r18=pmd_offset
578 ;;
579 cmp.eq p6,p7=0,r24 // pgd present?
580 shladd r24=r18,3,r24 // r24=&pmd[pmd_offset(addr)]
581 ;;
582 (p7) ld8 r24=[r24] // r24=pmd[pmd_offset(addr)]
583 extr.u r19=r16,PAGE_SHIFT,(PAGE_SHIFT-3)// r19=pte_offset
584 (p6) br.spnt.few frametable_fault
585 ;;
586 cmp.eq p6,p7=0,r24 // pmd present?
587 shladd r24=r19,3,r24 // r24=&pte[pte_offset(addr)]
588 ;;
589 (p7) ld8 r24=[r24] // r24=pte[pte_offset(addr)]
590 mov r25=0x700|(_PAGE_SIZE_16K<<2) // key=7
591 (p6) br.spnt.few frametable_fault
592 ;;
593 mov cr.itir=r25
594 ssm psr.dt // switch to using virtual data addressing
595 tbit.z p6,p7=r24,_PAGE_P_BIT // pte present?
596 ;;
597 (p7) itc.d r24 // install updated PTE
598 (p6) br.spnt.few frametable_fault // page present bit cleared?
599 ;;
600 mov pr=r31,-1 // restore predicate registers
601 rfi
602 END(frametable_miss)
603 ENTRY(frametable_fault)
604 ssm psr.dt // switch to using virtual data addressing
605 mov r18=cr.iip
606 movl r19=ia64_frametable_probe
607 ;;
608 cmp.eq p6,p7=r18,r19 // is faulting address ia64_frametable_probe?
609 mov r8=0 // assumes that 'probe.r' uses r8
610 dep r21=-1,r21,IA64_PSR_RI_BIT+1,1 // return to next instruction in bundle 2
611 ;;
612 (p6) mov cr.ipsr=r21
613 mov r19=4 // FAULT(4)
614 (p7) br.spnt.few dispatch_to_fault_handler
615 ;;
616 mov pr=r31,-1
617 rfi
618 END(frametable_fault)
619 GLOBAL_ENTRY(ia64_frametable_probe)
620 probe.r r8=r32,0 // destination register must be r8
621 nop.f 0x0
622 br.ret.sptk.many b0 // this instruction must be in bundle 2
623 END(ia64_frametable_probe)
624 #endif /* CONFIG_VIRTUAL_FRAME_TABLE */
626 .org ia64_ivt+0x1400
627 /////////////////////////////////////////////////////////////////////////////////////////
628 // 0x1400 Entry 5 (size 64 bundles) Data nested TLB (6,45)
629 ENTRY(nested_dtlb_miss)
630 DBG_FAULT(5)
631 #ifdef XEN
632 mov b0=r30
633 br.sptk.many b0 // return to continuation point
634 ;;
635 #else
636 /*
637 * In the absence of kernel bugs, we get here when the virtually mapped linear
638 * page table is accessed non-speculatively (e.g., in the Dirty-bit, Instruction
639 * Access-bit, or Data Access-bit faults). If the DTLB entry for the virtual page
640 * table is missing, a nested TLB miss fault is triggered and control is
641 * transferred to this point. When this happens, we lookup the pte for the
642 * faulting address by walking the page table in physical mode and return to the
643 * continuation point passed in register r30 (or call page_fault if the address is
644 * not mapped).
645 *
646 * Input: r16: faulting address
647 * r29: saved b0
648 * r30: continuation address
649 * r31: saved pr
650 *
651 * Output: r17: physical address of L3 PTE of faulting address
652 * r29: saved b0
653 * r30: continuation address
654 * r31: saved pr
655 *
656 * Clobbered: b0, r18, r19, r21, psr.dt (cleared)
657 */
658 rsm psr.dt // switch to using physical data addressing
659 mov r19=IA64_KR(PT_BASE) // get the page table base address
660 shl r21=r16,3 // shift bit 60 into sign bit
661 ;;
662 shr.u r17=r16,61 // get the region number into r17
663 ;;
664 cmp.eq p6,p7=5,r17 // is faulting address in region 5?
665 shr.u r18=r16,PGDIR_SHIFT // get bits 33-63 of faulting address
666 ;;
667 (p7) dep r17=r17,r19,(PAGE_SHIFT-3),3 // put region number bits in place
669 srlz.d
670 LOAD_PHYSICAL(p6, r19, swapper_pg_dir) // region 5 is rooted at swapper_pg_dir
672 .pred.rel "mutex", p6, p7
673 (p6) shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT
674 (p7) shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT-3
675 ;;
676 (p6) dep r17=r18,r19,3,(PAGE_SHIFT-3) // r17=PTA + IFA(33,42)*8
677 (p7) dep r17=r18,r17,3,(PAGE_SHIFT-6) // r17=PTA + (((IFA(61,63) << 7) | IFA(33,39))*8)
678 cmp.eq p7,p6=0,r21 // unused address bits all zeroes?
679 shr.u r18=r16,PMD_SHIFT // shift L2 index into position
680 ;;
681 ld8 r17=[r17] // fetch the L1 entry (may be 0)
682 ;;
683 (p7) cmp.eq p6,p7=r17,r0 // was L1 entry NULL?
684 dep r17=r18,r17,3,(PAGE_SHIFT-3) // compute address of L2 page table entry
685 ;;
686 (p7) ld8 r17=[r17] // fetch the L2 entry (may be 0)
687 shr.u r19=r16,PAGE_SHIFT // shift L3 index into position
688 ;;
689 (p7) cmp.eq.or.andcm p6,p7=r17,r0 // was L2 entry NULL?
690 dep r17=r19,r17,3,(PAGE_SHIFT-3) // compute address of L3 page table entry
691 (p6) br.cond.spnt page_fault
692 mov b0=r30
693 br.sptk.many b0 // return to continuation point
694 #endif
695 END(nested_dtlb_miss)
697 .org ia64_ivt+0x1800
698 /////////////////////////////////////////////////////////////////////////////////////////
699 // 0x1800 Entry 6 (size 64 bundles) Instruction Key Miss (24)
700 ENTRY(ikey_miss)
701 DBG_FAULT(6)
702 #ifdef XEN
703 REFLECT(6)
704 #endif
705 FAULT(6)
706 END(ikey_miss)
708 //-----------------------------------------------------------------------------------
709 // call do_page_fault (predicates are in r31, psr.dt may be off, r16 is faulting address)
710 #ifdef XEN
711 GLOBAL_ENTRY(page_fault)
712 #else
713 ENTRY(page_fault)
714 #endif
715 ssm psr.dt
716 ;;
717 srlz.i
718 ;;
719 SAVE_MIN_WITH_COVER
720 #ifdef XEN
721 alloc r15=ar.pfs,0,0,4,0
722 mov out0=cr.ifa
723 mov out1=cr.isr
724 mov out3=cr.itir
725 #else
726 alloc r15=ar.pfs,0,0,3,0
727 mov out0=cr.ifa
728 mov out1=cr.isr
729 #endif
730 adds r3=8,r2 // set up second base pointer
731 ;;
732 ssm psr.ic | PSR_DEFAULT_BITS
733 ;;
734 srlz.i // guarantee that interruption collection is on
735 ;;
736 (p15) ssm psr.i // restore psr.i
737 movl r14=ia64_leave_kernel
738 ;;
739 SAVE_REST
740 mov rp=r14
741 ;;
742 adds out2=16,r12 // out2 = pointer to pt_regs
743 br.call.sptk.many b6=ia64_do_page_fault // ignore return address
744 END(page_fault)
746 .org ia64_ivt+0x1c00
747 /////////////////////////////////////////////////////////////////////////////////////////
748 // 0x1c00 Entry 7 (size 64 bundles) Data Key Miss (12,51)
749 ENTRY(dkey_miss)
750 DBG_FAULT(7)
751 #ifdef XEN
752 REFLECT(7)
753 #endif
754 FAULT(7)
755 END(dkey_miss)
757 .org ia64_ivt+0x2000
758 /////////////////////////////////////////////////////////////////////////////////////////
759 // 0x2000 Entry 8 (size 64 bundles) Dirty-bit (54)
760 ENTRY(dirty_bit)
761 DBG_FAULT(8)
762 #ifdef XEN
763 REFLECT(8)
764 #endif
765 /*
766 * What we do here is to simply turn on the dirty bit in the PTE. We need to
767 * update both the page-table and the TLB entry. To efficiently access the PTE,
768 * we address it through the virtual page table. Most likely, the TLB entry for
769 * the relevant virtual page table page is still present in the TLB so we can
770 * normally do this without additional TLB misses. In case the necessary virtual
771 * page table TLB entry isn't present, we take a nested TLB miss hit where we look
772 * up the physical address of the L3 PTE and then continue at label 1 below.
773 */
774 mov r16=cr.ifa // get the address that caused the fault
775 movl r30=1f // load continuation point in case of nested fault
776 ;;
777 thash r17=r16 // compute virtual address of L3 PTE
778 mov r29=b0 // save b0 in case of nested fault
779 mov r31=pr // save pr
780 #ifdef CONFIG_SMP
781 mov r28=ar.ccv // save ar.ccv
782 ;;
783 1: ld8 r18=[r17]
784 ;; // avoid RAW on r18
785 mov ar.ccv=r18 // set compare value for cmpxchg
786 or r25=_PAGE_D|_PAGE_A,r18 // set the dirty and accessed bits
787 ;;
788 cmpxchg8.acq r26=[r17],r25,ar.ccv
789 mov r24=PAGE_SHIFT<<2
790 ;;
791 cmp.eq p6,p7=r26,r18
792 ;;
793 (p6) itc.d r25 // install updated PTE
794 ;;
795 /*
796 * Tell the assembler's dependency-violation checker that the above "itc" instructions
797 * cannot possibly affect the following loads:
798 */
799 dv_serialize_data
801 ld8 r18=[r17] // read PTE again
802 ;;
803 cmp.eq p6,p7=r18,r25 // is it same as the newly installed
804 ;;
805 (p7) ptc.l r16,r24
806 mov b0=r29 // restore b0
807 mov ar.ccv=r28
808 #else
809 ;;
810 1: ld8 r18=[r17]
811 ;; // avoid RAW on r18
812 or r18=_PAGE_D|_PAGE_A,r18 // set the dirty and accessed bits
813 mov b0=r29 // restore b0
814 ;;
815 st8 [r17]=r18 // store back updated PTE
816 itc.d r18 // install updated PTE
817 #endif
818 mov pr=r31,-1 // restore pr
819 rfi
820 END(dirty_bit)
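On SMP the handler sets the dirty and accessed bits with cmpxchg8.acq so an update racing with a concurrent ptc.g is not lost. The same update as a C sketch, with a GCC builtin standing in for the cmpxchg and illustrative bit values for _PAGE_D/_PAGE_A:

/* sketch: lock-free PTE update corresponding to the CONFIG_SMP path */
#define SK_PAGE_A 0x20UL                        /* assumption */
#define SK_PAGE_D 0x40UL                        /* assumption */

static int set_dirty_accessed(unsigned long *pte)
{
        unsigned long old = *pte;
        unsigned long new = old | SK_PAGE_D | SK_PAGE_A;
        /* fails if another CPU (or a ptc.g) changed the PTE meanwhile;
           on success the assembly then itc.d's the new value, and still
           re-reads the PTE and ptc.l-purges if it changed again */
        return __sync_bool_compare_and_swap(pte, old, new);
}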
822 .org ia64_ivt+0x2400
823 /////////////////////////////////////////////////////////////////////////////////////////
824 // 0x2400 Entry 9 (size 64 bundles) Instruction Access-bit (27)
825 ENTRY(iaccess_bit)
826 DBG_FAULT(9)
827 #ifdef XEN
828 mov r31=pr;
829 mov r16=cr.isr
830 mov r17=cr.ifa
831 mov r19=9
832 movl r20=0x2400
833 br.sptk.many fast_access_reflect;;
834 #endif
835 // Like Entry 8, except for instruction access
836 mov r16=cr.ifa // get the address that caused the fault
837 movl r30=1f // load continuation point in case of nested fault
838 mov r31=pr // save predicates
839 #ifdef CONFIG_ITANIUM
840 /*
841 * Erratum 10 (IFA may contain incorrect address) has "NoFix" status.
842 */
843 mov r17=cr.ipsr
844 ;;
845 mov r18=cr.iip
846 tbit.z p6,p0=r17,IA64_PSR_IS_BIT // IA64 instruction set?
847 ;;
848 (p6) mov r16=r18 // if so, use cr.iip instead of cr.ifa
849 #endif /* CONFIG_ITANIUM */
850 ;;
851 thash r17=r16 // compute virtual address of L3 PTE
852 mov r29=b0 // save b0 in case of nested fault
853 #ifdef CONFIG_SMP
854 mov r28=ar.ccv // save ar.ccv
855 ;;
856 1: ld8 r18=[r17]
857 ;;
858 mov ar.ccv=r18 // set compare value for cmpxchg
859 or r25=_PAGE_A,r18 // set the accessed bit
860 ;;
861 cmpxchg8.acq r26=[r17],r25,ar.ccv
862 mov r24=PAGE_SHIFT<<2
863 ;;
864 cmp.eq p6,p7=r26,r18
865 ;;
866 (p6) itc.i r25 // install updated PTE
867 ;;
868 /*
869 * Tell the assembler's dependency-violation checker that the above "itc" instructions
870 * cannot possibly affect the following loads:
871 */
872 dv_serialize_data
874 ld8 r18=[r17] // read PTE again
875 ;;
876 cmp.eq p6,p7=r18,r25 // is it same as the newly installed
877 ;;
878 (p7) ptc.l r16,r24
879 mov b0=r29 // restore b0
880 mov ar.ccv=r28
881 #else /* !CONFIG_SMP */
882 ;;
883 1: ld8 r18=[r17]
884 ;;
885 or r18=_PAGE_A,r18 // set the accessed bit
886 mov b0=r29 // restore b0
887 ;;
888 st8 [r17]=r18 // store back updated PTE
889 itc.i r18 // install updated PTE
890 #endif /* !CONFIG_SMP */
891 mov pr=r31,-1
892 rfi
893 END(iaccess_bit)
895 .org ia64_ivt+0x2800
896 /////////////////////////////////////////////////////////////////////////////////////////
897 // 0x2800 Entry 10 (size 64 bundles) Data Access-bit (15,55)
898 ENTRY(daccess_bit)
899 DBG_FAULT(10)
900 #ifdef XEN
901 mov r31=pr;
902 mov r16=cr.isr
903 mov r17=cr.ifa
904 mov r19=10
905 movl r20=0x2800
906 br.sptk.many fast_access_reflect;;
907 #endif
908 // Like Entry 8, except for data access
909 mov r16=cr.ifa // get the address that caused the fault
910 movl r30=1f // load continuation point in case of nested fault
911 ;;
912 thash r17=r16 // compute virtual address of L3 PTE
913 mov r31=pr
914 mov r29=b0 // save b0 in case of nested fault
915 #ifdef CONFIG_SMP
916 mov r28=ar.ccv // save ar.ccv
917 ;;
918 1: ld8 r18=[r17]
919 ;; // avoid RAW on r18
920 mov ar.ccv=r18 // set compare value for cmpxchg
921 or r25=_PAGE_A,r18 // set the accessed bit
922 ;;
923 cmpxchg8.acq r26=[r17],r25,ar.ccv
924 mov r24=PAGE_SHIFT<<2
925 ;;
926 cmp.eq p6,p7=r26,r18
927 ;;
928 (p6) itc.d r25 // install updated PTE
929 /*
930 * Tell the assembler's dependency-violation checker that the above "itc" instructions
931 * cannot possibly affect the following loads:
932 */
933 dv_serialize_data
934 ;;
935 ld8 r18=[r17] // read PTE again
936 ;;
937 cmp.eq p6,p7=r18,r25 // is it same as the newly installed
938 ;;
939 (p7) ptc.l r16,r24
940 mov ar.ccv=r28
941 #else
942 ;;
943 1: ld8 r18=[r17]
944 ;; // avoid RAW on r18
945 or r18=_PAGE_A,r18 // set the accessed bit
946 ;;
947 st8 [r17]=r18 // store back updated PTE
948 itc.d r18 // install updated PTE
949 #endif
950 mov b0=r29 // restore b0
951 mov pr=r31,-1
952 rfi
953 END(daccess_bit)
955 .org ia64_ivt+0x2c00
956 /////////////////////////////////////////////////////////////////////////////////////////
957 // 0x2c00 Entry 11 (size 64 bundles) Break instruction (33)
958 ENTRY(break_fault)
959 /*
960 * The streamlined system call entry/exit paths only save/restore the initial part
961 * of pt_regs. This implies that the callers of system-calls must adhere to the
962 * normal procedure calling conventions.
963 *
964 * Registers to be saved & restored:
965 * CR registers: cr.ipsr, cr.iip, cr.ifs
966 * AR registers: ar.unat, ar.pfs, ar.rsc, ar.rnat, ar.bspstore, ar.fpsr
967 * others: pr, b0, b6, loadrs, r1, r11, r12, r13, r15
968 * Registers to be restored only:
969 * r8-r11: output value from the system call.
970 *
971 * During system call exit, scratch registers (including r15) are modified/cleared
972 * to prevent leaking bits from kernel to user level.
973 */
974 DBG_FAULT(11)
975 #ifdef XEN
976 mov r16=cr.isr
977 mov r17=cr.iim
978 mov r31=pr
979 ;;
980 cmp.eq p7,p0=r17,r0
981 (p7) br.spnt.few dispatch_break_fault ;;
982 #ifdef CRASH_DEBUG
983 // panic can occur before domain0 is created.
984 // in such a case, referencing XSI_PSR_IC causes a nested_dtlb_miss
985 movl r18=CDB_BREAK_NUM ;;
986 cmp.eq p7,p0=r17,r18 ;;
987 (p7) br.spnt.few dispatch_break_fault ;;
988 #endif
989 movl r18=XSI_PSR_IC
990 ;;
991 ld4 r19=[r18]
992 ;;
993 cmp.eq p7,p0=r0,r17 // is this a pseudo-cover?
994 (p7) br.spnt.many dispatch_privop_fault
995 ;;
996 // if vpsr.ic is off, we have a hyperprivop
997 // A hyperprivop is hand-coded assembly with psr.ic off
998 // which means no calls, no use of r1-r15 and no memory accesses
999 // except to pinned addresses!
1000 cmp4.eq p7,p0=r0,r19
1001 (p7) br.sptk.many fast_hyperprivop
1002 ;;
1003 movl r22=THIS_CPU(cpu_kr)+IA64_KR_CURRENT_OFFSET;;
1004 ld8 r22 = [r22]
1005 ;;
1006 adds r22=IA64_VCPU_BREAKIMM_OFFSET,r22;;
1007 ld4 r23=[r22];;
1008 cmp4.eq p6,p7=r23,r17 // Xen-reserved breakimm?
1009 (p6) br.spnt.many dispatch_break_fault
1010 ;;
1011 br.sptk.many fast_break_reflect
1012 ;;
1013 #endif
1014 movl r16=THIS_CPU(cpu_kr)+IA64_KR_CURRENT_OFFSET;;
1015 ld8 r16=[r16]
1016 mov r17=cr.iim
1017 mov r18=__IA64_BREAK_SYSCALL
1018 mov r21=ar.fpsr
1019 mov r29=cr.ipsr
1020 mov r19=b6
1021 mov r25=ar.unat
1022 mov r27=ar.rsc
1023 mov r26=ar.pfs
1024 mov r28=cr.iip
1025 #ifndef XEN
1026 mov r31=pr // prepare to save predicates
1027 #endif
1028 mov r20=r1
1029 ;;
1030 adds r16=IA64_TASK_THREAD_ON_USTACK_OFFSET,r16
1031 cmp.eq p0,p7=r18,r17 // is this a system call? (p7 <- false, if so)
1032 (p7) br.cond.spnt non_syscall
1033 ;;
1034 ld1 r17=[r16] // load current->thread.on_ustack flag
1035 st1 [r16]=r0 // clear current->thread.on_ustack flag
1036 add r1=-IA64_TASK_THREAD_ON_USTACK_OFFSET,r16 // set r1 for MINSTATE_START_SAVE_MIN_VIRT
1037 ;;
1038 invala
1040 /* adjust return address so we skip over the break instruction: */
1042 extr.u r8=r29,41,2 // extract ei field from cr.ipsr
1043 ;;
1044 cmp.eq p6,p7=2,r8 // isr.ei==2?
1045 mov r2=r1 // setup r2 for ia64_syscall_setup
1046 ;;
1047 (p6) mov r8=0 // clear ei to 0
1048 (p6) adds r28=16,r28 // switch cr.iip to next bundle (cr.ipsr.ei wrapped)
1049 (p7) adds r8=1,r8 // increment ei to next slot
1050 ;;
1051 cmp.eq pKStk,pUStk=r0,r17 // are we in kernel mode already?
1052 dep r29=r8,r29,41,2 // insert new ei into cr.ipsr
1053 ;;
1055 // switch from user to kernel RBS:
1056 MINSTATE_START_SAVE_MIN_VIRT
1057 br.call.sptk.many b7=ia64_syscall_setup
1058 ;;
1059 MINSTATE_END_SAVE_MIN_VIRT // switch to bank 1
1060 ssm psr.ic | PSR_DEFAULT_BITS
1061 ;;
1062 srlz.i // guarantee that interruption collection is on
1063 mov r3=NR_syscalls - 1
1064 ;;
1065 (p15) ssm psr.i // restore psr.i
1066 // p10==true means out registers are more than 8 or r15's Nat is true
1067 (p10) br.cond.spnt.many ia64_ret_from_syscall
1068 ;;
1069 movl r16=sys_call_table
1071 adds r15=-1024,r15 // r15 contains the syscall number---subtract 1024
1072 movl r2=ia64_ret_from_syscall
1073 ;;
1074 shladd r20=r15,3,r16 // r20 = sys_call_table + 8*(syscall-1024)
1075 cmp.leu p6,p7=r15,r3 // (syscall > 0 && syscall < 1024 + NR_syscalls) ?
1076 mov rp=r2 // set the real return addr
1077 ;;
1078 (p6) ld8 r20=[r20] // load address of syscall entry point
1079 (p7) movl r20=sys_ni_syscall
1081 add r2=TI_FLAGS+IA64_TASK_SIZE,r13
1082 ;;
1083 ld4 r2=[r2] // r2 = current_thread_info()->flags
1084 ;;
1085 and r2=_TIF_SYSCALL_TRACEAUDIT,r2 // mask trace or audit
1086 ;;
1087 cmp.eq p8,p0=r2,r0
1088 mov b6=r20
1089 ;;
1090 (p8) br.call.sptk.many b6=b6 // ignore this return addr
1091 br.cond.sptk ia64_trace_syscall
1092 // NOT REACHED
1093 END(break_fault)
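The syscall path above advances the return point past the break instruction by hand: cr.ipsr.ei (bits 41-42) selects the slot within the 16-byte bundle at cr.iip. A sketch of that arithmetic in C:

/* sketch: skip one instruction slot, wrapping to the next 16-byte bundle */
static void skip_slot(unsigned long *ipsr, unsigned long *iip)
{
        unsigned long ei = (*ipsr >> 41) & 0x3; /* extr.u r8=r29,41,2 */
        if (ei == 2) {
                ei = 0;                         /* clear ei to 0 */
                *iip += 16;                     /* next bundle */
        } else {
                ei += 1;                        /* next slot */
        }
        *ipsr = (*ipsr & ~(0x3UL << 41)) | (ei << 41);
}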
1095 .org ia64_ivt+0x3000
1096 /////////////////////////////////////////////////////////////////////////////////////////
1097 // 0x3000 Entry 12 (size 64 bundles) External Interrupt (4)
1098 ENTRY(interrupt)
1099 DBG_FAULT(12)
1100 mov r31=pr // prepare to save predicates
1101 ;;
1102 #ifdef XEN
1103 mov r30=cr.ivr // pass cr.ivr as first arg
1104 // FIXME: this is a hack... use cpuinfo.ksoftirqd because it's
1105 // not used anywhere else and we need a place to stash ivr and
1106 // there are no registers available unused by SAVE_MIN/REST
1107 movl r29=THIS_CPU(cpu_info)+IA64_CPUINFO_KSOFTIRQD_OFFSET;;
1108 st8 [r29]=r30;;
1109 movl r28=slow_interrupt;;
1110 mov r29=rp;;
1111 mov rp=r28;;
1112 br.cond.sptk.many fast_tick_reflect
1113 ;;
1114 slow_interrupt:
1115 mov rp=r29;;
1116 #endif
1117 SAVE_MIN_WITH_COVER // uses r31; defines r2 and r3
1118 ssm psr.ic | PSR_DEFAULT_BITS
1119 ;;
1120 adds r3=8,r2 // set up second base pointer for SAVE_REST
1121 srlz.i // ensure everybody knows psr.ic is back on
1122 ;;
1123 SAVE_REST
1124 ;;
1125 alloc r14=ar.pfs,0,0,2,0 // must be first in an insn group
1126 #ifdef XEN
1127 movl out0=THIS_CPU(cpu_info)+IA64_CPUINFO_KSOFTIRQD_OFFSET;;
1128 ld8 out0=[out0];;
1129 #else
1130 mov out0=cr.ivr // pass cr.ivr as first arg
1131 #endif
1132 add out1=16,sp // pass pointer to pt_regs as second arg
1133 #ifndef XEN
1134 ;;
1135 srlz.d // make sure we see the effect of cr.ivr
1136 #endif
1137 movl r14=ia64_leave_kernel
1138 ;;
1139 mov rp=r14
1140 br.call.sptk.many b6=ia64_handle_irq
1141 END(interrupt)
1143 .org ia64_ivt+0x3400
1144 /////////////////////////////////////////////////////////////////////////////////////////
1145 // 0x3400 Entry 13 (size 64 bundles) Reserved
1146 DBG_FAULT(13)
1147 FAULT(13)
1149 #ifdef XEN
1150 // There is no particular reason for this code to be here, other than that
1151 // there happens to be space here that would go unused otherwise. If this
1152 // fault ever gets "unreserved", simply move the following code to a more
1153 // suitable spot...
1155 GLOBAL_ENTRY(dispatch_break_fault)
1156 SAVE_MIN_WITH_COVER
1157 ;;
1158 dispatch_break_fault_post_save:
1159 alloc r14=ar.pfs,0,0,4,0 // now it's safe (must be first in insn group!)
1160 mov out0=cr.ifa
1161 adds out1=16,sp
1162 mov out2=cr.isr // FIXME: pity to make this slow access twice
1163 mov out3=cr.iim // FIXME: pity to make this slow access twice
1165 ssm psr.ic | PSR_DEFAULT_BITS
1166 ;;
1167 srlz.i // guarantee that interruption collection is on
1168 ;;
1169 (p15) ssm psr.i // restore psr.i
1170 adds r3=8,r2 // set up second base pointer
1171 ;;
1172 SAVE_REST
1173 movl r14=ia64_leave_kernel
1174 ;;
1175 mov rp=r14
1176 // br.sptk.many ia64_prepare_handle_break
1177 br.call.sptk.many b6=ia64_handle_break
1178 END(dispatch_break_fault)
1179 #endif
1181 .org ia64_ivt+0x3800
1182 /////////////////////////////////////////////////////////////////////////////////////////
1183 // 0x3800 Entry 14 (size 64 bundles) Reserved
1184 DBG_FAULT(14)
1185 FAULT(14)
1187 /*
1188 * There is no particular reason for this code to be here, other than that
1189 * there happens to be space here that would go unused otherwise. If this
1190 * fault ever gets "unreserved", simply move the following code to a more
1191 * suitable spot...
1193 * ia64_syscall_setup() is a separate subroutine so that it can
1194 * allocate stacked registers so it can safely demine any
1195 * potential NaT values from the input registers.
1197 * On entry:
1198 * - executing on bank 0 or bank 1 register set (doesn't matter)
1199 * - r1: stack pointer
1200 * - r2: current task pointer
1201 * - r3: preserved
1202 * - r11: original contents (saved ar.pfs to be saved)
1203 * - r12: original contents (sp to be saved)
1204 * - r13: original contents (tp to be saved)
1205 * - r15: original contents (syscall # to be saved)
1206 * - r18: saved bsp (after switching to kernel stack)
1207 * - r19: saved b6
1208 * - r20: saved r1 (gp)
1209 * - r21: saved ar.fpsr
1210 * - r22: kernel's register backing store base (krbs_base)
1211 * - r23: saved ar.bspstore
1212 * - r24: saved ar.rnat
1213 * - r25: saved ar.unat
1214 * - r26: saved ar.pfs
1215 * - r27: saved ar.rsc
1216 * - r28: saved cr.iip
1217 * - r29: saved cr.ipsr
1218 * - r31: saved pr
1219 * - b0: original contents (to be saved)
1220 * On exit:
1221 * - executing on bank 1 registers
1222 * - psr.ic enabled, interrupts restored
1223 * - p10: TRUE if syscall is invoked with more than 8 out
1224 * registers or r15's Nat is true
1225 * - r1: kernel's gp
1226 * - r3: preserved (same as on entry)
1227 * - r8: -EINVAL if p10 is true
1228 * - r12: points to kernel stack
1229 * - r13: points to current task
1230 * - p15: TRUE if interrupts need to be re-enabled
1231 * - ar.fpsr: set to kernel settings
1232 */
1233 GLOBAL_ENTRY(ia64_syscall_setup)
1234 #ifndef XEN
1235 #if PT(B6) != 0
1236 # error This code assumes that b6 is the first field in pt_regs.
1237 #endif
1238 #endif
1239 st8 [r1]=r19 // save b6
1240 add r16=PT(CR_IPSR),r1 // initialize first base pointer
1241 add r17=PT(R11),r1 // initialize second base pointer
1242 ;;
1243 alloc r19=ar.pfs,8,0,0,0 // ensure in0-in7 are writable
1244 st8 [r16]=r29,PT(AR_PFS)-PT(CR_IPSR) // save cr.ipsr
1245 tnat.nz p8,p0=in0
1247 st8.spill [r17]=r11,PT(CR_IIP)-PT(R11) // save r11
1248 tnat.nz p9,p0=in1
1249 (pKStk) mov r18=r0 // make sure r18 isn't NaT
1250 ;;
1252 st8 [r16]=r26,PT(CR_IFS)-PT(AR_PFS) // save ar.pfs
1253 st8 [r17]=r28,PT(AR_UNAT)-PT(CR_IIP) // save cr.iip
1254 mov r28=b0 // save b0 (2 cyc)
1255 ;;
1257 st8 [r17]=r25,PT(AR_RSC)-PT(AR_UNAT) // save ar.unat
1258 dep r19=0,r19,38,26 // clear all bits but 0..37 [I0]
1259 (p8) mov in0=-1
1260 ;;
1262 st8 [r16]=r19,PT(AR_RNAT)-PT(CR_IFS) // store ar.pfs.pfm in cr.ifs
1263 extr.u r11=r19,7,7 // I0 // get sol of ar.pfs
1264 and r8=0x7f,r19 // A // get sof of ar.pfs
1266 st8 [r17]=r27,PT(AR_BSPSTORE)-PT(AR_RSC)// save ar.rsc
1267 tbit.nz p15,p0=r29,IA64_PSR_I_BIT // I0
1268 (p9) mov in1=-1
1269 ;;
1271 (pUStk) sub r18=r18,r22 // r18=RSE.ndirty*8
1272 tnat.nz p10,p0=in2
1273 add r11=8,r11
1274 ;;
1275 (pKStk) adds r16=PT(PR)-PT(AR_RNAT),r16 // skip over ar_rnat field
1276 (pKStk) adds r17=PT(B0)-PT(AR_BSPSTORE),r17 // skip over ar_bspstore field
1277 tnat.nz p11,p0=in3
1278 ;;
1279 (p10) mov in2=-1
1280 tnat.nz p12,p0=in4 // [I0]
1281 (p11) mov in3=-1
1282 ;;
1283 (pUStk) st8 [r16]=r24,PT(PR)-PT(AR_RNAT) // save ar.rnat
1284 (pUStk) st8 [r17]=r23,PT(B0)-PT(AR_BSPSTORE) // save ar.bspstore
1285 shl r18=r18,16 // compute ar.rsc to be used for "loadrs"
1286 ;;
1287 st8 [r16]=r31,PT(LOADRS)-PT(PR) // save predicates
1288 st8 [r17]=r28,PT(R1)-PT(B0) // save b0
1289 tnat.nz p13,p0=in5 // [I0]
1290 ;;
1291 st8 [r16]=r18,PT(R12)-PT(LOADRS) // save ar.rsc value for "loadrs"
1292 st8.spill [r17]=r20,PT(R13)-PT(R1) // save original r1
1293 (p12) mov in4=-1
1294 ;;
1296 .mem.offset 0,0; st8.spill [r16]=r12,PT(AR_FPSR)-PT(R12) // save r12
1297 .mem.offset 8,0; st8.spill [r17]=r13,PT(R15)-PT(R13) // save r13
1298 (p13) mov in5=-1
1299 ;;
1300 st8 [r16]=r21,PT(R8)-PT(AR_FPSR) // save ar.fpsr
1301 tnat.nz p14,p0=in6
1302 cmp.lt p10,p9=r11,r8 // frame size can't be more than local+8
1303 ;;
1304 stf8 [r16]=f1 // ensure pt_regs.r8 != 0 (see handle_syscall_error)
1305 (p9) tnat.nz p10,p0=r15
1306 adds r12=-16,r1 // switch to kernel memory stack (with 16 bytes of scratch)
1308 st8.spill [r17]=r15 // save r15
1309 tnat.nz p8,p0=in7
1310 nop.i 0
1312 mov r13=r2 // establish `current'
1313 movl r1=__gp // establish kernel global pointer
1314 ;;
1315 (p14) mov in6=-1
1316 (p8) mov in7=-1
1317 nop.i 0
1319 cmp.eq pSys,pNonSys=r0,r0 // set pSys=1, pNonSys=0
1320 movl r17=FPSR_DEFAULT
1321 ;;
1322 mov.m ar.fpsr=r17 // set ar.fpsr to kernel default value
1323 (p10) mov r8=-EINVAL
1324 br.ret.sptk.many b7
1325 END(ia64_syscall_setup)
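ia64_syscall_setup unpacks ar.pfs to validate the syscall frame: sof (size of frame) lives in bits 0-6 and sol (size of locals) in bits 7-13, and p10/-EINVAL fires when the frame declares more than 8 out registers. As a C sketch:

/* sketch: the ar.pfs frame-size check; returns 0 when p10 would fire */
static int syscall_frame_ok(unsigned long ar_pfs)
{
        unsigned long sof = ar_pfs & 0x7f;         /* and r8=0x7f,r19    */
        unsigned long sol = (ar_pfs >> 7) & 0x7f;  /* extr.u r11=r19,7,7 */
        return sof <= sol + 8;     /* "frame size can't be more than
                                       local+8", else r8 = -EINVAL */
}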
1327 .org ia64_ivt+0x3c00
1328 /////////////////////////////////////////////////////////////////////////////////////////
1329 // 0x3c00 Entry 15 (size 64 bundles) Reserved
1330 DBG_FAULT(15)
1331 FAULT(15)
1333 /*
1334 * Squatting in this space ...
1336 * This special case dispatcher for illegal operation faults allows preserved
1337 * registers to be modified through a callback function (asm only) that is handed
1338 * back from the fault handler in r8. Up to three arguments can be passed to the
1339 * callback function by returning an aggregate with the callback as its first
1340 * element, followed by the arguments.
1341 */
1342 ENTRY(dispatch_illegal_op_fault)
1343 SAVE_MIN_WITH_COVER
1344 ssm psr.ic | PSR_DEFAULT_BITS
1345 ;;
1346 srlz.i // guarantee that interruption collection is on
1347 ;;
1348 (p15) ssm psr.i // restore psr.i
1349 adds r3=8,r2 // set up second base pointer for SAVE_REST
1350 ;;
1351 alloc r14=ar.pfs,0,0,1,0 // must be first in insn group
1352 mov out0=ar.ec
1353 ;;
1354 SAVE_REST
1355 ;;
1356 br.call.sptk.many rp=ia64_illegal_op_fault
1357 .ret0: ;;
1358 alloc r14=ar.pfs,0,0,3,0 // must be first in insn group
1359 mov out0=r9
1360 mov out1=r10
1361 mov out2=r11
1362 movl r15=ia64_leave_kernel
1363 ;;
1364 mov rp=r15
1365 mov b6=r8
1366 ;;
1367 cmp.ne p6,p0=0,r8
1368 (p6) br.call.dpnt.many b6=b6 // call returns to ia64_leave_kernel
1369 br.sptk.many ia64_leave_kernel
1370 END(dispatch_illegal_op_fault)
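The callback convention described above can be pictured as a small aggregate returned in r8-r11: the first word is an optional fixup routine, the remaining words become its arguments. A sketch (struct and names are illustrative, not from the tree):

/* sketch: what ia64_illegal_op_fault hands back in r8..r11 */
struct illegal_op_ret {
        void (*callback)(unsigned long, unsigned long, unsigned long);
        unsigned long arg0, arg1, arg2;
};

/* the dispatcher above then effectively does:
        if (ret.callback)
                ret.callback(ret.arg0, ret.arg1, ret.arg2);
   and falls into ia64_leave_kernel either way */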
1372 .org ia64_ivt+0x4000
1373 /////////////////////////////////////////////////////////////////////////////////////////
1374 // 0x4000 Entry 16 (size 64 bundles) Reserved
1375 DBG_FAULT(16)
1376 FAULT(16)
1378 #ifdef XEN
1379 // There is no particular reason for this code to be here, other than that
1380 // there happens to be space here that would go unused otherwise. If this
1381 // fault ever gets "unreserved", simply move the following code to a more
1382 // suitable spot...
1384 ENTRY(dispatch_privop_fault)
1385 SAVE_MIN_WITH_COVER
1386 ;;
1387 alloc r14=ar.pfs,0,0,4,0 // now it's safe (must be first in insn group!)
1388 mov out0=cr.ifa
1389 adds out1=16,sp
1390 mov out2=cr.isr // FIXME: pity to make this slow access twice
1391 mov out3=cr.itir
1393 ssm psr.ic | PSR_DEFAULT_BITS
1394 ;;
1395 srlz.i // guarantee that interruption collection is on
1396 ;;
1397 (p15) ssm psr.i // restore psr.i
1398 adds r3=8,r2 // set up second base pointer
1399 ;;
1400 SAVE_REST
1401 movl r14=ia64_leave_kernel
1402 ;;
1403 mov rp=r14
1404 // br.sptk.many ia64_prepare_handle_privop
1405 br.call.sptk.many b6=ia64_handle_privop
1406 END(dispatch_privop_fault)
1407 #endif
1410 .org ia64_ivt+0x4400
1411 /////////////////////////////////////////////////////////////////////////////////////////
1412 // 0x4400 Entry 17 (size 64 bundles) Reserved
1413 DBG_FAULT(17)
1414 FAULT(17)
1416 ENTRY(non_syscall)
1417 SAVE_MIN_WITH_COVER
1419 // There is no particular reason for this code to be here, other than that
1420 // there happens to be space here that would go unused otherwise. If this
1421 // fault ever gets "unreserved", simply move the following code to a more
1422 // suitable spot...
1424 alloc r14=ar.pfs,0,0,2,0
1425 mov out0=cr.iim
1426 add out1=16,sp
1427 adds r3=8,r2 // set up second base pointer for SAVE_REST
1429 ssm psr.ic | PSR_DEFAULT_BITS
1430 ;;
1431 srlz.i // guarantee that interruption collection is on
1432 ;;
1433 (p15) ssm psr.i // restore psr.i
1434 movl r15=ia64_leave_kernel
1435 ;;
1436 SAVE_REST
1437 mov rp=r15
1438 ;;
1439 br.call.sptk.many b6=ia64_bad_break // avoid WAW on CFM and ignore return addr
1440 END(non_syscall)
1442 .org ia64_ivt+0x4800
1443 /////////////////////////////////////////////////////////////////////////////////////////
1444 // 0x4800 Entry 18 (size 64 bundles) Reserved
1445 DBG_FAULT(18)
1446 FAULT(18)
1448 /*
1449 * There is no particular reason for this code to be here, other than that
1450 * there happens to be space here that would go unused otherwise. If this
1451 * fault ever gets "unreserved", simply move the following code to a more
1452 * suitable spot...
1453 */
1455 ENTRY(dispatch_unaligned_handler)
1456 SAVE_MIN_WITH_COVER
1457 ;;
1458 alloc r14=ar.pfs,0,0,2,0 // now it's safe (must be first in insn group!)
1459 mov out0=cr.ifa
1460 adds out1=16,sp
1462 ssm psr.ic | PSR_DEFAULT_BITS
1463 ;;
1464 srlz.i // guarantee that interruption collection is on
1465 ;;
1466 (p15) ssm psr.i // restore psr.i
1467 adds r3=8,r2 // set up second base pointer
1468 ;;
1469 SAVE_REST
1470 movl r14=ia64_leave_kernel
1471 ;;
1472 mov rp=r14
1473 // br.sptk.many ia64_prepare_handle_unaligned
1474 br.call.sptk.many b6=ia64_handle_unaligned
1475 END(dispatch_unaligned_handler)
1477 .org ia64_ivt+0x4c00
1478 /////////////////////////////////////////////////////////////////////////////////////////
1479 // 0x4c00 Entry 19 (size 64 bundles) Reserved
1480 DBG_FAULT(19)
1481 FAULT(19)
1483 /*
1484 * There is no particular reason for this code to be here, other than that
1485 * there happens to be space here that would go unused otherwise. If this
1486 * fault ever gets "unreserved", simply move the following code to a more
1487 * suitable spot...
1488 */
1490 ENTRY(dispatch_to_fault_handler)
1491 /*
1492 * Input:
1493 * psr.ic: off
1494 * r19: fault vector number (e.g., 24 for General Exception)
1495 * r31: contains saved predicates (pr)
1496 */
1497 SAVE_MIN_WITH_COVER_R19
1498 alloc r14=ar.pfs,0,0,5,0
1499 mov out0=r15
1500 mov out1=cr.isr
1501 mov out2=cr.ifa
1502 mov out3=cr.iim
1503 mov out4=cr.itir
1504 ;;
1505 ssm psr.ic | PSR_DEFAULT_BITS
1506 ;;
1507 srlz.i // guarantee that interruption collection is on
1508 ;;
1509 (p15) ssm psr.i // restore psr.i
1510 adds r3=8,r2 // set up second base pointer for SAVE_REST
1511 ;;
1512 SAVE_REST
1513 movl r14=ia64_leave_kernel
1514 ;;
1515 mov rp=r14
1516 br.call.sptk.many b6=ia64_fault
1517 END(dispatch_to_fault_handler)
1519 //
1520 // --- End of long entries, Beginning of short entries
1521 //
1523 .org ia64_ivt+0x5000
1524 /////////////////////////////////////////////////////////////////////////////////////////
1525 // 0x5000 Entry 20 (size 16 bundles) Page Not Present (10,22,49)
1526 ENTRY(page_not_present)
1527 DBG_FAULT(20)
1528 #ifdef XEN
1529 REFLECT(20)
1530 #endif
1531 mov r16=cr.ifa
1532 rsm psr.dt
1533 /*
1534 * The Linux page fault handler doesn't expect non-present pages to be in
1535 * the TLB. Flush the existing entry now, so we meet that expectation.
1536 */
1537 mov r17=PAGE_SHIFT<<2
1538 ;;
1539 ptc.l r16,r17
1540 ;;
1541 mov r31=pr
1542 srlz.d
1543 br.sptk.many page_fault
1544 END(page_not_present)
1546 .org ia64_ivt+0x5100
1547 /////////////////////////////////////////////////////////////////////////////////////////
1548 // 0x5100 Entry 21 (size 16 bundles) Key Permission (13,25,52)
1549 ENTRY(key_permission)
1550 DBG_FAULT(21)
1551 #ifdef XEN
1552 REFLECT(21)
1553 #endif
1554 mov r16=cr.ifa
1555 rsm psr.dt
1556 mov r31=pr
1557 ;;
1558 srlz.d
1559 br.sptk.many page_fault
1560 END(key_permission)
1562 .org ia64_ivt+0x5200
1563 /////////////////////////////////////////////////////////////////////////////////////////
1564 // 0x5200 Entry 22 (size 16 bundles) Instruction Access Rights (26)
1565 ENTRY(iaccess_rights)
1566 DBG_FAULT(22)
1567 #ifdef XEN
1568 REFLECT(22)
1569 #endif
1570 mov r16=cr.ifa
1571 rsm psr.dt
1572 mov r31=pr
1573 ;;
1574 srlz.d
1575 br.sptk.many page_fault
1576 END(iaccess_rights)
1578 .org ia64_ivt+0x5300
1579 /////////////////////////////////////////////////////////////////////////////////////////
1580 // 0x5300 Entry 23 (size 16 bundles) Data Access Rights (14,53)
1581 ENTRY(daccess_rights)
1582 DBG_FAULT(23)
1583 #ifdef XEN
1584 mov r31=pr;
1585 mov r16=cr.isr
1586 mov r17=cr.ifa
1587 mov r19=23
1588 movl r20=0x5300
1589 br.sptk.many fast_access_reflect;;
1590 #endif
1591 mov r16=cr.ifa
1592 rsm psr.dt
1593 mov r31=pr
1594 ;;
1595 srlz.d
1596 br.sptk.many page_fault
1597 END(daccess_rights)
1599 .org ia64_ivt+0x5400
1600 /////////////////////////////////////////////////////////////////////////////////////////
1601 // 0x5400 Entry 24 (size 16 bundles) General Exception (5,32,34,36,38,39)
1602 ENTRY(general_exception)
1603 DBG_FAULT(24)
1604 mov r16=cr.isr
1605 mov r31=pr
1606 ;;
1607 #ifdef XEN
1608 cmp4.ge p6,p0=0x20,r16
1609 (p6) br.sptk.many dispatch_privop_fault
1610 #else
1611 cmp4.eq p6,p0=0,r16
1612 (p6) br.sptk.many dispatch_illegal_op_fault
1613 #endif
1614 ;;
1615 mov r19=24 // fault number
1616 br.sptk.many dispatch_to_fault_handler
1617 END(general_exception)
1619 .org ia64_ivt+0x5500
1620 /////////////////////////////////////////////////////////////////////////////////////////
1621 // 0x5500 Entry 25 (size 16 bundles) Disabled FP-Register (35)
1622 ENTRY(disabled_fp_reg)
1623 DBG_FAULT(25)
1624 #ifdef XEN
1625 #if 0
1626 mov r20=pr
1627 movl r16=0x2000000000000000
1628 movl r17=0x2000000000176b60
1629 mov r18=cr.iip
1630 mov r19=rr[r16]
1631 movl r22=0xe95d0439
1632 ;;
1633 mov pr=r0,-1
1634 ;;
1635 cmp.eq p6,p7=r22,r19
1636 ;;
1637 (p6) cmp.eq p8,p9=r17,r18
1638 (p8) br.sptk.few floating_panic
1639 ;;
1640 mov pr=r20,-1
1641 ;;
1642 #endif
1643 REFLECT(25)
1644 //floating_panic:
1645 // br.sptk.many floating_panic
1646 ;;
1647 #endif
1648 rsm psr.dfh // ensure we can access fph
1649 ;;
1650 srlz.d
1651 mov r31=pr
1652 mov r19=25
1653 br.sptk.many dispatch_to_fault_handler
1654 END(disabled_fp_reg)
1656 .org ia64_ivt+0x5600
1657 /////////////////////////////////////////////////////////////////////////////////////////
1658 // 0x5600 Entry 26 (size 16 bundles) Nat Consumption (11,23,37,50)
1659 ENTRY(nat_consumption)
1660 DBG_FAULT(26)
1661 #ifdef XEN
1662 REFLECT(26)
1663 #endif
1664 FAULT(26)
1665 END(nat_consumption)
1667 .org ia64_ivt+0x5700
1668 /////////////////////////////////////////////////////////////////////////////////////////
1669 // 0x5700 Entry 27 (size 16 bundles) Speculation (40)
1670 ENTRY(speculation_vector)
1671 DBG_FAULT(27)
1672 #ifdef XEN
1673 // this probably need not reflect...
1674 REFLECT(27)
1675 #endif
1676 /*
1677 * A [f]chk.[as] instruction needs to take the branch to the recovery code but
1678 * this part of the architecture is not implemented in hardware on some CPUs, such
1679 * as Itanium. Thus, in general we need to emulate the behavior. IIM contains
1680 * the relative target (not yet sign extended). So after sign extending it we
1681 * simply add it to IIP. We also need to reset the EI field of the IPSR to zero,
1682 * i.e., the slot to restart into.
1684 * cr.iim contains zero_ext(imm21)
1685 */
1686 mov r18=cr.iim
1687 ;;
1688 mov r17=cr.iip
1689 shl r18=r18,43 // put sign bit in position (43=64-21)
1690 ;;
1692 mov r16=cr.ipsr
1693 shr r18=r18,39 // sign extend (39=43-4)
1694 ;;
1696 add r17=r17,r18 // now add the offset
1697 ;;
1698 mov cr.iip=r17
1699 dep r16=0,r16,41,2 // clear EI
1700 ;;
1702 mov cr.ipsr=r16
1703 ;;
1705 rfi // and go back
1706 END(speculation_vector)
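The shift pair above is a sign-extend plus scale: shl by 43 moves imm21's sign bit to bit 63, and the arithmetic shift right by 39 sign-extends while leaving the displacement multiplied by 16 (bundles to bytes, since 43-39=4). In C:

/* sketch: the chk.[as] branch emulation performed by this vector */
static void emulate_chk_branch(unsigned long *iip, unsigned long *ipsr,
                               unsigned long iim)
{
        long disp = ((long)(iim << 43)) >> 39;  /* sign-extend imm21, x16 */
        *iip += (unsigned long)disp;            /* retarget to recovery   */
        *ipsr &= ~(0x3UL << 41);                /* clear EI: restart at
                                                   slot 0 of the bundle   */
}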
1708 .org ia64_ivt+0x5800
1709 /////////////////////////////////////////////////////////////////////////////////////////
1710 // 0x5800 Entry 28 (size 16 bundles) Reserved
1711 DBG_FAULT(28)
1712 FAULT(28)
1714 .org ia64_ivt+0x5900
1715 /////////////////////////////////////////////////////////////////////////////////////////
1716 // 0x5900 Entry 29 (size 16 bundles) Debug (16,28,56)
1717 ENTRY(debug_vector)
1718 DBG_FAULT(29)
1719 #ifdef XEN
1720 REFLECT(29)
1721 #endif
1722 FAULT(29)
1723 END(debug_vector)
1725 .org ia64_ivt+0x5a00
1726 /////////////////////////////////////////////////////////////////////////////////////////
1727 // 0x5a00 Entry 30 (size 16 bundles) Unaligned Reference (57)
1728 ENTRY(unaligned_access)
1729 DBG_FAULT(30)
1730 #ifdef XEN
1731 REFLECT(30)
1732 #endif
1733 mov r16=cr.ipsr
1734 mov r31=pr // prepare to save predicates
1735 ;;
1736 br.sptk.many dispatch_unaligned_handler
1737 END(unaligned_access)
1739 .org ia64_ivt+0x5b00
1740 /////////////////////////////////////////////////////////////////////////////////////////
1741 // 0x5b00 Entry 31 (size 16 bundles) Unsupported Data Reference (57)
1742 ENTRY(unsupported_data_reference)
1743 DBG_FAULT(31)
1744 #ifdef XEN
1745 REFLECT(31)
1746 #endif
1747 FAULT(31)
1748 END(unsupported_data_reference)
1750 .org ia64_ivt+0x5c00
1751 /////////////////////////////////////////////////////////////////////////////////////////
1752 // 0x5c00 Entry 32 (size 16 bundles) Floating-Point Fault (64)
1753 ENTRY(floating_point_fault)
1754 DBG_FAULT(32)
1755 #ifdef XEN
1756 REFLECT(32)
1757 #endif
1758 FAULT(32)
1759 END(floating_point_fault)
1761 .org ia64_ivt+0x5d00
1762 /////////////////////////////////////////////////////////////////////////////////////////
1763 // 0x5d00 Entry 33 (size 16 bundles) Floating Point Trap (66)
1764 ENTRY(floating_point_trap)
1765 DBG_FAULT(33)
1766 #ifdef XEN
1767 REFLECT(33)
1768 #endif
1769 FAULT(33)
1770 END(floating_point_trap)
1772 .org ia64_ivt+0x5e00
1773 /////////////////////////////////////////////////////////////////////////////////////////
1774 // 0x5e00 Entry 34 (size 16 bundles) Lower Privilege Transfer Trap (66)
1775 ENTRY(lower_privilege_trap)
1776 DBG_FAULT(34)
1777 #ifdef XEN
1778 REFLECT(34)
1779 #endif
1780 FAULT(34)
1781 END(lower_privilege_trap)
1783 .org ia64_ivt+0x5f00
1784 /////////////////////////////////////////////////////////////////////////////////////////
1785 // 0x5f00 Entry 35 (size 16 bundles) Taken Branch Trap (68)
1786 ENTRY(taken_branch_trap)
1787 DBG_FAULT(35)
1788 #ifdef XEN
1789 REFLECT(35)
1790 #endif
1791 FAULT(35)
1792 END(taken_branch_trap)
1794 .org ia64_ivt+0x6000
1795 /////////////////////////////////////////////////////////////////////////////////////////
1796 // 0x6000 Entry 36 (size 16 bundles) Single Step Trap (69)
1797 ENTRY(single_step_trap)
1798 DBG_FAULT(36)
1799 #ifdef XEN
1800 REFLECT(36)
1801 #endif
1802 FAULT(36)
1803 END(single_step_trap)
1805 .org ia64_ivt+0x6100
1806 /////////////////////////////////////////////////////////////////////////////////////////
1807 // 0x6100 Entry 37 (size 16 bundles) Reserved
1808 DBG_FAULT(37)
1809 FAULT(37)
1811 .org ia64_ivt+0x6200
1812 /////////////////////////////////////////////////////////////////////////////////////////
1813 // 0x6200 Entry 38 (size 16 bundles) Reserved
1814 DBG_FAULT(38)
1815 FAULT(38)
1817 .org ia64_ivt+0x6300
1818 /////////////////////////////////////////////////////////////////////////////////////////
1819 // 0x6300 Entry 39 (size 16 bundles) Reserved
1820 DBG_FAULT(39)
1821 FAULT(39)
1823 .org ia64_ivt+0x6400
1824 /////////////////////////////////////////////////////////////////////////////////////////
1825 // 0x6400 Entry 40 (size 16 bundles) Reserved
1826 DBG_FAULT(40)
1827 FAULT(40)
1829 .org ia64_ivt+0x6500
1830 /////////////////////////////////////////////////////////////////////////////////////////
1831 // 0x6500 Entry 41 (size 16 bundles) Reserved
1832 DBG_FAULT(41)
1833 FAULT(41)
1835 .org ia64_ivt+0x6600
1836 /////////////////////////////////////////////////////////////////////////////////////////
1837 // 0x6600 Entry 42 (size 16 bundles) Reserved
1838 DBG_FAULT(42)
1839 FAULT(42)
1841 .org ia64_ivt+0x6700
1842 /////////////////////////////////////////////////////////////////////////////////////////
1843 // 0x6700 Entry 43 (size 16 bundles) Reserved
1844 DBG_FAULT(43)
1845 FAULT(43)
1847 .org ia64_ivt+0x6800
1848 /////////////////////////////////////////////////////////////////////////////////////////
1849 // 0x6800 Entry 44 (size 16 bundles) Reserved
1850 DBG_FAULT(44)
1851 FAULT(44)
1853 .org ia64_ivt+0x6900
1854 /////////////////////////////////////////////////////////////////////////////////////////
1855 // 0x6900 Entry 45 (size 16 bundles) IA-32 Exception (17,18,29,41,42,43,44,58,60,61,62,72,73,75,76,77)
1856 ENTRY(ia32_exception)
1857 DBG_FAULT(45)
1858 #ifdef XEN
1859 REFLECT(45)
1860 #endif
1861 FAULT(45)
1862 END(ia32_exception)
1864 .org ia64_ivt+0x6a00
1865 /////////////////////////////////////////////////////////////////////////////////////////
1866 // 0x6a00 Entry 46 (size 16 bundles) IA-32 Intercept (30,31,59,70,71)
1867 ENTRY(ia32_intercept)
1868 DBG_FAULT(46)
1869 #ifdef XEN
1870 REFLECT(46)
1871 #endif
1872 #ifdef CONFIG_IA32_SUPPORT
1873 mov r31=pr
1874 mov r16=cr.isr
1875 ;;
1876 extr.u r17=r16,16,8 // get ISR.code
1877 mov r18=ar.eflag
1878 mov r19=cr.iim // old eflag value
1879 ;;
1880 cmp.ne p6,p0=2,r17
1881 (p6) br.cond.spnt 1f // not a system flag fault
1882 xor r16=r18,r19
1883 ;;
1884 extr.u r17=r16,18,1 // get the eflags.ac bit
1885 ;;
1886 cmp.eq p6,p0=0,r17
1887 (p6) br.cond.spnt 1f // eflags.ac bit didn't change
1888 ;;
1889 mov pr=r31,-1 // restore predicate registers
1890 rfi
1892 1:
1893 #endif // CONFIG_IA32_SUPPORT
1894 FAULT(46)
1895 END(ia32_intercept)
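The inline path above handles exactly one case of the IA-32 intercept without faulting: ISR.code 2 (a system-flag modification) where the bit that changed between the old and new EFLAGS is AC (bit 18). A minimal C sketch of the same decision, with made-up parameter names standing in for the cr.isr, ar.eflag and cr.iim reads:

/* Sketch only: isr, new_eflags and old_eflags stand in for the
 * cr.isr, ar.eflag and cr.iim values the assembly reads directly. */
static int swallow_ia32_intercept(unsigned long isr,
                                  unsigned long new_eflags,
                                  unsigned long old_eflags)
{
        unsigned long code = (isr >> 16) & 0xff;        /* extr.u r17=r16,16,8 */
        unsigned long diff = new_eflags ^ old_eflags;   /* xor r16=r18,r19 */

        if (code != 2)                  /* not a system flag fault */
                return 0;               /* falls through to FAULT(46) */
        if (!((diff >> 18) & 1))        /* eflags.ac bit didn't change */
                return 0;
        return 1;                       /* rfi: silently resume */
}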
1897 .org ia64_ivt+0x6b00
1898 /////////////////////////////////////////////////////////////////////////////////////////
1899 // 0x6b00 Entry 47 (size 16 bundles) IA-32 Interrupt (74)
1900 ENTRY(ia32_interrupt)
1901 DBG_FAULT(47)
1902 #ifdef XEN
1903 REFLECT(47)
1904 #endif
1905 #ifdef CONFIG_IA32_SUPPORT
1906 mov r31=pr
1907 br.sptk.many dispatch_to_ia32_handler
1908 #else
1909 FAULT(47)
1910 #endif
1911 END(ia32_interrupt)
1913 .org ia64_ivt+0x6c00
1914 /////////////////////////////////////////////////////////////////////////////////////////
1915 // 0x6c00 Entry 48 (size 16 bundles) Reserved
1916 DBG_FAULT(48)
1917 FAULT(48)
1919 .org ia64_ivt+0x6d00
1920 /////////////////////////////////////////////////////////////////////////////////////////
1921 // 0x6d00 Entry 49 (size 16 bundles) Reserved
1922 DBG_FAULT(49)
1923 FAULT(49)
1925 .org ia64_ivt+0x6e00
1926 /////////////////////////////////////////////////////////////////////////////////////////
1927 // 0x6e00 Entry 50 (size 16 bundles) Reserved
1928 DBG_FAULT(50)
1929 FAULT(50)
1931 .org ia64_ivt+0x6f00
1932 /////////////////////////////////////////////////////////////////////////////////////////
1933 // 0x6f00 Entry 51 (size 16 bundles) Reserved
1934 DBG_FAULT(51)
1935 FAULT(51)
1937 .org ia64_ivt+0x7000
1938 /////////////////////////////////////////////////////////////////////////////////////////
1939 // 0x7000 Entry 52 (size 16 bundles) Reserved
1940 DBG_FAULT(52)
1941 FAULT(52)
1943 .org ia64_ivt+0x7100
1944 /////////////////////////////////////////////////////////////////////////////////////////
1945 // 0x7100 Entry 53 (size 16 bundles) Reserved
1946 DBG_FAULT(53)
1947 FAULT(53)
1949 .org ia64_ivt+0x7200
1950 /////////////////////////////////////////////////////////////////////////////////////////
1951 // 0x7200 Entry 54 (size 16 bundles) Reserved
1952 DBG_FAULT(54)
1953 FAULT(54)
1955 .org ia64_ivt+0x7300
1956 /////////////////////////////////////////////////////////////////////////////////////////
1957 // 0x7300 Entry 55 (size 16 bundles) Reserved
1958 DBG_FAULT(55)
1959 FAULT(55)
1961 .org ia64_ivt+0x7400
1962 /////////////////////////////////////////////////////////////////////////////////////////
1963 // 0x7400 Entry 56 (size 16 bundles) Reserved
1964 DBG_FAULT(56)
1965 FAULT(56)
1967 .org ia64_ivt+0x7500
1968 /////////////////////////////////////////////////////////////////////////////////////////
1969 // 0x7500 Entry 57 (size 16 bundles) Reserved
1970 DBG_FAULT(57)
1971 FAULT(57)
1973 .org ia64_ivt+0x7600
1974 /////////////////////////////////////////////////////////////////////////////////////////
1975 // 0x7600 Entry 58 (size 16 bundles) Reserved
1976 DBG_FAULT(58)
1977 FAULT(58)
1979 .org ia64_ivt+0x7700
1980 /////////////////////////////////////////////////////////////////////////////////////////
1981 // 0x7700 Entry 59 (size 16 bundles) Reserved
1982 DBG_FAULT(59)
1983 FAULT(59)
1985 .org ia64_ivt+0x7800
1986 /////////////////////////////////////////////////////////////////////////////////////////
1987 // 0x7800 Entry 60 (size 16 bundles) Reserved
1988 DBG_FAULT(60)
1989 FAULT(60)
1991 .org ia64_ivt+0x7900
1992 /////////////////////////////////////////////////////////////////////////////////////////
1993 // 0x7900 Entry 61 (size 16 bundles) Reserved
1994 DBG_FAULT(61)
1995 FAULT(61)
1997 .org ia64_ivt+0x7a00
1998 /////////////////////////////////////////////////////////////////////////////////////////
1999 // 0x7a00 Entry 62 (size 16 bundles) Reserved
2000 DBG_FAULT(62)
2001 FAULT(62)
2003 .org ia64_ivt+0x7b00
2004 /////////////////////////////////////////////////////////////////////////////////////////
2005 // 0x7b00 Entry 63 (size 16 bundles) Reserved
2006 DBG_FAULT(63)
2007 FAULT(63)
2009 .org ia64_ivt+0x7c00
2010 /////////////////////////////////////////////////////////////////////////////////////////
2011 // 0x7c00 Entry 64 (size 16 bundles) Reserved
2012 DBG_FAULT(64)
2013 FAULT(64)
2015 .org ia64_ivt+0x7d00
2016 /////////////////////////////////////////////////////////////////////////////////////////
2017 // 0x7d00 Entry 65 (size 16 bundles) Reserved
2018 DBG_FAULT(65)
2019 FAULT(65)
2021 .org ia64_ivt+0x7e00
2022 /////////////////////////////////////////////////////////////////////////////////////////
2023 // 0x7e00 Entry 66 (size 16 bundles) Reserved
2024 DBG_FAULT(66)
2025 FAULT(66)
2027 .org ia64_ivt+0x7f00
2028 /////////////////////////////////////////////////////////////////////////////////////////
2029 // 0x7f00 Entry 67 (size 16 bundles) Reserved
2030 DBG_FAULT(67)
2031 FAULT(67)
2033 #ifdef XEN
2034 .org ia64_ivt+0x8000
2035 GLOBAL_ENTRY(dispatch_reflection)
2036 /*
2037 * Input:
2038 * psr.ic: off
2039 * r19: intr type (offset into ivt, see ia64_int.h)
2040 * r31: contains saved predicates (pr)
2041 */
2042 SAVE_MIN_WITH_COVER_R19
2043 alloc r14=ar.pfs,0,0,5,0
2044 mov out4=r15
2045 mov out0=cr.ifa
2046 adds out1=16,sp
2047 mov out2=cr.isr
2048 mov out3=cr.iim
2049 // mov out3=cr.itir
2051 ssm psr.ic | PSR_DEFAULT_BITS
2052 ;;
2053 srlz.i // guarantee that interruption collection is on
2054 ;;
2055 (p15) ssm psr.i // restore psr.i
2056 adds r3=8,r2 // set up second base pointer
2057 ;;
2058 SAVE_REST
2059 movl r14=ia64_leave_kernel
2060 ;;
2061 mov rp=r14 // rp preloaded, so the C handler "returns" to ia64_leave_kernel
2062 // br.sptk.many ia64_prepare_handle_reflection
2063 br.call.sptk.many b6=ia64_handle_reflection
2064 END(dispatch_reflection)
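Judging from the out0..out4 setup above, the C-level handler takes the faulting address, a pt_regs pointer, the ISR, the IIM and the vector number, in that order. A presumed prototype (only the argument order is implied by the assembly; the parameter names are guesses):

/* Presumed prototype; names are illustrative, order follows out0..out4. */
extern void ia64_handle_reflection(unsigned long ifa,     /* out0 = cr.ifa */
                                   struct pt_regs *regs,  /* out1 = sp+16  */
                                   unsigned long isr,     /* out2 = cr.isr */
                                   unsigned long iim,     /* out3 = cr.iim */
                                   unsigned long vector); /* out4 = r15    */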
2066 #define SAVE_MIN_COVER_DONE DO_SAVE_MIN(,mov r30=cr.ifs,)
2068 // same as dispatch_break_fault, except the cover has already been done
2069 GLOBAL_ENTRY(dispatch_slow_hyperprivop)
2070 SAVE_MIN_COVER_DONE
2071 ;;
2072 br.sptk.many dispatch_break_fault_post_save
2073 END(dispatch_slow_hyperprivop)
2074 #endif
2076 #ifdef CONFIG_IA32_SUPPORT
2078 /*
2079 * There is no particular reason for this code to be here, other than that
2080 * there happens to be space here that would go unused otherwise. If this
2081 * fault ever gets "unreserved", simply move the following code to a more
2082 * suitable spot...
2083 */
2085 // IA32 interrupt entry point
2087 ENTRY(dispatch_to_ia32_handler)
2088 SAVE_MIN
2089 ;;
2090 mov r14=cr.isr
2091 ssm psr.ic | PSR_DEFAULT_BITS
2092 ;;
2093 srlz.i // guarantee that interruption collection is on
2094 ;;
2095 (p15) ssm psr.i
2096 adds r3=8,r2 // Base pointer for SAVE_REST
2097 ;;
2098 SAVE_REST
2099 ;;
2100 mov r15=0x80
2101 shr r14=r14,16 // Get interrupt number
2102 ;;
2103 cmp.ne p6,p0=r14,r15
2104 (p6) br.call.dpnt.many b6=non_ia32_syscall
2106 adds r14=IA64_PT_REGS_R8_OFFSET + 16,sp // 16 byte hole per SW conventions
2107 adds r15=IA64_PT_REGS_R1_OFFSET + 16,sp
2108 ;;
2109 cmp.eq pSys,pNonSys=r0,r0 // set pSys=1, pNonSys=0
2110 ld8 r8=[r14] // get r8
2111 ;;
2112 st8 [r15]=r8 // save original EAX in r1 (IA32 procs don't use the GP)
2113 ;;
2114 alloc r15=ar.pfs,0,0,6,0 // must be first in an insn group
2115 ;;
2116 ld4 r8=[r14],8 // r8 == eax (syscall number)
2117 mov r15=IA32_NR_syscalls
2118 ;;
2119 cmp.ltu.unc p6,p7=r8,r15
2120 ld4 out1=[r14],8 // r9 == ecx
2121 ;;
2122 ld4 out2=[r14],8 // r10 == edx
2123 ;;
2124 ld4 out0=[r14] // r11 == ebx
2125 adds r14=(IA64_PT_REGS_R13_OFFSET) + 16,sp
2126 ;;
2127 ld4 out5=[r14],PT(R14)-PT(R13) // r13 == ebp
2128 ;;
2129 ld4 out3=[r14],PT(R15)-PT(R14) // r14 == esi
2130 adds r2=TI_FLAGS+IA64_TASK_SIZE,r13
2131 ;;
2132 ld4 out4=[r14] // r15 == edi
2133 movl r16=ia32_syscall_table
2134 ;;
2135 (p6) shladd r16=r8,3,r16 // force ni_syscall if not valid syscall number
2136 ld4 r2=[r2] // r2 = current_thread_info()->flags
2137 ;;
2138 ld8 r16=[r16]
2139 and r2=_TIF_SYSCALL_TRACEAUDIT,r2 // mask trace or audit
2140 ;;
2141 mov b6=r16
2142 movl r15=ia32_ret_from_syscall
2143 cmp.eq p8,p0=r2,r0
2144 ;;
2145 mov rp=r15
2146 (p8) br.call.sptk.many b6=b6
2147 br.cond.sptk ia32_trace_syscall
2149 non_ia32_syscall:
2150 alloc r15=ar.pfs,0,0,2,0
2151 mov out0=r14 // interrupt #
2152 add out1=16,sp // pointer to pt_regs
2153 ;; // avoid WAW on CFM
2154 br.call.sptk.many rp=ia32_bad_interrupt
2155 .ret1: movl r15=ia64_leave_kernel
2156 ;;
2157 mov rp=r15
2158 br.ret.sptk.many rp
2159 END(dispatch_to_ia32_handler)
2161 #endif /* CONFIG_IA32_SUPPORT */
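For reference, the table-driven fast path above (the p8, no-trace case) reduces to roughly the following C. It assumes, as the assembly implies, that ia32_syscall_table is an array of 8-byte function pointers whose entry 0 is ni_syscall; the helper name and the pt_regs field spelling are illustrative:

typedef long (*ia32_syscall_t)(unsigned int, unsigned int, unsigned int,
                               unsigned int, unsigned int, unsigned int);
extern ia32_syscall_t ia32_syscall_table[];

static long ia32_fast_syscall(struct pt_regs *regs)
{
        unsigned int eax = regs->r8;                    /* syscall number */
        /* (p6) shladd: an out-of-range eax leaves r16 at the table base,
         * i.e. entry 0, forcing ni_syscall. */
        unsigned int idx = eax < IA32_NR_syscalls ? eax : 0;
        ia32_syscall_t fn = ia32_syscall_table[idx];

        return fn(regs->r11 /* ebx */, regs->r9  /* ecx */,
                  regs->r10 /* edx */, regs->r14 /* esi */,
                  regs->r15 /* edi */, regs->r13 /* ebp */);
}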