ia64/xen-unstable

xen/arch/ia64/xen/ivt.S @ 9405:29dfadcc5029

[IA64] Followup to xen time cleanup

Clean up the xen time handler. Tristan had #if 0'd some code because it
seemed redundant; that logic was actually problematic and the cause of an
intermittent timer oops in dom0, so delete it now.

Also remove the vcpu_wake call, since waking the current vcpu achieves
nothing and simply wastes cpu cycles.

Signed-off-by: Kevin Tian <kevin.tian@intel.com>
author awilliam@xenbuild.aw
date Mon Mar 27 15:32:08 2006 -0700 (2006-03-27)
parents 815758308556
children 2b6e531dab38
2 #ifdef XEN
3 //#define CONFIG_DISABLE_VHPT // FIXME: change when VHPT is enabled??
4 // these are all hacked out for now as the entire IVT
5 // will eventually be replaced... just want to use it
6 // for startup code to handle TLB misses
7 //#define ia64_leave_kernel 0
8 //#define ia64_ret_from_syscall 0
9 //#define ia64_handle_irq 0
10 //#define ia64_fault 0
11 #define ia64_illegal_op_fault 0
12 #define ia64_prepare_handle_unaligned 0
13 #define ia64_bad_break 0
14 #define ia64_trace_syscall 0
15 #define sys_call_table 0
16 #define sys_ni_syscall 0
17 #include <asm/vhpt.h>
18 #include <asm/debugger.h>
19 #endif
20 /*
21 * arch/ia64/kernel/ivt.S
22 *
23 * Copyright (C) 1998-2001, 2003 Hewlett-Packard Co
24 * Stephane Eranian <eranian@hpl.hp.com>
25 * David Mosberger <davidm@hpl.hp.com>
26 * Copyright (C) 2000, 2002-2003 Intel Co
27 * Asit Mallick <asit.k.mallick@intel.com>
28 * Suresh Siddha <suresh.b.siddha@intel.com>
29 * Kenneth Chen <kenneth.w.chen@intel.com>
30 * Fenghua Yu <fenghua.yu@intel.com>
31 *
32 * 00/08/23 Asit Mallick <asit.k.mallick@intel.com> TLB handling for SMP
33 * 00/12/20 David Mosberger-Tang <davidm@hpl.hp.com> DTLB/ITLB handler now uses virtual PT.
34 */
35 /*
36 * This file defines the interruption vector table used by the CPU.
37 * It does not include one entry per possible cause of interruption.
38 *
39 * The first 20 entries of the table contain 64 bundles each while the
40 * remaining 48 entries contain only 16 bundles each.
41 *
42 * The 64 bundles are used to allow inlining the whole handler for critical
43 * interruptions like TLB misses.
44 *
45 * For each entry, the comment is as follows:
46 *
47 * // 0x1c00 Entry 7 (size 64 bundles) Data Key Miss (12,51)
48 * entry offset ----/ / / / /
49 * entry number ---------/ / / /
50 * size of the entry -------------/ / /
51 * vector name -------------------------------------/ /
52 * interruptions triggering this vector ----------------------/
53 *
54 * The table is 32KB in size and must be aligned on 32KB boundary.
55 * (The CPU ignores the 15 lower bits of the address)
56 *
57 * Table is based upon EAS2.6 (Oct 1999)
58 */
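/*
 * Illustrative sketch (not from the source): the .org offsets used below
 * follow directly from 16-byte bundles, 20 long entries of 64 bundles,
 * and 48 short entries of 16 bundles. In C:
 *
 *	// hypothetical helper, shown only to explain the layout
 *	unsigned long ivt_entry_offset(unsigned int n)
 *	{
 *		if (n < 20)
 *			return n * 64 * 16;			// 0x400 apart
 *		return 20 * 64 * 16 + (n - 20) * 16 * 16;	// 0x5000 + 0x100 each
 *	}
 *
 * e.g. entry 7 -> 0x1c00 and entry 20 -> 0x5000, matching the .org
 * directives throughout this file.
 */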
60 #include <linux/config.h>
62 #include <asm/asmmacro.h>
63 #include <asm/break.h>
64 #include <asm/ia32.h>
65 #include <asm/kregs.h>
66 #include <asm/offsets.h>
67 #include <asm/pgtable.h>
68 #include <asm/processor.h>
69 #include <asm/ptrace.h>
70 #include <asm/system.h>
71 #include <asm/thread_info.h>
72 #include <asm/unistd.h>
73 #ifdef XEN
74 #include <xen/errno.h>
75 #else
76 #include <asm/errno.h>
77 #endif
79 #if 1
80 # define PSR_DEFAULT_BITS psr.ac
81 #else
82 # define PSR_DEFAULT_BITS 0
83 #endif
85 #if 0
86 /*
87 * This lets you track the last eight faults that occurred on the CPU. Make sure ar.k2 isn't
88 * needed for something else before enabling this...
89 */
90 # define DBG_FAULT(i) mov r16=ar.k2;; shl r16=r16,8;; add r16=(i),r16;;mov ar.k2=r16
91 #else
92 # define DBG_FAULT(i)
93 #endif
95 #define MINSTATE_VIRT /* needed by minstate.h */
96 #include "minstate.h"
98 #define FAULT(n) \
99 mov r31=pr; \
100 mov r19=n;; /* prepare to save predicates */ \
101 br.sptk.many dispatch_to_fault_handler
103 #ifdef XEN
104 #define REFLECT(n) \
105 mov r31=pr; \
106 mov r19=n;; /* prepare to save predicates */ \
107 br.sptk.many dispatch_reflection
108 #endif
110 .section .text.ivt,"ax"
112 .align 32768 // align on 32KB boundary
113 .global ia64_ivt
114 ia64_ivt:
115 /////////////////////////////////////////////////////////////////////////////////////////
116 // 0x0000 Entry 0 (size 64 bundles) VHPT Translation (8,20,47)
117 ENTRY(vhpt_miss)
118 DBG_FAULT(0)
119 /*
120 * The VHPT vector is invoked when the TLB entry for the virtual page table
121 * is missing. This happens only as a result of a previous
122 * (the "original") TLB miss, which may either be caused by an instruction
123 * fetch or a data access (or non-access).
124 *
125 * What we do here is normal TLB miss handling for the _original_ miss, followed
126 * by inserting the TLB entry for the virtual page table page that the VHPT
127 * walker was attempting to access. The latter gets inserted as long
128 * as both L1 and L2 have valid mappings for the faulting address.
129 * The TLB entry for the original miss gets inserted only if
130 * the L3 entry indicates that the page is present.
131 *
132 * do_page_fault gets invoked in the following cases:
133 * - the faulting virtual address uses unimplemented address bits
134 * - the faulting virtual address has no L1, L2, or L3 mapping
135 */
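/*
 * A rough C sketch of the walk just described (illustrative only; the
 * helper and index names are assumptions, not code from this tree):
 *
 *	pgd_t *pgd = (region(addr) == 5 ? swapper_pg_dir
 *	                                : task_page_table) + pgd_index(addr);
 *	if (*pgd == 0) return page_fault;		// no L1 mapping
 *	pmd_t *pmd = phys_to_virt(*pgd) + pmd_index(addr);	// L2 table
 *	if (*pmd == 0) return page_fault;		// no L2 mapping
 *	pte_t pte  = *(phys_to_virt(*pmd) + pte_index(addr));	// L3 PTE
 *	if (!(pte & _PAGE_P)) return page_fault;	// page not present
 *	itc(pte);	// and also itc.d the L3 page for the VHPT walker
 */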
136 mov r16=cr.ifa // get address that caused the TLB miss
137 #ifdef CONFIG_HUGETLB_PAGE
138 movl r18=PAGE_SHIFT
139 mov r25=cr.itir
140 #endif
141 ;;
142 rsm psr.dt // use physical addressing for data
143 mov r31=pr // save the predicate registers
144 #ifdef XEN
145 movl r19=THIS_CPU(cpu_kr)+IA64_KR_PT_BASE_OFFSET;;
146 #else
147 mov r19=IA64_KR(PT_BASE) // get page table base address
148 #endif
149 shl r21=r16,3 // shift bit 60 into sign bit
150 shr.u r17=r16,61 // get the region number into r17
151 ;;
152 shr r22=r21,3
153 #ifdef CONFIG_HUGETLB_PAGE
154 extr.u r26=r25,2,6
155 ;;
156 cmp.ne p8,p0=r18,r26
157 sub r27=r26,r18
158 ;;
159 (p8) dep r25=r18,r25,2,6
160 (p8) shr r22=r22,r27
161 #endif
162 ;;
163 cmp.eq p6,p7=5,r17 // is IFA pointing into region 5?
164 shr.u r18=r22,PGDIR_SHIFT // get bits 33-63 of the faulting address
165 ;;
166 (p7) dep r17=r17,r19,(PAGE_SHIFT-3),3 // put region number bits in place
168 srlz.d
169 LOAD_PHYSICAL(p6, r19, swapper_pg_dir) // region 5 is rooted at swapper_pg_dir
171 .pred.rel "mutex", p6, p7
172 (p6) shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT
173 (p7) shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT-3
174 ;;
175 (p6) dep r17=r18,r19,3,(PAGE_SHIFT-3) // r17=PTA + IFA(33,42)*8
176 (p7) dep r17=r18,r17,3,(PAGE_SHIFT-6) // r17=PTA + (((IFA(61,63) << 7) | IFA(33,39))*8)
177 cmp.eq p7,p6=0,r21 // unused address bits all zeroes?
178 shr.u r18=r22,PMD_SHIFT // shift L2 index into position
179 ;;
180 ld8 r17=[r17] // fetch the L1 entry (may be 0)
181 ;;
182 (p7) cmp.eq p6,p7=r17,r0 // was L1 entry NULL?
183 dep r17=r18,r17,3,(PAGE_SHIFT-3) // compute address of L2 page table entry
184 ;;
185 (p7) ld8 r20=[r17] // fetch the L2 entry (may be 0)
186 shr.u r19=r22,PAGE_SHIFT // shift L3 index into position
187 ;;
188 (p7) cmp.eq.or.andcm p6,p7=r20,r0 // was L2 entry NULL?
189 dep r21=r19,r20,3,(PAGE_SHIFT-3) // compute address of L3 page table entry
190 ;;
191 (p7) ld8 r18=[r21] // read the L3 PTE
192 mov r19=cr.isr // cr.isr bit 32 tells us if this is an insn miss
193 ;;
194 (p7) tbit.z p6,p7=r18,_PAGE_P_BIT // page present bit cleared?
195 mov r22=cr.iha // get the VHPT address that caused the TLB miss
196 ;; // avoid RAW on p7
197 (p7) tbit.nz.unc p10,p11=r19,32 // is it an instruction TLB miss?
198 dep r23=0,r20,0,PAGE_SHIFT // clear low bits to get page address
199 ;;
200 (p10) itc.i r18 // insert the instruction TLB entry
201 (p11) itc.d r18 // insert the data TLB entry
202 (p6) br.cond.spnt.many page_fault // handle bad address/page not present (page fault)
203 mov cr.ifa=r22
205 #ifdef CONFIG_HUGETLB_PAGE
206 (p8) mov cr.itir=r25 // change to default page-size for VHPT
207 #endif
209 /*
210 * Now compute and insert the TLB entry for the virtual page table. We never
211 * execute in a page table page so there is no need to set the exception deferral
212 * bit.
213 */
214 adds r24=__DIRTY_BITS_NO_ED|_PAGE_PL_0|_PAGE_AR_RW,r23
215 ;;
216 (p7) itc.d r24
217 ;;
218 #ifdef CONFIG_SMP
219 /*
220 * Tell the assembler's dependency-violation checker that the above "itc" instructions
221 * cannot possibly affect the following loads:
222 */
223 dv_serialize_data
225 /*
226 * Re-check L2 and L3 pagetable. If they changed, we may have received a ptc.g
227 * between reading the pagetable and the "itc". If so, flush the entry we
228 * inserted and retry.
229 */
230 ld8 r25=[r21] // read L3 PTE again
231 ld8 r26=[r17] // read L2 entry again
232 ;;
233 cmp.ne p6,p7=r26,r20 // did L2 entry change
234 mov r27=PAGE_SHIFT<<2
235 ;;
236 (p6) ptc.l r22,r27 // purge PTE page translation
237 (p7) cmp.ne.or.andcm p6,p7=r25,r18 // did L3 PTE change
238 ;;
239 (p6) ptc.l r16,r27 // purge translation
240 #endif
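/*
 * In outline (illustrative C, hypothetical names), the CONFIG_SMP
 * re-check above is:
 *
 *	if (*l2_entry != l2_seen) {		// ptc.g raced with the walk
 *		ptc_l(iha, PAGE_SHIFT << 2);	// purge the PTE-page mapping
 *		ptc_l(ifa, PAGE_SHIFT << 2);	// and the entry we inserted
 *	} else if (*l3_pte != pte_seen)
 *		ptc_l(ifa, PAGE_SHIFT << 2);	// purge just the inserted entry
 *
 * where ptc_l stands for the local purge; the rfi below then lets the
 * access fault again and retry cleanly.
 */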
242 mov pr=r31,-1 // restore predicate registers
243 rfi
244 END(vhpt_miss)
246 .org ia64_ivt+0x400
247 /////////////////////////////////////////////////////////////////////////////////////////
248 // 0x0400 Entry 1 (size 64 bundles) ITLB (21)
249 ENTRY(itlb_miss)
250 DBG_FAULT(1)
251 #ifdef XEN
252 VHPT_CCHAIN_LOOKUP(itlb_miss,i)
253 #ifdef VHPT_GLOBAL
254 // br.cond.sptk page_fault
255 br.cond.sptk fast_tlb_miss_reflect
256 ;;
257 #endif
258 #endif
259 /*
260 * The ITLB handler accesses the L3 PTE via the virtually mapped linear
261 * page table. If a nested TLB miss occurs, we switch into physical
262 * mode, walk the page table, and then re-execute the L3 PTE read
263 * and go on normally after that.
264 */
265 mov r16=cr.ifa // get virtual address
266 mov r29=b0 // save b0
267 mov r31=pr // save predicates
268 .itlb_fault:
269 mov r17=cr.iha // get virtual address of L3 PTE
270 movl r30=1f // load nested fault continuation point
271 ;;
272 1: ld8 r18=[r17] // read L3 PTE
273 ;;
274 mov b0=r29
275 tbit.z p6,p0=r18,_PAGE_P_BIT // page present bit cleared?
276 (p6) br.cond.spnt page_fault
277 ;;
278 itc.i r18
279 ;;
280 #ifdef CONFIG_SMP
281 /*
282 * Tell the assembler's dependency-violation checker that the above "itc" instructions
283 * cannot possibly affect the following loads:
284 */
285 dv_serialize_data
287 ld8 r19=[r17] // read L3 PTE again and see if same
288 mov r20=PAGE_SHIFT<<2 // setup page size for purge
289 ;;
290 cmp.ne p7,p0=r18,r19
291 ;;
292 (p7) ptc.l r16,r20
293 #endif
294 mov pr=r31,-1
295 rfi
296 END(itlb_miss)
298 .org ia64_ivt+0x0800
299 /////////////////////////////////////////////////////////////////////////////////////////
300 // 0x0800 Entry 2 (size 64 bundles) DTLB (9,48)
301 ENTRY(dtlb_miss)
302 DBG_FAULT(2)
303 #ifdef XEN
304 VHPT_CCHAIN_LOOKUP(dtlb_miss,d)
305 #if VHPT_ENABLED
306 // XXX TODO optimization
307 mov r31=pr // save predicates
308 mov r30=cr.ipsr
309 mov r28=cr.iip
310 mov r16=cr.ifa // get virtual address
311 mov r17=cr.isr // get isr
312 ;;
314 extr.u r18 = r30, IA64_PSR_CPL0_BIT, 2 // extract psr.cpl
315 ;;
316 cmp.ne p6, p0 = r0, r18 // cpl != 0?
317 (p6) br.cond.sptk 2f
319 // is speculation bit on?
320 tbit.nz p7,p0=r17,IA64_ISR_SP_BIT
321 ;;
322 (p7) br.cond.spnt 2f
324 // is non-access bit on?
325 tbit.nz p8,p0=r17,IA64_ISR_NA_BIT
326 ;;
327 (p8) br.cond.spnt 2f
329 // cr.isr.code == IA64_ISR_CODE_LFETCH?
330 and r18=IA64_ISR_CODE_MASK,r17 // get the isr.code field
331 ;;
332 cmp.eq p9,p0=IA64_ISR_CODE_LFETCH,r18 // check isr.code field
333 (p9) br.cond.spnt 2f
335 // Is the faulting iip in the vmm area?
336 // check bits [59:58]:
337 // 00, 11: guest
338 // 01, 10: vmm
339 extr.u r19 = r28, 58, 2
340 ;;
341 cmp.eq p10, p0 = 0x0, r19
342 (p10) br.cond.sptk 2f
343 cmp.eq p11, p0 = 0x3, r19
344 (p11) br.cond.sptk 2f
346 // Is the faulting address in the identity mapping area?
347 // 0xf000... or 0xe8000...
348 extr.u r20 = r16, 59, 5
349 ;;
350 cmp.eq p12, p0 = 0x1e, r20 // (0xf0 >> 3) = 0x1e
351 (p12) br.cond.spnt 1f
352 cmp.eq p0, p13 = 0x1d, r20 // (0xe8 >> 3) = 0x1d
353 (p13) br.cond.sptk 2f
355 1:
356 // xen identity mapping area.
357 movl r24=PAGE_KERNEL
358 movl r25=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
359 ;;
360 shr.u r26=r16,55 // move address bit 59 to bit 4
361 and r25=r25,r16 // clear ed, reserved bits, and PTE control bits
362 ;;
363 and r26=0x10,r26 // bit 4=address-bit(59)
364 ;;
365 or r25=r25,r24 // insert PTE control bits into r25
366 ;;
367 or r25=r25,r26 // set bit 4 (uncached) if the access was to the 0xe8... (uncached) window
368 ;;
369 itc.d r25 // insert the TLB entry
370 mov pr=r31,-1
371 rfi
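/*
 * Put differently (sketch only, hypothetical variable names), the PTE
 * built above is:
 *
 *	pte  = vaddr & ((1UL << IA64_MAX_PHYS_BITS) - 1) & ~0xfffUL;
 *	pte |= PAGE_KERNEL;		// control bits
 *	pte |= (vaddr >> 55) & 0x10;	// address bit 59 -> PTE bit 4 (uncached)
 */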
373 2:
374 #endif
375 #ifdef VHPT_GLOBAL
376 // br.cond.sptk page_fault
377 br.cond.sptk fast_tlb_miss_reflect
378 ;;
379 #endif
380 mov r29=b0 // save b0
381 #else
382 /*
383 * The DTLB handler accesses the L3 PTE via the virtually mapped linear
384 * page table. If a nested TLB miss occurs, we switch into physical
385 * mode, walk the page table, and then re-execute the L3 PTE read
386 * and go on normally after that.
387 */
388 mov r16=cr.ifa // get virtual address
389 mov r29=b0 // save b0
390 mov r31=pr // save predicates
391 #endif
392 dtlb_fault:
393 mov r17=cr.iha // get virtual address of L3 PTE
394 movl r30=1f // load nested fault continuation point
395 ;;
396 1: ld8 r18=[r17] // read L3 PTE
397 ;;
398 mov b0=r29
399 tbit.z p6,p0=r18,_PAGE_P_BIT // page present bit cleared?
400 (p6) br.cond.spnt page_fault
401 ;;
402 itc.d r18
403 ;;
404 #ifdef CONFIG_SMP
405 /*
406 * Tell the assembler's dependency-violation checker that the above "itc" instructions
407 * cannot possibly affect the following loads:
408 */
409 dv_serialize_data
411 ld8 r19=[r17] // read L3 PTE again and see if same
412 mov r20=PAGE_SHIFT<<2 // setup page size for purge
413 ;;
414 cmp.ne p7,p0=r18,r19
415 ;;
416 (p7) ptc.l r16,r20
417 #endif
418 mov pr=r31,-1
419 rfi
420 END(dtlb_miss)
422 .org ia64_ivt+0x0c00
423 /////////////////////////////////////////////////////////////////////////////////////////
424 // 0x0c00 Entry 3 (size 64 bundles) Alt ITLB (19)
425 ENTRY(alt_itlb_miss)
426 DBG_FAULT(3)
427 #ifdef XEN
428 //#ifdef VHPT_GLOBAL
429 // VHPT_CCHAIN_LOOKUP(alt_itlb_miss,i)
430 // br.cond.sptk page_fault
431 // ;;
432 //#endif
433 #endif
434 #ifdef XEN
435 mov r31=pr
436 mov r16=cr.ifa // get address that caused the TLB miss
437 ;;
438 late_alt_itlb_miss:
439 movl r17=PAGE_KERNEL
440 mov r21=cr.ipsr
441 movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
442 ;;
443 #else
444 mov r16=cr.ifa // get address that caused the TLB miss
445 movl r17=PAGE_KERNEL
446 mov r21=cr.ipsr
447 movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
448 mov r31=pr
449 ;;
450 #endif
451 #ifdef CONFIG_DISABLE_VHPT
452 shr.u r22=r16,61 // get the region number into r22
453 ;;
454 cmp.gt p8,p0=6,r22 // user mode
455 ;;
456 (p8) thash r17=r16
457 ;;
458 (p8) mov cr.iha=r17
459 (p8) mov r29=b0 // save b0
460 (p8) br.cond.dptk .itlb_fault
461 #endif
462 extr.u r23=r21,IA64_PSR_CPL0_BIT,2 // extract psr.cpl
463 and r19=r19,r16 // clear ed, reserved bits, and PTE control bits
464 #ifdef XEN
465 shr.u r18=r16,55 // move address bit 59 to bit 4
466 ;;
467 and r18=0x10,r18 // bit 4=address-bit(59)
468 #else
469 shr.u r18=r16,57 // move address bit 61 to bit 4
470 ;;
471 andcm r18=0x10,r18 // bit 4=~address-bit(61)
472 #endif
473 cmp.ne p8,p0=r0,r23 // psr.cpl != 0?
474 or r19=r17,r19 // insert PTE control bits into r19
475 ;;
476 or r19=r19,r18 // set bit 4 (uncached) if the access was to region 6
477 (p8) br.cond.spnt page_fault
478 #ifdef XEN
479 FORCE_CRASH
480 #endif
481 ;;
482 itc.i r19 // insert the TLB entry
483 mov pr=r31,-1
484 rfi
485 END(alt_itlb_miss)
487 .org ia64_ivt+0x1000
488 /////////////////////////////////////////////////////////////////////////////////////////
489 // 0x1000 Entry 4 (size 64 bundles) Alt DTLB (7,46)
490 ENTRY(alt_dtlb_miss)
491 DBG_FAULT(4)
492 #ifdef XEN
493 //#ifdef VHPT_GLOBAL
494 // VHPT_CCHAIN_LOOKUP(alt_dtlb_miss,d)
495 // br.cond.sptk page_fault
496 // ;;
497 //#endif
498 #endif
499 #ifdef XEN
500 mov r31=pr
501 mov r16=cr.ifa // get address that caused the TLB miss
502 ;;
503 late_alt_dtlb_miss:
504 movl r17=PAGE_KERNEL
505 mov r20=cr.isr
506 movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
507 mov r21=cr.ipsr
508 ;;
509 #else
510 #endif
511 #ifdef CONFIG_DISABLE_VHPT
512 shr.u r22=r16,61 // get the region number into r22
513 ;;
514 cmp.gt p8,p0=6,r22 // access to region 0-5
515 ;;
516 (p8) thash r17=r16
517 ;;
518 (p8) mov cr.iha=r17
519 (p8) mov r29=b0 // save b0
520 (p8) br.cond.dptk dtlb_fault
521 #endif
522 extr.u r23=r21,IA64_PSR_CPL0_BIT,2 // extract psr.cpl
523 and r22=IA64_ISR_CODE_MASK,r20 // get the isr.code field
524 tbit.nz p6,p7=r20,IA64_ISR_SP_BIT // is speculation bit on?
525 #ifdef XEN
526 shr.u r18=r16,55 // move address bit 59 to bit 4
527 and r19=r19,r16 // clear ed, reserved bits, and PTE control bits
528 tbit.nz p9,p0=r20,IA64_ISR_NA_BIT // is non-access bit on?
529 ;;
530 and r18=0x10,r18 // bit 4=address-bit(59)
531 #else
532 shr.u r18=r16,57 // move address bit 61 to bit 4
533 and r19=r19,r16 // clear ed, reserved bits, and PTE control bits
534 tbit.nz p9,p0=r20,IA64_ISR_NA_BIT // is non-access bit on?
535 ;;
536 andcm r18=0x10,r18 // bit 4=~address-bit(61)
537 #endif
538 cmp.ne p8,p0=r0,r23
539 (p9) cmp.eq.or.andcm p6,p7=IA64_ISR_CODE_LFETCH,r22 // check isr.code field
540 (p8) br.cond.spnt page_fault
541 #ifdef XEN
542 ;;
543 // Test for a Xen address; if not, handle via page_fault
544 // note that 0xf000 (cached) and 0xe800 (uncached) addresses
545 // should be OK.
546 extr.u r22=r16,59,5;;
547 cmp.eq p8,p0=0x1e,r22
548 (p8) br.cond.spnt 1f;;
549 cmp.ne p8,p0=0x1d,r22
550 (p8) br.cond.sptk page_fault ;;
551 1:
552 #endif
554 dep r21=-1,r21,IA64_PSR_ED_BIT,1
555 or r19=r19,r17 // insert PTE control bits into r19
556 ;;
557 or r19=r19,r18 // set bit 4 (uncached) if the access was to region 6
558 (p6) mov cr.ipsr=r21
559 ;;
560 (p7) itc.d r19 // insert the TLB entry
561 mov pr=r31,-1
562 rfi
563 END(alt_dtlb_miss)
565 .org ia64_ivt+0x1400
566 /////////////////////////////////////////////////////////////////////////////////////////
567 // 0x1400 Entry 5 (size 64 bundles) Data nested TLB (6,45)
568 ENTRY(nested_dtlb_miss)
569 DBG_FAULT(5)
570 #ifdef XEN
571 mov b0=r30
572 br.sptk.many b0 // return to continuation point
573 ;;
574 #endif
575 /*
576 * In the absence of kernel bugs, we get here when the virtually mapped linear
577 * page table is accessed non-speculatively (e.g., in the Dirty-bit, Instruction
578 * Access-bit, or Data Access-bit faults). If the DTLB entry for the virtual page
579 * table is missing, a nested TLB miss fault is triggered and control is
580 * transferred to this point. When this happens, we lookup the pte for the
581 * faulting address by walking the page table in physical mode and return to the
582 * continuation point passed in register r30 (or call page_fault if the address is
583 * not mapped).
584 *
585 * Input: r16: faulting address
586 * r29: saved b0
587 * r30: continuation address
588 * r31: saved pr
589 *
590 * Output: r17: physical address of L3 PTE of faulting address
591 * r29: saved b0
592 * r30: continuation address
593 * r31: saved pr
594 *
595 * Clobbered: b0, r18, r19, r21, psr.dt (cleared)
596 */
597 rsm psr.dt // switch to using physical data addressing
598 #ifdef XEN
599 movl r19=THIS_CPU(cpu_kr)+IA64_KR_PT_BASE_OFFSET;;
600 #else
601 mov r19=IA64_KR(PT_BASE) // get the page table base address
602 #endif
603 shl r21=r16,3 // shift bit 60 into sign bit
604 ;;
605 shr.u r17=r16,61 // get the region number into r17
606 ;;
607 cmp.eq p6,p7=5,r17 // is faulting address in region 5?
608 shr.u r18=r16,PGDIR_SHIFT // get bits 33-63 of faulting address
609 ;;
610 (p7) dep r17=r17,r19,(PAGE_SHIFT-3),3 // put region number bits in place
612 srlz.d
613 LOAD_PHYSICAL(p6, r19, swapper_pg_dir) // region 5 is rooted at swapper_pg_dir
615 .pred.rel "mutex", p6, p7
616 (p6) shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT
617 (p7) shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT-3
618 ;;
619 (p6) dep r17=r18,r19,3,(PAGE_SHIFT-3) // r17=PTA + IFA(33,42)*8
620 (p7) dep r17=r18,r17,3,(PAGE_SHIFT-6) // r17=PTA + (((IFA(61,63) << 7) | IFA(33,39))*8)
621 cmp.eq p7,p6=0,r21 // unused address bits all zeroes?
622 shr.u r18=r16,PMD_SHIFT // shift L2 index into position
623 ;;
624 ld8 r17=[r17] // fetch the L1 entry (may be 0)
625 ;;
626 (p7) cmp.eq p6,p7=r17,r0 // was L1 entry NULL?
627 dep r17=r18,r17,3,(PAGE_SHIFT-3) // compute address of L2 page table entry
628 ;;
629 (p7) ld8 r17=[r17] // fetch the L2 entry (may be 0)
630 shr.u r19=r16,PAGE_SHIFT // shift L3 index into position
631 ;;
632 (p7) cmp.eq.or.andcm p6,p7=r17,r0 // was L2 entry NULL?
633 dep r17=r19,r17,3,(PAGE_SHIFT-3) // compute address of L3 page table entry
634 (p6) br.cond.spnt page_fault
635 mov b0=r30
636 br.sptk.many b0 // return to continuation point
637 END(nested_dtlb_miss)
639 .org ia64_ivt+0x1800
640 /////////////////////////////////////////////////////////////////////////////////////////
641 // 0x1800 Entry 6 (size 64 bundles) Instruction Key Miss (24)
642 ENTRY(ikey_miss)
643 DBG_FAULT(6)
644 #ifdef XEN
645 REFLECT(6)
646 #endif
647 FAULT(6)
648 END(ikey_miss)
650 //-----------------------------------------------------------------------------------
651 // call do_page_fault (predicates are in r31, psr.dt may be off, r16 is faulting address)
652 #ifdef XEN
653 GLOBAL_ENTRY(page_fault)
654 #else
655 ENTRY(page_fault)
656 #endif
657 ssm psr.dt
658 ;;
659 srlz.i
660 ;;
661 SAVE_MIN_WITH_COVER
662 #ifdef XEN
663 alloc r15=ar.pfs,0,0,4,0
664 mov out0=cr.ifa
665 mov out1=cr.isr
666 mov out3=cr.itir
667 #else
668 alloc r15=ar.pfs,0,0,3,0
669 mov out0=cr.ifa
670 mov out1=cr.isr
671 #endif
672 adds r3=8,r2 // set up second base pointer
673 ;;
674 ssm psr.ic | PSR_DEFAULT_BITS
675 ;;
676 srlz.i // guarantee that interruption collection is on
677 ;;
678 (p15) ssm psr.i // restore psr.i
679 movl r14=ia64_leave_kernel
680 ;;
681 SAVE_REST
682 mov rp=r14
683 ;;
684 adds out2=16,r12 // out2 = pointer to pt_regs
685 br.call.sptk.many b6=ia64_do_page_fault // ignore return address
686 END(page_fault)
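// Under XEN, the out registers set up above imply a C-level handler of
// roughly this shape (prototype assumed from this call site, not quoted
// from a header):
//
//	void ia64_do_page_fault(unsigned long ifa, unsigned long isr,
//				struct pt_regs *regs, unsigned long itir);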
688 .org ia64_ivt+0x1c00
689 /////////////////////////////////////////////////////////////////////////////////////////
690 // 0x1c00 Entry 7 (size 64 bundles) Data Key Miss (12,51)
691 ENTRY(dkey_miss)
692 DBG_FAULT(7)
693 #ifdef XEN
694 REFLECT(7)
695 #endif
696 FAULT(7)
697 END(dkey_miss)
699 .org ia64_ivt+0x2000
700 /////////////////////////////////////////////////////////////////////////////////////////
701 // 0x2000 Entry 8 (size 64 bundles) Dirty-bit (54)
702 ENTRY(dirty_bit)
703 DBG_FAULT(8)
704 #ifdef XEN
705 REFLECT(8)
706 #endif
707 /*
708 * What we do here is to simply turn on the dirty bit in the PTE. We need to
709 * update both the page-table and the TLB entry. To efficiently access the PTE,
710 * we address it through the virtual page table. Most likely, the TLB entry for
711 * the relevant virtual page table page is still present in the TLB so we can
712 * normally do this without additional TLB misses. In case the necessary virtual
713 * page table TLB entry isn't present, we take a nested TLB miss hit where we look
714 * up the physical address of the L3 PTE and then continue at label 1 below.
715 */
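/*
 * The CONFIG_SMP path below is, in rough C (illustrative; cmpxchg/ptc_l
 * are stand-ins for the cmpxchg8.acq and ptc.l instructions):
 *
 *	old = *pte_va;				// via the virtual page table
 *	new = old | _PAGE_D | _PAGE_A;
 *	if (cmpxchg(pte_va, old, new) == old)
 *		itc_d(new);			// install the updated PTE
 *	if (*pte_va != new)			// lost a race with ptc.g?
 *		ptc_l(ifa, PAGE_SHIFT << 2);	// purge; the fault will retry
 */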
716 mov r16=cr.ifa // get the address that caused the fault
717 movl r30=1f // load continuation point in case of nested fault
718 ;;
719 thash r17=r16 // compute virtual address of L3 PTE
720 mov r29=b0 // save b0 in case of nested fault
721 mov r31=pr // save pr
722 #ifdef CONFIG_SMP
723 mov r28=ar.ccv // save ar.ccv
724 ;;
725 1: ld8 r18=[r17]
726 ;; // avoid RAW on r18
727 mov ar.ccv=r18 // set compare value for cmpxchg
728 or r25=_PAGE_D|_PAGE_A,r18 // set the dirty and accessed bits
729 ;;
730 cmpxchg8.acq r26=[r17],r25,ar.ccv
731 mov r24=PAGE_SHIFT<<2
732 ;;
733 cmp.eq p6,p7=r26,r18
734 ;;
735 (p6) itc.d r25 // install updated PTE
736 ;;
737 /*
738 * Tell the assembler's dependency-violation checker that the above "itc" instructions
739 * cannot possibly affect the following loads:
740 */
741 dv_serialize_data
743 ld8 r18=[r17] // read PTE again
744 ;;
745 cmp.eq p6,p7=r18,r25 // is it same as the newly installed
746 ;;
747 (p7) ptc.l r16,r24
748 mov b0=r29 // restore b0
749 mov ar.ccv=r28
750 #else
751 ;;
752 1: ld8 r18=[r17]
753 ;; // avoid RAW on r18
754 or r18=_PAGE_D|_PAGE_A,r18 // set the dirty and accessed bits
755 mov b0=r29 // restore b0
756 ;;
757 st8 [r17]=r18 // store back updated PTE
758 itc.d r18 // install updated PTE
759 #endif
760 mov pr=r31,-1 // restore pr
761 rfi
762 END(dirty_bit)
764 .org ia64_ivt+0x2400
765 /////////////////////////////////////////////////////////////////////////////////////////
766 // 0x2400 Entry 9 (size 64 bundles) Instruction Access-bit (27)
767 ENTRY(iaccess_bit)
768 DBG_FAULT(9)
769 #ifdef XEN
770 mov r31=pr;
771 mov r16=cr.isr
772 mov r17=cr.ifa
773 mov r19=9
774 movl r20=0x2400
775 br.sptk.many fast_access_reflect;;
776 #endif
777 // Like Entry 8, except for instruction access
778 mov r16=cr.ifa // get the address that caused the fault
779 movl r30=1f // load continuation point in case of nested fault
780 mov r31=pr // save predicates
781 #ifdef CONFIG_ITANIUM
782 /*
783 * Erratum 10 (IFA may contain incorrect address) has "NoFix" status.
784 */
785 mov r17=cr.ipsr
786 ;;
787 mov r18=cr.iip
788 tbit.z p6,p0=r17,IA64_PSR_IS_BIT // IA64 instruction set?
789 ;;
790 (p6) mov r16=r18 // if so, use cr.iip instead of cr.ifa
791 #endif /* CONFIG_ITANIUM */
792 ;;
793 thash r17=r16 // compute virtual address of L3 PTE
794 mov r29=b0 // save b0 in case of nested fault
795 #ifdef CONFIG_SMP
796 mov r28=ar.ccv // save ar.ccv
797 ;;
798 1: ld8 r18=[r17]
799 ;;
800 mov ar.ccv=r18 // set compare value for cmpxchg
801 or r25=_PAGE_A,r18 // set the accessed bit
802 ;;
803 cmpxchg8.acq r26=[r17],r25,ar.ccv
804 mov r24=PAGE_SHIFT<<2
805 ;;
806 cmp.eq p6,p7=r26,r18
807 ;;
808 (p6) itc.i r25 // install updated PTE
809 ;;
810 /*
811 * Tell the assembler's dependency-violation checker that the above "itc" instructions
812 * cannot possibly affect the following loads:
813 */
814 dv_serialize_data
816 ld8 r18=[r17] // read PTE again
817 ;;
818 cmp.eq p6,p7=r18,r25 // is it same as the newly installed
819 ;;
820 (p7) ptc.l r16,r24
821 mov b0=r29 // restore b0
822 mov ar.ccv=r28
823 #else /* !CONFIG_SMP */
824 ;;
825 1: ld8 r18=[r17]
826 ;;
827 or r18=_PAGE_A,r18 // set the accessed bit
828 mov b0=r29 // restore b0
829 ;;
830 st8 [r17]=r18 // store back updated PTE
831 itc.i r18 // install updated PTE
832 #endif /* !CONFIG_SMP */
833 mov pr=r31,-1
834 rfi
835 END(iaccess_bit)
837 .org ia64_ivt+0x2800
838 /////////////////////////////////////////////////////////////////////////////////////////
839 // 0x2800 Entry 10 (size 64 bundles) Data Access-bit (15,55)
840 ENTRY(daccess_bit)
841 DBG_FAULT(10)
842 #ifdef XEN
843 mov r31=pr;
844 mov r16=cr.isr
845 mov r17=cr.ifa
846 mov r19=10
847 movl r20=0x2800
848 br.sptk.many fast_access_reflect;;
849 #endif
850 // Like Entry 8, except for data access
851 mov r16=cr.ifa // get the address that caused the fault
852 movl r30=1f // load continuation point in case of nested fault
853 ;;
854 thash r17=r16 // compute virtual address of L3 PTE
855 mov r31=pr
856 mov r29=b0 // save b0 in case of nested fault
857 #ifdef CONFIG_SMP
858 mov r28=ar.ccv // save ar.ccv
859 ;;
860 1: ld8 r18=[r17]
861 ;; // avoid RAW on r18
862 mov ar.ccv=r18 // set compare value for cmpxchg
863 or r25=_PAGE_A,r18 // set the accessed bit
864 ;;
865 cmpxchg8.acq r26=[r17],r25,ar.ccv
866 mov r24=PAGE_SHIFT<<2
867 ;;
868 cmp.eq p6,p7=r26,r18
869 ;;
870 (p6) itc.d r25 // install updated PTE
871 /*
872 * Tell the assembler's dependency-violation checker that the above "itc" instructions
873 * cannot possibly affect the following loads:
874 */
875 dv_serialize_data
876 ;;
877 ld8 r18=[r17] // read PTE again
878 ;;
879 cmp.eq p6,p7=r18,r25 // is it same as the newly installed
880 ;;
881 (p7) ptc.l r16,r24
882 mov ar.ccv=r28
883 #else
884 ;;
885 1: ld8 r18=[r17]
886 ;; // avoid RAW on r18
887 or r18=_PAGE_A,r18 // set the accessed bit
888 ;;
889 st8 [r17]=r18 // store back updated PTE
890 itc.d r18 // install updated PTE
891 #endif
892 mov b0=r29 // restore b0
893 mov pr=r31,-1
894 rfi
895 END(daccess_bit)
897 .org ia64_ivt+0x2c00
898 /////////////////////////////////////////////////////////////////////////////////////////
899 // 0x2c00 Entry 11 (size 64 bundles) Break instruction (33)
900 ENTRY(break_fault)
901 /*
902 * The streamlined system call entry/exit paths only save/restore the initial part
903 * of pt_regs. This implies that the callers of system-calls must adhere to the
904 * normal procedure calling conventions.
905 *
906 * Registers to be saved & restored:
907 * CR registers: cr.ipsr, cr.iip, cr.ifs
908 * AR registers: ar.unat, ar.pfs, ar.rsc, ar.rnat, ar.bspstore, ar.fpsr
909 * others: pr, b0, b6, loadrs, r1, r11, r12, r13, r15
910 * Registers to be restored only:
911 * r8-r11: output value from the system call.
912 *
913 * During system call exit, scratch registers (including r15) are modified/cleared
914 * to prevent leaking bits from kernel to user level.
915 */
916 DBG_FAULT(11)
917 #ifdef XEN
918 mov r16=cr.isr
919 mov r17=cr.iim
920 mov r31=pr
921 ;;
922 cmp.eq p7,p0=r17,r0
923 (p7) br.spnt.few dispatch_break_fault ;;
924 #ifdef CRASH_DEBUG
925 // a panic can occur before domain0 is created;
926 // in that case referencing XSI_PSR_IC causes a nested_dtlb_miss
927 movl r18=CDB_BREAK_NUM ;;
928 cmp.eq p7,p0=r17,r18 ;;
929 (p7) br.spnt.few dispatch_break_fault ;;
930 #endif
931 movl r18=XSI_PSR_IC
932 ;;
933 ld8 r19=[r18]
934 ;;
935 cmp.eq p7,p0=r0,r17 // is this a pseudo-cover?
936 (p7) br.spnt.many dispatch_privop_fault
937 ;;
938 // if vpsr.ic is off, we have a hyperprivop
939 // A hyperprivop is hand-coded assembly with psr.ic off
940 // which means no calls, no use of r1-r15 and no memory accesses
941 // except to pinned addresses!
942 cmp4.eq p7,p0=r0,r19
943 (p7) br.sptk.many fast_hyperprivop
944 ;;
945 movl r22=THIS_CPU(cpu_kr)+IA64_KR_CURRENT_OFFSET;;
946 ld8 r22 = [r22]
947 ;;
948 adds r22=IA64_VCPU_BREAKIMM_OFFSET,r22;;
949 ld4 r23=[r22];;
950 cmp4.eq p6,p7=r23,r17 // Xen-reserved breakimm?
951 (p6) br.spnt.many dispatch_break_fault
952 ;;
953 br.sptk.many fast_break_reflect
954 ;;
955 #endif
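// In outline, the Xen-specific dispatch above is (illustrative C):
//
//	if (iim == 0)			dispatch_break_fault();
//	else if (!vpsr_ic)		fast_hyperprivop();	// psr.ic off
//	else if (iim == v->breakimm)	dispatch_break_fault();	// Xen breakimm
//	else				fast_break_reflect();	// reflect to guest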
956 movl r16=THIS_CPU(cpu_kr)+IA64_KR_CURRENT_OFFSET;;
957 ld8 r16=[r16]
958 mov r17=cr.iim
959 mov r18=__IA64_BREAK_SYSCALL
960 mov r21=ar.fpsr
961 mov r29=cr.ipsr
962 mov r19=b6
963 mov r25=ar.unat
964 mov r27=ar.rsc
965 mov r26=ar.pfs
966 mov r28=cr.iip
967 #ifndef XEN
968 mov r31=pr // prepare to save predicates
969 #endif
970 mov r20=r1
971 ;;
972 adds r16=IA64_TASK_THREAD_ON_USTACK_OFFSET,r16
973 cmp.eq p0,p7=r18,r17 // is this a system call? (p7 <- false, if so)
974 (p7) br.cond.spnt non_syscall
975 ;;
976 ld1 r17=[r16] // load current->thread.on_ustack flag
977 st1 [r16]=r0 // clear current->thread.on_ustack flag
978 add r1=-IA64_TASK_THREAD_ON_USTACK_OFFSET,r16 // set r1 for MINSTATE_START_SAVE_MIN_VIRT
979 ;;
980 invala
982 /* adjust return address so we skip over the break instruction: */
984 extr.u r8=r29,41,2 // extract ei field from cr.ipsr
985 ;;
986 cmp.eq p6,p7=2,r8 // ipsr.ei==2?
987 mov r2=r1 // setup r2 for ia64_syscall_setup
988 ;;
989 (p6) mov r8=0 // clear ei to 0
990 (p6) adds r28=16,r28 // point cr.iip at the next bundle (cr.ipsr.ei wrapped)
991 (p7) adds r8=1,r8 // increment ei to next slot
992 ;;
993 cmp.eq pKStk,pUStk=r0,r17 // are we in kernel mode already?
994 dep r29=r8,r29,41,2 // insert new ei into cr.ipsr
995 ;;
997 // switch from user to kernel RBS:
998 MINSTATE_START_SAVE_MIN_VIRT
999 br.call.sptk.many b7=ia64_syscall_setup
1000 ;;
1001 MINSTATE_END_SAVE_MIN_VIRT // switch to bank 1
1002 ssm psr.ic | PSR_DEFAULT_BITS
1003 ;;
1004 srlz.i // guarantee that interruption collection is on
1005 mov r3=NR_syscalls - 1
1006 ;;
1007 (p15) ssm psr.i // restore psr.i
1008 // p10==true means there are more than 8 out registers or r15's NaT is true
1009 (p10) br.cond.spnt.many ia64_ret_from_syscall
1010 ;;
1011 movl r16=sys_call_table
1013 adds r15=-1024,r15 // r15 contains the syscall number---subtract 1024
1014 movl r2=ia64_ret_from_syscall
1015 ;;
1016 shladd r20=r15,3,r16 // r20 = sys_call_table + 8*(syscall-1024)
1017 cmp.leu p6,p7=r15,r3 // (syscall > 0 && syscall < 1024 + NR_syscalls) ?
1018 mov rp=r2 // set the real return addr
1019 ;;
1020 (p6) ld8 r20=[r20] // load address of syscall entry point
1021 (p7) movl r20=sys_ni_syscall
1023 add r2=TI_FLAGS+IA64_TASK_SIZE,r13
1024 ;;
1025 ld4 r2=[r2] // r2 = current_thread_info()->flags
1026 ;;
1027 and r2=_TIF_SYSCALL_TRACEAUDIT,r2 // mask trace or audit
1028 ;;
1029 cmp.eq p8,p0=r2,r0
1030 mov b6=r20
1031 ;;
1032 (p8) br.call.sptk.many b6=b6 // ignore this return addr
1033 br.cond.sptk ia64_trace_syscall
1034 // NOT REACHED
1035 END(break_fault)
1037 .org ia64_ivt+0x3000
1038 /////////////////////////////////////////////////////////////////////////////////////////
1039 // 0x3000 Entry 12 (size 64 bundles) External Interrupt (4)
1040 ENTRY(interrupt)
1041 DBG_FAULT(12)
1042 mov r31=pr // prepare to save predicates
1043 ;;
1044 #ifdef XEN
1045 mov r30=cr.ivr // pass cr.ivr as first arg
1046 // FIXME: this is a hack... use cpuinfo.ksoftirqd because it's
1047 // not used anywhere else and we need a place to stash ivr, and
1048 // there are no registers left unused by SAVE_MIN/REST
1049 movl r29=THIS_CPU(cpu_info)+IA64_CPUINFO_KSOFTIRQD_OFFSET;;
1050 st8 [r29]=r30;;
1051 movl r28=slow_interrupt;;
1052 mov r29=rp;;
1053 mov rp=r28;;
1054 br.cond.sptk.many fast_tick_reflect
1055 ;;
1056 slow_interrupt:
1057 mov rp=r29;;
1058 #endif
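// i.e., roughly (illustrative C; the scratch slot really is
// cpuinfo.ksoftirqd, per the FIXME above):
//
//	this_cpu(cpu_info).ksoftirqd = read_cr_ivr();	// stash before SAVE_MIN
//	fast_tick_reflect();	// returns via slow_interrupt if the tick
//				// is not handled on the fast path
//	ia64_handle_irq(this_cpu(cpu_info).ksoftirqd, regs);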
1059 SAVE_MIN_WITH_COVER // uses r31; defines r2 and r3
1060 ssm psr.ic | PSR_DEFAULT_BITS
1061 ;;
1062 adds r3=8,r2 // set up second base pointer for SAVE_REST
1063 srlz.i // ensure everybody knows psr.ic is back on
1064 ;;
1065 SAVE_REST
1066 ;;
1067 alloc r14=ar.pfs,0,0,2,0 // must be first in an insn group
1068 #ifdef XEN
1069 movl out0=THIS_CPU(cpu_info)+IA64_CPUINFO_KSOFTIRQD_OFFSET;;
1070 ld8 out0=[out0];;
1071 #else
1072 mov out0=cr.ivr // pass cr.ivr as first arg
1073 #endif
1074 add out1=16,sp // pass pointer to pt_regs as second arg
1075 #ifndef XEN
1076 ;;
1077 srlz.d // make sure we see the effect of cr.ivr
1078 #endif
1079 movl r14=ia64_leave_kernel
1080 ;;
1081 mov rp=r14
1082 br.call.sptk.many b6=ia64_handle_irq
1083 END(interrupt)
1085 .org ia64_ivt+0x3400
1086 /////////////////////////////////////////////////////////////////////////////////////////
1087 // 0x3400 Entry 13 (size 64 bundles) Reserved
1088 DBG_FAULT(13)
1089 FAULT(13)
1091 #ifdef XEN
1092 // There is no particular reason for this code to be here, other than that
1093 // there happens to be space here that would go unused otherwise. If this
1094 // fault ever gets "unreserved", simply move the following code to a more
1095 // suitable spot...
1097 GLOBAL_ENTRY(dispatch_break_fault)
1098 SAVE_MIN_WITH_COVER
1099 ;;
1100 dispatch_break_fault_post_save:
1101 alloc r14=ar.pfs,0,0,4,0 // now it's safe (must be first in insn group!)
1102 mov out0=cr.ifa
1103 adds out1=16,sp
1104 mov out2=cr.isr // FIXME: pity to make this slow access twice
1105 mov out3=cr.iim // FIXME: pity to make this slow access twice
1107 ssm psr.ic | PSR_DEFAULT_BITS
1108 ;;
1109 srlz.i // guarantee that interruption collection is on
1110 ;;
1111 (p15) ssm psr.i // restore psr.i
1112 adds r3=8,r2 // set up second base pointer
1113 ;;
1114 SAVE_REST
1115 movl r14=ia64_leave_kernel
1116 ;;
1117 mov rp=r14
1118 // br.sptk.many ia64_prepare_handle_break
1119 br.call.sptk.many b6=ia64_handle_break
1120 END(dispatch_break_fault)
1121 #endif
1123 .org ia64_ivt+0x3800
1124 /////////////////////////////////////////////////////////////////////////////////////////
1125 // 0x3800 Entry 14 (size 64 bundles) Reserved
1126 DBG_FAULT(14)
1127 FAULT(14)
1129 /*
1130 * There is no particular reason for this code to be here, other than that
1131 * there happens to be space here that would go unused otherwise. If this
1132 * fault ever gets "unreserved", simply move the following code to a more
1133 * suitable spot...
1135 * ia64_syscall_setup() is a separate subroutine so that it can
1136 * allocate stacked registers so it can safely demine any
1137 * potential NaT values from the input registers.
1139 * On entry:
1140 * - executing on bank 0 or bank 1 register set (doesn't matter)
1141 * - r1: stack pointer
1142 * - r2: current task pointer
1143 * - r3: preserved
1144 * - r11: original contents (saved ar.pfs to be saved)
1145 * - r12: original contents (sp to be saved)
1146 * - r13: original contents (tp to be saved)
1147 * - r15: original contents (syscall # to be saved)
1148 * - r18: saved bsp (after switching to kernel stack)
1149 * - r19: saved b6
1150 * - r20: saved r1 (gp)
1151 * - r21: saved ar.fpsr
1152 * - r22: kernel's register backing store base (krbs_base)
1153 * - r23: saved ar.bspstore
1154 * - r24: saved ar.rnat
1155 * - r25: saved ar.unat
1156 * - r26: saved ar.pfs
1157 * - r27: saved ar.rsc
1158 * - r28: saved cr.iip
1159 * - r29: saved cr.ipsr
1160 * - r31: saved pr
1161 * - b0: original contents (to be saved)
1162 * On exit:
1163 * - executing on bank 1 registers
1164 * - psr.ic enabled, interrupts restored
1165 * - p10: TRUE if syscall is invoked with more than 8 out
1166 * registers or r15's Nat is true
1167 * - r1: kernel's gp
1168 * - r3: preserved (same as on entry)
1169 * - r8: -EINVAL if p10 is true
1170 * - r12: points to kernel stack
1171 * - r13: points to current task
1172 * - p15: TRUE if interrupts need to be re-enabled
1173 * - ar.fpsr: set to kernel settings
1174 */
1175 GLOBAL_ENTRY(ia64_syscall_setup)
1176 #ifndef XEN
1177 #if PT(B6) != 0
1178 # error This code assumes that b6 is the first field in pt_regs.
1179 #endif
1180 #endif
1181 st8 [r1]=r19 // save b6
1182 add r16=PT(CR_IPSR),r1 // initialize first base pointer
1183 add r17=PT(R11),r1 // initialize second base pointer
1184 ;;
1185 alloc r19=ar.pfs,8,0,0,0 // ensure in0-in7 are writable
1186 st8 [r16]=r29,PT(AR_PFS)-PT(CR_IPSR) // save cr.ipsr
1187 tnat.nz p8,p0=in0
1189 st8.spill [r17]=r11,PT(CR_IIP)-PT(R11) // save r11
1190 tnat.nz p9,p0=in1
1191 (pKStk) mov r18=r0 // make sure r18 isn't NaT
1192 ;;
1194 st8 [r16]=r26,PT(CR_IFS)-PT(AR_PFS) // save ar.pfs
1195 st8 [r17]=r28,PT(AR_UNAT)-PT(CR_IIP) // save cr.iip
1196 mov r28=b0 // save b0 (2 cyc)
1197 ;;
1199 st8 [r17]=r25,PT(AR_RSC)-PT(AR_UNAT) // save ar.unat
1200 dep r19=0,r19,38,26 // clear all bits but 0..37 [I0]
1201 (p8) mov in0=-1
1202 ;;
1204 st8 [r16]=r19,PT(AR_RNAT)-PT(CR_IFS) // store ar.pfs.pfm in cr.ifs
1205 extr.u r11=r19,7,7 // I0 // get sol of ar.pfs
1206 and r8=0x7f,r19 // A // get sof of ar.pfs
1208 st8 [r17]=r27,PT(AR_BSPSTORE)-PT(AR_RSC)// save ar.rsc
1209 tbit.nz p15,p0=r29,IA64_PSR_I_BIT // I0
1210 (p9) mov in1=-1
1211 ;;
1213 (pUStk) sub r18=r18,r22 // r18=RSE.ndirty*8
1214 tnat.nz p10,p0=in2
1215 add r11=8,r11
1216 ;;
1217 (pKStk) adds r16=PT(PR)-PT(AR_RNAT),r16 // skip over ar_rnat field
1218 (pKStk) adds r17=PT(B0)-PT(AR_BSPSTORE),r17 // skip over ar_bspstore field
1219 tnat.nz p11,p0=in3
1220 ;;
1221 (p10) mov in2=-1
1222 tnat.nz p12,p0=in4 // [I0]
1223 (p11) mov in3=-1
1224 ;;
1225 (pUStk) st8 [r16]=r24,PT(PR)-PT(AR_RNAT) // save ar.rnat
1226 (pUStk) st8 [r17]=r23,PT(B0)-PT(AR_BSPSTORE) // save ar.bspstore
1227 shl r18=r18,16 // compute ar.rsc to be used for "loadrs"
1228 ;;
1229 st8 [r16]=r31,PT(LOADRS)-PT(PR) // save predicates
1230 st8 [r17]=r28,PT(R1)-PT(B0) // save b0
1231 tnat.nz p13,p0=in5 // [I0]
1232 ;;
1233 st8 [r16]=r18,PT(R12)-PT(LOADRS) // save ar.rsc value for "loadrs"
1234 st8.spill [r17]=r20,PT(R13)-PT(R1) // save original r1
1235 (p12) mov in4=-1
1236 ;;
1238 .mem.offset 0,0; st8.spill [r16]=r12,PT(AR_FPSR)-PT(R12) // save r12
1239 .mem.offset 8,0; st8.spill [r17]=r13,PT(R15)-PT(R13) // save r13
1240 (p13) mov in5=-1
1241 ;;
1242 st8 [r16]=r21,PT(R8)-PT(AR_FPSR) // save ar.fpsr
1243 tnat.nz p14,p0=in6
1244 cmp.lt p10,p9=r11,r8 // frame size can't be more than local+8
1245 ;;
1246 stf8 [r16]=f1 // ensure pt_regs.r8 != 0 (see handle_syscall_error)
1247 (p9) tnat.nz p10,p0=r15
1248 adds r12=-16,r1 // switch to kernel memory stack (with 16 bytes of scratch)
1250 st8.spill [r17]=r15 // save r15
1251 tnat.nz p8,p0=in7
1252 nop.i 0
1254 mov r13=r2 // establish `current'
1255 movl r1=__gp // establish kernel global pointer
1256 ;;
1257 (p14) mov in6=-1
1258 (p8) mov in7=-1
1259 nop.i 0
1261 cmp.eq pSys,pNonSys=r0,r0 // set pSys=1, pNonSys=0
1262 movl r17=FPSR_DEFAULT
1263 ;;
1264 mov.m ar.fpsr=r17 // set ar.fpsr to kernel default value
1265 (p10) mov r8=-EINVAL
1266 br.ret.sptk.many b7
1267 END(ia64_syscall_setup)
1269 .org ia64_ivt+0x3c00
1270 /////////////////////////////////////////////////////////////////////////////////////////
1271 // 0x3c00 Entry 15 (size 64 bundles) Reserved
1272 DBG_FAULT(15)
1273 FAULT(15)
1275 /*
1276 * Squatting in this space ...
1278 * This special case dispatcher for illegal operation faults allows preserved
1279 * registers to be modified through a callback function (asm only) that is handed
1280 * back from the fault handler in r8. Up to three arguments can be passed to the
1281 * callback function by returning an aggregate with the callback as its first
1282 * element, followed by the arguments.
1283 */
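/*
 * Illustrative C shape of that contract (struct and field names assumed,
 * not taken from a header): the fault handler hands the callback back in
 * r8 and its arguments in r9-r11, and a zero callback means "no fixup":
 *
 *	struct illegal_op_return {
 *		void (*cb)(long, long, long);	// comes back in r8
 *		long arg0, arg1, arg2;		// r9, r10, r11
 *	};
 */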
1284 ENTRY(dispatch_illegal_op_fault)
1285 SAVE_MIN_WITH_COVER
1286 ssm psr.ic | PSR_DEFAULT_BITS
1287 ;;
1288 srlz.i // guarantee that interruption collection is on
1289 ;;
1290 (p15) ssm psr.i // restore psr.i
1291 adds r3=8,r2 // set up second base pointer for SAVE_REST
1292 ;;
1293 alloc r14=ar.pfs,0,0,1,0 // must be first in insn group
1294 mov out0=ar.ec
1295 ;;
1296 SAVE_REST
1297 ;;
1298 br.call.sptk.many rp=ia64_illegal_op_fault
1299 .ret0: ;;
1300 alloc r14=ar.pfs,0,0,3,0 // must be first in insn group
1301 mov out0=r9
1302 mov out1=r10
1303 mov out2=r11
1304 movl r15=ia64_leave_kernel
1305 ;;
1306 mov rp=r15
1307 mov b6=r8
1308 ;;
1309 cmp.ne p6,p0=0,r8
1310 (p6) br.call.dpnt.many b6=b6 // call returns to ia64_leave_kernel
1311 br.sptk.many ia64_leave_kernel
1312 END(dispatch_illegal_op_fault)
1314 .org ia64_ivt+0x4000
1315 /////////////////////////////////////////////////////////////////////////////////////////
1316 // 0x4000 Entry 16 (size 64 bundles) Reserved
1317 DBG_FAULT(16)
1318 FAULT(16)
1320 #ifdef XEN
1321 // There is no particular reason for this code to be here, other than that
1322 // there happens to be space here that would go unused otherwise. If this
1323 // fault ever gets "unreserved", simply move the following code to a more
1324 // suitable spot...
1326 ENTRY(dispatch_privop_fault)
1327 SAVE_MIN_WITH_COVER
1328 ;;
1329 alloc r14=ar.pfs,0,0,4,0 // now it's safe (must be first in insn group!)
1330 mov out0=cr.ifa
1331 adds out1=16,sp
1332 mov out2=cr.isr // FIXME: pity to make this slow access twice
1333 mov out3=cr.itir
1335 ssm psr.ic | PSR_DEFAULT_BITS
1336 ;;
1337 srlz.i // guarantee that interruption collection is on
1338 ;;
1339 (p15) ssm psr.i // restore psr.i
1340 adds r3=8,r2 // set up second base pointer
1341 ;;
1342 SAVE_REST
1343 movl r14=ia64_leave_kernel
1344 ;;
1345 mov rp=r14
1346 // br.sptk.many ia64_prepare_handle_privop
1347 br.call.sptk.many b6=ia64_handle_privop
1348 END(dispatch_privop_fault)
1349 #endif
1352 .org ia64_ivt+0x4400
1353 /////////////////////////////////////////////////////////////////////////////////////////
1354 // 0x4400 Entry 17 (size 64 bundles) Reserved
1355 DBG_FAULT(17)
1356 FAULT(17)
1358 ENTRY(non_syscall)
1359 SAVE_MIN_WITH_COVER
1361 // There is no particular reason for this code to be here, other than that
1362 // there happens to be space here that would go unused otherwise. If this
1363 // fault ever gets "unreserved", simply move the following code to a more
1364 // suitable spot...
1366 alloc r14=ar.pfs,0,0,2,0
1367 mov out0=cr.iim
1368 add out1=16,sp
1369 adds r3=8,r2 // set up second base pointer for SAVE_REST
1371 ssm psr.ic | PSR_DEFAULT_BITS
1372 ;;
1373 srlz.i // guarantee that interruption collection is on
1374 ;;
1375 (p15) ssm psr.i // restore psr.i
1376 movl r15=ia64_leave_kernel
1377 ;;
1378 SAVE_REST
1379 mov rp=r15
1380 ;;
1381 br.call.sptk.many b6=ia64_bad_break // avoid WAW on CFM and ignore return addr
1382 END(non_syscall)
1384 .org ia64_ivt+0x4800
1385 /////////////////////////////////////////////////////////////////////////////////////////
1386 // 0x4800 Entry 18 (size 64 bundles) Reserved
1387 DBG_FAULT(18)
1388 FAULT(18)
1390 /*
1391 * There is no particular reason for this code to be here, other than that
1392 * there happens to be space here that would go unused otherwise. If this
1393 * fault ever gets "unreserved", simply move the following code to a more
1394 * suitable spot...
1395 */
1397 ENTRY(dispatch_unaligned_handler)
1398 SAVE_MIN_WITH_COVER
1399 ;;
1400 alloc r14=ar.pfs,0,0,2,0 // now it's safe (must be first in insn group!)
1401 mov out0=cr.ifa
1402 adds out1=16,sp
1404 ssm psr.ic | PSR_DEFAULT_BITS
1405 ;;
1406 srlz.i // guarantee that interruption collection is on
1407 ;;
1408 (p15) ssm psr.i // restore psr.i
1409 adds r3=8,r2 // set up second base pointer
1410 ;;
1411 SAVE_REST
1412 movl r14=ia64_leave_kernel
1413 ;;
1414 mov rp=r14
1415 // br.sptk.many ia64_prepare_handle_unaligned
1416 br.call.sptk.many b6=ia64_handle_unaligned
1417 END(dispatch_unaligned_handler)
1419 .org ia64_ivt+0x4c00
1420 /////////////////////////////////////////////////////////////////////////////////////////
1421 // 0x4c00 Entry 19 (size 64 bundles) Reserved
1422 DBG_FAULT(19)
1423 FAULT(19)
1425 /*
1426 * There is no particular reason for this code to be here, other than that
1427 * there happens to be space here that would go unused otherwise. If this
1428 * fault ever gets "unreserved", simply move the following code to a more
1429 * suitable spot...
1430 */
1432 ENTRY(dispatch_to_fault_handler)
1433 /*
1434 * Input:
1435 * psr.ic: off
1436 * r19: fault vector number (e.g., 24 for General Exception)
1437 * r31: contains saved predicates (pr)
1438 */
1439 SAVE_MIN_WITH_COVER_R19
1440 alloc r14=ar.pfs,0,0,5,0
1441 mov out0=r15
1442 mov out1=cr.isr
1443 mov out2=cr.ifa
1444 mov out3=cr.iim
1445 mov out4=cr.itir
1446 ;;
1447 ssm psr.ic | PSR_DEFAULT_BITS
1448 ;;
1449 srlz.i // guarantee that interruption collection is on
1450 ;;
1451 (p15) ssm psr.i // restore psr.i
1452 adds r3=8,r2 // set up second base pointer for SAVE_REST
1453 ;;
1454 SAVE_REST
1455 movl r14=ia64_leave_kernel
1456 ;;
1457 mov rp=r14
1458 br.call.sptk.many b6=ia64_fault
1459 END(dispatch_to_fault_handler)
1461 //
1462 // --- End of long entries, Beginning of short entries
1463 //
1465 .org ia64_ivt+0x5000
1466 /////////////////////////////////////////////////////////////////////////////////////////
1467 // 0x5000 Entry 20 (size 16 bundles) Page Not Present (10,22,49)
1468 ENTRY(page_not_present)
1469 DBG_FAULT(20)
1470 #ifdef XEN
1471 REFLECT(20)
1472 #endif
1473 mov r16=cr.ifa
1474 rsm psr.dt
1475 /*
1476 * The Linux page fault handler doesn't expect non-present pages to be in
1477 * the TLB. Flush the existing entry now, so we meet that expectation.
1478 */
1479 mov r17=PAGE_SHIFT<<2
1480 ;;
1481 ptc.l r16,r17
1482 ;;
1483 mov r31=pr
1484 srlz.d
1485 br.sptk.many page_fault
1486 END(page_not_present)
1488 .org ia64_ivt+0x5100
1489 /////////////////////////////////////////////////////////////////////////////////////////
1490 // 0x5100 Entry 21 (size 16 bundles) Key Permission (13,25,52)
1491 ENTRY(key_permission)
1492 DBG_FAULT(21)
1493 #ifdef XEN
1494 REFLECT(21)
1495 #endif
1496 mov r16=cr.ifa
1497 rsm psr.dt
1498 mov r31=pr
1499 ;;
1500 srlz.d
1501 br.sptk.many page_fault
1502 END(key_permission)
1504 .org ia64_ivt+0x5200
1505 /////////////////////////////////////////////////////////////////////////////////////////
1506 // 0x5200 Entry 22 (size 16 bundles) Instruction Access Rights (26)
1507 ENTRY(iaccess_rights)
1508 DBG_FAULT(22)
1509 #ifdef XEN
1510 REFLECT(22)
1511 #endif
1512 mov r16=cr.ifa
1513 rsm psr.dt
1514 mov r31=pr
1515 ;;
1516 srlz.d
1517 br.sptk.many page_fault
1518 END(iaccess_rights)
1520 .org ia64_ivt+0x5300
1521 /////////////////////////////////////////////////////////////////////////////////////////
1522 // 0x5300 Entry 23 (size 16 bundles) Data Access Rights (14,53)
1523 ENTRY(daccess_rights)
1524 DBG_FAULT(23)
1525 #ifdef XEN
1526 mov r31=pr;
1527 mov r16=cr.isr
1528 mov r17=cr.ifa
1529 mov r19=23
1530 movl r20=0x5300
1531 br.sptk.many fast_access_reflect;;
1532 #endif
1533 mov r16=cr.ifa
1534 rsm psr.dt
1535 mov r31=pr
1536 ;;
1537 srlz.d
1538 br.sptk.many page_fault
1539 END(daccess_rights)
1541 .org ia64_ivt+0x5400
1542 /////////////////////////////////////////////////////////////////////////////////////////
1543 // 0x5400 Entry 24 (size 16 bundles) General Exception (5,32,34,36,38,39)
1544 ENTRY(general_exception)
1545 DBG_FAULT(24)
1546 mov r16=cr.isr
1547 mov r31=pr
1548 ;;
1549 #ifdef XEN
1550 cmp4.ge p6,p0=0x20,r16
1551 (p6) br.sptk.many dispatch_privop_fault
1552 #else
1553 cmp4.eq p6,p0=0,r16
1554 (p6) br.sptk.many dispatch_illegal_op_fault
1555 #endif
1556 ;;
1557 mov r19=24 // fault number
1558 br.sptk.many dispatch_to_fault_handler
1559 END(general_exception)
1561 .org ia64_ivt+0x5500
1562 /////////////////////////////////////////////////////////////////////////////////////////
1563 // 0x5500 Entry 25 (size 16 bundles) Disabled FP-Register (35)
1564 ENTRY(disabled_fp_reg)
1565 DBG_FAULT(25)
1566 #ifdef XEN
1567 #if 0
1568 mov r20=pr
1569 movl r16=0x2000000000000000
1570 movl r17=0x2000000000176b60
1571 mov r18=cr.iip
1572 mov r19=rr[r16]
1573 movl r22=0xe95d0439
1574 ;;
1575 mov pr=r0,-1
1576 ;;
1577 cmp.eq p6,p7=r22,r19
1578 ;;
1579 (p6) cmp.eq p8,p9=r17,r18
1580 (p8) br.sptk.few floating_panic
1581 ;;
1582 mov pr=r20,-1
1583 ;;
1584 #endif
1585 REFLECT(25)
1586 //floating_panic:
1587 // br.sptk.many floating_panic
1588 ;;
1589 #endif
1590 rsm psr.dfh // ensure we can access fph
1591 ;;
1592 srlz.d
1593 mov r31=pr
1594 mov r19=25
1595 br.sptk.many dispatch_to_fault_handler
1596 END(disabled_fp_reg)
1598 .org ia64_ivt+0x5600
1599 /////////////////////////////////////////////////////////////////////////////////////////
1600 // 0x5600 Entry 26 (size 16 bundles) Nat Consumption (11,23,37,50)
1601 ENTRY(nat_consumption)
1602 DBG_FAULT(26)
1603 #ifdef XEN
1604 REFLECT(26)
1605 #endif
1606 FAULT(26)
1607 END(nat_consumption)
1609 .org ia64_ivt+0x5700
1610 /////////////////////////////////////////////////////////////////////////////////////////
1611 // 0x5700 Entry 27 (size 16 bundles) Speculation (40)
1612 ENTRY(speculation_vector)
1613 DBG_FAULT(27)
1614 #ifdef XEN
1615 // this probably need not reflect...
1616 REFLECT(27)
1617 #endif
1618 /*
1619 * A [f]chk.[as] instruction needs to take the branch to the recovery code but
1620 * this part of the architecture is not implemented in hardware on some CPUs, such
1621 * as Itanium. Thus, in general we need to emulate the behavior. IIM contains
1622 * the relative target (not yet sign extended). So after sign extending it we
1623 * simply add it to IIP. We also need to reset the EI field of the IPSR to zero,
1624 * i.e., the slot to restart into.
1626 * cr.iim contains zero_ext(imm21)
1627 */
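/*
 * The shift pair below computes, in C terms:
 *
 *	offset = ((long)(iim << 43)) >> 39;	// sign-extend imm21, scale by 16
 *	iip   += offset;			// bundle-granular branch target
 *	ipsr  &= ~(3UL << 41);			// clear EI: restart in slot 0
 */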
1628 mov r18=cr.iim
1629 ;;
1630 mov r17=cr.iip
1631 shl r18=r18,43 // put sign bit in position (43=64-21)
1632 ;;
1634 mov r16=cr.ipsr
1635 shr r18=r18,39 // sign extend (39=43-4)
1636 ;;
1638 add r17=r17,r18 // now add the offset
1639 ;;
1640 mov cr.iip=r17
1641 dep r16=0,r16,41,2 // clear EI
1642 ;;
1644 mov cr.ipsr=r16
1645 ;;
1647 rfi // and go back
1648 END(speculation_vector)
1650 .org ia64_ivt+0x5800
1651 /////////////////////////////////////////////////////////////////////////////////////////
1652 // 0x5800 Entry 28 (size 16 bundles) Reserved
1653 DBG_FAULT(28)
1654 FAULT(28)
1656 .org ia64_ivt+0x5900
1657 /////////////////////////////////////////////////////////////////////////////////////////
1658 // 0x5900 Entry 29 (size 16 bundles) Debug (16,28,56)
1659 ENTRY(debug_vector)
1660 DBG_FAULT(29)
1661 #ifdef XEN
1662 REFLECT(29)
1663 #endif
1664 FAULT(29)
1665 END(debug_vector)
1667 .org ia64_ivt+0x5a00
1668 /////////////////////////////////////////////////////////////////////////////////////////
1669 // 0x5a00 Entry 30 (size 16 bundles) Unaligned Reference (57)
1670 ENTRY(unaligned_access)
1671 DBG_FAULT(30)
1672 #ifdef XEN
1673 REFLECT(30)
1674 #endif
1675 mov r16=cr.ipsr
1676 mov r31=pr // prepare to save predicates
1677 ;;
1678 br.sptk.many dispatch_unaligned_handler
1679 END(unaligned_access)
1681 .org ia64_ivt+0x5b00
1682 /////////////////////////////////////////////////////////////////////////////////////////
1683 // 0x5b00 Entry 31 (size 16 bundles) Unsupported Data Reference (57)
1684 ENTRY(unsupported_data_reference)
1685 DBG_FAULT(31)
1686 #ifdef XEN
1687 REFLECT(31)
1688 #endif
1689 FAULT(31)
1690 END(unsupported_data_reference)
1692 .org ia64_ivt+0x5c00
1693 /////////////////////////////////////////////////////////////////////////////////////////
1694 // 0x5c00 Entry 32 (size 16 bundles) Floating-Point Fault (64)
1695 ENTRY(floating_point_fault)
1696 DBG_FAULT(32)
1697 #ifdef XEN
1698 REFLECT(32)
1699 #endif
1700 FAULT(32)
1701 END(floating_point_fault)
1703 .org ia64_ivt+0x5d00
1704 /////////////////////////////////////////////////////////////////////////////////////////
1705 // 0x5d00 Entry 33 (size 16 bundles) Floating Point Trap (66)
1706 ENTRY(floating_point_trap)
1707 DBG_FAULT(33)
1708 #ifdef XEN
1709 REFLECT(33)
1710 #endif
1711 FAULT(33)
1712 END(floating_point_trap)
1714 .org ia64_ivt+0x5e00
1715 /////////////////////////////////////////////////////////////////////////////////////////
1716 // 0x5e00 Entry 34 (size 16 bundles) Lower Privilege Transfer Trap (66)
1717 ENTRY(lower_privilege_trap)
1718 DBG_FAULT(34)
1719 #ifdef XEN
1720 REFLECT(34)
1721 #endif
1722 FAULT(34)
1723 END(lower_privilege_trap)
1725 .org ia64_ivt+0x5f00
1726 /////////////////////////////////////////////////////////////////////////////////////////
1727 // 0x5f00 Entry 35 (size 16 bundles) Taken Branch Trap (68)
1728 ENTRY(taken_branch_trap)
1729 DBG_FAULT(35)
1730 #ifdef XEN
1731 REFLECT(35)
1732 #endif
1733 FAULT(35)
1734 END(taken_branch_trap)
1736 .org ia64_ivt+0x6000
1737 /////////////////////////////////////////////////////////////////////////////////////////
1738 // 0x6000 Entry 36 (size 16 bundles) Single Step Trap (69)
1739 ENTRY(single_step_trap)
1740 DBG_FAULT(36)
1741 #ifdef XEN
1742 REFLECT(36)
1743 #endif
1744 FAULT(36)
1745 END(single_step_trap)
1747 .org ia64_ivt+0x6100
1748 /////////////////////////////////////////////////////////////////////////////////////////
1749 // 0x6100 Entry 37 (size 16 bundles) Reserved
1750 DBG_FAULT(37)
1751 FAULT(37)
1753 .org ia64_ivt+0x6200
1754 /////////////////////////////////////////////////////////////////////////////////////////
1755 // 0x6200 Entry 38 (size 16 bundles) Reserved
1756 DBG_FAULT(38)
1757 FAULT(38)
1759 .org ia64_ivt+0x6300
1760 /////////////////////////////////////////////////////////////////////////////////////////
1761 // 0x6300 Entry 39 (size 16 bundles) Reserved
1762 DBG_FAULT(39)
1763 FAULT(39)
1765 .org ia64_ivt+0x6400
1766 /////////////////////////////////////////////////////////////////////////////////////////
1767 // 0x6400 Entry 40 (size 16 bundles) Reserved
1768 DBG_FAULT(40)
1769 FAULT(40)
1771 .org ia64_ivt+0x6500
1772 /////////////////////////////////////////////////////////////////////////////////////////
1773 // 0x6500 Entry 41 (size 16 bundles) Reserved
1774 DBG_FAULT(41)
1775 FAULT(41)
1777 .org ia64_ivt+0x6600
1778 /////////////////////////////////////////////////////////////////////////////////////////
1779 // 0x6600 Entry 42 (size 16 bundles) Reserved
1780 DBG_FAULT(42)
1781 FAULT(42)
1783 .org ia64_ivt+0x6700
1784 /////////////////////////////////////////////////////////////////////////////////////////
1785 // 0x6700 Entry 43 (size 16 bundles) Reserved
1786 DBG_FAULT(43)
1787 FAULT(43)
1789 .org ia64_ivt+0x6800
1790 /////////////////////////////////////////////////////////////////////////////////////////
1791 // 0x6800 Entry 44 (size 16 bundles) Reserved
1792 DBG_FAULT(44)
1793 FAULT(44)
1795 .org ia64_ivt+0x6900
1796 /////////////////////////////////////////////////////////////////////////////////////////
1797 // 0x6900 Entry 45 (size 16 bundles) IA-32 Exception (17,18,29,41,42,43,44,58,60,61,62,72,73,75,76,77)
1798 ENTRY(ia32_exception)
1799 DBG_FAULT(45)
1800 #ifdef XEN
1801 REFLECT(45)
1802 #endif
1803 FAULT(45)
1804 END(ia32_exception)

	.org ia64_ivt+0x6a00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6a00 Entry 46 (size 16 bundles) IA-32 Intercept (30,31,59,70,71)
ENTRY(ia32_intercept)
	DBG_FAULT(46)
#ifdef XEN
	REFLECT(46)
#endif
#ifdef CONFIG_IA32_SUPPORT
	mov r31=pr
	mov r16=cr.isr
	;;
	extr.u r17=r16,16,8	// get ISR.code
	mov r18=ar.eflag
	mov r19=cr.iim		// old eflag value
	;;
	cmp.ne p6,p0=2,r17
(p6)	br.cond.spnt 1f		// not a system flag fault
	xor r16=r18,r19
	;;
	extr.u r17=r16,18,1	// get the eflags.ac bit
	;;
	cmp.eq p6,p0=0,r17
(p6)	br.cond.spnt 1f		// eflags.ac bit didn't change
	;;
	mov pr=r31,-1		// restore predicate registers
	rfi

1:
#endif	// CONFIG_IA32_SUPPORT
	FAULT(46)
END(ia32_intercept)
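
	/*
	 * The CONFIG_IA32_SUPPORT path above swallows one benign intercept:
	 * a "system flag" fault (ISR.code == 2) in which the eflags.ac bit
	 * (bit 18) changed. That case is resumed directly with rfi; every
	 * other intercept falls through to FAULT(46). A hedged C sketch of
	 * the test (the helper name is illustrative):
	 *
	 *	int ignore_ia32_intercept(unsigned long isr,
	 *	                          unsigned long new_eflags, // ar.eflag
	 *	                          unsigned long old_eflags) // cr.iim
	 *	{
	 *		unsigned long code = (isr >> 16) & 0xff;
	 *		unsigned long changed = new_eflags ^ old_eflags;
	 *		return code == 2 && ((changed >> 18) & 1);
	 *	}
	 */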

	.org ia64_ivt+0x6b00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6b00 Entry 47 (size 16 bundles) IA-32 Interrupt (74)
ENTRY(ia32_interrupt)
	DBG_FAULT(47)
#ifdef XEN
	REFLECT(47)
#endif
#ifdef CONFIG_IA32_SUPPORT
	mov r31=pr
	br.sptk.many dispatch_to_ia32_handler
#else
	FAULT(47)
#endif
END(ia32_interrupt)

	.org ia64_ivt+0x6c00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6c00 Entry 48 (size 16 bundles) Reserved
	DBG_FAULT(48)
	FAULT(48)

	.org ia64_ivt+0x6d00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6d00 Entry 49 (size 16 bundles) Reserved
	DBG_FAULT(49)
	FAULT(49)

	.org ia64_ivt+0x6e00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6e00 Entry 50 (size 16 bundles) Reserved
	DBG_FAULT(50)
	FAULT(50)

	.org ia64_ivt+0x6f00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6f00 Entry 51 (size 16 bundles) Reserved
	DBG_FAULT(51)
	FAULT(51)

	.org ia64_ivt+0x7000
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7000 Entry 52 (size 16 bundles) Reserved
	DBG_FAULT(52)
	FAULT(52)

	.org ia64_ivt+0x7100
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7100 Entry 53 (size 16 bundles) Reserved
	DBG_FAULT(53)
	FAULT(53)

	.org ia64_ivt+0x7200
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7200 Entry 54 (size 16 bundles) Reserved
	DBG_FAULT(54)
	FAULT(54)

	.org ia64_ivt+0x7300
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7300 Entry 55 (size 16 bundles) Reserved
	DBG_FAULT(55)
	FAULT(55)

	.org ia64_ivt+0x7400
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7400 Entry 56 (size 16 bundles) Reserved
	DBG_FAULT(56)
	FAULT(56)

	.org ia64_ivt+0x7500
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7500 Entry 57 (size 16 bundles) Reserved
	DBG_FAULT(57)
	FAULT(57)

	.org ia64_ivt+0x7600
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7600 Entry 58 (size 16 bundles) Reserved
	DBG_FAULT(58)
	FAULT(58)

	.org ia64_ivt+0x7700
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7700 Entry 59 (size 16 bundles) Reserved
	DBG_FAULT(59)
	FAULT(59)

	.org ia64_ivt+0x7800
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7800 Entry 60 (size 16 bundles) Reserved
	DBG_FAULT(60)
	FAULT(60)

	.org ia64_ivt+0x7900
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7900 Entry 61 (size 16 bundles) Reserved
	DBG_FAULT(61)
	FAULT(61)

	.org ia64_ivt+0x7a00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7a00 Entry 62 (size 16 bundles) Reserved
	DBG_FAULT(62)
	FAULT(62)

	.org ia64_ivt+0x7b00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7b00 Entry 63 (size 16 bundles) Reserved
	DBG_FAULT(63)
	FAULT(63)

	.org ia64_ivt+0x7c00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7c00 Entry 64 (size 16 bundles) Reserved
	DBG_FAULT(64)
	FAULT(64)

	.org ia64_ivt+0x7d00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7d00 Entry 65 (size 16 bundles) Reserved
	DBG_FAULT(65)
	FAULT(65)

	.org ia64_ivt+0x7e00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7e00 Entry 66 (size 16 bundles) Reserved
	DBG_FAULT(66)
	FAULT(66)

	.org ia64_ivt+0x7f00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7f00 Entry 67 (size 16 bundles) Reserved
	DBG_FAULT(67)
	FAULT(67)

#ifdef XEN
	.org ia64_ivt+0x8000
GLOBAL_ENTRY(dispatch_reflection)
	/*
	 * Input:
	 *	psr.ic:	off
	 *	r19:	intr type (offset into ivt, see ia64_int.h)
	 *	r31:	contains saved predicates (pr)
	 */
	SAVE_MIN_WITH_COVER_R19
	alloc r14=ar.pfs,0,0,5,0
	mov out4=r15
	mov out0=cr.ifa
	adds out1=16,sp
	mov out2=cr.isr
	mov out3=cr.iim
//	mov out3=cr.itir

	ssm psr.ic | PSR_DEFAULT_BITS
	;;
	srlz.i			// guarantee that interruption collection is on
	;;
(p15)	ssm psr.i		// restore psr.i
	adds r3=8,r2		// set up second base pointer
	;;
	SAVE_REST
	movl r14=ia64_leave_kernel
	;;
	mov rp=r14
//	br.sptk.many ia64_prepare_handle_reflection
	br.call.sptk.many b6=ia64_handle_reflection
END(dispatch_reflection)
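
	/*
	 * dispatch_reflection builds a pt_regs frame (SAVE_MIN/SAVE_REST),
	 * re-enables interruption collection, and calls into C with rp
	 * pointed at ia64_leave_kernel. From the out0..out4 setup above,
	 * the handler receives the faulting address, a pointer to the
	 * saved pt_regs, cr.isr, cr.iim, and a fifth argument taken from
	 * r15. A hedged sketch of the expected prototype (parameter names
	 * are illustrative):
	 *
	 *	void ia64_handle_reflection(unsigned long ifa,
	 *	                            struct pt_regs *regs,
	 *	                            unsigned long isr,
	 *	                            unsigned long iim,
	 *	                            unsigned long vector);
	 */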

#define SAVE_MIN_COVER_DONE	DO_SAVE_MIN(,mov r30=cr.ifs,)

// same as dispatch_break_fault except cover has already been done
GLOBAL_ENTRY(dispatch_slow_hyperprivop)
	SAVE_MIN_COVER_DONE
	;;
	br.sptk.many dispatch_break_fault_post_save
END(dispatch_slow_hyperprivop)
#endif

#ifdef CONFIG_IA32_SUPPORT

	/*
	 * There is no particular reason for this code to be here, other than
	 * that there happens to be space here that would go unused otherwise.
	 * If this fault ever gets "unreserved", simply move the following
	 * code to a more suitable spot...
	 */

	// IA32 interrupt entry point

ENTRY(dispatch_to_ia32_handler)
	SAVE_MIN
	;;
	mov r14=cr.isr
	ssm psr.ic | PSR_DEFAULT_BITS
	;;
	srlz.i			// guarantee that interruption collection is on
	;;
(p15)	ssm psr.i
	adds r3=8,r2		// Base pointer for SAVE_REST
	;;
	SAVE_REST
	;;
	mov r15=0x80
	shr r14=r14,16		// Get interrupt number
	;;
	cmp.ne p6,p0=r14,r15
(p6)	br.call.dpnt.many b6=non_ia32_syscall

	adds r14=IA64_PT_REGS_R8_OFFSET + 16,sp	// 16 byte hole per SW conventions
	adds r15=IA64_PT_REGS_R1_OFFSET + 16,sp
	;;
	cmp.eq pSys,pNonSys=r0,r0	// set pSys=1, pNonSys=0
	ld8 r8=[r14]		// get r8
	;;
	st8 [r15]=r8		// save original EAX in r1 (IA32 procs don't use the GP)
	;;
	alloc r15=ar.pfs,0,0,6,0	// must be first in an insn group
	;;
	ld4 r8=[r14],8		// r8 == eax (syscall number)
	mov r15=IA32_NR_syscalls
	;;
	cmp.ltu.unc p6,p7=r8,r15
	ld4 out1=[r14],8	// r9 == ecx
	;;
	ld4 out2=[r14],8	// r10 == edx
	;;
	ld4 out0=[r14]		// r11 == ebx
	adds r14=(IA64_PT_REGS_R13_OFFSET) + 16,sp
	;;
	ld4 out5=[r14],PT(R14)-PT(R13)	// r13 == ebp
	;;
	ld4 out3=[r14],PT(R15)-PT(R14)	// r14 == esi
	adds r2=TI_FLAGS+IA64_TASK_SIZE,r13
	;;
	ld4 out4=[r14]		// r15 == edi
	movl r16=ia32_syscall_table
	;;
(p6)	shladd r16=r8,3,r16	// force ni_syscall if not valid syscall number
	ld4 r2=[r2]		// r2 = current_thread_info()->flags
	;;
	ld8 r16=[r16]
	and r2=_TIF_SYSCALL_TRACEAUDIT,r2	// mask trace or audit
	;;
	mov b6=r16
	movl r15=ia32_ret_from_syscall
	cmp.eq p8,p0=r2,r0
	;;
	mov rp=r15
(p8)	br.call.sptk.many b6=b6
	br.cond.sptk ia32_trace_syscall

non_ia32_syscall:
	alloc r15=ar.pfs,0,0,2,0
	mov out0=r14		// interrupt #
	add out1=16,sp		// pointer to pt_regs
	;;			// avoid WAW on CFM
	br.call.sptk.many rp=ia32_bad_interrupt
.ret1:	movl r15=ia64_leave_kernel
	;;
	mov rp=r15
	br.ret.sptk.many rp
END(dispatch_to_ia32_handler)
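
	/*
	 * In rough C terms, the IA-32 system call path above does the
	 * following (a hedged sketch; the helpers and the call through
	 * ia32_trace_syscall are illustrative, not the real code):
	 *
	 *	if (intr_num != 0x80)
	 *		return ia32_bad_interrupt(intr_num, regs);
	 *	// eax holds the syscall number; an out-of-range number
	 *	// leaves r16 at the table base, i.e. entry 0 (ni_syscall),
	 *	// because the shladd above is predicated on p6
	 *	fn = ia32_syscall_table[eax < IA32_NR_syscalls ? eax : 0];
	 *	if (current_thread_info()->flags & _TIF_SYSCALL_TRACEAUDIT)
	 *		ia32_trace_syscall();
	 *	else
	 *		fn(ebx, ecx, edx, esi, edi, ebp);  // rp = ia32_ret_from_syscall
	 */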

#endif /* CONFIG_IA32_SUPPORT */