ia64/xen-unstable

xen/arch/ia64/xen/ivt.S @ 13435:24ce556e3049

[IA64] Add privilege check back for hypercall

A hypercall is allowed only when cpl == 2.
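The check lands in the fast break path (break_fault, entry 11 below):
psr.cpl is extracted from cr.ipsr and the fast hyperprivop/hypercall
exits are taken only when cpl == 2; everything else is reflected back
to the guest. Roughly, as a C sketch of that assembly (illustrative,
not literal hypervisor code):

    unsigned int cpl = (ipsr >> IA64_PSR_CPL0_BIT) & 3;

    if (cpl == 2 && iim - HYPERPRIVOP_START < HYPERPRIVOP_MAX)
            fast_hyperprivop();
    else if (cpl == 2 && iim == vcpu->breakimm)
            fast_hypercall();
    else
            fast_break_reflect();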

Signed-off-by: Anthony Xu <anthony.xu@intel.com>
author awilliam@xenbuild2.aw
date Tue Jan 16 11:22:44 2007 -0700
parents 01ea554f1c5e
children 7476a0ea8ee4
1 #ifdef XEN
2 #include <asm/debugger.h>
3 #include <asm/vhpt.h>
4 #include <public/arch-ia64.h>
5 #include <asm/config.h>
6 #endif
7 /*
8 * arch/ia64/kernel/ivt.S
9 *
10 * Copyright (C) 1998-2001, 2003 Hewlett-Packard Co
11 * Stephane Eranian <eranian@hpl.hp.com>
12 * David Mosberger <davidm@hpl.hp.com>
13 * Copyright (C) 2000, 2002-2003 Intel Co
14 * Asit Mallick <asit.k.mallick@intel.com>
15 * Suresh Siddha <suresh.b.siddha@intel.com>
16 * Kenneth Chen <kenneth.w.chen@intel.com>
17 * Fenghua Yu <fenghua.yu@intel.com>
18 *
19 * 00/08/23 Asit Mallick <asit.k.mallick@intel.com> TLB handling for SMP
20 * 00/12/20 David Mosberger-Tang <davidm@hpl.hp.com> DTLB/ITLB handler now
21 * uses virtual PT.
22 */
23 /*
24 * This file defines the interruption vector table used by the CPU.
25 * It does not include one entry per possible cause of interruption.
26 *
27 * The first 20 entries of the table contain 64 bundles each while the
28 * remaining 48 entries contain only 16 bundles each.
29 *
30 * The 64 bundles are used to allow inlining the whole handler for critical
31 * interruptions like TLB misses.
32 *
33 * For each entry, the comment is as follows:
34 *
35 * // 0x1c00 Entry 7 (size 64 bundles) Data Key Miss (12,51)
36 * entry offset ----/     /         /              /      /
37 * entry number ----------/         /              /      /
38 * size of the entry ---------------/              /      /
39 * vector name ------------------------------------/      /
40 * interruptions triggering this vector ------------------/
41 *
42 * The table is 32KB in size and must be aligned on 32KB boundary.
43 * (The CPU ignores the 15 lower bits of the address)
44 *
45 * Table is based upon EAS2.6 (Oct 1999)
46 */
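/*
 * The entry offsets follow directly from the layout described above:
 * a bundle is 16 bytes, so the 20 long entries are 64*16 = 0x400 bytes
 * apart and the short entries, starting at 0x5000, are 16*16 = 0x100
 * bytes apart.  As a C sketch:
 *
 *	offset(i) = (i < 20) ? i * 0x400
 *	                     : 0x5000 + (i - 20) * 0x100;
 */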
48 #include <linux/config.h>
50 #include <asm/asmmacro.h>
51 #include <asm/break.h>
52 #include <asm/ia32.h>
53 #include <asm/kregs.h>
54 #include <asm/offsets.h>
55 #include <asm/pgtable.h>
56 #include <asm/processor.h>
57 #include <asm/ptrace.h>
58 #include <asm/system.h>
59 #include <asm/thread_info.h>
60 #include <asm/unistd.h>
61 #ifdef XEN
62 #include <xen/errno.h>
63 #else
64 #include <asm/errno.h>
65 #endif
67 #if 1
68 # define PSR_DEFAULT_BITS psr.ac
69 #else
70 # define PSR_DEFAULT_BITS 0
71 #endif
73 #if 0
74 /*
75 * This lets you track the last eight faults that occurred on the CPU.
76 * Make sure ar.k2 isn't needed for something else before enabling this...
77 */
78 # define DBG_FAULT(i) \
79 mov r16=ar.k2;; \
80 shl r16=r16,8;; \
81 add r16=(i),r16;; \
82 mov ar.k2=r16
83 #else
84 # define DBG_FAULT(i)
85 #endif
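/*
 * When the DBG_FAULT tracing above is enabled, ar.k2 acts as a shift
 * register: each vector shifts the previous history left by 8 bits and
 * records its own number in the low byte, so the last eight vector
 * numbers can be read back from ar.k2 (most recent in bits 0..7).
 */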
87 #define MINSTATE_VIRT /* needed by minstate.h */
88 #include "minstate.h"
90 #define FAULT(n) \
91 mov r19=n; /* prepare to save predicates */ \
92 mov r31=pr; \
93 br.sptk.many dispatch_to_fault_handler
95 #define FAULT_OR_REFLECT(n) \
96 mov r20=cr.ipsr; \
97 mov r19=n; /* prepare to save predicates */ \
98 mov r31=pr;; \
99 extr.u r20=r20,IA64_PSR_CPL0_BIT,2;; \
100 cmp.ne p6,p0=r0,r20; /* cpl != 0? */ \
101 (p6) br.dptk.many dispatch_reflection; \
102 br.sptk.few dispatch_to_fault_handler
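/*
 * In C terms (sketch): FAULT(n) always enters Xen's own fault handler,
 * while FAULT_OR_REFLECT(n) first looks at the interrupted privilege
 * level and bounces guest faults back to the guest:
 *
 *	if (cpl(ipsr) != 0)
 *		dispatch_reflection(n);		// fault came from a guest
 *	else
 *		dispatch_to_fault_handler(n);	// fault inside Xen itself
 */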
104 .section .text.ivt,"ax"
106 .align 32768 // align on 32KB boundary
107 .global ia64_ivt
108 ia64_ivt:
109 //////////////////////////////////////////////////////////////////////////
110 // 0x0000 Entry 0 (size 64 bundles) VHPT Translation (8,20,47)
111 ENTRY(vhpt_miss)
112 DBG_FAULT(0)
113 #ifdef XEN
114 FAULT(0)
115 #else
116 /*
117 * The VHPT vector is invoked when the TLB entry for the virtual
118 * page table is missing. This happens only as a result of a
119 * previous (the "original") TLB miss, which may either be caused
120 * by an instruction fetch or a data access (or non-access).
121 *
122 * What we do here is normal TLB miss handling for the _original_
123 * miss, followed by inserting the TLB entry for the virtual page
124 * table page that the VHPT walker was attempting to access. The
125 * latter gets inserted as long as both L1 and L2 have valid
126 * mappings for the faulting address. The TLB entry for the
127 * original miss gets inserted only if the L3 entry indicates
128 * that the page is present.
129 *
130 * do_page_fault gets invoked in the following cases:
131 * - the faulting virtual address uses unimplemented address bits
132 * - the faulting virtual address has no L1, L2, or L3 mapping
133 */
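/*
 * In C terms, the walk below is roughly the following (a sketch only;
 * the real code also special-cases region 5 and hugepages, and the
 * helper names here are illustrative):
 *
 *	pgd = l1_table[pgd_index(ifa)];			// L1
 *	pmd = page_of(pgd)[pmd_index(ifa)];		// L2
 *	pte = page_of(pmd)[pte_index(ifa)];		// L3
 *	if (pgd && pmd && (pte & _PAGE_P)) {
 *		itc(pte);		// entry for the original miss
 *		itc_d(vhpt_pte);	// entry for the VHPT page itself
 *	} else
 *		page_fault();
 */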
134 mov r16=cr.ifa // get address that caused the TLB miss
135 #ifdef CONFIG_HUGETLB_PAGE
136 movl r18=PAGE_SHIFT
137 mov r25=cr.itir
138 #endif
139 ;;
140 rsm psr.dt // use physical addressing for data
141 mov r31=pr // save the predicate registers
142 mov r19=IA64_KR(PT_BASE) // get page table base address
143 shl r21=r16,3 // shift bit 60 into sign bit
144 shr.u r17=r16,61 // get the region number into r17
145 ;;
146 shr r22=r21,3
147 #ifdef CONFIG_HUGETLB_PAGE
148 extr.u r26=r25,2,6
149 ;;
150 cmp.ne p8,p0=r18,r26
151 sub r27=r26,r18
152 ;;
153 (p8) dep r25=r18,r25,2,6
154 (p8) shr r22=r22,r27
155 #endif
156 ;;
157 cmp.eq p6,p7=5,r17 // is IFA pointing into region 5?
158 shr.u r18=r22,PGDIR_SHIFT // get bits 33-63 of faulting address
159 ;;
160 (p7) dep r17=r17,r19,(PAGE_SHIFT-3),3 // put region number bits in place
162 srlz.d
163 LOAD_PHYSICAL(p6, r19, swapper_pg_dir) // region 5 is rooted at
164 // swapper_pg_dir
166 .pred.rel "mutex", p6, p7
167 (p6) shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT
168 (p7) shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT-3
169 ;;
170 (p6) dep r17=r18,r19,3,(PAGE_SHIFT-3) // r17=PTA + IFA(33,42)*8
171 (p7) dep r17=r18,r17,3,(PAGE_SHIFT-6) // r17=PTA +
172 // (((IFA(61,63) << 7) |
173 // IFA(33,39))*8)
174 cmp.eq p7,p6=0,r21 // unused address bits all zero?
175 shr.u r18=r22,PMD_SHIFT // shift L2 index into position
176 ;;
177 ld8 r17=[r17] // fetch the L1 entry (may be 0)
178 ;;
179 (p7) cmp.eq p6,p7=r17,r0 // was L1 entry NULL?
180 dep r17=r18,r17,3,(PAGE_SHIFT-3) // compute address of L2 page
181 // table entry
182 ;;
183 (p7) ld8 r20=[r17] // fetch the L2 entry (may be 0)
184 shr.u r19=r22,PAGE_SHIFT // shift L3 index into position
185 ;;
186 (p7) cmp.eq.or.andcm p6,p7=r20,r0 // was L2 entry NULL?
187 dep r21=r19,r20,3,(PAGE_SHIFT-3) // compute address of L3 page
188 // table entry
189 ;;
190 (p7) ld8 r18=[r21] // read the L3 PTE
191 mov r19=cr.isr // cr.isr bit 32 tells us if
192 // this is an insn miss
193 ;;
194 (p7) tbit.z p6,p7=r18,_PAGE_P_BIT // page present bit cleared?
195 mov r22=cr.iha // get the VHPT address that
196 // caused the TLB miss
197 ;; // avoid RAW on p7
198 (p7) tbit.nz.unc p10,p11=r19,32 // is it an instruction TLB
199 // miss?
200 dep r23=0,r20,0,PAGE_SHIFT // clear low bits to get page
201 // address
202 ;;
203 (p10) itc.i r18 // insert the instruction TLB
204 // entry
205 (p11) itc.d r18 // insert the data TLB entry
206 (p6) br.cond.spnt.many page_fault // handle bad address/page not
207 // present (page fault)
208 mov cr.ifa=r22
210 #ifdef CONFIG_HUGETLB_PAGE
211 (p8) mov cr.itir=r25 // change to default page-size
212 // for VHPT
213 #endif
215 /*
216 * Now compute and insert the TLB entry for the virtual page table.
217 * We never execute in a page table page so there is no need to set
218 * the exception deferral bit.
219 */
220 adds r24=__DIRTY_BITS_NO_ED|_PAGE_PL_0|_PAGE_AR_RW,r23
221 ;;
222 (p7) itc.d r24
223 ;;
224 #ifdef CONFIG_SMP
225 /*
226 * Tell the assembler's dependency-violation checker that the above
227 * "itc" instructions cannot possibly affect the following loads:
228 */
229 dv_serialize_data
231 /*
232 * Re-check L2 and L3 pagetable. If they changed, we may have
233 * received a ptc.g between reading the pagetable and the "itc".
234 * If so, flush the entry we inserted and retry.
235 */
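// In C terms (sketch): if the L2 entry changed, purge both the
// VHPT-page translation (cr.iha) and the original one (cr.ifa); if
// only the L3 PTE changed, purge just the original translation.
// Either way the access simply re-faults and retries.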
236 ld8 r25=[r21] // read L3 PTE again
237 ld8 r26=[r17] // read L2 entry again
238 ;;
239 cmp.ne p6,p7=r26,r20 // did L2 entry change
240 mov r27=PAGE_SHIFT<<2
241 ;;
242 (p6) ptc.l r22,r27 // purge PTE page translation
243 (p7) cmp.ne.or.andcm p6,p7=r25,r18 // did L3 PTE change
244 ;;
245 (p6) ptc.l r16,r27 // purge translation
246 #endif
248 mov pr=r31,-1 // restore predicate registers
249 rfi
250 #endif
251 END(vhpt_miss)
253 .org ia64_ivt+0x400
254 //////////////////////////////////////////////////////////////////////////
255 // 0x0400 Entry 1 (size 64 bundles) ITLB (21)
256 ENTRY(itlb_miss)
257 DBG_FAULT(1)
258 #ifdef XEN
259 mov r16 = cr.ifa
260 mov r31 = pr
261 ;;
262 extr.u r17=r16,59,5
263 ;;
264 /* If address belongs to VMM, go to alt tlb handler */
265 cmp.eq p6,p0=0x1e,r17
266 (p6) br.cond.spnt late_alt_itlb_miss
267 ;;
268 cmp.eq p6,p0=0x1d,r17
269 (p6) br.cond.spnt late_alt_itlb_miss
270 ;;
271 mov pr = r31, 0x1ffff
272 ;;
273 #ifdef VHPT_GLOBAL
274 br.cond.sptk fast_tlb_miss_reflect
275 ;;
276 #endif
277 #endif
278 /*
279 * The ITLB handler accesses the L3 PTE via the virtually mapped linear
280 * page table. If a nested TLB miss occurs, we switch into physical
281 * mode, walk the page table, and then re-execute the L3 PTE read
282 * and go on normally after that.
283 */
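/*
 * In C terms (sketch): pte = *(u64 *)iha; if (pte & _PAGE_P)
 * itc_i(pte); else page_fault();  If the ld8 below itself misses,
 * nested_dtlb_miss walks the page table in physical mode and returns
 * to label 1 via r30.
 */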
284 mov r16=cr.ifa // get virtual address
285 mov r29=b0 // save b0
286 mov r31=pr // save predicates
287 .itlb_fault:
288 mov r17=cr.iha // get virtual address of L3 PTE
289 movl r30=1f // load nested fault
290 // continuation point
291 ;;
292 1: ld8 r18=[r17] // read L3 PTE
293 ;;
294 mov b0=r29
295 tbit.z p6,p0=r18,_PAGE_P_BIT // page present bit cleared?
296 (p6) br.cond.spnt page_fault
297 ;;
298 itc.i r18
299 ;;
300 #ifdef CONFIG_SMP
301 /*
302 * Tell the assembler's dependency-violation checker that the above
303 * "itc" instructions cannot possibly affect the following loads:
304 */
305 dv_serialize_data
307 ld8 r19=[r17] // read L3 PTE again and see if same
308 mov r20=PAGE_SHIFT<<2 // setup page size for purge
309 ;;
310 cmp.ne p7,p0=r18,r19
311 ;;
312 (p7) ptc.l r16,r20
313 #endif
314 mov pr=r31,-1
315 rfi
316 END(itlb_miss)
318 .org ia64_ivt+0x0800
319 //////////////////////////////////////////////////////////////////////////
320 // 0x0800 Entry 2 (size 64 bundles) DTLB (9,48)
321 ENTRY(dtlb_miss)
322 DBG_FAULT(2)
323 #ifdef XEN
324 mov r16=cr.ifa // get virtual address
325 mov r31=pr
326 ;;
327 extr.u r17=r16,59,5
328 ;;
329 cmp.eq p6,p0=0x1e,r17 // if the address belongs to VMM, go
330 // to the alternate tlb handler
331 (p6) br.cond.spnt late_alt_dtlb_miss
332 ;;
333 cmp.eq p6,p0=0x1d,r17
334 (p6) br.cond.spnt late_alt_dtlb_miss
335 ;;
336 #if VHPT_ENABLED
337 mov r30=cr.ipsr // XXX TODO optimization
338 mov r28=cr.iip
339 mov r17=cr.isr
340 ;;
342 extr.u r18 = r30, IA64_PSR_CPL0_BIT, 2 // extract psr.cpl
343 ;;
344 cmp.ne p6, p0 = r0, r18 // cpl != 0?
345 (p6) br.cond.sptk 2f
347 tbit.nz p7,p0=r17,IA64_ISR_SP_BIT // is speculation bit on?
348 ;;
349 (p7) br.cond.spnt 2f
351 // Is the faulting iip in the vmm area?
352 // -- check [59:58] bit
353 // -- if 00, 11: guest
354 // -- if 01, 10: vmm
355 extr.u r19 = r28, 58, 2
356 ;;
357 cmp.eq p10, p0 = 0x0, r19
358 (p10) br.cond.sptk 2f
359 cmp.eq p11, p0 = 0x3, r19
360 (p11) br.cond.sptk 2f
362 // Is the faulting address in the identity mapping area?
363 // must be either 0xf000... or 0xe8000...
364 extr.u r20 = r16, 59, 5
365 ;;
366 cmp.eq p12, p0 = 0x1e, r20 // (0xf0 >> 3) = 0x1e
367 (p12) br.cond.spnt 1f
368 cmp.eq p0, p13 = 0x1d, r20 // (0xe8 >> 3) = 0x1d
369 (p13) br.cond.sptk 2f
371 1:
372 movl r24=PAGE_KERNEL // xen identity mapping area.
373 movl r25=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
374 ;;
375 shr.u r26=r16,55 // move address bit 59 to bit 4
376 and r25=r25,r16 // clear ed, reserved bits, and PTE control bits
377 ;;
378 and r26=0x10,r26 // bit 4=address-bit(59)
379 ;;
380 or r25=r25,r24 // insert PTE control bits into r25
381 ;;
382 or r25=r25,r26 // set bit 4 (uncached) if the access was to
383 // the uncached identity area (0xe8...)
384 ;;
385 itc.d r25 // insert the TLB entry
386 mov pr=r31,-1
387 rfi
389 2:
390 #endif
391 #ifdef VHPT_GLOBAL
392 // br.cond.sptk page_fault
393 br.cond.sptk fast_tlb_miss_reflect
394 ;;
395 #endif
396 mov r29=b0 // save b0
397 #else
398 /*
399 * The DTLB handler accesses the L3 PTE via the virtually mapped linear
400 * page table. If a nested TLB miss occurs, we switch into physical
401 * mode, walk the page table, and then re-execute the L3 PTE read
402 * and go on normally after that.
403 */
404 mov r16=cr.ifa // get virtual address
405 mov r29=b0 // save b0
406 mov r31=pr // save predicates
407 #endif
408 dtlb_fault:
409 mov r17=cr.iha // get virtual address of L3 PTE
410 movl r30=1f // load nested fault
411 // continuation point
412 ;;
413 1: ld8 r18=[r17] // read L3 PTE
414 ;;
415 mov b0=r29
416 tbit.z p6,p0=r18,_PAGE_P_BIT // page present bit cleared?
417 (p6) br.cond.spnt page_fault
418 ;;
419 itc.d r18
420 ;;
421 #ifdef CONFIG_SMP
422 /*
423 * Tell the assembler's dependency-violation checker that the above
424 * "itc" instructions cannot possibly affect the following loads:
425 */
426 dv_serialize_data
428 ld8 r19=[r17] // read L3 PTE again and see if same
429 mov r20=PAGE_SHIFT<<2 // setup page size for purge
430 ;;
431 cmp.ne p7,p0=r18,r19
432 ;;
433 (p7) ptc.l r16,r20
434 #endif
435 mov pr=r31,-1
436 rfi
437 END(dtlb_miss)
439 .org ia64_ivt+0x0c00
440 //////////////////////////////////////////////////////////////////////////
441 // 0x0c00 Entry 3 (size 64 bundles) Alt ITLB (19)
442 ENTRY(alt_itlb_miss)
443 DBG_FAULT(3)
444 #ifdef XEN
445 mov r16=cr.ifa // get address that caused the TLB miss
446 mov r31=pr
447 ;;
448 late_alt_itlb_miss:
449 mov r21=cr.ipsr
450 movl r17=PAGE_KERNEL
451 movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
452 ;;
453 #else
454 mov r16=cr.ifa // get address that caused the TLB miss
455 movl r17=PAGE_KERNEL
456 mov r21=cr.ipsr
457 movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
458 mov r31=pr
459 ;;
460 #endif
461 #ifdef CONFIG_DISABLE_VHPT
462 shr.u r22=r16,61 // get the region number into r22
463 ;;
464 cmp.gt p8,p0=6,r22 // user mode
465 ;;
466 (p8) thash r17=r16
467 ;;
468 (p8) mov cr.iha=r17
469 (p8) mov r29=b0 // save b0
470 (p8) br.cond.dptk .itlb_fault
471 #endif
472 extr.u r23=r21,IA64_PSR_CPL0_BIT,2 // extract psr.cpl
473 and r19=r19,r16 // clear ed, reserved bits, and PTE control bits
474 #ifdef XEN
475 shr.u r18=r16,55 // move address bit 59 to bit 4
476 ;;
477 and r18=0x10,r18 // bit 4=address-bit(59)
478 #else
479 shr.u r18=r16,57 // move address bit 61 to bit 4
480 ;;
481 andcm r18=0x10,r18 // bit 4=~address-bit(61)
482 #endif
483 cmp.ne p8,p0=r0,r23 // psr.cpl != 0?
484 or r19=r17,r19 // insert PTE control bits into r19
485 ;;
486 or r19=r19,r18 // set bit 4 (uncached) if the access was to
487 // region 6
488 (p8) br.cond.spnt page_fault
489 ;;
490 itc.i r19 // insert the TLB entry
491 mov pr=r31,-1
492 rfi
493 END(alt_itlb_miss)
495 .org ia64_ivt+0x1000
496 //////////////////////////////////////////////////////////////////////////
497 // 0x1000 Entry 4 (size 64 bundles) Alt DTLB (7,46)
498 ENTRY(alt_dtlb_miss)
499 DBG_FAULT(4)
500 #ifdef XEN
501 mov r16=cr.ifa // get address that caused the TLB miss
502 mov r31=pr
503 ;;
504 late_alt_dtlb_miss:
505 mov r20=cr.isr
506 movl r17=PAGE_KERNEL
507 mov r21=cr.ipsr
508 movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
509 ;;
510 #endif
511 #ifdef CONFIG_DISABLE_VHPT
512 shr.u r22=r16,61 // get the region into r22
513 ;;
514 cmp.gt p8,p0=6,r22 // access to region 0-5
515 ;;
516 (p8) thash r17=r16
517 ;;
518 (p8) mov cr.iha=r17
519 (p8) mov r29=b0 // save b0
520 (p8) br.cond.dptk dtlb_fault
521 #endif
522 extr.u r23=r21,IA64_PSR_CPL0_BIT,2 // extract psr.cpl
523 and r22=IA64_ISR_CODE_MASK,r20 // get the isr.code field
524 tbit.nz p6,p7=r20,IA64_ISR_SP_BIT // is speculation bit on?
525 #ifdef XEN
526 shr.u r18=r16,55 // move address bit 59 to bit 4
527 and r19=r19,r16 // clear ed, reserved bits, and
528 // PTE control bits
529 tbit.nz p9,p0=r20,IA64_ISR_NA_BIT // is non-access bit on?
530 ;;
531 and r18=0x10,r18 // bit 4=address-bit(59)
532 #else
533 shr.u r18=r16,57 // move address bit 61 to bit 4
534 and r19=r19,r16 // clear ed, reserved bits, and
535 // PTE control bits
536 tbit.nz p9,p0=r20,IA64_ISR_NA_BIT // is non-access bit on?
537 ;;
538 andcm r18=0x10,r18 // bit 4=~address-bit(61)
539 #endif
540 cmp.ne p8,p0=r0,r23
541 (p9) cmp.eq.or.andcm p6,p7=IA64_ISR_CODE_LFETCH,r22 // check isr.code field
542 (p8) br.cond.spnt page_fault
543 #ifdef XEN
544 ;;
545 #ifdef CONFIG_VIRTUAL_FRAME_TABLE
546 shr r22=r16,56 // Test for the address of virtual frame_table
547 ;;
548 cmp.eq p8,p0=((VIRT_FRAME_TABLE_ADDR>>56)&0xff)-0x100,r22
549 (p8) br.cond.sptk frametable_miss ;;
550 #endif
551 // If it is not a Xen address, handle it via page_fault.
552 // Note that 0xf000 (cached) and 0xe800 (uncached) addresses
553 // should be OK.
554 extr.u r22=r16,59,5
555 ;;
556 cmp.eq p8,p0=0x1e,r22
557 (p8) br.cond.spnt 1f
558 ;;
559 cmp.ne p8,p0=0x1d,r22
560 (p8) br.cond.sptk page_fault
561 ;;
562 1:
563 #endif
565 dep r21=-1,r21,IA64_PSR_ED_BIT,1
566 or r19=r19,r17 // insert PTE control bits into r19
567 ;;
568 or r19=r19,r18 // set bit 4 (uncached) if the access was to
569 // region 6
570 (p6) mov cr.ipsr=r21
571 ;;
572 (p7) itc.d r19 // insert the TLB entry
573 mov pr=r31,-1
574 rfi
575 END(alt_dtlb_miss)
577 #ifdef CONFIG_VIRTUAL_FRAME_TABLE
578 GLOBAL_ENTRY(frametable_miss)
579 rsm psr.dt // switch to using physical data addressing
580 movl r24=(frametable_pg_dir-PAGE_OFFSET) // r24=__pa(frametable_pg_dir)
581 ;;
582 srlz.d
583 extr.u r17=r16,PGDIR_SHIFT,(PAGE_SHIFT-3)
584 ;;
585 shladd r24=r17,3,r24 // r24=&pgd[pgd_offset(addr)]
586 ;;
587 ld8 r24=[r24] // r24=pgd[pgd_offset(addr)]
588 extr.u r18=r16,PMD_SHIFT,(PAGE_SHIFT-3) // r18=pmd_offset
589 ;;
590 cmp.eq p6,p7=0,r24 // pgd present?
591 shladd r24=r18,3,r24 // r24=&pmd[pmd_offset(addr)]
592 ;;
593 (p7) ld8 r24=[r24] // r24=pmd[pmd_offset(addr)]
594 extr.u r19=r16,PAGE_SHIFT,(PAGE_SHIFT-3)// r19=pte_offset
595 (p6) br.spnt.few frametable_fault
596 ;;
597 cmp.eq p6,p7=0,r24 // pmd present?
598 shladd r24=r19,3,r24 // r24=&pte[pte_offset(addr)]
599 ;;
600 (p7) ld8 r24=[r24] // r24=pte[pte_offset(addr)]
601 mov r25=0x700|(PAGE_SHIFT<<2) // key=7
602 (p6) br.spnt.few frametable_fault
603 ;;
604 mov cr.itir=r25
605 ssm psr.dt // switch to using virtual data addressing
606 tbit.z p6,p7=r24,_PAGE_P_BIT // pte present?
607 ;;
608 (p7) itc.d r24 // install updated PTE
609 (p6) br.spnt.few frametable_fault // page present bit cleared?
610 ;;
611 mov pr=r31,-1 // restore predicate registers
612 rfi
613 END(frametable_miss)
615 ENTRY(frametable_fault)
616 ssm psr.dt // switch to using virtual data addressing
617 mov r18=cr.iip
618 movl r19=ia64_frametable_probe
619 ;;
620 cmp.eq p6,p7=r18,r19 // is faulting address ia64_frametable_probe?
621 mov r8=0 // assumes that 'probe.r' uses r8
622 dep r21=-1,r21,IA64_PSR_RI_BIT+1,1 // return to next instruction in
623 // bundle 2
624 ;;
625 (p6) mov cr.ipsr=r21
626 mov r19=4 // FAULT(4)
627 (p7) br.spnt.few dispatch_to_fault_handler
628 ;;
629 mov pr=r31,-1
630 rfi
631 END(frametable_fault)
633 GLOBAL_ENTRY(ia64_frametable_probe)
634 {
635 probe.r r8=r32,0 // destination register must be r8
636 nop.f 0x0
637 br.ret.sptk.many b0 // this instruction must be in bundle 2
638 }
639 END(ia64_frametable_probe)
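// How the probe/fault pair above works: if the frametable walk in
// frametable_miss fails, frametable_fault checks whether the faulting
// iip is ia64_frametable_probe; if so it forces r8=0 (probe.r's "not
// readable" answer) and advances ipsr.ri past the probe instruction,
// so the caller simply sees probe.r return 0.  Any other faulting iip
// is dispatched as a normal alt-DTLB fault (FAULT(4)).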
640 #endif /* CONFIG_VIRTUAL_FRAME_TABLE */
642 .org ia64_ivt+0x1400
643 //////////////////////////////////////////////////////////////////////////
644 // 0x1400 Entry 5 (size 64 bundles) Data nested TLB (6,45)
645 ENTRY(nested_dtlb_miss)
646 DBG_FAULT(5)
647 #ifdef XEN
648 mov b0=r30
649 br.sptk.many b0 // return to the continuation point
650 ;;
651 #else
652 /*
653 * In the absence of kernel bugs, we get here when the virtually
654 * mapped linear page table is accessed non-speculatively (e.g.,
655 * in the Dirty-bit, Instruction Access-bit, or Data Access-bit
656 * faults). If the DTLB entry for the virtual page table is missing,
657 * a nested TLB miss fault is triggered and control is transferred
658 * to this point. When this happens, we lookup the pte for the
659 * faulting address by walking the page table in physical mode
660 * and return to the continuation point passed in register r30
661 * (or call page_fault if the address is not mapped).
662 *
663 * Input: r16: faulting address
664 * r29: saved b0
665 * r30: continuation address
666 * r31: saved pr
667 *
668 * Output: r17: physical address of L3 PTE of faulting address
669 * r29: saved b0
670 * r30: continuation address
671 * r31: saved pr
672 *
673 * Clobbered: b0, r18, r19, r21, psr.dt (cleared)
674 */
675 rsm psr.dt // switch to using physical data
676 // addressing
677 mov r19=IA64_KR(PT_BASE) // get the page table base address
678 shl r21=r16,3 // shift bit 60 into sign bit
679 ;;
680 shr.u r17=r16,61 // get the region number into r17
681 ;;
682 cmp.eq p6,p7=5,r17 // is faulting address in region 5?
683 shr.u r18=r16,PGDIR_SHIFT // get bits 33-63 of faulting address
684 ;;
685 (p7) dep r17=r17,r19,(PAGE_SHIFT-3),3 // put region number bits in place
687 srlz.d
688 LOAD_PHYSICAL(p6, r19, swapper_pg_dir) // region 5 is rooted at
689 // swapper_pg_dir
691 .pred.rel "mutex", p6, p7
692 (p6) shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT
693 (p7) shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT-3
694 ;;
695 (p6) dep r17=r18,r19,3,(PAGE_SHIFT-3) // r17=PTA + IFA(33,42)*8
696 (p7) dep r17=r18,r17,3,(PAGE_SHIFT-6) // r17=PTA + (((IFA(61,63) << 7) |
697 // IFA(33,39))*8)
698 cmp.eq p7,p6=0,r21 // unused address bits all zeroes?
699 shr.u r18=r16,PMD_SHIFT // shift L2 index into position
700 ;;
701 ld8 r17=[r17] // fetch the L1 entry (may be 0)
702 ;;
703 (p7) cmp.eq p6,p7=r17,r0 // was L1 entry NULL?
704 dep r17=r18,r17,3,(PAGE_SHIFT-3) // compute address of L2 page table
705 // entry
706 ;;
707 (p7) ld8 r17=[r17] // fetch the L2 entry (may be 0)
708 shr.u r19=r16,PAGE_SHIFT // shift L3 index into position
709 ;;
710 (p7) cmp.eq.or.andcm p6,p7=r17,r0 // was L2 entry NULL?
711 dep r17=r19,r17,3,(PAGE_SHIFT-3) // compute address of L3 page table
712 // entry
713 (p6) br.cond.spnt page_fault
714 mov b0=r30
715 br.sptk.many b0 // return to continuation point
716 #endif
717 END(nested_dtlb_miss)
719 .org ia64_ivt+0x1800
720 //////////////////////////////////////////////////////////////////////////
721 // 0x1800 Entry 6 (size 64 bundles) Instruction Key Miss (24)
722 ENTRY(ikey_miss)
723 DBG_FAULT(6)
724 #ifdef XEN
725 FAULT_OR_REFLECT(6)
726 #else
727 FAULT(6)
728 #endif
729 END(ikey_miss)
731 //----------------------------------------------------------------
732 // call do_page_fault (predicates are in r31, psr.dt may be off,
733 // r16 is faulting address)
734 #ifdef XEN
735 GLOBAL_ENTRY(page_fault)
736 #else
737 ENTRY(page_fault)
738 #endif
739 ssm psr.dt
740 ;;
741 srlz.i
742 ;;
743 SAVE_MIN_WITH_COVER
744 #ifdef XEN
745 alloc r15=ar.pfs,0,0,4,0
746 mov out0=cr.ifa
747 mov out1=cr.isr
748 mov out3=cr.itir
749 #else
750 alloc r15=ar.pfs,0,0,3,0
751 mov out0=cr.ifa
752 mov out1=cr.isr
753 #endif
754 adds r3=8,r2 // set up second base pointer
755 ;;
756 ssm psr.ic | PSR_DEFAULT_BITS
757 ;;
758 srlz.i // guarantee that interruption
759 // collection is on
760 ;;
761 (p15) ssm psr.i // restore psr.i
762 movl r14=ia64_leave_kernel
763 ;;
764 SAVE_REST
765 mov rp=r14
766 ;;
767 adds out2=16,r12 // out2 = pointer to pt_regs
768 br.call.sptk.many b6=ia64_do_page_fault // ignore return address
769 END(page_fault)
771 .org ia64_ivt+0x1c00
772 //////////////////////////////////////////////////////////////////////////
773 // 0x1c00 Entry 7 (size 64 bundles) Data Key Miss (12,51)
774 ENTRY(dkey_miss)
775 DBG_FAULT(7)
776 #ifdef XEN
777 FAULT_OR_REFLECT(7)
778 #else
779 FAULT(7)
780 #endif
781 END(dkey_miss)
783 .org ia64_ivt+0x2000
784 //////////////////////////////////////////////////////////////////////////
785 // 0x2000 Entry 8 (size 64 bundles) Dirty-bit (54)
786 ENTRY(dirty_bit)
787 DBG_FAULT(8)
788 #ifdef XEN
789 mov r20=cr.ipsr
790 mov r31=pr
791 ;;
792 extr.u r20=r20,IA64_PSR_CPL0_BIT,2
793 ;;
794 mov r19=8 // prepare to save predicates
795 cmp.eq p6,p0=r0,r20 // cpl == 0?
796 (p6) br.sptk.few dispatch_to_fault_handler
797 // If shadow mode is not enabled, reflect the fault.
798 movl r22=THIS_CPU(cpu_kr)+IA64_KR_CURRENT_OFFSET
799 ;;
800 ld8 r22=[r22]
801 ;;
802 add r22=IA64_VCPU_DOMAIN_OFFSET,r22
803 ;;
804 ld8 r22=[r22] // read domain
805 ;;
806 add r22=IA64_DOMAIN_SHADOW_BITMAP_OFFSET,r22
807 ;;
808 ld8 r22=[r22]
809 ;;
810 cmp.eq p6,p0=r0,r22 // !shadow_bitmap ?
811 (p6) br.dptk.many dispatch_reflection
813 SAVE_MIN_WITH_COVER
814 alloc r14=ar.pfs,0,0,4,0
815 mov out0=cr.ifa
816 mov out1=cr.itir
817 mov out2=cr.isr
818 adds out3=16,sp
820 ssm psr.ic | PSR_DEFAULT_BITS
821 ;;
822 srlz.i // guarantee that interruption
823 // collection is on
824 ;;
825 (p15) ssm psr.i // restore psr.i
826 adds r3=8,r2 // set up second base pointer
827 ;;
828 SAVE_REST
829 movl r14=ia64_leave_kernel
830 ;;
831 mov rp=r14
832 br.call.sptk.many b6=ia64_shadow_fault
833 #else
834 /*
835 * What we do here is to simply turn on the dirty bit in the PTE.
836 * We need to update both the page-table and the TLB entry. To
837 * efficiently access the PTE, we address it through the virtual
838 * page table. Most likely, the TLB entry for the relevant virtual
839 * page table page is still present in the TLB so we can normally
840 * do this without additional TLB misses. In case the necessary
841 * virtual page table TLB entry isn't present, we take a nested
842 * TLB miss hit where we look up the physical address of the L3
843 * PTE and then continue at label 1 below.
844 */
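/*
 * The CONFIG_SMP path below, as a C sketch (the cmpxchg/purge helper
 * names are illustrative):
 *
 *	old = *pte;
 *	new = old | _PAGE_D | _PAGE_A;
 *	if (cmpxchg(pte, old, new) == old)
 *		itc_d(new);			// we won: install it
 *	if (*pte != new)
 *		ptc_l(ifa, PAGE_SHIFT << 2);	// we lost a race: purge
 */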
845 mov r16=cr.ifa // get the address that caused the
846 // fault
847 movl r30=1f // load continuation point in case
848 // of nested fault
849 ;;
850 thash r17=r16 // compute virtual address of L3 PTE
851 mov r29=b0 // save b0 in case of nested fault
852 mov r31=pr // save pr
853 #ifdef CONFIG_SMP
854 mov r28=ar.ccv // save ar.ccv
855 ;;
856 1: ld8 r18=[r17]
857 ;; // avoid RAW on r18
858 mov ar.ccv=r18 // set compare value for cmpxchg
859 or r25=_PAGE_D|_PAGE_A,r18 // set the dirty and accessed bits
860 ;;
861 cmpxchg8.acq r26=[r17],r25,ar.ccv
862 mov r24=PAGE_SHIFT<<2
863 ;;
864 cmp.eq p6,p7=r26,r18
865 ;;
866 (p6) itc.d r25 // install updated PTE
867 ;;
868 /*
869 * Tell the assembler's dependency-violation checker that the above
870 * "itc" instructions cannot possibly affect the following loads:
871 */
872 dv_serialize_data
874 ld8 r18=[r17] // read PTE again
875 ;;
876 cmp.eq p6,p7=r18,r25 // is it the same as the newly installed PTE?
877 ;;
878 (p7) ptc.l r16,r24
879 mov b0=r29 // restore b0
880 mov ar.ccv=r28
881 #else
882 ;;
883 1: ld8 r18=[r17]
884 ;; // avoid RAW on r18
885 or r18=_PAGE_D|_PAGE_A,r18 // set the dirty and accessed bits
886 mov b0=r29 // restore b0
887 ;;
888 st8 [r17]=r18 // store back updated PTE
889 itc.d r18 // install updated PTE
890 #endif
891 mov pr=r31,-1 // restore pr
892 rfi
893 #endif
894 END(dirty_bit)
896 .org ia64_ivt+0x2400
897 //////////////////////////////////////////////////////////////////////////
898 // 0x2400 Entry 9 (size 64 bundles) Instruction Access-bit (27)
899 ENTRY(iaccess_bit)
900 DBG_FAULT(9)
901 #ifdef XEN
902 mov r16=cr.isr
903 mov r17=cr.ifa
904 mov r31=pr
905 mov r19=9
906 mov r20=0x2400
907 br.sptk.many fast_access_reflect;;
908 #else
909 // Like Entry 8, except for instruction access
910 mov r16=cr.ifa // get the address that caused the
911 // fault
912 movl r30=1f // load continuation point in case
913 // of nested fault
914 mov r31=pr // save predicates
915 #ifdef CONFIG_ITANIUM
916 /*
917 * Erratum 10 (IFA may contain incorrect address) has "NoFix" status.
918 */
919 mov r17=cr.ipsr
920 ;;
921 mov r18=cr.iip
922 tbit.z p6,p0=r17,IA64_PSR_IS_BIT // IA64 instruction set?
923 ;;
924 (p6) mov r16=r18 // if so, use cr.iip instead of cr.ifa
925 #endif /* CONFIG_ITANIUM */
926 ;;
927 thash r17=r16 // compute virtual address of L3 PTE
928 mov r29=b0 // save b0 in case of nested fault
929 #ifdef CONFIG_SMP
930 mov r28=ar.ccv // save ar.ccv
931 ;;
932 1: ld8 r18=[r17]
933 ;;
934 mov ar.ccv=r18 // set compare value for cmpxchg
935 or r25=_PAGE_A,r18 // set the accessed bit
936 ;;
937 cmpxchg8.acq r26=[r17],r25,ar.ccv
938 mov r24=PAGE_SHIFT<<2
939 ;;
940 cmp.eq p6,p7=r26,r18
941 ;;
942 (p6) itc.i r25 // install updated PTE
943 ;;
944 /*
945 * Tell the assembler's dependency-violation checker that the above
946 * "itc" instructions cannot possibly affect the following loads:
947 */
948 dv_serialize_data
950 ld8 r18=[r17] // read PTE again
951 ;;
952 cmp.eq p6,p7=r18,r25 // is it the same as the newly installed PTE?
953 ;;
954 (p7) ptc.l r16,r24
955 mov b0=r29 // restore b0
956 mov ar.ccv=r28
957 #else /* !CONFIG_SMP */
958 ;;
959 1: ld8 r18=[r17]
960 ;;
961 or r18=_PAGE_A,r18 // set the accessed bit
962 mov b0=r29 // restore b0
963 ;;
964 st8 [r17]=r18 // store back updated PTE
965 itc.i r18 // install updated PTE
966 #endif /* !CONFIG_SMP */
967 mov pr=r31,-1
968 rfi
969 #endif
970 END(iaccess_bit)
972 .org ia64_ivt+0x2800
973 //////////////////////////////////////////////////////////////////////////
974 // 0x2800 Entry 10 (size 64 bundles) Data Access-bit (15,55)
975 ENTRY(daccess_bit)
976 DBG_FAULT(10)
977 #ifdef XEN
978 mov r16=cr.isr
979 mov r17=cr.ifa
980 mov r31=pr
981 mov r19=10
982 mov r20=0x2800
983 br.sptk.many fast_access_reflect
984 ;;
985 #else
986 // Like Entry 8, except for data access
987 mov r16=cr.ifa // get the address that caused the
988 // fault
989 movl r30=1f // load continuation point in case
990 // of nested fault
991 ;;
992 thash r17=r16 // compute virtual address of L3 PTE
993 mov r31=pr
994 mov r29=b0 // save b0 in case of nested fault
995 #ifdef CONFIG_SMP
996 mov r28=ar.ccv // save ar.ccv
997 ;;
998 1: ld8 r18=[r17]
999 ;; // avoid RAW on r18
1000 mov ar.ccv=r18 // set compare value for cmpxchg
1001 or r25=_PAGE_A,r18 // set the accessed bit
1002 ;;
1003 cmpxchg8.acq r26=[r17],r25,ar.ccv
1004 mov r24=PAGE_SHIFT<<2
1005 ;;
1006 cmp.eq p6,p7=r26,r18
1007 ;;
1008 (p6) itc.d r25 // install updated PTE
1009 /*
1010 * Tell the assembler's dependency-violation checker that the above
1011 * "itc" instructions cannot possibly affect the following loads:
1012 */
1013 dv_serialize_data
1014 ;;
1015 ld8 r18=[r17] // read PTE again
1016 ;;
1017 cmp.eq p6,p7=r18,r25 // is it the same as the newly installed PTE?
1018 ;;
1019 (p7) ptc.l r16,r24
1020 mov ar.ccv=r28
1021 #else
1022 ;;
1023 1: ld8 r18=[r17]
1024 ;; // avoid RAW on r18
1025 or r18=_PAGE_A,r18 // set the accessed bit
1026 ;;
1027 st8 [r17]=r18 // store back updated PTE
1028 itc.d r18 // install updated PTE
1029 #endif
1030 mov b0=r29 // restore b0
1031 mov pr=r31,-1
1032 rfi
1033 #endif
1034 END(daccess_bit)
1036 .org ia64_ivt+0x2c00
1037 //////////////////////////////////////////////////////////////////////////
1038 // 0x2c00 Entry 11 (size 64 bundles) Break instruction (33)
1039 ENTRY(break_fault)
1040 /*
1041 * The streamlined system call entry/exit paths only save/restore
1042 * the initial part of pt_regs. This implies that the callers of
1043 * system-calls must adhere to the normal procedure calling
1044 * conventions.
1046 * Registers to be saved & restored:
1047 * CR registers: cr.ipsr, cr.iip, cr.ifs
1048 * AR registers: ar.unat, ar.pfs, ar.rsc, ar.rnat, ar.bspstore,
1049 * ar.fpsr
1050 * others: pr, b0, b6, loadrs, r1, r11, r12, r13, r15
1051 * Registers to be restored only:
1052 * r8-r11: output value from the system call.
1054 * During system call exit, scratch registers (including r15) are
1055 * modified/cleared to prevent leaking bits from kernel to user
1056 * level.
1057 */
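/*
 * Dispatch logic of this handler, as a C sketch of the code below:
 *
 *	if (iim == 0)
 *		dispatch_break_fault();
 *	else if (psr_ic_addr == NULL)		// pseudo-cover
 *		dispatch_privop_fault();
 *	else if (cpl == 2 &&
 *		 iim - HYPERPRIVOP_START < HYPERPRIVOP_MAX)
 *		fast_hyperprivop();
 *	else if (cpl == 2 && iim == vcpu->breakimm)
 *		fast_hypercall();
 *	else
 *		fast_break_reflect();
 */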
1058 DBG_FAULT(11)
1059 mov r16=cr.isr
1060 mov r17=cr.iim
1061 mov r31=pr
1062 ;;
1063 cmp.eq p7,p0=r17,r0
1064 (p7) br.spnt.few dispatch_break_fault
1065 ;;
1066 #ifdef CRASH_DEBUG
1067 // A panic can occur before domain0 is created. In such cases,
1068 // referencing XSI_PSR_IC causes nested_dtlb_miss.
1069 movl r18=CDB_BREAK_NUM
1070 ;;
1071 cmp.eq p7,p0=r17,r18
1072 ;;
1073 (p7) br.spnt.few dispatch_break_fault
1074 ;;
1075 #endif
1076 movl r18=THIS_CPU(current_psr_ic_addr)
1077 ;;
1078 ld8 r18=[r18]
1079 ;;
1080 cmp.eq p7,p0=r0,r18 // is this a pseudo-cover?
1081 (p7) br.spnt.many dispatch_privop_fault
1082 ;;
1083 // if (ipsr.cpl == 2 && (iim - HYPERPRIVOP_START) < HYPERPRIVOP_MAX)
1084 // this is a hyperprivop. A hyperprivop is hand-coded assembly with
1085 // psr.ic off which means it can make no calls, cannot use r1-r15,
1086 // and it can have no memory accesses unless they are to pinned
1087 // addresses!
1088 mov r19= cr.ipsr
1089 movl r20=HYPERPRIVOP_START
1090 mov r21=HYPERPRIVOP_MAX
1091 ;;
1092 sub r20=r17,r20
1093 extr.u r19=r19,IA64_PSR_CPL0_BIT,2 // extract cpl field from cr.ipsr
1094 ;;
1095 cmp.gtu p7,p0=r21,r20
1096 ;;
1097 cmp.eq.and p7,p0=2,r19 // ipsr.cpl==2
1098 (p7) br.sptk.many fast_hyperprivop
1099 ;;
1100 movl r22=THIS_CPU(cpu_kr)+IA64_KR_CURRENT_OFFSET
1101 ;;
1102 ld8 r22 = [r22]
1103 ;;
1104 adds r23=IA64_VCPU_BREAKIMM_OFFSET,r22
1105 ;;
1106 ld4 r23=[r23];;
1107 cmp4.eq p6,p0=r23,r17;; // Xen-reserved breakimm?
1108 cmp.eq.and p6,p0=2,r19
1109 (p6) br.spnt.many fast_hypercall
1110 ;;
1111 br.sptk.many fast_break_reflect
1112 ;;
1115 fast_hypercall:
1116 shr r25=r2,8;;
1117 cmp.ne p7,p0=r0,r25
1118 (p7) br.spnt.few dispatch_break_fault
1119 ;;
1120 // fall through
1123 /*
1124 * The streamlined system call entry/exit paths only save/restore the initial part
1125 * of pt_regs. This implies that the callers of system-calls must adhere to the
1126 * normal procedure calling conventions.
1128 * Registers to be saved & restored:
1129 * CR registers: cr.ipsr, cr.iip, cr.ifs
1130 * AR registers: ar.unat, ar.pfs, ar.rsc, ar.rnat, ar.bspstore, ar.fpsr
1131 * others: pr, b0, b6, loadrs, r1, r11, r12, r13, r15
1132 * Registers to be restored only:
1133 * r8-r11: output value from the system call.
1135 * During system call exit, scratch registers (including r15) are modified/cleared
1136 * to prevent leaking bits from kernel to user level.
1137 */
1139 // DBG_FAULT(11)
1140 // mov.m r16=IA64_KR(CURRENT) // M2 r16 <- current task (12 cyc)
1141 mov r16=r22
1142 mov r29=cr.ipsr // M2 (12 cyc)
1143 // mov r31=pr // I0 (2 cyc)
1144 mov r15=r2
1146 // mov r17=cr.iim // M2 (2 cyc)
1147 mov.m r27=ar.rsc // M2 (12 cyc)
1148 // mov r18=__IA64_BREAK_SYSCALL // A
1150 mov.m ar.rsc=0 // M2
1151 mov.m r21=ar.fpsr // M2 (12 cyc)
1152 mov r19=b6 // I0 (2 cyc)
1153 ;;
1154 mov.m r23=ar.bspstore // M2 (12 cyc)
1155 mov.m r24=ar.rnat // M2 (5 cyc)
1156 mov.i r26=ar.pfs // I0 (2 cyc)
1158 invala // M0|1
1159 nop.m 0 // M
1160 mov r20=r1 // A save r1
1162 nop.m 0
1163 // movl r30=sys_call_table // X
1164 movl r30=ia64_hypercall_table // X
1166 mov r28=cr.iip // M2 (2 cyc)
1167 // cmp.eq p0,p7=r18,r17 // I0 is this a system call?
1168 //(p7) br.cond.spnt non_syscall // B no ->
1169 //
1170 // From this point on, we are definitely on the syscall-path
1171 // and we can use (non-banked) scratch registers.
1172 //
1173 ///////////////////////////////////////////////////////////////////////
1174 mov r1=r16 // A move task-pointer to "addl"-addressable reg
1175 mov r2=r16 // A setup r2 for ia64_syscall_setup
1176 // add r9=TI_FLAGS+IA64_TASK_SIZE,r16 // A r9 = &current_thread_info()->flags
1178 adds r16=IA64_TASK_THREAD_ON_USTACK_OFFSET,r16
1179 // adds r15=-1024,r15 // A subtract 1024 from syscall number
1180 // mov r3=NR_syscalls - 1
1181 mov r3=NR_hypercalls - 1
1182 ;;
1183 ld1.bias r17=[r16] // M0|1 r17 = current->thread.on_ustack flag
1184 // ld4 r9=[r9] // M0|1 r9 = current_thread_info()->flags
1185 mov r9=r0 // force flags = 0
1186 extr.u r8=r29,41,2 // I0 extract ei field from cr.ipsr
1188 shladd r30=r15,3,r30 // A r30 = ia64_hypercall_table + 8*hypercall_nr
1189 addl r22=IA64_RBS_OFFSET,r1 // A compute base of RBS
1190 cmp.leu p6,p7=r15,r3 // A syscall number in range?
1191 ;;
1193 lfetch.fault.excl.nt1 [r22] // M0|1 prefetch RBS
1194 (p6) ld8 r30=[r30] // M0|1 load address of syscall entry point
1195 tnat.nz.or p7,p0=r15 // I0 is syscall nr a NaT?
1197 mov.m ar.bspstore=r22 // M2 switch to kernel RBS
1198 cmp.eq p8,p9=2,r8 // A isr.ei==2?
1199 ;;
1201 (p8) mov r8=0 // A clear ei to 0
1202 //(p7) movl r30=sys_ni_syscall // X
1203 (p7) movl r30=do_ni_hypercall // X
1205 (p8) adds r28=16,r28 // A switch cr.iip to next bundle
1206 (p9) adds r8=1,r8 // A increment ei to next slot
1207 nop.i 0
1208 ;;
1210 mov.m r25=ar.unat // M2 (5 cyc)
1211 dep r29=r8,r29,41,2 // I0 insert new ei into cr.ipsr
1212 // adds r15=1024,r15 // A restore original syscall number
1213 //
1214 // If any of the above loads miss in L1D, we'll stall here until
1215 // the data arrives.
1216 //
1217 ///////////////////////////////////////////////////////////////////////
1218 st1 [r16]=r0 // M2|3 clear current->thread.on_ustack flag
1219 mov b6=r30 // I0 setup syscall handler branch reg early
1220 cmp.eq pKStk,pUStk=r0,r17 // A were we on kernel stacks already?
1222 // and r9=_TIF_SYSCALL_TRACEAUDIT,r9 // A mask trace or audit
1223 mov r18=ar.bsp // M2 (12 cyc)
1224 ;;
1225 (pUStk) addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r1 // A compute base of memory stack
1226 // cmp.eq p14,p0=r9,r0 // A are syscalls being traced/audited?
1227 br.call.sptk.many b7=ia64_syscall_setup // B
1228 1:
1229 mov ar.rsc=0x3 // M2 set eager mode, pl 0, LE, loadrs=0
1230 nop 0
1231 bsw.1 // B (6 cyc) regs are saved, switch to bank 1
1232 ;;
1234 ssm psr.ic | PSR_DEFAULT_BITS // M2 now it's safe to re-enable intr.-collection
1235 // movl r3=ia64_ret_from_syscall // X
1236 ;;
1238 srlz.i // M0 ensure interruption collection is on
1239 // mov rp=r3 // I0 set the real return addr
1240 //(p10) br.cond.spnt.many ia64_ret_from_syscall // B return if bad call-frame or r15 is a NaT
1241 (p15) ssm psr.i // M2 restore psr.i
1242 //(p14) br.call.sptk.many b6=b6 // B invoke syscall-handler (ignore return addr)
1243 // br.call.sptk.many b6=b6 // B invoke syscall-handler (ignore return addr)
1244 br.call.sptk.many b0=b6 // B invoke syscall-handler (ignore return addr)
1245 // br.cond.spnt.many ia64_trace_syscall // B do syscall-tracing thingamagic
1246 ;;
1247 adds r2=PT(R8)+16,r12
1248 ;;
1249 st8 [r2]=r8
1250 ;;
1251 br.call.sptk.many b0=do_softirq
1252 ;;
1253 //restore hypercall argument if continuation
1254 adds r2=IA64_VCPU_HYPERCALL_CONTINUATION_OFS,r13
1255 ;;
1256 ld1 r20=[r2]
1257 ;;
1258 st1 [r2]=r0
1259 ;;
1260 cmp.ne p6,p0=r20,r0
1261 ;;
1262 (p6) adds r2=PT(R16)+16,r12
1263 (p6) adds r3=PT(R17)+16,r12
1264 ;;
1265 (p6) ld8 r32=[r2],16
1266 (p6) ld8 r33=[r3],16
1267 ;;
1268 (p6) ld8 r34=[r2],16
1269 (p6) ld8 r35=[r3],16
1270 ;;
1271 (p6) ld8 r36=[r2],16
1272 ;;
1273 //save ar.bsp before cover
1274 mov r16=ar.bsp
1275 add r2=PT(R14)+16,r12
1276 ;;
1277 st8 [r2]=r16
1278 ;;
1279 rsm psr.i|psr.ic
1280 ;;
1281 srlz.i
1282 ;;
1283 cover
1284 ;;
1285 mov r20=cr.ifs
1286 adds r2=PT(CR_IFS)+16,r12
1287 ;;
1288 st8 [r2]=r20
1289 ;;
1290 br.call.sptk.many b0=reflect_event
1291 ;;
1292 adds r2=PT(R14)+16,r12
1293 adds r3=PT(R8)+16,r12
1294 ;;
1295 //r16 contains ar.bsp before cover
1296 ld8 r16=[r2]
1297 ld8 r8=[r3]
1298 ;;
1299 br.sptk.many ia64_ret_from_syscall
1300 ;;
1301 END(break_fault)
1303 .org ia64_ivt+0x3000
1304 //////////////////////////////////////////////////////////////////////////
1305 // 0x3000 Entry 12 (size 64 bundles) External Interrupt (4)
1306 ENTRY(interrupt)
1307 DBG_FAULT(12)
1308 mov r31=pr // prepare to save predicates
1309 ;;
1310 #ifdef XEN
1311 mov r30=cr.ivr // pass cr.ivr as first arg
1312 // FIXME: this is a hack... use cpuinfo.ksoftirqd because it's
1313 // not used anywhere else and we need a place to stash ivr, and
1314 // there are no registers available that SAVE_MIN/REST leaves unused
1315 movl r29=THIS_CPU(cpu_info)+IA64_CPUINFO_KSOFTIRQD_OFFSET
1316 ;;
1317 st8 [r29]=r30
1318 ;;
1319 movl r28=slow_interrupt
1320 ;;
1321 mov r29=rp
1322 ;;
1323 mov rp=r28
1324 ;;
1325 br.cond.sptk.many fast_tick_reflect
1326 ;;
1327 slow_interrupt:
1328 mov rp=r29;;
1329 #endif
1330 SAVE_MIN_WITH_COVER // uses r31; defines r2 and r3
1331 ssm psr.ic | PSR_DEFAULT_BITS
1332 ;;
1333 adds r3=8,r2 // set up second base pointer for SAVE_REST
1334 srlz.i // ensure everybody knows psr.ic is back on
1335 ;;
1336 SAVE_REST
1337 ;;
1338 alloc r14=ar.pfs,0,0,2,0 // must be first in an insn group
1339 #ifdef XEN
1340 movl out0=THIS_CPU(cpu_info)+IA64_CPUINFO_KSOFTIRQD_OFFSET;;
1341 ld8 out0=[out0];;
1342 #else
1343 mov out0=cr.ivr // pass cr.ivr as first arg
1344 #endif
1345 add out1=16,sp // pass pointer to pt_regs as second arg
1346 #ifndef XEN
1347 ;;
1348 srlz.d // make sure we see the effect of cr.ivr
1349 #endif
1350 movl r14=ia64_leave_kernel
1351 ;;
1352 mov rp=r14
1353 br.call.sptk.many b6=ia64_handle_irq
1354 END(interrupt)
1356 .org ia64_ivt+0x3400
1357 //////////////////////////////////////////////////////////////////////////
1358 // 0x3400 Entry 13 (size 64 bundles) Reserved
1359 DBG_FAULT(13)
1360 FAULT(13)
1362 #ifdef XEN
1363 // There is no particular reason for this code to be here, other
1364 // than that there happens to be space here that would go unused
1365 // otherwise. If this fault ever gets "unreserved", simply move
1366 // the following code to a more suitable spot...
1368 GLOBAL_ENTRY(dispatch_break_fault)
1369 SAVE_MIN_WITH_COVER
1370 ;;
1371 dispatch_break_fault_post_save:
1372 alloc r14=ar.pfs,0,0,4,0 // now it's safe (must be first in insn group!)
1373 mov out0=cr.ifa
1374 adds out1=16,sp
1375 mov out2=cr.isr // FIXME: pity to make this slow access twice
1376 mov out3=cr.iim // FIXME: pity to make this slow access twice
1378 ssm psr.ic | PSR_DEFAULT_BITS
1379 ;;
1380 srlz.i // guarantee that interruption collection is on
1381 ;;
1382 (p15) ssm psr.i // restore psr.i
1383 adds r3=8,r2 // set up second base pointer
1384 ;;
1385 SAVE_REST
1386 movl r14=ia64_leave_kernel
1387 ;;
1388 mov rp=r14
1389 // br.sptk.many ia64_prepare_handle_break // TODO: why commented out?
1390 br.call.sptk.many b6=ia64_handle_break
1391 END(dispatch_break_fault)
1392 #endif
1394 .org ia64_ivt+0x3800
1395 //////////////////////////////////////////////////////////////////////////
1396 // 0x3800 Entry 14 (size 64 bundles) Reserved
1397 DBG_FAULT(14)
1398 FAULT(14)
1400 #ifdef XEN
1401 // this code segment is from 2.6.16.13
1403 /*
1404 * There is no particular reason for this code to be here, other than that
1405 * there happens to be space here that would go unused otherwise. If this
1406 * fault ever gets "unreserved", simply move the following code to a more
1407 * suitable spot...
1409 * ia64_syscall_setup() is a separate subroutine so that it can
1410 * allocate stacked registers so it can safely demine any
1411 * potential NaT values from the input registers.
1413 * On entry:
1414 * - executing on bank 0 or bank 1 register set (doesn't matter)
1415 * - r1: stack pointer
1416 * - r2: current task pointer
1417 * - r3: preserved
1418 * - r11: original contents (saved ar.pfs to be saved)
1419 * - r12: original contents (sp to be saved)
1420 * - r13: original contents (tp to be saved)
1421 * - r15: original contents (syscall # to be saved)
1422 * - r18: saved bsp (after switching to kernel stack)
1423 * - r19: saved b6
1424 * - r20: saved r1 (gp)
1425 * - r21: saved ar.fpsr
1426 * - r22: kernel's register backing store base (krbs_base)
1427 * - r23: saved ar.bspstore
1428 * - r24: saved ar.rnat
1429 * - r25: saved ar.unat
1430 * - r26: saved ar.pfs
1431 * - r27: saved ar.rsc
1432 * - r28: saved cr.iip
1433 * - r29: saved cr.ipsr
1434 * - r31: saved pr
1435 * - b0: original contents (to be saved)
1436 * On exit:
1437 * - p10: TRUE if syscall is invoked with more than 8 out
1438 * registers or r15's Nat is true
1439 * - r1: kernel's gp
1440 * - r3: preserved (same as on entry)
1441 * - r8: -EINVAL if p10 is true
1442 * - r12: points to kernel stack
1443 * - r13: points to current task
1444 * - r14: preserved (same as on entry)
1445 * - p14: preserved
1446 * - p15: TRUE if interrupts need to be re-enabled
1447 * - ar.fpsr: set to kernel settings
1448 * - b6: preserved (same as on entry)
1449 */
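/*
 * The ar.pfs sanity check done below, in C terms (sketch):
 *
 *	sof = pfs & 0x7f;		// size of frame
 *	sol = (pfs >> 7) & 0x7f;	// size of locals
 *	if (sof > sol + 8 || r15_is_nat)
 *		r8 = -EINVAL;		// p10: bad call frame
 */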
1450 GLOBAL_ENTRY(ia64_syscall_setup)
1451 #if PT(B6) != 0
1452 # error This code assumes that b6 is the first field in pt_regs.
1453 #endif
1454 st8 [r1]=r19 // save b6
1455 add r16=PT(CR_IPSR),r1 // initialize first base pointer
1456 add r17=PT(R11),r1 // initialize second base pointer
1457 ;;
1458 alloc r19=ar.pfs,8,0,0,0 // ensure in0-in7 are writable
1459 st8 [r16]=r29,PT(AR_PFS)-PT(CR_IPSR) // save cr.ipsr
1460 tnat.nz p8,p0=in0
1462 st8.spill [r17]=r11,PT(CR_IIP)-PT(R11) // save r11
1463 tnat.nz p9,p0=in1
1464 (pKStk) mov r18=r0 // make sure r18 isn't NaT
1465 ;;
1467 st8 [r16]=r26,PT(CR_IFS)-PT(AR_PFS) // save ar.pfs
1468 st8 [r17]=r28,PT(AR_UNAT)-PT(CR_IIP) // save cr.iip
1469 mov r28=b0 // save b0 (2 cyc)
1470 ;;
1472 st8 [r17]=r25,PT(AR_RSC)-PT(AR_UNAT) // save ar.unat
1473 dep r19=0,r19,38,26 // clear all bits but 0..37 [I0]
1474 (p8) mov in0=-1
1475 ;;
1477 st8 [r16]=r19,PT(AR_RNAT)-PT(CR_IFS) // store ar.pfs.pfm in cr.ifs
1478 extr.u r11=r19,7,7 // I0 // get sol of ar.pfs
1479 and r8=0x7f,r19 // A // get sof of ar.pfs
1481 st8 [r17]=r27,PT(AR_BSPSTORE)-PT(AR_RSC)// save ar.rsc
1482 tbit.nz p15,p0=r29,IA64_PSR_I_BIT // I0
1483 (p9) mov in1=-1
1484 ;;
1486 (pUStk) sub r18=r18,r22 // r18=RSE.ndirty*8
1487 tnat.nz p10,p0=in2
1488 add r11=8,r11
1489 ;;
1490 (pKStk) adds r16=PT(PR)-PT(AR_RNAT),r16 // skip over ar_rnat field
1491 (pKStk) adds r17=PT(B0)-PT(AR_BSPSTORE),r17 // skip over ar_bspstore field
1492 tnat.nz p11,p0=in3
1493 ;;
1494 (p10) mov in2=-1
1495 tnat.nz p12,p0=in4 // [I0]
1496 (p11) mov in3=-1
1497 ;;
1498 (pUStk) st8 [r16]=r24,PT(PR)-PT(AR_RNAT) // save ar.rnat
1499 (pUStk) st8 [r17]=r23,PT(B0)-PT(AR_BSPSTORE) // save ar.bspstore
1500 shl r18=r18,16 // compute ar.rsc to be used for "loadrs"
1501 ;;
1502 st8 [r16]=r31,PT(LOADRS)-PT(PR) // save predicates
1503 st8 [r17]=r28,PT(R1)-PT(B0) // save b0
1504 tnat.nz p13,p0=in5 // [I0]
1505 ;;
1506 st8 [r16]=r18,PT(R12)-PT(LOADRS) // save ar.rsc value for "loadrs"
1507 st8.spill [r17]=r20,PT(R13)-PT(R1) // save original r1
1508 (p12) mov in4=-1
1509 ;;
1511 .mem.offset 0,0; st8.spill [r16]=r12,PT(AR_FPSR)-PT(R12) // save r12
1512 .mem.offset 8,0; st8.spill [r17]=r13,PT(R15)-PT(R13) // save r13
1513 (p13) mov in5=-1
1514 ;;
1515 st8 [r16]=r21,PT(R8)-PT(AR_FPSR) // save ar.fpsr
1516 tnat.nz p13,p0=in6
1517 cmp.lt p10,p9=r11,r8 // frame size can't be more than local+8
1518 ;;
1519 mov r8=1
1520 (p9) tnat.nz p10,p0=r15
1521 adds r12=-16,r1 // switch to kernel memory stack (with 16 bytes of scratch)
1523 st8.spill [r17]=r15 // save r15
1524 tnat.nz p8,p0=in7
1525 nop.i 0
1527 mov r13=r2 // establish `current'
1528 movl r1=__gp // establish kernel global pointer
1529 ;;
1530 st8 [r16]=r8 // ensure pt_regs.r8 != 0 (see handle_syscall_error)
1531 (p13) mov in6=-1
1532 (p8) mov in7=-1
1534 cmp.eq pSys,pNonSys=r0,r0 // set pSys=1, pNonSys=0
1535 movl r17=FPSR_DEFAULT
1536 ;;
1537 mov.m ar.fpsr=r17 // set ar.fpsr to kernel default value
1538 (p10) mov r8=-EINVAL
1539 br.ret.sptk.many b7
1540 END(ia64_syscall_setup)
1543 #else
1544 /*
1545 * There is no particular reason for this code to be here, other
1546 * than that there happens to be space here that would go unused
1547 * otherwise. If this fault ever gets "unreserved", simply move
1548 * the following code to a more suitable spot...
1550 * ia64_syscall_setup() is a separate subroutine so that it can
1551 * allocate stacked registers so it can safely demine any
1552 * potential NaT values from the input registers.
1554 * On entry:
1555 * - executing on bank 0 or bank 1 register set (doesn't matter)
1556 * - r1: stack pointer
1557 * - r2: current task pointer
1558 * - r3: preserved
1559 * - r11: original contents (saved ar.pfs to be saved)
1560 * - r12: original contents (sp to be saved)
1561 * - r13: original contents (tp to be saved)
1562 * - r15: original contents (syscall # to be saved)
1563 * - r18: saved bsp (after switching to kernel stack)
1564 * - r19: saved b6
1565 * - r20: saved r1 (gp)
1566 * - r21: saved ar.fpsr
1567 * - r22: kernel's register backing store base (krbs_base)
1568 * - r23: saved ar.bspstore
1569 * - r24: saved ar.rnat
1570 * - r25: saved ar.unat
1571 * - r26: saved ar.pfs
1572 * - r27: saved ar.rsc
1573 * - r28: saved cr.iip
1574 * - r29: saved cr.ipsr
1575 * - r31: saved pr
1576 * - b0: original contents (to be saved)
1577 * On exit:
1578 * - executing on bank 1 registers
1579 * - psr.ic enabled, interrupts restored
1580 * - p10: TRUE if syscall is invoked with more than 8 out
1581 * registers or r15's Nat is true
1582 * - r1: kernel's gp
1583 * - r3: preserved (same as on entry)
1584 * - r8: -EINVAL if p10 is true
1585 * - r12: points to kernel stack
1586 * - r13: points to current task
1587 * - p15: TRUE if interrupts need to be re-enabled
1588 * - ar.fpsr: set to kernel settings
1589 */
1590 GLOBAL_ENTRY(ia64_syscall_setup)
1591 #ifndef XEN
1592 #if PT(B6) != 0
1593 # error This code assumes that b6 is the first field in pt_regs.
1594 #endif
1595 #endif
1596 st8 [r1]=r19 // save b6
1597 add r16=PT(CR_IPSR),r1 // initialize first base pointer
1598 add r17=PT(R11),r1 // initialize second base pointer
1599 ;;
1600 alloc r19=ar.pfs,8,0,0,0 // ensure in0-in7 are writable
1601 st8 [r16]=r29,PT(AR_PFS)-PT(CR_IPSR) // save cr.ipsr
1602 tnat.nz p8,p0=in0
1604 st8.spill [r17]=r11,PT(CR_IIP)-PT(R11) // save r11
1605 tnat.nz p9,p0=in1
1606 (pKStk) mov r18=r0 // make sure r18 isn't NaT
1607 ;;
1609 st8 [r16]=r26,PT(CR_IFS)-PT(AR_PFS) // save ar.pfs
1610 st8 [r17]=r28,PT(AR_UNAT)-PT(CR_IIP) // save cr.iip
1611 mov r28=b0 // save b0 (2 cyc)
1612 ;;
1614 st8 [r17]=r25,PT(AR_RSC)-PT(AR_UNAT) // save ar.unat
1615 dep r19=0,r19,38,26 // clear all bits but 0..37 [I0]
1616 (p8) mov in0=-1
1617 ;;
1619 st8 [r16]=r19,PT(AR_RNAT)-PT(CR_IFS) // store ar.pfs.pfm in cr.ifs
1620 extr.u r11=r19,7,7 // I0 // get sol of ar.pfs
1621 and r8=0x7f,r19 // A // get sof of ar.pfs
1623 st8 [r17]=r27,PT(AR_BSPSTORE)-PT(AR_RSC)// save ar.rsc
1624 tbit.nz p15,p0=r29,IA64_PSR_I_BIT // I0
1625 (p9) mov in1=-1
1626 ;;
1628 (pUStk) sub r18=r18,r22 // r18=RSE.ndirty*8
1629 tnat.nz p10,p0=in2
1630 add r11=8,r11
1631 ;;
1632 (pKStk) adds r16=PT(PR)-PT(AR_RNAT),r16 // skip over ar_rnat field
1633 (pKStk) adds r17=PT(B0)-PT(AR_BSPSTORE),r17 // skip over ar_bspstore field
1634 tnat.nz p11,p0=in3
1635 ;;
1636 (p10) mov in2=-1
1637 tnat.nz p12,p0=in4 // [I0]
1638 (p11) mov in3=-1
1639 ;;
1640 (pUStk) st8 [r16]=r24,PT(PR)-PT(AR_RNAT) // save ar.rnat
1641 (pUStk) st8 [r17]=r23,PT(B0)-PT(AR_BSPSTORE) // save ar.bspstore
1642 shl r18=r18,16 // compute ar.rsc to be used
1643 // for "loadrs"
1644 ;;
1645 st8 [r16]=r31,PT(LOADRS)-PT(PR) // save predicates
1646 st8 [r17]=r28,PT(R1)-PT(B0) // save b0
1647 tnat.nz p13,p0=in5 // [I0]
1648 ;;
1649 st8 [r16]=r18,PT(R12)-PT(LOADRS) // save ar.rsc value for
1650 // "loadrs"
1651 st8.spill [r17]=r20,PT(R13)-PT(R1) // save original r1
1652 (p12) mov in4=-1
1653 ;;
1655 .mem.offset 0,0; st8.spill [r16]=r12,PT(AR_FPSR)-PT(R12) // save r12
1656 .mem.offset 8,0; st8.spill [r17]=r13,PT(R15)-PT(R13) // save r13
1657 (p13) mov in5=-1
1658 ;;
1659 st8 [r16]=r21,PT(R8)-PT(AR_FPSR) // save ar.fpsr
1660 tnat.nz p14,p0=in6
1661 cmp.lt p10,p9=r11,r8 // frame size can't be more than local+8
1662 ;;
1663 stf8 [r16]=f1 // ensure pt_regs.r8 != 0
1664 // (see handle_syscall_error)
1665 (p9) tnat.nz p10,p0=r15
1666 adds r12=-16,r1 // switch to kernel memory stack (with 16
1667 // bytes of scratch)
1669 st8.spill [r17]=r15 // save r15
1670 tnat.nz p8,p0=in7
1671 nop.i 0
1673 mov r13=r2 // establish `current'
1674 movl r1=__gp // establish kernel global pointer
1675 ;;
1676 (p14) mov in6=-1
1677 (p8) mov in7=-1
1678 nop.i 0
1680 cmp.eq pSys,pNonSys=r0,r0 // set pSys=1, pNonSys=0
1681 movl r17=FPSR_DEFAULT
1682 ;;
1683 mov.m ar.fpsr=r17 // set ar.fpsr to kernel default value
1684 (p10) mov r8=-EINVAL
1685 br.ret.sptk.many b7
1686 END(ia64_syscall_setup)
1687 #endif /* XEN */
1689 .org ia64_ivt+0x3c00
1690 //////////////////////////////////////////////////////////////////////////
1691 // 0x3c00 Entry 15 (size 64 bundles) Reserved
1692 DBG_FAULT(15)
1693 FAULT(15)
1695 #ifndef XEN
1696 /*
1697 * Squatting in this space ...
1699 * This special case dispatcher for illegal operation faults
1700 * allows preserved registers to be modified through a callback
1701 * function (asm only) that is handed back from the fault handler
1702 * in r8. Up to three arguments can be passed to the callback
1703 * function by returning an aggregate with the callback as its
1704 * first element, followed by the arguments.
1705 */
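/*
 * A C sketch of that contract (the struct name here is illustrative):
 *
 *	struct illegal_op_return {
 *		unsigned long fkt;	// callback to run, 0 = none
 *		unsigned long arg1, arg2, arg3;
 *	};
 *
 * The aggregate comes back in r8-r11; a non-zero r8 is branched to via
 * b6 and returns to ia64_leave_kernel.
 */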
1706 ENTRY(dispatch_illegal_op_fault)
1707 SAVE_MIN_WITH_COVER
1708 ssm psr.ic | PSR_DEFAULT_BITS
1709 ;;
1710 srlz.i // guarantee that interruption collection is on
1711 ;;
1712 (p15) ssm psr.i // restore psr.i
1713 adds r3=8,r2 // set up second base pointer for SAVE_REST
1714 ;;
1715 alloc r14=ar.pfs,0,0,1,0 // must be first in insn group
1716 mov out0=ar.ec
1717 ;;
1718 SAVE_REST
1719 ;;
1720 br.call.sptk.many rp=ia64_illegal_op_fault
1721 .ret0: ;;
1722 alloc r14=ar.pfs,0,0,3,0 // must be first in insn group
1723 mov out0=r9
1724 mov out1=r10
1725 mov out2=r11
1726 movl r15=ia64_leave_kernel
1727 ;;
1728 mov rp=r15
1729 mov b6=r8
1730 ;;
1731 cmp.ne p6,p0=0,r8
1732 (p6) br.call.dpnt.many b6=b6 // call returns to ia64_leave_kernel
1733 br.sptk.many ia64_leave_kernel
1734 END(dispatch_illegal_op_fault)
1735 #endif
1737 .org ia64_ivt+0x4000
1738 //////////////////////////////////////////////////////////////////////////
1739 // 0x4000 Entry 16 (size 64 bundles) Reserved
1740 DBG_FAULT(16)
1741 FAULT(16)
1743 #ifdef XEN
1744 // There is no particular reason for this code to be here, other
1745 // than that there happens to be space here that would go unused
1746 // otherwise. If this fault ever gets "unreserved", simply move
1747 // the following code to a more suitable spot...
1749 ENTRY(dispatch_privop_fault)
1750 SAVE_MIN_WITH_COVER
1751 ;;
1752 alloc r14=ar.pfs,0,0,4,0 // now it's safe (must be first in
1753 // insn group!)
1754 mov out0=cr.ifa
1755 adds out1=16,sp
1756 mov out2=cr.isr // FIXME: pity to make this slow access twice
1757 mov out3=cr.itir
1759 ssm psr.ic | PSR_DEFAULT_BITS
1760 ;;
1761 srlz.i // guarantee that interruption
1762 // collection is on
1763 ;;
1764 (p15) ssm psr.i // restore psr.i
1765 adds r3=8,r2 // set up second base pointer
1766 ;;
1767 SAVE_REST
1768 movl r14=ia64_leave_kernel
1769 ;;
1770 mov rp=r14
1771 // br.sptk.many ia64_prepare_handle_privop // TODO: why commented out?
1772 br.call.sptk.many b6=ia64_handle_privop
1773 END(dispatch_privop_fault)
1774 #endif
1777 .org ia64_ivt+0x4400
1778 //////////////////////////////////////////////////////////////////////////
1779 // 0x4400 Entry 17 (size 64 bundles) Reserved
1780 DBG_FAULT(17)
1781 FAULT(17)
1783 #ifndef XEN
1784 ENTRY(non_syscall)
1785 SAVE_MIN_WITH_COVER
1787 // There is no particular reason for this code to be here, other
1788 // than that there happens to be space here that would go unused
1789 // otherwise. If this fault ever gets "unreserved", simply move
1790 // the following code to a more suitable spot...
1792 alloc r14=ar.pfs,0,0,2,0
1793 mov out0=cr.iim
1794 add out1=16,sp
1795 adds r3=8,r2 // set up second base pointer for SAVE_REST
1797 ssm psr.ic | PSR_DEFAULT_BITS
1798 ;;
1799 srlz.i // guarantee that interruption collection is on
1800 ;;
1801 (p15) ssm psr.i // restore psr.i
1802 movl r15=ia64_leave_kernel
1803 ;;
1804 SAVE_REST
1805 mov rp=r15
1806 ;;
1807 br.call.sptk.many b6=ia64_bad_break // avoid WAW on CFM and
1808 // ignore return addr
1809 END(non_syscall)
1810 #endif
1812 .org ia64_ivt+0x4800
1813 //////////////////////////////////////////////////////////////////////////
1814 // 0x4800 Entry 18 (size 64 bundles) Reserved
1815 DBG_FAULT(18)
1816 FAULT(18)
1818 #ifndef XEN
1819 /*
1820 * There is no particular reason for this code to be here, other
1821 * than that there happens to be space here that would go unused
1822 * otherwise. If this fault ever gets "unreserved", simply move
1823 * the following code to a more suitable spot...
1824 */
1825 ENTRY(dispatch_unaligned_handler)
1826 SAVE_MIN_WITH_COVER
1827 ;;
1828 alloc r14=ar.pfs,0,0,2,0 // now it's safe (must be first in
1829 // insn group!)
1830 mov out0=cr.ifa
1831 adds out1=16,sp
1833 ssm psr.ic | PSR_DEFAULT_BITS
1834 ;;
1835 srlz.i // guarantee that interruption
1836 // collection is on
1837 ;;
1838 (p15) ssm psr.i // restore psr.i
1839 adds r3=8,r2 // set up second base pointer
1840 ;;
1841 SAVE_REST
1842 movl r14=ia64_leave_kernel
1843 ;;
1844 mov rp=r14
1845 // br.sptk.many ia64_prepare_handle_unaligned // TODO: why commented out?
1846 br.call.sptk.many b6=ia64_handle_unaligned
1847 END(dispatch_unaligned_handler)
1848 #endif

	.org ia64_ivt+0x4c00
//////////////////////////////////////////////////////////////////////////
// 0x4c00 Entry 19 (size 64 bundles) Reserved
	DBG_FAULT(19)
	FAULT(19)

	/*
	 * There is no particular reason for this code to be here, other
	 * than that there happens to be space here that would go unused
	 * otherwise.  If this fault ever gets "unreserved", simply move
	 * the following code to a more suitable spot...
	 */

GLOBAL_ENTRY(dispatch_to_fault_handler)
	/*
	 * Input:
	 *	psr.ic:	off
	 *	r19:	fault vector number (e.g., 24 for General Exception)
	 *	r31:	contains saved predicates (pr)
	 */
	SAVE_MIN_WITH_COVER_R19
	alloc r14=ar.pfs,0,0,5,0
	mov out0=r15
	mov out1=cr.isr
	mov out2=cr.ifa
	mov out3=cr.iim
	mov out4=cr.itir
	;;
	ssm psr.ic | PSR_DEFAULT_BITS
	;;
	srlz.i				// guarantee that interruption
					// collection is on
	;;
(p15)	ssm psr.i			// restore psr.i
	adds r3=8,r2			// set up second base pointer for
					// SAVE_REST
	;;
	SAVE_REST
	movl r14=ia64_leave_kernel
	;;
	mov rp=r14
	br.call.sptk.many b6=ia64_fault
END(dispatch_to_fault_handler)
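
// The outs above (out0..out4) line up with a fault handler of roughly this
// shape; a sketch for orientation, check the C source for the exact
// prototype:
//
//	void ia64_fault(unsigned long vector, unsigned long isr,
//			unsigned long ifa, unsigned long iim,
//			unsigned long itir);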

//
// --- End of long entries, Beginning of short entries
//

	.org ia64_ivt+0x5000
//////////////////////////////////////////////////////////////////////////
// 0x5000 Entry 20 (size 16 bundles) Page Not Present (10,22,49)
ENTRY(page_not_present)
	DBG_FAULT(20)
#ifdef XEN
	FAULT_OR_REFLECT(20)
#else
	mov r16=cr.ifa
	rsm psr.dt
	/*
	 * The Linux page fault handler doesn't expect non-present pages
	 * to be in the TLB.  Flush the existing entry now, so we meet
	 * that expectation.
	 */
	mov r17=PAGE_SHIFT<<2
	;;
	ptc.l r16,r17
	;;
	mov r31=pr
	srlz.d
	br.sptk.many page_fault
#endif
END(page_not_present)

	.org ia64_ivt+0x5100
//////////////////////////////////////////////////////////////////////////
// 0x5100 Entry 21 (size 16 bundles) Key Permission (13,25,52)
ENTRY(key_permission)
	DBG_FAULT(21)
#ifdef XEN
	FAULT_OR_REFLECT(21)
#else
	mov r16=cr.ifa
	rsm psr.dt
	mov r31=pr
	;;
	srlz.d
	br.sptk.many page_fault
#endif
END(key_permission)

	.org ia64_ivt+0x5200
//////////////////////////////////////////////////////////////////////////
// 0x5200 Entry 22 (size 16 bundles) Instruction Access Rights (26)
ENTRY(iaccess_rights)
	DBG_FAULT(22)
#ifdef XEN
	FAULT_OR_REFLECT(22)
#else
	mov r16=cr.ifa
	rsm psr.dt
	mov r31=pr
	;;
	srlz.d
	br.sptk.many page_fault
#endif
END(iaccess_rights)

	.org ia64_ivt+0x5300
//////////////////////////////////////////////////////////////////////////
// 0x5300 Entry 23 (size 16 bundles) Data Access Rights (14,53)
ENTRY(daccess_rights)
	DBG_FAULT(23)
#ifdef XEN
	mov r31=pr
	;;
	mov r16=cr.isr
	mov r17=cr.ifa
	mov r19=23
	movl r20=0x5300
	br.sptk.many fast_access_reflect
	;;
#else
	mov r16=cr.ifa
	rsm psr.dt
	mov r31=pr
	;;
	srlz.d
	br.sptk.many page_fault
#endif
END(daccess_rights)

	.org ia64_ivt+0x5400
//////////////////////////////////////////////////////////////////////////
// 0x5400 Entry 24 (size 16 bundles) General Exception (5,32,34,36,38,39)
ENTRY(general_exception)
	DBG_FAULT(24)
	mov r16=cr.isr
	mov r31=pr
	;;
#ifdef XEN
	cmp4.ge p6,p0=0x20,r16		// ISR code <= 0x20 => treat as a
					// privileged-operation fault
(p6)	br.sptk.many dispatch_privop_fault
	;;
	FAULT_OR_REFLECT(24)
#else
	cmp4.eq p6,p0=0,r16		// ISR code == 0 => illegal operation
(p6)	br.sptk.many dispatch_illegal_op_fault
#endif
	;;
	mov r19=24			// fault number
	br.sptk.many dispatch_to_fault_handler
END(general_exception)

	.org ia64_ivt+0x5500
//////////////////////////////////////////////////////////////////////////
// 0x5500 Entry 25 (size 16 bundles) Disabled FP-Register (35)
ENTRY(disabled_fp_reg)
	DBG_FAULT(25)
#ifdef XEN
#if 0					// TODO: can this be removed?
	mov r20=pr
	movl r16=0x2000000000000000
	movl r17=0x2000000000176b60
	mov r18=cr.iip
	mov r19=rr[r16]
	movl r22=0xe95d0439
	;;
	mov pr=r0,-1
	;;
	cmp.eq p6,p7=r22,r19
	;;
(p6)	cmp.eq p8,p9=r17,r18
(p8)	br.sptk.few floating_panic
	;;
	mov pr=r20,-1
	;;
#endif
	FAULT_OR_REFLECT(25)
//floating_panic:			// TODO: can this be removed?
//	br.sptk.many floating_panic
	;;
#endif
	rsm psr.dfh			// ensure we can access fph
	;;
	srlz.d
	mov r31=pr
	mov r19=25
	br.sptk.many dispatch_to_fault_handler
END(disabled_fp_reg)

	.org ia64_ivt+0x5600
//////////////////////////////////////////////////////////////////////////
// 0x5600 Entry 26 (size 16 bundles) Nat Consumption (11,23,37,50)
ENTRY(nat_consumption)
	DBG_FAULT(26)
#ifdef XEN
	FAULT_OR_REFLECT(26)
#else
	FAULT(26)
#endif
END(nat_consumption)

	.org ia64_ivt+0x5700
//////////////////////////////////////////////////////////////////////////
// 0x5700 Entry 27 (size 16 bundles) Speculation (40)
ENTRY(speculation_vector)
	DBG_FAULT(27)
#ifdef XEN
	// this probably need not reflect...
	FAULT_OR_REFLECT(27)
#else
	/*
	 * A [f]chk.[as] instruction needs to take the branch to the
	 * recovery code but this part of the architecture is not
	 * implemented in hardware on some CPUs, such as Itanium.  Thus,
	 * in general we need to emulate the behavior.  IIM contains the
	 * relative target (not yet sign extended).  So after sign extending
	 * it we simply add it to IIP.  We also need to reset the EI field
	 * of the IPSR to zero, i.e., the slot to restart into.
	 *
	 * cr.iim contains zero_ext(imm21)
	 */
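	/*
	 * In C terms the fix-up below is (illustrative sketch only):
	 *
	 *	long off = ((long)(iim << 43)) >> 39; // sign-extend imm21 and
	 *	                                      // scale to bundles (<<4)
	 *	iip  += off;                          // recovery branch target
	 *	ipsr &= ~(3UL << 41);                 // clear IPSR.ei (slot 0)
	 */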
	mov r18=cr.iim
	;;
	mov r17=cr.iip
	shl r18=r18,43			// put sign bit in position (43=64-21)
	;;

	mov r16=cr.ipsr
	shr r18=r18,39			// sign extend (39=43-4)
	;;

	add r17=r17,r18			// now add the offset
	;;
	mov cr.iip=r17
	dep r16=0,r16,41,2		// clear EI
	;;

	mov cr.ipsr=r16
	;;

	rfi				// and go back
#endif
END(speculation_vector)

	.org ia64_ivt+0x5800
//////////////////////////////////////////////////////////////////////////
// 0x5800 Entry 28 (size 16 bundles) Reserved
	DBG_FAULT(28)
	FAULT(28)

	.org ia64_ivt+0x5900
//////////////////////////////////////////////////////////////////////////
// 0x5900 Entry 29 (size 16 bundles) Debug (16,28,56)
ENTRY(debug_vector)
	DBG_FAULT(29)
#ifdef XEN
	FAULT_OR_REFLECT(29)
#else
	FAULT(29)
#endif
END(debug_vector)

	.org ia64_ivt+0x5a00
//////////////////////////////////////////////////////////////////////////
// 0x5a00 Entry 30 (size 16 bundles) Unaligned Reference (57)
ENTRY(unaligned_access)
	DBG_FAULT(30)
#ifdef XEN
	FAULT_OR_REFLECT(30)
#else
	mov r16=cr.ipsr
	mov r31=pr			// prepare to save predicates
	;;
	br.sptk.many dispatch_unaligned_handler
#endif
END(unaligned_access)

	.org ia64_ivt+0x5b00
//////////////////////////////////////////////////////////////////////////
// 0x5b00 Entry 31 (size 16 bundles) Unsupported Data Reference (57)
ENTRY(unsupported_data_reference)
	DBG_FAULT(31)
#ifdef XEN
	FAULT_OR_REFLECT(31)
#else
	FAULT(31)
#endif
END(unsupported_data_reference)

	.org ia64_ivt+0x5c00
//////////////////////////////////////////////////////////////////////////
// 0x5c00 Entry 32 (size 16 bundles) Floating-Point Fault (64)
ENTRY(floating_point_fault)
	DBG_FAULT(32)
#ifdef XEN
	FAULT_OR_REFLECT(32)
#else
	FAULT(32)
#endif
END(floating_point_fault)

	.org ia64_ivt+0x5d00
//////////////////////////////////////////////////////////////////////////
// 0x5d00 Entry 33 (size 16 bundles) Floating-Point Trap (66)
ENTRY(floating_point_trap)
	DBG_FAULT(33)
#ifdef XEN
	FAULT_OR_REFLECT(33)
#else
	FAULT(33)
#endif
END(floating_point_trap)

	.org ia64_ivt+0x5e00
//////////////////////////////////////////////////////////////////////////
// 0x5e00 Entry 34 (size 16 bundles) Lower-Privilege Transfer Trap (66)
ENTRY(lower_privilege_trap)
	DBG_FAULT(34)
#ifdef XEN
	FAULT_OR_REFLECT(34)
#else
	FAULT(34)
#endif
END(lower_privilege_trap)

	.org ia64_ivt+0x5f00
//////////////////////////////////////////////////////////////////////////
// 0x5f00 Entry 35 (size 16 bundles) Taken Branch Trap (68)
ENTRY(taken_branch_trap)
	DBG_FAULT(35)
#ifdef XEN
	FAULT_OR_REFLECT(35)
#else
	FAULT(35)
#endif
END(taken_branch_trap)

	.org ia64_ivt+0x6000
//////////////////////////////////////////////////////////////////////////
// 0x6000 Entry 36 (size 16 bundles) Single Step Trap (69)
ENTRY(single_step_trap)
	DBG_FAULT(36)
#ifdef XEN
	FAULT_OR_REFLECT(36)
#else
	FAULT(36)
#endif
END(single_step_trap)

	.org ia64_ivt+0x6100
//////////////////////////////////////////////////////////////////////////
// 0x6100 Entry 37 (size 16 bundles) Reserved
	DBG_FAULT(37)
	FAULT(37)

	.org ia64_ivt+0x6200
//////////////////////////////////////////////////////////////////////////
// 0x6200 Entry 38 (size 16 bundles) Reserved
	DBG_FAULT(38)
	FAULT(38)

	.org ia64_ivt+0x6300
//////////////////////////////////////////////////////////////////////////
// 0x6300 Entry 39 (size 16 bundles) Reserved
	DBG_FAULT(39)
	FAULT(39)

	.org ia64_ivt+0x6400
//////////////////////////////////////////////////////////////////////////
// 0x6400 Entry 40 (size 16 bundles) Reserved
	DBG_FAULT(40)
	FAULT(40)

	.org ia64_ivt+0x6500
//////////////////////////////////////////////////////////////////////////
// 0x6500 Entry 41 (size 16 bundles) Reserved
	DBG_FAULT(41)
	FAULT(41)

	.org ia64_ivt+0x6600
//////////////////////////////////////////////////////////////////////////
// 0x6600 Entry 42 (size 16 bundles) Reserved
	DBG_FAULT(42)
	FAULT(42)

	.org ia64_ivt+0x6700
//////////////////////////////////////////////////////////////////////////
// 0x6700 Entry 43 (size 16 bundles) Reserved
	DBG_FAULT(43)
	FAULT(43)

	.org ia64_ivt+0x6800
//////////////////////////////////////////////////////////////////////////
// 0x6800 Entry 44 (size 16 bundles) Reserved
	DBG_FAULT(44)
	FAULT(44)

	.org ia64_ivt+0x6900
//////////////////////////////////////////////////////////////////////////
// 0x6900 Entry 45 (size 16 bundles) IA-32 Exception (17,18,29,41,42,43,
//                                                    44,58,60,61,62,72,
//                                                    73,75,76,77)
ENTRY(ia32_exception)
	DBG_FAULT(45)
#ifdef XEN
	FAULT_OR_REFLECT(45)
#else
	FAULT(45)
#endif
END(ia32_exception)

	.org ia64_ivt+0x6a00
//////////////////////////////////////////////////////////////////////////
// 0x6a00 Entry 46 (size 16 bundles) IA-32 Intercept (30,31,59,70,71)
ENTRY(ia32_intercept)
	DBG_FAULT(46)
#ifdef XEN
	FAULT_OR_REFLECT(46)
#else
#ifdef CONFIG_IA32_SUPPORT
	mov r31=pr
	mov r16=cr.isr
	;;
	extr.u r17=r16,16,8		// get ISR.code
	mov r18=ar.eflag
	mov r19=cr.iim			// old eflag value
	;;
	cmp.ne p6,p0=2,r17
(p6)	br.cond.spnt 1f			// not a system flag fault
	xor r16=r18,r19
	;;
	extr.u r17=r16,18,1		// get the eflags.ac bit
	;;
	cmp.eq p6,p0=0,r17
(p6)	br.cond.spnt 1f			// eflags.ac bit didn't change
	;;
	mov pr=r31,-1			// restore predicate registers
	rfi

1:
#endif	// CONFIG_IA32_SUPPORT
	FAULT(46)
#endif
END(ia32_intercept)
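
// In C terms the CONFIG_IA32_SUPPORT path above is roughly (sketch only):
//
//	if (isr_code == 2 && ((ar_eflag ^ old_eflag) & (1UL << 18)))
//		return;		/* only EFLAGS.AC toggled: resume via rfi */
//	fault(46);		/* anything else is a real intercept */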

	.org ia64_ivt+0x6b00
//////////////////////////////////////////////////////////////////////////
// 0x6b00 Entry 47 (size 16 bundles) IA-32 Interrupt (74)
ENTRY(ia32_interrupt)
	DBG_FAULT(47)
#ifdef XEN
	FAULT_OR_REFLECT(47)
#else
#ifdef CONFIG_IA32_SUPPORT
	mov r31=pr
	br.sptk.many dispatch_to_ia32_handler
#else
	FAULT(47)
#endif
#endif
END(ia32_interrupt)

	.org ia64_ivt+0x6c00
//////////////////////////////////////////////////////////////////////////
// 0x6c00 Entry 48 (size 16 bundles) Reserved
	DBG_FAULT(48)
	FAULT(48)

	.org ia64_ivt+0x6d00
//////////////////////////////////////////////////////////////////////////
// 0x6d00 Entry 49 (size 16 bundles) Reserved
	DBG_FAULT(49)
	FAULT(49)

	.org ia64_ivt+0x6e00
//////////////////////////////////////////////////////////////////////////
// 0x6e00 Entry 50 (size 16 bundles) Reserved
	DBG_FAULT(50)
	FAULT(50)

	.org ia64_ivt+0x6f00
//////////////////////////////////////////////////////////////////////////
// 0x6f00 Entry 51 (size 16 bundles) Reserved
	DBG_FAULT(51)
	FAULT(51)

	.org ia64_ivt+0x7000
//////////////////////////////////////////////////////////////////////////
// 0x7000 Entry 52 (size 16 bundles) Reserved
	DBG_FAULT(52)
	FAULT(52)

	.org ia64_ivt+0x7100
//////////////////////////////////////////////////////////////////////////
// 0x7100 Entry 53 (size 16 bundles) Reserved
	DBG_FAULT(53)
	FAULT(53)

	.org ia64_ivt+0x7200
//////////////////////////////////////////////////////////////////////////
// 0x7200 Entry 54 (size 16 bundles) Reserved
	DBG_FAULT(54)
	FAULT(54)

	.org ia64_ivt+0x7300
//////////////////////////////////////////////////////////////////////////
// 0x7300 Entry 55 (size 16 bundles) Reserved
	DBG_FAULT(55)
	FAULT(55)

	.org ia64_ivt+0x7400
//////////////////////////////////////////////////////////////////////////
// 0x7400 Entry 56 (size 16 bundles) Reserved
	DBG_FAULT(56)
	FAULT(56)

	.org ia64_ivt+0x7500
//////////////////////////////////////////////////////////////////////////
// 0x7500 Entry 57 (size 16 bundles) Reserved
	DBG_FAULT(57)
	FAULT(57)

	.org ia64_ivt+0x7600
//////////////////////////////////////////////////////////////////////////
// 0x7600 Entry 58 (size 16 bundles) Reserved
	DBG_FAULT(58)
	FAULT(58)

	.org ia64_ivt+0x7700
//////////////////////////////////////////////////////////////////////////
// 0x7700 Entry 59 (size 16 bundles) Reserved
	DBG_FAULT(59)
	FAULT(59)

	.org ia64_ivt+0x7800
//////////////////////////////////////////////////////////////////////////
// 0x7800 Entry 60 (size 16 bundles) Reserved
	DBG_FAULT(60)
	FAULT(60)

	.org ia64_ivt+0x7900
//////////////////////////////////////////////////////////////////////////
// 0x7900 Entry 61 (size 16 bundles) Reserved
	DBG_FAULT(61)
	FAULT(61)

	.org ia64_ivt+0x7a00
//////////////////////////////////////////////////////////////////////////
// 0x7a00 Entry 62 (size 16 bundles) Reserved
	DBG_FAULT(62)
	FAULT(62)

	.org ia64_ivt+0x7b00
//////////////////////////////////////////////////////////////////////////
// 0x7b00 Entry 63 (size 16 bundles) Reserved
	DBG_FAULT(63)
	FAULT(63)

	.org ia64_ivt+0x7c00
//////////////////////////////////////////////////////////////////////////
// 0x7c00 Entry 64 (size 16 bundles) Reserved
	DBG_FAULT(64)
	FAULT(64)

	.org ia64_ivt+0x7d00
//////////////////////////////////////////////////////////////////////////
// 0x7d00 Entry 65 (size 16 bundles) Reserved
	DBG_FAULT(65)
	FAULT(65)

	.org ia64_ivt+0x7e00
//////////////////////////////////////////////////////////////////////////
// 0x7e00 Entry 66 (size 16 bundles) Reserved
	DBG_FAULT(66)
	FAULT(66)

	.org ia64_ivt+0x7f00
//////////////////////////////////////////////////////////////////////////
// 0x7f00 Entry 67 (size 16 bundles) Reserved
	DBG_FAULT(67)
	FAULT(67)

#ifdef XEN
	.org ia64_ivt+0x8000
GLOBAL_ENTRY(dispatch_reflection)
	/*
	 * Input:
	 *	psr.ic:	off
	 *	r19:	intr type (offset into ivt, see ia64_int.h)
	 *	r31:	contains saved predicates (pr)
	 */
	SAVE_MIN_WITH_COVER_R19
	alloc r14=ar.pfs,0,0,5,0
	mov out4=r15
	mov out0=cr.ifa
	adds out1=16,sp
	mov out2=cr.isr
	mov out3=cr.iim
//	mov out3=cr.itir		// TODO: why commented out?

	ssm psr.ic | PSR_DEFAULT_BITS
	;;
	srlz.i				// guarantee that interruption
					// collection is on
	;;
(p15)	ssm psr.i			// restore psr.i
	adds r3=8,r2			// set up second base pointer
	;;
	SAVE_REST
	movl r14=ia64_leave_kernel
	;;
	mov rp=r14
//	br.sptk.many ia64_prepare_handle_reflection	// TODO: why commented out?
	br.call.sptk.many b6=ia64_handle_reflection
END(dispatch_reflection)
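
// The outs above (out0..out4, with the vector in out4) suggest a C handler
// of roughly this shape; a sketch only, the exact Xen prototype may differ:
//
//	void ia64_handle_reflection(unsigned long ifa, struct pt_regs *regs,
//				    unsigned long isr, unsigned long iim,
//				    unsigned long vector);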

#define SAVE_MIN_COVER_DONE	DO_SAVE_MIN(,mov r30=cr.ifs,)

// same as dispatch_break_fault except cover has already been done
GLOBAL_ENTRY(dispatch_slow_hyperprivop)
	SAVE_MIN_COVER_DONE
	;;
	br.sptk.many dispatch_break_fault_post_save
END(dispatch_slow_hyperprivop)
#endif

#ifdef CONFIG_IA32_SUPPORT

	/*
	 * There is no particular reason for this code to be here, other
	 * than that there happens to be space here that would go unused
	 * otherwise.  If this fault ever gets "unreserved", simply move
	 * the following code to a more suitable spot...
	 */

	// IA32 interrupt entry point
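	//
	// The loads below marshal the i386 syscall convention out of
	// pt_regs into IA-64 out registers.  Summarizing what the code
	// does (not an extra specification):
	//
	//	eax (r8)  syscall number	ebx (r11) -> out0
	//	ecx (r9)  -> out1		edx (r10) -> out2
	//	esi (r14) -> out3		edi (r15) -> out4
	//	ebp (r13) -> out5
	//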

ENTRY(dispatch_to_ia32_handler)
	SAVE_MIN
	;;
	mov r14=cr.isr
	ssm psr.ic | PSR_DEFAULT_BITS
	;;
	srlz.i				// guarantee that interruption collection is on
	;;
(p15)	ssm psr.i
	adds r3=8,r2			// Base pointer for SAVE_REST
	;;
	SAVE_REST
	;;
	mov r15=0x80
	shr r14=r14,16			// Get interrupt number
	;;
	cmp.ne p6,p0=r14,r15
(p6)	br.call.dpnt.many b6=non_ia32_syscall

	adds r14=IA64_PT_REGS_R8_OFFSET + 16,sp	// 16 byte hole per SW
						// conventions
	adds r15=IA64_PT_REGS_R1_OFFSET + 16,sp
	;;
	cmp.eq pSys,pNonSys=r0,r0	// set pSys=1, pNonSys=0
	ld8 r8=[r14]			// get r8
	;;
	st8 [r15]=r8			// save original EAX in r1 (IA32 procs
					// don't use the GP)
	;;
	alloc r15=ar.pfs,0,0,6,0	// must be first in an insn group
	;;
	ld4 r8=[r14],8			// r8 == eax (syscall number)
	mov r15=IA32_NR_syscalls
	;;
	cmp.ltu.unc p6,p7=r8,r15
	ld4 out1=[r14],8		// r9 == ecx
	;;
	ld4 out2=[r14],8		// r10 == edx
	;;
	ld4 out0=[r14]			// r11 == ebx
	adds r14=(IA64_PT_REGS_R13_OFFSET) + 16,sp
	;;
	ld4 out5=[r14],PT(R14)-PT(R13)	// r13 == ebp
	;;
	ld4 out3=[r14],PT(R15)-PT(R14)	// r14 == esi
	adds r2=TI_FLAGS+IA64_TASK_SIZE,r13
	;;
	ld4 out4=[r14]			// r15 == edi
	movl r16=ia32_syscall_table
	;;
(p6)	shladd r16=r8,3,r16		// force ni_syscall if not valid
					// syscall number
	ld4 r2=[r2]			// r2 = current_thread_info()->flags
	;;
	ld8 r16=[r16]
	and r2=_TIF_SYSCALL_TRACEAUDIT,r2	// mask trace or audit
	;;
	mov b6=r16
	movl r15=ia32_ret_from_syscall
	cmp.eq p8,p0=r2,r0
	;;
	mov rp=r15
(p8)	br.call.sptk.many b6=b6
	br.cond.sptk ia32_trace_syscall

non_ia32_syscall:
	alloc r15=ar.pfs,0,0,2,0
	mov out0=r14			// interrupt #
	add out1=16,sp			// pointer to pt_regs
	;;				// avoid WAW on CFM
	br.call.sptk.many rp=ia32_bad_interrupt
.ret1:	movl r15=ia64_leave_kernel
	;;
	mov rp=r15
	br.ret.sptk.many rp
END(dispatch_to_ia32_handler)

#endif /* CONFIG_IA32_SUPPORT */