ia64/linux-2.6.18-xen.hg

arch/ia64/xen/xenivt.S @ 897:329ea0ccb344

balloon: try harder to balloon up under memory pressure.

Currently, if the balloon driver is unable to increase the guest's
reservation, it assumes the failure was due to reaching its full
allocation, gives up on the ballooning operation, and records the limit
it reached as the "hard limit". The driver will not try again until
the target is set again (even to the same value).

However, it is possible that ballooning has in fact failed due to
memory pressure in the host, and it is therefore desirable to keep
attempting to reach the target in case memory becomes available. The
most likely scenario is that some guests are ballooning down while
others are ballooning up, producing temporary memory pressure while
things stabilise. You would not expect a well-behaved toolstack to ask
a domain to balloon to more than its allocation, nor would you expect
it to deliberately over-commit memory by setting balloon targets which
exceed the total host memory.

This patch drops the concept of a hard limit and causes the balloon
driver to retry increasing the reservation on a timer in the same
manner as when decreasing the reservation.

Also, if we partially succeed in increasing the reservation
(i.e. receive fewer pages than we asked for), then we may as well keep
those pages rather than returning them to Xen.

Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
Author: Keir Fraser <keir.fraser@citrix.com>
Date:   Fri Jun 05 14:01:20 2009 +0100
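
The retry behaviour described above is simple to state; the sketch below is a
minimal illustration of it in C, not the driver's actual code. The names
increase_reservation(), schedule_balloon_retry() and current_pages are
hypothetical stand-ins for the real reservation hypercall and retry-timer
plumbing.

    /* Hypothetical helpers standing in for the real Xen plumbing. */
    extern long increase_reservation(unsigned long nr_pages); /* pages granted, <= nr_pages */
    extern void schedule_balloon_retry(void);                 /* re-arm the balloon worker  */

    static unsigned long current_pages;

    static void balloon_up(unsigned long target)
    {
            while (current_pages < target) {
                    long got = increase_reservation(target - current_pages);

                    if (got <= 0) {
                            /* Host under memory pressure: keep the pages we
                             * already have and retry later, instead of
                             * recording a "hard limit" and giving up until
                             * the target changes. */
                            schedule_balloon_retry();
                            return;
                    }
                    /* Partial success is still progress -- keep those pages. */
                    current_pages += got;
            }
    }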
/*
 * arch/ia64/xen/ivt.S
 *
 * Copyright (C) 2005 Hewlett-Packard Co
 *      Dan Magenheimer <dan.magenheimer@hp.com>
 */
/*
 * This file defines the interruption vector table used by the CPU.
 * It does not include one entry per possible cause of interruption.
 *
 * The first 20 entries of the table contain 64 bundles each while the
 * remaining 48 entries contain only 16 bundles each.
 *
 * The 64 bundles are used to allow inlining the whole handler for critical
 * interruptions like TLB misses.
 *
 * For each entry, the comment is as follows:
 *
 *              // 0x1c00 Entry 7 (size 64 bundles) Data Key Miss (12,51)
 *              entry offset ----/     /         /                  /  /
 *              entry number ---------/         /                  /  /
 *              size of the entry -------------/                  /  /
 *              vector name -------------------------------------/  /
 *              interruptions triggering this vector ----------------/
 *
 * The table is 32KB in size and must be aligned on 32KB boundary.
 * (The CPU ignores the 15 lower bits of the address)
 *
 * Table is based upon EAS2.6 (Oct 1999)
 */
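/*
 * Size check: 20 entries * 64 bundles * 16 bytes = 20KB, plus
 * 48 entries * 16 bundles * 16 bytes = 12KB, for 32KB total.
 */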

#include <asm/asmmacro.h>
#include <asm/break.h>
#include <asm/ia32.h>
#include <asm/kregs.h>
#include <asm/asm-offsets.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/system.h>
#include <asm/thread_info.h>
#include <asm/unistd.h>
#include <asm/errno.h>

#ifdef CONFIG_XEN
#define ia64_ivt xen_ivt
#endif

#if 1
# define PSR_DEFAULT_BITS       psr.ac
#else
# define PSR_DEFAULT_BITS       0
#endif

#if 0
/*
 * This lets you track the last eight faults that occurred on the CPU.  Make sure ar.k2 isn't
 * needed for something else before enabling this...
 */
# define DBG_FAULT(i)   mov r16=ar.k2;; shl r16=r16,8;; add r16=(i),r16;; mov ar.k2=r16
#else
# define DBG_FAULT(i)
#endif

#define MINSTATE_VIRT   /* needed by minstate.h */
#include "xenminstate.h"

#define FAULT(n)                                                \
        mov r31=pr;                                             \
        mov r19=n;;     /* prepare to save predicates */        \
        br.sptk.many dispatch_to_fault_handler
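/*
 * FAULT(n) leaves the saved predicates in r31 and the vector number in
 * r19, which is exactly the input contract of dispatch_to_fault_handler
 * below.
 */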

        .section .text.ivt,"ax"

        .align 32768    // align on 32KB boundary
        .global ia64_ivt
ia64_ivt:
/////////////////////////////////////////////////////////////////////////////////////////
// 0x0000 Entry 0 (size 64 bundles) VHPT Translation (8,20,47)
ENTRY(vhpt_miss)
        DBG_FAULT(0)
        /*
         * The VHPT vector is invoked when the TLB entry for the virtual page table
         * is missing.  This happens only as a result of a previous
         * (the "original") TLB miss, which may either be caused by an instruction
         * fetch or a data access (or non-access).
         *
         * What we do here is normal TLB miss handling for the _original_ miss,
         * followed by inserting the TLB entry for the virtual page table page
         * that the VHPT walker was attempting to access.  The latter gets
         * inserted as long as the page table entries above the pte level have
         * valid mappings for the faulting address.  The TLB entry for the
         * original miss gets inserted only if the pte entry indicates that the
         * page is present.
         *
         * do_page_fault gets invoked in the following cases:
         *      - the faulting virtual address uses unimplemented address bits
         *      - the faulting virtual address has no valid page table mapping
         */
#ifdef CONFIG_XEN
        movl r16=XSI_IFA
        ;;
        ld8 r16=[r16]
#ifdef CONFIG_HUGETLB_PAGE
        movl r18=PAGE_SHIFT
        movl r25=XSI_ITIR
        ;;
        ld8 r25=[r25]
#endif
        ;;
#else
        mov r16=cr.ifa                          // get address that caused the TLB miss
#ifdef CONFIG_HUGETLB_PAGE
        movl r18=PAGE_SHIFT
        mov r25=cr.itir
#endif
#endif
        ;;
#ifdef CONFIG_XEN
        XEN_HYPER_RSM_PSR_DT;
#else
        rsm psr.dt                              // use physical addressing for data
#endif
        mov r31=pr                              // save the predicate registers
        mov r19=IA64_KR(PT_BASE)                // get page table base address
        shl r21=r16,3                           // shift bit 60 into sign bit
        shr.u r17=r16,61                        // get the region number into r17
        ;;
        shr.u r22=r21,3
#ifdef CONFIG_HUGETLB_PAGE
        extr.u r26=r25,2,6
        ;;
        cmp.ne p8,p0=r18,r26
        sub r27=r26,r18
        ;;
(p8)    dep r25=r18,r25,2,6
(p8)    shr r22=r22,r27
#endif
        ;;
        cmp.eq p6,p7=5,r17                      // is IFA pointing into region 5?
        shr.u r18=r22,PGDIR_SHIFT               // get bottom portion of pgd index bits
        ;;
(p7)    dep r17=r17,r19,(PAGE_SHIFT-3),3        // put region number bits in place

        srlz.d
        LOAD_PHYSICAL(p6, r19, swapper_pg_dir)  // region 5 is rooted at swapper_pg_dir

        .pred.rel "mutex", p6, p7
(p6)    shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT
(p7)    shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT-3
        ;;
(p6)    dep r17=r18,r19,3,(PAGE_SHIFT-3)        // r17=pgd_offset for region 5
(p7)    dep r17=r18,r17,3,(PAGE_SHIFT-6)        // r17=pgd_offset for region[0-4]
        cmp.eq p7,p6=0,r21                      // unused address bits all zeroes?
#ifdef CONFIG_PGTABLE_4
        shr.u r28=r22,PUD_SHIFT                 // shift pud index into position
#else
        shr.u r18=r22,PMD_SHIFT                 // shift pmd index into position
#endif
        ;;
        ld8 r17=[r17]                           // get *pgd (may be 0)
        ;;
(p7)    cmp.eq p6,p7=r17,r0                     // was pgd_present(*pgd) == NULL?
#ifdef CONFIG_PGTABLE_4
        dep r28=r28,r17,3,(PAGE_SHIFT-3)        // r28=pud_offset(pgd,addr)
        ;;
        shr.u r18=r22,PMD_SHIFT                 // shift pmd index into position
(p7)    ld8 r29=[r28]                           // get *pud (may be 0)
        ;;
(p7)    cmp.eq.or.andcm p6,p7=r29,r0            // was pud_present(*pud) == NULL?
        dep r17=r18,r29,3,(PAGE_SHIFT-3)        // r17=pmd_offset(pud,addr)
#else
        dep r17=r18,r17,3,(PAGE_SHIFT-3)        // r17=pmd_offset(pgd,addr)
#endif
        ;;
(p7)    ld8 r20=[r17]                           // get *pmd (may be 0)
        shr.u r19=r22,PAGE_SHIFT                // shift pte index into position
        ;;
(p7)    cmp.eq.or.andcm p6,p7=r20,r0            // was pmd_present(*pmd) == NULL?
        dep r21=r19,r20,3,(PAGE_SHIFT-3)        // r21=pte_offset(pmd,addr)
        ;;
(p7)    ld8 r18=[r21]                           // read *pte
#ifdef CONFIG_XEN
        movl r19=XSI_ISR
        ;;
        ld8 r19=[r19]
#else
        mov r19=cr.isr                          // cr.isr bit 32 tells us if this is an insn miss
#endif
        ;;
(p7)    tbit.z p6,p7=r18,_PAGE_P_BIT            // page present bit cleared?
#ifdef CONFIG_XEN
        movl r22=XSI_IHA
        ;;
        ld8 r22=[r22]
#else
        mov r22=cr.iha                          // get the VHPT address that caused the TLB miss
#endif
        ;;                                      // avoid RAW on p7
(p7)    tbit.nz.unc p10,p11=r19,32              // is it an instruction TLB miss?
        dep r23=0,r20,0,PAGE_SHIFT              // clear low bits to get page address
        ;;
#ifdef CONFIG_XEN
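        // Xen fast hypercalls pass their argument and return their result
        // in r8, so the guest's r8 is stashed around the paravirtualized
        // itc below.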
        mov r24=r8
        mov r8=r18
        ;;
(p10)   XEN_HYPER_ITC_I
        ;;
(p11)   XEN_HYPER_ITC_D
        ;;
        mov r8=r24
        ;;
#else
(p10)   itc.i r18                               // insert the instruction TLB entry
(p11)   itc.d r18                               // insert the data TLB entry
#endif
(p6)    br.cond.spnt.many page_fault            // handle bad address/page not present (page fault)
#ifdef CONFIG_XEN
        movl r24=XSI_IFA
        ;;
        st8 [r24]=r22
        ;;
#else
        mov cr.ifa=r22
#endif

#ifdef CONFIG_HUGETLB_PAGE
(p8)    mov cr.itir=r25                         // change to default page-size for VHPT
#endif

        /*
         * Now compute and insert the TLB entry for the virtual page table.  We never
         * execute in a page table page so there is no need to set the exception deferral
         * bit.
         */
        adds r24=__DIRTY_BITS_NO_ED|_PAGE_PL_0|_PAGE_AR_RW,r23
        ;;
#ifdef CONFIG_XEN
(p7)    mov r25=r8
(p7)    mov r8=r24
        ;;
(p7)    XEN_HYPER_ITC_D
        ;;
(p7)    mov r8=r25
        ;;
#else
(p7)    itc.d r24
#endif
        ;;
#ifdef CONFIG_SMP
        /*
         * Tell the assembler's dependency-violation checker that the above "itc" instructions
         * cannot possibly affect the following loads:
         */
        dv_serialize_data

        /*
         * Re-check the pagetable entries.  If they changed, we may have received a ptc.g
         * between reading the pagetable and the "itc".  If so, flush the entry we
         * inserted and retry.  At this point, we have:
         *
         *      r28 = equivalent of pud_offset(pgd, ifa)
         *      r17 = equivalent of pmd_offset(pud, ifa)
         *      r21 = equivalent of pte_offset(pmd, ifa)
         *
         *      r29 = *pud
         *      r20 = *pmd
         *      r18 = *pte
         */
        ld8 r25=[r21]                           // read *pte again
        ld8 r26=[r17]                           // read *pmd again
#ifdef CONFIG_PGTABLE_4
        ld8 r19=[r28]                           // read *pud again
#endif
        cmp.ne p6,p7=r0,r0
        ;;
        cmp.ne.or.andcm p6,p7=r26,r20           // did *pmd change
#ifdef CONFIG_PGTABLE_4
        cmp.ne.or.andcm p6,p7=r19,r29           // did *pud change
#endif
        mov r27=PAGE_SHIFT<<2
        ;;
(p6)    ptc.l r22,r27                           // purge PTE page translation
(p7)    cmp.ne.or.andcm p6,p7=r25,r18           // did *pte change
        ;;
(p6)    ptc.l r16,r27                           // purge translation
#endif

        mov pr=r31,-1                           // restore predicate registers
#ifdef CONFIG_XEN
        XEN_HYPER_RFI
        dv_serialize_data
#else
        rfi
#endif
END(vhpt_miss)

        .org ia64_ivt+0x400
/////////////////////////////////////////////////////////////////////////////////////////
// 0x0400 Entry 1 (size 64 bundles) ITLB (21)
ENTRY(itlb_miss)
        DBG_FAULT(1)
        /*
         * The ITLB handler accesses the PTE via the virtually mapped linear
         * page table.  If a nested TLB miss occurs, we switch into physical
         * mode, walk the page table, and then re-execute the PTE read and
         * go on normally after that.
         */
#ifdef CONFIG_XEN
        movl r16=XSI_IFA
        ;;
        ld8 r16=[r16]
#else
        mov r16=cr.ifa                          // get virtual address
#endif
        mov r29=b0                              // save b0
        mov r31=pr                              // save predicates
.itlb_fault:
#ifdef CONFIG_XEN
        movl r17=XSI_IHA
        ;;
        ld8 r17=[r17]                           // get virtual address of L3 PTE
#else
        mov r17=cr.iha                          // get virtual address of PTE
#endif
        movl r30=1f                             // load nested fault continuation point
        ;;
1:      ld8 r18=[r17]                           // read *pte
        ;;
        mov b0=r29
        tbit.z p6,p0=r18,_PAGE_P_BIT            // page present bit cleared?
(p6)    br.cond.spnt page_fault
        ;;
#ifdef CONFIG_XEN
        mov r19=r8
        mov r8=r18
        ;;
        XEN_HYPER_ITC_I
        ;;
        mov r8=r19
#else
        itc.i r18
#endif
        ;;
#ifdef CONFIG_SMP
        /*
         * Tell the assembler's dependency-violation checker that the above "itc" instructions
         * cannot possibly affect the following loads:
         */
        dv_serialize_data

        ld8 r19=[r17]                           // read *pte again and see if same
        mov r20=PAGE_SHIFT<<2                   // setup page size for purge
        ;;
        cmp.ne p7,p0=r18,r19
        ;;
(p7)    ptc.l r16,r20
#endif
        mov pr=r31,-1
#ifdef CONFIG_XEN
        XEN_HYPER_RFI
        dv_serialize_data
#else
        rfi
#endif
END(itlb_miss)

        .org ia64_ivt+0x0800
/////////////////////////////////////////////////////////////////////////////////////////
// 0x0800 Entry 2 (size 64 bundles) DTLB (9,48)
ENTRY(dtlb_miss)
        DBG_FAULT(2)
        /*
         * The DTLB handler accesses the PTE via the virtually mapped linear
         * page table.  If a nested TLB miss occurs, we switch into physical
         * mode, walk the page table, and then re-execute the PTE read and
         * go on normally after that.
         */
#ifdef CONFIG_XEN
        movl r16=XSI_IFA
        ;;
        ld8 r16=[r16]
#else
        mov r16=cr.ifa                          // get virtual address
#endif
        mov r29=b0                              // save b0
        mov r31=pr                              // save predicates
dtlb_fault:
#ifdef CONFIG_XEN
        movl r17=XSI_IHA
        ;;
        ld8 r17=[r17]                           // get virtual address of L3 PTE
#else
        mov r17=cr.iha                          // get virtual address of PTE
#endif
        movl r30=1f                             // load nested fault continuation point
        ;;
1:      ld8 r18=[r17]                           // read *pte
        ;;
        mov b0=r29
        tbit.z p6,p0=r18,_PAGE_P_BIT            // page present bit cleared?
(p6)    br.cond.spnt page_fault
        ;;
#ifdef CONFIG_XEN
        mov r19=r8
        mov r8=r18
        ;;
        XEN_HYPER_ITC_D
        ;;
        mov r8=r19
        ;;
#else
        itc.d r18
#endif
        ;;
#ifdef CONFIG_SMP
        /*
         * Tell the assembler's dependency-violation checker that the above "itc" instructions
         * cannot possibly affect the following loads:
         */
        dv_serialize_data

        ld8 r19=[r17]                           // read *pte again and see if same
        mov r20=PAGE_SHIFT<<2                   // setup page size for purge
        ;;
        cmp.ne p7,p0=r18,r19
        ;;
(p7)    ptc.l r16,r20
#endif
        mov pr=r31,-1
#ifdef CONFIG_XEN
        XEN_HYPER_RFI
        dv_serialize_data
#else
        rfi
#endif
END(dtlb_miss)

        .org ia64_ivt+0x0c00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x0c00 Entry 3 (size 64 bundles) Alt ITLB (19)
ENTRY(alt_itlb_miss)
        DBG_FAULT(3)
#ifdef CONFIG_XEN
        movl r31=XSI_IPSR
        ;;
        ld8 r21=[r31],XSI_IFA_OFS-XSI_IPSR_OFS  // get ipsr, point to ifa
        movl r17=PAGE_KERNEL
        ;;
        ld8 r16=[r31]                           // get ifa
#else
        mov r16=cr.ifa                          // get address that caused the TLB miss
        movl r17=PAGE_KERNEL
        mov r21=cr.ipsr
#endif
        movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
        mov r31=pr
        ;;
#ifdef CONFIG_DISABLE_VHPT
        shr.u r22=r16,61                        // get the region number into r22
        ;;
        cmp.gt p8,p0=6,r22                      // user mode
        ;;
#ifndef CONFIG_XEN
(p8)    thash r17=r16
        ;;
(p8)    mov cr.iha=r17
#endif
(p8)    mov r29=b0                              // save b0
(p8)    br.cond.dptk .itlb_fault
#endif
        extr.u r23=r21,IA64_PSR_CPL0_BIT,2      // extract psr.cpl
        and r19=r19,r16                         // clear ed, reserved bits, and PTE control bits
        shr.u r18=r16,57                        // move address bit 61 to bit 4
        ;;
        andcm r18=0x10,r18                      // bit 4=~address-bit(61)
        cmp.ne p8,p0=r0,r23                     // psr.cpl != 0?
        or r19=r17,r19                          // insert PTE control bits into r19
        ;;
        or r19=r19,r18                          // set bit 4 (uncached) if the access was to region 6
(p8)    br.cond.spnt page_fault
        ;;
#ifdef CONFIG_XEN
        mov r18=r8
        mov r8=r19
        ;;
        XEN_HYPER_ITC_I
        ;;
        mov r8=r18
        ;;
        mov pr=r31,-1
        ;;
        XEN_HYPER_RFI;
#else
        itc.i r19                               // insert the TLB entry
        mov pr=r31,-1
        rfi
#endif
END(alt_itlb_miss)

        .org ia64_ivt+0x1000
/////////////////////////////////////////////////////////////////////////////////////////
// 0x1000 Entry 4 (size 64 bundles) Alt DTLB (7,46)
ENTRY(alt_dtlb_miss)
        DBG_FAULT(4)
#ifdef CONFIG_XEN
        movl r31=XSI_IPSR
        ;;
        ld8 r21=[r31],XSI_ISR_OFS-XSI_IPSR_OFS  // get ipsr, point to isr
        movl r17=PAGE_KERNEL
        ;;
        ld8 r20=[r31],XSI_IFA_OFS-XSI_ISR_OFS   // get isr, point to ifa
        movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
        ;;
        ld8 r16=[r31]                           // get ifa
#else
        mov r16=cr.ifa                          // get address that caused the TLB miss
        movl r17=PAGE_KERNEL
        mov r20=cr.isr
        movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
        mov r21=cr.ipsr
#endif
        mov r31=pr
        ;;
#ifdef CONFIG_DISABLE_VHPT
        shr.u r22=r16,61                        // get the region number into r22
        ;;
        cmp.gt p8,p0=6,r22                      // access to region 0-5
        ;;
#ifndef CONFIG_XEN
(p8)    thash r17=r16
        ;;
(p8)    mov cr.iha=r17
#endif
(p8)    mov r29=b0                              // save b0
(p8)    br.cond.dptk dtlb_fault
#endif
        extr.u r23=r21,IA64_PSR_CPL0_BIT,2      // extract psr.cpl
        and r22=IA64_ISR_CODE_MASK,r20          // get the isr.code field
        tbit.nz p6,p7=r20,IA64_ISR_SP_BIT       // is speculation bit on?
        shr.u r18=r16,57                        // move address bit 61 to bit 4
        and r19=r19,r16                         // clear ed, reserved bits, and PTE control bits
        tbit.nz p9,p0=r20,IA64_ISR_NA_BIT       // is non-access bit on?
        ;;
        andcm r18=0x10,r18                      // bit 4=~address-bit(61)
        cmp.ne p8,p0=r0,r23
(p9)    cmp.eq.or.andcm p6,p7=IA64_ISR_CODE_LFETCH,r22 // check isr.code field
(p8)    br.cond.spnt page_fault

        dep r21=-1,r21,IA64_PSR_ED_BIT,1
        or r19=r19,r17                          // insert PTE control bits into r19
        ;;
        or r19=r19,r18                          // set bit 4 (uncached) if the access was to region 6
(p6)    mov cr.ipsr=r21
        ;;
#ifdef CONFIG_XEN
(p7)    mov r18=r8
(p7)    mov r8=r19
        ;;
(p7)    XEN_HYPER_ITC_D
        ;;
(p7)    mov r8=r18
        ;;
        mov pr=r31,-1
        ;;
        XEN_HYPER_RFI;
#else
(p7)    itc.d r19                               // insert the TLB entry
        mov pr=r31,-1
        rfi
#endif
END(alt_dtlb_miss)

        .org ia64_ivt+0x1400
/////////////////////////////////////////////////////////////////////////////////////////
// 0x1400 Entry 5 (size 64 bundles) Data nested TLB (6,45)
ENTRY(nested_dtlb_miss)
        /*
         * In the absence of kernel bugs, we get here when the virtually mapped linear
         * page table is accessed non-speculatively (e.g., in the Dirty-bit, Instruction
         * Access-bit, or Data Access-bit faults).  If the DTLB entry for the virtual page
         * table is missing, a nested TLB miss fault is triggered and control is
         * transferred to this point.  When this happens, we look up the pte for the
         * faulting address by walking the page table in physical mode and return to the
         * continuation point passed in register r30 (or call page_fault if the address is
         * not mapped).
         *
         * Input:       r16:    faulting address
         *              r29:    saved b0
         *              r30:    continuation address
         *              r31:    saved pr
         *
         * Output:      r17:    physical address of PTE of faulting address
         *              r29:    saved b0
         *              r30:    continuation address
         *              r31:    saved pr
         *
         * Clobbered:   b0, r18, r19, r21, r22, psr.dt (cleared)
         */
#ifdef CONFIG_XEN
        XEN_HYPER_RSM_PSR_DT;
#else
        rsm psr.dt                              // switch to using physical data addressing
#endif
        mov r19=IA64_KR(PT_BASE)                // get the page table base address
        shl r21=r16,3                           // shift bit 60 into sign bit
#ifdef CONFIG_XEN
        movl r18=XSI_ITIR
        ;;
        ld8 r18=[r18]
#else
        mov r18=cr.itir
#endif
        ;;
        shr.u r17=r16,61                        // get the region number into r17
        extr.u r18=r18,2,6                      // get the faulting page size
        ;;
        cmp.eq p6,p7=5,r17                      // is faulting address in region 5?
        add r22=-PAGE_SHIFT,r18                 // adjustment for hugetlb address
        add r18=PGDIR_SHIFT-PAGE_SHIFT,r18
        ;;
        shr.u r22=r16,r22
        shr.u r18=r16,r18
(p7)    dep r17=r17,r19,(PAGE_SHIFT-3),3        // put region number bits in place

        srlz.d
        LOAD_PHYSICAL(p6, r19, swapper_pg_dir)  // region 5 is rooted at swapper_pg_dir

        .pred.rel "mutex", p6, p7
(p6)    shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT
(p7)    shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT-3
        ;;
(p6)    dep r17=r18,r19,3,(PAGE_SHIFT-3)        // r17=pgd_offset for region 5
(p7)    dep r17=r18,r17,3,(PAGE_SHIFT-6)        // r17=pgd_offset for region[0-4]
        cmp.eq p7,p6=0,r21                      // unused address bits all zeroes?
#ifdef CONFIG_PGTABLE_4
        shr.u r18=r22,PUD_SHIFT                 // shift pud index into position
#else
        shr.u r18=r22,PMD_SHIFT                 // shift pmd index into position
#endif
        ;;
        ld8 r17=[r17]                           // get *pgd (may be 0)
        ;;
(p7)    cmp.eq p6,p7=r17,r0                     // was pgd_present(*pgd) == NULL?
        dep r17=r18,r17,3,(PAGE_SHIFT-3)        // r17=p[u|m]d_offset(pgd,addr)
        ;;
#ifdef CONFIG_PGTABLE_4
(p7)    ld8 r17=[r17]                           // get *pud (may be 0)
        shr.u r18=r22,PMD_SHIFT                 // shift pmd index into position
        ;;
(p7)    cmp.eq.or.andcm p6,p7=r17,r0            // was pud_present(*pud) == NULL?
        dep r17=r18,r17,3,(PAGE_SHIFT-3)        // r17=pmd_offset(pud,addr)
        ;;
#endif
(p7)    ld8 r17=[r17]                           // get *pmd (may be 0)
        shr.u r19=r22,PAGE_SHIFT                // shift pte index into position
        ;;
(p7)    cmp.eq.or.andcm p6,p7=r17,r0            // was pmd_present(*pmd) == NULL?
        dep r17=r19,r17,3,(PAGE_SHIFT-3)        // r17=pte_offset(pmd,addr)
(p6)    br.cond.spnt page_fault
        mov b0=r30
        br.sptk.many b0                         // return to continuation point
END(nested_dtlb_miss)

        .org ia64_ivt+0x1800
/////////////////////////////////////////////////////////////////////////////////////////
// 0x1800 Entry 6 (size 64 bundles) Instruction Key Miss (24)
ENTRY(ikey_miss)
        DBG_FAULT(6)
        FAULT(6)
END(ikey_miss)

        //-----------------------------------------------------------------------------------
        // call do_page_fault (predicates are in r31, psr.dt may be off, r16 is faulting address)
ENTRY(page_fault)
#ifdef CONFIG_XEN
        XEN_HYPER_SSM_PSR_DT
#else
        ssm psr.dt
        ;;
        srlz.i
#endif
        ;;
        SAVE_MIN_WITH_COVER
        alloc r15=ar.pfs,0,0,3,0
#ifdef CONFIG_XEN
        movl r3=XSI_ISR
        ;;
        ld8 out1=[r3],XSI_IFA_OFS-XSI_ISR_OFS   // get vcr.isr, point to ifa
        ;;
        ld8 out0=[r3]                           // get vcr.ifa
        mov r14=1
        ;;
        add r3=XSI_PSR_IC_OFS-XSI_IFA_OFS, r3   // point to vpsr.ic
        ;;
        st4 [r3]=r14                            // vpsr.ic = 1
        adds r3=8,r2                            // set up second base pointer
        ;;
        sum PSR_DEFAULT_BITS                    // set psr.ac via the user mask
#else
        mov out0=cr.ifa
        mov out1=cr.isr
        adds r3=8,r2                            // set up second base pointer
        ;;
        ssm psr.ic | PSR_DEFAULT_BITS
#endif
        ;;
        srlz.i                                  // guarantee that interruption collection is on
        ;;
#ifdef CONFIG_XEN

#define MASK_TO_PEND_OFS        (-1)
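        // In struct vcpu_info the evtchn_upcall_pending byte immediately
        // precedes evtchn_upcall_mask, hence the -1 offset from the mask
        // address to the pending address.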

(p15)   movl r14=XSI_PSR_I_ADDR
        ;;
(p15)   ld8 r14=[r14]
        ;;
(p15)   st1 [r14]=r0,MASK_TO_PEND_OFS           // if (p15) vpsr.i = 1
        ;;                                      // if (p15) (vcpu->vcpu_info->evtchn_upcall_mask)=0
(p15)   ld1 r14=[r14]                           // if (vcpu->vcpu_info->evtchn_upcall_pending)
        ;;
(p15)   cmp.ne p15,p0=r14,r0
        ;;
(p15)   XEN_HYPER_SSM_I
#else
(p15)   ssm psr.i                               // restore psr.i
#endif
        movl r14=ia64_leave_kernel
        ;;
        SAVE_REST
        mov rp=r14
        ;;
        adds out2=16,r12                        // out2 = pointer to pt_regs
        br.call.sptk.many b6=ia64_do_page_fault // ignore return address
END(page_fault)

        .org ia64_ivt+0x1c00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x1c00 Entry 7 (size 64 bundles) Data Key Miss (12,51)
ENTRY(dkey_miss)
        DBG_FAULT(7)
        FAULT(7)
END(dkey_miss)

        .org ia64_ivt+0x2000
/////////////////////////////////////////////////////////////////////////////////////////
// 0x2000 Entry 8 (size 64 bundles) Dirty-bit (54)
ENTRY(dirty_bit)
        DBG_FAULT(8)
        /*
         * What we do here is to simply turn on the dirty bit in the PTE.  We need to
         * update both the page-table and the TLB entry.  To efficiently access the PTE,
         * we address it through the virtual page table.  Most likely, the TLB entry for
         * the relevant virtual page table page is still present in the TLB so we can
         * normally do this without additional TLB misses.  In case the necessary virtual
         * page table TLB entry isn't present, we take a nested TLB miss hit where we look
         * up the physical address of the L3 PTE and then continue at label 1 below.
         */
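        // In C terms (SMP case): old = *pte; if (old & _PAGE_P)
        //   cmpxchg(pte, old, old | _PAGE_D | _PAGE_A);
        // and the TLB entry is installed only if the cmpxchg won.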
#ifdef CONFIG_XEN
        movl r16=XSI_IFA
        ;;
        ld8 r16=[r16]
        ;;
#else
        mov r16=cr.ifa                          // get the address that caused the fault
#endif
        movl r30=1f                             // load continuation point in case of nested fault
        ;;
#ifdef CONFIG_XEN
        mov r18=r8;
        mov r8=r16;
        XEN_HYPER_THASH;;
        mov r17=r8;
        mov r8=r18;;
#else
        thash r17=r16                           // compute virtual address of L3 PTE
#endif
        mov r29=b0                              // save b0 in case of nested fault
        mov r31=pr                              // save pr
#ifdef CONFIG_SMP
        mov r28=ar.ccv                          // save ar.ccv
        ;;
1:      ld8 r18=[r17]
        ;;                                      // avoid RAW on r18
        mov ar.ccv=r18                          // set compare value for cmpxchg
        or r25=_PAGE_D|_PAGE_A,r18              // set the dirty and accessed bits
        tbit.z p7,p6=r18,_PAGE_P_BIT            // Check present bit
        ;;
(p6)    cmpxchg8.acq r26=[r17],r25,ar.ccv       // Only update if page is present
        mov r24=PAGE_SHIFT<<2
        ;;
(p6)    cmp.eq p6,p7=r26,r18                    // Only compare if page is present
        ;;
#ifdef CONFIG_XEN
(p6)    mov r18=r8
(p6)    mov r8=r25
        ;;
(p6)    XEN_HYPER_ITC_D
        ;;
(p6)    mov r8=r18
#else
(p6)    itc.d r25                               // install updated PTE
#endif
        ;;
        /*
         * Tell the assembler's dependency-violation checker that the above "itc" instructions
         * cannot possibly affect the following loads:
         */
        dv_serialize_data

        ld8 r18=[r17]                           // read PTE again
        ;;
        cmp.eq p6,p7=r18,r25                    // is it same as the newly installed
        ;;
(p7)    ptc.l r16,r24
        mov b0=r29                              // restore b0
        mov ar.ccv=r28
#else
        ;;
1:      ld8 r18=[r17]
        ;;                                      // avoid RAW on r18
        or r18=_PAGE_D|_PAGE_A,r18              // set the dirty and accessed bits
        mov b0=r29                              // restore b0
        ;;
        st8 [r17]=r18                           // store back updated PTE
        itc.d r18                               // install updated PTE
#endif
        mov pr=r31,-1                           // restore pr
#ifdef CONFIG_XEN
        XEN_HYPER_RFI
        dv_serialize_data
#else
        rfi
#endif
END(dirty_bit)

        .org ia64_ivt+0x2400
/////////////////////////////////////////////////////////////////////////////////////////
// 0x2400 Entry 9 (size 64 bundles) Instruction Access-bit (27)
ENTRY(iaccess_bit)
        DBG_FAULT(9)
        // Like Entry 8, except for instruction access
#ifdef CONFIG_XEN
        movl r16=XSI_IFA
        ;;
        ld8 r16=[r16]
        ;;
#else
        mov r16=cr.ifa                          // get the address that caused the fault
#endif
        movl r30=1f                             // load continuation point in case of nested fault
        mov r31=pr                              // save predicates
#ifdef CONFIG_ITANIUM
        /*
         * Erratum 10 (IFA may contain incorrect address) has "NoFix" status.
         */
        mov r17=cr.ipsr
        ;;
        mov r18=cr.iip
        tbit.z p6,p0=r17,IA64_PSR_IS_BIT        // IA64 instruction set?
        ;;
(p6)    mov r16=r18                             // if so, use cr.iip instead of cr.ifa
#endif /* CONFIG_ITANIUM */
        ;;
#ifdef CONFIG_XEN
        mov r18=r8;
        mov r8=r16;
        XEN_HYPER_THASH;;
        mov r17=r8;
        mov r8=r18;;
#else
        thash r17=r16                           // compute virtual address of L3 PTE
#endif
        mov r29=b0                              // save b0 in case of nested fault
#ifdef CONFIG_SMP
        mov r28=ar.ccv                          // save ar.ccv
        ;;
1:      ld8 r18=[r17]
        ;;
        mov ar.ccv=r18                          // set compare value for cmpxchg
        or r25=_PAGE_A,r18                      // set the accessed bit
        tbit.z p7,p6=r18,_PAGE_P_BIT            // Check present bit
        ;;
(p6)    cmpxchg8.acq r26=[r17],r25,ar.ccv       // Only if page present
        mov r24=PAGE_SHIFT<<2
        ;;
(p6)    cmp.eq p6,p7=r26,r18                    // Only if page present
        ;;
#ifdef CONFIG_XEN
        mov r26=r8
        mov r8=r25
        ;;
(p6)    XEN_HYPER_ITC_I
        ;;
        mov r8=r26
        ;;
#else
(p6)    itc.i r25                               // install updated PTE
#endif
        ;;
        /*
         * Tell the assembler's dependency-violation checker that the above "itc" instructions
         * cannot possibly affect the following loads:
         */
        dv_serialize_data

        ld8 r18=[r17]                           // read PTE again
        ;;
        cmp.eq p6,p7=r18,r25                    // is it same as the newly installed
        ;;
(p7)    ptc.l r16,r24
        mov b0=r29                              // restore b0
        mov ar.ccv=r28
#else /* !CONFIG_SMP */
        ;;
1:      ld8 r18=[r17]
        ;;
        or r18=_PAGE_A,r18                      // set the accessed bit
        mov b0=r29                              // restore b0
        ;;
        st8 [r17]=r18                           // store back updated PTE
        itc.i r18                               // install updated PTE
#endif /* !CONFIG_SMP */
        mov pr=r31,-1
#ifdef CONFIG_XEN
        XEN_HYPER_RFI
        dv_serialize_data
#else
        rfi
#endif
END(iaccess_bit)

        .org ia64_ivt+0x2800
/////////////////////////////////////////////////////////////////////////////////////////
// 0x2800 Entry 10 (size 64 bundles) Data Access-bit (15,55)
ENTRY(daccess_bit)
        DBG_FAULT(10)
        // Like Entry 8, except for data access
#ifdef CONFIG_XEN
        movl r16=XSI_IFA
        ;;
        ld8 r16=[r16]
        ;;
#else
        mov r16=cr.ifa                          // get the address that caused the fault
#endif
        movl r30=1f                             // load continuation point in case of nested fault
        ;;
#ifdef CONFIG_XEN
        mov r18=r8
        mov r8=r16
        XEN_HYPER_THASH
        ;;
        mov r17=r8
        mov r8=r18
        ;;
#else
        thash r17=r16                           // compute virtual address of L3 PTE
#endif
        mov r31=pr
        mov r29=b0                              // save b0 in case of nested fault
#ifdef CONFIG_SMP
        mov r28=ar.ccv                          // save ar.ccv
        ;;
1:      ld8 r18=[r17]
        ;;                                      // avoid RAW on r18
        mov ar.ccv=r18                          // set compare value for cmpxchg
        or r25=_PAGE_A,r18                      // set the accessed bit
        tbit.z p7,p6=r18,_PAGE_P_BIT            // Check present bit
        ;;
(p6)    cmpxchg8.acq r26=[r17],r25,ar.ccv       // Only if page is present
        mov r24=PAGE_SHIFT<<2
        ;;
(p6)    cmp.eq p6,p7=r26,r18                    // Only if page is present
        ;;
#ifdef CONFIG_XEN
        mov r26=r8
        mov r8=r25
        ;;
(p6)    XEN_HYPER_ITC_D
        ;;
        mov r8=r26
        ;;
#else
(p6)    itc.d r25                               // install updated PTE
#endif
        /*
         * Tell the assembler's dependency-violation checker that the above "itc" instructions
         * cannot possibly affect the following loads:
         */
        dv_serialize_data
        ;;
        ld8 r18=[r17]                           // read PTE again
        ;;
        cmp.eq p6,p7=r18,r25                    // is it same as the newly installed
        ;;
(p7)    ptc.l r16,r24
        mov ar.ccv=r28
#else
        ;;
1:      ld8 r18=[r17]
        ;;                                      // avoid RAW on r18
        or r18=_PAGE_A,r18                      // set the accessed bit
        ;;
        st8 [r17]=r18                           // store back updated PTE
        itc.d r18                               // install updated PTE
#endif
        mov b0=r29                              // restore b0
        mov pr=r31,-1
#ifdef CONFIG_XEN
        XEN_HYPER_RFI
        dv_serialize_data
#else
        rfi
#endif
END(daccess_bit)

        .org ia64_ivt+0x2c00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x2c00 Entry 11 (size 64 bundles) Break instruction (33)
ENTRY(break_fault)
        /*
         * The streamlined system call entry/exit paths only save/restore the initial part
         * of pt_regs.  This implies that the callers of system-calls must adhere to the
         * normal procedure calling conventions.
         *
         *   Registers to be saved & restored:
         *      CR registers: cr.ipsr, cr.iip, cr.ifs
         *      AR registers: ar.unat, ar.pfs, ar.rsc, ar.rnat, ar.bspstore, ar.fpsr
         *      others: pr, b0, b6, loadrs, r1, r11, r12, r13, r15
         *   Registers to be restored only:
         *      r8-r11: output value from the system call.
         *
         * During system call exit, scratch registers (including r15) are modified/cleared
         * to prevent leaking bits from kernel to user level.
         */
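        // The break-based syscall ABI passes the syscall number in r15,
        // biased by 1024: r15-1024 indexes sys_call_table below, after a
        // range check against NR_syscalls.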
        DBG_FAULT(11)
        mov.m r16=IA64_KR(CURRENT)              // M2 r16 <- current task (12 cyc)
#ifdef CONFIG_XEN
        movl r22=XSI_IPSR
        ;;
        ld8 r29=[r22],XSI_IIM_OFS-XSI_IPSR_OFS  // get ipsr, point to iim
#else
        mov r29=cr.ipsr                         // M2 (12 cyc)
#endif
        mov r31=pr                              // I0 (2 cyc)

#ifdef CONFIG_XEN
        ;;
        ld8 r17=[r22],XSI_IIP_OFS-XSI_IIM_OFS   // get iim, point to iip
#else
        mov r17=cr.iim                          // M2 (2 cyc)
#endif
        mov.m r27=ar.rsc                        // M2 (12 cyc)
        mov r18=__IA64_BREAK_SYSCALL            // A

        mov.m ar.rsc=0                          // M2
        mov.m r21=ar.fpsr                       // M2 (12 cyc)
        mov r19=b6                              // I0 (2 cyc)
        ;;
        mov.m r23=ar.bspstore                   // M2 (12 cyc)
        mov.m r24=ar.rnat                       // M2 (5 cyc)
        mov.i r26=ar.pfs                        // I0 (2 cyc)

        invala                                  // M0|1
        nop.m 0                                 // M
        mov r20=r1                              // A    save r1

        nop.m 0
        movl r30=sys_call_table                 // X

#ifdef CONFIG_XEN
        ld8 r28=[r22]                           // get iip
#else
        mov r28=cr.iip                          // M2 (2 cyc)
#endif
        cmp.eq p0,p7=r18,r17                    // I0   is this a system call?
(p7)    br.cond.spnt non_syscall                // B    no ->
        //
        // From this point on, we are definitely on the syscall-path
        // and we can use (non-banked) scratch registers.
        //
        ///////////////////////////////////////////////////////////////////////
        mov r1=r16                              // A    move task-pointer to "addl"-addressable reg
        mov r2=r16                              // A    setup r2 for ia64_syscall_setup
        add r9=TI_FLAGS+IA64_TASK_SIZE,r16      // A    r9 = &current_thread_info()->flags

        adds r16=IA64_TASK_THREAD_ON_USTACK_OFFSET,r16
        adds r15=-1024,r15                      // A    subtract 1024 from syscall number
        mov r3=NR_syscalls - 1
        ;;
        ld1.bias r17=[r16]                      // M0|1 r17 = current->thread.on_ustack flag
        ld4 r9=[r9]                             // M0|1 r9 = current_thread_info()->flags
        extr.u r8=r29,41,2                      // I0   extract ei field from cr.ipsr

        shladd r30=r15,3,r30                    // A    r30 = sys_call_table + 8*(syscall-1024)
        addl r22=IA64_RBS_OFFSET,r1             // A    compute base of RBS
        cmp.leu p6,p7=r15,r3                    // A    syscall number in range?
        ;;

        lfetch.fault.excl.nt1 [r22]             // M0|1 prefetch RBS
(p6)    ld8 r30=[r30]                           // M0|1 load address of syscall entry point
        tnat.nz.or p7,p0=r15                    // I0   is syscall nr a NaT?

        mov.m ar.bspstore=r22                   // M2   switch to kernel RBS
        cmp.eq p8,p9=2,r8                       // A    isr.ei==2?
        ;;

(p8)    mov r8=0                                // A    clear ei to 0
(p7)    movl r30=sys_ni_syscall                 // X

(p8)    adds r28=16,r28                         // A    switch cr.iip to next bundle
(p9)    adds r8=1,r8                            // A    increment ei to next slot
        nop.i 0
        ;;

        mov.m r25=ar.unat                       // M2 (5 cyc)
        dep r29=r8,r29,41,2                     // I0   insert new ei into cr.ipsr
        adds r15=1024,r15                       // A    restore original syscall number
        //
        // If any of the above loads miss in L1D, we'll stall here until
        // the data arrives.
        //
        ///////////////////////////////////////////////////////////////////////
        st1 [r16]=r0                            // M2|3 clear current->thread.on_ustack flag
        mov b6=r30                              // I0   setup syscall handler branch reg early
        cmp.eq pKStk,pUStk=r0,r17               // A    were we on kernel stacks already?

        and r9=_TIF_SYSCALL_TRACEAUDIT,r9       // A    mask trace or audit
        mov r18=ar.bsp                          // M2 (12 cyc)
(pKStk) br.cond.spnt .break_fixup               // B    we're already in kernel-mode -- fix up RBS
        ;;
.back_from_break_fixup:
(pUStk) addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r1 // A compute base of memory stack
        cmp.eq p14,p0=r9,r0                     // A    are syscalls being traced/audited?
        br.call.sptk.many b7=ia64_syscall_setup // B
1:
        mov ar.rsc=0x3                          // M2   set eager mode, pl 0, LE, loadrs=0
        nop 0
#ifdef CONFIG_XEN
        mov r2=b0; br.call.sptk b0=xen_bsw1;; mov b0=r2;;
#else
        bsw.1                                   // B (6 cyc) regs are saved, switch to bank 1
#endif
        ;;

#ifdef CONFIG_XEN
        movl r16=XSI_PSR_IC
        mov r3=1
        ;;
        st4 [r16]=r3,XSI_PSR_I_ADDR_OFS-XSI_PSR_IC_OFS  // vpsr.ic = 1
#else
        ssm psr.ic | PSR_DEFAULT_BITS           // M2   now it's safe to re-enable intr.-collection
#endif
        movl r3=ia64_ret_from_syscall           // X
        ;;

        srlz.i                                  // M0   ensure interruption collection is on
        mov rp=r3                               // I0   set the real return addr
(p10)   br.cond.spnt.many ia64_ret_from_syscall // B    return if bad call-frame or r15 is a NaT

#ifdef CONFIG_XEN
(p15)   ld8 r16=[r16]                           // vpsr.i
        ;;
(p15)   st1 [r16]=r0,MASK_TO_PEND_OFS           // if (p15) vpsr.i = 1
        ;;                                      // if (p15) (vcpu->vcpu_info->evtchn_upcall_mask)=0
(p15)   ld1 r2=[r16]                            // if (vcpu->vcpu_info->evtchn_upcall_pending)
        ;;
(p15)   cmp.ne.unc p6,p0=r2,r0
        ;;
(p6)    XEN_HYPER_SSM_I                         // do a real ssm psr.i
#else
(p15)   ssm psr.i                               // M2   restore psr.i
#endif
(p14)   br.call.sptk.many b6=b6                 // B    invoke syscall-handler (ignore return addr)
        br.cond.spnt.many ia64_trace_syscall    // B    do syscall-tracing thingamagic
        // NOT REACHED
        ///////////////////////////////////////////////////////////////////////
        // On entry, we optimistically assumed that we're coming from user-space.
        // For the rare cases where a system-call is done from within the kernel,
        // we fix things up at this point:
.break_fixup:
        add r1=-IA64_PT_REGS_SIZE,sp            // A    allocate space for pt_regs structure
        mov ar.rnat=r24                         // M2   restore kernel's AR.RNAT
        ;;
        mov ar.bspstore=r23                     // M2   restore kernel's AR.BSPSTORE
        br.cond.sptk .back_from_break_fixup
END(break_fault)

        .org ia64_ivt+0x3000
/////////////////////////////////////////////////////////////////////////////////////////
// 0x3000 Entry 12 (size 64 bundles) External Interrupt (4)
ENTRY(interrupt)
        DBG_FAULT(12)
        mov r31=pr                              // prepare to save predicates
        ;;
        SAVE_MIN_WITH_COVER                     // uses r31; defines r2 and r3
#ifdef CONFIG_XEN
        movl r3=XSI_PSR_IC
        mov r14=1
        ;;
        st4 [r3]=r14
#else
        ssm psr.ic | PSR_DEFAULT_BITS
#endif
        ;;
        adds r3=8,r2                            // set up second base pointer for SAVE_REST
        srlz.i                                  // ensure everybody knows psr.ic is back on
        ;;
        SAVE_REST
        ;;
        alloc r14=ar.pfs,0,0,2,0                // must be first in an insn group
#ifdef CONFIG_XEN
        ;;
        XEN_HYPER_GET_IVR
        ;;
        mov out0=r8                             // pass cr.ivr as first arg
#else
        mov out0=cr.ivr                         // pass cr.ivr as first arg
#endif
        add out1=16,sp                          // pass pointer to pt_regs as second arg
        ;;
        srlz.d                                  // make sure we see the effect of cr.ivr
        movl r14=ia64_leave_kernel
        ;;
        mov rp=r14
        br.call.sptk.many b6=ia64_handle_irq
END(interrupt)

        .org ia64_ivt+0x3400
/////////////////////////////////////////////////////////////////////////////////////////
// 0x3400 Entry 13 (size 64 bundles) Reserved
        DBG_FAULT(13)
        FAULT(13)

        .org ia64_ivt+0x3800
/////////////////////////////////////////////////////////////////////////////////////////
// 0x3800 Entry 14 (size 64 bundles) Reserved
        DBG_FAULT(14)
        FAULT(14)

        /*
         * There is no particular reason for this code to be here, other than that
         * there happens to be space here that would go unused otherwise.  If this
         * fault ever gets "unreserved", simply move the following code to a more
         * suitable spot...
         *
         * ia64_syscall_setup() is a separate subroutine so that it can
         * allocate stacked registers so it can safely demine any
         * potential NaT values from the input registers.
         *
         * On entry:
         *      - executing on bank 0 or bank 1 register set (doesn't matter)
         *      - r1: stack pointer
         *      - r2: current task pointer
         *      - r3: preserved
         *      - r11: original contents (saved ar.pfs to be saved)
         *      - r12: original contents (sp to be saved)
         *      - r13: original contents (tp to be saved)
         *      - r15: original contents (syscall # to be saved)
         *      - r18: saved bsp (after switching to kernel stack)
         *      - r19: saved b6
         *      - r20: saved r1 (gp)
         *      - r21: saved ar.fpsr
         *      - r22: kernel's register backing store base (krbs_base)
         *      - r23: saved ar.bspstore
         *      - r24: saved ar.rnat
         *      - r25: saved ar.unat
         *      - r26: saved ar.pfs
         *      - r27: saved ar.rsc
         *      - r28: saved cr.iip
         *      - r29: saved cr.ipsr
         *      - r31: saved pr
         *      - b0: original contents (to be saved)
         * On exit:
         *      - p10: TRUE if syscall is invoked with more than 8 out
         *        registers or r15's Nat is true
         *      - r1: kernel's gp
         *      - r3: preserved (same as on entry)
         *      - r8: -EINVAL if p10 is true
         *      - r12: points to kernel stack
         *      - r13: points to current task
         *      - r14: preserved (same as on entry)
         *      - p13: preserved
         *      - p15: TRUE if interrupts need to be re-enabled
         *      - ar.fpsr: set to kernel settings
         *      - b6: preserved (same as on entry)
         */
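        /*
         * Each input register is tested with tnat.nz below and replaced
         * with -1 when it carries a NaT, so syscall handlers never see
         * NaT arguments.
         */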
#ifndef CONFIG_XEN
GLOBAL_ENTRY(ia64_syscall_setup)
#if PT(B6) != 0
# error This code assumes that b6 is the first field in pt_regs.
#endif
        st8 [r1]=r19                            // save b6
        add r16=PT(CR_IPSR),r1                  // initialize first base pointer
        add r17=PT(R11),r1                      // initialize second base pointer
        ;;
        alloc r19=ar.pfs,8,0,0,0                // ensure in0-in7 are writable
        st8 [r16]=r29,PT(AR_PFS)-PT(CR_IPSR)    // save cr.ipsr
        tnat.nz p8,p0=in0

        st8.spill [r17]=r11,PT(CR_IIP)-PT(R11)  // save r11
        tnat.nz p9,p0=in1
(pKStk) mov r18=r0                              // make sure r18 isn't NaT
        ;;

        st8 [r16]=r26,PT(CR_IFS)-PT(AR_PFS)     // save ar.pfs
        st8 [r17]=r28,PT(AR_UNAT)-PT(CR_IIP)    // save cr.iip
        mov r28=b0                              // save b0 (2 cyc)
        ;;

        st8 [r17]=r25,PT(AR_RSC)-PT(AR_UNAT)    // save ar.unat
        dep r19=0,r19,38,26                     // clear all bits but 0..37 [I0]
(p8)    mov in0=-1
        ;;

        st8 [r16]=r19,PT(AR_RNAT)-PT(CR_IFS)    // store ar.pfs.pfm in cr.ifs
        extr.u r11=r19,7,7                      // I0   get sol of ar.pfs
        and r8=0x7f,r19                         // A    get sof of ar.pfs

        st8 [r17]=r27,PT(AR_BSPSTORE)-PT(AR_RSC)// save ar.rsc
        tbit.nz p15,p0=r29,IA64_PSR_I_BIT       // I0
(p9)    mov in1=-1
        ;;

(pUStk) sub r18=r18,r22                         // r18=RSE.ndirty*8
        tnat.nz p10,p0=in2
        add r11=8,r11
        ;;
(pKStk) adds r16=PT(PR)-PT(AR_RNAT),r16         // skip over ar_rnat field
(pKStk) adds r17=PT(B0)-PT(AR_BSPSTORE),r17     // skip over ar_bspstore field
        tnat.nz p11,p0=in3
        ;;
(p10)   mov in2=-1
        tnat.nz p12,p0=in4                      // [I0]
(p11)   mov in3=-1
        ;;
(pUStk) st8 [r16]=r24,PT(PR)-PT(AR_RNAT)        // save ar.rnat
(pUStk) st8 [r17]=r23,PT(B0)-PT(AR_BSPSTORE)    // save ar.bspstore
        shl r18=r18,16                          // compute ar.rsc to be used for "loadrs"
        ;;
        st8 [r16]=r31,PT(LOADRS)-PT(PR)         // save predicates
        st8 [r17]=r28,PT(R1)-PT(B0)             // save b0
        tnat.nz p13,p0=in5                      // [I0]
        ;;
        st8 [r16]=r18,PT(R12)-PT(LOADRS)        // save ar.rsc value for "loadrs"
        st8.spill [r17]=r20,PT(R13)-PT(R1)      // save original r1
(p12)   mov in4=-1
        ;;

.mem.offset 0,0; st8.spill [r16]=r12,PT(AR_FPSR)-PT(R12)       // save r12
.mem.offset 8,0; st8.spill [r17]=r13,PT(R15)-PT(R13)           // save r13
(p13)   mov in5=-1
        ;;
        st8 [r16]=r21,PT(R8)-PT(AR_FPSR)        // save ar.fpsr
        tnat.nz p13,p0=in6
        cmp.lt p10,p9=r11,r8                    // frame size can't be more than local+8
        ;;
        mov r8=1
(p9)    tnat.nz p10,p0=r15
        adds r12=-16,r1                         // switch to kernel memory stack (with 16 bytes of scratch)

        st8.spill [r17]=r15                     // save r15
        tnat.nz p8,p0=in7
        nop.i 0

        mov r13=r2                              // establish `current'
        movl r1=__gp                            // establish kernel global pointer
        ;;
        st8 [r16]=r8                            // ensure pt_regs.r8 != 0 (see handle_syscall_error)
(p13)   mov in6=-1
(p8)    mov in7=-1

        cmp.eq pSys,pNonSys=r0,r0               // set pSys=1, pNonSys=0
        movl r17=FPSR_DEFAULT
        ;;
        mov.m ar.fpsr=r17                       // set ar.fpsr to kernel default value
(p10)   mov r8=-EINVAL
        br.ret.sptk.many b7
END(ia64_syscall_setup)
#endif

        .org ia64_ivt+0x3c00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x3c00 Entry 15 (size 64 bundles) Reserved
        DBG_FAULT(15)
        FAULT(15)

        /*
         * Squatting in this space ...
         *
         * This special case dispatcher for illegal operation faults allows preserved
         * registers to be modified through a callback function (asm only) that is handed
         * back from the fault handler in r8.  Up to three arguments can be passed to the
         * callback function by returning an aggregate with the callback as its first
         * element, followed by the arguments.
         */
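        /*
         * Concretely: ia64_illegal_op_fault returns the callback in r8 and
         * its arguments in r9-r11; a zero r8 means no callback, and we fall
         * straight through to ia64_leave_kernel.
         */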
ENTRY(dispatch_illegal_op_fault)
        .prologue
        .body
        SAVE_MIN_WITH_COVER
        ssm psr.ic | PSR_DEFAULT_BITS
        ;;
        srlz.i                                  // guarantee that interruption collection is on
        ;;
(p15)   ssm psr.i                               // restore psr.i
        adds r3=8,r2                            // set up second base pointer for SAVE_REST
        ;;
        alloc r14=ar.pfs,0,0,1,0                // must be first in insn group
        mov out0=ar.ec
        ;;
        SAVE_REST
        PT_REGS_UNWIND_INFO(0)
        ;;
        br.call.sptk.many rp=ia64_illegal_op_fault
.ret0:  ;;
        alloc r14=ar.pfs,0,0,3,0                // must be first in insn group
        mov out0=r9
        mov out1=r10
        mov out2=r11
        movl r15=ia64_leave_kernel
        ;;
        mov rp=r15
        mov b6=r8
        ;;
        cmp.ne p6,p0=0,r8
(p6)    br.call.dpnt.many b6=b6                 // call returns to ia64_leave_kernel
        br.sptk.many ia64_leave_kernel
END(dispatch_illegal_op_fault)

        .org ia64_ivt+0x4000
/////////////////////////////////////////////////////////////////////////////////////////
// 0x4000 Entry 16 (size 64 bundles) Reserved
        DBG_FAULT(16)
        FAULT(16)

        .org ia64_ivt+0x4400
/////////////////////////////////////////////////////////////////////////////////////////
// 0x4400 Entry 17 (size 64 bundles) Reserved
        DBG_FAULT(17)
        FAULT(17)

ENTRY(non_syscall)
        mov ar.rsc=r27                          // restore ar.rsc before SAVE_MIN_WITH_COVER
        ;;
        SAVE_MIN_WITH_COVER

        // There is no particular reason for this code to be here, other than that
        // there happens to be space here that would go unused otherwise.  If this
        // fault ever gets "unreserved", simply move the following code to a more
        // suitable spot...

        alloc r14=ar.pfs,0,0,2,0
        mov out0=cr.iim
        add out1=16,sp
        adds r3=8,r2                            // set up second base pointer for SAVE_REST

        ssm psr.ic | PSR_DEFAULT_BITS
        ;;
        srlz.i                                  // guarantee that interruption collection is on
        ;;
(p15)   ssm psr.i                               // restore psr.i
        movl r15=ia64_leave_kernel
        ;;
        SAVE_REST
        mov rp=r15
        ;;
        br.call.sptk.many b6=ia64_bad_break     // avoid WAW on CFM and ignore return addr
END(non_syscall)

        .org ia64_ivt+0x4800
/////////////////////////////////////////////////////////////////////////////////////////
// 0x4800 Entry 18 (size 64 bundles) Reserved
        DBG_FAULT(18)
        FAULT(18)

        /*
         * There is no particular reason for this code to be here, other than that
         * there happens to be space here that would go unused otherwise.  If this
         * fault ever gets "unreserved", simply move the following code to a more
         * suitable spot...
         */

ENTRY(dispatch_unaligned_handler)
        SAVE_MIN_WITH_COVER
        ;;
        alloc r14=ar.pfs,0,0,2,0                // now it's safe (must be first in insn group!)
        mov out0=cr.ifa
        adds out1=16,sp

        ssm psr.ic | PSR_DEFAULT_BITS
        ;;
        srlz.i                                  // guarantee that interruption collection is on
        ;;
(p15)   ssm psr.i                               // restore psr.i
        adds r3=8,r2                            // set up second base pointer
        ;;
        SAVE_REST
        movl r14=ia64_leave_kernel
        ;;
        mov rp=r14
        br.sptk.many ia64_prepare_handle_unaligned
END(dispatch_unaligned_handler)

        .org ia64_ivt+0x4c00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x4c00 Entry 19 (size 64 bundles) Reserved
        DBG_FAULT(19)
        FAULT(19)

        /*
         * There is no particular reason for this code to be here, other than that
         * there happens to be space here that would go unused otherwise.  If this
         * fault ever gets "unreserved", simply move the following code to a more
         * suitable spot...
         */

ENTRY(dispatch_to_fault_handler)
        /*
         * Input:
         *      psr.ic: off
         *      r19:    fault vector number (e.g., 24 for General Exception)
         *      r31:    contains saved predicates (pr)
         */
        SAVE_MIN_WITH_COVER_R19
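        // SAVE_MIN_WITH_COVER_R19 preserves the vector number from r19 in
        // r15, which is why r15 is passed as out0 below.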
        alloc r14=ar.pfs,0,0,5,0
        mov out0=r15
#ifdef CONFIG_XEN
        movl out1=XSI_ISR
        ;;
        adds out2=XSI_IFA-XSI_ISR,out1
        adds out3=XSI_IIM-XSI_ISR,out1
        adds out4=XSI_ITIR-XSI_ISR,out1
        ;;
        ld8 out1=[out1]
        ld8 out2=[out2]
        ld8 out3=[out3]
        ld8 out4=[out4]
        ;;
#else
        mov out1=cr.isr
        mov out2=cr.ifa
        mov out3=cr.iim
        mov out4=cr.itir
        ;;
#endif
        ssm psr.ic | PSR_DEFAULT_BITS
        ;;
        srlz.i                                  // guarantee that interruption collection is on
        ;;
(p15)   ssm psr.i                               // restore psr.i
        adds r3=8,r2                            // set up second base pointer for SAVE_REST
        ;;
        SAVE_REST
        movl r14=ia64_leave_kernel
        ;;
        mov rp=r14
        br.call.sptk.many b6=ia64_fault
END(dispatch_to_fault_handler)

        //
        // --- End of long entries, Beginning of short entries
        //

        .org ia64_ivt+0x5000
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5000 Entry 20 (size 16 bundles) Page Not Present (10,22,49)
ENTRY(page_not_present)
        DBG_FAULT(20)
        mov r16=cr.ifa
        rsm psr.dt
        /*
         * The Linux page fault handler doesn't expect non-present pages to be in
         * the TLB.  Flush the existing entry now, so we meet that expectation.
         */
        mov r17=PAGE_SHIFT<<2
        ;;
        ptc.l r16,r17
        ;;
        mov r31=pr
        srlz.d
        br.sptk.many page_fault
END(page_not_present)

        .org ia64_ivt+0x5100
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5100 Entry 21 (size 16 bundles) Key Permission (13,25,52)
ENTRY(key_permission)
        DBG_FAULT(21)
        mov r16=cr.ifa
        rsm psr.dt
        mov r31=pr
        ;;
        srlz.d
        br.sptk.many page_fault
END(key_permission)

        .org ia64_ivt+0x5200
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5200 Entry 22 (size 16 bundles) Instruction Access Rights (26)
ENTRY(iaccess_rights)
        DBG_FAULT(22)
        mov r16=cr.ifa
        rsm psr.dt
        mov r31=pr
        ;;
        srlz.d
        br.sptk.many page_fault
END(iaccess_rights)

        .org ia64_ivt+0x5300
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5300 Entry 23 (size 16 bundles) Data Access Rights (14,53)
ENTRY(daccess_rights)
        DBG_FAULT(23)
#ifdef CONFIG_XEN
        movl r16=XSI_IFA
        ;;
        ld8 r16=[r16]
        ;;
        XEN_HYPER_RSM_PSR_DT
#else
        mov r16=cr.ifa
        rsm psr.dt
#endif
        mov r31=pr
        ;;
        srlz.d
        br.sptk.many page_fault
END(daccess_rights)

        .org ia64_ivt+0x5400
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5400 Entry 24 (size 16 bundles) General Exception (5,32,34,36,38,39)
ENTRY(general_exception)
        DBG_FAULT(24)
        mov r16=cr.isr
        mov r31=pr
        ;;
        cmp4.eq p6,p0=0,r16
(p6)    br.sptk.many dispatch_illegal_op_fault
        ;;
        mov r19=24                              // fault number
        br.sptk.many dispatch_to_fault_handler
END(general_exception)

        .org ia64_ivt+0x5500
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5500 Entry 25 (size 16 bundles) Disabled FP-Register (35)
ENTRY(disabled_fp_reg)
        DBG_FAULT(25)
        rsm psr.dfh                             // ensure we can access fph
        ;;
        srlz.d
        mov r31=pr
        mov r19=25
        br.sptk.many dispatch_to_fault_handler
END(disabled_fp_reg)

        .org ia64_ivt+0x5600
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5600 Entry 26 (size 16 bundles) Nat Consumption (11,23,37,50)
ENTRY(nat_consumption)
        DBG_FAULT(26)

        mov r16=cr.ipsr
        mov r17=cr.isr
        mov r31=pr                              // save PR
        ;;
        and r18=0xf,r17                         // r18 = cr.isr.code{3:0}
        tbit.z p6,p0=r17,IA64_ISR_NA_BIT
        ;;
        cmp.ne.or p6,p0=IA64_ISR_CODE_LFETCH,r18
        dep r16=-1,r16,IA64_PSR_ED_BIT,1
(p6)    br.cond.spnt 1f                         // branch if (cr.isr.na == 0 || cr.isr.code{3:0} != LFETCH)
        ;;
        mov cr.ipsr=r16                         // set cr.ipsr.ed
        mov pr=r31,-1
        ;;
        rfi

1:      mov pr=r31,-1
        ;;
        FAULT(26)
END(nat_consumption)

        .org ia64_ivt+0x5700
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5700 Entry 27 (size 16 bundles) Speculation (40)
ENTRY(speculation_vector)
        DBG_FAULT(27)
        /*
         * A [f]chk.[as] instruction needs to take the branch to the recovery code but
         * this part of the architecture is not implemented in hardware on some CPUs, such
         * as Itanium.  Thus, in general we need to emulate the behavior.  IIM contains
         * the relative target (not yet sign extended).  So after sign extending it we
         * simply add it to IIP.  We also need to reset the EI field of the IPSR to zero,
         * i.e., the slot to restart into.
         *
         * cr.iim contains zero_ext(imm21)
         */
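        /*
         * Worked through: r18 << 43 moves imm21's sign bit to bit 63, and
         * the arithmetic shift right by 39 yields sign_ext(imm21) << 4,
         * i.e. a byte offset in whole 16-byte bundles to add to IIP.
         */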
1702 mov r18=cr.iim
1703 ;;
1704 mov r17=cr.iip
1705 shl r18=r18,43 // put sign bit in position (43=64-21)
1706 ;;
1708 mov r16=cr.ipsr
1709 shr r18=r18,39 // sign extend (39=43-4)
1710 ;;
1712 add r17=r17,r18 // now add the offset
1713 ;;
1714 mov cr.iip=r17
1715 dep r16=0,r16,41,2 // clear EI
1716 ;;
1718 mov cr.ipsr=r16
1719 ;;
1721 #ifdef CONFIG_XEN
1722 XEN_HYPER_RFI;
1723 #else
1724 rfi // and go back
1725 #endif
1726 END(speculation_vector)
1728 .org ia64_ivt+0x5800
1729 /////////////////////////////////////////////////////////////////////////////////////////
1730 // 0x5800 Entry 28 (size 16 bundles) Reserved
1731 DBG_FAULT(28)
1732 FAULT(28)
1734 .org ia64_ivt+0x5900
1735 /////////////////////////////////////////////////////////////////////////////////////////
1736 // 0x5900 Entry 29 (size 16 bundles) Debug (16,28,56)
1737 ENTRY(debug_vector)
1738 DBG_FAULT(29)
1739 FAULT(29)
1740 END(debug_vector)
1742 .org ia64_ivt+0x5a00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5a00 Entry 30 (size 16 bundles) Unaligned Reference (57)
ENTRY(unaligned_access)
	DBG_FAULT(30)
	mov r31=pr		// prepare to save predicates
	;;
	br.sptk.many dispatch_unaligned_handler
END(unaligned_access)

	.org ia64_ivt+0x5b00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5b00 Entry 31 (size 16 bundles) Unsupported Data Reference (57)
ENTRY(unsupported_data_reference)
	DBG_FAULT(31)
	FAULT(31)
END(unsupported_data_reference)

	.org ia64_ivt+0x5c00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5c00 Entry 32 (size 16 bundles) Floating-Point Fault (64)
ENTRY(floating_point_fault)
	DBG_FAULT(32)
	FAULT(32)
END(floating_point_fault)

	.org ia64_ivt+0x5d00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5d00 Entry 33 (size 16 bundles) Floating Point Trap (66)
ENTRY(floating_point_trap)
	DBG_FAULT(33)
	FAULT(33)
END(floating_point_trap)

	.org ia64_ivt+0x5e00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5e00 Entry 34 (size 16 bundles) Lower Privilege Transfer Trap (66)
ENTRY(lower_privilege_trap)
	DBG_FAULT(34)
	FAULT(34)
END(lower_privilege_trap)

	.org ia64_ivt+0x5f00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5f00 Entry 35 (size 16 bundles) Taken Branch Trap (68)
ENTRY(taken_branch_trap)
	DBG_FAULT(35)
	FAULT(35)
END(taken_branch_trap)

	.org ia64_ivt+0x6000
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6000 Entry 36 (size 16 bundles) Single Step Trap (69)
ENTRY(single_step_trap)
	DBG_FAULT(36)
	FAULT(36)
END(single_step_trap)

	.org ia64_ivt+0x6100
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6100 Entry 37 (size 16 bundles) Reserved
	DBG_FAULT(37)
	FAULT(37)

	.org ia64_ivt+0x6200
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6200 Entry 38 (size 16 bundles) Reserved
	DBG_FAULT(38)
	FAULT(38)

	.org ia64_ivt+0x6300
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6300 Entry 39 (size 16 bundles) Reserved
	DBG_FAULT(39)
	FAULT(39)

	.org ia64_ivt+0x6400
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6400 Entry 40 (size 16 bundles) Reserved
	DBG_FAULT(40)
	FAULT(40)

	.org ia64_ivt+0x6500
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6500 Entry 41 (size 16 bundles) Reserved
	DBG_FAULT(41)
	FAULT(41)

	.org ia64_ivt+0x6600
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6600 Entry 42 (size 16 bundles) Reserved
	DBG_FAULT(42)
	FAULT(42)

	.org ia64_ivt+0x6700
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6700 Entry 43 (size 16 bundles) Reserved
	DBG_FAULT(43)
	FAULT(43)

	.org ia64_ivt+0x6800
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6800 Entry 44 (size 16 bundles) Reserved
	DBG_FAULT(44)
	FAULT(44)

	.org ia64_ivt+0x6900
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6900 Entry 45 (size 16 bundles) IA-32 Exception (17,18,29,41,42,43,44,58,60,61,62,72,73,75,76,77)
ENTRY(ia32_exception)
	DBG_FAULT(45)
	FAULT(45)
END(ia32_exception)

	.org ia64_ivt+0x6a00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6a00 Entry 46 (size 16 bundles) IA-32 Intercept (30,31,59,70,71)
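	/*
	 * Fast path for the "system flag" intercept (ISR.code == 2), raised
	 * when an IA-32 instruction modifies one of the virtualized EFLAGS
	 * system flags.  cr.iim holds the old flags and ar.eflag the new
	 * value; if the AC bit is what changed, the update has already taken
	 * effect in ar.eflag and we can simply return to the interrupted
	 * instruction.  Everything else falls through to FAULT(46).
	 */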
ENTRY(ia32_intercept)
	DBG_FAULT(46)
#ifdef CONFIG_IA32_SUPPORT
	mov r31=pr
	mov r16=cr.isr
	;;
	extr.u r17=r16,16,8	// get ISR.code
	mov r18=ar.eflag
	mov r19=cr.iim		// old eflag value
	;;
	cmp.ne p6,p0=2,r17
(p6)	br.cond.spnt 1f		// not a system flag fault
	xor r16=r18,r19
	;;
	extr.u r17=r16,18,1	// get the eflags.ac bit
	;;
	cmp.eq p6,p0=0,r17
(p6)	br.cond.spnt 1f		// eflags.ac bit didn't change
	;;
	mov pr=r31,-1		// restore predicate registers
#ifdef CONFIG_XEN
	XEN_HYPER_RFI;
#else
	rfi
#endif

1:
#endif	// CONFIG_IA32_SUPPORT
	FAULT(46)
END(ia32_intercept)
	.org ia64_ivt+0x6b00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6b00 Entry 47 (size 16 bundles) IA-32 Interrupt (74)
ENTRY(ia32_interrupt)
	DBG_FAULT(47)
#ifdef CONFIG_IA32_SUPPORT
	mov r31=pr
	br.sptk.many dispatch_to_ia32_handler
#else
	FAULT(47)
#endif
END(ia32_interrupt)

	.org ia64_ivt+0x6c00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6c00 Entry 48 (size 16 bundles) Reserved
	DBG_FAULT(48)
	FAULT(48)

	.org ia64_ivt+0x6d00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6d00 Entry 49 (size 16 bundles) Reserved
	DBG_FAULT(49)
	FAULT(49)

	.org ia64_ivt+0x6e00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6e00 Entry 50 (size 16 bundles) Reserved
	DBG_FAULT(50)
	FAULT(50)

	.org ia64_ivt+0x6f00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6f00 Entry 51 (size 16 bundles) Reserved
	DBG_FAULT(51)
	FAULT(51)

	.org ia64_ivt+0x7000
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7000 Entry 52 (size 16 bundles) Reserved
	DBG_FAULT(52)
	FAULT(52)

	.org ia64_ivt+0x7100
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7100 Entry 53 (size 16 bundles) Reserved
	DBG_FAULT(53)
	FAULT(53)

	.org ia64_ivt+0x7200
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7200 Entry 54 (size 16 bundles) Reserved
	DBG_FAULT(54)
	FAULT(54)

	.org ia64_ivt+0x7300
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7300 Entry 55 (size 16 bundles) Reserved
	DBG_FAULT(55)
	FAULT(55)

	.org ia64_ivt+0x7400
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7400 Entry 56 (size 16 bundles) Reserved
	DBG_FAULT(56)
	FAULT(56)

	.org ia64_ivt+0x7500
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7500 Entry 57 (size 16 bundles) Reserved
	DBG_FAULT(57)
	FAULT(57)

	.org ia64_ivt+0x7600
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7600 Entry 58 (size 16 bundles) Reserved
	DBG_FAULT(58)
	FAULT(58)

	.org ia64_ivt+0x7700
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7700 Entry 59 (size 16 bundles) Reserved
	DBG_FAULT(59)
	FAULT(59)

	.org ia64_ivt+0x7800
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7800 Entry 60 (size 16 bundles) Reserved
	DBG_FAULT(60)
	FAULT(60)

	.org ia64_ivt+0x7900
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7900 Entry 61 (size 16 bundles) Reserved
	DBG_FAULT(61)
	FAULT(61)

	.org ia64_ivt+0x7a00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7a00 Entry 62 (size 16 bundles) Reserved
	DBG_FAULT(62)
	FAULT(62)

	.org ia64_ivt+0x7b00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7b00 Entry 63 (size 16 bundles) Reserved
	DBG_FAULT(63)
	FAULT(63)

	.org ia64_ivt+0x7c00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7c00 Entry 64 (size 16 bundles) Reserved
	DBG_FAULT(64)
	FAULT(64)

	.org ia64_ivt+0x7d00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7d00 Entry 65 (size 16 bundles) Reserved
	DBG_FAULT(65)
	FAULT(65)

	.org ia64_ivt+0x7e00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7e00 Entry 66 (size 16 bundles) Reserved
	DBG_FAULT(66)
	FAULT(66)

	.org ia64_ivt+0x7f00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7f00 Entry 67 (size 16 bundles) Reserved
	DBG_FAULT(67)
	FAULT(67)

#ifdef CONFIG_IA32_SUPPORT
	/*
	 * There is no particular reason for this code to be here, other than that
	 * there happens to be space here that would go unused otherwise.  If this
	 * fault ever gets "unreserved", simply move the following code to a more
	 * suitable spot...
	 */
	// IA32 interrupt entry point
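	/*
	 * Interrupt 0x80 is the IA-32 system call.  After saving state and
	 * turning interruption collection back on, the code below picks the
	 * vector out of cr.isr: for 0x80 it marshals the IA-32 argument
	 * registers out of pt_regs and indirects through ia32_syscall_table;
	 * any other vector is handed to ia32_bad_interrupt.
	 */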
ENTRY(dispatch_to_ia32_handler)
	SAVE_MIN
	;;
	mov r14=cr.isr
	ssm psr.ic | PSR_DEFAULT_BITS
	;;
	srlz.i			// guarantee that interruption collection is on
	;;
(p15)	ssm psr.i
	adds r3=8,r2		// Base pointer for SAVE_REST
	;;
	SAVE_REST
	;;
	mov r15=0x80
	shr r14=r14,16		// Get interrupt number
	;;
	cmp.ne p6,p0=r14,r15
(p6)	br.call.dpnt.many b6=non_ia32_syscall
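	/*
	 * IA-32 system call: the arguments live in the IA-32 registers
	 * saved in pt_regs.  They are loaded below into the ia64 outgoing
	 * args as ebx->out0, ecx->out1, edx->out2, esi->out3, edi->out4,
	 * ebp->out5, with eax (the syscall number) read into r8.
	 */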
	adds r14=IA64_PT_REGS_R8_OFFSET + 16,sp	// 16 byte hole per SW conventions
	adds r15=IA64_PT_REGS_R1_OFFSET + 16,sp
	;;
	cmp.eq pSys,pNonSys=r0,r0	// set pSys=1, pNonSys=0
	ld8 r8=[r14]		// get r8
	;;
	st8 [r15]=r8		// save original EAX in r1 (IA32 procs don't use the GP)
	;;
	alloc r15=ar.pfs,0,0,6,0	// must be first in an insn group
	;;
	ld4 r8=[r14],8		// r8 == eax (syscall number)
	mov r15=IA32_NR_syscalls
	;;
	cmp.ltu.unc p6,p7=r8,r15
	ld4 out1=[r14],8	// r9 == ecx
	;;
	ld4 out2=[r14],8	// r10 == edx
	;;
	ld4 out0=[r14]		// r11 == ebx
	adds r14=(IA64_PT_REGS_R13_OFFSET) + 16,sp
	;;
	ld4 out5=[r14],PT(R14)-PT(R13)	// r13 == ebp
	;;
	ld4 out3=[r14],PT(R15)-PT(R14)	// r14 == esi
	adds r2=TI_FLAGS+IA64_TASK_SIZE,r13
	;;
	ld4 out4=[r14]		// r15 == edi
	movl r16=ia32_syscall_table
	;;
(p6)	shladd r16=r8,3,r16	// force ni_syscall if not valid syscall number
	ld4 r2=[r2]		// r2 = current_thread_info()->flags
	;;
	ld8 r16=[r16]
	and r2=_TIF_SYSCALL_TRACEAUDIT,r2	// mask trace or audit
	;;
	mov b6=r16
	movl r15=ia32_ret_from_syscall
	cmp.eq p8,p0=r2,r0
	;;
	mov rp=r15
(p8)	br.call.sptk.many b6=b6
	br.cond.sptk ia32_trace_syscall
non_ia32_syscall:
	alloc r15=ar.pfs,0,0,2,0
	mov out0=r14		// interrupt #
	add out1=16,sp		// pointer to pt_regs
	;;			// avoid WAW on CFM
	br.call.sptk.many rp=ia32_bad_interrupt
.ret1:	movl r15=ia64_leave_kernel
	;;
	mov rp=r15
	br.ret.sptk.many rp
END(dispatch_to_ia32_handler)
#endif /* CONFIG_IA32_SUPPORT */
#ifdef CONFIG_XEN
	.section .text,"ax"
GLOBAL_ENTRY(xen_event_callback)
	.prologue
	.body
	mov r31=pr		// prepare to save predicates
	;;
	SAVE_MIN_WITH_COVER	// uses r31; defines r2 and r3
	;;
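	/*
	 * psr.ic is virtualized under Xen: interruption collection is
	 * re-enabled by storing 1 into the XSI_PSR_IC word of the shared
	 * mapping rather than by setting the real psr.ic bit.
	 */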
	movl r3=XSI_PSR_IC
	mov r14=1
	;;
	st4 [r3]=r14
	;;
	adds r3=8,r2		// set up second base pointer for SAVE_REST
	srlz.i			// ensure everybody knows psr.ic is back on
	;;
	SAVE_REST
	PT_REGS_UNWIND_INFO(0)
	;;
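	/*
	 * Keep calling evtchn_do_upcall() until the shared
	 * evtchn_upcall_pending flag reads clear, so that events which
	 * arrive while we are still in the handler are not lost.
	 */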
1:
	alloc r14=ar.pfs,0,0,1,0	// must be first in an insn group
	add out0=16,sp		// pass pointer to pt_regs as first arg
	;;
	br.call.sptk.many b0=evtchn_do_upcall
	;;
	movl r20=XSI_PSR_I_ADDR
	;;
	ld8 r20=[r20]
	;;
	adds r20=-1,r20		// vcpu_info->evtchn_upcall_pending
	;;
	ld1 r20=[r20]
	;;
	cmp.ne p6,p0=r20,r0	// if there are pending events,
(p6)	br.spnt.few 1b		// call evtchn_do_upcall again.
	br.sptk.many ia64_leave_kernel
END(xen_event_callback)
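/*
 * In rough C, the loop above amounts to (a sketch, assuming the
 * 2.6.18-xen evtchn_do_upcall(struct pt_regs *) entry point and the
 * usual vcpu_info layout, where evtchn_upcall_pending sits one byte
 * below the address published via XSI_PSR_I_ADDR):
 *
 *	do {
 *		evtchn_do_upcall(regs);
 *	} while (vcpu_info->evtchn_upcall_pending);
 */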
/*
 * There is no particular reason for this code to be here, other than that
 * there happens to be space here that would go unused otherwise.  If this
 * fault ever gets "unreserved", simply move the following code to a more
 * suitable spot...
 */
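/*
 * Switch to register bank 1.  Under Xen the bank 1 contents of r16-r31
 * are shadowed in the XSI_BANK1_R16 area of the shared mapping, so
 * instead of a plain bsw.1 we announce the bank switch by writing 1 to
 * XSI_BANKNUM and then refill r16-r31 from that shadow area, restoring
 * the NaT bits by routing the saved NaT word (XSI_B1NAT) through
 * ar.unat for the ld8.fill loads.
 */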
GLOBAL_ENTRY(xen_bsw1)
	/* FIXME: THIS CODE IS NOT NaT SAFE! */
	mov r14=ar.unat
	movl r30=XSI_B1NAT
	;;
	ld8 r30=[r30];;
	mov ar.unat=r30
	movl r30=XSI_BANKNUM;
	mov r31=1;;
	st4 [r30]=r31;
	movl r30=XSI_BANK1_R16;
	movl r31=XSI_BANK1_R16+8;;
	ld8.fill r16=[r30],16; ld8.fill r17=[r31],16;;
	ld8.fill r18=[r30],16; ld8.fill r19=[r31],16;;
	ld8.fill r20=[r30],16; ld8.fill r21=[r31],16;;
	ld8.fill r22=[r30],16; ld8.fill r23=[r31],16;;
	ld8.fill r24=[r30],16; ld8.fill r25=[r31],16;;
	ld8.fill r26=[r30],16; ld8.fill r27=[r31],16;;
	ld8.fill r28=[r30],16; ld8.fill r29=[r31],16;;
	ld8.fill r30=[r30]; ld8.fill r31=[r31];;
	mov ar.unat=r14
	br.ret.sptk.many b0
END(xen_bsw1)

#endif