ia64/xen-unstable

view xen/arch/ia64/vmx/vmx_ivt.S @ 16760:38c73bd5e02d

[IA64] vti fault handler clean up: consolidate vmx_interrupt and vmx_dispatch_interrupt

Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
author Alex Williamson <alex.williamson@hp.com>
date Fri Dec 14 13:46:49 2007 -0700 (2007-12-14)
parents 2d0193702170
children 44aca51a4b7b
line source
1 /*
2 * arch/ia64/kernel/vmx_ivt.S
3 *
4 * Copyright (C) 1998-2001, 2003 Hewlett-Packard Co
5 * Stephane Eranian <eranian@hpl.hp.com>
6 * David Mosberger <davidm@hpl.hp.com>
7 * Copyright (C) 2000, 2002-2003 Intel Co
8 * Asit Mallick <asit.k.mallick@intel.com>
9 * Suresh Siddha <suresh.b.siddha@intel.com>
10 * Kenneth Chen <kenneth.w.chen@intel.com>
11 * Fenghua Yu <fenghua.yu@intel.com>
12 *
13 *
14 * 00/08/23 Asit Mallick <asit.k.mallick@intel.com> TLB handling for SMP
15 * 00/12/20 David Mosberger-Tang <davidm@hpl.hp.com> DTLB/ITLB handler now uses virtual PT.
16 *
17 * 05/3/20 Xuefei Xu (Anthony Xu) (anthony.xu@intel.com)
18 * Supporting Intel virtualization architecture
19 *
20 */
22 /*
23 * This file defines the interruption vector table used by the CPU.
24 * It does not include one entry per possible cause of interruption.
25 *
26 * The first 20 entries of the table contain 64 bundles each while the
27 * remaining 48 entries contain only 16 bundles each.
28 *
29 * The 64 bundles are used to allow inlining the whole handler for critical
30 * interruptions like TLB misses.
31 *
32 * For each entry, the comment is as follows:
33 *
34 * // 0x1c00 Entry 7 (size 64 bundles) Data Key Miss (12,51)
35 * entry offset ----/ / / / /
36 * entry number ---------/ / / /
37 * size of the entry -------------/ / /
38 * vector name -------------------------------------/ /
39 * interruptions triggering this vector ----------------------/
40 *
41 * The table is 32KB in size and must be aligned on 32KB boundary.
42 * (The CPU ignores the 15 lower bits of the address)
43 *
44 * Table is based upon EAS2.6 (Oct 1999)
45 */
47 #include <linux/config.h>
49 #include <asm/asmmacro.h>
50 #include <asm/break.h>
51 #include <asm/ia32.h>
52 #include <asm/kregs.h>
53 #include <asm/offsets.h>
54 #include <asm/pgtable.h>
55 #include <asm/processor.h>
56 #include <asm/ptrace.h>
57 #include <asm/system.h>
58 #include <asm/thread_info.h>
59 #include <asm/unistd.h>
60 #include <asm/vhpt.h>
61 #include <asm/virt_event.h>
62 #include <asm/vmx_phy_mode.h>
63 #include <xen/errno.h>
// Extra PSR bits OR'ed in when interruption collection is re-enabled
// (used below as "ssm psr.ic | PSR_DEFAULT_BITS" in the hypercall path).
// Currently enables alignment checking (psr.ac); flip the #if to disable.
65 #if 1
66 # define PSR_DEFAULT_BITS psr.ac
67 #else
68 # define PSR_DEFAULT_BITS 0
69 #endif
// VMX_DBG_FAULT(i): debug-only fault trace. When VTI_DEBUG is defined,
// records (cr.iip, cr.ipsr, cr.ifa, vector i) as one IVT_DEBUG_SIZE-byte
// record in a ring buffer at IVT_DBG_OFS, with the current write offset
// kept at IVT_CUR_OFS and wrapped via IVT_DEBUG_MASK.
// If psr.vm is clear, r21 is (re)loaded with the current task pointer from
// the cpu_kr area; otherwise the r21 already set up by minstate is used --
// NOTE(review): assumes r21 = current vcpu in that case, per minstate
// conventions; confirm against vmx_minstate.h.
// Clobbers r16-r23 and (transiently) r31/pr; predicates are restored.
// Expands to nothing when VTI_DEBUG is not defined.
72 #ifdef VTI_DEBUG
73 #define IVT_DEBUG_MASK (IVT_DEBUG_SIZE * (IVT_DEBUG_MAX - 1))
74 #define VMX_DBG_FAULT(i) \
75 mov r31=pr; \
76 mov r20=cr.ipsr;; \
77 tbit.z p6,p0=r20,IA64_PSR_VM_BIT;; \
78 (p6)movl r21=THIS_CPU(cpu_kr)+ \
79 IA64_KR_CURRENT_OFFSET;; \
80 (p6)ld8 r21=[r21]; \
81 mov pr=r31;; \
82 add r16=IVT_CUR_OFS,r21; \
83 add r17=IVT_DBG_OFS,r21;; \
84 ld8 r18=[r16];; \
85 add r17=r18,r17; \
86 mov r19=cr.iip; \
87 mov r22=cr.ifa; \
88 mov r23=i;; \
89 st8 [r17]=r19,8; \
90 add r18=IVT_DEBUG_SIZE,r18;; \
91 st8 [r17]=r20,8; \
92 mov r19=IVT_DEBUG_MASK;; \
93 st8 [r17]=r22,8; \
94 and r18=r19,r18;; \
95 st8 [r17]=r23; \
96 st8 [r16]=r18;;
97 #else
98 # define VMX_DBG_FAULT(i)
99 #endif
101 #include "vmx_minstate.h"
103 #define MINSTATE_VIRT /* needed by minstate.h */
104 #include "minstate.h"
// VMX_FAULT(n): terminal handler for vector n. Defines the label
// vmx_fault_<n> (branched to from elsewhere, e.g. vmx_alt_itlb_miss_dom),
// loads the vector number into r19 and hands off to the common C-side
// dispatcher dispatch_to_fault_handler.
107 #define VMX_FAULT(n) \
108 vmx_fault_##n:; \
109 mov r19=n; \
110 br.sptk.many dispatch_to_fault_handler; \
111 ;;
// VMX_REFLECT(n): standard body for a fault that is reflected into the
// guest. Saves predicates in r31, puts the vector number in r19, then:
// if the interrupted context was guest code (psr.vm=1) branch to
// vmx_dispatch_reflection; if it was VMM code (psr.vm=0) treat it as a
// hypervisor fault via dispatch_to_fault_handler.
113 #define VMX_REFLECT(n) \
114 mov r31=pr; \
115 mov r19=n; /* prepare to save predicates */ \
116 mov r29=cr.ipsr; \
117 ;; \
118 tbit.z p6,p7=r29,IA64_PSR_VM_BIT; \
119 (p7)br.sptk.many vmx_dispatch_reflection; \
120 br.sptk.many dispatch_to_fault_handler
// vmx_panic: last-resort halt -- spins forever by branching to itself.
// Only assembled when CONFIG_VMX_PANIC is enabled.
122 #ifdef CONFIG_VMX_PANIC
123 GLOBAL_ENTRY(vmx_panic)
124 br.sptk.many vmx_panic // intentional infinite loop
125 ;;
126 END(vmx_panic)
127 #endif
// Start of the VTi interruption vector table proper. The CPU requires the
// IVT base (loaded into cr.iva elsewhere) to be 32KB-aligned; each entry
// below is placed at its architectural offset with .org directives.
132 .section .text.ivt,"ax"
134 .align 32768 // align on 32KB boundary
135 .global vmx_ia64_ivt
136 vmx_ia64_ivt:
137 /////////////////////////////////////////////////////////////////////////////////////////
138 // 0x0000 Entry 0 (size 64 bundles) VHPT Translation (8,20,47)
// Not handled inline: log (debug builds) and punt to the common fault
// dispatcher with vector 0.
139 ENTRY(vmx_vhpt_miss)
140 VMX_DBG_FAULT(0)
141 VMX_FAULT(0)
142 END(vmx_vhpt_miss)
144 .org vmx_ia64_ivt+0x400
145 /////////////////////////////////////////////////////////////////////////////////////////
146 // 0x0400 Entry 1 (size 64 bundles) ITLB (21)
//
// Guest instruction-TLB miss fast path. If psr.vm=0 the miss occurred in
// VMM context, so it is served by the alternate-ITLB identity-map path.
// Otherwise the software VTLB collision chain rooted at thash(cr.ifa) is
// walked, comparing ttag(cr.ifa) against each entry's tag. On a hit the
// found entry is swapped with the chain-head entry (pseudo-LRU, done with
// the tag's ti bit set so hardware never sees a half-swapped entry), the
// translation is inserted with itc.i, and the guest is resumed through
// ia64_vmm_entry. On a miss, fall out to vmx_dispatch_tlb_miss (r19=1).
// NOTE(review): r21 is assumed to still hold the current vcpu pointer
// (used with IA64_VPD_BASE_OFFSET below) -- confirm minstate conventions.
//
147 ENTRY(vmx_itlb_miss)
148 VMX_DBG_FAULT(1)
149 mov r29=cr.ipsr
150 mov r31 = pr
151 ;;
152 tbit.z p6,p7=r29,IA64_PSR_VM_BIT
153 (p6) br.sptk vmx_alt_itlb_miss_vmm
154 mov r16 = cr.ifa
155 ;;
156 thash r17 = r16 // r17 = VHPT slot for the faulting address
157 ttag r20 = r16 // r20 = tag to match against chain entries
158 ;;
159 mov r18 = r17 // r18 = head entry (for the LRU swap below)
160 adds r28 = VLE_TITAG_OFFSET,r17
161 adds r19 = VLE_CCHAIN_OFFSET, r17
162 ;;
163 ld8 r17 = [r19] // Read chain
164 ;;
165 vmx_itlb_loop:
166 cmp.eq p6,p0 = r0, r17 // End of chain ?
167 (p6)br vmx_itlb_out
168 ;;
169 adds r16 = VLE_TITAG_OFFSET, r17
170 adds r19 = VLE_CCHAIN_OFFSET, r17
171 ;;
172 ld8 r24 = [r16] // Read tag
173 ld8 r23 = [r19] // Read chain
174 ;;
175 lfetch [r23] // prefetch next link while comparing
176 cmp.eq p6,p7 = r20, r24 // does tag match ?
177 ;;
178 (p7)mov r17 = r23; // No: entry = chain
179 (p7)br.sptk vmx_itlb_loop // again
180 ;;
181 // Swap the first entry with the entry found in the collision chain
182 // to speed up next hardware search (and keep LRU).
183 // In comments 1 stands for the first entry and 2 for the found entry.
184 ld8 r25 = [r17] // Read value of 2
185 ld8 r27 = [r18] // Read value of 1
186 ld8 r29 = [r28] // Read tag of 1
187 dep r22 = -1,r24,63,1 // set ti=1 of 2 (to disable it during the swap)
188 ;;
189 st8 [r16] = r29, VLE_ITIR_OFFSET - VLE_TITAG_OFFSET // Write tag of 2
190 st8 [r28] = r22, VLE_ITIR_OFFSET - VLE_TITAG_OFFSET // Write tag of 1
191 extr.u r19 = r27, 56, 4 // Extract collision chain length
192 mf // order tag writes before the itir/value swap
193 ;;
194 ld8 r29 = [r16] // read itir of 2
195 ld8 r22 = [r28] // read itir of 1
196 dep r27 = r0, r27, 56, 4 // Clear collision chain length for 2
197 dep r25 = r19, r25, 56, 4 // Write collision chain length for 1
198 ;;
199 st8 [r16] = r22 // Write itir of 2
200 st8 [r28] = r29, VLE_TITAG_OFFSET - VLE_ITIR_OFFSET // write itir of 1
201 st8 [r18] = r25 // Write value of 1
202 st8 [r17] = r27 // Write value of 2
203 ;;
204 st8.rel [r28] = r24 // Write tag of 1 (with ti=0)
205 // Insert the translation entry
206 itc.i r25
207 dv_serialize_data
208 // Resume
209 mov r17=cr.isr
210 mov r23=r31 // preserved predicates for ia64_vmm_entry
211 mov r22=b0
212 adds r16=IA64_VPD_BASE_OFFSET,r21
213 ;;
214 ld8 r18=[r16]
215 ;;
216 adds r19=VPD(VPSR),r18
217 movl r20=__vsa_base
218 ;;
219 ld8 r19=[r19] // r19 = guest's virtual psr
220 ld8 r20=[r20] // r20 = PAL VSA base
221 ;;
222 br.sptk ia64_vmm_entry
223 ;;
224 vmx_itlb_out:
225 mov r19 = 1 // vector number for the dispatcher
226 br.sptk vmx_dispatch_tlb_miss
227 VMX_FAULT(1);
228 END(vmx_itlb_miss)
230 .org vmx_ia64_ivt+0x0800
231 /////////////////////////////////////////////////////////////////////////////////////////
232 // 0x0800 Entry 2 (size 64 bundles) DTLB (9,48)
//
// Guest data-TLB miss fast path. Exact mirror of vmx_itlb_miss above
// (see its comments for the chain walk and LRU head-swap), except that
// the translation is inserted with itc.d and a miss dispatches with
// vector 2. Kept order-identical to the ITLB path on purpose.
//
233 ENTRY(vmx_dtlb_miss)
234 VMX_DBG_FAULT(2)
235 mov r29=cr.ipsr
236 mov r31 = pr
237 ;;
238 tbit.z p6,p7=r29,IA64_PSR_VM_BIT
239 (p6)br.sptk vmx_alt_dtlb_miss_vmm
240 mov r16 = cr.ifa
241 ;;
242 thash r17 = r16
243 ttag r20 = r16
244 ;;
245 mov r18 = r17
246 adds r28 = VLE_TITAG_OFFSET,r17
247 adds r19 = VLE_CCHAIN_OFFSET, r17
248 ;;
249 ld8 r17 = [r19] // read chain head
250 ;;
251 vmx_dtlb_loop:
252 cmp.eq p6,p0 = r0, r17 // end of chain?
253 (p6)br vmx_dtlb_out
254 ;;
255 adds r16 = VLE_TITAG_OFFSET, r17
256 adds r19 = VLE_CCHAIN_OFFSET, r17
257 ;;
258 ld8 r24 = [r16] // read tag
259 ld8 r23 = [r19] // read next link
260 ;;
261 lfetch [r23]
262 cmp.eq p6,p7 = r20, r24 // tag match?
263 ;;
264 (p7)mov r17 = r23;
265 (p7)br.sptk vmx_dtlb_loop
266 ;;
// Swap found entry with the chain head (see itlb path for commentary).
267 ld8 r25 = [r17]
268 ld8 r27 = [r18]
269 ld8 r29 = [r28]
270 dep r22 = -1,r24,63,1 //set ti=1
271 ;;
272 st8 [r16] = r29, VLE_ITIR_OFFSET - VLE_TITAG_OFFSET
273 st8 [r28] = r22, VLE_ITIR_OFFSET - VLE_TITAG_OFFSET
274 extr.u r19 = r27, 56, 4
275 mf
276 ;;
277 ld8 r29 = [r16]
278 ld8 r22 = [r28]
279 dep r27 = r0, r27, 56, 4
280 dep r25 = r19, r25, 56, 4
281 ;;
282 st8 [r16] = r22
283 st8 [r28] = r29, VLE_TITAG_OFFSET - VLE_ITIR_OFFSET
284 st8 [r18] = r25
285 st8 [r17] = r27
286 ;;
287 st8.rel [r28] = r24 // re-enable head tag (ti=0)
288 itc.d r25 // insert the data translation
289 dv_serialize_data
290 mov r17=cr.isr
291 mov r23=r31
292 mov r22=b0
293 adds r16=IA64_VPD_BASE_OFFSET,r21
294 ;;
295 ld8 r18=[r16]
296 ;;
297 adds r19=VPD(VPSR),r18
298 movl r20=__vsa_base
299 ;;
300 ld8 r19=[r19]
301 ld8 r20=[r20]
302 ;;
303 br.sptk ia64_vmm_entry
304 ;;
305 vmx_dtlb_out:
306 mov r19 = 2 // vector number for the dispatcher
307 br.sptk vmx_dispatch_tlb_miss
308 VMX_FAULT(2);
309 END(vmx_dtlb_miss)
311 .org vmx_ia64_ivt+0x0c00
312 /////////////////////////////////////////////////////////////////////////////////////////
313 // 0x0c00 Entry 3 (size 64 bundles) Alt ITLB (19)
//
// Alternate instruction-TLB miss. VMM context (psr.vm=0) gets an identity
// mapping built on the spot: physical address bits of cr.ifa + PAGE_KERNEL
// attributes, with bit 4 (memory attribute) set when the Xen UC region bit
// of the address is set. Guest context (psr.vm=1) in emulated physical
// data mode dispatches vector 3; otherwise it falls through to vmx_fault_3.
//
314 ENTRY(vmx_alt_itlb_miss)
315 VMX_DBG_FAULT(3)
316 mov r29=cr.ipsr
317 mov r31 = pr
318 adds r22=IA64_VCPU_MMU_MODE_OFFSET, r21
319 ;;
320 tbit.nz p7,p0=r29,IA64_PSR_VM_BIT
321 (p7)br.spnt vmx_alt_itlb_miss_dom
322 vmx_alt_itlb_miss_vmm:
323 mov r16=cr.ifa // get address that caused the TLB miss
324 movl r17=PAGE_KERNEL
325 mov r24=cr.ipsr // NOTE(review): unused on this path (cf. alt-DTLB)
326 movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
327 ;;
328 and r19=r19,r16 // clear ed, reserved bits, and PTE control bits
// Fix: extract length was 15; only the single UC bit is wanted. The dep
// below deposits just 1 bit so behavior is unchanged, but this now matches
// the comment and the identical sequence in vmx_alt_dtlb_miss.
329 extr.u r18=r16,XEN_VIRT_UC_BIT, 1 // extract UC bit
330 ;;
331 or r19=r17,r19 // insert PTE control bits into r19
332 mov r20=IA64_GRANULE_SHIFT<<2
333 ;;
334 dep r19=r18,r19,4,1 // set bit 4 (uncached) if the access was to UC region
335 mov cr.itir=r20
336 ;;
337 itc.i r19 // insert the TLB entry
338 mov pr=r31,-1
339 rfi
340 ;;
341 vmx_alt_itlb_miss_dom:
342 ld1 r23=[r22] // Load mmu_mode
343 ;;
344 cmp.eq p6,p7=VMX_MMU_PHY_D,r23
345 (p7)br.sptk vmx_fault_3
346 ;;
347 mov r19=3
348 br.sptk vmx_dispatch_tlb_miss
349 VMX_FAULT(3);
350 END(vmx_alt_itlb_miss)
353 .org vmx_ia64_ivt+0x1000
354 /////////////////////////////////////////////////////////////////////////////////////////
355 // 0x1000 Entry 4 (size 64 bundles) Alt DTLB (7,46)
//
// Alternate data-TLB miss. VMM context gets an identity mapping like the
// alt-ITLB path, with two extras: an optional redirect to the virtual
// frame-table handler, and speculative-load handling (a failed speculative
// non-lfetch access sets psr.ed and skips the insert so the ld.s turns
// into a NaT instead of faulting). Guest context in emulated physical
// data mode dispatches vector 4; otherwise falls through to vmx_fault_4.
//
356 ENTRY(vmx_alt_dtlb_miss)
357 VMX_DBG_FAULT(4)
358 mov r29=cr.ipsr
359 mov r31=pr
360 adds r22=IA64_VCPU_MMU_MODE_OFFSET, r21
361 ;;
362 tbit.nz p7,p0=r29,IA64_PSR_VM_BIT
363 (p7)br.spnt vmx_alt_dtlb_miss_dom
364 vmx_alt_dtlb_miss_vmm:
365 mov r16=cr.ifa // get address that caused the TLB miss
366 ;;
367 #ifdef CONFIG_VIRTUAL_FRAME_TABLE
368 // Test for the address of virtual frame_table
369 shr r22=r16,56;;
370 cmp.eq p8,p0=((VIRT_FRAME_TABLE_ADDR>>56)&0xff)-0x100,r22
371 (p8)br.cond.sptk frametable_miss ;;
372 #endif
373 movl r17=PAGE_KERNEL
374 mov r20=cr.isr
375 movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
376 mov r24=cr.ipsr
377 ;;
378 and r22=IA64_ISR_CODE_MASK,r20 // get the isr.code field
379 tbit.nz p6,p7=r20,IA64_ISR_SP_BIT // is speculation bit on?
380 extr.u r18=r16,XEN_VIRT_UC_BIT, 1 // extract UC bit
381 and r19=r19,r16 // clear ed, reserved bits, and PTE control bits
382 tbit.nz p9,p0=r20,IA64_ISR_NA_BIT // is non-access bit on?
383 ;;
384 (p9)cmp.eq.or.andcm p6,p7=IA64_ISR_CODE_LFETCH,r22 // check isr.code field
385 dep r24=-1,r24,IA64_PSR_ED_BIT,1
386 or r19=r19,r17 // insert PTE control bits into r19
387 mov r20=IA64_GRANULE_SHIFT<<2
388 ;;
389 dep r19=r18,r19,4,1 // set bit 4 (uncached) if the access was to UC region
390 (p6)mov cr.ipsr=r24 // speculative: set psr.ed, deliver NaT instead
391 mov cr.itir=r20
392 ;;
393 (p7)itc.d r19 // insert the TLB entry
394 mov pr=r31,-1
395 rfi
396 ;;
397 vmx_alt_dtlb_miss_dom:
398 ld1 r23=[r22] // Load mmu_mode
399 ;;
400 cmp.eq p6,p7=VMX_MMU_PHY_D,r23
401 (p7)br.sptk vmx_fault_4
402 ;;
403 mov r19=4
404 br.sptk vmx_dispatch_tlb_miss
405 VMX_FAULT(4);
406 END(vmx_alt_dtlb_miss)
408 .org vmx_ia64_ivt+0x1400
409 /////////////////////////////////////////////////////////////////////////////////////////
410 // 0x1400 Entry 5 (size 64 bundles) Data nested TLB (6,45)
// Nested DTLB misses are not handled inline: dispatch vector 5 to the
// common fault handler.
411 ENTRY(vmx_nested_dtlb_miss)
412 VMX_DBG_FAULT(5)
413 VMX_FAULT(5)
414 END(vmx_nested_dtlb_miss)
// Entries 6-10: key-miss, dirty-bit and access-bit faults are simply
// reflected into the guest (or treated as VMM faults) via VMX_REFLECT.
416 .org vmx_ia64_ivt+0x1800
417 /////////////////////////////////////////////////////////////////////////////////////////
418 // 0x1800 Entry 6 (size 64 bundles) Instruction Key Miss (24)
419 ENTRY(vmx_ikey_miss)
420 VMX_DBG_FAULT(6)
421 VMX_REFLECT(6)
422 END(vmx_ikey_miss)
424 .org vmx_ia64_ivt+0x1c00
425 /////////////////////////////////////////////////////////////////////////////////////////
426 // 0x1c00 Entry 7 (size 64 bundles) Data Key Miss (12,51)
427 ENTRY(vmx_dkey_miss)
428 VMX_DBG_FAULT(7)
429 VMX_REFLECT(7)
430 END(vmx_dkey_miss)
432 .org vmx_ia64_ivt+0x2000
433 /////////////////////////////////////////////////////////////////////////////////////////
434 // 0x2000 Entry 8 (size 64 bundles) Dirty-bit (54)
435 ENTRY(vmx_dirty_bit)
436 VMX_DBG_FAULT(8)
437 VMX_REFLECT(8)
438 END(vmx_dirty_bit)
440 .org vmx_ia64_ivt+0x2400
441 /////////////////////////////////////////////////////////////////////////////////////////
442 // 0x2400 Entry 9 (size 64 bundles) Instruction Access-bit (27)
443 ENTRY(vmx_iaccess_bit)
444 VMX_DBG_FAULT(9)
445 VMX_REFLECT(9)
446 END(vmx_iaccess_bit)
448 .org vmx_ia64_ivt+0x2800
449 /////////////////////////////////////////////////////////////////////////////////////////
450 // 0x2800 Entry 10 (size 64 bundles) Data Access-bit (15,55)
451 ENTRY(vmx_daccess_bit)
452 VMX_DBG_FAULT(10)
453 VMX_REFLECT(10)
454 END(vmx_daccess_bit)
456 .org vmx_ia64_ivt+0x2c00
457 /////////////////////////////////////////////////////////////////////////////////////////
458 // 0x2c00 Entry 11 (size 64 bundles) Break instruction (33)
//
// break handler: the fast path services ring-0 guest hypercalls inline,
// derived from the Linux syscall fast path. Conditions for the fast path:
// psr.vm set, break immediate == the vcpu's breakimm, cpl == 0, and
// hypercall number (r2) <= NR_hypercalls. Anything else goes to
// vmx_dispatch_break_fault. On the fast path registers are saved per the
// streamlined convention below, the handler address is looked up in
// ia64_hypercall_table, and return goes through ia64_leave_hypercall.
//
459 ENTRY(vmx_break_fault)
460 VMX_DBG_FAULT(11)
461 mov r31=pr
462 mov r19=11
463 mov r17=cr.iim
464 mov r29=cr.ipsr
465 ;;
466 tbit.z p6,p0=r29,IA64_PSR_VM_BIT
467 (p6)br.sptk.many vmx_dispatch_break_fault /* make sure before access [r21] */
468 adds r22=IA64_VCPU_BREAKIMM_OFFSET, r21
469 ;;
470 ld4 r22=[r22] // r22 = this vcpu's break immediate
471 extr.u r24=r29,IA64_PSR_CPL0_BIT,2 // r24 = guest privilege level
472 cmp.ltu p6,p0=NR_hypercalls,r2 // r2 = hypercall number from guest
473 ;;
474 cmp.ne.or p6,p0=r22,r17 // wrong break immediate?
475 cmp.ne.or p6,p0=r0,r24 // not ring 0?
476 (p6) br.sptk.many vmx_dispatch_break_fault
477 ;;
478 /*
479 * The streamlined system call entry/exit paths only save/restore the initial part
480 * of pt_regs. This implies that the callers of system-calls must adhere to the
481 * normal procedure calling conventions.
482 *
483 * Registers to be saved & restored:
484 * CR registers: cr.ipsr, cr.iip, cr.ifs
485 * AR registers: ar.unat, ar.pfs, ar.rsc, ar.rnat, ar.bspstore, ar.fpsr
486 * others: pr, b0, b6, loadrs, r1, r11, r12, r13, r15
487 * Registers to be restored only:
488 * r8-r11: output value from the system call.
489 *
490 * During system call exit, scratch registers (including r15) are modified/cleared
491 * to prevent leaking bits from kernel to user level.
492 */
494 // mov.m r16=IA64_KR(CURRENT) // M2 r16 <- current task (12 cyc)
495 mov r14=r21 // stash current vcpu before bank switch
496 bsw.1 // B (6 cyc) regs are saved, switch to bank 1
497 ;;
498 mov r29=cr.ipsr // M2 (12 cyc)
499 mov r31=pr // I0 (2 cyc)
500 mov r16=r14 // r16 = current vcpu (bank-1 copy)
501 mov r15=r2 // r15 = hypercall number
503 mov r17=cr.iim // M2 (2 cyc)
504 mov.m r27=ar.rsc // M2 (12 cyc)
505 // mov r18=__IA64_BREAK_SYSCALL // A
507 mov.m ar.rsc=0 // M2
508 mov.m r21=ar.fpsr // M2 (12 cyc)
509 mov r19=b6 // I0 (2 cyc)
510 ;;
511 mov.m r23=ar.bspstore // M2 (12 cyc)
512 mov.m r24=ar.rnat // M2 (5 cyc)
513 mov.i r26=ar.pfs // I0 (2 cyc)
515 invala // M0|1
516 nop.m 0 // M
517 mov r20=r1 // A save r1
519 nop.m 0
520 // movl r30=sys_call_table // X
521 movl r30=ia64_hypercall_table // X
523 mov r28=cr.iip // M2 (2 cyc)
524 // cmp.eq p0,p7=r18,r17 // I0 is this a system call?
525 //(p7) br.cond.spnt non_syscall // B no ->
526 //
527 // From this point on, we are definitely on the syscall-path
528 // and we can use (non-banked) scratch registers.
529 //
530 ///////////////////////////////////////////////////////////////////////
531 mov r1=r16 // A move task-pointer to "addl"-addressable reg
532 mov r2=r16 // A setup r2 for ia64_syscall_setup
533 // add r9=TI_FLAGS+IA64_TASK_SIZE,r16 // A r9 = &current_thread_info()->flags
535 // adds r16=IA64_TASK_THREAD_ON_USTACK_OFFSET,r16
536 // adds r15=-1024,r15 // A subtract 1024 from syscall number
537 // mov r3=NR_syscalls - 1
538 mov r3=NR_hypercalls - 1
539 ;;
540 // ld1.bias r17=[r16] // M0|1 r17 = current->thread.on_ustack flag
541 // ld4 r9=[r9] // M0|1 r9 = current_thread_info()->flags
542 mov r9=r0 // force flags = 0
543 extr.u r8=r29,41,2 // I0 extract ei field from cr.ipsr
545 shladd r30=r15,3,r30 // A r30 = sys_call_table + 8*(syscall-1024)
546 addl r22=IA64_RBS_OFFSET,r1 // A compute base of RBS
547 cmp.leu p6,p7=r15,r3 // A syscall number in range?
548 ;;
550 lfetch.fault.excl.nt1 [r22] // M0|1 prefetch RBS
551 (p6) ld8 r30=[r30] // M0|1 load address of syscall entry point
552 tnat.nz.or p7,p0=r15 // I0 is syscall nr a NaT?
554 mov.m ar.bspstore=r22 // M2 switch to kernel RBS
555 cmp.eq p8,p9=2,r8 // A isr.ei==2?
556 ;;
558 (p8) mov r8=0 // A clear ei to 0
559 //(p7) movl r30=sys_ni_syscall // X
560 (p7) movl r30=do_ni_hypercall // X
562 (p8) adds r28=16,r28 // A switch cr.iip to next bundle
563 (p9) adds r8=1,r8 // A increment ei to next slot
564 nop.i 0
565 ;;
567 mov.m r25=ar.unat // M2 (5 cyc)
568 dep r29=r8,r29,41,2 // I0 insert new ei into cr.ipsr
569 // adds r15=1024,r15 // A restore original syscall number
570 //
571 // If any of the above loads miss in L1D, we'll stall here until
572 // the data arrives.
573 //
574 ///////////////////////////////////////////////////////////////////////
575 // st1 [r16]=r0 // M2|3 clear current->thread.on_ustack flag
576 mov b6=r30 // I0 setup syscall handler branch reg early
577 // cmp.ne pKStk,pUStk=r0,r0 // A were we on kernel stacks already?
579 // and r9=_TIF_SYSCALL_TRACEAUDIT,r9 // A mask trace or audit
580 mov r18=ar.bsp // M2 (12 cyc)
581 ;;
582 //(pUStk) addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r1 // A compute base of memory stack
583 addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r1 // A compute base of memory stack
584 // cmp.eq p14,p0=r9,r0 // A are syscalls being traced/audited?
585 // br.call.sptk.many b7=ia64_syscall_setup // B
586 br.call.sptk.many b7=ia64_hypercall_setup // B
587 1:
588 mov ar.rsc=0x3 // M2 set eager mode, pl 0, LE, loadrs=0
589 // nop 0
590 // bsw.1 // B (6 cyc) regs are saved, switch to bank 1
591 ;;
592 ssm psr.ic | PSR_DEFAULT_BITS // M2 now it's safe to re-enable intr.-collection
593 // movl r3=ia64_ret_from_syscall // X
594 movl r3=ia64_leave_hypercall // X
595 ;;
597 srlz.i // M0 ensure interruption collection is on
598 mov rp=r3 // I0 set the real return addr
599 //(p10) br.cond.spnt.many ia64_ret_from_syscall // B return if bad call-frame or r15 is a NaT
600 (p15) ssm psr.i // M2 restore psr.i
601 //(p14) br.call.sptk.many b6=b6 // B invoke syscall-handler (ignore return addr)
602 br.call.sptk.many b6=b6 // B invoke syscall-handler (ignore return addr)
603 // br.cond.spnt.many ia64_trace_syscall // B do syscall-tracing thingamagic
604 ;;
605 VMX_FAULT(11)
606 END(vmx_break_fault)
608 .org vmx_ia64_ivt+0x3000
609 /////////////////////////////////////////////////////////////////////////////////////////
610 // 0x3000 Entry 12 (size 64 bundles) External Interrupt (4)
// External interrupts are handled entirely in vmx_dispatch_interrupt
// (this entry was consolidated with the dispatcher -- see changeset note).
611 ENTRY(vmx_interrupt)
612 VMX_DBG_FAULT(12)
613 mov r31=pr // prepare to save predicates
614 mov r19=12
615 br.sptk vmx_dispatch_interrupt
616 END(vmx_interrupt)
618 .org vmx_ia64_ivt+0x3400
619 /////////////////////////////////////////////////////////////////////////////////////////
620 // 0x3400 Entry 13 (size 64 bundles) Reserved
// Architecturally reserved vector, reused here for injecting virtual
// external interrupts; hands off to vmx_dispatch_vexirq.
621 ENTRY(vmx_virtual_exirq)
622 VMX_DBG_FAULT(13)
623 mov r31=pr // save predicates for the dispatcher
624 mov r19=13
625 br.sptk vmx_dispatch_vexirq
626 END(vmx_virtual_exirq)
628 .org vmx_ia64_ivt+0x3800
629 /////////////////////////////////////////////////////////////////////////////////////////
630 // 0x3800 Entry 14 (size 64 bundles) Reserved
631 VMX_DBG_FAULT(14)
632 VMX_FAULT(14)
// The otherwise-unused space of this reserved entry holds
// ia64_hypercall_setup (below), mirroring Linux's placement of
// ia64_syscall_setup in ivt.S.
633 // this code segment is from 2.6.16.13
635 /*
636 * There is no particular reason for this code to be here, other than that
637 * there happens to be space here that would go unused otherwise. If this
638 * fault ever gets "unreserved", simply move the following code to a more
639 * suitable spot...
640 *
641 * ia64_syscall_setup() is a separate subroutine so that it can
642 * allocate stacked registers so it can safely demine any
643 * potential NaT values from the input registers.
644 *
645 * On entry:
646 * - executing on bank 0 or bank 1 register set (doesn't matter)
647 * - r1: stack pointer
648 * - r2: current task pointer
649 * - r3: preserved
650 * - r11: original contents (saved ar.pfs to be saved)
651 * - r12: original contents (sp to be saved)
652 * - r13: original contents (tp to be saved)
653 * - r15: original contents (syscall # to be saved)
654 * - r18: saved bsp (after switching to kernel stack)
655 * - r19: saved b6
656 * - r20: saved r1 (gp)
657 * - r21: saved ar.fpsr
658 * - r22: kernel's register backing store base (krbs_base)
659 * - r23: saved ar.bspstore
660 * - r24: saved ar.rnat
661 * - r25: saved ar.unat
662 * - r26: saved ar.pfs
663 * - r27: saved ar.rsc
664 * - r28: saved cr.iip
665 * - r29: saved cr.ipsr
666 * - r31: saved pr
667 * - b0: original contents (to be saved)
668 * On exit:
669 * - p10: TRUE if syscall is invoked with more than 8 out
670 * registers or r15's Nat is true
671 * - r1: kernel's gp
672 * - r3: preserved (same as on entry)
673 * - r8: -EINVAL if p10 is true
674 * - r12: points to kernel stack
675 * - r13: points to current task
676 * - r14: preserved (same as on entry)
677 * - p13: preserved
678 * - p15: TRUE if interrupts need to be re-enabled
679 * - ar.fpsr: set to kernel settings
680 * - b6: preserved (same as on entry)
681 */
// Populates pt_regs at [r1] from the register snapshot described above,
// demining NaTs in in0-in7, then returns to the caller via b7.
// Adapted byte-for-byte from Linux 2.6.16.13 ia64_syscall_setup; the
// pKStk/pUStk user-stack paths are commented out because Xen hypercalls
// always arrive on the guest (user-equivalent) stack.
682 GLOBAL_ENTRY(ia64_hypercall_setup)
683 #if PT(B6) != 0
684 # error This code assumes that b6 is the first field in pt_regs.
685 #endif
686 st8 [r1]=r19 // save b6
687 add r16=PT(CR_IPSR),r1 // initialize first base pointer
688 add r17=PT(R11),r1 // initialize second base pointer
689 ;;
690 alloc r19=ar.pfs,8,0,0,0 // ensure in0-in7 are writable
691 st8 [r16]=r29,PT(AR_PFS)-PT(CR_IPSR) // save cr.ipsr
692 tnat.nz p8,p0=in0
694 st8.spill [r17]=r11,PT(CR_IIP)-PT(R11) // save r11
695 tnat.nz p9,p0=in1
696 //(pKStk) mov r18=r0 // make sure r18 isn't NaT
697 ;;
699 st8 [r16]=r26,PT(CR_IFS)-PT(AR_PFS) // save ar.pfs
700 st8 [r17]=r28,PT(AR_UNAT)-PT(CR_IIP) // save cr.iip
701 mov r28=b0 // save b0 (2 cyc)
702 ;;
704 st8 [r17]=r25,PT(AR_RSC)-PT(AR_UNAT) // save ar.unat
705 dep r19=0,r19,38,26 // clear all bits but 0..37 [I0]
706 (p8) mov in0=-1
707 ;;
709 st8 [r16]=r19,PT(AR_RNAT)-PT(CR_IFS) // store ar.pfs.pfm in cr.ifs
710 extr.u r11=r19,7,7 // I0 // get sol of ar.pfs
711 and r8=0x7f,r19 // A // get sof of ar.pfs
713 st8 [r17]=r27,PT(AR_BSPSTORE)-PT(AR_RSC)// save ar.rsc
714 tbit.nz p15,p0=r29,IA64_PSR_I_BIT // I0
715 (p9) mov in1=-1
716 ;;
718 //(pUStk) sub r18=r18,r22 // r18=RSE.ndirty*8
719 sub r18=r18,r22 // r18=RSE.ndirty*8
720 tnat.nz p10,p0=in2
721 add r11=8,r11
722 ;;
723 //(pKStk) adds r16=PT(PR)-PT(AR_RNAT),r16 // skip over ar_rnat field
724 //(pKStk) adds r17=PT(B0)-PT(AR_BSPSTORE),r17 // skip over ar_bspstore field
725 tnat.nz p11,p0=in3
726 ;;
727 (p10) mov in2=-1
728 tnat.nz p12,p0=in4 // [I0]
729 (p11) mov in3=-1
730 ;;
731 //(pUStk) st8 [r16]=r24,PT(PR)-PT(AR_RNAT) // save ar.rnat
732 st8 [r16]=r24,PT(PR)-PT(AR_RNAT) // save ar.rnat
733 //(pUStk) st8 [r17]=r23,PT(B0)-PT(AR_BSPSTORE) // save ar.bspstore
734 st8 [r17]=r23,PT(B0)-PT(AR_BSPSTORE) // save ar.bspstore
735 shl r18=r18,16 // compute ar.rsc to be used for "loadrs"
736 ;;
737 st8 [r16]=r31,PT(LOADRS)-PT(PR) // save predicates
738 st8 [r17]=r28,PT(R1)-PT(B0) // save b0
739 tnat.nz p13,p0=in5 // [I0]
740 ;;
741 st8 [r16]=r18,PT(R12)-PT(LOADRS) // save ar.rsc value for "loadrs"
742 st8.spill [r17]=r20,PT(R13)-PT(R1) // save original r1
743 (p12) mov in4=-1
744 ;;
746 .mem.offset 0,0; st8.spill [r16]=r12,PT(AR_FPSR)-PT(R12) // save r12
747 .mem.offset 8,0; st8.spill [r17]=r13,PT(R15)-PT(R13) // save r13
748 (p13) mov in5=-1
749 ;;
750 st8 [r16]=r21,PT(R8)-PT(AR_FPSR) // save ar.fpsr
751 tnat.nz p13,p0=in6
752 cmp.lt p10,p9=r11,r8 // frame size can't be more than local+8
753 ;;
754 mov r8=1
755 (p9) tnat.nz p10,p0=r15
756 adds r12=-16,r1 // switch to kernel memory stack (with 16 bytes of scratch)
758 st8.spill [r17]=r15 // save r15
759 tnat.nz p8,p0=in7
760 nop.i 0
762 mov r13=r2 // establish `current'
763 movl r1=__gp // establish kernel global pointer
764 ;;
765 st8 [r16]=r8 // ensure pt_regs.r8 != 0 (see handle_syscall_error)
766 (p13) mov in6=-1
767 (p8) mov in7=-1
769 cmp.eq pSys,pNonSys=r0,r0 // set pSys=1, pNonSys=0
770 movl r17=FPSR_DEFAULT
771 ;;
772 mov.m ar.fpsr=r17 // set ar.fpsr to kernel default value
773 (p10) mov r8=-EINVAL
774 br.ret.sptk.many b7
775 END(ia64_hypercall_setup)
// Entries 15-19 are architecturally reserved: log (debug builds) and
// dispatch to the common fault handler.
778 .org vmx_ia64_ivt+0x3c00
779 /////////////////////////////////////////////////////////////////////////////////////////
780 // 0x3c00 Entry 15 (size 64 bundles) Reserved
781 VMX_DBG_FAULT(15)
782 VMX_FAULT(15)
785 .org vmx_ia64_ivt+0x4000
786 /////////////////////////////////////////////////////////////////////////////////////////
787 // 0x4000 Entry 16 (size 64 bundles) Reserved
788 VMX_DBG_FAULT(16)
789 VMX_FAULT(16)
791 .org vmx_ia64_ivt+0x4400
792 /////////////////////////////////////////////////////////////////////////////////////////
793 // 0x4400 Entry 17 (size 64 bundles) Reserved
794 VMX_DBG_FAULT(17)
795 VMX_FAULT(17)
797 .org vmx_ia64_ivt+0x4800
798 /////////////////////////////////////////////////////////////////////////////////////////
799 // 0x4800 Entry 18 (size 64 bundles) Reserved
800 VMX_DBG_FAULT(18)
801 VMX_FAULT(18)
803 .org vmx_ia64_ivt+0x4c00
804 /////////////////////////////////////////////////////////////////////////////////////////
805 // 0x4c00 Entry 19 (size 64 bundles) Reserved
806 VMX_DBG_FAULT(19)
807 VMX_FAULT(19)
// Entries 20-36 (16-bundle entries): these faults and traps are all
// reflected into the guest via VMX_REFLECT (or dispatched as VMM faults
// when psr.vm=0). Entry 28 is reserved and terminates via VMX_FAULT.
809 .org vmx_ia64_ivt+0x5000
810 /////////////////////////////////////////////////////////////////////////////////////////
811 // 0x5000 Entry 20 (size 16 bundles) Page Not Present
812 ENTRY(vmx_page_not_present)
813 VMX_DBG_FAULT(20)
814 VMX_REFLECT(20)
815 END(vmx_page_not_present)
817 .org vmx_ia64_ivt+0x5100
818 /////////////////////////////////////////////////////////////////////////////////////////
819 // 0x5100 Entry 21 (size 16 bundles) Key Permission vector
820 ENTRY(vmx_key_permission)
821 VMX_DBG_FAULT(21)
822 VMX_REFLECT(21)
823 END(vmx_key_permission)
825 .org vmx_ia64_ivt+0x5200
826 /////////////////////////////////////////////////////////////////////////////////////////
827 // 0x5200 Entry 22 (size 16 bundles) Instruction Access Rights (26)
828 ENTRY(vmx_iaccess_rights)
829 VMX_DBG_FAULT(22)
830 VMX_REFLECT(22)
831 END(vmx_iaccess_rights)
833 .org vmx_ia64_ivt+0x5300
834 /////////////////////////////////////////////////////////////////////////////////////////
835 // 0x5300 Entry 23 (size 16 bundles) Data Access Rights (14,53)
836 ENTRY(vmx_daccess_rights)
837 VMX_DBG_FAULT(23)
838 VMX_REFLECT(23)
839 END(vmx_daccess_rights)
841 .org vmx_ia64_ivt+0x5400
842 /////////////////////////////////////////////////////////////////////////////////////////
843 // 0x5400 Entry 24 (size 16 bundles) General Exception (5,32,34,36,38,39)
844 ENTRY(vmx_general_exception)
845 VMX_DBG_FAULT(24)
846 VMX_REFLECT(24)
847 // VMX_FAULT(24)
848 END(vmx_general_exception)
850 .org vmx_ia64_ivt+0x5500
851 /////////////////////////////////////////////////////////////////////////////////////////
852 // 0x5500 Entry 25 (size 16 bundles) Disabled FP-Register (35)
853 ENTRY(vmx_disabled_fp_reg)
854 VMX_DBG_FAULT(25)
855 VMX_REFLECT(25)
856 END(vmx_disabled_fp_reg)
858 .org vmx_ia64_ivt+0x5600
859 /////////////////////////////////////////////////////////////////////////////////////////
860 // 0x5600 Entry 26 (size 16 bundles) Nat Consumption (11,23,37,50)
861 ENTRY(vmx_nat_consumption)
862 VMX_DBG_FAULT(26)
863 VMX_REFLECT(26)
864 END(vmx_nat_consumption)
866 .org vmx_ia64_ivt+0x5700
867 /////////////////////////////////////////////////////////////////////////////////////////
868 // 0x5700 Entry 27 (size 16 bundles) Speculation (40)
869 ENTRY(vmx_speculation_vector)
870 VMX_DBG_FAULT(27)
871 VMX_REFLECT(27)
872 END(vmx_speculation_vector)
874 .org vmx_ia64_ivt+0x5800
875 /////////////////////////////////////////////////////////////////////////////////////////
876 // 0x5800 Entry 28 (size 16 bundles) Reserved
877 VMX_DBG_FAULT(28)
878 VMX_FAULT(28)
880 .org vmx_ia64_ivt+0x5900
881 /////////////////////////////////////////////////////////////////////////////////////////
882 // 0x5900 Entry 29 (size 16 bundles) Debug (16,28,56)
883 ENTRY(vmx_debug_vector)
884 VMX_DBG_FAULT(29)
885 VMX_REFLECT(29)
886 END(vmx_debug_vector)
888 .org vmx_ia64_ivt+0x5a00
889 /////////////////////////////////////////////////////////////////////////////////////////
890 // 0x5a00 Entry 30 (size 16 bundles) Unaligned Reference (57)
891 ENTRY(vmx_unaligned_access)
892 VMX_DBG_FAULT(30)
893 VMX_REFLECT(30)
894 END(vmx_unaligned_access)
896 .org vmx_ia64_ivt+0x5b00
897 /////////////////////////////////////////////////////////////////////////////////////////
898 // 0x5b00 Entry 31 (size 16 bundles) Unsupported Data Reference (57)
899 ENTRY(vmx_unsupported_data_reference)
900 VMX_DBG_FAULT(31)
901 VMX_REFLECT(31)
902 END(vmx_unsupported_data_reference)
904 .org vmx_ia64_ivt+0x5c00
905 /////////////////////////////////////////////////////////////////////////////////////////
906 // 0x5c00 Entry 32 (size 16 bundles) Floating-Point Fault (64)
907 ENTRY(vmx_floating_point_fault)
908 VMX_DBG_FAULT(32)
909 VMX_REFLECT(32)
910 END(vmx_floating_point_fault)
912 .org vmx_ia64_ivt+0x5d00
913 /////////////////////////////////////////////////////////////////////////////////////////
914 // 0x5d00 Entry 33 (size 16 bundles) Floating Point Trap (66)
915 ENTRY(vmx_floating_point_trap)
916 VMX_DBG_FAULT(33)
917 VMX_REFLECT(33)
918 END(vmx_floating_point_trap)
920 .org vmx_ia64_ivt+0x5e00
921 /////////////////////////////////////////////////////////////////////////////////////////
922 // 0x5e00 Entry 34 (size 16 bundles) Lower Privilege Transfer Trap (66)
923 ENTRY(vmx_lower_privilege_trap)
924 VMX_DBG_FAULT(34)
925 VMX_REFLECT(34)
926 END(vmx_lower_privilege_trap)
928 .org vmx_ia64_ivt+0x5f00
929 /////////////////////////////////////////////////////////////////////////////////////////
930 // 0x5f00 Entry 35 (size 16 bundles) Taken Branch Trap (68)
931 ENTRY(vmx_taken_branch_trap)
932 VMX_DBG_FAULT(35)
933 VMX_REFLECT(35)
934 END(vmx_taken_branch_trap)
936 .org vmx_ia64_ivt+0x6000
937 /////////////////////////////////////////////////////////////////////////////////////////
938 // 0x6000 Entry 36 (size 16 bundles) Single Step Trap (69)
939 ENTRY(vmx_single_step_trap)
940 VMX_DBG_FAULT(36)
941 VMX_REFLECT(36)
942 END(vmx_single_step_trap)
944 .global vmx_virtualization_fault_back
945 .org vmx_ia64_ivt+0x6100
946 /////////////////////////////////////////////////////////////////////////////////////////
947 // 0x6100 Entry 37 (size 16 bundles) Virtualization Fault
//
// Entered when the guest executes a virtualization-sensitive instruction.
// The decoded event class is expected in r24 and the opcode in r25 --
// NOTE(review): set up by hardware/VMM before this point; confirm against
// the VTi entry conventions. Seven common events (ar/rr moves, rsm, ssm,
// mov-to-psr, thash) branch to dedicated assembly emulators; everything
// else records cause/opcode in the vcpu struct and goes to the C handler.
// For RFI, a valid cr.vifs (bit 63 set) means the current register frame
// is to be discarded, done with the alloc before dispatching.
//
948 ENTRY(vmx_virtualization_fault)
949 // VMX_DBG_FAULT(37)
950 mov r31=pr
951 ;;
952 cmp.eq p6,p0=EVENT_MOV_FROM_AR,r24
953 cmp.eq p7,p0=EVENT_MOV_FROM_RR,r24
954 cmp.eq p8,p0=EVENT_MOV_TO_RR,r24
955 cmp.eq p9,p0=EVENT_RSM,r24
956 cmp.eq p10,p0=EVENT_SSM,r24
957 cmp.eq p11,p0=EVENT_MOV_TO_PSR,r24
958 cmp.eq p12,p0=EVENT_THASH,r24
959 (p6) br.dptk.many vmx_asm_mov_from_ar
960 (p7) br.dptk.many vmx_asm_mov_from_rr
961 (p8) br.dptk.many vmx_asm_mov_to_rr
962 (p9) br.dptk.many vmx_asm_rsm
963 (p10) br.dptk.many vmx_asm_ssm
964 (p11) br.dptk.many vmx_asm_mov_to_psr
965 (p12) br.dptk.many vmx_asm_thash
966 ;;
967 vmx_virtualization_fault_back: // re-entry point from the asm emulators
968 mov r19=37
969 adds r16 = IA64_VCPU_CAUSE_OFFSET,r21
970 adds r17 = IA64_VCPU_OPCODE_OFFSET,r21
971 ;;
972 st8 [r16] = r24 // stash cause for the C handler
973 st8 [r17] = r25 // stash opcode for the C handler
974 ;;
975 cmp.ne p6,p0=EVENT_RFI, r24
976 (p6) br.sptk vmx_dispatch_virtualization_fault
977 ;;
978 adds r18=IA64_VPD_BASE_OFFSET,r21
979 ;;
980 ld8 r18=[r18]
981 ;;
982 adds r18=IA64_VPD_VIFS_OFFSET,r18
983 ;;
984 ld8 r18=[r18]
985 ;;
986 tbit.z p6,p0=r18,63 // vifs.v clear -> nothing to discard
987 (p6) br.sptk vmx_dispatch_virtualization_fault
988 ;;
989 //if vifs.v=1 desert current register frame
990 alloc r18=ar.pfs,0,0,0,0
991 br.sptk vmx_dispatch_virtualization_fault
992 END(vmx_virtualization_fault)
    // Entries 38-44 are architecturally reserved; any interruption landing
    // here is unexpected and is handled by the VMX_FAULT macro.
    .org vmx_ia64_ivt+0x6200
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6200 Entry 38 (size 16 bundles) Reserved
    VMX_DBG_FAULT(38)
    VMX_FAULT(38)

    .org vmx_ia64_ivt+0x6300
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6300 Entry 39 (size 16 bundles) Reserved
    VMX_DBG_FAULT(39)
    VMX_FAULT(39)

    .org vmx_ia64_ivt+0x6400
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6400 Entry 40 (size 16 bundles) Reserved
    VMX_DBG_FAULT(40)
    VMX_FAULT(40)

    .org vmx_ia64_ivt+0x6500
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6500 Entry 41 (size 16 bundles) Reserved
    VMX_DBG_FAULT(41)
    VMX_FAULT(41)

    .org vmx_ia64_ivt+0x6600
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6600 Entry 42 (size 16 bundles) Reserved
    VMX_DBG_FAULT(42)
    VMX_FAULT(42)

    .org vmx_ia64_ivt+0x6700
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6700 Entry 43 (size 16 bundles) Reserved
    VMX_DBG_FAULT(43)
    VMX_FAULT(43)

    .org vmx_ia64_ivt+0x6800
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6800 Entry 44 (size 16 bundles) Reserved
    VMX_DBG_FAULT(44)
    VMX_FAULT(44)
    // IA-32 execution-related vectors (45-47). VTi does not support IA-32
    // code in guests here; these are treated as plain faults.
    .org vmx_ia64_ivt+0x6900
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6900 Entry 45 (size 16 bundles) IA-32 Exception (17,18,29,41,42,43,44,58,60,61,62,72,73,75,76,77)
ENTRY(vmx_ia32_exception)
    VMX_DBG_FAULT(45)
    VMX_FAULT(45)
END(vmx_ia32_exception)

    .org vmx_ia64_ivt+0x6a00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6a00 Entry 46 (size 16 bundles) IA-32 Intercept (30,31,59,70,71)
ENTRY(vmx_ia32_intercept)
    VMX_DBG_FAULT(46)
    VMX_FAULT(46)
END(vmx_ia32_intercept)

    .org vmx_ia64_ivt+0x6b00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6b00 Entry 47 (size 16 bundles) IA-32 Interrupt (74)
ENTRY(vmx_ia32_interrupt)
    VMX_DBG_FAULT(47)
    VMX_FAULT(47)
END(vmx_ia32_interrupt)
    // Entries 48-67 are architecturally reserved; each just traps through
    // VMX_FAULT. They are spelled out individually so every 256-byte slot
    // of the IVT remains explicitly pinned by its .org directive.
    .org vmx_ia64_ivt+0x6c00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6c00 Entry 48 (size 16 bundles) Reserved
    VMX_DBG_FAULT(48)
    VMX_FAULT(48)

    .org vmx_ia64_ivt+0x6d00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6d00 Entry 49 (size 16 bundles) Reserved
    VMX_DBG_FAULT(49)
    VMX_FAULT(49)

    .org vmx_ia64_ivt+0x6e00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6e00 Entry 50 (size 16 bundles) Reserved
    VMX_DBG_FAULT(50)
    VMX_FAULT(50)

    .org vmx_ia64_ivt+0x6f00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6f00 Entry 51 (size 16 bundles) Reserved
    VMX_DBG_FAULT(51)
    VMX_FAULT(51)

    .org vmx_ia64_ivt+0x7000
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7000 Entry 52 (size 16 bundles) Reserved
    VMX_DBG_FAULT(52)
    VMX_FAULT(52)

    .org vmx_ia64_ivt+0x7100
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7100 Entry 53 (size 16 bundles) Reserved
    VMX_DBG_FAULT(53)
    VMX_FAULT(53)

    .org vmx_ia64_ivt+0x7200
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7200 Entry 54 (size 16 bundles) Reserved
    VMX_DBG_FAULT(54)
    VMX_FAULT(54)

    .org vmx_ia64_ivt+0x7300
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7300 Entry 55 (size 16 bundles) Reserved
    VMX_DBG_FAULT(55)
    VMX_FAULT(55)

    .org vmx_ia64_ivt+0x7400
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7400 Entry 56 (size 16 bundles) Reserved
    VMX_DBG_FAULT(56)
    VMX_FAULT(56)

    .org vmx_ia64_ivt+0x7500
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7500 Entry 57 (size 16 bundles) Reserved
    VMX_DBG_FAULT(57)
    VMX_FAULT(57)

    .org vmx_ia64_ivt+0x7600
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7600 Entry 58 (size 16 bundles) Reserved
    VMX_DBG_FAULT(58)
    VMX_FAULT(58)

    .org vmx_ia64_ivt+0x7700
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7700 Entry 59 (size 16 bundles) Reserved
    VMX_DBG_FAULT(59)
    VMX_FAULT(59)

    .org vmx_ia64_ivt+0x7800
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7800 Entry 60 (size 16 bundles) Reserved
    VMX_DBG_FAULT(60)
    VMX_FAULT(60)

    .org vmx_ia64_ivt+0x7900
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7900 Entry 61 (size 16 bundles) Reserved
    VMX_DBG_FAULT(61)
    VMX_FAULT(61)

    .org vmx_ia64_ivt+0x7a00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7a00 Entry 62 (size 16 bundles) Reserved
    VMX_DBG_FAULT(62)
    VMX_FAULT(62)

    .org vmx_ia64_ivt+0x7b00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7b00 Entry 63 (size 16 bundles) Reserved
    VMX_DBG_FAULT(63)
    VMX_FAULT(63)

    .org vmx_ia64_ivt+0x7c00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7c00 Entry 64 (size 16 bundles) Reserved
    VMX_DBG_FAULT(64)
    VMX_FAULT(64)

    .org vmx_ia64_ivt+0x7d00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7d00 Entry 65 (size 16 bundles) Reserved
    VMX_DBG_FAULT(65)
    VMX_FAULT(65)

    .org vmx_ia64_ivt+0x7e00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7e00 Entry 66 (size 16 bundles) Reserved
    VMX_DBG_FAULT(66)
    VMX_FAULT(66)

    .org vmx_ia64_ivt+0x7f00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7f00 Entry 67 (size 16 bundles) Reserved
    VMX_DBG_FAULT(67)
    VMX_FAULT(67)
1180 .org vmx_ia64_ivt+0x8000
1181 // There is no particular reason for this code to be here, other than that
1182 // there happens to be space here that would go unused otherwise. If this
// fault ever gets "unreserved", simply move the following code to a more
1184 // suitable spot...
// Dispatch an interruption that must be reflected into the guest.
// Builds a pt_regs frame, turns interruption collection back on, and calls
// the C routine vmx_reflect_interruption(ifa, isr, iim, r15, &regs).
ENTRY(vmx_dispatch_reflection)
    /*
     * Input:
     *  psr.ic: off
     *  r19:    intr type (offset into ivt, see ia64_int.h)
     *  r31:    contains saved predicates (pr)
     */
    VMX_SAVE_MIN_WITH_COVER_R19
    alloc r14=ar.pfs,0,0,5,0        // 5 output registers for the C call
    mov out0=cr.ifa
    mov out1=cr.isr
    mov out2=cr.iim
    mov out3=r15
    adds r3=8,r2                    // set up second base pointer
    ;;
    ssm psr.ic
    ;;
    srlz.i                          // guarantee that interruption collection is on
    ;;
    (p15) ssm psr.i                 // restore psr.i
    movl r14=ia64_leave_hypervisor
    ;;
    VMX_SAVE_REST
    mov rp=r14                      // return into ia64_leave_hypervisor
    ;;
    // NOTE(review): P6_BR_CALL_PANIC presumably branches to a panic path
    // when p6 was set by the SAVE_MIN sanity checks -- confirm against the
    // macro definition.
    P6_BR_CALL_PANIC(.Lvmx_dispatch_reflection_string)
    adds out4=16,r12                // out4 = &pt_regs (r12 is the stack pointer)
    br.call.sptk.many b6=vmx_reflect_interruption
END(vmx_dispatch_reflection)
// Slow-path handler for virtualization faults that have no assembler fast
// path. Saves full state (including the extra registers the C emulator may
// touch) and calls vmx_emulate(vcpu, &regs).
ENTRY(vmx_dispatch_virtualization_fault)
    VMX_SAVE_MIN_WITH_COVER
    ;;
    alloc r14=ar.pfs,0,0,2,0        // now it's safe (must be first in insn group!)
    mov out0=r13                    //vcpu
    adds r3=8,r2                    // set up second base pointer
    ;;
    ssm psr.ic
    ;;
    srlz.i                          // guarantee that interruption collection is on
    ;;
    (p15) ssm psr.i                 // restore psr.i
    movl r14=ia64_leave_hypervisor_prepare
    ;;
    VMX_SAVE_REST
    VMX_SAVE_EXTRA                  // emulation may modify registers SAVE_REST skips
    mov rp=r14                      // return into ia64_leave_hypervisor_prepare
    ;;
    P6_BR_CALL_PANIC(.Lvmx_dispatch_virtualization_fault_string)
    adds out1=16,sp                 //regs
    br.call.sptk.many b6=vmx_emulate
END(vmx_dispatch_virtualization_fault)
// Dispatch a virtual external interrupt to the C handler vmx_vexirq(vcpu).
// Global so it can be branched to from outside this file.
GLOBAL_ENTRY(vmx_dispatch_vexirq)
    VMX_SAVE_MIN_WITH_COVER
    alloc r14=ar.pfs,0,0,1,0        // single output register for the C call
    mov out0=r13                    // vcpu
    ssm psr.ic
    ;;
    srlz.i                          // guarantee that interruption collection is on
    ;;
    (p15) ssm psr.i                 // restore psr.i
    adds r3=8,r2                    // set up second base pointer
    ;;
    VMX_SAVE_REST
    movl r14=ia64_leave_hypervisor
    ;;
    mov rp=r14                      // return into ia64_leave_hypervisor
    P6_BR_CALL_PANIC(.Lvmx_dispatch_vexirq_string)
    br.call.sptk.many b6=vmx_vexirq
END(vmx_dispatch_vexirq)
// Dispatch a TLB miss that needs the C hypervisor page-table walker:
// calls vmx_hpw_miss(ifa, r15, &regs).
ENTRY(vmx_dispatch_tlb_miss)
    VMX_SAVE_MIN_WITH_COVER_R19
    alloc r14=ar.pfs,0,0,3,0        // 3 output registers for the C call
    mov out0=cr.ifa                 // faulting address
    mov out1=r15
    adds r3=8,r2                    // set up second base pointer
    ;;
    ssm psr.ic
    ;;
    srlz.i                          // guarantee that interruption collection is on
    ;;
    (p15) ssm psr.i                 // restore psr.i
    movl r14=ia64_leave_hypervisor
    ;;
    VMX_SAVE_REST
    mov rp=r14                      // return into ia64_leave_hypervisor
    ;;
    P6_BR_CALL_PANIC(.Lvmx_dispatch_tlb_miss_string)
    adds out2=16,r12                // out2 = &pt_regs (r12 is the stack pointer)
    br.call.sptk.many b6=vmx_hpw_miss
END(vmx_dispatch_tlb_miss)
// Dispatch a break fault (hypercall / breakpoint) to
// vmx_ia64_handle_break(ifa, &regs, isr, iim). The return path differs by
// origin: pUStk/pKStk (set by SAVE_MIN) select ia64_leave_hypervisor vs.
// ia64_leave_nested -- NOTE(review): presumably user- vs. kernel-mode entry;
// confirm against the SAVE_MIN macro.
ENTRY(vmx_dispatch_break_fault)
    VMX_SAVE_MIN_WITH_COVER_NO_PANIC
    ;;
    alloc r14=ar.pfs,0,0,4,0        // now it's safe (must be first in insn group!)
    mov out0=cr.ifa
    mov out2=cr.isr                 // FIXME: pity to make this slow access twice
    mov out3=cr.iim                 // FIXME: pity to make this slow access twice
    adds r3=8,r2                    // set up second base pointer
    ;;
    ssm psr.ic
    ;;
    srlz.i                          // guarantee that interruption collection is on
    ;;
    (p15)ssm psr.i                  // restore psr.i
    (pUStk)movl r14=ia64_leave_hypervisor
    ;;
    (pKStk)movl r14=ia64_leave_nested
    VMX_SAVE_REST
    mov rp=r14                      // return path chosen above
    ;;
    adds out1=16,sp                 // out1 = &pt_regs
    br.call.sptk.many b6=vmx_ia64_handle_break
    ;;
END(vmx_dispatch_break_fault)
// Dispatch an external interrupt to the common C handler
// ia64_handle_irq(ivr, &regs). As with the break fault above, the return
// path (hypervisor vs. nested) is chosen by pUStk/pKStk from SAVE_MIN.
ENTRY(vmx_dispatch_interrupt)
    VMX_SAVE_MIN_WITH_COVER_NO_PANIC    // uses r31; defines r2 and r3
    ;;
    alloc r14=ar.pfs,0,0,2,0        // must be first in an insn group
    ssm psr.ic
    mov out0=cr.ivr                 // pass cr.ivr as first arg
    adds r3=8,r2                    // set up second base pointer for SAVE_REST
    ;;
    (pUStk) movl r14=ia64_leave_hypervisor
    srlz.i                          // guarantee that interruption collection is on
    ;;
    (pKStk) movl r14=ia64_leave_nested
    VMX_SAVE_REST
    add out1=16,sp                  // pass pointer to pt_regs as second arg
    mov rp=r14                      // return path chosen above
    br.call.sptk.many b6=ia64_handle_irq
END(vmx_dispatch_interrupt)
// Panic-message strings referenced by the P6_BR_CALL_PANIC invocations in
// the dispatch routines above; each names the dispatcher that tripped.
.Lvmx_dispatch_reflection_string:
    .asciz "vmx_dispatch_reflection\n"
.Lvmx_dispatch_virtualization_fault_string:
    .asciz "vmx_dispatch_virtualization_fault\n"
.Lvmx_dispatch_vexirq_string:
    .asciz "vmx_dispatch_vexirq\n"
.Lvmx_dispatch_tlb_miss_string:
    .asciz "vmx_dispatch_tlb_miss\n"