ia64/xen-unstable

view xen/arch/ia64/vmx/vmx_ivt.S @ 15325:855fe0bf6590

[IA64] Change virtual address of XEN UC identity area.

This slightly simplifies the code and makes flexible map possible.

Signed-off-by: Tristan Gingold <tgingold@free.fr>
author Alex Williamson <alex.williamson@hp.com>
date Tue Jun 12 15:20:06 2007 -0600 (2007-06-12)
parents 9daa40cae3d6
children 962f22223817
line source
1 /*
2 * arch/ia64/kernel/vmx_ivt.S
3 *
4 * Copyright (C) 1998-2001, 2003 Hewlett-Packard Co
5 * Stephane Eranian <eranian@hpl.hp.com>
6 * David Mosberger <davidm@hpl.hp.com>
7 * Copyright (C) 2000, 2002-2003 Intel Co
8 * Asit Mallick <asit.k.mallick@intel.com>
9 * Suresh Siddha <suresh.b.siddha@intel.com>
10 * Kenneth Chen <kenneth.w.chen@intel.com>
11 * Fenghua Yu <fenghua.yu@intel.com>
12 *
13 *
14 * 00/08/23 Asit Mallick <asit.k.mallick@intel.com> TLB handling for SMP
15 * 00/12/20 David Mosberger-Tang <davidm@hpl.hp.com> DTLB/ITLB handler now uses virtual PT.
16 *
17 * 05/3/20 Xuefei Xu (Anthony Xu) (anthony.xu@intel.com)
18 * Supporting Intel virtualization architecture
19 *
20 */
22 /*
23 * This file defines the interruption vector table used by the CPU.
24 * It does not include one entry per possible cause of interruption.
25 *
26 * The first 20 entries of the table contain 64 bundles each while the
27 * remaining 48 entries contain only 16 bundles each.
28 *
29 * The 64 bundles are used to allow inlining the whole handler for critical
30 * interruptions like TLB misses.
31 *
32 * For each entry, the comment is as follows:
33 *
34 * // 0x1c00 Entry 7 (size 64 bundles) Data Key Miss (12,51)
35 * entry offset ----/ / / / /
36 * entry number ---------/ / / /
37 * size of the entry -------------/ / /
38 * vector name -------------------------------------/ /
39 * interruptions triggering this vector ----------------------/
40 *
41 * The table is 32KB in size and must be aligned on 32KB boundary.
42 * (The CPU ignores the 15 lower bits of the address)
43 *
44 * Table is based upon EAS2.6 (Oct 1999)
45 */
47 #include <linux/config.h>
49 #include <asm/asmmacro.h>
50 #include <asm/break.h>
51 #include <asm/ia32.h>
52 #include <asm/kregs.h>
53 #include <asm/offsets.h>
54 #include <asm/pgtable.h>
55 #include <asm/processor.h>
56 #include <asm/ptrace.h>
57 #include <asm/system.h>
58 #include <asm/thread_info.h>
59 #include <asm/unistd.h>
60 #include <asm/vhpt.h>
61 #include <asm/virt_event.h>
62 #include <xen/errno.h>
/*
 * Extra PSR bits to set together with psr.ic when re-enabling
 * interruption collection ("ssm psr.ic | PSR_DEFAULT_BITS" below).
 * Currently keeps alignment checking (psr.ac) enabled; flip the #if
 * to 0 to set no extra bits.
 */
64 #if 1
65 # define PSR_DEFAULT_BITS psr.ac
66 #else
67 # define PSR_DEFAULT_BITS 0
68 #endif
71 #ifdef VTI_DEBUG
72 /*
73 * This lets you track the last eight faults that occurred on the CPU. Make sure ar.k2 isn't
74 * needed for something else before enabling this...
75 */
/*
 * VMX_DBG_FAULT(i): append a 32-byte trace record {cr.iip, cr.ipsr,
 * cr.ifa, vector i} to a ring buffer at IVT_DBG_OFS(r21); the current
 * write offset lives at IVT_CUR_OFS(r21) and is advanced by 32 and
 * wrapped with mask 0xfe0 (so the ring appears to hold 128 records,
 * not eight as the older comment above says -- confirm).
 * Clobbers r16-r20, r22, r23.  NOTE(review): r21 is presumably the
 * current-vcpu pointer (it is used with IA64_VPD_BASE_OFFSET and
 * IA64_VCPU_BREAKIMM_OFFSET elsewhere in this file) -- confirm.
 * Expands to nothing when VTI_DEBUG is not defined.
 */
76 #define VMX_DBG_FAULT(i) \
77 add r16=IVT_CUR_OFS,r21; \
78 add r17=IVT_DBG_OFS,r21;; \
79 ld8 r18=[r16];; \
80 add r17=r18,r17; \
81 mov r19=cr.iip; \
82 mov r20=cr.ipsr; \
83 mov r22=cr.ifa; \
84 mov r23=i;; \
85 st8 [r17]=r19,8; \
86 add r18=32,r18;; \
87 st8 [r17]=r20,8; \
88 mov r19=0xfe0;; \
89 st8 [r17]=r22,8; \
90 and r18=r19,r18;; \
91 st8 [r17]=r23; \
92 st8 [r16]=r18;; \
93 //# define VMX_DBG_FAULT(i) mov r16=ar.k2;; shl r16=r16,8;; add r16=(i),r16;;mov ar.k2=r16
94 #else
95 # define VMX_DBG_FAULT(i)
96 #endif
98 #include "vmx_minstate.h"
100 #define MINSTATE_VIRT /* needed by minstate.h */
101 #include "minstate.h"
/*
 * VMX_FAULT(n): terminal handler for vector n -- defines the label
 * vmx_fault_n, loads the vector number into r19 and branches to the
 * common dispatch_to_fault_handler.
 */
104 #define VMX_FAULT(n) \
105 vmx_fault_##n:; \
106 mov r19=n;; \
107 br.sptk.many dispatch_to_fault_handler; \
108 ;; \
/*
 * VMX_REFLECT(n): route vector n according to cr.ipsr.vm -- when the
 * IA64_PSR_VM bit is set the fault is reflected via
 * vmx_dispatch_reflection, otherwise it goes to the common
 * dispatch_to_fault_handler.  Saves predicates in r31 and the vector
 * number in r19 for the dispatcher.
 */
111 #define VMX_REFLECT(n) \
112 mov r31=pr; \
113 mov r19=n; /* prepare to save predicates */ \
114 mov r29=cr.ipsr; \
115 ;; \
116 tbit.z p6,p7=r29,IA64_PSR_VM_BIT; \
117 (p7)br.sptk.many vmx_dispatch_reflection; \
118 br.sptk.many dispatch_to_fault_handler; \
// vmx_panic: park the CPU in a tight self-branch forever.  The name
// suggests this is the sink for unrecoverable VMX errors; callers are
// not visible in this file.
121 GLOBAL_ENTRY(vmx_panic)
122 br.sptk.many vmx_panic
123 ;;
124 END(vmx_panic)
// Start of the VMX interruption vector table proper.  The base must be
// 32KB aligned (the CPU ignores the low 15 bits of the IVA address);
// vmx_ia64_ivt is presumably installed into cr.iva elsewhere -- not
// visible in this file.
130 .section .text.ivt,"ax"
132 .align 32768 // align on 32KB boundary
133 .global vmx_ia64_ivt
134 vmx_ia64_ivt:
135 /////////////////////////////////////////////////////////////////////////////////////////
136 // 0x0000 Entry 0 (size 64 bundles) VHPT Translation (8,20,47)
// No inline handling for VHPT translation faults: log (when VTI_DEBUG)
// and go straight to the common fault handler with vector 0.
137 ENTRY(vmx_vhpt_miss)
138 VMX_DBG_FAULT(0)
139 VMX_FAULT(0)
140 END(vmx_vhpt_miss)
142 .org vmx_ia64_ivt+0x400
143 /////////////////////////////////////////////////////////////////////////////////////////
144 // 0x0400 Entry 1 (size 64 bundles) ITLB (21)
// ITLB miss.  If IA64_PSR_VM is clear, fall into the alt-ITLB
// identity-map path.  Otherwise search the VHPT collision chain for an
// entry whose tag matches ttag(cr.ifa); on a hit the matched entry is
// exchanged with the chain head (move-to-front, presumably for faster
// future lookups -- confirm) and installed with itc.i, then control
// re-enters the guest through ia64_vmm_entry.  On a miss, dispatch to
// the C handler with vector 1 in r19.
145 ENTRY(vmx_itlb_miss)
146 VMX_DBG_FAULT(1)
147 mov r31 = pr
148 mov r29=cr.ipsr;
149 ;;
150 tbit.z p6,p7=r29,IA64_PSR_VM_BIT; // p6 = psr.vm clear
151 (p6) br.sptk vmx_alt_itlb_miss_1
152 //(p6) br.sptk vmx_fault_1
153 mov r16 = cr.ifa
154 ;;
155 thash r17 = r16 // r17/r18 = VHPT entry (chain head) for cr.ifa
156 ttag r20 = r16 // r20 = tag to match
157 ;;
158 mov r18 = r17
159 adds r28 = VLE_TITAG_OFFSET,r17
160 adds r19 = VLE_CCHAIN_OFFSET, r17
161 ;;
162 ld8 r17 = [r19]
163 ;;
164 vmx_itlb_loop: // r17 = current collision-chain entry
165 cmp.eq p6,p0 = r0, r17 // end of chain?
166 (p6)br vmx_itlb_out
167 ;;
168 adds r16 = VLE_TITAG_OFFSET, r17
169 adds r19 = VLE_CCHAIN_OFFSET, r17
170 ;;
171 ld8 r24 = [r16]
172 ld8 r23 = [r19]
173 ;;
174 lfetch [r23]
175 cmp.eq p6,p7 = r20, r24 // tag match?
176 ;;
177 (p7)mov r17 = r23;
178 (p7)br.sptk vmx_itlb_loop
179 ;;
// Hit: exchange the matched entry's fields with the chain head's, then
// insert the translation.  The titag stores use ti=1 (r22) / the mf to
// keep the entries invalid while they are being swapped.
180 ld8 r25 = [r17]
181 ld8 r27 = [r18]
182 ld8 r29 = [r28]
183 dep r22 = -1,r24,63,1 //set ti=1
184 ;;
185 st8 [r16] = r29, VLE_ITIR_OFFSET - VLE_TITAG_OFFSET
186 st8 [r28] = r22, VLE_ITIR_OFFSET - VLE_TITAG_OFFSET
187 extr.u r19 = r27, 56, 4
188 mf
189 ;;
190 ld8 r29 = [r16]
191 ld8 r22 = [r28]
192 dep r27 = r0, r27, 56, 4
193 dep r25 = r19, r25, 56, 4
194 ;;
195 st8 [r16] = r22
196 st8 [r28] = r29, VLE_TITAG_OFFSET - VLE_ITIR_OFFSET
197 st8 [r18] = r25
198 st8 [r17] = r27
199 ;;
200 st8.rel [r28] = r24 // release-store the matched tag into the chain head
201 itc.i r25 // install the translation in the ITLB
202 dv_serialize_data
203 mov r17=cr.isr
204 mov r23=r31
205 mov r22=b0
206 adds r16=IA64_VPD_BASE_OFFSET,r21
207 ;;
208 ld8 r18=[r16]
209 ;;
210 adds r19=VPD(VPSR),r18
211 movl r20=__vsa_base
212 ;;
213 ld8 r19=[r19]
214 ld8 r20=[r20]
215 ;;
216 br.sptk ia64_vmm_entry
217 ;;
// Not found in the VHPT: hand off to the C miss handler, vector 1.
218 vmx_itlb_out:
219 mov r19 = 1
220 br.sptk vmx_dispatch_itlb_miss
221 VMX_FAULT(1);
222 END(vmx_itlb_miss)
224 .org vmx_ia64_ivt+0x0800
225 /////////////////////////////////////////////////////////////////////////////////////////
226 // 0x0800 Entry 2 (size 64 bundles) DTLB (9,48)
// DTLB miss.  Mirror image of vmx_itlb_miss above (same VHPT chain
// walk and head-swap), but inserts with itc.d and dispatches misses to
// vmx_dispatch_dtlb_miss with vector 2.
227 ENTRY(vmx_dtlb_miss)
228 VMX_DBG_FAULT(2)
229 mov r31 = pr
230 mov r29=cr.ipsr;
231 ;;
232 tbit.z p6,p7=r29,IA64_PSR_VM_BIT; // p6 = psr.vm clear
233 (p6)br.sptk vmx_alt_dtlb_miss_1
234 mov r16 = cr.ifa
235 ;;
236 thash r17 = r16 // r17/r18 = VHPT entry (chain head) for cr.ifa
237 ttag r20 = r16 // r20 = tag to match
238 ;;
239 mov r18 = r17
240 adds r28 = VLE_TITAG_OFFSET,r17
241 adds r19 = VLE_CCHAIN_OFFSET, r17
242 ;;
243 ld8 r17 = [r19]
244 ;;
245 vmx_dtlb_loop: // r17 = current collision-chain entry
246 cmp.eq p6,p0 = r0, r17 // end of chain?
247 (p6)br vmx_dtlb_out
248 ;;
249 adds r16 = VLE_TITAG_OFFSET, r17
250 adds r19 = VLE_CCHAIN_OFFSET, r17
251 ;;
252 ld8 r24 = [r16]
253 ld8 r23 = [r19]
254 ;;
255 lfetch [r23]
256 cmp.eq p6,p7 = r20, r24 // tag match?
257 ;;
258 (p7)mov r17 = r23;
259 (p7)br.sptk vmx_dtlb_loop
260 ;;
// Hit: exchange the matched entry with the chain head, then insert.
261 ld8 r25 = [r17]
262 ld8 r27 = [r18]
263 ld8 r29 = [r28]
264 dep r22 = -1,r24,63,1 //set ti=1
265 ;;
266 st8 [r16] = r29, VLE_ITIR_OFFSET - VLE_TITAG_OFFSET
267 st8 [r28] = r22, VLE_ITIR_OFFSET - VLE_TITAG_OFFSET
268 extr.u r19 = r27, 56, 4
269 mf
270 ;;
271 ld8 r29 = [r16]
272 ld8 r22 = [r28]
273 dep r27 = r0, r27, 56, 4
274 dep r25 = r19, r25, 56, 4
275 ;;
276 st8 [r16] = r22
277 st8 [r28] = r29, VLE_TITAG_OFFSET - VLE_ITIR_OFFSET
278 st8 [r18] = r25
279 st8 [r17] = r27
280 ;;
281 st8.rel [r28] = r24 // release-store the matched tag into the chain head
282 itc.d r25 // install the translation in the DTLB
283 dv_serialize_data
284 mov r17=cr.isr
285 mov r23=r31
286 mov r22=b0
287 adds r16=IA64_VPD_BASE_OFFSET,r21
288 ;;
289 ld8 r18=[r16]
290 ;;
291 adds r19=VPD(VPSR),r18
292 movl r20=__vsa_base
293 ;;
294 ld8 r19=[r19]
295 ld8 r20=[r20]
296 ;;
297 br.sptk ia64_vmm_entry
298 ;;
// Not found in the VHPT: hand off to the C miss handler, vector 2.
299 vmx_dtlb_out:
300 mov r19 = 2
301 br.sptk vmx_dispatch_dtlb_miss
302 VMX_FAULT(2);
303 END(vmx_dtlb_miss)
305 .org vmx_ia64_ivt+0x0c00
306 /////////////////////////////////////////////////////////////////////////////////////////
307 // 0x0c00 Entry 3 (size 64 bundles) Alt ITLB (19)
// Alternate ITLB miss: identity-map the faulting address with kernel
// page attributes at granule size.  Bit XEN_VIRT_UC_BIT of the address
// selects the uncacheable memory attribute (deposited into PTE bit 4).
// Taken directly only when IA64_PSR_VM is clear (otherwise fatal via
// vmx_fault_3); also entered at vmx_alt_itlb_miss_1 as the fall-back
// from vmx_itlb_miss.
308 ENTRY(vmx_alt_itlb_miss)
309 VMX_DBG_FAULT(3)
310 mov r31 = pr
311 mov r29=cr.ipsr;
312 ;;
313 tbit.z p6,p7=r29,IA64_PSR_VM_BIT;
314 (p7)br.spnt vmx_fault_3
315 vmx_alt_itlb_miss_1:
316 mov r16=cr.ifa // get address that caused the TLB miss
317 ;;
318 movl r17=PAGE_KERNEL
319 mov r24=cr.ipsr
320 movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
321 ;;
322 and r19=r19,r16 // clear ed, reserved bits, and PTE control bits
323 extr.u r18=r16,XEN_VIRT_UC_BIT, 1 // extract UC bit (length fixed from 15 to 1; behavior-neutral since the dep below only uses bit 0, and now consistent with vmx_alt_dtlb_miss)
324 ;;
325 or r19=r17,r19 // insert PTE control bits into r19
326 mov r20=IA64_GRANULE_SHIFT<<2
327 ;;
328 dep r19=r18,r19,4,1 // set bit 4 (uncached) if the access was to UC region
329 mov cr.itir=r20
330 ;;
331 itc.i r19 // insert the TLB entry
332 mov pr=r31,-1
333 rfi
334 VMX_FAULT(3);
335 END(vmx_alt_itlb_miss)
338 .org vmx_ia64_ivt+0x1000
339 /////////////////////////////////////////////////////////////////////////////////////////
340 // 0x1000 Entry 4 (size 64 bundles) Alt DTLB (7,46)
// Alternate DTLB miss: identity-map the faulting data address with
// kernel attributes, like vmx_alt_itlb_miss, with two extras:
// an address in the virtual frame_table region is redirected to
// frametable_miss, and a faulting speculative (non-lfetch) access gets
// cr.ipsr.ed set instead of a TLB insert, so the speculation fails
// cleanly rather than mapping the page.
341 ENTRY(vmx_alt_dtlb_miss)
342 VMX_DBG_FAULT(4)
343 mov r31=pr
344 mov r29=cr.ipsr;
345 ;;
346 tbit.z p6,p7=r29,IA64_PSR_VM_BIT;
347 (p7)br.spnt vmx_fault_4
348 vmx_alt_dtlb_miss_1:
349 mov r16=cr.ifa // get address that caused the TLB miss
350 ;;
351 #ifdef CONFIG_VIRTUAL_FRAME_TABLE
352 // Test for the address of virtual frame_table
353 shr r22=r16,56;;
354 cmp.eq p8,p0=((VIRT_FRAME_TABLE_ADDR>>56)&0xff)-0x100,r22
355 (p8)br.cond.sptk frametable_miss ;;
356 #endif
357 movl r17=PAGE_KERNEL
358 mov r20=cr.isr
359 movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
360 mov r24=cr.ipsr
361 ;;
362 and r22=IA64_ISR_CODE_MASK,r20 // get the isr.code field
363 tbit.nz p6,p7=r20,IA64_ISR_SP_BIT // is speculation bit on?
364 extr.u r18=r16,XEN_VIRT_UC_BIT, 1 // extract UC bit
365 and r19=r19,r16 // clear ed, reserved bits, and PTE control bits
366 tbit.nz p9,p0=r20,IA64_ISR_NA_BIT // is non-access bit on?
367 ;;
368 (p9)cmp.eq.or.andcm p6,p7=IA64_ISR_CODE_LFETCH,r22 // check isr.code field
369 dep r24=-1,r24,IA64_PSR_ED_BIT,1
370 or r19=r19,r17 // insert PTE control bits into r19
371 mov r20=IA64_GRANULE_SHIFT<<2
372 ;;
373 dep r19=r18,r19,4,1 // set bit 4 (uncached) if the access was to UC region
374 (p6)mov cr.ipsr=r24 // speculative access: set ipsr.ed, no insert
375 mov cr.itir=r20
376 ;;
377 (p7)itc.d r19 // insert the TLB entry
378 mov pr=r31,-1
379 rfi
380 VMX_FAULT(4);
381 END(vmx_alt_dtlb_miss)
// Entries 5-10: no inline handling.  Entry 5 (nested DTLB miss) goes
// straight to the common fault handler via VMX_FAULT; entries 6-10 are
// routed by VMX_REFLECT (reflected when psr.vm is set, common fault
// handler otherwise).
383 .org vmx_ia64_ivt+0x1400
384 /////////////////////////////////////////////////////////////////////////////////////////
385 // 0x1400 Entry 5 (size 64 bundles) Data nested TLB (6,45)
386 ENTRY(vmx_nested_dtlb_miss)
387 VMX_DBG_FAULT(5)
388 VMX_FAULT(5)
389 END(vmx_nested_dtlb_miss)
391 .org vmx_ia64_ivt+0x1800
392 /////////////////////////////////////////////////////////////////////////////////////////
393 // 0x1800 Entry 6 (size 64 bundles) Instruction Key Miss (24)
394 ENTRY(vmx_ikey_miss)
395 VMX_DBG_FAULT(6)
396 VMX_REFLECT(6)
397 END(vmx_ikey_miss)
399 .org vmx_ia64_ivt+0x1c00
400 /////////////////////////////////////////////////////////////////////////////////////////
401 // 0x1c00 Entry 7 (size 64 bundles) Data Key Miss (12,51)
402 ENTRY(vmx_dkey_miss)
403 VMX_DBG_FAULT(7)
404 VMX_REFLECT(7)
405 END(vmx_dkey_miss)
407 .org vmx_ia64_ivt+0x2000
408 /////////////////////////////////////////////////////////////////////////////////////////
409 // 0x2000 Entry 8 (size 64 bundles) Dirty-bit (54)
410 ENTRY(vmx_dirty_bit)
411 VMX_DBG_FAULT(8)
412 VMX_REFLECT(8)
413 END(vmx_dirty_bit)
415 .org vmx_ia64_ivt+0x2400
416 /////////////////////////////////////////////////////////////////////////////////////////
417 // 0x2400 Entry 9 (size 64 bundles) Instruction Access-bit (27)
418 ENTRY(vmx_iaccess_bit)
419 VMX_DBG_FAULT(9)
420 VMX_REFLECT(9)
421 END(vmx_iaccess_bit)
423 .org vmx_ia64_ivt+0x2800
424 /////////////////////////////////////////////////////////////////////////////////////////
425 // 0x2800 Entry 10 (size 64 bundles) Data Access-bit (15,55)
426 ENTRY(vmx_daccess_bit)
427 VMX_DBG_FAULT(10)
428 VMX_REFLECT(10)
429 END(vmx_daccess_bit)
431 .org vmx_ia64_ivt+0x2c00
432 /////////////////////////////////////////////////////////////////////////////////////////
433 // 0x2c00 Entry 11 (size 64 bundles) Break instruction (33)
// Break-instruction fault.  Fast hypercall path: when cr.iim equals
// the vcpu's break immediate (loaded from IA64_VCPU_BREAKIMM_OFFSET)
// AND cr.ipsr.cpl == 0, fall into the streamlined hypercall entry
// below (modeled on the Linux ia64 break/syscall fast path); any other
// break goes to vmx_dispatch_break_fault.
434 ENTRY(vmx_break_fault)
435 VMX_DBG_FAULT(11)
436 mov r31=pr
437 mov r19=11
438 mov r17=cr.iim
439 ;;
440 #ifdef VTI_DEBUG
441 // break 0 is already handled in vmx_ia64_handle_break.
442 cmp.eq p6,p7=r17,r0
443 (p6) br.sptk vmx_fault_11
444 ;;
445 #endif
446 mov r29=cr.ipsr
447 adds r22=IA64_VCPU_BREAKIMM_OFFSET, r21
448 ;;
449 ld4 r22=[r22]
450 extr.u r24=r29,IA64_PSR_CPL0_BIT,2 // r24 = privilege level at break
451 cmp.eq p0,p6=r0,r0 // clear p6
452 ;;
453 cmp.ne.or p6,p0=r22,r17 // p6 |= (iim != breakimm)
454 cmp.ne.or p6,p0=r0,r24 // p6 |= (cpl != 0)
455 (p6) br.sptk.many vmx_dispatch_break_fault
456 ;;
457 /*
458 * The streamlined system call entry/exit paths only save/restore the initial part
459 * of pt_regs. This implies that the callers of system-calls must adhere to the
460 * normal procedure calling conventions.
461 *
462 * Registers to be saved & restored:
463 * CR registers: cr.ipsr, cr.iip, cr.ifs
464 * AR registers: ar.unat, ar.pfs, ar.rsc, ar.rnat, ar.bspstore, ar.fpsr
465 * others: pr, b0, b6, loadrs, r1, r11, r12, r13, r15
466 * Registers to be restored only:
467 * r8-r11: output value from the system call.
468 *
469 * During system call exit, scratch registers (including r15) are modified/cleared
470 * to prevent leaking bits from kernel to user level.
471 */
473 // mov.m r16=IA64_KR(CURRENT) // M2 r16 <- current task (12 cyc)
474 mov r14=r21
475 bsw.1 // B (6 cyc) regs are saved, switch to bank 1
476 ;;
477 mov r29=cr.ipsr // M2 (12 cyc)
478 mov r31=pr // I0 (2 cyc)
479 mov r16=r14
480 mov r15=r2
482 mov r17=cr.iim // M2 (2 cyc)
483 mov.m r27=ar.rsc // M2 (12 cyc)
484 // mov r18=__IA64_BREAK_SYSCALL // A
486 mov.m ar.rsc=0 // M2
487 mov.m r21=ar.fpsr // M2 (12 cyc)
488 mov r19=b6 // I0 (2 cyc)
489 ;;
490 mov.m r23=ar.bspstore // M2 (12 cyc)
491 mov.m r24=ar.rnat // M2 (5 cyc)
492 mov.i r26=ar.pfs // I0 (2 cyc)
494 invala // M0|1
495 nop.m 0 // M
496 mov r20=r1 // A save r1
498 nop.m 0
499 // movl r30=sys_call_table // X
500 movl r30=ia64_hypercall_table // X
502 mov r28=cr.iip // M2 (2 cyc)
503 // cmp.eq p0,p7=r18,r17 // I0 is this a system call?
504 //(p7) br.cond.spnt non_syscall // B no ->
505 //
506 // From this point on, we are definitely on the syscall-path
507 // and we can use (non-banked) scratch registers.
508 //
509 ///////////////////////////////////////////////////////////////////////
510 mov r1=r16 // A move task-pointer to "addl"-addressable reg
511 mov r2=r16 // A setup r2 for ia64_syscall_setup
512 // add r9=TI_FLAGS+IA64_TASK_SIZE,r16 // A r9 = &current_thread_info()->flags
514 // adds r16=IA64_TASK_THREAD_ON_USTACK_OFFSET,r16
515 // adds r15=-1024,r15 // A subtract 1024 from syscall number
516 // mov r3=NR_syscalls - 1
517 mov r3=NR_hypercalls - 1
518 ;;
519 // ld1.bias r17=[r16] // M0|1 r17 = current->thread.on_ustack flag
520 // ld4 r9=[r9] // M0|1 r9 = current_thread_info()->flags
521 mov r9=r0 // force flags = 0
522 extr.u r8=r29,41,2 // I0 extract ei field from cr.ipsr
524 shladd r30=r15,3,r30 // A r30 = sys_call_table + 8*(syscall-1024)
525 addl r22=IA64_RBS_OFFSET,r1 // A compute base of RBS
526 cmp.leu p6,p7=r15,r3 // A syscall number in range?
527 ;;
529 lfetch.fault.excl.nt1 [r22] // M0|1 prefetch RBS
530 (p6) ld8 r30=[r30] // M0|1 load address of syscall entry point
531 tnat.nz.or p7,p0=r15 // I0 is syscall nr a NaT?
533 mov.m ar.bspstore=r22 // M2 switch to kernel RBS
534 cmp.eq p8,p9=2,r8 // A isr.ei==2?
535 ;;
537 (p8) mov r8=0 // A clear ei to 0
538 //(p7) movl r30=sys_ni_syscall // X
539 (p7) movl r30=do_ni_hypercall // X
541 (p8) adds r28=16,r28 // A switch cr.iip to next bundle
542 (p9) adds r8=1,r8 // A increment ei to next slot
543 nop.i 0
544 ;;
546 mov.m r25=ar.unat // M2 (5 cyc)
547 dep r29=r8,r29,41,2 // I0 insert new ei into cr.ipsr
548 // adds r15=1024,r15 // A restore original syscall number
549 //
550 // If any of the above loads miss in L1D, we'll stall here until
551 // the data arrives.
552 //
553 ///////////////////////////////////////////////////////////////////////
554 // st1 [r16]=r0 // M2|3 clear current->thread.on_ustack flag
555 mov b6=r30 // I0 setup syscall handler branch reg early
556 cmp.ne pKStk,pUStk=r0,r0 // A were we on kernel stacks already?
558 // and r9=_TIF_SYSCALL_TRACEAUDIT,r9 // A mask trace or audit
559 mov r18=ar.bsp // M2 (12 cyc)
560 ;;
561 (pUStk) addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r1 // A compute base of memory stack
562 // cmp.eq p14,p0=r9,r0 // A are syscalls being traced/audited?
563 // br.call.sptk.many b7=ia64_syscall_setup // B
564 br.call.sptk.many b7=ia64_hypercall_setup // B
565 1:
566 mov ar.rsc=0x3 // M2 set eager mode, pl 0, LE, loadrs=0
567 // nop 0
568 // bsw.1 // B (6 cyc) regs are saved, switch to bank 1
569 ;;
570 ssm psr.ic | PSR_DEFAULT_BITS // M2 now it's safe to re-enable intr.-collection
571 // movl r3=ia64_ret_from_syscall // X
572 movl r3=ia64_leave_hypercall // X
573 ;;
575 srlz.i // M0 ensure interruption collection is on
576 mov rp=r3 // I0 set the real return addr
577 //(p10) br.cond.spnt.many ia64_ret_from_syscall // B return if bad call-frame or r15 is a NaT
578 (p15) ssm psr.i // M2 restore psr.i
579 //(p14) br.call.sptk.many b6=b6 // B invoke syscall-handler (ignore return addr)
580 br.call.sptk.many b6=b6 // B invoke syscall-handler (ignore return addr)
581 // br.cond.spnt.many ia64_trace_syscall // B do syscall-tracing thingamagic
582 ;;
583 VMX_FAULT(11)
584 END(vmx_break_fault)
586 .org vmx_ia64_ivt+0x3000
587 /////////////////////////////////////////////////////////////////////////////////////////
588 // 0x3000 Entry 12 (size 64 bundles) External Interrupt (4)
// External interrupt.  If the interrupt arrived with psr.vm set, hand
// it to vmx_dispatch_interrupt.  Otherwise build a minimal pt_regs
// frame on the stack (a hand-inlined DO_SAVE_MIN-style sequence),
// re-enable interruption collection, and call
// ia64_handle_irq(cr.ivr, pt_regs), returning via ia64_leave_nested.
// p15 remembers whether psr.i was set so it can be restored.
// NOTE(review): a stray macro line-continuation '\' after the ';;'
// preceding bsw.1 (leftover from the minstate macro this sequence was
// copied from) has been removed; it was harmless but confusing.
589 ENTRY(vmx_interrupt)
590 // VMX_DBG_FAULT(12)
591 mov r31=pr // prepare to save predicates
592 mov r19=12
593 mov r29=cr.ipsr
594 ;;
595 tbit.z p6,p7=r29,IA64_PSR_VM_BIT
596 tbit.z p0,p15=r29,IA64_PSR_I_BIT
597 ;;
598 (p7) br.sptk vmx_dispatch_interrupt
599 ;;
600 mov r27=ar.rsc /* M */
601 mov r20=r1 /* A */
602 mov r25=ar.unat /* M */
603 mov r26=ar.pfs /* I */
604 mov r28=cr.iip /* M */
605 cover /* B (or nothing) */
606 ;;
607 mov r1=sp
608 ;;
609 invala /* M */
610 mov r30=cr.ifs
611 ;;
612 addl r1=-IA64_PT_REGS_SIZE,r1
613 ;;
614 adds r17=2*L1_CACHE_BYTES,r1 /* really: biggest cache-line size */
615 adds r16=PT(CR_IPSR),r1
616 ;;
617 lfetch.fault.excl.nt1 [r17],L1_CACHE_BYTES
618 st8 [r16]=r29 /* save cr.ipsr */
619 ;;
620 lfetch.fault.excl.nt1 [r17]
621 mov r29=b0
622 ;;
623 adds r16=PT(R8),r1 /* initialize first base pointer */
624 adds r17=PT(R9),r1 /* initialize second base pointer */
625 mov r18=r0 /* make sure r18 isn't NaT */
626 ;;
627 .mem.offset 0,0; st8.spill [r16]=r8,16
628 .mem.offset 8,0; st8.spill [r17]=r9,16
629 ;;
630 .mem.offset 0,0; st8.spill [r16]=r10,24
631 .mem.offset 8,0; st8.spill [r17]=r11,24
632 ;;
633 st8 [r16]=r28,16 /* save cr.iip */
634 st8 [r17]=r30,16 /* save cr.ifs */
635 mov r8=ar.fpsr /* M */
636 mov r9=ar.csd
637 mov r10=ar.ssd
638 movl r11=FPSR_DEFAULT /* L-unit */
639 ;;
640 st8 [r16]=r25,16 /* save ar.unat */
641 st8 [r17]=r26,16 /* save ar.pfs */
642 shl r18=r18,16 /* compute ar.rsc to be used for "loadrs" */
643 ;;
644 st8 [r16]=r27,16 /* save ar.rsc */
645 adds r17=16,r17 /* skip over ar_rnat field */
646 ;;
647 st8 [r17]=r31,16 /* save predicates */
648 adds r16=16,r16 /* skip over ar_bspstore field */
649 ;;
650 st8 [r16]=r29,16 /* save b0 */
651 st8 [r17]=r18,16 /* save ar.rsc value for "loadrs" */
652 ;;
653 .mem.offset 0,0; st8.spill [r16]=r20,16 /* save original r1 */
654 .mem.offset 8,0; st8.spill [r17]=r12,16
655 adds r12=-16,r1 /* switch to kernel memory stack (with 16 bytes of scratch) */
656 ;;
657 .mem.offset 0,0; st8.spill [r16]=r13,16
658 .mem.offset 8,0; st8.spill [r17]=r8,16 /* save ar.fpsr */
659 MINSTATE_GET_CURRENT(r13)
660 ;;
661 .mem.offset 0,0; st8.spill [r16]=r15,16
662 .mem.offset 8,0; st8.spill [r17]=r14,16
663 dep r14=-1,r0,60,4
664 ;;
665 .mem.offset 0,0; st8.spill [r16]=r2,16
666 .mem.offset 8,0; st8.spill [r17]=r3,16
667 adds r2=IA64_PT_REGS_R16_OFFSET,r1
668 ;;
669 mov r8=ar.ccv
670 movl r1=__gp /* establish kernel global pointer */
671 ;;
672 bsw.1
673 ;;
674 alloc r14=ar.pfs,0,0,2,0 // must be first in an insn group
675 mov out0=cr.ivr // pass cr.ivr as first arg
676 add out1=16,sp // pass pointer to pt_regs as second arg
678 ssm psr.ic
679 ;;
680 srlz.i
681 ;;
682 (p15) ssm psr.i
683 adds r3=8,r2 // set up second base pointer for SAVE_REST
684 srlz.i // ensure everybody knows psr.ic is back on
685 ;;
686 .mem.offset 0,0; st8.spill [r2]=r16,16
687 .mem.offset 8,0; st8.spill [r3]=r17,16
688 ;;
689 .mem.offset 0,0; st8.spill [r2]=r18,16
690 .mem.offset 8,0; st8.spill [r3]=r19,16
691 ;;
692 .mem.offset 0,0; st8.spill [r2]=r20,16
693 .mem.offset 8,0; st8.spill [r3]=r21,16
694 mov r18=b6
695 ;;
696 .mem.offset 0,0; st8.spill [r2]=r22,16
697 .mem.offset 8,0; st8.spill [r3]=r23,16
698 mov r19=b7
699 ;;
700 .mem.offset 0,0; st8.spill [r2]=r24,16
701 .mem.offset 8,0; st8.spill [r3]=r25,16
702 ;;
703 .mem.offset 0,0; st8.spill [r2]=r26,16
704 .mem.offset 8,0; st8.spill [r3]=r27,16
705 ;;
706 .mem.offset 0,0; st8.spill [r2]=r28,16
707 .mem.offset 8,0; st8.spill [r3]=r29,16
708 ;;
709 .mem.offset 0,0; st8.spill [r2]=r30,16
710 .mem.offset 8,0; st8.spill [r3]=r31,32
711 ;;
712 mov ar.fpsr=r11 /* M-unit */
713 st8 [r2]=r8,8 /* ar.ccv */
714 adds r24=PT(B6)-PT(F7),r3
715 ;;
716 stf.spill [r2]=f6,32
717 stf.spill [r3]=f7,32
718 ;;
719 stf.spill [r2]=f8,32
720 stf.spill [r3]=f9,32
721 ;;
722 stf.spill [r2]=f10
723 stf.spill [r3]=f11
724 adds r25=PT(B7)-PT(F11),r3
725 ;;
726 st8 [r24]=r18,16 /* b6 */
727 st8 [r25]=r19,16 /* b7 */
728 ;;
729 st8 [r24]=r9 /* ar.csd */
730 st8 [r25]=r10 /* ar.ssd */
731 ;;
732 srlz.d // make sure we see the effect of cr.ivr
733 movl r14=ia64_leave_nested
734 ;;
735 mov rp=r14
736 br.call.sptk.many b6=ia64_handle_irq
737 ;;
738 END(vmx_interrupt)
740 .org vmx_ia64_ivt+0x3400
741 /////////////////////////////////////////////////////////////////////////////////////////
742 // 0x3400 Entry 13 (size 64 bundles) Reserved
// Architecturally reserved slot, used here for virtual external
// interrupts: save predicates (r31) and vector (r19), then hand off
// to vmx_dispatch_vexirq.
743 ENTRY(vmx_virtual_exirq)
744 VMX_DBG_FAULT(13)
745 mov r31=pr
746 mov r19=13
747 br.sptk vmx_dispatch_vexirq
748 END(vmx_virtual_exirq)
750 .org vmx_ia64_ivt+0x3800
751 /////////////////////////////////////////////////////////////////////////////////////////
752 // 0x3800 Entry 14 (size 64 bundles) Reserved
// Reserved vector: any arrival ends in the common fault handler (14).
753 VMX_DBG_FAULT(14)
754 VMX_FAULT(14)
755 // The ia64_hypercall_setup code below is taken from Linux 2.6.16.13.
757 /*
758 * There is no particular reason for this code to be here, other than that
759 * there happens to be space here that would go unused otherwise. If this
760 * fault ever gets "unreserved", simply move the following code to a more
761 * suitable spot...
762 *
763 * ia64_syscall_setup() is a separate subroutine so that it can
764 * allocate stacked registers so it can safely demine any
765 * potential NaT values from the input registers.
766 *
767 * On entry:
768 * - executing on bank 0 or bank 1 register set (doesn't matter)
769 * - r1: stack pointer
770 * - r2: current task pointer
771 * - r3: preserved
772 * - r11: original contents (saved ar.pfs to be saved)
773 * - r12: original contents (sp to be saved)
774 * - r13: original contents (tp to be saved)
775 * - r15: original contents (syscall # to be saved)
776 * - r18: saved bsp (after switching to kernel stack)
777 * - r19: saved b6
778 * - r20: saved r1 (gp)
779 * - r21: saved ar.fpsr
780 * - r22: kernel's register backing store base (krbs_base)
781 * - r23: saved ar.bspstore
782 * - r24: saved ar.rnat
783 * - r25: saved ar.unat
784 * - r26: saved ar.pfs
785 * - r27: saved ar.rsc
786 * - r28: saved cr.iip
787 * - r29: saved cr.ipsr
788 * - r31: saved pr
789 * - b0: original contents (to be saved)
790 * On exit:
791 * - p10: TRUE if syscall is invoked with more than 8 out
792 * registers or r15's Nat is true
793 * - r1: kernel's gp
794 * - r3: preserved (same as on entry)
795 * - r8: -EINVAL if p10 is true
796 * - r12: points to kernel stack
797 * - r13: points to current task
798 * - r14: preserved (same as on entry)
799 * - p13: preserved
800 * - p15: TRUE if interrupts need to be re-enabled
801 * - ar.fpsr: set to kernel settings
802 * - b6: preserved (same as on entry)
803 */
// Xen analogue of Linux's ia64_syscall_setup: completes the pt_regs
// frame begun by the break-fault fast path and demines NaT values from
// the hypercall argument registers in0-in7.  The full register
// contract is described in the comment block above.  Stores 1 into
// pt_regs.r8 (see the handle_syscall_error note below), and returns
// via b7 with r8=-EINVAL when p10 is set (bad call frame or NaT
// hypercall number).
804 GLOBAL_ENTRY(ia64_hypercall_setup)
805 #if PT(B6) != 0
806 # error This code assumes that b6 is the first field in pt_regs.
807 #endif
808 st8 [r1]=r19 // save b6
809 add r16=PT(CR_IPSR),r1 // initialize first base pointer
810 add r17=PT(R11),r1 // initialize second base pointer
811 ;;
812 alloc r19=ar.pfs,8,0,0,0 // ensure in0-in7 are writable
813 st8 [r16]=r29,PT(AR_PFS)-PT(CR_IPSR) // save cr.ipsr
814 tnat.nz p8,p0=in0
816 st8.spill [r17]=r11,PT(CR_IIP)-PT(R11) // save r11
817 tnat.nz p9,p0=in1
818 (pKStk) mov r18=r0 // make sure r18 isn't NaT
819 ;;
821 st8 [r16]=r26,PT(CR_IFS)-PT(AR_PFS) // save ar.pfs
822 st8 [r17]=r28,PT(AR_UNAT)-PT(CR_IIP) // save cr.iip
823 mov r28=b0 // save b0 (2 cyc)
824 ;;
826 st8 [r17]=r25,PT(AR_RSC)-PT(AR_UNAT) // save ar.unat
827 dep r19=0,r19,38,26 // clear all bits but 0..37 [I0]
828 (p8) mov in0=-1
829 ;;
831 st8 [r16]=r19,PT(AR_RNAT)-PT(CR_IFS) // store ar.pfs.pfm in cr.ifs
832 extr.u r11=r19,7,7 // I0 // get sol of ar.pfs
833 and r8=0x7f,r19 // A // get sof of ar.pfs
835 st8 [r17]=r27,PT(AR_BSPSTORE)-PT(AR_RSC)// save ar.rsc
836 tbit.nz p15,p0=r29,IA64_PSR_I_BIT // I0
837 (p9) mov in1=-1
838 ;;
840 (pUStk) sub r18=r18,r22 // r18=RSE.ndirty*8
841 tnat.nz p10,p0=in2
842 add r11=8,r11
843 ;;
844 (pKStk) adds r16=PT(PR)-PT(AR_RNAT),r16 // skip over ar_rnat field
845 (pKStk) adds r17=PT(B0)-PT(AR_BSPSTORE),r17 // skip over ar_bspstore field
846 tnat.nz p11,p0=in3
847 ;;
848 (p10) mov in2=-1
849 tnat.nz p12,p0=in4 // [I0]
850 (p11) mov in3=-1
851 ;;
852 (pUStk) st8 [r16]=r24,PT(PR)-PT(AR_RNAT) // save ar.rnat
853 (pUStk) st8 [r17]=r23,PT(B0)-PT(AR_BSPSTORE) // save ar.bspstore
854 shl r18=r18,16 // compute ar.rsc to be used for "loadrs"
855 ;;
856 st8 [r16]=r31,PT(LOADRS)-PT(PR) // save predicates
857 st8 [r17]=r28,PT(R1)-PT(B0) // save b0
858 tnat.nz p13,p0=in5 // [I0]
859 ;;
860 st8 [r16]=r18,PT(R12)-PT(LOADRS) // save ar.rsc value for "loadrs"
861 st8.spill [r17]=r20,PT(R13)-PT(R1) // save original r1
862 (p12) mov in4=-1
863 ;;
865 .mem.offset 0,0; st8.spill [r16]=r12,PT(AR_FPSR)-PT(R12) // save r12
866 .mem.offset 8,0; st8.spill [r17]=r13,PT(R15)-PT(R13) // save r13
867 (p13) mov in5=-1
868 ;;
869 st8 [r16]=r21,PT(R8)-PT(AR_FPSR) // save ar.fpsr
870 tnat.nz p13,p0=in6
871 cmp.lt p10,p9=r11,r8 // frame size can't be more than local+8
872 ;;
873 mov r8=1
874 (p9) tnat.nz p10,p0=r15
875 adds r12=-16,r1 // switch to kernel memory stack (with 16 bytes of scratch)
877 st8.spill [r17]=r15 // save r15
878 tnat.nz p8,p0=in7
879 nop.i 0
881 mov r13=r2 // establish `current'
882 movl r1=__gp // establish kernel global pointer
883 ;;
884 st8 [r16]=r8 // ensure pt_regs.r8 != 0 (see handle_syscall_error)
885 (p13) mov in6=-1
886 (p8) mov in7=-1
888 cmp.eq pSys,pNonSys=r0,r0 // set pSys=1, pNonSys=0
889 movl r17=FPSR_DEFAULT
890 ;;
891 mov.m ar.fpsr=r17 // set ar.fpsr to kernel default value
892 (p10) mov r8=-EINVAL
893 br.ret.sptk.many b7
894 END(ia64_hypercall_setup)
// Reserved vectors 15-19: any arrival goes to the common fault handler
// with the corresponding vector number.
897 .org vmx_ia64_ivt+0x3c00
898 /////////////////////////////////////////////////////////////////////////////////////////
899 // 0x3c00 Entry 15 (size 64 bundles) Reserved
900 VMX_DBG_FAULT(15)
901 VMX_FAULT(15)
904 .org vmx_ia64_ivt+0x4000
905 /////////////////////////////////////////////////////////////////////////////////////////
906 // 0x4000 Entry 16 (size 64 bundles) Reserved
907 VMX_DBG_FAULT(16)
908 VMX_FAULT(16)
910 .org vmx_ia64_ivt+0x4400
911 /////////////////////////////////////////////////////////////////////////////////////////
912 // 0x4400 Entry 17 (size 64 bundles) Reserved
913 VMX_DBG_FAULT(17)
914 VMX_FAULT(17)
916 .org vmx_ia64_ivt+0x4800
917 /////////////////////////////////////////////////////////////////////////////////////////
918 // 0x4800 Entry 18 (size 64 bundles) Reserved
919 VMX_DBG_FAULT(18)
920 VMX_FAULT(18)
922 .org vmx_ia64_ivt+0x4c00
923 /////////////////////////////////////////////////////////////////////////////////////////
924 // 0x4c00 Entry 19 (size 64 bundles) Reserved
925 VMX_DBG_FAULT(19)
926 VMX_FAULT(19)
928 .org vmx_ia64_ivt+0x5000
929 /////////////////////////////////////////////////////////////////////////////////////////
930 // 0x5000 Entry 20 (size 16 bundles) Page Not Present
931 ENTRY(vmx_page_not_present)
932 VMX_DBG_FAULT(20)
933 VMX_REFLECT(20)
934 END(vmx_page_not_present)
936 .org vmx_ia64_ivt+0x5100
937 /////////////////////////////////////////////////////////////////////////////////////////
938 // 0x5100 Entry 21 (size 16 bundles) Key Permission vector
939 ENTRY(vmx_key_permission)
940 VMX_DBG_FAULT(21)
941 VMX_REFLECT(21)
942 END(vmx_key_permission)
944 .org vmx_ia64_ivt+0x5200
945 /////////////////////////////////////////////////////////////////////////////////////////
946 // 0x5200 Entry 22 (size 16 bundles) Instruction Access Rights (26)
947 ENTRY(vmx_iaccess_rights)
948 VMX_DBG_FAULT(22)
949 VMX_REFLECT(22)
950 END(vmx_iaccess_rights)
952 .org vmx_ia64_ivt+0x5300
953 /////////////////////////////////////////////////////////////////////////////////////////
954 // 0x5300 Entry 23 (size 16 bundles) Data Access Rights (14,53)
955 ENTRY(vmx_daccess_rights)
956 VMX_DBG_FAULT(23)
957 VMX_REFLECT(23)
958 END(vmx_daccess_rights)
960 .org vmx_ia64_ivt+0x5400
961 /////////////////////////////////////////////////////////////////////////////////////////
962 // 0x5400 Entry 24 (size 16 bundles) General Exception (5,32,34,36,38,39)
963 ENTRY(vmx_general_exception)
964 VMX_DBG_FAULT(24)
965 VMX_REFLECT(24)
966 // VMX_FAULT(24)
967 END(vmx_general_exception)
969 .org vmx_ia64_ivt+0x5500
970 /////////////////////////////////////////////////////////////////////////////////////////
971 // 0x5500 Entry 25 (size 16 bundles) Disabled FP-Register (35)
972 ENTRY(vmx_disabled_fp_reg)
973 VMX_DBG_FAULT(25)
974 VMX_REFLECT(25)
975 END(vmx_disabled_fp_reg)
977 .org vmx_ia64_ivt+0x5600
978 /////////////////////////////////////////////////////////////////////////////////////////
979 // 0x5600 Entry 26 (size 16 bundles) Nat Consumption (11,23,37,50)
980 ENTRY(vmx_nat_consumption)
981 VMX_DBG_FAULT(26)
982 VMX_REFLECT(26)
983 END(vmx_nat_consumption)
985 .org vmx_ia64_ivt+0x5700
986 /////////////////////////////////////////////////////////////////////////////////////////
987 // 0x5700 Entry 27 (size 16 bundles) Speculation (40)
988 ENTRY(vmx_speculation_vector)
989 VMX_DBG_FAULT(27)
990 VMX_REFLECT(27)
991 END(vmx_speculation_vector)
993 .org vmx_ia64_ivt+0x5800
994 /////////////////////////////////////////////////////////////////////////////////////////
995 // 0x5800 Entry 28 (size 16 bundles) Reserved
996 VMX_DBG_FAULT(28)
997 VMX_FAULT(28)
999 .org vmx_ia64_ivt+0x5900
1000 /////////////////////////////////////////////////////////////////////////////////////////
1001 // 0x5900 Entry 29 (size 16 bundles) Debug (16,28,56)
1002 ENTRY(vmx_debug_vector)
1003 VMX_DBG_FAULT(29)
1004 VMX_FAULT(29)
1005 END(vmx_debug_vector)
1007 .org vmx_ia64_ivt+0x5a00
1008 /////////////////////////////////////////////////////////////////////////////////////////
1009 // 0x5a00 Entry 30 (size 16 bundles) Unaligned Reference (57)
1010 ENTRY(vmx_unaligned_access)
1011 VMX_DBG_FAULT(30)
1012 VMX_REFLECT(30)
1013 END(vmx_unaligned_access)
1015 .org vmx_ia64_ivt+0x5b00
1016 /////////////////////////////////////////////////////////////////////////////////////////
1017 // 0x5b00 Entry 31 (size 16 bundles) Unsupported Data Reference (57)
1018 ENTRY(vmx_unsupported_data_reference)
1019 VMX_DBG_FAULT(31)
1020 VMX_REFLECT(31)
1021 END(vmx_unsupported_data_reference)
1023 .org vmx_ia64_ivt+0x5c00
1024 /////////////////////////////////////////////////////////////////////////////////////////
1025 // 0x5c00 Entry 32 (size 16 bundles) Floating-Point Fault (64)
1026 ENTRY(vmx_floating_point_fault)
1027 VMX_DBG_FAULT(32)
1028 VMX_REFLECT(32)
1029 END(vmx_floating_point_fault)
1031 .org vmx_ia64_ivt+0x5d00
1032 /////////////////////////////////////////////////////////////////////////////////////////
1033 // 0x5d00 Entry 33 (size 16 bundles) Floating Point Trap (66)
// Reflected to the guest.
1034 ENTRY(vmx_floating_point_trap)
1035 VMX_DBG_FAULT(33)
1036 VMX_REFLECT(33)
1037 END(vmx_floating_point_trap)
1039 .org vmx_ia64_ivt+0x5e00
1040 /////////////////////////////////////////////////////////////////////////////////////////
1041 // 0x5e00 Entry 34 (size 16 bundles) Lower Privilege Transfer Trap (66)
// Reflected to the guest.
1042 ENTRY(vmx_lower_privilege_trap)
1043 VMX_DBG_FAULT(34)
1044 VMX_REFLECT(34)
1045 END(vmx_lower_privilege_trap)
1047 .org vmx_ia64_ivt+0x5f00
1048 /////////////////////////////////////////////////////////////////////////////////////////
1049 // 0x5f00 Entry 35 (size 16 bundles) Taken Branch Trap (68)
// Reflected to the guest.
1050 ENTRY(vmx_taken_branch_trap)
1051 VMX_DBG_FAULT(35)
1052 VMX_REFLECT(35)
1053 END(vmx_taken_branch_trap)
1055 .org vmx_ia64_ivt+0x6000
1056 /////////////////////////////////////////////////////////////////////////////////////////
1057 // 0x6000 Entry 36 (size 16 bundles) Single Step Trap (69)
// Reflected to the guest.
1058 ENTRY(vmx_single_step_trap)
1059 VMX_DBG_FAULT(36)
1060 VMX_REFLECT(36)
1061 END(vmx_single_step_trap)
1063 .global vmx_virtualization_fault_back
1064 .org vmx_ia64_ivt+0x6100
1065 /////////////////////////////////////////////////////////////////////////////////////////
1066 // 0x6100 Entry 37 (size 16 bundles) Virtualization Fault
// On entry: r24 = fault cause (EVENT_* code), r25 = faulting opcode
// (both stored into the vcpu below). r21 is used as the base for the
// IA64_VCPU_* / IA64_VPD_* offsets, so it presumably holds the current
// vcpu pointer -- set up outside this chunk; confirm against the entry
// macros. psr.ic is off; r31 saves the caller's predicates.
1067 ENTRY(vmx_virtualization_fault)
1068 // VMX_DBG_FAULT(37)
1069 mov r31=pr // save predicates for the handlers branched to below
1070 ;;
// Decode the cause once into predicates, then branch to the hand-written
// fast emulation paths for the most frequent virtualized instructions.
1071 cmp.eq p6,p0=EVENT_MOV_FROM_AR,r24
1072 cmp.eq p7,p0=EVENT_MOV_FROM_RR,r24
1073 cmp.eq p8,p0=EVENT_MOV_TO_RR,r24
1074 cmp.eq p9,p0=EVENT_RSM,r24
1075 cmp.eq p10,p0=EVENT_SSM,r24
1076 cmp.eq p11,p0=EVENT_MOV_TO_PSR,r24
1077 cmp.eq p12,p0=EVENT_THASH,r24
1078 (p6) br.dptk.many vmx_asm_mov_from_ar
1079 (p7) br.dptk.many vmx_asm_mov_from_rr
1080 (p8) br.dptk.many vmx_asm_mov_to_rr
1081 (p9) br.dptk.many vmx_asm_rsm
1082 (p10) br.dptk.many vmx_asm_ssm
1083 (p11) br.dptk.many vmx_asm_mov_to_psr
1084 (p12) br.dptk.many vmx_asm_thash
1085 ;;
// Slow path -- also the global re-entry point used by the fast paths when
// they cannot complete the emulation in assembly: record cause and opcode
// in the vcpu, then dispatch to the C emulator.
1086 vmx_virtualization_fault_back:
1087 mov r19=37 // vector number for the dispatcher
1088 adds r16 = IA64_VCPU_CAUSE_OFFSET,r21
1089 adds r17 = IA64_VCPU_OPCODE_OFFSET,r21
1090 ;;
1091 st8 [r16] = r24 // vcpu->cause = cause
1092 st8 [r17] = r25 // vcpu->opcode = opcode
1093 ;;
// Everything except RFI goes straight to the C dispatcher.
1094 cmp.ne p6,p0=EVENT_RFI, r24
1095 (p6) br.sptk vmx_dispatch_virtualization_fault
1096 ;;
// RFI only: fetch vpd->vifs and test its valid bit.
1097 adds r18=IA64_VPD_BASE_OFFSET,r21
1098 ;;
1099 ld8 r18=[r18] // r18 = vpd base
1100 ;;
1101 adds r18=IA64_VPD_VIFS_OFFSET,r18
1102 ;;
1103 ld8 r18=[r18] // r18 = vpd->vifs
1104 ;;
1105 tbit.z p6,p0=r18,63 // p6 = (vifs.v == 0)
1106 (p6) br.sptk vmx_dispatch_virtualization_fault
1107 ;;
1108 //if vifs.v=1 discard the current register frame (alloc a zero-size frame)
1109 alloc r18=ar.pfs,0,0,0,0
1110 br.sptk vmx_dispatch_virtualization_fault
1111 END(vmx_virtualization_fault)
// Entries 38-44 are architecturally reserved; each slot just traps into
// VMX_FAULT(n) at its fixed 0x100-byte offset.
1113 .org vmx_ia64_ivt+0x6200
1114 /////////////////////////////////////////////////////////////////////////////////////////
1115 // 0x6200 Entry 38 (size 16 bundles) Reserved
1116 VMX_DBG_FAULT(38)
1117 VMX_FAULT(38)
1119 .org vmx_ia64_ivt+0x6300
1120 /////////////////////////////////////////////////////////////////////////////////////////
1121 // 0x6300 Entry 39 (size 16 bundles) Reserved
1122 VMX_DBG_FAULT(39)
1123 VMX_FAULT(39)
1125 .org vmx_ia64_ivt+0x6400
1126 /////////////////////////////////////////////////////////////////////////////////////////
1127 // 0x6400 Entry 40 (size 16 bundles) Reserved
1128 VMX_DBG_FAULT(40)
1129 VMX_FAULT(40)
1131 .org vmx_ia64_ivt+0x6500
1132 /////////////////////////////////////////////////////////////////////////////////////////
1133 // 0x6500 Entry 41 (size 16 bundles) Reserved
1134 VMX_DBG_FAULT(41)
1135 VMX_FAULT(41)
1137 .org vmx_ia64_ivt+0x6600
1138 /////////////////////////////////////////////////////////////////////////////////////////
1139 // 0x6600 Entry 42 (size 16 bundles) Reserved
1140 VMX_DBG_FAULT(42)
1141 VMX_FAULT(42)
1143 .org vmx_ia64_ivt+0x6700
1144 /////////////////////////////////////////////////////////////////////////////////////////
1145 // 0x6700 Entry 43 (size 16 bundles) Reserved
1146 VMX_DBG_FAULT(43)
1147 VMX_FAULT(43)
1149 .org vmx_ia64_ivt+0x6800
1150 /////////////////////////////////////////////////////////////////////////////////////////
1151 // 0x6800 Entry 44 (size 16 bundles) Reserved
1152 VMX_DBG_FAULT(44)
1153 VMX_FAULT(44)
1155 .org vmx_ia64_ivt+0x6900
1156 /////////////////////////////////////////////////////////////////////////////////////////
1157 // 0x6900 Entry 45 (size 16 bundles) IA-32 Exception (17,18,29,41,42,43,44,58,60,61,62,72,73,75,76,77)
// IA-32 compatibility exceptions are not supported: treated as a fatal fault.
1158 ENTRY(vmx_ia32_exception)
1159 VMX_DBG_FAULT(45)
1160 VMX_FAULT(45)
1161 END(vmx_ia32_exception)
1163 .org vmx_ia64_ivt+0x6a00
1164 /////////////////////////////////////////////////////////////////////////////////////////
1165 // 0x6a00 Entry 46 (size 16 bundles) IA-32 Intercept (30,31,59,70,71)
// IA-32 intercepts are not supported: treated as a fatal fault.
1166 ENTRY(vmx_ia32_intercept)
1167 VMX_DBG_FAULT(46)
1168 VMX_FAULT(46)
1169 END(vmx_ia32_intercept)
1171 .org vmx_ia64_ivt+0x6b00
1172 /////////////////////////////////////////////////////////////////////////////////////////
1173 // 0x6b00 Entry 47 (size 16 bundles) IA-32 Interrupt (74)
// IA-32 interrupts are not supported: treated as a fatal fault.
1174 ENTRY(vmx_ia32_interrupt)
1175 VMX_DBG_FAULT(47)
1176 VMX_FAULT(47)
1177 END(vmx_ia32_interrupt)
// Entries 48-67 are architecturally reserved; each slot just traps into
// VMX_FAULT(n) at its fixed 0x100-byte offset, filling the table up to 0x8000.
1179 .org vmx_ia64_ivt+0x6c00
1180 /////////////////////////////////////////////////////////////////////////////////////////
1181 // 0x6c00 Entry 48 (size 16 bundles) Reserved
1182 VMX_DBG_FAULT(48)
1183 VMX_FAULT(48)
1185 .org vmx_ia64_ivt+0x6d00
1186 /////////////////////////////////////////////////////////////////////////////////////////
1187 // 0x6d00 Entry 49 (size 16 bundles) Reserved
1188 VMX_DBG_FAULT(49)
1189 VMX_FAULT(49)
1191 .org vmx_ia64_ivt+0x6e00
1192 /////////////////////////////////////////////////////////////////////////////////////////
1193 // 0x6e00 Entry 50 (size 16 bundles) Reserved
1194 VMX_DBG_FAULT(50)
1195 VMX_FAULT(50)
1197 .org vmx_ia64_ivt+0x6f00
1198 /////////////////////////////////////////////////////////////////////////////////////////
1199 // 0x6f00 Entry 51 (size 16 bundles) Reserved
1200 VMX_DBG_FAULT(51)
1201 VMX_FAULT(51)
1203 .org vmx_ia64_ivt+0x7000
1204 /////////////////////////////////////////////////////////////////////////////////////////
1205 // 0x7000 Entry 52 (size 16 bundles) Reserved
1206 VMX_DBG_FAULT(52)
1207 VMX_FAULT(52)
1209 .org vmx_ia64_ivt+0x7100
1210 /////////////////////////////////////////////////////////////////////////////////////////
1211 // 0x7100 Entry 53 (size 16 bundles) Reserved
1212 VMX_DBG_FAULT(53)
1213 VMX_FAULT(53)
1215 .org vmx_ia64_ivt+0x7200
1216 /////////////////////////////////////////////////////////////////////////////////////////
1217 // 0x7200 Entry 54 (size 16 bundles) Reserved
1218 VMX_DBG_FAULT(54)
1219 VMX_FAULT(54)
1221 .org vmx_ia64_ivt+0x7300
1222 /////////////////////////////////////////////////////////////////////////////////////////
1223 // 0x7300 Entry 55 (size 16 bundles) Reserved
1224 VMX_DBG_FAULT(55)
1225 VMX_FAULT(55)
1227 .org vmx_ia64_ivt+0x7400
1228 /////////////////////////////////////////////////////////////////////////////////////////
1229 // 0x7400 Entry 56 (size 16 bundles) Reserved
1230 VMX_DBG_FAULT(56)
1231 VMX_FAULT(56)
1233 .org vmx_ia64_ivt+0x7500
1234 /////////////////////////////////////////////////////////////////////////////////////////
1235 // 0x7500 Entry 57 (size 16 bundles) Reserved
1236 VMX_DBG_FAULT(57)
1237 VMX_FAULT(57)
1239 .org vmx_ia64_ivt+0x7600
1240 /////////////////////////////////////////////////////////////////////////////////////////
1241 // 0x7600 Entry 58 (size 16 bundles) Reserved
1242 VMX_DBG_FAULT(58)
1243 VMX_FAULT(58)
1245 .org vmx_ia64_ivt+0x7700
1246 /////////////////////////////////////////////////////////////////////////////////////////
1247 // 0x7700 Entry 59 (size 16 bundles) Reserved
1248 VMX_DBG_FAULT(59)
1249 VMX_FAULT(59)
1251 .org vmx_ia64_ivt+0x7800
1252 /////////////////////////////////////////////////////////////////////////////////////////
1253 // 0x7800 Entry 60 (size 16 bundles) Reserved
1254 VMX_DBG_FAULT(60)
1255 VMX_FAULT(60)
1257 .org vmx_ia64_ivt+0x7900
1258 /////////////////////////////////////////////////////////////////////////////////////////
1259 // 0x7900 Entry 61 (size 16 bundles) Reserved
1260 VMX_DBG_FAULT(61)
1261 VMX_FAULT(61)
1263 .org vmx_ia64_ivt+0x7a00
1264 /////////////////////////////////////////////////////////////////////////////////////////
1265 // 0x7a00 Entry 62 (size 16 bundles) Reserved
1266 VMX_DBG_FAULT(62)
1267 VMX_FAULT(62)
1269 .org vmx_ia64_ivt+0x7b00
1270 /////////////////////////////////////////////////////////////////////////////////////////
1271 // 0x7b00 Entry 63 (size 16 bundles) Reserved
1272 VMX_DBG_FAULT(63)
1273 VMX_FAULT(63)
1275 .org vmx_ia64_ivt+0x7c00
1276 /////////////////////////////////////////////////////////////////////////////////////////
1277 // 0x7c00 Entry 64 (size 16 bundles) Reserved
1278 VMX_DBG_FAULT(64)
1279 VMX_FAULT(64)
1281 .org vmx_ia64_ivt+0x7d00
1282 /////////////////////////////////////////////////////////////////////////////////////////
1283 // 0x7d00 Entry 65 (size 16 bundles) Reserved
1284 VMX_DBG_FAULT(65)
1285 VMX_FAULT(65)
1287 .org vmx_ia64_ivt+0x7e00
1288 /////////////////////////////////////////////////////////////////////////////////////////
1289 // 0x7e00 Entry 66 (size 16 bundles) Reserved
1290 VMX_DBG_FAULT(66)
1291 VMX_FAULT(66)
1293 .org vmx_ia64_ivt+0x7f00
1294 /////////////////////////////////////////////////////////////////////////////////////////
1295 // 0x7f00 Entry 67 (size 16 bundles) Reserved
1296 VMX_DBG_FAULT(67)
1297 VMX_FAULT(67)
1299 .org vmx_ia64_ivt+0x8000
1300 // There is no particular reason for this code to be here, other than that
1301 // there happens to be space here that would go unused otherwise. If this
1302 // fault ever gets "unreserved", simply move the following code to a more
1303 // suitable spot...
1306 ENTRY(vmx_dispatch_reflection)
1307 /*
1308 * Input:
1309 * psr.ic: off
1310 * r19: intr type (offset into ivt, see ia64_int.h)
1311 * r31: contains saved predicates (pr)
1312 *
1313 * Saves minimal state, re-enables interruption collection, then calls
1314 * vmx_reflect_interruption(ifa, isr, iim, r15, regs) with the return
1315 * address pre-set to ia64_leave_hypervisor.
1316 */
1313 VMX_SAVE_MIN_WITH_COVER_R19
1314 alloc r14=ar.pfs,0,0,5,0 // 5 output regs for vmx_reflect_interruption
1315 mov out0=cr.ifa
1316 mov out1=cr.isr
1317 mov out2=cr.iim
1318 mov out3=r15
1319 adds r3=8,r2 // set up second base pointer
1320 ;;
1321 ssm psr.ic
1322 ;;
1323 srlz.i // guarantee that interruption collection is on
1324 ;;
1325 (p15) ssm psr.i // restore psr.i
1326 movl r14=ia64_leave_hypervisor
1327 ;;
1328 VMX_SAVE_REST
1329 mov rp=r14 // the C call "returns" into ia64_leave_hypervisor
1330 ;;
1331 adds out4=16,r12 // out4 = pt_regs pointer (sp + 16)
1332 br.call.sptk.many b6=vmx_reflect_interruption
1333 END(vmx_dispatch_reflection)
// Slow-path dispatcher for entry 37: calls vmx_emulate(vcpu, regs).
// In: r19 = vector, r31 = saved pr (as set up by the faulting entry).
// Returns through ia64_leave_hypervisor_prepare, hence the extra state
// saved by VMX_SAVE_EXTRA below.
1335 ENTRY(vmx_dispatch_virtualization_fault)
1336 VMX_SAVE_MIN_WITH_COVER_R19
1337 ;;
1338 alloc r14=ar.pfs,0,0,2,0 // now it's safe (must be first in insn group!)
1339 mov out0=r13 //vcpu
1340 adds r3=8,r2 // set up second base pointer
1341 ;;
1342 ssm psr.ic
1343 ;;
1344 srlz.i // guarantee that interruption collection is on
1345 ;;
1346 (p15) ssm psr.i // restore psr.i
1347 movl r14=ia64_leave_hypervisor_prepare
1348 ;;
1349 VMX_SAVE_REST
1350 VMX_SAVE_EXTRA // extra state required by the _prepare leave path
1351 mov rp=r14 // the C call "returns" into the leave path
1352 ;;
1353 adds out1=16,sp //regs
1354 br.call.sptk.many b6=vmx_emulate
1355 END(vmx_dispatch_virtualization_fault)
// Dispatch a virtual external interrupt: calls vmx_vexirq(vcpu) and
// returns through ia64_leave_hypervisor. Global so it can be branched to
// from outside this file.
1358 GLOBAL_ENTRY(vmx_dispatch_vexirq)
1359 VMX_SAVE_MIN_WITH_COVER_R19
1360 alloc r14=ar.pfs,0,0,1,0 // one output reg for vmx_vexirq
1361 mov out0=r13 // vcpu
1363 ssm psr.ic
1364 ;;
1365 srlz.i // guarantee that interruption collection is on
1366 ;;
1367 (p15) ssm psr.i // restore psr.i
1368 adds r3=8,r2 // set up second base pointer
1369 ;;
1370 VMX_SAVE_REST
1371 movl r14=ia64_leave_hypervisor
1372 ;;
1373 mov rp=r14 // the C call "returns" into ia64_leave_hypervisor
1374 br.call.sptk.many b6=vmx_vexirq
1375 END(vmx_dispatch_vexirq)
// ITLB-miss slow path: calls vmx_hpw_miss(ifa, r15, regs) and returns
// through ia64_leave_hypervisor.
1377 ENTRY(vmx_dispatch_itlb_miss)
1378 VMX_SAVE_MIN_WITH_COVER_R19
1379 alloc r14=ar.pfs,0,0,3,0 // 3 output regs for vmx_hpw_miss
1380 mov out0=cr.ifa
1381 mov out1=r15
1382 adds r3=8,r2 // set up second base pointer
1383 ;;
1384 ssm psr.ic
1385 ;;
1386 srlz.i // guarantee that interruption collection is on
1387 ;;
1388 (p15) ssm psr.i // restore psr.i
1389 movl r14=ia64_leave_hypervisor
1390 ;;
1391 VMX_SAVE_REST
1392 mov rp=r14 // the C call "returns" into ia64_leave_hypervisor
1393 ;;
1394 adds out2=16,r12 // out2 = pt_regs pointer (sp + 16)
1395 br.call.sptk.many b6=vmx_hpw_miss
1396 END(vmx_dispatch_itlb_miss)
// DTLB-miss slow path: calls vmx_hpw_miss(ifa, r15, regs), like the ITLB
// version, but returns through ia64_leave_hypervisor_prepare and therefore
// also saves the extra state (VMX_SAVE_EXTRA). NOTE(review): the reason
// for this itlb/dtlb asymmetry is not visible in this chunk.
1398 ENTRY(vmx_dispatch_dtlb_miss)
1399 VMX_SAVE_MIN_WITH_COVER_R19
1400 alloc r14=ar.pfs,0,0,3,0 // 3 output regs for vmx_hpw_miss
1401 mov out0=cr.ifa
1402 mov out1=r15
1403 adds r3=8,r2 // set up second base pointer
1404 ;;
1405 ssm psr.ic
1406 ;;
1407 srlz.i // guarantee that interruption collection is on
1408 ;;
1409 (p15) ssm psr.i // restore psr.i
1410 movl r14=ia64_leave_hypervisor_prepare
1411 ;;
1412 VMX_SAVE_REST
1413 VMX_SAVE_EXTRA // extra state required by the _prepare leave path
1414 mov rp=r14 // the C call "returns" into the leave path
1415 ;;
1416 adds out2=16,r12 // out2 = pt_regs pointer (sp + 16)
1417 br.call.sptk.many b6=vmx_hpw_miss
1418 END(vmx_dispatch_dtlb_miss)
// Break-instruction fault (hypercall) path: calls
// vmx_ia64_handle_break(ifa, regs, isr, iim) and returns through
// ia64_leave_hypervisor.
1420 ENTRY(vmx_dispatch_break_fault)
1421 VMX_SAVE_MIN_WITH_COVER_R19
1422 ;;
1423 ;;
1424 alloc r14=ar.pfs,0,0,4,0 // now it's safe (must be first in insn group!)
1425 mov out0=cr.ifa
1426 mov out2=cr.isr // FIXME: pity to make this slow access twice
1427 mov out3=cr.iim // FIXME: pity to make this slow access twice
1428 adds r3=8,r2 // set up second base pointer
1429 ;;
1430 ssm psr.ic
1431 ;;
1432 srlz.i // guarantee that interruption collection is on
1433 ;;
1434 (p15)ssm psr.i // restore psr.i
1435 movl r14=ia64_leave_hypervisor
1436 ;;
1437 VMX_SAVE_REST
1438 mov rp=r14 // the C call "returns" into ia64_leave_hypervisor
1439 ;;
1440 adds out1=16,sp // out1 = pt_regs pointer (sp + 16)
1441 br.call.sptk.many b6=vmx_ia64_handle_break
1442 ;;
1443 END(vmx_dispatch_break_fault)
// External-interrupt dispatcher: calls ia64_handle_irq(ivr, regs) and
// returns through ia64_leave_hypervisor.
1446 ENTRY(vmx_dispatch_interrupt)
1447 VMX_SAVE_MIN_WITH_COVER_R19 // uses r31; defines r2 and r3
1448 ;;
1449 alloc r14=ar.pfs,0,0,2,0 // must be first in an insn group
1450 mov out0=cr.ivr // pass cr.ivr as first arg
1451 adds r3=8,r2 // set up second base pointer for SAVE_REST
1452 ;;
1453 ssm psr.ic
1454 ;;
1455 srlz.i // guarantee that interruption collection is on
1456 ;;
1457 (p15) ssm psr.i // restore psr.i
1458 movl r14=ia64_leave_hypervisor
1459 ;;
1460 VMX_SAVE_REST
1461 mov rp=r14 // the C call "returns" into ia64_leave_hypervisor
1462 ;;
1463 add out1=16,sp // pass pointer to pt_regs as second arg
1464 br.call.sptk.many b6=ia64_handle_irq
1465 END(vmx_dispatch_interrupt)