ia64/xen-unstable

view xen/arch/ia64/vmx/vmx_ivt.S @ 9563:9bee4875a848

Rename sched_op->sched_op_compat and sched_op_new->sched_op
after Christian's interface cleanup.

Signed-off-by: Keir Fraser <keir@xensource.com>
author kaf24@firebug.cl.cam.ac.uk
date Sat Apr 01 11:08:50 2006 +0100 (2006-04-01)
parents cfe20f41f043
children 5cc367720223
line source
1 /*
2 * arch/ia64/kernel/vmx_ivt.S
3 *
4 * Copyright (C) 1998-2001, 2003 Hewlett-Packard Co
5 * Stephane Eranian <eranian@hpl.hp.com>
6 * David Mosberger <davidm@hpl.hp.com>
7 * Copyright (C) 2000, 2002-2003 Intel Co
8 * Asit Mallick <asit.k.mallick@intel.com>
9 * Suresh Siddha <suresh.b.siddha@intel.com>
10 * Kenneth Chen <kenneth.w.chen@intel.com>
11 * Fenghua Yu <fenghua.yu@intel.com>
12 *
13 *
14 * 00/08/23 Asit Mallick <asit.k.mallick@intel.com> TLB handling for SMP
15 * 00/12/20 David Mosberger-Tang <davidm@hpl.hp.com> DTLB/ITLB handler now uses virtual PT.
16 *
17 * 05/3/20 Xuefei Xu (Anthony Xu) (anthony.xu@intel.com)
18 * Supporting Intel virtualization architecture
19 *
20 */
22 /*
23 * This file defines the interruption vector table used by the CPU.
24 * It does not include one entry per possible cause of interruption.
25 *
26 * The first 20 entries of the table contain 64 bundles each while the
27 * remaining 48 entries contain only 16 bundles each.
28 *
29 * The 64 bundles are used to allow inlining the whole handler for critical
30 * interruptions like TLB misses.
31 *
32 * For each entry, the comment is as follows:
33 *
34 * // 0x1c00 Entry 7 (size 64 bundles) Data Key Miss (12,51)
35 * entry offset ----/ / / / /
36 * entry number ---------/ / / /
37 * size of the entry -------------/ / /
38 * vector name -------------------------------------/ /
39 * interruptions triggering this vector ----------------------/
40 *
41 * The table is 32KB in size and must be aligned on 32KB boundary.
42 * (The CPU ignores the 15 lower bits of the address)
43 *
44 * Table is based upon EAS2.6 (Oct 1999)
45 */
47 #include <linux/config.h>
49 #include <asm/asmmacro.h>
50 #include <asm/break.h>
51 #include <asm/ia32.h>
52 #include <asm/kregs.h>
53 #include <asm/offsets.h>
54 #include <asm/pgtable.h>
55 #include <asm/processor.h>
56 #include <asm/ptrace.h>
57 #include <asm/system.h>
58 #include <asm/thread_info.h>
59 #include <asm/unistd.h>
60 #include <asm/vhpt.h>
62 #ifdef VTI_DEBUG
63 /*
64 * This lets you track the last eight faults that occurred on the CPU. Make sure ar.k2 isn't
65 * needed for something else before enabling this...
66 */
// Debug-only fault logger.  Appends one 32-byte record -- cr.iip, cr.ipsr,
// cr.ifa and the vector number i, four st8 stores of 8 bytes each -- into a
// per-vcpu ring buffer at IVT_DBG_OFS(r21).  The current write offset lives at
// IVT_CUR_OFS(r21); it is advanced by 32 and wrapped with mask 0xfe0.
// Clobbers r16-r20, r22, r23 (callers must not hold live values there).
// NOTE(review): the trailing backslash on the final st8 line continues the
// #define onto the commented-out alternative below, making that comment part
// of every expansion.  Harmless (it expands to an asm // comment) but fragile:
// do not insert anything between those two lines.
67 #define VMX_DBG_FAULT(i) \
68 add r16=IVT_CUR_OFS,r21; \
69 add r17=IVT_DBG_OFS,r21;; \
70 ld8 r18=[r16];; \
71 add r17=r18,r17; \
72 mov r19=cr.iip; \
73 mov r20=cr.ipsr; \
74 mov r22=cr.ifa; \
75 mov r23=i;; \
76 st8 [r17]=r19,8; \
77 add r18=32,r18;; \
78 st8 [r17]=r20,8; \
79 mov r19=0xfe0;; \
80 st8 [r17]=r22,8; \
81 and r18=r19,r18;; \
82 st8 [r17]=r23; \
83 st8 [r16]=r18;; \
84 //# define VMX_DBG_FAULT(i) mov r16=ar.k2;; shl r16=r16,8;; add r16=(i),r16;;mov ar.k2=r16
// When VTI_DEBUG is off the macro expands to nothing.
85 #else
86 # define VMX_DBG_FAULT(i)
87 #endif
89 #include "vmx_minstate.h"
// Terminal handler for an unexpected/unimplemented vector n: defines the
// label vmx_fault_n and spins on it forever (a deliberate hang so the state
// can be inspected from a debugger rather than corrupting the guest).
93 #define VMX_FAULT(n) \
94 vmx_fault_##n:; \
95 br.sptk vmx_fault_##n; \
96 ;; \
// Common stub for faults that are reflected into the guest.  Saves pr in r31
// and the vector number in r19, then tests IA64_PSR_VM_BIT of cr.ipsr:
// when the bit is set (p7) the fault came from guest context and is handed to
// vmx_dispatch_reflection; otherwise execution falls into VMX_FAULT(n) and
// hangs (the fault was not expected from hypervisor context).
99 #define VMX_REFLECT(n) \
100 mov r31=pr; \
101 mov r19=n; /* prepare to save predicates */ \
102 mov r29=cr.ipsr; \
103 ;; \
104 tbit.z p6,p7=r29,IA64_PSR_VM_BIT; \
105 (p7) br.sptk.many vmx_dispatch_reflection; \
106 VMX_FAULT(n); \
// Dead-end panic point: branches to itself forever.  Used as a trap-and-hang
// target rather than attempting any recovery.
109 GLOBAL_ENTRY(vmx_panic)
110 br.sptk.many vmx_panic
111 ;;
112 END(vmx_panic)
// Start of the VMX interruption vector table.  The hardware requires the
// table base to be 32KB-aligned (cr.iva ignores the low 15 bits).
118 .section .text.ivt,"ax"
120 .align 32768 // align on 32KB boundary
121 .global vmx_ia64_ivt
122 vmx_ia64_ivt:
123 /////////////////////////////////////////////////////////////////////////////////////////
124 // 0x0000 Entry 0 (size 64 bundles) VHPT Translation (8,20,47)
// VHPT translation faults are not handled here: log (if VTI_DEBUG) and hang.
125 ENTRY(vmx_vhpt_miss)
126 VMX_DBG_FAULT(0)
127 VMX_FAULT(0)
128 END(vmx_vhpt_miss)
130 .org vmx_ia64_ivt+0x400
131 /////////////////////////////////////////////////////////////////////////////////////////
132 // 0x0400 Entry 1 (size 64 bundles) ITLB (21)
// Instruction TLB miss.  If the miss did not come from guest (VM) context,
// treat it like an alternate ITLB miss (identity-mapped hypervisor region).
// Otherwise walk the per-vcpu VHPT collision chain for cr.ifa; on a tag hit,
// insert the cached translation with itc.i and resume the guest through
// ia64_vmm_entry; on a chain miss, call the C handler with r19 = vector 1.
133 ENTRY(vmx_itlb_miss)
134 VMX_DBG_FAULT(1)
135 mov r31 = pr
136 mov r29=cr.ipsr;
137 ;;
138 tbit.z p6,p7=r29,IA64_PSR_VM_BIT;
139 (p6) br.sptk vmx_alt_itlb_miss_1
140 //(p6) br.sptk vmx_fault_1
// r16 = faulting address; thash/ttag give the VHPT bucket and its tag.
141 mov r16 = cr.ifa
142 ;;
143 thash r17 = r16
144 ;;
145 ttag r20 = r16
146 ;;
// Walk the collision chain: r17 = current entry, NULL terminates the chain.
147 vmx_itlb_loop:
148 cmp.eq p6,p0 = r0, r17
149 (p6) br vmx_itlb_out
150 ;;
151 adds r22 = VLE_TITAG_OFFSET, r17
152 adds r23 = VLE_CCHAIN_OFFSET, r17
153 ;;
154 ld8 r24 = [r22]
155 ld8 r25 = [r23]
156 ;;
157 lfetch [r25]
158 cmp.eq p6,p7 = r20, r24
159 ;;
160 (p7) mov r17 = r25;
161 (p7) br.sptk vmx_itlb_loop
162 ;;
// Tag hit: load pgflags (PTE) and itir from the entry and insert into the
// instruction TLB.
163 adds r23 = VLE_PGFLAGS_OFFSET, r17
164 adds r24 = VLE_ITIR_OFFSET, r17
165 ;;
166 ld8 r26 = [r23]
167 ld8 r25 = [r24]
168 ;;
169 mov cr.itir = r25
170 ;;
171 itc.i r26
172 ;;
173 srlz.i
174 ;;
// Re-enter the guest: ia64_vmm_entry expects r18 = VPD base, r19 = vpsr,
// r20 = PAL/VSA base, r22 = saved b0, r23 = saved predicates.
175 mov r23=r31
176 mov r22=b0
177 adds r16=IA64_VPD_BASE_OFFSET,r21
178 ;;
179 ld8 r18=[r16]
180 ;;
181 adds r19=VPD(VPSR),r18
182 movl r20=__vsa_base
183 ;;
184 ld8 r19=[r19]
185 ld8 r20=[r20]
186 ;;
187 br.sptk ia64_vmm_entry
188 ;;
// Chain exhausted: fall back to the C miss handler (r19 = vector number 1).
189 vmx_itlb_out:
190 mov r19 = 1
191 br.sptk vmx_dispatch_tlb_miss
192 VMX_FAULT(1);
193 END(vmx_itlb_miss)
195 .org vmx_ia64_ivt+0x0800
196 /////////////////////////////////////////////////////////////////////////////////////////
197 // 0x0800 Entry 2 (size 64 bundles) DTLB (9,48)
// Data TLB miss.  Mirror image of vmx_itlb_miss: non-guest context goes to
// the alternate DTLB path; guest context walks the per-vcpu VHPT collision
// chain and, on a tag hit, inserts the translation with itc.d before
// resuming through ia64_vmm_entry.  A chain miss calls the C handler with
// r19 = vector 2.
198 ENTRY(vmx_dtlb_miss)
199 VMX_DBG_FAULT(2)
200 mov r31 = pr
201 mov r29=cr.ipsr;
202 ;;
203 tbit.z p6,p7=r29,IA64_PSR_VM_BIT;
204 (p6)br.sptk vmx_alt_dtlb_miss_1
205 //(p6)br.sptk vmx_fault_2
// r16 = faulting data address; locate its VHPT bucket and tag.
206 mov r16 = cr.ifa
207 ;;
208 thash r17 = r16
209 ;;
210 ttag r20 = r16
211 ;;
// Collision-chain walk; NULL (r0) entry pointer terminates.
212 vmx_dtlb_loop:
213 cmp.eq p6,p0 = r0, r17
214 (p6)br vmx_dtlb_out
215 ;;
216 adds r22 = VLE_TITAG_OFFSET, r17
217 adds r23 = VLE_CCHAIN_OFFSET, r17
218 ;;
219 ld8 r24 = [r22]
220 ld8 r25 = [r23]
221 ;;
222 lfetch [r25]
223 cmp.eq p6,p7 = r20, r24
224 ;;
225 (p7)mov r17 = r25;
226 (p7)br.sptk vmx_dtlb_loop
227 ;;
// Hit: insert pgflags/itir from the matched entry into the data TLB.
228 adds r23 = VLE_PGFLAGS_OFFSET, r17
229 adds r24 = VLE_ITIR_OFFSET, r17
230 ;;
231 ld8 r26 = [r23]
232 ld8 r25 = [r24]
233 ;;
234 mov cr.itir = r25
235 ;;
236 itc.d r26
237 ;;
238 srlz.d;
239 ;;
// Resume the guest (same register contract as the ITLB path).
240 mov r23=r31
241 mov r22=b0
242 adds r16=IA64_VPD_BASE_OFFSET,r21
243 ;;
244 ld8 r18=[r16]
245 ;;
246 adds r19=VPD(VPSR),r18
247 movl r20=__vsa_base
248 ;;
249 ld8 r19=[r19]
250 ld8 r20=[r20]
251 ;;
252 br.sptk ia64_vmm_entry
253 ;;
// Chain exhausted: hand off to the C miss handler (r19 = vector number 2).
254 vmx_dtlb_out:
255 mov r19 = 2
256 br.sptk vmx_dispatch_tlb_miss
257 VMX_FAULT(2);
258 END(vmx_dtlb_miss)
260 .org vmx_ia64_ivt+0x0c00
261 /////////////////////////////////////////////////////////////////////////////////////////
262 // 0x0c00 Entry 3 (size 64 bundles) Alt ITLB (19)
// Alternate ITLB miss: build an identity translation for hypervisor
// (non-guest) addresses on the fly.  Guest-context faults (VM bit set, p7)
// are unexpected here and hang in vmx_fault_3, as are addresses with bit 63
// clear.  The inserted PTE is PAGE_KERNEL, with bit 4 (uncacheable memory
// attribute) derived from address bit 61 so region-6 accesses map uncached.
263 ENTRY(vmx_alt_itlb_miss)
264 VMX_DBG_FAULT(3)
265 mov r31 = pr
266 mov r29=cr.ipsr;
267 ;;
268 tbit.z p6,p7=r29,IA64_PSR_VM_BIT;
269 (p7)br.sptk vmx_fault_3
// Shared entry point, also branched to from vmx_itlb_miss for non-VM faults.
270 vmx_alt_itlb_miss_1:
271 mov r16=cr.ifa // get address that caused the TLB miss
272 ;;
273 tbit.z p6,p7=r16,63
274 (p6)br.sptk vmx_fault_3
275 ;;
276 movl r17=PAGE_KERNEL
277 mov r24=cr.ipsr
278 movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
279 ;;
280 and r19=r19,r16 // clear ed, reserved bits, and PTE control bits
281 shr.u r18=r16,55 // move address bit 59 to bit 4
282 ;;
283 and r18=0x10,r18 // bit 4=address-bit(61)
284 or r19=r17,r19 // insert PTE control bits into r19
285 ;;
286 or r19=r19,r18 // set bit 4 (uncached) if the access was to region 6
287 ;;
288 itc.i r19 // insert the TLB entry
289 mov pr=r31,-1
290 rfi
291 VMX_FAULT(3);
292 END(vmx_alt_itlb_miss)
295 .org vmx_ia64_ivt+0x1000
296 /////////////////////////////////////////////////////////////////////////////////////////
297 // 0x1000 Entry 4 (size 64 bundles) Alt DTLB (7,46)
// Alternate DTLB miss: same identity-mapping scheme as the alt-ITLB path,
// plus handling for speculative loads -- a speculative (non-lfetch) access
// sets ipsr.ed instead of inserting a translation, so the access simply
// produces a NaT on retry rather than faulting again.
298 ENTRY(vmx_alt_dtlb_miss)
299 VMX_DBG_FAULT(4)
300 mov r31=pr
301 mov r29=cr.ipsr;
302 ;;
303 tbit.z p6,p7=r29,IA64_PSR_VM_BIT;
304 (p7)br.sptk vmx_fault_4
// Shared entry point, also branched to from vmx_dtlb_miss for non-VM faults.
305 vmx_alt_dtlb_miss_1:
306 mov r16=cr.ifa // get address that caused the TLB miss
307 ;;
308 tbit.z p6,p7=r16,63
309 (p6)br.sptk vmx_fault_4
310 ;;
311 movl r17=PAGE_KERNEL
312 mov r20=cr.isr
313 movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
314 mov r24=cr.ipsr
315 ;;
316 and r22=IA64_ISR_CODE_MASK,r20 // get the isr.code field
317 tbit.nz p6,p7=r20,IA64_ISR_SP_BIT // is speculation bit on?
318 shr.u r18=r16,55 // move address bit 59 to bit 4
319 and r19=r19,r16 // clear ed, reserved bits, and PTE control bits
320 tbit.nz p9,p0=r20,IA64_ISR_NA_BIT // is non-access bit on?
321 ;;
322 and r18=0x10,r18 // bit 4=address-bit(61)
323 (p9) cmp.eq.or.andcm p6,p7=IA64_ISR_CODE_LFETCH,r22 // check isr.code field
324 dep r24=-1,r24,IA64_PSR_ED_BIT,1
325 or r19=r19,r17 // insert PTE control bits into r19
326 ;;
327 or r19=r19,r18 // set bit 4 (uncached) if the access was to region 6
// p6 = speculative non-lfetch access: defer via ipsr.ed; p7 = normal access:
// insert the identity translation and return.
328 (p6) mov cr.ipsr=r24
329 ;;
330 (p7) itc.d r19 // insert the TLB entry
331 mov pr=r31,-1
332 rfi
333 VMX_FAULT(4);
334 END(vmx_alt_dtlb_miss)
336 .org vmx_ia64_ivt+0x1400
337 /////////////////////////////////////////////////////////////////////////////////////////
338 // 0x1400 Entry 5 (size 64 bundles) Data nested TLB (6,45)
// Nested DTLB miss is not expected in this configuration: log and hang.
339 ENTRY(vmx_nested_dtlb_miss)
340 VMX_DBG_FAULT(5)
341 VMX_FAULT(5)
342 END(vmx_nested_dtlb_miss)
344 .org vmx_ia64_ivt+0x1800
345 /////////////////////////////////////////////////////////////////////////////////////////
346 // 0x1800 Entry 6 (size 64 bundles) Instruction Key Miss (24)
// Key-miss faults are reflected straight into the guest.
347 ENTRY(vmx_ikey_miss)
348 VMX_DBG_FAULT(6)
349 VMX_REFLECT(6)
350 END(vmx_ikey_miss)
352 .org vmx_ia64_ivt+0x1c00
353 /////////////////////////////////////////////////////////////////////////////////////////
354 // 0x1c00 Entry 7 (size 64 bundles) Data Key Miss (12,51)
355 ENTRY(vmx_dkey_miss)
356 VMX_DBG_FAULT(7)
357 VMX_REFLECT(7)
358 END(vmx_dkey_miss)
360 .org vmx_ia64_ivt+0x2000
361 /////////////////////////////////////////////////////////////////////////////////////////
362 // 0x2000 Entry 8 (size 64 bundles) Dirty-bit (54)
// Dirty-bit faults from guest context are reflected into the guest; from
// hypervisor context they fall through into VMX_FAULT(8) and hang.
363 ENTRY(vmx_dirty_bit)
364 VMX_DBG_FAULT(8)
365 VMX_REFLECT(8)
366 END(vmx_dirty_bit) // fixed: was END(vmx_idirty_bit), mismatching ENTRY(vmx_dirty_bit)
368 .org vmx_ia64_ivt+0x2400
369 /////////////////////////////////////////////////////////////////////////////////////////
370 // 0x2400 Entry 9 (size 64 bundles) Instruction Access-bit (27)
// Access-bit faults (instruction and data) are reflected into the guest.
371 ENTRY(vmx_iaccess_bit)
372 VMX_DBG_FAULT(9)
373 VMX_REFLECT(9)
374 END(vmx_iaccess_bit)
376 .org vmx_ia64_ivt+0x2800
377 /////////////////////////////////////////////////////////////////////////////////////////
378 // 0x2800 Entry 10 (size 64 bundles) Data Access-bit (15,55)
379 ENTRY(vmx_daccess_bit)
380 VMX_DBG_FAULT(10)
381 VMX_REFLECT(10)
382 END(vmx_daccess_bit)
384 .org vmx_ia64_ivt+0x2c00
385 /////////////////////////////////////////////////////////////////////////////////////////
386 // 0x2c00 Entry 11 (size 64 bundles) Break instruction (33)
// Break instruction fault.  Routes on the break immediate (cr.iim):
//   iim == 0       -> unexpected, hang in vmx_fault_11
//   iim == 0x1100  -> hypercall from the guest, go to vmx_hypercall_dispatch
//   anything else  -> generic break handling in vmx_dispatch_break_fault
387 ENTRY(vmx_break_fault)
388 VMX_DBG_FAULT(11)
389 mov r31=pr
390 mov r19=11
391 mov r30=cr.iim
392 movl r29=0x1100
393 ;;
394 cmp.eq p6,p7=r30,r0
395 (p6) br.sptk vmx_fault_11
396 ;;
397 cmp.eq p6,p7=r29,r30
398 (p6) br.dptk.few vmx_hypercall_dispatch
399 (p7) br.sptk.many vmx_dispatch_break_fault
400 ;;
401 VMX_FAULT(11);
402 END(vmx_break_fault)
404 .org vmx_ia64_ivt+0x3000
405 /////////////////////////////////////////////////////////////////////////////////////////
406 // 0x3000 Entry 12 (size 64 bundles) External Interrupt (4)
// External interrupt.  Guest-context interrupts (VM bit set, p7) go through
// vmx_dispatch_interrupt; hypervisor-context interrupts are handled inline:
// build a pt_regs frame on the stack by hand (an open-coded variant of the
// SAVE_MIN/SAVE_REST sequence), re-enable interruption collection, then call
// vmx_ia64_handle_irq with cr.ivr and the pt_regs pointer, returning via
// ia64_leave_nested.  p15 remembers whether psr.i was set so it can be
// restored after psr.ic is back on.  The statement order below is
// bundling/serialization sensitive -- do not reorder.
407 ENTRY(vmx_interrupt)
408 // VMX_DBG_FAULT(12)
409 mov r31=pr // prepare to save predicates
410 mov r19=12
411 mov r29=cr.ipsr
412 ;;
413 tbit.z p6,p7=r29,IA64_PSR_VM_BIT
414 tbit.z p0,p15=r29,IA64_PSR_I_BIT
415 ;;
416 (p7) br.sptk vmx_dispatch_interrupt
417 ;;
// --- inline minimal state save (hypervisor context) ---
418 mov r27=ar.rsc /* M */
419 mov r20=r1 /* A */
420 mov r25=ar.unat /* M */
421 mov r26=ar.pfs /* I */
422 mov r28=cr.iip /* M */
423 cover /* B (or nothing) */
424 ;;
425 mov r1=sp
426 ;;
427 invala /* M */
428 mov r30=cr.ifs
429 ;;
430 addl r1=-IA64_PT_REGS_SIZE,r1
431 ;;
432 adds r17=2*L1_CACHE_BYTES,r1 /* really: biggest cache-line size */
433 adds r16=PT(CR_IPSR),r1
434 ;;
435 lfetch.fault.excl.nt1 [r17],L1_CACHE_BYTES
436 st8 [r16]=r29 /* save cr.ipsr */
437 ;;
438 lfetch.fault.excl.nt1 [r17]
439 mov r29=b0
440 ;;
441 adds r16=PT(R8),r1 /* initialize first base pointer */
442 adds r17=PT(R9),r1 /* initialize second base pointer */
443 mov r18=r0 /* make sure r18 isn't NaT */
444 ;;
445 .mem.offset 0,0; st8.spill [r16]=r8,16
446 .mem.offset 8,0; st8.spill [r17]=r9,16
447 ;;
448 .mem.offset 0,0; st8.spill [r16]=r10,24
449 .mem.offset 8,0; st8.spill [r17]=r11,24
450 ;;
451 st8 [r16]=r28,16 /* save cr.iip */
452 st8 [r17]=r30,16 /* save cr.ifs */
453 mov r8=ar.fpsr /* M */
454 mov r9=ar.csd
455 mov r10=ar.ssd
456 movl r11=FPSR_DEFAULT /* L-unit */
457 ;;
458 st8 [r16]=r25,16 /* save ar.unat */
459 st8 [r17]=r26,16 /* save ar.pfs */
460 shl r18=r18,16 /* compute ar.rsc to be used for "loadrs" */
461 ;;
462 st8 [r16]=r27,16 /* save ar.rsc */
463 adds r17=16,r17 /* skip over ar_rnat field */
464 ;; /* avoid RAW on r16 & r17 */
465 st8 [r17]=r31,16 /* save predicates */
466 adds r16=16,r16 /* skip over ar_bspstore field */
467 ;;
468 st8 [r16]=r29,16 /* save b0 */
469 st8 [r17]=r18,16 /* save ar.rsc value for "loadrs" */
470 ;;
471 .mem.offset 0,0; st8.spill [r16]=r20,16 /* save original r1 */
472 .mem.offset 8,0; st8.spill [r17]=r12,16
473 adds r12=-16,r1 /* switch to kernel memory stack (with 16 bytes of scratch) */
474 ;;
475 .mem.offset 0,0; st8.spill [r16]=r13,16
476 .mem.offset 8,0; st8.spill [r17]=r8,16 /* save ar.fpsr */
477 mov r13=r21 /* establish `current' */
478 ;;
479 .mem.offset 0,0; st8.spill [r16]=r15,16
480 .mem.offset 8,0; st8.spill [r17]=r14,16
481 dep r14=-1,r0,60,4
482 ;;
483 .mem.offset 0,0; st8.spill [r16]=r2,16
484 .mem.offset 8,0; st8.spill [r17]=r3,16
485 adds r2=IA64_PT_REGS_R16_OFFSET,r1
486 ;;
487 mov r8=ar.ccv
488 movl r1=__gp /* establish kernel global pointer */
489 ;; \
490 bsw.1
491 ;;
// --- set up the call arguments and turn interruption collection back on ---
492 alloc r14=ar.pfs,0,0,2,0 // must be first in an insn group
493 mov out0=cr.ivr // pass cr.ivr as first arg
494 add out1=16,sp // pass pointer to pt_regs as second arg
496 ssm psr.ic
497 ;;
498 srlz.i
499 ;;
500 (p15) ssm psr.i
501 adds r3=8,r2 // set up second base pointer for SAVE_REST
502 srlz.i // ensure everybody knows psr.ic is back on
503 ;;
// --- spill the remaining scratch state (r16-r31, b6/b7, f6-f11, ar.*) ---
504 .mem.offset 0,0; st8.spill [r2]=r16,16
505 .mem.offset 8,0; st8.spill [r3]=r17,16
506 ;;
507 .mem.offset 0,0; st8.spill [r2]=r18,16
508 .mem.offset 8,0; st8.spill [r3]=r19,16
509 ;;
510 .mem.offset 0,0; st8.spill [r2]=r20,16
511 .mem.offset 8,0; st8.spill [r3]=r21,16
512 mov r18=b6
513 ;;
514 .mem.offset 0,0; st8.spill [r2]=r22,16
515 .mem.offset 8,0; st8.spill [r3]=r23,16
516 mov r19=b7
517 ;;
518 .mem.offset 0,0; st8.spill [r2]=r24,16
519 .mem.offset 8,0; st8.spill [r3]=r25,16
520 ;;
521 .mem.offset 0,0; st8.spill [r2]=r26,16
522 .mem.offset 8,0; st8.spill [r3]=r27,16
523 ;;
524 .mem.offset 0,0; st8.spill [r2]=r28,16
525 .mem.offset 8,0; st8.spill [r3]=r29,16
526 ;;
527 .mem.offset 0,0; st8.spill [r2]=r30,16
528 .mem.offset 8,0; st8.spill [r3]=r31,32
529 ;;
530 mov ar.fpsr=r11 /* M-unit */
531 st8 [r2]=r8,8 /* ar.ccv */
532 adds r24=PT(B6)-PT(F7),r3
533 ;;
534 stf.spill [r2]=f6,32
535 stf.spill [r3]=f7,32
536 ;;
537 stf.spill [r2]=f8,32
538 stf.spill [r3]=f9,32
539 ;;
540 stf.spill [r2]=f10
541 stf.spill [r3]=f11
542 adds r25=PT(B7)-PT(F11),r3
543 ;;
544 st8 [r24]=r18,16 /* b6 */
545 st8 [r25]=r19,16 /* b7 */
546 ;;
547 st8 [r24]=r9 /* ar.csd */
548 st8 [r25]=r10 /* ar.ssd */
549 ;;
// --- call the C IRQ handler; it returns through ia64_leave_nested ---
550 srlz.d // make sure we see the effect of cr.ivr
551 movl r14=ia64_leave_nested
552 ;;
553 mov rp=r14
554 br.call.sptk.many b6=vmx_ia64_handle_irq
555 ;;
556 END(vmx_interrupt)
558 .org vmx_ia64_ivt+0x3400
559 /////////////////////////////////////////////////////////////////////////////////////////
560 // 0x3400 Entry 13 (size 64 bundles) Reserved
// Reserved vector reused for virtual external interrupt injection: save pr
// and the vector number, then hand off to vmx_dispatch_vexirq.
561 ENTRY(vmx_virtual_exirq)
562 VMX_DBG_FAULT(13)
563 mov r31=pr
564 mov r19=13
565 br.sptk vmx_dispatch_vexirq
566 END(vmx_virtual_exirq)
// Reserved vectors 14-19: log (under VTI_DEBUG) and hang if ever taken.
568 .org vmx_ia64_ivt+0x3800
569 /////////////////////////////////////////////////////////////////////////////////////////
570 // 0x3800 Entry 14 (size 64 bundles) Reserved
571 VMX_DBG_FAULT(14)
572 VMX_FAULT(14)
575 .org vmx_ia64_ivt+0x3c00
576 /////////////////////////////////////////////////////////////////////////////////////////
577 // 0x3c00 Entry 15 (size 64 bundles) Reserved
578 VMX_DBG_FAULT(15)
579 VMX_FAULT(15)
582 .org vmx_ia64_ivt+0x4000
583 /////////////////////////////////////////////////////////////////////////////////////////
584 // 0x4000 Entry 16 (size 64 bundles) Reserved
585 VMX_DBG_FAULT(16)
586 VMX_FAULT(16)
588 .org vmx_ia64_ivt+0x4400
589 /////////////////////////////////////////////////////////////////////////////////////////
590 // 0x4400 Entry 17 (size 64 bundles) Reserved
591 VMX_DBG_FAULT(17)
592 VMX_FAULT(17)
594 .org vmx_ia64_ivt+0x4800
595 /////////////////////////////////////////////////////////////////////////////////////////
596 // 0x4800 Entry 18 (size 64 bundles) Reserved
597 VMX_DBG_FAULT(18)
598 VMX_FAULT(18)
600 .org vmx_ia64_ivt+0x4c00
601 /////////////////////////////////////////////////////////////////////////////////////////
602 // 0x4c00 Entry 19 (size 64 bundles) Reserved
603 VMX_DBG_FAULT(19)
604 VMX_FAULT(19)
// Entries 20-24 (first of the 16-bundle vectors).  Most are reflected to the
// guest; General Exception currently hangs (reflection is commented out).
606 .org vmx_ia64_ivt+0x5000
607 /////////////////////////////////////////////////////////////////////////////////////////
608 // 0x5000 Entry 20 (size 16 bundles) Page Not Present
609 ENTRY(vmx_page_not_present)
610 VMX_DBG_FAULT(20)
611 VMX_REFLECT(20)
612 END(vmx_page_not_present)
614 .org vmx_ia64_ivt+0x5100
615 /////////////////////////////////////////////////////////////////////////////////////////
616 // 0x5100 Entry 21 (size 16 bundles) Key Permission vector
617 ENTRY(vmx_key_permission)
618 VMX_DBG_FAULT(21)
619 VMX_REFLECT(21)
620 END(vmx_key_permission)
622 .org vmx_ia64_ivt+0x5200
623 /////////////////////////////////////////////////////////////////////////////////////////
624 // 0x5200 Entry 22 (size 16 bundles) Instruction Access Rights (26)
625 ENTRY(vmx_iaccess_rights)
626 VMX_DBG_FAULT(22)
627 VMX_REFLECT(22)
628 END(vmx_iaccess_rights)
630 .org vmx_ia64_ivt+0x5300
631 /////////////////////////////////////////////////////////////////////////////////////////
632 // 0x5300 Entry 23 (size 16 bundles) Data Access Rights (14,53)
633 ENTRY(vmx_daccess_rights)
634 VMX_DBG_FAULT(23)
635 VMX_REFLECT(23)
636 END(vmx_daccess_rights)
638 .org vmx_ia64_ivt+0x5400
639 /////////////////////////////////////////////////////////////////////////////////////////
640 // 0x5400 Entry 24 (size 16 bundles) General Exception (5,32,34,36,38,39)
641 ENTRY(vmx_general_exception)
642 VMX_DBG_FAULT(24)
643 VMX_FAULT(24)
644 // VMX_REFLECT(24)
645 END(vmx_general_exception)
// Entries 25-27: all reflected straight into the guest.
647 .org vmx_ia64_ivt+0x5500
648 /////////////////////////////////////////////////////////////////////////////////////////
649 // 0x5500 Entry 25 (size 16 bundles) Disabled FP-Register (35)
650 ENTRY(vmx_disabled_fp_reg)
651 VMX_DBG_FAULT(25)
652 VMX_REFLECT(25)
653 END(vmx_disabled_fp_reg)
655 .org vmx_ia64_ivt+0x5600
656 /////////////////////////////////////////////////////////////////////////////////////////
657 // 0x5600 Entry 26 (size 16 bundles) Nat Consumption (11,23,37,50)
658 ENTRY(vmx_nat_consumption)
659 VMX_DBG_FAULT(26)
660 VMX_REFLECT(26)
661 END(vmx_nat_consumption)
663 .org vmx_ia64_ivt+0x5700
664 /////////////////////////////////////////////////////////////////////////////////////////
665 // 0x5700 Entry 27 (size 16 bundles) Speculation (40)
666 ENTRY(vmx_speculation_vector)
667 VMX_DBG_FAULT(27)
668 VMX_REFLECT(27)
669 END(vmx_speculation_vector)
// Entries 28-32: Reserved and Debug hang; Unaligned Reference, Unsupported
// Data Reference and Floating-Point Fault are reflected to the guest.
671 .org vmx_ia64_ivt+0x5800
672 /////////////////////////////////////////////////////////////////////////////////////////
673 // 0x5800 Entry 28 (size 16 bundles) Reserved
674 VMX_DBG_FAULT(28)
675 VMX_FAULT(28)
677 .org vmx_ia64_ivt+0x5900
678 /////////////////////////////////////////////////////////////////////////////////////////
679 // 0x5900 Entry 29 (size 16 bundles) Debug (16,28,56)
680 ENTRY(vmx_debug_vector)
681 VMX_DBG_FAULT(29)
682 VMX_FAULT(29)
683 END(vmx_debug_vector)
685 .org vmx_ia64_ivt+0x5a00
686 /////////////////////////////////////////////////////////////////////////////////////////
687 // 0x5a00 Entry 30 (size 16 bundles) Unaligned Reference (57)
688 ENTRY(vmx_unaligned_access)
689 VMX_DBG_FAULT(30)
690 VMX_REFLECT(30)
691 END(vmx_unaligned_access)
693 .org vmx_ia64_ivt+0x5b00
694 /////////////////////////////////////////////////////////////////////////////////////////
695 // 0x5b00 Entry 31 (size 16 bundles) Unsupported Data Reference (57)
696 ENTRY(vmx_unsupported_data_reference)
697 VMX_DBG_FAULT(31)
698 VMX_REFLECT(31)
699 END(vmx_unsupported_data_reference)
701 .org vmx_ia64_ivt+0x5c00
702 /////////////////////////////////////////////////////////////////////////////////////////
703 // 0x5c00 Entry 32 (size 16 bundles) Floating-Point Fault (64)
704 ENTRY(vmx_floating_point_fault)
705 VMX_DBG_FAULT(32)
706 VMX_REFLECT(32)
707 END(vmx_floating_point_fault)
// Entries 33-36 (trap vectors): all reflected into the guest.
709 .org vmx_ia64_ivt+0x5d00
710 /////////////////////////////////////////////////////////////////////////////////////////
711 // 0x5d00 Entry 33 (size 16 bundles) Floating Point Trap (66)
712 ENTRY(vmx_floating_point_trap)
713 VMX_DBG_FAULT(33)
714 VMX_REFLECT(33)
715 END(vmx_floating_point_trap)
717 .org vmx_ia64_ivt+0x5e00
718 /////////////////////////////////////////////////////////////////////////////////////////
719 // 0x5e00 Entry 34 (size 16 bundles) Lower Privilege Transfer Trap (66)
720 ENTRY(vmx_lower_privilege_trap)
721 VMX_DBG_FAULT(34)
722 VMX_REFLECT(34)
723 END(vmx_lower_privilege_trap)
725 .org vmx_ia64_ivt+0x5f00
726 /////////////////////////////////////////////////////////////////////////////////////////
727 // 0x5f00 Entry 35 (size 16 bundles) Taken Branch Trap (68)
728 ENTRY(vmx_taken_branch_trap)
729 VMX_DBG_FAULT(35)
730 VMX_REFLECT(35)
731 END(vmx_taken_branch_trap)
733 .org vmx_ia64_ivt+0x6000
734 /////////////////////////////////////////////////////////////////////////////////////////
735 // 0x6000 Entry 36 (size 16 bundles) Single Step Trap (69)
736 ENTRY(vmx_single_step_trap)
737 VMX_DBG_FAULT(36)
738 VMX_REFLECT(36)
739 END(vmx_single_step_trap)
741 .org vmx_ia64_ivt+0x6100
742 /////////////////////////////////////////////////////////////////////////////////////////
743 // 0x6100 Entry 37 (size 16 bundles) Virtualization Fault
// Virtualization fault: stash the fault cause (r24) and the faulting opcode
// (r25) into the current vcpu structure (base in r21), then go to the
// instruction emulator via vmx_dispatch_virtualization_fault.
// NOTE(review): r24/r25 appear to be pre-loaded by hardware/firmware on this
// vector -- confirm against the VTI fault delivery convention.
744 ENTRY(vmx_virtualization_fault)
745 // VMX_DBG_FAULT(37)
746 mov r31=pr
747 mov r19=37
748 adds r16 = IA64_VCPU_CAUSE_OFFSET,r21
749 adds r17 = IA64_VCPU_OPCODE_OFFSET,r21
750 ;;
751 st8 [r16] = r24
752 st8 [r17] = r25
753 ;;
754 br.sptk vmx_dispatch_virtualization_fault
755 END(vmx_virtualization_fault)
// Reserved vectors 38-44: log (under VTI_DEBUG) and hang if ever taken.
757 .org vmx_ia64_ivt+0x6200
758 /////////////////////////////////////////////////////////////////////////////////////////
759 // 0x6200 Entry 38 (size 16 bundles) Reserved
760 VMX_DBG_FAULT(38)
761 VMX_FAULT(38)
763 .org vmx_ia64_ivt+0x6300
764 /////////////////////////////////////////////////////////////////////////////////////////
765 // 0x6300 Entry 39 (size 16 bundles) Reserved
766 VMX_DBG_FAULT(39)
767 VMX_FAULT(39)
769 .org vmx_ia64_ivt+0x6400
770 /////////////////////////////////////////////////////////////////////////////////////////
771 // 0x6400 Entry 40 (size 16 bundles) Reserved
772 VMX_DBG_FAULT(40)
773 VMX_FAULT(40)
775 .org vmx_ia64_ivt+0x6500
776 /////////////////////////////////////////////////////////////////////////////////////////
777 // 0x6500 Entry 41 (size 16 bundles) Reserved
778 VMX_DBG_FAULT(41)
779 VMX_FAULT(41)
781 .org vmx_ia64_ivt+0x6600
782 /////////////////////////////////////////////////////////////////////////////////////////
783 // 0x6600 Entry 42 (size 16 bundles) Reserved
784 VMX_DBG_FAULT(42)
785 VMX_FAULT(42)
787 .org vmx_ia64_ivt+0x6700
788 /////////////////////////////////////////////////////////////////////////////////////////
789 // 0x6700 Entry 43 (size 16 bundles) Reserved
790 VMX_DBG_FAULT(43)
791 VMX_FAULT(43)
793 .org vmx_ia64_ivt+0x6800
794 /////////////////////////////////////////////////////////////////////////////////////////
795 // 0x6800 Entry 44 (size 16 bundles) Reserved
796 VMX_DBG_FAULT(44)
797 VMX_FAULT(44)
// IA-32 compatibility vectors 45-47: IA-32 execution is not supported under
// VMX, so all three log and hang.
799 .org vmx_ia64_ivt+0x6900
800 /////////////////////////////////////////////////////////////////////////////////////////
801 // 0x6900 Entry 45 (size 16 bundles) IA-32 Exception (17,18,29,41,42,43,44,58,60,61,62,72,73,75,76,77)
802 ENTRY(vmx_ia32_exception)
803 VMX_DBG_FAULT(45)
804 VMX_FAULT(45)
805 END(vmx_ia32_exception)
807 .org vmx_ia64_ivt+0x6a00
808 /////////////////////////////////////////////////////////////////////////////////////////
809 // 0x6a00 Entry 46 (size 16 bundles) IA-32 Intercept (30,31,59,70,71)
810 ENTRY(vmx_ia32_intercept)
811 VMX_DBG_FAULT(46)
812 VMX_FAULT(46)
813 END(vmx_ia32_intercept)
815 .org vmx_ia64_ivt+0x6b00
816 /////////////////////////////////////////////////////////////////////////////////////////
817 // 0x6b00 Entry 47 (size 16 bundles) IA-32 Interrupt (74)
818 ENTRY(vmx_ia32_interrupt)
819 VMX_DBG_FAULT(47)
820 VMX_FAULT(47)
821 END(vmx_ia32_interrupt)
// Reserved vectors 48-67 (the remainder of the table): log (under VTI_DEBUG)
// and hang if ever taken.
823 .org vmx_ia64_ivt+0x6c00
824 /////////////////////////////////////////////////////////////////////////////////////////
825 // 0x6c00 Entry 48 (size 16 bundles) Reserved
826 VMX_DBG_FAULT(48)
827 VMX_FAULT(48)
829 .org vmx_ia64_ivt+0x6d00
830 /////////////////////////////////////////////////////////////////////////////////////////
831 // 0x6d00 Entry 49 (size 16 bundles) Reserved
832 VMX_DBG_FAULT(49)
833 VMX_FAULT(49)
835 .org vmx_ia64_ivt+0x6e00
836 /////////////////////////////////////////////////////////////////////////////////////////
837 // 0x6e00 Entry 50 (size 16 bundles) Reserved
838 VMX_DBG_FAULT(50)
839 VMX_FAULT(50)
841 .org vmx_ia64_ivt+0x6f00
842 /////////////////////////////////////////////////////////////////////////////////////////
843 // 0x6f00 Entry 51 (size 16 bundles) Reserved
844 VMX_DBG_FAULT(51)
845 VMX_FAULT(51)
847 .org vmx_ia64_ivt+0x7000
848 /////////////////////////////////////////////////////////////////////////////////////////
849 // 0x7000 Entry 52 (size 16 bundles) Reserved
850 VMX_DBG_FAULT(52)
851 VMX_FAULT(52)
853 .org vmx_ia64_ivt+0x7100
854 /////////////////////////////////////////////////////////////////////////////////////////
855 // 0x7100 Entry 53 (size 16 bundles) Reserved
856 VMX_DBG_FAULT(53)
857 VMX_FAULT(53)
859 .org vmx_ia64_ivt+0x7200
860 /////////////////////////////////////////////////////////////////////////////////////////
861 // 0x7200 Entry 54 (size 16 bundles) Reserved
862 VMX_DBG_FAULT(54)
863 VMX_FAULT(54)
865 .org vmx_ia64_ivt+0x7300
866 /////////////////////////////////////////////////////////////////////////////////////////
867 // 0x7300 Entry 55 (size 16 bundles) Reserved
868 VMX_DBG_FAULT(55)
869 VMX_FAULT(55)
871 .org vmx_ia64_ivt+0x7400
872 /////////////////////////////////////////////////////////////////////////////////////////
873 // 0x7400 Entry 56 (size 16 bundles) Reserved
874 VMX_DBG_FAULT(56)
875 VMX_FAULT(56)
877 .org vmx_ia64_ivt+0x7500
878 /////////////////////////////////////////////////////////////////////////////////////////
879 // 0x7500 Entry 57 (size 16 bundles) Reserved
880 VMX_DBG_FAULT(57)
881 VMX_FAULT(57)
883 .org vmx_ia64_ivt+0x7600
884 /////////////////////////////////////////////////////////////////////////////////////////
885 // 0x7600 Entry 58 (size 16 bundles) Reserved
886 VMX_DBG_FAULT(58)
887 VMX_FAULT(58)
889 .org vmx_ia64_ivt+0x7700
890 /////////////////////////////////////////////////////////////////////////////////////////
891 // 0x7700 Entry 59 (size 16 bundles) Reserved
892 VMX_DBG_FAULT(59)
893 VMX_FAULT(59)
895 .org vmx_ia64_ivt+0x7800
896 /////////////////////////////////////////////////////////////////////////////////////////
897 // 0x7800 Entry 60 (size 16 bundles) Reserved
898 VMX_DBG_FAULT(60)
899 VMX_FAULT(60)
901 .org vmx_ia64_ivt+0x7900
902 /////////////////////////////////////////////////////////////////////////////////////////
903 // 0x7900 Entry 61 (size 16 bundles) Reserved
904 VMX_DBG_FAULT(61)
905 VMX_FAULT(61)
907 .org vmx_ia64_ivt+0x7a00
908 /////////////////////////////////////////////////////////////////////////////////////////
909 // 0x7a00 Entry 62 (size 16 bundles) Reserved
910 VMX_DBG_FAULT(62)
911 VMX_FAULT(62)
913 .org vmx_ia64_ivt+0x7b00
914 /////////////////////////////////////////////////////////////////////////////////////////
915 // 0x7b00 Entry 63 (size 16 bundles) Reserved
916 VMX_DBG_FAULT(63)
917 VMX_FAULT(63)
919 .org vmx_ia64_ivt+0x7c00
920 /////////////////////////////////////////////////////////////////////////////////////////
921 // 0x7c00 Entry 64 (size 16 bundles) Reserved
922 VMX_DBG_FAULT(64)
923 VMX_FAULT(64)
925 .org vmx_ia64_ivt+0x7d00
926 /////////////////////////////////////////////////////////////////////////////////////////
927 // 0x7d00 Entry 65 (size 16 bundles) Reserved
928 VMX_DBG_FAULT(65)
929 VMX_FAULT(65)
931 .org vmx_ia64_ivt+0x7e00
932 /////////////////////////////////////////////////////////////////////////////////////////
933 // 0x7e00 Entry 66 (size 16 bundles) Reserved
934 VMX_DBG_FAULT(66)
935 VMX_FAULT(66)
937 .org vmx_ia64_ivt+0x7f00
938 /////////////////////////////////////////////////////////////////////////////////////////
939 // 0x7f00 Entry 67 (size 16 bundles) Reserved
940 VMX_DBG_FAULT(67)
941 VMX_FAULT(67)
943 .org vmx_ia64_ivt+0x8000
944 // There is no particular reason for this code to be here, other than that
945 // there happens to be space here that would go unused otherwise. If this
946 // fault ever gets "unreserved", simply move the following code to a more
947 // suitable spot...
// Common dispatcher for faults reflected into the guest: save minimal state,
// re-enable interruption collection (and psr.i if it was on -- p15), save the
// rest of the register frame, and call
// vmx_reflect_interruption(ifa, isr, iim, vector, pt_regs*),
// returning to the guest via ia64_leave_hypervisor.
950 ENTRY(vmx_dispatch_reflection)
951 /*
952 * Input:
953 * psr.ic: off
954 * r19: intr type (offset into ivt, see ia64_int.h)
955 * r31: contains saved predicates (pr)
956 */
957 VMX_SAVE_MIN_WITH_COVER_R19
958 alloc r14=ar.pfs,0,0,5,0
959 mov out0=cr.ifa
960 mov out1=cr.isr
961 mov out2=cr.iim
962 mov out3=r15
963 adds r3=8,r2 // set up second base pointer
964 ;;
965 ssm psr.ic
966 ;;
967 srlz.i // guarantee that interruption collection is on
968 ;;
969 (p15) ssm psr.i // restore psr.i
970 movl r14=ia64_leave_hypervisor
971 ;;
972 VMX_SAVE_REST
973 mov rp=r14
974 ;;
975 adds out4=16,r12
976 br.call.sptk.many b6=vmx_reflect_interruption
977 END(vmx_dispatch_reflection)
// Dispatcher for virtualization faults: save state, re-enable interruption
// collection, then call vmx_emulate(vcpu, pt_regs*) to emulate the
// virtualized instruction, returning via ia64_leave_hypervisor.
979 ENTRY(vmx_dispatch_virtualization_fault)
980 VMX_SAVE_MIN_WITH_COVER_R19
981 ;;
982 alloc r14=ar.pfs,0,0,2,0 // now it's safe (must be first in insn group!)
983 mov out0=r13 //vcpu
984 adds r3=8,r2 // set up second base pointer
985 ;;
986 ssm psr.ic
987 ;;
988 srlz.i // guarantee that interruption collection is on
989 ;;
990 (p15) ssm psr.i // restore psr.i
991 movl r14=ia64_leave_hypervisor
992 ;;
993 VMX_SAVE_REST
994 mov rp=r14
995 ;;
996 adds out1=16,sp //regs
997 br.call.sptk.many b6=vmx_emulate
998 END(vmx_dispatch_virtualization_fault)
// Dispatcher for virtual external interrupts: save state, turn interruption
// collection back on, then call vmx_vexirq(vcpu), returning via
// ia64_leave_hypervisor.
1001 ENTRY(vmx_dispatch_vexirq)
1002 VMX_SAVE_MIN_WITH_COVER_R19
1003 alloc r14=ar.pfs,0,0,1,0
1004 mov out0=r13
1006 ssm psr.ic
1007 ;;
1008 srlz.i // guarantee that interruption collection is on
1009 ;;
1010 (p15) ssm psr.i // restore psr.i
1011 adds r3=8,r2 // set up second base pointer
1012 ;;
1013 VMX_SAVE_REST
1014 movl r14=ia64_leave_hypervisor
1015 ;;
1016 mov rp=r14
1017 br.call.sptk.many b6=vmx_vexirq
1018 END(vmx_dispatch_vexirq)
// Slow-path TLB miss handler: reached from vmx_itlb_miss/vmx_dtlb_miss when
// the VHPT collision chain has no match (r19 = vector, r15 carried through).
// Calls vmx_hpw_miss(ifa, r15, pt_regs*) and returns via
// ia64_leave_hypervisor.
1020 ENTRY(vmx_dispatch_tlb_miss)
1021 VMX_SAVE_MIN_WITH_COVER_R19
1022 alloc r14=ar.pfs,0,0,3,0
1023 mov out0=cr.ifa
1024 mov out1=r15
1025 adds r3=8,r2 // set up second base pointer
1026 ;;
1027 ssm psr.ic
1028 ;;
1029 srlz.i // guarantee that interruption collection is on
1030 ;;
1031 (p15) ssm psr.i // restore psr.i
1032 movl r14=ia64_leave_hypervisor
1033 ;;
1034 VMX_SAVE_REST
1035 mov rp=r14
1036 ;;
1037 adds out2=16,r12
1038 br.call.sptk.many b6=vmx_hpw_miss
1039 END(vmx_dispatch_tlb_miss)
// Dispatcher for non-hypercall break faults: save state and call
// vmx_ia64_handle_break(ifa, pt_regs*, isr, iim), returning via
// ia64_leave_hypervisor.
1042 ENTRY(vmx_dispatch_break_fault)
1043 VMX_SAVE_MIN_WITH_COVER_R19
1044 ;;
1045 ;;
1046 alloc r14=ar.pfs,0,0,4,0 // now it's safe (must be first in insn group!)
1047 mov out0=cr.ifa
1048 mov out2=cr.isr // FIXME: pity to make this slow access twice
1049 mov out3=cr.iim // FIXME: pity to make this slow access twice
1050 adds r3=8,r2 // set up second base pointer
1051 ;;
1052 ssm psr.ic
1053 ;;
1054 srlz.i // guarantee that interruption collection is on
1055 ;;
1056 (p15)ssm psr.i // restore psr.i
1057 movl r14=ia64_leave_hypervisor
1058 ;;
1059 VMX_SAVE_REST
1060 mov rp=r14
1061 ;;
1062 adds out1=16,sp
1063 br.call.sptk.many b6=vmx_ia64_handle_break
1064 ;;
1065 END(vmx_dispatch_break_fault)
// Hypercall dispatcher (break immediate 0x1100): save state, then index
// hyper_call_table by the hypercall number in r15 (shladd ... ,3, => *8 for
// data8 entries) and branch to the handler; all handlers return through
// ia64_leave_hypervisor.  Out-of-range r15 is not checked here -- callers
// are trusted to pass a valid table index.
1068 ENTRY(vmx_hypercall_dispatch)
1069 VMX_SAVE_MIN_WITH_COVER
1070 ssm psr.ic
1071 ;;
1072 srlz.i // guarantee that interruption collection is on
1073 ;;
1074 (p15) ssm psr.i // restore psr.i
1075 adds r3=8,r2 // set up second base pointer
1076 ;;
1077 VMX_SAVE_REST
1078 ;;
1079 movl r14=ia64_leave_hypervisor
1080 movl r2=hyper_call_table
1081 ;;
1082 mov rp=r14
1083 shladd r2=r15,3,r2
1084 ;;
1085 ld8 r2=[r2]
1086 ;;
1087 mov b6=r2
1088 ;;
1089 br.call.sptk.many b6=b6
1090 ;;
1091 END(vmx_hypercall_dispatch)
// Dispatcher for external interrupts taken while in guest context: save
// state and call vmx_ia64_handle_irq(ivr, pt_regs*), returning via
// ia64_leave_hypervisor (contrast with the inline hypervisor-context path in
// vmx_interrupt, which returns via ia64_leave_nested).
1095 ENTRY(vmx_dispatch_interrupt)
1096 VMX_SAVE_MIN_WITH_COVER_R19 // uses r31; defines r2 and r3
1097 ;;
1098 alloc r14=ar.pfs,0,0,2,0 // must be first in an insn group
1099 mov out0=cr.ivr // pass cr.ivr as first arg
1100 adds r3=8,r2 // set up second base pointer for SAVE_REST
1101 ;;
1102 ssm psr.ic
1103 ;;
1104 srlz.i
1105 ;;
1106 (p15) ssm psr.i
1107 movl r14=ia64_leave_hypervisor
1108 ;;
1109 VMX_SAVE_REST
1110 mov rp=r14
1111 ;;
1112 add out1=16,sp // pass pointer to pt_regs as second arg
1113 br.call.sptk.many b6=vmx_ia64_handle_irq
1114 END(vmx_dispatch_interrupt)
// Hypercall jump table, indexed by hypercall number (r15) from
// vmx_hypercall_dispatch.  Slot order mirrors the generic Xen hypercall
// numbering; unimplemented slots point at hyper_not_support (the original
// handler name is kept in the trailing comment).  Keep entries 8 bytes
// (data8) -- the dispatcher scales the index by 8.
1118 .rodata
1119 .align 8
1120 .globl hyper_call_table
1121 hyper_call_table:
1122 data8 hyper_not_support //hyper_set_trap_table /* 0 */
1123 data8 hyper_mmu_update
1124 data8 hyper_not_support //hyper_set_gdt
1125 data8 hyper_not_support //hyper_stack_switch
1126 data8 hyper_not_support //hyper_set_callbacks
1127 data8 hyper_not_support //hyper_fpu_taskswitch /* 5 */
1128 data8 hyper_sched_op_compat
1129 data8 hyper_dom0_op
1130 data8 hyper_not_support //hyper_set_debugreg
1131 data8 hyper_not_support //hyper_get_debugreg
1132 data8 hyper_not_support //hyper_update_descriptor /* 10 */
1133 data8 hyper_not_support //hyper_set_fast_trap
1134 data8 hyper_dom_mem_op
1135 data8 hyper_not_support //hyper_multicall
1136 data8 hyper_not_support //hyper_update_va_mapping
1137 data8 hyper_not_support //hyper_set_timer_op /* 15 */
1138 data8 hyper_event_channel_op
1139 data8 hyper_xen_version
1140 data8 hyper_not_support //hyper_console_io
1141 data8 hyper_not_support //hyper_physdev_op
1142 data8 hyper_not_support //hyper_grant_table_op /* 20 */
1143 data8 hyper_not_support //hyper_vm_assist
1144 data8 hyper_not_support //hyper_update_va_mapping_otherdomain
1145 data8 hyper_not_support //hyper_switch_vm86
1146 data8 hyper_not_support //hyper_boot_vcpu
1147 data8 hyper_not_support //hyper_ni_hypercall /* 25 */
1148 data8 hyper_not_support //hyper_mmuext_op
1149 data8 hyper_not_support //data8 hyper_lock_page
1150 data8 hyper_set_shared_page