ia64/xen-unstable

view xen/arch/ia64/vmx_ivt.S @ 5797:ca44d2dbb273

Intel's pre-bk->hg transition patches
Signed-off-by: Eddie Dong <Eddie.dong@intel.com>
Signed-off-by: Anthony Xu <Anthony.xu@intel.com>
Signed-off-by: Kevin Tian <Kevin.tian@intel.com>
author djm@kirby.fc.hp.com
date Sat Jul 09 07:58:56 2005 -0700 (2005-07-09)
parents 649cd37aa1ab
children a83ac0806d6b
line source
1 /*
2 * arch/ia64/kernel/vmx_ivt.S
3 *
4 * Copyright (C) 1998-2001, 2003 Hewlett-Packard Co
5 * Stephane Eranian <eranian@hpl.hp.com>
6 * David Mosberger <davidm@hpl.hp.com>
7 * Copyright (C) 2000, 2002-2003 Intel Co
8 * Asit Mallick <asit.k.mallick@intel.com>
9 * Suresh Siddha <suresh.b.siddha@intel.com>
10 * Kenneth Chen <kenneth.w.chen@intel.com>
11 * Fenghua Yu <fenghua.yu@intel.com>
12 *
13 *
14 * 00/08/23 Asit Mallick <asit.k.mallick@intel.com> TLB handling for SMP
15 * 00/12/20 David Mosberger-Tang <davidm@hpl.hp.com> DTLB/ITLB handler now uses virtual PT.
16 *
17 * 05/3/20 Xuefei Xu (Anthony Xu) (anthony.xu@intel.com)
18 * Supporting Intel virtualization architecture
19 *
20 */
22 /*
23 * This file defines the interruption vector table used by the CPU.
24 * It does not include one entry per possible cause of interruption.
25 *
26 * The first 20 entries of the table contain 64 bundles each while the
27 * remaining 48 entries contain only 16 bundles each.
28 *
29 * The 64 bundles are used to allow inlining the whole handler for critical
30 * interruptions like TLB misses.
31 *
32 * For each entry, the comment is as follows:
33 *
34 * // 0x1c00 Entry 7 (size 64 bundles) Data Key Miss (12,51)
35 * entry offset ----/ / / / /
36 * entry number ---------/ / / /
37 * size of the entry -------------/ / /
38 * vector name -------------------------------------/ /
39 * interruptions triggering this vector ----------------------/
40 *
41 * The table is 32KB in size and must be aligned on 32KB boundary.
42 * (The CPU ignores the 15 lower bits of the address)
43 *
44 * Table is based upon EAS2.6 (Oct 1999)
45 */
47 #include <linux/config.h>
49 #include <asm/asmmacro.h>
50 #include <asm/break.h>
51 #include <asm/ia32.h>
52 #include <asm/kregs.h>
53 #include <asm/offsets.h>
54 #include <asm/pgtable.h>
55 #include <asm/processor.h>
56 #include <asm/ptrace.h>
57 #include <asm/system.h>
58 #include <asm/thread_info.h>
59 #include <asm/unistd.h>
60 #include <asm/vhpt.h>
// VMX_DBG_FAULT(i): optional fault-trace aid. When the "#if 0" is flipped
// on, it shifts the last eight vector numbers through ar.k2 (one byte
// each), giving a crude ring buffer of recent faults readable from a
// debugger. Disabled by default so ar.k2 remains free for other uses.
63 #if 0
64 /*
65 * This lets you track the last eight faults that occurred on the CPU. Make sure ar.k2 isn't
66 * needed for something else before enabling this...
67 */
68 # define VMX_DBG_FAULT(i) mov r16=ar.k2;; shl r16=r16,8;; add r16=(i),r16;;mov ar.k2=r16
69 #else
70 # define VMX_DBG_FAULT(i)
71 #endif
73 #include "vmx_minstate.h"
// VMX_FAULT(n): emits label vmx_fault_n that branches to itself, i.e. an
// unhandled vector deliberately parks the CPU in a tight spin so the
// condition is obvious under a debugger instead of being silently ignored.
// NOTE(review): the trailing backslash after the final ";;" splices the
// following (blank) source line into the macro — harmless here, but any
// non-blank line placed directly after this #define would be swallowed.
77 #define VMX_FAULT(n) \
78 vmx_fault_##n:; \
79 br.sptk vmx_fault_##n; \
80 ;; \
// VMX_REFLECT(n): common entry body for vectors that are reflected into
// the guest. Saves predicates in r31 and the vector number in r19 (the
// registers vmx_dispatch_reflection expects), then tests psr.vm in ipsr:
// p7 (vm=1, fault raised while the guest was running) -> reflect it via
// vmx_dispatch_reflection; p6 (vm=0, fault from the host itself) falls
// into the VMX_FAULT(n) spin — such faults are not expected from Xen code.
83 #define VMX_REFLECT(n) \
84 mov r31=pr; \
85 mov r19=n; /* prepare to save predicates */ \
86 mov r29=cr.ipsr; \
87 ;; \
88 tbit.z p6,p7=r29,IA64_PSR_VM_BIT; \
89 (p7) br.sptk.many vmx_dispatch_reflection; \
90 VMX_FAULT(n); \
// vmx_panic: terminal spin loop. Branches to itself forever; used as a
// "stop here, attach a debugger" sink rather than attempting recovery.
93 GLOBAL_ENTRY(vmx_panic)
94 br.sptk.many vmx_panic
95 ;;
96 END(vmx_panic)
// Start of the VMX interruption vector table. Hardware transfers control
// to vmx_ia64_ivt plus a fixed per-vector offset, so the position of every
// entry below is pinned with .org directives and the table itself must be
// 32KB-aligned (the CPU ignores the low 15 address bits).
102 .section .text.ivt,"ax"
104 .align 32768 // align on 32KB boundary
105 .global vmx_ia64_ivt
106 vmx_ia64_ivt:
107 /////////////////////////////////////////////////////////////////////////////////////////
108 // 0x0000 Entry 0 (size 64 bundles) VHPT Translation (8,20,47)
// VHPT translation faults are not handled here: VMX_FAULT(0) parks the
// CPU in a self-branch loop (the VHPT walker is presumably not enabled in
// this configuration — TODO confirm against the VMX setup code).
109 ENTRY(vmx_vhpt_miss)
110 VMX_FAULT(0)
111 END(vmx_vhpt_miss)
113 .org vmx_ia64_ivt+0x400
114 /////////////////////////////////////////////////////////////////////////////////////////
115 // 0x0400 Entry 1 (size 64 bundles) ITLB (21)
// Guest instruction-TLB miss. Host-mode misses (psr.vm=0 -> p6) are
// unexpected and spin in vmx_fault_1. For guest misses, the software VTLB
// is searched: thash/ttag locate the hash bucket and tag for the faulting
// address, and the collision chain is walked (VLE_* offsets index fields
// of a VTLB entry — declared elsewhere). On a tag match the translation
// is installed with itc.i and the guest is resumed through ia64_vmm_entry;
// if the chain ends (NULL), fall out to the C miss handler.
116 ENTRY(vmx_itlb_miss)
117 mov r31 = pr
118 mov r29=cr.ipsr;
119 ;;
120 tbit.z p6,p7=r29,IA64_PSR_VM_BIT;
121 (p6) br.sptk vmx_fault_1
// p7: fault came from the guest — look it up in the VTLB.
122 mov r16 = cr.ifa
123 ;;
124 thash r17 = r16 // r17 = hash bucket address for ifa
125 ttag r20 = r16 // r20 = tag to match against chain entries
126 ;;
127 vmx_itlb_loop:
// Invariant: r17 = current VTLB entry (0 terminates the chain).
128 cmp.eq p6,p0 = r0, r17
129 (p6) br vmx_itlb_out
130 ;;
131 adds r22 = VLE_TITAG_OFFSET, r17
132 adds r23 = VLE_CCHAIN_OFFSET, r17
133 ;;
134 ld8 r24 = [r22] // r24 = entry tag
135 ld8 r25 = [r23] // r25 = next entry in collision chain
136 ;;
137 lfetch [r25] // prefetch the next entry while comparing
138 cmp.eq p6,p7 = r20, r24
139 ;;
140 (p7) mov r17 = r25;
141 (p7) br.sptk vmx_itlb_loop
142 ;;
// Tag matched: install the cached translation.
143 adds r23 = VLE_PGFLAGS_OFFSET, r17
144 adds r24 = VLE_ITIR_OFFSET, r17
145 ;;
146 ld8 r26 = [r23] // r26 = pte/page flags
147 ld8 r25 = [r24] // r25 = itir (page size/key)
148 ;;
149 mov cr.itir = r25
150 ;;
151 itc.i r26 // insert into the instruction TC
152 ;;
153 srlz.i
154 ;;
// Resume the guest: r23=saved pr, r22=saved b0, r18=VPD, r19=guest vpsr,
// r20=PAL VSA base — presumably the register contract expected by
// ia64_vmm_entry (defined elsewhere); r21 is assumed to hold the current
// vcpu pointer — TODO confirm against vmx_minstate.h.
155 mov r23=r31
156 mov r22=b0
157 adds r16=IA64_VPD_BASE_OFFSET,r21
158 ;;
159 ld8 r18=[r16]
160 ;;
161 adds r19=VPD(VPSR),r18
162 movl r20=__vsa_base
163 ;;
164 ld8 r19=[r19]
165 ld8 r20=[r20]
166 ;;
167 br.sptk ia64_vmm_entry
168 ;;
169 vmx_itlb_out:
// Not in the VTLB: hand off to the C handler (r19=1 identifies the vector).
170 mov r19 = 1
171 br.sptk vmx_dispatch_tlb_miss
172 VMX_FAULT(1);
173 END(vmx_itlb_miss)
175 .org vmx_ia64_ivt+0x0800
176 /////////////////////////////////////////////////////////////////////////////////////////
177 // 0x0800 Entry 2 (size 64 bundles) DTLB (9,48)
// Guest data-TLB miss — mirror image of vmx_itlb_miss above, but inserts
// into the data TC (itc.d / srlz.d) and reports vector 2 to the C handler.
// Host-mode misses (psr.vm=0 -> p6) spin in vmx_fault_2.
178 ENTRY(vmx_dtlb_miss)
179 mov r31 = pr
180 mov r29=cr.ipsr;
181 ;;
182 tbit.z p6,p7=r29,IA64_PSR_VM_BIT;
183 (p6)br.sptk vmx_fault_2
184 mov r16 = cr.ifa
185 ;;
186 thash r17 = r16 // r17 = hash bucket address for ifa
187 ttag r20 = r16 // r20 = tag to match
188 ;;
189 vmx_dtlb_loop:
// Invariant: r17 = current VTLB entry (0 terminates the chain).
190 cmp.eq p6,p0 = r0, r17
191 (p6)br vmx_dtlb_out
192 ;;
193 adds r22 = VLE_TITAG_OFFSET, r17
194 adds r23 = VLE_CCHAIN_OFFSET, r17
195 ;;
196 ld8 r24 = [r22] // r24 = entry tag
197 ld8 r25 = [r23] // r25 = next chain entry
198 ;;
199 lfetch [r25]
200 cmp.eq p6,p7 = r20, r24
201 ;;
202 (p7)mov r17 = r25;
203 (p7)br.sptk vmx_dtlb_loop
204 ;;
// Tag matched: install the cached translation.
205 adds r23 = VLE_PGFLAGS_OFFSET, r17
206 adds r24 = VLE_ITIR_OFFSET, r17
207 ;;
208 ld8 r26 = [r23] // r26 = pte/page flags
209 ld8 r25 = [r24] // r25 = itir
210 ;;
211 mov cr.itir = r25
212 ;;
213 itc.d r26 // insert into the data TC
214 ;;
215 srlz.d;
216 ;;
// Resume the guest via ia64_vmm_entry (same register contract as the
// ITLB path: r23=pr, r22=b0, r18=VPD, r19=vpsr, r20=VSA base).
217 mov r23=r31
218 mov r22=b0
219 adds r16=IA64_VPD_BASE_OFFSET,r21
220 ;;
221 ld8 r18=[r16]
222 ;;
223 adds r19=VPD(VPSR),r18
224 movl r20=__vsa_base
225 ;;
226 ld8 r19=[r19]
227 ld8 r20=[r20]
228 ;;
229 br.sptk ia64_vmm_entry
230 ;;
231 vmx_dtlb_out:
// Not in the VTLB: hand off to the C handler (r19=2 identifies the vector).
232 mov r19 = 2
233 br.sptk vmx_dispatch_tlb_miss
234 VMX_FAULT(2);
235 END(vmx_dtlb_miss)
237 .org vmx_ia64_ivt+0x0c00
238 /////////////////////////////////////////////////////////////////////////////////////////
239 // 0x0c00 Entry 3 (size 64 bundles) Alt ITLB (19)
// Alternate ITLB miss: handles HOST-side misses by installing an identity
// mapping with PAGE_KERNEL attributes. Note the predicate sense is the
// opposite of vmx_itlb_miss — here p7 (psr.vm=1, guest) is the unexpected
// case and spins in vmx_fault_3.
// NOTE(review): "shr.u ...,55" extracts address bit 59 into bit 4, but
// the inline comments talk about bit 61 / region 6 — the shift amount and
// the comments disagree; confirm which bit is intended to select
// uncacheable mappings in this address-encoding scheme.
240 ENTRY(vmx_alt_itlb_miss)
241 mov r31 = pr
242 mov r29=cr.ipsr;
243 ;;
244 tbit.z p6,p7=r29,IA64_PSR_VM_BIT;
245 (p7)br.sptk vmx_fault_3
246 mov r16=cr.ifa // get address that caused the TLB miss
247 movl r17=PAGE_KERNEL
248 mov r24=cr.ipsr
249 movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
250 ;;
251 and r19=r19,r16 // clear ed, reserved bits, and PTE control bits
252 shr.u r18=r16,55 // move address bit 59 to bit 4
253 ;;
254 and r18=0x10,r18 // bit 4=address-bit(61)
255 or r19=r17,r19 // insert PTE control bits into r19
256 ;;
257 or r19=r19,r18 // set bit 4 (uncached) if the access was to region 6
258 ;;
259 itc.i r19 // insert the TLB entry
260 mov pr=r31,-1
261 rfi
262 VMX_FAULT(3);
263 END(vmx_alt_itlb_miss)
266 .org vmx_ia64_ivt+0x1000
267 /////////////////////////////////////////////////////////////////////////////////////////
268 // 0x1000 Entry 4 (size 64 bundles) Alt DTLB (7,46)
// Alternate DTLB miss: host-side identity mapping, like vmx_alt_itlb_miss
// above, plus special handling for speculative/non-access references
// (speculative loads and lfetch faults must not insert a translation —
// instead psr.ed is set so the access retries as a deferred NaT).
// Guest-mode misses (p7 on the vm bit test) spin in vmx_fault_4.
// NOTE(review): same bit-59-vs-61 shift/comment discrepancy as in the
// alt-ITLB handler — confirm the intended uncacheable-region bit.
269 ENTRY(vmx_alt_dtlb_miss)
270 mov r31=pr
271 mov r29=cr.ipsr;
272 ;;
273 tbit.z p6,p7=r29,IA64_PSR_VM_BIT;
274 (p7)br.sptk vmx_fault_4
275 mov r16=cr.ifa // get address that caused the TLB miss
276 movl r17=PAGE_KERNEL
277 mov r20=cr.isr
278 movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
279 mov r24=cr.ipsr
280 ;;
281 and r22=IA64_ISR_CODE_MASK,r20 // get the isr.code field
282 tbit.nz p6,p7=r20,IA64_ISR_SP_BIT // is speculation bit on?
283 shr.u r18=r16,55 // move address bit 59 to bit 4
284 and r19=r19,r16 // clear ed, reserved bits, and PTE control bits
285 tbit.nz p9,p0=r20,IA64_ISR_NA_BIT // is non-access bit on?
286 ;;
287 and r18=0x10,r18 // bit 4=address-bit(61)
288 (p9) cmp.eq.or.andcm p6,p7=IA64_ISR_CODE_LFETCH,r22 // check isr.code field
289 dep r24=-1,r24,IA64_PSR_ED_BIT,1
290 or r19=r19,r17 // insert PTE control bits into r19
291 ;;
292 or r19=r19,r18 // set bit 4 (uncached) if the access was to region 6
// p6: speculative access or lfetch — defer it (set psr.ed, no insert);
// p7: ordinary access — insert the identity translation.
293 (p6) mov cr.ipsr=r24
294 ;;
295 (p7) itc.d r19 // insert the TLB entry
296 mov pr=r31,-1
297 rfi
298 VMX_FAULT(4);
299 END(vmx_alt_dtlb_miss)
301 .org vmx_ia64_ivt+0x1400
302 /////////////////////////////////////////////////////////////////////////////////////////
303 // 0x1400 Entry 5 (size 64 bundles) Data nested TLB (6,45)
// A nested DTLB miss (miss while already handling a miss) is fatal here:
// the handler just parks in the vmx_fault_5 spin loop.
304 ENTRY(vmx_nested_dtlb_miss)
305 VMX_FAULT(5)
306 END(vmx_nested_dtlb_miss)
308 .org vmx_ia64_ivt+0x1800
309 /////////////////////////////////////////////////////////////////////////////////////////
310 // 0x1800 Entry 6 (size 64 bundles) Instruction Key Miss (24)
// Key misses are reflected straight into the guest.
311 ENTRY(vmx_ikey_miss)
312 VMX_REFLECT(6)
313 END(vmx_ikey_miss)
315 .org vmx_ia64_ivt+0x1c00
316 /////////////////////////////////////////////////////////////////////////////////////////
317 // 0x1c00 Entry 7 (size 64 bundles) Data Key Miss (12,51)
318 ENTRY(vmx_dkey_miss)
319 VMX_REFLECT(7)
320 END(vmx_dkey_miss)
322 .org vmx_ia64_ivt+0x2000
323 /////////////////////////////////////////////////////////////////////////////////////////
324 // 0x2000 Entry 8 (size 64 bundles) Dirty-bit (54)
// Dirty-bit faults from the guest are reflected back into it (VMX_REFLECT);
// a host-mode dirty-bit fault parks in the vmx_fault_8 spin loop.
// Fix: END() previously named "vmx_idirty_bit", which does not match the
// ENTRY(vmx_dirty_bit) symbol — the ENTRY/END pair must name the same
// procedure for the size/unwind bookkeeping these macros emit.
325 ENTRY(vmx_dirty_bit)
326 VMX_REFLECT(8)
327 END(vmx_dirty_bit)
329 .org vmx_ia64_ivt+0x2400
330 /////////////////////////////////////////////////////////////////////////////////////////
331 // 0x2400 Entry 9 (size 64 bundles) Instruction Access-bit (27)
// Access-bit faults are reflected into the guest (see VMX_REFLECT).
332 ENTRY(vmx_iaccess_bit)
333 VMX_REFLECT(9)
334 END(vmx_iaccess_bit)
336 .org vmx_ia64_ivt+0x2800
337 /////////////////////////////////////////////////////////////////////////////////////////
338 // 0x2800 Entry 10 (size 64 bundles) Data Access-bit (15,55)
339 ENTRY(vmx_daccess_bit)
340 VMX_REFLECT(10)
341 END(vmx_daccess_bit)
343 .org vmx_ia64_ivt+0x2c00
344 /////////////////////////////////////////////////////////////////////////////////////////
345 // 0x2c00 Entry 11 (size 64 bundles) Break instruction (33)
// Break-instruction fault, keyed on the break immediate (cr.iim):
//   iim == 0      -> unexpected, spin in vmx_fault_11
//   iim == 0x1100 -> hypercall: route to vmx_hypercall_dispatch
//                    (0x1100 is presumably the Xen/ia64 hypercall break
//                    immediate — TODO confirm against the public headers)
//   anything else -> generic break handling via vmx_dispatch_break_fault
346 ENTRY(vmx_break_fault)
347 mov r31=pr
348 mov r19=11
349 mov r30=cr.iim
350 movl r29=0x1100
351 ;;
352 cmp.eq p6,p7=r30,r0
353 (p6) br.sptk vmx_fault_11
354 ;;
355 cmp.eq p6,p7=r29,r30
356 (p6) br.dptk.few vmx_hypercall_dispatch
357 (p7) br.sptk.many vmx_dispatch_break_fault
358 ;;
359 VMX_FAULT(11);
360 END(vmx_break_fault)
362 .org vmx_ia64_ivt+0x3000
363 /////////////////////////////////////////////////////////////////////////////////////////
364 // 0x3000 Entry 12 (size 64 bundles) External Interrupt (4)
//
// If the interrupt arrived while the guest was running (psr.vm=1 -> p7)
// it is routed to vmx_dispatch_interrupt. Otherwise a pt_regs frame is
// built inline (a hand-expanded SAVE_MIN/SAVE_REST sequence) and
// vmx_ia64_handle_irq(cr.ivr, pt_regs*) is called with rp pointing at
// ia64_leave_nested so the return path unwinds this frame.
// p15 records whether psr.i was set so interrupts are re-enabled (ssm
// psr.i) only if they were enabled when the interrupt was taken.
// r21 is assumed to hold the 'current' pointer (copied into r13) —
// maintained by the VMX entry code; confirm against vmx_minstate.h.
// Fix: removed a stray line-continuation backslash after the ";;" that
// precedes bsw.1 — a leftover from the macro version of this code.
365 ENTRY(vmx_interrupt)
366 mov r31=pr // prepare to save predicates
367 mov r19=12
368 mov r29=cr.ipsr
369 ;;
370 tbit.z p6,p7=r29,IA64_PSR_VM_BIT
371 tbit.z p0,p15=r29,IA64_PSR_I_BIT
372 ;;
373 (p7) br.sptk vmx_dispatch_interrupt
374 ;;
// Host path: capture the interrupted context before touching anything.
375 mov r27=ar.rsc /* M */
376 mov r20=r1 /* A */
377 mov r25=ar.unat /* M */
378 mov r26=ar.pfs /* I */
379 mov r28=cr.iip /* M */
380 cover /* B (or nothing) */
381 ;;
382 mov r1=sp
383 ;;
384 invala /* M */
385 mov r30=cr.ifs
386 ;;
387 addl r1=-IA64_PT_REGS_SIZE,r1
388 ;;
389 adds r17=2*L1_CACHE_BYTES,r1 /* really: biggest cache-line size */
390 adds r16=PT(CR_IPSR),r1
391 ;;
392 lfetch.fault.excl.nt1 [r17],L1_CACHE_BYTES
393 st8 [r16]=r29 /* save cr.ipsr */
394 ;;
395 lfetch.fault.excl.nt1 [r17]
396 mov r29=b0
397 ;;
// Spill scratch registers and interruption state into pt_regs using two
// interleaved base pointers (r16/r17) to keep both memory ports busy.
398 adds r16=PT(R8),r1 /* initialize first base pointer */
399 adds r17=PT(R9),r1 /* initialize second base pointer */
400 mov r18=r0 /* make sure r18 isn't NaT */
401 ;;
402 .mem.offset 0,0; st8.spill [r16]=r8,16
403 .mem.offset 8,0; st8.spill [r17]=r9,16
404 ;;
405 .mem.offset 0,0; st8.spill [r16]=r10,24
406 .mem.offset 8,0; st8.spill [r17]=r11,24
407 ;;
408 st8 [r16]=r28,16 /* save cr.iip */
409 st8 [r17]=r30,16 /* save cr.ifs */
410 mov r8=ar.fpsr /* M */
411 mov r9=ar.csd
412 mov r10=ar.ssd
413 movl r11=FPSR_DEFAULT /* L-unit */
414 ;;
415 st8 [r16]=r25,16 /* save ar.unat */
416 st8 [r17]=r26,16 /* save ar.pfs */
417 shl r18=r18,16 /* compute ar.rsc to be used for "loadrs" */
418 ;;
419 st8 [r16]=r27,16 /* save ar.rsc */
420 adds r17=16,r17 /* skip over ar_rnat field */
421 ;; /* avoid RAW on r16 & r17 */
422 st8 [r17]=r31,16 /* save predicates */
423 adds r16=16,r16 /* skip over ar_bspstore field */
424 ;;
425 st8 [r16]=r29,16 /* save b0 */
426 st8 [r17]=r18,16 /* save ar.rsc value for "loadrs" */
427 ;;
428 .mem.offset 0,0; st8.spill [r16]=r20,16 /* save original r1 */
429 .mem.offset 8,0; st8.spill [r17]=r12,16
430 adds r12=-16,r1 /* switch to kernel memory stack (with 16 bytes of scratch) */
431 ;;
432 .mem.offset 0,0; st8.spill [r16]=r13,16
433 .mem.offset 8,0; st8.spill [r17]=r8,16 /* save ar.fpsr */
434 mov r13=r21 /* establish `current' */
435 ;;
436 .mem.offset 0,0; st8.spill [r16]=r15,16
437 .mem.offset 8,0; st8.spill [r17]=r14,16
438 dep r14=-1,r0,60,4
439 ;;
440 .mem.offset 0,0; st8.spill [r16]=r2,16
441 .mem.offset 8,0; st8.spill [r17]=r3,16
442 adds r2=IA64_PT_REGS_R16_OFFSET,r1
443 ;;
444 mov r8=ar.ccv
445 movl r1=__gp /* establish kernel global pointer */
446 ;;
447 bsw.1
448 ;;
449 alloc r14=ar.pfs,0,0,2,0 // must be first in an insn group
450 mov out0=cr.ivr // pass cr.ivr as first arg
451 mov out1=16,sp // pass pointer to pt_regs as second arg
453 ssm psr.ic
454 ;;
455 srlz.i
456 ;;
457 (p15) ssm psr.i
458 adds r3=8,r2 // set up second base pointer for SAVE_REST
459 srlz.i // ensure everybody knows psr.ic is back on
460 ;;
// SAVE_REST: spill r16-r31 (bank-1 view after bsw.1), branch regs and
// floating-point scratch into the pt_regs frame.
461 .mem.offset 0,0; st8.spill [r2]=r16,16
462 .mem.offset 8,0; st8.spill [r3]=r17,16
463 ;;
464 .mem.offset 0,0; st8.spill [r2]=r18,16
465 .mem.offset 8,0; st8.spill [r3]=r19,16
466 ;;
467 .mem.offset 0,0; st8.spill [r2]=r20,16
468 .mem.offset 8,0; st8.spill [r3]=r21,16
469 mov r18=b6
470 ;;
471 .mem.offset 0,0; st8.spill [r2]=r22,16
472 .mem.offset 8,0; st8.spill [r3]=r23,16
473 mov r19=b7
474 ;;
475 .mem.offset 0,0; st8.spill [r2]=r24,16
476 .mem.offset 8,0; st8.spill [r3]=r25,16
477 ;;
478 .mem.offset 0,0; st8.spill [r2]=r26,16
479 .mem.offset 8,0; st8.spill [r3]=r27,16
480 ;;
481 .mem.offset 0,0; st8.spill [r2]=r28,16
482 .mem.offset 8,0; st8.spill [r3]=r29,16
483 ;;
484 .mem.offset 0,0; st8.spill [r2]=r30,16
485 .mem.offset 8,0; st8.spill [r3]=r31,32
486 ;;
487 mov ar.fpsr=r11 /* M-unit */
488 st8 [r2]=r8,8 /* ar.ccv */
489 adds r24=PT(B6)-PT(F7),r3
490 ;;
491 stf.spill [r2]=f6,32
492 stf.spill [r3]=f7,32
493 ;;
494 stf.spill [r2]=f8,32
495 stf.spill [r3]=f9,32
496 ;;
497 stf.spill [r2]=f10
498 stf.spill [r3]=f11
499 adds r25=PT(B7)-PT(F11),r3
500 ;;
501 st8 [r24]=r18,16 /* b6 */
502 st8 [r25]=r19,16 /* b7 */
503 ;;
504 st8 [r24]=r9 /* ar.csd */
505 st8 [r25]=r10 /* ar.ssd */
506 ;;
507 srlz.d // make sure we see the effect of cr.ivr
508 movl r14=ia64_leave_nested
509 ;;
510 mov rp=r14
511 br.call.sptk.many b6=vmx_ia64_handle_irq
512 ;;
513 END(vmx_interrupt)
515 .org vmx_ia64_ivt+0x3400
516 /////////////////////////////////////////////////////////////////////////////////////////
517 // 0x3400 Entry 13 (size 64 bundles) Reserved
// Architecturally reserved vector, reused here for virtual external
// interrupt injection: save predicates/vector number and hand off to the
// vexirq dispatcher.
518 ENTRY(vmx_virtual_exirq)
519 VMX_DBG_FAULT(13)
520 mov r31=pr
521 mov r19=13
522 br.sptk vmx_dispatch_vexirq
523 END(vmx_virtual_exirq)
// Entries 14-19 (0x3800-0x4c00) are architecturally reserved; each one
// optionally logs via VMX_DBG_FAULT and parks in its VMX_FAULT spin loop.
525 .org vmx_ia64_ivt+0x3800
526 /////////////////////////////////////////////////////////////////////////////////////////
527 // 0x3800 Entry 14 (size 64 bundles) Reserved
528 VMX_DBG_FAULT(14)
529 VMX_FAULT(14)
532 .org vmx_ia64_ivt+0x3c00
533 /////////////////////////////////////////////////////////////////////////////////////////
534 // 0x3c00 Entry 15 (size 64 bundles) Reserved
535 VMX_DBG_FAULT(15)
536 VMX_FAULT(15)
539 .org vmx_ia64_ivt+0x4000
540 /////////////////////////////////////////////////////////////////////////////////////////
541 // 0x4000 Entry 16 (size 64 bundles) Reserved
542 VMX_DBG_FAULT(16)
543 VMX_FAULT(16)
545 .org vmx_ia64_ivt+0x4400
546 /////////////////////////////////////////////////////////////////////////////////////////
547 // 0x4400 Entry 17 (size 64 bundles) Reserved
548 VMX_DBG_FAULT(17)
549 VMX_FAULT(17)
551 .org vmx_ia64_ivt+0x4800
552 /////////////////////////////////////////////////////////////////////////////////////////
553 // 0x4800 Entry 18 (size 64 bundles) Reserved
554 VMX_DBG_FAULT(18)
555 VMX_FAULT(18)
557 .org vmx_ia64_ivt+0x4c00
558 /////////////////////////////////////////////////////////////////////////////////////////
559 // 0x4c00 Entry 19 (size 64 bundles) Reserved
560 VMX_DBG_FAULT(19)
561 VMX_FAULT(19)
.org vmx_ia64_ivt+0x5200
563 /////////////////////////////////////////////////////////////////////////////////////////
564 // 0x5200 Entry 22 (size 16 bundles) Instruction Access Rights (26)
// Fix: every other entry in this table is pinned to its architected
// offset with a .org directive, but this one had none — it would have
// been assembled immediately after entry 19's stub instead of at 0x5200,
// where the CPU actually delivers vector 22.
// NOTE(review): entries 20 (0x5000, Page Not Present) and 21 (0x5100,
// Key Permission) have no handlers in this file at all — confirm they
// are intentionally unimplemented in this configuration.
565 ENTRY(vmx_iaccess_rights)
566 VMX_REFLECT(22)
567 END(vmx_iaccess_rights)
569 .org vmx_ia64_ivt+0x5300
570 /////////////////////////////////////////////////////////////////////////////////////////
571 // 0x5300 Entry 23 (size 16 bundles) Data Access Rights (14,53)
// Access-rights faults are reflected into the guest.
572 ENTRY(vmx_daccess_rights)
573 VMX_REFLECT(23)
574 END(vmx_daccess_rights)
576 .org vmx_ia64_ivt+0x5400
577 /////////////////////////////////////////////////////////////////////////////////////////
578 // 0x5400 Entry 24 (size 16 bundles) General Exception (5,32,34,36,38,39)
// General exceptions currently spin in vmx_fault_24 rather than being
// reflected — the commented-out VMX_REFLECT suggests this is deliberate
// (reflection disabled pending support); confirm before relying on it.
579 ENTRY(vmx_general_exception)
580 VMX_FAULT(24)
581 // VMX_REFLECT(24)
582 END(vmx_general_exception)
584 .org vmx_ia64_ivt+0x5500
585 /////////////////////////////////////////////////////////////////////////////////////////
586 // 0x5500 Entry 25 (size 16 bundles) Disabled FP-Register (35)
// Entries 25-27 are reflected into the guest; 28 is reserved (spin);
// the debug vector (29) is likewise unhandled and spins in vmx_fault_29.
587 ENTRY(vmx_disabled_fp_reg)
588 VMX_REFLECT(25)
589 END(vmx_disabled_fp_reg)
591 .org vmx_ia64_ivt+0x5600
592 /////////////////////////////////////////////////////////////////////////////////////////
593 // 0x5600 Entry 26 (size 16 bundles) Nat Consumption (11,23,37,50)
594 ENTRY(vmx_nat_consumption)
595 VMX_REFLECT(26)
596 END(vmx_nat_consumption)
598 .org vmx_ia64_ivt+0x5700
599 /////////////////////////////////////////////////////////////////////////////////////////
600 // 0x5700 Entry 27 (size 16 bundles) Speculation (40)
601 ENTRY(vmx_speculation_vector)
602 VMX_REFLECT(27)
603 END(vmx_speculation_vector)
605 .org vmx_ia64_ivt+0x5800
606 /////////////////////////////////////////////////////////////////////////////////////////
607 // 0x5800 Entry 28 (size 16 bundles) Reserved
608 VMX_DBG_FAULT(28)
609 VMX_FAULT(28)
611 .org vmx_ia64_ivt+0x5900
612 /////////////////////////////////////////////////////////////////////////////////////////
613 // 0x5900 Entry 29 (size 16 bundles) Debug (16,28,56)
614 ENTRY(vmx_debug_vector)
615 VMX_DBG_FAULT(29)
616 VMX_FAULT(29)
617 END(vmx_debug_vector)
619 .org vmx_ia64_ivt+0x5a00
620 /////////////////////////////////////////////////////////////////////////////////////////
621 // 0x5a00 Entry 30 (size 16 bundles) Unaligned Reference (57)
// Entries 30-36 are all reflected into the guest via VMX_REFLECT.
622 ENTRY(vmx_unaligned_access)
623 VMX_REFLECT(30)
624 END(vmx_unaligned_access)
626 .org vmx_ia64_ivt+0x5b00
627 /////////////////////////////////////////////////////////////////////////////////////////
628 // 0x5b00 Entry 31 (size 16 bundles) Unsupported Data Reference (57)
629 ENTRY(vmx_unsupported_data_reference)
630 VMX_REFLECT(31)
631 END(vmx_unsupported_data_reference)
633 .org vmx_ia64_ivt+0x5c00
634 /////////////////////////////////////////////////////////////////////////////////////////
635 // 0x5c00 Entry 32 (size 16 bundles) Floating-Point Fault (64)
636 ENTRY(vmx_floating_point_fault)
637 VMX_REFLECT(32)
638 END(vmx_floating_point_fault)
640 .org vmx_ia64_ivt+0x5d00
641 /////////////////////////////////////////////////////////////////////////////////////////
642 // 0x5d00 Entry 33 (size 16 bundles) Floating Point Trap (66)
643 ENTRY(vmx_floating_point_trap)
644 VMX_REFLECT(33)
645 END(vmx_floating_point_trap)
647 .org vmx_ia64_ivt+0x5e00
648 /////////////////////////////////////////////////////////////////////////////////////////
649 // 0x5e00 Entry 34 (size 16 bundles) Lower Privilege Transfer Trap (66)
650 ENTRY(vmx_lower_privilege_trap)
651 VMX_REFLECT(34)
652 END(vmx_lower_privilege_trap)
654 .org vmx_ia64_ivt+0x5f00
655 /////////////////////////////////////////////////////////////////////////////////////////
656 // 0x5f00 Entry 35 (size 16 bundles) Taken Branch Trap (68)
657 ENTRY(vmx_taken_branch_trap)
658 VMX_REFLECT(35)
659 END(vmx_taken_branch_trap)
661 .org vmx_ia64_ivt+0x6000
662 /////////////////////////////////////////////////////////////////////////////////////////
663 // 0x6000 Entry 36 (size 16 bundles) Single Step Trap (69)
664 ENTRY(vmx_single_step_trap)
665 VMX_REFLECT(36)
666 END(vmx_single_step_trap)
668 .org vmx_ia64_ivt+0x6100
669 /////////////////////////////////////////////////////////////////////////////////////////
670 // 0x6100 Entry 37 (size 16 bundles) Virtualization Fault
// A privileged-operation trap from the guest: save predicates and the
// vector number, then go emulate the instruction in C (vmx_emulate via
// vmx_dispatch_virtualization_fault).
671 ENTRY(vmx_virtualization_fault)
672 VMX_DBG_FAULT(37)
673 mov r31=pr
674 mov r19=37
675 br.sptk vmx_dispatch_virtualization_fault
676 END(vmx_virtualization_fault)
// Entries 38-44 (0x6200-0x6800) are reserved; each spins in its
// VMX_FAULT loop (with optional VMX_DBG_FAULT tracing).
678 .org vmx_ia64_ivt+0x6200
679 /////////////////////////////////////////////////////////////////////////////////////////
680 // 0x6200 Entry 38 (size 16 bundles) Reserved
681 VMX_DBG_FAULT(38)
682 VMX_FAULT(38)
684 .org vmx_ia64_ivt+0x6300
685 /////////////////////////////////////////////////////////////////////////////////////////
686 // 0x6300 Entry 39 (size 16 bundles) Reserved
687 VMX_DBG_FAULT(39)
688 VMX_FAULT(39)
690 .org vmx_ia64_ivt+0x6400
691 /////////////////////////////////////////////////////////////////////////////////////////
692 // 0x6400 Entry 40 (size 16 bundles) Reserved
693 VMX_DBG_FAULT(40)
694 VMX_FAULT(40)
696 .org vmx_ia64_ivt+0x6500
697 /////////////////////////////////////////////////////////////////////////////////////////
698 // 0x6500 Entry 41 (size 16 bundles) Reserved
699 VMX_DBG_FAULT(41)
700 VMX_FAULT(41)
702 .org vmx_ia64_ivt+0x6600
703 /////////////////////////////////////////////////////////////////////////////////////////
704 // 0x6600 Entry 42 (size 16 bundles) Reserved
705 VMX_DBG_FAULT(42)
706 VMX_FAULT(42)
708 .org vmx_ia64_ivt+0x6700
709 /////////////////////////////////////////////////////////////////////////////////////////
710 // 0x6700 Entry 43 (size 16 bundles) Reserved
711 VMX_DBG_FAULT(43)
712 VMX_FAULT(43)
714 .org vmx_ia64_ivt+0x6800
715 /////////////////////////////////////////////////////////////////////////////////////////
716 // 0x6800 Entry 44 (size 16 bundles) Reserved
717 VMX_DBG_FAULT(44)
718 VMX_FAULT(44)
721 /////////////////////////////////////////////////////////////////////////////////////////
722 // 0x6900 Entry 45 (size 16 bundles) IA-32 Exeception (17,18,29,41,42,43,44,58,60,61,62,72,73,75,76,77)
723 ENTRY(vmx_ia32_exception)
724 VMX_DBG_FAULT(45)
725 VMX_FAULT(45)
726 END(vmx_ia32_exception)
728 .org vmx_ia64_ivt+0x6a00
729 /////////////////////////////////////////////////////////////////////////////////////////
730 // 0x6a00 Entry 46 (size 16 bundles) IA-32 Intercept (30,31,59,70,71)
731 ENTRY(vmx_ia32_intercept)
732 VMX_DBG_FAULT(46)
733 VMX_FAULT(46)
734 END(vmx_ia32_intercept)
736 .org vmx_ia64_ivt+0x6b00
737 /////////////////////////////////////////////////////////////////////////////////////////
738 // 0x6b00 Entry 47 (size 16 bundles) IA-32 Interrupt (74)
739 ENTRY(vmx_ia32_interrupt)
740 VMX_DBG_FAULT(47)
741 VMX_FAULT(47)
742 END(vmx_ia32_interrupt)
// Entries 48-67 (0x6c00-0x7f00) fill out the remainder of the 32KB table;
// all are reserved and spin in their VMX_FAULT loops.
744 .org vmx_ia64_ivt+0x6c00
745 /////////////////////////////////////////////////////////////////////////////////////////
746 // 0x6c00 Entry 48 (size 16 bundles) Reserved
747 VMX_DBG_FAULT(48)
748 VMX_FAULT(48)
750 .org vmx_ia64_ivt+0x6d00
751 /////////////////////////////////////////////////////////////////////////////////////////
752 // 0x6d00 Entry 49 (size 16 bundles) Reserved
753 VMX_DBG_FAULT(49)
754 VMX_FAULT(49)
756 .org vmx_ia64_ivt+0x6e00
757 /////////////////////////////////////////////////////////////////////////////////////////
758 // 0x6e00 Entry 50 (size 16 bundles) Reserved
759 VMX_DBG_FAULT(50)
760 VMX_FAULT(50)
762 .org vmx_ia64_ivt+0x6f00
763 /////////////////////////////////////////////////////////////////////////////////////////
764 // 0x6f00 Entry 51 (size 16 bundles) Reserved
765 VMX_DBG_FAULT(51)
766 VMX_FAULT(51)
768 .org vmx_ia64_ivt+0x7000
769 /////////////////////////////////////////////////////////////////////////////////////////
770 // 0x7000 Entry 52 (size 16 bundles) Reserved
771 VMX_DBG_FAULT(52)
772 VMX_FAULT(52)
774 .org vmx_ia64_ivt+0x7100
775 /////////////////////////////////////////////////////////////////////////////////////////
776 // 0x7100 Entry 53 (size 16 bundles) Reserved
777 VMX_DBG_FAULT(53)
778 VMX_FAULT(53)
780 .org vmx_ia64_ivt+0x7200
781 /////////////////////////////////////////////////////////////////////////////////////////
782 // 0x7200 Entry 54 (size 16 bundles) Reserved
783 VMX_DBG_FAULT(54)
784 VMX_FAULT(54)
786 .org vmx_ia64_ivt+0x7300
787 /////////////////////////////////////////////////////////////////////////////////////////
788 // 0x7300 Entry 55 (size 16 bundles) Reserved
789 VMX_DBG_FAULT(55)
790 VMX_FAULT(55)
792 .org vmx_ia64_ivt+0x7400
793 /////////////////////////////////////////////////////////////////////////////////////////
794 // 0x7400 Entry 56 (size 16 bundles) Reserved
795 VMX_DBG_FAULT(56)
796 VMX_FAULT(56)
798 .org vmx_ia64_ivt+0x7500
799 /////////////////////////////////////////////////////////////////////////////////////////
800 // 0x7500 Entry 57 (size 16 bundles) Reserved
801 VMX_DBG_FAULT(57)
802 VMX_FAULT(57)
804 .org vmx_ia64_ivt+0x7600
805 /////////////////////////////////////////////////////////////////////////////////////////
806 // 0x7600 Entry 58 (size 16 bundles) Reserved
807 VMX_DBG_FAULT(58)
808 VMX_FAULT(58)
810 .org vmx_ia64_ivt+0x7700
811 /////////////////////////////////////////////////////////////////////////////////////////
812 // 0x7700 Entry 59 (size 16 bundles) Reserved
813 VMX_DBG_FAULT(59)
814 VMX_FAULT(59)
816 .org vmx_ia64_ivt+0x7800
817 /////////////////////////////////////////////////////////////////////////////////////////
818 // 0x7800 Entry 60 (size 16 bundles) Reserved
819 VMX_DBG_FAULT(60)
820 VMX_FAULT(60)
822 .org vmx_ia64_ivt+0x7900
823 /////////////////////////////////////////////////////////////////////////////////////////
824 // 0x7900 Entry 61 (size 16 bundles) Reserved
825 VMX_DBG_FAULT(61)
826 VMX_FAULT(61)
828 .org vmx_ia64_ivt+0x7a00
829 /////////////////////////////////////////////////////////////////////////////////////////
830 // 0x7a00 Entry 62 (size 16 bundles) Reserved
831 VMX_DBG_FAULT(62)
832 VMX_FAULT(62)
834 .org vmx_ia64_ivt+0x7b00
835 /////////////////////////////////////////////////////////////////////////////////////////
836 // 0x7b00 Entry 63 (size 16 bundles) Reserved
837 VMX_DBG_FAULT(63)
838 VMX_FAULT(63)
840 .org vmx_ia64_ivt+0x7c00
841 /////////////////////////////////////////////////////////////////////////////////////////
842 // 0x7c00 Entry 64 (size 16 bundles) Reserved
843 VMX_DBG_FAULT(64)
844 VMX_FAULT(64)
846 .org vmx_ia64_ivt+0x7d00
847 /////////////////////////////////////////////////////////////////////////////////////////
848 // 0x7d00 Entry 65 (size 16 bundles) Reserved
849 VMX_DBG_FAULT(65)
850 VMX_FAULT(65)
852 .org vmx_ia64_ivt+0x7e00
853 /////////////////////////////////////////////////////////////////////////////////////////
854 // 0x7e00 Entry 66 (size 16 bundles) Reserved
855 VMX_DBG_FAULT(66)
856 VMX_FAULT(66)
858 .org vmx_ia64_ivt+0x7f00
859 /////////////////////////////////////////////////////////////////////////////////////////
860 // 0x7f00 Entry 67 (size 16 bundles) Reserved
861 VMX_DBG_FAULT(67)
862 VMX_FAULT(67)
864 .org vmx_ia64_ivt+0x8000
865 // There is no particular reason for this code to be here, other than that
866 // there happens to be space here that would go unused otherwise. If this
867 // fault ever gets "unreserved", simply move the following code to a more
868 // suitable spot...
// Common reflection path: save the minimal state (VMX_SAVE_MIN_* defines
// r2/r3/sp and the p15 interrupt-was-enabled predicate — see
// vmx_minstate.h), re-enable interruption collection, finish saving state
// and call vmx_reflect_interruption(ifa, isr, iim, r15) with the return
// path routed through ia64_leave_hypervisor.
871 ENTRY(vmx_dispatch_reflection)
872 /*
873 * Input:
874 * psr.ic: off
875 * r19: intr type (offset into ivt, see ia64_int.h)
876 * r31: contains saved predicates (pr)
877 */
878 VMX_SAVE_MIN_WITH_COVER_R19
879 alloc r14=ar.pfs,0,0,4,0
880 mov out0=cr.ifa
881 mov out1=cr.isr
882 mov out2=cr.iim
883 mov out3=r15
885 ssm psr.ic
886 ;;
887 srlz.i // guarantee that interruption collection is on
888 ;;
889 (p15) ssm psr.i // restore psr.i
890 adds r3=16,r2 // set up second base pointer
891 ;;
892 VMX_SAVE_REST
893 movl r14=ia64_leave_hypervisor
894 ;;
895 mov rp=r14
896 br.call.sptk.many b6=vmx_reflect_interruption
897 END(vmx_dispatch_reflection)
// Heavy path for virtualization faults: save state, then call
// vmx_emulate(vcpu, cause, opcode). r4/r5 are assumed to carry the fault
// cause and opcode (set up by the minstate macros — confirm in
// vmx_minstate.h); return goes through ia64_leave_hypervisor.
899 ENTRY(vmx_dispatch_virtualization_fault)
900 VMX_SAVE_MIN_WITH_COVER_R19
901 ;;
902 alloc r14=ar.pfs,0,0,3,0 // now it's safe (must be first in insn group!)
903 mov out0=r13 //vcpu
904 mov out1=r4 //cause
905 mov out2=r5 //opcode
906 ssm psr.ic
907 ;;
908 srlz.i // guarantee that interruption collection is on
909 ;;
910 (p15) ssm psr.i // restore psr.i
911 adds r3=16,r2 // set up second base pointer
912 ;;
913 VMX_SAVE_REST
914 movl r14=ia64_leave_hypervisor
915 ;;
916 mov rp=r14
917 br.call.sptk.many b6=vmx_emulate
918 END(vmx_dispatch_virtualization_fault)
// Virtual external interrupt injection: save state and call
// vmx_vexirq(vcpu); returns through ia64_leave_hypervisor.
921 ENTRY(vmx_dispatch_vexirq)
922 VMX_SAVE_MIN_WITH_COVER_R19
923 alloc r14=ar.pfs,0,0,1,0
924 mov out0=r13
926 ssm psr.ic
927 ;;
928 srlz.i // guarantee that interruption collection is on
929 ;;
930 (p15) ssm psr.i // restore psr.i
931 adds r3=16,r2 // set up second base pointer
932 ;;
933 VMX_SAVE_REST
934 movl r14=ia64_leave_hypervisor
935 ;;
936 mov rp=r14
937 br.call.sptk.many b6=vmx_vexirq
938 END(vmx_dispatch_vexirq)
// Slow path for VTLB misses (reached from the itlb/dtlb handlers with
// r19 = vector number): save state and call vmx_hpw_miss(vcpu, r15, ifa);
// returns through ia64_leave_hypervisor.
940 ENTRY(vmx_dispatch_tlb_miss)
941 VMX_SAVE_MIN_WITH_COVER_R19
942 alloc r14=ar.pfs,0,0,3,0
943 mov out0=r13
944 mov out1=r15
945 mov out2=cr.ifa
947 ssm psr.ic
948 ;;
949 srlz.i // guarantee that interruption collection is on
950 ;;
951 (p15) ssm psr.i // restore psr.i
952 adds r3=16,r2 // set up second base pointer
953 ;;
954 VMX_SAVE_REST
955 movl r14=ia64_leave_hypervisor
956 ;;
957 mov rp=r14
958 br.call.sptk.many b6=vmx_hpw_miss
959 END(vmx_dispatch_tlb_miss)
// Non-hypercall break faults: save state and call
// vmx_ia64_handle_break(ifa, pt_regs*, isr, iim); returns through
// ia64_leave_hypervisor. (The doubled ";;" below is a redundant but
// harmless extra bundle stop.)
962 ENTRY(vmx_dispatch_break_fault)
963 VMX_SAVE_MIN_WITH_COVER_R19
964 ;;
965 ;;
966 alloc r14=ar.pfs,0,0,4,0 // now it's safe (must be first in insn group!)
967 mov out0=cr.ifa
968 adds out1=16,sp
969 mov out2=cr.isr // FIXME: pity to make this slow access twice
970 mov out3=cr.iim // FIXME: pity to make this slow access twice
972 ssm psr.ic
973 ;;
974 srlz.i // guarantee that interruption collection is on
975 ;;
976 (p15)ssm psr.i // restore psr.i
977 adds r3=16,r2 // set up second base pointer
978 ;;
979 VMX_SAVE_REST
980 movl r14=ia64_leave_hypervisor
981 ;;
982 mov rp=r14
983 br.call.sptk.many b6=vmx_ia64_handle_break
984 ;;
985 END(vmx_dispatch_break_fault)
// Hypercall dispatch (reached from vmx_break_fault when iim==0x1100):
// save state, then index hyper_call_table by the hypercall number in r15
// (8 bytes per entry, hence shladd by 3) and call the handler through b6;
// the handler returns to ia64_leave_hypervisor via rp.
// NOTE(review): no bounds check on r15 is visible here — confirm the
// hypercall number is validated before this table lookup.
988 ENTRY(vmx_hypercall_dispatch)
989 VMX_SAVE_MIN_WITH_COVER
990 ssm psr.ic
991 ;;
992 srlz.i // guarantee that interruption collection is on
993 ;;
994 (p15) ssm psr.i // restore psr.i
995 adds r3=16,r2 // set up second base pointer
996 ;;
997 VMX_SAVE_REST
998 ;;
999 movl r14=ia64_leave_hypervisor
1000 movl r2=hyper_call_table
1001 ;;
1002 mov rp=r14
1003 shladd r2=r15,3,r2 // r2 = &hyper_call_table[r15]
1004 ;;
1005 ld8 r2=[r2]
1006 ;;
1007 mov b6=r2
1008 ;;
1009 br.call.sptk.many b6=b6
1010 ;;
1011 END(vmx_hypercall_dispatch)
// External interrupt taken while the guest was running: save state and
// call vmx_ia64_handle_irq(cr.ivr, pt_regs*); returns through
// ia64_leave_hypervisor (contrast with the inline host path in
// vmx_interrupt, which leaves via ia64_leave_nested).
1015 ENTRY(vmx_dispatch_interrupt)
1016 VMX_SAVE_MIN_WITH_COVER_R19 // uses r31; defines r2 and r3
1017 ;;
1018 alloc r14=ar.pfs,0,0,2,0 // must be first in an insn group
1019 mov out0=cr.ivr // pass cr.ivr as first arg
1020 add out1=16,sp // pass pointer to pt_regs as second arg
1022 ssm psr.ic
1023 ;;
1024 srlz.i
1025 ;;
1026 (p15) ssm psr.i
1027 adds r3=16,r2 // set up second base pointer for SAVE_REST
1028 ;;
1029 VMX_SAVE_REST
1030 movl r14=ia64_leave_hypervisor
1031 ;;
1032 mov rp=r14
1033 br.call.sptk.many b6=vmx_ia64_handle_irq
1034 END(vmx_dispatch_interrupt)
// Hypercall jump table: one 8-byte handler pointer per hypercall number,
// indexed by vmx_hypercall_dispatch. Unimplemented calls point at
// hyper_not_support; the commented-out names record which x86 Xen
// hypercall each slot corresponds to.
1038 .rodata
1039 .align 8
1040 .globl hyper_call_table
1041 hyper_call_table:
1042 data8 hyper_not_support //hyper_set_trap_table /* 0 */
1043 data8 hyper_mmu_update
1044 data8 hyper_not_support //hyper_set_gdt
1045 data8 hyper_not_support //hyper_stack_switch
1046 data8 hyper_not_support //hyper_set_callbacks
1047 data8 hyper_not_support //hyper_fpu_taskswitch /* 5 */
1048 data8 hyper_sched_op
1049 data8 hyper_dom0_op
1050 data8 hyper_not_support //hyper_set_debugreg
1051 data8 hyper_not_support //hyper_get_debugreg
1052 data8 hyper_not_support //hyper_update_descriptor /* 10 */
1053 data8 hyper_not_support //hyper_set_fast_trap
1054 data8 hyper_dom_mem_op
1055 data8 hyper_not_support //hyper_multicall
1056 data8 hyper_not_support //hyper_update_va_mapping
1057 data8 hyper_not_support //hyper_set_timer_op /* 15 */
1058 data8 hyper_event_channel_op
1059 data8 hyper_xen_version
1060 data8 hyper_not_support //hyper_console_io
1061 data8 hyper_not_support //hyper_physdev_op
1062 data8 hyper_not_support //hyper_grant_table_op /* 20 */
1063 data8 hyper_not_support //hyper_vm_assist
1064 data8 hyper_not_support //hyper_update_va_mapping_otherdomain
1065 data8 hyper_not_support //hyper_switch_vm86
1066 data8 hyper_not_support //hyper_boot_vcpu
1067 data8 hyper_not_support //hyper_ni_hypercall /* 25 */
1068 data8 hyper_not_support //hyper_mmuext_op
1069 data8 hyper_lock_page
1070 data8 hyper_set_shared_page