xen/arch/ia64/vmx/vmx_ivt.S @ 10923:b94d43606bcd (ia64/xen-unstable)

[IA64] cleanup the hypercall handling code for VT-i domain

Signed-off-by: Tsunehisa Doi <Doi.Tsunehisa@jp.fujitsu.com>
Signed-off-by: Tomonari Horikoshi <t.horikoshi@jp.fujitsu.com>
Signed-off-by: Masaki Kanno <kanno.masaki@jp.fujitsu.com>

author:   awilliam@xenbuild.aw
date:     Thu Aug 03 10:38:04 2006 -0600
parents:  e61bb865ec74
children: 5292d57b0771

/*
 * arch/ia64/kernel/vmx_ivt.S
 *
 * Copyright (C) 1998-2001, 2003 Hewlett-Packard Co
 *      Stephane Eranian <eranian@hpl.hp.com>
 *      David Mosberger <davidm@hpl.hp.com>
 * Copyright (C) 2000, 2002-2003 Intel Co
 *      Asit Mallick <asit.k.mallick@intel.com>
 *      Suresh Siddha <suresh.b.siddha@intel.com>
 *      Kenneth Chen <kenneth.w.chen@intel.com>
 *      Fenghua Yu <fenghua.yu@intel.com>
 *
 *
 * 00/08/23 Asit Mallick <asit.k.mallick@intel.com> TLB handling for SMP
 * 00/12/20 David Mosberger-Tang <davidm@hpl.hp.com> DTLB/ITLB handler now uses virtual PT.
 *
 * 05/3/20 Xuefei Xu (Anthony Xu) (anthony.xu@intel.com)
 *      Supporting Intel virtualization architecture
 *
 */

/*
 * This file defines the interruption vector table used by the CPU.
 * It does not include one entry per possible cause of interruption.
 *
 * The first 20 entries of the table contain 64 bundles each while the
 * remaining 48 entries contain only 16 bundles each.
 *
 * The 64 bundles are used to allow inlining the whole handler for critical
 * interruptions like TLB misses.
 *
 * For each entry, the comment is as follows:
 *
 *      // 0x1c00 Entry 7 (size 64 bundles) Data Key Miss (12,51)
 *      entry offset ----/     /         /                  /  /
 *      entry number ---------/         /                  /  /
 *      size of the entry -------------/                  /  /
 *      vector name -------------------------------------/  /
 *      interruptions triggering this vector ----------------/
 *
 * The table is 32KB in size and must be aligned on 32KB boundary.
 * (The CPU ignores the 15 lower bits of the address)
 *
 * Table is based upon EAS2.6 (Oct 1999)
 */
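
/*
 * Layout arithmetic (derivable from the .org directives below): a bundle is
 * 16 bytes, so a 64-bundle entry occupies 0x400 bytes and a 16-bundle entry
 * occupies 0x100 bytes.  The 20 large entries therefore span
 * 20 * 0x400 = 0x5000 bytes and the 48 small entries span
 * 48 * 0x100 = 0x3000 bytes, for a total of 0x8000 bytes (32KB), which is why
 * the table must sit on a 32KB boundary.
 */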

#include <linux/config.h>

#include <asm/asmmacro.h>
#include <asm/break.h>
#include <asm/ia32.h>
#include <asm/kregs.h>
#include <asm/offsets.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/system.h>
#include <asm/thread_info.h>
#include <asm/unistd.h>
#include <asm/vhpt.h>
#include <asm/virt_event.h>

#ifdef VTI_DEBUG
/*
 * This lets you track the last eight faults that occurred on the CPU. Make sure ar.k2 isn't
 * needed for something else before enabling this...
 */
#define VMX_DBG_FAULT(i)            \
    add r16=IVT_CUR_OFS,r21;        \
    add r17=IVT_DBG_OFS,r21;;       \
    ld8 r18=[r16];;                 \
    add r17=r18,r17;                \
    mov r19=cr.iip;                 \
    mov r20=cr.ipsr;                \
    mov r22=cr.ifa;                 \
    mov r23=i;;                     \
    st8 [r17]=r19,8;                \
    add r18=32,r18;;                \
    st8 [r17]=r20,8;                \
    mov r19=0xfe0;;                 \
    st8 [r17]=r22,8;                \
    and r18=r19,r18;;               \
    st8 [r17]=r23;                  \
    st8 [r16]=r18;;                 \
//# define VMX_DBG_FAULT(i) mov r16=ar.k2;; shl r16=r16,8;; add r16=(i),r16;;mov ar.k2=r16
#else
# define VMX_DBG_FAULT(i)
#endif
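
/*
 * Note on the ring-buffer variant above: each fault appends a record of four
 * 8-byte words (cr.iip, cr.ipsr, cr.ifa, vector number) at IVT_DBG_OFS plus
 * the cursor kept at IVT_CUR_OFS; the cursor then advances by 32 bytes and is
 * wrapped with the 0xfe0 mask, so the buffer can in fact hold up to 128 such
 * records before old ones are overwritten.
 */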
90 #include "vmx_minstate.h"
92 #define MINSTATE_VIRT /* needed by minstate.h */
93 #include "minstate.h"
96 #define VMX_FAULT(n) \
97 vmx_fault_##n:; \
98 br.sptk.many dispatch_to_fault_handler; \
99 ;; \
102 #define VMX_REFLECT(n) \
103 mov r31=pr; \
104 mov r19=n; /* prepare to save predicates */ \
105 mov r29=cr.ipsr; \
106 ;; \
107 tbit.z p6,p7=r29,IA64_PSR_VM_BIT; \
108 (p7)br.sptk.many vmx_dispatch_reflection; \
109 VMX_FAULT(n); \
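
/*
 * Convention used throughout the table: VMX_FAULT(n) funnels an otherwise
 * unhandled vector to dispatch_to_fault_handler, while VMX_REFLECT(n) first
 * tests cr.ipsr.vm and, when the fault was taken while guest code was running
 * (vm bit set), reflects it back to the guest via vmx_dispatch_reflection;
 * faults taken in hypervisor context fall through to VMX_FAULT(n).
 */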

GLOBAL_ENTRY(vmx_panic)
    br.sptk.many vmx_panic
    ;;
END(vmx_panic)

.section .text.ivt,"ax"

.align 32768    // align on 32KB boundary
.global vmx_ia64_ivt
vmx_ia64_ivt:
/////////////////////////////////////////////////////////////////////////////////////////
// 0x0000 Entry 0 (size 64 bundles) VHPT Translation (8,20,47)
ENTRY(vmx_vhpt_miss)
    VMX_DBG_FAULT(0)
    VMX_FAULT(0)
END(vmx_vhpt_miss)

.org vmx_ia64_ivt+0x400
/////////////////////////////////////////////////////////////////////////////////////////
// 0x0400 Entry 1 (size 64 bundles) ITLB (21)
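
/*
 * vmx_itlb_miss: handles ITLB misses taken while the guest (psr.vm set) was
 * running.  The handler hashes the faulting address with thash/ttag and walks
 * the VHPT collision chain (VLE_CCHAIN_OFFSET links), comparing each entry's
 * tag against the ttag of cr.ifa.  On a hit it swaps the matching entry with
 * the chain head, inserts the translation with itc.i and resumes the guest
 * through ia64_vmm_entry; if the chain ends without a match it falls out to
 * vmx_dispatch_itlb_miss so vmx_hpw_miss can resolve the miss in C.  Misses
 * taken with psr.vm clear are sent to the identity-mapping fast path at
 * vmx_alt_itlb_miss_1.
 */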

ENTRY(vmx_itlb_miss)
    VMX_DBG_FAULT(1)
    mov r31 = pr
    mov r29=cr.ipsr;
    ;;
    tbit.z p6,p7=r29,IA64_PSR_VM_BIT;
    (p6) br.sptk vmx_alt_itlb_miss_1
    //(p6) br.sptk vmx_fault_1
    mov r16 = cr.ifa
    ;;
    thash r17 = r16
    ttag r20 = r16
    ;;
    mov r18 = r17
    adds r28 = VLE_TITAG_OFFSET,r17
    adds r19 = VLE_CCHAIN_OFFSET, r17
    ;;
    ld8 r17 = [r19]
    ;;
vmx_itlb_loop:
    cmp.eq p6,p0 = r0, r17
    (p6)br vmx_itlb_out
    ;;
    adds r16 = VLE_TITAG_OFFSET, r17
    adds r19 = VLE_CCHAIN_OFFSET, r17
    ;;
    ld8 r22 = [r16]
    ld8 r23 = [r19]
    ;;
    lfetch [r23]
    cmp.eq p6,p7 = r20, r22
    ;;
    (p7)mov r17 = r23;
    (p7)br.sptk vmx_itlb_loop
    ;;
    ld8 r25 = [r17]
    ld8 r27 = [r18]
    ld8 r29 = [r28]
    ;;
    st8 [r16] = r29
    st8 [r28] = r22
    extr.u r19 = r27, 56, 4
    ;;
    dep r27 = r0, r27, 56, 4
    dep r25 = r19, r25, 56, 4
    ;;
    st8 [r18] = r25
    st8 [r17] = r27
    ;;
    itc.i r25
    dv_serialize_data
    mov r17=cr.isr
    mov r23=r31
    mov r22=b0
    adds r16=IA64_VPD_BASE_OFFSET,r21
    ;;
    ld8 r18=[r16]
    ;;
    adds r19=VPD(VPSR),r18
    movl r20=__vsa_base
    ;;
    ld8 r19=[r19]
    ld8 r20=[r20]
    ;;
    br.sptk ia64_vmm_entry
    ;;
vmx_itlb_out:
    mov r19 = 1
    br.sptk vmx_dispatch_itlb_miss
    VMX_FAULT(1);
END(vmx_itlb_miss)

.org vmx_ia64_ivt+0x0800
/////////////////////////////////////////////////////////////////////////////////////////
// 0x0800 Entry 2 (size 64 bundles) DTLB (9,48)
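
/*
 * vmx_dtlb_miss mirrors vmx_itlb_miss above: the same VHPT collision-chain
 * walk, but the translation is inserted with itc.d and an unresolved miss is
 * handed to vmx_dispatch_dtlb_miss (vector number 2 in r19).
 */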

ENTRY(vmx_dtlb_miss)
    VMX_DBG_FAULT(2)
    mov r31 = pr
    mov r29=cr.ipsr;
    ;;
    tbit.z p6,p7=r29,IA64_PSR_VM_BIT;
    (p6)br.sptk vmx_alt_dtlb_miss_1
    mov r16 = cr.ifa
    ;;
    thash r17 = r16
    ttag r20 = r16
    ;;
    mov r18 = r17
    adds r28 = VLE_TITAG_OFFSET,r17
    adds r19 = VLE_CCHAIN_OFFSET, r17
    ;;
    ld8 r17 = [r19]
    ;;
vmx_dtlb_loop:
    cmp.eq p6,p0 = r0, r17
    (p6)br vmx_dtlb_out
    ;;
    adds r16 = VLE_TITAG_OFFSET, r17
    adds r19 = VLE_CCHAIN_OFFSET, r17
    ;;
    ld8 r22 = [r16]
    ld8 r23 = [r19]
    ;;
    lfetch [r23]
    cmp.eq p6,p7 = r20, r22
    ;;
    (p7)mov r17 = r23;
    (p7)br.sptk vmx_dtlb_loop
    ;;
    ld8 r25 = [r17]
    ld8 r27 = [r18]
    ld8 r29 = [r28]
    ;;
    st8 [r16] = r29
    st8 [r28] = r22
    extr.u r19 = r27, 56, 4
    ;;
    dep r27 = r0, r27, 56, 4
    dep r25 = r19, r25, 56, 4
    ;;
    st8 [r18] = r25
    st8 [r17] = r27
    ;;
    itc.d r25
    dv_serialize_data
    mov r17=cr.isr
    mov r23=r31
    mov r22=b0
    adds r16=IA64_VPD_BASE_OFFSET,r21
    ;;
    ld8 r18=[r16]
    ;;
    adds r19=VPD(VPSR),r18
    movl r20=__vsa_base
    ;;
    ld8 r19=[r19]
    ld8 r20=[r20]
    ;;
    br.sptk ia64_vmm_entry
    ;;
vmx_dtlb_out:
    mov r19 = 2
    br.sptk vmx_dispatch_dtlb_miss
    VMX_FAULT(2);
END(vmx_dtlb_miss)

.org vmx_ia64_ivt+0x0c00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x0c00 Entry 3 (size 64 bundles) Alt ITLB (19)
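
/*
 * vmx_alt_itlb_miss: ITLB miss taken with psr.vm clear, i.e. on a hypervisor
 * address.  Addresses with bit 63 clear are not identity-mapped and go to the
 * fault handler; otherwise a PTE is built directly from cr.ifa (PAGE_KERNEL
 * attributes, the masked physical address, an uncached attribute derived from
 * the region bits) with a granule-sized cr.itir, inserted with itc.i, and the
 * handler returns with rfi.
 */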

ENTRY(vmx_alt_itlb_miss)
    VMX_DBG_FAULT(3)
    mov r31 = pr
    mov r29=cr.ipsr;
    ;;
    tbit.z p6,p7=r29,IA64_PSR_VM_BIT;
    (p7)br.spnt vmx_fault_3
vmx_alt_itlb_miss_1:
    mov r16=cr.ifa      // get address that caused the TLB miss
    ;;
    tbit.z p6,p7=r16,63
    (p6)br.spnt vmx_fault_3
    ;;
    movl r17=PAGE_KERNEL
    mov r24=cr.ipsr
    movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
    ;;
    and r19=r19,r16     // clear ed, reserved bits, and PTE control bits
    shr.u r18=r16,55    // move address bit 59 to bit 4
    ;;
    and r18=0x10,r18    // bit 4=address-bit(61)
    or r19=r17,r19      // insert PTE control bits into r19
    ;;
    movl r20=IA64_GRANULE_SHIFT<<2
    or r19=r19,r18      // set bit 4 (uncached) if the access was to region 6
    ;;
    mov cr.itir=r20
    ;;
    itc.i r19           // insert the TLB entry
    mov pr=r31,-1
    rfi
    VMX_FAULT(3);
END(vmx_alt_itlb_miss)

.org vmx_ia64_ivt+0x1000
/////////////////////////////////////////////////////////////////////////////////////////
// 0x1000 Entry 4 (size 64 bundles) Alt DTLB (7,46)
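
/*
 * vmx_alt_dtlb_miss: the data-side counterpart of the handler above, with two
 * extra cases: with CONFIG_VIRTUAL_FRAME_TABLE it first checks whether the
 * faulting address falls in the virtual frame_table and branches to
 * frametable_miss if so, and for speculative references (isr.sp set, or a
 * non-access lfetch) it sets ipsr.ed and skips the itc.d, so the speculative
 * access is deferred (produces a NaT) instead of installing a mapping.
 */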

ENTRY(vmx_alt_dtlb_miss)
    VMX_DBG_FAULT(4)
    mov r31=pr
    mov r29=cr.ipsr;
    ;;
    tbit.z p6,p7=r29,IA64_PSR_VM_BIT;
    (p7)br.spnt vmx_fault_4
vmx_alt_dtlb_miss_1:
    mov r16=cr.ifa      // get address that caused the TLB miss
    ;;
#ifdef CONFIG_VIRTUAL_FRAME_TABLE
    // Test for the address of virtual frame_table
    shr r22=r16,56;;
    cmp.eq p8,p0=((VIRT_FRAME_TABLE_ADDR>>56)&0xff)-0x100,r22
    (p8)br.cond.sptk frametable_miss ;;
#endif
    tbit.z p6,p7=r16,63
    (p6)br.spnt vmx_fault_4
    ;;
    movl r17=PAGE_KERNEL
    mov r20=cr.isr
    movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
    mov r24=cr.ipsr
    ;;
    and r22=IA64_ISR_CODE_MASK,r20      // get the isr.code field
    tbit.nz p6,p7=r20,IA64_ISR_SP_BIT   // is speculation bit on?
    shr.u r18=r16,55                    // move address bit 59 to bit 4
    and r19=r19,r16                     // clear ed, reserved bits, and PTE control bits
    tbit.nz p9,p0=r20,IA64_ISR_NA_BIT   // is non-access bit on?
    ;;
    and r18=0x10,r18                    // bit 4=address-bit(61)
    (p9)cmp.eq.or.andcm p6,p7=IA64_ISR_CODE_LFETCH,r22 // check isr.code field
    dep r24=-1,r24,IA64_PSR_ED_BIT,1
    or r19=r19,r17                      // insert PTE control bits into r19
    ;;
    or r19=r19,r18      // set bit 4 (uncached) if the access was to region 6
    (p6)mov cr.ipsr=r24
    movl r20=IA64_GRANULE_SHIFT<<2
    ;;
    mov cr.itir=r20
    ;;
    (p7)itc.d r19       // insert the TLB entry
    mov pr=r31,-1
    rfi
    VMX_FAULT(4);
END(vmx_alt_dtlb_miss)

.org vmx_ia64_ivt+0x1400
/////////////////////////////////////////////////////////////////////////////////////////
// 0x1400 Entry 5 (size 64 bundles) Data nested TLB (6,45)
ENTRY(vmx_nested_dtlb_miss)
    VMX_DBG_FAULT(5)
    VMX_FAULT(5)
END(vmx_nested_dtlb_miss)

.org vmx_ia64_ivt+0x1800
/////////////////////////////////////////////////////////////////////////////////////////
// 0x1800 Entry 6 (size 64 bundles) Instruction Key Miss (24)
ENTRY(vmx_ikey_miss)
    VMX_DBG_FAULT(6)
    VMX_REFLECT(6)
END(vmx_ikey_miss)

.org vmx_ia64_ivt+0x1c00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x1c00 Entry 7 (size 64 bundles) Data Key Miss (12,51)
ENTRY(vmx_dkey_miss)
    VMX_DBG_FAULT(7)
    VMX_REFLECT(7)
END(vmx_dkey_miss)

.org vmx_ia64_ivt+0x2000
/////////////////////////////////////////////////////////////////////////////////////////
// 0x2000 Entry 8 (size 64 bundles) Dirty-bit (54)
ENTRY(vmx_dirty_bit)
    VMX_DBG_FAULT(8)
    VMX_REFLECT(8)
END(vmx_dirty_bit)

.org vmx_ia64_ivt+0x2400
/////////////////////////////////////////////////////////////////////////////////////////
// 0x2400 Entry 9 (size 64 bundles) Instruction Access-bit (27)
ENTRY(vmx_iaccess_bit)
    VMX_DBG_FAULT(9)
    VMX_REFLECT(9)
END(vmx_iaccess_bit)

.org vmx_ia64_ivt+0x2800
/////////////////////////////////////////////////////////////////////////////////////////
// 0x2800 Entry 10 (size 64 bundles) Data Access-bit (15,55)
ENTRY(vmx_daccess_bit)
    VMX_DBG_FAULT(10)
    VMX_REFLECT(10)
END(vmx_daccess_bit)

.org vmx_ia64_ivt+0x2c00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x2c00 Entry 11 (size 64 bundles) Break instruction (33)
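
/*
 * vmx_break_fault: break instructions executed by the guest (presumably
 * including the break immediates used for hypercalls, per the changeset
 * description above) are handed to vmx_ia64_handle_break via
 * vmx_dispatch_break_fault.  With VTI_DEBUG, a zero break immediate is
 * diverted to the generic fault path instead.
 */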

ENTRY(vmx_break_fault)
    VMX_DBG_FAULT(11)
    mov r31=pr
    mov r19=11
    mov r30=cr.iim
    ;;
#ifdef VTI_DEBUG
    // break 0 is already handled in vmx_ia64_handle_break.
    cmp.eq p6,p7=r30,r0
    (p6) br.sptk vmx_fault_11
    ;;
#endif
    br.sptk.many vmx_dispatch_break_fault
    ;;
    VMX_FAULT(11);
END(vmx_break_fault)

.org vmx_ia64_ivt+0x3000
/////////////////////////////////////////////////////////////////////////////////////////
// 0x3000 Entry 12 (size 64 bundles) External Interrupt (4)
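
/*
 * vmx_interrupt: external interrupts taken while the guest was running
 * (ipsr.vm set) are routed to vmx_dispatch_interrupt below.  Interrupts taken
 * in hypervisor context instead fall through to the hand-written sequence
 * here, which spills a minimal pt_regs frame on the current stack, re-enables
 * psr.ic (and psr.i if it was on), and calls ia64_handle_irq with cr.ivr and
 * the pt_regs pointer, returning through ia64_leave_nested.
 */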

ENTRY(vmx_interrupt)
    // VMX_DBG_FAULT(12)
    mov r31=pr          // prepare to save predicates
    mov r19=12
    mov r29=cr.ipsr
    ;;
    tbit.z p6,p7=r29,IA64_PSR_VM_BIT
    tbit.z p0,p15=r29,IA64_PSR_I_BIT
    ;;
    (p7) br.sptk vmx_dispatch_interrupt
    ;;
    mov r27=ar.rsc      /* M */
    mov r20=r1          /* A */
    mov r25=ar.unat     /* M */
    mov r26=ar.pfs      /* I */
    mov r28=cr.iip      /* M */
    cover               /* B (or nothing) */
    ;;
    mov r1=sp
    ;;
    invala              /* M */
    mov r30=cr.ifs
    ;;
    addl r1=-IA64_PT_REGS_SIZE,r1
    ;;
    adds r17=2*L1_CACHE_BYTES,r1    /* really: biggest cache-line size */
    adds r16=PT(CR_IPSR),r1
    ;;
    lfetch.fault.excl.nt1 [r17],L1_CACHE_BYTES
    st8 [r16]=r29       /* save cr.ipsr */
    ;;
    lfetch.fault.excl.nt1 [r17]
    mov r29=b0
    ;;
    adds r16=PT(R8),r1  /* initialize first base pointer */
    adds r17=PT(R9),r1  /* initialize second base pointer */
    mov r18=r0          /* make sure r18 isn't NaT */
    ;;
    .mem.offset 0,0; st8.spill [r16]=r8,16
    .mem.offset 8,0; st8.spill [r17]=r9,16
    ;;
    .mem.offset 0,0; st8.spill [r16]=r10,24
    .mem.offset 8,0; st8.spill [r17]=r11,24
    ;;
    st8 [r16]=r28,16    /* save cr.iip */
    st8 [r17]=r30,16    /* save cr.ifs */
    mov r8=ar.fpsr      /* M */
    mov r9=ar.csd
    mov r10=ar.ssd
    movl r11=FPSR_DEFAULT   /* L-unit */
    ;;
    st8 [r16]=r25,16    /* save ar.unat */
    st8 [r17]=r26,16    /* save ar.pfs */
    shl r18=r18,16      /* compute ar.rsc to be used for "loadrs" */
    ;;
    st8 [r16]=r27,16    /* save ar.rsc */
    adds r17=16,r17     /* skip over ar_rnat field */
    ;;
    st8 [r17]=r31,16    /* save predicates */
    adds r16=16,r16     /* skip over ar_bspstore field */
    ;;
    st8 [r16]=r29,16    /* save b0 */
    st8 [r17]=r18,16    /* save ar.rsc value for "loadrs" */
    ;;
    .mem.offset 0,0; st8.spill [r16]=r20,16     /* save original r1 */
    .mem.offset 8,0; st8.spill [r17]=r12,16
    adds r12=-16,r1     /* switch to kernel memory stack (with 16 bytes of scratch) */
    ;;
    .mem.offset 0,0; st8.spill [r16]=r13,16
    .mem.offset 8,0; st8.spill [r17]=r8,16      /* save ar.fpsr */
    MINSTATE_GET_CURRENT(r13)
    ;;
    .mem.offset 0,0; st8.spill [r16]=r15,16
    .mem.offset 8,0; st8.spill [r17]=r14,16
    dep r14=-1,r0,60,4
    ;;
    .mem.offset 0,0; st8.spill [r16]=r2,16
    .mem.offset 8,0; st8.spill [r17]=r3,16
    adds r2=IA64_PT_REGS_R16_OFFSET,r1
    ;;
    mov r8=ar.ccv
    movl r1=__gp        /* establish kernel global pointer */
    ;;
    bsw.1
    ;;
    alloc r14=ar.pfs,0,0,2,0    // must be first in an insn group
    mov out0=cr.ivr     // pass cr.ivr as first arg
    add out1=16,sp      // pass pointer to pt_regs as second arg

    ssm psr.ic
    ;;
    srlz.i
    ;;
    (p15) ssm psr.i
    adds r3=8,r2        // set up second base pointer for SAVE_REST
    srlz.i              // ensure everybody knows psr.ic is back on
    ;;
    .mem.offset 0,0; st8.spill [r2]=r16,16
    .mem.offset 8,0; st8.spill [r3]=r17,16
    ;;
    .mem.offset 0,0; st8.spill [r2]=r18,16
    .mem.offset 8,0; st8.spill [r3]=r19,16
    ;;
    .mem.offset 0,0; st8.spill [r2]=r20,16
    .mem.offset 8,0; st8.spill [r3]=r21,16
    mov r18=b6
    ;;
    .mem.offset 0,0; st8.spill [r2]=r22,16
    .mem.offset 8,0; st8.spill [r3]=r23,16
    mov r19=b7
    ;;
    .mem.offset 0,0; st8.spill [r2]=r24,16
    .mem.offset 8,0; st8.spill [r3]=r25,16
    ;;
    .mem.offset 0,0; st8.spill [r2]=r26,16
    .mem.offset 8,0; st8.spill [r3]=r27,16
    ;;
    .mem.offset 0,0; st8.spill [r2]=r28,16
    .mem.offset 8,0; st8.spill [r3]=r29,16
    ;;
    .mem.offset 0,0; st8.spill [r2]=r30,16
    .mem.offset 8,0; st8.spill [r3]=r31,32
    ;;
    mov ar.fpsr=r11     /* M-unit */
    st8 [r2]=r8,8       /* ar.ccv */
    adds r24=PT(B6)-PT(F7),r3
    ;;
    stf.spill [r2]=f6,32
    stf.spill [r3]=f7,32
    ;;
    stf.spill [r2]=f8,32
    stf.spill [r3]=f9,32
    ;;
    stf.spill [r2]=f10
    stf.spill [r3]=f11
    adds r25=PT(B7)-PT(F11),r3
    ;;
    st8 [r24]=r18,16    /* b6 */
    st8 [r25]=r19,16    /* b7 */
    ;;
    st8 [r24]=r9        /* ar.csd */
    st8 [r25]=r10       /* ar.ssd */
    ;;
    srlz.d              // make sure we see the effect of cr.ivr
    movl r14=ia64_leave_nested
    ;;
    mov rp=r14
    br.call.sptk.many b6=ia64_handle_irq
    ;;
END(vmx_interrupt)

.org vmx_ia64_ivt+0x3400
/////////////////////////////////////////////////////////////////////////////////////////
// 0x3400 Entry 13 (size 64 bundles) Reserved
ENTRY(vmx_virtual_exirq)
    VMX_DBG_FAULT(13)
    mov r31=pr
    mov r19=13
    br.sptk vmx_dispatch_vexirq
END(vmx_virtual_exirq)

.org vmx_ia64_ivt+0x3800
/////////////////////////////////////////////////////////////////////////////////////////
// 0x3800 Entry 14 (size 64 bundles) Reserved
    VMX_DBG_FAULT(14)
    VMX_FAULT(14)

.org vmx_ia64_ivt+0x3c00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x3c00 Entry 15 (size 64 bundles) Reserved
    VMX_DBG_FAULT(15)
    VMX_FAULT(15)

.org vmx_ia64_ivt+0x4000
/////////////////////////////////////////////////////////////////////////////////////////
// 0x4000 Entry 16 (size 64 bundles) Reserved
    VMX_DBG_FAULT(16)
    VMX_FAULT(16)

.org vmx_ia64_ivt+0x4400
/////////////////////////////////////////////////////////////////////////////////////////
// 0x4400 Entry 17 (size 64 bundles) Reserved
    VMX_DBG_FAULT(17)
    VMX_FAULT(17)

.org vmx_ia64_ivt+0x4800
/////////////////////////////////////////////////////////////////////////////////////////
// 0x4800 Entry 18 (size 64 bundles) Reserved
    VMX_DBG_FAULT(18)
    VMX_FAULT(18)

.org vmx_ia64_ivt+0x4c00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x4c00 Entry 19 (size 64 bundles) Reserved
    VMX_DBG_FAULT(19)
    VMX_FAULT(19)

.org vmx_ia64_ivt+0x5000
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5000 Entry 20 (size 16 bundles) Page Not Present
ENTRY(vmx_page_not_present)
    VMX_DBG_FAULT(20)
    VMX_REFLECT(20)
END(vmx_page_not_present)

.org vmx_ia64_ivt+0x5100
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5100 Entry 21 (size 16 bundles) Key Permission vector
ENTRY(vmx_key_permission)
    VMX_DBG_FAULT(21)
    VMX_REFLECT(21)
END(vmx_key_permission)

.org vmx_ia64_ivt+0x5200
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5200 Entry 22 (size 16 bundles) Instruction Access Rights (26)
ENTRY(vmx_iaccess_rights)
    VMX_DBG_FAULT(22)
    VMX_REFLECT(22)
END(vmx_iaccess_rights)

.org vmx_ia64_ivt+0x5300
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5300 Entry 23 (size 16 bundles) Data Access Rights (14,53)
ENTRY(vmx_daccess_rights)
    VMX_DBG_FAULT(23)
    VMX_REFLECT(23)
END(vmx_daccess_rights)

.org vmx_ia64_ivt+0x5400
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5400 Entry 24 (size 16 bundles) General Exception (5,32,34,36,38,39)
ENTRY(vmx_general_exception)
    VMX_DBG_FAULT(24)
    VMX_REFLECT(24)
    // VMX_FAULT(24)
END(vmx_general_exception)

.org vmx_ia64_ivt+0x5500
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5500 Entry 25 (size 16 bundles) Disabled FP-Register (35)
ENTRY(vmx_disabled_fp_reg)
    VMX_DBG_FAULT(25)
    VMX_REFLECT(25)
END(vmx_disabled_fp_reg)

.org vmx_ia64_ivt+0x5600
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5600 Entry 26 (size 16 bundles) Nat Consumption (11,23,37,50)
ENTRY(vmx_nat_consumption)
    VMX_DBG_FAULT(26)
    VMX_REFLECT(26)
END(vmx_nat_consumption)

.org vmx_ia64_ivt+0x5700
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5700 Entry 27 (size 16 bundles) Speculation (40)
ENTRY(vmx_speculation_vector)
    VMX_DBG_FAULT(27)
    VMX_REFLECT(27)
END(vmx_speculation_vector)

.org vmx_ia64_ivt+0x5800
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5800 Entry 28 (size 16 bundles) Reserved
    VMX_DBG_FAULT(28)
    VMX_FAULT(28)

.org vmx_ia64_ivt+0x5900
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5900 Entry 29 (size 16 bundles) Debug (16,28,56)
ENTRY(vmx_debug_vector)
    VMX_DBG_FAULT(29)
    VMX_FAULT(29)
END(vmx_debug_vector)

.org vmx_ia64_ivt+0x5a00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5a00 Entry 30 (size 16 bundles) Unaligned Reference (57)
ENTRY(vmx_unaligned_access)
    VMX_DBG_FAULT(30)
    VMX_REFLECT(30)
END(vmx_unaligned_access)

.org vmx_ia64_ivt+0x5b00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5b00 Entry 31 (size 16 bundles) Unsupported Data Reference (57)
ENTRY(vmx_unsupported_data_reference)
    VMX_DBG_FAULT(31)
    VMX_REFLECT(31)
END(vmx_unsupported_data_reference)

.org vmx_ia64_ivt+0x5c00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5c00 Entry 32 (size 16 bundles) Floating-Point Fault (64)
ENTRY(vmx_floating_point_fault)
    VMX_DBG_FAULT(32)
    VMX_REFLECT(32)
END(vmx_floating_point_fault)

.org vmx_ia64_ivt+0x5d00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5d00 Entry 33 (size 16 bundles) Floating Point Trap (66)
ENTRY(vmx_floating_point_trap)
    VMX_DBG_FAULT(33)
    VMX_REFLECT(33)
END(vmx_floating_point_trap)

.org vmx_ia64_ivt+0x5e00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5e00 Entry 34 (size 16 bundles) Lower Privilege Transfer Trap (66)
ENTRY(vmx_lower_privilege_trap)
    VMX_DBG_FAULT(34)
    VMX_REFLECT(34)
END(vmx_lower_privilege_trap)

.org vmx_ia64_ivt+0x5f00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5f00 Entry 35 (size 16 bundles) Taken Branch Trap (68)
ENTRY(vmx_taken_branch_trap)
    VMX_DBG_FAULT(35)
    VMX_REFLECT(35)
END(vmx_taken_branch_trap)

.org vmx_ia64_ivt+0x6000
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6000 Entry 36 (size 16 bundles) Single Step Trap (69)
ENTRY(vmx_single_step_trap)
    VMX_DBG_FAULT(36)
    VMX_REFLECT(36)
END(vmx_single_step_trap)

.org vmx_ia64_ivt+0x6100
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6100 Entry 37 (size 16 bundles) Virtualization Fault
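
/*
 * vmx_virtualization_fault: the fault cause (r24) and faulting opcode (r25)
 * are first saved into the vcpu structure.  Anything other than an RFI
 * emulation request (EVENT_RFI) is forwarded directly to
 * vmx_dispatch_virtualization_fault, which ends up in vmx_emulate.  For RFI,
 * if the guest's vifs.v bit is set the current register frame is discarded
 * (alloc with a zero-sized frame) before dispatching, presumably so the
 * emulated rfi can restore the frame recorded in vifs.
 */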

ENTRY(vmx_virtualization_fault)
    // VMX_DBG_FAULT(37)
    mov r31=pr
    mov r19=37
    adds r16 = IA64_VCPU_CAUSE_OFFSET,r21
    adds r17 = IA64_VCPU_OPCODE_OFFSET,r21
    ;;
    st8 [r16] = r24
    st8 [r17] = r25
    ;;
    cmp.ne p6,p0=EVENT_RFI, r24
    (p6) br.sptk vmx_dispatch_virtualization_fault
    ;;
    adds r18=IA64_VPD_BASE_OFFSET,r21
    ;;
    ld8 r18=[r18]
    ;;
    adds r18=IA64_VPD_VIFS_OFFSET,r18
    ;;
    ld8 r18=[r18]
    ;;
    tbit.z p6,p0=r18,63
    (p6) br.sptk vmx_dispatch_virtualization_fault
    ;;
    // if vifs.v == 1, discard the current register frame
    alloc r18=ar.pfs,0,0,0,0
    br.sptk vmx_dispatch_virtualization_fault
END(vmx_virtualization_fault)

.org vmx_ia64_ivt+0x6200
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6200 Entry 38 (size 16 bundles) Reserved
    VMX_DBG_FAULT(38)
    VMX_FAULT(38)

.org vmx_ia64_ivt+0x6300
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6300 Entry 39 (size 16 bundles) Reserved
    VMX_DBG_FAULT(39)
    VMX_FAULT(39)

.org vmx_ia64_ivt+0x6400
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6400 Entry 40 (size 16 bundles) Reserved
    VMX_DBG_FAULT(40)
    VMX_FAULT(40)

.org vmx_ia64_ivt+0x6500
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6500 Entry 41 (size 16 bundles) Reserved
    VMX_DBG_FAULT(41)
    VMX_FAULT(41)

.org vmx_ia64_ivt+0x6600
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6600 Entry 42 (size 16 bundles) Reserved
    VMX_DBG_FAULT(42)
    VMX_FAULT(42)

.org vmx_ia64_ivt+0x6700
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6700 Entry 43 (size 16 bundles) Reserved
    VMX_DBG_FAULT(43)
    VMX_FAULT(43)

.org vmx_ia64_ivt+0x6800
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6800 Entry 44 (size 16 bundles) Reserved
    VMX_DBG_FAULT(44)
    VMX_FAULT(44)

.org vmx_ia64_ivt+0x6900
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6900 Entry 45 (size 16 bundles) IA-32 Exception (17,18,29,41,42,43,44,58,60,61,62,72,73,75,76,77)
ENTRY(vmx_ia32_exception)
    VMX_DBG_FAULT(45)
    VMX_FAULT(45)
END(vmx_ia32_exception)

.org vmx_ia64_ivt+0x6a00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6a00 Entry 46 (size 16 bundles) IA-32 Intercept (30,31,59,70,71)
ENTRY(vmx_ia32_intercept)
    VMX_DBG_FAULT(46)
    VMX_FAULT(46)
END(vmx_ia32_intercept)

.org vmx_ia64_ivt+0x6b00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6b00 Entry 47 (size 16 bundles) IA-32 Interrupt (74)
ENTRY(vmx_ia32_interrupt)
    VMX_DBG_FAULT(47)
    VMX_FAULT(47)
END(vmx_ia32_interrupt)

.org vmx_ia64_ivt+0x6c00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6c00 Entry 48 (size 16 bundles) Reserved
    VMX_DBG_FAULT(48)
    VMX_FAULT(48)

.org vmx_ia64_ivt+0x6d00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6d00 Entry 49 (size 16 bundles) Reserved
    VMX_DBG_FAULT(49)
    VMX_FAULT(49)

.org vmx_ia64_ivt+0x6e00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6e00 Entry 50 (size 16 bundles) Reserved
    VMX_DBG_FAULT(50)
    VMX_FAULT(50)

.org vmx_ia64_ivt+0x6f00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6f00 Entry 51 (size 16 bundles) Reserved
    VMX_DBG_FAULT(51)
    VMX_FAULT(51)

.org vmx_ia64_ivt+0x7000
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7000 Entry 52 (size 16 bundles) Reserved
    VMX_DBG_FAULT(52)
    VMX_FAULT(52)

.org vmx_ia64_ivt+0x7100
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7100 Entry 53 (size 16 bundles) Reserved
    VMX_DBG_FAULT(53)
    VMX_FAULT(53)

.org vmx_ia64_ivt+0x7200
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7200 Entry 54 (size 16 bundles) Reserved
    VMX_DBG_FAULT(54)
    VMX_FAULT(54)

.org vmx_ia64_ivt+0x7300
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7300 Entry 55 (size 16 bundles) Reserved
    VMX_DBG_FAULT(55)
    VMX_FAULT(55)

.org vmx_ia64_ivt+0x7400
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7400 Entry 56 (size 16 bundles) Reserved
    VMX_DBG_FAULT(56)
    VMX_FAULT(56)

.org vmx_ia64_ivt+0x7500
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7500 Entry 57 (size 16 bundles) Reserved
    VMX_DBG_FAULT(57)
    VMX_FAULT(57)

.org vmx_ia64_ivt+0x7600
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7600 Entry 58 (size 16 bundles) Reserved
    VMX_DBG_FAULT(58)
    VMX_FAULT(58)

.org vmx_ia64_ivt+0x7700
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7700 Entry 59 (size 16 bundles) Reserved
    VMX_DBG_FAULT(59)
    VMX_FAULT(59)

.org vmx_ia64_ivt+0x7800
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7800 Entry 60 (size 16 bundles) Reserved
    VMX_DBG_FAULT(60)
    VMX_FAULT(60)

.org vmx_ia64_ivt+0x7900
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7900 Entry 61 (size 16 bundles) Reserved
    VMX_DBG_FAULT(61)
    VMX_FAULT(61)

.org vmx_ia64_ivt+0x7a00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7a00 Entry 62 (size 16 bundles) Reserved
    VMX_DBG_FAULT(62)
    VMX_FAULT(62)

.org vmx_ia64_ivt+0x7b00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7b00 Entry 63 (size 16 bundles) Reserved
    VMX_DBG_FAULT(63)
    VMX_FAULT(63)

.org vmx_ia64_ivt+0x7c00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7c00 Entry 64 (size 16 bundles) Reserved
    VMX_DBG_FAULT(64)
    VMX_FAULT(64)

.org vmx_ia64_ivt+0x7d00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7d00 Entry 65 (size 16 bundles) Reserved
    VMX_DBG_FAULT(65)
    VMX_FAULT(65)

.org vmx_ia64_ivt+0x7e00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7e00 Entry 66 (size 16 bundles) Reserved
    VMX_DBG_FAULT(66)
    VMX_FAULT(66)

.org vmx_ia64_ivt+0x7f00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7f00 Entry 67 (size 16 bundles) Reserved
    VMX_DBG_FAULT(67)
    VMX_FAULT(67)

.org vmx_ia64_ivt+0x8000
// There is no particular reason for this code to be here, other than that
// there happens to be space here that would go unused otherwise. If this
// fault ever gets "unreserved", simply move the following code to a more
// suitable spot...
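
/*
 * The vmx_dispatch_* helpers below all follow the same pattern: save the
 * minimal interrupted state with VMX_SAVE_MIN_WITH_COVER_R19, allocate an
 * output frame and load the arguments for the C handler, turn interruption
 * collection (psr.ic) back on (and psr.i if it was enabled), save the
 * remaining state with VMX_SAVE_REST (plus VMX_SAVE_EXTRA on the paths that
 * return through ia64_leave_hypervisor_prepare), and finally branch to the
 * C handler with rp pre-loaded so that its return resumes the guest through
 * ia64_leave_hypervisor.
 */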

ENTRY(vmx_dispatch_reflection)
    /*
     * Input:
     *  psr.ic: off
     *  r19:    intr type (offset into ivt, see ia64_int.h)
     *  r31:    contains saved predicates (pr)
     */
    VMX_SAVE_MIN_WITH_COVER_R19
    alloc r14=ar.pfs,0,0,5,0
    mov out0=cr.ifa
    mov out1=cr.isr
    mov out2=cr.iim
    mov out3=r15
    adds r3=8,r2        // set up second base pointer
    ;;
    ssm psr.ic
    ;;
    srlz.i              // guarantee that interruption collection is on
    ;;
    (p15) ssm psr.i     // restore psr.i
    movl r14=ia64_leave_hypervisor
    ;;
    VMX_SAVE_REST
    mov rp=r14
    ;;
    adds out4=16,r12
    br.call.sptk.many b6=vmx_reflect_interruption
END(vmx_dispatch_reflection)

ENTRY(vmx_dispatch_virtualization_fault)
    VMX_SAVE_MIN_WITH_COVER_R19
    ;;
    alloc r14=ar.pfs,0,0,2,0    // now it's safe (must be first in insn group!)
    mov out0=r13        // vcpu
    adds r3=8,r2        // set up second base pointer
    ;;
    ssm psr.ic
    ;;
    srlz.i              // guarantee that interruption collection is on
    ;;
    (p15) ssm psr.i     // restore psr.i
    movl r14=ia64_leave_hypervisor_prepare
    ;;
    VMX_SAVE_REST
    VMX_SAVE_EXTRA
    mov rp=r14
    ;;
    adds out1=16,sp     // regs
    br.call.sptk.many b6=vmx_emulate
END(vmx_dispatch_virtualization_fault)

ENTRY(vmx_dispatch_vexirq)
    VMX_SAVE_MIN_WITH_COVER_R19
    alloc r14=ar.pfs,0,0,1,0
    mov out0=r13

    ssm psr.ic
    ;;
    srlz.i              // guarantee that interruption collection is on
    ;;
    (p15) ssm psr.i     // restore psr.i
    adds r3=8,r2        // set up second base pointer
    ;;
    VMX_SAVE_REST
    movl r14=ia64_leave_hypervisor
    ;;
    mov rp=r14
    br.call.sptk.many b6=vmx_vexirq
END(vmx_dispatch_vexirq)

ENTRY(vmx_dispatch_itlb_miss)
    VMX_SAVE_MIN_WITH_COVER_R19
    alloc r14=ar.pfs,0,0,3,0
    mov out0=cr.ifa
    mov out1=r15
    adds r3=8,r2        // set up second base pointer
    ;;
    ssm psr.ic
    ;;
    srlz.i              // guarantee that interruption collection is on
    ;;
    (p15) ssm psr.i     // restore psr.i
    movl r14=ia64_leave_hypervisor
    ;;
    VMX_SAVE_REST
    mov rp=r14
    ;;
    adds out2=16,r12
    br.call.sptk.many b6=vmx_hpw_miss
END(vmx_dispatch_itlb_miss)

ENTRY(vmx_dispatch_dtlb_miss)
    VMX_SAVE_MIN_WITH_COVER_R19
    alloc r14=ar.pfs,0,0,3,0
    mov out0=cr.ifa
    mov out1=r15
    adds r3=8,r2        // set up second base pointer
    ;;
    ssm psr.ic
    ;;
    srlz.i              // guarantee that interruption collection is on
    ;;
    (p15) ssm psr.i     // restore psr.i
    movl r14=ia64_leave_hypervisor_prepare
    ;;
    VMX_SAVE_REST
    VMX_SAVE_EXTRA
    mov rp=r14
    ;;
    adds out2=16,r12
    br.call.sptk.many b6=vmx_hpw_miss
END(vmx_dispatch_dtlb_miss)

ENTRY(vmx_dispatch_break_fault)
    VMX_SAVE_MIN_WITH_COVER_R19
    ;;
    ;;
    alloc r14=ar.pfs,0,0,4,0    // now it's safe (must be first in insn group!)
    mov out0=cr.ifa
    mov out2=cr.isr     // FIXME: pity to make this slow access twice
    mov out3=cr.iim     // FIXME: pity to make this slow access twice
    adds r3=8,r2        // set up second base pointer
    ;;
    ssm psr.ic
    ;;
    srlz.i              // guarantee that interruption collection is on
    ;;
    (p15)ssm psr.i      // restore psr.i
    movl r14=ia64_leave_hypervisor
    ;;
    VMX_SAVE_REST
    mov rp=r14
    ;;
    adds out1=16,sp
    br.call.sptk.many b6=vmx_ia64_handle_break
    ;;
END(vmx_dispatch_break_fault)

ENTRY(vmx_dispatch_interrupt)
    VMX_SAVE_MIN_WITH_COVER_R19     // uses r31; defines r2 and r3
    ;;
    alloc r14=ar.pfs,0,0,2,0    // must be first in an insn group
    mov out0=cr.ivr     // pass cr.ivr as first arg
    adds r3=8,r2        // set up second base pointer for SAVE_REST
    ;;
    ssm psr.ic
    ;;
    srlz.i
    ;;
    (p15) ssm psr.i
    movl r14=ia64_leave_hypervisor
    ;;
    VMX_SAVE_REST
    mov rp=r14
    ;;
    add out1=16,sp      // pass pointer to pt_regs as second arg
    br.call.sptk.many b6=ia64_handle_irq
END(vmx_dispatch_interrupt)