ia64/xen-unstable

view xen/arch/ia64/vmx/vmx_ivt.S @ 18090:1e5d42cf61ec

[IA64] kexec: Handle EFI UC area correctly in vmx_alt_dtlb

Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
Acked-by: Simon Horman <horms@verge.net.au>
author Isaku Yamahata <yamahata@valinux.co.jp>
date Tue Jul 22 12:15:02 2008 +0900 (2008-07-22)
parents ef290f39ae6b
children 5a61dba2cf0a
line source
1 /*
2 * arch/ia64/kernel/vmx_ivt.S
3 *
4 * Copyright (C) 1998-2001, 2003 Hewlett-Packard Co
5 * Stephane Eranian <eranian@hpl.hp.com>
6 * David Mosberger <davidm@hpl.hp.com>
7 * Copyright (C) 2000, 2002-2003 Intel Co
8 * Asit Mallick <asit.k.mallick@intel.com>
9 * Suresh Siddha <suresh.b.siddha@intel.com>
10 * Kenneth Chen <kenneth.w.chen@intel.com>
11 * Fenghua Yu <fenghua.yu@intel.com>
12 *
13 *
14 * 00/08/23 Asit Mallick <asit.k.mallick@intel.com> TLB handling for SMP
15 * 00/12/20 David Mosberger-Tang <davidm@hpl.hp.com> DTLB/ITLB handler now uses virtual PT.
16 *
17 * 05/3/20 Xuefei Xu (Anthony Xu) (anthony.xu@intel.com)
18 * Supporting Intel virtualization architecture
19 *
20 */
22 /*
23 * This file defines the interruption vector table used by the CPU.
24 * It does not include one entry per possible cause of interruption.
25 *
26 * The first 20 entries of the table contain 64 bundles each while the
27 * remaining 48 entries contain only 16 bundles each.
28 *
29 * The 64 bundles are used to allow inlining the whole handler for critical
30 * interruptions like TLB misses.
31 *
32 * For each entry, the comment is as follows:
33 *
34 * // 0x1c00 Entry 7 (size 64 bundles) Data Key Miss (12,51)
35 * entry offset ----/ / / / /
36 * entry number ---------/ / / /
37 * size of the entry -------------/ / /
38 * vector name -------------------------------------/ /
39 * interruptions triggering this vector ----------------------/
40 *
41 * The table is 32KB in size and must be aligned on 32KB boundary.
42 * (The CPU ignores the 15 lower bits of the address)
43 *
44 * Table is based upon EAS2.6 (Oct 1999)
45 */
47 #include <linux/config.h>
49 #include <asm/asmmacro.h>
50 #include <asm/break.h>
51 #include <asm/ia32.h>
52 #include <asm/kregs.h>
53 #include <asm/offsets.h>
54 #include <asm/pgtable.h>
55 #include <asm/processor.h>
56 #include <asm/ptrace.h>
57 #include <asm/system.h>
58 #include <asm/thread_info.h>
59 #include <asm/unistd.h>
60 #include <asm/vhpt.h>
61 #include <asm/virt_event.h>
62 #include <asm/vmx_phy_mode.h>
63 #include <xen/errno.h>
// PSR_DEFAULT_BITS: extra PSR bits OR-ed in wherever interruption
// collection is re-enabled ("ssm psr.ic | PSR_DEFAULT_BITS" below).
// Currently psr.ac (alignment check); the #else arm selects none.
65 #if 1
66 # define PSR_DEFAULT_BITS psr.ac
67 #else
68 # define PSR_DEFAULT_BITS 0
69 #endif
// VMX_DBG_FAULT(i): with VTI_DEBUG defined, append one record of
// (cr.iip, cr.ipsr, cr.ifa, vector i) to the IVT debug ring reached
// through r21, then advance the ring cursor modulo IVT_DEBUG_MASK.
// If the fault came from VMM context (psr.vm == 0), r21 is first
// reloaded from the per-CPU "current" kernel register, since the
// guest-time value may be stale.  Clobbers r16-r23 and p6; the
// caller's predicates are preserved via r31.
// (No comments inside the body: every line must end with '\'.)
72 #ifdef VTI_DEBUG
73 #define IVT_DEBUG_MASK (IVT_DEBUG_SIZE * (IVT_DEBUG_MAX - 1))
74 #define VMX_DBG_FAULT(i) \
75 mov r31=pr; \
76 mov r20=cr.ipsr;; \
77 tbit.z p6,p0=r20,IA64_PSR_VM_BIT;; \
78 (p6)movl r21=THIS_CPU(cpu_kr)+ \
79 IA64_KR_CURRENT_OFFSET;; \
80 (p6)ld8 r21=[r21]; \
81 mov pr=r31;; \
82 add r16=IVT_CUR_OFS,r21; \
83 add r17=IVT_DBG_OFS,r21;; \
84 ld8 r18=[r16];; \
85 add r17=r18,r17; \
86 mov r19=cr.iip; \
87 mov r22=cr.ifa; \
88 mov r23=i;; \
89 st8 [r17]=r19,8; \
90 add r18=IVT_DEBUG_SIZE,r18;; \
91 st8 [r17]=r20,8; \
92 mov r19=IVT_DEBUG_MASK;; \
93 st8 [r17]=r22,8; \
94 and r18=r19,r18;; \
95 st8 [r17]=r23; \
96 st8 [r16]=r18;;
97 #else
// Debugging disabled: the macro expands to nothing.
98 # define VMX_DBG_FAULT(i)
99 #endif
101 #include "vmx_minstate.h"
103 #define MINSTATE_VIRT /* needed by minstate.h */
104 #include "minstate.h"
// VMX_FAULT(n): terminal path for vector n.  Defines the label
// vmx_fault_<n> (branched to elsewhere, e.g. from the "alt" handlers)
// and jumps to the common fault dispatcher with the vector in r19.
107 #define VMX_FAULT(n) \
108 vmx_fault_##n:; \
109 mov r19=n; \
110 br.sptk.many dispatch_to_fault_handler; \
111 ;;
// VMX_REFLECT(n): if the interruption was raised while the guest was
// running (psr.vm set -> p7), reflect it back into the guest via
// vmx_dispatch_reflection; otherwise (VMM context) treat it as a
// hypervisor fault.  r19 = vector number, r31 = saved predicates.
113 #define VMX_REFLECT(n) \
114 mov r31=pr; \
115 mov r19=n; /* prepare to save predicates */ \
116 mov r29=cr.ipsr; \
117 ;; \
118 tbit.z p6,p7=r29,IA64_PSR_VM_BIT; \
119 (p7)br.sptk.many vmx_dispatch_reflection; \
120 br.sptk.many dispatch_to_fault_handler
122 #ifdef CONFIG_VMX_PANIC
// vmx_panic: park the CPU on a branch-to-self.  Deliberate infinite
// loop used as a last-resort stop / debugger target.
123 GLOBAL_ENTRY(vmx_panic)
124 br.sptk.many vmx_panic
125 ;;
126 END(vmx_panic)
127 #endif
// The interruption vector table itself (presumably programmed into
// cr.iva for VMX vcpus — hence the mandatory 32KB alignment).  Every
// entry below is pinned to its architectural offset with .org, so an
// entry that overflows its 64- or 16-bundle slot fails to assemble.
132 .section .text.ivt,"ax"
134 .align 32768 // align on 32KB boundary
135 .global vmx_ia64_ivt
136 vmx_ia64_ivt:
137 /////////////////////////////////////////////////////////////////////////////////////////
138 // 0x0000 Entry 0 (size 64 bundles) VHPT Translation (8,20,47)
// Not handled inline: log (if VTI_DEBUG) and hand straight off to the
// common fault handler with vector 0.
139 ENTRY(vmx_vhpt_miss)
140 VMX_DBG_FAULT(0)
141 VMX_FAULT(0)
142 END(vmx_vhpt_miss)
144 .org vmx_ia64_ivt+0x400
145 /////////////////////////////////////////////////////////////////////////////////////////
146 // 0x0400 Entry 1 (size 64 bundles) ITLB (21)
// Guest instruction-TLB miss.  Walk the VHPT collision chain for the
// faulting address; on a hit, swap the found entry with the chain head
// (pseudo-LRU) and insert it with itc.i, then resume the guest via
// ia64_vmm_entry.  On a chain miss, fall out to vmx_dispatch_tlb_miss
// with r19 = vector 1.  r21 is assumed to hold the current vcpu.
147 ENTRY(vmx_itlb_miss)
148 VMX_DBG_FAULT(1)
149 mov r29=cr.ipsr
150 mov r31 = pr
151 ;;
152 tbit.z p6,p7=r29,IA64_PSR_VM_BIT
// Miss raised in VMM context (psr.vm == 0): identity-map it instead.
153 (p6) br.sptk vmx_alt_itlb_miss_vmm
154 mov r16 = cr.ifa
155 ;;
// r17/r18 = VHPT head entry for the address, r20 = tag to match.
156 thash r17 = r16
157 ttag r20 = r16
158 ;;
159 mov r18 = r17
160 adds r28 = VLE_TITAG_OFFSET,r17
161 adds r19 = VLE_CCHAIN_OFFSET, r17
162 ;;
163 ld8 r17 = [r19] // Read chain
164 ;;
165 vmx_itlb_loop:
166 cmp.eq p6,p0 = r0, r17 // End of chain ?
167 (p6)br vmx_itlb_out
168 ;;
169 adds r16 = VLE_TITAG_OFFSET, r17
170 adds r19 = VLE_CCHAIN_OFFSET, r17
171 ;;
172 ld8 r24 = [r16] // Read tag
173 ld8 r23 = [r19] // Read chain
174 ;;
175 lfetch [r23]
176 cmp.eq p6,p7 = r20, r24 // does tag match ?
177 ;;
178 (p7)mov r17 = r23; // No: entry = chain
179 (p7)br.sptk vmx_itlb_loop // again
180 ;;
181 // Swap the first entry with the entry found in the collision chain
182 // to speed up next hardware search (and keep LRU).
183 // In comments 1 stands for the first entry and 2 for the found entry.
184 ld8 r29 = [r28] // Read tag of 1
185 dep r22 = -1,r24,63,1 // set ti=1 of 2 (to disable it during the swap)
186 ;;
187 ld8 r25 = [r17] // Read value of 2
188 ld8 r27 = [r18] // Read value of 1
189 st8 [r16] = r29, VLE_ITIR_OFFSET - VLE_TITAG_OFFSET // Write tag of 2
190 st8 [r28] = r22, VLE_ITIR_OFFSET - VLE_TITAG_OFFSET // Write tag of 1
191 mf
192 ;;
193 ld8 r29 = [r16] // read itir of 2
194 ld8 r22 = [r28] // read itir of 1
195 st8 [r18] = r25 // Write value of 1
196 st8 [r17] = r27 // Write value of 2
197 ;;
198 st8 [r16] = r22 // Write itir of 2
199 st8 [r28] = r29, VLE_TITAG_OFFSET - VLE_ITIR_OFFSET // write itir of 1
200 ;;
// Release store re-enables the head entry only once it is consistent.
201 st8.rel [r28] = r24 // Write tag of 1 (with ti=0)
202 // Insert the translation entry
203 itc.i r25
204 dv_serialize_data
205 // Resume
206 mov r17=cr.isr
207 mov r23=r31
208 mov r22=b0
209 adds r16=IA64_VPD_BASE_OFFSET,r21
210 ;;
211 ld8 r18=[r16]
212 ;;
213 adds r19=VPD(VPSR),r18
214 ;;
// r19 = guest virtual PSR; ia64_vmm_entry re-enters the guest.
215 ld8 r19=[r19]
216 br.sptk ia64_vmm_entry
217 ;;
218 vmx_itlb_out:
219 mov r19 = 1
220 br.sptk vmx_dispatch_tlb_miss
221 VMX_FAULT(1);
222 END(vmx_itlb_miss)
224 .org vmx_ia64_ivt+0x0800
225 /////////////////////////////////////////////////////////////////////////////////////////
226 // 0x0800 Entry 2 (size 64 bundles) DTLB (9,48)
// Guest data-TLB miss.  Mirrors vmx_itlb_miss above line for line
// (see the comments there): walk the VHPT collision chain, on a hit
// swap the found entry to the chain head and insert with itc.d, then
// resume the guest; on a miss dispatch with r19 = vector 2.
227 ENTRY(vmx_dtlb_miss)
228 VMX_DBG_FAULT(2)
229 mov r29=cr.ipsr
230 mov r31 = pr
231 ;;
232 tbit.z p6,p7=r29,IA64_PSR_VM_BIT
// VMM-context miss: identity-map via the alternate handler.
233 (p6)br.sptk vmx_alt_dtlb_miss_vmm
234 mov r16 = cr.ifa
235 ;;
236 thash r17 = r16 // r17 = VHPT head entry for the address
237 ttag r20 = r16 // r20 = tag to match in the chain
238 ;;
239 mov r18 = r17
240 adds r28 = VLE_TITAG_OFFSET,r17
241 adds r19 = VLE_CCHAIN_OFFSET, r17
242 ;;
243 ld8 r17 = [r19] // read chain
244 ;;
245 vmx_dtlb_loop:
246 cmp.eq p6,p0 = r0, r17 // end of chain?
247 (p6)br vmx_dtlb_out
248 ;;
249 adds r16 = VLE_TITAG_OFFSET, r17
250 adds r19 = VLE_CCHAIN_OFFSET, r17
251 ;;
252 ld8 r24 = [r16] // read tag
253 ld8 r23 = [r19] // read chain
254 ;;
255 lfetch [r23]
256 cmp.eq p6,p7 = r20, r24 // does tag match?
257 ;;
258 (p7)mov r17 = r23;
259 (p7)br.sptk vmx_dtlb_loop
260 ;;
// Swap head entry ("1") with found entry ("2"), as in vmx_itlb_miss.
261 ld8 r29 = [r28]
262 dep r22 = -1,r24,63,1 //set ti=1
263 ;;
264 ld8 r25 = [r17]
265 ld8 r27 = [r18]
266 st8 [r16] = r29, VLE_ITIR_OFFSET - VLE_TITAG_OFFSET
267 st8 [r28] = r22, VLE_ITIR_OFFSET - VLE_TITAG_OFFSET
268 mf
269 ;;
270 ld8 r29 = [r16]
271 ld8 r22 = [r28]
272 st8 [r18] = r25
273 st8 [r17] = r27
274 ;;
275 st8 [r16] = r22
276 st8 [r28] = r29, VLE_TITAG_OFFSET - VLE_ITIR_OFFSET
277 ;;
// Release store re-enables the head entry, then insert the mapping.
278 st8.rel [r28] = r24
279 itc.d r25
280 dv_serialize_data
// Resume the guest via ia64_vmm_entry (r19 = guest VPSR).
281 mov r17=cr.isr
282 mov r23=r31
283 mov r22=b0
284 adds r16=IA64_VPD_BASE_OFFSET,r21
285 ;;
286 ld8 r18=[r16]
287 ;;
288 adds r19=VPD(VPSR),r18
289 ;;
290 ld8 r19=[r19]
291 br.sptk ia64_vmm_entry
292 ;;
293 vmx_dtlb_out:
294 mov r19 = 2
295 br.sptk vmx_dispatch_tlb_miss
296 VMX_FAULT(2);
297 END(vmx_dtlb_miss)
299 .org vmx_ia64_ivt+0x0c00
300 /////////////////////////////////////////////////////////////////////////////////////////
301 // 0x0c00 Entry 3 (size 64 bundles) Alt ITLB (19)
// Alternate ITLB miss.  For a miss in VMM context, synthesize an
// identity mapping (PAGE_KERNEL, granule-sized) and insert it
// directly.  For a guest miss, dispatch only if the guest is in
// metaphysical (PHY_D) mode; otherwise raise vmx_fault_3.
302 ENTRY(vmx_alt_itlb_miss)
303 VMX_DBG_FAULT(3)
304 mov r29=cr.ipsr
305 mov r31 = pr
306 adds r22=IA64_VCPU_MMU_MODE_OFFSET, r21
307 ;;
308 tbit.nz p7,p0=r29,IA64_PSR_VM_BIT
309 (p7)br.spnt vmx_alt_itlb_miss_dom
310 vmx_alt_itlb_miss_vmm:
311 mov r16=cr.ifa // get address that caused the TLB miss
312 movl r17=PAGE_KERNEL
313 mov r24=cr.ipsr
314 movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
315 ;;
316 and r19=r19,r16 // clear ed, reserved bits, and PTE control bits
317 extr.u r18=r16,XEN_VIRT_UC_BIT, 15 // extract UC bit
318 ;;
319 or r19=r17,r19 // insert PTE control bits into r19
320 mov r20=IA64_GRANULE_SHIFT<<2
321 ;;
// Only bit 0 of r18 (the UC bit itself) is deposited here.
322 dep r19=r18,r19,4,1 // set bit 4 (uncached) if the access was to UC region
323 mov cr.itir=r20
324 ;;
325 itc.i r19 // insert the TLB entry
326 mov pr=r31,-1
327 rfi
328 ;;
329 vmx_alt_itlb_miss_dom:
330 ld1 r23=[r22] // Load mmu_mode
331 ;;
332 cmp.eq p6,p7=VMX_MMU_PHY_D,r23
333 (p7)br.sptk vmx_fault_3
334 ;;
335 mov r19=3
336 br.sptk vmx_dispatch_tlb_miss
337 VMX_FAULT(3);
338 END(vmx_alt_itlb_miss)
341 .org vmx_ia64_ivt+0x1000
342 /////////////////////////////////////////////////////////////////////////////////////////
343 // 0x1000 Entry 4 (size 64 bundles) Alt DTLB (7,46)
// Alternate DTLB miss.  VMM-context path builds an identity mapping,
// marking it uncached when the address is in the Xen UC range or the
// EFI UC region (region 6); speculative non-lfetch accesses instead
// get psr.ed set and no translation (p6/p7 selection below).  Guest
// misses dispatch only in metaphysical mode, else raise vmx_fault_4.
344 ENTRY(vmx_alt_dtlb_miss)
345 VMX_DBG_FAULT(4)
346 mov r29=cr.ipsr
347 mov r31=pr
348 adds r22=IA64_VCPU_MMU_MODE_OFFSET, r21
349 ;;
350 tbit.nz p7,p0=r29,IA64_PSR_VM_BIT
351 (p7)br.spnt vmx_alt_dtlb_miss_dom
352 vmx_alt_dtlb_miss_vmm:
353 mov r16=cr.ifa // get address that caused the TLB miss
354 ;;
355 #ifdef CONFIG_VIRTUAL_FRAME_TABLE
356 // Test for the address of virtual frame_table
357 shr r22=r16,56;;
358 cmp.eq p8,p0=((VIRT_FRAME_TABLE_ADDR>>56)&0xff)-0x100,r22
359 (p8)br.cond.sptk frametable_miss ;;
360 #endif
361 movl r17=PAGE_KERNEL
362 mov r20=cr.isr
363 movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
364 mov r24=cr.ipsr
365 ;;
366 and r22=IA64_ISR_CODE_MASK,r20 // get the isr.code field
367 tbit.nz p6,p7=r20,IA64_ISR_SP_BIT // is speculation bit on?
368 tbit.nz p8,p0=r16,XEN_VIRT_UC_BIT // is Xen UC region?
369 extr.u r23=r16,59,5 // iva fault address
370 // 0xc0000000_00000000 >> 59 = 0x18 EFI UC address
371 // 0xe0000000_00000000 >> 59 = 0x1c EFI address
373 and r19=r19,r16 // clear ed, reserved bits, and PTE control bits
374 tbit.nz p9,p0=r20,IA64_ISR_NA_BIT // is non-access bit on?
375 ;;
376 cmp.eq.or p8,p0=0x18,r23 // Region 6 is UC for EFI
377 (p9)cmp.eq.or.andcm p6,p7=IA64_ISR_CODE_LFETCH,r22 // check isr.code field
378 dep r24=-1,r24,IA64_PSR_ED_BIT,1
379 or r19=r19,r17 // insert PTE control bits into r19
380 mov r20=IA64_GRANULE_SHIFT<<2
381 ;;
382 (p8)dep r19=-1,r19,4,1 // set bit 4 (uncached) if access to UC area
// p6 = deferrable speculative access: set ipsr.ed instead of mapping.
384 (p6)mov cr.ipsr=r24
385 mov cr.itir=r20
386 ;;
387 (p7)itc.d r19 // insert the TLB entry
388 mov pr=r31,-1
389 rfi
390 ;;
391 vmx_alt_dtlb_miss_dom:
392 ld1 r23=[r22] // Load mmu_mode
393 ;;
394 cmp.eq p6,p7=VMX_MMU_PHY_D,r23
395 (p7)br.sptk vmx_fault_4
396 ;;
397 mov r19=4
398 br.sptk vmx_dispatch_tlb_miss
399 VMX_FAULT(4);
400 END(vmx_alt_dtlb_miss)
402 .org vmx_ia64_ivt+0x1400
403 /////////////////////////////////////////////////////////////////////////////////////////
404 // 0x1400 Entry 5 (size 64 bundles) Data nested TLB (6,45)
// Nested DTLB miss: a TLB miss taken while already inside a handler.
// The faulting handler is expected to have preloaded r30 with a
// continuation address (see vmx_dirty_bit); we branch back to it.
// If it somehow fires in guest context (psr.vm set), it is fatal.
// NOTE: b0 is clobbered here — callers must save it (they do, in r22).
405 ENTRY(vmx_nested_dtlb_miss)
406 VMX_DBG_FAULT(5)
407 mov r29=cr.ipsr
408 mov b0=r30
409 ;;
410 tbit.z p6,p0=r29,IA64_PSR_VM_BIT
411 (p6)br.sptk b0 // return to the continuation point
412 VMX_FAULT(5)
413 END(vmx_nested_dtlb_miss)
415 .org vmx_ia64_ivt+0x1800
416 /////////////////////////////////////////////////////////////////////////////////////////
417 // 0x1800 Entry 6 (size 64 bundles) Instruction Key Miss (24)
// Reflected straight into the guest (or fault-dispatched from VMM).
418 ENTRY(vmx_ikey_miss)
419 VMX_DBG_FAULT(6)
420 VMX_REFLECT(6)
421 END(vmx_ikey_miss)
423 .org vmx_ia64_ivt+0x1c00
424 /////////////////////////////////////////////////////////////////////////////////////////
425 // 0x1c00 Entry 7 (size 64 bundles) Data Key Miss (12,51)
// Reflected straight into the guest (or fault-dispatched from VMM).
426 ENTRY(vmx_dkey_miss)
427 VMX_DBG_FAULT(7)
428 VMX_REFLECT(7)
429 END(vmx_dkey_miss)
431 .org vmx_ia64_ivt+0x2000
432 /////////////////////////////////////////////////////////////////////////////////////////
433 // 0x2000 Entry 8 (size 64 bundles) Dirty-bit (54)
// Guest dirty-bit fault (shadow page tables).  Translate the faulting
// address with tpa while temporarily clearing ipsr.vm; the tpa itself
// may raise a nested DTLB miss, which resumes at dirty_bit_tpa_fail
// via r30 (see vmx_nested_dtlb_miss).  On success, dispatch to the
// shadow-fault handler; on tpa failure, just re-enter the guest and
// retry the access.
434 ENTRY(vmx_dirty_bit)
435 VMX_DBG_FAULT(8)
436 mov r28=cr.ipsr
437 mov r31=pr
438 ;;
439 mov r19=cr.ifa
440 tbit.z p6,p0=r28,IA64_PSR_VM_BIT
441 (p6)br.spnt.few vmx_fault_8
442 // Prepare for nested dtlb miss
443 mov r22=b0
444 dep.z r29=r28,IA64_PSR_VM_BIT,1
445 ;;
446 mov cr.ipsr=r29 // ipsr.vm=0
447 movl r30=dirty_bit_tpa_fail
448 ;;
449 tpa r19=r19 // possibly nested dtlb miss?
450 mov cr.ipsr=r28 // ipsr.vm=1
451 br.sptk vmx_dispatch_shadow_fault
452 VMX_FAULT(8)
453 dirty_bit_tpa_fail:
454 // Resume & Retry
455 mov cr.ipsr=r28 // ipsr.vm=1
456 mov r17=cr.isr
457 mov r23=r31
458 // mov r22=b0 // b0 is clobbered in vmx_nested_dtlb_miss
459 adds r16=IA64_VPD_BASE_OFFSET,r21
460 ;;
461 ld8 r18=[r16]
462 ;;
463 adds r19=VPD(VPSR),r18
464 ;;
// r19 = guest virtual PSR; ia64_vmm_entry re-enters the guest.
465 ld8 r19=[r19]
466 br.sptk ia64_vmm_entry
467 ;;
468 END(vmx_dirty_bit)
470 .org vmx_ia64_ivt+0x2400
471 /////////////////////////////////////////////////////////////////////////////////////////
472 // 0x2400 Entry 9 (size 64 bundles) Instruction Access-bit (27)
// Reflected into the guest (or fault-dispatched from VMM context).
473 ENTRY(vmx_iaccess_bit)
474 VMX_DBG_FAULT(9)
475 VMX_REFLECT(9)
476 END(vmx_iaccess_bit)
478 .org vmx_ia64_ivt+0x2800
479 /////////////////////////////////////////////////////////////////////////////////////////
480 // 0x2800 Entry 10 (size 64 bundles) Data Access-bit (15,55)
// Reflected into the guest (or fault-dispatched from VMM context).
481 ENTRY(vmx_daccess_bit)
482 VMX_DBG_FAULT(10)
483 VMX_REFLECT(10)
484 END(vmx_daccess_bit)
486 .org vmx_ia64_ivt+0x2c00
487 /////////////////////////////////////////////////////////////////////////////////////////
488 // 0x2c00 Entry 11 (size 64 bundles) Break instruction (33)
// break.b fault.  Fast path: if the break came from the guest's
// ring 0 with the vcpu's configured break immediate and an in-range
// hypercall number in r2, enter the streamlined hypercall path below
// (modelled on the Linux ia64 syscall fast path).  Anything else is
// handed to vmx_dispatch_break_fault.
489 ENTRY(vmx_break_fault)
490 VMX_DBG_FAULT(11)
491 mov r31=pr
492 mov r19=11
493 mov r17=cr.iim
494 mov r29=cr.ipsr
495 ;;
496 tbit.z p6,p0=r29,IA64_PSR_VM_BIT
497 (p6)br.sptk.many vmx_dispatch_break_fault /* make sure before access [r21] */
498 adds r22=IA64_VCPU_BREAKIMM_OFFSET, r21
499 ;;
// Reject if: hypercall nr (r2) out of range, iim != vcpu breakimm,
// or guest privilege level != 0.  Any sets p6 -> slow path.
500 ld4 r22=[r22]
501 extr.u r24=r29,IA64_PSR_CPL0_BIT,2
502 cmp.ltu p6,p0=NR_hypercalls,r2
503 ;;
504 cmp.ne.or p6,p0=r22,r17
505 cmp.ne.or p6,p0=r0,r24
506 (p6) br.sptk.many vmx_dispatch_break_fault
507 ;;
508 /*
509 * The streamlined system call entry/exit paths only save/restore the initial part
510 * of pt_regs. This implies that the callers of system-calls must adhere to the
511 * normal procedure calling conventions.
512 *
513 * Registers to be saved & restored:
514 * CR registers: cr.ipsr, cr.iip, cr.ifs
515 * AR registers: ar.unat, ar.pfs, ar.rsc, ar.rnat, ar.bspstore, ar.fpsr
516 * others: pr, b0, b6, loadrs, r1, r11, r12, r13, r15
517 * Registers to be restored only:
518 * r8-r11: output value from the system call.
519 *
520 * During system call exit, scratch registers (including r15) are modified/cleared
521 * to prevent leaking bits from kernel to user level.
522 */
524 mov r14=r21 // save r21 before bsw.1
525 bsw.1 // B (6 cyc) switch to bank 1
526 ;;
527 mov r29=cr.ipsr // M2 (12 cyc)
528 mov r31=pr // I0 (2 cyc)
529 mov r16=r14
530 mov r15=r2
532 mov r17=cr.iim // M2 (2 cyc)
533 mov.m r27=ar.rsc // M2 (12 cyc)
535 mov.m ar.rsc=0 // M2
536 mov.m r21=ar.fpsr // M2 (12 cyc)
537 mov r19=b6 // I0 (2 cyc)
538 ;;
539 mov.m r23=ar.bspstore // M2 (12 cyc)
540 mov.m r24=ar.rnat // M2 (5 cyc)
541 mov.i r26=ar.pfs // I0 (2 cyc)
543 invala // M0|1
544 nop.m 0 // M
545 mov r20=r1 // A save r1
547 nop.m 0
548 movl r30=ia64_hypercall_table // X
550 mov r28=cr.iip // M2 (2 cyc)
551 //
552 // From this point on, we are definitely on the syscall-path
553 // and we can use (non-banked) scratch registers.
554 //
555 ///////////////////////////////////////////////////////////////////////
556 mov r1=r16 // A move task-pointer to "addl"-addressable reg
557 mov r2=r16 // A setup r2 for ia64_syscall_setup
559 mov r3=NR_hypercalls - 1
560 ;;
561 mov r9=r0 // force flags = 0
562 extr.u r8=r29,41,2 // I0 extract ei field from cr.ipsr
564 shladd r30=r15,3,r30 // A r30 = hcall_table + 8*syscall
565 addl r22=IA64_RBS_OFFSET,r1 // A compute base of RBS
566 cmp.leu p6,p7=r15,r3 // A syscall number in range?
567 ;;
569 lfetch.fault.excl.nt1 [r22] // M0|1 prefetch RBS
570 (p6) ld8 r30=[r30] // M0|1 load address of syscall entry point
571 tnat.nz.or p7,p0=r15 // I0 is syscall nr a NaT?
573 mov.m ar.bspstore=r22 // M2 switch to kernel RBS
574 cmp.eq p8,p9=2,r8 // A isr.ei==2?
575 ;;
577 (p8) mov r8=0 // A clear ei to 0
578 (p7) movl r30=do_ni_hypercall // X
580 (p8) adds r28=16,r28 // A switch cr.iip to next bundle
581 (p9) adds r8=1,r8 // A increment ei to next slot
582 nop.i 0
583 ;;
585 mov.m r25=ar.unat // M2 (5 cyc)
586 dep r29=r8,r29,41,2 // I0 insert new ei into cr.ipsr
587 //
588 // If any of the above loads miss in L1D, we'll stall here until
589 // the data arrives.
590 //
591 ///////////////////////////////////////////////////////////////////////
592 mov b6=r30 // I0 setup syscall handler branch reg early
594 mov r18=ar.bsp // M2 (12 cyc)
595 ;;
596 addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r1 // A compute base of memory stack
597 br.call.sptk.many b7=ia64_hypercall_setup // B
598 1:
599 mov ar.rsc=0x3 // M2 set eager mode, pl 0, LE, loadrs=0
600 ;;
601 ssm psr.ic | PSR_DEFAULT_BITS // M2 now it's safe to re-enable intr.-collection
602 ;;
604 srlz.i // M0 ensure interruption collection is on
605 (p15) ssm psr.i // M2 restore psr.i
606 br.call.sptk.many b0=b6 // B invoke syscall-handler (ignore return addr)
607 ;;
608 //restore hypercall argument if continuation
609 adds r2=IA64_VCPU_HYPERCALL_CONTINUATION_OFS,r13
610 ;;
611 ld1 r20=[r2]
612 ;;
// Clear the continuation flag; if it was set, reload the five
// hypercall arguments from pt_regs (r16..) into out regs r32-r36.
613 st1 [r2]=r0
614 cmp.ne p6,p0=r20,r0
615 ;;
616 (p6) adds r2=PT(R16)+16,r12
617 (p6) adds r3=PT(R17)+16,r12
618 ;;
619 (p6) ld8 r32=[r2],16
620 (p6) ld8 r33=[r3],16
621 ;;
622 (p6) ld8 r34=[r2],16
623 (p6) ld8 r35=[r3],16
624 ;;
625 (p6) ld8 r36=[r2],16
626 ;;
627 br.sptk.many ia64_leave_hypercall
628 ;;
630 VMX_FAULT(11)
631 END(vmx_break_fault)
633 .org vmx_ia64_ivt+0x3000
634 /////////////////////////////////////////////////////////////////////////////////////////
635 // 0x3000 Entry 12 (size 64 bundles) External Interrupt (4)
// External interrupt: always handed to the C dispatcher (vector 12).
636 ENTRY(vmx_interrupt)
637 VMX_DBG_FAULT(12)
638 mov r31=pr // prepare to save predicates
639 mov r19=12
640 br.sptk vmx_dispatch_interrupt
641 END(vmx_interrupt)
643 .org vmx_ia64_ivt+0x3400
644 /////////////////////////////////////////////////////////////////////////////////////////
645 // 0x3400 Entry 13 (size 64 bundles) Reserved
// Architecturally reserved slot, reused here for the virtual external
// interrupt injection path (vector 13).
646 ENTRY(vmx_virtual_exirq)
647 VMX_DBG_FAULT(13)
648 mov r31=pr
649 mov r19=13
650 br.sptk vmx_dispatch_vexirq
651 END(vmx_virtual_exirq)
653 .org vmx_ia64_ivt+0x3800
654 /////////////////////////////////////////////////////////////////////////////////////////
655 // 0x3800 Entry 14 (size 64 bundles) Reserved
656 VMX_DBG_FAULT(14)
657 VMX_FAULT(14)
658 // this code segment is from 2.6.16.13
660 /*
661 * There is no particular reason for this code to be here, other than that
662 * there happens to be space here that would go unused otherwise. If this
663 * fault ever gets "unreserved", simply move the following code to a more
664 * suitable spot...
665 *
666 * ia64_syscall_setup() is a separate subroutine so that it can
667 * allocate stacked registers so it can safely demine any
668 * potential NaT values from the input registers.
669 *
670 * On entry:
671 * - executing on bank 0 or bank 1 register set (doesn't matter)
672 * - r1: stack pointer
673 * - r2: current task pointer
674 * - r3: preserved
675 * - r11: original contents (saved ar.pfs to be saved)
676 * - r12: original contents (sp to be saved)
677 * - r13: original contents (tp to be saved)
678 * - r15: original contents (syscall # to be saved)
679 * - r18: saved bsp (after switching to kernel stack)
680 * - r19: saved b6
681 * - r20: saved r1 (gp)
682 * - r21: saved ar.fpsr
683 * - r22: kernel's register backing store base (krbs_base)
684 * - r23: saved ar.bspstore
685 * - r24: saved ar.rnat
686 * - r25: saved ar.unat
687 * - r26: saved ar.pfs
688 * - r27: saved ar.rsc
689 * - r28: saved cr.iip
690 * - r29: saved cr.ipsr
691 * - r31: saved pr
692 * - b0: original contents (to be saved)
693 * On exit:
694 * - p10: TRUE if syscall is invoked with more than 8 out
695 * registers or r15's Nat is true
696 * - r1: kernel's gp
697 * - r3: preserved (same as on entry)
698 * - r8: -EINVAL if p10 is true
699 * - r12: points to kernel stack
700 * - r13: points to current task
701 * - r14: preserved (same as on entry)
702 * - p13: preserved
703 * - p15: TRUE if interrupts need to be re-enabled
704 * - ar.fpsr: set to kernel settings
705 * - b6: preserved (same as on entry)
706 */
// ia64_hypercall_setup: build the partial pt_regs frame for the
// hypercall fast path; see the register contract documented in the
// comment block that precedes this routine.  Called via b7 from
// vmx_break_fault.  Two base pointers (r16/r17) walk the frame in
// interleaved post-incremented stores; NaT'ed input registers are
// demined to -1 via the tnat.nz / (pN) mov inN=-1 pairs.
707 ENTRY(ia64_hypercall_setup)
708 #if PT(B6) != 0
709 # error This code assumes that b6 is the first field in pt_regs.
710 #endif
711 st8 [r1]=r19 // save b6
712 add r16=PT(CR_IPSR),r1 // initialize first base pointer
713 add r17=PT(R11),r1 // initialize second base pointer
714 ;;
715 alloc r19=ar.pfs,8,0,0,0 // ensure in0-in7 are writable
716 st8 [r16]=r29,PT(AR_PFS)-PT(CR_IPSR) // save cr.ipsr
717 tnat.nz p8,p0=in0
719 st8.spill [r17]=r11,PT(CR_IIP)-PT(R11) // save r11
720 tnat.nz p9,p0=in1
721 //(pKStk) mov r18=r0 // make sure r18 isn't NaT
722 ;;
724 st8 [r16]=r26,PT(CR_IFS)-PT(AR_PFS) // save ar.pfs
725 st8 [r17]=r28,PT(AR_UNAT)-PT(CR_IIP) // save cr.iip
726 mov r28=b0 // save b0 (2 cyc)
727 ;;
729 st8 [r17]=r25,PT(AR_RSC)-PT(AR_UNAT) // save ar.unat
730 dep r19=0,r19,38,26 // clear all bits but 0..37 [I0]
731 (p8) mov in0=-1
732 ;;
734 st8 [r16]=r19,PT(AR_RNAT)-PT(CR_IFS) // store ar.pfs.pfm in cr.ifs
735 extr.u r11=r19,7,7 // I0 // get sol of ar.pfs
736 and r8=0x7f,r19 // A // get sof of ar.pfs
738 st8 [r17]=r27,PT(AR_BSPSTORE)-PT(AR_RSC)// save ar.rsc
739 tbit.nz p15,p0=r29,IA64_PSR_I_BIT // I0
740 (p9) mov in1=-1
741 ;;
743 //(pUStk) sub r18=r18,r22 // r18=RSE.ndirty*8
744 sub r18=r18,r22 // r18=RSE.ndirty*8
745 tnat.nz p10,p0=in2
746 add r11=8,r11
747 ;;
748 //(pKStk) adds r16=PT(PR)-PT(AR_RNAT),r16 // skip over ar_rnat field
749 //(pKStk) adds r17=PT(B0)-PT(AR_BSPSTORE),r17 // skip over ar_bspstore field
750 tnat.nz p11,p0=in3
751 ;;
752 (p10) mov in2=-1
753 tnat.nz p12,p0=in4 // [I0]
754 (p11) mov in3=-1
755 ;;
756 //(pUStk) st8 [r16]=r24,PT(PR)-PT(AR_RNAT) // save ar.rnat
757 st8 [r16]=r24,PT(PR)-PT(AR_RNAT) // save ar.rnat
758 //(pUStk) st8 [r17]=r23,PT(B0)-PT(AR_BSPSTORE) // save ar.bspstore
759 st8 [r17]=r23,PT(B0)-PT(AR_BSPSTORE) // save ar.bspstore
760 shl r18=r18,16 // compute ar.rsc to be used for "loadrs"
761 ;;
762 st8 [r16]=r31,PT(LOADRS)-PT(PR) // save predicates
763 st8 [r17]=r28,PT(R1)-PT(B0) // save b0
764 tnat.nz p13,p0=in5 // [I0]
765 ;;
766 st8 [r16]=r18,PT(R12)-PT(LOADRS) // save ar.rsc value for "loadrs"
767 st8.spill [r17]=r20,PT(R13)-PT(R1) // save original r1
768 (p12) mov in4=-1
769 ;;
771 .mem.offset 0,0; st8.spill [r16]=r12,PT(AR_FPSR)-PT(R12) // save r12
772 .mem.offset 8,0; st8.spill [r17]=r13,PT(R15)-PT(R13) // save r13
773 (p13) mov in5=-1
774 ;;
775 st8 [r16]=r21,PT(R8)-PT(AR_FPSR) // save ar.fpsr
776 tnat.nz p13,p0=in6
777 cmp.lt p10,p9=r11,r8 // frame size can't be more than local+8
778 ;;
779 mov r8=1
780 (p9) tnat.nz p10,p0=r15
781 adds r12=-16,r1 // switch to kernel memory stack (with 16 bytes of scratch)
783 st8.spill [r17]=r15 // save r15
784 tnat.nz p8,p0=in7
785 nop.i 0
787 mov r13=r2 // establish `current'
788 movl r1=__gp // establish kernel global pointer
789 ;;
790 st8 [r16]=r8 // ensure pt_regs.r8 != 0 (see handle_syscall_error)
791 (p13) mov in6=-1
792 (p8) mov in7=-1
794 cmp.eq pSys,pNonSys=r0,r0 // set pSys=1, pNonSys=0
795 movl r17=FPSR_DEFAULT
796 ;;
797 mov.m ar.fpsr=r17 // set ar.fpsr to kernel default value
798 (p10) mov r8=-EINVAL
799 br.ret.sptk.many b7
800 END(ia64_hypercall_setup)
// Entries 15-19 are architecturally reserved: log and fault-dispatch.
803 .org vmx_ia64_ivt+0x3c00
804 /////////////////////////////////////////////////////////////////////////////////////////
805 // 0x3c00 Entry 15 (size 64 bundles) Reserved
806 VMX_DBG_FAULT(15)
807 VMX_FAULT(15)
810 .org vmx_ia64_ivt+0x4000
811 /////////////////////////////////////////////////////////////////////////////////////////
812 // 0x4000 Entry 16 (size 64 bundles) Reserved
813 VMX_DBG_FAULT(16)
814 VMX_FAULT(16)
816 .org vmx_ia64_ivt+0x4400
817 /////////////////////////////////////////////////////////////////////////////////////////
818 // 0x4400 Entry 17 (size 64 bundles) Reserved
819 VMX_DBG_FAULT(17)
820 VMX_FAULT(17)
822 .org vmx_ia64_ivt+0x4800
823 /////////////////////////////////////////////////////////////////////////////////////////
824 // 0x4800 Entry 18 (size 64 bundles) Reserved
825 VMX_DBG_FAULT(18)
826 VMX_FAULT(18)
828 .org vmx_ia64_ivt+0x4c00
829 /////////////////////////////////////////////////////////////////////////////////////////
830 // 0x4c00 Entry 19 (size 64 bundles) Reserved
831 VMX_DBG_FAULT(19)
832 VMX_FAULT(19)
// Entries 20-36: 16-bundle vectors.  Each is either reflected into
// the guest (VMX_REFLECT) or routed to the common fault handler
// (VMX_FAULT for reserved slots).
834 .org vmx_ia64_ivt+0x5000
835 /////////////////////////////////////////////////////////////////////////////////////////
836 // 0x5000 Entry 20 (size 16 bundles) Page Not Present
837 ENTRY(vmx_page_not_present)
838 VMX_DBG_FAULT(20)
839 VMX_REFLECT(20)
840 END(vmx_page_not_present)
842 .org vmx_ia64_ivt+0x5100
843 /////////////////////////////////////////////////////////////////////////////////////////
844 // 0x5100 Entry 21 (size 16 bundles) Key Permission vector
845 ENTRY(vmx_key_permission)
846 VMX_DBG_FAULT(21)
847 VMX_REFLECT(21)
848 END(vmx_key_permission)
850 .org vmx_ia64_ivt+0x5200
851 /////////////////////////////////////////////////////////////////////////////////////////
852 // 0x5200 Entry 22 (size 16 bundles) Instruction Access Rights (26)
853 ENTRY(vmx_iaccess_rights)
854 VMX_DBG_FAULT(22)
855 VMX_REFLECT(22)
856 END(vmx_iaccess_rights)
858 .org vmx_ia64_ivt+0x5300
859 /////////////////////////////////////////////////////////////////////////////////////////
860 // 0x5300 Entry 23 (size 16 bundles) Data Access Rights (14,53)
861 ENTRY(vmx_daccess_rights)
862 VMX_DBG_FAULT(23)
863 VMX_REFLECT(23)
864 END(vmx_daccess_rights)
866 .org vmx_ia64_ivt+0x5400
867 /////////////////////////////////////////////////////////////////////////////////////////
868 // 0x5400 Entry 24 (size 16 bundles) General Exception (5,32,34,36,38,39)
869 ENTRY(vmx_general_exception)
870 VMX_DBG_FAULT(24)
871 VMX_REFLECT(24)
872 // VMX_FAULT(24)
873 END(vmx_general_exception)
875 .org vmx_ia64_ivt+0x5500
876 /////////////////////////////////////////////////////////////////////////////////////////
877 // 0x5500 Entry 25 (size 16 bundles) Disabled FP-Register (35)
878 ENTRY(vmx_disabled_fp_reg)
879 VMX_DBG_FAULT(25)
880 VMX_REFLECT(25)
881 END(vmx_disabled_fp_reg)
883 .org vmx_ia64_ivt+0x5600
884 /////////////////////////////////////////////////////////////////////////////////////////
885 // 0x5600 Entry 26 (size 16 bundles) Nat Consumption (11,23,37,50)
886 ENTRY(vmx_nat_consumption)
887 VMX_DBG_FAULT(26)
888 VMX_REFLECT(26)
889 END(vmx_nat_consumption)
891 .org vmx_ia64_ivt+0x5700
892 /////////////////////////////////////////////////////////////////////////////////////////
893 // 0x5700 Entry 27 (size 16 bundles) Speculation (40)
894 ENTRY(vmx_speculation_vector)
895 VMX_DBG_FAULT(27)
896 VMX_REFLECT(27)
897 END(vmx_speculation_vector)
899 .org vmx_ia64_ivt+0x5800
900 /////////////////////////////////////////////////////////////////////////////////////////
901 // 0x5800 Entry 28 (size 16 bundles) Reserved
902 VMX_DBG_FAULT(28)
903 VMX_FAULT(28)
905 .org vmx_ia64_ivt+0x5900
906 /////////////////////////////////////////////////////////////////////////////////////////
907 // 0x5900 Entry 29 (size 16 bundles) Debug (16,28,56)
908 ENTRY(vmx_debug_vector)
909 VMX_DBG_FAULT(29)
910 VMX_REFLECT(29)
911 END(vmx_debug_vector)
913 .org vmx_ia64_ivt+0x5a00
914 /////////////////////////////////////////////////////////////////////////////////////////
915 // 0x5a00 Entry 30 (size 16 bundles) Unaligned Reference (57)
916 ENTRY(vmx_unaligned_access)
917 VMX_DBG_FAULT(30)
918 VMX_REFLECT(30)
919 END(vmx_unaligned_access)
921 .org vmx_ia64_ivt+0x5b00
922 /////////////////////////////////////////////////////////////////////////////////////////
923 // 0x5b00 Entry 31 (size 16 bundles) Unsupported Data Reference (57)
924 ENTRY(vmx_unsupported_data_reference)
925 VMX_DBG_FAULT(31)
926 VMX_REFLECT(31)
927 END(vmx_unsupported_data_reference)
929 .org vmx_ia64_ivt+0x5c00
930 /////////////////////////////////////////////////////////////////////////////////////////
931 // 0x5c00 Entry 32 (size 16 bundles) Floating-Point Fault (64)
932 ENTRY(vmx_floating_point_fault)
933 VMX_DBG_FAULT(32)
934 VMX_REFLECT(32)
935 END(vmx_floating_point_fault)
937 .org vmx_ia64_ivt+0x5d00
938 /////////////////////////////////////////////////////////////////////////////////////////
939 // 0x5d00 Entry 33 (size 16 bundles) Floating Point Trap (66)
940 ENTRY(vmx_floating_point_trap)
941 VMX_DBG_FAULT(33)
942 VMX_REFLECT(33)
943 END(vmx_floating_point_trap)
945 .org vmx_ia64_ivt+0x5e00
946 /////////////////////////////////////////////////////////////////////////////////////////
947 // 0x5e00 Entry 34 (size 16 bundles) Lower Privilege Transfer Trap (66)
948 ENTRY(vmx_lower_privilege_trap)
949 VMX_DBG_FAULT(34)
950 VMX_REFLECT(34)
951 END(vmx_lower_privilege_trap)
953 .org vmx_ia64_ivt+0x5f00
954 /////////////////////////////////////////////////////////////////////////////////////////
955 // 0x5f00 Entry 35 (size 16 bundles) Taken Branch Trap (68)
956 ENTRY(vmx_taken_branch_trap)
957 VMX_DBG_FAULT(35)
958 VMX_REFLECT(35)
959 END(vmx_taken_branch_trap)
961 .org vmx_ia64_ivt+0x6000
962 /////////////////////////////////////////////////////////////////////////////////////////
963 // 0x6000 Entry 36 (size 16 bundles) Single Step Trap (69)
964 ENTRY(vmx_single_step_trap)
965 VMX_DBG_FAULT(36)
966 VMX_REFLECT(36)
967 END(vmx_single_step_trap)
969 .global vmx_virtualization_fault_back
970 .org vmx_ia64_ivt+0x6100
971 /////////////////////////////////////////////////////////////////////////////////////////
972 // 0x6100 Entry 37 (size 16 bundles) Virtualization Fault
// A privileged guest instruction trapped.  r24 is presumably the
// fault cause delivered by hardware — TODO confirm; it indexes
// virtualization_fault_table (16 bytes, i.e. one bundle, per slot via
// shladd ...,4,...).  Table stubs that cannot fast-path the
// instruction branch back to vmx_virtualization_fault_back, which
// stashes cause/opcode in the vcpu and enters the C dispatcher.
973 ENTRY(vmx_virtualization_fault)
974 // VMX_DBG_FAULT(37)
975 mov r31=pr
976 movl r30 = virtualization_fault_table
977 mov r23=b0
978 ;;
979 shladd r30=r24,4,r30
980 ;;
981 mov b0=r30
982 br.sptk.many b0
983 ;;
984 vmx_virtualization_fault_back:
985 mov r19=37
986 adds r16 = IA64_VCPU_CAUSE_OFFSET,r21
987 adds r17 = IA64_VCPU_OPCODE_OFFSET,r21
988 ;;
989 st8 [r16] = r24
990 st8 [r17] = r25
991 br.sptk vmx_dispatch_virtualization_fault
992 END(vmx_virtualization_fault)
// Entries 38-44 (0x6200 - 0x6800) are reserved vectors.  Each slot just
// logs via VMX_DBG_FAULT and then invokes the common unexpected-fault
// stub VMX_FAULT (both macros defined elsewhere in this file).
994 .org vmx_ia64_ivt+0x6200
995 /////////////////////////////////////////////////////////////////////////////////////////
996 // 0x6200 Entry 38 (size 16 bundles) Reserved
997 VMX_DBG_FAULT(38)
998 VMX_FAULT(38)
1000 .org vmx_ia64_ivt+0x6300
1001 /////////////////////////////////////////////////////////////////////////////////////////
1002 // 0x6300 Entry 39 (size 16 bundles) Reserved
1003 VMX_DBG_FAULT(39)
1004 VMX_FAULT(39)
1006 .org vmx_ia64_ivt+0x6400
1007 /////////////////////////////////////////////////////////////////////////////////////////
1008 // 0x6400 Entry 40 (size 16 bundles) Reserved
1009 VMX_DBG_FAULT(40)
1010 VMX_FAULT(40)
1012 .org vmx_ia64_ivt+0x6500
1013 /////////////////////////////////////////////////////////////////////////////////////////
1014 // 0x6500 Entry 41 (size 16 bundles) Reserved
1015 VMX_DBG_FAULT(41)
1016 VMX_FAULT(41)
1018 .org vmx_ia64_ivt+0x6600
1019 /////////////////////////////////////////////////////////////////////////////////////////
1020 // 0x6600 Entry 42 (size 16 bundles) Reserved
1021 VMX_DBG_FAULT(42)
1022 VMX_FAULT(42)
1024 .org vmx_ia64_ivt+0x6700
1025 /////////////////////////////////////////////////////////////////////////////////////////
1026 // 0x6700 Entry 43 (size 16 bundles) Reserved
1027 VMX_DBG_FAULT(43)
1028 VMX_FAULT(43)
1030 .org vmx_ia64_ivt+0x6800
1031 /////////////////////////////////////////////////////////////////////////////////////////
1032 // 0x6800 Entry 44 (size 16 bundles) Reserved
1033 VMX_DBG_FAULT(44)
1034 VMX_FAULT(44)
1036 .org vmx_ia64_ivt+0x6900
1037 /////////////////////////////////////////////////////////////////////////////////////////
1038 // 0x6900 Entry 45 (size 16 bundles) IA-32 Exception (17,18,29,41,42,43,44,58,60,61,62,72,73,75,76,77)
// IA-32 execution support: handled identically to a reserved vector
// (debug log, then the common unexpected-fault stub).
1039 ENTRY(vmx_ia32_exception)
1040 VMX_DBG_FAULT(45)
1041 VMX_FAULT(45)
1042 END(vmx_ia32_exception)
1044 .org vmx_ia64_ivt+0x6a00
1045 /////////////////////////////////////////////////////////////////////////////////////////
1046 // 0x6a00 Entry 46 (size 16 bundles) IA-32 Intercept (30,31,59,70,71)
// IA-32 intercepts: handled identically to a reserved vector.
1047 ENTRY(vmx_ia32_intercept)
1048 VMX_DBG_FAULT(46)
1049 VMX_FAULT(46)
1050 END(vmx_ia32_intercept)
1052 .org vmx_ia64_ivt+0x6b00
1053 /////////////////////////////////////////////////////////////////////////////////////////
1054 // 0x6b00 Entry 47 (size 16 bundles) IA-32 Interrupt (74)
// IA-32 interrupt: handled identically to a reserved vector.
1055 ENTRY(vmx_ia32_interrupt)
1056 VMX_DBG_FAULT(47)
1057 VMX_FAULT(47)
1058 END(vmx_ia32_interrupt)
// Entries 48-67 (0x6c00 - 0x7f00) are reserved vectors: debug log, then
// the common unexpected-fault stub, same as entries 38-44 above.
1060 .org vmx_ia64_ivt+0x6c00
1061 /////////////////////////////////////////////////////////////////////////////////////////
1062 // 0x6c00 Entry 48 (size 16 bundles) Reserved
1063 VMX_DBG_FAULT(48)
1064 VMX_FAULT(48)
1066 .org vmx_ia64_ivt+0x6d00
1067 /////////////////////////////////////////////////////////////////////////////////////////
1068 // 0x6d00 Entry 49 (size 16 bundles) Reserved
1069 VMX_DBG_FAULT(49)
1070 VMX_FAULT(49)
1072 .org vmx_ia64_ivt+0x6e00
1073 /////////////////////////////////////////////////////////////////////////////////////////
1074 // 0x6e00 Entry 50 (size 16 bundles) Reserved
1075 VMX_DBG_FAULT(50)
1076 VMX_FAULT(50)
1078 .org vmx_ia64_ivt+0x6f00
1079 /////////////////////////////////////////////////////////////////////////////////////////
1080 // 0x6f00 Entry 51 (size 16 bundles) Reserved
1081 VMX_DBG_FAULT(51)
1082 VMX_FAULT(51)
1084 .org vmx_ia64_ivt+0x7000
1085 /////////////////////////////////////////////////////////////////////////////////////////
1086 // 0x7000 Entry 52 (size 16 bundles) Reserved
1087 VMX_DBG_FAULT(52)
1088 VMX_FAULT(52)
1090 .org vmx_ia64_ivt+0x7100
1091 /////////////////////////////////////////////////////////////////////////////////////////
1092 // 0x7100 Entry 53 (size 16 bundles) Reserved
1093 VMX_DBG_FAULT(53)
1094 VMX_FAULT(53)
1096 .org vmx_ia64_ivt+0x7200
1097 /////////////////////////////////////////////////////////////////////////////////////////
1098 // 0x7200 Entry 54 (size 16 bundles) Reserved
1099 VMX_DBG_FAULT(54)
1100 VMX_FAULT(54)
1102 .org vmx_ia64_ivt+0x7300
1103 /////////////////////////////////////////////////////////////////////////////////////////
1104 // 0x7300 Entry 55 (size 16 bundles) Reserved
1105 VMX_DBG_FAULT(55)
1106 VMX_FAULT(55)
1108 .org vmx_ia64_ivt+0x7400
1109 /////////////////////////////////////////////////////////////////////////////////////////
1110 // 0x7400 Entry 56 (size 16 bundles) Reserved
1111 VMX_DBG_FAULT(56)
1112 VMX_FAULT(56)
1114 .org vmx_ia64_ivt+0x7500
1115 /////////////////////////////////////////////////////////////////////////////////////////
1116 // 0x7500 Entry 57 (size 16 bundles) Reserved
1117 VMX_DBG_FAULT(57)
1118 VMX_FAULT(57)
1120 .org vmx_ia64_ivt+0x7600
1121 /////////////////////////////////////////////////////////////////////////////////////////
1122 // 0x7600 Entry 58 (size 16 bundles) Reserved
1123 VMX_DBG_FAULT(58)
1124 VMX_FAULT(58)
1126 .org vmx_ia64_ivt+0x7700
1127 /////////////////////////////////////////////////////////////////////////////////////////
1128 // 0x7700 Entry 59 (size 16 bundles) Reserved
1129 VMX_DBG_FAULT(59)
1130 VMX_FAULT(59)
1132 .org vmx_ia64_ivt+0x7800
1133 /////////////////////////////////////////////////////////////////////////////////////////
1134 // 0x7800 Entry 60 (size 16 bundles) Reserved
1135 VMX_DBG_FAULT(60)
1136 VMX_FAULT(60)
1138 .org vmx_ia64_ivt+0x7900
1139 /////////////////////////////////////////////////////////////////////////////////////////
1140 // 0x7900 Entry 61 (size 16 bundles) Reserved
1141 VMX_DBG_FAULT(61)
1142 VMX_FAULT(61)
1144 .org vmx_ia64_ivt+0x7a00
1145 /////////////////////////////////////////////////////////////////////////////////////////
1146 // 0x7a00 Entry 62 (size 16 bundles) Reserved
1147 VMX_DBG_FAULT(62)
1148 VMX_FAULT(62)
1150 .org vmx_ia64_ivt+0x7b00
1151 /////////////////////////////////////////////////////////////////////////////////////////
1152 // 0x7b00 Entry 63 (size 16 bundles) Reserved
1153 VMX_DBG_FAULT(63)
1154 VMX_FAULT(63)
1156 .org vmx_ia64_ivt+0x7c00
1157 /////////////////////////////////////////////////////////////////////////////////////////
1158 // 0x7c00 Entry 64 (size 16 bundles) Reserved
1159 VMX_DBG_FAULT(64)
1160 VMX_FAULT(64)
1162 .org vmx_ia64_ivt+0x7d00
1163 /////////////////////////////////////////////////////////////////////////////////////////
1164 // 0x7d00 Entry 65 (size 16 bundles) Reserved
1165 VMX_DBG_FAULT(65)
1166 VMX_FAULT(65)
1168 .org vmx_ia64_ivt+0x7e00
1169 /////////////////////////////////////////////////////////////////////////////////////////
1170 // 0x7e00 Entry 66 (size 16 bundles) Reserved
1171 VMX_DBG_FAULT(66)
1172 VMX_FAULT(66)
1174 .org vmx_ia64_ivt+0x7f00
1175 /////////////////////////////////////////////////////////////////////////////////////////
1176 // 0x7f00 Entry 67 (size 16 bundles) Reserved
1177 VMX_DBG_FAULT(67)
1178 VMX_FAULT(67)
1180 .org vmx_ia64_ivt+0x8000
1181 // There is no particular reason for this code to be here, other than that
1182 // there happens to be space here that would go unused otherwise. If this
1183 // fault ever gets "unreserved", simply move the following code to a more
1184 // suitable spot...
// Dispatcher: saves machine state and calls the C routine
// vmx_reflect_interruption(ifa, isr, iim, r15, &regs), returning to the
// guest through ia64_leave_hypervisor.
1187 ENTRY(vmx_dispatch_reflection)
1188 /*
1189 * Input:
1190 * psr.ic: off
1191 * r19: intr type (offset into ivt, see ia64_int.h)
1192 * r31: contains saved predicates (pr)
1193 */
1194 VMX_SAVE_MIN_WITH_COVER_R19
1195 alloc r14=ar.pfs,0,0,5,0 // 5 output registers for the C call
1196 mov out0=cr.ifa
1197 mov out1=cr.isr
1198 mov out2=cr.iim
1199 mov out3=r15 // r15 presumably set by VMX_SAVE_MIN_* -- confirm
1200 adds r3=8,r2 // set up second base pointer
1201 ;;
1202 ssm psr.ic // re-enable interruption collection
1203 ;;
1204 srlz.i // guarantee that interruption collection is on
1205 ;;
1206 (p15) ssm psr.i // restore psr.i
1207 movl r14=ia64_leave_hypervisor
1208 ;;
1209 VMX_SAVE_REST
1210 mov rp=r14 // return from the C call resumes in ia64_leave_hypervisor
1211 ;;
1212 P6_BR_CALL_PANIC(.Lvmx_dispatch_reflection_string) // presumably panics if p6 set -- see macro
1213 adds out4=16,r12 // out4 = pt_regs pointer (r12 is the stack pointer; cf. "adds out1=16,sp //regs" below)
1214 br.call.sptk.many b6=vmx_reflect_interruption
1215 END(vmx_dispatch_reflection)
// Full emulation path for a virtualization fault that the fast handlers in
// virtualization_fault_table could not service.  Saves full state and calls
// vmx_emulate(vcpu, regs); returns through ia64_leave_hypervisor_prepare.
1217 ENTRY(vmx_dispatch_virtualization_fault)
1218 VMX_SAVE_MIN_WITH_COVER
1219 ;;
1220 alloc r14=ar.pfs,0,0,2,0 // now it's safe (must be first in insn group!)
1221 mov out0=r13 //vcpu
1222 adds r3=8,r2 // set up second base pointer
1223 ;;
1224 ssm psr.ic // re-enable interruption collection
1225 ;;
1226 srlz.i // guarantee that interruption collection is on
1227 ;;
1228 (p15) ssm psr.i // restore psr.i
1229 movl r14=ia64_leave_hypervisor_prepare
1230 ;;
1231 VMX_SAVE_REST
1232 VMX_SAVE_EXTRA // presumably saves additional state needed by full emulation -- see macro
1233 mov rp=r14 // return from vmx_emulate resumes in ia64_leave_hypervisor_prepare
1234 ;;
1235 P6_BR_CALL_PANIC(.Lvmx_dispatch_virtualization_fault_string)
1236 adds out1=16,sp //regs
1237 br.call.sptk.many b6=vmx_emulate
1238 END(vmx_dispatch_virtualization_fault)
// Dispatcher for virtual external interrupts: saves state and calls
// vmx_vexirq(vcpu); returns through ia64_leave_hypervisor.
1241 GLOBAL_ENTRY(vmx_dispatch_vexirq)
1242 VMX_SAVE_MIN_WITH_COVER
1243 alloc r14=ar.pfs,0,0,1,0 // single output register: the vcpu
1244 mov out0=r13 // r13 = current vcpu (cf. "//vcpu" in the dispatcher above)
1246 ssm psr.ic // re-enable interruption collection
1247 ;;
1248 srlz.i // guarantee that interruption collection is on
1249 ;;
1250 (p15) ssm psr.i // restore psr.i
1251 adds r3=8,r2 // set up second base pointer
1252 ;;
1253 VMX_SAVE_REST
1254 movl r14=ia64_leave_hypervisor
1255 ;;
1256 mov rp=r14 // return from vmx_vexirq resumes in ia64_leave_hypervisor
1257 P6_BR_CALL_PANIC(.Lvmx_dispatch_vexirq_string)
1258 br.call.sptk.many b6=vmx_vexirq
1259 END(vmx_dispatch_vexirq)
// Dispatcher for TLB misses that need C-level handling: saves state and
// calls vmx_hpw_miss(ifa, r15, &regs); returns through ia64_leave_hypervisor.
1261 ENTRY(vmx_dispatch_tlb_miss)
1262 VMX_SAVE_MIN_WITH_COVER_R19
1263 alloc r14=ar.pfs,0,0,3,0 // 3 output registers for the C call
1264 mov out0=cr.ifa // faulting address
1265 mov out1=r15 // r15 presumably set by VMX_SAVE_MIN_* -- confirm
1266 adds r3=8,r2 // set up second base pointer
1267 ;;
1268 ssm psr.ic // re-enable interruption collection
1269 ;;
1270 srlz.i // guarantee that interruption collection is on
1271 ;;
1272 (p15) ssm psr.i // restore psr.i
1273 movl r14=ia64_leave_hypervisor
1274 ;;
1275 VMX_SAVE_REST
1276 mov rp=r14 // return from vmx_hpw_miss resumes in ia64_leave_hypervisor
1277 ;;
1278 P6_BR_CALL_PANIC(.Lvmx_dispatch_tlb_miss_string)
1279 adds out2=16,r12 // out2 = pt_regs pointer (r12 is the stack pointer)
1280 br.call.sptk.many b6=vmx_hpw_miss
1281 END(vmx_dispatch_tlb_miss)
// Dispatcher for break instruction faults (e.g. hypercalls): saves state
// and calls vmx_ia64_handle_break(ifa, &regs, isr, iim).  The return path
// is chosen by the pUStk/pKStk predicates -- presumably distinguishing a
// break taken from guest context (ia64_leave_hypervisor) from one taken in
// nested hypervisor context (ia64_leave_nested); confirm against the
// VMX_SAVE_MIN_* macros that set those predicates.
1283 ENTRY(vmx_dispatch_break_fault)
1284 VMX_SAVE_MIN_WITH_COVER_NO_PANIC
1285 ;;
1286 alloc r14=ar.pfs,0,0,4,0 // now it's safe (must be first in insn group!)
1287 mov out0=cr.ifa
1288 mov out2=cr.isr // FIXME: pity to make this slow access twice
1289 mov out3=cr.iim // FIXME: pity to make this slow access twice
1290 adds r3=8,r2 // set up second base pointer
1291 ;;
1292 ssm psr.ic // re-enable interruption collection
1293 ;;
1294 srlz.i // guarantee that interruption collection is on
1295 ;;
1296 (p15)ssm psr.i // restore psr.i
1297 (pUStk)movl r14=ia64_leave_hypervisor
1298 ;;
1299 (pKStk)movl r14=ia64_leave_nested
1300 VMX_SAVE_REST
1301 mov rp=r14 // return from the C call resumes in the selected leave routine
1302 ;;
1303 adds out1=16,sp // out1 = pt_regs pointer
1304 br.call.sptk.many b6=vmx_ia64_handle_break
1305 ;;
1306 END(vmx_dispatch_break_fault)
// Dispatcher for external interrupts: saves state and calls
// ia64_handle_irq(cr.ivr, &regs).  Like the break-fault path, the return
// route is selected by pUStk/pKStk (hypervisor vs. nested leave).
1309 ENTRY(vmx_dispatch_interrupt)
1310 VMX_SAVE_MIN_WITH_COVER_NO_PANIC // uses r31; defines r2 and r3
1311 ;;
1312 alloc r14=ar.pfs,0,0,2,0 // must be first in an insn group
1313 ssm psr.ic // re-enable interruption collection
1314 mov out0=cr.ivr // pass cr.ivr as first arg
1315 adds r3=8,r2 // set up second base pointer for SAVE_REST
1316 ;;
1317 (pUStk) movl r14=ia64_leave_hypervisor
1318 srlz.i // guarantee that interruption collection is on
1319 ;;
1320 (pKStk) movl r14=ia64_leave_nested
1321 VMX_SAVE_REST
1322 add out1=16,sp // pass pointer to pt_regs as second arg
1323 mov rp=r14 // return from ia64_handle_irq resumes in the selected leave routine
1324 br.call.sptk.many b6=ia64_handle_irq
1325 END(vmx_dispatch_interrupt)
// Dispatcher for shadow page-table faults: saves state and calls
// vmx_ia64_shadow_fault(ifa, isr, r15, &regs); returns through
// ia64_leave_hypervisor.
1328 ENTRY(vmx_dispatch_shadow_fault)
1329 VMX_SAVE_MIN_WITH_COVER_R19
1330 alloc r14=ar.pfs,0,0,4,0 // 4 output registers for the C call
1331 mov out0=cr.ifa // faulting address
1332 mov out1=cr.isr
1333 mov out2=r15 // r15 presumably set by VMX_SAVE_MIN_* -- confirm
1334 adds r3=8,r2 // set up second base pointer
1335 ;;
1336 ssm psr.ic // re-enable interruption collection
1337 ;;
1338 srlz.i // guarantee that interruption collection is on
1339 ;;
1340 (p15) ssm psr.i // restore psr.i
1341 movl r14=ia64_leave_hypervisor
1342 ;;
1343 VMX_SAVE_REST
1344 mov rp=r14 // return from the C call resumes in ia64_leave_hypervisor
1345 ;;
1346 P6_BR_CALL_PANIC(.Lvmx_dispatch_shadow_fault_string)
1347 adds out3=16,r12 // out3 = pt_regs pointer (r12 is the stack pointer)
1348 br.call.sptk.many b6=vmx_ia64_shadow_fault
1349 END(vmx_dispatch_shadow_fault)
// NUL-terminated panic messages passed to P6_BR_CALL_PANIC by the
// dispatchers above; each names the dispatcher that detected the problem.
1351 .Lvmx_dispatch_reflection_string:
1352 .asciz "vmx_dispatch_reflection\n"
1353 .Lvmx_dispatch_virtualization_fault_string:
1354 .asciz "vmx_dispatch_virtualization_fault\n"
1355 .Lvmx_dispatch_vexirq_string:
1356 .asciz "vmx_dispatch_vexirq\n"
1357 .Lvmx_dispatch_tlb_miss_string:
1358 .asciz "vmx_dispatch_tlb_miss\n"
1359 .Lvmx_dispatch_shadow_fault_string:
1360 .asciz "vmx_dispatch_shadow_fault\n"