ia64/xen-unstable

xen/arch/ia64/vmx/vmx_minstate.h @ 16757:9ab95900afec

[IA64] VTi fault handler clean-up: improve the VTi domain fault handler

Improve the VTi domain fault handler panic path. Currently, when a
VTi fault handler detects something wrong, it enters an infinite loop
in vmx_panic with interrupts masked. That makes sense when a
hardware-based debugger is available, but in most cases one is not,
especially for ordinary users. This patch makes those panic paths
more user-friendly by printing the panic message instead. The old
behaviour is retained behind the vmx_panic (CONFIG_VMX_PANIC)
configuration option.

Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
author Alex Williamson <alex.williamson@hp.com>
date Fri Dec 14 13:37:57 2007 -0700 (2007-12-14)
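
The two behaviours map onto two macros defined in this header: P6_BR_VMX_PANIC, which branches to the vmx_panic infinite loop and is now compiled in only under CONFIG_VMX_PANIC, and P6_BR_CALL_PANIC, which reports the fault by passing a message string to panic(). The former is expanded inside VMX_DO_SAVE_MIN right after the p6 predicate is computed; the latter is presumably placed by the individual fault handlers that include this header, once the minimal state has been saved. A rough sketch of the intended pattern follows; the handler fragment and the message symbol are hypothetical, for illustration only, and are not taken from the actual handler code:

    // hypothetical fault-handler fragment (illustration only)
    VMX_SAVE_MIN_WITH_COVER            // save minimal state; p6 is left set when the
                                       // interruption came from xen context or is nested
    ;;
    P6_BR_CALL_PANIC(.Lvmx_fault_msg)  // new behaviour: report the fault via panic()
    // ... normal handling of the interruption continues here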
/*
 * vmx_minstate.h:
 * Copyright (c) 2005, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com)
 */

#include <linux/config.h>

#include <asm/asmmacro.h>
#include <asm/fpu.h>
#include <asm/mmu_context.h>
#include <asm/offsets.h>
#include <asm/pal.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/system.h>
#include <asm/vmx_pal_vsa.h>
#include <asm/vmx_vpd.h>
#include <asm/cache.h>
#include "entry.h"

#define VMX_MINSTATE_START_SAVE_MIN \
    (pUStk) mov ar.rsc=0; /* set enforced lazy mode, pl 0, little-endian, loadrs=0 */ \
    ;; \
    (pUStk) mov.m r28=ar.rnat; \
    (pUStk) addl r22=IA64_RBS_OFFSET,r1; /* compute base of RBS */ \
    (pKStk) mov r1=sp; /* get sp */ \
    ;; \
    (pUStk) lfetch.fault.excl.nt1 [r22]; \
    (pUStk) addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r1; /* compute base of memory stack */ \
    (pUStk) mov r23=ar.bspstore; /* save ar.bspstore */ \
    ;; \
    (pUStk) mov ar.bspstore=r22; /* switch to kernel RBS */ \
    (pKStk) addl r1=-IA64_PT_REGS_SIZE,r1; /* if in kernel mode, use sp (r12) */ \
    ;; \
    (pUStk) mov r18=ar.bsp; \
    (pUStk) mov ar.rsc=0x3; /* set eager mode, pl 0, little-endian, loadrs=0 */

#define VMX_MINSTATE_END_SAVE_MIN \
    bsw.1; /* switch back to bank 1 (must be last in insn group) */ \
    ;;

#define PAL_VSA_SYNC_READ \
    /* begin to call pal vps sync_read */ \
    (pUStk) add r25=IA64_VPD_BASE_OFFSET, r21; \
    (pUStk) movl r20=__vsa_base; \
    ;; \
    (pUStk) ld8 r25=[r25]; /* read vpd base */ \
    (pUStk) ld8 r20=[r20]; /* read entry point */ \
    ;; \
    (pUStk) add r20=PAL_VPS_SYNC_READ,r20; \
    ;; \
    { .mii; \
    (pUStk) nop 0x0; \
    (pUStk) mov r24=ip; \
    (pUStk) mov b0=r20; \
    ;; \
    }; \
    { .mmb; \
    (pUStk) add r24 = 0x20, r24; \
    (pUStk) nop 0x0; \
    (pUStk) br.cond.sptk b0; /* call the service */ \
    ;; \
    };

#define IA64_CURRENT_REG IA64_KR(CURRENT) /* r21 is reserved for current pointer */
//#define VMX_MINSTATE_GET_CURRENT(reg) mov reg=IA64_CURRENT_REG
#define VMX_MINSTATE_GET_CURRENT(reg) mov reg=r21

/*
 * VMX_DO_SAVE_MIN switches to the kernel stacks (if necessary) and saves
 * the minimum state necessary to allow psr.ic to be turned back on.
 *
 * Assumed state upon entry:
 *  psr.ic: off
 *  r31: contains saved predicates (pr)
 *
 * Upon exit, the state is as follows:
 *  psr.ic: off
 *  r2  = points to &pt_regs.r16
 *  r8  = contents of ar.ccv
 *  r9  = contents of ar.csd
 *  r10 = contents of ar.ssd
 *  r11 = FPSR_DEFAULT
 *  r12 = kernel sp (kernel virtual address)
 *  r13 = points to current task_struct (kernel virtual address)
 *  p6  = (!psr.vm || isr.ni): set when the interruption came from xen
 *        context or is nested; panic if not external interrupt
 *        (fault in xen VMM)
 *  p15 = TRUE if psr.i is set in cr.ipsr
 *  predicate registers (other than p2, p3, and p15), b6, r3, r14, r15:
 *        preserved
 *
 * Note that psr.ic is NOT turned on by this macro. This is so that
 * we can pass interruption state as arguments to a handler.
 */

#ifdef CONFIG_VMX_PANIC
# define P6_BR_VMX_PANIC    (p6)br.spnt.few vmx_panic;
#else
# define P6_BR_VMX_PANIC    /* nothing */
#endif

#define P6_BR_CALL_PANIC(panic_string) \
    (p6) movl out0=panic_string; \
    (p6) br.call.spnt.few b6=panic;

#define VMX_DO_SAVE_MIN(COVER,SAVE_IFS,EXTRA) \
    mov r27=ar.rsc; /* M */ \
    mov r20=r1; /* A */ \
    mov r25=ar.unat; /* M */ \
    mov r29=cr.ipsr; /* M */ \
    mov r26=ar.pfs; /* I */ \
    mov r18=cr.isr; \
    COVER; /* B;; (or nothing) */ \
    ;; \
    cmp.eq p6,p0=r0,r0; \
    tbit.z pKStk,pUStk=r29,IA64_PSR_VM_BIT; \
    tbit.z p0,p15=r29,IA64_PSR_I_BIT; \
    ;; \
    (pUStk) tbit.nz.and p6,p0=r18,IA64_ISR_NI_BIT; \
    ;; \
    P6_BR_VMX_PANIC \
    (pUStk) VMX_MINSTATE_GET_CURRENT(r1); \
    /* mov r21=r16; */ \
    /* switch from user to kernel RBS: */ \
    ;; \
    invala; /* M */ \
    SAVE_IFS; \
    ;; \
    VMX_MINSTATE_START_SAVE_MIN \
    adds r17=2*L1_CACHE_BYTES,r1; /* really: biggest cache-line size */ \
    adds r16=PT(CR_IPSR),r1; \
    ;; \
    lfetch.fault.excl.nt1 [r17],L1_CACHE_BYTES; \
    st8 [r16]=r29; /* save cr.ipsr */ \
    ;; \
    lfetch.fault.excl.nt1 [r17]; \
    mov r29=b0 \
    ;; \
    adds r16=PT(R8),r1; /* initialize first base pointer */ \
    adds r17=PT(R9),r1; /* initialize second base pointer */ \
    (pKStk) mov r18=r0; /* make sure r18 isn't NaT */ \
    ;; \
    .mem.offset 0,0; st8.spill [r16]=r8,16; \
    .mem.offset 8,0; st8.spill [r17]=r9,16; \
    ;; \
    .mem.offset 0,0; st8.spill [r16]=r10,24; \
    .mem.offset 8,0; st8.spill [r17]=r11,24; \
    ;; \
    mov r9=cr.iip; /* M */ \
    mov r10=ar.fpsr; /* M */ \
    ;; \
    st8 [r16]=r9,16; /* save cr.iip */ \
    st8 [r17]=r30,16; /* save cr.ifs */ \
    (pUStk) sub r18=r18,r22; /* r18=RSE.ndirty*8 */ \
    ;; \
    st8 [r16]=r25,16; /* save ar.unat */ \
    st8 [r17]=r26,16; /* save ar.pfs */ \
    shl r18=r18,16; /* compute ar.rsc to be used for "loadrs" */ \
    ;; \
    st8 [r16]=r27,16; /* save ar.rsc */ \
    (pUStk) st8 [r17]=r28,16; /* save ar.rnat */ \
    (pKStk) adds r17=16,r17; /* skip over ar_rnat field */ \
    ;; /* avoid RAW on r16 & r17 */ \
    (pUStk) st8 [r16]=r23,16; /* save ar.bspstore */ \
    st8 [r17]=r31,16; /* save predicates */ \
    (pKStk) adds r16=16,r16; /* skip over ar_bspstore field */ \
    ;; \
    st8 [r16]=r29,16; /* save b0 */ \
    st8 [r17]=r18,16; /* save ar.rsc value for "loadrs" */ \
    cmp.eq pNonSys,pSys=r0,r0 /* initialize pSys=0, pNonSys=1 */ \
    ;; \
    .mem.offset 0,0; st8.spill [r16]=r20,16; /* save original r1 */ \
    .mem.offset 8,0; st8.spill [r17]=r12,16; \
    adds r12=-16,r1; /* switch to kernel memory stack (with 16 bytes of scratch) */ \
    ;; \
    .mem.offset 0,0; st8.spill [r16]=r13,16; \
    .mem.offset 8,0; st8.spill [r17]=r10,16; /* save ar.fpsr */ \
    (pUStk) VMX_MINSTATE_GET_CURRENT(r13); /* establish `current' */ \
    (pKStk) movl r13=THIS_CPU(cpu_kr)+IA64_KR_CURRENT_OFFSET; /* From MINSTATE_GET_CURRENT */ \
    ;; \
    .mem.offset 0,0; st8.spill [r16]=r15,16; \
    .mem.offset 8,0; st8.spill [r17]=r14,16; \
    (pKStk) ld8 r13=[r13]; /* establish `current' */ \
    ;; \
    .mem.offset 0,0; st8.spill [r16]=r2,16; \
    .mem.offset 8,0; st8.spill [r17]=r3,16; \
    adds r2=IA64_PT_REGS_R16_OFFSET,r1; \
    ;; \
    (pUStk) adds r16=IA64_VCPU_IIPA_OFFSET,r13; \
    (pUStk) adds r17=IA64_VCPU_ISR_OFFSET,r13; \
    (pUStk) mov r26=cr.iipa; \
    (pUStk) mov r27=cr.isr; \
    ;; \
    (pUStk) st8 [r16]=r26; \
    (pUStk) st8 [r17]=r27; \
    ;; \
    EXTRA; \
    mov r8=ar.ccv; \
    mov r9=ar.csd; \
    mov r10=ar.ssd; \
    movl r11=FPSR_DEFAULT; /* L-unit */ \
    movl r1=__gp; /* establish kernel global pointer */ \
    ;; \
    PAL_VSA_SYNC_READ \
    VMX_MINSTATE_END_SAVE_MIN

/*
 * VMX_SAVE_REST saves the remainder of pt_regs (with psr.ic on).
 *
 * Assumed state upon entry:
 *  psr.ic: on
 *  r2: points to &pt_regs.r16
 *  r3: points to &pt_regs.r17
 *  r8: contents of ar.ccv
 *  r9: contents of ar.csd
 *  r10: contents of ar.ssd
 *  r11: FPSR_DEFAULT
 *
 * Registers r14 and r15 are guaranteed not to be touched by VMX_SAVE_REST.
 */
#define VMX_SAVE_REST \
    .mem.offset 0,0; st8.spill [r2]=r16,16; \
    .mem.offset 8,0; st8.spill [r3]=r17,16; \
    ;; \
    .mem.offset 0,0; st8.spill [r2]=r18,16; \
    .mem.offset 8,0; st8.spill [r3]=r19,16; \
    ;; \
    .mem.offset 0,0; st8.spill [r2]=r20,16; \
    .mem.offset 8,0; st8.spill [r3]=r21,16; \
    mov r18=b6; \
    ;; \
    .mem.offset 0,0; st8.spill [r2]=r22,16; \
    .mem.offset 8,0; st8.spill [r3]=r23,16; \
    mov r19=b7; \
    ;; \
    .mem.offset 0,0; st8.spill [r2]=r24,16; \
    .mem.offset 8,0; st8.spill [r3]=r25,16; \
    ;; \
    .mem.offset 0,0; st8.spill [r2]=r26,16; \
    .mem.offset 8,0; st8.spill [r3]=r27,16; \
    ;; \
    .mem.offset 0,0; st8.spill [r2]=r28,16; \
    .mem.offset 8,0; st8.spill [r3]=r29,16; \
    ;; \
    .mem.offset 0,0; st8.spill [r2]=r30,16; \
    .mem.offset 8,0; st8.spill [r3]=r31,32; \
    ;; \
    mov ar.fpsr=r11; \
    st8 [r2]=r8,8; \
    adds r24=PT(B6)-PT(F7),r3; \
    ;; \
    stf.spill [r2]=f6,32; \
    stf.spill [r3]=f7,32; \
    ;; \
    stf.spill [r2]=f8,32; \
    stf.spill [r3]=f9,32; \
    ;; \
    stf.spill [r2]=f10,32; \
    stf.spill [r3]=f11; \
    adds r25=PT(B7)-PT(F11),r3; \
    ;; \
    st8 [r24]=r18,16; /* b6 */ \
    st8 [r25]=r19,16; /* b7 */ \
    adds r3=PT(R5)-PT(F11),r3; \
    ;; \
    st8 [r24]=r9; /* ar.csd */ \
    st8 [r25]=r10; /* ar.ssd */ \
    ;; \
    (pUStk) mov r18=ar.unat; \
    (pUStk) adds r19=PT(EML_UNAT)-PT(R4),r2; \
    ;; \
    (pUStk) st8 [r19]=r18; /* eml_unat */

#define VMX_SAVE_EXTRA \
    .mem.offset 0,0; st8.spill [r2]=r4,16; \
    .mem.offset 8,0; st8.spill [r3]=r5,16; \
    ;; \
    .mem.offset 0,0; st8.spill [r2]=r6,16; \
    .mem.offset 8,0; st8.spill [r3]=r7; \
    ;; \
    mov r26=ar.unat; \
    ;; \
    st8 [r2]=r26; /* eml_unat */

#define VMX_SAVE_MIN_WITH_COVER     VMX_DO_SAVE_MIN(cover, mov r30=cr.ifs,)
#define VMX_SAVE_MIN_WITH_COVER_R19 VMX_DO_SAVE_MIN(cover, mov r30=cr.ifs, mov r15=r19)
#define VMX_SAVE_MIN                VMX_DO_SAVE_MIN(     , mov r30=r0, )
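
/*
 * Usage sketch (illustration only, not part of the definitions above):
 * a handler built on these macros is expected to save the minimal state
 * first, turn interruption collection back on, and only then save the
 * rest of pt_regs, roughly as follows:
 *
 *     VMX_SAVE_MIN_WITH_COVER    // cover, switch stacks, save minimal state
 *     ssm psr.ic | psr.i         // psr.ic is NOT set by the macro itself
 *     ;;
 *     srlz.i                     // ensure collection is on before continuing
 *     ;;
 *     VMX_SAVE_REST              // save the remaining pt_regs fields
 *
 * Whether psr.i is re-enabled as well, and what runs in between (e.g.
 * setting r3 = &pt_regs.r17 for VMX_SAVE_REST), differs per handler; see
 * the callers of these macros for the authoritative sequences.
 */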
/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */