ia64/xen-unstable

view linux-2.6-xen-sparse/arch/ia64/xen/xenminstate.h @ 9296:f85bb99187bf

Update interface documentation to include sched_op_new hypercall
and clean up the style a bit. Also clean up the sched_op_new
description in the sched.h public header.

Signed-off-by: Keir Fraser <keir@xensource.com>
author kaf24@firebug.cl.cam.ac.uk
date Wed Mar 15 19:19:22 2006 +0100 (2006-03-15)
parents c83307a81602
children 707737b66f58
line source
1 #include <linux/config.h>
3 #include <asm/cache.h>
5 #ifdef CONFIG_XEN
6 #include "../kernel/entry.h"
7 #else
8 #include "entry.h"
9 #endif
11 /*
12 * For ivt.s we want to access the stack virtually so we don't have to disable translation
13 * on interrupts.
14 *
15 * On entry:
16 * r1: pointer to current task (ar.k6)
17 */
/*
 * Begin SAVE_MIN with the stack accessed through its kernel *virtual*
 * address (the ivt.S path; see the comment above).  On a user-mode
 * entry (pUStk) this switches ar.bspstore to the kernel register
 * backing store and latches the old RSE state for later saving:
 * r22 = kernel RBS base, r23 = old ar.bspstore, r24 = old ar.rnat,
 * r18 = ar.bsp after the switch (used to compute RSE.ndirty).
 * On a kernel-mode entry (pKStk) it only carves pt_regs off sp.
 * r1 must hold the current task pointer on entry; on exit r1 points
 * at the new pt_regs area.
 */
18 #define MINSTATE_START_SAVE_MIN_VIRT \
19 (pUStk) mov ar.rsc=0; /* set enforced lazy mode, pl 0, little-endian, loadrs=0 */ \
20 ;; \
21 (pUStk) mov.m r24=ar.rnat; \
22 (pUStk) addl r22=IA64_RBS_OFFSET,r1; /* compute base of RBS */ \
23 (pKStk) mov r1=sp; /* get sp */ \
24 ;; \
25 (pUStk) lfetch.fault.excl.nt1 [r22]; \
26 (pUStk) addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r1; /* compute base of memory stack */ \
27 (pUStk) mov r23=ar.bspstore; /* save ar.bspstore */ \
28 ;; \
29 (pUStk) mov ar.bspstore=r22; /* switch to kernel RBS */ \
30 (pKStk) addl r1=-IA64_PT_REGS_SIZE,r1; /* if in kernel mode, use sp (r12) */ \
31 ;; \
32 (pUStk) mov r18=ar.bsp; \
33 (pUStk) mov ar.rsc=0x3; /* set eager mode, pl 0, little-endian, loadrs=0 */ \
/*
 * Finish SAVE_MIN on the virtual path: switch back to register bank 1
 * so the banked r16-r31 become visible again.  bsw.1 must be the last
 * instruction in its group.
 */
35 #define MINSTATE_END_SAVE_MIN_VIRT \
36 bsw.1; /* switch back to bank 1 (must be last in insn group) */ \
37 ;;
39 /*
40 * For mca_asm.S we want to access the stack physically since the state is saved before we
41 * go virtual and don't want to destroy the iip or ipsr.
42 */
/*
 * Begin SAVE_MIN with the stack accessed through its *physical*
 * address (the mca_asm.S path; see the comment above).  The kernel-mode
 * case (pKStk) locates the per-CPU MCA/INIT stack via the PER_CPU_DATA
 * kernel register rather than using sp.  The user-mode case (pUStk)
 * mirrors the _VIRT variant, except the RBS base in r22 is converted
 * to a kernel virtual address (dep ...,61,3 sets the region bits)
 * before being loaded into ar.bspstore.
 */
43 #define MINSTATE_START_SAVE_MIN_PHYS \
44 (pKStk) mov r3=IA64_KR(PER_CPU_DATA);; \
45 (pKStk) addl r3=THIS_CPU(ia64_mca_data),r3;; \
46 (pKStk) ld8 r3 = [r3];; \
47 (pKStk) addl r3=IA64_MCA_CPU_INIT_STACK_OFFSET,r3;; \
48 (pKStk) addl sp=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r3; \
49 (pUStk) mov ar.rsc=0; /* set enforced lazy mode, pl 0, little-endian, loadrs=0 */ \
50 (pUStk) addl r22=IA64_RBS_OFFSET,r1; /* compute base of register backing store */ \
51 ;; \
52 (pUStk) mov r24=ar.rnat; \
53 (pUStk) addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r1; /* compute base of memory stack */ \
54 (pUStk) mov r23=ar.bspstore; /* save ar.bspstore */ \
55 (pUStk) dep r22=-1,r22,61,3; /* compute kernel virtual addr of RBS */ \
56 ;; \
57 (pKStk) addl r1=-IA64_PT_REGS_SIZE,r1; /* if in kernel mode, use sp (r12) */ \
58 (pUStk) mov ar.bspstore=r22; /* switch to kernel RBS */ \
59 ;; \
60 (pUStk) mov r18=ar.bsp; \
61 (pUStk) mov ar.rsc=0x3; /* set eager mode, pl 0, little-endian, loadrs=0 */ \
/*
 * Finish SAVE_MIN on the physical path: convert sp (r12) back into a
 * kernel virtual address by setting the region bits (dep ...,61,3).
 */
63 #define MINSTATE_END_SAVE_MIN_PHYS \
64 dep r12=-1,r12,61,3; /* make sp a kernel virtual address */ \
65 ;;
/*
 * The including assembly file selects one mode by defining either
 * MINSTATE_VIRT or MINSTATE_PHYS before including this header; that
 * choice binds MINSTATE_GET_CURRENT / MINSTATE_START_SAVE_MIN /
 * MINSTATE_END_SAVE_MIN to the matching variants above.
 */
67 #ifdef MINSTATE_VIRT
68 # define MINSTATE_GET_CURRENT(reg) mov reg=IA64_KR(CURRENT)
69 # define MINSTATE_START_SAVE_MIN MINSTATE_START_SAVE_MIN_VIRT
70 # define MINSTATE_END_SAVE_MIN MINSTATE_END_SAVE_MIN_VIRT
71 #endif
/* Physical mode additionally translates the task pointer with tpa. */
73 #ifdef MINSTATE_PHYS
74 # define MINSTATE_GET_CURRENT(reg) mov reg=IA64_KR(CURRENT);; tpa reg=reg
75 # define MINSTATE_START_SAVE_MIN MINSTATE_START_SAVE_MIN_PHYS
76 # define MINSTATE_END_SAVE_MIN MINSTATE_END_SAVE_MIN_PHYS
77 #endif
79 /*
80 * DO_SAVE_MIN switches to the kernel stacks (if necessary) and saves
81 * the minimum state necessary that allows us to turn psr.ic back
82 * on.
83 *
84 * Assumed state upon entry:
85 * psr.ic: off
86 * r31: contains saved predicates (pr)
87 *
88 * Upon exit, the state is as follows:
89 * psr.ic: off
90 * r2 = points to &pt_regs.r16
91 * r8 = contents of ar.ccv
92 * r9 = contents of ar.csd
93 * r10 = contents of ar.ssd
94 * r11 = FPSR_DEFAULT
95 * r12 = kernel sp (kernel virtual address)
96 * r13 = points to current task_struct (kernel virtual address)
97 * p15 = TRUE if psr.i is set in cr.ipsr
98 * predicate registers (other than p2, p3, and p15), b6, r3, r14, r15:
99 * preserved
100 * CONFIG_XEN note: p6/p7 are not preserved
101 *
102 * Note that psr.ic is NOT turned on by this macro. This is so that
103 * we can pass interruption state as arguments to a handler.
104 */
105 #ifdef CONFIG_XEN
/*
 * Xen paravirtualized DO_SAVE_MIN.  Differences from the native
 * version below:
 *  - cr.ipsr and cr.iip are privileged, so they are read from the
 *    hypervisor shared-info page (XSI_IPSR / XSI_IIP) instead of with
 *    mov-from-cr (the commented-out mov lines mark the native forms);
 *  - SAVE_IFS is ignored: cr.ifs is fetched from XSI_PRECOVER_IFS or
 *    XSI_IFS depending on whether Xen has already performed a "lazy
 *    cover" (XSI_INCOMPL_REGFR flag) -- this clobbers p6/p7, which is
 *    why the block comment above notes they are not preserved;
 *  - the bank switch at the end is done by calling xen_bsw1 (b0 is
 *    saved in r2 around the call) instead of MINSTATE_END_SAVE_MIN's
 *    bsw.1 instruction.
 */
106 #define DO_SAVE_MIN(COVER,SAVE_IFS,EXTRA) \
107 MINSTATE_GET_CURRENT(r16); /* M (or M;;I) */ \
108 mov r27=ar.rsc; /* M */ \
109 mov r20=r1; /* A */ \
110 mov r25=ar.unat; /* M */ \
111 /* mov r29=cr.ipsr; /* M */ \
112 movl r29=XSI_IPSR;; \
113 ld8 r29=[r29];; \
114 mov r26=ar.pfs; /* I */ \
115 /* mov r28=cr.iip; /* M */ \
116 movl r28=XSI_IIP;; \
117 ld8 r28=[r28];; \
118 mov r21=ar.fpsr; /* M */ \
119 COVER; /* B;; (or nothing) */ \
120 ;; \
121 adds r16=IA64_TASK_THREAD_ON_USTACK_OFFSET,r16; \
122 ;; \
123 ld1 r17=[r16]; /* load current->thread.on_ustack flag */ \
124 st1 [r16]=r0; /* clear current->thread.on_ustack flag */ \
125 adds r1=-IA64_TASK_THREAD_ON_USTACK_OFFSET,r16 \
126 /* switch from user to kernel RBS: */ \
127 ;; \
128 invala; /* M */ \
129 /* SAVE_IFS; /* see xen special handling below */ \
130 cmp.eq pKStk,pUStk=r0,r17; /* are we in kernel mode already? */ \
131 ;; \
132 MINSTATE_START_SAVE_MIN \
133 adds r17=2*L1_CACHE_BYTES,r1; /* really: biggest cache-line size */ \
134 adds r16=PT(CR_IPSR),r1; \
135 ;; \
136 lfetch.fault.excl.nt1 [r17],L1_CACHE_BYTES; \
137 st8 [r16]=r29; /* save cr.ipsr */ \
138 ;; \
139 lfetch.fault.excl.nt1 [r17]; \
140 tbit.nz p15,p0=r29,IA64_PSR_I_BIT; \
141 mov r29=b0 \
142 ;; \
143 adds r16=PT(R8),r1; /* initialize first base pointer */ \
144 adds r17=PT(R9),r1; /* initialize second base pointer */ \
145 (pKStk) mov r18=r0; /* make sure r18 isn't NaT */ \
146 ;; \
147 .mem.offset 0,0; st8.spill [r16]=r8,16; \
148 .mem.offset 8,0; st8.spill [r17]=r9,16; \
149 ;; \
150 .mem.offset 0,0; st8.spill [r16]=r10,24; \
151 .mem.offset 8,0; st8.spill [r17]=r11,24; \
152 ;; \
153 /* xen special handling for possibly lazy cover */ \
154 movl r8=XSI_INCOMPL_REGFR; \
155 ;; \
156 ld4 r30=[r8]; \
157 ;; \
158 cmp.eq p6,p7=r30,r0; \
159 ;; /* not sure if this stop bit is necessary */ \
160 (p6) adds r8=XSI_PRECOVER_IFS-XSI_INCOMPL_REGFR,r8; \
161 (p7) adds r8=XSI_IFS-XSI_INCOMPL_REGFR,r8; \
162 ;; \
163 ld8 r30=[r8]; \
164 ;; \
165 st8 [r16]=r28,16; /* save cr.iip */ \
166 st8 [r17]=r30,16; /* save cr.ifs */ \
167 (pUStk) sub r18=r18,r22; /* r18=RSE.ndirty*8 */ \
168 mov r8=ar.ccv; \
169 mov r9=ar.csd; \
170 mov r10=ar.ssd; \
171 movl r11=FPSR_DEFAULT; /* L-unit */ \
172 ;; \
173 st8 [r16]=r25,16; /* save ar.unat */ \
174 st8 [r17]=r26,16; /* save ar.pfs */ \
175 shl r18=r18,16; /* compute ar.rsc to be used for "loadrs" */ \
176 ;; \
177 st8 [r16]=r27,16; /* save ar.rsc */ \
178 (pUStk) st8 [r17]=r24,16; /* save ar.rnat */ \
179 (pKStk) adds r17=16,r17; /* skip over ar_rnat field */ \
180 ;; /* avoid RAW on r16 & r17 */ \
181 (pUStk) st8 [r16]=r23,16; /* save ar.bspstore */ \
182 st8 [r17]=r31,16; /* save predicates */ \
183 (pKStk) adds r16=16,r16; /* skip over ar_bspstore field */ \
184 ;; \
185 st8 [r16]=r29,16; /* save b0 */ \
186 st8 [r17]=r18,16; /* save ar.rsc value for "loadrs" */ \
187 cmp.eq pNonSys,pSys=r0,r0 /* initialize pSys=0, pNonSys=1 */ \
188 ;; \
189 .mem.offset 0,0; st8.spill [r16]=r20,16; /* save original r1 */ \
190 .mem.offset 8,0; st8.spill [r17]=r12,16; \
191 adds r12=-16,r1; /* switch to kernel memory stack (with 16 bytes of scratch) */ \
192 ;; \
193 .mem.offset 0,0; st8.spill [r16]=r13,16; \
194 .mem.offset 8,0; st8.spill [r17]=r21,16; /* save ar.fpsr */ \
195 mov r13=IA64_KR(CURRENT); /* establish `current' */ \
196 ;; \
197 .mem.offset 0,0; st8.spill [r16]=r15,16; \
198 .mem.offset 8,0; st8.spill [r17]=r14,16; \
199 ;; \
200 .mem.offset 0,0; st8.spill [r16]=r2,16; \
201 .mem.offset 8,0; st8.spill [r17]=r3,16; \
202 ;; \
203 EXTRA; \
204 mov r2=b0; br.call.sptk b0=xen_bsw1;; mov b0=r2; \
205 adds r2=IA64_PT_REGS_R16_OFFSET,r1; \
206 ;; \
207 movl r1=__gp; /* establish kernel global pointer */ \
208 ;; \
209 /* MINSTATE_END_SAVE_MIN */
210 #else
/*
 * Native (non-Xen) DO_SAVE_MIN.  Reads cr.ipsr/cr.iip directly,
 * honors the SAVE_IFS argument to capture cr.ifs into r30, and ends
 * with MINSTATE_END_SAVE_MIN (bsw.1 on the virtual path).  The body
 * otherwise matches the Xen variant above instruction for instruction,
 * spilling the minimal register set into pt_regs at r1 so psr.ic can
 * be re-enabled by the caller.
 */
211 #define DO_SAVE_MIN(COVER,SAVE_IFS,EXTRA) \
212 MINSTATE_GET_CURRENT(r16); /* M (or M;;I) */ \
213 mov r27=ar.rsc; /* M */ \
214 mov r20=r1; /* A */ \
215 mov r25=ar.unat; /* M */ \
216 mov r29=cr.ipsr; /* M */ \
217 mov r26=ar.pfs; /* I */ \
218 mov r28=cr.iip; /* M */ \
219 mov r21=ar.fpsr; /* M */ \
220 COVER; /* B;; (or nothing) */ \
221 ;; \
222 adds r16=IA64_TASK_THREAD_ON_USTACK_OFFSET,r16; \
223 ;; \
224 ld1 r17=[r16]; /* load current->thread.on_ustack flag */ \
225 st1 [r16]=r0; /* clear current->thread.on_ustack flag */ \
226 adds r1=-IA64_TASK_THREAD_ON_USTACK_OFFSET,r16 \
227 /* switch from user to kernel RBS: */ \
228 ;; \
229 invala; /* M */ \
230 SAVE_IFS; \
231 cmp.eq pKStk,pUStk=r0,r17; /* are we in kernel mode already? */ \
232 ;; \
233 MINSTATE_START_SAVE_MIN \
234 adds r17=2*L1_CACHE_BYTES,r1; /* really: biggest cache-line size */ \
235 adds r16=PT(CR_IPSR),r1; \
236 ;; \
237 lfetch.fault.excl.nt1 [r17],L1_CACHE_BYTES; \
238 st8 [r16]=r29; /* save cr.ipsr */ \
239 ;; \
240 lfetch.fault.excl.nt1 [r17]; \
241 tbit.nz p15,p0=r29,IA64_PSR_I_BIT; \
242 mov r29=b0 \
243 ;; \
244 adds r16=PT(R8),r1; /* initialize first base pointer */ \
245 adds r17=PT(R9),r1; /* initialize second base pointer */ \
246 (pKStk) mov r18=r0; /* make sure r18 isn't NaT */ \
247 ;; \
248 .mem.offset 0,0; st8.spill [r16]=r8,16; \
249 .mem.offset 8,0; st8.spill [r17]=r9,16; \
250 ;; \
251 .mem.offset 0,0; st8.spill [r16]=r10,24; \
252 .mem.offset 8,0; st8.spill [r17]=r11,24; \
253 ;; \
254 st8 [r16]=r28,16; /* save cr.iip */ \
255 st8 [r17]=r30,16; /* save cr.ifs */ \
256 (pUStk) sub r18=r18,r22; /* r18=RSE.ndirty*8 */ \
257 mov r8=ar.ccv; \
258 mov r9=ar.csd; \
259 mov r10=ar.ssd; \
260 movl r11=FPSR_DEFAULT; /* L-unit */ \
261 ;; \
262 st8 [r16]=r25,16; /* save ar.unat */ \
263 st8 [r17]=r26,16; /* save ar.pfs */ \
264 shl r18=r18,16; /* compute ar.rsc to be used for "loadrs" */ \
265 ;; \
266 st8 [r16]=r27,16; /* save ar.rsc */ \
267 (pUStk) st8 [r17]=r24,16; /* save ar.rnat */ \
268 (pKStk) adds r17=16,r17; /* skip over ar_rnat field */ \
269 ;; /* avoid RAW on r16 & r17 */ \
270 (pUStk) st8 [r16]=r23,16; /* save ar.bspstore */ \
271 st8 [r17]=r31,16; /* save predicates */ \
272 (pKStk) adds r16=16,r16; /* skip over ar_bspstore field */ \
273 ;; \
274 st8 [r16]=r29,16; /* save b0 */ \
275 st8 [r17]=r18,16; /* save ar.rsc value for "loadrs" */ \
276 cmp.eq pNonSys,pSys=r0,r0 /* initialize pSys=0, pNonSys=1 */ \
277 ;; \
278 .mem.offset 0,0; st8.spill [r16]=r20,16; /* save original r1 */ \
279 .mem.offset 8,0; st8.spill [r17]=r12,16; \
280 adds r12=-16,r1; /* switch to kernel memory stack (with 16 bytes of scratch) */ \
281 ;; \
282 .mem.offset 0,0; st8.spill [r16]=r13,16; \
283 .mem.offset 8,0; st8.spill [r17]=r21,16; /* save ar.fpsr */ \
284 mov r13=IA64_KR(CURRENT); /* establish `current' */ \
285 ;; \
286 .mem.offset 0,0; st8.spill [r16]=r15,16; \
287 .mem.offset 8,0; st8.spill [r17]=r14,16; \
288 ;; \
289 .mem.offset 0,0; st8.spill [r16]=r2,16; \
290 .mem.offset 8,0; st8.spill [r17]=r3,16; \
291 adds r2=IA64_PT_REGS_R16_OFFSET,r1; \
292 ;; \
293 EXTRA; \
294 movl r1=__gp; /* establish kernel global pointer */ \
295 ;; \
296 MINSTATE_END_SAVE_MIN
297 #endif
299 /*
300 * SAVE_REST saves the remainder of pt_regs (with psr.ic on).
301 *
302 * Assumed state upon entry:
303 * psr.ic: on
304 * r2: points to &pt_regs.r16
305 * r3: points to &pt_regs.r17
306 * r8: contents of ar.ccv
307 * r9: contents of ar.csd
308 * r10: contents of ar.ssd
309 * r11: FPSR_DEFAULT
310 *
311 * Registers r14 and r15 are guaranteed not to be touched by SAVE_REST.
312 */
/*
 * Spill the remaining scratch state into pt_regs: r16-r31 (via the
 * two interleaved base pointers r2/r3, advancing 16 bytes each),
 * b6/b7, f6-f11, and ar.ccv/ar.csd/ar.ssd; also reloads ar.fpsr from
 * the FPSR_DEFAULT value left in r11 by DO_SAVE_MIN.
 */
313 #define SAVE_REST \
314 .mem.offset 0,0; st8.spill [r2]=r16,16; \
315 .mem.offset 8,0; st8.spill [r3]=r17,16; \
316 ;; \
317 .mem.offset 0,0; st8.spill [r2]=r18,16; \
318 .mem.offset 8,0; st8.spill [r3]=r19,16; \
319 ;; \
320 .mem.offset 0,0; st8.spill [r2]=r20,16; \
321 .mem.offset 8,0; st8.spill [r3]=r21,16; \
322 mov r18=b6; \
323 ;; \
324 .mem.offset 0,0; st8.spill [r2]=r22,16; \
325 .mem.offset 8,0; st8.spill [r3]=r23,16; \
326 mov r19=b7; \
327 ;; \
328 .mem.offset 0,0; st8.spill [r2]=r24,16; \
329 .mem.offset 8,0; st8.spill [r3]=r25,16; \
330 ;; \
331 .mem.offset 0,0; st8.spill [r2]=r26,16; \
332 .mem.offset 8,0; st8.spill [r3]=r27,16; \
333 ;; \
334 .mem.offset 0,0; st8.spill [r2]=r28,16; \
335 .mem.offset 8,0; st8.spill [r3]=r29,16; \
336 ;; \
337 .mem.offset 0,0; st8.spill [r2]=r30,16; \
338 .mem.offset 8,0; st8.spill [r3]=r31,32; \
339 ;; \
340 mov ar.fpsr=r11; /* M-unit */ \
341 st8 [r2]=r8,8; /* ar.ccv */ \
342 adds r24=PT(B6)-PT(F7),r3; \
343 ;; \
344 stf.spill [r2]=f6,32; \
345 stf.spill [r3]=f7,32; \
346 ;; \
347 stf.spill [r2]=f8,32; \
348 stf.spill [r3]=f9,32; \
349 ;; \
350 stf.spill [r2]=f10; \
351 stf.spill [r3]=f11; \
352 adds r25=PT(B7)-PT(F11),r3; \
353 ;; \
354 st8 [r24]=r18,16; /* b6 */ \
355 st8 [r25]=r19,16; /* b7 */ \
356 ;; \
357 st8 [r24]=r9; /* ar.csd */ \
358 st8 [r25]=r10; /* ar.ssd */ \
359 ;;
/*
 * Convenience wrappers around DO_SAVE_MIN.  The _WITH_COVER forms
 * issue "cover" and capture cr.ifs into r30 (native path); the _R19
 * form additionally preserves r19 in r15 for the handler.  Under
 * CONFIG_XEN the plain SAVE_MIN (no cover) is deliberately left as a
 * "break 0" trap -- per the FIXME it appears only the ia32-support
 * paths would need it.
 */
361 #define SAVE_MIN_WITH_COVER DO_SAVE_MIN(cover, mov r30=cr.ifs,)
362 #define SAVE_MIN_WITH_COVER_R19 DO_SAVE_MIN(cover, mov r30=cr.ifs, mov r15=r19)
363 #ifdef CONFIG_XEN
364 #define SAVE_MIN break 0;; /* FIXME: non-cover version only for ia32 support? */
365 #else
366 #define SAVE_MIN DO_SAVE_MIN( , mov r30=r0, )
367 #endif