ia64/xen-unstable

xen/arch/powerpc/powerpc64/exceptions.S @ 11493:ae7a722b7241

[POWERPC][XEN] Erratum: Must clear larx/stcx reservation on exception

A PowerPC 970 erratum states that an "OS should execute a stcx in
the interrupt handler to clear the reservation."

Signed-off-by: Jimi Xenidis <jimix@watson.ibm.com>
Signed-off-by: Hollis Blanchard <hollisb@us.ibm.com>
author Jimi Xenidis <jimix@watson.ibm.com>
date Fri Sep 01 11:42:51 2006 -0400 (2006-09-01)
parents 1ef82dd7f66b
children ccdaa3ea71a7

/*
 * Copyright (C) 2005 Jimi Xenidis <jimix@watson.ibm.com>, IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 */

#include <asm/config.h>
#include <asm/asm-offsets.h>
#include <asm/reg_defs.h>
#include <asm/msr.h>
#include <asm/processor.h>

.macro SAVE_GPR regno uregs
    std \regno, (UREGS_gprs + GPR_WIDTH * \regno)(\uregs)
.endm

.macro SAVE_GPRS from to uregs
    .ifge \to-\from
    SAVE_GPR \from, \uregs
    SAVE_GPRS "(\from+1)", \to, \uregs
    .endif
.endm
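
/* Note: the GPR save/load macros recurse at assembly time. For example,
 * "SAVE_GPRS r14, r16, r1" expands to SAVE_GPR for r14, r15 and r16,
 * i.e. three std instructions into the gprs[] array of cpu_user_regs.
 * The quotes keep "(\from+1)" a single macro argument, and the
 * arithmetic works because reg_defs.h is expected to define the rN
 * names as plain register numbers. */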

.macro LOAD_GPR regno uregs
    ld \regno, (UREGS_gprs + GPR_WIDTH * \regno)(\uregs)
.endm

.macro LOAD_GPRS from to uregs
    .ifge \to-\from
    LOAD_GPR \from, \uregs
    LOAD_GPRS "(\from+1)", \to, \uregs
    .endif
.endm

.macro GET_STACK parea srr1
    /* get processor area pointer and save off a couple registers there */
    mtspr SPRN_HSPRG1, \parea
    mfspr \parea, SPRN_HSPRG0
    std r1, PAREA_r1(\parea)
    mfcr r1
    std r1, PAREA_cr(\parea)
    mfspr r1, \srr1
    rldicl. r1, r1, 4, 63 /* test (H)SRR1:HV */
    /* assume we interrupted the guest, in which case we start at top of
     * this processor's hypervisor stack (as found in parea). */
    ld r1, PAREA_stack(\parea)
    beq 1f
    /* nope, we interrupted the hypervisor. continue on that stack. */
    ld r1, PAREA_r1(\parea)
1:
.endm
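
/* How GET_STACK's HV test works: MSR:HV is bit 3 (IBM bit numbering),
 * so rotating (H)SRR1 left by 4 moves it to bit 63, and the rldicl.
 * mask keeps only that bit. The record form sets CR0, so beq (result
 * zero, HV clear) means we interrupted the guest rather than Xen. */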

/* SAVE_C_STATE: set up enough state to jump to C code
 * r14-r31 are non-volatile in the C ABI, so not saved here
 */
.macro SAVE_C_STATE uregs
    SAVE_GPRS r2, r12, \uregs /* save r2-r12 */

    mflr r0
    std r0, UREGS_lr(\uregs) /* save LR */
    mfxer r0
    std r0, UREGS_xer(\uregs) /* save XER */
.endm

.macro LOAD_C_STATE uregs
    ld r0, UREGS_lr(\uregs) /* load LR */
    mtlr r0
    ld r0, UREGS_xer(\uregs) /* load XER */
    mtxer r0
    lwz r0, UREGS_cr(\uregs) /* load CR */
    mtcr r0

    LOAD_GPRS r2, r12, \uregs /* load r2-r12 */
.endm

.macro LOADADDR reg symbol
    lis \reg,\symbol@highest
    ori \reg,\reg,\symbol@higher
    rldicr \reg,\reg,32,31
    oris \reg,\reg,\symbol@h
    ori \reg,\reg,\symbol@l
.endm
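
/* LOADADDR materializes a 64-bit absolute address 16 bits at a time,
 * since PowerPC immediates are 16 bits wide. E.g. for a symbol at
 * 0x0123456789abcdef: lis/ori leave 0x01234567 in the low word, rldicr
 * rotates it into the high word and clears the low word, and oris/ori
 * then OR in 0x89ab and 0xcdef. (Worked example for illustration.) */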

.macro CALL_CFUNC reg
    ld r2, 8(\reg) /* load function's TOC value */
    ld \reg, 0(\reg)
    mtctr \reg
    bctrl
    nop
.endm
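
/* CALL_CFUNC expects \reg to point at a ppc64 ELF function descriptor:
 * the entry point is at offset 0 and the function's TOC pointer at
 * offset 8 (a third doubleword, the environment pointer, is unused by
 * C). Loading r2 before the indirect branch gives the callee its TOC. */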

.macro EXCEPTION_HEAD parea continue
    /* make room for cpu_user_regs */
    subi r1, r1, STACK_VOLATILE_AREA + UREGS_sizeof

    /* get all we need from the processor_area */
    std r0, UREGS_r0(r1) /* get scratch register */
    ld r0, PAREA_r1(\parea)
    std r0, UREGS_r1(r1) /* save R1 */
    ld r0, PAREA_cr(\parea)
    stw r0, UREGS_cr(r1) /* save CR */
    mfspr r0, SPRN_HSPRG1
    std r0, UREGS_r13(r1) /* save R13 from HSPRG1 */

    /* Blow away any reservation according to 970 errata after saving CR */
    stdcx. r1, 0, r1
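
    /* (Per the 970 erratum cited in this changeset: a conditional store
     * in the interrupt path clears any reservation the interrupted code
     * may hold -- stcx-class instructions drop the reservation whether
     * or not the store itself succeeds -- so a guest larx/stcx pair
     * cannot falsely succeed across an exception.) */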

    /* done with processor_area; re-enable MSR:RI */
    mfmsr r0
    ori r0, r0, MSR_RI@l
    mtmsrd r0

    /* save CTR and use it to jump */
    mfctr r0
    std r0, UREGS_ctr(r1)
    LOADADDR r0, \continue
    mtctr r0
.endm

/* For normal exceptions. */
.macro EXCEPTION_SAVE_STATE uregs
    SAVE_C_STATE \uregs

    /* save DEC */
    mfdec r0
    ld r3, PAREA_vcpu(r13)
    stw r0, VCPU_dec(r3)

    /* save PC, MSR */
    mfspr r0, SPRN_SRR0
    std r0, UREGS_pc(\uregs)
    mfspr r0, SPRN_SRR1
    std r0, UREGS_msr(\uregs)
    li r0, -1 /* we clobbered the OS's SRR0/SRR1 to get here. */
    std r0, UREGS_srr0(\uregs)
    std r0, UREGS_srr1(\uregs)
.endm

/* For exceptions that use HSRR0/1 (preserving the OS's SRR0/1). */
.macro H_EXCEPTION_SAVE_STATE uregs
    SAVE_C_STATE \uregs

    /* save DEC */
    mfdec r0
    ld r3, PAREA_vcpu(r13)
    stw r0, VCPU_dec(r3)

    /* save PC, MSR */
    mfspr r0, SPRN_HSRR0
    std r0, UREGS_pc(\uregs)
    mfspr r0, SPRN_HSRR1
    std r0, UREGS_msr(\uregs)
    mfspr r0, SPRN_SRR0
    std r0, UREGS_srr0(\uregs)
    mfspr r0, SPRN_SRR1
    std r0, UREGS_srr1(\uregs)
.endm

/* Hypervisor exception handling code; copied to physical address zero. */
.align 3
.globl exception_vectors
exception_vectors:

    . = 0x0 # wild branch to 0
zero:
    GET_STACK r13 SPRN_SRR1
    EXCEPTION_HEAD r13 ex_program_continued
    li r0, 0x0 /* exception vector for GDB stub */
    bctr

    . = 0x100 # System Reset
ex_reset:
    /* XXX thread initialization */
    GET_STACK r13 SPRN_SRR1
    EXCEPTION_HEAD r13 ex_program_continued
    li r0, 0x100 /* exception vector for GDB stub */
    bctr

    . = 0x200 # Machine Check
ex_machcheck:
    GET_STACK r13 SPRN_SRR1
    EXCEPTION_HEAD r13 ex_program_continued
    li r0, 0x200 /* exception vector for GDB stub */
    bctr

    . = 0x300
ex_dsi:
    GET_STACK r13 SPRN_SRR1
    EXCEPTION_HEAD r13 ex_program_continued
    li r0, 0x300 /* exception vector for GDB stub */
    bctr

    . = 0x380
ex_data_slb:
    GET_STACK r13 SPRN_SRR1
    EXCEPTION_HEAD r13 ex_program_continued
    li r0, 0x380 /* exception vector for GDB stub */
    bctr

    . = 0x400
ex_isi:
    GET_STACK r13 SPRN_SRR1
    EXCEPTION_HEAD r13 ex_program_continued
    li r0, 0x400 /* exception vector for GDB stub */
    bctr

    . = 0x480
ex_inst_slb:
    GET_STACK r13 SPRN_SRR1
    EXCEPTION_HEAD r13 ex_program_continued
    li r0, 0x480 /* exception vector for GDB stub */
    bctr

    . = 0x500
ex_external:
    GET_STACK r13 SPRN_SRR1
    EXCEPTION_HEAD r13 ex_external_continued
    bctr

    . = 0x600
ex_alignment:
    GET_STACK r13 SPRN_SRR1
    EXCEPTION_HEAD r13 ex_program_continued
    li r0, 0x600 /* exception vector for GDB stub */
    bctr

    . = 0x700
ex_program:
    GET_STACK r13 SPRN_SRR1
    EXCEPTION_HEAD r13 ex_program_continued
    li r0, 0x700 /* exception vector for GDB stub */
    bctr

    . = 0x800
ex_float:
    GET_STACK r13 SPRN_SRR1
    EXCEPTION_HEAD r13 ex_program_continued
    li r0, 0x800 /* exception vector for GDB stub */
    bctr

    . = 0x900
ex_dec:
    /* delivered to hypervisor when MSR:EE is set... */
#ifdef SLOW_TRAP
    GET_STACK r13 SPRN_SRR1
    EXCEPTION_HEAD r13 ex_dec_continued
    bctr
#else
    /* XXX for now just reset DEC and return */
    mtspr SPRN_HSPRG1, r3
    lis r3, 0x7fff
    mtdec r3
    mfspr r3, SPRN_HSPRG1
    rfid
#endif
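
/* (lis r3, 0x7fff reloads DEC with 0x7fff0000, close to the maximum
 * positive 32-bit value, postponing the next decrementer exception for
 * roughly 2^31 timebase ticks.) */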

    . = 0x980
ex_hdec:
    GET_STACK r13 SPRN_HSRR1
    EXCEPTION_HEAD r13 ex_hdec_continued
    bctr

    . = 0xc00
ex_syscall:
    GET_STACK r13 SPRN_SRR1
    EXCEPTION_HEAD r13 ex_hcall_continued
    bctr

    . = 0xd00
ex_trace:
    GET_STACK r13 SPRN_SRR1
    EXCEPTION_HEAD r13 ex_program_continued
    li r0, 0xd00 /* exception vector for GDB stub */
    bctr

    . = 0xe00
ex_fp:
    GET_STACK r13 SPRN_SRR1
    EXCEPTION_HEAD r13 ex_program_continued
    li r0, 0xe00 /* exception vector for GDB stub */
    bctr

.align 3
.globl exception_vectors_end

exception_vectors_end:
    /* put some stuff here so we see the next symbol */
    .long 0xdeadbeef
    .long 0xdeadbeef

.macro FAST_RESUME
    LOAD_C_STATE r1 /* restore most C volatiles */

    ld r0, UREGS_ctr(r1)
    mtctr r0

    /* clear MSR:RI/EE to set SRR0/SRR1 */
    li r0, 0
    mtmsrd r0, 1

    ld r0, UREGS_pc(r1)
    mtspr SPRN_HSRR0, r0
    ld r0, UREGS_msr(r1)
    mtspr SPRN_HSRR1, r0

    ld r0, UREGS_srr0(r1)
    mtspr SPRN_SRR0, r0
    ld r0, UREGS_srr1(r1)
    mtspr SPRN_SRR1, r0

    ld r13, UREGS_r13(r1)
    ld r0, UREGS_r0(r1)
    ld r1, UREGS_r1(r1)
    HRFID
    b . /* prevent speculative icache fetch */
.endm
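
/* FAST_RESUME exits via HRFID, which restores PC and MSR from
 * HSRR0/HSRR1; the guest's own SRR0/SRR1 are reloaded just beforehand.
 * MSR:RI is cleared first because an exception taken while the SRRs
 * hold exit state could not be recovered from. */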

/* We enter with the exception number in r0. The EXCEPTION_SAVE_STATE macro
 * clobbers r0 though, so we have to move it around a little bit. Not ideal,
 * but hopefully program exception is not performance-critical... Maybe there's
 * a better way, but this works for now. */
ex_program_continued:
    SAVE_GPRS r14, r31, r1 /* save all the non-volatiles */
    /* save hid4 for debug */
    mfspr r14, SPRN_HID4
    std r14, UREGS_hid4(r1)
    mr r14, r0
    EXCEPTION_SAVE_STATE r1
    mr r4, r14
    LOADADDR r12, program_exception
    mr r3, r1 /* pass pointer to cpu_user_regs */
    subi r1, r1, STACK_FRAME_OVERHEAD /* make a "caller" stack frame */
    CALL_CFUNC r12

    /* reload state and rfid */
    addi r1, r1, STACK_FRAME_OVERHEAD /* restore stack to cpu_user_regs */
    LOAD_GPRS r14, r31, r1
    FAST_RESUME

ex_external_continued:
    EXCEPTION_SAVE_STATE r1
    LOADADDR r12, do_external
    mr r3, r1 /* pass pointer to cpu_user_regs */
    subi r1, r1, STACK_FRAME_OVERHEAD /* make a "caller" stack frame */
    CALL_CFUNC r12

    addi r1, r1, STACK_FRAME_OVERHEAD /* restore stack to cpu_user_regs */
    b fast_resume

ex_hcall_continued:
    /* We have to save the non-volatiles here in case of a block hcall (which
     * will end up in context_switch()). */
    SAVE_GPRS r14, r31, r1
    EXCEPTION_SAVE_STATE r1
    LOADADDR r12, do_hcall
    mr r3, r1 /* pass pointer to cpu_user_regs */
    subi r1, r1, STACK_FRAME_OVERHEAD /* make a "caller" stack frame */
    CALL_CFUNC r12 /* call hcall handler */

    /* test for pending softirqs, and loop until there are no more. */
    mfmsr r14
    ori r14, r14, MSR_EE
    xori r15, r14, MSR_EE
hcall_test_all_events:
    mtmsrd r15, 1 /* disable interrupts */
    ld r3, PAREA_vcpu(r13)
    lwz r3, VCPU_processor(r3)
    LOADADDR r4, irq_stat
    sldi r3, r3, IRQSTAT_shift
    add r4, r3, r4
    ld r5, IRQSTAT_pending(r4)
    cmpldi r5, 0
    beq hcall_out /* no more softirqs; exit loop */

    LOADADDR r6, do_softirq
    mtmsrd r14, 1 /* enable interrupts */
    CALL_CFUNC r6 /* process softirqs */
    b hcall_test_all_events /* look for more */
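
/* The MSR juggling above: r14 holds the MSR with EE set and r15 the
 * same value with EE cleared (the ori/xori pair), so the loop can turn
 * external interrupts off while sampling irq_stat and back on around
 * do_softirq() with a single mtmsrd each way. */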

hcall_out:
    addi r1, r1, STACK_FRAME_OVERHEAD /* restore stack to cpu_user_regs */
    LOAD_GPRS r14, r15, r1 /* we clobbered r14/r15 */
    b fast_resume

ex_dec_continued:
    EXCEPTION_SAVE_STATE r1
    LOADADDR r12, do_dec
    mr r3, r1 /* pass pointer to cpu_user_regs */
    subi r1, r1, STACK_FRAME_OVERHEAD /* make a "caller" stack frame */
    CALL_CFUNC r12

    addi r1, r1, STACK_FRAME_OVERHEAD /* restore stack to cpu_user_regs */
    b fast_resume

ex_hdec_continued:
    /* When we get an HDEC, we (almost?) always context_switch, so we need to
     * save the nonvolatiles. */
    SAVE_GPRS r14, r31, r1
    H_EXCEPTION_SAVE_STATE r1

    LOADADDR r12, do_timer
    mr r3, r1
    subi r1, r1, STACK_FRAME_OVERHEAD /* make a "caller" stack frame */
    CALL_CFUNC r12

    /* if we are resuming into hypervisor, don't handle softirqs */
    ld r10, (UREGS_msr + STACK_FRAME_OVERHEAD)(r1)
    rldicl. r11, r10, 4, 63 /* test SRR1:HV */
    bne hdec_out

    /* test for pending softirqs, and loop until there are no more. */
    mfmsr r14
    ori r14, r14, MSR_EE
    xori r15, r14, MSR_EE
test_all_events:
    mtmsrd r15, 1 /* disable interrupts */
    ld r3, PAREA_vcpu(r13)
    lwz r3, VCPU_processor(r3)
    LOADADDR r4, irq_stat
    sldi r3, r3, IRQSTAT_shift
    add r4, r3, r4
    ld r5, IRQSTAT_pending(r4)
    cmpldi r5, 0
    beq hdec_out /* no more softirqs; exit loop */

    LOADADDR r6, do_softirq
    mtmsrd r14, 1 /* enable interrupts */
    CALL_CFUNC r6 /* process softirqs */
    b test_all_events /* look for more */

hdec_out:
    addi r1, r1, STACK_FRAME_OVERHEAD /* restore stack to cpu_user_regs */
    LOAD_GPRS r14, r15, r1 /* we clobbered r14/r15 in the loop */

/* r1 points to the to-be-restored cpu_user_regs. These could be mid-hypervisor
 * stack (returning into elsewhere in Xen) or at the top of the stack
 * (restoring the domain). */
_GLOBAL(full_resume)
    /* disable MSR:EE, since we could have come from do_softirq() */
    mfmsr r7
    ori r7, r7, MSR_EE
    xori r7, r7, MSR_EE
    mtmsrd r7, 1

    LOAD_GPRS r14, r31, r1 /* restore all non-volatiles */

fast_resume:
    ld r10, UREGS_msr(r1)
    rldicl. r11, r10, 4, 63 /* test SRR1:HV */
    bne 1f /* returning to hypervisor */

    /* check for pending irqs */
    mr r3, r1
    subi r1, r1, STACK_FRAME_OVERHEAD
    bl .deliver_ee
    addi r1, r1, STACK_FRAME_OVERHEAD
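
    /* (.deliver_ee is the ppc64 ELF "dot symbol" for deliver_ee's code
     * entry point; a direct bl works here because caller and callee
     * share Xen's TOC, so no descriptor indirection is needed.) */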

    /* if we took a DEC in hypervisor mode, we don't want to reload the DEC
     * until we return to the domain. MSR_EE is clear, so the domain will take
     * any impending DEC. */
    ld r3, PAREA_vcpu(r13)
    lwz r0, VCPU_dec(r3)
    mtdec r0

1:
    FAST_RESUME
    /* not reached */

/* move all of the below somewhere else */

_GLOBAL(papr_hcall_jump)
    mtctr r4
    bctr
    /* return to caller via LR */

/* XXX don't need to load all the registers */
_GLOBAL(xen_hvcall_jump)
    mtctr r4
    ld r10, (UREGS_gprs + GPR_WIDTH * 11)(r3)
    ld r9, (UREGS_gprs + GPR_WIDTH * 10)(r3)
    ld r8, (UREGS_gprs + GPR_WIDTH * 9)(r3)
    ld r7, (UREGS_gprs + GPR_WIDTH * 8)(r3)
    ld r6, (UREGS_gprs + GPR_WIDTH * 7)(r3)
    ld r5, (UREGS_gprs + GPR_WIDTH * 6)(r3)
    ld r4, (UREGS_gprs + GPR_WIDTH * 5)(r3)
    ld r3, (UREGS_gprs + GPR_WIDTH * 4)(r3)
    bctr
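
/* (xen_hvcall_jump reloads the guest's hcall arguments from saved
 * gprs[4..11] into the C argument registers r3-r10. The jump target in
 * r4 is moved to CTR before r4 is overwritten, and r3 is loaded last
 * because it holds the cpu_user_regs pointer until then.) */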

_GLOBAL(_reset_stack_and_jump)
    ld r2, 8(r3)
    ld r3, 0(r3)
    mtctr r3
    mr r1, r4
    bctr
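
/* (_reset_stack_and_jump uses the same function-descriptor convention
 * as CALL_CFUNC: r3 points at the descriptor, so the TOC and entry
 * point are loaded from it, while r4 supplies the new stack pointer.) */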

_GLOBAL(sleep)
    mfmsr r3
    ori r4, r3, MSR_EE
    oris r4, r4, MSR_POW@h
    sync
    mtmsrd r4
    isync
    mtmsrd r3
    blr
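
/* (Setting MSR:POW with MSR:EE enabled stops the processor until the
 * next interrupt wakes it; the second mtmsrd restores the original MSR.
 * The sync/isync bracket orders the power-state update.) */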

/* The primary processor issues a firmware call to spin us up at this
 * address, passing our CPU number in r3. We only need a function
 * entry point instead of a descriptor since this is never called from
 * C code.
 */
.globl spin_start
spin_start:
    /* Write our processor number as an acknowledgment that we're alive. */
    LOADADDR(r14, __spin_ack)
    stw r3, 0(r14)
    sync
    /* If NR_CPUS is too small, we should just spin forever. */
    LOADADDR(r15, NR_CPUS)
    cmpd r3, r15
    blt 2f
    b .
    /* Find our index in the array of processor_area struct pointers. */
2:  LOADADDR(r14, global_cpu_table)
    mulli r15, r3, 8
    add r14, r14, r15
    /* Spin until the pointer for our processor goes valid. */
1:  ld r15, 0(r14)
    cmpldi r15, 0
    beq 1b
    /* Dereference the pointer and load our stack pointer. */
    isync
    ld r1, PAREA_stack(r15)
    li r14, STACK_FRAME_OVERHEAD
    sub r1, r1, r14
    /* Load up the TOC and entry point for the C function to be called. */
    LOADADDR(r14, secondary_cpu_init)
    ld r2, 8(r14)
    ld r11, 0(r14)
    mtctr r11
    /* Warning: why do we need this synchronizing instruction on 970FX? */
    isync
    /* Jump into C code now. */
    bctrl
    nop
    b .