ia64/xen-unstable

xen/arch/powerpc/powerpc64/exceptions.S @ 12929:24fd59787982

[XEN][POWERPC] Flush all of text per CPU in case the loader did not
Signed-off-by: Jimi Xenidis <jimix@watson.ibm.com>
Signed-off-by: Hollis Blanchard <hollisb@us.ibm.com>

author   Jimi Xenidis <jimix@watson.ibm.com>
date     Mon Oct 02 11:06:10 2006 -0400 (2006-10-02)
parents  a17aa5e65209
children 0ff8b14fb530

/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright (C) IBM Corp. 2005, 2006
 *
 * Authors: Jimi Xenidis <jimix@watson.ibm.com>
 *          Hollis Blanchard <hollisb@us.ibm.com>
 */

#include <asm/config.h>
#include <asm/asm-offsets.h>
#include <asm/reg_defs.h>
#include <asm/msr.h>
#include <asm/processor.h>
#include <asm/percpu.h>
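
/* Helper macros: SAVE_GPR/LOAD_GPR store or load a single GPR in the
 * cpu_user_regs gpr array; SAVE_GPRS/LOAD_GPRS recurse over an inclusive
 * register range (no-op if the range is given in decreasing order). */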
.macro SAVE_GPR regno uregs
    std \regno, (UREGS_gprs + GPR_WIDTH * \regno)(\uregs)
.endm

.macro SAVE_GPRS from to uregs
    .ifge \to-\from
    SAVE_GPR \from, \uregs
    SAVE_GPRS "(\from+1)", \to, \uregs
    .endif
.endm

.macro LOAD_GPR regno uregs
    ld \regno, (UREGS_gprs + GPR_WIDTH * \regno)(\uregs)
.endm

.macro LOAD_GPRS from to uregs
    .ifge \to-\from
    LOAD_GPR \from, \uregs
    LOAD_GPRS "(\from+1)", \to, \uregs
    .endif
.endm

.macro GET_STACK parea srr1
    /* get processor area pointer and save off a couple registers there */
    mtspr SPRN_HSPRG1, \parea
    mfspr \parea, SPRN_HSPRG0
    std r1, PAREA_r1(\parea)
    mfcr r1
    std r1, PAREA_cr(\parea)
    mfspr r1, \srr1
    rldicl. r1, r1, 4, 63        /* test (H)SRR1:HV */
    /* assume we interrupted the guest, in which case we start at the top of
     * this processor's hypervisor stack (as found in parea). */
    ld r1, PAREA_stack(\parea)
    beq 1f
    /* nope, we interrupted the hypervisor. continue on that stack. */
    ld r1, PAREA_r1(\parea)
1:
.endm

/* SAVE_C_STATE: set up enough state to jump to C code
 * r14-r31 are non-volatile in the C ABI, so not saved here
 */
.macro SAVE_C_STATE uregs
    SAVE_GPRS r2, r12, \uregs    /* save r2-r12 */

    mflr r0
    std r0, UREGS_lr(\uregs)     /* save LR */
    mfxer r0
    std r0, UREGS_xer(\uregs)    /* save XER */
.endm

.macro LOAD_C_STATE uregs
    ld r0, UREGS_lr(\uregs)      /* load LR */
    mtlr r0
    ld r0, UREGS_xer(\uregs)     /* load XER */
    mtxer r0
    lwz r0, UREGS_cr(\uregs)     /* load CR */
    mtcr r0

    LOAD_GPRS r2, r12, \uregs    /* load r2-r12 */
.endm
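
/* LOADADDR builds the full 64-bit absolute address of \symbol in \reg,
 * 16 bits at a time (highest and higher halves, shift up 32, then the low
 * 32 bits), so it works regardless of where the symbol is linked. */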
.macro LOADADDR reg symbol
    lis \reg,\symbol@highest
    ori \reg,\reg,\symbol@higher
    rldicr \reg,\reg,32,31
    oris \reg,\reg,\symbol@h
    ori \reg,\reg,\symbol@l
.endm
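
/* CALL_CFUNC expects \reg to point at an ELF function descriptor (as used
 * by the 64-bit PowerPC ABI): doubleword 0 is the entry point, doubleword 8
 * is the callee's TOC pointer, which must be in r2 before the call. */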
.macro CALL_CFUNC reg
    ld r2, 8(\reg)               /* load function's TOC value */
    ld \reg, 0(\reg)
    mtctr \reg
    bctrl
    nop
.endm

.macro EXCEPTION_HEAD parea continue
    /* make room for cpu_user_regs */
    subi r1, r1, STACK_VOLATILE_AREA + UREGS_sizeof

    /* get all we need from the processor_area */
    std r0, UREGS_r0(r1)         /* save r0 so we can use it as scratch */
    ld r0, PAREA_r1(\parea)
    std r0, UREGS_r1(r1)         /* save R1 */
    ld r0, PAREA_cr(\parea)
    stw r0, UREGS_cr(r1)         /* save CR */
    mfspr r0, SPRN_HSPRG1
    std r0, UREGS_r13(r1)        /* save R13 from HSPRG1 */

    /* Only _one_ larx is allowed at a time.  Any future use will be
     * rejected until the earlier one (if any) completes.  Since we may
     * have interrupted a larx in the domain or in Xen, we need to clear
     * any larx that may currently exist.  We could probably skip this
     * for hcalls. */
    ldx r0, 0, r1
    stdcx. r0, 0, r1

    /* save CTR and use it to jump */
    mfctr r0
    std r0, UREGS_ctr(r1)
    LOADADDR r0, \continue
    mtctr r0
.endm

/* For normal exceptions. */
.macro EXCEPTION_SAVE_STATE uregs
    SAVE_C_STATE \uregs

    /* save DEC */
    mfdec r0
    ld r3, PAREA_vcpu(r13)
    stw r0, VCPU_dec(r3)

    /* save PC, MSR */
    mfspr r0, SPRN_SRR0
    std r0, UREGS_pc(\uregs)
    mfspr r0, SPRN_SRR1
    std r0, UREGS_msr(\uregs)
    li r0, -1                    /* we clobbered the OS's SRR0/SRR1 to get here. */
    std r0, UREGS_srr0(\uregs)
    std r0, UREGS_srr1(\uregs)

    /* done with processor_area; re-enable MSR:RI */
    mfmsr r0
    ori r0, r0, MSR_RI@l
    mtmsrd r0
.endm

/* For exceptions that use HSRR0/1 (preserving the OS's SRR0/1). */
.macro H_EXCEPTION_SAVE_STATE uregs
    SAVE_C_STATE \uregs

    /* save DEC */
    mfdec r0
    ld r3, PAREA_vcpu(r13)
    stw r0, VCPU_dec(r3)

    /* save PC, MSR */
    mfspr r0, SPRN_HSRR0
    std r0, UREGS_pc(\uregs)
    mfspr r0, SPRN_HSRR1
    std r0, UREGS_msr(\uregs)
    mfspr r0, SPRN_SRR0
    std r0, UREGS_srr0(\uregs)
    mfspr r0, SPRN_SRR1
    std r0, UREGS_srr1(\uregs)

    /* done with processor_area; re-enable MSR:RI */
    mfmsr r0
    ori r0, r0, MSR_RI@l
    mtmsrd r0
.endm

/* Hypervisor exception handling code; copied to physical address zero. */
    .align 3
    .globl exception_vectors
exception_vectors:

    . = 0x0                      # wild branch to 0
zero:
    GET_STACK r13 SPRN_SRR1
    EXCEPTION_HEAD r13 ex_program_continued
    li r0, 0x0                   /* exception vector for GDB stub */
    bctr

    /* The following byte array holds any per-CPU state flags that need to
     * be preserved across interrupts.  Currently it is only used to track
     * Cache Inhibited Mode when a Machine Check occurs. */
    /* NOTE: This array is indexed by PIR NOT CPUID */
    . = MCK_CPU_STAT_BASE
    .space NR_CPUS
    . = MCK_GOOD_HID4
    .quad 0

    . = 0x100                    # System Reset
ex_reset:
    /* XXX thread initialization */
    GET_STACK r13 SPRN_SRR1
    EXCEPTION_HEAD r13 ex_program_continued
    li r0, 0x100                 /* exception vector for GDB stub */
    bctr

    . = 0x200                    # Machine Check
ex_machcheck:
    /* Restore HID4 to a known state early.  We do not recover from
     * machine check yet, but when we do we should be able to restore
     * HID4 to its proper value. */
    mtspr SPRN_HSPRG1, r13
    ld r13, MCK_GOOD_HID4(0)
    sync
    mtspr SPRN_HID4, r13
    isync
    /* Hopefully we don't have to worry about the ERAT */
    mfspr r13, SPRN_HSPRG1
    /* and now back to our regularly scheduled program */
    GET_STACK r13 SPRN_SRR1
    EXCEPTION_HEAD r13 ex_machcheck_continued
    li r0, 0x200                 /* exception vector for GDB stub */
    bctr

    . = 0x300
ex_dsi:
    GET_STACK r13 SPRN_SRR1
    EXCEPTION_HEAD r13 ex_program_continued
    li r0, 0x300                 /* exception vector for GDB stub */
    bctr

    . = 0x380
ex_data_slb:
    GET_STACK r13 SPRN_SRR1
    EXCEPTION_HEAD r13 ex_program_continued
    li r0, 0x380                 /* exception vector for GDB stub */
    bctr

    . = 0x400
ex_isi:
    GET_STACK r13 SPRN_SRR1
    EXCEPTION_HEAD r13 ex_program_continued
    li r0, 0x400                 /* exception vector for GDB stub */
    bctr

    . = 0x480
ex_inst_slb:
    GET_STACK r13 SPRN_SRR1
    EXCEPTION_HEAD r13 ex_program_continued
    li r0, 0x480                 /* exception vector for GDB stub */
    bctr

    . = 0x500
ex_external:
    GET_STACK r13 SPRN_SRR1
    EXCEPTION_HEAD r13 ex_external_continued
    bctr

    . = 0x600
ex_alignment:
    GET_STACK r13 SPRN_SRR1
    EXCEPTION_HEAD r13 ex_program_continued
    li r0, 0x600                 /* exception vector for GDB stub */
    bctr

    . = 0x700
ex_program:
    GET_STACK r13 SPRN_SRR1
    EXCEPTION_HEAD r13 ex_program_continued
    li r0, 0x700                 /* exception vector for GDB stub */
    bctr

    . = 0x800
ex_float:
    GET_STACK r13 SPRN_SRR1
    EXCEPTION_HEAD r13 ex_program_continued
    li r0, 0x800                 /* exception vector for GDB stub */
    bctr

    . = 0x900
ex_dec:
    /* delivered to hypervisor when MSR:EE is set... */
#ifdef SLOW_TRAP
    GET_STACK r13 SPRN_SRR1
    EXCEPTION_HEAD r13 ex_dec_continued
    bctr
#else
    /* XXX for now just reset DEC and return */
    mtspr SPRN_HSPRG1, r3
    lis r3, 0x7fff
    mtdec r3
    mfspr r3, SPRN_HSPRG1
    rfid
#endif

    . = 0x980
ex_hdec:
    GET_STACK r13 SPRN_HSRR1
    EXCEPTION_HEAD r13 ex_hdec_continued
    bctr

    . = 0xc00
ex_syscall:
    GET_STACK r13 SPRN_SRR1
    EXCEPTION_HEAD r13 ex_hcall_continued
    bctr

    . = 0xd00
ex_trace:
    GET_STACK r13 SPRN_SRR1
    EXCEPTION_HEAD r13 ex_program_continued
    li r0, 0xd00                 /* exception vector for GDB stub */
    bctr

    . = 0xe00
ex_fp:
    GET_STACK r13 SPRN_SRR1
    EXCEPTION_HEAD r13 ex_program_continued
    li r0, 0xe00                 /* exception vector for GDB stub */
    bctr

    .align 3
    .globl exception_vectors_end
exception_vectors_end:
    /* put some stuff here so we see the next symbol */
    .long 0xdeadbeef
    .long 0xdeadbeef
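
/* FAST_RESUME restores the state saved by EXCEPTION_HEAD and
 * EXCEPTION_SAVE_STATE (volatile GPRs, CTR, PC/MSR and the guest's
 * SRR0/SRR1) and returns from the exception with HRFID. */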
.macro FAST_RESUME
    LOAD_C_STATE r1              /* restore most C volatiles */

    ld r0, UREGS_ctr(r1)
    mtctr r0

    /* clear MSR:RI/EE to set SRR0/SRR1 */
    li r0, 0
    mtmsrd r0, 1

    ld r0, UREGS_pc(r1)
    mtspr SPRN_HSRR0, r0
    ld r0, UREGS_msr(r1)
    mtspr SPRN_HSRR1, r0

    ld r0, UREGS_srr0(r1)
    mtspr SPRN_SRR0, r0
    ld r0, UREGS_srr1(r1)
    mtspr SPRN_SRR1, r0

    ld r13, UREGS_r13(r1)
    ld r0, UREGS_r0(r1)
    ld r1, UREGS_r1(r1)
    HRFID
    b .                          /* prevent speculative icache fetch */
.endm

/* Not a whole lot just yet */
ex_machcheck_continued:

/* We enter with the exception number in r0.  The EXCEPTION_SAVE_STATE macro
 * clobbers r0 though, so we have to move it around a little bit.  Not ideal,
 * but hopefully program exception is not performance-critical...  Maybe
 * there's a better way, but this works for now. */
ex_program_continued:
    SAVE_GPRS r14, r31, r1       /* save all the non-volatiles */
    /* save hid4 for debug */
    mfspr r14, SPRN_HID4
    std r14, UREGS_hid4(r1)
    mr r14, r0
    EXCEPTION_SAVE_STATE r1
    mr r4, r14
    LOADADDR r12, program_exception
    mr r3, r1                    /* pass pointer to cpu_user_regs */
    subi r1, r1, STACK_FRAME_OVERHEAD /* make a "caller" stack frame */
    CALL_CFUNC r12

    /* reload state and rfid */
    addi r1, r1, STACK_FRAME_OVERHEAD /* restore stack to cpu_user_regs */
    LOAD_GPRS r14, r31, r1
    FAST_RESUME

ex_external_continued:
    EXCEPTION_SAVE_STATE r1
    LOADADDR r12, do_external
    mr r3, r1                    /* pass pointer to cpu_user_regs */
    subi r1, r1, STACK_FRAME_OVERHEAD /* make a "caller" stack frame */
    CALL_CFUNC r12

    addi r1, r1, STACK_FRAME_OVERHEAD /* restore stack to cpu_user_regs */
    b fast_resume

ex_hcall_continued:
    /* We have to save the non-volatiles here in case of a blocking hcall
     * (which will end up in context_switch()). */
    SAVE_GPRS r14, r31, r1
    EXCEPTION_SAVE_STATE r1
    LOADADDR r12, do_hcall
    mr r3, r1                    /* pass pointer to cpu_user_regs */
    subi r1, r1, STACK_FRAME_OVERHEAD /* make a "caller" stack frame */
    CALL_CFUNC r12               /* call hcall handler */

    /* test for pending softirqs, and loop until there are no more. */
    mfmsr r14
    ori r14, r14, MSR_EE
    xori r15, r14, MSR_EE
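
    /* r14 now holds the MSR with EE set (interrupts enabled) and r15 the
     * same MSR with EE clear, so the loop below can toggle interrupts with
     * a single mtmsrd. */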
hcall_test_all_events:
    mtmsrd r15, 1                /* disable interrupts */
    ld r3, PAREA_vcpu(r13)
    lwz r3, VCPU_processor(r3)
    LOADADDR r4, irq_stat
    sldi r3, r3, IRQSTAT_shift
    add r4, r3, r4
    ld r5, IRQSTAT_pending(r4)
    cmpldi r5, 0
    beq hcall_out                /* no more softirqs; exit loop */

    LOADADDR r6, do_softirq
    mtmsrd r14, 1                /* enable interrupts */
    CALL_CFUNC r6                /* process softirqs */
    b hcall_test_all_events      /* look for more */

hcall_out:
    addi r1, r1, STACK_FRAME_OVERHEAD /* restore stack to cpu_user_regs */
    LOAD_GPRS r14, r15, r1       /* we clobbered r14/r15 */
    b fast_resume

ex_dec_continued:
    EXCEPTION_SAVE_STATE r1
    LOADADDR r12, do_dec
    mr r3, r1                    /* pass pointer to cpu_user_regs */
    subi r1, r1, STACK_FRAME_OVERHEAD /* make a "caller" stack frame */
    CALL_CFUNC r12

    addi r1, r1, STACK_FRAME_OVERHEAD /* restore stack to cpu_user_regs */
    b fast_resume

ex_hdec_continued:
    /* When we get an HDEC, we (almost?) always context_switch, so we need
     * to save the nonvolatiles. */
    SAVE_GPRS r14, r31, r1
    H_EXCEPTION_SAVE_STATE r1

    LOADADDR r12, do_timer
    mr r3, r1
    subi r1, r1, STACK_FRAME_OVERHEAD /* make a "caller" stack frame */
    CALL_CFUNC r12

    /* if we are resuming into hypervisor, don't handle softirqs */
    ld r10, (UREGS_msr + STACK_FRAME_OVERHEAD)(r1)
    rldicl. r11, r10, 4, 63      /* test SRR1:HV */
    bne hdec_out

    /* test for pending softirqs, and loop until there are no more. */
    mfmsr r14
    ori r14, r14, MSR_EE
    xori r15, r14, MSR_EE
test_all_events:
    mtmsrd r15, 1                /* disable interrupts */
    ld r3, PAREA_vcpu(r13)
    lwz r3, VCPU_processor(r3)
    LOADADDR r4, irq_stat
    sldi r3, r3, IRQSTAT_shift
    add r4, r3, r4
    ld r5, IRQSTAT_pending(r4)
    cmpldi r5, 0
    beq hdec_out                 /* no more softirqs; exit loop */

    LOADADDR r6, do_softirq
    mtmsrd r14, 1                /* enable interrupts */
    CALL_CFUNC r6                /* process softirqs */
    b test_all_events            /* look for more */

hdec_out:
    addi r1, r1, STACK_FRAME_OVERHEAD /* restore stack to cpu_user_regs */
    LOAD_GPRS r14, r15, r1       /* we clobbered r14/r15 in the loop */

/* r1 points to the to-be-restored cpu_user_regs.  These could be
 * mid-hypervisor stack (returning into elsewhere in Xen) or at the top of
 * the stack (restoring the domain). */
_GLOBAL(full_resume)
    /* disable MSR:EE, since we could have come from do_softirq() */
    mfmsr r7
    ori r7, r7, MSR_EE
    xori r7, r7, MSR_EE
    mtmsrd r7, 1

    LOAD_GPRS r14, r31, r1       /* restore all non-volatiles */

fast_resume:
    ld r10, UREGS_msr(r1)
    rldicl. r11, r10, 4, 63      /* test SRR1:HV */
    bne 1f                       /* returning to hypervisor */

    /* check for pending irqs */
    mr r3, r1
    subi r1, r1, STACK_FRAME_OVERHEAD
    bl .deliver_ee
    addi r1, r1, STACK_FRAME_OVERHEAD

    /* if we took a DEC in hypervisor mode, we don't want to reload the DEC
     * until we return to the domain.  MSR_EE is clear, so the domain will
     * take any impending DEC. */
    ld r3, PAREA_vcpu(r13)
    lwz r0, VCPU_dec(r3)
    mtdec r0

1:
    FAST_RESUME
    /* not reached */

/* move all of the below somewhere else */
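
/* papr_hcall_jump: jump to the handler whose entry point is in r4.  r3 and
 * the other argument registers are left untouched, and LR still points at
 * our caller, so the handler's blr returns directly to it. */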
_GLOBAL(papr_hcall_jump)
    mtctr r4
    bctr
    /* return to caller via LR */

/* XXX don't need to load all the registers */
_GLOBAL(xen_hvcall_jump)
    mtctr r4
    ld r10, (UREGS_gprs + GPR_WIDTH * 11)(r3)
    ld r9, (UREGS_gprs + GPR_WIDTH * 10)(r3)
    ld r8, (UREGS_gprs + GPR_WIDTH * 9)(r3)
    ld r7, (UREGS_gprs + GPR_WIDTH * 8)(r3)
    ld r6, (UREGS_gprs + GPR_WIDTH * 7)(r3)
    ld r5, (UREGS_gprs + GPR_WIDTH * 6)(r3)
    ld r4, (UREGS_gprs + GPR_WIDTH * 5)(r3)
    ld r3, (UREGS_gprs + GPR_WIDTH * 4)(r3)
    bctr
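
/* _reset_stack_and_jump: r3 points at the function descriptor of the routine
 * to enter (entry point at 0(r3), TOC at 8(r3)) and r4 is the new stack
 * pointer.  Load the TOC, switch stacks and jump. */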
_GLOBAL(_reset_stack_and_jump)
    ld r2, 8(r3)
    ld r3, 0(r3)
    mtctr r3
    mr r1, r4
    bctr
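
/* sleep: set MSR:POW (with EE enabled so an interrupt can wake us) to enter
 * the processor's power-saving state, then restore the original MSR and
 * return once we wake up. */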
_GLOBAL(sleep)
    mfmsr r3
    ori r4, r3, MSR_EE
    oris r4, r4, MSR_POW@h
    sync
    mtmsrd r4
    isync
    mtmsrd r3
    blr

/* The primary processor issues a firmware call to spin us up at this
 * address, passing our CPU number in r3.  We only need a function
 * entry point instead of a descriptor since this is never called from
 * C code.
 */
    .globl spin_start
spin_start:
    /* Do a cache flush for our text, in case the loader didn't */
    LOADADDR(r9, _start)
    LOADADDR(r8, _etext)
4:  dcbf r0,r9
    icbi r0,r9
    addi r9,r9,0x20              /* up to a 4 way set per line */
    cmpld cr0,r9,r8
    blt 4b
    sync
    isync

    /* Write our processor number as an acknowledgment that we're alive. */
    LOADADDR(r14, __spin_ack)
    stw r3, 0(r14)
    sync
    /* If NR_CPUS is too small, we should just spin forever. */
    LOADADDR(r15, NR_CPUS)
    cmpd r3, r15
    blt 2f
    b .
    /* Find our index in the array of processor_area struct pointers. */
2:  LOADADDR(r14, global_cpu_table)
    mulli r15, r3, 8
    add r14, r14, r15
    /* Spin until the pointer for our processor goes valid. */
1:  ld r15, 0(r14)
    cmpldi r15, 0
    beq 1b
    /* Dereference the pointer and load our stack pointer. */
    isync
    ld r1, PAREA_stack(r15)
    li r14, STACK_FRAME_OVERHEAD
    sub r1, r1, r14
    /* Load up the TOC and entry point for the C function to be called. */
    LOADADDR(r14, secondary_cpu_init)
    ld r2, 8(r14)
    ld r11, 0(r14)
    mtctr r11
    /* Warning: why do we need this synchronizing instruction on 970FX? */
    isync
    /* Jump into C code now. */
    bctrl
    nop
    b .