
view xen/arch/powerpc/powerpc64/exceptions.S @ 11495:ccdaa3ea71a7

[POWERPC][XEN] move setting of MSR[RI] till after SRR0/1

This also frees up space so we can properly/safely blow away larx/stcx
reservations.

Signed-off-by: Jimi Xenidis <jimix@watson.ibm.com>
Signed-off-by: Hollis Blanchard <hollisb@us.ibm.com>
author Jimi Xenidis <jimix@watson.ibm.com>
date Tue Sep 05 15:25:06 2006 -0400 (2006-09-05)
parents ae7a722b7241
children 2ebf55e419c9
/*
 * Copyright (C) 2005 Jimi Xenidis <jimix@watson.ibm.com>, IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 */

#include <asm/config.h>
#include <asm/asm-offsets.h>
#include <asm/reg_defs.h>
#include <asm/msr.h>
#include <asm/processor.h>

.macro SAVE_GPR regno uregs
    std \regno, (UREGS_gprs + GPR_WIDTH * \regno)(\uregs)
.endm

.macro SAVE_GPRS from to uregs
    .ifge \to-\from
    SAVE_GPR \from, \uregs
    SAVE_GPRS "(\from+1)", \to, \uregs
    .endif
.endm

.macro LOAD_GPR regno uregs
    ld \regno, (UREGS_gprs + GPR_WIDTH * \regno)(\uregs)
.endm

.macro LOAD_GPRS from to uregs
    .ifge \to-\from
    LOAD_GPR \from, \uregs
    LOAD_GPRS "(\from+1)", \to, \uregs
    .endif
.endm
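
/* SAVE_GPRS/LOAD_GPRS expand recursively: each expansion handles \from and
 * re-invokes itself with \from+1 until \from exceeds \to, so e.g.
 * "SAVE_GPRS r14, r31, r1" emits eighteen std instructions at assembly time.
 * This relies on the rN names being plain numeric symbols (presumably from
 * asm/reg_defs.h) so that \to-\from and (\from+1) evaluate arithmetically. */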

.macro GET_STACK parea srr1
    /* get processor area pointer and save off a couple registers there */
    mtspr SPRN_HSPRG1, \parea
    mfspr \parea, SPRN_HSPRG0
    std r1, PAREA_r1(\parea)
    mfcr r1
    std r1, PAREA_cr(\parea)
    mfspr r1, \srr1
    rldicl. r1, r1, 4, 63 /* test (H)SRR1:HV */
    /* assume we interrupted the guest, in which case we start at the top of
     * this processor's hypervisor stack (as found in the parea). */
    ld r1, PAREA_stack(\parea)
    beq 1f
    /* nope, we interrupted the hypervisor. continue on that stack. */
    ld r1, PAREA_r1(\parea)
1:
.endm
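
/* On exit from GET_STACK: \parea holds this CPU's processor_area pointer
 * (fetched from HSPRG0), r1 is the stack to run on, and the interrupted
 * r1/CR are stashed in the parea; the old \parea register value is parked
 * in HSPRG1 until EXCEPTION_HEAD saves it. */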

/* SAVE_C_STATE: set up enough state to jump to C code
 * r14-r31 are non-volatile in the C ABI, so not saved here
 */
.macro SAVE_C_STATE uregs
    SAVE_GPRS r2, r12, \uregs /* save r2-r12 */

    mflr r0
    std r0, UREGS_lr(\uregs) /* save LR */
    mfxer r0
    std r0, UREGS_xer(\uregs) /* save XER */
.endm

.macro LOAD_C_STATE uregs
    ld r0, UREGS_lr(\uregs) /* load LR */
    mtlr r0
    ld r0, UREGS_xer(\uregs) /* load XER */
    mtxer r0
    lwz r0, UREGS_cr(\uregs) /* load CR */
    mtcr r0

    LOAD_GPRS r2, r12, \uregs /* load r2-r12 */
.endm

.macro LOADADDR reg symbol
    lis \reg,\symbol@highest
    ori \reg,\reg,\symbol@higher
    rldicr \reg,\reg,32,31
    oris \reg,\reg,\symbol@h
    ori \reg,\reg,\symbol@l
.endm
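
/* LOADADDR materializes a full 64-bit absolute address: lis/ori assemble
 * the upper 32 bits (@highest/@higher), rldicr shifts them into the high
 * word, and oris/ori fill in the low 32 bits (@h/@l). Five instructions,
 * no memory access, valid wherever the symbol ends up. */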

.macro CALL_CFUNC reg
    ld r2, 8(\reg) /* load function's TOC value */
    ld \reg, 0(\reg)
    mtctr \reg
    bctrl
    nop
.endm
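
/* CALL_CFUNC expects \reg to point at an ELF 64-bit PowerPC (ELFv1)
 * function descriptor rather than at code. Roughly, in C terms:
 *
 *     struct func_descr {
 *         void *entry;   <- code address, loaded into CTR
 *         void *toc;     <- callee's TOC base, loaded into r2
 *         void *env;     <- unused here
 *     };
 *
 * which is why the TOC comes from offset 8 and the entry point from
 * offset 0 above. */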

.macro EXCEPTION_HEAD parea continue
    /* make room for cpu_user_regs */
    subi r1, r1, STACK_VOLATILE_AREA + UREGS_sizeof

    /* get all we need from the processor_area */
    std r0, UREGS_r0(r1) /* get scratch register */
    ld r0, PAREA_r1(\parea)
    std r0, UREGS_r1(r1) /* save R1 */
    ld r0, PAREA_cr(\parea)
    stw r0, UREGS_cr(r1) /* save CR */
    mfspr r0, SPRN_HSPRG1
    std r0, UREGS_r13(r1) /* save R13 from HSPRG1 */

    /* Blow away any reservation according to 970 errata after saving CR */
    ldx r0, 0, r1
    stdcx. r0, 0, r1

    /* save CTR and use it to jump */
    mfctr r0
    std r0, UREGS_ctr(r1)
    LOADADDR r0, \continue
    mtctr r0
.endm
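
/* After EXCEPTION_HEAD: r1 points at a fresh cpu_user_regs with the
 * interrupted r0, r1, r13, CR, and CTR captured, and any outstanding
 * larx/stcx. reservation has been cleared by the ldx/stdcx. pair (the
 * stdcx. may fail, but its CR0 side effect no longer matters because CR
 * was saved first). CTR holds \continue, so the caller can simply bctr. */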

/* For normal exceptions. */
.macro EXCEPTION_SAVE_STATE uregs
    SAVE_C_STATE \uregs

    /* save DEC */
    mfdec r0
    ld r3, PAREA_vcpu(r13)
    stw r0, VCPU_dec(r3)

    /* save PC, MSR */
    mfspr r0, SPRN_SRR0
    std r0, UREGS_pc(\uregs)
    mfspr r0, SPRN_SRR1
    std r0, UREGS_msr(\uregs)
    li r0, -1 /* we clobbered the OS's SRR0/SRR1 to get here. */
    std r0, UREGS_srr0(\uregs)
    std r0, UREGS_srr1(\uregs)

    /* done with processor_area; re-enable MSR:RI */
    mfmsr r0
    ori r0, r0, MSR_RI@l
    mtmsrd r0
.endm
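
/* Note the ordering this changeset enforces: MSR:RI stays clear until
 * SRR0/SRR1 have been read out above. RI advertises that the SRRs are
 * recoverable; taking another exception before they are saved would
 * silently overwrite the interrupted context. */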

/* For exceptions that use HSRR0/1 (preserving the OS's SRR0/1). */
.macro H_EXCEPTION_SAVE_STATE uregs
    SAVE_C_STATE \uregs

    /* save DEC */
    mfdec r0
    ld r3, PAREA_vcpu(r13)
    stw r0, VCPU_dec(r3)

    /* save PC, MSR */
    mfspr r0, SPRN_HSRR0
    std r0, UREGS_pc(\uregs)
    mfspr r0, SPRN_HSRR1
    std r0, UREGS_msr(\uregs)
    mfspr r0, SPRN_SRR0
    std r0, UREGS_srr0(\uregs)
    mfspr r0, SPRN_SRR1
    std r0, UREGS_srr1(\uregs)

    /* done with processor_area; re-enable MSR:RI */
    mfmsr r0
    ori r0, r0, MSR_RI@l
    mtmsrd r0
.endm

/* Hypervisor exception handling code; copied to physical address zero. */
.align 3
.globl exception_vectors
exception_vectors:
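/* Each vector below fits in its fixed architected slot: grab a stack
 * (GET_STACK), capture volatile state and load a continuation address
 * into CTR (EXCEPTION_HEAD), then bctr out to the full handler, which
 * lives outside this relocated region. */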

. = 0x0 # wild branch to 0
zero:
    GET_STACK r13 SPRN_SRR1
    EXCEPTION_HEAD r13 ex_program_continued
    li r0, 0x0 /* exception vector for GDB stub */
    bctr

. = 0x100 # System Reset
ex_reset:
    /* XXX thread initialization */
    GET_STACK r13 SPRN_SRR1
    EXCEPTION_HEAD r13 ex_program_continued
    li r0, 0x100 /* exception vector for GDB stub */
    bctr

. = 0x200 # Machine Check
ex_machcheck:
    GET_STACK r13 SPRN_SRR1
    EXCEPTION_HEAD r13 ex_program_continued
    li r0, 0x200 /* exception vector for GDB stub */
    bctr

. = 0x300
ex_dsi:
    GET_STACK r13 SPRN_SRR1
    EXCEPTION_HEAD r13 ex_program_continued
    li r0, 0x300 /* exception vector for GDB stub */
    bctr

. = 0x380
ex_data_slb:
    GET_STACK r13 SPRN_SRR1
    EXCEPTION_HEAD r13 ex_program_continued
    li r0, 0x380 /* exception vector for GDB stub */
    bctr

. = 0x400
ex_isi:
    GET_STACK r13 SPRN_SRR1
    EXCEPTION_HEAD r13 ex_program_continued
    li r0, 0x400 /* exception vector for GDB stub */
    bctr

. = 0x480
ex_inst_slb:
    GET_STACK r13 SPRN_SRR1
    EXCEPTION_HEAD r13 ex_program_continued
    li r0, 0x480 /* exception vector for GDB stub */
    bctr

. = 0x500
ex_external:
    GET_STACK r13 SPRN_SRR1
    EXCEPTION_HEAD r13 ex_external_continued
    bctr

. = 0x600
ex_alignment:
    GET_STACK r13 SPRN_SRR1
    EXCEPTION_HEAD r13 ex_program_continued
    li r0, 0x600 /* exception vector for GDB stub */
    bctr

. = 0x700
ex_program:
    GET_STACK r13 SPRN_SRR1
    EXCEPTION_HEAD r13 ex_program_continued
    li r0, 0x700 /* exception vector for GDB stub */
    bctr

. = 0x800
ex_float:
    GET_STACK r13 SPRN_SRR1
    EXCEPTION_HEAD r13 ex_program_continued
    li r0, 0x800 /* exception vector for GDB stub */
    bctr

. = 0x900
ex_dec:
    /* delivered to hypervisor when MSR:EE is set... */
#ifdef SLOW_TRAP
    GET_STACK r13 SPRN_SRR1
    EXCEPTION_HEAD r13 ex_dec_continued
    bctr
#else
    /* XXX for now just reset DEC and return */
    mtspr SPRN_HSPRG1, r3
    lis r3, 0x7fff
    mtdec r3
    mfspr r3, SPRN_HSPRG1
    rfid
#endif

. = 0x980
ex_hdec:
    GET_STACK r13 SPRN_HSRR1
    EXCEPTION_HEAD r13 ex_hdec_continued
    bctr

. = 0xc00
ex_syscall:
    GET_STACK r13 SPRN_SRR1
    EXCEPTION_HEAD r13 ex_hcall_continued
    bctr

. = 0xd00
ex_trace:
    GET_STACK r13 SPRN_SRR1
    EXCEPTION_HEAD r13 ex_program_continued
    li r0, 0xd00 /* exception vector for GDB stub */
    bctr

. = 0xe00
ex_fp:
    GET_STACK r13 SPRN_SRR1
    EXCEPTION_HEAD r13 ex_program_continued
    li r0, 0xe00 /* exception vector for GDB stub */
    bctr

.align 3
.globl exception_vectors_end

exception_vectors_end:
    /* put some stuff here so we see the next symbol */
    .long 0xdeadbeef
    .long 0xdeadbeef

.macro FAST_RESUME
    LOAD_C_STATE r1 /* restore most C volatiles */

    ld r0, UREGS_ctr(r1)
    mtctr r0

    /* clear MSR:RI/EE to set SRR0/SRR1 */
    li r0, 0
    mtmsrd r0, 1

    ld r0, UREGS_pc(r1)
    mtspr SPRN_HSRR0, r0
    ld r0, UREGS_msr(r1)
    mtspr SPRN_HSRR1, r0

    ld r0, UREGS_srr0(r1)
    mtspr SPRN_SRR0, r0
    ld r0, UREGS_srr1(r1)
    mtspr SPRN_SRR1, r0

    ld r13, UREGS_r13(r1)
    ld r0, UREGS_r0(r1)
    ld r1, UREGS_r1(r1)
    HRFID
    b . /* prevent speculative icache fetch */
.endm
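
/* FAST_RESUME returns via HRFID, so HSRR0/HSRR1 carry the PC/MSR we
 * resume to, while the guest's own SRR0/SRR1 are reinstated purely as
 * data. RI/EE are cleared first for the same reason as on entry: once
 * the SRRs hold resume state, an intervening interrupt must not
 * clobber them. */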

/* We enter with the exception number in r0. The EXCEPTION_SAVE_STATE macro
 * clobbers r0 though, so we have to move it around a little bit. Not ideal,
 * but hopefully program exception is not performance-critical... Maybe there's
 * a better way, but this works for now. */
ex_program_continued:
    SAVE_GPRS r14, r31, r1 /* save all the non-volatiles */
    /* save hid4 for debug */
    mfspr r14, SPRN_HID4
    std r14, UREGS_hid4(r1)
    mr r14, r0
    EXCEPTION_SAVE_STATE r1
    mr r4, r14
    LOADADDR r12, program_exception
    mr r3, r1 /* pass pointer to cpu_user_regs */
    subi r1, r1, STACK_FRAME_OVERHEAD /* make a "caller" stack frame */
    CALL_CFUNC r12

    /* reload state and rfid */
    addi r1, r1, STACK_FRAME_OVERHEAD /* restore stack to cpu_user_regs */
    LOAD_GPRS r14, r31, r1
    FAST_RESUME

ex_external_continued:
    EXCEPTION_SAVE_STATE r1
    LOADADDR r12, do_external
    mr r3, r1 /* pass pointer to cpu_user_regs */
    subi r1, r1, STACK_FRAME_OVERHEAD /* make a "caller" stack frame */
    CALL_CFUNC r12

    addi r1, r1, STACK_FRAME_OVERHEAD /* restore stack to cpu_user_regs */
    b fast_resume

ex_hcall_continued:
    /* We have to save the non-volatiles here in case of a blocking hcall
     * (which will end up in context_switch()). */
    SAVE_GPRS r14, r31, r1
    EXCEPTION_SAVE_STATE r1
    LOADADDR r12, do_hcall
    mr r3, r1 /* pass pointer to cpu_user_regs */
    subi r1, r1, STACK_FRAME_OVERHEAD /* make a "caller" stack frame */
    CALL_CFUNC r12 /* call hcall handler */

    /* test for pending softirqs, and loop until there are no more. */
    mfmsr r14
    ori r14, r14, MSR_EE
    xori r15, r14, MSR_EE
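
/* r14 = MSR with EE set, r15 = MSR with EE clear: the loop below flips
 * between the two with mtmsrd so that each irq_stat check runs with
 * interrupts disabled, and softirq processing runs with them enabled. */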

hcall_test_all_events:
    mtmsrd r15, 1 /* disable interrupts */
    ld r3, PAREA_vcpu(r13)
    lwz r3, VCPU_processor(r3)
    LOADADDR r4, irq_stat
    sldi r3, r3, IRQSTAT_shift
    add r4, r3, r4
    ld r5, IRQSTAT_pending(r4)
    cmpldi r5, 0
    beq hcall_out /* no more softirqs; exit loop */

    LOADADDR r6, do_softirq
    mtmsrd r14, 1 /* enable interrupts */
    CALL_CFUNC r6 /* process softirqs */
    b hcall_test_all_events /* look for more */

hcall_out:
    addi r1, r1, STACK_FRAME_OVERHEAD /* restore stack to cpu_user_regs */
    LOAD_GPRS r14, r15, r1 /* we clobbered r14/r15 */
    b fast_resume

ex_dec_continued:
    EXCEPTION_SAVE_STATE r1
    LOADADDR r12, do_dec
    mr r3, r1 /* pass pointer to cpu_user_regs */
    subi r1, r1, STACK_FRAME_OVERHEAD /* make a "caller" stack frame */
    CALL_CFUNC r12

    addi r1, r1, STACK_FRAME_OVERHEAD /* restore stack to cpu_user_regs */
    b fast_resume

ex_hdec_continued:
    /* When we get an HDEC, we (almost?) always context_switch, so we need to
     * save the non-volatiles. */
    SAVE_GPRS r14, r31, r1
    H_EXCEPTION_SAVE_STATE r1

    LOADADDR r12, do_timer
    mr r3, r1
    subi r1, r1, STACK_FRAME_OVERHEAD /* make a "caller" stack frame */
    CALL_CFUNC r12

    /* if we are resuming into hypervisor, don't handle softirqs */
    ld r10, (UREGS_msr + STACK_FRAME_OVERHEAD)(r1)
    rldicl. r11, r10, 4, 63 /* test SRR1:HV */
    bne hdec_out

    /* test for pending softirqs, and loop until there are no more. */
    mfmsr r14
    ori r14, r14, MSR_EE
    xori r15, r14, MSR_EE
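/* same softirq-drain pattern as hcall_test_all_events above */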
test_all_events:
    mtmsrd r15, 1 /* disable interrupts */
    ld r3, PAREA_vcpu(r13)
    lwz r3, VCPU_processor(r3)
    LOADADDR r4, irq_stat
    sldi r3, r3, IRQSTAT_shift
    add r4, r3, r4
    ld r5, IRQSTAT_pending(r4)
    cmpldi r5, 0
    beq hdec_out /* no more softirqs; exit loop */

    LOADADDR r6, do_softirq
    mtmsrd r14, 1 /* enable interrupts */
    CALL_CFUNC r6 /* process softirqs */
    b test_all_events /* look for more */

hdec_out:
    addi r1, r1, STACK_FRAME_OVERHEAD /* restore stack to cpu_user_regs */
    LOAD_GPRS r14, r15, r1 /* we clobbered r14/r15 in the loop */

/* r1 points to the to-be-restored cpu_user_regs. These could be mid-hypervisor
 * stack (returning into elsewhere in Xen) or at the top of the stack
 * (restoring the domain). */
_GLOBAL(full_resume)
    /* disable MSR:EE, since we could have come from do_softirq() */
    mfmsr r7
    ori r7, r7, MSR_EE
    xori r7, r7, MSR_EE
    mtmsrd r7, 1

    LOAD_GPRS r14, r31, r1 /* restore all non-volatiles */

fast_resume:
    ld r10, UREGS_msr(r1)
    rldicl. r11, r10, 4, 63 /* test SRR1:HV */
    bne 1f /* returning to hypervisor */

    /* check for pending irqs */
    mr r3, r1
    subi r1, r1, STACK_FRAME_OVERHEAD
    bl .deliver_ee
    addi r1, r1, STACK_FRAME_OVERHEAD

    /* if we took a DEC in hypervisor mode, we don't want to reload the DEC
     * until we return to the domain. MSR_EE is clear, so the domain will take
     * any impending DEC. */
    ld r3, PAREA_vcpu(r13)
    lwz r0, VCPU_dec(r3)
    mtdec r0

1:
    FAST_RESUME
    /* not reached */

/* move all of the below somewhere else */

_GLOBAL(papr_hcall_jump)
    mtctr r4
    bctr
    /* return to caller via LR */

/* XXX don't need to load all the registers */
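/* Note: on entry r3 is the cpu_user_regs pointer that the other argument
 * registers are loaded from, so r3 itself is (and must be) reloaded last. */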
_GLOBAL(xen_hvcall_jump)
    mtctr r4
    ld r10, (UREGS_gprs + GPR_WIDTH * 11)(r3)
    ld r9, (UREGS_gprs + GPR_WIDTH * 10)(r3)
    ld r8, (UREGS_gprs + GPR_WIDTH * 9)(r3)
    ld r7, (UREGS_gprs + GPR_WIDTH * 8)(r3)
    ld r6, (UREGS_gprs + GPR_WIDTH * 7)(r3)
    ld r5, (UREGS_gprs + GPR_WIDTH * 6)(r3)
    ld r4, (UREGS_gprs + GPR_WIDTH * 5)(r3)
    ld r3, (UREGS_gprs + GPR_WIDTH * 4)(r3)
    bctr
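
/* _reset_stack_and_jump: r3 is an ELFv1 function descriptor for the
 * target (entry point at offset 0, TOC at offset 8) and r4 is the new
 * stack pointer; switch stacks and jump, never to return. */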
_GLOBAL(_reset_stack_and_jump)
    ld r2, 8(r3)
    ld r3, 0(r3)
    mtctr r3
    mr r1, r4
    bctr
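
/* sleep: set MSR:POW (with EE enabled so an interrupt can wake us) to
 * nap until the next interrupt, then restore the original MSR; the
 * sync/isync pair brackets the power-save MSR write. */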
_GLOBAL(sleep)
    mfmsr r3
    ori r4, r3, MSR_EE
    oris r4, r4, MSR_POW@h
    sync
    mtmsrd r4
    isync
    mtmsrd r3
    blr

/* The primary processor issues a firmware call to spin us up at this
 * address, passing our CPU number in r3. We only need a function
 * entry point instead of a descriptor since this is never called from
 * C code.
 */
.globl spin_start
spin_start:
    /* Write our processor number as an acknowledgment that we're alive. */
    LOADADDR(r14, __spin_ack)
    stw r3, 0(r14)
    sync
    /* If NR_CPUS is too small, we should just spin forever. */
    LOADADDR(r15, NR_CPUS)
    cmpd r3, r15
    blt 2f
    b .
    /* Find our index in the array of processor_area struct pointers. */
2:  LOADADDR(r14, global_cpu_table)
    mulli r15, r3, 8
    add r14, r14, r15
    /* Spin until the pointer for our processor goes valid. */
1:  ld r15, 0(r14)
    cmpldi r15, 0
    beq 1b
    /* Dereference the pointer and load our stack pointer. */
    isync
    ld r1, PAREA_stack(r15)
    li r14, STACK_FRAME_OVERHEAD
    sub r1, r1, r14
    /* Load up the TOC and entry point for the C function to be called. */
    LOADADDR(r14, secondary_cpu_init)
    ld r2, 8(r14)
    ld r11, 0(r14)
    mtctr r11
    /* Warning: why do we need this synchronizing instruction on 970FX? */
    isync
    /* Jump into C code now. */
    bctrl
    nop
    b .