ia64/xen-unstable

view xen/arch/ia64/linux-xen/mca_asm.S @ 18367:0ac39e4bf63a

[IA64] fix mca handler.

When reloading dtr[] and itr[] entries, overlapping translations must be avoided.
Add overlap checks (the idea is sketched in C below the changeset metadata).

Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
author Isaku Yamahata <yamahata@valinux.co.jp>
date Mon Aug 25 19:04:37 2008 +0900 (2008-08-25)
parents e9706492e960
children
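The overlap rule being added can be pictured in C. A minimal sketch, under assumptions: the granule shift value, the helper names and the missing TR-slot argument are placeholders for illustration, not the actual Xen/ia64 definitions; the real logic is the cmp.eq/branch sequences around .overlap_vhpt further down in the file.

    #include <stdint.h>

    #define IA64_GRANULE_SHIFT 16          /* assumed value for the example */

    /* Granule frame number of a physical address. */
    static inline uint64_t granule_of(uint64_t pa)
    {
        return pa >> IA64_GRANULE_SHIFT;
    }

    /* Stand-in for the dtr insert (itr.d); the real thing is assembly. */
    static void tr_insert_dtr(uint64_t pa)
    {
        (void)pa;
    }

    /*
     * Re-pin the VHPT only when its granule is not already covered by the
     * current-stack TR or the VPD TR: two translation registers must never
     * map the same granule, so on a collision the existing entry is kept.
     */
    static void reload_vhpt_tr(uint64_t vhpt_pa, uint64_t stack_pa, uint64_t vpd_pa)
    {
        if (granule_of(vhpt_pa) == granule_of(stack_pa) ||
            granule_of(vhpt_pa) == granule_of(vpd_pa))
            return;                        /* overlap: granule already pinned */
        tr_insert_dtr(vhpt_pa);
    }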
//
// assembly portion of the IA64 MCA handling
//
// Mods by cfleck to integrate into kernel build
// 00/03/15 davidm Added various stop bits to get a clean compile
//
// 00/03/29 cfleck Added code to save INIT handoff state in pt_regs format, switch to temp
// kstack, switch modes, jump to C INIT handler
//
// 02/01/04 J.Hall <jenna.s.hall@intel.com>
// Before entering virtual mode code:
// 1. Check for TLB CPU error
// 2. Restore current thread pointer to kr6
// 3. Move stack ptr 16 bytes to conform to C calling convention
//
// 04/11/12 Russ Anderson <rja@sgi.com>
// Added per cpu MCA/INIT stack save areas.
//
#include <linux/config.h>
#include <linux/threads.h>

#include <asm/asmmacro.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/mca_asm.h>
#include <asm/mca.h>
#ifdef XEN
#include <asm/vhpt.h>
#include <public/arch-ia64.h>
#endif

/*
* When we get a machine check, the kernel stack pointer is no longer
* valid, so we need to set a new stack pointer.
*/
#define MINSTATE_PHYS /* Make sure stack access is physical for MINSTATE */

/*
* Needed for return context to SAL
*/
#define IA64_MCA_SAME_CONTEXT 0
#define IA64_MCA_COLD_BOOT -2

#include "minstate.h"

/*
* SAL_TO_OS_MCA_HANDOFF_STATE (SAL 3.0 spec)
* 1. GR1 = OS GP
* 2. GR8 = PAL_PROC physical address
* 3. GR9 = SAL_PROC physical address
* 4. GR10 = SAL GP (physical)
* 5. GR11 = Rendez state
* 6. GR12 = Return address to location within SAL_CHECK
*/
#ifdef XEN
#define SAL_TO_OS_MCA_HANDOFF_STATE_SAVE(_tmp) \
GET_THIS_PADDR(_tmp, ia64_sal_to_os_handoff_state_addr);; \
ld8 _tmp=[_tmp];; \
st8 [_tmp]=r1,0x08;; \
st8 [_tmp]=r8,0x08;; \
st8 [_tmp]=r9,0x08;; \
st8 [_tmp]=r10,0x08;; \
st8 [_tmp]=r11,0x08;; \
st8 [_tmp]=r12,0x08;; \
st8 [_tmp]=r17,0x08;; \
st8 [_tmp]=r18,0x08
#else
#define SAL_TO_OS_MCA_HANDOFF_STATE_SAVE(_tmp) \
LOAD_PHYSICAL(p0, _tmp, ia64_sal_to_os_handoff_state);; \
st8 [_tmp]=r1,0x08;; \
st8 [_tmp]=r8,0x08;; \
st8 [_tmp]=r9,0x08;; \
st8 [_tmp]=r10,0x08;; \
st8 [_tmp]=r11,0x08;; \
st8 [_tmp]=r12,0x08;; \
st8 [_tmp]=r17,0x08;; \
st8 [_tmp]=r18,0x08
#endif /* XEN */

/*
* OS_MCA_TO_SAL_HANDOFF_STATE (SAL 3.0 spec)
* (p6) is executed if we never entered virtual mode (TLB error)
* (p7) is executed if we entered virtual mode as expected (normal case)
* 1. GR8 = OS_MCA return status
* 2. GR9 = SAL GP (physical)
* 3. GR10 = 0/1 returning same/new context
* 4. GR22 = New min state save area pointer
* returns ptr to SAL rtn save loc in _tmp
*/
#define OS_MCA_TO_SAL_HANDOFF_STATE_RESTORE(_tmp) \
movl _tmp=ia64_os_to_sal_handoff_state;; \
DATA_VA_TO_PA(_tmp);; \
ld8 r8=[_tmp],0x08;; \
ld8 r9=[_tmp],0x08;; \
ld8 r10=[_tmp],0x08;; \
ld8 r22=[_tmp],0x08;;
// now _tmp is pointing to SAL rtn save location

/*
* COLD_BOOT_HANDOFF_STATE() sets ia64_mca_os_to_sal_state
* imots_os_status=IA64_MCA_COLD_BOOT
* imots_sal_gp=SAL GP
* imots_context=IA64_MCA_SAME_CONTEXT
* imots_new_min_state=Min state save area pointer
* imots_sal_check_ra=Return address to location within SAL_CHECK
*
*/
#ifdef XEN
#define COLD_BOOT_HANDOFF_STATE(sal_to_os_handoff,os_to_sal_handoff,tmp)\
movl tmp=IA64_MCA_COLD_BOOT; \
GET_THIS_PADDR(r2,ia64_sal_to_os_handoff_state_addr);; \
ld8 sal_to_os_handoff=[sal_to_os_handoff];; \
movl os_to_sal_handoff=ia64_os_to_sal_handoff_state;; \
dep os_to_sal_handoff = 0, os_to_sal_handoff, 60, 4;; \
/*DATA_VA_TO_PA(os_to_sal_handoff);;*/ \
st8 [os_to_sal_handoff]=tmp,8;; \
ld8 tmp=[sal_to_os_handoff],48;; \
st8 [os_to_sal_handoff]=tmp,8;; \
movl tmp=IA64_MCA_SAME_CONTEXT;; \
st8 [os_to_sal_handoff]=tmp,8;; \
ld8 tmp=[sal_to_os_handoff],-8;; \
st8 [os_to_sal_handoff]=tmp,8;; \
ld8 tmp=[sal_to_os_handoff];; \
st8 [os_to_sal_handoff]=tmp;;
#else /* XEN */
#define COLD_BOOT_HANDOFF_STATE(sal_to_os_handoff,os_to_sal_handoff,tmp)\
movl tmp=IA64_MCA_COLD_BOOT; \
movl sal_to_os_handoff=__pa(ia64_sal_to_os_handoff_state); \
movl os_to_sal_handoff=__pa(ia64_os_to_sal_handoff_state);; \
st8 [os_to_sal_handoff]=tmp,8;; \
ld8 tmp=[sal_to_os_handoff],48;; \
st8 [os_to_sal_handoff]=tmp,8;; \
movl tmp=IA64_MCA_SAME_CONTEXT;; \
st8 [os_to_sal_handoff]=tmp,8;; \
ld8 tmp=[sal_to_os_handoff],-8;; \
st8 [os_to_sal_handoff]=tmp,8;; \
ld8 tmp=[sal_to_os_handoff];; \
st8 [os_to_sal_handoff]=tmp;;
#endif /* XEN */

#define GET_IA64_MCA_DATA(reg) \
GET_THIS_PADDR(reg, ia64_mca_data) \
;; \
ld8 reg=[reg]

.global ia64_os_mca_dispatch
.global ia64_os_mca_dispatch_end
#ifndef XEN
.global ia64_sal_to_os_handoff_state
.global ia64_os_to_sal_handoff_state
#endif
.global ia64_do_tlb_purge

.text
.align 16

/*
* Just the TLB purge part is moved to a separate function
* so we can re-use the code for cpu hotplug code as well
* Caller should now setup b1, so we can branch once the
* tlb flush is complete.
*/

ia64_do_tlb_purge:
#define O(member) IA64_CPUINFO_##member##_OFFSET

GET_THIS_PADDR(r2, cpu_info) // load phys addr of cpu_info into r2
;;
addl r17=O(PTCE_STRIDE),r2
addl r2=O(PTCE_BASE),r2
;;
ld8 r18=[r2],(O(PTCE_COUNT)-O(PTCE_BASE));; // r18=ptce_base
ld4 r19=[r2],4 // r19=ptce_count[0]
ld4 r21=[r17],4 // r21=ptce_stride[0]
;;
ld4 r20=[r2] // r20=ptce_count[1]
ld4 r22=[r17] // r22=ptce_stride[1]
mov r24=0
;;
adds r20=-1,r20
;;
#undef O
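
// Flush the entire local TC with a nested ptc.e loop driven by the
// PAL_PTCE_INFO parameters loaded above: the outer loop (label 2) steps
// ptce_count[0] times by ptce_stride[0], the inner loop (label 3)
// ptce_count[1] times by ptce_stride[1].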
2:
cmp.ltu p6,p7=r24,r19
(p7) br.cond.dpnt.few 4f
mov ar.lc=r20
3:
ptc.e r18
;;
add r18=r22,r18
br.cloop.sptk.few 3b
;;
add r18=r21,r18
add r24=1,r24
;;
br.sptk.few 2b
4:
srlz.i // srlz.i implies srlz.d
;;

// Now purge addresses formerly mapped by TR registers
// 1. Purge ITR&DTR for kernel.
movl r16=KERNEL_START
mov r18=KERNEL_TR_PAGE_SHIFT<<2
;;
ptr.i r16, r18
ptr.d r16, r18
;;
srlz.i
;;
srlz.d
;;
// 2. Purge DTR for PERCPU data.
movl r16=PERCPU_ADDR
mov r18=PERCPU_PAGE_SHIFT<<2
;;
ptr.d r16,r18
;;
srlz.d
;;
// 3. Purge ITR for PAL code.
GET_THIS_PADDR(r2, ia64_mca_pal_base)
;;
ld8 r16=[r2]
mov r18=IA64_GRANULE_SHIFT<<2
;;
ptr.i r16,r18
;;
srlz.i
;;
// 4. Purge DTR for stack.
#ifdef XEN
// Kernel registers are saved in a per_cpu cpu_kr_ia64_t
// to allow the kernel registers themselves to be used by domains.
GET_THIS_PADDR(r2, cpu_kr);;
add r2=IA64_KR_CURRENT_STACK_OFFSET,r2
;;
ld8 r16=[r2]
#else
mov r16=IA64_KR(CURRENT_STACK)
#endif
;;
shl r16=r16,IA64_GRANULE_SHIFT
movl r19=PAGE_OFFSET
;;
add r16=r19,r16
mov r18=IA64_GRANULE_SHIFT<<2
;;
ptr.d r16,r18
;;
srlz.i
;;
#ifdef XEN
// 5. shared_info
GET_THIS_PADDR(r2, inserted_shared_info);;
ld8 r16=[r2]
mov r18=XSI_SHIFT<<2
;;
ptr.d r16,r18
;;
srlz.d
;;

// 6. mapped_regs
GET_THIS_PADDR(r2, inserted_mapped_regs);;
ld8 r16=[r2]
mov r18=XMAPPEDREGS_SHIFT<<2
;;
ptr.d r16,r18
;;
srlz.d
;;

// 7. VPD
// The VPD will not be mapped in the case where
// a VMX domain hasn't been started since boot
GET_THIS_PADDR(r2, inserted_vpd);;
ld8 r16=[r2]
mov r18=IA64_GRANULE_SHIFT<<2
;;
cmp.eq p7,p0=r16,r0
;;
(p7) br.cond.sptk .vpd_not_mapped
;;
ptr.i r16,r18
;;
ptr.d r16,r18
;;
srlz.i
;;
srlz.d
;;
.vpd_not_mapped:

// 8. VHPT
// GET_VA_VCPU_VHPT_MADDR() may not give the
// value of the VHPT currently pinned into the TLB
GET_THIS_PADDR(r2, inserted_vhpt);;
ld8 r2=[r2]
;;
cmp.eq p7,p0=r2,r0
;;
(p7) br.cond.sptk .vhpt_not_mapped
dep r16=0,r2,0,IA64_GRANULE_SHIFT
mov r18=IA64_GRANULE_SHIFT<<2
;;
ptr.d r16,r18
;;
srlz.d
;;
.vhpt_not_mapped:
#endif
// Now branch away to caller.
br.sptk.many b1
;;

ia64_os_mca_dispatch:

// Serialize all MCA processing
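// Only one CPU at a time may run the OS_MCA path: xchg8 swaps 1 into
// ia64_mca_serialize and spins while the previous value is non-zero.
// The lock is dropped (st8.rel of 0) just before returning to SAL.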
mov r3=1;;
LOAD_PHYSICAL(p0,r2,ia64_mca_serialize);;
ia64_os_mca_spin:
xchg8 r4=[r2],r3;;
cmp.ne p6,p0=r4,r0
(p6) br ia64_os_mca_spin

// Save the SAL to OS MCA handoff state as defined
// by SAL SPEC 3.0
// NOTE : The order in which the state gets saved
// is dependent on the way the C-structure
// for ia64_mca_sal_to_os_state_t has been
// defined in include/asm/mca.h
SAL_TO_OS_MCA_HANDOFF_STATE_SAVE(r2)
;;

// LOG PROCESSOR STATE INFO FROM HERE ON..
begin_os_mca_dump:
br ia64_os_mca_proc_state_dump;;

ia64_os_mca_done_dump:

#ifdef XEN
// Set current to ar.k6
GET_THIS_PADDR(r2,cpu_kr);;
add r2=IA64_KR_CURRENT_OFFSET,r2;;
ld8 r2=[r2];;
mov ar.k6=r2;;

GET_THIS_PADDR(r2,ia64_sal_to_os_handoff_state_addr);;
ld8 r2=[r2];;
adds r16=56,r2
#else
LOAD_PHYSICAL(p0,r16,ia64_sal_to_os_handoff_state+56)
#endif
;;
ld8 r18=[r16] // Get processor state parameter on existing PALE_CHECK.
;;
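// Bit 60 of the processor state parameter is the PSP "tc" (TLB check) bit;
// the purge-and-reload pass below is only needed when the MCA involved
// the TLB.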
tbit.nz p6,p7=r18,60
(p7) br.spnt done_tlb_purge_and_reload

// The following code purges TC and TR entries, then reloads the TR entries.
// Purge percpu data TC entries.
begin_tlb_purge_and_reload:
movl r18=ia64_reload_tr;;
LOAD_PHYSICAL(p0,r18,ia64_reload_tr);;
mov b1=r18;;
br.sptk.many ia64_do_tlb_purge;;

ia64_reload_tr:
// Finally reload the TR registers.
// 1. Reload DTR/ITR registers for kernel.
mov r18=KERNEL_TR_PAGE_SHIFT<<2
movl r17=KERNEL_START
;;
mov cr.itir=r18
mov cr.ifa=r17
mov r16=IA64_TR_KERNEL
mov r19=ip
movl r18=PAGE_KERNEL
;;
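// Build the kernel PTE from the page we are currently executing in:
// clear the page-offset bits of the (physical-mode) ip and OR in the
// PAGE_KERNEL attributes.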
dep r17=0,r19,0, KERNEL_TR_PAGE_SHIFT
;;
or r18=r17,r18
;;
itr.i itr[r16]=r18
;;
itr.d dtr[r16]=r18
;;
srlz.i
srlz.d
;;
// 2. Reload DTR register for PERCPU data.
GET_THIS_PADDR(r2, ia64_mca_per_cpu_pte)
;;
movl r16=PERCPU_ADDR // vaddr
movl r18=PERCPU_PAGE_SHIFT<<2
;;
mov cr.itir=r18
mov cr.ifa=r16
;;
ld8 r18=[r2] // load per-CPU PTE
mov r16=IA64_TR_PERCPU_DATA;
;;
itr.d dtr[r16]=r18
;;
srlz.d
;;
#ifndef XEN
// 3. Reload ITR for PAL code.
GET_THIS_PADDR(r2, ia64_mca_pal_pte)
;;
ld8 r18=[r2] // load PAL PTE
;;
GET_THIS_PADDR(r2, ia64_mca_pal_base)
;;
ld8 r16=[r2] // load PAL vaddr
mov r19=IA64_GRANULE_SHIFT<<2
;;
mov cr.itir=r19
mov cr.ifa=r16
mov r20=IA64_TR_PALCODE
;;
itr.i itr[r20]=r18
;;
srlz.i
;;
#endif

// 4. Reload DTR for stack.
#ifdef XEN
// Kernel registers are saved in a per_cpu cpu_kr_ia64_t
// to allow the kernel registers themselves to be used by domains.
GET_THIS_PADDR(r2, cpu_kr);;
add r2=IA64_KR_CURRENT_STACK_OFFSET,r2
;;
ld8 r16=[r2]
#else
mov r16=IA64_KR(CURRENT_STACK)
#endif
;;
shl r16=r16,IA64_GRANULE_SHIFT
movl r19=PAGE_OFFSET
;;
add r18=r19,r16
movl r20=PAGE_KERNEL
;;
add r16=r20,r16
mov r19=IA64_GRANULE_SHIFT<<2
;;
mov cr.itir=r19
mov cr.ifa=r18
mov r20=IA64_TR_CURRENT_STACK
;;
itr.d dtr[r20]=r16
;;
srlz.d
;;
#ifdef XEN
// if !VMX_DOMAIN(current)
// pin down shared_info and mapped_regs
// else
// pin down VPD
GET_THIS_PADDR(r2,cpu_kr);;
add r2=IA64_KR_CURRENT_OFFSET,r2
;;
ld8 r2=[r2]
;;
dep r2=0,r2,60,4
;;
add r2=IA64_VCPU_FLAGS_OFFSET,r2
;;
ld8 r2=[r2]
;;
cmp.eq p6,p7 = r2,r0
(p7) br.cond.sptk .vmx_domain

// 5. shared_info
GET_THIS_PADDR(r2, inserted_shared_info);;
ld8 r16=[r2]
mov r18=XSI_SHIFT<<2
movl r20=__pgprot(__DIRTY_BITS | _PAGE_PL_PRIV | _PAGE_AR_RW)
;;
GET_THIS_PADDR(r2, domain_shared_info);;
ld8 r17=[r2]
;;
dep r17=0,r17,60,4
;;
or r17=r17,r20 // construct PA | page properties
mov cr.itir=r18
mov cr.ifa=r16
;;
mov r16=IA64_TR_SHARED_INFO
;;
itr.d dtr[r16]=r17 // wire in new mapping...
;;
srlz.d
;;

// 6. mapped_regs
GET_THIS_PADDR(r2, inserted_mapped_regs);;
ld8 r16=[r2]
mov r18=XMAPPEDREGS_SHIFT<<2
;;
GET_THIS_PADDR(r2,cpu_kr);;
add r2=IA64_KR_CURRENT_OFFSET,r2
;;
ld8 r2=[r2]
;;
dep r2=0,r2,60,4
;;
add r2=IA64_VPD_BASE_OFFSET,r2
;;
ld8 r17=[r2]
;;
dep r17=0,r17,60,4
;;
or r17=r17,r20 // construct PA | page properties
mov cr.itir=r18
mov cr.ifa=r16
;;
mov r16=IA64_TR_MAPPED_REGS
;;
itr.d dtr[r16]=r17 // wire in new mapping...
;;
srlz.d
;;
br.sptk.many .reload_vpd_not_mapped;;
.vmx_domain:

// 7. VPD
GET_THIS_PADDR(r2, inserted_vpd);;
ld8 r16=[r2]
mov r18=IA64_GRANULE_SHIFT<<2
;;
cmp.eq p7,p0=r16,r0
;;
(p7) br.cond.sptk .reload_vpd_not_mapped
dep r17=0,r16,60,4
;;
dep r17=0,r17,0,IA64_GRANULE_SHIFT
;;

// avoid overlapping with stack
GET_THIS_PADDR(r2, cpu_kr);;
add r2=IA64_KR_CURRENT_STACK_OFFSET,r2
;;
ld8 r19=[r2]
;;
shl r19=r19,IA64_GRANULE_SHIFT
;;
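// p7 <- (VPD granule != current stack granule); the predicated itr.d
// below is skipped when the two collide, so we never insert a dtr that
// overlaps the already-pinned stack TR.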
cmp.eq p0,p7=r17,r19

movl r20=PAGE_KERNEL
;;
or r17=r20,r17 // construct PA | page properties
;;
mov cr.itir=r18
mov cr.ifa=r16
;;
mov r16=IA64_TR_VPD
mov r18=IA64_TR_MAPPED_REGS
;;
itr.i itr[r16]=r17
;;
(p7) itr.d dtr[r18]=r17
;;
srlz.i
;;
srlz.d
;;
.reload_vpd_not_mapped:

// 8. VHPT
GET_THIS_PADDR(r2, inserted_vhpt);;
ld8 r2=[r2]
;;
cmp.eq p7,p0=r2,r0
;;
(p7) br.cond.sptk .overlap_vhpt // vhpt isn't mapped.

dep r16=0,r2,0,IA64_GRANULE_SHIFT
;;
dep r17=0,r16,60,4 // physical address of
// va_vhpt & ~(IA64_GRANULE_SIZE - 1)

// avoid overlapping with stack TR
GET_THIS_PADDR(r2,cpu_kr);;
add r2=IA64_KR_CURRENT_STACK_OFFSET,r2
;;
ld8 r2=[r2]
;;
shl r18=r2,IA64_GRANULE_SHIFT
;;
cmp.eq p7,p0=r17,r18
(p7) br.cond.sptk .overlap_vhpt

// avoid overlapping with VPD
GET_THIS_PADDR(r2, inserted_vpd);;
ld8 r18=[r2]
;;
dep r18=0,r18,60,4
;;
dep r18=0,r18,0,IA64_GRANULE_SHIFT
;;
cmp.eq p7,p0=r17,r18
(p7) br.cond.sptk .overlap_vhpt

movl r20=PAGE_KERNEL
;;
mov r18=IA64_TR_VHPT
mov r19=IA64_GRANULE_SHIFT<<2
;;
or r17=r17,r20 // construct PA | page properties
mov cr.itir=r19
mov cr.ifa=r16
;;
itr.d dtr[r18]=r17 // wire in new mapping...
;;
srlz.d
;;
.overlap_vhpt:
#endif
br.sptk.many done_tlb_purge_and_reload
err:
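// Unrecoverable path: report IA64_MCA_COLD_BOOT in the OS-to-SAL handoff
// state and return to SAL, requesting a cold boot.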
COLD_BOOT_HANDOFF_STATE(r20,r21,r22)
br.sptk.many ia64_os_mca_done_restore

done_tlb_purge_and_reload:

// Setup new stack frame for OS_MCA handling
GET_IA64_MCA_DATA(r2)
;;
add r3 = IA64_MCA_CPU_STACKFRAME_OFFSET, r2
add r2 = IA64_MCA_CPU_RBSTORE_OFFSET, r2
;;
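// Switch the RSE backing store and stack to the per-CPU MCA save area so
// the virtual-mode C handler runs on known-good memory.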
rse_switch_context(r6,r3,r2);; // RSC management in this new context

GET_IA64_MCA_DATA(r2)
;;
add r2 = IA64_MCA_CPU_STACK_OFFSET+IA64_MCA_STACK_SIZE-16, r2
;;
mov r12=r2 // establish new stack-pointer

// Enter virtual mode from physical mode
VIRTUAL_MODE_ENTER(r2, r3, ia64_os_mca_virtual_begin, r4)
ia64_os_mca_virtual_begin:

// Call virtual mode handler
movl r2=ia64_mca_ucmc_handler;;
mov b6=r2;;
br.call.sptk.many b0=b6;;
.ret0:
// Revert back to physical mode before going back to SAL
PHYSICAL_MODE_ENTER(r2, r3, ia64_os_mca_virtual_end, r4)
ia64_os_mca_virtual_end:

// restore the original stack frame here
GET_IA64_MCA_DATA(r2)
;;
add r2 = IA64_MCA_CPU_STACKFRAME_OFFSET, r2
;;
movl r4=IA64_PSR_MC
;;
rse_return_context(r4,r3,r2) // switch from interrupt context for RSE

// let us restore all the registers from our PSI structure
mov r8=gp
;;
begin_os_mca_restore:
br ia64_os_mca_proc_state_restore;;

ia64_os_mca_done_restore:
OS_MCA_TO_SAL_HANDOFF_STATE_RESTORE(r2);;
// branch back to SALE_CHECK
ld8 r3=[r2];;
mov b0=r3;; // SAL_CHECK return address

// release lock
movl r3=ia64_mca_serialize;;
DATA_VA_TO_PA(r3);;
st8.rel [r3]=r0

br b0
;;
ia64_os_mca_dispatch_end:
//EndMain//////////////////////////////////////////////////////////////////////

//++
// Name:
// ia64_os_mca_proc_state_dump()
//
// Stub Description:
//
// This stub dumps the processor state during MCHK to a data area
//
//--

ia64_os_mca_proc_state_dump:
// Save bank 1 GRs 16-31 which will be used by c-language code when we switch
// to virtual addressing mode.
GET_IA64_MCA_DATA(r2)
;;
add r2 = IA64_MCA_CPU_PROC_STATE_DUMP_OFFSET, r2
;;
// save ar.NaT
mov r5=ar.unat // ar.unat

// save banked GRs 16-31 along with NaT bits
bsw.1;;
st8.spill [r2]=r16,8;;
st8.spill [r2]=r17,8;;
st8.spill [r2]=r18,8;;
st8.spill [r2]=r19,8;;
st8.spill [r2]=r20,8;;
st8.spill [r2]=r21,8;;
st8.spill [r2]=r22,8;;
st8.spill [r2]=r23,8;;
st8.spill [r2]=r24,8;;
st8.spill [r2]=r25,8;;
st8.spill [r2]=r26,8;;
st8.spill [r2]=r27,8;;
st8.spill [r2]=r28,8;;
st8.spill [r2]=r29,8;;
st8.spill [r2]=r30,8;;
st8.spill [r2]=r31,8;;

mov r4=ar.unat;;
st8 [r2]=r4,8 // save User NaT bits for r16-r31
mov ar.unat=r5 // restore original unat
bsw.0;;

// save BRs
add r4=8,r2 // duplicate r2 in r4
add r6=2*8,r2 // duplicate r2 in r6

mov r3=b0
mov r5=b1
mov r7=b2;;
st8 [r2]=r3,3*8
st8 [r4]=r5,3*8
st8 [r6]=r7,3*8;;

mov r3=b3
mov r5=b4
mov r7=b5;;
st8 [r2]=r3,3*8
st8 [r4]=r5,3*8
st8 [r6]=r7,3*8;;

mov r3=b6
mov r5=b7;;
st8 [r2]=r3,2*8
st8 [r4]=r5,2*8;;

cSaveCRs:
// save CRs
add r4=8,r2 // duplicate r2 in r4
add r6=2*8,r2 // duplicate r2 in r6

mov r3=cr.dcr
mov r5=cr.itm
mov r7=cr.iva;;

st8 [r2]=r3,8*8
st8 [r4]=r5,3*8
st8 [r6]=r7,3*8;; // 48 byte increments

mov r3=cr.pta;;
st8 [r2]=r3,8*8;; // 64 byte increments

// if PSR.ic=1, reading interruption registers causes an illegal operation fault
mov r3=psr;;
tbit.nz.unc p6,p0=r3,PSR_IC;; // PSI Valid Log bit pos. test
(p6) st8 [r2]=r0,9*8+160 // increment by 232 byte inc.
begin_skip_intr_regs:
(p6) br SkipIntrRegs;;

add r4=8,r2 // duplicate r2 in r4
add r6=2*8,r2 // duplicate r2 in r6

mov r3=cr.ipsr
mov r5=cr.isr
mov r7=r0;;
st8 [r2]=r3,3*8
st8 [r4]=r5,3*8
st8 [r6]=r7,3*8;;

mov r3=cr.iip
mov r5=cr.ifa
mov r7=cr.itir;;
st8 [r2]=r3,3*8
st8 [r4]=r5,3*8
st8 [r6]=r7,3*8;;

mov r3=cr.iipa
mov r5=cr.ifs
mov r7=cr.iim;;
st8 [r2]=r3,3*8
st8 [r4]=r5,3*8
st8 [r6]=r7,3*8;;

mov r3=cr25;; // cr.iha
st8 [r2]=r3,160;; // 160 byte increment

SkipIntrRegs:
st8 [r2]=r0,152;; // another 152 byte increment.

add r4=8,r2 // duplicate r2 in r4
add r6=2*8,r2 // duplicate r2 in r6

mov r3=cr.lid
// mov r5=cr.ivr // cr.ivr, don't read it
mov r7=cr.tpr;;
st8 [r2]=r3,3*8
st8 [r4]=r5,3*8
st8 [r6]=r7,3*8;;

mov r3=r0 // cr.eoi => cr67
mov r5=r0 // cr.irr0 => cr68
mov r7=r0;; // cr.irr1 => cr69
st8 [r2]=r3,3*8
st8 [r4]=r5,3*8
st8 [r6]=r7,3*8;;

mov r3=r0 // cr.irr2 => cr70
mov r5=r0 // cr.irr3 => cr71
mov r7=cr.itv;;
st8 [r2]=r3,3*8
st8 [r4]=r5,3*8
st8 [r6]=r7,3*8;;

mov r3=cr.pmv
mov r5=cr.cmcv;;
st8 [r2]=r3,7*8
st8 [r4]=r5,7*8;;

mov r3=r0 // cr.lrr0 => cr80
mov r5=r0;; // cr.lrr1 => cr81
st8 [r2]=r3,23*8
st8 [r4]=r5,23*8;;

adds r2=25*8,r2;;

cSaveARs:
// save ARs
add r4=8,r2 // duplicate r2 in r4
add r6=2*8,r2 // duplicate r2 in r6

mov r3=ar.k0
mov r5=ar.k1
mov r7=ar.k2;;
st8 [r2]=r3,3*8
st8 [r4]=r5,3*8
st8 [r6]=r7,3*8;;

mov r3=ar.k3
mov r5=ar.k4
mov r7=ar.k5;;
st8 [r2]=r3,3*8
st8 [r4]=r5,3*8
st8 [r6]=r7,3*8;;

mov r3=ar.k6
mov r5=ar.k7
mov r7=r0;; // ar.kr8
st8 [r2]=r3,10*8
st8 [r4]=r5,10*8
st8 [r6]=r7,10*8;; // increment by 72 bytes

mov r3=ar.rsc
mov ar.rsc=r0 // put RSE in enforced lazy mode
mov r5=ar.bsp
;;
mov r7=ar.bspstore;;
st8 [r2]=r3,3*8
st8 [r4]=r5,3*8
st8 [r6]=r7,3*8;;

mov r3=ar.rnat;;
st8 [r2]=r3,8*13 // increment by 13x8 bytes

mov r3=ar.ccv;;
st8 [r2]=r3,8*4

mov r3=ar.unat;;
st8 [r2]=r3,8*4

mov r3=ar.fpsr;;
st8 [r2]=r3,8*4

mov r3=ar.itc;;
st8 [r2]=r3,160 // 160

mov r3=ar.pfs;;
st8 [r2]=r3,8

mov r3=ar.lc;;
st8 [r2]=r3,8

mov r3=ar.ec;;
st8 [r2]=r3
add r2=8*62,r2 //padding

// save RRs
mov ar.lc=0x08-1
movl r4=0x00;;

cStRR:
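// rr[] is indexed by the region number placed in bits 61-63; walk all
// eight regions and save each region register.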
dep.z r5=r4,61,3;;
mov r3=rr[r5];;
st8 [r2]=r3,8
add r4=1,r4
br.cloop.sptk.few cStRR
;;
end_os_mca_dump:
br ia64_os_mca_done_dump;;

//EndStub//////////////////////////////////////////////////////////////////////

//++
// Name:
// ia64_os_mca_proc_state_restore()
//
// Stub Description:
//
// This is a stub to restore the saved processor state during MCHK
//
//--

ia64_os_mca_proc_state_restore:

// Restore bank1 GR16-31
GET_IA64_MCA_DATA(r2)
;;
add r2 = IA64_MCA_CPU_PROC_STATE_DUMP_OFFSET, r2

restore_GRs: // restore bank-1 GRs 16-31
bsw.1;;
add r3=16*8,r2;; // to get to NaT of GR 16-31
ld8 r3=[r3];;
mov ar.unat=r3;; // first restore NaT

ld8.fill r16=[r2],8;;
ld8.fill r17=[r2],8;;
ld8.fill r18=[r2],8;;
ld8.fill r19=[r2],8;;
ld8.fill r20=[r2],8;;
ld8.fill r21=[r2],8;;
ld8.fill r22=[r2],8;;
ld8.fill r23=[r2],8;;
ld8.fill r24=[r2],8;;
ld8.fill r25=[r2],8;;
ld8.fill r26=[r2],8;;
ld8.fill r27=[r2],8;;
ld8.fill r28=[r2],8;;
ld8.fill r29=[r2],8;;
ld8.fill r30=[r2],8;;
ld8.fill r31=[r2],8;;

ld8 r3=[r2],8;; // increment to skip NaT
bsw.0;;

restore_BRs:
add r4=8,r2 // duplicate r2 in r4
add r6=2*8,r2;; // duplicate r2 in r6

ld8 r3=[r2],3*8
ld8 r5=[r4],3*8
ld8 r7=[r6],3*8;;
mov b0=r3
mov b1=r5
mov b2=r7;;

ld8 r3=[r2],3*8
ld8 r5=[r4],3*8
ld8 r7=[r6],3*8;;
mov b3=r3
mov b4=r5
mov b5=r7;;

ld8 r3=[r2],2*8
ld8 r5=[r4],2*8;;
mov b6=r3
mov b7=r5;;

restore_CRs:
add r4=8,r2 // duplicate r2 in r4
add r6=2*8,r2;; // duplicate r2 in r6

ld8 r3=[r2],8*8
ld8 r5=[r4],3*8
ld8 r7=[r6],3*8;; // 48 byte increments
mov cr.dcr=r3
mov cr.itm=r5
mov cr.iva=r7;;

ld8 r3=[r2],8*8;; // 64 byte increments
// mov cr.pta=r3

// if PSR.ic=1, reading interruption registers causes an illegal operation fault
mov r3=psr;;
tbit.nz.unc p6,p0=r3,PSR_IC;; // PSI Valid Log bit pos. test
(p6) st8 [r2]=r0,9*8+160 // increment by 232 byte inc.

begin_rskip_intr_regs:
(p6) br rSkipIntrRegs;;

add r4=8,r2 // duplicate r2 in r4
add r6=2*8,r2;; // duplicate r2 in r6

ld8 r3=[r2],3*8
ld8 r5=[r4],3*8
ld8 r7=[r6],3*8;;
mov cr.ipsr=r3
// mov cr.isr=r5 // cr.isr is read only

ld8 r3=[r2],3*8
ld8 r5=[r4],3*8
ld8 r7=[r6],3*8;;
mov cr.iip=r3
mov cr.ifa=r5
mov cr.itir=r7;;

ld8 r3=[r2],3*8
ld8 r5=[r4],3*8
ld8 r7=[r6],3*8;;
mov cr.iipa=r3
mov cr.ifs=r5
mov cr.iim=r7

ld8 r3=[r2],160;; // 160 byte increment
mov cr.iha=r3

rSkipIntrRegs:
ld8 r3=[r2],152;; // another 152 byte inc.

add r4=8,r2 // duplicate r2 in r4
add r6=2*8,r2;; // duplicate r2 in r6

ld8 r3=[r2],8*3
ld8 r5=[r4],8*3
ld8 r7=[r6],8*3;;
mov cr.lid=r3
// mov cr.ivr=r5 // cr.ivr is read only
mov cr.tpr=r7;;

ld8 r3=[r2],8*3
ld8 r5=[r4],8*3
ld8 r7=[r6],8*3;;
// mov cr.eoi=r3
// mov cr.irr0=r5 // cr.irr0 is read only
// mov cr.irr1=r7;; // cr.irr1 is read only

ld8 r3=[r2],8*3
ld8 r5=[r4],8*3
ld8 r7=[r6],8*3;;
// mov cr.irr2=r3 // cr.irr2 is read only
// mov cr.irr3=r5 // cr.irr3 is read only
mov cr.itv=r7;;

ld8 r3=[r2],8*7
ld8 r5=[r4],8*7;;
mov cr.pmv=r3
mov cr.cmcv=r5;;

ld8 r3=[r2],8*23
ld8 r5=[r4],8*23;;
adds r2=8*23,r2
adds r4=8*23,r4;;
// mov cr.lrr0=r3
// mov cr.lrr1=r5

adds r2=8*2,r2;;

restore_ARs:
add r4=8,r2 // duplicate r2 in r4
add r6=2*8,r2;; // duplicate r2 in r6

ld8 r3=[r2],3*8
ld8 r5=[r4],3*8
ld8 r7=[r6],3*8;;
mov ar.k0=r3
mov ar.k1=r5
mov ar.k2=r7;;

ld8 r3=[r2],3*8
ld8 r5=[r4],3*8
ld8 r7=[r6],3*8;;
mov ar.k3=r3
mov ar.k4=r5
mov ar.k5=r7;;

ld8 r3=[r2],10*8
ld8 r5=[r4],10*8
ld8 r7=[r6],10*8;;
mov ar.k6=r3
mov ar.k7=r5
;;

ld8 r3=[r2],3*8
ld8 r5=[r4],3*8
ld8 r7=[r6],3*8;;
// mov ar.rsc=r3
// mov ar.bsp=r5 // ar.bsp is read only
mov ar.rsc=r0 // make sure that RSE is in enforced lazy mode
;;
mov ar.bspstore=r7;;

ld8 r9=[r2],8*13;;
mov ar.rnat=r9

mov ar.rsc=r3
ld8 r3=[r2],8*4;;
mov ar.ccv=r3

ld8 r3=[r2],8*4;;
mov ar.unat=r3

ld8 r3=[r2],8*4;;
mov ar.fpsr=r3

ld8 r3=[r2],160;; // 160
// mov ar.itc=r3

ld8 r3=[r2],8;;
mov ar.pfs=r3

ld8 r3=[r2],8;;
mov ar.lc=r3

ld8 r3=[r2];;
mov ar.ec=r3
add r2=8*62,r2;; // padding

restore_RRs:
mov r5=ar.lc
mov ar.lc=0x08-1
movl r4=0x00;;
cStRRr:
dep.z r7=r4,61,3
ld8 r3=[r2],8;;
mov rr[r7]=r3 // what are its access privileges?
add r4=1,r4
br.cloop.sptk.few cStRRr
;;
mov ar.lc=r5
;;
end_os_mca_restore:
br ia64_os_mca_done_restore;;

//EndStub//////////////////////////////////////////////////////////////////////

// ok, the issue here is that we need to save state information so
// it can be usable by the kernel debugger and show regs routines.
// In order to do this, our best bet is to save the current state (plus
// the state information obtained from the MIN_STATE_AREA) into a pt_regs
// format. This way we can pass it on in a usable format.
//

//
// SAL to OS entry point for INIT on the monarch processor
// This has been defined for registration purposes with SAL
// as a part of ia64_mca_init.
//
// When we get here, the following registers have been
// set by the SAL for our use
//
// 1. GR1 = OS INIT GP
// 2. GR8 = PAL_PROC physical address
// 3. GR9 = SAL_PROC physical address
// 4. GR10 = SAL GP (physical)
// 5. GR11 = Init Reason
// 0 = Received INIT for event other than crash dump switch
// 1 = Received wakeup at the end of an OS_MCA corrected machine check
// 2 = Received INIT due to CrashDump switch assertion
//
// 6. GR12 = Return address to location within SAL_INIT procedure

GLOBAL_ENTRY(ia64_monarch_init_handler)
.prologue
#ifdef XEN /* Need in ia64_monarch_init_handler? */
// Set current to ar.k6
GET_THIS_PADDR(r2,cpu_kr);;
add r2=IA64_KR_CURRENT_OFFSET,r2;;
ld8 r2=[r2];;
mov ar.k6=r2;;
#endif
// stash the information the SAL passed to os
SAL_TO_OS_MCA_HANDOFF_STATE_SAVE(r2)
;;
SAVE_MIN_WITH_COVER
;;
mov r8=cr.ifa
mov r9=cr.isr
adds r3=8,r2 // set up second base pointer
;;
SAVE_REST

// ok, enough should be saved at this point to be dangerous, and supply
// information for a dump
// We need to switch to Virtual mode before hitting the C functions.

movl r2=IA64_PSR_IT|IA64_PSR_IC|IA64_PSR_DT|IA64_PSR_RT|IA64_PSR_DFH|IA64_PSR_BN
mov r3=psr // get the current psr, minimum enabled at this point
;;
or r2=r2,r3
;;
movl r3=IVirtual_Switch
;;
mov cr.iip=r3 // short return to set the appropriate bits
mov cr.ipsr=r2 // need to do an rfi to set appropriate bits
;;
rfi
;;
IVirtual_Switch:
//
// We should now be running virtual
//
// Let's call the C handler to get the rest of the state info
//
alloc r14=ar.pfs,0,0,2,0 // now it's safe (must be first in insn group!)
;;
adds out0=16,sp // out0 = pointer to pt_regs
;;
DO_SAVE_SWITCH_STACK
.body
adds out1=16,sp // out1 = pointer to switch_stack

br.call.sptk.many rp=ia64_init_handler
.ret1:

return_from_init:
br.sptk return_from_init
END(ia64_monarch_init_handler)

//
// SAL to OS entry point for INIT on the slave processor
// This has been defined for registration purposes with SAL
// as a part of ia64_mca_init.
//

GLOBAL_ENTRY(ia64_slave_init_handler)
1: br.sptk 1b
END(ia64_slave_init_handler)