ia64/xen-unstable

view xen/arch/ia64/linux-xen/mca_asm.S @ 18085:4f0428e4dd15

[IA64] kexec: Unpin shared_info, mapped_regs and VPD TR in ia64_do_tlb_purge

Unpinning of shared_info, mapped_regs and VPD appears to be missing
from ia64_do_tlb_purge, and it is needed for kexec.

Like the VHPT, the pinned value is recorded in a per-cpu variable
so that the correct value can be unpinned.
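
A minimal sketch of the pattern, in C-like pseudocode (the per-cpu variable
names match those used below in mca_asm.S; the pinning side and the ptr_d()
helper are illustrative only, standing in for the itr.d/ptr.d instructions):

    /* when the translation is pinned, remember what was inserted */
    __get_cpu_var(inserted_shared_info) = shared_info_va;

    /* in ia64_do_tlb_purge, unpin whatever this cpu recorded */
    ptr_d(__get_cpu_var(inserted_shared_info), XSI_SHIFT << 2);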

Cc: Isaku Yamahata <yamahata@valinux.co.jp>
Signed-off-by: Simon Horman <horms@verge.net.au>
author Isaku Yamahata <yamahata@valinux.co.jp>
date Tue Jul 22 12:15:02 2008 +0900 (2008-07-22)
parents 0b72d16e794b
children 7da7b53b2139
line source
1 //
2 // assembly portion of the IA64 MCA handling
3 //
4 // Mods by cfleck to integrate into kernel build
5 // 00/03/15 davidm Added various stop bits to get a clean compile
6 //
7 // 00/03/29 cfleck Added code to save INIT handoff state in pt_regs format, switch to temp
8 // kstack, switch modes, jump to C INIT handler
9 //
10 // 02/01/04 J.Hall <jenna.s.hall@intel.com>
11 // Before entering virtual mode code:
12 // 1. Check for TLB CPU error
13 // 2. Restore current thread pointer to kr6
14 // 3. Move stack ptr 16 bytes to conform to C calling convention
15 //
16 // 04/11/12 Russ Anderson <rja@sgi.com>
17 // Added per cpu MCA/INIT stack save areas.
18 //
19 #include <linux/config.h>
20 #include <linux/threads.h>
22 #include <asm/asmmacro.h>
23 #include <asm/pgtable.h>
24 #include <asm/processor.h>
25 #include <asm/mca_asm.h>
26 #include <asm/mca.h>
27 #ifdef XEN
28 #include <asm/vhpt.h>
29 #include <public/arch-ia64.h>
30 #endif
32 /*
33 * When we get a machine check, the kernel stack pointer is no longer
34 * valid, so we need to set a new stack pointer.
35 */
36 #define MINSTATE_PHYS /* Make sure stack access is physical for MINSTATE */
38 /*
39 * Needed for return context to SAL
40 */
41 #define IA64_MCA_SAME_CONTEXT 0
42 #define IA64_MCA_COLD_BOOT -2
44 #include "minstate.h"
46 /*
47 * SAL_TO_OS_MCA_HANDOFF_STATE (SAL 3.0 spec)
48 * 1. GR1 = OS GP
49 * 2. GR8 = PAL_PROC physical address
50 * 3. GR9 = SAL_PROC physical address
51 * 4. GR10 = SAL GP (physical)
52 * 5. GR11 = Rendez state
53 * 6. GR12 = Return address to location within SAL_CHECK
54 */
55 #ifdef XEN
56 #define SAL_TO_OS_MCA_HANDOFF_STATE_SAVE(_tmp) \
57 GET_THIS_PADDR(_tmp, ia64_sal_to_os_handoff_state_addr);; \
58 ld8 _tmp=[_tmp];; \
59 st8 [_tmp]=r1,0x08;; \
60 st8 [_tmp]=r8,0x08;; \
61 st8 [_tmp]=r9,0x08;; \
62 st8 [_tmp]=r10,0x08;; \
63 st8 [_tmp]=r11,0x08;; \
64 st8 [_tmp]=r12,0x08;; \
65 st8 [_tmp]=r17,0x08;; \
66 st8 [_tmp]=r18,0x08
67 #else
68 #define SAL_TO_OS_MCA_HANDOFF_STATE_SAVE(_tmp) \
69 LOAD_PHYSICAL(p0, _tmp, ia64_sal_to_os_handoff_state);; \
70 st8 [_tmp]=r1,0x08;; \
71 st8 [_tmp]=r8,0x08;; \
72 st8 [_tmp]=r9,0x08;; \
73 st8 [_tmp]=r10,0x08;; \
74 st8 [_tmp]=r11,0x08;; \
75 st8 [_tmp]=r12,0x08;; \
76 st8 [_tmp]=r17,0x08;; \
77 st8 [_tmp]=r18,0x08
78 #endif /* XEN */
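//
// For orientation, the eight st8's above populate, in order, a structure
// shaped roughly like ia64_mca_sal_to_os_state_t from include/asm/mca.h
// (a sketch; field names are indicative, see mca.h for the real definition):
//
//	struct {
//		u64 imsto_os_gp;		/* GR1  */
//		u64 imsto_pal_proc;		/* GR8  */
//		u64 imsto_sal_proc;		/* GR9  */
//		u64 imsto_sal_gp;		/* GR10 */
//		u64 imsto_rendez_state;		/* GR11 */
//		u64 imsto_sal_check_ra;		/* GR12 */
//		u64 pal_min_state;		/* GR17 */
//		u64 proc_state_param;		/* GR18, read back at offset 56 below */
//	};
//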
80 /*
81 * OS_MCA_TO_SAL_HANDOFF_STATE (SAL 3.0 spec)
82 * (p6) is executed if we never entered virtual mode (TLB error)
83 * (p7) is executed if we entered virtual mode as expected (normal case)
84 * 1. GR8 = OS_MCA return status
85 * 2. GR9 = SAL GP (physical)
86 * 3. GR10 = 0/1 returning same/new context
87 * 4. GR22 = New min state save area pointer
88 * returns ptr to SAL rtn save loc in _tmp
89 */
90 #define OS_MCA_TO_SAL_HANDOFF_STATE_RESTORE(_tmp) \
91 movl _tmp=ia64_os_to_sal_handoff_state;; \
92 DATA_VA_TO_PA(_tmp);; \
93 ld8 r8=[_tmp],0x08;; \
94 ld8 r9=[_tmp],0x08;; \
95 ld8 r10=[_tmp],0x08;; \
96 ld8 r22=[_tmp],0x08;;
97 // now _tmp is pointing to SAL rtn save location
99 /*
100 * COLD_BOOT_HANDOFF_STATE() sets ia64_mca_os_to_sal_state
101 * imots_os_status=IA64_MCA_COLD_BOOT
102 * imots_sal_gp=SAL GP
103 * imots_context=IA64_MCA_SAME_CONTEXT
104 * imots_new_min_state=Min state save area pointer
105 * imots_sal_check_ra=Return address to location within SAL_CHECK
106 *
107 */
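//
// In C terms, COLD_BOOT_HANDOFF_STATE() below fills a structure shaped
// roughly like ia64_mca_os_to_sal_state_t (a sketch; see include/asm/mca.h
// for the authoritative definition):
//
//	struct {
//		u64 imots_os_status;		/* IA64_MCA_COLD_BOOT here */
//		u64 imots_sal_gp;
//		u64 imots_context;		/* IA64_MCA_SAME_CONTEXT here */
//		u64 *imots_new_min_state;
//		u64 imots_sal_check_ra;
//	};
//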
108 #ifdef XEN
109 #define COLD_BOOT_HANDOFF_STATE(sal_to_os_handoff,os_to_sal_handoff,tmp)\
110 movl tmp=IA64_MCA_COLD_BOOT; \
111 GET_THIS_PADDR(r2,ia64_sal_to_os_handoff_state_addr);; \
112 ld8 sal_to_os_handoff=[r2];; \
113 movl os_to_sal_handoff=ia64_os_to_sal_handoff_state;; \
114 dep os_to_sal_handoff = 0, os_to_sal_handoff, 60, 4;; \
115 /*DATA_VA_TO_PA(os_to_sal_handoff);;*/ \
116 st8 [os_to_sal_handoff]=tmp,8;; \
117 ld8 tmp=[sal_to_os_handoff],48;; \
118 st8 [os_to_sal_handoff]=tmp,8;; \
119 movl tmp=IA64_MCA_SAME_CONTEXT;; \
120 st8 [os_to_sal_handoff]=tmp,8;; \
121 ld8 tmp=[sal_to_os_handoff],-8;; \
122 st8 [os_to_sal_handoff]=tmp,8;; \
123 ld8 tmp=[sal_to_os_handoff];; \
124 st8 [os_to_sal_handoff]=tmp;;
125 #else /* XEN */
126 #define COLD_BOOT_HANDOFF_STATE(sal_to_os_handoff,os_to_sal_handoff,tmp)\
127 movl tmp=IA64_MCA_COLD_BOOT; \
128 movl sal_to_os_handoff=__pa(ia64_sal_to_os_handoff_state); \
129 movl os_to_sal_handoff=__pa(ia64_os_to_sal_handoff_state);; \
130 st8 [os_to_sal_handoff]=tmp,8;; \
131 ld8 tmp=[sal_to_os_handoff],48;; \
132 st8 [os_to_sal_handoff]=tmp,8;; \
133 movl tmp=IA64_MCA_SAME_CONTEXT;; \
134 st8 [os_to_sal_handoff]=tmp,8;; \
135 ld8 tmp=[sal_to_os_handoff],-8;; \
136 st8 [os_to_sal_handoff]=tmp,8;; \
137 ld8 tmp=[sal_to_os_handoff];; \
138 st8 [os_to_sal_handoff]=tmp;;
139 #endif /* XEN */
141 #define GET_IA64_MCA_DATA(reg) \
142 GET_THIS_PADDR(reg, ia64_mca_data) \
143 ;; \
144 ld8 reg=[reg]
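// ia64_mca_data is a per-cpu variable holding the physical address of this
// cpu's MCA save area (the stackframe, register backing store and processor
// state dump referenced via the IA64_MCA_CPU_*_OFFSET constants below), so
// GET_IA64_MCA_DATA() is simply a physical-mode load of that pointer.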
146 .global ia64_os_mca_dispatch
147 .global ia64_os_mca_dispatch_end
148 #ifndef XEN
149 .global ia64_sal_to_os_handoff_state
150 .global ia64_os_to_sal_handoff_state
151 #endif
152 .global ia64_do_tlb_purge
154 .text
155 .align 16
157 #ifdef XEN
158 /*
159 * void set_per_cpu_data(void)
160 * {
161 * int i;
162 * for (i = 0; i < NR_CPUS; i++) {
163 * if (ia64_mca_tlb_list[i].cr_lid == ia64_getreg(_IA64_REG_CR_LID)) {
164 * ia64_set_kr(IA64_KR_PER_CPU_DATA, ia64_mca_tlb_list[i].percpu_paddr);
165 * return;
166 * }
167 * }
168 * while(1); // Endless loop on error
169 * }
170 */
171 #define SET_PER_CPU_DATA() \
172 LOAD_PHYSICAL(p0,r2,ia64_mca_tlb_list);; \
173 mov r7 = r0; \
174 mov r6 = r0;; \
175 adds r3 = IA64_MCA_PERCPU_OFFSET, r2; \
176 1: add r4 = r6, r2; \
177 mov r5=cr.lid;; \
178 adds r7 = 1, r7; \
179 ld8 r4 = [r4];; \
180 cmp.ne p6, p7 = r5, r4; \
181 cmp4.lt p8, p9 = NR_CPUS-1, r7; \
182 (p7) br.cond.dpnt 3f; \
183 adds r6 = 16, r6; \
184 (p9) br.cond.sptk 1b; \
185 2: br 2b;; /* Endless loop on error */ \
186 3: add r4 = r6, r3;; \
187 ld8 r4 = [r4];; \
188 mov ar.k3=r4
190 /*
191 * GET_VA_VCPU_VHPT_MADDR() emulates 'reg = __va_ul(vcpu_vhpt_maddr(v))'.
192 */
193 #ifdef CONFIG_XEN_IA64_PERVCPU_VHPT
194 #define HAS_PERVCPU_VHPT_MASK 0x2
195 #define GET_VA_VCPU_VHPT_MADDR(reg,tmp) \
196 GET_THIS_PADDR(reg,cpu_kr);; \
197 add reg=IA64_KR_CURRENT_OFFSET,reg;; \
198 ld8 reg=[reg];; \
199 dep tmp=0,reg,60,4;; /* V to P */ \
200 add tmp=IA64_VCPU_VHPT_PAGE_OFFSET,tmp;; \
201 ld8 tmp=[tmp];; \
202 cmp.eq p6,p0=tmp,r0; /* v->arch.vhpt_page == NULL */ \
203 (p6) br.cond.sptk 1f; \
204 add reg=IA64_VCPU_VHPT_MADDR_OFFSET,reg;; \
205 dep reg=0,reg,60,4;; /* V to P */ \
206 ld8 reg=[reg];; \
207 dep reg=-1,reg,60,4; /* P to V */ \
208 br.sptk 2f; \
209 1: \
210 GET_THIS_PADDR(reg, vhpt_paddr);; \
211 ld8 reg=[reg];; \
212 dep reg=-1,reg,60,4; /* P to V */ \
213 2:
214 #else /* CONFIG_XEN_IA64_PERVCPU_VHPT */
215 #define GET_VA_VCPU_VHPT_MADDR(reg,tmp) \
216 GET_THIS_PADDR(reg, vhpt_paddr);; \
217 ld8 reg=[reg];; \
218 dep reg=-1,reg,60,4 /* P to V */
219 #endif /* CONFIG_XEN_IA64_PERVCPU_VHPT */
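// Roughly, in C (following the comment above; illustrative pseudocode only,
// field names inferred from the IA64_VCPU_VHPT_* offsets):
//
//	if (v->arch.vhpt_page != NULL)		/* per-vcpu VHPT allocated */
//		reg = __va_ul(v->arch.vhpt_maddr);
//	else
//		reg = __va_ul(__get_cpu_var(vhpt_paddr));
//
// where v is the vcpu held in IA64_KR(CURRENT) and __va_ul() corresponds to
// the "P to V" dep of -1 into bits 63:60.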
220 #endif /* XEN */
222 /*
223 * Just the TLB purge part is moved to a separate function
224 * so we can re-use the code for cpu hotplug as well.
225 * The caller should set up b1 so we can branch back once the
226 * TLB flush is complete.
227 */
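// (begin_tlb_purge_and_reload below is a typical caller: it loads the
// physical address of ia64_reload_tr into b1 and then branches here.)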
229 ia64_do_tlb_purge:
230 #ifdef XEN
231 // This needs to be called in order for GET_THIS_PADDR to work
232 SET_PER_CPU_DATA();;
233 #endif
234 #define O(member) IA64_CPUINFO_##member##_OFFSET
236 GET_THIS_PADDR(r2, cpu_info) // load phys addr of cpu_info into r2
237 ;;
238 addl r17=O(PTCE_STRIDE),r2
239 addl r2=O(PTCE_BASE),r2
240 ;;
241 ld8 r18=[r2],(O(PTCE_COUNT)-O(PTCE_BASE));; // r18=ptce_base
242 ld4 r19=[r2],4 // r19=ptce_count[0]
243 ld4 r21=[r17],4 // r21=ptce_stride[0]
244 ;;
245 ld4 r20=[r2] // r20=ptce_count[1]
246 ld4 r22=[r17] // r22=ptce_stride[1]
247 mov r24=0
248 ;;
249 adds r20=-1,r20
250 ;;
251 #undef O
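// The two nested loops below walk the local purge parameters that were read
// from cpu_info above (ptce_base/count/stride, as reported by PAL). In rough
// C pseudocode (illustrative only; ptc_e() stands for the ptc.e instruction):
//
//	addr = ptce_base;
//	for (i = 0; i < ptce_count[0]; i++) {
//		for (j = 0; j < ptce_count[1]; j++) {
//			ptc_e(addr);
//			addr += ptce_stride[1];
//		}
//		addr += ptce_stride[0];
//	}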
253 2:
254 cmp.ltu p6,p7=r24,r19
255 (p7) br.cond.dpnt.few 4f
256 mov ar.lc=r20
257 3:
258 ptc.e r18
259 ;;
260 add r18=r22,r18
261 br.cloop.sptk.few 3b
262 ;;
263 add r18=r21,r18
264 add r24=1,r24
265 ;;
266 br.sptk.few 2b
267 4:
268 srlz.i // srlz.i implies srlz.d
269 ;;
271 // Now purge addresses formerly mapped by TR registers
272 // 1. Purge ITR&DTR for kernel.
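// (For ptr.i/ptr.d the second operand carries the purge page size in
// bits 7:2, itir-style, which is why the "<<2" shifted values are used
// here and in the steps below.)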
273 movl r16=KERNEL_START
274 mov r18=KERNEL_TR_PAGE_SHIFT<<2
275 ;;
276 ptr.i r16, r18
277 ptr.d r16, r18
278 ;;
279 srlz.i
280 ;;
281 srlz.d
282 ;;
283 // 2. Purge DTR for PERCPU data.
284 movl r16=PERCPU_ADDR
285 mov r18=PERCPU_PAGE_SHIFT<<2
286 ;;
287 ptr.d r16,r18
288 ;;
289 srlz.d
290 ;;
291 // 3. Purge ITR for PAL code.
292 GET_THIS_PADDR(r2, ia64_mca_pal_base)
293 ;;
294 ld8 r16=[r2]
295 mov r18=IA64_GRANULE_SHIFT<<2
296 ;;
297 ptr.i r16,r18
298 ;;
299 srlz.i
300 ;;
301 // 4. Purge DTR for stack.
302 #ifdef XEN
303 // Kernel registers are saved in a per_cpu cpu_kr_ia64_t
304 // to allow the kernel registers themselves to be used by domains.
305 GET_THIS_PADDR(r2, cpu_kr);;
306 add r2=IA64_KR_CURRENT_STACK_OFFSET,r2
307 ;;
308 ld8 r16=[r2]
309 #else
310 mov r16=IA64_KR(CURRENT_STACK)
311 #endif
312 ;;
313 shl r16=r16,IA64_GRANULE_SHIFT
314 movl r19=PAGE_OFFSET
315 ;;
316 add r16=r19,r16
317 mov r18=IA64_GRANULE_SHIFT<<2
318 ;;
319 ptr.d r16,r18
320 ;;
321 srlz.i
322 ;;
323 #ifdef XEN
324 // 5. shared_info
325 GET_THIS_PADDR(r2, inserted_shared_info);;
326 ld8 r16=[r2]
327 mov r18=XSI_SHIFT<<2
328 ;;
329 ptr.d r16,r18
330 ;;
331 srlz.d
332 ;;
334 // 6. mapped_regs
335 GET_THIS_PADDR(r2, inserted_mapped_regs);;
336 ld8 r16=[r2]
337 mov r18=XMAPPEDREGS_SHIFT<<2
338 ;;
339 ptr.d r16,r18
340 ;;
341 srlz.d
342 ;;
344 // 7. VPD
345 // The VPD will not be mapped in the case where
346 // a VMX domain hasn't been started since boot
347 GET_THIS_PADDR(r2, inserted_vpd);;
348 ld8 r16=[r2]
349 mov r18=XMAPPEDREGS_SHIFT<<2
350 ;;
351 cmp.eq p7,p0=r16,r0 // VPD not mapped if the recorded address is zero
352 ;;
353 (p7) br.cond.sptk .vpd_not_mapped
354 ;;
355 ptr.i r16,r18
356 ;;
357 srlz.i
358 ;;
359 .vpd_not_mapped:
361 // 8. VHPT
362 // GET_VA_VCPU_VHPT_MADDR() may not give the
363 // value of the VHPT currently pinned into the TLB
364 GET_THIS_PADDR(r2, inserted_vhpt);;
ld8 r2=[r2] // fetch the recorded (currently pinned) VHPT virtual address
365 ;;
366 cmp.eq p7,p0=r2,r0
367 ;;
368 (p7) br.cond.sptk .vhpt_not_mapped
369 dep r16=0,r2,0,IA64_GRANULE_SHIFT
370 mov r18=IA64_GRANULE_SHIFT<<2
371 ;;
372 ptr.d r16,r18
373 ;;
374 srlz.d
375 ;;
376 .vhpt_not_mapped:
377 #endif
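// Taken together, steps 5-8 above amount to roughly the following C
// pseudocode (illustrative only; ptr_d()/ptr_i() stand for the purge
// instructions, and the per-cpu variables are those recorded when the
// translations were pinned):
//
//	ptr_d(__get_cpu_var(inserted_shared_info), XSI_SHIFT << 2);
//	ptr_d(__get_cpu_var(inserted_mapped_regs), XMAPPEDREGS_SHIFT << 2);
//	if (__get_cpu_var(inserted_vpd))
//		ptr_i(__get_cpu_var(inserted_vpd), XMAPPEDREGS_SHIFT << 2);
//	if (__get_cpu_var(inserted_vhpt))
//		ptr_d(__get_cpu_var(inserted_vhpt) & ~(IA64_GRANULE_SIZE - 1),
//		      IA64_GRANULE_SHIFT << 2);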
378 // Now branch away to caller.
379 br.sptk.many b1
380 ;;
382 ia64_os_mca_dispatch:
384 // Serialize all MCA processing
385 mov r3=1;;
386 LOAD_PHYSICAL(p0,r2,ia64_mca_serialize);;
387 ia64_os_mca_spin:
388 xchg8 r4=[r2],r3;;
389 cmp.ne p6,p0=r4,r0
390 (p6) br ia64_os_mca_spin
392 #ifdef XEN
393 SET_PER_CPU_DATA();;
394 #endif
395 // Save the SAL to OS MCA handoff state as defined
396 // by SAL SPEC 3.0
397 // NOTE : The order in which the state gets saved
398 // is dependent on the way the C-structure
399 // for ia64_mca_sal_to_os_state_t has been
400 // defined in include/asm/mca.h
401 SAL_TO_OS_MCA_HANDOFF_STATE_SAVE(r2)
402 ;;
404 // LOG PROCESSOR STATE INFO FROM HERE ON..
405 begin_os_mca_dump:
406 br ia64_os_mca_proc_state_dump;;
408 ia64_os_mca_done_dump:
410 #ifdef XEN
411 // Set current to ar.k6
412 GET_THIS_PADDR(r2,cpu_kr);;
413 add r2=IA64_KR_CURRENT_OFFSET,r2;;
414 ld8 r2=[r2];;
415 mov ar.k6=r2;;
417 GET_THIS_PADDR(r2,ia64_sal_to_os_handoff_state_addr);;
418 ld8 r2=[r2];;
419 adds r16=56,r2
420 #else
421 LOAD_PHYSICAL(p0,r16,ia64_sal_to_os_handoff_state+56)
422 #endif
423 ;;
424 ld8 r18=[r16] // Get processor state parameter on existing PALE_CHECK.
425 ;;
426 tbit.nz p6,p7=r18,60
427 (p7) br.spnt done_tlb_purge_and_reload
429 // The following code purges TC and TR entries, then reloads the TR entries.
430 // Purge percpu data TC entries.
431 begin_tlb_purge_and_reload:
432 movl r18=ia64_reload_tr;;
433 LOAD_PHYSICAL(p0,r18,ia64_reload_tr);;
434 mov b1=r18;;
435 br.sptk.many ia64_do_tlb_purge;;
437 ia64_reload_tr:
438 // Finally reload the TR registers.
439 // 1. Reload DTR/ITR registers for kernel.
440 mov r18=KERNEL_TR_PAGE_SHIFT<<2
441 movl r17=KERNEL_START
442 ;;
443 mov cr.itir=r18
444 mov cr.ifa=r17
445 mov r16=IA64_TR_KERNEL
446 mov r19=ip
447 movl r18=PAGE_KERNEL
448 ;;
449 dep r17=0,r19,0, KERNEL_TR_PAGE_SHIFT
450 ;;
451 or r18=r17,r18
452 ;;
453 itr.i itr[r16]=r18
454 ;;
455 itr.d dtr[r16]=r18
456 ;;
457 srlz.i
458 srlz.d
459 ;;
460 // 2. Reload DTR register for PERCPU data.
461 GET_THIS_PADDR(r2, ia64_mca_per_cpu_pte)
462 ;;
463 movl r16=PERCPU_ADDR // vaddr
464 movl r18=PERCPU_PAGE_SHIFT<<2
465 ;;
466 mov cr.itir=r18
467 mov cr.ifa=r16
468 ;;
469 ld8 r18=[r2] // load per-CPU PTE
470 mov r16=IA64_TR_PERCPU_DATA;
471 ;;
472 itr.d dtr[r16]=r18
473 ;;
474 srlz.d
475 ;;
476 // 3. Reload ITR for PAL code.
477 GET_THIS_PADDR(r2, ia64_mca_pal_pte)
478 ;;
479 ld8 r18=[r2] // load PAL PTE
480 ;;
481 GET_THIS_PADDR(r2, ia64_mca_pal_base)
482 ;;
483 ld8 r16=[r2] // load PAL vaddr
484 mov r19=IA64_GRANULE_SHIFT<<2
485 ;;
486 mov cr.itir=r19
487 mov cr.ifa=r16
488 mov r20=IA64_TR_PALCODE
489 ;;
490 itr.i itr[r20]=r18
491 ;;
492 srlz.i
493 ;;
494 // 4. Reload DTR for stack.
495 #ifdef XEN
496 // Kernel registers are saved in a per_cpu cpu_kr_ia64_t
497 // to allow the kernel registers themselves to be used by domains.
498 GET_THIS_PADDR(r2, cpu_kr);;
499 add r2=IA64_KR_CURRENT_STACK_OFFSET,r2
500 ;;
501 ld8 r16=[r2]
502 #else
503 mov r16=IA64_KR(CURRENT_STACK)
504 #endif
505 ;;
506 shl r16=r16,IA64_GRANULE_SHIFT
507 movl r19=PAGE_OFFSET
508 ;;
509 add r18=r19,r16
510 movl r20=PAGE_KERNEL
511 ;;
512 add r16=r20,r16
513 mov r19=IA64_GRANULE_SHIFT<<2
514 ;;
515 mov cr.itir=r19
516 mov cr.ifa=r18
517 mov r20=IA64_TR_CURRENT_STACK
518 ;;
519 itr.d dtr[r20]=r16
520 ;;
521 srlz.d
522 ;;
523 #ifdef XEN
524 .reload_vhpt:
525 // 5. VHPT
526 GET_THIS_PADDR(r1, inserted_vhpt);;
ld8 r2=[r1] // fetch the recorded (currently pinned) VHPT virtual address
;;
527 cmp.eq p7,p0=r2,r0
528 (p7) br.cond.sptk .overlap_vhpt // vhpt isn't mapped.
530 // avoid overlapping with stack TR
531 shr.u r17=r2,IA64_GRANULE_SHIFT
532 GET_THIS_PADDR(r3, cpu_kr);;
533 add r3=IA64_KR_CURRENT_STACK_OFFSET,r3
534 ;;
535 ld8 r3=[r3]
536 ;;
537 cmp.eq p7,p0=r3,r17
538 (p7) br.cond.sptk .overlap_vhpt
539 ;;
541 dep r16=0,r2,0,IA64_GRANULE_SHIFT
542 movl r20=PAGE_KERNEL
543 ;;
544 mov r18=IA64_TR_VHPT
545 dep r17=0,r16,60,4 // physical address of
546 // va_vhpt & ~(IA64_GRANULE_SIZE - 1)
547 mov r19=IA64_GRANULE_SHIFT<<2
548 ;;
549 or r17=r17,r20 // construct PA | page properties
550 mov cr.itir=r19
551 mov cr.ifa=r16
552 ;;
553 itr.d dtr[r18]=r17 // wire in new mapping...
554 ;;
555 srlz.d
556 ;;
557 .overlap_vhpt:
558 #endif
559 br.sptk.many done_tlb_purge_and_reload
560 err:
561 COLD_BOOT_HANDOFF_STATE(r20,r21,r22)
562 br.sptk.many ia64_os_mca_done_restore
564 done_tlb_purge_and_reload:
566 // Setup new stack frame for OS_MCA handling
567 GET_IA64_MCA_DATA(r2)
568 ;;
569 add r3 = IA64_MCA_CPU_STACKFRAME_OFFSET, r2
570 add r2 = IA64_MCA_CPU_RBSTORE_OFFSET, r2
571 ;;
572 rse_switch_context(r6,r3,r2);; // RSC management in this new context
574 GET_IA64_MCA_DATA(r2)
575 ;;
576 add r2 = IA64_MCA_CPU_STACK_OFFSET+IA64_MCA_STACK_SIZE-16, r2
577 ;;
578 mov r12=r2 // establish new stack-pointer
580 // Enter virtual mode from physical mode
581 VIRTUAL_MODE_ENTER(r2, r3, ia64_os_mca_virtual_begin, r4)
582 ia64_os_mca_virtual_begin:
584 // Call virtual mode handler
585 movl r2=ia64_mca_ucmc_handler;;
586 mov b6=r2;;
587 br.call.sptk.many b0=b6;;
588 .ret0:
589 // Revert back to physical mode before going back to SAL
590 PHYSICAL_MODE_ENTER(r2, r3, ia64_os_mca_virtual_end, r4)
591 ia64_os_mca_virtual_end:
593 // restore the original stack frame here
594 GET_IA64_MCA_DATA(r2)
595 ;;
596 add r2 = IA64_MCA_CPU_STACKFRAME_OFFSET, r2
597 ;;
598 movl r4=IA64_PSR_MC
599 ;;
600 rse_return_context(r4,r3,r2) // switch from interrupt context for RSE
602 // let us restore all the registers from our PSI structure
603 mov r8=gp
604 ;;
605 begin_os_mca_restore:
606 br ia64_os_mca_proc_state_restore;;
608 ia64_os_mca_done_restore:
609 OS_MCA_TO_SAL_HANDOFF_STATE_RESTORE(r2);;
610 // branch back to SAL_CHECK
611 ld8 r3=[r2];;
612 mov b0=r3;; // SAL_CHECK return address
614 // release lock
615 movl r3=ia64_mca_serialize;;
616 DATA_VA_TO_PA(r3);;
617 st8.rel [r3]=r0
619 br b0
620 ;;
621 ia64_os_mca_dispatch_end:
622 //EndMain//////////////////////////////////////////////////////////////////////
625 //++
626 // Name:
627 // ia64_os_mca_proc_state_dump()
628 //
629 // Stub Description:
630 //
631 // This stub dumps the processor state during MCHK to a data area
632 //
633 //--
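// The dump area is written in the order implied by the post-increments
// below: bank-1 r16-r31 plus their NaT bits, the branch registers b0-b7,
// the control registers, the application registers, and finally the eight
// region registers.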
635 ia64_os_mca_proc_state_dump:
636 // Save bank 1 GRs 16-31 which will be used by c-language code when we switch
637 // to virtual addressing mode.
638 GET_IA64_MCA_DATA(r2)
639 ;;
640 add r2 = IA64_MCA_CPU_PROC_STATE_DUMP_OFFSET, r2
641 ;;
642 // save ar.NaT
643 mov r5=ar.unat // ar.unat
645 // save banked GRs 16-31 along with NaT bits
646 bsw.1;;
647 st8.spill [r2]=r16,8;;
648 st8.spill [r2]=r17,8;;
649 st8.spill [r2]=r18,8;;
650 st8.spill [r2]=r19,8;;
651 st8.spill [r2]=r20,8;;
652 st8.spill [r2]=r21,8;;
653 st8.spill [r2]=r22,8;;
654 st8.spill [r2]=r23,8;;
655 st8.spill [r2]=r24,8;;
656 st8.spill [r2]=r25,8;;
657 st8.spill [r2]=r26,8;;
658 st8.spill [r2]=r27,8;;
659 st8.spill [r2]=r28,8;;
660 st8.spill [r2]=r29,8;;
661 st8.spill [r2]=r30,8;;
662 st8.spill [r2]=r31,8;;
664 mov r4=ar.unat;;
665 st8 [r2]=r4,8 // save User NaT bits for r16-r31
666 mov ar.unat=r5 // restore original unat
667 bsw.0;;
669 //save BRs
670 add r4=8,r2 // duplicate r2 in r4
671 add r6=2*8,r2 // duplicate r2 in r6
673 mov r3=b0
674 mov r5=b1
675 mov r7=b2;;
676 st8 [r2]=r3,3*8
677 st8 [r4]=r5,3*8
678 st8 [r6]=r7,3*8;;
680 mov r3=b3
681 mov r5=b4
682 mov r7=b5;;
683 st8 [r2]=r3,3*8
684 st8 [r4]=r5,3*8
685 st8 [r6]=r7,3*8;;
687 mov r3=b6
688 mov r5=b7;;
689 st8 [r2]=r3,2*8
690 st8 [r4]=r5,2*8;;
692 cSaveCRs:
693 // save CRs
694 add r4=8,r2 // duplicate r2 in r4
695 add r6=2*8,r2 // duplicate r2 in r6
697 mov r3=cr.dcr
698 mov r5=cr.itm
699 mov r7=cr.iva;;
701 st8 [r2]=r3,8*8
702 st8 [r4]=r5,3*8
703 st8 [r6]=r7,3*8;; // 48 byte increments
705 mov r3=cr.pta;;
706 st8 [r2]=r3,8*8;; // 64 byte increments
708 // if PSR.ic=1, reading interruption registers causes an illegal operation fault
709 mov r3=psr;;
710 tbit.nz.unc p6,p0=r3,PSR_IC;; // PSI Valid Log bit pos. test
711 (p6) st8 [r2]=r0,9*8+160 // increment by 232 byte inc.
712 begin_skip_intr_regs:
713 (p6) br SkipIntrRegs;;
715 add r4=8,r2 // duplicate r2 in r4
716 add r6=2*8,r2 // duplicate r2 in r6
718 mov r3=cr.ipsr
719 mov r5=cr.isr
720 mov r7=r0;;
721 st8 [r2]=r3,3*8
722 st8 [r4]=r5,3*8
723 st8 [r6]=r7,3*8;;
725 mov r3=cr.iip
726 mov r5=cr.ifa
727 mov r7=cr.itir;;
728 st8 [r2]=r3,3*8
729 st8 [r4]=r5,3*8
730 st8 [r6]=r7,3*8;;
732 mov r3=cr.iipa
733 mov r5=cr.ifs
734 mov r7=cr.iim;;
735 st8 [r2]=r3,3*8
736 st8 [r4]=r5,3*8
737 st8 [r6]=r7,3*8;;
739 mov r3=cr25;; // cr.iha
740 st8 [r2]=r3,160;; // 160 byte increment
742 SkipIntrRegs:
743 st8 [r2]=r0,152;; // another 152 byte increment
745 add r4=8,r2 // duplicate r2 in r4
746 add r6=2*8,r2 // duplicate r2 in r6
748 mov r3=cr.lid
749 // mov r5=cr.ivr // cr.ivr, don't read it
750 mov r7=cr.tpr;;
751 st8 [r2]=r3,3*8
752 st8 [r4]=r5,3*8
753 st8 [r6]=r7,3*8;;
755 mov r3=r0 // cr.eoi => cr67
756 mov r5=r0 // cr.irr0 => cr68
757 mov r7=r0;; // cr.irr1 => cr69
758 st8 [r2]=r3,3*8
759 st8 [r4]=r5,3*8
760 st8 [r6]=r7,3*8;;
762 mov r3=r0 // cr.irr2 => cr70
763 mov r5=r0 // cr.irr3 => cr71
764 mov r7=cr.itv;;
765 st8 [r2]=r3,3*8
766 st8 [r4]=r5,3*8
767 st8 [r6]=r7,3*8;;
769 mov r3=cr.pmv
770 mov r5=cr.cmcv;;
771 st8 [r2]=r3,7*8
772 st8 [r4]=r5,7*8;;
774 mov r3=r0 // cr.lrr0 => cr80
775 mov r5=r0;; // cr.lrr1 => cr81
776 st8 [r2]=r3,23*8
777 st8 [r4]=r5,23*8;;
779 adds r2=25*8,r2;;
781 cSaveARs:
782 // save ARs
783 add r4=8,r2 // duplicate r2 in r4
784 add r6=2*8,r2 // duplicate r2 in r6
786 mov r3=ar.k0
787 mov r5=ar.k1
788 mov r7=ar.k2;;
789 st8 [r2]=r3,3*8
790 st8 [r4]=r5,3*8
791 st8 [r6]=r7,3*8;;
793 mov r3=ar.k3
794 mov r5=ar.k4
795 mov r7=ar.k5;;
796 st8 [r2]=r3,3*8
797 st8 [r4]=r5,3*8
798 st8 [r6]=r7,3*8;;
800 mov r3=ar.k6
801 mov r5=ar.k7
802 mov r7=r0;; // ar.kr8
803 st8 [r2]=r3,10*8
804 st8 [r4]=r5,10*8
805 st8 [r6]=r7,10*8;; // increment by 72 bytes
807 mov r3=ar.rsc
808 mov ar.rsc=r0 // put RSE in enforced lazy mode
809 mov r5=ar.bsp
810 ;;
811 mov r7=ar.bspstore;;
812 st8 [r2]=r3,3*8
813 st8 [r4]=r5,3*8
814 st8 [r6]=r7,3*8;;
816 mov r3=ar.rnat;;
817 st8 [r2]=r3,8*13 // increment by 13x8 bytes
819 mov r3=ar.ccv;;
820 st8 [r2]=r3,8*4
822 mov r3=ar.unat;;
823 st8 [r2]=r3,8*4
825 mov r3=ar.fpsr;;
826 st8 [r2]=r3,8*4
828 mov r3=ar.itc;;
829 st8 [r2]=r3,160 // 160
831 mov r3=ar.pfs;;
832 st8 [r2]=r3,8
834 mov r3=ar.lc;;
835 st8 [r2]=r3,8
837 mov r3=ar.ec;;
838 st8 [r2]=r3
839 add r2=8*62,r2 //padding
841 // save RRs
842 mov ar.lc=0x08-1
843 movl r4=0x00;;
845 cStRR:
846 dep.z r5=r4,61,3;;
847 mov r3=rr[r5];;
848 st8 [r2]=r3,8
849 add r4=1,r4
850 br.cloop.sptk.few cStRR
851 ;;
852 end_os_mca_dump:
853 br ia64_os_mca_done_dump;;
855 //EndStub//////////////////////////////////////////////////////////////////////
858 //++
859 // Name:
860 // ia64_os_mca_proc_state_restore()
861 //
862 // Stub Description:
863 //
864 // This is a stub to restore the saved processor state during MCHK
865 //
866 //--
868 ia64_os_mca_proc_state_restore:
870 // Restore bank1 GR16-31
871 GET_IA64_MCA_DATA(r2)
872 ;;
873 add r2 = IA64_MCA_CPU_PROC_STATE_DUMP_OFFSET, r2
875 restore_GRs: // restore bank-1 GRs 16-31
876 bsw.1;;
877 add r3=16*8,r2;; // to get to NaT of GR 16-31
878 ld8 r3=[r3];;
879 mov ar.unat=r3;; // first restore NaT
881 ld8.fill r16=[r2],8;;
882 ld8.fill r17=[r2],8;;
883 ld8.fill r18=[r2],8;;
884 ld8.fill r19=[r2],8;;
885 ld8.fill r20=[r2],8;;
886 ld8.fill r21=[r2],8;;
887 ld8.fill r22=[r2],8;;
888 ld8.fill r23=[r2],8;;
889 ld8.fill r24=[r2],8;;
890 ld8.fill r25=[r2],8;;
891 ld8.fill r26=[r2],8;;
892 ld8.fill r27=[r2],8;;
893 ld8.fill r28=[r2],8;;
894 ld8.fill r29=[r2],8;;
895 ld8.fill r30=[r2],8;;
896 ld8.fill r31=[r2],8;;
898 ld8 r3=[r2],8;; // increment to skip NaT
899 bsw.0;;
901 restore_BRs:
902 add r4=8,r2 // duplicate r2 in r4
903 add r6=2*8,r2;; // duplicate r2 in r6
905 ld8 r3=[r2],3*8
906 ld8 r5=[r4],3*8
907 ld8 r7=[r6],3*8;;
908 mov b0=r3
909 mov b1=r5
910 mov b2=r7;;
912 ld8 r3=[r2],3*8
913 ld8 r5=[r4],3*8
914 ld8 r7=[r6],3*8;;
915 mov b3=r3
916 mov b4=r5
917 mov b5=r7;;
919 ld8 r3=[r2],2*8
920 ld8 r5=[r4],2*8;;
921 mov b6=r3
922 mov b7=r5;;
924 restore_CRs:
925 add r4=8,r2 // duplicate r2 in r4
926 add r6=2*8,r2;; // duplicate r2 in r6
928 ld8 r3=[r2],8*8
929 ld8 r5=[r4],3*8
930 ld8 r7=[r6],3*8;; // 48 byte increments
931 mov cr.dcr=r3
932 mov cr.itm=r5
933 mov cr.iva=r7;;
935 ld8 r3=[r2],8*8;; // 64 byte increments
936 // mov cr.pta=r3
939 // if PSR.ic=1, reading interruption registers causes an illegal operation fault
940 mov r3=psr;;
941 tbit.nz.unc p6,p0=r3,PSR_IC;; // PSI Valid Log bit pos. test
942 (p6) st8 [r2]=r0,9*8+160 // increment by 232 byte inc.
944 begin_rskip_intr_regs:
945 (p6) br rSkipIntrRegs;;
947 add r4=8,r2 // duplicate r2 in r4
948 add r6=2*8,r2;; // duplicate r2 in r6
950 ld8 r3=[r2],3*8
951 ld8 r5=[r4],3*8
952 ld8 r7=[r6],3*8;;
953 mov cr.ipsr=r3
954 // mov cr.isr=r5 // cr.isr is read only
956 ld8 r3=[r2],3*8
957 ld8 r5=[r4],3*8
958 ld8 r7=[r6],3*8;;
959 mov cr.iip=r3
960 mov cr.ifa=r5
961 mov cr.itir=r7;;
963 ld8 r3=[r2],3*8
964 ld8 r5=[r4],3*8
965 ld8 r7=[r6],3*8;;
966 mov cr.iipa=r3
967 mov cr.ifs=r5
968 mov cr.iim=r7
970 ld8 r3=[r2],160;; // 160 byte increment
971 mov cr.iha=r3
973 rSkipIntrRegs:
974 ld8 r3=[r2],152;; // another 152 byte inc.
976 add r4=8,r2 // duplicate r2 in r4
977 add r6=2*8,r2;; // duplicate r2 in r6
979 ld8 r3=[r2],8*3
980 ld8 r5=[r4],8*3
981 ld8 r7=[r6],8*3;;
982 mov cr.lid=r3
983 // mov cr.ivr=r5 // cr.ivr is read only
984 mov cr.tpr=r7;;
986 ld8 r3=[r2],8*3
987 ld8 r5=[r4],8*3
988 ld8 r7=[r6],8*3;;
989 // mov cr.eoi=r3
990 // mov cr.irr0=r5 // cr.irr0 is read only
991 // mov cr.irr1=r7;; // cr.irr1 is read only
993 ld8 r3=[r2],8*3
994 ld8 r5=[r4],8*3
995 ld8 r7=[r6],8*3;;
996 // mov cr.irr2=r3 // cr.irr2 is read only
997 // mov cr.irr3=r5 // cr.irr3 is read only
998 mov cr.itv=r7;;
1000 ld8 r3=[r2],8*7
1001 ld8 r5=[r4],8*7;;
1002 mov cr.pmv=r3
1003 mov cr.cmcv=r5;;
1005 ld8 r3=[r2],8*23
1006 ld8 r5=[r4],8*23;;
1007 adds r2=8*23,r2
1008 adds r4=8*23,r4;;
1009 // mov cr.lrr0=r3
1010 // mov cr.lrr1=r5
1012 adds r2=8*2,r2;;
1014 restore_ARs:
1015 add r4=8,r2 // duplicate r2 in r4
1016 add r6=2*8,r2;; // duplicate r2 in r6
1018 ld8 r3=[r2],3*8
1019 ld8 r5=[r4],3*8
1020 ld8 r7=[r6],3*8;;
1021 mov ar.k0=r3
1022 mov ar.k1=r5
1023 mov ar.k2=r7;;
1025 ld8 r3=[r2],3*8
1026 ld8 r5=[r4],3*8
1027 ld8 r7=[r6],3*8;;
1028 mov ar.k3=r3
1029 mov ar.k4=r5
1030 mov ar.k5=r7;;
1032 ld8 r3=[r2],10*8
1033 ld8 r5=[r4],10*8
1034 ld8 r7=[r6],10*8;;
1035 mov ar.k6=r3
1036 mov ar.k7=r5
1037 ;;
1039 ld8 r3=[r2],3*8
1040 ld8 r5=[r4],3*8
1041 ld8 r7=[r6],3*8;;
1042 // mov ar.rsc=r3
1043 // mov ar.bsp=r5 // ar.bsp is read only
1044 mov ar.rsc=r0 // make sure that RSE is in enforced lazy mode
1045 ;;
1046 mov ar.bspstore=r7;;
1048 ld8 r9=[r2],8*13;;
1049 mov ar.rnat=r9
1051 mov ar.rsc=r3
1052 ld8 r3=[r2],8*4;;
1053 mov ar.ccv=r3
1055 ld8 r3=[r2],8*4;;
1056 mov ar.unat=r3
1058 ld8 r3=[r2],8*4;;
1059 mov ar.fpsr=r3
1061 ld8 r3=[r2],160;; // 160
1062 // mov ar.itc=r3
1064 ld8 r3=[r2],8;;
1065 mov ar.pfs=r3
1067 ld8 r3=[r2],8;;
1068 mov ar.lc=r3
1070 ld8 r3=[r2];;
1071 mov ar.ec=r3
1072 add r2=8*62,r2;; // padding
1074 restore_RRs:
1075 mov r5=ar.lc
1076 mov ar.lc=0x08-1
1077 movl r4=0x00;;
1078 cStRRr:
1079 dep.z r7=r4,61,3
1080 ld8 r3=[r2],8;;
1081 mov rr[r7]=r3 // what are its access privileges?
1082 add r4=1,r4
1083 br.cloop.sptk.few cStRRr
1084 ;;
1085 mov ar.lc=r5
1086 ;;
1087 end_os_mca_restore:
1088 br ia64_os_mca_done_restore;;
1090 //EndStub//////////////////////////////////////////////////////////////////////
1093 // ok, the issue here is that we need to save state information so
1094 // it can be usable by the kernel debugger and show_regs routines.
1095 // In order to do this, our best bet is to save the current state (plus
1096 // the state information obtained from the MIN_STATE_AREA) into a pt_regs
1097 // format. This way we can pass it on in a usable format.
1098 //
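// (The SAVE_MIN_WITH_COVER/SAVE_REST sequence below builds that pt_regs
// frame on the stack; after the switch to virtual mode its address is
// passed to the C handler as out0.)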
1100 //
1101 // SAL to OS entry point for INIT on the monarch processor
1102 // This has been defined for registration purposes with SAL
1103 // as a part of ia64_mca_init.
1104 //
1105 // When we get here, the following registers have been
1106 // set by the SAL for our use
1107 //
1108 // 1. GR1 = OS INIT GP
1109 // 2. GR8 = PAL_PROC physical address
1110 // 3. GR9 = SAL_PROC physical address
1111 // 4. GR10 = SAL GP (physical)
1112 // 5. GR11 = Init Reason
1113 // 0 = Received INIT for event other than crash dump switch
1114 // 1 = Received wakeup at the end of an OS_MCA corrected machine check
1115 // 2 = Received INIT due to CrashDump switch assertion
1116 //
1117 // 6. GR12 = Return address to location within SAL_INIT procedure
1120 GLOBAL_ENTRY(ia64_monarch_init_handler)
1121 .prologue
1122 #ifdef XEN /* Need in ia64_monarch_init_handler? */
1123 SET_PER_CPU_DATA();;
1125 // Set current to ar.k6
1126 GET_THIS_PADDR(r2,cpu_kr);;
1127 add r2=IA64_KR_CURRENT_OFFSET,r2;;
1128 ld8 r2=[r2];;
1129 mov ar.k6=r2;;
1130 #endif
1131 // stash the information the SAL passed to os
1132 SAL_TO_OS_MCA_HANDOFF_STATE_SAVE(r2)
1133 ;;
1134 SAVE_MIN_WITH_COVER
1135 ;;
1136 mov r8=cr.ifa
1137 mov r9=cr.isr
1138 adds r3=8,r2 // set up second base pointer
1139 ;;
1140 SAVE_REST
1142 // ok, enough should be saved at this point to be dangerous, and supply
1143 // information for a dump
1144 // We need to switch to Virtual mode before hitting the C functions.
1146 movl r2=IA64_PSR_IT|IA64_PSR_IC|IA64_PSR_DT|IA64_PSR_RT|IA64_PSR_DFH|IA64_PSR_BN
1147 mov r3=psr // get the current psr, minimum enabled at this point
1148 ;;
1149 or r2=r2,r3
1150 ;;
1151 movl r3=IVirtual_Switch
1152 ;;
1153 mov cr.iip=r3 // short return to set the appropriate bits
1154 mov cr.ipsr=r2 // need to do an rfi to set appropriate bits
1155 ;;
1156 rfi
1157 ;;
1158 IVirtual_Switch:
1159 //
1160 // We should now be running virtual
1161 //
1162 // Let's call the C handler to get the rest of the state info
1163 //
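// (The two outgoing arguments set up below correspond to something like
// ia64_init_handler(struct pt_regs *, struct switch_stack *); treat this
// prototype as indicative, the real one lives in the C MCA/INIT code.)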
1164 alloc r14=ar.pfs,0,0,2,0 // now it's safe (must be first in insn group!)
1165 ;;
1166 adds out0=16,sp // out0 = pointer to pt_regs
1167 ;;
1168 DO_SAVE_SWITCH_STACK
1169 .body
1170 adds out1=16,sp // out1 = pointer to switch_stack
1172 br.call.sptk.many rp=ia64_init_handler
1173 .ret1:
1175 return_from_init:
1176 br.sptk return_from_init
1177 END(ia64_monarch_init_handler)
1179 //
1180 // SAL to OS entry point for INIT on the slave processor
1181 // This has been defined for registration purposes with SAL
1182 // as a part of ia64_mca_init.
1183 //
1185 GLOBAL_ENTRY(ia64_slave_init_handler)
1186 1: br.sptk 1b
1187 END(ia64_slave_init_handler)