ia64/xen-unstable

view xen/arch/ia64/linux-xen/mca_asm.S @ 16785:af3550f53874

[IA64] domheap: Don't pin xenheap down. Now it's unnecessary.

Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
author Alex Williamson <alex.williamson@hp.com>
date Thu Jan 17 12:05:43 2008 -0700 (2008-01-17)
parents d0216f9e87c1
children 0b72d16e794b
line source
1 //
2 // assembly portion of the IA64 MCA handling
3 //
4 // Mods by cfleck to integrate into kernel build
5 // 00/03/15 davidm Added various stop bits to get a clean compile
6 //
7 // 00/03/29 cfleck Added code to save INIT handoff state in pt_regs format, switch to temp
8 // kstack, switch modes, jump to C INIT handler
9 //
10 // 02/01/04 J.Hall <jenna.s.hall@intel.com>
11 // Before entering virtual mode code:
12 // 1. Check for TLB CPU error
13 // 2. Restore current thread pointer to kr6
14 // 3. Move stack ptr 16 bytes to conform to C calling convention
15 //
16 // 04/11/12 Russ Anderson <rja@sgi.com>
17 // Added per cpu MCA/INIT stack save areas.
18 //
19 #include <linux/config.h>
20 #include <linux/threads.h>
22 #include <asm/asmmacro.h>
23 #include <asm/pgtable.h>
24 #include <asm/processor.h>
25 #include <asm/mca_asm.h>
26 #include <asm/mca.h>
27 #ifdef XEN
28 #include <asm/vhpt.h>
29 #endif
31 /*
32 * When we get a machine check, the kernel stack pointer is no longer
33 * valid, so we need to set a new stack pointer.
34 */
35 #define MINSTATE_PHYS /* Make sure stack access is physical for MINSTATE */
37 /*
38 * Needed for return context to SAL
39 */
40 #define IA64_MCA_SAME_CONTEXT 0
41 #define IA64_MCA_COLD_BOOT -2
43 #include "minstate.h"
45 /*
46 * SAL_TO_OS_MCA_HANDOFF_STATE (SAL 3.0 spec)
47 * 1. GR1 = OS GP
48 * 2. GR8 = PAL_PROC physical address
49 * 3. GR9 = SAL_PROC physical address
50 * 4. GR10 = SAL GP (physical)
51 * 5. GR11 = Rendez state
52 * 6. GR12 = Return address to location within SAL_CHECK
53 */
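/*
 * A rough C-level sketch of the save area that SAL_TO_OS_MCA_HANDOFF_STATE_SAVE
 * fills in below, one 8-byte slot per register in store order.  Field names are
 * illustrative only; the authoritative layout is ia64_mca_sal_to_os_state_t in
 * include/asm/mca.h.
 *
 *	struct sal_to_os_handoff {	// hypothetical mirror of the save area
 *		u64 os_gp;		// GR1
 *		u64 pal_proc;		// GR8
 *		u64 sal_proc;		// GR9
 *		u64 sal_gp;		// GR10
 *		u64 rendez_state;	// GR11
 *		u64 sal_check_ra;	// GR12
 *		u64 pal_min_state;	// GR17 (PAL min-state save area)
 *		u64 proc_state_param;	// GR18
 *	};
 */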
54 #ifdef XEN
55 #define SAL_TO_OS_MCA_HANDOFF_STATE_SAVE(_tmp) \
56 GET_THIS_PADDR(_tmp, ia64_sal_to_os_handoff_state_addr);; \
57 ld8 _tmp=[_tmp];; \
58 st8 [_tmp]=r1,0x08;; \
59 st8 [_tmp]=r8,0x08;; \
60 st8 [_tmp]=r9,0x08;; \
61 st8 [_tmp]=r10,0x08;; \
62 st8 [_tmp]=r11,0x08;; \
63 st8 [_tmp]=r12,0x08;; \
64 st8 [_tmp]=r17,0x08;; \
65 st8 [_tmp]=r18,0x08
66 #else
67 #define SAL_TO_OS_MCA_HANDOFF_STATE_SAVE(_tmp) \
68 LOAD_PHYSICAL(p0, _tmp, ia64_sal_to_os_handoff_state);; \
69 st8 [_tmp]=r1,0x08;; \
70 st8 [_tmp]=r8,0x08;; \
71 st8 [_tmp]=r9,0x08;; \
72 st8 [_tmp]=r10,0x08;; \
73 st8 [_tmp]=r11,0x08;; \
74 st8 [_tmp]=r12,0x08;; \
75 st8 [_tmp]=r17,0x08;; \
76 st8 [_tmp]=r18,0x08
77 #endif /* XEN */
79 /*
80 * OS_MCA_TO_SAL_HANDOFF_STATE (SAL 3.0 spec)
81 * (p6) is executed if we never entered virtual mode (TLB error)
82 * (p7) is executed if we entered virtual mode as expected (normal case)
83 * 1. GR8 = OS_MCA return status
84 * 2. GR9 = SAL GP (physical)
85 * 3. GR10 = 0/1 returning same/new context
86 * 4. GR22 = New min state save area pointer
87 * returns ptr to SAL rtn save loc in _tmp
88 */
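/*
 * Corresponding OS-to-SAL sketch (field names illustrative; see
 * ia64_os_to_sal_handoff_state and include/asm/mca.h for the real layout):
 *
 *	struct os_to_sal_handoff {	// hypothetical mirror
 *		u64 os_status;		// -> GR8
 *		u64 sal_gp;		// -> GR9
 *		u64 context;		// -> GR10 (same/new context)
 *		u64 new_min_state;	// -> GR22
 *		u64 sal_check_ra;	// _tmp is left pointing here
 *	};
 */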
89 #define OS_MCA_TO_SAL_HANDOFF_STATE_RESTORE(_tmp) \
90 movl _tmp=ia64_os_to_sal_handoff_state;; \
91 DATA_VA_TO_PA(_tmp);; \
92 ld8 r8=[_tmp],0x08;; \
93 ld8 r9=[_tmp],0x08;; \
94 ld8 r10=[_tmp],0x08;; \
95 ld8 r22=[_tmp],0x08;;
96 // now _tmp is pointing to SAL rtn save location
98 /*
99 * COLD_BOOT_HANDOFF_STATE() sets ia64_mca_os_to_sal_state
100 * imots_os_status=IA64_MCA_COLD_BOOT
101 * imots_sal_gp=SAL GP
102 * imots_context=IA64_MCA_SAME_CONTEXT
103 * imots_new_min_state=Min state save area pointer
104 * imots_sal_check_ra=Return address to location within SAL_CHECK
105 *
106 */
107 #ifdef XEN
108 #define COLD_BOOT_HANDOFF_STATE(sal_to_os_handoff,os_to_sal_handoff,tmp)\
109 movl tmp=IA64_MCA_COLD_BOOT; \
110 GET_THIS_PADDR(r2,ia64_sal_to_os_handoff_state_addr);; \
111 ld8 sal_to_os_handoff=[sal_to_os_handoff];; \
112 movl os_to_sal_handoff=ia64_os_to_sal_handoff_state;; \
113 dep os_to_sal_handoff = 0, os_to_sal_handoff, 60, 4;; \
114 /*DATA_VA_TO_PA(os_to_sal_handoff);;*/ \
115 st8 [os_to_sal_handoff]=tmp,8;; \
116 ld8 tmp=[sal_to_os_handoff],48;; \
117 st8 [os_to_sal_handoff]=tmp,8;; \
118 movl tmp=IA64_MCA_SAME_CONTEXT;; \
119 st8 [os_to_sal_handoff]=tmp,8;; \
120 ld8 tmp=[sal_to_os_handoff],-8;; \
121 st8 [os_to_sal_handoff]=tmp,8;; \
122 ld8 tmp=[sal_to_os_handoff];; \
123 st8 [os_to_sal_handoff]=tmp;;
124 #else /* XEN */
125 #define COLD_BOOT_HANDOFF_STATE(sal_to_os_handoff,os_to_sal_handoff,tmp)\
126 movl tmp=IA64_MCA_COLD_BOOT; \
127 movl sal_to_os_handoff=__pa(ia64_sal_to_os_handoff_state); \
128 movl os_to_sal_handoff=__pa(ia64_os_to_sal_handoff_state);; \
129 st8 [os_to_sal_handoff]=tmp,8;; \
130 ld8 tmp=[sal_to_os_handoff],48;; \
131 st8 [os_to_sal_handoff]=tmp,8;; \
132 movl tmp=IA64_MCA_SAME_CONTEXT;; \
133 st8 [os_to_sal_handoff]=tmp,8;; \
134 ld8 tmp=[sal_to_os_handoff],-8;; \
135 st8 [os_to_sal_handoff]=tmp,8;; \
136 ld8 tmp=[sal_to_os_handoff];; \
137 st8 [os_to_sal_handoff]=tmp;;
138 #endif /* XEN */
140 #define GET_IA64_MCA_DATA(reg) \
141 GET_THIS_PADDR(reg, ia64_mca_data) \
142 ;; \
143 ld8 reg=[reg]
145 .global ia64_os_mca_dispatch
146 .global ia64_os_mca_dispatch_end
147 #ifndef XEN
148 .global ia64_sal_to_os_handoff_state
149 .global ia64_os_to_sal_handoff_state
150 #endif
151 .global ia64_do_tlb_purge
153 .text
154 .align 16
156 #ifdef XEN
157 /*
158 * void set_per_cpu_data(void)
159 * {
160 * int i;
161 * for (i = 0; i < 64; i++) {
162 * if (ia64_mca_tlb_list[i].cr_lid == ia64_getreg(_IA64_REG_CR_LID)) {
163 * ia64_set_kr(IA64_KR_PER_CPU_DATA, ia64_mca_tlb_list[i].percpu_paddr);
164 * return;
165 * }
166 * }
167 * while(1); // Endless loop on error
168 * }
169 */
170 #define SET_PER_CPU_DATA() \
171 LOAD_PHYSICAL(p0,r2,ia64_mca_tlb_list);; \
172 mov r7 = r0; \
173 mov r6 = r0;; \
174 adds r3 = IA64_MCA_PERCPU_OFFSET, r2; \
175 1: add r4 = r6, r2; \
176 mov r5=cr.lid;; \
177 adds r7 = 1, r7; \
178 ld8 r4 = [r4];; \
179 cmp.ne p6, p7 = r5, r4; \
180 cmp4.lt p8, p9 = NR_CPUS-1, r7; \
181 (p7) br.cond.dpnt 3f; \
182 adds r6 = 16, r6; \
183 (p9) br.cond.sptk 1b; \
184 2: br 2b;; /* Endless loop on error */ \
185 3: add r4 = r6, r3;; \
186 ld8 r4 = [r4];; \
187 mov ar.k3=r4
189 /*
190 * GET_VA_VCPU_VHPT_MADDR() emulates 'reg = __va_ul(vcpu_vhpt_maddr(v))'.
191 */
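/*
 * Roughly, for the CONFIG_XEN_IA64_PERVCPU_VHPT build (C sketch; field names
 * follow the offsets used below, and vhpt_paddr is the per-cpu VHPT fallback):
 *
 *	v = saved IA64_KR(CURRENT);
 *	if (v->arch.vhpt_page == NULL)
 *		reg = __va(per-cpu vhpt_paddr);		// per-cpu VHPT
 *	else
 *		reg = __va(v->arch.vhpt_maddr);		// per-vcpu VHPT
 *
 * Without per-vcpu VHPT support, only the per-cpu case applies.
 */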
192 #ifdef CONFIG_XEN_IA64_PERVCPU_VHPT
193 #define HAS_PERVCPU_VHPT_MASK 0x2
194 #define GET_VA_VCPU_VHPT_MADDR(reg,tmp) \
195 GET_THIS_PADDR(reg,cpu_kr);; \
196 add reg=IA64_KR_CURRENT_OFFSET,reg;; \
197 ld8 reg=[reg];; \
198 dep tmp=0,reg,60,4;; /* V to P */ \
199 add tmp=IA64_VCPU_VHPT_PAGE_OFFSET,tmp;; \
200 ld8 tmp=[tmp];; \
201 cmp.eq p6,p0=tmp,r0; /* v->arch.vhpt_page == NULL */ \
202 (p6) br.cond.sptk 1f; \
203 add reg=IA64_VCPU_VHPT_MADDR_OFFSET,reg;; \
204 dep reg=0,reg,60,4;; /* V to P */ \
205 ld8 reg=[reg];; \
206 dep reg=-1,reg,60,4; /* P to V */ \
207 br.sptk 2f; \
208 1: \
209 GET_THIS_PADDR(reg, vhpt_paddr);; \
210 ld8 reg=[reg];; \
211 dep reg=-1,reg,60,4; /* P to V */ \
212 2:
213 #else /* CONFIG_XEN_IA64_PERVCPU_VHPT */
214 #define GET_VA_VCPU_VHPT_MADDR(reg,tmp) \
215 GET_THIS_PADDR(reg, vhpt_paddr);; \
216 ld8 reg=[reg];; \
217 dep reg=-1,reg,60,4 /* P to V */
218 #endif /* CONFIG_XEN_IA64_PERVCPU_VHPT */
219 #endif /* XEN */
221 /*
222 * Just the TLB purge part is moved to a separate function
223 * so we can re-use the code for cpu hotplug as well.
224 * The caller should set up b1 so we can branch to it once the
225 * TLB flush is complete.
226 */
228 ia64_do_tlb_purge:
229 #ifdef XEN
230 // This needs to be called in order for GET_THIS_PADDR to work
231 SET_PER_CPU_DATA();;
232 #endif
233 #define O(member) IA64_CPUINFO_##member##_OFFSET
235 GET_THIS_PADDR(r2, cpu_info) // load phys addr of cpu_info into r2
236 ;;
237 addl r17=O(PTCE_STRIDE),r2
238 addl r2=O(PTCE_BASE),r2
239 ;;
240 ld8 r18=[r2],(O(PTCE_COUNT)-O(PTCE_BASE));; // r18=ptce_base
241 ld4 r19=[r2],4 // r19=ptce_count[0]
242 ld4 r21=[r17],4 // r21=ptce_stride[0]
243 ;;
244 ld4 r20=[r2] // r20=ptce_count[1]
245 ld4 r22=[r17] // r22=ptce_stride[1]
246 mov r24=0
247 ;;
248 adds r20=-1,r20
249 ;;
250 #undef O
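// The two nested loops below step through the local TLB with ptc.e using the
// PAL-provided purge parameters loaded above; a rough C equivalent (in the
// spirit of local_flush_tlb_all()) is:
//
//	addr = ptce_base;
//	for (i = 0; i < ptce_count[0]; i++) {
//		for (j = 0; j < ptce_count[1]; j++) {
//			ia64_ptce(addr);
//			addr += ptce_stride[1];
//		}
//		addr += ptce_stride[0];
//	}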
252 2:
253 cmp.ltu p6,p7=r24,r19
254 (p7) br.cond.dpnt.few 4f
255 mov ar.lc=r20
256 3:
257 ptc.e r18
258 ;;
259 add r18=r22,r18
260 br.cloop.sptk.few 3b
261 ;;
262 add r18=r21,r18
263 add r24=1,r24
264 ;;
265 br.sptk.few 2b
266 4:
267 srlz.i // srlz.i implies srlz.d
268 ;;
270 // Now purge addresses formerly mapped by TR registers
271 // 1. Purge ITR&DTR for kernel.
272 movl r16=KERNEL_START
273 mov r18=KERNEL_TR_PAGE_SHIFT<<2
274 ;;
275 ptr.i r16, r18
276 ptr.d r16, r18
277 ;;
278 srlz.i
279 ;;
280 srlz.d
281 ;;
282 // 2. Purge DTR for PERCPU data.
283 movl r16=PERCPU_ADDR
284 mov r18=PERCPU_PAGE_SHIFT<<2
285 ;;
286 ptr.d r16,r18
287 ;;
288 srlz.d
289 ;;
290 // 3. Purge ITR for PAL code.
291 GET_THIS_PADDR(r2, ia64_mca_pal_base)
292 ;;
293 ld8 r16=[r2]
294 mov r18=IA64_GRANULE_SHIFT<<2
295 ;;
296 ptr.i r16,r18
297 ;;
298 srlz.i
299 ;;
300 // 4. Purge DTR for stack.
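// (the purged granule is va = PAGE_OFFSET + (saved IA64_KR(CURRENT_STACK)
//  << IA64_GRANULE_SHIFT), i.e. the mapping of the current kernel stack)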
301 #ifdef XEN
302 // Kernel registers are saved in a per_cpu cpu_kr_ia64_t
303 // to allow the kernel registers themselves to be used by domains.
304 GET_THIS_PADDR(r2, cpu_kr);;
305 add r2=IA64_KR_CURRENT_STACK_OFFSET,r2
306 ;;
307 ld8 r16=[r2]
308 #else
309 mov r16=IA64_KR(CURRENT_STACK)
310 #endif
311 ;;
312 shl r16=r16,IA64_GRANULE_SHIFT
313 movl r19=PAGE_OFFSET
314 ;;
315 add r16=r19,r16
316 mov r18=IA64_GRANULE_SHIFT<<2
317 ;;
318 ptr.d r16,r18
319 ;;
320 srlz.i
321 ;;
322 #ifdef XEN
323 // 5. VHPT
324 #if VHPT_ENABLED
325 GET_VA_VCPU_VHPT_MADDR(r2,r3);;
326 dep r16=0,r2,0,IA64_GRANULE_SHIFT
327 mov r18=IA64_GRANULE_SHIFT<<2
328 ;;
329 ptr.d r16,r18
330 ;;
331 srlz.d
332 ;;
333 #endif
334 #endif
335 // Now branch away to caller.
336 br.sptk.many b1
337 ;;
339 ia64_os_mca_dispatch:
341 // Serialize all MCA processing
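// (roughly: while (xchg(&ia64_mca_serialize, 1) != 0) ; -- spin until we
//  own the serialization word; it is released with st8.rel near the end
//  of ia64_os_mca_dispatch)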
342 mov r3=1;;
343 LOAD_PHYSICAL(p0,r2,ia64_mca_serialize);;
344 ia64_os_mca_spin:
345 xchg8 r4=[r2],r3;;
346 cmp.ne p6,p0=r4,r0
347 (p6) br ia64_os_mca_spin
349 #ifdef XEN
350 SET_PER_CPU_DATA();;
351 #endif
352 // Save the SAL to OS MCA handoff state as defined
353 // by SAL SPEC 3.0
354 // NOTE : The order in which the state gets saved
355 // is dependent on the way the C-structure
356 // for ia64_mca_sal_to_os_state_t has been
357 // defined in include/asm/mca.h
358 SAL_TO_OS_MCA_HANDOFF_STATE_SAVE(r2)
359 ;;
361 // LOG PROCESSOR STATE INFO FROM HERE ON..
362 begin_os_mca_dump:
363 br ia64_os_mca_proc_state_dump;;
365 ia64_os_mca_done_dump:
367 #ifdef XEN
368 // Set current to ar.k6
369 GET_THIS_PADDR(r2,cpu_kr);;
370 add r2=IA64_KR_CURRENT_OFFSET,r2;;
371 ld8 r2=[r2];;
372 mov ar.k6=r2;;
374 GET_THIS_PADDR(r2,ia64_sal_to_os_handoff_state_addr);;
375 ld8 r2=[r2];;
376 adds r16=56,r2
377 #else
378 LOAD_PHYSICAL(p0,r16,ia64_sal_to_os_handoff_state+56)
379 #endif
380 ;;
381 ld8 r18=[r16] // Get processor state parameter on existing PALE_CHECK.
382 ;;
383 tbit.nz p6,p7=r18,60
384 (p7) br.spnt done_tlb_purge_and_reload
386 // The following code purges TC and TR entries, then reloads the TR entries.
387 // Purge percpu data TC entries.
388 begin_tlb_purge_and_reload:
389 movl r18=ia64_reload_tr;;
390 LOAD_PHYSICAL(p0,r18,ia64_reload_tr);;
391 mov b1=r18;;
392 br.sptk.many ia64_do_tlb_purge;;
394 ia64_reload_tr:
395 // Finally reload the TR registers.
396 // 1. Reload DTR/ITR registers for kernel.
397 mov r18=KERNEL_TR_PAGE_SHIFT<<2
398 movl r17=KERNEL_START
399 ;;
400 mov cr.itir=r18
401 mov cr.ifa=r17
402 mov r16=IA64_TR_KERNEL
403 mov r19=ip
404 movl r18=PAGE_KERNEL
405 ;;
406 dep r17=0,r19,0, KERNEL_TR_PAGE_SHIFT
407 ;;
408 or r18=r17,r18
409 ;;
410 itr.i itr[r16]=r18
411 ;;
412 itr.d dtr[r16]=r18
413 ;;
414 srlz.i
415 srlz.d
416 ;;
417 // 2. Reload DTR register for PERCPU data.
418 GET_THIS_PADDR(r2, ia64_mca_per_cpu_pte)
419 ;;
420 movl r16=PERCPU_ADDR // vaddr
421 movl r18=PERCPU_PAGE_SHIFT<<2
422 ;;
423 mov cr.itir=r18
424 mov cr.ifa=r16
425 ;;
426 ld8 r18=[r2] // load per-CPU PTE
427 mov r16=IA64_TR_PERCPU_DATA;
428 ;;
429 itr.d dtr[r16]=r18
430 ;;
431 srlz.d
432 ;;
433 // 3. Reload ITR for PAL code.
434 GET_THIS_PADDR(r2, ia64_mca_pal_pte)
435 ;;
436 ld8 r18=[r2] // load PAL PTE
437 ;;
438 GET_THIS_PADDR(r2, ia64_mca_pal_base)
439 ;;
440 ld8 r16=[r2] // load PAL vaddr
441 mov r19=IA64_GRANULE_SHIFT<<2
442 ;;
443 mov cr.itir=r19
444 mov cr.ifa=r16
445 mov r20=IA64_TR_PALCODE
446 ;;
447 itr.i itr[r20]=r18
448 ;;
449 srlz.i
450 ;;
451 // 4. Reload DTR for stack.
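// (C-ish sketch of this step: pfn = saved IA64_KR(CURRENT_STACK);
//  va  = PAGE_OFFSET + (pfn << IA64_GRANULE_SHIFT);
//  pte = PAGE_KERNEL | (pfn << IA64_GRANULE_SHIFT);
//  then insert a granule-sized dtr[IA64_TR_CURRENT_STACK] mapping va -> pte)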
452 #ifdef XEN
453 // Kernel registers are saved in a per_cpu cpu_kr_ia64_t
454 // to allow the kernel registers themselves to be used by domains.
455 GET_THIS_PADDR(r2, cpu_kr);;
456 add r2=IA64_KR_CURRENT_STACK_OFFSET,r2
457 ;;
458 ld8 r16=[r2]
459 #else
460 mov r16=IA64_KR(CURRENT_STACK)
461 #endif
462 ;;
463 shl r16=r16,IA64_GRANULE_SHIFT
464 movl r19=PAGE_OFFSET
465 ;;
466 add r18=r19,r16
467 movl r20=PAGE_KERNEL
468 ;;
469 add r16=r20,r16
470 mov r19=IA64_GRANULE_SHIFT<<2
471 ;;
472 mov cr.itir=r19
473 mov cr.ifa=r18
474 mov r20=IA64_TR_CURRENT_STACK
475 ;;
476 itr.d dtr[r20]=r16
477 ;;
478 srlz.d
479 ;;
480 #ifdef XEN
481 .reload_vhpt:
482 // 5. VHPT
483 #if VHPT_ENABLED
484 GET_VA_VCPU_VHPT_MADDR(r2,r3);;
486 // avoid overlapping with stack TR
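// (i.e. if (vhpt_va >> IA64_GRANULE_SHIFT) == saved IA64_KR(CURRENT_STACK),
//  the stack TR inserted above already covers the VHPT granule, so skip it)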
487 shr.u r17=r2,IA64_GRANULE_SHIFT
488 GET_THIS_PADDR(r3, cpu_kr);;
489 add r3=IA64_KR_CURRENT_STACK_OFFSET,r3
490 ;;
491 ld8 r3=[r3]
492 ;;
493 cmp.eq p7,p0=r3,r17
494 (p7) br.cond.sptk .overlap_vhpt
495 ;;
497 dep r16=0,r2,0,IA64_GRANULE_SHIFT
498 movl r20=PAGE_KERNEL
499 ;;
500 mov r18=IA64_TR_VHPT
501 dep r17=0,r16,60,4 // physical address of
502 // va_vhpt & ~(IA64_GRANULE_SIZE - 1)
503 mov r19=IA64_GRANULE_SHIFT<<2
504 ;;
505 or r17=r17,r20 // construct PA | page properties
506 mov cr.itir=r19
507 mov cr.ifa=r16
508 ;;
509 itr.d dtr[r18]=r17 // wire in new mapping...
510 ;;
511 srlz.d
512 ;;
513 .overlap_vhpt:
514 #endif
515 #endif
516 br.sptk.many done_tlb_purge_and_reload
517 err:
518 COLD_BOOT_HANDOFF_STATE(r20,r21,r22)
519 br.sptk.many ia64_os_mca_done_restore
521 done_tlb_purge_and_reload:
523 // Set up a new stack frame for OS_MCA handling
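// (roughly: switch the RSE backing store to the per-cpu MCA rbstore area,
//  build a call frame in the MCA stackframe area, and point sp at the top of
//  the MCA stack; "area" names are shorthand for the IA64_MCA_CPU_*_OFFSETs)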
524 GET_IA64_MCA_DATA(r2)
525 ;;
526 add r3 = IA64_MCA_CPU_STACKFRAME_OFFSET, r2
527 add r2 = IA64_MCA_CPU_RBSTORE_OFFSET, r2
528 ;;
529 rse_switch_context(r6,r3,r2);; // RSC management in this new context
531 GET_IA64_MCA_DATA(r2)
532 ;;
533 add r2 = IA64_MCA_CPU_STACK_OFFSET+IA64_MCA_STACK_SIZE-16, r2
534 ;;
535 mov r12=r2 // establish new stack-pointer
537 // Enter virtual mode from physical mode
538 VIRTUAL_MODE_ENTER(r2, r3, ia64_os_mca_virtual_begin, r4)
539 ia64_os_mca_virtual_begin:
541 // Call virtual mode handler
542 movl r2=ia64_mca_ucmc_handler;;
543 mov b6=r2;;
544 br.call.sptk.many b0=b6;;
545 .ret0:
546 // Revert back to physical mode before going back to SAL
547 PHYSICAL_MODE_ENTER(r2, r3, ia64_os_mca_virtual_end, r4)
548 ia64_os_mca_virtual_end:
550 // restore the original stack frame here
551 GET_IA64_MCA_DATA(r2)
552 ;;
553 add r2 = IA64_MCA_CPU_STACKFRAME_OFFSET, r2
554 ;;
555 movl r4=IA64_PSR_MC
556 ;;
557 rse_return_context(r4,r3,r2) // switch from interrupt context for RSE
559 // let us restore all the registers from our PSI structure
560 mov r8=gp
561 ;;
562 begin_os_mca_restore:
563 br ia64_os_mca_proc_state_restore;;
565 ia64_os_mca_done_restore:
566 OS_MCA_TO_SAL_HANDOFF_STATE_RESTORE(r2);;
567 // branch back to SAL_CHECK
568 ld8 r3=[r2];;
569 mov b0=r3;; // SAL_CHECK return address
571 // release lock
572 movl r3=ia64_mca_serialize;;
573 DATA_VA_TO_PA(r3);;
574 st8.rel [r3]=r0
576 br b0
577 ;;
578 ia64_os_mca_dispatch_end:
579 //EndMain//////////////////////////////////////////////////////////////////////
582 //++
583 // Name:
584 // ia64_os_mca_proc_state_dump()
585 //
586 // Stub Description:
587 //
588 // This stub dumps the processor state during MCHK to a data area
589 //
590 //--
592 ia64_os_mca_proc_state_dump:
593 // Save bank 1 GRs 16-31 which will be used by c-language code when we switch
594 // to virtual addressing mode.
595 GET_IA64_MCA_DATA(r2)
596 ;;
597 add r2 = IA64_MCA_CPU_PROC_STATE_DUMP_OFFSET, r2
598 ;;
599 // save ar.NaT
600 mov r5=ar.unat // ar.unat
602 // save banked GRs 16-31 along with NaT bits
603 bsw.1;;
604 st8.spill [r2]=r16,8;;
605 st8.spill [r2]=r17,8;;
606 st8.spill [r2]=r18,8;;
607 st8.spill [r2]=r19,8;;
608 st8.spill [r2]=r20,8;;
609 st8.spill [r2]=r21,8;;
610 st8.spill [r2]=r22,8;;
611 st8.spill [r2]=r23,8;;
612 st8.spill [r2]=r24,8;;
613 st8.spill [r2]=r25,8;;
614 st8.spill [r2]=r26,8;;
615 st8.spill [r2]=r27,8;;
616 st8.spill [r2]=r28,8;;
617 st8.spill [r2]=r29,8;;
618 st8.spill [r2]=r30,8;;
619 st8.spill [r2]=r31,8;;
621 mov r4=ar.unat;;
622 st8 [r2]=r4,8 // save User NaT bits for r16-r31
623 mov ar.unat=r5 // restore original unat
624 bsw.0;;
626 //save BRs
627 add r4=8,r2 // duplicate r2 in r4
628 add r6=2*8,r2 // duplicate r2 in r6
630 mov r3=b0
631 mov r5=b1
632 mov r7=b2;;
633 st8 [r2]=r3,3*8
634 st8 [r4]=r5,3*8
635 st8 [r6]=r7,3*8;;
637 mov r3=b3
638 mov r5=b4
639 mov r7=b5;;
640 st8 [r2]=r3,3*8
641 st8 [r4]=r5,3*8
642 st8 [r6]=r7,3*8;;
644 mov r3=b6
645 mov r5=b7;;
646 st8 [r2]=r3,2*8
647 st8 [r4]=r5,2*8;;
649 cSaveCRs:
650 // save CRs
651 add r4=8,r2 // duplicate r2 in r4
652 add r6=2*8,r2 // duplicate r2 in r6
654 mov r3=cr.dcr
655 mov r5=cr.itm
656 mov r7=cr.iva;;
658 st8 [r2]=r3,8*8
659 st8 [r4]=r5,3*8
660 st8 [r6]=r7,3*8;; // 48 byte increments
662 mov r3=cr.pta;;
663 st8 [r2]=r3,8*8;; // 64 byte increments
665 // if PSR.ic=1, reading interruption registers causes an illegal operation fault
666 mov r3=psr;;
667 tbit.nz.unc p6,p0=r3,PSR_IC;; // PSI Valid Log bit pos. test
668 (p6) st8 [r2]=r0,9*8+160 // increment by 232 bytes
669 begin_skip_intr_regs:
670 (p6) br SkipIntrRegs;;
672 add r4=8,r2 // duplicate r2 in r4
673 add r6=2*8,r2 // duplicate r2 in r6
675 mov r3=cr.ipsr
676 mov r5=cr.isr
677 mov r7=r0;;
678 st8 [r2]=r3,3*8
679 st8 [r4]=r5,3*8
680 st8 [r6]=r7,3*8;;
682 mov r3=cr.iip
683 mov r5=cr.ifa
684 mov r7=cr.itir;;
685 st8 [r2]=r3,3*8
686 st8 [r4]=r5,3*8
687 st8 [r6]=r7,3*8;;
689 mov r3=cr.iipa
690 mov r5=cr.ifs
691 mov r7=cr.iim;;
692 st8 [r2]=r3,3*8
693 st8 [r4]=r5,3*8
694 st8 [r6]=r7,3*8;;
696 mov r3=cr25;; // cr.iha
697 st8 [r2]=r3,160;; // 160 byte increment
699 SkipIntrRegs:
700 st8 [r2]=r0,152;; // another 152 byte increment
702 add r4=8,r2 // duplicate r2 in r4
703 add r6=2*8,r2 // duplicate r2 in r6
705 mov r3=cr.lid
706 // mov r5=cr.ivr // cr.ivr, don't read it
707 mov r7=cr.tpr;;
708 st8 [r2]=r3,3*8
709 st8 [r4]=r5,3*8
710 st8 [r6]=r7,3*8;;
712 mov r3=r0 // cr.eoi => cr67
713 mov r5=r0 // cr.irr0 => cr68
714 mov r7=r0;; // cr.irr1 => cr69
715 st8 [r2]=r3,3*8
716 st8 [r4]=r5,3*8
717 st8 [r6]=r7,3*8;;
719 mov r3=r0 // cr.irr2 => cr70
720 mov r5=r0 // cr.irr3 => cr71
721 mov r7=cr.itv;;
722 st8 [r2]=r3,3*8
723 st8 [r4]=r5,3*8
724 st8 [r6]=r7,3*8;;
726 mov r3=cr.pmv
727 mov r5=cr.cmcv;;
728 st8 [r2]=r3,7*8
729 st8 [r4]=r5,7*8;;
731 mov r3=r0 // cr.lrr0 => cr80
732 mov r5=r0;; // cr.lrr1 => cr81
733 st8 [r2]=r3,23*8
734 st8 [r4]=r5,23*8;;
736 adds r2=25*8,r2;;
738 cSaveARs:
739 // save ARs
740 add r4=8,r2 // duplicate r2 in r4
741 add r6=2*8,r2 // duplicate r2 in r6
743 mov r3=ar.k0
744 mov r5=ar.k1
745 mov r7=ar.k2;;
746 st8 [r2]=r3,3*8
747 st8 [r4]=r5,3*8
748 st8 [r6]=r7,3*8;;
750 mov r3=ar.k3
751 mov r5=ar.k4
752 mov r7=ar.k5;;
753 st8 [r2]=r3,3*8
754 st8 [r4]=r5,3*8
755 st8 [r6]=r7,3*8;;
757 mov r3=ar.k6
758 mov r5=ar.k7
759 mov r7=r0;; // ar.kr8
760 st8 [r2]=r3,10*8
761 st8 [r4]=r5,10*8
762 st8 [r6]=r7,10*8;; // increment by 72 bytes
764 mov r3=ar.rsc
765 mov ar.rsc=r0 // put RSE in enforced lazy mode
766 mov r5=ar.bsp
767 ;;
768 mov r7=ar.bspstore;;
769 st8 [r2]=r3,3*8
770 st8 [r4]=r5,3*8
771 st8 [r6]=r7,3*8;;
773 mov r3=ar.rnat;;
774 st8 [r2]=r3,8*13 // increment by 13x8 bytes
776 mov r3=ar.ccv;;
777 st8 [r2]=r3,8*4
779 mov r3=ar.unat;;
780 st8 [r2]=r3,8*4
782 mov r3=ar.fpsr;;
783 st8 [r2]=r3,8*4
785 mov r3=ar.itc;;
786 st8 [r2]=r3,160 // 160
788 mov r3=ar.pfs;;
789 st8 [r2]=r3,8
791 mov r3=ar.lc;;
792 st8 [r2]=r3,8
794 mov r3=ar.ec;;
795 st8 [r2]=r3
796 add r2=8*62,r2 //padding
798 // save RRs
799 mov ar.lc=0x08-1
800 movl r4=0x00;;
802 cStRR:
803 dep.z r5=r4,61,3;;
804 mov r3=rr[r5];;
805 st8 [r2]=r3,8
806 add r4=1,r4
807 br.cloop.sptk.few cStRR
808 ;;
809 end_os_mca_dump:
810 br ia64_os_mca_done_dump;;
812 //EndStub//////////////////////////////////////////////////////////////////////
815 //++
816 // Name:
817 // ia64_os_mca_proc_state_restore()
818 //
819 // Stub Description:
820 //
821 // This is a stub to restore the saved processor state during MCHK
822 //
823 //--
825 ia64_os_mca_proc_state_restore:
827 // Restore bank1 GR16-31
828 GET_IA64_MCA_DATA(r2)
829 ;;
830 add r2 = IA64_MCA_CPU_PROC_STATE_DUMP_OFFSET, r2
832 restore_GRs: // restore bank-1 GRs 16-31
833 bsw.1;;
834 add r3=16*8,r2;; // to get to NaT of GR 16-31
835 ld8 r3=[r3];;
836 mov ar.unat=r3;; // first restore NaT
838 ld8.fill r16=[r2],8;;
839 ld8.fill r17=[r2],8;;
840 ld8.fill r18=[r2],8;;
841 ld8.fill r19=[r2],8;;
842 ld8.fill r20=[r2],8;;
843 ld8.fill r21=[r2],8;;
844 ld8.fill r22=[r2],8;;
845 ld8.fill r23=[r2],8;;
846 ld8.fill r24=[r2],8;;
847 ld8.fill r25=[r2],8;;
848 ld8.fill r26=[r2],8;;
849 ld8.fill r27=[r2],8;;
850 ld8.fill r28=[r2],8;;
851 ld8.fill r29=[r2],8;;
852 ld8.fill r30=[r2],8;;
853 ld8.fill r31=[r2],8;;
855 ld8 r3=[r2],8;; // increment to skip NaT
856 bsw.0;;
858 restore_BRs:
859 add r4=8,r2 // duplicate r2 in r4
860 add r6=2*8,r2;; // duplicate r2 in r6
862 ld8 r3=[r2],3*8
863 ld8 r5=[r4],3*8
864 ld8 r7=[r6],3*8;;
865 mov b0=r3
866 mov b1=r5
867 mov b2=r7;;
869 ld8 r3=[r2],3*8
870 ld8 r5=[r4],3*8
871 ld8 r7=[r6],3*8;;
872 mov b3=r3
873 mov b4=r5
874 mov b5=r7;;
876 ld8 r3=[r2],2*8
877 ld8 r5=[r4],2*8;;
878 mov b6=r3
879 mov b7=r5;;
881 restore_CRs:
882 add r4=8,r2 // duplicate r2 in r4
883 add r6=2*8,r2;; // duplicate r2 in r6
885 ld8 r3=[r2],8*8
886 ld8 r5=[r4],3*8
887 ld8 r7=[r6],3*8;; // 48 byte increments
888 mov cr.dcr=r3
889 mov cr.itm=r5
890 mov cr.iva=r7;;
892 ld8 r3=[r2],8*8;; // 64 byte increments
893 // mov cr.pta=r3
896 // if PSR.ic=1, writing the interruption registers causes an illegal operation fault
897 mov r3=psr;;
898 tbit.nz.unc p6,p0=r3,PSR_IC;; // PSI Valid Log bit pos. test
899 (p6) st8 [r2]=r0,9*8+160 // increment by 232 bytes
901 begin_rskip_intr_regs:
902 (p6) br rSkipIntrRegs;;
904 add r4=8,r2 // duplicate r2 in r4
905 add r6=2*8,r2;; // duplicate r2 in r6
907 ld8 r3=[r2],3*8
908 ld8 r5=[r4],3*8
909 ld8 r7=[r6],3*8;;
910 mov cr.ipsr=r3
911 // mov cr.isr=r5 // cr.isr is read only
913 ld8 r3=[r2],3*8
914 ld8 r5=[r4],3*8
915 ld8 r7=[r6],3*8;;
916 mov cr.iip=r3
917 mov cr.ifa=r5
918 mov cr.itir=r7;;
920 ld8 r3=[r2],3*8
921 ld8 r5=[r4],3*8
922 ld8 r7=[r6],3*8;;
923 mov cr.iipa=r3
924 mov cr.ifs=r5
925 mov cr.iim=r7
927 ld8 r3=[r2],160;; // 160 byte increment
928 mov cr.iha=r3
930 rSkipIntrRegs:
931 ld8 r3=[r2],152;; // another 152 byte inc.
933 add r4=8,r2 // duplicate r2 in r4
934 add r6=2*8,r2;; // duplicate r2 in r6
936 ld8 r3=[r2],8*3
937 ld8 r5=[r4],8*3
938 ld8 r7=[r6],8*3;;
939 mov cr.lid=r3
940 // mov cr.ivr=r5 // cr.ivr is read only
941 mov cr.tpr=r7;;
943 ld8 r3=[r2],8*3
944 ld8 r5=[r4],8*3
945 ld8 r7=[r6],8*3;;
946 // mov cr.eoi=r3
947 // mov cr.irr0=r5 // cr.irr0 is read only
948 // mov cr.irr1=r7;; // cr.irr1 is read only
950 ld8 r3=[r2],8*3
951 ld8 r5=[r4],8*3
952 ld8 r7=[r6],8*3;;
953 // mov cr.irr2=r3 // cr.irr2 is read only
954 // mov cr.irr3=r5 // cr.irr3 is read only
955 mov cr.itv=r7;;
957 ld8 r3=[r2],8*7
958 ld8 r5=[r4],8*7;;
959 mov cr.pmv=r3
960 mov cr.cmcv=r5;;
962 ld8 r3=[r2],8*23
963 ld8 r5=[r4],8*23;;
964 adds r2=8*23,r2
965 adds r4=8*23,r4;;
966 // mov cr.lrr0=r3
967 // mov cr.lrr1=r5
969 adds r2=8*2,r2;;
971 restore_ARs:
972 add r4=8,r2 // duplicate r2 in r4
973 add r6=2*8,r2;; // duplicate r2 in r6
975 ld8 r3=[r2],3*8
976 ld8 r5=[r4],3*8
977 ld8 r7=[r6],3*8;;
978 mov ar.k0=r3
979 mov ar.k1=r5
980 mov ar.k2=r7;;
982 ld8 r3=[r2],3*8
983 ld8 r5=[r4],3*8
984 ld8 r7=[r6],3*8;;
985 mov ar.k3=r3
986 mov ar.k4=r5
987 mov ar.k5=r7;;
989 ld8 r3=[r2],10*8
990 ld8 r5=[r4],10*8
991 ld8 r7=[r6],10*8;;
992 mov ar.k6=r3
993 mov ar.k7=r5
994 ;;
996 ld8 r3=[r2],3*8
997 ld8 r5=[r4],3*8
998 ld8 r7=[r6],3*8;;
999 // mov ar.rsc=r3
1000 // mov ar.bsp=r5 // ar.bsp is read only
1001 mov ar.rsc=r0 // make sure that RSE is in enforced lazy mode
1002 ;;
1003 mov ar.bspstore=r7;;
1005 ld8 r9=[r2],8*13;;
1006 mov ar.rnat=r9
1008 mov ar.rsc=r3
1009 ld8 r3=[r2],8*4;;
1010 mov ar.ccv=r3
1012 ld8 r3=[r2],8*4;;
1013 mov ar.unat=r3
1015 ld8 r3=[r2],8*4;;
1016 mov ar.fpsr=r3
1018 ld8 r3=[r2],160;; // 160
1019 // mov ar.itc=r3
1021 ld8 r3=[r2],8;;
1022 mov ar.pfs=r3
1024 ld8 r3=[r2],8;;
1025 mov ar.lc=r3
1027 ld8 r3=[r2];;
1028 mov ar.ec=r3
1029 add r2=8*62,r2;; // padding
1031 restore_RRs:
1032 mov r5=ar.lc
1033 mov ar.lc=0x08-1
1034 movl r4=0x00;;
1035 cStRRr:
1036 dep.z r7=r4,61,3
1037 ld8 r3=[r2],8;;
1038 mov rr[r7]=r3 // what are its access privileges?
1039 add r4=1,r4
1040 br.cloop.sptk.few cStRRr
1041 ;;
1042 mov ar.lc=r5
1043 ;;
1044 end_os_mca_restore:
1045 br ia64_os_mca_done_restore;;
1047 //EndStub//////////////////////////////////////////////////////////////////////
1050 // OK, the issue here is that we need to save state information so
1051 // it can be usable by the kernel debugger and show_regs routines.
1052 // In order to do this, our best bet is to save the current state (plus
1053 // the state information obtained from the MIN_STATE_AREA) into a pt_regs
1054 // format. This way we can pass it on in a usable format.
1055 //
1057 //
1058 // SAL to OS entry point for INIT on the monarch processor
1059 // This has been defined for registration purposes with SAL
1060 // as a part of ia64_mca_init.
1061 //
1062 // When we get here, the following registers have been
1063 // set by the SAL for our use
1064 //
1065 // 1. GR1 = OS INIT GP
1066 // 2. GR8 = PAL_PROC physical address
1067 // 3. GR9 = SAL_PROC physical address
1068 // 4. GR10 = SAL GP (physical)
1069 // 5. GR11 = Init Reason
1070 // 0 = Received INIT for event other than crash dump switch
1071 // 1 = Received wakeup at the end of an OS_MCA corrected machine check
1072 // 2 = Received INIT due to CrashDump switch assertion
1073 //
1074 // 6. GR12 = Return address to location within SAL_INIT procedure
1077 GLOBAL_ENTRY(ia64_monarch_init_handler)
1078 .prologue
1079 #ifdef XEN /* Needed in ia64_monarch_init_handler? */
1080 SET_PER_CPU_DATA();;
1082 // Set current to ar.k6
1083 GET_THIS_PADDR(r2,cpu_kr);;
1084 add r2=IA64_KR_CURRENT_OFFSET,r2;;
1085 ld8 r2=[r2];;
1086 mov ar.k6=r2;;
1087 #endif
1088 // stash the information the SAL passed to os
1089 SAL_TO_OS_MCA_HANDOFF_STATE_SAVE(r2)
1090 ;;
1091 SAVE_MIN_WITH_COVER
1092 ;;
1093 mov r8=cr.ifa
1094 mov r9=cr.isr
1095 adds r3=8,r2 // set up second base pointer
1096 ;;
1097 SAVE_REST
1099 // ok, enough should be saved at this point to be dangerous, and supply
1100 // information for a dump
1101 // We need to switch to Virtual mode before hitting the C functions.
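// (roughly: cr.ipsr = psr | IT|IC|DT|RT|DFH|BN; cr.iip = &IVirtual_Switch; rfi;
//  a short "return" that resumes at IVirtual_Switch with translation enabled)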
1103 movl r2=IA64_PSR_IT|IA64_PSR_IC|IA64_PSR_DT|IA64_PSR_RT|IA64_PSR_DFH|IA64_PSR_BN
1104 mov r3=psr // get the current psr, minimum enabled at this point
1105 ;;
1106 or r2=r2,r3
1107 ;;
1108 movl r3=IVirtual_Switch
1109 ;;
1110 mov cr.iip=r3 // short return to set the appropriate bits
1111 mov cr.ipsr=r2 // need to do an rfi to set appropriate bits
1112 ;;
1113 rfi
1114 ;;
1115 IVirtual_Switch:
1116 //
1117 // We should now be running virtual
1118 //
1119 // Let's call the C handler to get the rest of the state info
1120 //
1121 alloc r14=ar.pfs,0,0,2,0 // now it's safe (must be first in insn group!)
1122 ;;
1123 adds out0=16,sp // out0 = pointer to pt_regs
1124 ;;
1125 DO_SAVE_SWITCH_STACK
1126 .body
1127 adds out1=16,sp // out1 = pointer to switch_stack
1129 br.call.sptk.many rp=ia64_init_handler
1130 .ret1:
1132 return_from_init:
1133 br.sptk return_from_init
1134 END(ia64_monarch_init_handler)
1136 //
1137 // SAL to OS entry point for INIT on the slave processor
1138 // This has been defined for registration purposes with SAL
1139 // as a part of ia64_mca_init.
1140 //
1142 GLOBAL_ENTRY(ia64_slave_init_handler)
1143 1: br.sptk 1b
1144 END(ia64_slave_init_handler)