ia64/xen-unstable: xen/arch/ia64/linux-xen/mca_asm.S @ 10888:5379548bfc79

changeset:  [NET] Enable TCPv4 segmentation offload in front/back drivers.
            Signed-off-by: Keir Fraser <keir@xensource.com>
author:     kfraser@localhost.localdomain
date:       Tue Aug 01 11:54:45 2006 +0100
parents:    efdfbb40db3f
children:   2afdc0066df6
//
// assembly portion of the IA64 MCA handling
//
// Mods by cfleck to integrate into kernel build
// 00/03/15 davidm Added various stop bits to get a clean compile
//
// 00/03/29 cfleck Added code to save INIT handoff state in pt_regs format,
//                 switch to temp kstack, switch modes, jump to C INIT handler
//
// 02/01/04 J.Hall <jenna.s.hall@intel.com>
//          Before entering virtual mode code:
//          1. Check for TLB CPU error
//          2. Restore current thread pointer to kr6
//          3. Move stack ptr 16 bytes to conform to C calling convention
//
// 04/11/12 Russ Anderson <rja@sgi.com>
//          Added per cpu MCA/INIT stack save areas.
//
#include <linux/config.h>
#include <linux/threads.h>

#include <asm/asmmacro.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/mca_asm.h>
#include <asm/mca.h>

/*
 * When we get a machine check, the kernel stack pointer is no longer
 * valid, so we need to set a new stack pointer.
 */
#define MINSTATE_PHYS           /* Make sure stack access is physical for MINSTATE */

/*
 * Needed for return context to SAL
 */
#define IA64_MCA_SAME_CONTEXT   0
#define IA64_MCA_COLD_BOOT      -2

#include "minstate.h"

/*
 * SAL_TO_OS_MCA_HANDOFF_STATE (SAL 3.0 spec)
 *      1. GR1 = OS GP
 *      2. GR8 = PAL_PROC physical address
 *      3. GR9 = SAL_PROC physical address
 *      4. GR10 = SAL GP (physical)
 *      5. GR11 = Rendez state
 *      6. GR12 = Return address to location within SAL_CHECK
 */
#ifdef XEN
#define SAL_TO_OS_MCA_HANDOFF_STATE_SAVE(_tmp) \
        movl    _tmp=THIS_CPU(ia64_sal_to_os_handoff_state_addr);; \
        tpa     _tmp=_tmp;; \
        ld8     _tmp=[_tmp];; \
        st8     [_tmp]=r1,0x08;; \
        st8     [_tmp]=r8,0x08;; \
        st8     [_tmp]=r9,0x08;; \
        st8     [_tmp]=r10,0x08;; \
        st8     [_tmp]=r11,0x08;; \
        st8     [_tmp]=r12,0x08;; \
        st8     [_tmp]=r17,0x08;; \
        st8     [_tmp]=r18,0x08
#else
#define SAL_TO_OS_MCA_HANDOFF_STATE_SAVE(_tmp) \
        LOAD_PHYSICAL(p0, _tmp, ia64_sal_to_os_handoff_state);; \
        st8     [_tmp]=r1,0x08;; \
        st8     [_tmp]=r8,0x08;; \
        st8     [_tmp]=r9,0x08;; \
        st8     [_tmp]=r10,0x08;; \
        st8     [_tmp]=r11,0x08;; \
        st8     [_tmp]=r12,0x08;; \
        st8     [_tmp]=r17,0x08;; \
        st8     [_tmp]=r18,0x08
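
// The eight stores above define the in-memory layout of the SAL-to-OS
// handoff record: eight consecutive 8-byte slots written in the order
// r1, r8, r9, r10, r11, r12, r17, r18.  As a rough C sketch (field names
// here are illustrative; the authoritative definition is
// ia64_mca_sal_to_os_state_t in include/asm/mca.h):
//
//      struct sal_to_os_handoff {
//              u64 os_gp;              /* r1:  OS GP */
//              u64 pal_proc;           /* r8:  PAL_PROC physical address */
//              u64 sal_proc;           /* r9:  SAL_PROC physical address */
//              u64 sal_gp;             /* r10: SAL GP (physical) */
//              u64 rendez_state;       /* r11: rendezvous state */
//              u64 sal_check_ra;       /* r12: return address into SAL_CHECK */
//              u64 pal_min_state;      /* r17: min-state save area pointer */
//              u64 proc_state_param;   /* r18: processor state parameter */
//      };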

/*
 * OS_MCA_TO_SAL_HANDOFF_STATE (SAL 3.0 spec)
 * (p6) is executed if we never entered virtual mode (TLB error)
 * (p7) is executed if we entered virtual mode as expected (normal case)
 *      1. GR8 = OS_MCA return status
 *      2. GR9 = SAL GP (physical)
 *      3. GR10 = 0/1 returning same/new context
 *      4. GR22 = New min state save area pointer
 * Returns the pointer to the SAL return-address save location in _tmp.
 */
#define OS_MCA_TO_SAL_HANDOFF_STATE_RESTORE(_tmp) \
        movl    _tmp=ia64_os_to_sal_handoff_state;; \
        DATA_VA_TO_PA(_tmp);; \
        ld8     r8=[_tmp],0x08;; \
        ld8     r9=[_tmp],0x08;; \
        ld8     r10=[_tmp],0x08;; \
        ld8     r22=[_tmp],0x08;;
        // now _tmp is pointing to the SAL return-address save location

/*
 * COLD_BOOT_HANDOFF_STATE() sets ia64_mca_os_to_sal_state
 *      imots_os_status=IA64_MCA_COLD_BOOT
 *      imots_sal_gp=SAL GP
 *      imots_context=IA64_MCA_SAME_CONTEXT
 *      imots_new_min_state=Min state save area pointer
 *      imots_sal_check_ra=Return address to location within SAL_CHECK
 */
#define COLD_BOOT_HANDOFF_STATE(sal_to_os_handoff,os_to_sal_handoff,tmp) \
        movl    tmp=IA64_MCA_COLD_BOOT; \
        movl    sal_to_os_handoff=__pa(ia64_sal_to_os_handoff_state); \
        movl    os_to_sal_handoff=__pa(ia64_os_to_sal_handoff_state);; \
        st8     [os_to_sal_handoff]=tmp,8;; \
        ld8     tmp=[sal_to_os_handoff],48;; \
        st8     [os_to_sal_handoff]=tmp,8;; \
        movl    tmp=IA64_MCA_SAME_CONTEXT;; \
        st8     [os_to_sal_handoff]=tmp,8;; \
        ld8     tmp=[sal_to_os_handoff],-8;; \
        st8     [os_to_sal_handoff]=tmp,8;; \
        ld8     tmp=[sal_to_os_handoff];; \
        st8     [os_to_sal_handoff]=tmp;;
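
// In C terms, the macro above fills in the OS-to-SAL record to request a
// cold boot, pulling two values back out of the SAL-to-OS record saved
// earlier.  Word indices follow the store order of
// SAL_TO_OS_MCA_HANDOFF_STATE_SAVE; a sketch, not the mca.h definition:
//
//      os_to_sal[0] = IA64_MCA_COLD_BOOT;      /* imots_os_status */
//      os_to_sal[1] = sal_to_os[0];            /* imots_sal_gp slot */
//      os_to_sal[2] = IA64_MCA_SAME_CONTEXT;   /* imots_context */
//      os_to_sal[3] = sal_to_os[6];            /* imots_new_min_state */
//      os_to_sal[4] = sal_to_os[5];            /* imots_sal_check_ra */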

#define GET_IA64_MCA_DATA(reg) \
        GET_THIS_PADDR(reg, ia64_mca_data) \
        ;; \
        ld8 reg=[reg]

#endif /* XEN */
        .global ia64_os_mca_dispatch
        .global ia64_os_mca_dispatch_end
#ifndef XEN
        .global ia64_sal_to_os_handoff_state
        .global ia64_os_to_sal_handoff_state
        .global ia64_do_tlb_purge
#endif

        .text
        .align 16

#ifndef XEN
/*
 * Just the TLB purge part is moved to a separate function
 * so we can re-use the code for cpu hotplug as well.
 * The caller should set up b1 so we can branch back once the
 * TLB flush is complete.
 */

ia64_do_tlb_purge:
#define O(member)       IA64_CPUINFO_##member##_OFFSET

        GET_THIS_PADDR(r2, cpu_info)    // load phys addr of cpu_info into r2
        ;;
        addl r17=O(PTCE_STRIDE),r2
        addl r2=O(PTCE_BASE),r2
        ;;
        ld8 r18=[r2],(O(PTCE_COUNT)-O(PTCE_BASE));;     // r18=ptce_base
        ld4 r19=[r2],4                                  // r19=ptce_count[0]
        ld4 r21=[r17],4                                 // r21=ptce_stride[0]
        ;;
        ld4 r20=[r2]                                    // r20=ptce_count[1]
        ld4 r22=[r17]                                   // r22=ptce_stride[1]
        mov r24=0
        ;;
        adds r20=-1,r20
        ;;
#undef O

2:
        cmp.ltu p6,p7=r24,r19
(p7)    br.cond.dpnt.few 4f
        mov ar.lc=r20
3:
        ptc.e r18
        ;;
        add r18=r22,r18
        br.cloop.sptk.few 3b
        ;;
        add r18=r21,r18
        add r24=1,r24
        ;;
        br.sptk.few 2b
4:
        srlz.i                  // srlz.i implies srlz.d
        ;;
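
        // Equivalent C for the purge loop above (a sketch): the PTCE
        // info from PAL describes a two-level iteration space, and one
        // ptc.e per point purges the local translation cache.
        //
        //      addr = ptce_base;
        //      for (i = 0; i < ptce_count[0]; i++) {
        //              for (j = 0; j < ptce_count[1]; j++) {
        //                      ptc_e(addr);            /* purge TC entries */
        //                      addr += ptce_stride[1];
        //              }
        //              addr += ptce_stride[0];
        //      }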

        // Now purge addresses formerly mapped by TR registers
        // 1. Purge ITR&DTR for kernel.
        movl r16=KERNEL_START
        mov r18=KERNEL_TR_PAGE_SHIFT<<2
        ;;
        ptr.i r16, r18
        ptr.d r16, r18
        ;;
        srlz.i
        ;;
        srlz.d
        ;;
        // 2. Purge DTR for PERCPU data.
        movl r16=PERCPU_ADDR
        mov r18=PERCPU_PAGE_SHIFT<<2
        ;;
        ptr.d r16,r18
        ;;
        srlz.d
        ;;
        // 3. Purge ITR for PAL code.
        GET_THIS_PADDR(r2, ia64_mca_pal_base)
        ;;
        ld8 r16=[r2]
        mov r18=IA64_GRANULE_SHIFT<<2
        ;;
        ptr.i r16,r18
        ;;
        srlz.i
        ;;
        // 4. Purge DTR for stack.
        mov r16=IA64_KR(CURRENT_STACK)
        ;;
        shl r16=r16,IA64_GRANULE_SHIFT
        movl r19=PAGE_OFFSET
        ;;
        add r16=r19,r16
        mov r18=IA64_GRANULE_SHIFT<<2
        ;;
        ptr.d r16,r18
        ;;
        srlz.i
        ;;
        // Now branch away to caller.
        br.sptk.many b1
        ;;

ia64_os_mca_dispatch:

        // Serialize all MCA processing
        mov     r3=1;;
        LOAD_PHYSICAL(p0,r2,ia64_mca_serialize);;
ia64_os_mca_spin:
        xchg8   r4=[r2],r3;;
        cmp.ne  p6,p0=r4,r0
(p6)    br      ia64_os_mca_spin
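
        // The xchg8 loop above is a minimal spinlock, taken in physical
        // mode, so only one CPU at a time runs the OS_MCA path.  In C
        // terms (a sketch):
        //
        //      while (xchg(&ia64_mca_serialize, 1) != 0)
        //              /* spin */;
        //
        // The lock is released with st8.rel just before returning to SAL.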

        // Save the SAL to OS MCA handoff state as defined
        // by SAL SPEC 3.0
        // NOTE : The order in which the state gets saved
        //        is dependent on the way the C-structure
        //        for ia64_mca_sal_to_os_state_t has been
        //        defined in include/asm/mca.h
        SAL_TO_OS_MCA_HANDOFF_STATE_SAVE(r2)
        ;;

        // LOG PROCESSOR STATE INFO FROM HERE ON..
begin_os_mca_dump:
        br      ia64_os_mca_proc_state_dump;;

ia64_os_mca_done_dump:

        LOAD_PHYSICAL(p0,r16,ia64_sal_to_os_handoff_state+56)
        ;;
        ld8 r18=[r16]   // Get processor state parameter on existing PALE_CHECK.
        ;;
        tbit.nz p6,p7=r18,60
(p7)    br.spnt done_tlb_purge_and_reload
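
        // Bit 60 of the processor state parameter flags the TLB-error
        // case: when it is set, the current translations cannot be
        // trusted, so the TC is purged and the TRs reloaded below before
        // any virtual-mode code runs; otherwise the purge is skipped.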

        // The following code purges TC and TR entries, then reloads the TR entries.
        // Purge percpu data TC entries.
begin_tlb_purge_and_reload:
        movl r18=ia64_reload_tr;;
        LOAD_PHYSICAL(p0,r18,ia64_reload_tr);;
        mov b1=r18;;
        br.sptk.many ia64_do_tlb_purge;;

ia64_reload_tr:
        // Finally reload the TR registers.
        // 1. Reload DTR/ITR registers for kernel.
        mov r18=KERNEL_TR_PAGE_SHIFT<<2
        movl r17=KERNEL_START
        ;;
        mov cr.itir=r18
        mov cr.ifa=r17
        mov r16=IA64_TR_KERNEL
        mov r19=ip
        movl r18=PAGE_KERNEL
        ;;
        dep r17=0,r19,0, KERNEL_TR_PAGE_SHIFT
        ;;
        or r18=r17,r18
        ;;
        itr.i itr[r16]=r18
        ;;
        itr.d dtr[r16]=r18
        ;;
        srlz.i
        srlz.d
        ;;
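
        // The dep/or sequence above rebuilds the kernel TR mapping by
        // hand: take the (physical) IP, truncate it to the kernel TR
        // page size, and merge in the PAGE_KERNEL attribute bits.
        // Roughly, in C (itr_i/itr_d stand in for the itr.i/itr.d
        // insertions; illustrative only):
        //
        //      pte = (ip & ~((1UL << KERNEL_TR_PAGE_SHIFT) - 1)) | PAGE_KERNEL;
        //      itr_i(IA64_TR_KERNEL, KERNEL_START, pte);  /* cr.itir holds the size */
        //      itr_d(IA64_TR_KERNEL, KERNEL_START, pte);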
        // 2. Reload DTR register for PERCPU data.
        GET_THIS_PADDR(r2, ia64_mca_per_cpu_pte)
        ;;
        movl r16=PERCPU_ADDR            // vaddr
        movl r18=PERCPU_PAGE_SHIFT<<2
        ;;
        mov cr.itir=r18
        mov cr.ifa=r16
        ;;
        ld8 r18=[r2]                    // load per-CPU PTE
        mov r16=IA64_TR_PERCPU_DATA;
        ;;
        itr.d dtr[r16]=r18
        ;;
        srlz.d
        ;;
        // 3. Reload ITR for PAL code.
        GET_THIS_PADDR(r2, ia64_mca_pal_pte)
        ;;
        ld8 r18=[r2]                    // load PAL PTE
        ;;
        GET_THIS_PADDR(r2, ia64_mca_pal_base)
        ;;
        ld8 r16=[r2]                    // load PAL vaddr
        mov r19=IA64_GRANULE_SHIFT<<2
        ;;
        mov cr.itir=r19
        mov cr.ifa=r16
        mov r20=IA64_TR_PALCODE
        ;;
        itr.i itr[r20]=r18
        ;;
        srlz.i
        ;;
        // 4. Reload DTR for stack.
        mov r16=IA64_KR(CURRENT_STACK)
        ;;
        shl r16=r16,IA64_GRANULE_SHIFT
        movl r19=PAGE_OFFSET
        ;;
        add r18=r19,r16
        movl r20=PAGE_KERNEL
        ;;
        add r16=r20,r16
        mov r19=IA64_GRANULE_SHIFT<<2
        ;;
        mov cr.itir=r19
        mov cr.ifa=r18
        mov r20=IA64_TR_CURRENT_STACK
        ;;
        itr.d dtr[r20]=r16
        ;;
        srlz.d
        ;;
        br.sptk.many done_tlb_purge_and_reload
err:
        COLD_BOOT_HANDOFF_STATE(r20,r21,r22)
        br.sptk.many ia64_os_mca_done_restore

done_tlb_purge_and_reload:

        // Setup new stack frame for OS_MCA handling
        GET_IA64_MCA_DATA(r2)
        ;;
        add r3 = IA64_MCA_CPU_STACKFRAME_OFFSET, r2
        add r2 = IA64_MCA_CPU_RBSTORE_OFFSET, r2
        ;;
        rse_switch_context(r6,r3,r2);;  // RSC management in this new context

        GET_IA64_MCA_DATA(r2)
        ;;
        add r2 = IA64_MCA_CPU_STACK_OFFSET+IA64_MCA_STACK_SIZE-16, r2
        ;;
        mov r12=r2                      // establish new stack-pointer
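
        // r12 is the stack pointer in the ia64 ABI: the handler now runs
        // on the per-CPU MCA stack, with the usual 16-byte scratch area
        // below sp reserved for the C calling convention.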

        // Enter virtual mode from physical mode
        VIRTUAL_MODE_ENTER(r2, r3, ia64_os_mca_virtual_begin, r4)
ia64_os_mca_virtual_begin:

        // Call virtual mode handler
        movl r2=ia64_mca_ucmc_handler;;
        mov b6=r2;;
        br.call.sptk.many b0=b6;;
.ret0:
        // Revert back to physical mode before going back to SAL
        PHYSICAL_MODE_ENTER(r2, r3, ia64_os_mca_virtual_end, r4)
ia64_os_mca_virtual_end:

        // restore the original stack frame here
        GET_IA64_MCA_DATA(r2)
        ;;
        add r2 = IA64_MCA_CPU_STACKFRAME_OFFSET, r2
        ;;
        movl r4=IA64_PSR_MC
        ;;
        rse_return_context(r4,r3,r2)    // switch from interrupt context for RSE

        // let us restore all the registers from our PSI structure
        mov r8=gp
        ;;
begin_os_mca_restore:
        br      ia64_os_mca_proc_state_restore;;

ia64_os_mca_done_restore:
        OS_MCA_TO_SAL_HANDOFF_STATE_RESTORE(r2);;
        // branch back to SAL_CHECK
        ld8 r3=[r2];;
        mov b0=r3;;             // SAL_CHECK return address

        // release lock
        movl r3=ia64_mca_serialize;;
        DATA_VA_TO_PA(r3);;
        st8.rel [r3]=r0

        br b0
        ;;
ia64_os_mca_dispatch_end:
//EndMain//////////////////////////////////////////////////////////////////////


//++
// Name:
//      ia64_os_mca_proc_state_dump()
//
// Stub Description:
//
//      This stub dumps the processor state during MCHK to a data area
//
//--

ia64_os_mca_proc_state_dump:
        // Save bank 1 GRs 16-31 which will be used by C code when we switch
        // to virtual addressing mode.
        GET_IA64_MCA_DATA(r2)
        ;;
        add r2 = IA64_MCA_CPU_PROC_STATE_DUMP_OFFSET, r2
        ;;
        // save ar.NaT
        mov r5=ar.unat          // ar.unat

        // save banked GRs 16-31 along with NaT bits
        bsw.1;;
        st8.spill [r2]=r16,8;;
        st8.spill [r2]=r17,8;;
        st8.spill [r2]=r18,8;;
        st8.spill [r2]=r19,8;;
        st8.spill [r2]=r20,8;;
        st8.spill [r2]=r21,8;;
        st8.spill [r2]=r22,8;;
        st8.spill [r2]=r23,8;;
        st8.spill [r2]=r24,8;;
        st8.spill [r2]=r25,8;;
        st8.spill [r2]=r26,8;;
        st8.spill [r2]=r27,8;;
        st8.spill [r2]=r28,8;;
        st8.spill [r2]=r29,8;;
        st8.spill [r2]=r30,8;;
        st8.spill [r2]=r31,8;;

        mov r4=ar.unat;;
        st8 [r2]=r4,8           // save User NaT bits for r16-r31
        mov ar.unat=r5          // restore original unat
        bsw.0;;
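
        // bsw.1 flips to register bank 1 so the banked copies of r16-r31
        // (the ones C code will use once PSR.bn is set again) can be
        // spilled together with their NaT bits: st8.spill routes each NaT
        // into ar.unat, which is then saved as one extra word above.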

        // save BRs
        add r4=8,r2             // duplicate r2 in r4
        add r6=2*8,r2           // duplicate r2 in r6

        mov r3=b0
        mov r5=b1
        mov r7=b2;;
        st8 [r2]=r3,3*8
        st8 [r4]=r5,3*8
        st8 [r6]=r7,3*8;;

        mov r3=b3
        mov r5=b4
        mov r7=b5;;
        st8 [r2]=r3,3*8
        st8 [r4]=r5,3*8
        st8 [r6]=r7,3*8;;

        mov r3=b6
        mov r5=b7;;
        st8 [r2]=r3,2*8
        st8 [r4]=r5,2*8;;

cSaveCRs:
        // save CRs
        add r4=8,r2             // duplicate r2 in r4
        add r6=2*8,r2           // duplicate r2 in r6

        mov r3=cr.dcr
        mov r5=cr.itm
        mov r7=cr.iva;;

        st8 [r2]=r3,8*8
        st8 [r4]=r5,3*8
        st8 [r6]=r7,3*8;;       // 48 byte increments

        mov r3=cr.pta;;
        st8 [r2]=r3,8*8;;       // 64 byte increments

        // if PSR.ic=0, reading interruption registers causes an illegal operation fault
        mov r3=psr;;
        tbit.nz.unc p6,p0=r3,PSR_IC;;   // PSI Valid Log bit pos. test
(p6)    st8 [r2]=r0,9*8+160     // increment by 232 byte inc.
begin_skip_intr_regs:
(p6)    br      SkipIntrRegs;;

        add r4=8,r2             // duplicate r2 in r4
        add r6=2*8,r2           // duplicate r2 in r6

        mov r3=cr.ipsr
        mov r5=cr.isr
        mov r7=r0;;
        st8 [r2]=r3,3*8
        st8 [r4]=r5,3*8
        st8 [r6]=r7,3*8;;

        mov r3=cr.iip
        mov r5=cr.ifa
        mov r7=cr.itir;;
        st8 [r2]=r3,3*8
        st8 [r4]=r5,3*8
        st8 [r6]=r7,3*8;;

        mov r3=cr.iipa
        mov r5=cr.ifs
        mov r7=cr.iim;;
        st8 [r2]=r3,3*8
        st8 [r4]=r5,3*8
        st8 [r6]=r7,3*8;;

        mov r3=cr25;;           // cr.iha
        st8 [r2]=r3,160;;       // 160 byte increment

SkipIntrRegs:
        st8 [r2]=r0,152;;       // another 152 byte increment

        add r4=8,r2             // duplicate r2 in r4
        add r6=2*8,r2           // duplicate r2 in r6

        mov r3=cr.lid
//      mov r5=cr.ivr           // cr.ivr, don't read it
        mov r7=cr.tpr;;
        st8 [r2]=r3,3*8
        st8 [r4]=r5,3*8
        st8 [r6]=r7,3*8;;

        mov r3=r0               // cr.eoi => cr67
        mov r5=r0               // cr.irr0 => cr68
        mov r7=r0;;             // cr.irr1 => cr69
        st8 [r2]=r3,3*8
        st8 [r4]=r5,3*8
        st8 [r6]=r7,3*8;;

        mov r3=r0               // cr.irr2 => cr70
        mov r5=r0               // cr.irr3 => cr71
        mov r7=cr.itv;;
        st8 [r2]=r3,3*8
        st8 [r4]=r5,3*8
        st8 [r6]=r7,3*8;;

        mov r3=cr.pmv
        mov r5=cr.cmcv;;
        st8 [r2]=r3,7*8
        st8 [r4]=r5,7*8;;

        mov r3=r0               // cr.lrr0 => cr80
        mov r5=r0;;             // cr.lrr1 => cr81
        st8 [r2]=r3,23*8
        st8 [r4]=r5,23*8;;

        adds r2=25*8,r2;;

cSaveARs:
        // save ARs
        add r4=8,r2             // duplicate r2 in r4
        add r6=2*8,r2           // duplicate r2 in r6

        mov r3=ar.k0
        mov r5=ar.k1
        mov r7=ar.k2;;
        st8 [r2]=r3,3*8
        st8 [r4]=r5,3*8
        st8 [r6]=r7,3*8;;

        mov r3=ar.k3
        mov r5=ar.k4
        mov r7=ar.k5;;
        st8 [r2]=r3,3*8
        st8 [r4]=r5,3*8
        st8 [r6]=r7,3*8;;

        mov r3=ar.k6
        mov r5=ar.k7
        mov r7=r0;;             // ar.kr8
        st8 [r2]=r3,10*8
        st8 [r4]=r5,10*8
        st8 [r6]=r7,10*8;;      // increment by 72 bytes

        mov r3=ar.rsc
        mov ar.rsc=r0           // put RSE in enforced lazy mode
        mov r5=ar.bsp
        ;;
        mov r7=ar.bspstore;;
        st8 [r2]=r3,3*8
        st8 [r4]=r5,3*8
        st8 [r6]=r7,3*8;;

        mov r3=ar.rnat;;
        st8 [r2]=r3,8*13        // increment by 13x8 bytes

        mov r3=ar.ccv;;
        st8 [r2]=r3,8*4

        mov r3=ar.unat;;
        st8 [r2]=r3,8*4

        mov r3=ar.fpsr;;
        st8 [r2]=r3,8*4

        mov r3=ar.itc;;
        st8 [r2]=r3,160         // 160 byte increment

        mov r3=ar.pfs;;
        st8 [r2]=r3,8

        mov r3=ar.lc;;
        st8 [r2]=r3,8

        mov r3=ar.ec;;
        st8 [r2]=r3
        add r2=8*62,r2          // padding

        // save RRs
        mov ar.lc=0x08-1
        movl r4=0x00;;

cStRR:
        dep.z r5=r4,61,3;;
        mov r3=rr[r5];;
        st8 [r2]=r3,8
        add r4=1,r4
        br.cloop.sptk.few cStRR
        ;;
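
        // dep.z r5=r4,61,3 places the region number in bits 63:61, which
        // is how rr[] is indexed: the region register is selected by the
        // top three bits of the operand.  In C terms (a sketch using the
        // kernel's ia64_get_rr() accessor):
        //
        //      for (rnum = 0; rnum < 8; rnum++)
        //              *dump++ = ia64_get_rr((u64)rnum << 61);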
end_os_mca_dump:
        br      ia64_os_mca_done_dump;;

//EndStub//////////////////////////////////////////////////////////////////////


//++
// Name:
//      ia64_os_mca_proc_state_restore()
//
// Stub Description:
//
//      This is a stub to restore the saved processor state during MCHK
//
//--

ia64_os_mca_proc_state_restore:

        // Restore bank1 GR16-31
        GET_IA64_MCA_DATA(r2)
        ;;
        add r2 = IA64_MCA_CPU_PROC_STATE_DUMP_OFFSET, r2

restore_GRs:                    // restore bank-1 GRs 16-31
        bsw.1;;
        add r3=16*8,r2;;        // to get to NaT of GR 16-31
        ld8 r3=[r3];;
        mov ar.unat=r3;;        // first restore NaT

        ld8.fill r16=[r2],8;;
        ld8.fill r17=[r2],8;;
        ld8.fill r18=[r2],8;;
        ld8.fill r19=[r2],8;;
        ld8.fill r20=[r2],8;;
        ld8.fill r21=[r2],8;;
        ld8.fill r22=[r2],8;;
        ld8.fill r23=[r2],8;;
        ld8.fill r24=[r2],8;;
        ld8.fill r25=[r2],8;;
        ld8.fill r26=[r2],8;;
        ld8.fill r27=[r2],8;;
        ld8.fill r28=[r2],8;;
        ld8.fill r29=[r2],8;;
        ld8.fill r30=[r2],8;;
        ld8.fill r31=[r2],8;;

        ld8 r3=[r2],8;;         // increment to skip NaT
        bsw.0;;

restore_BRs:
        add r4=8,r2             // duplicate r2 in r4
        add r6=2*8,r2;;         // duplicate r2 in r6

        ld8 r3=[r2],3*8
        ld8 r5=[r4],3*8
        ld8 r7=[r6],3*8;;
        mov b0=r3
        mov b1=r5
        mov b2=r7;;

        ld8 r3=[r2],3*8
        ld8 r5=[r4],3*8
        ld8 r7=[r6],3*8;;
        mov b3=r3
        mov b4=r5
        mov b5=r7;;

        ld8 r3=[r2],2*8
        ld8 r5=[r4],2*8;;
        mov b6=r3
        mov b7=r5;;

restore_CRs:
        add r4=8,r2             // duplicate r2 in r4
        add r6=2*8,r2;;         // duplicate r2 in r6

        ld8 r3=[r2],8*8
        ld8 r5=[r4],3*8
        ld8 r7=[r6],3*8;;       // 48 byte increments
        mov cr.dcr=r3
        mov cr.itm=r5
        mov cr.iva=r7;;

        ld8 r3=[r2],8*8;;       // 64 byte increments
//      mov cr.pta=r3

        // if PSR.ic=1, writing interruption registers causes an illegal operation fault
        mov r3=psr;;
        tbit.nz.unc p6,p0=r3,PSR_IC;;   // PSI Valid Log bit pos. test
(p6)    st8 [r2]=r0,9*8+160     // increment by 232 byte inc.

begin_rskip_intr_regs:
(p6)    br      rSkipIntrRegs;;

        add r4=8,r2             // duplicate r2 in r4
        add r6=2*8,r2;;         // duplicate r2 in r6

        ld8 r3=[r2],3*8
        ld8 r5=[r4],3*8
        ld8 r7=[r6],3*8;;
        mov cr.ipsr=r3
//      mov cr.isr=r5           // cr.isr is read only

        ld8 r3=[r2],3*8
        ld8 r5=[r4],3*8
        ld8 r7=[r6],3*8;;
        mov cr.iip=r3
        mov cr.ifa=r5
        mov cr.itir=r7;;

        ld8 r3=[r2],3*8
        ld8 r5=[r4],3*8
        ld8 r7=[r6],3*8;;
        mov cr.iipa=r3
        mov cr.ifs=r5
        mov cr.iim=r7

        ld8 r3=[r2],160;;       // 160 byte increment
        mov cr.iha=r3

rSkipIntrRegs:
        ld8 r3=[r2],152;;       // another 152 byte increment

        add r4=8,r2             // duplicate r2 in r4
        add r6=2*8,r2;;         // duplicate r2 in r6

        ld8 r3=[r2],8*3
        ld8 r5=[r4],8*3
        ld8 r7=[r6],8*3;;
        mov cr.lid=r3
//      mov cr.ivr=r5           // cr.ivr is read only
        mov cr.tpr=r7;;

        ld8 r3=[r2],8*3
        ld8 r5=[r4],8*3
        ld8 r7=[r6],8*3;;
//      mov cr.eoi=r3
//      mov cr.irr0=r5          // cr.irr0 is read only
//      mov cr.irr1=r7;;        // cr.irr1 is read only

        ld8 r3=[r2],8*3
        ld8 r5=[r4],8*3
        ld8 r7=[r6],8*3;;
//      mov cr.irr2=r3          // cr.irr2 is read only
//      mov cr.irr3=r5          // cr.irr3 is read only
        mov cr.itv=r7;;

        ld8 r3=[r2],8*7
        ld8 r5=[r4],8*7;;
        mov cr.pmv=r3
        mov cr.cmcv=r5;;

        ld8 r3=[r2],8*23
        ld8 r5=[r4],8*23;;
        adds r2=8*23,r2
        adds r4=8*23,r4;;
//      mov cr.lrr0=r3
//      mov cr.lrr1=r5

        adds r2=8*2,r2;;

restore_ARs:
        add r4=8,r2             // duplicate r2 in r4
        add r6=2*8,r2;;         // duplicate r2 in r6

        ld8 r3=[r2],3*8
        ld8 r5=[r4],3*8
        ld8 r7=[r6],3*8;;
        mov ar.k0=r3
        mov ar.k1=r5
        mov ar.k2=r7;;

        ld8 r3=[r2],3*8
        ld8 r5=[r4],3*8
        ld8 r7=[r6],3*8;;
        mov ar.k3=r3
        mov ar.k4=r5
        mov ar.k5=r7;;

        ld8 r3=[r2],10*8
        ld8 r5=[r4],10*8
        ld8 r7=[r6],10*8;;
        mov ar.k6=r3
        mov ar.k7=r5
        ;;

        ld8 r3=[r2],3*8
        ld8 r5=[r4],3*8
        ld8 r7=[r6],3*8;;
//      mov ar.rsc=r3
//      mov ar.bsp=r5           // ar.bsp is read only
        mov ar.rsc=r0           // make sure that RSE is in enforced lazy mode
        ;;
        mov ar.bspstore=r7;;

        ld8 r9=[r2],8*13;;
        mov ar.rnat=r9

        mov ar.rsc=r3
        ld8 r3=[r2],8*4;;
        mov ar.ccv=r3

        ld8 r3=[r2],8*4;;
        mov ar.unat=r3

        ld8 r3=[r2],8*4;;
        mov ar.fpsr=r3

        ld8 r3=[r2],160;;       // 160 byte increment
//      mov ar.itc=r3

        ld8 r3=[r2],8;;
        mov ar.pfs=r3

        ld8 r3=[r2],8;;
        mov ar.lc=r3

        ld8 r3=[r2];;
        mov ar.ec=r3
        add r2=8*62,r2;;        // padding

restore_RRs:
        mov r5=ar.lc
        mov ar.lc=0x08-1
        movl r4=0x00;;
cStRRr:
        dep.z r7=r4,61,3
        ld8 r3=[r2],8;;
        mov rr[r7]=r3           // what are its access privileges?
        add r4=1,r4
        br.cloop.sptk.few cStRRr
        ;;
        mov ar.lc=r5
        ;;

end_os_mca_restore:
        br      ia64_os_mca_done_restore;;

//EndStub//////////////////////////////////////////////////////////////////////
#else
ia64_os_mca_dispatch:
1:
        br.sptk 1b
ia64_os_mca_dispatch_end:
#endif /* !XEN */


// ok, the issue here is that we need to save state information so
// it can be usable by the kernel debugger and show_regs routines.
// In order to do this, our best bet is to save the current state (plus
// the state information obtained from the MIN_STATE_AREA) into a pt_regs
// format.  This way we can pass it on in a usable format.
//

//
// SAL to OS entry point for INIT on the monarch processor
// This has been defined for registration purposes with SAL
// as a part of ia64_mca_init.
//
// When we get here, the following registers have been
// set by the SAL for our use
//
//      1. GR1 = OS INIT GP
//      2. GR8 = PAL_PROC physical address
//      3. GR9 = SAL_PROC physical address
//      4. GR10 = SAL GP (physical)
//      5. GR11 = Init Reason
//              0 = Received INIT for event other than crash dump switch
//              1 = Received wakeup at the end of an OS_MCA corrected machine check
//              2 = Received INIT due to CrashDump switch assertion
//
//      6. GR12 = Return address to location within SAL_INIT procedure


GLOBAL_ENTRY(ia64_monarch_init_handler)
        .prologue
        // stash the information the SAL passed to os
        SAL_TO_OS_MCA_HANDOFF_STATE_SAVE(r2)
        ;;
        SAVE_MIN_WITH_COVER
        ;;
        mov r8=cr.ifa
        mov r9=cr.isr
        adds r3=8,r2    // set up second base pointer
        ;;
        SAVE_REST

        // ok, enough should be saved at this point to be dangerous, and supply
        // information for a dump
        // We need to switch to Virtual mode before hitting the C functions.

        movl    r2=IA64_PSR_IT|IA64_PSR_IC|IA64_PSR_DT|IA64_PSR_RT|IA64_PSR_DFH|IA64_PSR_BN
        mov     r3=psr  // get the current psr, minimum enabled at this point
        ;;
        or      r2=r2,r3
        ;;
        movl    r3=IVirtual_Switch
        ;;
        mov     cr.iip=r3       // short return to set the appropriate bits
        mov     cr.ipsr=r2      // need to do an rfi to set appropriate bits
        ;;
        rfi
        ;;
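
        // The rfi above is how PSR bits that cannot be written directly
        // get set: cr.iip holds the continuation point (IVirtual_Switch)
        // and cr.ipsr the desired PSR image with IT/IC/DT/RT enabled, so
        // the "return" lands on the next instruction with translation on.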
IVirtual_Switch:
        //
        // We should now be running virtual
        //
        // Let's call the C handler to get the rest of the state info
        //
        alloc r14=ar.pfs,0,0,2,0        // now it's safe (must be first in insn group!)
        ;;
        adds out0=16,sp                 // out0 = pointer to pt_regs
        ;;
        DO_SAVE_SWITCH_STACK
        .body
        adds out1=16,sp                 // out1 = pointer to switch_stack

        br.call.sptk.many rp=ia64_init_handler
.ret1:

return_from_init:
        br.sptk return_from_init
END(ia64_monarch_init_handler)

//
// SAL to OS entry point for INIT on the slave processor
// This has been defined for registration purposes with SAL
// as a part of ia64_mca_init.
//

GLOBAL_ENTRY(ia64_slave_init_handler)
1:      br.sptk 1b
END(ia64_slave_init_handler)