ia64/linux-2.6.18-xen.hg

view arch/sparc64/kernel/entry.S @ 897:329ea0ccb344

balloon: try harder to balloon up under memory pressure.

Currently if the balloon driver is unable to increase the guest's
reservation it assumes the failure was due to reaching its full
allocation, gives up on the ballooning operation and records the limit
it reached as the "hard limit". The driver will not try again until
the target is set again (even to the same value).

However it is possible that ballooning has in fact failed due to
memory pressure in the host and therefore it is desirable to keep
attempting to reach the target in case memory becomes available. The
most likely scenario is that some guests are ballooning down while
others are ballooning up and therefore there is temporary memory
pressure while things stabilise. You would not expect a well behaved
toolstack to ask a domain to balloon to more than its allocation nor
would you expect it to deliberately over-commit memory by setting
balloon targets which exceed the total host memory.

This patch drops the concept of a hard limit and causes the balloon
driver to retry increasing the reservation on a timer in the same
manner as when decreasing the reservation.

Also if we partially succeed in increasing the reservation
(i.e. receive less pages than we asked for) then we may as well keep
those pages rather than returning them to Xen.

Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Fri Jun 05 14:01:20 2009 +0100 (2009-06-05)
parents 3e8752eb6d9c
children
line source
1 /* $Id: entry.S,v 1.144 2002/02/09 19:49:30 davem Exp $
2 * arch/sparc64/kernel/entry.S: Sparc64 trap low-level entry points.
3 *
4 * Copyright (C) 1995,1997 David S. Miller (davem@caip.rutgers.edu)
5 * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
6 * Copyright (C) 1996 Miguel de Icaza (miguel@nuclecu.unam.mx)
7 * Copyright (C) 1996,98,99 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
8 */
10 #include <linux/errno.h>
12 #include <asm/head.h>
13 #include <asm/asi.h>
14 #include <asm/smp.h>
15 #include <asm/ptrace.h>
16 #include <asm/page.h>
17 #include <asm/signal.h>
18 #include <asm/pgtable.h>
19 #include <asm/processor.h>
20 #include <asm/visasm.h>
21 #include <asm/estate.h>
22 #include <asm/auxio.h>
23 #include <asm/sfafsr.h>
24 #include <asm/pil.h>
25 #include <asm/unistd.h>
27 #define curptr g6
29 .text
30 .align 32
32 /* This is trivial with the new code... */
33 .globl do_fpdis
34 do_fpdis:
35 sethi %hi(TSTATE_PEF), %g4
36 rdpr %tstate, %g5
37 andcc %g5, %g4, %g0
38 be,pt %xcc, 1f
39 nop
40 rd %fprs, %g5
41 andcc %g5, FPRS_FEF, %g0
42 be,pt %xcc, 1f
43 nop
45 /* Legal state when DCR_IFPOE is set in Cheetah %dcr. */
46 sethi %hi(109f), %g7
47 ba,pt %xcc, etrap
48 109: or %g7, %lo(109b), %g7
49 add %g0, %g0, %g0
50 ba,a,pt %xcc, rtrap_clr_l6
52 1: TRAP_LOAD_THREAD_REG(%g6, %g1)
53 ldub [%g6 + TI_FPSAVED], %g5
54 wr %g0, FPRS_FEF, %fprs
55 andcc %g5, FPRS_FEF, %g0
56 be,a,pt %icc, 1f
57 clr %g7
58 ldx [%g6 + TI_GSR], %g7
59 1: andcc %g5, FPRS_DL, %g0
60 bne,pn %icc, 2f
61 fzero %f0
62 andcc %g5, FPRS_DU, %g0
63 bne,pn %icc, 1f
64 fzero %f2
65 faddd %f0, %f2, %f4
66 fmuld %f0, %f2, %f6
67 faddd %f0, %f2, %f8
68 fmuld %f0, %f2, %f10
69 faddd %f0, %f2, %f12
70 fmuld %f0, %f2, %f14
71 faddd %f0, %f2, %f16
72 fmuld %f0, %f2, %f18
73 faddd %f0, %f2, %f20
74 fmuld %f0, %f2, %f22
75 faddd %f0, %f2, %f24
76 fmuld %f0, %f2, %f26
77 faddd %f0, %f2, %f28
78 fmuld %f0, %f2, %f30
79 faddd %f0, %f2, %f32
80 fmuld %f0, %f2, %f34
81 faddd %f0, %f2, %f36
82 fmuld %f0, %f2, %f38
83 faddd %f0, %f2, %f40
84 fmuld %f0, %f2, %f42
85 faddd %f0, %f2, %f44
86 fmuld %f0, %f2, %f46
87 faddd %f0, %f2, %f48
88 fmuld %f0, %f2, %f50
89 faddd %f0, %f2, %f52
90 fmuld %f0, %f2, %f54
91 faddd %f0, %f2, %f56
92 fmuld %f0, %f2, %f58
93 b,pt %xcc, fpdis_exit2
94 faddd %f0, %f2, %f60
95 1: mov SECONDARY_CONTEXT, %g3
96 add %g6, TI_FPREGS + 0x80, %g1
97 faddd %f0, %f2, %f4
98 fmuld %f0, %f2, %f6
100 661: ldxa [%g3] ASI_DMMU, %g5
101 .section .sun4v_1insn_patch, "ax"
102 .word 661b
103 ldxa [%g3] ASI_MMU, %g5
104 .previous
106 sethi %hi(sparc64_kern_sec_context), %g2
107 ldx [%g2 + %lo(sparc64_kern_sec_context)], %g2
109 661: stxa %g2, [%g3] ASI_DMMU
110 .section .sun4v_1insn_patch, "ax"
111 .word 661b
112 stxa %g2, [%g3] ASI_MMU
113 .previous
115 membar #Sync
116 add %g6, TI_FPREGS + 0xc0, %g2
117 faddd %f0, %f2, %f8
118 fmuld %f0, %f2, %f10
119 membar #Sync
120 ldda [%g1] ASI_BLK_S, %f32
121 ldda [%g2] ASI_BLK_S, %f48
122 membar #Sync
123 faddd %f0, %f2, %f12
124 fmuld %f0, %f2, %f14
125 faddd %f0, %f2, %f16
126 fmuld %f0, %f2, %f18
127 faddd %f0, %f2, %f20
128 fmuld %f0, %f2, %f22
129 faddd %f0, %f2, %f24
130 fmuld %f0, %f2, %f26
131 faddd %f0, %f2, %f28
132 fmuld %f0, %f2, %f30
133 b,pt %xcc, fpdis_exit
134 nop
135 2: andcc %g5, FPRS_DU, %g0
136 bne,pt %icc, 3f
137 fzero %f32
138 mov SECONDARY_CONTEXT, %g3
139 fzero %f34
141 661: ldxa [%g3] ASI_DMMU, %g5
142 .section .sun4v_1insn_patch, "ax"
143 .word 661b
144 ldxa [%g3] ASI_MMU, %g5
145 .previous
147 add %g6, TI_FPREGS, %g1
148 sethi %hi(sparc64_kern_sec_context), %g2
149 ldx [%g2 + %lo(sparc64_kern_sec_context)], %g2
151 661: stxa %g2, [%g3] ASI_DMMU
152 .section .sun4v_1insn_patch, "ax"
153 .word 661b
154 stxa %g2, [%g3] ASI_MMU
155 .previous
157 membar #Sync
158 add %g6, TI_FPREGS + 0x40, %g2
159 faddd %f32, %f34, %f36
160 fmuld %f32, %f34, %f38
161 membar #Sync
162 ldda [%g1] ASI_BLK_S, %f0
163 ldda [%g2] ASI_BLK_S, %f16
164 membar #Sync
165 faddd %f32, %f34, %f40
166 fmuld %f32, %f34, %f42
167 faddd %f32, %f34, %f44
168 fmuld %f32, %f34, %f46
169 faddd %f32, %f34, %f48
170 fmuld %f32, %f34, %f50
171 faddd %f32, %f34, %f52
172 fmuld %f32, %f34, %f54
173 faddd %f32, %f34, %f56
174 fmuld %f32, %f34, %f58
175 faddd %f32, %f34, %f60
176 fmuld %f32, %f34, %f62
177 ba,pt %xcc, fpdis_exit
178 nop
179 3: mov SECONDARY_CONTEXT, %g3
180 add %g6, TI_FPREGS, %g1
182 661: ldxa [%g3] ASI_DMMU, %g5
183 .section .sun4v_1insn_patch, "ax"
184 .word 661b
185 ldxa [%g3] ASI_MMU, %g5
186 .previous
188 sethi %hi(sparc64_kern_sec_context), %g2
189 ldx [%g2 + %lo(sparc64_kern_sec_context)], %g2
191 661: stxa %g2, [%g3] ASI_DMMU
192 .section .sun4v_1insn_patch, "ax"
193 .word 661b
194 stxa %g2, [%g3] ASI_MMU
195 .previous
197 membar #Sync
198 mov 0x40, %g2
199 membar #Sync
200 ldda [%g1] ASI_BLK_S, %f0
201 ldda [%g1 + %g2] ASI_BLK_S, %f16
202 add %g1, 0x80, %g1
203 ldda [%g1] ASI_BLK_S, %f32
204 ldda [%g1 + %g2] ASI_BLK_S, %f48
205 membar #Sync
206 fpdis_exit:
208 661: stxa %g5, [%g3] ASI_DMMU
209 .section .sun4v_1insn_patch, "ax"
210 .word 661b
211 stxa %g5, [%g3] ASI_MMU
212 .previous
214 membar #Sync
215 fpdis_exit2:
216 wr %g7, 0, %gsr
217 ldx [%g6 + TI_XFSR], %fsr
218 rdpr %tstate, %g3
219 or %g3, %g4, %g3 ! anal...
220 wrpr %g3, %tstate
221 wr %g0, FPRS_FEF, %fprs ! clean DU/DL bits
222 retry
224 .align 32
/* Hand an fp-other trap that is not the emulated fitos case to the
 * generic C handler do_fpother(pt_regs *), then return via rtrap.
 */
225 fp_other_bounce:
226 call do_fpother
227 add %sp, PTREGS_OFF, %o0 ! delay slot: %o0 = pt_regs
228 ba,pt %xcc, rtrap
229 clr %l6
/* fp-other trap: fast-path emulation of a user-mode "fitos" (int->single)
 * that raised an unfinished-FP-op with the inexact bit clear.  Emulated as
 * fitod into scratch %f62 followed by fdtos into the real destination,
 * dispatched through two jump tables indexed by the rs2/rd fields of the
 * trapped instruction word.  Anything else bounces to fp_other_bounce.
 */
231 .globl do_fpother_check_fitos
232 .align 32
233 do_fpother_check_fitos:
234 TRAP_LOAD_THREAD_REG(%g6, %g1)
235 sethi %hi(fp_other_bounce - 4), %g7
236 or %g7, %lo(fp_other_bounce - 4), %g7
238 /* NOTE: Need to preserve %g7 until we fully commit
239 * to the fitos fixup.
240 */
241 stx %fsr, [%g6 + TI_XFSR]
242 rdpr %tstate, %g3
243 andcc %g3, TSTATE_PRIV, %g0 ! kernel-mode trap: no emulation
244 bne,pn %xcc, do_fptrap_after_fsr
245 nop
246 ldx [%g6 + TI_XFSR], %g3
247 srlx %g3, 14, %g1
248 and %g1, 7, %g1 ! FSR.ftt field
249 cmp %g1, 2 ! Unfinished FP-OP
250 bne,pn %xcc, do_fptrap_after_fsr
251 sethi %hi(1 << 23), %g1 ! Inexact
252 andcc %g3, %g1, %g0
253 bne,pn %xcc, do_fptrap_after_fsr
254 rdpr %tpc, %g1
255 lduwa [%g1] ASI_AIUP, %g3 ! This cannot ever fail
256 #define FITOS_MASK 0xc1f83fe0
257 #define FITOS_COMPARE 0x81a01880
258 sethi %hi(FITOS_MASK), %g1
259 or %g1, %lo(FITOS_MASK), %g1
260 and %g3, %g1, %g1
261 sethi %hi(FITOS_COMPARE), %g2
262 or %g2, %lo(FITOS_COMPARE), %g2
263 cmp %g1, %g2 ! is it really a fitos?
264 bne,pn %xcc, do_fptrap_after_fsr
265 nop
266 std %f62, [%g6 + TI_FPREGS + (62 * 4)] ! preserve scratch %f62
267 sethi %hi(fitos_table_1), %g1
268 and %g3, 0x1f, %g2 ! rs2 field indexes table 1
269 or %g1, %lo(fitos_table_1), %g1
270 sllx %g2, 2, %g2
271 jmpl %g1 + %g2, %g0
272 ba,pt %xcc, fitos_emul_continue ! delay slot executes after fitod
274 fitos_table_1:
275 fitod %f0, %f62
276 fitod %f1, %f62
277 fitod %f2, %f62
278 fitod %f3, %f62
279 fitod %f4, %f62
280 fitod %f5, %f62
281 fitod %f6, %f62
282 fitod %f7, %f62
283 fitod %f8, %f62
284 fitod %f9, %f62
285 fitod %f10, %f62
286 fitod %f11, %f62
287 fitod %f12, %f62
288 fitod %f13, %f62
289 fitod %f14, %f62
290 fitod %f15, %f62
291 fitod %f16, %f62
292 fitod %f17, %f62
293 fitod %f18, %f62
294 fitod %f19, %f62
295 fitod %f20, %f62
296 fitod %f21, %f62
297 fitod %f22, %f62
298 fitod %f23, %f62
299 fitod %f24, %f62
300 fitod %f25, %f62
301 fitod %f26, %f62
302 fitod %f27, %f62
303 fitod %f28, %f62
304 fitod %f29, %f62
305 fitod %f30, %f62
306 fitod %f31, %f62
308 fitos_emul_continue:
309 sethi %hi(fitos_table_2), %g1
310 srl %g3, 25, %g2 ! rd field indexes table 2
311 or %g1, %lo(fitos_table_2), %g1
312 and %g2, 0x1f, %g2
313 sllx %g2, 2, %g2
314 jmpl %g1 + %g2, %g0
315 ba,pt %xcc, fitos_emul_fini
317 fitos_table_2:
318 fdtos %f62, %f0
319 fdtos %f62, %f1
320 fdtos %f62, %f2
321 fdtos %f62, %f3
322 fdtos %f62, %f4
323 fdtos %f62, %f5
324 fdtos %f62, %f6
325 fdtos %f62, %f7
326 fdtos %f62, %f8
327 fdtos %f62, %f9
328 fdtos %f62, %f10
329 fdtos %f62, %f11
330 fdtos %f62, %f12
331 fdtos %f62, %f13
332 fdtos %f62, %f14
333 fdtos %f62, %f15
334 fdtos %f62, %f16
335 fdtos %f62, %f17
336 fdtos %f62, %f18
337 fdtos %f62, %f19
338 fdtos %f62, %f20
339 fdtos %f62, %f21
340 fdtos %f62, %f22
341 fdtos %f62, %f23
342 fdtos %f62, %f24
343 fdtos %f62, %f25
344 fdtos %f62, %f26
345 fdtos %f62, %f27
346 fdtos %f62, %f28
347 fdtos %f62, %f29
348 fdtos %f62, %f30
349 fdtos %f62, %f31
! Restore the clobbered scratch register and complete the trapped
! instruction (done skips past it, unlike retry).
351 fitos_emul_fini:
352 ldd [%g6 + TI_FPREGS + (62 * 4)], %f62
353 done
/* Generic FP trap: save %fsr, accumulate FPRS into TI_FPSAVED, save GSR,
 * then block-store the live FP register halves (per FPRS_DL/FPRS_DU) into
 * TI_FPREGS under the kernel secondary context, disable the FPU and fall
 * into etrap so C code can handle it.  do_fptrap_after_fsr is entered
 * directly by do_fpother_check_fitos with %fsr already saved.
 */
355 .globl do_fptrap
356 .align 32
357 do_fptrap:
358 TRAP_LOAD_THREAD_REG(%g6, %g1)
359 stx %fsr, [%g6 + TI_XFSR]
360 do_fptrap_after_fsr:
361 ldub [%g6 + TI_FPSAVED], %g3
362 rd %fprs, %g1
363 or %g3, %g1, %g3 ! remember which halves are dirty
364 stb %g3, [%g6 + TI_FPSAVED]
365 rd %gsr, %g3
366 stx %g3, [%g6 + TI_GSR]
367 mov SECONDARY_CONTEXT, %g3
369 661: ldxa [%g3] ASI_DMMU, %g5
370 .section .sun4v_1insn_patch, "ax"
371 .word 661b
372 ldxa [%g3] ASI_MMU, %g5
373 .previous
375 sethi %hi(sparc64_kern_sec_context), %g2
376 ldx [%g2 + %lo(sparc64_kern_sec_context)], %g2
378 661: stxa %g2, [%g3] ASI_DMMU
379 .section .sun4v_1insn_patch, "ax"
380 .word 661b
381 stxa %g2, [%g3] ASI_MMU
382 .previous
384 membar #Sync
385 add %g6, TI_FPREGS, %g2
386 andcc %g1, FPRS_DL, %g0 ! lower half live?
387 be,pn %icc, 4f
388 mov 0x40, %g3
389 stda %f0, [%g2] ASI_BLK_S
390 stda %f16, [%g2 + %g3] ASI_BLK_S
391 andcc %g1, FPRS_DU, %g0 ! upper half live?
392 be,pn %icc, 5f
393 4: add %g2, 128, %g2
394 stda %f32, [%g2] ASI_BLK_S
395 stda %f48, [%g2 + %g3] ASI_BLK_S
396 5: mov SECONDARY_CONTEXT, %g1
397 membar #Sync
399 661: stxa %g5, [%g1] ASI_DMMU ! restore original secondary ctx
400 .section .sun4v_1insn_patch, "ax"
401 .word 661b
402 stxa %g5, [%g1] ASI_MMU
403 .previous
405 membar #Sync
406 ba,pt %xcc, etrap
407 wr %g0, 0, %fprs ! delay slot: disable FPU
409 /* The registers for cross calls will be:
410 *
411 * DATA 0: [low 32-bits] Address of function to call, jmp to this
412 * [high 32-bits] MMU Context Argument 0, place in %g5
413 * DATA 1: Address Argument 1, place in %g1
414 * DATA 2: Address Argument 2, place in %g7
415 *
416 * With this method we can do most of the cross-call tlb/cache
417 * flushing very quickly.
418 */
419 .text
420 .align 32
/* Interrupt-vector trap.  Reads incoming DATA0 via ASI_INTR_R; values
 * >= KERNBASE are cross-call function addresses (see the register layout
 * comment above), anything else is a device-interrupt bucket pointer:
 * the bucket is pushed onto this cpu's irq_work list and a
 * PIL_DEVICE_IRQ softint is raised.
 */
421 .globl do_ivec
422 do_ivec:
423 mov 0x40, %g3 ! 0x40 = incoming DATA0 register
424 ldxa [%g3 + %g0] ASI_INTR_R, %g3
425 sethi %hi(KERNBASE), %g4
426 cmp %g3, %g4
427 bgeu,pn %xcc, do_ivec_xcall
428 srlx %g3, 32, %g5
429 stxa %g0, [%g0] ASI_INTR_RECEIVE ! ack the vector
430 membar #Sync
432 sethi %hi(ivector_table), %g2
433 sllx %g3, 3, %g3
434 or %g2, %lo(ivector_table), %g2
435 add %g2, %g3, %g3
437 TRAP_LOAD_IRQ_WORK(%g6, %g1)
439 lduw [%g6], %g5 /* g5 = irq_work(cpu) */
440 stw %g5, [%g3 + 0x00] /* bucket->irq_chain = g5 */
441 stw %g3, [%g6] /* irq_work(cpu) = bucket */
442 wr %g0, 1 << PIL_DEVICE_IRQ, %set_softint
443 retry
/* Cross call: DATA1 -> %g1, DATA2 -> %g7, then jump to the handler
 * address in %g3 (low 32 bits of DATA0; high 32 already in %g5).
 */
444 do_ivec_xcall:
445 mov 0x50, %g1 ! 0x50 = incoming DATA1 register
446 ldxa [%g1 + %g0] ASI_INTR_R, %g1
447 srl %g3, 0, %g3 ! clear upper 32 bits of DATA0
449 mov 0x60, %g7 ! 0x60 = incoming DATA2 register
450 ldxa [%g7 + %g0] ASI_INTR_R, %g7
451 stxa %g0, [%g0] ASI_INTR_RECEIVE
452 membar #Sync
453 ba,pt %xcc, 1f
454 nop
456 .align 32
457 1: jmpl %g3, %g0
458 nop
/* getcc: extract the icc condition codes (TSTATE bits 35:32) from
 * pt_regs->tstate and return them in pt_regs->g1.
 * setcc: the inverse — take the codes from pt_regs->g1 and merge them
 * back into pt_regs->tstate, leaving all other TSTATE bits intact.
 * In:  %o0 = pt_regs pointer.  Clobbers %o1-%o3.
 */
460 .globl getcc, setcc
461 getcc:
462 ldx [%o0 + PT_V9_TSTATE], %o1
463 srlx %o1, 32, %o1
464 and %o1, 0xf, %o1
465 retl
466 stx %o1, [%o0 + PT_V9_G1] ! delay slot
467 setcc:
468 ldx [%o0 + PT_V9_TSTATE], %o1
469 ldx [%o0 + PT_V9_G1], %o2
470 or %g0, %ulo(TSTATE_ICC), %o3
471 sllx %o3, 32, %o3 ! %o3 = TSTATE_ICC mask
472 andn %o1, %o3, %o1 ! clear old icc bits
473 sllx %o2, 32, %o2
474 and %o2, %o3, %o2 ! position new icc bits
475 or %o1, %o2, %o1
476 retl
477 stx %o1, [%o0 + PT_V9_TSTATE] ! delay slot
/* User trap dispatch.  %g3 = handler, %g4 = trap level (per the comment
 * on the label).  If the thread has no utrap table (TI_UTRAPS == 0),
 * fall through to etrap + bad_trap.  Otherwise look up the handler for
 * this trap level and transfer control to it in user context via done.
 */
479 .globl utrap_trap
480 utrap_trap: /* %g3=handler,%g4=level */
481 TRAP_LOAD_THREAD_REG(%g6, %g1)
482 ldx [%g6 + TI_UTRAPS], %g1
483 brnz,pt %g1, invoke_utrap
484 nop
486 ba,pt %xcc, etrap
487 rd %pc, %g7
488 mov %l4, %o1
489 call bad_trap
490 add %sp, PTREGS_OFF, %o0
491 ba,pt %xcc, rtrap
492 clr %l6
494 invoke_utrap:
495 sllx %g3, 3, %g3 ! index utrap table by handler slot
496 ldx [%g1 + %g3], %g1
497 save %sp, -128, %sp
! Force TSTATE.CWP to the current window so the return lands in the
! window we just saved into, then redirect %tnpc to the utrap handler.
498 rdpr %tstate, %l6
499 rdpr %cwp, %l7
500 andn %l6, TSTATE_CWP, %l6
501 wrpr %l6, %l7, %tstate
502 rdpr %tpc, %l6
503 rdpr %tnpc, %l7
504 wrpr %g1, 0, %tnpc
505 done
507 /* We need to carefully read the error status, ACK
508 * the errors, prevent recursive traps, and pass the
509 * information on to C code for logging.
510 *
511 * We pass the AFAR in as-is, and we encode the status
512 * information as described in asm-sparc64/sfafsr.h
513 */
514 .globl __spitfire_access_error
515 __spitfire_access_error:
516 /* Disable ESTATE error reporting so that we do not
517 * take recursive traps and RED state the processor.
518 */
519 stxa %g0, [%g0] ASI_ESTATE_ERROR_EN
520 membar #Sync
522 mov UDBE_UE, %g1
523 ldxa [%g0] ASI_AFSR, %g4 ! Get AFSR
525 /* __spitfire_cee_trap branches here with AFSR in %g4 and
526 * UDBE_CE in %g1. It only clears ESTATE_ERR_CE in the
527 * ESTATE Error Enable register.
528 */
529 __spitfire_cee_trap_continue:
530 ldxa [%g0] ASI_AFAR, %g5 ! Get AFAR
! Encode trap type and a TL>1 flag into the status word %g4
! (layout per asm-sparc64/sfafsr.h, see header comment above).
532 rdpr %tt, %g3
533 and %g3, 0x1ff, %g3 ! Paranoia
534 sllx %g3, SFSTAT_TRAP_TYPE_SHIFT, %g3
535 or %g4, %g3, %g4
536 rdpr %tl, %g3
537 cmp %g3, 1
538 mov 1, %g3
539 bleu %xcc, 1f
540 sllx %g3, SFSTAT_TL_GT_ONE_SHIFT, %g3
542 or %g4, %g3, %g4
544 /* Read in the UDB error register state, clearing the
545 * sticky error bits as-needed. We only clear them if
546 * the UE bit is set. Likewise, __spitfire_cee_trap
547 * below will only do so if the CE bit is set.
548 *
549 * NOTE: UltraSparc-I/II have high and low UDB error
550 * registers, corresponding to the two UDB units
551 * present on those chips. UltraSparc-IIi only
552 * has a single UDB, called "SDB" in the manual.
553 * For IIi the upper UDB register always reads
554 * as zero so for our purposes things will just
555 * work with the checks below.
556 */
557 1: ldxa [%g0] ASI_UDBH_ERROR_R, %g3
558 and %g3, 0x3ff, %g7 ! Paranoia
559 sllx %g7, SFSTAT_UDBH_SHIFT, %g7
560 or %g4, %g7, %g4
561 andcc %g3, %g1, %g3 ! UDBE_UE or UDBE_CE
562 be,pn %xcc, 1f
563 nop
564 stxa %g3, [%g0] ASI_UDB_ERROR_W
565 membar #Sync
567 1: mov 0x18, %g3 ! 0x18 selects the low UDB
568 ldxa [%g3] ASI_UDBL_ERROR_R, %g3
569 and %g3, 0x3ff, %g7 ! Paranoia
570 sllx %g7, SFSTAT_UDBL_SHIFT, %g7
571 or %g4, %g7, %g4
572 andcc %g3, %g1, %g3 ! UDBE_UE or UDBE_CE
573 be,pn %xcc, 1f
574 nop
575 mov 0x18, %g7
576 stxa %g3, [%g7] ASI_UDB_ERROR_W
577 membar #Sync
579 1: /* Ok, now that we've latched the error state,
580 * clear the sticky bits in the AFSR.
581 */
582 stxa %g4, [%g0] ASI_AFSR
583 membar #Sync
! Enter C at PIL 15: etraptl1 for TL>1, etrap_irq for TL1, then call
! spitfire_access_error(pt_regs, status, afar) and leave via rtrap.
585 rdpr %tl, %g2
586 cmp %g2, 1
587 rdpr %pil, %g2
588 bleu,pt %xcc, 1f
589 wrpr %g0, 15, %pil
591 ba,pt %xcc, etraptl1
592 rd %pc, %g7
594 ba,pt %xcc, 2f
595 nop
597 1: ba,pt %xcc, etrap_irq
598 rd %pc, %g7
600 2: mov %l4, %o1
601 mov %l5, %o2
602 call spitfire_access_error
603 add %sp, PTREGS_OFF, %o0
604 ba,pt %xcc, rtrap
605 clr %l6
607 /* This is the trap handler entry point for ECC correctable
608 * errors. They are corrected, but we listen for the trap
609 * so that the event can be logged.
610 *
611 * Disrupting errors are either:
612 * 1) single-bit ECC errors during UDB reads to system
613 * memory
614 * 2) data parity errors during write-back events
615 *
616 * As far as I can make out from the manual, the CEE trap
617 * is only for correctable errors during memory read
618 * accesses by the front-end of the processor.
619 *
620 * The code below is only for trap level 1 CEE events,
621 * as it is the only situation where we can safely record
622 * and log. For trap level >1 we just clear the CE bit
623 * in the AFSR and return.
624 *
625 * This is just like __spitfire_access_error above, but it
626 * specifically handles correctable errors. If an
627 * uncorrectable error is indicated in the AFSR we
628 * will branch directly above to __spitfire_access_error
629 * to handle it instead. Uncorrectable therefore takes
630 * priority over correctable, and the error logging
631 * C code will notice this case by inspecting the
632 * trap type.
633 */
634 .globl __spitfire_cee_trap
635 __spitfire_cee_trap:
636 ldxa [%g0] ASI_AFSR, %g4 ! Get AFSR
637 mov 1, %g3
638 sllx %g3, SFAFSR_UE_SHIFT, %g3
639 andcc %g4, %g3, %g0 ! Check for UE
640 bne,pn %xcc, __spitfire_access_error ! UE wins over CE
641 nop
643 /* Ok, in this case we only have a correctable error.
644 * Indicate we only wish to capture that state in register
645 * %g1, and we only disable CE error reporting unlike UE
646 * handling which disables all errors.
647 */
648 ldxa [%g0] ASI_ESTATE_ERROR_EN, %g3
649 andn %g3, ESTATE_ERR_CE, %g3
650 stxa %g3, [%g0] ASI_ESTATE_ERROR_EN
651 membar #Sync
653 /* Preserve AFSR in %g4, indicate UDB state to capture in %g1 */
654 ba,pt %xcc, __spitfire_cee_trap_continue
655 mov UDBE_CE, %g1
/* Spitfire data access exception handlers (TL1 and TL0 variants).
 * Both capture SFSR/SFAR from the DMMU, clear SFSR, and call the
 * corresponding C handler with (pt_regs, sfsr, sfar).  The TL1 variant
 * first redirects to winfix_dax if the trap came from a window
 * spill/fill handler (tt in [0x80, 0xff]).  NOTE(review): the wrpr of
 * PSTATE_MG|PSTATE_AG toggles those %pstate bits (wrpr XORs rs1 with
 * its immediate per SPARC V9) — confirm intended global-set selection.
 */
657 .globl __spitfire_data_access_exception
658 .globl __spitfire_data_access_exception_tl1
659 __spitfire_data_access_exception_tl1:
660 rdpr %pstate, %g4
661 wrpr %g4, PSTATE_MG|PSTATE_AG, %pstate
662 mov TLB_SFSR, %g3
663 mov DMMU_SFAR, %g5
664 ldxa [%g3] ASI_DMMU, %g4 ! Get SFSR
665 ldxa [%g5] ASI_DMMU, %g5 ! Get SFAR
666 stxa %g0, [%g3] ASI_DMMU ! Clear SFSR.FaultValid bit
667 membar #Sync
668 rdpr %tt, %g3
669 cmp %g3, 0x80 ! first win spill/fill trap
670 blu,pn %xcc, 1f
671 cmp %g3, 0xff ! last win spill/fill trap
672 bgu,pn %xcc, 1f
673 nop
674 ba,pt %xcc, winfix_dax
675 rdpr %tpc, %g3
676 1: sethi %hi(109f), %g7
677 ba,pt %xcc, etraptl1
678 109: or %g7, %lo(109b), %g7 ! return point for etraptl1
679 mov %l4, %o1
680 mov %l5, %o2
681 call spitfire_data_access_exception_tl1
682 add %sp, PTREGS_OFF, %o0
683 ba,pt %xcc, rtrap
684 clr %l6
686 __spitfire_data_access_exception:
687 rdpr %pstate, %g4
688 wrpr %g4, PSTATE_MG|PSTATE_AG, %pstate
689 mov TLB_SFSR, %g3
690 mov DMMU_SFAR, %g5
691 ldxa [%g3] ASI_DMMU, %g4 ! Get SFSR
692 ldxa [%g5] ASI_DMMU, %g5 ! Get SFAR
693 stxa %g0, [%g3] ASI_DMMU ! Clear SFSR.FaultValid bit
694 membar #Sync
695 sethi %hi(109f), %g7
696 ba,pt %xcc, etrap
697 109: or %g7, %lo(109b), %g7
698 mov %l4, %o1
699 mov %l5, %o2
700 call spitfire_data_access_exception
701 add %sp, PTREGS_OFF, %o0
702 ba,pt %xcc, rtrap
703 clr %l6
/* Spitfire instruction access exception handlers (TL1 and TL0).
 * Same pattern as the data-access handlers above, but the SFSR comes
 * from the IMMU and %tpc substitutes for the missing IMMU SFAR.
 */
705 .globl __spitfire_insn_access_exception
706 .globl __spitfire_insn_access_exception_tl1
707 __spitfire_insn_access_exception_tl1:
708 rdpr %pstate, %g4
709 wrpr %g4, PSTATE_MG|PSTATE_AG, %pstate
710 mov TLB_SFSR, %g3
711 ldxa [%g3] ASI_IMMU, %g4 ! Get SFSR
712 rdpr %tpc, %g5 ! IMMU has no SFAR, use TPC
713 stxa %g0, [%g3] ASI_IMMU ! Clear FaultValid bit
714 membar #Sync
715 sethi %hi(109f), %g7
716 ba,pt %xcc, etraptl1
717 109: or %g7, %lo(109b), %g7
718 mov %l4, %o1
719 mov %l5, %o2
720 call spitfire_insn_access_exception_tl1
721 add %sp, PTREGS_OFF, %o0
722 ba,pt %xcc, rtrap
723 clr %l6
725 __spitfire_insn_access_exception:
726 rdpr %pstate, %g4
727 wrpr %g4, PSTATE_MG|PSTATE_AG, %pstate
728 mov TLB_SFSR, %g3
729 ldxa [%g3] ASI_IMMU, %g4 ! Get SFSR
730 rdpr %tpc, %g5 ! IMMU has no SFAR, use TPC
731 stxa %g0, [%g3] ASI_IMMU ! Clear FaultValid bit
732 membar #Sync
733 sethi %hi(109f), %g7
734 ba,pt %xcc, etrap
735 109: or %g7, %lo(109b), %g7
736 mov %l4, %o1
737 mov %l5, %o2
738 call spitfire_insn_access_exception
739 add %sp, PTREGS_OFF, %o0
740 ba,pt %xcc, rtrap
741 clr %l6
743 /* These get patched into the trap table at boot time
744 * once we know we have a cheetah processor.
745 */
/* Cheetah error trap vectors (patched into the trap table at boot, per
 * the comment above).  Each vector disables the relevant caches in the
 * DCU control register, then jumps to the shared handler with %g1 = 0
 * for the TL0 flavor and %g1 = 1 for the TL1 flavor.
 * FECC vectors disable both I- and D-cache; CEE vectors disable only
 * the I-cache; deferred vectors disable both.
 */
746 .globl cheetah_fecc_trap_vector, cheetah_fecc_trap_vector_tl1
747 cheetah_fecc_trap_vector:
748 membar #Sync
749 ldxa [%g0] ASI_DCU_CONTROL_REG, %g1
750 andn %g1, DCU_DC | DCU_IC, %g1
751 stxa %g1, [%g0] ASI_DCU_CONTROL_REG
752 membar #Sync
753 sethi %hi(cheetah_fast_ecc), %g2
754 jmpl %g2 + %lo(cheetah_fast_ecc), %g0
755 mov 0, %g1 ! delay slot: %g1 = 0 (TL0)
756 cheetah_fecc_trap_vector_tl1:
757 membar #Sync
758 ldxa [%g0] ASI_DCU_CONTROL_REG, %g1
759 andn %g1, DCU_DC | DCU_IC, %g1
760 stxa %g1, [%g0] ASI_DCU_CONTROL_REG
761 membar #Sync
762 sethi %hi(cheetah_fast_ecc), %g2
763 jmpl %g2 + %lo(cheetah_fast_ecc), %g0
764 mov 1, %g1 ! delay slot: %g1 = 1 (TL1)
765 .globl cheetah_cee_trap_vector, cheetah_cee_trap_vector_tl1
766 cheetah_cee_trap_vector:
767 membar #Sync
768 ldxa [%g0] ASI_DCU_CONTROL_REG, %g1
769 andn %g1, DCU_IC, %g1
770 stxa %g1, [%g0] ASI_DCU_CONTROL_REG
771 membar #Sync
772 sethi %hi(cheetah_cee), %g2
773 jmpl %g2 + %lo(cheetah_cee), %g0
774 mov 0, %g1
775 cheetah_cee_trap_vector_tl1:
776 membar #Sync
777 ldxa [%g0] ASI_DCU_CONTROL_REG, %g1
778 andn %g1, DCU_IC, %g1
779 stxa %g1, [%g0] ASI_DCU_CONTROL_REG
780 membar #Sync
781 sethi %hi(cheetah_cee), %g2
782 jmpl %g2 + %lo(cheetah_cee), %g0
783 mov 1, %g1
784 .globl cheetah_deferred_trap_vector, cheetah_deferred_trap_vector_tl1
785 cheetah_deferred_trap_vector:
786 membar #Sync
787 ldxa [%g0] ASI_DCU_CONTROL_REG, %g1;
788 andn %g1, DCU_DC | DCU_IC, %g1;
789 stxa %g1, [%g0] ASI_DCU_CONTROL_REG;
790 membar #Sync;
791 sethi %hi(cheetah_deferred_trap), %g2
792 jmpl %g2 + %lo(cheetah_deferred_trap), %g0
793 mov 0, %g1
794 cheetah_deferred_trap_vector_tl1:
795 membar #Sync;
796 ldxa [%g0] ASI_DCU_CONTROL_REG, %g1;
797 andn %g1, DCU_DC | DCU_IC, %g1;
798 stxa %g1, [%g0] ASI_DCU_CONTROL_REG;
799 membar #Sync;
800 sethi %hi(cheetah_deferred_trap), %g2
801 jmpl %g2 + %lo(cheetah_deferred_trap), %g0
802 mov 1, %g1
804 /* Cheetah+ specific traps. These are for the new I/D cache parity
805 * error traps. The first argument to cheetah_plus_parity_handler
806 * is encoded as follows:
807 *
808 * Bit0: 0=dcache,1=icache
809 * Bit1: 0=recoverable,1=unrecoverable
810 */
/* Cheetah+ I/D-cache parity trap vectors and TL0 handlers.  The TL0
 * handlers raise PIL to 15, enter etrap_irq, and call
 * cheetah_plus_parity_error(code, pt_regs) where the code follows the
 * Bit0/Bit1 encoding described in the comment above (0x0 = recoverable
 * dcache, 0x1 = recoverable icache).  The TL1 vectors switch to
 * interrupt globals and jump to do_dcpe_tl1/do_icpe_tl1 below.
 */
811 .globl cheetah_plus_dcpe_trap_vector, cheetah_plus_dcpe_trap_vector_tl1
812 cheetah_plus_dcpe_trap_vector:
813 membar #Sync
814 sethi %hi(do_cheetah_plus_data_parity), %g7
815 jmpl %g7 + %lo(do_cheetah_plus_data_parity), %g0
816 nop
817 nop ! pad the trap-table slot
818 nop
819 nop
820 nop
822 do_cheetah_plus_data_parity:
823 rdpr %pil, %g2
824 wrpr %g0, 15, %pil
825 ba,pt %xcc, etrap_irq
826 rd %pc, %g7
827 mov 0x0, %o0 ! code: recoverable dcache parity
828 call cheetah_plus_parity_error
829 add %sp, PTREGS_OFF, %o1
830 ba,a,pt %xcc, rtrap_irq
832 cheetah_plus_dcpe_trap_vector_tl1:
833 membar #Sync
834 wrpr PSTATE_IG | PSTATE_PEF | PSTATE_PRIV, %pstate
835 sethi %hi(do_dcpe_tl1), %g3
836 jmpl %g3 + %lo(do_dcpe_tl1), %g0
837 nop
838 nop
839 nop
840 nop
842 .globl cheetah_plus_icpe_trap_vector, cheetah_plus_icpe_trap_vector_tl1
843 cheetah_plus_icpe_trap_vector:
844 membar #Sync
845 sethi %hi(do_cheetah_plus_insn_parity), %g7
846 jmpl %g7 + %lo(do_cheetah_plus_insn_parity), %g0
847 nop
848 nop
849 nop
850 nop
851 nop
853 do_cheetah_plus_insn_parity:
854 rdpr %pil, %g2
855 wrpr %g0, 15, %pil
856 ba,pt %xcc, etrap_irq
857 rd %pc, %g7
858 mov 0x1, %o0 ! code: recoverable icache parity
859 call cheetah_plus_parity_error
860 add %sp, PTREGS_OFF, %o1
861 ba,a,pt %xcc, rtrap_irq
863 cheetah_plus_icpe_trap_vector_tl1:
864 membar #Sync
865 wrpr PSTATE_IG | PSTATE_PEF | PSTATE_PRIV, %pstate
866 sethi %hi(do_icpe_tl1), %g3
867 jmpl %g3 + %lo(do_icpe_tl1), %g0
868 nop
869 nop
870 nop
871 nop
873 /* If we take one of these traps when tl >= 1, then we
874 * jump to interrupt globals. If some trap level above us
875 * was also using interrupt globals, we cannot recover.
876 * We may use all interrupt global registers except %g6.
877 */
/* TL>=1 cache-parity handlers.  Both first walk every trap level
 * checking TSTATE_IG; if any level was already on interrupt globals the
 * situation is unrecoverable (see comment above) and we go to the fatal
 * path, which calls cheetah_plus_parity_error with the unrecoverable
 * code (Bit1 set: 0x2 dcache / 0x3 icache).  Otherwise the offending
 * cache is scrubbed line by line, counters are bumped, and both paths
 * join at dcpe_icpe_tl1_common to flush the D-cache, re-enable caches
 * and retry.
 */
878 .globl do_dcpe_tl1, do_icpe_tl1
879 do_dcpe_tl1:
880 rdpr %tl, %g1 ! Save original trap level
881 mov 1, %g2 ! Setup TSTATE checking loop
882 sethi %hi(TSTATE_IG), %g3 ! TSTATE mask bit
883 1: wrpr %g2, %tl ! Set trap level to check
884 rdpr %tstate, %g4 ! Read TSTATE for this level
885 andcc %g4, %g3, %g0 ! Interrupt globals in use?
886 bne,a,pn %xcc, do_dcpe_tl1_fatal ! Yep, irrecoverable
887 wrpr %g1, %tl ! Restore original trap level
888 add %g2, 1, %g2 ! Next trap level
889 cmp %g2, %g1 ! Hit them all yet?
890 ble,pt %icc, 1b ! Not yet
891 nop
892 wrpr %g1, %tl ! Restore original trap level
893 do_dcpe_tl1_nonfatal: /* Ok we may use interrupt globals safely. */
894 sethi %hi(dcache_parity_tl1_occurred), %g2
895 lduw [%g2 + %lo(dcache_parity_tl1_occurred)], %g1
896 add %g1, 1, %g1
897 stw %g1, [%g2 + %lo(dcache_parity_tl1_occurred)]
898 /* Reset D-cache parity */
899 sethi %hi(1 << 16), %g1 ! D-cache size
900 mov (1 << 5), %g2 ! D-cache line size
901 sub %g1, %g2, %g1 ! Move down 1 cacheline
902 1: srl %g1, 14, %g3 ! Compute UTAG
903 membar #Sync
904 stxa %g3, [%g1] ASI_DCACHE_UTAG
905 membar #Sync
906 sub %g2, 8, %g3 ! 64-bit data word within line
907 2: membar #Sync
908 stxa %g0, [%g1 + %g3] ASI_DCACHE_DATA
909 membar #Sync
910 subcc %g3, 8, %g3 ! Next 64-bit data word
911 bge,pt %icc, 2b
912 nop
913 subcc %g1, %g2, %g1 ! Next cacheline
914 bge,pt %icc, 1b
915 nop
916 ba,pt %xcc, dcpe_icpe_tl1_common
917 nop
919 do_dcpe_tl1_fatal:
920 sethi %hi(1f), %g7
921 ba,pt %xcc, etraptl1
922 1: or %g7, %lo(1b), %g7
923 mov 0x2, %o0 ! code: unrecoverable dcache parity
924 call cheetah_plus_parity_error
925 add %sp, PTREGS_OFF, %o1
926 ba,pt %xcc, rtrap
927 clr %l6
929 do_icpe_tl1:
930 rdpr %tl, %g1 ! Save original trap level
931 mov 1, %g2 ! Setup TSTATE checking loop
932 sethi %hi(TSTATE_IG), %g3 ! TSTATE mask bit
933 1: wrpr %g2, %tl ! Set trap level to check
934 rdpr %tstate, %g4 ! Read TSTATE for this level
935 andcc %g4, %g3, %g0 ! Interrupt globals in use?
936 bne,a,pn %xcc, do_icpe_tl1_fatal ! Yep, irrecoverable
937 wrpr %g1, %tl ! Restore original trap level
938 add %g2, 1, %g2 ! Next trap level
939 cmp %g2, %g1 ! Hit them all yet?
940 ble,pt %icc, 1b ! Not yet
941 nop
942 wrpr %g1, %tl ! Restore original trap level
943 do_icpe_tl1_nonfatal: /* Ok we may use interrupt globals safely. */
944 sethi %hi(icache_parity_tl1_occurred), %g2
945 lduw [%g2 + %lo(icache_parity_tl1_occurred)], %g1
946 add %g1, 1, %g1
947 stw %g1, [%g2 + %lo(icache_parity_tl1_occurred)]
948 /* Flush I-cache */
949 sethi %hi(1 << 15), %g1 ! I-cache size
950 mov (1 << 5), %g2 ! I-cache line size
951 sub %g1, %g2, %g1
952 1: or %g1, (2 << 3), %g3
953 stxa %g0, [%g3] ASI_IC_TAG
954 membar #Sync
955 subcc %g1, %g2, %g1
956 bge,pt %icc, 1b
957 nop
958 ba,pt %xcc, dcpe_icpe_tl1_common
959 nop
961 do_icpe_tl1_fatal:
962 sethi %hi(1f), %g7
963 ba,pt %xcc, etraptl1
964 1: or %g7, %lo(1b), %g7
965 mov 0x3, %o0 ! code: unrecoverable icache parity
966 call cheetah_plus_parity_error
967 add %sp, PTREGS_OFF, %o1
968 ba,pt %xcc, rtrap
969 clr %l6
971 dcpe_icpe_tl1_common:
972 /* Flush D-cache, re-enable D/I caches in DCU and finally
973 * retry the trapping instruction.
974 */
975 sethi %hi(1 << 16), %g1 ! D-cache size
976 mov (1 << 5), %g2 ! D-cache line size
977 sub %g1, %g2, %g1
978 1: stxa %g0, [%g1] ASI_DCACHE_TAG
979 membar #Sync
980 subcc %g1, %g2, %g1
981 bge,pt %icc, 1b
982 nop
983 ldxa [%g0] ASI_DCU_CONTROL_REG, %g1
984 or %g1, (DCU_DC | DCU_IC), %g1
985 stxa %g1, [%g0] ASI_DCU_CONTROL_REG
986 membar #Sync
987 retry
989 /* Capture I/D/E-cache state into per-cpu error scoreboard.
990 *
991 * %g1: (TL>=0) ? 1 : 0
992 * %g2: scratch
993 * %g3: scratch
994 * %g4: AFSR
995 * %g5: AFAR
996 * %g6: unused, will have current thread ptr after etrap
997 * %g7: scratch
998 */
/* Capture I/D/E-cache state into the per-cpu cheetah_error_log
 * scoreboard (register contract in the comment block above: %g1 = TL1
 * flag, %g4 = AFSR, %g5 = AFAR).  The cpu id comes from the Safari
 * (or, on Jalapeno, JBUS) config register.  A log slot whose first word
 * is not -1 is considered occupied and skipped.  Finally dispatch on
 * %tt: 0x70 -> c_fast_ecc, 0x63 -> c_cee, else c_deferred.
 */
999 __cheetah_log_error:
1000 /* Put "TL1" software bit into AFSR. */
1001 and %g1, 0x1, %g1
1002 sllx %g1, 63, %g2
1003 or %g4, %g2, %g4
1005 /* Get log entry pointer for this cpu at this trap level. */
1006 BRANCH_IF_JALAPENO(g2,g3,50f)
1007 ldxa [%g0] ASI_SAFARI_CONFIG, %g2
1008 srlx %g2, 17, %g2
1009 ba,pt %xcc, 60f
1010 and %g2, 0x3ff, %g2 ! Safari: 10-bit cpu id
1012 50: ldxa [%g0] ASI_JBUS_CONFIG, %g2
1013 srlx %g2, 17, %g2
1014 and %g2, 0x1f, %g2 ! JBUS: 5-bit cpu id
1016 60: sllx %g2, 9, %g2 ! 512 bytes of log per cpu
1017 sethi %hi(cheetah_error_log), %g3
1018 ldx [%g3 + %lo(cheetah_error_log)], %g3
1019 brz,pn %g3, 80f ! no log buffer allocated yet
1020 nop
1022 add %g3, %g2, %g3
1023 sllx %g1, 8, %g1 ! 256-byte entry per trap level
1024 add %g3, %g1, %g1
1026 /* %g1 holds pointer to the top of the logging scoreboard */
1027 ldx [%g1 + 0x0], %g7
1028 cmp %g7, -1 ! -1 marks a free entry
1029 bne,pn %xcc, 80f
1030 nop
1032 stx %g4, [%g1 + 0x0] ! log AFSR
1033 stx %g5, [%g1 + 0x8] ! log AFAR
1034 add %g1, 0x10, %g1
1036 /* %g1 now points to D-cache logging area */
1037 set 0x3ff8, %g2 /* DC_addr mask */
1038 and %g5, %g2, %g2 /* DC_addr bits of AFAR */
1039 srlx %g5, 12, %g3
1040 or %g3, 1, %g3 /* PHYS tag + valid */
1042 10: ldxa [%g2] ASI_DCACHE_TAG, %g7
1043 cmp %g3, %g7 /* TAG match? */
1044 bne,pt %xcc, 13f
1045 nop
1047 /* Yep, what we want, capture state. */
1048 stx %g2, [%g1 + 0x20]
1049 stx %g7, [%g1 + 0x28]
1051 /* A membar Sync is required before and after utag access. */
1052 membar #Sync
1053 ldxa [%g2] ASI_DCACHE_UTAG, %g7
1054 membar #Sync
1055 stx %g7, [%g1 + 0x30]
1056 ldxa [%g2] ASI_DCACHE_SNOOP_TAG, %g7
1057 stx %g7, [%g1 + 0x38]
1058 clr %g3
1060 12: ldxa [%g2 + %g3] ASI_DCACHE_DATA, %g7
1061 stx %g7, [%g1]
1062 add %g3, (1 << 5), %g3
1063 cmp %g3, (4 << 5) ! 4 data words per line
1064 bl,pt %xcc, 12b
1065 add %g1, 0x8, %g1
1067 ba,pt %xcc, 20f
1068 add %g1, 0x20, %g1
1070 13: sethi %hi(1 << 14), %g7 ! next D-cache way
1071 add %g2, %g7, %g2
1072 srlx %g2, 14, %g7
1073 cmp %g7, 4 ! all 4 ways probed?
1074 bl,pt %xcc, 10b
1075 nop
1077 add %g1, 0x40, %g1 ! no match: skip D-cache area
1079 /* %g1 now points to I-cache logging area */
1080 20: set 0x1fe0, %g2 /* IC_addr mask */
1081 and %g5, %g2, %g2 /* IC_addr bits of AFAR */
1082 sllx %g2, 1, %g2 /* IC_addr[13:6]==VA[12:5] */
1083 srlx %g5, (13 - 8), %g3 /* Make PTAG */
1084 andn %g3, 0xff, %g3 /* Mask off undefined bits */
1086 21: ldxa [%g2] ASI_IC_TAG, %g7
1087 andn %g7, 0xff, %g7
1088 cmp %g3, %g7
1089 bne,pt %xcc, 23f
1090 nop
1092 /* Yep, what we want, capture state. */
1093 stx %g2, [%g1 + 0x40]
1094 stx %g7, [%g1 + 0x48]
1095 add %g2, (1 << 3), %g2
1096 ldxa [%g2] ASI_IC_TAG, %g7
1097 add %g2, (1 << 3), %g2
1098 stx %g7, [%g1 + 0x50]
1099 ldxa [%g2] ASI_IC_TAG, %g7
1100 add %g2, (1 << 3), %g2
1101 stx %g7, [%g1 + 0x60]
1102 ldxa [%g2] ASI_IC_TAG, %g7
1103 stx %g7, [%g1 + 0x68]
1104 sub %g2, (3 << 3), %g2
1105 ldxa [%g2] ASI_IC_STAG, %g7
1106 stx %g7, [%g1 + 0x58]
1107 clr %g3
1108 srlx %g2, 2, %g2
1110 22: ldxa [%g2 + %g3] ASI_IC_INSTR, %g7
1111 stx %g7, [%g1]
1112 add %g3, (1 << 3), %g3
1113 cmp %g3, (8 << 3) ! 8 instruction words
1114 bl,pt %xcc, 22b
1115 add %g1, 0x8, %g1
1117 ba,pt %xcc, 30f
1118 add %g1, 0x30, %g1
1120 23: sethi %hi(1 << 14), %g7 ! next I-cache way
1121 add %g2, %g7, %g2
1122 srlx %g2, 14, %g7
1123 cmp %g7, 4
1124 bl,pt %xcc, 21b
1125 nop
1127 add %g1, 0x70, %g1 ! no match: skip I-cache area
1129 /* %g1 now points to E-cache logging area */
1130 30: andn %g5, (32 - 1), %g2 ! E-cache line of AFAR
1131 stx %g2, [%g1 + 0x20]
1132 ldxa [%g2] ASI_EC_TAG_DATA, %g7
1133 stx %g7, [%g1 + 0x28]
1134 ldxa [%g2] ASI_EC_R, %g0 ! load staging registers
1135 clr %g3
1137 31: ldxa [%g3] ASI_EC_DATA, %g7
1138 stx %g7, [%g1 + %g3]
1139 add %g3, 0x8, %g3
1140 cmp %g3, 0x20
1142 bl,pt %xcc, 31b
1143 nop
1144 80:
1145 rdpr %tt, %g2
1146 cmp %g2, 0x70 ! fast-ECC trap type
1147 be c_fast_ecc
1148 cmp %g2, 0x63 ! correctable-error trap type
1149 be c_cee
1150 nop
1151 ba,pt %xcc, c_deferred
1153 /* Cheetah FECC trap handling, we get here from tl{0,1}_fecc
1154 * in the trap table. That code has done a memory barrier
1155 * and has disabled both the I-cache and D-cache in the DCU
1156 * control register. The I-cache is disabled so that we may
1157 * capture the corrupted cache line, and the D-cache is disabled
1158 * because corrupt data may have been placed there and we don't
1159 * want to reference it.
1161 * %g1 is one if this trap occurred at %tl >= 1.
1163 * Next, we turn off error reporting so that we don't recurse.
1164 */
1165 .globl cheetah_fast_ecc
1166 cheetah_fast_ecc:
1167 ldxa [%g0] ASI_ESTATE_ERROR_EN, %g2 ! read E-state error enable reg
1168 andn %g2, ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN, %g2 ! mask off both error-report enables
1169 stxa %g2, [%g0] ASI_ESTATE_ERROR_EN ! so a recursive error trap cannot fire
1170 membar #Sync
1172 /* Fetch and clear AFSR/AFAR */
1173 ldxa [%g0] ASI_AFSR, %g4 ! %g4 = fault status
1174 ldxa [%g0] ASI_AFAR, %g5 ! %g5 = fault address
1175 stxa %g4, [%g0] ASI_AFSR ! writing AFSR back clears the latched bits
1176 membar #Sync
1178 ba,pt %xcc, __cheetah_log_error ! log state; %g4/%g5 carry AFSR/AFAR
1179 nop
1181 /* Continuation of fast-ECC handling after __cheetah_log_error:
1182 * build a full trap frame and hand off to the C handler.
1183 * NOTE(review): %l4/%l5 appear to carry AFSR/AFAR across etrap_irq
1184 * (see %g4/%g5 capture above) -- confirm against etrap code.
1185 */
1181 c_fast_ecc:
1182 rdpr %pil, %g2 ! save current interrupt level
1183 wrpr %g0, 15, %pil ! raise PIL to 15: block all maskable interrupts
1184 ba,pt %xcc, etrap_irq ! build pt_regs trap frame
1185 rd %pc, %g7 ! delay slot: etrap return address = here
1186 mov %l4, %o1 ! arg1 = afsr
1187 mov %l5, %o2 ! arg2 = afar
1188 call cheetah_fecc_handler ! C: cheetah_fecc_handler(regs, afsr, afar)
1189 add %sp, PTREGS_OFF, %o0 ! delay slot: arg0 = pt_regs
1190 ba,a,pt %xcc, rtrap_irq ! return from trap (annulled delay slot)
1192 /* Our caller has disabled I-cache and performed membar Sync.
1193 *
1194 * Correctable-ECC-error trap entry: disable only correctable-error
1195 * reporting (CEEN) to avoid recursion, capture and clear AFSR/AFAR,
1196 * then log cache/E-cache state.
1197 */
1193 .globl cheetah_cee
1194 cheetah_cee:
1195 ldxa [%g0] ASI_ESTATE_ERROR_EN, %g2
1196 andn %g2, ESTATE_ERROR_CEEN, %g2 ! clear CEEN only (NCEEN stays on)
1197 stxa %g2, [%g0] ASI_ESTATE_ERROR_EN
1198 membar #Sync
1200 /* Fetch and clear AFSR/AFAR */
1201 ldxa [%g0] ASI_AFSR, %g4 ! %g4 = fault status
1202 ldxa [%g0] ASI_AFAR, %g5 ! %g5 = fault address
1203 stxa %g4, [%g0] ASI_AFSR ! write-back clears latched error bits
1204 membar #Sync
1206 ba,pt %xcc, __cheetah_log_error
1207 nop
1209 /* Post-logging continuation for correctable ECC errors:
1210 * trap frame + C handler, mirroring c_fast_ecc.
1211 */
1209 c_cee:
1210 rdpr %pil, %g2 ! save current interrupt level
1211 wrpr %g0, 15, %pil ! block all maskable interrupts
1212 ba,pt %xcc, etrap_irq ! build pt_regs trap frame
1213 rd %pc, %g7 ! delay slot: etrap return address
1214 mov %l4, %o1 ! arg1 = afsr
1215 mov %l5, %o2 ! arg2 = afar
1216 call cheetah_cee_handler ! C: cheetah_cee_handler(regs, afsr, afar)
1217 add %sp, PTREGS_OFF, %o0 ! delay slot: arg0 = pt_regs
1218 ba,a,pt %xcc, rtrap_irq
1220 /* Our caller has disabled I-cache+D-cache and performed membar Sync.
1221 *
1222 * Deferred-error trap entry: disable both error-report enables,
1223 * capture and clear AFSR/AFAR, then log state.
1224 */
1221 .globl cheetah_deferred_trap
1222 cheetah_deferred_trap:
1223 ldxa [%g0] ASI_ESTATE_ERROR_EN, %g2
1224 andn %g2, ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN, %g2 ! mask both enables
1225 stxa %g2, [%g0] ASI_ESTATE_ERROR_EN ! prevent recursive error traps
1226 membar #Sync
1228 /* Fetch and clear AFSR/AFAR */
1229 ldxa [%g0] ASI_AFSR, %g4 ! %g4 = fault status
1230 ldxa [%g0] ASI_AFAR, %g5 ! %g5 = fault address
1231 stxa %g4, [%g0] ASI_AFSR ! write-back clears latched error bits
1232 membar #Sync
1234 ba,pt %xcc, __cheetah_log_error
1235 nop
1237 /* Post-logging continuation for deferred errors:
1238 * trap frame + C handler, mirroring c_fast_ecc / c_cee.
1239 */
1237 c_deferred:
1238 rdpr %pil, %g2 ! save current interrupt level
1239 wrpr %g0, 15, %pil ! block all maskable interrupts
1240 ba,pt %xcc, etrap_irq ! build pt_regs trap frame
1241 rd %pc, %g7 ! delay slot: etrap return address
1242 mov %l4, %o1 ! arg1 = afsr
1243 mov %l5, %o2 ! arg2 = afar
1244 call cheetah_deferred_handler ! C: cheetah_deferred_handler(regs, afsr, afar)
1245 add %sp, PTREGS_OFF, %o0 ! delay slot: arg0 = pt_regs
1246 ba,a,pt %xcc, rtrap_irq
1248 /* Privileged-action trap: clear the MMU fault-valid state, build a
1249 * trap frame, then call do_privact(regs) and return via rtrap.
1250 */
1248 .globl __do_privact
1249 __do_privact:
1250 mov TLB_SFSR, %g3
1251 stxa %g0, [%g3] ASI_DMMU ! Clear FaultValid bit
1252 membar #Sync
1253 sethi %hi(109f), %g7 ! %g7 = etrap return address (label 109)
1254 ba,pt %xcc, etrap
1255 109: or %g7, %lo(109b), %g7 ! delay slot: finish the 109f address
1256 call do_privact
1257 add %sp, PTREGS_OFF, %o0 ! delay slot: arg0 = pt_regs
1258 ba,pt %xcc, rtrap
1259 clr %l6 ! delay slot: rtrap flag
1261 /* Memory-address-not-aligned trap.
1262 * Captures SFAR/SFSR, then either punts to the window fixup code
1263 * (if the trap happened at %tl > 1, i.e. inside window handling)
1264 * or builds a frame and calls mem_address_unaligned().
1265 */
1261 .globl do_mna
1262 do_mna:
1263 rdpr %tl, %g3
1264 cmp %g3, 1 ! sets %icc, consumed by bgu below --
1265 ! intervening insns must not touch the flags
1266 /* Setup %g4/%g5 now as they are used in the
1267 * winfixup code.
1268 */
1269 mov TLB_SFSR, %g3
1270 mov DMMU_SFAR, %g4
1271 ldxa [%g4] ASI_DMMU, %g4 ! %g4 = fault address (SFAR)
1272 ldxa [%g3] ASI_DMMU, %g5 ! %g5 = fault status (SFSR)
1273 stxa %g0, [%g3] ASI_DMMU ! Clear FaultValid bit
1274 membar #Sync
1275 bgu,pn %icc, winfix_mna ! %tl > 1: window handler faulted
1276 rdpr %tpc, %g3 ! delay slot: %g3 = trap PC for winfixup
1278 1: sethi %hi(109f), %g7 ! %g7 = etrap return address
1279 ba,pt %xcc, etrap
1280 109: or %g7, %lo(109b), %g7 ! delay slot
1281 mov %l4, %o1 ! arg1 (saved SFAR -- confirm via etrap)
1282 mov %l5, %o2 ! arg2 (saved SFSR -- confirm via etrap)
1283 call mem_address_unaligned
1284 add %sp, PTREGS_OFF, %o0 ! delay slot: arg0 = pt_regs
1285 ba,pt %xcc, rtrap
1286 clr %l6 ! delay slot: rtrap flag
1288 /* Unaligned LDDF (FP doubleword load) trap: capture SFSR/SFAR,
1289 * clear fault-valid, then call handle_lddfmna(regs, sfar, sfsr).
1290 */
1288 .globl do_lddfmna
1289 do_lddfmna:
1290 sethi %hi(109f), %g7 ! %g7 = etrap return address
1291 mov TLB_SFSR, %g4
1292 ldxa [%g4] ASI_DMMU, %g5 ! %g5 = fault status (SFSR)
1293 stxa %g0, [%g4] ASI_DMMU ! Clear FaultValid bit
1294 membar #Sync
1295 mov DMMU_SFAR, %g4
1296 ldxa [%g4] ASI_DMMU, %g4 ! %g4 = fault address (SFAR)
1297 ba,pt %xcc, etrap
1298 109: or %g7, %lo(109b), %g7 ! delay slot
1299 mov %l4, %o1 ! arg1 (saved SFAR -- confirm via etrap)
1300 mov %l5, %o2 ! arg2 (saved SFSR -- confirm via etrap)
1301 call handle_lddfmna
1302 add %sp, PTREGS_OFF, %o0 ! delay slot: arg0 = pt_regs
1303 ba,pt %xcc, rtrap
1304 clr %l6 ! delay slot: rtrap flag
1306 /* Unaligned STDF (FP doubleword store) trap: identical shape to
1307 * do_lddfmna but dispatches to handle_stdfmna(regs, sfar, sfsr).
1308 */
1306 .globl do_stdfmna
1307 do_stdfmna:
1308 sethi %hi(109f), %g7 ! %g7 = etrap return address
1309 mov TLB_SFSR, %g4
1310 ldxa [%g4] ASI_DMMU, %g5 ! %g5 = fault status (SFSR)
1311 stxa %g0, [%g4] ASI_DMMU ! Clear FaultValid bit
1312 membar #Sync
1313 mov DMMU_SFAR, %g4
1314 ldxa [%g4] ASI_DMMU, %g4 ! %g4 = fault address (SFAR)
1315 ba,pt %xcc, etrap
1316 109: or %g7, %lo(109b), %g7 ! delay slot
1317 mov %l4, %o1 ! arg1 (saved SFAR -- confirm via etrap)
1318 mov %l5, %o2 ! arg2 (saved SFSR -- confirm via etrap)
1319 call handle_stdfmna
1320 add %sp, PTREGS_OFF, %o0 ! delay slot: arg0 = pt_regs
1321 ba,pt %xcc, rtrap
1322 clr %l6 ! delay slot: rtrap flag
1324 /* Breakpoint trap: hand pt_regs to the C handler, then rtrap.
1325 * (Trap frame was built before we got here.)
1326 */
1324 .globl breakpoint_trap
1325 breakpoint_trap:
1326 call sparc_breakpoint
1327 add %sp, PTREGS_OFF, %o0 ! delay slot: arg0 = pt_regs
1328 ba,pt %xcc, rtrap
1329 nop
1331 #if defined(CONFIG_SUNOS_EMUL) || defined(CONFIG_SOLARIS_EMUL) || \
1332 defined(CONFIG_SOLARIS_EMUL_MODULE)
1333 /* SunOS uses syscall zero as the 'indirect syscall' it looks
1334 * like indir_syscall(scall_num, arg0, arg1, arg2...); etc.
1335 * This is complete brain damage.
1336 *
1337 * Validates the inner syscall number, shifts the arguments down
1338 * one slot, and tail-calls the handler from sunos_sys_table
1339 * (or sunos_nosys for out-of-range numbers).
1340 */
1337 .globl sunos_indir
1338 sunos_indir:
1339 srl %o0, 0, %o0 ! zero-extend syscall number to 32 bits
1340 mov %o7, %l4 ! save our return address
1341 cmp %o0, NR_SYSCALLS
1342 blu,a,pt %icc, 1f ! in range: annulled delay slot runs only if taken
1343 sll %o0, 0x2, %o0 ! index = nr * 4 (32-bit table entries)
1344 sethi %hi(sunos_nosys), %l6 ! out of range: use sunos_nosys
1345 b,pt %xcc, 2f
1346 or %l6, %lo(sunos_nosys), %l6
1347 1: sethi %hi(sunos_sys_table), %l7
1348 or %l7, %lo(sunos_sys_table), %l7
1349 lduw [%l7 + %o0], %l6 ! %l6 = handler address from table
1350 2: mov %o1, %o0 ! shift args: argN+1 -> argN
1351 mov %o2, %o1
1352 mov %o3, %o2
1353 mov %o4, %o3
1354 mov %o5, %o4
1355 call %l6 ! invoke handler...
1356 mov %l4, %o7 ! ...delay slot restores %o7: tail call,
1357 ! handler returns to our caller
1358 /* SunOS getpid() returns pid in %i0 and ppid in %i1.
1359 * The delay slot of the second call stores the getppid() result
1360 * into I1 *before* sys_getpid runs -- order is load-bearing.
1361 */
1358 .globl sunos_getpid
1359 sunos_getpid:
1360 call sys_getppid
1361 nop
1362 call sys_getpid
1363 stx %o0, [%sp + PTREGS_OFF + PT_V9_I1] ! delay slot: ppid -> %i1
1364 b,pt %xcc, ret_sys_call
1365 stx %o0, [%sp + PTREGS_OFF + PT_V9_I0] ! delay slot: pid -> %i0
1367 /* SunOS getuid() returns uid in %o0 and euid in %o1.
1368 * Same delay-slot trick as sunos_getpid: euid is stored to I1
1369 * in the second call's delay slot, then uid goes to I0.
1370 */
1368 .globl sunos_getuid
1369 sunos_getuid:
1370 call sys32_geteuid16
1371 nop
1372 call sys32_getuid16
1373 stx %o0, [%sp + PTREGS_OFF + PT_V9_I1] ! delay slot: euid -> %i1
1374 b,pt %xcc, ret_sys_call
1375 stx %o0, [%sp + PTREGS_OFF + PT_V9_I0] ! delay slot: uid -> %i0
1377 /* SunOS getgid() returns gid in %o0 and egid in %o1.
1378 * Same delay-slot trick as sunos_getpid/sunos_getuid.
1379 */
1378 .globl sunos_getgid
1379 sunos_getgid:
1380 call sys32_getegid16
1381 nop
1382 call sys32_getgid16
1383 stx %o0, [%sp + PTREGS_OFF + PT_V9_I1] ! delay slot: egid -> %i1
1384 b,pt %xcc, ret_sys_call
1385 stx %o0, [%sp + PTREGS_OFF + PT_V9_I0] ! delay slot: gid -> %i0
1386 #endif
1388 /* SunOS's execv() call only specifies the argv argument, the
1389 * environment settings are the same as the calling processes.
1390 *
1391 * All three execve flavors load the target C routine into %g1 and
1392 * merge at execve_merge, which flushes register windows and
1393 * tail-jumps to the routine with pt_regs as the argument.
1394 *
1395 * BUGFIX: the .globl directives were swapped -- sys_execve (defined
1396 * unconditionally) was exported only under CONFIG_COMPAT, while
1397 * sunos_execv was exported unconditionally yet only defined inside
1398 * the CONFIG_COMPAT block, breaking !CONFIG_COMPAT links.
1399 */
1391 .globl sys_execve
1392 sys_execve:
1393 sethi %hi(sparc_execve), %g1
1394 ba,pt %xcc, execve_merge
1395 or %g1, %lo(sparc_execve), %g1 ! delay slot: %g1 = sparc_execve
1396 #ifdef CONFIG_COMPAT
1397 .globl sunos_execv
1398 sunos_execv:
1399 stx %g0, [%sp + PTREGS_OFF + PT_V9_I2] ! envp = NULL, fall into sys32_execve
1400 .globl sys32_execve
1401 sys32_execve:
1402 sethi %hi(sparc32_execve), %g1
1403 or %g1, %lo(sparc32_execve), %g1 ! %g1 = sparc32_execve
1404 #endif
1405 execve_merge:
1406 flushw ! flush register windows to the stack
1407 jmpl %g1, %g0 ! tail-jump to the chosen execve routine
1408 add %sp, PTREGS_OFF, %o0 ! delay slot: arg0 = pt_regs
1410 /* Thin syscall stubs: each tail-branches to its C implementation,
1411 * passing the pt_regs pointer (or, for the sigaltstack variants,
1412 * the user stack pointer) in the delay slot.
1413 */
1410 .globl sys_pipe, sys_sigpause, sys_nis_syscall
1411 .globl sys_rt_sigreturn
1412 .globl sys_ptrace
1413 .globl sys_sigaltstack
1414 .align 32
1415 sys_pipe: ba,pt %xcc, sparc_pipe
1416 add %sp, PTREGS_OFF, %o0 ! delay slot: arg0 = pt_regs
1417 sys_nis_syscall:ba,pt %xcc, c_sys_nis_syscall
1418 add %sp, PTREGS_OFF, %o0 ! delay slot: arg0 = pt_regs
1419 sys_memory_ordering:
1420 ba,pt %xcc, sparc_memory_ordering
1421 add %sp, PTREGS_OFF, %o1 ! delay slot: arg1 = pt_regs (%o0 already holds arg0)
1422 sys_sigaltstack:ba,pt %xcc, do_sigaltstack
1423 add %i6, STACK_BIAS, %o2 ! delay slot: arg2 = user %sp (biased)
1424 #ifdef CONFIG_COMPAT
1425 .globl sys32_sigstack
1426 sys32_sigstack: ba,pt %xcc, do_sys32_sigstack
1427 mov %i6, %o2 ! delay slot: arg2 = user %sp (32-bit, no bias)
1428 .globl sys32_sigaltstack
1429 sys32_sigaltstack:
1430 ba,pt %xcc, do_sys32_sigaltstack
1431 mov %i6, %o2 ! delay slot: arg2 = user %sp (32-bit, no bias)
1432 #endif
1433 .align 32
1434 /* sigreturn / ptrace entry stubs. Each one rewrites its own return
1435 * address (%o7) with "add %o7, 1f-.-4, %o7" so the called C routine
1436 * returns to the shared label 1: below rather than to the insn after
1437 * the call. Label 1: then checks the trace flags and either goes
1438 * straight to rtrap or reports syscall exit first.
1439 */
1434 #ifdef CONFIG_COMPAT
1435 .globl sys32_sigreturn
1436 sys32_sigreturn:
1437 add %sp, PTREGS_OFF, %o0 ! arg0 = pt_regs
1438 call do_sigreturn32
1439 add %o7, 1f-.-4, %o7 ! delay slot: retarget return to 1:
1440 nop
1441 #endif
1442 sys_rt_sigreturn:
1443 add %sp, PTREGS_OFF, %o0 ! arg0 = pt_regs
1444 call do_rt_sigreturn
1445 add %o7, 1f-.-4, %o7 ! delay slot: retarget return to 1:
1446 nop
1447 #ifdef CONFIG_COMPAT
1448 .globl sys32_rt_sigreturn
1449 sys32_rt_sigreturn:
1450 add %sp, PTREGS_OFF, %o0 ! arg0 = pt_regs
1451 call do_rt_sigreturn32
1452 add %o7, 1f-.-4, %o7 ! delay slot: retarget return to 1:
1453 nop
1454 #endif
1455 sys_ptrace: add %sp, PTREGS_OFF, %o0 ! arg0 = pt_regs
1456 call do_ptrace
1457 add %o7, 1f-.-4, %o7 ! delay slot: retarget return to 1:
1458 nop
1459 .align 32
1460 1: ldx [%curptr + TI_FLAGS], %l5 ! shared tail: re-check trace flags
1461 andcc %l5, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT), %g0
1462 be,pt %icc, rtrap ! not traced: straight out
1463 clr %l6 ! delay slot: rtrap flag
1464 add %sp, PTREGS_OFF, %o0
1465 call syscall_trace ! report syscall exit to tracer
1466 mov 1, %o1 ! delay slot: arg1 = 1 (exit)
1468 ba,pt %xcc, rtrap
1469 clr %l6 ! delay slot: rtrap flag
1471 /* This is how fork() was meant to be done, 8 instruction entry.
1473 * I questioned the following code briefly, let me clear things
1474 * up so you must not reason on it like I did.
1476 * Know the fork_kpsr etc. we use in the sparc32 port? We don't
1477 * need it here because the only piece of window state we copy to
1478 * the child is the CWP register. Even if the parent sleeps,
1479 * we are safe because we stuck it into pt_regs of the parent
1480 * so it will not change.
1482 * XXX This raises the question, whether we can do the same on
1483 * XXX sparc32 to get rid of fork_kpsr _and_ fork_kwim. The
1484 * XXX answer is yes. We stick fork_kpsr in UREG_G0 and
1485 * XXX fork_kwim in UREG_G1 (global registers are considered
1486 * XXX volatile across a system call in the sparc ABI I think
1487 * XXX if it isn't we can use regs->y instead, anyone who depends
1488 * XXX upon the Y register being preserved across a fork deserves
1489 * XXX to lose).
1491 * In fact we should take advantage of that fact for other things
1492 * during system calls...
1493 */
1494 .globl sys_fork, sys_vfork, sys_clone, sparc_exit
1495 .globl ret_from_syscall
1496 .align 32
1497 sys_vfork: /* Under Linux, vfork and fork are just special cases of clone. */
1498 sethi %hi(0x4000 | 0x0100 | SIGCHLD), %o0 ! 0x4000|0x0100 = CLONE_VFORK|CLONE_VM
1499 or %o0, %lo(0x4000 | 0x0100 | SIGCHLD), %o0
1500 ba,pt %xcc, sys_clone
1501 sys_fork: clr %o1 ! delay slot (also fork entry): new_sp arg = 0
1502 mov SIGCHLD, %o0 ! fork: clone flags = SIGCHLD only
1503 sys_clone: flushw ! flush register windows before duplicating
1504 movrz %o1, %fp, %o1 ! child stack == 0 -> inherit parent's %fp
1505 mov 0, %o3
1506 ba,pt %xcc, sparc_do_fork ! sparc_do_fork(flags, sp, regs, 0)
1507 add %sp, PTREGS_OFF, %o2 ! delay slot: arg2 = pt_regs
1508 ret_from_syscall:
1509 /* Clear current_thread_info()->new_child, and
1510 * check performance counter stuff too.
1511 */
1512 stb %g0, [%g6 + TI_NEW_CHILD] ! %g6 = current_thread_info
1513 ldx [%g6 + TI_FLAGS], %l0
1514 call schedule_tail
1515 mov %g7, %o0 ! delay slot: arg0 = %g7 (prev task -- confirm)
1516 andcc %l0, _TIF_PERFCTR, %g0 ! perf counters active for this task?
1517 be,pt %icc, 1f
1518 nop
1519 ldx [%g6 + TI_PCR], %o7
1520 wr %g0, %o7, %pcr ! restore perf control register
1522 /* Blackbird errata workaround. See commentary in
1523 * smp.c:smp_percpu_timer_interrupt() for more
1524 * information.
1525 */
1526 ba,pt %xcc, 99f
1527 nop
1528 .align 64 ! errata workaround needs aligned wr/rd pair
1529 99: wr %g0, %g0, %pic
1530 rd %pic, %g0
1532 1: b,pt %xcc, ret_sys_call
1533 ldx [%sp + PTREGS_OFF + PT_V9_I0], %o0 ! delay slot: reload syscall retval
1534 sparc_exit: rdpr %pstate, %g2
1535 wrpr %g2, PSTATE_IE, %pstate ! wrpr XORs: clears IE, disabling interrupts
1536 rdpr %otherwin, %g1
1537 rdpr %cansave, %g3
1538 add %g3, %g1, %g3 ! fold user windows into cansave...
1539 wrpr %g3, 0x0, %cansave
1540 wrpr %g0, 0x0, %otherwin ! ...and discard them: process is exiting
1541 wrpr %g2, 0x0, %pstate ! restore original pstate (IE back on)
1542 ba,pt %xcc, sys_exit
1543 stb %g0, [%g6 + TI_WSAVED] ! delay slot: no saved user windows remain
1545 /* Out-of-range syscall number: route to sys_ni_syscall via the
1546 * common dispatch at label 4 (in linux_sparc_syscall below).
1547 */
1545 linux_sparc_ni_syscall:
1546 sethi %hi(sys_ni_syscall), %l7
1547 b,pt %xcc, 4f ! join common dispatch with %l7 = handler
1548 or %l7, %lo(sys_ni_syscall), %l7 ! delay slot
1550 /* Syscall-entry tracing for 32-bit tasks: notify the tracer, then
1551 * reload the (possibly tracer-modified) arguments zero-extended to
1552 * 32 bits and rejoin the dispatch at label 2.
1553 */
1550 linux_syscall_trace32:
1551 add %sp, PTREGS_OFF, %o0
1552 call syscall_trace
1553 clr %o1 ! delay slot: arg1 = 0 (syscall entry)
1554 srl %i0, 0, %o0 ! reload args, zero-extending each to 32 bits
1555 srl %i4, 0, %o4
1556 srl %i1, 0, %o1
1557 srl %i2, 0, %o2
1558 b,pt %xcc, 2f
1559 srl %i3, 0, %o3 ! delay slot
1561 /* Syscall-entry tracing for 64-bit tasks: notify the tracer, then
1562 * reload the arguments and rejoin the dispatch at label 2.
1563 */
1561 linux_syscall_trace:
1562 add %sp, PTREGS_OFF, %o0
1563 call syscall_trace
1564 clr %o1 ! delay slot: arg1 = 0 (syscall entry)
1565 mov %i0, %o0 ! reload args (tracer may have changed them)
1566 mov %i1, %o1
1567 mov %i2, %o2
1568 mov %i3, %o3
1569 b,pt %xcc, 2f
1570 mov %i4, %o4 ! delay slot
1573 /* Linux 32-bit and SunOS system calls enter here...
1574 *
1575 * %g1 = syscall number, %i0-%i5 = user args, %l7 = syscall table.
1576 * Arguments are zero-extended to 32 bits (srl reg, 0, reg) before
1577 * the table handler is called. The "! IEU0/IEU1/Load" annotations
1578 * are UltraSPARC pipeline-slot scheduling notes.
1579 */
1574 .align 32
1575 .globl linux_sparc_syscall32
1576 linux_sparc_syscall32:
1577 /* Direct access to user regs, much faster. */
1578 cmp %g1, NR_SYSCALLS ! IEU1 Group
1579 bgeu,pn %xcc, linux_sparc_ni_syscall ! CTI
1580 srl %i0, 0, %o0 ! IEU0
1581 sll %g1, 2, %l4 ! IEU0 Group ! table offset = nr * 4
1582 srl %i4, 0, %o4 ! IEU1
1583 lduw [%l7 + %l4], %l7 ! Load ! %l7 = handler address
1584 srl %i1, 0, %o1 ! IEU0 Group
1585 ldx [%curptr + TI_FLAGS], %l0 ! Load
1587 srl %i5, 0, %o5 ! IEU1
1588 srl %i2, 0, %o2 ! IEU0 Group
1589 andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT), %g0
1590 bne,pn %icc, linux_syscall_trace32 ! CTI ! traced: detour via trace stub
1591 mov %i0, %l5 ! IEU1 ! save orig arg0 for restart
1592 call %l7 ! CTI Group brk forced
1593 srl %i3, 0, %o3 ! IEU0 ! delay slot: last arg
1594 ba,a,pt %xcc, 3f ! join common return path
1596 /* Linux native and SunOS system calls enter here...
1597 *
1598 * %g1 = syscall number, %i0-%i5 = args, %l7 = syscall table.
1599 * ret_sys_call encodes success/failure in the condition-code carry
1600 * bit of the saved TSTATE: carry clear = success, carry set =
1601 * failure with abs(errno) in %i0 (the SPARC syscall ABI).
1602 */
1597 .align 32
1598 .globl linux_sparc_syscall, ret_sys_call
1599 linux_sparc_syscall:
1600 /* Direct access to user regs, much faster. */
1601 cmp %g1, NR_SYSCALLS ! IEU1 Group
1602 bgeu,pn %xcc, linux_sparc_ni_syscall ! CTI
1603 mov %i0, %o0 ! IEU0
1604 sll %g1, 2, %l4 ! IEU0 Group ! table offset = nr * 4
1605 mov %i1, %o1 ! IEU1
1606 lduw [%l7 + %l4], %l7 ! Load ! %l7 = handler address
1607 4: mov %i2, %o2 ! IEU0 Group ! 4: = entry from ni_syscall
1608 ldx [%curptr + TI_FLAGS], %l0 ! Load
1610 mov %i3, %o3 ! IEU1
1611 mov %i4, %o4 ! IEU0 Group
1612 andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT), %g0
1613 bne,pn %icc, linux_syscall_trace ! CTI Group ! traced: detour via trace stub
1614 mov %i0, %l5 ! IEU0 ! save orig arg0 for restart
1615 2: call %l7 ! CTI Group brk forced ! 2: = re-entry after tracing
1616 mov %i5, %o5 ! IEU0 ! delay slot: last arg
1617 nop
1619 3: stx %o0, [%sp + PTREGS_OFF + PT_V9_I0] ! store retval into saved %i0
1620 ret_sys_call:
1621 ldx [%sp + PTREGS_OFF + PT_V9_TSTATE], %g3
1622 ldx [%sp + PTREGS_OFF + PT_V9_TNPC], %l1 ! pc = npc
1623 sra %o0, 0, %o0 ! sign-extend 32-bit result
1624 mov %ulo(TSTATE_XCARRY | TSTATE_ICARRY), %g2
1625 sllx %g2, 32, %g2 ! %g2 = both carry bits in TSTATE position
1627 /* Check if force_successful_syscall_return()
1628 * was invoked.
1629 */
1630 ldub [%curptr + TI_SYS_NOERROR], %l2
1631 brnz,a,pn %l2, 80f ! forced success: skip errno check
1632 stb %g0, [%curptr + TI_SYS_NOERROR] ! annulled slot: clear the flag
1634 cmp %o0, -ERESTART_RESTARTBLOCK ! retval in [-ERESTART.., -1] = error
1635 bgeu,pn %xcc, 1f ! (unsigned compare: negative errnos)
1636 andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT), %l6
1637 80:
1638 /* System call success, clear Carry condition code. */
1639 andn %g3, %g2, %g3 ! TSTATE carry bits := 0
1640 stx %g3, [%sp + PTREGS_OFF + PT_V9_TSTATE]
1641 bne,pn %icc, linux_syscall_trace2 ! traced: report exit first
1642 add %l1, 0x4, %l2 ! npc = npc+4
1643 stx %l1, [%sp + PTREGS_OFF + PT_V9_TPC]
1644 ba,pt %xcc, rtrap_clr_l6
1645 stx %l2, [%sp + PTREGS_OFF + PT_V9_TNPC] ! delay slot
1647 1:
1648 /* System call failure, set Carry condition code.
1649 * Also, get abs(errno) to return to the process.
1650 */
1651 andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT), %l6
1652 sub %g0, %o0, %o0 ! %o0 = -retval = abs(errno)
1653 or %g3, %g2, %g3 ! TSTATE carry bits := 1
1654 stx %o0, [%sp + PTREGS_OFF + PT_V9_I0]
1655 mov 1, %l6
1656 stx %g3, [%sp + PTREGS_OFF + PT_V9_TSTATE]
1657 bne,pn %icc, linux_syscall_trace2 ! traced: report exit first
1658 add %l1, 0x4, %l2 ! npc = npc+4
1659 stx %l1, [%sp + PTREGS_OFF + PT_V9_TPC]
1661 b,pt %xcc, rtrap
1662 stx %l2, [%sp + PTREGS_OFF + PT_V9_TNPC] ! delay slot
1663 linux_syscall_trace2:
1664 add %sp, PTREGS_OFF, %o0
1665 call syscall_trace ! report syscall exit to tracer
1666 mov 1, %o1 ! delay slot: arg1 = 1 (exit)
1667 stx %l1, [%sp + PTREGS_OFF + PT_V9_TPC]
1668 ba,pt %xcc, rtrap
1669 stx %l2, [%sp + PTREGS_OFF + PT_V9_TNPC] ! delay slot
1671 .align 32
1672 /* Flush the current task's user register windows to memory:
1673 * "save" until %otherwin drains to zero (counting the saves in
1674 * %g2), then "restore" the same number of times to get back to the
1675 * original window. No-op when %otherwin is already zero.
1676 */
1672 .globl __flushw_user
1673 __flushw_user:
1674 rdpr %otherwin, %g1
1675 brz,pn %g1, 2f ! no user windows: done
1676 clr %g2 ! delay slot: save counter = 0
1677 1: save %sp, -128, %sp ! push a window (may trap to spill)
1678 rdpr %otherwin, %g1
1679 brnz,pt %g1, 1b ! keep going until drained
1680 add %g2, 1, %g2 ! delay slot: count the save
1681 1: sub %g2, 1, %g2 ! (numeric label reused) unwind loop
1682 brnz,pt %g2, 1b
1683 restore %g0, %g0, %g0 ! delay slot: pop one window
1684 2: retl
1685 nop
1687 /* Return the physical CPU id in %o0. On SMP, hard_smp_processor_id
1688 * is an alias for the same entry point.
1689 */
1687 #ifdef CONFIG_SMP
1688 .globl hard_smp_processor_id
1689 hard_smp_processor_id:
1690 #endif
1691 .globl real_hard_smp_processor_id
1692 real_hard_smp_processor_id:
1693 __GET_CPUID(%o0) ! macro expands to the cpuid read sequence
1694 retl
1695 nop
1697 /* %o0: devhandle
1698 * %o1: devino
1700 * returns %o0: sysino
1701 *
1702 * sun4v hypervisor fast trap; result comes back in %o1 and is
1703 * moved to %o0 in the return delay slot.
1704 */
1702 .globl sun4v_devino_to_sysino
1703 sun4v_devino_to_sysino:
1704 mov HV_FAST_INTR_DEVINO2SYSINO, %o5 ! %o5 = hypervisor function number
1705 ta HV_FAST_TRAP
1706 retl
1707 mov %o1, %o0 ! delay slot: return sysino
1709 /* %o0: sysino
1711 * returns %o0: intr_enabled (HV_INTR_{DISABLED,ENABLED})
1712 */
1713 .globl sun4v_intr_getenabled
1714 sun4v_intr_getenabled:
1715 mov HV_FAST_INTR_GETENABLED, %o5 ! %o5 = hypervisor function number
1716 ta HV_FAST_TRAP
1717 retl
1718 mov %o1, %o0 ! delay slot: return enabled state
1720 /* %o0: sysino
1721 * %o1: intr_enabled (HV_INTR_{DISABLED,ENABLED})
1722 */
1723 .globl sun4v_intr_setenabled
1724 sun4v_intr_setenabled:
1725 mov HV_FAST_INTR_SETENABLED, %o5 ! %o5 = hypervisor function number
1726 ta HV_FAST_TRAP
1727 retl
1728 nop ! no result to return
1730 /* %o0: sysino
1732 * returns %o0: intr_state (HV_INTR_STATE_*)
1733 */
1734 .globl sun4v_intr_getstate
1735 sun4v_intr_getstate:
1736 mov HV_FAST_INTR_GETSTATE, %o5 ! %o5 = hypervisor function number
1737 ta HV_FAST_TRAP
1738 retl
1739 mov %o1, %o0 ! delay slot: return state
1741 /* %o0: sysino
1742 * %o1: intr_state (HV_INTR_STATE_*)
1743 */
1744 .globl sun4v_intr_setstate
1745 sun4v_intr_setstate:
1746 mov HV_FAST_INTR_SETSTATE, %o5 ! %o5 = hypervisor function number
1747 ta HV_FAST_TRAP
1748 retl
1749 nop ! no result to return
1751 /* %o0: sysino
1753 * returns %o0: cpuid
1754 */
1755 .globl sun4v_intr_gettarget
1756 sun4v_intr_gettarget:
1757 mov HV_FAST_INTR_GETTARGET, %o5 ! %o5 = hypervisor function number
1758 ta HV_FAST_TRAP
1759 retl
1760 mov %o1, %o0 ! delay slot: return target cpuid
1762 /* %o0: sysino
1763 * %o1: cpuid
1764 */
1765 .globl sun4v_intr_settarget
1766 sun4v_intr_settarget:
1767 mov HV_FAST_INTR_SETTARGET, %o5 ! %o5 = hypervisor function number
1768 ta HV_FAST_TRAP
1769 retl
1770 nop ! no result to return
1772 /* %o0: type
1773 * %o1: queue paddr
1774 * %o2: num queue entries
1776 * returns %o0: status
1777 */
1778 .globl sun4v_cpu_qconf
1779 sun4v_cpu_qconf:
1780 mov HV_FAST_CPU_QCONF, %o5 ! %o5 = hypervisor function number
1781 ta HV_FAST_TRAP
1782 retl
1783 nop ! status already in %o0
1785 /* returns %o0: status
1786 */
1787 .globl sun4v_cpu_yield
1788 sun4v_cpu_yield:
1789 mov HV_FAST_CPU_YIELD, %o5 ! %o5 = hypervisor function number
1790 ta HV_FAST_TRAP
1791 retl
1792 nop ! status already in %o0
1794 /* %o0: num cpus in cpu list
1795 * %o1: cpu list paddr
1796 * %o2: mondo block paddr
1798 * returns %o0: status
1799 */
1800 .globl sun4v_cpu_mondo_send
1801 sun4v_cpu_mondo_send:
1802 mov HV_FAST_CPU_MONDO_SEND, %o5 ! %o5 = hypervisor function number
1803 ta HV_FAST_TRAP
1804 retl
1805 nop ! status already in %o0
1807 /* %o0: CPU ID
1809 * returns %o0: -status if status non-zero, else
1810 * %o0: cpu state as HV_CPU_STATE_*
1811 */
1812 .globl sun4v_cpu_state
1813 sun4v_cpu_state:
1814 mov HV_FAST_CPU_STATE, %o5 ! %o5 = hypervisor function number
1815 ta HV_FAST_TRAP
1816 brnz,pn %o0, 1f ! hypervisor status != 0 -> error path
1817 sub %g0, %o0, %o0 ! delay slot (always runs): %o0 = -status
1818 ! (harmless when status == 0)
1819 mov %o1, %o0 ! success: return the cpu state
1820 1: retl
1821 nop