ia64/linux-2.6.18-xen.hg

arch/sparc64/kernel/rtrap.S @ 897:329ea0ccb344

balloon: try harder to balloon up under memory pressure.

Currently, if the balloon driver is unable to increase the guest's
reservation, it assumes the failure was due to reaching its full
allocation, gives up on the ballooning operation and records the limit
it reached as the "hard limit". The driver will not try again until
the target is set again (even to the same value).

However, it is possible that ballooning has in fact failed due to
memory pressure in the host, and it is therefore desirable to keep
attempting to reach the target in case memory becomes available. The
most likely scenario is that some guests are ballooning down while
others are ballooning up, creating temporary memory pressure while
things stabilise. You would not expect a well-behaved toolstack to
ask a domain to balloon to more than its allocation, nor would you
expect it to deliberately over-commit memory by setting balloon
targets which exceed the total host memory.

This patch drops the concept of a hard limit and causes the balloon
driver to retry increasing the reservation on a timer in the same
manner as when decreasing the reservation.

Also, if we partially succeed in increasing the reservation
(i.e. receive fewer pages than we asked for) then we may as well keep
those pages rather than returning them to Xen.

Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Fri Jun 05 14:01:20 2009 +0100 (2009-06-05)
parents 831230e53067
children
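
For context, a minimal sketch of the retry behaviour this patch
describes, in kernel-style C. The names balloon_retry_timer,
current_target() and increase_reservation() are illustrative
stand-ins, not the driver's actual symbols:

        #include <linux/timer.h>        /* mod_timer(), jiffies, HZ */

        static struct timer_list balloon_retry_timer;  /* hypothetical */
        static unsigned long current_pages;            /* pages we hold */

        static unsigned long current_target(void);     /* hypothetical */
        static long increase_reservation(long nr);     /* hypothetical:
                                                          returns pages
                                                          actually granted */

        static void balloon_up(void)
        {
                long credit = current_target() - current_pages;

                if (credit <= 0)
                        return;

                /* Keep whatever pages Xen did grant, even on a partial
                 * success, rather than handing them back. */
                current_pages += increase_reservation(credit);

                /* If we fell short, rearm the timer and retry later,
                 * just as the balloon-down path does, instead of
                 * recording a "hard limit" and giving up until a new
                 * target is written. */
                if (current_pages < current_target())
                        mod_timer(&balloon_retry_timer, jiffies + HZ);
        }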
/* $Id: rtrap.S,v 1.61 2002/02/09 19:49:31 davem Exp $
 * rtrap.S: Preparing for return from trap on Sparc V9.
 *
 * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 */

#include <asm/asi.h>
#include <asm/pstate.h>
#include <asm/ptrace.h>
#include <asm/spitfire.h>
#include <asm/head.h>
#include <asm/visasm.h>
#include <asm/processor.h>

#define RTRAP_PSTATE            (PSTATE_RMO|PSTATE_PEF|PSTATE_PRIV|PSTATE_IE)
#define RTRAP_PSTATE_IRQOFF     (PSTATE_RMO|PSTATE_PEF|PSTATE_PRIV)
#define RTRAP_PSTATE_AG_IRQOFF  (PSTATE_RMO|PSTATE_PEF|PSTATE_PRIV|PSTATE_AG)

/* Register %l6 keeps track of whether we are returning
 * from a system call or not. It is cleared if we call
 * do_notify_resume, and it must not be otherwise modified
 * until we fully commit to returning to userspace.
 */
        .text
        .align  32
__handle_softirq:
        call    do_softirq
        nop
        ba,a,pt %xcc, __handle_softirq_continue
        nop
__handle_preemption:
        call    schedule
        wrpr    %g0, RTRAP_PSTATE, %pstate
        ba,pt   %xcc, __handle_preemption_continue
        wrpr    %g0, RTRAP_PSTATE_IRQOFF, %pstate

__handle_user_windows:
        call    fault_in_user_windows
        wrpr    %g0, RTRAP_PSTATE, %pstate
        wrpr    %g0, RTRAP_PSTATE_IRQOFF, %pstate
        /* Redo sched+sig checks */
        ldx     [%g6 + TI_FLAGS], %l0
        andcc   %l0, _TIF_NEED_RESCHED, %g0

        be,pt   %xcc, 1f
        nop
        call    schedule
        wrpr    %g0, RTRAP_PSTATE, %pstate
        wrpr    %g0, RTRAP_PSTATE_IRQOFF, %pstate
        ldx     [%g6 + TI_FLAGS], %l0

1:      andcc   %l0, (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK), %g0
        be,pt   %xcc, __handle_user_windows_continue
        nop
        mov     %l5, %o1
        mov     %l6, %o2
        add     %sp, PTREGS_OFF, %o0
        mov     %l0, %o3

        call    do_notify_resume
        wrpr    %g0, RTRAP_PSTATE, %pstate
        wrpr    %g0, RTRAP_PSTATE_IRQOFF, %pstate
        clr     %l6
        /* Signal delivery can modify pt_regs tstate, so we must
         * reload it.
         */
        ldx     [%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
        sethi   %hi(0xf << 20), %l4
        and     %l1, %l4, %l4
        ba,pt   %xcc, __handle_user_windows_continue
        andn    %l1, %l4, %l1
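
/* Update the performance counters, then redo the user-window,
 * reschedule and signal checks with IRQs disabled, mirroring the
 * checks done on the main to_user path.
 */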
__handle_perfctrs:
        call    update_perfctrs
        wrpr    %g0, RTRAP_PSTATE, %pstate
        wrpr    %g0, RTRAP_PSTATE_IRQOFF, %pstate
        ldub    [%g6 + TI_WSAVED], %o2
        brz,pt  %o2, 1f
        nop
        /* Redo userwin+sched+sig checks */
        call    fault_in_user_windows

        wrpr    %g0, RTRAP_PSTATE, %pstate
        wrpr    %g0, RTRAP_PSTATE_IRQOFF, %pstate
        ldx     [%g6 + TI_FLAGS], %l0
        andcc   %l0, _TIF_NEED_RESCHED, %g0
        be,pt   %xcc, 1f
        nop
        call    schedule
        wrpr    %g0, RTRAP_PSTATE, %pstate
        wrpr    %g0, RTRAP_PSTATE_IRQOFF, %pstate
        ldx     [%g6 + TI_FLAGS], %l0
1:      andcc   %l0, (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK), %g0

        be,pt   %xcc, __handle_perfctrs_continue
        sethi   %hi(TSTATE_PEF), %o0
        mov     %l5, %o1
        mov     %l6, %o2
        add     %sp, PTREGS_OFF, %o0
        mov     %l0, %o3
        call    do_notify_resume

        wrpr    %g0, RTRAP_PSTATE, %pstate
        wrpr    %g0, RTRAP_PSTATE_IRQOFF, %pstate
        clr     %l6
        /* Signal delivery can modify pt_regs tstate, so we must
         * reload it.
         */
        ldx     [%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
        sethi   %hi(0xf << 20), %l4
        and     %l1, %l4, %l4
        andn    %l1, %l4, %l1
        ba,pt   %xcc, __handle_perfctrs_continue
        sethi   %hi(TSTATE_PEF), %o0
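
/* If the user never enabled the FPU (FPRS_FEF is clear), drop
 * TSTATE_PEF from the saved %tstate so that the next FPU access
 * from userspace traps.
 */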
__handle_userfpu:
        rd      %fprs, %l5
        andcc   %l5, FPRS_FEF, %g0
        sethi   %hi(TSTATE_PEF), %o0
        be,a,pn %icc, __handle_userfpu_continue
        andn    %l1, %o0, %l1
        ba,a,pt %xcc, __handle_userfpu_continue

__handle_signal:
        mov     %l5, %o1
        mov     %l6, %o2
        add     %sp, PTREGS_OFF, %o0
        mov     %l0, %o3
        call    do_notify_resume
        wrpr    %g0, RTRAP_PSTATE, %pstate
        wrpr    %g0, RTRAP_PSTATE_IRQOFF, %pstate
        clr     %l6

        /* Signal delivery can modify pt_regs tstate, so we must
         * reload it.
         */
        ldx     [%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
        sethi   %hi(0xf << 20), %l4
        and     %l1, %l4, %l4
        ba,pt   %xcc, __handle_signal_continue
        andn    %l1, %l4, %l1
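
/* Main return-from-trap entry points. rtrap_clr_l6 additionally
 * clears the "returning from a syscall" flag in %l6; rtrap itself
 * first checks for pending softirqs before examining the saved
 * %tstate.
 */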
        .align  64
        .globl  rtrap_irq, rtrap_clr_l6, rtrap, irqsz_patchme, rtrap_xcall
rtrap_irq:
rtrap_clr_l6:   clr     %l6
rtrap:
#ifndef CONFIG_SMP
        sethi   %hi(per_cpu____cpu_data), %l0
        lduw    [%l0 + %lo(per_cpu____cpu_data)], %l1
#else
        sethi   %hi(per_cpu____cpu_data), %l0
        or      %l0, %lo(per_cpu____cpu_data), %l0
        lduw    [%l0 + %g5], %l1
#endif
        cmp     %l1, 0

        /* mm/ultra.S:xcall_report_regs KNOWS about this load. */
        bne,pn  %icc, __handle_softirq
        ldx     [%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
__handle_softirq_continue:
rtrap_xcall:
        sethi   %hi(0xf << 20), %l4
        andcc   %l1, TSTATE_PRIV, %l3
        and     %l1, %l4, %l4
        bne,pn  %icc, to_kernel
        andn    %l1, %l4, %l1

        /* We must hold IRQs off and atomically test schedule+signal
         * state, then hold them off all the way back to userspace.
         * If we are returning to kernel, none of this matters.
         *
         * If we do not do this, there is a window where we would do
         * the tests, later the signal/resched event arrives but we do
         * not process it since we are still in kernel mode. It would
         * take until the next local IRQ before the signal/resched
         * event would be handled.
         *
         * This also means that if we have to deal with performance
         * counters or user windows, we have to redo all of these
         * sched+signal checks with IRQs disabled.
         */
to_user:        wrpr    %g0, RTRAP_PSTATE_IRQOFF, %pstate
        wrpr    0, %pil
__handle_preemption_continue:
        ldx     [%g6 + TI_FLAGS], %l0
        sethi   %hi(_TIF_USER_WORK_MASK), %o0
        or      %o0, %lo(_TIF_USER_WORK_MASK), %o0
        andcc   %l0, %o0, %g0
        sethi   %hi(TSTATE_PEF), %o0
        be,pt   %xcc, user_nowork
        andcc   %l1, %o0, %g0
        andcc   %l0, _TIF_NEED_RESCHED, %g0
        bne,pn  %xcc, __handle_preemption
        andcc   %l0, (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK), %g0
        bne,pn  %xcc, __handle_signal
__handle_signal_continue:
        ldub    [%g6 + TI_WSAVED], %o2
        brnz,pn %o2, __handle_user_windows
        nop
__handle_user_windows_continue:
        ldx     [%g6 + TI_FLAGS], %l5
        andcc   %l5, _TIF_PERFCTR, %g0
        sethi   %hi(TSTATE_PEF), %o0
        bne,pn  %xcc, __handle_perfctrs
__handle_perfctrs_continue:
        andcc   %l1, %o0, %g0

        /* This fpdepth clear is necessary for non-syscall rtraps only */
user_nowork:
        bne,pn  %xcc, __handle_userfpu
        stb     %g0, [%g6 + TI_FPDEPTH]
__handle_userfpu_continue:
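
/* Common exit path: restore the global and input registers from
 * pt_regs, then rebuild the trap state (%tstate, %tpc, %tnpc and
 * %pil) for the final return into the trapping context.
 */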
rt_continue:    ldx     [%sp + PTREGS_OFF + PT_V9_G1], %g1
        ldx     [%sp + PTREGS_OFF + PT_V9_G2], %g2

        ldx     [%sp + PTREGS_OFF + PT_V9_G3], %g3
        ldx     [%sp + PTREGS_OFF + PT_V9_G4], %g4
        ldx     [%sp + PTREGS_OFF + PT_V9_G5], %g5
        brz,pt  %l3, 1f
        mov     %g6, %l2

        /* Must do this before thread reg is clobbered below. */
        LOAD_PER_CPU_BASE(%g5, %g6, %i0, %i1, %i2)
1:
        ldx     [%sp + PTREGS_OFF + PT_V9_G6], %g6
        ldx     [%sp + PTREGS_OFF + PT_V9_G7], %g7

        /* Normal globals are restored, go to trap globals. */
661:    wrpr    %g0, RTRAP_PSTATE_AG_IRQOFF, %pstate
        nop
        .section        .sun4v_2insn_patch, "ax"
        .word   661b
        wrpr    %g0, RTRAP_PSTATE_IRQOFF, %pstate
        SET_GL(1)
        .previous

        mov     %l2, %g6

        ldx     [%sp + PTREGS_OFF + PT_V9_I0], %i0
        ldx     [%sp + PTREGS_OFF + PT_V9_I1], %i1

        ldx     [%sp + PTREGS_OFF + PT_V9_I2], %i2
        ldx     [%sp + PTREGS_OFF + PT_V9_I3], %i3
        ldx     [%sp + PTREGS_OFF + PT_V9_I4], %i4
        ldx     [%sp + PTREGS_OFF + PT_V9_I5], %i5
        ldx     [%sp + PTREGS_OFF + PT_V9_I6], %i6
        ldx     [%sp + PTREGS_OFF + PT_V9_I7], %i7
        ldx     [%sp + PTREGS_OFF + PT_V9_TPC], %l2
        ldx     [%sp + PTREGS_OFF + PT_V9_TNPC], %o2

        ld      [%sp + PTREGS_OFF + PT_V9_Y], %o3
        wr      %o3, %g0, %y
        srl     %l4, 20, %l4
        wrpr    %l4, 0x0, %pil
        wrpr    %g0, 0x1, %tl
        wrpr    %l1, %g0, %tstate
        wrpr    %l2, %g0, %tpc
        wrpr    %o2, %g0, %tnpc

        brnz,pn %l3, kern_rtt
        mov     PRIMARY_CONTEXT, %l7

661:    ldxa    [%l7 + %l7] ASI_DMMU, %l0
        .section        .sun4v_1insn_patch, "ax"
        .word   661b
        ldxa    [%l7 + %l7] ASI_MMU, %l0
        .previous

        sethi   %hi(sparc64_kern_pri_nuc_bits), %l1
        ldx     [%l1 + %lo(sparc64_kern_pri_nuc_bits)], %l1
        or      %l0, %l1, %l0

661:    stxa    %l0, [%l7] ASI_DMMU
        .section        .sun4v_1insn_patch, "ax"
        .word   661b
        stxa    %l0, [%l7] ASI_MMU
        .previous

        sethi   %hi(KERNBASE), %l7
        flush   %l7
        rdpr    %wstate, %l1
        rdpr    %otherwin, %l2
        srl     %l1, 3, %l1

        wrpr    %l2, %g0, %canrestore
        wrpr    %l1, %g0, %wstate
        brnz,pt %l2, user_rtt_restore
        wrpr    %g0, %g0, %otherwin

        ldx     [%g6 + TI_FLAGS], %g3
        wr      %g0, ASI_AIUP, %asi
        rdpr    %cwp, %g1
        andcc   %g3, _TIF_32BIT, %g0
        sub     %g1, 1, %g1
        bne,pt  %xcc, user_rtt_fill_32bit
        wrpr    %g1, %cwp
        ba,a,pt %xcc, user_rtt_fill_64bit
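
/* We faulted while filling the user's register windows from the
 * stack. Rewind %cwp and the window state, switch back to the kernel
 * MMU context, record the fault and hand it to do_sparc64_fault(),
 * then restart rtrap from the top.
 */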
user_rtt_fill_fixup:
        rdpr    %cwp, %g1
        add     %g1, 1, %g1
        wrpr    %g1, 0x0, %cwp

        rdpr    %wstate, %g2
        sll     %g2, 3, %g2
        wrpr    %g2, 0x0, %wstate

        /* We know %canrestore and %otherwin are both zero. */

        sethi   %hi(sparc64_kern_pri_context), %g2
        ldx     [%g2 + %lo(sparc64_kern_pri_context)], %g2
        mov     PRIMARY_CONTEXT, %g1

661:    stxa    %g2, [%g1] ASI_DMMU
        .section        .sun4v_1insn_patch, "ax"
        .word   661b
        stxa    %g2, [%g1] ASI_MMU
        .previous

        sethi   %hi(KERNBASE), %g1
        flush   %g1

        or      %g4, FAULT_CODE_WINFIXUP, %g4
        stb     %g4, [%g6 + TI_FAULT_CODE]
        stx     %g5, [%g6 + TI_FAULT_ADDR]

        mov     %g6, %l1
        wrpr    %g0, 0x0, %tl

661:    nop
        .section        .sun4v_1insn_patch, "ax"
        .word   661b
        SET_GL(0)
        .previous

        wrpr    %g0, RTRAP_PSTATE, %pstate

        mov     %l1, %g6
        ldx     [%g6 + TI_TASK], %g4
        LOAD_PER_CPU_BASE(%g5, %g6, %g1, %g2, %g3)
        call    do_sparc64_fault
        add     %sp, PTREGS_OFF, %o0
        ba,pt   %xcc, rtrap
        nop

user_rtt_pre_restore:
        add     %g1, 1, %g1
        wrpr    %g1, 0x0, %cwp

user_rtt_restore:
        restore
        rdpr    %canrestore, %g1
        wrpr    %g1, 0x0, %cleanwin
        retry
        nop

kern_rtt:       rdpr    %canrestore, %g1
        brz,pn  %g1, kern_rtt_fill
        nop
kern_rtt_restore:
        restore
        retry
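
/* Returning to kernel mode. With CONFIG_PREEMPT we may reschedule
 * here, but only if the preempt count is zero, _TIF_NEED_RESCHED is
 * set and the interrupted context did not have IRQs disabled (the
 * %pil field recovered from the saved %tstate is zero).
 */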
to_kernel:
#ifdef CONFIG_PREEMPT
        ldsw    [%g6 + TI_PRE_COUNT], %l5
        brnz    %l5, kern_fpucheck
        ldx     [%g6 + TI_FLAGS], %l5
        andcc   %l5, _TIF_NEED_RESCHED, %g0
        be,pt   %xcc, kern_fpucheck
        srl     %l4, 20, %l5
        cmp     %l5, 0
        bne,pn  %xcc, kern_fpucheck
        sethi   %hi(PREEMPT_ACTIVE), %l6
        stw     %l6, [%g6 + TI_PRE_COUNT]
        call    schedule
        nop
        ba,pt   %xcc, rtrap
        stw     %g0, [%g6 + TI_PRE_COUNT]
#endif
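
/* If a trap handler used the FPU, pop one level of saved FPU state:
 * restore %gsr, %fsr and whichever halves of the FP register file
 * (FPRS_DL lower, FPRS_DU upper) were saved at trap entry.
 */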
kern_fpucheck:  ldub    [%g6 + TI_FPDEPTH], %l5
        brz,pt  %l5, rt_continue
        srl     %l5, 1, %o0
        add     %g6, TI_FPSAVED, %l6
        ldub    [%l6 + %o0], %l2
        sub     %l5, 2, %l5

        add     %g6, TI_GSR, %o1
        andcc   %l2, (FPRS_FEF|FPRS_DU), %g0
        be,pt   %icc, 2f
        and     %l2, FPRS_DL, %l6
        andcc   %l2, FPRS_FEF, %g0
        be,pn   %icc, 5f
        sll     %o0, 3, %o5
        rd      %fprs, %g1

        wr      %g1, FPRS_FEF, %fprs
        ldx     [%o1 + %o5], %g1
        add     %g6, TI_XFSR, %o1
        sll     %o0, 8, %o2
        add     %g6, TI_FPREGS, %o3
        brz,pn  %l6, 1f
        add     %g6, TI_FPREGS+0x40, %o4

        membar  #Sync
        ldda    [%o3 + %o2] ASI_BLK_P, %f0
        ldda    [%o4 + %o2] ASI_BLK_P, %f16
        membar  #Sync
1:      andcc   %l2, FPRS_DU, %g0
        be,pn   %icc, 1f
        wr      %g1, 0, %gsr
        add     %o2, 0x80, %o2
        membar  #Sync
        ldda    [%o3 + %o2] ASI_BLK_P, %f32
        ldda    [%o4 + %o2] ASI_BLK_P, %f48
1:      membar  #Sync
        ldx     [%o1 + %o5], %fsr
2:      stb     %l5, [%g6 + TI_FPDEPTH]
        ba,pt   %xcc, rt_continue
        nop
5:      wr      %g0, FPRS_FEF, %fprs
        sll     %o0, 8, %o2

        add     %g6, TI_FPREGS+0x80, %o3
        add     %g6, TI_FPREGS+0xc0, %o4
        membar  #Sync
        ldda    [%o3 + %o2] ASI_BLK_P, %f32
        ldda    [%o4 + %o2] ASI_BLK_P, %f48
        membar  #Sync
        wr      %g0, FPRS_DU, %fprs
        ba,pt   %xcc, rt_continue
        stb     %l5, [%g6 + TI_FPDEPTH]