ia64/linux-2.6.18-xen.hg

view arch/mips/lib-32/memset.S @ 897:329ea0ccb344

balloon: try harder to balloon up under memory pressure.

Currently, if the balloon driver is unable to increase the guest's
reservation, it assumes the failure was due to reaching its full
allocation, gives up on the ballooning operation and records the limit
it reached as the "hard limit". The driver will not try again until
the target is set again (even to the same value).

However, it is possible that ballooning has in fact failed due to
memory pressure in the host, and therefore it is desirable to keep
attempting to reach the target in case memory becomes available. The
most likely scenario is that some guests are ballooning down while
others are ballooning up, creating temporary memory pressure while
things stabilise. You would not expect a well-behaved toolstack to ask
a domain to balloon to more than its allocation, nor would you expect
it to deliberately over-commit memory by setting balloon targets which
exceed the total host memory.

This patch drops the concept of a hard limit and causes the balloon
driver to retry increasing the reservation on a timer in the same
manner as when decreasing the reservation.

Also, if we only partially succeed in increasing the reservation
(i.e. receive fewer pages than we asked for) then we may as well keep
those pages rather than returning them to Xen.

Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Fri Jun 05 14:01:20 2009 +0100 (2009-06-05)
parents 831230e53067
children
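
As a rough illustration of the retry behaviour the changeset description
asks for, here is a minimal C sketch. Every identifier in it
(current_target, current_pages, xen_populate, schedule_retry) is a
hypothetical stand-in for illustration only, not the driver's actual API:

extern long current_target(void);        /* pages the toolstack asked for   */
extern long current_pages;               /* pages the guest currently owns  */
extern long xen_populate(long nr_pages); /* returns pages actually granted  */
extern void schedule_retry(void);        /* re-arm the balloon timer        */

static void balloon_up(void)
{
	long credit = current_target() - current_pages;

	if (credit <= 0)
		return;

	/* Keep whatever Xen grants, even if it is less than requested,
	 * instead of handing a partial allocation back. */
	current_pages += xen_populate(credit);

	/* No "hard limit": if we are still short of the target, simply try
	 * again later, just as the driver already does when decreasing the
	 * reservation. */
	if (current_pages < current_target())
		schedule_retry();
}
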
line source
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1998, 1999, 2000 by Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 */
#include <asm/asm.h>
#include <asm/asm-offsets.h>
#include <asm/regdef.h>

#define EX(insn,reg,addr,handler)			\
9:	insn	reg, addr;				\
	.section __ex_table,"a";			\
	PTR	9b, handler;				\
	.previous
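
/*
 * For example, EX(swl, a1, (a0), first_fixup) assembles as
 *
 *   9:	swl	a1, (a0)
 *	.section __ex_table,"a"
 *	PTR	9b, first_fixup
 *	.previous
 *
 * i.e. the store gets a local label and an (instruction address, fixup
 * handler) pair is recorded in __ex_table, so a fault on the store
 * branches to the fixup routine instead of oopsing.
 */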

	.macro	f_fill64 dst, offset, val, fixup
	EX(LONG_S, \val, (\offset +  0 * LONGSIZE)(\dst), \fixup)
	EX(LONG_S, \val, (\offset +  1 * LONGSIZE)(\dst), \fixup)
	EX(LONG_S, \val, (\offset +  2 * LONGSIZE)(\dst), \fixup)
	EX(LONG_S, \val, (\offset +  3 * LONGSIZE)(\dst), \fixup)
	EX(LONG_S, \val, (\offset +  4 * LONGSIZE)(\dst), \fixup)
	EX(LONG_S, \val, (\offset +  5 * LONGSIZE)(\dst), \fixup)
	EX(LONG_S, \val, (\offset +  6 * LONGSIZE)(\dst), \fixup)
	EX(LONG_S, \val, (\offset +  7 * LONGSIZE)(\dst), \fixup)
	EX(LONG_S, \val, (\offset +  8 * LONGSIZE)(\dst), \fixup)
	EX(LONG_S, \val, (\offset +  9 * LONGSIZE)(\dst), \fixup)
	EX(LONG_S, \val, (\offset + 10 * LONGSIZE)(\dst), \fixup)
	EX(LONG_S, \val, (\offset + 11 * LONGSIZE)(\dst), \fixup)
	EX(LONG_S, \val, (\offset + 12 * LONGSIZE)(\dst), \fixup)
	EX(LONG_S, \val, (\offset + 13 * LONGSIZE)(\dst), \fixup)
	EX(LONG_S, \val, (\offset + 14 * LONGSIZE)(\dst), \fixup)
	EX(LONG_S, \val, (\offset + 15 * LONGSIZE)(\dst), \fixup)
	.endm

/*
 * memset(void *s, int c, size_t n)
 *
 * a0: start of area to clear
 * a1: char to fill with
 * a2: size of area to clear
 */
	.set	noreorder
	.align	5
LEAF(memset)
	beqz		a1, 1f
	 move		v0, a0			/* result */

	andi		a1, 0xff		/* spread fillword */
	sll		t1, a1, 8
	or		a1, t1
	sll		t1, a1, 16
	or		a1, t1
1:

FEXPORT(__bzero)
	sltiu		t0, a2, LONGSIZE	/* very small region? */
	bnez		t0, small_memset
	 andi		t0, a0, LONGMASK	/* aligned? */

	beqz		t0, 1f
	 PTR_SUBU	t0, LONGSIZE		/* alignment in bytes */

#ifdef __MIPSEB__
	EX(swl, a1, (a0), first_fixup)		/* make word aligned */
#endif
#ifdef __MIPSEL__
	EX(swr, a1, (a0), first_fixup)		/* make word aligned */
#endif
	PTR_SUBU	a0, t0			/* long align ptr */
	PTR_ADDU	a2, t0			/* correct size */

1:	ori		t1, a2, 0x3f		/* # of full blocks */
	xori		t1, 0x3f
	beqz		t1, memset_partial	/* no block to fill */
	 andi		t0, a2, 0x3c

	PTR_ADDU	t1, a0			/* end address */
	.set		reorder
1:	PTR_ADDIU	a0, 64
	f_fill64 a0, -64, a1, fwd_fixup
	bne		t1, a0, 1b
	.set		noreorder

memset_partial:
	PTR_LA		t1, 2f			/* where to start */
	PTR_SUBU	t1, t0
	jr		t1
	 PTR_ADDU	a0, t0			/* dest ptr */

	.set		push
	.set		noreorder
	.set		nomacro
	f_fill64 a0, -64, a1, partial_fixup	/* ... but first do longs ... */
2:	.set		pop
	andi		a2, LONGMASK		/* At most one long to go */

	beqz		a2, 1f
	 PTR_ADDU	a0, a2			/* What's left */
#ifdef __MIPSEB__
	EX(swr, a1, -1(a0), last_fixup)
#endif
#ifdef __MIPSEL__
	EX(swl, a1, -1(a0), last_fixup)
#endif
1:	jr		ra
	 move		a2, zero

small_memset:
	beqz		a2, 2f
	 PTR_ADDU	t1, a0, a2

1:	PTR_ADDIU	a0, 1			/* fill bytewise */
	bne		t1, a0, 1b
	 sb		a1, -1(a0)

2:	jr		ra			/* done */
	 move		a2, zero
	END(memset)

first_fixup:
	jr	ra				/* fault on the alignment store: a2 still holds the full count */
	 nop

fwd_fixup:
	PTR_L		t0, TI_TASK($28)	/* current task_struct */
	LONG_L		t0, THREAD_BUADDR(t0)	/* address that faulted */
	andi		a2, 0x3f		/* bytes beyond the 64-byte blocks */
	LONG_ADDU	a2, t1			/* t1 = end of the block region */
	jr		ra
	 LONG_SUBU	a2, t0			/* a2 = bytes not cleared */

partial_fixup:
	PTR_L		t0, TI_TASK($28)	/* current task_struct */
	LONG_L		t0, THREAD_BUADDR(t0)	/* address that faulted */
	andi		a2, LONGMASK
	LONG_ADDU	a2, t1
	jr		ra
	 LONG_SUBU	a2, t0

last_fixup:
	jr		ra
	 andi		v1, a2, LONGMASK	/* a2 already holds the remaining tail bytes */
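
For reference, a short hedged C-level illustration of the contract this file
implements. It relies only on standard C semantics (memset returns its first
argument, kept in v0 above) and on the fall-through to the __bzero entry
point visible above when the fill byte is zero; the function example() is
purely illustrative:

#include <string.h>

void example(char *buf, size_t len)
{
	char *ret = memset(buf, 0xff, len);	/* ret == buf */
	memset(buf, 0, len);			/* zero fill: falls through to the __bzero path */
	(void)ret;
}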