ia64/linux-2.6.18-xen.hg: view of arch/sparc64/kernel/sun4v_ivec.S @ 897:329ea0ccb344

balloon: try harder to balloon up under memory pressure.

Currently, if the balloon driver is unable to increase the guest's
reservation, it assumes the failure was due to reaching its full
allocation, gives up on the ballooning operation and records the limit
it reached as the "hard limit". The driver will not try again until
the target is set again (even to the same value).

However, it is possible that ballooning has in fact failed due to
memory pressure in the host, and it is therefore desirable to keep
attempting to reach the target in case memory becomes available. The
most likely scenario is that some guests are ballooning down while
others are ballooning up, creating temporary memory pressure while
things stabilise. You would not expect a well-behaved toolstack to ask
a domain to balloon to more than its allocation, nor would you expect
it to deliberately over-commit memory by setting balloon targets which
exceed the total host memory.

This patch drops the concept of a hard limit and causes the balloon
driver to retry increasing the reservation on a timer in the same
manner as when decreasing the reservation.

Also, if we partially succeed in increasing the reservation
(i.e. receive fewer pages than we asked for), we may as well keep
those pages rather than returning them to Xen.
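
As a rough illustration of the behaviour described above, here is a
minimal C sketch of the retry loop, assuming hypothetical helper names
(current_target, current_pages, increase_reservation,
decrease_reservation, balloon_timer); it is not quoted from the driver
itself.

#include <linux/timer.h>
#include <linux/jiffies.h>

/* Assumed helpers, for illustration only (not the driver's real symbols). */
extern struct timer_list balloon_timer;         /* re-runs this worker later */
extern unsigned long current_target(void);      /* pages requested by the toolstack */
extern unsigned long current_pages;             /* pages currently held by the guest */
extern int increase_reservation(unsigned long nr_pages); /* 0 on full success */
extern int decrease_reservation(unsigned long nr_pages); /* 0 on full success */

static void balloon_process(void)
{
	long credit = (long)current_target() - (long)current_pages;
	int need_retry = 0;

	if (credit > 0)
		/* A partial increase keeps whatever pages were granted. */
		need_retry = (increase_reservation(credit) != 0);
	else if (credit < 0)
		need_retry = (decrease_reservation(-credit) != 0);

	/* No "hard limit": if we fell short, re-arm the timer and try
	 * again later, in case host memory pressure eases (e.g. once
	 * other guests finish ballooning down).
	 */
	if (need_retry)
		mod_timer(&balloon_timer, jiffies + HZ);
}

The point is that failing to reach the target no longer terminates the
operation; it merely defers another attempt.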

Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
author  Keir Fraser <keir.fraser@citrix.com>
date    Fri Jun 05 14:01:20 2009 +0100
parents 831230e53067
/* sun4v_ivec.S: Sun4v interrupt vector handling.
 *
 * Copyright (C) 2006 <davem@davemloft.net>
 */

#include <asm/cpudata.h>
#include <asm/intr_queue.h>
#include <asm/pil.h>

	.text
	.align	32

sun4v_cpu_mondo:
	/* Head offset in %g2, tail offset in %g4.
	 * If they are the same, no work.
	 */
	mov	INTRQ_CPU_MONDO_HEAD, %g2
	ldxa	[%g2] ASI_QUEUE, %g2
	mov	INTRQ_CPU_MONDO_TAIL, %g4
	ldxa	[%g4] ASI_QUEUE, %g4
	cmp	%g2, %g4
	be,pn	%xcc, sun4v_cpu_mondo_queue_empty
	 nop

	/* Get &trap_block[smp_processor_id()] into %g3. */
	ldxa	[%g0] ASI_SCRATCHPAD, %g3
	sub	%g3, TRAP_PER_CPU_FAULT_INFO, %g3

	/* Get CPU mondo queue base phys address into %g7. */
	ldx	[%g3 + TRAP_PER_CPU_CPU_MONDO_PA], %g7

	/* Now get the cross-call arguments and handler PC, same
	 * layout as sun4u:
	 *
	 * 1st 64-bit word: low half is 32-bit PC, put into %g3 and jmpl to it
	 *                  high half is context arg to MMU flushes, into %g5
	 * 2nd 64-bit word: 64-bit arg, load into %g1
	 * 3rd 64-bit word: 64-bit arg, load into %g7
	 */
	ldxa	[%g7 + %g2] ASI_PHYS_USE_EC, %g3
	add	%g2, 0x8, %g2
	srlx	%g3, 32, %g5
	ldxa	[%g7 + %g2] ASI_PHYS_USE_EC, %g1
	add	%g2, 0x8, %g2
	srl	%g3, 0, %g3
	ldxa	[%g7 + %g2] ASI_PHYS_USE_EC, %g7
	add	%g2, 0x40 - 0x8 - 0x8, %g2

	/* Update queue head pointer. */
	sethi	%hi(8192 - 1), %g4
	or	%g4, %lo(8192 - 1), %g4
	and	%g2, %g4, %g2

	mov	INTRQ_CPU_MONDO_HEAD, %g4
	stxa	%g2, [%g4] ASI_QUEUE
	membar	#Sync

	jmpl	%g3, %g0
	 nop

sun4v_cpu_mondo_queue_empty:
	retry

sun4v_dev_mondo:
	/* Head offset in %g2, tail offset in %g4. */
	mov	INTRQ_DEVICE_MONDO_HEAD, %g2
	ldxa	[%g2] ASI_QUEUE, %g2
	mov	INTRQ_DEVICE_MONDO_TAIL, %g4
	ldxa	[%g4] ASI_QUEUE, %g4
	cmp	%g2, %g4
	be,pn	%xcc, sun4v_dev_mondo_queue_empty
	 nop

	/* Get &trap_block[smp_processor_id()] into %g3. */
	ldxa	[%g0] ASI_SCRATCHPAD, %g3
	sub	%g3, TRAP_PER_CPU_FAULT_INFO, %g3

	/* Get DEV mondo queue base phys address into %g5. */
	ldx	[%g3 + TRAP_PER_CPU_DEV_MONDO_PA], %g5

	/* Load IVEC into %g3. */
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	add	%g2, 0x40, %g2

	/* XXX There can be a full 64-byte block of data here.
	 * XXX This is how we can get at MSI vector data.
	 * XXX Currently we do not capture this, but when we do we'll
	 * XXX need to add a 64-byte storage area in the struct ino_bucket
	 * XXX or the struct irq_desc.
	 */

	/* Update queue head pointer, this frees up some registers. */
	sethi	%hi(8192 - 1), %g4
	or	%g4, %lo(8192 - 1), %g4
	and	%g2, %g4, %g2

	mov	INTRQ_DEVICE_MONDO_HEAD, %g4
	stxa	%g2, [%g4] ASI_QUEUE
	membar	#Sync

	/* Get &__irq_work[smp_processor_id()] into %g1. */
	TRAP_LOAD_IRQ_WORK(%g1, %g4)

	/* Get &ivector_table[IVEC] into %g4. */
	sethi	%hi(ivector_table), %g4
	sllx	%g3, 3, %g3
	or	%g4, %lo(ivector_table), %g4
	add	%g4, %g3, %g4

	/* Insert ivector_table[] entry into __irq_work[] queue. */
	lduw	[%g1], %g2		/* g2 = irq_work(cpu) */
	stw	%g2, [%g4 + 0x00]	/* bucket->irq_chain = g2 */
	stw	%g4, [%g1]		/* irq_work(cpu) = bucket */

	/* Signal the interrupt by setting (1 << pil) in %softint. */
	wr	%g0, 1 << PIL_DEVICE_IRQ, %set_softint

sun4v_dev_mondo_queue_empty:
	retry

sun4v_res_mondo:
	/* Head offset in %g2, tail offset in %g4. */
	mov	INTRQ_RESUM_MONDO_HEAD, %g2
	ldxa	[%g2] ASI_QUEUE, %g2
	mov	INTRQ_RESUM_MONDO_TAIL, %g4
	ldxa	[%g4] ASI_QUEUE, %g4
	cmp	%g2, %g4
	be,pn	%xcc, sun4v_res_mondo_queue_empty
	 nop

	/* Get &trap_block[smp_processor_id()] into %g3. */
	ldxa	[%g0] ASI_SCRATCHPAD, %g3
	sub	%g3, TRAP_PER_CPU_FAULT_INFO, %g3

	/* Get RES mondo queue base phys address into %g5. */
	ldx	[%g3 + TRAP_PER_CPU_RESUM_MONDO_PA], %g5

	/* Get RES kernel buffer base phys address into %g7. */
	ldx	[%g3 + TRAP_PER_CPU_RESUM_KBUF_PA], %g7

	/* If the first word is non-zero, queue is full. */
	ldxa	[%g7 + %g2] ASI_PHYS_USE_EC, %g1
	brnz,pn	%g1, sun4v_res_mondo_queue_full
	 nop

	/* Remember this entry's offset in %g1. */
	mov	%g2, %g1

	/* Copy 64-byte queue entry into kernel buffer. */
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2

	/* Update queue head pointer. */
	sethi	%hi(8192 - 1), %g4
	or	%g4, %lo(8192 - 1), %g4
	and	%g2, %g4, %g2

	mov	INTRQ_RESUM_MONDO_HEAD, %g4
	stxa	%g2, [%g4] ASI_QUEUE
	membar	#Sync

	/* Disable interrupts and save register state so we can call
	 * C code. The etrap handling will leave %g4 in %l4 for us
	 * when it's done.
	 */
	rdpr	%pil, %g2
	wrpr	%g0, 15, %pil
	mov	%g1, %g4
	ba,pt	%xcc, etrap_irq
	 rd	%pc, %g7

	/* Log the event. */
	add	%sp, PTREGS_OFF, %o0
	call	sun4v_resum_error
	 mov	%l4, %o1

	/* Return from trap. */
	ba,pt	%xcc, rtrap_irq
	 nop

sun4v_res_mondo_queue_empty:
	retry

sun4v_res_mondo_queue_full:
	/* The queue is full, consolidate our damage by setting
	 * the head equal to the tail. We'll just trap again otherwise.
	 * Call C code to log the event.
	 */
	mov	INTRQ_RESUM_MONDO_HEAD, %g2
	stxa	%g4, [%g2] ASI_QUEUE
	membar	#Sync

	rdpr	%pil, %g2
	wrpr	%g0, 15, %pil
	ba,pt	%xcc, etrap_irq
	 rd	%pc, %g7

	call	sun4v_resum_overflow
	 add	%sp, PTREGS_OFF, %o0

	ba,pt	%xcc, rtrap_irq
	 nop

sun4v_nonres_mondo:
	/* Head offset in %g2, tail offset in %g4. */
	mov	INTRQ_NONRESUM_MONDO_HEAD, %g2
	ldxa	[%g2] ASI_QUEUE, %g2
	mov	INTRQ_NONRESUM_MONDO_TAIL, %g4
	ldxa	[%g4] ASI_QUEUE, %g4
	cmp	%g2, %g4
	be,pn	%xcc, sun4v_nonres_mondo_queue_empty
	 nop

	/* Get &trap_block[smp_processor_id()] into %g3. */
	ldxa	[%g0] ASI_SCRATCHPAD, %g3
	sub	%g3, TRAP_PER_CPU_FAULT_INFO, %g3

	/* Get NONRESUM mondo queue base phys address into %g5. */
	ldx	[%g3 + TRAP_PER_CPU_NONRESUM_MONDO_PA], %g5

	/* Get NONRESUM kernel buffer base phys address into %g7. */
	ldx	[%g3 + TRAP_PER_CPU_NONRESUM_KBUF_PA], %g7

	/* If the first word is non-zero, queue is full. */
	ldxa	[%g7 + %g2] ASI_PHYS_USE_EC, %g1
	brnz,pn	%g1, sun4v_nonres_mondo_queue_full
	 nop

	/* Remember this entry's offset in %g1. */
	mov	%g2, %g1

	/* Copy 64-byte queue entry into kernel buffer. */
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2

	/* Update queue head pointer. */
	sethi	%hi(8192 - 1), %g4
	or	%g4, %lo(8192 - 1), %g4
	and	%g2, %g4, %g2

	mov	INTRQ_NONRESUM_MONDO_HEAD, %g4
	stxa	%g2, [%g4] ASI_QUEUE
	membar	#Sync

	/* Disable interrupts and save register state so we can call
	 * C code. The etrap handling will leave %g4 in %l4 for us
	 * when it's done.
	 */
	rdpr	%pil, %g2
	wrpr	%g0, 15, %pil
	mov	%g1, %g4
	ba,pt	%xcc, etrap_irq
	 rd	%pc, %g7

	/* Log the event. */
	add	%sp, PTREGS_OFF, %o0
	call	sun4v_nonresum_error
	 mov	%l4, %o1

	/* Return from trap. */
	ba,pt	%xcc, rtrap_irq
	 nop

sun4v_nonres_mondo_queue_empty:
	retry

sun4v_nonres_mondo_queue_full:
	/* The queue is full, consolidate our damage by setting
	 * the head equal to the tail. We'll just trap again otherwise.
	 * Call C code to log the event.
	 */
	mov	INTRQ_NONRESUM_MONDO_HEAD, %g2
	stxa	%g4, [%g2] ASI_QUEUE
	membar	#Sync

	rdpr	%pil, %g2
	wrpr	%g0, 15, %pil
	ba,pt	%xcc, etrap_irq
	 rd	%pc, %g7

	call	sun4v_nonresum_overflow
	 add	%sp, PTREGS_OFF, %o0

	ba,pt	%xcc, rtrap_irq
	 nop