ia64/linux-2.6.18-xen.hg

view arch/sparc64/kernel/ktlb.S @ 897:329ea0ccb344

balloon: try harder to balloon up under memory pressure.

Currently if the balloon driver is unable to increase the guest's
reservation it assumes the failure was due to reaching its full
allocation, gives up on the ballooning operation and records the limit
it reached as the "hard limit". The driver will not try again until
the target is set again (even to the same value).

However it is possible that ballooning has in fact failed due to
memory pressure in the host and therefore it is desirable to keep
attempting to reach the target in case memory becomes available. The
most likely scenario is that some guests are ballooning down while
others are ballooning up and therefore there is temporary memory
pressure while things stabilise. You would not expect a well-behaved
toolstack to ask a domain to balloon to more than its allocation, nor
would you expect it to deliberately over-commit memory by setting
balloon targets which exceed the total host memory.

This patch drops the concept of a hard limit and causes the balloon
driver to retry increasing the reservation on a timer in the same
manner as when decreasing the reservation.

Also, if we partially succeed in increasing the reservation
(i.e. receive fewer pages than we asked for) then we may as well keep
those pages rather than returning them to Xen.

Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Fri Jun 05 14:01:20 2009 +0100 (2009-06-05)
parents 831230e53067
children
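
The retry behaviour described in the changeset message can be sketched
roughly in plain C. This is an illustrative stand-alone model, not the
driver code itself: increase_reservation() below is a stand-in for the
XENMEM_populate_physmap hypercall, and current_pages/target are just
local variables chosen for the example.

#include <stdio.h>

/* Stand-in for the populate-physmap hypercall: pretend the host can
 * only hand back at most 16 pages per attempt while it is under
 * memory pressure, and report how many pages were actually granted.
 */
static long increase_reservation(long nr_wanted)
{
        return nr_wanted < 16 ? nr_wanted : 16;
}

int main(void)
{
        long current_pages = 0, target = 100;

        /* Keep whatever the host gives us, even on partial success,
         * and simply try again later (a plain loop stands in for the
         * driver's timer) instead of recording a "hard limit".
         */
        while (current_pages < target) {
                long want = target - current_pages;
                long got  = increase_reservation(want);

                current_pages += got;   /* keep partial allocations */
                if (got < want)
                        printf("partial success (%ld/%ld pages), retrying\n",
                               got, want);
        }
        printf("reached target of %ld pages\n", target);
        return 0;
}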
/* arch/sparc64/kernel/ktlb.S: Kernel mapping TLB miss handling.
 *
 * Copyright (C) 1995, 1997, 2005 David S. Miller <davem@davemloft.net>
 * Copyright (C) 1996 Eddie C. Dost (ecd@brainaid.de)
 * Copyright (C) 1996 Miguel de Icaza (miguel@nuclecu.unam.mx)
 * Copyright (C) 1996,98,99 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#include <asm/head.h>
#include <asm/asi.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/tsb.h>

        .text
        .align  32

kvmap_itlb:
        /* g6: TAG TARGET */
        mov     TLB_TAG_ACCESS, %g4
        ldxa    [%g4] ASI_IMMU, %g4

        /* sun4v_itlb_miss branches here with the missing virtual
         * address already loaded into %g4
         */
kvmap_itlb_4v:

kvmap_itlb_nonlinear:
        /* Catch kernel NULL pointer calls. */
        sethi   %hi(PAGE_SIZE), %g5
        cmp     %g4, %g5
        bleu,pn %xcc, kvmap_dtlb_longpath
         nop

        KERN_TSB_LOOKUP_TL1(%g4, %g6, %g5, %g1, %g2, %g3, kvmap_itlb_load)

kvmap_itlb_tsb_miss:
        sethi   %hi(LOW_OBP_ADDRESS), %g5
        cmp     %g4, %g5
        blu,pn  %xcc, kvmap_itlb_vmalloc_addr
         mov    0x1, %g5
        sllx    %g5, 32, %g5
        cmp     %g4, %g5
        blu,pn  %xcc, kvmap_itlb_obp
         nop

kvmap_itlb_vmalloc_addr:
        KERN_PGTABLE_WALK(%g4, %g5, %g2, kvmap_itlb_longpath)

        KTSB_LOCK_TAG(%g1, %g2, %g7)

        /* Load and check PTE. */
        ldxa    [%g5] ASI_PHYS_USE_EC, %g5
        mov     1, %g7
        sllx    %g7, TSB_TAG_INVALID_BIT, %g7
        brgez,a,pn %g5, kvmap_itlb_longpath
         KTSB_STORE(%g1, %g7)

        KTSB_WRITE(%g1, %g5, %g6)

        /* fallthrough to TLB load */

kvmap_itlb_load:

661:    stxa    %g5, [%g0] ASI_ITLB_DATA_IN
        retry
        .section .sun4v_2insn_patch, "ax"
        .word   661b
        nop
        nop
        .previous
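
        /* Note on the patch section above: the .word 661b records the
         * address of the stxa at local label 661, and the two
         * instructions that follow it (here two nops) are what the
         * boot-time patcher writes over that address when the kernel
         * detects it is running on sun4v.
         */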
        /* For sun4v the ASI_ITLB_DATA_IN store and the retry
         * instruction get nop'd out and we get here to branch
         * to the sun4v tlb load code.  The registers are setup
         * as follows:
         *
         * %g4: vaddr
         * %g5: PTE
         * %g6: TAG
         *
         * The sun4v TLB load wants the PTE in %g3 so we fix that
         * up here.
         */
        ba,pt   %xcc, sun4v_itlb_load
         mov    %g5, %g3

kvmap_itlb_longpath:

661:    rdpr    %pstate, %g5
        wrpr    %g5, PSTATE_AG | PSTATE_MG, %pstate
        .section .sun4v_2insn_patch, "ax"
        .word   661b
        SET_GL(1)
        nop
        .previous

        rdpr    %tpc, %g5
        ba,pt   %xcc, sparc64_realfault_common
         mov    FAULT_CODE_ITLB, %g4

kvmap_itlb_obp:
        OBP_TRANS_LOOKUP(%g4, %g5, %g2, %g3, kvmap_itlb_longpath)

        KTSB_LOCK_TAG(%g1, %g2, %g7)

        KTSB_WRITE(%g1, %g5, %g6)

        ba,pt   %xcc, kvmap_itlb_load
         nop

kvmap_dtlb_obp:
        OBP_TRANS_LOOKUP(%g4, %g5, %g2, %g3, kvmap_dtlb_longpath)

        KTSB_LOCK_TAG(%g1, %g2, %g7)

        KTSB_WRITE(%g1, %g5, %g6)

        ba,pt   %xcc, kvmap_dtlb_load
         nop

        .align  32
kvmap_dtlb_tsb4m_load:
        KTSB_LOCK_TAG(%g1, %g2, %g7)
        KTSB_WRITE(%g1, %g5, %g6)
        ba,pt   %xcc, kvmap_dtlb_load
         nop

kvmap_dtlb:
        /* %g6: TAG TARGET */
        mov     TLB_TAG_ACCESS, %g4
        ldxa    [%g4] ASI_DMMU, %g4

        /* sun4v_dtlb_miss branches here with the missing virtual
         * address already loaded into %g4
         */
kvmap_dtlb_4v:
        brgez,pn %g4, kvmap_dtlb_nonlinear
         nop

        /* Correct TAG_TARGET is already in %g6, check 4mb TSB. */
        KERN_TSB4M_LOOKUP_TL1(%g6, %g5, %g1, %g2, %g3, kvmap_dtlb_load)

        /* TSB entry address left in %g1, lookup linear PTE.
         * Must preserve %g1 and %g6 (TAG).
         */
kvmap_dtlb_tsb4m_miss:
        sethi   %hi(kpte_linear_bitmap), %g2
        or      %g2, %lo(kpte_linear_bitmap), %g2

        /* Clear the PAGE_OFFSET top virtual bits, then shift
         * down to get a 256MB physical address index.
         */
        sllx    %g4, 21, %g5
        mov     1, %g7
        srlx    %g5, 21 + 28, %g5

        /* Don't try this at home kids... this depends upon srlx
         * only taking the low 6 bits of the shift count in %g5.
         */
        sllx    %g7, %g5, %g7

        /* Divide by 64 to get the offset into the bitmask. */
        srlx    %g5, 6, %g5
        sllx    %g5, 3, %g5

        /* kern_linear_pte_xor[((mask & bit) ? 1 : 0)] */
        ldx     [%g2 + %g5], %g2
        andcc   %g2, %g7, %g0
        sethi   %hi(kern_linear_pte_xor), %g5
        or      %g5, %lo(kern_linear_pte_xor), %g5
        bne,a,pt %xcc, 1f
         add    %g5, 8, %g5

1:      ldx     [%g5], %g2

        .globl  kvmap_linear_patch
kvmap_linear_patch:
        ba,pt   %xcc, kvmap_dtlb_tsb4m_load
         xor    %g2, %g4, %g5
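
        /* The lookup from kvmap_dtlb_tsb4m_miss down to here, expressed
         * as rough C for readability (illustrative only, with assumed
         * scalar types):
         *
         *   idx  = (vaddr << 21) >> (21 + 28);     256MB slot index
         *   bit  = 1UL << (idx & 63);              sllx uses low 6 bits
         *   word = kpte_linear_bitmap[idx >> 6];
         *   sel  = (word & bit) ? 1 : 0;
         *   pte  = vaddr ^ kern_linear_pte_xor[sel];
         */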

kvmap_dtlb_vmalloc_addr:
        KERN_PGTABLE_WALK(%g4, %g5, %g2, kvmap_dtlb_longpath)

        KTSB_LOCK_TAG(%g1, %g2, %g7)

        /* Load and check PTE. */
        ldxa    [%g5] ASI_PHYS_USE_EC, %g5
        mov     1, %g7
        sllx    %g7, TSB_TAG_INVALID_BIT, %g7
        brgez,a,pn %g5, kvmap_dtlb_longpath
         KTSB_STORE(%g1, %g7)

        KTSB_WRITE(%g1, %g5, %g6)

        /* fallthrough to TLB load */

kvmap_dtlb_load:

661:    stxa    %g5, [%g0] ASI_DTLB_DATA_IN     ! Reload TLB
        retry
        .section .sun4v_2insn_patch, "ax"
        .word   661b
        nop
        nop
        .previous

        /* For sun4v the ASI_DTLB_DATA_IN store and the retry
         * instruction get nop'd out and we get here to branch
         * to the sun4v tlb load code.  The registers are setup
         * as follows:
         *
         * %g4: vaddr
         * %g5: PTE
         * %g6: TAG
         *
         * The sun4v TLB load wants the PTE in %g3 so we fix that
         * up here.
         */
        ba,pt   %xcc, sun4v_dtlb_load
         mov    %g5, %g3

kvmap_dtlb_nonlinear:
        /* Catch kernel NULL pointer derefs. */
        sethi   %hi(PAGE_SIZE), %g5
        cmp     %g4, %g5
        bleu,pn %xcc, kvmap_dtlb_longpath
         nop

        KERN_TSB_LOOKUP_TL1(%g4, %g6, %g5, %g1, %g2, %g3, kvmap_dtlb_load)

kvmap_dtlb_tsbmiss:
        sethi   %hi(MODULES_VADDR), %g5
        cmp     %g4, %g5
        blu,pn  %xcc, kvmap_dtlb_longpath
         mov    (VMALLOC_END >> 24), %g5
        sllx    %g5, 24, %g5
        cmp     %g4, %g5
        bgeu,pn %xcc, kvmap_dtlb_longpath
         nop
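
        /* Only addresses in [MODULES_VADDR, VMALLOC_END) make it past
         * the checks above; everything else is a real fault.  The OBP
         * check below then splits the survivors between the OBP
         * translation table and a normal kernel page-table walk.
         */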

kvmap_check_obp:
        sethi   %hi(LOW_OBP_ADDRESS), %g5
        cmp     %g4, %g5
        blu,pn  %xcc, kvmap_dtlb_vmalloc_addr
         mov    0x1, %g5
        sllx    %g5, 32, %g5
        cmp     %g4, %g5
        blu,pn  %xcc, kvmap_dtlb_obp
         nop
        ba,pt   %xcc, kvmap_dtlb_vmalloc_addr
         nop

kvmap_dtlb_longpath:

661:    rdpr    %pstate, %g5
        wrpr    %g5, PSTATE_AG | PSTATE_MG, %pstate
        .section .sun4v_2insn_patch, "ax"
        .word   661b
        SET_GL(1)
        ldxa    [%g0] ASI_SCRATCHPAD, %g5
        .previous

        rdpr    %tl, %g3
        cmp     %g3, 1

661:    mov     TLB_TAG_ACCESS, %g4
        ldxa    [%g4] ASI_DMMU, %g5
        .section .sun4v_2insn_patch, "ax"
        .word   661b
        ldx     [%g5 + HV_FAULT_D_ADDR_OFFSET], %g5
        nop
        .previous

        be,pt   %xcc, sparc64_realfault_common
         mov    FAULT_CODE_DTLB, %g4
        ba,pt   %xcc, winfix_trampoline
         nop