ia64/linux-2.6.18-xen.hg

view drivers/md/dm-target.c @ 897:329ea0ccb344

balloon: try harder to balloon up under memory pressure.

Currently if the balloon driver is unable to increase the guest's
reservation it assumes the failure was due to reaching its full
allocation, gives up on the ballooning operation and records the limit
it reached as the "hard limit". The driver will not try again until
the target is set again (even to the same value).

However it is possible that ballooning has in fact failed due to
memory pressure in the host and therefore it is desirable to keep
attempting to reach the target in case memory becomes available. The
most likely scenario is that some guests are ballooning down while
others are ballooning up and therefore there is temporary memory
pressure while things stabilise. You would not expect a well-behaved
toolstack to ask a domain to balloon to more than its allocation nor
would you expect it to deliberately over-commit memory by setting
balloon targets which exceed the total host memory.

This patch drops the concept of a hard limit and causes the balloon
driver to retry increasing the reservation on a timer in the same
manner as when decreasing the reservation.

Also if we partially succeed in increasing the reservation
(i.e. receive less pages than we asked for) then we may as well keep
those pages rather than returning them to Xen.

Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Fri Jun 05 14:01:20 2009 +0100 (2009-06-05)
parents 831230e53067
children
line source
1 /*
2 * Copyright (C) 2001 Sistina Software (UK) Limited
3 *
4 * This file is released under the GPL.
5 */
7 #include "dm.h"
9 #include <linux/module.h>
10 #include <linux/init.h>
11 #include <linux/kmod.h>
12 #include <linux/bio.h>
13 #include <linux/slab.h>
15 #define DM_MSG_PREFIX "target"
/*
 * Registry entry wrapping a registered target type.
 *
 * 'tt' MUST remain the first member: dm_put_target_type() casts a
 * struct target_type * straight back to a struct tt_internal *.
 */
struct tt_internal {
	struct target_type tt;

	struct list_head list;	/* chained on the global _targets list */
	long use;		/* number of outstanding dm_get_target_type() refs */
};
/* Global registry of target types; _lock protects list and use counts. */
static LIST_HEAD(_targets);
static DECLARE_RWSEM(_lock);

/* NOTE(review): apparently unused in this file — possibly referenced elsewhere. */
#define DM_MOD_NAME_SIZE 32
29 static inline struct tt_internal *__find_target_type(const char *name)
30 {
31 struct tt_internal *ti;
33 list_for_each_entry (ti, &_targets, list)
34 if (!strcmp(name, ti->tt.name))
35 return ti;
37 return NULL;
38 }
40 static struct tt_internal *get_target_type(const char *name)
41 {
42 struct tt_internal *ti;
44 down_read(&_lock);
46 ti = __find_target_type(name);
47 if (ti) {
48 if ((ti->use == 0) && !try_module_get(ti->tt.module))
49 ti = NULL;
50 else
51 ti->use++;
52 }
54 up_read(&_lock);
55 return ti;
56 }
/* Ask kmod to load the module providing target type 'name' ("dm-<name>"). */
static void load_module(const char *name)
{
	request_module("dm-%s", name);
}
63 struct target_type *dm_get_target_type(const char *name)
64 {
65 struct tt_internal *ti = get_target_type(name);
67 if (!ti) {
68 load_module(name);
69 ti = get_target_type(name);
70 }
72 return ti ? &ti->tt : NULL;
73 }
75 void dm_put_target_type(struct target_type *t)
76 {
77 struct tt_internal *ti = (struct tt_internal *) t;
79 down_read(&_lock);
80 if (--ti->use == 0)
81 module_put(ti->tt.module);
83 BUG_ON(ti->use < 0);
84 up_read(&_lock);
86 return;
87 }
89 static struct tt_internal *alloc_target(struct target_type *t)
90 {
91 struct tt_internal *ti = kmalloc(sizeof(*ti), GFP_KERNEL);
93 if (ti) {
94 memset(ti, 0, sizeof(*ti));
95 ti->tt = *t;
96 }
98 return ti;
99 }
102 int dm_target_iterate(void (*iter_func)(struct target_type *tt,
103 void *param), void *param)
104 {
105 struct tt_internal *ti;
107 down_read(&_lock);
108 list_for_each_entry (ti, &_targets, list)
109 iter_func(&ti->tt, param);
110 up_read(&_lock);
112 return 0;
113 }
115 int dm_register_target(struct target_type *t)
116 {
117 int rv = 0;
118 struct tt_internal *ti = alloc_target(t);
120 if (!ti)
121 return -ENOMEM;
123 down_write(&_lock);
124 if (__find_target_type(t->name))
125 rv = -EEXIST;
126 else
127 list_add(&ti->list, &_targets);
129 up_write(&_lock);
130 if (rv)
131 kfree(ti);
132 return rv;
133 }
135 int dm_unregister_target(struct target_type *t)
136 {
137 struct tt_internal *ti;
139 down_write(&_lock);
140 if (!(ti = __find_target_type(t->name))) {
141 up_write(&_lock);
142 return -EINVAL;
143 }
145 if (ti->use) {
146 up_write(&_lock);
147 return -ETXTBSY;
148 }
150 list_del(&ti->list);
151 kfree(ti);
153 up_write(&_lock);
154 return 0;
155 }
157 /*
158 * io-err: always fails an io, useful for bringing
159 * up LVs that have holes in them.
160 */
/* io-err constructor: accepts any arguments and always succeeds. */
static int io_err_ctr(struct dm_target *ti, unsigned int argc, char **args)
{
	return 0;
}
/* io-err destructor: nothing was allocated, so nothing to release. */
static void io_err_dtr(struct dm_target *ti)
{
	/* empty */
}
/* io-err map: fail every bio with -EIO. */
static int io_err_map(struct dm_target *ti, struct bio *bio,
		      union map_info *map_context)
{
	return -EIO;
}
/*
 * Built-in "error" target: always fails I/O.  Useful for bringing up
 * logical volumes with holes in them (see comment above io_err_ctr).
 */
static struct target_type error_target = {
	.name = "error",
	.version = {1, 0, 1},
	.ctr = io_err_ctr,
	.dtr = io_err_dtr,
	.map = io_err_map,
};
/* Register the built-in "error" target at DM initialisation time. */
int __init dm_target_init(void)
{
	return dm_register_target(&error_target);
}
/* Tear down the built-in "error" target; warn (but continue) on failure. */
void dm_target_exit(void)
{
	if (dm_unregister_target(&error_target))
		DMWARN("error target unregistration failed");
}
/* Exported so target implementations built as modules can (un)register. */
EXPORT_SYMBOL(dm_register_target);
EXPORT_SYMBOL(dm_unregister_target);