ia64/linux-2.6.18-xen.hg

view drivers/md/dm.h @ 897:329ea0ccb344

balloon: try harder to balloon up under memory pressure.

Currently if the balloon driver is unable to increase the guest's
reservation it assumes the failure was due to reaching its full
allocation, gives up on the ballooning operation and records the limit
it reached as the "hard limit". The driver will not try again until
the target is set again (even to the same value).

However it is possible that ballooning has in fact failed due to
memory pressure in the host and therefore it is desirable to keep
attempting to reach the target in case memory becomes available. The
most likely scenario is that some guests are ballooning down while
others are ballooning up and therefore there is temporary memory
pressure while things stabilise. You would not expect a well behaved
toolstack to ask a domain to balloon to more than its allocation nor
would you expect it to deliberately over-commit memory by setting
balloon targets which exceed the total host memory.

This patch drops the concept of a hard limit and causes the balloon
driver to retry increasing the reservation on a timer in the same
manner as when decreasing the reservation.

Also if we partially succeed in increasing the reservation
(i.e. receive fewer pages than we asked for) then we may as well keep
those pages rather than returning them to Xen.

Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Fri Jun 05 14:01:20 2009 +0100 (2009-06-05)
parents 831230e53067
children
line source
1 /*
2 * Internal header file for device mapper
3 *
4 * Copyright (C) 2001, 2002 Sistina Software
5 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
6 *
7 * This file is released under the LGPL.
8 */
10 #ifndef DM_INTERNAL_H
11 #define DM_INTERNAL_H
13 #include <linux/fs.h>
14 #include <linux/device-mapper.h>
15 #include <linux/list.h>
16 #include <linux/blkdev.h>
17 #include <linux/hdreg.h>
#define DM_NAME "device-mapper"

/*
 * Logging helpers. Messages are tagged "device-mapper: <prefix>: ...";
 * assumes DM_MSG_PREFIX is defined by the including file — confirm per
 * target.
 */
#define DMERR(f, arg...) printk(KERN_ERR DM_NAME ": " DM_MSG_PREFIX ": " f "\n", ## arg)
#define DMWARN(f, arg...) printk(KERN_WARNING DM_NAME ": " DM_MSG_PREFIX ": " f "\n", ## arg)
#define DMINFO(f, arg...) printk(KERN_INFO DM_NAME ": " DM_MSG_PREFIX ": " f "\n", ## arg)

/*
 * Append formatted text to a status buffer. Expects `result`, `sz` and
 * `maxlen` to be in the caller's scope; emits nothing once the buffer
 * is full (sz >= maxlen).
 */
#define DMEMIT(x...) sz += ((sz >= maxlen) ? \
			  0 : scnprintf(result + sz, maxlen - sz, x))

/* Sectors are 512 bytes: bytes = sectors << SECTOR_SHIFT. */
#define SECTOR_SHIFT 9
/*
 * List of devices that a metadevice uses and should open/close.
 */
struct dm_dev {
	struct list_head list;		/* entry in the owning table's device list */

	atomic_t count;			/* reference count on this open device */
	int mode;			/* open mode — presumably FMODE_* flags; confirm against callers */
	struct block_device *bdev;	/* the underlying open block device */
	char name[16];			/* human-readable device identifier */
};
42 struct dm_table;
44 /*-----------------------------------------------------------------
45 * Internal table functions.
46 *---------------------------------------------------------------*/
47 void dm_table_event_callback(struct dm_table *t,
48 void (*fn)(void *), void *context);
49 struct dm_target *dm_table_get_target(struct dm_table *t, unsigned int index);
50 struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector);
51 void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q);
52 struct list_head *dm_table_get_devices(struct dm_table *t);
53 void dm_table_presuspend_targets(struct dm_table *t);
54 void dm_table_postsuspend_targets(struct dm_table *t);
55 void dm_table_resume_targets(struct dm_table *t);
56 int dm_table_any_congested(struct dm_table *t, int bdi_bits);
57 void dm_table_unplug_all(struct dm_table *t);
58 int dm_table_flush_all(struct dm_table *t);
60 /*-----------------------------------------------------------------
61 * A registry of target types.
62 *---------------------------------------------------------------*/
63 int dm_target_init(void);
64 void dm_target_exit(void);
65 struct target_type *dm_get_target_type(const char *name);
66 void dm_put_target_type(struct target_type *t);
67 int dm_target_iterate(void (*iter_func)(struct target_type *tt,
68 void *param), void *param);
70 /*-----------------------------------------------------------------
71 * Useful inlines.
72 *---------------------------------------------------------------*/
/*
 * Return nonzero if allocating an array of @num objects of @obj bytes
 * each, plus @fixed bytes of header, would exceed ULONG_MAX bytes
 * (i.e. the size computation fixed + num * obj would overflow).
 *
 * Guard obj == 0 explicitly: the original divided by @obj
 * unconditionally, which is undefined behaviour for zero-sized
 * objects. A zero-sized object count can never overflow, so report 0.
 */
static inline int array_too_big(unsigned long fixed, unsigned long obj,
				unsigned long num)
{
	if (obj == 0)
		return 0;
	return (num > (ULONG_MAX - fixed) / obj);
}
/*
 * Ceiling(n / sz)
 */
#define dm_div_up(n, sz) (((n) + (sz) - 1) / (sz))

/*
 * Ceiling division for sector_t values. sector_div() is used instead
 * of plain `/` — presumably because sector_t may be 64-bit on 32-bit
 * hosts where native 64-bit division is unavailable; confirm against
 * the sector_div() definition. The GCC statement expression makes the
 * macro usable as a single value; NOTE(review): sector_div() appears
 * to divide _r in place (kernel convention) — verify.
 */
#define dm_sector_div_up(n, sz) ( \
{ \
	sector_t _r = ((n) + (sz) - 1); \
	sector_div(_r, (sz)); \
	_r; \
} \
)

/*
 * ceiling(n / size) * size — round n up to the next multiple of sz.
 */
#define dm_round_up(n, sz) (dm_div_up((n), (sz)) * (sz))
97 static inline sector_t to_sector(unsigned long n)
98 {
99 return (n >> 9);
100 }
102 static inline unsigned long to_bytes(sector_t n)
103 {
104 return (n << 9);
105 }
107 int dm_split_args(int *argc, char ***argvp, char *input);
109 /*
110 * The device-mapper can be driven through one of two interfaces;
111 * ioctl or filesystem, depending which patch you have applied.
112 */
113 int dm_interface_init(void);
114 void dm_interface_exit(void);
116 /*
117 * Targets for linear and striped mappings
118 */
119 int dm_linear_init(void);
120 void dm_linear_exit(void);
122 int dm_stripe_init(void);
123 void dm_stripe_exit(void);
125 void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size);
126 union map_info *dm_get_mapinfo(struct bio *bio);
127 int dm_open_count(struct mapped_device *md);
128 int dm_lock_for_deletion(struct mapped_device *md);
130 #endif