ia64/linux-2.6.18-xen.hg

view drivers/md/raid6recov.c @ 897:329ea0ccb344

balloon: try harder to balloon up under memory pressure.

Currently if the balloon driver is unable to increase the guest's
reservation it assumes the failure was due to reaching its full
allocation, gives up on the ballooning operation and records the limit
it reached as the "hard limit". The driver will not try again until
the target is set again (even to the same value).

However, it is possible that ballooning has in fact failed due to
memory pressure in the host, and therefore it is desirable to keep
attempting to reach the target in case memory becomes available. The
most likely scenario is that some guests are ballooning down while
others are ballooning up, creating temporary memory pressure while
things stabilise. You would not expect a well-behaved toolstack to
ask a domain to balloon to more than its allocation, nor would you
expect it to deliberately over-commit memory by setting balloon
targets which exceed the total host memory.

This patch drops the concept of a hard limit and causes the balloon
driver to retry increasing the reservation on a timer in the same
manner as when decreasing the reservation.

Also, if we only partially succeed in increasing the reservation
(i.e. receive fewer pages than we asked for), then we may as well
keep those pages rather than returning them to Xen (see the sketch
below).

Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Fri Jun 05 14:01:20 2009 +0100 (2009-06-05)
parents 831230e53067
children
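
A minimal sketch of the retry policy the message describes, modelled in
userspace C: the driver's timer re-arm becomes a plain loop, and
increase_reservation() here is a hypothetical stand-in for the real
hypercall-backed allocator, not the actual balloon.c interface. The point
it illustrates is that a shortfall no longer latches a hard limit: the
pages that were granted are kept, and only the remainder is retried.

#include <stdio.h>

/* Hypothetical stand-in for the hypercall-backed allocator: the host may
 * grant anywhere from 0 to nr_pages, e.g. under transient memory pressure. */
static long increase_reservation(long nr_pages)
{
        static long host_free = 100;
        long granted = nr_pages < host_free ? nr_pages : host_free;

        host_free -= granted;
        host_free += 120;       /* other guests balloon down, freeing memory */
        return granted;
}

int main(void)
{
        long current_pages = 0, target = 300;

        /* No hard limit is recorded on failure: keep whatever pages were
         * granted and re-arm the retry (a timer in the real driver). */
        while (current_pages < target) {
                current_pages += increase_reservation(target - current_pages);
                printf("ballooned up to %ld/%ld pages\n",
                       current_pages, target);
        }
        return 0;
}

The loop converges precisely because other guests' balloon-downs keep
freeing host memory, which is the transient-pressure scenario the
changeset description gives for retrying instead of giving up.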
line source
/* -*- linux-c -*- ------------------------------------------------------- *
 *
 *   Copyright 2002 H. Peter Anvin - All Rights Reserved
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation, Inc., 53 Temple Place Ste 330,
 *   Boston MA 02111-1307, USA; either version 2 of the License, or
 *   (at your option) any later version; incorporated herein by reference.
 *
 * ----------------------------------------------------------------------- */

/*
 * raid6recov.c
 *
 * RAID-6 data recovery in dual failure mode.  In single failure mode,
 * use the RAID-5 algorithm (or, in the case of Q failure, just reconstruct
 * the syndrome.)
 */

#include "raid6.h"
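
/* Background for the recovery routines below: the P and Q syndromes are
 * P = D_0 ^ D_1 ^ ... ^ D_{n-1} and Q = g^0*D_0 ^ g^1*D_1 ^ ... ^
 * g^{n-1}*D_{n-1}, computed over GF(2^8) with generator g = 2 and
 * polynomial 0x11d.  The raid6_gfmul/raid6_gfinv/raid6_gfexp/raid6_gfexi
 * tables provide multiplication, inversion, powers of g, and 1/(g^i + 1)
 * in that field. */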

/* Recover two failed data blocks. */
void raid6_2data_recov(int disks, size_t bytes, int faila, int failb,
                       void **ptrs)
{
        u8 *p, *q, *dp, *dq;
        u8 px, qx, db;
        const u8 *pbmul;        /* P multiplier table for B data */
        const u8 *qmul;         /* Q multiplier table (for both) */

        p = (u8 *)ptrs[disks-2];
        q = (u8 *)ptrs[disks-1];

        /* Compute syndrome with zero for the missing data pages.
           Use the dead data pages as temporary storage for
           delta p and delta q. */
        dp = (u8 *)ptrs[faila];
        ptrs[faila] = (void *)raid6_empty_zero_page;
        ptrs[disks-2] = dp;
        dq = (u8 *)ptrs[failb];
        ptrs[failb] = (void *)raid6_empty_zero_page;
        ptrs[disks-1] = dq;

        raid6_call.gen_syndrome(disks, bytes, ptrs);

        /* Restore pointer table */
        ptrs[faila]   = dp;
        ptrs[failb]   = dq;
        ptrs[disks-2] = p;
        ptrs[disks-1] = q;

        /* Now, pick the proper data tables: pbmul multiplies by
           1/(g^(failb-faila) + 1) and qmul by 1/(g^faila + g^failb),
           the coefficients that solve the two-unknown system
           P^dp = Da^Db, Q^dq = g^faila*Da ^ g^failb*Db. */
        pbmul = raid6_gfmul[raid6_gfexi[failb-faila]];
        qmul  = raid6_gfmul[raid6_gfinv[raid6_gfexp[faila]^raid6_gfexp[failb]]];

        /* Now do it... */
        while ( bytes-- ) {
                px    = *p ^ *dp;
                qx    = qmul[*q ^ *dq];
                *dq++ = db = pbmul[px] ^ qx; /* Reconstructed B */
                *dp++ = db ^ px;             /* Reconstructed A */
                p++; q++;
        }
}
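
As a concrete check of the algebra, here is a minimal, self-contained
userspace sketch, assuming four data disks and one byte per disk, with
small locally computed GF(2^8) helpers (gf_mul, gf_exp, gf_inv) standing
in for the kernel's raid6_gf* lookup tables:

#include <stdio.h>
#include <stdint.h>

/* Multiply in GF(2^8) modulo the RAID-6 polynomial 0x11d. */
static uint8_t gf_mul(uint8_t a, uint8_t b)
{
        uint8_t r = 0;
        while (b) {
                if (b & 1)
                        r ^= a;
                b >>= 1;
                a = (a << 1) ^ ((a & 0x80) ? 0x1d : 0);
        }
        return r;
}

/* g^e for generator g = 2. */
static uint8_t gf_exp(int e)
{
        uint8_t r = 1;
        while (e--)
                r = gf_mul(r, 2);
        return r;
}

/* a^-1 = a^254, since a^255 = 1 for any nonzero a. */
static uint8_t gf_inv(uint8_t a)
{
        uint8_t r = 1;
        int i;
        for (i = 0; i < 254; i++)
                r = gf_mul(r, a);
        return r;
}

int main(void)
{
        uint8_t d[4] = { 0x12, 0x34, 0x56, 0x78 };   /* one byte per data disk */
        uint8_t p = 0, q = 0, dp = 0, dq = 0, px, qx, db;
        int i, faila = 1, failb = 2;                 /* lose disks 1 and 2 */

        for (i = 0; i < 4; i++) {                    /* the stored P and Q */
                p ^= d[i];
                q ^= gf_mul(gf_exp(i), d[i]);
        }

        /* Syndromes recomputed with zeros in the failed slots, as
         * gen_syndrome() does after the pointer-table shuffle above. */
        for (i = 0; i < 4; i++) {
                if (i == faila || i == failb)
                        continue;
                dp ^= d[i];
                dq ^= gf_mul(gf_exp(i), d[i]);
        }

        px = p ^ dp;                                 /* = Da ^ Db */
        qx = gf_mul(gf_inv(gf_exp(faila) ^ gf_exp(failb)), q ^ dq); /* qmul  */
        db = gf_mul(gf_inv(gf_exp(failb - faila) ^ 1), px) ^ qx;    /* pbmul */

        printf("Db = 0x%02x (expected 0x%02x)\n", db, d[failb]);
        printf("Da = 0x%02x (expected 0x%02x)\n", db ^ px, d[faila]);
        return 0;
}

The two table lookups in the kernel loop perform exactly these divisions
by (g^(failb-faila) + 1) and (g^faila + g^failb); running this prints
both bytes recovered intact.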

/* Recover failure of one data block plus the P block */
void raid6_datap_recov(int disks, size_t bytes, int faila, void **ptrs)
{
        u8 *p, *q, *dq;
        const u8 *qmul;         /* Q multiplier table */

        p = (u8 *)ptrs[disks-2];
        q = (u8 *)ptrs[disks-1];

        /* Compute syndrome with zero for the missing data page.
           Use the dead data page as temporary storage for delta q. */
        dq = (u8 *)ptrs[faila];
        ptrs[faila] = (void *)raid6_empty_zero_page;
        ptrs[disks-1] = dq;

        raid6_call.gen_syndrome(disks, bytes, ptrs);

        /* Restore pointer table */
        ptrs[faila]   = dq;
        ptrs[disks-1] = q;

        /* Now, pick the proper data tables */
        qmul = raid6_gfmul[raid6_gfinv[raid6_gfexp[faila]]];

        /* Now do it... */
        while ( bytes-- ) {
                *p++ ^= *dq = qmul[*q ^ *dq];
                q++; dq++;
        }
}
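
The data+P case admits the same kind of single-byte sketch (again
self-contained, with a local gf_mul/gf_inv standing in for the raid6_gf*
tables): regenerating Q with a zero in the failed slot leaves
Q ^ dq = g^faila * Da, so a single multiply by the inverse of g^faila
recovers Da, and P is then repaired by folding Da back in:

#include <stdio.h>
#include <stdint.h>

static uint8_t gf_mul(uint8_t a, uint8_t b)     /* GF(2^8) multiply mod 0x11d */
{
        uint8_t r = 0;
        while (b) {
                if (b & 1)
                        r ^= a;
                b >>= 1;
                a = (a << 1) ^ ((a & 0x80) ? 0x1d : 0);
        }
        return r;
}

static uint8_t gf_inv(uint8_t a)                /* a^-1 = a^254 */
{
        uint8_t r = 1;
        int i;
        for (i = 0; i < 254; i++)
                r = gf_mul(r, a);
        return r;
}

int main(void)
{
        uint8_t d[4] = { 0x12, 0x34, 0x56, 0x78 };
        uint8_t g[4] = { 1, 2, 4, 8 };          /* g^i for i = 0..3 */
        uint8_t q = 0, dp = 0, dq = 0, da;
        int i, faila = 2;                       /* disk 2 and P are lost */

        for (i = 0; i < 4; i++)                 /* the surviving Q */
                q ^= gf_mul(g[i], d[i]);

        /* Partial P and Q over the surviving data blocks only. */
        for (i = 0; i < 4; i++) {
                if (i == faila)
                        continue;
                dp ^= d[i];
                dq ^= gf_mul(g[i], d[i]);
        }

        da = gf_mul(gf_inv(g[faila]), q ^ dq);  /* the qmul step */

        printf("Da = 0x%02x (expected 0x%02x)\n", da, d[faila]);
        printf("P  = 0x%02x (expected 0x%02x)\n", dp ^ da,
               (uint8_t)(d[0] ^ d[1] ^ d[2] ^ d[3]));
        return 0;
}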

#ifndef __KERNEL__              /* Testing only */

/* Recover two failed blocks. */
void raid6_dual_recov(int disks, size_t bytes, int faila, int failb, void **ptrs)
{
        if ( faila > failb ) {
                int tmp = faila;
                faila = failb;
                failb = tmp;
        }

        if ( failb == disks-1 ) {
                if ( faila == disks-2 ) {
                        /* P+Q failure.  Just rebuild the syndrome. */
                        raid6_call.gen_syndrome(disks, bytes, ptrs);
                } else {
                        /* data+Q failure.  Reconstruct data from P,
                           then rebuild syndrome. */
                        /* NOT IMPLEMENTED - equivalent to RAID-5 */
                }
        } else {
                if ( failb == disks-2 ) {
                        /* data+P failure. */
                        raid6_datap_recov(disks, bytes, faila, ptrs);
                } else {
                        /* data+data failure. */
                        raid6_2data_recov(disks, bytes, faila, failb, ptrs);
                }
        }
}

#endif