ia64/linux-2.6.18-xen.hg

view arch/mips/pci/ops-nile4.c @ 897:329ea0ccb344

balloon: try harder to balloon up under memory pressure.

Currently, if the balloon driver is unable to increase the guest's
reservation, it assumes the failure was due to reaching its full
allocation, gives up on the ballooning operation and records the limit
it reached as the "hard limit". The driver will not try again until
the target is set again (even to the same value).

However, it is possible that ballooning has in fact failed due to
memory pressure in the host, and it is therefore desirable to keep
attempting to reach the target in case memory becomes available. The
most likely scenario is that some guests are ballooning down while
others are ballooning up, creating temporary memory pressure while
things stabilise. You would not expect a well-behaved toolstack to ask
a domain to balloon to more than its allocation, nor would you expect
it to deliberately over-commit memory by setting balloon targets which
exceed the total host memory.

This patch drops the concept of a hard limit and causes the balloon
driver to retry increasing the reservation on a timer in the same
manner as when decreasing the reservation.
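As a rough sketch of that retry behaviour (illustrative only; balloon_timer,
balloon_process, current_target, current_pages, increase_reservation and
decrease_reservation are hypothetical names standing in for the driver's
internals, not the actual code):

static struct timer_list balloon_timer;		/* hypothetical */

static void balloon_process(unsigned long unused)
{
	/* current_target()/current_pages: hypothetical helpers */
	long credit = current_target() - current_pages;

	if (credit > 0)
		increase_reservation(credit);
	else if (credit < 0)
		decrease_reservation(-credit);

	/*
	 * If we are still short of the target (for example because the
	 * host is under temporary memory pressure), re-arm the timer and
	 * try again later instead of recording a hard limit and giving up.
	 */
	if (current_target() != current_pages)
		mod_timer(&balloon_timer, jiffies + HZ);
}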

Also, if we partially succeed in increasing the reservation
(i.e. we receive fewer pages than we asked for), we may as well keep
those pages rather than returning them to Xen.
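A hedged sketch of the partial-success case (again not the driver's actual
code; frame_list is hypothetical and XENMEM_populate_physmap is only an
assumed example of a memory_op whose return value is the number of extents
actually provided):

/* hypothetical buffer of frame numbers to populate */
static unsigned long frame_list[PAGE_SIZE / sizeof(unsigned long)];

static int increase_reservation(unsigned long nr_pages)
{
	long rc, i;
	struct xen_memory_reservation reservation = {
		.nr_extents   = nr_pages,
		.extent_order = 0,
		.domid        = DOMID_SELF,
	};

	/* frame_list would be filled with the PFNs to populate; omitted here */
	set_xen_guest_handle(reservation.extent_start, frame_list);

	rc = HYPERVISOR_memory_op(XENMEM_populate_physmap, &reservation);
	if (rc < 0)
		return rc;

	/*
	 * rc is the number of pages Xen actually provided, which may be
	 * fewer than nr_pages.  Keep whatever we were given and let the
	 * timer retry for the remainder, rather than handing the partial
	 * allocation back to Xen.
	 */
	for (i = 0; i < rc; i++) {
		/* ... return frame_list[i]'s page to the page allocator ... */
	}
	current_pages += rc;

	return 0;
}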

Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Fri Jun 05 14:01:20 2009 +0100 (2009-06-05)
parents 831230e53067
line source
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <asm/bootinfo.h>

#include <asm/lasat/lasat.h>
#include <asm/gt64120.h>
#include <asm/nile4.h>

#define PCI_ACCESS_READ  0
#define PCI_ACCESS_WRITE 1

#define LO(reg) (reg / 4)
#define HI(reg) (reg / 4 + 1)

volatile unsigned long *const vrc_pciregs = (void *) Vrc5074_BASE;

static DEFINE_SPINLOCK(nile4_pci_lock);

static int nile4_pcibios_config_access(unsigned char access_type,
	struct pci_bus *bus, unsigned int devfn, int where, u32 *val)
{
	unsigned char busnum = bus->number;
	u32 adr, mask, err;

	if ((busnum == 0) && (PCI_SLOT(devfn) > 8))
		/* The addressing scheme chosen leaves room for just
		 * 8 devices on the first busnum (besides the PCI
		 * controller itself) */
		return PCIBIOS_DEVICE_NOT_FOUND;

	if ((busnum == 0) && (devfn == PCI_DEVFN(0, 0))) {
		/* Access controller registers directly */
		if (access_type == PCI_ACCESS_WRITE) {
			vrc_pciregs[(0x200 + where) >> 2] = *val;
		} else {
			*val = vrc_pciregs[(0x200 + where) >> 2];
		}
		return PCIBIOS_SUCCESSFUL;
	}

	/* Temporarily map PCI Window 1 to config space */
	mask = vrc_pciregs[LO(NILE4_PCIINIT1)];
	vrc_pciregs[LO(NILE4_PCIINIT1)] = 0x0000001a | (busnum ? 0x200 : 0);

	/* Clear PCI Error register. This also clears the Error Type
	 * bits in the Control register */
	vrc_pciregs[LO(NILE4_PCIERR)] = 0;
	vrc_pciregs[HI(NILE4_PCIERR)] = 0;

	/* Setup address */
	if (busnum == 0)
		adr = KSEG1ADDR(PCI_WINDOW1) +
		    ((1 << (PCI_SLOT(devfn) + 15)) | (PCI_FUNC(devfn) << 8) |
		     (where & ~3));
	else
		adr = KSEG1ADDR(PCI_WINDOW1) | (busnum << 16) | (devfn << 8) |
		    (where & ~3);

	if (access_type == PCI_ACCESS_WRITE)
		*(u32 *) adr = *val;
	else
		*val = *(u32 *) adr;

	/* Check for master or target abort */
	err = (vrc_pciregs[HI(NILE4_PCICTRL)] >> 5) & 0x7;

	/* Restore PCI Window 1 */
	vrc_pciregs[LO(NILE4_PCIINIT1)] = mask;

	if (err)
		return PCIBIOS_DEVICE_NOT_FOUND;

	return PCIBIOS_SUCCESSFUL;
}

static int nile4_pcibios_read(struct pci_bus *bus, unsigned int devfn,
	int where, int size, u32 *val)
{
	unsigned long flags;
	u32 data = 0;
	int err;

	if ((size == 2) && (where & 1))
		return PCIBIOS_BAD_REGISTER_NUMBER;
	else if ((size == 4) && (where & 3))
		return PCIBIOS_BAD_REGISTER_NUMBER;

	spin_lock_irqsave(&nile4_pci_lock, flags);
	err = nile4_pcibios_config_access(PCI_ACCESS_READ, bus, devfn, where,
					  &data);
	spin_unlock_irqrestore(&nile4_pci_lock, flags);

	if (err)
		return err;

	if (size == 1)
		*val = (data >> ((where & 3) << 3)) & 0xff;
	else if (size == 2)
		*val = (data >> ((where & 3) << 3)) & 0xffff;
	else
		*val = data;

	return PCIBIOS_SUCCESSFUL;
}

static int nile4_pcibios_write(struct pci_bus *bus, unsigned int devfn,
	int where, int size, u32 val)
{
	unsigned long flags;
	u32 data = 0;
	int err;

	if ((size == 2) && (where & 1))
		return PCIBIOS_BAD_REGISTER_NUMBER;
	else if ((size == 4) && (where & 3))
		return PCIBIOS_BAD_REGISTER_NUMBER;

	spin_lock_irqsave(&nile4_pci_lock, flags);
	err = nile4_pcibios_config_access(PCI_ACCESS_READ, bus, devfn, where,
					  &data);
	spin_unlock_irqrestore(&nile4_pci_lock, flags);

	if (err)
		return err;

	if (size == 1)
		data = (data & ~(0xff << ((where & 3) << 3))) |
		       (val << ((where & 3) << 3));
	else if (size == 2)
		data = (data & ~(0xffff << ((where & 3) << 3))) |
		       (val << ((where & 3) << 3));
	else
		data = val;

	if (nile4_pcibios_config_access(PCI_ACCESS_WRITE, bus, devfn, where,
					&data))
		return -1;

	return PCIBIOS_SUCCESSFUL;
}

struct pci_ops nile4_pci_ops = {
	.read = nile4_pcibios_read,
	.write = nile4_pcibios_write,
};