ia64/xen-unstable: xen/common/xmalloc.c @ 15927:b7eb2bb9b625

IRQ injection changes for HVM PCI passthru.
Signed-off-by: Allen Kay <allen.m.kay@intel.com>
Signed-off-by: Guy Zana <guy@neocleus.com>

Author: kfraser@localhost.localdomain
Date:   Tue Sep 18 16:09:19 2007 +0100

/******************************************************************************
 * Simple allocator for Xen. If larger than a page, simply use the
 * page-order allocator.
 *
 * Copyright (C) 2005 Rusty Russell IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

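/*
 * Usage sketch (illustrative; the type-safe xmalloc()/xfree() wrappers
 * are assumed to be declared in include/xen/xmalloc.h, as in Xen trees
 * of this era):
 *
 *     struct foo *f = xmalloc(struct foo);  // _xmalloc(sizeof, __alignof__)
 *     if ( f == NULL )
 *         return -ENOMEM;
 *     ...
 *     xfree(f);
 *
 * Sub-page requests are carved from a single system-wide free list
 * below; requests of a page or more go straight to the xenheap page
 * allocator.
 */
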
/*
 * TODO (Keir, 17/2/05):
 *  1. Use space in page_info to avoid xmalloc_hdr in allocated blocks.
 *  2. page_info points into free list to make xfree() O(1) complexity.
 *  3. Perhaps make this a sub-page buddy allocator? xmalloc() == O(1).
 *     (Disadvantage is potentially greater internal fragmentation).
 */

#include <xen/config.h>
#include <xen/mm.h>
#include <xen/spinlock.h>
#include <xen/timer.h>
#include <xen/cache.h>
#include <xen/prefetch.h>
#include <xen/irq.h>
#include <xen/smp.h>

/*
 * XMALLOC_DEBUG:
 *  1. Free data blocks are filled with poison bytes.
 *  2. In-use data blocks have guard bytes at the start and end.
 */
#ifndef NDEBUG
#define XMALLOC_DEBUG 1
#endif

static LIST_HEAD(freelist);
static DEFINE_SPINLOCK(freelist_lock);

struct xmalloc_hdr
{
    /* Size is total including this header. */
    size_t size;
    struct list_head freelist;
} __cacheline_aligned;

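/*
 * Resulting block layout (sketch). In debug builds, data_from_header()
 * below brackets the caller-visible region with SMP_CACHE_BYTES-wide
 * canaries:
 *
 *     | xmalloc_hdr | 0x5a canary | caller data ... | 0x5a canary |
 *
 * In non-debug builds the caller data starts immediately after the
 * header. Free blocks are instead poisoned with 0xa5 bytes.
 */
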
static void add_to_freelist(struct xmalloc_hdr *hdr)
{
#if XMALLOC_DEBUG
    memset(hdr + 1, 0xa5, hdr->size - sizeof(*hdr));
#endif
    list_add(&hdr->freelist, &freelist);
}

static void del_from_freelist(struct xmalloc_hdr *hdr)
{
#if XMALLOC_DEBUG
    size_t i;
    unsigned char *data = (unsigned char *)(hdr + 1);
    for ( i = 0; i < (hdr->size - sizeof(*hdr)); i++ )
        BUG_ON(data[i] != 0xa5);
    BUG_ON((hdr->size <= 0) || (hdr->size >= PAGE_SIZE));
#endif
    list_del(&hdr->freelist);
}

static void *data_from_header(struct xmalloc_hdr *hdr)
{
#if XMALLOC_DEBUG
    /* Data blocks contain SMP_CACHE_BYTES of guard canary at each end. */
    unsigned char *data = (unsigned char *)(hdr + 1);
    memset(data, 0x5a, SMP_CACHE_BYTES);
    memset(data + hdr->size - sizeof(*hdr) - SMP_CACHE_BYTES,
           0x5a, SMP_CACHE_BYTES);
    return data + SMP_CACHE_BYTES;
#else
    return hdr + 1;
#endif
}

static struct xmalloc_hdr *header_from_data(void *p)
{
#if XMALLOC_DEBUG
    unsigned char *data = (unsigned char *)p - SMP_CACHE_BYTES;
    struct xmalloc_hdr *hdr = (struct xmalloc_hdr *)data - 1;
    size_t i;

    /* Check header guard canary. */
    for ( i = 0; i < SMP_CACHE_BYTES; i++ )
        BUG_ON(data[i] != 0x5a);

    /* Check footer guard canary. */
    data += hdr->size - sizeof(*hdr) - SMP_CACHE_BYTES;
    for ( i = 0; i < SMP_CACHE_BYTES; i++ )
        BUG_ON(data[i] != 0x5a);

    return hdr;
#else
    return (struct xmalloc_hdr *)p - 1;
#endif
}

static void maybe_split(struct xmalloc_hdr *hdr, size_t size, size_t block)
{
    struct xmalloc_hdr *extra;
    size_t leftover = block - size;

    /* If enough is left to make a block, put it on free list. */
    if ( leftover >= (2 * sizeof(struct xmalloc_hdr)) )
    {
        extra = (struct xmalloc_hdr *)((unsigned long)hdr + size);
        extra->size = leftover;
        add_to_freelist(extra);
    }
    else
    {
        size = block;
    }

    hdr->size = size;
    /* Debugging aid. */
    hdr->freelist.next = hdr->freelist.prev = NULL;
}

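/*
 * Worked example (assuming PAGE_SIZE == 4096 and, say,
 * sizeof(struct xmalloc_hdr) == 64 after cacheline padding): carving a
 * 192-byte block out of a fresh page calls maybe_split(hdr, 192, 4096).
 * leftover == 3904 >= 128 (two headers), so a 3904-byte remainder block
 * starting at offset 192 goes back on the free list. Had the leftover
 * been smaller than two headers, the whole block would simply be handed
 * out to avoid creating an unusable fragment.
 */
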
static void *xmalloc_new_page(size_t size)
{
    struct xmalloc_hdr *hdr;
    unsigned long flags;

    hdr = alloc_xenheap_page();
    if ( hdr == NULL )
        return NULL;

    spin_lock_irqsave(&freelist_lock, flags);
    maybe_split(hdr, size, PAGE_SIZE);
    spin_unlock_irqrestore(&freelist_lock, flags);

    return data_from_header(hdr);
}

/* Big object? Just use the page allocator. */
static void *xmalloc_whole_pages(size_t size)
{
    struct xmalloc_hdr *hdr;
    unsigned int pageorder = get_order_from_bytes(size);

    hdr = alloc_xenheap_pages(pageorder);
    if ( hdr == NULL )
        return NULL;

    hdr->size = (1 << (pageorder + PAGE_SHIFT));
    /* Debugging aid. */
    hdr->freelist.next = hdr->freelist.prev = NULL;

    return data_from_header(hdr);
}

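/*
 * Example of the rounding this implies (assuming PAGE_SIZE == 4096): a
 * 5000-byte request (after header/canary overhead) gets pageorder 1,
 * i.e. two contiguous pages, and hdr->size is recorded as 8192 so that
 * xfree() can later hand the same order back to free_xenheap_pages().
 */
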
/* Return size, increased to alignment with align. */
static inline size_t align_up(size_t size, size_t align)
{
    return (size + align - 1) & ~(align - 1);
}

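/*
 * The mask trick requires align to be a power of two. For example,
 * align_up(13, 8) == (13 + 7) & ~7 == 20 & ~7 == 16, while an already
 * aligned value is unchanged: align_up(16, 8) == 23 & ~7 == 16.
 */
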
void *_xmalloc(size_t size, size_t align)
{
    struct xmalloc_hdr *i;
    unsigned long flags;

    ASSERT(!in_irq());

    /* We currently always return cacheline aligned. */
    BUG_ON(align > SMP_CACHE_BYTES);

#if XMALLOC_DEBUG
    /* Add room for canaries at start and end of data block. */
    size += 2 * SMP_CACHE_BYTES;
#endif

    /* Add room for header, pad to align next header. */
    size += sizeof(struct xmalloc_hdr);
    size = align_up(size, __alignof__(struct xmalloc_hdr));

    /* For big allocs, give them whole pages. */
    if ( size >= PAGE_SIZE )
        return xmalloc_whole_pages(size);

    /* Search free list. */
    spin_lock_irqsave(&freelist_lock, flags);
    list_for_each_entry( i, &freelist, freelist )
    {
        if ( i->size < size )
            continue;
        del_from_freelist(i);
        maybe_split(i, size, i->size);
        spin_unlock_irqrestore(&freelist_lock, flags);
        return data_from_header(i);
    }
    spin_unlock_irqrestore(&freelist_lock, flags);

    /* Alloc a new page and return from that. */
    return xmalloc_new_page(size);
}

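/*
 * Size arithmetic, concretely (a sketch assuming a non-debug build,
 * SMP_CACHE_BYTES == 64 and sizeof(struct xmalloc_hdr) == 64): a
 * request for 100 bytes becomes 100 + 64 == 164, aligned up to 192.
 * The free-list walk is first-fit: the first block of at least 192
 * bytes is taken, and maybe_split() returns any usable tail to the
 * free list.
 */
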
void xfree(void *p)
{
    unsigned long flags;
    struct xmalloc_hdr *i, *tmp, *hdr;

    ASSERT(!in_irq());

    if ( p == NULL )
        return;

    hdr = header_from_data(p);

    /* We know hdr will be on same page. */
    BUG_ON(((long)p & PAGE_MASK) != ((long)hdr & PAGE_MASK));

    /* Not previously freed. */
    BUG_ON(hdr->freelist.next || hdr->freelist.prev);

    /* Big allocs free directly. */
    if ( hdr->size >= PAGE_SIZE )
    {
        free_xenheap_pages(hdr, get_order_from_bytes(hdr->size));
        return;
    }

    /* Merge with other free block, or put in list. */
    spin_lock_irqsave(&freelist_lock, flags);
    list_for_each_entry_safe( i, tmp, &freelist, freelist )
    {
        unsigned long _i = (unsigned long)i;
        unsigned long _hdr = (unsigned long)hdr;

        /* Do not merge across page boundaries. */
        if ( ((_i ^ _hdr) & PAGE_MASK) != 0 )
            continue;

        /* We follow this block? Swallow it. */
        if ( (_i + i->size) == _hdr )
        {
            del_from_freelist(i);
            i->size += hdr->size;
            hdr = i;
        }

        /* We precede this block? Swallow it. */
        if ( (_hdr + hdr->size) == _i )
        {
            del_from_freelist(i);
            hdr->size += i->size;
        }
    }

    /* Did we merge an entire page? */
    if ( hdr->size == PAGE_SIZE )
    {
        BUG_ON((((unsigned long)hdr) & (PAGE_SIZE-1)) != 0);
        free_xenheap_pages(hdr, 0);
    }
    else
    {
        add_to_freelist(hdr);
    }

    spin_unlock_irqrestore(&freelist_lock, flags);
}

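/*
 * Coalescing example: suppose a page was split into blocks A, B and C
 * in address order, with A and C already on the free list. Freeing B
 * lets the walk above first merge A into the freed block (A ends where
 * B starts, so hdr becomes A), then merge C onto the end of the
 * combined block in the same pass. If the result covers the entire
 * page (hdr->size == PAGE_SIZE), the page itself is returned to the
 * xenheap rather than kept on the free list.
 */
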
/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */