xen/common/xmalloc.c @ 18594:5e4e234d58be

/******************************************************************************
 * Simple allocator for Xen.  If larger than a page, simply use the
 * page-order allocator.
 *
 * Copyright (C) 2005 Rusty Russell IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

/*
 * TODO (Keir, 17/2/05):
 *  1. Use space in page_info to avoid xmalloc_hdr in allocated blocks.
 *  2. page_info points into free list to make xfree() O(1) complexity.
 *  3. Perhaps make this a sub-page buddy allocator?  xmalloc() == O(1).
 *     (Disadvantage is potentially greater internal fragmentation.)
 */
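
/*
 * Typical usage, as a sketch: callers normally reach this allocator
 * through the xmalloc()/xfree() wrappers (declared in xen/xmalloc.h);
 * 'struct foo' below is purely illustrative.
 *
 *     struct foo *f = _xmalloc(sizeof(struct foo), __alignof__(struct foo));
 *     if ( f == NULL )
 *         return -ENOMEM;
 *     ...use f...
 *     xfree(f);
 */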

#include <xen/config.h>
#include <xen/mm.h>
#include <xen/list.h>
#include <xen/spinlock.h>
#include <xen/timer.h>
#include <xen/cache.h>
#include <xen/prefetch.h>
#include <xen/irq.h>
#include <xen/smp.h>

/*
 * XMALLOC_DEBUG:
 *  1. Free data blocks are filled with poison bytes.
 *  2. In-use data blocks have guard bytes at the start and end.
 */
#ifndef NDEBUG
#define XMALLOC_DEBUG 1
#endif

static LIST_HEAD(freelist);
static DEFINE_SPINLOCK(freelist_lock);
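
/*
 * Every block, allocated or free, starts with this header.  Because the
 * struct is __cacheline_aligned, the payload following it is cacheline
 * aligned too, which is how _xmalloc() meets its alignment guarantee.
 */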
struct xmalloc_hdr
{
    /* Size is total including this header. */
    size_t size;
    struct list_head freelist;
} __cacheline_aligned;
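
/*
 * Put a block on the free list.  Under XMALLOC_DEBUG the payload is
 * poisoned with 0xa5 bytes so that a write through a stale pointer is
 * caught later by del_from_freelist().
 */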
static void add_to_freelist(struct xmalloc_hdr *hdr)
{
#if XMALLOC_DEBUG
    memset(hdr + 1, 0xa5, hdr->size - sizeof(*hdr));
#endif
    list_add(&hdr->freelist, &freelist);
}
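
/*
 * Take a block off the free list.  Under XMALLOC_DEBUG, first check that
 * the 0xa5 poison is intact and that the recorded size is sane.
 */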
static void del_from_freelist(struct xmalloc_hdr *hdr)
{
#if XMALLOC_DEBUG
    size_t i;
    unsigned char *data = (unsigned char *)(hdr + 1);
    for ( i = 0; i < (hdr->size - sizeof(*hdr)); i++ )
        BUG_ON(data[i] != 0xa5);
    BUG_ON((hdr->size == 0) || (hdr->size >= PAGE_SIZE));
#endif
    list_del(&hdr->freelist);
}
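
/*
 * Convert a block header into the pointer handed back to the caller.
 * Debug builds reserve SMP_CACHE_BYTES of 0x5a guard canary on each side
 * of the payload and return the address just past the leading canary.
 */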
static void *data_from_header(struct xmalloc_hdr *hdr)
{
#if XMALLOC_DEBUG
    /* Write SMP_CACHE_BYTES of guard canary at each end of the data block. */
    unsigned char *data = (unsigned char *)(hdr + 1);
    memset(data, 0x5a, SMP_CACHE_BYTES);
    memset(data + hdr->size - sizeof(*hdr) - SMP_CACHE_BYTES,
           0x5a, SMP_CACHE_BYTES);
    return data + SMP_CACHE_BYTES;
#else
    return hdr + 1;
#endif
}
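
/*
 * Inverse of data_from_header(): recover the block header from a caller
 * pointer, verifying both guard canaries in debug builds.
 */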
static struct xmalloc_hdr *header_from_data(void *p)
{
#if XMALLOC_DEBUG
    unsigned char *data = (unsigned char *)p - SMP_CACHE_BYTES;
    struct xmalloc_hdr *hdr = (struct xmalloc_hdr *)data - 1;
    size_t i;

    /* Check header guard canary. */
    for ( i = 0; i < SMP_CACHE_BYTES; i++ )
        BUG_ON(data[i] != 0x5a);

    /* Check footer guard canary. */
    data += hdr->size - sizeof(*hdr) - SMP_CACHE_BYTES;
    for ( i = 0; i < SMP_CACHE_BYTES; i++ )
        BUG_ON(data[i] != 0x5a);

    return hdr;
#else
    return (struct xmalloc_hdr *)p - 1;
#endif
}
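
/*
 * Trim a block of 'block' bytes down to 'size' bytes.  If the leftover is
 * big enough to hold a header plus some payload, it becomes a new free
 * block; otherwise the whole block is handed out to avoid creating an
 * unusable fragment.  Called with freelist_lock held.
 */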
static void maybe_split(struct xmalloc_hdr *hdr, size_t size, size_t block)
{
    struct xmalloc_hdr *extra;
    size_t leftover = block - size;

    /* If enough is left to make a block, put it on free list. */
    if ( leftover >= (2 * sizeof(struct xmalloc_hdr)) )
    {
        extra = (struct xmalloc_hdr *)((unsigned long)hdr + size);
        extra->size = leftover;
        add_to_freelist(extra);
    }
    else
    {
        size = block;
    }

    hdr->size = size;
    /* Debugging aid. */
    hdr->freelist.next = hdr->freelist.prev = NULL;
}
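
/*
 * Sub-page allocation slow path: no suitable block on the free list, so
 * grab a fresh xenheap page, carve 'size' bytes off the front and leave
 * any remainder on the free list.
 */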
static void *xmalloc_new_page(size_t size)
{
    struct xmalloc_hdr *hdr;

    hdr = alloc_xenheap_page();
    if ( hdr == NULL )
        return NULL;

    spin_lock(&freelist_lock);
    maybe_split(hdr, size, PAGE_SIZE);
    spin_unlock(&freelist_lock);

    return data_from_header(hdr);
}

/* Big object?  Just use the page allocator. */
static void *xmalloc_whole_pages(size_t size)
{
    struct xmalloc_hdr *hdr;
    unsigned int pageorder = get_order_from_bytes(size);

    hdr = alloc_xenheap_pages(pageorder);
    if ( hdr == NULL )
        return NULL;

    hdr->size = (1 << (pageorder + PAGE_SHIFT));
    /* Debugging aid. */
    hdr->freelist.next = hdr->freelist.prev = NULL;

    return data_from_header(hdr);
}

/* Round size up to the next multiple of align (align must be a power of 2). */
static inline size_t align_up(size_t size, size_t align)
{
    return (size + align - 1) & ~(align - 1);
}
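
/*
 * Allocate 'size' bytes aligned to 'align' (at most SMP_CACHE_BYTES).
 * The request is inflated to cover the header, any debug canaries and
 * alignment padding, then satisfied from the free list, a fresh page, or
 * (for page-sized-or-larger requests) whole pages.
 *
 * Worked example with hypothetical sizes: if sizeof(struct xmalloc_hdr)
 * and SMP_CACHE_BYTES are both 128, a 100-byte request on a debug build
 * becomes 100 + 2*128 (canaries) + 128 (header) = 484 bytes, rounded up
 * to 512.
 */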
void *_xmalloc(size_t size, size_t align)
{
    struct xmalloc_hdr *i;

    ASSERT(!in_irq());

    /* We currently always return cacheline aligned. */
    BUG_ON(align > SMP_CACHE_BYTES);

#if XMALLOC_DEBUG
    /* Add room for canaries at start and end of data block. */
    size += 2 * SMP_CACHE_BYTES;
#endif

    /* Add room for header, pad to align next header. */
    size += sizeof(struct xmalloc_hdr);
    size = align_up(size, __alignof__(struct xmalloc_hdr));

    /* For big allocs, give them whole pages. */
    if ( size >= PAGE_SIZE )
        return xmalloc_whole_pages(size);

    /* Search free list. */
    spin_lock(&freelist_lock);
    list_for_each_entry( i, &freelist, freelist )
    {
        if ( i->size < size )
            continue;
        del_from_freelist(i);
        maybe_split(i, size, i->size);
        spin_unlock(&freelist_lock);
        return data_from_header(i);
    }
    spin_unlock(&freelist_lock);

    /* Alloc a new page and return from that. */
    return xmalloc_new_page(size);
}
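
/*
 * Free a pointer returned by _xmalloc().  Whole-page allocations go back
 * to the page allocator directly; sub-page blocks are coalesced with
 * adjacent free blocks on the same page, and a page that becomes wholly
 * free is returned to the xenheap.
 */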
void xfree(void *p)
{
    struct xmalloc_hdr *i, *tmp, *hdr;

    ASSERT(!in_irq());

    if ( p == NULL )
        return;

    hdr = header_from_data(p);

    /* We know hdr will be on same page. */
    BUG_ON(((long)p & PAGE_MASK) != ((long)hdr & PAGE_MASK));

    /* Not previously freed. */
    BUG_ON(hdr->freelist.next || hdr->freelist.prev);

    /* Big allocs free directly. */
    if ( hdr->size >= PAGE_SIZE )
    {
        free_xenheap_pages(hdr, get_order_from_bytes(hdr->size));
        return;
    }

    /* Merge with other free block, or put in list. */
    spin_lock(&freelist_lock);
    list_for_each_entry_safe( i, tmp, &freelist, freelist )
    {
        unsigned long _i = (unsigned long)i;
        unsigned long _hdr = (unsigned long)hdr;

        /* Do not merge across page boundaries. */
        if ( ((_i ^ _hdr) & PAGE_MASK) != 0 )
            continue;

        /* We follow this block?  Swallow it. */
        if ( (_i + i->size) == _hdr )
        {
            del_from_freelist(i);
            i->size += hdr->size;
            hdr = i;
        }

        /* We precede this block?  Swallow it. */
        if ( (_hdr + hdr->size) == _i )
        {
            del_from_freelist(i);
            hdr->size += i->size;
        }
    }

    /* Did we merge an entire page? */
    if ( hdr->size == PAGE_SIZE )
    {
        BUG_ON((((unsigned long)hdr) & (PAGE_SIZE-1)) != 0);
        free_xenheap_pages(hdr, 0);
    }
    else
    {
        add_to_freelist(hdr);
    }

    spin_unlock(&freelist_lock);
}

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */