ia64/xen-unstable

view xen/common/xmalloc.c @ 12390:e28beea6d228

[IA64] Fix time services of EFI emulation

This patch serializes the execution of the following EFI runtime services.
- GetTime
- SetTime
- GetWakeTime
- SetWakeTime

Linux/ia64 uses similar spinlocks in the EFI RTC driver.

Signed-off-by: Masaki Kanno <kanno.masaki@jp.fujitsu.com>
author awilliam@xenbuild.aw
date Fri Nov 10 12:03:19 2006 -0700 (2006-11-10)
parents 092170a14212
children 3e2d3d737624
line source
1 /******************************************************************************
2 * Simple allocator for Xen. If larger than a page, simply use the
3 * page-order allocator.
4 *
5 * Copyright (C) 2005 Rusty Russell IBM Corporation
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 */
22 /*
23 * TODO (Keir, 17/2/05):
24 * 1. Use space in page_info to avoid xmalloc_hdr in allocated blocks.
25 * 2. page_info points into free list to make xfree() O(1) complexity.
26 * 3. Perhaps make this a sub-page buddy allocator? xmalloc() == O(1).
27 * (Disadvantage is potentially greater internal fragmentation).
28 */
30 #include <xen/config.h>
31 #include <xen/mm.h>
32 #include <xen/spinlock.h>
33 #include <xen/timer.h>
34 #include <xen/cache.h>
35 #include <xen/prefetch.h>
/*
 * XMALLOC_DEBUG:
 * 1. Free data blocks are filled with poison bytes.
 * 2. In-use data blocks have guard bytes at the start and end.
 * Enabled by default in non-release (debug) builds.
 */
#ifndef NDEBUG
#define XMALLOC_DEBUG 1
#endif

/* Global free list of sub-page blocks; all access is under freelist_lock. */
static LIST_HEAD(freelist);
static DEFINE_SPINLOCK(freelist_lock);
/*
 * Header preceding every sub-page allocation.  Cacheline alignment of
 * the header means the payload immediately after it is cacheline
 * aligned too, which is what lets _xmalloc() promise cacheline-aligned
 * returns.
 */
struct xmalloc_hdr
{
    /* Size is total including this header. */
    size_t size;
    /* Links the block into 'freelist' while free; NULLed while in use. */
    struct list_head freelist;
} __cacheline_aligned;
56 static void add_to_freelist(struct xmalloc_hdr *hdr)
57 {
58 #if XMALLOC_DEBUG
59 memset(hdr + 1, 0xa5, hdr->size - sizeof(*hdr));
60 #endif
61 list_add(&hdr->freelist, &freelist);
62 }
64 static void del_from_freelist(struct xmalloc_hdr *hdr)
65 {
66 #if XMALLOC_DEBUG
67 size_t i;
68 unsigned char *data = (unsigned char *)(hdr + 1);
69 for ( i = 0; i < (hdr->size - sizeof(*hdr)); i++ )
70 BUG_ON(data[i] != 0xa5);
71 BUG_ON((hdr->size <= 0) || (hdr->size >= PAGE_SIZE));
72 #endif
73 list_del(&hdr->freelist);
74 }
76 static void *data_from_header(struct xmalloc_hdr *hdr)
77 {
78 #if XMALLOC_DEBUG
79 /* Data block contain SMP_CACHE_BYTES of guard canary. */
80 unsigned char *data = (unsigned char *)(hdr + 1);
81 memset(data, 0x5a, SMP_CACHE_BYTES);
82 memset(data + hdr->size - sizeof(*hdr) - SMP_CACHE_BYTES,
83 0x5a, SMP_CACHE_BYTES);
84 return data + SMP_CACHE_BYTES;
85 #else
86 return hdr + 1;
87 #endif
88 }
90 static struct xmalloc_hdr *header_from_data(const void *p)
91 {
92 #if XMALLOC_DEBUG
93 unsigned char *data = (unsigned char *)p - SMP_CACHE_BYTES;
94 struct xmalloc_hdr *hdr = (struct xmalloc_hdr *)data - 1;
95 size_t i;
97 /* Check header guard canary. */
98 for ( i = 0; i < SMP_CACHE_BYTES; i++ )
99 BUG_ON(data[i] != 0x5a);
101 /* Check footer guard canary. */
102 data += hdr->size - sizeof(*hdr) - SMP_CACHE_BYTES;
103 for ( i = 0; i < SMP_CACHE_BYTES; i++ )
104 BUG_ON(data[i] != 0x5a);
106 return hdr;
107 #else
108 return (struct xmalloc_hdr *)p - 1;
109 #endif
110 }
112 static void maybe_split(struct xmalloc_hdr *hdr, size_t size, size_t block)
113 {
114 struct xmalloc_hdr *extra;
115 size_t leftover = block - size;
117 /* If enough is left to make a block, put it on free list. */
118 if ( leftover >= (2 * sizeof(struct xmalloc_hdr)) )
119 {
120 extra = (struct xmalloc_hdr *)((unsigned long)hdr + size);
121 extra->size = leftover;
122 add_to_freelist(extra);
123 }
124 else
125 {
126 size = block;
127 }
129 hdr->size = size;
130 /* Debugging aid. */
131 hdr->freelist.next = hdr->freelist.prev = NULL;
132 }
134 static void *xmalloc_new_page(size_t size)
135 {
136 struct xmalloc_hdr *hdr;
137 unsigned long flags;
139 hdr = alloc_xenheap_page();
140 if ( hdr == NULL )
141 return NULL;
143 spin_lock_irqsave(&freelist_lock, flags);
144 maybe_split(hdr, size, PAGE_SIZE);
145 spin_unlock_irqrestore(&freelist_lock, flags);
147 return data_from_header(hdr);
148 }
150 /* Big object? Just use the page allocator. */
151 static void *xmalloc_whole_pages(size_t size)
152 {
153 struct xmalloc_hdr *hdr;
154 unsigned int pageorder = get_order_from_bytes(size);
156 hdr = alloc_xenheap_pages(pageorder);
157 if ( hdr == NULL )
158 return NULL;
160 hdr->size = (1 << (pageorder + PAGE_SHIFT));
161 /* Debugging aid. */
162 hdr->freelist.next = hdr->freelist.prev = NULL;
164 return data_from_header(hdr);
165 }
/*
 * Return size rounded up to the next multiple of align.
 * align must be a power of two.
 */
static inline size_t align_up(size_t size, size_t align)
{
    size_t mask = align - 1;
    return (size + mask) & ~mask;
}
173 void *_xmalloc(size_t size, size_t align)
174 {
175 struct xmalloc_hdr *i;
176 unsigned long flags;
178 /* We currently always return cacheline aligned. */
179 BUG_ON(align > SMP_CACHE_BYTES);
181 #if XMALLOC_DEBUG
182 /* Add room for canaries at start and end of data block. */
183 size += 2 * SMP_CACHE_BYTES;
184 #endif
186 /* Add room for header, pad to align next header. */
187 size += sizeof(struct xmalloc_hdr);
188 size = align_up(size, __alignof__(struct xmalloc_hdr));
190 /* For big allocs, give them whole pages. */
191 if ( size >= PAGE_SIZE )
192 return xmalloc_whole_pages(size);
194 /* Search free list. */
195 spin_lock_irqsave(&freelist_lock, flags);
196 list_for_each_entry( i, &freelist, freelist )
197 {
198 if ( i->size < size )
199 continue;
200 del_from_freelist(i);
201 maybe_split(i, size, i->size);
202 spin_unlock_irqrestore(&freelist_lock, flags);
203 return data_from_header(i);
204 }
205 spin_unlock_irqrestore(&freelist_lock, flags);
207 /* Alloc a new page and return from that. */
208 return xmalloc_new_page(size);
209 }
/*
 * Free a block previously returned by _xmalloc().  NULL is a no-op.
 * Whole-page allocations return directly to the page allocator;
 * sub-page blocks are coalesced with adjacent free blocks on the same
 * page, and the result either goes back to the page allocator (when a
 * full page has been reassembled) or onto the free list.
 */
void xfree(const void *p)
{
    unsigned long flags;
    struct xmalloc_hdr *i, *tmp, *hdr;

    if ( p == NULL )
        return;

    hdr = header_from_data(p);

    /* We know hdr will be on same page. */
    BUG_ON(((long)p & PAGE_MASK) != ((long)hdr & PAGE_MASK));

    /* Not previously freed: in-use blocks carry NULL list pointers. */
    BUG_ON(hdr->freelist.next || hdr->freelist.prev);

    /* Big allocs free directly. */
    if ( hdr->size >= PAGE_SIZE )
    {
        free_xenheap_pages(hdr, get_order_from_bytes(hdr->size));
        return;
    }

    /* Merge with other free blocks, or put in list. */
    spin_lock_irqsave(&freelist_lock, flags);
    list_for_each_entry_safe( i, tmp, &freelist, freelist )
    {
        unsigned long _i   = (unsigned long)i;
        unsigned long _hdr = (unsigned long)hdr;

        /* Do not merge across page boundaries. */
        if ( ((_i ^ _hdr) & PAGE_MASK) != 0 )
            continue;

        /* We follow this block? Swallow it. */
        if ( (_i + i->size) == _hdr )
        {
            del_from_freelist(i);
            i->size += hdr->size;
            hdr = i;
        }

        /*
         * We precede this block? Swallow it.
         * NOTE: _hdr is deliberately NOT refreshed after the merge
         * above; the same block can never satisfy both tests, since
         * the sizes involved are strictly positive.
         */
        if ( (_hdr + hdr->size) == _i )
        {
            del_from_freelist(i);
            hdr->size += i->size;
        }
    }

    /* Did we merge an entire page? */
    if ( hdr->size == PAGE_SIZE )
    {
        BUG_ON((((unsigned long)hdr) & (PAGE_SIZE-1)) != 0);
        free_xenheap_pages(hdr, 0);
    }
    else
    {
        add_to_freelist(hdr);
    }

    spin_unlock_irqrestore(&freelist_lock, flags);
}
275 /*
276 * Local variables:
277 * mode: C
278 * c-set-style: "BSD"
279 * c-basic-offset: 4
280 * tab-width: 4
281 * indent-tabs-mode: nil
282 * End:
283 */