ia64/xen-unstable

view xen/common/xmalloc.c @ 9776:72f9c751d3ea

Replace &foo[0] with foo where the latter seems cleaner
(which is usually, and particularly when it's an argument
to one of the bitops functions).

Signed-off-by: Keir Fraser <keir@xensource.com>
author kaf24@firebug.cl.cam.ac.uk
date Wed Apr 19 18:32:20 2006 +0100 (2006-04-19)
parents 0c94043f5c5b
children 808430428622
line source
/******************************************************************************
 * Simple allocator for Xen. If larger than a page, simply use the
 * page-order allocator.
 *
 * Copyright (C) 2005 Rusty Russell IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

/*
 * TODO (Keir, 17/2/05):
 *  1. Use space in page_info to avoid xmalloc_hdr in allocated blocks.
 *  2. page_info points into free list to make xfree() O(1) complexity.
 *  3. Perhaps make this a sub-page buddy allocator? xmalloc() == O(1).
 *     (Disadvantage is potentially greater internal fragmentation).
 */

#include <xen/config.h>
#include <xen/mm.h>
#include <xen/spinlock.h>
#include <xen/timer.h>
#include <xen/cache.h>
#include <xen/prefetch.h>

static LIST_HEAD(freelist);
static spinlock_t freelist_lock = SPIN_LOCK_UNLOCKED;

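/*
 * All sub-page free blocks live on this single global list. Every access
 * below takes freelist_lock with the _irqsave/_irqrestore variants, so the
 * allocator can safely be used with interrupts disabled.
 */
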
struct xmalloc_hdr
{
    /* Total including this hdr. */
    size_t size;
    struct list_head freelist;
} __cacheline_aligned;

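/*
 * Layout of a sub-page allocation: the xmalloc_hdr sits immediately before
 * the memory handed back to the caller, i.e. _xmalloc() returns hdr + 1 and
 * xfree() recovers the header as ((struct xmalloc_hdr *)p - 1). While the
 * block is allocated, hdr->freelist is NULLed and used as a double-free
 * check.
 */
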
static void maybe_split(struct xmalloc_hdr *hdr, size_t size, size_t block)
{
    struct xmalloc_hdr *extra;
    size_t leftover = block - size;

    /* If enough is left to make a block, put it on free list. */
    if ( leftover >= (2 * sizeof(struct xmalloc_hdr)) )
    {
        extra = (struct xmalloc_hdr *)((unsigned long)hdr + size);
        extra->size = leftover;
        list_add(&extra->freelist, &freelist);
    }
    else
    {
        size = block;
    }

    hdr->size = size;
    /* Debugging aid. */
    hdr->freelist.next = hdr->freelist.prev = NULL;
}

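/*
 * Worked example (illustrative figures): with PAGE_SIZE == 4096 and a
 * request that rounds up to size == 256, maybe_split(hdr, 256, 4096) leaves
 * hdr->size == 256 and puts the remaining 3840-byte tail on the free list
 * as its own block. If the leftover were smaller than two headers, the
 * whole block would instead be kept, so hdr->size == block.
 */
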
static void *xmalloc_new_page(size_t size)
{
    struct xmalloc_hdr *hdr;
    unsigned long flags;

    hdr = alloc_xenheap_page();
    if ( hdr == NULL )
        return NULL;

    spin_lock_irqsave(&freelist_lock, flags);
    maybe_split(hdr, size, PAGE_SIZE);
    spin_unlock_irqrestore(&freelist_lock, flags);

    return hdr+1;
}

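/*
 * The freshly allocated page is split straight away: the new block occupies
 * the first 'size' bytes of the page (header included), the caller gets the
 * payload after the header, and maybe_split() queues whatever is left of
 * the page on the free list.
 */
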
/* Big object? Just use the page allocator. */
static void *xmalloc_whole_pages(size_t size)
{
    struct xmalloc_hdr *hdr;
    unsigned int pageorder = get_order_from_bytes(size);

    hdr = alloc_xenheap_pages(pageorder);
    if ( hdr == NULL )
        return NULL;

    hdr->size = (1 << (pageorder + PAGE_SHIFT));
    /* Debugging aid. */
    hdr->freelist.next = hdr->freelist.prev = NULL;

    return hdr+1;
}

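/*
 * Note that hdr->size is set to the full page-order allocation size here,
 * so it is always >= PAGE_SIZE; xfree() relies on exactly that test to tell
 * whole-page allocations apart from sub-page blocks.
 */
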
/* Return size, increased to alignment with align. */
static inline size_t align_up(size_t size, size_t align)
{
    return (size + align - 1) & ~(align - 1);
}

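/*
 * The usual power-of-two rounding trick: align must be a power of two, and
 * e.g. align_up(13, 8) == 16 while align_up(16, 8) == 16.
 */
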
void *_xmalloc(size_t size, size_t align)
{
    struct xmalloc_hdr *i;
    unsigned long flags;

    /* We currently always return cacheline aligned. */
    BUG_ON(align > SMP_CACHE_BYTES);

    /* Add room for header, pad to align next header. */
    size += sizeof(struct xmalloc_hdr);
    size = align_up(size, __alignof__(struct xmalloc_hdr));

    /* For big allocs, give them whole pages. */
    if ( size >= PAGE_SIZE )
        return xmalloc_whole_pages(size);

    /* Search free list. */
    spin_lock_irqsave(&freelist_lock, flags);
    list_for_each_entry( i, &freelist, freelist )
    {
        if ( i->size < size )
            continue;
        list_del(&i->freelist);
        maybe_split(i, size, i->size);
        spin_unlock_irqrestore(&freelist_lock, flags);
        return i+1;
    }
    spin_unlock_irqrestore(&freelist_lock, flags);

    /* Alloc a new page and return from that. */
    return xmalloc_new_page(size);
}

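/*
 * Typical call site (illustrative only; callers normally go through the
 * type-safe xmalloc()/xmalloc_array() wrappers rather than calling
 * _xmalloc() directly):
 *
 *     struct foo *f = _xmalloc(sizeof(*f), __alignof__(*f));
 *     if ( f == NULL )
 *         return -ENOMEM;
 *     ...
 *     xfree(f);
 */
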
void xfree(const void *p)
{
    unsigned long flags;
    struct xmalloc_hdr *i, *tmp, *hdr;

    if ( p == NULL )
        return;

    hdr = (struct xmalloc_hdr *)p - 1;

    /* We know hdr will be on same page. */
    BUG_ON(((long)p & PAGE_MASK) != ((long)hdr & PAGE_MASK));

    /* Not previously freed. */
    BUG_ON(hdr->freelist.next || hdr->freelist.prev);

    /* Big allocs free directly. */
    if ( hdr->size >= PAGE_SIZE )
    {
        free_xenheap_pages(hdr, get_order_from_bytes(hdr->size));
        return;
    }

    /* Merge with other free block, or put in list. */
    spin_lock_irqsave(&freelist_lock, flags);
    list_for_each_entry_safe( i, tmp, &freelist, freelist )
    {
        unsigned long _i = (unsigned long)i;
        unsigned long _hdr = (unsigned long)hdr;

        /* Do not merge across page boundaries. */
        if ( ((_i ^ _hdr) & PAGE_MASK) != 0 )
            continue;

        /* We follow this block? Swallow it. */
        if ( (_i + i->size) == _hdr )
        {
            list_del(&i->freelist);
            i->size += hdr->size;
            hdr = i;
        }

        /* We precede this block? Swallow it. */
        if ( (_hdr + hdr->size) == _i )
        {
            list_del(&i->freelist);
            hdr->size += i->size;
        }
    }

    /* Did we merge an entire page? */
    if ( hdr->size == PAGE_SIZE )
    {
        BUG_ON((((unsigned long)hdr) & (PAGE_SIZE-1)) != 0);
        free_xenheap_pages(hdr, 0);
    }
    else
    {
        list_add(&hdr->freelist, &freelist);
    }

    spin_unlock_irqrestore(&freelist_lock, flags);
}

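/*
 * Coalescing recap: a freed sub-page block is merged with any free
 * neighbour on the same page (the block immediately before it and/or
 * immediately after it). If the merges reassemble a complete page, the
 * page goes straight back to the xenheap instead of sitting on the free
 * list.
 */
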
/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */