ia64/xen-unstable

view xen/common/xmalloc.c @ 6708:aa0990ef260f

merge
author iap10@freefall.cl.cam.ac.uk
date Thu Sep 08 17:42:49 2005 +0000 (2005-09-08)
parents 3bde4219c681 e3fd0fa58364
children 2704a88c3295 cdfa7dd00c44
/******************************************************************************
 * Simple allocator for Xen.  If larger than a page, simply use the
 * page-order allocator.
 *
 * Copyright (C) 2005 Rusty Russell IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

/*
 * TODO (Keir, 17/2/05):
 *  1. Use space in pfn_info to avoid xmalloc_hdr in allocated blocks.
 *  2. pfn_info points into free list to make xfree() O(1) complexity.
 *  3. Perhaps make this a sub-page buddy allocator? xmalloc() == O(1).
 *     (Disadvantage is potentially greater internal fragmentation.)
 */
#include <xen/config.h>
#include <xen/mm.h>
#include <xen/spinlock.h>
#include <xen/ac_timer.h>
#include <xen/cache.h>
#include <xen/prefetch.h>

static LIST_HEAD(freelist);
static spinlock_t freelist_lock = SPIN_LOCK_UNLOCKED;

struct xmalloc_hdr
{
    /* Total including this hdr. */
    size_t size;
    struct list_head freelist;
} __cacheline_aligned;
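
/*
 * Every allocation is preceded in memory by one of these headers; the
 * caller receives the address just past it (the hdr+1 returns below).
 * Because the struct is __cacheline_aligned, its size and alignment round
 * up to SMP_CACHE_BYTES, which is what keeps the pointers handed back by
 * _xmalloc() cacheline aligned.
 */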
47 static void maybe_split(struct xmalloc_hdr *hdr, size_t size, size_t block)
48 {
49 struct xmalloc_hdr *extra;
50 size_t leftover = block - size;
52 /* If enough is left to make a block, put it on free list. */
53 if ( leftover >= (2 * sizeof(struct xmalloc_hdr)) )
54 {
55 extra = (struct xmalloc_hdr *)((unsigned long)hdr + size);
56 extra->size = leftover;
57 list_add(&extra->freelist, &freelist);
58 }
59 else
60 {
61 size = block;
62 }
64 hdr->size = size;
65 /* Debugging aid. */
66 hdr->freelist.next = hdr->freelist.prev = NULL;
67 }
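
/*
 * Worked example (assuming PAGE_SIZE == 4096 and a header padded to
 * SMP_CACHE_BYTES == 128, so 2 * sizeof(struct xmalloc_hdr) == 256):
 * carving a 256-byte block out of a fresh page leaves 3840 bytes, which is
 * plenty for another block, so the remainder goes back on the free list.
 * Carving a 3968-byte block would leave only 128 bytes, so the caller is
 * given the whole 4096-byte block instead.
 */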

static void *xmalloc_new_page(size_t size)
{
    struct xmalloc_hdr *hdr;
    unsigned long flags;

    hdr = alloc_xenheap_page();
    if ( hdr == NULL )
        return NULL;

    spin_lock_irqsave(&freelist_lock, flags);
    maybe_split(hdr, size, PAGE_SIZE);
    spin_unlock_irqrestore(&freelist_lock, flags);

    return hdr+1;
}

/* Big object? Just use the page allocator. */
static void *xmalloc_whole_pages(size_t size)
{
    struct xmalloc_hdr *hdr;
    unsigned int pageorder = get_order_from_bytes(size);

    hdr = alloc_xenheap_pages(pageorder);
    if ( hdr == NULL )
        return NULL;

    hdr->size = (1 << (pageorder + PAGE_SHIFT));
    /* Debugging aid. */
    hdr->freelist.next = hdr->freelist.prev = NULL;

    return hdr+1;
}
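
/*
 * Note that hdr->size recorded above is always >= PAGE_SIZE, which is how
 * xfree() below tells a whole-page allocation apart from a sub-page one.
 */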

/* Return size, increased to alignment with align. */
static inline size_t align_up(size_t size, size_t align)
{
    return (size + align - 1) & ~(align - 1);
}
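
/*
 * The mask trick above requires align to be a power of two.  For example,
 * align_up(40, 16) == 48 and align_up(48, 16) == 48.
 */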

void *_xmalloc(size_t size, size_t align)
{
    struct xmalloc_hdr *i;
    unsigned long flags;

    /* We currently always return cacheline aligned. */
#ifndef __ia64__
    BUG_ON(align > SMP_CACHE_BYTES);
#endif

    /* Add room for header, pad to align next header. */
    size += sizeof(struct xmalloc_hdr);
    size = align_up(size, __alignof__(struct xmalloc_hdr));

    /* For big allocs, give them whole pages. */
    if ( size >= PAGE_SIZE )
        return xmalloc_whole_pages(size);

    /* Search free list. */
    spin_lock_irqsave(&freelist_lock, flags);
    list_for_each_entry( i, &freelist, freelist )
    {
        if ( i->size < size )
            continue;
        list_del(&i->freelist);
        maybe_split(i, size, i->size);
        spin_unlock_irqrestore(&freelist_lock, flags);
        return i+1;
    }
    spin_unlock_irqrestore(&freelist_lock, flags);

    /* Alloc a new page and return from that. */
    return xmalloc_new_page(size);
}
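
/*
 * Typical use (a sketch; the type-safe xmalloc(type) wrapper is assumed to
 * come from xen/xmalloc.h and expand to
 * _xmalloc(sizeof(type), __alignof__(type))):
 *
 *     struct foo *f = xmalloc(struct foo);
 *     if ( f == NULL )
 *         return -ENOMEM;
 *     ...
 *     xfree(f);
 *
 * xfree(NULL) is a no-op, mirroring free(NULL).
 */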

void xfree(const void *p)
{
    unsigned long flags;
    struct xmalloc_hdr *i, *tmp, *hdr;

    if ( p == NULL )
        return;

    hdr = (struct xmalloc_hdr *)p - 1;

    /* We know hdr will be on same page. */
    BUG_ON(((long)p & PAGE_MASK) != ((long)hdr & PAGE_MASK));

    /* Not previously freed. */
    BUG_ON(hdr->freelist.next || hdr->freelist.prev);

    /* Big allocs free directly. */
    if ( hdr->size >= PAGE_SIZE )
    {
        free_xenheap_pages(hdr, get_order_from_bytes(hdr->size));
        return;
    }

    /* Merge with other free block, or put in list. */
    spin_lock_irqsave(&freelist_lock, flags);
    list_for_each_entry_safe( i, tmp, &freelist, freelist )
    {
        unsigned long _i   = (unsigned long)i;
        unsigned long _hdr = (unsigned long)hdr;

        /* Do not merge across page boundaries. */
        if ( ((_i ^ _hdr) & PAGE_MASK) != 0 )
            continue;

        /* We follow this block? Swallow it. */
        if ( (_i + i->size) == _hdr )
        {
            list_del(&i->freelist);
            i->size += hdr->size;
            hdr = i;
        }

        /* We precede this block? Swallow it. */
        if ( (_hdr + hdr->size) == _i )
        {
            list_del(&i->freelist);
            hdr->size += i->size;
        }
    }

    /* Did we merge an entire page? */
    if ( hdr->size == PAGE_SIZE )
    {
        BUG_ON((((unsigned long)hdr) & (PAGE_SIZE-1)) != 0);
        free_xenheap_pages(hdr, 0);
    }
    else
    {
        list_add(&hdr->freelist, &freelist);
    }

    spin_unlock_irqrestore(&freelist_lock, flags);
}
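
/*
 * Note on the merging above: neighbours are only coalesced within a single
 * page (the PAGE_MASK check), which is what makes the hdr->size == PAGE_SIZE
 * test meaningful.  Once every block in a page has been freed and merged,
 * the page is handed back to the xen heap rather than lingering on the
 * free list.
 */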

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */