ia64/xen-unstable

view extras/mini-os/lib/xmalloc.c @ 6806:4ad19fe76d50

Store dom0 store ring-ref in store.
Signed-off-by: Christian Limpach <Christian.Limpach@cl.cam.ac.uk>
author cl349@firebug.cl.cam.ac.uk
date Tue Sep 13 15:32:38 2005 +0000 (2005-09-13)
parents dd668f7527cb
children b2f4823b6ff0 b35215021b32 9af349b055e5 3233e7ecfa9f
line source
1 /*
2 ****************************************************************************
3 * (C) 2005 - Grzegorz Milos - Intel Research Cambridge
4 ****************************************************************************
5 *
6 * File: xmalloc.c
7 * Author: Grzegorz Milos (gm281@cam.ac.uk)
8 * Changes:
9 *
10 * Date: Aug 2005
11 *
12 * Environment: Xen Minimal OS
13 * Description: simple memory allocator
14 *
15 ****************************************************************************
16 * Simple allocator for Mini-os. If larger than a page, simply use the
17 * page-order allocator.
18 *
19 * Copy of the allocator for Xen by Rusty Russell:
20 * Copyright (C) 2005 Rusty Russell IBM Corporation
21 *
22 * This program is free software; you can redistribute it and/or modify
23 * it under the terms of the GNU General Public License as published by
24 * the Free Software Foundation; either version 2 of the License, or
25 * (at your option) any later version.
26 *
27 * This program is distributed in the hope that it will be useful,
28 * but WITHOUT ANY WARRANTY; without even the implied warranty of
29 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
30 * GNU General Public License for more details.
31 *
32 * You should have received a copy of the GNU General Public License
33 * along with this program; if not, write to the Free Software
34 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
35 */
37 #include <os.h>
38 #include <mm.h>
39 #include <types.h>
40 #include <lib.h>
41 #include <list.h>
/* Global list of free sub-page blocks, linked through each block's
 * embedded freelist node.  Whole-page allocations never appear here. */
static LIST_HEAD(freelist);
/* static spinlock_t freelist_lock = SPIN_LOCK_UNLOCKED; */
/*
 * Header prepended to every allocation.  While a block sits on the free
 * list, `freelist` links it in; while it is live, both link pointers are
 * set to NULL as a double-free/corruption debugging aid (checked in xfree).
 */
struct xmalloc_hdr
{
    /* Total size of the block, including this header. */
    size_t size;
    struct list_head freelist;
} __cacheline_aligned;
53 static void maybe_split(struct xmalloc_hdr *hdr, size_t size, size_t block)
54 {
55 struct xmalloc_hdr *extra;
56 size_t leftover = block - size;
58 /* If enough is left to make a block, put it on free list. */
59 if ( leftover >= (2 * sizeof(struct xmalloc_hdr)) )
60 {
61 extra = (struct xmalloc_hdr *)((unsigned long)hdr + size);
62 extra->size = leftover;
63 list_add(&extra->freelist, &freelist);
64 }
65 else
66 {
67 size = block;
68 }
70 hdr->size = size;
71 /* Debugging aid. */
72 hdr->freelist.next = hdr->freelist.prev = NULL;
73 }
75 static void *xmalloc_new_page(size_t size)
76 {
77 struct xmalloc_hdr *hdr;
78 /* unsigned long flags; */
80 hdr = (struct xmalloc_hdr *)alloc_page();
81 if ( hdr == NULL )
82 return NULL;
84 /* spin_lock_irqsave(&freelist_lock, flags); */
85 maybe_split(hdr, size, PAGE_SIZE);
86 /* spin_unlock_irqrestore(&freelist_lock, flags); */
88 return hdr+1;
89 }
91 /* Big object? Just use the page allocator. */
92 static void *xmalloc_whole_pages(size_t size)
93 {
94 struct xmalloc_hdr *hdr;
95 unsigned int pageorder = get_order(size);
97 hdr = (struct xmalloc_hdr *)alloc_pages(pageorder);
98 if ( hdr == NULL )
99 return NULL;
101 hdr->size = (1 << (pageorder + PAGE_SHIFT));
102 /* Debugging aid. */
103 hdr->freelist.next = hdr->freelist.prev = NULL;
105 return hdr+1;
106 }
/* Round `size` up to the next multiple of `align` (a power of two). */
static inline size_t align_up(size_t size, size_t align)
{
    size_t mask = align - 1;
    return (size + mask) & ~mask;
}
114 void *_xmalloc(size_t size, size_t align)
115 {
116 struct xmalloc_hdr *i;
117 /* unsigned long flags; */
119 /* Add room for header, pad to align next header. */
120 size += sizeof(struct xmalloc_hdr);
121 size = align_up(size, __alignof__(struct xmalloc_hdr));
123 /* For big allocs, give them whole pages. */
124 if ( size >= PAGE_SIZE )
125 return xmalloc_whole_pages(size);
127 /* Search free list. */
128 /* spin_lock_irqsave(&freelist_lock, flags); */
129 list_for_each_entry( i, &freelist, freelist )
130 {
131 if ( i->size < size )
132 continue;
133 list_del(&i->freelist);
134 maybe_split(i, size, i->size);
135 /* spin_unlock_irqrestore(&freelist_lock, flags); */
136 return i+1;
137 }
138 /* spin_unlock_irqrestore(&freelist_lock, flags); */
140 /* Alloc a new page and return from that. */
141 return xmalloc_new_page(size);
142 }
/*
 * Free a block obtained from _xmalloc.  Sub-page blocks are coalesced
 * with free neighbours on the same page and returned to the free list
 * (or to the page allocator if a whole page reassembles); whole-page
 * blocks go straight back to the page allocator.  NULL is a no-op.
 */
void xfree(const void *p)
{
    /* unsigned long flags; */
    struct xmalloc_hdr *i, *tmp, *hdr;

    if ( p == NULL )
        return;

    /* The header sits immediately before the payload. */
    hdr = (struct xmalloc_hdr *)p - 1;

    /* We know hdr will be on same page. */
    if(((long)p & PAGE_MASK) != ((long)hdr & PAGE_MASK))
    {
        printk("Header should be on the same page\n");
        /* Deliberate crash on heap corruption (no BUG() here). */
        *(int*)0=0;
    }

    /* Not previously freed: live blocks have NULL link pointers
     * (set in maybe_split/xmalloc_whole_pages), so a non-NULL link
     * means a double free or corruption. */
    if(hdr->freelist.next || hdr->freelist.prev)
    {
        printk("Should not be previously freed\n");
        *(int*)0=0;
    }

    /* Big allocs free directly. */
    if ( hdr->size >= PAGE_SIZE )
    {
        free_pages(hdr, get_order(hdr->size));
        return;
    }

    /* Merge with other free block, or put in list.  The _safe iterator
     * is required because merging deletes entries mid-walk. */
    /* spin_lock_irqsave(&freelist_lock, flags); */
    list_for_each_entry_safe( i, tmp, &freelist, freelist )
    {
        unsigned long _i = (unsigned long)i;
        unsigned long _hdr = (unsigned long)hdr;

        /* Do not merge across page boundaries. */
        if ( ((_i ^ _hdr) & PAGE_MASK) != 0 )
            continue;

        /* We follow this block? Swallow it.  After this, hdr aliases i,
         * so the merged block keeps growing as the walk continues. */
        if ( (_i + i->size) == _hdr )
        {
            list_del(&i->freelist);
            i->size += hdr->size;
            hdr = i;
        }

        /* We precede this block? Swallow it. */
        if ( (_hdr + hdr->size) == _i )
        {
            list_del(&i->freelist);
            hdr->size += i->size;
        }
    }

    /* Did we merge an entire page?  If so it must be page-aligned;
     * anything else indicates a bookkeeping bug. */
    if ( hdr->size == PAGE_SIZE )
    {
        if((((unsigned long)hdr) & (PAGE_SIZE-1)) != 0)
        {
            printk("Bug\n");
            *(int*)0=0;
        }
        free_pages(hdr, 0);
    }
    else
    {
        list_add(&hdr->freelist, &freelist);
    }

    /* spin_unlock_irqrestore(&freelist_lock, flags); */
}