ia64/xen-unstable

view tools/vtpm_manager/util/hashtable.c @ 6946:e703abaf6e3d

Add behaviour to the remove methods to remove the transaction's path itself. This allows us to write Remove(path) to remove the specified path rather than having to slice the path ourselves.
author emellor@ewan
date Sun Sep 18 14:42:13 2005 +0100 (2005-09-18)
parents 3233e7ecfa9f
children 06d84bf87159
line source
1 /*
2 * Copyright (c) 2005, Intel Corp
3 * Copyright (c) 2002, Christopher Clark <firstname.lastname@cl.cam.ac.uk>
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 *
10 * * Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 *
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 *
17 * * Neither the name of the original author; nor the names of any contributors
18 * may be used to endorse or promote products derived from this software
19 * without specific prior written permission.
20 *
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
24 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
25 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
26 * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
27 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
28 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
29 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
30 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
31 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
32 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 */
35 #include "hashtable.h"
36 #include "hashtable_private.h"
37 #include <stdlib.h>
38 #include <stdio.h>
39 #include <string.h>
40 #include <math.h>
/*
Credit for primes table: Aaron Krowne
http://br.endernet.org/~akrowne/
http://planetmath.org/encyclopedia/GoodHashTablePrimes.html
*/
/* Bucket counts used as the table grows: each entry is a prime roughly
 * double its predecessor, which keeps indexFor() well distributed. */
static const unsigned int primes[] = {
    53,         97,         193,        389,
    769,        1543,       3079,       6151,
    12289,      24593,      49157,      98317,
    196613,     393241,     786433,     1572869,
    3145739,    6291469,    12582917,   25165843,
    50331653,   100663319,  201326611,  402653189,
    805306457,  1610612741
};
const unsigned int prime_table_length = sizeof(primes)/sizeof(primes[0]);
/* Resize once the entry count exceeds 65% of the bucket count. */
const float max_load_factor = 0.65;
59 /*****************************************************************************/
60 struct hashtable *
61 create_hashtable(unsigned int minsize,
62 unsigned int (*hashf) (void*),
63 int (*eqf) (void*,void*))
64 {
65 struct hashtable *h;
66 unsigned int pindex, size = primes[0];
67 /* Check requested hashtable isn't too large */
68 if (minsize > (1u << 30)) return NULL;
69 /* Enforce size as prime */
70 for (pindex=0; pindex < prime_table_length; pindex++) {
71 if (primes[pindex] > minsize) { size = primes[pindex]; break; }
72 }
73 h = (struct hashtable *)malloc(sizeof(struct hashtable));
74 if (NULL == h) return NULL; /*oom*/
75 h->table = (struct entry **)malloc(sizeof(struct entry*) * size);
76 if (NULL == h->table) { free(h); return NULL; } /*oom*/
77 memset(h->table, 0, size * sizeof(struct entry *));
78 h->tablelength = size;
79 h->primeindex = pindex;
80 h->entrycount = 0;
81 h->hashfn = hashf;
82 h->eqfn = eqf;
83 h->loadlimit = (unsigned int) ceil(size * max_load_factor);
84 #ifdef HASHTABLE_THREADED
85 pthread_mutex_init(&h->mutex, NULL);
86 #endif
87 return h;
88 }
90 /*****************************************************************************/
91 unsigned int
92 hash(struct hashtable *h, void *k)
93 {
94 unsigned int i = h->hashfn(k);
95 i += ~(i << 9);
96 i ^= ((i >> 14) | (i << 18)); /* >>> */
97 i += (i << 4);
98 i ^= ((i >> 10) | (i << 22)); /* >>> */
99 return i;
100 }
102 /*****************************************************************************/
103 static int
104 hashtable_expand(struct hashtable *h)
105 {
106 /* Double the size of the table to accomodate more entries */
107 struct entry **newtable;
108 struct entry *e;
109 struct entry **pE;
110 unsigned int newsize, i, index;
111 /* Check we're not hitting max capacity */
112 if (h->primeindex == (prime_table_length - 1)) return 0;
113 newsize = primes[++(h->primeindex)];
115 newtable = (struct entry **)malloc(sizeof(struct entry*) * newsize);
116 if (NULL != newtable)
117 {
118 memset(newtable, 0, newsize * sizeof(struct entry *));
119 /* This algorithm is not 'stable'. ie. it reverses the list
120 * when it transfers entries between the tables */
121 for (i = 0; i < h->tablelength; i++) {
122 while (NULL != (e = h->table[i])) {
123 h->table[i] = e->next;
124 index = indexFor(newsize,e->h);
125 e->next = newtable[index];
126 newtable[index] = e;
127 }
128 }
129 free(h->table);
130 h->table = newtable;
131 }
132 /* Plan B: realloc instead */
133 else
134 {
135 newtable = (struct entry **)
136 realloc(h->table, newsize * sizeof(struct entry *));
137 if (NULL == newtable) { (h->primeindex)--; return 0; }
138 h->table = newtable;
139 memset(newtable[h->tablelength], 0, newsize - h->tablelength);
140 for (i = 0; i < h->tablelength; i++) {
141 for (pE = &(newtable[i]), e = *pE; e != NULL; e = *pE) {
142 index = indexFor(newsize,e->h);
143 if (index == i)
144 {
145 pE = &(e->next);
146 }
147 else
148 {
149 *pE = e->next;
150 e->next = newtable[index];
151 newtable[index] = e;
152 }
153 }
154 }
155 }
156 h->tablelength = newsize;
157 h->loadlimit = (unsigned int) ceil(newsize * max_load_factor);
158 return -1;
159 }
161 /*****************************************************************************/
162 unsigned int
163 hashtable_count(struct hashtable *h)
164 {
165 unsigned int count;
166 #ifdef HASHTABLE_THREADED
167 pthread_mutex_lock(&h->mutex);
168 #endif
169 count = h->entrycount;
170 #ifdef HASHTABLE_THREADED
171 pthread_mutex_unlock(&h->mutex);
172 #endif
173 return count;
174 }
176 /*****************************************************************************/
177 int
178 hashtable_insert(struct hashtable *h, void *k, void *v)
179 {
180 /* This method allows duplicate keys - but they shouldn't be used */
181 unsigned int index;
182 struct entry *e;
183 #ifdef HASHTABLE_THREADED
184 pthread_mutex_lock(&h->mutex);
185 #endif
186 if (++(h->entrycount) > h->loadlimit)
187 {
188 /* Ignore the return value. If expand fails, we should
189 * still try cramming just this value into the existing table
190 * -- we may not have memory for a larger table, but one more
191 * element may be ok. Next time we insert, we'll try expanding again.*/
192 hashtable_expand(h);
193 }
194 e = (struct entry *)malloc(sizeof(struct entry));
195 if (NULL == e) { --(h->entrycount); return 0; } /*oom*/
196 e->h = hash(h,k);
197 index = indexFor(h->tablelength,e->h);
198 e->k = k;
199 e->v = v;
200 e->next = h->table[index];
201 h->table[index] = e;
202 #ifdef HASHTABLE_THREADED
203 pthread_mutex_unlock(&h->mutex);
204 #endif
205 return -1;
206 }
208 /*****************************************************************************/
209 void * /* returns value associated with key */
210 hashtable_search(struct hashtable *h, void *k)
211 {
212 #ifdef HASHTABLE_THREADED
213 pthread_mutex_lock(&h->mutex);
214 #endif
215 struct entry *e;
216 unsigned int hashvalue, index;
217 hashvalue = hash(h,k);
218 index = indexFor(h->tablelength,hashvalue);
219 e = h->table[index];
220 while (NULL != e)
221 {
222 /* Check hash value to short circuit heavier comparison */
223 if ((hashvalue == e->h) && (h->eqfn(k, e->k))) {
224 #ifdef HASHTABLE_THREADED
225 pthread_mutex_unlock(&h->mutex);
226 #endif
227 return e->v;
228 }
229 e = e->next;
230 }
231 #ifdef HASHTABLE_THREADED
232 pthread_mutex_unlock(&h->mutex);
233 #endif
234 return NULL;
235 }
237 /*****************************************************************************/
238 void * /* returns value associated with key */
239 hashtable_remove(struct hashtable *h, void *k)
240 {
241 /* TODO: consider compacting the table when the load factor drops enough,
242 * or provide a 'compact' method. */
243 #ifdef HASHTABLE_THREADED
244 pthread_mutex_lock(&h->mutex);
245 #endif
246 struct entry *e;
247 struct entry **pE;
248 void *v;
249 unsigned int hashvalue, index;
251 hashvalue = hash(h,k);
252 index = indexFor(h->tablelength,hash(h,k));
253 pE = &(h->table[index]);
254 e = *pE;
255 while (NULL != e)
256 {
257 /* Check hash value to short circuit heavier comparison */
258 if ((hashvalue == e->h) && (h->eqfn(k, e->k)))
259 {
260 *pE = e->next;
261 h->entrycount--;
262 v = e->v;
263 freekey(e->k);
264 free(e);
265 return v;
266 }
267 pE = &(e->next);
268 e = e->next;
269 }
270 #ifdef HASHTABLE_THREADED
271 pthread_mutex_unlock(&h->mutex);
272 #endif
273 return NULL;
274 }
276 /*****************************************************************************/
277 /* destroy */
278 void
279 hashtable_destroy(struct hashtable *h, int free_values)
280 {
281 #ifdef HASHTABLE_THREADED
282 pthread_mutex_lock(&h->mutex);
283 #endif
284 unsigned int i;
285 struct entry *e, *f;
286 struct entry **table = h->table;
287 if (free_values)
288 {
289 for (i = 0; i < h->tablelength; i++)
290 {
291 e = table[i];
292 while (NULL != e)
293 { f = e; e = e->next; freekey(f->k); free(f->v); free(f); }
294 }
295 }
296 else
297 {
298 for (i = 0; i < h->tablelength; i++)
299 {
300 e = table[i];
301 while (NULL != e)
302 { f = e; e = e->next; freekey(f->k); free(f); }
303 }
304 }
305 free(h->table);
306 #ifdef HASHTABLE_THREADED
307 pthread_mutex_destroy(&h->mutex);
308 #endif
309 free(h);
310 }