ia64/xen-unstable

changeset 17822:189597fbb882

xenstore: cleanups

The attached patch uses calloc() for the hashtable allocations.
This makes sure the allocated memory is always initialized.
It also cleans up the error handling a bit.
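
For illustration only (this sketch is not part of the changeset, and the
helper names are invented), the effect of switching from malloc()+memset()
to calloc() is roughly:

    #include <stdlib.h>
    #include <string.h>

    /* Old pattern: allocate, then remember to zero the memory by hand. */
    static void *table_alloc_old(size_t nmemb, size_t size)
    {
        void *p = malloc(nmemb * size);     /* uninitialized on success */
        if (p != NULL)
            memset(p, 0, nmemb * size);     /* easy to forget or get wrong */
        return p;
    }

    /* New pattern: calloc() zero-fills, and modern libcs also reject a
     * nmemb * size product that would overflow. */
    static void *table_alloc_new(size_t nmemb, size_t size)
    {
        return calloc(nmemb, size);
    }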

On *BSD, avoid conflicts with the BSD list macros.
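
A minimal sketch of the conflict the new #undefs in list.h avoid (illustration
only, assuming a *BSD system where <sys/queue.h> gets pulled in and defines a
two-argument LIST_HEAD(name, type) macro):

    #include <sys/queue.h>   /* BSD list macros, including LIST_HEAD(name, type) */

    struct list_head { struct list_head *next, *prev; };

    /* Drop any system definitions so the Linux-style macros below take effect. */
    #undef LIST_HEAD_INIT
    #undef LIST_HEAD
    #undef INIT_LIST_HEAD

    #define LIST_HEAD_INIT(name) { &(name), &(name) }
    #define LIST_HEAD(name) struct list_head name = LIST_HEAD_INIT(name)

    LIST_HEAD(example_list);  /* the one-argument form now expands as intended */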

Signed-off-by: Christoph Egger <Christoph.Egger@amd.com>
author Keir Fraser <keir.fraser@citrix.com>
date Tue Jun 10 13:49:54 2008 +0100 (2008-06-10)
parents 8a36f7f70859
children d77214a97e04
files tools/xenstore/hashtable.c tools/xenstore/list.h
line diff
     1.1 --- a/tools/xenstore/hashtable.c	Tue Jun 10 13:49:02 2008 +0100
     1.2 +++ b/tools/xenstore/hashtable.c	Tue Jun 10 13:49:54 2008 +0100
     1.3 @@ -33,17 +33,22 @@ create_hashtable(unsigned int minsize,
     1.4  {
     1.5      struct hashtable *h;
     1.6      unsigned int pindex, size = primes[0];
     1.7 +
     1.8      /* Check requested hashtable isn't too large */
     1.9      if (minsize > (1u << 30)) return NULL;
    1.10 +
    1.11      /* Enforce size as prime */
    1.12      for (pindex=0; pindex < prime_table_length; pindex++) {
    1.13          if (primes[pindex] > minsize) { size = primes[pindex]; break; }
    1.14      }
    1.15 -    h = (struct hashtable *)malloc(sizeof(struct hashtable));
    1.16 -    if (NULL == h) return NULL; /*oom*/
    1.17 -    h->table = (struct entry **)malloc(sizeof(struct entry*) * size);
    1.18 -    if (NULL == h->table) { free(h); return NULL; } /*oom*/
    1.19 -    memset(h->table, 0, size * sizeof(struct entry *));
    1.20 +
    1.21 +    h = (struct hashtable *)calloc(1, sizeof(struct hashtable));
    1.22 +    if (NULL == h)
    1.23 +        goto err0;
    1.24 +    h->table = (struct entry **)calloc(size, sizeof(struct entry *));
    1.25 +    if (NULL == h->table)
    1.26 +        goto err1;
    1.27 +
    1.28      h->tablelength  = size;
    1.29      h->primeindex   = pindex;
    1.30      h->entrycount   = 0;
    1.31 @@ -51,6 +56,11 @@ create_hashtable(unsigned int minsize,
    1.32      h->eqfn         = eqf;
    1.33      h->loadlimit    = (unsigned int)(((uint64_t)size * max_load_factor) / 100);
    1.34      return h;
    1.35 +
     1.36 +err1:
     1.37 +    free(h);
     1.38 +err0:
     1.39 +    return NULL;
    1.40  }
    1.41  
    1.42  /*****************************************************************************/
    1.43 @@ -80,10 +90,9 @@ hashtable_expand(struct hashtable *h)
    1.44      if (h->primeindex == (prime_table_length - 1)) return 0;
    1.45      newsize = primes[++(h->primeindex)];
    1.46  
    1.47 -    newtable = (struct entry **)malloc(sizeof(struct entry*) * newsize);
    1.48 +    newtable = (struct entry **)calloc(newsize, sizeof(struct entry*));
    1.49      if (NULL != newtable)
    1.50      {
    1.51 -        memset(newtable, 0, newsize * sizeof(struct entry *));
    1.52          /* This algorithm is not 'stable'. ie. it reverses the list
    1.53           * when it transfers entries between the tables */
    1.54          for (i = 0; i < h->tablelength; i++) {
    1.55 @@ -149,7 +158,7 @@ hashtable_insert(struct hashtable *h, vo
    1.56           * element may be ok. Next time we insert, we'll try expanding again.*/
    1.57          hashtable_expand(h);
    1.58      }
    1.59 -    e = (struct entry *)malloc(sizeof(struct entry));
    1.60 +    e = (struct entry *)calloc(1, sizeof(struct entry));
    1.61      if (NULL == e) { --(h->entrycount); return 0; } /*oom*/
    1.62      e->h = hash(h,k);
    1.63      index = indexFor(h->tablelength,e->h);
     2.1 --- a/tools/xenstore/list.h	Tue Jun 10 13:49:02 2008 +0100
     2.2 +++ b/tools/xenstore/list.h	Tue Jun 10 13:49:54 2008 +0100
     2.3 @@ -3,6 +3,10 @@
     2.4  /* Taken from Linux kernel code, but de-kernelized for userspace. */
     2.5  #include <stddef.h>
     2.6  
     2.7 +#undef LIST_HEAD_INIT
     2.8 +#undef LIST_HEAD
     2.9 +#undef INIT_LIST_HEAD
    2.10 +
    2.11  /*
    2.12   * These are non-NULL pointers that will result in page faults
    2.13   * under normal circumstances, used to verify that nobody uses