ia64/xen-unstable

changeset 12562:7a38b70788a5

[XEN] Simplify the shadow hash table.
Chain hash buckets through the shadow page_info structs instead
of in separately allocated structures. This lets us get rid of
some xenheap allocations and a domain_crash_synchronous() call.
Signed-off-by: Tim Deegan <Tim.Deegan@xensource.com>
author Tim Deegan <Tim.Deegan@xensource.com>
date Thu Nov 23 17:42:29 2006 +0000 (2006-11-23)
parents 6f0d8434d23f
children 47a8bb3cd123
files xen/arch/x86/mm/shadow/common.c xen/arch/x86/mm/shadow/private.h xen/include/asm-x86/domain.h xen/include/asm-x86/shadow.h
line diff
     1.1 --- a/xen/arch/x86/mm/shadow/common.c	Thu Nov 23 17:40:28 2006 +0000
     1.2 +++ b/xen/arch/x86/mm/shadow/common.c	Thu Nov 23 17:42:29 2006 +0000
     1.3 @@ -1265,17 +1265,22 @@ unsigned int shadow_set_allocation(struc
     1.4  }
     1.5  
     1.6  /**************************************************************************/
     1.7 -/* Hash table for storing the guest->shadow mappings */
     1.8 +/* Hash table for storing the guest->shadow mappings.
     1.9 + * The table itself is an array of pointers to shadows; the shadows are then 
    1.10 + * threaded on a singly-linked list of shadows with the same hash value */
    1.11 +
    1.12 +#define SHADOW_HASH_BUCKETS 251
    1.13 +/* Other possibly useful primes are 509, 1021, 2039, 4093, 8191, 16381 */
    1.14  
    1.15  /* Hash function that takes a gfn or mfn, plus another byte of type info */
    1.16  typedef u32 key_t;
    1.17 -static inline key_t sh_hash(unsigned long n, u8 t) 
    1.18 +static inline key_t sh_hash(unsigned long n, unsigned int t) 
    1.19  {
    1.20      unsigned char *p = (unsigned char *)&n;
    1.21      key_t k = t;
    1.22      int i;
    1.23      for ( i = 0; i < sizeof(n) ; i++ ) k = (u32)p[i] + (k<<6) + (k<<16) - k;
    1.24 -    return k;
    1.25 +    return k % SHADOW_HASH_BUCKETS;
    1.26  }
    1.27  
    1.28  #if SHADOW_AUDIT & (SHADOW_AUDIT_HASH|SHADOW_AUDIT_HASH_FULL)
    1.29 @@ -1285,58 +1290,50 @@ static inline key_t sh_hash(unsigned lon
    1.30  static void sh_hash_audit_bucket(struct domain *d, int bucket)
    1.31  /* Audit one bucket of the hash table */
    1.32  {
    1.33 -    struct shadow_hash_entry *e, *x;
    1.34 -    struct shadow_page_info *sp;
    1.35 +    struct shadow_page_info *sp, *x;
    1.36  
    1.37      if ( !(SHADOW_AUDIT_ENABLE) )
    1.38          return;
    1.39  
    1.40 -    e = &d->arch.shadow.hash_table[bucket];
    1.41 -    if ( e->t == 0 ) return; /* Bucket is empty */ 
    1.42 -    while ( e )
    1.43 +    sp = d->arch.shadow.hash_table[bucket];
    1.44 +    while ( sp )
    1.45      {
    1.46 -        /* Empty link? */
    1.47 -        BUG_ON( e->t == 0 ); 
    1.48 -        /* Bogus type? */
    1.49 -        BUG_ON( e->t > SH_type_max_shadow );
    1.50 -        /* Wrong bucket? */
    1.51 -        BUG_ON( sh_hash(e->n, e->t) % SHADOW_HASH_BUCKETS != bucket ); 
    1.52 -        /* Duplicate entry? */
    1.53 -        for ( x = e->next; x; x = x->next )
    1.54 -            BUG_ON( x->n == e->n && x->t == e->t );
    1.55 -        /* Bogus MFN? */
    1.56 -        BUG_ON( !valid_mfn(e->smfn) );
    1.57 -        sp = mfn_to_shadow_page(e->smfn);
    1.58          /* Not a shadow? */
    1.59          BUG_ON( sp->mbz != 0 );
    1.60 -        /* Wrong kind of shadow? */
    1.61 -        BUG_ON( sp->type != e->t ); 
    1.62 -        /* Bad backlink? */
    1.63 -        BUG_ON( sp->backpointer != e->n );
    1.64 -        if ( e->t != SH_type_fl1_32_shadow
    1.65 -             && e->t != SH_type_fl1_pae_shadow
    1.66 -             && e->t != SH_type_fl1_64_shadow )
    1.67 +        /* Bogus type? */
    1.68 +        BUG_ON( sp->type == 0 ); 
    1.69 +        BUG_ON( sp->type > SH_type_max_shadow );
    1.70 +        /* Wrong bucket? */
    1.71 +        BUG_ON( sh_hash(sp->backpointer, sp->type) != bucket ); 
    1.72 +        /* Duplicate entry? */
    1.73 +        for ( x = sp->next_shadow; x; x = x->next_shadow )
    1.74 +            BUG_ON( x->backpointer == sp->backpointer && x->type == sp->type );
    1.75 +        /* Follow the backpointer to the guest pagetable */
    1.76 +        if ( sp->type != SH_type_fl1_32_shadow
    1.77 +             && sp->type != SH_type_fl1_pae_shadow
    1.78 +             && sp->type != SH_type_fl1_64_shadow )
    1.79          {
    1.80 -            struct page_info *gpg = mfn_to_page(_mfn(e->n));
    1.81 +            struct page_info *gpg = mfn_to_page(_mfn(sp->backpointer));
    1.82              /* Bad shadow flags on guest page? */
    1.83 -            BUG_ON( !(gpg->shadow_flags & (1<<e->t)) );
    1.84 +            BUG_ON( !(gpg->shadow_flags & (1<<sp->type)) );
    1.85              /* Bad type count on guest page? */
    1.86              if ( (gpg->u.inuse.type_info & PGT_type_mask) == PGT_writable_page 
    1.87                   && (gpg->u.inuse.type_info & PGT_count_mask) != 0 )
    1.88              {
    1.89 -                SHADOW_ERROR("MFN %#"SH_PRI_mfn" shadowed (by %#"SH_PRI_mfn")"
    1.90 +                SHADOW_ERROR("MFN %#lx shadowed (by %#"SH_PRI_mfn")"
    1.91                               " but has typecount %#lx\n",
    1.92 -                             e->n, mfn_x(e->smfn), gpg->u.inuse.type_info);
    1.93 +                             sp->backpointer, mfn_x(shadow_page_to_mfn(sp)), 
    1.94 +                             gpg->u.inuse.type_info);
    1.95                  BUG();
    1.96              }
    1.97          }
    1.98          /* That entry was OK; on we go */
    1.99 -        e = e->next;
   1.100 +        sp = sp->next_shadow;
   1.101      }
   1.102  }
   1.103  
   1.104  #else
   1.105 -#define sh_hash_audit_bucket(_d, _b)
   1.106 +#define sh_hash_audit_bucket(_d, _b) do {} while(0)
   1.107  #endif /* Hashtable bucket audit */
   1.108  
   1.109  
   1.110 @@ -1357,75 +1354,22 @@ static void sh_hash_audit(struct domain 
   1.111  }
   1.112  
   1.113  #else
   1.114 -#define sh_hash_audit(_d)
   1.115 +#define sh_hash_audit(_d) do {} while(0)
   1.116  #endif /* Hashtable bucket audit */
   1.117  
   1.118 -/* Memory management interface for bucket allocation.
   1.119 - * These ought to come out of shadow memory, but at least on 32-bit
   1.120 - * machines we are forced to allocate them from xenheap so that we can
   1.121 - * address them. */
   1.122 -static struct shadow_hash_entry *sh_alloc_hash_entry(struct domain *d)
   1.123 -{
   1.124 -    struct shadow_hash_entry *extra, *x;
   1.125 -    int i;
   1.126 -
   1.127 -    /* We need to allocate a new node. Ensure the free list is not empty. 
   1.128 -     * Allocate new entries in units the same size as the original table. */
   1.129 -    if ( unlikely(d->arch.shadow.hash_freelist == NULL) )
   1.130 -    {
   1.131 -        size_t sz = sizeof(void *) + (SHADOW_HASH_BUCKETS * sizeof(*x));
   1.132 -        extra = xmalloc_bytes(sz);
   1.133 -
   1.134 -        if ( extra == NULL )
   1.135 -        {
   1.136 -            /* No memory left! */
   1.137 -            SHADOW_ERROR("xmalloc() failed when allocating hash buckets.\n");
   1.138 -            domain_crash_synchronous();
   1.139 -        }
   1.140 -        memset(extra, 0, sz);
   1.141 -
   1.142 -        /* Record the allocation block so it can be correctly freed later. */
   1.143 -        *((struct shadow_hash_entry **)&extra[SHADOW_HASH_BUCKETS]) = 
   1.144 -            d->arch.shadow.hash_allocations;
   1.145 -        d->arch.shadow.hash_allocations = &extra[0];
   1.146 -
   1.147 -        /* Thread a free chain through the newly-allocated nodes. */
   1.148 -        for ( i = 0; i < (SHADOW_HASH_BUCKETS - 1); i++ )
   1.149 -            extra[i].next = &extra[i+1];
   1.150 -        extra[i].next = NULL;
   1.151 -
   1.152 -        /* Add the new nodes to the free list. */
   1.153 -        d->arch.shadow.hash_freelist = &extra[0];
   1.154 -    }
   1.155 -
   1.156 -    /* Allocate a new node from the free list. */
   1.157 -    x = d->arch.shadow.hash_freelist;
   1.158 -    d->arch.shadow.hash_freelist = x->next;
   1.159 -    return x;
   1.160 -}
   1.161 -
   1.162 -static void sh_free_hash_entry(struct domain *d, struct shadow_hash_entry *e)
   1.163 -{
   1.164 -    /* Mark the bucket as empty and return it to the free list */
   1.165 -    e->t = 0; 
   1.166 -    e->next = d->arch.shadow.hash_freelist;
   1.167 -    d->arch.shadow.hash_freelist = e;
   1.168 -}
   1.169 -
   1.170 -
   1.171  /* Allocate and initialise the table itself.  
   1.172   * Returns 0 for success, 1 for error. */
   1.173  static int shadow_hash_alloc(struct domain *d)
   1.174  {
   1.175 -    struct shadow_hash_entry *table;
   1.176 +    struct shadow_page_info **table;
   1.177  
   1.178      ASSERT(shadow_lock_is_acquired(d));
   1.179      ASSERT(!d->arch.shadow.hash_table);
   1.180  
   1.181 -    table = xmalloc_array(struct shadow_hash_entry, SHADOW_HASH_BUCKETS);
   1.182 +    table = xmalloc_array(struct shadow_page_info *, SHADOW_HASH_BUCKETS);
   1.183      if ( !table ) return 1;
   1.184      memset(table, 0, 
   1.185 -           SHADOW_HASH_BUCKETS * sizeof (struct shadow_hash_entry));
   1.186 +           SHADOW_HASH_BUCKETS * sizeof (struct shadow_page_info *));
   1.187      d->arch.shadow.hash_table = table;
   1.188      return 0;
   1.189  }
   1.190 @@ -1434,35 +1378,20 @@ static int shadow_hash_alloc(struct doma
   1.191   * This function does not care whether the table is populated. */
   1.192  static void shadow_hash_teardown(struct domain *d)
   1.193  {
   1.194 -    struct shadow_hash_entry *a, *n;
   1.195 -
   1.196      ASSERT(shadow_lock_is_acquired(d));
   1.197      ASSERT(d->arch.shadow.hash_table);
   1.198  
   1.199 -    /* Return the table itself */
   1.200      xfree(d->arch.shadow.hash_table);
   1.201      d->arch.shadow.hash_table = NULL;
   1.202 -
   1.203 -    /* Return any extra allocations */
   1.204 -    a = d->arch.shadow.hash_allocations;
   1.205 -    while ( a ) 
   1.206 -    {
   1.207 -        /* We stored a linked-list pointer at the end of each allocation */
   1.208 -        n = *((struct shadow_hash_entry **)(&a[SHADOW_HASH_BUCKETS]));
   1.209 -        xfree(a);
   1.210 -        a = n;
   1.211 -    }
   1.212 -    d->arch.shadow.hash_allocations = NULL;
   1.213 -    d->arch.shadow.hash_freelist = NULL;
   1.214  }
   1.215  
   1.216  
   1.217 -mfn_t shadow_hash_lookup(struct vcpu *v, unsigned long n, u8 t)
   1.218 +mfn_t shadow_hash_lookup(struct vcpu *v, unsigned long n, unsigned int t)
   1.219  /* Find an entry in the hash table.  Returns the MFN of the shadow,
   1.220   * or INVALID_MFN if it doesn't exist */
   1.221  {
   1.222      struct domain *d = v->domain;
   1.223 -    struct shadow_hash_entry *p, *x, *head;
   1.224 +    struct shadow_page_info *sp, *prev;
   1.225      key_t key;
   1.226  
   1.227      ASSERT(shadow_lock_is_acquired(d));
   1.228 @@ -1473,58 +1402,50 @@ mfn_t shadow_hash_lookup(struct vcpu *v,
   1.229  
   1.230      perfc_incrc(shadow_hash_lookups);
   1.231      key = sh_hash(n, t);
   1.232 -
   1.233 -    x = head = &d->arch.shadow.hash_table[key % SHADOW_HASH_BUCKETS];
   1.234 -    p = NULL;
   1.235 -
   1.236 -    sh_hash_audit_bucket(d, key % SHADOW_HASH_BUCKETS);
   1.237 -
   1.238 -    do
   1.239 +    sh_hash_audit_bucket(d, key);
   1.240 +
   1.241 +    sp = d->arch.shadow.hash_table[key];
   1.242 +    prev = NULL;
   1.243 +    while(sp)
   1.244      {
   1.245 -        ASSERT(x->t || ((x == head) && (x->next == NULL)));
   1.246 -
   1.247 -        if ( x->n == n && x->t == t )
   1.248 +        if ( sp->backpointer == n && sp->type == t )
   1.249          {
   1.250 -            /* Pull-to-front if 'x' isn't already the head item */
   1.251 -            if ( unlikely(x != head) )
   1.252 +            /* Pull-to-front if 'sp' isn't already the head item */
   1.253 +            if ( unlikely(sp != d->arch.shadow.hash_table[key]) )
   1.254              {
   1.255                  if ( unlikely(d->arch.shadow.hash_walking != 0) )
   1.256                      /* Can't reorder: someone is walking the hash chains */
   1.257 -                    return x->smfn;
   1.258 +                    return shadow_page_to_mfn(sp);
   1.259                  else 
   1.260                  {
   1.261 -                    /* Delete 'x' from list and reinsert after head. */
   1.262 -                    p->next = x->next;
   1.263 -                    x->next = head->next;
   1.264 -                    head->next = x;
   1.265 -                    
   1.266 -                    /* Swap 'x' contents with head contents. */
   1.267 -                    SWAP(head->n, x->n);
   1.268 -                    SWAP(head->t, x->t);
   1.269 -                    SWAP(head->smfn, x->smfn);
   1.270 +                    ASSERT(prev);
   1.271 +                    /* Delete sp from the list */
   1.272 +                    prev->next_shadow = sp->next_shadow;                    
   1.273 +                    /* Re-insert it at the head of the list */
   1.274 +                    sp->next_shadow = d->arch.shadow.hash_table[key];
   1.275 +                    d->arch.shadow.hash_table[key] = sp;
   1.276                  }
   1.277              }
   1.278              else
   1.279              {
   1.280                  perfc_incrc(shadow_hash_lookup_head);
   1.281              }
   1.282 -            return head->smfn;
   1.283 +            return shadow_page_to_mfn(sp);
   1.284          }
   1.285 -
   1.286 -        p = x;
   1.287 -        x = x->next;
   1.288 +        prev = sp;
   1.289 +        sp = sp->next_shadow;
   1.290      }
   1.291 -    while ( x != NULL );
   1.292  
   1.293      perfc_incrc(shadow_hash_lookup_miss);
   1.294      return _mfn(INVALID_MFN);
   1.295  }
   1.296  
   1.297 -void shadow_hash_insert(struct vcpu *v, unsigned long n, u8 t, mfn_t smfn)
   1.298 +void shadow_hash_insert(struct vcpu *v, unsigned long n, unsigned int t, 
   1.299 +                        mfn_t smfn)
   1.300  /* Put a mapping (n,t)->smfn into the hash table */
   1.301  {
   1.302      struct domain *d = v->domain;
   1.303 -    struct shadow_hash_entry *x, *head;
   1.304 +    struct shadow_page_info *sp;
   1.305      key_t key;
   1.306      
   1.307      ASSERT(shadow_lock_is_acquired(d));
   1.308 @@ -1535,38 +1456,22 @@ void shadow_hash_insert(struct vcpu *v, 
   1.309  
   1.310      perfc_incrc(shadow_hash_inserts);
   1.311      key = sh_hash(n, t);
   1.312 -
   1.313 -    head = &d->arch.shadow.hash_table[key % SHADOW_HASH_BUCKETS];
   1.314 -
   1.315 -    sh_hash_audit_bucket(d, key % SHADOW_HASH_BUCKETS);
   1.316 -
   1.317 -    /* If the bucket is empty then insert the new page as the head item. */
   1.318 -    if ( head->t == 0 )
   1.319 -    {
   1.320 -        head->n = n;
   1.321 -        head->t = t;
   1.322 -        head->smfn = smfn;
   1.323 -        ASSERT(head->next == NULL);
   1.324 -    }
   1.325 -    else 
   1.326 -    {
   1.327 -        /* Insert a new entry directly after the head item. */
   1.328 -        x = sh_alloc_hash_entry(d);
   1.329 -        x->n = n; 
   1.330 -        x->t = t;
   1.331 -        x->smfn = smfn;
   1.332 -        x->next = head->next;
   1.333 -        head->next = x;
   1.334 -    }
   1.335 +    sh_hash_audit_bucket(d, key);
   1.336      
   1.337 -    sh_hash_audit_bucket(d, key % SHADOW_HASH_BUCKETS);
   1.338 +    /* Insert this shadow at the top of the bucket */
   1.339 +    sp = mfn_to_shadow_page(smfn);
   1.340 +    sp->next_shadow = d->arch.shadow.hash_table[key];
   1.341 +    d->arch.shadow.hash_table[key] = sp;
   1.342 +    
   1.343 +    sh_hash_audit_bucket(d, key);
   1.344  }
   1.345  
   1.346 -void shadow_hash_delete(struct vcpu *v, unsigned long n, u8 t, mfn_t smfn)
   1.347 +void shadow_hash_delete(struct vcpu *v, unsigned long n, unsigned int t, 
   1.348 +                        mfn_t smfn)
   1.349  /* Excise the mapping (n,t)->smfn from the hash table */
   1.350  {
   1.351      struct domain *d = v->domain;
   1.352 -    struct shadow_hash_entry *p, *x, *head;
   1.353 +    struct shadow_page_info *sp, *x;
   1.354      key_t key;
   1.355  
   1.356      ASSERT(shadow_lock_is_acquired(d));
   1.357 @@ -1577,54 +1482,31 @@ void shadow_hash_delete(struct vcpu *v, 
   1.358  
   1.359      perfc_incrc(shadow_hash_deletes);
   1.360      key = sh_hash(n, t);
   1.361 -
   1.362 -    head = &d->arch.shadow.hash_table[key % SHADOW_HASH_BUCKETS];
   1.363 -
   1.364 -    sh_hash_audit_bucket(d, key % SHADOW_HASH_BUCKETS);
   1.365 -
   1.366 -    /* Match on head item? */
   1.367 -    if ( head->n == n && head->t == t )
   1.368 -    {
   1.369 -        if ( (x = head->next) != NULL )
   1.370 -        {
   1.371 -            /* Overwrite head with contents of following node. */
   1.372 -            head->n = x->n;
   1.373 -            head->t = x->t;
   1.374 -            head->smfn = x->smfn;
   1.375 -
   1.376 -            /* Delete following node. */
   1.377 -            head->next = x->next;
   1.378 -            sh_free_hash_entry(d, x);
   1.379 -        }
   1.380 -        else
   1.381 -        {
   1.382 -            /* This bucket is now empty. Initialise the head node. */
   1.383 -            head->t = 0;
   1.384 -        }
   1.385 -    }
   1.386 +    sh_hash_audit_bucket(d, key);
   1.387 +    
   1.388 +    sp = mfn_to_shadow_page(smfn);
   1.389 +    if ( d->arch.shadow.hash_table[key] == sp ) 
   1.390 +        /* Easy case: we're deleting the head item. */
   1.391 +        d->arch.shadow.hash_table[key] = sp->next_shadow;
   1.392      else 
   1.393      {
   1.394 -        /* Not at the head; need to walk the chain */
   1.395 -        p = head;
   1.396 -        x = head->next; 
   1.397 -        
   1.398 -        while(1)
   1.399 +        /* Need to search for the one we want */
   1.400 +        x = d->arch.shadow.hash_table[key];
   1.401 +        while ( 1 )
   1.402          {
   1.403              ASSERT(x); /* We can't have hit the end, since our target is
    1.404                          * still in the chain somewhere... */
   1.405 -            if ( x->n == n && x->t == t )
   1.406 +            if ( x->next_shadow == sp ) 
   1.407              {
   1.408 -                /* Delete matching node. */
   1.409 -                p->next = x->next;
   1.410 -                sh_free_hash_entry(d, x);
   1.411 +                x->next_shadow = sp->next_shadow;
   1.412                  break;
   1.413              }
   1.414 -            p = x;
   1.415 -            x = x->next;
   1.416 +            x = x->next_shadow;
   1.417          }
   1.418      }
   1.419 -
   1.420 -    sh_hash_audit_bucket(d, key % SHADOW_HASH_BUCKETS);
   1.421 +    sp->next_shadow = NULL;
   1.422 +
   1.423 +    sh_hash_audit_bucket(d, key);
   1.424  }
   1.425  
   1.426  typedef int (*hash_callback_t)(struct vcpu *v, mfn_t smfn, mfn_t other_mfn);
   1.427 @@ -1644,27 +1526,27 @@ static void hash_foreach(struct vcpu *v,
   1.428  {
   1.429      int i, done = 0;
   1.430      struct domain *d = v->domain;
   1.431 -    struct shadow_hash_entry *x;
   1.432 +    struct shadow_page_info *x;
   1.433  
   1.434      /* Say we're here, to stop hash-lookups reordering the chains */
   1.435      ASSERT(shadow_lock_is_acquired(d));
   1.436      ASSERT(d->arch.shadow.hash_walking == 0);
   1.437      d->arch.shadow.hash_walking = 1;
   1.438  
   1.439 -    callback_mask &= ~1; /* Never attempt to call back on empty buckets */
   1.440      for ( i = 0; i < SHADOW_HASH_BUCKETS; i++ ) 
   1.441      {
   1.442          /* WARNING: This is not safe against changes to the hash table.
   1.443           * The callback *must* return non-zero if it has inserted or
   1.444           * deleted anything from the hash (lookups are OK, though). */
   1.445 -        for ( x = &d->arch.shadow.hash_table[i]; x; x = x->next )
   1.446 +        for ( x = d->arch.shadow.hash_table[i]; x; x = x->next_shadow )
   1.447          {
   1.448 -            if ( callback_mask & (1 << x->t) ) 
   1.449 +            if ( callback_mask & (1 << x->type) ) 
   1.450              {
   1.451 -                ASSERT(x->t <= 15);
   1.452 -                ASSERT(callbacks[x->t] != NULL);
   1.453 -                if ( (done = callbacks[x->t](v, x->smfn, callback_mfn)) != 0 )
   1.454 -                    break;
   1.455 +                ASSERT(x->type <= 15);
   1.456 +                ASSERT(callbacks[x->type] != NULL);
   1.457 +                done = callbacks[x->type](v, shadow_page_to_mfn(x), 
   1.458 +                                          callback_mfn);
   1.459 +                if ( done ) break;
   1.460              }
   1.461          }
   1.462          if ( done ) break; 
     2.1 --- a/xen/arch/x86/mm/shadow/private.h	Thu Nov 23 17:40:28 2006 +0000
     2.2 +++ b/xen/arch/x86/mm/shadow/private.h	Thu Nov 23 17:42:29 2006 +0000
     2.3 @@ -229,9 +229,11 @@ static inline void shadow_check_page_str
     2.4  extern struct x86_emulate_ops shadow_emulator_ops;
     2.5  
     2.6  /* Hash table functions */
     2.7 -mfn_t shadow_hash_lookup(struct vcpu *v, unsigned long n, u8 t);
     2.8 -void  shadow_hash_insert(struct vcpu *v, unsigned long n, u8 t, mfn_t smfn);
     2.9 -void  shadow_hash_delete(struct vcpu *v, unsigned long n, u8 t, mfn_t smfn);
    2.10 +mfn_t shadow_hash_lookup(struct vcpu *v, unsigned long n, unsigned int t);
    2.11 +void  shadow_hash_insert(struct vcpu *v, 
    2.12 +                         unsigned long n, unsigned int t, mfn_t smfn);
    2.13 +void  shadow_hash_delete(struct vcpu *v, 
    2.14 +                         unsigned long n, unsigned int t, mfn_t smfn);
    2.15  
    2.16  /* shadow promotion */
    2.17  void shadow_promote(struct vcpu *v, mfn_t gmfn, u32 type);
     3.1 --- a/xen/include/asm-x86/domain.h	Thu Nov 23 17:40:28 2006 +0000
     3.2 +++ b/xen/include/asm-x86/domain.h	Thu Nov 23 17:42:29 2006 +0000
     3.3 @@ -71,9 +71,7 @@ struct shadow_domain {
     3.4      unsigned int      p2m_pages;    /* number of pages in p2m map */
     3.5  
     3.6      /* Shadow hashtable */
     3.7 -    struct shadow_hash_entry *hash_table;
     3.8 -    struct shadow_hash_entry *hash_freelist;
     3.9 -    struct shadow_hash_entry *hash_allocations;
    3.10 +    struct shadow_page_info **hash_table;
    3.11      int hash_walking;  /* Some function is walking the hash table */
    3.12  
    3.13      /* Shadow log-dirty bitmap */
     4.1 --- a/xen/include/asm-x86/shadow.h	Thu Nov 23 17:40:28 2006 +0000
     4.2 +++ b/xen/include/asm-x86/shadow.h	Thu Nov 23 17:42:29 2006 +0000
     4.3 @@ -599,24 +599,6 @@ static inline unsigned int shadow_get_al
     4.4              + ((pg & ((1 << (20 - PAGE_SHIFT)) - 1)) ? 1 : 0));
     4.5  }
     4.6  
     4.7 -/*
     4.8 - * Linked list for chaining entries in the shadow hash table. 
     4.9 - */
    4.10 -struct shadow_hash_entry {
    4.11 -    struct shadow_hash_entry *next;
    4.12 -    mfn_t smfn;                 /* MFN of the shadow */
    4.13 -#ifdef _x86_64_ /* Shorten 'n' so we don't waste a whole word on storing 't' */
    4.14 -    unsigned long n:56;         /* MFN of guest PT or GFN of guest superpage */
    4.15 -#else
    4.16 -    unsigned long n;            /* MFN of guest PT or GFN of guest superpage */
    4.17 -#endif
    4.18 -    unsigned char t;            /* shadow type bits, or 0 for empty */
    4.19 -};
    4.20 -
    4.21 -#define SHADOW_HASH_BUCKETS 251
    4.22 -/* Other possibly useful primes are 509, 1021, 2039, 4093, 8191, 16381 */
    4.23 -
    4.24 -
    4.25  #if SHADOW_OPTIMIZATIONS & SHOPT_CACHE_WALKS
    4.26  /* Optimization: cache the results of guest walks.  This helps with MMIO
    4.27   * and emulated writes, which tend to issue very similar walk requests