direct-io.hg

changeset 11205:45a84091144e

[XEN] More shadow2 cleanups -- primarily moving arch vcpu/domain
fields into separate shadow2-specific structures.
Also rename shadow2_entry_points to shadow2_paging_mode.
Remove VCPUF_shadow2_translate_mode and replace with a better-named
field in vcpu.arch.shadow2.
Signed-off-by: Keir Fraser <keir@xensource.com>
author kaf24@localhost.localdomain
date Sun Aug 20 17:55:33 2006 +0100 (2006-08-20)
parents 5fc1fe790835
children fc3e7e65b953
files xen/arch/x86/domain.c xen/arch/x86/hvm/hvm.c xen/arch/x86/shadow2-common.c xen/arch/x86/shadow2.c xen/arch/x86/traps.c xen/arch/x86/x86_32/traps.c xen/arch/x86/x86_64/traps.c xen/include/asm-x86/domain.h xen/include/asm-x86/shadow2-multi.h xen/include/asm-x86/shadow2-private.h xen/include/asm-x86/shadow2-types.h xen/include/asm-x86/shadow2.h xen/include/xen/sched.h
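For orientation, the restructuring described above can be read off the accessor changes in the diff below: fields that used to sit directly in struct arch_domain / struct arch_vcpu with a shadow2_ prefix (d->arch.shadow2_total_pages, v->arch.shadow2_propagate_fault, the v->arch.shadow2 pointer to shadow2_entry_points, and so on) are gathered into per-domain and per-vcpu sub-structures reached as d->arch.shadow2 and v->arch.shadow2. The sketch that follows is a simplified illustration inferred from those accessors only; the actual definitions live in xen/include/asm-x86/domain.h (not shown in this hunk), so the exact types, field order, and any fields not touched by this patch are assumptions.

    /* Hypothetical, simplified sketch of the grouped shadow2 state.
     * Field names are taken from the accessors in the diff; types and
     * layout are illustrative, not the real Xen definitions. */

    struct shadow2_domain {
        u32 mode;                      /* SHM2_* flags (was arch.shadow2_mode) */
        struct list_head freelists[SHADOW2_MAX_ORDER + 1];
        struct list_head p2m_freelist, p2m_inuse, toplevel_shadows;
        unsigned int total_pages, free_pages, p2m_pages;
        struct shadow2_hash_entry *hash_table;
        struct shadow2_hash_entry *hash_freelist, *hash_allocations;
        int hash_walking;
        unsigned int fault_count, dirty_count;
        unsigned long *dirty_bitmap;
        unsigned int dirty_bitmap_size;
    };

    struct shadow2_vcpu {
        /* Was the v->arch.shadow2 pointer itself; the pointed-to struct
         * is renamed from shadow2_entry_points to shadow2_paging_mode. */
        struct shadow2_paging_mode *mode;
        /* Replaces the _VCPUF_shadow2_translate vcpu flag. */
        int hvm_paging_enabled;
        int pae_flip_pending;
        int propagate_fault;
        unsigned long last_emulated_mfn;
    };

Grouping the fields this way keeps all shadow2 state behind a single member, so callers write d->arch.shadow2.total_pages rather than a flat shadow2_-prefixed name, and the per-vcpu paging-mode pointer becomes v->arch.shadow2.mode.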
line diff
     1.1 --- a/xen/arch/x86/domain.c	Sat Aug 19 17:07:54 2006 +0100
     1.2 +++ b/xen/arch/x86/domain.c	Sun Aug 20 17:55:33 2006 +0100
     1.3 @@ -195,10 +195,10 @@ int arch_domain_create(struct domain *d)
     1.4  
     1.5      shadow2_lock_init(d);
     1.6      for ( i = 0; i <= SHADOW2_MAX_ORDER; i++ )
     1.7 -        INIT_LIST_HEAD(&d->arch.shadow2_freelists[i]);
     1.8 -    INIT_LIST_HEAD(&d->arch.shadow2_p2m_freelist);
     1.9 -    INIT_LIST_HEAD(&d->arch.shadow2_p2m_inuse);
    1.10 -    INIT_LIST_HEAD(&d->arch.shadow2_toplevel_shadows);
    1.11 +        INIT_LIST_HEAD(&d->arch.shadow2.freelists[i]);
    1.12 +    INIT_LIST_HEAD(&d->arch.shadow2.p2m_freelist);
    1.13 +    INIT_LIST_HEAD(&d->arch.shadow2.p2m_inuse);
    1.14 +    INIT_LIST_HEAD(&d->arch.shadow2.toplevel_shadows);
    1.15  
    1.16      if ( !is_idle_domain(d) )
    1.17      {
    1.18 @@ -338,7 +338,7 @@ int arch_set_info_guest(
    1.19      /* Shadow2: make sure the domain has enough shadow memory to
    1.20       * boot another vcpu */
    1.21      if ( shadow2_mode_enabled(d) 
    1.22 -         && d->arch.shadow2_total_pages < shadow2_min_acceptable_pages(d) )
    1.23 +         && d->arch.shadow2.total_pages < shadow2_min_acceptable_pages(d) )
    1.24      {
    1.25          destroy_gdt(v);
    1.26          return -ENOMEM;
    1.27 @@ -977,7 +977,7 @@ void arch_dump_domain_info(struct domain
    1.28      if ( shadow2_mode_enabled(d) )
    1.29      {
    1.30          printk("    shadow2 mode: ");
    1.31 -        if ( d->arch.shadow2_mode & SHM2_enable )
    1.32 +        if ( d->arch.shadow2.mode & SHM2_enable )
    1.33              printk("enabled ");
    1.34          if ( shadow2_mode_refcounts(d) )
    1.35              printk("refcounts ");
     2.1 --- a/xen/arch/x86/hvm/hvm.c	Sat Aug 19 17:07:54 2006 +0100
     2.2 +++ b/xen/arch/x86/hvm/hvm.c	Sun Aug 20 17:55:33 2006 +0100
     2.3 @@ -260,14 +260,6 @@ void hvm_setup_platform(struct domain* d
     2.4      if ( !hvm_guest(v) || (v->vcpu_id != 0) )
     2.5          return;
     2.6  
     2.7 -#if 0 /* SHADOW2 does not have this */
     2.8 -    if ( shadow_direct_map_init(d) == 0 )
     2.9 -    {
    2.10 -        printk("Can not allocate shadow direct map for HVM domain.\n");
    2.11 -        domain_crash_synchronous();
    2.12 -    }
    2.13 -#endif
    2.14 -
    2.15      hvm_zap_iommu_pages(d);
    2.16  
    2.17      platform = &d->arch.hvm_domain;
    2.18 @@ -547,7 +539,7 @@ void hvm_do_hypercall(struct cpu_user_re
    2.19          return;
    2.20      }
    2.21  
    2.22 -    if ( current->arch.shadow2->guest_levels == 4 )
    2.23 +    if ( current->arch.shadow2.mode->guest_levels == 4 )
    2.24      {
    2.25          pregs->rax = hvm_hypercall64_table[pregs->rax](pregs->rdi,
    2.26                                                         pregs->rsi,
     3.1 --- a/xen/arch/x86/shadow2-common.c	Sat Aug 19 17:07:54 2006 +0100
     3.2 +++ b/xen/arch/x86/shadow2-common.c	Sun Aug 20 17:55:33 2006 +0100
     3.3 @@ -156,7 +156,7 @@ sh2_x86_emulate_write_emulated(unsigned 
     3.4  #endif
     3.5      if ( hvm_guest(v) )
     3.6      {
     3.7 -        return v->arch.shadow2->x86_emulate_write(v, addr, &val, bytes, ctxt);
     3.8 +        return v->arch.shadow2.mode->x86_emulate_write(v, addr, &val, bytes, ctxt);
     3.9      }
    3.10      else 
    3.11      {
    3.12 @@ -179,7 +179,7 @@ sh2_x86_emulate_cmpxchg_emulated(unsigne
    3.13  #endif
    3.14      if ( hvm_guest(v) )
    3.15      {
    3.16 -        return v->arch.shadow2->x86_emulate_cmpxchg(v, addr, old, new, 
    3.17 +        return v->arch.shadow2.mode->x86_emulate_cmpxchg(v, addr, old, new, 
    3.18                                                      bytes, ctxt);
    3.19      }
    3.20      else 
    3.21 @@ -205,7 +205,7 @@ sh2_x86_emulate_cmpxchg8b_emulated(unsig
    3.22  #endif
    3.23      if ( hvm_guest(v) )
    3.24      {
    3.25 -        return v->arch.shadow2->x86_emulate_cmpxchg8b(v, addr, old_lo, old_hi,
    3.26 +        return v->arch.shadow2.mode->x86_emulate_cmpxchg8b(v, addr, old_lo, old_hi,
    3.27                                                        new_lo, new_hi, ctxt);
    3.28      }
    3.29      else 
    3.30 @@ -423,7 +423,7 @@ shadow2_validate_guest_pt_write(struct v
    3.31   * ----------------------------------------------
    3.32   * 
    3.33   * A count of all references to this page from other shadow pages and
    3.34 - * guest CR3s (a.k.a. v->arch.shadow_table).  
    3.35 + * guest CR3s (a.k.a. v->arch.shadow2.table).  
    3.36   *
    3.37   * The top bits hold the shadow type and the pinned bit.  Top-level
    3.38   * shadows are pinned so that they don't disappear when not in a CR3
    3.39 @@ -593,7 +593,7 @@ static inline int chunk_is_available(str
    3.40      int i;
    3.41      
    3.42      for ( i = order; i <= SHADOW2_MAX_ORDER; i++ )
    3.43 -        if ( !list_empty(&d->arch.shadow2_freelists[i]) )
    3.44 +        if ( !list_empty(&d->arch.shadow2.freelists[i]) )
    3.45              return 1;
    3.46      return 0;
    3.47  }
    3.48 @@ -649,7 +649,7 @@ void shadow2_prealloc(struct domain *d, 
    3.49      
    3.50      /* Stage one: walk the list of top-level pages, unpinning them */
    3.51      perfc_incrc(shadow2_prealloc_1);
    3.52 -    list_for_each_backwards_safe(l, t, &d->arch.shadow2_toplevel_shadows)
    3.53 +    list_for_each_backwards_safe(l, t, &d->arch.shadow2.toplevel_shadows)
    3.54      {
    3.55          pg = list_entry(l, struct page_info, list);
    3.56          smfn = page_to_mfn(pg);
    3.57 @@ -680,7 +680,7 @@ void shadow2_prealloc(struct domain *d, 
    3.58          v = d->vcpu[0];
    3.59      /* Walk the list from the tail: recently used toplevels have been pulled
    3.60       * to the head */
    3.61 -    list_for_each_backwards_safe(l, t, &d->arch.shadow2_toplevel_shadows)
    3.62 +    list_for_each_backwards_safe(l, t, &d->arch.shadow2.toplevel_shadows)
    3.63      {
    3.64          pg = list_entry(l, struct page_info, list);
    3.65          smfn = page_to_mfn(pg);
    3.66 @@ -700,9 +700,9 @@ void shadow2_prealloc(struct domain *d, 
    3.67      SHADOW2_PRINTK("Can't pre-allocate %i shadow pages!\n"
    3.68                     "  shadow pages total = %u, free = %u, p2m=%u\n",
    3.69                     1 << order, 
    3.70 -                   d->arch.shadow2_total_pages, 
    3.71 -                   d->arch.shadow2_free_pages, 
    3.72 -                   d->arch.shadow2_p2m_pages);
    3.73 +                   d->arch.shadow2.total_pages, 
    3.74 +                   d->arch.shadow2.free_pages, 
    3.75 +                   d->arch.shadow2.p2m_pages);
    3.76      BUG();
    3.77  }
    3.78  
    3.79 @@ -727,9 +727,9 @@ mfn_t shadow2_alloc(struct domain *d,
    3.80  
    3.81      /* Find smallest order which can satisfy the request. */
    3.82      for ( i = order; i <= SHADOW2_MAX_ORDER; i++ )
    3.83 -        if ( !list_empty(&d->arch.shadow2_freelists[i]) )
    3.84 +        if ( !list_empty(&d->arch.shadow2.freelists[i]) )
    3.85          {
    3.86 -            pg = list_entry(d->arch.shadow2_freelists[i].next, 
    3.87 +            pg = list_entry(d->arch.shadow2.freelists[i].next, 
    3.88                              struct page_info, list);
    3.89              list_del(&pg->list);
    3.90              
    3.91 @@ -738,10 +738,10 @@ mfn_t shadow2_alloc(struct domain *d,
    3.92              {
    3.93                  i--;
    3.94                  SH2_SET_PFN_ORDER(pg, i);
    3.95 -                list_add_tail(&pg->list, &d->arch.shadow2_freelists[i]);
    3.96 +                list_add_tail(&pg->list, &d->arch.shadow2.freelists[i]);
    3.97                  pg += 1 << i;
    3.98              }
    3.99 -            d->arch.shadow2_free_pages -= 1 << order;
   3.100 +            d->arch.shadow2.free_pages -= 1 << order;
   3.101  
   3.102              /* Init page info fields and clear the pages */
   3.103              for ( i = 0; i < 1<<order ; i++ ) 
   3.104 @@ -795,7 +795,7 @@ void shadow2_free(struct domain *d, mfn_
   3.105      ASSERT(shadow_type != PGC_SH2_p2m_table);
   3.106      order = shadow_order(shadow_type);
   3.107  
   3.108 -    d->arch.shadow2_free_pages += 1 << order;
   3.109 +    d->arch.shadow2.free_pages += 1 << order;
   3.110  
   3.111      for ( i = 0; i < 1<<order; i++ ) 
   3.112      {
   3.113 @@ -831,7 +831,7 @@ void shadow2_free(struct domain *d, mfn_
   3.114      }
   3.115  
   3.116      SH2_SET_PFN_ORDER(pg, order);
   3.117 -    list_add_tail(&pg->list, &d->arch.shadow2_freelists[order]);
   3.118 +    list_add_tail(&pg->list, &d->arch.shadow2.freelists[order]);
   3.119  }
   3.120  
   3.121  /* Divert some memory from the pool to be used by the p2m mapping.
   3.122 @@ -851,18 +851,18 @@ shadow2_alloc_p2m_pages(struct domain *d
   3.123      u32 i;
   3.124      ASSERT(shadow2_lock_is_acquired(d));
   3.125      
   3.126 -    if ( d->arch.shadow2_total_pages 
   3.127 +    if ( d->arch.shadow2.total_pages 
   3.128           < (shadow2_min_acceptable_pages(d) + (1<<SHADOW2_MAX_ORDER)) )
   3.129          return 0; /* Not enough shadow memory: need to increase it first */
   3.130      
   3.131      pg = mfn_to_page(shadow2_alloc(d, PGC_SH2_p2m_table, 0));
   3.132 -    d->arch.shadow2_p2m_pages += (1<<SHADOW2_MAX_ORDER);
   3.133 -    d->arch.shadow2_total_pages -= (1<<SHADOW2_MAX_ORDER);
   3.134 +    d->arch.shadow2.p2m_pages += (1<<SHADOW2_MAX_ORDER);
   3.135 +    d->arch.shadow2.total_pages -= (1<<SHADOW2_MAX_ORDER);
   3.136      for (i = 0; i < (1<<SHADOW2_MAX_ORDER); i++)
   3.137      {
   3.138          /* Unlike shadow pages, mark p2m pages as owned by the domain */
   3.139          page_set_owner(&pg[i], d);
   3.140 -        list_add_tail(&pg[i].list, &d->arch.shadow2_p2m_freelist);
   3.141 +        list_add_tail(&pg[i].list, &d->arch.shadow2.p2m_freelist);
   3.142      }
   3.143      return 1;
   3.144  }
   3.145 @@ -875,12 +875,12 @@ shadow2_alloc_p2m_page(struct domain *d)
   3.146      mfn_t mfn;
   3.147      void *p;
   3.148  
   3.149 -    if ( list_empty(&d->arch.shadow2_p2m_freelist) &&
   3.150 +    if ( list_empty(&d->arch.shadow2.p2m_freelist) &&
   3.151           !shadow2_alloc_p2m_pages(d) )
   3.152          return _mfn(0);
   3.153 -    entry = d->arch.shadow2_p2m_freelist.next;
   3.154 +    entry = d->arch.shadow2.p2m_freelist.next;
   3.155      list_del(entry);
   3.156 -    list_add_tail(entry, &d->arch.shadow2_p2m_inuse);
   3.157 +    list_add_tail(entry, &d->arch.shadow2.p2m_inuse);
   3.158      mfn = page_to_mfn(list_entry(entry, struct page_info, list));
   3.159      sh2_get_ref(mfn, 0);
   3.160      p = sh2_map_domain_page(mfn);
   3.161 @@ -1201,7 +1201,7 @@ static void shadow2_p2m_teardown(struct 
   3.162  
   3.163      d->arch.phys_table = pagetable_null();
   3.164  
   3.165 -    list_for_each_safe(entry, n, &d->arch.shadow2_p2m_inuse)
   3.166 +    list_for_each_safe(entry, n, &d->arch.shadow2.p2m_inuse)
   3.167      {
   3.168          pg = list_entry(entry, struct page_info, list);
   3.169          list_del(entry);
   3.170 @@ -1216,10 +1216,10 @@ static void shadow2_p2m_teardown(struct 
   3.171           * these pages were allocated without an owner. */
   3.172          page_set_owner(pg, NULL); 
   3.173          free_domheap_pages(pg, 0);
   3.174 -        d->arch.shadow2_p2m_pages--;
   3.175 +        d->arch.shadow2.p2m_pages--;
   3.176          perfc_decr(shadow2_alloc_count);
   3.177      }
   3.178 -    list_for_each_safe(entry, n, &d->arch.shadow2_p2m_freelist)
   3.179 +    list_for_each_safe(entry, n, &d->arch.shadow2.p2m_freelist)
   3.180      {
   3.181          list_del(entry);
   3.182          pg = list_entry(entry, struct page_info, list);
   3.183 @@ -1227,10 +1227,10 @@ static void shadow2_p2m_teardown(struct 
   3.184          /* Free should not decrement domain's total allocation. */
   3.185          page_set_owner(pg, NULL); 
   3.186          free_domheap_pages(pg, 0);
   3.187 -        d->arch.shadow2_p2m_pages--;
   3.188 +        d->arch.shadow2.p2m_pages--;
   3.189          perfc_decr(shadow2_alloc_count);
   3.190      }
   3.191 -    ASSERT(d->arch.shadow2_p2m_pages == 0);
   3.192 +    ASSERT(d->arch.shadow2.p2m_pages == 0);
   3.193  }
   3.194  
   3.195  /* Set the pool of shadow pages to the required number of pages.
   3.196 @@ -1256,11 +1256,11 @@ static unsigned int set_sh2_allocation(s
   3.197      pages = (pages + ((1<<SHADOW2_MAX_ORDER)-1)) & ~((1<<SHADOW2_MAX_ORDER)-1);
   3.198  
   3.199      SHADOW2_PRINTK("current %i target %i\n", 
   3.200 -                   d->arch.shadow2_total_pages, pages);
   3.201 -
   3.202 -    while ( d->arch.shadow2_total_pages != pages ) 
   3.203 +                   d->arch.shadow2.total_pages, pages);
   3.204 +
   3.205 +    while ( d->arch.shadow2.total_pages != pages ) 
   3.206      {
   3.207 -        if ( d->arch.shadow2_total_pages < pages ) 
   3.208 +        if ( d->arch.shadow2.total_pages < pages ) 
   3.209          {
   3.210              /* Need to allocate more memory from domheap */
   3.211              pg = alloc_domheap_pages(NULL, SHADOW2_MAX_ORDER, 0); 
   3.212 @@ -1269,8 +1269,8 @@ static unsigned int set_sh2_allocation(s
   3.213                  SHADOW2_PRINTK("failed to allocate shadow pages.\n");
   3.214                  return -ENOMEM;
   3.215              }
   3.216 -            d->arch.shadow2_free_pages += 1<<SHADOW2_MAX_ORDER;
   3.217 -            d->arch.shadow2_total_pages += 1<<SHADOW2_MAX_ORDER;
   3.218 +            d->arch.shadow2.free_pages += 1<<SHADOW2_MAX_ORDER;
   3.219 +            d->arch.shadow2.total_pages += 1<<SHADOW2_MAX_ORDER;
   3.220              for ( j = 0; j < 1<<SHADOW2_MAX_ORDER; j++ ) 
   3.221              {
   3.222                  pg[j].u.inuse.type_info = 0;  /* Free page */
   3.223 @@ -1278,18 +1278,18 @@ static unsigned int set_sh2_allocation(s
   3.224              }
   3.225              SH2_SET_PFN_ORDER(pg, SHADOW2_MAX_ORDER);
   3.226              list_add_tail(&pg->list, 
   3.227 -                          &d->arch.shadow2_freelists[SHADOW2_MAX_ORDER]);
   3.228 +                          &d->arch.shadow2.freelists[SHADOW2_MAX_ORDER]);
   3.229          } 
   3.230 -        else if ( d->arch.shadow2_total_pages > pages ) 
   3.231 +        else if ( d->arch.shadow2.total_pages > pages ) 
   3.232          {
   3.233              /* Need to return memory to domheap */
   3.234              shadow2_prealloc(d, SHADOW2_MAX_ORDER);
   3.235 -            ASSERT(!list_empty(&d->arch.shadow2_freelists[SHADOW2_MAX_ORDER]));
   3.236 -            pg = list_entry(d->arch.shadow2_freelists[SHADOW2_MAX_ORDER].next, 
   3.237 +            ASSERT(!list_empty(&d->arch.shadow2.freelists[SHADOW2_MAX_ORDER]));
   3.238 +            pg = list_entry(d->arch.shadow2.freelists[SHADOW2_MAX_ORDER].next, 
   3.239                              struct page_info, list);
   3.240              list_del(&pg->list);
   3.241 -            d->arch.shadow2_free_pages -= 1<<SHADOW2_MAX_ORDER;
   3.242 -            d->arch.shadow2_total_pages -= 1<<SHADOW2_MAX_ORDER;
   3.243 +            d->arch.shadow2.free_pages -= 1<<SHADOW2_MAX_ORDER;
   3.244 +            d->arch.shadow2.total_pages -= 1<<SHADOW2_MAX_ORDER;
   3.245              free_domheap_pages(pg, SHADOW2_MAX_ORDER);
   3.246          }
   3.247  
   3.248 @@ -1314,7 +1314,7 @@ unsigned int shadow2_set_allocation(stru
   3.249      rv = set_sh2_allocation(d, megabytes << (20 - PAGE_SHIFT), preempted); 
   3.250      SHADOW2_PRINTK("dom %u allocation now %u pages (%u MB)\n",
   3.251                     d->domain_id,
   3.252 -                   d->arch.shadow2_total_pages,
   3.253 +                   d->arch.shadow2.total_pages,
   3.254                     shadow2_get_allocation(d));
   3.255      shadow2_unlock(d);
   3.256      return rv;
   3.257 @@ -1347,7 +1347,7 @@ static void sh2_hash_audit_bucket(struct
   3.258      if ( !(SHADOW2_AUDIT_ENABLE) )
   3.259          return;
   3.260  
   3.261 -    e = &d->arch.shadow2_hash_table[bucket];
   3.262 +    e = &d->arch.shadow2.hash_table[bucket];
   3.263      if ( e->t == 0 ) return; /* Bucket is empty */ 
   3.264      while ( e )
   3.265      {
   3.266 @@ -1418,7 +1418,7 @@ static struct shadow2_hash_entry *sh2_al
   3.267  
   3.268      /* We need to allocate a new node. Ensure the free list is not empty. 
   3.269       * Allocate new entries in units the same size as the original table. */
   3.270 -    if ( unlikely(d->arch.shadow2_hash_freelist == NULL) )
   3.271 +    if ( unlikely(d->arch.shadow2.hash_freelist == NULL) )
   3.272      {
   3.273          size_t sz = sizeof(void *) + (SHADOW2_HASH_BUCKETS * sizeof(*x));
   3.274          extra = xmalloc_bytes(sz);
   3.275 @@ -1433,8 +1433,8 @@ static struct shadow2_hash_entry *sh2_al
   3.276  
   3.277          /* Record the allocation block so it can be correctly freed later. */
   3.278          *((struct shadow2_hash_entry **)&extra[SHADOW2_HASH_BUCKETS]) = 
   3.279 -            d->arch.shadow2_hash_allocations;
   3.280 -        d->arch.shadow2_hash_allocations = &extra[0];
   3.281 +            d->arch.shadow2.hash_allocations;
   3.282 +        d->arch.shadow2.hash_allocations = &extra[0];
   3.283  
   3.284          /* Thread a free chain through the newly-allocated nodes. */
   3.285          for ( i = 0; i < (SHADOW2_HASH_BUCKETS - 1); i++ )
   3.286 @@ -1442,12 +1442,12 @@ static struct shadow2_hash_entry *sh2_al
   3.287          extra[i].next = NULL;
   3.288  
   3.289          /* Add the new nodes to the free list. */
   3.290 -        d->arch.shadow2_hash_freelist = &extra[0];
   3.291 +        d->arch.shadow2.hash_freelist = &extra[0];
   3.292      }
   3.293  
   3.294      /* Allocate a new node from the free list. */
   3.295 -    x = d->arch.shadow2_hash_freelist;
   3.296 -    d->arch.shadow2_hash_freelist = x->next;
   3.297 +    x = d->arch.shadow2.hash_freelist;
   3.298 +    d->arch.shadow2.hash_freelist = x->next;
   3.299      return x;
   3.300  }
   3.301  
   3.302 @@ -1455,8 +1455,8 @@ static void sh2_free_hash_entry(struct d
   3.303  {
   3.304      /* Mark the bucket as empty and return it to the free list */
   3.305      e->t = 0; 
   3.306 -    e->next = d->arch.shadow2_hash_freelist;
   3.307 -    d->arch.shadow2_hash_freelist = e;
   3.308 +    e->next = d->arch.shadow2.hash_freelist;
   3.309 +    d->arch.shadow2.hash_freelist = e;
   3.310  }
   3.311  
   3.312  
   3.313 @@ -1467,13 +1467,13 @@ static int shadow2_hash_alloc(struct dom
   3.314      struct shadow2_hash_entry *table;
   3.315  
   3.316      ASSERT(shadow2_lock_is_acquired(d));
   3.317 -    ASSERT(!d->arch.shadow2_hash_table);
   3.318 +    ASSERT(!d->arch.shadow2.hash_table);
   3.319  
   3.320      table = xmalloc_array(struct shadow2_hash_entry, SHADOW2_HASH_BUCKETS);
   3.321      if ( !table ) return 1;
   3.322      memset(table, 0, 
   3.323             SHADOW2_HASH_BUCKETS * sizeof (struct shadow2_hash_entry));
   3.324 -    d->arch.shadow2_hash_table = table;
   3.325 +    d->arch.shadow2.hash_table = table;
   3.326      return 0;
   3.327  }
   3.328  
   3.329 @@ -1484,14 +1484,14 @@ static void shadow2_hash_teardown(struct
   3.330      struct shadow2_hash_entry *a, *n;
   3.331  
   3.332      ASSERT(shadow2_lock_is_acquired(d));
   3.333 -    ASSERT(d->arch.shadow2_hash_table);
   3.334 +    ASSERT(d->arch.shadow2.hash_table);
   3.335  
   3.336      /* Return the table itself */
   3.337 -    xfree(d->arch.shadow2_hash_table);
   3.338 -    d->arch.shadow2_hash_table = NULL;
   3.339 +    xfree(d->arch.shadow2.hash_table);
   3.340 +    d->arch.shadow2.hash_table = NULL;
   3.341  
   3.342      /* Return any extra allocations */
   3.343 -    a = d->arch.shadow2_hash_allocations;
   3.344 +    a = d->arch.shadow2.hash_allocations;
   3.345      while ( a ) 
   3.346      {
   3.347          /* We stored a linked-list pointer at the end of each allocation */
   3.348 @@ -1499,8 +1499,8 @@ static void shadow2_hash_teardown(struct
   3.349          xfree(a);
   3.350          a = n;
   3.351      }
   3.352 -    d->arch.shadow2_hash_allocations = NULL;
   3.353 -    d->arch.shadow2_hash_freelist = NULL;
   3.354 +    d->arch.shadow2.hash_allocations = NULL;
   3.355 +    d->arch.shadow2.hash_freelist = NULL;
   3.356  }
   3.357  
   3.358  
   3.359 @@ -1513,7 +1513,7 @@ mfn_t shadow2_hash_lookup(struct vcpu *v
   3.360      key_t key;
   3.361  
   3.362      ASSERT(shadow2_lock_is_acquired(d));
   3.363 -    ASSERT(d->arch.shadow2_hash_table);
   3.364 +    ASSERT(d->arch.shadow2.hash_table);
   3.365      ASSERT(t);
   3.366  
   3.367      sh2_hash_audit(d);
   3.368 @@ -1521,7 +1521,7 @@ mfn_t shadow2_hash_lookup(struct vcpu *v
   3.369      perfc_incrc(shadow2_hash_lookups);
   3.370      key = sh2_hash(n, t);
   3.371  
   3.372 -    x = head = &d->arch.shadow2_hash_table[key % SHADOW2_HASH_BUCKETS];
   3.373 +    x = head = &d->arch.shadow2.hash_table[key % SHADOW2_HASH_BUCKETS];
   3.374      p = NULL;
   3.375  
   3.376      sh2_hash_audit_bucket(d, key % SHADOW2_HASH_BUCKETS);
   3.377 @@ -1535,7 +1535,7 @@ mfn_t shadow2_hash_lookup(struct vcpu *v
   3.378              /* Pull-to-front if 'x' isn't already the head item */
   3.379              if ( unlikely(x != head) )
   3.380              {
   3.381 -                if ( unlikely(d->arch.shadow2_hash_walking != 0) )
   3.382 +                if ( unlikely(d->arch.shadow2.hash_walking != 0) )
   3.383                      /* Can't reorder: someone is walking the hash chains */
   3.384                      return x->smfn;
   3.385                  else 
   3.386 @@ -1575,7 +1575,7 @@ void shadow2_hash_insert(struct vcpu *v,
   3.387      key_t key;
   3.388      
   3.389      ASSERT(shadow2_lock_is_acquired(d));
   3.390 -    ASSERT(d->arch.shadow2_hash_table);
   3.391 +    ASSERT(d->arch.shadow2.hash_table);
   3.392      ASSERT(t);
   3.393  
   3.394      sh2_hash_audit(d);
   3.395 @@ -1583,7 +1583,7 @@ void shadow2_hash_insert(struct vcpu *v,
   3.396      perfc_incrc(shadow2_hash_inserts);
   3.397      key = sh2_hash(n, t);
   3.398  
   3.399 -    head = &d->arch.shadow2_hash_table[key % SHADOW2_HASH_BUCKETS];
   3.400 +    head = &d->arch.shadow2.hash_table[key % SHADOW2_HASH_BUCKETS];
   3.401  
   3.402      sh2_hash_audit_bucket(d, key % SHADOW2_HASH_BUCKETS);
   3.403  
   3.404 @@ -1617,7 +1617,7 @@ void shadow2_hash_delete(struct vcpu *v,
   3.405      key_t key;
   3.406  
   3.407      ASSERT(shadow2_lock_is_acquired(d));
   3.408 -    ASSERT(d->arch.shadow2_hash_table);
   3.409 +    ASSERT(d->arch.shadow2.hash_table);
   3.410      ASSERT(t);
   3.411  
   3.412      sh2_hash_audit(d);
   3.413 @@ -1625,7 +1625,7 @@ void shadow2_hash_delete(struct vcpu *v,
   3.414      perfc_incrc(shadow2_hash_deletes);
   3.415      key = sh2_hash(n, t);
   3.416  
   3.417 -    head = &d->arch.shadow2_hash_table[key % SHADOW2_HASH_BUCKETS];
   3.418 +    head = &d->arch.shadow2.hash_table[key % SHADOW2_HASH_BUCKETS];
   3.419  
   3.420      sh2_hash_audit_bucket(d, key % SHADOW2_HASH_BUCKETS);
   3.421  
   3.422 @@ -1695,8 +1695,8 @@ static void hash_foreach(struct vcpu *v,
   3.423  
   3.424      /* Say we're here, to stop hash-lookups reordering the chains */
   3.425      ASSERT(shadow2_lock_is_acquired(d));
   3.426 -    ASSERT(d->arch.shadow2_hash_walking == 0);
   3.427 -    d->arch.shadow2_hash_walking = 1;
   3.428 +    ASSERT(d->arch.shadow2.hash_walking == 0);
   3.429 +    d->arch.shadow2.hash_walking = 1;
   3.430  
   3.431      callback_mask &= ~1; /* Never attempt to call back on empty buckets */
   3.432      for ( i = 0; i < SHADOW2_HASH_BUCKETS; i++ ) 
   3.433 @@ -1704,7 +1704,7 @@ static void hash_foreach(struct vcpu *v,
   3.434          /* WARNING: This is not safe against changes to the hash table.
   3.435           * The callback *must* return non-zero if it has inserted or
   3.436           * deleted anything from the hash (lookups are OK, though). */
   3.437 -        for ( x = &d->arch.shadow2_hash_table[i]; x; x = x->next )
   3.438 +        for ( x = &d->arch.shadow2.hash_table[i]; x; x = x->next )
   3.439          {
   3.440              if ( callback_mask & (1 << x->t) ) 
   3.441              {
   3.442 @@ -1716,7 +1716,7 @@ static void hash_foreach(struct vcpu *v,
   3.443          }
   3.444          if ( done ) break; 
   3.445      }
   3.446 -    d->arch.shadow2_hash_walking = 0; 
   3.447 +    d->arch.shadow2.hash_walking = 0; 
   3.448  }
   3.449  
   3.450  
   3.451 @@ -1891,7 +1891,7 @@ int shadow2_remove_write_access(struct v
   3.452           * magic slot used to map high memory regions (linux HIGHTPTE) */
   3.453  
   3.454  #define GUESS(_a, _h) do {                                              \
   3.455 -            if ( v->arch.shadow2->guess_wrmap(v, (_a), gmfn) )          \
   3.456 +            if ( v->arch.shadow2.mode->guess_wrmap(v, (_a), gmfn) )          \
   3.457                  perfc_incrc(shadow2_writeable_h_ ## _h);                \
   3.458              if ( (pg->u.inuse.type_info & PGT_count_mask) == 0 )        \
   3.459                  return 1;                                               \
   3.460 @@ -1903,14 +1903,14 @@ int shadow2_remove_write_access(struct v
   3.461               && (gfn = sh2_mfn_to_gfn(v->domain, gmfn)) < 0x40000000 )
   3.462              GUESS(0xC0000000 + (gfn << PAGE_SHIFT), 4);
   3.463  
   3.464 -        if ( v->arch.shadow2->guest_levels == 2 )
   3.465 +        if ( v->arch.shadow2.mode->guest_levels == 2 )
   3.466          {
   3.467              if ( level == 1 )
   3.468                  /* 32bit non-PAE w2k3: linear map at 0xC0000000 */
   3.469                  GUESS(0xC0000000UL + (fault_addr >> 10), 1);
   3.470          }
   3.471  #if CONFIG_PAGING_LEVELS >= 3
   3.472 -        else if ( v->arch.shadow2->guest_levels == 3 )
   3.473 +        else if ( v->arch.shadow2.mode->guest_levels == 3 )
   3.474          {
   3.475              /* 32bit PAE w2k3: linear map at 0xC0000000 */
   3.476              switch ( level ) 
   3.477 @@ -1920,7 +1920,7 @@ int shadow2_remove_write_access(struct v
   3.478              }
   3.479          }
   3.480  #if CONFIG_PAGING_LEVELS >= 4
   3.481 -        else if ( v->arch.shadow2->guest_levels == 4 )
   3.482 +        else if ( v->arch.shadow2.mode->guest_levels == 4 )
   3.483          {
   3.484              /* 64bit w2k3: linear map at 0x0000070000000000 */
   3.485              switch ( level ) 
   3.486 @@ -2273,7 +2273,7 @@ shadow2_remove_all_shadows_and_parents(s
   3.487  void sh2_update_paging_modes(struct vcpu *v)
   3.488  {
   3.489      struct domain *d = v->domain;
   3.490 -    struct shadow2_entry_points *old_entries = v->arch.shadow2;
   3.491 +    struct shadow2_paging_mode *old_mode = v->arch.shadow2.mode;
   3.492      mfn_t old_guest_table;
   3.493  
   3.494      ASSERT(shadow2_lock_is_acquired(d));
   3.495 @@ -2297,8 +2297,7 @@ void sh2_update_paging_modes(struct vcpu
   3.496  
   3.497      // First, tear down any old shadow tables held by this vcpu.
   3.498      //
   3.499 -    if ( v->arch.shadow2 )
   3.500 -        shadow2_detach_old_tables(v);
   3.501 +    shadow2_detach_old_tables(v);
   3.502  
   3.503      if ( !hvm_guest(v) )
   3.504      {
   3.505 @@ -2307,13 +2306,13 @@ void sh2_update_paging_modes(struct vcpu
   3.506          ///
   3.507  #if CONFIG_PAGING_LEVELS == 4
   3.508          if ( pv_32bit_guest(v) )
   3.509 -            v->arch.shadow2 = &SHADOW2_INTERNAL_NAME(shadow2_entry, 4, 3);
   3.510 +            v->arch.shadow2.mode = &SHADOW2_INTERNAL_NAME(sh2_paging_mode,4,3);
   3.511          else
   3.512 -            v->arch.shadow2 = &SHADOW2_INTERNAL_NAME(shadow2_entry, 4, 4);
   3.513 +            v->arch.shadow2.mode = &SHADOW2_INTERNAL_NAME(sh2_paging_mode,4,4);
   3.514  #elif CONFIG_PAGING_LEVELS == 3
   3.515 -        v->arch.shadow2 = &SHADOW2_INTERNAL_NAME(shadow2_entry, 3, 3);
   3.516 +        v->arch.shadow2.mode = &SHADOW2_INTERNAL_NAME(sh2_paging_mode,3,3);
   3.517  #elif CONFIG_PAGING_LEVELS == 2
   3.518 -        v->arch.shadow2 = &SHADOW2_INTERNAL_NAME(shadow2_entry, 2, 2);
   3.519 +        v->arch.shadow2.mode = &SHADOW2_INTERNAL_NAME(sh2_paging_mode,2,2);
   3.520  #else
   3.521  #error unexpected paging mode
   3.522  #endif
   3.523 @@ -2326,10 +2325,9 @@ void sh2_update_paging_modes(struct vcpu
   3.524          ASSERT(shadow2_mode_translate(d));
   3.525          ASSERT(shadow2_mode_external(d));
   3.526  
   3.527 -        if ( !hvm_paging_enabled(v) )
   3.528 +        v->arch.shadow2.hvm_paging_enabled = !!hvm_paging_enabled(v);
   3.529 +        if ( !v->arch.shadow2.hvm_paging_enabled )
   3.530          {
   3.531 -            // paging disabled...
   3.532 -            clear_bit(_VCPUF_shadow2_translate, &v->vcpu_flags);
   3.533              
   3.534              /* Set v->arch.guest_table to use the p2m map, and choose
   3.535               * the appropriate shadow mode */
   3.536 @@ -2337,11 +2335,11 @@ void sh2_update_paging_modes(struct vcpu
   3.537  #if CONFIG_PAGING_LEVELS == 2
   3.538              v->arch.guest_table =
   3.539                  pagetable_from_pfn(pagetable_get_pfn(d->arch.phys_table));
   3.540 -            v->arch.shadow2 = &SHADOW2_INTERNAL_NAME(shadow2_entry,2,2);
   3.541 +            v->arch.shadow2.mode = &SHADOW2_INTERNAL_NAME(sh2_paging_mode,2,2);
   3.542  #elif CONFIG_PAGING_LEVELS == 3 
   3.543              v->arch.guest_table =
   3.544                  pagetable_from_pfn(pagetable_get_pfn(d->arch.phys_table));
   3.545 -            v->arch.shadow2 = &SHADOW2_INTERNAL_NAME(shadow2_entry,3,3);
   3.546 +            v->arch.shadow2.mode = &SHADOW2_INTERNAL_NAME(sh2_paging_mode,3,3);
   3.547  #else /* CONFIG_PAGING_LEVELS == 4 */
   3.548              { 
   3.549                  l4_pgentry_t *l4e; 
   3.550 @@ -2353,7 +2351,7 @@ void sh2_update_paging_modes(struct vcpu
   3.551                      pagetable_from_pfn(l4e_get_pfn(l4e[0]));
   3.552                  sh2_unmap_domain_page(l4e);
   3.553              }
   3.554 -            v->arch.shadow2 = &SHADOW2_INTERNAL_NAME(shadow2_entry,3,3);
   3.555 +            v->arch.shadow2.mode = &SHADOW2_INTERNAL_NAME(sh2_paging_mode,3,3);
   3.556  #endif
   3.557              /* Fix up refcounts on guest_table */
   3.558              get_page(mfn_to_page(pagetable_get_mfn(v->arch.guest_table)), d);
   3.559 @@ -2362,13 +2360,12 @@ void sh2_update_paging_modes(struct vcpu
   3.560          }
   3.561          else
   3.562          {
   3.563 -            set_bit(_VCPUF_shadow2_translate, &v->vcpu_flags);
   3.564 -
   3.565  #ifdef __x86_64__
   3.566              if ( hvm_long_mode_enabled(v) )
   3.567              {
   3.568                  // long mode guest...
   3.569 -                v->arch.shadow2 = &SHADOW2_INTERNAL_NAME(shadow2_entry, 4, 4);
   3.570 +                v->arch.shadow2.mode =
   3.571 +                    &SHADOW2_INTERNAL_NAME(sh2_paging_mode, 4, 4);
   3.572              }
   3.573              else
   3.574  #endif
   3.575 @@ -2376,7 +2373,8 @@ void sh2_update_paging_modes(struct vcpu
   3.576                  {
   3.577  #if CONFIG_PAGING_LEVELS >= 3
   3.578                      // 32-bit PAE mode guest...
   3.579 -                    v->arch.shadow2 = &SHADOW2_INTERNAL_NAME(shadow2_entry, 3, 3);
   3.580 +                    v->arch.shadow2.mode =
   3.581 +                        &SHADOW2_INTERNAL_NAME(sh2_paging_mode, 3, 3);
   3.582  #else
   3.583                      SHADOW2_ERROR("PAE not supported in 32-bit Xen\n");
   3.584                      domain_crash(d);
   3.585 @@ -2387,13 +2385,15 @@ void sh2_update_paging_modes(struct vcpu
   3.586                  {
   3.587                      // 32-bit 2 level guest...
   3.588  #if CONFIG_PAGING_LEVELS >= 3
   3.589 -                    v->arch.shadow2 = &SHADOW2_INTERNAL_NAME(shadow2_entry, 3, 2);
   3.590 +                    v->arch.shadow2.mode =
   3.591 +                        &SHADOW2_INTERNAL_NAME(sh2_paging_mode, 3, 2);
   3.592  #else
   3.593 -                    v->arch.shadow2 = &SHADOW2_INTERNAL_NAME(shadow2_entry, 2, 2);
   3.594 +                    v->arch.shadow2.mode =
   3.595 +                        &SHADOW2_INTERNAL_NAME(sh2_paging_mode, 2, 2);
   3.596  #endif
   3.597                  }
   3.598          }
   3.599 -        
   3.600 +
   3.601          if ( pagetable_get_pfn(v->arch.monitor_table) == 0 )
   3.602          {
   3.603              mfn_t mmfn = shadow2_make_monitor_table(v);
   3.604 @@ -2401,18 +2401,18 @@ void sh2_update_paging_modes(struct vcpu
   3.605              v->arch.monitor_vtable = sh2_map_domain_page(mmfn);
   3.606          } 
   3.607  
   3.608 -        if ( v->arch.shadow2 != old_entries )
   3.609 +        if ( v->arch.shadow2.mode != old_mode )
   3.610          {
   3.611              SHADOW2_PRINTK("new paging mode: d=%u v=%u g=%u s=%u "
   3.612                             "(was g=%u s=%u)\n",
   3.613                             d->domain_id, v->vcpu_id, 
   3.614 -                           v->arch.shadow2->guest_levels,
   3.615 -                           v->arch.shadow2->shadow_levels,
   3.616 -                           old_entries ? old_entries->guest_levels : 0,
   3.617 -                           old_entries ? old_entries->shadow_levels : 0);
   3.618 -            if ( old_entries &&
   3.619 -                 (v->arch.shadow2->shadow_levels !=
   3.620 -                  old_entries->shadow_levels) )
   3.621 +                           v->arch.shadow2.mode->guest_levels,
   3.622 +                           v->arch.shadow2.mode->shadow_levels,
   3.623 +                           old_mode ? old_mode->guest_levels : 0,
   3.624 +                           old_mode ? old_mode->shadow_levels : 0);
   3.625 +            if ( old_mode &&
   3.626 +                 (v->arch.shadow2.mode->shadow_levels !=
   3.627 +                  old_mode->shadow_levels) )
   3.628              {
   3.629                  /* Need to make a new monitor table for the new mode */
   3.630                  mfn_t new_mfn, old_mfn;
   3.631 @@ -2430,7 +2430,7 @@ void sh2_update_paging_modes(struct vcpu
   3.632                  sh2_unmap_domain_page(v->arch.monitor_vtable);
   3.633                  old_mfn = pagetable_get_mfn(v->arch.monitor_table);
   3.634                  v->arch.monitor_table = pagetable_null();
   3.635 -                new_mfn = v->arch.shadow2->make_monitor_table(v);            
   3.636 +                new_mfn = v->arch.shadow2.mode->make_monitor_table(v);            
   3.637                  v->arch.monitor_table = pagetable_from_mfn(new_mfn);
   3.638                  v->arch.monitor_vtable = sh2_map_domain_page(new_mfn);
   3.639                  SHADOW2_PRINTK("new monitor table %"SH2_PRI_mfn "\n",
   3.640 @@ -2442,7 +2442,7 @@ void sh2_update_paging_modes(struct vcpu
   3.641                  make_cr3(v, mfn_x(new_mfn));
   3.642                  write_ptbase(v);
   3.643                  hvm_update_host_cr3(v);
   3.644 -                old_entries->destroy_monitor_table(v, old_mfn);
   3.645 +                old_mode->destroy_monitor_table(v, old_mfn);
   3.646              }
   3.647          }
   3.648  
   3.649 @@ -2452,7 +2452,7 @@ void sh2_update_paging_modes(struct vcpu
   3.650          //        This *does* happen, at least for CR4.PGE...
   3.651      }
   3.652  
   3.653 -    v->arch.shadow2->update_cr3(v);
   3.654 +    v->arch.shadow2.mode->update_cr3(v);
   3.655  }
   3.656  
   3.657  /**************************************************************************/
   3.658 @@ -2465,7 +2465,7 @@ static void sh2_new_mode(struct domain *
   3.659  
   3.660      ASSERT(shadow2_lock_is_acquired(d));
   3.661      ASSERT(d != current->domain);
   3.662 -    d->arch.shadow2_mode = new_mode;
   3.663 +    d->arch.shadow2.mode = new_mode;
   3.664      if ( new_mode & SHM2_translate ) 
   3.665          shadow2_audit_p2m(d);
   3.666      for_each_vcpu(d, v)
   3.667 @@ -2509,7 +2509,7 @@ static int shadow2_enable(struct domain 
   3.668  #endif
   3.669  
   3.670      /* Init the shadow memory allocation if the user hasn't done so */
   3.671 -    old_pages = d->arch.shadow2_total_pages;
   3.672 +    old_pages = d->arch.shadow2.total_pages;
   3.673      if ( old_pages == 0 )
   3.674          if ( set_sh2_allocation(d, 256, NULL) != 0 ) /* Use at least 1MB */
   3.675          {
   3.676 @@ -2564,8 +2564,7 @@ void shadow2_teardown(struct domain *d)
   3.677          /* Release the shadow and monitor tables held by each vcpu */
   3.678          for_each_vcpu(d, v)
   3.679          {
   3.680 -            if ( v->arch.shadow2 )
   3.681 -                shadow2_detach_old_tables(v);
   3.682 +            shadow2_detach_old_tables(v);
   3.683              if ( shadow2_mode_external(d) )
   3.684              {
   3.685                  mfn = pagetable_get_mfn(v->arch.monitor_table);
   3.686 @@ -2576,34 +2575,34 @@ void shadow2_teardown(struct domain *d)
   3.687          }
   3.688      }
   3.689  
   3.690 -    if ( d->arch.shadow2_total_pages != 0 )
   3.691 +    if ( d->arch.shadow2.total_pages != 0 )
   3.692      {
   3.693          SHADOW2_PRINTK("teardown of domain %u starts."
   3.694                         "  Shadow pages total = %u, free = %u, p2m=%u\n",
   3.695                         d->domain_id,
   3.696 -                       d->arch.shadow2_total_pages, 
   3.697 -                       d->arch.shadow2_free_pages, 
   3.698 -                       d->arch.shadow2_p2m_pages);
   3.699 +                       d->arch.shadow2.total_pages, 
   3.700 +                       d->arch.shadow2.free_pages, 
   3.701 +                       d->arch.shadow2.p2m_pages);
   3.702          /* Destroy all the shadows and release memory to domheap */
   3.703          set_sh2_allocation(d, 0, NULL);
   3.704          /* Release the hash table back to xenheap */
   3.705 -        if (d->arch.shadow2_hash_table) 
   3.706 +        if (d->arch.shadow2.hash_table) 
   3.707              shadow2_hash_teardown(d);
   3.708          /* Release the log-dirty bitmap of dirtied pages */
   3.709          sh2_free_log_dirty_bitmap(d);
   3.710          /* Should not have any more memory held */
   3.711          SHADOW2_PRINTK("teardown done."
   3.712                         "  Shadow pages total = %u, free = %u, p2m=%u\n",
   3.713 -                       d->arch.shadow2_total_pages, 
   3.714 -                       d->arch.shadow2_free_pages, 
   3.715 -                       d->arch.shadow2_p2m_pages);
   3.716 -        ASSERT(d->arch.shadow2_total_pages == 0);
   3.717 +                       d->arch.shadow2.total_pages, 
   3.718 +                       d->arch.shadow2.free_pages, 
   3.719 +                       d->arch.shadow2.p2m_pages);
   3.720 +        ASSERT(d->arch.shadow2.total_pages == 0);
   3.721      }
   3.722  
   3.723      /* We leave the "permanent" shadow modes enabled, but clear the
   3.724       * log-dirty mode bit.  We don't want any more mark_dirty()
   3.725       * calls now that we've torn down the bitmap */
   3.726 -    d->arch.shadow2_mode &= ~SHM2_log_dirty;
   3.727 +    d->arch.shadow2.mode &= ~SHM2_log_dirty;
   3.728  
   3.729      shadow2_unlock(d);
   3.730  }
   3.731 @@ -2615,26 +2614,26 @@ void shadow2_final_teardown(struct domai
   3.732      SHADOW2_PRINTK("dom %u final teardown starts."
   3.733                     "  Shadow pages total = %u, free = %u, p2m=%u\n",
   3.734                     d->domain_id,
   3.735 -                   d->arch.shadow2_total_pages, 
   3.736 -                   d->arch.shadow2_free_pages, 
   3.737 -                   d->arch.shadow2_p2m_pages);
   3.738 +                   d->arch.shadow2.total_pages, 
   3.739 +                   d->arch.shadow2.free_pages, 
   3.740 +                   d->arch.shadow2.p2m_pages);
   3.741  
   3.742      /* Double-check that the domain didn't have any shadow memory.  
   3.743       * It is possible for a domain that never got domain_kill()ed
   3.744       * to get here with its shadow allocation intact. */
   3.745 -    if ( d->arch.shadow2_total_pages != 0 )
   3.746 +    if ( d->arch.shadow2.total_pages != 0 )
   3.747          shadow2_teardown(d);
   3.748  
   3.749      /* It is now safe to pull down the p2m map. */
   3.750 -    if ( d->arch.shadow2_p2m_pages != 0 )
   3.751 +    if ( d->arch.shadow2.p2m_pages != 0 )
   3.752          shadow2_p2m_teardown(d);
   3.753  
   3.754      SHADOW2_PRINTK("dom %u final teardown done."
   3.755                     "  Shadow pages total = %u, free = %u, p2m=%u\n",
   3.756                     d->domain_id,
   3.757 -                   d->arch.shadow2_total_pages, 
   3.758 -                   d->arch.shadow2_free_pages, 
   3.759 -                   d->arch.shadow2_p2m_pages);
   3.760 +                   d->arch.shadow2.total_pages, 
   3.761 +                   d->arch.shadow2.free_pages, 
   3.762 +                   d->arch.shadow2.p2m_pages);
   3.763  }
   3.764  
   3.765  static int shadow2_one_bit_enable(struct domain *d, u32 mode)
   3.766 @@ -2643,12 +2642,12 @@ static int shadow2_one_bit_enable(struct
   3.767      ASSERT(shadow2_lock_is_acquired(d));
   3.768  
   3.769      /* Sanity check the call */
   3.770 -    if ( d == current->domain || (d->arch.shadow2_mode & mode) )
   3.771 +    if ( d == current->domain || (d->arch.shadow2.mode & mode) )
   3.772      {
   3.773          return -EINVAL;
   3.774      }
   3.775  
   3.776 -    if ( d->arch.shadow2_mode == 0 )
   3.777 +    if ( d->arch.shadow2.mode == 0 )
   3.778      {
   3.779          /* Init the shadow memory allocation and the hash table */
   3.780          if ( set_sh2_allocation(d, 1, NULL) != 0 
   3.781 @@ -2660,7 +2659,7 @@ static int shadow2_one_bit_enable(struct
   3.782      }
   3.783  
   3.784      /* Update the bits */
   3.785 -    sh2_new_mode(d, d->arch.shadow2_mode | mode);
   3.786 +    sh2_new_mode(d, d->arch.shadow2.mode | mode);
   3.787  
   3.788      return 0;
   3.789  }
   3.790 @@ -2672,26 +2671,25 @@ static int shadow2_one_bit_disable(struc
   3.791      ASSERT(shadow2_lock_is_acquired(d));
   3.792  
   3.793      /* Sanity check the call */
   3.794 -    if ( d == current->domain || !(d->arch.shadow2_mode & mode) )
   3.795 +    if ( d == current->domain || !(d->arch.shadow2.mode & mode) )
   3.796      {
   3.797          return -EINVAL;
   3.798      }
   3.799  
   3.800      /* Update the bits */
   3.801 -    sh2_new_mode(d, d->arch.shadow2_mode & ~mode);
   3.802 -    if ( d->arch.shadow2_mode == 0 )
   3.803 +    sh2_new_mode(d, d->arch.shadow2.mode & ~mode);
   3.804 +    if ( d->arch.shadow2.mode == 0 )
   3.805      {
   3.806          /* Get this domain off shadows */
   3.807          SHADOW2_PRINTK("un-shadowing of domain %u starts."
   3.808                         "  Shadow pages total = %u, free = %u, p2m=%u\n",
   3.809                         d->domain_id,
   3.810 -                       d->arch.shadow2_total_pages, 
   3.811 -                       d->arch.shadow2_free_pages, 
   3.812 -                       d->arch.shadow2_p2m_pages);
   3.813 +                       d->arch.shadow2.total_pages, 
   3.814 +                       d->arch.shadow2.free_pages, 
   3.815 +                       d->arch.shadow2.p2m_pages);
   3.816          for_each_vcpu(d, v)
   3.817          {
   3.818 -            if ( v->arch.shadow2 )
   3.819 -                shadow2_detach_old_tables(v);
   3.820 +            shadow2_detach_old_tables(v);
   3.821  #if CONFIG_PAGING_LEVELS == 4
   3.822              if ( !(v->arch.flags & TF_kernel_mode) )
   3.823                  make_cr3(v, pagetable_get_pfn(v->arch.guest_table_user));
   3.824 @@ -2714,9 +2712,9 @@ static int shadow2_one_bit_disable(struc
   3.825          SHADOW2_PRINTK("un-shadowing of domain %u done."
   3.826                         "  Shadow pages total = %u, free = %u, p2m=%u\n",
   3.827                         d->domain_id,
   3.828 -                       d->arch.shadow2_total_pages, 
   3.829 -                       d->arch.shadow2_free_pages, 
   3.830 -                       d->arch.shadow2_p2m_pages);
   3.831 +                       d->arch.shadow2.total_pages, 
   3.832 +                       d->arch.shadow2.free_pages, 
   3.833 +                       d->arch.shadow2.p2m_pages);
   3.834      }
   3.835  
   3.836      return 0;
   3.837 @@ -2762,19 +2760,19 @@ int shadow2_test_disable(struct domain *
   3.838  static int
   3.839  sh2_alloc_log_dirty_bitmap(struct domain *d)
   3.840  {
   3.841 -    ASSERT(d->arch.shadow_dirty_bitmap == NULL);
   3.842 -    d->arch.shadow_dirty_bitmap_size =
   3.843 +    ASSERT(d->arch.shadow2.dirty_bitmap == NULL);
   3.844 +    d->arch.shadow2.dirty_bitmap_size =
   3.845          (d->shared_info->arch.max_pfn + (BITS_PER_LONG - 1)) &
   3.846          ~(BITS_PER_LONG - 1);
   3.847 -    d->arch.shadow_dirty_bitmap =
   3.848 +    d->arch.shadow2.dirty_bitmap =
   3.849          xmalloc_array(unsigned long,
   3.850 -                      d->arch.shadow_dirty_bitmap_size / BITS_PER_LONG);
   3.851 -    if ( d->arch.shadow_dirty_bitmap == NULL )
   3.852 +                      d->arch.shadow2.dirty_bitmap_size / BITS_PER_LONG);
   3.853 +    if ( d->arch.shadow2.dirty_bitmap == NULL )
   3.854      {
   3.855 -        d->arch.shadow_dirty_bitmap_size = 0;
   3.856 +        d->arch.shadow2.dirty_bitmap_size = 0;
   3.857          return -ENOMEM;
   3.858      }
   3.859 -    memset(d->arch.shadow_dirty_bitmap, 0, d->arch.shadow_dirty_bitmap_size/8);
   3.860 +    memset(d->arch.shadow2.dirty_bitmap, 0, d->arch.shadow2.dirty_bitmap_size/8);
   3.861  
   3.862      return 0;
   3.863  }
   3.864 @@ -2782,11 +2780,11 @@ sh2_alloc_log_dirty_bitmap(struct domain
   3.865  static void
   3.866  sh2_free_log_dirty_bitmap(struct domain *d)
   3.867  {
   3.868 -    d->arch.shadow_dirty_bitmap_size = 0;
   3.869 -    if ( d->arch.shadow_dirty_bitmap )
   3.870 +    d->arch.shadow2.dirty_bitmap_size = 0;
   3.871 +    if ( d->arch.shadow2.dirty_bitmap )
   3.872      {
   3.873 -        xfree(d->arch.shadow_dirty_bitmap);
   3.874 -        d->arch.shadow_dirty_bitmap = NULL;
   3.875 +        xfree(d->arch.shadow2.dirty_bitmap);
   3.876 +        d->arch.shadow2.dirty_bitmap = NULL;
   3.877      }
   3.878  }
   3.879  
   3.880 @@ -2968,11 +2966,11 @@ static int shadow2_log_dirty_op(struct d
   3.881      SHADOW2_DEBUG(LOGDIRTY, "log-dirty %s: dom %u faults=%u dirty=%u\n", 
   3.882                    (clean) ? "clean" : "peek",
   3.883                    d->domain_id,
   3.884 -                  d->arch.shadow_fault_count, 
   3.885 -                  d->arch.shadow_dirty_count);
   3.886 -
   3.887 -    sc->stats.fault_count = d->arch.shadow_fault_count;
   3.888 -    sc->stats.dirty_count = d->arch.shadow_dirty_count;    
   3.889 +                  d->arch.shadow2.fault_count, 
   3.890 +                  d->arch.shadow2.dirty_count);
   3.891 +
   3.892 +    sc->stats.fault_count = d->arch.shadow2.fault_count;
   3.893 +    sc->stats.dirty_count = d->arch.shadow2.dirty_count;    
   3.894          
   3.895      if ( clean ) 
   3.896      {
   3.897 @@ -2982,25 +2980,25 @@ static int shadow2_log_dirty_op(struct d
   3.898          /* Need to revoke write access to the domain's pages again. 
   3.899           * In future, we'll have a less heavy-handed approach to this, 
   3.900           * but for now, we just unshadow everything except Xen. */
   3.901 -        list_for_each_safe(l, t, &d->arch.shadow2_toplevel_shadows)
   3.902 +        list_for_each_safe(l, t, &d->arch.shadow2.toplevel_shadows)
   3.903          {
   3.904              pg = list_entry(l, struct page_info, list);
   3.905              shadow2_unhook_mappings(d->vcpu[0], page_to_mfn(pg));
   3.906          }
   3.907  
   3.908 -        d->arch.shadow_fault_count = 0;
   3.909 -        d->arch.shadow_dirty_count = 0;
   3.910 +        d->arch.shadow2.fault_count = 0;
   3.911 +        d->arch.shadow2.dirty_count = 0;
   3.912      }
   3.913  
   3.914      if ( guest_handle_is_null(sc->dirty_bitmap) ||
   3.915 -         (d->arch.shadow_dirty_bitmap == NULL) )
   3.916 +         (d->arch.shadow2.dirty_bitmap == NULL) )
   3.917      {
   3.918          rv = -EINVAL;
   3.919          goto out;
   3.920      }
   3.921   
   3.922 -    if ( sc->pages > d->arch.shadow_dirty_bitmap_size )
   3.923 -        sc->pages = d->arch.shadow_dirty_bitmap_size; 
   3.924 +    if ( sc->pages > d->arch.shadow2.dirty_bitmap_size )
   3.925 +        sc->pages = d->arch.shadow2.dirty_bitmap_size; 
   3.926  
   3.927  #define CHUNK (8*1024) /* Transfer and clear in 1kB chunks for L1 cache. */
   3.928      for ( i = 0; i < sc->pages; i += CHUNK )
   3.929 @@ -3012,7 +3010,7 @@ static int shadow2_log_dirty_op(struct d
   3.930          if ( copy_to_guest_offset(
   3.931                   sc->dirty_bitmap, 
   3.932                   i/(8*sizeof(unsigned long)),
   3.933 -                 d->arch.shadow_dirty_bitmap + (i/(8*sizeof(unsigned long))),
   3.934 +                 d->arch.shadow2.dirty_bitmap + (i/(8*sizeof(unsigned long))),
   3.935                   (bytes + sizeof(unsigned long) - 1) / sizeof(unsigned long)) )
   3.936          {
   3.937              rv = -EINVAL;
   3.938 @@ -3020,7 +3018,7 @@ static int shadow2_log_dirty_op(struct d
   3.939          }
   3.940  
   3.941          if ( clean )
   3.942 -            memset(d->arch.shadow_dirty_bitmap + (i/(8*sizeof(unsigned long))),
   3.943 +            memset(d->arch.shadow2.dirty_bitmap + (i/(8*sizeof(unsigned long))),
   3.944                     0, bytes);
   3.945      }
   3.946  #undef CHUNK
   3.947 @@ -3043,7 +3041,7 @@ void sh2_do_mark_dirty(struct domain *d,
   3.948      if ( !valid_mfn(gmfn) )
   3.949          return;
   3.950  
   3.951 -    ASSERT(d->arch.shadow_dirty_bitmap != NULL);
   3.952 +    ASSERT(d->arch.shadow2.dirty_bitmap != NULL);
   3.953  
   3.954      /* We /really/ mean PFN here, even for non-translated guests. */
   3.955      pfn = get_gpfn_from_mfn(mfn_x(gmfn));
   3.956 @@ -3057,14 +3055,14 @@ void sh2_do_mark_dirty(struct domain *d,
   3.957          return;
   3.958  
   3.959      /* N.B. Can use non-atomic TAS because protected by shadow2_lock. */
   3.960 -    if ( likely(pfn < d->arch.shadow_dirty_bitmap_size) ) 
   3.961 +    if ( likely(pfn < d->arch.shadow2.dirty_bitmap_size) ) 
   3.962      { 
   3.963 -        if ( !__test_and_set_bit(pfn, d->arch.shadow_dirty_bitmap) )
   3.964 +        if ( !__test_and_set_bit(pfn, d->arch.shadow2.dirty_bitmap) )
   3.965          {
   3.966              SHADOW2_DEBUG(LOGDIRTY, 
   3.967                            "marked mfn %" SH2_PRI_mfn " (pfn=%lx), dom %d\n",
   3.968                            mfn_x(gmfn), pfn, d->domain_id);
   3.969 -            d->arch.shadow_dirty_count++;
   3.970 +            d->arch.shadow2.dirty_count++;
   3.971          }
   3.972      }
   3.973      else
   3.974 @@ -3074,7 +3072,7 @@ void sh2_do_mark_dirty(struct domain *d,
   3.975                         "owner=%d c=%08x t=%" PRtype_info "\n",
   3.976                         mfn_x(gmfn), 
   3.977                         pfn, 
   3.978 -                       d->arch.shadow_dirty_bitmap_size,
   3.979 +                       d->arch.shadow2.dirty_bitmap_size,
   3.980                         d->domain_id,
   3.981                         (page_get_owner(mfn_to_page(gmfn))
   3.982                          ? page_get_owner(mfn_to_page(gmfn))->domain_id
   3.983 @@ -3106,7 +3104,7 @@ int shadow2_control_op(struct domain *d,
   3.984          if ( shadow2_mode_log_dirty(d) )
   3.985              if ( (rc = shadow2_log_dirty_disable(d)) != 0 ) 
   3.986                  return rc;
   3.987 -        if ( d->arch.shadow2_mode & SHM2_enable )
   3.988 +        if ( d->arch.shadow2.mode & SHM2_enable )
   3.989              if ( (rc = shadow2_test_disable(d)) != 0 ) 
   3.990                  return rc;
   3.991          return 0;
   3.992 @@ -3193,7 +3191,7 @@ void shadow2_audit_tables(struct vcpu *v
   3.993      else 
   3.994      {
   3.995          /* Audit only the current mode's tables */
   3.996 -        switch (v->arch.shadow2->guest_levels)
   3.997 +        switch ( v->arch.shadow2.mode->guest_levels )
   3.998          {
   3.999          case 2: mask = (SH2F_L1_32|SH2F_FL1_32|SH2F_L2_32); break;
  3.1000          case 3: mask = (SH2F_L1_PAE|SH2F_FL1_PAE|SH2F_L2_PAE
     4.1 --- a/xen/arch/x86/shadow2.c	Sat Aug 19 17:07:54 2006 +0100
     4.2 +++ b/xen/arch/x86/shadow2.c	Sun Aug 20 17:55:33 2006 +0100
     4.3 @@ -82,7 +82,7 @@
     4.4   * mappings (ugh! PAE linear mappings) and we copy it to the low-memory
     4.5   * buffer so it fits in CR3.  Maybe we can avoid some of this recopying 
     4.6   * by using the shadow directly in some places. 
     4.7 - * Also, for SMP, need to actually respond to seeing shadow2_pae_flip_pending.
     4.8 + * Also, for SMP, need to actually respond to seeing shadow2.pae_flip_pending.
     4.9   *
    4.10   * GUEST_WALK_TABLES TLB FLUSH COALESCE
    4.11   * guest_walk_tables can do up to three remote TLB flushes as it walks to
    4.12 @@ -1245,7 +1245,7 @@ static int shadow_set_l3e(struct vcpu *v
    4.13              if (info->vcpus & (1 << vcpu->vcpu_id))
    4.14              {
    4.15                  // Remember that this flip/update needs to occur.
    4.16 -                vcpu->arch.shadow2_pae_flip_pending = 1;
    4.17 +                vcpu->arch.shadow2.pae_flip_pending = 1;
    4.18                  flags |= SHADOW2_SET_L3PAE_RECOPY;
    4.19              }
    4.20          }
    4.21 @@ -2772,7 +2772,7 @@ sh2_map_and_validate_gl1e(struct vcpu *v
    4.22  static inline void check_for_early_unshadow(struct vcpu *v, mfn_t gmfn)
    4.23  {
    4.24  #if SHADOW2_OPTIMIZATIONS & SH2OPT_EARLY_UNSHADOW
    4.25 -    if ( v->arch.last_emulated_mfn == mfn_x(gmfn) &&
    4.26 +    if ( v->arch.shadow2.last_emulated_mfn == mfn_x(gmfn) &&
    4.27           sh2_mfn_is_a_page_table(gmfn) )
    4.28      {
    4.29          u32 flags = mfn_to_page(gmfn)->shadow2_flags;
    4.30 @@ -2807,7 +2807,7 @@ static inline void check_for_early_unsha
    4.31              }
    4.32          }
    4.33      }
    4.34 -    v->arch.last_emulated_mfn = mfn_x(gmfn);
    4.35 +    v->arch.shadow2.last_emulated_mfn = mfn_x(gmfn);
    4.36  #endif
    4.37  }
    4.38  
    4.39 @@ -2815,7 +2815,7 @@ static inline void check_for_early_unsha
    4.40  static inline void reset_early_unshadow(struct vcpu *v)
    4.41  {
    4.42  #if SHADOW2_OPTIMIZATIONS & SH2OPT_EARLY_UNSHADOW
    4.43 -    v->arch.last_emulated_mfn = INVALID_MFN;
    4.44 +    v->arch.shadow2.last_emulated_mfn = INVALID_MFN;
    4.45  #endif
    4.46  }
    4.47  
    4.48 @@ -3000,7 +3000,7 @@ static int sh2_page_fault(struct vcpu *v
    4.49  #endif
    4.50  
    4.51      perfc_incrc(shadow2_fault_fixed);
    4.52 -    d->arch.shadow_fault_count++;
    4.53 +    d->arch.shadow2.fault_count++;
    4.54      reset_early_unshadow(v);
    4.55  
    4.56   done:
    4.57 @@ -3026,7 +3026,7 @@ static int sh2_page_fault(struct vcpu *v
    4.58  
    4.59      SHADOW2_PRINTK("emulate: eip=%#lx\n", emul_regs.eip);
    4.60  
    4.61 -    v->arch.shadow2_propagate_fault = 0;
    4.62 +    v->arch.shadow2.propagate_fault = 0;
    4.63      if ( x86_emulate_memop(&emul_ctxt, &shadow2_emulator_ops) )
    4.64      {
    4.65          SHADOW2_PRINTK("emulator failure, unshadowing mfn %#lx\n", 
    4.66 @@ -3040,7 +3040,7 @@ static int sh2_page_fault(struct vcpu *v
    4.67           * guest to loop on the same page fault. */
    4.68          goto done;
    4.69      }
    4.70 -    if ( v->arch.shadow2_propagate_fault )
    4.71 +    if ( v->arch.shadow2.propagate_fault )
    4.72      {
    4.73          /* Emulation triggered another page fault */
    4.74          goto not_a_shadow_fault;
    4.75 @@ -3493,7 +3493,7 @@ void sh2_pae_recopy(struct domain *d)
    4.76      
    4.77      for_each_vcpu(d, v)
    4.78      {
    4.79 -        if ( !v->arch.shadow2_pae_flip_pending ) 
    4.80 +        if ( !v->arch.shadow2.pae_flip_pending ) 
    4.81              continue;
    4.82  
    4.83          cpu_set(v->processor, flush_mask);
    4.84 @@ -3526,7 +3526,7 @@ void sh2_pae_recopy(struct domain *d)
    4.85              }
    4.86          }
    4.87  #endif
    4.88 -        v->arch.shadow2_pae_flip_pending = 0;        
    4.89 +        v->arch.shadow2.pae_flip_pending = 0;        
    4.90      }
    4.91  
    4.92      flush_tlb_mask(flush_mask);
    4.93 @@ -3612,7 +3612,7 @@ sh2_update_cr3(struct vcpu *v)
    4.94  #endif
    4.95  
    4.96      ASSERT(shadow2_lock_is_acquired(v->domain));
    4.97 -    ASSERT(v->arch.shadow2);
    4.98 +    ASSERT(v->arch.shadow2.mode);
    4.99  
   4.100      ////
   4.101      //// vcpu->arch.guest_table is already set
   4.102 @@ -3713,7 +3713,7 @@ sh2_update_cr3(struct vcpu *v)
   4.103      {
   4.104          /* Pull this root shadow to the front of the list of roots. */
   4.105          list_del(&mfn_to_page(smfn)->list);
   4.106 -        list_add(&mfn_to_page(smfn)->list, &d->arch.shadow2_toplevel_shadows);
   4.107 +        list_add(&mfn_to_page(smfn)->list, &d->arch.shadow2.toplevel_shadows);
   4.108      }
   4.109      else
   4.110      {
   4.111 @@ -3725,7 +3725,7 @@ sh2_update_cr3(struct vcpu *v)
   4.112          shadow2_prealloc(d, SHADOW2_MAX_ORDER); 
   4.113          /* Shadow the page. */
   4.114          smfn = sh2_make_shadow(v, gmfn, PGC_SH2_guest_root_type);
   4.115 -        list_add(&mfn_to_page(smfn)->list, &d->arch.shadow2_toplevel_shadows);
   4.116 +        list_add(&mfn_to_page(smfn)->list, &d->arch.shadow2.toplevel_shadows);
   4.117      }
   4.118      ASSERT(valid_mfn(smfn));
   4.119      v->arch.shadow_table = pagetable_from_mfn(smfn);
   4.120 @@ -4082,7 +4082,7 @@ static inline void * emulate_map_dest(st
   4.121           || (!(flags & _PAGE_USER) && ring_3(ctxt->regs)) )
   4.122      {
   4.123          /* This write would have faulted even on bare metal */
   4.124 -        v->arch.shadow2_propagate_fault = 1;
   4.125 +        v->arch.shadow2.propagate_fault = 1;
   4.126          return NULL;
   4.127      }
   4.128      
   4.129 @@ -4458,7 +4458,7 @@ int sh2_audit_l4_table(struct vcpu *v, m
   4.130  /**************************************************************************/
   4.131  /* Entry points into this mode of the shadow code.
   4.132   * This will all be mangled by the preprocessor to uniquify everything. */
   4.133 -struct shadow2_entry_points shadow2_entry = {
   4.134 +struct shadow2_paging_mode sh2_paging_mode = {
   4.135      .page_fault             = sh2_page_fault, 
   4.136      .invlpg                 = sh2_invlpg,
   4.137      .gva_to_gpa             = sh2_gva_to_gpa,
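
For context on the "mangled by the preprocessor to uniquify everything" comment above: shadow2.c is compiled once per (shadow levels, guest levels) combination, and each build exports its own copy of this table under a distinct symbol. A minimal sketch of the token-pasting idiom involved, using a hypothetical macro shape (the real SHADOW2_INTERNAL_NAME in shadow2-types.h may differ in detail):

    /* Hypothetical sketch of per-build symbol uniquification. */
    #define SHADOW_LEVELS 3
    #define GUEST_LEVELS  3

    #define PASTE(n, s, g)          n ## __shadow_ ## s ## _guest_ ## g
    #define INTERNAL_NAME2(n, s, g) PASTE(n, s, g)
    #define INTERNAL_NAME(n)        INTERNAL_NAME2(n, SHADOW_LEVELS, GUEST_LEVELS)

    struct shadow2_paging_mode { int guest_levels; };

    /* "sh2_paging_mode" becomes "sh2_paging_mode__shadow_3_guest_3"
     * in this translation unit. */
    struct shadow2_paging_mode INTERNAL_NAME(sh2_paging_mode) = {
        .guest_levels = GUEST_LEVELS,
    };
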
     5.1 --- a/xen/arch/x86/traps.c	Sat Aug 19 17:07:54 2006 +0100
     5.2 +++ b/xen/arch/x86/traps.c	Sun Aug 20 17:55:33 2006 +0100
     5.3 @@ -923,13 +923,6 @@ asmlinkage int do_page_fault(struct cpu_
     5.4  
     5.5      perfc_incrc(page_faults);
     5.6  
     5.7 -    if ( shadow2_mode_enabled(current->domain) )
     5.8 -        debugtrace_printk("%s %s %d dom=%d eip=%p cr2=%p code=%d cs=%x\n",
     5.9 -                          __func__, __FILE__, __LINE__,
    5.10 -                          current->domain->domain_id,
    5.11 -                          (void *)regs->eip, (void *)addr, regs->error_code,
    5.12 -                          regs->cs);
    5.13 -
    5.14      if ( unlikely((rc = fixup_page_fault(addr, regs)) != 0) )
    5.15          return rc;
    5.16  
     6.1 --- a/xen/arch/x86/x86_32/traps.c	Sat Aug 19 17:07:54 2006 +0100
     6.2 +++ b/xen/arch/x86/x86_32/traps.c	Sun Aug 20 17:55:33 2006 +0100
     6.3 @@ -89,7 +89,8 @@ void show_page_walk(unsigned long addr)
     6.4      l3e = l3t[l3_table_offset(addr)];
     6.5      mfn = l3e_get_pfn(l3e);
     6.6      pfn = get_gpfn_from_mfn(mfn);
     6.7 -    printk(" L3 = %"PRIpte" %08lx\n", l3e_get_intpte(l3e), pfn);
     6.8 +    printk(" L3[0x%03lx] = %"PRIpte" %08lx\n",
     6.9 +           l3_table_offset(addr), l3e_get_intpte(l3e), pfn);
    6.10      unmap_domain_page(l3t);
    6.11      if ( !(l3e_get_flags(l3e) & _PAGE_PRESENT) )
    6.12          return;
    6.13 @@ -99,7 +100,8 @@ void show_page_walk(unsigned long addr)
    6.14      l2e = l2t[l2_table_offset(addr)];
    6.15      mfn = l2e_get_pfn(l2e);
    6.16      pfn = get_gpfn_from_mfn(mfn);
    6.17 -    printk("  L2 = %"PRIpte" %08lx %s\n", l2e_get_intpte(l2e), pfn, 
    6.18 +    printk(" L2[0x%03lx] = %"PRIpte" %08lx %s\n",
    6.19 +           l2_table_offset(addr), l2e_get_intpte(l2e), pfn,
    6.20             (l2e_get_flags(l2e) & _PAGE_PSE) ? "(PSE)" : "");
    6.21      unmap_domain_page(l2t);
    6.22      if ( !(l2e_get_flags(l2e) & _PAGE_PRESENT) ||
    6.23 @@ -110,7 +112,8 @@ void show_page_walk(unsigned long addr)
    6.24      l1e = l1t[l1_table_offset(addr)];
    6.25      mfn = l1e_get_pfn(l1e);
    6.26      pfn = get_gpfn_from_mfn(mfn);
    6.27 -    printk("   L1 = %"PRIpte" %08lx\n", l1e_get_intpte(l1e), pfn);
    6.28 +    printk(" L1[0x%03lx] = %"PRIpte" %08lx\n",
    6.29 +           l1_table_offset(addr), l1e_get_intpte(l1e), pfn);
    6.30      unmap_domain_page(l1t);
    6.31  }
    6.32  
     7.1 --- a/xen/arch/x86/x86_64/traps.c	Sat Aug 19 17:07:54 2006 +0100
     7.2 +++ b/xen/arch/x86/x86_64/traps.c	Sun Aug 20 17:55:33 2006 +0100
     7.3 @@ -84,7 +84,7 @@ void show_page_walk(unsigned long addr)
     7.4      l4e = l4t[l4_table_offset(addr)];
     7.5      mfn = l4e_get_pfn(l4e);
     7.6      pfn = get_gpfn_from_mfn(mfn);
     7.7 -    printk(" L4[0x%lx] = %"PRIpte" %016lx\n",
     7.8 +    printk(" L4[0x%03lx] = %"PRIpte" %016lx\n",
     7.9             l4_table_offset(addr), l4e_get_intpte(l4e), pfn);
    7.10      if ( !(l4e_get_flags(l4e) & _PAGE_PRESENT) )
    7.11          return;
    7.12 @@ -93,7 +93,7 @@ void show_page_walk(unsigned long addr)
    7.13      l3e = l3t[l3_table_offset(addr)];
    7.14      mfn = l3e_get_pfn(l3e);
    7.15      pfn = get_gpfn_from_mfn(mfn);
    7.16 -    printk("  L3[0x%lx] = %"PRIpte" %016lx\n",
    7.17 +    printk(" L3[0x%03lx] = %"PRIpte" %016lx\n",
    7.18             l3_table_offset(addr), l3e_get_intpte(l3e), pfn);
    7.19      if ( !(l3e_get_flags(l3e) & _PAGE_PRESENT) )
    7.20          return;
    7.21 @@ -102,7 +102,7 @@ void show_page_walk(unsigned long addr)
    7.22      l2e = l2t[l2_table_offset(addr)];
    7.23      mfn = l2e_get_pfn(l2e);
    7.24      pfn = get_gpfn_from_mfn(mfn);
    7.25 -    printk("   L2[0x%lx] = %"PRIpte" %016lx %s\n",
    7.26 +    printk(" L2[0x%03lx] = %"PRIpte" %016lx %s\n",
    7.27             l2_table_offset(addr), l2e_get_intpte(l2e), pfn,
    7.28             (l2e_get_flags(l2e) & _PAGE_PSE) ? "(PSE)" : "");
    7.29      if ( !(l2e_get_flags(l2e) & _PAGE_PRESENT) ||
    7.30 @@ -113,7 +113,7 @@ void show_page_walk(unsigned long addr)
    7.31      l1e = l1t[l1_table_offset(addr)];
    7.32      mfn = l1e_get_pfn(l1e);
    7.33      pfn = get_gpfn_from_mfn(mfn);
    7.34 -    printk("    L1[0x%lx] = %"PRIpte" %016lx\n",
    7.35 +    printk(" L1[0x%03lx] = %"PRIpte" %016lx\n",
    7.36             l1_table_offset(addr), l1e_get_intpte(l1e), pfn);
    7.37  }
    7.38  
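
The only change to these printk()s is the index width: %03lx zero-pads the table index to three hex digits so the L4/L3/L2/L1 lines align. A tiny standalone illustration of the difference (values are made up):

    #include <stdio.h>

    int main(void)
    {
        unsigned long idx = 0x1f;

        printf(" L4[0x%lx]\n", idx);    /* old: " L4[0x1f]"  (width varies) */
        printf(" L4[0x%03lx]\n", idx);  /* new: " L4[0x01f]" (fixed width)  */
        return 0;
    }
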
     8.1 --- a/xen/include/asm-x86/domain.h	Sat Aug 19 17:07:54 2006 +0100
     8.2 +++ b/xen/include/asm-x86/domain.h	Sun Aug 20 17:55:33 2006 +0100
     8.3 @@ -57,6 +57,34 @@ extern void toggle_guest_mode(struct vcp
     8.4   */
     8.5  extern void hypercall_page_initialise(struct domain *d, void *);
     8.6  
     8.7 +struct shadow_domain {
     8.8 +    u32               mode;  /* flags to control shadow operation */
     8.9 +    spinlock_t        lock;  /* shadow2 domain lock */
    8.10 +    int               locker; /* processor which holds the lock */
    8.11 +    const char       *locker_function; /* Func that took it */
    8.12 +    struct list_head  freelists[SHADOW2_MAX_ORDER + 1]; 
    8.13 +    struct list_head  p2m_freelist;
    8.14 +    struct list_head  p2m_inuse;
    8.15 +    struct list_head  toplevel_shadows;
    8.16 +    unsigned int      total_pages;  /* number of pages allocated */
    8.17 +    unsigned int      free_pages;   /* number of pages on freelists */
    8.18 +    unsigned int      p2m_pages;    /* number of pages in p2m map */
    8.19 +
    8.20 +    /* Shadow2 hashtable */
    8.21 +    struct shadow2_hash_entry *hash_table;
    8.22 +    struct shadow2_hash_entry *hash_freelist;
    8.23 +    struct shadow2_hash_entry *hash_allocations;
    8.24 +    int hash_walking;  /* Some function is walking the hash table */
    8.25 +
    8.26 +    /* Shadow log-dirty bitmap */
    8.27 +    unsigned long *dirty_bitmap;
    8.28 +    unsigned int dirty_bitmap_size;  /* in pages, bit per page */
    8.29 +
    8.30 +    /* Shadow log-dirty mode stats */
    8.31 +    unsigned int fault_count;
    8.32 +    unsigned int dirty_count;
    8.33 +};
    8.34 +
    8.35  struct arch_domain
    8.36  {
    8.37      l1_pgentry_t *mm_perdomain_pt;
    8.38 @@ -79,32 +107,7 @@ struct arch_domain
    8.39      /* Shadow-translated guest: Pseudophys base address of reserved area. */
    8.40      unsigned long first_reserved_pfn;
    8.41  
    8.42 -    /* Shadow2 stuff */
    8.43 -    u32               shadow2_mode;  /* flags to control shadow operation */
    8.44 -    spinlock_t        shadow2_lock;  /* shadow2 domain lock */
    8.45 -    int               shadow2_locker; /* processor which holds the lock */
    8.46 -    const char       *shadow2_locker_function; /* Func that took it */
    8.47 -    struct list_head  shadow2_freelists[SHADOW2_MAX_ORDER + 1]; 
    8.48 -    struct list_head  shadow2_p2m_freelist;
    8.49 -    struct list_head  shadow2_p2m_inuse;
    8.50 -    struct list_head  shadow2_toplevel_shadows;
    8.51 -    unsigned int      shadow2_total_pages;  /* number of pages allocated */
    8.52 -    unsigned int      shadow2_free_pages;   /* number of pages on freelists */
    8.53 -    unsigned int      shadow2_p2m_pages;    /* number of pages in p2m map */
    8.54 -
    8.55 -    /* Shadow2 hashtable */
    8.56 -    struct shadow2_hash_entry *shadow2_hash_table;
    8.57 -    struct shadow2_hash_entry *shadow2_hash_freelist;
    8.58 -    struct shadow2_hash_entry *shadow2_hash_allocations;
    8.59 -    int shadow2_hash_walking;  /* Some function is walking the hash table */
    8.60 -
    8.61 -    /* Shadow log-dirty bitmap */
    8.62 -    unsigned long *shadow_dirty_bitmap;
    8.63 -    unsigned int shadow_dirty_bitmap_size;  /* in pages, bit per page */
    8.64 -
    8.65 -    /* Shadow log-dirty mode stats */
    8.66 -    unsigned int shadow_fault_count;
    8.67 -    unsigned int shadow_dirty_count;
    8.68 +    struct shadow_domain shadow2;
    8.69  
    8.70      /* Shadow translated domain: P2M mapping */
    8.71      pagetable_t phys_table;
    8.72 @@ -130,6 +133,21 @@ struct pae_l3_cache { };
    8.73  #define pae_l3_cache_init(c) ((void)0)
    8.74  #endif
    8.75  
    8.76 +struct shadow_vcpu {
    8.77 +    /* Pointers to mode-specific entry points. */
    8.78 +    struct shadow2_paging_mode *mode;
    8.79 +    /* Last MFN that we emulated a write to. */
    8.80 +    unsigned long last_emulated_mfn;
    8.81 +    /* HVM guest: paging enabled (CR0.PG)?  */
    8.82 +    unsigned int hvm_paging_enabled:1;
    8.83 +    /* Emulated fault needs to be propagated to guest? */
    8.84 +    unsigned int propagate_fault:1;
    8.85 +#if CONFIG_PAGING_LEVELS >= 3
    8.86 +    /* Shadow update requires this PAE cpu to recopy/install its L3 table. */
    8.87 +    unsigned int pae_flip_pending:1;
    8.88 +#endif
    8.89 +};
    8.90 +
    8.91  struct arch_vcpu
    8.92  {
    8.93      /* Needs 16-byte aligment for FXSAVE/FXRSTOR. */
    8.94 @@ -183,17 +201,7 @@ struct arch_vcpu
    8.95      /* Current LDT details. */
    8.96      unsigned long shadow_ldt_mapcnt;
    8.97  
    8.98 -    /* Shadow2 stuff */
    8.99 -    /* -- pointers to mode-specific entry points */
   8.100 -    struct shadow2_entry_points *shadow2; 
   8.101 -    unsigned long last_emulated_mfn;    /* last mfn we emulated a write to */
   8.102 -    u8 shadow2_propagate_fault;         /* emulated fault needs to be */
   8.103 -                                        /* propagated to guest */
   8.104 -#if CONFIG_PAGING_LEVELS >= 3
   8.105 -    u8 shadow2_pae_flip_pending;        /* shadow update requires this PAE cpu
   8.106 -                                         * to recopy/install its L3 table.
   8.107 -                                         */
   8.108 -#endif
   8.109 +    struct shadow_vcpu shadow2;
   8.110  } __cacheline_aligned;
   8.111  
   8.112  /* shorthands to improve code legibility */
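
The domain.h change is mechanical: the flat shadow2_*/shadow_* fields move into the nested shadow_domain and shadow_vcpu structures, so call sites go from d->arch.shadow2_total_pages to d->arch.shadow2.total_pages and from v->arch.shadow2->... to v->arch.shadow2.mode->.... A cut-down, self-contained model of the new access pattern (hypothetical simplified types, not the real headers):

    struct shadow2_paging_mode { unsigned int guest_levels; };

    struct shadow_vcpu {
        struct shadow2_paging_mode *mode;  /* was v->arch.shadow2 (a pointer) */
        unsigned long last_emulated_mfn;   /* was v->arch.last_emulated_mfn   */
    };

    struct shadow_domain {
        unsigned int total_pages;          /* was d->arch.shadow2_total_pages */
    };

    struct arch_vcpu   { struct shadow_vcpu   shadow2; };
    struct arch_domain { struct shadow_domain shadow2; };
    struct vcpu        { struct arch_vcpu     arch;    };
    struct domain      { struct arch_domain   arch;    };

    /* After the patch, mode-specific data hangs off the embedded structs. */
    static unsigned int guest_levels(struct vcpu *v)
    {
        return v->arch.shadow2.mode->guest_levels;
    }

    static unsigned int pool_pages(struct domain *d)
    {
        return d->arch.shadow2.total_pages;
    }
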
     9.1 --- a/xen/include/asm-x86/shadow2-multi.h	Sat Aug 19 17:07:54 2006 +0100
     9.2 +++ b/xen/include/asm-x86/shadow2-multi.h	Sun Aug 20 17:55:33 2006 +0100
     9.3 @@ -112,5 +112,5 @@ SHADOW2_INTERNAL_NAME(sh2_destroy_monito
     9.4      (struct vcpu *v, mfn_t mmfn);
     9.5  #endif
     9.6  
     9.7 -extern struct shadow2_entry_points 
     9.8 -SHADOW2_INTERNAL_NAME(shadow2_entry, SHADOW_LEVELS, GUEST_LEVELS);
     9.9 +extern struct shadow2_paging_mode 
    9.10 +SHADOW2_INTERNAL_NAME(sh2_paging_mode, SHADOW_LEVELS, GUEST_LEVELS);
    10.1 --- a/xen/include/asm-x86/shadow2-private.h	Sat Aug 19 17:07:54 2006 +0100
    10.2 +++ b/xen/include/asm-x86/shadow2-private.h	Sun Aug 20 17:55:33 2006 +0100
    10.3 @@ -200,40 +200,40 @@ enum sh2_log_type { log_slow = 0, log_fa
    10.4  /* Alloc and zero the logs */
    10.5  static inline void sh2_init_log(struct vcpu *v) 
    10.6  {
    10.7 -    if ( unlikely(!v->arch.shadow2_action_log) ) 
    10.8 -        v->arch.shadow2_action_log = xmalloc_array(sh2_log_t, 2);
    10.9 -    ASSERT(v->arch.shadow2_action_log);
   10.10 -    memset(v->arch.shadow2_action_log, 0, 2 * sizeof (sh2_log_t));
   10.11 +    if ( unlikely(!v->arch.shadow2.action_log) ) 
   10.12 +        v->arch.shadow2.action_log = xmalloc_array(sh2_log_t, 2);
   10.13 +    ASSERT(v->arch.shadow2.action_log);
   10.14 +    memset(v->arch.shadow2.action_log, 0, 2 * sizeof (sh2_log_t));
   10.15  }
   10.16  
   10.17  /* Log an A&D-bit update */
   10.18  static inline void sh2_log_ad(struct vcpu *v, paddr_t e, unsigned int level)
   10.19  {
   10.20 -    v->arch.shadow2_action_log[v->arch.shadow2_action_index].ad[level] = e;
   10.21 +    v->arch.shadow2.action_log[v->arch.shadow2.action_index].ad[level] = e;
   10.22  }
   10.23  
   10.24  /* Log an MMIO address */
   10.25  static inline void sh2_log_mmio(struct vcpu *v, paddr_t m)
   10.26  {
   10.27 -    v->arch.shadow2_action_log[v->arch.shadow2_action_index].mmio = m;
   10.28 +    v->arch.shadow2.action_log[v->arch.shadow2.action_index].mmio = m;
   10.29  }
   10.30  
   10.31  /* Log the result */
   10.32  static inline void sh2_log_rv(struct vcpu *v, int rv)
   10.33  {
   10.34 -    v->arch.shadow2_action_log[v->arch.shadow2_action_index].rv = rv;
   10.35 +    v->arch.shadow2.action_log[v->arch.shadow2.action_index].rv = rv;
   10.36  }
   10.37  
   10.38  /* Set which mode we're in */
   10.39  static inline void sh2_set_log_mode(struct vcpu *v, enum sh2_log_type t) 
   10.40  {
   10.41 -    v->arch.shadow2_action_index = t;
   10.42 +    v->arch.shadow2.action_index = t;
   10.43  }
   10.44  
   10.45  /* Know not to take action, because we're only checking the mechanism */
   10.46  static inline int sh2_take_no_action(struct vcpu *v) 
   10.47  {
   10.48 -    return (v->arch.shadow2_action_index == log_fast);
   10.49 +    return (v->arch.shadow2.action_index == log_fast);
   10.50  }
   10.51  
   10.52  #else /* Non-paranoid mode: these logs do not exist */
   10.53 @@ -400,13 +400,13 @@ sh2_mfn_is_dirty(struct domain *d, mfn_t
   10.54  {
   10.55      unsigned long pfn;
   10.56      ASSERT(shadow2_mode_log_dirty(d));
   10.57 -    ASSERT(d->arch.shadow_dirty_bitmap != NULL);
   10.58 +    ASSERT(d->arch.shadow2.dirty_bitmap != NULL);
   10.59  
   10.60      /* We /really/ mean PFN here, even for non-translated guests. */
   10.61      pfn = get_gpfn_from_mfn(mfn_x(gmfn));
   10.62      if ( likely(VALID_M2P(pfn))
   10.63 -         && likely(pfn < d->arch.shadow_dirty_bitmap_size) 
   10.64 -         && test_bit(pfn, d->arch.shadow_dirty_bitmap) )
   10.65 +         && likely(pfn < d->arch.shadow2.dirty_bitmap_size) 
   10.66 +         && test_bit(pfn, d->arch.shadow2.dirty_bitmap) )
   10.67          return 1;
   10.68  
   10.69      return 0;
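
The sh2_mfn_is_dirty() hunk only renames the bitmap fields; the logic is unchanged: translate the MFN to a guest PFN, bounds-check it against the bitmap size (one bit per page), and test that bit. A minimal userspace sketch of the same check, with stand-ins for get_gpfn_from_mfn() and test_bit():

    #include <limits.h>
    #include <stdbool.h>

    #define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

    /* Stand-in for test_bit(): bit 'nr' of a long-array bitmap. */
    static bool bitmap_test(const unsigned long *bm, unsigned long nr)
    {
        return (bm[nr / BITS_PER_LONG] >> (nr % BITS_PER_LONG)) & 1;
    }

    /* 'pfn' is what get_gpfn_from_mfn() would have returned for the MFN. */
    static bool mfn_is_dirty(const unsigned long *dirty_bitmap,
                             unsigned long bitmap_size_in_bits,
                             unsigned long pfn)
    {
        return pfn < bitmap_size_in_bits && bitmap_test(dirty_bitmap, pfn);
    }
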
    11.1 --- a/xen/include/asm-x86/shadow2-types.h	Sat Aug 19 17:07:54 2006 +0100
    11.2 +++ b/xen/include/asm-x86/shadow2-types.h	Sun Aug 20 17:55:33 2006 +0100
    11.3 @@ -507,7 +507,7 @@ struct shadow2_walk_t
    11.4  #define sh2_unhook_32b_mappings     INTERNAL_NAME(sh2_unhook_32b_mappings)
    11.5  #define sh2_unhook_pae_mappings     INTERNAL_NAME(sh2_unhook_pae_mappings)
    11.6  #define sh2_unhook_64b_mappings     INTERNAL_NAME(sh2_unhook_64b_mappings)
    11.7 -#define shadow2_entry               INTERNAL_NAME(shadow2_entry)
    11.8 +#define sh2_paging_mode             INTERNAL_NAME(sh2_paging_mode)
    11.9  #define sh2_detach_old_tables       INTERNAL_NAME(sh2_detach_old_tables)
   11.10  #define sh2_x86_emulate_write       INTERNAL_NAME(sh2_x86_emulate_write)
   11.11  #define sh2_x86_emulate_cmpxchg     INTERNAL_NAME(sh2_x86_emulate_cmpxchg)
    12.1 --- a/xen/include/asm-x86/shadow2.h	Sat Aug 19 17:07:54 2006 +0100
    12.2 +++ b/xen/include/asm-x86/shadow2.h	Sun Aug 20 17:55:33 2006 +0100
    12.3 @@ -43,11 +43,11 @@
    12.4   * requires VT or similar mechanisms */
    12.5  #define SHM2_external  (DOM0_SHADOW2_CONTROL_FLAG_EXTERNAL << SHM2_shift)
    12.6  
    12.7 -#define shadow2_mode_enabled(_d)   ((_d)->arch.shadow2_mode)
    12.8 -#define shadow2_mode_refcounts(_d) ((_d)->arch.shadow2_mode & SHM2_refcounts)
    12.9 -#define shadow2_mode_log_dirty(_d) ((_d)->arch.shadow2_mode & SHM2_log_dirty)
   12.10 -#define shadow2_mode_translate(_d) ((_d)->arch.shadow2_mode & SHM2_translate)
   12.11 -#define shadow2_mode_external(_d)  ((_d)->arch.shadow2_mode & SHM2_external)
   12.12 +#define shadow2_mode_enabled(_d)   ((_d)->arch.shadow2.mode)
   12.13 +#define shadow2_mode_refcounts(_d) ((_d)->arch.shadow2.mode & SHM2_refcounts)
   12.14 +#define shadow2_mode_log_dirty(_d) ((_d)->arch.shadow2.mode & SHM2_log_dirty)
   12.15 +#define shadow2_mode_translate(_d) ((_d)->arch.shadow2.mode & SHM2_translate)
   12.16 +#define shadow2_mode_external(_d)  ((_d)->arch.shadow2.mode & SHM2_external)
   12.17  
   12.18  /* Xen traps & emulates all reads of all page table pages:
   12.19   *not yet supported
   12.20 @@ -92,34 +92,34 @@
   12.21  
   12.22  #define shadow2_lock_init(_d)                                   \
   12.23      do {                                                        \
   12.24 -        spin_lock_init(&(_d)->arch.shadow2_lock);               \
   12.25 -        (_d)->arch.shadow2_locker = -1;                         \
   12.26 -        (_d)->arch.shadow2_locker_function = "nobody";          \
   12.27 +        spin_lock_init(&(_d)->arch.shadow2.lock);               \
   12.28 +        (_d)->arch.shadow2.locker = -1;                         \
   12.29 +        (_d)->arch.shadow2.locker_function = "nobody";          \
   12.30      } while (0)
   12.31  
   12.32  #define shadow2_lock_is_acquired(_d)                            \
   12.33 -    (current->processor == (_d)->arch.shadow2_locker)
   12.34 +    (current->processor == (_d)->arch.shadow2.locker)
   12.35  
   12.36  #define shadow2_lock(_d)                                                 \
   12.37      do {                                                                 \
   12.38 -        if ( unlikely((_d)->arch.shadow2_locker == current->processor) ) \
   12.39 +        if ( unlikely((_d)->arch.shadow2.locker == current->processor) ) \
   12.40          {                                                                \
   12.41              printk("Error: shadow2 lock held by %s\n",                   \
   12.42 -                   (_d)->arch.shadow2_locker_function);                  \
   12.43 +                   (_d)->arch.shadow2.locker_function);                  \
   12.44              BUG();                                                       \
   12.45          }                                                                \
   12.46 -        spin_lock(&(_d)->arch.shadow2_lock);                             \
   12.47 -        ASSERT((_d)->arch.shadow2_locker == -1);                         \
   12.48 -        (_d)->arch.shadow2_locker = current->processor;                  \
   12.49 -        (_d)->arch.shadow2_locker_function = __func__;                   \
   12.50 +        spin_lock(&(_d)->arch.shadow2.lock);                             \
   12.51 +        ASSERT((_d)->arch.shadow2.locker == -1);                         \
   12.52 +        (_d)->arch.shadow2.locker = current->processor;                  \
   12.53 +        (_d)->arch.shadow2.locker_function = __func__;                   \
   12.54      } while (0)
   12.55  
   12.56  #define shadow2_unlock(_d)                                              \
   12.57      do {                                                                \
   12.58 -        ASSERT((_d)->arch.shadow2_locker == current->processor);        \
   12.59 -        (_d)->arch.shadow2_locker = -1;                                 \
   12.60 -        (_d)->arch.shadow2_locker_function = "nobody";                  \
   12.61 -        spin_unlock(&(_d)->arch.shadow2_lock);                          \
   12.62 +        ASSERT((_d)->arch.shadow2.locker == current->processor);        \
   12.63 +        (_d)->arch.shadow2.locker = -1;                                 \
   12.64 +        (_d)->arch.shadow2.locker_function = "nobody";                  \
   12.65 +        spin_unlock(&(_d)->arch.shadow2.lock);                          \
   12.66      } while (0)
   12.67  
   12.68  /* 
   12.69 @@ -232,7 +232,7 @@ shadow2_vcpu_mode_translate(struct vcpu 
   12.70      // enabled.  (HVM vcpu's with paging disabled are using the p2m table as
   12.71      // its paging table, so no translation occurs in this case.)
   12.72      //
   12.73 -    return v->vcpu_flags & VCPUF_shadow2_translate;
   12.74 +    return v->arch.shadow2.hvm_paging_enabled;
   12.75  }
   12.76  
   12.77  
   12.78 @@ -240,7 +240,7 @@ shadow2_vcpu_mode_translate(struct vcpu 
   12.79  /* Mode-specific entry points into the shadow code */
   12.80  
   12.81  struct x86_emulate_ctxt;
   12.82 -struct shadow2_entry_points {
   12.83 +struct shadow2_paging_mode {
   12.84      int           (*page_fault            )(struct vcpu *v, unsigned long va,
   12.85                                              struct cpu_user_regs *regs);
   12.86      int           (*invlpg                )(struct vcpu *v, unsigned long va);
   12.87 @@ -285,8 +285,8 @@ struct shadow2_entry_points {
   12.88  
   12.89  static inline int shadow2_guest_paging_levels(struct vcpu *v)
   12.90  {
   12.91 -    ASSERT(v->arch.shadow2 != NULL);
   12.92 -    return v->arch.shadow2->guest_levels;
   12.93 +    ASSERT(v->arch.shadow2.mode != NULL);
   12.94 +    return v->arch.shadow2.mode->guest_levels;
   12.95  }
   12.96  
   12.97  /**************************************************************************/
   12.98 @@ -337,7 +337,7 @@ shadow2_fault(unsigned long va, struct c
   12.99  {
  12.100      struct vcpu *v = current;
  12.101      perfc_incrc(shadow2_fault);
  12.102 -    return v->arch.shadow2->page_fault(v, va, regs);
  12.103 +    return v->arch.shadow2.mode->page_fault(v, va, regs);
  12.104  }
  12.105  
  12.106  static inline int
  12.107 @@ -346,7 +346,7 @@ shadow2_invlpg(struct vcpu *v, unsigned 
  12.108   * instruction should be issued on the hardware, or 0 if it's safe not
  12.109   * to do so. */
  12.110  {
  12.111 -    return v->arch.shadow2->invlpg(v, va);
  12.112 +    return v->arch.shadow2.mode->invlpg(v, va);
  12.113  }
  12.114  
  12.115  static inline unsigned long
  12.116 @@ -354,7 +354,7 @@ shadow2_gva_to_gpa(struct vcpu *v, unsig
  12.117  /* Called to translate a guest virtual address to what the *guest*
  12.118   * pagetables would map it to. */
  12.119  {
  12.120 -    return v->arch.shadow2->gva_to_gpa(v, va);
  12.121 +    return v->arch.shadow2.mode->gva_to_gpa(v, va);
  12.122  }
  12.123  
  12.124  static inline unsigned long
  12.125 @@ -362,7 +362,7 @@ shadow2_gva_to_gfn(struct vcpu *v, unsig
  12.126  /* Called to translate a guest virtual address to what the *guest*
  12.127   * pagetables would map it to. */
  12.128  {
  12.129 -    return v->arch.shadow2->gva_to_gfn(v, va);
  12.130 +    return v->arch.shadow2.mode->gva_to_gfn(v, va);
  12.131  }
  12.132  
  12.133  static inline void
  12.134 @@ -371,7 +371,7 @@ shadow2_update_cr3(struct vcpu *v)
  12.135   * Called when the guest changes CR3. */
  12.136  {
  12.137      shadow2_lock(v->domain);
  12.138 -    v->arch.shadow2->update_cr3(v);
  12.139 +    v->arch.shadow2.mode->update_cr3(v);
  12.140      shadow2_unlock(v->domain);
  12.141  }
  12.142  
  12.143 @@ -425,19 +425,20 @@ static inline void shadow2_update_paging
  12.144  static inline void
  12.145  shadow2_detach_old_tables(struct vcpu *v)
  12.146  {
  12.147 -    v->arch.shadow2->detach_old_tables(v);
  12.148 +    if ( v->arch.shadow2.mode )
  12.149 +        v->arch.shadow2.mode->detach_old_tables(v);
  12.150  }
  12.151  
  12.152  static inline mfn_t
  12.153  shadow2_make_monitor_table(struct vcpu *v)
  12.154  {
  12.155 -    return v->arch.shadow2->make_monitor_table(v);
  12.156 +    return v->arch.shadow2.mode->make_monitor_table(v);
  12.157  }
  12.158  
  12.159  static inline void
  12.160  shadow2_destroy_monitor_table(struct vcpu *v, mfn_t mmfn)
  12.161  {
  12.162 -    v->arch.shadow2->destroy_monitor_table(v, mmfn);
  12.163 +    v->arch.shadow2.mode->destroy_monitor_table(v, mmfn);
  12.164  }
  12.165  
  12.166  /* Validate a pagetable change from the guest and update the shadows. */
  12.167 @@ -526,7 +527,7 @@ unsigned int shadow2_set_allocation(stru
  12.168  /* Return the size of the shadow2 pool, rounded up to the nearest MB */
  12.169  static inline unsigned int shadow2_get_allocation(struct domain *d)
  12.170  {
  12.171 -    unsigned int pg = d->arch.shadow2_total_pages;
  12.172 +    unsigned int pg = d->arch.shadow2.total_pages;
  12.173      return ((pg >> (20 - PAGE_SHIFT))
  12.174              + ((pg & ((1 << (20 - PAGE_SHIFT)) - 1)) ? 1 : 0));
  12.175  }
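
Two things worth noting about the shadow2.h change: every inline wrapper now dispatches through v->arch.shadow2.mode (and shadow2_detach_old_tables() now tolerates a NULL mode pointer), and shadow2_get_allocation() still rounds the page count up to whole megabytes: pg >> (20 - PAGE_SHIFT) counts complete MBs, and the masked term adds one for any remainder. A standalone check of that arithmetic, assuming 4 KiB pages (PAGE_SHIFT == 12):

    #include <assert.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12   /* assumption: 4 KiB pages */

    static unsigned int pages_to_mb_rounded_up(unsigned int pg)
    {
        return (pg >> (20 - PAGE_SHIFT))
             + ((pg & ((1u << (20 - PAGE_SHIFT)) - 1)) ? 1 : 0);
    }

    int main(void)
    {
        assert(pages_to_mb_rounded_up(0)   == 0);
        assert(pages_to_mb_rounded_up(1)   == 1);  /* any remainder rounds up    */
        assert(pages_to_mb_rounded_up(256) == 1);  /* 256 * 4 KiB = exactly 1 MB */
        assert(pages_to_mb_rounded_up(257) == 2);
        printf("rounding checks passed\n");
        return 0;
    }
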
    13.1 --- a/xen/include/xen/sched.h	Sat Aug 19 17:07:54 2006 +0100
    13.2 +++ b/xen/include/xen/sched.h	Sun Aug 20 17:55:33 2006 +0100
    13.3 @@ -379,9 +379,6 @@ extern struct domain *domain_list;
    13.4  /* VCPU is blocked awaiting an event to be consumed by Xen. */
    13.5  #define _VCPUF_blocked_in_xen  12
    13.6  #define VCPUF_blocked_in_xen   (1UL<<_VCPUF_blocked_in_xen)
    13.7 - /* HVM vcpu thinks CR0.PG == 0 */
    13.8 -#define _VCPUF_shadow2_translate 13
    13.9 -#define VCPUF_shadow2_translate  (1UL<<_VCPUF_shadow2_translate)
   13.10  
   13.11  /*
   13.12   * Per-domain flags (domain_flags).