ia64/xen-unstable

changeset 3826:3f4b97105a33

bitkeeper revision 1.1199 (42100c75YrRV-rqA2PA8zYLZf22hrw)

Further shadow_mode cleanups in preparation for the new
implementation of translate mode.
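
The heart of the cleanup is in the xen/include/asm-x86/shadow.h hunk below:
the old SHM_* values (SHM_test, SHM_logdirty, SHM_full_32, ...) were
mutually exclusive integers compared with ==, whereas the new SHM_* values
are independent bits that combine with | and are tested through per-property
predicate macros. A minimal, self-contained sketch of the new scheme (the
struct below is a stand-in for illustration, not Xen's real arch_domain):

    #include <stdio.h>

    #define SHM_enable    (1<<0) /* we're in one of the shadow modes */
    #define SHM_log_dirty (1<<1) /* enable log dirty mode */
    #define SHM_translate (1<<2) /* do p2m translation on guest tables */
    #define SHM_external  (1<<3) /* external page table, not used by Xen */

    struct domain { unsigned int shadow_mode; };

    #define shadow_mode_enabled(d)   ((d)->shadow_mode)
    #define shadow_mode_log_dirty(d) ((d)->shadow_mode & SHM_log_dirty)
    #define shadow_mode_translate(d) ((d)->shadow_mode & SHM_translate)
    #define shadow_mode_external(d)  ((d)->shadow_mode & SHM_external)

    int main(void)
    {
        /* A VMX guest, as set up in the domain.c hunk below: */
        struct domain d = { SHM_enable|SHM_translate|SHM_external };

        /* The properties are orthogonal, so log-dirty can be layered on
         * later (cf. the ENABLE_LOGDIRTY case in shadow.c) without
         * clobbering the translate/external bits. */
        d.shadow_mode |= SHM_log_dirty;

        printf("enabled=%d translate=%d external=%d log_dirty=%d\n",
               !!shadow_mode_enabled(&d), !!shadow_mode_translate(&d),
               !!shadow_mode_external(&d), !!shadow_mode_log_dirty(&d));
        return 0;
    }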

Signed-off-by: ian@xensource.com
author iap10@freefall.cl.cam.ac.uk
date Mon Feb 14 02:27:01 2005 +0000 (2005-02-14)
parents 758a7900615a
children 33647e6b3f4e
files xen/arch/x86/domain.c xen/arch/x86/mm.c xen/arch/x86/shadow.c xen/arch/x86/x86_32/domain_build.c xen/include/asm-x86/shadow.h xen/include/public/dom0_ops.h
line diff
     1.1 --- a/xen/arch/x86/domain.c	Sun Feb 13 22:00:50 2005 +0000
     1.2 +++ b/xen/arch/x86/domain.c	Mon Feb 14 02:27:01 2005 +0000
     1.3 @@ -404,10 +404,15 @@ static int vmx_final_setup_guest(struct 
     1.4  
     1.5          /* Put the domain in shadow mode even though we're going to be using
     1.6           * the shared 1:1 page table initially. It shouldn't hurt */
     1.7 -        shadow_mode_enable(ed->domain, SHM_full_32);
     1.8 +        shadow_mode_enable(ed->domain, SHM_enable|SHM_translate|SHM_external);
     1.9      }
    1.10  
    1.11 -    update_pagetables(ed);     /* this assigns shadow_pagetable */
    1.12 +    /* We don't call update_pagetables() as we actively want fields such as 
     1.13 +     * the linear_pg_table to be null so that we bail out early from
    1.14 +     * shadow_fault in case the vmx guest tries illegal accesses with
     1.15 +     * paging turned off.
    1.16 +     */
    1.17 +    //update_pagetables(ed);     /* this assigns shadow_pagetable */
    1.18      alloc_monitor_pagetable(ed); /* this assigns monitor_pagetable */
    1.19  
    1.20      return 0;
    1.21 @@ -502,11 +507,7 @@ int arch_final_setup_guest(
    1.22          return vmx_final_setup_guest(d, c);
    1.23  #endif
    1.24  
    1.25 -    /* We don't call update_pagetables() as we actively want fields such as 
     1.26 -     * the linear_pg_table to be null so that we bail out early from
    1.27 -     * shadow_fault in case the vmx guest tries illegal accesses with
     1.28 -     * paging turned off.
    1.29 -     */
    1.30 +    update_pagetables(d);
    1.31  
    1.32      return 0;
    1.33  }
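
The comment added in vmx_final_setup_guest() above relies on shadow_fault
bailing out while the pagetable fields are still null. A simplified,
self-contained sketch of that early-out (the field name and return
convention are hypothetical; the real shadow_fault lives in
xen/arch/x86/shadow.c and is not part of this diff):

    struct arch_exec_domain { unsigned long *linear_pg_table; };
    struct exec_domain { struct arch_exec_domain arch; };

    /* With update_pagetables() skipped, nothing is mapped yet, so any
     * fault taken while the VMX guest still has paging off is rejected
     * before any shadow state is touched. */
    static int shadow_fault(unsigned long va, struct exec_domain *ed)
    {
        (void)va;
        if ( ed->arch.linear_pg_table == NULL )
            return 0;  /* bail out early: fail the access */
        /* ... normal shadow fault handling would follow ... */
        return 1;
    }

    int main(void)
    {
        struct exec_domain ed = { { 0 } };
        return shadow_fault(0xdeadbeef, &ed);  /* 0: early bail-out */
    }
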
     2.1 --- a/xen/arch/x86/mm.c	Sun Feb 13 22:00:50 2005 +0000
     2.2 +++ b/xen/arch/x86/mm.c	Mon Feb 14 02:27:01 2005 +0000
     2.3 @@ -1052,7 +1052,7 @@ void free_page_type(struct pfn_info *pag
     2.4          BUG();
     2.5      }
     2.6  
     2.7 -    if ( unlikely(shadow_mode(d)) && 
     2.8 +    if ( unlikely(shadow_mode_enabled(d)) && 
     2.9           (get_shadow_status(d, page_to_pfn(page)) & PSH_shadowed) )
    2.10      {
    2.11          unshadow_table(page_to_pfn(page), type);
    2.12 @@ -1653,9 +1653,12 @@ int do_mmu_update(
    2.13  
    2.14      cleanup_writable_pagetable(d);
    2.15  
    2.16 -    if ( unlikely(shadow_mode(d)) )
    2.17 +    if ( unlikely(shadow_mode_enabled(d)) )
    2.18          check_pagetable(d, ed->arch.guest_table, "pre-mmu"); /* debug */
    2.19  
     2.20 +    if ( unlikely(shadow_mode_translate(d)) )
    2.21 +        domain_crash();
    2.22 +
    2.23      /*
    2.24       * If we are resuming after preemption, read how much work we have already
    2.25       * done. This allows us to set the @done output parameter correctly.
    2.26 @@ -1750,7 +1753,7 @@ int do_mmu_update(
    2.27                      okay = mod_l1_entry((l1_pgentry_t *)va, 
    2.28                                          mk_l1_pgentry(req.val)); 
    2.29  
    2.30 -                    if ( unlikely(shadow_mode(d)) && okay &&
    2.31 +                    if ( unlikely(shadow_mode_enabled(d)) && okay &&
    2.32                           (get_shadow_status(d, page-frame_table) &
    2.33                            PSH_shadowed) )
    2.34                      {
    2.35 @@ -1769,7 +1772,7 @@ int do_mmu_update(
    2.36                                          mk_l2_pgentry(req.val),
    2.37                                          pfn); 
    2.38  
    2.39 -                    if ( unlikely(shadow_mode(d)) && okay &&
    2.40 +                    if ( unlikely(shadow_mode_enabled(d)) && okay &&
    2.41                           (get_shadow_status(d, page-frame_table) & 
    2.42                            PSH_shadowed) )
    2.43                      {
    2.44 @@ -1788,7 +1791,7 @@ int do_mmu_update(
    2.45                                          mk_l3_pgentry(req.val),
    2.46                                          pfn); 
    2.47  
    2.48 -                    if ( unlikely(shadow_mode(d)) && okay &&
    2.49 +                    if ( unlikely(shadow_mode_enabled(d)) && okay &&
    2.50                           (get_shadow_status(d, page-frame_table) & 
    2.51                            PSH_shadowed) )
    2.52                      {
    2.53 @@ -1806,7 +1809,7 @@ int do_mmu_update(
    2.54                                          mk_l4_pgentry(req.val),
    2.55                                          pfn); 
    2.56  
    2.57 -                    if ( unlikely(shadow_mode(d)) && okay &&
    2.58 +                    if ( unlikely(shadow_mode_enabled(d)) && okay &&
    2.59                           (get_shadow_status(d, page-frame_table) & 
    2.60                            PSH_shadowed) )
    2.61                      {
    2.62 @@ -1845,7 +1848,7 @@ int do_mmu_update(
    2.63               * If in log-dirty mode, mark the corresponding pseudo-physical
    2.64               * page as dirty.
    2.65               */
    2.66 -            if ( unlikely(shadow_mode(d) == SHM_logdirty) && 
    2.67 +            if ( unlikely(shadow_mode_log_dirty(d)) && 
    2.68                   mark_dirty(d, pfn) )
    2.69                  d->arch.shadow_dirty_block_count++;
    2.70  
    2.71 @@ -1901,7 +1904,7 @@ int do_mmu_update(
    2.72      if ( unlikely(pdone != NULL) )
    2.73          __put_user(done + i, pdone);
    2.74  
    2.75 -    if ( unlikely(shadow_mode(d)) )
    2.76 +    if ( unlikely(shadow_mode_enabled(d)) )
    2.77          check_pagetable(d, ed->arch.guest_table, "post-mmu"); /* debug */
    2.78  
    2.79      UNLOCK_BIGLOCK(d);
    2.80 @@ -1924,6 +1927,9 @@ int do_update_va_mapping(unsigned long v
    2.81      if ( unlikely(!__addr_ok(va)) )
    2.82          return -EINVAL;
    2.83  
     2.84 +    if ( unlikely(shadow_mode_translate(d)) )
    2.85 +        domain_crash();
    2.86 +
    2.87      LOCK_BIGLOCK(d);
    2.88  
    2.89      cleanup_writable_pagetable(d);
    2.90 @@ -1937,7 +1943,7 @@ int do_update_va_mapping(unsigned long v
    2.91                                  mk_l1_pgentry(val))) )
    2.92          err = -EINVAL;
    2.93  
    2.94 -    if ( unlikely(shadow_mode(d)) )
    2.95 +    if ( unlikely(shadow_mode_enabled(d)) )
    2.96      {
    2.97          unsigned long sval = 0;
    2.98  
    2.99 @@ -1974,7 +1980,7 @@ int do_update_va_mapping(unsigned long v
   2.100           * the PTE in the PT-holding page. We need the machine frame number
   2.101           * for this.
   2.102           */
   2.103 -        if ( shadow_mode(d) == SHM_logdirty )
   2.104 +        if ( shadow_mode_log_dirty(d) )
   2.105              mark_dirty(d, va_to_l1mfn(va));
   2.106    
   2.107          check_pagetable(d, ed->arch.guest_table, "va"); /* debug */
   2.108 @@ -2247,7 +2253,7 @@ void ptwr_flush(const int which)
   2.109                  PTWR_PRINT_WHICH, ptep, pte);
   2.110      pte &= ~_PAGE_RW;
   2.111  
   2.112 -    if ( unlikely(shadow_mode(d)) )
   2.113 +    if ( unlikely(shadow_mode_enabled(d)) )
   2.114      {
   2.115          /* Write-protect the p.t. page in the shadow page table. */
   2.116          l1pte_propagate_from_guest(d, &pte, &spte);
   2.117 @@ -2339,7 +2345,7 @@ void ptwr_flush(const int which)
   2.118       * STEP 3. Reattach the L1 p.t. page into the current address space.
   2.119       */
   2.120  
   2.121 -    if ( (which == PTWR_PT_ACTIVE) && likely(!shadow_mode(d)) )
   2.122 +    if ( (which == PTWR_PT_ACTIVE) && likely(!shadow_mode_enabled(d)) )
   2.123      {
   2.124          pl2e = &linear_l2_table[ptwr_info[cpu].ptinfo[which].l2_idx];
   2.125          *pl2e = mk_l2_pgentry(l2_pgentry_val(*pl2e) | _PAGE_PRESENT); 
   2.126 @@ -2448,7 +2454,7 @@ int ptwr_do_page_fault(unsigned long add
   2.127      
   2.128      /* For safety, disconnect the L1 p.t. page from current space. */
   2.129      if ( (which == PTWR_PT_ACTIVE) && 
   2.130 -         likely(!shadow_mode(current->domain)) )
   2.131 +         likely(!shadow_mode_enabled(current->domain)) )
   2.132      {
   2.133          *pl2e = mk_l2_pgentry(l2e & ~_PAGE_PRESENT);
   2.134  #if 1
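
Two patterns recur in the mm.c hunks above: equality tests of the form
shadow_mode(d) == SHM_logdirty become bit tests via shadow_mode_log_dirty(d),
and paths that cannot yet cope with translate mode (do_mmu_update,
do_update_va_mapping) crash the domain up front. The first change is more
than renaming: once modes are flag bits, an equality test silently fails
for combined modes. A standalone demonstration (reusing the SHM_* values
from the shadow.h hunk below):

    #include <assert.h>

    #define SHM_enable    (1<<0)
    #define SHM_log_dirty (1<<1)
    #define SHM_translate (1<<2)

    int main(void)
    {
        /* A domain that both translates and logs dirty pages. */
        unsigned int mode = SHM_enable|SHM_translate|SHM_log_dirty;

        assert( !(mode == SHM_log_dirty) );  /* old-style test misses it */
        assert( mode & SHM_log_dirty );      /* new bit test does not */
        return 0;
    }
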
     3.1 --- a/xen/arch/x86/shadow.c	Sun Feb 13 22:00:50 2005 +0000
     3.2 +++ b/xen/arch/x86/shadow.c	Mon Feb 14 02:27:01 2005 +0000
     3.3 @@ -57,6 +57,8 @@ void free_shadow_state(struct domain *d)
     3.4  
     3.5      shadow_audit(d, 1);
     3.6  
      3.7 +    if ( !d->arch.shadow_ht ) return;
     3.8 +
     3.9      /* Free each hash chain in turn. */
    3.10      for ( i = 0; i < shadow_ht_buckets; i++ )
    3.11      {
    3.12 @@ -114,7 +116,7 @@ static inline int clear_shadow_page(
    3.13          /* We clear L2 pages by zeroing the guest entries. */
    3.14      case PGT_l2_page_table:
    3.15          p = map_domain_mem((spage - frame_table) << PAGE_SHIFT);
    3.16 -        if ( shadow_mode(d) == SHM_full_32 )
    3.17 +        if ( shadow_mode_external(d) )
    3.18              memset(p, 0, L2_PAGETABLE_ENTRIES * sizeof(*p));
    3.19          else 
    3.20              memset(p, 0, DOMAIN_ENTRIES_PER_L2_PAGETABLE * sizeof(*p));
    3.21 @@ -169,6 +171,8 @@ void shadow_mode_init(void)
    3.22  
    3.23  int __shadow_mode_enable(struct domain *d, unsigned int mode)
    3.24  {
    3.25 +    d->arch.shadow_mode = mode;
    3.26 +
    3.27      if (!d->arch.shadow_ht)
    3.28      {
    3.29          d->arch.shadow_ht = xmalloc_array(struct shadow_status, shadow_ht_buckets);
    3.30 @@ -179,7 +183,7 @@ int __shadow_mode_enable(struct domain *
    3.31             shadow_ht_buckets * sizeof(struct shadow_status));
    3.32      }
    3.33  
    3.34 -    if ( mode == SHM_logdirty && !d->arch.shadow_dirty_bitmap)
    3.35 +    if ( shadow_mode_log_dirty(d) && !d->arch.shadow_dirty_bitmap)
    3.36      {
    3.37          d->arch.shadow_dirty_bitmap_size = (d->max_pages + 63) & ~63;
    3.38          d->arch.shadow_dirty_bitmap = 
    3.39 @@ -194,8 +198,6 @@ int __shadow_mode_enable(struct domain *
    3.40                 d->arch.shadow_dirty_bitmap_size/8);
    3.41      }
    3.42  
    3.43 -    d->arch.shadow_mode = mode;
    3.44 -
    3.45      return 0;
    3.46  
    3.47   nomem:
    3.48 @@ -389,17 +391,17 @@ int shadow_mode_control(struct domain *d
    3.49          break;
    3.50  
    3.51      case DOM0_SHADOW_CONTROL_OP_ENABLE_TEST:
    3.52 -        shadow_mode_disable(d);
    3.53 -        rc = __shadow_mode_enable(d, SHM_test);
    3.54 +        free_shadow_state(d);
    3.55 +        rc = __shadow_mode_enable(d, SHM_enable);
    3.56          break;
    3.57  
    3.58      case DOM0_SHADOW_CONTROL_OP_ENABLE_LOGDIRTY:
    3.59 -        shadow_mode_disable(d);
    3.60 -        rc = __shadow_mode_enable(d, SHM_logdirty);
    3.61 +        free_shadow_state(d);
    3.62 +        rc = __shadow_mode_enable(d, d->arch.shadow_mode|SHM_log_dirty);
    3.63          break;
    3.64  
    3.65      default:
    3.66 -        rc = shadow_mode(d) ? shadow_mode_table_op(d, sc) : -EINVAL;
    3.67 +        rc = shadow_mode_enabled(d) ? shadow_mode_table_op(d, sc) : -EINVAL;
    3.68          break;
    3.69      }
    3.70  
    3.71 @@ -488,7 +490,7 @@ unsigned long shadow_l2_table(
    3.72   
    3.73  #ifdef __i386__
     3.74      /* Install hypervisor and 2x linear p.t. mappings. */
    3.75 -    if ( shadow_mode(d) == SHM_full_32 )
    3.76 +    if ( shadow_mode_translate(d) )
    3.77      {
    3.78  #ifdef CONFIG_VMX
    3.79          vmx_update_shadow_state(d->exec_domain[0], gpfn, spfn);
    3.80 @@ -519,12 +521,11 @@ unsigned long shadow_l2_table(
    3.81              mk_l2_pgentry(__pa(page_get_owner(
    3.82                  &frame_table[gpfn])->arch.mm_perdomain_pt) |
    3.83                            __PAGE_HYPERVISOR);
    3.84 +
    3.85 +        unmap_domain_mem(spl2e);
    3.86      }
    3.87  #endif
    3.88  
    3.89 -    if ( shadow_mode(d) != SHM_full_32 ) 
    3.90 -        unmap_domain_mem(spl2e);
    3.91 -
    3.92      SH_VLOG("shadow_l2_table( %p -> %p)", gpfn, spfn);
    3.93      return spfn;
    3.94  }
    3.95 @@ -954,7 +955,7 @@ int check_l2_table(
    3.96                                      L2_PAGETABLE_SHIFT]),
    3.97                 (smfn << PAGE_SHIFT) | __PAGE_HYPERVISOR);
    3.98  
    3.99 -    if ( shadow_mode(d) != SHM_full_32 ) {
   3.100 +    if ( !shadow_mode_translate(d) ) {
   3.101          if ( (l2_pgentry_val(spl2e[PERDOMAIN_VIRT_START >> L2_PAGETABLE_SHIFT]) !=
   3.102                ((v2m(page_get_owner(&frame_table[gmfn])->arch.mm_perdomain_pt) |
   3.103                  __PAGE_HYPERVISOR))) )
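
Note the reordering in __shadow_mode_enable() above: d->arch.shadow_mode is
now assigned at the top of the function instead of the bottom, because the
dirty-bitmap allocation is now gated on shadow_mode_log_dirty(d), which
reads that field (the old code compared the mode parameter directly). A
condensed, standalone view of the dependency (stand-in types; the
xmalloc/memset details are elided):

    #define SHM_log_dirty (1<<1)
    #define shadow_mode_log_dirty(d) ((d)->arch.shadow_mode & SHM_log_dirty)

    struct arch_domain {
        unsigned int   shadow_mode;
        unsigned long *shadow_dirty_bitmap;
    };
    struct domain { struct arch_domain arch; };

    static int __shadow_mode_enable(struct domain *d, unsigned int mode)
    {
        d->arch.shadow_mode = mode;   /* must be set first... */

        /* ...because this predicate reads d->arch.shadow_mode; with the
         * assignment still at the bottom it would test the stale value. */
        if ( shadow_mode_log_dirty(d) && !d->arch.shadow_dirty_bitmap )
        {
            /* allocate and zero the dirty bitmap here */
        }
        return 0;
    }

    int main(void)
    {
        struct domain d = { { 0, 0 } };
        return __shadow_mode_enable(&d, SHM_log_dirty);
    }
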
     4.1 --- a/xen/arch/x86/x86_32/domain_build.c	Sun Feb 13 22:00:50 2005 +0000
     4.2 +++ b/xen/arch/x86/x86_32/domain_build.c	Mon Feb 14 02:27:01 2005 +0000
     4.3 @@ -384,7 +384,7 @@ int construct_dom0(struct domain *d,
     4.4  #ifndef NDEBUG
     4.5      if (0) /* XXXXX DO NOT CHECK IN ENABLED !!! (but useful for testing so leave) */
     4.6      {
     4.7 -        shadow_mode_enable(d, SHM_test); 
     4.8 +        shadow_mode_enable(d, SHM_enable); 
     4.9          update_pagetables(ed); /* XXX SMP */
    4.10      }
    4.11  #endif
     5.1 --- a/xen/include/asm-x86/shadow.h	Sun Feb 13 22:00:50 2005 +0000
     5.2 +++ b/xen/include/asm-x86/shadow.h	Mon Feb 14 02:27:01 2005 +0000
     5.3 @@ -17,17 +17,21 @@
     5.4  #define PSH_pfn_mask    ((1<<21)-1)
     5.5  
     5.6  /* Shadow PT operation mode : shadow-mode variable in arch_domain. */
     5.7 -#define SHM_test        (1) /* just run domain on shadow PTs */
     5.8 -#define SHM_logdirty    (2) /* log pages that are dirtied */
     5.9 -#define SHM_translate   (3) /* lookup machine pages in translation table */
    5.10 -#define SHM_cow         (4) /* copy on write all dirtied pages */
    5.11 -#define SHM_full_32     (8) /* full virtualization for 32-bit */
    5.12 +
    5.13 +#define SHM_enable    (1<<0) /* we're in one of the shadow modes */
    5.14 +#define SHM_log_dirty (1<<1) /* enable log dirty mode */
     5.15 +#define SHM_translate (1<<2) /* do p2m translation on guest tables */
    5.16 +#define SHM_external  (1<<3) /* external page table, not used by Xen */
    5.17 +
    5.18 +#define shadow_mode_enabled(_d)   ((_d)->arch.shadow_mode)
    5.19 +#define shadow_mode_log_dirty(_d) ((_d)->arch.shadow_mode & SHM_log_dirty)
    5.20 +#define shadow_mode_translate(_d) ((_d)->arch.shadow_mode & SHM_translate)
    5.21 +#define shadow_mode_external(_d)  ((_d)->arch.shadow_mode & SHM_external)
    5.22  
    5.23  #define shadow_linear_pg_table ((l1_pgentry_t *)SH_LINEAR_PT_VIRT_START)
    5.24  #define shadow_linear_l2_table ((l2_pgentry_t *)(SH_LINEAR_PT_VIRT_START + \
    5.25       (SH_LINEAR_PT_VIRT_START >> (L2_PAGETABLE_SHIFT - L1_PAGETABLE_SHIFT))))
    5.26  
    5.27 -#define shadow_mode(_d)      ((_d)->arch.shadow_mode)
    5.28  #define shadow_lock_init(_d) spin_lock_init(&(_d)->arch.shadow_lock)
    5.29  #define shadow_lock(_d)      spin_lock(&(_d)->arch.shadow_lock)
    5.30  #define shadow_unlock(_d)    spin_unlock(&(_d)->arch.shadow_lock)
    5.31 @@ -49,19 +53,19 @@ extern void vmx_shadow_invlpg(struct dom
    5.32  #endif
    5.33  
    5.34  #define __mfn_to_gpfn(_d, mfn)                         \
    5.35 -    ( (shadow_mode(_d) == SHM_full_32)                 \
    5.36 +    ( (shadow_mode_translate(_d))                      \
    5.37        ? machine_to_phys_mapping[(mfn)]                 \
    5.38        : (mfn) )
    5.39  
    5.40  #define __gpfn_to_mfn(_d, gpfn)                        \
    5.41 -    ( (shadow_mode(_d) == SHM_full_32)                 \
    5.42 +    ( (shadow_mode_translate(_d))                      \
    5.43        ? phys_to_machine_mapping(gpfn)                  \
    5.44        : (gpfn) )
    5.45  
    5.46  extern void __shadow_mode_disable(struct domain *d);
    5.47  static inline void shadow_mode_disable(struct domain *d)
    5.48  {
    5.49 -    if ( shadow_mode(d) )
    5.50 +    if ( shadow_mode_enabled(d) )
    5.51          __shadow_mode_disable(d);
    5.52  }
    5.53  
    5.54 @@ -69,7 +73,7 @@ extern unsigned long shadow_l2_table(
    5.55      struct domain *d, unsigned long gpfn);
    5.56    
    5.57  static inline void shadow_invalidate(struct exec_domain *ed) {
    5.58 -    if ( shadow_mode(ed->domain) != SHM_full_32 )
     5.59 +    if ( !shadow_mode_translate(ed->domain) )
    5.60          BUG();
    5.61      memset(ed->arch.shadow_vtable, 0, PAGE_SIZE);
    5.62  }
    5.63 @@ -119,33 +123,40 @@ struct shadow_status {
    5.64  static inline void __shadow_get_l2e(
    5.65      struct exec_domain *ed, unsigned long va, unsigned long *sl2e)
    5.66  {
    5.67 -    if ( shadow_mode(ed->domain) == SHM_full_32 ) {
    5.68 -        *sl2e = l2_pgentry_val(ed->arch.shadow_vtable[l2_table_offset(va)]);
    5.69 +    if ( likely(shadow_mode_enabled(ed->domain)) ) {
    5.70 +        if ( shadow_mode_translate(ed->domain) )
    5.71 +            *sl2e = l2_pgentry_val(
    5.72 +                ed->arch.shadow_vtable[l2_table_offset(va)]);       
    5.73 +        else 
    5.74 +            *sl2e = l2_pgentry_val(
    5.75 +                shadow_linear_l2_table[l2_table_offset(va)]);
    5.76      }
    5.77 -    else if ( shadow_mode(ed->domain) ) {
    5.78 -        *sl2e = l2_pgentry_val(shadow_linear_l2_table[l2_table_offset(va)]);
    5.79 +    else {
    5.80 +        BUG(); /* why do we need this case? */
    5.81 +        *sl2e = l2_pgentry_val(linear_l2_table[l2_table_offset(va)]);
    5.82      }
    5.83 -    else
    5.84 -        *sl2e = l2_pgentry_val(linear_l2_table[l2_table_offset(va)]);
    5.85  }
    5.86  
    5.87  static inline void __shadow_set_l2e(
    5.88      struct exec_domain *ed, unsigned long va, unsigned long value)
    5.89  {
    5.90 -    if ( shadow_mode(ed->domain) == SHM_full_32 ) {
    5.91 -        ed->arch.shadow_vtable[l2_table_offset(va)] = mk_l2_pgentry(value);
    5.92 -    }
    5.93 -    else if ( shadow_mode(ed->domain) ) {
    5.94 -        shadow_linear_l2_table[l2_table_offset(va)] = mk_l2_pgentry(value);
    5.95 +    if ( likely(shadow_mode_enabled(ed->domain)) ) {
    5.96 +        if ( shadow_mode_translate(ed->domain) ) 
    5.97 +            ed->arch.shadow_vtable[l2_table_offset(va)] = mk_l2_pgentry(value);
    5.98 +        else 
    5.99 +            shadow_linear_l2_table[l2_table_offset(va)] = mk_l2_pgentry(value);
   5.100      }
   5.101      else
   5.102 +    {
   5.103 +        BUG(); /* why do we need this case? */
   5.104          linear_l2_table[l2_table_offset(va)] = mk_l2_pgentry(value);
   5.105 +    }
   5.106  }
   5.107  
   5.108  static inline void __guest_get_l2e(
   5.109      struct exec_domain *ed, unsigned long va, unsigned long *l2e)
   5.110  {
   5.111 -    *l2e = ( shadow_mode(ed->domain) == SHM_full_32) ?
   5.112 +    *l2e = ( shadow_mode_translate(ed->domain) ) ?
   5.113          l2_pgentry_val(ed->arch.vpagetable[l2_table_offset(va)]) :
   5.114          l2_pgentry_val(linear_l2_table[l2_table_offset(va)]);
   5.115  }
   5.116 @@ -153,7 +164,7 @@ static inline void __guest_get_l2e(
   5.117  static inline void __guest_set_l2e(
   5.118      struct exec_domain *ed, unsigned long va, unsigned long value)
   5.119  {
   5.120 -    if ( shadow_mode(ed->domain) == SHM_full_32 )
   5.121 +    if ( shadow_mode_translate(ed->domain) )
   5.122      {
   5.123          unsigned long pfn;
   5.124  
   5.125 @@ -237,7 +248,7 @@ static inline void l1pte_write_fault(
   5.126      ASSERT(gpte & _PAGE_RW);
   5.127      gpte |= _PAGE_DIRTY | _PAGE_ACCESSED;
   5.128  
   5.129 -    if ( shadow_mode(d) == SHM_logdirty )
   5.130 +    if ( shadow_mode_log_dirty(d) )
   5.131          __mark_dirty(d, pfn);
   5.132  
   5.133      spte = (mfn << PAGE_SHIFT) | (gpte & ~PAGE_MASK);
   5.134 @@ -258,7 +269,7 @@ static inline void l1pte_read_fault(
   5.135      gpte |= _PAGE_ACCESSED;
   5.136      spte = (mfn << PAGE_SHIFT) | (gpte & ~PAGE_MASK);
   5.137  
   5.138 -    if ( (shadow_mode(d) == SHM_logdirty) || ! (gpte & _PAGE_DIRTY) )
   5.139 +    if ( shadow_mode_log_dirty(d) || !(gpte & _PAGE_DIRTY) )
   5.140          spte &= ~_PAGE_RW;
   5.141  
   5.142      SH_VVLOG("l1pte_read_fault: updating spte=0x%p gpte=0x%p", spte, gpte);
   5.143 @@ -271,47 +282,28 @@ static inline void l1pte_propagate_from_
   5.144  { 
   5.145      unsigned long gpte = *gpte_p;
   5.146      unsigned long spte = *spte_p;
   5.147 -    unsigned long host_pfn, host_gpte;
   5.148 +    unsigned long pfn = gpte >> PAGE_SHIFT;
   5.149 +    unsigned long mfn = __gpfn_to_mfn(d, pfn);
   5.150 +
   5.151  #if SHADOW_VERBOSE_DEBUG
   5.152      unsigned long old_spte = spte;
   5.153  #endif
   5.154  
   5.155 -    switch ( shadow_mode(d) )
   5.156 -    {
   5.157 -    case SHM_test:
   5.158 -        spte = 0;
   5.159 -        if ( (gpte & (_PAGE_PRESENT|_PAGE_ACCESSED) ) == 
   5.160 -             (_PAGE_PRESENT|_PAGE_ACCESSED) )
   5.161 -            spte = (gpte & _PAGE_DIRTY) ? gpte : (gpte & ~_PAGE_RW);
   5.162 -        break;
   5.163 -
   5.164 -    case SHM_logdirty:
   5.165 -        spte = 0;
   5.166 -        if ( (gpte & (_PAGE_PRESENT|_PAGE_ACCESSED) ) == 
   5.167 -             (_PAGE_PRESENT|_PAGE_ACCESSED) )
   5.168 -            spte = gpte & ~_PAGE_RW;
   5.169 -        break;
   5.170 -
   5.171 -    case SHM_full_32:
   5.172 -        spte = 0;
   5.173 -
   5.174 -        if ( mmio_space(gpte & 0xFFFFF000) )
   5.175 -        {
   5.176 -            *spte_p = spte;
   5.177 -            return;
   5.178 -        }
   5.179 +    if ( shadow_mode_external(d) && mmio_space(gpte & 0xFFFFF000) ) {
   5.180 +        *spte_p = 0;
   5.181 +        return;
   5.182 +    }
   5.183 +    
   5.184 +    spte = 0;
   5.185 +    if ( (gpte & (_PAGE_PRESENT|_PAGE_ACCESSED) ) == 
   5.186 +         (_PAGE_PRESENT|_PAGE_ACCESSED) ) {
   5.187          
   5.188 -        host_pfn = phys_to_machine_mapping(gpte >> PAGE_SHIFT);
   5.189 -        host_gpte = (host_pfn << PAGE_SHIFT) | (gpte & ~PAGE_MASK);
   5.190 -
   5.191 -        if ( (host_gpte & (_PAGE_PRESENT|_PAGE_ACCESSED) ) == 
   5.192 -             (_PAGE_PRESENT|_PAGE_ACCESSED) )
   5.193 -            spte = (host_gpte & _PAGE_DIRTY) ? 
   5.194 -                host_gpte : (host_gpte & ~_PAGE_RW);
   5.195 -
   5.196 -        break;
   5.197 +        spte = (mfn << PAGE_SHIFT) | (gpte & ~PAGE_MASK);
   5.198 +        
   5.199 +        if ( shadow_mode_log_dirty(d) || !(gpte & _PAGE_DIRTY) )
   5.200 +            spte &= ~_PAGE_RW;
   5.201      }
   5.202 -
   5.203 +        
   5.204  #if SHADOW_VERBOSE_DEBUG
   5.205      if ( old_spte || spte || gpte )
   5.206          SH_VVLOG("l1pte_propagate_from_guest: gpte=0x%p, old spte=0x%p, new spte=0x%p ", gpte, old_spte, spte);
   5.207 @@ -321,6 +313,8 @@ static inline void l1pte_propagate_from_
   5.208      *spte_p = spte;
   5.209  }
   5.210  
   5.211 +
   5.212 +
   5.213  static inline void l2pde_general(
   5.214      struct domain *d,
   5.215      unsigned long *gpde_p,
   5.216 @@ -342,7 +336,7 @@ static inline void l2pde_general(
   5.217          if ( (frame_table[sl1mfn].u.inuse.type_info & PGT_type_mask) ==
   5.218               PGT_l2_page_table ) 
   5.219          {
   5.220 -            if ( shadow_mode(d) != SHM_full_32 )
   5.221 +            if ( !shadow_mode_translate(d) )
   5.222                  spde = gpde & ~_PAGE_RW;
   5.223  
   5.224          }
   5.225 @@ -474,7 +468,7 @@ static inline unsigned long get_shadow_s
   5.226  {
   5.227      unsigned long res;
   5.228  
   5.229 -    ASSERT(shadow_mode(d));
   5.230 +    ASSERT(shadow_mode_enabled(d));
   5.231  
   5.232      /*
   5.233       * If we get here we know that some sort of update has happened to the
   5.234 @@ -482,11 +476,13 @@ static inline unsigned long get_shadow_s
   5.235       * has changed type. If we're in log dirty mode, we should set the
   5.236       * appropriate bit in the dirty bitmap.
   5.237       * N.B. The VA update path doesn't use this and is handled independently. 
   5.238 +
   5.239 +     XXX need to think this through for vmx guests, but probably OK
   5.240       */
   5.241  
   5.242      shadow_lock(d);
   5.243  
   5.244 -    if ( shadow_mode(d) == SHM_logdirty )
   5.245 +    if ( shadow_mode_log_dirty(d) )
   5.246          __mark_dirty(d, gpfn);
   5.247  
   5.248      if ( !(res = __shadow_status(d, gpfn)) )
   5.249 @@ -744,7 +740,7 @@ static inline void __update_pagetables(s
   5.250          smfn = shadow_l2_table(d, gpfn);
   5.251  #ifdef CONFIG_VMX
   5.252      else
   5.253 -        if (d->arch.shadow_mode == SHM_full_32)
    5.254 +        if ( shadow_mode_translate(ed->domain) )
   5.255          {
   5.256              vmx_update_shadow_state(ed, gpfn, smfn);
   5.257          }
   5.258 @@ -752,13 +748,13 @@ static inline void __update_pagetables(s
   5.259  
   5.260      ed->arch.shadow_table = mk_pagetable(smfn<<PAGE_SHIFT);
   5.261  
   5.262 -    if (d->arch.shadow_mode != SHM_full_32)
   5.263 +    if ( !shadow_mode_external(ed->domain) )
   5.264          ed->arch.monitor_table = ed->arch.shadow_table;
   5.265  }
   5.266  
   5.267  static inline void update_pagetables(struct exec_domain *ed)
   5.268  {
   5.269 -     if ( unlikely(shadow_mode(ed->domain)) )
   5.270 +     if ( unlikely(shadow_mode_enabled(ed->domain)) )
   5.271       {
   5.272           SH_VVLOG("update_pagetables( gptbase=%p, mode=%d )",
   5.273               pagetable_val(ed->arch.guest_table),
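
With translate mode carrying its own bit, the pfn conversion macros in this
header key on shadow_mode_translate() rather than on SHM_full_32: in a
translating domain, guest-physical and machine frame numbers differ and go
through the p2m/m2p tables, while in every other mode the two spaces are
identical. A toy model of the two conversions (table names and sizes are
hypothetical, for illustration only):

    #include <assert.h>

    #define SHM_translate (1<<2)
    #define shadow_mode_translate(d) ((d)->shadow_mode & SHM_translate)

    struct domain { unsigned int shadow_mode; };

    /* Toy p2m/m2p tables for a 4-frame guest. */
    static unsigned long phys_to_machine[4] = { 7, 3, 9, 1 };
    static unsigned long machine_to_phys[16];

    static unsigned long gpfn_to_mfn(struct domain *d, unsigned long gpfn)
    {
        return shadow_mode_translate(d) ? phys_to_machine[gpfn] : gpfn;
    }

    static unsigned long mfn_to_gpfn(struct domain *d, unsigned long mfn)
    {
        return shadow_mode_translate(d) ? machine_to_phys[mfn] : mfn;
    }

    int main(void)
    {
        struct domain d = { SHM_translate };
        unsigned long gpfn;

        for ( gpfn = 0; gpfn < 4; gpfn++ )
            machine_to_phys[phys_to_machine[gpfn]] = gpfn;

        /* The round trip holds in translate mode... */
        for ( gpfn = 0; gpfn < 4; gpfn++ )
            assert( mfn_to_gpfn(&d, gpfn_to_mfn(&d, gpfn)) == gpfn );

        /* ...and trivially in every non-translate mode. */
        d.shadow_mode = 0;
        assert( gpfn_to_mfn(&d, 2) == 2 );
        return 0;
    }
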
     6.1 --- a/xen/include/public/dom0_ops.h	Sun Feb 13 22:00:50 2005 +0000
     6.2 +++ b/xen/include/public/dom0_ops.h	Mon Feb 14 02:27:01 2005 +0000
     6.3 @@ -267,7 +267,7 @@ typedef struct {
     6.4  #define DOM0_SHADOW_CONTROL_OP_OFF         0
     6.5  #define DOM0_SHADOW_CONTROL_OP_ENABLE_TEST 1
     6.6  #define DOM0_SHADOW_CONTROL_OP_ENABLE_LOGDIRTY 2
     6.7 -#define DOM0_SHADOW_CONTROL_OP_ENABLE_TRANSLATE 3
     6.8 +
     6.9  #define DOM0_SHADOW_CONTROL_OP_FLUSH       10     /* table ops */
    6.10  #define DOM0_SHADOW_CONTROL_OP_CLEAN       11
    6.11  #define DOM0_SHADOW_CONTROL_OP_PEEK        12