ia64/xen-unstable

changeset 3823:b3331d66a0bf

bitkeeper revision 1.1194.1.1 (420fb929A6i2BgwaqAFiDYqZMrpIQg)

Re-organise guest_table, shadow_table and monitor_table so that
they always have the meaning their names suggest i.e. in the
hypervisor CR3 always contains monitor_table.

After updating guest_table or any of the shadow state remember
to call update_pagetables(ed)

One side-effect of this change is that VMX guests now start off with
shadow_mode set to full_32, but actually running on the 1:1 physmap.
We don't actually call update_pagetables() until the VCPU enables
paging, hence ensuring that linear_pg_table is NULL so we bail out
early in shadow_fault if a vmx guest tries to access outside its
memory map. We'll need this for SMP VMX guests so that each VCPU can
enable paging independently. We might need to think further about
this for guests that do IO without paging on - possibly having a
generated pseudo phys pt that the full shadow mode can translate and
shadow.

Signed-off-by: ian@xensource.com
author iap10@freefall.cl.cam.ac.uk
date Sun Feb 13 20:31:37 2005 +0000 (2005-02-13)
parents c695b365394d
children f4d1946b8db2
files xen/arch/x86/domain.c xen/arch/x86/mm.c xen/arch/x86/shadow.c xen/arch/x86/vmx.c xen/arch/x86/vmx_vmcs.c xen/arch/x86/x86_32/domain_build.c xen/arch/x86/x86_64/domain_build.c xen/common/domain.c xen/include/asm-x86/domain.h xen/include/asm-x86/shadow.h
line diff
     1.1 --- a/xen/arch/x86/domain.c	Sat Feb 12 03:11:43 2005 +0000
     1.2 +++ b/xen/arch/x86/domain.c	Sun Feb 13 20:31:37 2005 +0000
     1.3 @@ -259,6 +259,8 @@ void arch_do_createdomain(struct exec_do
     1.4          d->arch.mm_perdomain_l3[l3_table_offset(PERDOMAIN_VIRT_START)] = 
     1.5              mk_l3_pgentry(__pa(d->arch.mm_perdomain_l2) | __PAGE_HYPERVISOR);
     1.6  #endif
     1.7 +
     1.8 +        shadow_lock_init(d);        
     1.9      }
    1.10  }
    1.11  
    1.12 @@ -290,13 +292,15 @@ void arch_vmx_do_launch(struct exec_doma
    1.13      reset_stack_and_jump(vmx_asm_do_launch);
    1.14  }
    1.15  
    1.16 -static void monitor_mk_pagetable(struct exec_domain *ed)
    1.17 +static void alloc_monitor_pagetable(struct exec_domain *ed)
    1.18  {
    1.19      unsigned long mpfn;
    1.20      l2_pgentry_t *mpl2e, *phys_table;
    1.21      struct pfn_info *mpfn_info;
    1.22      struct domain *d = ed->domain;
    1.23  
    1.24 +    ASSERT(!ed->arch.monitor_table); /* we should only get called once */
    1.25 +
    1.26      mpfn_info = alloc_domheap_page(NULL);
    1.27      ASSERT( mpfn_info ); 
    1.28  
    1.29 @@ -309,7 +313,6 @@ static void monitor_mk_pagetable(struct 
    1.30             HYPERVISOR_ENTRIES_PER_L2_PAGETABLE * sizeof(l2_pgentry_t));
    1.31  
    1.32      ed->arch.monitor_table = mk_pagetable(mpfn << PAGE_SHIFT);
    1.33 -    d->arch.shadow_mode = SHM_full_32;
    1.34  
    1.35      mpl2e[l2_table_offset(PERDOMAIN_VIRT_START)] =
    1.36          mk_l2_pgentry((__pa(d->arch.mm_perdomain_pt) & PAGE_MASK) 
    1.37 @@ -327,7 +330,7 @@ static void monitor_mk_pagetable(struct 
    1.38  /*
    1.39   * Free the pages for monitor_table and guest_pl2e_cache
    1.40   */
    1.41 -static void monitor_rm_pagetable(struct exec_domain *ed)
    1.42 +static void free_monitor_pagetable(struct exec_domain *ed)
    1.43  {
    1.44      l2_pgentry_t *mpl2e;
    1.45      unsigned long mpfn;
    1.46 @@ -382,7 +385,6 @@ static int vmx_final_setup_guest(struct 
    1.47          goto out;
    1.48      }
    1.49  
    1.50 -    monitor_mk_pagetable(ed);
    1.51      ed->arch.schedule_tail = arch_vmx_do_launch;
    1.52      clear_bit(VMX_CPU_STATE_PG_ENABLED, &ed->arch.arch_vmx.cpu_state);
    1.53  
    1.54 @@ -394,12 +396,20 @@ static int vmx_final_setup_guest(struct 
    1.55      if (ed == ed->domain->exec_domain[0]) {
    1.56          /* 
    1.57           * Required to do this once per domain
    1.58 +         * XXX todo: add a separate function to do these.
    1.59           */
    1.60          memset(&ed->domain->shared_info->evtchn_mask[0], 0xff, 
    1.61                 sizeof(ed->domain->shared_info->evtchn_mask));
    1.62          clear_bit(IOPACKET_PORT, &ed->domain->shared_info->evtchn_mask[0]);
    1.63 +
    1.64 +        /* Put the domain in shadow mode even though we're going to be using
    1.65 +         * the shared 1:1 page table initially. It shouldn't hurt */
    1.66 +        shadow_mode_enable(ed->domain, SHM_full_32);
    1.67      }
    1.68  
    1.69 +    update_pagetables(ed);     /* this assigns shadow_pagetable */
    1.70 +    alloc_monitor_pagetable(ed); /* this assigns monitor_pagetable */
    1.71 +
    1.72      return 0;
    1.73  
    1.74  out:
    1.75 @@ -409,6 +419,8 @@ out:
    1.76  }
    1.77  #endif
    1.78  
    1.79 +
    1.80 +/* This is called by arch_final_setup_guest and do_boot_vcpu */
    1.81  int arch_final_setup_guest(
    1.82      struct exec_domain *d, full_execution_context_t *c)
    1.83  {
    1.84 @@ -467,8 +479,8 @@ int arch_final_setup_guest(
    1.85      d->arch.failsafe_address  = c->failsafe_callback_eip;
    1.86      
    1.87      phys_basetab = c->pt_base;
    1.88 -    d->arch.guest_table = mk_pagetable(phys_basetab);
    1.89 -    d->arch.phys_table = d->arch.guest_table;
    1.90 +    d->arch.guest_table = d->arch.phys_table = mk_pagetable(phys_basetab);
    1.91 +
    1.92      if ( !get_page_and_type(&frame_table[phys_basetab>>PAGE_SHIFT], d->domain, 
    1.93                              PGT_base_page_table) )
    1.94          return -EINVAL;
    1.95 @@ -490,6 +502,9 @@ int arch_final_setup_guest(
    1.96          return vmx_final_setup_guest(d, c);
    1.97  #endif
    1.98  
    1.99 +    update_pagetables(d);  /* this assigns shadow_pagetable 
   1.100 +                                and monitor_table */
   1.101 +
   1.102      return 0;
   1.103  }
   1.104  
   1.105 @@ -639,6 +654,7 @@ static void switch_segments(
   1.106          {
   1.107              n->arch.flags |= TF_kernel_mode;
   1.108              __asm__ __volatile__ ( "swapgs" );
   1.109 +            update_pagetables(ed);
   1.110              write_ptbase(n);
   1.111          }
   1.112  
   1.113 @@ -663,6 +679,7 @@ long do_switch_to_user(void)
   1.114  
   1.115      ed->arch.flags &= ~TF_kernel_mode;
   1.116      __asm__ __volatile__ ( "swapgs" );
   1.117 +    update_pagetables(ed);
   1.118      write_ptbase(ed);
   1.119  
   1.120      regs->rip    = stu.rip;
   1.121 @@ -929,7 +946,7 @@ static void vmx_domain_relinquish_memory
   1.122      free_vmcs(ed->arch.arch_vmx.vmcs);
   1.123      ed->arch.arch_vmx.vmcs = 0;
   1.124      
   1.125 -    monitor_rm_pagetable(ed);
   1.126 +    free_monitor_pagetable(ed);
   1.127      rem_ac_timer(&(vpit->pit_timer));
   1.128  }
   1.129  #endif
     2.1 --- a/xen/arch/x86/mm.c	Sat Feb 12 03:11:43 2005 +0000
     2.2 +++ b/xen/arch/x86/mm.c	Sun Feb 13 20:31:37 2005 +0000
     2.3 @@ -196,28 +196,7 @@ void arch_init_memory(void)
     2.4  
     2.5  void write_ptbase(struct exec_domain *ed)
     2.6  {
     2.7 -    struct domain *d = ed->domain;
     2.8 -    unsigned long pa;
     2.9 -
    2.10 -#ifdef CONFIG_VMX
    2.11 -    if ( unlikely(shadow_mode(d)) )
    2.12 -        pa = ((shadow_mode(d) == SHM_full_32) ?
    2.13 -              pagetable_val(ed->arch.monitor_table) :
    2.14 -              pagetable_val(ed->arch.shadow_table));
    2.15 -    else
    2.16 -        pa = pagetable_val(ed->arch.guest_table);
    2.17 -#else
    2.18 -    if ( unlikely(shadow_mode(d)) )
    2.19 -        pa = pagetable_val(ed->arch.shadow_table);    
    2.20 -#ifdef __x86_64__
    2.21 -    else if ( !(ed->arch.flags & TF_kernel_mode) )
    2.22 -        pa = pagetable_val(ed->arch.guest_table_user);
    2.23 -#endif
    2.24 -    else
    2.25 -        pa = pagetable_val(ed->arch.guest_table);
    2.26 -#endif
    2.27 -
    2.28 -    write_cr3(pa);
    2.29 +    write_cr3(pagetable_val(ed->arch.monitor_table));
    2.30  }
    2.31  
    2.32  static void __invalidate_shadow_ldt(struct exec_domain *d)
    2.33 @@ -1251,8 +1230,7 @@ int new_guest_cr3(unsigned long pfn)
    2.34          percpu_info[cpu].deferred_ops &= ~DOP_FLUSH_TLB;
    2.35          old_base_pfn = pagetable_val(ed->arch.guest_table) >> PAGE_SHIFT;
    2.36          ed->arch.guest_table = mk_pagetable(pfn << PAGE_SHIFT);
    2.37 -
    2.38 -        shadow_mk_pagetable(ed);
    2.39 +        update_pagetables(ed); /* update shadow_table and monitor_table */
    2.40  
    2.41          write_ptbase(ed);
    2.42  
     3.1 --- a/xen/arch/x86/shadow.c	Sat Feb 12 03:11:43 2005 +0000
     3.2 +++ b/xen/arch/x86/shadow.c	Sun Feb 13 20:31:37 2005 +0000
     3.3 @@ -45,7 +45,7 @@ static inline void free_shadow_page(
     3.4      free_domheap_page(page);
     3.5  }
     3.6  
     3.7 -static void free_shadow_state(struct domain *d)
     3.8 +void free_shadow_state(struct domain *d)
     3.9  {
    3.10      int                   i, free = 0;
    3.11      struct shadow_status *x, *n;
    3.12 @@ -166,15 +166,20 @@ void shadow_mode_init(void)
    3.13  {
    3.14  }
    3.15  
    3.16 -int shadow_mode_enable(struct domain *d, unsigned int mode)
    3.17 +
    3.18 +int __shadow_mode_enable(struct domain *d, unsigned int mode)
    3.19  {
    3.20 -    d->arch.shadow_ht = xmalloc_array(struct shadow_status, shadow_ht_buckets);
    3.21 -    if ( d->arch.shadow_ht == NULL )
    3.22 -        goto nomem;
    3.23 -    memset(d->arch.shadow_ht, 0,
    3.24 +    if (!d->arch.shadow_ht)
    3.25 +    {
    3.26 +        d->arch.shadow_ht = xmalloc_array(struct shadow_status, shadow_ht_buckets);
    3.27 +        if ( d->arch.shadow_ht == NULL )
    3.28 +            goto nomem;
    3.29 +
    3.30 +        memset(d->arch.shadow_ht, 0,
    3.31             shadow_ht_buckets * sizeof(struct shadow_status));
    3.32 +    }
    3.33  
    3.34 -    if ( mode == SHM_logdirty )
    3.35 +    if ( mode == SHM_logdirty && !d->arch.shadow_dirty_bitmap)
    3.36      {
    3.37          d->arch.shadow_dirty_bitmap_size = (d->max_pages + 63) & ~63;
    3.38          d->arch.shadow_dirty_bitmap = 
    3.39 @@ -191,7 +196,6 @@ int shadow_mode_enable(struct domain *d,
    3.40  
    3.41      d->arch.shadow_mode = mode;
    3.42  
    3.43 -    __shadow_mk_pagetable(d->exec_domain[0]); /* XXX SMP */
    3.44      return 0;
    3.45  
    3.46   nomem:
    3.47 @@ -201,6 +205,15 @@ int shadow_mode_enable(struct domain *d,
    3.48      return -ENOMEM;
    3.49  }
    3.50  
    3.51 +int shadow_mode_enable(struct domain *d, unsigned int mode)
    3.52 +{
    3.53 +    int rc;
    3.54 +    shadow_lock(d);
    3.55 +    rc = __shadow_mode_enable(d, mode);
    3.56 +    shadow_unlock(d);
    3.57 +    return rc;
    3.58 +}
    3.59 +
    3.60  void __shadow_mode_disable(struct domain *d)
    3.61  {
    3.62      struct shadow_status *x, *n;
    3.63 @@ -240,6 +253,7 @@ static int shadow_mode_table_op(
    3.64  {
    3.65      unsigned int      op = sc->op;
    3.66      int               i, rc = 0;
    3.67 +    struct exec_domain *ed;
    3.68  
    3.69      ASSERT(spin_is_locked(&d->arch.shadow_lock));
    3.70  
    3.71 @@ -344,7 +358,10 @@ static int shadow_mode_table_op(
    3.72  
    3.73      SH_VLOG("shadow mode table op : page count %d", d->arch.shadow_page_count);
    3.74      shadow_audit(d, 1);
    3.75 -    __shadow_mk_pagetable(d->exec_domain[0]); /* XXX SMP */
    3.76 +
    3.77 +    for_each_exec_domain(d,ed)
    3.78 +        __update_pagetables(ed);
    3.79 +
    3.80      return rc;
    3.81  }
    3.82  
    3.83 @@ -352,6 +369,7 @@ int shadow_mode_control(struct domain *d
    3.84  {
    3.85      unsigned int op = sc->op;
    3.86      int          rc = 0;
    3.87 +    struct exec_domain *ed;
    3.88  
    3.89      if ( unlikely(d == current->domain) )
    3.90      {
    3.91 @@ -372,12 +390,12 @@ int shadow_mode_control(struct domain *d
    3.92  
    3.93      case DOM0_SHADOW_CONTROL_OP_ENABLE_TEST:
    3.94          shadow_mode_disable(d);
    3.95 -        rc = shadow_mode_enable(d, SHM_test);
    3.96 +        rc = __shadow_mode_enable(d, SHM_test);
    3.97          break;
    3.98  
    3.99      case DOM0_SHADOW_CONTROL_OP_ENABLE_LOGDIRTY:
   3.100          shadow_mode_disable(d);
   3.101 -        rc = shadow_mode_enable(d, SHM_logdirty);
   3.102 +        rc = __shadow_mode_enable(d, SHM_logdirty);
   3.103          break;
   3.104  
   3.105      default:
   3.106 @@ -387,6 +405,9 @@ int shadow_mode_control(struct domain *d
   3.107  
   3.108      shadow_unlock(d);
   3.109  
   3.110 +    for_each_exec_domain(d,ed)
   3.111 +        update_pagetables(ed);
   3.112 +
   3.113      domain_unpause(d);
   3.114  
   3.115      return rc;
     4.1 --- a/xen/arch/x86/vmx.c	Sat Feb 12 03:11:43 2005 +0000
     4.2 +++ b/xen/arch/x86/vmx.c	Sun Feb 13 20:31:37 2005 +0000
     4.3 @@ -36,6 +36,7 @@
     4.4  #include <asm/vmx.h>
     4.5  #include <asm/vmx_vmcs.h>
     4.6  #include <asm/vmx_intercept.h>
     4.7 +#include <asm/shadow.h>
     4.8  #include <public/io/ioreq.h>
     4.9  
    4.10  #ifdef CONFIG_VMX
    4.11 @@ -420,24 +421,31 @@ static void mov_to_cr(int gp, int cr, st
    4.12                  domain_crash(); /* need to take a clean path */
    4.13              }
    4.14              old_base_pfn = pagetable_val(d->arch.guest_table) >> PAGE_SHIFT;
    4.15 +
    4.16 +            /* We know that none of the previous 1:1 shadow pages are
    4.17 +             * going to be used again, so might as well flush them.
    4.18 +             * XXXX wait until the last VCPU boots before doing the flush !!
    4.19 +             */
    4.20 +            shadow_lock(d->domain);
    4.21 +            free_shadow_state(d->domain); // XXX SMP
    4.22 +            shadow_unlock(d->domain);
    4.23 +
    4.24              /*
    4.25               * Now arch.guest_table points to machine physical.
    4.26               */
    4.27              d->arch.guest_table = mk_pagetable(pfn << PAGE_SHIFT);
    4.28 +            update_pagetables(d);
    4.29  
    4.30              VMX_DBG_LOG(DBG_LEVEL_VMMU, "New arch.guest_table = %lx\n", 
    4.31                      (unsigned long) (pfn << PAGE_SHIFT));
    4.32  
    4.33 -            shadow_lock(d->domain);
    4.34 -            shadow_mode_enable(d->domain, SHM_full_32); 
    4.35 -            shadow_unlock(d->domain);
    4.36 -
    4.37              __vmwrite(GUEST_CR3, pagetable_val(d->arch.shadow_table));
    4.38              /* 
    4.39 -             * mm->shadow_table should hold the next CR3 for shadow
    4.40 +             * arch->shadow_table should hold the next CR3 for shadow
    4.41               */
    4.42              VMX_DBG_LOG(DBG_LEVEL_VMMU, "Update CR3 value = %lx, pfn = %lx\n", 
    4.43                      d->arch.arch_vmx.cpu_cr3, pfn);
    4.44 +            /* undo the get_page done in the para virt case */
    4.45              put_page_and_type(&frame_table[old_base_pfn]);
    4.46  
    4.47          }
    4.48 @@ -448,11 +456,11 @@ static void mov_to_cr(int gp, int cr, st
    4.49          unsigned long pfn;
    4.50  
    4.51          /*
    4.52 -         * If paging is not enabled yet, simply copy the valut to CR3.
    4.53 +         * If paging is not enabled yet, simply copy the value to CR3.
    4.54           */
    4.55          if (!test_bit(VMX_CPU_STATE_PG_ENABLED, &d->arch.arch_vmx.cpu_state)) {
    4.56              d->arch.arch_vmx.cpu_cr3 = value;
    4.57 -            return;
    4.58 +            break;
    4.59          }
    4.60          
    4.61          guest_pl2e_cache_invalidate(d);
    4.62 @@ -484,10 +492,10 @@ static void mov_to_cr(int gp, int cr, st
    4.63              }
    4.64              pfn = phys_to_machine_mapping(value >> PAGE_SHIFT);
    4.65              vmx_shadow_clear_state(d->domain);
    4.66 -            d->arch.guest_table = mk_pagetable(pfn << PAGE_SHIFT);
    4.67 -            shadow_mk_pagetable(d);
    4.68 +            d->arch.guest_table  = mk_pagetable(pfn << PAGE_SHIFT);
    4.69 +            update_pagetables(d); 
    4.70              /* 
    4.71 -             * mm->shadow_table should hold the next CR3 for shadow
    4.72 +             * arch.shadow_table should now hold the next CR3 for shadow
    4.73               */
    4.74              d->arch.arch_vmx.cpu_cr3 = value;
    4.75              VMX_DBG_LOG(DBG_LEVEL_VMMU, "Update CR3 value = %lx\n", 
     5.1 --- a/xen/arch/x86/vmx_vmcs.c	Sat Feb 12 03:11:43 2005 +0000
     5.2 +++ b/xen/arch/x86/vmx_vmcs.c	Sun Feb 13 20:31:37 2005 +0000
     5.3 @@ -219,7 +219,6 @@ void vmx_do_launch(struct exec_domain *e
     5.4      error |= __vmwrite(GUEST_TR_BASE, 0);
     5.5      error |= __vmwrite(GUEST_TR_LIMIT, 0xff);
     5.6  
     5.7 -    ed->arch.shadow_table = ed->arch.guest_table;
     5.8      __vmwrite(GUEST_CR3, pagetable_val(ed->arch.guest_table));
     5.9      __vmwrite(HOST_CR3, pagetable_val(ed->arch.monitor_table));
    5.10      __vmwrite(HOST_ESP, (unsigned long)get_stack_bottom());
     6.1 --- a/xen/arch/x86/x86_32/domain_build.c	Sat Feb 12 03:11:43 2005 +0000
     6.2 +++ b/xen/arch/x86/x86_32/domain_build.c	Sun Feb 13 20:31:37 2005 +0000
     6.3 @@ -307,6 +307,9 @@ int construct_dom0(struct domain *d,
     6.4          d->shared_info->vcpu_data[i].evtchn_upcall_mask = 1;
     6.5      d->shared_info->n_vcpu = smp_num_cpus;
     6.6  
     6.7 +    /* setup shadow and monitor tables */
     6.8 +    update_pagetables(ed);
     6.9 +
    6.10      /* Install the new page tables. */
    6.11      __cli();
    6.12      write_ptbase(ed);
    6.13 @@ -381,9 +384,8 @@ int construct_dom0(struct domain *d,
    6.14  #ifndef NDEBUG
    6.15      if (0) /* XXXXX DO NOT CHECK IN ENABLED !!! (but useful for testing so leave) */
    6.16      {
    6.17 -        shadow_lock(d);
    6.18          shadow_mode_enable(d, SHM_test); 
    6.19 -        shadow_unlock(d);
     6.20 +        update_pagetables(ed); /* XXX SMP */
    6.21      }
    6.22  #endif
    6.23  
     7.1 --- a/xen/arch/x86/x86_64/domain_build.c	Sat Feb 12 03:11:43 2005 +0000
     7.2 +++ b/xen/arch/x86/x86_64/domain_build.c	Sun Feb 13 20:31:37 2005 +0000
     7.3 @@ -1,4 +1,4 @@
     7.4 -/* -*-  Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
     7.5 +/* -*-  Modes:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
     7.6  /******************************************************************************
     7.7   * domain_build.c
     7.8   * 
     7.9 @@ -328,6 +328,9 @@ int construct_dom0(struct domain *d,
    7.10          d->shared_info->vcpu_data[i].evtchn_upcall_mask = 1;
    7.11      d->shared_info->n_vcpu = smp_num_cpus;
    7.12  
    7.13 +    /* setup shadow and monitor tables */
     7.14 +    update_pagetables(ed);
    7.15 +
    7.16      /* Install the new page tables. */
    7.17      __cli();
    7.18      write_ptbase(ed);
     8.1 --- a/xen/common/domain.c	Sat Feb 12 03:11:43 2005 +0000
     8.2 +++ b/xen/common/domain.c	Sun Feb 13 20:31:37 2005 +0000
     8.3 @@ -40,8 +40,6 @@ struct domain *do_createdomain(domid_t d
     8.4      atomic_set(&d->refcnt, 1);
     8.5      atomic_set(&ed->pausecnt, 0);
     8.6  
     8.7 -    shadow_lock_init(d);
     8.8 -
     8.9      d->id          = dom_id;
    8.10      ed->processor  = cpu;
    8.11      d->create_time = NOW();
    8.12 @@ -330,7 +328,6 @@ long do_boot_vcpu(unsigned long vcpu, fu
    8.13      ed = d->exec_domain[vcpu];
    8.14  
    8.15      atomic_set(&ed->pausecnt, 0);
    8.16 -    shadow_lock_init(d);
    8.17  
    8.18      memcpy(&ed->arch, &idle0_exec_domain.arch, sizeof(ed->arch));
    8.19  
     9.1 --- a/xen/include/asm-x86/domain.h	Sat Feb 12 03:11:43 2005 +0000
     9.2 +++ b/xen/include/asm-x86/domain.h	Sun Feb 13 20:31:37 2005 +0000
     9.3 @@ -124,7 +124,7 @@ struct arch_exec_domain
     9.4  #define IDLE0_ARCH_EXEC_DOMAIN                                      \
     9.5  {                                                                   \
     9.6      perdomain_ptes: 0,                                              \
     9.7 -    guest_table:    mk_pagetable(__pa(idle_pg_table))               \
     9.8 +    monitor_table:  mk_pagetable(__pa(idle_pg_table))               \
     9.9  }
    9.10  
    9.11  #endif /* __ASM_DOMAIN_H__ */
    10.1 --- a/xen/include/asm-x86/shadow.h	Sat Feb 12 03:11:43 2005 +0000
    10.2 +++ b/xen/include/asm-x86/shadow.h	Sun Feb 13 20:31:37 2005 +0000
    10.3 @@ -41,6 +41,7 @@ extern void shadow_l1_normal_pt_update(
    10.4  extern void shadow_l2_normal_pt_update(unsigned long pa, unsigned long gpde);
    10.5  extern void unshadow_table(unsigned long gpfn, unsigned int type);
    10.6  extern int shadow_mode_enable(struct domain *p, unsigned int mode);
    10.7 +extern void free_shadow_state(struct domain *d);
    10.8  
    10.9  #ifdef CONFIG_VMX
   10.10  extern void vmx_shadow_clear_state(struct domain *);
   10.11 @@ -723,43 +724,56 @@ static inline unsigned long gva_to_gpa(u
   10.12  
   10.13  #endif /* CONFIG_VMX */
   10.14  
   10.15 -static inline void __shadow_mk_pagetable(struct exec_domain *ed)
   10.16 +static inline void __update_pagetables(struct exec_domain *ed)
   10.17  {
   10.18      struct domain *d = ed->domain;
   10.19      unsigned long gpfn = pagetable_val(ed->arch.guest_table) >> PAGE_SHIFT;
   10.20      unsigned long smfn = __shadow_status(d, gpfn) & PSH_pfn_mask;
   10.21  
   10.22 -    SH_VVLOG("0: __shadow_mk_pagetable(gpfn=%p, smfn=%p)", gpfn, smfn);
   10.23 +    SH_VVLOG("0: __update_pagetables(gpfn=%p, smfn=%p)", gpfn, smfn);
   10.24  
   10.25      if ( unlikely(smfn == 0) )
   10.26          smfn = shadow_l2_table(d, gpfn);
   10.27  #ifdef CONFIG_VMX
   10.28      else
   10.29          if (d->arch.shadow_mode == SHM_full_32)
   10.30 +        {
   10.31              vmx_update_shadow_state(ed, gpfn, smfn);
   10.32 +        }
   10.33  #endif
   10.34  
   10.35      ed->arch.shadow_table = mk_pagetable(smfn<<PAGE_SHIFT);
   10.36 +
   10.37 +    if (d->arch.shadow_mode != SHM_full_32)
   10.38 +        ed->arch.monitor_table = ed->arch.shadow_table;
   10.39  }
   10.40  
   10.41 -static inline void shadow_mk_pagetable(struct exec_domain *ed)
   10.42 +static inline void update_pagetables(struct exec_domain *ed)
   10.43  {
   10.44       if ( unlikely(shadow_mode(ed->domain)) )
   10.45       {
   10.46 -         SH_VVLOG("shadow_mk_pagetable( gptbase=%p, mode=%d )",
   10.47 +         SH_VVLOG("update_pagetables( gptbase=%p, mode=%d )",
   10.48               pagetable_val(ed->arch.guest_table),
   10.49                    shadow_mode(ed->domain)); 
   10.50  
   10.51           shadow_lock(ed->domain);
   10.52 -         __shadow_mk_pagetable(ed);
   10.53 +         __update_pagetables(ed);
   10.54           shadow_unlock(ed->domain);
   10.55  
   10.56 -     SH_VVLOG("leaving shadow_mk_pagetable:\n"
   10.57 -              "( gptbase=%p, mode=%d ) sh=%p",
   10.58 -              pagetable_val(ed->arch.guest_table),
   10.59 -              shadow_mode(ed->domain), 
   10.60 -              pagetable_val(ed->arch.shadow_table) );
   10.61 +         SH_VVLOG("leaving update_pagetables:\n"
   10.62 +                  "( gptbase=%p, mode=%d ) sh=%p",
   10.63 +                  pagetable_val(ed->arch.guest_table),
   10.64 +                  shadow_mode(ed->domain), 
   10.65 +                  pagetable_val(ed->arch.shadow_table) );
   10.66       }
   10.67 +     else
   10.68 +#ifdef __x86_64__
   10.69 +         if ( !(ed->arch.flags & TF_kernel_mode) )
   10.70 +             ed->arch.monitor_table = ed->arch.guest_table_user;
   10.71 +         else
   10.72 +#endif
   10.73 +             ed->arch.monitor_table = ed->arch.guest_table;
   10.74 +
   10.75  }
   10.76  
   10.77  #if SHADOW_DEBUG