ia64/xen-unstable

changeset 3874:ad1d06d64313

bitkeeper revision 1.1227 (42179b08SzTlAiKTeTL94ij1uugTCw)

Merge burn.cl.cam.ac.uk:/auto/groups/xeno-xenod/BK/xen-unstable.bk
into burn.cl.cam.ac.uk:/local/scratch-1/maf46/xen-unstable.bk
author maf46@burn.cl.cam.ac.uk
date Sat Feb 19 20:01:12 2005 +0000 (2005-02-19)
parents 8cf0e6d01dab 523423e2510b
children ffb2d7ac4b7f
files BitKeeper/etc/logging_ok xen/arch/x86/domain.c xen/arch/x86/mm.c xen/arch/x86/shadow.c xen/arch/x86/vmx.c xen/arch/x86/vmx_intercept.c xen/arch/x86/vmx_io.c xen/arch/x86/vmx_vmcs.c xen/common/keyhandler.c xen/drivers/char/console.c xen/include/asm-x86/domain.h xen/include/asm-x86/shadow.h xen/include/xen/lib.h
line diff
     1.1 --- a/BitKeeper/etc/logging_ok	Sat Feb 19 10:25:07 2005 +0000
     1.2 +++ b/BitKeeper/etc/logging_ok	Sat Feb 19 20:01:12 2005 +0000
     1.3 @@ -43,6 +43,7 @@ kaf24@striker.cl.cam.ac.uk
     1.4  kaf24@viper.(none)
     1.5  laudney@eclipse.(none)
     1.6  lynx@idefix.cl.cam.ac.uk
     1.7 +maf46@burn.cl.cam.ac.uk
     1.8  mafetter@fleming.research
     1.9  mark@maw48.kings.cam.ac.uk
    1.10  maw48@labyrinth.cl.cam.ac.uk
     2.1 --- a/xen/arch/x86/domain.c	Sat Feb 19 10:25:07 2005 +0000
     2.2 +++ b/xen/arch/x86/domain.c	Sat Feb 19 20:01:12 2005 +0000
     2.3 @@ -248,6 +248,10 @@ void arch_do_createdomain(struct exec_do
     2.4          machine_to_phys_mapping[virt_to_phys(d->arch.mm_perdomain_pt) >> 
     2.5                                 PAGE_SHIFT] = INVALID_M2P_ENTRY;
     2.6          ed->arch.perdomain_ptes = d->arch.mm_perdomain_pt;
     2.7 +#if 0 /* don't need this yet, but maybe soon! */
     2.8 +        ed->arch.guest_vtable = linear_l2_table;
     2.9 +        ed->arch.shadow_vtable = shadow_linear_l2_table;
    2.10 +#endif
    2.11  
    2.12  #ifdef __x86_64__
    2.13          d->arch.mm_perdomain_l2 = (l2_pgentry_t *)alloc_xenheap_page();
    2.14 @@ -312,23 +316,23 @@ static void alloc_monitor_pagetable(stru
    2.15             &idle_pg_table[DOMAIN_ENTRIES_PER_L2_PAGETABLE],
    2.16             HYPERVISOR_ENTRIES_PER_L2_PAGETABLE * sizeof(l2_pgentry_t));
    2.17  
    2.18 -    ed->arch.monitor_table = mk_pagetable(mpfn << PAGE_SHIFT);
    2.19 -
    2.20      mpl2e[l2_table_offset(PERDOMAIN_VIRT_START)] =
    2.21          mk_l2_pgentry((__pa(d->arch.mm_perdomain_pt) & PAGE_MASK) 
    2.22                        | __PAGE_HYPERVISOR);
    2.23  
    2.24 +    ed->arch.monitor_table = mk_pagetable(mpfn << PAGE_SHIFT);
    2.25 +    ed->arch.monitor_vtable = mpl2e;
    2.26 +
    2.27      phys_table = (l2_pgentry_t *)
    2.28          map_domain_mem(pagetable_val(ed->arch.phys_table));
    2.29      memcpy(d->arch.mm_perdomain_pt, phys_table,
    2.30             L1_PAGETABLE_ENTRIES * sizeof(l1_pgentry_t));
    2.31  
    2.32      unmap_domain_mem(phys_table);
    2.33 -    unmap_domain_mem(mpl2e);
    2.34  }
    2.35  
    2.36  /*
    2.37 - * Free the pages for monitor_table and guest_pl2e_cache
    2.38 + * Free the pages for monitor_table and hl2_table
    2.39   */
    2.40  static void free_monitor_pagetable(struct exec_domain *ed)
    2.41  {
    2.42 @@ -337,10 +341,10 @@ static void free_monitor_pagetable(struc
    2.43  
    2.44      ASSERT( pagetable_val(ed->arch.monitor_table) );
    2.45      
    2.46 -    mpl2e = (l2_pgentry_t *)
    2.47 -        map_domain_mem(pagetable_val(ed->arch.monitor_table));
    2.48 +    mpl2e = ed->arch.monitor_vtable;
    2.49 +
    2.50      /*
    2.51 -     * First get the pfn for guest_pl2e_cache by looking at monitor_table
    2.52 +     * First get the pfn for hl2_table by looking at monitor_table
    2.53       */
    2.54      mpfn = l2_pgentry_val(mpl2e[LINEAR_PT_VIRT_START >> L2_PAGETABLE_SHIFT])
    2.55          >> PAGE_SHIFT;
    2.56 @@ -355,6 +359,7 @@ static void free_monitor_pagetable(struc
    2.57      free_domheap_page(&frame_table[mpfn]);
    2.58  
    2.59      ed->arch.monitor_table = mk_pagetable(0);
    2.60 +    ed->arch.monitor_vtable = 0;
    2.61  }
    2.62  
    2.63  static int vmx_final_setup_guest(struct exec_domain *ed,
    2.64 @@ -410,7 +415,7 @@ static int vmx_final_setup_guest(struct 
    2.65      /* We don't call update_pagetables() as we actively want fields such as 
    2.66       * the linear_pg_table to be null so that we bail out early of 
    2.67       * shadow_fault in case the vmx guest tries illegal accesses with
    2.68 -     * paging turned of. 
    2.69 +     * paging turned off. 
    2.70       */
    2.71      //update_pagetables(ed);     /* this assigns shadow_pagetable */
    2.72      alloc_monitor_pagetable(ed); /* this assigns monitor_pagetable */
    2.73 @@ -427,26 +432,27 @@ out:
    2.74  
    2.75  /* This is called by arch_final_setup_guest and do_boot_vcpu */
    2.76  int arch_final_setup_guest(
    2.77 -    struct exec_domain *d, full_execution_context_t *c)
    2.78 +    struct exec_domain *ed, full_execution_context_t *c)
    2.79  {
    2.80 +    struct domain *d = ed->domain;
    2.81      unsigned long phys_basetab;
    2.82      int i, rc;
    2.83  
    2.84 -    clear_bit(EDF_DONEFPUINIT, &d->ed_flags);
    2.85 +    clear_bit(EDF_DONEFPUINIT, &ed->ed_flags);
    2.86      if ( c->flags & ECF_I387_VALID )
    2.87 -        set_bit(EDF_DONEFPUINIT, &d->ed_flags);
    2.88 +        set_bit(EDF_DONEFPUINIT, &ed->ed_flags);
    2.89  
    2.90 -    d->arch.flags &= ~TF_kernel_mode;
    2.91 +    ed->arch.flags &= ~TF_kernel_mode;
    2.92      if ( c->flags & ECF_IN_KERNEL )
    2.93 -        d->arch.flags |= TF_kernel_mode;
    2.94 +        ed->arch.flags |= TF_kernel_mode;
    2.95  
    2.96 -    memcpy(&d->arch.user_ctxt,
    2.97 +    memcpy(&ed->arch.user_ctxt,
    2.98             &c->cpu_ctxt,
    2.99 -           sizeof(d->arch.user_ctxt));
   2.100 +           sizeof(ed->arch.user_ctxt));
   2.101  
   2.102      /* Clear IOPL for unprivileged domains. */
   2.103 -    if (!IS_PRIV(d->domain))
   2.104 -        d->arch.user_ctxt.eflags &= 0xffffcfff;
   2.105 +    if (!IS_PRIV(d))
   2.106 +        ed->arch.user_ctxt.eflags &= 0xffffcfff;
   2.107  
   2.108      /*
   2.109       * This is sufficient! If the descriptor DPL differs from CS RPL then we'll
   2.110 @@ -454,48 +460,48 @@ int arch_final_setup_guest(
   2.111       * If SS RPL or DPL differs from CS RPL then we'll #GP.
   2.112       */
   2.113      if (!(c->flags & ECF_VMX_GUEST)) 
   2.114 -        if ( ((d->arch.user_ctxt.cs & 3) == 0) ||
   2.115 -             ((d->arch.user_ctxt.ss & 3) == 0) )
   2.116 +        if ( ((ed->arch.user_ctxt.cs & 3) == 0) ||
   2.117 +             ((ed->arch.user_ctxt.ss & 3) == 0) )
   2.118                  return -EINVAL;
   2.119  
   2.120 -    memcpy(&d->arch.i387,
   2.121 +    memcpy(&ed->arch.i387,
   2.122             &c->fpu_ctxt,
   2.123 -           sizeof(d->arch.i387));
   2.124 +           sizeof(ed->arch.i387));
   2.125  
   2.126 -    memcpy(d->arch.traps,
   2.127 +    memcpy(ed->arch.traps,
   2.128             &c->trap_ctxt,
   2.129 -           sizeof(d->arch.traps));
   2.130 +           sizeof(ed->arch.traps));
   2.131  
   2.132 -    if ( (rc = (int)set_fast_trap(d, c->fast_trap_idx)) != 0 )
   2.133 +    if ( (rc = (int)set_fast_trap(ed, c->fast_trap_idx)) != 0 )
   2.134          return rc;
   2.135  
   2.136 -    d->arch.ldt_base = c->ldt_base;
   2.137 -    d->arch.ldt_ents = c->ldt_ents;
   2.138 +    ed->arch.ldt_base = c->ldt_base;
   2.139 +    ed->arch.ldt_ents = c->ldt_ents;
   2.140  
   2.141 -    d->arch.kernel_ss = c->kernel_ss;
   2.142 -    d->arch.kernel_sp = c->kernel_esp;
   2.143 +    ed->arch.kernel_ss = c->kernel_ss;
   2.144 +    ed->arch.kernel_sp = c->kernel_esp;
   2.145  
   2.146      for ( i = 0; i < 8; i++ )
   2.147 -        (void)set_debugreg(d, i, c->debugreg[i]);
   2.148 +        (void)set_debugreg(ed, i, c->debugreg[i]);
   2.149  
   2.150 -    d->arch.event_selector    = c->event_callback_cs;
   2.151 -    d->arch.event_address     = c->event_callback_eip;
   2.152 -    d->arch.failsafe_selector = c->failsafe_callback_cs;
   2.153 -    d->arch.failsafe_address  = c->failsafe_callback_eip;
   2.154 -    
   2.155 +    ed->arch.event_selector    = c->event_callback_cs;
   2.156 +    ed->arch.event_address     = c->event_callback_eip;
   2.157 +    ed->arch.failsafe_selector = c->failsafe_callback_cs;
   2.158 +    ed->arch.failsafe_address  = c->failsafe_callback_eip;
   2.159 +
   2.160      phys_basetab = c->pt_base;
   2.161 -    d->arch.guest_table = d->arch.phys_table = mk_pagetable(phys_basetab);
   2.162 +    ed->arch.guest_table = ed->arch.phys_table = mk_pagetable(phys_basetab);
   2.163  
   2.164 -    if ( !get_page_and_type(&frame_table[phys_basetab>>PAGE_SHIFT], d->domain, 
   2.165 +    if ( !get_page_and_type(&frame_table[phys_basetab>>PAGE_SHIFT], d, 
   2.166                              PGT_base_page_table) )
   2.167          return -EINVAL;
   2.168  
   2.169      /* Failure to set GDT is harmless. */
   2.170 -    SET_GDT_ENTRIES(d, DEFAULT_GDT_ENTRIES);
   2.171 -    SET_GDT_ADDRESS(d, DEFAULT_GDT_ADDRESS);
   2.172 +    SET_GDT_ENTRIES(ed, DEFAULT_GDT_ENTRIES);
   2.173 +    SET_GDT_ADDRESS(ed, DEFAULT_GDT_ADDRESS);
   2.174      if ( c->gdt_ents != 0 )
   2.175      {
   2.176 -        if ( (rc = (int)set_gdt(d, c->gdt_frames, c->gdt_ents)) != 0 )
   2.177 +        if ( (rc = (int)set_gdt(ed, c->gdt_frames, c->gdt_ents)) != 0 )
   2.178          {
   2.179              put_page_and_type(&frame_table[phys_basetab>>PAGE_SHIFT]);
   2.180              return rc;
   2.181 @@ -504,10 +510,10 @@ int arch_final_setup_guest(
   2.182  
   2.183  #ifdef CONFIG_VMX
   2.184      if (c->flags & ECF_VMX_GUEST)
   2.185 -        return vmx_final_setup_guest(d, c);
   2.186 +        return vmx_final_setup_guest(ed, c);
   2.187  #endif
   2.188  
   2.189 -    update_pagetables(d);
   2.190 +    update_pagetables(ed);
   2.191  
   2.192      return 0;
   2.193  }
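
The alloc_monitor_pagetable/free_monitor_pagetable hunks above stop unmapping the monitor L2 after setup: the mapping is cached in ed->arch.monitor_vtable so later code (e.g. __update_pagetables below) can edit the monitor table without a map_domain_mem/unmap_domain_mem round trip. A minimal sketch of the lifecycle, with hypothetical helper names:

    /* Sketch only: the attach/detach names are illustrative, not in the patch. */
    static void monitor_vtable_attach(struct exec_domain *ed,
                                      unsigned long mpfn, l2_pgentry_t *mpl2e)
    {
        ed->arch.monitor_table  = mk_pagetable(mpfn << PAGE_SHIFT);
        ed->arch.monitor_vtable = mpl2e;   /* keep the mapping live */
    }

    static void monitor_vtable_detach(struct exec_domain *ed)
    {
        ed->arch.monitor_table  = mk_pagetable(0);
        ed->arch.monitor_vtable = 0;       /* mapping no longer valid */
    }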
     3.1 --- a/xen/arch/x86/mm.c	Sat Feb 19 10:25:07 2005 +0000
     3.2 +++ b/xen/arch/x86/mm.c	Sat Feb 19 20:01:12 2005 +0000
     3.3 @@ -2974,4 +2974,4 @@ void audit_domains_key(unsigned char key
     3.4      audit_domains();
     3.5  }
     3.6  
     3.7 -#endif
     3.8 +#endif /* NDEBUG */
     4.1 --- a/xen/arch/x86/shadow.c	Sat Feb 19 10:25:07 2005 +0000
     4.2 +++ b/xen/arch/x86/shadow.c	Sat Feb 19 20:01:12 2005 +0000
     4.3 @@ -260,7 +260,7 @@ static int shadow_mode_table_op(
     4.4      ASSERT(spin_is_locked(&d->arch.shadow_lock));
     4.5  
     4.6      SH_VLOG("shadow mode table op %p %p count %d",
     4.7 -            pagetable_val(d->exec_domain[0]->arch.pagetable),    /* XXX SMP */
     4.8 +            pagetable_val(d->exec_domain[0]->arch.guest_table),  /* XXX SMP */
     4.9              pagetable_val(d->exec_domain[0]->arch.shadow_table), /* XXX SMP */
    4.10              d->arch.shadow_page_count);
    4.11  
    4.12 @@ -465,15 +465,15 @@ void vmx_shadow_clear_state(struct domai
    4.13  
    4.14  
    4.15  unsigned long shadow_l2_table( 
    4.16 -    struct domain *d, unsigned long gpfn)
    4.17 +    struct domain *d, unsigned long gmfn)
    4.18  {
    4.19      struct pfn_info *spfn_info;
    4.20      unsigned long    spfn;
    4.21 -    unsigned long guest_gpfn;
    4.22 +    unsigned long    gpfn;
    4.23  
    4.24 -    guest_gpfn = __mfn_to_gpfn(d, gpfn);
    4.25 +    gpfn = __mfn_to_gpfn(d, gmfn);
    4.26  
    4.27 -    SH_VVLOG("shadow_l2_table( %p )", gpfn);
    4.28 +    SH_VVLOG("shadow_l2_table( %p )", gmfn);
    4.29  
    4.30      perfc_incrc(shadow_l2_table_count);
    4.31  
    4.32 @@ -485,19 +485,11 @@ unsigned long shadow_l2_table(
    4.33  
    4.34      spfn = spfn_info - frame_table;
    4.35    /* Mark pfn as being shadowed; update field to point at shadow. */
    4.36 -    set_shadow_status(d, guest_gpfn, spfn | PSH_shadowed);
    4.37 +    set_shadow_status(d, gpfn, spfn | PSH_shadowed);
    4.38   
    4.39  #ifdef __i386__
     4.40      /* Install hypervisor and 2x linear p.t. mappings. */
    4.41 -    if ( shadow_mode_translate(d) )
    4.42 -    {
    4.43 -#ifdef CONFIG_VMX
    4.44 -        vmx_update_shadow_state(d->exec_domain[0], gpfn, spfn);
    4.45 -#else
    4.46 -        panic("Shadow Full 32 not yet implemented without VMX\n");
    4.47 -#endif
    4.48 -    }
    4.49 -    else
    4.50 +    if ( !shadow_mode_translate(d) )
    4.51      {
    4.52          l2_pgentry_t *spl2e;
    4.53          spl2e = (l2_pgentry_t *)map_domain_mem(spfn << PAGE_SHIFT);
    4.54 @@ -514,19 +506,19 @@ unsigned long shadow_l2_table(
    4.55                 &idle_pg_table[DOMAIN_ENTRIES_PER_L2_PAGETABLE],
    4.56                 HYPERVISOR_ENTRIES_PER_L2_PAGETABLE * sizeof(l2_pgentry_t));
    4.57          spl2e[LINEAR_PT_VIRT_START >> L2_PAGETABLE_SHIFT] =
    4.58 -            mk_l2_pgentry((gpfn << PAGE_SHIFT) | __PAGE_HYPERVISOR);
    4.59 +            mk_l2_pgentry((gmfn << PAGE_SHIFT) | __PAGE_HYPERVISOR);
    4.60          spl2e[SH_LINEAR_PT_VIRT_START >> L2_PAGETABLE_SHIFT] =
    4.61              mk_l2_pgentry((spfn << PAGE_SHIFT) | __PAGE_HYPERVISOR);
    4.62          spl2e[PERDOMAIN_VIRT_START >> L2_PAGETABLE_SHIFT] =
    4.63              mk_l2_pgentry(__pa(page_get_owner(
    4.64 -                &frame_table[gpfn])->arch.mm_perdomain_pt) |
    4.65 +                &frame_table[gmfn])->arch.mm_perdomain_pt) |
    4.66                            __PAGE_HYPERVISOR);
    4.67  
    4.68          unmap_domain_mem(spl2e);
    4.69      }
    4.70  #endif
    4.71  
    4.72 -    SH_VLOG("shadow_l2_table( %p -> %p)", gpfn, spfn);
    4.73 +    SH_VLOG("shadow_l2_table( %p -> %p)", gmfn, spfn);
    4.74      return spfn;
    4.75  }
    4.76  
    4.77 @@ -546,7 +538,7 @@ static void shadow_map_l1_into_current_l
    4.78      if ( !(sl1ss & PSH_shadowed) )
    4.79      {
    4.80          /* This L1 is NOT already shadowed so we need to shadow it. */
    4.81 -        SH_VVLOG("4a: l1 not shadowed ( %p )", sl1pfn);
    4.82 +        SH_VVLOG("4a: l1 not shadowed ( %p )", sl1ss);
    4.83  
    4.84          sl1mfn_info = alloc_shadow_page(d);
    4.85          sl1mfn_info->u.inuse.type_info = PGT_l1_page_table;
    4.86 @@ -584,14 +576,15 @@ static void shadow_map_l1_into_current_l
    4.87      }              
    4.88  }
    4.89  
    4.90 -#ifdef CONFIG_VMX
    4.91 -void vmx_shadow_invlpg(struct domain *d, unsigned long va)
    4.92 +void shadow_invlpg(struct exec_domain *ed, unsigned long va)
    4.93  {
    4.94 -    unsigned long gpte, spte, host_pfn;
    4.95 +    unsigned long gpte, spte;
    4.96 +
    4.97 +    ASSERT(shadow_mode_enabled(ed->domain));
    4.98  
    4.99      if (__put_user(0L, (unsigned long *)
   4.100                     &shadow_linear_pg_table[va >> PAGE_SHIFT])) {
   4.101 -        vmx_shadow_clear_state(d);
   4.102 +        vmx_shadow_clear_state(ed->domain);
   4.103          return;
   4.104      }
   4.105  
   4.106 @@ -600,15 +593,13 @@ void vmx_shadow_invlpg(struct domain *d,
   4.107          return;
   4.108      }
   4.109  
   4.110 -    host_pfn = phys_to_machine_mapping(gpte >> PAGE_SHIFT);
   4.111 -    spte = (host_pfn << PAGE_SHIFT) | (gpte & ~PAGE_MASK);
   4.112 +    l1pte_propagate_from_guest(ed->domain, &gpte, &spte);
   4.113  
   4.114      if (__put_user(spte, (unsigned long *)
   4.115                     &shadow_linear_pg_table[va >> PAGE_SHIFT])) {
   4.116          return;
   4.117      }
   4.118  }
   4.119 -#endif
   4.120  
   4.121  int shadow_fault(unsigned long va, long error_code)
   4.122  {
   4.123 @@ -616,9 +607,9 @@ int shadow_fault(unsigned long va, long 
   4.124      struct exec_domain *ed = current;
   4.125      struct domain *d = ed->domain;
   4.126  
   4.127 -    SH_VVLOG("shadow_fault( va=%p, code=%ld )", va, error_code );
   4.128 +    SH_VVLOG("shadow_fault( va=%p, code=%lu )", va, error_code );
   4.129  
   4.130 -    check_pagetable(d, ed->arch.pagetable, "pre-sf");
   4.131 +    check_pagetable(d, ed->arch.guest_table, "pre-sf");
   4.132  
   4.133      /*
   4.134       * STEP 1. A fast-reject set of checks with no locking.
   4.135 @@ -708,7 +699,7 @@ int shadow_fault(unsigned long va, long 
   4.136  
   4.137      shadow_unlock(d);
   4.138  
   4.139 -    check_pagetable(d, ed->arch.pagetable, "post-sf");
   4.140 +    check_pagetable(d, ed->arch.guest_table, "post-sf");
   4.141      return EXCRET_fault_fixed;
   4.142  }
   4.143  
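The shadow_l2_table renames pin down the three frame-number spaces in play: an mfn is a real machine frame, a gmfn is the machine frame backing a guest page, and a gpfn is the guest's pseudo-physical frame. In translate mode the shadow status table is keyed by gpfn, so the machine frame must be translated first. The idiom, restated from the hunk above:

    /* Illustrative restatement of the convention, not new code. */
    unsigned long gpfn = __mfn_to_gpfn(d, gmfn);       /* machine -> guest */
    set_shadow_status(d, gpfn, spfn | PSH_shadowed);   /* keyed by gpfn   */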
     5.1 --- a/xen/arch/x86/vmx.c	Sat Feb 19 10:25:07 2005 +0000
     5.2 +++ b/xen/arch/x86/vmx.c	Sat Feb 19 20:01:12 2005 +0000
     5.3 @@ -42,7 +42,7 @@
     5.4  #ifdef CONFIG_VMX
     5.5  
     5.6  int vmcs_size;
     5.7 -unsigned int opt_vmx_debug_level;
     5.8 +unsigned int opt_vmx_debug_level = 0;
     5.9  
    5.10  extern long evtchn_send(int lport);
    5.11  extern long do_block(void);
    5.12 @@ -115,11 +115,21 @@ static int vmx_do_page_fault(unsigned lo
    5.13      {
    5.14          __vmread(GUEST_EIP, &eip);
    5.15          VMX_DBG_LOG(DBG_LEVEL_VMMU, 
    5.16 -                "vmx_do_page_fault = 0x%lx, eip = %lx, erro_code = %lx\n", 
     5.17 +                "vmx_do_page_fault = 0x%lx, eip = %lx, error_code = %lx",
    5.18                  va, eip, error_code);
    5.19      }
    5.20  #endif
    5.21  
    5.22 +    /*
    5.23 +     * If vpagetable is zero, then we are still emulating 1:1 page tables,
    5.24 +     * and we should have never gotten here.
    5.25 +     */
    5.26 +    if ( !current->arch.guest_vtable )
    5.27 +    {
    5.28 +        printk("vmx_do_page_fault while still running on 1:1 page table\n");
    5.29 +        return 0;
    5.30 +    }
    5.31 +
    5.32      gpa = gva_to_gpa(va);
    5.33      if (!gpa)
    5.34          return 0;
    5.35 @@ -142,11 +152,11 @@ static void vmx_do_general_protection_fa
    5.36      __vmread(VM_EXIT_INTR_ERROR_CODE, &error_code);
    5.37  
    5.38      VMX_DBG_LOG(DBG_LEVEL_1,
    5.39 -            "vmx_general_protection_fault: eip = %lx, erro_code = %lx\n",
     5.40 +            "vmx_general_protection_fault: eip = %lx, error_code = %lx",
    5.41              eip, error_code);
    5.42  
    5.43      VMX_DBG_LOG(DBG_LEVEL_1,
    5.44 -            "eax=%lx, ebx=%lx, ecx=%lx, edx=%lx, esi=%lx, edi=%lx\n",
    5.45 +            "eax=%lx, ebx=%lx, ecx=%lx, edx=%lx, esi=%lx, edi=%lx",
    5.46              regs->eax, regs->ebx, regs->ecx, regs->edx, regs->esi, regs->edi);
    5.47  
    5.48      /* Reflect it back into the guest */
    5.49 @@ -167,7 +177,7 @@ static void vmx_vmexit_do_cpuid(unsigned
    5.50  
    5.51      VMX_DBG_LOG(DBG_LEVEL_1, 
    5.52                  "do_cpuid: (eax) %lx, (ebx) %lx, (ecx) %lx, (edx) %lx,"
    5.53 -                " (esi) %lx, (edi) %lx\n",
    5.54 +                " (esi) %lx, (edi) %lx",
    5.55                  regs->eax, regs->ebx, regs->ecx, regs->edx,
    5.56                  regs->esi, regs->edi);
    5.57  
    5.58 @@ -185,7 +195,7 @@ static void vmx_vmexit_do_cpuid(unsigned
    5.59      regs->edx = (unsigned long) edx;
    5.60  
    5.61      VMX_DBG_LOG(DBG_LEVEL_1, 
    5.62 -            "vmx_vmexit_do_cpuid: eip: %lx, input: %lx, out:eax=%x, ebx=%x, ecx=%x, edx=%x\n", 
    5.63 +            "vmx_vmexit_do_cpuid: eip: %lx, input: %lx, out:eax=%x, ebx=%x, ecx=%x, edx=%x",
    5.64              eip, input, eax, ebx, ecx, edx);
    5.65  
    5.66  }
    5.67 @@ -205,7 +215,7 @@ static void vmx_dr_access (unsigned long
    5.68      reg = exit_qualification & DEBUG_REG_ACCESS_NUM;
    5.69  
    5.70      VMX_DBG_LOG(DBG_LEVEL_1, 
    5.71 -                "vmx_dr_access : eip=%lx, reg=%d, exit_qualification = %lx\n",
    5.72 +                "vmx_dr_access : eip=%lx, reg=%d, exit_qualification = %lx",
    5.73                  eip, reg, exit_qualification);
    5.74  
    5.75      switch(exit_qualification & DEBUG_REG_ACCESS_REG) {
    5.76 @@ -255,25 +265,25 @@ static void vmx_vmexit_do_invlpg(unsigne
    5.77  
    5.78      __vmread(GUEST_EIP, &eip);
    5.79  
    5.80 -    VMX_DBG_LOG(DBG_LEVEL_VMMU, "vmx_vmexit_do_invlpg:eip=%p, va=%p\n",
    5.81 +    VMX_DBG_LOG(DBG_LEVEL_VMMU, "vmx_vmexit_do_invlpg:eip=%p, va=%p",
    5.82              eip, va);
    5.83  
    5.84      /*
    5.85       * We do the safest things first, then try to update the shadow
    5.86       * copying from guest
    5.87       */
    5.88 -    vmx_shadow_invlpg(ed->domain, va);
    5.89 +    shadow_invlpg(ed, va);
    5.90      index = (va >> L2_PAGETABLE_SHIFT);
    5.91 -    ed->arch.guest_pl2e_cache[index] = 
    5.92 +    ed->arch.hl2_vtable[index] = 
    5.93          mk_l2_pgentry(0); /* invalidate pgd cache */
    5.94  }
    5.95  
    5.96 -static inline void guest_pl2e_cache_invalidate(struct exec_domain *ed)
    5.97 +static inline void hl2_table_invalidate(struct exec_domain *ed)
    5.98  {
    5.99      /*
   5.100       * Need to optimize this
   5.101       */
   5.102 -    memset(ed->arch.guest_pl2e_cache, 0, PAGE_SIZE);
   5.103 +    memset(ed->arch.hl2_vtable, 0, PAGE_SIZE);
   5.104  }
   5.105  
   5.106  static void vmx_io_instruction(struct xen_regs *regs, 
   5.107 @@ -288,7 +298,7 @@ static void vmx_io_instruction(struct xe
   5.108      __vmread(GUEST_EIP, &eip);
   5.109  
   5.110      VMX_DBG_LOG(DBG_LEVEL_1, 
   5.111 -            "vmx_io_instruction: eip=%p, exit_qualification = %lx\n",
   5.112 +            "vmx_io_instruction: eip=%p, exit_qualification = %lx",
   5.113              eip, exit_qualification);
   5.114  
   5.115      if (test_bit(6, &exit_qualification))
   5.116 @@ -303,7 +313,7 @@ static void vmx_io_instruction(struct xe
   5.117  
   5.118      vio = (vcpu_iodata_t *) d->arch.arch_vmx.vmx_platform.shared_page_va;
   5.119      if (vio == 0) {
   5.120 -        VMX_DBG_LOG(DBG_LEVEL_1, "bad shared page: %lx\n", (unsigned long) vio);
   5.121 +        VMX_DBG_LOG(DBG_LEVEL_1, "bad shared page: %lx", (unsigned long) vio);
   5.122          domain_crash(); 
   5.123      }
   5.124      p = &vio->vp_ioreq;
   5.125 @@ -385,13 +395,13 @@ static void mov_to_cr(int gp, int cr, st
   5.126          __vmx_bug(regs);
   5.127      }
   5.128      
   5.129 -    VMX_DBG_LOG(DBG_LEVEL_1, "mov_to_cr: CR%d, value = %lx, \n", cr, value);
   5.130 -    VMX_DBG_LOG(DBG_LEVEL_1, "current = %lx, \n", (unsigned long) current);
   5.131 +    VMX_DBG_LOG(DBG_LEVEL_1, "mov_to_cr: CR%d, value = %lx,", cr, value);
   5.132 +    VMX_DBG_LOG(DBG_LEVEL_1, "current = %lx,", (unsigned long) current);
   5.133  
   5.134      switch(cr) {
   5.135      case 0: 
   5.136      {
   5.137 -        unsigned long old_base_pfn = 0, pfn;
   5.138 +        unsigned long old_base_mfn = 0, mfn;
   5.139  
   5.140          /* 
   5.141           * CR0:
   5.142 @@ -409,14 +419,14 @@ static void mov_to_cr(int gp, int cr, st
   5.143              /*
   5.144               * The guest CR3 must be pointing to the guest physical.
   5.145               */
   5.146 -            if (!(pfn = phys_to_machine_mapping(
   5.147 +            if (!(mfn = phys_to_machine_mapping(
   5.148                        d->arch.arch_vmx.cpu_cr3 >> PAGE_SHIFT))) 
   5.149              {
   5.150 -                VMX_DBG_LOG(DBG_LEVEL_VMMU, "Invalid CR3 value = %lx\n", 
   5.151 +                VMX_DBG_LOG(DBG_LEVEL_VMMU, "Invalid CR3 value = %lx", 
   5.152                          d->arch.arch_vmx.cpu_cr3);
   5.153                  domain_crash(); /* need to take a clean path */
   5.154              }
   5.155 -            old_base_pfn = pagetable_val(d->arch.guest_table) >> PAGE_SHIFT;
   5.156 +            old_base_mfn = pagetable_val(d->arch.guest_table) >> PAGE_SHIFT;
   5.157  
   5.158              /* We know that none of the previous 1:1 shadow pages are
   5.159               * going to be used again, so might as well flush them.
   5.160 @@ -429,27 +439,27 @@ static void mov_to_cr(int gp, int cr, st
   5.161              /*
   5.162               * Now arch.guest_table points to machine physical.
   5.163               */
   5.164 -            d->arch.guest_table = mk_pagetable(pfn << PAGE_SHIFT);
   5.165 +            d->arch.guest_table = mk_pagetable(mfn << PAGE_SHIFT);
   5.166              update_pagetables(d);
   5.167  
   5.168 -            VMX_DBG_LOG(DBG_LEVEL_VMMU, "New arch.guest_table = %lx\n", 
   5.169 -                    (unsigned long) (pfn << PAGE_SHIFT));
   5.170 +            VMX_DBG_LOG(DBG_LEVEL_VMMU, "New arch.guest_table = %lx", 
   5.171 +                    (unsigned long) (mfn << PAGE_SHIFT));
   5.172  
   5.173              __vmwrite(GUEST_CR3, pagetable_val(d->arch.shadow_table));
   5.174              /* 
   5.175               * arch->shadow_table should hold the next CR3 for shadow
   5.176               */
   5.177 -            VMX_DBG_LOG(DBG_LEVEL_VMMU, "Update CR3 value = %lx, pfn = %lx\n", 
   5.178 -                    d->arch.arch_vmx.cpu_cr3, pfn);
   5.179 +            VMX_DBG_LOG(DBG_LEVEL_VMMU, "Update CR3 value = %lx, mfn = %lx", 
   5.180 +                    d->arch.arch_vmx.cpu_cr3, mfn);
   5.181              /* undo the get_page done in the para virt case */
   5.182 -            put_page_and_type(&frame_table[old_base_pfn]);
   5.183 +            put_page_and_type(&frame_table[old_base_mfn]);
   5.184  
   5.185          }
   5.186          break;
   5.187      }
   5.188      case 3: 
   5.189      {
   5.190 -        unsigned long pfn;
   5.191 +        unsigned long mfn;
   5.192  
   5.193          /*
   5.194           * If paging is not enabled yet, simply copy the value to CR3.
   5.195 @@ -459,7 +469,7 @@ static void mov_to_cr(int gp, int cr, st
   5.196              break;
   5.197          }
   5.198          
   5.199 -        guest_pl2e_cache_invalidate(d);
   5.200 +        hl2_table_invalidate(d);
   5.201          /*
   5.202           * We make a new one if the shadow does not exist.
   5.203           */
   5.204 @@ -469,8 +479,8 @@ static void mov_to_cr(int gp, int cr, st
   5.205               * removed some translation or changed page attributes.
   5.206               * We simply invalidate the shadow.
   5.207               */
   5.208 -            pfn = phys_to_machine_mapping(value >> PAGE_SHIFT);
   5.209 -            if ((pfn << PAGE_SHIFT) != pagetable_val(d->arch.guest_table))
   5.210 +            mfn = phys_to_machine_mapping(value >> PAGE_SHIFT);
   5.211 +            if ((mfn << PAGE_SHIFT) != pagetable_val(d->arch.guest_table))
   5.212                  __vmx_bug(regs);
   5.213              vmx_shadow_clear_state(d->domain);
   5.214              shadow_invalidate(d);
   5.215 @@ -479,22 +489,22 @@ static void mov_to_cr(int gp, int cr, st
   5.216               * If different, make a shadow. Check if the PDBR is valid
   5.217               * first.
   5.218               */
   5.219 -            VMX_DBG_LOG(DBG_LEVEL_VMMU, "CR3 value = %lx\n", value);
   5.220 +            VMX_DBG_LOG(DBG_LEVEL_VMMU, "CR3 value = %lx", value);
   5.221              if ((value >> PAGE_SHIFT) > d->domain->max_pages)
   5.222              {
   5.223                  VMX_DBG_LOG(DBG_LEVEL_VMMU, 
   5.224 -                        "Invalid CR3 value=%lx\n", value);
   5.225 +                        "Invalid CR3 value=%lx", value);
   5.226                  domain_crash(); /* need to take a clean path */
   5.227              }
   5.228 -            pfn = phys_to_machine_mapping(value >> PAGE_SHIFT);
   5.229 +            mfn = phys_to_machine_mapping(value >> PAGE_SHIFT);
   5.230              vmx_shadow_clear_state(d->domain);
   5.231 -            d->arch.guest_table  = mk_pagetable(pfn << PAGE_SHIFT);
   5.232 +            d->arch.guest_table  = mk_pagetable(mfn << PAGE_SHIFT);
   5.233              update_pagetables(d); 
   5.234              /* 
   5.235               * arch.shadow_table should now hold the next CR3 for shadow
   5.236               */
   5.237              d->arch.arch_vmx.cpu_cr3 = value;
   5.238 -            VMX_DBG_LOG(DBG_LEVEL_VMMU, "Update CR3 value = %lx\n", 
   5.239 +            VMX_DBG_LOG(DBG_LEVEL_VMMU, "Update CR3 value = %lx",
   5.240                      value);
   5.241              __vmwrite(GUEST_CR3, pagetable_val(d->arch.shadow_table));
   5.242          }
   5.243 @@ -516,7 +526,7 @@ static void mov_to_cr(int gp, int cr, st
   5.244          if ((old_cr ^ value) & (X86_CR4_PSE | X86_CR4_PGE | X86_CR4_PAE)) {
   5.245              vmx_shadow_clear_state(d->domain);
   5.246              shadow_invalidate(d);
   5.247 -            guest_pl2e_cache_invalidate(d);
   5.248 +            hl2_table_invalidate(d);
   5.249          }
   5.250          break;
   5.251      default:
   5.252 @@ -561,7 +571,7 @@ static void mov_from_cr(int cr, int gp, 
   5.253          __vmx_bug(regs);
   5.254      }
   5.255  
   5.256 -    VMX_DBG_LOG(DBG_LEVEL_VMMU, "mov_from_cr: CR%d, value = %lx, \n", cr, value);
   5.257 +    VMX_DBG_LOG(DBG_LEVEL_VMMU, "mov_from_cr: CR%d, value = %lx,", cr, value);
   5.258  }
   5.259  
   5.260  static void vmx_cr_access (unsigned long exit_qualification, struct xen_regs *regs)
   5.261 @@ -616,7 +626,7 @@ static inline void vmx_vmexit_do_hlt()
   5.262      unsigned long eip;
   5.263      __vmread(GUEST_EIP, &eip);
   5.264  #endif
   5.265 -    VMX_DBG_LOG(DBG_LEVEL_1, "vmx_vmexit_do_hlt:eip=%p\n", eip);
   5.266 +    VMX_DBG_LOG(DBG_LEVEL_1, "vmx_vmexit_do_hlt:eip=%p", eip);
   5.267      __enter_scheduler();
   5.268  }
   5.269  
   5.270 @@ -626,7 +636,7 @@ static inline void vmx_vmexit_do_mwait()
   5.271      unsigned long eip;
   5.272      __vmread(GUEST_EIP, &eip);
   5.273  #endif
   5.274 -    VMX_DBG_LOG(DBG_LEVEL_1, "vmx_vmexit_do_mwait:eip=%p\n", eip);
   5.275 +    VMX_DBG_LOG(DBG_LEVEL_1, "vmx_vmexit_do_mwait:eip=%p", eip);
   5.276      __enter_scheduler();
   5.277  }
   5.278  
   5.279 @@ -718,7 +728,7 @@ asmlinkage void vmx_vmexit_handler(struc
   5.280              __vmread(VM_EXIT_INTR_ERROR_CODE, &error_code);
   5.281              printk("#PG error code: %lx\n", error_code);
   5.282          }
   5.283 -        VMX_DBG_LOG(DBG_LEVEL_1, "idtv_info_field=%x\n", 
   5.284 +        VMX_DBG_LOG(DBG_LEVEL_1, "idtv_info_field=%x",
   5.285                  idtv_info_field);
   5.286      }
   5.287  
   5.288 @@ -726,7 +736,7 @@ asmlinkage void vmx_vmexit_handler(struc
   5.289      if (exit_reason != EXIT_REASON_EXTERNAL_INTERRUPT &&
   5.290          exit_reason != EXIT_REASON_VMCALL &&
   5.291          exit_reason != EXIT_REASON_IO_INSTRUCTION)
   5.292 -        VMX_DBG_LOG(DBG_LEVEL_0, "exit reason = %x\n", exit_reason);
   5.293 +        VMX_DBG_LOG(DBG_LEVEL_0, "exit reason = %x", exit_reason);
   5.294  
   5.295      if (exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY) {
   5.296          domain_crash();         
   5.297 @@ -784,7 +794,7 @@ asmlinkage void vmx_vmexit_handler(struc
   5.298              __vmread(EXIT_QUALIFICATION, &va);
   5.299              __vmread(VM_EXIT_INTR_ERROR_CODE, &error_code);
   5.300              VMX_DBG_LOG(DBG_LEVEL_VMMU, 
   5.301 -                    "eax=%lx, ebx=%lx, ecx=%lx, edx=%lx, esi=%lx, edi=%lx\n",
   5.302 +                    "eax=%lx, ebx=%lx, ecx=%lx, edx=%lx, esi=%lx, edi=%lx",
   5.303                          regs.eax, regs.ebx, regs.ecx, regs.edx, regs.esi,
   5.304                          regs.edi);
   5.305              ed->arch.arch_vmx.vmx_platform.mpci.inst_decoder_regs = &regs;
   5.306 @@ -809,7 +819,8 @@ asmlinkage void vmx_vmexit_handler(struc
   5.307              do_nmi(&regs, 0);
   5.308              break;
   5.309          default:
   5.310 -            __vmx_bug(&regs);
   5.311 +            printk("unexpected VMexit for exception vector 0x%x\n", vector);
   5.312 +            //__vmx_bug(&regs);
   5.313              break;
   5.314          }
   5.315          break;
   5.316 @@ -880,7 +891,7 @@ asmlinkage void vmx_vmexit_handler(struc
   5.317          __get_instruction_length(inst_len);
   5.318          __vmread(EXIT_QUALIFICATION, &exit_qualification);
   5.319  
   5.320 -        VMX_DBG_LOG(DBG_LEVEL_1, "eip = %lx, inst_len =%lx, exit_qualification = %lx\n", 
   5.321 +        VMX_DBG_LOG(DBG_LEVEL_1, "eip = %lx, inst_len =%lx, exit_qualification = %lx", 
   5.322                  eip, inst_len, exit_qualification);
   5.323          vmx_cr_access(exit_qualification, &regs);
   5.324          __update_guest_eip(inst_len);
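
Once the guest enables paging, every CR3 path in mov_to_cr above follows the same sequence: translate the guest's pseudo-physical CR3 to a machine frame, discard stale shadow and hl2 state, rebuild via update_pagetables, and point the hardware at the shadow root. A condensed sketch, omitting the CR3-unchanged fast path:

    /* Condensed from the mov_to_cr CR3 handling above; a sketch, not a
     * drop-in replacement. */
    static void guest_cr3_write(struct exec_domain *ed, unsigned long value)
    {
        unsigned long mfn = phys_to_machine_mapping(value >> PAGE_SHIFT);

        hl2_table_invalidate(ed);              /* pgd cache is stale    */
        vmx_shadow_clear_state(ed->domain);    /* old shadows unusable  */
        ed->arch.guest_table = mk_pagetable(mfn << PAGE_SHIFT);
        update_pagetables(ed);                 /* rebuilds shadow_table */
        ed->arch.arch_vmx.cpu_cr3 = value;     /* value the guest reads */
        __vmwrite(GUEST_CR3, pagetable_val(ed->arch.shadow_table));
    }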
     6.1 --- a/xen/arch/x86/vmx_intercept.c	Sat Feb 19 10:25:07 2005 +0000
     6.2 +++ b/xen/arch/x86/vmx_intercept.c	Sat Feb 19 20:01:12 2005 +0000
     6.3 @@ -69,7 +69,7 @@ static void pit_cal_count(struct vmx_vir
     6.4  {
     6.5      unsigned int usec_delta = (unsigned int)((NOW() - vpit->inject_point) / 1000);
     6.6      if (usec_delta > vpit->period * 1000)
     6.7 -        VMX_DBG_LOG(DBG_LEVEL_1, "VMX_PIT:long time has passed from last injection!\n");
      6.8 +        VMX_DBG_LOG(DBG_LEVEL_1, "VMX_PIT: a long time has passed since the last injection!");
     6.9      vpit->count = vpit->init_val - ((usec_delta * PIT_FREQ / 1000000) % vpit->init_val );
    6.10  }
    6.11  
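pit_cal_count recovers the current PIT counter from wall-clock time: at PIT_FREQ ticks per second, usec_delta microseconds correspond to usec_delta * PIT_FREQ / 1000000 ticks, reduced modulo the programmed initial count. A worked example, assuming PIT_FREQ = 1193181 and init_val = 11932 (a ~10 ms period):

    /* usec_delta = 25000 us:
     *   ticks = 25000 * 1193181 / 1000000              = 29829
     *   count = 11932 - (29829 % 11932) = 11932 - 5965 = 5967
     * i.e. roughly half-way through the third timer period. */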
     7.1 --- a/xen/arch/x86/vmx_io.c	Sat Feb 19 10:25:07 2005 +0000
     7.2 +++ b/xen/arch/x86/vmx_io.c	Sat Feb 19 20:01:12 2005 +0000
     7.3 @@ -194,7 +194,7 @@ void vmx_io_assist(struct exec_domain *e
     7.4      vio = (vcpu_iodata_t *) ed->arch.arch_vmx.vmx_platform.shared_page_va;
     7.5      if (vio == 0) {
     7.6          VMX_DBG_LOG(DBG_LEVEL_1, 
     7.7 -                    "bad shared page: %lx\n", (unsigned long) vio);
     7.8 +                    "bad shared page: %lx", (unsigned long) vio);
     7.9          domain_crash();
    7.10      }
    7.11      p = &vio->vp_ioreq;
    7.12 @@ -313,7 +313,7 @@ static inline int find_highest_pending_i
    7.13      vio = (vcpu_iodata_t *) d->arch.arch_vmx.vmx_platform.shared_page_va;
    7.14      if (vio == 0) {
    7.15          VMX_DBG_LOG(DBG_LEVEL_1, 
    7.16 -                    "bad shared page: %lx\n", (unsigned long) vio);
    7.17 +                    "bad shared page: %lx", (unsigned long) vio);
    7.18          domain_crash();
    7.19      }
    7.20          
    7.21 @@ -327,7 +327,7 @@ static inline void clear_highest_bit(str
    7.22      vio = (vcpu_iodata_t *) d->arch.arch_vmx.vmx_platform.shared_page_va;
    7.23      if (vio == 0) {
    7.24          VMX_DBG_LOG(DBG_LEVEL_1, 
    7.25 -                    "bad shared page: %lx\n", (unsigned long) vio);
    7.26 +                    "bad shared page: %lx", (unsigned long) vio);
    7.27          domain_crash();
    7.28      }
    7.29          
    7.30 @@ -350,14 +350,14 @@ void vmx_intr_assist(struct exec_domain 
    7.31  
    7.32      __vmread(VM_ENTRY_INTR_INFO_FIELD, &intr_fields);
    7.33      if (intr_fields & INTR_INFO_VALID_MASK) {
    7.34 -        VMX_DBG_LOG(DBG_LEVEL_1, "vmx_intr_assist: intr_fields: %lx\n", 
    7.35 +        VMX_DBG_LOG(DBG_LEVEL_1, "vmx_intr_assist: intr_fields: %lx",
    7.36                      intr_fields);
    7.37          return;
    7.38      }
    7.39  
    7.40      __vmread(GUEST_EFLAGS, &eflags);
    7.41      if (irq_masked(eflags)) {
    7.42 -        VMX_DBG_LOG(DBG_LEVEL_1, "guesting pending: %x, eflags: %lx\n", 
     7.43 +        VMX_DBG_LOG(DBG_LEVEL_1, "guest pending: %x, eflags: %lx",
    7.44                      highest_vector, eflags);
    7.45          return;
    7.46      }
    7.47 @@ -380,8 +380,13 @@ void vmx_intr_assist(struct exec_domain 
    7.48  
    7.49  void vmx_do_resume(struct exec_domain *d) 
    7.50  {
    7.51 +    if ( d->arch.guest_vtable )
    7.52 +        __vmwrite(GUEST_CR3, pagetable_val(d->arch.shadow_table));
    7.53 +    else
    7.54 +        // we haven't switched off the 1:1 pagetable yet...
    7.55 +        __vmwrite(GUEST_CR3, pagetable_val(d->arch.guest_table));
    7.56 +
    7.57      __vmwrite(HOST_CR3, pagetable_val(d->arch.monitor_table));
    7.58 -    __vmwrite(GUEST_CR3, pagetable_val(d->arch.shadow_table));
    7.59      __vmwrite(HOST_ESP, (unsigned long)get_stack_bottom());
    7.60  
    7.61      if (event_pending(d)) {
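The vmx_do_resume change above leans on an invariant also used in vmx_do_page_fault: guest_vtable doubles as the "shadow paging is live" flag.

    /* Sketch of the convention, not new code:
     *   ed->arch.guest_vtable == NULL  =>  still on the 1:1 phys_table;
     *                                      hardware CR3 = guest_table.
     *   ed->arch.guest_vtable != NULL  =>  shadow mode active;
     *                                      hardware CR3 = shadow_table. */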
     8.1 --- a/xen/arch/x86/vmx_vmcs.c	Sat Feb 19 10:25:07 2005 +0000
     8.2 +++ b/xen/arch/x86/vmx_vmcs.c	Sat Feb 19 20:01:12 2005 +0000
     8.3 @@ -113,7 +113,7 @@ int vmx_setup_platform(struct exec_domai
     8.4  
     8.5      n = context->ecx;
     8.6      if (n > 32) {
     8.7 -        VMX_DBG_LOG(DBG_LEVEL_1, "Too many e820 entries: %d\n", n);
     8.8 +        VMX_DBG_LOG(DBG_LEVEL_1, "Too many e820 entries: %d", n);
     8.9          return -1;
    8.10      }
    8.11  
    8.12 @@ -158,7 +158,7 @@ void vmx_do_launch(struct exec_domain *e
    8.13      struct host_execution_env host_env;
    8.14      struct Xgt_desc_struct desc;
    8.15      struct list_head *list_ent;
    8.16 -    l2_pgentry_t *mpl2e, *guest_pl2e_cache;
    8.17 +    l2_pgentry_t *mpl2e, *hl2_vtable;
    8.18      unsigned long i, pfn = 0;
    8.19      struct pfn_info *page;
    8.20      execution_context_t *ec = get_execution_context();
    8.21 @@ -191,9 +191,9 @@ void vmx_do_launch(struct exec_domain *e
    8.22      mpl2e[LINEAR_PT_VIRT_START >> L2_PAGETABLE_SHIFT] =
    8.23          mk_l2_pgentry((pfn << PAGE_SHIFT)| __PAGE_HYPERVISOR);
    8.24  
    8.25 -    guest_pl2e_cache = map_domain_mem(pfn << PAGE_SHIFT);
    8.26 -    memset(guest_pl2e_cache, 0, PAGE_SIZE); /* clean it up */
    8.27 -    ed->arch.guest_pl2e_cache = guest_pl2e_cache; 
    8.28 +    hl2_vtable = map_domain_mem(pfn << PAGE_SHIFT);
    8.29 +    memset(hl2_vtable, 0, PAGE_SIZE); /* clean it up */
    8.30 +    ed->arch.hl2_vtable = hl2_vtable; 
    8.31          
    8.32      unmap_domain_mem(mpl2e);
    8.33  
     9.1 --- a/xen/common/keyhandler.c	Sat Feb 19 10:25:07 2005 +0000
     9.2 +++ b/xen/common/keyhandler.c	Sat Feb 19 20:01:12 2005 +0000
     9.3 @@ -155,6 +155,17 @@ void do_debug_key(unsigned char key, str
     9.4                               bit. */
     9.5  }
     9.6  
     9.7 +#ifndef NDEBUG
     9.8 +void debugtrace_key(unsigned char key)
     9.9 +{
    9.10 +    static int send_to_console = 0;
    9.11 +
    9.12 +    send_to_console = !send_to_console;
    9.13 +    printk("Toggling the state of debugtrace_printk\n");
    9.14 +    debugtrace_dump(send_to_console);
    9.15 +}
    9.16 +#endif
    9.17 +
    9.18  void initialize_keytable(void)
    9.19  {
    9.20      open_softirq(KEYPRESS_SOFTIRQ, keypress_softirq);
    9.21 @@ -176,7 +187,9 @@ void initialize_keytable(void)
    9.22  
    9.23  #ifndef NDEBUG
    9.24      register_keyhandler(
    9.25 -        'o', audit_domains_key,  "audit domains >0 EXPERIMENTAL"); 
    9.26 +        'o', audit_domains_key,  "audit domains >0 EXPERIMENTAL");
    9.27 +    register_keyhandler(
    9.28 +        'T', debugtrace_key, "dump debugtrace");
    9.29  #endif
    9.30  
    9.31  #ifdef PERF_COUNTERS
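
register_keyhandler takes a key, a void handler(unsigned char), and a help string, exactly as the new 'T' binding shows. Adding another debug key follows the same two steps; a minimal sketch with a hypothetical handler:

    /* Hypothetical example of the same pattern; 'X' and its handler
     * are illustrative, not part of this changeset. */
    static void dump_foo_key(unsigned char key)
    {
        printk("'%c' pressed: dumping foo state\n", key);
    }

    /* ... in initialize_keytable(): */
    register_keyhandler('X', dump_foo_key, "dump foo state");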
    10.1 --- a/xen/drivers/char/console.c	Sat Feb 19 10:25:07 2005 +0000
    10.2 +++ b/xen/drivers/char/console.c	Sat Feb 19 20:01:12 2005 +0000
    10.3 @@ -479,16 +479,35 @@ void console_force_lock(void)
    10.4  static unsigned char *debugtrace_buf; /* Debug-trace buffer */
    10.5  static unsigned int   debugtrace_prd; /* Producer index     */
    10.6  static unsigned int   debugtrace_kilobytes = 128, debugtrace_bytes;
    10.7 +static int            debugtrace_send_to_console = 0;
    10.8 +static spinlock_t debugtrace_lock = SPIN_LOCK_UNLOCKED;
    10.9  integer_param("debugtrace", debugtrace_kilobytes);
   10.10 -#define DEBUGTRACE_MASK(_p) ((_p) & (debugtrace_bytes-1))
   10.11  
   10.12 -void debugtrace_reset(void)
   10.13 +
   10.14 +static void _debugtrace_reset(int send_to_console)
   10.15  {
   10.16 +    if (send_to_console)
    10.17 +        printk("debugtrace_printk now writing to console\n");
   10.18 +    else
    10.19 +        printk("debugtrace_printk now writing to buffer\n");
   10.20 +
   10.21      if ( debugtrace_bytes != 0 )
   10.22          memset(debugtrace_buf, '\0', debugtrace_bytes);
   10.23 +
   10.24 +    debugtrace_prd = 0;
   10.25 +    debugtrace_send_to_console = send_to_console;
   10.26  }
   10.27  
   10.28 -void debugtrace_dump(void)
   10.29 +void debugtrace_reset(int send_to_console)
   10.30 +{
   10.31 +    unsigned long flags;
   10.32 +
   10.33 +    spin_lock_irqsave(&debugtrace_lock, flags);
   10.34 +    _debugtrace_reset(send_to_console);
   10.35 +    spin_unlock_irqrestore(&debugtrace_lock, flags);
   10.36 +}
   10.37 +
   10.38 +void debugtrace_dump(int send_to_console)
   10.39  {
   10.40      int _watchdog_on = watchdog_on;
   10.41  
   10.42 @@ -498,23 +517,24 @@ void debugtrace_dump(void)
   10.43      /* Watchdog can trigger if we print a really large buffer. */
   10.44      watchdog_on = 0;
   10.45  
   10.46 +    spin_lock(&debugtrace_lock);
   10.47 +
   10.48      /* Print oldest portion of the ring. */
   10.49 -    serial_puts(sercon_handle,
   10.50 -                &debugtrace_buf[DEBUGTRACE_MASK(debugtrace_prd)]);
   10.51 +    serial_puts(sercon_handle, &debugtrace_buf[debugtrace_prd]);
   10.52  
   10.53      /* Print youngest portion of the ring. */
   10.54 -    debugtrace_buf[DEBUGTRACE_MASK(debugtrace_prd)] = '\0';
   10.55 -    serial_puts(sercon_handle,
   10.56 -                &debugtrace_buf[0]);
   10.57 +    debugtrace_buf[debugtrace_prd] = '\0';
   10.58 +    serial_puts(sercon_handle, &debugtrace_buf[0]);
   10.59  
   10.60 -    debugtrace_reset();
   10.61 +    _debugtrace_reset(send_to_console);
   10.62 +
   10.63 +    spin_unlock(&debugtrace_lock);
   10.64  
   10.65      watchdog_on = _watchdog_on;
   10.66  }
   10.67  
   10.68  void debugtrace_printk(const char *fmt, ...)
   10.69  {
   10.70 -    static spinlock_t _lock = SPIN_LOCK_UNLOCKED;
   10.71      static char       buf[1024];
   10.72  
   10.73      va_list       args;
   10.74 @@ -524,16 +544,29 @@ void debugtrace_printk(const char *fmt, 
   10.75      if ( debugtrace_bytes == 0 )
   10.76          return;
   10.77  
   10.78 -    spin_lock_irqsave(&_lock, flags);
   10.79 +    spin_lock_irqsave(&debugtrace_lock, flags);
   10.80  
   10.81      va_start(args, fmt);
   10.82      (void)vsnprintf(buf, sizeof(buf), fmt, args);
   10.83 -    va_end(args);        
   10.84 +    va_end(args);
   10.85  
   10.86 -    for ( p = buf; *p != '\0'; p++ )
   10.87 -        debugtrace_buf[DEBUGTRACE_MASK(debugtrace_prd++)] = *p;
   10.88 +    if (debugtrace_send_to_console)
   10.89 +    {
   10.90 +        serial_puts(sercon_handle, buf);
   10.91 +    }
   10.92 +    else
   10.93 +    {
   10.94 +        for ( p = buf; *p != '\0'; p++ )
   10.95 +        {
   10.96 +            debugtrace_buf[debugtrace_prd++] = *p;
   10.97 +            
   10.98 +            /* always leave a null byte at the end of the buffer */
   10.99 +            if (debugtrace_prd == debugtrace_bytes-1)
  10.100 +                debugtrace_prd = 0;
  10.101 +        }
  10.102 +    }
  10.103  
  10.104 -    spin_unlock_irqrestore(&_lock, flags);
  10.105 +    spin_unlock_irqrestore(&debugtrace_lock, flags);
  10.106  }
  10.107  
  10.108  static int __init debugtrace_init(void)
  10.109 @@ -553,7 +586,7 @@ static int __init debugtrace_init(void)
  10.110      debugtrace_buf = (unsigned char *)alloc_xenheap_pages(order);
  10.111      ASSERT(debugtrace_buf != NULL);
  10.112  
  10.113 -    memset(debugtrace_buf, '\0', debugtrace_bytes);
  10.114 +    debugtrace_reset(0);
  10.115  
  10.116      return 0;
  10.117  }
  10.118 @@ -578,6 +611,8 @@ void panic(const char *fmt, ...)
  10.119      unsigned long flags;
  10.120      extern void machine_restart(char *);
  10.121      
  10.122 +    debugtrace_dump(0);
  10.123 +
  10.124      va_start(args, fmt);
  10.125      (void)vsnprintf(buf, sizeof(buf), fmt, args);
  10.126      va_end(args);
  10.127 @@ -596,8 +631,6 @@ void panic(const char *fmt, ...)
  10.128      __putstr("Reboot in five seconds...\n");
  10.129      spin_unlock_irqrestore(&console_lock, flags);
  10.130  
  10.131 -    debugtrace_dump();
  10.132 -
  10.133      watchdog_on = 0;
  10.134      mdelay(5000);
  10.135      machine_restart(0);
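The rewritten trace buffer drops the power-of-two DEBUGTRACE_MASK: the producer wraps one byte early so the final byte stays NUL, and the buffer starts zeroed, so debugtrace_dump can emit the two halves of the ring with plain serial_puts. A standalone sketch of the same discipline, with illustrative names:

    /* Sketch of the ring discipline above; 'buf'/'prd' are illustrative.
     * Each half of the ring is always a valid C string. */
    static char buf[4096];
    static unsigned int prd;                 /* producer index */

    static void ring_putc(char c)
    {
        buf[prd++] = c;
        if ( prd == sizeof(buf) - 1 )        /* reserve the final NUL */
            prd = 0;
    }

    static void ring_dump(void (*puts)(const char *))
    {
        puts(&buf[prd]);                     /* oldest portion   */
        buf[prd] = '\0';
        puts(&buf[0]);                       /* youngest portion */
    }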
    11.1 --- a/xen/include/asm-x86/domain.h	Sat Feb 19 10:25:07 2005 +0000
    11.2 +++ b/xen/include/asm-x86/domain.h	Sat Feb 19 20:01:12 2005 +0000
    11.3 @@ -66,7 +66,7 @@ struct arch_exec_domain
    11.4       * The stack frame for events is exactly that of an x86 hardware interrupt.
    11.5       * The stack frame for a failsafe callback is augmented with saved values
    11.6       * for segment registers %ds, %es, %fs and %gs:
    11.7 -     * 	%ds, %es, %fs, %gs, %eip, %cs, %eflags [, %oldesp, %oldss]
    11.8 +     *  %ds, %es, %fs, %gs, %eip, %cs, %eflags [, %oldesp, %oldss]
    11.9       */
   11.10  
   11.11      unsigned long event_selector;    /* entry CS  (x86/32 only) */
   11.12 @@ -102,15 +102,16 @@ struct arch_exec_domain
   11.13      l1_pgentry_t *perdomain_ptes;
   11.14  
   11.15      pagetable_t  guest_table_user;      /* x86/64: user-space pagetable. */
   11.16 -    pagetable_t  guest_table;           /* guest notion of cr3 */
   11.17 -    pagetable_t  shadow_table;          /* shadow of guest */
   11.18 -    pagetable_t  monitor_table;         /* used in hypervisor */
   11.19 +    pagetable_t  guest_table;           /* (MA) guest notion of cr3 */
   11.20 +    pagetable_t  shadow_table;          /* (MA) shadow of guest */
   11.21 +    pagetable_t  monitor_table;         /* (MA) used in hypervisor */
   11.22  
   11.23      pagetable_t  phys_table;            /* guest 1:1 pagetable */
   11.24  
   11.25 -    l2_pgentry_t *vpagetable;	        /* virtual address of pagetable */
   11.26 -    l2_pgentry_t *shadow_vtable;	/* virtual address of shadow_table */
   11.27 -    l2_pgentry_t *guest_pl2e_cache;	/* guest page directory cache */
   11.28 +    l2_pgentry_t *guest_vtable;         /* virtual address of pagetable */
   11.29 +    l2_pgentry_t *shadow_vtable;        /* virtual address of shadow_table */
   11.30 +    l2_pgentry_t *hl2_vtable;			/* virtual address of hl2_table */
   11.31 +    l2_pgentry_t *monitor_vtable;		/* virtual address of monitor_table */
   11.32  
   11.33      /* Virtual CR2 value. Can be read/written by guest. */
   11.34      unsigned long guest_cr2;
    12.1 --- a/xen/include/asm-x86/shadow.h	Sat Feb 19 10:25:07 2005 +0000
    12.2 +++ b/xen/include/asm-x86/shadow.h	Sat Feb 19 20:01:12 2005 +0000
    12.3 @@ -46,10 +46,10 @@ extern void shadow_l2_normal_pt_update(u
    12.4  extern void unshadow_table(unsigned long gpfn, unsigned int type);
    12.5  extern int shadow_mode_enable(struct domain *p, unsigned int mode);
    12.6  extern void free_shadow_state(struct domain *d);
    12.7 +extern void shadow_invlpg(struct exec_domain *, unsigned long);
    12.8  
    12.9  #ifdef CONFIG_VMX
   12.10  extern void vmx_shadow_clear_state(struct domain *);
   12.11 -extern void vmx_shadow_invlpg(struct domain *, unsigned long);
   12.12  #endif
   12.13  
   12.14  #define __mfn_to_gpfn(_d, mfn)                         \
   12.15 @@ -70,7 +70,7 @@ static inline void shadow_mode_disable(s
   12.16  }
   12.17  
   12.18  extern unsigned long shadow_l2_table( 
   12.19 -    struct domain *d, unsigned long gpfn);
   12.20 +    struct domain *d, unsigned long gmfn);
   12.21    
   12.22  static inline void shadow_invalidate(struct exec_domain *ed) {
   12.23      if ( !shadow_mode_translate(ed->domain))
   12.24 @@ -131,10 +131,8 @@ static inline void __shadow_get_l2e(
   12.25              *sl2e = l2_pgentry_val(
   12.26                  shadow_linear_l2_table[l2_table_offset(va)]);
   12.27      }
   12.28 -    else {
   12.29 -        BUG(); /* why do we need this case? */
   12.30 -        *sl2e = l2_pgentry_val(linear_l2_table[l2_table_offset(va)]);
   12.31 -    }
   12.32 +    else
   12.33 +        BUG();
   12.34  }
   12.35  
   12.36  static inline void __shadow_set_l2e(
   12.37 @@ -147,17 +145,14 @@ static inline void __shadow_set_l2e(
   12.38              shadow_linear_l2_table[l2_table_offset(va)] = mk_l2_pgentry(value);
   12.39      }
   12.40      else
   12.41 -    {
   12.42 -        BUG(); /* why do we need this case? */
   12.43 -        linear_l2_table[l2_table_offset(va)] = mk_l2_pgentry(value);
   12.44 -    }
   12.45 +        BUG();
   12.46  }
   12.47  
   12.48  static inline void __guest_get_l2e(
   12.49      struct exec_domain *ed, unsigned long va, unsigned long *l2e)
   12.50  {
   12.51      *l2e = ( shadow_mode_translate(ed->domain) ) ?
   12.52 -        l2_pgentry_val(ed->arch.vpagetable[l2_table_offset(va)]) :
   12.53 +        l2_pgentry_val(ed->arch.guest_vtable[l2_table_offset(va)]) :
   12.54          l2_pgentry_val(linear_l2_table[l2_table_offset(va)]);
   12.55  }
   12.56  
   12.57 @@ -169,10 +164,10 @@ static inline void __guest_set_l2e(
   12.58          unsigned long pfn;
   12.59  
   12.60          pfn = phys_to_machine_mapping(value >> PAGE_SHIFT);
   12.61 -        ed->arch.guest_pl2e_cache[l2_table_offset(va)] =
   12.62 +        ed->arch.hl2_vtable[l2_table_offset(va)] =
   12.63              mk_l2_pgentry((pfn << PAGE_SHIFT) | __PAGE_HYPERVISOR);
   12.64  
   12.65 -        ed->arch.vpagetable[l2_table_offset(va)] = mk_l2_pgentry(value);
   12.66 +        ed->arch.guest_vtable[l2_table_offset(va)] = mk_l2_pgentry(value);
   12.67      }
   12.68      else
   12.69      {
   12.70 @@ -661,36 +656,6 @@ static inline void set_shadow_status(
   12.71    
   12.72  #ifdef CONFIG_VMX
   12.73  
   12.74 -static inline void vmx_update_shadow_state(
   12.75 -    struct exec_domain *ed, unsigned long gpfn, unsigned long smfn)
   12.76 -{
   12.77 -
   12.78 -    l2_pgentry_t *mpl2e = 0;
   12.79 -    l2_pgentry_t *gpl2e, *spl2e;
   12.80 -
   12.81 -    /* unmap the old mappings */
   12.82 -    if ( ed->arch.shadow_vtable )
   12.83 -        unmap_domain_mem(ed->arch.shadow_vtable);
   12.84 -    if ( ed->arch.vpagetable )
   12.85 -        unmap_domain_mem(ed->arch.vpagetable);
   12.86 -
   12.87 -    /* new mapping */
   12.88 -    mpl2e = (l2_pgentry_t *)
   12.89 -        map_domain_mem(pagetable_val(ed->arch.monitor_table));
   12.90 -
   12.91 -    mpl2e[l2_table_offset(SH_LINEAR_PT_VIRT_START)] =
   12.92 -        mk_l2_pgentry((smfn << PAGE_SHIFT) | __PAGE_HYPERVISOR);
   12.93 -    __flush_tlb_one(SH_LINEAR_PT_VIRT_START);
   12.94 -
   12.95 -    spl2e = (l2_pgentry_t *)map_domain_mem(smfn << PAGE_SHIFT);
   12.96 -    gpl2e = (l2_pgentry_t *)map_domain_mem(gpfn << PAGE_SHIFT);
   12.97 -    memset(spl2e, 0, L2_PAGETABLE_ENTRIES * sizeof(l2_pgentry_t));
   12.98 -
   12.99 -    ed->arch.shadow_vtable = spl2e;
  12.100 -    ed->arch.vpagetable = gpl2e; /* expect the guest did clean this up */
  12.101 -    unmap_domain_mem(mpl2e);
  12.102 -}
  12.103 -
  12.104  static inline unsigned long gva_to_gpte(unsigned long gva)
  12.105  {
  12.106      unsigned long gpde, gpte, pfn, index;
  12.107 @@ -702,9 +667,9 @@ static inline unsigned long gva_to_gpte(
  12.108  
  12.109      index = (gva >> L2_PAGETABLE_SHIFT);
  12.110  
  12.111 -    if (!l2_pgentry_val(ed->arch.guest_pl2e_cache[index])) {
  12.112 +    if (!l2_pgentry_val(ed->arch.hl2_vtable[index])) {
  12.113          pfn = phys_to_machine_mapping(gpde >> PAGE_SHIFT);
  12.114 -        ed->arch.guest_pl2e_cache[index] = 
  12.115 +        ed->arch.hl2_vtable[index] = 
  12.116              mk_l2_pgentry((pfn << PAGE_SHIFT) | __PAGE_HYPERVISOR);
  12.117      }
  12.118  
  12.119 @@ -731,22 +696,52 @@ static inline unsigned long gva_to_gpa(u
  12.120  static inline void __update_pagetables(struct exec_domain *ed)
  12.121  {
  12.122      struct domain *d = ed->domain;
  12.123 -    unsigned long gpfn = pagetable_val(ed->arch.guest_table) >> PAGE_SHIFT;
  12.124 +    unsigned long gmfn = pagetable_val(ed->arch.guest_table) >> PAGE_SHIFT;
  12.125 +    unsigned long gpfn = __mfn_to_gpfn(d, gmfn);
  12.126      unsigned long smfn = __shadow_status(d, gpfn) & PSH_pfn_mask;
  12.127  
  12.128 -    SH_VVLOG("0: __update_pagetables(gpfn=%p, smfn=%p)", gpfn, smfn);
  12.129 +    SH_VVLOG("0: __update_pagetables(gmfn=%p, smfn=%p)", gmfn, smfn);
  12.130  
  12.131      if ( unlikely(smfn == 0) )
  12.132 -        smfn = shadow_l2_table(d, gpfn);
  12.133 -#ifdef CONFIG_VMX
  12.134 -    else if ( shadow_mode_translate(ed->domain) )
  12.135 -        vmx_update_shadow_state(ed, gpfn, smfn);
  12.136 -#endif
  12.137 +        smfn = shadow_l2_table(d, gmfn);
  12.138  
  12.139      ed->arch.shadow_table = mk_pagetable(smfn<<PAGE_SHIFT);
  12.140  
  12.141 -    if ( !shadow_mode_external(ed->domain) )
  12.142 -        ed->arch.monitor_table = ed->arch.shadow_table;
   12.143 +    if ( shadow_mode_translate(ed->domain) )
  12.144 +    {
  12.145 +        l2_pgentry_t *gpl2e, *spl2e;
  12.146 +
  12.147 +        if ( ed->arch.guest_vtable )
  12.148 +            unmap_domain_mem(ed->arch.guest_vtable);
  12.149 +        if ( ed->arch.shadow_vtable )
  12.150 +            unmap_domain_mem(ed->arch.shadow_vtable);
  12.151 +
  12.152 +        gpl2e = ed->arch.guest_vtable =
  12.153 +            map_domain_mem(pagetable_val(ed->arch.guest_table));
  12.154 +        spl2e = ed->arch.shadow_vtable =
  12.155 +            map_domain_mem(pagetable_val(ed->arch.shadow_table));
  12.156 +
   12.157 +        if ( shadow_mode_external(ed->domain) )
  12.158 +        {
  12.159 +            l2_pgentry_t *mpl2e = ed->arch.monitor_vtable;
  12.160 +            unsigned long old_smfn;
  12.161 +            unsigned sh_l2offset = l2_table_offset(SH_LINEAR_PT_VIRT_START);
  12.162 +            
  12.163 +            old_smfn = l2_pgentry_val(mpl2e[sh_l2offset]) >> PAGE_SHIFT;
  12.164 +            if ( old_smfn != smfn )
  12.165 +            {
  12.166 +                mpl2e[sh_l2offset] =
  12.167 +                    mk_l2_pgentry((smfn << PAGE_SHIFT) | __PAGE_HYPERVISOR);
  12.168 +                local_flush_tlb();
  12.169 +            }
  12.170 +        }
  12.171 +
  12.172 +        if ( ed->arch.arch_vmx.flags )
  12.173 +        {
  12.174 +            // Why is VMX mode doing this?
  12.175 +            memset(spl2e, 0, L2_PAGETABLE_ENTRIES * sizeof(l2_pgentry_t));
  12.176 +        }
  12.177 +    }
  12.178  }
  12.179  
  12.180  static inline void update_pagetables(struct exec_domain *ed)
  12.181 @@ -757,12 +752,18 @@ static inline void update_pagetables(str
  12.182           __update_pagetables(ed);
  12.183           shadow_unlock(ed->domain);
  12.184       }
  12.185 +     if ( !shadow_mode_external(ed->domain) )
  12.186 +     {
  12.187  #ifdef __x86_64__
  12.188 -     else if ( !(ed->arch.flags & TF_kernel_mode) )
  12.189 -         ed->arch.monitor_table = ed->arch.guest_table_user;
  12.190 +         if ( !(ed->arch.flags & TF_kernel_mode) )
  12.191 +             ed->arch.monitor_table = ed->arch.guest_table_user;
  12.192 +         else
  12.193  #endif
  12.194 -     else
  12.195 -         ed->arch.monitor_table = ed->arch.guest_table;
  12.196 +         if ( shadow_mode_enabled(ed->domain) )
  12.197 +             ed->arch.monitor_table = ed->arch.shadow_table;
  12.198 +         else
  12.199 +             ed->arch.monitor_table = ed->arch.guest_table;
  12.200 +     }
  12.201  }
  12.202  
  12.203  #if SHADOW_DEBUG
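The shadow.h changes fold vmx_update_shadow_state into __update_pagetables and give update_pagetables a single decision for monitor_table: external modes keep their dedicated monitor table; otherwise it is the shadow root when shadowing is enabled, else the guest root (with the x86/64 user/kernel split on top). The hl2 table that replaces guest_pl2e_cache is a lazily filled machine-address view of the guest L2, as in gva_to_gpte above. A sketch of that lazy fill, with a hypothetical helper name:

    /* hl2_lookup is illustrative; the same idiom appears in gva_to_gpte
     * and __guest_set_l2e above. */
    static inline l2_pgentry_t hl2_lookup(struct exec_domain *ed,
                                          unsigned int index)
    {
        if ( !l2_pgentry_val(ed->arch.hl2_vtable[index]) )
        {
            unsigned long gpde = l2_pgentry_val(ed->arch.guest_vtable[index]);
            unsigned long mfn  = phys_to_machine_mapping(gpde >> PAGE_SHIFT);
            ed->arch.hl2_vtable[index] =
                mk_l2_pgentry((mfn << PAGE_SHIFT) | __PAGE_HYPERVISOR);
        }
        return ed->arch.hl2_vtable[index];
    }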
    13.1 --- a/xen/include/xen/lib.h	Sat Feb 19 10:25:07 2005 +0000
    13.2 +++ b/xen/include/xen/lib.h	Sat Feb 19 20:01:12 2005 +0000
    13.3 @@ -7,6 +7,7 @@
    13.4  #include <xen/string.h>
    13.5  
    13.6  #define BUG() do {					\
    13.7 +    debugtrace_dump(0);                                 \
    13.8      printk("BUG at %s:%d\n", __FILE__, __LINE__);	\
    13.9      FORCE_CRASH();                                      \
   13.10  } while ( 0 )
   13.11 @@ -29,13 +30,13 @@ struct domain;
   13.12  void cmdline_parse(char *cmdline);
   13.13  
   13.14  #ifndef NDEBUG
   13.15 -extern void debugtrace_reset(void);
   13.16 -extern void debugtrace_dump(void);
   13.17 +extern void debugtrace_reset(int send_to_console);
   13.18 +extern void debugtrace_dump(int send_to_console);
   13.19  extern void debugtrace_printk(const char *fmt, ...);
   13.20  #else
   13.21 -#define debugtrace_reset()         ((void)0)
   13.22 -#define debugtrace_dump()          ((void)0)
   13.23 -#define debugtrace_printk(_f, ...) ((void)0)
   13.24 +#define debugtrace_reset(_send_to_console) ((void)0)
   13.25 +#define debugtrace_dump(_send_to_console)  ((void)0)
   13.26 +#define debugtrace_printk(_f, ...)         ((void)0)
   13.27  #endif
   13.28  
   13.29  #define printk printf
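
With the lib.h changes both BUG() and panic() flush the trace ring, and every caller states explicitly whether subsequent debugtrace_printk output should keep going to the buffer or straight to the console. Typical usage, illustrative only:

    debugtrace_printk("shadow fault at va=%p\n", va);  /* buffered */
    debugtrace_dump(0);  /* dump the ring; keep logging to the buffer     */
    debugtrace_dump(1);  /* dump the ring; log to the console from now on */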