ia64/xen-unstable

changeset 3929:826d1823c5b4

bitkeeper revision 1.1236.1.23 (421f3b04FuVFrUEUrYIs2_3sbFngVg)

Merge burn.cl.cam.ac.uk:/auto/groups/xeno-xenod/BK/xen-unstable.bk
into burn.cl.cam.ac.uk:/local/scratch-1/maf46/xen-unstable.bk

Signed-off-by: michael.fetterman@cl.cam.ac.uk
author maf46@burn.cl.cam.ac.uk
date Fri Feb 25 14:49:40 2005 +0000 (2005-02-25)
parents 71098e86c2a4 e8255a227e2c
children e526531ea6f7
files xen/arch/x86/domain.c xen/arch/x86/mm.c xen/arch/x86/shadow.c xen/arch/x86/vmx.c xen/arch/x86/x86_32/domain_page.c xen/arch/x86/x86_32/entry.S xen/arch/x86/x86_32/traps.c xen/include/asm-x86/shadow.h xen/include/xen/perfc_defn.h
     1.1 --- a/xen/arch/x86/domain.c	Fri Feb 25 01:19:08 2005 +0000
     1.2 +++ b/xen/arch/x86/domain.c	Fri Feb 25 14:49:40 2005 +0000
     1.3 @@ -409,8 +409,8 @@ static int vmx_final_setup_guest(struct 
     1.4      }
     1.5  
     1.6      /* We don't call update_pagetables() as we actively want fields such as 
     1.7 -     * the linear_pg_table to be null so that we bail out early of 
     1.8 -     * shadow_fault in case the vmx guest tries illegal accesses with
     1.9 +     * the linear_pg_table to be inaccessible so that we bail out early of 
    1.10 +     * shadow_fault() in case the vmx guest tries illegal accesses with
    1.11       * paging turned off. 
    1.12       */
    1.13      //update_pagetables(ed);     /* this assigns shadow_pagetable */
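
The early bail-out this comment refers to can be pictured as a guard at the top of the fault path; a minimal sketch, assuming shadow_fault() probes the linear window before doing any real work (illustrative, not the actual shadow_fault() body):

    /* Illustrative fragment: with linear_pg_table left unmapped for a
     * paging-off VMX guest, the fault handler notices the missing L2
     * entry and declines to service the fault. */
    if ( !(l2_pgentry_val(linear_l2_table[l2_table_offset(va)]) &
           _PAGE_PRESENT) )
        return 0;   /* unresolved: the guest takes the #PG */
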
     2.1 --- a/xen/arch/x86/mm.c	Fri Feb 25 01:19:08 2005 +0000
     2.2 +++ b/xen/arch/x86/mm.c	Fri Feb 25 14:49:40 2005 +0000
     2.3 @@ -1956,6 +1956,9 @@ int do_update_va_mapping(unsigned long v
     2.4               * page was not shadowed, or that the L2 entry has not yet been
     2.5               * updated to reflect the shadow.
     2.6               */
     2.7 +            if ( shadow_mode_external(current->domain) )
     2.8 +                BUG(); // can't use linear_l2_table with external tables.
     2.9 +
    2.10              l2_pgentry_t gpde = linear_l2_table[l2_table_offset(va)];
    2.11              unsigned long gpfn = l2_pgentry_val(gpde) >> PAGE_SHIFT;
    2.12  
    2.13 @@ -2380,6 +2383,9 @@ int ptwr_do_page_fault(unsigned long add
    2.14       * Attempt to read the PTE that maps the VA being accessed. By checking for
    2.15       * PDE validity in the L2 we avoid many expensive fixups in __get_user().
    2.16       */
    2.17 +    if ( shadow_mode_external(current->domain) )
    2.18 +        BUG(); // can't use linear_l2_table with external tables.
    2.19 +
    2.20      if ( !(l2_pgentry_val(linear_l2_table[addr>>L2_PAGETABLE_SHIFT]) &
    2.21             _PAGE_PRESENT) ||
    2.22           __get_user(pte, (unsigned long *)
    2.23 @@ -2416,6 +2422,9 @@ int ptwr_do_page_fault(unsigned long add
    2.24       * Is the L1 p.t. mapped into the current address space? If so we call it
    2.25       * an ACTIVE p.t., otherwise it is INACTIVE.
    2.26       */
    2.27 +    if ( shadow_mode_external(current->domain) )
    2.28 +        BUG(); // can't use linear_l2_table with external tables.
    2.29 +
    2.30      pl2e = &linear_l2_table[l2_idx];
    2.31      l2e  = l2_pgentry_val(*pl2e);
    2.32      which = PTWR_PT_INACTIVE;
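
All three new guards exist for the same reason: linear_l2_table is a window created by a recursive ("self-map") entry in whatever page directory is currently loaded, and with external shadow tables that directory is the monitor table rather than anything describing the guest. A standalone sketch of the self-map trick (userspace C with hypothetical frame numbers, just to show why a directory becomes visible through its own slot):

    #include <stdio.h>

    #define ENTRIES 1024                 /* x86-32, non-PAE page directory */

    int main(void)
    {
        unsigned long pd[ENTRIES] = { 0 };
        unsigned long pd_mfn = 0x1234;   /* hypothetical machine frame of pd */

        /* Install the directory's own frame in its last slot.  A hardware
         * walk of the top 4MB region then resolves through this slot twice
         * and lands back on the directory, exposing every PDE as a PTE. */
        pd[ENTRIES - 1] = (pd_mfn << 12) | 0x063;

        printf("linear window backed by mfn %#lx\n", pd[ENTRIES - 1] >> 12);
        return 0;
    }
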
     3.1 --- a/xen/arch/x86/shadow.c	Fri Feb 25 01:19:08 2005 +0000
     3.2 +++ b/xen/arch/x86/shadow.c	Fri Feb 25 14:49:40 2005 +0000
     3.3 @@ -110,6 +110,10 @@ static inline int clear_shadow_page(
     3.4      int              restart = 0;
     3.5      struct pfn_info *spage = &frame_table[x->smfn_and_flags & PSH_pfn_mask];
     3.6  
     3.7 +    // We don't clear hl2_tables here.  At least not yet.
     3.8 +    if ( x->pfn & PSH_hl2 )
     3.9 +        return 0;
    3.10 +
    3.11      switch ( spage->u.inuse.type_info & PGT_type_mask )
    3.12      {
    3.13          /* We clear L2 pages by zeroing the guest entries. */
    3.14 @@ -485,7 +489,7 @@ unsigned long shadow_l2_table(
    3.15      spfn_info->u.inuse.type_info = PGT_l2_page_table;
    3.16      perfc_incr(shadow_l2_pages);
    3.17  
    3.18 -    spfn = spfn_info - frame_table;
    3.19 +    spfn = page_to_pfn(spfn_info);
    3.20    /* Mark pfn as being shadowed; update field to point at shadow. */
    3.21      set_shadow_status(d, gpfn, spfn | PSH_shadowed);
    3.22   
    3.23 @@ -769,6 +773,41 @@ void shadow_l2_normal_pt_update(unsigned
    3.24      unmap_domain_mem(spl2e);
    3.25  }
    3.26  
    3.27 +unsigned long mk_hl2_table(struct exec_domain *ed)
    3.28 +{
    3.29 +    struct domain *d = ed->domain;
    3.30 +    unsigned long gmfn = pagetable_val(ed->arch.guest_table) >> PAGE_SHIFT;
    3.31 +    unsigned long gpfn = __mfn_to_gpfn(d, gmfn);
    3.32 +    unsigned long hl2mfn, status;
    3.33 +    struct pfn_info *hl2_info;
    3.34 +    l1_pgentry_t *hl2;
    3.35 +
    3.36 +    perfc_incr(hl2_table_pages);
    3.37 +
    3.38 +    if ( (hl2_info = alloc_shadow_page(d)) == NULL )
    3.39 +        BUG(); /* XXX Deal gracefully with failure. */
    3.40 +
    3.41 +    hl2_info->u.inuse.type_info = PGT_l1_page_table;
    3.42 +
    3.43 +    hl2mfn = page_to_pfn(hl2_info);
    3.44 +    status = hl2mfn | PSH_hl2;
    3.45 +    set_shadow_status(ed->domain, gpfn | PSH_hl2, status);
    3.46 +
    3.47 +    // need to optimize this...
    3.48 +    hl2 = map_domain_mem(hl2mfn << PAGE_SHIFT);
    3.49 +    memset(hl2, 0, PAGE_SIZE);
    3.50 +    unmap_domain_mem(hl2);
    3.51 +
    3.52 +    // install this hl2 as the linear_pg_table
    3.53 +    if ( shadow_mode_external(d) )
    3.54 +        ed->arch.monitor_vtable[l2_table_offset(LINEAR_PT_VIRT_START)] =
    3.55 +            mk_l2_pgentry((hl2mfn << PAGE_SHIFT) | __PAGE_HYPERVISOR);
    3.56 +    else
    3.57 +        ed->arch.shadow_vtable[l2_table_offset(LINEAR_PT_VIRT_START)] =
    3.58 +            mk_l2_pgentry((hl2mfn << PAGE_SHIFT) | __PAGE_HYPERVISOR);
    3.59 +
    3.60 +    return status;
    3.61 +}
    3.62  
    3.63  
    3.64  
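
mk_hl2_table() packs the hl2's machine frame and the PSH_hl2 tag into a single status word and files it in the shadow hash under the key gpfn | PSH_hl2; __update_pagetables() later recovers the frame with PSH_pfn_mask. A standalone sketch of that encoding (the frame number is hypothetical; the flag and mask values are the ones defined in the shadow.h hunk below):

    #include <stdio.h>

    #define PSH_shadowed (1ul << 31)     /* page has a shadow */
    #define PSH_hl2      (1ul << 30)     /* page is an hl2 */
    #define PSH_pfn_mask ((1ul << 21) - 1)

    int main(void)
    {
        unsigned long hl2mfn = 0x1f00;              /* hypothetical frame */
        unsigned long status = hl2mfn | PSH_hl2;    /* as mk_hl2_table() builds */

        printf("mfn=%#lx  is_hl2=%d\n",
               status & PSH_pfn_mask, !!(status & PSH_hl2));
        return 0;
    }
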
     4.1 --- a/xen/arch/x86/vmx.c	Fri Feb 25 01:19:08 2005 +0000
     4.2 +++ b/xen/arch/x86/vmx.c	Fri Feb 25 14:49:40 2005 +0000
     4.3 @@ -138,10 +138,17 @@ static int vmx_do_page_fault(unsigned lo
     4.4      if (mmio_space(gpa))
     4.5          handle_mmio(va, gpa);
     4.6  
     4.7 -    if ((result = shadow_fault(va, regs)))
     4.8 -        return result;
     4.9 -    
    4.10 -    return 0;       /* failed to resolve, i.e raise #PG */
    4.11 +    result = shadow_fault(va, regs);
    4.12 +
    4.13 +#if 0
    4.14 +    if ( !result )
    4.15 +    {
    4.16 +        __vmread(GUEST_EIP, &eip);
    4.17 +        printk("vmx pgfault to guest va=%p eip=%p\n", va, eip);
    4.18 +    }
    4.19 +#endif
    4.20 +
    4.21 +    return result;
    4.22  }
    4.23  
    4.24  static void vmx_do_general_protection_fault(struct xen_regs *regs) 
    4.25 @@ -274,19 +281,11 @@ static void vmx_vmexit_do_invlpg(unsigne
    4.26       * copying from guest
    4.27       */
    4.28      shadow_invlpg(ed, va);
    4.29 -    index = (va >> L2_PAGETABLE_SHIFT);
    4.30 +    index = l2_table_offset(va);
    4.31      ed->arch.hl2_vtable[index] = 
    4.32          mk_l2_pgentry(0); /* invalidate pgd cache */
    4.33  }
    4.34  
    4.35 -static inline void hl2_table_invalidate(struct exec_domain *ed)
    4.36 -{
    4.37 -    /*
    4.38 -     * Need to optimize this
    4.39 -     */
    4.40 -    memset(ed->arch.hl2_vtable, 0, PAGE_SIZE);
    4.41 -}
    4.42 -
    4.43  static void vmx_io_instruction(struct xen_regs *regs, 
    4.44                     unsigned long exit_qualification, unsigned long inst_len) 
    4.45  {
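
The index calculation change is behavior-preserving on 32-bit non-PAE builds, where l2_table_offset() is the open-coded shift plus a mask that discards nothing for a 32-bit address. A standalone check (the macro body here is an assumption modeled on x86-32 Xen, not copied from it):

    #include <assert.h>
    #include <stdio.h>

    #define L2_PAGETABLE_SHIFT 22
    #define l2_table_offset(va) (((va) >> L2_PAGETABLE_SHIFT) & (1024 - 1))

    int main(void)
    {
        unsigned long va = 0xc0123456ul;
        /* For 32-bit VAs the mask is a no-op, so both forms agree. */
        assert(l2_table_offset(va) == (va >> L2_PAGETABLE_SHIFT));
        printf("index=%lu\n", l2_table_offset(va));
        return 0;
    }
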
     5.1 --- a/xen/arch/x86/x86_32/domain_page.c	Fri Feb 25 01:19:08 2005 +0000
     5.2 +++ b/xen/arch/x86/x86_32/domain_page.c	Fri Feb 25 14:49:40 2005 +0000
     5.3 @@ -44,6 +44,9 @@ void *map_domain_mem(unsigned long pa)
     5.4      unsigned long va;
     5.5      unsigned int idx, cpu = smp_processor_id();
     5.6      unsigned long *cache = mapcache;
     5.7 +#ifndef NDEBUG
     5.8 +    unsigned flush_count = 0;
     5.9 +#endif
    5.10  
    5.11      ASSERT(!in_irq());
    5.12      perfc_incrc(map_domain_mem_count);
    5.13 @@ -66,6 +69,13 @@ void *map_domain_mem(unsigned long pa)
    5.14              perfc_incrc(domain_page_tlb_flush);
    5.15              local_flush_tlb();
    5.16              shadow_epoch[cpu] = ++epoch;
    5.17 +#ifndef NDEBUG
    5.18 +            if ( unlikely(flush_count++) )
    5.19 +            {
    5.20 +                // we've run out of map cache entries...
    5.21 +                BUG();
    5.22 +            }
    5.23 +#endif
    5.24          }
    5.25      }
    5.26      while ( cache[idx] != 0 );
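
The new debug check encodes an invariant of the probe loop: one TLB flush per map_domain_mem() call is normal when the epoch rolls over, but needing a second flush means every mapcache slot was found in use. A standalone miniature of that loop (userspace C, hypothetical cache size, with the flush replaced by a wrap counter):

    #include <stdio.h>

    #define CACHE_ENTRIES 8

    int main(void)
    {
        unsigned long cache[CACHE_ENTRIES] = { 1, 1, 1, 0, 1, 1, 1, 1 };
        unsigned int idx = 0, wraps = 0;

        do {
            idx = (idx + 1) % CACHE_ENTRIES;
            if ( idx == 0 && wraps++ )      /* second wrap: cache exhausted */
            {
                printf("BUG: out of map cache entries\n");
                return 1;
            }
        } while ( cache[idx] != 0 );

        printf("claimed free slot %u\n", idx);
        return 0;
    }
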
     6.1 --- a/xen/arch/x86/x86_32/entry.S	Fri Feb 25 01:19:08 2005 +0000
     6.2 +++ b/xen/arch/x86/x86_32/entry.S	Fri Feb 25 14:49:40 2005 +0000
     6.3 @@ -99,6 +99,7 @@
     6.4          pushl $VMX_MONITOR_EFLAGS; \
     6.5          popf; \
     6.6          subl $(NR_SKIPPED_REGS*4), %esp; \
     6.7 +        movl $0, 0xc(%esp); /* eflags==0 identifies xen_regs as VMX guest */ \
     6.8          pushl %eax; \
     6.9          pushl %ebp; \
    6.10          pushl %edi; \
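
Storing 0 at 0xc(%esp) works as a tag because a genuinely saved EFLAGS can never be 0: bit 1 of EFLAGS is architecturally always 1. The consumer side of this convention is the test added to show_registers() in the traps.c hunk below; in sketch form:

    /* Illustrative fragment (mirrors the traps.c change below): an all-zero
     * eflags slot marks a frame whose real register state lives in the VMCS
     * and must be fetched with __vmread(). */
    if ( current->arch.arch_vmx.flags && (regs->eflags == 0) )
        /* ... __vmread(GUEST_EIP, &eip), etc. ... */;
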
     7.1 --- a/xen/arch/x86/x86_32/traps.c	Fri Feb 25 01:19:08 2005 +0000
     7.2 +++ b/xen/arch/x86/x86_32/traps.c	Fri Feb 25 14:49:40 2005 +0000
     7.3 @@ -8,6 +8,10 @@
     7.4  #include <xen/irq.h>
     7.5  #include <asm/flushtlb.h>
     7.6  
     7.7 +#ifdef CONFIG_VMX
     7.8 +#include <asm/vmx.h>
     7.9 +#endif
    7.10 +
    7.11  /* All CPUs have their own IDT to allow set_fast_trap(). */
    7.12  idt_entry_t *idt_tables[NR_CPUS] = { 0 };
    7.13  
    7.14 @@ -87,37 +91,63 @@ void show_stack(unsigned long *esp)
    7.15  
    7.16  void show_registers(struct xen_regs *regs)
    7.17  {
    7.18 -    unsigned long esp;
    7.19 -    unsigned short ss, ds, es, fs, gs;
    7.20 +    unsigned long ss, ds, es, fs, gs, cs;
    7.21 +    unsigned long eip, esp, eflags;
    7.22 +    const char *context;
    7.23  
    7.24 -    if ( GUEST_MODE(regs) )
    7.25 +#ifdef CONFIG_VMX
    7.26 +    if ( current->arch.arch_vmx.flags && (regs->eflags == 0) )
    7.27      {
    7.28 -        esp = regs->esp;
    7.29 -        ss  = regs->ss & 0xffff;
    7.30 -        ds  = regs->ds & 0xffff;
    7.31 -        es  = regs->es & 0xffff;
    7.32 -        fs  = regs->fs & 0xffff;
    7.33 -        gs  = regs->gs & 0xffff;
    7.34 +        __vmread(GUEST_EIP, &eip);
    7.35 +        __vmread(GUEST_ESP, &esp);
    7.36 +        __vmread(GUEST_EFLAGS, &eflags);
    7.37 +        __vmread(GUEST_SS_SELECTOR, &ss);
    7.38 +        __vmread(GUEST_DS_SELECTOR, &ds);
    7.39 +        __vmread(GUEST_ES_SELECTOR, &es);
    7.40 +        __vmread(GUEST_FS_SELECTOR, &fs);
    7.41 +        __vmread(GUEST_GS_SELECTOR, &gs);
    7.42 +        __vmread(GUEST_CS_SELECTOR, &cs);
    7.43 +        context = "vmx guest";
    7.44      }
    7.45      else
    7.46 +#endif
    7.47      {
    7.48 -        esp = (unsigned long)(&regs->esp);
    7.49 -        ss  = __HYPERVISOR_DS;
    7.50 -        ds  = __HYPERVISOR_DS;
    7.51 -        es  = __HYPERVISOR_DS;
    7.52 -        fs  = __HYPERVISOR_DS;
    7.53 -        gs  = __HYPERVISOR_DS;
    7.54 +        eip = regs->eip;
    7.55 +        eflags = regs->eflags;
    7.56 +
    7.57 +        if ( GUEST_MODE(regs) )
    7.58 +        {
    7.59 +            esp = regs->esp;
    7.60 +            ss  = regs->ss & 0xffff;
    7.61 +            ds  = regs->ds & 0xffff;
    7.62 +            es  = regs->es & 0xffff;
    7.63 +            fs  = regs->fs & 0xffff;
    7.64 +            gs  = regs->gs & 0xffff;
    7.65 +            cs  = regs->cs & 0xffff;
    7.66 +            context = "guest";
    7.67 +        }
    7.68 +        else
    7.69 +        {
    7.70 +            esp = (unsigned long)(&regs->esp);
    7.71 +            ss  = __HYPERVISOR_DS;
    7.72 +            ds  = __HYPERVISOR_DS;
    7.73 +            es  = __HYPERVISOR_DS;
    7.74 +            fs  = __HYPERVISOR_DS;
    7.75 +            gs  = __HYPERVISOR_DS;
    7.76 +            cs  = __HYPERVISOR_CS;
    7.77 +            
    7.78 +            context = "hypervisor";
    7.79 +        }
    7.80      }
    7.81  
    7.82 -    printk("CPU:    %d\nEIP:    %04lx:[<%p>]      \nEFLAGS: %p\n",
    7.83 -           smp_processor_id(), 0xffff & regs->cs, regs->eip, regs->eflags);
    7.84 +    printk("CPU:    %d\nEIP:    %04lx:[<%p>]      \nEFLAGS: %p   CONTEXT: %s\n",
    7.85 +           smp_processor_id(), 0xffff & cs, eip, eflags, context);
    7.86      printk("eax: %p   ebx: %p   ecx: %p   edx: %p\n",
    7.87             regs->eax, regs->ebx, regs->ecx, regs->edx);
    7.88      printk("esi: %p   edi: %p   ebp: %p   esp: %p\n",
    7.89             regs->esi, regs->edi, regs->ebp, esp);
    7.90 -    printk("ds: %04x   es: %04x   fs: %04x   gs: %04x   ss: %04x\n",
    7.91 -           ds, es, fs, gs, ss);
    7.92 -    printk("cr3: %08lx\n", read_cr3());
    7.93 +    printk("ds: %04x   es: %04x   fs: %04x   gs: %04x   ss: %04x   cs: %04x\n",
    7.94 +           ds, es, fs, gs, ss, cs);
    7.95  
    7.96      show_stack((unsigned long *)&regs->esp);
    7.97  } 
     8.1 --- a/xen/include/asm-x86/shadow.h	Fri Feb 25 01:19:08 2005 +0000
     8.2 +++ b/xen/include/asm-x86/shadow.h	Fri Feb 25 14:49:40 2005 +0000
     8.3 @@ -8,8 +8,9 @@
     8.4  #include <asm/processor.h>
     8.5  #include <asm/domain_page.h>
     8.6  
     8.7 -/* Shadow PT flag bits in pfn_info */
     8.8 +/* Shadow PT flag bits in shadow_status */
     8.9  #define PSH_shadowed    (1<<31) /* page has a shadow. PFN points to shadow */
    8.10 +#define PSH_hl2         (1<<30) /* page is an hl2 */
    8.11  #define PSH_pfn_mask    ((1<<21)-1)
    8.12  
    8.13  /* Shadow PT operation mode : shadow-mode variable in arch_domain. */
    8.14 @@ -43,6 +44,7 @@ extern void unshadow_table(unsigned long
    8.15  extern int shadow_mode_enable(struct domain *p, unsigned int mode);
    8.16  extern void free_shadow_state(struct domain *d);
    8.17  extern void shadow_invlpg(struct exec_domain *, unsigned long);
    8.18 +extern unsigned long mk_hl2_table(struct exec_domain *ed);
    8.19  
    8.20  extern void vmx_shadow_clear_state(struct domain *);
    8.21  
    8.22 @@ -67,7 +69,7 @@ extern unsigned long shadow_l2_table(
    8.23      struct domain *d, unsigned long gmfn);
    8.24    
    8.25  static inline void shadow_invalidate(struct exec_domain *ed) {
    8.26 -    if ( !shadow_mode_translate(ed->domain))
    8.27 +    if ( !ed->arch.arch_vmx.flags )
    8.28          BUG();
    8.29      memset(ed->arch.shadow_vtable, 0, PAGE_SIZE);
    8.30  }
    8.31 @@ -117,29 +119,27 @@ struct shadow_status {
    8.32  static inline void __shadow_get_l2e(
    8.33      struct exec_domain *ed, unsigned long va, unsigned long *sl2e)
    8.34  {
    8.35 -    if ( likely(shadow_mode_enabled(ed->domain)) ) {
    8.36 -        if ( shadow_mode_translate(ed->domain) )
    8.37 -            *sl2e = l2_pgentry_val(
    8.38 -                ed->arch.shadow_vtable[l2_table_offset(va)]);       
    8.39 -        else 
    8.40 -            *sl2e = l2_pgentry_val(
    8.41 -                shadow_linear_l2_table[l2_table_offset(va)]);
    8.42 -    }
    8.43 -    else
    8.44 +    if ( !likely(shadow_mode_enabled(ed->domain)) )
    8.45          BUG();
    8.46 +
    8.47 +    if ( shadow_mode_translate(ed->domain) )
    8.48 +        *sl2e = l2_pgentry_val(
    8.49 +            ed->arch.shadow_vtable[l2_table_offset(va)]);       
    8.50 +    else 
    8.51 +        *sl2e = l2_pgentry_val(
    8.52 +            shadow_linear_l2_table[l2_table_offset(va)]);
    8.53  }
    8.54  
    8.55  static inline void __shadow_set_l2e(
    8.56      struct exec_domain *ed, unsigned long va, unsigned long value)
    8.57  {
    8.58 -    if ( likely(shadow_mode_enabled(ed->domain)) ) {
    8.59 -        if ( shadow_mode_translate(ed->domain) ) 
    8.60 -            ed->arch.shadow_vtable[l2_table_offset(va)] = mk_l2_pgentry(value);
    8.61 -        else 
    8.62 -            shadow_linear_l2_table[l2_table_offset(va)] = mk_l2_pgentry(value);
    8.63 -    }
    8.64 -    else
    8.65 +    if ( !likely(shadow_mode_enabled(ed->domain)) )
    8.66          BUG();
    8.67 +
    8.68 +    if ( shadow_mode_translate(ed->domain) ) 
    8.69 +        ed->arch.shadow_vtable[l2_table_offset(va)] = mk_l2_pgentry(value);
    8.70 +    else 
    8.71 +        shadow_linear_l2_table[l2_table_offset(va)] = mk_l2_pgentry(value);
    8.72  }
    8.73  
    8.74  static inline void __guest_get_l2e(
    8.75 @@ -347,8 +347,14 @@ static void shadow_audit(struct domain *
    8.76      for ( j = 0; j < shadow_ht_buckets; j++ )
    8.77      {
    8.78          a = &d->arch.shadow_ht[j];        
    8.79 -        if ( a->pfn ) { live++; ASSERT(a->smfn_and_flags & PSH_pfn_mask); }
    8.80 -        ASSERT(a->pfn < 0x00100000UL);
    8.81 +        if ( a->pfn )
    8.82 +        {
    8.83 +            live++;
    8.84 +            ASSERT(a->smfn_and_flags & PSH_pfn_mask);
    8.85 +        }
    8.86 +        else
    8.87 +            ASSERT(!a->next);
    8.88 +        ASSERT( (a->pfn & ~PSH_hl2) < 0x00100000UL);
    8.89          a = a->next;
    8.90          while ( a && (live < 9999) )
    8.91          { 
    8.92 @@ -359,7 +365,7 @@ static void shadow_audit(struct domain *
    8.93                         live, a->pfn, a->smfn_and_flags, a->next);
    8.94                  BUG();
    8.95              }
    8.96 -            ASSERT(a->pfn < 0x00100000UL);
    8.97 +            ASSERT( (a->pfn & ~PSH_hl2) < 0x00100000UL);
    8.98              ASSERT(a->smfn_and_flags & PSH_pfn_mask);
    8.99              a = a->next; 
   8.100          }
   8.101 @@ -369,15 +375,22 @@ static void shadow_audit(struct domain *
   8.102      for ( a = d->arch.shadow_ht_free; a != NULL; a = a->next )
   8.103          free++; 
   8.104  
   8.105 -    if ( print)
   8.106 +    if ( print )
   8.107          printk("Xlive=%d free=%d\n",live,free);
   8.108  
   8.109 -    abs = (perfc_value(shadow_l1_pages) + perfc_value(shadow_l2_pages)) - live;
   8.110 +    // BUG: this only works if there's only a single domain which is
   8.111 +    //      using shadow tables.
   8.112 +    //
   8.113 +    abs = ( perfc_value(shadow_l1_pages) +
   8.114 +            perfc_value(shadow_l2_pages) +
   8.115 +            perfc_value(hl2_table_pages) ) - live;
   8.116  #ifdef PERF_COUNTERS
   8.117      if ( (abs < -1) || (abs > 1) )
   8.118      {
   8.119 -        printk("live=%d free=%d l1=%d l2=%d\n",live,free,
   8.120 -               perfc_value(shadow_l1_pages), perfc_value(shadow_l2_pages) );
   8.121 +        printk("live=%d free=%d l1=%d l2=%d hl2=%d\n", live, free,
   8.122 +               perfc_value(shadow_l1_pages),
   8.123 +               perfc_value(shadow_l2_pages),
   8.124 +               perfc_value(hl2_table_pages));
   8.125          BUG();
   8.126      }
   8.127  #endif
   8.128 @@ -405,6 +418,8 @@ static inline unsigned long __shadow_sta
   8.129  {
   8.130      struct shadow_status *p, *x, *head;
   8.131  
   8.132 +    ASSERT(spin_is_locked(&d->arch.shadow_lock));
   8.133 +
   8.134      x = head = hash_bucket(d, gpfn);
   8.135      p = NULL;
   8.136  
   8.137 @@ -570,7 +585,7 @@ static inline void set_shadow_status(
   8.138  
   8.139      ASSERT(spin_is_locked(&d->arch.shadow_lock));
   8.140      ASSERT(gpfn != 0);
   8.141 -    ASSERT(s & PSH_shadowed);
   8.142 +    ASSERT(s & (PSH_shadowed | PSH_hl2));
   8.143  
   8.144      x = head = hash_bucket(d, gpfn);
   8.145     
   8.146 @@ -658,7 +673,7 @@ static inline unsigned long gva_to_gpte(
   8.147      if (!(gpde & _PAGE_PRESENT))
   8.148          return 0;
   8.149  
   8.150 -    index = (gva >> L2_PAGETABLE_SHIFT);
   8.151 +    index = l2_table_offset(gva);
   8.152  
   8.153      if (!l2_pgentry_val(ed->arch.hl2_vtable[index])) {
   8.154          pfn = phys_to_machine_mapping(gpde >> PAGE_SHIFT);
   8.155 @@ -684,6 +699,14 @@ static inline unsigned long gva_to_gpa(u
   8.156      return (gpte & PAGE_MASK) + (gva & ~PAGE_MASK); 
   8.157  }
   8.158  
   8.159 +static inline void hl2_table_invalidate(struct exec_domain *ed)
   8.160 +{
   8.161 +    /*
   8.162 +     * Need to optimize this
   8.163 +     */
   8.164 +    memset(ed->arch.hl2_vtable, 0, PAGE_SIZE);
   8.165 +}
   8.166 +
   8.167  static inline void __update_pagetables(struct exec_domain *ed)
   8.168  {
   8.169      struct domain *d = ed->domain;
   8.170 @@ -698,63 +721,83 @@ static inline void __update_pagetables(s
   8.171  
   8.172      ed->arch.shadow_table = mk_pagetable(smfn<<PAGE_SHIFT);
   8.173  
   8.174 -    if  ( shadow_mode_translate(ed->domain) )
   8.175 +    if ( shadow_mode_translate(d) )
   8.176      {
   8.177 +        l2_pgentry_t *mpl2e = ed->arch.monitor_vtable;
   8.178          l2_pgentry_t *gpl2e, *spl2e;
   8.179 +        unsigned long hl2_status, hl2mfn, offset;
   8.180 +        int need_flush = 0;
   8.181  
   8.182          if ( ed->arch.guest_vtable )
   8.183              unmap_domain_mem(ed->arch.guest_vtable);
   8.184          if ( ed->arch.shadow_vtable )
   8.185              unmap_domain_mem(ed->arch.shadow_vtable);
   8.186 +        if ( ed->arch.hl2_vtable )
   8.187 +            unmap_domain_mem(ed->arch.hl2_vtable);
   8.188  
   8.189          gpl2e = ed->arch.guest_vtable =
   8.190              map_domain_mem(pagetable_val(ed->arch.guest_table));
   8.191          spl2e = ed->arch.shadow_vtable =
   8.192              map_domain_mem(pagetable_val(ed->arch.shadow_table));
   8.193  
   8.194 -        if ( shadow_mode_external(ed->domain ) )
   8.195 +        hl2_status = __shadow_status(d, gpfn | PSH_hl2);
   8.196 +        if ( unlikely(!(hl2_status & PSH_hl2)) )
   8.197 +            hl2_status = mk_hl2_table(ed);
   8.198 +
   8.199 +        hl2mfn = hl2_status & PSH_pfn_mask;
   8.200 +        ed->arch.hl2_vtable = map_domain_mem(hl2mfn << PAGE_SHIFT);
   8.201 +
   8.202 +        offset = l2_table_offset(LINEAR_PT_VIRT_START);
   8.203 +        if ( hl2mfn != (l2_pgentry_val(mpl2e[offset]) >> PAGE_SHIFT) )
   8.204          {
   8.205 -            l2_pgentry_t *mpl2e = ed->arch.monitor_vtable;
   8.206 -            unsigned long old_smfn;
   8.207 -            unsigned sh_l2offset = l2_table_offset(SH_LINEAR_PT_VIRT_START);
   8.208 -            
   8.209 -            old_smfn = l2_pgentry_val(mpl2e[sh_l2offset]) >> PAGE_SHIFT;
   8.210 -            if ( old_smfn != smfn )
   8.211 +            mpl2e[offset] =
   8.212 +                mk_l2_pgentry((hl2mfn << PAGE_SHIFT) | __PAGE_HYPERVISOR);
   8.213 +            need_flush = 1;
   8.214 +        }
   8.215 +
   8.216 +        if ( shadow_mode_external(d) )
   8.217 +        {
   8.218 +            offset = l2_table_offset(SH_LINEAR_PT_VIRT_START);
   8.219 +            if ( smfn != (l2_pgentry_val(mpl2e[offset]) >> PAGE_SHIFT) )
   8.220              {
   8.221 -                mpl2e[sh_l2offset] =
   8.222 +                mpl2e[offset] =
   8.223                      mk_l2_pgentry((smfn << PAGE_SHIFT) | __PAGE_HYPERVISOR);
   8.224 -                local_flush_tlb();
   8.225 +                need_flush = 1;
   8.226              }
   8.227          }
   8.228  
   8.229          if ( ed->arch.arch_vmx.flags )
   8.230          {
   8.231              // Why is VMX mode doing this?
   8.232 -            memset(spl2e, 0, L2_PAGETABLE_ENTRIES * sizeof(l2_pgentry_t));
   8.233 +            shadow_invalidate(ed);
   8.234 +            hl2_table_invalidate(ed);
   8.235          }
   8.236 +
   8.237 +        if ( need_flush )
   8.238 +            local_flush_tlb();
   8.239      }
   8.240  }
   8.241  
   8.242  static inline void update_pagetables(struct exec_domain *ed)
   8.243  {
   8.244 -     if ( unlikely(shadow_mode_enabled(ed->domain)) )
   8.245 -     {
   8.246 -         shadow_lock(ed->domain);
   8.247 -         __update_pagetables(ed);
   8.248 -         shadow_unlock(ed->domain);
   8.249 -     }
   8.250 -     if ( !shadow_mode_external(ed->domain) )
   8.251 -     {
   8.252 +    if ( unlikely(shadow_mode_enabled(ed->domain)) )
   8.253 +    {
   8.254 +        shadow_lock(ed->domain);
   8.255 +        __update_pagetables(ed);
   8.256 +        shadow_unlock(ed->domain);
   8.257 +    }
   8.258 +    if ( !shadow_mode_external(ed->domain) )
   8.259 +    {
   8.260  #ifdef __x86_64__
   8.261 -         if ( !(ed->arch.flags & TF_kernel_mode) )
   8.262 -             ed->arch.monitor_table = ed->arch.guest_table_user;
   8.263 -         else
   8.264 +        if ( !(ed->arch.flags & TF_kernel_mode) )
   8.265 +            ed->arch.monitor_table = ed->arch.guest_table_user;
   8.266 +        else
   8.267  #endif
   8.268 -         if ( shadow_mode_enabled(ed->domain) )
   8.269 -             ed->arch.monitor_table = ed->arch.shadow_table;
   8.270 -         else
   8.271 -             ed->arch.monitor_table = ed->arch.guest_table;
   8.272 -     }
   8.273 +        if ( shadow_mode_enabled(ed->domain) )
   8.274 +            ed->arch.monitor_table = ed->arch.shadow_table;
   8.275 +        else
   8.276 +            ed->arch.monitor_table = ed->arch.guest_table;
   8.277 +    }
   8.278  }
   8.279  
   8.280  #if SHADOW_DEBUG
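
Both shadow_audit() and __shadow_status() walk the same structure: an array of hash buckets whose entries chain through ->next, each pairing a guest pfn (possibly tagged with PSH_hl2) with an smfn_and_flags word. A standalone miniature of that walk (two-entry chain, hypothetical values):

    #include <stdio.h>
    #include <stddef.h>

    struct shadow_status {
        unsigned long pfn;             /* lookup key, may carry PSH_hl2 */
        unsigned long smfn_and_flags;  /* shadow mfn plus PSH_* flags */
        struct shadow_status *next;    /* bucket chain */
    };

    int main(void)
    {
        struct shadow_status b2 = { 0x42, 0x9000, NULL };
        struct shadow_status b1 = { 0x17, 0x8000, &b2 };
        unsigned long key = 0x42;

        for ( struct shadow_status *a = &b1; a != NULL; a = a->next )
        {
            if ( a->pfn == key )
            {
                printf("smfn_and_flags=%#lx\n", a->smfn_and_flags);
                return 0;
            }
        }
        printf("not shadowed\n");
        return 0;
    }
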
     9.1 --- a/xen/include/xen/perfc_defn.h	Fri Feb 25 01:19:08 2005 +0000
     9.2 +++ b/xen/include/xen/perfc_defn.h	Fri Feb 25 14:49:40 2005 +0000
     9.3 @@ -31,6 +31,7 @@ PERFCOUNTER_CPU( shadow_update_va_fail2,
     9.4  /* STATUS counters do not reset when 'P' is hit */
     9.5  PERFSTATUS( shadow_l2_pages, "current # shadow L2 pages" )
     9.6  PERFSTATUS( shadow_l1_pages, "current # shadow L1 pages" )
     9.7 +PERFSTATUS( hl2_table_pages, "current # hl2 pages" )
     9.8  
     9.9  PERFCOUNTER_CPU( check_pagetable, "calls to check_pagetable" )
    9.10  PERFCOUNTER_CPU( check_all_pagetables, "calls to check_all_pagetables" )