ia64/xen-unstable

changeset 4278:0c149b605692

bitkeeper revision 1.1258 (42419f7b3Pv5vbds6QtH6mlFaDtzEg)

Remember the min and max indices of valid entries in shadow L1 tables.
Use these bounds to minimize revalidation effort in resync_all().

Signed-off-by: michael.fetterman@cl.cam.ac.uk
author mafetter@fleming.research
date Wed Mar 23 16:55:23 2005 +0000 (2005-03-23)
parents d1854f84c551
children 3fe0f99cb576
files xen/arch/x86/shadow.c xen/include/asm-x86/shadow.h
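
The two indices are packed into the shadow page's existing tlbflush_timestamp
field (see the SHADOW_ENCODE_MIN_MAX / SHADOW_MIN / SHADOW_MAX macros added to
shadow.h below). A standalone sketch of that encoding, with the macro bodies
taken from the patch and an illustrative harness around them (the
L1_PAGETABLE_ENTRIES value of 1024, as on 32-bit non-PAE x86, is an assumption
of the sketch):

    /* Standalone sketch of the min/max encoding.  Macro bodies are taken
     * from the shadow.h hunk below; the harness and the assumed value of
     * L1_PAGETABLE_ENTRIES are illustrative only. */
    #include <assert.h>

    #define L1_PAGETABLE_ENTRIES 1024

    #define SHADOW_ENCODE_MIN_MAX(_min, _max) \
        (((L1_PAGETABLE_ENTRIES - (_max)) << 16) | (_min))
    #define SHADOW_MIN(_encoded) ((_encoded) & ((1u<<16) - 1))
    #define SHADOW_MAX(_encoded) (L1_PAGETABLE_ENTRIES - ((_encoded) >> 16))

    int main(void)
    {
        /* A shadow L1 whose valid entries span indices 7..300. */
        unsigned long mm = SHADOW_ENCODE_MIN_MAX(7, 300);
        assert(SHADOW_MIN(mm) == 7 && SHADOW_MAX(mm) == 300);

        /* min=1/max=0 encodes the empty window used for an L1 with no
         * valid entries: "for ( i = min; i <= max; i++ )" never runs. */
        mm = SHADOW_ENCODE_MIN_MAX(1, 0);
        assert(SHADOW_MIN(mm) == 1 && SHADOW_MAX(mm) == 0);
        return 0;
    }
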
--- a/xen/arch/x86/shadow.c	Wed Mar 23 12:53:05 2005 +0000
+++ b/xen/arch/x86/shadow.c	Wed Mar 23 16:55:23 2005 +0000
@@ -174,7 +174,8 @@ shadow_demote(struct domain *d, unsigned
  * don't want to let those disappear just because no CR3 is currently pointing
  * at it.
  *
- * tlbflush_timestamp holds a pickled pointer to the domain.
+ * tlbflush_timestamp holds the min and max indices of valid page table
+ * entries within the shadow page.
  */
 
 static inline unsigned long
@@ -204,7 +205,7 @@ alloc_shadow_page(struct domain *d,
     ASSERT( (gmfn & ~PGT_mfn_mask) == 0 );
     page->u.inuse.type_info = psh_type | gmfn;
     page->count_info = 0;
-    page->tlbflush_timestamp = pickle_domptr(d);
+    page->tlbflush_timestamp = 0;
 
     switch ( psh_type )
     {
@@ -325,8 +326,8 @@ free_shadow_l2_table(struct domain *d, u
 void free_shadow_page(unsigned long smfn)
 {
     struct pfn_info *page = &frame_table[smfn];
-    struct domain *d = unpickle_domptr(page->tlbflush_timestamp);
     unsigned long gmfn = page->u.inuse.type_info & PGT_mfn_mask;
+    struct domain *d = page_get_owner(pfn_to_page(gmfn));
     unsigned long gpfn = __mfn_to_gpfn(d, gmfn);
     unsigned long type = page->u.inuse.type_info & PGT_type_mask;
 
@@ -1431,25 +1432,34 @@ void shadow_map_l1_into_current_l2(unsig
 
         unsigned long sl1e;
         int index = l1_table_offset(va);
-
-        l1pte_propagate_from_guest(d, gpl1e[index], &sl1e);
-        if ( (sl1e & _PAGE_PRESENT) &&
-             !shadow_get_page_from_l1e(mk_l1_pgentry(sl1e), d) )
-            sl1e = 0;
-        spl1e[index] = sl1e;
+        int min = 1, max = 0;
 
         for ( i = 0; i < L1_PAGETABLE_ENTRIES; i++ )
        {
-            if ( i == index )
-                continue;
             l1pte_propagate_from_guest(d, gpl1e[i], &sl1e);
             if ( (sl1e & _PAGE_PRESENT) &&
                  !shadow_get_page_from_l1e(mk_l1_pgentry(sl1e), d) )
                 sl1e = 0;
             if ( sl1e == 0 )
+            {
+                // First copy entries from 0 until first invalid.
+                // Then copy entries from index until first invalid.
+                //
+                if ( i < index ) {
+                    i = index - 1;
+                    continue;
+                }
                 break;
+            }
             spl1e[i] = sl1e;
+            if ( unlikely(i < min) )
+                min = i;
+            if ( likely(i > max) )
+                max = i;
         }
+
+        frame_table[sl1mfn].tlbflush_timestamp =
+            SHADOW_ENCODE_MIN_MAX(min, max);
     }
 }
 
@@ -1996,6 +2006,8 @@ static int resync_all(struct domain *d,
     unsigned long *guest, *shadow, *snapshot;
     int need_flush = 0, external = shadow_mode_external(d);
     int unshadow;
+    unsigned long min_max;
+    int min, max;
 
     ASSERT(spin_is_locked(&d->arch.shadow_lock));
 
@@ -2020,7 +2032,10 @@ static int resync_all(struct domain *d,
 
         switch ( stype ) {
         case PGT_l1_shadow:
-            for ( i = 0; i < L1_PAGETABLE_ENTRIES; i++ )
+            min_max = pfn_to_page(smfn)->tlbflush_timestamp;
+            min = SHADOW_MIN(min_max);
+            max = SHADOW_MAX(min_max);
+            for ( i = min; i <= max; i++ )
             {
                 unsigned new_pte = guest[i];
                 if ( new_pte != snapshot[i] )
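
shadow_map_l1_into_current_l2() above now records, while it fills a shadow L1,
the lowest and highest indices it actually wrote, and resync_all() confines
its guest-vs-snapshot comparison to that window instead of scanning all
L1_PAGETABLE_ENTRIES slots. A simplified, self-contained model of the two
cooperating loops (hypothetical types, with a low-bit "present" test standing
in for the real l1pte_propagate_from_guest() path; this is not the Xen code
itself):

    /* Simplified model of the two cooperating loops.  fill() stands in
     * for shadow_map_l1_into_current_l2(), count_stale() for the
     * PGT_l1_shadow case in resync_all(). */
    #include <assert.h>

    #define ENTRIES 1024            /* L1_PAGETABLE_ENTRIES */

    struct sl1 {
        unsigned long e[ENTRIES];   /* shadow entries */
        int min, max;               /* stands in for the packed
                                       tlbflush_timestamp word */
    };

    /* Fill: mirror the valid guest entries, recording the touched range.
     * min=1/max=0 encodes "empty", so an all-invalid guest table yields
     * a window that the resync loop below never enters. */
    static void fill(struct sl1 *s, const unsigned long *guest)
    {
        s->min = 1;
        s->max = 0;
        for ( int i = 0; i < ENTRIES; i++ )
        {
            if ( !(guest[i] & 1) )  /* low bit models _PAGE_PRESENT */
                continue;
            s->e[i] = guest[i];
            if ( i < s->min ) s->min = i;
            if ( i > s->max ) s->max = i;
        }
    }

    /* Resync: entries outside the window were never propagated into the
     * shadow, so a guest change there cannot leave a stale shadow entry;
     * only [min, max] needs comparing against the snapshot. */
    static int count_stale(const struct sl1 *s, const unsigned long *guest,
                           const unsigned long *snapshot)
    {
        int stale = 0;
        for ( int i = s->min; i <= s->max; i++ )
            if ( guest[i] != snapshot[i] )
                stale++;
        return stale;
    }

    int main(void)
    {
        static unsigned long guest[ENTRIES], snapshot[ENTRIES];
        static struct sl1 s;

        guest[3] = snapshot[3] = 0x1001;  /* present, unchanged */
        guest[7] = 0x2001;                /* present, modified  */
        fill(&s, guest);
        assert(s.min == 3 && s.max == 7);
        assert(count_stale(&s, guest, snapshot) == 1);
        return 0;
    }

The real fill loop is more frugal still: as its comment says, it copies from 0
up to the first invalid entry, then from the faulting index up to the next
invalid entry, and stops there.
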
--- a/xen/include/asm-x86/shadow.h	Wed Mar 23 12:53:05 2005 +0000
+++ b/xen/include/asm-x86/shadow.h	Wed Mar 23 16:55:23 2005 +0000
@@ -52,6 +52,10 @@
 #define shadow_lock(_d)      do { ASSERT(!spin_is_locked(&(_d)->arch.shadow_lock)); spin_lock(&(_d)->arch.shadow_lock); } while (0)
 #define shadow_unlock(_d)    spin_unlock(&(_d)->arch.shadow_lock)
 
+#define SHADOW_ENCODE_MIN_MAX(_min, _max) (((L1_PAGETABLE_ENTRIES - (_max)) << 16) | (_min))
+#define SHADOW_MIN(_encoded) ((_encoded) & ((1u<<16) - 1))
+#define SHADOW_MAX(_encoded) (L1_PAGETABLE_ENTRIES - ((_encoded) >> 16))
+
 extern void shadow_mode_init(void);
 extern int shadow_mode_control(struct domain *p, dom0_shadow_control_t *sc);
 extern int shadow_fault(unsigned long va, struct xen_regs *regs);
@@ -187,13 +191,9 @@ extern unsigned long gpfn_to_mfn_safe(
 /************************************************************************/
 
 struct shadow_status {
+    struct shadow_status *next;   /* Pull-to-front list per hash bucket. */
     unsigned long gpfn_and_flags; /* Guest pfn plus flags. */
-    struct shadow_status *next;   /* Pull-to-front list per hash bucket. */
     unsigned long smfn;           /* Shadow mfn.           */
-
-    // Pull-to-front list of L1s/L2s from which we check when removing
-    // write access to a page.
-    //struct list_head next_to_check;
 };
 
 #define shadow_ht_extra_size 128
@@ -1290,6 +1290,29 @@ static inline void set_shadow_status(
 
 /************************************************************************/
 
+void static inline
+shadow_update_min_max(unsigned long smfn, int index)
+{
+    struct pfn_info *sl1page = pfn_to_page(smfn);
+    unsigned long min_max = sl1page->tlbflush_timestamp;
+    int min = SHADOW_MIN(min_max);
+    int max = SHADOW_MAX(min_max);
+    int update = 0;
+
+    if ( index < min )
+    {
+        min = index;
+        update = 1;
+    }
+    if ( index > max )
+    {
+        max = index;
+        update = 1;
+    }
+    if ( update )
+        sl1page->tlbflush_timestamp = SHADOW_ENCODE_MIN_MAX(min, max);
+}
+
 extern void shadow_map_l1_into_current_l2(unsigned long va);
 
 void static inline
@@ -1357,6 +1380,8 @@ shadow_set_l1e(unsigned long va, unsigne
     }
 
     shadow_linear_pg_table[l1_linear_offset(va)] = mk_l1_pgentry(new_spte);
+
+    shadow_update_min_max(sl2e >> PAGE_SHIFT, l1_table_offset(va));
 }
 
 /************************************************************************/
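
A note on the shadow.h half: shadow_update_min_max(), called from
shadow_set_l1e(), only ever widens the recorded window; the window is rebuilt
from scratch only when shadow_map_l1_into_current_l2() repopulates the whole
shadow L1. A toy harness (hypothetical; update_min_max() reimplements the
logic of shadow_update_min_max() on a bare word) showing that widening,
including the conservative result of growing the empty (min=1, max=0) window:

    /* Toy model of shadow_update_min_max()'s widening behaviour
     * (hypothetical harness; macro bodies as in the patch). */
    #include <assert.h>

    #define L1_PAGETABLE_ENTRIES 1024

    #define SHADOW_ENCODE_MIN_MAX(_min, _max) \
        (((L1_PAGETABLE_ENTRIES - (_max)) << 16) | (_min))
    #define SHADOW_MIN(_encoded) ((_encoded) & ((1u<<16) - 1))
    #define SHADOW_MAX(_encoded) (L1_PAGETABLE_ENTRIES - ((_encoded) >> 16))

    static unsigned long update_min_max(unsigned long min_max, int index)
    {
        int min = SHADOW_MIN(min_max), max = SHADOW_MAX(min_max);
        if ( index < min ) min = index;
        if ( index > max ) max = index;
        return SHADOW_ENCODE_MIN_MAX(min, max);
    }

    int main(void)
    {
        unsigned long mm = SHADOW_ENCODE_MIN_MAX(1, 0);  /* empty */

        /* Growing the empty window: min can only move down and max only
         * up, so index 5 yields [1,5] rather than the tight [5,5]. */
        mm = update_min_max(mm, 5);
        assert(SHADOW_MIN(mm) == 1 && SHADOW_MAX(mm) == 5);

        mm = update_min_max(mm, 900);   /* widens to [1,900]        */
        mm = update_min_max(mm, 100);   /* inside window: no change */
        assert(SHADOW_MIN(mm) == 1 && SHADOW_MAX(mm) == 900);
        return 0;
    }

The bias in the encoding (max stored as L1_PAGETABLE_ENTRIES - max) is
presumably chosen so that the zero that alloc_shadow_page() now writes into
tlbflush_timestamp decodes to a whole-table window, making an L1 shadow whose
bounds were never recorded revalidate conservatively rather than be skipped.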