ia64/xen-unstable

changeset 4291:b40fc0992e25

bitkeeper revision 1.1260 (4242b380EoY-OHIALnp_JwJHYfsozA)

Keep a list of pre-zeroed L1 shadow pages.
This avoids the cost of zeroing them upon allocation.
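
The patch hangs freed L1 shadow frames off d->arch.free_shadow_frames,
clearing their used entries before parking them, so alloc_shadow_page()
can hand one back without a fresh memset().  A minimal standalone sketch
of that pattern follows; the types and function names are illustrative,
not the Xen APIs, and unlike Xen (where the list link lives in the
frame's struct pfn_info, leaving the page data untouched) this sketch
borrows the first word of the page for the link and re-clears it on reuse.

    /* Sketch: a free list of pre-zeroed pages; names are illustrative. */
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define PAGE_SIZE 4096

    struct free_page {
        struct free_page *next;            /* link for the free list */
    };

    static struct free_page *free_shadow_frames;   /* pre-zeroed pages */

    /* Allocate a zeroed page, preferring the free list to a memset(). */
    static void *alloc_zeroed_page(void)
    {
        if (free_shadow_frames != NULL) {
            struct free_page *page = free_shadow_frames;
            free_shadow_frames = page->next;
            page->next = NULL;             /* re-clear the word the link used */
            return page;                   /* rest of the page is already zero */
        }
        void *page = malloc(PAGE_SIZE);
        if (page != NULL)
            memset(page, 0, PAGE_SIZE);    /* zeroing cost paid only here */
        return page;
    }

    /* Park a page whose contents the caller has already cleared,
     * as free_shadow_l1_table() does entry by entry in the patch. */
    static void free_zeroed_page(void *p)
    {
        struct free_page *page = p;
        page->next = free_shadow_frames;
        free_shadow_frames = page;
    }

    int main(void)
    {
        void *a = alloc_zeroed_page();     /* zeroed via memset() */
        free_zeroed_page(a);
        void *b = alloc_zeroed_page();     /* reused, no memset() */
        printf("reused: %s\n", a == b ? "yes" : "no");
        free(b);
        return 0;
    }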

Signed-off-by: michael.fetterman@cl.cam.ac.uk
author mafetter@fleming.research
date Thu Mar 24 12:33:04 2005 +0000 (2005-03-24)
parents 3fe0f99cb576
children 13032fd25c06
files xen/arch/x86/domain.c xen/arch/x86/shadow.c xen/include/asm-x86/domain.h xen/include/asm-x86/shadow.h xen/include/xen/perfc_defn.h
line diff
     1.1 --- a/xen/arch/x86/domain.c	Wed Mar 23 17:07:19 2005 +0000
     1.2 +++ b/xen/arch/x86/domain.c	Thu Mar 24 12:33:04 2005 +0000
     1.3 @@ -262,7 +262,8 @@ void arch_do_createdomain(struct exec_do
     1.4              mk_l3_pgentry(__pa(d->arch.mm_perdomain_l2) | __PAGE_HYPERVISOR);
     1.5  #endif
     1.6  
     1.7 -        shadow_lock_init(d);        
     1.8 +        shadow_lock_init(d);
     1.9 +        INIT_LIST_HEAD(&d->arch.free_shadow_frames);
    1.10      }
    1.11  }
    1.12  
     2.1 --- a/xen/arch/x86/shadow.c	Wed Mar 23 17:07:19 2005 +0000
     2.2 +++ b/xen/arch/x86/shadow.c	Thu Mar 24 12:33:04 2005 +0000
     2.3 @@ -187,7 +187,29 @@ alloc_shadow_page(struct domain *d,
     2.4      unsigned long smfn;
     2.5      int pin = 0;
     2.6  
     2.7 -    page = alloc_domheap_page(NULL);
     2.8 +    // Currently, we only keep pre-zero'ed pages around for use as L1's...
     2.9 +    // This will change.  Soon.
    2.10 +    //
    2.11 +    if ( psh_type == PGT_l1_shadow )
    2.12 +    {
    2.13 +        if ( !list_empty(&d->arch.free_shadow_frames) )
    2.14 +        {
    2.15 +            struct list_head *entry = d->arch.free_shadow_frames.next;
    2.16 +            page = list_entry(entry, struct pfn_info, list);
    2.17 +            list_del(entry);
    2.18 +            perfc_decr(free_l1_pages);
    2.19 +        }
    2.20 +        else
    2.21 +        {
    2.22 +            page = alloc_domheap_page(NULL);
    2.23 +            void *l1 = map_domain_mem(page_to_pfn(page) << PAGE_SHIFT);
    2.24 +            memset(l1, 0, PAGE_SIZE);
    2.25 +            unmap_domain_mem(l1);
    2.26 +        }
    2.27 +    }
    2.28 +    else
    2.29 +        page = alloc_domheap_page(NULL);
    2.30 +
    2.31      if ( unlikely(page == NULL) )
    2.32      {
    2.33          printk("Couldn't alloc shadow page! dom%d count=%d\n",
    2.34 @@ -271,11 +293,21 @@ free_shadow_l1_table(struct domain *d, u
    2.35  {
    2.36      l1_pgentry_t *pl1e = map_domain_mem(smfn << PAGE_SHIFT);
    2.37      int i;
    2.38 -
    2.39 -    for ( i = 0; i < L1_PAGETABLE_ENTRIES; i++ )
    2.40 +    struct pfn_info *spage = pfn_to_page(smfn);
    2.41 +    u32 min_max = spage->tlbflush_timestamp;
    2.42 +    int min = SHADOW_MIN(min_max);
    2.43 +    int max = SHADOW_MAX(min_max);
    2.44 +
    2.45 +    for ( i = min; i <= max; i++ )
    2.46 +    {
    2.47          put_page_from_l1e(pl1e[i], d);
    2.48 +        pl1e[i] = mk_l1_pgentry(0);
    2.49 +    }
    2.50  
    2.51      unmap_domain_mem(pl1e);
    2.52 +
    2.53 +    list_add(&spage->list, &d->arch.free_shadow_frames);
    2.54 +    perfc_incr(free_l1_pages);
    2.55  }
    2.56  
    2.57  static void inline
    2.58 @@ -372,7 +404,8 @@ void free_shadow_page(unsigned long smfn
    2.59      page->tlbflush_timestamp = 0;
    2.60      page->u.free.cpu_mask = 0;
    2.61  
    2.62 -    free_domheap_page(page);
    2.63 +    if ( type != PGT_l1_shadow )
    2.64 +        free_domheap_page(page);
    2.65  }
    2.66  
    2.67  static void inline
    2.68 @@ -1428,8 +1461,6 @@ void shadow_map_l1_into_current_l2(unsig
    2.69              &(shadow_linear_pg_table[l1_linear_offset(va) &
    2.70                                       ~(L1_PAGETABLE_ENTRIES-1)]);
    2.71  
    2.72 -        memset(spl1e, 0, PAGE_SIZE);
    2.73 -
    2.74          unsigned long sl1e;
    2.75          int index = l1_table_offset(va);
    2.76          int min = 1, max = 0;
    2.77 @@ -2006,7 +2037,7 @@ static int resync_all(struct domain *d, 
    2.78      unsigned long *guest, *shadow, *snapshot;
    2.79      int need_flush = 0, external = shadow_mode_external(d);
    2.80      int unshadow;
    2.81 -    unsigned long min_max;
    2.82 +    u32 min_max;
    2.83      int min, max;
    2.84  
    2.85      ASSERT(spin_is_locked(&d->arch.shadow_lock));
     3.1 --- a/xen/include/asm-x86/domain.h	Wed Mar 23 17:07:19 2005 +0000
     3.2 +++ b/xen/include/asm-x86/domain.h	Thu Mar 24 12:33:04 2005 +0000
     3.3 @@ -50,6 +50,8 @@ struct arch_domain
     3.4      struct out_of_sync_entry *out_of_sync_extras;
     3.5      unsigned int out_of_sync_extras_count;
     3.6  
     3.7 +    struct list_head free_shadow_frames;
     3.8 +
     3.9      pagetable_t  phys_table;               /* guest 1:1 pagetable */
    3.10  
    3.11  } __cacheline_aligned;
     4.1 --- a/xen/include/asm-x86/shadow.h	Wed Mar 23 17:07:19 2005 +0000
     4.2 +++ b/xen/include/asm-x86/shadow.h	Thu Mar 24 12:33:04 2005 +0000
     4.3 @@ -1294,7 +1294,7 @@ void static inline
     4.4  shadow_update_min_max(unsigned long smfn, int index)
     4.5  {
     4.6      struct pfn_info *sl1page = pfn_to_page(smfn);
     4.7 -    unsigned long min_max = sl1page->tlbflush_timestamp;
     4.8 +    u32 min_max = sl1page->tlbflush_timestamp;
     4.9      int min = SHADOW_MIN(min_max);
    4.10      int max = SHADOW_MAX(min_max);
    4.11      int update = 0;
     5.1 --- a/xen/include/xen/perfc_defn.h	Wed Mar 23 17:07:19 2005 +0000
     5.2 +++ b/xen/include/xen/perfc_defn.h	Thu Mar 24 12:33:04 2005 +0000
     5.3 @@ -1,3 +1,16 @@
     5.4 +#define PERFC_MAX_PT_UPDATES 64
     5.5 +#define PERFC_PT_UPDATES_BUCKET_SIZE 3
     5.6 +PERFCOUNTER_ARRAY( wpt_updates, "writable pt updates", PERFC_MAX_PT_UPDATES )
     5.7 +PERFCOUNTER_ARRAY( bpt_updates, "batched pt updates", PERFC_MAX_PT_UPDATES )
     5.8 +
     5.9 +PERFCOUNTER_ARRAY( hypercalls, "hypercalls", NR_hypercalls )
    5.10 +PERFCOUNTER_ARRAY( exceptions, "exceptions", 32 )
    5.11 +
    5.12 +#define VMX_PERF_EXIT_REASON_SIZE 37
    5.13 +#define VMX_PERF_VECTOR_SIZE 0x20
    5.14 +PERFCOUNTER_ARRAY( vmexits, "vmexits", VMX_PERF_EXIT_REASON_SIZE )
    5.15 +PERFCOUNTER_ARRAY( cause_vector, "cause vector", VMX_PERF_VECTOR_SIZE )
    5.16 +
    5.17  PERFCOUNTER_CPU (seg_fixups,   "segmentation fixups" )
    5.18  
    5.19  PERFCOUNTER_CPU( irqs,         "#interrupts" )
    5.20 @@ -31,24 +44,13 @@ PERFCOUNTER_CPU( shadow_update_va_fail2,
    5.21  PERFSTATUS( shadow_l2_pages, "current # shadow L2 pages" )
    5.22  PERFSTATUS( shadow_l1_pages, "current # shadow L1 pages" )
    5.23  PERFSTATUS( hl2_table_pages, "current # hl2 pages" )
    5.24 +PERFSTATUS( snapshot_pages,  "current # fshadow snapshot pages" )
    5.25 +PERFSTATUS( writable_pte_predictions, "# writable pte predictions")
    5.26 +PERFSTATUS( free_l1_pages,   "current # free shadow L1 pages" )
    5.27  
    5.28  PERFCOUNTER_CPU( check_pagetable, "calls to check_pagetable" )
    5.29  PERFCOUNTER_CPU( check_all_pagetables, "calls to check_all_pagetables" )
    5.30  
    5.31 -#define PERFC_MAX_PT_UPDATES 64
    5.32 -#define PERFC_PT_UPDATES_BUCKET_SIZE 3
    5.33 -PERFCOUNTER_ARRAY( wpt_updates, "writable pt updates", PERFC_MAX_PT_UPDATES )
    5.34 -PERFCOUNTER_ARRAY( bpt_updates, "batched pt updates", PERFC_MAX_PT_UPDATES )
    5.35 -
    5.36 -PERFCOUNTER_ARRAY( hypercalls, "hypercalls", NR_hypercalls )
    5.37 -PERFCOUNTER_ARRAY( exceptions, "exceptions", 32 )
    5.38 -
    5.39 -#define VMX_PERF_EXIT_REASON_SIZE 37
    5.40 -#define VMX_PERF_VECTOR_SIZE 0x20
    5.41 -PERFCOUNTER_ARRAY( vmexits, "vmexits", VMX_PERF_EXIT_REASON_SIZE )
    5.42 -PERFCOUNTER_ARRAY( cause_vector, "cause vector", VMX_PERF_VECTOR_SIZE )
    5.43 -
    5.44 -
    5.45  PERFCOUNTER_CPU( shadow_hl2_table_count,   "shadow_hl2_table count" )
    5.46  PERFCOUNTER_CPU( shadow_set_l1e_force_map, "shadow_set_l1e forced to map l1" )
    5.47  PERFCOUNTER_CPU( shadow_set_l1e_unlinked,  "shadow_set_l1e found unlinked l1" )
    5.48 @@ -56,10 +58,6 @@ PERFCOUNTER_CPU( shadow_set_l1e_fail,   
    5.49  PERFCOUNTER_CPU( shadow_invlpg_faults,     "shadow_invlpg's get_user faulted")
    5.50  PERFCOUNTER_CPU( unshadow_l2_count,        "unpinned L2 count")
    5.51  
    5.52 -
    5.53 -/* STATUS counters do not reset when 'P' is hit */
    5.54 -PERFSTATUS( snapshot_pages,  "current # fshadow snapshot pages" )
    5.55 -
    5.56  PERFCOUNTER_CPU(shadow_status_shortcut, "fastpath miss on shadow cache")
    5.57  PERFCOUNTER_CPU(shadow_status_calls,    "calls to ___shadow_status" )
    5.58  PERFCOUNTER_CPU(shadow_status_miss,     "missed shadow cache" )
    5.59 @@ -87,7 +85,6 @@ PERFCOUNTER_CPU(validate_hl2e_calls,    
    5.60  PERFCOUNTER_CPU(validate_hl2e_changes,             "validate_hl2e makes changes")
    5.61  PERFCOUNTER_CPU(exception_fixed,                   "pre-exception fixed")
    5.62  PERFCOUNTER_CPU(gpfn_to_mfn_safe,                  "calls to gpfn_to_mfn_safe")
    5.63 -PERFSTATUS( writable_pte_predictions, "# writable pte predictions")
    5.64  PERFCOUNTER_CPU(remove_write_access,               "calls to remove_write_access")
    5.65  PERFCOUNTER_CPU(remove_write_access_easy,          "easy outs of remove_write_access")
    5.66  PERFCOUNTER_CPU(remove_write_no_work,              "no work in remove_write_access")
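
The reworked free_shadow_l1_table() above clears only the entries in
[min, max], using the range that shadow_update_min_max() packs into the
shadow page's tlbflush_timestamp field, rather than touching all
L1_PAGETABLE_ENTRIES slots.  The sketch below shows the general idea of
packing such a range into one u32; the encoding is an assumption for
illustration, not the actual SHADOW_MIN/SHADOW_MAX layout defined in
xen/include/asm-x86/shadow.h.

    /* Sketch: track the dirty range [min, max] of a shadow L1 in one u32.
     * The bit layout here is assumed; Xen's real macros may differ. */
    #include <stdint.h>
    #include <stdio.h>

    #define L1_PAGETABLE_ENTRIES 1024

    /* low 16 bits = min, high 16 bits = max (assumed layout) */
    #define ENCODE_MIN_MAX(min, max) (((uint32_t)(max) << 16) | (uint32_t)(min))
    #define DECODE_MIN(mm)           ((int)((mm) & 0xffff))
    #define DECODE_MAX(mm)           ((int)((mm) >> 16))

    /* Widen the tracked range when entry 'index' is written. */
    static uint32_t update_min_max(uint32_t mm, int index)
    {
        int min = DECODE_MIN(mm), max = DECODE_MAX(mm);
        if (min > max)                     /* empty range: first write */
            return ENCODE_MIN_MAX(index, index);
        if (index < min) min = index;
        if (index > max) max = index;
        return ENCODE_MIN_MAX(min, max);
    }

    int main(void)
    {
        /* Empty range is min > max, as in the patch's 'int min = 1, max = 0'. */
        uint32_t mm = ENCODE_MIN_MAX(1, 0);

        mm = update_min_max(mm, 7);
        mm = update_min_max(mm, 3);
        mm = update_min_max(mm, 42);

        /* On free, only [min, max] needs clearing. */
        int cleared = 0;
        for (int i = DECODE_MIN(mm); i <= DECODE_MAX(mm); i++)
            cleared++;
        printf("cleared %d of %d entries\n", cleared, L1_PAGETABLE_ENTRIES);
        return 0;
    }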