direct-io.hg

changeset 8020:64d919032001

Merged.
author emellor@leeni.uk.xensource.com
date Thu Nov 24 15:57:45 2005 +0000 (2005-11-24)
parents 4d1d9f7ebcfc 8952af4fc166
children a9fef115e8ec d2b957902c6b
files tools/libxc/xc_linux_save.c xen/arch/x86/audit.c xen/arch/x86/mm.c xen/arch/x86/shadow.c xen/arch/x86/shadow32.c xen/arch/x86/shadow_public.c xen/common/trace.c xen/include/asm-x86/domain.h xen/include/asm-x86/grant_table.h xen/include/asm-x86/shadow.h xen/include/public/trace.h
     1.1 --- a/tools/libxc/xc_linux_save.c	Thu Nov 24 15:57:36 2005 +0000
     1.2 +++ b/tools/libxc/xc_linux_save.c	Thu Nov 24 15:57:45 2005 +0000
     1.3 @@ -727,8 +727,8 @@ int xc_linux_save(int xc_handle, int io_
     1.4  
     1.5      /* Domain is still running at this point */
     1.6  
     1.7 -    if (live && (pt_levels != 2)) {
     1.8 -        ERR("Live migration supported only for 32-bit non-pae");
     1.9 +    if (live && (pt_levels == 4)) {
    1.10 +        ERR("Live migration not supported for 64-bit guests");
    1.11          live = 0;
    1.12      }
    1.13  
     2.1 --- a/xen/arch/x86/audit.c	Thu Nov 24 15:57:36 2005 +0000
     2.2 +++ b/xen/arch/x86/audit.c	Thu Nov 24 15:57:45 2005 +0000
     2.3 @@ -472,13 +472,6 @@ int audit_adjust_pgtables(struct domain 
     2.4                              errors++;
     2.5                          }
     2.6  
     2.7 -                        if ( (page->u.inuse.type_info & PGT_pinned) != PGT_pinned )
     2.8 -                        {
     2.9 -                            printk("Audit %d: L2 mfn=%lx not pinned t=%"
    2.10 -				   PRtype_info "\n",
    2.11 -                                   d->domain_id, mfn, page->u.inuse.type_info);
    2.12 -                            errors++;
    2.13 -                        }
    2.14                      }
    2.15                  }
    2.16  
     3.1 --- a/xen/arch/x86/mm.c	Thu Nov 24 15:57:36 2005 +0000
     3.2 +++ b/xen/arch/x86/mm.c	Thu Nov 24 15:57:45 2005 +0000
     3.3 @@ -1289,6 +1289,11 @@ static int mod_l4_entry(l4_pgentry_t *pl
     3.4  
     3.5  int alloc_page_type(struct pfn_info *page, unsigned long type)
     3.6  {
     3.7 +    struct domain *owner = page_get_owner(page);
     3.8 +
     3.9 +    if ( owner != NULL )
    3.10 +        mark_dirty(owner, page_to_pfn(page));
    3.11 +
    3.12      switch ( type & PGT_type_mask )
    3.13      {
    3.14      case PGT_l1_page_table:
    3.15 @@ -1318,16 +1323,14 @@ void free_page_type(struct pfn_info *pag
    3.16      struct domain *owner = page_get_owner(page);
    3.17      unsigned long gpfn;
    3.18  
    3.19 -    if ( owner != NULL )
    3.20 +    if ( unlikely((owner != NULL) && shadow_mode_enabled(owner)) )
    3.21      {
    3.22 +        mark_dirty(owner, page_to_pfn(page));
    3.23          if ( unlikely(shadow_mode_refcounts(owner)) )
    3.24              return;
    3.25 -        if ( unlikely(shadow_mode_enabled(owner)) )
    3.26 -        {
    3.27 -            gpfn = __mfn_to_gpfn(owner, page_to_pfn(page));
    3.28 -            ASSERT(VALID_M2P(gpfn));
    3.29 -            remove_shadow(owner, gpfn, type & PGT_type_mask);
    3.30 -        }
    3.31 +        gpfn = __mfn_to_gpfn(owner, page_to_pfn(page));
    3.32 +        ASSERT(VALID_M2P(gpfn));
    3.33 +        remove_shadow(owner, gpfn, type & PGT_type_mask);
    3.34      }
    3.35  
    3.36      switch ( type & PGT_type_mask )
    3.37 @@ -2203,8 +2206,7 @@ int do_mmu_update(
    3.38                      {
    3.39                          shadow_lock(d);
    3.40  
    3.41 -                        if ( shadow_mode_log_dirty(d) )
    3.42 -                            __mark_dirty(d, mfn);
    3.43 +                        __mark_dirty(d, mfn);
    3.44  
    3.45                          if ( page_is_page_table(page) &&
    3.46                               !page_out_of_sync(page) )
    3.47 @@ -2263,13 +2265,7 @@ int do_mmu_update(
    3.48              set_pfn_from_mfn(mfn, gpfn);
    3.49              okay = 1;
    3.50  
    3.51 -            /*
    3.52 -             * If in log-dirty mode, mark the corresponding
    3.53 -             * page as dirty.
    3.54 -             */
    3.55 -            if ( unlikely(shadow_mode_log_dirty(FOREIGNDOM)) &&
    3.56 -                 mark_dirty(FOREIGNDOM, mfn) )
    3.57 -                FOREIGNDOM->arch.shadow_dirty_block_count++;
    3.58 +            mark_dirty(FOREIGNDOM, mfn);
    3.59  
    3.60              put_page(&frame_table[mfn]);
    3.61              break;
    3.62 @@ -2841,8 +2837,7 @@ long do_update_descriptor(u64 pa, u64 de
    3.63      {
    3.64          shadow_lock(dom);
    3.65  
    3.66 -        if ( shadow_mode_log_dirty(dom) )
    3.67 -            __mark_dirty(dom, mfn);
    3.68 +        __mark_dirty(dom, mfn);
    3.69  
    3.70          if ( page_is_page_table(page) && !page_out_of_sync(page) )
    3.71              shadow_mark_mfn_out_of_sync(current, gpfn, mfn);
     4.1 --- a/xen/arch/x86/shadow.c	Thu Nov 24 15:57:36 2005 +0000
     4.2 +++ b/xen/arch/x86/shadow.c	Thu Nov 24 15:57:45 2005 +0000
     4.3 @@ -1757,6 +1757,7 @@ static void sync_all(struct domain *d)
     4.4      struct out_of_sync_entry *entry;
     4.5      int need_flush = 0;
     4.6      l1_pgentry_t *ppte, opte, npte;
     4.7 +    cpumask_t other_vcpus_mask;
     4.8  
     4.9      perfc_incrc(shadow_sync_all);
    4.10  
    4.11 @@ -1789,23 +1790,15 @@ static void sync_all(struct domain *d)
    4.12          unmap_domain_page(ppte);
    4.13      }
    4.14  
    4.15 -    // XXX mafetter: SMP
    4.16 -    //
    4.17 -    // With the current algorithm, we've gotta flush all the TLBs
    4.18 -    // before we can safely continue.  I don't think we want to
    4.19 -    // do it this way, so I think we should consider making
    4.20 -    // entirely private copies of the shadow for each vcpu, and/or
    4.21 -    // possibly having a mix of private and shared shadow state
    4.22 -    // (any path from a PTE that grants write access to an out-of-sync
    4.23 -    // page table page needs to be vcpu private).
    4.24 -    //
    4.25 -#if 0 // this should be enabled for SMP guests...
    4.26 -    flush_tlb_mask(cpu_online_map);
    4.27 -#endif
    4.28 +    /* Other VCPUs mustn't use the revoked writable mappings. */
    4.29 +    other_vcpus_mask = d->cpumask;
    4.30 +    cpu_clear(smp_processor_id(), other_vcpus_mask);
    4.31 +    flush_tlb_mask(other_vcpus_mask);
    4.32 +
    4.33 +    /* Flush ourself later. */
    4.34      need_flush = 1;
    4.35  
    4.36 -    // Second, resync all L1 pages, then L2 pages, etc...
    4.37 -    //
    4.38 +    /* Second, resync all L1 pages, then L2 pages, etc... */
    4.39      need_flush |= resync_all(d, PGT_l1_shadow);
    4.40  
    4.41  #if CONFIG_PAGING_LEVELS == 2
    4.42 @@ -1858,8 +1851,7 @@ static inline int l1pte_write_fault(
    4.43      SH_VVLOG("l1pte_write_fault: updating spte=0x%" PRIpte " gpte=0x%" PRIpte,
    4.44               l1e_get_intpte(spte), l1e_get_intpte(gpte));
    4.45  
    4.46 -    if ( shadow_mode_log_dirty(d) )
    4.47 -        __mark_dirty(d, gmfn);
    4.48 +    __mark_dirty(d, gmfn);
    4.49  
    4.50      if ( mfn_is_page_table(gmfn) )
    4.51          shadow_mark_va_out_of_sync(v, gpfn, gmfn, va);
    4.52 @@ -2021,9 +2013,7 @@ static int shadow_fault_32(unsigned long
    4.53              domain_crash_synchronous();
    4.54          }
    4.55  
    4.56 -        // if necessary, record the page table page as dirty
    4.57 -        if ( unlikely(shadow_mode_log_dirty(d)) )
    4.58 -            __mark_dirty(d, __gpfn_to_mfn(d, l2e_get_pfn(gpde)));
    4.59 +        __mark_dirty(d, __gpfn_to_mfn(d, l2e_get_pfn(gpde)));
    4.60      }
    4.61  
    4.62      shadow_set_l1e(va, spte, 1);
    4.63 @@ -2082,8 +2072,7 @@ static int do_update_va_mapping(unsigned
    4.64       * the PTE in the PT-holding page. We need the machine frame number
    4.65       * for this.
    4.66       */
    4.67 -    if ( shadow_mode_log_dirty(d) )
    4.68 -        __mark_dirty(d, va_to_l1mfn(v, va));
    4.69 +    __mark_dirty(d, va_to_l1mfn(v, va));
    4.70  
    4.71      shadow_unlock(d);
    4.72  
    4.73 @@ -3189,11 +3178,7 @@ static inline int l2e_rw_fault(
    4.74                  l1e_remove_flags(sl1e, _PAGE_RW);
    4.75              }
    4.76          } else {
    4.77 -            /* log dirty*/
    4.78 -            /*
    4.79 -               if ( shadow_mode_log_dirty(d) )
    4.80 -               __mark_dirty(d, gmfn);
    4.81 -             */
    4.82 +            /* __mark_dirty(d, gmfn); */
    4.83          }
    4.84         // printk("<%s> gpfn: %lx, mfn: %lx, sl1e: %lx\n", __func__, gpfn, mfn, l1e_get_intpte(sl1e));
    4.85          /* The shadow entrys need setup before shadow_mark_va_out_of_sync()*/
    4.86 @@ -3476,9 +3461,7 @@ check_writeable:
    4.87          if (unlikely(!__guest_set_l1e(v, va, &gl1e))) 
    4.88              domain_crash_synchronous();
    4.89  
    4.90 -        // if necessary, record the page table page as dirty
    4.91 -        if ( unlikely(shadow_mode_log_dirty(d)) )
    4.92 -            __mark_dirty(d, __gpfn_to_mfn(d, l2e_get_pfn(gl2e)));
    4.93 +        __mark_dirty(d, __gpfn_to_mfn(d, l2e_get_pfn(gl2e)));
    4.94      }
    4.95  
    4.96      shadow_set_l1e_64(va, (pgentry_64_t *)&sl1e, 1);
     5.1 --- a/xen/arch/x86/shadow32.c	Thu Nov 24 15:57:36 2005 +0000
     5.2 +++ b/xen/arch/x86/shadow32.c	Thu Nov 24 15:57:45 2005 +0000
     5.3 @@ -1274,8 +1274,6 @@ static int shadow_mode_table_op(
     5.4  
     5.5          d->arch.shadow_fault_count       = 0;
     5.6          d->arch.shadow_dirty_count       = 0;
     5.7 -        d->arch.shadow_dirty_net_count   = 0;
     5.8 -        d->arch.shadow_dirty_block_count = 0;
     5.9  
    5.10          break;
    5.11     
    5.12 @@ -1284,13 +1282,9 @@ static int shadow_mode_table_op(
    5.13  
    5.14          sc->stats.fault_count       = d->arch.shadow_fault_count;
    5.15          sc->stats.dirty_count       = d->arch.shadow_dirty_count;
    5.16 -        sc->stats.dirty_net_count   = d->arch.shadow_dirty_net_count;
    5.17 -        sc->stats.dirty_block_count = d->arch.shadow_dirty_block_count;
    5.18  
    5.19          d->arch.shadow_fault_count       = 0;
    5.20          d->arch.shadow_dirty_count       = 0;
    5.21 -        d->arch.shadow_dirty_net_count   = 0;
    5.22 -        d->arch.shadow_dirty_block_count = 0;
    5.23   
    5.24          if ( (sc->dirty_bitmap == NULL) || 
    5.25               (d->arch.shadow_dirty_bitmap == NULL) )
    5.26 @@ -1327,9 +1321,6 @@ static int shadow_mode_table_op(
    5.27      case DOM0_SHADOW_CONTROL_OP_PEEK:
    5.28          sc->stats.fault_count       = d->arch.shadow_fault_count;
    5.29          sc->stats.dirty_count       = d->arch.shadow_dirty_count;
    5.30 -        sc->stats.dirty_net_count   = d->arch.shadow_dirty_net_count;
    5.31 -        sc->stats.dirty_block_count = d->arch.shadow_dirty_block_count;
    5.32 - 
    5.33  
    5.34          if ( (sc->dirty_bitmap == NULL) || 
    5.35               (d->arch.shadow_dirty_bitmap == NULL) )
    5.36 @@ -2563,6 +2554,7 @@ void __shadow_sync_all(struct domain *d)
    5.37      struct out_of_sync_entry *entry;
    5.38      int need_flush = 0;
    5.39      l1_pgentry_t *ppte, opte, npte;
    5.40 +    cpumask_t other_vcpus_mask;
    5.41  
    5.42      perfc_incrc(shadow_sync_all);
    5.43  
    5.44 @@ -2595,23 +2587,15 @@ void __shadow_sync_all(struct domain *d)
    5.45          unmap_domain_page(ppte);
    5.46      }
    5.47  
    5.48 -    // XXX mafetter: SMP
    5.49 -    //
    5.50 -    // With the current algorithm, we've gotta flush all the TLBs
    5.51 -    // before we can safely continue.  I don't think we want to
    5.52 -    // do it this way, so I think we should consider making
    5.53 -    // entirely private copies of the shadow for each vcpu, and/or
    5.54 -    // possibly having a mix of private and shared shadow state
    5.55 -    // (any path from a PTE that grants write access to an out-of-sync
    5.56 -    // page table page needs to be vcpu private).
    5.57 -    //
    5.58 -#if 0 // this should be enabled for SMP guests...
    5.59 -    flush_tlb_mask(cpu_online_map);
    5.60 -#endif
    5.61 +    /* Other VCPUs mustn't use the revoked writable mappings. */
    5.62 +    other_vcpus_mask = d->cpumask;
    5.63 +    cpu_clear(smp_processor_id(), other_vcpus_mask);
    5.64 +    flush_tlb_mask(other_vcpus_mask);
    5.65 +
    5.66 +    /* Flush ourself later. */
    5.67      need_flush = 1;
    5.68  
    5.69 -    // Second, resync all L1 pages, then L2 pages, etc...
    5.70 -    //
    5.71 +    /* Second, resync all L1 pages, then L2 pages, etc... */
    5.72      need_flush |= resync_all(d, PGT_l1_shadow);
    5.73      if ( shadow_mode_translate(d) )
    5.74          need_flush |= resync_all(d, PGT_hl2_shadow);
    5.75 @@ -2738,9 +2722,7 @@ int shadow_fault(unsigned long va, struc
    5.76              domain_crash_synchronous();
    5.77          }
    5.78  
    5.79 -        // if necessary, record the page table page as dirty
    5.80 -        if ( unlikely(shadow_mode_log_dirty(d)) )
    5.81 -            __mark_dirty(d, __gpfn_to_mfn(d, l2e_get_pfn(gpde)));
    5.82 +        __mark_dirty(d, __gpfn_to_mfn(d, l2e_get_pfn(gpde)));
    5.83      }
    5.84  
    5.85      shadow_set_l1e(va, spte, 1);
    5.86 @@ -2837,8 +2819,6 @@ int shadow_do_update_va_mapping(unsigned
    5.87  
    5.88      shadow_lock(d);
    5.89  
    5.90 -    //printk("%s(va=%p, val=%p)\n", __func__, (void *)va, (void *)l1e_get_intpte(val));
    5.91 -        
    5.92      // This is actually overkill - we don't need to sync the L1 itself,
    5.93      // just everything involved in getting to this L1 (i.e. we need
    5.94      // linear_pg_table[l1_linear_offset(va)] to be in sync)...
    5.95 @@ -2853,10 +2833,8 @@ int shadow_do_update_va_mapping(unsigned
    5.96       * the PTE in the PT-holding page. We need the machine frame number
    5.97       * for this.
    5.98       */
    5.99 -    if ( shadow_mode_log_dirty(d) )
   5.100 -        __mark_dirty(d, va_to_l1mfn(v, va));
   5.101 -
   5.102 -// out:
   5.103 +    __mark_dirty(d, va_to_l1mfn(v, va));
   5.104 +
   5.105      shadow_unlock(d);
   5.106  
   5.107      return rc;
     6.1 --- a/xen/arch/x86/shadow_public.c	Thu Nov 24 15:57:36 2005 +0000
     6.2 +++ b/xen/arch/x86/shadow_public.c	Thu Nov 24 15:57:45 2005 +0000
     6.3 @@ -1183,8 +1183,6 @@ static int shadow_mode_table_op(
     6.4  
     6.5          d->arch.shadow_fault_count       = 0;
     6.6          d->arch.shadow_dirty_count       = 0;
     6.7 -        d->arch.shadow_dirty_net_count   = 0;
     6.8 -        d->arch.shadow_dirty_block_count = 0;
     6.9  
    6.10          break;
    6.11     
    6.12 @@ -1193,15 +1191,10 @@ static int shadow_mode_table_op(
    6.13  
    6.14          sc->stats.fault_count       = d->arch.shadow_fault_count;
    6.15          sc->stats.dirty_count       = d->arch.shadow_dirty_count;
    6.16 -        sc->stats.dirty_net_count   = d->arch.shadow_dirty_net_count;
    6.17 -        sc->stats.dirty_block_count = d->arch.shadow_dirty_block_count;
    6.18  
    6.19          d->arch.shadow_fault_count       = 0;
    6.20          d->arch.shadow_dirty_count       = 0;
    6.21 -        d->arch.shadow_dirty_net_count   = 0;
    6.22 -        d->arch.shadow_dirty_block_count = 0;
    6.23   
    6.24 -
    6.25          if ( (sc->dirty_bitmap == NULL) || 
    6.26               (d->arch.shadow_dirty_bitmap == NULL) )
    6.27          {
    6.28 @@ -1236,8 +1229,6 @@ static int shadow_mode_table_op(
    6.29      case DOM0_SHADOW_CONTROL_OP_PEEK:
    6.30          sc->stats.fault_count       = d->arch.shadow_fault_count;
    6.31          sc->stats.dirty_count       = d->arch.shadow_dirty_count;
    6.32 -        sc->stats.dirty_net_count   = d->arch.shadow_dirty_net_count;
    6.33 -        sc->stats.dirty_block_count = d->arch.shadow_dirty_block_count;
    6.34   
    6.35          if ( (sc->dirty_bitmap == NULL) || 
    6.36               (d->arch.shadow_dirty_bitmap == NULL) )
     7.1 --- a/xen/common/trace.c	Thu Nov 24 15:57:36 2005 +0000
     7.2 +++ b/xen/common/trace.c	Thu Nov 24 15:57:45 2005 +0000
     7.3 @@ -89,7 +89,6 @@ static int alloc_trace_bufs(void)
     7.4      {
     7.5          buf = t_bufs[i] = (struct t_buf *)&rawbuf[i*opt_tbuf_size*PAGE_SIZE];
     7.6          buf->cons = buf->prod = 0;
     7.7 -        buf->nr_recs = nr_recs;
     7.8          t_recs[i] = (struct t_rec *)(buf + 1);
     7.9      }
    7.10  
     8.1 --- a/xen/include/asm-x86/domain.h	Thu Nov 24 15:57:36 2005 +0000
     8.2 +++ b/xen/include/asm-x86/domain.h	Thu Nov 24 15:57:45 2005 +0000
     8.3 @@ -49,8 +49,6 @@ struct arch_domain
     8.4  
     8.5      unsigned int shadow_fault_count;
     8.6      unsigned int shadow_dirty_count;
     8.7 -    unsigned int shadow_dirty_net_count;
     8.8 -    unsigned int shadow_dirty_block_count;
     8.9  
    8.10      /* full shadow mode */
    8.11      struct out_of_sync_entry *out_of_sync; /* list of out-of-sync pages */
     9.1 --- a/xen/include/asm-x86/grant_table.h	Thu Nov 24 15:57:36 2005 +0000
     9.2 +++ b/xen/include/asm-x86/grant_table.h	Thu Nov 24 15:57:45 2005 +0000
     9.3 @@ -33,10 +33,6 @@ int steal_page_for_grant_transfer(
     9.4  #define gnttab_shared_mfn(d, t, i)                      \
     9.5      ((virt_to_phys((t)->shared) >> PAGE_SHIFT) + (i))
     9.6  
     9.7 -#define gnttab_log_dirty(d, f)                          \
     9.8 -    do {                                                \
     9.9 -        if ( unlikely(shadow_mode_log_dirty((d))) )     \
    9.10 -            mark_dirty((d), (f));                       \
    9.11 -    } while ( 0 )
    9.12 +#define gnttab_log_dirty(d, f) mark_dirty((d), (f))
    9.13  
    9.14  #endif /* __ASM_GRANT_TABLE_H__ */
    10.1 --- a/xen/include/asm-x86/shadow.h	Thu Nov 24 15:57:36 2005 +0000
    10.2 +++ b/xen/include/asm-x86/shadow.h	Thu Nov 24 15:57:45 2005 +0000
    10.3 @@ -343,19 +343,15 @@ extern int shadow_status_noswap;
    10.4  #define SHADOW_REFLECTS_SNAPSHOT _PAGE_AVAIL0
    10.5  #endif
    10.6  
    10.7 -#ifdef VERBOSE
    10.8 +#if SHADOW_VERBOSE_DEBUG
    10.9  #define SH_LOG(_f, _a...)                                               \
   10.10      printk("DOM%uP%u: SH_LOG(%d): " _f "\n",                            \
   10.11         current->domain->domain_id , current->processor, __LINE__ , ## _a )
   10.12 -#else
   10.13 -#define SH_LOG(_f, _a...) ((void)0)
   10.14 -#endif
   10.15 -
   10.16 -#if SHADOW_VERBOSE_DEBUG
   10.17  #define SH_VLOG(_f, _a...)                                              \
   10.18      printk("DOM%uP%u: SH_VLOG(%d): " _f "\n",                           \
   10.19             current->domain->domain_id, current->processor, __LINE__ , ## _a )
   10.20  #else
   10.21 +#define SH_LOG(_f, _a...) ((void)0)
   10.22  #define SH_VLOG(_f, _a...) ((void)0)
   10.23  #endif
   10.24  
   10.25 @@ -468,21 +464,18 @@ static inline void shadow_put_page(struc
   10.26  
   10.27  /************************************************************************/
   10.28  
   10.29 -static inline int __mark_dirty(struct domain *d, unsigned long mfn)
   10.30 +static inline void __mark_dirty(struct domain *d, unsigned long mfn)
   10.31  {
   10.32      unsigned long pfn;
   10.33 -    int           rc = 0;
   10.34  
   10.35      ASSERT(shadow_lock_is_acquired(d));
   10.36 +
   10.37 +    if ( likely(!shadow_mode_log_dirty(d)) || !VALID_MFN(mfn) )
   10.38 +        return;
   10.39 +
   10.40      ASSERT(d->arch.shadow_dirty_bitmap != NULL);
   10.41  
   10.42 -    if ( !VALID_MFN(mfn) )
   10.43 -        return rc;
   10.44 -
   10.45 -    // N.B. This doesn't use __mfn_to_gpfn().
   10.46 -    // This wants the nice compact set of PFNs from 0..domain's max,
   10.47 -    // which __mfn_to_gpfn() only returns for translated domains.
   10.48 -    //
   10.49 +    /* We /really/ mean PFN here, even for non-translated guests. */
   10.50      pfn = get_pfn_from_mfn(mfn);
   10.51  
   10.52      /*
   10.53 @@ -491,16 +484,13 @@ static inline int __mark_dirty(struct do
   10.54       * Nothing to do here...
   10.55       */
   10.56      if ( unlikely(IS_INVALID_M2P_ENTRY(pfn)) )
   10.57 -        return rc;
   10.58 +        return;
   10.59  
   10.60 -    if ( likely(pfn < d->arch.shadow_dirty_bitmap_size) )
   10.61 +    /* N.B. Can use non-atomic TAS because protected by shadow_lock. */
   10.62 +    if ( likely(pfn < d->arch.shadow_dirty_bitmap_size) &&
   10.63 +         !__test_and_set_bit(pfn, d->arch.shadow_dirty_bitmap) )
   10.64      {
   10.65 -        /* N.B. Can use non-atomic TAS because protected by shadow_lock. */
   10.66 -        if ( !__test_and_set_bit(pfn, d->arch.shadow_dirty_bitmap) )
   10.67 -        {
   10.68 -            d->arch.shadow_dirty_count++;
   10.69 -            rc = 1;
   10.70 -        }
   10.71 +        d->arch.shadow_dirty_count++;
   10.72      }
   10.73  #ifndef NDEBUG
   10.74      else if ( mfn < max_page )
   10.75 @@ -513,18 +503,17 @@ static inline int __mark_dirty(struct do
   10.76                 frame_table[mfn].u.inuse.type_info );
   10.77      }
   10.78  #endif
   10.79 -
   10.80 -    return rc;
   10.81  }
   10.82  
   10.83  
   10.84 -static inline int mark_dirty(struct domain *d, unsigned int mfn)
   10.85 +static inline void mark_dirty(struct domain *d, unsigned int mfn)
   10.86  {
   10.87 -    int rc;
   10.88 -    shadow_lock(d);
   10.89 -    rc = __mark_dirty(d, mfn);
   10.90 -    shadow_unlock(d);
   10.91 -    return rc;
   10.92 +    if ( unlikely(shadow_mode_log_dirty(d)) )
   10.93 +    {
   10.94 +        shadow_lock(d);
   10.95 +        __mark_dirty(d, mfn);
   10.96 +        shadow_unlock(d);
   10.97 +    }
   10.98  }
   10.99  
  10.100  
  10.101 @@ -566,8 +555,7 @@ static inline void
  10.102      if ( unlikely(shadow_mode_translate(d)) )
  10.103          update_hl2e(v, va);
  10.104  
  10.105 -    if ( unlikely(shadow_mode_log_dirty(d)) )
  10.106 -        __mark_dirty(d, pagetable_get_pfn(v->arch.guest_table));
  10.107 +    __mark_dirty(d, pagetable_get_pfn(v->arch.guest_table));
  10.108  }
  10.109  
  10.110  static inline void
  10.111 @@ -608,8 +596,8 @@ update_hl2e(struct vcpu *v, unsigned lon
  10.112          if ( need_flush )
  10.113          {
  10.114              perfc_incrc(update_hl2e_invlpg);
  10.115 -            // SMP BUG???
  10.116 -            local_flush_tlb_one(&linear_pg_table[l1_linear_offset(va)]);
  10.117 +            flush_tlb_one_mask(v->domain->cpumask,
  10.118 +                               &linear_pg_table[l1_linear_offset(va)]);
  10.119          }
  10.120      }
  10.121  }
  10.122 @@ -787,8 +775,7 @@ static inline int l1pte_write_fault(
  10.123      SH_VVLOG("l1pte_write_fault: updating spte=0x%" PRIpte " gpte=0x%" PRIpte,
  10.124               l1e_get_intpte(spte), l1e_get_intpte(gpte));
  10.125  
  10.126 -    if ( shadow_mode_log_dirty(d) )
  10.127 -        __mark_dirty(d, gmfn);
  10.128 +    __mark_dirty(d, gmfn);
  10.129  
  10.130      if ( mfn_is_page_table(gmfn) )
  10.131          shadow_mark_va_out_of_sync(v, gpfn, gmfn, va);
  10.132 @@ -1325,46 +1312,6 @@ shadow_max_pgtable_type(struct domain *d
  10.133      return pttype;
  10.134  }
  10.135  
  10.136 -/*
  10.137 - * N.B. We can make this locking more fine grained (e.g., per shadow page) if
  10.138 - * it ever becomes a problem, but since we need a spin lock on the hash table 
  10.139 - * anyway it's probably not worth being too clever.
  10.140 - */
  10.141 -static inline unsigned long get_shadow_status(
  10.142 -    struct domain *d, unsigned long gpfn, unsigned long stype)
  10.143 -{
  10.144 -    unsigned long res;
  10.145 -
  10.146 -    ASSERT(shadow_mode_enabled(d));
  10.147 -
  10.148 -    /*
  10.149 -     * If we get here we know that some sort of update has happened to the
  10.150 -     * underlying page table page: either a PTE has been updated, or the page
  10.151 -     * has changed type. If we're in log dirty mode, we should set the
  10.152 -     * appropriate bit in the dirty bitmap.
  10.153 -     * N.B. The VA update path doesn't use this and is handled independently. 
  10.154 -     *
  10.155 -     * XXX need to think this through for vmx guests, but probably OK
  10.156 -     */
  10.157 -
  10.158 -    shadow_lock(d);
  10.159 -
  10.160 -    if ( shadow_mode_log_dirty(d) )
  10.161 -        __mark_dirty(d, __gpfn_to_mfn(d, gpfn));
  10.162 -
  10.163 -    if ( !(res = __shadow_status(d, gpfn, stype)) )
  10.164 -        shadow_unlock(d);
  10.165 -
  10.166 -    return res;
  10.167 -}
  10.168 -
  10.169 -
  10.170 -static inline void put_shadow_status(struct domain *d)
  10.171 -{
  10.172 -    shadow_unlock(d);
  10.173 -}
  10.174 -
  10.175 -
  10.176  static inline void delete_shadow_status(
  10.177      struct domain *d, unsigned long gpfn, unsigned long gmfn, unsigned int stype)
  10.178  {
    11.1 --- a/xen/include/public/trace.h	Thu Nov 24 15:57:36 2005 +0000
    11.2 +++ b/xen/include/public/trace.h	Thu Nov 24 15:57:45 2005 +0000
    11.3 @@ -74,7 +74,6 @@ struct t_rec {
    11.4  struct t_buf {
    11.5      unsigned int  cons;      /* Next item to be consumed by control tools. */
    11.6      unsigned int  prod;      /* Next item to be produced by Xen.           */
    11.7 -    unsigned int  nr_recs;   /* Number of records in this trace buffer.    */
    11.8      /* 'nr_recs' records follow immediately after the meta-data header.    */
    11.9  };
   11.10