direct-io.hg

changeset 14127:720afbf74001

[XEN] Allow log-dirty mode to be enabled on already-shadowed domains,
and catch a few missing mark_dirty() calls.
Signed-off-by: Tim Deegan <Tim.Deegan@xensource.com>
author Tim Deegan <Tim.Deegan@xensource.com>
date Mon Feb 26 13:56:01 2007 +0000 (2007-02-26)
parents bfd4fad0f052
children 9be2dec913b7 90f37b1748a8
files xen/arch/x86/hvm/hvm.c xen/arch/x86/hvm/io.c xen/arch/x86/mm/shadow/common.c xen/arch/x86/mm/shadow/multi.c xen/arch/x86/mm/shadow/private.h xen/include/asm-x86/shadow.h
line diff
     1.1 --- a/xen/arch/x86/hvm/hvm.c	Mon Feb 26 11:53:35 2007 +0000
     1.2 +++ b/xen/arch/x86/hvm/hvm.c	Mon Feb 26 13:56:01 2007 +0000
     1.3 @@ -398,6 +398,8 @@ static int __hvm_copy(void *buf, paddr_t
     1.4              memcpy(buf, p, count); /* dir == FALSE: *from guest */
     1.5  
     1.6          unmap_domain_page(p);
     1.7 +        
     1.8 +        mark_dirty(current->domain, mfn);
     1.9  
    1.10          addr += count;
    1.11          buf  += count;
     2.1 --- a/xen/arch/x86/hvm/io.c	Mon Feb 26 11:53:35 2007 +0000
     2.2 +++ b/xen/arch/x86/hvm/io.c	Mon Feb 26 13:56:01 2007 +0000
     2.3 @@ -33,6 +33,8 @@
     2.4  #include <asm/msr.h>
     2.5  #include <asm/apic.h>
     2.6  #include <asm/paging.h>
     2.7 +#include <asm/shadow.h>
     2.8 +#include <asm/p2m.h>
     2.9  #include <asm/hvm/hvm.h>
    2.10  #include <asm/hvm/support.h>
    2.11  #include <asm/hvm/vpt.h>
    2.12 @@ -739,6 +741,7 @@ void hvm_io_assist(struct vcpu *v)
    2.13      ioreq_t *p;
    2.14      struct cpu_user_regs *regs;
    2.15      struct hvm_io_op *io_opp;
    2.16 +    unsigned long gmfn;
    2.17  
    2.18      io_opp = &v->arch.hvm_vcpu.io_op;
    2.19      regs   = &io_opp->io_context;
    2.20 @@ -763,6 +766,13 @@ void hvm_io_assist(struct vcpu *v)
    2.21      /* Copy register changes back into current guest state. */
    2.22      hvm_load_cpu_guest_regs(v, regs);
    2.23      memcpy(guest_cpu_user_regs(), regs, HVM_CONTEXT_STACK_BYTES);
    2.24 +
    2.25 +    /* Has memory been dirtied? */
    2.26 +    if ( p->dir == IOREQ_READ && p->data_is_ptr ) 
    2.27 +    {
    2.28 +        gmfn = get_mfn_from_gpfn(paging_gva_to_gfn(v, p->data));
    2.29 +        mark_dirty(v->domain, gmfn);
    2.30 +    }
    2.31  }
    2.32  
    2.33  /*
     3.1 --- a/xen/arch/x86/mm/shadow/common.c	Mon Feb 26 11:53:35 2007 +0000
     3.2 +++ b/xen/arch/x86/mm/shadow/common.c	Mon Feb 26 13:56:01 2007 +0000
     3.3 @@ -981,7 +981,6 @@ mfn_t shadow_alloc(struct domain *d,
     3.4          INIT_LIST_HEAD(&sp[i].list);
     3.5          sp[i].type = shadow_type;
     3.6          sp[i].pinned = 0;
     3.7 -        sp[i].logdirty = 0;
     3.8          sp[i].count = 0;
     3.9          sp[i].backpointer = backpointer;
    3.10          sp[i].next_shadow = NULL;
    3.11 @@ -1230,7 +1229,6 @@ static unsigned int sh_set_allocation(st
    3.12              {
    3.13                  sp[j].type = 0;  
    3.14                  sp[j].pinned = 0;
    3.15 -                sp[j].logdirty = 0;
    3.16                  sp[j].count = 0;
    3.17                  sp[j].mbz = 0;
    3.18                  sp[j].tlbflush_timestamp = 0; /* Not in any TLB */
    3.19 @@ -2558,7 +2556,7 @@ static int shadow_one_bit_enable(struct 
    3.20      ASSERT(shadow_locked_by_me(d));
    3.21  
    3.22      /* Sanity check the call */
    3.23 -    if ( d == current->domain || (d->arch.paging.mode & mode) )
    3.24 +    if ( d == current->domain || (d->arch.paging.mode & mode) == mode )
    3.25      {
    3.26          return -EINVAL;
    3.27      }
    3.28 @@ -2589,7 +2587,7 @@ static int shadow_one_bit_disable(struct
    3.29      ASSERT(shadow_locked_by_me(d));
    3.30  
    3.31      /* Sanity check the call */
    3.32 -    if ( d == current->domain || !(d->arch.paging.mode & mode) )
    3.33 +    if ( d == current->domain || !((d->arch.paging.mode & mode) == mode) )
    3.34      {
    3.35          return -EINVAL;
    3.36      }
    3.37 @@ -2646,17 +2644,7 @@ static int shadow_test_enable(struct dom
    3.38  
    3.39      domain_pause(d);
    3.40      shadow_lock(d);
    3.41 -
    3.42 -    if ( shadow_mode_enabled(d) )
    3.43 -    {
    3.44 -        SHADOW_ERROR("Don't support enabling test mode"
    3.45 -                      " on already shadowed doms\n");
    3.46 -        ret = -EINVAL;
    3.47 -        goto out;
    3.48 -    }
    3.49 -
    3.50      ret = shadow_one_bit_enable(d, PG_SH_enable);
    3.51 - out:
    3.52      shadow_unlock(d);
    3.53      domain_unpause(d);
    3.54  
    3.55 @@ -2722,10 +2710,10 @@ static int shadow_log_dirty_enable(struc
    3.56  
    3.57      if ( shadow_mode_enabled(d) )
    3.58      {
    3.59 -        SHADOW_ERROR("Don't (yet) support enabling log-dirty"
    3.60 -                      " on already shadowed doms\n");
    3.61 -        ret = -EINVAL;
    3.62 -        goto out;
    3.63 +        /* This domain already has some shadows: need to clear them out 
    3.64 +         * of the way to make sure that all references to guest memory are 
    3.65 +         * properly write-protected */
    3.66 +        shadow_blow_tables(d);
    3.67      }
    3.68  
    3.69  #if (SHADOW_OPTIMIZATIONS & SHOPT_LINUX_L3_TOPLEVEL)
    3.70 @@ -2917,12 +2905,18 @@ static int shadow_log_dirty_op(
    3.71  void sh_mark_dirty(struct domain *d, mfn_t gmfn)
    3.72  {
    3.73      unsigned long pfn;
    3.74 -
    3.75 -    ASSERT(shadow_locked_by_me(d));
    3.76 +    int do_locking;
    3.77  
    3.78      if ( !shadow_mode_log_dirty(d) || !mfn_valid(gmfn) )
    3.79          return;
    3.80  
    3.81 +    /* Although this is an externally visible function, we do not know
    3.82 +     * whether the shadow lock will be held when it is called (since it
    3.83 +     * can be called from __hvm_copy during emulation).
    3.84 +     * If the lock isn't held, take it for the duration of the call. */
    3.85 +    do_locking = !shadow_locked_by_me(d);
    3.86 +    if ( do_locking ) shadow_lock(d);
    3.87 +
    3.88      ASSERT(d->arch.paging.shadow.dirty_bitmap != NULL);
    3.89  
    3.90      /* We /really/ mean PFN here, even for non-translated guests. */
    3.91 @@ -2962,13 +2956,8 @@ void sh_mark_dirty(struct domain *d, mfn
    3.92                         mfn_to_page(gmfn)->count_info, 
    3.93                         mfn_to_page(gmfn)->u.inuse.type_info);
    3.94      }
    3.95 -}
    3.96 -
    3.97 -void shadow_mark_dirty(struct domain *d, mfn_t gmfn)
    3.98 -{
    3.99 -    shadow_lock(d);
   3.100 -    sh_mark_dirty(d, gmfn);
   3.101 -    shadow_unlock(d);
   3.102 +
   3.103 +    if ( do_locking ) shadow_unlock(d);
   3.104  }
   3.105  
   3.106  /**************************************************************************/
   3.107 @@ -2992,9 +2981,7 @@ int shadow_domctl(struct domain *d,
   3.108          if ( shadow_mode_log_dirty(d) )
   3.109              if ( (rc = shadow_log_dirty_disable(d)) != 0 ) 
   3.110                  return rc;
   3.111 -        if ( is_hvm_domain(d) )
   3.112 -            return -EINVAL;
   3.113 -        if ( d->arch.paging.mode & PG_SH_enable )
   3.114 +        if ( d->arch.paging.mode == PG_SH_enable )
   3.115              if ( (rc = shadow_test_disable(d)) != 0 ) 
   3.116                  return rc;
   3.117          return 0;
     4.1 --- a/xen/arch/x86/mm/shadow/multi.c	Mon Feb 26 11:53:35 2007 +0000
     4.2 +++ b/xen/arch/x86/mm/shadow/multi.c	Mon Feb 26 13:56:01 2007 +0000
     4.3 @@ -101,14 +101,6 @@ get_fl1_shadow_status(struct vcpu *v, gf
     4.4  /* Look for FL1 shadows in the hash table */
     4.5  {
     4.6      mfn_t smfn = shadow_hash_lookup(v, gfn_x(gfn), SH_type_fl1_shadow);
     4.7 -
     4.8 -    if ( unlikely(shadow_mode_log_dirty(v->domain) && mfn_valid(smfn)) )
     4.9 -    {
    4.10 -        struct shadow_page_info *sp = mfn_to_shadow_page(smfn);
    4.11 -        if ( !(sp->logdirty) )
    4.12 -            shadow_convert_to_log_dirty(v, smfn);
    4.13 -    }
    4.14 -
    4.15      return smfn;
    4.16  }
    4.17  
    4.18 @@ -118,14 +110,6 @@ get_shadow_status(struct vcpu *v, mfn_t 
    4.19  {
    4.20      mfn_t smfn = shadow_hash_lookup(v, mfn_x(gmfn), shadow_type);
    4.21      perfc_incrc(shadow_get_shadow_status);
    4.22 -
    4.23 -    if ( unlikely(shadow_mode_log_dirty(v->domain) && mfn_valid(smfn)) )
    4.24 -    {
    4.25 -        struct shadow_page_info *sp = mfn_to_shadow_page(smfn);
    4.26 -        if ( !(sp->logdirty) )
    4.27 -            shadow_convert_to_log_dirty(v, smfn);
    4.28 -    }
    4.29 -
    4.30      return smfn;
    4.31  }
    4.32  
    4.33 @@ -136,12 +120,6 @@ set_fl1_shadow_status(struct vcpu *v, gf
    4.34      SHADOW_PRINTK("gfn=%"SH_PRI_gfn", type=%08x, smfn=%05lx\n",
    4.35                     gfn_x(gfn), SH_type_fl1_shadow, mfn_x(smfn));
    4.36  
    4.37 -    if ( unlikely(shadow_mode_log_dirty(v->domain)) )
    4.38 -        // mark this shadow as a log dirty shadow...
    4.39 -        mfn_to_shadow_page(smfn)->logdirty = 1;
    4.40 -    else
    4.41 -        mfn_to_shadow_page(smfn)->logdirty = 0;
    4.42 -
    4.43      shadow_hash_insert(v, gfn_x(gfn), SH_type_fl1_shadow, smfn);
    4.44  }
    4.45  
    4.46 @@ -156,12 +134,6 @@ set_shadow_status(struct vcpu *v, mfn_t 
    4.47                     d->domain_id, v->vcpu_id, mfn_x(gmfn),
    4.48                     shadow_type, mfn_x(smfn));
    4.49  
    4.50 -    if ( unlikely(shadow_mode_log_dirty(d)) )
    4.51 -        // mark this shadow as a log dirty shadow...
    4.52 -        mfn_to_shadow_page(smfn)->logdirty = 1;
    4.53 -    else
    4.54 -        mfn_to_shadow_page(smfn)->logdirty = 0;
    4.55 -
    4.56  #ifdef CONFIG_COMPAT
    4.57      if ( !IS_COMPAT(d) || shadow_type != SH_type_l4_64_shadow )
    4.58  #endif
    4.59 @@ -3994,6 +3966,8 @@ sh_x86_emulate_write(struct vcpu *v, uns
    4.60      /* If we are writing zeros to this page, might want to unshadow */
    4.61      if ( likely(bytes >= 4) && (*(u32 *)addr == 0) && is_lo_pte(vaddr) )
    4.62          check_for_early_unshadow(v, mfn);
    4.63 +    
    4.64 +    sh_mark_dirty(v->domain, mfn);
    4.65  
    4.66      sh_unmap_domain_page(addr);
    4.67      shadow_audit_tables(v);
    4.68 @@ -4047,6 +4021,8 @@ sh_x86_emulate_cmpxchg(struct vcpu *v, u
    4.69      if ( likely(bytes >= 4) && (*(u32 *)addr == 0) && is_lo_pte(vaddr) )
    4.70          check_for_early_unshadow(v, mfn);
    4.71  
    4.72 +    sh_mark_dirty(v->domain, mfn);
    4.73 +
    4.74      sh_unmap_domain_page(addr);
    4.75      shadow_audit_tables(v);
    4.76      return rv;
    4.77 @@ -4087,6 +4063,8 @@ sh_x86_emulate_cmpxchg8b(struct vcpu *v,
    4.78      if ( *(u32 *)addr == 0 )
    4.79          check_for_early_unshadow(v, mfn);
    4.80  
    4.81 +    sh_mark_dirty(v->domain, mfn);
    4.82 +
    4.83      sh_unmap_domain_page(addr);
    4.84      shadow_audit_tables(v);
    4.85      return rv;
     5.1 --- a/xen/arch/x86/mm/shadow/private.h	Mon Feb 26 11:53:35 2007 +0000
     5.2 +++ b/xen/arch/x86/mm/shadow/private.h	Mon Feb 26 13:56:01 2007 +0000
     5.3 @@ -229,8 +229,7 @@ struct shadow_page_info
     5.4      struct {
     5.5          unsigned int type:4;      /* What kind of shadow is this? */
     5.6          unsigned int pinned:1;    /* Is the shadow pinned? */
     5.7 -        unsigned int logdirty:1;  /* Was it made in log-dirty mode? */
     5.8 -        unsigned int count:26;    /* Reference count */
     5.9 +        unsigned int count:27;    /* Reference count */
    5.10          u32 mbz;                  /* Must be zero: this is where the owner 
    5.11                                     * field lives in a non-shadow page */
    5.12      } __attribute__((packed));
     6.1 --- a/xen/include/asm-x86/shadow.h	Mon Feb 26 11:53:35 2007 +0000
     6.2 +++ b/xen/include/asm-x86/shadow.h	Mon Feb 26 13:56:01 2007 +0000
     6.3 @@ -87,12 +87,13 @@ void shadow_final_teardown(struct domain
     6.4  
     6.5  /* Mark a page as dirty in the log-dirty bitmap: called when Xen 
     6.6   * makes changes to guest memory on its behalf. */
     6.7 -void shadow_mark_dirty(struct domain *d, mfn_t gmfn);
     6.8 +void sh_mark_dirty(struct domain *d, mfn_t gmfn);
     6.9  /* Cleaner version so we don't pepper shadow_mode tests all over the place */
    6.10  static inline void mark_dirty(struct domain *d, unsigned long gmfn)
    6.11  {
    6.12      if ( unlikely(shadow_mode_log_dirty(d)) )
    6.13 -        shadow_mark_dirty(d, _mfn(gmfn));
    6.14 +        /* See the comment about locking in sh_mark_dirty */
    6.15 +        sh_mark_dirty(d, _mfn(gmfn));
    6.16  }
    6.17  
    6.18  /* Update all the things that are derived from the guest's CR0/CR3/CR4.