ia64/xen-unstable

changeset 15643:531b8ccda973

[HVM] Shadow: release shadow lock during emulation path
and retake it only for the write-back at the end.
Signed-off-by: Tim Deegan <Tim.Deegan@xensource.com>
author Tim Deegan <Tim.Deegan@xensource.com>
date Tue Jul 24 11:10:08 2007 +0100 (2007-07-24)
parents 7bdc9f6407d3
children eff24408830c c585f993385c
files xen/arch/x86/mm/shadow/multi.c
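The patch narrows the shadow-lock critical section in sh_page_fault(): the guest walk is audited and unmapped and the lock is dropped before the (potentially long-running) call to x86_emulate(), and the write-back callbacks re-take the lock only around the actual pagetable update. Presumably this keeps other vcpus from being held off the shadow lock while an instruction is being emulated. A minimal sketch of the pattern, using a POSIX mutex as a stand-in for the per-domain shadow lock; slow_emulate() and apply_write() are made-up placeholders, not Xen functions:

#include <pthread.h>
#include <string.h>

/* Stand-in for the per-domain shadow lock. */
static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

/* Placeholder for the slow step that must not run under the lock
 * (x86_emulate() in the patch below). */
static int slow_emulate(void)
{
    return 0;
}

/* Placeholder for the write-back that must run under the lock. */
static void apply_write(void *dest, const void *src, size_t bytes)
{
    memcpy(dest, src, bytes);
}

int handle_fault(void *dest, const void *src, size_t bytes)
{
    int rc;

    pthread_mutex_lock(&table_lock);
    /* ... gather and audit state while still locked ... */
    pthread_mutex_unlock(&table_lock);   /* drop it across the long emulation */

    rc = slow_emulate();                 /* runs with the lock released */
    if ( rc != 0 )
        return rc;

    pthread_mutex_lock(&table_lock);     /* retake only for the write-back */
    apply_write(dest, src, bytes);
    pthread_mutex_unlock(&table_lock);

    return 0;
}

The price of the pattern is that anything learned before the unlock may be stale by the time the lock is retaken, so the write-back side has to revalidate (or tolerate) changes made in between.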
line diff
--- a/xen/arch/x86/mm/shadow/multi.c	Mon Jul 23 10:03:17 2007 +0100
+++ b/xen/arch/x86/mm/shadow/multi.c	Tue Jul 24 11:10:08 2007 +0100
@@ -2920,6 +2920,15 @@ static int sh_page_fault(struct vcpu *v,
     SHADOW_PRINTK("emulate: eip=%#lx esp=%#lx\n", 
                   (unsigned long)regs->eip, (unsigned long)regs->esp);
 
+    /*
+     * We don't need to hold the lock for the whole emulation; we will
+     * take it again when we write to the pagetables.
+     */
+    sh_audit_gw(v, &gw);
+    unmap_walk(v, &gw);
+    shadow_audit_tables(v);
+    shadow_unlock(d);
+
     emul_ops = shadow_init_emulation(&emul_ctxt, regs);
 
     r = x86_emulate(&emul_ctxt.ctxt, emul_ops);
@@ -2937,7 +2946,7 @@ static int sh_page_fault(struct vcpu *v,
         /* If this is actually a page table, then we have a bug, and need 
          * to support more operations in the emulator.  More likely, 
          * though, this is a hint that this page should not be shadowed. */
-        sh_remove_shadows(v, gmfn, 0 /* thorough */, 1 /* must succeed */);
+        shadow_remove_all_shadows(v, gmfn);
     }
 
 #if GUEST_PAGING_LEVELS == 3 /* PAE guest */
@@ -2972,7 +2981,9 @@ static int sh_page_fault(struct vcpu *v,
     /* Emulator has changed the user registers: write back */
     if ( is_hvm_domain(d) )
         hvm_load_cpu_guest_regs(v, regs);
-    goto done;
+
+    SHADOW_PRINTK("emulated\n");
+    return EXCRET_fault_fixed;
 
  mmio:
     if ( !guest_mode(regs) )
@@ -4053,11 +4064,15 @@ sh_x86_emulate_write(struct vcpu *v, uns
     if ( vaddr & (bytes-1) )
         return X86EMUL_UNHANDLEABLE;
 
-    ASSERT(shadow_locked_by_me(v->domain));
     ASSERT(((vaddr & ~PAGE_MASK) + bytes) <= PAGE_SIZE);
-
-    if ( (addr = emulate_map_dest(v, vaddr, sh_ctxt, &mfn)) == NULL )
+    shadow_lock(v->domain);
+
+    addr = emulate_map_dest(v, vaddr, sh_ctxt, &mfn);
+    if ( addr == NULL )
+    {
+        shadow_unlock(v->domain);
         return X86EMUL_EXCEPTION;
+    }
 
     skip = safe_not_to_verify_write(mfn, addr, src, bytes);
     memcpy(addr, src, bytes);
@@ -4073,6 +4088,7 @@ sh_x86_emulate_write(struct vcpu *v, uns
 
     sh_unmap_domain_page(addr);
     shadow_audit_tables(v);
+    shadow_unlock(v->domain);
     return X86EMUL_OKAY;
 }
 
@@ -4086,14 +4102,18 @@ sh_x86_emulate_cmpxchg(struct vcpu *v, u
     unsigned long prev;
     int rv = X86EMUL_OKAY, skip;
 
-    ASSERT(shadow_locked_by_me(v->domain));
     ASSERT(bytes <= sizeof(unsigned long));
+    shadow_lock(v->domain);
 
     if ( vaddr & (bytes-1) )
         return X86EMUL_UNHANDLEABLE;
 
-    if ( (addr = emulate_map_dest(v, vaddr, sh_ctxt, &mfn)) == NULL )
+    addr = emulate_map_dest(v, vaddr, sh_ctxt, &mfn);
+    if ( addr == NULL )
+    {
+        shadow_unlock(v->domain);
         return X86EMUL_EXCEPTION;
+    }
 
     skip = safe_not_to_verify_write(mfn, &new, &old, bytes);
 
@@ -4129,6 +4149,7 @@ sh_x86_emulate_cmpxchg(struct vcpu *v, u
 
     sh_unmap_domain_page(addr);
     shadow_audit_tables(v);
+    shadow_unlock(v->domain);
     return rv;
 }
 
@@ -4143,13 +4164,17 @@ sh_x86_emulate_cmpxchg8b(struct vcpu *v,
     u64 old, new, prev;
     int rv = X86EMUL_OKAY, skip;
 
-    ASSERT(shadow_locked_by_me(v->domain));
-
     if ( vaddr & 7 )
         return X86EMUL_UNHANDLEABLE;
 
-    if ( (addr = emulate_map_dest(v, vaddr, sh_ctxt, &mfn)) == NULL )
+    shadow_lock(v->domain);
+
+    addr = emulate_map_dest(v, vaddr, sh_ctxt, &mfn);
+    if ( addr == NULL )
+    {
+        shadow_unlock(v->domain);
         return X86EMUL_EXCEPTION;
+    }
 
     old = (((u64) old_hi) << 32) | (u64) old_lo;
     new = (((u64) new_hi) << 32) | (u64) new_lo;
@@ -4173,6 +4198,7 @@ sh_x86_emulate_cmpxchg8b(struct vcpu *v,
 
     sh_unmap_domain_page(addr);
     shadow_audit_tables(v);
+    shadow_unlock(v->domain);
     return rv;
 }
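With the lock no longer held on entry, each write-back helper above (sh_x86_emulate_write(), sh_x86_emulate_cmpxchg(), sh_x86_emulate_cmpxchg8b()) now takes the shadow lock itself and must release it on every exit path, including the error path where emulate_map_dest() fails. A minimal sketch of that discipline, again with a POSIX mutex standing in for the shadow lock; map_dest() and fake_page are made-up placeholders for emulate_map_dest() and the mapped guest page:

#include <pthread.h>
#include <string.h>

/* Illustrative return codes, mirroring the X86EMUL_* values in the patch. */
enum { EMUL_OKAY = 0, EMUL_UNHANDLEABLE, EMUL_EXCEPTION };

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

static unsigned char fake_page[4096];    /* placeholder backing store */

/* Placeholder mapping step; returns NULL on failure, like emulate_map_dest(). */
static void *map_dest(unsigned long vaddr, size_t bytes)
{
    return (vaddr + bytes <= sizeof(fake_page)) ? &fake_page[vaddr] : NULL;
}

int emulate_write(unsigned long vaddr, const void *src, size_t bytes)
{
    void *addr;

    /* Check alignment before taking the lock, so this early return
     * cannot leave the lock held. */
    if ( vaddr & (bytes - 1) )
        return EMUL_UNHANDLEABLE;

    pthread_mutex_lock(&table_lock);

    addr = map_dest(vaddr, bytes);
    if ( addr == NULL )
    {
        pthread_mutex_unlock(&table_lock);   /* the failure path must unlock too */
        return EMUL_EXCEPTION;
    }

    memcpy(addr, src, bytes);                /* the actual write-back, under the lock */

    pthread_mutex_unlock(&table_lock);
    return EMUL_OKAY;
}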
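The cmpxchg variants follow the same shape, with the compare and the conditional store both done while the lock is held, so the check-and-write cannot interleave with another update to the shadowed page. A sketch of that core, continuing the previous example (prev, old and new mirror the names used in sh_x86_emulate_cmpxchg(); table_lock is the stand-in mutex from the sketch above):

/* Compare-and-exchange against the mapped destination: only store the new
 * value if the current contents still match what the caller expected, and
 * return what was actually found there. */
static unsigned long cmpxchg_locked(unsigned long *dest,
                                    unsigned long old, unsigned long new)
{
    unsigned long prev;

    pthread_mutex_lock(&table_lock);

    prev = *dest;
    if ( prev == old )
        *dest = new;                     /* conditional write-back, under the lock */

    pthread_mutex_unlock(&table_lock);
    return prev;
}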