ia64/xen-unstable

changeset 1219:ee66ecf7b0f5

bitkeeper revision 1.794.1.5 (4062ccdazymF4jNwgVklHYBnrJ8UUQ)

turn shadow mode off by default, locking improvements
author iap10@tetris.cl.cam.ac.uk
date Thu Mar 25 12:13:14 2004 +0000 (2004-03-25)
parents a9abf4f7b2f6
children 0b18f5295cff
files xen/common/domain.c xen/common/shadow.c
line diff
     1.1 --- a/xen/common/domain.c	Thu Mar 25 01:50:35 2004 +0000
     1.2 +++ b/xen/common/domain.c	Thu Mar 25 12:13:14 2004 +0000
     1.3 @@ -850,7 +850,7 @@ int setup_guestos(struct task_struct *p,
     1.4  
     1.5      set_bit(PF_CONSTRUCTED, &p->flags);
     1.6  
     1.7 -#if 1 // XXXXX IAP DO NOT CHECK IN ENABLED !!!!!!!
     1.8 +#if 0 // XXXXX DO NOT CHECK IN ENABLED !!! (but useful for testing so leave)
     1.9      shadow_mode_enable(p, SHM_test); 
    1.10  #endif
    1.11  
     2.1 --- a/xen/common/shadow.c	Thu Mar 25 01:50:35 2004 +0000
     2.2 +++ b/xen/common/shadow.c	Thu Mar 25 12:13:14 2004 +0000
     2.3 @@ -13,10 +13,17 @@
     2.4  To use these shadow page tables, guests must not rely on the ACCESSED
     2.5  and DIRTY bits on L2 pte's being accurate -- they will typically all be set.
     2.6  
     2.7 -
     2.8  I doubt this will break anything. (If guests want to use the va_update
     2.9  mechanism they've signed up for this anyhow...)
    2.10  
    2.11 +There's a per-domain shadow table spin lock which works fine for SMP
    2.12 +hosts. We don't have to worry about interrupts as no shadow operations
    2.13 +happen in an interrupt context. It's probably not quite ready for SMP
    2.14 +guest operation as we have to worry about synchronisation between gpte
    2.15 +and spte updates. It's possible that this might only happen in a
    2.16 +hypercall context, in which case we'll probably have a per-domain
    2.17 +hypercall lock anyhow (at least initially).
    2.18 +
    2.19  ********/
    2.20  
    2.21  
    2.22 @@ -320,30 +327,48 @@ int shadow_fault( unsigned long va, long
    2.23  
    2.24  	SH_VVLOG("shadow_fault( va=%08lx, code=%ld )", va, error_code );
    2.25  
    2.26 -    spin_lock(&current->mm.shadow_lock);
    2.27 -
    2.28      check_pagetable( current, current->mm.pagetable, "pre-sf" );
    2.29  
    2.30  	if ( unlikely(__get_user(gpte, (unsigned long*)&linear_pg_table[va>>PAGE_SHIFT])) )
    2.31  	{
    2.32  		SH_VVLOG("shadow_fault - EXIT: read gpte faulted" );
    2.33 -        spin_unlock(&current->mm.shadow_lock);
    2.34  		return 0;  // propagate to guest
    2.35  	}
    2.36  
    2.37  	if ( ! (gpte & _PAGE_PRESENT) )
    2.38  	{
    2.39  		SH_VVLOG("shadow_fault - EXIT: gpte not present (%lx)",gpte );
    2.40 -        spin_unlock(&current->mm.shadow_lock);
    2.41  		return 0;  // we're not going to be able to help
    2.42      }
    2.43  
    2.44 +    if ( (error_code & 2)  && ! (gpte & _PAGE_RW) )
    2.45 +    {
    2.46 +        // write fault on RO page
    2.47 +        return 0;
    2.48 +    }
    2.49 +
    2.50 +    spin_lock(&current->mm.shadow_lock);
    2.51 +    // take the lock and reread gpte
    2.52 +
    2.53 +	if ( unlikely(__get_user(gpte, (unsigned long*)&linear_pg_table[va>>PAGE_SHIFT])) )
    2.54 +	{
    2.55 +		SH_VVLOG("shadow_fault - EXIT: read gpte faulted" );
    2.56 +		spin_unlock(&current->mm.shadow_lock);
    2.57 +		return 0;  // propagate to guest
    2.58 +	}
    2.59 +
    2.60 +	if ( unlikely(!(gpte & _PAGE_PRESENT)) )
    2.61 +	{
    2.62 +		SH_VVLOG("shadow_fault - EXIT: gpte not present (%lx)",gpte );
    2.63 +		spin_unlock(&current->mm.shadow_lock);
    2.64 +		return 0;  // we're not going to be able to help
    2.65 +    }
    2.66  
    2.67      spte = gpte;
    2.68  
    2.69  	if ( error_code & 2  )  
    2.70  	{  // write fault
    2.71 -		if ( gpte & _PAGE_RW )
    2.72 +		if ( likely(gpte & _PAGE_RW) )
    2.73  	    {
    2.74  			gpte |= _PAGE_DIRTY | _PAGE_ACCESSED;
    2.75  			spte |= _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED; 
    2.76 @@ -385,6 +410,7 @@ int shadow_fault( unsigned long va, long
    2.77  
    2.78          gl1pfn = gpde>>PAGE_SHIFT;
    2.79  
    2.80 +        
    2.81          if ( ! (sl1pfn=__shadow_status(current, gl1pfn) ) )
    2.82          {
    2.83              // this L1 is NOT already shadowed so we need to shadow it
    2.84 @@ -458,8 +484,8 @@ int shadow_fault( unsigned long va, long
    2.85  
    2.86          }              
    2.87  
    2.88 -    shadow_linear_pg_table[va>>PAGE_SHIFT] = mk_l1_pgentry(spte);
    2.89 -    // (we need to do the above even if we've just made the shadow L1)
    2.90 +        shadow_linear_pg_table[va>>PAGE_SHIFT] = mk_l1_pgentry(spte);
    2.91 +        // (we need to do the above even if we've just made the shadow L1)
    2.92  
    2.93      } // end of fixup writing the shadow L1 directly failed
    2.94
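
The core of the shadow.c change is when shadow_fault() takes mm.shadow_lock: the cheap checks on the guest pte (a faulting gpte read, a not-present page, a write to a read-only page) now run without the lock, and the gpte is re-read and re-validated once the lock is actually held, since it may have changed in the meantime. Below is a minimal, hypothetical C sketch of that take-the-lock-late / re-check-under-the-lock pattern, not the real Xen code; read_gpte(), install_spte(), the pthread mutex and the toy page table are simplified stand-ins for __get_user() on linear_pg_table, the shadow_linear_pg_table update and the per-domain shadow_lock.

#include <stdbool.h>
#include <stdint.h>
#include <pthread.h>

#define SK_PAGE_PRESENT 0x001u
#define SK_PAGE_RW      0x002u

static pthread_mutex_t shadow_lock = PTHREAD_MUTEX_INITIALIZER; /* stand-in for mm.shadow_lock */
static uint32_t fake_page_table[1024];                          /* toy stand-in for linear_pg_table */

static bool read_gpte(unsigned long va, uint32_t *gpte)
{
    /* The real code can fault here (__get_user); this sketch cannot. */
    *gpte = fake_page_table[(va >> 12) % 1024];
    return true;
}

static void install_spte(unsigned long va, uint32_t spte)
{
    /* The real code writes shadow_linear_pg_table[va>>PAGE_SHIFT]. */
    (void)va;
    (void)spte;
}

int shadow_fault_sketch(unsigned long va, int is_write)
{
    uint32_t gpte;

    /* Unlocked pre-checks: faults the guest must handle itself
     * (unreadable gpte, not-present page, write to a read-only page)
     * return early without ever touching the shadow lock. */
    if (!read_gpte(va, &gpte) || !(gpte & SK_PAGE_PRESENT))
        return 0;                           /* propagate to guest */
    if (is_write && !(gpte & SK_PAGE_RW))
        return 0;                           /* write fault on RO page */

    pthread_mutex_lock(&shadow_lock);

    /* Re-read and re-validate under the lock: the pte may have changed
     * between the unlocked check and lock acquisition. */
    if (!read_gpte(va, &gpte) || !(gpte & SK_PAGE_PRESENT)) {
        pthread_mutex_unlock(&shadow_lock);
        return 0;
    }

    install_spte(va, gpte);                 /* build/install the shadow pte */

    pthread_mutex_unlock(&shadow_lock);
    return 1;                               /* handled by the shadow code */
}

The effect of the early returns is that the common "propagate to guest" cases never touch the lock at all, which is the locking improvement the changeset description refers to.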