ia64/xen-unstable

changeset 1409:27b5376a7ec5

bitkeeper revision 1.911.1.4 (40ac8fefyv7QTbpYikVy1Mlh5Thh3Q)

When doing a live migrate, be more persistent in continuing to iterate --
even with a worst-case memory thrasher we seem to get over the knee after
a few iterations.

Also, free L1 shadows rather than zeroing them. I think this will be faster,
but I will add some stats to confirm.
author iap10@labyrinth.cl.cam.ac.uk
date Thu May 20 11:01:03 2004 +0000 (2004-05-20)
parents 1ad6b2dcc1f8
children fa3cfd1bdd96
files tools/xc/lib/xc_linux_save.c xen/common/shadow.c
line diff
     1.1 --- a/tools/xc/lib/xc_linux_save.c	Thu May 20 10:16:50 2004 +0000
     1.2 +++ b/tools/xc/lib/xc_linux_save.c	Thu May 20 11:01:03 2004 +0000
     1.3 @@ -292,7 +292,7 @@ int xc_linux_save(int xc_handle,
     1.4  
     1.5  	last_iter = 0;
     1.6  	sent_last_iter = 1<<20; // 4GB's worth of pages
     1.7 -	max_iters = 19; // limit us to 20 times round loop
     1.8 +	max_iters = 29; // limit us to 30 times round loop
     1.9      }
    1.10      else
    1.11  	last_iter = 1;
    1.12 @@ -645,8 +645,10 @@ int xc_linux_save(int xc_handle,
    1.13  
    1.14  	if ( live )
    1.15  	{
    1.16 -	    if ( ( sent_this_iter > (sent_last_iter * 0.95) ) ||
    1.17 -		 (iter >= max_iters) || (sent_this_iter < 10) || 
    1.18 +	    if ( 
    1.19 +		 // ( sent_this_iter > (sent_last_iter * 0.95) ) ||		 
    1.20 +		 (iter >= max_iters) || 
    1.21 +		 (sent_this_iter+skip_this_iter < 10) || 
    1.22  		 (total_sent > nr_pfns*2) )
    1.23  	    {
    1.24  		DPRINTF("Start last iteration\n");
    1.25 @@ -657,7 +659,7 @@ int xc_linux_save(int xc_handle,
    1.26  	    } 
    1.27  
    1.28  	    if ( xc_shadow_control( xc_handle, domid, 
    1.29 -				    DOM0_SHADOW_CONTROL_OP_CLEAN,
    1.30 +				    DOM0_SHADOW_CONTROL_OP_CLEAN2,
    1.31  				    to_send, nr_pfns ) != nr_pfns ) 
    1.32  	    {
    1.33  		ERROR("Error flushing shadow PT");
     2.1 --- a/xen/common/shadow.c	Thu May 20 10:16:50 2004 +0000
     2.2 +++ b/xen/common/shadow.c	Thu May 20 11:01:03 2004 +0000
     2.3 @@ -109,7 +109,13 @@ static void __free_shadow_table( struct 
     2.4      SH_LOG("Free shadow table. Freed= %d",free);
     2.5  }
     2.6  
     2.7 -static inline int shadow_page_op( struct mm_struct *m, unsigned int op,
     2.8 +
     2.9 +#define TABLE_OP_ZERO_L2 1
    2.10 +#define TABLE_OP_ZERO_L1 2
    2.11 +#define TABLE_OP_FREE_L1 3
    2.12 +
    2.13 +static inline int shadow_page_op( struct mm_struct *m, unsigned int op, 
    2.14 +								  unsigned int gpfn,
    2.15                                    struct pfn_info *spfn_info, int *work )
    2.16  {
    2.17      unsigned int spfn = spfn_info-frame_table;
    2.18 @@ -117,48 +123,45 @@ static inline int shadow_page_op( struct
    2.19  
    2.20      switch( op )
    2.21      {
    2.22 -    case DOM0_SHADOW_CONTROL_OP_CLEAN:
    2.23 -    {
    2.24 -        int i;
    2.25 -        if ( (spfn_info->type_and_flags & PGT_type_mask) == 
    2.26 -             PGT_l1_page_table )
    2.27 -        {
    2.28 -            unsigned long * spl1e = map_domain_mem( spfn<<PAGE_SHIFT );
    2.29 -
    2.30 -            for (i=0;i<ENTRIES_PER_L1_PAGETABLE;i++)
    2.31 -            {                    
    2.32 -                if ( (spl1e[i] & _PAGE_PRESENT ) && (spl1e[i] & _PAGE_RW) )
    2.33 -                {
    2.34 -                    *work++;
    2.35 -                    spl1e[i] &= ~_PAGE_RW;
    2.36 -                }
    2.37 -            }
    2.38 -            unmap_domain_mem( spl1e );
    2.39 -        }
    2.40 -    }
    2.41 -	break;
    2.42 -
    2.43 -    case DOM0_SHADOW_CONTROL_OP_CLEAN2:
    2.44 -    {
    2.45 -        if ( (spfn_info->type_and_flags & PGT_type_mask) == 
    2.46 -             PGT_l1_page_table )
    2.47 -        {
    2.48 -			delete_shadow_status( m, frame_table-spfn_info );
    2.49 -			restart = 1; // we need to go to start of list again
    2.50 -		}
    2.51 -		else if ( (spfn_info->type_and_flags & PGT_type_mask) == 
    2.52 +	case TABLE_OP_ZERO_L2:
    2.53 +	{
    2.54 +		if ( (spfn_info->type_and_flags & PGT_type_mask) == 
    2.55               PGT_l2_page_table )
    2.56  		{
    2.57  			unsigned long * spl1e = map_domain_mem( spfn<<PAGE_SHIFT );
    2.58  			memset( spl1e, 0, DOMAIN_ENTRIES_PER_L2_PAGETABLE * sizeof(*spl1e) );
    2.59  			unmap_domain_mem( spl1e );
    2.60  		}
    2.61 -		else
    2.62 -			BUG();
    2.63 +    }
    2.64 +	break;
    2.65 +	
    2.66 +	case TABLE_OP_ZERO_L1:
    2.67 +	{
    2.68 +		if ( (spfn_info->type_and_flags & PGT_type_mask) == 
    2.69 +             PGT_l1_page_table )
    2.70 +		{
    2.71 +			unsigned long * spl1e = map_domain_mem( spfn<<PAGE_SHIFT );
    2.72 +			memset( spl1e, 0, ENTRIES_PER_L1_PAGETABLE * sizeof(*spl1e) );
    2.73 +			unmap_domain_mem( spl1e );
    2.74 +		}
    2.75      }
    2.76  	break;
    2.77  
    2.78 +	case TABLE_OP_FREE_L1:
    2.79 +	{
    2.80 +		if ( (spfn_info->type_and_flags & PGT_type_mask) == 
    2.81 +             PGT_l1_page_table )
    2.82 +		{
    2.83 +			// lock is already held
    2.84 +			delete_shadow_status( m, gpfn );
    2.85 +			restart = 1; // we need to go to start of list again
    2.86 +		}
    2.87 +    }
    2.88  
    2.89 +	break;
    2.90 +	
    2.91 +	default:
    2.92 +		BUG();
    2.93  
    2.94      }
    2.95      return restart;
    2.96 @@ -183,18 +186,18 @@ static void __scan_shadow_table( struct 
    2.97  		next = a->next;
    2.98          if (a->pfn)
    2.99          {
   2.100 -            if ( shadow_page_op( m, op, 
   2.101 -							&frame_table[a->spfn_and_flags & PSH_pfn_mask], 
   2.102 -							&work ) )
   2.103 +            if ( shadow_page_op( m, op, a->pfn,								 
   2.104 +								 &frame_table[a->spfn_and_flags & PSH_pfn_mask], 
   2.105 +								 &work ) )
   2.106  				goto retry;
   2.107          }
   2.108          a=next;
   2.109          while(a)
   2.110          { 
   2.111  			next = a->next;
   2.112 -            if ( shadow_page_op( m, op, 
   2.113 -							&frame_table[a->spfn_and_flags & PSH_pfn_mask],
   2.114 -							&work ) )
   2.115 +            if ( shadow_page_op( m, op, a->pfn,
   2.116 +								 &frame_table[a->spfn_and_flags & PSH_pfn_mask],
   2.117 +								 &work ) )
   2.118  				goto retry;
   2.119              a=next;
   2.120          }
   2.121 @@ -332,17 +335,29 @@ static int shadow_mode_table_op( struct 
   2.122      switch(op)
   2.123      {
   2.124      case DOM0_SHADOW_CONTROL_OP_FLUSH:
   2.125 -        __free_shadow_table( m );
   2.126 +        // XXX THIS IS VERY DANGEROUS : MUST ENSURE THE PTs ARE NOT IN USE ON
   2.127 +		// OTHER CPU -- fix when we get sched sync pause.
   2.128 +        __free_shadow_table( m );  
   2.129          break;
   2.130     
   2.131      case DOM0_SHADOW_CONTROL_OP_CLEAN:   // zero all-non hypervisor
   2.132 +	{
   2.133 +		__scan_shadow_table( m, TABLE_OP_ZERO_L2 );
   2.134 +		__scan_shadow_table( m, TABLE_OP_ZERO_L1 );
   2.135 +
   2.136 +		goto send_bitmap;
   2.137 +	}
   2.138 +		
   2.139 +
   2.140      case DOM0_SHADOW_CONTROL_OP_CLEAN2:  // zero all L2, free L1s
   2.141      {
   2.142  		int i,j,zero=1;
   2.143  		
   2.144 -		__scan_shadow_table( m, op );
   2.145 -		//    __free_shadow_table( m );
   2.146 -	
   2.147 +		__scan_shadow_table( m, TABLE_OP_ZERO_L2 );
   2.148 +		__scan_shadow_table( m, TABLE_OP_FREE_L1 );
   2.149 +		
   2.150 +	send_bitmap:
   2.151 +
   2.152  		if( p->tot_pages > sc->pages || 
   2.153  			!sc->dirty_bitmap || !p->mm.shadow_dirty_bitmap )
   2.154  		{