ia64/xen-unstable

changeset 1271:b046dc7976c7

bitkeeper revision 1.835 (406d9481GqoZ_RrT3GukXhamv7rulA)

Delete shadow page tables when destroying domain
author iap10@tetris.cl.cam.ac.uk
date Fri Apr 02 16:27:45 2004 +0000 (2004-04-02)
parents 369b77bc3884
children 498890e33619
files xen/common/domain.c xen/common/memory.c xen/common/schedule.c xen/common/shadow.c xen/include/xen/shadow.h
line diff
     1.1 --- a/xen/common/domain.c	Fri Apr 02 14:03:03 2004 +0000
     1.2 +++ b/xen/common/domain.c	Fri Apr 02 16:27:45 2004 +0000
     1.3 @@ -19,6 +19,7 @@
     1.4  #include <xen/console.h>
     1.5  #include <xen/vbd.h>
     1.6  #include <asm/i387.h>
     1.7 +#include <xen/shadow.h>
     1.8  
     1.9  #ifdef CONFIG_X86_64BITMODE
    1.10  #define ELFSIZE 64
    1.11 @@ -382,6 +383,8 @@ void free_all_dom_mem(struct task_struct
    1.12  
    1.13      INIT_LIST_HEAD(&zombies);
    1.14  
    1.15 +    if ( p->mm.shadow_mode ) shadow_mode_disable(p);
    1.16 +
    1.17      /* STEP 1. Drop the in-use reference to the page-table base. */
    1.18      put_page_and_type(&frame_table[pagetable_val(p->mm.pagetable) >>
    1.19                                    PAGE_SHIFT]);
     2.1 --- a/xen/common/memory.c	Fri Apr 02 14:03:03 2004 +0000
     2.2 +++ b/xen/common/memory.c	Fri Apr 02 16:27:45 2004 +0000
     2.3 @@ -770,7 +770,14 @@ void free_page_type(struct pfn_info *pag
     2.4  	     (get_shadow_status(&current->mm, 
     2.5  				page-frame_table) & PSH_shadowed) )
     2.6  	{
     2.7 -	    unshadow_table( page-frame_table, type );
     2.8 +	    /* using 'current->mm' is safe because page type changes only
     2.9 +	       occur within the context of the currently running domain as 
    2.10 +	       pagetable pages can not be shared across domains. The one
    2.11 +	       exception is when destroying a domain. However, we get away 
    2.12 +	       with this as there's no way the current domain can have this
    2.13 +	       mfn shadowed, so we won't get here... Phew! */
    2.14 +
    2.15 + 	    unshadow_table( page-frame_table, type );
    2.16  	    put_shadow_status(&current->mm);
    2.17          }
    2.18  	return;
     3.1 --- a/xen/common/schedule.c	Fri Apr 02 14:03:03 2004 +0000
     3.2 +++ b/xen/common/schedule.c	Fri Apr 02 16:27:45 2004 +0000
     3.3 @@ -283,7 +283,13 @@ long do_sched_op(unsigned long op)
     3.4  }
     3.5  
     3.6  
     3.7 -/* sched_pause_sync - synchronously pause a domain's execution */
     3.8 +/* sched_pause_sync - synchronously pause a domain's execution 
     3.9 +
    3.10 +XXXX This is horribly broken -- here just as a placeholder at present,
    3.11 +                               do not use.
    3.12 +
    3.13 +*/
    3.14 +
    3.15  void sched_pause_sync(struct task_struct *p)
    3.16  {
    3.17      unsigned long flags;
    3.18 @@ -293,7 +299,7 @@ void sched_pause_sync(struct task_struct
    3.19  
    3.20      if ( schedule_data[cpu].curr != p )
    3.21          /* if not the current task, we can remove it from scheduling now */
    3.22 -        SCHED_FN(pause, p);
    3.23 +        SCHED_OP(pause, p);
    3.24  
    3.25      p->state = TASK_PAUSED;
    3.26      
     4.1 --- a/xen/common/shadow.c	Fri Apr 02 14:03:03 2004 +0000
     4.2 +++ b/xen/common/shadow.c	Fri Apr 02 16:27:45 2004 +0000
     4.3 @@ -225,7 +225,7 @@ nomem:
     4.4      return -ENOMEM;
     4.5  }
     4.6  
     4.7 -static void shadow_mode_disable( struct task_struct *p )
     4.8 +void shadow_mode_disable( struct task_struct *p )
     4.9  {
    4.10      struct mm_struct *m = &p->mm;
    4.11      struct shadow_status *next;
    4.12 @@ -353,144 +353,6 @@ static inline struct pfn_info *alloc_sha
    4.13      return alloc_domain_page( NULL );
    4.14  }
    4.15  
    4.16 -/************************************************************************/
    4.17 -
    4.18 -static inline void mark_dirty( struct mm_struct *m, unsigned int mfn )
    4.19 -{
    4.20 -	unsigned int pfn = machine_to_phys_mapping[mfn];
    4.21 -	ASSERT(m->shadow_dirty_bitmap);
    4.22 -	if( likely(pfn<m->shadow_dirty_bitmap_size) )
    4.23 -	{
    4.24 -		// XXX use setbit
    4.25 -		m->shadow_dirty_bitmap[pfn/(sizeof(int)*8)] |= 
    4.26 -			(1<<(pfn%(sizeof(int)*8)));
    4.27 -	}
    4.28 -	else
    4.29 -	{
    4.30 -		printk("XXXX mark dirty overflow!");
    4.31 -	}
    4.32 -
    4.33 -}
    4.34 -
    4.35 -/************************************************************************/
    4.36 -
    4.37 -static inline void l1pte_write_fault( struct mm_struct *m, 
    4.38 -									  unsigned long *gpte_p, unsigned long *spte_p )
    4.39 -{ 
    4.40 -    unsigned long gpte = *gpte_p;
    4.41 -    unsigned long spte = *spte_p;
    4.42 -
    4.43 -    switch( m->shadow_mode )
    4.44 -    {
    4.45 -    case SHM_test:
    4.46 -		spte = gpte;
    4.47 -		gpte |= _PAGE_DIRTY | _PAGE_ACCESSED;
    4.48 -		spte |= _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED; 			
    4.49 -		break;
    4.50 -
    4.51 -    case SHM_logdirty:
    4.52 -		spte = gpte;
    4.53 -		gpte |= _PAGE_DIRTY | _PAGE_ACCESSED;
    4.54 -		spte |= _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED; 			
    4.55 -		mark_dirty( m, gpte >> PAGE_SHIFT );
    4.56 -		break;
    4.57 -    }
    4.58 -
    4.59 -    *gpte_p = gpte;
    4.60 -    *spte_p = spte;
    4.61 -}
    4.62 -
    4.63 -static inline void l1pte_read_fault( struct mm_struct *m, 
    4.64 -									 unsigned long *gpte_p, unsigned long *spte_p )
    4.65 -{ 
    4.66 -    unsigned long gpte = *gpte_p;
    4.67 -    unsigned long spte = *spte_p;
    4.68 -
    4.69 -    switch( m->shadow_mode )
    4.70 -    {
    4.71 -    case SHM_test:
    4.72 -		spte = gpte;
    4.73 -		gpte |= _PAGE_ACCESSED;
    4.74 -		spte |= _PAGE_ACCESSED; 			
    4.75 -		if ( ! (gpte & _PAGE_DIRTY ) )
    4.76 -			spte &= ~ _PAGE_RW;
    4.77 -		break;
    4.78 -
    4.79 -    case SHM_logdirty:
    4.80 -		spte = gpte;
    4.81 -		gpte |= _PAGE_ACCESSED;
    4.82 -		spte |= _PAGE_ACCESSED; 			
    4.83 -		spte &= ~ _PAGE_RW;
    4.84 -		break;
    4.85 -    }
    4.86 -
    4.87 -    *gpte_p = gpte;
    4.88 -    *spte_p = spte;
    4.89 -}
    4.90 -
    4.91 -static inline void l1pte_no_fault( struct mm_struct *m, 
    4.92 -								   unsigned long *gpte_p, unsigned long *spte_p )
    4.93 -{ 
    4.94 -    unsigned long gpte = *gpte_p;
    4.95 -    unsigned long spte = *spte_p;
    4.96 -
    4.97 -    switch( m->shadow_mode )
    4.98 -    {
    4.99 -    case SHM_test:
   4.100 -		spte = 0;
   4.101 -		if ( (gpte & (_PAGE_PRESENT|_PAGE_ACCESSED) ) == 
   4.102 -			 (_PAGE_PRESENT|_PAGE_ACCESSED) )
   4.103 -		{
   4.104 -			spte = gpte;
   4.105 -			if ( ! (gpte & _PAGE_DIRTY ) )
   4.106 -				spte &= ~ _PAGE_RW;
   4.107 -		}
   4.108 -		break;
   4.109 -
   4.110 -    case SHM_logdirty:
   4.111 -		spte = 0;
   4.112 -		if ( (gpte & (_PAGE_PRESENT|_PAGE_ACCESSED) ) == 
   4.113 -			 (_PAGE_PRESENT|_PAGE_ACCESSED) )
   4.114 -		{
   4.115 -			spte = gpte;
   4.116 -			spte &= ~ _PAGE_RW;
   4.117 -		}
   4.118 -
   4.119 -		break;
   4.120 -    }
   4.121 -
   4.122 -    *gpte_p = gpte;
   4.123 -    *spte_p = spte;
   4.124 -}
   4.125 -
   4.126 -static inline void l2pde_general( struct mm_struct *m, 
   4.127 -			   unsigned long *gpde_p, unsigned long *spde_p,
   4.128 -			   unsigned long sl1pfn)
   4.129 -{
   4.130 -    unsigned long gpde = *gpde_p;
   4.131 -    unsigned long spde = *spde_p;
   4.132 -
   4.133 -	spde = 0;
   4.134 -
   4.135 -	if ( sl1pfn )
   4.136 -	{
   4.137 -		spde = (gpde & ~PAGE_MASK) | (sl1pfn<<PAGE_SHIFT) | 
   4.138 -			_PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY;
   4.139 -		gpde = gpde | _PAGE_ACCESSED | _PAGE_DIRTY;
   4.140 -
   4.141 -		if ( unlikely( (sl1pfn<<PAGE_SHIFT) == (gpde & PAGE_MASK)  ) )
   4.142 -		{   
   4.143 -			// detect linear map, and keep pointing at guest
   4.144 -			SH_VLOG("4c: linear mapping ( %08lx )",sl1pfn);
   4.145 -			spde = gpde & ~_PAGE_RW;
   4.146 -		}
   4.147 -	}
   4.148 -
   4.149 -    *gpde_p = gpde;
   4.150 -    *spde_p = spde;
   4.151 -}
   4.152 -
   4.153 -/*********************************************************************/
   4.154  
   4.155  void unshadow_table( unsigned long gpfn, unsigned int type )
   4.156  {
     5.1 --- a/xen/include/xen/shadow.h	Fri Apr 02 14:03:03 2004 +0000
     5.2 +++ b/xen/include/xen/shadow.h	Fri Apr 02 16:27:45 2004 +0000
     5.3 @@ -31,6 +31,7 @@ extern void shadow_l1_normal_pt_update( 
     5.4  extern void shadow_l2_normal_pt_update( unsigned long pa, unsigned long gpte );
     5.5  extern void unshadow_table( unsigned long gpfn, unsigned int type );
     5.6  extern int shadow_mode_enable( struct task_struct *p, unsigned int mode );
     5.7 +extern void shadow_mode_disable( struct task_struct *p );
     5.8  extern unsigned long shadow_l2_table( 
     5.9                       struct mm_struct *m, unsigned long gpfn );
    5.10  
    5.11 @@ -73,6 +74,146 @@ struct shadow_status {
    5.12  
    5.13  
    5.14  
    5.15 +/************************************************************************/
    5.16 +
    5.17 +static inline void mark_dirty( struct mm_struct *m, unsigned int mfn )
    5.18 +{
    5.19 +	unsigned int pfn = machine_to_phys_mapping[mfn];
    5.20 +	ASSERT(m->shadow_dirty_bitmap);
    5.21 +	if( likely(pfn<m->shadow_dirty_bitmap_size) )
    5.22 +	{
    5.23 +		// use setbit to be smp guest safe
    5.24 +		set_bit( pfn, m->shadow_dirty_bitmap );
    5.25 +	}
    5.26 +	else
    5.27 +	{
    5.28 +		SH_LOG("mark_dirty pfn out of range attempt!");
    5.29 +	}
    5.30 +
    5.31 +}
    5.32 +
    5.33 +/************************************************************************/
    5.34 +
    5.35 +static inline void l1pte_write_fault( struct mm_struct *m, 
    5.36 +									  unsigned long *gpte_p, unsigned long *spte_p )
    5.37 +{ 
    5.38 +    unsigned long gpte = *gpte_p;
    5.39 +    unsigned long spte = *spte_p;
    5.40 +
    5.41 +    switch( m->shadow_mode )
    5.42 +    {
    5.43 +    case SHM_test:
    5.44 +		spte = gpte;
    5.45 +		gpte |= _PAGE_DIRTY | _PAGE_ACCESSED;
    5.46 +		spte |= _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED; 			
    5.47 +		break;
    5.48 +
    5.49 +    case SHM_logdirty:
    5.50 +		spte = gpte;
    5.51 +		gpte |= _PAGE_DIRTY | _PAGE_ACCESSED;
    5.52 +		spte |= _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED; 			
    5.53 +		mark_dirty( m, gpte >> PAGE_SHIFT );
    5.54 +		break;
    5.55 +    }
    5.56 +
    5.57 +    *gpte_p = gpte;
    5.58 +    *spte_p = spte;
    5.59 +}
    5.60 +
    5.61 +static inline void l1pte_read_fault( struct mm_struct *m, 
    5.62 +									 unsigned long *gpte_p, unsigned long *spte_p )
    5.63 +{ 
    5.64 +    unsigned long gpte = *gpte_p;
    5.65 +    unsigned long spte = *spte_p;
    5.66 +
    5.67 +    switch( m->shadow_mode )
    5.68 +    {
    5.69 +    case SHM_test:
    5.70 +		spte = gpte;
    5.71 +		gpte |= _PAGE_ACCESSED;
    5.72 +		spte |= _PAGE_ACCESSED; 			
    5.73 +		if ( ! (gpte & _PAGE_DIRTY ) )
    5.74 +			spte &= ~ _PAGE_RW;
    5.75 +		break;
    5.76 +
    5.77 +    case SHM_logdirty:
    5.78 +		spte = gpte;
    5.79 +		gpte |= _PAGE_ACCESSED;
    5.80 +		spte |= _PAGE_ACCESSED; 			
    5.81 +		spte &= ~ _PAGE_RW;
    5.82 +		break;
    5.83 +    }
    5.84 +
    5.85 +    *gpte_p = gpte;
    5.86 +    *spte_p = spte;
    5.87 +}
    5.88 +
    5.89 +static inline void l1pte_no_fault( struct mm_struct *m, 
    5.90 +								   unsigned long *gpte_p, unsigned long *spte_p )
    5.91 +{ 
    5.92 +    unsigned long gpte = *gpte_p;
    5.93 +    unsigned long spte = *spte_p;
    5.94 +
    5.95 +    switch( m->shadow_mode )
    5.96 +    {
    5.97 +    case SHM_test:
    5.98 +		spte = 0;
    5.99 +		if ( (gpte & (_PAGE_PRESENT|_PAGE_ACCESSED) ) == 
   5.100 +			 (_PAGE_PRESENT|_PAGE_ACCESSED) )
   5.101 +		{
   5.102 +			spte = gpte;
   5.103 +			if ( ! (gpte & _PAGE_DIRTY ) )
   5.104 +				spte &= ~ _PAGE_RW;
   5.105 +		}
   5.106 +		break;
   5.107 +
   5.108 +    case SHM_logdirty:
   5.109 +		spte = 0;
   5.110 +		if ( (gpte & (_PAGE_PRESENT|_PAGE_ACCESSED) ) == 
   5.111 +			 (_PAGE_PRESENT|_PAGE_ACCESSED) )
   5.112 +		{
   5.113 +			spte = gpte;
   5.114 +			spte &= ~ _PAGE_RW;
   5.115 +		}
   5.116 +
   5.117 +		break;
   5.118 +    }
   5.119 +
   5.120 +    *gpte_p = gpte;
   5.121 +    *spte_p = spte;
   5.122 +}
   5.123 +
   5.124 +static inline void l2pde_general( struct mm_struct *m, 
   5.125 +			   unsigned long *gpde_p, unsigned long *spde_p,
   5.126 +			   unsigned long sl1pfn)
   5.127 +{
   5.128 +    unsigned long gpde = *gpde_p;
   5.129 +    unsigned long spde = *spde_p;
   5.130 +
   5.131 +	spde = 0;
   5.132 +
   5.133 +	if ( sl1pfn )
   5.134 +	{
   5.135 +		spde = (gpde & ~PAGE_MASK) | (sl1pfn<<PAGE_SHIFT) | 
   5.136 +			_PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY;
   5.137 +		gpde = gpde | _PAGE_ACCESSED | _PAGE_DIRTY;
   5.138 +
   5.139 +		if ( unlikely( (sl1pfn<<PAGE_SHIFT) == (gpde & PAGE_MASK)  ) )
   5.140 +		{   
   5.141 +			// detect linear map, and keep pointing at guest
   5.142 +			SH_VLOG("4c: linear mapping ( %08lx )",sl1pfn);
   5.143 +			spde = gpde & ~_PAGE_RW;
   5.144 +		}
   5.145 +	}
   5.146 +
   5.147 +    *gpde_p = gpde;
   5.148 +    *spde_p = spde;
   5.149 +}
   5.150 +
   5.151 +/*********************************************************************/
   5.152 +
   5.153 +
   5.154 +
   5.155  #if SHADOW_HASH_DEBUG
   5.156  static void shadow_audit(struct mm_struct *m, int print)
   5.157  {