ia64/xen-unstable
changeset 1238:b9ab14271453
bitkeeper revision 1.830 (40644790Uo9XYxIfmoQH6z677kJnPg)
cleanup
| author | iap10@tetris.cl.cam.ac.uk |
| --- | --- |
| date | Fri Mar 26 15:09:04 2004 +0000 (2004-03-26) |
| parents | 8b5451a42056 |
| children | a0d70d73a737 |
| files | tools/xc/lib/xc_domain.c xen/arch/i386/process.c xen/common/domain.c xen/common/memory.c xen/common/schedule.c xen/common/shadow.c xen/include/asm-i386/processor.h xen/include/xen/mm.h xen/include/xen/shadow.h xen/net/dev.c |
line diff
--- a/tools/xc/lib/xc_domain.c	Fri Mar 26 15:07:40 2004 +0000
+++ b/tools/xc/lib/xc_domain.c	Fri Mar 26 15:09:04 2004 +0000
@@ -102,3 +102,14 @@ int xc_domain_getinfo(int xc_handle,
 
     return nr_doms;
 }
+
+int xc_shadow_control(int xc_handle,
+                      u64 domid,
+                      unsigned int sop)
+{
+    dom0_op_t op;
+    op.cmd = DOM0_SHADOW_CONTROL;
+    op.u.shadow_control.domain = (domid_t)domid;
+    op.u.shadow_control.op = sop;
+    return do_dom0_op(xc_handle, &op);
+}
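A quick caller sketch (not part of the changeset): the helper name and error handling are hypothetical, the `xc_handle` is assumed to be already open, and the `DOM0_SHADOW_CONTROL_OP_*` constants are the ones the `xen/common/shadow.c` hunk below dispatches on.

```c
/* Hypothetical helper, illustration only: put a domain into test-mode
 * shadow paging, then discard its accumulated shadow state. */
int enable_and_flush_shadow(int xc_handle, u64 domid)
{
    int rc;

    rc = xc_shadow_control(xc_handle, domid,
                           DOM0_SHADOW_CONTROL_OP_ENABLE_TEST);
    if ( rc < 0 )
        return rc;    /* dom0 op failed */

    return xc_shadow_control(xc_handle, domid,
                             DOM0_SHADOW_CONTROL_OP_FLUSH);
}
```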
--- a/xen/arch/i386/process.c	Fri Mar 26 15:07:40 2004 +0000
+++ b/xen/arch/i386/process.c	Fri Mar 26 15:09:04 2004 +0000
@@ -281,13 +281,7 @@ void switch_to(struct task_struct *prev_
     }
 
     /* Switch page tables. */
-    if( next_p->mm.shadow_mode )
-    {
-        check_pagetable( next_p, next_p->mm.pagetable, "switch" );
-        write_cr3_counted(pagetable_val(next_p->mm.shadow_table));
-    }
-    else
-        write_cr3_counted(pagetable_val(next_p->mm.pagetable));
+    write_ptbase( &next_p->mm );
 
     set_current(next_p);
 
--- a/xen/common/domain.c	Fri Mar 26 15:07:40 2004 +0000
+++ b/xen/common/domain.c	Fri Mar 26 15:09:04 2004 +0000
@@ -850,7 +850,7 @@ int setup_guestos(struct task_struct *p,
     set_bit(PF_CONSTRUCTED, &p->flags);
 
 #if 0 // XXXXX DO NOT CHECK IN ENBALED !!! (but useful for testing so leave)
-    shadow_mode_enable(p, SHM_test);
+    shadow_mode_enable(&p->mm, SHM_test);
 #endif
 
     new_thread(p,
--- a/xen/common/memory.c	Fri Mar 26 15:07:40 2004 +0000
+++ b/xen/common/memory.c	Fri Mar 26 15:09:04 2004 +0000
@@ -766,20 +766,22 @@ void free_page_type(struct pfn_info *pag
     case PGT_l1_page_table:
         free_l1_table(page);
         if ( unlikely(current->mm.shadow_mode) &&
-             (get_shadow_status(current, page-frame_table) & PSH_shadowed) )
+             (get_shadow_status(&current->mm,
+                                page-frame_table) & PSH_shadowed) )
         {
             unshadow_table( page-frame_table, type );
-            put_shadow_status(current);
+            put_shadow_status(&current->mm);
         }
         return;
 
     case PGT_l2_page_table:
         free_l2_table(page);
         if ( unlikely(current->mm.shadow_mode) &&
-             (get_shadow_status(current, page-frame_table) & PSH_shadowed) )
+             (get_shadow_status(&current->mm,
+                                page-frame_table) & PSH_shadowed) )
         {
             unshadow_table( page-frame_table, type );
-            put_shadow_status(current);
+            put_shadow_status(&current->mm);
         }
         return;
 
@@ -854,16 +856,10 @@ static int do_extended_command(unsigned
         old_base_pfn = pagetable_val(current->mm.pagetable) >> PAGE_SHIFT;
         current->mm.pagetable = mk_pagetable(pfn << PAGE_SHIFT);
 
-        if( unlikely(current->mm.shadow_mode))
-        {
-            current->mm.shadow_table =
-                shadow_mk_pagetable(current, pfn<<PAGE_SHIFT);
-            write_cr3_counted(pagetable_val(current->mm.shadow_table));
-        }
-        else
-        {
-            write_cr3_counted(pfn << PAGE_SHIFT);
-        }
+        shadow_mk_pagetable(&current->mm);
+
+        write_ptbase(&current->mm);
+
         put_page_and_type(&frame_table[old_base_pfn]);
     }
     else
@@ -1002,12 +998,12 @@ int do_mmu_update(mmu_update_t *ureqs, i
                                         mk_l1_pgentry(req.val));
 
                 if ( okay && unlikely(current->mm.shadow_mode) &&
-                     (get_shadow_status(current, page-frame_table) &
+                     (get_shadow_status(&current->mm, page-frame_table) &
                       PSH_shadowed) )
                 {
                     shadow_l1_normal_pt_update( req.ptr, req.val,
                                                 &prev_spfn, &prev_spl1e );
-                    put_shadow_status(current);
+                    put_shadow_status(&current->mm);
                 }
 
                 put_page_type(page);
@@ -1021,11 +1017,11 @@ int do_mmu_update(mmu_update_t *ureqs, i
                                         pfn);
 
                 if ( okay && unlikely(current->mm.shadow_mode) &&
-                     (get_shadow_status(current, page-frame_table) &
+                     (get_shadow_status(&current->mm, page-frame_table) &
                       PSH_shadowed) )
                 {
                     shadow_l2_normal_pt_update( req.ptr, req.val );
-                    put_shadow_status(current);
+                    put_shadow_status(&current->mm);
                 }
 
                 put_page_type(page);
@@ -1093,14 +1089,7 @@ int do_mmu_update(mmu_update_t *ureqs, i
 
     if ( deferred_ops & DOP_FLUSH_TLB )
    {
-        if ( unlikely(current->mm.shadow_mode) )
-        {
-            check_pagetable( current,
-                             current->mm.pagetable, "pre-stlb-flush" );
-            write_cr3_counted(pagetable_val(current->mm.shadow_table));
-        }
-        else
-            write_cr3_counted(pagetable_val(current->mm.pagetable));
+        write_ptbase(&current->mm);
     }
 
     if ( deferred_ops & DOP_RELOAD_LDT )
@@ -1172,10 +1161,7 @@ int do_update_va_mapping(unsigned long p
     if ( unlikely(deferred_ops & DOP_FLUSH_TLB) ||
          unlikely(flags & UVMF_FLUSH_TLB) )
     {
-        if ( unlikely(p->mm.shadow_mode) )
-            write_cr3_counted(pagetable_val(p->mm.shadow_table));
-        else
-            write_cr3_counted(pagetable_val(p->mm.pagetable));
+        write_ptbase(&p->mm);
     }
     else if ( unlikely(flags & UVMF_INVLPG) )
         __flush_tlb_one(page_nr << PAGE_SHIFT);
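Every shadow-status check in this file now follows one pairing, with `&current->mm` in place of the old task pointer: `get_shadow_status()` returns with the mm's shadow lock held whenever it finds an entry, so the caller releases it with `put_shadow_status()` on that path only (both are defined in the `xen/include/xen/shadow.h` hunk below). A minimal sketch of the idiom, with an illustrative function name:

```c
/* Sketch of the locking idiom used at each call site above:
 * get_shadow_status() acquires m->shadow_lock and keeps it held only
 * when it returns a nonzero status; put_shadow_status() drops it. */
static void example_unshadow_if_shadowed(struct mm_struct *m,
                                         unsigned long pfn,
                                         unsigned int type)
{
    if ( unlikely(m->shadow_mode) &&
         (get_shadow_status(m, pfn) & PSH_shadowed) )
    {
        unshadow_table(pfn, type);  /* runs with the shadow lock held */
        put_shadow_status(m);       /* releases m->shadow_lock */
    }
}
```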
--- a/xen/common/schedule.c	Fri Mar 26 15:07:40 2004 +0000
+++ b/xen/common/schedule.c	Fri Mar 26 15:09:04 2004 +0000
@@ -300,9 +300,8 @@ void sched_pause_sync(struct task_struct
     /* spin until domain is descheduled by its local scheduler */
     while ( schedule_data[cpu].curr == p )
     {
-        set_bit(_HYP_EVENT_NEED_RESCHED, &p->hyp_events);
-        hyp_event_notify(1 << cpu);
-        do_yield();
+        send_hyp_event(p, _HYP_EVENT_NEED_RESCHED );
+        do_yield();
     }
 
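`send_hyp_event()` folds the old set_bit/notify pair into a single call. `sched_pause_sync()` is also what the reworked `shadow_mode_control()` in the next file relies on; its pause/operate/wake shape, distilled into a sketch with an illustrative name:

```c
/* Pattern used by shadow_mode_control() below: synchronously pause the
 * target domain unless it is already stopped or paused, mutate its
 * state while it is descheduled, and wake it only if we paused it. */
static int example_pause_do_wake(struct task_struct *p)
{
    int we_paused = 0;

    if ( !(p->state & TASK_STOPPED) && !(p->state & TASK_PAUSED) )
    {
        sched_pause_sync(p);
        we_paused = 1;
    }

    /* ... operate safely on p->mm here ... */

    if ( we_paused )
        wake_up(p);
    return 0;
}
```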
--- a/xen/common/shadow.c	Fri Mar 26 15:07:40 2004 +0000
+++ b/xen/common/shadow.c	Fri Mar 26 15:09:04 2004 +0000
@@ -26,99 +26,11 @@ hypercall lock anyhow (at least initiall
 
 ********/
 
-int shadow_mode_control( struct task_struct *p, unsigned int op )
-{
-    if (p->mm.shadow_mode && op == DOM0_SHADOW_CONTROL_OP_OFF )
-    {
-        shadow_mode_disable(p);
-    }
-    else if (p->mm.shadow_mode && op == DOM0_SHADOW_CONTROL_OP_ENABLE_TEST )
-    {
-        shadow_mode_disable(p);
-        shadow_mode_enable(p, SHM_test);
-    }
-    else if (p->mm.shadow_mode && op == DOM0_SHADOW_CONTROL_OP_FLUSH )
-    {
-        //shadow_mode_flush(p);
-    }
-    else
-    {
-        return -EINVAL;
-    }
-
-    return 0;
-}
-
-int shadow_mode_enable( struct task_struct *p, unsigned int mode )
-{
-    struct shadow_status **fptr;
-    int i;
-
-    // sychronously stop domain
-    // XXX for the moment, only use on already stopped domains!!!
-
-    spin_lock_init(&p->mm.shadow_lock);
-    spin_lock(&p->mm.shadow_lock);
-
-    p->mm.shadow_mode = mode;
-
-    // allocate hashtable
-    p->mm.shadow_ht = kmalloc( shadow_ht_buckets *
-                               sizeof(struct shadow_status), GFP_KERNEL );
-    if( ! p->mm.shadow_ht )
-        goto nomem;
-
-    memset( p->mm.shadow_ht, 0, shadow_ht_buckets *
-            sizeof(struct shadow_status) );
-
-
-    // allocate space for first lot of extra nodes
-    p->mm.shadow_ht_extras = kmalloc( sizeof(void*) + (shadow_ht_extra_size *
-                                      sizeof(struct shadow_status)), GFP_KERNEL );
-
-    if( ! p->mm.shadow_ht_extras )
-        goto nomem;
-
-    memset( p->mm.shadow_ht_extras, 0, sizeof(void*) + (shadow_ht_extra_size *
-            sizeof(struct shadow_status)) );
-
-    // add extras to free list
-    fptr = &p->mm.shadow_ht_free;
-    for ( i=0; i<shadow_ht_extra_size; i++ )
-    {
-        *fptr = &p->mm.shadow_ht_extras[i];
-        fptr = &(p->mm.shadow_ht_extras[i].next);
-    }
-    *fptr = NULL;
-    *((struct shadow_status ** ) &p->mm.shadow_ht_extras[shadow_ht_extra_size]) = NULL;
-
-    spin_unlock(&p->mm.shadow_lock);
-
-    // call shadow_mk_pagetable
-    p->mm.shadow_table = shadow_mk_pagetable( p,
-                                              pagetable_val(p->mm.pagetable) );
-
-    return 0;
-
-nomem:
-    spin_unlock(&p->mm.shadow_lock);
-    return -ENOMEM;
-}
-
-void shadow_mode_disable( )
-{
-
-    // free the hash buckets as you go
-
-    // free the hashtable itself
-}
-
-
-static inline void free_shadow_page( struct task_struct *p, unsigned int pfn )
+static inline void free_shadow_page( struct mm_struct *m, unsigned int pfn )
 {
     unsigned long flags;
 
-    p->mm.shadow_page_count--;
+    m->shadow_page_count--;
 
     spin_lock_irqsave(&free_list_lock, flags);
     list_add(&frame_table[pfn].list, &free_list);
@@ -126,15 +38,7 @@ static inline void free_shadow_page( str
     spin_unlock_irqrestore(&free_list_lock, flags);
 }
 
-static inline struct pfn_info *alloc_shadow_page( struct task_struct *p )
-{
-    p->mm.shadow_page_count++;
-
-    return alloc_domain_page( NULL );
-}
-
-
-static void __free_shadow_table( struct task_struct *p )
+static void __free_shadow_table( struct mm_struct *m )
 {
     int j;
     struct shadow_status *a;
@@ -146,10 +50,10 @@ static void __free_shadow_table( struct
 
     for(j=0;j<shadow_ht_buckets;j++)
     {
-        a = &p->mm.shadow_ht[j];
+        a = &m->shadow_ht[j];
         if (a->pfn)
         {
-            free_shadow_page( p, a->spfn_and_flags & PSH_pfn_mask );
+            free_shadow_page( m, a->spfn_and_flags & PSH_pfn_mask );
             a->pfn = 0;
             a->spfn_and_flags = 0;
         }
@@ -157,29 +61,145 @@ static void __free_shadow_table( struct
         while(a)
         {
             struct shadow_status *next = a->next;
-            free_shadow_page( p, a->spfn_and_flags & PSH_pfn_mask );
+            free_shadow_page( m, a->spfn_and_flags & PSH_pfn_mask );
             a->pfn = 0;
             a->spfn_and_flags = 0;
-            a->next = p->mm.shadow_ht_free;
-            p->mm.shadow_ht_free = a;
+            a->next = m->shadow_ht_free;
+            m->shadow_ht_free = a;
             a=next;
         }
     }
 }
 
-static void flush_shadow_table( struct task_struct *p )
+
+int shadow_mode_enable( struct mm_struct *m, unsigned int mode )
 {
+    struct shadow_status **fptr;
+    int i;
+
+
+    spin_lock_init(&m->shadow_lock);
+    spin_lock(&m->shadow_lock);
+
+    m->shadow_mode = mode;
+
+    // allocate hashtable
+    m->shadow_ht = kmalloc( shadow_ht_buckets *
+                            sizeof(struct shadow_status), GFP_KERNEL );
+    if( ! m->shadow_ht )
+        goto nomem;
+
+    memset( m->shadow_ht, 0, shadow_ht_buckets *
+            sizeof(struct shadow_status) );
+
+
+    // allocate space for first lot of extra nodes
+    m->shadow_ht_extras = kmalloc( sizeof(void*) + (shadow_ht_extra_size *
+                                   sizeof(struct shadow_status)), GFP_KERNEL );
+
+    if( ! m->shadow_ht_extras )
+        goto nomem;
+
+    memset( m->shadow_ht_extras, 0, sizeof(void*) + (shadow_ht_extra_size *
+            sizeof(struct shadow_status)) );
 
-    // XXX synchronously stop domain (needed for SMP guests)
+    // add extras to free list
+    fptr = &m->shadow_ht_free;
+    for ( i=0; i<shadow_ht_extra_size; i++ )
+    {
+        *fptr = &m->shadow_ht_extras[i];
+        fptr = &(m->shadow_ht_extras[i].next);
+    }
+    *fptr = NULL;
+    *((struct shadow_status ** )
+      &m->shadow_ht_extras[shadow_ht_extra_size]) = NULL;
+
+    spin_unlock(&m->shadow_lock);
+
+    // call shadow_mk_pagetable
+    shadow_mk_pagetable( m );
+
+    return 0;
+
+nomem:
+    spin_unlock(&m->shadow_lock);
+    return -ENOMEM;
+}
+
+static void shadow_mode_disable( struct mm_struct *m )
+{
+
+    // free the hash buckets as you go
 
-    // switch to idle task's page tables
+    // free the hashtable itself
+}
+
+static void shadow_mode_flush( struct mm_struct *m )
+{
+
+    // since Dom0 did the hypercall, we should be running with it's page
+    // tables right now. Calling flush on yourself would be really
+    // stupid.
+
+    if ( m == &current->mm )
+    {
+        printk("Don't try and flush your own page tables!\n");
+        return;
+    }
+
+    spin_lock(&m->shadow_lock);
+    __free_shadow_table( m );
+    spin_unlock(&m->shadow_lock);
+
+    // call shadow_mk_pagetable
+    shadow_mk_pagetable( m );
+
+}
+
+
+int shadow_mode_control( struct task_struct *p, unsigned int op )
+{
+    int we_paused = 0;
 
-    // walk the hash table and call free_shadow_page on all pages
-    spin_lock(&p->mm.shadow_lock);
-    __free_shadow_table( p );
-    spin_unlock(&p->mm.shadow_lock);
+    // don't call if already shadowed...
+
+    // sychronously stop domain
+    if( !(p->state & TASK_STOPPED) && !(p->state & TASK_PAUSED))
+    {
+        sched_pause_sync(p);
+        printk("paused domain\n");
+        we_paused = 1;
+    }
 
-    // XXX unpause domain
+    if (p->mm.shadow_mode && op == DOM0_SHADOW_CONTROL_OP_OFF )
+    {
+        shadow_mode_disable(&p->mm);
+    }
+    else if (p->mm.shadow_mode && op == DOM0_SHADOW_CONTROL_OP_ENABLE_TEST )
+    {
+        shadow_mode_disable(&p->mm);
+        shadow_mode_enable(&p->mm, SHM_test);
+    }
+    else if (p->mm.shadow_mode && op == DOM0_SHADOW_CONTROL_OP_FLUSH )
+    {
+        shadow_mode_flush(&p->mm);
+    }
+    else
+    {
+        return -EINVAL;
+    }
+
+    if ( we_paused ) wake_up(p);
+    return 0;
+}
+
+
+
+static inline struct pfn_info *alloc_shadow_page( struct mm_struct *m )
+{
+    m->shadow_page_count++;
+
+    return alloc_domain_page( NULL );
 }
 
 
@@ -199,8 +219,8 @@ void unshadow_table( unsigned long gpfn,
     // even in the SMP guest case, there won't be a race here as
     // this CPU was the one that cmpxchg'ed the page to invalid
 
-    spfn = __shadow_status(current, gpfn) & PSH_pfn_mask;
-    delete_shadow_status(current, gpfn);
+    spfn = __shadow_status(&current->mm, gpfn) & PSH_pfn_mask;
+    delete_shadow_status(&current->mm, gpfn);
 
 #if 0 // XXX leave as might be useful for later debugging
     {
@@ -220,13 +240,13 @@ void unshadow_table( unsigned long gpfn,
     else
         perfc_decr(shadow_l2_pages);
 
-    free_shadow_page( current, spfn );
+    free_shadow_page( &current->mm, spfn );
 
 }
 
 
-static unsigned long shadow_l2_table(
-    struct task_struct *p, unsigned long gpfn )
+unsigned long shadow_l2_table(
+    struct mm_struct *m, unsigned long gpfn )
 {
     struct pfn_info *spfn_info;
     unsigned long spfn;
@@ -234,7 +254,6 @@ static unsigned long shadow_l2_table(
     int i;
 
     SH_VVLOG("shadow_l2_table( %08lx )",gpfn);
-    spin_lock(&p->mm.shadow_lock);
 
     perfc_incrc(shadow_l2_table_count);
     perfc_incr(shadow_l2_pages);
@@ -242,14 +261,14 @@ static unsigned long shadow_l2_table(
     // XXX in future, worry about racing in SMP guests
     // -- use cmpxchg with PSH_pending flag to show progress (and spin)
 
-    spfn_info = alloc_shadow_page(p);
+    spfn_info = alloc_shadow_page(m);
 
     ASSERT( spfn_info ); // XXX deal with failure later e.g. blow cache
 
     spfn = (unsigned long) (spfn_info - frame_table);
 
     // mark pfn as being shadowed, update field to point at shadow
-    set_shadow_status(p, gpfn, spfn | PSH_shadowed);
+    set_shadow_status(m, gpfn, spfn | PSH_shadowed);
 
     // we need to do this before the linear map is set up
     spl2e = (l2_pgentry_t *) map_domain_mem(spfn << PAGE_SHIFT);
@@ -315,33 +334,9 @@ static unsigned long shadow_l2_table(
 
     SH_VLOG("shadow_l2_table( %08lx -> %08lx)",gpfn,spfn);
 
-    spin_unlock(&p->mm.shadow_lock);
     return spfn;
 }
 
-pagetable_t shadow_mk_pagetable( struct task_struct *p,
-                                 unsigned long gptbase)
-{
-    unsigned long gpfn, spfn=0;
-
-    SH_VVLOG("shadow_mk_pagetable( gptbase=%08lx, mode=%d )",
-             gptbase, p->mm.shadow_mode );
-
-    if ( likely(p->mm.shadow_mode) )  // should always be true if we're here
-    {
-        gpfn = gptbase >> PAGE_SHIFT;
-
-        if ( unlikely((spfn=__shadow_status(p, gpfn)) == 0 ) )
-        {
-            spfn = shadow_l2_table(p, gpfn );
-        }
-    }
-
-    SH_VVLOG("leaving shadow_mk_pagetable( gptbase=%08lx, mode=%d )",
-             gptbase, p->mm.shadow_mode );
-
-    return mk_pagetable(spfn<<PAGE_SHIFT);
-}
 
 int shadow_fault( unsigned long va, long error_code )
 {
@@ -433,7 +428,7 @@ int shadow_fault( unsigned long va, long
     gl1pfn = gpde>>PAGE_SHIFT;
 
 
-    if ( ! (sl1pfn=__shadow_status(current, gl1pfn) ) )
+    if ( ! (sl1pfn=__shadow_status(&current->mm, gl1pfn) ) )
     {
         // this L1 is NOT already shadowed so we need to shadow it
         struct pfn_info *sl1pfn_info;
@@ -446,7 +441,7 @@ int shadow_fault( unsigned long va, long
         perfc_incrc(shadow_l1_table_count);
         perfc_incr(shadow_l1_pages);
 
-        set_shadow_status(current, gl1pfn, PSH_shadowed | sl1pfn);
+        set_shadow_status(&current->mm, gl1pfn, PSH_shadowed | sl1pfn);
 
         gpde = gpde | _PAGE_ACCESSED | _PAGE_DIRTY;
         spde = (gpde & ~PAGE_MASK) | _PAGE_RW | (sl1pfn<<PAGE_SHIFT);
@@ -530,13 +525,13 @@ void shadow_l1_normal_pt_update( unsigne
     l1_pgentry_t * spl1e, * prev_spl1e = *prev_spl1e_ptr;
 
 
-SH_VVLOG("shadow_l1_normal_pt_update pa=%08lx, gpte=%08lx, prev_spfn=%08lx, prev_spl1e=%08lx\n",
+SH_VVLOG("shadow_l1_normal_pt_update pa=%08lx, gpte=%08lx, prev_spfn=%08lx, prev_spl1e=%p\n",
 pa,gpte,prev_spfn, prev_spl1e);
 
     // to get here, we know the l1 page *must* be shadowed
 
     gpfn = pa >> PAGE_SHIFT;
-    spfn = __shadow_status(current, gpfn) & PSH_pfn_mask;
+    spfn = __shadow_status(&current->mm, gpfn) & PSH_pfn_mask;
 
     if ( spfn == prev_spfn )
     {
@@ -581,13 +576,13 @@ void shadow_l2_normal_pt_update( unsigne
     // to get here, we know the l2 page has a shadow
 
     gpfn = pa >> PAGE_SHIFT;
-    spfn = __shadow_status(current, gpfn) & PSH_pfn_mask;
+    spfn = __shadow_status(&current->mm, gpfn) & PSH_pfn_mask;
 
 
     spte = 0;
 
     if( gpte & _PAGE_PRESENT )
-        s_sh = __shadow_status(current, gpte >> PAGE_SHIFT);
+        s_sh = __shadow_status(&current->mm, gpte >> PAGE_SHIFT);
 
     sp2le = (l2_pgentry_t *) map_domain_mem( spfn << PAGE_SHIFT );
     // no real need for a cache here
@@ -622,7 +617,7 @@ char * sh_check_name;
 #define FAIL(_f, _a...) \
 {printk("XXX %s-FAIL (%d,%d)" _f " g=%08lx s=%08lx\n", sh_check_name, level, i, ## _a , gpte, spte ); BUG();}
 
-static int check_pte( struct task_struct *p,
+static int check_pte( struct mm_struct *m,
                       unsigned long gpte, unsigned long spte, int level, int i )
 {
     unsigned long mask, gpfn, spfn;
@@ -680,7 +675,7 @@ static int check_pte( struct task_struct
 }
 
 
-static int check_l1_table( struct task_struct *p, unsigned long va,
+static int check_l1_table( struct mm_struct *m, unsigned long va,
                            unsigned long g2, unsigned long s2 )
 {
     int j;
@@ -709,7 +704,7 @@ static int check_l1_table( struct task_s
 #define FAILPT(_f, _a...) \
 {printk("XXX FAIL %s-PT" _f "\n", s, ## _a ); BUG();}
 
-int check_pagetable( struct task_struct *p, pagetable_t pt, char *s )
+int check_pagetable( struct mm_struct *m, pagetable_t pt, char *s )
 {
     unsigned long gptbase = pagetable_val(pt);
     unsigned long gpfn, spfn;
--- a/xen/include/asm-i386/processor.h	Fri Mar 26 15:07:40 2004 +0000
+++ b/xen/include/asm-i386/processor.h	Fri Mar 26 15:09:04 2004 +0000
@@ -11,6 +11,7 @@
 #include <asm/types.h>
 #include <asm/cpufeature.h>
 #include <asm/desc.h>
+#include <asm/flushtlb.h>
 #include <xen/config.h>
 #include <xen/spinlock.h>
 #include <hypervisor-ifs/hypervisor-if.h>
@@ -233,6 +234,8 @@ static inline void clear_in_cr4 (unsigne
     :"ax");
 }
 
+
+
 /*
  * Cyrix CPU configuration register indexes
  */
@@ -432,6 +435,22 @@ struct mm_struct {
     char gdt[6];
 };
 
+static inline void write_ptbase( struct mm_struct *m )
+{
+/*    printk("write_ptbase mode=%08x pt=%08lx st=%08lx\n",
+             m->shadow_mode, pagetable_val(m->pagetable),
+             pagetable_val(m->shadow_table) );
+ */
+    if( m->shadow_mode )
+    {
+        //check_pagetable( m, m->pagetable, "write_ptbase" );
+        write_cr3_counted(pagetable_val(m->shadow_table));
+    }
+    else
+        write_cr3_counted(pagetable_val(m->pagetable));
+}
+
+
 #define IDLE0_MM \
 { \
     perdomain_pt: 0, \
--- a/xen/include/xen/mm.h	Fri Mar 26 15:07:40 2004 +0000
+++ b/xen/include/xen/mm.h	Fri Mar 26 15:09:04 2004 +0000
@@ -8,6 +8,7 @@
 #include <xen/perfc.h>
 #include <xen/sched.h>
 
+#include <asm/processor.h>
 #include <asm/pgalloc.h>
 #include <asm/atomic.h>
 #include <asm/desc.h>
--- a/xen/include/xen/shadow.h	Fri Mar 26 15:07:40 2004 +0000
+++ b/xen/include/xen/shadow.h	Fri Mar 26 15:09:04 2004 +0000
@@ -1,12 +1,13 @@
 /* -*- Mode:C; c-basic-offset:4; tab-width:4 -*- */
 
-#ifndef _XENO_SHADOW_H
-#define _XENO_SHADOW_H
+#ifndef _XEN_SHADOW_H
+#define _XEN_SHADOW_H
 
 #include <xen/config.h>
 #include <xen/types.h>
-#include <xen/mm.h>
 #include <xen/perfc.h>
+#include <asm/processor.h>
+
 
 /* Shadow PT flag bits in pfn_info */
 #define PSH_shadowed    (1<<31) /* page has a shadow. PFN points to shadow */
@@ -23,15 +24,15 @@
 #define shadow_linear_l2_table ((l2_pgentry_t *)(SH_LINEAR_PT_VIRT_START+(SH_LINEAR_PT_VIRT_START>>(L2_PAGETABLE_SHIFT-L1_PAGETABLE_SHIFT))))
 
 extern int shadow_mode_control( struct task_struct *p, unsigned int op );
-extern pagetable_t shadow_mk_pagetable( struct task_struct *p,
-                                        unsigned long gptbase);
 extern int shadow_fault( unsigned long va, long error_code );
 extern void shadow_l1_normal_pt_update( unsigned long pa, unsigned long gpte,
                                         unsigned long *prev_spfn_ptr,
                                         l1_pgentry_t **prev_spl1e_ptr );
 extern void shadow_l2_normal_pt_update( unsigned long pa, unsigned long gpte );
 extern void unshadow_table( unsigned long gpfn, unsigned int type );
-extern int shadow_mode_enable( struct task_struct *p, unsigned int mode );
+extern int shadow_mode_enable( struct mm_struct *m, unsigned int mode );
+extern unsigned long shadow_l2_table(
+    struct mm_struct *m, unsigned long gpfn );
 
 #define SHADOW_DEBUG 0
 #define SHADOW_HASH_DEBUG 0
@@ -73,7 +74,7 @@ struct shadow_status {
 
 
 #if SHADOW_HASH_DEBUG
-static void shadow_audit(struct task_struct *p, int print)
+static void shadow_audit(struct mm_struct *m, int print)
 {
     int live=0, free=0, j=0, abs;
     struct shadow_status *a;
@@ -115,23 +116,25 @@ static void shadow_audit(struct task_str
 #define shadow_audit(p, print)
 #endif
 
-static inline struct shadow_status* hash_bucket( struct task_struct *p,
+
+
+static inline struct shadow_status* hash_bucket( struct mm_struct *m,
                                                  unsigned int gpfn )
 {
-    return &(p->mm.shadow_ht[gpfn % shadow_ht_buckets]);
+    return &(m->shadow_ht[gpfn % shadow_ht_buckets]);
 }
 
 
-static inline unsigned long __shadow_status( struct task_struct *p,
+static inline unsigned long __shadow_status( struct mm_struct *m,
                                              unsigned int gpfn )
 {
-    struct shadow_status **ob, *b, *B = hash_bucket( p, gpfn );
+    struct shadow_status **ob, *b, *B = hash_bucket( m, gpfn );
 
     b = B;
     ob = NULL;
 
-    SH_VVLOG("lookup gpfn=%08lx bucket=%08lx", gpfn, b );
-    shadow_audit(p,0);  // if in debug mode
+    SH_VVLOG("lookup gpfn=%08x bucket=%p", gpfn, b );
+    shadow_audit(m,0);  // if in debug mode
 
     do
     {
@@ -172,33 +175,33 @@ static inline unsigned long __shadow_sta
    ever becomes a problem, but since we need a spin lock on the hash table
    anyway its probably not worth being too clever. */
 
-static inline unsigned long get_shadow_status( struct task_struct *p,
+static inline unsigned long get_shadow_status( struct mm_struct *m,
                                                unsigned int gpfn )
 {
     unsigned long res;
 
-    spin_lock(&p->mm.shadow_lock);
-    res = __shadow_status( p, gpfn );
-    if (!res) spin_unlock(&p->mm.shadow_lock);
+    spin_lock(&m->shadow_lock);
+    res = __shadow_status( m, gpfn );
+    if (!res) spin_unlock(&m->shadow_lock);
     return res;
 }
 
 
-static inline void put_shadow_status( struct task_struct *p )
+static inline void put_shadow_status( struct mm_struct *m )
 {
-    spin_unlock(&p->mm.shadow_lock);
+    spin_unlock(&m->shadow_lock);
 }
 
 
-static inline void delete_shadow_status( struct task_struct *p,
+static inline void delete_shadow_status( struct mm_struct *m,
                                          unsigned int gpfn )
 {
     struct shadow_status *b, *B, **ob;
 
-    B = b = hash_bucket( p, gpfn );
+    B = b = hash_bucket( m, gpfn );
 
     SH_VVLOG("delete gpfn=%08x bucket=%p", gpfn, b );
-    shadow_audit(p,0);
+    shadow_audit(m,0);
     ASSERT(gpfn);
 
     if( b->pfn == gpfn )
@@ -210,8 +213,8 @@ static inline void delete_shadow_status(
             b->pfn = b->next->pfn;
 
             b->next = b->next->next;
-            D->next = p->mm.shadow_ht_free;
-            p->mm.shadow_ht_free = D;
+            D->next = m->shadow_ht_free;
+            m->shadow_ht_free = D;
         }
         else
         {
@@ -220,7 +223,7 @@ static inline void delete_shadow_status(
         }
 
 #if SHADOW_HASH_DEBUG
-        if( __shadow_status(p,gpfn) ) BUG();
+        if( __shadow_status(m,gpfn) ) BUG();
 #endif
         return;
     }
@@ -237,11 +240,11 @@ static inline void delete_shadow_status(
 
             // b is in the list
             *ob=b->next;
-            b->next = p->mm.shadow_ht_free;
-            p->mm.shadow_ht_free = b;
+            b->next = m->shadow_ht_free;
+            m->shadow_ht_free = b;
 
 #if SHADOW_HASH_DEBUG
-            if( __shadow_status(p,gpfn) ) BUG();
+            if( __shadow_status(m,gpfn) ) BUG();
 #endif
             return;
         }
@@ -256,18 +259,18 @@ static inline void delete_shadow_status(
 }
 
 
-static inline void set_shadow_status( struct task_struct *p,
+static inline void set_shadow_status( struct mm_struct *m,
                                       unsigned int gpfn, unsigned long s )
 {
     struct shadow_status *b, *B, *extra, **fptr;
     int i;
 
-    B = b = hash_bucket( p, gpfn );
+    B = b = hash_bucket( m, gpfn );
 
     ASSERT(gpfn);
     ASSERT(s);
     SH_VVLOG("set gpfn=%08x s=%08lx bucket=%p(%p)", gpfn, s, b, b->next );
-    shadow_audit(p,0);
+    shadow_audit(m,0);
 
     do
     {
@@ -294,7 +297,7 @@ static inline void set_shadow_status( st
         return;
     }
 
-    if( unlikely(p->mm.shadow_ht_free == NULL) )
+    if( unlikely(m->shadow_ht_free == NULL) )
     {
         SH_LOG("allocate more shadow hashtable blocks");
 
@@ -308,7 +311,7 @@ static inline void set_shadow_status( st
                 sizeof(struct shadow_status)) );
 
         // add extras to free list
-        fptr = &p->mm.shadow_ht_free;
+        fptr = &m->shadow_ht_free;
         for ( i=0; i<shadow_ht_extra_size; i++ )
         {
             *fptr = &extra[i];
@@ -316,15 +319,15 @@ static inline void set_shadow_status( st
         }
         *fptr = NULL;
 
-        *((struct shadow_status ** ) &p->mm.shadow_ht[shadow_ht_extra_size]) =
-            p->mm.shadow_ht_extras;
-        p->mm.shadow_ht_extras = extra;
+        *((struct shadow_status ** ) &m->shadow_ht[shadow_ht_extra_size]) =
+            m->shadow_ht_extras;
+        m->shadow_ht_extras = extra;
 
     }
 
     // should really put this in B to go right to front
-    b = p->mm.shadow_ht_free;
-    p->mm.shadow_ht_free = b->next;
+    b = m->shadow_ht_free;
+    m->shadow_ht_free = b->next;
     b->spfn_and_flags = s;
     b->pfn = gpfn;
     b->next = B->next;
@@ -333,13 +336,39 @@ static inline void set_shadow_status( st
     return;
 }
 
+static inline void shadow_mk_pagetable( struct mm_struct *mm )
+{
+    unsigned long gpfn, spfn=0;
+
+    SH_VVLOG("shadow_mk_pagetable( gptbase=%08lx, mode=%d )",
+             pagetable_val(mm->pagetable), mm->shadow_mode );
+
+    if ( unlikely(mm->shadow_mode) )
+    {
+        gpfn = pagetable_val(mm->pagetable) >> PAGE_SHIFT;
+
+        spin_lock(&mm->shadow_lock);
+        if ( unlikely((spfn=__shadow_status(mm, gpfn)) == 0 ) )
+        {
+            spfn = shadow_l2_table(mm, gpfn );
+        }
+        mm->shadow_table = mk_pagetable(spfn<<PAGE_SHIFT);
+        spin_unlock(&mm->shadow_lock);
+    }
+
+    SH_VVLOG("leaving shadow_mk_pagetable( gptbase=%08lx, mode=%d ) sh=%08lx",
+             pagetable_val(mm->pagetable), mm->shadow_mode,
+             pagetable_val(mm->shadow_table) );
+
+}
+
 
 
 #if SHADOW_DEBUG
-extern int check_pagetable( struct task_struct *p, pagetable_t pt, char *s );
+extern int check_pagetable( struct mm_struct *m, pagetable_t pt, char *s );
 #else
-#define check_pagetable( p, pt, s )
+#define check_pagetable( m, pt, s )
 #endif
 
 
-#endif
+#endif /* XEN_SHADOW_H */
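Together with `write_ptbase()` from the `processor.h` hunk above, the now-inline `shadow_mk_pagetable()` gives every page-table-base change the same three-step shape; note it takes `mm->shadow_lock` itself, which is why `shadow_l2_table()` dropped its own locking. A sketch of the protocol as the `do_extended_command` hunk in `xen/common/memory.c` applies it (function name is illustrative):

```c
/* Sketch of the new base-switch protocol: record the guest root,
 * refresh the shadow root if shadowing is enabled, then load
 * whichever root applies via write_ptbase(). */
static void example_new_baseptr(struct mm_struct *m, unsigned long pfn)
{
    m->pagetable = mk_pagetable(pfn << PAGE_SHIFT);

    shadow_mk_pagetable(m);  /* no-op unless m->shadow_mode; locks internally */
    write_ptbase(m);         /* loads shadow_table or pagetable into %cr3 */
}
```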
--- a/xen/net/dev.c	Fri Mar 26 15:07:40 2004 +0000
+++ b/xen/net/dev.c	Fri Mar 26 15:09:04 2004 +0000
@@ -548,7 +548,7 @@ void deliver_packet(struct sk_buff *skb,
     }
 
     if ( p->mm.shadow_mode &&
-         (spte_pfn=get_shadow_status(p, pte_page-frame_table)) )
+         (spte_pfn=get_shadow_status(&p->mm, pte_page-frame_table)) )
     {
         unsigned long *sptr = map_domain_mem( (spte_pfn<<PAGE_SHIFT) |
                                               (((unsigned long)ptep)&~PAGE_MASK) );
@@ -557,7 +557,7 @@ void deliver_packet(struct sk_buff *skb,
         *sptr = new_pte;
 
         unmap_domain_mem(sptr);
-        put_shadow_status(p);
+        put_shadow_status(&p->mm);
     }
 
     machine_to_phys_mapping[new_page - frame_table]
@@ -2113,14 +2113,14 @@ static void get_rx_bufs(net_vif_t *vif)
     }
 
     if ( p->mm.shadow_mode &&
-         (spfn=get_shadow_status(p, rx.addr>>PAGE_SHIFT)) )
+         (spfn=get_shadow_status(&p->mm, rx.addr>>PAGE_SHIFT)) )
     {
         unsigned long * sptr =
             map_domain_mem( (spfn<<PAGE_SHIFT) | (rx.addr&~PAGE_MASK) );
 
         *sptr = 0;
         unmap_domain_mem( sptr );
-        put_shadow_status(p);
+        put_shadow_status(&p->mm);
     }
 
     buf_pfn = pte >> PAGE_SHIFT;