ia64/xen-unstable
changeset 2651:11aa3dfa5e00
bitkeeper revision 1.1159.113.2 (4171af5bkubQeHmeCV0gipUrBqdltA)
Fix TLB coherency bug in map_domain_mem, as pointed out
by Michael Fetterman.
author | kaf24@freefall.cl.cam.ac.uk |
---|---|
date | Sat Oct 16 23:31:39 2004 +0000 (2004-10-16) |
parents | dc59c5558adc |
children | 756c16a98f27 |
files | xen/arch/x86/x86_32/domain_page.c |
line diff
1.1 --- a/xen/arch/x86/x86_32/domain_page.c Sat Oct 16 01:39:42 2004 +0000
1.2 +++ b/xen/arch/x86/x86_32/domain_page.c Sat Oct 16 23:31:39 2004 +0000
1.3 @@ -19,7 +19,7 @@
1.4  #include <asm/flushtlb.h>
1.5
1.6  unsigned long *mapcache;
1.7 -static unsigned int map_idx, shadow_map_idx[NR_CPUS];
1.8 +static unsigned int map_idx, epoch, shadow_epoch[NR_CPUS];
1.9  static spinlock_t map_lock = SPIN_LOCK_UNLOCKED;
1.10
1.11  /* Use a spare PTE bit to mark entries ready for recycling. */
1.12 @@ -30,11 +30,11 @@ static void flush_all_ready_maps(void)
1.13  unsigned long *cache = mapcache;
1.14
1.15  /* A bit skanky -- depends on having an aligned PAGE_SIZE set of PTEs. */
1.16 -do { if ( (*cache & READY_FOR_TLB_FLUSH) ) *cache = 0; }
1.17 +do {
1.18 +    if ( (*cache & READY_FOR_TLB_FLUSH) )
1.19 +        *cache = 0;
1.20 +}
1.21  while ( ((unsigned long)(++cache) & ~PAGE_MASK) != 0 );
1.22 -
1.23 -perfc_incrc(domain_page_tlb_flush);
1.24 -local_flush_tlb();
1.25  }
1.26
1.27
1.28 @@ -50,25 +50,29 @@ void *map_domain_mem(unsigned long pa)
1.29  spin_lock_irqsave(&map_lock, flags);
1.30
1.31  /* Has some other CPU caused a wrap? We must flush if so. */
1.32 -if ( map_idx < shadow_map_idx[cpu] )
1.33 +if ( epoch != shadow_epoch[cpu] )
1.34  {
1.35      perfc_incrc(domain_page_tlb_flush);
1.36      local_flush_tlb();
1.37 +    shadow_epoch[cpu] = epoch;
1.38  }
1.39
1.40 -for ( ; ; )
1.41 -{
1.42 +do {
1.43      idx = map_idx = (map_idx + 1) & (MAPCACHE_ENTRIES - 1);
1.44 -    if ( idx == 0 ) flush_all_ready_maps();
1.45 -    if ( cache[idx] == 0 ) break;
1.46 +    if ( unlikely(idx == 0) )
1.47 +    {
1.48 +        flush_all_ready_maps();
1.49 +        perfc_incrc(domain_page_tlb_flush);
1.50 +        local_flush_tlb();
1.51 +        shadow_epoch[cpu] = ++epoch;
1.52 +    }
1.53  }
1.54 +while ( cache[idx] != 0 );
1.55
1.56  cache[idx] = (pa & PAGE_MASK) | __PAGE_HYPERVISOR;
1.57
1.58  spin_unlock_irqrestore(&map_lock, flags);
1.59
1.60 -shadow_map_idx[cpu] = idx;
1.61 -
1.62  va = MAPCACHE_VIRT_START + (idx << PAGE_SHIFT) + (pa & ~PAGE_MASK);
1.63  return (void *)va;
1.64  }