ia64/xen-unstable
changeset 2748:e9473b885613
bitkeeper revision 1.1159.1.286 (417f78e2iA5JKPrp7BvoiDunXMJ_mA)
Fix the TLB-flush logic. Epoch changes were broken.
author   | kaf24@freefall.cl.cam.ac.uk
date     | Wed Oct 27 10:30:58 2004 +0000 (2004-10-27)
parents  | ce225d7c7410
children | f985db0a10f6
files    | xen/arch/x86/flushtlb.c xen/arch/x86/smp.c xen/include/asm-x86/flushtlb.h
line diff
--- a/xen/arch/x86/flushtlb.c Wed Oct 27 09:25:04 2004 +0000
+++ b/xen/arch/x86/flushtlb.c Wed Oct 27 10:30:58 2004 +0000
@@ -4,7 +4,7 @@
  * TLB flushes are timestamped using a global virtual 'clock' which ticks
  * on any TLB flush on any processor.
  *
- * Copyright (c) 2003, K A Fraser
+ * Copyright (c) 2003-2004, K A Fraser
  */
 
 #include <xen/config.h>
@@ -12,12 +12,16 @@
 #include <xen/softirq.h>
 #include <asm/flushtlb.h>
 
+unsigned long tlbflush_epoch_changing;
 u32 tlbflush_clock;
 u32 tlbflush_time[NR_CPUS];
 
 void tlb_clocktick(void)
 {
     u32 y, ny;
+    unsigned long flags;
+
+    local_irq_save(flags);
 
     /* Tick the clock. 'y' contains the current time after the tick. */
     ny = tlbflush_clock;
@@ -25,8 +29,11 @@ void tlb_clocktick(void)
 #ifdef CONFIG_SMP
         if ( unlikely(((y = ny+1) & TLBCLOCK_EPOCH_MASK) == 0) )
         {
-            raise_softirq(NEW_TLBFLUSH_CLOCK_PERIOD_SOFTIRQ);
-            y = tlbflush_clock;
+            /* Epoch is changing: the first to detect this is the leader. */
+            if ( unlikely(!test_and_set_bit(0, &tlbflush_epoch_changing)) )
+                raise_softirq(NEW_TLBFLUSH_CLOCK_PERIOD_SOFTIRQ);
+            /* The clock doesn't tick again until end of the epoch change. */
+            y--;
             break;
         }
 #else
@@ -37,4 +44,6 @@ void tlb_clocktick(void)
 
     /* Update this CPU's timestamp to new time. */
     tlbflush_time[smp_processor_id()] = y;
+
+    local_irq_restore(flags);
 }
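
The heart of the fix is visible in this hunk: previously every CPU that ticked the clock across an epoch boundary raised the softirq and then re-read a clock that may already have moved on, which is the breakage the commit message refers to. Now the first CPU to detect the boundary wins an atomic test_and_set_bit on tlbflush_epoch_changing and becomes the sole leader; every other CPU backs the tick off (y--) so the clock stalls at the boundary until the leader completes the epoch change. Below is a minimal single-threaded, user-space model of that tick protocol, assuming a 16-bit epoch mask and using C11 atomics in place of Xen's test_and_set_bit; all names are the model's own, not Xen's.

/* Minimal user-space model (not Xen code) of the new tick protocol. */
#include <stdio.h>
#include <stdatomic.h>

#define TLBCLOCK_EPOCH_MASK 0xffffu   /* assumption: 16 time bits per epoch */

static unsigned int clock_val;
static atomic_flag epoch_changing = ATOMIC_FLAG_INIT;

/* Returns 1 if this caller is the epoch-change leader. */
static int clocktick(unsigned int *stamp)
{
    unsigned int y = clock_val + 1;
    int leader = 0;

    if ( (y & TLBCLOCK_EPOCH_MASK) == 0 )
    {
        /* Epoch boundary: first to set the flag leads the global flush. */
        leader = !atomic_flag_test_and_set(&epoch_changing);
        y--;                 /* clock stalls until the leader finishes */
    }
    else
    {
        clock_val = y;
    }
    *stamp = y;              /* this CPU's flush timestamp */
    return leader;
}

int main(void)
{
    unsigned int stamp;
    clock_val = TLBCLOCK_EPOCH_MASK - 1;   /* two ticks from the boundary */
    printf("tick1 leader=%d stamp=%#x\n", clocktick(&stamp), stamp);
    printf("tick2 leader=%d stamp=%#x\n", clocktick(&stamp), stamp); /* boundary: leader */
    printf("tick3 leader=%d stamp=%#x\n", clocktick(&stamp), stamp); /* boundary again: follower */
    return 0;
}

Running it prints leader=1 exactly once, for the first tick that lands on the boundary; a later tick at the same boundary finds the flag already set and does not re-elect a leader.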
--- a/xen/arch/x86/smp.c Wed Oct 27 09:25:04 2004 +0000
+++ b/xen/arch/x86/smp.c Wed Oct 27 10:30:58 2004 +0000
@@ -218,8 +218,8 @@ asmlinkage void smp_invalidate_interrupt
 {
     ack_APIC_irq();
     perfc_incrc(ipis);
-    if ( likely(test_and_clear_bit(smp_processor_id(), &flush_cpumask)) )
-        local_flush_tlb();
+    local_flush_tlb();
+    clear_bit(smp_processor_id(), &flush_cpumask);
 }
 
 void flush_tlb_mask(unsigned long mask)
@@ -267,15 +267,13 @@ void flush_tlb_mask(unsigned long mask)
  */
 void new_tlbflush_clock_period(void)
 {
-    spin_lock(&flush_lock);
+    /* Only the leader gets here. Noone else should tick the clock. */
+    ASSERT(((tlbflush_clock+1) & TLBCLOCK_EPOCH_MASK) == 0);
 
-    /* Someone may acquire the lock and execute the flush before us. */
-    if ( ((tlbflush_clock+1) & TLBCLOCK_EPOCH_MASK) != 0 )
-        goto out;
-
+    /* Flush everyone else. We definitely flushed just before entry. */
     if ( smp_num_cpus > 1 )
     {
-        /* Flush everyone else. We definitely flushed just before entry. */
+        spin_lock(&flush_lock);
         flush_cpumask = ((1 << smp_num_cpus) - 1) & ~(1 << smp_processor_id());
         send_IPI_allbutself(INVALIDATE_TLB_VECTOR);
         while ( flush_cpumask != 0 )
@@ -283,13 +281,18 @@ void new_tlbflush_clock_period(void)
             rep_nop();
             barrier();
         }
+        spin_unlock(&flush_lock);
     }
 
     /* No need for atomicity: we are the only possible updater. */
     tlbflush_clock++;
 
-  out:
-    spin_unlock(&flush_lock);
+    /* Finally, signal the end of the epoch-change protocol. */
+    wmb();
+    tlbflush_epoch_changing = 0;
+
+    /* In case we got to the end of the next epoch already. */
+    tlb_clocktick();
 }
 
 static void flush_tlb_all_pge_ipi(void* info)
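
Two things change here. smp_invalidate_interrupt() now flushes unconditionally and only then clears its bit in flush_cpumask, so the leader spinning on the mask cannot observe a CPU as done before its flush has actually happened. And new_tlbflush_clock_period(), reached only by the leader, drops the old lock/recheck/goto dance: it flushes everyone else, ticks the clock as the sole updater, and publishes completion by clearing tlbflush_epoch_changing behind a wmb() so the new clock value becomes visible first. Continuing the model above (same hypothetical globals and clocktick(); still not Xen code), the leader's completion path could be sketched as follows, with a C11 release store standing in for the wmb()-then-plain-store pair:

/* Leader's completion path, continuing the model above. In Xen this work
 * is done by new_tlbflush_clock_period(); all names here are the model's. */
static void end_epoch_change(void)
{
    /* In Xen: IPI every other CPU and spin until flush_cpumask drains. */

    /* Tick the clock past the boundary: we are the only possible updater. */
    clock_val++;

    /* Release ordering plays the role of wmb(): the clock update must be
     * visible to other CPUs before the flag-clear is. */
    atomic_flag_clear_explicit(&epoch_changing, memory_order_release);

    /* Re-tick, in case the next epoch boundary has already been reached. */
    unsigned int stamp;
    clocktick(&stamp);
}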
--- a/xen/include/asm-x86/flushtlb.h Wed Oct 27 09:25:04 2004 +0000
+++ b/xen/include/asm-x86/flushtlb.h Wed Oct 27 10:30:58 2004 +0000
@@ -4,7 +4,7 @@
  * TLB flushes are timestamped using a global virtual 'clock' which ticks
  * on any TLB flush on any processor.
  *
- * Copyright (c) 2003, K A Fraser
+ * Copyright (c) 2003-2004, K A Fraser
  */
 
 #ifndef __FLUSHTLB_H__
@@ -31,16 +31,17 @@
 static inline int NEED_FLUSH(u32 cpu_stamp, u32 lastuse_stamp)
 {
     /*
-     * Why does this work?
-     * 1. XOR sets high-order bits determines if stamps from differing epochs.
-     * 2. Subtraction sets high-order bits if 'cpu_stamp > lastuse_stamp'.
-     * In either case a flush is unnecessary: we therefore OR the results from
-     * (1) and (2), mask the high-order bits, and return the inverse.
+     * Worst case in which a flush really is required:
+     *  CPU has not flushed since end of last epoch (cpu_stamp = 0x0000ffff).
+     *  Clock has run to end of current epoch (clock = 0x0001ffff).
+     *  Therefore maximum valid difference is 0x10000 (EPOCH_MASK + 1).
+     * N.B. The clock cannot run further until the CPU has flushed once more
+     * and updated its stamp to 0x1ffff, so this is as 'far out' as it can get.
      */
-    return !(((lastuse_stamp^cpu_stamp)|(lastuse_stamp-cpu_stamp)) &
-             ~TLBCLOCK_EPOCH_MASK);
+    return ((lastuse_stamp - cpu_stamp) <= (TLBCLOCK_EPOCH_MASK + 1));
 }
 
+extern unsigned long tlbflush_epoch_changing;
 extern u32 tlbflush_clock;
 extern u32 tlbflush_time[NR_CPUS];
 
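
The rewritten NEED_FLUSH() replaces the XOR/OR bit trick with one unsigned comparison: a flush is needed only if the page's last use is at most one full epoch (EPOCH_MASK + 1 ticks) ahead of the CPU's last flush. Unsigned wraparound does the rest, since a CPU that flushed after the last use produces an enormous difference that fails the test. A self-contained sketch that reproduces the worst case from the new comment (the 0xffff mask is inferred from the 0x10000 figure there; the harness is illustrative, not Xen code):

#include <stdio.h>
#include <stdint.h>

#define TLBCLOCK_EPOCH_MASK 0xffffu   /* inferred: 0x10000 = EPOCH_MASK + 1 */

typedef uint32_t u32;

static inline int NEED_FLUSH(u32 cpu_stamp, u32 lastuse_stamp)
{
    /* Flush needed iff the last use is not provably older than the CPU's
     * last flush. Modulo-2^32 subtraction turns 'flushed after use' into
     * a huge difference, which the comparison rejects. */
    return ((lastuse_stamp - cpu_stamp) <= (TLBCLOCK_EPOCH_MASK + 1));
}

int main(void)
{
    /* Worst case from the comment: CPU last flushed at the end of the
     * previous epoch, page last used at the end of the current one.
     * Difference is exactly 0x10000, so a flush is still required. */
    printf("%d\n", NEED_FLUSH(0x0000ffff, 0x0001ffff)); /* prints 1 */

    /* CPU flushed after the page was last used: no flush required. */
    printf("%d\n", NEED_FLUSH(0x00012000, 0x00011fff)); /* prints 0 */
    return 0;
}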