ia64/xen-unstable

changeset 1301:d8fc9ec791e9

bitkeeper revision 1.863 (407c1595i8rvyUK0N49ldfbOv570xA)

Fix deadlock in TLB-flush routines.
author kaf24@scramble.cl.cam.ac.uk
date Tue Apr 13 16:30:13 2004 +0000 (2004-04-13)
parents 22991e70f58f
children 92668c30fa16
files xen/arch/i386/smp.c xen/include/asm-i386/system.h
     1.1 --- a/xen/arch/i386/smp.c	Tue Apr 13 14:36:00 2004 +0000
     1.2 +++ b/xen/arch/i386/smp.c	Tue Apr 13 16:30:13 2004 +0000
     1.3 @@ -21,6 +21,15 @@
     1.4  #ifdef CONFIG_SMP
     1.5  
     1.6  /*
     1.7 + * This lock must be acquired before sending a synchronous IPI to another
     1.8 + * CPU (i.e., IPI + spin waiting for acknowledgement). The only safe ways of
     1.9 + * acquiring the lock are spin_lock() and spin_trylock(). The former is only
    1.10 + * safe if local interrupts are enabled (otherwise we will never see an IPI
    1.11 + * destined for us which we must acknowledge for the lock to be released).
    1.12 + */
    1.13 +static spinlock_t synchronous_ipi_lock = SPIN_LOCK_UNLOCKED;
    1.14 +
    1.15 +/*
    1.16   *	Some notes on x86 processor bugs affecting SMP operation:
    1.17   *
    1.18   *	Pentium, Pentium Pro, II, III (and all CPUs) have bugs.
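
The deadlock scenario this rule prevents: CPU A holds the lock and spins waiting
for CPU B to acknowledge an IPI, while CPU B spins in spin_lock() on the same
lock with interrupts disabled -- B never takes the IPI, so A never releases the
lock. A minimal sketch of the rule, using only names from this patch plus a
hypothetical helper name:

    /*
     * Hypothetical helper (not part of the patch) illustrating the
     * acquisition rule stated in the comment above.
     */
    static int acquire_synchronous_ipi_lock(void)
    {
        if ( local_irq_is_enabled() )
        {
            /* Safe to spin: any IPI the current holder sends us is still
             * serviced, so its spin-wait for our acknowledgement completes. */
            spin_lock(&synchronous_ipi_lock);
            return 1;
        }
        /* IRQs off: one non-blocking attempt is the only safe option. */
        return spin_trylock(&synchronous_ipi_lock);
    }
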
    1.19 @@ -106,7 +115,7 @@ void send_IPI_self(int vector)
    1.20      __send_IPI_shortcut(APIC_DEST_SELF, vector);
    1.21  }
    1.22  
    1.23 -static inline void send_IPI_mask_bitmask(int mask, int vector)
    1.24 +static inline void send_IPI_mask(int mask, int vector)
    1.25  {
    1.26      unsigned long cfg;
    1.27      unsigned long flags;
    1.28 @@ -139,48 +148,6 @@ static inline void send_IPI_mask_bitmask
    1.29      __restore_flags(flags);
    1.30  }
    1.31  
    1.32 -static inline void send_IPI_mask_sequence(int mask, int vector)
    1.33 -{
    1.34 -    unsigned long cfg, flags;
    1.35 -    unsigned int query_cpu, query_mask;
    1.36 -
    1.37 -    __save_flags(flags);
    1.38 -    __cli();
    1.39 -
    1.40 -    for (query_cpu = 0; query_cpu < NR_CPUS; ++query_cpu) {
    1.41 -        query_mask = 1 << query_cpu;
    1.42 -        if (query_mask & mask) {
    1.43 -		
    1.44 -            /*
    1.45 -             * Wait for idle.
    1.46 -             */
    1.47 -            apic_wait_icr_idle();
    1.48 -		
    1.49 -            /*
    1.50 -             * prepare target chip field
    1.51 -             */
    1.52 -            cfg = __prepare_ICR2(cpu_to_logical_apicid(query_cpu));
    1.53 -            apic_write_around(APIC_ICR2, cfg);
    1.54 -		
    1.55 -            /*
    1.56 -             * program the ICR 
    1.57 -             */
    1.58 -            cfg = __prepare_ICR(0, vector);
    1.59 -			
    1.60 -            /*
    1.61 -             * Send the IPI. The write to APIC_ICR fires this off.
    1.62 -             */
    1.63 -            apic_write_around(APIC_ICR, cfg);
    1.64 -        }
    1.65 -    }
    1.66 -    __restore_flags(flags);
    1.67 -}
    1.68 -
    1.69 -static inline void send_IPI_mask(int mask, int vector)
    1.70 -{
    1.71 -    send_IPI_mask_bitmask(mask, vector);
    1.72 -}
    1.73 -
    1.74  static inline void send_IPI_allbutself(int vector)
    1.75  {
    1.76      /*
    1.77 @@ -194,11 +161,6 @@ static inline void send_IPI_allbutself(i
    1.78      __send_IPI_shortcut(APIC_DEST_ALLBUT, vector);
    1.79  }
    1.80  
    1.81 -static inline void send_IPI_all(int vector)
    1.82 -{
    1.83 -    __send_IPI_shortcut(APIC_DEST_ALLINC, vector);
    1.84 -}
    1.85 -
    1.86  /*
    1.87   * ********* XEN NOTICE **********
     1.88   * I've left the following comments lying around as they look like they might
    1.89 @@ -259,14 +221,13 @@ static inline void send_IPI_all(int vect
    1.90   */
    1.91  
    1.92  static volatile unsigned long flush_cpumask;
    1.93 -static spinlock_t tlbstate_lock = SPIN_LOCK_UNLOCKED;
    1.94  #define FLUSH_ALL	0xffffffff
    1.95  
    1.96  asmlinkage void smp_invalidate_interrupt(void)
    1.97  {
    1.98      ack_APIC_irq();
    1.99 +    local_flush_tlb();
   1.100      clear_bit(smp_processor_id(), &flush_cpumask);
   1.101 -    local_flush_tlb();
   1.102  }
   1.103  
   1.104  int try_flush_tlb_mask(unsigned long mask)
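
Note the reordering above: clearing our bit in flush_cpumask is the
acknowledgement the initiating CPU spins on, so the local flush must happen
before the bit is cleared. With the old order the initiator could drop the lock
and carry on while remote flushes were still pending. A minimal user-space
analogue of this work-then-acknowledge protocol, using C11 atomics and POSIX
threads in place of IPIs (all names hypothetical; build with cc -std=c11 -pthread):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_uint flush_mask;   /* stands in for flush_cpumask */
    static atomic_int  work_done;    /* stands in for the TLB flush */

    /* Plays the role of smp_invalidate_interrupt() on "CPU 1". */
    static void *responder(void *arg)
    {
        unsigned me = 1u << 1;
        while ( !(atomic_load(&flush_mask) & me) )
            ;                                   /* wait for the "IPI"  */
        atomic_store(&work_done, 1);            /* the flush...        */
        atomic_fetch_and(&flush_mask, ~me);     /* ...then the ack     */
        return NULL;
    }

    int main(void)
    {
        pthread_t t;
        pthread_create(&t, NULL, responder, NULL);
        atomic_store(&flush_mask, 1u << 1);     /* "send the IPI" */
        while ( atomic_load(&flush_mask) != 0 )
            ;                        /* spin, as flush_tlb_mask() does */
        /* Work precedes acknowledgement, so this always prints 1. */
        printf("work_done = %d\n", atomic_load(&work_done));
        pthread_join(t, NULL);
        return 0;
    }
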
   1.105 @@ -279,7 +240,7 @@ int try_flush_tlb_mask(unsigned long mas
   1.106  
   1.107      if ( mask != 0 )
   1.108      {
   1.109 -        if ( unlikely(!spin_trylock(&tlbstate_lock)) )
   1.110 +        if ( unlikely(!spin_trylock(&synchronous_ipi_lock)) )
   1.111              return 0;
   1.112          flush_cpumask = mask;
   1.113          send_IPI_mask(mask, INVALIDATE_TLB_VECTOR);
   1.114 @@ -288,7 +249,7 @@ int try_flush_tlb_mask(unsigned long mas
   1.115              rep_nop();
   1.116              barrier();
   1.117          }
   1.118 -        spin_unlock(&tlbstate_lock);
   1.119 +        spin_unlock(&synchronous_ipi_lock);
   1.120      }
   1.121  
   1.122      return 1;
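
try_flush_tlb_mask() is the variant that is safe when interrupts may be
disabled: it makes a single spin_trylock() attempt and returns 0 rather than
spinning. A failed attempt must be handled by deferring the flush; retrying in
a tight loop with IRQs off would recreate the very deadlock the trylock
discipline avoids. A hypothetical caller (the deferral path is illustrative,
not part of this patch):

    if ( !try_flush_tlb_mask(mask) )
        defer_tlb_flush(mask);   /* hypothetical: retry later with IRQs
                                    enabled, or batch the flush */
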
   1.123 @@ -296,9 +257,7 @@ int try_flush_tlb_mask(unsigned long mas
   1.124  
   1.125  void flush_tlb_mask(unsigned long mask)
   1.126  {
   1.127 -    /* WARNING: Only try_flush_tlb_mask() is safe in IRQ context. */
   1.128 -    if ( unlikely(in_irq()) )
   1.129 -        BUG();
   1.130 +    ASSERT(local_irq_is_enabled());
   1.131      
   1.132      if ( mask & (1 << smp_processor_id()) )
   1.133      {
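
The old in_irq() check was too weak: code outside interrupt context can still
run with interrupts disabled, would pass the check, and could then deadlock
inside spin_lock() if the lock holder was waiting for our acknowledgement. The
new ASSERT tests the condition that actually matters. A hypothetical caller the
old check would have let through (other_cpus is an illustrative mask):

    local_irq_disable();
    flush_tlb_mask(other_cpus);  /* old: in_irq() passes, can deadlock;
                                    new: ASSERT fires immediately       */
    local_irq_enable();
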
   1.134 @@ -308,7 +267,7 @@ void flush_tlb_mask(unsigned long mask)
   1.135  
   1.136      if ( mask != 0 )
   1.137      {
   1.138 -        spin_lock(&tlbstate_lock);
   1.139 +        spin_lock(&synchronous_ipi_lock);
   1.140          flush_cpumask = mask;
   1.141          send_IPI_mask(mask, INVALIDATE_TLB_VECTOR);
   1.142          while ( flush_cpumask != 0 )
   1.143 @@ -316,16 +275,17 @@ void flush_tlb_mask(unsigned long mask)
   1.144              rep_nop();
   1.145              barrier();
   1.146          }
   1.147 -        spin_unlock(&tlbstate_lock);
   1.148 +        spin_unlock(&synchronous_ipi_lock);
   1.149      }
   1.150  }
   1.151  
   1.152  void new_tlbflush_clock_period(void)
   1.153  {
   1.154 -    if ( unlikely(!spin_trylock(&tlbstate_lock)) )
   1.155 +    if ( unlikely(!spin_trylock(&synchronous_ipi_lock)) )
   1.156          return;
   1.157  
   1.158 -    if ( unlikely((flush_cpumask = tlbflush_mask) != 0) )
   1.159 +    flush_cpumask = tlbflush_mask & ~(1 << smp_processor_id());
   1.160 +    if ( unlikely(flush_cpumask != 0) )
   1.161      {
   1.162          send_IPI_mask(flush_cpumask, INVALIDATE_TLB_VECTOR);
   1.163          while ( flush_cpumask != 0 )
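
The second fix above: new_tlbflush_clock_period() may be entered with
interrupts disabled (its spin_trylock() acquisition permits that), so a CPU
that waits on its own bit in flush_cpumask waits forever -- the bit is cleared
only by smp_invalidate_interrupt(), which it can no longer take. Stripping the
local CPU's bit before waiting removes the hazard. The removed pattern, in
isolation (illustrative only):

    flush_cpumask = tlbflush_mask;   /* old code: may include our own bit */
    while ( flush_cpumask != 0 )     /* never terminates if it does and   */
        rep_nop();                   /* our interrupts are disabled       */
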
   1.164 @@ -340,7 +300,7 @@ void new_tlbflush_clock_period(void)
   1.165      wmb(); /* Reset the mask before allowing the clock to continue ticking. */
   1.166      tlbflush_clock++;
   1.167  
   1.168 -    spin_unlock(&tlbstate_lock);
   1.169 +    spin_unlock(&synchronous_ipi_lock);
   1.170  }
   1.171  
   1.172  static void flush_tlb_all_pge_ipi(void* info)
   1.173 @@ -359,12 +319,6 @@ void smp_send_event_check_mask(unsigned 
   1.174      send_IPI_mask(cpu_mask, EVENT_CHECK_VECTOR);
   1.175  }
   1.176  
   1.177 -/*
   1.178 - * Structure and data for smp_call_function(). This is designed to minimise
   1.179 - * static memory requirements. It also looks cleaner.
   1.180 - */
   1.181 -static spinlock_t call_lock = SPIN_LOCK_UNLOCKED;
   1.182 -
   1.183  struct call_data_struct {
   1.184      void (*func) (void *info);
   1.185      void *info;
   1.186 @@ -408,7 +362,9 @@ int smp_call_function (void (*func) (voi
   1.187      if (wait)
   1.188          atomic_set(&data.finished, 0);
   1.189  
   1.190 -    spin_lock(&call_lock);
   1.191 +    ASSERT(local_irq_is_enabled());
   1.192 +
   1.193 +    spin_lock(&synchronous_ipi_lock);
   1.194      call_data = &data;
   1.195      wmb();
   1.196      /* Send a message to all other CPUs and wait for them to respond */
   1.197 @@ -422,7 +378,7 @@ int smp_call_function (void (*func) (voi
   1.198          while (atomic_read(&data.finished) != cpus)
   1.199              barrier();
   1.200  
   1.201 -    spin_unlock(&call_lock);
   1.202 +    spin_unlock(&synchronous_ipi_lock);
   1.203  
   1.204      return 0;
   1.205  }
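
smp_call_function() performs the same IPI-and-spin-for-acknowledgement pattern
as the TLB-flush paths, so its private call_lock is folded into the shared
synchronous_ipi_lock. It still uses plain spin_lock(), hence the new ASSERT:
callers must have interrupts enabled. A hypothetical caller, assuming the
(func, info, retry, wait) signature this file inherits from its Linux 2.4
lineage:

    /* Hypothetical: count how many CPUs ran the callback. */
    static void count_cpu(void *info)
    {
        atomic_inc((atomic_t *)info);
    }

    void example(void)
    {
        atomic_t seen = ATOMIC_INIT(0);
        /* Interrupts must be enabled here, or the ASSERT fires. */
        smp_call_function(count_cpu, &seen, 1, 1);  /* all others, wait */
        count_cpu(&seen);                           /* and ourselves    */
    }
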
     2.1 --- a/xen/include/asm-i386/system.h	Tue Apr 13 14:36:00 2004 +0000
     2.2 +++ b/xen/include/asm-i386/system.h	Tue Apr 13 16:30:13 2004 +0000
     2.3 @@ -175,6 +175,13 @@ static inline unsigned long __cmpxchg(vo
     2.4  #define local_irq_disable()	__cli()
     2.5  #define local_irq_enable()	__sti()
     2.6  
     2.7 +static inline int local_irq_is_enabled(void)
     2.8 +{
     2.9 +    unsigned long flags;
    2.10 +    __save_flags(flags);
    2.11 +    return !!(flags & (1<<9)); /* EFLAGS_IF */
    2.12 +}
    2.13 +
    2.14  #ifdef CONFIG_SMP
    2.15  
    2.16  extern void __global_cli(void);
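
local_irq_is_enabled() reads EFLAGS via __save_flags() and tests IF, the
interrupt-enable flag in bit 9. The same test can be tried standalone in user
space, where IF normally reads as set (illustrative program, not part of Xen;
build with a GNU toolchain on x86):

    #include <stdio.h>

    static int irq_flag_set(void)
    {
        unsigned long flags;
        __asm__ __volatile__ ( "pushf ; pop %0" : "=g" (flags) );
        return !!(flags & (1 << 9));   /* EFLAGS.IF */
    }

    int main(void)
    {
        printf("IF=%d\n", irq_flag_set());   /* prints IF=1 */
        return 0;
    }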