ia64/xen-unstable

changeset 19670:0e111bfd22d0

x86: Fix flush_area_mask() and on_selected_cpus() to not race updates
of the supplied cpumask (which is now passed by reference).
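
The pattern the change adopts can be shown as a minimal standalone sketch
(the mask_t type, the snapshot variable, send_ipi() and request_on_cpus()
below are illustrative stand-ins, not the Xen code): copy the caller's
mask into storage guarded by the lock that already serialises the request,
and from then on reference only that copy, so a concurrent update of the
caller's mask cannot change which CPUs are targeted.

    #include <pthread.h>

    /* Stand-in for cpumask_t: a fixed-size bitmap of CPUs. */
    typedef struct { unsigned long bits[4]; } mask_t;

    static pthread_mutex_t call_lock = PTHREAD_MUTEX_INITIALIZER;
    static mask_t snapshot;          /* stable copy, protected by call_lock */

    /* Hypothetical IPI sender; only ever handed the lock-protected copy. */
    static void send_ipi(const mask_t *m, int vector)
    {
        (void)m; (void)vector;
    }

    void request_on_cpus(const mask_t *selected, int vector)
    {
        pthread_mutex_lock(&call_lock);

        /* Snapshot the caller-supplied mask while holding the lock... */
        snapshot = *selected;

        /* ...and hand out only the snapshot; the caller may now change
         * *selected without affecting which CPUs receive the IPI. */
        send_ipi(&snapshot, vector);

        pthread_mutex_unlock(&call_lock);
    }

In the patch below, flush_area_mask() gets this by sending the IPI with
the already-snapshotted flush_cpumask, and on_selected_cpus() by embedding
a cpumask_t in the static call_data structure and copying *selected into
it under call_lock.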

Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Thu May 28 10:19:15 2009 +0100 (2009-05-28)
parents e95c4611a0ae
children ae810b258394
files xen/arch/x86/smp.c
--- a/xen/arch/x86/smp.c	Thu May 28 10:06:01 2009 +0100
+++ b/xen/arch/x86/smp.c	Thu May 28 10:19:15 2009 +0100
@@ -192,7 +192,7 @@ void flush_area_mask(const cpumask_t *ma
         cpus_andnot(flush_cpumask, *mask, *cpumask_of(smp_processor_id()));
         flush_va      = va;
         flush_flags   = flags;
-        send_IPI_mask(mask, INVALIDATE_TLB_VECTOR);
+        send_IPI_mask(&flush_cpumask, INVALIDATE_TLB_VECTOR);
         while ( !cpus_empty(flush_cpumask) )
             cpu_relax();
         spin_unlock(&flush_lock);
@@ -223,18 +223,16 @@ void smp_send_event_check_mask(const cpu
  * Structure and data for smp_call_function()/on_selected_cpus().
  */
 
-struct call_data_struct {
+static void __smp_call_function_interrupt(void);
+static DEFINE_SPINLOCK(call_lock);
+static struct call_data_struct {
     void (*func) (void *info);
     void *info;
     int wait;
     atomic_t started;
     atomic_t finished;
-    const cpumask_t *selected;
-};
-
-static DEFINE_SPINLOCK(call_lock);
-static struct call_data_struct *call_data;
-static void __smp_call_function_interrupt(void);
+    cpumask_t selected;
+} call_data;
 
 int smp_call_function(
     void (*func) (void *info),
@@ -252,39 +250,39 @@ int on_selected_cpus(
     void *info,
     int wait)
 {
-    struct call_data_struct data;
-    unsigned int nr_cpus = cpus_weight(*selected);
+    unsigned int nr_cpus;
 
     ASSERT(local_irq_is_enabled());
 
-    if ( nr_cpus == 0 )
-        return 0;
-
-    data.func = func;
-    data.info = info;
-    data.wait = wait;
-    atomic_set(&data.started, 0);
-    atomic_set(&data.finished, 0);
-    data.selected = selected;
-
     spin_lock(&call_lock);
 
-    call_data = &data;
+    call_data.selected = *selected;
 
-    send_IPI_mask(selected, CALL_FUNCTION_VECTOR);
+    nr_cpus = cpus_weight(call_data.selected);
+    if ( nr_cpus == 0 )
+        goto out;
 
-    if ( cpu_isset(smp_processor_id(), *call_data->selected) )
+    call_data.func = func;
+    call_data.info = info;
+    call_data.wait = wait;
+    atomic_set(&call_data.started, 0);
+    atomic_set(&call_data.finished, 0);
+
+    send_IPI_mask(&call_data.selected, CALL_FUNCTION_VECTOR);
+
+    if ( cpu_isset(smp_processor_id(), call_data.selected) )
     {
         local_irq_disable();
         __smp_call_function_interrupt();
         local_irq_enable();
     }
 
-    while ( atomic_read(wait ? &data.finished : &data.started) != nr_cpus )
+    while ( atomic_read(wait ? &call_data.finished : &call_data.started)
+            != nr_cpus )
        cpu_relax();
 
+ out:
     spin_unlock(&call_lock);
-
     return 0;
 }
 
@@ -345,24 +343,24 @@ fastcall void smp_event_check_interrupt(
 
 static void __smp_call_function_interrupt(void)
 {
-    void (*func)(void *info) = call_data->func;
-    void *info = call_data->info;
+    void (*func)(void *info) = call_data.func;
+    void *info = call_data.info;
 
-    if ( !cpu_isset(smp_processor_id(), *call_data->selected) )
+    if ( !cpu_isset(smp_processor_id(), call_data.selected) )
         return;
 
     irq_enter();
 
-    if ( call_data->wait )
+    if ( call_data.wait )
     {
         (*func)(info);
         mb();
-        atomic_inc(&call_data->finished);
+        atomic_inc(&call_data.finished);
     }
     else
     {
         mb();
-        atomic_inc(&call_data->started);
+        atomic_inc(&call_data.started);
         (*func)(info);
     }
 