ia64/xen-unstable

changeset 8596:c1840ac1f05d

Introduce a locking protocol for acquiring the 'scheduler
lock' on a particular VCPU. Since this requires acquiring
the appropriate per-CPU lock, we must re-check the VCPU's
current CPU binding after the lock is acquired.
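
The re-check matters because of a race: between reading v->processor and
taking the corresponding per-CPU lock, the scheduler may migrate the VCPU
to another CPU, leaving the caller holding a lock that no longer guards
the VCPU's scheduling state. In outline, the protocol (a sketch mirroring
the vcpu_schedule_lock() added to sched-if.h below) is:

    for ( ; ; )
    {
        unsigned int cpu = v->processor;              /* sample current binding */
        spin_lock(&schedule_data[cpu].schedule_lock); /* take that CPU's lock   */
        if ( likely(v->processor == cpu) )            /* binding still valid?   */
            break;                                    /* yes: correct lock held */
        /* VCPU migrated between the read and the lock; drop it and retry. */
        spin_unlock(&schedule_data[cpu].schedule_lock);
    }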

Signed-off-by: Keir Fraser <keir@xensource.com>
author kaf24@firebug.cl.cam.ac.uk
date Fri Jan 13 16:44:04 2006 +0100 (2006-01-13)
parents 0c5980d0bf20
children 3f702887d4a6
files xen/common/sched_bvt.c xen/common/schedule.c xen/include/xen/sched-if.h
--- a/xen/common/sched_bvt.c	Fri Jan 13 16:27:45 2006 +0100
+++ b/xen/common/sched_bvt.c	Fri Jan 13 16:44:04 2006 +0100
@@ -98,9 +98,9 @@ static inline int __task_on_runqueue(str
 static void warp_timer_fn(void *data)
 {
     struct bvt_dom_info *inf = data;
-    unsigned int cpu = inf->domain->vcpu[0]->processor;
-    
-    spin_lock_irq(&schedule_data[cpu].schedule_lock);
+    struct vcpu *v = inf->domain->vcpu[0];
+
+    vcpu_schedule_lock_irq(v);
 
     inf->warp = 0;
 
@@ -108,28 +108,28 @@ static void warp_timer_fn(void *data)
     if ( inf->warpu == 0 )
     {
         inf->warpback = 0;
-        cpu_raise_softirq(cpu, SCHEDULE_SOFTIRQ);   
+        cpu_raise_softirq(v->processor, SCHEDULE_SOFTIRQ);   
     }
     
     set_timer(&inf->unwarp_timer, NOW() + inf->warpu);
 
-    spin_unlock_irq(&schedule_data[cpu].schedule_lock);
+    vcpu_schedule_unlock_irq(v);
 }
 
 static void unwarp_timer_fn(void *data)
 {
     struct bvt_dom_info *inf = data;
-    unsigned int cpu = inf->domain->vcpu[0]->processor;
+    struct vcpu *v = inf->domain->vcpu[0];
 
-    spin_lock_irq(&schedule_data[cpu].schedule_lock);
+    vcpu_schedule_lock_irq(v);
 
     if ( inf->warpback )
     {
         inf->warp = 1;
-        cpu_raise_softirq(cpu, SCHEDULE_SOFTIRQ);   
+        cpu_raise_softirq(v->processor, SCHEDULE_SOFTIRQ);   
     }
     
-    spin_unlock_irq(&schedule_data[cpu].schedule_lock);
+    vcpu_schedule_unlock_irq(v);
 }
 
 static inline u32 calc_avt(struct vcpu *d, s_time_t now)
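
(Note on the hunks above: these timer callbacks make no assumption about
which CPU domain->vcpu[0] is bound to, and that binding can change while
the callback runs, so a cached 'cpu' value could go stale; this is exactly
the case vcpu_schedule_lock_irq() handles. v->processor is re-read for
cpu_raise_softirq() only while the lock is held and the binding is stable.)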
--- a/xen/common/schedule.c	Fri Jan 13 16:27:45 2006 +0100
+++ b/xen/common/schedule.c	Fri Jan 13 16:44:04 2006 +0100
@@ -165,10 +165,10 @@ void vcpu_sleep_nosync(struct vcpu *v)
 {
     unsigned long flags;
 
-    spin_lock_irqsave(&schedule_data[v->processor].schedule_lock, flags);
+    vcpu_schedule_lock_irqsave(v, flags);
     if ( likely(!vcpu_runnable(v)) )
         SCHED_OP(sleep, v);
-    spin_unlock_irqrestore(&schedule_data[v->processor].schedule_lock, flags);
+    vcpu_schedule_unlock_irqrestore(v, flags);
 
     TRACE_2D(TRC_SCHED_SLEEP, v->domain->domain_id, v->vcpu_id);
 }
@@ -187,13 +187,13 @@ void vcpu_wake(struct vcpu *v)
 {
     unsigned long flags;
 
-    spin_lock_irqsave(&schedule_data[v->processor].schedule_lock, flags);
+    vcpu_schedule_lock_irqsave(v, flags);
     if ( likely(vcpu_runnable(v)) )
     {
         SCHED_OP(wake, v);
         v->wokenup = NOW();
     }
-    spin_unlock_irqrestore(&schedule_data[v->processor].schedule_lock, flags);
+    vcpu_schedule_unlock_irqrestore(v, flags);
 
     TRACE_2D(TRC_SCHED_WAKE, v->domain->domain_id, v->vcpu_id);
 }
@@ -324,7 +324,7 @@ long sched_adjdom(struct sched_adjdom_cm
     for_each_vcpu ( d, v )
     {
         if ( v == current )
-            spin_lock_irq(&schedule_data[smp_processor_id()].schedule_lock);
+            vcpu_schedule_lock_irq(v);
         else
             vcpu_pause(v);
     }
@@ -336,7 +336,7 @@ long sched_adjdom(struct sched_adjdom_cm
     for_each_vcpu ( d, v )
     {
         if ( v == current )
-            spin_unlock_irq(&schedule_data[smp_processor_id()].schedule_lock);
+            vcpu_schedule_unlock_irq(v);
         else
             vcpu_unpause(v);
     }
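
(Note on the sched_adjdom() hunks: the current VCPU cannot pause itself, so
its schedule lock is taken instead. Since v == current is running on the
local CPU, vcpu_schedule_lock_irq(v) acquires the same lock the old
smp_processor_id()-based code did, without hard-coding that assumption.)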
--- a/xen/include/xen/sched-if.h	Fri Jan 13 16:27:45 2006 +0100
+++ b/xen/include/xen/sched-if.h	Fri Jan 13 16:44:04 2006 +0100
@@ -16,16 +16,47 @@ struct schedule_data {
     struct vcpu        *curr;           /* current task                    */
     struct vcpu        *idle;           /* idle task for this cpu          */
     void               *sched_priv;
-    struct timer     s_timer;        /* scheduling timer                */
+    struct timer        s_timer;        /* scheduling timer                */
     unsigned long       tick;           /* current periodic 'tick'         */
 #ifdef BUCKETS
     u32                 hist[BUCKETS];  /* for scheduler latency histogram */
 #endif
 } __cacheline_aligned;
 
+extern struct schedule_data schedule_data[];
+
+static inline void vcpu_schedule_lock(struct vcpu *v)
+{
+    unsigned int cpu;
+
+    for ( ; ; )
+    {
+        cpu = v->processor;
+        spin_lock(&schedule_data[cpu].schedule_lock);
+        if ( likely(v->processor == cpu) )
+            break;
+        spin_unlock(&schedule_data[cpu].schedule_lock);
+    }
+}
+
+#define vcpu_schedule_lock_irq(v) \
+    do { local_irq_disable(); vcpu_schedule_lock(v); } while ( 0 )
+#define vcpu_schedule_lock_irqsave(v, flags) \
+    do { local_irq_save(flags); vcpu_schedule_lock(v); } while ( 0 )
+
+static inline void vcpu_schedule_unlock(struct vcpu *v)
+{
+    spin_unlock(&schedule_data[v->processor].schedule_lock);
+}
+
+#define vcpu_schedule_unlock_irq(v) \
+    do { vcpu_schedule_unlock(v); local_irq_enable(); } while ( 0 )
+#define vcpu_schedule_unlock_irqrestore(v, flags) \
+    do { vcpu_schedule_unlock(v); local_irq_restore(flags); } while ( 0 )
+
 struct task_slice {
     struct vcpu *task;
-    s_time_t            time;
+    s_time_t     time;
 };
 
 struct scheduler {
@@ -48,6 +79,4 @@ struct scheduler {
     void         (*dump_cpu_state) (int);
 };
 
-extern struct schedule_data schedule_data[];
-
 #endif /* __XEN_SCHED_IF_H__ */
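
For reference, the caller-side pattern the new interface gives (a sketch;
example_remote_op() is a hypothetical function, the helpers are the ones
added above):

    /* Hypothetical caller: act on a VCPU that may run on any CPU. */
    void example_remote_op(struct vcpu *v)
    {
        unsigned long flags;

        vcpu_schedule_lock_irqsave(v, flags);
        /* The lock held is guaranteed to match v->processor, so v's
         * scheduling state can be examined or modified safely here. */
        vcpu_schedule_unlock_irqrestore(v, flags);
    }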