ia64/xen-unstable

changeset 8513:3c84ce41d184

Change the context-switch interface. Get rid of
context_switch_finalise(). Instead provide a back-call,
context_switch_done(), for situations where the arch-specific
context_switch() function does not return to the caller, or
needs to perform parts of state restoration with interrupts
enabled.

Get rid of the ugly hack in arch/ia64.
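
For reference, a minimal sketch (not part of this changeset; the helper
names are hypothetical) of how an architecture whose context_switch()
never returns might use the new back-call. The real callers are the x86
and ia64 changes in the diff below:

    #include <xen/sched.h>   /* struct vcpu, context_switch_done() */

    /* Hypothetical arch-specific helpers, shown for illustration only. */
    extern void arch_save_and_switch(struct vcpu *prev, struct vcpu *next);
    extern void arch_finish_restore(struct vcpu *next);
    extern void arch_resume_guest(struct vcpu *next);

    void context_switch(struct vcpu *prev, struct vcpu *next)
    {
        /* Save @prev and load what is safe to load with IRQs disabled. */
        arch_save_and_switch(prev, next);

        /*
         * Tell the scheduler the switch is committed. This releases the
         * per-CPU schedule_lock and re-enables interrupts, so it must not
         * run until @prev's state is saved to memory.
         */
        context_switch_done();

        /* Restoration that needs interrupts enabled can happen here. */
        arch_finish_restore(next);

        /* Resume the guest; this function never returns to the caller. */
        arch_resume_guest(next);
    }

If context_switch() instead returns without calling context_switch_done(),
__enter_scheduler() sees context_switch_in_progress still set and makes the
call itself, so architectures that complete the whole switch with interrupts
disabled need no change.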

Signed-off-by: Keir Fraser <keir@xensource.com>
author kaf24@firebug.cl.cam.ac.uk
date Fri Jan 06 18:14:29 2006 +0100 (2006-01-06)
parents 82eafda1c710
children a933d82321b3
files xen/arch/ia64/xen/process.c xen/arch/ia64/xen/xenmisc.c xen/arch/x86/domain.c xen/common/schedule.c xen/include/xen/sched-if.h xen/include/xen/sched.h
line diff
     1.1 --- a/xen/arch/ia64/xen/process.c	Fri Jan 06 17:45:31 2006 +0100
     1.2 +++ b/xen/arch/ia64/xen/process.c	Fri Jan 06 18:14:29 2006 +0100
     1.3 @@ -71,12 +71,10 @@ void schedule_tail(struct vcpu *next)
     1.4  	//printk("current=%lx,shared_info=%lx\n",current,current->vcpu_info);
     1.5  	//printk("next=%lx,shared_info=%lx\n",next,next->vcpu_info);
     1.6  
     1.7 -    // TG: Real HACK FIXME.
     1.8 -    // This is currently necessary because when a new domain is started, 
     1.9 -    // the context_switch function of xen/common/schedule.c(__enter_scheduler)
    1.10 -    // never returns.  Therefore, the lock must be released.
    1.11 -    // schedule_tail is only called when a domain is started.
    1.12 -    spin_unlock_irq(&schedule_data[current->processor].schedule_lock);
    1.13 +    // This is necessary because when a new domain is started, our
    1.14 +    // implementation of context_switch() does not return (switch_to() has
    1.15 +    // special and peculiar behaviour in this case).
    1.16 +    context_switch_done();
    1.17  
    1.18  	/* rr7 will be postponed to last point when resuming back to guest */
    1.19      if(VMX_DOMAIN(current)){
     2.1 --- a/xen/arch/ia64/xen/xenmisc.c	Fri Jan 06 17:45:31 2006 +0100
     2.2 +++ b/xen/arch/ia64/xen/xenmisc.c	Fri Jan 06 18:14:29 2006 +0100
     2.3 @@ -329,11 +329,6 @@ if (!i--) { printk("+",id); i = 1000000;
     2.4      }
     2.5  }
     2.6  
     2.7 -void context_switch_finalise(struct vcpu *next)
     2.8 -{
     2.9 -	/* nothing to do */
    2.10 -}
    2.11 -
    2.12  void continue_running(struct vcpu *same)
    2.13  {
    2.14  	/* nothing to do */
     3.1 --- a/xen/arch/x86/domain.c	Fri Jan 06 17:45:31 2006 +0100
     3.2 +++ b/xen/arch/x86/domain.c	Fri Jan 06 18:14:29 2006 +0100
     3.3 @@ -46,7 +46,6 @@ boolean_param("noreboot", opt_noreboot);
     3.4  
     3.5  struct percpu_ctxt {
     3.6      struct vcpu *curr_vcpu;
     3.7 -    unsigned int context_not_finalised;
     3.8      unsigned int dirty_segment_mask;
     3.9  } __cacheline_aligned;
    3.10  static struct percpu_ctxt percpu_ctxt[NR_CPUS];
    3.11 @@ -758,21 +757,9 @@ void context_switch(struct vcpu *prev, s
    3.12           !is_idle_domain(next->domain) )
    3.13      {
    3.14          __context_switch();
    3.15 -        percpu_ctxt[cpu].context_not_finalised = 1;
    3.16 -    }
    3.17 -}
    3.18  
    3.19 -void context_switch_finalise(struct vcpu *next)
    3.20 -{
    3.21 -    unsigned int cpu = smp_processor_id();
    3.22 -
    3.23 -    ASSERT(local_irq_is_enabled());
    3.24 -
    3.25 -    if ( percpu_ctxt[cpu].context_not_finalised )
    3.26 -    {
    3.27 -        percpu_ctxt[cpu].context_not_finalised = 0;
    3.28 -
    3.29 -        BUG_ON(percpu_ctxt[cpu].curr_vcpu != next);
    3.30 +        context_switch_done();
    3.31 +        ASSERT(local_irq_is_enabled());
    3.32  
    3.33          if ( VMX_DOMAIN(next) )
    3.34          {
    3.35 @@ -785,6 +772,10 @@ void context_switch_finalise(struct vcpu
    3.36              vmx_load_msrs(next);
    3.37          }
    3.38      }
    3.39 +    else
    3.40 +    {
    3.41 +        context_switch_done();
    3.42 +    }
    3.43  
    3.44      schedule_tail(next);
    3.45      BUG();
     4.1 --- a/xen/common/schedule.c	Fri Jan 06 17:45:31 2006 +0100
     4.2 +++ b/xen/common/schedule.c	Fri Jan 06 18:14:29 2006 +0100
     4.3 @@ -474,11 +474,18 @@ static void __enter_scheduler(void)
     4.4               prev->domain->domain_id, prev->vcpu_id,
     4.5               next->domain->domain_id, next->vcpu_id);
     4.6  
     4.7 +    schedule_data[cpu].context_switch_in_progress = 1;
     4.8      context_switch(prev, next);
     4.9 +    if ( schedule_data[cpu].context_switch_in_progress )
    4.10 +        context_switch_done();
    4.11 +}
    4.12  
    4.13 +void context_switch_done(void)
    4.14 +{
    4.15 +    unsigned int cpu = smp_processor_id();
    4.16 +    ASSERT(schedule_data[cpu].context_switch_in_progress);
    4.17      spin_unlock_irq(&schedule_data[cpu].schedule_lock);
    4.18 -
    4.19 -    context_switch_finalise(next);
    4.20 +    schedule_data[cpu].context_switch_in_progress = 0;
    4.21  }
    4.22  
    4.23  /* No locking needed -- pointer comparison is safe :-) */
     5.1 --- a/xen/include/xen/sched-if.h	Fri Jan 06 17:45:31 2006 +0100
     5.2 +++ b/xen/include/xen/sched-if.h	Fri Jan 06 18:14:29 2006 +0100
     5.3 @@ -13,11 +13,12 @@
     5.4  
     5.5  struct schedule_data {
     5.6      spinlock_t          schedule_lock;  /* spinlock protecting curr        */
     5.7 -    struct vcpu *curr;           /* current task                    */
     5.8 -    struct vcpu *idle;           /* idle task for this cpu          */
     5.9 +    struct vcpu        *curr;           /* current task                    */
    5.10 +    struct vcpu        *idle;           /* idle task for this cpu          */
    5.11      void               *sched_priv;
    5.12      struct ac_timer     s_timer;        /* scheduling timer                */
    5.13      unsigned long       tick;           /* current periodic 'tick'         */
    5.14 +    int                 context_switch_in_progress;
    5.15  #ifdef BUCKETS
    5.16      u32                 hist[BUCKETS];  /* for scheduler latency histogram */
    5.17  #endif
     6.1 --- a/xen/include/xen/sched.h	Fri Jan 06 17:45:31 2006 +0100
     6.2 +++ b/xen/include/xen/sched.h	Fri Jan 06 18:14:29 2006 +0100
     6.3 @@ -287,13 +287,17 @@ extern void context_switch(
     6.4      struct vcpu *next);
     6.5  
     6.6  /*
     6.7 - * On some architectures (notably x86) it is not possible to entirely load
     6.8 - * @next's context with interrupts disabled. These may implement a function to
     6.9 - * finalise loading the new context after interrupts are re-enabled. This
    6.10 - * function is not given @prev and is not permitted to access it.
    6.11 + * If context_switch() does not return to the caller, or you need to perform
    6.12 + * some aspects of state restoration with interrupts enabled, then you must
    6.13 + * call context_switch_done() at a suitable safe point.
    6.14 + * 
    6.15 + * As when returning from context_switch(), the caller must ensure that the
    6.16 + * local CPU is no longer running in the previous VCPU's context, and that the
    6.17 + * context is saved to memory. Alternatively, if implementing lazy context
    6.18 + * switching, ensure that invoking sync_vcpu_execstate() will switch and
    6.19 + * commit the previous VCPU's state.
    6.20   */
    6.21 -extern void context_switch_finalise(
    6.22 -    struct vcpu *next);
    6.23 +extern void context_switch_done(void);
    6.24  
    6.25  /* Called by the scheduler to continue running the current VCPU. */
    6.26  extern void continue_running(