ia64/xen-unstable

changeset 8528:0ba3b9d60da6

Fix context_switch(). It is necessary to set_current() and
then check curr_vcpu all with interrupts disabled. This in
turn requires us to hoist the check of next's vcpu_dirty_cpumask,
because sending the flush IPI requires interrupts to be enabled
to avoid deadlock.

Signed-off-by: Keir Fraser <keir@xensource.com>
author kaf24@firebug.cl.cam.ac.uk
date Mon Jan 09 12:25:05 2006 +0100 (2006-01-09)
parents 8af1199488d3
children 96c7303b03ab
files xen/arch/x86/domain.c
line diff
     1.1 --- a/xen/arch/x86/domain.c	Mon Jan 09 11:22:17 2006 +0000
     1.2 +++ b/xen/arch/x86/domain.c	Mon Jan 09 12:25:05 2006 +0100
     1.3 @@ -689,6 +689,9 @@ static void __context_switch(void)
     1.4      struct vcpu          *p = percpu_ctxt[cpu].curr_vcpu;
     1.5      struct vcpu          *n = current;
     1.6  
     1.7 +    ASSERT(p != n);
     1.8 +    ASSERT(cpus_empty(n->vcpu_dirty_cpumask));
     1.9 +
    1.10      if ( !is_idle_domain(p->domain) )
    1.11      {
    1.12          memcpy(&p->arch.guest_context.user_regs,
    1.13 @@ -748,24 +751,31 @@ static void __context_switch(void)
    1.14  void context_switch(struct vcpu *prev, struct vcpu *next)
    1.15  {
    1.16      unsigned int cpu = smp_processor_id();
    1.17 +    cpumask_t dirty_mask = next->vcpu_dirty_cpumask;
    1.18  
    1.19      ASSERT(local_irq_is_enabled());
    1.20  
    1.21 +    /* Allow at most one CPU at a time to be dirty. */
    1.22 +    ASSERT(cpus_weight(dirty_mask) <= 1);
    1.23 +    if ( unlikely(!cpu_isset(cpu, dirty_mask) && !cpus_empty(dirty_mask)) )
    1.24 +    {
    1.25 +        /* Other cpus call __sync_lazy_execstate from flush ipi handler. */
    1.26 +        flush_tlb_mask(dirty_mask);
    1.27 +    }
    1.28 +
    1.29 +    local_irq_disable();
    1.30 +
    1.31      set_current(next);
    1.32  
    1.33 -    if ( (percpu_ctxt[cpu].curr_vcpu != next) &&
    1.34 -         !is_idle_domain(next->domain) )
    1.35 +    if ( (percpu_ctxt[cpu].curr_vcpu == next) || is_idle_domain(next->domain) )
    1.36      {
    1.37 -        /* This may happen if next has been migrated by the scheduler. */
    1.38 -        if ( unlikely(!cpus_empty(next->vcpu_dirty_cpumask)) )
    1.39 -        {
    1.40 -            ASSERT(!cpu_isset(cpu, next->vcpu_dirty_cpumask));
    1.41 -            sync_vcpu_execstate(next);
    1.42 -            ASSERT(cpus_empty(next->vcpu_dirty_cpumask));
    1.43 -        }
    1.44 +        local_irq_enable();
    1.45 +    }
    1.46 +    else
    1.47 +    {
    1.48 +        __context_switch();
    1.49  
    1.50 -        local_irq_disable();
    1.51 -        __context_switch();
    1.52 +        /* Re-enable interrupts before restoring state which may fault. */
    1.53          local_irq_enable();
    1.54  
    1.55          if ( VMX_DOMAIN(next) )