ia64/xen-unstable

changeset 8585:bcf188da3ea1

Simplify vcpu_sleep_sync() and sched_adjdom(). Their
interaction with the scheduler locks was unnecessarily
complicated.

Signed-off-by: Keir Fraser <keir@xensource.com>
author kaf24@firebug.cl.cam.ac.uk
date Thu Jan 12 23:25:02 2006 +0100 (2006-01-12)
parents 8531996d207f
children c055d76ec559
files xen/common/schedule.c
--- a/xen/common/schedule.c	Thu Jan 12 21:48:51 2006 +0100
+++ b/xen/common/schedule.c	Thu Jan 12 23:25:02 2006 +0100
@@ -171,20 +171,13 @@ void vcpu_sleep_nosync(struct vcpu *v)
     spin_unlock_irqrestore(&schedule_data[v->processor].schedule_lock, flags);
 
     TRACE_2D(TRC_SCHED_SLEEP, v->domain->domain_id, v->vcpu_id);
-} 
+}
 
 void vcpu_sleep_sync(struct vcpu *v)
 {
     vcpu_sleep_nosync(v);
 
-    /*
-     * We can be sure that the VCPU is finally descheduled after the running
-     * flag is cleared and the scheduler lock is released. We also check that
-     * the domain continues to be unrunnable, in case someone else wakes it.
-     */
-    while ( !vcpu_runnable(v) &&
-            (test_bit(_VCPUF_running, &v->vcpu_flags) ||
-             spin_is_locked(&schedule_data[v->processor].schedule_lock)) )
+    while ( !vcpu_runnable(v) && test_bit(_VCPUF_running, &v->vcpu_flags) )
         cpu_relax();
 
     sync_vcpu_execstate(v);
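
For reference, here is vcpu_sleep_sync() as it reads after this change, reassembled from the hunk above (the closing brace lies outside the hunk; the comments are editorial):

void vcpu_sleep_sync(struct vcpu *v)
{
    /* Mark the VCPU as not runnable; this does not wait for descheduling. */
    vcpu_sleep_nosync(v);

    /*
     * Busy-wait until the VCPU is actually descheduled. The _VCPUF_running
     * flag alone is now taken as the signal that the VCPU is off the CPU,
     * so the old spin_is_locked() test on the schedule_lock is dropped.
     * Runnability is re-checked in case someone else wakes the VCPU.
     */
    while ( !vcpu_runnable(v) && test_bit(_VCPUF_running, &v->vcpu_flags) )
        cpu_relax();

    /* The VCPU's execution state can now be synchronised safely. */
    sync_vcpu_execstate(v);
}
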
@@ -314,68 +307,42 @@ long sched_adjdom(struct sched_adjdom_cm
 {
     struct domain *d;
     struct vcpu *v;
-    int cpu;
-#if NR_CPUS <=32
-    unsigned long have_lock;
- #else
-    unsigned long long have_lock;
-#endif
-    int succ;
-
-    #define __set_cpu_bit(cpu, data) data |= ((typeof(data))1)<<cpu
-    #define __get_cpu_bit(cpu, data) (data & ((typeof(data))1)<<cpu)
-    #define __clear_cpu_bits(data) data = ((typeof(data))0)
     
-    if ( cmd->sched_id != ops.sched_id )
-        return -EINVAL;
-    
-    if ( cmd->direction != SCHED_INFO_PUT && cmd->direction != SCHED_INFO_GET )
+    if ( (cmd->sched_id != ops.sched_id) ||
+         ((cmd->direction != SCHED_INFO_PUT) &&
+          (cmd->direction != SCHED_INFO_GET)) )
         return -EINVAL;
 
     d = find_domain_by_id(cmd->domain);
     if ( d == NULL )
         return -ESRCH;
 
-    /* acquire locks on all CPUs on which vcpus of this domain run */
-    do {
-        succ = 0;
-        __clear_cpu_bits(have_lock);
-        for_each_vcpu ( d, v )
-        {
-            cpu = v->processor;
-            if ( !__get_cpu_bit(cpu, have_lock) )
-            {
-                /* if we don't have a lock on this CPU: acquire it*/
-                if ( spin_trylock(&schedule_data[cpu].schedule_lock) )
-                {
-                    /*we have this lock!*/
-                    __set_cpu_bit(cpu, have_lock);
-                    succ = 1;
-                }
-                else
-                {
-                    /*we didn,t get this lock -> free all other locks too!*/
-                    for ( cpu = 0; cpu < NR_CPUS; cpu++ )
-                        if ( __get_cpu_bit(cpu, have_lock) )
-                            spin_unlock(&schedule_data[cpu].schedule_lock);
-                    /* and start from the beginning! */
-                    succ = 0;
-                    /* leave the "for_each_domain_loop" */
-                    break;
-                }
-            }
-        }
-    } while ( !succ );
+    /*
+     * Most VCPUs we can simply pause. If we are adjusting this VCPU then
+     * we acquire the local schedule_lock to guard against concurrent updates.
+     */
+    for_each_vcpu ( d, v )
+    {
+        if ( v == current )
+            spin_lock_irq(&schedule_data[smp_processor_id()].schedule_lock);
+        else
+            vcpu_pause(v);
+    }
 
     SCHED_OP(adjdom, d, cmd);
 
-    for ( cpu = 0; cpu < NR_CPUS; cpu++ )
-        if ( __get_cpu_bit(cpu, have_lock) )
-            spin_unlock(&schedule_data[cpu].schedule_lock);
-    __clear_cpu_bits(have_lock);
+    TRACE_1D(TRC_SCHED_ADJDOM, d->domain_id);
 
-    TRACE_1D(TRC_SCHED_ADJDOM, d->domain_id);
+    for_each_vcpu ( d, v )
+    {
+        if ( v == current )
+            spin_unlock_irq(&schedule_data[smp_processor_id()].schedule_lock);
+        else
+            vcpu_unpause(v);
+    }
+
     put_domain(d);
+
     return 0;
 }
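
And here is the simplified sched_adjdom(), reassembled from the second hunk since the diff interleaves old and new lines (the added comments are editorial). The per-CPU trylock dance is replaced by pausing every VCPU of the domain, which quiesces each one synchronously; the one VCPU that cannot pause itself is the caller's own, hence the local schedule_lock for the current case, as the patch's comment notes:

long sched_adjdom(struct sched_adjdom_cmd *cmd)
{
    struct domain *d;
    struct vcpu *v;

    /* Validate scheduler id and requested direction in a single test. */
    if ( (cmd->sched_id != ops.sched_id) ||
         ((cmd->direction != SCHED_INFO_PUT) &&
          (cmd->direction != SCHED_INFO_GET)) )
        return -EINVAL;

    d = find_domain_by_id(cmd->domain);
    if ( d == NULL )
        return -ESRCH;

    /*
     * Most VCPUs we can simply pause. If we are adjusting this VCPU then
     * we acquire the local schedule_lock to guard against concurrent updates.
     */
    for_each_vcpu ( d, v )
    {
        if ( v == current )
            spin_lock_irq(&schedule_data[smp_processor_id()].schedule_lock);
        else
            vcpu_pause(v);
    }

    /* Every VCPU is now quiesced or locked out: adjust the parameters. */
    SCHED_OP(adjdom, d, cmd);

    TRACE_1D(TRC_SCHED_ADJDOM, d->domain_id);

    /* Undo the pauses/lock in the same pattern. */
    for_each_vcpu ( d, v )
    {
        if ( v == current )
            spin_unlock_irq(&schedule_data[smp_processor_id()].schedule_lock);
        else
            vcpu_unpause(v);
    }

    put_domain(d);

    return 0;
}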