ia64/xen-unstable

changeset 16896:7327e1c2a42c

Introduce new vcpu_lock_affinity() and vcpu_unlock_affinity() helper
functions for use by x86's continue_hypercall_on_cpu().

This has two advantages:
1. We can lock out ordinary vcpu_set_affinity() commands from dom0.
2. We avoid the (in this case bogus) check for dom0_vcpus_pin.

Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Sat Jan 26 11:25:48 2008 +0000 (2008-01-26)
parents 4fd33f77be6b
children 63275fd1596a
files xen/arch/x86/domain.c xen/common/schedule.c xen/include/xen/sched.h
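
The two advantages above come from gating affinity changes on a new per-vcpu flag: vcpu_lock_affinity() pins the vcpu and sets affinity_locked, and while the flag is set an ordinary vcpu_set_affinity() from the dom0 toolstack fails with -EBUSY; the lock path also bypasses the dom0_vcpus_pin check. A minimal sketch of the intended semantics (not part of the patch; the real call sites are in the hunks below):

    cpumask_t mask = cpumask_of_cpu(cpu);

    if ( vcpu_lock_affinity(v, &mask) == 0 )    /* pin v to 'cpu', set affinity_locked */
    {
        /* ... do the work that must run on 'cpu' ... */
        /* Meanwhile, vcpu_set_affinity(v, ...) from the toolstack gets -EBUSY. */
        vcpu_unlock_affinity(v, &mask);         /* clear the lock, restore the old mask */
    }
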
line diff
--- a/xen/arch/x86/domain.c	Sat Jan 26 10:39:58 2008 +0000
+++ b/xen/arch/x86/domain.c	Sat Jan 26 11:25:48 2008 +0000
@@ -1416,16 +1416,16 @@ static void continue_hypercall_on_cpu_he
 {
     struct cpu_user_regs *regs = guest_cpu_user_regs();
     struct migrate_info *info = v->arch.continue_info;
+    cpumask_t mask = info->saved_affinity;

     regs->eax = info->func(info->data);

     v->arch.schedule_tail = info->saved_schedule_tail;
-    v->cpu_affinity = info->saved_affinity;
     v->arch.continue_info = NULL;

     xfree(info);

-    vcpu_set_affinity(v, &v->cpu_affinity);
+    vcpu_unlock_affinity(v, &mask);
     schedule_tail(v);
 }

@@ -1433,7 +1433,6 @@ int continue_hypercall_on_cpu(int cpu, l
 {
     struct vcpu *v = current;
     struct migrate_info *info;
-    cpumask_t mask = cpumask_of_cpu(cpu);
     int rc;

     if ( cpu == smp_processor_id() )
@@ -1446,12 +1445,12 @@ int continue_hypercall_on_cpu(int cpu, l
     info->func = func;
     info->data = data;
     info->saved_schedule_tail = v->arch.schedule_tail;
-    info->saved_affinity = v->cpu_affinity;
+    info->saved_affinity = cpumask_of_cpu(cpu);

     v->arch.schedule_tail = continue_hypercall_on_cpu_helper;
     v->arch.continue_info = info;

-    rc = vcpu_set_affinity(v, &mask);
+    rc = vcpu_lock_affinity(v, &info->saved_affinity);
     if ( rc )
     {
         v->arch.schedule_tail = info->saved_schedule_tail;
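
Note the in/out convention the helper above relies on: info->saved_affinity is seeded with cpumask_of_cpu(cpu), and vcpu_lock_affinity() swaps the vcpu's previous mask back through that same field. By the time continue_hypercall_on_cpu_helper() runs, saved_affinity therefore holds the original affinity, which is exactly what gets copied into 'mask' and handed to vcpu_unlock_affinity(). An illustrative trace (commentary only, assuming the vcpu's old affinity was CPUs 0-3 and the target is CPU 2):

    /*
     * info->saved_affinity = cpumask_of_cpu(cpu);     -> {2}
     * vcpu_lock_affinity(v, &info->saved_affinity);   -> {0-3}  (old mask swapped in)
     * ...vcpu migrates and the continuation runs on CPU 2...
     * mask = info->saved_affinity;                    -> {0-3}
     * vcpu_unlock_affinity(v, &mask);                 -> affinity restored to {0-3}
     */
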
--- a/xen/common/schedule.c	Sat Jan 26 10:39:58 2008 +0000
+++ b/xen/common/schedule.c	Sat Jan 26 11:25:48 2008 +0000
@@ -262,12 +262,11 @@ void vcpu_force_reschedule(struct vcpu *
     }
 }

-int vcpu_set_affinity(struct vcpu *v, cpumask_t *affinity)
+static int __vcpu_set_affinity(
+    struct vcpu *v, cpumask_t *affinity,
+    bool_t old_lock_status, bool_t new_lock_status)
 {
-    cpumask_t online_affinity;
-
-    if ( (v->domain->domain_id == 0) && opt_dom0_vcpus_pin )
-        return -EINVAL;
+    cpumask_t online_affinity, old_affinity;

     cpus_and(online_affinity, *affinity, cpu_online_map);
     if ( cpus_empty(online_affinity) )
@@ -275,7 +274,18 @@ int vcpu_set_affinity(struct vcpu *v, cp

     vcpu_schedule_lock_irq(v);

+    if ( v->affinity_locked != old_lock_status )
+    {
+        BUG_ON(!v->affinity_locked);
+        vcpu_schedule_unlock_irq(v);
+        return -EBUSY;
+    }
+
+    v->affinity_locked = new_lock_status;
+
+    old_affinity = v->cpu_affinity;
     v->cpu_affinity = *affinity;
+    *affinity = old_affinity;
     if ( !cpu_isset(v->processor, v->cpu_affinity) )
         set_bit(_VPF_migrating, &v->pause_flags);

@@ -290,6 +300,31 @@ int vcpu_set_affinity(struct vcpu *v, cp
     return 0;
 }

+int vcpu_set_affinity(struct vcpu *v, cpumask_t *affinity)
+{
+    if ( (v->domain->domain_id == 0) && opt_dom0_vcpus_pin )
+        return -EINVAL;
+    return __vcpu_set_affinity(v, affinity, 0, 0);
+}
+
+int vcpu_lock_affinity(struct vcpu *v, cpumask_t *affinity)
+{
+    return __vcpu_set_affinity(v, affinity, 0, 1);
+}
+
+void vcpu_unlock_affinity(struct vcpu *v, cpumask_t *affinity)
+{
+    cpumask_t online_affinity;
+
+    /* Do not fail if no CPU in old affinity mask is online. */
+    cpus_and(online_affinity, *affinity, cpu_online_map);
+    if ( cpus_empty(online_affinity) )
+        *affinity = cpu_online_map;
+
+    if ( __vcpu_set_affinity(v, affinity, 1, 0) != 0 )
+        BUG();
+}
+
 /* Block the currently-executing domain until a pertinent event occurs. */
 static long do_block(void)
 {
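
The refactoring above is essentially a two-state machine on the new affinity_locked flag, with the displaced mask handed back through the in/out 'affinity' argument. A stand-alone user-space model of that handshake (an illustration under the assumption of plain C with no Xen headers; the toy_* names are invented):

    #include <stdio.h>
    #include <errno.h>

    /* Stand-alone model of the handshake in __vcpu_set_affinity() above
     * (illustration only, not Xen code). */
    typedef int bool_t;

    struct toy_vcpu {
        bool_t        affinity_locked;
        unsigned long cpu_affinity;     /* stands in for cpumask_t */
    };

    static int toy_set_affinity(struct toy_vcpu *v, unsigned long *affinity,
                                bool_t old_lock_status, bool_t new_lock_status)
    {
        unsigned long old_affinity;

        if ( v->affinity_locked != old_lock_status )
            return -EBUSY;              /* caller does not match current state */

        v->affinity_locked = new_lock_status;

        old_affinity = v->cpu_affinity; /* swap: the previous mask goes back   */
        v->cpu_affinity = *affinity;    /* to the caller through the pointer,  */
        *affinity = old_affinity;       /* just like the real helper.          */
        return 0;
    }

    int main(void)
    {
        struct toy_vcpu v = { .affinity_locked = 0, .cpu_affinity = 0xffUL };
        unsigned long pin = 0x08UL;     /* "cpumask_of_cpu(3)" */
        unsigned long dom0_request = 0x03UL;

        toy_set_affinity(&v, &pin, 0, 1);             /* vcpu_lock_affinity()   */
        printf("locked to %#lx, saved %#lx\n", v.cpu_affinity, pin);

        /* An ordinary vcpu_set_affinity() while locked is refused with -EBUSY. */
        printf("set while locked -> %d\n", toy_set_affinity(&v, &dom0_request, 0, 0));

        toy_set_affinity(&v, &pin, 1, 0);             /* vcpu_unlock_affinity() */
        printf("restored to %#lx\n", v.cpu_affinity);
        return 0;
    }

The real vcpu_unlock_affinity() additionally falls back to cpu_online_map if no CPU in the saved mask is still online, and BUG()s if the vcpu was not actually locked.
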
--- a/xen/include/xen/sched.h	Sat Jan 26 10:39:58 2008 +0000
+++ b/xen/include/xen/sched.h	Sat Jan 26 11:25:48 2008 +0000
@@ -122,6 +122,8 @@ struct vcpu
     bool_t           defer_shutdown;
     /* VCPU is paused following shutdown request (d->is_shutting_down)? */
     bool_t           paused_for_shutdown;
+    /* VCPU affinity is temporarily locked from controller changes? */
+    bool_t           affinity_locked;

     unsigned long    pause_flags;
     atomic_t         pause_count;
@@ -485,6 +487,8 @@ void cpu_init(void);

 void vcpu_force_reschedule(struct vcpu *v);
 int vcpu_set_affinity(struct vcpu *v, cpumask_t *affinity);
+int vcpu_lock_affinity(struct vcpu *v, cpumask_t *affinity);
+void vcpu_unlock_affinity(struct vcpu *v, cpumask_t *affinity);

 void vcpu_runstate_get(struct vcpu *v, struct vcpu_runstate_info *runstate);