ia64/xen-unstable

changeset 5523:4cadd9fa93d5

bitkeeper revision 1.1725 (42b7e4fcombAuDB0AR2i8hfnOEMfOQ)

sched.h, xen.h, schedule.c, dom0_ops.c, hypercall.h, smpboot.c, process.c:
Extend the CONFIG_HOTPLUG_CPU behavior down into the hypervisor.
Add two sched_op commands (SCHEDOP_vcpu_down/SCHEDOP_vcpu_up) which set and
clear the VCPU flag VCPUF_down.  The domain_runnable() check now consults
this flag, so a vcpu is not scheduled while VCPUF_down is set.  (A sketch of
the argument encoding used by the new hypercalls follows the file list below.)
Signed-off-by: Ryan Harper <ryanh@us.ibm.com>
Signed-off-by: Christian Limpach <Christian.Limpach@cl.cam.ac.uk>
author cl349@firebug.cl.cam.ac.uk
date Tue Jun 21 09:59:24 2005 +0000 (2005-06-21)
parents 2387d992079a
children 4ef3760d61f0
files linux-2.6.11-xen-sparse/arch/xen/i386/kernel/process.c linux-2.6.11-xen-sparse/arch/xen/i386/kernel/smpboot.c linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/hypercall.h xen/common/dom0_ops.c xen/common/schedule.c xen/include/public/xen.h xen/include/xen/sched.h
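
Not part of the changeset itself: a minimal, self-contained sketch of the
argument encoding used by the new hypercalls, with the SCHEDOP_* constants
mirrored from the xen/include/public/xen.h hunk below.  The low 8 bits of
the sched_op argument carry the command and bits 8 and up carry the target
VCPU id, which is what HYPERVISOR_vcpu_down/up build on the guest side and
what do_sched_op() decodes in the hypervisor.  The standalone main() and the
example vcpu id are illustrative only.

    /* Illustration only; constants copied from xen/include/public/xen.h. */
    #include <stdio.h>

    #define SCHEDOP_vcpu_down       3   /* Make target VCPU not runnable. */
    #define SCHEDOP_vcpu_up         4   /* Make target VCPU runnable.     */
    #define SCHEDOP_cmdmask       255   /* 8-bit command.                 */
    #define SCHEDOP_vcpushift       8   /* 8-bit VCPU target.             */

    int main(void)
    {
        int vcpu = 2;   /* example target VCPU */

        /* What HYPERVISOR_vcpu_down(2) loads into the hypercall argument... */
        unsigned long op = SCHEDOP_vcpu_down | (vcpu << SCHEDOP_vcpushift);

        /* ...and what do_sched_op() recovers from it on the hypervisor side. */
        printf("command = %lu, target vcpu = %d\n",
               op & SCHEDOP_cmdmask, (int)(op >> SCHEDOP_vcpushift));
        return 0;
    }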
line diff
     1.1 --- a/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/process.c	Tue Jun 21 08:59:34 2005 +0000
     1.2 +++ b/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/process.c	Tue Jun 21 09:59:24 2005 +0000
     1.3 @@ -154,8 +154,13 @@ void cpu_idle (void)
     1.4  				cpu_clear(cpu, cpu_idle_map);
     1.5  			rmb();
     1.6  
     1.7 -			if (cpu_is_offline(cpu))
     1.8 +			if (cpu_is_offline(cpu)) {
     1.9 +#if defined(CONFIG_XEN) && defined(CONFIG_HOTPLUG_CPU)
    1.10 +				/* Tell hypervisor to take vcpu down. */
    1.11 +				HYPERVISOR_vcpu_down(cpu);
    1.12 +#endif
    1.13  				play_dead();
     1.14 +			}
    1.15  
    1.16  			irq_stat[cpu].idle_timestamp = jiffies;
    1.17  			xen_idle();
     2.1 --- a/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/smpboot.c	Tue Jun 21 08:59:34 2005 +0000
     2.2 +++ b/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/smpboot.c	Tue Jun 21 09:59:24 2005 +0000
     2.3 @@ -1397,6 +1397,10 @@ int __devinit __cpu_up(unsigned int cpu)
     2.4  	}
     2.5  
     2.6  #ifdef CONFIG_HOTPLUG_CPU
     2.7 +#ifdef CONFIG_XEN
     2.8 +	/* Tell hypervisor to bring vcpu up. */
     2.9 +	HYPERVISOR_vcpu_up(cpu);
    2.10 +#endif
    2.11  	/* Already up, and in cpu_quiescent now? */
    2.12  	if (cpu_isset(cpu, smp_commenced_mask)) {
    2.13  		cpu_enable(cpu);
     3.1 --- a/linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/hypercall.h	Tue Jun 21 08:59:34 2005 +0000
     3.2 +++ b/linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/hypercall.h	Tue Jun 21 09:59:24 2005 +0000
     3.3 @@ -517,4 +517,35 @@ HYPERVISOR_boot_vcpu(
     3.4      return ret;
     3.5  }
     3.6  
     3.7 +static inline int
     3.8 +HYPERVISOR_vcpu_down(
     3.9 +    int vcpu)
    3.10 +{
    3.11 +    int ret;
    3.12 +    unsigned long ign1;
    3.13 +    __asm__ __volatile__ (
    3.14 +        TRAP_INSTR
    3.15 +        : "=a" (ret), "=b" (ign1)
     3.16 +        : "0" (__HYPERVISOR_sched_op),
     3.17 +          "1" (SCHEDOP_vcpu_down | (vcpu << SCHEDOP_vcpushift))
    3.18 +        : "memory" );
    3.19 +
    3.20 +    return ret;
    3.21 +}
    3.22 +
    3.23 +static inline int
    3.24 +HYPERVISOR_vcpu_up(
    3.25 +    int vcpu)
    3.26 +{
    3.27 +    int ret;
    3.28 +    unsigned long ign1;
    3.29 +    __asm__ __volatile__ (
    3.30 +        TRAP_INSTR
    3.31 +        : "=a" (ret), "=b" (ign1)
     3.32 +        : "0" (__HYPERVISOR_sched_op),
     3.33 +          "1" (SCHEDOP_vcpu_up | (vcpu << SCHEDOP_vcpushift))
    3.34 +        : "memory" );
    3.35 +
    3.36 +    return ret;
    3.37 +}
    3.38  #endif /* __HYPERCALL_H__ */
     4.1 --- a/xen/common/dom0_ops.c	Tue Jun 21 08:59:34 2005 +0000
     4.2 +++ b/xen/common/dom0_ops.c	Tue Jun 21 09:59:24 2005 +0000
     4.3 @@ -339,9 +339,14 @@ long do_dom0_op(dom0_op_t *u_dom0_op)
     4.4           * - domain is marked as paused or blocked only if all its vcpus 
     4.5           *   are paused or blocked 
     4.6           * - domain is marked as running if any of its vcpus is running
      4.7 +         * - only map vcpus that aren't down.  Note: at some point we may
      4.8 +         *   wish to demux the -1 value to indicate down vs. not-ever-booted.
      4.9 +         *
    4.10           */
    4.11          for_each_vcpu ( d, v ) {
    4.12 -            op->u.getdomaininfo.vcpu_to_cpu[v->vcpu_id] = v->processor;
    4.13 +            /* only map vcpus that are up */
    4.14 +            if ( !(test_bit(_VCPUF_down, &v->vcpu_flags)) )
    4.15 +                op->u.getdomaininfo.vcpu_to_cpu[v->vcpu_id] = v->processor;
    4.16              op->u.getdomaininfo.cpumap[v->vcpu_id]      = v->cpumap;
    4.17              if ( !(v->vcpu_flags & VCPUF_ctrl_pause) )
    4.18                  flags &= ~DOMFLAGS_PAUSED;
    4.19 @@ -384,6 +389,7 @@ long do_dom0_op(dom0_op_t *u_dom0_op)
    4.20          struct vcpu_guest_context *c;
    4.21          struct domain             *d;
    4.22          struct vcpu               *v;
    4.23 +        int i;
    4.24  
    4.25          d = find_domain_by_id(op->u.getvcpucontext.domain);
    4.26          if ( d == NULL )
    4.27 @@ -398,8 +404,16 @@ long do_dom0_op(dom0_op_t *u_dom0_op)
    4.28              put_domain(d);
    4.29              break;
    4.30          }
    4.31 +
     4.32 +        /* Find first existing, not-down vcpu starting from the requested id. */
    4.33 +        v = NULL;
    4.34 +        for ( i = op->u.getvcpucontext.vcpu; i < MAX_VIRT_CPUS; i++ )
    4.35 +        {
    4.36 +            v = d->vcpu[i];
    4.37 +            if ( v != NULL && !(test_bit(_VCPUF_down, &v->vcpu_flags)) )
    4.38 +                break;
    4.39 +        }
    4.40          
    4.41 -        v = d->vcpu[op->u.getvcpucontext.vcpu];
    4.42          if ( v == NULL )
    4.43          {
    4.44              ret = -ESRCH;
     5.1 --- a/xen/common/schedule.c	Tue Jun 21 08:59:34 2005 +0000
     5.2 +++ b/xen/common/schedule.c	Tue Jun 21 09:59:24 2005 +0000
     5.3 @@ -261,6 +261,40 @@ static long do_yield(void)
     5.4      return 0;
     5.5  }
     5.6  
     5.7 +/* Mark target vcpu as non-runnable so it is not scheduled */
     5.8 +static long do_vcpu_down(int vcpu)
     5.9 +{
    5.10 +    struct vcpu *target;
    5.11 +    
     5.12 +    if ( vcpu >= MAX_VIRT_CPUS )
    5.13 +        return -EINVAL;
    5.14 +
    5.15 +    target = current->domain->vcpu[vcpu];
    5.16 +    if ( target == NULL )
    5.17 +        return -ESRCH;
    5.18 +    set_bit(_VCPUF_down, &target->vcpu_flags);
    5.19 +
    5.20 +    return 0;
    5.21 +}
    5.22 +
    5.23 +/* Mark target vcpu as runnable and wake it */
    5.24 +static long do_vcpu_up(int vcpu)
    5.25 +{
    5.26 +    struct vcpu *target;
    5.27 +   
     5.28 +    if ( vcpu >= MAX_VIRT_CPUS )
    5.29 +        return -EINVAL;
    5.30 +
    5.31 +    target = current->domain->vcpu[vcpu];
    5.32 +    if ( target == NULL )
    5.33 +        return -ESRCH;
    5.34 +    clear_bit(_VCPUF_down, &target->vcpu_flags);
    5.35 +    /* wake vcpu */
    5.36 +    domain_wake(target);
    5.37 +
    5.38 +    return 0;
    5.39 +}
    5.40 +
    5.41  /*
    5.42   * Demultiplex scheduler-related hypercalls.
    5.43   */
    5.44 @@ -290,6 +324,16 @@ long do_sched_op(unsigned long op)
    5.45          domain_shutdown((u8)(op >> SCHEDOP_reasonshift));
    5.46          break;
    5.47      }
    5.48 +    case SCHEDOP_vcpu_down:
    5.49 +    {
    5.50 +        ret = do_vcpu_down((int)(op >> SCHEDOP_vcpushift));
    5.51 +        break;
    5.52 +    }
    5.53 +    case SCHEDOP_vcpu_up:
    5.54 +    {
    5.55 +        ret = do_vcpu_up((int)(op >> SCHEDOP_vcpushift));
    5.56 +        break;
    5.57 +    }
    5.58  
    5.59      default:
    5.60          ret = -ENOSYS;
     6.1 --- a/xen/include/public/xen.h	Tue Jun 21 08:59:34 2005 +0000
     6.2 +++ b/xen/include/public/xen.h	Tue Jun 21 09:59:24 2005 +0000
     6.3 @@ -58,7 +58,7 @@
     6.4  #define __HYPERVISOR_boot_vcpu            24
     6.5  #define __HYPERVISOR_set_segment_base     25 /* x86/64 only */
     6.6  #define __HYPERVISOR_mmuext_op            26
     6.7 -#define __HYPERVISOR_policy_op		  27
     6.8 +#define __HYPERVISOR_policy_op            27
     6.9  
    6.10  /* 
    6.11   * VIRTUAL INTERRUPTS
    6.12 @@ -201,8 +201,11 @@ struct mmuext_op {
    6.13  #define SCHEDOP_yield           0   /* Give up the CPU voluntarily.       */
    6.14  #define SCHEDOP_block           1   /* Block until an event is received.  */
    6.15  #define SCHEDOP_shutdown        2   /* Stop executing this domain.        */
     6.16 +#define SCHEDOP_vcpu_down       3   /* Make target VCPU not runnable.     */
     6.17 +#define SCHEDOP_vcpu_up         4   /* Make target VCPU runnable.         */
    6.18  #define SCHEDOP_cmdmask       255   /* 8-bit command. */
    6.19  #define SCHEDOP_reasonshift     8   /* 8-bit reason code. (SCHEDOP_shutdown) */
     6.20 +#define SCHEDOP_vcpushift       8   /* 8-bit VCPU target. (SCHEDOP_vcpu_up|down) */
    6.21  
    6.22  /*
    6.23   * Reason codes for SCHEDOP_shutdown. These may be interpreted by control 
     7.1 --- a/xen/include/xen/sched.h	Tue Jun 21 08:59:34 2005 +0000
     7.2 +++ b/xen/include/xen/sched.h	Tue Jun 21 09:59:24 2005 +0000
     7.3 @@ -348,6 +348,9 @@ extern struct domain *domain_list;
     7.4   /* Initialization completed. */
     7.5  #define _VCPUF_initialised     8
     7.6  #define VCPUF_initialised      (1UL<<_VCPUF_initialised)
      7.7 + /* VCPU is not runnable. */
     7.8 +#define _VCPUF_down            9
     7.9 +#define VCPUF_down             (1UL<<_VCPUF_down)
    7.10  
    7.11  /*
    7.12   * Per-domain flags (domain_flags).
    7.13 @@ -377,7 +380,7 @@ extern struct domain *domain_list;
    7.14  static inline int domain_runnable(struct vcpu *v)
    7.15  {
    7.16      return ( (atomic_read(&v->pausecnt) == 0) &&
    7.17 -             !(v->vcpu_flags & (VCPUF_blocked|VCPUF_ctrl_pause)) &&
    7.18 +             !(v->vcpu_flags & (VCPUF_blocked|VCPUF_ctrl_pause|VCPUF_down)) &&
    7.19               !(v->domain->domain_flags & (DOMF_shutdown|DOMF_shuttingdown)) );
    7.20  }
    7.21