ia64/xen-unstable

changeset 7170:85f92475b943

Create a new vcpu_op() hypercall. It replaces the old boot_vcpu()
hypercall and the VCPU-related sched_op sub-commands (SCHEDOP_vcpu_up,
SCHEDOP_vcpu_down, SCHEDOP_vcpu_pickle).
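
For illustration only (not part of this changeset), a guest's secondary-CPU
bringup moves from the old single-step boot_vcpu() call to an explicit
two-step create/up sequence. The helper bring_up_vcpu below is hypothetical,
a minimal sketch assembled from the smpboot.c hunks in this diff:

    #include <asm-xen/xen-public/vcpu.h>

    /* Old interface: HYPERVISOR_boot_vcpu(cpu, &ctxt) created and unpaused
     * the VCPU in one step, and up/down/pickle were multiplexed onto
     * sched_op with the VCPU id shifted into the command word.  The new
     * interface is a single hypercall with explicit sub-commands. */
    static int bring_up_vcpu(int cpu, vcpu_guest_context_t *ctxt)
    {
        int rc = HYPERVISOR_vcpu_op(VCPUOP_create, cpu, ctxt);
        if (rc == 0)
            rc = HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL);
        return rc;
    }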

Signed-off-by: Keir Fraser <keir@xensource.com>
author kaf24@firebug.cl.cam.ac.uk
date Mon Oct 03 19:14:02 2005 +0100 (2005-10-03)
parents dd87869f877c
children 716a0d177ffa
files linux-2.6-xen-sparse/arch/xen/i386/kernel/process.c linux-2.6-xen-sparse/arch/xen/i386/kernel/smpboot.c linux-2.6-xen-sparse/arch/xen/x86_64/kernel/smpboot.c linux-2.6-xen-sparse/include/asm-xen/asm-i386/hypercall.h linux-2.6-xen-sparse/include/asm-xen/asm-ia64/hypercall.h linux-2.6-xen-sparse/include/asm-xen/asm-x86_64/hypercall.h xen/arch/x86/x86_32/entry.S xen/arch/x86/x86_64/entry.S xen/common/domain.c xen/common/schedule.c xen/include/public/vcpu.h xen/include/public/xen.h
line diff
     1.1 --- a/linux-2.6-xen-sparse/arch/xen/i386/kernel/process.c	Mon Oct 03 15:05:37 2005 +0100
     1.2 +++ b/linux-2.6-xen-sparse/arch/xen/i386/kernel/process.c	Mon Oct 03 19:14:02 2005 +0100
     1.3 @@ -49,6 +49,7 @@
     1.4  #include <asm/irq.h>
     1.5  #include <asm/desc.h>
     1.6  #include <asm-xen/xen-public/physdev.h>
     1.7 +#include <asm-xen/xen-public/vcpu.h>
     1.8  #ifdef CONFIG_MATH_EMULATION
     1.9  #include <asm/math_emu.h>
    1.10  #endif
    1.11 @@ -178,7 +179,7 @@ void cpu_idle (void)
    1.12  				   don't printk. */
    1.13  				__get_cpu_var(cpu_state) = CPU_DEAD;
    1.14  				/* Tell hypervisor to take vcpu down. */
    1.15 -				HYPERVISOR_vcpu_down(cpu);
    1.16 +				HYPERVISOR_vcpu_op(VCPUOP_down, cpu, NULL);
    1.17  #endif
    1.18  				play_dead();
    1.19  				local_irq_enable();
     2.1 --- a/linux-2.6-xen-sparse/arch/xen/i386/kernel/smpboot.c	Mon Oct 03 15:05:37 2005 +0100
     2.2 +++ b/linux-2.6-xen-sparse/arch/xen/i386/kernel/smpboot.c	Mon Oct 03 19:14:02 2005 +0100
     2.3 @@ -63,6 +63,7 @@
     2.4  #include <smpboot_hooks.h>
     2.5  
     2.6  #include <asm-xen/evtchn.h>
     2.7 +#include <asm-xen/xen-public/vcpu.h>
     2.8  
     2.9  /* Set if we find a B stepping CPU */
    2.10  static int __initdata smp_b_stepping;
    2.11 @@ -882,11 +883,13 @@ static int __init do_boot_cpu(int apicid
    2.12  
    2.13  	ctxt.ctrlreg[3] = virt_to_mfn(swapper_pg_dir) << PAGE_SHIFT;
    2.14  
    2.15 -	boot_error = HYPERVISOR_boot_vcpu(cpu, &ctxt);
    2.16 +	boot_error = HYPERVISOR_vcpu_op(VCPUOP_create, cpu, &ctxt);
    2.17  	if (boot_error)
    2.18  		printk("boot error: %ld\n", boot_error);
    2.19  
    2.20  	if (!boot_error) {
    2.21 +		HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL);
    2.22 +
    2.23  		/*
    2.24  		 * allow APs to start initializing.
    2.25  		 */
    2.26 @@ -1499,7 +1502,7 @@ int __devinit __cpu_up(unsigned int cpu)
    2.27  #ifdef CONFIG_HOTPLUG_CPU
    2.28  #ifdef CONFIG_XEN
    2.29  	/* Tell hypervisor to bring vcpu up. */
    2.30 -	HYPERVISOR_vcpu_up(cpu);
    2.31 +	HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL);
    2.32  #endif
    2.33  	/* Already up, and in cpu_quiescent now? */
    2.34  	if (cpu_isset(cpu, smp_commenced_mask)) {
    2.35 @@ -1621,5 +1624,6 @@ void vcpu_prepare(int vcpu)
    2.36  
    2.37  	ctxt.ctrlreg[3] = virt_to_mfn(swapper_pg_dir) << PAGE_SHIFT;
    2.38  
    2.39 -	(void)HYPERVISOR_boot_vcpu(vcpu, &ctxt);
    2.40 +	(void)HYPERVISOR_vcpu_op(VCPUOP_create, vcpu, &ctxt);
    2.41 +	(void)HYPERVISOR_vcpu_op(VCPUOP_up, vcpu, NULL);
    2.42  }
     3.1 --- a/linux-2.6-xen-sparse/arch/xen/x86_64/kernel/smpboot.c	Mon Oct 03 15:05:37 2005 +0100
     3.2 +++ b/linux-2.6-xen-sparse/arch/xen/x86_64/kernel/smpboot.c	Mon Oct 03 19:14:02 2005 +0100
     3.3 @@ -62,8 +62,8 @@
     3.4  #include <asm/nmi.h>
     3.5  #ifdef CONFIG_XEN
     3.6  #include <asm/arch_hooks.h>
     3.7 -
     3.8  #include <asm-xen/evtchn.h>
     3.9 +#include <asm-xen/xen-public/vcpu.h>
    3.10  #endif
    3.11  
    3.12  /* Change for real CPU hotplug. Note other files need to be fixed
    3.13 @@ -771,11 +771,13 @@ static int __cpuinit do_boot_cpu(int cpu
    3.14  
    3.15  	ctxt.ctrlreg[3] = virt_to_mfn(init_level4_pgt) << PAGE_SHIFT;
    3.16  
    3.17 -	boot_error = HYPERVISOR_boot_vcpu(cpu, &ctxt);
     3.18 +	boot_error = HYPERVISOR_vcpu_op(VCPUOP_create, cpu, &ctxt);
    3.19  	if (boot_error)
    3.20  		printk("boot error: %ld\n", boot_error);
    3.21  
    3.22  	if (!boot_error) {
    3.23 +		HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL);
    3.24 +
    3.25  		/*
    3.26  		 * allow APs to start initializing.
    3.27  		 */
     4.1 --- a/linux-2.6-xen-sparse/include/asm-xen/asm-i386/hypercall.h	Mon Oct 03 15:05:37 2005 +0100
     4.2 +++ b/linux-2.6-xen-sparse/include/asm-xen/asm-i386/hypercall.h	Mon Oct 03 19:14:02 2005 +0100
     4.3 @@ -316,26 +316,10 @@ HYPERVISOR_vm_assist(
     4.4  }
     4.5  
     4.6  static inline int
     4.7 -HYPERVISOR_boot_vcpu(
     4.8 -	unsigned long vcpu, vcpu_guest_context_t *ctxt)
     4.9 -{
    4.10 -	return _hypercall2(int, boot_vcpu, vcpu, ctxt);
    4.11 -}
    4.12 -
    4.13 -static inline int
    4.14 -HYPERVISOR_vcpu_up(
    4.15 -	int vcpu)
    4.16 +HYPERVISOR_vcpu_op(
    4.17 +	int cmd, int vcpuid, void *extra_args)
    4.18  {
    4.19 -	return _hypercall2(int, sched_op, SCHEDOP_vcpu_up |
    4.20 -			   (vcpu << SCHEDOP_vcpushift), 0);
    4.21 -}
    4.22 -
    4.23 -static inline int
    4.24 -HYPERVISOR_vcpu_pickle(
    4.25 -	int vcpu, vcpu_guest_context_t *ctxt)
    4.26 -{
    4.27 -	return _hypercall2(int, sched_op, SCHEDOP_vcpu_pickle |
    4.28 -			   (vcpu << SCHEDOP_vcpushift), ctxt);
    4.29 +	return _hypercall3(int, vcpu_op, cmd, vcpuid, extra_args);
    4.30  }
    4.31  
    4.32  static inline int
    4.33 @@ -357,24 +341,6 @@ HYPERVISOR_suspend(
    4.34  	return ret;
    4.35  }
    4.36  
    4.37 -static inline int
    4.38 -HYPERVISOR_vcpu_down(
    4.39 -	int vcpu)
    4.40 -{
    4.41 -	int ret;
    4.42 -	unsigned long ign1;
    4.43 -	/* Yes, I really do want to clobber edx here: when we resume a
    4.44 -	   vcpu after unpickling a multi-processor domain, it returns
    4.45 -	   here, but clobbers all of the call clobbered registers. */
    4.46 -	__asm__ __volatile__ (
    4.47 -		TRAP_INSTR
    4.48 -		: "=a" (ret), "=b" (ign1)
    4.49 -		: "0" (__HYPERVISOR_sched_op),
    4.50 -		"1" (SCHEDOP_vcpu_down | (vcpu << SCHEDOP_vcpushift))
    4.51 -		: "memory", "ecx", "edx" );
    4.52 -	return ret;
    4.53 -}
    4.54 -
    4.55  #endif /* __HYPERCALL_H__ */
    4.56  
    4.57  /*
     5.1 --- a/linux-2.6-xen-sparse/include/asm-xen/asm-ia64/hypercall.h	Mon Oct 03 15:05:37 2005 +0100
     5.2 +++ b/linux-2.6-xen-sparse/include/asm-xen/asm-ia64/hypercall.h	Mon Oct 03 19:14:02 2005 +0100
     5.3 @@ -601,24 +601,6 @@ HYPERVISOR_vm_assist(
     5.4      return 1;
     5.5  }
     5.6  
     5.7 -static inline int
     5.8 -HYPERVISOR_boot_vcpu(
     5.9 -    unsigned long vcpu, vcpu_guest_context_t *ctxt)
    5.10 -{
    5.11 -#if 0
    5.12 -    int ret;
    5.13 -    unsigned long ign1, ign2;
    5.14 -
    5.15 -    __asm__ __volatile__ (
    5.16 -        TRAP_INSTR
    5.17 -        : "=a" (ret), "=b" (ign1), "=c" (ign2)
    5.18 -	: "0" (__HYPERVISOR_boot_vcpu), "1" (vcpu), "2" (ctxt)
    5.19 -	: "memory");
    5.20 -
    5.21 -    return ret;
    5.22 -#endif
    5.23 -    return 1;
    5.24 -}
    5.25  #endif
    5.26  
    5.27  #endif /* __HYPERCALL_H__ */
     6.1 --- a/linux-2.6-xen-sparse/include/asm-xen/asm-x86_64/hypercall.h	Mon Oct 03 15:05:37 2005 +0100
     6.2 +++ b/linux-2.6-xen-sparse/include/asm-xen/asm-x86_64/hypercall.h	Mon Oct 03 19:14:02 2005 +0100
     6.3 @@ -302,26 +302,10 @@ HYPERVISOR_vm_assist(
     6.4  }
     6.5  
     6.6  static inline int
     6.7 -HYPERVISOR_boot_vcpu(
     6.8 -	unsigned long vcpu, vcpu_guest_context_t *ctxt)
     6.9 -{
    6.10 -	return _hypercall2(int, boot_vcpu, vcpu, ctxt);
    6.11 -}
    6.12 -
    6.13 -static inline int
    6.14 -HYPERVISOR_vcpu_up(
    6.15 -	int vcpu)
    6.16 +HYPERVISOR_vcpu_op(
    6.17 +	int cmd, int vcpuid, void *extra_args)
    6.18  {
    6.19 -	return _hypercall2(int, sched_op, SCHEDOP_vcpu_up |
    6.20 -			   (vcpu << SCHEDOP_vcpushift), 0);
    6.21 -}
    6.22 -
    6.23 -static inline int
    6.24 -HYPERVISOR_vcpu_pickle(
    6.25 -	int vcpu, vcpu_guest_context_t *ctxt)
    6.26 -{
    6.27 -	return _hypercall2(int, sched_op, SCHEDOP_vcpu_pickle |
    6.28 -			   (vcpu << SCHEDOP_vcpushift), ctxt);
    6.29 +	return _hypercall3(int, vcpu_op, cmd, vcpuid, extra_args);
    6.30  }
    6.31  
    6.32  static inline int
     7.1 --- a/xen/arch/x86/x86_32/entry.S	Mon Oct 03 15:05:37 2005 +0100
     7.2 +++ b/xen/arch/x86/x86_32/entry.S	Mon Oct 03 19:14:02 2005 +0100
     7.3 @@ -808,7 +808,7 @@ ENTRY(hypercall_table)
     7.4          .long do_vm_assist
     7.5          .long do_update_va_mapping_otherdomain
     7.6          .long do_switch_vm86
     7.7 -        .long do_boot_vcpu
     7.8 +        .long do_vcpu_op
     7.9          .long do_ni_hypercall       /* 25 */
    7.10          .long do_mmuext_op
    7.11          .long do_acm_op             /* 27 */
    7.12 @@ -841,7 +841,7 @@ ENTRY(hypercall_args_table)
    7.13          .byte 2 /* do_vm_assist         */
    7.14          .byte 5 /* do_update_va_mapping_otherdomain */
    7.15          .byte 0 /* do_switch_vm86       */
    7.16 -        .byte 2 /* do_boot_vcpu         */
    7.17 +        .byte 3 /* do_vcpu_op           */
    7.18          .byte 0 /* do_ni_hypercall      */  /* 25 */
    7.19          .byte 4 /* do_mmuext_op         */
    7.20          .byte 1 /* do_acm_op            */
     8.1 --- a/xen/arch/x86/x86_64/entry.S	Mon Oct 03 15:05:37 2005 +0100
     8.2 +++ b/xen/arch/x86/x86_64/entry.S	Mon Oct 03 19:14:02 2005 +0100
     8.3 @@ -629,7 +629,7 @@ ENTRY(hypercall_table)
     8.4          .quad do_vm_assist
     8.5          .quad do_update_va_mapping_otherdomain
     8.6          .quad do_switch_to_user
     8.7 -        .quad do_boot_vcpu
     8.8 +        .quad do_vcpu_op
     8.9          .quad do_set_segment_base   /* 25 */
    8.10          .quad do_mmuext_op
    8.11          .quad do_acm_op
    8.12 @@ -662,7 +662,7 @@ ENTRY(hypercall_args_table)
    8.13          .byte 2 /* do_vm_assist         */
    8.14          .byte 4 /* do_update_va_mapping_otherdomain */
    8.15          .byte 0 /* do_switch_to_user    */
    8.16 -        .byte 2 /* do_boot_vcpu         */
    8.17 +        .byte 3 /* do_vcpu_op           */
    8.18          .byte 2 /* do_set_segment_base  */  /* 25 */
    8.19          .byte 4 /* do_mmuext_op         */
    8.20          .byte 1 /* do_acm_op            */
     9.1 --- a/xen/common/domain.c	Mon Oct 03 15:05:37 2005 +0100
     9.2 +++ b/xen/common/domain.c	Mon Oct 03 19:14:02 2005 +0100
     9.3 @@ -18,6 +18,7 @@
     9.4  #include <xen/domain_page.h>
     9.5  #include <asm/debugger.h>
     9.6  #include <public/dom0_ops.h>
     9.7 +#include <public/vcpu.h>
     9.8  
     9.9  /* Both these structures are protected by the domlist_lock. */
    9.10  rwlock_t domlist_lock = RW_LOCK_UNLOCKED;
    9.11 @@ -366,37 +367,17 @@ int set_info_guest(struct domain *d, dom
    9.12      return rc;
    9.13  }
    9.14  
    9.15 -/*
    9.16 - * final_setup_guest is used for final setup and launching of domains other
    9.17 - * than domain 0. ie. the domains that are being built by the userspace dom0
    9.18 - * domain builder.
    9.19 - */
    9.20 -long do_boot_vcpu(unsigned long vcpu, struct vcpu_guest_context *ctxt) 
    9.21 +int boot_vcpu(struct domain *d, int vcpuid, struct vcpu_guest_context *ctxt) 
    9.22  {
    9.23 -    struct domain *d = current->domain;
    9.24      struct vcpu *v;
    9.25 -    int rc = 0;
    9.26 -    struct vcpu_guest_context *c;
    9.27 +    int rc;
    9.28  
    9.29 -    if ( (vcpu >= MAX_VIRT_CPUS) || (d->vcpu[vcpu] != NULL) )
    9.30 -        return -EINVAL;
    9.31 +    ASSERT(d->vcpu[vcpuid] == NULL);
    9.32  
    9.33 -    if ( alloc_vcpu_struct(d, vcpu) == NULL )
    9.34 +    if ( alloc_vcpu_struct(d, vcpuid) == NULL )
    9.35          return -ENOMEM;
    9.36  
    9.37 -    if ( (c = xmalloc(struct vcpu_guest_context)) == NULL )
    9.38 -    {
    9.39 -        rc = -ENOMEM;
    9.40 -        goto out;
    9.41 -    }
    9.42 -
    9.43 -    if ( copy_from_user(c, ctxt, sizeof(*c)) )
    9.44 -    {
    9.45 -        rc = -EFAULT;
    9.46 -        goto out;
    9.47 -    }
    9.48 -
    9.49 -    v = d->vcpu[vcpu];
    9.50 +    v = d->vcpu[vcpuid];
    9.51  
    9.52      atomic_set(&v->pausecnt, 0);
    9.53      v->cpumap = CPUMAP_RUNANYWHERE;
    9.54 @@ -405,22 +386,73 @@ long do_boot_vcpu(unsigned long vcpu, st
    9.55  
    9.56      arch_do_boot_vcpu(v);
    9.57  
    9.58 -    if ( (rc = arch_set_info_guest(v, c)) != 0 )
    9.59 +    if ( (rc = arch_set_info_guest(v, ctxt)) != 0 )
    9.60          goto out;
    9.61  
    9.62      sched_add_domain(v);
    9.63  
    9.64 -    /* domain_unpause_by_systemcontroller */
    9.65 -    if ( test_and_clear_bit(_VCPUF_ctrl_pause, &v->vcpu_flags) )
    9.66 -        vcpu_wake(v);
    9.67 +    set_bit(_VCPUF_down, &v->vcpu_flags);
    9.68 +    clear_bit(_VCPUF_ctrl_pause, &v->vcpu_flags);
    9.69  
    9.70 -    xfree(c);
    9.71      return 0;
    9.72  
    9.73   out:
    9.74 -    xfree(c);
    9.75 -    arch_free_vcpu_struct(d->vcpu[vcpu]);
    9.76 -    d->vcpu[vcpu] = NULL;
    9.77 +    arch_free_vcpu_struct(d->vcpu[vcpuid]);
    9.78 +    d->vcpu[vcpuid] = NULL;
    9.79 +    return rc;
    9.80 +}
    9.81 +
    9.82 +long do_vcpu_op(int cmd, int vcpuid, void *arg)
    9.83 +{
    9.84 +    struct domain *d = current->domain;
    9.85 +    struct vcpu *v;
    9.86 +    struct vcpu_guest_context *ctxt;
    9.87 +    long rc = 0;
    9.88 +
    9.89 +    if ( (vcpuid < 0) || (vcpuid >= MAX_VIRT_CPUS) )
    9.90 +        return -EINVAL;
    9.91 +
    9.92 +    if ( ((v = d->vcpu[vcpuid]) == NULL) && (cmd != VCPUOP_create) )
    9.93 +        return -ENOENT;
    9.94 +
    9.95 +    switch ( cmd )
    9.96 +    {
    9.97 +    case VCPUOP_create:
    9.98 +        if ( (ctxt = xmalloc(struct vcpu_guest_context)) == NULL )
    9.99 +        {
   9.100 +            rc = -ENOMEM;
   9.101 +            break;
   9.102 +        }
   9.103 +
   9.104 +        if ( copy_from_user(ctxt, arg, sizeof(*ctxt)) )
   9.105 +        {
   9.106 +            xfree(ctxt);
   9.107 +            rc = -EFAULT;
   9.108 +            break;
   9.109 +        }
   9.110 +
   9.111 +        LOCK_BIGLOCK(d);
   9.112 +        rc = (d->vcpu[vcpuid] == NULL) ? boot_vcpu(d, vcpuid, ctxt) : -EEXIST;
   9.113 +        UNLOCK_BIGLOCK(d);
   9.114 +
   9.115 +        xfree(ctxt);
   9.116 +        break;
   9.117 +
   9.118 +    case VCPUOP_up:
   9.119 +        if ( test_and_clear_bit(_VCPUF_down, &v->vcpu_flags) )
   9.120 +            vcpu_wake(v);
   9.121 +        break;
   9.122 +
   9.123 +    case VCPUOP_down:
   9.124 +        if ( !test_and_set_bit(_VCPUF_down, &v->vcpu_flags) )
   9.125 +            vcpu_sleep_nosync(v);
   9.126 +        break;
   9.127 +
   9.128 +    case VCPUOP_is_up:
   9.129 +        rc = !test_bit(_VCPUF_down, &v->vcpu_flags);
   9.130 +        break;
   9.131 +    }
   9.132 +
   9.133      return rc;
   9.134  }
   9.135  
    10.1 --- a/xen/common/schedule.c	Mon Oct 03 15:05:37 2005 +0100
    10.2 +++ b/xen/common/schedule.c	Mon Oct 03 19:14:02 2005 +0100
    10.3 @@ -270,69 +270,6 @@ static long do_yield(void)
    10.4      return 0;
    10.5  }
    10.6  
    10.7 -/* Mark target vcpu as non-runnable so it is not scheduled */
    10.8 -static long do_vcpu_down(int vcpu)
    10.9 -{
   10.10 -    struct vcpu *target;
   10.11 -    
   10.12 -    if ( vcpu > MAX_VIRT_CPUS )
   10.13 -        return -EINVAL;
   10.14 -
   10.15 -    target = current->domain->vcpu[vcpu];
   10.16 -    if ( target == NULL )
   10.17 -        return -ESRCH;
   10.18 -    set_bit(_VCPUF_down, &target->vcpu_flags);
   10.19 -
   10.20 -    return 0;
   10.21 -}
   10.22 -
   10.23 -/* Mark target vcpu as runnable and wake it */
   10.24 -static long do_vcpu_up(int vcpu)
   10.25 -{
   10.26 -    struct vcpu *target;
   10.27 -   
   10.28 -    if (vcpu > MAX_VIRT_CPUS)
   10.29 -        return -EINVAL;
   10.30 -
   10.31 -    target = current->domain->vcpu[vcpu];
   10.32 -    if ( target == NULL )
   10.33 -        return -ESRCH;
   10.34 -    clear_bit(_VCPUF_down, &target->vcpu_flags);
   10.35 -    /* wake vcpu */
   10.36 -    vcpu_wake(target);
   10.37 -
   10.38 -    return 0;
   10.39 -}
   10.40 -
   10.41 -static long do_vcpu_pickle(int vcpu, unsigned long arg)
   10.42 -{
   10.43 -    struct vcpu *v;
   10.44 -    vcpu_guest_context_t *c;
   10.45 -    int ret = 0;
   10.46 -
   10.47 -    if (vcpu >= MAX_VIRT_CPUS)
   10.48 -        return -EINVAL;
   10.49 -    v = current->domain->vcpu[vcpu];
   10.50 -    if (!v)
   10.51 -        return -ESRCH;
   10.52 -    /* Don't pickle vcpus which are currently running */
   10.53 -    if (!test_bit(_VCPUF_down, &v->vcpu_flags)) {
   10.54 -        return -EBUSY;
   10.55 -    }
   10.56 -    c = xmalloc(vcpu_guest_context_t);
   10.57 -    if (!c)
   10.58 -        return -ENOMEM;
   10.59 -    arch_getdomaininfo_ctxt(v, c);
   10.60 -    if (copy_to_user((vcpu_guest_context_t *)arg,
   10.61 -                     (const vcpu_guest_context_t *)c, sizeof(*c)))
   10.62 -        ret = -EFAULT;
   10.63 -    xfree(c);
   10.64 -    return ret;
   10.65 -}
   10.66 -
   10.67 -/*
   10.68 - * Demultiplex scheduler-related hypercalls.
   10.69 - */
   10.70  long do_sched_op(unsigned long op, unsigned long arg)
   10.71  {
   10.72      long ret = 0;
   10.73 @@ -359,21 +296,6 @@ long do_sched_op(unsigned long op, unsig
   10.74          domain_shutdown((u8)(op >> SCHEDOP_reasonshift));
   10.75          break;
   10.76      }
   10.77 -    case SCHEDOP_vcpu_down:
   10.78 -    {
   10.79 -        ret = do_vcpu_down((int)(op >> SCHEDOP_vcpushift));
   10.80 -        break;
   10.81 -    }
   10.82 -    case SCHEDOP_vcpu_up:
   10.83 -    {
   10.84 -        ret = do_vcpu_up((int)(op >> SCHEDOP_vcpushift));
   10.85 -        break;
   10.86 -    }
   10.87 -    case SCHEDOP_vcpu_pickle:
   10.88 -    {
   10.89 -        ret = do_vcpu_pickle((int)(op >> SCHEDOP_vcpushift), arg);
   10.90 -        break;
   10.91 -    }
   10.92  
   10.93      default:
   10.94          ret = -ENOSYS;
   10.95 @@ -395,8 +317,8 @@ long do_set_timer_op(s_time_t timeout)
   10.96      return 0;
   10.97  }
   10.98  
   10.99 -/** sched_id - fetch ID of current scheduler */
  10.100 -int sched_id()
  10.101 +/* sched_id - fetch ID of current scheduler */
  10.102 +int sched_id(void)
  10.103  {
  10.104      return ops.sched_id;
  10.105  }
    11.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    11.2 +++ b/xen/include/public/vcpu.h	Mon Oct 03 19:14:02 2005 +0100
    11.3 @@ -0,0 +1,55 @@
    11.4 +/******************************************************************************
    11.5 + * vcpu.h
    11.6 + * 
    11.7 + * VCPU creation and hotplug.
    11.8 + * 
    11.9 + * Copyright (c) 2005, Keir Fraser <keir@xensource.com>
   11.10 + */
   11.11 +
   11.12 +#ifndef __XEN_PUBLIC_VCPU_H__
   11.13 +#define __XEN_PUBLIC_VCPU_H__
   11.14 +
   11.15 +/*
   11.16 + * Prototype for this hypercall is:
   11.17 + *  int vcpu_op(int cmd, int vcpuid, void *extra_args)
   11.18 + * @cmd        == VCPUOP_??? (VCPU operation).
   11.19 + * @vcpuid     == VCPU to operate on.
   11.20 + * @extra_args == Operation-specific extra arguments (NULL if none).
   11.21 + */
   11.22 +
   11.23 +/*
   11.24 + * Create a new VCPU. This must be called before a VCPU can be referred to
   11.25 + * in any other hypercall (e.g., to bind event channels). The new VCPU
   11.26 + * will not run until it is brought up by VCPUOP_up.
   11.27 + * 
    11.29 + * @extra_args == pointer to vcpu_guest_context structure containing initial
   11.29 + *               state for the new VCPU.
   11.30 + */
   11.31 +#define VCPUOP_create               0
   11.32 +
   11.33 +/*
   11.34 + * Bring up a newly-created or previously brought-down VCPU. This makes the
   11.35 + * VCPU runnable.
   11.36 + */
   11.37 +#define VCPUOP_up                   1
   11.38 +
   11.39 +/*
   11.40 + * Bring down a VCPU (i.e., make it non-runnable).
   11.41 + * There are a few caveats that callers should observe:
    11.42 + *  1. This operation may return, and VCPUOP_is_up may return false, before the
   11.43 + *     VCPU stops running (i.e., the command is asynchronous). It is a good
   11.44 + *     idea to ensure that the VCPU has entered a non-critical loop before
   11.45 + *     bringing it down. Alternatively, this operation is guaranteed
   11.46 + *     synchronous if invoked by the VCPU itself.
   11.47 + *  2. After a VCPU is created, there is currently no way to drop all its
   11.48 + *     references to domain memory. Even a VCPU that is down still holds
   11.49 + *     memory references via its pagetable base pointer and GDT. It is good
    11.50 + *     practice to move a VCPU onto an 'idle' or default page table, LDT and
   11.51 + *     GDT before bringing it down.
   11.52 + */
   11.53 +#define VCPUOP_down                 2
   11.54 +
   11.55 +/* Returns 1 if the given VCPU is up. */
   11.56 +#define VCPUOP_is_up                3
   11.57 +
   11.58 +#endif /* __XEN_PUBLIC_VCPU_H__ */
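
As a usage sketch of the caveats documented above (illustrative only; the
helper vcpu_offline is hypothetical, not part of this changeset), a remote
caller that wants to observe the effect of VCPUOP_down can pair it with
VCPUOP_is_up:

    /* Ask Xen to take 'cpu' down, then poll until the VCPU is marked
     * down.  Per caveat 1, is_up reporting 0 means the request took
     * effect, not that the VCPU has finished running; a VCPU invoking
     * VCPUOP_down on itself (as in process.c above) is synchronous. */
    static void vcpu_offline(int cpu)
    {
        HYPERVISOR_vcpu_op(VCPUOP_down, cpu, NULL);
        while (HYPERVISOR_vcpu_op(VCPUOP_is_up, cpu, NULL) != 0)
            ; /* brief spin; Xen clears the flag within the hypercall */
    }
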
    12.1 --- a/xen/include/public/xen.h	Mon Oct 03 15:05:37 2005 +0100
    12.2 +++ b/xen/include/public/xen.h	Mon Oct 03 19:14:02 2005 +0100
    12.3 @@ -55,7 +55,7 @@
    12.4  #define __HYPERVISOR_update_va_mapping_otherdomain 22
    12.5  #define __HYPERVISOR_switch_vm86          23 /* x86/32 only */
    12.6  #define __HYPERVISOR_switch_to_user       23 /* x86/64 only */
    12.7 -#define __HYPERVISOR_boot_vcpu            24
    12.8 +#define __HYPERVISOR_vcpu_op              24
    12.9  #define __HYPERVISOR_set_segment_base     25 /* x86/64 only */
   12.10  #define __HYPERVISOR_mmuext_op            26
   12.11  #define __HYPERVISOR_acm_op               27
   12.12 @@ -201,12 +201,8 @@ struct mmuext_op {
   12.13  #define SCHEDOP_yield           0   /* Give up the CPU voluntarily.       */
   12.14  #define SCHEDOP_block           1   /* Block until an event is received.  */
   12.15  #define SCHEDOP_shutdown        2   /* Stop executing this domain.        */
   12.16 -#define SCHEDOP_vcpu_down       3   /* make target VCPU not-runnable.     */
   12.17 -#define SCHEDOP_vcpu_up         4   /* make target VCPU runnable.         */
   12.18 -#define SCHEDOP_vcpu_pickle     5   /* save a vcpu's context to memory.   */
   12.19  #define SCHEDOP_cmdmask       255   /* 8-bit command. */
   12.20  #define SCHEDOP_reasonshift     8   /* 8-bit reason code. (SCHEDOP_shutdown) */
   12.21 -#define SCHEDOP_vcpushift       8   /* 8-bit VCPU target. (SCHEDOP_up|down) */
   12.22  
   12.23  /*
   12.24   * Reason codes for SCHEDOP_shutdown. These may be interpreted by control