ia64/xen-unstable

changeset 1246:0933e977b522

bitkeeper revision 1.825.3.7 (4064773d4Vkaf0WFguSCpOO7O0qqEQ)

Add Atropos code and update control interface.
author mwilli2@equilibrium.research.intel-research.net
date Fri Mar 26 18:32:29 2004 +0000 (2004-03-26)
parents 7944940addb2
children 57d3d92c1ccc
files .rootkeys tools/xc/lib/xc.h tools/xc/lib/xc_atropos.c tools/xc/lib/xc_bvtsched.c tools/xc/lib/xc_misc.c tools/xc/lib/xc_rrobin.c tools/xc/py/Xc.c xen/common/dom0_ops.c xen/common/keyhandler.c xen/common/sched_atropos.c xen/common/sched_bvt.c xen/common/sched_rrobin.c xen/common/schedule.c xen/include/hypervisor-ifs/dom0_ops.h xen/include/hypervisor-ifs/sched_ctl.h xen/include/xen/sched-if.h xen/include/xen/sched.h
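
Both DOM0_SCHEDCTL and DOM0_ADJUSTDOM now carry a direction field (SCHED_INFO_PUT or
SCHED_INFO_GET), so the same hypercalls can either set scheduler parameters or read them
back, and a new DOM0_SCHED_ID op reports which scheduler is running. The following is a
rough sketch, not part of the changeset, of how a dom0 tool might drive the new libxc
calls below; xc_handle and domid are assumed to come from the caller, the time values
assume the nanosecond units used by s_time_t, and error handling is trimmed:

    /* Sketch only -- illustrative use of the new get/set interface. */
    int sched;
    u64 period, slice, latency;
    int xtratime;

    if ( (xc_sched_id(xc_handle, &sched) == 0) && (sched == SCHED_ATROPOS) )
    {
        /* Ask for 10ms of CPU in every 50ms period, a 5ms latency hint,
         * and permission to soak up slack time. */
        xc_atropos_domain_set(xc_handle, domid, 50000000ULL, 10000000ULL,
                              5000000ULL, 1);

        /* Read the parameters back to see what the hypervisor now holds. */
        xc_atropos_domain_get(xc_handle, domid, &period, &slice,
                              &latency, &xtratime);
    }
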
line diff
     1.1 --- a/.rootkeys	Fri Mar 26 17:08:45 2004 +0000
     1.2 +++ b/.rootkeys	Fri Mar 26 18:32:29 2004 +0000
     1.3 @@ -175,6 +175,7 @@ 3e54c38dkHAev597bPr71-hGzTdocg xen/commo
     1.4  4051bcecFeq4DE70p4zGO5setf47CA xen/common/physdev.c
     1.5  4006e659i9j-doVxY7DKOGU4XVin1Q xen/common/rbtree.c
     1.6  3ddb79bdHqdQpATqC0rmUZNbsb6L6A xen/common/resource.c
     1.7 +4064773cJ31vZt-zhbSoxqft1Jaw0w xen/common/sched_atropos.c
     1.8  40589968dD2D1aejwSOvrROg7fOvGQ xen/common/sched_bvt.c
     1.9  40589968be_t_n0-w6ggceW7h-sx0w xen/common/sched_rrobin.c
    1.10  3e397e6619PgAfBbw2XFbXkewvUWgw xen/common/schedule.c
     2.1 --- a/tools/xc/lib/xc.h	Fri Mar 26 17:08:45 2004 +0000
     2.2 +++ b/tools/xc/lib/xc.h	Fri Mar 26 18:32:29 2004 +0000
     2.3 @@ -74,6 +74,7 @@ int xc_netbsd_build(int xc_handle,
     2.4  
     2.5  int xc_bvtsched_global_set(int xc_handle,
     2.6                             unsigned long ctx_allow);
     2.7 +
     2.8  int xc_bvtsched_domain_set(int xc_handle,
     2.9                             u64 domid,
    2.10                             unsigned long mcuadv,
    2.11 @@ -81,13 +82,32 @@ int xc_bvtsched_domain_set(int xc_handle
    2.12                             unsigned long warpl,
    2.13                             unsigned long warpu);
    2.14  
    2.15 +int xc_bvtsched_global_get(int xc_handle,
    2.16 +			   unsigned long *ctx_allow);
    2.17 +
    2.18 +int xc_bvtsched_domain_get(int xc_handle,
    2.19 +                           u64 domid,
    2.20 +                           unsigned long *mcuadv,
    2.21 +                           unsigned long *warp,
    2.22 +                           unsigned long *warpl,
    2.23 +                           unsigned long *warpu);
    2.24 +
    2.25  int xc_atropos_domain_set(int xc_handle,
    2.26  			  u64 domid,
    2.27 +			  u64 period, u64 slice, u64 latency,
    2.28  			  int xtratime);
    2.29  
    2.30 +int xc_atropos_domain_get(int xc_handle,
    2.31 +                          u64 domid,
    2.32 +                          u64* period, u64 *slice, u64 *latency,
    2.33 +                          int *xtratime);
    2.34 +
    2.35  int xc_rrobin_global_set(int xc_handle,
    2.36  			 u64 slice);
    2.37  
    2.38 +int xc_rrobin_global_get(int xc_handle,
    2.39 +                         u64 *slice);
    2.40 +
    2.41  typedef struct {
    2.42      unsigned long credit_bytes;
    2.43      unsigned long credit_usec;
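
Each *_set prototype in this header now has a matching *_get that returns the current
values through pointer arguments. A minimal sketch of the intended set-then-query round
trip for the BVT per-domain calls (illustration only; it assumes <stdio.h> and this
header are included and that xc_handle and dom are valid):

    /* Sketch only: exercise the new BVT set/get pair from a dom0 tool. */
    int show_bvt_params(int xc_handle, u64 dom)
    {
        unsigned long mcuadv = 10, warp = 0, warpl = 0, warpu = 0;

        if ( xc_bvtsched_domain_set(xc_handle, dom, mcuadv, warp, warpl, warpu) )
            return -1;
        if ( xc_bvtsched_domain_get(xc_handle, dom, &mcuadv, &warp,
                                    &warpl, &warpu) )
            return -1;

        printf("dom %llu: mcuadv=%lu warp=%lu warpl=%lu warpu=%lu\n",
               dom, mcuadv, warp, warpl, warpu);
        return 0;
    }
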
     3.1 --- a/tools/xc/lib/xc_atropos.c	Fri Mar 26 17:08:45 2004 +0000
     3.2 +++ b/tools/xc/lib/xc_atropos.c	Fri Mar 26 18:32:29 2004 +0000
     3.3 @@ -8,29 +8,44 @@
     3.4  
     3.5  #include "xc_private.h"
     3.6  
     3.7 -int xc_atropos_global_set(int xc_handle,
     3.8 -			  unsigned long ctx_allow)
     3.9 +int xc_atropos_domain_set(int xc_handle,
    3.10 +			  u64 domid, u64 period, u64 slice, u64 latency,
    3.11 +                          int xtratime)
    3.12  {
    3.13      dom0_op_t op;
    3.14 +    struct atropos_adjdom *p = &op.u.adjustdom.u.atropos;
    3.15  
    3.16 -    op.cmd = DOM0_SCHEDCTL;
    3.17 -    op.u.schedctl.sched_id = SCHED_BVT;
    3.18 +    op.cmd = DOM0_ADJUSTDOM;
    3.19 +    op.u.adjustdom.domain  = (domid_t)domid;
    3.20 +    op.u.adjustdom.sched_id = SCHED_ATROPOS;
    3.21 +    op.u.adjustdom.direction = SCHED_INFO_PUT;
    3.22  
    3.23 -    op.u.schedctl.u.bvt.ctx_allow = ctx_allow;
    3.24 +    p->period   = period;
    3.25 +    p->slice    = slice;
    3.26 +    p->latency  = latency;
    3.27 +    p->xtratime = xtratime;
    3.28  
    3.29      return do_dom0_op(xc_handle, &op);
    3.30  }
    3.31  
    3.32 -int xc_atropos_domain_set(int xc_handle,
    3.33 -			  u64 domid, int xtratime)
    3.34 +int xc_atropos_domain_get(int xc_handle, u64 domid, u64 *period,
    3.35 +                          u64 *slice, u64 *latency, int *xtratime)
    3.36  {
    3.37      dom0_op_t op;
    3.38 +    int ret;
    3.39 +    struct atropos_adjdom *p = &op.u.adjustdom.u.atropos;
    3.40  
    3.41 -    op.cmd = DOM0_ADJUSTDOM;
    3.42 -    op.u.adjustdom.domain  = (domid_t)domid;
    3.43 +    op.cmd = DOM0_ADJUSTDOM;    
    3.44 +    op.u.adjustdom.domain = (domid_t)domid;
    3.45      op.u.adjustdom.sched_id = SCHED_ATROPOS;
    3.46 +    op.u.adjustdom.direction = SCHED_INFO_GET;
    3.47  
    3.48 -    op.u.adjustdom.u.atropos.xtratime = xtratime;
    3.49 +    ret = do_dom0_op(xc_handle, &op);
    3.50  
    3.51 -    return do_dom0_op(xc_handle, &op);
    3.52 +    *period   = p->period;
    3.53 +    *slice    = p->slice;
    3.54 +    *latency  = p->latency;
    3.55 +    *xtratime = p->xtratime;
    3.56 +
    3.57 +    return ret;
    3.58  }
     4.1 --- a/tools/xc/lib/xc_bvtsched.c	Fri Mar 26 17:08:45 2004 +0000
     4.2 +++ b/tools/xc/lib/xc_bvtsched.c	Fri Mar 26 18:32:29 2004 +0000
     4.3 @@ -15,12 +15,29 @@ int xc_bvtsched_global_set(int xc_handle
     4.4  
     4.5      op.cmd = DOM0_SCHEDCTL;
     4.6      op.u.schedctl.sched_id = SCHED_BVT;
     4.7 -
     4.8 +    op.u.schedctl.direction = SCHED_INFO_PUT;
     4.9      op.u.schedctl.u.bvt.ctx_allow = ctx_allow;
    4.10  
    4.11      return do_dom0_op(xc_handle, &op);
    4.12  }
    4.13  
    4.14 +int xc_bvtsched_global_get(int xc_handle,
    4.15 +			   unsigned long *ctx_allow)
    4.16 +{
    4.17 +    dom0_op_t op;
    4.18 +    int ret;
    4.19 +    
    4.20 +    op.cmd = DOM0_SCHEDCTL;
    4.21 +    op.u.schedctl.sched_id = SCHED_BVT;
    4.22 +    op.u.schedctl.direction = SCHED_INFO_GET;
    4.23 +
    4.24 +    ret = do_dom0_op(xc_handle, &op);
    4.25 +
    4.26 +    *ctx_allow = op.u.schedctl.u.bvt.ctx_allow;
    4.27 +
    4.28 +    return ret;
    4.29 +}
    4.30 +
    4.31  int xc_bvtsched_domain_set(int xc_handle,
    4.32                             u64 domid,
    4.33                             unsigned long mcuadv,
    4.34 @@ -34,11 +51,38 @@ int xc_bvtsched_domain_set(int xc_handle
    4.35      op.cmd = DOM0_ADJUSTDOM;
    4.36      op.u.adjustdom.domain  = (domid_t)domid;
    4.37      op.u.adjustdom.sched_id = SCHED_BVT;
    4.38 +    op.u.adjustdom.direction = SCHED_INFO_PUT;
    4.39  
    4.40      bvtadj->mcu_adv = mcuadv;
    4.41      bvtadj->warp    = warp;
    4.42      bvtadj->warpl   = warpl;
    4.43      bvtadj->warpu   = warpu;
    4.44 -
    4.45      return do_dom0_op(xc_handle, &op);
    4.46  }
    4.47 +
    4.48 +
    4.49 +int xc_bvtsched_domain_get(int xc_handle,
    4.50 +			   u64 domid,
    4.51 +			   unsigned long *mcuadv,
    4.52 +			   unsigned long *warp,
    4.53 +                           unsigned long *warpl,
    4.54 +                           unsigned long *warpu)
    4.55 +{
    4.56 +    
    4.57 +    dom0_op_t op;
    4.58 +    int ret;
    4.59 +    struct bvt_adjdom *adjptr = &op.u.adjustdom.u.bvt;
    4.60 +
    4.61 +    op.cmd = DOM0_ADJUSTDOM;
    4.62 +    op.u.adjustdom.domain  = (domid_t)domid;
    4.63 +    op.u.adjustdom.sched_id = SCHED_BVT;
    4.64 +    op.u.adjustdom.direction = SCHED_INFO_GET;
    4.65 +
    4.66 +    ret = do_dom0_op(xc_handle, &op);
    4.67 +
    4.68 +    *mcuadv = adjptr->mcu_adv;
    4.69 +    *warp   = adjptr->warp;
    4.70 +    *warpl  = adjptr->warpl;
    4.71 +    *warpu  = adjptr->warpu;
    4.72 +    return ret;
    4.73 +}
     5.1 --- a/tools/xc/lib/xc_misc.c	Fri Mar 26 17:08:45 2004 +0000
     5.2 +++ b/tools/xc/lib/xc_misc.c	Fri Mar 26 18:32:29 2004 +0000
     5.3 @@ -68,3 +68,20 @@ int xc_physinfo(int xc_handle,
     5.4      return 0;
     5.5  }
     5.6  
     5.7 +
     5.8 +int xc_sched_id(int xc_handle,
     5.9 +                int *sched_id)
    5.10 +{
    5.11 +    int ret;
    5.12 +    dom0_op_t op;
    5.13 +    
    5.14 +    op.cmd = DOM0_SCHED_ID;
    5.15 +    op.interface_version = DOM0_INTERFACE_VERSION;
    5.16 +    
    5.17 +    if((ret = do_dom0_op(xc_handle, &op))) return ret;
    5.18 +    
    5.19 +    *sched_id = op.u.sched_id.sched_id;
    5.20 +    
    5.21 +    return 0;
    5.22 +}
    5.23 +
     6.1 --- a/tools/xc/lib/xc_rrobin.c	Fri Mar 26 17:08:45 2004 +0000
     6.2 +++ b/tools/xc/lib/xc_rrobin.c	Fri Mar 26 18:32:29 2004 +0000
     6.3 @@ -11,11 +11,27 @@
     6.4  int xc_rrobin_global_set(int xc_handle, u64 slice)
     6.5  {
     6.6      dom0_op_t op;
     6.7 +    op.cmd = DOM0_SCHEDCTL;
     6.8 +    op.u.schedctl.sched_id = SCHED_RROBIN;
     6.9 +    op.u.schedctl.direction = SCHED_INFO_PUT;
    6.10 +
    6.11 +    op.u.schedctl.u.rrobin.slice = slice;
    6.12 +    return do_dom0_op(xc_handle, &op);
    6.13 +}
    6.14 +
    6.15 +
    6.16 +int xc_rrobin_global_get(int xc_handle, u64 *slice)
    6.17 +{
    6.18 +    dom0_op_t op;
    6.19 +    int ret;
    6.20  
    6.21      op.cmd = DOM0_SCHEDCTL;
    6.22      op.u.schedctl.sched_id = SCHED_RROBIN;
    6.23 +    op.u.schedctl.direction = SCHED_INFO_GET;
    6.24  
    6.25 -    op.u.schedctl.u.rrobin.slice = slice;
    6.26 +    ret = do_dom0_op(xc_handle, &op);
    6.27  
    6.28 -    return do_dom0_op(xc_handle, &op);
    6.29 +    *slice = op.u.schedctl.u.rrobin.slice;
    6.30 +
    6.31 +    return ret;
    6.32  }
     7.1 --- a/tools/xc/py/Xc.c	Fri Mar 26 17:08:45 2004 +0000
     7.2 +++ b/tools/xc/py/Xc.c	Fri Mar 26 18:32:29 2004 +0000
     7.3 @@ -281,6 +281,23 @@ static PyObject *pyxc_bvtsched_global_se
     7.4      return zero;
     7.5  }
     7.6  
     7.7 +static PyObject *pyxc_bvtsched_global_get(PyObject *self,
     7.8 +					  PyObject *args,
     7.9 +					  PyObject *kwds)
    7.10 +{
    7.11 +    XcObject *xc = (XcObject *)self;
    7.12 +    
    7.13 +    unsigned long ctx_allow;
    7.14 +    
    7.15 +    if ( !PyArg_ParseTuple(args, "") )
    7.16 +        return NULL;
    7.17 +    
    7.18 +    if ( xc_bvtsched_global_get(xc->xc_handle, &ctx_allow) != 0 )
    7.19 +        return PyErr_SetFromErrno(xc_error);
    7.20 +    
     7.21 +    return Py_BuildValue("{s:l}", "ctx_allow", ctx_allow);
    7.22 +}
    7.23 +
    7.24  static PyObject *pyxc_bvtsched_domain_set(PyObject *self,
    7.25                                            PyObject *args,
    7.26                                            PyObject *kwds)
    7.27 @@ -305,6 +322,31 @@ static PyObject *pyxc_bvtsched_domain_se
    7.28      return zero;
    7.29  }
    7.30  
    7.31 +static PyObject *pyxc_bvtsched_domain_get(PyObject *self,
    7.32 +                                          PyObject *args,
    7.33 +                                          PyObject *kwds)
    7.34 +{
    7.35 +    XcObject *xc = (XcObject *)self;
    7.36 +    u64 dom;
    7.37 +    unsigned long mcuadv, warp, warpl, warpu;
    7.38 +    
    7.39 +    static char *kwd_list[] = { "dom", NULL };
    7.40 +
    7.41 +    if ( !PyArg_ParseTupleAndKeywords(args, kwds, "L", kwd_list, &dom) )
    7.42 +        return NULL;
    7.43 +    
    7.44 +    if ( xc_bvtsched_domain_get(xc->xc_handle, dom, &mcuadv, &warp,
    7.45 +                                &warpl, &warpu) != 0 )
    7.46 +        return PyErr_SetFromErrno(xc_error);
    7.47 +
    7.48 +    return Py_BuildValue("{s:L,s:l,s:l,s:l,s:l}",
    7.49 +                         "domain", dom,
    7.50 +                         "mcuadv", mcuadv,
    7.51 +                         "warp",   warp,
    7.52 +                         "warpl",  warpl,
    7.53 +                         "warpu",  warpu);
    7.54 +}
    7.55 +
    7.56  static PyObject *pyxc_vif_scheduler_set(PyObject *self,
    7.57                                          PyObject *args,
    7.58                                          PyObject *kwds)
    7.59 @@ -879,22 +921,52 @@ static PyObject *pyxc_atropos_domain_set
    7.60                                           PyObject *kwds)
    7.61  {
    7.62      XcObject *xc = (XcObject *)self;
    7.63 +    u64 domid;
    7.64 +    u64 period, slice, latency;
    7.65      int xtratime;
    7.66 -    u64 domid;
    7.67  
    7.68 -    static char *kwd_list[] = { "dom", "xtratime", NULL };
    7.69 +    static char *kwd_list[] = { "dom", "period", "slice", "latency",
    7.70 +				"xtratime", NULL };
    7.71      
    7.72 -    if( !PyArg_ParseTupleAndKeywords(args, kwds, "Li", kwd_list, &domid,
    7.73 -                                     &xtratime) )
    7.74 +    if( !PyArg_ParseTupleAndKeywords(args, kwds, "LLLLi", kwd_list, &domid,
    7.75 +                                     &period, &slice, &latency, &xtratime) )
    7.76          return NULL;
    7.77     
    7.78 -    if ( xc_atropos_domain_set(xc->xc_handle, domid, xtratime) != 0 )
    7.79 +    if ( xc_atropos_domain_set(xc->xc_handle, domid, period, slice,
    7.80 +			       latency, xtratime) != 0 )
    7.81          return PyErr_SetFromErrno(xc_error);
    7.82  
    7.83      Py_INCREF(zero);
    7.84      return zero;
    7.85  }
    7.86  
    7.87 +static PyObject *pyxc_atropos_domain_get(PyObject *self,
    7.88 +                                         PyObject *args,
    7.89 +                                         PyObject *kwds)
    7.90 +{
    7.91 +    XcObject *xc = (XcObject *)self;
    7.92 +    u64 domid;
    7.93 +    u64 period, slice, latency;
    7.94 +    int xtratime;
    7.95 +    
    7.96 +    static char *kwd_list[] = { "dom", NULL };
    7.97 +
    7.98 +    if( !PyArg_ParseTupleAndKeywords(args, kwds, "L", kwd_list, &domid) )
    7.99 +        return NULL;
   7.100 +    
   7.101 +    if ( xc_atropos_domain_get( xc->xc_handle, domid, &period,
   7.102 +                                &slice, &latency, &xtratime ) )
   7.103 +        return PyErr_SetFromErrno(xc_error);
   7.104 +
   7.105 +    return Py_BuildValue("{s:L,s:L,s:L,s:L,s:i}",
   7.106 +                         "domain",  domid,
   7.107 +                         "period",  period,
   7.108 +                         "slice",   slice,
   7.109 +                         "latency", latency,
   7.110 +                         "xtratime", xtratime);
   7.111 +}
   7.112 +
   7.113 +
   7.114  static PyObject *pyxc_rrobin_global_set(PyObject *self,
   7.115                                          PyObject *args,
   7.116                                          PyObject *kwds)
   7.117 @@ -914,6 +986,22 @@ static PyObject *pyxc_rrobin_global_set(
   7.118      return zero;
   7.119  }
   7.120  
   7.121 +static PyObject *pyxc_rrobin_global_get(PyObject *self,
   7.122 +                                        PyObject *args,
   7.123 +                                        PyObject *kwds)
   7.124 +{
   7.125 +    XcObject *xc = (XcObject *)self;
   7.126 +    u64 slice;
   7.127 +
   7.128 +    if ( !PyArg_ParseTuple(args, "") )
   7.129 +        return NULL;
   7.130 +
   7.131 +    if ( xc_rrobin_global_get(xc->xc_handle, &slice) != 0 )
   7.132 +        return PyErr_SetFromErrno(xc_error);
   7.133 +    
    7.134 +    return Py_BuildValue("{s:L}", "slice", slice);
   7.135 +}
   7.136 +
   7.137  
   7.138  static PyMethodDef pyxc_methods[] = {
   7.139      { "domain_create", 
   7.140 @@ -1015,6 +1103,13 @@ static PyMethodDef pyxc_methods[] = {
   7.141        " ctx_allow [int]: Minimal guaranteed quantum (I think!).\n\n"
   7.142        "Returns: [int] 0 on success; -1 on error.\n" },
   7.143  
   7.144 +    { "bvtsched_global_get",
   7.145 +      (PyCFunction)pyxc_bvtsched_global_get,
   7.146 +      METH_KEYWORDS, "\n"
   7.147 +      "Get global tuning parameters for BVT scheduler.\n"
   7.148 +      "Returns: [dict]:\n"
   7.149 +      " ctx_allow [int]: context switch allowance\n" },
   7.150 +
   7.151      { "bvtsched_domain_set",
   7.152        (PyCFunction)pyxc_bvtsched_domain_set,
   7.153        METH_VARARGS | METH_KEYWORDS, "\n"
   7.154 @@ -1026,21 +1121,56 @@ static PyMethodDef pyxc_methods[] = {
   7.155        " warpu  [int]:  Internal BVT parameter.\n\n"
   7.156        "Returns: [int] 0 on success; -1 on error.\n" },
   7.157  
   7.158 +    { "bvtsched_domain_get",
   7.159 +      (PyCFunction)pyxc_bvtsched_domain_get,
   7.160 +      METH_KEYWORDS, "\n"
   7.161 +      "Get per-domain tuning parameters under the BVT scheduler.\n"
   7.162 +      " dom [long]: Identifier of domain to be queried.\n"
   7.163 +      "Returns [dict]:\n"
   7.164 +      " domain [long]: Domain ID.\n"
   7.165 +      " mcuadv [long]: MCU Advance.\n"
   7.166 +      " warp   [long]: Warp.\n"
   7.167 +      " warpu  [long]:\n"
   7.168 +      " warpl  [long]: Warp limit,\n"
   7.169 +    },
   7.170 +
   7.171      { "atropos_domain_set",
   7.172        (PyCFunction)pyxc_atropos_domain_set,
   7.173 -      METH_VARARGS | METH_KEYWORDS, "\n"
   7.174 -      "Set the extra time flag for a domain when running with Atropos.\n"
   7.175 -      " dom [long]: domain to set\n"
   7.176 +      METH_KEYWORDS, "\n"
   7.177 +      "Set the scheduling parameters for a domain when running with Atropos.\n"
   7.178 +      " dom      [long]: domain to set\n"
   7.179 +      " period   [long]: domain's scheduling period\n"
   7.180 +      " slice    [long]: domain's slice per period\n"
   7.181 +      " latency  [long]: wakeup latency hint\n"
   7.182        " xtratime [int]: boolean\n"
   7.183        "Returns: [int] 0 on success; -1 on error.\n" },
   7.184  
   7.185 +    { "atropos_domain_get",
   7.186 +      (PyCFunction)pyxc_atropos_domain_get,
   7.187 +      METH_KEYWORDS, "\n"
   7.188 +      "Get the current scheduling parameters for a domain when running with\n"
   7.189 +      "the Atropos scheduler."
   7.190 +      " dom      [long]: domain to query\n"
   7.191 +      "Returns:  [dict]\n"
   7.192 +      " domain   [long]: domain ID\n"
   7.193 +      " period   [long]: scheduler period\n"
   7.194 +      " slice    [long]: CPU reservation per period\n"
   7.195 +      " latency  [long]: unblocking latency hint\n"
   7.196 +      " xtratime [int] : 0 if not using slack time, nonzero otherwise\n" },
   7.197 +
   7.198      { "rrobin_global_set",
   7.199        (PyCFunction)pyxc_rrobin_global_set,
   7.200        METH_KEYWORDS, "\n"
   7.201        "Set Round Robin scheduler slice.\n"
   7.202        " slice [long]: Round Robin scheduler slice\n"
   7.203 -      "Returns: [int] 0 on success, throws an exception on failure\n"
   7.204 -    },
   7.205 +      "Returns: [int] 0 on success, throws an exception on failure\n" },
   7.206 +
   7.207 +    { "rrobin_global_get",
   7.208 +      (PyCFunction)pyxc_rrobin_global_get,
   7.209 +      METH_KEYWORDS, "\n"
   7.210 +      "Get Round Robin scheduler settings\n"
   7.211 +      "Returns [dict]:\n"
   7.212 +      " slice  [long]: Scheduler time slice.\n" },    
   7.213  
   7.214      { "vif_scheduler_set", 
   7.215        (PyCFunction)pyxc_vif_scheduler_set, 
     8.1 --- a/xen/common/dom0_ops.c	Fri Mar 26 17:08:45 2004 +0000
     8.2 +++ b/xen/common/dom0_ops.c	Fri Mar 26 18:32:29 2004 +0000
     8.3 @@ -200,12 +200,14 @@ long do_dom0_op(dom0_op_t *u_dom0_op)
     8.4      case DOM0_SCHEDCTL:
     8.5      {
     8.6          ret = sched_ctl(&op->u.schedctl);
     8.7 +        copy_to_user(u_dom0_op, op, sizeof(*op));
     8.8      }
     8.9      break;
    8.10  
    8.11      case DOM0_ADJUSTDOM:
    8.12      {
    8.13          ret = sched_adjdom(&op->u.adjustdom);
    8.14 +        copy_to_user(u_dom0_op, op, sizeof(*op));
    8.15      }
    8.16      break;
    8.17  
    8.18 @@ -275,7 +277,6 @@ long do_dom0_op(dom0_op_t *u_dom0_op)
    8.19              if ( (p->state == TASK_STOPPED) || (p->state == TASK_DYING) )
    8.20                  op->u.getdomaininfo.state = DOMSTATE_STOPPED;
    8.21              op->u.getdomaininfo.hyp_events  = p->hyp_events;
    8.22 -//            op->u.getdomaininfo.mcu_advance = p->mcu_advance;
    8.23              op->u.getdomaininfo.tot_pages   = p->tot_pages;
    8.24              op->u.getdomaininfo.cpu_time    = p->cpu_time;
    8.25              op->u.getdomaininfo.shared_info_frame = 
    8.26 @@ -485,6 +486,14 @@ long do_dom0_op(dom0_op_t *u_dom0_op)
    8.27                                          op->u.pcidev_access.enable);
    8.28      }
    8.29      break;
    8.30 +
    8.31 +    case DOM0_SCHED_ID:
    8.32 +    {
    8.33 +        op->u.sched_id.sched_id = sched_id();
    8.34 +
    8.35 +        copy_to_user(u_dom0_op, op, sizeof(*op));
     8.36 +        ret = 0;
     8.37 +    }
     8.38 +    break;
    8.38       
    8.39      default:
    8.40          ret = -ENOSYS;
     9.1 --- a/xen/common/keyhandler.c	Fri Mar 26 17:08:45 2004 +0000
     9.2 +++ b/xen/common/keyhandler.c	Fri Mar 26 18:32:29 2004 +0000
     9.3 @@ -4,6 +4,7 @@
     9.4  #include <xen/event.h>
     9.5  #include <xen/console.h>
     9.6  #include <xen/serial.h>
     9.7 +#include <xen/sched.h>
     9.8  
     9.9  #define KEY_MAX 256
    9.10  #define STR_MAX  64
    9.11 @@ -74,29 +75,6 @@ static void kill_dom0(u_char key, void *
    9.12      kill_other_domain(0, 0);
    9.13  }
    9.14  
    9.15 -
    9.16 -/* XXX SMH: this is keir's fault */
    9.17 -static char *task_states[] = 
    9.18 -{ 
    9.19 -    "Runnable  ", 
    9.20 -    "Int Sleep ", 
    9.21 -    "UInt Sleep", 
    9.22 -    NULL,
    9.23 -    "Stopped   ", 
    9.24 -    NULL,
    9.25 -    NULL,
    9.26 -    NULL,
    9.27 -    "Dying     ",
    9.28 -    NULL,
    9.29 -    NULL,
    9.30 -    NULL,
    9.31 -    NULL,
    9.32 -    NULL,
    9.33 -    NULL,
    9.34 -    NULL,
    9.35 -    "Sched priv"
    9.36 -}; 
    9.37 -
    9.38  void do_task_queues(u_char key, void *dev_id, struct pt_regs *regs) 
    9.39  {
    9.40      unsigned long       flags;
    9.41 @@ -111,10 +89,10 @@ void do_task_queues(u_char key, void *de
    9.42  
    9.43      for_each_domain ( p )
    9.44      {
    9.45 -        printk("Xen: DOM %llu, CPU %d [has=%c], state = %s, "
    9.46 -               "hyp_events = %08x\n", 
    9.47 -               p->domain, p->processor, p->has_cpu ? 'T':'F', 
    9.48 -               task_states[p->state], p->hyp_events); 
    9.49 +        printk("Xen: DOM %llu, CPU %d [has=%c], state = ",
    9.50 +               p->domain, p->processor, p->has_cpu ? 'T':'F'); 
     9.51 +        sched_prn_state(p->state);
     9.52 +        printk(", hyp_events = %08x\n", p->hyp_events);
    9.53          s = p->shared_info; 
    9.54          printk("Guest: upcall_pend = %08lx, upcall_mask = %08lx\n", 
    9.55                 s->evtchn_upcall_pending, s->evtchn_upcall_mask);
    10.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    10.2 +++ b/xen/common/sched_atropos.c	Fri Mar 26 18:32:29 2004 +0000
    10.3 @@ -0,0 +1,598 @@
    10.4 +/*
    10.5 + *	atropos.c
    10.6 + *	---------
    10.7 + *
    10.8 + * Copyright (c) 1994 University of Cambridge Computer Laboratory.
    10.9 + * This is part of Nemesis; consult your contract for terms and conditions.
   10.10 + *
   10.11 + * ID : $Id: atropos.c 1.1 Tue, 13 Apr 1999 13:30:49 +0100 dr10009 $
   10.12 + *
   10.13 + * This is the "atropos" CPU scheduler. 
   10.14 + */
   10.15 +
   10.16 +/* Ported to Xen's generic scheduler interface by Mark Williamson
   10.17 + * these modifications are (C) 2004 Intel Research Cambridge
   10.18 + */
   10.19 +
   10.20 +#include <xen/time.h>
   10.21 +#include <xen/sched.h>
   10.22 +#include <xen/sched-if.h>
   10.23 +#include <hypervisor-ifs/sched_ctl.h>
   10.24 +#include <xen/trace.h>
   10.25 +
   10.26 +#define ATROPOS_TASK_UNBLOCKED 16
   10.27 +#define ATROPOS_TASK_WAIT      32
   10.28 +
   10.29 +#define Activation_Reason_Allocated 1
   10.30 +#define Activation_Reason_Preempted 2
   10.31 +#define Activation_Reason_Extra     3
   10.32 +
   10.33 +/* The following will be used for atropos-specific per-domain data fields */
   10.34 +struct at_dom_info
   10.35 +{
   10.36 +    /* MAW Xen additions */
   10.37 +    struct task_struct *owner; /* the struct task_struct this data belongs to */
   10.38 +    struct list_head waitq;    /* wait queue                                  */
   10.39 +    int reason;                /* reason domain was last scheduled            */
   10.40 +
   10.41 +    /* (what remains of) the original fields */
   10.42 +
   10.43 +    s_time_t     deadline;       /* Next deadline                */
   10.44 +    s_time_t     prevddln;       /* Previous deadline            */
   10.45 +    
   10.46 +    s_time_t     remain;         /* Time remaining this period   */
   10.47 +    s_time_t     period;         /* Period of time allocation    */
   10.48 +    s_time_t     slice;          /* Length of allocation         */
   10.49 +    s_time_t     latency;        /* Unblocking latency           */
   10.50 +
   10.51 +    int          xtratime;       /* Prepared to accept extra?    */
   10.52 +};
   10.53 +
   10.54 +
   10.55 +struct at_cpu_info
   10.56 +{
   10.57 +    struct list_head waitq; /* wait queue*/
   10.58 +};
   10.59 +
   10.60 +
   10.61 +#define DOM_INFO(_p) ( (struct at_dom_info *)((_p)->sched_priv) )
   10.62 +#define CPU_INF(_p)  ( (struct at_cpu_info *)((_p).sched_priv) )
   10.63 +#define WAITQ(cpu)   (&( CPU_INF(schedule_data[cpu]) )->waitq )
   10.64 +#define RUNQ(cpu)    (&schedule_data[cpu].runqueue)
   10.65 +
   10.66 +#define BESTEFFORT_QUANTUM MILLISECS(5)
   10.67 +
   10.68 +/* SLAB cache for struct at_dom_info objects */
   10.69 +static kmem_cache_t *dom_info_cache;
   10.70 +
   10.71 +/** calculate the length of a linked list */
   10.72 +static int q_len(struct list_head *q) 
   10.73 +{
   10.74 +    int i = 0;
   10.75 +    struct list_head *tmp;
   10.76 +    list_for_each(tmp, q) i++;
   10.77 +    return i;
   10.78 +}
   10.79 +
   10.80 +
   10.81 +/** waitq_el - get the task_struct that owns a wait queue list element */
   10.82 +static inline struct task_struct * waitq_el(struct list_head *l)
   10.83 +{
   10.84 +    struct at_dom_info *inf;
   10.85 +    inf = list_entry(l, struct at_dom_info, waitq);
   10.86 +    return inf->owner;
   10.87 +}
   10.88 +
   10.89 +
   10.90 +/*
   10.91 + * requeue
   10.92 + *
   10.93 + * Places the specified domain on the appropriate queue.
   10.94 + * The wait queue is ordered by the time at which the domain
   10.95 + * will receive more CPU time.  If a domain has no guaranteed time
   10.96 + * left then the domain will be placed on the WAIT queue until
   10.97 + * its next period. 
   10.98 + *
   10.99 + * Note that domains can be on the wait queue with remain > 0 
  10.100 + * as a result of being blocked for a short time.
  10.101 + * These are scheduled in preference to domains with remain < 0 
  10.102 + * in an attempt to improve interactive performance.
  10.103 + */
  10.104 +static void requeue(struct task_struct *sdom)
  10.105 +{
  10.106 +    struct at_dom_info *inf = DOM_INFO(sdom);
  10.107 +    struct list_head *prev = WAITQ(sdom->processor);
  10.108 +    struct list_head *next;
  10.109 +
  10.110 +    if(sdom->state == ATROPOS_TASK_WAIT ||
  10.111 +       sdom->state == ATROPOS_TASK_UNBLOCKED )
  10.112 +    {
  10.113 +        /* insert into ordered wait queue */
  10.114 +
  10.115 +        prev = WAITQ(sdom->processor);
  10.116 +        list_for_each(next, WAITQ(sdom->processor))
  10.117 +        {
  10.118 +            struct at_dom_info *i = list_entry(next, struct at_dom_info, waitq);
  10.119 +            if( i->deadline > inf->deadline )
  10.120 +            {
  10.121 +                __list_add(&inf->waitq, prev, next);
  10.122 +                break;
  10.123 +            }
  10.124 +
  10.125 +            prev = next;
  10.126 +        }
  10.127 +
  10.128 +        /* put the domain on the end of the list if it hasn't been put
  10.129 +         * elsewhere */
  10.130 +        if ( next == WAITQ(sdom->processor))
  10.131 +            list_add_tail(&inf->waitq, WAITQ(sdom->processor));
  10.132 +    }
  10.133 +    else if(sdom->state == TASK_RUNNING)
  10.134 +    {
  10.135 +        /* insert into ordered run queue */
  10.136 +        prev = RUNQ(sdom->processor);
  10.137 +
  10.138 +        list_for_each(next, RUNQ(sdom->processor))
  10.139 +        {
  10.140 +            struct task_struct *p = list_entry(next, struct task_struct,
  10.141 +                                               run_list);
  10.142 +
  10.143 +            if( DOM_INFO(p)->deadline > inf->deadline || is_idle_task(p) )
  10.144 +            {
  10.145 +                __list_add(&sdom->run_list, prev, next);
  10.146 +                break;
  10.147 +            }
  10.148 +
  10.149 +            prev = next;
  10.150 +        }
  10.151 +
  10.152 +        if ( next == RUNQ(sdom->processor) )
  10.153 +            list_add_tail(&sdom->run_list, RUNQ(sdom->processor));
  10.154 +    }
  10.155 +    /* silently ignore tasks in other states like BLOCKED, DYING, STOPPED, etc
  10.156 +     * - they shouldn't be on any queue */
  10.157 +}
  10.158 +
  10.159 +/* prepare a task to be added to scheduling */
  10.160 +static void at_add_task(struct task_struct *p)
  10.161 +{
  10.162 +    s_time_t now = NOW();
  10.163 +
  10.164 +    ASSERT( p->sched_priv != NULL );
  10.165 +
  10.166 +    DOM_INFO(p)->owner = p;
  10.167 +    p->lastschd = now;
  10.168 + 
  10.169 +    if(is_idle_task(p))
  10.170 +      DOM_INFO(p)->slice = MILLISECS(5);
  10.171 +
  10.172 +    /* DOM 0's scheduling parameters must be set here in order for it to boot
  10.173 +     * the system! */
  10.174 +    if(p->domain == 0)
  10.175 +    {
  10.176 +        DOM_INFO(p)->remain = MILLISECS(15);
  10.177 +        DOM_INFO(p)->period = MILLISECS(20);
  10.178 +        DOM_INFO(p)->slice  = MILLISECS(15);
  10.179 +        DOM_INFO(p)->latency = MILLISECS(10);
  10.180 +        DOM_INFO(p)->xtratime = 1;
  10.181 +        DOM_INFO(p)->deadline = now;
  10.182 +        DOM_INFO(p)->prevddln = now;
  10.183 +    }
  10.184 +    else /* other domains run basically best effort unless otherwise set */
  10.185 +    {
  10.186 +        DOM_INFO(p)->remain = 0;
  10.187 +        DOM_INFO(p)->period = MILLISECS(10000);
  10.188 +        DOM_INFO(p)->slice  = MILLISECS(10);
  10.189 +        DOM_INFO(p)->latency = MILLISECS(10000);
  10.190 +        DOM_INFO(p)->xtratime = 1;
  10.191 +        DOM_INFO(p)->deadline = now + MILLISECS(10000);
  10.192 +        DOM_INFO(p)->prevddln = 0;
  10.193 +    }
  10.194 +
  10.195 +    INIT_LIST_HEAD(&(DOM_INFO(p)->waitq));
  10.196 +}
  10.197 +
  10.198 +
  10.199 +/**
  10.200 + * dequeue - remove a domain from any queues it is on.
  10.201 + * @sdom:    the task to remove
  10.202 + */
  10.203 +static void dequeue(struct task_struct *sdom)
  10.204 +{
  10.205 +    struct at_dom_info *inf = DOM_INFO(sdom);
  10.206 +
  10.207 +    ASSERT(sdom->domain != IDLE_DOMAIN_ID);
  10.208 +    
  10.209 +    /* just delete it from all the queues! */
  10.210 +    list_del(&inf->waitq);
  10.211 +    INIT_LIST_HEAD(&inf->waitq);
  10.212 +    
  10.213 +    if(__task_on_runqueue(sdom))
  10.214 +        __del_from_runqueue(sdom);
  10.215 +
  10.216 +    sdom->run_list.next = NULL;
  10.217 +    sdom->run_list.prev = NULL;
  10.218 +
  10.219 +}
  10.220 +
  10.221 +
  10.222 +/*
  10.223 + * unblock
  10.224 + *
  10.225 + * This function deals with updating the sdom for a domain
  10.226 + * which has just been unblocked.  
  10.227 + *
  10.228 + * ASSERT: On entry, the sdom has already been removed from the block
  10.229 + * queue (it can be done more efficiently if we know that it
  10.230 + * is on the head of the queue) but its deadline field has not been
  10.231 + * restored yet.
  10.232 + */
  10.233 +static void unblock(struct task_struct *sdom)
  10.234 +{
  10.235 +    s_time_t time = NOW();
  10.236 +    struct at_dom_info *inf = DOM_INFO(sdom);
  10.237 +    
  10.238 +    dequeue(sdom);
  10.239 +
  10.240 +    /* We distinguish two cases... short and long blocks */
  10.241 +    if ( inf->deadline < time ) {
  10.242 +	/* The sdom has passed its deadline since it was blocked. 
  10.243 +	   Give it its new deadline based on the latency value. */
  10.244 +	inf->prevddln = time; 
  10.245 +	inf->deadline = time + inf->latency;
  10.246 +	inf->remain   = inf->slice;
  10.247 +        if(inf->remain > 0)
  10.248 +            sdom->state = TASK_RUNNING;
  10.249 +        else
  10.250 +            sdom->state = ATROPOS_TASK_WAIT;
  10.251 +        
  10.252 +    } else {
  10.253 +	/* We leave REMAIN intact, but put this domain on the WAIT
  10.254 +	   queue marked as recently unblocked.  It will be given
   10.255 +	   priority over other domains on the wait queue while
  10.256 +	   REMAIN>0 in a generous attempt to help it make up for its
  10.257 +	   own foolishness. */
  10.258 +	if(inf->remain > 0)
  10.259 +            sdom->state = ATROPOS_TASK_UNBLOCKED;
  10.260 +        else
  10.261 +            sdom->state = ATROPOS_TASK_WAIT;
  10.262 +    }
  10.263 +
  10.264 +    requeue(sdom);
  10.265 +
  10.266 +}
  10.267 +
  10.268 +/**
  10.269 + * ATROPOS - main scheduler function
  10.270 + */
  10.271 +task_slice_t ksched_scheduler(s_time_t time)
  10.272 +{
  10.273 +    struct task_struct	*cur_sdom = current;  /* Current sdom           */
  10.274 +    s_time_t     newtime;
  10.275 +    s_time_t      ranfor;	        /* How long the domain ran      */
  10.276 +    struct task_struct	*sdom;	        /* tmp. scheduling domain	*/
  10.277 +    int   reason;                       /* reason for reschedule        */
  10.278 +    int cpu = cur_sdom->processor;      /* current CPU                  */
  10.279 +    struct at_dom_info *cur_info;
  10.280 +    static unsigned long waitq_rrobin = 0;
  10.281 +    int i;
  10.282 +    task_slice_t ret;
  10.283 +
  10.284 +    cur_info = DOM_INFO(cur_sdom);
  10.285 +
  10.286 +    ASSERT( cur_sdom != NULL);
  10.287 +
  10.288 +    /* If we were spinning in the idle loop, there is no current
  10.289 +     * domain to deschedule. */
  10.290 +    if (is_idle_task(cur_sdom)) {
  10.291 +	goto deschedule_done;
  10.292 +    }
  10.293 +
  10.294 +    /*****************************
  10.295 +     * 
  10.296 +     * Deschedule the current scheduling domain
  10.297 +     *
  10.298 +     ****************************/
  10.299 +
  10.300 +   /* Record the time the domain was preempted and for how long it
  10.301 +       ran.  Work out if the domain is going to be blocked to save
  10.302 +       some pointless queue shuffling */
  10.303 +    cur_sdom->lastdeschd = time;
  10.304 +
  10.305 +    ranfor = (time - cur_sdom->lastschd);
  10.306 +
  10.307 +    dequeue(cur_sdom);
  10.308 +
  10.309 +    if ((cur_sdom->state == TASK_RUNNING) ||
  10.310 +        (cur_sdom->state == ATROPOS_TASK_UNBLOCKED)) {
  10.311 +
  10.312 +	/* In this block, we are doing accounting for an sdom which has 
  10.313 +	   been running in contracted time.  Note that this could now happen
  10.314 +	   even if the domain is on the wait queue (i.e. if it blocked) */
  10.315 +
  10.316 +	/* Deduct guaranteed time from the domain */
  10.317 +	cur_info->remain  -= ranfor;
  10.318 +
  10.319 +	/* If guaranteed time has run out... */
  10.320 +	if ( cur_info->remain <= 0 ) {
  10.321 +	    /* Move domain to correct position in WAIT queue */
  10.322 +            /* XXX sdom_unblocked doesn't need this since it is 
  10.323 +	     already in the correct place. */
  10.324 +	    cur_sdom->state = ATROPOS_TASK_WAIT;
  10.325 +	}
  10.326 +    }
  10.327 +
  10.328 +    requeue(cur_sdom);
  10.329 +
  10.330 +  deschedule_done:
  10.331 +
  10.332 +    /*****************************
  10.333 +     * 
  10.334 +     * We have now successfully descheduled the current sdom.
   10.335 +     * The next task is to allocate CPU time to any sdom that is due some.
   10.336 +     *
   10.337 +     ****************************/
  10.338 +    cur_sdom = NULL;
  10.339 +
  10.340 +    /*****************************
  10.341 +     * 
  10.342 +     * Allocate CPU time to any waiting domains who have passed their
  10.343 +     * period deadline.  If necessary, move them to run queue.
  10.344 +     *
  10.345 +     ****************************/
  10.346 +    while(!list_empty(WAITQ(cpu)) && 
  10.347 +	  DOM_INFO(sdom = waitq_el(WAITQ(cpu)->next))->deadline <= time ) {
  10.348 +
  10.349 +	struct at_dom_info *inf = DOM_INFO(sdom);
  10.350 +
  10.351 +        dequeue(sdom);
  10.352 +
   10.353 +	/* Domain begins a new period and receives a slice of CPU.
   10.354 +	 * If this domain has been blocking then throw away the
   10.355 +	 * rest of its remain - it can't be trusted */
  10.356 +	if (inf->remain > 0) 
  10.357 +	    inf->remain = inf->slice;
  10.358 +    	else 
  10.359 +	    inf->remain += inf->slice;
  10.360 +	inf->prevddln = inf->deadline;
  10.361 +	inf->deadline += inf->period;
  10.362 +        if(inf->remain > 0)
  10.363 +            sdom->state = TASK_RUNNING;
  10.364 +        else
  10.365 +            sdom->state = ATROPOS_TASK_WAIT;
  10.366 +
  10.367 +	/* Place on the appropriate queue */
  10.368 +	requeue(sdom);
  10.369 +    }
  10.370 +
  10.371 +    /*****************************
  10.372 +     * 
  10.373 +     * Next we need to pick an sdom to run.
  10.374 +     * If anything is actually 'runnable', we run that. 
  10.375 +     * If nothing is, we pick a waiting sdom to run optimistically.
  10.376 +     * If there aren't even any of those, we have to spin waiting for an
  10.377 +     * event or a suitable time condition to happen.
  10.378 +     *
  10.379 +     ****************************/
  10.380 +    
  10.381 +    /* we guarantee there's always something on the runqueue */
  10.382 +    cur_sdom = list_entry(RUNQ(cpu)->next,
  10.383 +                          struct task_struct, run_list);
  10.384 +
  10.385 +    cur_info = DOM_INFO(cur_sdom);
  10.386 +    newtime = time + cur_info->remain;
  10.387 +    reason  = (cur_info->prevddln > cur_sdom->lastschd) ?
  10.388 +      Activation_Reason_Allocated : Activation_Reason_Preempted;
  10.389 +
  10.390 +    /* MAW - the idle domain is always on the run queue.  We run from the
  10.391 +     * runqueue if it's NOT the idle domain or if there's nothing on the wait
  10.392 +     * queue */
  10.393 +    if (cur_sdom->domain == IDLE_DOMAIN_ID && !list_empty(WAITQ(cpu))) {
  10.394 +
  10.395 +        struct list_head *item;
  10.396 +
  10.397 +	/* Try running a domain on the WAIT queue - this part of the
  10.398 +	   scheduler isn't particularly efficient but then again, we
  10.399 +	   don't have any guaranteed domains to worry about. */
  10.400 +	
  10.401 +	/* See if there are any unblocked domains on the WAIT
  10.402 +	   queue who we can give preferential treatment to. */
  10.403 +        list_for_each(item, WAITQ(cpu))
  10.404 +        {
  10.405 +            struct at_dom_info *inf =
  10.406 +                list_entry(item, struct at_dom_info, waitq);
  10.407 +
  10.408 +            sdom = inf->owner;
  10.409 +            
  10.410 +	    if (sdom->state == ATROPOS_TASK_UNBLOCKED) {
  10.411 +		cur_sdom = sdom;
  10.412 +		cur_info  = inf;
  10.413 +		newtime  = time + inf->remain;
  10.414 +		reason   = Activation_Reason_Preempted;
  10.415 +		goto found;
  10.416 +	    }
  10.417 +	}
  10.418 +
  10.419 +        /* init values needed to approximate round-robin for slack time */
  10.420 +        i = 0;
  10.421 +        if ( waitq_rrobin >= q_len(WAITQ(cpu)))
  10.422 +            waitq_rrobin = 0;
  10.423 +        
   10.424 +	/* Last chance: pick a domain on the wait queue with the XTRA
   10.425 +	   flag set.  The waitq_rrobin counter is used to cheaply achieve
   10.426 +	   an approximation of round-robin order */
  10.427 +        list_for_each(item, WAITQ(cpu))
  10.428 +            {
  10.429 +                struct at_dom_info *inf =
  10.430 +                    list_entry(item, struct at_dom_info, waitq);
  10.431 +                
  10.432 +                sdom = inf->owner;
  10.433 +
  10.434 +                if (inf->xtratime && i >= waitq_rrobin) {
  10.435 +                    cur_sdom = sdom;
  10.436 +                    cur_info  = inf;
  10.437 +                    newtime = time + BESTEFFORT_QUANTUM;
  10.438 +                    reason  = Activation_Reason_Extra;
  10.439 +                    waitq_rrobin = i + 1; /* set this value ready for next */
  10.440 +                    goto found;
  10.441 +                }
  10.442 +
  10.443 +                i++;
  10.444 +            }
  10.445 +
  10.446 +    }
  10.447 +
  10.448 +    found:
  10.449 +    /**********************
  10.450 +     * 
  10.451 +     * We now have to work out the time when we next need to
  10.452 +     * make a scheduling decision.  We set the alarm timer
  10.453 +     * to cause an interrupt at that time.
  10.454 +     *
  10.455 +     **********************/
  10.456 +
  10.457 +#define MIN(x,y) ( ( x < y ) ? x : y )
  10.458 +#define MAX(x,y) ( ( x > y ) ? x : y )
  10.459 +
  10.460 +    /* If we might be able to run a waiting domain before this one has */
  10.461 +    /* exhausted its time, cut short the time allocation */
  10.462 +    if (!list_empty(WAITQ(cpu)))
  10.463 +    {
  10.464 +	newtime = MIN(newtime,
  10.465 +                      DOM_INFO(waitq_el(WAITQ(cpu)->next))->deadline);
  10.466 +    }
  10.467 +
  10.468 +    /* don't allow pointlessly small time slices */
  10.469 +    newtime = MAX(newtime, time + BESTEFFORT_QUANTUM);
  10.470 +    
  10.471 +    ret.task = cur_sdom;
  10.472 +    ret.time = newtime - time;
  10.473 +
  10.474 +    cur_sdom->min_slice = newtime - time;
  10.475 +    DOM_INFO(cur_sdom)->reason = reason;
  10.476 +
  10.477 +    TRACE_2D(0, cur_sdom->domain >> 32, (u32)cur_sdom->domain);
  10.478 + 
  10.479 +    return ret;
  10.480 +}
  10.481 +
  10.482 +
  10.483 +/* set up some private data structures */
  10.484 +static int at_init_scheduler()
  10.485 +{
  10.486 +    int i;
  10.487 +    
  10.488 +    for( i = 0; i < NR_CPUS; i++)
  10.489 +    {
  10.490 +        if( (CPU_INF(schedule_data[i]) = kmalloc(sizeof(struct at_cpu_info),
  10.491 +                                            GFP_KERNEL)) == NULL )
  10.492 +            return -1;
  10.493 +        WAITQ(i)->next = WAITQ(i);
  10.494 +        WAITQ(i)->prev = WAITQ(i);
  10.495 +    }
  10.496 +
  10.497 +    dom_info_cache = kmem_cache_create("Atropos dom info",
  10.498 +                                       sizeof(struct at_dom_info),
  10.499 +                                       0, 0, NULL, NULL);
  10.500 +
  10.501 +    return 0;
  10.502 +}
  10.503 +
  10.504 +/* dump relevant per-cpu state for a run queue dump */
  10.505 +static void at_dump_cpu_state(int cpu)
  10.506 +{
  10.507 +    printk("Waitq len: %d Runq len: %d ",
  10.508 +           q_len(WAITQ(cpu)),
  10.509 +           q_len(RUNQ(cpu)));
  10.510 +}
  10.511 +
  10.512 +/* print relevant per-domain info for a run queue dump */
  10.513 +static void at_dump_runq_el(struct task_struct *p)
  10.514 +{
  10.515 +    printk("lastschd = %llu, xtratime = %d ",
  10.516 +           p->lastschd, DOM_INFO(p)->xtratime);
  10.517 +}
  10.518 +
  10.519 +
  10.520 +/* set or fetch domain scheduling parameters */
  10.521 +static int at_adjdom(struct task_struct *p, struct sched_adjdom_cmd *cmd)
  10.522 +{
  10.523 +    if ( cmd->direction == SCHED_INFO_PUT )
  10.524 +    {
  10.525 +        DOM_INFO(p)->period   = cmd->u.atropos.period;
  10.526 +        DOM_INFO(p)->slice    = cmd->u.atropos.slice;
  10.527 +        DOM_INFO(p)->latency  = cmd->u.atropos.latency;
  10.528 +        DOM_INFO(p)->xtratime = !!cmd->u.atropos.xtratime;
  10.529 +    }
  10.530 +    else if ( cmd->direction == SCHED_INFO_GET )
  10.531 +    {
  10.532 +        cmd->u.atropos.period   = DOM_INFO(p)->period;
  10.533 +        cmd->u.atropos.slice    = DOM_INFO(p)->slice;
  10.534 +        cmd->u.atropos.latency  = DOM_INFO(p)->latency;
  10.535 +        cmd->u.atropos.xtratime = DOM_INFO(p)->xtratime;
  10.536 +    }
  10.537 +
  10.538 +    return 0;
  10.539 +}
  10.540 +
  10.541 +
  10.542 +/** at_alloc_task - allocate private info for a task */
  10.543 +static int at_alloc_task(struct task_struct *p)
  10.544 +{
  10.545 +    ASSERT(p != NULL);
  10.546 +
  10.547 +    if( (DOM_INFO(p) = kmem_cache_alloc(dom_info_cache, GFP_KERNEL)) == NULL )
  10.548 +        return -1;
  10.549 +
  10.550 +    if(p->domain == IDLE_DOMAIN_ID)
  10.551 +      printk("ALLOC IDLE ON CPU %d\n", p->processor);
  10.552 +
  10.553 +    memset(DOM_INFO(p), 0, sizeof(struct at_dom_info));
  10.554 +
  10.555 +    return 0;
  10.556 +}
  10.557 +
  10.558 +
  10.559 +/* free memory associated with a task */
  10.560 +static void at_free_task(struct task_struct *p)
  10.561 +{
  10.562 +    kmem_cache_free( dom_info_cache, DOM_INFO(p) );
  10.563 +}
  10.564 +
  10.565 +/* print decoded domain private state value (if known) */
  10.566 +static int at_prn_state(int state)
  10.567 +{
  10.568 +    int ret = 0;
  10.569 +    
  10.570 +    switch(state)
  10.571 +    {
  10.572 +    case ATROPOS_TASK_UNBLOCKED:
  10.573 +        printk("Unblocked");
  10.574 +        break;
  10.575 +    case ATROPOS_TASK_WAIT:
  10.576 +        printk("Wait");
  10.577 +        break;
  10.578 +    default:
  10.579 +        ret = -1;
  10.580 +    }
  10.581 +
  10.582 +    return ret;
  10.583 +}
  10.584 +    
  10.585 +
  10.586 +struct scheduler sched_atropos_def = {
  10.587 +    .name           = "Atropos Soft Real Time Scheduler",
  10.588 +    .opt_name       = "atropos",
  10.589 +    .sched_id       = SCHED_ATROPOS,
  10.590 +
  10.591 +    .init_scheduler = at_init_scheduler,
  10.592 +    .alloc_task     = at_alloc_task,
  10.593 +    .add_task       = at_add_task,
  10.594 +    .free_task      = at_free_task,
  10.595 +    .wake_up        = unblock,
  10.596 +    .do_schedule    = ksched_scheduler,
  10.597 +    .adjdom         = at_adjdom,
  10.598 +    .dump_cpu_state = at_dump_cpu_state,
  10.599 +    .dump_runq_el   = at_dump_runq_el,
  10.600 +    .prn_state      = at_prn_state,
  10.601 +};
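
Atropos behaves like an EDF scheduler with explicit reservations: each domain asks for a
slice of CPU per period, both queues are kept in deadline order, and whenever a waiting
domain's deadline passes its allocation is topped up and its deadline pushed out by one
period. The fragment below is an illustration only, distilling that refill rule from the
main loop above using the at_dom_info fields defined in this file:

    /* Illustration only (not in the changeset): per-period accounting
     * applied when a waiting domain's deadline passes. */
    static void refill_on_deadline(struct at_dom_info *inf)
    {
        if ( inf->remain > 0 )
            inf->remain = inf->slice;   /* had been blocked: unused time is discarded */
        else
            inf->remain += inf->slice;  /* overran last period: the deficit carries over */

        inf->prevddln  = inf->deadline;
        inf->deadline += inf->period;   /* deadline for the new period */
    }
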
    11.1 --- a/xen/common/sched_bvt.c	Fri Mar 26 17:08:45 2004 +0000
    11.2 +++ b/xen/common/sched_bvt.c	Fri Mar 26 18:32:29 2004 +0000
    11.3 @@ -151,6 +151,7 @@ void bvt_wake_up(struct task_struct *p)
    11.4      struct bvt_dom_info *inf = BVT_INFO(p);
    11.5  
    11.6      ASSERT(inf != NULL);
    11.7 +    
    11.8  
    11.9      /* set the BVT parameters */
   11.10      if (inf->avt < CPU_SVT(p->processor))
   11.11 @@ -166,19 +167,25 @@ void bvt_wake_up(struct task_struct *p)
   11.12  /* 
   11.13   * Block the currently-executing domain until a pertinent event occurs.
   11.14   */
   11.15 -static long bvt_do_block(struct task_struct *p)
   11.16 +static void bvt_do_block(struct task_struct *p)
   11.17  {
   11.18      BVT_INFO(p)->warpback = 0; 
   11.19 -    return 0;
   11.20  }
   11.21  
   11.22  /* Control the scheduler. */
   11.23  int bvt_ctl(struct sched_ctl_cmd *cmd)
   11.24  {
   11.25      struct bvt_ctl *params = &cmd->u.bvt;
   11.26 +
   11.27 +    if ( cmd->direction == SCHED_INFO_PUT )
   11.28 +    { 
   11.29 +        ctx_allow = params->ctx_allow;
   11.30 +    }
   11.31 +    else
   11.32 +    {
   11.33 +        params->ctx_allow = ctx_allow;
   11.34 +    }
   11.35      
   11.36 -    ctx_allow = params->ctx_allow;
   11.37 -
   11.38      return 0;
   11.39  }
   11.40  
   11.41 @@ -187,24 +194,40 @@ int bvt_adjdom(struct task_struct *p,
   11.42                 struct sched_adjdom_cmd *cmd)
   11.43  {
   11.44      struct bvt_adjdom *params = &cmd->u.bvt;
   11.45 -    unsigned long mcu_adv = params->mcu_adv,
   11.46 -                    warp  = params->warp,
   11.47 -                    warpl = params->warpl,
   11.48 -                    warpu = params->warpu;
   11.49 -    
   11.50 -    struct bvt_dom_info *inf = BVT_INFO(p);
   11.51 +    unsigned long flags;
   11.52  
   11.53 -    /* Sanity -- this can avoid divide-by-zero. */
   11.54 -    if ( mcu_adv == 0 )
   11.55 -        return -EINVAL;
   11.56 +    if ( cmd->direction == SCHED_INFO_PUT )
   11.57 +    {
   11.58 +        unsigned long mcu_adv = params->mcu_adv,
   11.59 +            warp  = params->warp,
   11.60 +            warpl = params->warpl,
   11.61 +            warpu = params->warpu;
   11.62 +        
   11.63 +        struct bvt_dom_info *inf = BVT_INFO(p);
   11.64 +        
   11.65 +        /* Sanity -- this can avoid divide-by-zero. */
   11.66 +        if ( mcu_adv == 0 )
   11.67 +            return -EINVAL;
   11.68 +        
   11.69 +        spin_lock_irqsave(&schedule_lock[p->processor], flags);   
   11.70 +        inf->mcu_advance = mcu_adv;
   11.71 +        inf->warp = warp;
   11.72 +        inf->warpl = warpl;
   11.73 +        inf->warpu = warpu;
   11.74 +        spin_unlock_irqrestore(&schedule_lock[p->processor], flags);
   11.75 +    }
   11.76 +    else if ( cmd->direction == SCHED_INFO_GET )
   11.77 +    {
   11.78 +        struct bvt_dom_info *inf = BVT_INFO(p);
   11.79  
   11.80 -    spin_lock_irq(&schedule_lock[p->processor]);   
   11.81 -    inf->mcu_advance = mcu_adv;
   11.82 -    inf->warp = warp;
   11.83 -    inf->warpl = warpl;
   11.84 -    inf->warpu = warpu;
   11.85 -    spin_unlock_irq(&schedule_lock[p->processor]); 
   11.86 -
   11.87 +        spin_lock_irqsave(&schedule_lock[p->processor], flags);   
   11.88 +        params->mcu_adv = inf->mcu_advance;
   11.89 +        params->warp    = inf->warp;
   11.90 +        params->warpl   = inf->warpl;
   11.91 +        params->warpu   = inf->warpu;
   11.92 +        spin_unlock_irqrestore(&schedule_lock[p->processor], flags);
   11.93 +    }
   11.94 +    
   11.95      return 0;
   11.96  }
   11.97  
    12.1 --- a/xen/common/sched_rrobin.c	Fri Mar 26 17:08:45 2004 +0000
    12.2 +++ b/xen/common/sched_rrobin.c	Fri Mar 26 18:32:29 2004 +0000
    12.3 @@ -1,5 +1,5 @@
    12.4  /****************************************************************************
    12.5 - * Very stupid Round Robin Scheduler for Xen
    12.6 + * Round Robin Scheduler for Xen
    12.7   *
    12.8   * by Mark Williamson (C) 2004 Intel Research Cambridge
    12.9   */
   12.10 @@ -33,7 +33,15 @@ static task_slice_t rr_do_schedule(s_tim
   12.11  
   12.12  static int rr_ctl(struct sched_ctl_cmd *cmd)
   12.13  {
   12.14 -    rr_slice = cmd->u.rrobin.slice;
   12.15 +    if(cmd->direction == SCHED_INFO_PUT)
   12.16 +    {
   12.17 +        rr_slice = cmd->u.rrobin.slice;
   12.18 +    }
   12.19 +    else /* cmd->direction == SCHED_INFO_GET */
   12.20 +    {
   12.21 +        cmd->u.rrobin.slice = rr_slice;
   12.22 +    }
   12.23 +    
   12.24      return 0;
   12.25  }
   12.26  
    13.1 --- a/xen/common/schedule.c	Fri Mar 26 17:08:45 2004 +0000
    13.2 +++ b/xen/common/schedule.c	Fri Mar 26 18:32:29 2004 +0000
    13.3 @@ -41,7 +41,7 @@
    13.4  #define TIME_SLOP      (s32)MICROSECS(50)     /* allow time to slip a bit */
    13.5  
    13.6  /*
    13.7 - * XXX Pull trace-related #defines out of here and into an auto-generated
    13.8 + * TODO MAW pull trace-related #defines out of here and into an auto-generated
    13.9   * header file later on!
   13.10   */
   13.11  #define TRC_SCHED_DOM_ADD             0x00010000
   13.12 @@ -68,23 +68,25 @@ static void t_timer_fn(unsigned long unu
   13.13  static void dom_timer_fn(unsigned long data);
   13.14  static void fallback_timer_fn(unsigned long unused);
   13.15  
   13.16 -/* This is global for now so that private implementations can reach it. */
   13.17 +/* This is global for now so that private implementations can reach it */
   13.18  schedule_data_t schedule_data[NR_CPUS];
   13.19  
   13.20  /*
   13.21 - * XXX It would be nice if the schedulers array could get populated
   13.22 + * TODO: It would be nice if the schedulers array could get populated
   13.23   * automagically without having to hack the code in here.
   13.24   */
   13.25 -extern struct scheduler sched_bvt_def, sched_rrobin_def;
   13.26 +extern struct scheduler sched_bvt_def, sched_rrobin_def, sched_atropos_def;
   13.27  static struct scheduler *schedulers[] = { &sched_bvt_def,
   13.28                                            &sched_rrobin_def,
   13.29 +                                          &sched_atropos_def,
   13.30                                            NULL};
   13.31  
   13.32  /* Operations for the current scheduler. */
   13.33  static struct scheduler ops;
   13.34  
   13.35 -#define SCHED_FN(fn, ...) \
   13.36 -    ((ops.fn != NULL) ? (ops.fn(__VA_ARGS__)) : (typeof(ops.fn(__VA_ARGS__)))0)
   13.37 +#define SCHED_OP(fn, ...)                                 \
   13.38 +         (( ops.fn != NULL ) ? ops.fn( __VA_ARGS__ )      \
   13.39 +          : (typeof(ops.fn(__VA_ARGS__)))0 )
   13.40  
   13.41  spinlock_t schedule_lock[NR_CPUS] __cacheline_aligned;
   13.42  
   13.43 @@ -101,7 +103,7 @@ extern kmem_cache_t *task_struct_cachep;
   13.44  
   13.45  void free_task_struct(struct task_struct *p)
   13.46  {
   13.47 -    SCHED_FN(free_task, p);
   13.48 +    SCHED_OP(free_task, p);
   13.49      kmem_cache_free(task_struct_cachep, p);
   13.50  }
   13.51  
   13.52 @@ -114,15 +116,15 @@ struct task_struct *alloc_task_struct(vo
   13.53  
   13.54      if ( (p = kmem_cache_alloc(task_struct_cachep,GFP_KERNEL)) == NULL )
   13.55          return NULL;
   13.56 -
   13.57 -    memset(p, 0, sizeof(*p));    
   13.58 +    
   13.59 +    memset(p, 0, sizeof(*p));
   13.60  
   13.61 -    if ( SCHED_FN(alloc_task, p) < 0)
   13.62 +    if ( SCHED_OP(alloc_task, p) < 0 )
   13.63      {
   13.64 -        kmem_cache_free(task_struct_cachep, p);
   13.65 +        kmem_cache_free(task_struct_cachep,p);
   13.66          return NULL;
   13.67      }
   13.68 -    
   13.69 +
   13.70      return p;
   13.71  }
   13.72  
   13.73 @@ -146,7 +148,7 @@ void sched_add_domain(struct task_struct
   13.74          schedule_data[p->processor].idle = p;
   13.75      }
   13.76  
   13.77 -    SCHED_FN(add_task, p);
   13.78 +    SCHED_OP(add_task, p);
   13.79  
   13.80      TRACE_3D(TRC_SCHED_DOM_ADD, _HIGH32(p->domain), _LOW32(p->domain), p);
   13.81  }
   13.82 @@ -160,7 +162,7 @@ int sched_rem_domain(struct task_struct 
   13.83  
   13.84      rem_ac_timer(&p->timer);
   13.85  
   13.86 -    SCHED_FN(rem_task, p);
   13.87 +    SCHED_OP(rem_task, p);
   13.88  
   13.89      TRACE_3D(TRC_SCHED_DOM_REM, _HIGH32(p->domain), _LOW32(p->domain), p);
   13.90  
   13.91 @@ -173,9 +175,9 @@ void init_idle_task(void)
   13.92      unsigned long flags;
   13.93      struct task_struct *p = current;
   13.94  
   13.95 -    if ( SCHED_FN(alloc_task, p) < 0 )
   13.96 -        panic("Failed to allocate scheduler private data for idle task");
   13.97 -    SCHED_FN(add_task, p);
   13.98 +    if ( SCHED_OP(alloc_task, p) < 0 )
   13.99 +        panic("Failed to allocate scheduler private data for idle task");
  13.100 +    SCHED_OP(add_task, p);
  13.101  
  13.102      spin_lock_irqsave(&schedule_lock[p->processor], flags);
  13.103      p->has_cpu = 1;
  13.104 @@ -191,12 +193,12 @@ void __wake_up(struct task_struct *p)
  13.105  
  13.106      ASSERT(p->state != TASK_DYING);
  13.107  
  13.108 -    if ( unlikely(__task_on_runqueue(p)) )
  13.109 +    if ( unlikely(__task_on_runqueue(p)) )        
  13.110          return;
  13.111  
  13.112      p->state = TASK_RUNNING;
  13.113  
  13.114 -    SCHED_FN(wake_up, p);
  13.115 +    SCHED_OP(wake_up, p);
  13.116  
  13.117  #ifdef WAKEUP_HISTO
  13.118      p->wokenup = NOW();
  13.119 @@ -300,15 +302,12 @@ long do_set_timer_op(unsigned long timeo
  13.120      return 0;
  13.121  }
  13.122  
  13.123 +/** sched_id - fetch ID of current scheduler */
  13.124 +int sched_id()
  13.125 +{
  13.126 +    return ops.sched_id;
  13.127 +}
  13.128  
  13.129 -/**
  13.130 - * sched_ctl - dispatch a scheduler control operation
  13.131 - * @cmd:       the command passed in the dom0 op
  13.132 - *
  13.133 - * Given a generic scheduler control operation, call the control function for
  13.134 - * the scheduler in use, passing the appropriate control information from the
  13.135 - * union supplied.
  13.136 - */
  13.137  long sched_ctl(struct sched_ctl_cmd *cmd)
  13.138  {
  13.139      TRACE_0D(TRC_SCHED_CTL);
  13.140 @@ -316,7 +315,7 @@ long sched_ctl(struct sched_ctl_cmd *cmd
  13.141      if ( cmd->sched_id != ops.sched_id )
  13.142          return -EINVAL;
  13.143  
  13.144 -    return SCHED_FN(control, cmd);
  13.145 +    return SCHED_OP(control, cmd);
  13.146  }
  13.147  
  13.148  
  13.149 @@ -328,6 +327,9 @@ long sched_adjdom(struct sched_adjdom_cm
  13.150      if ( cmd->sched_id != ops.sched_id )
  13.151          return -EINVAL;
  13.152  
  13.153 +    if ( cmd->direction != SCHED_INFO_PUT && cmd->direction != SCHED_INFO_GET )
  13.154 +        return -EINVAL;
  13.155 +
  13.156      p = find_domain_by_id(cmd->domain);
  13.157  
  13.158      if( p == NULL )
  13.159 @@ -335,7 +337,7 @@ long sched_adjdom(struct sched_adjdom_cm
  13.160  
  13.161      TRACE_2D(TRC_SCHED_ADJDOM, _HIGH32(p->domain), _LOW32(p->domain));
  13.162  
  13.163 -    SCHED_FN(adjdom, p, cmd);
  13.164 +    SCHED_OP(adjdom, p, cmd);
  13.165  
  13.166      put_task_struct(p); 
  13.167      return 0;
  13.168 @@ -351,7 +353,7 @@ long sched_adjdom(struct sched_adjdom_cm
  13.169   */
  13.170  unsigned long __reschedule(struct task_struct *p)
  13.171  {
  13.172 -    int cpu = p->processor;
   13.173 +    int cpu = p->processor;
  13.174      struct task_struct *curr;
  13.175      s_time_t now, min_time;
  13.176  
  13.177 @@ -376,7 +378,7 @@ unsigned long __reschedule(struct task_s
  13.178      if ( schedule_data[cpu].s_timer.expires > min_time + TIME_SLOP )
  13.179          mod_ac_timer(&schedule_data[cpu].s_timer, min_time);
  13.180  
  13.181 -    return SCHED_FN(reschedule, p);
  13.182 +    return SCHED_OP(reschedule, p);
  13.183  }
  13.184  
  13.185  void reschedule(struct task_struct *p)
  13.186 @@ -385,6 +387,7 @@ void reschedule(struct task_struct *p)
  13.187  
  13.188      spin_lock_irqsave(&schedule_lock[p->processor], flags);
  13.189      cpu_mask = __reschedule(p);
  13.190 +
  13.191      spin_unlock_irqrestore(&schedule_lock[p->processor], flags);
  13.192  
  13.193  #ifdef CONFIG_SMP
  13.194 @@ -420,7 +423,6 @@ asmlinkage void __enter_scheduler(void)
  13.195      ASSERT(!in_interrupt());
  13.196      ASSERT(__task_on_runqueue(prev));
  13.197      ASSERT(prev->state != TASK_UNINTERRUPTIBLE);
  13.198 -    ASSERT(prev != NULL);
  13.199  
  13.200      if ( prev->state == TASK_INTERRUPTIBLE )
  13.201      {
  13.202 @@ -428,19 +430,16 @@ asmlinkage void __enter_scheduler(void)
  13.203          if ( signal_pending(prev) )
  13.204              prev->state = TASK_RUNNING;
  13.205          else
  13.206 -            SCHED_FN(do_block, prev);
  13.207 +            SCHED_OP(do_block, prev);
  13.208      }
  13.209  
  13.210 +    prev->cpu_time += now - prev->lastschd;
  13.211 +
  13.212      /* get policy-specific decision on scheduling... */
  13.213      next_slice = ops.do_schedule(now);
  13.214  
  13.215      r_time = next_slice.time;
  13.216 -    next   = next_slice.task;
  13.217 -
  13.218 -    if ( likely(!is_idle_task(prev)) ) 
  13.219 -        prev->cpu_time += (now - prev->lastschd);
  13.220 -
  13.221 -    /* now, switch to the new task... */
  13.222 +    next = next_slice.task;
  13.223  
  13.224      prev->has_cpu = 0;
  13.225      next->has_cpu = 1;
  13.226 @@ -484,8 +483,6 @@ asmlinkage void __enter_scheduler(void)
  13.227  
  13.228      TRACE_2D(TRC_SCHED_SWITCH, next->domain, next);
  13.229  
  13.230 -    ASSERT(next->processor == current->processor);
  13.231 -
  13.232      switch_to(prev, next);
  13.233      
  13.234      if ( unlikely(prev->state == TASK_DYING) ) 
  13.235 @@ -520,7 +517,6 @@ int idle_cpu(int cpu)
  13.236  static void s_timer_fn(unsigned long unused)
  13.237  {
  13.238      TRACE_0D(TRC_SCHED_S_TIMER_FN);
  13.239 -    
  13.240      set_bit(_HYP_EVENT_NEED_RESCHED, &current->hyp_events);
  13.241      perfc_incrc(sched_irq);
  13.242  }
   13.252 @@ -611,10 +607,8 @@ void __init scheduler_init(void)
  13.253      if ( ops.do_schedule == NULL)
  13.254          panic("Chosen scheduler has NULL do_schedule!");
  13.255  
  13.256 -    if ( SCHED_FN(init_scheduler) < 0 )
  13.257 +    if ( SCHED_OP(init_scheduler) < 0 )
  13.258          panic("Initialising scheduler failed!");
  13.259 -
  13.260 -    SCHED_FN(add_task, &idle0_task);
  13.261  }
  13.262  
  13.263  /*
   13.264 @@ -654,7 +648,7 @@ static void dump_rqueue(struct list_head
  13.265      list_for_each (list, queue) {
  13.266          p = list_entry(list, struct task_struct, run_list);
  13.267          printk("%3d: %llu has=%c ", loop++, p->domain, p->has_cpu ? 'T':'F');
  13.268 -        SCHED_FN(dump_runq_el, p);
  13.269 +        SCHED_OP(dump_runq_el, p);
  13.270          printk("c=0x%X%08X\n", (u32)(p->cpu_time>>32), (u32)p->cpu_time);
  13.271          printk("         l: %lx n: %lx  p: %lx\n",
  13.272                 (unsigned long)list, (unsigned long)list->next,
   13.273 @@ -670,18 +664,48 @@ void dump_runq(u_char key, void *dev_id,
  13.274      int i;
  13.275  
  13.276      printk("Scheduler: %s (%s)\n", ops.name, ops.opt_name);
  13.277 -    SCHED_FN(dump_settings);
  13.278 +    SCHED_OP(dump_settings);
  13.279      printk("NOW=0x%08X%08X\n",  (u32)(now>>32), (u32)now); 
  13.280      for (i = 0; i < smp_num_cpus; i++) {
  13.281          spin_lock_irqsave(&schedule_lock[i], flags);
  13.282          printk("CPU[%02d] ", i);
  13.283 -        SCHED_FN(dump_cpu_state,i);
  13.284 +        SCHED_OP(dump_cpu_state,i);
  13.285          dump_rqueue(&schedule_data[i].runqueue, "rq"); 
  13.286          spin_unlock_irqrestore(&schedule_lock[i], flags);
  13.287      }
  13.288      return; 
  13.289  }
  13.290  
   13.291 +/* Print a human-readable name for the given numeric task state code. */
  13.292 +void sched_prn_state(int state)
  13.293 +{
  13.294 +    int ret = 0;
  13.295 +    
  13.296 +    switch(state)
  13.297 +    {
  13.298 +    case TASK_RUNNING:
  13.299 +        printk("Running");
  13.300 +        break;
  13.301 +    case TASK_INTERRUPTIBLE:
  13.302 +        printk("Int sleep");
  13.303 +        break;
  13.304 +    case TASK_UNINTERRUPTIBLE:
  13.305 +        printk("UInt sleep");
  13.306 +        break;
  13.307 +    case TASK_STOPPED:
  13.308 +        printk("Stopped");
  13.309 +        break;
  13.310 +    case TASK_DYING:
  13.311 +        printk("Dying");
  13.312 +        break;
  13.313 +    default:
  13.314 +        ret = SCHED_OP(prn_state, state);
  13.315 +    }
  13.316 +
  13.317 +    if ( ret != 0 )
  13.318 +        printk("Unknown");
  13.319 +}
  13.320 +
  13.321  #if defined(WAKEUP_HISTO) || defined(BLOCKTIME_HISTO)
  13.322  void print_sched_histo(u_char key, void *dev_id, struct pt_regs *regs)
  13.323  {
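
The SCHED_OP() macro introduced above lets the generic code in schedule.c call
whichever per-policy hook table is installed, treating a NULL hook as a no-op
that evaluates to a typed zero. The following is a minimal standalone sketch of
the same idiom; the example_ops structure and hook names are illustrative only
and are not part of this changeset (typeof is a GCC extension, as used in the
Xen tree).

    #include <stdio.h>
    #include <stddef.h>

    /* Illustrative hook table, in the spirit of 'struct scheduler'. */
    struct example_ops {
        int  (*alloc_task)(void *t);   /* a policy may leave this NULL */
        void (*wake_up)(void *t);      /* a policy may leave this NULL */
    };

    /* Guarded dispatch: call the hook if present, else yield a typed zero. */
    #define EXAMPLE_OP(ops, fn, ...)                                \
        (((ops).fn != NULL) ? (ops).fn(__VA_ARGS__)                 \
                            : (typeof((ops).fn(__VA_ARGS__)))0)

    static int demo_alloc(void *t) { (void)t; return 0; }

    int main(void)
    {
        struct example_ops ops = { demo_alloc, NULL };
        void *task = NULL;

        /* Hook present: dispatched normally, result used by the caller. */
        if ( EXAMPLE_OP(ops, alloc_task, task) < 0 )
            printf("alloc_task failed\n");

        /* Hook absent: the expression collapses to a harmless (void)0. */
        EXAMPLE_OP(ops, wake_up, task);

        return 0;
    }
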
    14.1 --- a/xen/include/hypervisor-ifs/dom0_ops.h	Fri Mar 26 17:08:45 2004 +0000
    14.2 +++ b/xen/include/hypervisor-ifs/dom0_ops.h	Fri Mar 26 18:32:29 2004 +0000
    14.3 @@ -18,7 +18,7 @@
    14.4   * This makes sure that old versions of dom0 tools will stop working in a
    14.5   * well-defined way (rather than crashing the machine, for instance).
    14.6   */
    14.7 -#define DOM0_INTERFACE_VERSION   0xAAAA000A
    14.8 +#define DOM0_INTERFACE_VERSION   0xAAAA000B
    14.9  
   14.10  #define MAX_CMD_LEN       256
   14.11  #define MAX_DOMAIN_NAME    16
   14.12 @@ -96,7 +96,6 @@ typedef struct dom0_getdomaininfo_st
   14.13  #define DOMSTATE_STOPPED             1
   14.14      int state;
   14.15      int hyp_events;
   14.16 -    unsigned long mcu_advance;
   14.17      unsigned int tot_pages;
   14.18      long long cpu_time;
   14.19      unsigned long shared_info_frame;  /* MFN of shared_info struct */
   14.20 @@ -214,6 +213,16 @@ typedef struct dom0_pcidev_access_st
   14.21      int          enable;
   14.22  } dom0_pcidev_access_t;
   14.23  
   14.24 +/*
   14.25 + * Get the ID of the current scheduler.
   14.26 + */
   14.27 +#define DOM0_SCHED_ID        24
   14.28 +typedef struct dom0_sched_id_st
   14.29 +{
   14.30 +    /* OUT variable */
   14.31 +    int sched_id;
   14.32 +} dom0_sched_id_t;
   14.33 +
   14.34  typedef struct dom0_op_st
   14.35  {
   14.36      unsigned long cmd;
   14.37 @@ -239,6 +248,7 @@ typedef struct dom0_op_st
   14.38          dom0_gettbufs_t         gettbufs;
   14.39          dom0_physinfo_t         physinfo;
   14.40          dom0_pcidev_access_t    pcidev_access;
   14.41 +        dom0_sched_id_t         sched_id;
   14.42      } u;
   14.43  } dom0_op_t;
   14.44  
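
The new DOM0_SCHED_ID operation carries a single OUT field, so a control tool
only has to issue the op and read back u.sched_id.sched_id. A hedged sketch of
such a query follows; the do_dom0_op() wrapper and the interface-version
handling are assumed to follow the existing libxc conventions rather than being
defined by this changeset.

    #include "xc_private.h"   /* assumed to provide dom0_op_t and do_dom0_op() */

    /* Illustrative libxc-style helper: ask Xen which scheduler is active. */
    static int example_get_sched_id(int xc_handle, int *sched_id)
    {
        dom0_op_t op;

        op.cmd = DOM0_SCHED_ID;
        op.interface_version = DOM0_INTERFACE_VERSION; /* assumed field, per libxc convention */

        if ( do_dom0_op(xc_handle, &op) < 0 )
            return -1;

        *sched_id = op.u.sched_id.sched_id; /* SCHED_BVT, SCHED_ATROPOS or SCHED_RROBIN */
        return 0;
    }
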
    15.1 --- a/xen/include/hypervisor-ifs/sched_ctl.h	Fri Mar 26 17:08:45 2004 +0000
    15.2 +++ b/xen/include/hypervisor-ifs/sched_ctl.h	Fri Mar 26 18:32:29 2004 +0000
    15.3 @@ -7,18 +7,24 @@
    15.4  #ifndef __SCHED_CTL_H__
    15.5  #define __SCHED_CTL_H__
    15.6  
    15.7 -/* Scheduler types. */
    15.8 +/* Scheduler types */
    15.9  #define SCHED_BVT      0
   15.10  #define SCHED_ATROPOS  1
   15.11  #define SCHED_RROBIN   2
   15.12  
    15.13 +/* These describe the direction of data flow (get or put) for a scheduler
    15.14 + * control or per-domain adjustment command. */
   15.15 +#define SCHED_INFO_PUT 0
   15.16 +#define SCHED_INFO_GET 1
   15.17 +
   15.18  /*
   15.19 - * Generic scheduler control command: union of all scheduler control command
   15.20 - * structures.
    15.21 + * Generic scheduler control command - used to adjust system-wide scheduler
    15.22 + * parameters.
   15.23   */
   15.24  struct sched_ctl_cmd
   15.25  {
   15.26      unsigned int sched_id;
   15.27 +    int direction;          /* are we getting or putting settings? */
   15.28      
   15.29      union
   15.30      {
   15.31 @@ -40,6 +46,7 @@ struct sched_adjdom_cmd
   15.32  {
   15.33      unsigned int sched_id;
   15.34      domid_t domain;
   15.35 +    int direction;          /* are we getting or putting settings? */
   15.36      
   15.37      union
   15.38      {
   15.39 @@ -53,6 +60,9 @@ struct sched_adjdom_cmd
   15.40  
   15.41          struct atropos_adjdom
   15.42          {
   15.43 +            u64 period;
   15.44 +            u64 slice;
   15.45 +            u64 latency;
   15.46              int xtratime;
   15.47          } atropos;
   15.48      } u;
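
With the new direction field, one sched_adjdom_cmd structure serves both reads
and writes of a domain's Atropos parameters. Below is a short sketch of filling
it for a SCHED_INFO_PUT; the domain ID and timing values are illustrative, the
header path is a placeholder, and the nanosecond unit is assumed from Xen's
s_time_t convention.

    #include "sched_ctl.h"   /* placeholder path for the interface header */

    /* Illustrative only: build a command that sets Atropos parameters. */
    static void example_fill_atropos_cmd(struct sched_adjdom_cmd *cmd, domid_t dom)
    {
        cmd->sched_id  = SCHED_ATROPOS;
        cmd->domain    = dom;
        cmd->direction = SCHED_INFO_PUT;       /* SCHED_INFO_GET reads settings back     */

        cmd->u.atropos.period   = 20000000ULL; /* 20 ms period (assumed to be in ns)     */
        cmd->u.atropos.slice    =  5000000ULL; /*  5 ms of guaranteed CPU per period     */
        cmd->u.atropos.latency  =  1000000ULL; /* re-dispatch latency hint on unblocking */
        cmd->u.atropos.xtratime = 1;           /* may also consume slack time            */
    }

Inside Xen, sched_adjdom() checks both sched_id and the new direction field
before dispatching to the per-scheduler adjdom hook.
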
    16.1 --- a/xen/include/xen/sched-if.h	Fri Mar 26 17:08:45 2004 +0000
    16.2 +++ b/xen/include/xen/sched-if.h	Fri Mar 26 18:32:29 2004 +0000
    16.3 @@ -40,8 +40,7 @@ struct scheduler
    16.4      void         (*free_task)      (struct task_struct *);
    16.5      void         (*rem_task)       (struct task_struct *);
    16.6      void         (*wake_up)        (struct task_struct *);
    16.7 -    /* XXX why does do_block need to return anything at all? */
    16.8 -    long         (*do_block)       (struct task_struct *);
    16.9 +    void         (*do_block)       (struct task_struct *);
   16.10      task_slice_t (*do_schedule)    (s_time_t);
   16.11      int          (*control)        (struct sched_ctl_cmd *);
   16.12      int          (*adjdom)         (struct task_struct *,
   16.13 @@ -50,6 +49,7 @@ struct scheduler
   16.14      void         (*dump_settings)  (void);
   16.15      void         (*dump_cpu_state) (int);
   16.16      void         (*dump_runq_el)   (struct task_struct *);
   16.17 +    int          (*prn_state)      (int);
   16.18  };
   16.19  
   16.20  /* per CPU scheduler information */
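
The new prn_state hook lets each policy name its own private task states;
sched_prn_state() in common/schedule.c prints "Unknown" when the hook returns
non-zero. A hedged sketch of a policy-side implementation, with purely
illustrative state codes and scheduler names:

    /* Illustrative prn_state hook for a hypothetical policy.  Return 0 if the
     * state was recognised and printed, non-zero so that the generic code
     * prints "Unknown" instead. */
    static int example_prn_state(int state)
    {
        switch ( state )
        {
        case 8:                        /* hypothetical policy-private state */
            printk("Waiting (xtra)");
            return 0;
        default:
            return -1;
        }
    }

    /* Wired into the policy's hook table alongside its other entries. */
    struct scheduler sched_example_def = {
        .name      = "Example Scheduler",
        .opt_name  = "example",
        .prn_state = example_prn_state,
        /* ...remaining hooks omitted in this sketch... */
    };
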
    17.1 --- a/xen/include/xen/sched.h	Fri Mar 26 17:08:45 2004 +0000
    17.2 +++ b/xen/include/xen/sched.h	Fri Mar 26 18:32:29 2004 +0000
    17.13 @@ -266,6 +266,7 @@ void sched_add_domain(struct task_struct
   17.14  int  sched_rem_domain(struct task_struct *p);
   17.15  long sched_ctl(struct sched_ctl_cmd *);
   17.16  long sched_adjdom(struct sched_adjdom_cmd *);
    17.17 +int  sched_id(void);
   17.18  void init_idle_task(void);
   17.19  void __wake_up(struct task_struct *p);
   17.20  void wake_up(struct task_struct *p);
    17.21 @@ -302,6 +303,7 @@ void startup_cpu_idle_loop(void);
   17.22  void continue_cpu_idle_loop(void);
   17.23  
   17.24  void continue_nonidle_task(void);
   17.25 +void sched_prn_state(int state);
   17.26  
   17.27  /* This task_hash and task_list are protected by the tasklist_lock. */
   17.28  #define TASK_HASH_SIZE 256
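
sched_id() and sched_prn_state() are exported here so that code outside the
scheduler proper (the dom0 op handler and the keyhandler dumps touched by this
changeset) can use them. A hedged sketch of a caller; the function name is
illustrative and the caller is assumed to hold whatever task references or
locks it needs.

    /* Illustrative only: print one domain's scheduling state via the new helpers. */
    static void example_dump_one(struct task_struct *p)
    {
        printk("sched %d, domain %llu: ", sched_id(), p->domain);
        sched_prn_state(p->state);
        printk("\n");
    }
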