ia64/xen-unstable

changeset 3449:52e7357311c2

bitkeeper revision 1.1159.221.1 (41ebbfeezjfpGp20MD6f6Xl2r6Wd3Q)

Added new simple EDF scheduler
author sd386@font.cl.cam.ac.uk
date Mon Jan 17 13:38:54 2005 +0000 (2005-01-17)
parents d01d0f3f5ecc
children f8e1f285e41f
files .rootkeys tools/libxc/Makefile tools/libxc/xc.h tools/libxc/xc_sedf.c tools/python/xen/lowlevel/xc/xc.c tools/python/xen/xend/XendClient.py tools/python/xen/xend/XendDomain.py tools/python/xen/xend/server/SrvDomain.py tools/python/xen/xm/main.py xen/common/sched_sedf.c xen/common/schedule.c xen/include/public/sched_ctl.h
line diff
     1.1 --- a/.rootkeys	Tue Nov 30 15:03:06 2004 +0000
     1.2 +++ b/.rootkeys	Mon Jan 17 13:38:54 2005 +0000
     1.3 @@ -336,6 +336,7 @@ 4051bce6CHAsYh8P5t2OHDtRWOP9og tools/lib
     1.4  3fbba6dctWRWlFJkYb6hdix2X4WMuw tools/libxc/xc_private.c
     1.5  3fbba6dcbVrG2hPzEzwdeV_UC8kydQ tools/libxc/xc_private.h
     1.6  40589968UQFnJeOMn8UIFLbXBuwXjw tools/libxc/xc_rrobin.c
     1.7 +41ebbfe9U0b0kI-HgjK7VEY4EvW7_w tools/libxc/xc_sedf.c
     1.8  40e1b09dMYB4ItGCqcMIzirdMd9I-w tools/libxutil/Makefile
     1.9  40e033325Sjqs-_4TuzeUEprP_gYFg tools/libxutil/allocate.c
    1.10  40e03332KYz7o1bn2MG_KPbBlyoIMA tools/libxutil/allocate.h
    1.11 @@ -715,6 +716,7 @@ 3ddb79bdHqdQpATqC0rmUZNbsb6L6A xen/commo
    1.12  4064773cJ31vZt-zhbSoxqft1Jaw0w xen/common/sched_atropos.c
    1.13  40589968dD2D1aejwSOvrROg7fOvGQ xen/common/sched_bvt.c
    1.14  40589968be_t_n0-w6ggceW7h-sx0w xen/common/sched_rrobin.c
    1.15 +41ebbfe9oF1BF3cH5v7yE3eOL9uPbA xen/common/sched_sedf.c
    1.16  3e397e6619PgAfBbw2XFbXkewvUWgw xen/common/schedule.c
    1.17  3ddb79bdB9RNMnkQnUyZ5C9hhMSQQw xen/common/slab.c
    1.18  3ddb79bd0gVQYmL2zvuJnldvD0AGxQ xen/common/softirq.c
     2.1 --- a/tools/libxc/Makefile	Tue Nov 30 15:03:06 2004 +0000
     2.2 +++ b/tools/libxc/Makefile	Mon Jan 17 13:38:54 2005 +0000
     2.3 @@ -12,6 +12,7 @@ vpath %c       $(XEN_LIBXUTIL)
     2.4  INCLUDES += -I $(XEN_LIBXUTIL)
     2.5  
     2.6  SRCS     :=
     2.7 +SRCS     += xc_sedf.c
     2.8  SRCS     += xc_atropos.c
     2.9  SRCS     += xc_bvtsched.c
    2.10  SRCS     += xc_domain.c
     3.1 --- a/tools/libxc/xc.h	Tue Nov 30 15:03:06 2004 +0000
     3.2 +++ b/tools/libxc/xc.h	Mon Jan 17 13:38:54 2005 +0000
     3.3 @@ -132,6 +132,14 @@ int xc_rrobin_global_set(int xc_handle, 
     3.4  
     3.5  int xc_rrobin_global_get(int xc_handle, u64 *slice);
     3.6  
     3.7 +int xc_sedf_domain_set(int xc_handle,
     3.8 +                          u32 domid,
     3.9 +                          u64 period, u64 slice);
    3.10 +
    3.11 +int xc_sedf_domain_get(int xc_handle,
    3.12 +                          u32 domid,
     3.13 +                          u64 *period, u64 *slice);
    3.14 +
    3.15  typedef evtchn_status_t xc_evtchn_status_t;
    3.16  int xc_evtchn_alloc_unbound(int xc_handle,
    3.17                              u32 dom,
     4.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     4.2 +++ b/tools/libxc/xc_sedf.c	Mon Jan 17 13:38:54 2005 +0000
     4.3 @@ -0,0 +1,45 @@
     4.4 +/******************************************************************************
     4.5 + * xc_sedf.c
     4.6 + * 
     4.7 + * API for manipulating parameters of the Simple EDF scheduler.
     4.8 + * 
     4.9 + * changes by Stephan Diestelhorst
    4.10 + * based on code
    4.11 + * by Mark Williamson, Copyright (c) 2004 Intel Research Cambridge.
    4.12 + */
    4.13 +
    4.14 +#include "xc_private.h"
    4.15 +
    4.16 +int xc_sedf_domain_set(int xc_handle,
    4.17 +                          u32 domid, u64 period, u64 slice)
    4.18 +{
    4.19 +    dom0_op_t op;
    4.20 +    struct sedf_adjdom *p = &op.u.adjustdom.u.sedf;
    4.21 +
    4.22 +    op.cmd = DOM0_ADJUSTDOM;
    4.23 +    op.u.adjustdom.domain  = (domid_t)domid;
    4.24 +    op.u.adjustdom.sched_id = SCHED_SEDF;
    4.25 +    op.u.adjustdom.direction = SCHED_INFO_PUT;
    4.26 +
    4.27 +    p->period   = period;
    4.28 +    p->slice    = slice;
    4.29 +    return do_dom0_op(xc_handle, &op);
    4.30 +}
    4.31 +
    4.32 +int xc_sedf_domain_get(int xc_handle, u32 domid, u64 *period, u64 *slice)
    4.33 +{
    4.34 +    dom0_op_t op;
    4.35 +    int ret;
    4.36 +    struct sedf_adjdom *p = &op.u.adjustdom.u.sedf;
    4.37 +
    4.38 +    op.cmd = DOM0_ADJUSTDOM;    
    4.39 +    op.u.adjustdom.domain = (domid_t)domid;
    4.40 +    op.u.adjustdom.sched_id = SCHED_SEDF;
    4.41 +    op.u.adjustdom.direction = SCHED_INFO_GET;
    4.42 +
    4.43 +    ret = do_dom0_op(xc_handle, &op);
    4.44 +
    4.45 +    *period   = p->period;
    4.46 +    *slice    = p->slice;
    4.47 +    return ret;
    4.48 +}
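
A usage sketch, not part of the patch, of how a management tool might drive the
two new libxc calls. It assumes the usual libxc handle helpers
xc_interface_open() and xc_interface_close(); the period/slice values are
passed straight through to the hypervisor's s_time_t fields, i.e. they are
nanoseconds. The function name sedf_example is hypothetical.

    /* Give a domain 5ms of CPU in every 20ms period, then read it back. */
    #include "xc.h"

    int sedf_example(u32 domid)
    {
        int xc_handle = xc_interface_open();
        u64 period, slice;
        int rc;

        if ( xc_handle < 0 )
            return -1;

        rc = xc_sedf_domain_set(xc_handle, domid,
                                20000000ULL /* period, ns */,
                                 5000000ULL /* slice, ns  */);
        if ( rc == 0 )
            rc = xc_sedf_domain_get(xc_handle, domid, &period, &slice);

        xc_interface_close(xc_handle);
        return rc;
    }
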
     5.1 --- a/tools/python/xen/lowlevel/xc/xc.c	Tue Nov 30 15:03:06 2004 +0000
     5.2 +++ b/tools/python/xen/lowlevel/xc/xc.c	Mon Jan 17 13:38:54 2005 +0000
     5.3 @@ -758,6 +758,50 @@ static PyObject *pyxc_rrobin_global_set(
     5.4      return zero;
     5.5  }
     5.6  
     5.7 +static PyObject *pyxc_sedf_domain_set(PyObject *self,
     5.8 +                                         PyObject *args,
     5.9 +                                         PyObject *kwds)
    5.10 +{
    5.11 +    XcObject *xc = (XcObject *)self;
    5.12 +    u32 domid;
    5.13 +    u64 period, slice;
    5.14 +    
    5.15 +    static char *kwd_list[] = { "dom", "period", "slice", NULL };
    5.16 +    
    5.17 +    if( !PyArg_ParseTupleAndKeywords(args, kwds, "iLL", kwd_list, &domid,
    5.18 +                                     &period, &slice) )
    5.19 +        return NULL;
    5.20 +   
    5.21 +    if ( xc_sedf_domain_set(xc->xc_handle, domid, period, slice) != 0 )
    5.22 +        return PyErr_SetFromErrno(xc_error);
    5.23 +
    5.24 +    Py_INCREF(zero);
    5.25 +    return zero;
    5.26 +}
    5.27 +
    5.28 +static PyObject *pyxc_sedf_domain_get(PyObject *self,
    5.29 +                                         PyObject *args,
    5.30 +                                         PyObject *kwds)
    5.31 +{
    5.32 +    XcObject *xc = (XcObject *)self;
    5.33 +    u32 domid;
    5.34 +    u64 period, slice;
    5.35 +        
    5.36 +    static char *kwd_list[] = { "dom", NULL };
    5.37 +
    5.38 +    if( !PyArg_ParseTupleAndKeywords(args, kwds, "i", kwd_list, &domid) )
    5.39 +        return NULL;
    5.40 +    
    5.41 +    if ( xc_sedf_domain_get( xc->xc_handle, domid, &period,
    5.42 +                                &slice) )
    5.43 +        return PyErr_SetFromErrno(xc_error);
    5.44 +
    5.45 +    return Py_BuildValue("{s:i,s:L,s:L}",
    5.46 +                         "domain",  domid,
    5.47 +                         "period",  period,
    5.48 +                         "slice",   slice);
    5.49 +}
    5.50 +
    5.51  static PyObject *pyxc_shadow_control(PyObject *self,
    5.52                                       PyObject *args,
    5.53                                       PyObject *kwds)
    5.54 @@ -984,6 +1028,26 @@ static PyMethodDef pyxc_methods[] = {
    5.55        "Get Round Robin scheduler settings\n"
    5.56        "Returns [dict]:\n"
    5.57        " slice  [long]: Scheduler time slice.\n" },    
    5.58 +    
    5.59 +    { "sedf_domain_set",
    5.60 +      (PyCFunction)pyxc_sedf_domain_set,
    5.61 +      METH_KEYWORDS, "\n"
     5.62 +      "Set the scheduling parameters for a domain running under the SEDF scheduler.\n"
    5.63 +      " dom      [int]:  domain to set\n"
    5.64 +      " period   [long]: domain's scheduling period\n"
    5.65 +      " slice    [long]: domain's slice per period\n"
    5.66 +      "Returns: [int] 0 on success; -1 on error.\n" },
    5.67 +
    5.68 +    { "sedf_domain_get",
    5.69 +      (PyCFunction)pyxc_sedf_domain_get,
    5.70 +      METH_KEYWORDS, "\n"
    5.71 +      "Get the current scheduling parameters for a domain when running with\n"
     5.72 +      "the SEDF scheduler.\n"
    5.73 +      " dom      [int]: domain to query\n"
    5.74 +      "Returns:  [dict]\n"
    5.75 +      " domain   [int]: domain ID\n"
    5.76 +      " period   [long]: scheduler period\n"
    5.77 +      " slice    [long]: CPU reservation per period\n"},
    5.78  
    5.79      { "evtchn_alloc_unbound", 
    5.80        (PyCFunction)pyxc_evtchn_alloc_unbound,
     6.1 --- a/tools/python/xen/xend/XendClient.py	Tue Nov 30 15:03:06 2004 +0000
     6.2 +++ b/tools/python/xen/xend/XendClient.py	Mon Jan 17 13:38:54 2005 +0000
     6.3 @@ -272,6 +272,12 @@ class Xend:
     6.4                                'latency' : latency,
     6.5                                'xtratime': xtratime })
     6.6  
     6.7 +    def xend_domain_cpu_sedf_set(self, id, period, slice):
     6.8 +        return self.xendPost(self.domainurl(id),
     6.9 +                             {'op'      : 'cpu_sedf_set',
    6.10 +                              'period'  : period,
    6.11 +                              'slice'   : slice })
    6.12 +
    6.13      def xend_domain_maxmem_set(self, id, memory):
    6.14          return self.xendPost(self.domainurl(id),
    6.15                               { 'op'     : 'maxmem_set',
     7.1 --- a/tools/python/xen/xend/XendDomain.py	Tue Nov 30 15:03:06 2004 +0000
     7.2 +++ b/tools/python/xen/xend/XendDomain.py	Mon Jan 17 13:38:54 2005 +0000
     7.3 @@ -659,6 +659,24 @@ class XendDomain:
     7.4              return xc.atropos_domain_get(dominfo.dom)
     7.5          except Exception, ex:
     7.6              raise XendError(str(ex))
     7.7 +    
     7.8 +    def domain_cpu_sedf_set(self, id, period, slice):
      7.9 +        """Set simple EDF scheduler parameters for a domain.
    7.10 +        """
    7.11 +        dominfo = self.domain_lookup(id)
    7.12 +        try:
    7.13 +            return xc.sedf_domain_set(dominfo.dom, period, slice)
    7.14 +        except Exception, ex:
    7.15 +            raise XendError(str(ex))
    7.16 +
    7.17 +    def domain_cpu_sedf_get(self, id):
     7.18 +        """Get simple EDF scheduler parameters for a domain.
    7.19 +        """
    7.20 +        dominfo = self.domain_lookup(id)
    7.21 +        try:
    7.22 +            return xc.sedf_domain_get(dominfo.dom)
    7.23 +        except Exception, ex:
    7.24 +            raise XendError(str(ex))
    7.25  
    7.26      def domain_device_create(self, id, devconfig):
    7.27          """Create a new device for a domain.
     8.1 --- a/tools/python/xen/xend/server/SrvDomain.py	Tue Nov 30 15:03:06 2004 +0000
     8.2 +++ b/tools/python/xen/xend/server/SrvDomain.py	Mon Jan 17 13:38:54 2005 +0000
     8.3 @@ -132,6 +132,14 @@ class SrvDomain(SrvDir):
     8.4                       ['xtratime', 'int']])
     8.5          val = fn(req.args, {'dom': self.dom.id})
     8.6          return val
     8.7 +    
     8.8 +    def op_cpu_sedf_set(self, op, req):
     8.9 +        fn = FormFn(self.xd.domain_cpu_sedf_set,
    8.10 +                    [['dom', 'str'],
    8.11 +                     ['period', 'int'],
    8.12 +                     ['slice', 'int']])
    8.13 +        val = fn(req.args, {'dom': self.dom.id})
    8.14 +        return val
    8.15  
    8.16      def op_maxmem_set(self, op, req):
    8.17          fn = FormFn(self.xd.domain_maxmem_set,
     9.1 --- a/tools/python/xen/xm/main.py	Tue Nov 30 15:03:06 2004 +0000
     9.2 +++ b/tools/python/xen/xm/main.py	Mon Jan 17 13:38:54 2005 +0000
     9.3 @@ -587,6 +587,23 @@ class ProgRrobin(Prog):
     9.4  
     9.5  xm.prog(ProgRrobin)
     9.6  
     9.7 +class ProgSedf(Prog):
     9.8 +    group = 'scheduler'
      9.9 +    name = "sedf"
    9.10 +    info = """Set simple EDF parameters."""
    9.11 +
    9.12 +    def help(self, args):
    9.13 +        print args[0], "DOM PERIOD SLICE"
    9.14 +        print "\nSet simple EDF parameters."
    9.15 +
    9.16 +    def main(self, args):
    9.17 +        if len(args) != 4: self.err("%s: Invalid argument(s)" % args[0])
    9.18 +        dom = args[1]
    9.19 +        v = map(int, args[2:4])
    9.20 +        server.xend_domain_cpu_sedf_set(dom, *v)
    9.21 +
    9.22 +xm.prog(ProgSedf)
    9.23 +
    9.24  class ProgInfo(Prog):
    9.25      group = 'host'
    9.26      name = "info"
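
A quick usage note on the ProgSedf command added above, not part of the patch:
the PERIOD and SLICE arguments are plain integers forwarded unchanged through
xend down to the hypercall, where they land in s_time_t fields, i.e. they are
nanoseconds. Assuming a running domain with id 1, reserving 5ms of every 20ms
would look like:

    xm sedf 1 20000000 5000000
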
    10.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    10.2 +++ b/xen/common/sched_sedf.c	Mon Jan 17 13:38:54 2005 +0000
    10.3 @@ -0,0 +1,505 @@
    10.4 +/****************************************************************************
    10.5 + * Simple EDF scheduler for xen
    10.6 + *
    10.7 + * by Stephan Diestelhorst (C)  2004 Cambridge University
    10.8 + * based on code by Mark Williamson (C) 2004 Intel Research Cambridge
    10.9 + */
   10.10 +
   10.11 +#include <xen/sched.h>
   10.12 +#include <xen/sched-if.h>
   10.13 +#include <public/sched_ctl.h>
   10.14 +#include <xen/ac_timer.h>
   10.15 +#include <xen/softirq.h>
   10.16 +#include <xen/time.h>
   10.17 +#include <xen/slab.h>
   10.18 +
   10.19 +#define SEDFLEVEL 0
   10.20 +#define PRINT(_f, _a...)  \
    10.21 +	do { if ((_f)<=SEDFLEVEL) printk(_a); } while ( 0 )
   10.22 +
   10.23 +/*
   10.24 +	TODO:
   10.25 +	TESTING!
   10.26 +	implement stylish features!
   10.27 +	tracing instead of PRINTs
   10.28 +*/
   10.29 +
   10.30 +
   10.31 +#define TRC_SEDF 0xBEEF0000
   10.32 +
   10.33 +struct sedf_dom_info
   10.34 +{
   10.35 +	struct domain		*owner;
   10.36 +	struct list_head	list;
   10.37 +	
   10.38 +	//Parameters for EDF
   10.39 +	s_time_t		period;		//=(relative deadline)
   10.40 +	s_time_t		slice;		//=worst case execution time
   10.41 +	int			extratime;
   10.42 +	//Bookkeeping
   10.43 +	s_time_t		absdead;
   10.44 +	s_time_t		sched_start;
   10.45 +	s_time_t		cputime;
   10.46 +	s_time_t		absblock;
   10.47 +	//Statistics
   10.48 +	s_time_t		block_time_tot;
   10.49 +	s_time_t		penalty_time_tot;
   10.50 +	
   10.51 +};
   10.52 +
   10.53 +struct sedf_cpu_info {
   10.54 +	struct list_head runnableq;
   10.55 +	struct list_head waitq;
   10.56 +};
   10.57 +
   10.58 +#define DOM_INFO(d)	((struct sedf_dom_info *)((d)->sched_priv))
   10.59 +#define CPU_INFO(cpu)	((struct sedf_cpu_info *)schedule_data[cpu].sched_priv)
   10.60 +#define LIST(d)		(&DOM_INFO(d)->list)
   10.61 +#define RUNQ(cpu)   	(&CPU_INFO(cpu)->runnableq)
   10.62 +#define WAITQ(cpu)   	(&CPU_INFO(cpu)->waitq)
   10.63 +#define IDLETASK(cpu)	((struct domain *)schedule_data[cpu].idle)
   10.64 +
   10.65 +static xmem_cache_t *dom_info_cache;
   10.66 +
   10.67 +/*static inline void __add_to_runqueue_head(struct domain *d)
   10.68 +{
   10.69 +    list_add(RUNLIST(d), RUNQUEUE(d->processor));
   10.70 +}
   10.71 +
   10.72 +static inline void __add_to_runqueue_tail(struct domain *d)
   10.73 +{
   10.74 +    list_add_tail(RUNLIST(d), RUNQUEUE(d->processor));
   10.75 +}*/
   10.76 +
   10.77 +static inline void __del_from_queue(struct domain *d)
   10.78 +{
   10.79 +    struct list_head *list = LIST(d);
   10.80 +    list_del(list);
   10.81 +    list->next = NULL;
   10.82 +}
   10.83 +
    10.84 +/* adds a domain to the queue of processes which wait for the beginning of the next period;
    10.85 + * this list is therefore sorted by this time, which is simply absolute deadline - period
   10.86 + */ 
   10.87 +static inline void __add_to_waitqueue_sort(struct domain *d) {
   10.88 +	struct list_head     *cur;
   10.89 +	struct sedf_dom_info *curinf;
   10.90 +	
   10.91 +	PRINT(3,"Adding domain %i (bop= %llu) to waitq\n",d->id,DOM_INFO(d)->absdead - DOM_INFO(d)->period);	
   10.92 +	//iterate through all elements to find our "hole"
   10.93 +	list_for_each(cur,WAITQ(d->processor)){
   10.94 +		curinf = list_entry(cur,struct sedf_dom_info,list);
   10.95 +		if ( DOM_INFO(d)->absdead - DOM_INFO(d)->period < curinf->absdead - curinf->period)
   10.96 +	 		break;
   10.97 +		else
   10.98 +			PRINT(4,"\tbehind domain %i (bop= %llu)\n",curinf->owner->id,curinf->absdead - curinf->period);
   10.99 +	}
   10.100 +	//cur now points at the element before which we'll enqueue
   10.101 +	PRINT(3,"\tlist_add to %p\n",cur->prev);
  10.102 +	list_add(LIST(d),cur->prev);
  10.103 +
  10.104 +}
  10.105 +
  10.106 +/* adds a domain to the queue of processes which have started their current period and are
   10.107 + * runnable (i.e. not blocked, dying, ...). The first element on this list is running on the processor;
  10.108 + * if the list is empty the idle task will run. As we are implementing EDF, this list is sorted by 
  10.109 + * deadlines.
  10.110 + */ 
  10.111 +static inline void __add_to_runqueue_sort(struct domain *d) {
  10.112 +	struct list_head     *cur;
  10.113 +	struct sedf_dom_info *curinf;
  10.114 +
  10.115 +	PRINT(3,"Adding domain %i (deadl= %llu) to runq\n",d->id,DOM_INFO(d)->absdead);	
  10.116 +	//iterate through all elements to find our "hole"
  10.117 +	list_for_each(cur,RUNQ(d->processor)){
  10.118 +		curinf = list_entry(cur,struct sedf_dom_info,list);
  10.119 +		if (DOM_INFO(d)->absdead < curinf->absdead)
  10.120 +	 		break;
  10.121 +		else
  10.122 +			PRINT(4,"\tbehind domain %i (deadl= %llu)\n",curinf->owner->id,curinf->absdead);
  10.123 +	}
   10.124 +	//cur now points at the element before which we'll enqueue
   10.125 +	PRINT(3,"\tlist_add to %p\n",cur->prev);
  10.126 +	list_add(LIST(d),cur->prev);
  10.127 +
  10.128 +}
  10.129 +static inline int __task_on_queue(struct domain *d) {
  10.130 +	return (((LIST(d))->next != NULL) && (LIST(d)->next != LIST(d)));
  10.131 +}
  10.132 +
  10.133 +/* Initialises the queues and creates the domain info cache */
   10.134 +static int sedf_init_scheduler(void) {
  10.135 +	int i;
  10.136 +	PRINT(2,"sedf_init_scheduler was called\n");
  10.137 +	
  10.138 +	for ( i = 0; i < NR_CPUS; i++ ) {
  10.139 +		schedule_data[i].sched_priv = xmalloc(sizeof(struct sedf_cpu_info));
  10.140 +		if ( schedule_data[i].sched_priv == NULL )
  10.141 +			return -1;
  10.142 +		INIT_LIST_HEAD(WAITQ(i));
  10.143 +		INIT_LIST_HEAD(RUNQ(i));
  10.144 +	}
  10.145 +	
  10.146 +	dom_info_cache = xmem_cache_create(
  10.147 +		"SEDF dom info", sizeof(struct sedf_dom_info), 0, 0, 0, NULL);
  10.148 +	if ( dom_info_cache == NULL )
  10.149 +	{
  10.150 +		printk("Could not allocate SLAB cache.\n");
  10.151 +		return -1;
  10.152 +	}
  10.153 +	
  10.154 +	return 0;                                                                
  10.155 +}
  10.156 +
  10.157 +/* Allocates memory for per domain private scheduling data*/
  10.158 +static int sedf_alloc_task(struct domain *d) {
  10.159 +	PRINT(2,"sedf_alloc_task was called, domain-id %i\n",d->id);
  10.160 +	if ( (d->sched_priv = xmem_cache_alloc(dom_info_cache)) == NULL )
  10.161 +		return -1;
  10.162 +	memset(d->sched_priv, 0, sizeof(struct sedf_dom_info));
  10.163 +	return 0;
  10.164 +}
  10.165 +
  10.166 +/* Setup the sedf_dom_info */
  10.167 +static void sedf_add_task(struct domain *d)
  10.168 +{
  10.169 +	//s_time_t now=NOW();
  10.170 +	struct sedf_dom_info *inf=DOM_INFO(d);
  10.171 +	inf->owner = d;
  10.172 +	
  10.173 +	PRINT(2,"sedf_add_task was called, domain-id %i\n",d->id);
  10.174 +	if (d->id==0) {
  10.175 +		//set dom0 to something useful to boot the machine
  10.176 +		inf->period = MILLISECS(20);
  10.177 +		inf->slice  = MILLISECS(15);
  10.178 +		inf->absdead= 0;
  10.179 +	}
  10.180 +	else {
  10.181 +		//other domains don't get any execution time at all in the beginning!
  10.182 +		inf->period = MILLISECS(20);
  10.183 +		inf->slice  = 0;
  10.184 +		inf->absdead= 0;
  10.185 +	}
  10.186 +	INIT_LIST_HEAD(&(inf->list));
  10.187 +}
  10.188 +
  10.189 +/* Frees memory used by domain info */
  10.190 +static void sedf_free_task(struct domain *d)
  10.191 +{
  10.192 +	PRINT(2,"sedf_free_task was called, domain-id %i\n",d->id);
  10.193 +	ASSERT(d->sched_priv != NULL);
  10.194 +	xmem_cache_free(dom_info_cache, d->sched_priv);
  10.195 +}
  10.196 +
  10.197 +/* Initialises idle task */
  10.198 +static int sedf_init_idle_task(struct domain *d) {
  10.199 +	PRINT(2,"sedf_init_idle_task was called, domain-id %i\n",d->id);
  10.200 +	if ( sedf_alloc_task(d) < 0 )
  10.201 +		return -1;
  10.202 +	
  10.203 +	sedf_add_task(d);
  10.204 +	DOM_INFO(d)->absdead=0;
  10.205 +	set_bit(DF_RUNNING, &d->flags);
  10.206 +	//the idle task doesn't have to turn up on any list...
  10.207 +	return 0;
  10.208 +}
  10.209 +
  10.210 +#define MIN(x,y) (((x)<(y))?(x):(y))
  10.211 +/* Main scheduling function
  10.212 + * Reasons for calling this function are:
  10.213 + * -timeslice for the current period used up
   10.214 + * -domain on waitqueue has started its period */
  10.215 +static task_slice_t sedf_do_schedule(s_time_t now)
  10.216 +{
  10.217 +	struct sedf_dom_info *inf   = DOM_INFO(current);
  10.218 +	int                   cpu   = current->processor;
  10.219 +	struct list_head     *runq  = RUNQ(cpu);
  10.220 +	struct list_head     *waitq = WAITQ(cpu);
  10.221 +	struct list_head     *cur,*tmp;
  10.222 +	struct sedf_dom_info *curinf;
  10.223 +	task_slice_t          ret;
  10.224 +	
   10.225 +	//first of all, update the domain's cputime
  10.226 +	inf->cputime += now - inf->sched_start;
  10.227 +	
  10.228 +	//scheduling decisions, which don't involve the running domain
  10.229 +	if (is_idle_task(inf->owner))
  10.230 +		goto check_waitq;				//idle task doesn't get scheduled on the runq
   10.231 +	if ((inf->cputime < inf->slice) && domain_runnable(inf->owner))
  10.232 +		goto check_waitq;				//nothing to do with the running task
  10.233 +	
  10.234 +	//remove tasks that can't run
  10.235 +	__del_from_queue(inf->owner);
  10.236 +		
  10.237 +	//manage bookkeeping
  10.238 +	if (inf->cputime >= inf->slice) {
  10.239 +		inf->absdead += inf->period;
  10.240 +		inf->cputime -= inf->slice;
  10.241 +		if (inf->cputime<0) inf->cputime = 0;
  10.242 +	}
   10.243 +	/*if (inf->absdead < now)
   10.244 +		printk("Domain %i exceeded its deadline! (now: %llu ddl: %llu)\n",current->id,now,inf->absdead);*/
  10.245 +	//add a runnable domain to the waitqueue		
  10.246 +	if (domain_runnable(inf->owner))
  10.247 +		__add_to_waitqueue_sort(inf->owner);
  10.248 +	else
  10.249 +		inf->absblock=now;
  10.250 +		
  10.251 +check_waitq:
   10.252 +	//check whether the first elements of the waitqueue have already started their next period
  10.253 +	list_for_each_safe(cur,tmp,waitq) {
  10.254 +		curinf = list_entry(cur,struct sedf_dom_info,list);
  10.255 +		if (curinf->absdead - curinf->period<=now) {
  10.256 +			__del_from_queue(curinf->owner);
  10.257 +			__add_to_runqueue_sort(curinf->owner);
  10.258 +		}
  10.259 +		else
  10.260 +			break;
  10.261 +	}
  10.262 +	
  10.263 +	//process the runq
  10.264 +	list_for_each_safe(cur,tmp,runq) {
  10.265 +		curinf = list_entry(cur,struct sedf_dom_info,list);
  10.266 +		if (unlikely(curinf->slice == 0)) {
  10.267 +			//special treatment of elements with empty slice
  10.268 +			__del_from_queue(curinf->owner);
  10.269 +			curinf->absdead += curinf->period;
  10.270 +			__add_to_waitqueue_sort(curinf->owner);
  10.271 +		}
  10.272 +		else
  10.273 +			if (unlikely((curinf->absdead < now) || (curinf->cputime > curinf->slice))) {
   10.274 +				//we missed the deadline or the slice was already finished... might happen because of dom_adj.
  10.275 +				//printk("Ouch! Domain %i missed deadline %llu\n",curinf->owner->id,curinf->absdead);
  10.276 +				__del_from_queue(curinf->owner);
  10.277 +				curinf->absdead += ((now - curinf->absdead) / curinf->period + 1) * curinf->period;		
  10.278 +					//force start of period to be in future!
  10.279 +				//curinf->absdead += curinf->period;
  10.280 +				curinf->cputime = 0;
  10.281 +				__add_to_runqueue_sort(curinf->owner);
  10.282 +			}
  10.283 +			else
  10.284 +				break;
  10.285 +	}
  10.286 +	
  10.287 +	//now simply pick the first domain from the runqueue
  10.288 +	struct sedf_dom_info *runinf, *waitinf;
  10.289 +	
  10.290 +	if (!list_empty(runq)) {
  10.291 +		runinf   = list_entry(runq->next,struct sedf_dom_info,list);
  10.292 +		ret.task = runinf->owner;
  10.293 +		if (!list_empty(waitq)) {
   10.294 +			//rerun scheduler when the scheduled domain reaches the end of its slice or the first domain on the waitqueue becomes ready
  10.295 +			waitinf  = list_entry(waitq->next,struct sedf_dom_info,list);
  10.296 +			ret.time = MIN(now + runinf->slice - runinf->cputime,waitinf->absdead - waitinf->period) - now;
  10.297 +		}
  10.298 +		else {
  10.299 +			ret.time = runinf->slice - runinf->cputime;
  10.300 +		}
  10.301 +	}
  10.302 +	else {
   10.303 +		//we have an empty runqueue => let the idle domain run and restart the scheduler when the next task becomes available
  10.304 +		ret.task = IDLETASK(cpu);
  10.305 +		if (!list_empty(waitq)) {
  10.306 +			waitinf = list_entry(waitq->next,struct sedf_dom_info,list);
  10.307 +			ret.time = (waitinf->absdead - waitinf->period) - now;
  10.308 +		}
  10.309 +		else {
   10.310 +			//this could probably never happen, but one never knows...
   10.311 +			//it can: imagine a second CPU, which is pure sci-fi ATM, but one never knows ;)
  10.312 +			ret.time=SECONDS(1);
  10.313 +		}
  10.314 +	}
  10.315 +	if (ret.time<0)
  10.316 +		printk("Ouch! We are seriously BEHIND schedule! %lli\n",ret.time);
  10.317 +	DOM_INFO(ret.task)->sched_start=now;
  10.318 +	return ret;
  10.319 +}
  10.320 +
  10.321 +static void sedf_sleep(struct domain *d) {
  10.322 +	PRINT(2,"sedf_sleep was called, domain-id %i\n",d->id);
  10.323 +	if ( test_bit(DF_RUNNING, &d->flags) )
  10.324 +		cpu_raise_softirq(d->processor, SCHEDULE_SOFTIRQ);
  10.325 +	else if ( __task_on_queue(d) )
  10.326 +		__del_from_queue(d);
  10.327 +}
  10.328 +
   10.329 +/* This function wakes up a domain, i.e. moves it onto the waitqueue.
   10.330 + * Things to mention: admission control is taking place nowhere at
   10.331 + * the moment, so we can't be sure whether it is safe to wake the domain
   10.332 + * up at all. Anyway, even if it is safe (total cpu usage <= 100%) there are
   10.333 + * some considerations on when to allow the domain to wake up and have its
   10.334 + * first deadline...
   10.335 + * I identified 3 cases which could describe the possible behaviour of the
   10.336 + * scheduler, and I'll try to make them clearer:
  10.337 + *
  10.338 + * 1. Very conservative
  10.339 + *     -when a blocked domain unblocks, it is allowed to start execution at
  10.340 + *      the beginning of the next complete period
   10.341 + *      (D..deadline, R..running, B..blocking/sleeping, U..unblocking/waking up)
  10.342 + *
  10.343 + *      DRRB_____D__U_____DRRRRR___D________ ... 
  10.344 + *
   10.345 + *     -this causes the domain to miss a period (and a deadline)
   10.346 + *     -doesn't disturb the schedule at all
   10.347 + *     -deadlines keep occurring isochronously
  10.348 + *
  10.349 + * 2. Conservative Part 1
   10.350 + *     -when a domain unblocks in the same period in which it blocked, it may
   10.351 + *      consume the rest of its original time-slice minus the time it was blocked
  10.352 + *      (assume period=9, slice=5)
  10.353 + *
  10.354 + *      DRB_UR___DRRRRR___D...
  10.355 + *
   10.356 + *     -this also doesn't disturb scheduling, but may mean that the domain
   10.357 + *      can't finish its workload in the period
  10.358 + *
  10.359 + *    Part 2a
   10.360 + *     -it is obvious that such behaviour, applied when the unblocking happens
   10.361 + *      in a later period, works fine as well
  10.362 + *
  10.363 + *      DRB______D___UR___D... 
  10.364 + *
  10.365 + *    Part 2b
  10.366 + *     -if one needs the full slice in the next period, it is necessary to treat the unblocking
  10.367 + *      time as the start of the new period, i.e. move the deadline further back (later)
   10.368 + *     -this doesn't disturb scheduling either, because for EDF, periods can be treated as minimal
   10.369 + *      inter-release times, and scheduling stays correct when deadlines are kept relative to the time
  10.370 + *      the process unblocks
  10.371 + *
  10.372 + *      DRB______D___URRRR___D...
  10.373 + *                       (D) 
   10.374 + *     -problem: deadlines don't occur isochronously anymore
  10.375 + *
  10.376 + * 3. Unconservative (i.e. incorrect)
   10.377 + *     -to boost the performance of I/O-dependent domains it would be possible to put the domain onto
   10.378 + *      the runnable queue immediately and let it run for the remainder of the slice of the current period
   10.379 + *      (or even worse: allocate a new full slice for the domain, and probably tweak the deadline/slice even more)
   10.380 + *     -unlike approaches 1, 2a and 2b, either behaviour can lead to missed deadlines in other domains
  10.381 + */
  10.382 +void sedf_wake(struct domain *d) {
  10.383 +	//for the first try just implement the "very conservative" way of waking domains up
  10.384 +	s_time_t              now = NOW();
  10.385 +	struct sedf_dom_info* inf = DOM_INFO(d);
  10.386 +	
  10.387 +	PRINT(3,"sedf_wake was called, domain-id %i\n",d->id);
  10.388 +	
  10.389 +	if (unlikely(is_idle_task(d)))
  10.390 +		return;
  10.391 +	
  10.392 +	if ( unlikely(__task_on_queue(d)) ) {
  10.393 +		PRINT(3,"\tdomain %i is already in some queue\n",d->id);
  10.394 +		return;
  10.395 +	}
  10.396 +	
  10.397 +	//very conservative way of unblocking
  10.398 +	//make sure that the start of the period for this
  10.399 +	//domain is happening in the future
  10.400 +	PRINT(3,"waking up domain %i (deadl= %llu period= %llu now= %llu)\n",d->id,inf->absdead,inf->period,now);
  10.401 +	inf->absdead += ((now - inf->absdead) / inf->period+1)*inf->period;
  10.402 +	PRINT(3,"waking up domain %i (deadl= %llu period= %llu now= %llu)\n",d->id,inf->absdead,inf->period,now);
  10.403 +	
  10.404 +	__add_to_waitqueue_sort(d);
  10.405 +	PRINT(3,"added to waitq\n");	
  10.406 +	
  10.407 +	//TODO: Implement more fancy unblocking schemes!
  10.408 +	/*if (now < inf->absdead) {
  10.409 +		//short blocking
  10.410 +	}
  10.411 +	else {
  10.412 +		//long blocking 
  10.413 +	}*/
  10.414 +	
  10.415 +	//do some statistics here...
  10.416 +	if (inf->absblock!=0) {
  10.417 +		inf->block_time_tot += now - inf->absblock;
   10.418 +		inf->penalty_time_tot += (inf->absdead - inf->period) - inf->absblock;
  10.419 +		/*if (DOM_INFO(d)->block_time_tot)
  10.420 +			PRINT(3,"penalty: %lu\n",(DOM_INFO(d)->penalty_time_tot*100)/DOM_INFO(d)->block_time_tot);*/
  10.421 +	}
  10.422 +	/*if ( is_idle_task(schedule_data[d->processor].curr)) {
  10.423 +		cpu_raise_softirq(d->processor, SCHEDULE_SOFTIRQ);
  10.424 +		return;
  10.425 +	}*/
  10.426 +	
  10.427 +	//check whether the awakened task needs to get scheduled before the next sched. decision
   10.428 +	if (inf->absdead - inf->period < schedule_data[d->processor].s_timer.expires)
  10.429 +		cpu_raise_softirq(d->processor, SCHEDULE_SOFTIRQ);
  10.430 +}
  10.431 +
  10.432 +
  10.433 +/* This could probably be a bit more specific!*/
  10.434 +static void sedf_dump_domain(struct domain *d) {
  10.435 +	printk("%u has=%c ", d->id,
  10.436 +		test_bit(DF_RUNNING, &d->flags) ? 'T':'F');
  10.437 +	printk("c=%llu p=%llu sl=%llu ddl=%llu", d->cpu_time,DOM_INFO(d)->period,DOM_INFO(d)->slice,DOM_INFO(d)->absdead);
  10.438 +	if (DOM_INFO(d)->block_time_tot!=0)
   10.439 +		printk(" penalty: %lu",(DOM_INFO(d)->penalty_time_tot*100)/DOM_INFO(d)->block_time_tot);
   10.440 +	printk("\n");
  10.441 +}
  10.442 +
  10.443 +static void sedf_dump_cpu_state(int i)
  10.444 +{
  10.445 +    struct list_head *list, *queue;
  10.446 +    int loop = 0;
  10.447 +    struct sedf_dom_info *d_inf;
  10.448 +
  10.449 +    printk("now=%llu\n",NOW());
  10.450 +    queue = RUNQ(i);
  10.451 +    printk("RUNQ rq %lx   n: %lx, p: %lx\n",  (unsigned long)queue,
  10.452 +        (unsigned long) queue->next, (unsigned long) queue->prev);
  10.453 +    list_for_each ( list, queue )
  10.454 +    {
  10.455 +        printk("%3d: ",loop++);
  10.456 +        d_inf = list_entry(list, struct sedf_dom_info, list);
  10.457 +        sedf_dump_domain(d_inf->owner);
  10.458 +    }
  10.459 +    
  10.460 +    queue = WAITQ(i);
  10.461 +    printk("\nWAITQ rq %lx   n: %lx, p: %lx\n",  (unsigned long)queue,
  10.462 +        (unsigned long) queue->next, (unsigned long) queue->prev);
  10.463 +    list_for_each ( list, queue )
  10.464 +    {
  10.465 +        printk("%3d: ",loop++);
  10.466 +        d_inf = list_entry(list, struct sedf_dom_info, list);
  10.467 +        sedf_dump_domain(d_inf->owner);
  10.468 +    }
  10.469 +
  10.470 +}
  10.471 +
  10.472 +/* set or fetch domain scheduling parameters */
  10.473 +static int sedf_adjdom(struct domain *p, struct sched_adjdom_cmd *cmd) {
  10.474 +	PRINT(2,"sedf_adjdom was called, domain-id %i new period %llu new slice %llu\n",p->id,cmd->u.sedf.period,cmd->u.sedf.slice);
  10.475 +	if ( cmd->direction == SCHED_INFO_PUT )
  10.476 +	{
  10.477 +		/* sanity checking! */
   10.478 +		if ( cmd->u.sedf.slice > cmd->u.sedf.period )
   10.479 +			return -EINVAL;
  10.480 +		
  10.481 +		DOM_INFO(p)->period   = cmd->u.sedf.period;
  10.482 +		DOM_INFO(p)->slice    = cmd->u.sedf.slice;
  10.483 +	}
  10.484 +	else if ( cmd->direction == SCHED_INFO_GET )
  10.485 +	{
  10.486 +		cmd->u.sedf.period   = DOM_INFO(p)->period;
  10.487 +		cmd->u.sedf.slice    = DOM_INFO(p)->slice;
  10.488 +	}
  10.489 +	PRINT(2,"sedf_adjdom_finished\n");
  10.490 +	return 0;
  10.491 +}
  10.492 +
  10.493 +struct scheduler sched_sedf_def = {
  10.494 +    .name     = "Simple EDF Scheduler",
  10.495 +    .opt_name = "sedf",
  10.496 +    .sched_id = SCHED_SEDF,
  10.497 +    
  10.498 +    .init_idle_task = sedf_init_idle_task,
  10.499 +    .alloc_task     = sedf_alloc_task,
  10.500 +    .add_task       = sedf_add_task,
  10.501 +    .free_task      = sedf_free_task,
  10.502 +    .init_scheduler = sedf_init_scheduler,
  10.503 +    .do_schedule    = sedf_do_schedule,
  10.504 +    .dump_cpu_state = sedf_dump_cpu_state,
  10.505 +    .sleep          = sedf_sleep,
  10.506 +    .wake           = sedf_wake,
  10.507 +    .adjdom         = sedf_adjdom,
  10.508 +};
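
Two notes on the scheduler above; neither the worked numbers nor the sketch
below are part of the patch.

First, the deadline-advance expression used in both sedf_do_schedule() and
sedf_wake(),

    inf->absdead += ((now - inf->absdead) / inf->period + 1) * inf->period;

moves absdead forward to the first deadline strictly after `now` (assuming
absdead <= now). For example, with absdead = 10ms, period = 20ms and
now = 73ms: (73-10)/20 + 1 = 4, so the deadline becomes 10 + 4*20 = 90ms.

Second, the comment on sedf_wake() notes that admission control happens
nowhere yet. A hedged sketch of a Liu/Layland-style utilisation test over the
structures this patch introduces might look as follows; sedf_would_admit is a
hypothetical helper. It uses fixed-point arithmetic (scale 1000) to avoid
floating point in the hypervisor, and it only sees domains currently on the
run/wait queues; blocked domains sit on neither queue, so a real test would
also have to track those.

    /* Sketch: would admitting (new_period, new_slice) on this CPU keep the
     * summed utilisation slice/period at or below 100%? */
    static int sedf_would_admit(int cpu, s_time_t new_period, s_time_t new_slice)
    {
        struct list_head     *cur;
        struct sedf_dom_info *curinf;
        s_time_t util;

        if (new_period == 0)
            return 0;
        util = (new_slice * 1000) / new_period;

        list_for_each(cur, RUNQ(cpu)) {
            curinf = list_entry(cur, struct sedf_dom_info, list);
            if (curinf->period)
                util += (curinf->slice * 1000) / curinf->period;
        }
        list_for_each(cur, WAITQ(cpu)) {
            curinf = list_entry(cur, struct sedf_dom_info, list);
            if (curinf->period)
                util += (curinf->slice * 1000) / curinf->period;
        }
        return util <= 1000;
    }
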
    11.1 --- a/xen/common/schedule.c	Tue Nov 30 15:03:06 2004 +0000
    11.2 +++ b/xen/common/schedule.c	Mon Jan 17 13:38:54 2005 +0000
    11.3 @@ -68,10 +68,12 @@ schedule_data_t schedule_data[NR_CPUS];
    11.4  extern struct scheduler sched_bvt_def;
    11.5  extern struct scheduler sched_rrobin_def;
    11.6  extern struct scheduler sched_atropos_def;
    11.7 +extern struct scheduler sched_sedf_def;
    11.8  static struct scheduler *schedulers[] = { 
    11.9      &sched_bvt_def,
   11.10      &sched_rrobin_def,
   11.11      &sched_atropos_def,
   11.12 +    &sched_sedf_def,
   11.13      NULL
   11.14  };
   11.15  
   11.16 @@ -285,7 +287,7 @@ long sched_adjdom(struct sched_adjdom_cm
   11.17  
   11.18      if ( cmd->sched_id != ops.sched_id )
   11.19          return -EINVAL;
   11.20 -
   11.21 +    
   11.22      if ( cmd->direction != SCHED_INFO_PUT && cmd->direction != SCHED_INFO_GET )
   11.23          return -EINVAL;
   11.24  
    12.1 --- a/xen/include/public/sched_ctl.h	Tue Nov 30 15:03:06 2004 +0000
    12.2 +++ b/xen/include/public/sched_ctl.h	Mon Jan 17 13:38:54 2005 +0000
    12.3 @@ -11,6 +11,7 @@
    12.4  #define SCHED_BVT      0
    12.5  #define SCHED_ATROPOS  2
    12.6  #define SCHED_RROBIN   3
    12.7 +#define SCHED_SEDF     4
    12.8  
    12.9  /* these describe the intended direction used for a scheduler control or domain
   12.10   * command */
   12.11 @@ -64,6 +65,15 @@ struct sched_adjdom_cmd
   12.12              u64 latency;    /* 32 */
   12.13              u32 xtratime;   /* 36 */
   12.14          } PACKED atropos;
    12.15 +
    12.16 +        struct sedf_adjdom
   12.17 +        {
   12.18 +            u64 period;     /* 16 */
   12.19 +            u64 slice;      /* 24 */
   12.20 +            u64 dummy1;     /* 32 */
   12.21 +            u32 dummy2;     /* 36 */
   12.22 +        } PACKED sedf;
   12.23 +
   12.24      } PACKED u;
   12.25  } PACKED; /* 40 bytes */
   12.26
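
A layout note on the hunk above, with an illustrative check that is not part
of the patch: sedf_adjdom mirrors the atropos entry's footprint, using dummy1
and dummy2 to pad the member out to the same 36-byte offset so the PACKED
union, and hence the 40-byte sched_adjdom_cmd, keeps its size across
schedulers. Per the offset comments (16..36), the packed struct itself is 28
bytes, which a compile-time assertion in the style of the era (the
negative-array-size trick; the typedef name is hypothetical) could pin down:

    /* Sketch: fail the build if the packed sedf entry ever changes size
     * (28 = 8 + 8 + 8 + 4 bytes, matching the 16..36 offset comments). */
    typedef int sedf_adjdom_size_check[(sizeof(struct sedf_adjdom) == 28) ? 1 : -1];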