direct-io.hg

changeset 7378:bd3268de4145

Store an opaque handle (tools uuid) in the domain structure
within Xen. Refactor GETVCPUCONTEXT into an op of the same
name plus a new op GETVCPUINFO. Move the cpumap and cpu info
arrays out of GETDOMAININFO and into the new GETVCPUINFO.

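As a rough sketch of how a tools-side caller sees the interface after this change
(prototypes are the ones modified in xenctrl.h below; the xc_handle, the include
path and the helper name create_and_pin are illustrative assumptions, and error
handling is abbreviated):

    /* Illustrative sketch only: pass the new opaque 16-byte handle at domain
     * creation and pin a vcpu using the by-value cpumap, per the prototypes
     * changed in this patch.  xc_handle is assumed to come from the usual
     * libxc interface-open call. */
    #include <string.h>
    #include <stdio.h>
    #include "xenctrl.h"

    int create_and_pin(int xc_handle, uint32_t ssidref)
    {
        uint32_t domid = 0;                /* 0 => auto-allocate domain id   */
        xen_domain_handle_t handle;        /* opaque tools uuid, uint8_t[16] */
        cpumap_t cpumap = 1;               /* restrict vcpu 0 to cpu 0       */

        memset(handle, 0, sizeof(handle)); /* tools would store a real uuid  */

        if ( xc_domain_create(xc_handle, ssidref, handle, &domid) != 0 )
            return -1;

        /* cpumap is now passed by value rather than through a pointer. */
        if ( xc_domain_pincpu(xc_handle, domid, 0, cpumap) != 0 )
            return -1;

        printf("created domain %u\n", domid);
        return 0;
    }
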
Signed-off-by: Keir Fraser <keir@xensource.com>
author kaf24@firebug.cl.cam.ac.uk
date Fri Oct 14 15:40:48 2005 +0100 (2005-10-14)
parents 70aa62954e91
children d48bc069122c
files tools/libxc/xc_core.c tools/libxc/xc_domain.c tools/libxc/xc_private.c tools/libxc/xenctrl.h tools/python/xen/lowlevel/xc/xc.c tools/python/xen/xend/XendDomainInfo.py tools/python/xen/xm/main.py tools/xenstat/libxenstat/src/xen-interface.c xen/arch/x86/setup.c xen/common/dom0_ops.c xen/common/keyhandler.c xen/include/public/dom0_ops.h xen/include/public/xen.h xen/include/xen/sched.h
line diff
     1.1 --- a/tools/libxc/xc_core.c	Fri Oct 14 01:42:34 2005 +0100
     1.2 +++ b/tools/libxc/xc_core.c	Fri Oct 14 15:40:48 2005 +0100
     1.3 @@ -33,7 +33,7 @@ xc_domain_dumpcore(int xc_handle,
     1.4      unsigned long nr_pages;
     1.5      unsigned long *page_array;
     1.6      xc_dominfo_t info;
     1.7 -    int i, j, vcpu_map_size, dump_fd;
     1.8 +    int i, j, dump_fd;
     1.9      char *dump_mem, *dump_mem_start = NULL;
    1.10      struct xc_core_header header;
    1.11      vcpu_guest_context_t     ctxt[MAX_VIRT_CPUS];
    1.12 @@ -54,18 +54,9 @@ xc_domain_dumpcore(int xc_handle,
    1.13          goto error_out;
    1.14      }
    1.15   
    1.16 -    vcpu_map_size =  sizeof(info.vcpu_to_cpu) / sizeof(info.vcpu_to_cpu[0]);
    1.17 -
    1.18 -    for (i = 0, j = 0; i < vcpu_map_size; i++) {
    1.19 -        if (info.vcpu_to_cpu[i] == -1) {
    1.20 -            continue;
    1.21 -        }
    1.22 -        if (xc_domain_get_vcpu_context(xc_handle, domid, i, &ctxt[j])) {
    1.23 -            PERROR("Could not get all vcpu contexts for domain");
    1.24 -            goto error_out;
    1.25 -        }
    1.26 -        j++;
    1.27 -    }
    1.28 +    for (i = 0, j = 0; i < 32; i++)
    1.29 +        if (xc_domain_get_vcpu_context(xc_handle, domid, i, &ctxt[j]) == 0)
    1.30 +            j++;
    1.31   
    1.32      nr_pages = info.nr_pages;
    1.33  
     2.1 --- a/tools/libxc/xc_domain.c	Fri Oct 14 01:42:34 2005 +0100
     2.2 +++ b/tools/libxc/xc_domain.c	Fri Oct 14 15:40:48 2005 +0100
     2.3 @@ -11,6 +11,7 @@
     2.4  
     2.5  int xc_domain_create(int xc_handle,
     2.6                       uint32_t ssidref,
     2.7 +                     xen_domain_handle_t handle,
     2.8                       uint32_t *pdomid)
     2.9  {
    2.10      int err;
    2.11 @@ -19,6 +20,7 @@ int xc_domain_create(int xc_handle,
    2.12      op.cmd = DOM0_CREATEDOMAIN;
    2.13      op.u.createdomain.domain = (domid_t)*pdomid;
    2.14      op.u.createdomain.ssidref = ssidref;
    2.15 +    memcpy(op.u.createdomain.handle, handle, sizeof(xen_domain_handle_t));
    2.16      if ( (err = do_dom0_op(xc_handle, &op)) != 0 )
    2.17          return err;
    2.18  
    2.19 @@ -59,7 +61,7 @@ int xc_domain_destroy(int xc_handle,
    2.20  int xc_domain_pincpu(int xc_handle,
    2.21                       uint32_t domid, 
    2.22                       int vcpu,
    2.23 -                     cpumap_t *cpumap)
    2.24 +                     cpumap_t cpumap)
    2.25  {
    2.26      dom0_op_t op;
    2.27      op.cmd = DOM0_PINCPUDOMAIN;
    2.28 @@ -112,10 +114,9 @@ int xc_domain_getinfo(int xc_handle,
    2.29          info->shared_info_frame = op.u.getdomaininfo.shared_info_frame;
    2.30          info->cpu_time = op.u.getdomaininfo.cpu_time;
    2.31          info->vcpus = op.u.getdomaininfo.n_vcpu;
    2.32 -        memcpy(&info->vcpu_to_cpu, &op.u.getdomaininfo.vcpu_to_cpu, 
    2.33 -               sizeof(info->vcpu_to_cpu));
    2.34 -        memcpy(&info->cpumap, &op.u.getdomaininfo.cpumap, 
    2.35 -               sizeof(info->cpumap));
    2.36 +
    2.37 +        memcpy(info->handle, op.u.getdomaininfo.handle,
    2.38 +               sizeof(xen_domain_handle_t));
    2.39  
    2.40          next_domid = (uint16_t)op.u.getdomaininfo.domain + 1;
    2.41          info++;
    2.42 @@ -166,19 +167,14 @@ int xc_domain_get_vcpu_context(int xc_ha
    2.43      op.u.getvcpucontext.vcpu   = (uint16_t)vcpu;
    2.44      op.u.getvcpucontext.ctxt   = ctxt;
    2.45  
    2.46 -    if ( (ctxt != NULL) &&
    2.47 -         ((rc = mlock(ctxt, sizeof(*ctxt))) != 0) )
    2.48 +    if ( (rc = mlock(ctxt, sizeof(*ctxt))) != 0 )
    2.49          return rc;
    2.50  
    2.51      rc = do_dom0_op(xc_handle, &op);
    2.52  
    2.53 -    if ( ctxt != NULL )
    2.54 -        safe_munlock(ctxt, sizeof(*ctxt));
    2.55 +    safe_munlock(ctxt, sizeof(*ctxt));
    2.56  
    2.57 -    if ( rc > 0 )
    2.58 -        return -ESRCH;
    2.59 -    else
    2.60 -        return rc;
    2.61 +    return rc;
    2.62  }
    2.63  
    2.64  
     3.1 --- a/tools/libxc/xc_private.c	Fri Oct 14 01:42:34 2005 +0100
     3.2 +++ b/tools/libxc/xc_private.c	Fri Oct 14 15:40:48 2005 +0100
     3.3 @@ -256,16 +256,15 @@ long long xc_domain_get_cpu_usage( int x
     3.4  {
     3.5      dom0_op_t op;
     3.6  
     3.7 -    op.cmd = DOM0_GETVCPUCONTEXT;
     3.8 -    op.u.getvcpucontext.domain = (domid_t)domid;
     3.9 -    op.u.getvcpucontext.vcpu   = (uint16_t)vcpu;
    3.10 -    op.u.getvcpucontext.ctxt   = NULL;
    3.11 +    op.cmd = DOM0_GETVCPUINFO;
    3.12 +    op.u.getvcpuinfo.domain = (domid_t)domid;
    3.13 +    op.u.getvcpuinfo.vcpu   = (uint16_t)vcpu;
    3.14      if ( (do_dom0_op(xc_handle, &op) < 0) )
    3.15      {
    3.16          PERROR("Could not get info on domain");
    3.17          return -1;
    3.18      }
    3.19 -    return op.u.getvcpucontext.cpu_time;
    3.20 +    return op.u.getvcpuinfo.cpu_time;
    3.21  }
    3.22  
    3.23  
     4.1 --- a/tools/libxc/xenctrl.h	Fri Oct 14 01:42:34 2005 +0100
     4.2 +++ b/tools/libxc/xenctrl.h	Fri Oct 14 15:40:48 2005 +0100
     4.3 @@ -123,23 +123,23 @@ int xc_waitdomain_core(
     4.4   */
     4.5  
     4.6  typedef struct {
     4.7 -    uint32_t           domid;
     4.8 -    uint32_t           ssidref;
     4.9 +    uint32_t      domid;
    4.10 +    uint32_t      ssidref;
    4.11      unsigned int  dying:1, crashed:1, shutdown:1, 
    4.12                    paused:1, blocked:1, running:1;
    4.13      unsigned int  shutdown_reason; /* only meaningful if shutdown==1 */
    4.14      unsigned long nr_pages;
    4.15      unsigned long shared_info_frame;
    4.16 -    uint64_t           cpu_time;
    4.17 +    uint64_t      cpu_time;
    4.18      unsigned long max_memkb;
    4.19      unsigned int  vcpus;
    4.20 -    int32_t           vcpu_to_cpu[MAX_VIRT_CPUS];
    4.21 -    cpumap_t      cpumap[MAX_VIRT_CPUS];
    4.22 +    xen_domain_handle_t handle;
    4.23  } xc_dominfo_t;
    4.24  
    4.25  typedef dom0_getdomaininfo_t xc_domaininfo_t;
    4.26  int xc_domain_create(int xc_handle, 
    4.27                       uint32_t ssidref,
    4.28 +                     xen_domain_handle_t handle,
    4.29                       uint32_t *pdomid);
    4.30  
    4.31  
    4.32 @@ -194,7 +194,8 @@ int xc_domain_destroy(int xc_handle,
    4.33  int xc_domain_pincpu(int xc_handle,
    4.34                       uint32_t domid,
    4.35                       int vcpu,
    4.36 -                     cpumap_t *cpumap);
    4.37 +                     cpumap_t cpumap);
    4.38 +
    4.39  /**
    4.40   * This function will return information about one or more domains. It is
    4.41   * designed to iterate over the list of domains. If a single domain is
     5.1 --- a/tools/python/xen/lowlevel/xc/xc.c	Fri Oct 14 01:42:34 2005 +0100
     5.2 +++ b/tools/python/xen/lowlevel/xc/xc.c	Fri Oct 14 15:40:48 2005 +0100
     5.3 @@ -77,17 +77,41 @@ static PyObject *pyxc_domain_create(PyOb
     5.4  {
     5.5      XcObject *xc = (XcObject *)self;
     5.6  
     5.7 -    uint32_t          dom = 0;
     5.8 -    int          ret;
     5.9 -    uint32_t          ssidref = 0x0;
    5.10 +    uint32_t dom = 0;
    5.11 +    int      ret, i;
    5.12 +    uint32_t ssidref = 0;
    5.13 +    PyObject *pyhandle = NULL;
    5.14 +    xen_domain_handle_t handle = { 
    5.15 +        0xde, 0xad, 0xbe, 0xef, 0xde, 0xad, 0xbe, 0xef,
    5.16 +        0xde, 0xad, 0xbe, 0xef, 0xde, 0xad, 0xbe, 0xef };
    5.17  
    5.18 -    static char *kwd_list[] = { "dom", "ssidref", NULL };
    5.19 +    static char *kwd_list[] = { "dom", "ssidref", "handle", NULL };
    5.20  
    5.21 -    if ( !PyArg_ParseTupleAndKeywords(args, kwds, "|ii", kwd_list,
    5.22 -                                      &dom, &ssidref))
    5.23 +    if ( !PyArg_ParseTupleAndKeywords(args, kwds, "|iiO", kwd_list,
    5.24 +                                      &dom, &ssidref, &pyhandle))
    5.25          return NULL;
    5.26  
    5.27 -    if ( (ret = xc_domain_create(xc->xc_handle, ssidref, &dom)) < 0 )
    5.28 +    if ( pyhandle != NULL )
    5.29 +    {
    5.30 +        if ( !PyList_Check(pyhandle) || 
    5.31 +             (PyList_Size(pyhandle) != sizeof(xen_domain_handle_t)) )
    5.32 +        {
    5.33 +        out_exception:
    5.34 +            errno = EINVAL;
    5.35 +            PyErr_SetFromErrno(xc_error);
    5.36 +            return NULL;
    5.37 +        }
    5.38 +
    5.39 +        for ( i = 0; i < sizeof(xen_domain_handle_t); i++ )
    5.40 +        {
    5.41 +            PyObject *p = PyList_GetItem(pyhandle, i);
    5.42 +            if ( !PyInt_Check(p) )
    5.43 +                goto out_exception;
    5.44 +            handle[i] = (uint8_t)PyInt_AsLong(p);
    5.45 +        }
    5.46 +    }
    5.47 +
    5.48 +    if ( (ret = xc_domain_create(xc->xc_handle, ssidref, handle, &dom)) < 0 )
    5.49          return PyErr_SetFromErrno(xc_error);
    5.50  
    5.51      return PyInt_FromLong(dom);
    5.52 @@ -181,7 +205,7 @@ static PyObject *pyxc_domain_pincpu(PyOb
    5.53  
    5.54      uint32_t dom;
    5.55      int vcpu = 0;
    5.56 -    cpumap_t cpumap = 0xFFFFFFFF;
    5.57 +    cpumap_t cpumap = ~0ULL;
    5.58  
    5.59      static char *kwd_list[] = { "dom", "vcpu", "cpumap", NULL };
    5.60  
    5.61 @@ -189,7 +213,7 @@ static PyObject *pyxc_domain_pincpu(PyOb
    5.62                                        &dom, &vcpu, &cpumap) )
    5.63          return NULL;
    5.64  
    5.65 -    if ( xc_domain_pincpu(xc->xc_handle, dom, vcpu, &cpumap) != 0 )
    5.66 +    if ( xc_domain_pincpu(xc->xc_handle, dom, vcpu, cpumap) != 0 )
    5.67          return PyErr_SetFromErrno(xc_error);
    5.68      
    5.69      Py_INCREF(zero);
    5.70 @@ -223,7 +247,7 @@ static PyObject *pyxc_domain_getinfo(PyO
    5.71                                       PyObject *kwds)
    5.72  {
    5.73      XcObject *xc = (XcObject *)self;
    5.74 -    PyObject *list, *vcpu_list, *cpumap_list, *info_dict;
    5.75 +    PyObject *list, *info_dict;
    5.76  
    5.77      uint32_t first_dom = 0;
    5.78      int max_doms = 1024, nr_doms, i, j;
    5.79 @@ -249,15 +273,9 @@ static PyObject *pyxc_domain_getinfo(PyO
    5.80      list = PyList_New(nr_doms);
    5.81      for ( i = 0 ; i < nr_doms; i++ )
    5.82      {
    5.83 -        vcpu_list = PyList_New(MAX_VIRT_CPUS);
    5.84 -        cpumap_list = PyList_New(MAX_VIRT_CPUS);
    5.85 -        for ( j = 0; j < MAX_VIRT_CPUS; j++ ) {
    5.86 -            PyList_SetItem( vcpu_list, j, 
    5.87 -                            Py_BuildValue("i", info[i].vcpu_to_cpu[j]));
    5.88 -            PyList_SetItem( cpumap_list, j, 
    5.89 -                            Py_BuildValue("i", info[i].cpumap[j]));
    5.90 -        }
    5.91 -                 
    5.92 +        PyObject *pyhandle = PyList_New(sizeof(xen_domain_handle_t));
    5.93 +        for ( j = 0; j < sizeof(xen_domain_handle_t); j++ )
    5.94 +            PyList_SetItem(pyhandle, j, PyInt_FromLong(info[i].handle[j]));
    5.95          info_dict = Py_BuildValue("{s:i,s:i,s:i,s:i,s:i,s:i,s:i,s:i"
    5.96                                    ",s:l,s:L,s:l,s:i,s:i}",
    5.97                                    "dom",       info[i].domid,
    5.98 @@ -273,10 +291,8 @@ static PyObject *pyxc_domain_getinfo(PyO
    5.99                                    "maxmem_kb", info[i].max_memkb,
   5.100                                    "ssidref",   info[i].ssidref,
   5.101                                    "shutdown_reason", info[i].shutdown_reason);
   5.102 -        PyDict_SetItemString( info_dict, "vcpu_to_cpu", vcpu_list );
   5.103 -        PyDict_SetItemString( info_dict, "cpumap", cpumap_list );
   5.104 -        PyList_SetItem( list, i, info_dict);
   5.105 - 
   5.106 +        PyDict_SetItemString(info_dict, "handle", pyhandle);
   5.107 +        PyList_SetItem(list, i, info_dict);
   5.108      }
   5.109  
   5.110      free(info);
     6.1 --- a/tools/python/xen/xend/XendDomainInfo.py	Fri Oct 14 01:42:34 2005 +0100
     6.2 +++ b/tools/python/xen/xend/XendDomainInfo.py	Fri Oct 14 15:40:48 2005 +0100
     6.3 @@ -1377,8 +1377,7 @@ class XendDomainInfo:
     6.4          # target = 0 means use all processors
     6.5          if target > 0:
     6.6              # count the number of online vcpus (cpu values in v2c map >= 0)
     6.7 -            vcpu_to_cpu = dom_get(dom)['vcpu_to_cpu']
     6.8 -            vcpus_online = len(filter(lambda x: x >= 0, vcpu_to_cpu))
     6.9 +            vcpus_online = dom_get(dom)['vcpus']
    6.10              log.debug("found %d vcpus online", vcpus_online)
    6.11  
    6.12              # disable any extra vcpus that are online over the requested target
     7.1 --- a/tools/python/xen/xm/main.py	Fri Oct 14 01:42:34 2005 +0100
     7.2 +++ b/tools/python/xen/xm/main.py	Fri Oct 14 15:40:48 2005 +0100
     7.3 @@ -270,9 +270,9 @@ def parse_doms_info(info):
     7.4          vcpuinfo['dom']    = int(sxp.child_value(info, 'domid', '-1'))
     7.5          vcpuinfo['vcpu']   = int(count)
     7.6          vcpuinfo['cpu']    = int(cpu)
     7.7 -        vcpuinfo['cpumap'] = int(cpumap[count])&mask
     7.8 +        #vcpuinfo['cpumap'] = int(cpumap[count])&mask
     7.9          count = count + 1
    7.10 -        dominfo['vcpulist'].append(vcpuinfo)
    7.11 +        #dominfo['vcpulist'].append(vcpuinfo)
    7.12      return dominfo
    7.13          
    7.14  def xm_brief_list(domsinfo):
     8.1 --- a/tools/xenstat/libxenstat/src/xen-interface.c	Fri Oct 14 01:42:34 2005 +0100
     8.2 +++ b/tools/xenstat/libxenstat/src/xen-interface.c	Fri Oct 14 15:40:48 2005 +0100
     8.3 @@ -178,16 +178,15 @@ long long xi_get_vcpu_usage(xi_handle *h
     8.4                              unsigned int vcpu)
     8.5  {
     8.6  	dom0_op_t op;
     8.7 -	op.u.getvcpucontext.domain = domain;
     8.8 -	op.u.getvcpucontext.vcpu = vcpu;
     8.9 -	op.u.getvcpucontext.ctxt = NULL;
    8.10 +	op.u.getvcpuinfo.domain = domain;
    8.11 +	op.u.getvcpuinfo.vcpu   = vcpu;
    8.12  
    8.13 -	if (xi_make_dom0_op(handle, &op, DOM0_GETVCPUCONTEXT) < 0) {
    8.14 -		perror("DOM0_GETVCPUCONTEXT Hypercall failed");
    8.15 +	if (xi_make_dom0_op(handle, &op, DOM0_GETVCPUINFO) < 0) {
    8.16 +		perror("DOM0_GETVCPUINFO Hypercall failed");
    8.17  		return -1;
    8.18  	}
    8.19  
    8.20 -	return op.u.getvcpucontext.cpu_time;
    8.21 +	return op.u.getvcpuinfo.cpu_time;
    8.22  }
    8.23  
    8.24  /* gets xen version information from hypervisor */
     9.1 --- a/xen/arch/x86/setup.c	Fri Oct 14 01:42:34 2005 +0100
     9.2 +++ b/xen/arch/x86/setup.c	Fri Oct 14 15:40:48 2005 +0100
     9.3 @@ -420,6 +420,9 @@ void __init __start_xen(multiboot_info_t
     9.4             nr_pages << (PAGE_SHIFT - 10));
     9.5      total_pages = nr_pages;
     9.6  
     9.7 +    /* Sanity check for unwanted bloat of dom0_op_t structure. */
     9.8 +    BUG_ON(sizeof(((dom0_op_t *)0)->u) != sizeof(((dom0_op_t *)0)->u.pad));
     9.9 +
    9.10      init_frametable();
    9.11  
    9.12      end_boot_allocator();
    10.1 --- a/xen/common/dom0_ops.c	Fri Oct 14 01:42:34 2005 +0100
    10.2 +++ b/xen/common/dom0_ops.c	Fri Oct 14 15:40:48 2005 +0100
    10.3 @@ -48,26 +48,20 @@ static void getdomaininfo(struct domain 
    10.4      
    10.5      info->domain = d->domain_id;
    10.6      
    10.7 -    memset(&info->vcpu_to_cpu, -1, sizeof(info->vcpu_to_cpu));
    10.8 -    memset(&info->cpumap, 0, sizeof(info->cpumap));
    10.9 -
   10.10      /* 
   10.11       * - domain is marked as blocked only if all its vcpus are blocked
   10.12       * - domain is marked as running if any of its vcpus is running
   10.13 -     * - only map vcpus that aren't down.  Note, at some point we may
   10.14 -     *   wish to demux the -1 value to indicate down vs. not-ever-booted
   10.15       */
   10.16      for_each_vcpu ( d, v ) {
   10.17 -        /* only map vcpus that are up */
   10.18 -        if ( !(test_bit(_VCPUF_down, &v->vcpu_flags)) )
   10.19 -            info->vcpu_to_cpu[v->vcpu_id] = v->processor;
   10.20 -        info->cpumap[v->vcpu_id] = v->cpumap;
   10.21 -        if ( !(v->vcpu_flags & VCPUF_blocked) )
   10.22 -            flags &= ~DOMFLAGS_BLOCKED;
   10.23 -        if ( v->vcpu_flags & VCPUF_running )
   10.24 -            flags |= DOMFLAGS_RUNNING;
   10.25          cpu_time += v->cpu_time;
   10.26 -        vcpu_count++;
   10.27 +        if ( !test_bit(_VCPUF_down, &v->vcpu_flags) )
   10.28 +        {
   10.29 +            if ( !(v->vcpu_flags & VCPUF_blocked) )
   10.30 +                flags &= ~DOMFLAGS_BLOCKED;
   10.31 +            if ( v->vcpu_flags & VCPUF_running )
   10.32 +                flags |= DOMFLAGS_RUNNING;
   10.33 +            vcpu_count++;
   10.34 +        }
   10.35      }
   10.36      
   10.37      info->cpu_time = cpu_time;
   10.38 @@ -87,6 +81,8 @@ static void getdomaininfo(struct domain 
   10.39      info->tot_pages         = d->tot_pages;
   10.40      info->max_pages         = d->max_pages;
   10.41      info->shared_info_frame = __pa(d->shared_info) >> PAGE_SHIFT;
   10.42 +
   10.43 +    memcpy(info->handle, d->handle, sizeof(xen_domain_handle_t));
   10.44  }
   10.45  
   10.46  long do_dom0_op(dom0_op_t *u_dom0_op)
   10.47 @@ -214,6 +210,9 @@ long do_dom0_op(dom0_op_t *u_dom0_op)
   10.48          if ( (d = do_createdomain(dom, pro)) == NULL )
   10.49              break;
   10.50  
   10.51 +        memcpy(d->handle, op->u.createdomain.handle,
   10.52 +               sizeof(xen_domain_handle_t));
   10.53 +
   10.54          ret = 0;
   10.55  
   10.56          op->u.createdomain.domain = d->domain_id;
   10.57 @@ -288,8 +287,6 @@ long do_dom0_op(dom0_op_t *u_dom0_op)
   10.58          domid_t dom = op->u.pincpudomain.domain;
   10.59          struct domain *d = find_domain_by_id(dom);
   10.60          struct vcpu *v;
   10.61 -        cpumap_t cpumap;
   10.62 -
   10.63  
   10.64          if ( d == NULL )
   10.65          {
   10.66 @@ -320,26 +317,17 @@ long do_dom0_op(dom0_op_t *u_dom0_op)
   10.67              break;
   10.68          }
   10.69  
   10.70 -        if ( copy_from_user(&cpumap, op->u.pincpudomain.cpumap,
   10.71 -                            sizeof(cpumap)) )
   10.72 -        {
   10.73 -            ret = -EFAULT;
   10.74 -            put_domain(d);
   10.75 -            break;
   10.76 -        }
   10.77 +        v->cpumap = op->u.pincpudomain.cpumap;
   10.78  
   10.79 -        /* update cpumap for this vcpu */
   10.80 -        v->cpumap = cpumap;
   10.81 -
   10.82 -        if ( cpumap == CPUMAP_RUNANYWHERE )
   10.83 +        if ( v->cpumap == CPUMAP_RUNANYWHERE )
   10.84          {
   10.85              clear_bit(_VCPUF_cpu_pinned, &v->vcpu_flags);
   10.86          }
   10.87          else
   10.88          {
   10.89              /* pick a new cpu from the usable map */
   10.90 -            int new_cpu = (int)find_first_set_bit(cpumap) % num_online_cpus();
   10.91 -
   10.92 +            int new_cpu;
   10.93 +            new_cpu = (int)find_first_set_bit(v->cpumap) % num_online_cpus();
   10.94              vcpu_pause(v);
   10.95              vcpu_migrate_cpu(v, new_cpu);
   10.96              set_bit(_VCPUF_cpu_pinned, &v->vcpu_flags);
   10.97 @@ -394,6 +382,8 @@ long do_dom0_op(dom0_op_t *u_dom0_op)
   10.98      }
   10.99      break;
  10.100  
  10.101 +
  10.102 +
  10.103      case DOM0_GETDOMAININFOLIST:
  10.104      { 
  10.105          struct domain *d;
  10.106 @@ -446,66 +436,73 @@ long do_dom0_op(dom0_op_t *u_dom0_op)
  10.107          struct vcpu_guest_context *c;
  10.108          struct domain             *d;
  10.109          struct vcpu               *v;
  10.110 -        int i;
  10.111 -
  10.112 -        d = find_domain_by_id(op->u.getvcpucontext.domain);
  10.113 -        if ( d == NULL )
  10.114 -        {
  10.115 -            ret = -ESRCH;
  10.116 -            break;
  10.117 -        }
  10.118 -
  10.119 -        if ( op->u.getvcpucontext.vcpu >= MAX_VIRT_CPUS )
  10.120 -        {
  10.121 -            ret = -EINVAL;
  10.122 -            put_domain(d);
  10.123 -            break;
  10.124 -        }
  10.125  
  10.126 -        /* find first valid vcpu starting from request. */
  10.127 -        v = NULL;
  10.128 -        for ( i = op->u.getvcpucontext.vcpu; i < MAX_VIRT_CPUS; i++ )
  10.129 -        {
  10.130 -            v = d->vcpu[i];
  10.131 -            if ( v != NULL && !(test_bit(_VCPUF_down, &v->vcpu_flags)) )
  10.132 -                break;
  10.133 -        }
  10.134 -        
  10.135 -        if ( v == NULL )
  10.136 -        {
  10.137 -            ret = -ESRCH;
  10.138 -            put_domain(d);
  10.139 +        ret = -ESRCH;
  10.140 +        if ( (d = find_domain_by_id(op->u.getvcpucontext.domain)) == NULL )
  10.141              break;
  10.142 -        }
  10.143 -
  10.144 -        op->u.getvcpucontext.cpu_time = v->cpu_time;
  10.145  
  10.146 -        if ( op->u.getvcpucontext.ctxt != NULL )
  10.147 -        {
  10.148 -            if ( (c = xmalloc(struct vcpu_guest_context)) == NULL )
  10.149 -            {
  10.150 -                ret = -ENOMEM;
  10.151 -                put_domain(d);
  10.152 -                break;
  10.153 -            }
  10.154 +        ret = -EINVAL;
  10.155 +        if ( op->u.getvcpucontext.vcpu >= MAX_VIRT_CPUS )
  10.156 +            goto getvcpucontext_out;
  10.157  
  10.158 -            if ( v != current )
  10.159 -                vcpu_pause(v);
  10.160 -
  10.161 -            arch_getdomaininfo_ctxt(v,c);
  10.162 +        ret = -ESRCH;
  10.163 +        v = d->vcpu[op->u.getvcpucontext.vcpu];
  10.164 +        if ( (v == NULL) || test_bit(_VCPUF_down, &v->vcpu_flags) )
  10.165 +            goto getvcpucontext_out;
  10.166  
  10.167 -            if ( v != current )
  10.168 -                vcpu_unpause(v);
  10.169 +        ret = -ENOMEM;
  10.170 +        if ( (c = xmalloc(struct vcpu_guest_context)) == NULL )
  10.171 +            goto getvcpucontext_out;
  10.172  
  10.173 -            if ( copy_to_user(op->u.getvcpucontext.ctxt, c, sizeof(*c)) )
  10.174 -                ret = -EINVAL;
  10.175 +        if ( v != current )
  10.176 +            vcpu_pause(v);
  10.177  
  10.178 -            xfree(c);
  10.179 -        }
  10.180 +        arch_getdomaininfo_ctxt(v,c);
  10.181 +        ret = 0;
  10.182 +
  10.183 +        if ( v != current )
  10.184 +            vcpu_unpause(v);
  10.185 +
  10.186 +        if ( copy_to_user(op->u.getvcpucontext.ctxt, c, sizeof(*c)) )
  10.187 +            ret = -EFAULT;
  10.188 +
  10.189 +        xfree(c);
  10.190  
  10.191          if ( copy_to_user(u_dom0_op, op, sizeof(*op)) )     
  10.192 -            ret = -EINVAL;
  10.193 +            ret = -EFAULT;
  10.194  
  10.195 +    getvcpucontext_out:
  10.196 +        put_domain(d);
  10.197 +    }
  10.198 +    break;
  10.199 +
  10.200 +    case DOM0_GETVCPUINFO:
  10.201 +    { 
  10.202 +        struct domain *d;
  10.203 +        struct vcpu   *v;
  10.204 +
  10.205 +        ret = -ESRCH;
  10.206 +        if ( (d = find_domain_by_id(op->u.getvcpuinfo.domain)) == NULL )
  10.207 +            break;
  10.208 +
  10.209 +        ret = -EINVAL;
  10.210 +        if ( op->u.getvcpuinfo.vcpu >= MAX_VIRT_CPUS )
  10.211 +            goto getvcpuinfo_out;
  10.212 +
  10.213 +        ret = -ESRCH;
  10.214 +        v = d->vcpu[op->u.getvcpuinfo.vcpu];
  10.215 +        if ( (v == NULL) || test_bit(_VCPUF_down, &v->vcpu_flags) )
  10.216 +            goto getvcpuinfo_out;
  10.217 +
  10.218 +        op->u.getvcpuinfo.cpu_time = v->cpu_time;
  10.219 +        op->u.getvcpuinfo.cpu      = v->processor;
  10.220 +        op->u.getvcpuinfo.cpumap   = v->cpumap;
  10.221 +        ret = 0;
  10.222 +
  10.223 +        if ( copy_to_user(u_dom0_op, op, sizeof(*op)) )     
  10.224 +            ret = -EFAULT;
  10.225 +
  10.226 +    getvcpuinfo_out:
  10.227          put_domain(d);
  10.228      }
  10.229      break;
    11.1 --- a/xen/common/keyhandler.c	Fri Oct 14 01:42:34 2005 +0100
    11.2 +++ b/xen/common/keyhandler.c	Fri Oct 14 15:40:48 2005 +0100
    11.3 @@ -99,7 +99,7 @@ static void halt_machine(unsigned char k
    11.4  static void do_task_queues(unsigned char key)
    11.5  {
    11.6      struct domain *d;
    11.7 -    struct vcpu *v;
    11.8 +    struct vcpu   *v;
    11.9      s_time_t       now = NOW();
   11.10  
   11.11      printk("'%c' pressed -> dumping task queues (now=0x%X:%08X)\n", key,
   11.12 @@ -112,6 +112,12 @@ static void do_task_queues(unsigned char
   11.13          printk("Xen: DOM %u, flags=%lx refcnt=%d nr_pages=%d "
   11.14                 "xenheap_pages=%d\n", d->domain_id, d->domain_flags,
   11.15                 atomic_read(&d->refcnt), d->tot_pages, d->xenheap_pages);
   11.16 +        printk("     handle=%02x%02x%02x%02x-%02x%02x%02x%02x-"
   11.17 +               "%02x%02x%02x%02x-%02x%02x%02x%02x\n",
   11.18 +               d->handle[ 0], d->handle[ 1], d->handle[ 2], d->handle[ 3],
   11.19 +               d->handle[ 4], d->handle[ 5], d->handle[ 6], d->handle[ 7],
   11.20 +               d->handle[ 8], d->handle[ 9], d->handle[10], d->handle[11],
   11.21 +               d->handle[12], d->handle[13], d->handle[14], d->handle[15]);
   11.22  
   11.23          dump_pageframe_info(d);
   11.24                 
    12.1 --- a/xen/include/public/dom0_ops.h	Fri Oct 14 01:42:34 2005 +0100
    12.2 +++ b/xen/include/public/dom0_ops.h	Fri Oct 14 15:40:48 2005 +0100
    12.3 @@ -45,6 +45,7 @@ typedef struct sched_adjdom_cmd dom0_adj
    12.4  typedef struct {
    12.5      /* IN parameters */
    12.6      uint32_t ssidref;
    12.7 +    xen_domain_handle_t handle;
    12.8      /* IN/OUT parameters. */
    12.9      /* Identifier for new domain (auto-allocate if zero is specified). */
   12.10      domid_t domain;
   12.11 @@ -88,9 +89,8 @@ typedef struct {
   12.12      unsigned long shared_info_frame;       /* MFN of shared_info struct */
   12.13      uint64_t cpu_time;
   12.14      uint32_t n_vcpu;
   12.15 -    int32_t  vcpu_to_cpu[MAX_VIRT_CPUS];  /* current mapping   */
   12.16 -    cpumap_t cpumap[MAX_VIRT_CPUS];       /* allowable mapping */
   12.17      uint32_t ssidref;
   12.18 +    xen_domain_handle_t handle;
   12.19  } dom0_getdomaininfo_t;
   12.20  
   12.21  #define DOM0_SETDOMAININFO      13
   12.22 @@ -180,9 +180,9 @@ typedef struct {
   12.23  #define DOM0_PINCPUDOMAIN     20
   12.24  typedef struct {
   12.25      /* IN variables. */
   12.26 -    domid_t      domain;
   12.27 -    uint16_t          vcpu;
   12.28 -    cpumap_t     *cpumap;
   12.29 +    domid_t   domain;
   12.30 +    uint16_t  vcpu;
   12.31 +    cpumap_t cpumap;
   12.32  } dom0_pincpudomain_t;
   12.33  
   12.34  /* Get trace buffers machine base address */
   12.35 @@ -352,11 +352,23 @@ typedef struct {
   12.36  
   12.37  #define DOM0_GETVCPUCONTEXT      37
   12.38  typedef struct {
   12.39 +    /* IN variables. */
   12.40      domid_t  domain;                  /* domain to be affected */
   12.41      uint16_t vcpu;                    /* vcpu # */
   12.42 -    vcpu_guest_context_t *ctxt;       /* NB. IN/OUT variable. */
   12.43 +    /* OUT variables. */
   12.44 +    vcpu_guest_context_t *ctxt;
   12.45 +} dom0_getvcpucontext_t;
   12.46 +
   12.47 +#define DOM0_GETVCPUINFO         43
   12.48 +typedef struct {
   12.49 +    /* IN variables. */
   12.50 +    domid_t  domain;                  /* domain to be affected */
   12.51 +    uint16_t vcpu;                    /* vcpu # */
   12.52 +    /* OUT variables. */
   12.53      uint64_t cpu_time;                 
   12.54 -} dom0_getvcpucontext_t;
   12.55 +    uint32_t cpu;                     /* current mapping   */
   12.56 +    cpumap_t cpumap;                  /* allowable mapping */
   12.57 +} dom0_getvcpuinfo_t;
   12.58  
   12.59  #define DOM0_GETDOMAININFOLIST   38
   12.60  typedef struct {
   12.61 @@ -426,10 +438,12 @@ typedef struct {
   12.62          dom0_microcode_t         microcode;
   12.63          dom0_ioport_permission_t ioport_permission;
   12.64          dom0_getvcpucontext_t    getvcpucontext;
   12.65 +        dom0_getvcpuinfo_t       getvcpuinfo;
   12.66          dom0_getdomaininfolist_t getdomaininfolist;
   12.67          dom0_platform_quirk_t    platform_quirk;
   12.68          dom0_physical_memory_map_t physical_memory_map;
   12.69          dom0_max_vcpus_t         max_vcpus;
   12.70 +        uint8_t                  pad[128];
   12.71      } u;
   12.72  } dom0_op_t;
   12.73  
    13.1 --- a/xen/include/public/xen.h	Fri Oct 14 01:42:34 2005 +0100
    13.2 +++ b/xen/include/public/xen.h	Fri Oct 14 15:40:48 2005 +0100
    13.3 @@ -437,6 +437,8 @@ extern shared_info_t *HYPERVISOR_shared_
    13.4  
    13.5  typedef uint64_t cpumap_t;
    13.6  
    13.7 +typedef uint8_t xen_domain_handle_t[16];
    13.8 +
    13.9  #endif /* !__ASSEMBLY__ */
   13.10  
   13.11  #endif /* __XEN_PUBLIC_XEN_H__ */
    14.1 --- a/xen/include/xen/sched.h	Fri Oct 14 01:42:34 2005 +0100
    14.2 +++ b/xen/include/xen/sched.h	Fri Oct 14 15:40:48 2005 +0100
    14.3 @@ -139,6 +139,9 @@ struct domain
    14.4      struct arch_domain arch;
    14.5  
    14.6      void *ssid; /* sHype security subject identifier */
    14.7 +
    14.8 +    /* Control-plane tools handle for this domain. */
    14.9 +    xen_domain_handle_t handle;
   14.10  };
   14.11  
   14.12  struct domain_setup_info