ia64/xen-unstable

changeset 17366:250606290439

tools: Obtain platform capabilities via physinfo sysctl.
Signed-off-by: Jean Guyader <jean.guyader@eu.citrix.com>
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Tue Apr 01 18:04:19 2008 +0100 (2008-04-01)
parents 720552439f74
children 59d2638a7243
files tools/python/xen/lowlevel/xc/xc.c tools/python/xen/xend/XendNode.py xen/arch/ia64/xen/dom0_ops.c xen/arch/powerpc/sysctl.c xen/arch/x86/sysctl.c xen/include/public/sysctl.h
line diff
     1.1 --- a/tools/python/xen/lowlevel/xc/xc.c	Tue Apr 01 17:24:22 2008 +0100
     1.2 +++ b/tools/python/xen/lowlevel/xc/xc.c	Tue Apr 01 18:04:19 2008 +0100
     1.3 @@ -762,11 +762,12 @@ static PyObject *pyxc_physinfo(XcObject 
     1.4  {
     1.5  #define MAX_CPU_ID 255
     1.6      xc_physinfo_t info;
     1.7 -    char cpu_cap[128], *p=cpu_cap, *q=cpu_cap;
     1.8 +    char cpu_cap[128], virt_caps[128], *p;
     1.9      int i, j, max_cpu_id;
    1.10      uint64_t free_heap;
    1.11      PyObject *ret_obj, *node_to_cpu_obj, *node_to_memory_obj;
    1.12      xc_cpu_to_node_t map[MAX_CPU_ID + 1];
    1.13 +    const char *virtcap_names[] = { "hvm", "hvm_directio" };
    1.14  
    1.15      set_xen_guest_handle(info.cpu_to_node, map);
    1.16      info.max_cpu_id = MAX_CPU_ID;
    1.17 @@ -774,17 +775,21 @@ static PyObject *pyxc_physinfo(XcObject 
    1.18      if ( xc_physinfo(self->xc_handle, &info) != 0 )
    1.19          return pyxc_error_to_exception();
    1.20  
    1.21 -    *q = 0;
    1.22 +    p = cpu_cap;
    1.23 +    *p = '\0';
    1.24      for ( i = 0; i < sizeof(info.hw_cap)/4; i++ )
    1.25 -    {
    1.26          p += sprintf(p, "%08x:", info.hw_cap[i]);
    1.27 -        if ( info.hw_cap[i] )
    1.28 -            q = p;
    1.29 -    }
    1.30 -    if ( q > cpu_cap )
    1.31 -        *(q-1) = 0;
    1.32 +    *(p-1) = 0;
    1.33  
    1.34 -    ret_obj = Py_BuildValue("{s:i,s:i,s:i,s:i,s:i,s:l,s:l,s:l,s:i,s:s}",
    1.35 +    p = virt_caps;
    1.36 +    *p = '\0';
    1.37 +    for ( i = 0; i < 2; i++ )
    1.38 +        if ( (info.capabilities >> i) & 1 )
    1.39 +          p += sprintf(p, "%s ", virtcap_names[i]);
    1.40 +    if ( p != virt_caps )
    1.41 +      *(p-1) = '\0';
    1.42 +
     1.43 +    ret_obj = Py_BuildValue("{s:i,s:i,s:i,s:i,s:i,s:l,s:l,s:l,s:i,s:s,s:s}",
    1.44                              "nr_nodes",         info.nr_nodes,
    1.45                              "max_cpu_id",       info.max_cpu_id,
    1.46                              "threads_per_core", info.threads_per_core,
    1.47 @@ -794,7 +799,8 @@ static PyObject *pyxc_physinfo(XcObject 
    1.48                              "free_memory",      pages_to_kib(info.free_pages),
    1.49                              "scrub_memory",     pages_to_kib(info.scrub_pages),
    1.50                              "cpu_khz",          info.cpu_khz,
    1.51 -                            "hw_caps",          cpu_cap);
    1.52 +                            "hw_caps",          cpu_cap,
    1.53 +                            "virt_caps",        virt_caps);
    1.54  
    1.55      max_cpu_id = info.max_cpu_id;
    1.56      if ( max_cpu_id > MAX_CPU_ID )
     2.1 --- a/tools/python/xen/xend/XendNode.py	Tue Apr 01 17:24:22 2008 +0100
     2.2 +++ b/tools/python/xen/xend/XendNode.py	Tue Apr 01 18:04:19 2008 +0100
     2.3 @@ -92,6 +92,7 @@ class XendNode:
     2.4          physinfo = self.physinfo_dict()
     2.5          cpu_count = physinfo['nr_cpus']
     2.6          cpu_features = physinfo['hw_caps']
     2.7 +        virt_caps = physinfo['virt_caps']
     2.8  
     2.9          # If the number of CPUs don't match, we should just reinitialise 
    2.10          # the CPU UUIDs.
    2.11 @@ -112,6 +113,7 @@ class XendNode:
    2.12                  self.cpus[u].update(
    2.13                      { 'host'     : self.uuid,
    2.14                        'features' : cpu_features,
    2.15 +                      'virt_caps': virt_caps,
    2.16                        'speed'    : int(float(cpuinfo[number]['cpu MHz'])),
    2.17                        'vendor'   : cpuinfo[number]['vendor_id'],
    2.18                        'modelname': cpuinfo[number]['model name'],
    2.19 @@ -605,6 +607,7 @@ class XendNode:
    2.20                        'threads_per_core',
    2.21                        'cpu_mhz',
    2.22                        'hw_caps',
    2.23 +                      'virt_caps',
    2.24                        'total_memory',
    2.25                        'free_memory',
    2.26                        'node_to_cpu',
     3.1 --- a/xen/arch/ia64/xen/dom0_ops.c	Tue Apr 01 17:24:22 2008 +0100
     3.2 +++ b/xen/arch/ia64/xen/dom0_ops.c	Tue Apr 01 18:04:19 2008 +0100
     3.3 @@ -410,6 +410,7 @@ long arch_do_sysctl(xen_sysctl_t *op, XE
     3.4  
     3.5          xen_sysctl_physinfo_t *pi = &op->u.physinfo;
     3.6  
     3.7 +        memset(pi, 0, sizeof(*pi));
     3.8          pi->threads_per_core = cpus_weight(cpu_sibling_map[0]);
     3.9          pi->cores_per_socket =
    3.10              cpus_weight(cpu_core_map[0]) / pi->threads_per_core;
    3.11 @@ -419,7 +420,6 @@ long arch_do_sysctl(xen_sysctl_t *op, XE
    3.12          pi->free_pages       = avail_domheap_pages();
    3.13          pi->scrub_pages      = avail_scrub_pages();
    3.14          pi->cpu_khz          = local_cpu_data->proc_freq / 1000;
    3.15 -        memset(pi->hw_cap, 0, sizeof(pi->hw_cap));
    3.16  
    3.17          max_array_ent = pi->max_cpu_id;
    3.18          pi->max_cpu_id = last_cpu(cpu_online_map);
     4.1 --- a/xen/arch/powerpc/sysctl.c	Tue Apr 01 17:24:22 2008 +0100
     4.2 +++ b/xen/arch/powerpc/sysctl.c	Tue Apr 01 18:04:19 2008 +0100
     4.3 @@ -41,6 +41,7 @@ long arch_do_sysctl(struct xen_sysctl *s
     4.4      {
     4.5          xen_sysctl_physinfo_t *pi = &sysctl->u.physinfo;
     4.6  
     4.7 +        memset(pi, 0, sizeof(*pi));
     4.8          pi->threads_per_core =
     4.9              cpus_weight(cpu_sibling_map[0]);
    4.10          pi->cores_per_socket =
    4.11 @@ -50,10 +51,7 @@ long arch_do_sysctl(struct xen_sysctl *s
    4.12          pi->total_pages      = total_pages;
    4.13          pi->free_pages       = avail_domheap_pages();
    4.14          pi->cpu_khz          = cpu_khz;
    4.15 -        memset(pi->hw_cap, 0, sizeof(pi->hw_cap));
    4.16 -        ret = 0;
    4.17 -        if ( copy_to_guest(u_sysctl, sysctl, 1) )
    4.18 -            ret = -EFAULT;
    4.19 +        ret = copy_to_guest(u_sysctl, sysctl, 1) ? -EFAULT : 0;
    4.20      }
    4.21      break;
    4.22  
     5.1 --- a/xen/arch/x86/sysctl.c	Tue Apr 01 17:24:22 2008 +0100
     5.2 +++ b/xen/arch/x86/sysctl.c	Tue Apr 01 18:04:19 2008 +0100
     5.3 @@ -47,18 +47,22 @@ long arch_do_sysctl(
     5.4          if ( ret )
     5.5              break;
     5.6  
     5.7 +        memset(pi, 0, sizeof(*pi));
     5.8          pi->threads_per_core =
     5.9              cpus_weight(cpu_sibling_map[0]);
    5.10          pi->cores_per_socket =
    5.11              cpus_weight(cpu_core_map[0]) / pi->threads_per_core;
    5.12          pi->nr_cpus = (u32)num_online_cpus();
    5.13          pi->nr_nodes = num_online_nodes();
    5.14 -        pi->total_pages      = total_pages;
    5.15 -        pi->free_pages       = avail_domheap_pages();
    5.16 -        pi->scrub_pages      = avail_scrub_pages();
    5.17 -        pi->cpu_khz          = cpu_khz;
    5.18 -        memset(pi->hw_cap, 0, sizeof(pi->hw_cap));
    5.19 +        pi->total_pages = total_pages;
    5.20 +        pi->free_pages = avail_domheap_pages();
    5.21 +        pi->scrub_pages = avail_scrub_pages();
    5.22 +        pi->cpu_khz = cpu_khz;
    5.23          memcpy(pi->hw_cap, boot_cpu_data.x86_capability, NCAPINTS*4);
    5.24 +        if ( hvm_enabled )
    5.25 +            pi->capabilities |= XEN_SYSCTL_PHYSCAP_hvm;
    5.26 +        if ( iommu_enabled )
    5.27 +            pi->capabilities |= XEN_SYSCTL_PHYSCAP_hvm_directio;
    5.28  
    5.29          max_array_ent = pi->max_cpu_id;
    5.30          pi->max_cpu_id = last_cpu(cpu_online_map);
     6.1 --- a/xen/include/public/sysctl.h	Tue Apr 01 17:24:22 2008 +0100
     6.2 +++ b/xen/include/public/sysctl.h	Tue Apr 01 18:04:19 2008 +0100
     6.3 @@ -84,8 +84,13 @@ DEFINE_XEN_GUEST_HANDLE(xen_sysctl_tbuf_
     6.4   * Get physical information about the host machine
     6.5   */
     6.6  #define XEN_SYSCTL_physinfo          3
     6.7 + /* (x86) The platform supports HVM guests. */
     6.8 +#define _XEN_SYSCTL_PHYSCAP_hvm          0
     6.9 +#define XEN_SYSCTL_PHYSCAP_hvm           (1u<<_XEN_SYSCTL_PHYSCAP_hvm)
    6.10 + /* (x86) The platform supports HVM-guest direct access to I/O devices. */
    6.11 +#define _XEN_SYSCTL_PHYSCAP_hvm_directio 1
    6.12 +#define XEN_SYSCTL_PHYSCAP_hvm_directio  (1u<<_XEN_SYSCTL_PHYSCAP_hvm_directio)
    6.13  struct xen_sysctl_physinfo {
    6.14 -    /* IN variables. */
    6.15      uint32_t threads_per_core;
    6.16      uint32_t cores_per_socket;
    6.17      uint32_t nr_cpus;
    6.18 @@ -96,7 +101,6 @@ struct xen_sysctl_physinfo {
    6.19      uint64_aligned_t scrub_pages;
    6.20      uint32_t hw_cap[8];
    6.21  
    6.22 -    /* IN/OUT variables. */
    6.23      /*
    6.24       * IN: maximum addressable entry in the caller-provided cpu_to_node array.
    6.25       * OUT: largest cpu identifier in the system.
    6.26 @@ -112,6 +116,9 @@ struct xen_sysctl_physinfo {
    6.27       * elements of the array will not be written by the sysctl.
    6.28       */
    6.29      XEN_GUEST_HANDLE_64(uint32) cpu_to_node;
    6.30 +
    6.31 +    /* XEN_SYSCTL_PHYSCAP_??? */
    6.32 +    uint32_t capabilities;
    6.33  };
    6.34  typedef struct xen_sysctl_physinfo xen_sysctl_physinfo_t;
    6.35  DEFINE_XEN_GUEST_HANDLE(xen_sysctl_physinfo_t);