tools: Obtain platform capabilities via physinfo sysctl.
author    Keir Fraser <keir.fraser@citrix.com>
          Tue, 1 Apr 2008 17:04:19 +0000 (18:04 +0100)
committer Keir Fraser <keir.fraser@citrix.com>
          Tue, 1 Apr 2008 17:04:19 +0000 (18:04 +0100)
Signed-off-by: Jean Guyader <jean.guyader@eu.citrix.com>
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
tools/python/xen/lowlevel/xc/xc.c
tools/python/xen/xend/XendNode.py
xen/arch/ia64/xen/dom0_ops.c
xen/arch/powerpc/sysctl.c
xen/arch/x86/sysctl.c
xen/include/public/sysctl.h

diff --git a/tools/python/xen/lowlevel/xc/xc.c b/tools/python/xen/lowlevel/xc/xc.c
index 6bc29d853728abc57f46f780e29a4f42598d35c3..1b233611429789346334d558762c47348f850374 100644
--- a/tools/python/xen/lowlevel/xc/xc.c
+++ b/tools/python/xen/lowlevel/xc/xc.c
@@ -762,11 +762,12 @@ static PyObject *pyxc_physinfo(XcObject *self)
 {
 #define MAX_CPU_ID 255
     xc_physinfo_t info;
-    char cpu_cap[128], *p=cpu_cap, *q=cpu_cap;
+    char cpu_cap[128], virt_caps[128], *p;
     int i, j, max_cpu_id;
     uint64_t free_heap;
     PyObject *ret_obj, *node_to_cpu_obj, *node_to_memory_obj;
     xc_cpu_to_node_t map[MAX_CPU_ID + 1];
+    const char *virtcap_names[] = { "hvm", "hvm_directio" };
 
     set_xen_guest_handle(info.cpu_to_node, map);
     info.max_cpu_id = MAX_CPU_ID;
@@ -774,17 +775,21 @@ static PyObject *pyxc_physinfo(XcObject *self)
     if ( xc_physinfo(self->xc_handle, &info) != 0 )
         return pyxc_error_to_exception();
 
-    *q = 0;
+    p = cpu_cap;
+    *p = '\0';
     for ( i = 0; i < sizeof(info.hw_cap)/4; i++ )
-    {
         p += sprintf(p, "%08x:", info.hw_cap[i]);
-        if ( info.hw_cap[i] )
-            q = p;
-    }
-    if ( q > cpu_cap )
-        *(q-1) = 0;
+    *(p-1) = 0;
+
+    p = virt_caps;
+    *p = '\0';
+    for ( i = 0; i < 2; i++ )
+        if ( (info.capabilities >> i) & 1 )
+            p += sprintf(p, "%s ", virtcap_names[i]);
+    if ( p != virt_caps )
+        *(p-1) = '\0';
 
-    ret_obj = Py_BuildValue("{s:i,s:i,s:i,s:i,s:i,s:l,s:l,s:l,s:i,s:s}",
+    ret_obj = Py_BuildValue("{s:i,s:i,s:i,s:i,s:i,s:l,s:l,s:l,s:i,s:s:s:s}",
                             "nr_nodes",         info.nr_nodes,
                             "max_cpu_id",       info.max_cpu_id,
                             "threads_per_core", info.threads_per_core,
@@ -794,7 +799,8 @@ static PyObject *pyxc_physinfo(XcObject *self)
                             "free_memory",      pages_to_kib(info.free_pages),
                             "scrub_memory",     pages_to_kib(info.scrub_pages),
                             "cpu_khz",          info.cpu_khz,
-                            "hw_caps",          cpu_cap);
+                            "hw_caps",          cpu_cap,
+                            "virt_caps",        virt_caps);
 
     max_cpu_id = info.max_cpu_id;
     if ( max_cpu_id > MAX_CPU_ID )
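
The capability decoding that pyxc_physinfo() performs above is available to any
C caller of libxc as well. A minimal sketch (the helper name is hypothetical;
the int-handle libxc API of this tree and the two-entry name table from the
hunk above are assumed):

    #include <stdio.h>
    #include <string.h>
    #include <xenctrl.h>

    /* Hypothetical helper: print xc_physinfo_t.capabilities as the same
     * space-separated name list that pyxc_physinfo() builds above. */
    static void print_virt_caps(int xc_handle)
    {
        xc_physinfo_t info;
        const char *virtcap_names[] = { "hvm", "hvm_directio" };
        int i;

        memset(&info, 0, sizeof(info));   /* no cpu_to_node array requested */
        if ( xc_physinfo(xc_handle, &info) != 0 )
            return;

        for ( i = 0; i < 2; i++ )
            if ( (info.capabilities >> i) & 1 )
                printf("%s ", virtcap_names[i]);
        printf("\n");
    }
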
diff --git a/tools/python/xen/xend/XendNode.py b/tools/python/xen/xend/XendNode.py
index 26db7f2966530cf9ba7718563928e3bd85260897..f394411a9ca4f166e0119d07d6fb09195adfc490 100644
--- a/tools/python/xen/xend/XendNode.py
+++ b/tools/python/xen/xend/XendNode.py
@@ -92,6 +92,7 @@ class XendNode:
         physinfo = self.physinfo_dict()
         cpu_count = physinfo['nr_cpus']
         cpu_features = physinfo['hw_caps']
+        virt_caps = physinfo['virt_caps']
 
         # If the number of CPUs doesn't match, we should just reinitialise
         # the CPU UUIDs.
@@ -112,6 +113,7 @@ class XendNode:
                 self.cpus[u].update(
                     { 'host'     : self.uuid,
                       'features' : cpu_features,
+                      'virt_caps': virt_caps,
                       'speed'    : int(float(cpuinfo[number]['cpu MHz'])),
                       'vendor'   : cpuinfo[number]['vendor_id'],
                       'modelname': cpuinfo[number]['model name'],
@@ -605,6 +607,7 @@ class XendNode:
                       'threads_per_core',
                       'cpu_mhz',
                       'hw_caps',
+                      'virt_caps',
                       'total_memory',
                       'free_memory',
                       'node_to_cpu',
diff --git a/xen/arch/ia64/xen/dom0_ops.c b/xen/arch/ia64/xen/dom0_ops.c
index 1f42acc1c3efae6af8e0ac858d88db340e1bfa3d..af4e6b555f732b614212c1e28ff49a6f556bc5d3 100644
--- a/xen/arch/ia64/xen/dom0_ops.c
+++ b/xen/arch/ia64/xen/dom0_ops.c
@@ -410,6 +410,7 @@ long arch_do_sysctl(xen_sysctl_t *op, XEN_GUEST_HANDLE(xen_sysctl_t) u_sysctl)
 
         xen_sysctl_physinfo_t *pi = &op->u.physinfo;
 
+        memset(pi, 0, sizeof(*pi));
         pi->threads_per_core = cpus_weight(cpu_sibling_map[0]);
         pi->cores_per_socket =
             cpus_weight(cpu_core_map[0]) / pi->threads_per_core;
@@ -419,7 +420,6 @@ long arch_do_sysctl(xen_sysctl_t *op, XEN_GUEST_HANDLE(xen_sysctl_t) u_sysctl)
         pi->free_pages       = avail_domheap_pages();
         pi->scrub_pages      = avail_scrub_pages();
         pi->cpu_khz          = local_cpu_data->proc_freq / 1000;
-        memset(pi->hw_cap, 0, sizeof(pi->hw_cap));
 
         max_array_ent = pi->max_cpu_id;
         pi->max_cpu_id = last_cpu(cpu_online_map);
diff --git a/xen/arch/powerpc/sysctl.c b/xen/arch/powerpc/sysctl.c
index b211d4f9f7ba9201c687100dcfb8579cdb494e20..24db2a9141b3ae8a3c54284399876cc913f0d1d1 100644
--- a/xen/arch/powerpc/sysctl.c
+++ b/xen/arch/powerpc/sysctl.c
@@ -41,6 +41,7 @@ long arch_do_sysctl(struct xen_sysctl *sysctl,
     {
         xen_sysctl_physinfo_t *pi = &sysctl->u.physinfo;
 
+        memset(pi, 0, sizeof(*pi));
         pi->threads_per_core =
             cpus_weight(cpu_sibling_map[0]);
         pi->cores_per_socket =
@@ -50,10 +51,7 @@ long arch_do_sysctl(struct xen_sysctl *sysctl,
         pi->total_pages      = total_pages;
         pi->free_pages       = avail_domheap_pages();
         pi->cpu_khz          = cpu_khz;
-        memset(pi->hw_cap, 0, sizeof(pi->hw_cap));
-        ret = 0;
-        if ( copy_to_guest(u_sysctl, sysctl, 1) )
-            ret = -EFAULT;
+        ret = copy_to_guest(u_sysctl, sysctl, 1) ? -EFAULT : 0;
     }
     break;
 
diff --git a/xen/arch/x86/sysctl.c b/xen/arch/x86/sysctl.c
index db94bfd812dfcde0365b0366a6a2b438daa3f08e..13f177a7fac174b80209e70a3763319466e2abb1 100644
--- a/xen/arch/x86/sysctl.c
+++ b/xen/arch/x86/sysctl.c
@@ -47,18 +47,22 @@ long arch_do_sysctl(
         if ( ret )
             break;
 
+        memset(pi, 0, sizeof(*pi));
         pi->threads_per_core =
             cpus_weight(cpu_sibling_map[0]);
         pi->cores_per_socket =
             cpus_weight(cpu_core_map[0]) / pi->threads_per_core;
         pi->nr_cpus = (u32)num_online_cpus();
         pi->nr_nodes = num_online_nodes();
-        pi->total_pages      = total_pages;
-        pi->free_pages       = avail_domheap_pages();
-        pi->scrub_pages      = avail_scrub_pages();
-        pi->cpu_khz          = cpu_khz;
-        memset(pi->hw_cap, 0, sizeof(pi->hw_cap));
+        pi->total_pages = total_pages;
+        pi->free_pages = avail_domheap_pages();
+        pi->scrub_pages = avail_scrub_pages();
+        pi->cpu_khz = cpu_khz;
         memcpy(pi->hw_cap, boot_cpu_data.x86_capability, NCAPINTS*4);
+        if ( hvm_enabled )
+            pi->capabilities |= XEN_SYSCTL_PHYSCAP_hvm;
+        if ( iommu_enabled )
+            pi->capabilities |= XEN_SYSCTL_PHYSCAP_hvm_directio;
 
         max_array_ent = pi->max_cpu_id;
         pi->max_cpu_id = last_cpu(cpu_online_map);
diff --git a/xen/include/public/sysctl.h b/xen/include/public/sysctl.h
index f56f05777a33f43dfb1405444adf60cf579afe6b..b66c02190c9426737b6a6cae19f5e153aa040c50 100644
--- a/xen/include/public/sysctl.h
+++ b/xen/include/public/sysctl.h
@@ -84,8 +84,13 @@ DEFINE_XEN_GUEST_HANDLE(xen_sysctl_tbuf_op_t);
  * Get physical information about the host machine
  */
 #define XEN_SYSCTL_physinfo          3
+ /* (x86) The platform supports HVM guests. */
+#define _XEN_SYSCTL_PHYSCAP_hvm          0
+#define XEN_SYSCTL_PHYSCAP_hvm           (1u<<_XEN_SYSCTL_PHYSCAP_hvm)
+ /* (x86) The platform supports HVM-guest direct access to I/O devices. */
+#define _XEN_SYSCTL_PHYSCAP_hvm_directio 1
+#define XEN_SYSCTL_PHYSCAP_hvm_directio  (1u<<_XEN_SYSCTL_PHYSCAP_hvm_directio)
 struct xen_sysctl_physinfo {
-    /* IN variables. */
     uint32_t threads_per_core;
     uint32_t cores_per_socket;
     uint32_t nr_cpus;
@@ -96,7 +101,6 @@ struct xen_sysctl_physinfo {
     uint64_aligned_t scrub_pages;
     uint32_t hw_cap[8];
 
-    /* IN/OUT variables. */
     /*
      * IN: maximum addressable entry in the caller-provided cpu_to_node array.
      * OUT: largest cpu identifier in the system.
@@ -112,6 +116,9 @@ struct xen_sysctl_physinfo {
      * elements of the array will not be written by the sysctl.
      */
     XEN_GUEST_HANDLE_64(uint32) cpu_to_node;
+
+    /* XEN_SYSCTL_PHYSCAP_??? */
+    uint32_t capabilities;
 };
 typedef struct xen_sysctl_physinfo xen_sysctl_physinfo_t;
 DEFINE_XEN_GUEST_HANDLE(xen_sysctl_physinfo_t);
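
The IN/OUT protocol around max_cpu_id and cpu_to_node documented in the struct
above is easiest to see from the caller's side. A minimal sketch, mirroring
what pyxc_physinfo() does in the first hunk (the function name is
hypothetical; the int-handle libxc API of this tree is assumed):

    #include <string.h>
    #include <xenctrl.h>

    #define MAX_CPU_ID 255

    /* Hypothetical helper: returns the largest CPU id in the system, or -1
     * on error. On success, map[0 .. min(MAX_CPU_ID, largest id)] holds
     * node numbers. */
    static int fetch_cpu_to_node(int xc_handle,
                                 xc_cpu_to_node_t map[MAX_CPU_ID + 1])
    {
        xc_physinfo_t info;

        memset(&info, 0, sizeof(info));
        set_xen_guest_handle(info.cpu_to_node, map); /* IN: caller's array */
        info.max_cpu_id = MAX_CPU_ID;                /* IN: its last index */

        if ( xc_physinfo(xc_handle, &info) != 0 )
            return -1;

        /* OUT: info.capabilities now also holds the XEN_SYSCTL_PHYSCAP_*
         * bits, e.g. (info.capabilities & XEN_SYSCTL_PHYSCAP_hvm_directio)
         * tests whether HVM-guest direct I/O access is supported. */
        return (int)info.max_cpu_id;                 /* OUT: largest CPU id */
    }
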