direct-io.hg

changeset 15482:89d2192942be

Extended the physinfo sysctl to export NUMA cpu_to_node topology info.
Print this in 'xm info'.
Signed-off-by: Ryan Harper <ryanh@us.ibm.com>
Signed-off-by: Keir Fraser <keir@xensource.com>
author kfraser@localhost.localdomain
date Fri Jul 06 16:12:07 2007 +0100 (2007-07-06)
parents 538c3d8aa4b1
children eaf3aa32fa88
files tools/libxc/xc_misc.c tools/libxc/xenctrl.h tools/python/xen/lowlevel/xc/xc.c tools/python/xen/xend/XendNode.py tools/xenmon/xenbaked.c tools/xenstat/libxenstat/src/xenstat.c tools/xentrace/xentrace.c tools/xm-test/tests/info/02_info_compiledata_pos.py xen/arch/ia64/xen/dom0_ops.c xen/arch/powerpc/sysctl.c xen/arch/x86/sysctl.c xen/include/public/sysctl.h xen/include/xen/cpumask.h
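
The interface change in brief: xc_physinfo() now treats its argument as
IN/OUT.  A caller that wants the NUMA map points info.cpu_to_node at a
local array and sets info.max_cpu_id to that array's last index; the new
memcpy in xc_misc.c forwards these IN fields to the hypervisor, which
fills the array and rewrites max_cpu_id with the largest cpu id in the
system.  A minimal caller-side sketch (the function name, MAX_MAP bound
and printf output are illustrative, not part of this changeset):

    #include <stdio.h>
    #include <xenctrl.h>

    #define MAX_MAP 255   /* illustrative bound, mirrors pyxc's MAX_CPU_ID */

    static void print_cpu_to_node(int xc_handle)
    {
        xc_physinfo_t info = { 0 };
        xc_cpu_to_node_t map[MAX_MAP + 1];
        int i, last;

        set_xen_guest_handle(info.cpu_to_node, map); /* IN: destination array */
        info.max_cpu_id = MAX_MAP;                   /* IN: last usable index */

        if ( xc_physinfo(xc_handle, &info) != 0 )
            return;

        /* OUT: max_cpu_id is now the largest cpu id in the system; clamp it
         * in case the host has more cpus than the array can describe. */
        last = (info.max_cpu_id < MAX_MAP) ? info.max_cpu_id : MAX_MAP;
        for ( i = 0; i <= last; i++ )
            printf("cpu%d -> node%u\n", i, map[i]); /* ~0u = offline cpu */
    }
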
line diff
     1.1 --- a/tools/libxc/xc_misc.c	Fri Jul 06 15:01:20 2007 +0100
     1.2 +++ b/tools/libxc/xc_misc.c	Fri Jul 06 16:12:07 2007 +0100
     1.3 @@ -60,6 +60,8 @@ int xc_physinfo(int xc_handle,
     1.4  
     1.5      sysctl.cmd = XEN_SYSCTL_physinfo;
     1.6  
     1.7 +    memcpy(&sysctl.u.physinfo, put_info, sizeof(*put_info));
     1.8 +
     1.9      if ( (ret = do_sysctl(xc_handle, &sysctl)) != 0 )
    1.10          return ret;
    1.11  
     2.1 --- a/tools/libxc/xenctrl.h	Fri Jul 06 15:01:20 2007 +0100
     2.2 +++ b/tools/libxc/xenctrl.h	Fri Jul 06 16:12:07 2007 +0100
     2.3 @@ -473,6 +473,7 @@ int xc_readconsolering(int xc_handle,
     2.4  int xc_send_debug_keys(int xc_handle, char *keys);
     2.5  
     2.6  typedef xen_sysctl_physinfo_t xc_physinfo_t;
     2.7 +typedef uint32_t xc_cpu_to_node_t;
     2.8  int xc_physinfo(int xc_handle,
     2.9                  xc_physinfo_t *info);
    2.10  
     3.1 --- a/tools/python/xen/lowlevel/xc/xc.c	Fri Jul 06 15:01:20 2007 +0100
     3.2 +++ b/tools/python/xen/lowlevel/xc/xc.c	Fri Jul 06 16:12:07 2007 +0100
     3.3 @@ -680,33 +680,62 @@ static PyObject *pyxc_pages_to_kib(XcObj
     3.4  
     3.5  static PyObject *pyxc_physinfo(XcObject *self)
     3.6  {
     3.7 +#define MAX_CPU_ID 255
     3.8      xc_physinfo_t info;
     3.9      char cpu_cap[128], *p=cpu_cap, *q=cpu_cap;
    3.10 -    int i;
    3.11 -    
    3.12 +    int i, j, max_cpu_id;
    3.13 +    PyObject *ret_obj, *node_to_cpu_obj;
    3.14 +    xc_cpu_to_node_t map[MAX_CPU_ID + 1];
    3.15 +
    3.16 +    set_xen_guest_handle(info.cpu_to_node, map);
    3.17 +    info.max_cpu_id = MAX_CPU_ID;
    3.18 +
    3.19      if ( xc_physinfo(self->xc_handle, &info) != 0 )
    3.20          return pyxc_error_to_exception();
    3.21  
    3.22 -    *q=0;
    3.23 -    for(i=0;i<sizeof(info.hw_cap)/4;i++)
    3.24 +    *q = 0;
    3.25 +    for ( i = 0; i < sizeof(info.hw_cap)/4; i++ )
    3.26      {
    3.27 -        p+=sprintf(p,"%08x:",info.hw_cap[i]);
    3.28 -        if(info.hw_cap[i])
    3.29 -            q=p;
    3.30 +        p += sprintf(p, "%08x:", info.hw_cap[i]);
    3.31 +        if ( info.hw_cap[i] )
    3.32 +            q = p;
    3.33      }
    3.34 -    if(q>cpu_cap)
    3.35 -        *(q-1)=0;
    3.36 +    if ( q > cpu_cap )
    3.37 +        *(q-1) = 0;
    3.38  
    3.39 -    return Py_BuildValue("{s:i,s:i,s:i,s:i,s:l,s:l,s:l,s:i,s:s}",
    3.40 -                         "threads_per_core", info.threads_per_core,
    3.41 -                         "cores_per_socket", info.cores_per_socket,
    3.42 -                         "sockets_per_node", info.sockets_per_node,
    3.43 -                         "nr_nodes",         info.nr_nodes,
    3.44 -                         "total_memory",     pages_to_kib(info.total_pages),
    3.45 -                         "free_memory",      pages_to_kib(info.free_pages),
    3.46 -                         "scrub_memory",     pages_to_kib(info.scrub_pages),
    3.47 -                         "cpu_khz",          info.cpu_khz,
    3.48 -                         "hw_caps",          cpu_cap);
    3.49 +    ret_obj = Py_BuildValue("{s:i,s:i,s:i,s:i,s:i,s:l,s:l,s:l,s:i,s:s}",
    3.50 +                            "nr_nodes",         info.nr_nodes,
    3.51 +                            "max_cpu_id",       info.max_cpu_id,
    3.52 +                            "threads_per_core", info.threads_per_core,
    3.53 +                            "cores_per_socket", info.cores_per_socket,
    3.54 +                            "sockets_per_node", info.sockets_per_node,
    3.55 +                            "total_memory",     pages_to_kib(info.total_pages),
    3.56 +                            "free_memory",      pages_to_kib(info.free_pages),
    3.57 +                            "scrub_memory",     pages_to_kib(info.scrub_pages),
    3.58 +                            "cpu_khz",          info.cpu_khz,
    3.59 +                            "hw_caps",          cpu_cap);
    3.60 +
    3.61 +    max_cpu_id = info.max_cpu_id;
    3.62 +    if ( max_cpu_id > MAX_CPU_ID )
    3.63 +        max_cpu_id = MAX_CPU_ID;
    3.64 +
    3.65 +    /* Construct node-to-cpu lists. */
    3.66 +    node_to_cpu_obj = PyList_New(0);
    3.67 +
    3.68 +    /* Make a list for each node. */
    3.69 +    for ( i = 0; i < info.nr_nodes; i++ )
    3.70 +    {
    3.71 +        PyObject *cpus = PyList_New(0);
    3.72 +        for ( j = 0; j <= max_cpu_id; j++ )
    3.73 +            if ( i == map[j] )
    3.74 +                PyList_Append(cpus, PyInt_FromLong(j));
    3.75 +        PyList_Append(node_to_cpu_obj, cpus);
    3.76 +    }
    3.77 +
    3.78 +    PyDict_SetItemString(ret_obj, "node_to_cpu", node_to_cpu_obj);
    3.79 +
    3.80 +    return ret_obj;
    3.81 +#undef MAX_CPU_ID
    3.82  }
    3.83  
    3.84  static PyObject *pyxc_xeninfo(XcObject *self)
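
Shape of the result, with illustrative values: if the hypervisor reports
nr_nodes = 2 and fills map[0..3] = {0, 0, 1, 1}, the loops above build
node_to_cpu = [[0, 1], [2, 3]], which is attached to the returned
dictionary under the 'node_to_cpu' key.
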
     4.1 --- a/tools/python/xen/xend/XendNode.py	Fri Jul 06 15:01:20 2007 +0100
     4.2 +++ b/tools/python/xen/xend/XendNode.py	Fri Jul 06 16:12:07 2007 +0100
     4.3 @@ -533,6 +533,54 @@ class XendNode:
     4.4                  ['version', ver],
     4.5                  ['machine', mch]]
     4.6  
     4.7 +    def list_to_rangepairs(self,cmap):
     4.8 +            cmap.sort()
     4.9 +            pairs = []
    4.10 +            x = y = 0
    4.11 +            for i in range(0,len(cmap)):
    4.12 +                try:
    4.13 +                    if ((cmap[y+1] - cmap[i]) > 1):
    4.14 +                        pairs.append((cmap[x],cmap[y]))
    4.15 +                        x = y = i+1
    4.16 +                    else:
    4.17 +                        y = y + 1
    4.18 +                # ran off the end: flush the pending (x, y) range pair
    4.19 +                except IndexError:
    4.20 +                    pairs.append((cmap[x],cmap[y]))
    4.21 +
    4.22 +            return pairs
    4.23 +
    4.24 +    def format_pairs(self,pairs):
    4.25 +            if not pairs:
    4.26 +                return "no cpus"
    4.27 +            out = ""
    4.28 +            for f,s in pairs:
    4.29 +                if (f==s):
    4.30 +                    out += '%d'%f
    4.31 +                else:
    4.32 +                    out += '%d-%d'%(f,s)
    4.33 +                out += ','
    4.34 +            # trim trailing ','
    4.35 +            return out[:-1]
    4.36 +
    4.37 +    def list_to_strrange(self,list):
    4.38 +        return self.format_pairs(self.list_to_rangepairs(list))
    4.39 +
    4.40 +    def format_node_to_cpu(self, pinfo):
    4.41 +        str=''
    4.42 +        whitespace=''
    4.43 +        try:
    4.44 +            node_to_cpu=pinfo['node_to_cpu']
    4.45 +            for i in range(0, pinfo['nr_nodes']):
    4.46 +            str+='%snode%d:%s\n' % (whitespace,
    4.47 +                                    i,
    4.48 +                                    self.list_to_strrange(node_to_cpu[i]))
    4.49 +            whitespace='%25s' % ''
    4.50 +        except:
    4.51 +            str='none\n'
    4.52 +        return str[:-1]
    4.53 +
    4.54 +
    4.55      def physinfo(self):
    4.56          info = self.xc.physinfo()
    4.57  
    4.58 @@ -545,6 +593,7 @@ class XendNode:
    4.59          # physinfo is in KiB, need it in MiB
    4.60          info['total_memory'] = info['total_memory'] / 1024
    4.61          info['free_memory']  = info['free_memory'] / 1024
    4.62 +        info['node_to_cpu']  = self.format_node_to_cpu(info)
    4.63  
    4.64          ITEM_ORDER = ['nr_cpus',
    4.65                        'nr_nodes',
    4.66 @@ -555,6 +604,7 @@ class XendNode:
    4.67                        'hw_caps',
    4.68                        'total_memory',
    4.69                        'free_memory',
    4.70 +                      'node_to_cpu'
    4.71                        ]
    4.72  
    4.73          return [[k, info[k]] for k in ITEM_ORDER]
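
Worked example for the new helpers (input values illustrative):
list_to_rangepairs([0, 1, 2, 5, 6, 9]) yields [(0, 2), (5, 6), (9, 9)],
which format_pairs() renders as "0-2,5-6,9"; an empty cpu list renders as
"no cpus".  format_node_to_cpu() emits one such range string per node,
padding continuation lines with 25 spaces so they line up under the
'xm info' value column.
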
     5.1 --- a/tools/xenmon/xenbaked.c	Fri Jul 06 15:01:20 2007 +0100
     5.2 +++ b/tools/xenmon/xenbaked.c	Fri Jul 06 16:12:07 2007 +0100
     5.3 @@ -444,7 +444,7 @@ struct t_rec **init_rec_ptrs(struct t_bu
     5.4   */
     5.5  unsigned int get_num_cpus(void)
     5.6  {
     5.7 -    xc_physinfo_t physinfo;
     5.8 +    xc_physinfo_t physinfo = { 0 };
     5.9      int xc_handle = xc_interface_open();
    5.10      int ret;
    5.11  
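
This zero initialisation matters now that xc_physinfo() forwards IN
fields: stack garbage in physinfo.max_cpu_id or physinfo.cpu_to_node
would otherwise reach the hypervisor as if the caller had supplied a
cpu_to_node buffer.  The xenstat and xentrace hunks below make the same
fix.
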
     6.1 --- a/tools/xenstat/libxenstat/src/xenstat.c	Fri Jul 06 15:01:20 2007 +0100
     6.2 +++ b/tools/xenstat/libxenstat/src/xenstat.c	Fri Jul 06 16:12:07 2007 +0100
     6.3 @@ -135,7 +135,7 @@ xenstat_node *xenstat_get_node(xenstat_h
     6.4  {
     6.5  #define DOMAIN_CHUNK_SIZE 256
     6.6  	xenstat_node *node;
     6.7 -	xc_physinfo_t physinfo;
     6.8 +	xc_physinfo_t physinfo = { 0 };
     6.9  	xc_domaininfo_t domaininfo[DOMAIN_CHUNK_SIZE];
    6.10  	unsigned int new_domains;
    6.11  	unsigned int i;
     7.1 --- a/tools/xentrace/xentrace.c	Fri Jul 06 15:01:20 2007 +0100
     7.2 +++ b/tools/xentrace/xentrace.c	Fri Jul 06 16:12:07 2007 +0100
     7.3 @@ -256,7 +256,7 @@ struct t_rec **init_rec_ptrs(struct t_bu
     7.4   */
     7.5  unsigned int get_num_cpus(void)
     7.6  {
     7.7 -    xc_physinfo_t physinfo;
     7.8 +    xc_physinfo_t physinfo = { 0 };
     7.9      int xc_handle = xc_interface_open();
    7.10      int ret;
    7.11      
     8.1 --- a/tools/xm-test/tests/info/02_info_compiledata_pos.py	Fri Jul 06 15:01:20 2007 +0100
     8.2 +++ b/tools/xm-test/tests/info/02_info_compiledata_pos.py	Fri Jul 06 16:12:07 2007 +0100
     8.3 @@ -18,9 +18,7 @@ map = {}
     8.4  for line in lines:
     8.5      pieces = line.split(" : ", 1)
     8.6  
     8.7 -    if len(pieces) < 2:
     8.8 -        FAIL("Found invalid line: [%s]" % line)
     8.9 -    else:
    8.10 +    if len(pieces) > 1:
    8.11          map[pieces[0]] = pieces[1]
    8.12  
    8.13  for field in ["cores_per_socket", "threads_per_core", "cpu_mhz",
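
This relaxation is needed because 'xm info' values can now span lines:
the indented continuation lines emitted by format_node_to_cpu() (e.g. a
second line reading node1:... under the node_to_cpu field) contain no
" : " separator, so split(" : ", 1) returns a single piece and such
lines are now skipped instead of failing the test.
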
     9.1 --- a/xen/arch/ia64/xen/dom0_ops.c	Fri Jul 06 15:01:20 2007 +0100
     9.2 +++ b/xen/arch/ia64/xen/dom0_ops.c	Fri Jul 06 16:12:07 2007 +0100
     9.3 @@ -240,8 +240,7 @@ long arch_do_sysctl(xen_sysctl_t *op, XE
     9.4      {
     9.5  #ifdef IA64_NUMA_PHYSINFO
     9.6          int i;
     9.7 -        node_data_t *chunks;
     9.8 -        u64 *map, cpu_to_node_map[MAX_NUMNODES];
     9.9 +        uint32_t *map, cpu_to_node_map[NR_CPUS];
    9.10  #endif
    9.11  
    9.12          xen_sysctl_physinfo_t *pi = &op->u.physinfo;
    9.13 @@ -250,11 +249,9 @@ long arch_do_sysctl(xen_sysctl_t *op, XE
    9.14              cpus_weight(cpu_sibling_map[0]);
    9.15          pi->cores_per_socket =
    9.16              cpus_weight(cpu_core_map[0]) / pi->threads_per_core;
    9.17 -        pi->sockets_per_node = 
    9.18 -            num_online_cpus() / cpus_weight(cpu_core_map[0]);
    9.19 -#ifndef IA64_NUMA_PHYSINFO
    9.20 -        pi->nr_nodes         = 1; 
    9.21 -#endif
    9.22 +        pi->nr_nodes         = num_online_nodes();
    9.23 +        pi->sockets_per_node = num_online_cpus() /
    9.24 +            (pi->nr_nodes * pi->cores_per_socket * pi->threads_per_core);
    9.25          pi->total_pages      = total_pages; 
    9.26          pi->free_pages       = avail_domheap_pages();
    9.27          pi->scrub_pages      = avail_scrub_pages();
    9.28 @@ -264,41 +261,6 @@ long arch_do_sysctl(xen_sysctl_t *op, XE
    9.29          ret = 0;
    9.30  
    9.31  #ifdef IA64_NUMA_PHYSINFO
    9.32 -        /* fetch memory_chunk pointer from guest */
    9.33 -        get_xen_guest_handle(chunks, pi->memory_chunks);
    9.34 -
    9.35 -        printk("chunks=%p, num_node_memblks=%u\n", chunks, num_node_memblks);
    9.36 -        /* if it is set, fill out memory chunk array */
    9.37 -        if (chunks != NULL) {
    9.38 -            if (num_node_memblks == 0) {
    9.39 -                /* Non-NUMA machine.  Put pseudo-values.  */
    9.40 -                node_data_t data;
    9.41 -                data.node_start_pfn = 0;
    9.42 -                data.node_spanned_pages = total_pages;
    9.43 -                data.node_id = 0;
    9.44 -                /* copy memory chunk structs to guest */
    9.45 -                if (copy_to_guest_offset(pi->memory_chunks, 0, &data, 1)) {
    9.46 -                    ret = -EFAULT;
    9.47 -                    break;
    9.48 -                }
    9.49 -            } else {
    9.50 -                for (i = 0; i < num_node_memblks && i < PUBLIC_MAXCHUNKS; i++) {
    9.51 -                    node_data_t data;
    9.52 -                    data.node_start_pfn = node_memblk[i].start_paddr >>
    9.53 -                                          PAGE_SHIFT;
    9.54 -                    data.node_spanned_pages = node_memblk[i].size >> PAGE_SHIFT;
    9.55 -                    data.node_id = node_memblk[i].nid;
    9.56 -                    /* copy memory chunk structs to guest */
    9.57 -                    if (copy_to_guest_offset(pi->memory_chunks, i, &data, 1)) {
    9.58 -                        ret = -EFAULT;
    9.59 -                        break;
    9.60 -                    }
    9.61 -                }
    9.62 -            }
    9.63 -        }
    9.64 -        /* set number of notes */
    9.65 -        pi->nr_nodes = num_online_nodes();
    9.66 -
    9.67          /* fetch cpu_to_node pointer from guest */
    9.68          get_xen_guest_handle(map, pi->cpu_to_node);
    9.69  
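
Worked example for the new sockets_per_node expression (numbers
illustrative): with 32 online cpus, 2 nodes, 2 cores per socket and 2
threads per core, sockets_per_node = 32 / (2 * 2 * 2) = 4 sockets per
node, whereas the old num_online_cpus() / cpus_weight(cpu_core_map[0]) =
32 / 4 = 8 counted sockets across the whole machine.  The powerpc and
x86 hunks below apply the same rewrite.
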
    10.1 --- a/xen/arch/powerpc/sysctl.c	Fri Jul 06 15:01:20 2007 +0100
    10.2 +++ b/xen/arch/powerpc/sysctl.c	Fri Jul 06 16:12:07 2007 +0100
    10.3 @@ -45,10 +45,10 @@ long arch_do_sysctl(struct xen_sysctl *s
    10.4              cpus_weight(cpu_sibling_map[0]);
    10.5          pi->cores_per_socket =
    10.6              cpus_weight(cpu_core_map[0]) / pi->threads_per_core;
    10.7 -        pi->sockets_per_node = 
    10.8 -            num_online_cpus() / cpus_weight(cpu_core_map[0]);
    10.9 +        pi->sockets_per_node = num_online_cpus() /
   10.10 +            (num_online_nodes() * pi->cores_per_socket * pi->threads_per_core);
   10.11  
   10.12 -        pi->nr_nodes         = 1;
   10.13 +        pi->nr_nodes         = num_online_nodes();
   10.14          pi->total_pages      = total_pages;
   10.15          pi->free_pages       = avail_domheap_pages();
   10.16          pi->cpu_khz          = cpu_khz;
    11.1 --- a/xen/arch/x86/sysctl.c	Fri Jul 06 15:01:20 2007 +0100
    11.2 +++ b/xen/arch/x86/sysctl.c	Fri Jul 06 16:12:07 2007 +0100
    11.3 @@ -23,6 +23,10 @@
    11.4  #include <asm/hvm/hvm.h>
    11.5  #include <asm/hvm/support.h>
    11.6  #include <asm/processor.h>
    11.7 +#include <asm/numa.h>
    11.8 +#include <xen/nodemask.h>
    11.9 +
   11.10 +#define get_xen_guest_handle(val, hnd)  do { val = (hnd).p; } while (0)
   11.11  
   11.12  long arch_do_sysctl(
   11.13      struct xen_sysctl *sysctl, XEN_GUEST_HANDLE(xen_sysctl_t) u_sysctl)
   11.14 @@ -34,25 +38,41 @@ long arch_do_sysctl(
   11.15  
   11.16      case XEN_SYSCTL_physinfo:
   11.17      {
   11.18 +        uint32_t i, max_array_ent;
   11.19 +
   11.20          xen_sysctl_physinfo_t *pi = &sysctl->u.physinfo;
   11.21  
   11.22          pi->threads_per_core =
   11.23              cpus_weight(cpu_sibling_map[0]);
   11.24          pi->cores_per_socket =
   11.25              cpus_weight(cpu_core_map[0]) / pi->threads_per_core;
   11.26 -        pi->sockets_per_node = 
   11.27 -            num_online_cpus() / cpus_weight(cpu_core_map[0]);
   11.28 +        pi->nr_nodes = num_online_nodes();
   11.29 +        pi->sockets_per_node = num_online_cpus() /
   11.30 +            (pi->nr_nodes * pi->cores_per_socket * pi->threads_per_core);
   11.31  
   11.32 -        pi->nr_nodes         = 1;
   11.33          pi->total_pages      = total_pages;
   11.34          pi->free_pages       = avail_domheap_pages();
   11.35          pi->scrub_pages      = avail_scrub_pages();
   11.36          pi->cpu_khz          = cpu_khz;
   11.37          memset(pi->hw_cap, 0, sizeof(pi->hw_cap));
   11.38          memcpy(pi->hw_cap, boot_cpu_data.x86_capability, NCAPINTS*4);
   11.39 -        ret = 0;
   11.40 -        if ( copy_to_guest(u_sysctl, sysctl, 1) )
   11.41 -            ret = -EFAULT;
   11.42 +
   11.43 +        max_array_ent = pi->max_cpu_id;
   11.44 +        pi->max_cpu_id = last_cpu(cpu_online_map);
   11.45 +        max_array_ent = min_t(uint32_t, max_array_ent, pi->max_cpu_id);
   11.46 +
   11.47 +        ret = -EFAULT;
   11.48 +        if ( !guest_handle_is_null(pi->cpu_to_node) )
   11.49 +        {
   11.50 +            for ( i = 0; i <= max_array_ent; i++ )
   11.51 +            {
   11.52 +                uint32_t node = cpu_online(i) ? cpu_to_node(i) : ~0u;
   11.53 +                if ( copy_to_guest_offset(pi->cpu_to_node, i, &node, 1) )
   11.54 +                    break;
   11.55 +            }
   11.56 +        }
   11.57 +
   11.58 +        ret = copy_to_guest(u_sysctl, sysctl, 1) ? -EFAULT : 0;
   11.59      }
   11.60      break;
   11.61      
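
The max_cpu_id handshake, with illustrative numbers: on an 8-cpu host
last_cpu(cpu_online_map) is 7, so a caller passing max_cpu_id = 255 gets
entries 0-7 of cpu_to_node filled and max_cpu_id = 7 back, while a
caller passing max_cpu_id = 3 gets only entries 0-3 and can detect the
truncation because the returned max_cpu_id exceeds the value it passed
in.  Offline cpus in the copied range are reported as the sentinel ~0u,
and a null cpu_to_node handle skips the copy entirely.
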
    12.1 --- a/xen/include/public/sysctl.h	Fri Jul 06 15:01:20 2007 +0100
    12.2 +++ b/xen/include/public/sysctl.h	Fri Jul 06 16:12:07 2007 +0100
    12.3 @@ -34,7 +34,7 @@
    12.4  #include "xen.h"
    12.5  #include "domctl.h"
    12.6  
    12.7 -#define XEN_SYSCTL_INTERFACE_VERSION 0x00000003
    12.8 +#define XEN_SYSCTL_INTERFACE_VERSION 0x00000004
    12.9  
   12.10  /*
   12.11   * Read console content from Xen buffer ring.
   12.12 @@ -76,6 +76,7 @@ DEFINE_XEN_GUEST_HANDLE(xen_sysctl_tbuf_
   12.13   */
   12.14  #define XEN_SYSCTL_physinfo          3
   12.15  struct xen_sysctl_physinfo {
   12.16 +    /* OUT variables. */
   12.17      uint32_t threads_per_core;
   12.18      uint32_t cores_per_socket;
   12.19      uint32_t sockets_per_node;
   12.20 @@ -85,6 +86,23 @@ struct xen_sysctl_physinfo {
   12.21      uint64_aligned_t free_pages;
   12.22      uint64_aligned_t scrub_pages;
   12.23      uint32_t hw_cap[8];
   12.24 +
   12.25 +    /* IN/OUT variables. */
   12.26 +    /*
   12.27 +     * IN: maximum addressable entry in the caller-provided cpu_to_node array.
   12.28 +     * OUT: largest cpu identifier in the system.
   12.29 +     * If OUT is greater than IN then the cpu_to_node array is truncated!
   12.30 +     */
   12.31 +    uint32_t max_cpu_id;
   12.32 +    /*
   12.33 +     * If not NULL, this array is filled with the node identifier of each cpu.
   12.34 +     * If a cpu has no node information (e.g., cpu not present) then the
   12.35 +     * sentinel value ~0u is written.
   12.36 +     * The size of this array is specified by the caller in @max_cpu_id.
   12.37 +     * If the actual @max_cpu_id is smaller than the array then the trailing
   12.38 +     * elements of the array will not be written by the sysctl.
   12.39 +     */
   12.40 +    XEN_GUEST_HANDLE_64(uint32_t) cpu_to_node;
   12.41  };
   12.42  typedef struct xen_sysctl_physinfo xen_sysctl_physinfo_t;
   12.43  DEFINE_XEN_GUEST_HANDLE(xen_sysctl_physinfo_t);
    13.1 --- a/xen/include/xen/cpumask.h	Fri Jul 06 15:01:20 2007 +0100
    13.2 +++ b/xen/include/xen/cpumask.h	Fri Jul 06 16:12:07 2007 +0100
    13.3 @@ -222,6 +222,15 @@ static inline int __next_cpu(int n, cons
    13.4  	return min_t(int, nbits, find_next_bit(srcp->bits, nbits, n+1));
    13.5  }
    13.6  
    13.7 +#define last_cpu(src) __last_cpu(&(src), NR_CPUS)
    13.8 +static inline int __last_cpu(const cpumask_t *srcp, int nbits)
    13.9 +{
   13.10 +	int cpu, pcpu = NR_CPUS;
   13.11 +	for (cpu = first_cpu(*srcp); cpu < NR_CPUS; cpu = next_cpu(cpu, *srcp))
   13.12 +		pcpu = cpu;
   13.13 +	return pcpu;
   13.14 +}
   13.15 +
   13.16  #define cpumask_of_cpu(cpu)						\
   13.17  ({									\
   13.18  	typeof(_unused_cpumask_arg_) m;					\
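
Behaviour of the new last_cpu() helper, by example (a sketch against the
existing cpumask accessors; the function wrapper and ASSERTs are
illustrative):

    static void last_cpu_example(void)
    {
        cpumask_t m = CPU_MASK_NONE;

        cpu_set(0, m);
        cpu_set(2, m);
        cpu_set(5, m);
        ASSERT(last_cpu(m) == 5);        /* remembers the final set bit */

        cpus_clear(m);
        ASSERT(last_cpu(m) == NR_CPUS);  /* empty mask: loop body never runs */
    }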