ia64/xen-unstable
changeset 15557:a36c51f43fdb
[IA64] Fixup physinfo
Use max cpus per node to guess at sockets per node. This avoids
averaging problems with offline cpus and nodes without cpus. Also
fill in the cpu_to_node array.
Signed-off-by: Alex Williamson <alex.williamson@hp.com>
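
For illustration only, here is a small standalone sketch (not part of the patch; the topology numbers are made up: 2 nodes, 2 sockets per node, 2 cores per socket, 1 thread per core, with one socket's CPUs offlined on node 1) of why dividing the online CPU count across all nodes misestimates sockets_per_node, while the per-node maximum recovers the right value:

/* Toy model of the heuristic -- not Xen code, topology values invented. */
#include <stdio.h>

int main(void)
{
    int cores_per_socket = 2, threads_per_core = 1;
    int node_online_cpus[] = { 4, 2 };   /* node 1 has one socket (2 CPUs) offline */
    int nr_nodes = 2;

    int online = 0, max_node_cpus = 0;
    for (int i = 0; i < nr_nodes; i++) {
        online += node_online_cpus[i];
        if (node_online_cpus[i] > max_node_cpus)
            max_node_cpus = node_online_cpus[i];
    }

    /* Old calculation: average over nodes -> 6 / (2*2*1) = 1 socket/node. */
    int avg_guess = online / (nr_nodes * cores_per_socket * threads_per_core);

    /* New calculation: use the fullest node -> 4 / (2*1) = 2 sockets/node. */
    int max_guess = max_node_cpus / (cores_per_socket * threads_per_core);

    printf("average-based guess: %d, max-based guess: %d\n", avg_guess, max_guess);
    return 0;
}

With these made-up numbers the machine really has 2 sockets per node; the average-based formula reports 1 once CPUs are offline, the max-based formula still reports 2.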
author      Alex Williamson <alex.williamson@hp.com>
date        Tue Jul 10 11:15:54 2007 -0600 (2007-07-10)
parents     42586a0f4407
children    f536eb8576ee
files       xen/arch/ia64/xen/dom0_ops.c
line diff
--- a/xen/arch/ia64/xen/dom0_ops.c	Tue Jul 10 08:39:26 2007 -0600
+++ b/xen/arch/ia64/xen/dom0_ops.c	Tue Jul 10 11:15:54 2007 -0600
@@ -224,12 +224,6 @@ long arch_do_domctl(xen_domctl_t *op, XE
     return ret;
 }

-/*
- * Temporarily disable the NUMA PHYSINFO code until the rest of the
- * changes are upstream.
- */
-#undef IA64_NUMA_PHYSINFO
-
 long arch_do_sysctl(xen_sysctl_t *op, XEN_GUEST_HANDLE(xen_sysctl_t) u_sysctl)
 {
     long ret = 0;
@@ -238,46 +232,47 @@ long arch_do_sysctl(xen_sysctl_t *op, XE
     {
     case XEN_SYSCTL_physinfo:
     {
-#ifdef IA64_NUMA_PHYSINFO
-        int i;
-        uint32_t *map, cpu_to_node_map[NR_CPUS];
-#endif
+        int i, node_cpus = 0;
+        uint32_t max_array_ent;

         xen_sysctl_physinfo_t *pi = &op->u.physinfo;

-        pi->threads_per_core =
-            cpus_weight(cpu_sibling_map[0]);
+        pi->threads_per_core = cpus_weight(cpu_sibling_map[0]);
         pi->cores_per_socket =
             cpus_weight(cpu_core_map[0]) / pi->threads_per_core;
         pi->nr_nodes = num_online_nodes();
-        pi->sockets_per_node = num_online_cpus() /
-            (pi->nr_nodes * pi->cores_per_socket * pi->threads_per_core);
+
+        /*
+         * Guess at a sockets_per_node value.  Use the maximum number of
+         * CPUs per node to avoid deconfigured CPUs breaking the average.
+         */
+        for_each_online_node(i)
+            node_cpus = max(node_cpus, cpus_weight(node_to_cpumask(i)));
+
+        pi->sockets_per_node = node_cpus /
+            (pi->cores_per_socket * pi->threads_per_core);
+
         pi->total_pages = total_pages;
         pi->free_pages = avail_domheap_pages();
         pi->scrub_pages = avail_scrub_pages();
         pi->cpu_khz = local_cpu_data->proc_freq / 1000;
         memset(pi->hw_cap, 0, sizeof(pi->hw_cap));
-        //memcpy(pi->hw_cap, boot_cpu_data.x86_capability, NCAPINTS*4);
+
+        max_array_ent = pi->max_cpu_id;
+        pi->max_cpu_id = last_cpu(cpu_online_map);
+        max_array_ent = min_t(uint32_t, max_array_ent, pi->max_cpu_id);
+
         ret = 0;

-#ifdef IA64_NUMA_PHYSINFO
-        /* fetch cpu_to_node pointer from guest */
-        get_xen_guest_handle(map, pi->cpu_to_node);
-
-        /* if set, fill out cpu_to_node array */
-        if (map != NULL) {
-            /* copy cpu to node mapping to domU */
-            memset(cpu_to_node_map, 0, sizeof(cpu_to_node_map));
-            for (i = 0; i < num_online_cpus(); i++) {
-                cpu_to_node_map[i] = cpu_to_node(i);
-                if (copy_to_guest_offset(pi->cpu_to_node, i,
-                                         &(cpu_to_node_map[i]), 1)) {
+        if (!guest_handle_is_null(pi->cpu_to_node)) {
+            for (i = 0; i <= max_array_ent; i++) {
+                uint32_t node = cpu_online(i) ? cpu_to_node(i) : ~0u;
+                if (copy_to_guest_offset(pi->cpu_to_node, i, &node, 1)) {
                     ret = -EFAULT;
                     break;
                 }
             }
         }
-#endif

         if ( copy_to_guest(u_sysctl, op, 1) )
             ret = -EFAULT;
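
As a rough sketch of the guest-visible behaviour of the new cpu_to_node copy-out (illustrative only; the guest handle is replaced by a local array, cpu_online() is stubbed, and the figures — 16-entry caller buffer, CPUs 3 and 5 offline, 4 CPUs per node, highest online CPU 7 — are made up): the copy is bounded by the smaller of the caller-supplied max_cpu_id and the last online CPU, and offline CPUs below that bound are reported as ~0u.

/* Toy model of the copy-out loop -- not Xen code, values invented. */
#include <stdint.h>
#include <stdio.h>

static int cpu_online(uint32_t cpu)
{
    return cpu != 3 && cpu != 5;          /* pretend CPUs 3 and 5 are offline */
}

int main(void)
{
    uint32_t guest_max_cpu_id = 15;       /* caller sized its array for 16 entries */
    uint32_t last_online_cpu = 7;         /* what last_cpu(cpu_online_map) would give */
    uint32_t cpu_to_node[16];

    /* Clamp to whichever bound is smaller, as the patch does with min_t(). */
    uint32_t max_array_ent = guest_max_cpu_id < last_online_cpu
                           ? guest_max_cpu_id : last_online_cpu;

    for (uint32_t i = 0; i <= max_array_ent; i++)
        cpu_to_node[i] = cpu_online(i) ? i / 4 : ~0u;   /* 4 CPUs per node here */

    for (uint32_t i = 0; i <= max_array_ent; i++) {
        if (cpu_to_node[i] == ~0u)
            printf("cpu %u -> offline\n", i);
        else
            printf("cpu %u -> node %u\n", i, cpu_to_node[i]);
    }
    return 0;
}

The clamp means a caller that sizes its buffer generously still only receives entries up to the last online CPU, while a caller with a small buffer never has more entries written than it asked for via max_cpu_id.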