direct-io.hg
view xen/arch/x86/oprofile/nmi_int.c @ 11135:88e6bd5e2b54

Whitespace clean-ups.

Signed-off-by: Steven Hand <steven@xensource.com>
Author:   shand@kneesaa.uk.xensource.com
Date:     Wed Aug 16 11:36:13 2006 +0100
Parents:  23591d2c46aa
Children: 3f57453d404c

/**
 * @file nmi_int.c
 *
 * @remark Copyright 2002 OProfile authors
 * @remark Read the file COPYING
 *
 * @author John Levon <levon@movementarian.org>
 *
 * Modified for Xen: by Aravind Menon & Jose Renato Santos
 * These modifications are:
 * Copyright (C) 2005 Hewlett-Packard Co.
 */

#include <xen/event.h>
#include <xen/types.h>
#include <xen/errno.h>
#include <xen/init.h>
#include <xen/nmi.h>
#include <public/xen.h>
#include <asm/msr.h>
#include <asm/apic.h>
#include <asm/regs.h>
#include <asm/current.h>
#include <xen/delay.h>

#include "op_counter.h"
#include "op_x86_model.h"

static struct op_x86_model_spec const *model;
static struct op_msrs cpu_msrs[NR_CPUS];
static unsigned long saved_lvtpc[NR_CPUS];

#define VIRQ_BITMASK_SIZE (MAX_OPROF_DOMAINS/32 + 1)
extern int active_domains[MAX_OPROF_DOMAINS];
extern unsigned int adomains;
extern struct domain *primary_profiler;
extern struct domain *adomain_ptrs[MAX_OPROF_DOMAINS];
extern unsigned long virq_ovf_pending[VIRQ_BITMASK_SIZE];
extern int is_active(struct domain *d);
extern int active_id(struct domain *d);
extern int is_profiled(struct domain *d);

extern size_t strlcpy(char *dest, const char *src, size_t size);
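
/*
 * NMI handler: invoked on every performance counter overflow NMI.
 * If a counter overflowed while an actively profiled domain (rather
 * than Xen itself) was running, notify that domain's current VCPU
 * via VIRQ_XENOPROF.
 */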
int nmi_callback(struct cpu_user_regs *regs, int cpu)
{
        int xen_mode, ovf;

        ovf = model->check_ctrs(cpu, &cpu_msrs[cpu], regs);
        xen_mode = ring_0(regs);
        if ( ovf && is_active(current->domain) && !xen_mode )
                send_guest_vcpu_virq(current, VIRQ_XENOPROF);

        return 1;
}
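
/*
 * Save the current contents of this CPU's counter and control MSRs
 * so that they can be restored when profiling is torn down.
 */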
static void nmi_cpu_save_registers(struct op_msrs *msrs)
{
        unsigned int const nr_ctrs = model->num_counters;
        unsigned int const nr_ctrls = model->num_controls;
        struct op_msr *counters = msrs->counters;
        struct op_msr *controls = msrs->controls;
        unsigned int i;

        for (i = 0; i < nr_ctrs; ++i) {
                rdmsr(counters[i].addr,
                      counters[i].saved.low,
                      counters[i].saved.high);
        }

        for (i = 0; i < nr_ctrls; ++i) {
                rdmsr(controls[i].addr,
                      controls[i].saved.low,
                      controls[i].saved.high);
        }
}
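
/*
 * Per-CPU helper, run via on_each_cpu(): look up the model-specific
 * MSR addresses for this CPU, then snapshot their contents.
 */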
static void nmi_save_registers(void *dummy)
{
        int cpu = smp_processor_id();
        struct op_msrs *msrs = &cpu_msrs[cpu];
        model->fill_in_addresses(msrs);
        nmi_cpu_save_registers(msrs);
}
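
/* Free the per-CPU MSR save areas allocated by allocate_msrs(). */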
static void free_msrs(void)
{
        int i;
        for (i = 0; i < NR_CPUS; ++i) {
                xfree(cpu_msrs[i].counters);
                cpu_msrs[i].counters = NULL;
                xfree(cpu_msrs[i].controls);
                cpu_msrs[i].controls = NULL;
        }
}
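
/*
 * Allocate counter and control save areas for every online CPU.
 * Returns 1 on success; on failure, everything allocated so far is
 * freed and 0 is returned.
 */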
static int allocate_msrs(void)
{
        int success = 1;
        size_t controls_size = sizeof(struct op_msr) * model->num_controls;
        size_t counters_size = sizeof(struct op_msr) * model->num_counters;

        int i;
        for (i = 0; i < NR_CPUS; ++i) {
                if (!test_bit(i, &cpu_online_map))
                        continue;

                cpu_msrs[i].counters = xmalloc_bytes(counters_size);
                if (!cpu_msrs[i].counters) {
                        success = 0;
                        break;
                }
                cpu_msrs[i].controls = xmalloc_bytes(controls_size);
                if (!cpu_msrs[i].controls) {
                        success = 0;
                        break;
                }
        }

        if (!success)
                free_msrs();

        return success;
}
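
/*
 * Program the performance counter control registers on each CPU
 * according to the current counter_config.
 */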
static void nmi_cpu_setup(void *dummy)
{
        int cpu = smp_processor_id();
        struct op_msrs *msrs = &cpu_msrs[cpu];
        model->setup_ctrs(msrs);
}

int nmi_setup_events(void)
{
        on_each_cpu(nmi_cpu_setup, NULL, 0, 1);
        return 0;
}
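
/*
 * Claim the hardware needed for profiling: allocate the MSR save
 * areas, reserve the local APIC performance counter NMI slot, and
 * snapshot the current MSR state on every CPU.
 */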
int nmi_reserve_counters(void)
{
        if (!allocate_msrs())
                return -ENOMEM;

        /* We must be careful to install our NMI handler
         * without actually triggering any NMIs, as doing so
         * would break the core code horrifically.
         */
        if (reserve_lapic_nmi() < 0) {
                free_msrs();
                return -EBUSY;
        }
        /* We need to serialize save and setup for HT because the subsets
         * of MSRs used by the save and setup operations are distinct.
         */
        on_each_cpu(nmi_save_registers, NULL, 0, 1);
        return 0;
}
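
/*
 * Install or remove nmi_callback() as the NMI handler; while it is
 * installed, counter overflow NMIs are forwarded to profiled guests.
 */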
int nmi_enable_virq(void)
{
        set_nmi_callback(nmi_callback);
        return 0;
}

void nmi_disable_virq(void)
{
        unset_nmi_callback();
}
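
/*
 * Restore the control and counter MSRs from the copies saved at
 * reservation time (controls first, then counters).
 */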
static void nmi_restore_registers(struct op_msrs *msrs)
{
        unsigned int const nr_ctrs = model->num_counters;
        unsigned int const nr_ctrls = model->num_controls;
        struct op_msr *counters = msrs->counters;
        struct op_msr *controls = msrs->controls;
        unsigned int i;

        for (i = 0; i < nr_ctrls; ++i) {
                wrmsr(controls[i].addr,
                      controls[i].saved.low,
                      controls[i].saved.high);
        }

        for (i = 0; i < nr_ctrs; ++i) {
                wrmsr(counters[i].addr,
                      counters[i].saved.low,
                      counters[i].saved.high);
        }
}

static void nmi_cpu_shutdown(void *dummy)
{
        int cpu = smp_processor_id();
        struct op_msrs *msrs = &cpu_msrs[cpu];
        nmi_restore_registers(msrs);
}
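
/*
 * Undo nmi_reserve_counters(): restore MSR state on every CPU,
 * release the local APIC NMI reservation, and free the save areas.
 */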
void nmi_release_counters(void)
{
        on_each_cpu(nmi_cpu_shutdown, NULL, 0, 1);
        release_lapic_nmi();
        free_msrs();
}
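
/*
 * Begin counting on this CPU: save the current LVTPC entry, switch
 * it to NMI delivery, and start the model-specific counters.
 */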
static void nmi_cpu_start(void *dummy)
{
        int cpu = smp_processor_id();
        struct op_msrs const *msrs = &cpu_msrs[cpu];
        saved_lvtpc[cpu] = apic_read(APIC_LVTPC);
        apic_write(APIC_LVTPC, APIC_DM_NMI);
        model->start(msrs);
}

int nmi_start(void)
{
        on_each_cpu(nmi_cpu_start, NULL, 0, 1);
        return 0;
}
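
/*
 * Stop counting on this CPU and restore the LVTPC entry saved by
 * nmi_cpu_start(), masking APIC errors around the restore (see the
 * comment below).
 */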
static void nmi_cpu_stop(void *dummy)
{
        unsigned int v;
        int cpu = smp_processor_id();
        struct op_msrs const *msrs = &cpu_msrs[cpu];
        model->stop(msrs);

        /* Restoring APIC_LVTPC can trigger an APIC error because the
         * delivery mode / vector number combination can be illegal. That
         * is by design: at power-on the APIC LVTs contain a zero vector
         * number, which is legal only for NMI delivery mode. So inhibit
         * APIC errors before restoring the LVTPC.
         */
        if ( !(apic_read(APIC_LVTPC) & APIC_DM_NMI)
             || (apic_read(APIC_LVTPC) & APIC_LVT_MASKED) )
        {
                printk("nmi_stop: APIC not good %x\n", apic_read(APIC_LVTPC));
                mdelay(5000);
        }
        v = apic_read(APIC_LVTERR);
        apic_write(APIC_LVTERR, v | APIC_LVT_MASKED);
        apic_write(APIC_LVTPC, saved_lvtpc[cpu]);
        apic_write(APIC_LVTERR, v);
}

void nmi_stop(void)
{
        on_each_cpu(nmi_cpu_stop, NULL, 0, 1);
}

struct op_counter_config counter_config[OP_MAX_COUNTER];
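
/*
 * Select the Pentium 4 model spec based on the model number and, on
 * SMP, the number of hyperthreading siblings.  Models > 6 and model 5
 * are unsupported, as are parts with more than two threads per core.
 */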
static int __init p4_init(char *cpu_type)
{
        __u8 cpu_model = current_cpu_data.x86_model;

        if ((cpu_model > 6) || (cpu_model == 5))
                return 0;

#ifndef CONFIG_SMP
        strncpy(cpu_type, "i386/p4", XENOPROF_CPU_TYPE_SIZE - 1);
        model = &op_p4_spec;
        return 1;
#else
        switch (smp_num_siblings) {
        case 1:
                strncpy(cpu_type, "i386/p4",
                        XENOPROF_CPU_TYPE_SIZE - 1);
                model = &op_p4_spec;
                return 1;

        case 2:
                strncpy(cpu_type, "i386/p4-ht",
                        XENOPROF_CPU_TYPE_SIZE - 1);
                model = &op_p4_ht2_spec;
                return 1;
        }
#endif
        printk("Xenoprof ERROR: P4 HyperThreading detected with > 2 threads\n");

        return 0;
}
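
/*
 * Classify a P6-family part and pick the matching cpu_type string;
 * all of them share the op_ppro_spec model.
 */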
static int __init ppro_init(char *cpu_type)
{
        __u8 cpu_model = current_cpu_data.x86_model;

        if (cpu_model > 0xd)
                return 0;

        if (cpu_model == 9) {
                strncpy(cpu_type, "i386/p6_mobile", XENOPROF_CPU_TYPE_SIZE - 1);
        } else if (cpu_model > 5) {
                strncpy(cpu_type, "i386/piii", XENOPROF_CPU_TYPE_SIZE - 1);
        } else if (cpu_model > 2) {
                strncpy(cpu_type, "i386/pii", XENOPROF_CPU_TYPE_SIZE - 1);
        } else {
                strncpy(cpu_type, "i386/ppro", XENOPROF_CPU_TYPE_SIZE - 1);
        }

        model = &op_ppro_spec;
        return 1;
}
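
/*
 * Driver entry point: identify the CPU, select the model-specific
 * spec, and report back the number of hardware counters and whether
 * the calling domain has become the primary profiler.
 */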
int nmi_init(int *num_events, int *is_primary, char *cpu_type)
{
        __u8 vendor = current_cpu_data.x86_vendor;
        __u8 family = current_cpu_data.x86;
        int prim = 0;

        if (!cpu_has_apic)
                return -ENODEV;

        if (primary_profiler == NULL) {
                /* For now, only dom0 can be the primary profiler */
                if (current->domain->domain_id == 0) {
                        primary_profiler = current->domain;
                        prim = 1;
                }
        }

        /* Make sure string is NUL-terminated */
        cpu_type[XENOPROF_CPU_TYPE_SIZE - 1] = 0;

        switch (vendor) {
        case X86_VENDOR_AMD:
                /* Needs to be at least an Athlon (or hammer in 32bit mode) */

                switch (family) {
                default:
                        return -ENODEV;
                case 6:
                        model = &op_athlon_spec;
                        strncpy(cpu_type, "i386/athlon",
                                XENOPROF_CPU_TYPE_SIZE - 1);
                        break;
                case 0xf:
                        model = &op_athlon_spec;
                        /* Actually it could be i386/hammer too, but
                           give user space a consistent name. */
                        strncpy(cpu_type, "x86-64/hammer",
                                XENOPROF_CPU_TYPE_SIZE - 1);
                        break;
                }
                break;

        case X86_VENDOR_INTEL:
                switch (family) {
                /* Pentium IV */
                case 0xf:
                        if (!p4_init(cpu_type))
                                return -ENODEV;
                        break;

                /* A P6-class processor */
                case 6:
                        if (!ppro_init(cpu_type))
                                return -ENODEV;
                        break;

                default:
                        return -ENODEV;
                }
                break;

        default:
                return -ENODEV;
        }

        *num_events = model->num_counters;
        *is_primary = prim;

        return 0;
}