ia64/xen-unstable

patches/linux-2.6.16.29/xenoprof-generic.patch @ changeset 12327:eea9247ad5a0

[XENOPROF] Oprofile user-level samples for passive domains are being lost.
The number of lost samples is most significant when dom0 is idle.

From: joserenato.santos@hp.com
Signed-off-by: Keir Fraser <keir@xensource.com>
author  kfraser@localhost.localdomain
date    Thu Nov 09 11:47:42 2006 +0000
parents 041be3f6b38e
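The patch makes dom0's oprofile driver tag samples that Xen forwards on behalf of passive domains: the per-CPU buffer carries CPU_DOMAIN_SWITCH escape entries, sync_buffer() turns them into ESCAPE_CODE/DOMAIN_SWITCH_CODE records in the event buffer, and the current domain is now remembered per CPU across syncs so the first samples of the next sync are no longer dropped. The sketch below is not part of the patch; it only illustrates how a consumer of the event-buffer word stream might attribute samples to domains. DOMAIN_SWITCH_CODE and COORDINATOR_DOMAIN mirror the event_buffer.h hunk further down, ESCAPE_CODE is oprofile's usual ~0UL marker, and the buffer contents are made-up demo data.

/*
 * Minimal sketch of interpreting DOMAIN_SWITCH_CODE escape sequences in an
 * oprofile event-buffer stream.  Real streams also contain CPU switches,
 * cookie switches, etc.; this only handles the domain-switch records.
 */
#include <stdio.h>

#define ESCAPE_CODE             (~0UL)
#define DOMAIN_SWITCH_CODE      11
#define COORDINATOR_DOMAIN      (-1)

int main(void)
{
        /* pretend event-buffer words: escape sequences plus (eip, event) pairs */
        unsigned long buf[] = {
                ESCAPE_CODE, DOMAIN_SWITCH_CODE, 1,     /* samples now belong to domain 1 */
                0xc0100000UL, 0,                        /* one sample attributed to domain 1 */
                ESCAPE_CODE, DOMAIN_SWITCH_CODE, (unsigned long)COORDINATOR_DOMAIN,
                0xc0200000UL, 0,                        /* back to the coordinator's own samples */
        };
        unsigned long n = sizeof(buf) / sizeof(buf[0]);
        unsigned long i = 0;
        long domain = COORDINATOR_DOMAIN;

        while (i < n) {
                if (buf[i] == ESCAPE_CODE && i + 2 < n &&
                    buf[i + 1] == DOMAIN_SWITCH_CODE) {
                        /* escape sequence: all following samples belong to this domain */
                        domain = (long)buf[i + 2];
                        printf("domain switch -> %ld\n", domain);
                        i += 3;
                } else {
                        printf("sample eip=%#lx event=%lu domain=%ld\n",
                               buf[i], buf[i + 1], domain);
                        i += 2;
                }
        }
        return 0;
}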
diff -pruN ../orig-linux-2.6.16.29/drivers/oprofile/buffer_sync.c ./drivers/oprofile/buffer_sync.c
--- ../orig-linux-2.6.16.29/drivers/oprofile/buffer_sync.c 2006-11-06 14:46:52.000000000 -0800
+++ ./drivers/oprofile/buffer_sync.c 2006-11-06 15:16:52.000000000 -0800
@@ -6,6 +6,10 @@
  *
  * @author John Levon <levon@movementarian.org>
  *
+ * Modified by Aravind Menon for Xen
+ * These modifications are:
+ * Copyright (C) 2005 Hewlett-Packard Co.
+ *
  * This is the core of the buffer management. Each
  * CPU buffer is processed and entered into the
  * global event buffer. Such processing is necessary
@@ -38,6 +42,7 @@ static cpumask_t marked_cpus = CPU_MASK_
 static DEFINE_SPINLOCK(task_mortuary);
 static void process_task_mortuary(void);
 
+static int cpu_current_domain[NR_CPUS];
 
 /* Take ownership of the task struct and place it on the
  * list for processing. Only after two full buffer syncs
@@ -146,6 +151,11 @@ static void end_sync(void)
 int sync_start(void)
 {
         int err;
+        int i;
+
+        for (i = 0; i < NR_CPUS; i++) {
+                cpu_current_domain[i] = COORDINATOR_DOMAIN;
+        }
 
         start_cpu_work();
 
@@ -275,15 +285,31 @@ static void add_cpu_switch(int i)
         last_cookie = INVALID_COOKIE;
 }
 
-static void add_kernel_ctx_switch(unsigned int in_kernel)
+static void add_cpu_mode_switch(unsigned int cpu_mode)
 {
         add_event_entry(ESCAPE_CODE);
-        if (in_kernel)
-                add_event_entry(KERNEL_ENTER_SWITCH_CODE);
-        else
-                add_event_entry(KERNEL_EXIT_SWITCH_CODE);
+        switch (cpu_mode) {
+        case CPU_MODE_USER:
+                add_event_entry(USER_ENTER_SWITCH_CODE);
+                break;
+        case CPU_MODE_KERNEL:
+                add_event_entry(KERNEL_ENTER_SWITCH_CODE);
+                break;
+        case CPU_MODE_XEN:
+                add_event_entry(XEN_ENTER_SWITCH_CODE);
+                break;
+        default:
+                break;
+        }
 }
-
+
+static void add_domain_switch(unsigned long domain_id)
+{
+        add_event_entry(ESCAPE_CODE);
+        add_event_entry(DOMAIN_SWITCH_CODE);
+        add_event_entry(domain_id);
+}
+
 static void
 add_user_ctx_switch(struct task_struct const * task, unsigned long cookie)
 {
@@ -348,9 +374,9 @@ static int add_us_sample(struct mm_struc
  * for later lookup from userspace.
  */
 static int
-add_sample(struct mm_struct * mm, struct op_sample * s, int in_kernel)
+add_sample(struct mm_struct * mm, struct op_sample * s, int cpu_mode)
 {
-        if (in_kernel) {
+        if (cpu_mode >= CPU_MODE_KERNEL) {
                 add_sample_entry(s->eip, s->event);
                 return 1;
         } else if (mm) {
@@ -496,15 +522,21 @@ void sync_buffer(int cpu)
         struct mm_struct *mm = NULL;
         struct task_struct * new;
         unsigned long cookie = 0;
-        int in_kernel = 1;
+        int cpu_mode = 1;
         unsigned int i;
         sync_buffer_state state = sb_buffer_start;
         unsigned long available;
+        int domain_switch = 0;
 
         down(&buffer_sem);
 
         add_cpu_switch(cpu);
 
+        /* We need to assign the first samples in this CPU buffer to the
+           same domain that we were processing at the last sync_buffer */
+        if (cpu_current_domain[cpu] != COORDINATOR_DOMAIN) {
+                add_domain_switch(cpu_current_domain[cpu]);
+        }
         /* Remember, only we can modify tail_pos */
 
         available = get_slots(cpu_buf);
@@ -512,16 +544,18 @@ void sync_buffer(int cpu)
         for (i = 0; i < available; ++i) {
                 struct op_sample * s = &cpu_buf->buffer[cpu_buf->tail_pos];
 
-                if (is_code(s->eip)) {
-                        if (s->event <= CPU_IS_KERNEL) {
-                                /* kernel/userspace switch */
-                                in_kernel = s->event;
+                if (is_code(s->eip) && !domain_switch) {
+                        if (s->event <= CPU_MODE_XEN) {
+                                /* xen/kernel/userspace switch */
+                                cpu_mode = s->event;
                                 if (state == sb_buffer_start)
                                         state = sb_sample_start;
-                                add_kernel_ctx_switch(s->event);
+                                add_cpu_mode_switch(s->event);
                         } else if (s->event == CPU_TRACE_BEGIN) {
                                 state = sb_bt_start;
                                 add_trace_begin();
+                        } else if (s->event == CPU_DOMAIN_SWITCH) {
+                                domain_switch = 1;
                         } else {
                                 struct mm_struct * oldmm = mm;
 
@@ -535,11 +569,21 @@ void sync_buffer(int cpu)
                                 add_user_ctx_switch(new, cookie);
                         }
                 } else {
-                        if (state >= sb_bt_start &&
-                            !add_sample(mm, s, in_kernel)) {
-                                if (state == sb_bt_start) {
-                                        state = sb_bt_ignore;
-                                        atomic_inc(&oprofile_stats.bt_lost_no_mapping);
+                        if (domain_switch) {
+                                cpu_current_domain[cpu] = s->eip;
+                                add_domain_switch(s->eip);
+                                domain_switch = 0;
+                        } else {
+                                if (cpu_current_domain[cpu] !=
+                                    COORDINATOR_DOMAIN) {
+                                        add_sample_entry(s->eip, s->event);
+                                }
+                                else if (state >= sb_bt_start &&
+                                    !add_sample(mm, s, cpu_mode)) {
+                                        if (state == sb_bt_start) {
+                                                state = sb_bt_ignore;
+                                                atomic_inc(&oprofile_stats.bt_lost_no_mapping);
+                                        }
                                 }
                         }
                 }
@@ -548,6 +592,11 @@ void sync_buffer(int cpu)
         }
         release_mm(mm);
 
+        /* We reset domain to COORDINATOR at each CPU switch */
+        if (cpu_current_domain[cpu] != COORDINATOR_DOMAIN) {
+                add_domain_switch(COORDINATOR_DOMAIN);
+        }
+
         mark_done(cpu);
 
         up(&buffer_sem);
diff -pruN ../orig-linux-2.6.16.29/drivers/oprofile/cpu_buffer.c ./drivers/oprofile/cpu_buffer.c
--- ../orig-linux-2.6.16.29/drivers/oprofile/cpu_buffer.c 2006-11-06 14:46:52.000000000 -0800
+++ ./drivers/oprofile/cpu_buffer.c 2006-11-06 14:47:55.000000000 -0800
@@ -6,6 +6,10 @@
  *
  * @author John Levon <levon@movementarian.org>
  *
+ * Modified by Aravind Menon for Xen
+ * These modifications are:
+ * Copyright (C) 2005 Hewlett-Packard Co.
+ *
  * Each CPU has a local buffer that stores PC value/event
  * pairs. We also log context switches when we notice them.
  * Eventually each CPU's buffer is processed into the global
@@ -34,6 +38,8 @@ static void wq_sync_buffer(void *);
 #define DEFAULT_TIMER_EXPIRE (HZ / 10)
 static int work_enabled;
 
+static int32_t current_domain = COORDINATOR_DOMAIN;
+
 void free_cpu_buffers(void)
 {
         int i;
@@ -58,7 +64,7 @@ int alloc_cpu_buffers(void)
                         goto fail;
 
                 b->last_task = NULL;
-                b->last_is_kernel = -1;
+                b->last_cpu_mode = -1;
                 b->tracing = 0;
                 b->buffer_size = buffer_size;
                 b->tail_pos = 0;
@@ -114,7 +120,7 @@ void cpu_buffer_reset(struct oprofile_cp
         * collected will populate the buffer with proper
         * values to initialize the buffer
         */
-        cpu_buf->last_is_kernel = -1;
+        cpu_buf->last_cpu_mode = -1;
         cpu_buf->last_task = NULL;
 }
 
@@ -164,13 +170,13 @@ add_code(struct oprofile_cpu_buffer * bu
  * because of the head/tail separation of the writer and reader
  * of the CPU buffer.
  *
- * is_kernel is needed because on some architectures you cannot
+ * cpu_mode is needed because on some architectures you cannot
  * tell if you are in kernel or user space simply by looking at
- * pc. We tag this in the buffer by generating kernel enter/exit
- * events whenever is_kernel changes
+ * pc. We tag this in the buffer by generating kernel/user (and xen)
+ * enter events whenever cpu_mode changes
  */
 static int log_sample(struct oprofile_cpu_buffer * cpu_buf, unsigned long pc,
-                      int is_kernel, unsigned long event)
+                      int cpu_mode, unsigned long event)
 {
         struct task_struct * task;
 
@@ -181,18 +187,18 @@ static int log_sample(struct oprofile_cp
                 return 0;
         }
 
-        is_kernel = !!is_kernel;
-
         task = current;
 
         /* notice a switch from user->kernel or vice versa */
-        if (cpu_buf->last_is_kernel != is_kernel) {
-                cpu_buf->last_is_kernel = is_kernel;
-                add_code(cpu_buf, is_kernel);
+        if (cpu_buf->last_cpu_mode != cpu_mode) {
+                cpu_buf->last_cpu_mode = cpu_mode;
+                add_code(cpu_buf, cpu_mode);
         }
-
+
         /* notice a task switch */
-        if (cpu_buf->last_task != task) {
+        /* if not processing other domain samples */
+        if ((cpu_buf->last_task != task) &&
+            (current_domain == COORDINATOR_DOMAIN)) {
                 cpu_buf->last_task = task;
                 add_code(cpu_buf, (unsigned long)task);
         }
@@ -269,6 +275,25 @@ void oprofile_add_trace(unsigned long pc
         add_sample(cpu_buf, pc, 0);
 }
 
+int oprofile_add_domain_switch(int32_t domain_id)
+{
+        struct oprofile_cpu_buffer * cpu_buf = &cpu_buffer[smp_processor_id()];
+
+        /* should have space for switching into and out of domain
+           (2 slots each) plus one sample and one cpu mode switch */
+        if (((nr_available_slots(cpu_buf) < 6) &&
+             (domain_id != COORDINATOR_DOMAIN)) ||
+            (nr_available_slots(cpu_buf) < 2))
+                return 0;
+
+        add_code(cpu_buf, CPU_DOMAIN_SWITCH);
+        add_sample(cpu_buf, domain_id, 0);
+
+        current_domain = domain_id;
+
+        return 1;
+}
+
 /*
  * This serves to avoid cpu buffer overflow, and makes sure
  * the task mortuary progresses
diff -pruN ../orig-linux-2.6.16.29/drivers/oprofile/cpu_buffer.h ./drivers/oprofile/cpu_buffer.h
--- ../orig-linux-2.6.16.29/drivers/oprofile/cpu_buffer.h 2006-11-06 14:46:52.000000000 -0800
+++ ./drivers/oprofile/cpu_buffer.h 2006-11-06 14:47:55.000000000 -0800
@@ -36,7 +36,7 @@ struct oprofile_cpu_buffer {
         volatile unsigned long tail_pos;
         unsigned long buffer_size;
         struct task_struct * last_task;
-        int last_is_kernel;
+        int last_cpu_mode;
         int tracing;
         struct op_sample * buffer;
         unsigned long sample_received;
@@ -51,7 +51,10 @@ extern struct oprofile_cpu_buffer cpu_bu
 void cpu_buffer_reset(struct oprofile_cpu_buffer * cpu_buf);
 
 /* transient events for the CPU buffer -> event buffer */
-#define CPU_IS_KERNEL 1
-#define CPU_TRACE_BEGIN 2
+#define CPU_MODE_USER 0
+#define CPU_MODE_KERNEL 1
+#define CPU_MODE_XEN 2
+#define CPU_TRACE_BEGIN 3
+#define CPU_DOMAIN_SWITCH 4
 
 #endif /* OPROFILE_CPU_BUFFER_H */
diff -pruN ../orig-linux-2.6.16.29/drivers/oprofile/event_buffer.h ./drivers/oprofile/event_buffer.h
--- ../orig-linux-2.6.16.29/drivers/oprofile/event_buffer.h 2006-11-06 14:46:52.000000000 -0800
+++ ./drivers/oprofile/event_buffer.h 2006-11-06 14:47:55.000000000 -0800
@@ -29,15 +29,20 @@ void wake_up_buffer_waiter(void);
 #define CPU_SWITCH_CODE 2
 #define COOKIE_SWITCH_CODE 3
 #define KERNEL_ENTER_SWITCH_CODE 4
-#define KERNEL_EXIT_SWITCH_CODE 5
+#define USER_ENTER_SWITCH_CODE 5
 #define MODULE_LOADED_CODE 6
 #define CTX_TGID_CODE 7
 #define TRACE_BEGIN_CODE 8
 #define TRACE_END_CODE 9
+#define XEN_ENTER_SWITCH_CODE 10
+#define DOMAIN_SWITCH_CODE 11
 
 #define INVALID_COOKIE ~0UL
 #define NO_COOKIE 0UL
 
+/* Constant used to refer to coordinator domain (Xen) */
+#define COORDINATOR_DOMAIN -1
+
 /* add data to the event buffer */
 void add_event_entry(unsigned long data);
 
diff -pruN ../orig-linux-2.6.16.29/drivers/oprofile/oprof.c ./drivers/oprofile/oprof.c
--- ../orig-linux-2.6.16.29/drivers/oprofile/oprof.c 2006-11-06 14:46:52.000000000 -0800
+++ ./drivers/oprofile/oprof.c 2006-11-06 14:47:55.000000000 -0800
@@ -5,6 +5,10 @@
  * @remark Read the file COPYING
  *
  * @author John Levon <levon@movementarian.org>
+ *
+ * Modified by Aravind Menon for Xen
+ * These modifications are:
+ * Copyright (C) 2005 Hewlett-Packard Co.
  */
 
 #include <linux/kernel.h>
@@ -19,7 +23,7 @@
 #include "cpu_buffer.h"
 #include "buffer_sync.h"
 #include "oprofile_stats.h"
-
+
 struct oprofile_operations oprofile_ops;
 
 unsigned long oprofile_started;
@@ -33,6 +37,32 @@ static DECLARE_MUTEX(start_sem);
  */
 static int timer = 0;
 
+int oprofile_set_active(int active_domains[], unsigned int adomains)
+{
+        int err;
+
+        if (!oprofile_ops.set_active)
+                return -EINVAL;
+
+        down(&start_sem);
+        err = oprofile_ops.set_active(active_domains, adomains);
+        up(&start_sem);
+        return err;
+}
+
+int oprofile_set_passive(int passive_domains[], unsigned int pdomains)
+{
+        int err;
+
+        if (!oprofile_ops.set_passive)
+                return -EINVAL;
+
+        down(&start_sem);
+        err = oprofile_ops.set_passive(passive_domains, pdomains);
+        up(&start_sem);
+        return err;
+}
+
 int oprofile_setup(void)
 {
         int err;
diff -pruN ../orig-linux-2.6.16.29/drivers/oprofile/oprof.h ./drivers/oprofile/oprof.h
--- ../orig-linux-2.6.16.29/drivers/oprofile/oprof.h 2006-11-06 14:46:52.000000000 -0800
+++ ./drivers/oprofile/oprof.h 2006-11-06 14:47:55.000000000 -0800
@@ -35,5 +35,8 @@ void oprofile_create_files(struct super_
 void oprofile_timer_init(struct oprofile_operations * ops);
 
 int oprofile_set_backtrace(unsigned long depth);
+
+int oprofile_set_active(int active_domains[], unsigned int adomains);
+int oprofile_set_passive(int passive_domains[], unsigned int pdomains);
 
 #endif /* OPROF_H */
diff -pruN ../orig-linux-2.6.16.29/drivers/oprofile/oprofile_files.c ./drivers/oprofile/oprofile_files.c
--- ../orig-linux-2.6.16.29/drivers/oprofile/oprofile_files.c 2006-11-06 14:46:52.000000000 -0800
+++ ./drivers/oprofile/oprofile_files.c 2006-11-06 14:47:55.000000000 -0800
@@ -5,15 +5,21 @@
 * @remark Read the file COPYING
 *
 * @author John Levon <levon@movementarian.org>
+ *
+ * Modified by Aravind Menon for Xen
+ * These modifications are:
+ * Copyright (C) 2005 Hewlett-Packard Co.
 */
 
 #include <linux/fs.h>
 #include <linux/oprofile.h>
+#include <asm/uaccess.h>
+#include <linux/ctype.h>
 
 #include "event_buffer.h"
 #include "oprofile_stats.h"
 #include "oprof.h"
-
+
 unsigned long fs_buffer_size = 131072;
 unsigned long fs_cpu_buffer_size = 8192;
 unsigned long fs_buffer_watershed = 32768; /* FIXME: tune */
@@ -117,11 +123,202 @@ static ssize_t dump_write(struct file *
 static struct file_operations dump_fops = {
         .write = dump_write,
 };
-
+
+#define TMPBUFSIZE 512
+
+static unsigned int adomains = 0;
+static int active_domains[MAX_OPROF_DOMAINS + 1];
+static DEFINE_MUTEX(adom_mutex);
+
+static ssize_t adomain_write(struct file * file, char const __user * buf,
+                             size_t count, loff_t * offset)
+{
+        char *tmpbuf;
+        char *startp, *endp;
+        int i;
+        unsigned long val;
+        ssize_t retval = count;
+
+        if (*offset)
+                return -EINVAL;
+        if (count > TMPBUFSIZE - 1)
+                return -EINVAL;
+
+        if (!(tmpbuf = kmalloc(TMPBUFSIZE, GFP_KERNEL)))
+                return -ENOMEM;
+
+        if (copy_from_user(tmpbuf, buf, count)) {
+                kfree(tmpbuf);
+                return -EFAULT;
+        }
+        tmpbuf[count] = 0;
+
+        mutex_lock(&adom_mutex);
+
+        startp = tmpbuf;
+        /* Parse one more than MAX_OPROF_DOMAINS, for easy error checking */
+        for (i = 0; i <= MAX_OPROF_DOMAINS; i++) {
+                val = simple_strtoul(startp, &endp, 0);
+                if (endp == startp)
+                        break;
+                while (ispunct(*endp) || isspace(*endp))
+                        endp++;
+                active_domains[i] = val;
+                if (active_domains[i] != val)
+                        /* Overflow, force error below */
+                        i = MAX_OPROF_DOMAINS + 1;
+                startp = endp;
+        }
+        /* Force error on trailing junk */
+        adomains = *startp ? MAX_OPROF_DOMAINS + 1 : i;
+
+        kfree(tmpbuf);
+
+        if (adomains > MAX_OPROF_DOMAINS
+            || oprofile_set_active(active_domains, adomains)) {
+                adomains = 0;
+                retval = -EINVAL;
+        }
+
+        mutex_unlock(&adom_mutex);
+        return retval;
+}
+
+static ssize_t adomain_read(struct file * file, char __user * buf,
+                            size_t count, loff_t * offset)
+{
+        char * tmpbuf;
+        size_t len;
+        int i;
+        ssize_t retval;
+
+        if (!(tmpbuf = kmalloc(TMPBUFSIZE, GFP_KERNEL)))
+                return -ENOMEM;
+
+        mutex_lock(&adom_mutex);
+
+        len = 0;
+        for (i = 0; i < adomains; i++)
+                len += snprintf(tmpbuf + len,
+                                len < TMPBUFSIZE ? TMPBUFSIZE - len : 0,
+                                "%u ", active_domains[i]);
+        WARN_ON(len > TMPBUFSIZE);
+        if (len != 0 && len <= TMPBUFSIZE)
+                tmpbuf[len-1] = '\n';
+
+        mutex_unlock(&adom_mutex);
+
+        retval = simple_read_from_buffer(buf, count, offset, tmpbuf, len);
+
+        kfree(tmpbuf);
+        return retval;
+}
+
+
+static struct file_operations active_domain_ops = {
+        .read = adomain_read,
+        .write = adomain_write,
+};
+
+static unsigned int pdomains = 0;
+static int passive_domains[MAX_OPROF_DOMAINS];
+static DEFINE_MUTEX(pdom_mutex);
+
+static ssize_t pdomain_write(struct file * file, char const __user * buf,
+                             size_t count, loff_t * offset)
+{
+        char *tmpbuf;
+        char *startp, *endp;
+        int i;
+        unsigned long val;
+        ssize_t retval = count;
+
+        if (*offset)
+                return -EINVAL;
+        if (count > TMPBUFSIZE - 1)
+                return -EINVAL;
+
+        if (!(tmpbuf = kmalloc(TMPBUFSIZE, GFP_KERNEL)))
+                return -ENOMEM;
+
+        if (copy_from_user(tmpbuf, buf, count)) {
+                kfree(tmpbuf);
+                return -EFAULT;
+        }
+        tmpbuf[count] = 0;
+
+        mutex_lock(&pdom_mutex);
+
+        startp = tmpbuf;
+        /* Parse one more than MAX_OPROF_DOMAINS, for easy error checking */
+        for (i = 0; i <= MAX_OPROF_DOMAINS; i++) {
+                val = simple_strtoul(startp, &endp, 0);
+                if (endp == startp)
+                        break;
+                while (ispunct(*endp) || isspace(*endp))
+                        endp++;
+                passive_domains[i] = val;
+                if (passive_domains[i] != val)
+                        /* Overflow, force error below */
+                        i = MAX_OPROF_DOMAINS + 1;
+                startp = endp;
+        }
+        /* Force error on trailing junk */
+        pdomains = *startp ? MAX_OPROF_DOMAINS + 1 : i;
+
+        kfree(tmpbuf);
+
+        if (pdomains > MAX_OPROF_DOMAINS
+            || oprofile_set_passive(passive_domains, pdomains)) {
+                pdomains = 0;
+                retval = -EINVAL;
+        }
+
+        mutex_unlock(&pdom_mutex);
+        return retval;
+}
+
+static ssize_t pdomain_read(struct file * file, char __user * buf,
+                            size_t count, loff_t * offset)
+{
+        char * tmpbuf;
+        size_t len;
+        int i;
+        ssize_t retval;
+
+        if (!(tmpbuf = kmalloc(TMPBUFSIZE, GFP_KERNEL)))
+                return -ENOMEM;
+
+        mutex_lock(&pdom_mutex);
+
+        len = 0;
+        for (i = 0; i < pdomains; i++)
+                len += snprintf(tmpbuf + len,
+                                len < TMPBUFSIZE ? TMPBUFSIZE - len : 0,
+                                "%u ", passive_domains[i]);
+        WARN_ON(len > TMPBUFSIZE);
+        if (len != 0 && len <= TMPBUFSIZE)
+                tmpbuf[len-1] = '\n';
+
+        mutex_unlock(&pdom_mutex);
+
+        retval = simple_read_from_buffer(buf, count, offset, tmpbuf, len);
+
+        kfree(tmpbuf);
+        return retval;
+}
+
+static struct file_operations passive_domain_ops = {
+        .read = pdomain_read,
+        .write = pdomain_write,
+};
+
 void oprofile_create_files(struct super_block * sb, struct dentry * root)
 {
         oprofilefs_create_file(sb, root, "enable", &enable_fops);
         oprofilefs_create_file_perm(sb, root, "dump", &dump_fops, 0666);
+        oprofilefs_create_file(sb, root, "active_domains", &active_domain_ops);
+        oprofilefs_create_file(sb, root, "passive_domains", &passive_domain_ops);
         oprofilefs_create_file(sb, root, "buffer", &event_buffer_fops);
         oprofilefs_create_ulong(sb, root, "buffer_size", &fs_buffer_size);
         oprofilefs_create_ulong(sb, root, "buffer_watershed", &fs_buffer_watershed);
diff -pruN ../orig-linux-2.6.16.29/include/linux/oprofile.h ./include/linux/oprofile.h
--- ../orig-linux-2.6.16.29/include/linux/oprofile.h 2006-11-06 14:46:42.000000000 -0800
+++ ./include/linux/oprofile.h 2006-11-06 14:47:55.000000000 -0800
@@ -16,6 +16,8 @@
 #include <linux/types.h>
 #include <linux/spinlock.h>
 #include <asm/atomic.h>
+
+#include <xen/interface/xenoprof.h>
 
 struct super_block;
 struct dentry;
@@ -27,6 +29,11 @@ struct oprofile_operations {
         /* create any necessary configuration files in the oprofile fs.
          * Optional. */
         int (*create_files)(struct super_block * sb, struct dentry * root);
+        /* setup active domains with Xen */
+        int (*set_active)(int *active_domains, unsigned int adomains);
+        /* setup passive domains with Xen */
+        int (*set_passive)(int *passive_domains, unsigned int pdomains);
+
         /* Do any necessary interrupt setup. Optional. */
         int (*setup)(void);
         /* Do any necessary interrupt shutdown. Optional. */
@@ -68,6 +75,8 @@ void oprofile_add_pc(unsigned long pc, i
 /* add a backtrace entry, to be called from the ->backtrace callback */
 void oprofile_add_trace(unsigned long eip);
 
+/* add a domain switch entry */
+int oprofile_add_domain_switch(int32_t domain_id);
 
 /**
 * Create a file of the given name as a child of the given root, with