direct-io.hg

view patches/linux-2.6.16.29/xenoprof-generic.patch @ 11750:d845c9522d9e

[HVM][SVM] Check if SVM is disabled by the BIOS before enabling it.

Newer BIOS implementations will be able to disable the SVM feature,
although an additional test of an MSR (VMCR 0xC0010114 bit 4) is
necessary (set equals disabled). Bit 4 of MSR 0xc0010114 returns 0
(SVM enabled) on machines with older BIOS' without the SVM disable
feature support.

Signed-off-by: Wei Huang <wei.huang2@amd.com>
Signed-off-by: Tom Woller <thomas.woller@amd.com>
author kfraser@localhost.localdomain
date Thu Oct 12 16:12:10 2006 +0100 (2006-10-12)
parents 041be3f6b38e
children eea9247ad5a0
line source
1 diff -pruN ../orig-linux-2.6.16.29/drivers/oprofile/buffer_sync.c ./drivers/oprofile/buffer_sync.c
2 --- ../orig-linux-2.6.16.29/drivers/oprofile/buffer_sync.c 2006-09-12 19:02:10.000000000 +0100
3 +++ ./drivers/oprofile/buffer_sync.c 2006-09-19 14:06:05.000000000 +0100
4 @@ -6,6 +6,10 @@
5 *
6 * @author John Levon <levon@movementarian.org>
7 *
8 + * Modified by Aravind Menon for Xen
9 + * These modifications are:
10 + * Copyright (C) 2005 Hewlett-Packard Co.
11 + *
12 * This is the core of the buffer management. Each
13 * CPU buffer is processed and entered into the
14 * global event buffer. Such processing is necessary
15 @@ -275,15 +279,31 @@ static void add_cpu_switch(int i)
16 last_cookie = INVALID_COOKIE;
17 }
19 -static void add_kernel_ctx_switch(unsigned int in_kernel)
20 +static void add_cpu_mode_switch(unsigned int cpu_mode)
21 {
22 add_event_entry(ESCAPE_CODE);
23 - if (in_kernel)
24 - add_event_entry(KERNEL_ENTER_SWITCH_CODE);
25 - else
26 - add_event_entry(KERNEL_EXIT_SWITCH_CODE);
27 + switch (cpu_mode) {
28 + case CPU_MODE_USER:
29 + add_event_entry(USER_ENTER_SWITCH_CODE);
30 + break;
31 + case CPU_MODE_KERNEL:
32 + add_event_entry(KERNEL_ENTER_SWITCH_CODE);
33 + break;
34 + case CPU_MODE_XEN:
35 + add_event_entry(XEN_ENTER_SWITCH_CODE);
36 + break;
37 + default:
38 + break;
39 + }
40 }
41 -
42 +
43 +static void add_domain_switch(unsigned long domain_id)
44 +{
45 + add_event_entry(ESCAPE_CODE);
46 + add_event_entry(DOMAIN_SWITCH_CODE);
47 + add_event_entry(domain_id);
48 +}
49 +
50 static void
51 add_user_ctx_switch(struct task_struct const * task, unsigned long cookie)
52 {
53 @@ -348,9 +368,9 @@ static int add_us_sample(struct mm_struc
54 * for later lookup from userspace.
55 */
56 static int
57 -add_sample(struct mm_struct * mm, struct op_sample * s, int in_kernel)
58 +add_sample(struct mm_struct * mm, struct op_sample * s, int cpu_mode)
59 {
60 - if (in_kernel) {
61 + if (cpu_mode >= CPU_MODE_KERNEL) {
62 add_sample_entry(s->eip, s->event);
63 return 1;
64 } else if (mm) {
65 @@ -496,10 +516,11 @@ void sync_buffer(int cpu)
66 struct mm_struct *mm = NULL;
67 struct task_struct * new;
68 unsigned long cookie = 0;
69 - int in_kernel = 1;
70 + int cpu_mode = 1;
71 unsigned int i;
72 sync_buffer_state state = sb_buffer_start;
73 unsigned long available;
74 + int domain_switch = 0;
76 down(&buffer_sem);
78 @@ -512,16 +533,18 @@ void sync_buffer(int cpu)
79 for (i = 0; i < available; ++i) {
80 struct op_sample * s = &cpu_buf->buffer[cpu_buf->tail_pos];
82 - if (is_code(s->eip)) {
83 - if (s->event <= CPU_IS_KERNEL) {
84 - /* kernel/userspace switch */
85 - in_kernel = s->event;
86 + if (is_code(s->eip) && !domain_switch) {
87 + if (s->event <= CPU_MODE_XEN) {
88 + /* xen/kernel/userspace switch */
89 + cpu_mode = s->event;
90 if (state == sb_buffer_start)
91 state = sb_sample_start;
92 - add_kernel_ctx_switch(s->event);
93 + add_cpu_mode_switch(s->event);
94 } else if (s->event == CPU_TRACE_BEGIN) {
95 state = sb_bt_start;
96 add_trace_begin();
97 + } else if (s->event == CPU_DOMAIN_SWITCH) {
98 + domain_switch = 1;
99 } else {
100 struct mm_struct * oldmm = mm;
102 @@ -535,11 +558,16 @@ void sync_buffer(int cpu)
103 add_user_ctx_switch(new, cookie);
104 }
105 } else {
106 - if (state >= sb_bt_start &&
107 - !add_sample(mm, s, in_kernel)) {
108 - if (state == sb_bt_start) {
109 - state = sb_bt_ignore;
110 - atomic_inc(&oprofile_stats.bt_lost_no_mapping);
111 + if (domain_switch) {
112 + add_domain_switch(s->eip);
113 + domain_switch = 0;
114 + } else {
115 + if (state >= sb_bt_start &&
116 + !add_sample(mm, s, cpu_mode)) {
117 + if (state == sb_bt_start) {
118 + state = sb_bt_ignore;
119 + atomic_inc(&oprofile_stats.bt_lost_no_mapping);
120 + }
121 }
122 }
123 }
124 diff -pruN ../orig-linux-2.6.16.29/drivers/oprofile/cpu_buffer.c ./drivers/oprofile/cpu_buffer.c
125 --- ../orig-linux-2.6.16.29/drivers/oprofile/cpu_buffer.c 2006-09-12 19:02:10.000000000 +0100
126 +++ ./drivers/oprofile/cpu_buffer.c 2006-09-19 14:06:05.000000000 +0100
127 @@ -6,6 +6,10 @@
128 *
129 * @author John Levon <levon@movementarian.org>
130 *
131 + * Modified by Aravind Menon for Xen
132 + * These modifications are:
133 + * Copyright (C) 2005 Hewlett-Packard Co.
134 + *
135 * Each CPU has a local buffer that stores PC value/event
136 * pairs. We also log context switches when we notice them.
137 * Eventually each CPU's buffer is processed into the global
138 @@ -34,6 +38,8 @@ static void wq_sync_buffer(void *);
139 #define DEFAULT_TIMER_EXPIRE (HZ / 10)
140 static int work_enabled;
142 +static int32_t current_domain = COORDINATOR_DOMAIN;
143 +
144 void free_cpu_buffers(void)
145 {
146 int i;
147 @@ -58,7 +64,7 @@ int alloc_cpu_buffers(void)
148 goto fail;
150 b->last_task = NULL;
151 - b->last_is_kernel = -1;
152 + b->last_cpu_mode = -1;
153 b->tracing = 0;
154 b->buffer_size = buffer_size;
155 b->tail_pos = 0;
156 @@ -114,7 +120,7 @@ void cpu_buffer_reset(struct oprofile_cp
157 * collected will populate the buffer with proper
158 * values to initialize the buffer
159 */
160 - cpu_buf->last_is_kernel = -1;
161 + cpu_buf->last_cpu_mode = -1;
162 cpu_buf->last_task = NULL;
163 }
165 @@ -164,13 +170,13 @@ add_code(struct oprofile_cpu_buffer * bu
166 * because of the head/tail separation of the writer and reader
167 * of the CPU buffer.
168 *
169 - * is_kernel is needed because on some architectures you cannot
170 + * cpu_mode is needed because on some architectures you cannot
171 * tell if you are in kernel or user space simply by looking at
172 - * pc. We tag this in the buffer by generating kernel enter/exit
173 - * events whenever is_kernel changes
174 + * pc. We tag this in the buffer by generating kernel/user (and xen)
175 + * enter events whenever cpu_mode changes
176 */
177 static int log_sample(struct oprofile_cpu_buffer * cpu_buf, unsigned long pc,
178 - int is_kernel, unsigned long event)
179 + int cpu_mode, unsigned long event)
180 {
181 struct task_struct * task;
183 @@ -181,18 +187,18 @@ static int log_sample(struct oprofile_cp
184 return 0;
185 }
187 - is_kernel = !!is_kernel;
188 -
189 task = current;
191 /* notice a switch from user->kernel or vice versa */
192 - if (cpu_buf->last_is_kernel != is_kernel) {
193 - cpu_buf->last_is_kernel = is_kernel;
194 - add_code(cpu_buf, is_kernel);
195 + if (cpu_buf->last_cpu_mode != cpu_mode) {
196 + cpu_buf->last_cpu_mode = cpu_mode;
197 + add_code(cpu_buf, cpu_mode);
198 }
199 -
200 +
201 /* notice a task switch */
202 - if (cpu_buf->last_task != task) {
203 + /* if not processing other domain samples */
204 + if ((cpu_buf->last_task != task) &&
205 + (current_domain == COORDINATOR_DOMAIN)) {
206 cpu_buf->last_task = task;
207 add_code(cpu_buf, (unsigned long)task);
208 }
209 @@ -269,6 +275,25 @@ void oprofile_add_trace(unsigned long pc
210 add_sample(cpu_buf, pc, 0);
211 }
213 +int oprofile_add_domain_switch(int32_t domain_id)
214 +{
215 + struct oprofile_cpu_buffer * cpu_buf = &cpu_buffer[smp_processor_id()];
216 +
217 + /* should have space for switching into and out of domain
218 + (2 slots each) plus one sample and one cpu mode switch */
219 + if (((nr_available_slots(cpu_buf) < 6) &&
220 + (domain_id != COORDINATOR_DOMAIN)) ||
221 + (nr_available_slots(cpu_buf) < 2))
222 + return 0;
223 +
224 + add_code(cpu_buf, CPU_DOMAIN_SWITCH);
225 + add_sample(cpu_buf, domain_id, 0);
226 +
227 + current_domain = domain_id;
228 +
229 + return 1;
230 +}
231 +
232 /*
233 * This serves to avoid cpu buffer overflow, and makes sure
234 * the task mortuary progresses
235 diff -pruN ../orig-linux-2.6.16.29/drivers/oprofile/cpu_buffer.h ./drivers/oprofile/cpu_buffer.h
236 --- ../orig-linux-2.6.16.29/drivers/oprofile/cpu_buffer.h 2006-09-12 19:02:10.000000000 +0100
237 +++ ./drivers/oprofile/cpu_buffer.h 2006-09-19 14:06:05.000000000 +0100
238 @@ -36,7 +36,7 @@ struct oprofile_cpu_buffer {
239 volatile unsigned long tail_pos;
240 unsigned long buffer_size;
241 struct task_struct * last_task;
242 - int last_is_kernel;
243 + int last_cpu_mode;
244 int tracing;
245 struct op_sample * buffer;
246 unsigned long sample_received;
247 @@ -51,7 +51,10 @@ extern struct oprofile_cpu_buffer cpu_bu
248 void cpu_buffer_reset(struct oprofile_cpu_buffer * cpu_buf);
250 /* transient events for the CPU buffer -> event buffer */
251 -#define CPU_IS_KERNEL 1
252 -#define CPU_TRACE_BEGIN 2
253 +#define CPU_MODE_USER 0
254 +#define CPU_MODE_KERNEL 1
255 +#define CPU_MODE_XEN 2
256 +#define CPU_TRACE_BEGIN 3
257 +#define CPU_DOMAIN_SWITCH 4
259 #endif /* OPROFILE_CPU_BUFFER_H */
260 diff -pruN ../orig-linux-2.6.16.29/drivers/oprofile/event_buffer.h ./drivers/oprofile/event_buffer.h
261 --- ../orig-linux-2.6.16.29/drivers/oprofile/event_buffer.h 2006-09-12 19:02:10.000000000 +0100
262 +++ ./drivers/oprofile/event_buffer.h 2006-09-19 14:06:05.000000000 +0100
263 @@ -29,15 +29,20 @@ void wake_up_buffer_waiter(void);
264 #define CPU_SWITCH_CODE 2
265 #define COOKIE_SWITCH_CODE 3
266 #define KERNEL_ENTER_SWITCH_CODE 4
267 -#define KERNEL_EXIT_SWITCH_CODE 5
268 +#define USER_ENTER_SWITCH_CODE 5
269 #define MODULE_LOADED_CODE 6
270 #define CTX_TGID_CODE 7
271 #define TRACE_BEGIN_CODE 8
272 #define TRACE_END_CODE 9
273 +#define XEN_ENTER_SWITCH_CODE 10
274 +#define DOMAIN_SWITCH_CODE 11
276 #define INVALID_COOKIE ~0UL
277 #define NO_COOKIE 0UL
279 +/* Constant used to refer to coordinator domain (Xen) */
280 +#define COORDINATOR_DOMAIN -1
281 +
282 /* add data to the event buffer */
283 void add_event_entry(unsigned long data);
285 diff -pruN ../orig-linux-2.6.16.29/drivers/oprofile/oprof.c ./drivers/oprofile/oprof.c
286 --- ../orig-linux-2.6.16.29/drivers/oprofile/oprof.c 2006-09-12 19:02:10.000000000 +0100
287 +++ ./drivers/oprofile/oprof.c 2006-09-19 14:06:05.000000000 +0100
288 @@ -5,6 +5,10 @@
289 * @remark Read the file COPYING
290 *
291 * @author John Levon <levon@movementarian.org>
292 + *
293 + * Modified by Aravind Menon for Xen
294 + * These modifications are:
295 + * Copyright (C) 2005 Hewlett-Packard Co.
296 */
298 #include <linux/kernel.h>
299 @@ -19,7 +23,7 @@
300 #include "cpu_buffer.h"
301 #include "buffer_sync.h"
302 #include "oprofile_stats.h"
303 -
304 +
305 struct oprofile_operations oprofile_ops;
307 unsigned long oprofile_started;
308 @@ -33,6 +37,32 @@ static DECLARE_MUTEX(start_sem);
309 */
310 static int timer = 0;
312 +int oprofile_set_active(int active_domains[], unsigned int adomains)
313 +{
314 + int err;
315 +
316 + if (!oprofile_ops.set_active)
317 + return -EINVAL;
318 +
319 + down(&start_sem);
320 + err = oprofile_ops.set_active(active_domains, adomains);
321 + up(&start_sem);
322 + return err;
323 +}
324 +
325 +int oprofile_set_passive(int passive_domains[], unsigned int pdomains)
326 +{
327 + int err;
328 +
329 + if (!oprofile_ops.set_passive)
330 + return -EINVAL;
331 +
332 + down(&start_sem);
333 + err = oprofile_ops.set_passive(passive_domains, pdomains);
334 + up(&start_sem);
335 + return err;
336 +}
337 +
338 int oprofile_setup(void)
339 {
340 int err;
341 diff -pruN ../orig-linux-2.6.16.29/drivers/oprofile/oprof.h ./drivers/oprofile/oprof.h
342 --- ../orig-linux-2.6.16.29/drivers/oprofile/oprof.h 2006-09-12 19:02:10.000000000 +0100
343 +++ ./drivers/oprofile/oprof.h 2006-09-19 14:06:05.000000000 +0100
344 @@ -35,5 +35,8 @@ void oprofile_create_files(struct super_
345 void oprofile_timer_init(struct oprofile_operations * ops);
347 int oprofile_set_backtrace(unsigned long depth);
348 +
349 +int oprofile_set_active(int active_domains[], unsigned int adomains);
350 +int oprofile_set_passive(int passive_domains[], unsigned int pdomains);
352 #endif /* OPROF_H */
353 diff -pruN ../orig-linux-2.6.16.29/drivers/oprofile/oprofile_files.c ./drivers/oprofile/oprofile_files.c
354 --- ../orig-linux-2.6.16.29/drivers/oprofile/oprofile_files.c 2006-09-12 19:02:10.000000000 +0100
355 +++ ./drivers/oprofile/oprofile_files.c 2006-09-19 14:06:05.000000000 +0100
356 @@ -5,15 +5,21 @@
357 * @remark Read the file COPYING
358 *
359 * @author John Levon <levon@movementarian.org>
360 + *
361 + * Modified by Aravind Menon for Xen
362 + * These modifications are:
363 + * Copyright (C) 2005 Hewlett-Packard Co.
364 */
366 #include <linux/fs.h>
367 #include <linux/oprofile.h>
368 +#include <asm/uaccess.h>
369 +#include <linux/ctype.h>
371 #include "event_buffer.h"
372 #include "oprofile_stats.h"
373 #include "oprof.h"
374 -
375 +
376 unsigned long fs_buffer_size = 131072;
377 unsigned long fs_cpu_buffer_size = 8192;
378 unsigned long fs_buffer_watershed = 32768; /* FIXME: tune */
379 @@ -117,11 +123,202 @@ static ssize_t dump_write(struct file *
380 static struct file_operations dump_fops = {
381 .write = dump_write,
382 };
383 -
384 +
385 +#define TMPBUFSIZE 512
386 +
387 +static unsigned int adomains = 0;
388 +static int active_domains[MAX_OPROF_DOMAINS + 1];
389 +static DEFINE_MUTEX(adom_mutex);
390 +
391 +static ssize_t adomain_write(struct file * file, char const __user * buf,
392 + size_t count, loff_t * offset)
393 +{
394 + char *tmpbuf;
395 + char *startp, *endp;
396 + int i;
397 + unsigned long val;
398 + ssize_t retval = count;
399 +
400 + if (*offset)
401 + return -EINVAL;
402 + if (count > TMPBUFSIZE - 1)
403 + return -EINVAL;
404 +
405 + if (!(tmpbuf = kmalloc(TMPBUFSIZE, GFP_KERNEL)))
406 + return -ENOMEM;
407 +
408 + if (copy_from_user(tmpbuf, buf, count)) {
409 + kfree(tmpbuf);
410 + return -EFAULT;
411 + }
412 + tmpbuf[count] = 0;
413 +
414 + mutex_lock(&adom_mutex);
415 +
416 + startp = tmpbuf;
417 + /* Parse one more than MAX_OPROF_DOMAINS, for easy error checking */
418 + for (i = 0; i <= MAX_OPROF_DOMAINS; i++) {
419 + val = simple_strtoul(startp, &endp, 0);
420 + if (endp == startp)
421 + break;
422 + while (ispunct(*endp) || isspace(*endp))
423 + endp++;
424 + active_domains[i] = val;
425 + if (active_domains[i] != val)
426 + /* Overflow, force error below */
427 + i = MAX_OPROF_DOMAINS + 1;
428 + startp = endp;
429 + }
430 + /* Force error on trailing junk */
431 + adomains = *startp ? MAX_OPROF_DOMAINS + 1 : i;
432 +
433 + kfree(tmpbuf);
434 +
435 + if (adomains > MAX_OPROF_DOMAINS
436 + || oprofile_set_active(active_domains, adomains)) {
437 + adomains = 0;
438 + retval = -EINVAL;
439 + }
440 +
441 + mutex_unlock(&adom_mutex);
442 + return retval;
443 +}
444 +
445 +static ssize_t adomain_read(struct file * file, char __user * buf,
446 + size_t count, loff_t * offset)
447 +{
448 + char * tmpbuf;
449 + size_t len;
450 + int i;
451 + ssize_t retval;
452 +
453 + if (!(tmpbuf = kmalloc(TMPBUFSIZE, GFP_KERNEL)))
454 + return -ENOMEM;
455 +
456 + mutex_lock(&adom_mutex);
457 +
458 + len = 0;
459 + for (i = 0; i < adomains; i++)
460 + len += snprintf(tmpbuf + len,
461 + len < TMPBUFSIZE ? TMPBUFSIZE - len : 0,
462 + "%u ", active_domains[i]);
463 + WARN_ON(len > TMPBUFSIZE);
464 + if (len != 0 && len <= TMPBUFSIZE)
465 + tmpbuf[len-1] = '\n';
466 +
467 + mutex_unlock(&adom_mutex);
468 +
469 + retval = simple_read_from_buffer(buf, count, offset, tmpbuf, len);
470 +
471 + kfree(tmpbuf);
472 + return retval;
473 +}
474 +
475 +
476 +static struct file_operations active_domain_ops = {
477 + .read = adomain_read,
478 + .write = adomain_write,
479 +};
480 +
481 +static unsigned int pdomains = 0;
482 +static int passive_domains[MAX_OPROF_DOMAINS];
483 +static DEFINE_MUTEX(pdom_mutex);
484 +
485 +static ssize_t pdomain_write(struct file * file, char const __user * buf,
486 + size_t count, loff_t * offset)
487 +{
488 + char *tmpbuf;
489 + char *startp, *endp;
490 + int i;
491 + unsigned long val;
492 + ssize_t retval = count;
493 +
494 + if (*offset)
495 + return -EINVAL;
496 + if (count > TMPBUFSIZE - 1)
497 + return -EINVAL;
498 +
499 + if (!(tmpbuf = kmalloc(TMPBUFSIZE, GFP_KERNEL)))
500 + return -ENOMEM;
501 +
502 + if (copy_from_user(tmpbuf, buf, count)) {
503 + kfree(tmpbuf);
504 + return -EFAULT;
505 + }
506 + tmpbuf[count] = 0;
507 +
508 + mutex_lock(&pdom_mutex);
509 +
510 + startp = tmpbuf;
511 + /* Parse one more than MAX_OPROF_DOMAINS, for easy error checking */
512 + for (i = 0; i <= MAX_OPROF_DOMAINS; i++) {
513 + val = simple_strtoul(startp, &endp, 0);
514 + if (endp == startp)
515 + break;
516 + while (ispunct(*endp) || isspace(*endp))
517 + endp++;
518 + passive_domains[i] = val;
519 + if (passive_domains[i] != val)
520 + /* Overflow, force error below */
521 + i = MAX_OPROF_DOMAINS + 1;
522 + startp = endp;
523 + }
524 + /* Force error on trailing junk */
525 + pdomains = *startp ? MAX_OPROF_DOMAINS + 1 : i;
526 +
527 + kfree(tmpbuf);
528 +
529 + if (pdomains > MAX_OPROF_DOMAINS
530 + || oprofile_set_passive(passive_domains, pdomains)) {
531 + pdomains = 0;
532 + retval = -EINVAL;
533 + }
534 +
535 + mutex_unlock(&pdom_mutex);
536 + return retval;
537 +}
538 +
539 +static ssize_t pdomain_read(struct file * file, char __user * buf,
540 + size_t count, loff_t * offset)
541 +{
542 + char * tmpbuf;
543 + size_t len;
544 + int i;
545 + ssize_t retval;
546 +
547 + if (!(tmpbuf = kmalloc(TMPBUFSIZE, GFP_KERNEL)))
548 + return -ENOMEM;
549 +
550 + mutex_lock(&pdom_mutex);
551 +
552 + len = 0;
553 + for (i = 0; i < pdomains; i++)
554 + len += snprintf(tmpbuf + len,
555 + len < TMPBUFSIZE ? TMPBUFSIZE - len : 0,
556 + "%u ", passive_domains[i]);
557 + WARN_ON(len > TMPBUFSIZE);
558 + if (len != 0 && len <= TMPBUFSIZE)
559 + tmpbuf[len-1] = '\n';
560 +
561 + mutex_unlock(&pdom_mutex);
562 +
563 + retval = simple_read_from_buffer(buf, count, offset, tmpbuf, len);
564 +
565 + kfree(tmpbuf);
566 + return retval;
567 +}
568 +
569 +static struct file_operations passive_domain_ops = {
570 + .read = pdomain_read,
571 + .write = pdomain_write,
572 +};
573 +
574 void oprofile_create_files(struct super_block * sb, struct dentry * root)
575 {
576 oprofilefs_create_file(sb, root, "enable", &enable_fops);
577 oprofilefs_create_file_perm(sb, root, "dump", &dump_fops, 0666);
578 + oprofilefs_create_file(sb, root, "active_domains", &active_domain_ops);
579 + oprofilefs_create_file(sb, root, "passive_domains", &passive_domain_ops);
580 oprofilefs_create_file(sb, root, "buffer", &event_buffer_fops);
581 oprofilefs_create_ulong(sb, root, "buffer_size", &fs_buffer_size);
582 oprofilefs_create_ulong(sb, root, "buffer_watershed", &fs_buffer_watershed);
583 diff -pruN ../orig-linux-2.6.16.29/include/linux/oprofile.h ./include/linux/oprofile.h
584 --- ../orig-linux-2.6.16.29/include/linux/oprofile.h 2006-09-12 19:02:10.000000000 +0100
585 +++ ./include/linux/oprofile.h 2006-09-19 14:06:05.000000000 +0100
586 @@ -16,6 +16,8 @@
587 #include <linux/types.h>
588 #include <linux/spinlock.h>
589 #include <asm/atomic.h>
590 +
591 +#include <xen/interface/xenoprof.h>
593 struct super_block;
594 struct dentry;
595 @@ -27,6 +29,11 @@ struct oprofile_operations {
596 /* create any necessary configuration files in the oprofile fs.
597 * Optional. */
598 int (*create_files)(struct super_block * sb, struct dentry * root);
599 + /* setup active domains with Xen */
600 + int (*set_active)(int *active_domains, unsigned int adomains);
601 + /* setup passive domains with Xen */
602 + int (*set_passive)(int *passive_domains, unsigned int pdomains);
603 +
604 /* Do any necessary interrupt setup. Optional. */
605 int (*setup)(void);
606 /* Do any necessary interrupt shutdown. Optional. */
607 @@ -68,6 +75,8 @@ void oprofile_add_pc(unsigned long pc, i
608 /* add a backtrace entry, to be called from the ->backtrace callback */
609 void oprofile_add_trace(unsigned long eip);
611 +/* add a domain switch entry */
612 +int oprofile_add_domain_switch(int32_t domain_id);
614 /**
615 * Create a file of the given name as a child of the given root, with