ia64/linux-2.6.18-xen.hg

changeset 15:132f24200f4c

Imported patch xenoprof-generic.patch from xen-unstable.hg 15200:bd3d6b4c52ec
author Ian Campbell <ian.campbell@xensource.com>
date Mon Jun 04 10:05:24 2007 +0100 (2007-06-04)
parents f58a34be59d6
children b450c13522c8
files drivers/oprofile/buffer_sync.c drivers/oprofile/cpu_buffer.c drivers/oprofile/cpu_buffer.h drivers/oprofile/event_buffer.h drivers/oprofile/oprof.c drivers/oprofile/oprof.h drivers/oprofile/oprofile_files.c include/linux/oprofile.h
line diff
     1.1 --- a/drivers/oprofile/buffer_sync.c	Mon Jun 04 10:05:24 2007 +0100
     1.2 +++ b/drivers/oprofile/buffer_sync.c	Mon Jun 04 10:05:24 2007 +0100
     1.3 @@ -6,6 +6,10 @@
     1.4   *
     1.5   * @author John Levon <levon@movementarian.org>
     1.6   *
     1.7 + * Modified by Aravind Menon for Xen
     1.8 + * These modifications are:
     1.9 + * Copyright (C) 2005 Hewlett-Packard Co.
    1.10 + *
    1.11   * This is the core of the buffer management. Each
    1.12   * CPU buffer is processed and entered into the
    1.13   * global event buffer. Such processing is necessary
    1.14 @@ -38,6 +42,7 @@ static cpumask_t marked_cpus = CPU_MASK_
    1.15  static DEFINE_SPINLOCK(task_mortuary);
    1.16  static void process_task_mortuary(void);
    1.17  
    1.18 +static int cpu_current_domain[NR_CPUS];
    1.19  
    1.20  /* Take ownership of the task struct and place it on the
    1.21   * list for processing. Only after two full buffer syncs
    1.22 @@ -146,6 +151,11 @@ static void end_sync(void)
    1.23  int sync_start(void)
    1.24  {
    1.25  	int err;
    1.26 +	int i;
    1.27 +
    1.28 +	for (i = 0; i < NR_CPUS; i++) {
    1.29 +		cpu_current_domain[i] = COORDINATOR_DOMAIN;
    1.30 +	}
    1.31  
    1.32  	start_cpu_work();
    1.33  
    1.34 @@ -275,15 +285,31 @@ static void add_cpu_switch(int i)
    1.35  	last_cookie = INVALID_COOKIE;
    1.36  }
    1.37  
    1.38 -static void add_kernel_ctx_switch(unsigned int in_kernel)
    1.39 +static void add_cpu_mode_switch(unsigned int cpu_mode)
    1.40  {
    1.41  	add_event_entry(ESCAPE_CODE);
    1.42 -	if (in_kernel)
    1.43 -		add_event_entry(KERNEL_ENTER_SWITCH_CODE); 
    1.44 -	else
    1.45 -		add_event_entry(KERNEL_EXIT_SWITCH_CODE); 
    1.46 +	switch (cpu_mode) {
    1.47 +	case CPU_MODE_USER:
    1.48 +		add_event_entry(USER_ENTER_SWITCH_CODE);
    1.49 +		break;
    1.50 +	case CPU_MODE_KERNEL:
    1.51 +		add_event_entry(KERNEL_ENTER_SWITCH_CODE);
    1.52 +		break;
    1.53 +	case CPU_MODE_XEN:
    1.54 +		add_event_entry(XEN_ENTER_SWITCH_CODE);
     1.55 +		break;
    1.56 +	default:
    1.57 +		break;
    1.58 +	}
    1.59  }
    1.60 - 
    1.61 +
    1.62 +static void add_domain_switch(unsigned long domain_id)
    1.63 +{
    1.64 +	add_event_entry(ESCAPE_CODE);
    1.65 +	add_event_entry(DOMAIN_SWITCH_CODE);
    1.66 +	add_event_entry(domain_id);
    1.67 +}
    1.68 +
    1.69  static void
    1.70  add_user_ctx_switch(struct task_struct const * task, unsigned long cookie)
    1.71  {
    1.72 @@ -348,9 +374,9 @@ static int add_us_sample(struct mm_struc
    1.73   * for later lookup from userspace.
    1.74   */
    1.75  static int
    1.76 -add_sample(struct mm_struct * mm, struct op_sample * s, int in_kernel)
    1.77 +add_sample(struct mm_struct * mm, struct op_sample * s, int cpu_mode)
    1.78  {
    1.79 -	if (in_kernel) {
    1.80 +	if (cpu_mode >= CPU_MODE_KERNEL) {
    1.81  		add_sample_entry(s->eip, s->event);
    1.82  		return 1;
    1.83  	} else if (mm) {
    1.84 @@ -496,15 +522,21 @@ void sync_buffer(int cpu)
    1.85  	struct mm_struct *mm = NULL;
    1.86  	struct task_struct * new;
    1.87  	unsigned long cookie = 0;
    1.88 -	int in_kernel = 1;
     1.89 +	int cpu_mode = CPU_MODE_KERNEL;
    1.90  	unsigned int i;
    1.91  	sync_buffer_state state = sb_buffer_start;
    1.92  	unsigned long available;
    1.93 +	int domain_switch = 0;
    1.94  
    1.95  	mutex_lock(&buffer_mutex);
    1.96   
    1.97  	add_cpu_switch(cpu);
    1.98  
    1.99 +	/* We need to assign the first samples in this CPU buffer to the
   1.100 +	   same domain that we were processing at the last sync_buffer */
   1.101 +	if (cpu_current_domain[cpu] != COORDINATOR_DOMAIN) {
   1.102 +		add_domain_switch(cpu_current_domain[cpu]);
   1.103 +	}
   1.104  	/* Remember, only we can modify tail_pos */
   1.105  
   1.106  	available = get_slots(cpu_buf);
   1.107 @@ -512,16 +544,18 @@ void sync_buffer(int cpu)
   1.108  	for (i = 0; i < available; ++i) {
   1.109  		struct op_sample * s = &cpu_buf->buffer[cpu_buf->tail_pos];
   1.110   
   1.111 -		if (is_code(s->eip)) {
   1.112 -			if (s->event <= CPU_IS_KERNEL) {
   1.113 -				/* kernel/userspace switch */
   1.114 -				in_kernel = s->event;
   1.115 +		if (is_code(s->eip) && !domain_switch) {
   1.116 +			if (s->event <= CPU_MODE_XEN) {
   1.117 +				/* xen/kernel/userspace switch */
   1.118 +				cpu_mode = s->event;
   1.119  				if (state == sb_buffer_start)
   1.120  					state = sb_sample_start;
   1.121 -				add_kernel_ctx_switch(s->event);
   1.122 +				add_cpu_mode_switch(s->event);
   1.123  			} else if (s->event == CPU_TRACE_BEGIN) {
   1.124  				state = sb_bt_start;
   1.125  				add_trace_begin();
   1.126 +			} else if (s->event == CPU_DOMAIN_SWITCH) {
    1.127 +				domain_switch = 1;
   1.128  			} else {
   1.129  				struct mm_struct * oldmm = mm;
   1.130  
   1.131 @@ -535,11 +569,21 @@ void sync_buffer(int cpu)
   1.132  				add_user_ctx_switch(new, cookie);
   1.133  			}
   1.134  		} else {
   1.135 -			if (state >= sb_bt_start &&
   1.136 -			    !add_sample(mm, s, in_kernel)) {
   1.137 -				if (state == sb_bt_start) {
   1.138 -					state = sb_bt_ignore;
   1.139 -					atomic_inc(&oprofile_stats.bt_lost_no_mapping);
   1.140 +			if (domain_switch) {
   1.141 +				cpu_current_domain[cpu] = s->eip;
   1.142 +				add_domain_switch(s->eip);
   1.143 +				domain_switch = 0;
   1.144 +			} else {
   1.145 +				if (cpu_current_domain[cpu] !=
   1.146 +				    COORDINATOR_DOMAIN) {
   1.147 +					add_sample_entry(s->eip, s->event);
   1.148 +				}
    1.149 +				else if (state >= sb_bt_start &&
   1.150 +				    !add_sample(mm, s, cpu_mode)) {
   1.151 +					if (state == sb_bt_start) {
   1.152 +						state = sb_bt_ignore;
   1.153 +						atomic_inc(&oprofile_stats.bt_lost_no_mapping);
   1.154 +					}
   1.155  				}
   1.156  			}
   1.157  		}
   1.158 @@ -548,6 +592,11 @@ void sync_buffer(int cpu)
   1.159  	}
   1.160  	release_mm(mm);
   1.161  
   1.162 +	/* We reset domain to COORDINATOR at each CPU switch */
   1.163 +	if (cpu_current_domain[cpu] != COORDINATOR_DOMAIN) {
   1.164 +		add_domain_switch(COORDINATOR_DOMAIN);
   1.165 +	}
   1.166 +
   1.167  	mark_done(cpu);
   1.168  
   1.169  	mutex_unlock(&buffer_mutex);
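
With this change sync_buffer() passes passive-domain samples straight through to the event buffer (no mm/cookie lookup), bracketed by domain switch records, and always returns the stream to the coordinator domain before finishing a CPU buffer. Purely as an illustration, one such section of the event stream could look like the sketch below; the eip/event/domain values are invented and the array is not part of the patch.

/* Illustration only: word sequence emitted by sync_buffer() around a
 * passive-domain section, using the codes from
 * drivers/oprofile/event_buffer.h. The eip/event/domain values are
 * invented. */
static const unsigned long example_stream[] = {
	ESCAPE_CODE, DOMAIN_SWITCH_CODE, 1,	/* samples now belong to domain 1 */
	0xc01a1234, 0,				/* raw sample: eip, event */
	0xc01a5678, 0,				/* raw sample: eip, event */
	ESCAPE_CODE, DOMAIN_SWITCH_CODE,
	(unsigned long)COORDINATOR_DOMAIN,	/* back to the coordinator (Xen) */
};
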
     2.1 --- a/drivers/oprofile/cpu_buffer.c	Mon Jun 04 10:05:24 2007 +0100
     2.2 +++ b/drivers/oprofile/cpu_buffer.c	Mon Jun 04 10:05:24 2007 +0100
     2.3 @@ -6,6 +6,10 @@
     2.4   *
     2.5   * @author John Levon <levon@movementarian.org>
     2.6   *
     2.7 + * Modified by Aravind Menon for Xen
     2.8 + * These modifications are:
     2.9 + * Copyright (C) 2005 Hewlett-Packard Co.
    2.10 + *
    2.11   * Each CPU has a local buffer that stores PC value/event
    2.12   * pairs. We also log context switches when we notice them.
    2.13   * Eventually each CPU's buffer is processed into the global
    2.14 @@ -34,6 +38,8 @@ static void wq_sync_buffer(void *);
    2.15  #define DEFAULT_TIMER_EXPIRE (HZ / 10)
    2.16  static int work_enabled;
    2.17  
    2.18 +static int32_t current_domain = COORDINATOR_DOMAIN;
    2.19 +
    2.20  void free_cpu_buffers(void)
    2.21  {
    2.22  	int i;
    2.23 @@ -57,7 +63,7 @@ int alloc_cpu_buffers(void)
    2.24  			goto fail;
    2.25   
    2.26  		b->last_task = NULL;
    2.27 -		b->last_is_kernel = -1;
    2.28 +		b->last_cpu_mode = -1;
    2.29  		b->tracing = 0;
    2.30  		b->buffer_size = buffer_size;
    2.31  		b->tail_pos = 0;
    2.32 @@ -113,7 +119,7 @@ void cpu_buffer_reset(struct oprofile_cp
    2.33  	 * collected will populate the buffer with proper
    2.34  	 * values to initialize the buffer
    2.35  	 */
    2.36 -	cpu_buf->last_is_kernel = -1;
    2.37 +	cpu_buf->last_cpu_mode = -1;
    2.38  	cpu_buf->last_task = NULL;
    2.39  }
    2.40  
    2.41 @@ -163,13 +169,13 @@ add_code(struct oprofile_cpu_buffer * bu
    2.42   * because of the head/tail separation of the writer and reader
    2.43   * of the CPU buffer.
    2.44   *
    2.45 - * is_kernel is needed because on some architectures you cannot
    2.46 + * cpu_mode is needed because on some architectures you cannot
    2.47   * tell if you are in kernel or user space simply by looking at
    2.48 - * pc. We tag this in the buffer by generating kernel enter/exit
    2.49 - * events whenever is_kernel changes
    2.50 + * pc. We tag this in the buffer by generating kernel/user (and xen)
     2.51 + * enter events whenever cpu_mode changes
    2.52   */
    2.53  static int log_sample(struct oprofile_cpu_buffer * cpu_buf, unsigned long pc,
    2.54 -		      int is_kernel, unsigned long event)
    2.55 +		      int cpu_mode, unsigned long event)
    2.56  {
    2.57  	struct task_struct * task;
    2.58  
    2.59 @@ -180,18 +186,18 @@ static int log_sample(struct oprofile_cp
    2.60  		return 0;
    2.61  	}
    2.62  
    2.63 -	is_kernel = !!is_kernel;
    2.64 -
    2.65  	task = current;
    2.66  
    2.67  	/* notice a switch from user->kernel or vice versa */
    2.68 -	if (cpu_buf->last_is_kernel != is_kernel) {
    2.69 -		cpu_buf->last_is_kernel = is_kernel;
    2.70 -		add_code(cpu_buf, is_kernel);
    2.71 +	if (cpu_buf->last_cpu_mode != cpu_mode) {
    2.72 +		cpu_buf->last_cpu_mode = cpu_mode;
    2.73 +		add_code(cpu_buf, cpu_mode);
    2.74  	}
    2.75 -
     2.76 +
    2.77  	/* notice a task switch */
    2.78 -	if (cpu_buf->last_task != task) {
    2.79 +	/* if not processing other domain samples */
    2.80 +	if ((cpu_buf->last_task != task) &&
    2.81 +	    (current_domain == COORDINATOR_DOMAIN)) {
    2.82  		cpu_buf->last_task = task;
    2.83  		add_code(cpu_buf, (unsigned long)task);
    2.84  	}
    2.85 @@ -275,6 +281,25 @@ void oprofile_add_trace(unsigned long pc
    2.86  	add_sample(cpu_buf, pc, 0);
    2.87  }
    2.88  
    2.89 +int oprofile_add_domain_switch(int32_t domain_id)
    2.90 +{
    2.91 +	struct oprofile_cpu_buffer * cpu_buf = &cpu_buffer[smp_processor_id()];
    2.92 +
    2.93 +	/* should have space for switching into and out of domain 
    2.94 +	   (2 slots each) plus one sample and one cpu mode switch */
    2.95 +	if (((nr_available_slots(cpu_buf) < 6) && 
    2.96 +	     (domain_id != COORDINATOR_DOMAIN)) ||
    2.97 +	    (nr_available_slots(cpu_buf) < 2))
    2.98 +		return 0;
    2.99 +
   2.100 +	add_code(cpu_buf, CPU_DOMAIN_SWITCH);
   2.101 +	add_sample(cpu_buf, domain_id, 0);
   2.102 +
   2.103 +	current_domain = domain_id;
   2.104 +
   2.105 +	return 1;
   2.106 +}
   2.107 +
   2.108  /*
   2.109   * This serves to avoid cpu buffer overflow, and makes sure
   2.110   * the task mortuary progresses
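
oprofile_add_domain_switch() is meant to be called by the architecture-specific xenoprof backend whenever the samples it drains from the hypervisor belong to a different domain; the slot check above reserves room for switching in and out (two slots each) plus one sample and one cpu mode switch. A minimal sketch of such a caller is shown below; struct hyp_sample and drain_samples() are hypothetical placeholders, not the real arch code, and the driver-internal headers are included only to make the sketch self-contained.

#include <linux/oprofile.h>
#include "cpu_buffer.h"		/* CPU_MODE_*, sketch only */
#include "event_buffer.h"	/* COORDINATOR_DOMAIN, sketch only */

/* Hypothetical representation of one hypervisor-provided sample. */
struct hyp_sample {
	unsigned long eip;
	unsigned long event;
	int mode;		/* CPU_MODE_USER/KERNEL/XEN */
	int32_t domain_id;
};

static void drain_samples(struct hyp_sample *s, int n)
{
	int32_t cur_dom = COORDINATOR_DOMAIN;
	int i;

	for (i = 0; i < n; i++) {
		/* Emit a domain switch record only when the owning domain
		 * changes; skip the sample if the CPU buffer is too full. */
		if (s[i].domain_id != cur_dom) {
			if (!oprofile_add_domain_switch(s[i].domain_id))
				continue;
			cur_dom = s[i].domain_id;
		}
		oprofile_add_pc(s[i].eip, s[i].mode, s[i].event);
	}
	/* Return to the coordinator (Xen) domain when done. */
	if (cur_dom != COORDINATOR_DOMAIN)
		oprofile_add_domain_switch(COORDINATOR_DOMAIN);
}
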
     3.1 --- a/drivers/oprofile/cpu_buffer.h	Mon Jun 04 10:05:24 2007 +0100
     3.2 +++ b/drivers/oprofile/cpu_buffer.h	Mon Jun 04 10:05:24 2007 +0100
     3.3 @@ -36,7 +36,7 @@ struct oprofile_cpu_buffer {
     3.4  	volatile unsigned long tail_pos;
     3.5  	unsigned long buffer_size;
     3.6  	struct task_struct * last_task;
     3.7 -	int last_is_kernel;
     3.8 +	int last_cpu_mode;
     3.9  	int tracing;
    3.10  	struct op_sample * buffer;
    3.11  	unsigned long sample_received;
    3.12 @@ -51,7 +51,10 @@ extern struct oprofile_cpu_buffer cpu_bu
    3.13  void cpu_buffer_reset(struct oprofile_cpu_buffer * cpu_buf);
    3.14  
    3.15  /* transient events for the CPU buffer -> event buffer */
    3.16 -#define CPU_IS_KERNEL 1
    3.17 -#define CPU_TRACE_BEGIN 2
    3.18 +#define CPU_MODE_USER           0
    3.19 +#define CPU_MODE_KERNEL         1
    3.20 +#define CPU_MODE_XEN            2
    3.21 +#define CPU_TRACE_BEGIN         3
    3.22 +#define CPU_DOMAIN_SWITCH       4
    3.23  
    3.24  #endif /* OPROFILE_CPU_BUFFER_H */
     4.1 --- a/drivers/oprofile/event_buffer.h	Mon Jun 04 10:05:24 2007 +0100
     4.2 +++ b/drivers/oprofile/event_buffer.h	Mon Jun 04 10:05:24 2007 +0100
     4.3 @@ -29,15 +29,20 @@ void wake_up_buffer_waiter(void);
     4.4  #define CPU_SWITCH_CODE 		2
     4.5  #define COOKIE_SWITCH_CODE 		3
     4.6  #define KERNEL_ENTER_SWITCH_CODE	4
     4.7 -#define KERNEL_EXIT_SWITCH_CODE		5
     4.8 +#define USER_ENTER_SWITCH_CODE		5
     4.9  #define MODULE_LOADED_CODE		6
    4.10  #define CTX_TGID_CODE			7
    4.11  #define TRACE_BEGIN_CODE		8
    4.12  #define TRACE_END_CODE			9
    4.13 +#define XEN_ENTER_SWITCH_CODE		10
    4.14 +#define DOMAIN_SWITCH_CODE		11
    4.15   
    4.16  #define INVALID_COOKIE ~0UL
    4.17  #define NO_COOKIE 0UL
    4.18  
    4.19 +/* Constant used to refer to coordinator domain (Xen) */
    4.20 +#define COORDINATOR_DOMAIN -1
    4.21 +
    4.22  /* add data to the event buffer */
    4.23  void add_event_entry(unsigned long data);
    4.24   
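
The two new codes extend the existing escape-record format: XEN_ENTER_SWITCH_CODE carries no payload, just like the other enter-switch codes, while DOMAIN_SWITCH_CODE is followed by one extra word holding the domain id. A hypothetical consumer-side helper (not part of the patch; it assumes the reader mirrors these definitions) could dispatch on them like this:

/* Hypothetical consumer-side helper covering only the two new codes;
 * rec[0] is ESCAPE_CODE, rec[1] the switch code. Returns the number of
 * words consumed, or 0 to fall back to the pre-existing decoder. */
static int handle_xen_escape(const unsigned long *rec)
{
	switch (rec[1]) {
	case XEN_ENTER_SWITCH_CODE:
		/* no payload: subsequent samples were taken in Xen */
		return 2;
	case DOMAIN_SWITCH_CODE:
		/* one payload word: samples now belong to domain rec[2] */
		return 3;
	default:
		return 0;
	}
}
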
     5.1 --- a/drivers/oprofile/oprof.c	Mon Jun 04 10:05:24 2007 +0100
     5.2 +++ b/drivers/oprofile/oprof.c	Mon Jun 04 10:05:24 2007 +0100
     5.3 @@ -5,6 +5,10 @@
     5.4   * @remark Read the file COPYING
     5.5   *
     5.6   * @author John Levon <levon@movementarian.org>
     5.7 + *
     5.8 + * Modified by Aravind Menon for Xen
     5.9 + * These modifications are:
    5.10 + * Copyright (C) 2005 Hewlett-Packard Co.
    5.11   */
    5.12  
    5.13  #include <linux/kernel.h>
    5.14 @@ -19,7 +23,7 @@
    5.15  #include "cpu_buffer.h"
    5.16  #include "buffer_sync.h"
    5.17  #include "oprofile_stats.h"
    5.18 - 
    5.19 +
    5.20  struct oprofile_operations oprofile_ops;
    5.21  
    5.22  unsigned long oprofile_started;
    5.23 @@ -33,6 +37,32 @@ static DEFINE_MUTEX(start_mutex);
    5.24   */
    5.25  static int timer = 0;
    5.26  
    5.27 +int oprofile_set_active(int active_domains[], unsigned int adomains)
    5.28 +{
    5.29 +	int err;
    5.30 +
    5.31 +	if (!oprofile_ops.set_active)
    5.32 +		return -EINVAL;
    5.33 +
    5.34 +	mutex_lock(&start_mutex);
    5.35 +	err = oprofile_ops.set_active(active_domains, adomains);
    5.36 +	mutex_unlock(&start_mutex);
    5.37 +	return err;
    5.38 +}
    5.39 +
    5.40 +int oprofile_set_passive(int passive_domains[], unsigned int pdomains)
    5.41 +{
    5.42 +	int err;
    5.43 +
    5.44 +	if (!oprofile_ops.set_passive)
    5.45 +		return -EINVAL;
    5.46 +
    5.47 +	mutex_lock(&start_mutex);
    5.48 +	err = oprofile_ops.set_passive(passive_domains, pdomains);
    5.49 +	mutex_unlock(&start_mutex);
    5.50 +	return err;
    5.51 +}
    5.52 +
    5.53  int oprofile_setup(void)
    5.54  {
    5.55  	int err;
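
oprofile_set_active() and oprofile_set_passive() only take start_mutex and forward the domain lists to the new oprofile_operations hooks, so they return -EINVAL whenever the active driver does not provide them. A sketch of how a backend might wire the hooks up follows; the example_* names and the two *_tell_hypervisor_* helpers are placeholders for whatever mechanism actually hands the lists to Xen, not the real arch code.

#include <linux/oprofile.h>

/* Hypothetical helpers assumed to exist for the purpose of this sketch. */
int example_tell_hypervisor_active(int *domains, unsigned int n);
int example_tell_hypervisor_passive(int *domains, unsigned int n);

static int example_set_active(int *domains, unsigned int n)
{
	return example_tell_hypervisor_active(domains, n);
}

static int example_set_passive(int *domains, unsigned int n)
{
	return example_tell_hypervisor_passive(domains, n);
}

int example_backend_init(struct oprofile_operations *ops)
{
	ops->set_active  = example_set_active;
	ops->set_passive = example_set_passive;
	/* ... set up the remaining setup/start/stop hooks ... */
	return 0;
}
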
     6.1 --- a/drivers/oprofile/oprof.h	Mon Jun 04 10:05:24 2007 +0100
     6.2 +++ b/drivers/oprofile/oprof.h	Mon Jun 04 10:05:24 2007 +0100
     6.3 @@ -35,5 +35,8 @@ void oprofile_create_files(struct super_
     6.4  void oprofile_timer_init(struct oprofile_operations * ops);
     6.5  
     6.6  int oprofile_set_backtrace(unsigned long depth);
     6.7 +
     6.8 +int oprofile_set_active(int active_domains[], unsigned int adomains);
     6.9 +int oprofile_set_passive(int passive_domains[], unsigned int pdomains);
    6.10   
    6.11  #endif /* OPROF_H */
     7.1 --- a/drivers/oprofile/oprofile_files.c	Mon Jun 04 10:05:24 2007 +0100
     7.2 +++ b/drivers/oprofile/oprofile_files.c	Mon Jun 04 10:05:24 2007 +0100
     7.3 @@ -5,15 +5,21 @@
     7.4   * @remark Read the file COPYING
     7.5   *
     7.6   * @author John Levon <levon@movementarian.org>
     7.7 + *
     7.8 + * Modified by Aravind Menon for Xen
     7.9 + * These modifications are:
     7.10 + * Copyright (C) 2005 Hewlett-Packard Co.
    7.11   */
    7.12  
    7.13  #include <linux/fs.h>
    7.14  #include <linux/oprofile.h>
    7.15 +#include <asm/uaccess.h>
    7.16 +#include <linux/ctype.h>
    7.17  
    7.18  #include "event_buffer.h"
    7.19  #include "oprofile_stats.h"
    7.20  #include "oprof.h"
    7.21 - 
    7.22 +
    7.23  unsigned long fs_buffer_size = 131072;
    7.24  unsigned long fs_cpu_buffer_size = 8192;
    7.25  unsigned long fs_buffer_watershed = 32768; /* FIXME: tune */
    7.26 @@ -117,11 +123,202 @@ static ssize_t dump_write(struct file * 
    7.27  static struct file_operations dump_fops = {
    7.28  	.write		= dump_write,
    7.29  };
    7.30 - 
    7.31 +
    7.32 +#define TMPBUFSIZE 512
    7.33 +
    7.34 +static unsigned int adomains = 0;
    7.35 +static int active_domains[MAX_OPROF_DOMAINS + 1];
    7.36 +static DEFINE_MUTEX(adom_mutex);
    7.37 +
    7.38 +static ssize_t adomain_write(struct file * file, char const __user * buf, 
    7.39 +			     size_t count, loff_t * offset)
    7.40 +{
    7.41 +	char *tmpbuf;
    7.42 +	char *startp, *endp;
    7.43 +	int i;
    7.44 +	unsigned long val;
    7.45 +	ssize_t retval = count;
     7.46 +
     7.47 +	if (*offset)
     7.48 +		return -EINVAL;
    7.49 +	if (count > TMPBUFSIZE - 1)
    7.50 +		return -EINVAL;
    7.51 +
    7.52 +	if (!(tmpbuf = kmalloc(TMPBUFSIZE, GFP_KERNEL)))
    7.53 +		return -ENOMEM;
    7.54 +
    7.55 +	if (copy_from_user(tmpbuf, buf, count)) {
    7.56 +		kfree(tmpbuf);
    7.57 +		return -EFAULT;
    7.58 +	}
    7.59 +	tmpbuf[count] = 0;
    7.60 +
    7.61 +	mutex_lock(&adom_mutex);
    7.62 +
    7.63 +	startp = tmpbuf;
    7.64 +	/* Parse one more than MAX_OPROF_DOMAINS, for easy error checking */
    7.65 +	for (i = 0; i <= MAX_OPROF_DOMAINS; i++) {
    7.66 +		val = simple_strtoul(startp, &endp, 0);
    7.67 +		if (endp == startp)
    7.68 +			break;
    7.69 +		while (ispunct(*endp) || isspace(*endp))
    7.70 +			endp++;
    7.71 +		active_domains[i] = val;
    7.72 +		if (active_domains[i] != val)
    7.73 +			/* Overflow, force error below */
    7.74 +			i = MAX_OPROF_DOMAINS + 1;
    7.75 +		startp = endp;
    7.76 +	}
    7.77 +	/* Force error on trailing junk */
    7.78 +	adomains = *startp ? MAX_OPROF_DOMAINS + 1 : i;
    7.79 +
    7.80 +	kfree(tmpbuf);
    7.81 +
    7.82 +	if (adomains > MAX_OPROF_DOMAINS
    7.83 +	    || oprofile_set_active(active_domains, adomains)) {
    7.84 +		adomains = 0;
    7.85 +		retval = -EINVAL;
    7.86 +	}
    7.87 +
    7.88 +	mutex_unlock(&adom_mutex);
    7.89 +	return retval;
    7.90 +}
    7.91 +
    7.92 +static ssize_t adomain_read(struct file * file, char __user * buf, 
    7.93 +			    size_t count, loff_t * offset)
    7.94 +{
    7.95 +	char * tmpbuf;
    7.96 +	size_t len;
    7.97 +	int i;
    7.98 +	ssize_t retval;
    7.99 +
   7.100 +	if (!(tmpbuf = kmalloc(TMPBUFSIZE, GFP_KERNEL)))
   7.101 +		return -ENOMEM;
   7.102 +
   7.103 +	mutex_lock(&adom_mutex);
   7.104 +
   7.105 +	len = 0;
   7.106 +	for (i = 0; i < adomains; i++)
   7.107 +		len += snprintf(tmpbuf + len,
   7.108 +				len < TMPBUFSIZE ? TMPBUFSIZE - len : 0,
   7.109 +				"%u ", active_domains[i]);
   7.110 +	WARN_ON(len > TMPBUFSIZE);
   7.111 +	if (len != 0 && len <= TMPBUFSIZE)
   7.112 +		tmpbuf[len-1] = '\n';
   7.113 +
   7.114 +	mutex_unlock(&adom_mutex);
   7.115 +
   7.116 +	retval = simple_read_from_buffer(buf, count, offset, tmpbuf, len);
   7.117 +
   7.118 +	kfree(tmpbuf);
   7.119 +	return retval;
   7.120 +}
   7.121 +
   7.122 +
   7.123 +static struct file_operations active_domain_ops = {
   7.124 +	.read		= adomain_read,
   7.125 +	.write		= adomain_write,
   7.126 +};
   7.127 +
   7.128 +static unsigned int pdomains = 0;
   7.129 +static int passive_domains[MAX_OPROF_DOMAINS];
   7.130 +static DEFINE_MUTEX(pdom_mutex);
   7.131 +
   7.132 +static ssize_t pdomain_write(struct file * file, char const __user * buf, 
   7.133 +			     size_t count, loff_t * offset)
   7.134 +{
   7.135 +	char *tmpbuf;
   7.136 +	char *startp, *endp;
   7.137 +	int i;
   7.138 +	unsigned long val;
   7.139 +	ssize_t retval = count;
    7.140 +
    7.141 +	if (*offset)
    7.142 +		return -EINVAL;
   7.143 +	if (count > TMPBUFSIZE - 1)
   7.144 +		return -EINVAL;
   7.145 +
   7.146 +	if (!(tmpbuf = kmalloc(TMPBUFSIZE, GFP_KERNEL)))
   7.147 +		return -ENOMEM;
   7.148 +
   7.149 +	if (copy_from_user(tmpbuf, buf, count)) {
   7.150 +		kfree(tmpbuf);
   7.151 +		return -EFAULT;
   7.152 +	}
   7.153 +	tmpbuf[count] = 0;
   7.154 +
   7.155 +	mutex_lock(&pdom_mutex);
   7.156 +
   7.157 +	startp = tmpbuf;
   7.158 +	/* Parse one more than MAX_OPROF_DOMAINS, for easy error checking */
   7.159 +	for (i = 0; i <= MAX_OPROF_DOMAINS; i++) {
   7.160 +		val = simple_strtoul(startp, &endp, 0);
   7.161 +		if (endp == startp)
   7.162 +			break;
   7.163 +		while (ispunct(*endp) || isspace(*endp))
   7.164 +			endp++;
   7.165 +		passive_domains[i] = val;
   7.166 +		if (passive_domains[i] != val)
   7.167 +			/* Overflow, force error below */
   7.168 +			i = MAX_OPROF_DOMAINS + 1;
   7.169 +		startp = endp;
   7.170 +	}
   7.171 +	/* Force error on trailing junk */
   7.172 +	pdomains = *startp ? MAX_OPROF_DOMAINS + 1 : i;
   7.173 +
   7.174 +	kfree(tmpbuf);
   7.175 +
   7.176 +	if (pdomains > MAX_OPROF_DOMAINS
   7.177 +	    || oprofile_set_passive(passive_domains, pdomains)) {
   7.178 +		pdomains = 0;
   7.179 +		retval = -EINVAL;
   7.180 +	}
   7.181 +
   7.182 +	mutex_unlock(&pdom_mutex);
   7.183 +	return retval;
   7.184 +}
   7.185 +
   7.186 +static ssize_t pdomain_read(struct file * file, char __user * buf, 
   7.187 +			    size_t count, loff_t * offset)
   7.188 +{
   7.189 +	char * tmpbuf;
   7.190 +	size_t len;
   7.191 +	int i;
   7.192 +	ssize_t retval;
   7.193 +
   7.194 +	if (!(tmpbuf = kmalloc(TMPBUFSIZE, GFP_KERNEL)))
   7.195 +		return -ENOMEM;
   7.196 +
   7.197 +	mutex_lock(&pdom_mutex);
   7.198 +
   7.199 +	len = 0;
   7.200 +	for (i = 0; i < pdomains; i++)
   7.201 +		len += snprintf(tmpbuf + len,
   7.202 +				len < TMPBUFSIZE ? TMPBUFSIZE - len : 0,
   7.203 +				"%u ", passive_domains[i]);
   7.204 +	WARN_ON(len > TMPBUFSIZE);
   7.205 +	if (len != 0 && len <= TMPBUFSIZE)
   7.206 +		tmpbuf[len-1] = '\n';
   7.207 +
   7.208 +	mutex_unlock(&pdom_mutex);
   7.209 +
   7.210 +	retval = simple_read_from_buffer(buf, count, offset, tmpbuf, len);
   7.211 +
   7.212 +	kfree(tmpbuf);
   7.213 +	return retval;
   7.214 +}
   7.215 +
   7.216 +static struct file_operations passive_domain_ops = {
   7.217 +	.read		= pdomain_read,
   7.218 +	.write		= pdomain_write,
   7.219 +};
   7.220 +
   7.221  void oprofile_create_files(struct super_block * sb, struct dentry * root)
   7.222  {
   7.223  	oprofilefs_create_file(sb, root, "enable", &enable_fops);
   7.224  	oprofilefs_create_file_perm(sb, root, "dump", &dump_fops, 0666);
   7.225 +	oprofilefs_create_file(sb, root, "active_domains", &active_domain_ops);
   7.226 +	oprofilefs_create_file(sb, root, "passive_domains", &passive_domain_ops);
   7.227  	oprofilefs_create_file(sb, root, "buffer", &event_buffer_fops);
   7.228  	oprofilefs_create_ulong(sb, root, "buffer_size", &fs_buffer_size);
   7.229  	oprofilefs_create_ulong(sb, root, "buffer_watershed", &fs_buffer_watershed);
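
The new active_domains and passive_domains files accept up to MAX_OPROF_DOMAINS domain ids per write, separated by whitespace and/or punctuation; trailing junk, too many entries, or values that overflow an int make the write fail with -EINVAL, and reading a file back returns the currently configured list. A small user-space example (illustrative only; it assumes oprofilefs is mounted at /dev/oprofile):

/* Illustrative only: mark domains 1 and 2 as active for profiling,
 * assuming oprofilefs is mounted at /dev/oprofile. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *list = "1 2\n";
	int fd = open("/dev/oprofile/active_domains", O_WRONLY);

	if (fd < 0 || write(fd, list, strlen(list)) < 0)
		perror("active_domains");
	if (fd >= 0)
		close(fd);
	return 0;
}
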
     8.1 --- a/include/linux/oprofile.h	Mon Jun 04 10:05:24 2007 +0100
     8.2 +++ b/include/linux/oprofile.h	Mon Jun 04 10:05:24 2007 +0100
     8.3 @@ -16,6 +16,8 @@
     8.4  #include <linux/types.h>
     8.5  #include <linux/spinlock.h>
     8.6  #include <asm/atomic.h>
     8.7 +
     8.8 +#include <xen/interface/xenoprof.h>
     8.9   
    8.10  struct super_block;
    8.11  struct dentry;
    8.12 @@ -27,6 +29,11 @@ struct oprofile_operations {
    8.13  	/* create any necessary configuration files in the oprofile fs.
    8.14  	 * Optional. */
    8.15  	int (*create_files)(struct super_block * sb, struct dentry * root);
    8.16 +	/* setup active domains with Xen */
    8.17 +	int (*set_active)(int *active_domains, unsigned int adomains);
     8.18 +	/* setup passive domains with Xen */
     8.19 +	int (*set_passive)(int *passive_domains, unsigned int pdomains);
     8.20 +
    8.21  	/* Do any necessary interrupt setup. Optional. */
    8.22  	int (*setup)(void);
    8.23  	/* Do any necessary interrupt shutdown. Optional. */
    8.24 @@ -78,6 +85,8 @@ void oprofile_add_pc(unsigned long pc, i
    8.25  /* add a backtrace entry, to be called from the ->backtrace callback */
    8.26  void oprofile_add_trace(unsigned long eip);
    8.27  
    8.28 +/* add a domain switch entry */
    8.29 +int oprofile_add_domain_switch(int32_t domain_id);
    8.30  
    8.31  /**
    8.32   * Create a file of the given name as a child of the given root, with