ia64/xen-unstable

changeset 10535:23591d2c46aa

Add Xenoprof passive domain support

Signed-off-by: Yang Xiaowei <xiaowei.yang@intel.com>
Signed-off-by: Jose Renato Santos <jsantos@hpl.hp.com>

author kaf24@firebug.cl.cam.ac.uk
date Tue Jun 27 11:23:06 2006 +0100 (2006-06-27)
parents b12cd185d579
children 60d7d64eaff2
files linux-2.6-xen-sparse/arch/i386/oprofile/xenoprof.c
      patches/linux-2.6.16.13/xenoprof-generic.patch
      xen/arch/x86/oprofile/nmi_int.c
      xen/arch/x86/oprofile/xenoprof.c
      xen/include/public/xenoprof.h
      xen/include/xen/xenoprof.h
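
What this changeset does, in short: the primary profiler (typically domain 0) can now collect samples on behalf of "passive" domains that do not run a Xenoprof-aware profiler themselves. Xen allocates per-vcpu sample buffers for each passive domain (new XENOPROF_set_passive and XENOPROF_reset_passive_list operations), the dom0 driver maps those buffers and drains them from its overflow interrupt, and the oprofile buffer-sync code brackets the passive samples with new PASSIVE_START_CODE/PASSIVE_STOP_CODE escape records carrying the passive domain's ID. Passive domains are selected from userspace through a new oprofilefs control file, passive_domains, created alongside active_domains.

The fragment below is an editorial sketch, not part of the changeset: it only illustrates how that control file would be driven, assuming oprofilefs is mounted at the usual /dev/oprofile location and that domain IDs 1 and 2 stand in for the guests to be profiled.

/* Hedged usage sketch: hand Xen a list of passive domain IDs by writing
 * them, separated by whitespace or punctuation, to the oprofilefs control
 * file in a single write.  pdomain_write() parses the list and forwards it
 * through oprofile_set_passive() -> xenoprof_set_passive() ->
 * XENOPROF_set_passive. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
    const char *doms = "1 2\n";   /* placeholder domain IDs */
    int fd = open("/dev/oprofile/passive_domains", O_WRONLY);

    if (fd < 0) {
        perror("open");
        return 1;
    }
    /* pdomain_write() rejects writes at a non-zero offset, so the whole
     * list must go in one write. */
    if (write(fd, doms, strlen(doms)) < 0)
        perror("write");
    close(fd);
    return 0;
}

Reading the file back returns the list that was accepted; on the primary profiler, by the time the write returns the in-kernel driver has already issued XENOPROF_reset_passive_list, one XENOPROF_set_passive per listed domain, and mapped the sample buffers Xen handed back.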
line diff
     1.1 --- a/linux-2.6-xen-sparse/arch/i386/oprofile/xenoprof.c	Tue Jun 27 11:17:14 2006 +0100
     1.2 +++ b/linux-2.6-xen-sparse/arch/i386/oprofile/xenoprof.c	Tue Jun 27 11:23:06 2006 +0100
     1.3 @@ -28,6 +28,7 @@
     1.4  
     1.5  #include <xen/interface/xen.h>
     1.6  #include <xen/interface/xenoprof.h>
     1.7 +#include <../../../drivers/oprofile/cpu_buffer.h>
     1.8  
     1.9  static int xenoprof_start(void);
    1.10  static void xenoprof_stop(void);
    1.11 @@ -50,6 +51,11 @@ int ovf_irq[NR_CPUS];
    1.12  /* cpu model type string - copied from Xen memory space on XENOPROF_init command */
    1.13  char cpu_type[XENOPROF_CPU_TYPE_SIZE];
    1.14  
    1.15 +/* Passive sample buffers shared with Xen */
    1.16 +xenoprof_buf_t *p_xenoprof_buf[MAX_OPROF_DOMAINS][MAX_VIRT_CPUS];
    1.17 +/* Passive shared buffer area */
    1.18 +char *p_shared_buffer[MAX_OPROF_DOMAINS];
    1.19 +
    1.20  #ifdef CONFIG_PM
    1.21  
    1.22  static int xenoprof_suspend(struct sys_device * dev, pm_message_t state)
    1.23 @@ -102,16 +108,14 @@ static void __exit exit_driverfs(void)
    1.24  #endif /* CONFIG_PM */
    1.25  
    1.26  unsigned long long oprofile_samples = 0;
    1.27 +unsigned long long p_oprofile_samples = 0;
    1.28  
    1.29 -static irqreturn_t 
    1.30 -xenoprof_ovf_interrupt(int irq, void * dev_id, struct pt_regs * regs)
    1.31 +unsigned int pdomains;
    1.32 +struct xenoprof_passive passive_domains[MAX_OPROF_DOMAINS];
    1.33 +
    1.34 +static void xenoprof_add_pc(xenoprof_buf_t *buf, int is_passive)
    1.35  {
    1.36  	int head, tail, size;
    1.37 -	struct xenoprof_buf * buf;
    1.38 -	int cpu;
    1.39 -
    1.40 -	cpu = smp_processor_id();
    1.41 -	buf = xenoprof_buf[cpu];
    1.42  
    1.43  	head = buf->event_head;
    1.44  	tail = buf->event_tail;
    1.45 @@ -122,7 +126,10 @@ xenoprof_ovf_interrupt(int irq, void * d
    1.46  			oprofile_add_pc(buf->event_log[tail].eip,
    1.47  					buf->event_log[tail].mode,
    1.48  					buf->event_log[tail].event);
    1.49 -			oprofile_samples++;
    1.50 +			if (!is_passive)
    1.51 +				oprofile_samples++;
    1.52 +			else
    1.53 +				p_oprofile_samples++;
    1.54  			tail++;
    1.55  		}
    1.56  		tail = 0;
    1.57 @@ -131,11 +138,47 @@ xenoprof_ovf_interrupt(int irq, void * d
    1.58  		oprofile_add_pc(buf->event_log[tail].eip,
    1.59  				buf->event_log[tail].mode,
    1.60  				buf->event_log[tail].event);
    1.61 -		oprofile_samples++;
    1.62 +		if (!is_passive)
    1.63 +			oprofile_samples++;
    1.64 +		else
    1.65 +			p_oprofile_samples++;
    1.66  		tail++;
    1.67  	}
    1.68  
    1.69  	buf->event_tail = tail;
    1.70 +}
    1.71 +
    1.72 +static void xenoprof_handle_passive(void)
    1.73 +{
    1.74 +	int i, j;
    1.75 +
    1.76 +	for (i = 0; i < pdomains; i++)
    1.77 +		for (j = 0; j < passive_domains[i].nbuf; j++) {
    1.78 +			xenoprof_buf_t *buf = p_xenoprof_buf[i][j];
    1.79 +			if (buf->event_head == buf->event_tail)
    1.80 +				continue;
    1.81 +                        oprofile_add_pc(IGNORED_PC, CPU_MODE_PASSIVE_START, passive_domains[i].domain_id);
    1.82 +			xenoprof_add_pc(buf, 1);
    1.83 +                        oprofile_add_pc(IGNORED_PC, CPU_MODE_PASSIVE_STOP, passive_domains[i].domain_id);
    1.84 +		}			
    1.85 +}
    1.86 +
    1.87 +static irqreturn_t 
    1.88 +xenoprof_ovf_interrupt(int irq, void * dev_id, struct pt_regs * regs)
    1.89 +{
    1.90 +	struct xenoprof_buf * buf;
    1.91 +	int cpu;
    1.92 +	static unsigned long flag;
    1.93 +
    1.94 +	cpu = smp_processor_id();
    1.95 +	buf = xenoprof_buf[cpu];
    1.96 +
    1.97 +	xenoprof_add_pc(buf, 0);
    1.98 +
    1.99 +	if (is_primary && !test_and_set_bit(0, &flag)) {
   1.100 +		xenoprof_handle_passive();
   1.101 +		clear_bit(0, &flag);
   1.102 +	}
   1.103  
   1.104  	return IRQ_HANDLED;
   1.105  }
   1.106 @@ -312,6 +355,63 @@ out:
   1.107  	return ret;
   1.108  }
   1.109  
   1.110 +static int xenoprof_set_passive(int * p_domains,
   1.111 +                                unsigned int pdoms)
   1.112 +{
   1.113 +	int ret;
   1.114 +	int i, j;
   1.115 +	int vm_size;
   1.116 +	int npages;
   1.117 +	struct xenoprof_buf *buf;
   1.118 +	pgprot_t prot = __pgprot(_KERNPG_TABLE);
   1.119 +
   1.120 +	if (!is_primary)
   1.121 +        	return 0;
   1.122 +
   1.123 +	if (pdoms > MAX_OPROF_DOMAINS)
   1.124 +		return -E2BIG;
   1.125 +
   1.126 +	ret = HYPERVISOR_xenoprof_op(XENOPROF_reset_passive_list, NULL);
   1.127 +	if (ret)
   1.128 +		return ret;
   1.129 +
   1.130 +	for (i = 0; i < pdoms; i++) {
   1.131 +		passive_domains[i].domain_id = p_domains[i];
   1.132 +		passive_domains[i].max_samples = 2048;
   1.133 +		ret = HYPERVISOR_xenoprof_op(XENOPROF_set_passive, &passive_domains[i]);
   1.134 +		if (ret)
   1.135 +			return ret;
   1.136 +
   1.137 +		npages = (passive_domains[i].bufsize * passive_domains[i].nbuf - 1) / PAGE_SIZE + 1;
   1.138 +		vm_size = npages * PAGE_SIZE;
   1.139 +
   1.140 +		p_shared_buffer[i] = (char *)vm_map_xen_pages(passive_domains[i].buf_maddr,
   1.141 +							      vm_size, prot);
   1.142 +		if (!p_shared_buffer[i]) {
   1.143 +			ret = -ENOMEM;
   1.144 +			goto out;
   1.145 +		}
   1.146 +
   1.147 +		for (j = 0; j < passive_domains[i].nbuf; j++) {
   1.148 +			buf = (struct xenoprof_buf *)
   1.149 +				&p_shared_buffer[i][j * passive_domains[i].bufsize];
   1.150 +			BUG_ON(buf->vcpu_id >= MAX_VIRT_CPUS);
   1.151 +			p_xenoprof_buf[i][buf->vcpu_id] = buf;
   1.152 +		}
   1.153 +
   1.154 +	}
   1.155 +
   1.156 +	pdomains = pdoms;
   1.157 +	return 0;
   1.158 +
   1.159 +out:
   1.160 +	for (j = 0; j < i; j++) {
   1.161 +		vunmap(p_shared_buffer[j]);
   1.162 +		p_shared_buffer[j] = NULL;
   1.163 +	}
   1.164 +
   1.165 + 	return ret;
   1.166 +}
   1.167  
   1.168  struct op_counter_config counter_config[OP_MAX_COUNTER];
   1.169  
   1.170 @@ -346,6 +446,7 @@ static int xenoprof_create_files(struct 
   1.171  struct oprofile_operations xenoprof_ops = {
   1.172  	.create_files 	= xenoprof_create_files,
   1.173  	.set_active	= xenoprof_set_active,
   1.174 +	.set_passive    = xenoprof_set_passive,
   1.175  	.setup 		= xenoprof_setup,
   1.176  	.shutdown	= xenoprof_shutdown,
   1.177  	.start		= xenoprof_start,
   1.178 @@ -420,6 +521,8 @@ int __init oprofile_arch_init(struct opr
   1.179  
   1.180  void __exit oprofile_arch_exit(void)
   1.181  {
   1.182 +	int i;
   1.183 +
   1.184  	if (using_xenoprof)
   1.185  		exit_driverfs();
   1.186  
   1.187 @@ -427,6 +530,13 @@ void __exit oprofile_arch_exit(void)
   1.188  		vunmap(shared_buffer);
   1.189  		shared_buffer = NULL;
   1.190  	}
   1.191 -	if (is_primary)
   1.192 +	if (is_primary) {
   1.193 +		for (i = 0; i < pdomains; i++)
   1.194 +			if (p_shared_buffer[i]) {
   1.195 +		                vunmap(p_shared_buffer[i]);
   1.196 +                		p_shared_buffer[i] = NULL;
   1.197 +			}
   1.198  		HYPERVISOR_xenoprof_op(XENOPROF_shutdown, NULL);
   1.199 +        }
   1.200 +
   1.201  }
     2.1 --- a/patches/linux-2.6.16.13/xenoprof-generic.patch	Tue Jun 27 11:17:14 2006 +0100
     2.2 +++ b/patches/linux-2.6.16.13/xenoprof-generic.patch	Tue Jun 27 11:23:06 2006 +0100
     2.3 @@ -1,6 +1,6 @@
     2.4 -diff -pruN ../pristine-linux-2.6.16.13/drivers/oprofile/buffer_sync.c ./drivers/oprofile/buffer_sync.c
     2.5 ---- ../pristine-linux-2.6.16.13/drivers/oprofile/buffer_sync.c	2006-05-02 22:38:44.000000000 +0100
     2.6 -+++ ./drivers/oprofile/buffer_sync.c	2006-05-04 17:41:51.000000000 +0100
     2.7 +diff -pru ../pristine-linux-2.6.16.13/drivers/oprofile/buffer_sync.c ./drivers/oprofile/buffer_sync.c
     2.8 +--- ../pristine-linux-2.6.16.13/drivers/oprofile/buffer_sync.c	2006-05-03 05:38:44.000000000 +0800
     2.9 ++++ ./drivers/oprofile/buffer_sync.c	2006-06-27 12:14:53.000000000 +0800
    2.10  @@ -6,6 +6,10 @@
    2.11    *
    2.12    * @author John Levon <levon@movementarian.org>
    2.13 @@ -12,7 +12,7 @@ diff -pruN ../pristine-linux-2.6.16.13/d
    2.14    * This is the core of the buffer management. Each
    2.15    * CPU buffer is processed and entered into the
    2.16    * global event buffer. Such processing is necessary
    2.17 -@@ -275,15 +279,24 @@ static void add_cpu_switch(int i)
    2.18 +@@ -275,15 +279,30 @@ static void add_cpu_switch(int i)
    2.19   	last_cookie = INVALID_COOKIE;
    2.20   }
    2.21   
    2.22 @@ -33,7 +33,13 @@ diff -pruN ../pristine-linux-2.6.16.13/d
    2.23  +		break;
    2.24  +	case CPU_MODE_XEN:
    2.25  +		add_event_entry(XEN_ENTER_SWITCH_CODE);
    2.26 -+		break;
    2.27 ++	  	break;
    2.28 ++        case CPU_MODE_PASSIVE_START:
    2.29 ++                add_event_entry(PASSIVE_START_CODE);
    2.30 ++                break;
    2.31 ++        case CPU_MODE_PASSIVE_STOP:
    2.32 ++                add_event_entry(PASSIVE_STOP_CODE);
    2.33 ++                break;
    2.34  +	default:
    2.35  +		break;
    2.36  +	}
    2.37 @@ -43,7 +49,7 @@ diff -pruN ../pristine-linux-2.6.16.13/d
    2.38   static void
    2.39   add_user_ctx_switch(struct task_struct const * task, unsigned long cookie)
    2.40   {
    2.41 -@@ -348,9 +361,9 @@ static int add_us_sample(struct mm_struc
    2.42 +@@ -348,9 +367,9 @@ static int add_us_sample(struct mm_struc
    2.43    * for later lookup from userspace.
    2.44    */
    2.45   static int
    2.46 @@ -55,7 +61,7 @@ diff -pruN ../pristine-linux-2.6.16.13/d
    2.47   		add_sample_entry(s->eip, s->event);
    2.48   		return 1;
    2.49   	} else if (mm) {
    2.50 -@@ -496,7 +509,7 @@ void sync_buffer(int cpu)
    2.51 +@@ -496,10 +515,11 @@ void sync_buffer(int cpu)
    2.52   	struct mm_struct *mm = NULL;
    2.53   	struct task_struct * new;
    2.54   	unsigned long cookie = 0;
    2.55 @@ -64,34 +70,62 @@ diff -pruN ../pristine-linux-2.6.16.13/d
    2.56   	unsigned int i;
    2.57   	sync_buffer_state state = sb_buffer_start;
    2.58   	unsigned long available;
    2.59 -@@ -513,12 +526,12 @@ void sync_buffer(int cpu)
    2.60 ++	int domain_switch = NO_DOMAIN_SWITCH;
    2.61 + 
    2.62 + 	down(&buffer_sem);
    2.63 +  
    2.64 +@@ -513,12 +533,19 @@ void sync_buffer(int cpu)
    2.65   		struct op_sample * s = &cpu_buf->buffer[cpu_buf->tail_pos];
    2.66    
    2.67   		if (is_code(s->eip)) {
    2.68  -			if (s->event <= CPU_IS_KERNEL) {
    2.69 -+			if (s->event <= CPU_MODE_XEN) {
    2.70 ++			if (s->event < CPU_TRACE_BEGIN) {
    2.71   				/* kernel/userspace switch */
    2.72  -				in_kernel = s->event;
    2.73  +				cpu_mode = s->event;
    2.74   				if (state == sb_buffer_start)
    2.75   					state = sb_sample_start;
    2.76  -				add_kernel_ctx_switch(s->event);
    2.77 -+				add_cpu_mode_switch(s->event);
    2.78 ++
    2.79 ++				if (s->event == CPU_MODE_PASSIVE_START)
    2.80 ++					domain_switch = DOMAIN_SWITCH_START_EVENT1;
    2.81 ++				else if (s->event == CPU_MODE_PASSIVE_STOP)
    2.82 ++					domain_switch = DOMAIN_SWITCH_STOP_EVENT1;
    2.83 ++
    2.84 ++				if (domain_switch != DOMAIN_SWITCH_START_EVENT2)
    2.85 ++					add_cpu_mode_switch(s->event);
    2.86   			} else if (s->event == CPU_TRACE_BEGIN) {
    2.87   				state = sb_bt_start;
    2.88   				add_trace_begin();
    2.89 -@@ -536,7 +549,7 @@ void sync_buffer(int cpu)
    2.90 +@@ -535,11 +562,20 @@ void sync_buffer(int cpu)
    2.91 + 				add_user_ctx_switch(new, cookie);
    2.92   			}
    2.93   		} else {
    2.94 - 			if (state >= sb_bt_start &&
    2.95 +-			if (state >= sb_bt_start &&
    2.96  -			    !add_sample(mm, s, in_kernel)) {
    2.97 -+			    !add_sample(mm, s, cpu_mode)) {
    2.98 - 				if (state == sb_bt_start) {
    2.99 - 					state = sb_bt_ignore;
   2.100 - 					atomic_inc(&oprofile_stats.bt_lost_no_mapping);
   2.101 -diff -pruN ../pristine-linux-2.6.16.13/drivers/oprofile/cpu_buffer.c ./drivers/oprofile/cpu_buffer.c
   2.102 ---- ../pristine-linux-2.6.16.13/drivers/oprofile/cpu_buffer.c	2006-05-02 22:38:44.000000000 +0100
   2.103 -+++ ./drivers/oprofile/cpu_buffer.c	2006-05-04 17:41:51.000000000 +0100
   2.104 +-				if (state == sb_bt_start) {
   2.105 +-					state = sb_bt_ignore;
   2.106 +-					atomic_inc(&oprofile_stats.bt_lost_no_mapping);
   2.107 ++			if (domain_switch == DOMAIN_SWITCH_START_EVENT1) {
   2.108 ++			        add_event_entry(s->event);
   2.109 ++				domain_switch = DOMAIN_SWITCH_START_EVENT2;
   2.110 ++			} else if (domain_switch == DOMAIN_SWITCH_START_EVENT1) {
   2.111 ++				add_sample_entry(s->eip, s->event);
   2.112 ++			} else if (domain_switch == DOMAIN_SWITCH_STOP_EVENT1) {
   2.113 ++				domain_switch = NO_DOMAIN_SWITCH;
   2.114 ++			} else {
   2.115 ++				if (state >= sb_bt_start &&
   2.116 ++				    !add_sample(mm, s, cpu_mode)) {
   2.117 ++					if (state == sb_bt_start) {
   2.118 ++						state = sb_bt_ignore;
   2.119 ++						atomic_inc(&oprofile_stats.bt_lost_no_mapping);
   2.120 ++					}
   2.121 + 				}
   2.122 + 			}
   2.123 + 		}
   2.124 +diff -pru ../pristine-linux-2.6.16.13/drivers/oprofile/cpu_buffer.c ./drivers/oprofile/cpu_buffer.c
   2.125 +--- ../pristine-linux-2.6.16.13/drivers/oprofile/cpu_buffer.c	2006-05-03 05:38:44.000000000 +0800
   2.126 ++++ ./drivers/oprofile/cpu_buffer.c	2006-06-19 22:43:53.000000000 +0800
   2.127  @@ -6,6 +6,10 @@
   2.128    *
   2.129    * @author John Levon <levon@movementarian.org>
   2.130 @@ -139,13 +173,12 @@ diff -pruN ../pristine-linux-2.6.16.13/d
   2.131   {
   2.132   	struct task_struct * task;
   2.133   
   2.134 -@@ -181,16 +185,16 @@ static int log_sample(struct oprofile_cp
   2.135 +@@ -181,16 +185,14 @@ static int log_sample(struct oprofile_cp
   2.136   		return 0;
   2.137   	}
   2.138   
   2.139  -	is_kernel = !!is_kernel;
   2.140 -+	WARN_ON(cpu_mode > CPU_MODE_XEN);
   2.141 - 
   2.142 +-
   2.143   	task = current;
   2.144   
   2.145   	/* notice a switch from user->kernel or vice versa */
   2.146 @@ -161,9 +194,9 @@ diff -pruN ../pristine-linux-2.6.16.13/d
   2.147   	/* notice a task switch */
   2.148   	if (cpu_buf->last_task != task) {
   2.149   		cpu_buf->last_task = task;
   2.150 -diff -pruN ../pristine-linux-2.6.16.13/drivers/oprofile/cpu_buffer.h ./drivers/oprofile/cpu_buffer.h
   2.151 ---- ../pristine-linux-2.6.16.13/drivers/oprofile/cpu_buffer.h	2006-05-02 22:38:44.000000000 +0100
   2.152 -+++ ./drivers/oprofile/cpu_buffer.h	2006-05-04 17:41:51.000000000 +0100
   2.153 +diff -pru ../pristine-linux-2.6.16.13/drivers/oprofile/cpu_buffer.h ./drivers/oprofile/cpu_buffer.h
   2.154 +--- ../pristine-linux-2.6.16.13/drivers/oprofile/cpu_buffer.h	2006-05-03 05:38:44.000000000 +0800
   2.155 ++++ ./drivers/oprofile/cpu_buffer.h	2006-06-27 10:38:08.000000000 +0800
   2.156  @@ -36,7 +36,7 @@ struct oprofile_cpu_buffer {
   2.157   	volatile unsigned long tail_pos;
   2.158   	unsigned long buffer_size;
   2.159 @@ -173,22 +206,26 @@ diff -pruN ../pristine-linux-2.6.16.13/d
   2.160   	int tracing;
   2.161   	struct op_sample * buffer;
   2.162   	unsigned long sample_received;
   2.163 -@@ -51,7 +51,9 @@ extern struct oprofile_cpu_buffer cpu_bu
   2.164 +@@ -51,7 +51,13 @@ extern struct oprofile_cpu_buffer cpu_bu
   2.165   void cpu_buffer_reset(struct oprofile_cpu_buffer * cpu_buf);
   2.166   
   2.167   /* transient events for the CPU buffer -> event buffer */
   2.168  -#define CPU_IS_KERNEL 1
   2.169  -#define CPU_TRACE_BEGIN 2
   2.170 -+#define CPU_MODE_USER    0
   2.171 -+#define CPU_MODE_KERNEL  1
   2.172 -+#define CPU_MODE_XEN     2
   2.173 -+#define CPU_TRACE_BEGIN  3
   2.174 ++#define CPU_MODE_USER           0
   2.175 ++#define CPU_MODE_KERNEL         1
   2.176 ++#define CPU_MODE_XEN            2
   2.177 ++#define CPU_MODE_PASSIVE_START  3
   2.178 ++#define CPU_MODE_PASSIVE_STOP   4
   2.179 ++#define CPU_TRACE_BEGIN         5
   2.180 ++
   2.181 ++#define IGNORED_PC              0
   2.182   
   2.183   #endif /* OPROFILE_CPU_BUFFER_H */
   2.184 -diff -pruN ../pristine-linux-2.6.16.13/drivers/oprofile/event_buffer.h ./drivers/oprofile/event_buffer.h
   2.185 ---- ../pristine-linux-2.6.16.13/drivers/oprofile/event_buffer.h	2006-05-02 22:38:44.000000000 +0100
   2.186 -+++ ./drivers/oprofile/event_buffer.h	2006-05-04 17:41:51.000000000 +0100
   2.187 -@@ -29,11 +29,12 @@ void wake_up_buffer_waiter(void);
   2.188 +diff -pru ../pristine-linux-2.6.16.13/drivers/oprofile/event_buffer.h ./drivers/oprofile/event_buffer.h
   2.189 +--- ../pristine-linux-2.6.16.13/drivers/oprofile/event_buffer.h	2006-05-03 05:38:44.000000000 +0800
   2.190 ++++ ./drivers/oprofile/event_buffer.h	2006-06-19 22:43:53.000000000 +0800
   2.191 +@@ -29,11 +29,14 @@ void wake_up_buffer_waiter(void);
   2.192   #define CPU_SWITCH_CODE 		2
   2.193   #define COOKIE_SWITCH_CODE 		3
   2.194   #define KERNEL_ENTER_SWITCH_CODE	4
   2.195 @@ -199,12 +236,14 @@ diff -pruN ../pristine-linux-2.6.16.13/d
   2.196   #define TRACE_BEGIN_CODE		8
   2.197   #define TRACE_END_CODE			9
   2.198  +#define XEN_ENTER_SWITCH_CODE		10
   2.199 ++#define PASSIVE_START_CODE		11
   2.200 ++#define PASSIVE_STOP_CODE		12
   2.201    
   2.202   #define INVALID_COOKIE ~0UL
   2.203   #define NO_COOKIE 0UL
   2.204 -diff -pruN ../pristine-linux-2.6.16.13/drivers/oprofile/oprof.c ./drivers/oprofile/oprof.c
   2.205 ---- ../pristine-linux-2.6.16.13/drivers/oprofile/oprof.c	2006-05-02 22:38:44.000000000 +0100
   2.206 -+++ ./drivers/oprofile/oprof.c	2006-05-04 17:41:51.000000000 +0100
   2.207 +diff -pru ../pristine-linux-2.6.16.13/drivers/oprofile/oprof.c ./drivers/oprofile/oprof.c
   2.208 +--- ../pristine-linux-2.6.16.13/drivers/oprofile/oprof.c	2006-05-03 05:38:44.000000000 +0800
   2.209 ++++ ./drivers/oprofile/oprof.c	2006-06-19 23:45:17.000000000 +0800
   2.210  @@ -5,6 +5,10 @@
   2.211    * @remark Read the file COPYING
   2.212    *
   2.213 @@ -225,7 +264,7 @@ diff -pruN ../pristine-linux-2.6.16.13/d
   2.214   struct oprofile_operations oprofile_ops;
   2.215   
   2.216   unsigned long oprofile_started;
   2.217 -@@ -33,6 +37,19 @@ static DECLARE_MUTEX(start_sem);
   2.218 +@@ -33,6 +37,32 @@ static DECLARE_MUTEX(start_sem);
   2.219    */
   2.220   static int timer = 0;
   2.221   
   2.222 @@ -242,23 +281,37 @@ diff -pruN ../pristine-linux-2.6.16.13/d
   2.223  +	return err;
   2.224  +}
   2.225  +
   2.226 ++int oprofile_set_passive(int passive_domains[], unsigned int pdomains)
   2.227 ++{
   2.228 ++	int err;
   2.229 ++
   2.230 ++	if (!oprofile_ops.set_passive)
   2.231 ++		return -EINVAL;
   2.232 ++
   2.233 ++	down(&start_sem);
   2.234 ++	err = oprofile_ops.set_passive(passive_domains, pdomains);
   2.235 ++	up(&start_sem);
   2.236 ++	return err;
   2.237 ++}
   2.238 ++
   2.239   int oprofile_setup(void)
   2.240   {
   2.241   	int err;
   2.242 -diff -pruN ../pristine-linux-2.6.16.13/drivers/oprofile/oprof.h ./drivers/oprofile/oprof.h
   2.243 ---- ../pristine-linux-2.6.16.13/drivers/oprofile/oprof.h	2006-05-02 22:38:44.000000000 +0100
   2.244 -+++ ./drivers/oprofile/oprof.h	2006-05-04 17:41:51.000000000 +0100
   2.245 -@@ -35,5 +35,7 @@ void oprofile_create_files(struct super_
   2.246 +diff -pru ../pristine-linux-2.6.16.13/drivers/oprofile/oprof.h ./drivers/oprofile/oprof.h
   2.247 +--- ../pristine-linux-2.6.16.13/drivers/oprofile/oprof.h	2006-05-03 05:38:44.000000000 +0800
   2.248 ++++ ./drivers/oprofile/oprof.h	2006-06-19 23:42:36.000000000 +0800
   2.249 +@@ -35,5 +35,8 @@ void oprofile_create_files(struct super_
   2.250   void oprofile_timer_init(struct oprofile_operations * ops);
   2.251   
   2.252   int oprofile_set_backtrace(unsigned long depth);
   2.253  +
   2.254  +int oprofile_set_active(int active_domains[], unsigned int adomains);
   2.255 ++int oprofile_set_passive(int passive_domains[], unsigned int pdomains);
   2.256    
   2.257   #endif /* OPROF_H */
   2.258 -diff -pruN ../pristine-linux-2.6.16.13/drivers/oprofile/oprofile_files.c ./drivers/oprofile/oprofile_files.c
   2.259 ---- ../pristine-linux-2.6.16.13/drivers/oprofile/oprofile_files.c	2006-05-02 22:38:44.000000000 +0100
   2.260 -+++ ./drivers/oprofile/oprofile_files.c	2006-05-04 17:41:51.000000000 +0100
   2.261 +diff -pru ../pristine-linux-2.6.16.13/drivers/oprofile/oprofile_files.c ./drivers/oprofile/oprofile_files.c
   2.262 +--- ../pristine-linux-2.6.16.13/drivers/oprofile/oprofile_files.c	2006-05-03 05:38:44.000000000 +0800
   2.263 ++++ ./drivers/oprofile/oprofile_files.c	2006-06-19 23:29:07.000000000 +0800
   2.264  @@ -5,15 +5,21 @@
   2.265    * @remark Read the file COPYING
   2.266    *
   2.267 @@ -282,7 +335,7 @@ diff -pruN ../pristine-linux-2.6.16.13/d
   2.268   unsigned long fs_buffer_size = 131072;
   2.269   unsigned long fs_cpu_buffer_size = 8192;
   2.270   unsigned long fs_buffer_watershed = 32768; /* FIXME: tune */
   2.271 -@@ -117,11 +123,108 @@ static ssize_t dump_write(struct file * 
   2.272 +@@ -117,11 +123,202 @@ static ssize_t dump_write(struct file * 
   2.273   static struct file_operations dump_fops = {
   2.274   	.write		= dump_write,
   2.275   };
   2.276 @@ -384,17 +437,110 @@ diff -pruN ../pristine-linux-2.6.16.13/d
   2.277  +	.write		= adomain_write,
   2.278  +};
   2.279  +
   2.280 ++static unsigned int pdomains = 0;
   2.281 ++static int passive_domains[MAX_OPROF_DOMAINS];
   2.282 ++static DEFINE_MUTEX(pdom_mutex);
   2.283 ++
   2.284 ++static ssize_t pdomain_write(struct file * file, char const __user * buf, 
   2.285 ++			     size_t count, loff_t * offset)
   2.286 ++{
   2.287 ++	char *tmpbuf;
   2.288 ++	char *startp, *endp;
   2.289 ++	int i;
   2.290 ++	unsigned long val;
   2.291 ++	ssize_t retval = count;
   2.292 ++	
   2.293 ++	if (*offset)
   2.294 ++		return -EINVAL;	
   2.295 ++	if (count > TMPBUFSIZE - 1)
   2.296 ++		return -EINVAL;
   2.297 ++
   2.298 ++	if (!(tmpbuf = kmalloc(TMPBUFSIZE, GFP_KERNEL)))
   2.299 ++		return -ENOMEM;
   2.300 ++
   2.301 ++	if (copy_from_user(tmpbuf, buf, count)) {
   2.302 ++		kfree(tmpbuf);
   2.303 ++		return -EFAULT;
   2.304 ++	}
   2.305 ++	tmpbuf[count] = 0;
   2.306 ++
   2.307 ++	mutex_lock(&pdom_mutex);
   2.308 ++
   2.309 ++	startp = tmpbuf;
   2.310 ++	/* Parse one more than MAX_OPROF_DOMAINS, for easy error checking */
   2.311 ++	for (i = 0; i <= MAX_OPROF_DOMAINS; i++) {
   2.312 ++		val = simple_strtoul(startp, &endp, 0);
   2.313 ++		if (endp == startp)
   2.314 ++			break;
   2.315 ++		while (ispunct(*endp) || isspace(*endp))
   2.316 ++			endp++;
   2.317 ++		passive_domains[i] = val;
   2.318 ++		if (passive_domains[i] != val)
   2.319 ++			/* Overflow, force error below */
   2.320 ++			i = MAX_OPROF_DOMAINS + 1;
   2.321 ++		startp = endp;
   2.322 ++	}
   2.323 ++	/* Force error on trailing junk */
   2.324 ++	pdomains = *startp ? MAX_OPROF_DOMAINS + 1 : i;
   2.325 ++
   2.326 ++	kfree(tmpbuf);
   2.327 ++
   2.328 ++	if (pdomains > MAX_OPROF_DOMAINS
   2.329 ++	    || oprofile_set_passive(passive_domains, pdomains)) {
   2.330 ++		pdomains = 0;
   2.331 ++		retval = -EINVAL;
   2.332 ++	}
   2.333 ++
   2.334 ++	mutex_unlock(&pdom_mutex);
   2.335 ++	return retval;
   2.336 ++}
   2.337 ++
   2.338 ++static ssize_t pdomain_read(struct file * file, char __user * buf, 
   2.339 ++			    size_t count, loff_t * offset)
   2.340 ++{
   2.341 ++	char * tmpbuf;
   2.342 ++	size_t len;
   2.343 ++	int i;
   2.344 ++	ssize_t retval;
   2.345 ++
   2.346 ++	if (!(tmpbuf = kmalloc(TMPBUFSIZE, GFP_KERNEL)))
   2.347 ++		return -ENOMEM;
   2.348 ++
   2.349 ++	mutex_lock(&pdom_mutex);
   2.350 ++
   2.351 ++	len = 0;
   2.352 ++	for (i = 0; i < pdomains; i++)
   2.353 ++		len += snprintf(tmpbuf + len,
   2.354 ++				len < TMPBUFSIZE ? TMPBUFSIZE - len : 0,
   2.355 ++				"%u ", passive_domains[i]);
   2.356 ++	WARN_ON(len > TMPBUFSIZE);
   2.357 ++	if (len != 0 && len <= TMPBUFSIZE)
   2.358 ++		tmpbuf[len-1] = '\n';
   2.359 ++
   2.360 ++	mutex_unlock(&pdom_mutex);
   2.361 ++
   2.362 ++	retval = simple_read_from_buffer(buf, count, offset, tmpbuf, len);
   2.363 ++
   2.364 ++	kfree(tmpbuf);
   2.365 ++	return retval;
   2.366 ++}
   2.367 ++
   2.368 ++static struct file_operations passive_domain_ops = {
   2.369 ++	.read		= pdomain_read,
   2.370 ++	.write		= pdomain_write,
   2.371 ++};
   2.372 ++
   2.373   void oprofile_create_files(struct super_block * sb, struct dentry * root)
   2.374   {
   2.375   	oprofilefs_create_file(sb, root, "enable", &enable_fops);
   2.376   	oprofilefs_create_file_perm(sb, root, "dump", &dump_fops, 0666);
   2.377  +	oprofilefs_create_file(sb, root, "active_domains", &active_domain_ops);
   2.378 ++	oprofilefs_create_file(sb, root, "passive_domains", &passive_domain_ops);
   2.379   	oprofilefs_create_file(sb, root, "buffer", &event_buffer_fops);
   2.380   	oprofilefs_create_ulong(sb, root, "buffer_size", &fs_buffer_size);
   2.381   	oprofilefs_create_ulong(sb, root, "buffer_watershed", &fs_buffer_watershed);
   2.382 -diff -pruN ../pristine-linux-2.6.16.13/include/linux/oprofile.h ./include/linux/oprofile.h
   2.383 ---- ../pristine-linux-2.6.16.13/include/linux/oprofile.h	2006-05-02 22:38:44.000000000 +0100
   2.384 -+++ ./include/linux/oprofile.h	2006-05-04 17:41:51.000000000 +0100
   2.385 +--- ../pristine-linux-2.6.16.13/include/linux/oprofile.h	2006-05-03 05:38:44.000000000 +0800
   2.386 ++++ ./include/linux/oprofile.h	2006-06-19 23:52:00.000000000 +0800
   2.387  @@ -16,6 +16,8 @@
   2.388   #include <linux/types.h>
   2.389   #include <linux/spinlock.h>
   2.390 @@ -404,12 +550,15 @@ diff -pruN ../pristine-linux-2.6.16.13/i
   2.391    
   2.392   struct super_block;
   2.393   struct dentry;
   2.394 -@@ -27,6 +29,8 @@ struct oprofile_operations {
   2.395 +@@ -27,6 +29,11 @@ struct oprofile_operations {
   2.396   	/* create any necessary configuration files in the oprofile fs.
   2.397   	 * Optional. */
   2.398   	int (*create_files)(struct super_block * sb, struct dentry * root);
   2.399  +	/* setup active domains with Xen */
   2.400  +	int (*set_active)(int *active_domains, unsigned int adomains);
   2.401 ++        /* setup passive domains with Xen */
   2.402 ++        int (*set_passive)(int *passive_domains, unsigned int pdomains);
   2.403 ++	
   2.404   	/* Do any necessary interrupt setup. Optional. */
   2.405   	int (*setup)(void);
   2.406   	/* Do any necessary interrupt shutdown. Optional. */
     3.1 --- a/xen/arch/x86/oprofile/nmi_int.c	Tue Jun 27 11:17:14 2006 +0100
     3.2 +++ b/xen/arch/x86/oprofile/nmi_int.c	Tue Jun 27 11:23:06 2006 +0100
     3.3 @@ -269,7 +269,7 @@ static int __init p4_init(char * cpu_typ
     3.4  { 
     3.5  	__u8 cpu_model = current_cpu_data.x86_model;
     3.6  
     3.7 -	if (cpu_model > 4)
     3.8 +	if ((cpu_model > 6) || (cpu_model == 5))
     3.9  		return 0;
    3.10  
    3.11  #ifndef CONFIG_SMP
     4.1 --- a/xen/arch/x86/oprofile/xenoprof.c	Tue Jun 27 11:17:14 2006 +0100
     4.2 +++ b/xen/arch/x86/oprofile/xenoprof.c	Tue Jun 27 11:23:06 2006 +0100
     4.3 @@ -13,9 +13,13 @@
     4.4  /* Limit amount of pages used for shared buffer (per domain) */
     4.5  #define MAX_OPROF_SHARED_PAGES 32
     4.6  
     4.7 -domid_t active_domains[MAX_OPROF_DOMAINS];
     4.8 +struct domain *active_domains[MAX_OPROF_DOMAINS];
     4.9  int active_ready[MAX_OPROF_DOMAINS];
    4.10  unsigned int adomains;
    4.11 +
    4.12 +struct domain *passive_domains[MAX_OPROF_DOMAINS];
    4.13 +unsigned int pdomains;
    4.14 +
    4.15  unsigned int activated;
    4.16  struct domain *primary_profiler;
    4.17  int xenoprof_state = XENOPROF_IDLE;
    4.18 @@ -25,6 +29,7 @@ u64 invalid_buffer_samples;
    4.19  u64 corrupted_buffer_samples;
    4.20  u64 lost_samples;
    4.21  u64 active_samples;
    4.22 +u64 passive_samples;
    4.23  u64 idle_samples;
    4.24  u64 others_samples;
    4.25  
    4.26 @@ -44,9 +49,15 @@ int is_active(struct domain *d)
    4.27      return ((x != NULL) && (x->domain_type == XENOPROF_DOMAIN_ACTIVE));
    4.28  }
    4.29  
    4.30 +int is_passive(struct domain *d)
    4.31 +{
    4.32 +    struct xenoprof *x = d->xenoprof;
    4.33 +    return ((x != NULL) && (x->domain_type == XENOPROF_DOMAIN_PASSIVE));
    4.34 +}
    4.35 +
    4.36  int is_profiled(struct domain *d)
    4.37  {
    4.38 -    return is_active(d);
    4.39 +    return (is_active(d) || is_passive(d));
    4.40  }
    4.41  
    4.42  static void xenoprof_reset_stat(void)
    4.43 @@ -56,6 +67,7 @@ static void xenoprof_reset_stat(void)
    4.44      corrupted_buffer_samples = 0;
    4.45      lost_samples = 0;
    4.46      active_samples = 0;
    4.47 +    passive_samples = 0;
    4.48      idle_samples = 0;
    4.49      others_samples = 0;
    4.50  }
    4.51 @@ -83,13 +95,122 @@ static void xenoprof_reset_buf(struct do
    4.52      }
    4.53  }
    4.54  
    4.55 +char *alloc_xenoprof_buf(struct domain *d, int npages)
    4.56 +{
    4.57 +    char *rawbuf;
    4.58 +    int i, order;
    4.59 +
    4.60 +    /* allocate pages to store sample buffer shared with domain */
    4.61 +    order  = get_order_from_pages(npages);
    4.62 +    rawbuf = alloc_xenheap_pages(order);
    4.63 +    if ( rawbuf == NULL )
    4.64 +    {
    4.65 +        printk("alloc_xenoprof_buf(): memory allocation failed\n");
    4.66 +        return 0;
    4.67 +    }
    4.68 +
    4.69 +    /* Share pages so that kernel can map it */
    4.70 +    for ( i = 0; i < npages; i++ )
    4.71 +        share_xen_page_with_guest(
    4.72 +            virt_to_page(rawbuf + i * PAGE_SIZE), 
    4.73 +            d, XENSHARE_writable);
    4.74 +
    4.75 +    return rawbuf;
    4.76 +}
    4.77 +
    4.78 +int alloc_xenoprof_struct(struct domain *d, int max_samples, int is_passive)
    4.79 +{
    4.80 +    struct vcpu *v;
    4.81 +    int nvcpu, npages, bufsize, max_bufsize;
    4.82 +    int i;
    4.83 +
    4.84 +    d->xenoprof = xmalloc(struct xenoprof);
    4.85 +
    4.86 +    if ( d->xenoprof == NULL )
    4.87 +    {
    4.88 +        printk ("alloc_xenoprof_struct(): memory "
    4.89 +                "allocation (xmalloc) failed\n");
    4.90 +        return -ENOMEM;
    4.91 +    }
    4.92 +
    4.93 +    memset(d->xenoprof, 0, sizeof(*d->xenoprof));
    4.94 +
    4.95 +    nvcpu = 0;
    4.96 +    for_each_vcpu ( d, v )
    4.97 +        nvcpu++;
    4.98 +
    4.99 +    /* reduce buffer size if necessary to limit pages allocated */
   4.100 +    bufsize = sizeof(struct xenoprof_buf) +
   4.101 +        (max_samples - 1) * sizeof(struct event_log);
   4.102 +    max_bufsize = (MAX_OPROF_SHARED_PAGES * PAGE_SIZE) / nvcpu;
   4.103 +    if ( bufsize > max_bufsize )
   4.104 +    {
   4.105 +        bufsize = max_bufsize;
   4.106 +        max_samples = ( (max_bufsize - sizeof(struct xenoprof_buf)) /
   4.107 +                        sizeof(struct event_log) ) + 1;
   4.108 +    }
   4.109 +
   4.110 +    npages = (nvcpu * bufsize - 1) / PAGE_SIZE + 1;
   4.111 +    
   4.112 +    d->xenoprof->rawbuf = alloc_xenoprof_buf(is_passive ? dom0 : d, npages);
   4.113 +
   4.114 +    if ( d->xenoprof->rawbuf == NULL )
   4.115 +    {
   4.116 +        xfree(d->xenoprof);
   4.117 +        d->xenoprof = NULL;
   4.118 +        return -ENOMEM;
   4.119 +    }
   4.120 +
   4.121 +    d->xenoprof->npages = npages;
   4.122 +    d->xenoprof->nbuf = nvcpu;
   4.123 +    d->xenoprof->bufsize = bufsize;
   4.124 +    d->xenoprof->domain_ready = 0;
   4.125 +    d->xenoprof->domain_type = XENOPROF_DOMAIN_IGNORED;
   4.126 +
   4.127 +    /* Update buffer pointers for active vcpus */
   4.128 +    i = 0;
   4.129 +    for_each_vcpu ( d, v )
   4.130 +    {
   4.131 +        d->xenoprof->vcpu[v->vcpu_id].event_size = max_samples;
   4.132 +        d->xenoprof->vcpu[v->vcpu_id].buffer =
   4.133 +            (struct xenoprof_buf *)&d->xenoprof->rawbuf[i * bufsize];
   4.134 +        d->xenoprof->vcpu[v->vcpu_id].buffer->event_size = max_samples;
   4.135 +        d->xenoprof->vcpu[v->vcpu_id].buffer->vcpu_id = v->vcpu_id;
   4.136 +
   4.137 +        i++;
   4.138 +        /* in the unlikely case that the number of active vcpus changes */
   4.139 +        if ( i >= nvcpu )
   4.140 +            break;
   4.141 +    }
   4.142 +    
   4.143 +    return 0;
   4.144 +}
   4.145 +
   4.146 +void free_xenoprof_pages(struct domain *d)
   4.147 +{
   4.148 +    struct xenoprof *x;
   4.149 +    int order;
   4.150 +
   4.151 +    x = d->xenoprof;
   4.152 +    if ( x == NULL )
   4.153 +        return;
   4.154 +
   4.155 +    if ( x->rawbuf != NULL )
   4.156 +    {
   4.157 +        order = get_order_from_pages(x->npages);
   4.158 +        free_xenheap_pages(x->rawbuf, order);
   4.159 +    }
   4.160 +
   4.161 +    xfree(x);
   4.162 +    d->xenoprof = NULL;
   4.163 +}
   4.164 +
   4.165  int active_index(struct domain *d)
   4.166  {
   4.167      int i;
   4.168 -    domid_t id = d->domain_id;
   4.169  
   4.170      for ( i = 0; i < adomains; i++ )
   4.171 -        if ( active_domains[i] == id )
   4.172 +        if ( active_domains[i] == d )
   4.173              return i;
   4.174  
   4.175      return -1;
   4.176 @@ -132,49 +253,118 @@ int reset_active(struct domain *d)
   4.177      x->domain_ready = 0;
   4.178      x->domain_type = XENOPROF_DOMAIN_IGNORED;
   4.179      active_ready[ind] = 0;
   4.180 +    active_domains[ind] = NULL;
   4.181      activated--;
   4.182 +    put_domain(d); 
   4.183 +
   4.184      if ( activated <= 0 )
   4.185          adomains = 0;
   4.186  
   4.187      return 0;
   4.188  }
   4.189  
   4.190 -int reset_active_list(void)
   4.191 +void reset_passive(struct domain *d)
   4.192 +{
   4.193 +    struct xenoprof *x;
   4.194 +
   4.195 +    if (d==0)
   4.196 +        return;
   4.197 +
   4.198 +    x = d->xenoprof;
   4.199 +    if ( x == NULL )
   4.200 +        return;
   4.201 +
   4.202 +    x->domain_type = XENOPROF_DOMAIN_IGNORED;
   4.203 +
   4.204 +    return;
   4.205 +}
   4.206 +
   4.207 +void reset_active_list(void)
   4.208  {
   4.209      int i;
   4.210 -    struct domain *d;
   4.211  
   4.212      for ( i = 0; i < adomains; i++ )
   4.213      {
   4.214          if ( active_ready[i] )
   4.215          {
   4.216 -            d = find_domain_by_id(active_domains[i]);
   4.217 -            if ( d != NULL )
   4.218 -            {
   4.219 -                reset_active(d);
   4.220 -                put_domain(d);
   4.221 -            }
   4.222 +            reset_active(active_domains[i]);
   4.223          }
   4.224      }
   4.225  
   4.226      adomains = 0;
   4.227      activated = 0;
   4.228 +}
   4.229  
   4.230 -    return 0;
   4.231 +void reset_passive_list(void)
   4.232 +{
   4.233 +    int i;
   4.234 +
   4.235 +    for ( i = 0; i < pdomains; i++ )
   4.236 +    {
   4.237 +        reset_passive(passive_domains[i]);
   4.238 +        put_domain(passive_domains[i]);
   4.239 +        passive_domains[i] = NULL;
   4.240 +    }
   4.241 +
   4.242 +    pdomains = 0;
   4.243  }
   4.244  
   4.245  int add_active_list (domid_t domid)
   4.246  {
   4.247 +    struct domain *d;
   4.248 +
   4.249      if ( adomains >= MAX_OPROF_DOMAINS )
   4.250          return -E2BIG;
   4.251  
   4.252 -    active_domains[adomains] = domid;
   4.253 +    d = find_domain_by_id(domid); 
   4.254 +    if ( d == NULL )
   4.255 +        return -EINVAL;
   4.256 +
   4.257 +    active_domains[adomains] = d;
   4.258      active_ready[adomains] = 0;
   4.259      adomains++;
   4.260  
   4.261      return 0;
   4.262  }
   4.263  
   4.264 +int add_passive_list(XEN_GUEST_HANDLE(void) arg)
   4.265 +{
   4.266 +    struct xenoprof_passive passive;
   4.267 +    struct domain *d;
   4.268 +    int ret = 0;
   4.269 +
   4.270 +    if ( pdomains >= MAX_OPROF_DOMAINS )
   4.271 +        return -E2BIG;
   4.272 +
   4.273 +    if ( copy_from_guest(&passive, arg, 1) )
   4.274 +        return -EFAULT;
   4.275 +
   4.276 +    d = find_domain_by_id(passive.domain_id); 
   4.277 +    if ( d == NULL )
   4.278 +        return -EINVAL;
   4.279 +
   4.280 +    if ( (d->xenoprof == NULL) && 
   4.281 +         ((ret = alloc_xenoprof_struct(d, passive.max_samples, 1)) < 0) ) {
   4.282 +        put_domain(d);
   4.283 +        return -ENOMEM;
   4.284 +    }
   4.285 +
   4.286 +    d->xenoprof->domain_type = XENOPROF_DOMAIN_PASSIVE;
   4.287 +    passive.nbuf = d->xenoprof->nbuf;
   4.288 +    passive.bufsize = d->xenoprof->bufsize;
   4.289 +    passive.buf_maddr = __pa(d->xenoprof->rawbuf);
   4.290 +
   4.291 +    if ( copy_to_guest(arg, &passive, 1) ) {
   4.292 +        put_domain(d);
   4.293 +        return -EFAULT;
   4.294 +    }
   4.295 +    
   4.296 +    passive_domains[pdomains] = d;
   4.297 +    pdomains++;
   4.298 +
   4.299 +    return ret;
   4.300 +}
   4.301 +
   4.302  void xenoprof_log_event(
   4.303      struct vcpu *vcpu, unsigned long eip, int mode, int event)
   4.304  {
   4.305 @@ -231,7 +421,10 @@ void xenoprof_log_event(
   4.306          if ( head >= size )
   4.307              head = 0;
   4.308          buf->event_head = head;
   4.309 -        active_samples++;
   4.310 +        if ( is_active(vcpu->domain) )
   4.311 +            active_samples++;
   4.312 +        else
   4.313 +            passive_samples++;
   4.314          if ( mode == 0 )
   4.315              buf->user_samples++;
   4.316          else if ( mode == 1 )
   4.317 @@ -241,114 +434,6 @@ void xenoprof_log_event(
   4.318      }
   4.319  }
   4.320  
   4.321 -char *alloc_xenoprof_buf(struct domain *d, int npages)
   4.322 -{
   4.323 -    char *rawbuf;
   4.324 -    int i, order;
   4.325 -
   4.326 -    /* allocate pages to store sample buffer shared with domain */
   4.327 -    order  = get_order_from_pages(npages);
   4.328 -    rawbuf = alloc_xenheap_pages(order);
   4.329 -    if ( rawbuf == NULL )
   4.330 -    {
   4.331 -        printk("alloc_xenoprof_buf(): memory allocation failed\n");
   4.332 -        return 0;
   4.333 -    }
   4.334 -
   4.335 -    /* Share pages so that kernel can map it */
   4.336 -    for ( i = 0; i < npages; i++ )
   4.337 -        share_xen_page_with_guest(
   4.338 -            virt_to_page(rawbuf + i * PAGE_SIZE), 
   4.339 -            d, XENSHARE_writable);
   4.340 -
   4.341 -    return rawbuf;
   4.342 -}
   4.343 -
   4.344 -int alloc_xenoprof_struct(struct domain *d, int max_samples)
   4.345 -{
   4.346 -    struct vcpu *v;
   4.347 -    int nvcpu, npages, bufsize, max_bufsize;
   4.348 -    int i;
   4.349 -
   4.350 -    d->xenoprof = xmalloc(struct xenoprof);
   4.351 -
   4.352 -    if ( d->xenoprof == NULL )
   4.353 -    {
   4.354 -        printk ("alloc_xenoprof_struct(): memory "
   4.355 -                "allocation (xmalloc) failed\n");
   4.356 -        return -ENOMEM;
   4.357 -    }
   4.358 -
   4.359 -    memset(d->xenoprof, 0, sizeof(*d->xenoprof));
   4.360 -
   4.361 -    nvcpu = 0;
   4.362 -    for_each_vcpu ( d, v )
   4.363 -        nvcpu++;
   4.364 -
   4.365 -    /* reduce buffer size if necessary to limit pages allocated */
   4.366 -    bufsize = sizeof(struct xenoprof_buf) +
   4.367 -        (max_samples - 1) * sizeof(struct event_log);
   4.368 -    max_bufsize = (MAX_OPROF_SHARED_PAGES * PAGE_SIZE) / nvcpu;
   4.369 -    if ( bufsize > max_bufsize )
   4.370 -    {
   4.371 -        bufsize = max_bufsize;
   4.372 -        max_samples = ( (max_bufsize - sizeof(struct xenoprof_buf)) /
   4.373 -                        sizeof(struct event_log) ) + 1;
   4.374 -    }
   4.375 -
   4.376 -    npages = (nvcpu * bufsize - 1) / PAGE_SIZE + 1;
   4.377 -    d->xenoprof->rawbuf = alloc_xenoprof_buf(d, npages);
   4.378 -    if ( d->xenoprof->rawbuf == NULL )
   4.379 -    {
   4.380 -        xfree(d->xenoprof);
   4.381 -        d->xenoprof = NULL;
   4.382 -        return -ENOMEM;
   4.383 -    }
   4.384 -
   4.385 -    d->xenoprof->npages = npages;
   4.386 -    d->xenoprof->nbuf = nvcpu;
   4.387 -    d->xenoprof->bufsize = bufsize;
   4.388 -    d->xenoprof->domain_ready = 0;
   4.389 -    d->xenoprof->domain_type = XENOPROF_DOMAIN_IGNORED;
   4.390 -
   4.391 -    /* Update buffer pointers for active vcpus */
   4.392 -    i = 0;
   4.393 -    for_each_vcpu ( d, v )
   4.394 -    {
   4.395 -        d->xenoprof->vcpu[v->vcpu_id].event_size = max_samples;
   4.396 -        d->xenoprof->vcpu[v->vcpu_id].buffer =
   4.397 -            (struct xenoprof_buf *)&d->xenoprof->rawbuf[i * bufsize];
   4.398 -        d->xenoprof->vcpu[v->vcpu_id].buffer->event_size = max_samples;
   4.399 -        d->xenoprof->vcpu[v->vcpu_id].buffer->vcpu_id = v->vcpu_id;
   4.400 -
   4.401 -        i++;
   4.402 -        /* in the unlikely case that the number of active vcpus changes */
   4.403 -        if ( i >= nvcpu )
   4.404 -            break;
   4.405 -    }
   4.406 -
   4.407 -    return 0;
   4.408 -}
   4.409 -
   4.410 -void free_xenoprof_pages(struct domain *d)
   4.411 -{
   4.412 -    struct xenoprof *x;
   4.413 -    int order;
   4.414 -
   4.415 -    x = d->xenoprof;
   4.416 -    if ( x == NULL )
   4.417 -        return;
   4.418 -
   4.419 -    if ( x->rawbuf != NULL )
   4.420 -    {
   4.421 -        order = get_order_from_pages(x->npages);
   4.422 -        free_xenheap_pages(x->rawbuf, order);
   4.423 -    }
   4.424 -
   4.425 -    xfree(x);
   4.426 -    d->xenoprof = NULL;
   4.427 -}
   4.428 -
   4.429  int xenoprof_op_init(XEN_GUEST_HANDLE(void) arg)
   4.430  {
   4.431      struct xenoprof_init xenoprof_init;
   4.432 @@ -373,7 +458,7 @@ int xenoprof_op_init(XEN_GUEST_HANDLE(vo
   4.433       * is called. Memory is then kept until domain is destroyed.
   4.434       */
   4.435      if ( (d->xenoprof == NULL) &&
   4.436 -         ((ret = alloc_xenoprof_struct(d, xenoprof_init.max_samples)) < 0) )
   4.437 +         ((ret = alloc_xenoprof_struct(d, xenoprof_init.max_samples, 0)) < 0) )
   4.438          goto err;
   4.439  
   4.440      xenoprof_reset_buf(d);
   4.441 @@ -429,7 +514,14 @@ int do_xenoprof_op(int op, XEN_GUEST_HAN
   4.442  
   4.443      case XENOPROF_reset_active_list:
   4.444      {
   4.445 -        ret = reset_active_list();
   4.446 +        reset_active_list();
   4.447 +        ret = 0;
   4.448 +        break;
   4.449 +    }
   4.450 +    case XENOPROF_reset_passive_list:
   4.451 +    {
   4.452 +        reset_passive_list();
   4.453 +        ret = 0;
   4.454          break;
   4.455      }
   4.456      case XENOPROF_set_active:
   4.457 @@ -442,6 +534,13 @@ int do_xenoprof_op(int op, XEN_GUEST_HAN
   4.458          ret = add_active_list(domid);
   4.459          break;
   4.460      }
   4.461 +    case XENOPROF_set_passive:
   4.462 +    {
   4.463 +        if ( xenoprof_state != XENOPROF_IDLE )
   4.464 +            return -EPERM;
   4.465 +        ret = add_passive_list(arg);
   4.466 +        break;
   4.467 +    }
   4.468      case XENOPROF_reserve_counters:
   4.469          if ( xenoprof_state != XENOPROF_IDLE )
   4.470              return -EPERM;
   4.471 @@ -484,14 +583,20 @@ int do_xenoprof_op(int op, XEN_GUEST_HAN
   4.472          break;
   4.473  
   4.474      case XENOPROF_enable_virq:
   4.475 +    {
   4.476 +        int i;
   4.477          if ( current->domain == primary_profiler )
   4.478          {
   4.479              nmi_enable_virq();
   4.480              xenoprof_reset_stat();
   4.481 +            for ( i = 0; i < pdomains; i++ ) {
   4.482 +                xenoprof_reset_buf(passive_domains[i]);
   4.483 +            }
   4.484          }
   4.485          xenoprof_reset_buf(current->domain);
   4.486          ret = set_active(current->domain);
   4.487          break;
   4.488 +    }
   4.489  
   4.490      case XENOPROF_start:
   4.491          ret = -EPERM;
   4.492 @@ -525,6 +630,7 @@ int do_xenoprof_op(int op, XEN_GUEST_HAN
   4.493              xenoprof_state = XENOPROF_IDLE;
   4.494              nmi_release_counters();
   4.495              nmi_disable_virq();
   4.496 +            reset_passive_list();
   4.497              ret = 0;
   4.498          }
   4.499          break;
     5.1 --- a/xen/include/public/xenoprof.h	Tue Jun 27 11:17:14 2006 +0100
     5.2 +++ b/xen/include/public/xenoprof.h	Tue Jun 27 11:23:06 2006 +0100
     5.3 @@ -80,6 +80,15 @@ struct xenoprof_counter {
     5.4  typedef struct xenoprof_counter xenoprof_counter_t;
     5.5  DEFINE_XEN_GUEST_HANDLE(xenoprof_counter_t);
     5.6  
     5.7 +typedef struct xenoprof_passive {
     5.8 +    uint16_t domain_id;
     5.9 +    int32_t  max_samples;
    5.10 +    int32_t  nbuf;
    5.11 +    int32_t  bufsize;
    5.12 +    uint64_t buf_maddr;
    5.13 +} xenoprof_passive_t;
    5.14 +DEFINE_XEN_GUEST_HANDLE(xenoprof_passive_t);
    5.15 +
    5.16  
    5.17  #endif /* __XEN_PUBLIC_XENOPROF_H__ */
    5.18  
     6.1 --- a/xen/include/xen/xenoprof.h	Tue Jun 27 11:17:14 2006 +0100
     6.2 +++ b/xen/include/xen/xenoprof.h	Tue Jun 27 11:23:06 2006 +0100
     6.3 @@ -14,6 +14,7 @@
     6.4  
     6.5  #define XENOPROF_DOMAIN_IGNORED    0
     6.6  #define XENOPROF_DOMAIN_ACTIVE     1
     6.7 +#define XENOPROF_DOMAIN_PASSIVE    2
     6.8  
     6.9  #define XENOPROF_IDLE              0
    6.10  #define XENOPROF_COUNTERS_RESERVED 1