ia64/xen-unstable

changeset 12629:8ab9b43ad557

[IA64] xenoprof ia64 xen side

Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
Signed-off-by: Alex Williamson <alex.williamson@hp.com>
author awilliam@xenbuild.aw
date Tue Nov 28 21:35:13 2006 -0700 (2006-11-28)
parents 112e0e3b4852
children fe565ac4bf25
files xen/arch/ia64/Rules.mk xen/arch/ia64/linux-xen/Makefile xen/arch/ia64/linux-xen/perfmon.c xen/arch/ia64/linux-xen/perfmon_default_smpl.c xen/arch/ia64/linux/Makefile xen/arch/ia64/xen/Makefile xen/arch/ia64/xen/dom0_ops.c xen/arch/ia64/xen/domain.c xen/arch/ia64/xen/hypercall.c xen/arch/ia64/xen/oprofile/Makefile xen/arch/ia64/xen/oprofile/perfmon.c xen/arch/ia64/xen/oprofile/xenoprof.c xen/include/asm-ia64/config.h xen/include/asm-ia64/domain.h xen/include/asm-ia64/linux-xen/asm/perfmon.h xen/include/asm-ia64/xenoprof.h xen/include/public/arch-ia64.h
line diff
     1.1 --- a/xen/arch/ia64/Rules.mk	Tue Nov 28 11:34:03 2006 -0700
     1.2 +++ b/xen/arch/ia64/Rules.mk	Tue Nov 28 21:35:13 2006 -0700
     1.3 @@ -3,6 +3,7 @@
     1.4  
     1.5  HAS_ACPI := y
     1.6  HAS_VGA  := y
     1.7 +xenoprof := y
     1.8  VALIDATE_VT	?= n
     1.9  no_warns ?= n
    1.10  xen_ia64_expose_p2m	?= y
     2.1 --- a/xen/arch/ia64/linux-xen/Makefile	Tue Nov 28 11:34:03 2006 -0700
     2.2 +++ b/xen/arch/ia64/linux-xen/Makefile	Tue Nov 28 21:35:13 2006 -0700
     2.3 @@ -18,3 +18,5 @@ obj-y += unwind.o
     2.4  obj-y += iosapic.o
     2.5  obj-y += numa.o
     2.6  obj-y += mm_numa.o
     2.7 +obj-y += perfmon.o
     2.8 +obj-y += perfmon_default_smpl.o
     3.1 --- a/xen/arch/ia64/linux-xen/perfmon.c	Tue Nov 28 11:34:03 2006 -0700
     3.2 +++ b/xen/arch/ia64/linux-xen/perfmon.c	Tue Nov 28 21:35:13 2006 -0700
     3.3 @@ -17,6 +17,12 @@
     3.4   *
     3.5   * More information about perfmon available at:
     3.6   * 	http://www.hpl.hp.com/research/linux/perfmon
     3.7 + *
     3.8 + *
     3.9 + * For Xen/IA64 xenoprof
    3.10 + * Copyright (c) 2006 Isaku Yamahata <yamahata at valinux co jp>
    3.11 + *                    VA Linux Systems Japan K.K.
    3.12 + *
    3.13   */
    3.14  
    3.15  #include <linux/config.h>
    3.16 @@ -42,7 +48,11 @@
    3.17  #include <linux/rcupdate.h>
    3.18  #include <linux/completion.h>
    3.19  
    3.20 +#ifndef XEN
    3.21  #include <asm/errno.h>
    3.22 +#else
    3.23 +#include <xen/errno.h>
    3.24 +#endif
    3.25  #include <asm/intrinsics.h>
    3.26  #include <asm/page.h>
    3.27  #include <asm/perfmon.h>
    3.28 @@ -52,6 +62,15 @@
    3.29  #include <asm/uaccess.h>
    3.30  #include <asm/delay.h>
    3.31  
    3.32 +#ifdef XEN
    3.33 +#include <xen/guest_access.h>
    3.34 +#include <asm/hw_irq.h>
    3.35 +#define CONFIG_PERFMON
    3.36 +#define pid		vcpu_id
    3.37 +#define thread		arch._thread
    3.38 +#define task_pt_regs	vcpu_regs
    3.39 +#endif
    3.40 +
    3.41  #ifdef CONFIG_PERFMON
    3.42  /*
    3.43   * perfmon context state
    3.44 @@ -287,7 +306,9 @@ typedef struct pfm_context {
    3.45  
    3.46  	unsigned long		ctx_ovfl_regs[4];	/* which registers overflowed (notification) */
    3.47  
    3.48 +#ifndef XEN
    3.49  	struct completion	ctx_restart_done;  	/* use for blocking notification mode */
    3.50 +#endif
    3.51  
    3.52  	unsigned long		ctx_used_pmds[4];	/* bitmask of PMD used            */
    3.53  	unsigned long		ctx_all_pmds[4];	/* bitmask of all accessible PMDs */
    3.54 @@ -320,6 +341,7 @@ typedef struct pfm_context {
    3.55  	unsigned long		ctx_smpl_size;		/* size of sampling buffer */
    3.56  	void			*ctx_smpl_vaddr;	/* user level virtual address of smpl buffer */
    3.57  
    3.58 +#ifndef XEN
    3.59  	wait_queue_head_t 	ctx_msgq_wait;
    3.60  	pfm_msg_t		ctx_msgq[PFM_MAX_MSGS];
    3.61  	int			ctx_msgq_head;
    3.62 @@ -327,6 +349,7 @@ typedef struct pfm_context {
    3.63  	struct fasync_struct	*ctx_async_queue;
    3.64  
    3.65  	wait_queue_head_t 	ctx_zombieq;		/* termination cleanup wait queue */
    3.66 +#endif
    3.67  } pfm_context_t;
    3.68  
    3.69  /*
    3.70 @@ -371,6 +394,9 @@ typedef struct {
    3.71  	unsigned int		pfs_sys_use_dbregs;	   /* incremented when a system wide session uses debug regs */
    3.72  	unsigned int		pfs_ptrace_use_dbregs;	   /* incremented when a process uses debug regs */
    3.73  	struct task_struct	*pfs_sys_session[NR_CPUS]; /* point to task owning a system-wide session */
    3.74 +#ifdef XEN
    3.75 +#define XENOPROF_TASK	((struct task_struct*)1)
    3.76 +#endif
    3.77  } pfm_session_t;
    3.78  
    3.79  /*
    3.80 @@ -499,10 +525,14 @@ typedef struct {
    3.81  static pfm_stats_t		pfm_stats[NR_CPUS];
    3.82  static pfm_session_t		pfm_sessions;	/* global sessions information */
    3.83  
    3.84 +#ifndef XEN
    3.85  static DEFINE_SPINLOCK(pfm_alt_install_check);
    3.86 +#endif
    3.87  static pfm_intr_handler_desc_t  *pfm_alt_intr_handler;
    3.88  
    3.89 +#ifndef XEN
    3.90  static struct proc_dir_entry 	*perfmon_dir;
    3.91 +#endif
    3.92  static pfm_uuid_t		pfm_null_uuid = {0,};
    3.93  
    3.94  static spinlock_t		pfm_buffer_fmt_lock;
    3.95 @@ -514,6 +544,7 @@ static pmu_config_t		*pmu_conf;
    3.96  pfm_sysctl_t pfm_sysctl;
    3.97  EXPORT_SYMBOL(pfm_sysctl);
    3.98  
    3.99 +#ifndef XEN
   3.100  static ctl_table pfm_ctl_table[]={
   3.101  	{1, "debug", &pfm_sysctl.debug, sizeof(int), 0666, NULL, &proc_dointvec, NULL,},
   3.102  	{2, "debug_ovfl", &pfm_sysctl.debug_ovfl, sizeof(int), 0666, NULL, &proc_dointvec, NULL,},
   3.103 @@ -533,10 +564,12 @@ static struct ctl_table_header *pfm_sysc
   3.104  
   3.105  static int pfm_context_unload(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs);
   3.106  static int pfm_flush(struct file *filp);
   3.107 +#endif
   3.108  
   3.109  #define pfm_get_cpu_var(v)		__ia64_per_cpu_var(v)
   3.110  #define pfm_get_cpu_data(a,b)		per_cpu(a, b)
   3.111  
   3.112 +#ifndef XEN
   3.113  static inline void
   3.114  pfm_put_task(struct task_struct *task)
   3.115  {
   3.116 @@ -568,6 +601,7 @@ pfm_unreserve_page(unsigned long a)
   3.117  {
   3.118  	ClearPageReserved(vmalloc_to_page((void*)a));
   3.119  }
   3.120 +#endif
   3.121  
   3.122  static inline unsigned long
   3.123  pfm_protect_ctx_ctxsw(pfm_context_t *x)
   3.124 @@ -582,6 +616,7 @@ pfm_unprotect_ctx_ctxsw(pfm_context_t *x
   3.125  	spin_unlock(&(x)->ctx_lock);
   3.126  }
   3.127  
   3.128 +#ifndef XEN
   3.129  static inline unsigned int
   3.130  pfm_do_munmap(struct mm_struct *mm, unsigned long addr, size_t len, int acct)
   3.131  {
   3.132 @@ -606,16 +641,19 @@ static struct file_system_type pfm_fs_ty
   3.133  	.get_sb   = pfmfs_get_sb,
   3.134  	.kill_sb  = kill_anon_super,
   3.135  };
   3.136 +#endif
   3.137  
   3.138  DEFINE_PER_CPU(unsigned long, pfm_syst_info);
   3.139  DEFINE_PER_CPU(struct task_struct *, pmu_owner);
   3.140  DEFINE_PER_CPU(pfm_context_t  *, pmu_ctx);
   3.141  DEFINE_PER_CPU(unsigned long, pmu_activation_number);
   3.142 +#ifndef XEN
   3.143  EXPORT_PER_CPU_SYMBOL_GPL(pfm_syst_info);
   3.144  
   3.145  
   3.146  /* forward declaration */
   3.147  static struct file_operations pfm_file_ops;
   3.148 +#endif
   3.149  
   3.150  /*
   3.151   * forward declarations
   3.152 @@ -641,7 +679,9 @@ static pmu_config_t *pmu_confs[]={
   3.153  };
   3.154  
   3.155  
   3.156 +#ifndef XEN
   3.157  static int pfm_end_notify_user(pfm_context_t *ctx);
   3.158 +#endif
   3.159  
   3.160  static inline void
   3.161  pfm_clear_psr_pp(void)
   3.162 @@ -750,6 +790,7 @@ pfm_write_soft_counter(pfm_context_t *ct
   3.163  	ia64_set_pmd(i, val & ovfl_val);
   3.164  }
   3.165  
   3.166 +#ifndef XEN
   3.167  static pfm_msg_t *
   3.168  pfm_get_new_msg(pfm_context_t *ctx)
   3.169  {
   3.170 @@ -837,6 +878,7 @@ pfm_rvfree(void *mem, unsigned long size
   3.171  	}
   3.172  	return;
   3.173  }
   3.174 +#endif
   3.175  
   3.176  static pfm_context_t *
   3.177  pfm_context_alloc(void)
   3.178 @@ -864,6 +906,7 @@ pfm_context_free(pfm_context_t *ctx)
   3.179  	}
   3.180  }
   3.181  
   3.182 +#ifndef XEN
   3.183  static void
   3.184  pfm_mask_monitoring(struct task_struct *task)
   3.185  {
   3.186 @@ -1034,6 +1077,7 @@ pfm_restore_monitoring(struct task_struc
   3.187  	}
   3.188  	pfm_set_psr_l(psr);
   3.189  }
   3.190 +#endif
   3.191  
   3.192  static inline void
   3.193  pfm_save_pmds(unsigned long *pmds, unsigned long mask)
   3.194 @@ -1047,6 +1091,7 @@ pfm_save_pmds(unsigned long *pmds, unsig
   3.195  	}
   3.196  }
   3.197  
   3.198 +#ifndef XEN
   3.199  /*
   3.200   * reload from thread state (used for ctxw only)
   3.201   */
   3.202 @@ -1100,7 +1145,37 @@ pfm_copy_pmds(struct task_struct *task, 
   3.203  			ctx->ctx_pmds[i].val));
   3.204  	}
   3.205  }
   3.206 -
   3.207 +#else
   3.208 +static inline void
   3.209 +xenpfm_restore_pmds(pfm_context_t* ctx)
   3.210 +{
   3.211 +	int i;
   3.212 +	unsigned long ovfl_val = pmu_conf->ovfl_val;
   3.213 +	unsigned long mask = ctx->ctx_all_pmds[0];
   3.214 +	unsigned long val;
   3.215 +
   3.216 +	for (i = 0; mask; i++, mask >>= 1) {
   3.217 +		if ((mask & 0x1) == 0)
   3.218 +			continue;
   3.219 +
   3.220 +		val = ctx->ctx_pmds[i].val;
   3.221 +		/*
   3.222 +		 * We break up the 64 bit value into 2 pieces
   3.223 +		 * the lower bits go to the machine state in the
   3.224 +		 * thread (will be reloaded on ctxsw in).
   3.225 +		 * The upper part stays in the soft-counter.
   3.226 +		 */
   3.227 +		if (PMD_IS_COUNTING(i)) {
   3.228 +			ctx->ctx_pmds[i].val = val & ~ovfl_val;
   3.229 +			val &= ovfl_val;
   3.230 +		}
   3.231 +		ia64_set_pmd(i, val);		
   3.232 +	}
   3.233 +	ia64_srlz_d();
   3.234 +}
   3.235 +#endif
   3.236 +
   3.237 +#ifndef XEN
   3.238  /*
   3.239   * propagate PMC from context to thread-state
   3.240   */
   3.241 @@ -1133,6 +1208,23 @@ pfm_restore_pmcs(unsigned long *pmcs, un
   3.242  	}
   3.243  	ia64_srlz_d();
   3.244  }
   3.245 +#else
   3.246 +static inline void
   3.247 +xenpfm_restore_pmcs(pfm_context_t* ctx)
   3.248 +{
   3.249 +	int i;
   3.250 +	unsigned long mask = ctx->ctx_all_pmcs[0];
   3.251 +	
   3.252 +	for (i = 0; mask; i++, mask >>= 1) {
   3.253 +		if ((mask & 0x1) == 0)
   3.254 +			continue;
   3.255 +		ia64_set_pmc(i, ctx->ctx_pmcs[i]);
   3.256 +		DPRINT(("pmc[%d]=0x%lx\n", i, ctx->ctx_pmcs[i]));
   3.257 +	}
   3.258 +	ia64_srlz_d();
   3.259 +	
   3.260 +}
   3.261 +#endif
   3.262  
   3.263  static inline int
   3.264  pfm_uuid_cmp(pfm_uuid_t a, pfm_uuid_t b)
   3.265 @@ -1305,7 +1397,11 @@ pfm_reserve_session(struct task_struct *
   3.266  
   3.267  		DPRINT(("reserving system wide session on CPU%u currently on CPU%u\n", cpu, smp_processor_id()));
   3.268  
   3.269 +#ifndef XEN
   3.270  		pfm_sessions.pfs_sys_session[cpu] = task;
   3.271 +#else
   3.272 +		pfm_sessions.pfs_sys_session[cpu] = XENOPROF_TASK;
   3.273 +#endif
   3.274  
   3.275  		pfm_sessions.pfs_sys_sessions++ ;
   3.276  
   3.277 @@ -1332,7 +1428,11 @@ pfm_reserve_session(struct task_struct *
   3.278  
   3.279  error_conflict:
   3.280  	DPRINT(("system wide not possible, conflicting session [%d] on CPU%d\n",
   3.281 +#ifndef XEN
   3.282    		pfm_sessions.pfs_sys_session[cpu]->pid,
   3.283 +#else
   3.284 +		-1,
   3.285 +#endif
   3.286  		cpu));
   3.287  abort:
   3.288  	UNLOCK_PFS(flags);
   3.289 @@ -1392,6 +1492,7 @@ pfm_unreserve_session(pfm_context_t *ctx
   3.290  	return 0;
   3.291  }
   3.292  
   3.293 +#ifndef XEN
   3.294  /*
   3.295   * removes virtual mapping of the sampling buffer.
   3.296   * IMPORTANT: cannot be called with interrupts disable, e.g. inside
   3.297 @@ -1428,6 +1529,7 @@ pfm_remove_smpl_mapping(struct task_stru
   3.298  
   3.299  	return 0;
   3.300  }
   3.301 +#endif
   3.302  
   3.303  /*
   3.304   * free actual physical storage used by sampling buffer
   3.305 @@ -1477,6 +1579,7 @@ pfm_exit_smpl_buffer(pfm_buffer_fmt_t *f
   3.306  
   3.307  }
   3.308  
   3.309 +#ifndef XEN
   3.310  /*
   3.311   * pfmfs should _never_ be mounted by userland - too much of security hassle,
   3.312   * no real gain from having the whole whorehouse mounted. So we don't need
   3.313 @@ -1901,6 +2004,7 @@ pfm_flush(struct file *filp)
   3.314  
   3.315  	return 0;
   3.316  }
   3.317 +#endif
   3.318  /*
   3.319   * called either on explicit close() or from exit_files(). 
   3.320   * Only the LAST user of the file gets to this point, i.e., it is
   3.321 @@ -1916,19 +2020,27 @@ pfm_flush(struct file *filp)
   3.322   * When called from exit_files(), the current task is not yet ZOMBIE but we
   3.323   * flush the PMU state to the context. 
   3.324   */
   3.325 +#ifndef XEN
   3.326  static int
   3.327  pfm_close(struct inode *inode, struct file *filp)
   3.328 -{
   3.329 +#else
   3.330 +static int
   3.331 +pfm_close(pfm_context_t *ctx)
   3.332 +#endif
   3.333 +{
   3.334 +#ifndef XEN
   3.335  	pfm_context_t *ctx;
   3.336  	struct task_struct *task;
   3.337  	struct pt_regs *regs;
   3.338    	DECLARE_WAITQUEUE(wait, current);
   3.339  	unsigned long flags;
   3.340 +#endif
   3.341  	unsigned long smpl_buf_size = 0UL;
   3.342  	void *smpl_buf_addr = NULL;
   3.343  	int free_possible = 1;
   3.344  	int state, is_system;
   3.345  
   3.346 +#ifndef XEN
   3.347  	DPRINT(("pfm_close called private=%p\n", filp->private_data));
   3.348  
   3.349  	if (PFM_IS_FILE(filp) == 0) {
   3.350 @@ -1943,10 +2055,14 @@ pfm_close(struct inode *inode, struct fi
   3.351  	}
   3.352  
   3.353  	PROTECT_CTX(ctx, flags);
   3.354 +#else
   3.355 +	BUG_ON(!spin_is_locked(&ctx->ctx_lock));
   3.356 +#endif
   3.357  
   3.358  	state     = ctx->ctx_state;
   3.359  	is_system = ctx->ctx_fl_system;
   3.360  
   3.361 +#ifndef XEN
   3.362  	task = PFM_CTX_TASK(ctx);
   3.363  	regs = task_pt_regs(task);
   3.364  
   3.365 @@ -2045,8 +2161,15 @@ pfm_close(struct inode *inode, struct fi
   3.366  		pfm_context_unload(ctx, NULL, 0, regs);
   3.367  #endif
   3.368  	}
   3.369 -
   3.370 +#else
   3.371 +	/* XXX XEN */
   3.372 +	/* unload context */
   3.373 +	BUG_ON(state != PFM_CTX_UNLOADED);
   3.374 +#endif
   3.375 +
   3.376 +#ifndef XEN
   3.377  doit:
   3.378 +#endif
   3.379  	/* reload state, may have changed during  opening of critical section */
   3.380  	state = ctx->ctx_state;
   3.381  
   3.382 @@ -2087,6 +2210,7 @@ doit:
   3.383  		pfm_unreserve_session(ctx, ctx->ctx_fl_system , ctx->ctx_cpu);
   3.384  	}
   3.385  
   3.386 +#ifndef XEN
   3.387  	/*
   3.388  	 * disconnect file descriptor from context must be done
   3.389  	 * before we unlock.
   3.390 @@ -2107,6 +2231,9 @@ doit:
   3.391  	 * MUST be done with interrupts ENABLED.
   3.392  	 */
   3.393  	if (smpl_buf_addr)  pfm_rvfree(smpl_buf_addr, smpl_buf_size);
   3.394 +#else
   3.395 +	UNPROTECT_CTX_NOIRQ(ctx);
   3.396 +#endif
   3.397  
   3.398  	/*
   3.399  	 * return the memory used by the context
   3.400 @@ -2116,6 +2243,7 @@ doit:
   3.401  	return 0;
   3.402  }
   3.403  
   3.404 +#ifndef XEN
   3.405  static int
   3.406  pfm_no_open(struct inode *irrelevant, struct file *dontcare)
   3.407  {
   3.408 @@ -2255,6 +2383,7 @@ pfm_remap_buffer(struct vm_area_struct *
   3.409  	}
   3.410  	return 0;
   3.411  }
   3.412 +#endif
   3.413  
   3.414  /*
   3.415   * allocate a sampling buffer and remaps it into the user address space of the task
   3.416 @@ -2262,6 +2391,7 @@ pfm_remap_buffer(struct vm_area_struct *
   3.417  static int
   3.418  pfm_smpl_buffer_alloc(struct task_struct *task, pfm_context_t *ctx, unsigned long rsize, void **user_vaddr)
   3.419  {
   3.420 +#ifndef XEN
   3.421  	struct mm_struct *mm = task->mm;
   3.422  	struct vm_area_struct *vma = NULL;
   3.423  	unsigned long size;
   3.424 @@ -2374,8 +2504,13 @@ error_kmem:
   3.425  	pfm_rvfree(smpl_buf, size);
   3.426  
   3.427  	return -ENOMEM;
   3.428 -}
   3.429 -
   3.430 +#else
   3.431 +	/* XXX */
   3.432 +	return 0;
   3.433 +#endif
   3.434 +}
   3.435 +
   3.436 +#ifndef XEN
   3.437  /*
   3.438   * XXX: do something better here
   3.439   */
   3.440 @@ -2399,6 +2534,7 @@ pfm_bad_permissions(struct task_struct *
   3.441  	    || (current->gid != task->sgid)
   3.442  	    || (current->gid != task->gid)) && !capable(CAP_SYS_PTRACE);
   3.443  }
   3.444 +#endif
   3.445  
   3.446  static int
   3.447  pfarg_is_sane(struct task_struct *task, pfarg_context_t *pfx)
   3.448 @@ -2535,6 +2671,7 @@ pfm_reset_pmu_state(pfm_context_t *ctx)
   3.449  	ctx->ctx_used_dbrs[0] = 0UL;
   3.450  }
   3.451  
   3.452 +#ifndef XEN
   3.453  static int
   3.454  pfm_ctx_getsize(void *arg, size_t *sz)
   3.455  {
   3.456 @@ -2642,20 +2779,31 @@ pfm_get_task(pfm_context_t *ctx, pid_t p
   3.457  	}
   3.458  	return ret;
   3.459  }
   3.460 -
   3.461 -
   3.462 -
   3.463 +#endif
   3.464 +
   3.465 +
   3.466 +#ifndef XEN
   3.467  static int
   3.468  pfm_context_create(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
   3.469 -{
   3.470 +#else
   3.471 +static pfm_context_t*
   3.472 +pfm_context_create(pfarg_context_t* req)
   3.473 +#endif
   3.474 +{
   3.475 +#ifndef XEN
   3.476  	pfarg_context_t *req = (pfarg_context_t *)arg;
   3.477  	struct file *filp;
   3.478 +#else
   3.479 +	pfm_context_t *ctx;
   3.480 +#endif
   3.481  	int ctx_flags;
   3.482  	int ret;
   3.483  
   3.484 +#ifndef XEN
   3.485  	/* let's check the arguments first */
   3.486  	ret = pfarg_is_sane(current, req);
   3.487  	if (ret < 0) return ret;
   3.488 +#endif
   3.489  
   3.490  	ctx_flags = req->ctx_flags;
   3.491  
   3.492 @@ -2664,6 +2812,7 @@ pfm_context_create(pfm_context_t *ctx, v
   3.493  	ctx = pfm_context_alloc();
   3.494  	if (!ctx) goto error;
   3.495  
   3.496 +#ifndef XEN
   3.497  	ret = pfm_alloc_fd(&filp);
   3.498  	if (ret < 0) goto error_file;
   3.499  
   3.500 @@ -2673,6 +2822,7 @@ pfm_context_create(pfm_context_t *ctx, v
   3.501  	 * attach context to file
   3.502  	 */
   3.503  	filp->private_data = ctx;
   3.504 +#endif
   3.505  
   3.506  	/*
   3.507  	 * does the user want to sample?
   3.508 @@ -2704,10 +2854,12 @@ pfm_context_create(pfm_context_t *ctx, v
   3.509  	 * ctx->ctx_fl_excl_idle   = (ctx_flags & PFM_FL_EXCL_IDLE) ? 1: 0;
   3.510  	 */
   3.511  
   3.512 +#ifndef XEN
   3.513  	/*
   3.514  	 * init restart semaphore to locked
   3.515  	 */
   3.516  	init_completion(&ctx->ctx_restart_done);
   3.517 +#endif
   3.518  
   3.519  	/*
   3.520  	 * activation is used in SMP only
   3.521 @@ -2715,12 +2867,14 @@ pfm_context_create(pfm_context_t *ctx, v
   3.522  	ctx->ctx_last_activation = PFM_INVALID_ACTIVATION;
   3.523  	SET_LAST_CPU(ctx, -1);
   3.524  
   3.525 +#ifndef XEN
   3.526  	/*
   3.527  	 * initialize notification message queue
   3.528  	 */
   3.529  	ctx->ctx_msgq_head = ctx->ctx_msgq_tail = 0;
   3.530  	init_waitqueue_head(&ctx->ctx_msgq_wait);
   3.531  	init_waitqueue_head(&ctx->ctx_zombieq);
   3.532 +#endif
   3.533  
   3.534  	DPRINT(("ctx=%p flags=0x%x system=%d notify_block=%d excl_idle=%d no_msg=%d ctx_fd=%d \n",
   3.535  		ctx,
   3.536 @@ -2736,19 +2890,35 @@ pfm_context_create(pfm_context_t *ctx, v
   3.537  	 */
   3.538  	pfm_reset_pmu_state(ctx);
   3.539  
   3.540 +#ifndef XEN
   3.541  	return 0;
   3.542 +#else
   3.543 +	return ctx;
   3.544 +#endif
   3.545  
   3.546  buffer_error:
   3.547 +#ifndef XEN
   3.548  	pfm_free_fd(ctx->ctx_fd, filp);
   3.549 +#endif
   3.550  
   3.551  	if (ctx->ctx_buf_fmt) {
   3.552 +#ifndef XEN
   3.553  		pfm_buf_fmt_exit(ctx->ctx_buf_fmt, current, NULL, regs);
   3.554 -	}
   3.555 +#else
   3.556 +		pfm_buf_fmt_exit(ctx->ctx_buf_fmt, current, NULL, NULL);
   3.557 +#endif
   3.558 +	}
   3.559 +#ifndef XEN
   3.560  error_file:
   3.561 +#endif
   3.562  	pfm_context_free(ctx);
   3.563  
   3.564  error:
   3.565 +#ifndef XEN
   3.566  	return ret;
   3.567 +#else
   3.568 +	return NULL;
   3.569 +#endif
   3.570  }
   3.571  
   3.572  static inline unsigned long
   3.573 @@ -2860,7 +3030,9 @@ pfm_reset_regs(pfm_context_t *ctx, unsig
   3.574  static int
   3.575  pfm_write_pmcs(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
   3.576  {
   3.577 +#ifndef XEN
   3.578  	struct thread_struct *thread = NULL;
   3.579 +#endif
   3.580  	struct task_struct *task;
   3.581  	pfarg_reg_t *req = (pfarg_reg_t *)arg;
   3.582  	unsigned long value, pmc_pm;
   3.583 @@ -2877,9 +3049,14 @@ pfm_write_pmcs(pfm_context_t *ctx, void 
   3.584  	is_system = ctx->ctx_fl_system;
   3.585  	task      = ctx->ctx_task;
   3.586  	impl_pmds = pmu_conf->impl_pmds[0];
   3.587 +#ifdef XEN
   3.588 +	task = NULL;
   3.589 +	BUG_ON(regs != NULL);
   3.590 +#endif
   3.591  
   3.592  	if (state == PFM_CTX_ZOMBIE) return -EINVAL;
   3.593  
   3.594 +#ifndef XEN
   3.595  	if (is_loaded) {
   3.596  		thread = &task->thread;
   3.597  		/*
   3.598 @@ -2893,6 +3070,13 @@ pfm_write_pmcs(pfm_context_t *ctx, void 
   3.599  		}
   3.600  		can_access_pmu = GET_PMU_OWNER() == task || is_system ? 1 : 0;
   3.601  	}
   3.602 +#else
   3.603 +	/* XXX FIXME */
   3.604 +	if (state != PFM_CTX_UNLOADED) {
   3.605 +		return -EBUSY;
   3.606 +	}
   3.607 +#endif
   3.608 +
   3.609  	expert_mode = pfm_sysctl.expert_mode; 
   3.610  
   3.611  	for (i = 0; i < count; i++, req++) {
   3.612 @@ -3046,6 +3230,7 @@ pfm_write_pmcs(pfm_context_t *ctx, void 
   3.613  		 */
   3.614  		ctx->ctx_pmcs[cnum] = value;
   3.615  
   3.616 +#ifndef XEN
   3.617  		if (is_loaded) {
   3.618  			/*
   3.619  			 * write thread state
   3.620 @@ -3071,6 +3256,7 @@ pfm_write_pmcs(pfm_context_t *ctx, void 
   3.621  			}
   3.622  #endif
   3.623  		}
   3.624 +#endif
   3.625  
   3.626  		DPRINT(("pmc[%u]=0x%lx ld=%d apmu=%d flags=0x%x all_pmcs=0x%lx used_pmds=0x%lx eventid=%ld smpl_pmds=0x%lx reset_pmds=0x%lx reloads_pmcs=0x%lx used_monitors=0x%lx ovfl_regs=0x%lx\n",
   3.627  			  cnum,
   3.628 @@ -3102,7 +3288,9 @@ error:
   3.629  static int
   3.630  pfm_write_pmds(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
   3.631  {
   3.632 +#ifndef XEN
   3.633  	struct thread_struct *thread = NULL;
   3.634 +#endif
   3.635  	struct task_struct *task;
   3.636  	pfarg_reg_t *req = (pfarg_reg_t *)arg;
   3.637  	unsigned long value, hw_value, ovfl_mask;
   3.638 @@ -3118,9 +3306,14 @@ pfm_write_pmds(pfm_context_t *ctx, void 
   3.639  	is_system = ctx->ctx_fl_system;
   3.640  	ovfl_mask = pmu_conf->ovfl_val;
   3.641  	task      = ctx->ctx_task;
   3.642 +#ifdef XEN
   3.643 +	task = NULL;
   3.644 +	BUG_ON(regs != NULL);
   3.645 +#endif
   3.646  
   3.647  	if (unlikely(state == PFM_CTX_ZOMBIE)) return -EINVAL;
   3.648  
   3.649 +#ifndef XEN
   3.650  	/*
   3.651  	 * on both UP and SMP, we can only write to the PMC when the task is
   3.652  	 * the owner of the local PMU.
   3.653 @@ -3138,6 +3331,12 @@ pfm_write_pmds(pfm_context_t *ctx, void 
   3.654  		}
   3.655  		can_access_pmu = GET_PMU_OWNER() == task || is_system ? 1 : 0;
   3.656  	}
   3.657 +#else
   3.658 +	/* XXX FIXME */
   3.659 +	if (state != PFM_CTX_UNLOADED) {
   3.660 +		return -EBUSY;
   3.661 +	}
   3.662 +#endif
   3.663  	expert_mode = pfm_sysctl.expert_mode; 
   3.664  
   3.665  	for (i = 0; i < count; i++, req++) {
   3.666 @@ -3230,6 +3429,8 @@ pfm_write_pmds(pfm_context_t *ctx, void 
   3.667  			ctx->ctx_ovfl_regs[0] &= ~1UL << cnum;
   3.668  		}
   3.669  
   3.670 +		/* XXX FIXME */
   3.671 +#ifndef XEN
   3.672  		if (is_loaded) {
   3.673  			/*
   3.674  		 	 * write thread state
   3.675 @@ -3252,6 +3453,7 @@ pfm_write_pmds(pfm_context_t *ctx, void 
   3.676  #endif
   3.677  			}
   3.678  		}
   3.679 +#endif
   3.680  
   3.681  		DPRINT(("pmd[%u]=0x%lx ld=%d apmu=%d, hw_value=0x%lx ctx_pmd=0x%lx  short_reset=0x%lx "
   3.682  			  "long_reset=0x%lx notify=%c seed=0x%lx mask=0x%lx used_pmds=0x%lx reset_pmds=0x%lx reload_pmds=0x%lx all_pmds=0x%lx ovfl_regs=0x%lx\n",
   3.683 @@ -3288,6 +3490,7 @@ abort_mission:
   3.684  	return ret;
   3.685  }
   3.686  
   3.687 +#ifndef XEN
   3.688  /*
   3.689   * By the way of PROTECT_CONTEXT(), interrupts are masked while we are in this function.
   3.690   * Therefore we know, we do not have to worry about the PMU overflow interrupt. If an
   3.691 @@ -3471,6 +3674,7 @@ pfm_mod_read_pmds(struct task_struct *ta
   3.692  	return pfm_read_pmds(ctx, req, nreq, regs);
   3.693  }
   3.694  EXPORT_SYMBOL(pfm_mod_read_pmds);
   3.695 +#endif
   3.696  
   3.697  /*
   3.698   * Only call this function when a process it trying to
   3.699 @@ -3552,6 +3756,7 @@ pfm_release_debug_registers(struct task_
   3.700  	return ret;
   3.701  }
   3.702  
   3.703 +#ifndef XEN
   3.704  static int
   3.705  pfm_restart(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
   3.706  {
   3.707 @@ -3720,6 +3925,7 @@ pfm_debug(pfm_context_t *ctx, void *arg,
   3.708  	}
   3.709  	return 0;
   3.710  }
   3.711 +#endif
   3.712  
   3.713  /*
   3.714   * arg can be NULL and count can be zero for this function
   3.715 @@ -3727,7 +3933,9 @@ pfm_debug(pfm_context_t *ctx, void *arg,
   3.716  static int
   3.717  pfm_write_ibr_dbr(int mode, pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
   3.718  {
   3.719 +#ifndef XEN
   3.720  	struct thread_struct *thread = NULL;
   3.721 +#endif
   3.722  	struct task_struct *task;
   3.723  	pfarg_dbreg_t *req = (pfarg_dbreg_t *)arg;
   3.724  	unsigned long flags;
   3.725 @@ -3744,6 +3952,12 @@ pfm_write_ibr_dbr(int mode, pfm_context_
   3.726  	is_loaded = state == PFM_CTX_LOADED ? 1 : 0;
   3.727  	is_system = ctx->ctx_fl_system;
   3.728  	task      = ctx->ctx_task;
   3.729 +#ifdef XEN
   3.730 +	task = NULL;
   3.731 +	BUG_ON(regs != NULL);
   3.732 +	/* currently dbrs, ibrs aren't supported */
   3.733 +	BUG();
   3.734 +#endif
   3.735  
   3.736  	if (state == PFM_CTX_ZOMBIE) return -EINVAL;
   3.737  
   3.738 @@ -3752,6 +3966,10 @@ pfm_write_ibr_dbr(int mode, pfm_context_
   3.739  	 * the owner of the local PMU.
   3.740  	 */
   3.741  	if (is_loaded) {
   3.742 +#ifdef XEN
   3.743 +		/* XXX */
   3.744 +		return -EBUSY;
   3.745 +#else
   3.746  		thread = &task->thread;
   3.747  		/*
   3.748  		 * In system wide and when the context is loaded, access can only happen
   3.749 @@ -3763,6 +3981,7 @@ pfm_write_ibr_dbr(int mode, pfm_context_
   3.750  			return -EBUSY;
   3.751  		}
   3.752  		can_access_pmu = GET_PMU_OWNER() == task || is_system ? 1 : 0;
   3.753 +#endif
   3.754  	}
   3.755  
   3.756  	/*
   3.757 @@ -3777,10 +3996,14 @@ pfm_write_ibr_dbr(int mode, pfm_context_
   3.758  	/*
   3.759  	 * don't bother if we are loaded and task is being debugged
   3.760  	 */
   3.761 +#ifndef XEN
   3.762  	if (is_loaded && (thread->flags & IA64_THREAD_DBG_VALID) != 0) {
   3.763  		DPRINT(("debug registers already in use for [%d]\n", task->pid));
   3.764  		return -EBUSY;
   3.765  	}
   3.766 +#else
   3.767 +	/* Currently no support for is_loaded, see -EBUSY above */
   3.768 +#endif
   3.769  
   3.770  	/*
   3.771  	 * check for debug registers in system wide mode
   3.772 @@ -3819,7 +4042,9 @@ pfm_write_ibr_dbr(int mode, pfm_context_
   3.773  	 * is shared by all processes running on it
   3.774   	 */
   3.775  	if (first_time && can_access_pmu) {
   3.776 +#ifndef XEN
   3.777  		DPRINT(("[%d] clearing ibrs, dbrs\n", task->pid));
   3.778 +#endif
   3.779  		for (i=0; i < pmu_conf->num_ibrs; i++) {
   3.780  			ia64_set_ibr(i, 0UL);
   3.781  			ia64_dv_serialize_instruction();
   3.782 @@ -3983,6 +4208,7 @@ pfm_get_features(pfm_context_t *ctx, voi
   3.783  	return 0;
   3.784  }
   3.785  
   3.786 +#ifndef XEN
   3.787  static int
   3.788  pfm_stop(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
   3.789  {
   3.790 @@ -4201,12 +4427,15 @@ pfm_check_task_exist(pfm_context_t *ctx)
   3.791  
   3.792  	return ret;
   3.793  }
   3.794 +#endif
   3.795  
   3.796  static int
   3.797  pfm_context_load(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
   3.798  {
   3.799  	struct task_struct *task;
   3.800 +#ifndef XEN
   3.801  	struct thread_struct *thread;
   3.802 +#endif
   3.803  	struct pfm_context_t *old;
   3.804  	unsigned long flags;
   3.805  #ifndef CONFIG_SMP
   3.806 @@ -4220,6 +4449,17 @@ pfm_context_load(pfm_context_t *ctx, voi
   3.807  
   3.808  	state     = ctx->ctx_state;
   3.809  	is_system = ctx->ctx_fl_system;
   3.810 +#ifdef XEN
   3.811 +	task = NULL;
   3.812 +	old = NULL;
   3.813 +	pmcs_source = pmds_source = NULL;
   3.814 +#ifndef CONFIG_SMP
   3.815 +	owner_task = NULL;
   3.816 +#endif
   3.817 +	flags = 0;
   3.818 +	BUG_ON(count != 0);
   3.819 +	BUG_ON(regs != NULL);
   3.820 +#endif
   3.821  	/*
   3.822  	 * can only load from unloaded or terminated state
   3.823  	 */
   3.824 @@ -4230,6 +4470,7 @@ pfm_context_load(pfm_context_t *ctx, voi
   3.825  		return -EBUSY;
   3.826  	}
   3.827  
   3.828 +#ifndef XEN
   3.829  	DPRINT(("load_pid [%d] using_dbreg=%d\n", req->load_pid, ctx->ctx_fl_using_dbreg));
   3.830  
   3.831  	if (CTX_OVFL_NOBLOCK(ctx) == 0 && req->load_pid == current->pid) {
   3.832 @@ -4255,8 +4496,16 @@ pfm_context_load(pfm_context_t *ctx, voi
   3.833  	}
   3.834  
   3.835  	thread = &task->thread;
   3.836 +#else
   3.837 +	BUG_ON(!spin_is_locked(&ctx->ctx_lock));
   3.838 +	if (!is_system) {
   3.839 +		ret = -EINVAL;
   3.840 +		goto error;
   3.841 +	}
   3.842 +#endif
   3.843  
   3.844  	ret = 0;
   3.845 +#ifndef XEN
   3.846  	/*
   3.847  	 * cannot load a context which is using range restrictions,
   3.848  	 * into a task that is being debugged.
   3.849 @@ -4284,6 +4533,9 @@ pfm_context_load(pfm_context_t *ctx, voi
   3.850  
   3.851  		if (ret) goto error;
   3.852  	}
   3.853 +#else
   3.854 +	BUG_ON(ctx->ctx_fl_using_dbreg);
   3.855 +#endif
   3.856  
   3.857  	/*
   3.858  	 * SMP system-wide monitoring implies self-monitoring.
   3.859 @@ -4318,6 +4570,7 @@ pfm_context_load(pfm_context_t *ctx, voi
   3.860  	 *
   3.861  	 * XXX: needs to be atomic
   3.862  	 */
   3.863 +#ifndef XEN
   3.864  	DPRINT(("before cmpxchg() old_ctx=%p new_ctx=%p\n",
   3.865  		thread->pfm_context, ctx));
   3.866  
   3.867 @@ -4329,6 +4582,7 @@ pfm_context_load(pfm_context_t *ctx, voi
   3.868  	}
   3.869  
   3.870  	pfm_reset_msgq(ctx);
   3.871 +#endif
   3.872  
   3.873  	ctx->ctx_state = PFM_CTX_LOADED;
   3.874  
   3.875 @@ -4346,9 +4600,14 @@ pfm_context_load(pfm_context_t *ctx, voi
   3.876  
   3.877  		if (ctx->ctx_fl_excl_idle) PFM_CPUINFO_SET(PFM_CPUINFO_EXCL_IDLE);
   3.878  	} else {
   3.879 +#ifndef XEN
   3.880  		thread->flags |= IA64_THREAD_PM_VALID;
   3.881 -	}
   3.882 -
   3.883 +#else
   3.884 +		BUG();
   3.885 +#endif
   3.886 +	}
   3.887 +
   3.888 +#ifndef XEN
   3.889  	/*
   3.890  	 * propagate into thread-state
   3.891  	 */
   3.892 @@ -4417,12 +4676,29 @@ pfm_context_load(pfm_context_t *ctx, voi
   3.893  		ctx->ctx_saved_psr_up = 0UL;
   3.894  		ia64_psr(regs)->up = ia64_psr(regs)->pp = 0;
   3.895  	}
   3.896 +#else
   3.897 +	BUG_ON(!is_system);
   3.898 +
   3.899 +	/* load pmds, pmcs */
   3.900 +	xenpfm_restore_pmds(ctx);
   3.901 +	xenpfm_restore_pmcs(ctx);
   3.902 +
   3.903 +	ctx->ctx_reload_pmcs[0] = 0UL;
   3.904 +	ctx->ctx_reload_pmds[0] = 0UL;
   3.905 +
   3.906 +	BUG_ON(ctx->ctx_fl_using_dbreg);
   3.907 +
   3.908 +	SET_PMU_OWNER(NULL, ctx);
   3.909 +#endif
   3.910  
   3.911  	ret = 0;
   3.912  
   3.913 +#ifndef XEN
   3.914  error_unres:
   3.915  	if (ret) pfm_unreserve_session(ctx, ctx->ctx_fl_system, the_cpu);
   3.916 +#endif
   3.917  error:
   3.918 +#ifndef XEN
   3.919  	/*
   3.920  	 * we must undo the dbregs setting (for system-wide)
   3.921  	 */
   3.922 @@ -4445,6 +4721,9 @@ error:
   3.923  			}
   3.924  		}
   3.925  	}
   3.926 +#else
   3.927 +	BUG_ON(set_dbregs);
   3.928 +#endif
   3.929  	return ret;
   3.930  }
   3.931  
   3.932 @@ -4466,7 +4745,15 @@ pfm_context_unload(pfm_context_t *ctx, v
   3.933  	int prev_state, is_system;
   3.934  	int ret;
   3.935  
   3.936 +#ifndef XEN
   3.937  	DPRINT(("ctx_state=%d task [%d]\n", ctx->ctx_state, task ? task->pid : -1));
   3.938 +#else
   3.939 +	task = NULL;
   3.940 +	tregs = NULL;
   3.941 +	BUG_ON(arg != NULL);
   3.942 +	BUG_ON(count != 0);
   3.943 +	BUG_ON(regs != NULL);
   3.944 +#endif
   3.945  
   3.946  	prev_state = ctx->ctx_state;
   3.947  	is_system  = ctx->ctx_fl_system;
   3.948 @@ -4482,8 +4769,13 @@ pfm_context_unload(pfm_context_t *ctx, v
   3.949  	/*
   3.950  	 * clear psr and dcr bits
   3.951  	 */
   3.952 +#ifndef XEN
   3.953  	ret = pfm_stop(ctx, NULL, 0, regs);
   3.954  	if (ret) return ret;
   3.955 +#else
   3.956 +	/* caller does it by hand */
   3.957 +	ret = 0;
   3.958 +#endif
   3.959  
   3.960  	ctx->ctx_state = PFM_CTX_UNLOADED;
   3.961  
   3.962 @@ -4515,10 +4807,12 @@ pfm_context_unload(pfm_context_t *ctx, v
   3.963  		if (prev_state != PFM_CTX_ZOMBIE) 
   3.964  			pfm_unreserve_session(ctx, 1 , ctx->ctx_cpu);
   3.965  
   3.966 +#ifndef XEN
   3.967  		/*
   3.968  		 * disconnect context from task
   3.969  		 */
   3.970  		task->thread.pfm_context = NULL;
   3.971 +#endif
   3.972  		/*
   3.973  		 * disconnect task from context
   3.974  		 */
   3.975 @@ -4530,6 +4824,7 @@ pfm_context_unload(pfm_context_t *ctx, v
   3.976  		return 0;
   3.977  	}
   3.978  
   3.979 +#ifndef XEN
   3.980  	/*
   3.981  	 * per-task mode
   3.982  	 */
   3.983 @@ -4584,9 +4879,14 @@ pfm_context_unload(pfm_context_t *ctx, v
   3.984  	DPRINT(("disconnected [%d] from context\n", task->pid));
   3.985  
   3.986  	return 0;
   3.987 -}
   3.988 -
   3.989 -
   3.990 +#else
   3.991 +	BUG();
   3.992 +	return -EINVAL;
   3.993 +#endif
   3.994 +}
   3.995 +
   3.996 +
   3.997 +#ifndef XEN
   3.998  /*
   3.999   * called only from exit_thread(): task == current
  3.1000   * we come here only if current has a context attached (loaded or masked)
  3.1001 @@ -5210,6 +5510,9 @@ pfm_end_notify_user(pfm_context_t *ctx)
  3.1002  
  3.1003  	return pfm_notify_user(ctx, msg);
  3.1004  }
  3.1005 +#else
  3.1006 +#define pfm_ovfl_notify_user(ctx, ovfl_pmds)	do {} while(0)
  3.1007 +#endif
  3.1008  
  3.1009  /*
  3.1010   * main overflow processing routine.
  3.1011 @@ -5226,6 +5529,9 @@ pfm_overflow_handler(struct task_struct 
  3.1012  	pfm_ovfl_ctrl_t	ovfl_ctrl;
  3.1013  	unsigned int i, has_smpl;
  3.1014  	int must_notify = 0;
  3.1015 +#ifdef XEN
  3.1016 +	BUG_ON(task != NULL);
  3.1017 +#endif
  3.1018  
  3.1019  	if (unlikely(ctx->ctx_state == PFM_CTX_ZOMBIE)) goto stop_monitoring;
  3.1020  
  3.1021 @@ -5400,6 +5706,7 @@ pfm_overflow_handler(struct task_struct 
  3.1022  	}
  3.1023  
  3.1024  	if (ovfl_notify && ovfl_ctrl.bits.notify_user) {
  3.1025 +#ifndef XEN
  3.1026  		/*
  3.1027  		 * keep track of what to reset when unblocking
  3.1028  		 */
  3.1029 @@ -5428,11 +5735,18 @@ pfm_overflow_handler(struct task_struct 
  3.1030  		 * anyway, so the signal receiver would come spin for nothing.
  3.1031  		 */
  3.1032  		must_notify = 1;
  3.1033 +#else
  3.1034 +		gdprintk(XENLOG_INFO, "%s check!\n", __func__);
  3.1035 +#endif
  3.1036  	}
  3.1037  
  3.1038  	DPRINT_ovfl(("owner [%d] pending=%ld reason=%u ovfl_pmds=0x%lx ovfl_notify=0x%lx masked=%d\n",
  3.1039 +#ifndef XEN
  3.1040  			GET_PMU_OWNER() ? GET_PMU_OWNER()->pid : -1,
  3.1041  			PFM_GET_WORK_PENDING(task),
  3.1042 +#else
  3.1043 +			-1, 0UL,
  3.1044 +#endif
  3.1045  			ctx->ctx_fl_trap_reason,
  3.1046  			ovfl_pmds,
  3.1047  			ovfl_notify,
  3.1048 @@ -5441,9 +5755,13 @@ pfm_overflow_handler(struct task_struct 
  3.1049  	 * in case monitoring must be stopped, we toggle the psr bits
  3.1050  	 */
  3.1051  	if (ovfl_ctrl.bits.mask_monitoring) {
  3.1052 +#ifndef XEN
  3.1053  		pfm_mask_monitoring(task);
  3.1054  		ctx->ctx_state = PFM_CTX_MASKED;
  3.1055  		ctx->ctx_fl_can_restart = 1;
  3.1056 +#else
  3.1057 +		gdprintk(XENLOG_INFO, "%s check!\n", __func__);
  3.1058 +#endif
  3.1059  	}
  3.1060  
  3.1061  	/*
  3.1062 @@ -5513,14 +5831,22 @@ pfm_do_interrupt_handler(int irq, void *
  3.1063  	 */
  3.1064  	pmc0 = ia64_get_pmc(0);
  3.1065  
  3.1066 +#ifndef XEN
  3.1067  	task = GET_PMU_OWNER();
  3.1068 +#else
  3.1069 +	task = NULL;
  3.1070 +#endif
  3.1071  	ctx  = GET_PMU_CTX();
  3.1072  
  3.1073  	/*
  3.1074  	 * if we have some pending bits set
  3.1075  	 * assumes : if any PMC0.bit[63-1] is set, then PMC0.fr = 1
  3.1076  	 */
  3.1077 +#ifndef XEN
  3.1078  	if (PMC0_HAS_OVFL(pmc0) && task) {
  3.1079 +#else
  3.1080 +	if (PMC0_HAS_OVFL(pmc0)) {
  3.1081 +#endif
  3.1082  		/*
  3.1083  		 * we assume that pmc0.fr is always set here
  3.1084  		 */
  3.1085 @@ -5528,8 +5854,10 @@ pfm_do_interrupt_handler(int irq, void *
  3.1086  		/* sanity check */
  3.1087  		if (!ctx) goto report_spurious1;
  3.1088  
  3.1089 +#ifndef XEN
  3.1090  		if (ctx->ctx_fl_system == 0 && (task->thread.flags & IA64_THREAD_PM_VALID) == 0) 
  3.1091  			goto report_spurious2;
  3.1092 +#endif
  3.1093  
  3.1094  		PROTECT_CTX_NOPRINT(ctx, flags);
  3.1095  
  3.1096 @@ -5549,16 +5877,20 @@ pfm_do_interrupt_handler(int irq, void *
  3.1097  	return retval;
  3.1098  
  3.1099  report_spurious1:
  3.1100 +#ifndef XEN
  3.1101  	printk(KERN_INFO "perfmon: spurious overflow interrupt on CPU%d: process %d has no PFM context\n",
  3.1102  		this_cpu, task->pid);
  3.1103 +#endif
  3.1104  	pfm_unfreeze_pmu();
  3.1105  	return -1;
  3.1106 +#ifndef XEN  /* XEN path doesn't take this goto */
  3.1107  report_spurious2:
  3.1108  	printk(KERN_INFO "perfmon: spurious overflow interrupt on CPU%d: process %d, invalid flag\n", 
  3.1109  		this_cpu, 
  3.1110  		task->pid);
  3.1111  	pfm_unfreeze_pmu();
  3.1112  	return -1;
  3.1113 +#endif
  3.1114  }
  3.1115  
  3.1116  static irqreturn_t
  3.1117 @@ -5600,6 +5932,7 @@ pfm_interrupt_handler(int irq, void *arg
  3.1118  	return IRQ_HANDLED;
  3.1119  }
  3.1120  
  3.1121 +#ifndef XEN
  3.1122  /*
  3.1123   * /proc/perfmon interface, for debug only
  3.1124   */
  3.1125 @@ -5777,6 +6110,7 @@ pfm_proc_open(struct inode *inode, struc
  3.1126  {
  3.1127  	return seq_open(file, &pfm_seq_ops);
  3.1128  }
  3.1129 +#endif
  3.1130  
  3.1131  
  3.1132  /*
  3.1133 @@ -5831,6 +6165,7 @@ pfm_syst_wide_update_task(struct task_st
  3.1134  	}
  3.1135  }
  3.1136  
  3.1137 +#ifndef XEN
  3.1138  #ifdef CONFIG_SMP
  3.1139  
  3.1140  static void
  3.1141 @@ -6326,6 +6661,7 @@ pfm_load_regs (struct task_struct *task)
  3.1142  	if (likely(psr_up)) pfm_set_psr_up();
  3.1143  }
  3.1144  #endif /* CONFIG_SMP */
  3.1145 +#endif /* XEN */
  3.1146  
  3.1147  /*
  3.1148   * this function assumes monitoring is stopped
  3.1149 @@ -6333,6 +6669,7 @@ pfm_load_regs (struct task_struct *task)
  3.1150  static void
  3.1151  pfm_flush_pmds(struct task_struct *task, pfm_context_t *ctx)
  3.1152  {
  3.1153 +#ifndef XEN
  3.1154  	u64 pmc0;
  3.1155  	unsigned long mask2, val, pmd_val, ovfl_val;
  3.1156  	int i, can_access_pmu = 0;
  3.1157 @@ -6438,14 +6775,20 @@ pfm_flush_pmds(struct task_struct *task,
  3.1158  
  3.1159  		ctx->ctx_pmds[i].val = val;
  3.1160  	}
  3.1161 +#else
  3.1162 +	/* XXX */
  3.1163 +#endif
  3.1164  }
  3.1165  
  3.1166  static struct irqaction perfmon_irqaction = {
  3.1167  	.handler = pfm_interrupt_handler,
  3.1168 +#ifndef XEN
  3.1169  	.flags   = SA_INTERRUPT,
  3.1170 +#endif
  3.1171  	.name    = "perfmon"
  3.1172  };
  3.1173  
  3.1174 +#ifndef XEN
  3.1175  static void
  3.1176  pfm_alt_save_pmu_state(void *data)
  3.1177  {
  3.1178 @@ -6580,11 +6923,16 @@ pfm_remove_alt_pmu_interrupt(pfm_intr_ha
  3.1179  	return 0;
  3.1180  }
  3.1181  EXPORT_SYMBOL_GPL(pfm_remove_alt_pmu_interrupt);
  3.1182 +#endif
  3.1183  
  3.1184  /*
  3.1185   * perfmon initialization routine, called from the initcall() table
  3.1186   */
  3.1187 +#ifndef XEN
  3.1188  static int init_pfm_fs(void);
  3.1189 +#else
  3.1190 +#define init_pfm_fs() do {} while(0)
  3.1191 +#endif
  3.1192  
  3.1193  static int __init
  3.1194  pfm_probe_pmu(void)
  3.1195 @@ -6609,12 +6957,14 @@ found:
  3.1196  	return 0;
  3.1197  }
  3.1198  
  3.1199 +#ifndef XEN
  3.1200  static struct file_operations pfm_proc_fops = {
  3.1201  	.open		= pfm_proc_open,
  3.1202  	.read		= seq_read,
  3.1203  	.llseek		= seq_lseek,
  3.1204  	.release	= seq_release,
  3.1205  };
  3.1206 +#endif
  3.1207  
  3.1208  int __init
  3.1209  pfm_init(void)
  3.1210 @@ -6684,6 +7034,7 @@ pfm_init(void)
  3.1211  		return -1;
  3.1212  	}
  3.1213  
  3.1214 +#ifndef XEN
  3.1215  	/*
  3.1216  	 * create /proc/perfmon (mostly for debugging purposes)
  3.1217  	 */
  3.1218 @@ -6702,6 +7053,7 @@ pfm_init(void)
  3.1219  	 * create /proc/sys/kernel/perfmon (for debugging purposes)
  3.1220  	 */
  3.1221  	pfm_sysctl_header = register_sysctl_table(pfm_sysctl_root, 0);
  3.1222 +#endif
  3.1223  
  3.1224  	/*
  3.1225  	 * initialize all our spinlocks
  3.1226 @@ -6768,12 +7120,14 @@ dump_pmu_state(const char *from)
  3.1227  		return;
  3.1228  	}
  3.1229  
  3.1230 +#ifndef XEN
  3.1231  	printk("CPU%d from %s() current [%d] iip=0x%lx %s\n", 
  3.1232  		this_cpu, 
  3.1233  		from, 
  3.1234  		current->pid, 
  3.1235  		regs->cr_iip,
  3.1236  		current->comm);
  3.1237 +#endif
  3.1238  
  3.1239  	task = GET_PMU_OWNER();
  3.1240  	ctx  = GET_PMU_CTX();
  3.1241 @@ -6808,6 +7162,7 @@ dump_pmu_state(const char *from)
  3.1242  	}
  3.1243  
  3.1244  	if (ctx) {
  3.1245 +#ifndef XEN
  3.1246  		printk("->CPU%d ctx_state=%d vaddr=%p addr=%p fd=%d ctx_task=[%d] saved_psr_up=0x%lx\n",
  3.1247  				this_cpu,
  3.1248  				ctx->ctx_state,
  3.1249 @@ -6816,10 +7171,19 @@ dump_pmu_state(const char *from)
  3.1250  				ctx->ctx_msgq_head,
  3.1251  				ctx->ctx_msgq_tail,
  3.1252  				ctx->ctx_saved_psr_up);
  3.1253 +#else
  3.1254 +		printk("->CPU%d ctx_state=%d vaddr=%p addr=%p saved_psr_up=0x%lx\n",
  3.1255 +				this_cpu,
  3.1256 +				ctx->ctx_state,
  3.1257 +				ctx->ctx_smpl_vaddr,
  3.1258 +				ctx->ctx_smpl_hdr,
  3.1259 +				ctx->ctx_saved_psr_up);
  3.1260 +#endif
  3.1261  	}
  3.1262  	local_irq_restore(flags);
  3.1263  }
  3.1264  
  3.1265 +#ifndef XEN
  3.1266  /*
  3.1267   * called from process.c:copy_thread(). task is new child.
  3.1268   */
  3.1269 @@ -6843,6 +7207,7 @@ pfm_inherit(struct task_struct *task, st
  3.1270  	 * the psr bits are already set properly in copy_threads()
  3.1271  	 */
  3.1272  }
  3.1273 +#endif
  3.1274  #else  /* !CONFIG_PERFMON */
  3.1275  asmlinkage long
  3.1276  sys_perfmonctl (int fd, int cmd, void *arg, int count)
  3.1277 @@ -6850,3 +7215,584 @@ sys_perfmonctl (int fd, int cmd, void *a
  3.1278  	return -ENOSYS;
  3.1279  }
  3.1280  #endif /* CONFIG_PERFMON */
  3.1281 +
  3.1282 +
  3.1283 +#ifdef XEN
  3.1284 +static int xenpfm_context_unload(void); 
  3.1285 +static int xenpfm_start_stop_locked(int is_start);
  3.1286 +DEFINE_PER_CPU(pfm_context_t*, xenpfm_context);
  3.1287 +
  3.1288 +/*
  3.1289 + * note: some functions mask interrupt with this lock held
  3.1290 + * so that this lock can't be locked from interrupt handler.
  3.1291 + * lock order domlist_lock => xenpfm_context_lock
  3.1292 + */
  3.1293 +DEFINE_SPINLOCK(xenpfm_context_lock);
  3.1294 +
  3.1295 +static int
  3.1296 +xenpfm_get_features(XEN_GUEST_HANDLE(pfarg_features_t) req)
  3.1297 +{
  3.1298 +	pfarg_features_t res;
  3.1299 +	if (guest_handle_is_null(req))
  3.1300 +		return -EFAULT;
  3.1301 +
  3.1302 +	memset(&res, 0, sizeof(res));
  3.1303 +	pfm_get_features(NULL, &res, 0, NULL);
  3.1304 +	if (copy_to_guest(req, &res, 1))
  3.1305 +		return -EFAULT;
  3.1306 +	return 0;
  3.1307 +}
  3.1308 +
  3.1309 +static int
  3.1310 +xenpfm_pfarg_is_sane(pfarg_context_t* pfx)
  3.1311 +{
  3.1312 +	int error;
  3.1313 +	int ctx_flags;
  3.1314 +
  3.1315 +	error = pfarg_is_sane(NULL, pfx);
  3.1316 +	if (error)
  3.1317 +		return error;
  3.1318 +
  3.1319 +	ctx_flags = pfx->ctx_flags;
  3.1320 +	if (!(ctx_flags & PFM_FL_SYSTEM_WIDE) ||
  3.1321 +	    ctx_flags & PFM_FL_NOTIFY_BLOCK ||
  3.1322 +	    ctx_flags & PFM_FL_OVFL_NO_MSG)
  3.1323 +		return -EINVAL;
  3.1324 +
  3.1325 +	/* probably more to add here */
  3.1326 +
  3.1327 +	return 0;
  3.1328 +}
  3.1329 +
  3.1330 +static int
  3.1331 +xenpfm_context_create(XEN_GUEST_HANDLE(pfarg_context_t) req)
  3.1332 +{
  3.1333 +	int error;
  3.1334 +	pfarg_context_t kreq;
  3.1335 +
  3.1336 +	int cpu;
  3.1337 +	pfm_context_t* ctx[NR_CPUS] = {[0 ... (NR_CPUS - 1)] = NULL};
  3.1338 +	
  3.1339 +	if (copy_from_guest(&kreq, req, 1)) {
  3.1340 +		error = -EINVAL;
  3.1341 +		goto out;
  3.1342 +	}
  3.1343 +
  3.1344 +	error = xenpfm_pfarg_is_sane(&kreq);
  3.1345 +	if (error)
  3.1346 +		goto out;
  3.1347 +
  3.1348 +	/* XXX fmt */
  3.1349 +	for_each_cpu(cpu) {
  3.1350 +		ctx[cpu] = pfm_context_create(&kreq);
  3.1351 +		if (ctx[cpu] == NULL) {
  3.1352 +			error = -ENOMEM;
  3.1353 +			break;
  3.1354 +		}
  3.1355 +	}
  3.1356 +	if (error)
  3.1357 +		goto out;
  3.1358 +
  3.1359 +	BUG_ON(in_irq());
  3.1360 +	spin_lock(&xenpfm_context_lock);
  3.1361 +	for_each_cpu(cpu) {
  3.1362 +		if (per_cpu(xenpfm_context, cpu) != NULL) {
  3.1363 +			error = -EBUSY;
  3.1364 +			break;
  3.1365 +		}
  3.1366 +	}
   3.1367 +	if (!error) for_each_cpu(cpu) {
   3.1368 +		per_cpu(xenpfm_context, cpu) = ctx[cpu];
   3.1369 +		ctx[cpu] = NULL;
   3.1370 +	}
  3.1371 +	spin_unlock(&xenpfm_context_lock);
  3.1372 +
  3.1373 +out:
  3.1374 +	for_each_cpu(cpu) {
  3.1375 +		if (ctx[cpu] != NULL)
  3.1376 +			pfm_context_free(ctx[cpu]);
  3.1377 +	}
  3.1378 +	return error;
  3.1379 +}
  3.1380 +
  3.1381 +static int
  3.1382 +xenpfm_context_destroy(void)
  3.1383 +{
  3.1384 +	int cpu;
  3.1385 +	pfm_context_t* ctx;
  3.1386 +	unsigned long flags;
  3.1387 +	unsigned long need_unload;
  3.1388 +	int error = 0;
  3.1389 +
  3.1390 +again:
  3.1391 +	need_unload = 0;
  3.1392 +	BUG_ON(in_irq());
  3.1393 +	spin_lock_irqsave(&xenpfm_context_lock, flags);
  3.1394 +	for_each_cpu(cpu) {
  3.1395 +		ctx = per_cpu(xenpfm_context, cpu);
  3.1396 +		if (ctx == NULL) {
  3.1397 +			error = -EINVAL;
  3.1398 +			break;
  3.1399 +		}
  3.1400 +		PROTECT_CTX_NOIRQ(ctx);
  3.1401 +		if (ctx->ctx_state != PFM_CTX_UNLOADED)
  3.1402 +			need_unload = 1;
  3.1403 +	}
  3.1404 +	if (error) {
  3.1405 +		for_each_cpu(cpu) {
  3.1406 +			ctx = per_cpu(xenpfm_context, cpu);
  3.1407 +			if (ctx == NULL)
  3.1408 +				break;
  3.1409 +			UNPROTECT_CTX_NOIRQ(per_cpu(xenpfm_context, cpu));
  3.1410 +		}
  3.1411 +		goto out;
  3.1412 +	}
  3.1413 +	if (need_unload) {
  3.1414 +		for_each_cpu(cpu)
  3.1415 +			UNPROTECT_CTX_NOIRQ(per_cpu(xenpfm_context, cpu));
  3.1416 +		spin_unlock_irqrestore(&xenpfm_context_lock, flags);
  3.1417 +
  3.1418 +		error = xenpfm_context_unload();
  3.1419 +		if (error)
  3.1420 +			return error;
  3.1421 +		goto again;
  3.1422 +	}
  3.1423 +
  3.1424 +	for_each_cpu(cpu) {
  3.1425 +		pfm_context_t* ctx = per_cpu(xenpfm_context, cpu);
  3.1426 +		per_cpu(xenpfm_context, cpu) = NULL;
  3.1427 +
  3.1428 +		/* pfm_close() unlocks spinlock and free the context. */
  3.1429 +		error |= pfm_close(ctx);
  3.1430 +	}
  3.1431 +out:
  3.1432 +	spin_unlock_irqrestore(&xenpfm_context_lock, flags);
  3.1433 +	return error;
  3.1434 +}
  3.1435 +
  3.1436 +static int
  3.1437 +xenpfm_write_pmcs(XEN_GUEST_HANDLE(pfarg_reg_t) req, unsigned long count)
  3.1438 +{
  3.1439 +	unsigned long i;
  3.1440 +	int error = 0;
  3.1441 +	unsigned long flags;
  3.1442 +
  3.1443 +	for (i = 0; i < count; i++) {
  3.1444 +		pfarg_reg_t kreq;
  3.1445 +		int cpu;
  3.1446 +		if (copy_from_guest_offset(&kreq, req, i, 1)) {
  3.1447 +			error = -EFAULT;
  3.1448 +			goto out;
  3.1449 +		}
  3.1450 +		BUG_ON(in_irq());
  3.1451 +		spin_lock_irqsave(&xenpfm_context_lock, flags);
  3.1452 +		for_each_online_cpu(cpu) {
  3.1453 +			pfm_context_t* ctx = per_cpu(xenpfm_context, cpu);
  3.1454 +			PROTECT_CTX_NOIRQ(ctx);
  3.1455 +			error |= pfm_write_pmcs(ctx, (void *)&kreq, 1, NULL);
  3.1456 +			UNPROTECT_CTX_NOIRQ(ctx);
  3.1457 +		}
  3.1458 +		spin_unlock_irqrestore(&xenpfm_context_lock, flags);
  3.1459 +		if (error)
  3.1460 +			break;
  3.1461 +	}
  3.1462 +	
  3.1463 +	/* XXX if is loaded, change all physical cpus pmcs. */
  3.1464 +	/* Currently results in error */
  3.1465 +out:
  3.1466 +	return error;
  3.1467 +}
  3.1468 +
  3.1469 +static int
  3.1470 +xenpfm_write_pmds(XEN_GUEST_HANDLE(pfarg_reg_t) req, unsigned long count)
  3.1471 +{
  3.1472 +	unsigned long i;
  3.1473 +	int error = 0;
  3.1474 +	
  3.1475 +	for (i = 0; i < count; i++) {
  3.1476 +		pfarg_reg_t kreq;
  3.1477 +		int cpu;
  3.1478 +		unsigned long flags;
  3.1479 +		if (copy_from_guest_offset(&kreq, req, i, 1)) {
  3.1480 +			error = -EFAULT;
  3.1481 +			goto out;
  3.1482 +		}
  3.1483 +		BUG_ON(in_irq());
  3.1484 +		spin_lock_irqsave(&xenpfm_context_lock, flags);
  3.1485 +		for_each_online_cpu(cpu) {
  3.1486 +			pfm_context_t* ctx = per_cpu(xenpfm_context, cpu);
  3.1487 +			PROTECT_CTX_NOIRQ(ctx);
  3.1488 +			error |= pfm_write_pmds(ctx, &kreq, 1, NULL);
  3.1489 +			UNPROTECT_CTX_NOIRQ(ctx);
  3.1490 +		}
  3.1491 +		spin_unlock_irqrestore(&xenpfm_context_lock, flags);
  3.1492 +	}
  3.1493 +	
  3.1494 +	/* XXX if is loaded, change all physical cpus pmds. */
  3.1495 +	/* Currently results in error */
  3.1496 +out:
  3.1497 +	return error;
  3.1498 +}
  3.1499 +
  3.1500 +struct xenpfm_context_load_arg {
  3.1501 +	pfarg_load_t*	req;
  3.1502 +	int		error[NR_CPUS];
  3.1503 +};
  3.1504 +
  3.1505 +static void
  3.1506 +xenpfm_context_load_cpu(void* info)
  3.1507 +{
  3.1508 +	unsigned long flags;
  3.1509 +	struct xenpfm_context_load_arg* arg = (struct xenpfm_context_load_arg*)info;
  3.1510 +	pfm_context_t* ctx = __get_cpu_var(xenpfm_context);
  3.1511 +	PROTECT_CTX(ctx, flags);
  3.1512 +	arg->error[smp_processor_id()] = pfm_context_load(ctx, arg->req, 0, NULL);
  3.1513 +	UNPROTECT_CTX(ctx, flags);
  3.1514 +}
  3.1515 +
  3.1516 +static int
  3.1517 +xenpfm_context_load(XEN_GUEST_HANDLE(pfarg_load_t) req)
  3.1518 +{
  3.1519 +	pfarg_load_t kreq;
  3.1520 +	int cpu;
  3.1521 +	struct xenpfm_context_load_arg arg;
  3.1522 +	int error = 0;
  3.1523 +
  3.1524 +	if (copy_from_guest(&kreq, req, 1))
  3.1525 +		return -EFAULT;
  3.1526 +
  3.1527 +	arg.req = &kreq;
  3.1528 +	for_each_online_cpu(cpu)
  3.1529 +		arg.error[cpu] = 0;
  3.1530 +
  3.1531 +	BUG_ON(in_irq());
  3.1532 +	spin_lock(&xenpfm_context_lock);
  3.1533 +	smp_call_function(&xenpfm_context_load_cpu, &arg, 1, 1);
  3.1534 +	xenpfm_context_load_cpu(&arg);
  3.1535 +	spin_unlock(&xenpfm_context_lock);
  3.1536 +	for_each_online_cpu(cpu) {
  3.1537 +		if (arg.error[cpu]) {
   3.1538 +			error = arg.error[cpu];
   3.1539 +			gdprintk(XENLOG_INFO, "%s: error %d cpu %d\n",
   3.1540 +				 __func__, error, cpu);
  3.1541 +		}
  3.1542 +	}
   3.1543 +	return error;
  3.1544 +}
  3.1545 +
  3.1546 +
  3.1547 +struct xenpfm_context_unload_arg {
  3.1548 +	int		error[NR_CPUS];
  3.1549 +};
  3.1550 +
  3.1551 +static void
  3.1552 +xenpfm_context_unload_cpu(void* info)
  3.1553 +{
  3.1554 +	unsigned long flags;
  3.1555 +	struct xenpfm_context_unload_arg* arg = (struct xenpfm_context_unload_arg*)info;
  3.1556 +	pfm_context_t* ctx = __get_cpu_var(xenpfm_context);
  3.1557 +	PROTECT_CTX(ctx, flags);
  3.1558 +	arg->error[smp_processor_id()] = pfm_context_unload(ctx, NULL, 0, NULL);
  3.1559 +	UNPROTECT_CTX(ctx, flags);
  3.1560 +}
  3.1561 +
  3.1562 +static int
  3.1563 +xenpfm_context_unload(void)
  3.1564 +{
  3.1565 +	int cpu;
  3.1566 +	struct xenpfm_context_unload_arg arg;
  3.1567 +	int error = 0;
  3.1568 +
  3.1569 +	for_each_online_cpu(cpu)
  3.1570 +		arg.error[cpu] = 0;
  3.1571 +
  3.1572 +	BUG_ON(in_irq());
  3.1573 +	read_lock(&domlist_lock);
  3.1574 +	spin_lock(&xenpfm_context_lock);
  3.1575 +	error = xenpfm_start_stop_locked(0);
  3.1576 +	read_unlock(&domlist_lock);
  3.1577 +	if (error) {
  3.1578 +		spin_unlock(&xenpfm_context_lock);
  3.1579 +		return error;
  3.1580 +	}
  3.1581 +	
  3.1582 +	smp_call_function(&xenpfm_context_unload_cpu, &arg, 1, 1);
  3.1583 +	xenpfm_context_unload_cpu(&arg);
  3.1584 +	spin_unlock(&xenpfm_context_lock);
  3.1585 +	for_each_online_cpu(cpu) {
  3.1586 +		if (arg.error[cpu]) {
   3.1587 +			error = arg.error[cpu];
   3.1588 +			gdprintk(XENLOG_INFO, "%s: error %d cpu %d\n",
   3.1589 +				 __func__, error, cpu);
  3.1590 +		}
  3.1591 +	}
  3.1592 +	return error;
  3.1593 +}
  3.1594 +
  3.1595 +static int
  3.1596 +__xenpfm_start(void)
  3.1597 +{
  3.1598 +	pfm_context_t* ctx = __get_cpu_var(xenpfm_context);
  3.1599 +	int state;
  3.1600 +	int error = 0;
  3.1601 +
  3.1602 +	BUG_ON(local_irq_is_enabled());
  3.1603 +	PROTECT_CTX_NOIRQ(ctx);	
  3.1604 +	state = ctx->ctx_state;
  3.1605 +	if (state != PFM_CTX_LOADED) {
  3.1606 +		error = -EINVAL;
  3.1607 +		goto out;
  3.1608 +	}
  3.1609 +
  3.1610 +	/* now update the local PMU and cpuinfo */
  3.1611 +	PFM_CPUINFO_SET(PFM_CPUINFO_DCR_PP);
  3.1612 +
  3.1613 +	/* start monitoring at kernel level */
  3.1614 +	pfm_set_psr_pp();
  3.1615 +
  3.1616 +	/* start monitoring at kernel level */
  3.1617 +	pfm_set_psr_up();
  3.1618 +
  3.1619 +	/* enable dcr pp */
  3.1620 +	ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) | IA64_DCR_PP);
  3.1621 +	ia64_srlz_i();
  3.1622 +out:
  3.1623 +	UNPROTECT_CTX_NOIRQ(ctx);
  3.1624 +	return error;
  3.1625 +}
  3.1626 +
  3.1627 +static int
  3.1628 +__xenpfm_stop(void)
  3.1629 +{
  3.1630 +	pfm_context_t* ctx = __get_cpu_var(xenpfm_context);
  3.1631 +	int state;
  3.1632 +	int error = 0;
  3.1633 +
  3.1634 +	BUG_ON(local_irq_is_enabled());
  3.1635 +	PROTECT_CTX_NOIRQ(ctx);	
  3.1636 +	state = ctx->ctx_state;
  3.1637 +	if (state != PFM_CTX_LOADED) {
  3.1638 +		error = -EINVAL;
  3.1639 +		goto out;
  3.1640 +	}
  3.1641 +
  3.1642 +	/*
  3.1643 +	 * Update local PMU first
  3.1644 +	 *
  3.1645 +	 * disable dcr pp
  3.1646 +	 */
  3.1647 +	ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) & ~IA64_DCR_PP);
  3.1648 +	ia64_srlz_i();
  3.1649 +
  3.1650 +	/* update local cpuinfo */
  3.1651 +	PFM_CPUINFO_CLEAR(PFM_CPUINFO_DCR_PP);
  3.1652 +
  3.1653 +	/* stop monitoring, does srlz.i */
  3.1654 +	pfm_clear_psr_pp();
  3.1655 +
  3.1656 +	/* stop monitoring  at kernel level */
  3.1657 +	pfm_clear_psr_up();
  3.1658 +out:
  3.1659 +	UNPROTECT_CTX_NOIRQ(ctx);
  3.1660 +	return error;
  3.1661 +}
  3.1662 +
  3.1663 +int
  3.1664 +__xenpfm_start_stop(int is_start)
  3.1665 +{
  3.1666 +	if (is_start)
  3.1667 +		return __xenpfm_start();
  3.1668 +	else
  3.1669 +		return __xenpfm_stop();
  3.1670 +}
  3.1671 +
  3.1672 +struct xenpfm_start_arg {
  3.1673 +	int		is_start;
  3.1674 +	atomic_t	started;
  3.1675 +	atomic_t	finished;
  3.1676 +	int		error[NR_CPUS];
  3.1677 +};
  3.1678 +
  3.1679 +static void
  3.1680 +xenpfm_start_stop_cpu(void* info)
  3.1681 +{
  3.1682 +	unsigned long flags;
  3.1683 +	struct xenpfm_start_arg* arg = (struct xenpfm_start_arg*)info;
  3.1684 +
  3.1685 +	local_irq_save(flags);
  3.1686 +	atomic_inc(&arg->started);
  3.1687 +	while (!atomic_read(&arg->finished))
  3.1688 +		cpu_relax();
  3.1689 +
  3.1690 +	arg->error[smp_processor_id()] = __xenpfm_start_stop(arg->is_start);
  3.1691 +
  3.1692 +	atomic_inc(&arg->finished);
  3.1693 +	local_irq_restore(flags);
  3.1694 +}
  3.1695 +
  3.1696 +static void
  3.1697 +xenpfm_start_stop_vcpu(struct vcpu* v, int is_start)
  3.1698 +{
  3.1699 +	struct pt_regs *regs = vcpu_regs(v);
  3.1700 +
  3.1701 +	if (is_start) {
  3.1702 +		/* set user level psr.pp for the caller */
  3.1703 +		ia64_psr(regs)->pp = 1;
  3.1704 +
  3.1705 +		/* activate monitoring at user level */
  3.1706 +		ia64_psr(regs)->up = 1;
  3.1707 +
  3.1708 +		/* don't allow user level control */
  3.1709 +		ia64_psr(regs)->sp = 0;
  3.1710 +	} else {
  3.1711 +		/*
  3.1712 +		 * stop monitoring in the caller
  3.1713 +		 */
  3.1714 +		ia64_psr(regs)->pp = 0;
  3.1715 +		
  3.1716 +		/*
  3.1717 +	 	 * stop monitoring at the user level
  3.1718 +	 	 */
  3.1719 +		ia64_psr(regs)->up = 0;
  3.1720 +
  3.1721 +#if 0
  3.1722 +		/*
  3.1723 +		 * cancel user level control
  3.1724 +		 */
  3.1725 +		ia64_psr(regs)->sp = 1;
  3.1726 +#endif
  3.1727 +	}
  3.1728 +}
  3.1729 +
  3.1730 +static int
  3.1731 +xenpfm_start_stop_locked(int is_start)
  3.1732 +{
  3.1733 +	struct xenpfm_start_arg arg;
  3.1734 +	int cpus = num_online_cpus();
  3.1735 +	int cpu;
  3.1736 +	unsigned long flags;
  3.1737 +	struct domain* d;
  3.1738 +	struct vcpu* v;
  3.1739 +	int error = 0;
  3.1740 +
  3.1741 +	arg.is_start = is_start;
  3.1742 +	atomic_set(&arg.started, 1); /* 1 for this cpu */
  3.1743 +	atomic_set(&arg.finished, 0);
  3.1744 +	for_each_cpu(cpu)
  3.1745 +		arg.error[cpu] = 0;
  3.1746 +
  3.1747 +	BUG_ON(!spin_is_locked(&xenpfm_context_lock));
  3.1748 +	smp_call_function(&xenpfm_start_stop_cpu, &arg, 1, 0);
  3.1749 +	local_irq_save(flags);
  3.1750 +
  3.1751 +	while (atomic_read(&arg.started) != cpus)
  3.1752 +		cpu_relax();
  3.1753 +
  3.1754 +	for_each_domain(d) {
  3.1755 +		for_each_vcpu(d, v)
  3.1756 +			xenpfm_start_stop_vcpu(v, is_start);
  3.1757 +	}
  3.1758 +
  3.1759 +	arg.error[smp_processor_id()] = __xenpfm_start_stop(is_start);
  3.1760 +	atomic_inc(&arg.finished);
  3.1761 +
  3.1762 +	while (atomic_read(&arg.finished) != cpus)
  3.1763 +		cpu_relax();
  3.1764 +	local_irq_restore(flags);
  3.1765 +
  3.1766 +	for_each_online_cpu(cpu) {
   3.1767 +		if (arg.error[cpu]) {
  3.1768 +			gdprintk(XENLOG_INFO, "%s: cpu %d error %d\n", 
  3.1769 +				__func__, cpu, arg.error[cpu]);
  3.1770 +			error = arg.error[cpu];
  3.1771 +		}
  3.1772 +	}
  3.1773 +	return error;
  3.1774 +}
  3.1775 +
  3.1776 +static int
  3.1777 +xenpfm_start_stop(int is_start)
  3.1778 +{
  3.1779 +	int error;
  3.1780 +	
  3.1781 +	BUG_ON(in_irq());
  3.1782 +	read_lock(&domlist_lock);
  3.1783 +	spin_lock(&xenpfm_context_lock);
   3.1784 +	error = xenpfm_start_stop_locked(is_start);
  3.1785 +	spin_unlock(&xenpfm_context_lock);
  3.1786 +	read_unlock(&domlist_lock);
  3.1787 +
  3.1788 +	return error;
  3.1789 +}
  3.1790 +
  3.1791 +#define NONPRIV_OP(cmd) (((cmd) == PFM_GET_FEATURES))
  3.1792 +
  3.1793 +int
  3.1794 +do_perfmon_op(unsigned long cmd,
  3.1795 +	      XEN_GUEST_HANDLE(void) arg1, unsigned long count)
  3.1796 +{
  3.1797 +	unsigned long error = 0;
  3.1798 +
  3.1799 +	if (!NONPRIV_OP(cmd) && current->domain != xenoprof_primary_profiler) {
  3.1800 +		gdprintk(XENLOG_INFO, "xen perfmon: "
  3.1801 +			 "dom %d denied privileged operation %ld\n",
  3.1802 +			 current->domain->domain_id, cmd);
  3.1803 +		return -EPERM;
  3.1804 +	}
  3.1805 +	switch (cmd) {
  3.1806 +        case PFM_GET_FEATURES:
  3.1807 +		error = xenpfm_get_features(guest_handle_cast(arg1, pfarg_features_t));
  3.1808 +		break;
  3.1809 +
  3.1810 +        case PFM_CREATE_CONTEXT:
  3.1811 +		error = xenpfm_context_create(guest_handle_cast(arg1, pfarg_context_t));
  3.1812 +		break;
  3.1813 +        case PFM_DESTROY_CONTEXT:
  3.1814 +		error = xenpfm_context_destroy();
  3.1815 +		break;
  3.1816 +
  3.1817 +        case PFM_WRITE_PMCS:
  3.1818 +		error = xenpfm_write_pmcs(guest_handle_cast(arg1, pfarg_reg_t), count);
  3.1819 +		break;
  3.1820 +        case PFM_WRITE_PMDS:
  3.1821 +		error = xenpfm_write_pmds(guest_handle_cast(arg1, pfarg_reg_t), count);
  3.1822 +		break;
  3.1823 +        case PFM_READ_PMDS:
  3.1824 +		error = -ENOSYS;
  3.1825 +		break;
  3.1826 +        case PFM_GET_PMC_RESET_VAL:
  3.1827 +		error = -ENOSYS;
  3.1828 +		break;
  3.1829 +
  3.1830 +        case PFM_LOAD_CONTEXT:
  3.1831 +		error = xenpfm_context_load(guest_handle_cast(arg1, pfarg_load_t));
  3.1832 +		break;
  3.1833 +        case PFM_UNLOAD_CONTEXT:
  3.1834 +		error = xenpfm_context_unload();
  3.1835 +		break;
  3.1836 +
  3.1837 +        case PFM_START:
  3.1838 +		error = xenpfm_start_stop(1);
  3.1839 +		break;
  3.1840 +        case PFM_STOP:
  3.1841 +		error = xenpfm_start_stop(0);
  3.1842 +		break;
  3.1843 +        case PFM_RESTART:
  3.1844 +		error = -ENOSYS;
  3.1845 +		break;
  3.1846 +
  3.1847 +        case PFM_DEBUG:
  3.1848 +		error = -ENOSYS;
  3.1849 +		break;
  3.1850 +
  3.1851 +        case PFM_ENABLE:
  3.1852 +        case PFM_DISABLE:
  3.1853 +        case PFM_PROTECT_CONTEXT:
  3.1854 +        case PFM_UNPROTECT_CONTEXT:
  3.1855 +	default:
  3.1856 +		error = -EINVAL;
  3.1857 +		break;
  3.1858 +	}
  3.1859 +	return error;
  3.1860 +}
  3.1861 +#endif
     4.1 --- a/xen/arch/ia64/linux-xen/perfmon_default_smpl.c	Tue Nov 28 11:34:03 2006 -0700
     4.2 +++ b/xen/arch/ia64/linux-xen/perfmon_default_smpl.c	Tue Nov 28 21:35:13 2006 -0700
     4.3 @@ -16,9 +16,15 @@
     4.4  #include <asm/perfmon.h>
     4.5  #include <asm/perfmon_default_smpl.h>
     4.6  
     4.7 +#ifndef XEN
     4.8  MODULE_AUTHOR("Stephane Eranian <eranian@hpl.hp.com>");
     4.9  MODULE_DESCRIPTION("perfmon default sampling format");
    4.10  MODULE_LICENSE("GPL");
    4.11 +#endif
    4.12 +
    4.13 +#ifdef XEN
    4.14 +#define pid	vcpu_id
    4.15 +#endif
    4.16  
    4.17  #define DEFAULT_DEBUG 1
    4.18  
    4.19 @@ -157,7 +163,9 @@ default_handler(struct task_struct *task
    4.20  	 * system-wide:
    4.21  	 * 	- this is not necessarily the task controlling the session
    4.22  	 */
    4.23 +#ifndef XEN
    4.24  	ent->pid            = current->pid;
    4.25 +#endif
    4.26  	ent->ovfl_pmd  	    = ovfl_pmd;
    4.27  	ent->last_reset_val = arg->pmd_last_reset; //pmd[0].reg_last_reset_val;
    4.28  
    4.29 @@ -169,7 +177,9 @@ default_handler(struct task_struct *task
    4.30  	ent->tstamp    = stamp;
    4.31  	ent->cpu       = smp_processor_id();
    4.32  	ent->set       = arg->active_set;
    4.33 +#ifndef XEN
    4.34  	ent->tgid      = current->tgid;
    4.35 +#endif
    4.36  
    4.37  	/*
    4.38  	 * selectively store PMDs in increasing index number
    4.39 @@ -263,6 +273,7 @@ static pfm_buffer_fmt_t default_fmt={
    4.40   	.fmt_exit	    = default_exit,
    4.41  };
    4.42  
    4.43 +#ifndef XEN
    4.44  static int __init
    4.45  pfm_default_smpl_init_module(void)
    4.46  {
    4.47 @@ -282,6 +293,7 @@ pfm_default_smpl_init_module(void)
    4.48  
    4.49  	return ret;
    4.50  }
    4.51 +#endif
    4.52  
    4.53  static void __exit
    4.54  pfm_default_smpl_cleanup_module(void)
    4.55 @@ -292,6 +304,8 @@ pfm_default_smpl_cleanup_module(void)
    4.56  	printk("perfmon_default_smpl: unregister %s=%d\n", default_fmt.fmt_name, ret);
    4.57  }
    4.58  
    4.59 +#ifndef XEN
    4.60  module_init(pfm_default_smpl_init_module);
    4.61  module_exit(pfm_default_smpl_cleanup_module);
    4.62 +#endif
    4.63  
     5.1 --- a/xen/arch/ia64/linux/Makefile	Tue Nov 28 11:34:03 2006 -0700
     5.2 +++ b/xen/arch/ia64/linux/Makefile	Tue Nov 28 21:35:13 2006 -0700
     5.3 @@ -22,6 +22,7 @@ obj-y += __divdi3.o
     5.4  obj-y += __udivdi3.o
     5.5  obj-y += __moddi3.o
     5.6  obj-y += __umoddi3.o
     5.7 +obj-y += carta_random.o
     5.8  
     5.9  ## variants of divide/modulo
    5.10  ## see files in xen/arch/ia64/linux/lib (linux/arch/ia64/lib)
     6.1 --- a/xen/arch/ia64/xen/Makefile	Tue Nov 28 11:34:03 2006 -0700
     6.2 +++ b/xen/arch/ia64/xen/Makefile	Tue Nov 28 21:35:13 2006 -0700
     6.3 @@ -1,3 +1,5 @@
     6.4 +subdir-y += oprofile
     6.5 +
     6.6  obj-y += acpi.o
     6.7  obj-y += dom0_ops.o
     6.8  obj-y += domain.o
     7.1 --- a/xen/arch/ia64/xen/dom0_ops.c	Tue Nov 28 11:34:03 2006 -0700
     7.2 +++ b/xen/arch/ia64/xen/dom0_ops.c	Tue Nov 28 21:35:13 2006 -0700
     7.3 @@ -343,6 +343,12 @@ do_dom0vp_op(unsigned long cmd,
     7.4      case IA64_DOM0VP_expose_p2m:
     7.5          ret = dom0vp_expose_p2m(d, arg0, arg1, arg2, arg3);
     7.6          break;
     7.7 +    case IA64_DOM0VP_perfmon: {
     7.8 +        XEN_GUEST_HANDLE(void) hnd;
     7.9 +        set_xen_guest_handle(hnd, (void*)arg1);
    7.10 +        ret = do_perfmon_op(arg0, hnd, arg2);
    7.11 +        break;
    7.12 +    }
    7.13      default:
    7.14          ret = -1;
    7.15  		printk("unknown dom0_vp_op 0x%lx\n", cmd);
     8.1 --- a/xen/arch/ia64/xen/domain.c	Tue Nov 28 11:34:03 2006 -0700
     8.2 +++ b/xen/arch/ia64/xen/domain.c	Tue Nov 28 21:35:13 2006 -0700
     8.3 @@ -48,6 +48,7 @@
     8.4  #include <asm/shadow.h>
     8.5  #include <xen/guest_access.h>
     8.6  #include <asm/tlb_track.h>
     8.7 +#include <asm/perfmon.h>
     8.8  
     8.9  unsigned long dom0_size = 512*1024*1024;
    8.10  
    8.11 @@ -231,11 +232,35 @@ void continue_running(struct vcpu *same)
    8.12  	/* nothing to do */
    8.13  }
    8.14  
    8.15 +#ifdef CONFIG_PERFMON
    8.16 +static int pal_halt        = 1;
    8.17 +static int can_do_pal_halt = 1;
    8.18 +
    8.19 +static int __init nohalt_setup(char * str)
    8.20 +{
    8.21 +       pal_halt = can_do_pal_halt = 0;
    8.22 +       return 1;
    8.23 +}
    8.24 +__setup("nohalt", nohalt_setup);
    8.25 +
    8.26 +void
    8.27 +update_pal_halt_status(int status)
    8.28 +{
    8.29 +       can_do_pal_halt = pal_halt && status;
    8.30 +}
    8.31 +#else
    8.32 +#define can_do_pal_halt	(1)
    8.33 +#endif
    8.34 +
    8.35  static void default_idle(void)
    8.36  {
    8.37  	local_irq_disable();
    8.38 -	if ( !softirq_pending(smp_processor_id()) )
    8.39 -	        safe_halt();
    8.40 +	if ( !softirq_pending(smp_processor_id()) ) {
    8.41 +		if (can_do_pal_halt)
    8.42 +			safe_halt();
    8.43 +		else
    8.44 +			cpu_relax();
    8.45 +	}
    8.46  	local_irq_enable();
    8.47  }
    8.48  
    8.49 @@ -628,6 +653,9 @@ void domain_relinquish_resources(struct 
    8.50  
    8.51      if (d->arch.is_vti && d->arch.sal_data)
    8.52  	    xfree(d->arch.sal_data);
    8.53 +
    8.54 +    /* Free page used by xen oprofile buffer */
    8.55 +    free_xenoprof_pages(d);
    8.56  }
    8.57  
    8.58  unsigned long
     9.1 --- a/xen/arch/ia64/xen/hypercall.c	Tue Nov 28 11:34:03 2006 -0700
     9.2 +++ b/xen/arch/ia64/xen/hypercall.c	Tue Nov 28 21:35:13 2006 -0700
     9.3 @@ -68,7 +68,7 @@ const hypercall_t ia64_hypercall_table[N
     9.4  	(hypercall_t)do_ni_hypercall,		/* do_nmi_op */
     9.5  	(hypercall_t)do_sched_op,
     9.6  	(hypercall_t)do_callback_op,		/*  */                 /* 30 */
     9.7 -	(hypercall_t)do_ni_hypercall,		/*  */
     9.8 +	(hypercall_t)do_xenoprof_op,		/*  */
     9.9  	(hypercall_t)do_event_channel_op,
    9.10  	(hypercall_t)do_physdev_op,
    9.11  	(hypercall_t)do_hvm_op,			/*  */
    10.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    10.2 +++ b/xen/arch/ia64/xen/oprofile/Makefile	Tue Nov 28 21:35:13 2006 -0700
    10.3 @@ -0,0 +1,1 @@
    10.4 +obj-y += perfmon.o xenoprof.o
    11.1 --- a/xen/arch/ia64/xen/oprofile/perfmon.c	Tue Nov 28 11:34:03 2006 -0700
    11.2 +++ b/xen/arch/ia64/xen/oprofile/perfmon.c	Tue Nov 28 21:35:13 2006 -0700
    11.3 @@ -1,3 +1,25 @@
    11.4 +/******************************************************************************
    11.5 + * perfmon.c for xenoprof
    11.6 + * This is based on linux/arch/ia64/oprofile/perfmon.c, but heavily rewritten.
    11.7 + *
    11.8 + * Copyright (c) 2006 Isaku Yamahata <yamahata at valinux co jp>
    11.9 + *                    VA Linux Systems Japan K.K.
   11.10 + *
   11.11 + * This program is free software; you can redistribute it and/or modify
   11.12 + * it under the terms of the GNU General Public License as published by
   11.13 + * the Free Software Foundation; either version 2 of the License, or
   11.14 + * (at your option) any later version.
   11.15 + *
   11.16 + * This program is distributed in the hope that it will be useful,
   11.17 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
   11.18 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   11.19 + * GNU General Public License for more details.
   11.20 + *
   11.21 + * You should have received a copy of the GNU General Public License
   11.22 + * along with this program; if not, write to the Free Software
   11.23 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
   11.24 + *
   11.25 + */
   11.26  /**
   11.27   * @file perfmon.c
   11.28   *
   11.29 @@ -7,94 +29,172 @@
   11.30   * @author John Levon <levon@movementarian.org>
   11.31   */
   11.32  
   11.33 -#include <linux/kernel.h>
   11.34 -#include <linux/config.h>
   11.35 -#include <linux/oprofile.h>
   11.36 -#include <linux/sched.h>
   11.37 +#include <xen/config.h>
   11.38 +#include <xen/sched.h>
   11.39 +#include <xen/event.h>
   11.40 +#include <xen/xenoprof.h>
   11.41  #include <asm/perfmon.h>
   11.42  #include <asm/ptrace.h>
   11.43 -#include <asm/errno.h>
   11.44  
   11.45 +// XXX move them to an appropriate header file
   11.46 +extern void xenoprof_log_event(struct vcpu *vcpu,
   11.47 +                               unsigned long eip, int mode, int event);
   11.48 +extern int is_active(struct domain *d);
   11.49 +
   11.50 +static int allow_virq;
   11.51  static int allow_ints;
   11.52  
   11.53  static int
   11.54 -perfmon_handler(struct task_struct *task, void *buf, pfm_ovfl_arg_t *arg,
   11.55 -                struct pt_regs *regs, unsigned long stamp)
   11.56 +xenoprof_handler(struct task_struct *task, void *buf, pfm_ovfl_arg_t *arg,
   11.57 +                 struct pt_regs *regs, unsigned long stamp)
   11.58  {
   11.59 -	int event = arg->pmd_eventid;
   11.60 +    unsigned long ip = regs->cr_iip;
   11.61 +    int event = arg->pmd_eventid;
   11.62   
   11.63 -	arg->ovfl_ctrl.bits.reset_ovfl_pmds = 1;
   11.64 +    arg->ovfl_ctrl.bits.reset_ovfl_pmds = 1;
   11.65 +    if (!allow_virq || !allow_ints)
   11.66 +        return 0;
   11.67  
   11.68 -	/* the owner of the oprofile event buffer may have exited
   11.69 -	 * without perfmon being shutdown (e.g. SIGSEGV)
   11.70 -	 */
   11.71 -	if (allow_ints)
   11.72 -		oprofile_add_sample(regs, event);
   11.73 -	return 0;
   11.74 +    xenoprof_log_event(current, ip, xenoprofile_get_mode(task, regs), event);
   11.75 +    
   11.76 +    // send VIRQ_XENOPROF
   11.77 +    if (is_active(current->domain) && !ring_0(regs))
   11.78 +        send_guest_vcpu_virq(current, VIRQ_XENOPROF);
   11.79 +
   11.80 +    return 0;
   11.81  }
   11.82  
   11.83 +// same as linux OPROFILE_FMT_UUID
   11.84 +#define XENOPROF_FMT_UUID { \
   11.85 +    0x77, 0x7a, 0x6e, 0x61, 0x20, 0x65, 0x73, 0x69, 0x74, 0x6e, 0x72, 0x20, 0x61, 0x65, 0x0a, 0x6c }
   11.86  
   11.87 -static int perfmon_start(void)
   11.88 +static pfm_buffer_fmt_t xenoprof_fmt = {
   11.89 +    .fmt_name    = "xenoprof_format",
   11.90 +    .fmt_uuid    = XENOPROF_FMT_UUID,
   11.91 +    .fmt_handler = xenoprof_handler,
   11.92 +};
   11.93 +
   11.94 +static char * get_cpu_type(void)
   11.95 +{
   11.96 +    __u8 family = local_cpu_data->family;
   11.97 +
   11.98 +    switch (family) {
   11.99 +        case 0x07:
  11.100 +            return "ia64/itanium";
  11.101 +        case 0x1f:
  11.102 +            return "ia64/itanium2";
  11.103 +        default:
  11.104 +            return "ia64/ia64";
  11.105 +    }
  11.106 +}
  11.107 +
  11.108 +static int using_xenoprof;
  11.109 +
  11.110 +int __init
  11.111 +xenprof_perfmon_init(void)
  11.112 +{
  11.113 +    int ret = pfm_register_buffer_fmt(&xenoprof_fmt);
  11.114 +    if (ret)
  11.115 +        return -ENODEV;
  11.116 +    using_xenoprof = 1;
  11.117 +    printk("xenoprof: using perfmon.\n");
  11.118 +    return 0;
  11.119 +}
  11.120 +__initcall(xenprof_perfmon_init);
  11.121 +
  11.122 +#ifdef notyet
  11.123 +void xenoprof_perfmon_exit(void)
  11.124 +{
  11.125 +    if (!using_xenoprof)
  11.126 +        return;
  11.127 +
  11.128 +    pfm_unregister_buffer_fmt(xenoprof_fmt.fmt_uuid);
  11.129 +}
  11.130 +__exitcall(xenoprof_perfmon_exit);
  11.131 +#endif
  11.132 +
  11.133 +///////////////////////////////////////////////////////////////////////////
  11.134 +// glue methods for xenoprof and perfmon.
  11.135 +int
  11.136 +xenoprof_arch_init(int *num_events, int *is_primary, char *cpu_type)
  11.137 +{
  11.138 +    *num_events = 0;
  11.139 +    strncpy(cpu_type, get_cpu_type(), XENOPROF_CPU_TYPE_SIZE - 1);
  11.140 +    cpu_type[XENOPROF_CPU_TYPE_SIZE - 1] = '\0';
  11.141 +
  11.142 +    *is_primary = 0;
  11.143 +    if (xenoprof_primary_profiler == NULL) {
  11.144 +        /* For now, only dom0 can be the primary profiler */
  11.145 +        if (current->domain->domain_id == 0) {
  11.146 +            *is_primary = 1;
  11.147 +        }
  11.148 +    } else if (xenoprof_primary_profiler == current->domain)
  11.149 +        *is_primary = 1;
  11.150 +    return 0;
  11.151 +}
  11.152 +
  11.153 +int
  11.154 +xenoprof_arch_reserve_counters(void)
  11.155 +{
  11.156 +    // perfmon takes care
  11.157 +    return 0;
  11.158 +}
  11.159 +
  11.160 +int
  11.161 +xenoprof_arch_counter(XEN_GUEST_HANDLE(void) arg)
  11.162 +{
  11.163 +    return -ENOSYS;
  11.164 +}
  11.165 +
  11.166 +int
  11.167 +xenoprof_arch_setup_events(void)
  11.168 +{
  11.169 +    // perfmon takes care
  11.170 +    return 0;
  11.171 +}
  11.172 +
  11.173 +//XXX SMP: sync by IPI?
  11.174 +int
  11.175 +xenoprof_arch_enable_virq(void)
  11.176 +{
  11.177 +    allow_virq = 1;
  11.178 +    return 0;
  11.179 +}
  11.180 +
  11.181 +//XXX SMP: sync by IPI?
  11.182 +int
  11.183 +xenoprof_arch_start(void)
  11.184  {
  11.185  	allow_ints = 1;
  11.186 -	return 0;
  11.187 +    return 0;
  11.188  }
  11.189  
  11.190 -
  11.191 -static void perfmon_stop(void)
  11.192 +//XXX SMP: sync by IPI?
  11.193 +void
  11.194 +xenoprof_arch_stop(void)
  11.195  {
  11.196  	allow_ints = 0;
  11.197  }
  11.198  
  11.199 -
  11.200 -#define OPROFILE_FMT_UUID { \
  11.201 -	0x77, 0x7a, 0x6e, 0x61, 0x20, 0x65, 0x73, 0x69, 0x74, 0x6e, 0x72, 0x20, 0x61, 0x65, 0x0a, 0x6c }
  11.202 -
  11.203 -static pfm_buffer_fmt_t oprofile_fmt = {
  11.204 - 	.fmt_name 	    = "oprofile_format",
  11.205 - 	.fmt_uuid	    = OPROFILE_FMT_UUID,
  11.206 - 	.fmt_handler	    = perfmon_handler,
  11.207 -};
  11.208 -
  11.209 -
  11.210 -static char * get_cpu_type(void)
  11.211 +//XXX SMP: sync by IPI?
  11.212 +void
  11.213 +xenoprof_arch_disable_virq(void)
  11.214  {
  11.215 -	__u8 family = local_cpu_data->family;
  11.216 -
  11.217 -	switch (family) {
  11.218 -		case 0x07:
  11.219 -			return "ia64/itanium";
  11.220 -		case 0x1f:
  11.221 -			return "ia64/itanium2";
  11.222 -		default:
  11.223 -			return "ia64/ia64";
  11.224 -	}
  11.225 +    allow_virq = 0;
  11.226  }
  11.227  
  11.228 -
  11.229 -/* all the ops are handled via userspace for IA64 perfmon */
  11.230 -
  11.231 -static int using_perfmon;
  11.232 -
  11.233 -int perfmon_init(struct oprofile_operations * ops)
  11.234 +void
  11.235 +xenoprof_arch_release_counters(void)
  11.236  {
  11.237 -	int ret = pfm_register_buffer_fmt(&oprofile_fmt);
  11.238 -	if (ret)
  11.239 -		return -ENODEV;
  11.240 -
  11.241 -	ops->cpu_type = get_cpu_type();
  11.242 -	ops->start = perfmon_start;
  11.243 -	ops->stop = perfmon_stop;
  11.244 -	using_perfmon = 1;
  11.245 -	printk(KERN_INFO "oprofile: using perfmon.\n");
  11.246 -	return 0;
  11.247 +    // perfmon takes care
  11.248  }
  11.249  
  11.250 -
  11.251 -void perfmon_exit(void)
  11.252 -{
  11.253 -	if (!using_perfmon)
  11.254 -		return;
  11.255 -
  11.256 -	pfm_unregister_buffer_fmt(oprofile_fmt.fmt_uuid);
  11.257 -}
  11.258 +/*
  11.259 + * Local variables:
  11.260 + * mode: C
  11.261 + * c-set-style: "BSD"
  11.262 + * c-basic-offset: 4
  11.263 + * tab-width: 4
  11.264 + * indent-tabs-mode: nil
  11.265 + * End:
  11.266 + */
    12.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    12.2 +++ b/xen/arch/ia64/xen/oprofile/xenoprof.c	Tue Nov 28 21:35:13 2006 -0700
    12.3 @@ -0,0 +1,56 @@
    12.4 +/******************************************************************************
    12.5 + * xenoprof.c 
    12.6 + *
    12.7 + * Copyright (c) 2006 Isaku Yamahata <yamahata at valinux co jp>
    12.8 + *                    VA Linux Systems Japan K.K.
    12.9 + *
   12.10 + * This program is free software; you can redistribute it and/or modify
   12.11 + * it under the terms of the GNU General Public License as published by
   12.12 + * the Free Software Foundation; either version 2 of the License, or
   12.13 + * (at your option) any later version.
   12.14 + *
   12.15 + * This program is distributed in the hope that it will be useful,
   12.16 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
   12.17 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   12.18 + * GNU General Public License for more details.
   12.19 + *
   12.20 + * You should have received a copy of the GNU General Public License
   12.21 + * along with this program; if not, write to the Free Software
   12.22 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
   12.23 + *
   12.24 + */
   12.25 +
   12.26 +#include <xen/config.h>
   12.27 +#include <xen/sched.h>
   12.28 +#include <public/xen.h>
   12.29 +#include <xen/xenoprof.h>
   12.30 +
   12.31 +int
   12.32 +xenoprofile_get_mode(struct vcpu *v, struct cpu_user_regs * const regs)
   12.33 +{
   12.34 +    int mode = 0;
   12.35 +
   12.36 +    // mode
   12.37 +    // 0: user, 1: kernel, 2: xen
   12.38 +    // Xen/IA64 uses ring2 for kernel, and doesn't use ring1.
   12.39 +    if (ring_2(regs))
   12.40 +        mode = 1;
   12.41 +    else if (ring_0(regs))
   12.42 +        mode = 2;
   12.43 +    else if (ring_1(regs)) {
   12.44 +        gdprintk(XENLOG_ERR, "%s:%d ring1 is used!\n", __func__, __LINE__);
   12.45 +        mode = 1;// fall back to kernel mode.
   12.46 +    }
   12.47 +
   12.48 +    return mode;
   12.49 +}
   12.50 +
   12.51 +/*
   12.52 + * Local variables:
   12.53 + * mode: C
   12.54 + * c-set-style: "BSD"
   12.55 + * c-basic-offset: 4
   12.56 + * tab-width: 4
   12.57 + * indent-tabs-mode: nil
   12.58 + * End:
   12.59 + */
    13.1 --- a/xen/include/asm-ia64/config.h	Tue Nov 28 11:34:03 2006 -0700
    13.2 +++ b/xen/include/asm-ia64/config.h	Tue Nov 28 21:35:13 2006 -0700
    13.3 @@ -125,6 +125,7 @@ extern char _end[]; /* standard ELF symb
    13.4  // from include/asm-ia64/smp.h
    13.5  #define	get_cpu()	smp_processor_id()
    13.6  #define put_cpu()	do {} while(0)
    13.7 +#define put_cpu_no_resched()	do{} while (0)
    13.8  
    13.9  // needed for common/dom0_ops.c until hyperthreading is supported
   13.10  #ifdef CONFIG_SMP
   13.11 @@ -166,6 +167,8 @@ extern int smp_num_siblings;
   13.12  #define ____cacheline_aligned_in_smp ____cacheline_aligned
   13.13  #endif
   13.14  
   13.15 +#define CONFIG_PERFMON
   13.16 +
   13.17  #ifndef __ASSEMBLY__
   13.18  #include "asm/types.h"	// for u64
   13.19  #include "linux/linkage.h"	// for asmlinkage which is used by
    14.1 --- a/xen/include/asm-ia64/domain.h	Tue Nov 28 11:34:03 2006 -0700
    14.2 +++ b/xen/include/asm-ia64/domain.h	Tue Nov 28 21:35:13 2006 -0700
    14.3 @@ -211,6 +211,10 @@ struct arch_vcpu {
    14.4  #define IO_PORTS_PADDR          0x00000ffffc000000UL
    14.5  #define IO_PORTS_SIZE           0x0000000004000000UL
    14.6  
    14.7 +int
    14.8 +do_perfmon_op(unsigned long cmd,
    14.9 +              XEN_GUEST_HANDLE(void) arg1, unsigned long arg2);
   14.10 +
   14.11  #endif /* __ASM_DOMAIN_H__ */
   14.12  
   14.13  /*
    15.1 --- a/xen/include/asm-ia64/linux-xen/asm/perfmon.h	Tue Nov 28 11:34:03 2006 -0700
    15.2 +++ b/xen/include/asm-ia64/linux-xen/asm/perfmon.h	Tue Nov 28 21:35:13 2006 -0700
    15.3 @@ -6,6 +6,14 @@
    15.4  #ifndef _ASM_IA64_PERFMON_H
    15.5  #define _ASM_IA64_PERFMON_H
    15.6  
    15.7 +#ifdef XEN
    15.8 +#include <asm/config.h>
    15.9 +#ifndef pt_regs
   15.10 +#define pt_regs cpu_user_regs
   15.11 +#endif
   15.12 +struct cpu_user_regs;
   15.13 +#endif
   15.14 +
   15.15  /*
   15.16   * perfmon comamnds supported on all CPU models
   15.17   */
    16.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    16.2 +++ b/xen/include/asm-ia64/xenoprof.h	Tue Nov 28 21:35:13 2006 -0700
    16.3 @@ -0,0 +1,64 @@
    16.4 +/******************************************************************************
    16.5 + * asm-ia64/xenoprof.h
    16.6 + * xenoprof ia64 arch specific header file
    16.7 + *
    16.8 + * Copyright (c) 2006 Isaku Yamahata <yamahata at valinux co jp>
    16.9 + *                    VA Linux Systems Japan K.K.
   16.10 + *
   16.11 + * This program is free software; you can redistribute it and/or modify
   16.12 + * it under the terms of the GNU General Public License as published by
   16.13 + * the Free Software Foundation; either version 2 of the License, or
   16.14 + * (at your option) any later version.
   16.15 + *
   16.16 + * This program is distributed in the hope that it will be useful,
   16.17 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
   16.18 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   16.19 + * GNU General Public License for more details.
   16.20 + *
   16.21 + * You should have received a copy of the GNU General Public License
   16.22 + * along with this program; if not, write to the Free Software
   16.23 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
   16.24 + *
   16.25 + */
   16.26 +
   16.27 +#ifndef __ASM_XENOPROF_H__
   16.28 +#define __ASM_XENOPROF_H__
   16.29 +
   16.30 +int xenoprof_arch_init(int *num_events, int *is_primary, char *cpu_type);
   16.31 +int xenoprof_arch_reserve_counters(void);
   16.32 +int xenoprof_arch_counter(XEN_GUEST_HANDLE(void) arg);
   16.33 +int xenoprof_arch_setup_events(void);
   16.34 +int xenoprof_arch_enable_virq(void);
   16.35 +int xenoprof_arch_start(void);
   16.36 +void xenoprof_arch_stop(void);
   16.37 +void xenoprof_arch_disable_virq(void);
   16.38 +void xenoprof_arch_release_counters(void);
   16.39 +
   16.40 +struct vcpu;
   16.41 +struct cpu_user_regs;
   16.42 +int xenoprofile_get_mode(struct vcpu *v, struct cpu_user_regs * const regs);
   16.43 +
   16.44 +#define xenoprof_shared_gmfn(d, gmaddr, maddr)  \
   16.45 +    assign_domain_page((d), (gmaddr), (maddr));
   16.46 +
   16.47 +static inline int
   16.48 +ring(const struct pt_regs* regs)
   16.49 +{
   16.50 +    return ((struct ia64_psr*)(&(regs)->cr_ipsr))->cpl;
   16.51 +}
   16.52 +#define ring_0(r)       (ring(r) == 0)
   16.53 +#define ring_1(r)       (ring(r) == 1)
   16.54 +#define ring_2(r)       (ring(r) == 2)
   16.55 +#define ring_3(r)       (ring(r) == 3)
   16.56 +
   16.57 +#endif /* __ASM_XENOPROF_H__ */
   16.58 +
   16.59 +/*
   16.60 + * Local variables:
   16.61 + * mode: C
   16.62 + * c-set-style: "BSD"
   16.63 + * c-basic-offset: 4
   16.64 + * tab-width: 4
   16.65 + * indent-tabs-mode: nil
   16.66 + * End:
   16.67 + */
    17.1 --- a/xen/include/public/arch-ia64.h	Tue Nov 28 11:34:03 2006 -0700
    17.2 +++ b/xen/include/public/arch-ia64.h	Tue Nov 28 21:35:13 2006 -0700
    17.3 @@ -376,6 +376,9 @@ DEFINE_XEN_GUEST_HANDLE(vcpu_guest_conte
    17.4  /* expose the p2m table into domain */
    17.5  #define IA64_DOM0VP_expose_p2m          7
    17.6  
    17.7 +/* xen perfmon */
    17.8 +#define IA64_DOM0VP_perfmon             8
    17.9 +
   17.10  // flags for page assignement to pseudo physical address space
   17.11  #define _ASSIGN_readonly                0
   17.12  #define ASSIGN_readonly                 (1UL << _ASSIGN_readonly)
   17.13 @@ -462,6 +465,25 @@ struct xen_ia64_boot_param {
   17.14    (((unsigned long)(addr) & XENCOMM_INLINE_MASK) == XENCOMM_INLINE_FLAG)
   17.15  #define XENCOMM_INLINE_ADDR(addr) \
   17.16    ((unsigned long)(addr) & ~XENCOMM_INLINE_MASK)
   17.17 +
   17.18 +/* xen perfmon */
   17.19 +#ifdef XEN
   17.20 +#ifndef __ASSEMBLY__
   17.21 +#ifndef _ASM_IA64_PERFMON_H
   17.22 +
   17.23 +#include <xen/list.h>   // asm/perfmon.h requires struct list_head
   17.24 +#include <asm/perfmon.h>
   17.25 +// for PFM_xxx and pfarg_features_t, pfarg_context_t, pfarg_reg_t, pfarg_load_t
   17.26 +
   17.27 +#endif /* _ASM_IA64_PERFMON_H */
   17.28 +
   17.29 +DEFINE_XEN_GUEST_HANDLE(pfarg_features_t);
   17.30 +DEFINE_XEN_GUEST_HANDLE(pfarg_context_t);
   17.31 +DEFINE_XEN_GUEST_HANDLE(pfarg_reg_t);
   17.32 +DEFINE_XEN_GUEST_HANDLE(pfarg_load_t);
   17.33 +#endif /* __ASSEMBLY__ */
   17.34 +#endif /* XEN */
   17.35 +
   17.36  #endif /* __HYPERVISOR_IF_IA64_H__ */
   17.37  
   17.38  /*