ia64/xen-unstable

changeset 10929:7cde0d938ef4

[IA64] convert more privop_stat to perfc

Convert most of the privop statistics (privop, hyperprivop and reflection
counters) from the ad-hoc counter arrays in privop_stat.c to generic perfc
counters, and drop the now-unused dump/zero helpers for those arrays.
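
For reference, the conversion pattern looks like this (an illustrative
sketch, not additional code in this changeset): the open-coded counter
arrays are replaced by counters declared in xen/include/asm-ia64/perfc_defn.h
and bumped through the generic perfc macros.

    /* Before: ad-hoc global arrays incremented by hand. */
    slow_reflect_count[vector >> 8]++;
    privcnt.rfi++;

    /* After: declare the counters once in perfc_defn.h ... */
    PERFCOUNTER_CPU(rfi,            "privop rfi")
    PERFCOUNTER_ARRAY(slow_reflect, "slow reflection", 0x80)

    /* ... and increment them via the generic perfc macros. */
    perfc_incrc(rfi);                        /* per-cpu scalar counter */
    perfc_incra(slow_reflect, vector >> 8);  /* indexed array counter  */

Because the perfc counters are 32-bit, the assembly fast paths switch from
8-byte accesses (ld8/st8, stride 8) to 4-byte accesses (ld4/st4, stride 4)
and address the counters as offsets from the perfcounters symbol, using the
offsets exported by asm-offsets.c.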

Signed-off-by: Tristan Gingold <tristan.gingold@bull.net>
author awilliam@xenbuild.aw
date Fri Aug 04 09:02:43 2006 -0600 (2006-08-04)
parents e7394daf098d
children 679683333917
files xen/arch/ia64/asm-offsets.c xen/arch/ia64/xen/domain.c xen/arch/ia64/xen/faults.c xen/arch/ia64/xen/hyperprivop.S xen/arch/ia64/xen/privop.c xen/arch/ia64/xen/privop_stat.c xen/arch/ia64/xen/vhpt.c xen/common/page_alloc.c xen/include/asm-ia64/perfc_defn.h xen/include/asm-ia64/privop_stat.h xen/include/asm-ia64/vhpt.h
line diff
     1.1 --- a/xen/arch/ia64/asm-offsets.c	Fri Aug 04 08:37:24 2006 -0600
     1.2 +++ b/xen/arch/ia64/asm-offsets.c	Fri Aug 04 09:02:43 2006 -0600
     1.3 @@ -215,5 +215,7 @@ void foo(void)
     1.4  	BLANK();
     1.5  	DEFINE(RECOVER_TO_PAGE_FAULT_PERFC_OFS, offsetof (struct perfcounter, recover_to_page_fault));
     1.6  	DEFINE(RECOVER_TO_BREAK_FAULT_PERFC_OFS, offsetof (struct perfcounter, recover_to_break_fault));
     1.7 +	DEFINE(FAST_HYPERPRIVOP_PERFC_OFS, offsetof (struct perfcounter, fast_hyperprivop));
     1.8 +	DEFINE(FAST_REFLECT_PERFC_OFS, offsetof (struct perfcounter, fast_reflect));
     1.9  #endif
    1.10  }
     2.1 --- a/xen/arch/ia64/xen/domain.c	Fri Aug 04 08:37:24 2006 -0600
     2.2 +++ b/xen/arch/ia64/xen/domain.c	Fri Aug 04 09:02:43 2006 -0600
     2.3 @@ -136,7 +136,6 @@ void context_switch(struct vcpu *prev, s
     2.4      uint64_t pta;
     2.5  
     2.6      local_irq_save(spsr);
     2.7 -    perfc_incrc(context_switch);
     2.8  
     2.9      __ia64_save_fpu(prev->arch._thread.fph);
    2.10      __ia64_load_fpu(next->arch._thread.fph);
     3.1 --- a/xen/arch/ia64/xen/faults.c	Fri Aug 04 08:37:24 2006 -0600
     3.2 +++ b/xen/arch/ia64/xen/faults.c	Fri Aug 04 09:02:43 2006 -0600
     3.3 @@ -51,8 +51,6 @@ extern IA64FAULT ia64_hypercall(struct p
     3.4  
     3.5  extern void do_ssc(unsigned long ssc, struct pt_regs *regs);
     3.6  
     3.7 -#define inc_slow_reflect_count(vec) slow_reflect_count[vec>>8]++;
     3.8 -
     3.9  // should never panic domain... if it does, stack may have been overrun
    3.10  void check_bad_nested_interruption(unsigned long isr, struct pt_regs *regs, unsigned long vector)
    3.11  {
    3.12 @@ -92,7 +90,7 @@ void reflect_interruption(unsigned long 
    3.13  	v->vcpu_info->evtchn_upcall_mask = 1;
    3.14  	PSCB(v,interrupt_collection_enabled) = 0;
    3.15  
    3.16 -	inc_slow_reflect_count(vector);
    3.17 +	perfc_incra(slow_reflect, vector >> 8);
    3.18  }
    3.19  
    3.20  static unsigned long pending_false_positive = 0;
    3.21 @@ -247,7 +245,7 @@ void ia64_do_page_fault (unsigned long a
    3.22  		regs->cr_ipsr = (regs->cr_ipsr & ~DELIVER_PSR_CLR) | DELIVER_PSR_SET;
    3.23  		// NOTE: nested trap must NOT pass PSCB address
    3.24  		//regs->r31 = (unsigned long) &PSCB(current);
    3.25 -		inc_slow_reflect_count(fault);
    3.26 +		perfc_incra(slow_reflect, fault >> 8);
    3.27  		return;
    3.28  	}
    3.29  
     4.1 --- a/xen/arch/ia64/xen/hyperprivop.S	Fri Aug 04 08:37:24 2006 -0600
     4.2 +++ b/xen/arch/ia64/xen/hyperprivop.S	Fri Aug 04 09:02:43 2006 -0600
     4.3 @@ -22,34 +22,39 @@
     4.4  #define _PAGE_PL_2	(2<<7)
     4.5  
     4.6  #if 1	 // change to 0 to turn off all fast paths
     4.7 -#define FAST_HYPERPRIVOPS
     4.8 -#define FAST_HYPERPRIVOP_CNT
     4.9 -#define FAST_REFLECT_CNT
    4.10 +# define FAST_HYPERPRIVOPS
    4.11 +# ifdef PERF_COUNTERS
    4.12 +#  define FAST_HYPERPRIVOP_CNT
    4.13 +#  define FAST_HYPERPRIVOP_PERFC(N) \
    4.14 +	(perfcounters + FAST_HYPERPRIVOP_PERFC_OFS + (4 * N))
    4.15 +#  define FAST_REFLECT_CNT
    4.16 +# endif
    4.17 +	
    4.18  //#define FAST_TICK // mostly working (unat problems) but default off for now
    4.19  //#define FAST_TLB_MISS_REFLECT	// mostly working but default off for now
    4.20 -#ifdef CONFIG_XEN_IA64_DOM0_VP
    4.21 -#undef FAST_ITC	//XXX CONFIG_XEN_IA64_DOM0_VP
    4.22 +# ifdef CONFIG_XEN_IA64_DOM0_VP
    4.23 +#  undef FAST_ITC	//XXX CONFIG_XEN_IA64_DOM0_VP
    4.24  		//    TODO fast_itc doesn't suport dom0 vp yet.
    4.25 -#else
    4.26 -//#define FAST_ITC	// to be reviewed
    4.27 -#endif
    4.28 -#define FAST_BREAK
    4.29 -#ifndef CONFIG_XEN_IA64_DOM0_VP
    4.30 -# define FAST_ACCESS_REFLECT
    4.31 -#else
    4.32 -# undef FAST_ACCESS_REFLECT //XXX CONFIG_XEN_IA64_DOM0_VP
    4.33 +# else
    4.34 +//#  define FAST_ITC	// to be reviewed
    4.35 +# endif
    4.36 +# define FAST_BREAK
    4.37 +# ifndef CONFIG_XEN_IA64_DOM0_VP
    4.38 +#  define FAST_ACCESS_REFLECT
    4.39 +# else
    4.40 +#  undef FAST_ACCESS_REFLECT //XXX CONFIG_XEN_IA64_DOM0_VP
    4.41                              //    TODO fast_access_reflect
    4.42                              //    doesn't support dom0 vp yet.
    4.43 -#endif
    4.44 -#define FAST_RFI
    4.45 -#define FAST_SSM_I
    4.46 -#define FAST_PTC_GA
    4.47 -#undef RFI_TO_INTERRUPT // not working yet
    4.48 +# endif
    4.49 +# define FAST_RFI
    4.50 +# define FAST_SSM_I
    4.51 +# define FAST_PTC_GA
    4.52 +# undef RFI_TO_INTERRUPT // not working yet
    4.53  #endif
    4.54  
    4.55  #ifdef CONFIG_SMP
    4.56 -//#warning "FIXME: ptc.ga instruction requires spinlock for SMP"
    4.57 -#undef FAST_PTC_GA
    4.58 + //#warning "FIXME: ptc.ga instruction requires spinlock for SMP"
    4.59 + #undef FAST_PTC_GA
    4.60  #endif
    4.61  
    4.62  // FIXME: turn off for now... but NaTs may crash Xen so re-enable soon!
    4.63 @@ -237,10 +242,10 @@ ENTRY(hyper_ssm_i)
    4.64  	cmp.ne p7,p0=r21,r0
    4.65  (p7)	br.sptk.many dispatch_break_fault ;;
    4.66  #ifdef FAST_HYPERPRIVOP_CNT
    4.67 -	movl r20=fast_hyperpriv_cnt+(8*HYPERPRIVOP_SSM_I);;
    4.68 -	ld8 r21=[r20];;
    4.69 +	movl r20=FAST_HYPERPRIVOP_PERFC(HYPERPRIVOP_SSM_I);;
    4.70 +	ld4 r21=[r20];;
    4.71  	adds r21=1,r21;;
    4.72 -	st8 [r20]=r21;;
    4.73 +	st4 [r20]=r21;;
    4.74  #endif
    4.75  	// set shared_mem iip to instruction after HYPER_SSM_I
    4.76  	extr.u r20=r30,41,2 ;;
    4.77 @@ -373,10 +378,10 @@ GLOBAL_ENTRY(fast_tick_reflect)
    4.78  	mov rp=r29;;
    4.79  	mov cr.itm=r26;;	// ensure next tick
    4.80  #ifdef FAST_REFLECT_CNT
    4.81 -	movl r20=fast_reflect_count+((0x3000>>8)*8);;
    4.82 -	ld8 r21=[r20];;
    4.83 +	movl r20=perfcounters+FAST_REFLECT_PERFC_OFS+((0x3000>>8)*4);;
    4.84 +	ld4 r21=[r20];;
    4.85  	adds r21=1,r21;;
    4.86 -	st8 [r20]=r21;;
    4.87 +	st4 [r20]=r21;;
    4.88  #endif
    4.89  	// vcpu_pend_timer(current)
    4.90  	movl r18=THIS_CPU(current_psr_ic_addr)
    4.91 @@ -611,12 +616,12 @@ END(fast_break_reflect)
    4.92  //	r31 == pr
    4.93  ENTRY(fast_reflect)
    4.94  #ifdef FAST_REFLECT_CNT
    4.95 -	movl r22=fast_reflect_count;
    4.96 -	shr r23=r20,5;;
    4.97 +	movl r22=perfcounters+FAST_REFLECT_PERFC_OFS;
    4.98 +	shr r23=r20,8-2;;
    4.99  	add r22=r22,r23;;
   4.100 -	ld8 r21=[r22];;
   4.101 +	ld4 r21=[r22];;
   4.102  	adds r21=1,r21;;
   4.103 -	st8 [r22]=r21;;
   4.104 +	st4 [r22]=r21;;
   4.105  #endif
   4.106  	// save iip in shared_info (DON'T POINT TO NEXT INSTRUCTION!)
   4.107  	adds r21=XSI_IIP_OFS-XSI_PSR_IC_OFS,r18 ;;
   4.108 @@ -948,10 +953,10 @@ 1:	// check the guest VHPT
   4.109  (p7)	br.cond.spnt.few page_not_present;;
   4.110  
   4.111  #ifdef FAST_REFLECT_CNT
   4.112 -	movl r21=fast_vhpt_translate_count;;
   4.113 -	ld8 r22=[r21];;
    4.114 +	movl r21=perfcounters+FAST_VHPT_TRANSLATE_PERFC_OFS;;
   4.115 +	ld4 r22=[r21];;
   4.116  	adds r22=1,r22;;
   4.117 -	st8 [r21]=r22;;
   4.118 +	st4 [r21]=r22;;
   4.119  #endif
   4.120  
   4.121  // prepare for fast_insert(PSCB(ifa),PSCB(itir),r16=pte)
   4.122 @@ -979,9 +984,9 @@ END(fast_tlb_miss_reflect)
   4.123  ENTRY(recover_and_page_fault)
   4.124  #ifdef PERF_COUNTERS
   4.125  	movl r21=perfcounters + RECOVER_TO_PAGE_FAULT_PERFC_OFS;;
   4.126 -	ld8 r22=[r21];;
   4.127 +	ld4 r22=[r21];;
   4.128  	adds r22=1,r22;;
   4.129 -	st8 [r21]=r22;;
   4.130 +	st4 [r21]=r22;;
   4.131  #endif
   4.132  	mov b0=r29;;
   4.133  	br.cond.sptk.many page_fault;;
   4.134 @@ -1083,10 +1088,10 @@ 1:
   4.135  
   4.136  1:	// OK now, let's do an rfi.
   4.137  #ifdef FAST_HYPERPRIVOP_CNT
   4.138 -	movl r20=fast_hyperpriv_cnt+(8*HYPERPRIVOP_RFI);;
   4.139 -	ld8 r23=[r20];;
   4.140 +	movl r20=FAST_HYPERPRIVOP_PERFC(HYPERPRIVOP_RFI);;
   4.141 +	ld4 r23=[r20];;
   4.142  	adds r23=1,r23;;
   4.143 -	st8 [r20]=r23;;
   4.144 +	st4 [r20]=r23;;
   4.145  #endif
   4.146  #ifdef RFI_TO_INTERRUPT
   4.147  	// maybe do an immediate interrupt delivery?
   4.148 @@ -1339,10 +1344,10 @@ END(rfi_with_interrupt)
   4.149  
   4.150  ENTRY(hyper_cover)
   4.151  #ifdef FAST_HYPERPRIVOP_CNT
   4.152 -	movl r20=fast_hyperpriv_cnt+(8*HYPERPRIVOP_COVER);;
   4.153 -	ld8 r21=[r20];;
   4.154 +	movl r20=FAST_HYPERPRIVOP_PERFC(HYPERPRIVOP_COVER);;
   4.155 +	ld4 r21=[r20];;
   4.156  	adds r21=1,r21;;
   4.157 -	st8 [r20]=r21;;
   4.158 +	st4 [r20]=r21;;
   4.159  #endif
   4.160  	mov r24=cr.ipsr
   4.161  	mov r25=cr.iip;;
   4.162 @@ -1375,10 +1380,10 @@ END(hyper_cover)
   4.163  // return from metaphysical mode (meta=1) to virtual mode (meta=0)
   4.164  ENTRY(hyper_ssm_dt)
   4.165  #ifdef FAST_HYPERPRIVOP_CNT
   4.166 -	movl r20=fast_hyperpriv_cnt+(8*HYPERPRIVOP_SSM_DT);;
   4.167 -	ld8 r21=[r20];;
   4.168 +	movl r20=FAST_HYPERPRIVOP_PERFC(HYPERPRIVOP_SSM_DT);;
   4.169 +	ld4 r21=[r20];;
   4.170  	adds r21=1,r21;;
   4.171 -	st8 [r20]=r21;;
   4.172 +	st4 [r20]=r21;;
   4.173  #endif
   4.174  	mov r24=cr.ipsr
   4.175  	mov r25=cr.iip;;
   4.176 @@ -1412,10 +1417,10 @@ END(hyper_ssm_dt)
   4.177  // go to metaphysical mode (meta=1) from virtual mode (meta=0)
   4.178  ENTRY(hyper_rsm_dt)
   4.179  #ifdef FAST_HYPERPRIVOP_CNT
   4.180 -	movl r20=fast_hyperpriv_cnt+(8*HYPERPRIVOP_RSM_DT);;
   4.181 -	ld8 r21=[r20];;
   4.182 +	movl r20=FAST_HYPERPRIVOP_PERFC(HYPERPRIVOP_RSM_DT);;
   4.183 +	ld4 r21=[r20];;
   4.184  	adds r21=1,r21;;
   4.185 -	st8 [r20]=r21;;
   4.186 +	st4 [r20]=r21;;
   4.187  #endif
   4.188  	mov r24=cr.ipsr
   4.189  	mov r25=cr.iip;;
   4.190 @@ -1449,10 +1454,10 @@ END(hyper_rsm_dt)
   4.191  
   4.192  ENTRY(hyper_get_tpr)
   4.193  #ifdef FAST_HYPERPRIVOP_CNT
   4.194 -	movl r20=fast_hyperpriv_cnt+(8*HYPERPRIVOP_GET_TPR);;
   4.195 -	ld8 r21=[r20];;
   4.196 +	movl r20=FAST_HYPERPRIVOP_PERFC(HYPERPRIVOP_GET_TPR);;
   4.197 +	ld4 r21=[r20];;
   4.198  	adds r21=1,r21;;
   4.199 -	st8 [r20]=r21;;
   4.200 +	st4 [r20]=r21;;
   4.201  #endif
   4.202  	mov r24=cr.ipsr
   4.203  	mov r25=cr.iip;;
   4.204 @@ -1478,10 +1483,10 @@ END(hyper_get_tpr)
   4.205  // (or accidentally missing) delivering an interrupt
   4.206  ENTRY(hyper_set_tpr)
   4.207  #ifdef FAST_HYPERPRIVOP_CNT
   4.208 -	movl r20=fast_hyperpriv_cnt+(8*HYPERPRIVOP_SET_TPR);;
   4.209 -	ld8 r21=[r20];;
   4.210 +	movl r20=FAST_HYPERPRIVOP_PERFC(HYPERPRIVOP_SET_TPR);;
   4.211 +	ld4 r21=[r20];;
   4.212  	adds r21=1,r21;;
   4.213 -	st8 [r20]=r21;;
   4.214 +	st4 [r20]=r21;;
   4.215  #endif
   4.216  	mov r24=cr.ipsr
   4.217  	mov r25=cr.iip;;
   4.218 @@ -1506,10 +1511,10 @@ END(hyper_set_tpr)
   4.219  
   4.220  ENTRY(hyper_get_ivr)
   4.221  #ifdef FAST_HYPERPRIVOP_CNT
   4.222 -	movl r22=fast_hyperpriv_cnt+(8*HYPERPRIVOP_GET_IVR);;
   4.223 -	ld8 r21=[r22];;
   4.224 +	movl r22=FAST_HYPERPRIVOP_PERFC(HYPERPRIVOP_GET_IVR);;
   4.225 +	ld4 r21=[r22];;
   4.226  	adds r21=1,r21;;
   4.227 -	st8 [r22]=r21;;
   4.228 +	st4 [r22]=r21;;
   4.229  #endif
   4.230  	mov r8=15;;
   4.231  	// when we get to here r20=~=interrupts pending
   4.232 @@ -1618,10 +1623,10 @@ ENTRY(hyper_eoi)
   4.233  	cmp.ne p7,p0=r20,r0
   4.234  (p7)	br.spnt.many dispatch_break_fault ;;
   4.235  #ifdef FAST_HYPERPRIVOP_CNT
   4.236 -	movl r20=fast_hyperpriv_cnt+(8*HYPERPRIVOP_EOI);;
   4.237 -	ld8 r21=[r20];;
   4.238 +	movl r20=FAST_HYPERPRIVOP_PERFC(HYPERPRIVOP_EOI);;
   4.239 +	ld4 r21=[r20];;
   4.240  	adds r21=1,r21;;
   4.241 -	st8 [r20]=r21;;
   4.242 +	st4 [r20]=r21;;
   4.243  #endif
   4.244  	movl r22=THIS_CPU(cpu_kr)+IA64_KR_CURRENT_OFFSET;;
   4.245  	ld8 r22=[r22];;
   4.246 @@ -1682,10 +1687,10 @@ ENTRY(hyper_set_itm)
   4.247  	cmp.ne p7,p0=r20,r0
   4.248  (p7)	br.spnt.many dispatch_break_fault ;;
   4.249  #ifdef FAST_HYPERPRIVOP_CNT
   4.250 -	movl r20=fast_hyperpriv_cnt+(8*HYPERPRIVOP_SET_ITM);;
   4.251 -	ld8 r21=[r20];;
   4.252 +	movl r20=FAST_HYPERPRIVOP_PERFC(HYPERPRIVOP_SET_ITM);;
   4.253 +	ld4 r21=[r20];;
   4.254  	adds r21=1,r21;;
   4.255 -	st8 [r20]=r21;;
   4.256 +	st4 [r20]=r21;;
   4.257  #endif
   4.258  	movl r20=THIS_CPU(cpu_info)+IA64_CPUINFO_ITM_NEXT_OFFSET;;
   4.259  	ld8 r21=[r20];;
   4.260 @@ -1723,10 +1728,10 @@ END(hyper_set_itm)
   4.261  
   4.262  ENTRY(hyper_get_rr)
   4.263  #ifdef FAST_HYPERPRIVOP_CNT
   4.264 -	movl r20=fast_hyperpriv_cnt+(8*HYPERPRIVOP_GET_RR);;
   4.265 -	ld8 r21=[r20];;
   4.266 +	movl r20=FAST_HYPERPRIVOP_PERFC(HYPERPRIVOP_GET_RR);;
   4.267 +	ld4 r21=[r20];;
   4.268  	adds r21=1,r21;;
   4.269 -	st8 [r20]=r21;;
   4.270 +	st4 [r20]=r21;;
   4.271  #endif
   4.272  	extr.u r25=r8,61,3;;
   4.273  	adds r20=XSI_RR0_OFS-XSI_PSR_IC_OFS,r18 ;;
   4.274 @@ -1755,10 +1760,10 @@ ENTRY(hyper_set_rr)
   4.275  	cmp.leu p7,p0=7,r25	// punt on setting rr7
   4.276  (p7)	br.spnt.many dispatch_break_fault ;;
   4.277  #ifdef FAST_HYPERPRIVOP_CNT
   4.278 -	movl r20=fast_hyperpriv_cnt+(8*HYPERPRIVOP_SET_RR);;
   4.279 -	ld8 r21=[r20];;
   4.280 +	movl r20=FAST_HYPERPRIVOP_PERFC(HYPERPRIVOP_SET_RR);;
   4.281 +	ld4 r21=[r20];;
   4.282  	adds r21=1,r21;;
   4.283 -	st8 [r20]=r21;;
   4.284 +	st4 [r20]=r21;;
   4.285  #endif
   4.286  	extr.u r26=r9,8,24	// r26 = r9.rid
   4.287  	movl r20=THIS_CPU(cpu_kr)+IA64_KR_CURRENT_OFFSET;;
   4.288 @@ -1813,10 +1818,10 @@ ENTRY(hyper_set_kr)
   4.289  	cmp.ne p7,p0=r0,r25	// if kr# > 7, go slow way
   4.290  (p7)	br.spnt.many dispatch_break_fault ;;
   4.291  #ifdef FAST_HYPERPRIVOP_CNT
   4.292 -	movl r20=fast_hyperpriv_cnt+(8*HYPERPRIVOP_SET_KR);;
   4.293 -	ld8 r21=[r20];;
   4.294 +	movl r20=FAST_HYPERPRIVOP_PERFC(HYPERPRIVOP_SET_KR);;
   4.295 +	ld4 r21=[r20];;
   4.296  	adds r21=1,r21;;
   4.297 -	st8 [r20]=r21;;
   4.298 +	st4 [r20]=r21;;
   4.299  #endif
   4.300  	adds r21=XSI_KR0_OFS-XSI_PSR_IC_OFS,r18 ;;
   4.301  	shl r20=r8,3;;
   4.302 @@ -1871,10 +1876,10 @@ END(hyper_set_kr)
   4.303  //	r31 == pr
   4.304  ENTRY(hyper_thash)
   4.305  #ifdef FAST_HYPERPRIVOP_CNT
   4.306 -	movl r20=fast_hyperpriv_cnt+(8*HYPERPRIVOP_THASH);;
   4.307 -	ld8 r21=[r20];;
   4.308 +	movl r20=FAST_HYPERPRIVOP_PERFC(HYPERPRIVOP_THASH);;
   4.309 +	ld4 r21=[r20];;
   4.310  	adds r21=1,r21;;
   4.311 -	st8 [r20]=r21;;
   4.312 +	st4 [r20]=r21;;
   4.313  #endif
   4.314  	shr.u r20 = r8, 61
   4.315  	addl r25 = 1, r0
   4.316 @@ -1940,10 +1945,10 @@ ENTRY(hyper_ptc_ga)
   4.317  #endif
   4.318  	// FIXME: validate not flushing Xen addresses
   4.319  #ifdef FAST_HYPERPRIVOP_CNT
   4.320 -	movl r20=fast_hyperpriv_cnt+(8*HYPERPRIVOP_PTC_GA);;
   4.321 -	ld8 r21=[r20];;
   4.322 +	movl r20=FAST_HYPERPRIVOP_PERFC(HYPERPRIVOP_PTC_GA);;
   4.323 +	ld4 r21=[r20];;
   4.324  	adds r21=1,r21;;
   4.325 -	st8 [r20]=r21;;
   4.326 +	st4 [r20]=r21;;
   4.327  #endif
   4.328  	mov r28=r8
   4.329  	extr.u r19=r9,2,6		// addr_range=1<<((r9&0xfc)>>2)
   4.330 @@ -2011,9 +2016,9 @@ END(hyper_ptc_ga)
   4.331  ENTRY(recover_and_dispatch_break_fault)
   4.332  #ifdef PERF_COUNTERS
   4.333  	movl r21=perfcounters + RECOVER_TO_BREAK_FAULT_PERFC_OFS;;
   4.334 -	ld8 r22=[r21];;
   4.335 +	ld4 r22=[r21];;
   4.336  	adds r22=1,r22;;
   4.337 -	st8 [r21]=r22;;
   4.338 +	st4 [r21]=r22;;
   4.339  #endif
   4.340  	mov b0=r29 ;;
   4.341  	br.sptk.many dispatch_break_fault;;
   4.342 @@ -2054,11 +2059,11 @@ hyper_itc_d:
   4.343  (p7)	br.spnt.many dispatch_break_fault ;;
   4.344  #ifdef FAST_HYPERPRIVOP_CNT
   4.345  	cmp.eq p6,p7=HYPERPRIVOP_ITC_D,r17;;
   4.346 -(p6)	movl r20=fast_hyperpriv_cnt+(8*HYPERPRIVOP_ITC_D);;
   4.347 -(p7)	movl r20=fast_hyperpriv_cnt+(8*HYPERPRIVOP_ITC_I);;
   4.348 -	ld8 r21=[r20];;
   4.349 +(p6)	movl r20=FAST_HYPERPRIVOP_PERFC(HYPERPRIVOP_ITC_D);;
   4.350 +(p7)	movl r20=FAST_HYPERPRIVOP_PERFC(HYPERPRIVOP_ITC_I);;
   4.351 +	ld4 r21=[r20];;
   4.352  	adds r21=1,r21;;
   4.353 -	st8 [r20]=r21;;
   4.354 +	st4 [r20]=r21;;
   4.355  #endif
   4.356  (p6)	mov r17=2;;
   4.357  (p7)	mov r17=3;;
     5.1 --- a/xen/arch/ia64/xen/privop.c	Fri Aug 04 08:37:24 2006 -0600
     5.2 +++ b/xen/arch/ia64/xen/privop.c	Fri Aug 04 09:02:43 2006 -0600
     5.3 @@ -275,7 +275,7 @@ static IA64FAULT priv_mov_to_pmd(VCPU *v
     5.4  static IA64FAULT priv_mov_to_cr(VCPU *vcpu, INST64 inst)
     5.5  {
     5.6  	UINT64 val = vcpu_get_gr(vcpu, inst.M32.r2);
     5.7 -	privcnt.to_cr_cnt[inst.M32.cr3]++;
     5.8 +	perfc_incra(mov_to_cr, inst.M32.cr3);
     5.9  	switch (inst.M32.cr3) {
    5.10  	    case 0: return vcpu_set_dcr(vcpu,val);
    5.11  	    case 1: return vcpu_set_itm(vcpu,val);
    5.12 @@ -417,7 +417,7 @@ static IA64FAULT priv_mov_from_cr(VCPU *
    5.13  	UINT64 val;
    5.14  	IA64FAULT fault;
    5.15  
    5.16 -	privcnt.from_cr_cnt[inst.M33.cr3]++;
    5.17 +	perfc_incra(mov_from_cr, inst.M33.cr3);
    5.18  	switch (inst.M33.cr3) {
    5.19  	    case 0: return cr_get(dcr);
    5.20  	    case 1: return cr_get(itm);
    5.21 @@ -563,15 +563,15 @@ priv_handle_op(VCPU *vcpu, REGS *regs, i
    5.22  #endif
    5.23  			if (inst.M29.x3 != 0) break;
    5.24  			if (inst.M30.x4 == 8 && inst.M30.x2 == 2) {
    5.25 -				privcnt.mov_to_ar_imm++;
    5.26 +				perfc_incrc(mov_to_ar_imm);
    5.27  				return priv_mov_to_ar_imm(vcpu,inst);
    5.28  			}
    5.29  			if (inst.M44.x4 == 6) {
    5.30 -				privcnt.ssm++;
    5.31 +				perfc_incrc(ssm);
    5.32  				return priv_ssm(vcpu,inst);
    5.33  			}
    5.34  			if (inst.M44.x4 == 7) {
    5.35 -				privcnt.rsm++;
    5.36 +				perfc_incrc(rsm);
    5.37  				return priv_rsm(vcpu,inst);
    5.38  			}
    5.39  			break;
    5.40 @@ -580,8 +580,9 @@ priv_handle_op(VCPU *vcpu, REGS *regs, i
    5.41  		x6 = inst.M29.x6;
    5.42  		if (x6 == 0x2a) {
    5.43  			if (privify_en && inst.M29.r2 > 63 && inst.M29.ar3 < 8)
    5.44 -				privcnt.mov_from_ar++; // privified mov from kr
    5.45 -			else privcnt.mov_to_ar_reg++;
    5.46 +				perfc_incrc(mov_from_ar); // privified mov from kr
    5.47 +			else
    5.48 +				perfc_incrc(mov_to_ar_reg);
    5.49  			return priv_mov_to_ar_reg(vcpu,inst);
    5.50  		}
    5.51  		if (inst.M29.x3 != 0) break;
    5.52 @@ -593,31 +594,33 @@ priv_handle_op(VCPU *vcpu, REGS *regs, i
    5.53  			}
    5.54  		}
    5.55  		if (privify_en && x6 == 52 && inst.M28.r3 > 63)
    5.56 -			privcnt.fc++;
    5.57 +			perfc_incrc(fc);
    5.58  		else if (privify_en && x6 == 16 && inst.M43.r3 > 63)
    5.59 -			privcnt.cpuid++;
    5.60 -		else privcnt.Mpriv_cnt[x6]++;
    5.61 +			perfc_incrc(cpuid);
    5.62 +		else
    5.63 +			perfc_incra(misc_privop, x6);
    5.64  		return (*pfunc)(vcpu,inst);
    5.65  		break;
    5.66  	    case B:
    5.67  		if (inst.generic.major != 0) break;
    5.68  		if (inst.B8.x6 == 0x08) {
    5.69  			IA64FAULT fault;
    5.70 -			privcnt.rfi++;
    5.71 +			perfc_incrc(rfi);
    5.72  			fault = priv_rfi(vcpu,inst);
    5.73  			if (fault == IA64_NO_FAULT) fault = IA64_RFI_IN_PROGRESS;
    5.74  			return fault;
    5.75  		}
    5.76  		if (inst.B8.x6 == 0x0c) {
    5.77 -			privcnt.bsw0++;
    5.78 +			perfc_incrc(bsw0);
    5.79  			return priv_bsw0(vcpu,inst);
    5.80  		}
    5.81  		if (inst.B8.x6 == 0x0d) {
    5.82 -			privcnt.bsw1++;
    5.83 +			perfc_incrc(bsw1);
    5.84  			return priv_bsw1(vcpu,inst);
    5.85  		}
    5.86 -		if (inst.B8.x6 == 0x0) { // break instr for privified cover
    5.87 -			privcnt.cover++;
    5.88 +		if (inst.B8.x6 == 0x0) {
    5.89 +			// break instr for privified cover
    5.90 +			perfc_incrc(cover);
    5.91  			return priv_cover(vcpu,inst);
    5.92  		}
    5.93  		break;
    5.94 @@ -625,19 +628,20 @@ priv_handle_op(VCPU *vcpu, REGS *regs, i
    5.95  		if (inst.generic.major != 0) break;
    5.96  #if 0
    5.97  		if (inst.I26.x6 == 0 && inst.I26.x3 == 0) {
    5.98 -			privcnt.cover++;
    5.99 +			perfc_incrc(cover);
   5.100  			return priv_cover(vcpu,inst);
   5.101  		}
   5.102  #endif
   5.103  		if (inst.I26.x3 != 0) break;  // I26.x3 == I27.x3
   5.104  		if (inst.I26.x6 == 0x2a) {
   5.105  			if (privify_en && inst.I26.r2 > 63 && inst.I26.ar3 < 8)
   5.106 -				privcnt.mov_from_ar++; // privified mov from kr
   5.107 -			else privcnt.mov_to_ar_reg++;
   5.108 +				perfc_incrc(mov_from_ar); // privified mov from kr
   5.109 +			else 
   5.110 +				perfc_incrc(mov_to_ar_reg);
   5.111  			return priv_mov_to_ar_reg(vcpu,inst);
   5.112  		}
   5.113  		if (inst.I27.x6 == 0x0a) {
   5.114 -			privcnt.mov_to_ar_imm++;
   5.115 +			perfc_incrc(mov_to_ar_imm);
   5.116  			return priv_mov_to_ar_imm(vcpu,inst);
   5.117  		}
   5.118  		break;
   5.119 @@ -705,7 +709,7 @@ ia64_hyperprivop(unsigned long iim, REGS
   5.120  		             iim, regs->cr_iip);
   5.121  		return 1;
   5.122  	}
   5.123 -	slow_hyperpriv_cnt[iim]++;
   5.124 +	perfc_incra(slow_hyperprivop, iim);
   5.125  	switch(iim) {
   5.126  	    case HYPERPRIVOP_RFI:
   5.127  		(void)vcpu_rfi(v);
     6.1 --- a/xen/arch/ia64/xen/privop_stat.c	Fri Aug 04 08:37:24 2006 -0600
     6.2 +++ b/xen/arch/ia64/xen/privop_stat.c	Fri Aug 04 09:02:43 2006 -0600
     6.3 @@ -3,14 +3,6 @@
     6.4  #include <xen/lib.h>
     6.5  #include <asm/uaccess.h>
     6.6  
     6.7 -unsigned long slow_hyperpriv_cnt[HYPERPRIVOP_MAX+1] = { 0 };
     6.8 -unsigned long fast_hyperpriv_cnt[HYPERPRIVOP_MAX+1] = { 0 };
     6.9 -
    6.10 -unsigned long slow_reflect_count[0x80] = { 0 };
    6.11 -unsigned long fast_reflect_count[0x80] = { 0 };
    6.12 -
    6.13 -struct privop_counters privcnt;
    6.14 -
    6.15  #ifdef PRIVOP_ADDR_COUNT
    6.16  #define PRIVOP_COUNT_NINSTS 2
    6.17  #define PRIVOP_COUNT_NADDRS 30
    6.18 @@ -85,6 +77,7 @@ static void zero_privop_addrs(void)
    6.19  Privileged operation instrumentation routines
    6.20  **************************************************************************/
    6.21  
    6.22 +#if 0
    6.23  static const char * const Mpriv_str[64] = {
    6.24  	"mov_to_rr", "mov_to_dbr", "mov_to_ibr", "mov_to_pkr",
    6.25  	"mov_to_pmc", "mov_to_pmd", "<0x06>", "<0x07>",
    6.26 @@ -120,185 +113,12 @@ static const char * const cr_str[128] = 
    6.27  	RS,RS,RS,RS,RS,RS,RS,RS
    6.28  };
    6.29  
    6.30 -// FIXME: should use snprintf to ensure no buffer overflow
    6.31 -static int dump_privop_counts(char *buf)
    6.32 -{
    6.33 -	int i, j;
    6.34 -	unsigned long sum = 0;
    6.35 -	char *s = buf;
    6.36 -
    6.37 -	// this is ugly and should probably produce sorted output
    6.38 -	// but it will have to do for now
    6.39 -	sum += privcnt.mov_to_ar_imm; sum += privcnt.mov_to_ar_reg;
    6.40 -	sum += privcnt.ssm; sum += privcnt.rsm;
    6.41 -	sum += privcnt.rfi; sum += privcnt.bsw0;
    6.42 -	sum += privcnt.bsw1; sum += privcnt.cover;
    6.43 -	for (i=0; i < 64; i++)
    6.44 -		sum += privcnt.Mpriv_cnt[i];
    6.45 -	s += sprintf(s,"Privop statistics: (Total privops: %ld)\n",sum);
    6.46 -	if (privcnt.mov_to_ar_imm)
    6.47 -		s += sprintf(s,"%10ld  %s [%ld%%]\n", privcnt.mov_to_ar_imm,
    6.48 -			"mov_to_ar_imm", (privcnt.mov_to_ar_imm*100L)/sum);
    6.49 -	if (privcnt.mov_to_ar_reg)
    6.50 -		s += sprintf(s,"%10ld  %s [%ld%%]\n", privcnt.mov_to_ar_reg,
    6.51 -			"mov_to_ar_reg", (privcnt.mov_to_ar_reg*100L)/sum);
    6.52 -	if (privcnt.mov_from_ar)
    6.53 -		s += sprintf(s,"%10ld  %s [%ld%%]\n", privcnt.mov_from_ar,
    6.54 -			"privified-mov_from_ar", (privcnt.mov_from_ar*100L)/sum);
    6.55 -	if (privcnt.ssm)
    6.56 -		s += sprintf(s,"%10ld  %s [%ld%%]\n", privcnt.ssm,
    6.57 -			"ssm", (privcnt.ssm*100L)/sum);
    6.58 -	if (privcnt.rsm)
    6.59 -		s += sprintf(s,"%10ld  %s [%ld%%]\n", privcnt.rsm,
    6.60 -			"rsm", (privcnt.rsm*100L)/sum);
    6.61 -	if (privcnt.rfi)
    6.62 -		s += sprintf(s,"%10ld  %s [%ld%%]\n", privcnt.rfi,
    6.63 -			"rfi", (privcnt.rfi*100L)/sum);
    6.64 -	if (privcnt.bsw0)
    6.65 -		s += sprintf(s,"%10ld  %s [%ld%%]\n", privcnt.bsw0,
    6.66 -			"bsw0", (privcnt.bsw0*100L)/sum);
    6.67 -	if (privcnt.bsw1)
    6.68 -		s += sprintf(s,"%10ld  %s [%ld%%]\n", privcnt.bsw1,
    6.69 -			"bsw1", (privcnt.bsw1*100L)/sum);
    6.70 -	if (privcnt.cover)
    6.71 -		s += sprintf(s,"%10ld  %s [%ld%%]\n", privcnt.cover,
    6.72 -			"cover", (privcnt.cover*100L)/sum);
    6.73 -	if (privcnt.fc)
    6.74 -		s += sprintf(s,"%10ld  %s [%ld%%]\n", privcnt.fc,
    6.75 -			"privified-fc", (privcnt.fc*100L)/sum);
    6.76 -	if (privcnt.cpuid)
    6.77 -		s += sprintf(s,"%10ld  %s [%ld%%]\n", privcnt.cpuid,
    6.78 -			"privified-getcpuid", (privcnt.cpuid*100L)/sum);
    6.79 -	for (i=0; i < 64; i++) if (privcnt.Mpriv_cnt[i]) {
    6.80 -		if (!Mpriv_str[i]) s += sprintf(s,"PRIVSTRING NULL!!\n");
    6.81 -		else s += sprintf(s,"%10ld  %s [%ld%%]\n", privcnt.Mpriv_cnt[i],
    6.82 -			Mpriv_str[i], (privcnt.Mpriv_cnt[i]*100L)/sum);
    6.83 -		if (i == 0x24) { // mov from CR
    6.84 -			s += sprintf(s,"            [");
    6.85 -			for (j=0; j < 128; j++) if (privcnt.from_cr_cnt[j]) {
    6.86 -				if (!cr_str[j])
    6.87 -					s += sprintf(s,"PRIVSTRING NULL!!\n");
    6.88 -				else
    6.89 -					s += sprintf(s,"%s(%ld),",cr_str[j],
    6.90 -						     privcnt.from_cr_cnt[j]);
    6.91 -			}
    6.92 -			s += sprintf(s,"]\n");
    6.93 -		}
    6.94 -		else if (i == 0x2c) { // mov to CR
    6.95 -			s += sprintf(s,"            [");
    6.96 -			for (j=0; j < 128; j++) if (privcnt.to_cr_cnt[j]) {
    6.97 -				if (!cr_str[j])
    6.98 -					s += sprintf(s,"PRIVSTRING NULL!!\n");
    6.99 -				else
   6.100 -					s += sprintf(s,"%s(%ld),",cr_str[j],
   6.101 -						     privcnt.to_cr_cnt[j]);
   6.102 -			}
   6.103 -			s += sprintf(s,"]\n");
   6.104 -		}
   6.105 -	}
   6.106 -	return s - buf;
   6.107 -}
   6.108 -
   6.109 -static int zero_privop_counts(char *buf)
   6.110 -{
   6.111 -	int i, j;
   6.112 -	char *s = buf;
   6.113 -
   6.114 -	// this is ugly and should probably produce sorted output
   6.115 -	// but it will have to do for now
   6.116 -	privcnt.mov_to_ar_imm = 0;
   6.117 -	privcnt.mov_to_ar_reg = 0;
   6.118 -	privcnt.mov_from_ar = 0;
   6.119 -	privcnt.ssm = 0; privcnt.rsm = 0;
   6.120 -	privcnt.rfi = 0; privcnt.bsw0 = 0;
   6.121 -	privcnt.bsw1 = 0; privcnt.cover = 0;
   6.122 -	privcnt.fc = 0; privcnt.cpuid = 0;
   6.123 -	for (i=0; i < 64; i++)
   6.124 -		privcnt.Mpriv_cnt[i] = 0;
   6.125 -	for (j=0; j < 128; j++)
   6.126 -		privcnt.from_cr_cnt[j] = 0;
   6.127 -	for (j=0; j < 128; j++)
   6.128 -		privcnt.to_cr_cnt[j] = 0;
   6.129 -	s += sprintf(s,"All privop statistics zeroed\n");
   6.130 -	return s - buf;
   6.131 -}
   6.132 -
   6.133  static const char * const hyperpriv_str[HYPERPRIVOP_MAX+1] = {
   6.134  	0, "rfi", "rsm.dt", "ssm.dt", "cover", "itc.d", "itc.i", "ssm.i",
   6.135  	"=ivr", "=tpr", "tpr=", "eoi", "itm=", "thash", "ptc.ga", "itr.d",
   6.136  	"=rr", "rr=", "kr=", "fc", "=cpuid", "=pmd", "=ar.eflg", "ar.eflg="
   6.137  };
   6.138 -
   6.139 -
   6.140 -static int dump_hyperprivop_counts(char *buf)
   6.141 -{
   6.142 -	int i;
   6.143 -	char *s = buf;
   6.144 -	unsigned long total = 0;
   6.145 -	for (i = 1; i <= HYPERPRIVOP_MAX; i++)
   6.146 -		total += slow_hyperpriv_cnt[i];
   6.147 -	s += sprintf(s,"Slow hyperprivops (total %ld):\n",total);
   6.148 -	for (i = 1; i <= HYPERPRIVOP_MAX; i++)
   6.149 -		if (slow_hyperpriv_cnt[i])
   6.150 -			s += sprintf(s,"%10ld %s\n",
   6.151 -				slow_hyperpriv_cnt[i], hyperpriv_str[i]);
   6.152 -	total = 0;
   6.153 -	for (i = 1; i <= HYPERPRIVOP_MAX; i++)
   6.154 -		total += fast_hyperpriv_cnt[i];
   6.155 -	s += sprintf(s,"Fast hyperprivops (total %ld):\n",total);
   6.156 -	for (i = 1; i <= HYPERPRIVOP_MAX; i++)
   6.157 -		if (fast_hyperpriv_cnt[i])
   6.158 -			s += sprintf(s,"%10ld %s\n",
   6.159 -				fast_hyperpriv_cnt[i], hyperpriv_str[i]);
   6.160 -	return s - buf;
   6.161 -}
   6.162 -
   6.163 -static void zero_hyperprivop_counts(void)
   6.164 -{
   6.165 -	int i;
   6.166 -	for (i = 0; i <= HYPERPRIVOP_MAX; i++)
   6.167 -		slow_hyperpriv_cnt[i] = 0;
   6.168 -	for (i = 0; i <= HYPERPRIVOP_MAX; i++)
   6.169 -		fast_hyperpriv_cnt[i] = 0;
   6.170 -}
   6.171 -
   6.172 -static void zero_reflect_counts(void)
   6.173 -{
   6.174 -	int i;
   6.175 -	for (i=0; i < 0x80; i++)
   6.176 -		slow_reflect_count[i] = 0;
   6.177 -	for (i=0; i < 0x80; i++)
   6.178 -		fast_reflect_count[i] = 0;
   6.179 -}
   6.180 -
   6.181 -static int dump_reflect_counts(char *buf)
   6.182 -{
   6.183 -	int i,j,cnt;
   6.184 -	char *s = buf;
   6.185 -
   6.186 -	s += sprintf(s,"Slow reflections by vector:\n");
   6.187 -	for (i = 0, j = 0; i < 0x80; i++) {
   6.188 -		if ( (cnt = slow_reflect_count[i]) != 0 ) {
   6.189 -			s += sprintf(s,"0x%02x00:%10d, ",i,cnt);
   6.190 -			if ((j++ & 3) == 3)
   6.191 -				s += sprintf(s,"\n");
   6.192 -		}
   6.193 -	}
   6.194 -	if (j & 3)
   6.195 -		s += sprintf(s,"\n");
   6.196 -	s += sprintf(s,"Fast reflections by vector:\n");
   6.197 -	for (i = 0, j = 0; i < 0x80; i++) {
   6.198 -		if ( (cnt = fast_reflect_count[i]) != 0 ) {
   6.199 -			s += sprintf(s,"0x%02x00:%10d, ",i,cnt);
   6.200 -			if ((j++ & 3) == 3)
   6.201 -				s += sprintf(s,"\n");
   6.202 -		}
   6.203 -	}
   6.204 -	if (j & 3)
   6.205 -		s += sprintf(s,"\n");
   6.206 -	return s - buf;
   6.207 -}
   6.208 -
   6.209 +#endif
   6.210  
   6.211  #define TMPBUFLEN 8*1024
   6.212  int dump_privop_counts_to_user(char __user *ubuf, int len)
   6.213 @@ -309,9 +129,7 @@ int dump_privop_counts_to_user(char __us
   6.214  	if (len < TMPBUFLEN)
   6.215  		return -1;
   6.216  
   6.217 -	n = dump_privop_counts(buf);
   6.218 -	n += dump_hyperprivop_counts(buf + n);
   6.219 -	n += dump_reflect_counts(buf + n);
   6.220 +	n = 0;
   6.221  #ifdef PRIVOP_ADDR_COUNT
   6.222  	n += dump_privop_addrs(buf + n);
   6.223  #endif
   6.224 @@ -323,21 +141,8 @@ int dump_privop_counts_to_user(char __us
   6.225  
   6.226  int zero_privop_counts_to_user(char __user *ubuf, int len)
   6.227  {
   6.228 -	char buf[TMPBUFLEN];
   6.229 -	int n;
   6.230 -
   6.231 -	if (len < TMPBUFLEN)
   6.232 -		return -1;
   6.233 -
   6.234 -	n = zero_privop_counts(buf);
   6.235 -
   6.236 -	zero_hyperprivop_counts();
   6.237  #ifdef PRIVOP_ADDR_COUNT
   6.238  	zero_privop_addrs();
   6.239  #endif
   6.240 -	zero_vhpt_stats();
   6.241 -	zero_reflect_counts();
   6.242 -	if (__copy_to_user(ubuf,buf,n))
   6.243 -		return -1;
   6.244 -	return n;
   6.245 +	return 0;
   6.246  }
     7.1 --- a/xen/arch/ia64/xen/vhpt.c	Fri Aug 04 08:37:24 2006 -0600
     7.2 +++ b/xen/arch/ia64/xen/vhpt.c	Fri Aug 04 09:02:43 2006 -0600
     7.3 @@ -261,11 +261,6 @@ void flush_tlb_mask(cpumask_t mask)
     7.4              (cpu, (void (*)(void *))flush_tlb_vhpt_all, NULL, 1, 1);
     7.5  }
     7.6  
     7.7 -void zero_vhpt_stats(void)
     7.8 -{
     7.9 -	return;
    7.10 -}
    7.11 -
    7.12  int dump_vhpt_stats(char *buf)
    7.13  {
    7.14  	int i, cpu;
     8.1 --- a/xen/common/page_alloc.c	Fri Aug 04 08:37:24 2006 -0600
     8.2 +++ b/xen/common/page_alloc.c	Fri Aug 04 09:02:43 2006 -0600
     8.3 @@ -24,7 +24,6 @@
     8.4  #include <xen/init.h>
     8.5  #include <xen/types.h>
     8.6  #include <xen/lib.h>
     8.7 -#include <xen/perfc.h>
     8.8  #include <xen/sched.h>
     8.9  #include <xen/spinlock.h>
    8.10  #include <xen/mm.h>
    8.11 @@ -33,6 +32,7 @@
    8.12  #include <xen/shadow.h>
    8.13  #include <xen/domain_page.h>
    8.14  #include <xen/keyhandler.h>
    8.15 +#include <xen/perfc.h>
    8.16  #include <asm/page.h>
    8.17  
    8.18  /*
     9.1 --- a/xen/include/asm-ia64/perfc_defn.h	Fri Aug 04 08:37:24 2006 -0600
     9.2 +++ b/xen/include/asm-ia64/perfc_defn.h	Fri Aug 04 09:02:43 2006 -0600
     9.3 @@ -1,21 +1,42 @@
     9.4  /* This file is legitimately included multiple times. */
     9.5  
     9.6 -PERFCOUNTER_CPU(dtlb_translate,		"dtlb hit")
     9.7 -
     9.8 -PERFCOUNTER_CPU(tr_translate,		"TR hit")
     9.9 -
    9.10 -PERFCOUNTER_CPU(vhpt_translate,		"virtual vhpt translation")
    9.11 -PERFCOUNTER_CPU(fast_vhpt_translate,	"virtual vhpt fast translation")
    9.12 +PERFCOUNTER_CPU(dtlb_translate,       "dtlb hit")
    9.13  
    9.14 -PERFCOUNTER(recover_to_page_fault,	"recoveries to page fault")
    9.15 -PERFCOUNTER(recover_to_break_fault,	"recoveries to break fault")
    9.16 -
    9.17 -PERFCOUNTER_CPU(phys_translate,		"metaphysical translation")
    9.18 +PERFCOUNTER_CPU(tr_translate,         "TR hit")
    9.19  
    9.20 -PERFCOUNTER_CPU(idle_when_pending,	"vcpu idle at event")
    9.21 +PERFCOUNTER_CPU(vhpt_translate,       "virtual vhpt translation")
    9.22 +PERFCOUNTER_CPU(fast_vhpt_translate,  "virtual vhpt fast translation")
    9.23  
    9.24 -PERFCOUNTER_CPU(pal_halt_light,		"calls to pal_halt_light")
    9.25 +PERFCOUNTER(recover_to_page_fault,    "recoveries to page fault")
    9.26 +PERFCOUNTER(recover_to_break_fault,   "recoveries to break fault")
    9.27  
    9.28 -PERFCOUNTER_CPU(context_switch,		"context switch")
    9.29 +PERFCOUNTER_CPU(phys_translate,       "metaphysical translation")
    9.30  
    9.31 -PERFCOUNTER_CPU(lazy_cover,		"lazy cover")
    9.32 +PERFCOUNTER_CPU(idle_when_pending,    "vcpu idle at event")
    9.33 +
    9.34 +PERFCOUNTER_CPU(pal_halt_light,       "calls to pal_halt_light")
    9.35 +
    9.36 +PERFCOUNTER_CPU(lazy_cover,           "lazy cover")
    9.37 +
    9.38 +PERFCOUNTER_CPU(mov_to_ar_imm,        "privop mov_to_ar_imm")
    9.39 +PERFCOUNTER_CPU(mov_to_ar_reg,        "privop mov_to_ar_reg")
    9.40 +PERFCOUNTER_CPU(mov_from_ar,          "privop privified-mov_from_ar")
    9.41 +PERFCOUNTER_CPU(ssm,                  "privop ssm")
    9.42 +PERFCOUNTER_CPU(rsm,                  "privop rsm")
    9.43 +PERFCOUNTER_CPU(rfi,                  "privop rfi")
    9.44 +PERFCOUNTER_CPU(bsw0,                 "privop bsw0")
    9.45 +PERFCOUNTER_CPU(bsw1,                 "privop bsw1")
    9.46 +PERFCOUNTER_CPU(cover,                "privop cover")
    9.47 +PERFCOUNTER_CPU(fc,                   "privop privified-fc")
    9.48 +PERFCOUNTER_CPU(cpuid,                "privop privified-cpuid")
    9.49 +
    9.50 +PERFCOUNTER_ARRAY(mov_to_cr,          "privop mov to cr", 128)
    9.51 +PERFCOUNTER_ARRAY(mov_from_cr,        "privop mov from cr", 128)
    9.52 +
    9.53 +PERFCOUNTER_ARRAY(misc_privop,        "privop misc", 64)
    9.54 +
    9.55 +PERFCOUNTER_ARRAY(slow_hyperprivop,   "slow hyperprivops", HYPERPRIVOP_MAX + 1)
    9.56 +PERFCOUNTER_ARRAY(fast_hyperprivop,   "fast hyperprivops", HYPERPRIVOP_MAX + 1)
    9.57 +
    9.58 +PERFCOUNTER_ARRAY(slow_reflect,       "slow reflection", 0x80)
    9.59 +PERFCOUNTER_ARRAY(fast_reflect,       "fast reflection", 0x80)
    10.1 --- a/xen/include/asm-ia64/privop_stat.h	Fri Aug 04 08:37:24 2006 -0600
    10.2 +++ b/xen/include/asm-ia64/privop_stat.h	Fri Aug 04 09:02:43 2006 -0600
    10.3 @@ -7,32 +7,6 @@ extern int zero_privop_counts_to_user(ch
    10.4  
    10.5  #define PRIVOP_ADDR_COUNT
    10.6  
    10.7 -extern unsigned long slow_hyperpriv_cnt[HYPERPRIVOP_MAX+1];
    10.8 -extern unsigned long fast_hyperpriv_cnt[HYPERPRIVOP_MAX+1];
    10.9 -
   10.10 -extern unsigned long slow_reflect_count[0x80];
   10.11 -extern unsigned long fast_reflect_count[0x80];
   10.12 -
   10.13 -struct privop_counters {
   10.14 -	unsigned long mov_to_ar_imm;
   10.15 -	unsigned long mov_to_ar_reg;
   10.16 -	unsigned long mov_from_ar;
   10.17 -	unsigned long ssm;
   10.18 -	unsigned long rsm;
   10.19 -	unsigned long rfi;
   10.20 -	unsigned long bsw0;
   10.21 -	unsigned long bsw1;
   10.22 -	unsigned long cover;
   10.23 -	unsigned long fc;
   10.24 -	unsigned long cpuid;
   10.25 -	unsigned long Mpriv_cnt[64];
   10.26 -
   10.27 -	unsigned long to_cr_cnt[128]; /* Number of mov to cr privop.  */
   10.28 -	unsigned long from_cr_cnt[128]; /* Number of mov from cr privop.  */
   10.29 -};
   10.30 -
   10.31 -extern struct privop_counters privcnt;
   10.32 -
   10.33  #ifdef PRIVOP_ADDR_COUNT
   10.34  
   10.35  /* INST argument of PRIVOP_COUNT_ADDR.  */
    11.1 --- a/xen/include/asm-ia64/vhpt.h	Fri Aug 04 08:37:24 2006 -0600
    11.2 +++ b/xen/include/asm-ia64/vhpt.h	Fri Aug 04 09:02:43 2006 -0600
    11.3 @@ -36,7 +36,6 @@ struct vhpt_lf_entry {
    11.4  #define INVALID_TI_TAG 0x8000000000000000L
    11.5  
    11.6  extern void vhpt_init (void);
    11.7 -extern void zero_vhpt_stats(void);
    11.8  extern int dump_vhpt_stats(char *buf);
    11.9  extern void vhpt_multiple_insert(unsigned long vaddr, unsigned long pte,
   11.10  				 unsigned long logps);