direct-io.hg
changeset 14587:ea0b50ca4999
xen: Remove legacy references to explicitly per-cpu perf counters.
Signed-off-by: Keir Fraser <keir@xensource.com>
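[Editor's note] Every hunk below is the same mechanical substitution: perfc_incrc() ("increment per-CPU counter") becomes perfc_incr(), and the PERFCOUNTER_CPU() declarations in perfc_defn.h become plain PERFCOUNTER(). The separate "explicitly per-cpu" flavor became redundant once the counter infrastructure gave every counter per-CPU storage. The following is a minimal, self-contained model of such a unified interface; it is illustrative only (the real definitions live in xen/include/xen/perfc.h and differ in detail), and names like PERFC_LIST and current_cpu are stand-ins, not Xen's.

#include <stdio.h>

#define NR_CPUS 4

/* perfc_defn.h is an X-macro list: including it under different
 * definitions of PERFCOUNTER() generates the enum of counter ids,
 * the storage, and the human-readable descriptions.  Inlined here
 * to keep the sketch self-contained: */
#define PERFC_LIST \
    PERFCOUNTER(irqs,        "#interrupts") \
    PERFCOUNTER(ipis,        "#IPIs") \
    PERFCOUNTER(page_faults, "page faults")

#define PERFCOUNTER(var, desc) PERFC_##var,
enum { PERFC_LIST NR_PERFCTRS };
#undef PERFCOUNTER

/* Every counter is per-CPU behind the scenes; there is no separate
 * "per-CPU" variant for callers to pick. */
static unsigned long perfcounters[NR_CPUS][NR_PERFCTRS];
static int current_cpu;     /* stand-in for smp_processor_id() */

/* The single increment macro: no lock needed, since each CPU only
 * ever touches its own row. */
#define perfc_incr(x) (perfcounters[current_cpu][PERFC_##x]++)

int main(void)
{
    perfc_incr(irqs);          /* was perfc_incrc(irqs) before 14587 */
    perfc_incr(page_faults);
    printf("cpu%d: irqs=%lu page_faults=%lu\n", current_cpu,
           perfcounters[current_cpu][PERFC_irqs],
           perfcounters[current_cpu][PERFC_page_faults]);
    return 0;
}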
1.1 --- a/xen/arch/ia64/linux-xen/irq_ia64.c Tue Mar 27 16:35:37 2007 +0100 1.2 +++ b/xen/arch/ia64/linux-xen/irq_ia64.c Tue Mar 27 16:42:47 2007 +0100 1.3 @@ -113,7 +113,7 @@ ia64_handle_irq (ia64_vector vector, str 1.4 unsigned long saved_tpr; 1.5 1.6 #ifdef XEN 1.7 - perfc_incrc(irqs); 1.8 + perfc_incr(irqs); 1.9 #endif 1.10 #if IRQ_DEBUG 1.11 #ifdef XEN
2.1 --- a/xen/arch/ia64/linux-xen/smp.c Tue Mar 27 16:35:37 2007 +0100 2.2 +++ b/xen/arch/ia64/linux-xen/smp.c Tue Mar 27 16:42:47 2007 +0100 2.3 @@ -148,7 +148,7 @@ handle_IPI (int irq, void *dev_id, struc 2.4 unsigned long ops; 2.5 2.6 #ifdef XEN 2.7 - perfc_incrc(ipis); 2.8 + perfc_incr(ipis); 2.9 #endif 2.10 mb(); /* Order interrupt and bit testing. */ 2.11 while ((ops = xchg(pending_ipis, 0)) != 0) {
3.1 --- a/xen/arch/ia64/vmx/pal_emul.c Tue Mar 27 16:35:37 2007 +0100 3.2 +++ b/xen/arch/ia64/vmx/pal_emul.c Tue Mar 27 16:42:47 2007 +0100 3.3 @@ -37,7 +37,7 @@ pal_emul(struct vcpu *vcpu) 3.4 vcpu_get_gr_nat(vcpu, 30, &gr30); 3.5 vcpu_get_gr_nat(vcpu, 31, &gr31); 3.6 3.7 - perfc_incrc(vmx_pal_emul); 3.8 + perfc_incr(vmx_pal_emul); 3.9 result = xen_pal_emulator(gr28, gr29, gr30, gr31); 3.10 3.11 vcpu_set_gr(vcpu, 8, result.status, 0);
4.1 --- a/xen/arch/ia64/vmx/vmx_process.c Tue Mar 27 16:35:37 2007 +0100 4.2 +++ b/xen/arch/ia64/vmx/vmx_process.c Tue Mar 27 16:42:47 2007 +0100 4.3 @@ -151,7 +151,7 @@ vmx_ia64_handle_break (unsigned long ifa 4.4 struct domain *d = current->domain; 4.5 struct vcpu *v = current; 4.6 4.7 - perfc_incrc(vmx_ia64_handle_break); 4.8 + perfc_incr(vmx_ia64_handle_break); 4.9 #ifdef CRASH_DEBUG 4.10 if ((iim == 0 || iim == CDB_BREAK_NUM) && !user_mode(regs) && 4.11 IS_VMM_ADDRESS(regs->cr_iip)) {
5.1 --- a/xen/arch/ia64/vmx/vmx_virt.c Tue Mar 27 16:35:37 2007 +0100 5.2 +++ b/xen/arch/ia64/vmx/vmx_virt.c Tue Mar 27 16:42:47 2007 +0100 5.3 @@ -1401,159 +1401,159 @@ if ( (cause == 0xff && opcode == 0x1e000 5.4 5.5 switch(cause) { 5.6 case EVENT_RSM: 5.7 - perfc_incrc(vmx_rsm); 5.8 + perfc_incr(vmx_rsm); 5.9 status=vmx_emul_rsm(vcpu, inst); 5.10 break; 5.11 case EVENT_SSM: 5.12 - perfc_incrc(vmx_ssm); 5.13 + perfc_incr(vmx_ssm); 5.14 status=vmx_emul_ssm(vcpu, inst); 5.15 break; 5.16 case EVENT_MOV_TO_PSR: 5.17 - perfc_incrc(vmx_mov_to_psr); 5.18 + perfc_incr(vmx_mov_to_psr); 5.19 status=vmx_emul_mov_to_psr(vcpu, inst); 5.20 break; 5.21 case EVENT_MOV_FROM_PSR: 5.22 - perfc_incrc(vmx_mov_from_psr); 5.23 + perfc_incr(vmx_mov_from_psr); 5.24 status=vmx_emul_mov_from_psr(vcpu, inst); 5.25 break; 5.26 case EVENT_MOV_FROM_CR: 5.27 - perfc_incrc(vmx_mov_from_cr); 5.28 + perfc_incr(vmx_mov_from_cr); 5.29 status=vmx_emul_mov_from_cr(vcpu, inst); 5.30 break; 5.31 case EVENT_MOV_TO_CR: 5.32 - perfc_incrc(vmx_mov_to_cr); 5.33 + perfc_incr(vmx_mov_to_cr); 5.34 status=vmx_emul_mov_to_cr(vcpu, inst); 5.35 break; 5.36 case EVENT_BSW_0: 5.37 - perfc_incrc(vmx_bsw0); 5.38 + perfc_incr(vmx_bsw0); 5.39 status=vmx_emul_bsw0(vcpu, inst); 5.40 break; 5.41 case EVENT_BSW_1: 5.42 - perfc_incrc(vmx_bsw1); 5.43 + perfc_incr(vmx_bsw1); 5.44 status=vmx_emul_bsw1(vcpu, inst); 5.45 break; 5.46 case EVENT_COVER: 5.47 - perfc_incrc(vmx_cover); 5.48 + perfc_incr(vmx_cover); 5.49 status=vmx_emul_cover(vcpu, inst); 5.50 break; 5.51 case EVENT_RFI: 5.52 - perfc_incrc(vmx_rfi); 5.53 + perfc_incr(vmx_rfi); 5.54 status=vmx_emul_rfi(vcpu, inst); 5.55 break; 5.56 case EVENT_ITR_D: 5.57 - perfc_incrc(vmx_itr_d); 5.58 + perfc_incr(vmx_itr_d); 5.59 status=vmx_emul_itr_d(vcpu, inst); 5.60 break; 5.61 case EVENT_ITR_I: 5.62 - perfc_incrc(vmx_itr_i); 5.63 + perfc_incr(vmx_itr_i); 5.64 status=vmx_emul_itr_i(vcpu, inst); 5.65 break; 5.66 case EVENT_PTR_D: 5.67 - perfc_incrc(vmx_ptr_d); 5.68 + perfc_incr(vmx_ptr_d); 5.69 status=vmx_emul_ptr_d(vcpu, inst); 5.70 break; 5.71 case EVENT_PTR_I: 5.72 - perfc_incrc(vmx_ptr_i); 5.73 + perfc_incr(vmx_ptr_i); 5.74 status=vmx_emul_ptr_i(vcpu, inst); 5.75 break; 5.76 case EVENT_ITC_D: 5.77 - perfc_incrc(vmx_itc_d); 5.78 + perfc_incr(vmx_itc_d); 5.79 status=vmx_emul_itc_d(vcpu, inst); 5.80 break; 5.81 case EVENT_ITC_I: 5.82 - perfc_incrc(vmx_itc_i); 5.83 + perfc_incr(vmx_itc_i); 5.84 status=vmx_emul_itc_i(vcpu, inst); 5.85 break; 5.86 case EVENT_PTC_L: 5.87 - perfc_incrc(vmx_ptc_l); 5.88 + perfc_incr(vmx_ptc_l); 5.89 status=vmx_emul_ptc_l(vcpu, inst); 5.90 break; 5.91 case EVENT_PTC_G: 5.92 - perfc_incrc(vmx_ptc_g); 5.93 + perfc_incr(vmx_ptc_g); 5.94 status=vmx_emul_ptc_g(vcpu, inst); 5.95 break; 5.96 case EVENT_PTC_GA: 5.97 - perfc_incrc(vmx_ptc_ga); 5.98 + perfc_incr(vmx_ptc_ga); 5.99 status=vmx_emul_ptc_ga(vcpu, inst); 5.100 break; 5.101 case EVENT_PTC_E: 5.102 - perfc_incrc(vmx_ptc_e); 5.103 + perfc_incr(vmx_ptc_e); 5.104 status=vmx_emul_ptc_e(vcpu, inst); 5.105 break; 5.106 case EVENT_MOV_TO_RR: 5.107 - perfc_incrc(vmx_mov_to_rr); 5.108 + perfc_incr(vmx_mov_to_rr); 5.109 status=vmx_emul_mov_to_rr(vcpu, inst); 5.110 break; 5.111 case EVENT_MOV_FROM_RR: 5.112 - perfc_incrc(vmx_mov_from_rr); 5.113 + perfc_incr(vmx_mov_from_rr); 5.114 status=vmx_emul_mov_from_rr(vcpu, inst); 5.115 break; 5.116 case EVENT_THASH: 5.117 - perfc_incrc(vmx_thash); 5.118 + perfc_incr(vmx_thash); 5.119 status=vmx_emul_thash(vcpu, inst); 5.120 break; 5.121 case EVENT_TTAG: 5.122 - perfc_incrc(vmx_ttag); 5.123 + 
perfc_incr(vmx_ttag); 5.124 status=vmx_emul_ttag(vcpu, inst); 5.125 break; 5.126 case EVENT_TPA: 5.127 - perfc_incrc(vmx_tpa); 5.128 + perfc_incr(vmx_tpa); 5.129 status=vmx_emul_tpa(vcpu, inst); 5.130 break; 5.131 case EVENT_TAK: 5.132 - perfc_incrc(vmx_tak); 5.133 + perfc_incr(vmx_tak); 5.134 status=vmx_emul_tak(vcpu, inst); 5.135 break; 5.136 case EVENT_MOV_TO_AR_IMM: 5.137 - perfc_incrc(vmx_mov_to_ar_imm); 5.138 + perfc_incr(vmx_mov_to_ar_imm); 5.139 status=vmx_emul_mov_to_ar_imm(vcpu, inst); 5.140 break; 5.141 case EVENT_MOV_TO_AR: 5.142 - perfc_incrc(vmx_mov_to_ar_reg); 5.143 + perfc_incr(vmx_mov_to_ar_reg); 5.144 status=vmx_emul_mov_to_ar_reg(vcpu, inst); 5.145 break; 5.146 case EVENT_MOV_FROM_AR: 5.147 - perfc_incrc(vmx_mov_from_ar_reg); 5.148 + perfc_incr(vmx_mov_from_ar_reg); 5.149 status=vmx_emul_mov_from_ar_reg(vcpu, inst); 5.150 break; 5.151 case EVENT_MOV_TO_DBR: 5.152 - perfc_incrc(vmx_mov_to_dbr); 5.153 + perfc_incr(vmx_mov_to_dbr); 5.154 status=vmx_emul_mov_to_dbr(vcpu, inst); 5.155 break; 5.156 case EVENT_MOV_TO_IBR: 5.157 - perfc_incrc(vmx_mov_to_ibr); 5.158 + perfc_incr(vmx_mov_to_ibr); 5.159 status=vmx_emul_mov_to_ibr(vcpu, inst); 5.160 break; 5.161 case EVENT_MOV_TO_PMC: 5.162 - perfc_incrc(vmx_mov_to_pmc); 5.163 + perfc_incr(vmx_mov_to_pmc); 5.164 status=vmx_emul_mov_to_pmc(vcpu, inst); 5.165 break; 5.166 case EVENT_MOV_TO_PMD: 5.167 - perfc_incrc(vmx_mov_to_pmd); 5.168 + perfc_incr(vmx_mov_to_pmd); 5.169 status=vmx_emul_mov_to_pmd(vcpu, inst); 5.170 break; 5.171 case EVENT_MOV_TO_PKR: 5.172 - perfc_incrc(vmx_mov_to_pkr); 5.173 + perfc_incr(vmx_mov_to_pkr); 5.174 status=vmx_emul_mov_to_pkr(vcpu, inst); 5.175 break; 5.176 case EVENT_MOV_FROM_DBR: 5.177 - perfc_incrc(vmx_mov_from_dbr); 5.178 + perfc_incr(vmx_mov_from_dbr); 5.179 status=vmx_emul_mov_from_dbr(vcpu, inst); 5.180 break; 5.181 case EVENT_MOV_FROM_IBR: 5.182 - perfc_incrc(vmx_mov_from_ibr); 5.183 + perfc_incr(vmx_mov_from_ibr); 5.184 status=vmx_emul_mov_from_ibr(vcpu, inst); 5.185 break; 5.186 case EVENT_MOV_FROM_PMC: 5.187 - perfc_incrc(vmx_mov_from_pmc); 5.188 + perfc_incr(vmx_mov_from_pmc); 5.189 status=vmx_emul_mov_from_pmc(vcpu, inst); 5.190 break; 5.191 case EVENT_MOV_FROM_PKR: 5.192 - perfc_incrc(vmx_mov_from_pkr); 5.193 + perfc_incr(vmx_mov_from_pkr); 5.194 status=vmx_emul_mov_from_pkr(vcpu, inst); 5.195 break; 5.196 case EVENT_MOV_FROM_CPUID: 5.197 - perfc_incrc(vmx_mov_from_cpuid); 5.198 + perfc_incr(vmx_mov_from_cpuid); 5.199 status=vmx_emul_mov_from_cpuid(vcpu, inst); 5.200 break; 5.201 case EVENT_VMSW:
6.1 --- a/xen/arch/ia64/xen/dom0_ops.c Tue Mar 27 16:35:37 2007 +0100 6.2 +++ b/xen/arch/ia64/xen/dom0_ops.c Tue Mar 27 16:42:47 2007 +0100 6.3 @@ -372,7 +372,7 @@ do_dom0vp_op(unsigned long cmd, 6.4 } else { 6.5 ret = (ret & _PFN_MASK) >> PAGE_SHIFT;//XXX pte_pfn() 6.6 } 6.7 - perfc_incrc(dom0vp_phystomach); 6.8 + perfc_incr(dom0vp_phystomach); 6.9 break; 6.10 case IA64_DOM0VP_machtophys: 6.11 if (!mfn_valid(arg0)) { 6.12 @@ -380,7 +380,7 @@ do_dom0vp_op(unsigned long cmd, 6.13 break; 6.14 } 6.15 ret = get_gpfn_from_mfn(arg0); 6.16 - perfc_incrc(dom0vp_machtophys); 6.17 + perfc_incr(dom0vp_machtophys); 6.18 break; 6.19 case IA64_DOM0VP_zap_physmap: 6.20 ret = dom0vp_zap_physmap(d, arg0, (unsigned int)arg1);
7.1 --- a/xen/arch/ia64/xen/domain.c Tue Mar 27 16:35:37 2007 +0100 7.2 +++ b/xen/arch/ia64/xen/domain.c Tue Mar 27 16:42:47 2007 +0100 7.3 @@ -131,11 +131,11 @@ static void flush_vtlb_for_context_switc 7.4 if (vhpt_is_flushed || NEED_FLUSH(__get_cpu_var(tlbflush_time), 7.5 last_tlbflush_timestamp)) { 7.6 local_flush_tlb_all(); 7.7 - perfc_incrc(tlbflush_clock_cswitch_purge); 7.8 + perfc_incr(tlbflush_clock_cswitch_purge); 7.9 } else { 7.10 - perfc_incrc(tlbflush_clock_cswitch_skip); 7.11 + perfc_incr(tlbflush_clock_cswitch_skip); 7.12 } 7.13 - perfc_incrc(flush_vtlb_for_context_switch); 7.14 + perfc_incr(flush_vtlb_for_context_switch); 7.15 } 7.16 } 7.17
8.1 --- a/xen/arch/ia64/xen/faults.c Tue Mar 27 16:35:37 2007 +0100 8.2 +++ b/xen/arch/ia64/xen/faults.c Tue Mar 27 16:42:47 2007 +0100 8.3 @@ -187,7 +187,7 @@ static int handle_lazy_cover(struct vcpu 8.4 if (!PSCB(v, interrupt_collection_enabled)) { 8.5 PSCB(v, ifs) = regs->cr_ifs; 8.6 regs->cr_ifs = 0; 8.7 - perfc_incrc(lazy_cover); 8.8 + perfc_incr(lazy_cover); 8.9 return 1; // retry same instruction with cr.ifs off 8.10 } 8.11 return 0;
9.1 --- a/xen/arch/ia64/xen/hypercall.c Tue Mar 27 16:35:37 2007 +0100 9.2 +++ b/xen/arch/ia64/xen/hypercall.c Tue Mar 27 16:42:47 2007 +0100 9.3 @@ -161,7 +161,7 @@ ia64_hypercall(struct pt_regs *regs) 9.4 if (regs->r28 == PAL_HALT_LIGHT) { 9.5 if (vcpu_deliverable_interrupts(v) || 9.6 event_pending(v)) { 9.7 - perfc_incrc(idle_when_pending); 9.8 + perfc_incr(idle_when_pending); 9.9 vcpu_pend_unspecified_interrupt(v); 9.10 //printk("idle w/int#%d pending!\n",pi); 9.11 //this shouldn't happen, but it apparently does quite a bit! so don't 9.12 @@ -170,7 +170,7 @@ ia64_hypercall(struct pt_regs *regs) 9.13 //as deliver_pending_interrupt is called on the way out and will deliver it 9.14 } 9.15 else { 9.16 - perfc_incrc(pal_halt_light); 9.17 + perfc_incr(pal_halt_light); 9.18 migrate_timer(&v->arch.hlt_timer, 9.19 v->processor); 9.20 set_timer(&v->arch.hlt_timer,
10.1 --- a/xen/arch/ia64/xen/mm.c Tue Mar 27 16:35:37 2007 +0100 10.2 +++ b/xen/arch/ia64/xen/mm.c Tue Mar 27 16:42:47 2007 +0100 10.3 @@ -1139,7 +1139,7 @@ assign_domain_page_replace(struct domain 10.4 domain_put_page(d, mpaddr, pte, old_pte, 1); 10.5 } 10.6 } 10.7 - perfc_incrc(assign_domain_page_replace); 10.8 + perfc_incr(assign_domain_page_replace); 10.9 } 10.10 10.11 // caller must get_page(new_page) before 10.12 @@ -1202,7 +1202,7 @@ assign_domain_page_cmpxchg_rel(struct do 10.13 set_gpfn_from_mfn(old_mfn, INVALID_M2P_ENTRY); 10.14 10.15 domain_page_flush_and_put(d, mpaddr, pte, old_pte, old_page); 10.16 - perfc_incrc(assign_domain_pge_cmpxchg_rel); 10.17 + perfc_incr(assign_domain_pge_cmpxchg_rel); 10.18 return 0; 10.19 } 10.20 10.21 @@ -1264,7 +1264,7 @@ zap_domain_page_one(struct domain *d, un 10.22 // guest_physmap_remove_page() 10.23 // zap_domain_page_one() 10.24 domain_put_page(d, mpaddr, pte, old_pte, (page_get_owner(page) != NULL)); 10.25 - perfc_incrc(zap_dcomain_page_one); 10.26 + perfc_incr(zap_dcomain_page_one); 10.27 } 10.28 10.29 unsigned long 10.30 @@ -1277,7 +1277,7 @@ dom0vp_zap_physmap(struct domain *d, uns 10.31 } 10.32 10.33 zap_domain_page_one(d, gpfn << PAGE_SHIFT, INVALID_MFN); 10.34 - perfc_incrc(dom0vp_zap_physmap); 10.35 + perfc_incr(dom0vp_zap_physmap); 10.36 return 0; 10.37 } 10.38 10.39 @@ -1331,7 +1331,7 @@ static unsigned long 10.40 get_gpfn_from_mfn(mfn) != INVALID_M2P_ENTRY); 10.41 assign_domain_page_replace(d, gpfn << PAGE_SHIFT, mfn, flags); 10.42 //don't update p2m table because this page belongs to rd, not d. 10.43 - perfc_incrc(dom0vp_add_physmap); 10.44 + perfc_incr(dom0vp_add_physmap); 10.45 out1: 10.46 put_domain(rd); 10.47 return error; 10.48 @@ -1501,7 +1501,7 @@ create_grant_host_mapping(unsigned long 10.49 #endif 10.50 ((flags & GNTMAP_readonly) ? 
10.51 ASSIGN_readonly : ASSIGN_writable)); 10.52 - perfc_incrc(create_grant_host_mapping); 10.53 + perfc_incr(create_grant_host_mapping); 10.54 return GNTST_okay; 10.55 } 10.56 10.57 @@ -1565,7 +1565,7 @@ destroy_grant_host_mapping(unsigned long 10.58 get_gpfn_from_mfn(mfn) == gpfn); 10.59 domain_page_flush_and_put(d, gpaddr, pte, old_pte, page); 10.60 10.61 - perfc_incrc(destroy_grant_host_mapping); 10.62 + perfc_incr(destroy_grant_host_mapping); 10.63 return GNTST_okay; 10.64 } 10.65 10.66 @@ -1629,7 +1629,7 @@ steal_page(struct domain *d, struct page 10.67 free_domheap_page(new); 10.68 return -1; 10.69 } 10.70 - perfc_incrc(steal_page_refcount); 10.71 + perfc_incr(steal_page_refcount); 10.72 } 10.73 10.74 spin_lock(&d->page_alloc_lock); 10.75 @@ -1703,7 +1703,7 @@ steal_page(struct domain *d, struct page 10.76 list_del(&page->list); 10.77 10.78 spin_unlock(&d->page_alloc_lock); 10.79 - perfc_incrc(steal_page); 10.80 + perfc_incr(steal_page); 10.81 return 0; 10.82 } 10.83 10.84 @@ -1723,7 +1723,7 @@ guest_physmap_add_page(struct domain *d, 10.85 10.86 //BUG_ON(mfn != ((lookup_domain_mpa(d, gpfn << PAGE_SHIFT) & _PFN_MASK) >> PAGE_SHIFT)); 10.87 10.88 - perfc_incrc(guest_physmap_add_page); 10.89 + perfc_incr(guest_physmap_add_page); 10.90 } 10.91 10.92 void 10.93 @@ -1732,7 +1732,7 @@ guest_physmap_remove_page(struct domain 10.94 { 10.95 BUG_ON(mfn == 0);//XXX 10.96 zap_domain_page_one(d, gpfn << PAGE_SHIFT, mfn); 10.97 - perfc_incrc(guest_physmap_remove_page); 10.98 + perfc_incr(guest_physmap_remove_page); 10.99 } 10.100 10.101 static void 10.102 @@ -1812,7 +1812,7 @@ domain_page_flush_and_put(struct domain* 10.103 break; 10.104 } 10.105 #endif 10.106 - perfc_incrc(domain_page_flush_and_put); 10.107 + perfc_incr(domain_page_flush_and_put); 10.108 } 10.109 10.110 int 10.111 @@ -2009,7 +2009,7 @@ int get_page_type(struct page_info *page 10.112 10.113 if ( unlikely(!cpus_empty(mask)) ) 10.114 { 10.115 - perfc_incrc(need_flush_tlb_flush); 10.116 + perfc_incr(need_flush_tlb_flush); 10.117 flush_tlb_mask(mask); 10.118 } 10.119
11.1 --- a/xen/arch/ia64/xen/privop.c Tue Mar 27 16:35:37 2007 +0100 11.2 +++ b/xen/arch/ia64/xen/privop.c Tue Mar 27 16:42:47 2007 +0100 11.3 @@ -641,15 +641,15 @@ static IA64FAULT priv_handle_op(VCPU * v 11.4 if (inst.M29.x3 != 0) 11.5 break; 11.6 if (inst.M30.x4 == 8 && inst.M30.x2 == 2) { 11.7 - perfc_incrc(mov_to_ar_imm); 11.8 + perfc_incr(mov_to_ar_imm); 11.9 return priv_mov_to_ar_imm(vcpu, inst); 11.10 } 11.11 if (inst.M44.x4 == 6) { 11.12 - perfc_incrc(ssm); 11.13 + perfc_incr(ssm); 11.14 return priv_ssm(vcpu, inst); 11.15 } 11.16 if (inst.M44.x4 == 7) { 11.17 - perfc_incrc(rsm); 11.18 + perfc_incr(rsm); 11.19 return priv_rsm(vcpu, inst); 11.20 } 11.21 break; 11.22 @@ -658,9 +658,9 @@ static IA64FAULT priv_handle_op(VCPU * v 11.23 x6 = inst.M29.x6; 11.24 if (x6 == 0x2a) { 11.25 if (privify_en && inst.M29.r2 > 63 && inst.M29.ar3 < 8) 11.26 - perfc_incrc(mov_from_ar); // privified mov from kr 11.27 + perfc_incr(mov_from_ar); // privified mov from kr 11.28 else 11.29 - perfc_incrc(mov_to_ar_reg); 11.30 + perfc_incr(mov_to_ar_reg); 11.31 return priv_mov_to_ar_reg(vcpu, inst); 11.32 } 11.33 if (inst.M29.x3 != 0) 11.34 @@ -676,9 +676,9 @@ static IA64FAULT priv_handle_op(VCPU * v 11.35 } 11.36 } 11.37 if (privify_en && x6 == 52 && inst.M28.r3 > 63) 11.38 - perfc_incrc(fc); 11.39 + perfc_incr(fc); 11.40 else if (privify_en && x6 == 16 && inst.M43.r3 > 63) 11.41 - perfc_incrc(cpuid); 11.42 + perfc_incr(cpuid); 11.43 else 11.44 perfc_incra(misc_privop, x6); 11.45 return (*pfunc) (vcpu, inst); 11.46 @@ -688,23 +688,23 @@ static IA64FAULT priv_handle_op(VCPU * v 11.47 break; 11.48 if (inst.B8.x6 == 0x08) { 11.49 IA64FAULT fault; 11.50 - perfc_incrc(rfi); 11.51 + perfc_incr(rfi); 11.52 fault = priv_rfi(vcpu, inst); 11.53 if (fault == IA64_NO_FAULT) 11.54 fault = IA64_RFI_IN_PROGRESS; 11.55 return fault; 11.56 } 11.57 if (inst.B8.x6 == 0x0c) { 11.58 - perfc_incrc(bsw0); 11.59 + perfc_incr(bsw0); 11.60 return priv_bsw0(vcpu, inst); 11.61 } 11.62 if (inst.B8.x6 == 0x0d) { 11.63 - perfc_incrc(bsw1); 11.64 + perfc_incr(bsw1); 11.65 return priv_bsw1(vcpu, inst); 11.66 } 11.67 if (inst.B8.x6 == 0x0) { 11.68 // break instr for privified cover 11.69 - perfc_incrc(cover); 11.70 + perfc_incr(cover); 11.71 return priv_cover(vcpu, inst); 11.72 } 11.73 break; 11.74 @@ -713,7 +713,7 @@ static IA64FAULT priv_handle_op(VCPU * v 11.75 break; 11.76 #if 0 11.77 if (inst.I26.x6 == 0 && inst.I26.x3 == 0) { 11.78 - perfc_incrc(cover); 11.79 + perfc_incr(cover); 11.80 return priv_cover(vcpu, inst); 11.81 } 11.82 #endif 11.83 @@ -721,13 +721,13 @@ static IA64FAULT priv_handle_op(VCPU * v 11.84 break; // I26.x3 == I27.x3 11.85 if (inst.I26.x6 == 0x2a) { 11.86 if (privify_en && inst.I26.r2 > 63 && inst.I26.ar3 < 8) 11.87 - perfc_incrc(mov_from_ar); // privified mov from kr 11.88 + perfc_incr(mov_from_ar); // privified mov from kr 11.89 else 11.90 - perfc_incrc(mov_to_ar_reg); 11.91 + perfc_incr(mov_to_ar_reg); 11.92 return priv_mov_to_ar_reg(vcpu, inst); 11.93 } 11.94 if (inst.I27.x6 == 0x0a) { 11.95 - perfc_incrc(mov_to_ar_imm); 11.96 + perfc_incr(mov_to_ar_imm); 11.97 return priv_mov_to_ar_imm(vcpu, inst); 11.98 } 11.99 break;
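[Editor's note] One detail worth noticing in the privop.c hunk above: the indexed form perfc_incra(misc_privop, x6) at line 11.44 is left alone, as are the PERFCOUNTER_ARRAY() declarations in perfc_defn.h further down. Only the scalar per-CPU variants were redundant; array counters already had a single spelling. Extending the sketch above, an array counter could look like this (hypothetical names, illustrative only):

/* Array counters in the same style: a block of slots per CPU,
 * indexed at increment time. */
#define MISC_PRIVOP_MAX 64
static unsigned long misc_privop_ctr[NR_CPUS][MISC_PRIVOP_MAX];

#define perfc_incra(x, n) (x##_ctr[current_cpu][n]++)

/* Matches the call left untouched at line 11.44 above:
 *   perfc_incra(misc_privop, x6)  ->  misc_privop_ctr[cpu][x6]++ */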
12.1 --- a/xen/arch/ia64/xen/tlb_track.c Tue Mar 27 16:35:37 2007 +0100 12.2 +++ b/xen/arch/ia64/xen/tlb_track.c Tue Mar 27 16:42:47 2007 +0100 12.3 @@ -216,14 +216,14 @@ tlb_track_insert_or_dirty(struct tlb_tra 12.4 TLB_TRACK_RET_T ret = TLB_TRACK_NOT_FOUND; 12.5 12.6 #if 0 /* this is done at vcpu_tlb_track_insert_or_dirty() */ 12.7 - perfc_incrc(tlb_track_iod); 12.8 + perfc_incr(tlb_track_iod); 12.9 if (!pte_tlb_tracking(old_pte)) { 12.10 - perfc_incrc(tlb_track_iod_not_tracked); 12.11 + perfc_incr(tlb_track_iod_not_tracked); 12.12 return TLB_TRACK_NOT_TRACKED; 12.13 } 12.14 #endif 12.15 if (pte_tlb_inserted_many(old_pte)) { 12.16 - perfc_incrc(tlb_track_iod_tracked_many); 12.17 + perfc_incr(tlb_track_iod_tracked_many); 12.18 return TLB_TRACK_MANY; 12.19 } 12.20 12.21 @@ -260,7 +260,7 @@ tlb_track_insert_or_dirty(struct tlb_tra 12.22 if (entry->vaddr == vaddr && entry->rid == rid) { 12.23 // tlb_track_printd("TLB_TRACK_FOUND\n"); 12.24 ret = TLB_TRACK_FOUND; 12.25 - perfc_incrc(tlb_track_iod_found); 12.26 + perfc_incr(tlb_track_iod_found); 12.27 #ifdef CONFIG_TLB_TRACK_CNT 12.28 entry->cnt++; 12.29 if (entry->cnt > TLB_TRACK_CNT_FORCE_MANY) { 12.30 @@ -276,7 +276,7 @@ tlb_track_insert_or_dirty(struct tlb_tra 12.31 */ 12.32 // tlb_track_entry_printf(entry); 12.33 // tlb_track_printd("cnt = %ld\n", entry->cnt); 12.34 - perfc_incrc(tlb_track_iod_force_many); 12.35 + perfc_incr(tlb_track_iod_force_many); 12.36 goto force_many; 12.37 } 12.38 #endif 12.39 @@ -294,14 +294,14 @@ tlb_track_insert_or_dirty(struct tlb_tra 12.40 if (pte_val(ret_pte) != pte_val(old_pte)) { 12.41 // tlb_track_printd("TLB_TRACK_AGAIN\n"); 12.42 ret = TLB_TRACK_AGAIN; 12.43 - perfc_incrc(tlb_track_iod_again); 12.44 + perfc_incr(tlb_track_iod_again); 12.45 } else { 12.46 // tlb_track_printd("TLB_TRACK_MANY del entry 0x%p\n", 12.47 // entry); 12.48 ret = TLB_TRACK_MANY; 12.49 list_del(&entry->list); 12.50 // tlb_track_entry_printf(entry); 12.51 - perfc_incrc(tlb_track_iod_tracked_many_del); 12.52 + perfc_incr(tlb_track_iod_tracked_many_del); 12.53 } 12.54 goto out; 12.55 } 12.56 @@ -314,7 +314,7 @@ tlb_track_insert_or_dirty(struct tlb_tra 12.57 */ 12.58 // tlb_track_printd("TLB_TRACK_AGAIN\n"); 12.59 ret = TLB_TRACK_AGAIN; 12.60 - perfc_incrc(tlb_track_iod_again); 12.61 + perfc_incr(tlb_track_iod_again); 12.62 goto out; 12.63 } 12.64 12.65 @@ -323,7 +323,7 @@ tlb_track_insert_or_dirty(struct tlb_tra 12.66 /* Other thread else removed the tlb_track_entry after we got old_pte 12.67 before we got spin lock. */ 12.68 ret = TLB_TRACK_AGAIN; 12.69 - perfc_incrc(tlb_track_iod_again); 12.70 + perfc_incr(tlb_track_iod_again); 12.71 goto out; 12.72 } 12.73 if (new_entry == NULL && bit_to_be_set == _PAGE_TLB_INSERTED) { 12.74 @@ -334,10 +334,10 @@ tlb_track_insert_or_dirty(struct tlb_tra 12.75 /* entry can't be allocated. 12.76 fall down into full flush mode. 
*/ 12.77 bit_to_be_set |= _PAGE_TLB_INSERTED_MANY; 12.78 - perfc_incrc(tlb_track_iod_new_failed); 12.79 + perfc_incr(tlb_track_iod_new_failed); 12.80 } 12.81 // tlb_track_printd("new_entry 0x%p\n", new_entry); 12.82 - perfc_incrc(tlb_track_iod_new_entry); 12.83 + perfc_incr(tlb_track_iod_new_entry); 12.84 goto again; 12.85 } 12.86 12.87 @@ -348,7 +348,7 @@ tlb_track_insert_or_dirty(struct tlb_tra 12.88 if (tlb_track_pte_zapped(old_pte, ret_pte)) { 12.89 // tlb_track_printd("zapped TLB_TRACK_AGAIN\n"); 12.90 ret = TLB_TRACK_AGAIN; 12.91 - perfc_incrc(tlb_track_iod_again); 12.92 + perfc_incr(tlb_track_iod_again); 12.93 goto out; 12.94 } 12.95 12.96 @@ -359,7 +359,7 @@ tlb_track_insert_or_dirty(struct tlb_tra 12.97 // tlb_track_printd("iserted TLB_TRACK_MANY\n"); 12.98 BUG_ON(!pte_tlb_inserted(ret_pte)); 12.99 ret = TLB_TRACK_MANY; 12.100 - perfc_incrc(tlb_track_iod_new_many); 12.101 + perfc_incr(tlb_track_iod_new_many); 12.102 goto out; 12.103 } 12.104 BUG_ON(pte_tlb_inserted(ret_pte)); 12.105 @@ -381,7 +381,7 @@ tlb_track_insert_or_dirty(struct tlb_tra 12.106 #ifdef CONFIG_TLB_TRACK_CNT 12.107 entry->cnt = 0; 12.108 #endif 12.109 - perfc_incrc(tlb_track_iod_insert); 12.110 + perfc_incr(tlb_track_iod_insert); 12.111 // tlb_track_entry_printf(entry); 12.112 } else { 12.113 goto out; 12.114 @@ -392,7 +392,7 @@ tlb_track_insert_or_dirty(struct tlb_tra 12.115 cpu_set(v->processor, entry->pcpu_dirty_mask); 12.116 BUG_ON(v->vcpu_id >= NR_CPUS); 12.117 vcpu_set(v->vcpu_id, entry->vcpu_dirty_mask); 12.118 - perfc_incrc(tlb_track_iod_dirtied); 12.119 + perfc_incr(tlb_track_iod_dirtied); 12.120 12.121 out: 12.122 spin_unlock(&tlb_track->hash_lock); 12.123 @@ -432,19 +432,19 @@ tlb_track_search_and_remove(struct tlb_t 12.124 struct list_head* head = tlb_track_hash_head(tlb_track, ptep); 12.125 struct tlb_track_entry* entry; 12.126 12.127 - perfc_incrc(tlb_track_sar); 12.128 + perfc_incr(tlb_track_sar); 12.129 if (!pte_tlb_tracking(old_pte)) { 12.130 - perfc_incrc(tlb_track_sar_not_tracked); 12.131 + perfc_incr(tlb_track_sar_not_tracked); 12.132 return TLB_TRACK_NOT_TRACKED; 12.133 } 12.134 if (!pte_tlb_inserted(old_pte)) { 12.135 BUG_ON(pte_tlb_inserted_many(old_pte)); 12.136 - perfc_incrc(tlb_track_sar_not_found); 12.137 + perfc_incr(tlb_track_sar_not_found); 12.138 return TLB_TRACK_NOT_FOUND; 12.139 } 12.140 if (pte_tlb_inserted_many(old_pte)) { 12.141 BUG_ON(!pte_tlb_inserted(old_pte)); 12.142 - perfc_incrc(tlb_track_sar_many); 12.143 + perfc_incr(tlb_track_sar_many); 12.144 return TLB_TRACK_MANY; 12.145 } 12.146 12.147 @@ -475,14 +475,14 @@ tlb_track_search_and_remove(struct tlb_t 12.148 pte_tlb_inserted(current_pte))) { 12.149 BUG_ON(pte_tlb_inserted_many(current_pte)); 12.150 spin_unlock(&tlb_track->hash_lock); 12.151 - perfc_incrc(tlb_track_sar_many); 12.152 + perfc_incr(tlb_track_sar_many); 12.153 return TLB_TRACK_MANY; 12.154 } 12.155 12.156 list_del(&entry->list); 12.157 spin_unlock(&tlb_track->hash_lock); 12.158 *entryp = entry; 12.159 - perfc_incrc(tlb_track_sar_found); 12.160 + perfc_incr(tlb_track_sar_found); 12.161 // tlb_track_entry_printf(entry); 12.162 #ifdef CONFIG_TLB_TRACK_CNT 12.163 // tlb_track_printd("cnt = %ld\n", entry->cnt);
13.1 --- a/xen/arch/ia64/xen/vcpu.c Tue Mar 27 16:35:37 2007 +0100 13.2 +++ b/xen/arch/ia64/xen/vcpu.c Tue Mar 27 16:42:47 2007 +0100 13.3 @@ -1616,7 +1616,7 @@ IA64FAULT vcpu_translate(VCPU * vcpu, u6 13.4 *pteval = (address & _PAGE_PPN_MASK) | 13.5 __DIRTY_BITS | _PAGE_PL_2 | _PAGE_AR_RWX; 13.6 *itir = PAGE_SHIFT << 2; 13.7 - perfc_incrc(phys_translate); 13.8 + perfc_incr(phys_translate); 13.9 return IA64_NO_FAULT; 13.10 } 13.11 } else if (!region && warn_region0_address) { 13.12 @@ -1637,7 +1637,7 @@ IA64FAULT vcpu_translate(VCPU * vcpu, u6 13.13 if (trp != NULL) { 13.14 *pteval = trp->pte.val; 13.15 *itir = trp->itir; 13.16 - perfc_incrc(tr_translate); 13.17 + perfc_incr(tr_translate); 13.18 return IA64_NO_FAULT; 13.19 } 13.20 } 13.21 @@ -1647,7 +1647,7 @@ IA64FAULT vcpu_translate(VCPU * vcpu, u6 13.22 if (trp != NULL) { 13.23 *pteval = trp->pte.val; 13.24 *itir = trp->itir; 13.25 - perfc_incrc(tr_translate); 13.26 + perfc_incr(tr_translate); 13.27 return IA64_NO_FAULT; 13.28 } 13.29 } 13.30 @@ -1660,7 +1660,7 @@ IA64FAULT vcpu_translate(VCPU * vcpu, u6 13.31 && vcpu_match_tr_entry_no_p(trp, address, rid)) { 13.32 *pteval = pte.val; 13.33 *itir = trp->itir; 13.34 - perfc_incrc(dtlb_translate); 13.35 + perfc_incr(dtlb_translate); 13.36 return IA64_USE_TLB; 13.37 } 13.38 13.39 @@ -1709,7 +1709,7 @@ IA64FAULT vcpu_translate(VCPU * vcpu, u6 13.40 out: 13.41 *itir = rr & RR_PS_MASK; 13.42 *pteval = pte.val; 13.43 - perfc_incrc(vhpt_translate); 13.44 + perfc_incr(vhpt_translate); 13.45 return IA64_NO_FAULT; 13.46 } 13.47
14.1 --- a/xen/arch/ia64/xen/vhpt.c Tue Mar 27 16:35:37 2007 +0100 14.2 +++ b/xen/arch/ia64/xen/vhpt.c Tue Mar 27 16:42:47 2007 +0100 14.3 @@ -48,14 +48,14 @@ local_vhpt_flush(void) 14.4 /* this must be after flush */ 14.5 tlbflush_update_time(&__get_cpu_var(vhpt_tlbflush_timestamp), 14.6 flush_time); 14.7 - perfc_incrc(local_vhpt_flush); 14.8 + perfc_incr(local_vhpt_flush); 14.9 } 14.10 14.11 void 14.12 vcpu_vhpt_flush(struct vcpu* v) 14.13 { 14.14 __vhpt_flush(vcpu_vhpt_maddr(v)); 14.15 - perfc_incrc(vcpu_vhpt_flush); 14.16 + perfc_incr(vcpu_vhpt_flush); 14.17 } 14.18 14.19 static void 14.20 @@ -248,7 +248,7 @@ void vcpu_flush_vtlb_all(struct vcpu *v) 14.21 not running on this processor. There is currently no easy way to 14.22 check this. */ 14.23 14.24 - perfc_incrc(vcpu_flush_vtlb_all); 14.25 + perfc_incr(vcpu_flush_vtlb_all); 14.26 } 14.27 14.28 static void __vcpu_flush_vtlb_all(void *vcpu) 14.29 @@ -280,7 +280,7 @@ void domain_flush_vtlb_all(struct domain 14.30 __vcpu_flush_vtlb_all, 14.31 v, 1, 1); 14.32 } 14.33 - perfc_incrc(domain_flush_vtlb_all); 14.34 + perfc_incr(domain_flush_vtlb_all); 14.35 } 14.36 14.37 // Callers may need to call smp_mb() before/after calling this. 14.38 @@ -322,7 +322,7 @@ void vcpu_flush_tlb_vhpt_range (u64 vadr 14.39 vadr, 1UL << log_range); 14.40 ia64_ptcl(vadr, log_range << 2); 14.41 ia64_srlz_i(); 14.42 - perfc_incrc(vcpu_flush_tlb_vhpt_range); 14.43 + perfc_incr(vcpu_flush_tlb_vhpt_range); 14.44 } 14.45 14.46 void domain_flush_vtlb_range (struct domain *d, u64 vadr, u64 addr_range) 14.47 @@ -361,7 +361,7 @@ void domain_flush_vtlb_range (struct dom 14.48 14.49 /* ptc.ga */ 14.50 platform_global_tlb_purge(vadr, vadr + addr_range, PAGE_SHIFT); 14.51 - perfc_incrc(domain_flush_vtlb_range); 14.52 + perfc_incr(domain_flush_vtlb_range); 14.53 } 14.54 14.55 #ifdef CONFIG_XEN_IA64_TLB_TRACK 14.56 @@ -391,11 +391,11 @@ void 14.57 */ 14.58 vcpu_get_rr(current, VRN7 << VRN_SHIFT, &rr7_rid); 14.59 if (likely(rr7_rid == entry->rid)) { 14.60 - perfc_incrc(tlb_track_use_rr7); 14.61 + perfc_incr(tlb_track_use_rr7); 14.62 } else { 14.63 swap_rr0 = 1; 14.64 vaddr = (vaddr << 3) >> 3;// force vrn0 14.65 - perfc_incrc(tlb_track_swap_rr0); 14.66 + perfc_incr(tlb_track_swap_rr0); 14.67 } 14.68 14.69 // tlb_track_entry_printf(entry); 14.70 @@ -435,18 +435,18 @@ void 14.71 /* ptc.ga */ 14.72 if (local_purge) { 14.73 ia64_ptcl(vaddr, PAGE_SHIFT << 2); 14.74 - perfc_incrc(domain_flush_vtlb_local); 14.75 + perfc_incr(domain_flush_vtlb_local); 14.76 } else { 14.77 /* ptc.ga has release semantics. */ 14.78 platform_global_tlb_purge(vaddr, vaddr + PAGE_SIZE, 14.79 PAGE_SHIFT); 14.80 - perfc_incrc(domain_flush_vtlb_global); 14.81 + perfc_incr(domain_flush_vtlb_global); 14.82 } 14.83 14.84 if (swap_rr0) { 14.85 vcpu_set_rr(current, 0, old_rid); 14.86 } 14.87 - perfc_incrc(domain_flush_vtlb_track_entry); 14.88 + perfc_incr(domain_flush_vtlb_track_entry); 14.89 } 14.90 14.91 void
15.1 --- a/xen/arch/powerpc/mm.c Tue Mar 27 16:35:37 2007 +0100 15.2 +++ b/xen/arch/powerpc/mm.c Tue Mar 27 16:42:47 2007 +0100 15.3 @@ -261,7 +261,7 @@ int get_page_type(struct page_info *page 15.4 15.5 if ( unlikely(!cpus_empty(mask)) ) 15.6 { 15.7 - perfc_incrc(need_flush_tlb_flush); 15.8 + perfc_incr(need_flush_tlb_flush); 15.9 flush_tlb_mask(mask); 15.10 } 15.11
16.1 --- a/xen/arch/x86/apic.c Tue Mar 27 16:35:37 2007 +0100 16.2 +++ b/xen/arch/x86/apic.c Tue Mar 27 16:42:47 2007 +0100 16.3 @@ -1076,7 +1076,7 @@ int reprogram_timer(s_time_t timeout) 16.4 fastcall void smp_apic_timer_interrupt(struct cpu_user_regs * regs) 16.5 { 16.6 ack_APIC_irq(); 16.7 - perfc_incrc(apic_timer); 16.8 + perfc_incr(apic_timer); 16.9 raise_softirq(TIMER_SOFTIRQ); 16.10 } 16.11
17.1 --- a/xen/arch/x86/extable.c Tue Mar 27 16:35:37 2007 +0100 17.2 +++ b/xen/arch/x86/extable.c Tue Mar 27 16:42:47 2007 +0100 17.3 @@ -72,7 +72,7 @@ search_pre_exception_table(struct cpu_us 17.4 if ( fixup ) 17.5 { 17.6 dprintk(XENLOG_INFO, "Pre-exception: %p -> %p\n", _p(addr), _p(fixup)); 17.7 - perfc_incrc(exception_fixed); 17.8 + perfc_incr(exception_fixed); 17.9 } 17.10 return fixup; 17.11 }
18.1 --- a/xen/arch/x86/irq.c Tue Mar 27 16:35:37 2007 +0100 18.2 +++ b/xen/arch/x86/irq.c Tue Mar 27 16:42:47 2007 +0100 18.3 @@ -56,7 +56,7 @@ asmlinkage void do_IRQ(struct cpu_user_r 18.4 irq_desc_t *desc = &irq_desc[vector]; 18.5 struct irqaction *action; 18.6 18.7 - perfc_incrc(irqs); 18.8 + perfc_incr(irqs); 18.9 18.10 spin_lock(&desc->lock); 18.11 desc->handler->ack(vector);
19.1 --- a/xen/arch/x86/mm.c Tue Mar 27 16:35:37 2007 +0100 19.2 +++ b/xen/arch/x86/mm.c Tue Mar 27 16:42:47 2007 +0100 19.3 @@ -1726,7 +1726,7 @@ int get_page_type(struct page_info *page 19.4 (!shadow_mode_enabled(page_get_owner(page)) || 19.5 ((nx & PGT_type_mask) == PGT_writable_page)) ) 19.6 { 19.7 - perfc_incrc(need_flush_tlb_flush); 19.8 + perfc_incr(need_flush_tlb_flush); 19.9 flush_tlb_mask(mask); 19.10 } 19.11 19.12 @@ -2729,7 +2729,7 @@ int do_update_va_mapping(unsigned long v 19.13 cpumask_t pmask; 19.14 int rc = 0; 19.15 19.16 - perfc_incrc(calls_to_update_va); 19.17 + perfc_incr(calls_to_update_va); 19.18 19.19 if ( unlikely(!__addr_ok(va) && !paging_mode_external(d)) ) 19.20 return -EINVAL; 19.21 @@ -3386,7 +3386,7 @@ int ptwr_do_page_fault(struct vcpu *v, u 19.22 goto bail; 19.23 19.24 UNLOCK_BIGLOCK(d); 19.25 - perfc_incrc(ptwr_emulations); 19.26 + perfc_incr(ptwr_emulations); 19.27 return EXCRET_fault_fixed; 19.28 19.29 bail:
20.1 --- a/xen/arch/x86/mm/shadow/common.c Tue Mar 27 16:35:37 2007 +0100 20.2 +++ b/xen/arch/x86/mm/shadow/common.c Tue Mar 27 16:42:47 2007 +0100 20.3 @@ -276,7 +276,7 @@ hvm_emulate_write(enum x86_segment seg, 20.4 20.5 /* How many emulations could we save if we unshadowed on stack writes? */ 20.6 if ( seg == x86_seg_ss ) 20.7 - perfc_incrc(shadow_fault_emulate_stack); 20.8 + perfc_incr(shadow_fault_emulate_stack); 20.9 20.10 rc = hvm_translate_linear_addr( 20.11 seg, offset, bytes, hvm_access_write, sh_ctxt, &addr); 20.12 @@ -804,7 +804,7 @@ void shadow_prealloc(struct domain *d, u 20.13 ASSERT(v != NULL); /* Shouldn't have enabled shadows if we've no vcpus */ 20.14 20.15 /* Stage one: walk the list of pinned pages, unpinning them */ 20.16 - perfc_incrc(shadow_prealloc_1); 20.17 + perfc_incr(shadow_prealloc_1); 20.18 list_for_each_backwards_safe(l, t, &d->arch.paging.shadow.pinned_shadows) 20.19 { 20.20 sp = list_entry(l, struct shadow_page_info, list); 20.21 @@ -820,7 +820,7 @@ void shadow_prealloc(struct domain *d, u 20.22 /* Stage two: all shadow pages are in use in hierarchies that are 20.23 * loaded in cr3 on some vcpu. Walk them, unhooking the non-Xen 20.24 * mappings. */ 20.25 - perfc_incrc(shadow_prealloc_2); 20.26 + perfc_incr(shadow_prealloc_2); 20.27 20.28 for_each_vcpu(d, v2) 20.29 for ( i = 0 ; i < 4 ; i++ ) 20.30 @@ -929,7 +929,7 @@ mfn_t shadow_alloc(struct domain *d, 20.31 ASSERT(shadow_locked_by_me(d)); 20.32 ASSERT(order <= SHADOW_MAX_ORDER); 20.33 ASSERT(shadow_type != SH_type_none); 20.34 - perfc_incrc(shadow_alloc); 20.35 + perfc_incr(shadow_alloc); 20.36 20.37 /* Find smallest order which can satisfy the request. */ 20.38 for ( i = order; i <= SHADOW_MAX_ORDER; i++ ) 20.39 @@ -967,7 +967,7 @@ mfn_t shadow_alloc(struct domain *d, 20.40 tlbflush_filter(mask, sp[i].tlbflush_timestamp); 20.41 if ( unlikely(!cpus_empty(mask)) ) 20.42 { 20.43 - perfc_incrc(shadow_alloc_tlbflush); 20.44 + perfc_incr(shadow_alloc_tlbflush); 20.45 flush_tlb_mask(mask); 20.46 } 20.47 /* Now safe to clear the page for reuse */ 20.48 @@ -997,7 +997,7 @@ void shadow_free(struct domain *d, mfn_t 20.49 int i; 20.50 20.51 ASSERT(shadow_locked_by_me(d)); 20.52 - perfc_incrc(shadow_free); 20.53 + perfc_incr(shadow_free); 20.54 20.55 shadow_type = sp->type; 20.56 ASSERT(shadow_type != SH_type_none); 20.57 @@ -1406,7 +1406,7 @@ mfn_t shadow_hash_lookup(struct vcpu *v, 20.58 20.59 sh_hash_audit(d); 20.60 20.61 - perfc_incrc(shadow_hash_lookups); 20.62 + perfc_incr(shadow_hash_lookups); 20.63 key = sh_hash(n, t); 20.64 sh_hash_audit_bucket(d, key); 20.65 20.66 @@ -1434,7 +1434,7 @@ mfn_t shadow_hash_lookup(struct vcpu *v, 20.67 } 20.68 else 20.69 { 20.70 - perfc_incrc(shadow_hash_lookup_head); 20.71 + perfc_incr(shadow_hash_lookup_head); 20.72 } 20.73 return shadow_page_to_mfn(sp); 20.74 } 20.75 @@ -1442,7 +1442,7 @@ mfn_t shadow_hash_lookup(struct vcpu *v, 20.76 sp = sp->next_shadow; 20.77 } 20.78 20.79 - perfc_incrc(shadow_hash_lookup_miss); 20.80 + perfc_incr(shadow_hash_lookup_miss); 20.81 return _mfn(INVALID_MFN); 20.82 } 20.83 20.84 @@ -1460,7 +1460,7 @@ void shadow_hash_insert(struct vcpu *v, 20.85 20.86 sh_hash_audit(d); 20.87 20.88 - perfc_incrc(shadow_hash_inserts); 20.89 + perfc_incr(shadow_hash_inserts); 20.90 key = sh_hash(n, t); 20.91 sh_hash_audit_bucket(d, key); 20.92 20.93 @@ -1486,7 +1486,7 @@ void shadow_hash_delete(struct vcpu *v, 20.94 20.95 sh_hash_audit(d); 20.96 20.97 - perfc_incrc(shadow_hash_deletes); 20.98 + perfc_incr(shadow_hash_deletes); 20.99 key = sh_hash(n, t); 20.100 
sh_hash_audit_bucket(d, key); 20.101 20.102 @@ -1713,7 +1713,7 @@ int sh_remove_write_access(struct vcpu * 20.103 || (pg->u.inuse.type_info & PGT_count_mask) == 0 ) 20.104 return 0; 20.105 20.106 - perfc_incrc(shadow_writeable); 20.107 + perfc_incr(shadow_writeable); 20.108 20.109 /* If this isn't a "normal" writeable page, the domain is trying to 20.110 * put pagetables in special memory of some kind. We can't allow that. */ 20.111 @@ -1735,7 +1735,7 @@ int sh_remove_write_access(struct vcpu * 20.112 20.113 #define GUESS(_a, _h) do { \ 20.114 if ( v->arch.paging.mode->shadow.guess_wrmap(v, (_a), gmfn) ) \ 20.115 - perfc_incrc(shadow_writeable_h_ ## _h); \ 20.116 + perfc_incr(shadow_writeable_h_ ## _h); \ 20.117 if ( (pg->u.inuse.type_info & PGT_count_mask) == 0 ) \ 20.118 return 1; \ 20.119 } while (0) 20.120 @@ -1808,7 +1808,7 @@ int sh_remove_write_access(struct vcpu * 20.121 callbacks[shtype](v, last_smfn, gmfn); 20.122 20.123 if ( (pg->u.inuse.type_info & PGT_count_mask) != old_count ) 20.124 - perfc_incrc(shadow_writeable_h_5); 20.125 + perfc_incr(shadow_writeable_h_5); 20.126 } 20.127 20.128 if ( (pg->u.inuse.type_info & PGT_count_mask) == 0 ) 20.129 @@ -1817,7 +1817,7 @@ int sh_remove_write_access(struct vcpu * 20.130 #endif /* SHADOW_OPTIMIZATIONS & SHOPT_WRITABLE_HEURISTIC */ 20.131 20.132 /* Brute-force search of all the shadows, by walking the hash */ 20.133 - perfc_incrc(shadow_writeable_bf); 20.134 + perfc_incr(shadow_writeable_bf); 20.135 hash_foreach(v, callback_mask, callbacks, gmfn); 20.136 20.137 /* If that didn't catch the mapping, something is very wrong */ 20.138 @@ -1888,7 +1888,7 @@ int sh_remove_all_mappings(struct vcpu * 20.139 | 1 << SH_type_fl1_64_shadow 20.140 ; 20.141 20.142 - perfc_incrc(shadow_mappings); 20.143 + perfc_incr(shadow_mappings); 20.144 if ( (page->count_info & PGC_count_mask) == 0 ) 20.145 return 0; 20.146 20.147 @@ -1903,7 +1903,7 @@ int sh_remove_all_mappings(struct vcpu * 20.148 * Heuristics for finding the (probably) single mapping of this gmfn */ 20.149 20.150 /* Brute-force search of all the shadows, by walking the hash */ 20.151 - perfc_incrc(shadow_mappings_bf); 20.152 + perfc_incr(shadow_mappings_bf); 20.153 hash_foreach(v, callback_mask, callbacks, gmfn); 20.154 20.155 /* If that didn't catch the mapping, something is very wrong */ 20.156 @@ -1992,9 +1992,9 @@ static int sh_remove_shadow_via_pointer( 20.157 20.158 sh_unmap_domain_page(vaddr); 20.159 if ( rc ) 20.160 - perfc_incrc(shadow_up_pointer); 20.161 + perfc_incr(shadow_up_pointer); 20.162 else 20.163 - perfc_incrc(shadow_unshadow_bf); 20.164 + perfc_incr(shadow_unshadow_bf); 20.165 20.166 return rc; 20.167 } 20.168 @@ -2093,7 +2093,7 @@ void sh_remove_shadows(struct vcpu *v, m 20.169 } 20.170 20.171 /* Search for this shadow in all appropriate shadows */ 20.172 - perfc_incrc(shadow_unshadow); 20.173 + perfc_incr(shadow_unshadow); 20.174 sh_flags = pg->shadow_flags; 20.175 20.176 /* Lower-level shadows need to be excised from upper-level shadows.
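[Editor's note] The GUESS() macro in the shadow/common.c hunk above (line 20.116) passes a pasted token, perfc_incr(shadow_writeable_h_ ## _h). This shows why the rename can be a pure text substitution: counter names are bare preprocessor tokens, and perfc_incr() keeps the same calling convention perfc_incrc() had. In the sketch's terms (illustrative, assuming a PERFC_shadow_writeable_h_2 slot exists):

#define GUESS_BUMP(_h) perfc_incr(shadow_writeable_h_ ## _h)
/* GUESS_BUMP(2) expands to
 *   perfcounters[current_cpu][PERFC_shadow_writeable_h_2]++ */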
21.1 --- a/xen/arch/x86/mm/shadow/multi.c Tue Mar 27 16:35:37 2007 +0100 21.2 +++ b/xen/arch/x86/mm/shadow/multi.c Tue Mar 27 16:42:47 2007 +0100 21.3 @@ -109,7 +109,7 @@ get_shadow_status(struct vcpu *v, mfn_t 21.4 /* Look for shadows in the hash table */ 21.5 { 21.6 mfn_t smfn = shadow_hash_lookup(v, mfn_x(gmfn), shadow_type); 21.7 - perfc_incrc(shadow_get_shadow_status); 21.8 + perfc_incr(shadow_get_shadow_status); 21.9 return smfn; 21.10 } 21.11 21.12 @@ -209,7 +209,7 @@ guest_walk_tables(struct vcpu *v, unsign 21.13 { 21.14 ASSERT(!guest_op || shadow_locked_by_me(v->domain)); 21.15 21.16 - perfc_incrc(shadow_guest_walk); 21.17 + perfc_incr(shadow_guest_walk); 21.18 memset(gw, 0, sizeof(*gw)); 21.19 gw->va = va; 21.20 21.21 @@ -448,14 +448,14 @@ static u32 guest_set_ad_bits(struct vcpu 21.22 == (_PAGE_DIRTY | _PAGE_ACCESSED) ) 21.23 return flags; /* Guest already has A and D bits set */ 21.24 flags |= _PAGE_DIRTY | _PAGE_ACCESSED; 21.25 - perfc_incrc(shadow_ad_update); 21.26 + perfc_incr(shadow_ad_update); 21.27 } 21.28 else 21.29 { 21.30 if ( flags & _PAGE_ACCESSED ) 21.31 return flags; /* Guest already has A bit set */ 21.32 flags |= _PAGE_ACCESSED; 21.33 - perfc_incrc(shadow_a_update); 21.34 + perfc_incr(shadow_a_update); 21.35 } 21.36 21.37 /* Set the bit(s) */ 21.38 @@ -863,7 +863,7 @@ shadow_write_entries(void *d, void *s, i 21.39 * using map_domain_page() to get a writeable mapping if we need to. */ 21.40 if ( __copy_to_user(d, d, sizeof (unsigned long)) != 0 ) 21.41 { 21.42 - perfc_incrc(shadow_linear_map_failed); 21.43 + perfc_incr(shadow_linear_map_failed); 21.44 map = sh_map_domain_page(mfn); 21.45 ASSERT(map != NULL); 21.46 dst = map + ((unsigned long)dst & (PAGE_SIZE - 1)); 21.47 @@ -925,7 +925,7 @@ shadow_get_page_from_l1e(shadow_l1e_t sl 21.48 21.49 if ( unlikely(!res) ) 21.50 { 21.51 - perfc_incrc(shadow_get_page_fail); 21.52 + perfc_incr(shadow_get_page_fail); 21.53 SHADOW_PRINTK("failed: l1e=" SH_PRI_pte "\n"); 21.54 } 21.55 21.56 @@ -2198,7 +2198,7 @@ static int validate_gl4e(struct vcpu *v, 21.57 mfn_t sl3mfn = _mfn(INVALID_MFN); 21.58 int result = 0; 21.59 21.60 - perfc_incrc(shadow_validate_gl4e_calls); 21.61 + perfc_incr(shadow_validate_gl4e_calls); 21.62 21.63 if ( guest_l4e_get_flags(*new_gl4e) & _PAGE_PRESENT ) 21.64 { 21.65 @@ -2250,7 +2250,7 @@ static int validate_gl3e(struct vcpu *v, 21.66 mfn_t sl2mfn = _mfn(INVALID_MFN); 21.67 int result = 0; 21.68 21.69 - perfc_incrc(shadow_validate_gl3e_calls); 21.70 + perfc_incr(shadow_validate_gl3e_calls); 21.71 21.72 if ( guest_l3e_get_flags(*new_gl3e) & _PAGE_PRESENT ) 21.73 { 21.74 @@ -2277,7 +2277,7 @@ static int validate_gl2e(struct vcpu *v, 21.75 mfn_t sl1mfn = _mfn(INVALID_MFN); 21.76 int result = 0; 21.77 21.78 - perfc_incrc(shadow_validate_gl2e_calls); 21.79 + perfc_incr(shadow_validate_gl2e_calls); 21.80 21.81 if ( guest_l2e_get_flags(*new_gl2e) & _PAGE_PRESENT ) 21.82 { 21.83 @@ -2363,7 +2363,7 @@ static int validate_gl1e(struct vcpu *v, 21.84 mfn_t gmfn; 21.85 int result = 0, mmio; 21.86 21.87 - perfc_incrc(shadow_validate_gl1e_calls); 21.88 + perfc_incr(shadow_validate_gl1e_calls); 21.89 21.90 gfn = guest_l1e_get_gfn(*new_gl1e); 21.91 gmfn = vcpu_gfn_to_mfn(v, gfn); 21.92 @@ -2523,7 +2523,7 @@ static inline void check_for_early_unsha 21.93 u32 flags = mfn_to_page(gmfn)->shadow_flags; 21.94 if ( !(flags & (SHF_L2_32|SHF_L2_PAE|SHF_L2H_PAE|SHF_L4_64)) ) 21.95 { 21.96 - perfc_incrc(shadow_early_unshadow); 21.97 + perfc_incr(shadow_early_unshadow); 21.98 sh_remove_shadows(v, gmfn, 0, 0 /* Slow, can fail to 
unshadow */ ); 21.99 } 21.100 } 21.101 @@ -2642,7 +2642,7 @@ static int sh_page_fault(struct vcpu *v, 21.102 SHADOW_PRINTK("d:v=%u:%u va=%#lx err=%u\n", 21.103 v->domain->domain_id, v->vcpu_id, va, regs->error_code); 21.104 21.105 - perfc_incrc(shadow_fault); 21.106 + perfc_incr(shadow_fault); 21.107 // 21.108 // XXX: Need to think about eventually mapping superpages directly in the 21.109 // shadow (when possible), as opposed to splintering them into a 21.110 @@ -2670,7 +2670,7 @@ static int sh_page_fault(struct vcpu *v, 21.111 ASSERT(regs->error_code & PFEC_page_present); 21.112 regs->error_code ^= (PFEC_reserved_bit|PFEC_page_present); 21.113 reset_early_unshadow(v); 21.114 - perfc_incrc(shadow_fault_fast_gnp); 21.115 + perfc_incr(shadow_fault_fast_gnp); 21.116 SHADOW_PRINTK("fast path not-present\n"); 21.117 return 0; 21.118 } 21.119 @@ -2688,7 +2688,7 @@ static int sh_page_fault(struct vcpu *v, 21.120 << PAGE_SHIFT) 21.121 | (va & ~PAGE_MASK); 21.122 } 21.123 - perfc_incrc(shadow_fault_fast_mmio); 21.124 + perfc_incr(shadow_fault_fast_mmio); 21.125 SHADOW_PRINTK("fast path mmio %#"PRIpaddr"\n", gpa); 21.126 reset_early_unshadow(v); 21.127 handle_mmio(gpa); 21.128 @@ -2699,7 +2699,7 @@ static int sh_page_fault(struct vcpu *v, 21.129 /* This should be exceptionally rare: another vcpu has fixed 21.130 * the tables between the fault and our reading the l1e. 21.131 * Retry and let the hardware give us the right fault next time. */ 21.132 - perfc_incrc(shadow_fault_fast_fail); 21.133 + perfc_incr(shadow_fault_fast_fail); 21.134 SHADOW_PRINTK("fast path false alarm!\n"); 21.135 return EXCRET_fault_fixed; 21.136 } 21.137 @@ -2746,7 +2746,7 @@ static int sh_page_fault(struct vcpu *v, 21.138 goto mmio; 21.139 } 21.140 21.141 - perfc_incrc(shadow_fault_bail_not_present); 21.142 + perfc_incr(shadow_fault_bail_not_present); 21.143 goto not_a_shadow_fault; 21.144 } 21.145 21.146 @@ -2761,7 +2761,7 @@ static int sh_page_fault(struct vcpu *v, 21.147 !(accumulated_gflags & _PAGE_USER) ) 21.148 { 21.149 /* illegal user-mode access to supervisor-only page */ 21.150 - perfc_incrc(shadow_fault_bail_user_supervisor); 21.151 + perfc_incr(shadow_fault_bail_user_supervisor); 21.152 goto not_a_shadow_fault; 21.153 } 21.154 21.155 @@ -2772,7 +2772,7 @@ static int sh_page_fault(struct vcpu *v, 21.156 { 21.157 if ( unlikely(!(accumulated_gflags & _PAGE_RW)) ) 21.158 { 21.159 - perfc_incrc(shadow_fault_bail_ro_mapping); 21.160 + perfc_incr(shadow_fault_bail_ro_mapping); 21.161 goto not_a_shadow_fault; 21.162 } 21.163 } 21.164 @@ -2787,7 +2787,7 @@ static int sh_page_fault(struct vcpu *v, 21.165 if ( accumulated_gflags & _PAGE_NX_BIT ) 21.166 { 21.167 /* NX prevented this code fetch */ 21.168 - perfc_incrc(shadow_fault_bail_nx); 21.169 + perfc_incr(shadow_fault_bail_nx); 21.170 goto not_a_shadow_fault; 21.171 } 21.172 } 21.173 @@ -2802,7 +2802,7 @@ static int sh_page_fault(struct vcpu *v, 21.174 21.175 if ( !mmio && !mfn_valid(gmfn) ) 21.176 { 21.177 - perfc_incrc(shadow_fault_bail_bad_gfn); 21.178 + perfc_incr(shadow_fault_bail_bad_gfn); 21.179 SHADOW_PRINTK("BAD gfn=%"SH_PRI_gfn" gmfn=%"PRI_mfn"\n", 21.180 gfn_x(gfn), mfn_x(gmfn)); 21.181 goto not_a_shadow_fault; 21.182 @@ -2844,12 +2844,12 @@ static int sh_page_fault(struct vcpu *v, 21.183 { 21.184 if ( ft == ft_demand_write ) 21.185 { 21.186 - perfc_incrc(shadow_fault_emulate_write); 21.187 + perfc_incr(shadow_fault_emulate_write); 21.188 goto emulate; 21.189 } 21.190 else if ( shadow_mode_trap_reads(d) && ft == ft_demand_read ) 21.191 { 21.192 - 
perfc_incrc(shadow_fault_emulate_read); 21.193 + perfc_incr(shadow_fault_emulate_read); 21.194 goto emulate; 21.195 } 21.196 } 21.197 @@ -2860,7 +2860,7 @@ static int sh_page_fault(struct vcpu *v, 21.198 goto mmio; 21.199 } 21.200 21.201 - perfc_incrc(shadow_fault_fixed); 21.202 + perfc_incr(shadow_fault_fixed); 21.203 d->arch.paging.shadow.fault_count++; 21.204 reset_early_unshadow(v); 21.205 21.206 @@ -2920,7 +2920,7 @@ static int sh_page_fault(struct vcpu *v, 21.207 { 21.208 SHADOW_PRINTK("emulator failure, unshadowing mfn %#lx\n", 21.209 mfn_x(gmfn)); 21.210 - perfc_incrc(shadow_fault_emulate_failed); 21.211 + perfc_incr(shadow_fault_emulate_failed); 21.212 /* If this is actually a page table, then we have a bug, and need 21.213 * to support more operations in the emulator. More likely, 21.214 * though, this is a hint that this page should not be shadowed. */ 21.215 @@ -2935,7 +2935,7 @@ static int sh_page_fault(struct vcpu *v, 21.216 mmio: 21.217 if ( !guest_mode(regs) ) 21.218 goto not_a_shadow_fault; 21.219 - perfc_incrc(shadow_fault_mmio); 21.220 + perfc_incr(shadow_fault_mmio); 21.221 sh_audit_gw(v, &gw); 21.222 unmap_walk(v, &gw); 21.223 SHADOW_PRINTK("mmio %#"PRIpaddr"\n", gpa); 21.224 @@ -2964,7 +2964,7 @@ sh_invlpg(struct vcpu *v, unsigned long 21.225 { 21.226 shadow_l2e_t sl2e; 21.227 21.228 - perfc_incrc(shadow_invlpg); 21.229 + perfc_incr(shadow_invlpg); 21.230 21.231 /* First check that we can safely read the shadow l2e. SMP/PAE linux can 21.232 * run as high as 6% of invlpg calls where we haven't shadowed the l2 21.233 @@ -2983,7 +2983,7 @@ sh_invlpg(struct vcpu *v, unsigned long 21.234 + shadow_l3_linear_offset(va)), 21.235 sizeof (sl3e)) != 0 ) 21.236 { 21.237 - perfc_incrc(shadow_invlpg_fault); 21.238 + perfc_incr(shadow_invlpg_fault); 21.239 return 0; 21.240 } 21.241 if ( (!shadow_l3e_get_flags(sl3e) & _PAGE_PRESENT) ) 21.242 @@ -3002,7 +3002,7 @@ sh_invlpg(struct vcpu *v, unsigned long 21.243 sh_linear_l2_table(v) + shadow_l2_linear_offset(va), 21.244 sizeof (sl2e)) != 0 ) 21.245 { 21.246 - perfc_incrc(shadow_invlpg_fault); 21.247 + perfc_incr(shadow_invlpg_fault); 21.248 return 0; 21.249 } 21.250
22.1 --- a/xen/arch/x86/smp.c Tue Mar 27 16:35:37 2007 +0100 22.2 +++ b/xen/arch/x86/smp.c Tue Mar 27 16:42:47 2007 +0100 22.3 @@ -169,7 +169,7 @@ static unsigned long flush_va; 22.4 fastcall void smp_invalidate_interrupt(void) 22.5 { 22.6 ack_APIC_irq(); 22.7 - perfc_incrc(ipis); 22.8 + perfc_incr(ipis); 22.9 irq_enter(); 22.10 if ( !__sync_lazy_execstate() ) 22.11 { 22.12 @@ -329,7 +329,7 @@ void smp_send_stop(void) 22.13 fastcall void smp_event_check_interrupt(struct cpu_user_regs *regs) 22.14 { 22.15 ack_APIC_irq(); 22.16 - perfc_incrc(ipis); 22.17 + perfc_incr(ipis); 22.18 } 22.19 22.20 fastcall void smp_call_function_interrupt(struct cpu_user_regs *regs) 22.21 @@ -338,7 +338,7 @@ fastcall void smp_call_function_interrup 22.22 void *info = call_data->info; 22.23 22.24 ack_APIC_irq(); 22.25 - perfc_incrc(ipis); 22.26 + perfc_incr(ipis); 22.27 22.28 if ( !cpu_isset(smp_processor_id(), call_data->selected) ) 22.29 return;
23.1 --- a/xen/arch/x86/traps.c Tue Mar 27 16:35:37 2007 +0100 23.2 +++ b/xen/arch/x86/traps.c Tue Mar 27 16:42:47 2007 +0100 23.3 @@ -956,7 +956,7 @@ asmlinkage int do_page_fault(struct cpu_ 23.4 23.5 DEBUGGER_trap_entry(TRAP_page_fault, regs); 23.6 23.7 - perfc_incrc(page_faults); 23.8 + perfc_incr(page_faults); 23.9 23.10 if ( unlikely((rc = fixup_page_fault(addr, regs)) != 0) ) 23.11 return rc; 23.12 @@ -968,7 +968,7 @@ asmlinkage int do_page_fault(struct cpu_ 23.13 23.14 if ( likely((fixup = search_exception_table(regs->eip)) != 0) ) 23.15 { 23.16 - perfc_incrc(copy_user_faults); 23.17 + perfc_incr(copy_user_faults); 23.18 regs->eip = fixup; 23.19 return 0; 23.20 }
24.1 --- a/xen/arch/x86/x86_32/domain_page.c Tue Mar 27 16:35:37 2007 +0100 24.2 +++ b/xen/arch/x86/x86_32/domain_page.c Tue Mar 27 16:42:47 2007 +0100 24.3 @@ -50,7 +50,7 @@ void *map_domain_page(unsigned long mfn) 24.4 24.5 ASSERT(!in_irq()); 24.6 24.7 - perfc_incrc(map_domain_page_count); 24.8 + perfc_incr(map_domain_page_count); 24.9 24.10 v = mapcache_current_vcpu(); 24.11 24.12 @@ -76,7 +76,7 @@ void *map_domain_page(unsigned long mfn) 24.13 cache->shadow_epoch[vcpu] = cache->epoch; 24.14 if ( NEED_FLUSH(this_cpu(tlbflush_time), cache->tlbflush_timestamp) ) 24.15 { 24.16 - perfc_incrc(domain_page_tlb_flush); 24.17 + perfc_incr(domain_page_tlb_flush); 24.18 local_flush_tlb(); 24.19 } 24.20 } 24.21 @@ -92,7 +92,7 @@ void *map_domain_page(unsigned long mfn) 24.22 } 24.23 24.24 /* /Second/, flush TLBs. */ 24.25 - perfc_incrc(domain_page_tlb_flush); 24.26 + perfc_incr(domain_page_tlb_flush); 24.27 local_flush_tlb(); 24.28 cache->shadow_epoch[vcpu] = ++cache->epoch; 24.29 cache->tlbflush_timestamp = tlbflush_current_time();
25.1 --- a/xen/arch/x86/x86_32/seg_fixup.c Tue Mar 27 16:35:37 2007 +0100 25.2 +++ b/xen/arch/x86/x86_32/seg_fixup.c Tue Mar 27 16:42:47 2007 +0100 25.3 @@ -434,7 +434,7 @@ int gpf_emulate_4gb(struct cpu_user_regs 25.4 goto fail; 25.5 25.6 /* Success! */ 25.7 - perfc_incrc(seg_fixups); 25.8 + perfc_incr(seg_fixups); 25.9 25.10 /* If requested, give a callback on otherwise unused vector 15. */ 25.11 if ( VM_ASSIST(d->domain, VMASST_TYPE_4gb_segments_notify) )
26.1 --- a/xen/common/page_alloc.c Tue Mar 27 16:35:37 2007 +0100 26.2 +++ b/xen/common/page_alloc.c Tue Mar 27 16:42:47 2007 +0100 26.3 @@ -423,7 +423,7 @@ static struct page_info *alloc_heap_page 26.4 26.5 if ( unlikely(!cpus_empty(mask)) ) 26.6 { 26.7 - perfc_incrc(need_flush_tlb_flush); 26.8 + perfc_incr(need_flush_tlb_flush); 26.9 flush_tlb_mask(mask); 26.10 } 26.11
27.1 --- a/xen/common/schedule.c Tue Mar 27 16:35:37 2007 +0100 27.2 +++ b/xen/common/schedule.c Tue Mar 27 16:42:47 2007 +0100 27.3 @@ -606,7 +606,7 @@ static void schedule(void) 27.4 ASSERT(!in_irq()); 27.5 ASSERT(this_cpu(mc_state).flags == 0); 27.6 27.7 - perfc_incrc(sched_run); 27.8 + perfc_incr(sched_run); 27.9 27.10 sd = &this_cpu(schedule_data); 27.11 27.12 @@ -654,7 +654,7 @@ static void schedule(void) 27.13 27.14 spin_unlock_irq(&sd->schedule_lock); 27.15 27.16 - perfc_incrc(sched_ctx); 27.17 + perfc_incr(sched_ctx); 27.18 27.19 stop_timer(&prev->periodic_timer); 27.20 27.21 @@ -681,7 +681,7 @@ void context_saved(struct vcpu *prev) 27.22 static void s_timer_fn(void *unused) 27.23 { 27.24 raise_softirq(SCHEDULE_SOFTIRQ); 27.25 - perfc_incrc(sched_irq); 27.26 + perfc_incr(sched_irq); 27.27 } 27.28 27.29 /* Per-VCPU periodic timer function: sends a virtual timer interrupt. */
--- a/xen/include/asm-ia64/perfc_defn.h Tue Mar 27 16:35:37 2007 +0100
+++ b/xen/include/asm-ia64/perfc_defn.h Tue Mar 27 16:42:47 2007 +0100
@@ -1,34 +1,34 @@
 /* This file is legitimately included multiple times. */
 
-PERFCOUNTER_CPU(dtlb_translate, "dtlb hit")
+PERFCOUNTER(dtlb_translate, "dtlb hit")
 
-PERFCOUNTER_CPU(tr_translate, "TR hit")
+PERFCOUNTER(tr_translate, "TR hit")
 
-PERFCOUNTER_CPU(vhpt_translate, "virtual vhpt translation")
-PERFCOUNTER_CPU(fast_vhpt_translate, "virtual vhpt fast translation")
+PERFCOUNTER(vhpt_translate, "virtual vhpt translation")
+PERFCOUNTER(fast_vhpt_translate, "virtual vhpt fast translation")
 
 PERFCOUNTER(recover_to_page_fault, "recoveries to page fault")
 PERFCOUNTER(recover_to_break_fault, "recoveries to break fault")
 
-PERFCOUNTER_CPU(phys_translate, "metaphysical translation")
+PERFCOUNTER(phys_translate, "metaphysical translation")
 
-PERFCOUNTER_CPU(idle_when_pending, "vcpu idle at event")
+PERFCOUNTER(idle_when_pending, "vcpu idle at event")
 
-PERFCOUNTER_CPU(pal_halt_light, "calls to pal_halt_light")
+PERFCOUNTER(pal_halt_light, "calls to pal_halt_light")
 
-PERFCOUNTER_CPU(lazy_cover, "lazy cover")
+PERFCOUNTER(lazy_cover, "lazy cover")
 
-PERFCOUNTER_CPU(mov_to_ar_imm, "privop mov_to_ar_imm")
-PERFCOUNTER_CPU(mov_to_ar_reg, "privop mov_to_ar_reg")
-PERFCOUNTER_CPU(mov_from_ar, "privop privified-mov_from_ar")
-PERFCOUNTER_CPU(ssm, "privop ssm")
-PERFCOUNTER_CPU(rsm, "privop rsm")
-PERFCOUNTER_CPU(rfi, "privop rfi")
-PERFCOUNTER_CPU(bsw0, "privop bsw0")
-PERFCOUNTER_CPU(bsw1, "privop bsw1")
-PERFCOUNTER_CPU(cover, "privop cover")
-PERFCOUNTER_CPU(fc, "privop privified-fc")
-PERFCOUNTER_CPU(cpuid, "privop privified-cpuid")
+PERFCOUNTER(mov_to_ar_imm, "privop mov_to_ar_imm")
+PERFCOUNTER(mov_to_ar_reg, "privop mov_to_ar_reg")
+PERFCOUNTER(mov_from_ar, "privop privified-mov_from_ar")
+PERFCOUNTER(ssm, "privop ssm")
+PERFCOUNTER(rsm, "privop rsm")
+PERFCOUNTER(rfi, "privop rfi")
+PERFCOUNTER(bsw0, "privop bsw0")
+PERFCOUNTER(bsw1, "privop bsw1")
+PERFCOUNTER(cover, "privop cover")
+PERFCOUNTER(fc, "privop privified-fc")
+PERFCOUNTER(cpuid, "privop privified-cpuid")
 
 PERFCOUNTER_ARRAY(mov_to_cr, "privop mov to cr", 128)
 PERFCOUNTER_ARRAY(mov_from_cr, "privop mov from cr", 128)
@@ -36,45 +36,45 @@ PERFCOUNTER_ARRAY(mov_from_cr, "p
 PERFCOUNTER_ARRAY(misc_privop, "privop misc", 64)
 
 // privileged instructions to fall into vmx_entry
-PERFCOUNTER_CPU(vmx_rsm, "vmx privop rsm")
-PERFCOUNTER_CPU(vmx_ssm, "vmx privop ssm")
-PERFCOUNTER_CPU(vmx_mov_to_psr, "vmx privop mov_to_psr")
-PERFCOUNTER_CPU(vmx_mov_from_psr, "vmx privop mov_from_psr")
-PERFCOUNTER_CPU(vmx_mov_from_cr, "vmx privop mov_from_cr")
-PERFCOUNTER_CPU(vmx_mov_to_cr, "vmx privop mov_to_cr")
-PERFCOUNTER_CPU(vmx_bsw0, "vmx privop bsw0")
-PERFCOUNTER_CPU(vmx_bsw1, "vmx privop bsw1")
-PERFCOUNTER_CPU(vmx_cover, "vmx privop cover")
-PERFCOUNTER_CPU(vmx_rfi, "vmx privop rfi")
-PERFCOUNTER_CPU(vmx_itr_d, "vmx privop itr_d")
-PERFCOUNTER_CPU(vmx_itr_i, "vmx privop itr_i")
-PERFCOUNTER_CPU(vmx_ptr_d, "vmx privop ptr_d")
-PERFCOUNTER_CPU(vmx_ptr_i, "vmx privop ptr_i")
-PERFCOUNTER_CPU(vmx_itc_d, "vmx privop itc_d")
-PERFCOUNTER_CPU(vmx_itc_i, "vmx privop itc_i")
-PERFCOUNTER_CPU(vmx_ptc_l, "vmx privop ptc_l")
-PERFCOUNTER_CPU(vmx_ptc_g, "vmx privop ptc_g")
-PERFCOUNTER_CPU(vmx_ptc_ga, "vmx privop ptc_ga")
-PERFCOUNTER_CPU(vmx_ptc_e, "vmx privop ptc_e")
-PERFCOUNTER_CPU(vmx_mov_to_rr, "vmx privop mov_to_rr")
-PERFCOUNTER_CPU(vmx_mov_from_rr, "vmx privop mov_from_rr")
-PERFCOUNTER_CPU(vmx_thash, "vmx privop thash")
-PERFCOUNTER_CPU(vmx_ttag, "vmx privop ttag")
-PERFCOUNTER_CPU(vmx_tpa, "vmx privop tpa")
-PERFCOUNTER_CPU(vmx_tak, "vmx privop tak")
-PERFCOUNTER_CPU(vmx_mov_to_ar_imm, "vmx privop mov_to_ar_imm")
-PERFCOUNTER_CPU(vmx_mov_to_ar_reg, "vmx privop mov_to_ar_reg")
-PERFCOUNTER_CPU(vmx_mov_from_ar_reg, "vmx privop mov_from_ar_reg")
-PERFCOUNTER_CPU(vmx_mov_to_dbr, "vmx privop mov_to_dbr")
-PERFCOUNTER_CPU(vmx_mov_to_ibr, "vmx privop mov_to_ibr")
-PERFCOUNTER_CPU(vmx_mov_to_pmc, "vmx privop mov_to_pmc")
-PERFCOUNTER_CPU(vmx_mov_to_pmd, "vmx privop mov_to_pmd")
-PERFCOUNTER_CPU(vmx_mov_to_pkr, "vmx privop mov_to_pkr")
-PERFCOUNTER_CPU(vmx_mov_from_dbr, "vmx privop mov_from_dbr")
-PERFCOUNTER_CPU(vmx_mov_from_ibr, "vmx privop mov_from_ibr")
-PERFCOUNTER_CPU(vmx_mov_from_pmc, "vmx privop mov_from_pmc")
-PERFCOUNTER_CPU(vmx_mov_from_pkr, "vmx privop mov_from_pkr")
-PERFCOUNTER_CPU(vmx_mov_from_cpuid, "vmx privop mov_from_cpuid")
+PERFCOUNTER(vmx_rsm, "vmx privop rsm")
+PERFCOUNTER(vmx_ssm, "vmx privop ssm")
+PERFCOUNTER(vmx_mov_to_psr, "vmx privop mov_to_psr")
+PERFCOUNTER(vmx_mov_from_psr, "vmx privop mov_from_psr")
+PERFCOUNTER(vmx_mov_from_cr, "vmx privop mov_from_cr")
+PERFCOUNTER(vmx_mov_to_cr, "vmx privop mov_to_cr")
+PERFCOUNTER(vmx_bsw0, "vmx privop bsw0")
+PERFCOUNTER(vmx_bsw1, "vmx privop bsw1")
+PERFCOUNTER(vmx_cover, "vmx privop cover")
+PERFCOUNTER(vmx_rfi, "vmx privop rfi")
+PERFCOUNTER(vmx_itr_d, "vmx privop itr_d")
+PERFCOUNTER(vmx_itr_i, "vmx privop itr_i")
+PERFCOUNTER(vmx_ptr_d, "vmx privop ptr_d")
+PERFCOUNTER(vmx_ptr_i, "vmx privop ptr_i")
+PERFCOUNTER(vmx_itc_d, "vmx privop itc_d")
+PERFCOUNTER(vmx_itc_i, "vmx privop itc_i")
+PERFCOUNTER(vmx_ptc_l, "vmx privop ptc_l")
+PERFCOUNTER(vmx_ptc_g, "vmx privop ptc_g")
+PERFCOUNTER(vmx_ptc_ga, "vmx privop ptc_ga")
+PERFCOUNTER(vmx_ptc_e, "vmx privop ptc_e")
+PERFCOUNTER(vmx_mov_to_rr, "vmx privop mov_to_rr")
+PERFCOUNTER(vmx_mov_from_rr, "vmx privop mov_from_rr")
+PERFCOUNTER(vmx_thash, "vmx privop thash")
+PERFCOUNTER(vmx_ttag, "vmx privop ttag")
+PERFCOUNTER(vmx_tpa, "vmx privop tpa")
+PERFCOUNTER(vmx_tak, "vmx privop tak")
+PERFCOUNTER(vmx_mov_to_ar_imm, "vmx privop mov_to_ar_imm")
+PERFCOUNTER(vmx_mov_to_ar_reg, "vmx privop mov_to_ar_reg")
+PERFCOUNTER(vmx_mov_from_ar_reg, "vmx privop mov_from_ar_reg")
+PERFCOUNTER(vmx_mov_to_dbr, "vmx privop mov_to_dbr")
+PERFCOUNTER(vmx_mov_to_ibr, "vmx privop mov_to_ibr")
+PERFCOUNTER(vmx_mov_to_pmc, "vmx privop mov_to_pmc")
+PERFCOUNTER(vmx_mov_to_pmd, "vmx privop mov_to_pmd")
+PERFCOUNTER(vmx_mov_to_pkr, "vmx privop mov_to_pkr")
+PERFCOUNTER(vmx_mov_from_dbr, "vmx privop mov_from_dbr")
+PERFCOUNTER(vmx_mov_from_ibr, "vmx privop mov_from_ibr")
+PERFCOUNTER(vmx_mov_from_pmc, "vmx privop mov_from_pmc")
+PERFCOUNTER(vmx_mov_from_pkr, "vmx privop mov_from_pkr")
+PERFCOUNTER(vmx_mov_from_cpuid, "vmx privop mov_from_cpuid")
 
 
 PERFCOUNTER_ARRAY(slow_hyperprivop, "slow hyperprivops", HYPERPRIVOP_MAX + 1)
@@ -87,9 +87,9 @@ PERFSTATUS(vhpt_nbr_entries, "n
 PERFSTATUS(vhpt_valid_entries, "nbr of valid entries in VHPT")
 
 PERFCOUNTER_ARRAY(vmx_mmio_access, "vmx_mmio_access", 8)
-PERFCOUNTER_CPU(vmx_pal_emul, "vmx_pal_emul")
+PERFCOUNTER(vmx_pal_emul, "vmx_pal_emul")
 PERFCOUNTER_ARRAY(vmx_switch_mm_mode, "vmx_switch_mm_mode", 8)
-PERFCOUNTER_CPU(vmx_ia64_handle_break,"vmx_ia64_handle_break")
+PERFCOUNTER(vmx_ia64_handle_break,"vmx_ia64_handle_break")
 PERFCOUNTER_ARRAY(vmx_inject_guest_interruption,
                   "vmx_inject_guest_interruption", 0x80)
 PERFCOUNTER_ARRAY(fw_hypercall, "fw_hypercall", 0x20)
@@ -111,66 +111,66 @@ PERFPRIVOPADDR(thash)
 #endif
 
 // vhpt.c
-PERFCOUNTER_CPU(local_vhpt_flush, "local_vhpt_flush")
-PERFCOUNTER_CPU(vcpu_vhpt_flush, "vcpu_vhpt_flush")
-PERFCOUNTER_CPU(vcpu_flush_vtlb_all, "vcpu_flush_vtlb_all")
-PERFCOUNTER_CPU(domain_flush_vtlb_all, "domain_flush_vtlb_all")
-PERFCOUNTER_CPU(vcpu_flush_tlb_vhpt_range, "vcpu_flush_tlb_vhpt_range")
-PERFCOUNTER_CPU(domain_flush_vtlb_track_entry, "domain_flush_vtlb_track_entry")
-PERFCOUNTER_CPU(domain_flush_vtlb_local, "domain_flush_vtlb_local")
-PERFCOUNTER_CPU(domain_flush_vtlb_global, "domain_flush_vtlb_global")
-PERFCOUNTER_CPU(domain_flush_vtlb_range, "domain_flush_vtlb_range")
+PERFCOUNTER(local_vhpt_flush, "local_vhpt_flush")
+PERFCOUNTER(vcpu_vhpt_flush, "vcpu_vhpt_flush")
+PERFCOUNTER(vcpu_flush_vtlb_all, "vcpu_flush_vtlb_all")
+PERFCOUNTER(domain_flush_vtlb_all, "domain_flush_vtlb_all")
+PERFCOUNTER(vcpu_flush_tlb_vhpt_range, "vcpu_flush_tlb_vhpt_range")
+PERFCOUNTER(domain_flush_vtlb_track_entry, "domain_flush_vtlb_track_entry")
+PERFCOUNTER(domain_flush_vtlb_local, "domain_flush_vtlb_local")
+PERFCOUNTER(domain_flush_vtlb_global, "domain_flush_vtlb_global")
+PERFCOUNTER(domain_flush_vtlb_range, "domain_flush_vtlb_range")
 
 // domain.c
-PERFCOUNTER_CPU(flush_vtlb_for_context_switch, "flush_vtlb_for_context_switch")
+PERFCOUNTER(flush_vtlb_for_context_switch, "flush_vtlb_for_context_switch")
 
 // mm.c
-PERFCOUNTER_CPU(assign_domain_page_replace, "assign_domain_page_replace")
-PERFCOUNTER_CPU(assign_domain_pge_cmpxchg_rel, "assign_domain_pge_cmpxchg_rel")
-PERFCOUNTER_CPU(zap_dcomain_page_one, "zap_dcomain_page_one")
-PERFCOUNTER_CPU(dom0vp_zap_physmap, "dom0vp_zap_physmap")
-PERFCOUNTER_CPU(dom0vp_add_physmap, "dom0vp_add_physmap")
-PERFCOUNTER_CPU(create_grant_host_mapping, "create_grant_host_mapping")
-PERFCOUNTER_CPU(destroy_grant_host_mapping, "destroy_grant_host_mapping")
-PERFCOUNTER_CPU(steal_page_refcount, "steal_page_refcount")
-PERFCOUNTER_CPU(steal_page, "steal_page")
-PERFCOUNTER_CPU(guest_physmap_add_page, "guest_physmap_add_page")
-PERFCOUNTER_CPU(guest_physmap_remove_page, "guest_physmap_remove_page")
-PERFCOUNTER_CPU(domain_page_flush_and_put, "domain_page_flush_and_put")
+PERFCOUNTER(assign_domain_page_replace, "assign_domain_page_replace")
+PERFCOUNTER(assign_domain_pge_cmpxchg_rel, "assign_domain_pge_cmpxchg_rel")
+PERFCOUNTER(zap_dcomain_page_one, "zap_dcomain_page_one")
+PERFCOUNTER(dom0vp_zap_physmap, "dom0vp_zap_physmap")
+PERFCOUNTER(dom0vp_add_physmap, "dom0vp_add_physmap")
+PERFCOUNTER(create_grant_host_mapping, "create_grant_host_mapping")
+PERFCOUNTER(destroy_grant_host_mapping, "destroy_grant_host_mapping")
+PERFCOUNTER(steal_page_refcount, "steal_page_refcount")
+PERFCOUNTER(steal_page, "steal_page")
+PERFCOUNTER(guest_physmap_add_page, "guest_physmap_add_page")
+PERFCOUNTER(guest_physmap_remove_page, "guest_physmap_remove_page")
+PERFCOUNTER(domain_page_flush_and_put, "domain_page_flush_and_put")
 
 // dom0vp
-PERFCOUNTER_CPU(dom0vp_phystomach, "dom0vp_phystomach")
-PERFCOUNTER_CPU(dom0vp_machtophys, "dom0vp_machtophys")
+PERFCOUNTER(dom0vp_phystomach, "dom0vp_phystomach")
+PERFCOUNTER(dom0vp_machtophys, "dom0vp_machtophys")
 
 #ifdef CONFIG_XEN_IA64_TLB_TRACK
 // insert or dirty
-PERFCOUNTER_CPU(tlb_track_iod, "tlb_track_iod")
-PERFCOUNTER_CPU(tlb_track_iod_again, "tlb_track_iod_again")
-PERFCOUNTER_CPU(tlb_track_iod_not_tracked, "tlb_track_iod_not_tracked")
-PERFCOUNTER_CPU(tlb_track_iod_force_many, "tlb_track_iod_force_many")
-PERFCOUNTER_CPU(tlb_track_iod_tracked_many, "tlb_track_iod_tracked_many")
-PERFCOUNTER_CPU(tlb_track_iod_tracked_many_del, "tlb_track_iod_tracked_many_del")
-PERFCOUNTER_CPU(tlb_track_iod_found, "tlb_track_iod_found")
-PERFCOUNTER_CPU(tlb_track_iod_new_entry, "tlb_track_iod_new_entry")
-PERFCOUNTER_CPU(tlb_track_iod_new_failed, "tlb_track_iod_new_failed")
-PERFCOUNTER_CPU(tlb_track_iod_new_many, "tlb_track_iod_new_many")
-PERFCOUNTER_CPU(tlb_track_iod_insert, "tlb_track_iod_insert")
-PERFCOUNTER_CPU(tlb_track_iod_dirtied, "tlb_track_iod_dirtied")
+PERFCOUNTER(tlb_track_iod, "tlb_track_iod")
+PERFCOUNTER(tlb_track_iod_again, "tlb_track_iod_again")
+PERFCOUNTER(tlb_track_iod_not_tracked, "tlb_track_iod_not_tracked")
+PERFCOUNTER(tlb_track_iod_force_many, "tlb_track_iod_force_many")
+PERFCOUNTER(tlb_track_iod_tracked_many, "tlb_track_iod_tracked_many")
+PERFCOUNTER(tlb_track_iod_tracked_many_del, "tlb_track_iod_tracked_many_del")
+PERFCOUNTER(tlb_track_iod_found, "tlb_track_iod_found")
+PERFCOUNTER(tlb_track_iod_new_entry, "tlb_track_iod_new_entry")
+PERFCOUNTER(tlb_track_iod_new_failed, "tlb_track_iod_new_failed")
+PERFCOUNTER(tlb_track_iod_new_many, "tlb_track_iod_new_many")
+PERFCOUNTER(tlb_track_iod_insert, "tlb_track_iod_insert")
+PERFCOUNTER(tlb_track_iod_dirtied, "tlb_track_iod_dirtied")
 
 // search and remove
-PERFCOUNTER_CPU(tlb_track_sar, "tlb_track_sar")
-PERFCOUNTER_CPU(tlb_track_sar_not_tracked, "tlb_track_sar_not_tracked")
-PERFCOUNTER_CPU(tlb_track_sar_not_found, "tlb_track_sar_not_found")
-PERFCOUNTER_CPU(tlb_track_sar_found, "tlb_track_sar_found")
-PERFCOUNTER_CPU(tlb_track_sar_many, "tlb_track_sar_many")
+PERFCOUNTER(tlb_track_sar, "tlb_track_sar")
+PERFCOUNTER(tlb_track_sar_not_tracked, "tlb_track_sar_not_tracked")
+PERFCOUNTER(tlb_track_sar_not_found, "tlb_track_sar_not_found")
+PERFCOUNTER(tlb_track_sar_found, "tlb_track_sar_found")
+PERFCOUNTER(tlb_track_sar_many, "tlb_track_sar_many")
 
 // flush
-PERFCOUNTER_CPU(tlb_track_use_rr7, "tlb_track_use_rr7")
-PERFCOUNTER_CPU(tlb_track_swap_rr0, "tlb_track_swap_rr0")
+PERFCOUNTER(tlb_track_use_rr7, "tlb_track_use_rr7")
+PERFCOUNTER(tlb_track_swap_rr0, "tlb_track_swap_rr0")
 #endif
 
 // tlb flush clock
#ifdef CONFIG_XEN_IA64_TLBFLUSH_CLOCK
-PERFCOUNTER_CPU(tlbflush_clock_cswitch_purge, "tlbflush_clock_cswitch_purge")
-PERFCOUNTER_CPU(tlbflush_clock_cswitch_skip, "tlbflush_clock_cswitch_skip")
+PERFCOUNTER(tlbflush_clock_cswitch_purge, "tlbflush_clock_cswitch_purge")
+PERFCOUNTER(tlbflush_clock_cswitch_skip, "tlbflush_clock_cswitch_skip")
 #endif
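The counter list above opens with the comment "This file is legitimately included multiple times", which is the X-macro pattern: each consumer defines PERFCOUNTER and friends to expand however it needs, includes the list, then undefines them again. A minimal standalone sketch of the idea, not Xen's exact code (the two counter names are borrowed from the list above):

#include <stdio.h>

/* The single master list, standing in for perfc_defn.h
 * (inlined as a macro so the sketch fits in one file). */
#define COUNTER_LIST \
    COUNTER(dtlb_translate, "dtlb hit") \
    COUNTER(tr_translate,   "TR hit")

/* First expansion: an enum of counter indices. */
#define COUNTER(name, descr) PERFC_##name,
enum perfcounter { COUNTER_LIST NUM_PERFCOUNTERS };
#undef COUNTER

/* Second expansion of the same list: a parallel description table. */
#define COUNTER(name, descr) descr,
static const char *perfc_desc[] = { COUNTER_LIST };
#undef COUNTER

int main(void)
{
    for (int i = 0; i < NUM_PERFCOUNTERS; i++)
        printf("%d: %s\n", i, perfc_desc[i]);
    return 0;
}

Because the list is written once, the enum and the description table can never drift out of step; adding a counter is a one-line change.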
--- a/xen/include/asm-ia64/tlb_track.h Tue Mar 27 16:35:37 2007 +0100
+++ b/xen/include/asm-ia64/tlb_track.h Tue Mar 27 16:42:47 2007 +0100
@@ -97,9 +97,9 @@ vcpu_tlb_track_insert_or_dirty(struct vc
 {
     /* optimization.
        non-tracking pte is most common. */
-    perfc_incrc(tlb_track_iod);
+    perfc_incr(tlb_track_iod);
     if (!pte_tlb_tracking(entry->used)) {
-        perfc_incrc(tlb_track_iod_not_tracked);
+        perfc_incr(tlb_track_iod_not_tracked);
         return;
     }
 
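This hunk shows the call-site half of the conversion: perfc_incrc, the old explicitly per-cpu spelling, becomes perfc_incr, since after this changeset all counters are per-CPU and only one accessor remains. The layout perfc_incr relies on looks roughly like the sketch below: one slot per CPU, so the hot path needs no atomics or locks, and totals are summed at read time. This is a simplified model rather than Xen's implementation; NR_CPUS and this_cpu_id are stand-ins.

enum perfcounter {
    PERFC_tlb_track_iod,
    PERFC_tlb_track_iod_not_tracked,
    NUM_PERFCOUNTERS
};

#define NR_CPUS 4
typedef unsigned int perfc_t;

/* One row of counters per CPU: an increment touches only the local
 * row, so there is no locking and no cache-line bouncing. */
static perfc_t perfcounters[NR_CPUS][NUM_PERFCOUNTERS];

static int this_cpu_id(void) { return 0; }  /* stub for the sketch */

#define perfc_incr(x) (++perfcounters[this_cpu_id()][PERFC_##x])

/* Readers pay the cost instead: a counter's value is the sum over CPUs. */
static unsigned long perfc_value(enum perfcounter i)
{
    unsigned long sum = 0;
    for (int cpu = 0; cpu < NR_CPUS; cpu++)
        sum += perfcounters[cpu][i];
    return sum;
}

With every counter per-CPU, the old PERFCOUNTER/PERFCOUNTER_CPU distinction carries no information, which is why the patch can collapse the two names mechanically.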
--- a/xen/include/asm-x86/perfc_defn.h Tue Mar 27 16:35:37 2007 +0100
+++ b/xen/include/asm-x86/perfc_defn.h Tue Mar 27 16:42:47 2007 +0100
@@ -12,83 +12,83 @@ PERFCOUNTER_ARRAY(cause_vector,
 #define SVM_PERF_EXIT_REASON_SIZE (1+136)
 PERFCOUNTER_ARRAY(svmexits, "SVMexits", SVM_PERF_EXIT_REASON_SIZE)
 
-PERFCOUNTER_CPU(seg_fixups, "segmentation fixups")
+PERFCOUNTER(seg_fixups, "segmentation fixups")
 
-PERFCOUNTER_CPU(apic_timer, "apic timer interrupts")
+PERFCOUNTER(apic_timer, "apic timer interrupts")
 
-PERFCOUNTER_CPU(domain_page_tlb_flush, "domain page tlb flushes")
+PERFCOUNTER(domain_page_tlb_flush, "domain page tlb flushes")
 
 PERFCOUNTER(calls_to_mmuext_op, "calls to mmuext_op")
 PERFCOUNTER(num_mmuext_ops, "mmuext ops")
 PERFCOUNTER(calls_to_mmu_update, "calls to mmu_update")
 PERFCOUNTER(num_page_updates, "page updates")
 PERFCOUNTER(calls_to_update_va, "calls to update_va_map")
-PERFCOUNTER_CPU(page_faults, "page faults")
-PERFCOUNTER_CPU(copy_user_faults, "copy_user faults")
+PERFCOUNTER(page_faults, "page faults")
+PERFCOUNTER(copy_user_faults, "copy_user faults")
 
-PERFCOUNTER_CPU(map_domain_page_count, "map_domain_page count")
-PERFCOUNTER_CPU(ptwr_emulations, "writable pt emulations")
+PERFCOUNTER(map_domain_page_count, "map_domain_page count")
+PERFCOUNTER(ptwr_emulations, "writable pt emulations")
 
-PERFCOUNTER_CPU(exception_fixed, "pre-exception fixed")
+PERFCOUNTER(exception_fixed, "pre-exception fixed")
 
 
 /* Shadow counters */
-PERFCOUNTER_CPU(shadow_alloc, "calls to shadow_alloc")
-PERFCOUNTER_CPU(shadow_alloc_tlbflush, "shadow_alloc flushed TLBs")
+PERFCOUNTER(shadow_alloc, "calls to shadow_alloc")
+PERFCOUNTER(shadow_alloc_tlbflush, "shadow_alloc flushed TLBs")
 
 /* STATUS counters do not reset when 'P' is hit */
 PERFSTATUS(shadow_alloc_count, "number of shadow pages in use")
-PERFCOUNTER_CPU(shadow_free, "calls to shadow_free")
-PERFCOUNTER_CPU(shadow_prealloc_1, "shadow recycles old shadows")
-PERFCOUNTER_CPU(shadow_prealloc_2, "shadow recycles in-use shadows")
-PERFCOUNTER_CPU(shadow_linear_map_failed, "shadow hit read-only linear map")
-PERFCOUNTER_CPU(shadow_a_update, "shadow A bit update")
-PERFCOUNTER_CPU(shadow_ad_update, "shadow A&D bit update")
-PERFCOUNTER_CPU(shadow_fault, "calls to shadow_fault")
-PERFCOUNTER_CPU(shadow_fault_fast_gnp, "shadow_fault fast path n/p")
-PERFCOUNTER_CPU(shadow_fault_fast_mmio, "shadow_fault fast path mmio")
-PERFCOUNTER_CPU(shadow_fault_fast_fail, "shadow_fault fast path error")
-PERFCOUNTER_CPU(shadow_fault_bail_bad_gfn, "shadow_fault guest bad gfn")
-PERFCOUNTER_CPU(shadow_fault_bail_not_present,
+PERFCOUNTER(shadow_free, "calls to shadow_free")
+PERFCOUNTER(shadow_prealloc_1, "shadow recycles old shadows")
+PERFCOUNTER(shadow_prealloc_2, "shadow recycles in-use shadows")
+PERFCOUNTER(shadow_linear_map_failed, "shadow hit read-only linear map")
+PERFCOUNTER(shadow_a_update, "shadow A bit update")
+PERFCOUNTER(shadow_ad_update, "shadow A&D bit update")
+PERFCOUNTER(shadow_fault, "calls to shadow_fault")
+PERFCOUNTER(shadow_fault_fast_gnp, "shadow_fault fast path n/p")
+PERFCOUNTER(shadow_fault_fast_mmio, "shadow_fault fast path mmio")
+PERFCOUNTER(shadow_fault_fast_fail, "shadow_fault fast path error")
+PERFCOUNTER(shadow_fault_bail_bad_gfn, "shadow_fault guest bad gfn")
+PERFCOUNTER(shadow_fault_bail_not_present,
                                         "shadow_fault guest not-present")
-PERFCOUNTER_CPU(shadow_fault_bail_nx, "shadow_fault guest NX fault")
-PERFCOUNTER_CPU(shadow_fault_bail_ro_mapping, "shadow_fault guest R/W fault")
-PERFCOUNTER_CPU(shadow_fault_bail_user_supervisor,
+PERFCOUNTER(shadow_fault_bail_nx, "shadow_fault guest NX fault")
+PERFCOUNTER(shadow_fault_bail_ro_mapping, "shadow_fault guest R/W fault")
+PERFCOUNTER(shadow_fault_bail_user_supervisor,
                                         "shadow_fault guest U/S fault")
-PERFCOUNTER_CPU(shadow_fault_emulate_read, "shadow_fault emulates a read")
-PERFCOUNTER_CPU(shadow_fault_emulate_write, "shadow_fault emulates a write")
-PERFCOUNTER_CPU(shadow_fault_emulate_failed, "shadow_fault emulator fails")
-PERFCOUNTER_CPU(shadow_fault_emulate_stack, "shadow_fault emulate stack write")
-PERFCOUNTER_CPU(shadow_fault_mmio, "shadow_fault handled as mmio")
-PERFCOUNTER_CPU(shadow_fault_fixed, "shadow_fault fixed fault")
-PERFCOUNTER_CPU(shadow_ptwr_emulate, "shadow causes ptwr to emulate")
-PERFCOUNTER_CPU(shadow_validate_gl1e_calls, "calls to shadow_validate_gl1e")
-PERFCOUNTER_CPU(shadow_validate_gl2e_calls, "calls to shadow_validate_gl2e")
-PERFCOUNTER_CPU(shadow_validate_gl3e_calls, "calls to shadow_validate_gl3e")
-PERFCOUNTER_CPU(shadow_validate_gl4e_calls, "calls to shadow_validate_gl4e")
-PERFCOUNTER_CPU(shadow_hash_lookups, "calls to shadow_hash_lookup")
-PERFCOUNTER_CPU(shadow_hash_lookup_head, "shadow hash hit in bucket head")
-PERFCOUNTER_CPU(shadow_hash_lookup_miss, "shadow hash misses")
-PERFCOUNTER_CPU(shadow_get_shadow_status, "calls to get_shadow_status")
-PERFCOUNTER_CPU(shadow_hash_inserts, "calls to shadow_hash_insert")
-PERFCOUNTER_CPU(shadow_hash_deletes, "calls to shadow_hash_delete")
-PERFCOUNTER_CPU(shadow_writeable, "shadow removes write access")
-PERFCOUNTER_CPU(shadow_writeable_h_1, "shadow writeable: 32b w2k3")
-PERFCOUNTER_CPU(shadow_writeable_h_2, "shadow writeable: 32pae w2k3")
-PERFCOUNTER_CPU(shadow_writeable_h_3, "shadow writeable: 64b w2k3")
-PERFCOUNTER_CPU(shadow_writeable_h_4, "shadow writeable: 32b linux low")
-PERFCOUNTER_CPU(shadow_writeable_h_5, "shadow writeable: 32b linux high")
-PERFCOUNTER_CPU(shadow_writeable_bf, "shadow writeable brute-force")
-PERFCOUNTER_CPU(shadow_mappings, "shadow removes all mappings")
-PERFCOUNTER_CPU(shadow_mappings_bf, "shadow rm-mappings brute-force")
-PERFCOUNTER_CPU(shadow_early_unshadow, "shadow unshadows for fork/exit")
-PERFCOUNTER_CPU(shadow_unshadow, "shadow unshadows a page")
-PERFCOUNTER_CPU(shadow_up_pointer, "shadow unshadow by up-pointer")
-PERFCOUNTER_CPU(shadow_unshadow_bf, "shadow unshadow brute-force")
-PERFCOUNTER_CPU(shadow_get_page_fail, "shadow_get_page_from_l1e failed")
-PERFCOUNTER_CPU(shadow_guest_walk, "shadow walks guest tables")
-PERFCOUNTER_CPU(shadow_invlpg, "shadow emulates invlpg")
-PERFCOUNTER_CPU(shadow_invlpg_fault, "shadow invlpg faults")
+PERFCOUNTER(shadow_fault_emulate_read, "shadow_fault emulates a read")
+PERFCOUNTER(shadow_fault_emulate_write, "shadow_fault emulates a write")
+PERFCOUNTER(shadow_fault_emulate_failed, "shadow_fault emulator fails")
+PERFCOUNTER(shadow_fault_emulate_stack, "shadow_fault emulate stack write")
+PERFCOUNTER(shadow_fault_mmio, "shadow_fault handled as mmio")
+PERFCOUNTER(shadow_fault_fixed, "shadow_fault fixed fault")
+PERFCOUNTER(shadow_ptwr_emulate, "shadow causes ptwr to emulate")
+PERFCOUNTER(shadow_validate_gl1e_calls, "calls to shadow_validate_gl1e")
+PERFCOUNTER(shadow_validate_gl2e_calls, "calls to shadow_validate_gl2e")
+PERFCOUNTER(shadow_validate_gl3e_calls, "calls to shadow_validate_gl3e")
+PERFCOUNTER(shadow_validate_gl4e_calls, "calls to shadow_validate_gl4e")
+PERFCOUNTER(shadow_hash_lookups, "calls to shadow_hash_lookup")
+PERFCOUNTER(shadow_hash_lookup_head, "shadow hash hit in bucket head")
+PERFCOUNTER(shadow_hash_lookup_miss, "shadow hash misses")
+PERFCOUNTER(shadow_get_shadow_status, "calls to get_shadow_status")
+PERFCOUNTER(shadow_hash_inserts, "calls to shadow_hash_insert")
+PERFCOUNTER(shadow_hash_deletes, "calls to shadow_hash_delete")
+PERFCOUNTER(shadow_writeable, "shadow removes write access")
+PERFCOUNTER(shadow_writeable_h_1, "shadow writeable: 32b w2k3")
+PERFCOUNTER(shadow_writeable_h_2, "shadow writeable: 32pae w2k3")
+PERFCOUNTER(shadow_writeable_h_3, "shadow writeable: 64b w2k3")
+PERFCOUNTER(shadow_writeable_h_4, "shadow writeable: 32b linux low")
+PERFCOUNTER(shadow_writeable_h_5, "shadow writeable: 32b linux high")
+PERFCOUNTER(shadow_writeable_bf, "shadow writeable brute-force")
+PERFCOUNTER(shadow_mappings, "shadow removes all mappings")
+PERFCOUNTER(shadow_mappings_bf, "shadow rm-mappings brute-force")
+PERFCOUNTER(shadow_early_unshadow, "shadow unshadows for fork/exit")
+PERFCOUNTER(shadow_unshadow, "shadow unshadows a page")
+PERFCOUNTER(shadow_up_pointer, "shadow unshadow by up-pointer")
+PERFCOUNTER(shadow_unshadow_bf, "shadow unshadow brute-force")
+PERFCOUNTER(shadow_get_page_fail, "shadow_get_page_from_l1e failed")
+PERFCOUNTER(shadow_guest_walk, "shadow walks guest tables")
+PERFCOUNTER(shadow_invlpg, "shadow emulates invlpg")
+PERFCOUNTER(shadow_invlpg_fault, "shadow invlpg faults")
 
 
 /*#endif*/ /* __XEN_PERFC_DEFN_H__ */
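Note that the PERFCOUNTER_ARRAY entries in this file (cause_vector, svmexits, and friends) are untouched by the patch: arrays are bumped with a runtime index rather than by name alone, so they never had a separate per-cpu spelling to remove. The sketch below shows how an array accessor in the style of Xen's perfc_incra can sit on top of the PERFC_name .. PERFC_LAST_name enum layout; the assert-based bounds check and the 256-entry size are assumptions made for the example, not Xen's code.

#include <assert.h>

/* Enum layout for one array counter, as produced by PERFCOUNTER_ARRAY:
 * PERFC_cause_vector is the first slot, PERFC_LAST_cause_vector the last. */
enum {
    PERFC_cause_vector,
    PERFC_LAST_cause_vector = PERFC_cause_vector + 256 - 1,
    NUM_PERFCOUNTERS
};

static unsigned int counters[NUM_PERFCOUNTERS];

/* Array analogue of perfc_incr: bump slot idx of the named array. */
#define perfc_incra(name, idx)                                        \
    do {                                                              \
        assert((unsigned)(idx) <= PERFC_LAST_##name - PERFC_##name);  \
        ++counters[PERFC_##name + (idx)];                             \
    } while (0)

/* e.g. in a trap handler:  perfc_incra(cause_vector, vector); */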
--- a/xen/include/xen/perfc.h Tue Mar 27 16:35:37 2007 +0100
+++ b/xen/include/xen/perfc.h Tue Mar 27 16:42:47 2007 +0100
@@ -1,4 +1,3 @@
-
 #ifndef __XEN_PERFC_H__
 #define __XEN_PERFC_H__
 
@@ -8,13 +7,14 @@
 #include <xen/smp.h>
 #include <xen/percpu.h>
 
-/* 
+/*
  * NOTE: new counters must be defined in perfc_defn.h
  * 
+ * Counter declarations:
  * PERFCOUNTER (counter, string) define a new performance counter
 * PERFCOUNTER_ARRAY (counter, string, size) define an array of counters
 * 
- * unlike "COUNTERS", "STATUS" variables DO NOT RESET
+ * Unlike counters, status variables do not reset:
 * PERFSTATUS (counter, string) define a new performance stauts
 * PERFSTATUS_ARRAY (counter, string, size) define an array of status vars
 *
@@ -31,17 +31,14 @@
 */
 
 #define PERFCOUNTER( name, descr ) \
-    PERFC_ ## name,
+    PERFC_##name,
 #define PERFCOUNTER_ARRAY( name, descr, size ) \
-    PERFC_ ## name, \
-    PERFC_LAST_ ## name = PERFC_ ## name + (size) - sizeof(char[2 * !!(size) - 1]),
+    PERFC_##name, \
+    PERFC_LAST_##name = PERFC_ ## name + (size) - sizeof(char[2 * !!(size) - 1]),
 
 #define PERFSTATUS PERFCOUNTER
 #define PERFSTATUS_ARRAY PERFCOUNTER_ARRAY
 
-/* Compatibility: This should go away once all users got converted. */
-#define PERFCOUNTER_CPU PERFCOUNTER
-
 enum perfcounter {
 #include <xen/perfc_defn.h>
     NUM_PERFCOUNTERS
@@ -115,7 +112,4 @@ int perfc_control(struct xen_sysctl_perf
 
 #endif /* PERF_COUNTERS */
 
-/* Compatibility: This should go away once all users got converted. */
-#define perfc_incrc perfc_incr
-
 #endif /* __XEN_PERFC_H__ */
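One expression in the hunk above is worth unpacking: sizeof(char[2 * !!(size) - 1]) in PERFCOUNTER_ARRAY. For any nonzero size, 2 * !!(size) - 1 evaluates to 1, so the sizeof contributes exactly 1 and PERFC_LAST_name lands on the array's final slot, PERFC_name + size - 1. For size == 0 the array type would be char[-1], which fails to compile, so a zero-length counter array is rejected at build time instead of silently corrupting the enum. A small self-contained demonstration of the same trick (the 64-entry size is illustrative):

/* 2 * !!(size) - 1 evaluates to 1 for nonzero size and -1 for zero,
 * so sizeof(char[...]) either contributes exactly 1 or breaks the build. */
#define CHECKED_LAST(first, size) \
    ((first) + (size) - sizeof(char[2 * !!(size) - 1]))

enum {
    PERFC_hypercalls,
    PERFC_LAST_hypercalls = CHECKED_LAST(PERFC_hypercalls, 64),  /* == 63 */
    NUM_PERFCOUNTERS                                             /* == 64 */
};

/* CHECKED_LAST(PERFC_hypercalls, 0) would need char[-1]: compile error. */

This is why the enum expansion can include perfc_defn.h directly and still reserve a contiguous index range for every array counter.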
--- a/xen/include/xen/perfc_defn.h Tue Mar 27 16:35:37 2007 +0100
+++ b/xen/include/xen/perfc_defn.h Tue Mar 27 16:42:47 2007 +0100
@@ -9,13 +9,13 @@ PERFCOUNTER_ARRAY(hypercalls,
 PERFCOUNTER(calls_to_multicall, "calls to multicall")
 PERFCOUNTER(calls_from_multicall, "calls from multicall")
 
-PERFCOUNTER_CPU(irqs, "#interrupts")
-PERFCOUNTER_CPU(ipis, "#IPIs")
+PERFCOUNTER(irqs, "#interrupts")
+PERFCOUNTER(ipis, "#IPIs")
 
-PERFCOUNTER_CPU(sched_irq, "sched: timer")
-PERFCOUNTER_CPU(sched_run, "sched: runs through scheduler")
-PERFCOUNTER_CPU(sched_ctx, "sched: context switches")
+PERFCOUNTER(sched_irq, "sched: timer")
+PERFCOUNTER(sched_run, "sched: runs through scheduler")
+PERFCOUNTER(sched_ctx, "sched: context switches")
 
-PERFCOUNTER_CPU(need_flush_tlb_flush, "PG_need_flush tlb flushes")
+PERFCOUNTER(need_flush_tlb_flush, "PG_need_flush tlb flushes")
 
 /*#endif*/ /* __XEN_PERFC_DEFN_H__ */