ia64/xen-unstable
changeset 9765:7c7bcf173f8b
[IA64] cleanup vtlb code
This patch cleans up the vtlb code.
Signed-off-by: Anthony Xu <anthony.xu@intel.com>
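In outline, the cleanup drops the dynamically allocated, chained thash_cb_t control blocks and embeds one vTLB and one VHPT control block directly in struct arch_vcpu, reached through two small accessor macros. A condensed sketch of the resulting layout, pieced together from the hunks below (commented-out fields and unrelated members omitted; u64, thash_data_t and PTA are the existing ia64 types):

```c
/* Condensed from the vmmu.h and domain.h hunks below; not a verbatim copy. */
typedef struct thash_cb {
    thash_data_t *hash;          /* hash table, aligned at hash_sz        */
    u64           hash_sz;       /* size of the hash table                */
    void         *cch_buf;       /* base address of the collision chains  */
    u64           cch_sz;        /* size of the collision-chain area      */
    thash_data_t *cch_freelist;  /* free list threaded through ->next     */
    PTA           pta;
} thash_cb_t;

struct arch_vcpu {
    /* ... */
    unsigned long tc_regions;    /* bitmap of regions holding TC entries  */
    thash_cb_t    vtlb;          /* embedded, no longer a pointer         */
    thash_cb_t    vhpt;
    /* ... */
};

/* Per-field access now goes through these macros instead of an hcb pointer. */
#define VTLB(v,_x) (v->arch.vtlb._x)
#define VHPT(v,_x) (v->arch.vhpt._x)
```

With the control blocks embedded, the thash_* entry points take the VCPU directly and the vmx_vcpu_get_vtlb() indirection disappears from the callers, as the diffs below show.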
| field | value |
|---|---|
| author | awilliam@xenbuild.aw |
| date | Tue Apr 25 20:53:38 2006 -0600 (2006-04-25) |
| parents | 2d2ef3f4c747 |
| children | ffba1376c4fb |
| files | xen/arch/ia64/vmx/vmmu.c xen/arch/ia64/vmx/vmx_init.c xen/arch/ia64/vmx/vmx_phy_mode.c xen/arch/ia64/vmx/vmx_process.c xen/arch/ia64/vmx/vmx_vcpu.c xen/arch/ia64/vmx/vtlb.c xen/include/asm-ia64/domain.h xen/include/asm-ia64/vmmu.h xen/include/asm-ia64/vmx_vcpu.h |
line diff
--- a/xen/arch/ia64/vmx/vmmu.c	Tue Apr 25 17:05:16 2006 -0600
+++ b/xen/arch/ia64/vmx/vmmu.c	Tue Apr 25 20:53:38 2006 -0600
@@ -128,73 +128,58 @@ purge_machine_tc_by_domid(domid_t domid)
 #endif
 }
 
-static thash_cb_t *init_domain_vhpt(struct vcpu *d, void *vbase, void *vcur)
+static void init_domain_vhpt(struct vcpu *v)
 {
-//  struct page_info *page;
-    thash_cb_t *vhpt;
-    PTA pta_value;
-    vcur -= sizeof (thash_cb_t);
-    vhpt = vcur;
-    vhpt->ht = THASH_VHPT;
-    vhpt->vcpu = d;
-    /* Setup guest pta */
-    pta_value.val = 0;
-    pta_value.ve = 1;
-    pta_value.vf = 1;
-    pta_value.size = VCPU_VHPT_SHIFT - 1; /* 16M*/
-    pta_value.base = ((u64)vbase) >> PTA_BASE_SHIFT;
-    d->arch.arch_vmx.mpta = pta_value.val;
-
-    vhpt->hash = vbase;
-    vhpt->hash_sz = VCPU_VHPT_SIZE/2;
-    vhpt->cch_buf = (void *)(vbase + vhpt->hash_sz);
-    vhpt->cch_sz = (u64)vcur - (u64)vhpt->cch_buf;
-    thash_init(vhpt,VCPU_VHPT_SHIFT-1);
-    return vhpt;
+    struct page_info *page;
+    void * vbase;
+    page = alloc_domheap_pages (NULL, VCPU_VHPT_ORDER, 0);
+    if ( page == NULL ) {
+        panic("No enough contiguous memory for init_domain_vhpt\n");
+    }
+    vbase = page_to_virt(page);
+    memset(vbase, 0, VCPU_VHPT_SIZE);
+    printk("Allocate domain tlb at 0x%p\n", vbase);
+
+    VHPT(v,hash) = vbase;
+    VHPT(v,hash_sz) = VCPU_VHPT_SIZE/2;
+    VHPT(v,cch_buf) = (void *)((u64)vbase + VHPT(v,hash_sz));
+    VHPT(v,cch_sz) = VCPU_VHPT_SIZE - VHPT(v,hash_sz);
+    thash_init(&(v->arch.vhpt),VCPU_VHPT_SHIFT-1);
+    v->arch.arch_vmx.mpta = v->arch.vhpt.pta.val;
 }
 
 
 
-thash_cb_t *init_domain_tlb(struct vcpu *d)
+void init_domain_tlb(struct vcpu *v)
 {
     struct page_info *page;
-    void *vbase, *vhptbase, *vcur;
-    thash_cb_t *tlb;
-
-    page = alloc_domheap_pages (NULL, VCPU_VHPT_ORDER, 0);
+    void * vbase;
+    init_domain_vhpt(v);
+    page = alloc_domheap_pages (NULL, VCPU_VTLB_ORDER, 0);
     if ( page == NULL ) {
-        panic("No enough contiguous memory for init_domain_mm\n");
+        panic("No enough contiguous memory for init_domain_tlb\n");
     }
-    vhptbase = page_to_virt(page);
-    memset(vhptbase, 0, VCPU_VHPT_SIZE);
-    printk("Allocate domain tlb&vhpt at 0x%lx\n", (u64)vhptbase);
-    vbase =vhptbase + VCPU_VHPT_SIZE - VCPU_VTLB_SIZE;
-    vcur = (void*)((u64)vbase + VCPU_VTLB_SIZE);
-    vcur -= sizeof (thash_cb_t);
-    tlb = vcur;
-    tlb->ht = THASH_TLB;
-    tlb->vcpu = d;
-    tlb->vhpt = init_domain_vhpt(d,vhptbase,vbase);
-//  tlb->hash_func = machine_thash;
-    tlb->hash = vbase;
-    tlb->hash_sz = VCPU_VTLB_SIZE/2;
-    tlb->cch_buf = (void *)(vbase + tlb->hash_sz);
-    tlb->cch_sz = (u64)vcur - (u64)tlb->cch_buf;
-//  tlb->recycle_notifier = recycle_message;
-    thash_init(tlb,VCPU_VTLB_SHIFT-1);
-    return tlb;
+    vbase = page_to_virt(page);
+    memset(vbase, 0, VCPU_VTLB_SIZE);
+    printk("Allocate domain tlb at 0x%p\n", vbase);
+
+    VTLB(v,hash) = vbase;
+    VTLB(v,hash_sz) = VCPU_VTLB_SIZE/2;
+    VTLB(v,cch_buf) = (void *)((u64)vbase + VTLB(v,hash_sz));
+    VTLB(v,cch_sz) = VCPU_VTLB_SIZE - VTLB(v,hash_sz);
+    thash_init(&(v->arch.vtlb),VCPU_VTLB_SHIFT-1);
 }
 
 void free_domain_tlb(struct vcpu *v)
 {
     struct page_info *page;
-    void *vhptbase;
-    thash_cb_t *tlb;
 
-    if ( v->arch.vtlb ) {
-        tlb = v->arch.vtlb;
-        vhptbase = (void*)((u64)tlb + sizeof (thash_cb_t)) - VCPU_VHPT_SIZE;
-        page = virt_to_page(vhptbase);
+    if ( v->arch.vtlb.hash) {
+        page = virt_to_page(v->arch.vtlb.hash);
+        free_domheap_pages(page, VCPU_VTLB_ORDER);
+    }
+    if ( v->arch.vhpt.hash) {
+        page = virt_to_page(v->arch.vhpt.hash);
         free_domheap_pages(page, VCPU_VHPT_ORDER);
     }
 }
@@ -324,17 +309,15 @@ fetch_code(VCPU *vcpu, u64 gip, u64 *cod
     u64 gpip=0;   // guest physical IP
     u64 *vpa;
     thash_data_t *tlb;
-    thash_cb_t *hcb;
     u64 mfn;
 
     if ( !(VCPU(vcpu, vpsr) & IA64_PSR_IT) ) {   // I-side physical mode
         gpip = gip;
     }
     else {
-        hcb = vmx_vcpu_get_vtlb(vcpu);
-        tlb = vtlb_lookup(hcb, gip, ISIDE_TLB);
+        tlb = vtlb_lookup(vcpu, gip, ISIDE_TLB);
 //      if( tlb == NULL )
-//          tlb = vtlb_lookup(hcb, gip, DSIDE_TLB );
+//          tlb = vtlb_lookup(vcpu, gip, DSIDE_TLB );
         if (tlb)
             gpip = (tlb->ppn >>(tlb->ps-12)<<tlb->ps) | ( gip & (PSIZE(tlb->ps)-1) );
     }
@@ -357,8 +340,6 @@ IA64FAULT vmx_vcpu_itc_i(VCPU *vcpu, UIN
 {
     int slot;
     u64 ps, va;
-    thash_cb_t *hcb;
-
     ps = itir_ps(itir);
     va = PAGEALIGN(ifa, ps);
     slot = vtr_find_overlap(vcpu, va, ps, ISIDE_TLB);
@@ -367,8 +348,7 @@ IA64FAULT vmx_vcpu_itc_i(VCPU *vcpu, UIN
         panic("Tlb conflict!!");
         return IA64_FAULT;
     }
-    hcb = vmx_vcpu_get_vtlb(vcpu);
-    thash_purge_and_insert(hcb, pte, itir, ifa);
+    thash_purge_and_insert(vcpu, pte, itir, ifa);
     return IA64_NO_FAULT;
 }
 
@@ -376,8 +356,6 @@ IA64FAULT vmx_vcpu_itc_d(VCPU *vcpu, UIN
 {
     int slot;
     u64 ps, va, gpfn;
-    thash_cb_t *hcb;
-
     ps = itir_ps(itir);
     va = PAGEALIGN(ifa, ps);
     slot = vtr_find_overlap(vcpu, va, ps, DSIDE_TLB);
@@ -386,11 +364,10 @@ IA64FAULT vmx_vcpu_itc_d(VCPU *vcpu, UIN
         panic("Tlb conflict!!");
         return IA64_FAULT;
     }
-    hcb = vmx_vcpu_get_vtlb(vcpu);
     gpfn = (pte & _PAGE_PPN_MASK)>> PAGE_SHIFT;
-    if(__gpfn_is_io(vcpu->domain,gpfn))
+    if(VMX_DOMAIN(vcpu) && __gpfn_is_io(vcpu->domain,gpfn))
         pte |= VTLB_PTE_IO;
-    thash_purge_and_insert(hcb, pte, itir, ifa);
+    thash_purge_and_insert(vcpu, pte, itir, ifa);
     return IA64_NO_FAULT;
 
 }
@@ -402,7 +379,6 @@ IA64FAULT vmx_vcpu_itr_i(VCPU *vcpu, u64
 {
     int index;
     u64 ps, va, rid;
-    thash_cb_t *hcb;
 
     ps = itir_ps(itir);
     va = PAGEALIGN(ifa, ps);
@@ -412,8 +388,7 @@ IA64FAULT vmx_vcpu_itr_i(VCPU *vcpu, u64
         panic("Tlb conflict!!");
         return IA64_FAULT;
     }
-    hcb = vmx_vcpu_get_vtlb(vcpu);
-    thash_purge_entries(hcb, va, ps);
+    thash_purge_entries(vcpu, va, ps);
     vcpu_get_rr(vcpu, va, &rid);
     rid = rid& RR_RID_MASK;
     vmx_vcpu_set_tr((thash_data_t *)&vcpu->arch.itrs[slot], pte, itir, va, rid);
@@ -426,7 +401,6 @@ IA64FAULT vmx_vcpu_itr_d(VCPU *vcpu, u64
 {
     int index;
     u64 ps, va, gpfn, rid;
-    thash_cb_t *hcb;
 
     ps = itir_ps(itir);
     va = PAGEALIGN(ifa, ps);
@@ -436,8 +410,7 @@ IA64FAULT vmx_vcpu_itr_d(VCPU *vcpu, u64
         panic("Tlb conflict!!");
         return IA64_FAULT;
     }
-    hcb = vmx_vcpu_get_vtlb(vcpu);
-    thash_purge_entries(hcb, va, ps);
+    thash_purge_entries(vcpu, va, ps);
     gpfn = (pte & _PAGE_PPN_MASK)>> PAGE_SHIFT;
     if(__gpfn_is_io(vcpu->domain,gpfn))
         pte |= VTLB_PTE_IO;
@@ -454,7 +427,6 @@ IA64FAULT vmx_vcpu_ptr_d(VCPU *vcpu,UINT
 {
     int index;
     u64 va;
-    thash_cb_t *hcb;
 
     va = PAGEALIGN(ifa, ps);
     index = vtr_find_overlap(vcpu, va, ps, DSIDE_TLB);
@@ -462,8 +434,7 @@ IA64FAULT vmx_vcpu_ptr_d(VCPU *vcpu,UINT
         vcpu->arch.dtrs[index].pte.p=0;
         index = vtr_find_overlap(vcpu, va, ps, DSIDE_TLB);
     }
-    hcb = vmx_vcpu_get_vtlb(vcpu);
-    thash_purge_entries(hcb, va, ps);
+    thash_purge_entries(vcpu, va, ps);
     return IA64_NO_FAULT;
 }
 
@@ -471,7 +442,6 @@ IA64FAULT vmx_vcpu_ptr_i(VCPU *vcpu,UINT
 {
     int index;
     u64 va;
-    thash_cb_t *hcb;
 
     va = PAGEALIGN(ifa, ps);
     index = vtr_find_overlap(vcpu, va, ps, ISIDE_TLB);
@@ -479,26 +449,21 @@ IA64FAULT vmx_vcpu_ptr_i(VCPU *vcpu,UINT
         vcpu->arch.itrs[index].pte.p=0;
         index = vtr_find_overlap(vcpu, va, ps, ISIDE_TLB);
     }
-    hcb = vmx_vcpu_get_vtlb(vcpu);
-    thash_purge_entries(hcb, va, ps);
+    thash_purge_entries(vcpu, va, ps);
     return IA64_NO_FAULT;
 }
 
 IA64FAULT vmx_vcpu_ptc_l(VCPU *vcpu, UINT64 va, UINT64 ps)
 {
-    thash_cb_t *hcb;
     va = PAGEALIGN(va, ps);
-    hcb = vmx_vcpu_get_vtlb(vcpu);
-    thash_purge_entries(hcb, va, ps);
+    thash_purge_entries(vcpu, va, ps);
    return IA64_NO_FAULT;
 }
 
 
 IA64FAULT vmx_vcpu_ptc_e(VCPU *vcpu, UINT64 va)
 {
-    thash_cb_t *hcb;
-    hcb = vmx_vcpu_get_vtlb(vcpu);
-    thash_purge_all(hcb);
+    thash_purge_all(vcpu);
     return IA64_NO_FAULT;
 }
 
@@ -554,12 +519,10 @@ IA64FAULT vmx_vcpu_ttag(VCPU *vcpu, UINT
 IA64FAULT vmx_vcpu_tpa(VCPU *vcpu, UINT64 vadr, UINT64 *padr)
 {
     thash_data_t *data;
-    thash_cb_t *hcb;
     ISR visr,pt_isr;
     REGS *regs;
     u64 vhpt_adr;
     IA64_PSR vpsr;
-    hcb = vmx_vcpu_get_vtlb(vcpu);
     regs=vcpu_regs(vcpu);
     pt_isr.val=VMX(vcpu,cr_isr);
     visr.val=0;
@@ -570,7 +533,7 @@ IA64FAULT vmx_vcpu_tpa(VCPU *vcpu, UINT6
         visr.ni=1;
     }
     visr.na=1;
-    data = vtlb_lookup(hcb, vadr, DSIDE_TLB);
+    data = vtlb_lookup(vcpu, vadr, DSIDE_TLB);
     if(data){
         if(data->p==0){
             visr.na=1;
@@ -618,7 +581,7 @@ IA64FAULT vmx_vcpu_tpa(VCPU *vcpu, UINT6
     }
     else{
         vmx_vcpu_thash(vcpu, vadr, &vhpt_adr);
-        data = vtlb_lookup(hcb, vhpt_adr, DSIDE_TLB);
+        data = vtlb_lookup(vcpu, vhpt_adr, DSIDE_TLB);
         if(data){
             if(vpsr.ic){
                 vcpu_set_isr(vcpu, visr.val);
@@ -648,15 +611,13 @@ IA64FAULT vmx_vcpu_tpa(VCPU *vcpu, UINT6
 IA64FAULT vmx_vcpu_tak(VCPU *vcpu, UINT64 vadr, UINT64 *key)
 {
     thash_data_t *data;
-    thash_cb_t *hcb;
     PTA vpta;
     vmx_vcpu_get_pta(vcpu, &vpta.val);
     if(vpta.vf==0 || unimplemented_gva(vcpu, vadr)){
         *key=1;
         return IA64_NO_FAULT;
     }
-    hcb = vmx_vcpu_get_vtlb(vcpu);
-    data = vtlb_lookup(hcb, vadr, DSIDE_TLB);
+    data = vtlb_lookup(vcpu, vadr, DSIDE_TLB);
     if(!data||!data->p){
         *key=1;
     }else{
@@ -688,13 +649,11 @@ long
 __domain_va_to_ma(unsigned long va, unsigned long* ma, unsigned long *len)
 {
     unsigned long mpfn, gpfn, m, n = *len;
-    thash_cb_t *vtlb;
     unsigned long end;   /* end of the area mapped by current entry */
     thash_data_t *entry;
     struct vcpu *v = current;
 
-    vtlb = vmx_vcpu_get_vtlb(v);
-    entry = vtlb_lookup(vtlb, va, DSIDE_TLB);
+    entry = vtlb_lookup(v, va, DSIDE_TLB);
     if (entry == NULL)
         return -EFAULT;
 
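The pattern repeated throughout the vmmu.c hunks above is that the thash_* entry points now take the VCPU directly, so callers no longer fetch a control block first. A sketch of a typical call site after the change (the function name itc_call_site is only for illustration; fault handling and includes are elided):

```c
/* Typical call site after the cleanup (illustrative only).
 * Before the patch the same site read:
 *     hcb = vmx_vcpu_get_vtlb(vcpu);
 *     thash_purge_and_insert(hcb, pte, itir, ifa);
 */
static IA64FAULT itc_call_site(VCPU *vcpu, u64 pte, u64 itir, u64 ifa)
{
    thash_purge_and_insert(vcpu, pte, itir, ifa);  /* vcpu carries its own vtlb/vhpt */
    return IA64_NO_FAULT;
}
```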
--- a/xen/arch/ia64/vmx/vmx_init.c	Tue Apr 25 17:05:16 2006 -0600
+++ b/xen/arch/ia64/vmx/vmx_init.c	Tue Apr 25 20:53:38 2006 -0600
@@ -279,8 +279,9 @@ vmx_final_setup_guest(struct vcpu *v)
 	/* Per-domain vTLB and vhpt implementation. Now vmx domain will stick
 	 * to this solution. Maybe it can be deferred until we know created
 	 * one as vmx domain */
-	v->arch.vtlb = init_domain_tlb(v);
-
+#ifndef HASH_VHPT
+	init_domain_tlb(v);
+#endif
 	/* v->arch.schedule_tail = arch_vmx_do_launch; */
 	vmx_create_vp(v);
 
--- a/xen/arch/ia64/vmx/vmx_phy_mode.c	Tue Apr 25 17:05:16 2006 -0600
+++ b/xen/arch/ia64/vmx/vmx_phy_mode.c	Tue Apr 25 20:53:38 2006 -0600
@@ -166,9 +166,9 @@ vmx_init_all_rr(VCPU *vcpu)
     VMX(vcpu,vrr[VRN6]) = 0x660;
     VMX(vcpu,vrr[VRN7]) = 0x760;
 #if 0
-    VMX(vcpu,mrr5) = vmx_vrrtomrr(vcpu, 0x38);
-    VMX(vcpu,mrr6) = vmx_vrrtomrr(vcpu, 0x60);
-    VMX(vcpu,mrr7) = vmx_vrrtomrr(vcpu, 0x60);
+    VMX(vcpu,mrr5) = vrrtomrr(vcpu, 0x38);
+    VMX(vcpu,mrr6) = vrrtomrr(vcpu, 0x60);
+    VMX(vcpu,mrr7) = vrrtomrr(vcpu, 0x60);
 #endif
 }
 
@@ -177,8 +177,8 @@ vmx_load_all_rr(VCPU *vcpu)
 {
     unsigned long psr;
     ia64_rr phy_rr;
+
     extern void * pal_vaddr;
-
     local_irq_save(psr);
 
 
@@ -189,37 +189,37 @@ vmx_load_all_rr(VCPU *vcpu)
         if (vcpu->arch.mode_flags & GUEST_PHY_EMUL)
             panic("Unexpected domain switch in phy emul\n");
         phy_rr.rrval = vcpu->arch.metaphysical_rr0;
-//      phy_rr.ps = PAGE_SHIFT;
+        //phy_rr.ps = PAGE_SHIFT;
         phy_rr.ve = 1;
 
         ia64_set_rr((VRN0 << VRN_SHIFT), phy_rr.rrval);
         phy_rr.rrval = vcpu->arch.metaphysical_rr4;
-//      phy_rr.ps = PAGE_SHIFT;
+        //phy_rr.ps = PAGE_SHIFT;
         phy_rr.ve = 1;
 
         ia64_set_rr((VRN4 << VRN_SHIFT), phy_rr.rrval);
     } else {
         ia64_set_rr((VRN0 << VRN_SHIFT),
-                    vmx_vrrtomrr(vcpu, VMX(vcpu, vrr[VRN0])));
+                    vrrtomrr(vcpu, VMX(vcpu, vrr[VRN0])));
         ia64_set_rr((VRN4 << VRN_SHIFT),
-                    vmx_vrrtomrr(vcpu, VMX(vcpu, vrr[VRN4])));
+                    vrrtomrr(vcpu, VMX(vcpu, vrr[VRN4])));
     }
 
     /* rr567 will be postponed to last point when resuming back to guest */
     ia64_set_rr((VRN1 << VRN_SHIFT),
-                vmx_vrrtomrr(vcpu, VMX(vcpu, vrr[VRN1])));
+                vrrtomrr(vcpu, VMX(vcpu, vrr[VRN1])));
     ia64_set_rr((VRN2 << VRN_SHIFT),
-                vmx_vrrtomrr(vcpu, VMX(vcpu, vrr[VRN2])));
+                vrrtomrr(vcpu, VMX(vcpu, vrr[VRN2])));
     ia64_set_rr((VRN3 << VRN_SHIFT),
-                vmx_vrrtomrr(vcpu, VMX(vcpu, vrr[VRN3])));
+                vrrtomrr(vcpu, VMX(vcpu, vrr[VRN3])));
     ia64_set_rr((VRN5 << VRN_SHIFT),
-                vmx_vrrtomrr(vcpu, VMX(vcpu, vrr[VRN5])));
+                vrrtomrr(vcpu, VMX(vcpu, vrr[VRN5])));
     ia64_set_rr((VRN6 << VRN_SHIFT),
-                vmx_vrrtomrr(vcpu, VMX(vcpu, vrr[VRN6])));
-    vmx_switch_rr7(vmx_vrrtomrr(vcpu,VMX(vcpu, vrr[VRN7])),
+                vrrtomrr(vcpu, VMX(vcpu, vrr[VRN6])));
+    vmx_switch_rr7(vrrtomrr(vcpu,VMX(vcpu, vrr[VRN7])),
                    (void *)vcpu->domain->shared_info,
                    (void *)vcpu->arch.privregs,
-                   (void *)vcpu->arch.vtlb->vhpt->hash, pal_vaddr );
+                   (void *)vcpu->arch.vhpt.hash, pal_vaddr );
     ia64_set_pta(vcpu->arch.arch_vmx.mpta);
 
     ia64_srlz_d();
@@ -262,10 +262,10 @@ switch_to_virtual_rid(VCPU *vcpu)
     psr=ia64_clear_ic();
 
     vcpu_get_rr(vcpu,VRN0<<VRN_SHIFT,&mrr.rrval);
-    ia64_set_rr(VRN0<<VRN_SHIFT, vmx_vrrtomrr(vcpu, mrr.rrval));
+    ia64_set_rr(VRN0<<VRN_SHIFT, vrrtomrr(vcpu, mrr.rrval));
     ia64_srlz_d();
     vcpu_get_rr(vcpu,VRN4<<VRN_SHIFT,&mrr.rrval);
-    ia64_set_rr(VRN4<<VRN_SHIFT, vmx_vrrtomrr(vcpu, mrr.rrval));
+    ia64_set_rr(VRN4<<VRN_SHIFT, vrrtomrr(vcpu, mrr.rrval));
     ia64_srlz_d();
     ia64_set_psr(psr);
     ia64_srlz_i();
--- a/xen/arch/ia64/vmx/vmx_process.c	Tue Apr 25 17:05:16 2006 -0600
+++ b/xen/arch/ia64/vmx/vmx_process.c	Tue Apr 25 20:53:38 2006 -0600
@@ -305,10 +305,8 @@ vmx_hpw_miss(u64 vadr , u64 vec, REGS* r
     u64 vhpt_adr, gppa;
     ISR misr;
 //  REGS *regs;
-    thash_cb_t *vtlb;
     thash_data_t *data;
     VCPU *v = current;
-    vtlb=vmx_vcpu_get_vtlb(v);
 #ifdef VTLB_DEBUG
     check_vtlb_sanity(vtlb);
     dump_vtlb(vtlb);
@@ -344,7 +342,7 @@ vmx_hpw_miss(u64 vadr , u64 vec, REGS* r
 
 //  prepare_if_physical_mode(v);
 
-    if((data=vtlb_lookup(vtlb, vadr,type))!=0){
+    if((data=vtlb_lookup(v, vadr,type))!=0){
 //      gppa = (vadr&((1UL<<data->ps)-1))+(data->ppn>>(data->ps-12)<<data->ps);
 //      if(v->domain!=dom0&&type==DSIDE_TLB && __gpfn_is_io(v->domain,gppa>>PAGE_SHIFT)){
         if(v->domain!=dom0 && data->io && type==DSIDE_TLB ){
@@ -362,7 +360,7 @@ vmx_hpw_miss(u64 vadr , u64 vec, REGS* r
         }
         else{
 */
-            thash_vhpt_insert(vtlb->vhpt,data->page_flags, data->itir ,vadr);
+            thash_vhpt_insert(&v->arch.vhpt,data->page_flags, data->itir ,vadr);
 //      }
 //      }
     }else if(type == DSIDE_TLB){
@@ -383,7 +381,7 @@ vmx_hpw_miss(u64 vadr , u64 vec, REGS* r
             }
         } else{
             vmx_vcpu_thash(v, vadr, &vhpt_adr);
-            if(vhpt_lookup(vhpt_adr) || vtlb_lookup(vtlb, vhpt_adr, DSIDE_TLB)){
+            if(vhpt_lookup(vhpt_adr) || vtlb_lookup(v, vhpt_adr, DSIDE_TLB)){
                 if(vpsr.ic){
                     vcpu_set_isr(v, misr.val);
                     dtlb_fault(v, vadr);
@@ -425,7 +423,7 @@ vmx_hpw_miss(u64 vadr , u64 vec, REGS* r
             return IA64_FAULT;
         } else{
             vmx_vcpu_thash(v, vadr, &vhpt_adr);
-            if(vhpt_lookup(vhpt_adr) || vtlb_lookup(vtlb, vhpt_adr, DSIDE_TLB)){
+            if(vhpt_lookup(vhpt_adr) || vtlb_lookup(v, vhpt_adr, DSIDE_TLB)){
                 if(!vpsr.ic){
                     misr.ni=1;
                 }
--- a/xen/arch/ia64/vmx/vmx_vcpu.c	Tue Apr 25 17:05:16 2006 -0600
+++ b/xen/arch/ia64/vmx/vmx_vcpu.c	Tue Apr 25 20:53:38 2006 -0600
@@ -190,13 +190,6 @@ IA64FAULT vmx_vcpu_cover(VCPU *vcpu)
 }
 
 
-thash_cb_t *
-vmx_vcpu_get_vtlb(VCPU *vcpu)
-{
-    return vcpu->arch.vtlb;
-}
-
-
 struct virtual_platform_def *
 vmx_vcpu_get_plat(VCPU *vcpu)
 {
@@ -208,7 +201,6 @@ vmx_vcpu_get_plat(VCPU *vcpu)
 IA64FAULT vmx_vcpu_set_rr(VCPU *vcpu, UINT64 reg, UINT64 val)
 {
     ia64_rr oldrr,newrr;
-    thash_cb_t *hcb;
     extern void * pal_vaddr;
 
     vcpu_get_rr(vcpu, reg, &oldrr.rrval);
@@ -216,18 +208,17 @@ IA64FAULT vmx_vcpu_set_rr(VCPU *vcpu, UI
     if (newrr.rid >= (1 << vcpu->domain->arch.rid_bits))
         panic_domain (NULL, "use of invalid rid %lx\n", newrr.rid);
     if(oldrr.ps!=newrr.ps){
-        hcb = vmx_vcpu_get_vtlb(vcpu);
-        thash_purge_all(hcb);
+        thash_purge_all(vcpu);
     }
     VMX(vcpu,vrr[reg>>61]) = val;
     switch((u64)(reg>>61)) {
     case VRN7:
-        vmx_switch_rr7(vmx_vrrtomrr(vcpu,val),vcpu->domain->shared_info,
+        vmx_switch_rr7(vrrtomrr(vcpu,val),vcpu->domain->shared_info,
                        (void *)vcpu->arch.privregs,
-                       (void *)vcpu->arch.vtlb->vhpt->hash, pal_vaddr );
+                       (void *)vcpu->arch.vhpt.hash, pal_vaddr );
         break;
     default:
-        ia64_set_rr(reg,vmx_vrrtomrr(vcpu,val));
+        ia64_set_rr(reg,vrrtomrr(vcpu,val));
         break;
     }
 
--- a/xen/arch/ia64/vmx/vtlb.c	Tue Apr 25 17:05:16 2006 -0600
+++ b/xen/arch/ia64/vmx/vtlb.c	Tue Apr 25 20:53:38 2006 -0600
@@ -36,7 +36,7 @@ thash_data_t *__alloc_chain(thash_cb_t *
 
 static void cch_mem_init(thash_cb_t *hcb)
 {
-    thash_cch_mem_t *p, *q;
+    thash_data_t *p, *q;
 
     hcb->cch_freelist = p = hcb->cch_buf;
 
@@ -49,11 +49,11 @@ static void cch_mem_init(thash_cb_t *hcb
 
 static thash_data_t *cch_alloc(thash_cb_t *hcb)
 {
-    thash_cch_mem_t *p;
+    thash_data_t *p;
 
     if ( (p = hcb->cch_freelist) != NULL ) {
         hcb->cch_freelist = p->next;
-        return (thash_data_t *)p;
+        return p;
     }else{
         return NULL;
     }
@@ -61,10 +61,8 @@ static thash_data_t *cch_alloc(thash_cb_
 
 static void cch_free(thash_cb_t *hcb, thash_data_t *cch)
 {
-    thash_cch_mem_t *p = (thash_cch_mem_t*)cch;
-
-    p->next = hcb->cch_freelist;
-    hcb->cch_freelist = p;
+    cch->next = hcb->cch_freelist;
+    hcb->cch_freelist = cch;
 }
 
 /*
@@ -181,15 +179,16 @@ int __tlb_to_vhpt(thash_cb_t *hcb, thash
 
 static void thash_remove_cch(thash_cb_t *hcb, thash_data_t *hash)
 {
-    thash_data_t *prev, *next;
-    prev = hash; next= hash->next;
-    while(next){
-        prev=next;
-        next=prev->next;
-        cch_free(hcb, prev);
+    thash_data_t *p;
+    if(hash->next){
+        p=hash->next;
+        while(p->next)
+            p=p->next;
+        p->next=hcb->cch_freelist;
+        hcb->cch_freelist=hash->next;
+        hash->next=0;
+        hash->len=0;
     }
-    hash->next = NULL;
-    hash->len = 0;
 }
 
 /* vhpt only has entries with PAGE_SIZE page size */
@@ -200,8 +199,6 @@ void thash_vhpt_insert(thash_cb_t *hcb,
     vhpt_entry.page_flags = pte & ~PAGE_FLAGS_RV_MASK;
     vhpt_entry.itir=itir;
 
-//  ia64_rr vrr;
-
     if ( !__tlb_to_vhpt(hcb, &vhpt_entry, ifa) ) {
         return;
     //panic("Can't convert to machine VHPT entry\n");
@@ -469,10 +466,11 @@ int vtr_find_overlap(VCPU *vcpu, u64 va,
 /*
  * Purge entries in VTLB and VHPT
  */
-void thash_purge_entries(thash_cb_t *hcb, u64 va, u64 ps)
+void thash_purge_entries(VCPU *v, u64 va, u64 ps)
 {
-    vtlb_purge(hcb, va, ps);
-    vhpt_purge(hcb->vhpt, va, ps);
+    if(vcpu_quick_region_check(v->arch.tc_regions,va))
+        vtlb_purge(&v->arch.vtlb, va, ps);
+    vhpt_purge(&v->arch.vhpt, va, ps);
 }
 
 
@@ -480,18 +478,21 @@ void thash_purge_entries(thash_cb_t *hcb
  * Purge overlap TCs and then insert the new entry to emulate itc ops.
  * Notes: Only TC entry can purge and insert.
 */
-void thash_purge_and_insert(thash_cb_t *hcb, u64 pte, u64 itir, u64 ifa)
+void thash_purge_and_insert(VCPU *v, u64 pte, u64 itir, u64 ifa)
 {
     u64 ps, va;
     ps = itir_ps(itir);
     va = PAGEALIGN(ifa,ps);
-    vtlb_purge(hcb, va, ps);
-    vhpt_purge(hcb->vhpt, va, ps);
-    if((ps!=PAGE_SHIFT)||(pte&VTLB_PTE_IO))
-        vtlb_insert(hcb, pte, itir, va);
+    if(vcpu_quick_region_check(v->arch.tc_regions,va))
+        vtlb_purge(&v->arch.vtlb, va, ps);
+    vhpt_purge(&v->arch.vhpt, va, ps);
+    if((ps!=PAGE_SHIFT)||(pte&VTLB_PTE_IO)){
+        vtlb_insert(&v->arch.vtlb, pte, itir, va);
+        vcpu_quick_region_set(PSCBX(v,tc_regions),va);
+    }
    if(!(pte&VTLB_PTE_IO)){
        va = PAGEALIGN(ifa,PAGE_SHIFT);
-        thash_vhpt_insert(hcb->vhpt, pte, itir, va);
+        thash_vhpt_insert(&v->arch.vhpt, pte, itir, va);
    }
 }
 
@@ -503,13 +504,14 @@ void thash_purge_and_insert(thash_cb_t *
 */
 
 // TODO: add sections.
-void thash_purge_all(thash_cb_t *hcb)
+void thash_purge_all(VCPU *v)
 {
     thash_data_t *hash_table;
     /* thash_data_t *entry; */
-    thash_cb_t *vhpt;
+    thash_cb_t *hcb,*vhpt;
     /* u64 i, start, end; */
-
+    hcb =&v->arch.vtlb;
+    vhpt =&v->arch.vhpt;
 #ifdef VTLB_DEBUG
     extern u64 sanity_check;
     static u64 statistics_before_purge_all=0;
@@ -526,7 +528,6 @@ void thash_purge_all(thash_cb_t *hcb)
     }
     cch_mem_init (hcb);
 
-    vhpt = hcb->vhpt;
     hash_table = (thash_data_t*)((u64)vhpt->hash + vhpt->hash_sz);
     for (--hash_table;(u64)hash_table >= (u64)vhpt->hash;hash_table--) {
         INVALIDATE_VHPT_HEADER(hash_table);
@@ -544,18 +545,22 @@ void thash_purge_all(thash_cb_t *hcb)
 *  in: TLB format for both VHPT & TLB.
 */
 
-thash_data_t *vtlb_lookup(thash_cb_t *hcb, u64 va,int is_data)
+thash_data_t *vtlb_lookup(VCPU *v, u64 va,int is_data)
 {
     thash_data_t *hash_table, *cch;
     u64 tag;
     ia64_rr vrr;
-
+    thash_cb_t * hcb= &v->arch.vtlb;
     ASSERT ( hcb->ht == THASH_TLB );
 
-    cch = __vtr_lookup(hcb->vcpu, va, is_data);;
+    cch = __vtr_lookup(v, va, is_data);;
     if ( cch ) return cch;
 
-    vcpu_get_rr(hcb->vcpu,va,&vrr.rrval);
+    if(vcpu_quick_region_check(v->arch.tc_regions,va)==0)
+        return NULL;
+
+
+    vcpu_get_rr(v,va,&vrr.rrval);
     hash_table = vsa_thash( hcb->pta, va, vrr.rrval, &tag);
 
     if ( INVALID_ENTRY(hcb, hash_table ) )
@@ -578,7 +583,6 @@ void thash_init(thash_cb_t *hcb, u64 sz)
     thash_data_t *hash_table;
 
     cch_mem_init (hcb);
-    hcb->magic = THASH_CB_MAGIC;
     hcb->pta.val = (unsigned long)hcb->hash;
     hcb->pta.vf = 1;
     hcb->pta.ve = 1;
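Behind the new vcpu_quick_region_check/vcpu_quick_region_set calls above is the per-vcpu tc_regions bitmap: vtlb_insert marks the virtual region a TC entry lands in, and purges and lookups skip the vTLB walk entirely for regions that never received one (the VHPT is still purged unconditionally). A minimal standalone sketch of that filter, assuming one bit per virtual region selected by the top three address bits (va >> 61), which is how the existing ia64 quick-region helpers behave:

```c
#include <stdint.h>

/* Assumption, not part of this patch: one bit per ia64 virtual region,
 * indexed by the top three bits of the address (va >> 61). */
#define QUICK_REGION_BIT(va)   (1UL << ((uint64_t)(va) >> 61))

static inline int quick_region_check(uint64_t tc_regions, uint64_t va)
{
    return (tc_regions & QUICK_REGION_BIT(va)) != 0;
}

static inline void quick_region_set(uint64_t *tc_regions, uint64_t va)
{
    *tc_regions |= QUICK_REGION_BIT(va);
}
```

With this in place, thash_purge_entries() and vtlb_lookup() return early whenever a region's bit is clear; only regions that actually received a TC entry via thash_purge_and_insert() pay for the hash walk.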
--- a/xen/include/asm-ia64/domain.h	Tue Apr 25 17:05:16 2006 -0600
+++ b/xen/include/asm-ia64/domain.h	Tue Apr 25 20:53:38 2006 -0600
@@ -74,7 +74,8 @@ struct arch_vcpu {
     unsigned long dtlb_pte;
     unsigned long irr[4];
     unsigned long insvc[4];
-    unsigned long iva;
+    unsigned long tc_regions;
+    unsigned long iva;
     unsigned long dcr;
     unsigned long itc;
     unsigned long domain_itm;
@@ -91,7 +92,8 @@ struct arch_vcpu {
     int ending_rid;		/* one beyond highest RID assigned to domain */
     struct thread_struct _thread;	// this must be last
 
-    thash_cb_t *vtlb;
+    thash_cb_t vtlb;
+    thash_cb_t vhpt;
     char irq_new_pending;
     char irq_new_condition;    // vpsr.i/vtpr change, check for pending VHPI
     char hypercall_continuation;
--- a/xen/include/asm-ia64/vmmu.h	Tue Apr 25 17:05:16 2006 -0600
+++ b/xen/include/asm-ia64/vmmu.h	Tue Apr 25 20:53:38 2006 -0600
@@ -30,7 +30,8 @@
 #define     VCPU_VHPT_SHIFT     (24)    // 16M for VTLB
 #define     VCPU_VHPT_SIZE      (1UL<<VCPU_VHPT_SHIFT)
 #define     VCPU_VHPT_ORDER     (VCPU_VHPT_SHIFT - PAGE_SHIFT)
-
+#define VTLB(v,_x) (v->arch.vtlb._x)
+#define VHPT(v,_x) (v->arch.vhpt._x)
 #ifndef __ASSEMBLY__
 
 #include <xen/config.h>
@@ -180,12 +181,6 @@ typedef enum {
 } THASH_TYPE;
 
 struct thash_cb;
-typedef union thash_cch_mem {
-        thash_data_t    data;
-        union thash_cch_mem *next;
-} thash_cch_mem_t;
-
-
 /*
  * Use to calculate the HASH index of thash_data_t.
  */
@@ -230,11 +225,11 @@ typedef struct thash_internal {
     u64     _eva;
 } thash_internal_t;
  */
-#define  THASH_CB_MAGIC         0x55aa00aa55aa55aaUL
+//#define  THASH_CB_MAGIC         0x55aa00aa55aa55aaUL
 typedef struct thash_cb {
     /* THASH base information */
-    THASH_TYPE  ht;     // For TLB or VHPT
-    u64         magic;
+//    THASH_TYPE  ht;     // For TLB or VHPT
+//    u64         magic;
     thash_data_t    *hash; // hash table pointer, aligned at thash_sz.
     u64     hash_sz;        // size of above data.
     void    *cch_buf;       // base address of collision chain.
@@ -242,10 +237,10 @@ typedef struct thash_cb {
 //    THASH_FN        *hash_func;
 //    GET_RR_FN       *get_rr_fn;
 //    RECYCLE_FN      *recycle_notifier;
-    thash_cch_mem_t *cch_freelist;
-    struct vcpu *vcpu;
+    thash_data_t *cch_freelist;
+//    struct vcpu *vcpu;
     PTA     pta;
-    struct thash_cb *vhpt;
+//    struct thash_cb *vhpt;
     /* VTLB/VHPT common information */
 //    FIND_OVERLAP_FN *find_overlap;
 //    FIND_NEXT_OVL_FN *next_overlap;
@@ -347,21 +342,21 @@ extern thash_data_t *thash_find_next_ove
  * NOTES:
 *
 */
-extern void thash_purge_entries(thash_cb_t *hcb, u64 va, u64 ps);
-extern void thash_purge_and_insert(thash_cb_t *hcb, u64 pte, u64 itir, u64 ifa);
+extern void thash_purge_entries(struct vcpu *v, u64 va, u64 ps);
+extern void thash_purge_and_insert(struct vcpu *v, u64 pte, u64 itir, u64 ifa);
 
 /*
 * Purge all TCs or VHPT entries including those in Hash table.
 *
 */
-extern void thash_purge_all(thash_cb_t *hcb);
+extern void thash_purge_all(struct vcpu *v);
 
 /*
 * Lookup the hash table and its collision chain to find an entry
 * covering this address rid:va.
 *
 */
-extern thash_data_t *vtlb_lookup(thash_cb_t *hcb,u64 va,int is_data);
+extern thash_data_t *vtlb_lookup(struct vcpu *v,u64 va,int is_data);
 extern int thash_lock_tc(thash_cb_t *hcb, u64 va, u64 size, int rid, char cl, int lock);
 
 
@@ -372,7 +367,7 @@ extern u64 machine_thash(PTA pta, u64 va
 extern void purge_machine_tc_by_domid(domid_t domid);
 extern void machine_tlb_insert(struct vcpu *d, thash_data_t *tlb);
 extern ia64_rr vmmu_get_rr(struct vcpu *vcpu, u64 va);
-extern thash_cb_t *init_domain_tlb(struct vcpu *d);
+extern void init_domain_tlb(struct vcpu *d);
 extern void free_domain_tlb(struct vcpu *v);
 extern thash_data_t * vsa_thash(PTA vpta, u64 va, u64 vrr, u64 *tag);
 extern thash_data_t * vhpt_lookup(u64 va);
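With thash_cch_mem_t removed above, the collision-chain free list is simply threaded through thash_data_t's own next pointer: cch_alloc() pops the head, cch_free() pushes a node back, and thash_remove_cch() splices a whole chain onto the free list in one step. A minimal sketch of that push/pop pattern (node_t is a stand-in for thash_data_t, with the payload elided):

```c
#include <stddef.h>

typedef struct node {
    struct node *next;   /* doubles as the free-list link, as in thash_data_t */
    /* ... pte/itir/vadr payload ... */
} node_t;

typedef struct {
    node_t *cch_freelist;
} cb_t;

static node_t *cch_alloc_sketch(cb_t *hcb)           /* pop the head node */
{
    node_t *p = hcb->cch_freelist;
    if (p != NULL)
        hcb->cch_freelist = p->next;
    return p;
}

static void cch_free_sketch(cb_t *hcb, node_t *cch)  /* push one node back */
{
    cch->next = hcb->cch_freelist;
    hcb->cch_freelist = cch;
}
```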
--- a/xen/include/asm-ia64/vmx_vcpu.h	Tue Apr 25 17:05:16 2006 -0600
+++ b/xen/include/asm-ia64/vmx_vcpu.h	Tue Apr 25 20:53:38 2006 -0600
@@ -64,8 +64,6 @@ extern void vmx_vcpu_set_psr(VCPU *vcpu,
 extern UINT64 vmx_vcpu_sync_mpsr(UINT64 mipsr, UINT64 value);
 extern void vmx_vcpu_set_psr_sync_mpsr(VCPU * vcpu, UINT64 value);
 extern IA64FAULT vmx_vcpu_cover(VCPU *vcpu);
-extern thash_cb_t *vmx_vcpu_get_vtlb(VCPU *vcpu);
-extern thash_cb_t *vmx_vcpu_get_vhpt(VCPU *vcpu);
 extern IA64FAULT vmx_vcpu_set_rr(VCPU *vcpu, UINT64 reg, UINT64 val);
 extern IA64FAULT vmx_vcpu_get_pkr(VCPU *vcpu, UINT64 reg, UINT64 *pval);
 IA64FAULT vmx_vcpu_set_pkr(VCPU *vcpu, UINT64 reg, UINT64 val);
@@ -461,7 +459,7 @@ IA64FAULT vmx_vcpu_bsw1(VCPU *vcpu)
 #define redistribute_rid(rid)	(((rid) & ~0xffff) | (((rid) << 8) & 0xff00) | (((rid) >> 8) & 0xff))
 #endif
 static inline unsigned long
-vmx_vrrtomrr(VCPU *v, unsigned long val)
+vrrtomrr(VCPU *v, unsigned long val)
 {
     ia64_rr rr;
 
@@ -477,6 +475,17 @@ vmx_vrrtomrr(VCPU *v, unsigned long val)
 #endif
 
 }
+static inline thash_cb_t *
+vmx_vcpu_get_vtlb(VCPU *vcpu)
+{
+    return &vcpu->arch.vtlb;
+}
+
+static inline thash_cb_t *
+vcpu_get_vhpt(VCPU *vcpu)
+{
+    return &vcpu->arch.vhpt;
+}
 
 #define check_work_pending(v) \
     (event_pending((v)) || ((v)->arch.irq_new_pending))