ia64/xen-unstable
changeset 9011:cfe20f41f043
[IA64] VTI: updated vtlb, support_non_contiguous memory on vtidomain
Previously VTI-domain only supported contiguous memory;
this patch is intended to make VTI-domain support non-contiguous memory.
Signed-off-by: Anthony Xu <anthony.xu@intel.com>
line diff
1.1 --- a/xen/arch/ia64/vmx/mm.c Tue Feb 28 13:18:08 2006 -0700 1.2 +++ b/xen/arch/ia64/vmx/mm.c Wed Mar 01 08:29:00 2006 -0700 1.3 @@ -117,6 +117,7 @@ int vmx_do_mmu_update(mmu_update_t *ureq 1.4 copy_from_user(&req, ureqs, sizeof(req)); 1.5 cmd = req.ptr&3; 1.6 req.ptr &= ~3; 1.7 +/* 1.8 if(cmd ==MMU_NORMAL_PT_UPDATE){ 1.9 entry.page_flags = req.val; 1.10 entry.locked = 1; 1.11 @@ -135,8 +136,10 @@ int vmx_do_mmu_update(mmu_update_t *ureq 1.12 panic("Tlb conflict!!"); 1.13 return -1; 1.14 } 1.15 - thash_purge_and_insert(hcb, &entry); 1.16 - }else if(cmd == MMU_MACHPHYS_UPDATE){ 1.17 + thash_purge_and_insert(hcb, &entry, req.ptr); 1.18 + }else 1.19 + */ 1.20 + if(cmd == MMU_MACHPHYS_UPDATE){ 1.21 mfn = req.ptr >>PAGE_SHIFT; 1.22 gpfn = req.val; 1.23 set_machinetophys(mfn,gpfn);
2.1 --- a/xen/arch/ia64/vmx/vmmu.c Tue Feb 28 13:18:08 2006 -0700 2.2 +++ b/xen/arch/ia64/vmx/vmmu.c Wed Mar 01 08:29:00 2006 -0700 2.3 @@ -34,37 +34,23 @@ 2.4 #include <xen/irq.h> 2.5 2.6 /* 2.7 - * Architecture ppn is in 4KB unit while XEN 2.8 - * page may be different(1<<PAGE_SHIFT). 2.9 - */ 2.10 -static inline u64 arch_ppn_to_xen_ppn(u64 appn) 2.11 -{ 2.12 - return (appn << ARCH_PAGE_SHIFT) >> PAGE_SHIFT; 2.13 -} 2.14 - 2.15 -static inline u64 xen_ppn_to_arch_ppn(u64 xppn) 2.16 -{ 2.17 - return (xppn << PAGE_SHIFT) >> ARCH_PAGE_SHIFT; 2.18 -} 2.19 - 2.20 - 2.21 -/* 2.22 * Get the machine page frame number in 16KB unit 2.23 * Input: 2.24 * d: 2.25 */ 2.26 -u64 get_mfn(domid_t domid, u64 gpfn, u64 pages) 2.27 +u64 get_mfn(struct domain *d, u64 gpfn) 2.28 { 2.29 - struct domain *d; 2.30 +// struct domain *d; 2.31 u64 xen_gppn, xen_mppn, mpfn; 2.32 - 2.33 +/* 2.34 if ( domid == DOMID_SELF ) { 2.35 d = current->domain; 2.36 } 2.37 else { 2.38 d = find_domain_by_id(domid); 2.39 } 2.40 - xen_gppn = arch_ppn_to_xen_ppn(gpfn); 2.41 + */ 2.42 + xen_gppn = arch_to_xen_ppn(gpfn); 2.43 xen_mppn = gmfn_to_mfn(d, xen_gppn); 2.44 /* 2.45 for (i=0; i<pages; i++) { 2.46 @@ -73,8 +59,8 @@ u64 get_mfn(domid_t domid, u64 gpfn, u64 2.47 } 2.48 } 2.49 */ 2.50 - mpfn= xen_ppn_to_arch_ppn(xen_mppn); 2.51 - mpfn = mpfn | (((1UL <<(PAGE_SHIFT-12))-1)&gpfn); 2.52 + mpfn= xen_to_arch_ppn(xen_mppn); 2.53 + mpfn = mpfn | (((1UL <<(PAGE_SHIFT-ARCH_PAGE_SHIFT))-1)&gpfn); 2.54 return mpfn; 2.55 2.56 } 2.57 @@ -142,66 +128,67 @@ purge_machine_tc_by_domid(domid_t domid) 2.58 #endif 2.59 } 2.60 2.61 -static thash_cb_t *init_domain_vhpt(struct vcpu *d) 2.62 +static thash_cb_t *init_domain_vhpt(struct vcpu *d, void *vbase, void *vcur) 2.63 { 2.64 - struct page_info *page; 2.65 - void *vbase,*vcur; 2.66 - vhpt_special *vs; 2.67 +// struct page_info *page; 2.68 thash_cb_t *vhpt; 2.69 PTA pta_value; 2.70 - 2.71 - page = alloc_domheap_pages (NULL, VCPU_TLB_ORDER, 0); 2.72 +/* 2.73 + page = 
alloc_domheap_pages (NULL, VCPU_VHPT_ORDER, 0); 2.74 if ( page == NULL ) { 2.75 panic("No enough contiguous memory for init_domain_mm\n"); 2.76 } 2.77 vbase = page_to_virt(page); 2.78 printk("Allocate domain vhpt at 0x%lx\n", (u64)vbase); 2.79 - memset(vbase, 0, VCPU_TLB_SIZE); 2.80 - vcur = (void*)((u64)vbase + VCPU_TLB_SIZE); 2.81 + memset(vbase, 0, VCPU_VHPT_SIZE); 2.82 + */ 2.83 +// vcur = (void*)((u64)vbase + VCPU_VHPT_SIZE); 2.84 vcur -= sizeof (thash_cb_t); 2.85 vhpt = vcur; 2.86 vhpt->ht = THASH_VHPT; 2.87 vhpt->vcpu = d; 2.88 - vhpt->hash_func = machine_thash; 2.89 - vcur -= sizeof (vhpt_special); 2.90 - vs = vcur; 2.91 +// vhpt->hash_func = machine_thash; 2.92 +// vcur -= sizeof (vhpt_special); 2.93 +// vs = vcur; 2.94 2.95 /* Setup guest pta */ 2.96 pta_value.val = 0; 2.97 pta_value.ve = 1; 2.98 pta_value.vf = 1; 2.99 - pta_value.size = VCPU_TLB_SHIFT - 1; /* 2M */ 2.100 + pta_value.size = VCPU_VHPT_SHIFT - 1; /* 16M*/ 2.101 pta_value.base = ((u64)vbase) >> PTA_BASE_SHIFT; 2.102 d->arch.arch_vmx.mpta = pta_value.val; 2.103 - 2.104 - vhpt->vs = vs; 2.105 - vhpt->vs->get_mfn = get_mfn; 2.106 - vhpt->vs->tag_func = machine_ttag; 2.107 + 2.108 +// vhpt->vs = vs; 2.109 +// vhpt->vs->get_mfn = __gpfn_to_mfn_foreign; 2.110 +// vhpt->vs->tag_func = machine_ttag; 2.111 vhpt->hash = vbase; 2.112 - vhpt->hash_sz = VCPU_TLB_SIZE/2; 2.113 + vhpt->hash_sz = VCPU_VHPT_SIZE/2; 2.114 vhpt->cch_buf = (void *)(vbase + vhpt->hash_sz); 2.115 vhpt->cch_sz = (u64)vcur - (u64)vhpt->cch_buf; 2.116 - vhpt->recycle_notifier = recycle_message; 2.117 - thash_init(vhpt,VCPU_TLB_SHIFT-1); 2.118 +// vhpt->recycle_notifier = recycle_message; 2.119 + thash_init(vhpt,VCPU_VHPT_SHIFT-1); 2.120 return vhpt; 2.121 } 2.122 2.123 2.124 + 2.125 thash_cb_t *init_domain_tlb(struct vcpu *d) 2.126 { 2.127 struct page_info *page; 2.128 - void *vbase,*vcur; 2.129 + void *vbase, *vhptbase, *vcur; 2.130 tlb_special_t *ts; 2.131 thash_cb_t *tlb; 2.132 2.133 - page = alloc_domheap_pages (NULL, 
VCPU_TLB_ORDER, 0); 2.134 + page = alloc_domheap_pages (NULL, VCPU_VHPT_ORDER, 0); 2.135 if ( page == NULL ) { 2.136 panic("No enough contiguous memory for init_domain_mm\n"); 2.137 } 2.138 - vbase = page_to_virt(page); 2.139 - printk("Allocate domain tlb at 0x%lx\n", (u64)vbase); 2.140 - memset(vbase, 0, VCPU_TLB_SIZE); 2.141 - vcur = (void*)((u64)vbase + VCPU_TLB_SIZE); 2.142 + vhptbase = page_to_virt(page); 2.143 + memset(vhptbase, 0, VCPU_VHPT_SIZE); 2.144 + printk("Allocate domain tlb&vhpt at 0x%lx\n", (u64)vhptbase); 2.145 + vbase =vhptbase + VCPU_VHPT_SIZE - VCPU_VTLB_SIZE; 2.146 + vcur = (void*)((u64)vbase + VCPU_VTLB_SIZE); 2.147 vcur -= sizeof (thash_cb_t); 2.148 tlb = vcur; 2.149 tlb->ht = THASH_TLB; 2.150 @@ -209,14 +196,14 @@ thash_cb_t *init_domain_tlb(struct vcpu 2.151 vcur -= sizeof (tlb_special_t); 2.152 ts = vcur; 2.153 tlb->ts = ts; 2.154 - tlb->ts->vhpt = init_domain_vhpt(d); 2.155 - tlb->hash_func = machine_thash; 2.156 + tlb->ts->vhpt = init_domain_vhpt(d,vhptbase,vbase); 2.157 +// tlb->hash_func = machine_thash; 2.158 tlb->hash = vbase; 2.159 - tlb->hash_sz = VCPU_TLB_SIZE/2; 2.160 - tlb->cch_buf = (void *)((u64)vbase + tlb->hash_sz); 2.161 + tlb->hash_sz = VCPU_VTLB_SIZE/2; 2.162 + tlb->cch_buf = (void *)(vbase + tlb->hash_sz); 2.163 tlb->cch_sz = (u64)vcur - (u64)tlb->cch_buf; 2.164 - tlb->recycle_notifier = recycle_message; 2.165 - thash_init(tlb,VCPU_TLB_SHIFT-1); 2.166 +// tlb->recycle_notifier = recycle_message; 2.167 + thash_init(tlb,VCPU_VTLB_SHIFT-1); 2.168 return tlb; 2.169 } 2.170 2.171 @@ -250,12 +237,12 @@ void machine_tlb_insert(struct vcpu *d, 2.172 u64 psr; 2.173 thash_data_t mtlb; 2.174 unsigned int cl = tlb->cl; 2.175 - unsigned long mtlb_ppn; 2.176 + unsigned long mtlb_ppn; 2.177 mtlb.ifa = tlb->vadr; 2.178 mtlb.itir = tlb->itir & ~ITIR_RV_MASK; 2.179 //vmx_vcpu_get_rr(d, mtlb.ifa, &vrr.value); 2.180 mtlb.page_flags = tlb->page_flags & ~PAGE_FLAGS_RV_MASK; 2.181 - mtlb.ppn = (unsigned long)get_mfn(DOMID_SELF,tlb->ppn, 1); 
2.182 + mtlb.ppn = get_mfn(d->domain,tlb->ppn); 2.183 mtlb_ppn=mtlb.ppn; 2.184 if (mtlb_ppn == INVALID_MFN) 2.185 panic("Machine tlb insert with invalid mfn number.\n"); 2.186 @@ -289,42 +276,33 @@ void machine_tlb_purge(u64 va, u64 ps) 2.187 // ia64_srlz_i(); 2.188 // return; 2.189 } 2.190 - 2.191 -u64 machine_thash(PTA pta, u64 va) 2.192 +/* 2.193 +u64 machine_thash(u64 va) 2.194 { 2.195 - u64 saved_pta; 2.196 - u64 hash_addr; 2.197 - unsigned long psr; 2.198 - 2.199 - saved_pta = ia64_getreg(_IA64_REG_CR_PTA); 2.200 - psr = ia64_clear_ic(); 2.201 - ia64_setreg(_IA64_REG_CR_PTA, pta.val); 2.202 - hash_addr = ia64_thash(va); 2.203 - ia64_setreg(_IA64_REG_CR_PTA, saved_pta); 2.204 - ia64_set_psr(psr); 2.205 - ia64_srlz_i(); 2.206 - return hash_addr; 2.207 + return ia64_thash(va); 2.208 } 2.209 2.210 -u64 machine_ttag(PTA pta, u64 va) 2.211 +u64 machine_ttag(u64 va) 2.212 +{ 2.213 + return ia64_ttag(va); 2.214 +} 2.215 +*/ 2.216 +thash_data_t * vsa_thash(PTA vpta, u64 va, u64 vrr, u64 *tag) 2.217 { 2.218 -// u64 saved_pta; 2.219 -// u64 hash_addr, tag; 2.220 -// u64 psr; 2.221 -// struct vcpu *v = current; 2.222 - 2.223 -// saved_pta = ia64_getreg(_IA64_REG_CR_PTA); 2.224 -// psr = ia64_clear_ic(); 2.225 -// ia64_setreg(_IA64_REG_CR_PTA, pta.val); 2.226 -// tag = ia64_ttag(va); 2.227 - return ia64_ttag(va); 2.228 -// ia64_setreg(_IA64_REG_CR_PTA, saved_pta); 2.229 -// ia64_set_psr(psr); 2.230 -// ia64_srlz_i(); 2.231 -// return tag; 2.232 + u64 index,pfn,rid,pfn_bits; 2.233 + pfn_bits = vpta.size-5-8; 2.234 + pfn = REGION_OFFSET(va)>>_REGION_PAGE_SIZE(vrr); 2.235 + rid = _REGION_ID(vrr); 2.236 + index = ((rid&0xff)<<pfn_bits)|(pfn&((1UL<<pfn_bits)-1)); 2.237 + *tag = ((rid>>8)&0xffff) | ((pfn >>pfn_bits)<<16); 2.238 + return (thash_data_t *)((vpta.base<<PTA_BASE_SHIFT)+(index<<5)); 2.239 +// return ia64_call_vsa(PAL_VPS_THASH,va,vrr,vpta,0,0,0,0); 2.240 } 2.241 2.242 - 2.243 +//u64 vsa_ttag(u64 va, u64 vrr) 2.244 +//{ 2.245 +// return 
ia64_call_vsa(PAL_VPS_TTAG,va,vrr,0,0,0,0,0); 2.246 +//} 2.247 2.248 int vhpt_enabled(VCPU *vcpu, uint64_t vadr, vhpt_ref_t ref) 2.249 { 2.250 @@ -371,11 +349,12 @@ int unimplemented_gva(VCPU *vcpu,u64 vad 2.251 * num: number of dword (8byts) to read. 2.252 */ 2.253 int 2.254 -fetch_code(VCPU *vcpu, u64 gip, u64 *code) 2.255 +fetch_code(VCPU *vcpu, u64 gip, u64 *code1, u64 *code2) 2.256 { 2.257 - u64 gpip; // guest physical IP 2.258 - u64 mpa; 2.259 + u64 gpip=0; // guest physical IP 2.260 + u64 *vpa; 2.261 thash_data_t *tlb; 2.262 + thash_cb_t *hcb; 2.263 ia64_rr vrr; 2.264 u64 mfn; 2.265 2.266 @@ -384,19 +363,26 @@ fetch_code(VCPU *vcpu, u64 gip, u64 *cod 2.267 } 2.268 else { 2.269 vmx_vcpu_get_rr(vcpu, gip, &vrr.rrval); 2.270 - tlb = vtlb_lookup_ex (vmx_vcpu_get_vtlb(vcpu), 2.271 - vrr.rid, gip, ISIDE_TLB ); 2.272 + hcb = vmx_vcpu_get_vtlb(vcpu); 2.273 + tlb = vtlb_lookup_ex (hcb, vrr.rid, gip, ISIDE_TLB ); 2.274 if( tlb == NULL ) 2.275 - tlb = vtlb_lookup_ex (vmx_vcpu_get_vtlb(vcpu), 2.276 + tlb = vtlb_lookup_ex (hcb, 2.277 vrr.rid, gip, DSIDE_TLB ); 2.278 - if ( tlb == NULL ) panic("No entry found in ITLB and DTLB\n"); 2.279 - gpip = (tlb->ppn << 12) | ( gip & (PSIZE(tlb->ps)-1) ); 2.280 + if (tlb) 2.281 + gpip = (tlb->ppn << 12) | ( gip & (PSIZE(tlb->ps)-1) ); 2.282 } 2.283 - mfn = gmfn_to_mfn(vcpu->domain, gpip >>PAGE_SHIFT); 2.284 - if ( mfn == INVALID_MFN ) return 0; 2.285 - 2.286 - mpa = (gpip & (PAGE_SIZE-1)) | (mfn<<PAGE_SHIFT); 2.287 - *code = *(u64*)__va(mpa); 2.288 + if( gpip){ 2.289 + mfn = gmfn_to_mfn(vcpu->domain, gpip >>PAGE_SHIFT); 2.290 + if( mfn == INVALID_MFN ) panic("fetch_code: invalid memory\n"); 2.291 + vpa =(u64 *)__va( (gip & (PAGE_SIZE-1)) | (mfn<<PAGE_SHIFT)); 2.292 + }else{ 2.293 + tlb = vhpt_lookup(gip); 2.294 + if( tlb == NULL) 2.295 + panic("No entry found in ITLB and DTLB\n"); 2.296 + vpa =(u64 *)__va((tlb->ppn>>(PAGE_SHIFT-ARCH_PAGE_SHIFT)<<PAGE_SHIFT)|(gip&(PAGE_SIZE-1))); 2.297 + } 2.298 + *code1 = *vpa++; 2.299 + *code2 = 
*vpa; 2.300 return 1; 2.301 } 2.302 2.303 @@ -420,13 +406,13 @@ IA64FAULT vmx_vcpu_itc_i(VCPU *vcpu, UIN 2.304 sections.tr = 1; 2.305 sections.tc = 0; 2.306 2.307 - ovl = thash_find_overlap(hcb, &data, sections); 2.308 + ovl = vtr_find_overlap(hcb, &data, ISIDE_TLB); 2.309 while (ovl) { 2.310 // generate MCA. 2.311 panic("Tlb conflict!!"); 2.312 return IA64_FAULT; 2.313 } 2.314 - thash_purge_and_insert(hcb, &data); 2.315 + thash_purge_and_insert(hcb, &data, ifa); 2.316 return IA64_NO_FAULT; 2.317 } 2.318 2.319 @@ -447,24 +433,26 @@ IA64FAULT vmx_vcpu_itc_d(VCPU *vcpu, UIN 2.320 data.vadr=PAGEALIGN(ifa,data.ps); 2.321 data.tc = 1; 2.322 data.cl=DSIDE_TLB; 2.323 - vmx_vcpu_get_rr(vcpu, ifa, (UINT64 *)&vrr); 2.324 + vmx_vcpu_get_rr(vcpu, ifa,(UINT64 *)&vrr); 2.325 data.rid = vrr.rid; 2.326 sections.tr = 1; 2.327 sections.tc = 0; 2.328 2.329 - ovl = thash_find_overlap(hcb, &data, sections); 2.330 + ovl = vtr_find_overlap(hcb, &data, DSIDE_TLB); 2.331 if (ovl) { 2.332 // generate MCA. 2.333 panic("Tlb conflict!!"); 2.334 return IA64_FAULT; 2.335 } 2.336 - thash_purge_and_insert(hcb, &data); 2.337 + thash_purge_and_insert(hcb, &data, ifa); 2.338 return IA64_NO_FAULT; 2.339 } 2.340 2.341 /* 2.342 * Return TRUE/FALSE for success of lock operation 2.343 */ 2.344 + 2.345 +/* 2.346 int vmx_lock_guest_dtc (VCPU *vcpu, UINT64 va, int lock) 2.347 { 2.348 2.349 @@ -472,12 +460,15 @@ int vmx_lock_guest_dtc (VCPU *vcpu, UINT 2.350 ia64_rr vrr; 2.351 u64 preferred_size; 2.352 2.353 - vmx_vcpu_get_rr(vcpu, va, (UINT64 *)&vrr); 2.354 + vmx_vcpu_get_rr(vcpu, va, &vrr); 2.355 hcb = vmx_vcpu_get_vtlb(vcpu); 2.356 va = PAGEALIGN(va,vrr.ps); 2.357 preferred_size = PSIZE(vrr.ps); 2.358 return thash_lock_tc(hcb, va, preferred_size, vrr.rid, DSIDE_TLB, lock); 2.359 } 2.360 + */ 2.361 + 2.362 + 2.363 2.364 IA64FAULT vmx_vcpu_itr_i(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa, UINT64 idx) 2.365 { 2.366 @@ -486,6 +477,7 @@ IA64FAULT vmx_vcpu_itr_i(VCPU *vcpu, UIN 2.367 thash_cb_t *hcb; 2.368 
search_section_t sections; 2.369 ia64_rr vrr; 2.370 + u64 mfn,psr; 2.371 2.372 hcb = vmx_vcpu_get_vtlb(vcpu); 2.373 data.page_flags=pte & ~PAGE_FLAGS_RV_MASK; 2.374 @@ -498,7 +490,8 @@ IA64FAULT vmx_vcpu_itr_i(VCPU *vcpu, UIN 2.375 sections.tr = 1; 2.376 sections.tc = 0; 2.377 2.378 - ovl = thash_find_overlap(hcb, &data, sections); 2.379 + 2.380 + ovl = vtr_find_overlap(hcb, &data, ISIDE_TLB); 2.381 if (ovl) { 2.382 // generate MCA. 2.383 panic("Tlb conflict!!"); 2.384 @@ -507,7 +500,23 @@ IA64FAULT vmx_vcpu_itr_i(VCPU *vcpu, UIN 2.385 sections.tr = 0; 2.386 sections.tc = 1; 2.387 thash_purge_entries(hcb, &data, sections); 2.388 +/* if((idx==IA64_TR_KERNEL)&&(data.ps == KERNEL_TR_PAGE_SHIFT)){ 2.389 + data.contiguous=1; 2.390 + } 2.391 + */ 2.392 thash_tr_insert(hcb, &data, ifa, idx); 2.393 +/* 2.394 + if((idx==IA64_TR_KERNEL)&&(data.ps == KERNEL_TR_PAGE_SHIFT)){ 2.395 + mfn = __gpfn_to_mfn_foreign(vcpu->domain,arch_to_xen_ppn(data.ppn)); 2.396 + data.page_flags=pte&~PAGE_FLAGS_RV_MASK; 2.397 + data.ppn = xen_to_arch_ppn(mfn); 2.398 + psr = ia64_clear_ic(); 2.399 + ia64_itr(0x1, IA64_ITR_GUEST_KERNEL, data.vadr, data.page_flags, data.ps); 2.400 + ia64_set_psr(psr); // restore psr 2.401 + ia64_srlz_i(); 2.402 +// return IA64_NO_FAULT; 2.403 + } 2.404 +*/ 2.405 return IA64_NO_FAULT; 2.406 } 2.407 2.408 @@ -518,7 +527,7 @@ IA64FAULT vmx_vcpu_itr_d(VCPU *vcpu, UIN 2.409 thash_cb_t *hcb; 2.410 search_section_t sections; 2.411 ia64_rr vrr; 2.412 - 2.413 + u64 mfn,psr; 2.414 2.415 hcb = vmx_vcpu_get_vtlb(vcpu); 2.416 data.page_flags=pte & ~PAGE_FLAGS_RV_MASK; 2.417 @@ -526,12 +535,12 @@ IA64FAULT vmx_vcpu_itr_d(VCPU *vcpu, UIN 2.418 data.vadr=PAGEALIGN(ifa,data.ps); 2.419 data.tc = 0; 2.420 data.cl=DSIDE_TLB; 2.421 - vmx_vcpu_get_rr(vcpu, ifa, (UINT64 *)&vrr); 2.422 + vmx_vcpu_get_rr(vcpu, ifa,(UINT64 *)&vrr); 2.423 data.rid = vrr.rid; 2.424 sections.tr = 1; 2.425 sections.tc = 0; 2.426 2.427 - ovl = thash_find_overlap(hcb, &data, sections); 2.428 + ovl = 
vtr_find_overlap(hcb, &data, DSIDE_TLB); 2.429 while (ovl) { 2.430 // generate MCA. 2.431 panic("Tlb conflict!!"); 2.432 @@ -540,7 +549,25 @@ IA64FAULT vmx_vcpu_itr_d(VCPU *vcpu, UIN 2.433 sections.tr = 0; 2.434 sections.tc = 1; 2.435 thash_purge_entries(hcb, &data, sections); 2.436 +/* 2.437 + if((idx==IA64_TR_KERNEL)&&(data.ps == KERNEL_TR_PAGE_SHIFT)){ 2.438 + data.contiguous=1; 2.439 + } 2.440 + */ 2.441 thash_tr_insert(hcb, &data, ifa, idx); 2.442 +/* 2.443 + if((idx==IA64_TR_KERNEL)&&(data.ps == KERNEL_TR_PAGE_SHIFT)){ 2.444 + mfn = __gpfn_to_mfn_foreign(vcpu->domain,arch_to_xen_ppn(data.ppn)); 2.445 + data.page_flags=pte&~PAGE_FLAGS_RV_MASK; 2.446 + data.ppn = xen_to_arch_ppn(mfn); 2.447 + psr = ia64_clear_ic(); 2.448 + ia64_itr(0x2,IA64_DTR_GUEST_KERNEL , data.vadr, data.page_flags, data.ps); 2.449 + ia64_set_psr(psr); // restore psr 2.450 + ia64_srlz_i(); 2.451 +// return IA64_NO_FAULT; 2.452 + } 2.453 +*/ 2.454 + 2.455 return IA64_NO_FAULT; 2.456 } 2.457 2.458 @@ -685,7 +712,25 @@ IA64FAULT vmx_vcpu_tpa(VCPU *vcpu, UINT6 2.459 *padr = (data->ppn<<12) | (vadr&(PSIZE(data->ps)-1)); 2.460 return IA64_NO_FAULT; 2.461 } 2.462 - }else{ 2.463 + } 2.464 + data = vhpt_lookup(vadr); 2.465 + if(data){ 2.466 + if(data->p==0){ 2.467 + visr.na=1; 2.468 + vcpu_set_isr(vcpu,visr.val); 2.469 + page_not_present(vcpu, vadr); 2.470 + return IA64_FAULT; 2.471 + }else if(data->ma == VA_MATTR_NATPAGE){ 2.472 + visr.na = 1; 2.473 + vcpu_set_isr(vcpu, visr.val); 2.474 + dnat_page_consumption(vcpu, vadr); 2.475 + return IA64_FAULT; 2.476 + }else{ 2.477 + *padr = ((*(mpt_table+arch_to_xen_ppn(data->ppn)))<<PAGE_SHIFT) | (vadr&(PAGE_SIZE-1)); 2.478 + return IA64_NO_FAULT; 2.479 + } 2.480 + } 2.481 + else{ 2.482 if(!vhpt_enabled(vcpu, vadr, NA_REF)){ 2.483 if(vpsr.ic){ 2.484 vcpu_set_isr(vcpu, visr.val);
3.1 --- a/xen/arch/ia64/vmx/vmx_entry.S Tue Feb 28 13:18:08 2006 -0700 3.2 +++ b/xen/arch/ia64/vmx/vmx_entry.S Wed Mar 01 08:29:00 2006 -0700 3.3 @@ -34,6 +34,7 @@ 3.4 #include <asm/thread_info.h> 3.5 #include <asm/unistd.h> 3.6 #include <asm/vhpt.h> 3.7 +#include <asm/vmmu.h> 3.8 #include "vmx_minstate.h" 3.9 3.10 /* 3.11 @@ -696,7 +697,7 @@ 1: 3.12 movl r25=PAGE_KERNEL 3.13 ;; 3.14 or loc5 = r25,loc5 // construct PA | page properties 3.15 - mov r23 = IA64_GRANULE_SHIFT <<2 3.16 + mov r23 = VCPU_VHPT_SHIFT <<2 3.17 ;; 3.18 ptr.d in3,r23 3.19 ;;
4.1 --- a/xen/arch/ia64/vmx/vmx_hypercall.c Tue Feb 28 13:18:08 2006 -0700 4.2 +++ b/xen/arch/ia64/vmx/vmx_hypercall.c Wed Mar 01 08:29:00 2006 -0700 4.3 @@ -178,6 +178,8 @@ static int do_lock_page(VCPU *vcpu, u64 4.4 * Lock guest page in vTLB, so that it's not relinquished by recycle 4.5 * session when HV is servicing that hypercall. 4.6 */ 4.7 + 4.8 +/* 4.9 void hyper_lock_page(void) 4.10 { 4.11 //TODO: 4.12 @@ -190,6 +192,7 @@ void hyper_lock_page(void) 4.13 4.14 vmx_vcpu_increment_iip(vcpu); 4.15 } 4.16 + */ 4.17 4.18 static int do_set_shared_page(VCPU *vcpu, u64 gpa) 4.19 {
5.1 --- a/xen/arch/ia64/vmx/vmx_init.c Tue Feb 28 13:18:08 2006 -0700 5.2 +++ b/xen/arch/ia64/vmx/vmx_init.c Wed Mar 01 08:29:00 2006 -0700 5.3 @@ -172,7 +172,15 @@ static vpd_t *alloc_vpd(void) 5.4 cpuid3.number = 4; /* 5 - 1 */ 5.5 vpd->vcpuid[3] = cpuid3.value; 5.6 5.7 + vpd->vac.a_from_int_cr = 1; 5.8 + vpd->vac.a_to_int_cr = 1; 5.9 + vpd->vac.a_from_psr = 1; 5.10 + vpd->vac.a_from_cpuid = 1; 5.11 + vpd->vac.a_cover = 1; 5.12 + vpd->vac.a_bsw = 1; 5.13 + 5.14 vpd->vdc.d_vmsw = 1; 5.15 + 5.16 return vpd; 5.17 } 5.18 5.19 @@ -300,7 +308,7 @@ io_range_t io_ranges[] = { 5.20 int vmx_alloc_contig_pages(struct domain *d) 5.21 { 5.22 unsigned int order; 5.23 - unsigned long i, j, start, end, pgnr, conf_nr; 5.24 + unsigned long i, j, start,tmp, end, pgnr, conf_nr; 5.25 struct page_info *page; 5.26 struct vcpu *v = d->vcpu[0]; 5.27 5.28 @@ -315,52 +323,100 @@ int vmx_alloc_contig_pages(struct domain 5.29 } 5.30 5.31 conf_nr = VMX_CONFIG_PAGES(d); 5.32 + if((conf_nr<<PAGE_SHIFT)<(1UL<<(_PAGE_SIZE_64M+1))) 5.33 + panic("vti domain needs 128M memory at least\n"); 5.34 +/* 5.35 order = get_order_from_pages(conf_nr); 5.36 if (unlikely((page = alloc_domheap_pages(d, order, 0)) == NULL)) { 5.37 printk("Could not allocate order=%d pages for vmx contig alloc\n", 5.38 order); 5.39 return -1; 5.40 } 5.41 +*/ 5.42 + 5.43 +/* reserve contiguous 64M for linux kernel */ 5.44 + 5.45 + if (unlikely((page = alloc_domheap_pages(d,(KERNEL_TR_PAGE_SHIFT-PAGE_SHIFT), 0)) == NULL)) { 5.46 + printk("No enough memory for vti domain!!!\n"); 5.47 + return -1; 5.48 + } 5.49 + pgnr = page_to_mfn(page); 5.50 + for (i=(1UL<<KERNEL_TR_PAGE_SHIFT);i<(1UL<<(KERNEL_TR_PAGE_SHIFT+1));i+=PAGE_SIZE,pgnr++){ 5.51 + assign_domain_page(d, i, pgnr << PAGE_SHIFT); 5.52 + } 5.53 + 5.54 + for (i = 0; i < (1UL<<KERNEL_TR_PAGE_SHIFT) ; i += PAGE_SIZE){ 5.55 + if (unlikely((page = alloc_domheap_pages(d, 0, 0)) == NULL)) { 5.56 + printk("No enough memory for vti domain!!!\n"); 5.57 + return -1; 5.58 + } 5.59 + pgnr = 
page_to_mfn(page); 5.60 + assign_domain_page(d, i, pgnr << PAGE_SHIFT); 5.61 + } 5.62 5.63 /* Map normal memory below 3G */ 5.64 - pgnr = page_to_mfn(page); 5.65 end = conf_nr << PAGE_SHIFT; 5.66 - for (i = 0; 5.67 - i < (end < MMIO_START ? end : MMIO_START); 5.68 - i += PAGE_SIZE, pgnr++) 5.69 + tmp = end < MMIO_START ? end : MMIO_START; 5.70 + for (i = (1UL<<(KERNEL_TR_PAGE_SHIFT+1)); i < tmp; i += PAGE_SIZE){ 5.71 + if (unlikely((page = alloc_domheap_pages(d, 0, 0)) == NULL)) { 5.72 + printk("No enough memory for vti domain!!!\n"); 5.73 + return -1; 5.74 + } 5.75 + pgnr = page_to_mfn(page); 5.76 assign_domain_page(d, i, pgnr << PAGE_SHIFT); 5.77 - 5.78 + } 5.79 /* Map normal memory beyond 4G */ 5.80 if (unlikely(end > MMIO_START)) { 5.81 start = 4 * MEM_G; 5.82 end = start + (end - 3 * MEM_G); 5.83 - for (i = start; i < end; i += PAGE_SIZE, pgnr++) 5.84 - assign_domain_page(d, i, pgnr << PAGE_SHIFT); 5.85 + for (i = start; i < end; i += PAGE_SIZE){ 5.86 + if (unlikely((page = alloc_domheap_pages(d, 0, 0)) == NULL)) { 5.87 + printk("No enough memory for vti domain!!!\n"); 5.88 + return -1; 5.89 + } 5.90 + pgnr = page_to_mfn(page); 5.91 + assign_domain_page(d, i, pgnr << PAGE_SHIFT); 5.92 + } 5.93 } 5.94 5.95 d->arch.max_pfn = end >> PAGE_SHIFT; 5.96 - 5.97 +/* 5.98 order = get_order_from_pages(GFW_SIZE >> PAGE_SHIFT); 5.99 if (unlikely((page = alloc_domheap_pages(d, order, 0)) == NULL)) { 5.100 printk("Could not allocate order=%d pages for vmx contig alloc\n", 5.101 - order); 5.102 + order);` 5.103 return -1; 5.104 } 5.105 - 5.106 +*/ 5.107 /* Map guest firmware */ 5.108 - pgnr = page_to_mfn(page); 5.109 - for (i = GFW_START; i < GFW_START + GFW_SIZE; i += PAGE_SIZE, pgnr++) 5.110 + for (i = GFW_START; i < GFW_START + GFW_SIZE; i += PAGE_SIZE, pgnr++){ 5.111 + if (unlikely((page = alloc_domheap_pages(d, 0, 0)) == NULL)) { 5.112 + printk("No enough memory for vti domain!!!\n"); 5.113 + return -1; 5.114 + } 5.115 + pgnr = page_to_mfn(page); 5.116 
assign_domain_page(d, i, pgnr << PAGE_SHIFT); 5.117 + } 5.118 5.119 +/* 5.120 if (unlikely((page = alloc_domheap_pages(d, 1, 0)) == NULL)) { 5.121 printk("Could not allocate order=1 pages for vmx contig alloc\n"); 5.122 return -1; 5.123 } 5.124 - 5.125 +*/ 5.126 /* Map for shared I/O page and xenstore */ 5.127 + if (unlikely((page = alloc_domheap_pages(d, 0, 0)) == NULL)) { 5.128 + printk("No enough memory for vti domain!!!\n"); 5.129 + return -1; 5.130 + } 5.131 pgnr = page_to_mfn(page); 5.132 assign_domain_page(d, IO_PAGE_START, pgnr << PAGE_SHIFT); 5.133 - pgnr++; 5.134 + 5.135 + if (unlikely((page = alloc_domheap_pages(d, 0, 0)) == NULL)) { 5.136 + printk("No enough memory for vti domain!!!\n"); 5.137 + return -1; 5.138 + } 5.139 + pgnr = page_to_mfn(page); 5.140 assign_domain_page(d, STORE_PAGE_START, pgnr << PAGE_SHIFT); 5.141 5.142 set_bit(ARCH_VMX_CONTIG_MEM, &v->arch.arch_vmx.flags);
6.1 --- a/xen/arch/ia64/vmx/vmx_ivt.S Tue Feb 28 13:18:08 2006 -0700 6.2 +++ b/xen/arch/ia64/vmx/vmx_ivt.S Wed Mar 01 08:29:00 2006 -0700 6.3 @@ -269,6 +269,10 @@ ENTRY(vmx_alt_itlb_miss) 6.4 (p7)br.sptk vmx_fault_3 6.5 vmx_alt_itlb_miss_1: 6.6 mov r16=cr.ifa // get address that caused the TLB miss 6.7 + ;; 6.8 + tbit.z p6,p7=r16,63 6.9 +(p6)br.sptk vmx_fault_3 6.10 + ;; 6.11 movl r17=PAGE_KERNEL 6.12 mov r24=cr.ipsr 6.13 movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff) 6.14 @@ -300,6 +304,10 @@ ENTRY(vmx_alt_dtlb_miss) 6.15 (p7)br.sptk vmx_fault_4 6.16 vmx_alt_dtlb_miss_1: 6.17 mov r16=cr.ifa // get address that caused the TLB miss 6.18 + ;; 6.19 + tbit.z p6,p7=r16,63 6.20 +(p6)br.sptk vmx_fault_4 6.21 + ;; 6.22 movl r17=PAGE_KERNEL 6.23 mov r20=cr.isr 6.24 movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff) 6.25 @@ -397,7 +405,7 @@ END(vmx_break_fault) 6.26 ///////////////////////////////////////////////////////////////////////////////////////// 6.27 // 0x3000 Entry 12 (size 64 bundles) External Interrupt (4) 6.28 ENTRY(vmx_interrupt) 6.29 - VMX_DBG_FAULT(12) 6.30 +// VMX_DBG_FAULT(12) 6.31 mov r31=pr // prepare to save predicates 6.32 mov r19=12 6.33 mov r29=cr.ipsr 6.34 @@ -734,7 +742,7 @@ END(vmx_single_step_trap) 6.35 ///////////////////////////////////////////////////////////////////////////////////////// 6.36 // 0x6100 Entry 37 (size 16 bundles) Virtualization Fault 6.37 ENTRY(vmx_virtualization_fault) 6.38 - VMX_DBG_FAULT(37) 6.39 +// VMX_DBG_FAULT(37) 6.40 mov r31=pr 6.41 mov r19=37 6.42 adds r16 = IA64_VCPU_CAUSE_OFFSET,r21 6.43 @@ -1138,5 +1146,5 @@ hyper_call_table: 6.44 data8 hyper_not_support //hyper_boot_vcpu 6.45 data8 hyper_not_support //hyper_ni_hypercall /* 25 */ 6.46 data8 hyper_not_support //hyper_mmuext_op 6.47 - data8 hyper_lock_page 6.48 + data8 hyper_not_support //tata8 hyper_lock_page 6.49 data8 hyper_set_shared_page
7.1 --- a/xen/arch/ia64/vmx/vmx_phy_mode.c Tue Feb 28 13:18:08 2006 -0700 7.2 +++ b/xen/arch/ia64/vmx/vmx_phy_mode.c Wed Mar 01 08:29:00 2006 -0700 7.3 @@ -27,7 +27,7 @@ 7.4 #include <asm/vmx_phy_mode.h> 7.5 #include <xen/sched.h> 7.6 #include <asm/pgtable.h> 7.7 - 7.8 +#include <asm/vmmu.h> 7.9 int valid_mm_mode[8] = { 7.10 GUEST_PHYS, /* (it, dt, rt) -> (0, 0, 0) */ 7.11 INV_MODE, 7.12 @@ -94,7 +94,7 @@ int mm_switch_table[8][8] = { 7.13 * (1,1,1)->(1,0,0) 7.14 */ 7.15 7.16 - {SW_V2P, 0, 0, 0, SW_V2P, SW_V2P, 0, SW_SELF} 7.17 + {SW_V2P, 0, 0, 0, SW_V2P, SW_V2P, 0, SW_SELF}, 7.18 }; 7.19 7.20 void 7.21 @@ -104,9 +104,8 @@ physical_mode_init(VCPU *vcpu) 7.22 vcpu->arch.mode_flags = GUEST_IN_PHY; 7.23 } 7.24 7.25 -extern u64 get_mfn(domid_t domid, u64 gpfn, u64 pages); 7.26 +extern u64 get_mfn(struct domain *d, u64 gpfn); 7.27 extern void vmx_switch_rr7(unsigned long ,shared_info_t*,void *,void *,void *); 7.28 - 7.29 void 7.30 physical_itlb_miss_dom0(VCPU *vcpu, u64 vadr) 7.31 { 7.32 @@ -115,7 +114,7 @@ physical_itlb_miss_dom0(VCPU *vcpu, u64 7.33 u64 mppn,gppn; 7.34 vpsr.val=vmx_vcpu_get_psr(vcpu); 7.35 gppn=(vadr<<1)>>13; 7.36 - mppn = get_mfn(DOMID_SELF,gppn,1); 7.37 + mppn = get_mfn(vcpu->domain,gppn); 7.38 mppn=(mppn<<12)|(vpsr.cpl<<7); 7.39 // if(vadr>>63) 7.40 // mppn |= PHY_PAGE_UC; 7.41 @@ -147,7 +146,7 @@ physical_dtlb_miss(VCPU *vcpu, u64 vadr) 7.42 // panic("dom n physical dtlb miss happen\n"); 7.43 vpsr.val=vmx_vcpu_get_psr(vcpu); 7.44 gppn=(vadr<<1)>>13; 7.45 - mppn = get_mfn(DOMID_SELF,gppn,1); 7.46 + mppn = get_mfn(vcpu->domain, gppn); 7.47 mppn=(mppn<<12)|(vpsr.cpl<<7); 7.48 if(vadr>>63) 7.49 mppn |= PHY_PAGE_UC;
8.1 --- a/xen/arch/ia64/vmx/vmx_process.c Tue Feb 28 13:18:08 2006 -0700 8.2 +++ b/xen/arch/ia64/vmx/vmx_process.c Wed Mar 01 08:29:00 2006 -0700 8.3 @@ -47,6 +47,7 @@ 8.4 #include <asm/vmx_vcpu.h> 8.5 #include <asm/kregs.h> 8.6 #include <asm/vmx.h> 8.7 +#include <asm/vmmu.h> 8.8 #include <asm/vmx_mm_def.h> 8.9 #include <asm/vmx_phy_mode.h> 8.10 #include <xen/mm.h> 8.11 @@ -314,6 +315,10 @@ vmx_hpw_miss(u64 vadr , u64 vec, REGS* r 8.12 return; 8.13 } 8.14 */ 8.15 + if(vadr == 0x1ea18c00 ){ 8.16 + ia64_clear_ic(); 8.17 + while(1); 8.18 + } 8.19 if(is_physical_mode(v)&&(!(vadr<<1>>62))){ 8.20 if(vec==1){ 8.21 physical_itlb_miss(v, vadr); 8.22 @@ -342,12 +347,18 @@ vmx_hpw_miss(u64 vadr , u64 vec, REGS* r 8.23 return IA64_FAULT; 8.24 } 8.25 8.26 - if ( data->ps != vrr.ps ) { 8.27 +// if ( data->ps != vrr.ps ) { 8.28 +// machine_tlb_insert(v, data); 8.29 +// } 8.30 +// else { 8.31 +/* if ( data->contiguous&&(!data->tc)){ 8.32 machine_tlb_insert(v, data); 8.33 - } 8.34 - else { 8.35 - thash_insert(vtlb->ts->vhpt,data,vadr); 8.36 - } 8.37 + } 8.38 + else{ 8.39 + */ 8.40 + thash_vhpt_insert(vtlb->ts->vhpt,data,vadr); 8.41 +// } 8.42 +// } 8.43 }else if(type == DSIDE_TLB){ 8.44 if(!vhpt_enabled(v, vadr, misr.rs?RSE_REF:DATA_REF)){ 8.45 if(vpsr.ic){ 8.46 @@ -367,8 +378,7 @@ vmx_hpw_miss(u64 vadr , u64 vec, REGS* r 8.47 } else{ 8.48 vmx_vcpu_thash(v, vadr, &vhpt_adr); 8.49 vrr=vmx_vcpu_rr(v,vhpt_adr); 8.50 - data = vtlb_lookup_ex(vtlb, vrr.rid, vhpt_adr, DSIDE_TLB); 8.51 - if(data){ 8.52 + if(vhpt_lookup(vhpt_adr) || vtlb_lookup_ex(vtlb, vrr.rid, vhpt_adr, DSIDE_TLB)){ 8.53 if(vpsr.ic){ 8.54 vcpu_set_isr(v, misr.val); 8.55 dtlb_fault(v, vadr); 8.56 @@ -411,8 +421,7 @@ vmx_hpw_miss(u64 vadr , u64 vec, REGS* r 8.57 } else{ 8.58 vmx_vcpu_thash(v, vadr, &vhpt_adr); 8.59 vrr=vmx_vcpu_rr(v,vhpt_adr); 8.60 - data = vtlb_lookup_ex(vtlb, vrr.rid, vhpt_adr, DSIDE_TLB); 8.61 - if(data){ 8.62 + if(vhpt_lookup(vhpt_adr) || vtlb_lookup_ex(vtlb, vrr.rid, vhpt_adr, DSIDE_TLB)){ 8.63 
if(!vpsr.ic){ 8.64 misr.ni=1; 8.65 }
9.1 --- a/xen/arch/ia64/vmx/vmx_virt.c Tue Feb 28 13:18:08 2006 -0700 9.2 +++ b/xen/arch/ia64/vmx/vmx_virt.c Wed Mar 01 08:29:00 2006 -0700 9.3 @@ -1300,9 +1300,7 @@ extern IA64_SLOT_TYPE slot_types[0x20][ 9.4 IA64_BUNDLE __vmx_get_domain_bundle(u64 iip) 9.5 { 9.6 IA64_BUNDLE bundle; 9.7 - 9.8 - fetch_code( current,iip, &bundle.i64[0]); 9.9 - fetch_code( current,iip+8, &bundle.i64[1]); 9.10 + fetch_code( current, iip, &bundle.i64[0], &bundle.i64[1]); 9.11 return bundle; 9.12 } 9.13
10.1 --- a/xen/arch/ia64/vmx/vtlb.c Tue Feb 28 13:18:08 2006 -0700 10.2 +++ b/xen/arch/ia64/vmx/vtlb.c Wed Mar 01 08:29:00 2006 -0700 10.3 @@ -28,8 +28,10 @@ 10.4 #include <asm/gcc_intrin.h> 10.5 #include <linux/interrupt.h> 10.6 #include <asm/vmx_vcpu.h> 10.7 +#include <asm/vmmu.h> 10.8 #define MAX_CCH_LENGTH 40 10.9 10.10 +thash_data_t *__alloc_chain(thash_cb_t *, thash_data_t *); 10.11 10.12 static void cch_mem_init(thash_cb_t *hcb) 10.13 { 10.14 @@ -50,8 +52,10 @@ static thash_data_t *cch_alloc(thash_cb_ 10.15 10.16 if ( (p = hcb->cch_freelist) != NULL ) { 10.17 hcb->cch_freelist = p->next; 10.18 + return p; 10.19 + }else{ 10.20 + return NULL; 10.21 } 10.22 - return &(p->data); 10.23 } 10.24 10.25 static void cch_free(thash_cb_t *hcb, thash_data_t *cch) 10.26 @@ -65,36 +69,38 @@ static void cch_free(thash_cb_t *hcb, th 10.27 /* 10.28 * Check to see if the address rid:va is translated by the TLB 10.29 */ 10.30 -static int __is_translated(thash_data_t *tlb, u64 rid, u64 va, CACHE_LINE_TYPE cl) 10.31 + 10.32 +static int __is_tr_translated(thash_data_t *tlb, u64 rid, u64 va, CACHE_LINE_TYPE cl) 10.33 { 10.34 - u64 size1,sa1,ea1; 10.35 - if ( tlb->rid != rid ||(!tlb->tc && tlb->cl != cl) ) 10.36 - return 0; 10.37 - size1 = PSIZE(tlb->ps); 10.38 - sa1 = tlb->vadr & ~(size1-1); // mask the low address bits 10.39 - ea1 = sa1 + size1; 10.40 - 10.41 - if ( va >= sa1 && (va < ea1 || ea1 == 0) ) 10.42 + u64 size; 10.43 + size = PSIZE(tlb->ps); 10.44 + if(tlb->vadr&(size-1)) 10.45 + while(1); 10.46 + if ((tlb->rid == rid) && ((va-tlb->vadr)<size)) 10.47 return 1; 10.48 else 10.49 return 0; 10.50 } 10.51 10.52 /* 10.53 - * Only for TLB format. 10.54 + * Only for GUEST TR format. 
10.55 */ 10.56 static int 10.57 -__is_tlb_overlap(thash_cb_t *hcb,thash_data_t *entry,int rid, char cl, u64 sva, u64 eva) 10.58 +__is_tr_overlap(thash_cb_t *hcb,thash_data_t *entry,int rid, char cl, u64 sva, u64 eva) 10.59 { 10.60 - uint64_t size1,sa1,ea1; 10.61 + uint64_t size, sa1, ea1; 10.62 10.63 - if ( entry->invalid || entry->rid != rid || (!entry->tc && entry->cl != cl ) ) { 10.64 +// if ( entry->invalid || entry->rid != rid || (entry->cl != cl ) ) { 10.65 + if ( entry->invalid || entry->rid != rid ) { 10.66 return 0; 10.67 } 10.68 - size1=PSIZE(entry->ps); 10.69 - sa1 = entry->vadr & ~(size1-1); // mask the low address bits 10.70 - ea1 = sa1 + size1; 10.71 - if ( (sva >= ea1 && ea1 != 0) || (eva <= sa1 && eva != 0) ) 10.72 + size = PSIZE(entry->ps); 10.73 + sa1 = entry->vadr; 10.74 + ea1 = sa1 + size -1; 10.75 + eva -= 1; 10.76 + if(sa1&(size-1)) 10.77 + while(1); 10.78 + if ( (sva>ea1) || (sa1>eva) ) 10.79 return 0; 10.80 else 10.81 return 1; 10.82 @@ -103,9 +109,11 @@ static int 10.83 10.84 static void __rem_tr (thash_cb_t *hcb, thash_data_t *tr) 10.85 { 10.86 +/* 10.87 if ( hcb->remove_notifier ) { 10.88 (hcb->remove_notifier)(hcb,tr); 10.89 } 10.90 +*/ 10.91 tr->invalid = 1; 10.92 } 10.93 10.94 @@ -142,7 +150,7 @@ static void rep_tr(thash_cb_t *hcb,thash 10.95 else { 10.96 tr = &DTR(hcb,idx); 10.97 } 10.98 - if ( !INVALID_TLB(tr) ) { 10.99 + if ( !INVALID_TR(tr) ) { 10.100 __rem_tr(hcb, tr); 10.101 } 10.102 __set_tr (tr, insert, idx); 10.103 @@ -151,6 +159,7 @@ static void rep_tr(thash_cb_t *hcb,thash 10.104 /* 10.105 * remove TR entry. 
10.106 */ 10.107 +/* 10.108 static void rem_tr(thash_cb_t *hcb,CACHE_LINE_TYPE cl, int idx) 10.109 { 10.110 thash_data_t *tr; 10.111 @@ -161,17 +170,18 @@ static void rem_tr(thash_cb_t *hcb,CACHE 10.112 else { 10.113 tr = &DTR(hcb,idx); 10.114 } 10.115 - if ( !INVALID_TLB(tr) ) { 10.116 + if ( !INVALID_TR(tr) ) { 10.117 __rem_tr(hcb, tr); 10.118 } 10.119 } 10.120 - 10.121 + */ 10.122 /* 10.123 * Delete an thash entry in collision chain. 10.124 * prev: the previous entry. 10.125 * rem: the removed entry. 10.126 */ 10.127 -static void __rem_chain(thash_cb_t *hcb/*, thash_data_t *prev*/, thash_data_t *rem) 10.128 +/* 10.129 +static void __rem_chain(thash_cb_t *hcb, thash_data_t *prev, thash_data_t *rem) 10.130 { 10.131 //prev->next = rem->next; 10.132 if ( hcb->remove_notifier ) { 10.133 @@ -179,6 +189,7 @@ static void __rem_chain(thash_cb_t *hcb/ 10.134 } 10.135 cch_free (hcb, rem); 10.136 } 10.137 + */ 10.138 10.139 /* 10.140 * Delete an thash entry leading collision chain. 10.141 @@ -187,15 +198,16 @@ static void __rem_hash_head(thash_cb_t * 10.142 { 10.143 thash_data_t *next=hash->next; 10.144 10.145 - if ( hcb->remove_notifier ) { 10.146 +/* if ( hcb->remove_notifier ) { 10.147 (hcb->remove_notifier)(hcb,hash); 10.148 - } 10.149 + } */ 10.150 if ( next != NULL ) { 10.151 + next->len=hash->len-1; 10.152 *hash = *next; 10.153 cch_free (hcb, next); 10.154 } 10.155 else { 10.156 - INVALIDATE_HASH(hcb, hash); 10.157 + INVALIDATE_HASH_HEADER(hcb, hash); 10.158 } 10.159 } 10.160 10.161 @@ -215,8 +227,8 @@ thash_data_t *__vtr_lookup(thash_cb_t *h 10.162 num = NDTRS; 10.163 } 10.164 for ( i=0; i<num; i++ ) { 10.165 - if ( !INVALID_ENTRY(hcb,&tr[i]) && 10.166 - __is_translated(&tr[i], rid, va, cl) ) 10.167 + if ( !INVALID_TR(&tr[i]) && 10.168 + __is_tr_translated(&tr[i], rid, va, cl) ) 10.169 return &tr[i]; 10.170 } 10.171 return NULL; 10.172 @@ -227,6 +239,7 @@ thash_data_t *__vtr_lookup(thash_cb_t *h 10.173 * Find overlap VHPT entry within current collision chain 10.174 
* base on internal priv info. 10.175 */ 10.176 +/* 10.177 static inline thash_data_t* _vhpt_next_overlap_in_chain(thash_cb_t *hcb) 10.178 { 10.179 thash_data_t *cch; 10.180 @@ -240,26 +253,27 @@ static inline thash_data_t* _vhpt_next_o 10.181 } 10.182 return NULL; 10.183 } 10.184 - 10.185 +*/ 10.186 /* 10.187 * Find overlap TLB/VHPT entry within current collision chain 10.188 * base on internal priv info. 10.189 */ 10.190 +/* 10.191 static thash_data_t *_vtlb_next_overlap_in_chain(thash_cb_t *hcb) 10.192 { 10.193 thash_data_t *cch; 10.194 thash_internal_t *priv = &hcb->priv; 10.195 10.196 - /* Find overlap TLB entry */ 10.197 + // Find overlap TLB entry 10.198 for (cch=priv->cur_cch; cch; cch = cch->next) { 10.199 if ( ( cch->tc ? priv->s_sect.tc : priv->s_sect.tr ) && 10.200 - __is_tlb_overlap(hcb, cch, priv->rid, priv->cl, 10.201 - priv->_curva, priv->_eva) ) { 10.202 + __is_translated( cch, priv->rid, priv->_curva, priv->cl)) { 10.203 return cch; 10.204 } 10.205 } 10.206 return NULL; 10.207 } 10.208 + */ 10.209 10.210 /* 10.211 * Get the machine format of VHPT entry. 10.212 @@ -281,26 +295,190 @@ int __tlb_to_vhpt(thash_cb_t *hcb, 10.213 thash_data_t *tlb, u64 va, 10.214 thash_data_t *vhpt) 10.215 { 10.216 - u64 pages,mfn; 10.217 - ia64_rr vrr; 10.218 - 10.219 + u64 pages,mfn,padr,pte; 10.220 +// ia64_rr vrr; 10.221 ASSERT ( hcb->ht == THASH_VHPT ); 10.222 - vrr = (hcb->get_rr_fn)(hcb->vcpu,va); 10.223 - pages = PSIZE(vrr.ps) >> PAGE_SHIFT; 10.224 - mfn = (unsigned long)(hcb->vs->get_mfn)(DOMID_SELF,tlb->ppn, pages); 10.225 - if ( mfn == INVALID_MFN ) return 0; 10.226 - 10.227 +// vrr = (hcb->get_rr_fn)(hcb->vcpu,va); 10.228 + padr = tlb->ppn >>(tlb->ps-ARCH_PAGE_SHIFT)<<tlb->ps; 10.229 + padr += va&((1UL<<tlb->ps)-1); 10.230 + pte=lookup_domain_mpa(current->domain,padr); 10.231 + if((pte>>56)) 10.232 + return 0; 10.233 // TODO with machine discontinuous address space issue. 
10.234 - vhpt->etag =(unsigned long) (hcb->vs->tag_func)( hcb->pta, tlb->vadr); 10.235 + vhpt->etag = ia64_ttag(va); 10.236 //vhpt->ti = 0; 10.237 vhpt->itir = tlb->itir & ~ITIR_RV_MASK; 10.238 vhpt->page_flags = tlb->page_flags & ~PAGE_FLAGS_RV_MASK; 10.239 - vhpt->ppn = mfn; 10.240 + vhpt->ps = PAGE_SHIFT; 10.241 + vhpt->ppn = (pte&((1UL<<IA64_MAX_PHYS_BITS)-(1UL<<PAGE_SHIFT)))>>ARCH_PAGE_SHIFT; 10.242 vhpt->next = 0; 10.243 return 1; 10.244 } 10.245 10.246 +static void thash_remove_cch(thash_cb_t *hcb, thash_data_t *hash) 10.247 +{ 10.248 + thash_data_t *prev, *next; 10.249 + prev = hash; next= hash->next; 10.250 + while(next){ 10.251 + prev=next; 10.252 + next=prev->next; 10.253 + cch_free(hcb, prev); 10.254 + } 10.255 + hash->next = NULL; 10.256 + hash->len = 0; 10.257 +} 10.258 10.259 +/* vhpt only has entries with PAGE_SIZE page size */ 10.260 + 10.261 +void thash_vhpt_insert(thash_cb_t *hcb, thash_data_t *entry, u64 va) 10.262 +{ 10.263 + thash_data_t vhpt_entry, *hash_table, *cch; 10.264 +// ia64_rr vrr; 10.265 + 10.266 + if ( !__tlb_to_vhpt(hcb, entry, va, &vhpt_entry) ) { 10.267 + return; 10.268 + //panic("Can't convert to machine VHPT entry\n"); 10.269 + } 10.270 + 10.271 + hash_table = ia64_thash(va); 10.272 + if( INVALID_VHPT(hash_table) ) { 10.273 + *hash_table = vhpt_entry; 10.274 + hash_table->next = 0; 10.275 + return; 10.276 + } 10.277 + 10.278 + cch = hash_table; 10.279 + while(cch){ 10.280 + if(cch->etag == vhpt_entry.etag){ 10.281 + if(cch->ppn == vhpt_entry.ppn) 10.282 + return; 10.283 + else 10.284 + while(1); 10.285 + } 10.286 + cch = cch->next; 10.287 + } 10.288 + if(hash_table->len>=MAX_CCN_DEPTH){ 10.289 + thash_remove_cch(hcb, hash_table); 10.290 + cch = cch_alloc(hcb); 10.291 + *cch = *hash_table; 10.292 + *hash_table = vhpt_entry; 10.293 + hash_table->len = 1; 10.294 + hash_table->next = cch; 10.295 + return; 10.296 + } 10.297 + 10.298 + // TODO: Add collision chain length limitation. 
10.299 + cch = __alloc_chain(hcb,entry); 10.300 + if(cch == NULL){ 10.301 + *hash_table = vhpt_entry; 10.302 + hash_table->next = 0; 10.303 + }else{ 10.304 + *cch = *hash_table; 10.305 + *hash_table = vhpt_entry; 10.306 + hash_table->next = cch; 10.307 + hash_table->len = cch->len + 1; 10.308 + cch->len = 0; 10.309 +// if(hash_table->tag==hash_table->next->tag) 10.310 +// while(1); 10.311 + 10.312 + } 10.313 + return /*hash_table*/; 10.314 +} 10.315 + 10.316 +/* 10.317 + * vhpt lookup 10.318 + */ 10.319 + 10.320 +thash_data_t * vhpt_lookup(u64 va) 10.321 +{ 10.322 + thash_data_t *hash; 10.323 + u64 tag; 10.324 + hash = ia64_thash(va); 10.325 + tag = ia64_ttag(va); 10.326 + while(hash){ 10.327 + if(hash->etag == tag) 10.328 + return hash; 10.329 + hash=hash->next; 10.330 + } 10.331 + return NULL; 10.332 +} 10.333 + 10.334 + 10.335 +/* 10.336 + * purge software guest tlb 10.337 + */ 10.338 + 10.339 +static void vtlb_purge(thash_cb_t *hcb, u64 va, u64 ps) 10.340 +{ 10.341 + thash_data_t *hash_table, *prev, *next; 10.342 + u64 start, end, size, tag, rid; 10.343 + ia64_rr vrr; 10.344 + vrr=vmx_vcpu_rr(current, va); 10.345 + rid = vrr.rid; 10.346 + size = PSIZE(ps); 10.347 + start = va & (-size); 10.348 + end = start + size; 10.349 + while(start < end){ 10.350 + hash_table = vsa_thash(hcb->pta, start, vrr.rrval, &tag); 10.351 +// tag = ia64_ttag(start); 10.352 + if(!INVALID_TLB(hash_table)){ 10.353 + if(hash_table->etag == tag){ 10.354 + __rem_hash_head(hcb, hash_table); 10.355 + } 10.356 + else{ 10.357 + prev=hash_table; 10.358 + next=prev->next; 10.359 + while(next){ 10.360 + if(next->etag == tag){ 10.361 + prev->next=next->next; 10.362 + cch_free(hcb,next); 10.363 + hash_table->len--; 10.364 + break; 10.365 + } 10.366 + prev=next; 10.367 + next=next->next; 10.368 + } 10.369 + } 10.370 + } 10.371 + start += PAGE_SIZE; 10.372 + } 10.373 +// machine_tlb_purge(va, ps); 10.374 +} 10.375 +/* 10.376 + * purge VHPT and machine TLB 10.377 + */ 10.378 + 10.379 +static void 
vhpt_purge(thash_cb_t *hcb, u64 va, u64 ps) 10.380 +{ 10.381 + thash_data_t *hash_table, *prev, *next; 10.382 + u64 start, end, size, tag; 10.383 + size = PSIZE(ps); 10.384 + start = va & (-size); 10.385 + end = start + size; 10.386 + while(start < end){ 10.387 + hash_table = ia64_thash(start); 10.388 + tag = ia64_ttag(start); 10.389 + if(hash_table->etag == tag ){ 10.390 + __rem_hash_head(hcb, hash_table); 10.391 + } 10.392 + else{ 10.393 + prev=hash_table; 10.394 + next=prev->next; 10.395 + while(next){ 10.396 + if(next->etag == tag){ 10.397 + prev->next=next->next; 10.398 + cch_free(hcb,next); 10.399 + hash_table->len--; 10.400 + break; 10.401 + } 10.402 + prev=next; 10.403 + next=next->next; 10.404 + } 10.405 + } 10.406 + start += PAGE_SIZE; 10.407 + } 10.408 + machine_tlb_purge(va, ps); 10.409 +} 10.410 /* 10.411 * Insert an entry to hash table. 10.412 * NOTES: 10.413 @@ -327,43 +505,62 @@ void thash_tr_insert(thash_cb_t *hcb, th 10.414 entry->vadr = PAGEALIGN(entry->vadr,entry->ps); 10.415 entry->ppn = PAGEALIGN(entry->ppn, entry->ps-12); 10.416 rep_tr(hcb, entry, idx); 10.417 +// thash_vhpt_insert(hcb->ts->vhpt, entry, va); 10.418 return ; 10.419 } 10.420 + 10.421 + 10.422 +/* 10.423 + * Recycle all collisions chain in VTLB or VHPT. 
10.424 + * 10.425 + */ 10.426 + 10.427 +void thash_recycle_cch(thash_cb_t *hcb) 10.428 +{ 10.429 + thash_data_t *hash_table; 10.430 + 10.431 + hash_table = (thash_data_t*)((u64)hcb->hash + hcb->hash_sz); 10.432 + for (--hash_table;(u64)hash_table >= (u64)hcb->hash;hash_table--) { 10.433 + thash_remove_cch(hcb,hash_table); 10.434 + } 10.435 +} 10.436 +/* 10.437 thash_data_t *vtlb_alloc_chain(thash_cb_t *hcb,thash_data_t *entry) 10.438 { 10.439 thash_data_t *cch; 10.440 - 10.441 + 10.442 cch = cch_alloc(hcb); 10.443 if(cch == NULL){ 10.444 - thash_purge_all(hcb); 10.445 + thash_recycle_cch(hcb); 10.446 + cch = cch_alloc(hcb); 10.447 } 10.448 return cch; 10.449 } 10.450 - 10.451 +*/ 10.452 10.453 thash_data_t *__alloc_chain(thash_cb_t *hcb,thash_data_t *entry) 10.454 { 10.455 thash_data_t *cch; 10.456 - 10.457 + 10.458 cch = cch_alloc(hcb); 10.459 if(cch == NULL){ 10.460 // recycle 10.461 - if ( hcb->recycle_notifier ) { 10.462 - hcb->recycle_notifier(hcb,(u64)entry); 10.463 - } 10.464 - thash_purge_all(hcb); 10.465 -// cch = cch_alloc(hcb); 10.466 +// if ( hcb->recycle_notifier ) { 10.467 +// hcb->recycle_notifier(hcb,(u64)entry); 10.468 +// } 10.469 + thash_recycle_cch(hcb); 10.470 + cch = cch_alloc(hcb); 10.471 } 10.472 return cch; 10.473 } 10.474 - 10.475 + 10.476 /* 10.477 * Insert an entry into hash TLB or VHPT. 10.478 * NOTES: 10.479 * 1: When inserting VHPT to thash, "va" is a must covered 10.480 * address by the inserted machine VHPT entry. 10.481 * 2: The format of entry is always in TLB. 10.482 - * 3: The caller need to make sure the new entry will not overlap 10.483 + * 3: The caller need to make sure the new entry will not overlap 10.484 * with any existed entry. 
10.485 */ 10.486 void vtlb_insert(thash_cb_t *hcb, thash_data_t *entry, u64 va) 10.487 @@ -372,16 +569,32 @@ void vtlb_insert(thash_cb_t *hcb, thash_ 10.488 int flag; 10.489 ia64_rr vrr; 10.490 u64 gppn; 10.491 - u64 ppns, ppne; 10.492 - 10.493 - hash_table = (thash_data_t *)(hcb->hash_func)(hcb->pta, va); 10.494 - if( INVALID_ENTRY(hcb, hash_table) ) { 10.495 + u64 ppns, ppne, tag; 10.496 + vrr=vmx_vcpu_rr(current, va); 10.497 + if (vrr.ps != entry->ps) { 10.498 +// machine_tlb_insert(hcb->vcpu, entry); 10.499 + panic("not preferred ps with va: 0x%lx\n", va); 10.500 + return; 10.501 + } 10.502 + entry->vadr = PAGEALIGN(entry->vadr,entry->ps); 10.503 + entry->ppn = PAGEALIGN(entry->ppn, entry->ps-12); 10.504 + hash_table = vsa_thash(hcb->pta, va, vrr.rrval, &tag); 10.505 + entry->etag = tag; 10.506 + if( INVALID_TLB(hash_table) ) { 10.507 *hash_table = *entry; 10.508 hash_table->next = 0; 10.509 } 10.510 + else if (hash_table->len>=MAX_CCN_DEPTH){ 10.511 + thash_remove_cch(hcb, hash_table); 10.512 + cch = cch_alloc(hcb); 10.513 + *cch = *hash_table; 10.514 + *hash_table = *entry; 10.515 + hash_table->len = 1; 10.516 + hash_table->next = cch; 10.517 + } 10.518 else { 10.519 // TODO: Add collision chain length limitation. 
10.520 - cch = vtlb_alloc_chain(hcb,entry); 10.521 + cch = __alloc_chain(hcb,entry); 10.522 if(cch == NULL){ 10.523 *hash_table = *entry; 10.524 hash_table->next = 0; 10.525 @@ -389,22 +602,17 @@ void vtlb_insert(thash_cb_t *hcb, thash_ 10.526 *cch = *hash_table; 10.527 *hash_table = *entry; 10.528 hash_table->next = cch; 10.529 + hash_table->len = cch->len + 1; 10.530 + cch->len = 0; 10.531 } 10.532 } 10.533 +#if 0 10.534 if(hcb->vcpu->domain->domain_id==0){ 10.535 thash_insert(hcb->ts->vhpt, entry, va); 10.536 return; 10.537 } 10.538 - 10.539 -#if 1 10.540 - vrr=vmx_vcpu_rr(current, va); 10.541 - if (vrr.ps != entry->ps) { 10.542 - machine_tlb_insert(hcb->vcpu, entry); 10.543 - printk("not preferred ps with va: 0x%lx\n", va); 10.544 - return; 10.545 - } 10.546 -#endif 10.547 - 10.548 +#endif 10.549 +/* 10.550 flag = 1; 10.551 gppn = (POFFSET(va,entry->ps)|PAGEALIGN((entry->ppn<<12),entry->ps))>>PAGE_SHIFT; 10.552 ppns = PAGEALIGN((entry->ppn<<12),entry->ps); 10.553 @@ -413,46 +621,18 @@ void vtlb_insert(thash_cb_t *hcb, thash_ 10.554 flag = 0; 10.555 if((__gpfn_is_mem(hcb->vcpu->domain, gppn)&&flag)) 10.556 thash_insert(hcb->ts->vhpt, entry, va); 10.557 +*/ 10.558 return ; 10.559 } 10.560 10.561 -static void vhpt_insert(thash_cb_t *hcb, thash_data_t *entry, u64 va) 10.562 -{ 10.563 - thash_data_t vhpt_entry, *hash_table, *cch; 10.564 10.565 - if ( !__tlb_to_vhpt(hcb, entry, va, &vhpt_entry) ) { 10.566 - panic("Can't convert to machine VHPT entry\n"); 10.567 - } 10.568 - hash_table = (thash_data_t *)(hcb->hash_func)(hcb->pta, va); 10.569 - if( INVALID_ENTRY(hcb, hash_table) ) { 10.570 - *hash_table = vhpt_entry; 10.571 - hash_table->next = 0; 10.572 - } 10.573 - else { 10.574 - // TODO: Add collision chain length limitation. 
10.575 - cch = __alloc_chain(hcb,entry); 10.576 - if(cch == NULL){ 10.577 - *hash_table = vhpt_entry; 10.578 - hash_table->next = 0; 10.579 - }else{ 10.580 - *cch = *hash_table; 10.581 - *hash_table = vhpt_entry; 10.582 - hash_table->next = cch; 10.583 - if(hash_table->tag==hash_table->next->tag) 10.584 - while(1); 10.585 - 10.586 - } 10.587 - 10.588 - } 10.589 - return /*hash_table*/; 10.590 -} 10.591 - 10.592 +/* 10.593 void thash_insert(thash_cb_t *hcb, thash_data_t *entry, u64 va) 10.594 { 10.595 - //thash_data_t *hash_table; 10.596 + thash_data_t *hash_table; 10.597 ia64_rr vrr; 10.598 10.599 - vrr = (hcb->get_rr_fn)(hcb->vcpu,entry->vadr); 10.600 + vrr = vmx_vcpu_rr(hcb->vcpu,entry->vadr); 10.601 if ( entry->ps != vrr.ps && entry->tc ) { 10.602 panic("Not support for multiple page size now\n"); 10.603 } 10.604 @@ -461,11 +641,13 @@ void thash_insert(thash_cb_t *hcb, thash 10.605 (hcb->ins_hash)(hcb, entry, va); 10.606 10.607 } 10.608 - 10.609 +*/ 10.610 +/* 10.611 static void rem_thash(thash_cb_t *hcb, thash_data_t *entry) 10.612 { 10.613 thash_data_t *hash_table, *p, *q; 10.614 thash_internal_t *priv = &hcb->priv; 10.615 + int idx; 10.616 10.617 hash_table = priv->hash_base; 10.618 if ( hash_table == entry ) { 10.619 @@ -481,6 +663,7 @@ static void rem_thash(thash_cb_t *hcb, t 10.620 // if ( PURGABLE_ENTRY(hcb,q ) ) { 10.621 p->next = q->next; 10.622 __rem_chain(hcb, entry); 10.623 + hash_table->len--; 10.624 // } 10.625 return ; 10.626 } 10.627 @@ -488,16 +671,20 @@ static void rem_thash(thash_cb_t *hcb, t 10.628 } 10.629 panic("Entry not existed or bad sequence\n"); 10.630 } 10.631 - 10.632 +*/ 10.633 +/* 10.634 static void rem_vtlb(thash_cb_t *hcb, thash_data_t *entry) 10.635 { 10.636 + thash_data_t *hash_table, *p, *q; 10.637 + thash_internal_t *priv = &hcb->priv; 10.638 + int idx; 10.639 10.640 if ( !entry->tc ) { 10.641 return rem_tr(hcb, entry->cl, entry->tr_idx); 10.642 } 10.643 rem_thash(hcb, entry); 10.644 } 10.645 - 10.646 +*/ 10.647 int 
cch_depth=0; 10.648 /* 10.649 * Purge the collision chain starting from cch. 10.650 @@ -505,6 +692,7 @@ int cch_depth=0; 10.651 * For those UN-Purgable entries(FM), this function will return 10.652 * the head of left collision chain. 10.653 */ 10.654 +/* 10.655 static thash_data_t *thash_rem_cch(thash_cb_t *hcb, thash_data_t *cch) 10.656 { 10.657 thash_data_t *next; 10.658 @@ -536,10 +724,11 @@ static thash_data_t *thash_rem_cch(thash 10.659 * hash: The head of collision chain (hash table) 10.660 * 10.661 */ 10.662 +/* 10.663 static void thash_rem_line(thash_cb_t *hcb, thash_data_t *hash) 10.664 { 10.665 if ( INVALID_ENTRY(hcb, hash) ) return; 10.666 - 10.667 + 10.668 if ( hash->next ) { 10.669 cch_depth = 0; 10.670 hash->next = thash_rem_cch(hcb, hash->next); 10.671 @@ -549,6 +738,7 @@ static void thash_rem_line(thash_cb_t *h 10.672 __rem_hash_head(hcb, hash); 10.673 } 10.674 } 10.675 + */ 10.676 10.677 /* 10.678 * Find an overlap entry in hash table and its collision chain. 10.679 @@ -563,35 +753,18 @@ static void thash_rem_line(thash_cb_t *h 10.680 * NOTES: 10.681 * 10.682 */ 10.683 -thash_data_t *thash_find_overlap(thash_cb_t *hcb, 10.684 + 10.685 +/* 10.686 +thash_data_t *thash_find_overlap(thash_cb_t *hcb, 10.687 thash_data_t *in, search_section_t s_sect) 10.688 { 10.689 - return (hcb->find_overlap)(hcb, in->vadr, 10.690 + return (hcb->find_overlap)(hcb, in->vadr, 10.691 PSIZE(in->ps), in->rid, in->cl, s_sect); 10.692 } 10.693 - 10.694 -static thash_data_t *vtlb_find_overlap(thash_cb_t *hcb, 10.695 - u64 va, u64 size, int rid, char cl, search_section_t s_sect) 10.696 -{ 10.697 - thash_data_t *hash_table; 10.698 - thash_internal_t *priv = &hcb->priv; 10.699 - ia64_rr vrr; 10.700 +*/ 10.701 10.702 - priv->_curva = va & ~(size-1); 10.703 - priv->_eva = priv->_curva + size; 10.704 - priv->rid = rid; 10.705 - vrr = (hcb->get_rr_fn)(hcb->vcpu,va); 10.706 - priv->ps = vrr.ps; 10.707 - hash_table =(thash_data_t *)(hcb->hash_func)(hcb->pta, priv->_curva); 10.708 - 
priv->s_sect = s_sect; 10.709 - priv->cl = cl; 10.710 - priv->_tr_idx = 0; 10.711 - priv->hash_base = hash_table; 10.712 - priv->cur_cch = hash_table; 10.713 - return (hcb->next_overlap)(hcb); 10.714 -} 10.715 - 10.716 -static thash_data_t *vhpt_find_overlap(thash_cb_t *hcb, 10.717 +/* 10.718 +static thash_data_t *vtlb_find_overlap(thash_cb_t *hcb, 10.719 u64 va, u64 size, int rid, char cl, search_section_t s_sect) 10.720 { 10.721 thash_data_t *hash_table; 10.722 @@ -602,17 +775,67 @@ static thash_data_t *vhpt_find_overlap(t 10.723 priv->_curva = va & ~(size-1); 10.724 priv->_eva = priv->_curva + size; 10.725 priv->rid = rid; 10.726 - vrr = (hcb->get_rr_fn)(hcb->vcpu,va); 10.727 + vrr = vmx_vcpu_rr(hcb->vcpu,va); 10.728 priv->ps = vrr.ps; 10.729 - hash_table = (thash_data_t *)(hcb->hash_func)( hcb->pta, priv->_curva); 10.730 - tag = (unsigned long)(hcb->vs->tag_func)( hcb->pta, priv->_curva); 10.731 + hash_table = vsa_thash(hcb->pta, priv->_curva, vrr.rrval, &tag); 10.732 + priv->s_sect = s_sect; 10.733 + priv->cl = cl; 10.734 + priv->_tr_idx = 0; 10.735 + priv->hash_base = hash_table; 10.736 + priv->cur_cch = hash_table; 10.737 + return (hcb->next_overlap)(hcb); 10.738 +} 10.739 +*/ 10.740 + 10.741 +/* 10.742 +static thash_data_t *vhpt_find_overlap(thash_cb_t *hcb, 10.743 + u64 va, u64 size, int rid, char cl, search_section_t s_sect) 10.744 +{ 10.745 + thash_data_t *hash_table; 10.746 + thash_internal_t *priv = &hcb->priv; 10.747 + u64 tag; 10.748 + ia64_rr vrr; 10.749 + 10.750 + priv->_curva = va & ~(size-1); 10.751 + priv->_eva = priv->_curva + size; 10.752 + priv->rid = rid; 10.753 + vrr = vmx_vcpu_rr(hcb->vcpu,va); 10.754 + priv->ps = vrr.ps; 10.755 + hash_table = ia64_thash(priv->_curva); 10.756 + tag = ia64_ttag(priv->_curva); 10.757 priv->tag = tag; 10.758 priv->hash_base = hash_table; 10.759 priv->cur_cch = hash_table; 10.760 return (hcb->next_overlap)(hcb); 10.761 } 10.762 +*/ 10.763 10.764 10.765 +thash_data_t *vtr_find_overlap(thash_cb_t *hcb, 
thash_data_t *data, char cl) 10.766 +{ 10.767 + thash_data_t *tr; 10.768 + int i,num; 10.769 + u64 end; 10.770 + 10.771 + if (cl == ISIDE_TLB ) { 10.772 + num = NITRS; 10.773 + tr = &ITR(hcb,0); 10.774 + } 10.775 + else { 10.776 + num = NDTRS; 10.777 + tr = &DTR(hcb,0); 10.778 + } 10.779 + end=data->vadr + PSIZE(data->ps); 10.780 + for (i=0; i<num; i++ ) { 10.781 + if ( __is_tr_overlap(hcb, &tr[i], data->rid, cl, data->vadr, end )) { 10.782 + return &tr[i]; 10.783 + } 10.784 + } 10.785 + return NULL; 10.786 +} 10.787 + 10.788 + 10.789 +/* 10.790 static thash_data_t *vtr_find_next_overlap(thash_cb_t *hcb) 10.791 { 10.792 thash_data_t *tr; 10.793 @@ -628,25 +851,27 @@ static thash_data_t *vtr_find_next_overl 10.794 tr = &DTR(hcb,0); 10.795 } 10.796 for (; priv->_tr_idx < num; priv->_tr_idx ++ ) { 10.797 - if ( __is_tlb_overlap(hcb, &tr[(unsigned)priv->_tr_idx], 10.798 + if ( __is_tr_overlap(hcb, &tr[priv->_tr_idx], 10.799 priv->rid, priv->cl, 10.800 priv->_curva, priv->_eva) ) { 10.801 - return &tr[(unsigned)priv->_tr_idx++]; 10.802 + return &tr[priv->_tr_idx++]; 10.803 } 10.804 } 10.805 return NULL; 10.806 } 10.807 +*/ 10.808 10.809 /* 10.810 * Similar with vtlb_next_overlap but find next entry. 10.811 * NOTES: 10.812 * Intermediate position information is stored in hcb->priv. 
10.813 */ 10.814 +/* 10.815 static thash_data_t *vtlb_next_overlap(thash_cb_t *hcb) 10.816 { 10.817 thash_data_t *ovl; 10.818 thash_internal_t *priv = &hcb->priv; 10.819 - u64 rr_psize; 10.820 + u64 addr,rr_psize,tag; 10.821 ia64_rr vrr; 10.822 10.823 if ( priv->s_sect.tr ) { 10.824 @@ -655,7 +880,7 @@ static thash_data_t *vtlb_next_overlap(t 10.825 priv->s_sect.tr = 0; 10.826 } 10.827 if ( priv->s_sect.v == 0 ) return NULL; 10.828 - vrr = (hcb->get_rr_fn)(hcb->vcpu,priv->_curva); 10.829 + vrr = vmx_vcpu_rr(hcb->vcpu,priv->_curva); 10.830 rr_psize = PSIZE(vrr.ps); 10.831 10.832 while ( priv->_curva < priv->_eva ) { 10.833 @@ -667,20 +892,23 @@ static thash_data_t *vtlb_next_overlap(t 10.834 } 10.835 } 10.836 priv->_curva += rr_psize; 10.837 - priv->hash_base = (thash_data_t *)(hcb->hash_func)( hcb->pta, priv->_curva); 10.838 + priv->hash_base = vsa_thash( hcb->pta, priv->_curva, vrr.rrval, &tag); 10.839 priv->cur_cch = priv->hash_base; 10.840 } 10.841 return NULL; 10.842 } 10.843 + */ 10.844 10.845 + 10.846 +/* 10.847 static thash_data_t *vhpt_next_overlap(thash_cb_t *hcb) 10.848 { 10.849 thash_data_t *ovl; 10.850 thash_internal_t *priv = &hcb->priv; 10.851 - u64 rr_psize; 10.852 + u64 addr,rr_psize; 10.853 ia64_rr vrr; 10.854 10.855 - vrr = (hcb->get_rr_fn)(hcb->vcpu,priv->_curva); 10.856 + vrr = vmx_vcpu_rr(hcb->vcpu,priv->_curva); 10.857 rr_psize = PSIZE(vrr.ps); 10.858 10.859 while ( priv->_curva < priv->_eva ) { 10.860 @@ -692,13 +920,13 @@ static thash_data_t *vhpt_next_overlap(t 10.861 } 10.862 } 10.863 priv->_curva += rr_psize; 10.864 - priv->hash_base =(thash_data_t *)(hcb->hash_func)( hcb->pta, priv->_curva); 10.865 - priv->tag = (unsigned long)(hcb->vs->tag_func)( hcb->pta, priv->_curva); 10.866 + priv->hash_base = ia64_thash(priv->_curva); 10.867 + priv->tag = ia64_ttag(priv->_curva); 10.868 priv->cur_cch = priv->hash_base; 10.869 } 10.870 return NULL; 10.871 } 10.872 - 10.873 +*/ 10.874 10.875 /* 10.876 * Find and purge overlap entries in hash table 
and its collision chain. 10.877 @@ -710,7 +938,7 @@ static thash_data_t *vhpt_next_overlap(t 10.878 * NOTES: 10.879 * 10.880 */ 10.881 -void thash_purge_entries(thash_cb_t *hcb, 10.882 +void thash_purge_entries(thash_cb_t *hcb, 10.883 thash_data_t *in, search_section_t p_sect) 10.884 { 10.885 return thash_purge_entries_ex(hcb, in->rid, in->vadr, 10.886 @@ -718,30 +946,33 @@ void thash_purge_entries(thash_cb_t *hcb 10.887 } 10.888 10.889 void thash_purge_entries_ex(thash_cb_t *hcb, 10.890 - u64 rid, u64 va, u64 ps, 10.891 - search_section_t p_sect, 10.892 + u64 rid, u64 va, u64 ps, 10.893 + search_section_t p_sect, 10.894 CACHE_LINE_TYPE cl) 10.895 { 10.896 thash_data_t *ovl; 10.897 10.898 - ovl = (hcb->find_overlap)(hcb, va, PSIZE(ps), rid, cl, p_sect); 10.899 +/* ovl = (hcb->find_overlap)(hcb, va, PSIZE(ps), rid, cl, p_sect); 10.900 while ( ovl != NULL ) { 10.901 (hcb->rem_hash)(hcb, ovl); 10.902 ovl = (hcb->next_overlap)(hcb); 10.903 }; 10.904 + */ 10.905 + vtlb_purge(hcb, va, ps); 10.906 + vhpt_purge(hcb->ts->vhpt, va, ps); 10.907 } 10.908 10.909 /* 10.910 * Purge overlap TCs and then insert the new entry to emulate itc ops. 10.911 * Notes: Only TC entry can purge and insert. 
10.912 */ 10.913 -void thash_purge_and_insert(thash_cb_t *hcb, thash_data_t *in) 10.914 +void thash_purge_and_insert(thash_cb_t *hcb, thash_data_t *in, u64 va) 10.915 { 10.916 thash_data_t *ovl; 10.917 search_section_t sections; 10.918 10.919 #ifdef XEN_DEBUGGER 10.920 - vrr = (hcb->get_rr_fn)(hcb->vcpu,in->vadr); 10.921 + vrr = vmx_vcpu_rr(hcb->vcpu,in->vadr); 10.922 if ( in->ps != vrr.ps || hcb->ht != THASH_TLB || !in->tc ) { 10.923 panic ("Oops, wrong call for purge_and_insert\n"); 10.924 return; 10.925 @@ -751,10 +982,14 @@ void thash_purge_and_insert(thash_cb_t * 10.926 in->ppn = PAGEALIGN(in->ppn, in->ps-12); 10.927 sections.tr = 0; 10.928 sections.tc = 1; 10.929 +/* 10.930 ovl = (hcb->find_overlap)(hcb, in->vadr, PSIZE(in->ps), 10.931 in->rid, in->cl, sections); 10.932 if(ovl) 10.933 (hcb->rem_hash)(hcb, ovl); 10.934 + */ 10.935 + vtlb_purge(hcb, va, in->ps); 10.936 + vhpt_purge(hcb->ts->vhpt, va, in->ps); 10.937 #ifdef XEN_DEBUGGER 10.938 ovl = (hcb->next_overlap)(hcb); 10.939 if ( ovl ) { 10.940 @@ -762,7 +997,9 @@ void thash_purge_and_insert(thash_cb_t * 10.941 return; 10.942 } 10.943 #endif 10.944 - (hcb->ins_hash)(hcb, in, in->vadr); 10.945 + if(in->ps!=PAGE_SHIFT) 10.946 + vtlb_insert(hcb, in, va); 10.947 + thash_vhpt_insert(hcb->ts->vhpt, in, va); 10.948 } 10.949 /* 10.950 * Purge one hash line (include the entry in hash table). 10.951 @@ -771,6 +1008,7 @@ void thash_purge_and_insert(thash_cb_t * 10.952 * hash: The head of collision chain (hash table) 10.953 * 10.954 */ 10.955 +/* 10.956 static void thash_purge_line(thash_cb_t *hcb, thash_data_t *hash) 10.957 { 10.958 if ( INVALID_ENTRY(hcb, hash) ) return; 10.959 @@ -784,6 +1022,16 @@ static void thash_purge_line(thash_cb_t 10.960 // Then hash table itself. 10.961 INVALIDATE_HASH(hcb, hash); 10.962 } 10.963 +*/ 10.964 + 10.965 + 10.966 + 10.967 + 10.968 + 10.969 + 10.970 + 10.971 + 10.972 + 10.973 /* 10.974 * Purge all TCs or VHPT entries including those in Hash table. 
10.975 * 10.976 @@ -792,8 +1040,10 @@ static void thash_purge_line(thash_cb_t 10.977 // TODO: add sections. 10.978 void thash_purge_all(thash_cb_t *hcb) 10.979 { 10.980 - thash_data_t *hash_table; 10.981 - 10.982 + thash_data_t *hash_table, *entry; 10.983 + thash_cb_t *vhpt; 10.984 + u64 i, start, end; 10.985 + 10.986 #ifdef VTLB_DEBUG 10.987 extern u64 sanity_check; 10.988 static u64 statistics_before_purge_all=0; 10.989 @@ -802,18 +1052,35 @@ void thash_purge_all(thash_cb_t *hcb) 10.990 check_vtlb_sanity(hcb); 10.991 } 10.992 #endif 10.993 + ASSERT ( hcb->ht == THASH_TLB ); 10.994 10.995 hash_table = (thash_data_t*)((u64)hcb->hash + hcb->hash_sz); 10.996 for (--hash_table;(u64)hash_table >= (u64)hcb->hash;hash_table--) { 10.997 - thash_purge_line(hcb, hash_table); 10.998 + INVALIDATE_TLB_HEADER(hash_table); 10.999 + } 10.1000 + cch_mem_init (hcb); 10.1001 + 10.1002 + vhpt = hcb->ts->vhpt; 10.1003 + hash_table = (thash_data_t*)((u64)vhpt->hash + vhpt->hash_sz); 10.1004 + for (--hash_table;(u64)hash_table >= (u64)vhpt->hash;hash_table--) { 10.1005 + INVALIDATE_VHPT_HEADER(hash_table); 10.1006 } 10.1007 - if(hcb->ht== THASH_TLB) { 10.1008 - hcb = hcb->ts->vhpt; 10.1009 - hash_table = (thash_data_t*)((u64)hcb->hash + hcb->hash_sz); 10.1010 - for (--hash_table;(u64)hash_table >= (u64)hcb->hash;hash_table--) { 10.1011 - thash_purge_line(hcb, hash_table); 10.1012 + cch_mem_init (vhpt); 10.1013 + 10.1014 +/* 10.1015 + entry = &hcb->ts->itr[0]; 10.1016 + for(i=0; i< (NITRS+NDTRS); i++){ 10.1017 + if(!INVALID_TLB(entry)){ 10.1018 + start=entry->vadr & (-PSIZE(entry->ps)); 10.1019 + end = start + PSIZE(entry->ps); 10.1020 + while(start<end){ 10.1021 + thash_vhpt_insert(vhpt, entry, start); 10.1022 + start += PAGE_SIZE; 10.1023 + } 10.1024 } 10.1025 + entry++; 10.1026 } 10.1027 +*/ 10.1028 local_flush_tlb_all(); 10.1029 } 10.1030 10.1031 @@ -836,22 +1103,24 @@ thash_data_t *vtlb_lookup_ex(thash_cb_t 10.1032 CACHE_LINE_TYPE cl) 10.1033 { 10.1034 thash_data_t *hash_table, 
*cch; 10.1035 + u64 tag; 10.1036 ia64_rr vrr; 10.1037 10.1038 - ASSERT ( hcb->ht == THASH_VTLB ); 10.1039 + ASSERT ( hcb->ht == THASH_TLB ); 10.1040 10.1041 cch = __vtr_lookup(hcb, rid, va, cl);; 10.1042 if ( cch ) return cch; 10.1043 10.1044 - vrr = (hcb->get_rr_fn)(hcb->vcpu,va); 10.1045 - hash_table = (thash_data_t *)(hcb->hash_func)( hcb->pta, va); 10.1046 + vrr = vmx_vcpu_rr(hcb->vcpu,va); 10.1047 + hash_table = vsa_thash( hcb->pta, va, vrr.rrval, &tag); 10.1048 10.1049 if ( INVALID_ENTRY(hcb, hash_table ) ) 10.1050 return NULL; 10.1051 10.1052 10.1053 for (cch=hash_table; cch; cch = cch->next) { 10.1054 - if ( __is_translated(cch, rid, va, cl) ) 10.1055 +// if ( __is_translated(cch, rid, va, cl) ) 10.1056 + if(cch->etag == tag) 10.1057 return cch; 10.1058 } 10.1059 return NULL; 10.1060 @@ -864,6 +1133,7 @@ thash_data_t *vtlb_lookup_ex(thash_cb_t 10.1061 * 1: failure 10.1062 * 0: success 10.1063 */ 10.1064 +/* 10.1065 int thash_lock_tc(thash_cb_t *hcb, u64 va, u64 size, int rid, char cl, int lock) 10.1066 { 10.1067 thash_data_t *ovl; 10.1068 @@ -893,6 +1163,7 @@ int thash_lock_tc(thash_cb_t *hcb, u64 v 10.1069 } 10.1070 return 1; 10.1071 } 10.1072 +*/ 10.1073 10.1074 /* 10.1075 * Notifier when TLB is deleted from hash table and its collision chain. 10.1076 @@ -904,16 +1175,17 @@ int thash_lock_tc(thash_cb_t *hcb, u64 v 10.1077 * 2: The format of entry is always in TLB. 
10.1078 * 10.1079 */ 10.1080 -void tlb_remove_notifier(thash_cb_t *hcb, thash_data_t *entry) 10.1081 -{ 10.1082 +//void tlb_remove_notifier(thash_cb_t *hcb, thash_data_t *entry) 10.1083 +//{ 10.1084 +// vhpt_purge(hcb->ts->vhpt,entry->vadr,entry->ps); 10.1085 // thash_cb_t *vhpt; 10.1086 - search_section_t s_sect; 10.1087 10.1088 - s_sect.v = 0; 10.1089 - thash_purge_entries(hcb->ts->vhpt, entry, s_sect); 10.1090 - machine_tlb_purge(entry->vadr, entry->ps); 10.1091 - return; 10.1092 -} 10.1093 +// search_section_t s_sect; 10.1094 + 10.1095 +// s_sect.v = 0; 10.1096 +// thash_purge_entries(hcb->ts->vhpt, entry, s_sect); 10.1097 +// machine_tlb_purge(entry->vadr, entry->ps); 10.1098 +//} 10.1099 10.1100 /* 10.1101 * Initialize internal control data before service. 10.1102 @@ -928,30 +1200,29 @@ void thash_init(thash_cb_t *hcb, u64 sz) 10.1103 hcb->pta.vf = 1; 10.1104 hcb->pta.ve = 1; 10.1105 hcb->pta.size = sz; 10.1106 - hcb->get_rr_fn = vmmu_get_rr; 10.1107 +// hcb->get_rr_fn = vmmu_get_rr; 10.1108 ASSERT ( hcb->hash_sz % sizeof(thash_data_t) == 0 ); 10.1109 if ( hcb->ht == THASH_TLB ) { 10.1110 - hcb->remove_notifier = tlb_remove_notifier; 10.1111 - hcb->find_overlap = vtlb_find_overlap; 10.1112 - hcb->next_overlap = vtlb_next_overlap; 10.1113 - hcb->rem_hash = rem_vtlb; 10.1114 - hcb->ins_hash = vtlb_insert; 10.1115 +// hcb->remove_notifier = NULL; //tlb_remove_notifier; 10.1116 +// hcb->find_overlap = vtlb_find_overlap; 10.1117 +// hcb->next_overlap = vtlb_next_overlap; 10.1118 +// hcb->rem_hash = rem_vtlb; 10.1119 +// hcb->ins_hash = vtlb_insert; 10.1120 __init_tr(hcb); 10.1121 } 10.1122 else { 10.1123 - hcb->remove_notifier = NULL; 10.1124 - hcb->find_overlap = vhpt_find_overlap; 10.1125 - hcb->next_overlap = vhpt_next_overlap; 10.1126 - hcb->rem_hash = rem_thash; 10.1127 - hcb->ins_hash = vhpt_insert; 10.1128 +// hcb->remove_notifier = NULL; 10.1129 +// hcb->find_overlap = vhpt_find_overlap; 10.1130 +// hcb->next_overlap = vhpt_next_overlap; 10.1131 +// 
hcb->rem_hash = rem_thash; 10.1132 +// hcb->ins_hash = thash_vhpt_insert; 10.1133 } 10.1134 hash_table = (thash_data_t*)((u64)hcb->hash + hcb->hash_sz); 10.1135 - 10.1136 + 10.1137 for (--hash_table;(u64)hash_table >= (u64)hcb->hash;hash_table--) { 10.1138 - INVALIDATE_HASH(hcb,hash_table); 10.1139 + INVALIDATE_HASH_HEADER(hcb,hash_table); 10.1140 } 10.1141 } 10.1142 -#define VTLB_DEBUG 10.1143 #ifdef VTLB_DEBUG 10.1144 static u64 cch_length_statistics[MAX_CCH_LENGTH+1]; 10.1145 u64 sanity_check=0; 10.1146 @@ -961,7 +1232,7 @@ u64 vtlb_chain_sanity(thash_cb_t *vtlb, 10.1147 thash_data_t *ovl; 10.1148 search_section_t s_sect; 10.1149 u64 num=0; 10.1150 - 10.1151 + 10.1152 s_sect.v = 0; 10.1153 for (cch=hash; cch; cch=cch->next) { 10.1154 ovl = thash_find_overlap(vhpt, cch, s_sect); 10.1155 @@ -991,7 +1262,7 @@ void check_vtlb_sanity(thash_cb_t *vtlb) 10.1156 search_section_t s_sect; 10.1157 thash_cb_t *vhpt = vtlb->ts->vhpt; 10.1158 u64 invalid_ratio; 10.1159 - 10.1160 + 10.1161 if ( sanity_check == 0 ) return; 10.1162 sanity_check --; 10.1163 s_sect.v = 0; 10.1164 @@ -1012,9 +1283,9 @@ void check_vtlb_sanity(thash_cb_t *vtlb) 10.1165 for ( i=0; i < sizeof(cch_length_statistics)/sizeof(cch_length_statistics[0]); i++ ) { 10.1166 cch_length_statistics[i] = 0; 10.1167 } 10.1168 - 10.1169 + 10.1170 local_irq_save(psr); 10.1171 - 10.1172 + 10.1173 hash = vhpt->hash; 10.1174 for (i=0; i < hash_num; i++) { 10.1175 if ( !INVALID_ENTRY(vhpt, hash) ) { 10.1176 @@ -1097,12 +1368,12 @@ void dump_vtlb(thash_cb_t *vtlb) 10.1177 static u64 dump_vtlb=0; 10.1178 thash_data_t *hash, *cch, *tr; 10.1179 u64 hash_num,i; 10.1180 - 10.1181 + 10.1182 if ( dump_vtlb == 0 ) return; 10.1183 dump_vtlb --; 10.1184 hash_num = vtlb->hash_sz / sizeof(thash_data_t); 10.1185 hash = vtlb->hash; 10.1186 - 10.1187 + 10.1188 printf("Dump vTC\n"); 10.1189 for ( i = 0; i < hash_num; i++ ) { 10.1190 if ( !INVALID_ENTRY(vtlb, hash) ) {
11.1 --- a/xen/arch/ia64/xen/domain.c Tue Feb 28 13:18:08 2006 -0700 11.2 +++ b/xen/arch/ia64/xen/domain.c Wed Mar 01 08:29:00 2006 -0700 11.3 @@ -484,6 +484,9 @@ void assign_domain_page(struct domain *d 11.4 __pgprot(__DIRTY_BITS | _PAGE_PL_2 | _PAGE_AR_RWX))); 11.5 } 11.6 else printk("assign_domain_page: mpaddr %lx already mapped!\n",mpaddr); 11.7 + if((physaddr>>PAGE_SHIFT)<max_page){ 11.8 + *(mpt_table + (physaddr>>PAGE_SHIFT))=(mpaddr>>PAGE_SHIFT); 11.9 + } 11.10 } 11.11 #if 0 11.12 /* map a physical address with specified I/O flag */
12.1 --- a/xen/include/asm-ia64/config.h Tue Feb 28 13:18:08 2006 -0700 12.2 +++ b/xen/include/asm-ia64/config.h Wed Mar 01 08:29:00 2006 -0700 12.3 @@ -67,7 +67,7 @@ typedef unsigned long paddr_t; 12.4 extern unsigned long xenheap_phys_end; 12.5 extern unsigned long xen_pstart; 12.6 extern unsigned long xenheap_size; 12.7 -extern struct domain *dom0; 12.8 +//extern struct domain *dom0; 12.9 extern unsigned long dom0_start; 12.10 extern unsigned long dom0_size; 12.11
13.1 --- a/xen/include/asm-ia64/mm.h Tue Feb 28 13:18:08 2006 -0700 13.2 +++ b/xen/include/asm-ia64/mm.h Wed Mar 01 08:29:00 2006 -0700 13.3 @@ -134,6 +134,8 @@ extern void __init init_frametable(void) 13.4 #endif 13.5 void add_to_domain_alloc_list(unsigned long ps, unsigned long pe); 13.6 13.7 +extern unsigned long gmfn_to_mfn_foreign(struct domain *d, unsigned long gpfn); 13.8 + 13.9 static inline void put_page(struct page_info *page) 13.10 { 13.11 #ifdef VALIDATE_VT // doesn't work with non-VTI in grant tables yet 13.12 @@ -215,8 +217,8 @@ void memguard_unguard_range(void *p, uns 13.13 #endif 13.14 13.15 // prototype of misc memory stuff 13.16 -unsigned long __get_free_pages(unsigned int mask, unsigned int order); 13.17 -void __free_pages(struct page *page, unsigned int order); 13.18 +//unsigned long __get_free_pages(unsigned int mask, unsigned int order); 13.19 +//void __free_pages(struct page *page, unsigned int order); 13.20 void *pgtable_quicklist_alloc(void); 13.21 void pgtable_quicklist_free(void *pgtable_entry); 13.22 13.23 @@ -436,12 +438,22 @@ extern unsigned long lookup_domain_mpa(s 13.24 13.25 /* Return I/O type if trye */ 13.26 #define __gpfn_is_io(_d, gpfn) \ 13.27 - (__gmfn_valid(_d, gpfn) ? \ 13.28 - (lookup_domain_mpa((_d), ((gpfn)<<PAGE_SHIFT)) & GPFN_IO_MASK) : 0) 13.29 +({ \ 13.30 + u64 pte, ret=0; \ 13.31 + pte=lookup_domain_mpa((_d), ((gpfn)<<PAGE_SHIFT)); \ 13.32 + if(!(pte&GPFN_INV_MASK)) \ 13.33 + ret = pte & GPFN_IO_MASK; \ 13.34 + ret; \ 13.35 +}) 13.36 13.37 #define __gpfn_is_mem(_d, gpfn) \ 13.38 - (__gmfn_valid(_d, gpfn) ? \ 13.39 - ((lookup_domain_mpa((_d), ((gpfn)<<PAGE_SHIFT)) & GPFN_IO_MASK) == GPFN_MEM) : 0) 13.40 +({ \ 13.41 + u64 pte, ret=0; \ 13.42 + pte=lookup_domain_mpa((_d), ((gpfn)<<PAGE_SHIFT)); \ 13.43 + if((!(pte&GPFN_INV_MASK))&&((pte & GPFN_IO_MASK)==GPFN_MEM)) \ 13.44 + ret = 1; \ 13.45 + ret; \ 13.46 +}) 13.47 13.48 13.49 #define __gpa_to_mpa(_d, gpa) \
14.1 --- a/xen/include/asm-ia64/vcpu.h Tue Feb 28 13:18:08 2006 -0700 14.2 +++ b/xen/include/asm-ia64/vcpu.h Wed Mar 01 08:29:00 2006 -0700 14.3 @@ -104,7 +104,6 @@ extern IA64FAULT vcpu_set_itv(VCPU *vcpu 14.4 extern IA64FAULT vcpu_set_pmv(VCPU *vcpu, UINT64 val); 14.5 extern IA64FAULT vcpu_set_cmcv(VCPU *vcpu, UINT64 val); 14.6 /* interval timer registers */ 14.7 -extern IA64FAULT vcpu_set_itm(VCPU *vcpu,UINT64 val); 14.8 extern IA64FAULT vcpu_set_itc(VCPU *vcpu,UINT64 val); 14.9 extern UINT64 vcpu_timer_pending_early(VCPU *vcpu); 14.10 /* debug breakpoint registers */
15.1 --- a/xen/include/asm-ia64/vmmu.h Tue Feb 28 13:18:08 2006 -0700 15.2 +++ b/xen/include/asm-ia64/vmmu.h Wed Mar 01 08:29:00 2006 -0700 15.3 @@ -23,12 +23,26 @@ 15.4 #ifndef XEN_TLBthash_H 15.5 #define XEN_TLBthash_H 15.6 15.7 +#define MAX_CCN_DEPTH 15 // collision chain depth 15.8 +#define VCPU_VTLB_SHIFT (20) // 1M for VTLB 15.9 +#define VCPU_VTLB_SIZE (1UL<<VCPU_VTLB_SHIFT) 15.10 +#define VCPU_VTLB_ORDER (VCPU_VTLB_SHIFT - PAGE_SHIFT) 15.11 +#define VCPU_VHPT_SHIFT (24) // 16M for VTLB 15.12 +#define VCPU_VHPT_SIZE (1UL<<VCPU_VHPT_SHIFT) 15.13 +#define VCPU_VHPT_ORDER (VCPU_VHPT_SHIFT - PAGE_SHIFT) 15.14 + 15.15 +#define PTA_BASE_SHIFT (15) 15.16 + 15.17 + 15.18 + 15.19 +#ifndef __ASSEMBLY__ 15.20 + 15.21 #include <xen/config.h> 15.22 #include <xen/types.h> 15.23 #include <public/xen.h> 15.24 #include <asm/tlb.h> 15.25 #include <asm/regionreg.h> 15.26 - 15.27 +#include <asm/vmx_mm_def.h> 15.28 //#define THASH_TLB_TR 0 15.29 //#define THASH_TLB_TC 1 15.30 15.31 @@ -39,7 +53,15 @@ 15.32 15.33 /* 15.34 * Next bit definition must be same with THASH_TLB_XX 15.35 +#define PTA_BASE_SHIFT (15) 15.36 */ 15.37 + 15.38 + 15.39 + 15.40 + 15.41 +#define HIGH_32BITS(x) bits(x,32,63) 15.42 +#define LOW_32BITS(x) bits(x,0,31) 15.43 + 15.44 typedef union search_section { 15.45 struct { 15.46 u32 tr : 1; 15.47 @@ -49,15 +71,6 @@ typedef union search_section { 15.48 u32 v; 15.49 } search_section_t; 15.50 15.51 -#define MAX_CCN_DEPTH 4 // collision chain depth 15.52 -#define VCPU_TLB_SHIFT (22) 15.53 -#define VCPU_TLB_SIZE (1UL<<VCPU_TLB_SHIFT) 15.54 -#define VCPU_TLB_ORDER VCPU_TLB_SHIFT - PAGE_SHIFT 15.55 -#define PTA_BASE_SHIFT (15) 15.56 - 15.57 -#ifndef __ASSEMBLY__ 15.58 -#define HIGH_32BITS(x) bits(x,32,63) 15.59 -#define LOW_32BITS(x) bits(x,0,31) 15.60 15.61 typedef enum { 15.62 ISIDE_TLB=0, 15.63 @@ -77,18 +90,21 @@ typedef struct thash_data { 15.64 u64 ppn : 38; // 12-49 15.65 u64 rv2 : 2; // 50-51 15.66 u64 ed : 1; // 52 15.67 - u64 ig1 : 11; //53-63 15.68 + u64 ig1 
: 3; // 53-55 15.69 + u64 len : 4; // 56-59 15.70 + u64 ig2 : 3; // 60-63 15.71 }; 15.72 struct { 15.73 u64 __rv1 : 53; // 0-52 15.74 + u64 contiguous : 1; //53 15.75 + u64 tc : 1; // 54 TR or TC 15.76 + CACHE_LINE_TYPE cl : 1; // 55 I side or D side cache line 15.77 // next extension to ig1, only for TLB instance 15.78 - u64 tc : 1; // 53 TR or TC 15.79 - u64 locked : 1; // 54 entry locked or not 15.80 - CACHE_LINE_TYPE cl : 1; // I side or D side cache line 15.81 - u64 nomap : 1; // entry cann't be inserted into machine TLB. 15.82 - u64 __ig1 : 5; // 56-61 15.83 - u64 checked : 1; // for VTLB/VHPT sanity check 15.84 - u64 invalid : 1; // invalid entry 15.85 + u64 __ig1 : 4; // 56-59 15.86 + u64 locked : 1; // 60 entry locked or not 15.87 + u64 nomap : 1; // 61 entry cann't be inserted into machine TLB. 15.88 + u64 checked : 1; // 62 for VTLB/VHPT sanity check 15.89 + u64 invalid : 1; // 63 invalid entry 15.90 }; 15.91 u64 page_flags; 15.92 }; // same for VHPT and TLB 15.93 @@ -128,10 +144,37 @@ typedef struct thash_data { 15.94 }; 15.95 } thash_data_t; 15.96 15.97 +#define INVALIDATE_VHPT_HEADER(hdata) \ 15.98 +{ ((hdata)->page_flags)=0; \ 15.99 + ((hdata)->ti)=1; \ 15.100 + ((hdata)->next)=0; } 15.101 + 15.102 +#define INVALIDATE_TLB_HEADER(hdata) \ 15.103 +{ ((hdata)->page_flags)=0; \ 15.104 + ((hdata)->ti)=1; \ 15.105 + ((hdata)->next)=0; } 15.106 + 15.107 #define INVALID_VHPT(hdata) ((hdata)->ti) 15.108 -#define INVALID_TLB(hdata) ((hdata)->invalid) 15.109 -#define INVALID_ENTRY(hcb, hdata) \ 15.110 - ((hcb)->ht==THASH_TLB ? INVALID_TLB(hdata) : INVALID_VHPT(hdata)) 15.111 +#define INVALID_TLB(hdata) ((hdata)->ti) 15.112 +#define INVALID_TR(hdata) ((hdata)->invalid) 15.113 +#define INVALID_ENTRY(hcb, hdata) INVALID_VHPT(hdata) 15.114 + 15.115 +/* ((hcb)->ht==THASH_TLB ? INVALID_TLB(hdata) : INVALID_VHPT(hdata)) */ 15.116 + 15.117 + 15.118 +/* 15.119 + * Architecture ppn is in 4KB unit while XEN 15.120 + * page may be different(1<<PAGE_SHIFT). 
15.121 + */ 15.122 +static inline u64 arch_to_xen_ppn(u64 appn) 15.123 +{ 15.124 + return (appn >>(PAGE_SHIFT-ARCH_PAGE_SHIFT)); 15.125 +} 15.126 + 15.127 +static inline u64 xen_to_arch_ppn(u64 xppn) 15.128 +{ 15.129 + return (xppn <<(PAGE_SHIFT- ARCH_PAGE_SHIFT)); 15.130 +} 15.131 15.132 typedef enum { 15.133 THASH_TLB=0, 15.134 @@ -166,11 +209,11 @@ typedef struct tlb_special { 15.135 struct thash_cb *vhpt; 15.136 } tlb_special_t; 15.137 15.138 -typedef struct vhpt_cb { 15.139 +//typedef struct vhpt_cb { 15.140 //u64 pta; // pta value. 15.141 - GET_MFN_FN *get_mfn; 15.142 - TTAG_FN *tag_func; 15.143 -} vhpt_special; 15.144 +// GET_MFN_FN *get_mfn; 15.145 +// TTAG_FN *tag_func; 15.146 +//} vhpt_special; 15.147 15.148 typedef struct thash_internal { 15.149 thash_data_t *hash_base; 15.150 @@ -198,36 +241,38 @@ typedef struct thash_cb { 15.151 u64 hash_sz; // size of above data. 15.152 void *cch_buf; // base address of collision chain. 15.153 u64 cch_sz; // size of above data. 15.154 - THASH_FN *hash_func; 15.155 - GET_RR_FN *get_rr_fn; 15.156 - RECYCLE_FN *recycle_notifier; 15.157 +// THASH_FN *hash_func; 15.158 +// GET_RR_FN *get_rr_fn; 15.159 +// RECYCLE_FN *recycle_notifier; 15.160 thash_cch_mem_t *cch_freelist; 15.161 struct vcpu *vcpu; 15.162 PTA pta; 15.163 /* VTLB/VHPT common information */ 15.164 - FIND_OVERLAP_FN *find_overlap; 15.165 - FIND_NEXT_OVL_FN *next_overlap; 15.166 - REM_THASH_FN *rem_hash; // remove hash entry. 15.167 - INS_THASH_FN *ins_hash; // insert hash entry. 15.168 - REM_NOTIFIER_FN *remove_notifier; 15.169 +// FIND_OVERLAP_FN *find_overlap; 15.170 +// FIND_NEXT_OVL_FN *next_overlap; 15.171 +// REM_THASH_FN *rem_hash; // remove hash entry. 15.172 +// INS_THASH_FN *ins_hash; // insert hash entry. 
15.173 +// REM_NOTIFIER_FN *remove_notifier; 15.174 /* private information */ 15.175 - thash_internal_t priv; 15.176 +// thash_internal_t priv; 15.177 union { 15.178 tlb_special_t *ts; 15.179 - vhpt_special *vs; 15.180 +// vhpt_special *vs; 15.181 }; 15.182 // Internal positon information, buffer and storage etc. TBD 15.183 } thash_cb_t; 15.184 15.185 #define ITR(hcb,id) ((hcb)->ts->itr[id]) 15.186 #define DTR(hcb,id) ((hcb)->ts->dtr[id]) 15.187 -#define INVALIDATE_HASH(hcb,hash) { \ 15.188 - if ((hcb)->ht==THASH_TLB) \ 15.189 - INVALID_TLB(hash) = 1; \ 15.190 - else \ 15.191 - INVALID_VHPT(hash) = 1; \ 15.192 - hash->next = NULL; } 15.193 - 15.194 +#define INVALIDATE_HASH_HEADER(hcb,hash) INVALIDATE_TLB_HEADER(hash) 15.195 +/* \ 15.196 +{ if ((hcb)->ht==THASH_TLB){ \ 15.197 + INVALIDATE_TLB_HEADER(hash); \ 15.198 + }else{ \ 15.199 + INVALIDATE_VHPT_HEADER(hash); \ 15.200 + } \ 15.201 +} 15.202 + */ 15.203 #define PURGABLE_ENTRY(hcb,en) 1 15.204 // ((hcb)->ht == THASH_VHPT || ( (en)->tc && !(en->locked)) ) 15.205 15.206 @@ -242,18 +287,20 @@ extern void thash_init(thash_cb_t *hcb, 15.207 * NOTES: 15.208 * 1: TLB entry may be TR, TC or Foreign Map. For TR entry, 15.209 * itr[]/dtr[] need to be updated too. 15.210 - * 2: Inserting to collision chain may trigger recycling if 15.211 + * 2: Inserting to collision chain may trigger recycling if 15.212 * the buffer for collision chain is empty. 15.213 * 3: The new entry is inserted at the hash table. 15.214 * (I.e. head of the collision chain) 15.215 * 4: Return the entry in hash table or collision chain. 
15.216 * 15.217 */ 15.218 -extern void thash_insert(thash_cb_t *hcb, thash_data_t *entry, u64 va); 15.219 +extern void thash_vhpt_insert(thash_cb_t *hcb, thash_data_t *entry, u64 va); 15.220 +//extern void thash_insert(thash_cb_t *hcb, thash_data_t *entry, u64 va); 15.221 extern void thash_tr_insert(thash_cb_t *hcb, thash_data_t *entry, u64 va, int idx); 15.222 - 15.223 +extern thash_data_t *vtr_find_overlap(thash_cb_t *hcb, thash_data_t *data, char cl); 15.224 +extern u64 get_mfn(struct domain *d, u64 gpfn); 15.225 /* 15.226 - * Force to delete a found entry no matter TR or foreign map for TLB. 15.227 + * Force to delete a found entry no matter TR or foreign map for TLB. 15.228 * NOTES: 15.229 * 1: TLB entry may be TR, TC or Foreign Map. For TR entry, 15.230 * itr[]/dtr[] need to be updated too. 15.231 @@ -307,7 +354,7 @@ extern void thash_purge_entries_ex(thash 15.232 u64 rid, u64 va, u64 sz, 15.233 search_section_t p_sect, 15.234 CACHE_LINE_TYPE cl); 15.235 -extern void thash_purge_and_insert(thash_cb_t *hcb, thash_data_t *in); 15.236 +extern void thash_purge_and_insert(thash_cb_t *hcb, thash_data_t *in, u64 va); 15.237 15.238 /* 15.239 * Purge all TCs or VHPT entries including those in Hash table. 15.240 @@ -335,8 +382,10 @@ extern void purge_machine_tc_by_domid(do 15.241 extern void machine_tlb_insert(struct vcpu *d, thash_data_t *tlb); 15.242 extern ia64_rr vmmu_get_rr(struct vcpu *vcpu, u64 va); 15.243 extern thash_cb_t *init_domain_tlb(struct vcpu *d); 15.244 +extern thash_data_t * vsa_thash(PTA vpta, u64 va, u64 vrr, u64 *tag); 15.245 +extern thash_data_t * vhpt_lookup(u64 va); 15.246 15.247 -#define VTLB_DEBUG 15.248 +//#define VTLB_DEBUG 15.249 #ifdef VTLB_DEBUG 15.250 extern void check_vtlb_sanity(thash_cb_t *vtlb); 15.251 extern void dump_vtlb(thash_cb_t *vtlb);
16.1 --- a/xen/include/asm-ia64/vmx_mm_def.h Tue Feb 28 13:18:08 2006 -0700 16.2 +++ b/xen/include/asm-ia64/vmx_mm_def.h Wed Mar 01 08:29:00 2006 -0700 16.3 @@ -34,7 +34,7 @@ 16.4 #define POFFSET(vaddr, ps) ((vaddr) & (PSIZE(ps) - 1)) 16.5 #define PPN_2_PA(ppn) ((ppn)<<12) 16.6 #define CLEARLSB(ppn, nbits) ((((uint64_t)ppn) >> (nbits)) << (nbits)) 16.7 -#define PAGEALIGN(va, ps) (va & ~(PSIZE(ps)-1)) 16.8 +#define PAGEALIGN(va, ps) CLEARLSB(va, ps) 16.9 16.10 #define TLB_AR_R 0 16.11 #define TLB_AR_RX 1 16.12 @@ -104,6 +104,7 @@ 16.13 16.14 #define VRN_MASK 0xe000000000000000L 16.15 #define PTA_BASE_MASK 0x3fffffffffffL 16.16 +#define PTA_BASE_SHIFT 15 16.17 #define VHPT_OFFSET_MASK 0x7fff 16.18 16.19 #define BITS_SHIFT_256MB 28
17.1 --- a/xen/include/asm-ia64/vmx_platform.h Tue Feb 28 13:18:08 2006 -0700 17.2 +++ b/xen/include/asm-ia64/vmx_platform.h Wed Mar 01 08:29:00 2006 -0700 17.3 @@ -54,7 +54,7 @@ extern uint64_t dummy_tmr[]; 17.4 #define VCPU(_v,_x) _v->arch.privregs->_x 17.5 #define VLAPIC_ID(l) (uint16_t)(VCPU((l)->vcpu, lid) >> 16) 17.6 #define VLAPIC_IRR(l) VCPU((l)->vcpu, irr[0]) 17.7 - 17.8 +struct vlapic* apic_round_robin(struct domain *d, uint8_t dest_mode, uint8_t vector, uint32_t bitmap); 17.9 extern int vmx_vcpu_pend_interrupt(struct vcpu *vcpu, uint8_t vector); 17.10 static inline int vlapic_set_irq(struct vlapic *t, uint8_t vec, uint8_t trig) 17.11 {
18.1 --- a/xen/include/asm-ia64/vmx_vcpu.h Tue Feb 28 13:18:08 2006 -0700 18.2 +++ b/xen/include/asm-ia64/vmx_vcpu.h Wed Mar 01 08:29:00 2006 -0700 18.3 @@ -464,6 +464,7 @@ vmx_vrrtomrr(VCPU *v, unsigned long val) 18.4 18.5 rr.rrval=val; 18.6 rr.rid = rr.rid + v->arch.starting_rid; 18.7 + rr.ps = PAGE_SHIFT; 18.8 rr.ve = 1; 18.9 return vmMangleRID(rr.rrval); 18.10 /* Disable this rid allocation algorithm for now */
19.1 --- a/xen/include/asm-ia64/xenkregs.h Tue Feb 28 13:18:08 2006 -0700 19.2 +++ b/xen/include/asm-ia64/xenkregs.h Wed Mar 01 08:29:00 2006 -0700 19.3 @@ -8,7 +8,8 @@ 19.4 #define IA64_TR_VHPT 4 /* dtr4: vhpt */ 19.5 #define IA64_TR_ARCH_INFO 5 19.6 #define IA64_TR_PERVP_VHPT 6 19.7 - 19.8 +#define IA64_DTR_GUEST_KERNEL 7 19.9 +#define IA64_ITR_GUEST_KERNEL 2 19.10 /* Processor status register bits: */ 19.11 #define IA64_PSR_VM_BIT 46 19.12 #define IA64_PSR_VM (__IA64_UL(1) << IA64_PSR_VM_BIT)