xen/arch/ia64/vmx/vtlb.c @ 9765:7c7bcf173f8b (ia64/xen-unstable)

[IA64] cleanup vtlb code

This patch cleans up the vtlb code.

Signed-off-by: Anthony Xu <anthony.xu@intel.com>
author: awilliam@xenbuild.aw
date:   Tue Apr 25 20:53:38 2006 -0600
/* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
/*
 * vtlb.c: guest virtual tlb handling module.
 * Copyright (c) 2004, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 *  Yaozu Dong (Eddie Dong) (Eddie.dong@intel.com)
 *  XiaoYan Feng (Fleming Feng) (Fleming.feng@intel.com)
 */
#include <linux/sched.h>
#include <asm/tlb.h>
#include <asm/mm.h>
#include <asm/vmx_mm_def.h>
#include <asm/gcc_intrin.h>
#include <linux/interrupt.h>
#include <asm/vmx_vcpu.h>
#include <asm/vmmu.h>
#include <asm/tlbflush.h>

#define MAX_CCH_LENGTH 40

thash_data_t *__alloc_chain(thash_cb_t *);
/* Thread the collision-chain buffer (cch_buf) into a singly linked free list. */
static void cch_mem_init(thash_cb_t *hcb)
{
    thash_data_t *p, *q;

    hcb->cch_freelist = p = hcb->cch_buf;

    for ( q = p + 1; (u64)(q + 1) <= (u64)hcb->cch_buf + hcb->cch_sz;
          p++, q++ ) {
        p->next = q;
    }
    p->next = NULL;
}
/* Pop one entry off the free list; returns NULL when the list is empty. */
static thash_data_t *cch_alloc(thash_cb_t *hcb)
{
    thash_data_t *p;

    if ( (p = hcb->cch_freelist) != NULL ) {
        hcb->cch_freelist = p->next;
        return p;
    } else {
        return NULL;
    }
}

/* Push an entry back onto the free list. */
static void cch_free(thash_cb_t *hcb, thash_data_t *cch)
{
    cch->next = hcb->cch_freelist;
    hcb->cch_freelist = cch;
}
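
/*
 * A minimal sketch of the free-list discipline (hypothetical snippet, not
 * part of this file): nodes borrowed with cch_alloc() extend a collision
 * chain and must come back through cch_free() when the chain is trimmed.
 *
 *     thash_data_t *node = cch_alloc(hcb);
 *     if (node != NULL) {
 *         *node = *head;          // demote the old head into the chain
 *         head->next = node;
 *         ...
 *         cch_free(hcb, node);    // return it once the entry is purged
 *     }
 */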
/*
 * Check to see if the address rid:va is translated by the TR.
 */
static inline int __is_tr_translated(thash_data_t *trp, u64 rid, u64 va)
{
    return ((trp->p) && (trp->rid == rid) && ((va - trp->vadr) < PSIZE(trp->ps)));
}
/*
 * Only for GUEST TR format.
 */
static int
__is_tr_overlap(thash_data_t *trp, u64 rid, u64 sva, u64 eva)
{
    uint64_t sa1, ea1;

    if ( !trp->p || trp->rid != rid ) {
        return 0;
    }
    sa1 = trp->vadr;
    ea1 = sa1 + PSIZE(trp->ps) - 1;
    eva -= 1;
    if ( (sva > ea1) || (sa1 > eva) )
        return 0;
    else
        return 1;
}
/*
 * Delete the thash entry at the head of a collision chain.
 */
static void __rem_hash_head(thash_cb_t *hcb, thash_data_t *hash)
{
    thash_data_t *next = hash->next;

/*    if ( hcb->remove_notifier ) {
        (hcb->remove_notifier)(hcb,hash);
    } */
    if ( next != NULL ) {
        next->len = hash->len - 1;
        *hash = *next;
        cch_free(hcb, next);
    }
    else {
        INVALIDATE_HASH_HEADER(hcb, hash);
    }
}
thash_data_t *__vtr_lookup(VCPU *vcpu, u64 va, int is_data)
{
    thash_data_t *trp;
    int i;
    u64 rid;

    vcpu_get_rr(vcpu, va, &rid);
    rid = rid & RR_RID_MASK;
    if (is_data) {
        if (vcpu_quick_region_check(vcpu->arch.dtr_regions, va)) {
            for (trp = (thash_data_t *)vcpu->arch.dtrs, i = 0; i < NDTRS; i++, trp++) {
                if (__is_tr_translated(trp, rid, va)) {
                    return trp;
                }
            }
        }
    }
    else {
        if (vcpu_quick_region_check(vcpu->arch.itr_regions, va)) {
            for (trp = (thash_data_t *)vcpu->arch.itrs, i = 0; i < NITRS; i++, trp++) {
                if (__is_tr_translated(trp, rid, va)) {
                    return trp;
                }
            }
        }
    }
    return NULL;
}
/*
 * Convert a guest TLB-format entry into the machine VHPT format.
 * PARAMS:
 *  1: hcb: the VHPT control block.
 *  2: vhpt: on entry, holds the guest TLB-format entry; on return, it has
 *     been converted in place into the machine VHPT format.
 *  3: va: the guest virtual address that must be covered by the
 *     translated machine VHPT entry.
 * NOTES:
 *  1: If the machine address range is discontiguous, the guest entry
 *     needs to be covered by several machine VHPT entries; va is used to
 *     choose one of them.
 *  2: Foreign map is supported in this API.
 * RETURN:
 *  1 on success, 0 on failure.
 */
int __tlb_to_vhpt(thash_cb_t *hcb, thash_data_t *vhpt, u64 va)
{
    u64 padr, pte;

    ASSERT ( hcb->ht == THASH_VHPT );
    padr = vhpt->ppn >> (vhpt->ps - ARCH_PAGE_SHIFT) << vhpt->ps;
    padr += va & ((1UL << vhpt->ps) - 1);
    pte = lookup_domain_mpa(current->domain, padr);
    if ((pte >> 56))
        return 0;
    vhpt->etag = ia64_ttag(va);
    vhpt->ps = PAGE_SHIFT;
    vhpt->ppn = (pte & ((1UL << IA64_MAX_PHYS_BITS) - (1UL << PAGE_SHIFT))) >> ARCH_PAGE_SHIFT;
    vhpt->next = 0;
    return 1;
}
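
/*
 * A worked example of the address math above (illustrative numbers only,
 * not from the source): take a 16KB guest page, i.e. ps = 14, with
 * ARCH_PAGE_SHIFT = 12 and ppn = 0x1234 (a frame number in 4KB units).
 *
 *     padr = 0x1234 >> (14 - 12) << 14 = 0x1234000
 *
 * The low (ps - ARCH_PAGE_SHIFT) frame bits are dropped to get the
 * 16KB-aligned base; padr += va & ((1UL << 14) - 1) then selects the
 * exact byte inside the page before lookup_domain_mpa() translates it.
 */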
/* Detach the whole collision chain from a hash head and return it to the
 * free list. */
static void thash_remove_cch(thash_cb_t *hcb, thash_data_t *hash)
{
    thash_data_t *p;

    if (hash->next) {
        p = hash->next;
        while (p->next)
            p = p->next;
        p->next = hcb->cch_freelist;
        hcb->cch_freelist = hash->next;
        hash->next = 0;
        hash->len = 0;
    }
}
/* The VHPT only holds entries with the PAGE_SIZE page size. */
void thash_vhpt_insert(thash_cb_t *hcb, u64 pte, u64 itir, u64 ifa)
{
    thash_data_t vhpt_entry, *hash_table, *cch;

    vhpt_entry.page_flags = pte & ~PAGE_FLAGS_RV_MASK;
    vhpt_entry.itir = itir;

    if ( !__tlb_to_vhpt(hcb, &vhpt_entry, ifa) ) {
        return;
    //panic("Can't convert to machine VHPT entry\n");
    }

    hash_table = (thash_data_t *)ia64_thash(ifa);
    if ( INVALID_VHPT(hash_table) ) {
        *hash_table = vhpt_entry;
        hash_table->next = 0;
        return;
    }

    cch = hash_table;
    while (cch) {
        if (cch->etag == vhpt_entry.etag) {
            if (cch->ppn == vhpt_entry.ppn)
                return;
            else
                while(1);   /* same tag, different ppn: inconsistent
                             * translation, spin here for debugging */
        }
        cch = cch->next;
    }

    if (hash_table->len >= MAX_CCN_DEPTH) {
        thash_remove_cch(hcb, hash_table);
        cch = cch_alloc(hcb);
        *cch = *hash_table;
        *hash_table = vhpt_entry;
        hash_table->len = 1;
        hash_table->next = cch;
        return;
    }

    // TODO: Add collision chain length limitation.
    cch = __alloc_chain(hcb);
    if (cch == NULL) {
        *hash_table = vhpt_entry;
        hash_table->next = 0;
    } else {
        *cch = *hash_table;
        *hash_table = vhpt_entry;
        hash_table->next = cch;
        hash_table->len = cch->len + 1;
        cch->len = 0;
    }
    return /*hash_table*/;
}
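
/*
 * Chain layout sketch for the insertion above (illustrative only): a new
 * entry always occupies the fixed VHPT bucket, and the previous head is
 * copied into a node taken from the free list.
 *
 *     before:  [bucket: B] -> [C] -> NULL              B.len = 2
 *     after:   [bucket: NEW] -> [B'] -> [C] -> NULL    NEW.len = 3
 *
 * where B' is the cch_alloc()'d copy of B.  The chain length lives only
 * in the head entry (cch->len is zeroed), which is why __rem_hash_head()
 * and the purge loops can maintain it in one place.
 */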
/*
 * vhpt lookup
 */
thash_data_t *vhpt_lookup(u64 va)
{
    thash_data_t *hash;
    u64 tag;

    hash = (thash_data_t *)ia64_thash(va);
    tag = ia64_ttag(va);
    while (hash) {
        if (hash->etag == tag)
            return hash;
        hash = hash->next;
    }
    return NULL;
}
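
/*
 * A minimal usage sketch (hypothetical caller, not part of this file):
 * resolving a guest virtual address through the machine VHPT first, since
 * VHPT entries always describe PAGE_SIZE pages.
 *
 *     thash_data_t *entry = vhpt_lookup(vadr);
 *     if (entry != NULL) {
 *         // hit: reconstruct the machine address from the 4KB frame
 *         u64 maddr = (entry->ppn << ARCH_PAGE_SHIFT) |
 *                     (vadr & (PAGE_SIZE - 1));
 *     }
 */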
/*
 * Purge the software guest TLB.
 */
static void vtlb_purge(thash_cb_t *hcb, u64 va, u64 ps)
{
    thash_data_t *hash_table, *prev, *next;
    u64 start, end, size, tag, rid;
    ia64_rr vrr;

    vcpu_get_rr(current, va, &vrr.rrval);
    rid = vrr.rid;
    size = PSIZE(ps);
    start = va & (-size);
    end = start + size;
    while (start < end) {
        hash_table = vsa_thash(hcb->pta, start, vrr.rrval, &tag);
//        tag = ia64_ttag(start);
        if (!INVALID_TLB(hash_table)) {
            if (hash_table->etag == tag) {
                __rem_hash_head(hcb, hash_table);
            }
            else {
                prev = hash_table;
                next = prev->next;
                while (next) {
                    if (next->etag == tag) {
                        prev->next = next->next;
                        cch_free(hcb, next);
                        hash_table->len--;
                        break;
                    }
                    prev = next;
                    next = next->next;
                }
            }
        }
        start += PAGE_SIZE;
    }
//    machine_tlb_purge(va, ps);
}
/*
 * Purge VHPT and machine TLB.
 */
static void vhpt_purge(thash_cb_t *hcb, u64 va, u64 ps)
{
    thash_data_t *hash_table, *prev, *next;
    u64 start, end, size, tag;

    size = PSIZE(ps);
    start = va & (-size);
    end = start + size;
    while (start < end) {
        hash_table = (thash_data_t *)ia64_thash(start);
        tag = ia64_ttag(start);
        if (hash_table->etag == tag) {
            __rem_hash_head(hcb, hash_table);
        }
        else {
            prev = hash_table;
            next = prev->next;
            while (next) {
                if (next->etag == tag) {
                    prev->next = next->next;
                    cch_free(hcb, next);
                    hash_table->len--;
                    break;
                }
                prev = next;
                next = next->next;
            }
        }
        start += PAGE_SIZE;
    }
    machine_tlb_purge(va, ps);
}
/*
 * Recycle all collision chains in a VTLB or VHPT.
 */
void thash_recycle_cch(thash_cb_t *hcb)
{
    thash_data_t *hash_table;

    hash_table = (thash_data_t *)((u64)hcb->hash + hcb->hash_sz);
    for (--hash_table; (u64)hash_table >= (u64)hcb->hash; hash_table--) {
        thash_remove_cch(hcb, hash_table);
    }
}
thash_data_t *__alloc_chain(thash_cb_t *hcb)
{
    thash_data_t *cch;

    cch = cch_alloc(hcb);
    if (cch == NULL) {  /* free list exhausted: recycle all chains and retry */
        thash_recycle_cch(hcb);
        cch = cch_alloc(hcb);
    }
    return cch;
}
/*
 * Insert an entry into the hash TLB or VHPT.
 * NOTES:
 *  1: When inserting a VHPT entry to thash, "va" must be an address
 *     covered by the inserted machine VHPT entry.
 *  2: The format of the entry is always TLB.
 *  3: The caller needs to make sure the new entry will not overlap
 *     with any existing entry.
 */
void vtlb_insert(thash_cb_t *hcb, u64 pte, u64 itir, u64 va)
{
    thash_data_t *hash_table, *cch;
    /* int flag; */
    ia64_rr vrr;
    /* u64 gppn, ppns, ppne; */
    u64 tag, ps;

    ps = itir_ps(itir);
    vcpu_get_rr(current, va, &vrr.rrval);
    if (vrr.ps != ps) {
//        machine_tlb_insert(hcb->vcpu, entry);
        panic_domain(NULL, "not preferred ps with va: 0x%lx vrr.ps=%d ps=%ld\n",
                     va, vrr.ps, ps);
        return;
    }
    hash_table = vsa_thash(hcb->pta, va, vrr.rrval, &tag);
    if ( INVALID_TLB(hash_table) ) {
        hash_table->page_flags = pte;
        hash_table->itir = itir;
        hash_table->etag = tag;
        hash_table->next = 0;
    }
    else if (hash_table->len >= MAX_CCN_DEPTH) {
        thash_remove_cch(hcb, hash_table);
        cch = cch_alloc(hcb);
        *cch = *hash_table;
        hash_table->page_flags = pte;
        hash_table->itir = itir;
        hash_table->etag = tag;
        hash_table->len = 1;
        hash_table->next = cch;
    }
    else {
        // TODO: Add collision chain length limitation.
        cch = __alloc_chain(hcb);
        if (cch == NULL) {
            hash_table->page_flags = pte;
            hash_table->itir = itir;
            hash_table->etag = tag;
            hash_table->next = 0;
        } else {
            *cch = *hash_table;
            hash_table->page_flags = pte;
            hash_table->itir = itir;
            hash_table->etag = tag;
            hash_table->next = cch;
            hash_table->len = cch->len + 1;
            cch->len = 0;
        }
    }
    return;
}
int vtr_find_overlap(VCPU *vcpu, u64 va, u64 ps, int is_data)
{
    thash_data_t *trp;
    int i;
    u64 end, rid;

    vcpu_get_rr(vcpu, va, &rid);
    rid = rid & RR_RID_MASK;
    end = va + PSIZE(ps);
    if (is_data) {
        if (vcpu_quick_region_check(vcpu->arch.dtr_regions, va)) {
            for (trp = (thash_data_t *)vcpu->arch.dtrs, i = 0; i < NDTRS; i++, trp++) {
                if (__is_tr_overlap(trp, rid, va, end)) {
                    return i;
                }
            }
        }
    }
    else {
        if (vcpu_quick_region_check(vcpu->arch.itr_regions, va)) {
            for (trp = (thash_data_t *)vcpu->arch.itrs, i = 0; i < NITRS; i++, trp++) {
                if (__is_tr_overlap(trp, rid, va, end)) {
                    return i;
                }
            }
        }
    }
    return -1;
}
/*
 * Purge entries in the VTLB and VHPT.
 */
void thash_purge_entries(VCPU *v, u64 va, u64 ps)
{
    if (vcpu_quick_region_check(v->arch.tc_regions, va))
        vtlb_purge(&v->arch.vtlb, va, ps);
    vhpt_purge(&v->arch.vhpt, va, ps);
}
/*
 * Purge overlapping TCs and then insert the new entry to emulate itc ops.
 * Notes: Only TC entries can be purged and inserted this way.
 */
void thash_purge_and_insert(VCPU *v, u64 pte, u64 itir, u64 ifa)
{
    u64 ps, va;

    ps = itir_ps(itir);
    va = PAGEALIGN(ifa, ps);
    if (vcpu_quick_region_check(v->arch.tc_regions, va))
        vtlb_purge(&v->arch.vtlb, va, ps);
    vhpt_purge(&v->arch.vhpt, va, ps);
    if ((ps != PAGE_SHIFT) || (pte & VTLB_PTE_IO)) {
        vtlb_insert(&v->arch.vtlb, pte, itir, va);
        vcpu_quick_region_set(PSCBX(v, tc_regions), va);
    }
    if (!(pte & VTLB_PTE_IO)) {
        va = PAGEALIGN(ifa, PAGE_SHIFT);
        thash_vhpt_insert(&v->arch.vhpt, pte, itir, va);
    }
}
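
/*
 * A minimal sketch of an itc.d emulation path calling into this function
 * (hypothetical caller, not part of this file; the IA64_*FAULT codes and
 * the overlap check are assumptions about the surrounding code):
 *
 *     IA64FAULT vcpu_itc_d_sketch(VCPU *vcpu, u64 pte, u64 itir, u64 ifa)
 *     {
 *         // refuse a TC that would overlap an existing data TR
 *         if (vtr_find_overlap(vcpu, ifa, itir_ps(itir), 1) >= 0)
 *             return IA64_FAULT;
 *         thash_purge_and_insert(vcpu, pte, itir, ifa);
 *         return IA64_NO_FAULT;
 *     }
 */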
/*
 * Purge all TCs or VHPT entries including those in the hash table.
 */
// TODO: add sections.
void thash_purge_all(VCPU *v)
{
    thash_data_t *hash_table;
    /* thash_data_t *entry; */
    thash_cb_t *hcb, *vhpt;
    /* u64 i, start, end; */

    hcb = &v->arch.vtlb;
    vhpt = &v->arch.vhpt;
#ifdef  VTLB_DEBUG
    extern u64 sanity_check;
    static u64 statistics_before_purge_all = 0;
    if ( statistics_before_purge_all ) {
        sanity_check = 1;
        check_vtlb_sanity(hcb);
    }
#endif
    ASSERT ( hcb->ht == THASH_TLB );

    hash_table = (thash_data_t *)((u64)hcb->hash + hcb->hash_sz);
    for (--hash_table; (u64)hash_table >= (u64)hcb->hash; hash_table--) {
        INVALIDATE_TLB_HEADER(hash_table);
    }
    cch_mem_init(hcb);

    hash_table = (thash_data_t *)((u64)vhpt->hash + vhpt->hash_sz);
    for (--hash_table; (u64)hash_table >= (u64)vhpt->hash; hash_table--) {
        INVALIDATE_VHPT_HEADER(hash_table);
    }
    cch_mem_init(vhpt);
    local_flush_tlb_all();
}
/*
 * Look up the hash table and its collision chain to find an entry
 * covering the address rid:va.
 *
 * The entry, if found, is returned in TLB format (this holds for both
 * the VHPT and the TLB).
 */
thash_data_t *vtlb_lookup(VCPU *v, u64 va, int is_data)
{
    thash_data_t *hash_table, *cch;
    u64 tag;
    ia64_rr vrr;
    thash_cb_t *hcb = &v->arch.vtlb;

    ASSERT ( hcb->ht == THASH_TLB );

    cch = __vtr_lookup(v, va, is_data);
    if ( cch ) return cch;

    if (vcpu_quick_region_check(v->arch.tc_regions, va) == 0)
        return NULL;

    vcpu_get_rr(v, va, &vrr.rrval);
    hash_table = vsa_thash(hcb->pta, va, vrr.rrval, &tag);

    if ( INVALID_ENTRY(hcb, hash_table) )
        return NULL;

    for (cch = hash_table; cch; cch = cch->next) {
        if (cch->etag == tag)
            return cch;
    }
    return NULL;
}
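
/*
 * A minimal usage sketch (hypothetical caller, not part of this file): a
 * data-access miss handler would consult the guest's virtual TLB before
 * deciding whether to reflect a fault or refill the machine VHPT.
 *
 *     thash_data_t *entry = vtlb_lookup(v, vadr, 1);   // 1 == data side
 *     if (entry == NULL) {
 *         // miss in both TRs and TCs: inject a TLB fault into the guest
 *     } else if (!(entry->page_flags & VTLB_PTE_IO)) {
 *         // hit: refill the machine VHPT from the guest-format entry
 *         thash_vhpt_insert(&v->arch.vhpt, entry->page_flags,
 *                           entry->itir, vadr);
 *     }
 */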
/*
 * Initialize internal control data before service.
 */
void thash_init(thash_cb_t *hcb, u64 sz)
{
    thash_data_t *hash_table;

    cch_mem_init(hcb);
    hcb->pta.val = (unsigned long)hcb->hash;
    hcb->pta.vf = 1;
    hcb->pta.ve = 1;
    hcb->pta.size = sz;
//    hcb->get_rr_fn = vmmu_get_rr;
    ASSERT ( hcb->hash_sz % sizeof(thash_data_t) == 0 );
    hash_table = (thash_data_t *)((u64)hcb->hash + hcb->hash_sz);

    for (--hash_table; (u64)hash_table >= (u64)hcb->hash; hash_table--) {
        INVALIDATE_HASH_HEADER(hcb, hash_table);
    }
}
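
/*
 * A minimal sketch of the expected setup sequence (hypothetical caller;
 * buffer allocation elided, and VCPU_VTLB_SHIFT is an assumed size
 * constant): hash, hash_sz, cch_buf and cch_sz must be filled in before
 * thash_init() threads the free list and invalidates every hash head.
 *
 *     thash_cb_t *hcb = &v->arch.vtlb;
 *     hcb->ht = THASH_TLB;
 *     // hcb->hash, hcb->hash_sz, hcb->cch_buf, hcb->cch_sz: caller-provided
 *     thash_init(hcb, VCPU_VTLB_SHIFT);
 */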
#ifdef  VTLB_DEBUG
/*
static u64 cch_length_statistics[MAX_CCH_LENGTH+1];
u64 sanity_check=0;
u64 vtlb_chain_sanity(thash_cb_t *vtlb, thash_cb_t *vhpt, thash_data_t *hash)
{
    thash_data_t *cch;
    thash_data_t *ovl;
    search_section_t s_sect;
    u64 num = 0;

    s_sect.v = 0;
    for (cch = hash; cch; cch = cch->next) {
        ovl = thash_find_overlap(vhpt, cch, s_sect);
        while ( ovl != NULL ) {
            ovl->checked = 1;
            ovl = (vhpt->next_overlap)(vhpt);
        };
        num++;
    }
    if ( num >= MAX_CCH_LENGTH ) {
        cch_length_statistics[MAX_CCH_LENGTH]++;
    }
    else {
        cch_length_statistics[num]++;
    }
    return num;
}
void check_vtlb_sanity(thash_cb_t *vtlb)
{
//    struct page_info *page;
    u64 hash_num, i, psr;
    static u64 check_ok_num, check_fail_num, check_invalid;
//    void *vb1, *vb2;
    thash_data_t *hash, *cch;
    thash_data_t *ovl;
    search_section_t s_sect;
    thash_cb_t *vhpt = vtlb->vhpt;
    u64 invalid_ratio;

    if ( sanity_check == 0 ) return;
    sanity_check --;
    s_sect.v = 0;
//    page = alloc_domheap_pages (NULL, VCPU_TLB_ORDER, 0);
//    if ( page == NULL ) {
//        panic("No enough contiguous memory for init_domain_mm\n");
//    };
//    vb1 = page_to_virt(page);
//    printf("Allocated page=%lp vbase=%lp\n", page, vb1);
//    vb2 = vb1 + vtlb->hash_sz;
    hash_num = vhpt->hash_sz / sizeof(thash_data_t);
//    printf("vb2=%lp, size=%lx hash_num=%lx\n", vb2, vhpt->hash_sz, hash_num);
    printf("vtlb=%p, hash=%p size=0x%lx; vhpt=%p, hash=%p size=0x%lx\n",
            vtlb, vtlb->hash, vtlb->hash_sz,
            vhpt, vhpt->hash, vhpt->hash_sz);
    //memcpy(vb1, vtlb->hash, vtlb->hash_sz);
    //memcpy(vb2, vhpt->hash, vhpt->hash_sz);
    for ( i = 0; i < sizeof(cch_length_statistics)/sizeof(cch_length_statistics[0]); i++ ) {
        cch_length_statistics[i] = 0;
    }

    local_irq_save(psr);

    hash = vhpt->hash;
    for (i = 0; i < hash_num; i++) {
        if ( !INVALID_ENTRY(vhpt, hash) ) {
            for ( cch = hash; cch; cch = cch->next) {
                cch->checked = 0;
            }
        }
        hash++;
    }
    printf("Done vhpt clear checked flag, hash_num=0x%lx\n", hash_num);
    check_invalid = 0;
    check_ok_num = 0;
    hash = vtlb->hash;
    for ( i = 0; i < hash_num; i++ ) {
        if ( !INVALID_ENTRY(vtlb, hash) ) {
            check_ok_num += vtlb_chain_sanity(vtlb, vhpt, hash);
        }
        else {
            check_invalid++;
        }
        hash++;
    }
    printf("Done vtlb entry check, hash=%p\n", hash);
    printf("check_ok_num = 0x%lx check_invalid=0x%lx\n", check_ok_num, check_invalid);
    invalid_ratio = 1000 * check_invalid / hash_num;
    printf("%02ld.%01ld%% entries are invalid\n",
            invalid_ratio/10, invalid_ratio % 10 );
    for (i = 0; i < NDTRS; i++) {
        ovl = thash_find_overlap(vhpt, &vtlb->ts->dtr[i], s_sect);
        while ( ovl != NULL ) {
            ovl->checked = 1;
            ovl = (vhpt->next_overlap)(vhpt);
        };
    }
    printf("Done dTR\n");
    for (i = 0; i < NITRS; i++) {
        ovl = thash_find_overlap(vhpt, &vtlb->ts->itr[i], s_sect);
        while ( ovl != NULL ) {
            ovl->checked = 1;
            ovl = (vhpt->next_overlap)(vhpt);
        };
    }
    printf("Done iTR\n");
    check_fail_num = 0;
    check_invalid = 0;
    check_ok_num = 0;
    hash = vhpt->hash;
    for (i = 0; i < hash_num; i++) {
        if ( !INVALID_ENTRY(vhpt, hash) ) {
            for ( cch = hash; cch; cch = cch->next) {
                if ( !cch->checked ) {
                    printf("!!!Hash=%p cch=%p not within vtlb\n", hash, cch);
                    check_fail_num++;
                }
                else {
                    check_ok_num++;
                }
            }
        }
        else {
            check_invalid++;
        }
        hash++;
    }
    local_irq_restore(psr);
    printf("check_ok_num=0x%lx check_fail_num=0x%lx check_invalid=0x%lx\n",
            check_ok_num, check_fail_num, check_invalid);
    //memcpy(vtlb->hash, vb1, vtlb->hash_sz);
    //memcpy(vhpt->hash, vb2, vhpt->hash_sz);
    printf("The statistics of collision chain length is listed\n");
    for ( i = 0; i < sizeof(cch_length_statistics)/sizeof(cch_length_statistics[0]); i++ ) {
        printf("CCH length=%02ld, chain number=%ld\n", i, cch_length_statistics[i]);
    }
//    free_domheap_pages(page, VCPU_TLB_ORDER);
    printf("Done check_vtlb\n");
}
void dump_vtlb(thash_cb_t *vtlb)
{
    static u64 dump_vtlb = 0;
    thash_data_t *hash, *cch, *tr;
    u64 hash_num, i;

    if ( dump_vtlb == 0 ) return;
    dump_vtlb --;
    hash_num = vtlb->hash_sz / sizeof(thash_data_t);
    hash = vtlb->hash;

    printf("Dump vTC\n");
    for ( i = 0; i < hash_num; i++ ) {
        if ( !INVALID_ENTRY(vtlb, hash) ) {
            printf("VTLB at hash=%p\n", hash);
            for (cch = hash; cch; cch = cch->next) {
                printf("Entry %p va=%lx ps=%d rid=%d\n",
                        cch, cch->vadr, cch->ps, cch->rid);
            }
        }
        hash++;
    }
    printf("Dump vDTR\n");
    for (i = 0; i < NDTRS; i++) {
        tr = &DTR(vtlb, i);
        printf("Entry %p va=%lx ps=%d rid=%d\n",
                tr, tr->vadr, tr->ps, tr->rid);
    }
    printf("Dump vITR\n");
    for (i = 0; i < NITRS; i++) {
        tr = &ITR(vtlb, i);
        printf("Entry %p va=%lx ps=%d rid=%d\n",
                tr, tr->vadr, tr->ps, tr->rid);
    }
    printf("End of vTLB dump\n");
}
*/
#endif