ia64/xen-unstable

view xen/arch/ia64/vtlb.c @ 6552:a9873d384da4

Merge.
author adsharma@los-vmm.sc.intel.com
date Thu Aug 25 12:24:48 2005 -0700 (2005-08-25)
parents 112d44270733 fa0754a9f64f
children dfaf788ab18c

/* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
/*
 * vtlb.c: guest virtual tlb handling module.
 * Copyright (c) 2004, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 *  Yaozu Dong (Eddie Dong) (Eddie.dong@intel.com)
 *  XiaoYan Feng (Fleming Feng) (Fleming.feng@intel.com)
 */

#include <linux/sched.h>
#include <asm/tlb.h>
#include <asm/mm.h>
#include <asm/vmx_mm_def.h>
#include <asm/gcc_intrin.h>
#include <linux/interrupt.h>
#include <asm/vmx_vcpu.h>

#define MAX_CCH_LENGTH 40

/*
 * Build the free list of collision chain (cch) nodes over hcb->cch_buf.
 */
static void cch_mem_init(thash_cb_t *hcb)
{
    thash_cch_mem_t *p, *q;

    hcb->cch_freelist = p = hcb->cch_buf;

    for ( q = p + 1; (u64)(q + 1) <= (u64)hcb->cch_buf + hcb->cch_sz;
          p++, q++ ) {
        p->next = q;
    }
    p->next = NULL;
}

/*
 * Pop one collision chain node from the free list, or return NULL when
 * the pool is exhausted (the caller then triggers a recycle).
 */
static thash_data_t *cch_alloc(thash_cb_t *hcb)
{
    thash_cch_mem_t *p;

    if ( (p = hcb->cch_freelist) == NULL ) {
        return NULL;
    }
    hcb->cch_freelist = p->next;
    return &(p->data);
}

static void cch_free(thash_cb_t *hcb, thash_data_t *cch)
{
    thash_cch_mem_t *p = (thash_cch_mem_t*)cch;

    p->next = hcb->cch_freelist;
    hcb->cch_freelist = p;
}
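
/*
 * Illustrative sketch (not part of the original file): a caller that owns a
 * thash_cb_t with cch_buf/cch_sz already set up would use the pool roughly as
 *
 *     cch_mem_init(hcb);                     // build the free list once
 *     thash_data_t *node = cch_alloc(hcb);   // NULL when the pool is empty
 *     if ( node != NULL )
 *         cch_free(hcb, node);               // push the node back
 */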

/*
 * Check to see if the address rid:va is translated by the TLB
 */
static int __is_translated(thash_data_t *tlb, u64 rid, u64 va, CACHE_LINE_TYPE cl)
{
    u64  size1,sa1,ea1;

    if ( tlb->rid != rid || tlb->cl != cl )
        return 0;
    size1 = PSIZE(tlb->ps);
    sa1 = tlb->vadr & ~(size1-1);   // mask the low address bits
    ea1 = sa1 + size1;

    if ( va >= sa1 && (va < ea1 || ea1 == 0) )
        return 1;
    else
        return 0;
}
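
/*
 * Worked example (added for illustration, not in the original file): for an
 * entry with ps=14 (a 16KB page) and vadr=0x40013210, size1 is 0x4000, so the
 * covered range is sa1=0x40010000 .. ea1=0x40014000; a lookup of va=0x40012468
 * with the same rid and cl therefore returns 1.  The "ea1 == 0" test keeps a
 * page that wraps past the top of the 64-bit address space covered.
 */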

/*
 * Only for TLB format.
 */
static int
__is_tlb_overlap(thash_cb_t *hcb,thash_data_t *entry,int rid, char cl, u64 sva, u64 eva)
{
    uint64_t size1,size2,sa1,ea1,ea2;

    if ( entry->invalid || entry->rid != rid || entry->cl != cl ) {
        return 0;
    }
    size1 = PSIZE(entry->ps);
    sa1 = entry->vadr & ~(size1-1); // mask the low address bits
    ea1 = sa1 + size1;
    if ( (sva >= ea1 && ea1 != 0) || (eva <= sa1 && eva != 0) )
        return 0;
    else
        return 1;
}
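
/*
 * Worked example (added for illustration, not in the original file): an entry
 * with ps=16 (64KB) at vadr=0x80000000 spans [0x80000000, 0x80010000); a query
 * range sva=0x8000f000, eva=0x80011000 fails both disjointness tests, so the
 * function reports an overlap.  The "!= 0" checks again allow ranges that wrap
 * to zero at the top of the address space.
 */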

static void __rem_tr (thash_cb_t *hcb, thash_data_t *tr)
{
    if ( hcb->remove_notifier ) {
        (hcb->remove_notifier)(hcb,tr);
    }
    tr->invalid = 1;
}

static inline void __set_tr (thash_data_t *tr, thash_data_t *data, int idx)
{
    *tr = *data;
    tr->tr_idx = idx;
}

static void __init_tr(thash_cb_t *hcb)
{
    int i;
    thash_data_t *tr;

    for ( i=0, tr = &ITR(hcb,0); i<NITRS; i++ ) {
        tr[i].invalid = 1;
    }
    for ( i=0, tr = &DTR(hcb,0); i<NDTRS; i++ ) {
        tr[i].invalid = 1;
    }
}

/*
 * Replace TR entry.
 */
static void rep_tr(thash_cb_t *hcb,thash_data_t *insert, int idx)
{
    thash_data_t *tr;

    if ( insert->cl == ISIDE_TLB ) {
        tr = &ITR(hcb,idx);
    }
    else {
        tr = &DTR(hcb,idx);
    }
    if ( !INVALID_TLB(tr) ) {
        __rem_tr(hcb, tr);
    }
    __set_tr (tr, insert, idx);
}

/*
 * Remove TR entry.
 */
static void rem_tr(thash_cb_t *hcb,CACHE_LINE_TYPE cl, int idx)
{
    thash_data_t *tr;

    if ( cl == ISIDE_TLB ) {
        tr = &ITR(hcb,idx);
    }
    else {
        tr = &DTR(hcb,idx);
    }
    if ( !INVALID_TLB(tr) ) {
        __rem_tr(hcb, tr);
    }
}

/*
 * Delete a thash entry from its collision chain.
 * rem: the entry to be removed.
 * (Linkage to the previous entry is handled by the caller.)
 */
static void __rem_chain(thash_cb_t *hcb/*, thash_data_t *prev*/, thash_data_t *rem)
{
    //prev->next = rem->next;
    if ( hcb->remove_notifier ) {
        (hcb->remove_notifier)(hcb,rem);
    }
    cch_free (hcb, rem);
}

/*
 * Delete a thash entry at the head of its collision chain.
 */
static void __rem_hash_head(thash_cb_t *hcb, thash_data_t *hash)
{
    thash_data_t *next=hash->next;

    if ( hcb->remove_notifier ) {
        (hcb->remove_notifier)(hcb,hash);
    }
    if ( next != NULL ) {
        *hash = *next;
        cch_free (hcb, next);
    }
    else {
        INVALIDATE_HASH(hcb, hash);
    }
}

thash_data_t *__vtr_lookup(thash_cb_t *hcb,
            u64 rid, u64 va,
            CACHE_LINE_TYPE cl)
{
    thash_data_t    *tr;
    int   num,i;

    if ( cl == ISIDE_TLB ) {
        tr = &ITR(hcb,0);
        num = NITRS;
    }
    else {
        tr = &DTR(hcb,0);
        num = NDTRS;
    }
    for ( i=0; i<num; i++ ) {
        if ( !INVALID_ENTRY(hcb,&tr[i]) &&
             __is_translated(&tr[i], rid, va, cl) )
            return &tr[i];
    }
    return NULL;
}

/*
 * Find an overlapping VHPT entry within the current collision chain,
 * based on the internal priv info.
 */
static inline thash_data_t* _vhpt_next_overlap_in_chain(thash_cb_t *hcb)
{
    thash_data_t    *cch;
    thash_internal_t *priv = &hcb->priv;

    for (cch=priv->cur_cch; cch; cch = cch->next) {
        if ( priv->tag == cch->etag ) {
            return cch;
        }
    }
    return NULL;
}

/*
 * Find an overlapping TLB/VHPT entry within the current collision chain,
 * based on the internal priv info.
 */
static thash_data_t *_vtlb_next_overlap_in_chain(thash_cb_t *hcb)
{
    thash_data_t    *cch;
    thash_internal_t *priv = &hcb->priv;

    /* Find overlap TLB entry */
    for (cch=priv->cur_cch; cch; cch = cch->next) {
        if ( ( cch->tc ? priv->s_sect.tc : priv->s_sect.tr ) &&
             __is_tlb_overlap(hcb, cch, priv->rid, priv->cl,
                              priv->_curva, priv->_eva) ) {
            return cch;
        }
    }
    return NULL;
}

/*
 * Get the machine format of a VHPT entry.
 * PARAMS:
 *  1: tlb:  the TLB-format hash entry to convert to VHPT format.
 *  2: va:   the guest virtual address that must be covered by
 *           the translated machine VHPT entry.
 *  3: vhpt: the machine-format VHPT entry converted from tlb.
 * NOTES:
 *  1: If the machine address is discontiguous, "tlb" needs to be
 *     covered by several machine VHPT entries; va is used to choose
 *     one of them.
 *  2: Foreign map is supported in this API.
 * RETURN:
 *  1 on success, 0 on failure.
 */
int __tlb_to_vhpt(thash_cb_t *hcb,
            thash_data_t *tlb, u64 va,
            thash_data_t *vhpt)
{
    u64 pages,mfn;
    rr_t vrr;

    ASSERT ( hcb->ht == THASH_VHPT );
    vrr = (hcb->get_rr_fn)(hcb->vcpu,va);
    pages = PSIZE(vrr.ps) >> PAGE_SHIFT;
    mfn = (hcb->vs->get_mfn)(DOMID_SELF,tlb->ppn, pages);
    if ( mfn == INVALID_MFN ) return 0;

    // TODO: handle the machine discontiguous address space issue.
    vhpt->etag = (hcb->vs->tag_func)( hcb->pta,
            tlb->vadr, tlb->rid, tlb->ps);
    //vhpt->ti = 0;
    vhpt->itir = tlb->itir & ~ITIR_RV_MASK;
    vhpt->page_flags = tlb->page_flags & ~PAGE_FLAGS_RV_MASK;
    vhpt->ppn = mfn;
    vhpt->next = 0;
    return 1;
}
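
/*
 * Illustrative sketch (not part of the original file): vhpt_insert() below
 * uses this helper roughly as
 *
 *     thash_data_t mvhpt;                                // machine-format entry
 *     if ( __tlb_to_vhpt(vhpt_hcb, &guest_tlb, fault_va, &mvhpt) )
 *         ... install mvhpt at hash_func(pta, fault_va, rid, ps) ...
 *
 * where vhpt_hcb, guest_tlb and fault_va are hypothetical names for the VHPT
 * control block, a guest TLB-format entry and the address it must cover.
 */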

/*
 * Insert an entry into the hash table.
 * NOTES:
 *  1: A TLB entry may be a TR, TC or Foreign Map. For a TR entry,
 *     itr[]/dtr[] need to be updated too.
 *  2: Inserting into the collision chain may trigger recycling if
 *     the buffer for the collision chain is empty.
 *  3: The new entry is placed at the head of the collision chain
 *     (the hash table slot itself).
 *  4: The buffer holding the entry is allocated internally
 *     from cch_buf or is the hash table slot itself.
 *  5: Return the entry in the hash table or collision chain.
 *  6: The input parameter, entry, should be in TLB format,
 *     i.e. it has va, rid, ps...
 *  7: This API is invoked by emulating ITC/ITR and by tlb_miss.
 */
void thash_tr_insert(thash_cb_t *hcb, thash_data_t *entry, u64 va, int idx)
{
    if ( hcb->ht != THASH_TLB || entry->tc ) {
        panic("wrong parameter\n");
    }
    entry->vadr = PAGEALIGN(entry->vadr,entry->ps);
    entry->ppn = PAGEALIGN(entry->ppn, entry->ps-12);
    rep_tr(hcb, entry, idx);
    return ;
}

/*
 * Allocate a collision chain node; if the pool is exhausted, notify the
 * owner and purge everything, then retry.
 */
thash_data_t *__alloc_chain(thash_cb_t *hcb,thash_data_t *entry)
{
    thash_data_t *cch;

    cch = cch_alloc(hcb);
    if(cch == NULL){
        // recycle
        if ( hcb->recycle_notifier ) {
            hcb->recycle_notifier(hcb,(u64)entry);
        }
        thash_purge_all(hcb);
        cch = cch_alloc(hcb);
    }
    return cch;
}

/*
 * Insert an entry into the hash TLB or VHPT.
 * NOTES:
 *  1: When inserting a VHPT entry, "va" must be an address covered
 *     by the inserted machine VHPT entry.
 *  2: The format of the entry is always TLB.
 *  3: The caller needs to make sure the new entry will not overlap
 *     with any existing entry.
 */
void vtlb_insert(thash_cb_t *hcb, thash_data_t *entry, u64 va)
{
    thash_data_t    *hash_table, *cch;
    int flag;
    rr_t  vrr;
    u64 gppn;
    u64 ppns, ppne;

    hash_table = (hcb->hash_func)(hcb->pta,
                        va, entry->rid, entry->ps);
    if( INVALID_ENTRY(hcb, hash_table) ) {
        *hash_table = *entry;
        hash_table->next = 0;
    }
    else {
        // TODO: Add collision chain length limitation.
        cch = __alloc_chain(hcb,entry);

        *cch = *hash_table;
        *hash_table = *entry;
        hash_table->next = cch;
    }
    if(hcb->vcpu->domain->domain_id==0){
        thash_insert(hcb->ts->vhpt, entry, va);
        return;
    }
    // Mirror the entry into the machine VHPT only when the guest page is
    // backed by memory and does not straddle the 0xa0000/0xc0000 boundaries
    // (apparently the legacy VGA/MMIO hole).
    flag = 1;
    gppn = (POFFSET(va,entry->ps)|PAGEALIGN((entry->ppn<<12),entry->ps))>>PAGE_SHIFT;
    ppns = PAGEALIGN((entry->ppn<<12),entry->ps);
    ppne = ppns + PSIZE(entry->ps);
    if(((ppns<=0xa0000)&&(ppne>0xa0000))||((ppne>0xc0000)&&(ppns<=0xc0000)))
        flag = 0;
    if((__gpfn_is_mem(hcb->vcpu->domain, gppn)&&flag))
        thash_insert(hcb->ts->vhpt, entry, va);
    return ;
}

static void vhpt_insert(thash_cb_t *hcb, thash_data_t *entry, u64 va)
{
    thash_data_t    *hash_table, *cch;
    rr_t  vrr;

    hash_table = (hcb->hash_func)(hcb->pta,
                        va, entry->rid, entry->ps);
    if( INVALID_ENTRY(hcb, hash_table) ) {
        if ( !__tlb_to_vhpt(hcb, entry, va, hash_table) ) {
            panic("Can't convert to machine VHPT entry\n");
        }
        hash_table->next = 0;
    }
    else {
        // TODO: Add collision chain length limitation.
        cch = __alloc_chain(hcb,entry);

        *cch = *hash_table;
        if ( !__tlb_to_vhpt(hcb, entry, va, hash_table) ) {
            panic("Can't convert to machine VHPT entry\n");
        }
        hash_table->next = cch;
        if(hash_table->tag==hash_table->next->tag)
            while(1);   // debug: spin on a duplicated tag in the chain
    }
    return /*hash_table*/;
}

void thash_insert(thash_cb_t *hcb, thash_data_t *entry, u64 va)
{
    thash_data_t    *hash_table;
    rr_t  vrr;

    vrr = (hcb->get_rr_fn)(hcb->vcpu,entry->vadr);
    if ( entry->ps != vrr.ps && entry->tc ) {
        panic("Not support for multiple page size now\n");
    }
    entry->vadr = PAGEALIGN(entry->vadr,entry->ps);
    entry->ppn = PAGEALIGN(entry->ppn, entry->ps-12);
    (hcb->ins_hash)(hcb, entry, va);
}

static void rem_thash(thash_cb_t *hcb, thash_data_t *entry)
{
    thash_data_t    *hash_table, *p, *q;
    thash_internal_t *priv = &hcb->priv;
    int idx;

    hash_table = priv->hash_base;
    if ( hash_table == entry ) {
//        if ( PURGABLE_ENTRY(hcb, entry) ) {
            __rem_hash_head (hcb, entry);
//        }
        return ;
    }
    // remove from collision chain
    p = hash_table;
    for ( q=p->next; q; q = p->next ) {
        if ( q == entry ){
//            if ( PURGABLE_ENTRY(hcb,q ) ) {
                p->next = q->next;
                __rem_chain(hcb, entry);
//            }
            return ;
        }
        p = q;
    }
    panic("Entry not existed or bad sequence\n");
}

static void rem_vtlb(thash_cb_t *hcb, thash_data_t *entry)
{
    thash_data_t    *hash_table, *p, *q;
    thash_internal_t *priv = &hcb->priv;
    int idx;

    if ( !entry->tc ) {
        return rem_tr(hcb, entry->cl, entry->tr_idx);
    }
    rem_thash(hcb, entry);
}

int   cch_depth=0;
/*
 * Purge the collision chain starting from cch.
 * NOTE:
 *  For un-purgeable entries (FM), this function returns
 *  the head of the remaining collision chain.
 */
static thash_data_t *thash_rem_cch(thash_cb_t *hcb, thash_data_t *cch)
{
    thash_data_t *next;

    if ( ++cch_depth > MAX_CCH_LENGTH ) {
        printf ("cch length > MAX_CCH_LENGTH, exceed the expected length\n");
        while(1);
    }
    if ( cch -> next ) {
        next = thash_rem_cch(hcb, cch->next);
    }
    else {
        next = NULL;
    }
    if ( PURGABLE_ENTRY(hcb, cch) ) {
        __rem_chain(hcb, cch);
        return next;
    }
    else {
        cch->next = next;
        return cch;
    }
}

/*
 * Purge one hash line (including the entry in the hash table).
 * Can only be called by thash_purge_all.
 * Input:
 *  hash: The head of the collision chain (hash table slot)
 */
static void thash_rem_line(thash_cb_t *hcb, thash_data_t *hash)
{
    if ( INVALID_ENTRY(hcb, hash) ) return;

    if ( hash->next ) {
        cch_depth = 0;
        hash->next = thash_rem_cch(hcb, hash->next);
    }
    // Then the hash table slot itself.
    if ( PURGABLE_ENTRY(hcb, hash) ) {
        __rem_hash_head(hcb, hash);
    }
}

/*
 * Find an overlapping entry in the hash table and its collision chain.
 * Refer to SDM2 4.1.1.4 for the overlap definition.
 * PARAMS:
 *  1: in: TLB-format entry; rid:ps must match vrr[].
 *     va & ps identify the address space for the overlap lookup.
 *  2: section can be a combination of TR, TC and FM (THASH_SECTION_XX).
 *  3: cl means I side or D side.
 * RETURNS:
 *  NULL to indicate the end of findings.
 */
thash_data_t *thash_find_overlap(thash_cb_t *hcb,
            thash_data_t *in, search_section_t s_sect)
{
    return (hcb->find_overlap)(hcb, in->vadr,
            PSIZE(in->ps), in->rid, in->cl, s_sect);
}

static thash_data_t *vtlb_find_overlap(thash_cb_t *hcb,
            u64 va, u64 size, int rid, char cl, search_section_t s_sect)
{
    thash_data_t    *hash_table;
    thash_internal_t *priv = &hcb->priv;
    u64     tag;
    rr_t    vrr;

    priv->_curva = va & ~(size-1);
    priv->_eva = priv->_curva + size;
    priv->rid = rid;
    vrr = (hcb->get_rr_fn)(hcb->vcpu,va);
    priv->ps = vrr.ps;
    hash_table = (hcb->hash_func)(hcb->pta,
            priv->_curva, rid, priv->ps);

    priv->s_sect = s_sect;
    priv->cl = cl;
    priv->_tr_idx = 0;
    priv->hash_base = hash_table;
    priv->cur_cch = hash_table;
    return (hcb->next_overlap)(hcb);
}

static thash_data_t *vhpt_find_overlap(thash_cb_t *hcb,
            u64 va, u64 size, int rid, char cl, search_section_t s_sect)
{
    thash_data_t    *hash_table;
    thash_internal_t *priv = &hcb->priv;
    u64     tag;
    rr_t    vrr;

    priv->_curva = va & ~(size-1);
    priv->_eva = priv->_curva + size;
    priv->rid = rid;
    vrr = (hcb->get_rr_fn)(hcb->vcpu,va);
    priv->ps = vrr.ps;
    hash_table = (hcb->hash_func)( hcb->pta,
            priv->_curva, rid, priv->ps);
    tag = (hcb->vs->tag_func)( hcb->pta,
            priv->_curva, rid, priv->ps);

    priv->tag = tag;
    priv->hash_base = hash_table;
    priv->cur_cch = hash_table;
    return (hcb->next_overlap)(hcb);
}

static thash_data_t *vtr_find_next_overlap(thash_cb_t *hcb)
{
    thash_data_t    *tr;
    thash_internal_t *priv = &hcb->priv;
    int   num;

    if ( priv->cl == ISIDE_TLB ) {
        num = NITRS;
        tr = &ITR(hcb,0);
    }
    else {
        num = NDTRS;
        tr = &DTR(hcb,0);
    }
    for (; priv->_tr_idx < num; priv->_tr_idx ++ ) {
        if ( __is_tlb_overlap(hcb, &tr[priv->_tr_idx],
                priv->rid, priv->cl,
                priv->_curva, priv->_eva) ) {
            return &tr[priv->_tr_idx++];
        }
    }
    return NULL;
}

/*
 * Continue the overlap search started by vtlb_find_overlap and return the
 * next overlapping entry.
 * NOTES:
 *  Intermediate position information is stored in hcb->priv.
 */
static thash_data_t *vtlb_next_overlap(thash_cb_t *hcb)
{
    thash_data_t    *ovl;
    thash_internal_t *priv = &hcb->priv;
    u64 addr,rr_psize;
    rr_t  vrr;

    if ( priv->s_sect.tr ) {
        ovl = vtr_find_next_overlap (hcb);
        if ( ovl ) return ovl;
        priv->s_sect.tr = 0;
    }
    if ( priv->s_sect.v == 0 ) return NULL;
    vrr = (hcb->get_rr_fn)(hcb->vcpu,priv->_curva);
    rr_psize = PSIZE(vrr.ps);

    while ( priv->_curva < priv->_eva ) {
        if ( !INVALID_ENTRY(hcb, priv->hash_base) ) {
            ovl = _vtlb_next_overlap_in_chain(hcb);
            if ( ovl ) {
                priv->cur_cch = ovl->next;
                return ovl;
            }
        }
        priv->_curva += rr_psize;
        priv->hash_base = (hcb->hash_func)( hcb->pta,
            priv->_curva, priv->rid, priv->ps);
        priv->cur_cch = priv->hash_base;
    }
    return NULL;
}

static thash_data_t *vhpt_next_overlap(thash_cb_t *hcb)
{
    thash_data_t    *ovl;
    thash_internal_t *priv = &hcb->priv;
    u64 addr,rr_psize;
    rr_t  vrr;

    vrr = (hcb->get_rr_fn)(hcb->vcpu,priv->_curva);
    rr_psize = PSIZE(vrr.ps);

    while ( priv->_curva < priv->_eva ) {
        if ( !INVALID_ENTRY(hcb, priv->hash_base) ) {
            ovl = _vhpt_next_overlap_in_chain(hcb);
            if ( ovl ) {
                priv->cur_cch = ovl->next;
                return ovl;
            }
        }
        priv->_curva += rr_psize;
        priv->hash_base = (hcb->hash_func)( hcb->pta,
            priv->_curva, priv->rid, priv->ps);
        priv->tag = (hcb->vs->tag_func)( hcb->pta,
            priv->_curva, priv->rid, priv->ps);
        priv->cur_cch = priv->hash_base;
    }
    return NULL;
}

/*
 * Find and purge overlapping entries in the hash table and its collision chain.
 * PARAMS:
 *  1: in: TLB-format entry; rid:ps must match vrr[].
 *     rid, va & ps identify the address space for the purge.
 *  2: section can be a combination of TR, TC and FM (THASH_SECTION_XX).
 *  3: cl means I side or D side.
 */
void thash_purge_entries(thash_cb_t *hcb,
            thash_data_t *in, search_section_t p_sect)
{
    return thash_purge_entries_ex(hcb, in->rid, in->vadr,
            in->ps, p_sect, in->cl);
}

void thash_purge_entries_ex(thash_cb_t *hcb,
            u64 rid, u64 va, u64 ps,
            search_section_t p_sect,
            CACHE_LINE_TYPE cl)
{
    thash_data_t    *ovl;

    ovl = (hcb->find_overlap)(hcb, va, PSIZE(ps), rid, cl, p_sect);
    while ( ovl != NULL ) {
        (hcb->rem_hash)(hcb, ovl);
        ovl = (hcb->next_overlap)(hcb);
    };
}
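
/*
 * Illustrative sketch (not part of the original file): purging both TR and TC
 * translations of a hypothetical 16KB page when emulating a guest purge might
 * look like
 *
 *     search_section_t sect;
 *     sect.v = 0;
 *     sect.tr = 1;
 *     sect.tc = 1;
 *     thash_purge_entries_ex(vtlb_hcb, rid, va, 14, sect, DSIDE_TLB);
 *
 * where vtlb_hcb is a hypothetical VTLB control block and DSIDE_TLB is assumed
 * to be the D-side counterpart of the ISIDE_TLB constant used in this file.
 */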

/*
 * Purge overlapping TCs and then insert the new entry to emulate itc ops.
 * Notes: Only TC entries can be purged and inserted this way.
 */
void thash_purge_and_insert(thash_cb_t *hcb, thash_data_t *in)
{
    thash_data_t    *ovl;
    search_section_t sections;

#ifdef   XEN_DEBUGGER
    rr_t  vrr;      // needed by the sanity check below

    vrr = (hcb->get_rr_fn)(hcb->vcpu,in->vadr);
    if ( in->ps != vrr.ps || hcb->ht != THASH_TLB || !in->tc ) {
        panic ("Oops, wrong call for purge_and_insert\n");
        return;
    }
#endif
    in->vadr = PAGEALIGN(in->vadr,in->ps);
    in->ppn = PAGEALIGN(in->ppn, in->ps-12);
    sections.tr = 0;
    sections.tc = 1;
    ovl = (hcb->find_overlap)(hcb, in->vadr, PSIZE(in->ps),
                in->rid, in->cl, sections);
    if(ovl)
        (hcb->rem_hash)(hcb, ovl);
#ifdef   XEN_DEBUGGER
    ovl = (hcb->next_overlap)(hcb);
    if ( ovl ) {
        panic ("Oops, 2+ overlaps for purge_and_insert\n");
        return;
    }
#endif
    (hcb->ins_hash)(hcb, in, in->vadr);
}

/*
 * Purge all TCs or VHPT entries, including those in the hash table.
 */
// TODO: add sections.
void thash_purge_all(thash_cb_t *hcb)
{
    thash_data_t    *hash_table;

#ifdef  VTLB_DEBUG
    extern u64  sanity_check;
    static u64 statistics_before_purge_all=0;
    if ( statistics_before_purge_all ) {
        sanity_check = 1;
        check_vtlb_sanity(hcb);
    }
#endif

    hash_table = (thash_data_t*)((u64)hcb->hash + hcb->hash_sz);

    for (--hash_table;(u64)hash_table >= (u64)hcb->hash;hash_table--) {
        thash_rem_line(hcb, hash_table);
    }
}

/*
 * Look up the hash table and its collision chain to find an entry
 * covering this address rid:va.
 *
 * INPUT:
 *  in: TLB format for both VHPT & TLB.
 */
thash_data_t *vtlb_lookup(thash_cb_t *hcb,
            thash_data_t *in)
{
    return vtlb_lookup_ex(hcb, in->rid, in->vadr, in->cl);
}

thash_data_t *vtlb_lookup_ex(thash_cb_t *hcb,
            u64 rid, u64 va,
            CACHE_LINE_TYPE cl)
{
    thash_data_t    *hash_table, *cch;
    u64     tag;
    rr_t    vrr;

    ASSERT ( hcb->ht == THASH_VTLB );

    cch = __vtr_lookup(hcb, rid, va, cl);
    if ( cch ) return cch;

    vrr = (hcb->get_rr_fn)(hcb->vcpu,va);
    hash_table = (hcb->hash_func)( hcb->pta,va, rid, vrr.ps);

    if ( INVALID_ENTRY(hcb, hash_table ) )
        return NULL;

    for (cch=hash_table; cch; cch = cch->next) {
        if ( __is_translated(cch, rid, va, cl) )
            return cch;
    }
    return NULL;
}
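
/*
 * Illustrative sketch (not part of the original file): a miss handler that
 * needs the guest translation for a data access might call
 *
 *     thash_data_t *hit = vtlb_lookup_ex(vtlb_hcb, rid, fault_va, DSIDE_TLB);
 *     if ( hit == NULL )
 *         ... reflect a TLB miss to the guest ...
 *
 * where vtlb_hcb and fault_va are hypothetical names for the vcpu's VTLB
 * control block and the faulting virtual address, and DSIDE_TLB is assumed to
 * be the D-side counterpart of ISIDE_TLB.
 */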

/*
 * Lock/Unlock a TC entry if found.
 * NOTES: Only pages of the preferred size can be handled.
 * return:
 *  1: no matching entry was found (failure)
 *  0: success
 */
int thash_lock_tc(thash_cb_t *hcb, u64 va, u64 size, int rid, char cl, int lock)
{
    thash_data_t    *ovl;
    search_section_t sections;

    sections.tr = 1;
    sections.tc = 1;
    ovl = (hcb->find_overlap)(hcb, va, size, rid, cl, sections);
    if ( ovl ) {
        if ( !ovl->tc ) {
//            panic("Oops, TR for lock\n");
            return 0;
        }
        else if ( lock ) {
            if ( ovl->locked ) {
                DPRINTK("Oops, already locked entry\n");
            }
            ovl->locked = 1;
        }
        else if ( !lock ) {
            if ( !ovl->locked ) {
                DPRINTK("Oops, already unlocked entry\n");
            }
            ovl->locked = 0;
        }
        return 0;
    }
    return 1;
}
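
/*
 * Illustrative sketch (not part of the original file): pinning and later
 * releasing a hypothetical 4KB translation:
 *
 *     if ( thash_lock_tc(hcb, va, PSIZE(12), rid, cl, 1) )
 *         ... nothing to lock: no TC entry covers va ...
 *     ...
 *     thash_lock_tc(hcb, va, PSIZE(12), rid, cl, 0);    // unlock again
 */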

/*
 * Notifier called when a TLB entry is deleted from the hash table and its
 * collision chain.
 * NOTES:
 *  The typical situation is that a TLB remove needs to inform
 *  the VHPT to remove the entry too.
 * PARAMS:
 *  1: hcb is the TLB object.
 *  2: The format of the entry is always TLB.
 */
void tlb_remove_notifier(thash_cb_t *hcb, thash_data_t *entry)
{
    thash_cb_t  *vhpt;
    search_section_t    s_sect;

    s_sect.v = 0;
    thash_purge_entries(hcb->ts->vhpt, entry, s_sect);
    machine_tlb_purge(entry->rid, entry->vadr, entry->ps);
}

/*
 * Initialize internal control data before service.
 */
void thash_init(thash_cb_t *hcb, u64 sz)
{
    thash_data_t    *hash_table;

    cch_mem_init (hcb);
    hcb->magic = THASH_CB_MAGIC;
    hcb->pta.val = hcb->hash;
    hcb->pta.vf = 1;
    hcb->pta.ve = 1;
    hcb->pta.size = sz;
    hcb->get_rr_fn = vmmu_get_rr;
    ASSERT ( hcb->hash_sz % sizeof(thash_data_t) == 0 );
    if ( hcb->ht == THASH_TLB ) {
        hcb->remove_notifier =  tlb_remove_notifier;
        hcb->find_overlap = vtlb_find_overlap;
        hcb->next_overlap = vtlb_next_overlap;
        hcb->rem_hash = rem_vtlb;
        hcb->ins_hash = vtlb_insert;
        __init_tr(hcb);
    }
    else {
        hcb->remove_notifier =  NULL;
        hcb->find_overlap = vhpt_find_overlap;
        hcb->next_overlap = vhpt_next_overlap;
        hcb->rem_hash = rem_thash;
        hcb->ins_hash = vhpt_insert;
    }
    hash_table = (thash_data_t*)((u64)hcb->hash + hcb->hash_sz);

    for (--hash_table;(u64)hash_table >= (u64)hcb->hash;hash_table--) {
        INVALIDATE_HASH(hcb,hash_table);
    }
}
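
/*
 * Illustrative sketch (not part of the original file): the owner of a VTLB or
 * VHPT is expected to fill in the storage fields before calling thash_init(),
 * roughly as
 *
 *     hcb->ht      = THASH_TLB;        // or THASH_VHPT
 *     hcb->vcpu    = v;                // hypothetical vcpu pointer
 *     hcb->hash    = hash_buf;         // hypothetical pre-allocated buffers
 *     hcb->hash_sz = hash_buf_size;
 *     hcb->cch_buf = cch_buf;
 *     hcb->cch_sz  = cch_buf_size;
 *     thash_init(hcb, sz);             // sz becomes pta.size for hash/tag
 */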

#ifdef  VTLB_DEBUG
static  u64 cch_length_statistics[MAX_CCH_LENGTH+1];
u64  sanity_check=0;
u64 vtlb_chain_sanity(thash_cb_t *vtlb, thash_cb_t *vhpt, thash_data_t *hash)
{
    thash_data_t *cch;
    thash_data_t    *ovl;
    search_section_t s_sect;
    u64     num=0;

    s_sect.v = 0;
    for (cch=hash; cch; cch=cch->next) {
        ovl = thash_find_overlap(vhpt, cch, s_sect);
        while ( ovl != NULL ) {
            ovl->checked = 1;
            ovl = (vhpt->next_overlap)(vhpt);
        };
        num ++;
    }
    if ( num >= MAX_CCH_LENGTH ) {
        cch_length_statistics[MAX_CCH_LENGTH] ++;
    }
    else {
        cch_length_statistics[num] ++;
    }
    return num;
}

void check_vtlb_sanity(thash_cb_t *vtlb)
{
//    struct pfn_info *page;
    u64  hash_num, i, psr;
    static u64 check_ok_num, check_fail_num,check_invalid;
//  void *vb1, *vb2;
    thash_data_t  *hash, *cch;
    thash_data_t    *ovl;
    search_section_t s_sect;
    thash_cb_t *vhpt = vtlb->ts->vhpt;
    u64   invalid_ratio;

    if ( sanity_check == 0 ) return;
    sanity_check --;
    s_sect.v = 0;
//    page = alloc_domheap_pages (NULL, VCPU_TLB_ORDER, 0);
//    if ( page == NULL ) {
//        panic("No enough contiguous memory for init_domain_mm\n");
//    };
//    vb1 = page_to_virt(page);
//    printf("Allocated page=%lp vbase=%lp\n", page, vb1);
//    vb2 = vb1 + vtlb->hash_sz;
    hash_num = vhpt->hash_sz / sizeof(thash_data_t);
//    printf("vb2=%lp, size=%lx hash_num=%lx\n", vb2, vhpt->hash_sz, hash_num);
    printf("vtlb=%lp, hash=%lp size=0x%lx; vhpt=%lp, hash=%lp size=0x%lx\n",
            vtlb, vtlb->hash,vtlb->hash_sz,
            vhpt, vhpt->hash, vhpt->hash_sz);
    //memcpy(vb1, vtlb->hash, vtlb->hash_sz);
    //memcpy(vb2, vhpt->hash, vhpt->hash_sz);
    for ( i=0; i < sizeof(cch_length_statistics)/sizeof(cch_length_statistics[0]); i++ ) {
        cch_length_statistics[i] = 0;
    }

    local_irq_save(psr);

    hash = vhpt->hash;
    for (i=0; i < hash_num; i++) {
        if ( !INVALID_ENTRY(vhpt, hash) ) {
            for ( cch= hash; cch; cch=cch->next) {
                cch->checked = 0;
            }
        }
        hash ++;
    }
    printf("Done vhpt clear checked flag, hash_num=0x%lx\n", hash_num);
    check_invalid = 0;
    check_ok_num=0;
    hash = vtlb->hash;
    for ( i=0; i< hash_num; i++ ) {
        if ( !INVALID_ENTRY(vtlb, hash) ) {
            check_ok_num += vtlb_chain_sanity(vtlb, vhpt, hash);
        }
        else {
            check_invalid++;
        }
        hash ++;
    }
    printf("Done vtlb entry check, hash=%lp\n", hash);
    printf("check_ok_num = 0x%lx check_invalid=0x%lx\n", check_ok_num,check_invalid);
    invalid_ratio = 1000*check_invalid / hash_num;
    printf("%02ld.%01ld%% entries are invalid\n",
            invalid_ratio/10, invalid_ratio % 10 );
    for (i=0; i<NDTRS; i++) {
        ovl = thash_find_overlap(vhpt, &vtlb->ts->dtr[i], s_sect);
        while ( ovl != NULL ) {
            ovl->checked = 1;
            ovl = (vhpt->next_overlap)(vhpt);
        };
    }
    printf("Done dTR\n");
    for (i=0; i<NITRS; i++) {
        ovl = thash_find_overlap(vhpt, &vtlb->ts->itr[i], s_sect);
        while ( ovl != NULL ) {
            ovl->checked = 1;
            ovl = (vhpt->next_overlap)(vhpt);
        };
    }
    printf("Done iTR\n");
    check_fail_num = 0;
    check_invalid = 0;
    check_ok_num=0;
    hash = vhpt->hash;
    for (i=0; i < hash_num; i++) {
        if ( !INVALID_ENTRY(vhpt, hash) ) {
            for ( cch= hash; cch; cch=cch->next) {
                if ( !cch->checked ) {
                    printf ("!!!Hash=%lp cch=%lp not within vtlb\n", hash, cch);
                    check_fail_num ++;
                }
                else {
                    check_ok_num++;
                }
            }
        }
        else {
            check_invalid ++;
        }
        hash ++;
    }
    local_irq_restore(psr);
    printf("check_ok_num=0x%lx check_fail_num=0x%lx check_invalid=0x%lx\n",
            check_ok_num, check_fail_num, check_invalid);
    //memcpy(vtlb->hash, vb1, vtlb->hash_sz);
    //memcpy(vhpt->hash, vb2, vhpt->hash_sz);
    printf("The statistics of collision chain length is listed\n");
    for ( i=0; i < sizeof(cch_length_statistics)/sizeof(cch_length_statistics[0]); i++ ) {
        printf("CCH length=%02ld, chain number=%ld\n", i, cch_length_statistics[i]);
    }
//    free_domheap_pages(page, VCPU_TLB_ORDER);
    printf("Done check_vtlb\n");
}

void dump_vtlb(thash_cb_t *vtlb)
{
    static u64  dump_vtlb=0;
    thash_data_t  *hash, *cch, *tr;
    u64 hash_num,i;

    if ( dump_vtlb == 0 ) return;
    dump_vtlb --;
    hash_num = vtlb->hash_sz / sizeof(thash_data_t);
    hash = vtlb->hash;

    printf("Dump vTC\n");
    for ( i = 0; i < hash_num; i++ ) {
        if ( !INVALID_ENTRY(vtlb, hash) ) {
            printf("VTLB at hash=%lp\n", hash);
            for (cch=hash; cch; cch=cch->next) {
                printf("Entry %lp va=%lx ps=%lx rid=%lx\n",
                    cch, cch->vadr, cch->ps, cch->rid);
            }
        }
        hash ++;
    }
    printf("Dump vDTR\n");
    for (i=0; i<NDTRS; i++) {
        tr = &DTR(vtlb,i);
        printf("Entry %lp va=%lx ps=%lx rid=%lx\n",
            tr, tr->vadr, tr->ps, tr->rid);
    }
    printf("Dump vITR\n");
    for (i=0; i<NITRS; i++) {
        tr = &ITR(vtlb,i);
        printf("Entry %lp va=%lx ps=%lx rid=%lx\n",
            tr, tr->vadr, tr->ps, tr->rid);
    }
    printf("End of vTLB dump\n");
}
#endif