ia64/xen-unstable

view xen/arch/ia64/vtlb.c @ 5797:ca44d2dbb273

Intel's pre-bk->hg transition patches
Signed-off-by: Eddie Dong <Eddie.dong@intel.com>
Signed-off-by: Anthony Xu <Anthony.xu@intel.com>
Signed-off-by: Kevin Tian <Kevin.tian@intel.com>
author djm@kirby.fc.hp.com
date Sat Jul 09 07:58:56 2005 -0700 (2005-07-09)
parents 649cd37aa1ab
children a83ac0806d6b
line source
2 /* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
3 /*
4 * vtlb.c: guest virtual tlb handling module.
5 * Copyright (c) 2004, Intel Corporation.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2, as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along with
17 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
18 * Place - Suite 330, Boston, MA 02111-1307 USA.
19 *
20 * Yaozu Dong (Eddie Dong) (Eddie.dong@intel.com)
21 * XiaoYan Feng (Fleming Feng) (Fleming.feng@intel.com)
22 */
24 #include <linux/sched.h>
25 #include <asm/tlb.h>
26 #include <asm/vmx_mm_def.h>
27 #include <asm/gcc_intrin.h>
28 #include <xen/interrupt.h>
29 #include <asm/vmx_vcpu.h>
30 #define MAX_CCH_LENGTH 40
33 static void cch_mem_init(thash_cb_t *hcb)
34 {
35 thash_cch_mem_t *p, *q;
37 hcb->cch_freelist = p = hcb->cch_buf;
39 for ( q=p+1; (u64)(q + 1) <= (u64)hcb->cch_buf + hcb->cch_sz;
40 p++, q++ ) {
41 p->next = q;
42 }
43 p->next = NULL;
44 }
46 static thash_data_t *cch_alloc(thash_cb_t *hcb)
47 {
48 thash_cch_mem_t *p;
50 if ( (p = hcb->cch_freelist) != NULL ) {
51 hcb->cch_freelist = p->next;
52 }
53 return &(p->data);
54 }
56 static void cch_free(thash_cb_t *hcb, thash_data_t *cch)
57 {
58 thash_cch_mem_t *p = (thash_cch_mem_t*)cch;
60 p->next = hcb->cch_freelist;
61 hcb->cch_freelist = p;
62 }
64 /*
65 * Check to see if the address rid:va is translated by the TLB
66 */
67 static int __is_translated(thash_data_t *tlb, u64 rid, u64 va, CACHE_LINE_TYPE cl)
68 {
69 u64 size1,sa1,ea1;
71 if ( tlb->rid != rid || tlb->cl != cl )
72 return 0;
73 size1 = PSIZE(tlb->ps);
74 sa1 = tlb->vadr & ~(size1-1); // mask the low address bits
75 ea1 = sa1 + size1;
77 if ( va >= sa1 && (va < ea1 || ea1 == 0) )
78 return 1;
79 else
80 return 0;
81 }
83 /*
84 * Only for TLB format.
85 */
86 static int
87 __is_tlb_overlap(thash_cb_t *hcb,thash_data_t *entry,int rid, char cl, u64 sva, u64 eva)
88 {
89 uint64_t size1,size2,sa1,ea1,ea2;
91 if ( entry->invalid || entry->rid != rid || entry->cl != cl ) {
92 return 0;
93 }
94 size1=PSIZE(entry->ps);
95 sa1 = entry->vadr & ~(size1-1); // mask the low address bits
96 ea1 = sa1 + size1;
97 if ( (sva >= ea1 && ea1 != 0) || (eva <= sa1 && eva != 0) )
98 return 0;
99 else
100 return 1;
102 }
104 static void __rem_tr (thash_cb_t *hcb, thash_data_t *tr)
105 {
106 if ( hcb->remove_notifier ) {
107 (hcb->remove_notifier)(hcb,tr);
108 }
109 tr->invalid = 1;
110 }
112 static inline void __set_tr (thash_data_t *tr, thash_data_t *data, int idx)
113 {
114 *tr = *data;
115 tr->tr_idx = idx;
116 }
119 static void __init_tr(thash_cb_t *hcb)
120 {
121 int i;
122 thash_data_t *tr;
124 for ( i=0, tr = &ITR(hcb,0); i<NITRS; i++ ) {
125 tr[i].invalid = 1;
126 }
127 for ( i=0, tr = &DTR(hcb,0); i<NDTRS; i++ ) {
128 tr[i].invalid = 1;
129 }
130 }
132 /*
133 * Replace TR entry.
134 */
135 static void rep_tr(thash_cb_t *hcb,thash_data_t *insert, int idx)
136 {
137 thash_data_t *tr;
139 if ( insert->cl == ISIDE_TLB ) {
140 tr = &ITR(hcb,idx);
141 }
142 else {
143 tr = &DTR(hcb,idx);
144 }
145 if ( !INVALID_TLB(tr) ) {
146 __rem_tr(hcb, tr);
147 }
148 __set_tr (tr, insert, idx);
149 }
151 /*
152 * remove TR entry.
153 */
154 static void rem_tr(thash_cb_t *hcb,CACHE_LINE_TYPE cl, int idx)
155 {
156 thash_data_t *tr;
158 if ( cl == ISIDE_TLB ) {
159 tr = &ITR(hcb,idx);
160 }
161 else {
162 tr = &DTR(hcb,idx);
163 }
164 if ( !INVALID_TLB(tr) ) {
165 __rem_tr(hcb, tr);
166 }
167 }
169 /*
170 * Delete an thash entry in collision chain.
171 * prev: the previous entry.
172 * rem: the removed entry.
173 */
174 static void __rem_chain(thash_cb_t *hcb/*, thash_data_t *prev*/, thash_data_t *rem)
175 {
176 //prev->next = rem->next;
177 if ( hcb->remove_notifier ) {
178 (hcb->remove_notifier)(hcb,rem);
179 }
180 cch_free (hcb, rem);
181 }
183 /*
184 * Delete an thash entry leading collision chain.
185 */
186 static void __rem_hash_head(thash_cb_t *hcb, thash_data_t *hash)
187 {
188 thash_data_t *next=hash->next;
190 if ( hcb->remove_notifier ) {
191 (hcb->remove_notifier)(hcb,hash);
192 }
193 if ( next != NULL ) {
194 *hash = *next;
195 cch_free (hcb, next);
196 }
197 else {
198 INVALIDATE_HASH(hcb, hash);
199 }
200 }
202 thash_data_t *__vtr_lookup(thash_cb_t *hcb,
203 u64 rid, u64 va,
204 CACHE_LINE_TYPE cl)
205 {
206 thash_data_t *tr;
207 int num,i;
209 if ( cl == ISIDE_TLB ) {
210 tr = &ITR(hcb,0);
211 num = NITRS;
212 }
213 else {
214 tr = &DTR(hcb,0);
215 num = NDTRS;
216 }
217 for ( i=0; i<num; i++ ) {
218 if ( !INVALID_ENTRY(hcb,&tr[i]) &&
219 __is_translated(&tr[i], rid, va, cl) )
220 return &tr[i];
221 }
222 return NULL;
223 }
226 /*
227 * Find overlap VHPT entry within current collision chain
228 * base on internal priv info.
229 */
230 static inline thash_data_t* _vhpt_next_overlap_in_chain(thash_cb_t *hcb)
231 {
232 thash_data_t *cch;
233 thash_internal_t *priv = &hcb->priv;
236 for (cch=priv->cur_cch; cch; cch = cch->next) {
237 if ( priv->tag == cch->etag ) {
238 return cch;
239 }
240 }
241 return NULL;
242 }
244 /*
245 * Find overlap TLB/VHPT entry within current collision chain
246 * base on internal priv info.
247 */
248 static thash_data_t *_vtlb_next_overlap_in_chain(thash_cb_t *hcb)
249 {
250 thash_data_t *cch;
251 thash_internal_t *priv = &hcb->priv;
253 /* Find overlap TLB entry */
254 for (cch=priv->cur_cch; cch; cch = cch->next) {
255 if ( ( cch->tc ? priv->s_sect.tc : priv->s_sect.tr ) &&
256 __is_tlb_overlap(hcb, cch, priv->rid, priv->cl,
257 priv->_curva, priv->_eva) ) {
258 return cch;
259 }
260 }
261 return NULL;
262 }
264 /*
265 * Get the machine format of VHPT entry.
266 * PARAS:
267 * 1: tlb: means the tlb format hash entry converting to VHPT.
268 * 2: va means the guest virtual address that must be coverd by
269 * the translated machine VHPT.
270 * 3: vhpt: means the machine format VHPT converting from tlb.
271 * NOTES:
272 * 1: In case of the machine address is discontiguous,
273 * "tlb" needs to be covered by several machine VHPT. va
274 * is used to choice one of them.
275 * 2: Foreign map is supported in this API.
276 * RETURN:
277 * 0/1: means successful or fail.
278 *
279 */
/*
 * Convert a guest TLB-format entry into a machine-format VHPT entry.
 * Returns 1 on success, 0 when the guest ppn has no machine mapping.
 * See the block comment above for the full parameter contract.
 */
int __tlb_to_vhpt(thash_cb_t *hcb,
        thash_data_t *tlb, u64 va,
        thash_data_t *vhpt)
{
    u64 pages,mfn;
    rr_t vrr;

    ASSERT ( hcb->ht == THASH_VHPT );
    /* Page size for the translation comes from the region register of va. */
    vrr = (hcb->get_rr_fn)(hcb->vcpu,va);
    pages = PSIZE(vrr.ps) >> PAGE_SHIFT;
    /* Map the guest physical frame to a machine frame. */
    mfn = (hcb->vs->get_mfn)(DOMID_SELF,tlb->ppn, pages);
    if ( mfn == INVALID_MFN ) return 0;   /* no machine backing: fail */

    // TODO with machine discontinuous address space issue.
    vhpt->etag = (hcb->vs->tag_func)( hcb->pta,
            tlb->vadr, tlb->rid, tlb->ps);
    //vhpt->ti = 0;
    /* Strip architecturally reserved bits before use by hardware. */
    vhpt->itir = tlb->itir & ~ITIR_RV_MASK;
    vhpt->page_flags = tlb->page_flags & ~PAGE_FLAGS_RV_MASK;
    vhpt->ppn = mfn;
    vhpt->next = 0;
    return 1;
}
305 /*
306 * Insert an entry to hash table.
307 * NOTES:
308 * 1: TLB entry may be TR, TC or Foreign Map. For TR entry,
309 * itr[]/dtr[] need to be updated too.
310 * 2: Inserting to collision chain may trigger recycling if
311 * the buffer for collision chain is empty.
312 * 3: The new entry is inserted at the next of hash table.
313 * (I.e. head of the collision chain)
314 * 4: The buffer holding the entry is allocated internally
315 * from cch_buf or just in the hash table.
316 * 5: Return the entry in hash table or collision chain.
317 * 6: Input parameter, entry, should be in TLB format.
318 * I.e. Has va, rid, ps...
319 * 7: This API is invoked by emulating ITC/ITR and tlb_miss.
320 *
321 */
323 void thash_tr_insert(thash_cb_t *hcb, thash_data_t *entry, u64 va, int idx)
324 {
325 if ( hcb->ht != THASH_TLB || entry->tc ) {
326 panic("wrong parameter\n");
327 }
328 entry->vadr = PAGEALIGN(entry->vadr,entry->ps);
329 entry->ppn = PAGEALIGN(entry->ppn, entry->ps-12);
330 rep_tr(hcb, entry, idx);
331 return ;
332 }
334 thash_data_t *__alloc_chain(thash_cb_t *hcb,thash_data_t *entry)
335 {
336 thash_data_t *cch;
338 cch = cch_alloc(hcb);
339 if(cch == NULL){
340 // recycle
341 if ( hcb->recycle_notifier ) {
342 hcb->recycle_notifier(hcb,(u64)entry);
343 }
344 thash_purge_all(hcb);
345 cch = cch_alloc(hcb);
346 }
347 return cch;
348 }
350 /*
351 * Insert an entry into hash TLB or VHPT.
352 * NOTES:
353 * 1: When inserting VHPT to thash, "va" is a must covered
354 * address by the inserted machine VHPT entry.
355 * 2: The format of entry is always in TLB.
356 * 3: The caller need to make sure the new entry will not overlap
357 * with any existed entry.
358 */
359 void vtlb_insert(thash_cb_t *hcb, thash_data_t *entry, u64 va)
360 {
361 thash_data_t *hash_table, *cch;
362 rr_t vrr;
364 hash_table = (hcb->hash_func)(hcb->pta,
365 va, entry->rid, entry->ps);
366 if( INVALID_ENTRY(hcb, hash_table) ) {
367 *hash_table = *entry;
368 hash_table->next = 0;
369 }
370 else {
371 // TODO: Add collision chain length limitation.
372 cch = __alloc_chain(hcb,entry);
374 *cch = *hash_table;
375 *hash_table = *entry;
376 hash_table->next = cch;
377 }
378 thash_insert (hcb->ts->vhpt, entry, va);
379 return ;
380 }
382 static void vhpt_insert(thash_cb_t *hcb, thash_data_t *entry, u64 va)
383 {
384 thash_data_t *hash_table, *cch;
385 rr_t vrr;
387 hash_table = (hcb->hash_func)(hcb->pta,
388 va, entry->rid, entry->ps);
389 if( INVALID_ENTRY(hcb, hash_table) ) {
390 if ( !__tlb_to_vhpt(hcb, entry, va, hash_table) ) {
391 panic("Can't convert to machine VHPT entry\n");
392 }
393 hash_table->next = 0;
394 }
395 else {
396 // TODO: Add collision chain length limitation.
397 cch = __alloc_chain(hcb,entry);
399 *cch = *hash_table;
400 if ( !__tlb_to_vhpt(hcb, entry, va, hash_table) ) {
401 panic("Can't convert to machine VHPT entry\n");
402 }
403 hash_table->next = cch;
404 if(hash_table->tag==hash_table->next->tag)
405 while(1);
406 }
407 return /*hash_table*/;
408 }
410 void thash_insert(thash_cb_t *hcb, thash_data_t *entry, u64 va)
411 {
412 thash_data_t *hash_table;
413 rr_t vrr;
415 vrr = (hcb->get_rr_fn)(hcb->vcpu,entry->vadr);
416 if ( entry->ps != vrr.ps && entry->tc ) {
417 panic("Not support for multiple page size now\n");
418 }
419 entry->vadr = PAGEALIGN(entry->vadr,entry->ps);
420 entry->ppn = PAGEALIGN(entry->ppn, entry->ps-12);
421 (hcb->ins_hash)(hcb, entry, va);
423 }
425 static void rem_thash(thash_cb_t *hcb, thash_data_t *entry)
426 {
427 thash_data_t *hash_table, *p, *q;
428 thash_internal_t *priv = &hcb->priv;
429 int idx;
431 hash_table = priv->hash_base;
432 if ( hash_table == entry ) {
433 __rem_hash_head (hcb, entry);
434 return ;
435 }
436 // remove from collision chain
437 p = hash_table;
438 for ( q=p->next; q; q = p->next ) {
439 if ( q == entry ) {
440 p->next = q->next;
441 __rem_chain(hcb, entry);
442 return ;
443 }
444 p = q;
445 }
446 panic("Entry not existed or bad sequence\n");
447 }
449 static void rem_vtlb(thash_cb_t *hcb, thash_data_t *entry)
450 {
451 thash_data_t *hash_table, *p, *q;
452 thash_internal_t *priv = &hcb->priv;
453 int idx;
455 if ( !entry->tc ) {
456 return rem_tr(hcb, entry->cl, entry->tr_idx);
457 }
458 rem_thash(hcb, entry);
459 }
461 int cch_depth=0;
462 /*
463 * Purge the collision chain starting from cch.
464 * NOTE:
465 * For those UN-Purgable entries(FM), this function will return
466 * the head of left collision chain.
467 */
/*
 * Purge the collision chain starting from cch, recursing to the tail
 * first so the surviving (un-purgable) entries are relinked back to
 * front. Returns the new head of the remaining chain, or NULL if the
 * whole chain was purged.
 * NOTE:
 *     For those UN-Purgable entries(FM), this function will return
 *     the head of left collision chain.
 */
static thash_data_t *thash_rem_cch(thash_cb_t *hcb, thash_data_t *cch)
{
    thash_data_t *next;

    /* cch_depth is the file-scope counter reset by thash_rem_line;
     * it guards against corrupt (cyclic or over-long) chains. */
    if ( ++cch_depth > MAX_CCH_LENGTH ) {
        printf ("cch length > MAX_CCH_LENGTH, exceed the expected length\n");
        while(1);   /* deliberate hang for debugging a corrupt chain */
    }
    /* Process the rest of the chain before deciding this node's fate. */
    if ( cch -> next ) {
        next = thash_rem_cch(hcb, cch->next);
    }
    else {
        next = NULL;
    }
    if ( PURGABLE_ENTRY(hcb, cch) ) {
        /* Notify and free this node; splice the survivors past it. */
        __rem_chain(hcb, cch);
        return next;
    }
    else {
        /* Keep this node, re-linking it to the surviving tail. */
        cch->next = next;
        return cch;
    }
}
492 /*
493 * Purge one hash line (include the entry in hash table).
494 * Can only be called by thash_purge_all.
495 * Input:
496 * hash: The head of collision chain (hash table)
497 *
498 */
499 static void thash_rem_line(thash_cb_t *hcb, thash_data_t *hash)
500 {
501 if ( INVALID_ENTRY(hcb, hash) ) return;
503 if ( hash->next ) {
504 cch_depth = 0;
505 hash->next = thash_rem_cch(hcb, hash->next);
506 }
507 // Then hash table itself.
508 if ( PURGABLE_ENTRY(hcb, hash) ) {
509 __rem_hash_head(hcb, hash);
510 }
511 }
514 /*
515 * Find an overlap entry in hash table and its collision chain.
516 * Refer to SDM2 4.1.1.4 for overlap definition.
517 * PARAS:
518 * 1: in: TLB format entry, rid:ps must be same with vrr[].
519 * va & ps identify the address space for overlap lookup
520 * 2: section can be combination of TR, TC and FM. (THASH_SECTION_XX)
521 * 3: cl means I side or D side.
522 * RETURNS:
523 * NULL to indicate the end of findings.
524 * NOTES:
525 *
526 */
527 thash_data_t *thash_find_overlap(thash_cb_t *hcb,
528 thash_data_t *in, search_section_t s_sect)
529 {
530 return (hcb->find_overlap)(hcb, in->vadr,
531 PSIZE(in->ps), in->rid, in->cl, s_sect);
532 }
534 static thash_data_t *vtlb_find_overlap(thash_cb_t *hcb,
535 u64 va, u64 size, int rid, char cl, search_section_t s_sect)
536 {
537 thash_data_t *hash_table;
538 thash_internal_t *priv = &hcb->priv;
539 u64 tag;
540 rr_t vrr;
542 priv->_curva = va & ~(size-1);
543 priv->_eva = priv->_curva + size;
544 priv->rid = rid;
545 vrr = (hcb->get_rr_fn)(hcb->vcpu,va);
546 priv->ps = vrr.ps;
547 hash_table = (hcb->hash_func)(hcb->pta,
548 priv->_curva, rid, priv->ps);
550 priv->s_sect = s_sect;
551 priv->cl = cl;
552 priv->_tr_idx = 0;
553 priv->hash_base = hash_table;
554 priv->cur_cch = hash_table;
555 return (hcb->next_overlap)(hcb);
556 }
558 static thash_data_t *vhpt_find_overlap(thash_cb_t *hcb,
559 u64 va, u64 size, int rid, char cl, search_section_t s_sect)
560 {
561 thash_data_t *hash_table;
562 thash_internal_t *priv = &hcb->priv;
563 u64 tag;
564 rr_t vrr;
566 priv->_curva = va & ~(size-1);
567 priv->_eva = priv->_curva + size;
568 priv->rid = rid;
569 vrr = (hcb->get_rr_fn)(hcb->vcpu,va);
570 priv->ps = vrr.ps;
571 hash_table = (hcb->hash_func)( hcb->pta,
572 priv->_curva, rid, priv->ps);
573 tag = (hcb->vs->tag_func)( hcb->pta,
574 priv->_curva, rid, priv->ps);
576 priv->tag = tag;
577 priv->hash_base = hash_table;
578 priv->cur_cch = hash_table;
579 return (hcb->next_overlap)(hcb);
580 }
583 static thash_data_t *vtr_find_next_overlap(thash_cb_t *hcb)
584 {
585 thash_data_t *tr;
586 thash_internal_t *priv = &hcb->priv;
587 int num;
589 if ( priv->cl == ISIDE_TLB ) {
590 num = NITRS;
591 tr = &ITR(hcb,0);
592 }
593 else {
594 num = NDTRS;
595 tr = &DTR(hcb,0);
596 }
597 for (; priv->_tr_idx < num; priv->_tr_idx ++ ) {
598 if ( __is_tlb_overlap(hcb, &tr[priv->_tr_idx],
599 priv->rid, priv->cl,
600 priv->_curva, priv->_eva) ) {
601 return &tr[priv->_tr_idx++];
602 }
603 }
604 return NULL;
605 }
607 /*
608 * Similar with vtlb_next_overlap but find next entry.
609 * NOTES:
610 * Intermediate position information is stored in hcb->priv.
611 */
612 static thash_data_t *vtlb_next_overlap(thash_cb_t *hcb)
613 {
614 thash_data_t *ovl;
615 thash_internal_t *priv = &hcb->priv;
616 u64 addr,rr_psize;
617 rr_t vrr;
619 if ( priv->s_sect.tr ) {
620 ovl = vtr_find_next_overlap (hcb);
621 if ( ovl ) return ovl;
622 priv->s_sect.tr = 0;
623 }
624 if ( priv->s_sect.v == 0 ) return NULL;
625 vrr = (hcb->get_rr_fn)(hcb->vcpu,priv->_curva);
626 rr_psize = PSIZE(vrr.ps);
628 while ( priv->_curva < priv->_eva ) {
629 if ( !INVALID_ENTRY(hcb, priv->hash_base) ) {
630 ovl = _vtlb_next_overlap_in_chain(hcb);
631 if ( ovl ) {
632 priv->cur_cch = ovl->next;
633 return ovl;
634 }
635 }
636 priv->_curva += rr_psize;
637 priv->hash_base = (hcb->hash_func)( hcb->pta,
638 priv->_curva, priv->rid, priv->ps);
639 priv->cur_cch = priv->hash_base;
640 }
641 return NULL;
642 }
644 static thash_data_t *vhpt_next_overlap(thash_cb_t *hcb)
645 {
646 thash_data_t *ovl;
647 thash_internal_t *priv = &hcb->priv;
648 u64 addr,rr_psize;
649 rr_t vrr;
651 vrr = (hcb->get_rr_fn)(hcb->vcpu,priv->_curva);
652 rr_psize = PSIZE(vrr.ps);
654 while ( priv->_curva < priv->_eva ) {
655 if ( !INVALID_ENTRY(hcb, priv->hash_base) ) {
656 ovl = _vhpt_next_overlap_in_chain(hcb);
657 if ( ovl ) {
658 priv->cur_cch = ovl->next;
659 return ovl;
660 }
661 }
662 priv->_curva += rr_psize;
663 priv->hash_base = (hcb->hash_func)( hcb->pta,
664 priv->_curva, priv->rid, priv->ps);
665 priv->tag = (hcb->vs->tag_func)( hcb->pta,
666 priv->_curva, priv->rid, priv->ps);
667 priv->cur_cch = priv->hash_base;
668 }
669 return NULL;
670 }
673 /*
674 * Find and purge overlap entries in hash table and its collision chain.
675 * PARAS:
676 * 1: in: TLB format entry, rid:ps must be same with vrr[].
677 * rid, va & ps identify the address space for purge
678 * 2: section can be combination of TR, TC and FM. (thash_SECTION_XX)
679 * 3: cl means I side or D side.
680 * NOTES:
681 *
682 */
683 void thash_purge_entries(thash_cb_t *hcb,
684 thash_data_t *in, search_section_t p_sect)
685 {
686 return thash_purge_entries_ex(hcb, in->rid, in->vadr,
687 in->ps, p_sect, in->cl);
688 }
690 void thash_purge_entries_ex(thash_cb_t *hcb,
691 u64 rid, u64 va, u64 ps,
692 search_section_t p_sect,
693 CACHE_LINE_TYPE cl)
694 {
695 thash_data_t *ovl;
697 ovl = (hcb->find_overlap)(hcb, va, PSIZE(ps), rid, cl, p_sect);
698 while ( ovl != NULL ) {
699 (hcb->rem_hash)(hcb, ovl);
700 ovl = (hcb->next_overlap)(hcb);
701 };
702 }
704 /*
705 * Purge overlap TCs and then insert the new entry to emulate itc ops.
706 * Notes: Only TC entry can purge and insert.
707 */
708 void thash_purge_and_insert(thash_cb_t *hcb, thash_data_t *in)
709 {
710 thash_data_t *ovl;
711 search_section_t sections;
713 #ifdef XEN_DEBUGGER
714 vrr = (hcb->get_rr_fn)(hcb->vcpu,in->vadr);
715 if ( in->ps != vrr.ps || hcb->ht != THASH_TLB || !in->tc ) {
716 panic ("Oops, wrong call for purge_and_insert\n");
717 return;
718 }
719 #endif
720 in->vadr = PAGEALIGN(in->vadr,in->ps);
721 in->ppn = PAGEALIGN(in->ppn, in->ps-12);
722 sections.tr = 0;
723 sections.tc = 1;
724 ovl = (hcb->find_overlap)(hcb, in->vadr, PSIZE(in->ps),
725 in->rid, in->cl, sections);
726 if(ovl)
727 (hcb->rem_hash)(hcb, ovl);
728 #ifdef XEN_DEBUGGER
729 ovl = (hcb->next_overlap)(hcb);
730 if ( ovl ) {
731 panic ("Oops, 2+ overlaps for purge_and_insert\n");
732 return;
733 }
734 #endif
735 (hcb->ins_hash)(hcb, in, in->vadr);
736 }
738 /*
739 * Purge all TCs or VHPT entries including those in Hash table.
740 *
741 */
743 // TODO: add sections.
744 void thash_purge_all(thash_cb_t *hcb)
745 {
746 thash_data_t *hash_table;
748 #ifdef VTLB_DEBUG
749 extern u64 sanity_check;
750 static u64 statistics_before_purge_all=0;
751 if ( statistics_before_purge_all ) {
752 sanity_check = 1;
753 check_vtlb_sanity(hcb);
754 }
755 #endif
757 hash_table = (thash_data_t*)((u64)hcb->hash + hcb->hash_sz);
759 for (--hash_table;(u64)hash_table >= (u64)hcb->hash;hash_table--) {
760 thash_rem_line(hcb, hash_table);
761 }
762 }
765 /*
766 * Lookup the hash table and its collision chain to find an entry
767 * covering this address rid:va or the entry.
768 *
769 * INPUT:
770 * in: TLB format for both VHPT & TLB.
771 */
772 thash_data_t *vtlb_lookup(thash_cb_t *hcb,
773 thash_data_t *in)
774 {
775 return vtlb_lookup_ex(hcb, in->rid, in->vadr, in->cl);
776 }
778 thash_data_t *vtlb_lookup_ex(thash_cb_t *hcb,
779 u64 rid, u64 va,
780 CACHE_LINE_TYPE cl)
781 {
782 thash_data_t *hash_table, *cch;
783 u64 tag;
784 rr_t vrr;
786 ASSERT ( hcb->ht == THASH_VTLB );
788 cch = __vtr_lookup(hcb, rid, va, cl);;
789 if ( cch ) return cch;
791 vrr = (hcb->get_rr_fn)(hcb->vcpu,va);
792 hash_table = (hcb->hash_func)( hcb->pta,va, rid, vrr.ps);
794 if ( INVALID_ENTRY(hcb, hash_table ) )
795 return NULL;
798 for (cch=hash_table; cch; cch = cch->next) {
799 if ( __is_translated(cch, rid, va, cl) )
800 return cch;
801 }
802 return NULL;
803 }
805 /*
806 * Lock/Unlock TC if found.
807 * NOTES: Only the page in prefered size can be handled.
808 * return:
809 * 1: failure
810 * 0: success
811 */
812 int thash_lock_tc(thash_cb_t *hcb, u64 va, u64 size, int rid, char cl, int lock)
813 {
814 thash_data_t *ovl;
815 search_section_t sections;
817 sections.tr = 1;
818 sections.tc = 1;
819 ovl = (hcb->find_overlap)(hcb, va, size, rid, cl, sections);
820 if ( ovl ) {
821 if ( !ovl->tc ) {
822 // panic("Oops, TR for lock\n");
823 return 0;
824 }
825 else if ( lock ) {
826 if ( ovl->locked ) {
827 DPRINTK("Oops, already locked entry\n");
828 }
829 ovl->locked = 1;
830 }
831 else if ( !lock ) {
832 if ( !ovl->locked ) {
833 DPRINTK("Oops, already unlocked entry\n");
834 }
835 ovl->locked = 0;
836 }
837 return 0;
838 }
839 return 1;
840 }
842 /*
843 * Notifier when TLB is deleted from hash table and its collision chain.
844 * NOTES:
845 * The typical situation is that TLB remove needs to inform
846 * VHPT to remove too.
847 * PARAS:
848 * 1: hcb is TLB object.
849 * 2: The format of entry is always in TLB.
850 *
851 */
852 void tlb_remove_notifier(thash_cb_t *hcb, thash_data_t *entry)
853 {
854 thash_cb_t *vhpt;
855 search_section_t s_sect;
857 s_sect.v = 0;
858 thash_purge_entries(hcb->ts->vhpt, entry, s_sect);
859 machine_tlb_purge(entry->rid, entry->vadr, entry->ps);
860 }
862 /*
863 * Initialize internal control data before service.
864 */
/*
 * Initialize internal control data before service: set up the free
 * collision-chain pool, the PTA image, and the per-type (vTLB/VHPT)
 * function-pointer dispatch, then invalidate every hash line.
 * sz is the PTA.size field value (log2 of the table size -- TODO
 * confirm against the pta definition).
 */
void thash_init(thash_cb_t *hcb, u64 sz)
{
    thash_data_t    *hash_table;

    cch_mem_init (hcb);
    hcb->magic = THASH_CB_MAGIC;
    hcb->pta.val = hcb->hash;
    hcb->pta.vf = 1;
    hcb->pta.ve = 1;
    hcb->pta.size = sz;
    hcb->get_rr_fn = vmmu_get_rr;
    ASSERT ( hcb->hash_sz % sizeof(thash_data_t) == 0 );
    if ( hcb->ht == THASH_TLB ) {
        /* Guest vTLB: removals must propagate to the shadow VHPT. */
        hcb->remove_notifier =  tlb_remove_notifier;
        hcb->find_overlap = vtlb_find_overlap;
        hcb->next_overlap = vtlb_next_overlap;
        hcb->rem_hash = rem_vtlb;
        hcb->ins_hash = vtlb_insert;
        __init_tr(hcb);
    }
    else {
        /* Machine VHPT: no removal notifier. */
        hcb->remove_notifier =  NULL;
        hcb->find_overlap = vhpt_find_overlap;
        hcb->next_overlap = vhpt_next_overlap;
        hcb->rem_hash = rem_thash;
        hcb->ins_hash = vhpt_insert;
    }
    /* Invalidate every hash line, walking backwards from the end. */
    hash_table = (thash_data_t*)((u64)hcb->hash + hcb->hash_sz);

    for (--hash_table;(u64)hash_table >= (u64)hcb->hash;hash_table--) {
        INVALIDATE_HASH(hcb,hash_table);
    }
}
899 #ifdef VTLB_DEBUG
900 static u64 cch_length_statistics[MAX_CCH_LENGTH+1];
901 u64 sanity_check=0;
902 u64 vtlb_chain_sanity(thash_cb_t *vtlb, thash_cb_t *vhpt, thash_data_t *hash)
903 {
904 thash_data_t *cch;
905 thash_data_t *ovl;
906 search_section_t s_sect;
907 u64 num=0;
909 s_sect.v = 0;
910 for (cch=hash; cch; cch=cch->next) {
911 ovl = thash_find_overlap(vhpt, cch, s_sect);
912 while ( ovl != NULL ) {
913 ovl->checked = 1;
914 ovl = (vhpt->next_overlap)(vhpt);
915 };
916 num ++;
917 }
918 if ( num >= MAX_CCH_LENGTH ) {
919 cch_length_statistics[MAX_CCH_LENGTH] ++;
920 }
921 else {
922 cch_length_statistics[num] ++;
923 }
924 return num;
925 }
927 void check_vtlb_sanity(thash_cb_t *vtlb)
928 {
929 // struct pfn_info *page;
930 u64 hash_num, i, psr;
931 static u64 check_ok_num, check_fail_num,check_invalid;
932 // void *vb1, *vb2;
933 thash_data_t *hash, *cch;
934 thash_data_t *ovl;
935 search_section_t s_sect;
936 thash_cb_t *vhpt = vtlb->ts->vhpt;
937 u64 invalid_ratio;
939 if ( sanity_check == 0 ) return;
940 sanity_check --;
941 s_sect.v = 0;
942 // page = alloc_domheap_pages (NULL, VCPU_TLB_ORDER);
943 // if ( page == NULL ) {
944 // panic("No enough contiguous memory for init_domain_mm\n");
945 // };
946 // vb1 = page_to_virt(page);
947 // printf("Allocated page=%lp vbase=%lp\n", page, vb1);
948 // vb2 = vb1 + vtlb->hash_sz;
949 hash_num = vhpt->hash_sz / sizeof(thash_data_t);
950 // printf("vb2=%lp, size=%lx hash_num=%lx\n", vb2, vhpt->hash_sz, hash_num);
951 printf("vtlb=%lp, hash=%lp size=0x%lx; vhpt=%lp, hash=%lp size=0x%lx\n",
952 vtlb, vtlb->hash,vtlb->hash_sz,
953 vhpt, vhpt->hash, vhpt->hash_sz);
954 //memcpy(vb1, vtlb->hash, vtlb->hash_sz);
955 //memcpy(vb2, vhpt->hash, vhpt->hash_sz);
956 for ( i=0; i < sizeof(cch_length_statistics)/sizeof(cch_length_statistics[0]); i++ ) {
957 cch_length_statistics[i] = 0;
958 }
960 local_irq_save(psr);
962 hash = vhpt->hash;
963 for (i=0; i < hash_num; i++) {
964 if ( !INVALID_ENTRY(vhpt, hash) ) {
965 for ( cch= hash; cch; cch=cch->next) {
966 cch->checked = 0;
967 }
968 }
969 hash ++;
970 }
971 printf("Done vhpt clear checked flag, hash_num=0x%lx\n", hash_num);
972 check_invalid = 0;
973 check_ok_num=0;
974 hash = vtlb->hash;
975 for ( i=0; i< hash_num; i++ ) {
976 if ( !INVALID_ENTRY(vtlb, hash) ) {
977 check_ok_num += vtlb_chain_sanity(vtlb, vhpt, hash);
978 }
979 else {
980 check_invalid++;
981 }
982 hash ++;
983 }
984 printf("Done vtlb entry check, hash=%lp\n", hash);
985 printf("check_ok_num = 0x%lx check_invalid=0x%lx\n", check_ok_num,check_invalid);
986 invalid_ratio = 1000*check_invalid / hash_num;
987 printf("%02ld.%01ld%% entries are invalid\n",
988 invalid_ratio/10, invalid_ratio % 10 );
989 for (i=0; i<NDTRS; i++) {
990 ovl = thash_find_overlap(vhpt, &vtlb->ts->dtr[i], s_sect);
991 while ( ovl != NULL ) {
992 ovl->checked = 1;
993 ovl = (vhpt->next_overlap)(vhpt);
994 };
995 }
996 printf("Done dTR\n");
997 for (i=0; i<NITRS; i++) {
998 ovl = thash_find_overlap(vhpt, &vtlb->ts->itr[i], s_sect);
999 while ( ovl != NULL ) {
1000 ovl->checked = 1;
1001 ovl = (vhpt->next_overlap)(vhpt);
1002 };
1004 printf("Done iTR\n");
1005 check_fail_num = 0;
1006 check_invalid = 0;
1007 check_ok_num=0;
1008 hash = vhpt->hash;
1009 for (i=0; i < hash_num; i++) {
1010 if ( !INVALID_ENTRY(vhpt, hash) ) {
1011 for ( cch= hash; cch; cch=cch->next) {
1012 if ( !cch->checked ) {
1013 printf ("!!!Hash=%lp cch=%lp not within vtlb\n", hash, cch);
1014 check_fail_num ++;
1016 else {
1017 check_ok_num++;
1021 else {
1022 check_invalid ++;
1024 hash ++;
1026 local_irq_restore(psr);
1027 printf("check_ok_num=0x%lx check_fail_num=0x%lx check_invalid=0x%lx\n",
1028 check_ok_num, check_fail_num, check_invalid);
1029 //memcpy(vtlb->hash, vb1, vtlb->hash_sz);
1030 //memcpy(vhpt->hash, vb2, vhpt->hash_sz);
1031 printf("The statistics of collision chain length is listed\n");
1032 for ( i=0; i < sizeof(cch_length_statistics)/sizeof(cch_length_statistics[0]); i++ ) {
1033 printf("CCH length=%02ld, chain number=%ld\n", i, cch_length_statistics[i]);
1035 // free_domheap_pages(page, VCPU_TLB_ORDER);
1036 printf("Done check_vtlb\n");
1039 void dump_vtlb(thash_cb_t *vtlb)
1041 static u64 dump_vtlb=0;
1042 thash_data_t *hash, *cch, *tr;
1043 u64 hash_num,i;
1045 if ( dump_vtlb == 0 ) return;
1046 dump_vtlb --;
1047 hash_num = vtlb->hash_sz / sizeof(thash_data_t);
1048 hash = vtlb->hash;
1050 printf("Dump vTC\n");
1051 for ( i = 0; i < hash_num; i++ ) {
1052 if ( !INVALID_ENTRY(vtlb, hash) ) {
1053 printf("VTLB at hash=%lp\n", hash);
1054 for (cch=hash; cch; cch=cch->next) {
1055 printf("Entry %lp va=%lx ps=%lx rid=%lx\n",
1056 cch, cch->vadr, cch->ps, cch->rid);
1059 hash ++;
1061 printf("Dump vDTR\n");
1062 for (i=0; i<NDTRS; i++) {
1063 tr = &DTR(vtlb,i);
1064 printf("Entry %lp va=%lx ps=%lx rid=%lx\n",
1065 tr, tr->vadr, tr->ps, tr->rid);
1067 printf("Dump vITR\n");
1068 for (i=0; i<NITRS; i++) {
1069 tr = &ITR(vtlb,i);
1070 printf("Entry %lp va=%lx ps=%lx rid=%lx\n",
1071 tr, tr->vadr, tr->ps, tr->rid);
1073 printf("End of vTLB dump\n");
1075 #endif