direct-io.hg

view xen/arch/ia64/vmx/vtlb.c @ 10372:09982b366f93

[IA64] fix debug=y build: remove ASSERT(THASH_TLB)

ht is no longer a member of thash_cb_t, and THASH_TYPE is no longer
tracked. Remove this bogus ASSERT.

Signed-off-by: Aron Griffis <aron@hp.com>
author awilliam@xenbuild.aw
date Sat Jun 03 14:41:24 2006 -0600 (2006-06-03)
parents 380f87e8003f
children b20733e82ab6
line source
/* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
/*
 * vtlb.c: guest virtual tlb handling module.
 * Copyright (c) 2004, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Yaozu Dong (Eddie Dong) (Eddie.dong@intel.com)
 * XiaoYan Feng (Fleming Feng) (Fleming.feng@intel.com)
 */
#include <linux/sched.h>
#include <asm/tlb.h>
#include <asm/mm.h>
#include <asm/vmx_mm_def.h>
#include <asm/gcc_intrin.h>
#include <linux/interrupt.h>
#include <asm/vmx_vcpu.h>
#include <asm/vmmu.h>
#include <asm/tlbflush.h>

#define MAX_CCH_LENGTH 40

thash_data_t *__alloc_chain(thash_cb_t *);
static void cch_mem_init(thash_cb_t *hcb)
{
    int num;
    thash_data_t *p;

    hcb->cch_freelist = p = hcb->cch_buf;
    num = (hcb->cch_sz/sizeof(thash_data_t))-1;
    do{
        p->next = p+1;
        p++;
        num--;
    }while(num);
    p->next = NULL;
}

static thash_data_t *cch_alloc(thash_cb_t *hcb)
{
    thash_data_t *p;
    if ( (p = hcb->cch_freelist) != NULL ) {
        hcb->cch_freelist = p->next;
    }
    return p;
}

static void cch_free(thash_cb_t *hcb, thash_data_t *cch)
{
    cch->next = hcb->cch_freelist;
    hcb->cch_freelist = cch;
}
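cch_mem_init threads the collision-chain buffer into a NULL-terminated singly linked free list, and cch_alloc/cch_free simply pop and push its head. A minimal standalone sketch of the same free-list discipline (the node type and names are illustrative, not Xen's):

#include <stdio.h>

struct node { struct node *next; };

/* Thread an array of nodes into a NULL-terminated free list. */
static struct node *freelist_init(struct node *buf, int n)
{
    for (int i = 0; i < n - 1; i++)
        buf[i].next = &buf[i + 1];
    buf[n - 1].next = NULL;
    return buf;
}

static struct node *freelist_pop(struct node **head)
{
    struct node *p = *head;
    if (p)
        *head = p->next;        /* same shape as cch_alloc */
    return p;
}

static void freelist_push(struct node **head, struct node *p)
{
    p->next = *head;            /* same shape as cch_free */
    *head = p;
}

int main(void)
{
    struct node buf[4];
    struct node *head = freelist_init(buf, 4);
    struct node *a = freelist_pop(&head);   /* takes buf[0] */
    freelist_push(&head, a);                /* buf[0] is the head again */
    printf("%d\n", head == buf);            /* prints 1 */
    return 0;
}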
/*
 * Check to see if the address rid:va is translated by the TLB
 */
static inline int __is_tr_translated(thash_data_t *trp, u64 rid, u64 va)
{
    return ((trp->p) && (trp->rid == rid) && ((va-trp->vadr) < PSIZE(trp->ps)));
}

/*
 * Only for GUEST TR format.
 */
static int
__is_tr_overlap(thash_data_t *trp, u64 rid, u64 sva, u64 eva)
{
    uint64_t sa1, ea1;

    if (!trp->p || trp->rid != rid ) {
        return 0;
    }
    sa1 = trp->vadr;
    ea1 = sa1 + PSIZE(trp->ps) - 1;
    eva -= 1;
    if ( (sva > ea1) || (sa1 > eva) )
        return 0;
    else
        return 1;
}
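__is_tr_overlap converts the exclusive end eva to an inclusive one and then applies the standard interval test: two inclusive ranges are disjoint exactly when one starts after the other ends. A self-contained sketch of that check (the addresses are illustrative):

#include <stdint.h>
#include <assert.h>

/* Inclusive ranges [s1,e1] and [s2,e2] overlap unless one begins
 * after the other ends -- the same test __is_tr_overlap applies. */
static int ranges_overlap(uint64_t s1, uint64_t e1,
                          uint64_t s2, uint64_t e2)
{
    return !(s2 > e1 || s1 > e2);
}

int main(void)
{
    /* A 16KB TR at 0x4000 overlaps a purge of [0x6000, 0x8000)... */
    assert(ranges_overlap(0x4000, 0x4000 + 0x4000 - 1,
                          0x6000, 0x8000 - 1));
    /* ...but not a purge of [0x8000, 0xC000). */
    assert(!ranges_overlap(0x4000, 0x4000 + 0x4000 - 1,
                           0x8000, 0xC000 - 1));
    return 0;
}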
/*
 * Delete a thash entry at the head of its collision chain.
 */
static void __rem_hash_head(thash_cb_t *hcb, thash_data_t *hash)
{
    thash_data_t *next=hash->next;

    if ( next ) {
        next->len=hash->len-1;
        *hash = *next;
        cch_free (hcb, next);
    }
    else {
        hash->ti=1;
    }
}
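Because the head entry is storage inside the hash table itself, __rem_hash_head cannot unlink it; instead it copies the second node over the head slot and frees that node, or marks the slot invalid (ti=1) when there is no chain. A simplified sketch of the copy trick, with illustrative fields standing in for thash_data_t:

struct entry { int payload; int invalid; int len; struct entry *next; };

static void rem_head(struct entry *head)
{
    struct entry *next = head->next;
    if (next) {
        next->len = head->len - 1; /* decremented length rides along in the copy */
        *head = *next;             /* second node's payload and link replace head */
        /* the real code recycles the node here: cch_free(hcb, next); */
    } else {
        head->invalid = 1;         /* no chain: invalidate the slot itself */
    }
}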
thash_data_t *__vtr_lookup(VCPU *vcpu, u64 va, int is_data)
{
    thash_data_t *trp;
    int i;
    u64 rid;

    vcpu_get_rr(vcpu, va, &rid);
    rid = rid&RR_RID_MASK;
    if (is_data) {
        if (vcpu_quick_region_check(vcpu->arch.dtr_regions,va)) {
            for (trp =(thash_data_t *) vcpu->arch.dtrs,i=0; i<NDTRS; i++, trp++) {
                if (__is_tr_translated(trp, rid, va)) {
                    return trp;
                }
            }
        }
    }
    else {
        if (vcpu_quick_region_check(vcpu->arch.itr_regions,va)) {
            for (trp =(thash_data_t *) vcpu->arch.itrs,i=0; i<NITRS; i++, trp++) {
                if (__is_tr_translated(trp, rid, va)) {
                    return trp;
                }
            }
        }
    }
    return NULL;
}
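vcpu_quick_region_check avoids scanning the TR arrays when no TR was ever inserted for the faulting region: it keeps a one-bit-per-region bitmap indexed by va bits 63:61. A sketch under that assumption (the real macros live in Xen's ia64 headers; these names are illustrative):

#include <stdint.h>

#define REGION(va)  ((uint64_t)(va) >> 61)   /* region number: va bits 63:61 */

static inline void region_set(uint64_t *map, uint64_t va)
{
    *map |= 1UL << REGION(va);               /* remember a TR exists here */
}

static inline int region_check(uint64_t map, uint64_t va)
{
    return (map >> REGION(va)) & 1;          /* any TR in this region? */
}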
/*
 * Return the whole collision chain to the free list; the head slot
 * keeps its entry, with len and next cleared.
 */
static void thash_recycle_cch(thash_cb_t *hcb, thash_data_t *hash)
{
    thash_data_t *p;
    int i=0;

    p=hash;
    for(i=0; i < MAX_CCN_DEPTH; i++){
        p=p->next;
    }
    p->next=hcb->cch_freelist;
    hcb->cch_freelist=hash->next;
    hash->len=0;
    hash->next=0;
}
static void vmx_vhpt_insert(thash_cb_t *hcb, u64 pte, u64 itir, u64 ifa)
{
    u64 tag;
    thash_data_t *head, *cch;
    pte = pte & ~PAGE_FLAGS_RV_MASK;

    head = (thash_data_t *)ia64_thash(ifa);
    tag = ia64_ttag(ifa);
    if( INVALID_VHPT(head) ) {
        head->page_flags = pte;
        head->etag = tag;
        return;
    }

    if(head->len>=MAX_CCN_DEPTH){
        thash_recycle_cch(hcb, head);
        cch = cch_alloc(hcb);
    }
    else{
        cch = __alloc_chain(hcb);
    }
    cch->page_flags=head->page_flags;
    cch->etag=head->etag;
    cch->next=head->next;
    head->page_flags=pte;
    head->etag=tag;
    head->next = cch;
    head->len = cch->len+1;
    cch->len = 0;
    return;
}
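vmx_vhpt_insert (and vtlb_insert below) share one head-insertion pattern: the hash slot always holds the newest entry, the displaced head is copied onto a chain node, and the chain length is tracked in the head entry. A simplified sketch with illustrative fields:

struct ent { unsigned long key, val; int len; struct ent *next; };

static void head_insert(struct ent *head, struct ent *spare,
                        unsigned long key, unsigned long val)
{
    *spare = *head;              /* displaced head moves onto the chain */
    head->key = key;             /* newest entry always sits in the slot */
    head->val = val;
    head->next = spare;
    head->len = spare->len + 1;  /* chain length is kept in the head */
    spare->len = 0;
}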
void thash_vhpt_insert(VCPU *v, u64 pte, u64 itir, u64 va)
{
    u64 phy_pte;
    phy_pte=translate_phy_pte(v, &pte, itir, va);
    vmx_vhpt_insert(vcpu_get_vhpt(v), phy_pte, itir, va);
}

/*
 * vhpt lookup
 */
thash_data_t * vhpt_lookup(u64 va)
{
    thash_data_t *hash, *head;
    u64 tag, pte;
    head = (thash_data_t *)ia64_thash(va);
    hash=head;
    tag = ia64_ttag(va);
    do{
        if(hash->etag == tag)
            break;
        hash=hash->next;
    }while(hash);
    if(hash && hash!=head){
        pte = hash->page_flags;
        hash->page_flags = head->page_flags;
        head->page_flags = pte;
        tag = hash->etag;
        hash->etag = head->etag;
        head->etag = tag;
        head->len = hash->len;
        hash->len=0;
        return head;
    }
    return hash;
}
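On a hit somewhere down the collision chain, vhpt_lookup swaps the found entry with the head slot, a move-to-front step so the hardware VHPT walker can satisfy the next access from the slot itself without chain walking. The swap in isolation (illustrative fields, not the full thash_data_t):

struct slot { unsigned long tag, flags; int len; };

static void swap_to_front(struct slot *head, struct slot *hit)
{
    unsigned long t;
    t = hit->flags; hit->flags = head->flags; head->flags = t;
    t = hit->tag;   hit->tag   = head->tag;   head->tag   = t;
    head->len = hit->len;   /* length bookkeeping stays with the head */
    hit->len  = 0;
}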
u64 guest_vhpt_lookup(u64 iha, u64 *pte)
{
    u64 ret;
    vhpt_lookup(iha);
    /* Read the guest VHPT entry at iha with a speculative load while
     * psr.ic and psr.i are off; tnat.nz detects the NaT of a failed
     * speculative load.  On success the entry is stored through pte
     * and 0 is returned, otherwise 1. */
    asm volatile ("rsm psr.ic|psr.i;;"
                  "srlz.d;;"
                  "ld8.s r9=[%1];;"
                  "tnat.nz p6,p7=r9;;"
                  "(p6) mov %0=1;"
                  "(p6) mov r9=r0;"
                  "(p7) mov %0=r0;"
                  "(p7) st8 [%2]=r9;;"
                  "ssm psr.ic;;"
                  "srlz.d;;"
                  "ssm psr.i;;"
                  : "=r"(ret) : "r"(iha), "r"(pte) : "memory");
    return ret;
}
/*
 * purge software guest tlb
 */
static void vtlb_purge(thash_cb_t *hcb, u64 va, u64 ps)
{
    thash_data_t *hash_table, *prev, *next;
    u64 start, end, size, tag, rid, def_size;
    ia64_rr vrr;
    vcpu_get_rr(current, va, &vrr.rrval);
    rid = vrr.rid;
    size = PSIZE(ps);
    start = va & (-size);
    end = start + size;
    def_size = PSIZE(vrr.ps);
    while(start < end){
        hash_table = vsa_thash(hcb->pta, start, vrr.rrval, &tag);
        if(!INVALID_TLB(hash_table)){
            if(hash_table->etag == tag){
                __rem_hash_head(hcb, hash_table);
            }
            else{
                prev=hash_table;
                next=prev->next;
                while(next){
                    if(next->etag == tag){
                        prev->next=next->next;
                        cch_free(hcb,next);
                        hash_table->len--;
                        break;
                    }
                    prev=next;
                    next=next->next;
                }
            }
        }
        start += def_size;
    }
    // machine_tlb_purge(va, ps);
}
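Both purge routines walk the requested range in fixed steps, probing the hash once per step: vtlb_purge steps by the region's preferred page size, vhpt_purge (below) by PAGE_SIZE. The address arithmetic in isolation, assuming ps is log2 of the purge size (the function and parameter names are illustrative):

#include <stdint.h>

static void walk_purge_range(uint64_t va, uint64_t ps, uint64_t step)
{
    uint64_t size  = 1UL << ps;          /* PSIZE(ps) */
    uint64_t start = va & ~(size - 1);   /* align down: same as va & (-size) */
    uint64_t end   = start + size;
    for (uint64_t a = start; a < end; a += step) {
        /* probe the hash at 'a' and unlink any entry whose tag matches */
    }
}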
/*
 * purge VHPT and machine TLB
 */
static void vhpt_purge(thash_cb_t *hcb, u64 va, u64 ps)
{
    thash_data_t *hash_table, *prev, *next;
    u64 start, end, size, tag;
    size = PSIZE(ps);
    start = va & (-size);
    end = start + size;
    while(start < end){
        hash_table = (thash_data_t *)ia64_thash(start);
        tag = ia64_ttag(start);
        if(hash_table->etag == tag ){
            __rem_hash_head(hcb, hash_table);
        }
        else{
            prev=hash_table;
            next=prev->next;
            while(next){
                if(next->etag == tag){
                    prev->next=next->next;
                    cch_free(hcb,next);
                    hash_table->len--;
                    break;
                }
                prev=next;
                next=next->next;
            }
        }
        start += PAGE_SIZE;
    }
    machine_tlb_purge(va, ps);
}
/*
 * Recycle all collision chains in VTLB or VHPT.
 */
void thash_recycle_cch_all(thash_cb_t *hcb)
{
    int num;
    thash_data_t *head;
    head=hcb->hash;
    num = (hcb->hash_sz/sizeof(thash_data_t));
    do{
        head->len = 0;
        head->next = 0;
        head++;
        num--;
    }while(num);
    cch_mem_init(hcb);
}
thash_data_t *__alloc_chain(thash_cb_t *hcb)
{
    thash_data_t *cch;

    cch = cch_alloc(hcb);
    if(cch == NULL){
        thash_recycle_cch_all(hcb);
        cch = cch_alloc(hcb);
    }
    return cch;
}
/*
 * Insert an entry into the hash TLB or VHPT.
 * NOTES:
 * 1: When inserting a VHPT entry into thash, "va" must be an address
 *    covered by the inserted machine VHPT entry.
 * 2: The entry format is always the TLB format.
 * 3: The caller needs to make sure the new entry will not overlap
 *    with any existing entry.
 */
void vtlb_insert(thash_cb_t *hcb, u64 pte, u64 itir, u64 va)
{
    thash_data_t *hash_table, *cch;
    /* int flag; */
    ia64_rr vrr;
    /* u64 gppn, ppns, ppne; */
    u64 tag;
    vcpu_get_rr(current, va, &vrr.rrval);
#ifdef VTLB_DEBUG
    if (vrr.ps != itir_ps(itir)) {
        // machine_tlb_insert(hcb->vcpu, entry);
        panic_domain(NULL, "not preferred ps with va: 0x%lx vrr.ps=%d ps=%ld\n",
                     va, vrr.ps, itir_ps(itir));
        return;
    }
#endif
    hash_table = vsa_thash(hcb->pta, va, vrr.rrval, &tag);
    if( INVALID_TLB(hash_table) ) {
        hash_table->page_flags = pte;
        hash_table->itir=itir;
        hash_table->etag=tag;
        hash_table->next = 0;
        return;
    }
    if (hash_table->len>=MAX_CCN_DEPTH){
        thash_recycle_cch(hcb, hash_table);
        cch = cch_alloc(hcb);
    }
    else {
        cch = __alloc_chain(hcb);
    }
    *cch = *hash_table;
    hash_table->page_flags = pte;
    hash_table->itir=itir;
    hash_table->etag=tag;
    hash_table->next = cch;
    hash_table->len = cch->len + 1;
    cch->len = 0;
    return;
}
int vtr_find_overlap(VCPU *vcpu, u64 va, u64 ps, int is_data)
{
    thash_data_t *trp;
    int i;
    u64 end, rid;

    vcpu_get_rr(vcpu, va, &rid);
    rid = rid&RR_RID_MASK;
    end = va + PSIZE(ps);
    if (is_data) {
        if (vcpu_quick_region_check(vcpu->arch.dtr_regions,va)) {
            for (trp =(thash_data_t *) vcpu->arch.dtrs,i=0; i<NDTRS; i++, trp++) {
                if (__is_tr_overlap(trp, rid, va, end)) {
                    return i;
                }
            }
        }
    }
    else {
        if (vcpu_quick_region_check(vcpu->arch.itr_regions,va)) {
            for (trp =(thash_data_t *) vcpu->arch.itrs,i=0; i<NITRS; i++, trp++) {
                if (__is_tr_overlap(trp, rid, va, end)) {
                    return i;
                }
            }
        }
    }
    return -1;
}
/*
 * Purge entries in VTLB and VHPT
 */
void thash_purge_entries(VCPU *v, u64 va, u64 ps)
{
    if(vcpu_quick_region_check(v->arch.tc_regions,va))
        vtlb_purge(&v->arch.vtlb, va, ps);
    vhpt_purge(&v->arch.vhpt, va, ps);
}
u64 translate_phy_pte(VCPU *v, u64 *pte, u64 itir, u64 va)
{
    u64 ps, addr;
    union pte_flags phy_pte;
    ps = itir_ps(itir);
    phy_pte.val = *pte;
    addr = *pte;
    addr = ((addr & _PAGE_PPN_MASK)>>ps<<ps)|(va&((1UL<<ps)-1));
    addr = lookup_domain_mpa(v->domain, addr);
    if(addr & GPFN_IO_MASK){
        *pte |= VTLB_PTE_IO;
        return -1;
    }
    phy_pte.ppn = addr >> ARCH_PAGE_SHIFT;
    return phy_pte.val;
}
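The address line rebuilds the guest-physical address from the PTE's ppn field plus va's page offset: x >> ps << ps clears the low ps bits, and va & ((1UL<<ps)-1) keeps only them. A worked, self-contained check of that arithmetic (the mask value is an illustrative stand-in for _PAGE_PPN_MASK):

#include <assert.h>
#include <stdint.h>

int main(void)
{
    uint64_t ppn_mask = 0x0003fffffffff000UL; /* stand-in for _PAGE_PPN_MASK */
    uint64_t pte = 0x12345601UL;              /* ppn names frame 0x12345000 */
    uint64_t va  = 0xabcUL;                   /* page offset 0xabc */
    uint64_t ps  = 12;                        /* 4KB page */
    uint64_t addr = ((pte & ppn_mask) >> ps << ps) | (va & ((1UL << ps) - 1));
    assert(addr == 0x12345abcUL);             /* frame base + page offset */
    return 0;
}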
/*
 * Purge overlapping TCs and then insert the new entry to emulate itc ops.
 * Notes: Only TC entries can be purged and inserted.
 * For a VMX domain: a preferred-size (PAGE_SHIFT) non-IO mapping goes
 * straight into the VHPT; an IO mapping is kept in the VTLB only; a
 * non-preferred size is kept in the VTLB and, if not IO, shadowed in
 * the VHPT as well.
 */
void thash_purge_and_insert(VCPU *v, u64 pte, u64 itir, u64 ifa)
{
    u64 ps;//, va;
    u64 phy_pte;
    ps = itir_ps(itir);

    if(VMX_DOMAIN(v)){
        phy_pte = translate_phy_pte(v, &pte, itir, ifa);
        if(ps==PAGE_SHIFT){
            if(!(pte&VTLB_PTE_IO)){
                vhpt_purge(&v->arch.vhpt, ifa, ps);
                vmx_vhpt_insert(&v->arch.vhpt, phy_pte, itir, ifa);
            }
            else{
                vhpt_purge(&v->arch.vhpt, ifa, ps);
                vtlb_insert(&v->arch.vtlb, pte, itir, ifa);
                vcpu_quick_region_set(PSCBX(v,tc_regions),ifa);
            }
        }
        else{
            vhpt_purge(&v->arch.vhpt, ifa, ps);
            vtlb_insert(&v->arch.vtlb, pte, itir, ifa);
            vcpu_quick_region_set(PSCBX(v,tc_regions),ifa);
            if(!(pte&VTLB_PTE_IO)){
                vmx_vhpt_insert(&v->arch.vhpt, phy_pte, itir, ifa);
            }
        }
    }
    else{
        phy_pte = translate_phy_pte(v, &pte, itir, ifa);
        if(ps!=PAGE_SHIFT){
            vtlb_insert(&v->arch.vtlb, pte, itir, ifa);
            vcpu_quick_region_set(PSCBX(v,tc_regions),ifa);
        }
        machine_tlb_purge(ifa, ps);
        vmx_vhpt_insert(&v->arch.vhpt, phy_pte, itir, ifa);
    }
}
/*
 * Purge all TC or VHPT entries, including those in the hash table.
 */
//TODO: add sections.
void thash_purge_all(VCPU *v)
{
    int num;
    thash_data_t *head;
    thash_cb_t *vtlb,*vhpt;
    vtlb =&v->arch.vtlb;
    vhpt =&v->arch.vhpt;

    head=vtlb->hash;
    num = (vtlb->hash_sz/sizeof(thash_data_t));
    do{
        head->page_flags = 0;
        head->etag = 1UL<<63;
        head->next = 0;
        head++;
        num--;
    }while(num);
    cch_mem_init(vtlb);

    head=vhpt->hash;
    num = (vhpt->hash_sz/sizeof(thash_data_t));
    do{
        head->page_flags = 0;
        head->etag = 1UL<<63;
        head->next = 0;
        head++;
        num--;
    }while(num);
    cch_mem_init(vhpt);

    local_flush_tlb_all();
}
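Writing 1UL<<63 into etag is how a slot is invalidated here and in thash_init below: with the tag's top bit set, no computed ttag can match it, which is what the INVALID_VHPT/INVALID_TLB tests rely on. The convention in isolation (the macro and function names are illustrative, not Xen's):

#include <stdint.h>

#define TAG_INVALID  (1UL << 63)  /* no real tag has bit 63 set */

static inline void slot_invalidate(uint64_t *etag)
{
    *etag = TAG_INVALID;          /* guarantees etag != ia64_ttag(va) */
}

static inline int slot_is_invalid(uint64_t etag)
{
    return (etag >> 63) & 1;
}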
/*
 * Lookup the hash table and its collision chain to find an entry
 * covering the address rid:va.
 *
 * INPUT:
 * in: TLB format for both VHPT & TLB.
 */
thash_data_t *vtlb_lookup(VCPU *v, u64 va, int is_data)
{
    thash_data_t *cch;
    u64 tag;
    ia64_rr vrr;
    thash_cb_t * hcb= &v->arch.vtlb;

    cch = __vtr_lookup(v, va, is_data);
    if ( cch ) return cch;

    if(vcpu_quick_region_check(v->arch.tc_regions,va)==0)
        return NULL;

    vcpu_get_rr(v,va,&vrr.rrval);
    cch = vsa_thash( hcb->pta, va, vrr.rrval, &tag);

    do{
        if(cch->etag == tag)
            return cch;
        cch = cch->next;
    }while(cch);
    return NULL;
}
/*
 * Initialize internal control data before service.
 */
void thash_init(thash_cb_t *hcb, u64 sz)
{
    int num;
    thash_data_t *head, *p;

    hcb->pta.val = (unsigned long)hcb->hash;
    hcb->pta.vf = 1;
    hcb->pta.ve = 1;
    hcb->pta.size = sz;
    hcb->cch_rec_head = hcb->hash;

    head=hcb->hash;
    num = (hcb->hash_sz/sizeof(thash_data_t));
    do{
        head->itir = PAGE_SHIFT<<2;
        head->etag = 1UL<<63;
        head->next = 0;
        head++;
        num--;
    }while(num);

    hcb->cch_freelist = p = hcb->cch_buf;
    num = (hcb->cch_sz/sizeof(thash_data_t))-1;
    do{
        p->itir = PAGE_SHIFT<<2;
        p->next = p+1;
        p++;
        num--;
    }while(num);
    p->itir = PAGE_SHIFT<<2;
    p->next = NULL;
}