ia64/xen-unstable

xen/arch/ia64/vmx/vtlb.c @ 14420:e014b94333aa

[IA64] Fix thash_init()

The initialization of page_flags for the last member of the cch buffer was lost.

Signed-off-by: Zhang Xin <xing.z.zhang@intel.com>
author awilliam@xenbuild2.aw
date Tue Mar 20 09:16:09 2007 -0600 (2007-03-20)
parents b8de061f3dc6
children c07b1dc6dc6d
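
The fix is in thash_init() at the bottom of this file: the loop that
builds the cch free list now initializes every element of cch_buf,
including the last one, before terminating the list. In outline (a
paraphrase of the fixed loop, not a verbatim excerpt):

    hcb->cch_freelist = p = hcb->cch_buf;
    num = hcb->cch_sz / sizeof(thash_data_t);
    do {
        p->page_flags = 0;   /* formerly lost for the final element */
        p->itir = 0;
        p->next = p + 1;
        p++;
        num--;
    } while (num);
    (p - 1)->next = NULL;    /* terminate the free list after the loop */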
/* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
/*
 * vtlb.c: guest virtual tlb handling module.
 * Copyright (c) 2004, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Yaozu Dong (Eddie Dong) (Eddie.dong@intel.com)
 * XiaoYan Feng (Fleming Feng) (Fleming.feng@intel.com)
 */

#include <linux/sched.h>
#include <asm/tlb.h>
#include <xen/mm.h>
#include <asm/vmx_mm_def.h>
#include <asm/gcc_intrin.h>
#include <linux/interrupt.h>
#include <asm/vmx_vcpu.h>
#include <asm/vmx_phy_mode.h>
#include <asm/vmmu.h>
#include <asm/tlbflush.h>
#include <asm/regionreg.h>

#define MAX_CCH_LENGTH 40

thash_data_t *__alloc_chain(thash_cb_t *);
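
/*
 * Link the entries of the collision-chain buffer (cch_buf) into a
 * singly linked free list headed by hcb->cch_freelist.
 */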
static void cch_mem_init(thash_cb_t *hcb)
{
    int num;
    thash_data_t *p;

    hcb->cch_freelist = p = hcb->cch_buf;
    num = (hcb->cch_sz / sizeof(thash_data_t)) - 1;
    do {
        p->next = p + 1;
        p++;
        num--;
    } while (num);
    p->next = NULL;
}
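
/* Pop one entry from the collision-chain free list; NULL when empty. */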
static thash_data_t *cch_alloc(thash_cb_t *hcb)
{
    thash_data_t *p;

    if ((p = hcb->cch_freelist) != NULL) {
        hcb->cch_freelist = p->next;
    }
    return p;
}

/*
 * Check to see if the address rid:va is covered by the TR entry trp.
 */
static inline int __is_tr_translated(thash_data_t *trp, u64 rid, u64 va)
{
    return trp->p && (trp->rid == rid) && ((va - trp->vadr) < PSIZE(trp->ps));
}

/*
 * Only for GUEST TR format.
 */
static int __is_tr_overlap(thash_data_t *trp, u64 rid, u64 sva, u64 eva)
{
    uint64_t sa1, ea1;

    if (!trp->p || trp->rid != rid) {
        return 0;
    }
    sa1 = trp->vadr;
    ea1 = sa1 + PSIZE(trp->ps) - 1;
    eva -= 1;
    if ((sva > ea1) || (sa1 > eva))
        return 0;
    else
        return 1;
}

thash_data_t *__vtr_lookup(VCPU *vcpu, u64 va, int is_data)
{
    thash_data_t *trp;
    int i;
    u64 rid;

    vcpu_get_rr(vcpu, va, &rid);
    rid = rid & RR_RID_MASK;
    if (is_data) {
        if (vcpu_quick_region_check(vcpu->arch.dtr_regions, va)) {
            for (trp = (thash_data_t *)vcpu->arch.dtrs, i = 0; i < NDTRS;
                 i++, trp++) {
                if (__is_tr_translated(trp, rid, va)) {
                    return trp;
                }
            }
        }
    }
    else {
        if (vcpu_quick_region_check(vcpu->arch.itr_regions, va)) {
            for (trp = (thash_data_t *)vcpu->arch.itrs, i = 0; i < NITRS;
                 i++, trp++) {
                if (__is_tr_translated(trp, rid, va)) {
                    return trp;
                }
            }
        }
    }
    return NULL;
}
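
/*
 * Return the collision chain hanging off 'hash' to the free list,
 * keeping only the head entry in place.  Callers only invoke this once
 * the chain has grown to MAX_CCN_DEPTH entries.
 */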
static void thash_recycle_cch(thash_cb_t *hcb, thash_data_t *hash)
{
    thash_data_t *p, *q;
    int i = 0;

    p = hash;
    for (i = 0; i < MAX_CCN_DEPTH; i++) {
        p = p->next;
    }
    q = hash->next;
    hash->len = 0;
    hash->next = 0;
    p->next = hcb->cch_freelist;
    hcb->cch_freelist = q;
}
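
/*
 * Insert a translation into the machine VHPT.  An invalid entry on the
 * collision chain is reused if one exists; otherwise the current head
 * is copied into a newly allocated chain entry and the new translation
 * is written at the head.
 */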
static void vmx_vhpt_insert(thash_cb_t *hcb, u64 pte, u64 itir, u64 ifa)
{
    u64 tag, len;
    ia64_rr rr;
    thash_data_t *head, *cch;

    pte = pte & ~PAGE_FLAGS_RV_MASK;
    rr.rrval = ia64_get_rr(ifa);
    head = (thash_data_t *)ia64_thash(ifa);
    tag = ia64_ttag(ifa);
    cch = head;
    while (cch) {
        if (INVALID_VHPT(cch)) {
            len = cch->len;
            cch->page_flags = pte;
            cch->len = len;
            cch->itir = rr.ps << 2;
            cch->etag = tag;
            return;
        }
        cch = cch->next;
    }

    if (head->len >= MAX_CCN_DEPTH) {
        thash_recycle_cch(hcb, head);
        cch = cch_alloc(hcb);
    }
    else {
        cch = __alloc_chain(hcb);
    }
    *cch = *head;
    head->page_flags = pte;
    head->itir = rr.ps << 2;
    head->etag = tag;
    head->next = cch;
    head->len = cch->len + 1;
    cch->len = 0;
    return;
}
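
/*
 * Insert a translation for va into the VHPT when its page size covers
 * at least the machine region page size; otherwise install it directly
 * in the machine TLB via itc with interruption collection disabled.
 */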
void thash_vhpt_insert(VCPU *v, u64 pte, u64 itir, u64 va, int type)
{
    u64 phy_pte, psr;
    ia64_rr mrr;

    mrr.rrval = ia64_get_rr(va);
    phy_pte = translate_phy_pte(v, &pte, itir, va);

    if (itir_ps(itir) >= mrr.ps) {
        vmx_vhpt_insert(vcpu_get_vhpt(v), phy_pte, itir, va);
    } else {
        phy_pte &= ~PAGE_FLAGS_RV_MASK;
        psr = ia64_clear_ic();
        ia64_itc(type + 1, va, phy_pte, itir_ps(itir));
        ia64_set_psr(psr);
        ia64_srlz_i();
    }
}

/*
 * VHPT lookup: find the entry whose tag matches va.  When the match is
 * found on the collision chain rather than at the head, the entry is
 * swapped with the head so the next lookup hits sooner.
 */
thash_data_t *vhpt_lookup(u64 va)
{
    thash_data_t *hash, *head;
    u64 tag, pte, itir;

    head = (thash_data_t *)ia64_thash(va);
    hash = head;
    tag = ia64_ttag(va);
    do {
        if (hash->etag == tag)
            break;
        hash = hash->next;
    } while (hash);
    if (hash && hash != head) {
        pte = hash->page_flags;
        hash->page_flags = head->page_flags;
        head->page_flags = pte;
        tag = hash->etag;
        hash->etag = head->etag;
        head->etag = tag;
        itir = hash->itir;
        hash->itir = head->itir;
        head->itir = itir;
        head->len = hash->len;
        hash->len = 0;
        return head;
    }
    return hash;
}
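
/*
 * Look up iha in the machine VHPT on behalf of the guest, refilling the
 * VHPT from the VTLB on a miss.  The inline asm performs a speculative
 * 8-byte load with psr.ic and psr.i cleared: on a NaT result it returns
 * 1, otherwise it stores the low 53 bits of the entry through *pte and
 * returns 0.
 */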
u64 guest_vhpt_lookup(u64 iha, u64 *pte)
{
    u64 ret;
    thash_data_t *data;

    data = vhpt_lookup(iha);
    if (data == NULL) {
        data = vtlb_lookup(current, iha, DSIDE_TLB);
        if (data != NULL)
            thash_vhpt_insert(current, data->page_flags, data->itir,
                              iha, DSIDE_TLB);
    }

    asm volatile ("rsm psr.ic|psr.i;;"
                  "srlz.d;;"
                  "ld8.s r9=[%1];;"
                  "tnat.nz p6,p7=r9;;"
                  "(p6) mov %0=1;"
                  "(p6) mov r9=r0;"
                  "(p7) extr.u r9=r9,0,53;;"
                  "(p7) mov %0=r0;"
                  "(p7) st8 [%2]=r9;;"
                  "ssm psr.ic;;"
                  "srlz.d;;"
                  "ssm psr.i;;"
                  : "=r"(ret) : "r"(iha), "r"(pte) : "memory");
    return ret;
}

/*
 * Purge the software guest TLB (VTLB).
 */
static void vtlb_purge(VCPU *v, u64 va, u64 ps)
{
    thash_data_t *cur;
    u64 start, curadr, size, psbits, tag, rr_ps, num;
    ia64_rr vrr;
    thash_cb_t *hcb = &v->arch.vtlb;

    vcpu_get_rr(v, va, &vrr.rrval);
    psbits = VMX(v, psbits[(va >> 61)]);
    start = va & ~((1UL << ps) - 1);
    while (psbits) {
        curadr = start;
        rr_ps = __ffs(psbits);
        psbits &= ~(1UL << rr_ps);
        num = 1UL << ((ps < rr_ps) ? 0 : (ps - rr_ps));
        size = PSIZE(rr_ps);
        vrr.ps = rr_ps;
        while (num) {
            cur = vsa_thash(hcb->pta, curadr, vrr.rrval, &tag);
            while (cur) {
                if (cur->etag == tag && cur->ps == rr_ps)
                    cur->etag = 1UL << 63;
                cur = cur->next;
            }
            curadr += size;
            num--;
        }
    }
}

/*
 * Purge VHPT entries and the machine TLB.
 */
static void vhpt_purge(VCPU *v, u64 va, u64 ps)
{
    //thash_cb_t *hcb = &v->arch.vhpt;
    thash_data_t *cur;
    u64 start, size, tag, num;
    ia64_rr rr;

    start = va & ~((1UL << ps) - 1);
    rr.rrval = ia64_get_rr(va);
    size = PSIZE(rr.ps);
    num = 1UL << ((ps < rr.ps) ? 0 : (ps - rr.ps));
    while (num) {
        cur = (thash_data_t *)ia64_thash(start);
        tag = ia64_ttag(start);
        while (cur) {
            if (cur->etag == tag)
                cur->etag = 1UL << 63;
            cur = cur->next;
        }
        start += size;
        num--;
    }
    machine_tlb_purge(va, ps);
}

/*
 * Recycle all collision chains in VTLB or VHPT.
 */
void thash_recycle_cch_all(thash_cb_t *hcb)
{
    int num;
    thash_data_t *head;

    head = hcb->hash;
    num = (hcb->hash_sz / sizeof(thash_data_t));
    do {
        head->len = 0;
        head->next = 0;
        head++;
        num--;
    } while (num);
    cch_mem_init(hcb);
}
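
/*
 * Allocate a collision-chain entry, recycling every chain in the table
 * and retrying if the free list is exhausted.
 */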
thash_data_t *__alloc_chain(thash_cb_t *hcb)
{
    thash_data_t *cch;

    cch = cch_alloc(hcb);
    if (cch == NULL) {
        thash_recycle_cch_all(hcb);
        cch = cch_alloc(hcb);
    }
    return cch;
}

/*
 * Insert an entry into the hash TLB or VHPT.
 * NOTES:
 *  1: When inserting a VHPT entry into thash, "va" must be an address
 *     covered by the inserted machine VHPT entry.
 *  2: The format of the entry is always that of the TLB.
 *  3: The caller needs to make sure the new entry will not overlap
 *     with any existing entry.
 */
void vtlb_insert(VCPU *v, u64 pte, u64 itir, u64 va)
{
    thash_data_t *hash_table, *cch;
    /* int flag; */
    ia64_rr vrr;
    /* u64 gppn, ppns, ppne; */
    u64 tag, len;
    thash_cb_t *hcb = &v->arch.vtlb;

    vcpu_get_rr(v, va, &vrr.rrval);
    vrr.ps = itir_ps(itir);
    VMX(v, psbits[va >> 61]) |= (1UL << vrr.ps);
    hash_table = vsa_thash(hcb->pta, va, vrr.rrval, &tag);
    cch = hash_table;
    while (cch) {
        if (INVALID_TLB(cch)) {
            len = cch->len;
            cch->page_flags = pte;
            cch->len = len;
            cch->itir = itir;
            cch->etag = tag;
            return;
        }
        cch = cch->next;
    }
    if (hash_table->len >= MAX_CCN_DEPTH) {
        thash_recycle_cch(hcb, hash_table);
        cch = cch_alloc(hcb);
    }
    else {
        cch = __alloc_chain(hcb);
    }
    *cch = *hash_table;
    hash_table->page_flags = pte;
    hash_table->itir = itir;
    hash_table->etag = tag;
    hash_table->next = cch;
    hash_table->len = cch->len + 1;
    cch->len = 0;
    return;
}
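
/*
 * Search the guest TRs for an entry overlapping [va, va + 2^ps);
 * return its index, or -1 when there is no overlap.
 */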
int vtr_find_overlap(VCPU *vcpu, u64 va, u64 ps, int is_data)
{
    thash_data_t *trp;
    int i;
    u64 end, rid;

    vcpu_get_rr(vcpu, va, &rid);
    rid = rid & RR_RID_MASK;
    end = va + PSIZE(ps);
    if (is_data) {
        if (vcpu_quick_region_check(vcpu->arch.dtr_regions, va)) {
            for (trp = (thash_data_t *)vcpu->arch.dtrs, i = 0; i < NDTRS;
                 i++, trp++) {
                if (__is_tr_overlap(trp, rid, va, end)) {
                    return i;
                }
            }
        }
    }
    else {
        if (vcpu_quick_region_check(vcpu->arch.itr_regions, va)) {
            for (trp = (thash_data_t *)vcpu->arch.itrs, i = 0; i < NITRS;
                 i++, trp++) {
                if (__is_tr_overlap(trp, rid, va, end)) {
                    return i;
                }
            }
        }
    }
    return -1;
}

/*
 * Purge entries in VTLB and VHPT.
 */
void thash_purge_entries(VCPU *v, u64 va, u64 ps)
{
    if (vcpu_quick_region_check(v->arch.tc_regions, va))
        vtlb_purge(v, va, ps);
    vhpt_purge(v, va, ps);
}

void thash_purge_entries_remote(VCPU *v, u64 va, u64 ps)
{
    u64 old_va = va;

    va = REGION_OFFSET(va);
    if (vcpu_quick_region_check(v->arch.tc_regions, old_va))
        vtlb_purge(v, va, ps);
    vhpt_purge(v, va, ps);
}
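
/*
 * Translate the guest-physical address in *pte to a machine pte.
 * IO pages are marked with VTLB_PTE_IO in *pte and signalled by a
 * return value of -1.
 */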
u64 translate_phy_pte(VCPU *v, u64 *pte, u64 itir, u64 va)
{
    u64 ps, ps_mask, paddr, maddr;
    // ia64_rr rr;
    union pte_flags phy_pte;

    ps = itir_ps(itir);
    ps_mask = ~((1UL << ps) - 1);
    phy_pte.val = *pte;
    paddr = *pte;
    paddr = ((paddr & _PAGE_PPN_MASK) & ps_mask) | (va & ~ps_mask);
    maddr = lookup_domain_mpa(v->domain, paddr, NULL);
    if (maddr & GPFN_IO_MASK) {
        *pte |= VTLB_PTE_IO;
        return -1;
    }
    // rr.rrval = ia64_get_rr(va);
    // ps = rr.ps;
    maddr = ((maddr & _PAGE_PPN_MASK) & PAGE_MASK) | (paddr & ~PAGE_MASK);
    phy_pte.ppn = maddr >> ARCH_PAGE_SHIFT;
    return phy_pte.val;
}

/*
 * Purge overlapping TCs and then insert the new entry to emulate itc ops.
 * Notes: Only TC entries can be purged and inserted.
 */
void thash_purge_and_insert(VCPU *v, u64 pte, u64 itir, u64 ifa, int type)
{
    u64 ps; //, va;
    u64 phy_pte;
    ia64_rr vrr, mrr;

    ps = itir_ps(itir);
    vcpu_get_rr(current, ifa, &vrr.rrval);
    mrr.rrval = ia64_get_rr(ifa);
    if (VMX_DOMAIN(v)) {
        /* Ensure WB attribute if pte is related to a normal mem page,
         * which is required by vga acceleration since qemu maps shared
         * vram buffer with WB.
         */
        if (!(pte & VTLB_PTE_IO) && ((pte & _PAGE_MA_MASK) != _PAGE_MA_NAT))
            pte &= ~_PAGE_MA_MASK;

        phy_pte = translate_phy_pte(v, &pte, itir, ifa);
        vtlb_purge(v, ifa, ps);
        vhpt_purge(v, ifa, ps);
        if (ps == mrr.ps) {
            if (!(pte & VTLB_PTE_IO)) {
                vmx_vhpt_insert(&v->arch.vhpt, phy_pte, itir, ifa);
            }
            else {
                vtlb_insert(v, pte, itir, ifa);
                vcpu_quick_region_set(PSCBX(v, tc_regions), ifa);
            }
        }
        else if (ps > mrr.ps) {
            vtlb_insert(v, pte, itir, ifa);
            vcpu_quick_region_set(PSCBX(v, tc_regions), ifa);
            if (!(pte & VTLB_PTE_IO)) {
                vmx_vhpt_insert(&v->arch.vhpt, phy_pte, itir, ifa);
            }
        }
        else {
            u64 psr;

            phy_pte &= ~PAGE_FLAGS_RV_MASK;
            psr = ia64_clear_ic();
            ia64_itc(type + 1, ifa, phy_pte, ps);
            ia64_set_psr(psr);
            ia64_srlz_i();
            // ps < mrr.ps, this is not supported
            // panic_domain(NULL, "%s: ps (%lx) < mrr.ps \n", __func__, ps);
        }
    }
    else {
        phy_pte = translate_phy_pte(v, &pte, itir, ifa);
        if (ps != PAGE_SHIFT) {
            vtlb_insert(v, pte, itir, ifa);
            vcpu_quick_region_set(PSCBX(v, tc_regions), ifa);
        }
        machine_tlb_purge(ifa, ps);
        vmx_vhpt_insert(&v->arch.vhpt, phy_pte, itir, ifa);
    }
}

/*
 * Purge all TCs or VHPT entries, including those in the hash table.
 */
//TODO: add sections.
void thash_purge_all(VCPU *v)
{
    int num;
    thash_data_t *head;
    thash_cb_t *vtlb, *vhpt;

    vtlb = &v->arch.vtlb;
    vhpt = &v->arch.vhpt;

    for (num = 0; num < 8; num++)
        VMX(v, psbits[num]) = 0;

    head = vtlb->hash;
    num = (vtlb->hash_sz / sizeof(thash_data_t));
    do {
        head->page_flags = 0;
        head->etag = 1UL << 63;
        head->itir = 0;
        head->next = 0;
        head++;
        num--;
    } while (num);
    cch_mem_init(vtlb);

    head = vhpt->hash;
    num = (vhpt->hash_sz / sizeof(thash_data_t));
    do {
        head->page_flags = 0;
        head->etag = 1UL << 63;
        head->next = 0;
        head++;
        num--;
    } while (num);
    cch_mem_init(vhpt);

    local_flush_tlb_all();
}

/*
 * Look up the hash table and its collision chain to find an entry
 * covering this address rid:va.
 *
 * INPUT:
 *  in: TLB format for both VHPT & TLB.
 */
thash_data_t *vtlb_lookup(VCPU *v, u64 va, int is_data)
{
    thash_data_t *cch;
    u64 psbits, ps, tag;
    ia64_rr vrr;
    thash_cb_t *hcb = &v->arch.vtlb;

    cch = __vtr_lookup(v, va, is_data);
    if (cch)
        return cch;

    if (vcpu_quick_region_check(v->arch.tc_regions, va) == 0)
        return NULL;

    psbits = VMX(v, psbits[(va >> 61)]);
    vcpu_get_rr(v, va, &vrr.rrval);
    while (psbits) {
        ps = __ffs(psbits);
        psbits &= ~(1UL << ps);
        vrr.ps = ps;
        cch = vsa_thash(hcb->pta, va, vrr.rrval, &tag);
        do {
            if (cch->etag == tag && cch->ps == ps)
                return cch;
            cch = cch->next;
        } while (cch);
    }
    return NULL;
}

/*
 * Initialize internal control data before service.
 */
void thash_init(thash_cb_t *hcb, u64 sz)
{
    int num;
    thash_data_t *head, *p;

    hcb->pta.val = (unsigned long)hcb->hash;
    hcb->pta.vf = 1;
    hcb->pta.ve = 1;
    hcb->pta.size = sz;
    hcb->cch_rec_head = hcb->hash;

    head = hcb->hash;
    num = (hcb->hash_sz / sizeof(thash_data_t));
    do {
        head->page_flags = 0;
        head->itir = 0;
        head->etag = 1UL << 63;
        head->next = 0;
        head++;
        num--;
    } while (num);

    /* Initialize every element of cch_buf, including the last one
     * (this changeset's fix), before terminating the free list. */
    hcb->cch_freelist = p = hcb->cch_buf;
    num = hcb->cch_sz / sizeof(thash_data_t);
    do {
        p->page_flags = 0;
        p->itir = 0;
        p->next = p + 1;
        p++;
        num--;
    } while (num);

    (p - 1)->next = NULL;
}