ia64/xen-unstable

view xen/arch/ia64/vmx/vtlb.c @ 15565:37833b33ae77

[IA64] Remove useless code

Signed-off-by: Kouya Shimura <kouya@jp.fujitsu.com>
Author:   Alex Williamson <alex.williamson@hp.com>
Date:     Thu Jul 26 14:35:01 2007 -0600
Parents:  834ac63f4894
Children: 255abff9d1f7

/* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
/*
 * vtlb.c: guest virtual TLB handling module.
 * Copyright (c) 2004, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Yaozu Dong (Eddie Dong) (Eddie.dong@intel.com)
 * XiaoYan Feng (Fleming Feng) (Fleming.feng@intel.com)
 */

#include <linux/sched.h>
#include <asm/tlb.h>
#include <xen/mm.h>
#include <asm/vmx_mm_def.h>
#include <asm/gcc_intrin.h>
#include <linux/interrupt.h>
#include <asm/vmx_vcpu.h>
#include <asm/vmx_phy_mode.h>
#include <asm/vmmu.h>
#include <asm/tlbflush.h>
#include <asm/regionreg.h>

#define MAX_CCH_LENGTH 40

thash_data_t *__alloc_chain(thash_cb_t *);

static inline void cch_mem_init(thash_cb_t *hcb)
{
    hcb->cch_free_idx = 0;
    hcb->cch_freelist = NULL;
}

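/*
 * Allocate a collision-chain slot: reuse an entry from the freelist if
 * one is available, otherwise take the next unused slot from the chain
 * buffer.  Returns NULL once the buffer is exhausted.
 */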
static thash_data_t *cch_alloc(thash_cb_t *hcb)
{
    thash_data_t *p;

    if ((p = hcb->cch_freelist) != NULL) {
        hcb->cch_freelist = p->next;
        return p;
    }
    if (hcb->cch_free_idx < hcb->cch_sz / sizeof(thash_data_t)) {
        p = &((thash_data_t *)hcb->cch_buf)[hcb->cch_free_idx++];
        p->page_flags = 0;
        p->itir = 0;
        p->next = NULL;
        return p;
    }
    return NULL;
}

/*
 * Check to see if the address rid:va is translated by the TR entry trp.
 */
static inline int __is_tr_translated(thash_data_t *trp, u64 rid, u64 va)
{
    return trp->p && trp->rid == rid && (va - trp->vadr) < PSIZE(trp->ps);
}

/*
 * Only for GUEST TR format.
 */
static int
__is_tr_overlap(thash_data_t *trp, u64 rid, u64 sva, u64 eva)
{
    uint64_t sa1, ea1;

    if (!trp->p || trp->rid != rid)
        return 0;
    sa1 = trp->vadr;
    ea1 = sa1 + PSIZE(trp->ps) - 1;
    eva -= 1;
    if (sva > ea1 || sa1 > eva)
        return 0;
    else
        return 1;
}

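/*
 * Scan the guest TRs (data side or instruction side) for an entry
 * translating rid:va.  Returns the matching TR, or NULL if none covers
 * the address.
 */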
thash_data_t *__vtr_lookup(VCPU *vcpu, u64 va, int is_data)
{
    thash_data_t *trp;
    int i;
    u64 rid;

    vcpu_get_rr(vcpu, va, &rid);
    rid = rid & RR_RID_MASK;
    if (is_data) {
        if (vcpu_quick_region_check(vcpu->arch.dtr_regions, va)) {
            for (trp = (thash_data_t *)vcpu->arch.dtrs, i = 0; i < NDTRS;
                 i++, trp++) {
                if (__is_tr_translated(trp, rid, va))
                    return trp;
            }
        }
    } else {
        if (vcpu_quick_region_check(vcpu->arch.itr_regions, va)) {
            for (trp = (thash_data_t *)vcpu->arch.itrs, i = 0; i < NITRS;
                 i++, trp++) {
                if (__is_tr_translated(trp, rid, va))
                    return trp;
            }
        }
    }
    return NULL;
}

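/*
 * Detach the collision chain hanging off this hash line (the first
 * MAX_CCN_DEPTH links) and splice it onto the free list.
 */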
static void thash_recycle_cch(thash_cb_t *hcb, thash_data_t *hash)
{
    thash_data_t *p, *q;
    int i;

    p = hash;
    for (i = 0; i < MAX_CCN_DEPTH; i++)
        p = p->next;
    q = hash->next;
    hash->len = 0;
    hash->next = 0;
    p->next = hcb->cch_freelist;
    hcb->cch_freelist = q;
}

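/*
 * Insert a translation into the machine VHPT.  If the head slot of the
 * line is in use, its contents are first moved into a collision-chain
 * slot (recycling the chain when it grows past MAX_CCN_DEPTH) so the
 * new entry can be written into the head.
 */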
static void vmx_vhpt_insert(thash_cb_t *hcb, u64 pte, u64 itir, u64 ifa)
{
    u64 tag;
    ia64_rr rr;
    thash_data_t *head, *cch;

    pte = pte & ~PAGE_FLAGS_RV_MASK;
    rr.rrval = ia64_get_rr(ifa);
    head = (thash_data_t *)ia64_thash(ifa);
    tag = ia64_ttag(ifa);
    cch = head;
    while (cch) {
        if (INVALID_VHPT(cch))
            break;
        cch = cch->next;
    }
    if (cch) {
        if (cch != head) {
            local_irq_disable();
            cch->page_flags = head->page_flags;
            cch->itir = head->itir;
            cch->etag = head->etag;
            head->ti = 1;
            local_irq_enable();
        }
    } else {
        if (head->len >= MAX_CCN_DEPTH) {
            thash_recycle_cch(hcb, head);
            cch = cch_alloc(hcb);
        } else {
            cch = __alloc_chain(hcb);
        }
        local_irq_disable();
        *cch = *head;
        head->ti = 1;
        head->next = cch;
        head->len = cch->len + 1;
        cch->len = 0;
        local_irq_enable();
    }
    /* Here head is invalid. */
    wmb();
    head->page_flags = pte;
    head->itir = rr.ps << 2;
    *(volatile unsigned long *)&head->etag = tag;
    return;
}

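/*
 * Insert a guest translation into the machine VHPT when the guest page
 * is at least as large as the machine page; otherwise pin it straight
 * into the machine TLB with itc.
 */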
void thash_vhpt_insert(VCPU *v, u64 pte, u64 itir, u64 va, int type)
{
    u64 phy_pte, psr;
    ia64_rr mrr;

    mrr.rrval = ia64_get_rr(va);
    phy_pte = translate_phy_pte(v, &pte, itir, va);

    if (itir_ps(itir) >= mrr.ps) {
        vmx_vhpt_insert(vcpu_get_vhpt(v), phy_pte, itir, va);
    } else {
        phy_pte &= ~PAGE_FLAGS_RV_MASK;
        psr = ia64_clear_ic();
        ia64_itc(type + 1, va, phy_pte, itir_ps(itir));
        ia64_set_psr(psr);
        ia64_srlz_i();
    }
}

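/*
 * Propagate the AR/PL bits of the TR covering ifa into the matching
 * VHPT entry.  Returns 1 if the entry was updated (the machine TLB is
 * purged in that case), 0 otherwise.
 */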
int vhpt_access_rights_fixup(VCPU *v, u64 ifa, int is_data)
{
    thash_data_t *trp, *data;
    u64 ps, tag, mask;

    trp = __vtr_lookup(v, ifa, is_data);
    if (trp) {
        ps = _REGION_PAGE_SIZE(ia64_get_rr(ifa));
        if (trp->ps < ps)
            return 0;
        ifa = PAGEALIGN(ifa, ps);
        data = (thash_data_t *)ia64_thash(ifa);
        tag = ia64_ttag(ifa);
        do {
            if (data->etag == tag) {
                mask = trp->page_flags & PAGE_FLAGS_AR_PL_MASK;
                if (mask != (data->page_flags & PAGE_FLAGS_AR_PL_MASK)) {
                    data->page_flags &= ~PAGE_FLAGS_AR_PL_MASK;
                    data->page_flags |= mask;
                    machine_tlb_purge(ifa, ps);
                    return 1;
                }
                return 0;
            }
            data = data->next;
        } while (data);
    }
    return 0;
}

/*
 * VHPT lookup.  On a hit in the collision chain the entry is swapped
 * with the head slot, so the hardware walker finds it first next time.
 */
thash_data_t *vhpt_lookup(u64 va)
{
    thash_data_t *hash, *head;
    u64 tag, pte, itir;

    head = (thash_data_t *)ia64_thash(va);
    hash = head;
    tag = ia64_ttag(va);
    do {
        if (hash->etag == tag)
            break;
        hash = hash->next;
    } while (hash);
    if (hash && hash != head) {
        pte = hash->page_flags;
        hash->page_flags = head->page_flags;
        head->page_flags = pte;
        tag = hash->etag;
        hash->etag = head->etag;
        head->etag = tag;
        itir = hash->itir;
        hash->itir = head->itir;
        head->itir = itir;
        head->len = hash->len;
        hash->len = 0;
        return head;
    }
    return hash;
}

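/*
 * Read the guest's VHPT entry at iha.  If iha is not mapped yet, try to
 * map it from the guest TRs first.  The asm below does a speculative
 * load with interrupts and PSR.ic off: it returns 1 if the load would
 * fault (NaT set), otherwise 0 with the entry stored through pte.
 */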
u64 guest_vhpt_lookup(u64 iha, u64 *pte)
{
    u64 ret;
    thash_data_t *data;

    data = vhpt_lookup(iha);
    if (data == NULL) {
        data = __vtr_lookup(current, iha, DSIDE_TLB);
        if (data != NULL)
            thash_vhpt_insert(current, data->page_flags, data->itir,
                              iha, DSIDE_TLB);
    }

    asm volatile ("rsm psr.ic|psr.i;;"
                  "srlz.d;;"
                  "ld8.s r9=[%1];;"
                  "tnat.nz p6,p7=r9;;"
                  "(p6) mov %0=1;"
                  "(p6) mov r9=r0;"
                  "(p7) extr.u r9=r9,0,53;;"
                  "(p7) mov %0=r0;"
                  "(p7) st8 [%2]=r9;;"
                  "ssm psr.ic;;"
                  "srlz.d;;"
                  "ssm psr.i;;"
                  : "=r"(ret) : "r"(iha), "r"(pte) : "memory");
    return ret;
}

/*
 * Purge the software guest TLB.
 */
static void vtlb_purge(VCPU *v, u64 va, u64 ps)
{
    thash_data_t *cur;
    u64 start, curadr, size, psbits, tag, rr_ps, num;
    ia64_rr vrr;
    thash_cb_t *hcb = &v->arch.vtlb;

    vcpu_get_rr(v, va, &vrr.rrval);
    psbits = VMX(v, psbits[(va >> 61)]);
    start = va & ~((1UL << ps) - 1);
    while (psbits) {
        curadr = start;
        rr_ps = __ffs(psbits);
        psbits &= ~(1UL << rr_ps);
        num = 1UL << ((ps < rr_ps) ? 0 : (ps - rr_ps));
        size = PSIZE(rr_ps);
        vrr.ps = rr_ps;
        while (num) {
            cur = vsa_thash(hcb->pta, curadr, vrr.rrval, &tag);
            while (cur) {
                if (cur->etag == tag && cur->ps == rr_ps)
                    cur->etag = 1UL << 63;
                cur = cur->next;
            }
            curadr += size;
            num--;
        }
    }
}

/*
 * Purge VHPT and machine TLB.
 */
static void vhpt_purge(VCPU *v, u64 va, u64 ps)
{
    thash_data_t *cur;
    u64 start, size, tag, num;
    ia64_rr rr;

    start = va & ~((1UL << ps) - 1);
    rr.rrval = ia64_get_rr(va);
    size = PSIZE(rr.ps);
    num = 1UL << ((ps < rr.ps) ? 0 : (ps - rr.ps));
    while (num) {
        cur = (thash_data_t *)ia64_thash(start);
        tag = ia64_ttag(start);
        while (cur) {
            if (cur->etag == tag)
                cur->etag = 1UL << 63;
            cur = cur->next;
        }
        start += size;
        num--;
    }
    machine_tlb_purge(va, ps);
}

/*
 * Recycle all collision chains in the VTLB or VHPT.
 */
void thash_recycle_cch_all(thash_cb_t *hcb)
{
    int num;
    thash_data_t *head;

    head = hcb->hash;
    num = hcb->hash_sz / sizeof(thash_data_t);
    do {
        head->len = 0;
        head->next = 0;
        head++;
        num--;
    } while (num);
    cch_mem_init(hcb);
}

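/*
 * Allocate a collision-chain slot, recycling all chains in the table
 * when the chain buffer has run dry.
 */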
thash_data_t *__alloc_chain(thash_cb_t *hcb)
{
    thash_data_t *cch;

    cch = cch_alloc(hcb);
    if (cch == NULL) {
        thash_recycle_cch_all(hcb);
        cch = cch_alloc(hcb);
    }
    return cch;
}

/*
 * Insert an entry into the hash TLB or VHPT.
 * NOTES:
 *  1: When inserting a VHPT entry into the thash, "va" must be an
 *     address covered by the inserted machine VHPT entry.
 *  2: The format of the entry is always TLB format.
 *  3: The caller must ensure the new entry does not overlap with any
 *     existing entry.
 */
void vtlb_insert(VCPU *v, u64 pte, u64 itir, u64 va)
{
    thash_data_t *hash_table, *cch;
    ia64_rr vrr;
    u64 tag, len;
    thash_cb_t *hcb = &v->arch.vtlb;

    vcpu_get_rr(v, va, &vrr.rrval);
    vrr.ps = itir_ps(itir);
    VMX(v, psbits[va >> 61]) |= (1UL << vrr.ps);
    hash_table = vsa_thash(hcb->pta, va, vrr.rrval, &tag);
    cch = hash_table;
    while (cch) {
        if (INVALID_TLB(cch)) {
            len = cch->len;
            cch->page_flags = pte;
            cch->len = len;
            cch->itir = itir;
            cch->etag = tag;
            return;
        }
        cch = cch->next;
    }
    if (hash_table->len >= MAX_CCN_DEPTH) {
        thash_recycle_cch(hcb, hash_table);
        cch = cch_alloc(hcb);
    } else {
        cch = __alloc_chain(hcb);
    }
    cch->page_flags = pte;
    cch->itir = itir;
    cch->etag = tag;
    cch->next = hash_table->next;
    wmb();
    hash_table->next = cch;
    hash_table->len += 1;
    return;
}

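/*
 * Find a guest TR entry overlapping [va, va + PSIZE(ps)).  Returns the
 * TR slot index, or -1 if nothing overlaps.
 */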
int vtr_find_overlap(VCPU *vcpu, u64 va, u64 ps, int is_data)
{
    thash_data_t *trp;
    int i;
    u64 end, rid;

    vcpu_get_rr(vcpu, va, &rid);
    rid = rid & RR_RID_MASK;
    end = va + PSIZE(ps);
    if (is_data) {
        if (vcpu_quick_region_check(vcpu->arch.dtr_regions, va)) {
            for (trp = (thash_data_t *)vcpu->arch.dtrs, i = 0; i < NDTRS;
                 i++, trp++) {
                if (__is_tr_overlap(trp, rid, va, end))
                    return i;
            }
        }
    } else {
        if (vcpu_quick_region_check(vcpu->arch.itr_regions, va)) {
            for (trp = (thash_data_t *)vcpu->arch.itrs, i = 0; i < NITRS;
                 i++, trp++) {
                if (__is_tr_overlap(trp, rid, va, end))
                    return i;
            }
        }
    }
    return -1;
}

/*
 * Purge entries in VTLB and VHPT.
 */
void thash_purge_entries(VCPU *v, u64 va, u64 ps)
{
    if (vcpu_quick_region_check(v->arch.tc_regions, va))
        vtlb_purge(v, va, ps);
    vhpt_purge(v, va, ps);
}

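/*
 * Variant used for remote purges: the purge itself operates on the
 * region offset of va (presumably because v need not be the current
 * vcpu), while the quick region check still uses the original address.
 */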
void thash_purge_entries_remote(VCPU *v, u64 va, u64 ps)
{
    u64 old_va = va;

    va = REGION_OFFSET(va);
    if (vcpu_quick_region_check(v->arch.tc_regions, old_va))
        vtlb_purge(v, va, ps);
    vhpt_purge(v, va, ps);
}

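/*
 * Translate a guest PTE into a machine PTE: rewrite the ppn via
 * lookup_domain_mpa, mark I/O pages by setting VTLB_PTE_IO in *pte
 * (returning -1 for them), and force the WB memory attribute on
 * ordinary memory pages.
 */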
u64 translate_phy_pte(VCPU *v, u64 *pte, u64 itir, u64 va)
{
    u64 ps, ps_mask, paddr, maddr;
    union pte_flags phy_pte;

    ps = itir_ps(itir);
    ps_mask = ~((1UL << ps) - 1);
    phy_pte.val = *pte;
    paddr = *pte;
    paddr = ((paddr & _PAGE_PPN_MASK) & ps_mask) | (va & ~ps_mask);
    maddr = lookup_domain_mpa(v->domain, paddr, NULL);
    if (maddr & GPFN_IO_MASK) {
        *pte |= VTLB_PTE_IO;
        return -1;
    }
    /*
     * Ensure the WB attribute if the pte refers to a normal memory
     * page; this is required by VGA acceleration since qemu maps the
     * shared vram buffer with WB.
     */
    if (phy_pte.ma != VA_MATTR_NATPAGE)
        phy_pte.ma = VA_MATTR_WB;

    maddr = ((maddr & _PAGE_PPN_MASK) & PAGE_MASK) | (paddr & ~PAGE_MASK);
    phy_pte.ppn = maddr >> ARCH_PAGE_SHIFT;
    return phy_pte.val;
}

/*
 * Purge overlapping TCs and then insert the new entry to emulate itc ops.
 * Notes: Only TC entries can be purged and inserted this way.
 * Returns 1 if the translation covers MMIO space.
 */
int thash_purge_and_insert(VCPU *v, u64 pte, u64 itir, u64 ifa, int type)
{
    u64 ps;
    u64 phy_pte;
    ia64_rr mrr;
    int ret = 0;

    ps = itir_ps(itir);
    mrr.rrval = ia64_get_rr(ifa);
    if (VMX_DOMAIN(v)) {
        phy_pte = translate_phy_pte(v, &pte, itir, ifa);

        if (pte & VTLB_PTE_IO)
            ret = 1;
        vtlb_purge(v, ifa, ps);
        vhpt_purge(v, ifa, ps);
        if (ps == mrr.ps) {
            if (!(pte & VTLB_PTE_IO)) {
                vmx_vhpt_insert(&v->arch.vhpt, phy_pte, itir, ifa);
            } else {
                vtlb_insert(v, pte, itir, ifa);
                vcpu_quick_region_set(PSCBX(v, tc_regions), ifa);
            }
        } else if (ps > mrr.ps) {
            vtlb_insert(v, pte, itir, ifa);
            vcpu_quick_region_set(PSCBX(v, tc_regions), ifa);
            if (!(pte & VTLB_PTE_IO)) {
                vmx_vhpt_insert(&v->arch.vhpt, phy_pte, itir, ifa);
            }
        } else {
            u64 psr;

            /* ps < mrr.ps is not really supported; insert directly
             * into the machine TLB instead. */
            phy_pte &= ~PAGE_FLAGS_RV_MASK;
            psr = ia64_clear_ic();
            ia64_itc(type + 1, ifa, phy_pte, ps);
            ia64_set_psr(psr);
            ia64_srlz_i();
        }
    } else {
        phy_pte = translate_phy_pte(v, &pte, itir, ifa);
        if (ps != PAGE_SHIFT) {
            vtlb_insert(v, pte, itir, ifa);
            vcpu_quick_region_set(PSCBX(v, tc_regions), ifa);
        }
        machine_tlb_purge(ifa, ps);
        vmx_vhpt_insert(&v->arch.vhpt, phy_pte, itir, ifa);
    }
    return ret;
}

/*
 * Purge all TCs or VHPT entries, including those in the hash table.
 */
/* TODO: add sections. */
void thash_purge_all(VCPU *v)
{
    int num;
    thash_data_t *head;
    thash_cb_t *vtlb, *vhpt;

    vtlb = &v->arch.vtlb;
    vhpt = &v->arch.vhpt;

    for (num = 0; num < 8; num++)
        VMX(v, psbits[num]) = 0;

    head = vtlb->hash;
    num = vtlb->hash_sz / sizeof(thash_data_t);
    do {
        head->page_flags = 0;
        head->etag = 1UL << 63;
        head->itir = 0;
        head->next = 0;
        head++;
        num--;
    } while (num);
    cch_mem_init(vtlb);

    head = vhpt->hash;
    num = vhpt->hash_sz / sizeof(thash_data_t);
    do {
        head->page_flags = 0;
        head->etag = 1UL << 63;
        head->next = 0;
        head++;
        num--;
    } while (num);
    cch_mem_init(vhpt);
    local_flush_tlb_all();
}

/*
 * Look up the hash table and its collision chain to find an entry
 * covering this address rid:va.
 *
 * INPUT:
 *  in: TLB format for both VHPT & TLB.
 */
thash_data_t *vtlb_lookup(VCPU *v, u64 va, int is_data)
{
    thash_data_t *cch;
    u64 psbits, ps, tag;
    ia64_rr vrr;
    thash_cb_t *hcb = &v->arch.vtlb;

    cch = __vtr_lookup(v, va, is_data);
    if (cch)
        return cch;

    if (vcpu_quick_region_check(v->arch.tc_regions, va) == 0)
        return NULL;
    psbits = VMX(v, psbits[(va >> 61)]);
    vcpu_get_rr(v, va, &vrr.rrval);
    while (psbits) {
        ps = __ffs(psbits);
        psbits &= ~(1UL << ps);
        vrr.ps = ps;
        cch = vsa_thash(hcb->pta, va, vrr.rrval, &tag);
        do {
            if (cch->etag == tag && cch->ps == ps)
                return cch;
            cch = cch->next;
        } while (cch);
    }
    return NULL;
}

/*
 * Initialize internal control data before service.
 */
void thash_init(thash_cb_t *hcb, u64 sz)
{
    int num;
    thash_data_t *head;

    hcb->pta.val = (unsigned long)hcb->hash;
    hcb->pta.vf = 1;
    hcb->pta.ve = 1;
    hcb->pta.size = sz;

    head = hcb->hash;
    num = hcb->hash_sz / sizeof(thash_data_t);
    do {
        head->page_flags = 0;
        head->itir = 0;
        head->etag = 1UL << 63;
        head->next = 0;
        head++;
        num--;
    } while (num);

    hcb->cch_free_idx = 0;
    hcb->cch_freelist = NULL;
}