ia64/xen-unstable

view xen/arch/ia64/vmx/vmmu.c @ 15419:962f22223817

[IA64] Domain debugger for VTi: virtualize ibr and dbr.

Misc cleanup.

Signed-off-by: Tristan Gingold <tgingold@free.fr>
author Alex Williamson <alex.williamson@hp.com>
date Mon Jul 02 10:10:32 2007 -0600 (2007-07-02)
parents 2d26b66901d2
children 255abff9d1f7

/* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
/*
 * vmmu.c: virtual memory management unit components.
 * Copyright (c) 2005, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com)
 * Yaozu Dong (Eddie Dong) (Eddie.dong@intel.com)
 */
#include <linux/sched.h>
#include <linux/mm.h>
#include <asm/tlb.h>
#include <asm/gcc_intrin.h>
#include <asm/vcpu.h>
#include <linux/interrupt.h>
#include <asm/vmx_vcpu.h>
#include <asm/vmx_mm_def.h>
#include <asm/vmx.h>
#include <asm/hw_irq.h>
#include <asm/vmx_pal_vsa.h>
#include <asm/kregs.h>
#include <xen/irq.h>
#include <xen/errno.h>
#include <xen/sched-if.h>

/*
 * Get the machine page frame number in 16KB units.
 * Input:
 *  d:    domain
 *  gpfn: guest page frame number
 */
u64 get_mfn(struct domain *d, u64 gpfn)
{
//    struct domain *d;
    u64    xen_gppn, xen_mppn, mpfn;
/*
    if ( domid == DOMID_SELF ) {
        d = current->domain;
    }
    else {
        d = get_domain_by_id(domid);
    }
 */
    xen_gppn = arch_to_xen_ppn(gpfn);
    xen_mppn = gmfn_to_mfn(d, xen_gppn);
/*
    for (i=0; i<pages; i++) {
        if ( gmfn_to_mfn(d, gpfn+i) == INVALID_MFN ) {
            return INVALID_MFN;
        }
    }
 */
    mpfn = xen_to_arch_ppn(xen_mppn);
    mpfn = mpfn | (((1UL << (PAGE_SHIFT - ARCH_PAGE_SHIFT)) - 1) & gpfn);
    return mpfn;
}

/*
 * The VRN bits of va stand for which rr to get.
 */
//ia64_rr vmmu_get_rr(struct vcpu *vcpu, u64 va)
//{
//    ia64_rr vrr;
//    vcpu_get_rr(vcpu, va, &vrr.rrval);
//    return vrr;
//}

/*
void recycle_message(thash_cb_t *hcb, u64 para)
{
    if (hcb->ht == THASH_VHPT)
    {
        printk("ERROR: vhpt recycle happening!!!\n");
    }
    printk("hcb=%p recycled with %lx\n", hcb, para);
}
 */

/*
 * Purge all guest TCs on the logical processor.
 * Instead of purging all LP TCs, we should only purge
 * TCs that belong to this guest.
 */
void
purge_machine_tc_by_domid(domid_t domid)
{
#ifndef PURGE_GUEST_TC_ONLY
    // purge all TCs
    struct ia64_pal_retval result;
    u64 addr;
    u32 count1, count2;
    u32 stride1, stride2;
    u32 i, j;
    u64 psr;

    result = ia64_pal_call_static(PAL_PTCE_INFO, 0, 0, 0, 0);
    if ( result.status != 0 ) {
        panic ("PAL_PTCE_INFO failed\n");
    }
    addr = result.v0;
    count1 = HIGH_32BITS(result.v1);
    count2 = LOW_32BITS (result.v1);
    stride1 = HIGH_32BITS(result.v2);
    stride2 = LOW_32BITS (result.v2);

    local_irq_save(psr);
    for (i = 0; i < count1; i++) {
        for (j = 0; j < count2; j++) {
            ia64_ptce(addr);
            addr += stride2;
        }
        addr += stride1;
    }
    local_irq_restore(psr);
#else
    // purge only the TCs that belong to this guest.
#endif
}

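/*
 * Allocate and initialize the per-VCPU VHPT.  The first half of the
 * allocation is used as the hash table and the second half as the
 * collision chain buffer; mpta caches the PTA value for this VHPT.
 */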
static int init_domain_vhpt(struct vcpu *v)
{
    struct page_info *page;
    void * vbase;
    page = alloc_domheap_pages (NULL, VCPU_VHPT_ORDER, 0);
    if ( page == NULL ) {
        printk("Not enough contiguous memory for init_domain_vhpt\n");
        return -ENOMEM;
    }
    vbase = page_to_virt(page);
    memset(vbase, 0, VCPU_VHPT_SIZE);
    printk(XENLOG_DEBUG "Allocated domain vhpt at 0x%p\n", vbase);

    VHPT(v, hash) = vbase;
    VHPT(v, hash_sz) = VCPU_VHPT_SIZE / 2;
    VHPT(v, cch_buf) = (void *)((u64)vbase + VHPT(v, hash_sz));
    VHPT(v, cch_sz) = VCPU_VHPT_SIZE - VHPT(v, hash_sz);
    thash_init(&(v->arch.vhpt), VCPU_VHPT_SHIFT - 1);
    v->arch.arch_vmx.mpta = v->arch.vhpt.pta.val;

    return 0;
}

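/* Free the per-VCPU VHPT pages allocated by init_domain_vhpt(). */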
static void free_domain_vhpt(struct vcpu *v)
{
    struct page_info *page;

    if (v->arch.vhpt.hash) {
        page = virt_to_page(v->arch.vhpt.hash);
        free_domheap_pages(page, VCPU_VHPT_ORDER);
        v->arch.vhpt.hash = 0;
    }

    return;
}

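/*
 * Allocate the per-VCPU VHPT and virtual TLB.  Like the VHPT, the vTLB
 * allocation is split into a hash table and a collision chain buffer.
 */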
int init_domain_tlb(struct vcpu *v)
{
    struct page_info *page;
    void * vbase;
    int rc;

    rc = init_domain_vhpt(v);
    if (rc)
        return rc;

    page = alloc_domheap_pages (NULL, VCPU_VTLB_ORDER, 0);
    if ( page == NULL ) {
        printk("Not enough contiguous memory for init_domain_tlb\n");
        free_domain_vhpt(v);
        return -ENOMEM;
    }
    vbase = page_to_virt(page);
    memset(vbase, 0, VCPU_VTLB_SIZE);
    printk(XENLOG_DEBUG "Allocated domain vtlb at 0x%p\n", vbase);

    VTLB(v, hash) = vbase;
    VTLB(v, hash_sz) = VCPU_VTLB_SIZE / 2;
    VTLB(v, cch_buf) = (void *)((u64)vbase + VTLB(v, hash_sz));
    VTLB(v, cch_sz) = VCPU_VTLB_SIZE - VTLB(v, hash_sz);
    thash_init(&(v->arch.vtlb), VCPU_VTLB_SHIFT - 1);

    return 0;
}

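/* Free the per-VCPU virtual TLB and, via free_domain_vhpt(), the VHPT. */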
void free_domain_tlb(struct vcpu *v)
{
    struct page_info *page;

    if ( v->arch.vtlb.hash) {
        page = virt_to_page(v->arch.vtlb.hash);
        free_domheap_pages(page, VCPU_VTLB_ORDER);
    }

    free_domain_vhpt(v);
}

/*
 * Insert a guest TLB entry into the machine TLB.
 * data: in TLB format
 */
void machine_tlb_insert(struct vcpu *v, thash_data_t *tlb)
{
    u64 psr;
    thash_data_t mtlb;
    unsigned int cl = tlb->cl;
    unsigned long mtlb_ppn;
    mtlb.ifa = tlb->vadr;
    mtlb.itir = tlb->itir & ~ITIR_RV_MASK;
    mtlb.page_flags = tlb->page_flags & ~PAGE_FLAGS_RV_MASK;
    mtlb.ppn = get_mfn(v->domain, tlb->ppn);
    mtlb_ppn = mtlb.ppn;

#if 0
    if (mtlb_ppn == INVALID_MFN)
        panic_domain(vcpu_regs(v), "Machine tlb insert with invalid mfn number.\n");
#endif

    psr = ia64_clear_ic();
    if ( cl == ISIDE_TLB ) {
        ia64_itc(1, mtlb.ifa, mtlb.page_flags, mtlb.ps);
    }
    else {
        ia64_itc(2, mtlb.ifa, mtlb.page_flags, mtlb.ps);
    }
    ia64_set_psr(psr);
    ia64_srlz_i();
    return;
}

/*
 * Purge the machine TLB.
 * INPUT
 *  va: only bits 0:60 are valid
 *  ps: log2 of the page size; (1 << ps) is the address range to purge.
 */
void machine_tlb_purge(u64 va, u64 ps)
{
//    u64 psr;
//    psr = ia64_clear_ic();
    ia64_ptcl(va, ps << 2);
//    ia64_set_psr(psr);
//    ia64_srlz_i();
//    return;
}

/*
u64 machine_thash(u64 va)
{
    return ia64_thash(va);
}

u64 machine_ttag(u64 va)
{
    return ia64_ttag(va);
}
 */

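/*
 * Hash function used in place of the PAL VSA thash service (see the
 * commented-out ia64_call_vsa below): derive the guest VHPT entry address
 * for va from the guest PTA and rr, and return the matching tag via *tag.
 */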
thash_data_t * vsa_thash(PTA vpta, u64 va, u64 vrr, u64 *tag)
{
    u64 index, pfn, rid, pfn_bits;
    pfn_bits = vpta.size - 5 - 8;
    pfn = REGION_OFFSET(va) >> _REGION_PAGE_SIZE(vrr);
    rid = _REGION_ID(vrr);
    index = ((rid & 0xff) << pfn_bits) | (pfn & ((1UL << pfn_bits) - 1));
    *tag = ((rid >> 8) & 0xffff) | ((pfn >> pfn_bits) << 16);
    return (thash_data_t *)((vpta.base << PTA_BASE_SHIFT) + (index << 5));
//    return ia64_call_vsa(PAL_VPS_THASH,va,vrr,vpta,0,0,0,0);
}

//u64 vsa_ttag(u64 va, u64 vrr)
//{
//    return ia64_call_vsa(PAL_VPS_TTAG,va,vrr,0,0,0,0,0);
//}

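/*
 * Tell whether the guest VHPT walker is enabled for a reference of type
 * 'ref' to address vadr: both rr.ve and pta.ve must be set, plus the
 * psr translation bits relevant to that reference type.
 */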
int vhpt_enabled(VCPU *vcpu, uint64_t vadr, vhpt_ref_t ref)
{
    ia64_rr vrr;
    PTA vpta;
    IA64_PSR vpsr;

    vpsr.val = VCPU(vcpu, vpsr);
    vcpu_get_rr(vcpu, vadr, &vrr.rrval);
    vpta.val = vmx_vcpu_get_pta(vcpu);

    if ( vrr.ve & vpta.ve ) {
        switch ( ref ) {
        case DATA_REF:
        case NA_REF:
            return vpsr.dt;
        case INST_REF:
            return vpsr.dt && vpsr.it && vpsr.ic;
        case RSE_REF:
            return vpsr.dt && vpsr.rt;
        }
    }
    return 0;
}

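/*
 * Check whether vadr falls in the unimplemented part of the guest virtual
 * address space.  The check is currently compiled out and always returns 0.
 */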
int unimplemented_gva(VCPU *vcpu, u64 vadr)
{
#if 0
    int bit = vcpu->domain->arch.imp_va_msb;
    u64 ladr = (vadr << 3) >> (3 + bit);
    if (!ladr || ladr == (1U << (61 - bit)) - 1) {
        return 0;
    } else {
        return 1;
    }
#else
    return 0;
#endif
}

/*
 * Fetch guest bundle code.
 * INPUT:
 *  gip: guest ip
 *  pbundle: used to return the fetched bundle.
 */
unsigned long
fetch_code(VCPU *vcpu, u64 gip, IA64_BUNDLE *pbundle)
{
    u64 gpip = 0;   // guest physical IP
    u64 *vpa;
    thash_data_t *tlb;
    u64 mfn, maddr;
    struct page_info* page;

again:
    if ( !(VCPU(vcpu, vpsr) & IA64_PSR_IT) ) {   // I-side physical mode
        gpip = gip;
    }
    else {
        tlb = vtlb_lookup(vcpu, gip, ISIDE_TLB);
//        if ( tlb == NULL )
//            tlb = vtlb_lookup(vcpu, gip, DSIDE_TLB );
        if (tlb)
            gpip = (tlb->ppn >> (tlb->ps - 12) << tlb->ps) |
                   (gip & (PSIZE(tlb->ps) - 1));
    }
    if (gpip) {
        mfn = gmfn_to_mfn(vcpu->domain, gpip >> PAGE_SHIFT);
        if (mfn == INVALID_MFN)
            panic_domain(vcpu_regs(vcpu), "fetch_code: invalid memory\n");
        maddr = (mfn << PAGE_SHIFT) | (gpip & (PAGE_SIZE - 1));
    } else {
        tlb = vhpt_lookup(gip);
        if (tlb == NULL) {
            ia64_ptcl(gip, ARCH_PAGE_SHIFT << 2);
            return IA64_RETRY;
        }
        maddr = (tlb->ppn >> (tlb->ps - 12) << tlb->ps) |
                (gip & (PSIZE(tlb->ps) - 1));
        mfn = maddr >> PAGE_SHIFT;
    }

    page = mfn_to_page(mfn);
    if (get_page(page, vcpu->domain) == 0) {
        if (page_get_owner(page) != vcpu->domain) {
            // This page might be a page granted by another domain.
            panic_domain(NULL, "domain tries to execute foreign domain "
                         "page which might be mapped by grant table.\n");
        }
        goto again;
    }
    vpa = (u64 *)__va(maddr);

    pbundle->i64[0] = *vpa++;
    pbundle->i64[1] = *vpa;
    put_page(page);
    return IA64_NO_FAULT;
}

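/*
 * Emulate the guest "itc.i" instruction: insert 'pte' for address ifa
 * into the instruction-side virtual TLB.
 */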
IA64FAULT vmx_vcpu_itc_i(VCPU *vcpu, u64 pte, u64 itir, u64 ifa)
{
#ifdef VTLB_DEBUG
    int slot;
    u64 ps, va;
    ps = itir_ps(itir);
    va = PAGEALIGN(ifa, ps);
    slot = vtr_find_overlap(vcpu, va, ps, ISIDE_TLB);
    if (slot >= 0) {
        // generate MCA.
        panic_domain(vcpu_regs(vcpu), "Tlb conflict!!");
        return IA64_FAULT;
    }
#endif //VTLB_DEBUG
    pte &= ~PAGE_FLAGS_RV_MASK;
    thash_purge_and_insert(vcpu, pte, itir, ifa, ISIDE_TLB);
    return IA64_NO_FAULT;
}

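/* Emulate the guest "itc.d" instruction: data-side counterpart of the above. */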
IA64FAULT vmx_vcpu_itc_d(VCPU *vcpu, u64 pte, u64 itir, u64 ifa)
{
#ifdef VTLB_DEBUG
    int slot;
    u64 ps, va;
    ps = itir_ps(itir);
    va = PAGEALIGN(ifa, ps);
    slot = vtr_find_overlap(vcpu, va, ps, DSIDE_TLB);
    if (slot >= 0) {
        // generate MCA.
        panic_domain(vcpu_regs(vcpu), "Tlb conflict!!");
        return IA64_FAULT;
    }
#endif //VTLB_DEBUG
    pte &= ~PAGE_FLAGS_RV_MASK;
    thash_purge_and_insert(vcpu, pte, itir, ifa, DSIDE_TLB);
    return IA64_NO_FAULT;
}

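/*
 * Emulate the guest "itr.i" instruction: record the translation register
 * in vcpu->arch.itrs[slot] and track the region in itr_regions.
 */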
IA64FAULT vmx_vcpu_itr_i(VCPU *vcpu, u64 slot, u64 pte, u64 itir, u64 ifa)
{
#ifdef VTLB_DEBUG
    int index;
#endif
    u64 ps, va, rid;
    thash_data_t * p_itr;
    ps = itir_ps(itir);
    va = PAGEALIGN(ifa, ps);
#ifdef VTLB_DEBUG
    index = vtr_find_overlap(vcpu, va, ps, ISIDE_TLB);
    if (index >= 0) {
        // generate MCA.
        panic_domain(vcpu_regs(vcpu), "Tlb conflict!!");
        return IA64_FAULT;
    }
    thash_purge_entries(vcpu, va, ps);
#endif
    pte &= ~PAGE_FLAGS_RV_MASK;
    vcpu_get_rr(vcpu, va, &rid);
    rid = rid & RR_RID_MASK;
    p_itr = (thash_data_t *)&vcpu->arch.itrs[slot];
    vmx_vcpu_set_tr(p_itr, pte, itir, va, rid);
    vcpu_quick_region_set(PSCBX(vcpu, itr_regions), va);
    return IA64_NO_FAULT;
}

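/*
 * Emulate the guest "itr.d" instruction: record the data translation
 * register in vcpu->arch.dtrs[slot], marking I/O pages with VTLB_PTE_IO.
 */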
IA64FAULT vmx_vcpu_itr_d(VCPU *vcpu, u64 slot, u64 pte, u64 itir, u64 ifa)
{
#ifdef VTLB_DEBUG
    int index;
#endif
    u64 gpfn;
    u64 ps, va, rid;
    thash_data_t * p_dtr;
    ps = itir_ps(itir);
    va = PAGEALIGN(ifa, ps);
#ifdef VTLB_DEBUG
    index = vtr_find_overlap(vcpu, va, ps, DSIDE_TLB);
    if (index >= 0) {
        // generate MCA.
        panic_domain(vcpu_regs(vcpu), "Tlb conflict!!");
        return IA64_FAULT;
    }
#endif
    pte &= ~PAGE_FLAGS_RV_MASK;

    /* This is a bad workaround.
       In Linux, region 7 uses a 16M page size and is identity mapped.
       The VHPT page size in Xen is 16K, so purging the VHPT for a 16M
       guest insertion would iterate 1024 times, which makes Xen/IPF very
       slow.  Therefore Xen does not purge the VHPT in that case. */
    if (ps != _PAGE_SIZE_16M)
        thash_purge_entries(vcpu, va, ps);
    gpfn = (pte & _PAGE_PPN_MASK) >> PAGE_SHIFT;
    if (VMX_DOMAIN(vcpu) && __gpfn_is_io(vcpu->domain, gpfn))
        pte |= VTLB_PTE_IO;
    vcpu_get_rr(vcpu, va, &rid);
    rid = rid & RR_RID_MASK;
    p_dtr = (thash_data_t *)&vcpu->arch.dtrs[slot];
    vmx_vcpu_set_tr(p_dtr, pte, itir, va, rid);
    vcpu_quick_region_set(PSCBX(vcpu, dtr_regions), va);
    return IA64_NO_FAULT;
}

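/*
 * Emulate the guest "ptr.d" instruction: invalidate any overlapping data
 * translation registers and purge the corresponding thash entries.
 */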
IA64FAULT vmx_vcpu_ptr_d(VCPU *vcpu, u64 ifa, u64 ps)
{
    int index;
    u64 va;

    va = PAGEALIGN(ifa, ps);
    while ((index = vtr_find_overlap(vcpu, va, ps, DSIDE_TLB)) >= 0) {
        vcpu->arch.dtrs[index].pte.p = 0;
    }
    thash_purge_entries(vcpu, va, ps);
    return IA64_NO_FAULT;
}

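/* Emulate the guest "ptr.i" instruction: instruction-side counterpart of ptr.d. */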
IA64FAULT vmx_vcpu_ptr_i(VCPU *vcpu, u64 ifa, u64 ps)
{
    int index;
    u64 va;

    va = PAGEALIGN(ifa, ps);
    while ((index = vtr_find_overlap(vcpu, va, ps, ISIDE_TLB)) >= 0) {
        vcpu->arch.itrs[index].pte.p = 0;
    }
    thash_purge_entries(vcpu, va, ps);
    return IA64_NO_FAULT;
}

IA64FAULT vmx_vcpu_ptc_l(VCPU *vcpu, u64 va, u64 ps)
{
    va = PAGEALIGN(va, ps);
    thash_purge_entries(vcpu, va, ps);
    return IA64_NO_FAULT;
}

IA64FAULT vmx_vcpu_ptc_e(VCPU *vcpu, u64 va)
{
    thash_purge_all(vcpu);
    return IA64_NO_FAULT;
}

IA64FAULT vmx_vcpu_ptc_g(VCPU *vcpu, u64 va, u64 ps)
{
    return vmx_vcpu_ptc_ga(vcpu, va, ps);
}

/*
IA64FAULT vmx_vcpu_ptc_ga(VCPU *vcpu, u64 va, u64 ps)
{
    vmx_vcpu_ptc_l(vcpu, va, ps);
    return IA64_NO_FAULT;
}
 */

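/*
 * Arguments handed to ptc_ga_remote_func() when purging on behalf of a
 * VCPU that runs on another processor.
 */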
struct ptc_ga_args {
    unsigned long vadr;
    unsigned long rid;
    unsigned long ps;
    struct vcpu *vcpu;
};

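/*
 * Runs on the processor that currently hosts args->vcpu: temporarily switch
 * rr[0] and the PTA to that VCPU's values, purge the matching entries, then
 * restore the machine state.  args->vcpu is cleared to signal completion;
 * if the VCPU has migrated in the meantime the caller retries.
 */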
static void ptc_ga_remote_func (void *varg)
{
    u64 oldrid, moldrid, mpta, oldpsbits, vadr, flags;
    struct ptc_ga_args *args = (struct ptc_ga_args *)varg;
    VCPU *v = args->vcpu;
    int cpu = v->processor;

    vadr = args->vadr;

    /* Try again if VCPU has migrated. */
    if (cpu != current->processor)
        return;
    local_irq_save(flags);
    if (!spin_trylock(&per_cpu(schedule_data, cpu).schedule_lock))
        goto bail2;
    if (v->processor != cpu)
        goto bail1;
    oldrid = VMX(v, vrr[0]);
    VMX(v, vrr[0]) = args->rid;
    oldpsbits = VMX(v, psbits[0]);
    VMX(v, psbits[0]) = VMX(v, psbits[REGION_NUMBER(vadr)]);
    moldrid = ia64_get_rr(0x0);
    ia64_set_rr(0x0, vrrtomrr(v, args->rid));
    mpta = ia64_get_pta();
    ia64_set_pta(v->arch.arch_vmx.mpta & (~1));
    ia64_srlz_d();
    vadr = PAGEALIGN(vadr, args->ps);
    thash_purge_entries_remote(v, vadr, args->ps);
    VMX(v, vrr[0]) = oldrid;
    VMX(v, psbits[0]) = oldpsbits;
    ia64_set_rr(0x0, moldrid);
    ia64_set_pta(mpta);
    ia64_dv_serialize_data();
    args->vcpu = NULL;
bail1:
    spin_unlock(&per_cpu(schedule_data, cpu).schedule_lock);
bail2:
    local_irq_restore(flags);
}

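/*
 * Emulate the guest "ptc.ga" instruction: purge the address range on every
 * initialised VCPU of the domain, locally for the current VCPU and through
 * ptc_ga_remote_func() for VCPUs on other processors.
 */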
IA64FAULT vmx_vcpu_ptc_ga(VCPU *vcpu, u64 va, u64 ps)
{
    struct domain *d = vcpu->domain;
    struct vcpu *v;
    struct ptc_ga_args args;
    int cpu;

    args.vadr = va;
    vcpu_get_rr(vcpu, va, &args.rid);
    args.ps = ps;
    for_each_vcpu (d, v) {
        if (!v->is_initialised)
            continue;

        if (v == vcpu) {
            vmx_vcpu_ptc_l(v, va, ps);
            continue;
        }

        args.vcpu = v;
        do {
            cpu = v->processor;
            if (cpu != current->processor) {
                spin_unlock_wait(&per_cpu(schedule_data, cpu).schedule_lock);
                /* Flush VHPT on remote processors. */
                smp_call_function_single(cpu, &ptc_ga_remote_func,
                                         &args, 0, 1);
            } else {
                ptc_ga_remote_func(&args);
            }
        } while (args.vcpu != NULL);
    }
    return IA64_NO_FAULT;
}

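/*
 * Emulate the guest "thash" instruction: return the guest VHPT entry
 * address for vadr, using the PAL VSA service when the guest PTA selects
 * the long format (pta.vf) and the short-format hash otherwise.
 */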
u64 vmx_vcpu_thash(VCPU *vcpu, u64 vadr)
{
    PTA vpta;
    ia64_rr vrr;
    u64 pval;
    u64 vhpt_offset;
    vpta.val = vmx_vcpu_get_pta(vcpu);
    vcpu_get_rr(vcpu, vadr, &vrr.rrval);
    if (vpta.vf) {
        pval = ia64_call_vsa(PAL_VPS_THASH, vadr, vrr.rrval,
                             vpta.val, 0, 0, 0, 0);
        pval = vpta.val & ~0xffff;
    } else {
        vhpt_offset = ((vadr >> vrr.ps) << 3) & ((1UL << (vpta.size)) - 1);
        pval = (vadr & VRN_MASK) |
               (vpta.val << 3 >> (vpta.size + 3) << (vpta.size)) |
               vhpt_offset;
    }
    return pval;
}

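/*
 * Emulate the guest "ttag" instruction: return the VHPT tag for vadr via
 * the PAL VSA service in long format, or 1 when the short format is in use.
 */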
u64 vmx_vcpu_ttag(VCPU *vcpu, u64 vadr)
{
    ia64_rr vrr;
    PTA vpta;
    u64 pval;
    vpta.val = vmx_vcpu_get_pta(vcpu);
    vcpu_get_rr(vcpu, vadr, &vrr.rrval);
    if (vpta.vf) {
        pval = ia64_call_vsa(PAL_VPS_TTAG, vadr, vrr.rrval, 0, 0, 0, 0, 0);
    } else {
        pval = 1;
    }
    return pval;
}

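/*
 * Emulate the guest "tpa" instruction: translate vadr to a guest physical
 * address via the vTLB, then the VHPT; otherwise inject the fault that a
 * hardware walk would have raised.
 */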
IA64FAULT vmx_vcpu_tpa(VCPU *vcpu, u64 vadr, u64 *padr)
{
    thash_data_t *data;
    ISR visr, pt_isr;
    REGS *regs;
    u64 vhpt_adr, madr;
    IA64_PSR vpsr;
    regs = vcpu_regs(vcpu);
    pt_isr.val = VMX(vcpu, cr_isr);
    visr.val = 0;
    visr.ei = pt_isr.ei;
    visr.ir = pt_isr.ir;
    vpsr.val = VCPU(vcpu, vpsr);
    visr.na = 1;
    data = vtlb_lookup(vcpu, vadr, DSIDE_TLB);
    if (data) {
        if (data->p == 0) {
            vcpu_set_isr(vcpu, visr.val);
            data_page_not_present(vcpu, vadr);
            return IA64_FAULT;
        } else if (data->ma == VA_MATTR_NATPAGE) {
            vcpu_set_isr(vcpu, visr.val);
            dnat_page_consumption(vcpu, vadr);
            return IA64_FAULT;
        } else {
            *padr = ((data->ppn >> (data->ps - 12)) << data->ps) |
                    (vadr & (PSIZE(data->ps) - 1));
            return IA64_NO_FAULT;
        }
    }
    data = vhpt_lookup(vadr);
    if (data) {
        if (data->p == 0) {
            vcpu_set_isr(vcpu, visr.val);
            data_page_not_present(vcpu, vadr);
            return IA64_FAULT;
        } else if (data->ma == VA_MATTR_NATPAGE) {
            vcpu_set_isr(vcpu, visr.val);
            dnat_page_consumption(vcpu, vadr);
            return IA64_FAULT;
        } else {
            madr = (data->ppn >> (data->ps - 12) << data->ps) |
                   (vadr & (PSIZE(data->ps) - 1));
            *padr = __mpa_to_gpa(madr);
            return IA64_NO_FAULT;
        }
    }
    else {
        if (!vhpt_enabled(vcpu, vadr, NA_REF)) {
            if (vpsr.ic) {
                vcpu_set_isr(vcpu, visr.val);
                alt_dtlb(vcpu, vadr);
                return IA64_FAULT;
            }
            else {
                nested_dtlb(vcpu);
                return IA64_FAULT;
            }
        }
        else {
            vhpt_adr = vmx_vcpu_thash(vcpu, vadr);
            data = vtlb_lookup(vcpu, vhpt_adr, DSIDE_TLB);
            if (data) {
                if (vpsr.ic) {
                    vcpu_set_isr(vcpu, visr.val);
                    dtlb_fault(vcpu, vadr);
                    return IA64_FAULT;
                }
                else {
                    nested_dtlb(vcpu);
                    return IA64_FAULT;
                }
            }
            else {
                if (vpsr.ic) {
                    vcpu_set_isr(vcpu, visr.val);
                    dvhpt_fault(vcpu, vadr);
                    return IA64_FAULT;
                }
                else {
                    nested_dtlb(vcpu);
                    return IA64_FAULT;
                }
            }
        }
    }
}

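/*
 * Emulate the guest "tak" instruction: return the protection key for vadr
 * from the vTLB, or 1 when no present translation is found or the guest
 * PTA does not select the long format.
 */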
u64 vmx_vcpu_tak(VCPU *vcpu, u64 vadr)
{
    thash_data_t *data;
    PTA vpta;
    u64 key;
    vpta.val = vmx_vcpu_get_pta(vcpu);
    if (vpta.vf == 0 || unimplemented_gva(vcpu, vadr)) {
        key = 1;
        return key;
    }
    data = vtlb_lookup(vcpu, vadr, DSIDE_TLB);
    if (!data || !data->p) {
        key = 1;
    } else {
        key = data->key;
    }
    return key;
}