ia64/xen-unstable

view xen/arch/ia64/vmx/vmmu.c @ 14422:2dbee4f1ee63

[IA64] Avoid double free of VHPT for HVM domain

If the vTLB area cannot be allocated because of a domheap shortage,
Xen panics due to a double free of the VHPT.

First free:
vmx_final_setup_guest()
-> init_domain_tlb()
-> free_domain_vhpt()

Second free:
free_domain()
-> vcpu_destroy()
-> vmx_relinquish_vcpu_resources()
-> free_domain_tlb()
-> free_domain_vhpt()

This patch avoids the double free of the VHPT by clearing v->arch.vhpt.hash
in free_domain_vhpt(). (A short illustrative sketch of this pattern follows
the changeset metadata below.)

Signed-off-by: Masaki Kanno <kanno.masaki@jp.fujitsu.com>
author awilliam@xenbuild2.aw
date Tue Mar 20 09:24:02 2007 -0600 (2007-03-20)
parents 21d0d23a2cca
children 93e11f6d6791
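
The fix follows a common defensive pattern: keep the allocation in a single
pointer field, and have the teardown routine both free the allocation and
clear the pointer, so a second call on the same structure becomes a harmless
no-op. Below is a minimal, self-contained C sketch of that pattern, not the
Xen code itself: the vhpt_area type and the alloc_vhpt()/free_vhpt() names
are illustrative stand-ins, and plain malloc()/free() take the place of the
alloc_domheap_pages()/free_domheap_pages() calls used in the patch.

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Illustrative stand-in for the per-vcpu VHPT bookkeeping. */
    struct vhpt_area {
        void   *hash;   /* base of the allocated area, NULL when not allocated */
        size_t  size;
    };

    /* Allocate and zero the area; return -1 on failure,
     * mirroring init_domain_vhpt()'s error convention. */
    static int alloc_vhpt(struct vhpt_area *v, size_t size)
    {
        v->hash = malloc(size);
        if (v->hash == NULL) {
            fprintf(stderr, "Not enough memory for VHPT\n");
            return -1;
        }
        memset(v->hash, 0, size);
        v->size = size;
        return 0;
    }

    /* Teardown is idempotent: free only if allocated, then clear the
     * pointer so a later call from a second cleanup path does nothing. */
    static void free_vhpt(struct vhpt_area *v)
    {
        if (v->hash) {
            free(v->hash);
            v->hash = NULL;   /* the key step the patch adds in free_domain_vhpt() */
            v->size = 0;
        }
    }

    int main(void)
    {
        struct vhpt_area v = { NULL, 0 };

        if (alloc_vhpt(&v, 1 << 16) != 0)
            return 1;

        /* First cleanup path (error path), then second cleanup path
         * (destructor): without clearing v.hash the second call would
         * double-free the buffer. */
        free_vhpt(&v);
        free_vhpt(&v);

        puts("no double free");
        return 0;
    }
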
line source
1 /* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
2 /*
3 * vmmu.c: virtual memory management unit components.
4 * Copyright (c) 2005, Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along with
16 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
17 * Place - Suite 330, Boston, MA 02111-1307 USA.
18 *
19 * Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com)
20 * Yaozu Dong (Eddie Dong) (Eddie.dong@intel.com)
21 */
22 #include <linux/sched.h>
23 #include <linux/mm.h>
24 #include <asm/tlb.h>
25 #include <asm/gcc_intrin.h>
26 #include <asm/vcpu.h>
27 #include <linux/interrupt.h>
28 #include <asm/vmx_vcpu.h>
29 #include <asm/vmx_mm_def.h>
30 #include <asm/vmx.h>
31 #include <asm/hw_irq.h>
32 #include <asm/vmx_pal_vsa.h>
33 #include <asm/kregs.h>
34 #include <asm/vcpu.h>
35 #include <xen/irq.h>
36 #include <xen/errno.h>
38 /*
39 * Get the machine page frame number in 16KB units.
40 * Input:
41 * d: target domain; gpfn: guest page frame number (16KB units)
42 */
43 u64 get_mfn(struct domain *d, u64 gpfn)
44 {
45 // struct domain *d;
46 u64 xen_gppn, xen_mppn, mpfn;
47 /*
48 if ( domid == DOMID_SELF ) {
49 d = current->domain;
50 }
51 else {
52 d = get_domain_by_id(domid);
53 }
54 */
55 xen_gppn = arch_to_xen_ppn(gpfn);
56 xen_mppn = gmfn_to_mfn(d, xen_gppn);
57 /*
58 for (i=0; i<pages; i++) {
59 if ( gmfn_to_mfn(d, gpfn+i) == INVALID_MFN ) {
60 return INVALID_MFN;
61 }
62 }
63 */
64 mpfn= xen_to_arch_ppn(xen_mppn);
65 mpfn = mpfn | (((1UL <<(PAGE_SHIFT-ARCH_PAGE_SHIFT))-1)&gpfn);
66 return mpfn;
68 }
70 /*
71 * The VRN bits of va stand for which rr to get.
72 */
73 //ia64_rr vmmu_get_rr(struct vcpu *vcpu, u64 va)
74 //{
75 // ia64_rr vrr;
76 // vcpu_get_rr(vcpu, va, &vrr.rrval);
77 // return vrr;
78 //}
80 /*
81 void recycle_message(thash_cb_t *hcb, u64 para)
82 {
83 if(hcb->ht == THASH_VHPT)
84 {
85 printk("ERROR : vhpt recycle happening!!!\n");
86 }
87 printk("hcb=%p recycled with %lx\n",hcb,para);
88 }
89 */
91 /*
92 * Purge all guest TCs in logical processor.
93 * Instead of purging all LP TCs, we should only purge
94 * TCs that belong to this guest.
95 */
96 void
97 purge_machine_tc_by_domid(domid_t domid)
98 {
99 #ifndef PURGE_GUEST_TC_ONLY
100 // purge all TCs
101 struct ia64_pal_retval result;
102 u64 addr;
103 u32 count1,count2;
104 u32 stride1,stride2;
105 u32 i,j;
106 u64 psr;
108 result = ia64_pal_call_static(PAL_PTCE_INFO,0,0,0, 0);
109 if ( result.status != 0 ) {
110 panic ("PAL_PTCE_INFO failed\n");
111 }
112 addr = result.v0;
113 count1 = HIGH_32BITS(result.v1);
114 count2 = LOW_32BITS (result.v1);
115 stride1 = HIGH_32BITS(result.v2);
116 stride2 = LOW_32BITS (result.v2);
118 local_irq_save(psr);
119 for (i=0; i<count1; i++) {
120 for (j=0; j<count2; j++) {
121 ia64_ptce(addr);
122 addr += stride2;
123 }
124 addr += stride1;
125 }
126 local_irq_restore(psr);
127 #else
128 // purge all TCs belonging to this guest.
129 #endif
130 }
132 static int init_domain_vhpt(struct vcpu *v)
133 {
134 struct page_info *page;
135 void * vbase;
136 page = alloc_domheap_pages (NULL, VCPU_VHPT_ORDER, 0);
137 if ( page == NULL ) {
138 printk("Not enough contiguous memory for init_domain_vhpt\n");
140 return -1;
141 }
142 vbase = page_to_virt(page);
143 memset(vbase, 0, VCPU_VHPT_SIZE);
144 printk(XENLOG_DEBUG "Allocate domain vhpt at 0x%p\n", vbase);
146 VHPT(v,hash) = vbase;
147 VHPT(v,hash_sz) = VCPU_VHPT_SIZE/2;
148 VHPT(v,cch_buf) = (void *)((u64)vbase + VHPT(v,hash_sz));
149 VHPT(v,cch_sz) = VCPU_VHPT_SIZE - VHPT(v,hash_sz);
150 thash_init(&(v->arch.vhpt),VCPU_VHPT_SHIFT-1);
151 v->arch.arch_vmx.mpta = v->arch.vhpt.pta.val;
153 return 0;
154 }
157 static void free_domain_vhpt(struct vcpu *v)
158 {
159 struct page_info *page;
161 if (v->arch.vhpt.hash) {
162 page = virt_to_page(v->arch.vhpt.hash);
163 free_domheap_pages(page, VCPU_VHPT_ORDER);
164 v->arch.vhpt.hash = 0;
165 }
167 return;
168 }
170 int init_domain_tlb(struct vcpu *v)
171 {
172 struct page_info *page;
173 void * vbase;
175 if (init_domain_vhpt(v) != 0)
176 return -1;
178 page = alloc_domheap_pages (NULL, VCPU_VTLB_ORDER, 0);
179 if ( page == NULL ) {
180 printk("Not enough contiguous memory for init_domain_tlb\n");
181 free_domain_vhpt(v);
182 return -1;
183 }
184 vbase = page_to_virt(page);
185 memset(vbase, 0, VCPU_VTLB_SIZE);
186 printk(XENLOG_DEBUG "Allocate domain vtlb at 0x%p\n", vbase);
188 VTLB(v,hash) = vbase;
189 VTLB(v,hash_sz) = VCPU_VTLB_SIZE/2;
190 VTLB(v,cch_buf) = (void *)((u64)vbase + VTLB(v,hash_sz));
191 VTLB(v,cch_sz) = VCPU_VTLB_SIZE - VTLB(v,hash_sz);
192 thash_init(&(v->arch.vtlb),VCPU_VTLB_SHIFT-1);
194 return 0;
195 }
198 void free_domain_tlb(struct vcpu *v)
199 {
200 struct page_info *page;
202 if ( v->arch.vtlb.hash) {
203 page = virt_to_page(v->arch.vtlb.hash);
204 free_domheap_pages(page, VCPU_VTLB_ORDER);
205 }
207 free_domain_vhpt(v);
208 }
210 /*
211 * Insert guest TLB to machine TLB.
212 * data: In TLB format
213 */
214 void machine_tlb_insert(struct vcpu *d, thash_data_t *tlb)
215 {
216 u64 psr;
217 thash_data_t mtlb;
218 unsigned int cl = tlb->cl;
219 unsigned long mtlb_ppn;
220 mtlb.ifa = tlb->vadr;
221 mtlb.itir = tlb->itir & ~ITIR_RV_MASK;
222 mtlb.page_flags = tlb->page_flags & ~PAGE_FLAGS_RV_MASK;
223 mtlb.ppn = get_mfn(d->domain,tlb->ppn);
224 mtlb_ppn=mtlb.ppn;
226 #if 0
227 if (mtlb_ppn == INVALID_MFN)
228 panic_domain(vcpu_regs(d),"Machine tlb insert with invalid mfn number.\n");
229 #endif
231 psr = ia64_clear_ic();
232 if ( cl == ISIDE_TLB ) {
233 ia64_itc(1, mtlb.ifa, mtlb.page_flags, mtlb.ps);
234 }
235 else {
236 ia64_itc(2, mtlb.ifa, mtlb.page_flags, mtlb.ps);
237 }
238 ia64_set_psr(psr);
239 ia64_srlz_i();
240 return;
241 }
243 /*
244 * Purge machine tlb.
245 * INPUT
246 * rr: guest rr.
247 * va: only bits 0:60 are valid
248 * size: the range to purge is (1<<size) bytes
249 *
250 */
251 void machine_tlb_purge(u64 va, u64 ps)
252 {
253 // u64 psr;
254 // psr = ia64_clear_ic();
255 ia64_ptcl(va, ps << 2);
256 // ia64_set_psr(psr);
257 // ia64_srlz_i();
258 // return;
259 }
260 /*
261 u64 machine_thash(u64 va)
262 {
263 return ia64_thash(va);
264 }
266 u64 machine_ttag(u64 va)
267 {
268 return ia64_ttag(va);
269 }
270 */
271 thash_data_t * vsa_thash(PTA vpta, u64 va, u64 vrr, u64 *tag)
272 {
273 u64 index,pfn,rid,pfn_bits;
274 pfn_bits = vpta.size-5-8;
275 pfn = REGION_OFFSET(va)>>_REGION_PAGE_SIZE(vrr);
276 rid = _REGION_ID(vrr);
277 index = ((rid&0xff)<<pfn_bits)|(pfn&((1UL<<pfn_bits)-1));
278 *tag = ((rid>>8)&0xffff) | ((pfn >>pfn_bits)<<16);
279 return (thash_data_t *)((vpta.base<<PTA_BASE_SHIFT)+(index<<5));
280 // return ia64_call_vsa(PAL_VPS_THASH,va,vrr,vpta,0,0,0,0);
281 }
283 //u64 vsa_ttag(u64 va, u64 vrr)
284 //{
285 // return ia64_call_vsa(PAL_VPS_TTAG,va,vrr,0,0,0,0,0);
286 //}
288 int vhpt_enabled(VCPU *vcpu, uint64_t vadr, vhpt_ref_t ref)
289 {
290 ia64_rr vrr;
291 PTA vpta;
292 IA64_PSR vpsr;
294 vpsr.val = VCPU(vcpu, vpsr);
295 vcpu_get_rr(vcpu, vadr, &vrr.rrval);
296 vmx_vcpu_get_pta(vcpu,&vpta.val);
298 if ( vrr.ve & vpta.ve ) {
299 switch ( ref ) {
300 case DATA_REF:
301 case NA_REF:
302 return vpsr.dt;
303 case INST_REF:
304 return vpsr.dt && vpsr.it && vpsr.ic;
305 case RSE_REF:
306 return vpsr.dt && vpsr.rt;
308 }
309 }
310 return 0;
311 }
314 int unimplemented_gva(VCPU *vcpu,u64 vadr)
315 {
316 #if 0
317 int bit=vcpu->domain->arch.imp_va_msb;
318 u64 ladr =(vadr<<3)>>(3+bit);
319 if(!ladr||ladr==(1U<<(61-bit))-1){
320 return 0;
321 }else{
322 return 1;
323 }
324 #else
325 return 0;
326 #endif
327 }
330 /*
331 * Fetch guest bundle code.
332 * INPUT:
333 * gip: guest ip
334 * pbundle: used to return fetched bundle.
335 */
336 unsigned long
337 fetch_code(VCPU *vcpu, u64 gip, IA64_BUNDLE *pbundle)
338 {
339 u64 gpip=0; // guest physical IP
340 u64 *vpa;
341 thash_data_t *tlb;
342 u64 mfn, maddr;
343 struct page_info* page;
345 again:
346 if ( !(VCPU(vcpu, vpsr) & IA64_PSR_IT) ) { // I-side physical mode
347 gpip = gip;
348 }
349 else {
350 tlb = vtlb_lookup(vcpu, gip, ISIDE_TLB);
351 // if( tlb == NULL )
352 // tlb = vtlb_lookup(vcpu, gip, DSIDE_TLB );
353 if (tlb)
354 gpip = (tlb->ppn >>(tlb->ps-12)<<tlb->ps) | ( gip & (PSIZE(tlb->ps)-1) );
355 }
356 if( gpip){
357 mfn = gmfn_to_mfn(vcpu->domain, gpip >>PAGE_SHIFT);
358 if( mfn == INVALID_MFN ) panic_domain(vcpu_regs(vcpu),"fetch_code: invalid memory\n");
359 maddr = (mfn << PAGE_SHIFT) | (gpip & (PAGE_SIZE - 1));
360 }else{
361 tlb = vhpt_lookup(gip);
362 if (tlb == NULL) {
363 ia64_ptcl(gip, ARCH_PAGE_SHIFT << 2);
364 return IA64_RETRY;
365 }
366 maddr = (tlb->ppn >> (tlb->ps - 12) << tlb->ps) |
367 (gip & (PSIZE(tlb->ps) - 1));
368 mfn = maddr >> PAGE_SHIFT;
369 }
371 page = mfn_to_page(mfn);
372 if (get_page(page, vcpu->domain) == 0) {
373 if (page_get_owner(page) != vcpu->domain) {
374 // This page might be a page granted by another domain.
375 panic_domain(NULL, "domain tries to execute foreign domain "
376 "page which might be mapped by grant table.\n");
377 }
378 goto again;
379 }
380 vpa = (u64 *)__va(maddr);
382 pbundle->i64[0] = *vpa++;
383 pbundle->i64[1] = *vpa;
384 put_page(page);
385 return IA64_NO_FAULT;
386 }
388 IA64FAULT vmx_vcpu_itc_i(VCPU *vcpu, u64 pte, u64 itir, u64 ifa)
389 {
390 #ifdef VTLB_DEBUG
391 int slot;
392 u64 ps, va;
393 ps = itir_ps(itir);
394 va = PAGEALIGN(ifa, ps);
395 slot = vtr_find_overlap(vcpu, va, ps, ISIDE_TLB);
396 if (slot >=0) {
397 // generate MCA.
398 panic_domain(vcpu_regs(vcpu),"Tlb conflict!!");
399 return IA64_FAULT;
400 }
401 #endif //VTLB_DEBUG
402 pte &= ~PAGE_FLAGS_RV_MASK;
403 thash_purge_and_insert(vcpu, pte, itir, ifa, ISIDE_TLB);
404 return IA64_NO_FAULT;
405 }
407 IA64FAULT vmx_vcpu_itc_d(VCPU *vcpu, u64 pte, u64 itir, u64 ifa)
408 {
409 u64 gpfn;
410 #ifdef VTLB_DEBUG
411 int slot;
412 u64 ps, va;
413 ps = itir_ps(itir);
414 va = PAGEALIGN(ifa, ps);
415 slot = vtr_find_overlap(vcpu, va, ps, DSIDE_TLB);
416 if (slot >=0) {
417 // generate MCA.
418 panic_domain(vcpu_regs(vcpu),"Tlb conflict!!");
419 return IA64_FAULT;
420 }
421 #endif //VTLB_DEBUG
422 pte &= ~PAGE_FLAGS_RV_MASK;
423 gpfn = (pte & _PAGE_PPN_MASK)>> PAGE_SHIFT;
424 if (VMX_DOMAIN(vcpu) && __gpfn_is_io(vcpu->domain, gpfn))
425 pte |= VTLB_PTE_IO;
426 thash_purge_and_insert(vcpu, pte, itir, ifa, DSIDE_TLB);
427 return IA64_NO_FAULT;
429 }
434 IA64FAULT vmx_vcpu_itr_i(VCPU *vcpu, u64 slot, u64 pte, u64 itir, u64 ifa)
435 {
436 #ifdef VTLB_DEBUG
437 int index;
438 #endif
439 u64 ps, va, rid;
440 thash_data_t * p_itr;
441 ps = itir_ps(itir);
442 va = PAGEALIGN(ifa, ps);
443 #ifdef VTLB_DEBUG
444 index = vtr_find_overlap(vcpu, va, ps, ISIDE_TLB);
445 if (index >=0) {
446 // generate MCA.
447 panic_domain(vcpu_regs(vcpu),"Tlb conflict!!");
448 return IA64_FAULT;
449 }
450 thash_purge_entries(vcpu, va, ps);
451 #endif
452 pte &= ~PAGE_FLAGS_RV_MASK;
453 vcpu_get_rr(vcpu, va, &rid);
454 rid = rid& RR_RID_MASK;
455 p_itr = (thash_data_t *)&vcpu->arch.itrs[slot];
456 vmx_vcpu_set_tr(p_itr, pte, itir, va, rid);
457 vcpu_quick_region_set(PSCBX(vcpu,itr_regions),va);
458 return IA64_NO_FAULT;
459 }
462 IA64FAULT vmx_vcpu_itr_d(VCPU *vcpu, u64 slot, u64 pte, u64 itir, u64 ifa)
463 {
464 #ifdef VTLB_DEBUG
465 int index;
466 #endif
467 u64 gpfn;
468 u64 ps, va, rid;
469 thash_data_t * p_dtr;
470 ps = itir_ps(itir);
471 va = PAGEALIGN(ifa, ps);
472 #ifdef VTLB_DEBUG
473 index = vtr_find_overlap(vcpu, va, ps, DSIDE_TLB);
474 if (index>=0) {
475 // generate MCA.
476 panic_domain(vcpu_regs(vcpu),"Tlb conflict!!");
477 return IA64_FAULT;
478 }
479 #endif
480 pte &= ~PAGE_FLAGS_RV_MASK;
482 /* This is a bad workaround.
483 In Linux, region 7 uses a 16M page size and is identity mapped.
484 The VHPT page size in Xen is 16K. If we purged the VHPT while the
485 guest inserts a 16M translation, we would purge the VHPT 1024 times
486 iteratively, which makes Xen/IPF very slow.
487 Therefore Xen does not purge the VHPT in this case. */
488 if (ps != _PAGE_SIZE_16M)
489 thash_purge_entries(vcpu, va, ps);
490 gpfn = (pte & _PAGE_PPN_MASK)>> PAGE_SHIFT;
491 if (VMX_DOMAIN(vcpu) && __gpfn_is_io(vcpu->domain, gpfn))
492 pte |= VTLB_PTE_IO;
493 vcpu_get_rr(vcpu, va, &rid);
494 rid = rid& RR_RID_MASK;
495 p_dtr = (thash_data_t *)&vcpu->arch.dtrs[slot];
496 vmx_vcpu_set_tr((thash_data_t *)&vcpu->arch.dtrs[slot], pte, itir, va, rid);
497 vcpu_quick_region_set(PSCBX(vcpu,dtr_regions),va);
498 return IA64_NO_FAULT;
499 }
503 IA64FAULT vmx_vcpu_ptr_d(VCPU *vcpu,u64 ifa, u64 ps)
504 {
505 int index;
506 u64 va;
508 va = PAGEALIGN(ifa, ps);
509 while ((index = vtr_find_overlap(vcpu, va, ps, DSIDE_TLB)) >= 0) {
510 vcpu->arch.dtrs[index].pte.p=0;
511 }
512 thash_purge_entries(vcpu, va, ps);
513 return IA64_NO_FAULT;
514 }
516 IA64FAULT vmx_vcpu_ptr_i(VCPU *vcpu, u64 ifa, u64 ps)
517 {
518 int index;
519 u64 va;
521 va = PAGEALIGN(ifa, ps);
522 while ((index = vtr_find_overlap(vcpu, va, ps, ISIDE_TLB)) >= 0) {
523 vcpu->arch.itrs[index].pte.p=0;
524 }
525 thash_purge_entries(vcpu, va, ps);
526 return IA64_NO_FAULT;
527 }
529 IA64FAULT vmx_vcpu_ptc_l(VCPU *vcpu, u64 va, u64 ps)
530 {
531 va = PAGEALIGN(va, ps);
532 thash_purge_entries(vcpu, va, ps);
533 return IA64_NO_FAULT;
534 }
537 IA64FAULT vmx_vcpu_ptc_e(VCPU *vcpu, u64 va)
538 {
539 thash_purge_all(vcpu);
540 return IA64_NO_FAULT;
541 }
543 IA64FAULT vmx_vcpu_ptc_g(VCPU *vcpu, u64 va, u64 ps)
544 {
545 vmx_vcpu_ptc_ga(vcpu, va, ps);
546 return IA64_ILLOP_FAULT;
547 }
548 /*
549 IA64FAULT vmx_vcpu_ptc_ga(VCPU *vcpu, u64 va, u64 ps)
550 {
551 vmx_vcpu_ptc_l(vcpu, va, ps);
552 return IA64_NO_FAULT;
553 }
554 */
555 struct ptc_ga_args {
556 unsigned long vadr;
557 unsigned long rid;
558 unsigned long ps;
559 struct vcpu *vcpu;
560 };
562 static void ptc_ga_remote_func (void *varg)
563 {
564 u64 oldrid, moldrid, mpta, oldpsbits, vadr;
565 struct ptc_ga_args *args = (struct ptc_ga_args *)varg;
566 VCPU *v = args->vcpu;
567 vadr = args->vadr;
569 oldrid = VMX(v, vrr[0]);
570 VMX(v, vrr[0]) = args->rid;
571 oldpsbits = VMX(v, psbits[0]);
572 VMX(v, psbits[0]) = VMX(v, psbits[REGION_NUMBER(vadr)]);
573 moldrid = ia64_get_rr(0x0);
574 ia64_set_rr(0x0,vrrtomrr(v,args->rid));
575 mpta = ia64_get_pta();
576 ia64_set_pta(v->arch.arch_vmx.mpta&(~1));
577 ia64_srlz_d();
578 vadr = PAGEALIGN(vadr, args->ps);
579 thash_purge_entries_remote(v, vadr, args->ps);
580 VMX(v, vrr[0]) = oldrid;
581 VMX(v, psbits[0]) = oldpsbits;
582 ia64_set_rr(0x0,moldrid);
583 ia64_set_pta(mpta);
584 ia64_dv_serialize_data();
585 }
588 IA64FAULT vmx_vcpu_ptc_ga(VCPU *vcpu, u64 va, u64 ps)
589 {
591 struct domain *d = vcpu->domain;
592 struct vcpu *v;
593 struct ptc_ga_args args;
594 int proc;
596 args.vadr = va;
597 vcpu_get_rr(vcpu, va, &args.rid);
598 args.ps = ps;
599 for_each_vcpu (d, v) {
600 if (!test_bit(_VCPUF_initialised, &v->vcpu_flags))
601 continue;
603 args.vcpu = v;
604 again: /* Try again if VCPU has migrated. */
605 proc = v->processor;
606 if (proc != vcpu->processor) {
607 /* Flush VHPT on remote processors. */
608 smp_call_function_single(v->processor,
609 &ptc_ga_remote_func, &args, 0, 1);
610 if (proc != v->processor)
611 goto again;
612 } else if (v == vcpu) {
613 vmx_vcpu_ptc_l(v, va, ps);
614 } else {
615 ptc_ga_remote_func(&args);
616 if (proc != v->processor)
617 goto again;
618 }
619 }
620 return IA64_NO_FAULT;
621 }
624 IA64FAULT vmx_vcpu_thash(VCPU *vcpu, u64 vadr, u64 *pval)
625 {
626 PTA vpta;
627 ia64_rr vrr;
628 u64 vhpt_offset;
629 vmx_vcpu_get_pta(vcpu, &vpta.val);
630 vcpu_get_rr(vcpu, vadr, &vrr.rrval);
631 if(vpta.vf){
632 *pval = ia64_call_vsa(PAL_VPS_THASH,vadr,vrr.rrval,vpta.val,0,0,0,0);
633 *pval = vpta.val & ~0xffff;
634 }else{
635 vhpt_offset=((vadr>>vrr.ps)<<3)&((1UL<<(vpta.size))-1);
636 *pval = (vadr&VRN_MASK)|
637 (vpta.val<<3>>(vpta.size+3)<<(vpta.size))|
638 vhpt_offset;
639 }
640 return IA64_NO_FAULT;
641 }
644 IA64FAULT vmx_vcpu_ttag(VCPU *vcpu, u64 vadr, u64 *pval)
645 {
646 ia64_rr vrr;
647 PTA vpta;
648 vmx_vcpu_get_pta(vcpu, &vpta.val);
649 vcpu_get_rr(vcpu, vadr, &vrr.rrval);
650 if(vpta.vf){
651 *pval = ia64_call_vsa(PAL_VPS_TTAG,vadr,vrr.rrval,0,0,0,0,0);
652 }else{
653 *pval = 1;
654 }
655 return IA64_NO_FAULT;
656 }
660 IA64FAULT vmx_vcpu_tpa(VCPU *vcpu, u64 vadr, u64 *padr)
661 {
662 thash_data_t *data;
663 ISR visr,pt_isr;
664 REGS *regs;
665 u64 vhpt_adr, madr;
666 IA64_PSR vpsr;
667 regs=vcpu_regs(vcpu);
668 pt_isr.val=VMX(vcpu,cr_isr);
669 visr.val=0;
670 visr.ei=pt_isr.ei;
671 visr.ir=pt_isr.ir;
672 vpsr.val = VCPU(vcpu, vpsr);
673 visr.na=1;
674 data = vtlb_lookup(vcpu, vadr, DSIDE_TLB);
675 if(data){
676 if(data->p==0){
677 vcpu_set_isr(vcpu,visr.val);
678 data_page_not_present(vcpu, vadr);
679 return IA64_FAULT;
680 }else if(data->ma == VA_MATTR_NATPAGE){
681 vcpu_set_isr(vcpu, visr.val);
682 dnat_page_consumption(vcpu, vadr);
683 return IA64_FAULT;
684 }else{
685 *padr = ((data->ppn >> (data->ps - 12)) << data->ps) |
686 (vadr & (PSIZE(data->ps) - 1));
687 return IA64_NO_FAULT;
688 }
689 }
690 data = vhpt_lookup(vadr);
691 if(data){
692 if(data->p==0){
693 vcpu_set_isr(vcpu,visr.val);
694 data_page_not_present(vcpu, vadr);
695 return IA64_FAULT;
696 }else if(data->ma == VA_MATTR_NATPAGE){
697 vcpu_set_isr(vcpu, visr.val);
698 dnat_page_consumption(vcpu, vadr);
699 return IA64_FAULT;
700 }else{
701 madr = (data->ppn >> (data->ps - 12) << data->ps) |
702 (vadr & (PSIZE(data->ps) - 1));
703 *padr = __mpa_to_gpa(madr);
704 return IA64_NO_FAULT;
705 }
706 }
707 else{
708 if(!vhpt_enabled(vcpu, vadr, NA_REF)){
709 if(vpsr.ic){
710 vcpu_set_isr(vcpu, visr.val);
711 alt_dtlb(vcpu, vadr);
712 return IA64_FAULT;
713 }
714 else{
715 nested_dtlb(vcpu);
716 return IA64_FAULT;
717 }
718 }
719 else{
720 vmx_vcpu_thash(vcpu, vadr, &vhpt_adr);
721 data = vtlb_lookup(vcpu, vhpt_adr, DSIDE_TLB);
722 if(data){
723 if(vpsr.ic){
724 vcpu_set_isr(vcpu, visr.val);
725 dtlb_fault(vcpu, vadr);
726 return IA64_FAULT;
727 }
728 else{
729 nested_dtlb(vcpu);
730 return IA64_FAULT;
731 }
732 }
733 else{
734 if(vpsr.ic){
735 vcpu_set_isr(vcpu, visr.val);
736 dvhpt_fault(vcpu, vadr);
737 return IA64_FAULT;
738 }
739 else{
740 nested_dtlb(vcpu);
741 return IA64_FAULT;
742 }
743 }
744 }
745 }
746 }
748 IA64FAULT vmx_vcpu_tak(VCPU *vcpu, u64 vadr, u64 *key)
749 {
750 thash_data_t *data;
751 PTA vpta;
752 vmx_vcpu_get_pta(vcpu, &vpta.val);
753 if(vpta.vf==0 || unimplemented_gva(vcpu, vadr)){
754 *key=1;
755 return IA64_NO_FAULT;
756 }
757 data = vtlb_lookup(vcpu, vadr, DSIDE_TLB);
758 if(!data||!data->p){
759 *key=1;
760 }else{
761 *key=data->key;
762 }
763 return IA64_NO_FAULT;
764 }