direct-io.hg

view xen/arch/ia64/vmx/vmmu.c @ 10619:e97e31e26bd5

[IA64] ptc.g virtualization fix 2

When emulating ptc.ga, the VMM needs to change the pta temporarily,
which leaves the VHPT no longer mapped by a TR; a VHPT fault can then
be taken inside the VMM, which is not correct.
This patch fixes the issue:
while changing the pta, the VMM temporarily disables the VHPT walker.

Signed-off-by: Anthony Xu <anthony.xu@intel.com>
author awilliam@xenbuild.aw
date Tue Jun 27 16:35:59 2006 -0600 (2006-06-27)
parents 3dca270188da
children 8dc4af3f192c
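
The core of this change is visible in ptc_ga_remote_func() in the listing
below.  A minimal sketch of the idea (not compilable on its own; v is the
target vcpu, and ia64_get_pta/ia64_set_pta/ia64_srlz_d/ia64_dv_serialize_data
are the accessors already used in the file):

    /* Switch to the target vcpu's machine PTA with the VHPT walker
     * disabled, so no VHPT miss can be taken while the VHPT is not
     * mapped by a TR. */
    u64 saved_pta = ia64_get_pta();

    ia64_set_pta(v->arch.arch_vmx.mpta & ~1UL);    /* clear PTA.ve (bit 0) */
    ia64_srlz_d();
    /* ... purge the guest translation in this context ... */
    ia64_set_pta(saved_pta);                       /* restore the old PTA */
    ia64_dv_serialize_data();
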
line source
/* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
/*
 * vmmu.c: virtual memory management unit components.
 * Copyright (c) 2005, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com)
 * Yaozu Dong (Eddie Dong) (Eddie.dong@intel.com)
 */
#include <linux/sched.h>
#include <linux/mm.h>
#include <asm/tlb.h>
#include <asm/gcc_intrin.h>
#include <asm/vcpu.h>
#include <linux/interrupt.h>
#include <asm/vmx_vcpu.h>
#include <asm/vmx_mm_def.h>
#include <asm/vmx.h>
#include <asm/hw_irq.h>
#include <asm/vmx_pal_vsa.h>
#include <asm/kregs.h>
#include <xen/irq.h>

/*
 * Get the machine page frame number in 16KB units.
 * Input:
 *  d:    the domain
 *  gpfn: the guest page frame number
 */
u64 get_mfn(struct domain *d, u64 gpfn)
{
//    struct domain *d;
    u64    xen_gppn, xen_mppn, mpfn;
/*
    if ( domid == DOMID_SELF ) {
        d = current->domain;
    }
    else {
        d = find_domain_by_id(domid);
    }
 */
    xen_gppn = arch_to_xen_ppn(gpfn);
    xen_mppn = gmfn_to_mfn(d, xen_gppn);
/*
    for (i=0; i<pages; i++) {
        if ( gmfn_to_mfn(d, gpfn+i) == INVALID_MFN ) {
            return INVALID_MFN;
        }
    }
 */
    mpfn= xen_to_arch_ppn(xen_mppn);
    mpfn = mpfn | (((1UL <<(PAGE_SHIFT-ARCH_PAGE_SHIFT))-1)&gpfn);
    return mpfn;
}

/*
 * The VRN bits of va stand for which rr to get.
 */
//ia64_rr vmmu_get_rr(struct vcpu *vcpu, u64 va)
//{
//    ia64_rr   vrr;
//    vcpu_get_rr(vcpu, va, &vrr.rrval);
//    return vrr;
//}

/*
void recycle_message(thash_cb_t *hcb, u64 para)
{
    if(hcb->ht == THASH_VHPT)
    {
        printk("ERROR : vhpt recycle happening!!!\n");
    }
    printk("hcb=%p recycled with %lx\n",hcb,para);
}
 */

/*
 * Purge all guest TCs in logical processor.
 * Instead of purging all LP TCs, we should only purge
 * TCs that belong to this guest.
 */
void
purge_machine_tc_by_domid(domid_t domid)
{
#ifndef PURGE_GUEST_TC_ONLY
    // purge all TCs
    struct ia64_pal_retval  result;
    u64 addr;
    u32 count1,count2;
    u32 stride1,stride2;
    u32 i,j;
    u64 psr;

    result = ia64_pal_call_static(PAL_PTCE_INFO,0,0,0, 0);
    if ( result.status != 0 ) {
        panic ("PAL_PTCE_INFO failed\n");
    }
    addr = result.v0;
    count1 = HIGH_32BITS(result.v1);
    count2 = LOW_32BITS (result.v1);
    stride1 = HIGH_32BITS(result.v2);
    stride2 = LOW_32BITS (result.v2);

    local_irq_save(psr);
    for (i=0; i<count1; i++) {
        for (j=0; j<count2; j++) {
            ia64_ptce(addr);
            addr += stride2;
        }
        addr += stride1;
    }
    local_irq_restore(psr);
#else
    // purge all TCs belonging to this guest.
#endif
}
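
/*
 * Allocate the per-vcpu guest VHPT.  The lower half of the allocation is
 * used as the hash table and the upper half as the collision chain buffer;
 * the machine PTA value used for this vcpu (mpta) is taken from the
 * initialized VHPT.
 */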
static void init_domain_vhpt(struct vcpu *v)
{
    struct page_info *page;
    void * vbase;
    page = alloc_domheap_pages (NULL, VCPU_VHPT_ORDER, 0);
    if ( page == NULL ) {
        panic_domain(vcpu_regs(v),"Not enough contiguous memory for init_domain_vhpt\n");
    }
    vbase = page_to_virt(page);
    memset(vbase, 0, VCPU_VHPT_SIZE);
    printk("Allocate domain vhpt at 0x%p\n", vbase);

    VHPT(v,hash) = vbase;
    VHPT(v,hash_sz) = VCPU_VHPT_SIZE/2;
    VHPT(v,cch_buf) = (void *)((u64)vbase + VHPT(v,hash_sz));
    VHPT(v,cch_sz) = VCPU_VHPT_SIZE - VHPT(v,hash_sz);
    thash_init(&(v->arch.vhpt),VCPU_VHPT_SHIFT-1);
    v->arch.arch_vmx.mpta = v->arch.vhpt.pta.val;
}


void init_domain_tlb(struct vcpu *v)
{
    struct page_info *page;
    void * vbase;
    init_domain_vhpt(v);
    page = alloc_domheap_pages (NULL, VCPU_VTLB_ORDER, 0);
    if ( page == NULL ) {
        panic_domain(vcpu_regs(v),"Not enough contiguous memory for init_domain_tlb\n");
    }
    vbase = page_to_virt(page);
    memset(vbase, 0, VCPU_VTLB_SIZE);
    printk("Allocate domain vtlb at 0x%p\n", vbase);

    VTLB(v,hash) = vbase;
    VTLB(v,hash_sz) = VCPU_VTLB_SIZE/2;
    VTLB(v,cch_buf) = (void *)((u64)vbase + VTLB(v,hash_sz));
    VTLB(v,cch_sz) = VCPU_VTLB_SIZE - VTLB(v,hash_sz);
    thash_init(&(v->arch.vtlb),VCPU_VTLB_SHIFT-1);
}

void free_domain_tlb(struct vcpu *v)
{
    struct page_info *page;

    if ( v->arch.vtlb.hash) {
        page = virt_to_page(v->arch.vtlb.hash);
        free_domheap_pages(page, VCPU_VTLB_ORDER);
    }
    if ( v->arch.vhpt.hash) {
        page = virt_to_page(v->arch.vhpt.hash);
        free_domheap_pages(page, VCPU_VHPT_ORDER);
    }
}

/*
 * Insert a guest TLB entry into the machine TLB.
 * data: In TLB format
 */
void machine_tlb_insert(struct vcpu *d, thash_data_t *tlb)
{
    u64 psr;
    thash_data_t mtlb;
    unsigned int cl = tlb->cl;
    unsigned long mtlb_ppn;
    mtlb.ifa = tlb->vadr;
    mtlb.itir = tlb->itir & ~ITIR_RV_MASK;
    mtlb.page_flags = tlb->page_flags & ~PAGE_FLAGS_RV_MASK;
    mtlb.ppn = get_mfn(d->domain,tlb->ppn);
    mtlb_ppn=mtlb.ppn;

#if 0
    if (mtlb_ppn == INVALID_MFN)
        panic_domain(vcpu_regs(d),"Machine tlb insert with invalid mfn number.\n");
#endif

    psr = ia64_clear_ic();
    if ( cl == ISIDE_TLB ) {
        ia64_itc(1, mtlb.ifa, mtlb.page_flags, mtlb.ps);
    }
    else {
        ia64_itc(2, mtlb.ifa, mtlb.page_flags, mtlb.ps);
    }
    ia64_set_psr(psr);
    ia64_srlz_i();
    return;
}

/*
 * Purge machine tlb.
 * INPUT
 *  va: only bits 0:60 are valid
 *  ps: page size; (1<<ps) bytes is the address range to purge.
 */
void machine_tlb_purge(u64 va, u64 ps)
{
//    u64       psr;
//    psr = ia64_clear_ic();
    ia64_ptcl(va, ps << 2);
//    ia64_set_psr(psr);
//    ia64_srlz_i();
//    return;
}
/*
u64 machine_thash(u64 va)
{
    return ia64_thash(va);
}

u64 machine_ttag(u64 va)
{
    return ia64_ttag(va);
}
 */
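
/*
 * Compute the guest VHPT entry address and tag for a long-format VHPT.
 * The hash index combines the low 8 bits of the region id with the low
 * bits of the virtual page number; the tag is built from the next 16 rid
 * bits and the remaining vpn bits.  Each long-format entry is 32 bytes,
 * hence the index<<5.  This open-codes the computation instead of calling
 * the VSA (see the commented-out ia64_call_vsa alternative below).
 */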
thash_data_t * vsa_thash(PTA vpta, u64 va, u64 vrr, u64 *tag)
{
    u64 index,pfn,rid,pfn_bits;
    pfn_bits = vpta.size-5-8;
    pfn = REGION_OFFSET(va)>>_REGION_PAGE_SIZE(vrr);
    rid = _REGION_ID(vrr);
    index = ((rid&0xff)<<pfn_bits)|(pfn&((1UL<<pfn_bits)-1));
    *tag = ((rid>>8)&0xffff) | ((pfn >>pfn_bits)<<16);
    return (thash_data_t *)((vpta.base<<PTA_BASE_SHIFT)+(index<<5));
//    return ia64_call_vsa(PAL_VPS_THASH,va,vrr,vpta,0,0,0,0);
}

//u64 vsa_ttag(u64 va, u64 vrr)
//{
//    return ia64_call_vsa(PAL_VPS_TTAG,va,vrr,0,0,0,0,0);
//}
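
/*
 * Is the guest VHPT walker enabled for this kind of reference?  Both the
 * region's rr.ve bit and pta.ve must be set, and the relevant psr
 * translation bits must be on: dt for data/NA references, dt+it+ic for
 * instruction references, dt+rt for RSE references.
 */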
int vhpt_enabled(VCPU *vcpu, uint64_t vadr, vhpt_ref_t ref)
{
    ia64_rr  vrr;
    PTA      vpta;
    IA64_PSR vpsr;

    vpsr.val = vmx_vcpu_get_psr(vcpu);
    vcpu_get_rr(vcpu, vadr, &vrr.rrval);
    vmx_vcpu_get_pta(vcpu,&vpta.val);

    if ( vrr.ve & vpta.ve ) {
        switch ( ref ) {
        case DATA_REF:
        case NA_REF:
            return vpsr.dt;
        case INST_REF:
            return vpsr.dt && vpsr.it && vpsr.ic;
        case RSE_REF:
            return vpsr.dt && vpsr.rt;
        }
    }
    return 0;
}
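
/*
 * Check whether a guest virtual address falls into the unimplemented
 * address space: bits imp_va_msb..60 must be all zeros or all ones,
 * otherwise the address is unimplemented and 1 is returned.
 */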
int unimplemented_gva(VCPU *vcpu,u64 vadr)
{
    int bit=vcpu->domain->arch.imp_va_msb;
    u64 ladr =(vadr<<3)>>(3+bit);
    if(!ladr||ladr==(1U<<(61-bit))-1){
        return 0;
    }else{
        return 1;
    }
}

/*
 * Prefetch guest bundle code.
 * INPUT:
 *  gip:          guest IP to fetch from.
 *  code1, code2: receive the two dwords (8 bytes each) of the bundle.
 */
int
fetch_code(VCPU *vcpu, u64 gip, u64 *code1, u64 *code2)
{
    u64 gpip=0;   // guest physical IP
    u64 *vpa;
    thash_data_t *tlb;
    u64 mfn;
    struct page_info* page;

 again:
    if ( !(VCPU(vcpu, vpsr) & IA64_PSR_IT) ) {   // I-side physical mode
        gpip = gip;
    }
    else {
        tlb = vtlb_lookup(vcpu, gip, ISIDE_TLB);
//        if( tlb == NULL )
//            tlb = vtlb_lookup(vcpu, gip, DSIDE_TLB );
        if (tlb)
            gpip = (tlb->ppn >>(tlb->ps-12)<<tlb->ps) | ( gip & (PSIZE(tlb->ps)-1) );
    }
    if( gpip){
        mfn = gmfn_to_mfn(vcpu->domain, gpip >>PAGE_SHIFT);
        if( mfn == INVALID_MFN )
            panic_domain(vcpu_regs(vcpu),"fetch_code: invalid memory\n");
    }else{
        tlb = vhpt_lookup(gip);
        if( tlb == NULL)
            panic_domain(vcpu_regs(vcpu),"No entry found in ITLB and DTLB\n");
        mfn = tlb->ppn >> (PAGE_SHIFT - ARCH_PAGE_SHIFT);
    }

    page = mfn_to_page(mfn);
    if (get_page(page, vcpu->domain) == 0) {
        if (page_get_owner(page) != vcpu->domain) {
            // This page might be a page granted by another domain.
            panic_domain(NULL, "domain tries to execute foreign domain "
                         "page which might be mapped by grant table.\n");
        }
        goto again;
    }
    vpa = (u64 *)__va((mfn << PAGE_SHIFT) | (gip & (PAGE_SIZE - 1)));

    *code1 = *vpa++;
    *code2 = *vpa;
    put_page(page);
    return 1;
}
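
/*
 * Emulation of itc.i / itc.d: purge overlapping entries and insert the new
 * translation into the virtual TLB/VHPT.  With VTLB_DEBUG, a conflict with
 * an existing TR entry is treated as a guest error (a real CPU would raise
 * a Machine Check).
 */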
IA64FAULT vmx_vcpu_itc_i(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa)
{
#ifdef VTLB_DEBUG
    int slot;
    u64 ps, va;
    ps = itir_ps(itir);
    va = PAGEALIGN(ifa, ps);
    slot = vtr_find_overlap(vcpu, va, ps, ISIDE_TLB);
    if (slot >=0) {
        // generate MCA.
        panic_domain(vcpu_regs(vcpu),"Tlb conflict!!");
        return IA64_FAULT;
    }
#endif //VTLB_DEBUG
    thash_purge_and_insert(vcpu, pte, itir, ifa);
    return IA64_NO_FAULT;
}

IA64FAULT vmx_vcpu_itc_d(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa)
{
    u64 gpfn;
#ifdef VTLB_DEBUG
    int slot;
    u64 ps, va;
    ps = itir_ps(itir);
    va = PAGEALIGN(ifa, ps);
    slot = vtr_find_overlap(vcpu, va, ps, DSIDE_TLB);
    if (slot >=0) {
        // generate MCA.
        panic_domain(vcpu_regs(vcpu),"Tlb conflict!!");
        return IA64_FAULT;
    }
#endif //VTLB_DEBUG
    gpfn = (pte & _PAGE_PPN_MASK)>> PAGE_SHIFT;
    if (VMX_DOMAIN(vcpu) && __gpfn_is_io(vcpu->domain, gpfn))
        pte |= VTLB_PTE_IO;
    thash_purge_and_insert(vcpu, pte, itir, ifa);
    return IA64_NO_FAULT;
}

IA64FAULT vmx_vcpu_itr_i(VCPU *vcpu, u64 slot, u64 pte, u64 itir, u64 ifa)
{
#ifdef VTLB_DEBUG
    int index;
#endif
    u64 ps, va, rid;
    thash_data_t * p_itr;
    ps = itir_ps(itir);
    va = PAGEALIGN(ifa, ps);
#ifdef VTLB_DEBUG
    index = vtr_find_overlap(vcpu, va, ps, ISIDE_TLB);
    if (index >=0) {
        // generate MCA.
        panic_domain(vcpu_regs(vcpu),"Tlb conflict!!");
        return IA64_FAULT;
    }
    thash_purge_entries(vcpu, va, ps);
#endif
    vcpu_get_rr(vcpu, va, &rid);
    rid = rid& RR_RID_MASK;
    p_itr = (thash_data_t *)&vcpu->arch.itrs[slot];
    vmx_vcpu_set_tr(p_itr, pte, itir, va, rid);
    vcpu_quick_region_set(PSCBX(vcpu,itr_regions),va);
    return IA64_NO_FAULT;
}


IA64FAULT vmx_vcpu_itr_d(VCPU *vcpu, u64 slot, u64 pte, u64 itir, u64 ifa)
{
#ifdef VTLB_DEBUG
    int index;
    u64 gpfn;
#endif
    u64 ps, va, rid;
    thash_data_t * p_dtr;
    ps = itir_ps(itir);
    va = PAGEALIGN(ifa, ps);
#ifdef VTLB_DEBUG
    index = vtr_find_overlap(vcpu, va, ps, DSIDE_TLB);
    if (index>=0) {
        // generate MCA.
        panic_domain(vcpu_regs(vcpu),"Tlb conflict!!");
        return IA64_FAULT;
    }
    thash_purge_entries(vcpu, va, ps);
    gpfn = (pte & _PAGE_PPN_MASK)>> PAGE_SHIFT;
    if(VMX_DOMAIN(vcpu) && __gpfn_is_io(vcpu->domain,gpfn))
        pte |= VTLB_PTE_IO;
#endif
    vcpu_get_rr(vcpu, va, &rid);
    rid = rid& RR_RID_MASK;
    p_dtr = (thash_data_t *)&vcpu->arch.dtrs[slot];
    vmx_vcpu_set_tr(p_dtr, pte, itir, va, rid);
    vcpu_quick_region_set(PSCBX(vcpu,dtr_regions),va);
    return IA64_NO_FAULT;
}
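
/*
 * Emulation of ptr.d / ptr.i: mark any overlapping TR entries not-present,
 * then purge matching entries from the virtual TLB and VHPT.
 */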
IA64FAULT vmx_vcpu_ptr_d(VCPU *vcpu,UINT64 ifa,UINT64 ps)
{
    int index;
    u64 va;

    va = PAGEALIGN(ifa, ps);
    while ((index = vtr_find_overlap(vcpu, va, ps, DSIDE_TLB)) >= 0) {
        vcpu->arch.dtrs[index].pte.p=0;
    }
    thash_purge_entries(vcpu, va, ps);
    return IA64_NO_FAULT;
}

IA64FAULT vmx_vcpu_ptr_i(VCPU *vcpu,UINT64 ifa,UINT64 ps)
{
    int index;
    u64 va;

    va = PAGEALIGN(ifa, ps);
    while ((index = vtr_find_overlap(vcpu, va, ps, ISIDE_TLB)) >= 0) {
        vcpu->arch.itrs[index].pte.p=0;
    }
    thash_purge_entries(vcpu, va, ps);
    return IA64_NO_FAULT;
}

IA64FAULT vmx_vcpu_ptc_l(VCPU *vcpu, UINT64 va, UINT64 ps)
{
    va = PAGEALIGN(va, ps);
    thash_purge_entries(vcpu, va, ps);
    return IA64_NO_FAULT;
}


IA64FAULT vmx_vcpu_ptc_e(VCPU *vcpu, UINT64 va)
{
    thash_purge_all(vcpu);
    return IA64_NO_FAULT;
}

IA64FAULT vmx_vcpu_ptc_g(VCPU *vcpu, UINT64 va, UINT64 ps)
{
    vmx_vcpu_ptc_ga(vcpu, va, ps);
    return IA64_ILLOP_FAULT;
}
/*
IA64FAULT vmx_vcpu_ptc_ga(VCPU *vcpu,UINT64 va,UINT64 ps)
{
    vmx_vcpu_ptc_l(vcpu, va, ps);
    return IA64_NO_FAULT;
}
 */
struct ptc_ga_args {
    unsigned long vadr;
    unsigned long rid;
    unsigned long ps;
    struct vcpu *vcpu;
};
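
/*
 * Runs (via smp_call_function_single) on the processor where the target
 * vcpu executes.  It temporarily loads the purge address's region id into
 * rr0 (and the vcpu's vrr[0] shadow) and switches to that vcpu's machine
 * PTA with the ve bit (bit 0) cleared, so the VHPT walker stays disabled
 * while the VHPT is not mapped by a TR on this path -- the fix described
 * in the changeset comment above.  The previous rr0, vrr[0] and PTA are
 * restored afterwards.
 */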
static void ptc_ga_remote_func (void *varg)
{
    u64 oldrid, moldrid, mpta;
    struct ptc_ga_args *args = (struct ptc_ga_args *)varg;
    VCPU *v = args->vcpu;

    oldrid = VMX(v, vrr[0]);
    VMX(v, vrr[0]) = args->rid;
    moldrid = ia64_get_rr(0x0);
    ia64_set_rr(0x0,vrrtomrr(v,args->rid));
    mpta = ia64_get_pta();
    ia64_set_pta(v->arch.arch_vmx.mpta&(~1));
    ia64_srlz_d();
    vmx_vcpu_ptc_l(v, args->vadr, args->ps);
    VMX(v, vrr[0]) = oldrid;
    ia64_set_rr(0x0,moldrid);
    ia64_set_pta(mpta);
    ia64_dv_serialize_data();
}
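
/*
 * Emulation of ptc.ga: purge the translation on every initialised vcpu of
 * the domain.  The calling vcpu is purged directly; vcpus on other
 * processors are purged via ptc_ga_remote_func, retrying if the vcpu
 * migrates while the IPI is in flight.
 */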
IA64FAULT vmx_vcpu_ptc_ga(VCPU *vcpu,UINT64 va,UINT64 ps)
{
    struct domain *d = vcpu->domain;
    struct vcpu *v;
    struct ptc_ga_args args;

    args.vadr = va<<3>>3;
    vcpu_get_rr(vcpu, va, &args.rid);
    args.ps = ps;
    for_each_vcpu (d, v) {
        if (!test_bit(_VCPUF_initialised, &v->vcpu_flags))
            continue;

        args.vcpu = v;
        if (v->processor != vcpu->processor) {
            int proc;
            /* Flush VHPT on remote processors. */
            do {
                proc = v->processor;
                smp_call_function_single(v->processor,
                                         &ptc_ga_remote_func, &args, 0, 1);
                /* Try again if VCPU has migrated. */
            } while (proc != v->processor);
        }
        else if(v == vcpu)
            vmx_vcpu_ptc_l(v, va, ps);
        else
            ptc_ga_remote_func(&args);
    }
    return IA64_NO_FAULT;
}
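
/*
 * Emulation of thash: return the address of the guest VHPT entry that
 * would map vadr, based on the guest PTA and region register for that
 * address.
 */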
IA64FAULT vmx_vcpu_thash(VCPU *vcpu, UINT64 vadr, UINT64 *pval)
{
    PTA vpta;
    ia64_rr vrr;
    u64 vhpt_offset;
    vmx_vcpu_get_pta(vcpu, &vpta.val);
    vcpu_get_rr(vcpu, vadr, &vrr.rrval);
    if(vpta.vf){
        *pval = ia64_call_vsa(PAL_VPS_THASH,vadr,vrr.rrval,vpta.val,0,0,0,0);
        *pval = vpta.val & ~0xffff;
    }else{
        vhpt_offset=((vadr>>vrr.ps)<<3)&((1UL<<(vpta.size))-1);
        *pval = (vadr&VRN_MASK)|
            (vpta.val<<3>>(vpta.size+3)<<(vpta.size))|
            vhpt_offset;
    }
    return IA64_NO_FAULT;
}


IA64FAULT vmx_vcpu_ttag(VCPU *vcpu, UINT64 vadr, UINT64 *pval)
{
    ia64_rr vrr;
    PTA vpta;
    vmx_vcpu_get_pta(vcpu, &vpta.val);
    vcpu_get_rr(vcpu, vadr, &vrr.rrval);
    if(vpta.vf){
        *pval = ia64_call_vsa(PAL_VPS_TTAG,vadr,vrr.rrval,0,0,0,0,0);
    }else{
        *pval = 1;
    }
    return IA64_NO_FAULT;
}
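
/*
 * Emulation of tpa: translate a guest virtual address into a guest
 * physical address.  On a miss the appropriate fault (data TLB, alternate
 * data TLB, VHPT, nested TLB, page-not-present or NaT-page consumption)
 * is delivered to the guest instead of returning a translation.
 */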
IA64FAULT vmx_vcpu_tpa(VCPU *vcpu, UINT64 vadr, UINT64 *padr)
{
    thash_data_t *data;
    ISR visr,pt_isr;
    REGS *regs;
    u64 vhpt_adr;
    IA64_PSR vpsr;
    regs=vcpu_regs(vcpu);
    pt_isr.val=VMX(vcpu,cr_isr);
    visr.val=0;
    visr.ei=pt_isr.ei;
    visr.ir=pt_isr.ir;
    vpsr.val = vmx_vcpu_get_psr(vcpu);
    if(vpsr.ic==0){
        visr.ni=1;
    }
    visr.na=1;
    data = vtlb_lookup(vcpu, vadr, DSIDE_TLB);
    if(data){
        if(data->p==0){
            visr.na=1;
            vcpu_set_isr(vcpu,visr.val);
            page_not_present(vcpu, vadr);
            return IA64_FAULT;
        }else if(data->ma == VA_MATTR_NATPAGE){
            visr.na = 1;
            vcpu_set_isr(vcpu, visr.val);
            dnat_page_consumption(vcpu, vadr);
            return IA64_FAULT;
        }else{
            *padr = ((data->ppn >> (data->ps - 12)) << data->ps) |
                    (vadr & (PSIZE(data->ps) - 1));
            return IA64_NO_FAULT;
        }
    }
    data = vhpt_lookup(vadr);
    if(data){
        if(data->p==0){
            visr.na=1;
            vcpu_set_isr(vcpu,visr.val);
            page_not_present(vcpu, vadr);
            return IA64_FAULT;
        }else if(data->ma == VA_MATTR_NATPAGE){
            visr.na = 1;
            vcpu_set_isr(vcpu, visr.val);
            dnat_page_consumption(vcpu, vadr);
            return IA64_FAULT;
        }else{
            *padr = (get_gpfn_from_mfn(arch_to_xen_ppn(data->ppn)) << PAGE_SHIFT) |
                    (vadr & (PAGE_SIZE - 1));
            return IA64_NO_FAULT;
        }
    }
    else{
        if(!vhpt_enabled(vcpu, vadr, NA_REF)){
            if(vpsr.ic){
                vcpu_set_isr(vcpu, visr.val);
                alt_dtlb(vcpu, vadr);
                return IA64_FAULT;
            }
            else{
                nested_dtlb(vcpu);
                return IA64_FAULT;
            }
        }
        else{
            vmx_vcpu_thash(vcpu, vadr, &vhpt_adr);
            data = vtlb_lookup(vcpu, vhpt_adr, DSIDE_TLB);
            if(data){
                if(vpsr.ic){
                    vcpu_set_isr(vcpu, visr.val);
                    dtlb_fault(vcpu, vadr);
                    return IA64_FAULT;
                }
                else{
                    nested_dtlb(vcpu);
                    return IA64_FAULT;
                }
            }
            else{
                if(vpsr.ic){
                    vcpu_set_isr(vcpu, visr.val);
                    dvhpt_fault(vcpu, vadr);
                    return IA64_FAULT;
                }
                else{
                    nested_dtlb(vcpu);
                    return IA64_FAULT;
                }
            }
        }
    }
}
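
/*
 * Emulation of tak: return the protection key for vadr from the virtual
 * TLB, or 1 when no valid translation exists, the VHPT is in short format
 * (pta.vf clear), or the address is unimplemented.
 */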
IA64FAULT vmx_vcpu_tak(VCPU *vcpu, UINT64 vadr, UINT64 *key)
{
    thash_data_t *data;
    PTA vpta;
    vmx_vcpu_get_pta(vcpu, &vpta.val);
    if(vpta.vf==0 || unimplemented_gva(vcpu, vadr)){
        *key=1;
        return IA64_NO_FAULT;
    }
    data = vtlb_lookup(vcpu, vadr, DSIDE_TLB);
    if(!data||!data->p){
        *key=1;
    }else{
        *key=data->key;
    }
    return IA64_NO_FAULT;
}

/*
 * [FIXME] Is there any effective way to move this routine
 * into vmx_uaccess.h?  struct exec_domain is an incomplete type
 * there...
 *
 * This is the interface to look up the virtual TLB and then
 * return the corresponding machine address in the 2nd parameter.
 * The 3rd parameter contains how many bytes are mapped by the
 * matched vTLB entry, thus allowing the caller to copy more at once.
 *
 * If the lookup fails, -EFAULT is returned; otherwise 0 is
 * returned.  All upper domain access utilities rely on this routine
 * to determine the real machine address.
 *
 * Yes, put_user and get_user seem somewhat slow on top of it.
 * However these are the necessary steps for any vmx domain virtual
 * address, since that is a different address space from the HV's.
 * Later some short-circuit may be created for special cases.
 */
long
__domain_va_to_ma(unsigned long va, unsigned long* ma, unsigned long *len)
{
    unsigned long   mpfn, gpfn, m, n = *len;
    unsigned long   end;    /* end of the area mapped by current entry */
    thash_data_t    *entry;
    struct vcpu *v = current;

    entry = vtlb_lookup(v, va, DSIDE_TLB);
    if (entry == NULL)
        return -EFAULT;

    gpfn =(entry->ppn>>(PAGE_SHIFT-12));
    gpfn =PAGEALIGN(gpfn,(entry->ps-PAGE_SHIFT));
    gpfn = gpfn | POFFSET(va>>PAGE_SHIFT,(entry->ps-PAGE_SHIFT));

    mpfn = gmfn_to_mfn(v->domain, gpfn);
    m = (mpfn<<PAGE_SHIFT) | (va & (PAGE_SIZE - 1));
    /* machine address may not be contiguous */
    end = PAGEALIGN(m, PAGE_SHIFT) + PAGE_SIZE;
    /*end = PAGEALIGN(m, entry->ps) + PSIZE(entry->ps);*/
    /* Current entry can't map the whole requested area */
    if ((m + n) > end)
        n = end - m;

    *ma = m;
    *len = n;
    return 0;
}