ia64/xen-unstable

view xen/arch/ia64/vmx/vmmu.c @ 10938:bfc69471550e

[IA64] fix a fetch code bug

Fetch code may fail if there is no corresponding TLB entry
in the THASH-VTLB. This patch adds a retry mechanism to
resolve this issue.
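
The retry works as follows: when fetch_code() cannot find a translation for
the guest IP in either the guest VTLB or the machine VHPT, it purges the
(possibly stale) machine TC for that address and returns IA64_RETRY instead
of panicking, so the caller can let the guest re-fault, re-install the
mapping, and attempt the fetch again. A minimal caller-side sketch is shown
below; the surrounding emulation routine and its name are hypothetical, for
illustration only.

    /* Illustrative caller of fetch_code(); handle_bundle_fetch() and the
     * surrounding emulation flow are hypothetical, not part of this patch. */
    static unsigned long handle_bundle_fetch(VCPU *vcpu, u64 gip)
    {
        IA64_BUNDLE bundle;

        if (fetch_code(vcpu, gip, &bundle) == IA64_RETRY) {
            /* No VTLB/VHPT translation; fetch_code() has already purged
             * the stale machine TC.  Bail out so the guest re-faults and
             * inserts the missing translation, then retry the fetch. */
            return IA64_RETRY;
        }
        /* ... decode and emulate the fetched bundle ... */
        return IA64_NO_FAULT;
    }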

Signed-off-by: Anthony Xu <anthony.xu@intel.com>
author awilliam@xenbuild.aw
date Wed Aug 09 08:01:52 2006 -0600 (2006-08-09)
parents 679683333917
children d42e9a6f5378
line source
1 /* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
2 /*
3 * vmmu.c: virtual memory management unit components.
4 * Copyright (c) 2005, Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along with
16 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
17 * Place - Suite 330, Boston, MA 02111-1307 USA.
18 *
19 * Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com)
20 * Yaozu Dong (Eddie Dong) (Eddie.dong@intel.com)
21 */
22 #include <linux/sched.h>
23 #include <linux/mm.h>
24 #include <asm/tlb.h>
25 #include <asm/gcc_intrin.h>
26 #include <asm/vcpu.h>
27 #include <linux/interrupt.h>
28 #include <asm/vmx_vcpu.h>
29 #include <asm/vmx_mm_def.h>
30 #include <asm/vmx.h>
31 #include <asm/hw_irq.h>
32 #include <asm/vmx_pal_vsa.h>
33 #include <asm/kregs.h>
35 #include <xen/irq.h>
37 /*
38 * Get the machine page frame number, in 16KB units.
39 * Input:
40 * d: the domain; gpfn: the guest page frame number to translate.
41 */
42 u64 get_mfn(struct domain *d, u64 gpfn)
43 {
44 // struct domain *d;
45 u64 xen_gppn, xen_mppn, mpfn;
46 /*
47 if ( domid == DOMID_SELF ) {
48 d = current->domain;
49 }
50 else {
51 d = find_domain_by_id(domid);
52 }
53 */
54 xen_gppn = arch_to_xen_ppn(gpfn);
55 xen_mppn = gmfn_to_mfn(d, xen_gppn);
56 /*
57 for (i=0; i<pages; i++) {
58 if ( gmfn_to_mfn(d, gpfn+i) == INVALID_MFN ) {
59 return INVALID_MFN;
60 }
61 }
62 */
63 mpfn= xen_to_arch_ppn(xen_mppn);
64 mpfn = mpfn | (((1UL <<(PAGE_SHIFT-ARCH_PAGE_SHIFT))-1)&gpfn);
65 return mpfn;
67 }
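/*
 * A sketch of the arithmetic above, assuming arch_to_xen_ppn() and
 * xen_to_arch_ppn() shift by (PAGE_SHIFT - ARCH_PAGE_SHIFT), which the
 * mask on the final line suggests:
 *
 *   xen_gppn = gpfn >> (PAGE_SHIFT - ARCH_PAGE_SHIFT);   // to Xen-page units
 *   xen_mppn = gmfn_to_mfn(d, xen_gppn);                 // p2m lookup
 *   mpfn     = (xen_mppn << (PAGE_SHIFT - ARCH_PAGE_SHIFT))
 *              | (gpfn & ((1UL << (PAGE_SHIFT - ARCH_PAGE_SHIFT)) - 1));
 *
 * i.e. the translation happens at Xen-page granularity, and the low bits
 * of gpfn that select a sub-frame within that Xen page are carried over
 * unchanged.
 */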
69 /*
70 * The VRN bits of va stand for which rr to get.
71 */
72 //ia64_rr vmmu_get_rr(struct vcpu *vcpu, u64 va)
73 //{
74 // ia64_rr vrr;
75 // vcpu_get_rr(vcpu, va, &vrr.rrval);
76 // return vrr;
77 //}
79 /*
80 void recycle_message(thash_cb_t *hcb, u64 para)
81 {
82 if(hcb->ht == THASH_VHPT)
83 {
84 printk("ERROR : vhpt recycle happening!!!\n");
85 }
86 printk("hcb=%p recycled with %lx\n",hcb,para);
87 }
88 */
90 /*
91 * Purge all guest TCs on the logical processor.
92 * Instead of purging all LP TCs, we should only purge
93 * TCs that belong to this guest.
94 */
95 void
96 purge_machine_tc_by_domid(domid_t domid)
97 {
98 #ifndef PURGE_GUEST_TC_ONLY
99 // purge all TCs
100 struct ia64_pal_retval result;
101 u64 addr;
102 u32 count1,count2;
103 u32 stride1,stride2;
104 u32 i,j;
105 u64 psr;
107 result = ia64_pal_call_static(PAL_PTCE_INFO,0,0,0, 0);
108 if ( result.status != 0 ) {
109 panic ("PAL_PTCE_INFO failed\n");
110 }
111 addr = result.v0;
112 count1 = HIGH_32BITS(result.v1);
113 count2 = LOW_32BITS (result.v1);
114 stride1 = HIGH_32BITS(result.v2);
115 stride2 = LOW_32BITS (result.v2);
117 local_irq_save(psr);
118 for (i=0; i<count1; i++) {
119 for (j=0; j<count2; j++) {
120 ia64_ptce(addr);
121 addr += stride2;
122 }
123 addr += stride1;
124 }
125 local_irq_restore(psr);
126 #else
127 // purge only the TCs that belong to this guest.
128 #endif
129 }
131 static void init_domain_vhpt(struct vcpu *v)
132 {
133 struct page_info *page;
134 void * vbase;
135 page = alloc_domheap_pages (NULL, VCPU_VHPT_ORDER, 0);
136 if ( page == NULL ) {
137 panic_domain(vcpu_regs(v),"Not enough contiguous memory for init_domain_vhpt\n");
138 }
139 vbase = page_to_virt(page);
140 memset(vbase, 0, VCPU_VHPT_SIZE);
141 printk("Allocated domain vhpt at 0x%p\n", vbase);
143 VHPT(v,hash) = vbase;
144 VHPT(v,hash_sz) = VCPU_VHPT_SIZE/2;
145 VHPT(v,cch_buf) = (void *)((u64)vbase + VHPT(v,hash_sz));
146 VHPT(v,cch_sz) = VCPU_VHPT_SIZE - VHPT(v,hash_sz);
147 thash_init(&(v->arch.vhpt),VCPU_VHPT_SHIFT-1);
148 v->arch.arch_vmx.mpta = v->arch.vhpt.pta.val;
149 }
153 void init_domain_tlb(struct vcpu *v)
154 {
155 struct page_info *page;
156 void * vbase;
157 init_domain_vhpt(v);
158 page = alloc_domheap_pages (NULL, VCPU_VTLB_ORDER, 0);
159 if ( page == NULL ) {
160 panic_domain(vcpu_regs(v),"Not enough contiguous memory for init_domain_tlb\n");
161 }
162 vbase = page_to_virt(page);
163 memset(vbase, 0, VCPU_VTLB_SIZE);
164 printk("Allocated domain vtlb at 0x%p\n", vbase);
166 VTLB(v,hash) = vbase;
167 VTLB(v,hash_sz) = VCPU_VTLB_SIZE/2;
168 VTLB(v,cch_buf) = (void *)((u64)vbase + VTLB(v,hash_sz));
169 VTLB(v,cch_sz) = VCPU_VTLB_SIZE - VTLB(v,hash_sz);
170 thash_init(&(v->arch.vtlb),VCPU_VTLB_SHIFT-1);
171 }
173 void free_domain_tlb(struct vcpu *v)
174 {
175 struct page_info *page;
177 if ( v->arch.vtlb.hash) {
178 page = virt_to_page(v->arch.vtlb.hash);
179 free_domheap_pages(page, VCPU_VTLB_ORDER);
180 }
181 if ( v->arch.vhpt.hash) {
182 page = virt_to_page(v->arch.vhpt.hash);
183 free_domheap_pages(page, VCPU_VHPT_ORDER);
184 }
185 }
187 /*
188 * Insert a guest TLB entry into the machine TLB.
189 * data: in TLB format
190 */
191 void machine_tlb_insert(struct vcpu *d, thash_data_t *tlb)
192 {
193 u64 psr;
194 thash_data_t mtlb;
195 unsigned int cl = tlb->cl;
196 unsigned long mtlb_ppn;
197 mtlb.ifa = tlb->vadr;
198 mtlb.itir = tlb->itir & ~ITIR_RV_MASK;
199 mtlb.page_flags = tlb->page_flags & ~PAGE_FLAGS_RV_MASK;
200 mtlb.ppn = get_mfn(d->domain,tlb->ppn);
201 mtlb_ppn=mtlb.ppn;
203 #if 0
204 if (mtlb_ppn == INVALID_MFN)
205 panic_domain(vcpu_regs(d),"Machine tlb insert with invalid mfn number.\n");
206 #endif
208 psr = ia64_clear_ic();
209 if ( cl == ISIDE_TLB ) {
210 ia64_itc(1, mtlb.ifa, mtlb.page_flags, mtlb.ps);
211 }
212 else {
213 ia64_itc(2, mtlb.ifa, mtlb.page_flags, mtlb.ps);
214 }
215 ia64_set_psr(psr);
216 ia64_srlz_i();
217 return;
218 }
220 /*
221 * Purge the machine TLB.
222 * INPUT
223 * va: virtual address to purge; only bits 0:60 are valid.
224 * ps: log2 of the size of the range to purge; (1 << ps) bytes are purged.
225 *
226 */
228 void machine_tlb_purge(u64 va, u64 ps)
229 {
230 // u64 psr;
231 // psr = ia64_clear_ic();
232 ia64_ptcl(va, ps << 2);
233 // ia64_set_psr(psr);
234 // ia64_srlz_i();
235 // return;
236 }
237 /*
238 u64 machine_thash(u64 va)
239 {
240 return ia64_thash(va);
241 }
243 u64 machine_ttag(u64 va)
244 {
245 return ia64_ttag(va);
246 }
247 */
248 thash_data_t * vsa_thash(PTA vpta, u64 va, u64 vrr, u64 *tag)
249 {
250 u64 index,pfn,rid,pfn_bits;
251 pfn_bits = vpta.size-5-8;
252 pfn = REGION_OFFSET(va)>>_REGION_PAGE_SIZE(vrr);
253 rid = _REGION_ID(vrr);
254 index = ((rid&0xff)<<pfn_bits)|(pfn&((1UL<<pfn_bits)-1));
255 *tag = ((rid>>8)&0xffff) | ((pfn >>pfn_bits)<<16);
256 return (thash_data_t *)((vpta.base<<PTA_BASE_SHIFT)+(index<<5));
257 // return ia64_call_vsa(PAL_VPS_THASH,va,vrr,vpta,0,0,0,0);
258 }
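/*
 * A worked sketch of the software hash above, under the layout the shifts
 * imply (32-byte hash entries, hence the final << 5, and the low 8 RID bits
 * folded into the index, hence the "- 8" in pfn_bits):
 *
 *   pfn_bits = vpta.size - 5 - 8;                    // log2(#entries) - 8
 *   index    = (rid[7:0] << pfn_bits) | pfn[pfn_bits-1:0];
 *   tag      = rid[23:8] | ((pfn >> pfn_bits) << 16);
 *   entry    = (vpta.base << PTA_BASE_SHIFT) + index * 32;
 *
 * For example, vpta.size = 18 (a 256KB table) gives pfn_bits = 5, i.e.
 * 2^13 = 8192 entries of 32 bytes each.
 */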
260 //u64 vsa_ttag(u64 va, u64 vrr)
261 //{
262 // return ia64_call_vsa(PAL_VPS_TTAG,va,vrr,0,0,0,0,0);
263 //}
265 int vhpt_enabled(VCPU *vcpu, uint64_t vadr, vhpt_ref_t ref)
266 {
267 ia64_rr vrr;
268 PTA vpta;
269 IA64_PSR vpsr;
271 vpsr.val = VCPU(vcpu, vpsr);
272 vcpu_get_rr(vcpu, vadr, &vrr.rrval);
273 vmx_vcpu_get_pta(vcpu,&vpta.val);
275 if ( vrr.ve & vpta.ve ) {
276 switch ( ref ) {
277 case DATA_REF:
278 case NA_REF:
279 return vpsr.dt;
280 case INST_REF:
281 return vpsr.dt && vpsr.it && vpsr.ic;
282 case RSE_REF:
283 return vpsr.dt && vpsr.rt;
285 }
286 }
287 return 0;
288 }
291 int unimplemented_gva(VCPU *vcpu,u64 vadr)
292 {
293 #if 0
294 int bit=vcpu->domain->arch.imp_va_msb;
295 u64 ladr =(vadr<<3)>>(3+bit);
296 if(!ladr||ladr==(1U<<(61-bit))-1){
297 return 0;
298 }else{
299 return 1;
300 }
301 #else
302 return 0;
303 #endif
304 }
307 /*
308 * Fetch a guest bundle.
309 * INPUT: gip: guest IP; pbundle: used to return the fetched bundle.
310 * Returns IA64_NO_FAULT on success, or IA64_RETRY if no translation is
311 * currently available, in which case the caller is expected to retry.
312 */
313 unsigned long
314 fetch_code(VCPU *vcpu, u64 gip, IA64_BUNDLE *pbundle)
315 {
316 u64 gpip=0; // guest physical IP
317 u64 *vpa;
318 thash_data_t *tlb;
319 u64 mfn, maddr;
320 struct page_info* page;
322 again:
323 if ( !(VCPU(vcpu, vpsr) & IA64_PSR_IT) ) { // I-side physical mode
324 gpip = gip;
325 }
326 else {
327 tlb = vtlb_lookup(vcpu, gip, ISIDE_TLB);
328 // if( tlb == NULL )
329 // tlb = vtlb_lookup(vcpu, gip, DSIDE_TLB );
330 if (tlb)
331 gpip = (tlb->ppn >>(tlb->ps-12)<<tlb->ps) | ( gip & (PSIZE(tlb->ps)-1) );
332 }
333 if( gpip){
334 mfn = gmfn_to_mfn(vcpu->domain, gpip >>PAGE_SHIFT);
335 if( mfn == INVALID_MFN ) panic_domain(vcpu_regs(vcpu),"fetch_code: invalid memory\n");
336 maddr = (mfn << PAGE_SHIFT) | (gpip & (PAGE_SIZE - 1));
337 }else{
338 tlb = vhpt_lookup(gip);
339 if (tlb == NULL) {
340 ia64_ptcl(gip, ARCH_PAGE_SHIFT << 2);
341 return IA64_RETRY;
342 }
343 mfn = tlb->ppn >> (PAGE_SHIFT - ARCH_PAGE_SHIFT);
344 maddr = (tlb->ppn >> (tlb->ps - 12) << tlb->ps) |
345 (gip & (PSIZE(tlb->ps) - 1));
346 }
348 page = mfn_to_page(mfn);
349 if (get_page(page, vcpu->domain) == 0) {
350 if (page_get_owner(page) != vcpu->domain) {
351 // This page might be a page granted by another domain.
352 panic_domain(NULL, "domain tries to execute a foreign domain's "
353 "page, which might be mapped via a grant table.\n");
354 }
355 goto again;
356 }
357 vpa = (u64 *)__va(maddr);
359 pbundle->i64[0] = *vpa++;
360 pbundle->i64[1] = *vpa;
361 put_page(page);
362 return IA64_NO_FAULT;
363 }
365 IA64FAULT vmx_vcpu_itc_i(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa)
366 {
367 #ifdef VTLB_DEBUG
368 int slot;
369 u64 ps, va;
370 ps = itir_ps(itir);
371 va = PAGEALIGN(ifa, ps);
372 slot = vtr_find_overlap(vcpu, va, ps, ISIDE_TLB);
373 if (slot >=0) {
374 // generate MCA.
375 panic_domain(vcpu_regs(vcpu),"Tlb conflict!!");
376 return IA64_FAULT;
377 }
378 #endif //VTLB_DEBUG
379 pte &= ~PAGE_FLAGS_RV_MASK;
380 thash_purge_and_insert(vcpu, pte, itir, ifa, ISIDE_TLB);
381 return IA64_NO_FAULT;
382 }
384 IA64FAULT vmx_vcpu_itc_d(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa)
385 {
386 u64 gpfn;
387 #ifdef VTLB_DEBUG
388 int slot;
389 u64 ps, va;
390 ps = itir_ps(itir);
391 va = PAGEALIGN(ifa, ps);
392 slot = vtr_find_overlap(vcpu, va, ps, DSIDE_TLB);
393 if (slot >=0) {
394 // generate MCA.
395 panic_domain(vcpu_regs(vcpu),"Tlb conflict!!");
396 return IA64_FAULT;
397 }
398 #endif //VTLB_DEBUG
399 pte &= ~PAGE_FLAGS_RV_MASK;
400 gpfn = (pte & _PAGE_PPN_MASK)>> PAGE_SHIFT;
401 if (VMX_DOMAIN(vcpu) && __gpfn_is_io(vcpu->domain, gpfn))
402 pte |= VTLB_PTE_IO;
403 thash_purge_and_insert(vcpu, pte, itir, ifa, DSIDE_TLB);
404 return IA64_NO_FAULT;
406 }
411 IA64FAULT vmx_vcpu_itr_i(VCPU *vcpu, u64 slot, u64 pte, u64 itir, u64 ifa)
412 {
413 #ifdef VTLB_DEBUG
414 int index;
415 #endif
416 u64 ps, va, rid;
417 thash_data_t * p_itr;
418 ps = itir_ps(itir);
419 va = PAGEALIGN(ifa, ps);
420 #ifdef VTLB_DEBUG
421 index = vtr_find_overlap(vcpu, va, ps, ISIDE_TLB);
422 if (index >=0) {
423 // generate MCA.
424 panic_domain(vcpu_regs(vcpu),"Tlb conflict!!");
425 return IA64_FAULT;
426 }
427 thash_purge_entries(vcpu, va, ps);
428 #endif
429 pte &= ~PAGE_FLAGS_RV_MASK;
430 vcpu_get_rr(vcpu, va, &rid);
431 rid = rid& RR_RID_MASK;
432 p_itr = (thash_data_t *)&vcpu->arch.itrs[slot];
433 vmx_vcpu_set_tr(p_itr, pte, itir, va, rid);
434 vcpu_quick_region_set(PSCBX(vcpu,itr_regions),va);
435 return IA64_NO_FAULT;
436 }
439 IA64FAULT vmx_vcpu_itr_d(VCPU *vcpu, u64 slot, u64 pte, u64 itir, u64 ifa)
440 {
441 #ifdef VTLB_DEBUG
442 int index;
443 #endif
444 u64 gpfn;
445 u64 ps, va, rid;
446 thash_data_t * p_dtr;
447 ps = itir_ps(itir);
448 va = PAGEALIGN(ifa, ps);
449 #ifdef VTLB_DEBUG
450 index = vtr_find_overlap(vcpu, va, ps, DSIDE_TLB);
451 if (index>=0) {
452 // generate MCA.
453 panic_domain(vcpu_regs(vcpu),"Tlb conflict!!");
454 return IA64_FAULT;
455 }
456 #endif
457 pte &= ~PAGE_FLAGS_RV_MASK;
458 thash_purge_entries(vcpu, va, ps);
459 gpfn = (pte & _PAGE_PPN_MASK)>> PAGE_SHIFT;
460 if (VMX_DOMAIN(vcpu) && __gpfn_is_io(vcpu->domain, gpfn))
461 pte |= VTLB_PTE_IO;
462 vcpu_get_rr(vcpu, va, &rid);
463 rid = rid& RR_RID_MASK;
464 p_dtr = (thash_data_t *)&vcpu->arch.dtrs[slot];
465 vmx_vcpu_set_tr(p_dtr, pte, itir, va, rid);
466 vcpu_quick_region_set(PSCBX(vcpu,dtr_regions),va);
467 return IA64_NO_FAULT;
468 }
472 IA64FAULT vmx_vcpu_ptr_d(VCPU *vcpu,UINT64 ifa,UINT64 ps)
473 {
474 int index;
475 u64 va;
477 va = PAGEALIGN(ifa, ps);
478 while ((index = vtr_find_overlap(vcpu, va, ps, DSIDE_TLB)) >= 0) {
479 vcpu->arch.dtrs[index].pte.p=0;
480 }
481 thash_purge_entries(vcpu, va, ps);
482 return IA64_NO_FAULT;
483 }
485 IA64FAULT vmx_vcpu_ptr_i(VCPU *vcpu,UINT64 ifa,UINT64 ps)
486 {
487 int index;
488 u64 va;
490 va = PAGEALIGN(ifa, ps);
491 while ((index = vtr_find_overlap(vcpu, va, ps, ISIDE_TLB)) >= 0) {
492 vcpu->arch.itrs[index].pte.p=0;
493 }
494 thash_purge_entries(vcpu, va, ps);
495 return IA64_NO_FAULT;
496 }
498 IA64FAULT vmx_vcpu_ptc_l(VCPU *vcpu, UINT64 va, UINT64 ps)
499 {
500 va = PAGEALIGN(va, ps);
501 thash_purge_entries(vcpu, va, ps);
502 return IA64_NO_FAULT;
503 }
506 IA64FAULT vmx_vcpu_ptc_e(VCPU *vcpu, UINT64 va)
507 {
508 thash_purge_all(vcpu);
509 return IA64_NO_FAULT;
510 }
512 IA64FAULT vmx_vcpu_ptc_g(VCPU *vcpu, UINT64 va, UINT64 ps)
513 {
514 vmx_vcpu_ptc_ga(vcpu, va, ps);
515 return IA64_ILLOP_FAULT;
516 }
517 /*
518 IA64FAULT vmx_vcpu_ptc_ga(VCPU *vcpu,UINT64 va,UINT64 ps)
519 {
520 vmx_vcpu_ptc_l(vcpu, va, ps);
521 return IA64_NO_FAULT;
522 }
523 */
524 struct ptc_ga_args {
525 unsigned long vadr;
526 unsigned long rid;
527 unsigned long ps;
528 struct vcpu *vcpu;
529 };
531 static void ptc_ga_remote_func (void *varg)
532 {
533 u64 oldrid, moldrid, mpta, oldpsbits, vadr;
534 struct ptc_ga_args *args = (struct ptc_ga_args *)varg;
535 VCPU *v = args->vcpu;
536 vadr = args->vadr;
538 oldrid = VMX(v, vrr[0]);
539 VMX(v, vrr[0]) = args->rid;
540 oldpsbits = VMX(v, psbits[0]);
541 VMX(v, psbits[0]) = VMX(v, psbits[REGION_NUMBER(vadr)]);
542 moldrid = ia64_get_rr(0x0);
543 ia64_set_rr(0x0,vrrtomrr(v,args->rid));
544 mpta = ia64_get_pta();
545 ia64_set_pta(v->arch.arch_vmx.mpta&(~1));
546 ia64_srlz_d();
547 vmx_vcpu_ptc_l(v, REGION_OFFSET(vadr), args->ps);
548 VMX(v, vrr[0]) = oldrid;
549 VMX(v, psbits[0]) = oldpsbits;
550 ia64_set_rr(0x0,moldrid);
551 ia64_set_pta(mpta);
552 ia64_dv_serialize_data();
553 }
556 IA64FAULT vmx_vcpu_ptc_ga(VCPU *vcpu,UINT64 va,UINT64 ps)
557 {
559 struct domain *d = vcpu->domain;
560 struct vcpu *v;
561 struct ptc_ga_args args;
563 args.vadr = va;
564 vcpu_get_rr(vcpu, va, &args.rid);
565 args.ps = ps;
566 for_each_vcpu (d, v) {
567 if (!test_bit(_VCPUF_initialised, &v->vcpu_flags))
568 continue;
570 args.vcpu = v;
571 if (v->processor != vcpu->processor) {
572 int proc;
573 /* Flush VHPT on remote processors. */
574 do {
575 proc = v->processor;
576 smp_call_function_single(v->processor,
577 &ptc_ga_remote_func, &args, 0, 1);
578 /* Try again if VCPU has migrated. */
579 } while (proc != v->processor);
580 }
581 else if(v == vcpu)
582 vmx_vcpu_ptc_l(v, va, ps);
583 else
584 ptc_ga_remote_func(&args);
585 }
586 return IA64_NO_FAULT;
587 }
590 IA64FAULT vmx_vcpu_thash(VCPU *vcpu, UINT64 vadr, UINT64 *pval)
591 {
592 PTA vpta;
593 ia64_rr vrr;
594 u64 vhpt_offset;
595 vmx_vcpu_get_pta(vcpu, &vpta.val);
596 vcpu_get_rr(vcpu, vadr, &vrr.rrval);
597 if(vpta.vf){
598 *pval = ia64_call_vsa(PAL_VPS_THASH,vadr,vrr.rrval,vpta.val,0,0,0,0);
599 *pval = vpta.val & ~0xffff;
600 }else{
601 vhpt_offset=((vadr>>vrr.ps)<<3)&((1UL<<(vpta.size))-1);
602 *pval = (vadr&VRN_MASK)|
603 (vpta.val<<3>>(vpta.size+3)<<(vpta.size))|
604 vhpt_offset;
605 }
606 return IA64_NO_FAULT;
607 }
610 IA64FAULT vmx_vcpu_ttag(VCPU *vcpu, UINT64 vadr, UINT64 *pval)
611 {
612 ia64_rr vrr;
613 PTA vpta;
614 vmx_vcpu_get_pta(vcpu, &vpta.val);
615 vcpu_get_rr(vcpu, vadr, &vrr.rrval);
616 if(vpta.vf){
617 *pval = ia64_call_vsa(PAL_VPS_TTAG,vadr,vrr.rrval,0,0,0,0,0);
618 }else{
619 *pval = 1;
620 }
621 return IA64_NO_FAULT;
622 }
626 IA64FAULT vmx_vcpu_tpa(VCPU *vcpu, UINT64 vadr, UINT64 *padr)
627 {
628 thash_data_t *data;
629 ISR visr,pt_isr;
630 REGS *regs;
631 u64 vhpt_adr;
632 IA64_PSR vpsr;
633 regs=vcpu_regs(vcpu);
634 pt_isr.val=VMX(vcpu,cr_isr);
635 visr.val=0;
636 visr.ei=pt_isr.ei;
637 visr.ir=pt_isr.ir;
638 vpsr.val = VCPU(vcpu, vpsr);
639 if(vpsr.ic==0){
640 visr.ni=1;
641 }
642 visr.na=1;
643 data = vtlb_lookup(vcpu, vadr, DSIDE_TLB);
644 if(data){
645 if(data->p==0){
646 visr.na=1;
647 vcpu_set_isr(vcpu,visr.val);
648 page_not_present(vcpu, vadr);
649 return IA64_FAULT;
650 }else if(data->ma == VA_MATTR_NATPAGE){
651 visr.na = 1;
652 vcpu_set_isr(vcpu, visr.val);
653 dnat_page_consumption(vcpu, vadr);
654 return IA64_FAULT;
655 }else{
656 *padr = ((data->ppn >> (data->ps - 12)) << data->ps) |
657 (vadr & (PSIZE(data->ps) - 1));
658 return IA64_NO_FAULT;
659 }
660 }
661 data = vhpt_lookup(vadr);
662 if(data){
663 if(data->p==0){
664 visr.na=1;
665 vcpu_set_isr(vcpu,visr.val);
666 page_not_present(vcpu, vadr);
667 return IA64_FAULT;
668 }else if(data->ma == VA_MATTR_NATPAGE){
669 visr.na = 1;
670 vcpu_set_isr(vcpu, visr.val);
671 dnat_page_consumption(vcpu, vadr);
672 return IA64_FAULT;
673 }else{
674 *padr = (get_gpfn_from_mfn(arch_to_xen_ppn(data->ppn)) << PAGE_SHIFT) | (vadr & (PAGE_SIZE - 1));
675 return IA64_NO_FAULT;
676 }
677 }
678 else{
679 if(!vhpt_enabled(vcpu, vadr, NA_REF)){
680 if(vpsr.ic){
681 vcpu_set_isr(vcpu, visr.val);
682 alt_dtlb(vcpu, vadr);
683 return IA64_FAULT;
684 }
685 else{
686 nested_dtlb(vcpu);
687 return IA64_FAULT;
688 }
689 }
690 else{
691 vmx_vcpu_thash(vcpu, vadr, &vhpt_adr);
692 data = vtlb_lookup(vcpu, vhpt_adr, DSIDE_TLB);
693 if(data){
694 if(vpsr.ic){
695 vcpu_set_isr(vcpu, visr.val);
696 dtlb_fault(vcpu, vadr);
697 return IA64_FAULT;
698 }
699 else{
700 nested_dtlb(vcpu);
701 return IA64_FAULT;
702 }
703 }
704 else{
705 if(vpsr.ic){
706 vcpu_set_isr(vcpu, visr.val);
707 dvhpt_fault(vcpu, vadr);
708 return IA64_FAULT;
709 }
710 else{
711 nested_dtlb(vcpu);
712 return IA64_FAULT;
713 }
714 }
715 }
716 }
717 }
719 IA64FAULT vmx_vcpu_tak(VCPU *vcpu, UINT64 vadr, UINT64 *key)
720 {
721 thash_data_t *data;
722 PTA vpta;
723 vmx_vcpu_get_pta(vcpu, &vpta.val);
724 if(vpta.vf==0 || unimplemented_gva(vcpu, vadr)){
725 *key=1;
726 return IA64_NO_FAULT;
727 }
728 data = vtlb_lookup(vcpu, vadr, DSIDE_TLB);
729 if(!data||!data->p){
730 *key=1;
731 }else{
732 *key=data->key;
733 }
734 return IA64_NO_FAULT;
735 }
737 /*
738 * [FIXME] Is there an effective way to move this routine
739 * into vmx_uaccess.h? struct exec_domain is an incomplete type
740 * there...
741 *
742 * This is the interface to look up the virtual TLB and then
743 * return the corresponding machine address in the 2nd parameter.
744 * The 3rd parameter returns how many bytes are mapped by the
745 * matched vTLB entry, so the caller can copy more at once.
746 *
747 * If the lookup fails, -EFAULT is returned; otherwise 0 is
748 * returned. All upper domain-access utilities rely on this
749 * routine to determine the real machine address.
750 *
751 * Yes, put_user and get_user seem somewhat slow because of it.
752 * However, these steps are necessary for any vmx domain virtual
753 * address, since that's a different address space from the HV's.
754 * Later, a short-circuit may be added for special cases.
755 */
756 long
757 __domain_va_to_ma(unsigned long va, unsigned long* ma, unsigned long *len)
758 {
759 unsigned long mpfn, gpfn, m, n = *len;
760 unsigned long end; /* end of the area mapped by current entry */
761 thash_data_t *entry;
762 struct vcpu *v = current;
764 entry = vtlb_lookup(v, va, DSIDE_TLB);
765 if (entry == NULL)
766 return -EFAULT;
768 gpfn = (entry->ppn >> (PAGE_SHIFT - 12));
769 gpfn = PAGEALIGN(gpfn, (entry->ps - PAGE_SHIFT));
770 gpfn = gpfn | POFFSET(va >> PAGE_SHIFT, (entry->ps - PAGE_SHIFT));
772 mpfn = gmfn_to_mfn(v->domain, gpfn);
773 m = (mpfn<<PAGE_SHIFT) | (va & (PAGE_SIZE - 1));
774 /* the machine address range may not be contiguous */
775 end = PAGEALIGN(m, PAGE_SHIFT) + PAGE_SIZE;
776 /*end = PAGEALIGN(m, entry->ps) + PSIZE(entry->ps);*/
777 /* The current entry can't map the whole requested area */
778 if ((m + n) > end)
779 n = end - m;
781 *ma = m;
782 *len = n;
783 return 0;
784 }
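/*
 * Illustrative only: a minimal sketch of how an upper-level domain-access
 * helper might drive __domain_va_to_ma() in a loop, using the length it
 * returns to walk a buffer that spans several machine pages.  The helper
 * name and the __va()/memcpy copy step are assumptions for the example,
 * not part of this file.
 */
static long example_copy_from_guest_va(void *to, unsigned long va,
                                       unsigned long size)
{
    unsigned long ma, len;

    while (size > 0) {
        len = size;
        if (__domain_va_to_ma(va, &ma, &len))
            return -EFAULT;                  /* no vTLB entry for this va */
        memcpy(to, (void *)__va(ma), len);   /* copy what this entry maps */
        to = (char *)to + len;
        va += len;
        size -= len;
    }
    return 0;
}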