ia64/xen-unstable

xen/arch/ia64/vmx/vmmu.c @ 9765:7c7bcf173f8b

[IA64] cleanup vtlb code

This patch is to clean up vtlb code.

Signed-off-by: Anthony Xu <anthony.xu@intel.com>
author awilliam@xenbuild.aw
date Tue Apr 25 20:53:38 2006 -0600 (2006-04-25)
parents 6e3841e5ef8f
children 6e979aa0e6d2
/* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
/*
 * vmmu.c: virtual memory management unit components.
 * Copyright (c) 2005, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com)
 * Yaozu Dong (Eddie Dong) (Eddie.dong@intel.com)
 */
#include <linux/sched.h>
#include <linux/mm.h>
#include <asm/tlb.h>
#include <asm/gcc_intrin.h>
#include <asm/vcpu.h>
#include <linux/interrupt.h>
#include <asm/vmx_vcpu.h>
#include <asm/vmx_mm_def.h>
#include <asm/vmx.h>
#include <asm/hw_irq.h>
#include <asm/vmx_pal_vsa.h>
#include <asm/kregs.h>
#include <asm/vcpu.h>
#include <xen/irq.h>

/*
 * Get the machine page frame number in 16KB unit
 * Input:
 *  d:    the domain
 *  gpfn: guest page frame number
 */
u64 get_mfn(struct domain *d, u64 gpfn)
{
//    struct domain *d;
    u64 xen_gppn, xen_mppn, mpfn;
/*
    if ( domid == DOMID_SELF ) {
        d = current->domain;
    }
    else {
        d = find_domain_by_id(domid);
    }
*/
    xen_gppn = arch_to_xen_ppn(gpfn);
    xen_mppn = gmfn_to_mfn(d, xen_gppn);
/*
    for (i=0; i<pages; i++) {
        if ( gmfn_to_mfn(d, gpfn+i) == INVALID_MFN ) {
            return INVALID_MFN;
        }
    }
*/
    mpfn = xen_to_arch_ppn(xen_mppn);
    mpfn = mpfn | (((1UL << (PAGE_SHIFT - ARCH_PAGE_SHIFT)) - 1) & gpfn);
    return mpfn;
}
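
/*
 * Worked example (assuming, purely as an illustration, PAGE_SHIFT == 14
 * for 16KB Xen pages and ARCH_PAGE_SHIFT == 12 for 4KB ppn units):
 *   gpfn 0x1235 -> xen_gppn 0x48d; if gmfn_to_mfn() returns 0x300, then
 *   mpfn = (0x300 << 2) | (0x1235 & 0x3) = 0xc01.
 */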

/*
 * The VRN bits of va stand for which rr to get.
 */
//ia64_rr vmmu_get_rr(struct vcpu *vcpu, u64 va)
//{
//    ia64_rr vrr;
//    vcpu_get_rr(vcpu, va, &vrr.rrval);
//    return vrr;
//}

/*
void recycle_message(thash_cb_t *hcb, u64 para)
{
    if(hcb->ht == THASH_VHPT)
    {
        printk("ERROR : vhpt recycle happening!!!\n");
    }
    printk("hcb=%p recycled with %lx\n", hcb, para);
}
*/

/*
 * Purge all guest TCs in logical processor.
 * Instead of purging all LP TCs, we should only purge
 * TCs that belong to this guest.
 */
void
purge_machine_tc_by_domid(domid_t domid)
{
#ifndef PURGE_GUEST_TC_ONLY
    // purge all TCs
    struct ia64_pal_retval result;
    u64 addr;
    u32 count1, count2;
    u32 stride1, stride2;
    u32 i, j;
    u64 psr;

    result = ia64_pal_call_static(PAL_PTCE_INFO, 0, 0, 0, 0);
    if ( result.status != 0 ) {
        panic ("PAL_PTCE_INFO failed\n");
    }
    addr = result.v0;
    count1 = HIGH_32BITS(result.v1);
    count2 = LOW_32BITS (result.v1);
    stride1 = HIGH_32BITS(result.v2);
    stride2 = LOW_32BITS (result.v2);

    local_irq_save(psr);
    for (i = 0; i < count1; i++) {
        for (j = 0; j < count2; j++) {
            ia64_ptce(addr);
            addr += stride2;
        }
        addr += stride1;
    }
    local_irq_restore(psr);
#else
    // purge all TCs belonging to this guest.
#endif
}

static void init_domain_vhpt(struct vcpu *v)
{
    struct page_info *page;
    void *vbase;
    page = alloc_domheap_pages (NULL, VCPU_VHPT_ORDER, 0);
    if ( page == NULL ) {
        panic("Not enough contiguous memory for init_domain_vhpt\n");
    }
    vbase = page_to_virt(page);
    memset(vbase, 0, VCPU_VHPT_SIZE);
    printk("Allocate domain vhpt at 0x%p\n", vbase);

    VHPT(v,hash) = vbase;
    VHPT(v,hash_sz) = VCPU_VHPT_SIZE/2;
    VHPT(v,cch_buf) = (void *)((u64)vbase + VHPT(v,hash_sz));
    VHPT(v,cch_sz) = VCPU_VHPT_SIZE - VHPT(v,hash_sz);
    thash_init(&(v->arch.vhpt), VCPU_VHPT_SHIFT-1);
    v->arch.arch_vmx.mpta = v->arch.vhpt.pta.val;
}
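
/*
 * Both the per-vcpu VHPT above and the vTLB below use the same layout:
 * the first half of the allocation is the hash table proper, the second
 * half is the collision chain (cch) buffer.
 */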

void init_domain_tlb(struct vcpu *v)
{
    struct page_info *page;
    void *vbase;
    init_domain_vhpt(v);
    page = alloc_domheap_pages (NULL, VCPU_VTLB_ORDER, 0);
    if ( page == NULL ) {
        panic("Not enough contiguous memory for init_domain_tlb\n");
    }
    vbase = page_to_virt(page);
    memset(vbase, 0, VCPU_VTLB_SIZE);
    printk("Allocate domain tlb at 0x%p\n", vbase);

    VTLB(v,hash) = vbase;
    VTLB(v,hash_sz) = VCPU_VTLB_SIZE/2;
    VTLB(v,cch_buf) = (void *)((u64)vbase + VTLB(v,hash_sz));
    VTLB(v,cch_sz) = VCPU_VTLB_SIZE - VTLB(v,hash_sz);
    thash_init(&(v->arch.vtlb), VCPU_VTLB_SHIFT-1);
}

void free_domain_tlb(struct vcpu *v)
{
    struct page_info *page;

    if ( v->arch.vtlb.hash) {
        page = virt_to_page(v->arch.vtlb.hash);
        free_domheap_pages(page, VCPU_VTLB_ORDER);
    }
    if ( v->arch.vhpt.hash) {
        page = virt_to_page(v->arch.vhpt.hash);
        free_domheap_pages(page, VCPU_VHPT_ORDER);
    }
}

/*
 * Insert a guest TLB entry into the machine TLB.
 * tlb: the guest entry, in TLB format.
 */
void machine_tlb_insert(struct vcpu *d, thash_data_t *tlb)
{
    u64 psr;
    thash_data_t mtlb;
    unsigned int cl = tlb->cl;
    unsigned long mtlb_ppn;
    mtlb.ifa = tlb->vadr;
    mtlb.itir = tlb->itir & ~ITIR_RV_MASK;
    mtlb.page_flags = tlb->page_flags & ~PAGE_FLAGS_RV_MASK;
    mtlb.ppn = get_mfn(d->domain, tlb->ppn);
    mtlb_ppn = mtlb.ppn;
    if (mtlb_ppn == INVALID_MFN)
        panic("Machine tlb insert with invalid mfn number.\n");

    psr = ia64_clear_ic();
    if ( cl == ISIDE_TLB ) {
        ia64_itc(1, mtlb.ifa, mtlb.page_flags, mtlb.ps);
    }
    else {
        ia64_itc(2, mtlb.ifa, mtlb.page_flags, mtlb.ps);
    }
    ia64_set_psr(psr);
    ia64_srlz_i();
    return;
}

/*
 * Purge machine tlb.
 * INPUT
 *   va: virtual address; only bits 0:60 are valid.
 *   ps: log2 of the size of the address range to purge,
 *       i.e. (1UL << ps) bytes.
 */
void machine_tlb_purge(u64 va, u64 ps)
{
//    u64 psr;
//    psr = ia64_clear_ic();
    ia64_ptcl(va, ps << 2);
//    ia64_set_psr(psr);
//    ia64_srlz_i();
//    return;
}
/*
u64 machine_thash(u64 va)
{
    return ia64_thash(va);
}

u64 machine_ttag(u64 va)
{
    return ia64_ttag(va);
}
*/
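
/*
 * Software computation of the guest VHPT hash bucket and tag (instead of
 * calling PAL_VPS_THASH, see the commented-out call below):
 *   - the table holds 2^(vpta.size-5) entries of 32 bytes each;
 *   - the index is built from the low 8 bits of the RID and the low
 *     (vpta.size-5-8) bits of the virtual page number;
 *   - the remaining RID and page-number bits form the tag.
 */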
thash_data_t *vsa_thash(PTA vpta, u64 va, u64 vrr, u64 *tag)
{
    u64 index, pfn, rid, pfn_bits;
    pfn_bits = vpta.size - 5 - 8;
    pfn = REGION_OFFSET(va) >> _REGION_PAGE_SIZE(vrr);
    rid = _REGION_ID(vrr);
    index = ((rid & 0xff) << pfn_bits) | (pfn & ((1UL << pfn_bits) - 1));
    *tag = ((rid >> 8) & 0xffff) | ((pfn >> pfn_bits) << 16);
    return (thash_data_t *)((vpta.base << PTA_BASE_SHIFT) + (index << 5));
//    return ia64_call_vsa(PAL_VPS_THASH,va,vrr,vpta,0,0,0,0);
}

//u64 vsa_ttag(u64 va, u64 vrr)
//{
//    return ia64_call_vsa(PAL_VPS_TTAG,va,vrr,0,0,0,0,0);
//}
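
/*
 * Whether the guest VHPT walker is enabled for this reference type:
 * both rr.ve and pta.ve must be set, plus psr.dt (data/NA references),
 * psr.dt/it/ic (instruction references) or psr.dt/rt (RSE references).
 */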
int vhpt_enabled(VCPU *vcpu, uint64_t vadr, vhpt_ref_t ref)
{
    ia64_rr vrr;
    PTA vpta;
    IA64_PSR vpsr;

    vpsr.val = vmx_vcpu_get_psr(vcpu);
    vcpu_get_rr(vcpu, vadr, &vrr.rrval);
    vmx_vcpu_get_pta(vcpu, &vpta.val);

    if ( vrr.ve & vpta.ve ) {
        switch ( ref ) {
        case DATA_REF:
        case NA_REF:
            return vpsr.dt;
        case INST_REF:
            return vpsr.dt && vpsr.it && vpsr.ic;
        case RSE_REF:
            return vpsr.dt && vpsr.rt;
        }
    }
    return 0;
}
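
/*
 * Return 1 if the guest virtual address lies in the unimplemented
 * address space, i.e. if bits imp_va_msb..60 of vadr are neither all
 * zeroes nor all ones (the VRN bits 61..63 are ignored).
 */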
int unimplemented_gva(VCPU *vcpu, u64 vadr)
{
    int bit = vcpu->domain->arch.imp_va_msb;
    u64 ladr = (vadr << 3) >> (3 + bit);
    if (!ladr || ladr == (1U << (61 - bit)) - 1) {
        return 0;
    } else {
        return 1;
    }
}

/*
 * Prefetch guest bundle code.
 * INPUT:
 *  gip: guest ip to fetch from.
 *  code1, code2: buffers to hold the two 8-byte halves of the bundle.
 */
int
fetch_code(VCPU *vcpu, u64 gip, u64 *code1, u64 *code2)
{
    u64 gpip = 0;   // guest physical IP
    u64 *vpa;
    thash_data_t *tlb;
    u64 mfn;

    if ( !(VCPU(vcpu, vpsr) & IA64_PSR_IT) ) {   // I-side physical mode
        gpip = gip;
    }
    else {
        tlb = vtlb_lookup(vcpu, gip, ISIDE_TLB);
//        if( tlb == NULL )
//            tlb = vtlb_lookup(vcpu, gip, DSIDE_TLB );
        if (tlb)
            gpip = (tlb->ppn >> (tlb->ps - 12) << tlb->ps) | (gip & (PSIZE(tlb->ps) - 1));
    }
    if ( gpip ) {
        mfn = gmfn_to_mfn(vcpu->domain, gpip >> PAGE_SHIFT);
        if ( mfn == INVALID_MFN )  panic("fetch_code: invalid memory\n");
        vpa = (u64 *)__va((gip & (PAGE_SIZE - 1)) | (mfn << PAGE_SHIFT));
    } else {
        tlb = vhpt_lookup(gip);
        if ( tlb == NULL )
            panic("No entry found in ITLB and DTLB\n");
        vpa = (u64 *)__va((tlb->ppn >> (PAGE_SHIFT - ARCH_PAGE_SHIFT) << PAGE_SHIFT) | (gip & (PAGE_SIZE - 1)));
    }
    *code1 = *vpa++;
    *code2 = *vpa;
    return 1;
}
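
/*
 * Emulation of itc.i / itc.d: if the new translation overlaps an
 * existing TR, real hardware would raise a machine check; here we
 * simply panic.  Otherwise the overlapping vTLB/VHPT entries are
 * purged and the new entry is inserted.
 */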
IA64FAULT vmx_vcpu_itc_i(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa)
{
    int slot;
    u64 ps, va;
    ps = itir_ps(itir);
    va = PAGEALIGN(ifa, ps);
    slot = vtr_find_overlap(vcpu, va, ps, ISIDE_TLB);
    if (slot >= 0) {
        // generate MCA.
        panic("Tlb conflict!!");
        return IA64_FAULT;
    }
    thash_purge_and_insert(vcpu, pte, itir, ifa);
    return IA64_NO_FAULT;
}

IA64FAULT vmx_vcpu_itc_d(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa)
{
    int slot;
    u64 ps, va, gpfn;
    ps = itir_ps(itir);
    va = PAGEALIGN(ifa, ps);
    slot = vtr_find_overlap(vcpu, va, ps, DSIDE_TLB);
    if (slot >= 0) {
        // generate MCA.
        panic("Tlb conflict!!");
        return IA64_FAULT;
    }
    gpfn = (pte & _PAGE_PPN_MASK) >> PAGE_SHIFT;
    if (VMX_DOMAIN(vcpu) && __gpfn_is_io(vcpu->domain, gpfn))
        pte |= VTLB_PTE_IO;
    thash_purge_and_insert(vcpu, pte, itir, ifa);
    return IA64_NO_FAULT;
}

IA64FAULT vmx_vcpu_itr_i(VCPU *vcpu, u64 slot, u64 pte, u64 itir, u64 ifa)
{
    int index;
    u64 ps, va, rid;

    ps = itir_ps(itir);
    va = PAGEALIGN(ifa, ps);
    index = vtr_find_overlap(vcpu, va, ps, ISIDE_TLB);
    if (index >= 0) {
        // generate MCA.
        panic("Tlb conflict!!");
        return IA64_FAULT;
    }
    thash_purge_entries(vcpu, va, ps);
    vcpu_get_rr(vcpu, va, &rid);
    rid = rid & RR_RID_MASK;
    vmx_vcpu_set_tr((thash_data_t *)&vcpu->arch.itrs[slot], pte, itir, va, rid);
    vcpu_quick_region_set(PSCBX(vcpu, itr_regions), va);
    return IA64_NO_FAULT;
}


IA64FAULT vmx_vcpu_itr_d(VCPU *vcpu, u64 slot, u64 pte, u64 itir, u64 ifa)
{
    int index;
    u64 ps, va, gpfn, rid;

    ps = itir_ps(itir);
    va = PAGEALIGN(ifa, ps);
    index = vtr_find_overlap(vcpu, va, ps, DSIDE_TLB);
    if (index >= 0) {
        // generate MCA.
        panic("Tlb conflict!!");
        return IA64_FAULT;
    }
    thash_purge_entries(vcpu, va, ps);
    gpfn = (pte & _PAGE_PPN_MASK) >> PAGE_SHIFT;
    if (__gpfn_is_io(vcpu->domain, gpfn))
        pte |= VTLB_PTE_IO;
    vcpu_get_rr(vcpu, va, &rid);
    rid = rid & RR_RID_MASK;
    vmx_vcpu_set_tr((thash_data_t *)&vcpu->arch.dtrs[slot], pte, itir, va, rid);
    vcpu_quick_region_set(PSCBX(vcpu, dtr_regions), va);
    return IA64_NO_FAULT;
}
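
/*
 * Emulation of ptr.d / ptr.i: if the purged range overlaps a TR, that
 * TR is marked not-present, then all matching vTLB/VHPT entries are
 * purged.
 */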
IA64FAULT vmx_vcpu_ptr_d(VCPU *vcpu, UINT64 ifa, UINT64 ps)
{
    int index;
    u64 va;

    va = PAGEALIGN(ifa, ps);
    index = vtr_find_overlap(vcpu, va, ps, DSIDE_TLB);
    if (index >= 0) {
        vcpu->arch.dtrs[index].pte.p = 0;
        index = vtr_find_overlap(vcpu, va, ps, DSIDE_TLB);
    }
    thash_purge_entries(vcpu, va, ps);
    return IA64_NO_FAULT;
}

IA64FAULT vmx_vcpu_ptr_i(VCPU *vcpu, UINT64 ifa, UINT64 ps)
{
    int index;
    u64 va;

    va = PAGEALIGN(ifa, ps);
    index = vtr_find_overlap(vcpu, va, ps, ISIDE_TLB);
    if (index >= 0) {
        vcpu->arch.itrs[index].pte.p = 0;
        index = vtr_find_overlap(vcpu, va, ps, ISIDE_TLB);
    }
    thash_purge_entries(vcpu, va, ps);
    return IA64_NO_FAULT;
}

IA64FAULT vmx_vcpu_ptc_l(VCPU *vcpu, UINT64 va, UINT64 ps)
{
    va = PAGEALIGN(va, ps);
    thash_purge_entries(vcpu, va, ps);
    return IA64_NO_FAULT;
}


IA64FAULT vmx_vcpu_ptc_e(VCPU *vcpu, UINT64 va)
{
    thash_purge_all(vcpu);
    return IA64_NO_FAULT;
}

IA64FAULT vmx_vcpu_ptc_g(VCPU *vcpu, UINT64 va, UINT64 ps)
{
    vmx_vcpu_ptc_l(vcpu, va, ps);
    return IA64_ILLOP_FAULT;
}

IA64FAULT vmx_vcpu_ptc_ga(VCPU *vcpu, UINT64 va, UINT64 ps)
{
    vmx_vcpu_ptc_l(vcpu, va, ps);
    return IA64_NO_FAULT;
}
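
/*
 * thash emulation.  For the short-format VHPT (pta.vf == 0) the hash
 * address is computed as:
 *   (vadr & VRN_MASK)
 *     | (the PTA base address aligned down to a 2^pta.size boundary)
 *     | (((vadr >> rr.ps) << 3) & (2^pta.size - 1))
 */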
IA64FAULT vmx_vcpu_thash(VCPU *vcpu, UINT64 vadr, UINT64 *pval)
{
    PTA vpta;
    ia64_rr vrr;
    u64 vhpt_offset;
    vmx_vcpu_get_pta(vcpu, &vpta.val);
    vcpu_get_rr(vcpu, vadr, &vrr.rrval);
    if (vpta.vf) {
        *pval = ia64_call_vsa(PAL_VPS_THASH, vadr, vrr.rrval, vpta.val, 0, 0, 0, 0);
        *pval = vpta.val & ~0xffff;
    } else {
        vhpt_offset = ((vadr >> vrr.ps) << 3) & ((1UL << (vpta.size)) - 1);
        *pval = (vadr & VRN_MASK) |
                (vpta.val << 3 >> (vpta.size + 3) << (vpta.size)) |
                vhpt_offset;
    }
    return IA64_NO_FAULT;
}

IA64FAULT vmx_vcpu_ttag(VCPU *vcpu, UINT64 vadr, UINT64 *pval)
{
    ia64_rr vrr;
    PTA vpta;
    vmx_vcpu_get_pta(vcpu, &vpta.val);
    vcpu_get_rr(vcpu, vadr, &vrr.rrval);
    if (vpta.vf) {
        *pval = ia64_call_vsa(PAL_VPS_TTAG, vadr, vrr.rrval, 0, 0, 0, 0, 0);
    } else {
        *pval = 1;
    }
    return IA64_NO_FAULT;
}
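
/*
 * tpa emulation.  The address is first looked up in the vTLB; if that
 * misses, the machine VHPT is consulted; if both miss, the appropriate
 * fault (alt dtlb / dtlb / vhpt / nested dtlb, depending on psr.ic and
 * whether the guest VHPT walker is enabled) is injected into the guest.
 */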
IA64FAULT vmx_vcpu_tpa(VCPU *vcpu, UINT64 vadr, UINT64 *padr)
{
    thash_data_t *data;
    ISR visr, pt_isr;
    REGS *regs;
    u64 vhpt_adr;
    IA64_PSR vpsr;
    regs = vcpu_regs(vcpu);
    pt_isr.val = VMX(vcpu, cr_isr);
    visr.val = 0;
    visr.ei = pt_isr.ei;
    visr.ir = pt_isr.ir;
    vpsr.val = vmx_vcpu_get_psr(vcpu);
    if (vpsr.ic == 0) {
        visr.ni = 1;
    }
    visr.na = 1;
    data = vtlb_lookup(vcpu, vadr, DSIDE_TLB);
    if (data) {
        if (data->p == 0) {
            visr.na = 1;
            vcpu_set_isr(vcpu, visr.val);
            page_not_present(vcpu, vadr);
            return IA64_FAULT;
        } else if (data->ma == VA_MATTR_NATPAGE) {
            visr.na = 1;
            vcpu_set_isr(vcpu, visr.val);
            dnat_page_consumption(vcpu, vadr);
            return IA64_FAULT;
        } else {
            *padr = (data->ppn << 12) | (vadr & (PSIZE(data->ps) - 1));
            return IA64_NO_FAULT;
        }
    }
    data = vhpt_lookup(vadr);
    if (data) {
        if (data->p == 0) {
            visr.na = 1;
            vcpu_set_isr(vcpu, visr.val);
            page_not_present(vcpu, vadr);
            return IA64_FAULT;
        } else if (data->ma == VA_MATTR_NATPAGE) {
            visr.na = 1;
            vcpu_set_isr(vcpu, visr.val);
            dnat_page_consumption(vcpu, vadr);
            return IA64_FAULT;
        } else {
            *padr = ((*(mpt_table + arch_to_xen_ppn(data->ppn))) << PAGE_SHIFT) | (vadr & (PAGE_SIZE - 1));
            return IA64_NO_FAULT;
        }
    }
    else {
        if (!vhpt_enabled(vcpu, vadr, NA_REF)) {
            if (vpsr.ic) {
                vcpu_set_isr(vcpu, visr.val);
                alt_dtlb(vcpu, vadr);
                return IA64_FAULT;
            }
            else {
                nested_dtlb(vcpu);
                return IA64_FAULT;
            }
        }
        else {
            vmx_vcpu_thash(vcpu, vadr, &vhpt_adr);
            data = vtlb_lookup(vcpu, vhpt_adr, DSIDE_TLB);
            if (data) {
                if (vpsr.ic) {
                    vcpu_set_isr(vcpu, visr.val);
                    dtlb_fault(vcpu, vadr);
                    return IA64_FAULT;
                }
                else {
                    nested_dtlb(vcpu);
                    return IA64_FAULT;
                }
            }
            else {
                if (vpsr.ic) {
                    vcpu_set_isr(vcpu, visr.val);
                    dvhpt_fault(vcpu, vadr);
                    return IA64_FAULT;
                }
                else {
                    nested_dtlb(vcpu);
                    return IA64_FAULT;
                }
            }
        }
    }
}

IA64FAULT vmx_vcpu_tak(VCPU *vcpu, UINT64 vadr, UINT64 *key)
{
    thash_data_t *data;
    PTA vpta;
    vmx_vcpu_get_pta(vcpu, &vpta.val);
    if (vpta.vf == 0 || unimplemented_gva(vcpu, vadr)) {
        *key = 1;
        return IA64_NO_FAULT;
    }
    data = vtlb_lookup(vcpu, vadr, DSIDE_TLB);
    if (!data || !data->p) {
        *key = 1;
    } else {
        *key = data->key;
    }
    return IA64_NO_FAULT;
}

/*
 * [FIXME] Is there any effective way to move this routine
 * into vmx_uaccess.h? struct exec_domain is an incomplete type
 * in that way...
 *
 * This is the interface to look up the virtual TLB and then
 * return the corresponding machine address in the 2nd parameter.
 * The 3rd parameter contains how many bytes are mapped by the
 * matched vTLB entry, thus allowing the caller to copy more at once.
 *
 * If the lookup fails, -EFAULT is returned; otherwise 0 is returned.
 * All upper domain access utilities rely on this routine to
 * determine the real machine address.
 *
 * Yes, put_user and get_user seem to be somewhat slowed by it.
 * However it's a necessary step for any vmx domain virtual address,
 * since that's a different address space from the HV's.
 * Later some short-circuit may be created for special cases.
 */
long
__domain_va_to_ma(unsigned long va, unsigned long *ma, unsigned long *len)
{
    unsigned long mpfn, gpfn, m, n = *len;
    unsigned long end;    /* end of the area mapped by current entry */
    thash_data_t *entry;
    struct vcpu *v = current;

    entry = vtlb_lookup(v, va, DSIDE_TLB);
    if (entry == NULL)
        return -EFAULT;

    gpfn = (entry->ppn >> (PAGE_SHIFT - 12));
    gpfn = PAGEALIGN(gpfn, (entry->ps - PAGE_SHIFT));
    gpfn = gpfn | POFFSET(va >> PAGE_SHIFT, (entry->ps - PAGE_SHIFT));

    mpfn = gmfn_to_mfn(v->domain, gpfn);
    m = (mpfn << PAGE_SHIFT) | (va & (PAGE_SIZE - 1));
    /* the machine address may not be contiguous */
    end = PAGEALIGN(m, PAGE_SHIFT) + PAGE_SIZE;
    /*end = PAGEALIGN(m, entry->ps) + PSIZE(entry->ps);*/
    /* Current entry can't map all requested area */
    if ((m + n) > end)
        n = end - m;

    *ma = m;
    *len = n;
    return 0;
}
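
/*
 * Illustrative caller (a hypothetical sketch, not part of the original
 * file, hence kept under #if 0): shows how the *len output lets a copy
 * loop advance across several vTLB entries.
 */
#if 0
static long example_copy_from_domain_va(void *to, unsigned long va,
                                        unsigned long size)
{
    unsigned long ma, len;

    while (size > 0) {
        len = size;
        if (__domain_va_to_ma(va, &ma, &len) != 0)
            return -EFAULT;                    /* no vTLB mapping for va */
        memcpy(to, (void *)__va(ma), len);     /* copy what this entry maps */
        to = (char *)to + len;
        va += len;
        size -= len;
    }
    return 0;
}
#endif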