ia64/xen-unstable

view xen/arch/ia64/vmx/vmmu.c @ 9679:6e3841e5ef8f

[IA64] VTi: add pseudo support of long format VHPT

Pseudo support of VHPT long format.

Signed-off-by: Tristan Gingold <tristan.gingold@bull.net>
author awilliam@xenbuild.aw
date Thu Apr 13 14:08:30 2006 -0600 (2006-04-13)
parents b09e8f46c9f6
children 7c7bcf173f8b
line source
1 /* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
2 /*
3 * vmmu.c: virtual memory management unit components.
4 * Copyright (c) 2005, Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along with
16 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
17 * Place - Suite 330, Boston, MA 02111-1307 USA.
18 *
19 * Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com)
20 * Yaozu Dong (Eddie Dong) (Eddie.dong@intel.com)
21 */
22 #include <linux/sched.h>
23 #include <linux/mm.h>
24 #include <asm/tlb.h>
25 #include <asm/gcc_intrin.h>
26 #include <asm/vcpu.h>
27 #include <linux/interrupt.h>
28 #include <asm/vmx_vcpu.h>
29 #include <asm/vmx_mm_def.h>
30 #include <asm/vmx.h>
31 #include <asm/hw_irq.h>
32 #include <asm/vmx_pal_vsa.h>
33 #include <asm/kregs.h>
34 #include <asm/vcpu.h>
35 #include <xen/irq.h>
37 /*
38 * Get the machine page frame number, in 16KB units.
39 * Input:
40 * d: the domain; gpfn: guest page frame number, in 16KB units.
41 */
42 u64 get_mfn(struct domain *d, u64 gpfn)
43 {
44 // struct domain *d;
45 u64 xen_gppn, xen_mppn, mpfn;
46 /*
47 if ( domid == DOMID_SELF ) {
48 d = current->domain;
49 }
50 else {
51 d = find_domain_by_id(domid);
52 }
53 */
54 xen_gppn = arch_to_xen_ppn(gpfn);
55 xen_mppn = gmfn_to_mfn(d, xen_gppn);
56 /*
57 for (i=0; i<pages; i++) {
58 if ( gmfn_to_mfn(d, gpfn+i) == INVALID_MFN ) {
59 return INVALID_MFN;
60 }
61 }
62 */
63 mpfn= xen_to_arch_ppn(xen_mppn);
64 mpfn = mpfn | (((1UL <<(PAGE_SHIFT-ARCH_PAGE_SHIFT))-1)&gpfn);
65 return mpfn;
67 }
69 /*
70 * The VRN bits of va stand for which rr to get.
71 */
72 //ia64_rr vmmu_get_rr(struct vcpu *vcpu, u64 va)
73 //{
74 // ia64_rr vrr;
75 // vcpu_get_rr(vcpu, va, &vrr.rrval);
76 // return vrr;
77 //}
79 /*
80 void recycle_message(thash_cb_t *hcb, u64 para)
81 {
82 if(hcb->ht == THASH_VHPT)
83 {
84 printk("ERROR : vhpt recycle happenning!!!\n");
85 }
86 printk("hcb=%p recycled with %lx\n",hcb,para);
87 }
88 */
90 /*
91 * Purge all guest TCs on the logical processor.
92 * Instead of purging all LP TCs, we should only purge
93 * TCs that belong to this guest.
94 */
95 void
96 purge_machine_tc_by_domid(domid_t domid)
97 {
98 #ifndef PURGE_GUEST_TC_ONLY
99 // purge all TCs
100 struct ia64_pal_retval result;
101 u64 addr;
102 u32 count1,count2;
103 u32 stride1,stride2;
104 u32 i,j;
105 u64 psr;
107 result = ia64_pal_call_static(PAL_PTCE_INFO,0,0,0, 0);
108 if ( result.status != 0 ) {
109 panic ("PAL_PTCE_INFO failed\n");
110 }
111 addr = result.v0;
112 count1 = HIGH_32BITS(result.v1);
113 count2 = LOW_32BITS (result.v1);
114 stride1 = HIGH_32BITS(result.v2);
115 stride2 = LOW_32BITS (result.v2);
117 local_irq_save(psr);
118 for (i=0; i<count1; i++) {
119 for (j=0; j<count2; j++) {
120 ia64_ptce(addr);
121 addr += stride2;
122 }
123 addr += stride1;
124 }
125 local_irq_restore(psr);
126 #else
127 // purge all TCs belonging to this guest.
128 #endif
129 }
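/*
 * Set up the per-vcpu VHPT in the area [vbase, vcur): the thash_cb_t
 * control block is taken from the top of the area, the first
 * VCPU_VHPT_SIZE/2 bytes hold the hash table, and the space in between
 * is the collision chain buffer.  The PTA value describing this VHPT
 * is saved in arch_vmx.mpta.
 */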
131 static thash_cb_t *init_domain_vhpt(struct vcpu *d, void *vbase, void *vcur)
132 {
133 // struct page_info *page;
134 thash_cb_t *vhpt;
135 PTA pta_value;
136 vcur -= sizeof (thash_cb_t);
137 vhpt = vcur;
138 vhpt->ht = THASH_VHPT;
139 vhpt->vcpu = d;
140 /* Setup guest pta */
141 pta_value.val = 0;
142 pta_value.ve = 1;
143 pta_value.vf = 1;
144 pta_value.size = VCPU_VHPT_SHIFT - 1; /* 16M */
145 pta_value.base = ((u64)vbase) >> PTA_BASE_SHIFT;
146 d->arch.arch_vmx.mpta = pta_value.val;
148 vhpt->hash = vbase;
149 vhpt->hash_sz = VCPU_VHPT_SIZE/2;
150 vhpt->cch_buf = (void *)(vbase + vhpt->hash_sz);
151 vhpt->cch_sz = (u64)vcur - (u64)vhpt->cch_buf;
152 thash_init(vhpt,VCPU_VHPT_SHIFT-1);
153 return vhpt;
154 }
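/*
 * Allocate one contiguous VCPU_VHPT_ORDER block per vcpu and split it:
 * the top VCPU_VTLB_SIZE bytes form the vTLB, the remainder the VHPT.
 * Each part gets its own thash_cb_t at the end of its area, and the
 * vTLB control block keeps a pointer to the VHPT one.
 */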
158 thash_cb_t *init_domain_tlb(struct vcpu *d)
159 {
160 struct page_info *page;
161 void *vbase, *vhptbase, *vcur;
162 thash_cb_t *tlb;
164 page = alloc_domheap_pages (NULL, VCPU_VHPT_ORDER, 0);
165 if ( page == NULL ) {
166 panic("No enough contiguous memory for init_domain_mm\n");
167 }
168 vhptbase = page_to_virt(page);
169 memset(vhptbase, 0, VCPU_VHPT_SIZE);
170 printk("Allocate domain tlb&vhpt at 0x%lx\n", (u64)vhptbase);
171 vbase = vhptbase + VCPU_VHPT_SIZE - VCPU_VTLB_SIZE;
172 vcur = (void*)((u64)vbase + VCPU_VTLB_SIZE);
173 vcur -= sizeof (thash_cb_t);
174 tlb = vcur;
175 tlb->ht = THASH_TLB;
176 tlb->vcpu = d;
177 tlb->vhpt = init_domain_vhpt(d,vhptbase,vbase);
178 // tlb->hash_func = machine_thash;
179 tlb->hash = vbase;
180 tlb->hash_sz = VCPU_VTLB_SIZE/2;
181 tlb->cch_buf = (void *)(vbase + tlb->hash_sz);
182 tlb->cch_sz = (u64)vcur - (u64)tlb->cch_buf;
183 // tlb->recycle_notifier = recycle_message;
184 thash_init(tlb,VCPU_VTLB_SHIFT-1);
185 return tlb;
186 }
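/*
 * Free the combined VHPT/vTLB block allocated by init_domain_tlb().
 * The allocation base is recovered from v->arch.vtlb, whose control
 * block sits sizeof(thash_cb_t) below the end of the block.
 */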
188 void free_domain_tlb(struct vcpu *v)
189 {
190 struct page_info *page;
191 void *vhptbase;
192 thash_cb_t *tlb;
194 if ( v->arch.vtlb ) {
195 tlb = v->arch.vtlb;
196 vhptbase = (void*)((u64)tlb + sizeof (thash_cb_t)) - VCPU_VHPT_SIZE;
197 page = virt_to_page(vhptbase);
198 free_domheap_pages(page, VCPU_VHPT_ORDER);
199 }
200 }
202 /*
203 * Insert guest TLB to machine TLB.
204 * data: In TLB format
205 */
206 void machine_tlb_insert(struct vcpu *d, thash_data_t *tlb)
207 {
208 u64 psr;
209 thash_data_t mtlb;
210 unsigned int cl = tlb->cl;
211 unsigned long mtlb_ppn;
212 mtlb.ifa = tlb->vadr;
213 mtlb.itir = tlb->itir & ~ITIR_RV_MASK;
214 mtlb.page_flags = tlb->page_flags & ~PAGE_FLAGS_RV_MASK;
215 mtlb.ppn = get_mfn(d->domain,tlb->ppn);
216 mtlb_ppn=mtlb.ppn;
217 if (mtlb_ppn == INVALID_MFN)
218 panic("Machine tlb insert with invalid mfn number.\n");
220 psr = ia64_clear_ic();
221 if ( cl == ISIDE_TLB ) {
222 ia64_itc(1, mtlb.ifa, mtlb.page_flags, mtlb.ps);
223 }
224 else {
225 ia64_itc(2, mtlb.ifa, mtlb.page_flags, mtlb.ps);
226 }
227 ia64_set_psr(psr);
228 ia64_srlz_i();
229 return;
230 }
232 /*
233 * Purge the machine TLB.
234 * INPUT
235 * va: virtual address to purge; only bits 0:60 are valid.
236 * ps: page size in bits; the purged range covers (1<<ps) bytes
237 * starting at va.
238 *
239 */
240 void machine_tlb_purge(u64 va, u64 ps)
241 {
242 // u64 psr;
243 // psr = ia64_clear_ic();
244 ia64_ptcl(va, ps << 2);
245 // ia64_set_psr(psr);
246 // ia64_srlz_i();
247 // return;
248 }
249 /*
250 u64 machine_thash(u64 va)
251 {
252 return ia64_thash(va);
253 }
255 u64 machine_ttag(u64 va)
256 {
257 return ia64_ttag(va);
258 }
259 */
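/*
 * Software version of the long-format VHPT hash: compute the hash-table
 * index and tag for (va, vrr) under the guest PTA.  Entries are 32 bytes
 * (index << 5), matching the long format.  A commented-out PAL_VPS_THASH
 * call remains below as the alternative.
 */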
260 thash_data_t * vsa_thash(PTA vpta, u64 va, u64 vrr, u64 *tag)
261 {
262 u64 index,pfn,rid,pfn_bits;
263 pfn_bits = vpta.size-5-8;
264 pfn = REGION_OFFSET(va)>>_REGION_PAGE_SIZE(vrr);
265 rid = _REGION_ID(vrr);
266 index = ((rid&0xff)<<pfn_bits)|(pfn&((1UL<<pfn_bits)-1));
267 *tag = ((rid>>8)&0xffff) | ((pfn >>pfn_bits)<<16);
268 return (thash_data_t *)((vpta.base<<PTA_BASE_SHIFT)+(index<<5));
269 // return ia64_call_vsa(PAL_VPS_THASH,va,vrr,vpta,0,0,0,0);
270 }
272 //u64 vsa_ttag(u64 va, u64 vrr)
273 //{
274 // return ia64_call_vsa(PAL_VPS_TTAG,va,vrr,0,0,0,0,0);
275 //}
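/*
 * Is the guest VHPT walker enabled for this address and reference type?
 * Requires rr.ve and pta.ve, plus the psr bits relevant to the access
 * (dt for data, dt/it/ic for instruction, dt/rt for RSE references).
 */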
277 int vhpt_enabled(VCPU *vcpu, uint64_t vadr, vhpt_ref_t ref)
278 {
279 ia64_rr vrr;
280 PTA vpta;
281 IA64_PSR vpsr;
283 vpsr.val = vmx_vcpu_get_psr(vcpu);
284 vcpu_get_rr(vcpu, vadr, &vrr.rrval);
285 vmx_vcpu_get_pta(vcpu,&vpta.val);
287 if ( vrr.ve & vpta.ve ) {
288 switch ( ref ) {
289 case DATA_REF:
290 case NA_REF:
291 return vpsr.dt;
292 case INST_REF:
293 return vpsr.dt && vpsr.it && vpsr.ic;
294 case RSE_REF:
295 return vpsr.dt && vpsr.rt;
297 }
298 }
299 return 0;
300 }
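/*
 * Return 1 if vadr lies in the guest's unimplemented virtual address
 * hole: bits imp_va_msb..60 must be all zeros or all ones for the
 * address to be implemented.
 */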
303 int unimplemented_gva(VCPU *vcpu,u64 vadr)
304 {
305 int bit=vcpu->domain->arch.imp_va_msb;
306 u64 ladr =(vadr<<3)>>(3+bit);
307 if(!ladr||ladr==(1UL<<(61-bit))-1){
308 return 0;
309 }else{
310 return 1;
311 }
312 }
315 /*
316 * Fetch guest bundle code.
317 * INPUT:
318 * gip: guest IP of the bundle to fetch.
319 * code1/code2: receive the two 8-byte halves of the bundle.
320 */
321 int
322 fetch_code(VCPU *vcpu, u64 gip, u64 *code1, u64 *code2)
323 {
324 u64 gpip=0; // guest physical IP
325 u64 *vpa;
326 thash_data_t *tlb;
327 thash_cb_t *hcb;
328 u64 mfn;
330 if ( !(VCPU(vcpu, vpsr) & IA64_PSR_IT) ) { // I-side physical mode
331 gpip = gip;
332 }
333 else {
334 hcb = vmx_vcpu_get_vtlb(vcpu);
335 tlb = vtlb_lookup(hcb, gip, ISIDE_TLB);
336 // if( tlb == NULL )
337 // tlb = vtlb_lookup(hcb, gip, DSIDE_TLB );
338 if (tlb)
339 gpip = (tlb->ppn >>(tlb->ps-12)<<tlb->ps) | ( gip & (PSIZE(tlb->ps)-1) );
340 }
341 if( gpip){
342 mfn = gmfn_to_mfn(vcpu->domain, gpip >>PAGE_SHIFT);
343 if( mfn == INVALID_MFN ) panic("fetch_code: invalid memory\n");
344 vpa =(u64 *)__va( (gip & (PAGE_SIZE-1)) | (mfn<<PAGE_SHIFT));
345 }else{
346 tlb = vhpt_lookup(gip);
347 if( tlb == NULL)
348 panic("No entry found in ITLB and DTLB\n");
349 vpa =(u64 *)__va((tlb->ppn>>(PAGE_SHIFT-ARCH_PAGE_SHIFT)<<PAGE_SHIFT)|(gip&(PAGE_SIZE-1)));
350 }
351 *code1 = *vpa++;
352 *code2 = *vpa;
353 return 1;
354 }
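/*
 * Emulate itc.i: reject (panic) on overlap with a guest ITR, otherwise
 * purge the range and insert the new entry into the vTLB.
 */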
356 IA64FAULT vmx_vcpu_itc_i(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa)
357 {
358 int slot;
359 u64 ps, va;
360 thash_cb_t *hcb;
362 ps = itir_ps(itir);
363 va = PAGEALIGN(ifa, ps);
364 slot = vtr_find_overlap(vcpu, va, ps, ISIDE_TLB);
365 if (slot >=0) {
366 // generate MCA.
367 panic("Tlb conflict!!");
368 return IA64_FAULT;
369 }
370 hcb = vmx_vcpu_get_vtlb(vcpu);
371 thash_purge_and_insert(hcb, pte, itir, ifa);
372 return IA64_NO_FAULT;
373 }
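/*
 * Emulate itc.d: as itc.i, but the entry is tagged VTLB_PTE_IO when the
 * guest frame maps an I/O page.
 */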
375 IA64FAULT vmx_vcpu_itc_d(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa)
376 {
377 int slot;
378 u64 ps, va, gpfn;
379 thash_cb_t *hcb;
381 ps = itir_ps(itir);
382 va = PAGEALIGN(ifa, ps);
383 slot = vtr_find_overlap(vcpu, va, ps, DSIDE_TLB);
384 if (slot >=0) {
385 // generate MCA.
386 panic("Tlb conflict!!");
387 return IA64_FAULT;
388 }
389 hcb = vmx_vcpu_get_vtlb(vcpu);
390 gpfn = (pte & _PAGE_PPN_MASK)>> PAGE_SHIFT;
391 if(__gpfn_is_io(vcpu->domain,gpfn))
392 pte |= VTLB_PTE_IO;
393 thash_purge_and_insert(hcb, pte, itir, ifa);
394 return IA64_NO_FAULT;
396 }
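/*
 * Emulate itr.i: install a guest instruction translation register.
 * An overlap with an existing ITR is treated as a TLB conflict;
 * otherwise the range is purged from the vTLB and the TR is recorded
 * in vcpu->arch.itrs[slot].
 */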
401 IA64FAULT vmx_vcpu_itr_i(VCPU *vcpu, u64 slot, u64 pte, u64 itir, u64 ifa)
402 {
403 int index;
404 u64 ps, va, rid;
405 thash_cb_t *hcb;
407 ps = itir_ps(itir);
408 va = PAGEALIGN(ifa, ps);
409 index = vtr_find_overlap(vcpu, va, ps, ISIDE_TLB);
410 if (index >=0) {
411 // generate MCA.
412 panic("Tlb conflict!!");
413 return IA64_FAULT;
414 }
415 hcb = vmx_vcpu_get_vtlb(vcpu);
416 thash_purge_entries(hcb, va, ps);
417 vcpu_get_rr(vcpu, va, &rid);
418 rid = rid& RR_RID_MASK;
419 vmx_vcpu_set_tr((thash_data_t *)&vcpu->arch.itrs[slot], pte, itir, va, rid);
420 vcpu_quick_region_set(PSCBX(vcpu,itr_regions),va);
421 return IA64_NO_FAULT;
422 }
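/*
 * Emulate itr.d: install a guest data translation register, tagging
 * I/O pages with VTLB_PTE_IO, and record it in vcpu->arch.dtrs[slot].
 */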
425 IA64FAULT vmx_vcpu_itr_d(VCPU *vcpu, u64 slot, u64 pte, u64 itir, u64 ifa)
426 {
427 int index;
428 u64 ps, va, gpfn, rid;
429 thash_cb_t *hcb;
431 ps = itir_ps(itir);
432 va = PAGEALIGN(ifa, ps);
433 index = vtr_find_overlap(vcpu, va, ps, DSIDE_TLB);
434 if (index>=0) {
435 // generate MCA.
436 panic("Tlb conflict!!");
437 return IA64_FAULT;
438 }
439 hcb = vmx_vcpu_get_vtlb(vcpu);
440 thash_purge_entries(hcb, va, ps);
441 gpfn = (pte & _PAGE_PPN_MASK)>> PAGE_SHIFT;
442 if(__gpfn_is_io(vcpu->domain,gpfn))
443 pte |= VTLB_PTE_IO;
444 vcpu_get_rr(vcpu, va, &rid);
445 rid = rid& RR_RID_MASK;
446 vmx_vcpu_set_tr((thash_data_t *)&vcpu->arch.dtrs[slot], pte, itir, va, rid);
447 vcpu_quick_region_set(PSCBX(vcpu,dtr_regions),va);
448 return IA64_NO_FAULT;
449 }
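/*
 * Emulate ptr.d: clear the present bit of an overlapping data TR, if
 * any, then purge the range from the vTLB.
 */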
453 IA64FAULT vmx_vcpu_ptr_d(VCPU *vcpu,UINT64 ifa,UINT64 ps)
454 {
455 int index;
456 u64 va;
457 thash_cb_t *hcb;
459 va = PAGEALIGN(ifa, ps);
460 index = vtr_find_overlap(vcpu, va, ps, DSIDE_TLB);
461 if (index>=0) {
462 vcpu->arch.dtrs[index].pte.p=0;
463 index = vtr_find_overlap(vcpu, va, ps, DSIDE_TLB);
464 }
465 hcb = vmx_vcpu_get_vtlb(vcpu);
466 thash_purge_entries(hcb, va, ps);
467 return IA64_NO_FAULT;
468 }
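/*
 * Emulate ptr.i: clear the present bit of an overlapping instruction
 * TR, if any, then purge the range from the vTLB.
 */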
470 IA64FAULT vmx_vcpu_ptr_i(VCPU *vcpu,UINT64 ifa,UINT64 ps)
471 {
472 int index;
473 u64 va;
474 thash_cb_t *hcb;
476 va = PAGEALIGN(ifa, ps);
477 index = vtr_find_overlap(vcpu, va, ps, ISIDE_TLB);
478 if (index>=0) {
479 vcpu->arch.itrs[index].pte.p=0;
480 index = vtr_find_overlap(vcpu, va, ps, ISIDE_TLB);
481 }
482 hcb = vmx_vcpu_get_vtlb(vcpu);
483 thash_purge_entries(hcb, va, ps);
484 return IA64_NO_FAULT;
485 }
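/* Emulate ptc.l: purge the given range from the per-vcpu translation structures. */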
487 IA64FAULT vmx_vcpu_ptc_l(VCPU *vcpu, UINT64 va, UINT64 ps)
488 {
489 thash_cb_t *hcb;
490 va = PAGEALIGN(va, ps);
491 hcb = vmx_vcpu_get_vtlb(vcpu);
492 thash_purge_entries(hcb, va, ps);
493 return IA64_NO_FAULT;
494 }
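/* Emulate ptc.e: flush all per-vcpu translation entries. */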
497 IA64FAULT vmx_vcpu_ptc_e(VCPU *vcpu, UINT64 va)
498 {
499 thash_cb_t *hcb;
500 hcb = vmx_vcpu_get_vtlb(vcpu);
501 thash_purge_all(hcb);
502 return IA64_NO_FAULT;
503 }
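/*
 * Emulate ptc.g: only the local purge is performed here, and
 * IA64_ILLOP_FAULT is returned (the global purge of other vcpus is not
 * done in this path).
 */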
505 IA64FAULT vmx_vcpu_ptc_g(VCPU *vcpu, UINT64 va, UINT64 ps)
506 {
507 vmx_vcpu_ptc_l(vcpu, va, ps);
508 return IA64_ILLOP_FAULT;
509 }
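/* Emulate ptc.ga: currently handled the same way as ptc.l. */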
511 IA64FAULT vmx_vcpu_ptc_ga(VCPU *vcpu,UINT64 va,UINT64 ps)
512 {
513 vmx_vcpu_ptc_l(vcpu, va, ps);
514 return IA64_NO_FAULT;
515 }
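/*
 * Emulate thash.  With the long-format VHPT (pta.vf set) this is only
 * pseudo-supported: the VSA result is discarded and the PTA base
 * (pta.val & ~0xffff) is returned instead.  For the short format the
 * usual linear hash of the address into the guest VHPT is computed.
 */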
518 IA64FAULT vmx_vcpu_thash(VCPU *vcpu, UINT64 vadr, UINT64 *pval)
519 {
520 PTA vpta;
521 ia64_rr vrr;
522 u64 vhpt_offset;
523 vmx_vcpu_get_pta(vcpu, &vpta.val);
524 vcpu_get_rr(vcpu, vadr, &vrr.rrval);
525 if(vpta.vf){
526 *pval = ia64_call_vsa(PAL_VPS_THASH,vadr,vrr.rrval,vpta.val,0,0,0,0);
527 *pval = vpta.val & ~0xffff;
528 }else{
529 vhpt_offset=((vadr>>vrr.ps)<<3)&((1UL<<(vpta.size))-1);
530 *pval = (vadr&VRN_MASK)|
531 (vpta.val<<3>>(vpta.size+3)<<(vpta.size))|
532 vhpt_offset;
533 }
534 return IA64_NO_FAULT;
535 }
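/*
 * Emulate ttag.  For the long-format VHPT the tag comes from
 * PAL_VPS_TTAG; for the short format a constant 1 is returned, since
 * short-format entries carry no tag.
 */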
538 IA64FAULT vmx_vcpu_ttag(VCPU *vcpu, UINT64 vadr, UINT64 *pval)
539 {
540 ia64_rr vrr;
541 PTA vpta;
542 vmx_vcpu_get_pta(vcpu, &vpta.val);
543 vcpu_get_rr(vcpu, vadr, &vrr.rrval);
544 if(vpta.vf){
545 *pval = ia64_call_vsa(PAL_VPS_TTAG,vadr,vrr.rrval,0,0,0,0,0);
546 }else{
547 *pval = 1;
548 }
549 return IA64_NO_FAULT;
550 }
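/*
 * Emulate tpa: translate vadr to a guest physical address through the
 * vTLB first, then the machine VHPT.  When no translation is found the
 * appropriate fault (page not present, DNAT page, alternate/nested
 * DTLB, DTLB or VHPT fault) is injected and IA64_FAULT is returned.
 */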
554 IA64FAULT vmx_vcpu_tpa(VCPU *vcpu, UINT64 vadr, UINT64 *padr)
555 {
556 thash_data_t *data;
557 thash_cb_t *hcb;
558 ISR visr,pt_isr;
559 REGS *regs;
560 u64 vhpt_adr;
561 IA64_PSR vpsr;
562 hcb = vmx_vcpu_get_vtlb(vcpu);
563 regs=vcpu_regs(vcpu);
564 pt_isr.val=VMX(vcpu,cr_isr);
565 visr.val=0;
566 visr.ei=pt_isr.ei;
567 visr.ir=pt_isr.ir;
568 vpsr.val = vmx_vcpu_get_psr(vcpu);
569 if(vpsr.ic==0){
570 visr.ni=1;
571 }
572 visr.na=1;
573 data = vtlb_lookup(hcb, vadr, DSIDE_TLB);
574 if(data){
575 if(data->p==0){
576 visr.na=1;
577 vcpu_set_isr(vcpu,visr.val);
578 page_not_present(vcpu, vadr);
579 return IA64_FAULT;
580 }else if(data->ma == VA_MATTR_NATPAGE){
581 visr.na = 1;
582 vcpu_set_isr(vcpu, visr.val);
583 dnat_page_consumption(vcpu, vadr);
584 return IA64_FAULT;
585 }else{
586 *padr = (data->ppn<<12) | (vadr&(PSIZE(data->ps)-1));
587 return IA64_NO_FAULT;
588 }
589 }
590 data = vhpt_lookup(vadr);
591 if(data){
592 if(data->p==0){
593 visr.na=1;
594 vcpu_set_isr(vcpu,visr.val);
595 page_not_present(vcpu, vadr);
596 return IA64_FAULT;
597 }else if(data->ma == VA_MATTR_NATPAGE){
598 visr.na = 1;
599 vcpu_set_isr(vcpu, visr.val);
600 dnat_page_consumption(vcpu, vadr);
601 return IA64_FAULT;
602 }else{
603 *padr = ((*(mpt_table+arch_to_xen_ppn(data->ppn)))<<PAGE_SHIFT) | (vadr&(PAGE_SIZE-1));
604 return IA64_NO_FAULT;
605 }
606 }
607 else{
608 if(!vhpt_enabled(vcpu, vadr, NA_REF)){
609 if(vpsr.ic){
610 vcpu_set_isr(vcpu, visr.val);
611 alt_dtlb(vcpu, vadr);
612 return IA64_FAULT;
613 }
614 else{
615 nested_dtlb(vcpu);
616 return IA64_FAULT;
617 }
618 }
619 else{
620 vmx_vcpu_thash(vcpu, vadr, &vhpt_adr);
621 data = vtlb_lookup(hcb, vhpt_adr, DSIDE_TLB);
622 if(data){
623 if(vpsr.ic){
624 vcpu_set_isr(vcpu, visr.val);
625 dtlb_fault(vcpu, vadr);
626 return IA64_FAULT;
627 }
628 else{
629 nested_dtlb(vcpu);
630 return IA64_FAULT;
631 }
632 }
633 else{
634 if(vpsr.ic){
635 vcpu_set_isr(vcpu, visr.val);
636 dvhpt_fault(vcpu, vadr);
637 return IA64_FAULT;
638 }
639 else{
640 nested_dtlb(vcpu);
641 return IA64_FAULT;
642 }
643 }
644 }
645 }
646 }
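/*
 * Emulate tak: return the protection key of the translation for vadr.
 * A key of 1 is returned when the short-format VHPT is in use, the
 * address is unimplemented, or no present vTLB entry is found.
 */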
648 IA64FAULT vmx_vcpu_tak(VCPU *vcpu, UINT64 vadr, UINT64 *key)
649 {
650 thash_data_t *data;
651 thash_cb_t *hcb;
652 PTA vpta;
653 vmx_vcpu_get_pta(vcpu, &vpta.val);
654 if(vpta.vf==0 || unimplemented_gva(vcpu, vadr)){
655 *key=1;
656 return IA64_NO_FAULT;
657 }
658 hcb = vmx_vcpu_get_vtlb(vcpu);
659 data = vtlb_lookup(hcb, vadr, DSIDE_TLB);
660 if(!data||!data->p){
661 *key=1;
662 }else{
663 *key=data->key;
664 }
665 return IA64_NO_FAULT;
666 }
668 /*
669 * [FIXME] Is there any effective way to move this routine
670 * into vmx_uaccess.h? struct exec_domain is an incomplete type
671 * there...
672 *
673 * This is the interface to look up the virtual TLB and return
674 * the corresponding machine address in the 2nd parameter.
675 * The 3rd parameter returns how many bytes are mapped by the
676 * matched vTLB entry, so the caller can copy more at once.
677 *
678 * If the lookup fails, -EFAULT is returned; otherwise 0 is
679 * returned. All upper-level domain access utilities rely on
680 * this routine to determine the real machine address.
681 *
682 * Yes, put_user and get_user are somewhat slowed down by it.
683 * However, this step is necessary for any vmx domain virtual
684 * address, since that is a different address space from the
685 * HV's. A short-circuit may be added later for special cases.
686 */
687 long
688 __domain_va_to_ma(unsigned long va, unsigned long* ma, unsigned long *len)
689 {
690 unsigned long mpfn, gpfn, m, n = *len;
691 thash_cb_t *vtlb;
692 unsigned long end; /* end of the area mapped by current entry */
693 thash_data_t *entry;
694 struct vcpu *v = current;
696 vtlb = vmx_vcpu_get_vtlb(v);
697 entry = vtlb_lookup(vtlb, va, DSIDE_TLB);
698 if (entry == NULL)
699 return -EFAULT;
701 gpfn =(entry->ppn>>(PAGE_SHIFT-12));
702 gpfn =PAGEALIGN(gpfn,(entry->ps-PAGE_SHIFT));
703 gpfn = gpfn | POFFSET(va>>PAGE_SHIFT,(entry->ps-PAGE_SHIFT));
705 mpfn = gmfn_to_mfn(v->domain, gpfn);
706 m = (mpfn<<PAGE_SHIFT) | (va & (PAGE_SIZE - 1));
707 /* machine address may be not continuous */
708 end = PAGEALIGN(m, PAGE_SHIFT) + PAGE_SIZE;
709 /*end = PAGEALIGN(m, entry->ps) + PSIZE(entry->ps);*/
710 /* Current entry can't map all requested area */
711 if ((m + n) > end)
712 n = end - m;
714 *ma = m;
715 *len = n;
716 return 0;
717 }