ia64/xen-unstable

view xen/arch/ia64/vmmu.c @ 6552:a9873d384da4

Merge.
author adsharma@los-vmm.sc.intel.com
date Thu Aug 25 12:24:48 2005 -0700 (2005-08-25)
parents 112d44270733 fa0754a9f64f
children dfaf788ab18c
/* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
/*
 * vmmu.c: virtual memory management unit components.
 * Copyright (c) 2005, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com)
 * Yaozu Dong (Eddie Dong) (Eddie.dong@intel.com)
 */
#include <linux/sched.h>
#include <linux/mm.h>
#include <asm/tlb.h>
#include <asm/gcc_intrin.h>
#include <asm/vcpu.h>
#include <linux/interrupt.h>
#include <asm/vmx_vcpu.h>
#include <asm/vmx_mm_def.h>
#include <asm/vmx.h>
#include <asm/hw_irq.h>
#include <asm/vmx_pal_vsa.h>
#include <asm/kregs.h>
/*
 * Architected ppns are in 4KB units, while a Xen page
 * may be a different size (1 << PAGE_SHIFT).
 */
static inline u64 arch_ppn_to_xen_ppn(u64 appn)
{
    return (appn << ARCH_PAGE_SHIFT) >> PAGE_SHIFT;
}

static inline u64 xen_ppn_to_arch_ppn(u64 xppn)
{
    return (xppn << PAGE_SHIFT) >> ARCH_PAGE_SHIFT;
}
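
/*
 * Worked example (illustrative; assumes ARCH_PAGE_SHIFT == 12 and a 16KB
 * Xen page, i.e. PAGE_SHIFT == 14): architected ppns 0x4000..0x4003 all
 * fall inside the same 16KB frame, so arch_ppn_to_xen_ppn(0x4001) == 0x1000,
 * while xen_ppn_to_arch_ppn(0x1000) == 0x4000, i.e. the round trip lands on
 * the first 4KB sub-frame of that page.
 */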

/*
 * Get the machine page frame number for a guest pfn.
 * Input:
 *  domid: owning domain, or DOMID_SELF for the current domain
 *  gpfn:  guest pfn in architected 4KB units (looked up through Xen's
 *         16KB physical-to-machine table)
 *  pages: number of pages covered by the mapping
 */
u64 get_mfn(domid_t domid, u64 gpfn, u64 pages)
{
    struct domain *d;
    u64 i, xen_gppn, xen_mppn, mpfn;

    if ( domid == DOMID_SELF ) {
        d = current->domain;
    }
    else {
        d = find_domain_by_id(domid);
    }
    xen_gppn = arch_ppn_to_xen_ppn(gpfn);
    xen_mppn = __gpfn_to_mfn(d, xen_gppn);
/*
    for (i=0; i<pages; i++) {
        if ( __gpfn_to_mfn(d, gpfn+i) == INVALID_MFN ) {
            return INVALID_MFN;
        }
    }
*/
    mpfn = xen_ppn_to_arch_ppn(xen_mppn);
    mpfn = mpfn | (((1UL << (PAGE_SHIFT-12)) - 1) & gpfn);
    return mpfn;
}
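
/*
 * Note: the final OR keeps the low (PAGE_SHIFT - 12) bits of gpfn, i.e.
 * the index of the 4KB sub-frame within the Xen page, so the returned
 * machine ppn stays in the caller's architected 4KB granularity.
 */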

/*
 * The VRN bits of va select which rr to read.
 */
rr_t vmmu_get_rr(VCPU *vcpu, u64 va)
{
    rr_t   vrr;
    vmx_vcpu_get_rr(vcpu, va, &vrr.value);
    return vrr;
}

void recycle_message(thash_cb_t *hcb, u64 para)
{
    printk("hcb=%p recycled with %lx\n", hcb, para);
}

/*
 * Purge all guest TCs on the logical processor.
 * Instead of purging all LP TCs, we should only purge
 * TCs that belong to this guest.
 */
void
purge_machine_tc_by_domid(domid_t domid)
{
#ifndef PURGE_GUEST_TC_ONLY
    // purge all TCs
    struct ia64_pal_retval  result;
    u64     addr;
    u32     count1, count2;
    u32     stride1, stride2;
    u32     i, j;
    u64     psr;

    result = ia64_pal_call_static(PAL_PTCE_INFO, 0, 0, 0, 0);
    if ( result.status != 0 ) {
        panic ("PAL_PTCE_INFO failed\n");
    }
    addr = result.v0;
    count1 = HIGH_32BITS(result.v1);
    count2 = LOW_32BITS(result.v1);
    stride1 = HIGH_32BITS(result.v2);
    stride2 = LOW_32BITS(result.v2);

    local_irq_save(psr);
    for (i = 0; i < count1; i++) {
        for (j = 0; j < count2; j++) {
            ia64_ptce(addr);
            addr += stride2;
        }
        addr += stride1;
    }
    local_irq_restore(psr);
#else
    // purge only the TCs that belong to this guest.
#endif
}
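
/*
 * Rough layout (illustrative) of the single VCPU_TLB_SIZE block that
 * init_domain_vhpt() and init_domain_tlb() below carve out of one
 * VCPU_TLB_ORDER domheap allocation:
 *
 *   vbase                                        vbase + VCPU_TLB_SIZE
 *   |<------ hash table ------>|<-- collision chain buffer -->|structs|
 *        (VCPU_TLB_SIZE/2)
 *
 * The thash_cb_t and its vhpt_special/tlb_special_t are allocated from
 * the top of the block downwards by the --((type *)vcur) pre-decrements;
 * the chain buffer (cch_buf/cch_sz) ends where those structs begin.
 */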

static thash_cb_t *init_domain_vhpt(struct vcpu *d)
{
    struct pfn_info *page;
    void    *vbase, *vcur;
    vhpt_special *vs;
    thash_cb_t  *vhpt;
    PTA pta_value;

    page = alloc_domheap_pages(NULL, VCPU_TLB_ORDER, 0);
    if ( page == NULL ) {
        panic("Not enough contiguous memory for init_domain_mm\n");
    }
    vbase = page_to_virt(page);
    printk("Allocate domain vhpt at 0x%lx\n", (u64)vbase);
    memset(vbase, 0, VCPU_TLB_SIZE);
    vcur = (void*)((u64)vbase + VCPU_TLB_SIZE);
    vhpt = --((thash_cb_t*)vcur);
    vhpt->ht = THASH_VHPT;
    vhpt->vcpu = d;
    vhpt->hash_func = machine_thash;
    vs = --((vhpt_special *)vcur);

    /* Setup guest pta */
    pta_value.val = 0;
    pta_value.ve = 1;
    pta_value.vf = 1;
    pta_value.size = VCPU_TLB_SHIFT - 1;    /* 2M */
    pta_value.base = ((u64)vbase) >> PTA_BASE_SHIFT;
    d->arch.arch_vmx.mpta = pta_value.val;

    vhpt->vs = vs;
    vhpt->vs->get_mfn = get_mfn;
    vhpt->vs->tag_func = machine_ttag;
    vhpt->hash = vbase;
    vhpt->hash_sz = VCPU_TLB_SIZE / 2;
    vhpt->cch_buf = (u64)vbase + vhpt->hash_sz;
    vhpt->cch_sz = (u64)vcur - (u64)vhpt->cch_buf;
    vhpt->recycle_notifier = recycle_message;
    thash_init(vhpt, VCPU_TLB_SHIFT - 1);
    return vhpt;
}

thash_cb_t *init_domain_tlb(struct vcpu *d)
{
    struct pfn_info *page;
    void    *vbase, *vcur;
    tlb_special_t  *ts;
    thash_cb_t  *tlb;

    page = alloc_domheap_pages(NULL, VCPU_TLB_ORDER, 0);
    if ( page == NULL ) {
        panic("Not enough contiguous memory for init_domain_mm\n");
    }
    vbase = page_to_virt(page);
    printk("Allocate domain tlb at 0x%lx\n", (u64)vbase);
    memset(vbase, 0, VCPU_TLB_SIZE);
    vcur = (void*)((u64)vbase + VCPU_TLB_SIZE);
    tlb = --((thash_cb_t*)vcur);
    tlb->ht = THASH_TLB;
    tlb->vcpu = d;
    ts = --((tlb_special_t *)vcur);
    tlb->ts = ts;
    tlb->ts->vhpt = init_domain_vhpt(d);
    tlb->hash_func = machine_thash;
    tlb->hash = vbase;
    tlb->hash_sz = VCPU_TLB_SIZE / 2;
    tlb->cch_buf = (u64)vbase + tlb->hash_sz;
    tlb->cch_sz = (u64)vcur - (u64)tlb->cch_buf;
    tlb->recycle_notifier = recycle_message;
    thash_init(tlb, VCPU_TLB_SHIFT - 1);
    return tlb;
}

/* Allocate the physical-to-machine mapping table for domN.
 * FIXME: this interface may be removed later if that table is provided
 * by the control panel. Dom0 has gpfn identical to mfn and doesn't need
 * this interface at all.
 */
void
alloc_pmt(struct domain *d)
{
    struct pfn_info *page;

    /* Only called once */
    ASSERT(d->arch.pmt);

    page = alloc_domheap_pages(NULL, get_order(d->max_pages), 0);
    ASSERT(page);

    d->arch.pmt = page_to_virt(page);
    memset(d->arch.pmt, 0x55, d->max_pages * 8);
}

/*
 * Insert a guest TLB entry into the machine TLB.
 *  tlb: entry in TLB format
 */
void machine_tlb_insert(struct vcpu *d, thash_data_t *tlb)
{
    u64     saved_itir, saved_ifa, saved_rr;
    u64     pages;
    thash_data_t    mtlb;
    rr_t    vrr;
    unsigned int    cl = tlb->cl;

    mtlb.ifa = tlb->vadr;
    mtlb.itir = tlb->itir & ~ITIR_RV_MASK;
    vrr = vmmu_get_rr(d, mtlb.ifa);
    //vmx_vcpu_get_rr(d, mtlb.ifa, &vrr.value);
    pages = PSIZE(vrr.ps) >> PAGE_SHIFT;
    mtlb.page_flags = tlb->page_flags & ~PAGE_FLAGS_RV_MASK;
    mtlb.ppn = get_mfn(DOMID_SELF, tlb->ppn, pages);
    if (mtlb.ppn == INVALID_MFN)
        panic("Machine tlb insert with invalid mfn number.\n");

    __asm __volatile("rsm psr.ic|psr.i;; srlz.i");

    saved_itir = ia64_getreg(_IA64_REG_CR_ITIR);
    saved_ifa = ia64_getreg(_IA64_REG_CR_IFA);
    saved_rr = ia64_get_rr(mtlb.ifa);

    ia64_setreg(_IA64_REG_CR_ITIR, mtlb.itir);
    ia64_setreg(_IA64_REG_CR_IFA, mtlb.ifa);
    /* After the rr is switched, only access the memory stack,
     * which is mapped by TR.
     */
    ia64_set_rr(mtlb.ifa, vmx_vrrtomrr(d, vrr.value));
    ia64_srlz_d();
    if ( cl == ISIDE_TLB ) {
        ia64_itci(mtlb.page_flags);
        ia64_srlz_i();
    }
    else {
        ia64_itcd(mtlb.page_flags);
        ia64_srlz_d();
    }
    ia64_set_rr(mtlb.ifa, saved_rr);
    ia64_srlz_d();
    ia64_setreg(_IA64_REG_CR_IFA, saved_ifa);
    ia64_setreg(_IA64_REG_CR_ITIR, saved_itir);
    __asm __volatile("ssm psr.ic|psr.i;; srlz.i");
}

u64 machine_thash(PTA pta, u64 va, u64 rid, u64 ps)
{
    u64     saved_pta, saved_rr0;
    u64     hash_addr, tag;
    unsigned long psr;
    struct vcpu *v = current;
    rr_t    vrr;

    saved_pta = ia64_getreg(_IA64_REG_CR_PTA);
    saved_rr0 = ia64_get_rr(0);
    vrr.value = saved_rr0;
    vrr.rid = rid;
    vrr.ps = ps;

    va = (va << 3) >> 3;    // set VRN to 0.
    // TODO: Set to enforce lazy mode
    local_irq_save(psr);
    ia64_setreg(_IA64_REG_CR_PTA, pta.val);
    ia64_set_rr(0, vmx_vrrtomrr(v, vrr.value));
    ia64_srlz_d();

    hash_addr = ia64_thash(va);
    ia64_setreg(_IA64_REG_CR_PTA, saved_pta);

    ia64_set_rr(0, saved_rr0);
    ia64_srlz_d();
    local_irq_restore(psr);
    return hash_addr;
}

u64 machine_ttag(PTA pta, u64 va, u64 rid, u64 ps)
{
    u64     saved_pta, saved_rr0;
    u64     hash_addr, tag;
    u64     psr;
    struct vcpu *v = current;
    rr_t    vrr;

    // TODO: Set to enforce lazy mode
    saved_pta = ia64_getreg(_IA64_REG_CR_PTA);
    saved_rr0 = ia64_get_rr(0);
    vrr.value = saved_rr0;
    vrr.rid = rid;
    vrr.ps = ps;

    va = (va << 3) >> 3;    // set VRN to 0.
    local_irq_save(psr);
    ia64_setreg(_IA64_REG_CR_PTA, pta.val);
    ia64_set_rr(0, vmx_vrrtomrr(v, vrr.value));
    ia64_srlz_d();

    tag = ia64_ttag(va);
    ia64_setreg(_IA64_REG_CR_PTA, saved_pta);

    ia64_set_rr(0, saved_rr0);
    ia64_srlz_d();
    local_irq_restore(psr);
    return tag;
}
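
/*
 * Both helpers above follow the same pattern: with interrupts disabled
 * they temporarily install the guest's PTA and a region register 0 value
 * built from the guest rid/ps, let the hardware thash/ttag instructions
 * do the short-format VHPT arithmetic, and then restore the saved PTA
 * and rr0 before re-enabling interrupts.
 */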

/*
 * Purge the machine tlb.
 * INPUT
 *  rid: guest rid
 *  va:  only bits 0:60 are valid
 *  ps:  (1 << ps) is the size of the address range to purge
 */
void machine_tlb_purge(u64 rid, u64 va, u64 ps)
{
    u64     saved_rr0;
    u64     psr;
    rr_t    vrr;

    va = (va << 3) >> 3;    // set VRN to 0.
    saved_rr0 = ia64_get_rr(0);
    vrr.value = saved_rr0;
    vrr.rid = rid;
    vrr.ps = ps;
    local_irq_save(psr);
    ia64_set_rr(0, vmx_vrrtomrr(current, vrr.value));
    ia64_srlz_d();
    ia64_ptcl(va, ps << 2);
    ia64_set_rr(0, saved_rr0);
    ia64_srlz_d();
    local_irq_restore(psr);
}

int vhpt_enabled(VCPU *vcpu, uint64_t vadr, vhpt_ref_t ref)
{
    ia64_rr  vrr;
    PTA      vpta;
    IA64_PSR vpsr;

    vpsr.val = vmx_vcpu_get_psr(vcpu);
    vrr = vmx_vcpu_rr(vcpu, vadr);
    vmx_vcpu_get_pta(vcpu, &vpta.val);

    if ( vrr.ve & vpta.ve ) {
        switch ( ref ) {
        case DATA_REF:
        case NA_REF:
            return vpsr.dt;
        case INST_REF:
            return vpsr.dt && vpsr.it && vpsr.ic;
        case RSE_REF:
            return vpsr.dt && vpsr.rt;
        }
    }
    return 0;
}

int unimplemented_gva(VCPU *vcpu, u64 vadr)
{
    int bit = vcpu->domain->arch.imp_va_msb;
    u64 ladr = (vadr << 3) >> (3 + bit);

    if ( !ladr || ladr == (1U << (61 - bit)) - 1 ) {
        return 0;
    } else {
        return 1;
    }
}
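
/*
 * The check above works because ladr is vadr bits 60..imp_va_msb with
 * the 3 VRN bits stripped off: the address is implemented only when
 * those bits are all zeros or all ones, i.e. when the upper part of the
 * address sign-extends from the implemented bits, hence the 0 / all-ones
 * test returning 0 ("not unimplemented").
 */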

/*
 * Prefetch guest bundle code.
 * INPUT:
 *  gip:  guest IP to fetch from.
 *  code: buffer pointer to hold the read data (one 8-byte dword).
 */
int
fetch_code(VCPU *vcpu, u64 gip, u64 *code)
{
    u64     gpip;       // guest physical IP
    u64     mpa;
    thash_data_t    *tlb;
    rr_t    vrr;
    u64     mfn;

    if ( !(VMX_VPD(vcpu, vpsr) & IA64_PSR_IT) ) {   // I-side physical mode
        gpip = gip;
    }
    else {
        vmx_vcpu_get_rr(vcpu, gip, &vrr.value);
        tlb = vtlb_lookup_ex(vmx_vcpu_get_vtlb(vcpu),
                             vrr.rid, gip, ISIDE_TLB);
        if ( tlb == NULL ) panic("No entry found in ITLB\n");
        gpip = (tlb->ppn << 12) | ( gip & (PSIZE(tlb->ps)-1) );
    }
    mfn = __gpfn_to_mfn(vcpu->domain, gpip >> PAGE_SHIFT);
    if ( mfn == INVALID_MFN ) return 0;

    mpa = (gpip & (PAGE_SIZE-1)) | (mfn << PAGE_SHIFT);
    *code = *(u64*)__va(mpa);
    return 1;
}

IA64FAULT vmx_vcpu_itc_i(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa)
{
    thash_data_t data, *ovl;
    thash_cb_t  *hcb;
    search_section_t sections;
    rr_t    vrr;

    hcb = vmx_vcpu_get_vtlb(vcpu);
    data.page_flags = pte & ~PAGE_FLAGS_RV_MASK;
    data.itir = itir;
    data.vadr = PAGEALIGN(ifa, data.ps);
    data.tc = 1;
    data.cl = ISIDE_TLB;
    vmx_vcpu_get_rr(vcpu, ifa, &vrr);
    data.rid = vrr.rid;

    sections.tr = 1;
    sections.tc = 0;

    ovl = thash_find_overlap(hcb, &data, sections);
    if (ovl) {
        // generate MCA.
        panic("Tlb conflict!!");
    }
    thash_purge_and_insert(hcb, &data);
    return IA64_NO_FAULT;
}

IA64FAULT vmx_vcpu_itc_d(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa)
{
    thash_data_t data, *ovl;
    thash_cb_t  *hcb;
    search_section_t sections;
    rr_t    vrr;

    hcb = vmx_vcpu_get_vtlb(vcpu);
    data.page_flags = pte & ~PAGE_FLAGS_RV_MASK;
    data.itir = itir;
    data.vadr = PAGEALIGN(ifa, data.ps);
    data.tc = 1;
    data.cl = DSIDE_TLB;
    vmx_vcpu_get_rr(vcpu, ifa, &vrr);
    data.rid = vrr.rid;
    sections.tr = 1;
    sections.tc = 0;

    ovl = thash_find_overlap(hcb, &data, sections);
    if (ovl) {
        // generate MCA.
        panic("Tlb conflict!!");
    }
    thash_purge_and_insert(hcb, &data);
    return IA64_NO_FAULT;
}

/*
 * Return TRUE/FALSE for success of lock operation
 */
int vmx_lock_guest_dtc (VCPU *vcpu, UINT64 va, int lock)
{
    thash_cb_t  *hcb;
    rr_t  vrr;
    u64   preferred_size;

    vmx_vcpu_get_rr(vcpu, va, &vrr);
    hcb = vmx_vcpu_get_vtlb(vcpu);
    va = PAGEALIGN(va, vrr.ps);
    preferred_size = PSIZE(vrr.ps);
    return thash_lock_tc(hcb, va, preferred_size, vrr.rid, DSIDE_TLB, lock);
}

IA64FAULT vmx_vcpu_itr_i(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa, UINT64 idx)
{
    thash_data_t data, *ovl;
    thash_cb_t  *hcb;
    search_section_t sections;
    rr_t    vrr;

    hcb = vmx_vcpu_get_vtlb(vcpu);
    data.page_flags = pte & ~PAGE_FLAGS_RV_MASK;
    data.itir = itir;
    data.vadr = PAGEALIGN(ifa, data.ps);
    data.tc = 0;
    data.cl = ISIDE_TLB;
    vmx_vcpu_get_rr(vcpu, ifa, &vrr);
    data.rid = vrr.rid;
    sections.tr = 1;
    sections.tc = 0;

    ovl = thash_find_overlap(hcb, &data, sections);
    if (ovl) {
        // generate MCA.
        panic("Tlb conflict!!");
    }
    sections.tr = 0;
    sections.tc = 1;
    thash_purge_entries(hcb, &data, sections);
    thash_tr_insert(hcb, &data, ifa, idx);
    return IA64_NO_FAULT;
}

IA64FAULT vmx_vcpu_itr_d(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa, UINT64 idx)
{
    thash_data_t data, *ovl;
    thash_cb_t  *hcb;
    search_section_t sections;
    rr_t    vrr;

    hcb = vmx_vcpu_get_vtlb(vcpu);
    data.page_flags = pte & ~PAGE_FLAGS_RV_MASK;
    data.itir = itir;
    data.vadr = PAGEALIGN(ifa, data.ps);
    data.tc = 0;
    data.cl = DSIDE_TLB;
    vmx_vcpu_get_rr(vcpu, ifa, &vrr);
    data.rid = vrr.rid;
    sections.tr = 1;
    sections.tc = 0;

    ovl = thash_find_overlap(hcb, &data, sections);
    if (ovl) {
        // generate MCA.
        panic("Tlb conflict!!");
    }
    sections.tr = 0;
    sections.tc = 1;
    thash_purge_entries(hcb, &data, sections);
    thash_tr_insert(hcb, &data, ifa, idx);
    return IA64_NO_FAULT;
}

IA64FAULT vmx_vcpu_ptr_d(VCPU *vcpu, UINT64 vadr, UINT64 ps)
{
    thash_cb_t  *hcb;
    ia64_rr rr;
    search_section_t sections;

    hcb = vmx_vcpu_get_vtlb(vcpu);
    rr = vmx_vcpu_rr(vcpu, vadr);
    sections.tr = 1;
    sections.tc = 1;
    thash_purge_entries_ex(hcb, rr.rid, vadr, ps, sections, DSIDE_TLB);
    return IA64_NO_FAULT;
}

IA64FAULT vmx_vcpu_ptr_i(VCPU *vcpu, UINT64 vadr, UINT64 ps)
{
    thash_cb_t  *hcb;
    ia64_rr rr;
    search_section_t sections;

    hcb = vmx_vcpu_get_vtlb(vcpu);
    rr = vmx_vcpu_rr(vcpu, vadr);
    sections.tr = 1;
    sections.tc = 1;
    thash_purge_entries_ex(hcb, rr.rid, vadr, ps, sections, ISIDE_TLB);
    return IA64_NO_FAULT;
}

IA64FAULT vmx_vcpu_ptc_l(VCPU *vcpu, UINT64 vadr, UINT64 ps)
{
    thash_cb_t  *hcb;
    ia64_rr vrr;
    search_section_t sections;
    thash_data_t data, *ovl;

    hcb = vmx_vcpu_get_vtlb(vcpu);
    vrr = vmx_vcpu_rr(vcpu, vadr);
    sections.tr = 0;
    sections.tc = 1;
    vadr = PAGEALIGN(vadr, ps);

    thash_purge_entries_ex(hcb, vrr.rid, vadr, ps, sections, DSIDE_TLB);
    thash_purge_entries_ex(hcb, vrr.rid, vadr, ps, sections, ISIDE_TLB);
    return IA64_NO_FAULT;
}

IA64FAULT vmx_vcpu_ptc_e(VCPU *vcpu, UINT64 vadr)
{
    thash_cb_t  *hcb;

    hcb = vmx_vcpu_get_vtlb(vcpu);
    thash_purge_all(hcb);
    return IA64_NO_FAULT;
}

IA64FAULT vmx_vcpu_ptc_g(VCPU *vcpu, UINT64 vadr, UINT64 ps)
{
    vmx_vcpu_ptc_l(vcpu, vadr, ps);
    return IA64_ILLOP_FAULT;
}

IA64FAULT vmx_vcpu_ptc_ga(VCPU *vcpu, UINT64 vadr, UINT64 ps)
{
    vmx_vcpu_ptc_l(vcpu, vadr, ps);
    return IA64_NO_FAULT;
}

IA64FAULT vmx_vcpu_thash(VCPU *vcpu, UINT64 vadr, UINT64 *pval)
{
    PTA vpta;
    ia64_rr vrr;
    u64 vhpt_offset, tmp;

    vmx_vcpu_get_pta(vcpu, &vpta.val);
    vrr = vmx_vcpu_rr(vcpu, vadr);
    if ( vpta.vf ) {
        panic("THASH: long-format VHPT is not supported");
        *pval = ia64_call_vsa(PAL_VPS_THASH, vadr, vrr.rrval, vpta.val, 0, 0, 0, 0);
    } else {
        vhpt_offset = ((vadr >> vrr.ps) << 3) & ((1UL << (vpta.size)) - 1);
        *pval = (vadr & VRN_MASK) |
                (vpta.val << 3 >> (vpta.size + 3) << (vpta.size)) |
                vhpt_offset;
    }
    return IA64_NO_FAULT;
}
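
/*
 * Short-format hash used above, spelled out (informal sketch):
 *
 *   vhpt_offset = ((vadr >> rr.ps) << 3) & ((1 << pta.size) - 1)
 *   hash        = VRN(vadr)
 *               | (pta base, top 3 bits dropped, aligned to 2^pta.size)
 *               | vhpt_offset
 *
 * i.e. the per-page index times 8 (one 8-byte short-format entry per
 * page), wrapped into a 2^pta.size window at the PTA base inside the
 * faulting address's region.
 */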

IA64FAULT vmx_vcpu_ttag(VCPU *vcpu, UINT64 vadr, UINT64 *pval)
{
    ia64_rr vrr;
    PTA vpta;

    vmx_vcpu_get_pta(vcpu, &vpta.val);
    vrr = vmx_vcpu_rr(vcpu, vadr);
    if ( vpta.vf ) {
        panic("TTAG: long-format VHPT is not supported");
        *pval = ia64_call_vsa(PAL_VPS_TTAG, vadr, vrr.rrval, 0, 0, 0, 0, 0);
    } else {
        *pval = 1;
    }
    return IA64_NO_FAULT;
}

IA64FAULT vmx_vcpu_tpa(VCPU *vcpu, UINT64 vadr, UINT64 *padr)
{
    thash_data_t *data;
    thash_cb_t  *hcb;
    ia64_rr vrr;
    ISR visr, pt_isr;
    REGS *regs;
    u64 vhpt_adr;
    IA64_PSR vpsr;

    hcb = vmx_vcpu_get_vtlb(vcpu);
    vrr = vmx_vcpu_rr(vcpu, vadr);
    regs = vcpu_regs(vcpu);
    pt_isr.val = regs->cr_isr;
    visr.val = 0;
    visr.ei = pt_isr.ei;
    visr.ir = pt_isr.ir;
    vpsr.val = vmx_vcpu_get_psr(vcpu);
    if ( vpsr.ic == 0 ) {
        visr.ni = 1;
    }
    visr.na = 1;
    data = vtlb_lookup_ex(hcb, vrr.rid, vadr, DSIDE_TLB);
    if ( data ) {
        if ( data->p == 0 ) {
            visr.na = 1;
            vmx_vcpu_set_isr(vcpu, visr.val);
            page_not_present(vcpu, vadr);
            return IA64_FAULT;
        } else if ( data->ma == VA_MATTR_NATPAGE ) {
            visr.na = 1;
            vmx_vcpu_set_isr(vcpu, visr.val);
            dnat_page_consumption(vcpu, vadr);
            return IA64_FAULT;
        } else {
            *padr = (data->ppn << 12) | (vadr & (PSIZE(data->ps) - 1));
            return IA64_NO_FAULT;
        }
    } else {
        if ( !vhpt_enabled(vcpu, vadr, NA_REF) ) {
            if ( vpsr.ic ) {
                vmx_vcpu_set_isr(vcpu, visr.val);
                alt_dtlb(vcpu, vadr);
                return IA64_FAULT;
            } else {
                nested_dtlb(vcpu);
                return IA64_FAULT;
            }
        } else {
            vmx_vcpu_thash(vcpu, vadr, &vhpt_adr);
            vrr = vmx_vcpu_rr(vcpu, vhpt_adr);
            data = vtlb_lookup_ex(hcb, vrr.rid, vhpt_adr, DSIDE_TLB);
            if ( data ) {
                if ( vpsr.ic ) {
                    vmx_vcpu_set_isr(vcpu, visr.val);
                    dtlb_fault(vcpu, vadr);
                    return IA64_FAULT;
                } else {
                    nested_dtlb(vcpu);
                    return IA64_FAULT;
                }
            } else {
                if ( vpsr.ic ) {
                    vmx_vcpu_set_isr(vcpu, visr.val);
                    dvhpt_fault(vcpu, vadr);
                    return IA64_FAULT;
                } else {
                    nested_dtlb(vcpu);
                    return IA64_FAULT;
                }
            }
        }
    }
}

IA64FAULT vmx_vcpu_tak(VCPU *vcpu, UINT64 vadr, UINT64 *key)
{
    thash_data_t *data;
    thash_cb_t  *hcb;
    ia64_rr rr;
    PTA vpta;

    vmx_vcpu_get_pta(vcpu, &vpta.val);
    if ( vpta.vf == 0 || unimplemented_gva(vcpu, vadr) ) {
        *key = 1;
        return IA64_NO_FAULT;
    }
    hcb = vmx_vcpu_get_vtlb(vcpu);
    rr = vmx_vcpu_rr(vcpu, vadr);
    data = vtlb_lookup_ex(hcb, rr.rid, vadr, DSIDE_TLB);
    if ( !data || !data->p ) {
        *key = 1;
    } else {
        *key = data->key;
    }
    return IA64_NO_FAULT;
}

/*
 * [FIXME] Is there an effective way to move this routine into
 * vmx_uaccess.h? struct exec_domain is an incomplete type there...
 *
 * This is the interface for looking up the virtual TLB and returning
 * the corresponding machine address in the 2nd parameter. The 3rd
 * parameter returns how many bytes are mapped by the matched vTLB
 * entry, so the caller can copy more per call.
 *
 * On lookup failure -EFAULT is returned, otherwise 0. All upper-level
 * domain access utilities rely on this routine to determine the real
 * machine address.
 *
 * Yes, put_user and get_user become somewhat slow on top of it, but
 * these steps are necessary for any vmx domain virtual address, since
 * that is a different address space from the hypervisor's own. Some
 * short-circuit may be added later for special cases.
 */
long
__domain_va_to_ma(unsigned long va, unsigned long* ma, unsigned long *len)
{
    unsigned long   mpfn, gpfn, m, n = *len;
    thash_cb_t      *vtlb;
    unsigned long   end;    /* end of the area mapped by the current entry */
    thash_data_t    *entry;
    struct vcpu *v = current;
    ia64_rr vrr;

    vtlb = vmx_vcpu_get_vtlb(v);
    vrr = vmx_vcpu_rr(v, va);
    entry = vtlb_lookup_ex(vtlb, vrr.rid, va, DSIDE_TLB);
    if (entry == NULL)
        return -EFAULT;

    gpfn = (entry->ppn >> (PAGE_SHIFT-12));
    gpfn = PAGEALIGN(gpfn, (entry->ps - PAGE_SHIFT));
    gpfn = gpfn | POFFSET(va >> PAGE_SHIFT, (entry->ps - PAGE_SHIFT));

    mpfn = __gpfn_to_mfn(v->domain, gpfn);
    m = (mpfn << PAGE_SHIFT) | (va & (PAGE_SIZE - 1));
    /* machine addresses may not be contiguous */
    end = PAGEALIGN(m, PAGE_SHIFT) + PAGE_SIZE;
    /*end = PAGEALIGN(m, entry->ps) + PSIZE(entry->ps);*/
    /* the current entry can't map the whole requested area */
    if ((m + n) > end)
        n = end - m;

    *ma = m;
    *len = n;
    return 0;
}
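
/*
 * Usage sketch (hypothetical caller, for illustration only): a copy
 * routine would call __domain_va_to_ma() once per contiguous chunk,
 * since a single call may map less than the requested length.
 */
#if 0   /* illustrative only */
static long copy_from_domain_va(void *to, unsigned long va, unsigned long len)
{
    unsigned long ma, chunk;

    while (len > 0) {
        chunk = len;
        if (__domain_va_to_ma(va, &ma, &chunk))
            return -EFAULT;             /* no vTLB entry for this va */
        memcpy(to, __va(ma), chunk);    /* chunk never crosses a machine page */
        to = (char *)to + chunk;
        va += chunk;
        len -= chunk;
    }
    return 0;
}
#endif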