ia64/xen-unstable

xen/arch/ia64/vmmu.c @ 5305:1bc9c3554d61

bitkeeper revision 1.1665 (42a07586HA9yxpF1JLzGs-3hmLbG5g)

More include file cleanup fixes (for CONFIG_VTI)
Also a cpumask cleanup fix
Signed-off-by: Dan Magenheimer <dan.magenheimer@hp.com>
author djm@sportsman.spdomain
date Fri Jun 03 15:21:42 2005 +0000 (2005-06-03)
parents 8651a99cdc09
children bb00ea361eb8 38763dfb2287 f494f01b62a8 85fab828d6ff 649cd37aa1ab
/* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
/*
 * vmmu.c: virtual memory management unit components.
 * Copyright (c) 2005, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com)
 * Yaozu Dong (Eddie Dong) (Eddie.dong@intel.com)
 */
#include <linux/sched.h>
#include <linux/mm.h>
#include <asm/tlb.h>
#include <asm/gcc_intrin.h>
#include <asm/vcpu.h>
#include <xen/interrupt.h>
#include <asm/vmx_vcpu.h>
#include <asm/vmx_mm_def.h>
#include <asm/vmx.h>
#include <asm/hw_irq.h>
#include <asm/vmx_pal_vsa.h>
#include <asm/kregs.h>
/*
 * The architected ppn is in 4KB units, while the Xen
 * page size may differ (1 << PAGE_SHIFT).
 */
static inline u64 arch_ppn_to_xen_ppn(u64 appn)
{
    return (appn << ARCH_PAGE_SHIFT) >> PAGE_SHIFT;
}

static inline u64 xen_ppn_to_arch_ppn(u64 xppn)
{
    return (xppn << PAGE_SHIFT) >> ARCH_PAGE_SHIFT;
}
/*
 * Get the machine page frame number that backs a guest page frame number.
 * Input:
 *  domid: domain to translate for (DOMID_SELF means the current domain).
 *  gpfn:  guest page frame number, in architected (4KB) units.
 *  pages: number of pages in the range (currently unused, see below).
 */
u64 get_mfn(domid_t domid, u64 gpfn, u64 pages)
{
    struct domain *d;
    u64 i, xen_gppn, xen_mppn, mpfn;

    if ( domid == DOMID_SELF ) {
        d = current->domain;
    }
    else {
        d = find_domain_by_id(domid);
    }
    xen_gppn = arch_ppn_to_xen_ppn(gpfn);
    xen_mppn = __gpfn_to_mfn(d, xen_gppn);
    /*
    for (i=0; i<pages; i++) {
        if ( __gpfn_to_mfn(d, gpfn+i) == INVALID_MFN ) {
            return INVALID_MFN;
        }
    }
    */
    mpfn = xen_ppn_to_arch_ppn(xen_mppn);
    mpfn = mpfn | (((1UL << (PAGE_SHIFT - 12)) - 1) & gpfn);
    return mpfn;
}
/*
 * The VRN bits of va stand for which rr to get.
 */
rr_t vmmu_get_rr(VCPU *vcpu, u64 va)
{
    rr_t vrr;
    vmx_vcpu_get_rr(vcpu, va, &vrr.value);
    return vrr;
}

void recycle_message(thash_cb_t *hcb, u64 para)
{
    printk("hcb=%p recycled with %lx\n", hcb, para);
}
/*
 * Purge all guest TCs on the local logical processor.
 * Instead of purging all LP TCs, we should only purge
 * TCs that belong to this guest.
 */
void
purge_machine_tc_by_domid(domid_t domid)
{
#ifndef PURGE_GUEST_TC_ONLY
    // purge all TCs
    struct ia64_pal_retval result;
    u64 addr;
    u32 count1, count2;
    u32 stride1, stride2;
    u32 i, j;
    u64 psr;

    result = ia64_pal_call_static(PAL_PTCE_INFO, 0, 0, 0, 0);
    if ( result.status != 0 ) {
        panic ("PAL_PTCE_INFO failed\n");
    }
    addr = result.v0;
    count1 = HIGH_32BITS(result.v1);
    count2 = LOW_32BITS (result.v1);
    stride1 = HIGH_32BITS(result.v2);
    stride2 = LOW_32BITS (result.v2);
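    /*
     * PAL_PTCE_INFO returns the base address plus the outer/inner loop
     * counts and strides that a ptc.e loop must follow to purge the whole
     * TC: count1 outer iterations (advancing by stride1) each containing
     * count2 inner iterations (advancing by stride2).
     */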
    local_irq_save(psr);
    for (i = 0; i < count1; i++) {
        for (j = 0; j < count2; j++) {
            ia64_ptce(addr);
            addr += stride2;
        }
        addr += stride1;
    }
    local_irq_restore(psr);
#else
    // purge all TCs belonging to this guest.
#endif
}
static thash_cb_t *init_domain_vhpt(struct vcpu *d)
{
    struct pfn_info *page;
    void *vbase, *vcur;
    vhpt_special *vs;
    thash_cb_t *vhpt;
    PTA pta_value;

    page = alloc_domheap_pages (NULL, VCPU_TLB_ORDER);
    if ( page == NULL ) {
        panic("Not enough contiguous memory for init_domain_mm\n");
    }
    vbase = page_to_virt(page);
    printk("Allocate domain vhpt at 0x%lx\n", (u64)vbase);
    memset(vbase, 0, VCPU_TLB_SIZE);
    vcur = (void*)((u64)vbase + VCPU_TLB_SIZE);
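    /*
     * Layout of the VCPU_TLB_SIZE block set up below: the control
     * structures (thash_cb_t and vhpt_special) are carved off the top,
     * the lower half holds the hash table proper, and the space between
     * the hash table and the control structures becomes the
     * collision-chain buffer.
     */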
    vhpt = --((thash_cb_t*)vcur);
    vhpt->ht = THASH_VHPT;
    vhpt->vcpu = d;
    vhpt->hash_func = machine_thash;
    vs = --((vhpt_special *)vcur);

    /* Setup guest pta */
    pta_value.val = 0;
    pta_value.ve = 1;
    pta_value.vf = 1;
    pta_value.size = VCPU_TLB_SHIFT - 1;    /* 2M */
    pta_value.base = ((u64)vbase) >> PTA_BASE_SHIFT;
    d->arch.arch_vmx.mpta = pta_value.val;

    vhpt->vs = vs;
    vhpt->vs->get_mfn = get_mfn;
    vhpt->vs->tag_func = machine_ttag;
    vhpt->hash = vbase;
    vhpt->hash_sz = VCPU_TLB_SIZE / 2;
    vhpt->cch_buf = (u64)vbase + vhpt->hash_sz;
    vhpt->cch_sz = (u64)vcur - (u64)vhpt->cch_buf;
    vhpt->recycle_notifier = recycle_message;
    thash_init(vhpt, VCPU_TLB_SHIFT - 1);
    return vhpt;
}
thash_cb_t *init_domain_tlb(struct vcpu *d)
{
    struct pfn_info *page;
    void *vbase, *vcur;
    tlb_special_t *ts;
    thash_cb_t *tlb;

    page = alloc_domheap_pages (NULL, VCPU_TLB_ORDER);
    if ( page == NULL ) {
        panic("Not enough contiguous memory for init_domain_mm\n");
    }
    vbase = page_to_virt(page);
    printk("Allocate domain tlb at 0x%lx\n", (u64)vbase);
    memset(vbase, 0, VCPU_TLB_SIZE);
    vcur = (void*)((u64)vbase + VCPU_TLB_SIZE);
    tlb = --((thash_cb_t*)vcur);
    tlb->ht = THASH_TLB;
    tlb->vcpu = d;
    ts = --((tlb_special_t *)vcur);
    tlb->ts = ts;
    tlb->ts->vhpt = init_domain_vhpt(d);
    tlb->hash_func = machine_thash;
    tlb->hash = vbase;
    tlb->hash_sz = VCPU_TLB_SIZE / 2;
    tlb->cch_buf = (u64)vbase + tlb->hash_sz;
    tlb->cch_sz = (u64)vcur - (u64)tlb->cch_buf;
    tlb->recycle_notifier = recycle_message;
    thash_init(tlb, VCPU_TLB_SHIFT - 1);
    return tlb;
}
/* Allocate the physical-to-machine mapping table for domN.
 * FIXME: Later this interface may be removed, if that table is provided
 * by the control panel. Dom0 has gpfn identical to mfn, so it doesn't need
 * this interface at all.
 */
void
alloc_pmt(struct domain *d)
{
    struct pfn_info *page;

    /* Only called once */
    ASSERT(d->arch.pmt == NULL);

    page = alloc_domheap_pages(NULL, get_order(d->max_pages));
    ASSERT(page);

    d->arch.pmt = page_to_virt(page);
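    /* The table is assumed to hold one 8-byte entry per guest page (hence
     * the "* 8" below); the 0x55 fill presumably poisons entries that have
     * not yet been populated. */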
    memset(d->arch.pmt, 0x55, d->max_pages * 8);
}
/*
 * Insert a guest TLB entry into the machine TLB.
 * data: in TLB format
 */
void machine_tlb_insert(struct vcpu *d, thash_data_t *tlb)
{
    u64 saved_itir, saved_ifa, saved_rr;
    u64 pages;
    thash_data_t mtlb;
    rr_t vrr;
    unsigned int cl = tlb->cl;

    mtlb.ifa = tlb->vadr;
    mtlb.itir = tlb->itir & ~ITIR_RV_MASK;
    vrr = vmmu_get_rr(d, mtlb.ifa);
    //vmx_vcpu_get_rr(d, mtlb.ifa, &vrr.value);
    pages = PSIZE(vrr.ps) >> PAGE_SHIFT;
    mtlb.page_flags = tlb->page_flags & ~PAGE_FLAGS_RV_MASK;
    mtlb.ppn = get_mfn(DOMID_SELF, tlb->ppn, pages);
    if (mtlb.ppn == INVALID_MFN)
        panic("Machine tlb insert with invalid mfn number.\n");
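
    /*
     * Interruption collection and interrupts stay disabled while the
     * machine ITIR/IFA and the region register covering this address are
     * temporarily replaced, so nothing can observe or clobber the
     * intermediate state before the itc completes and the originals are
     * restored.
     */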
    __asm __volatile("rsm psr.ic|psr.i;; srlz.i");

    saved_itir = ia64_getreg(_IA64_REG_CR_ITIR);
    saved_ifa = ia64_getreg(_IA64_REG_CR_IFA);
    saved_rr = ia64_get_rr(mtlb.ifa);

    ia64_setreg(_IA64_REG_CR_ITIR, mtlb.itir);
    ia64_setreg(_IA64_REG_CR_IFA, mtlb.ifa);
    /* Only access memory stack which is mapped by TR,
     * after rr is switched.
     */
    ia64_set_rr(mtlb.ifa, vmx_vrrtomrr(d, vrr.value));
    ia64_srlz_d();
    if ( cl == ISIDE_TLB ) {
        ia64_itci(mtlb.page_flags);
        ia64_srlz_i();
    }
    else {
        ia64_itcd(mtlb.page_flags);
        ia64_srlz_d();
    }
    ia64_set_rr(mtlb.ifa, saved_rr);
    ia64_srlz_d();
    ia64_setreg(_IA64_REG_CR_IFA, saved_ifa);
    ia64_setreg(_IA64_REG_CR_ITIR, saved_itir);
    __asm __volatile("ssm psr.ic|psr.i;; srlz.i");
}
u64 machine_thash(PTA pta, u64 va, u64 rid, u64 ps)
{
    u64 saved_pta, saved_rr0;
    u64 hash_addr, tag;
    unsigned long psr;
    struct vcpu *v = current;
    rr_t vrr;
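
    /*
     * Compute the VHPT hash with the hardware: temporarily install the
     * guest PTA and an rr0 carrying the guest rid/ps, run thash on the
     * VRN-0 form of the address, then restore the original PTA and rr0.
     */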
    saved_pta = ia64_getreg(_IA64_REG_CR_PTA);
    saved_rr0 = ia64_get_rr(0);
    vrr.value = saved_rr0;
    vrr.rid = rid;
    vrr.ps = ps;

    va = (va << 3) >> 3;    // set VRN to 0.
    // TODO: Set to enforce lazy mode
    local_irq_save(psr);
    ia64_setreg(_IA64_REG_CR_PTA, pta.val);
    ia64_set_rr(0, vmx_vrrtomrr(v, vrr.value));
    ia64_srlz_d();

    hash_addr = ia64_thash(va);
    ia64_setreg(_IA64_REG_CR_PTA, saved_pta);

    ia64_set_rr(0, saved_rr0);
    ia64_srlz_d();
    local_irq_restore(psr);
    return hash_addr;
}
u64 machine_ttag(PTA pta, u64 va, u64 rid, u64 ps)
{
    u64 saved_pta, saved_rr0;
    u64 hash_addr, tag;
    u64 psr;
    struct vcpu *v = current;
    rr_t vrr;

    // TODO: Set to enforce lazy mode
    saved_pta = ia64_getreg(_IA64_REG_CR_PTA);
    saved_rr0 = ia64_get_rr(0);
    vrr.value = saved_rr0;
    vrr.rid = rid;
    vrr.ps = ps;

    va = (va << 3) >> 3;    // set VRN to 0.
    local_irq_save(psr);
    ia64_setreg(_IA64_REG_CR_PTA, pta.val);
    ia64_set_rr(0, vmx_vrrtomrr(v, vrr.value));
    ia64_srlz_d();

    tag = ia64_ttag(va);
    ia64_setreg(_IA64_REG_CR_PTA, saved_pta);

    ia64_set_rr(0, saved_rr0);
    ia64_srlz_d();
    local_irq_restore(psr);
    return tag;
}
/*
 * Purge the machine TLB.
 * INPUT
 *  rid: guest rid
 *  va: only bits 0:60 are valid
 *  ps: log2 of the size of the address range to purge (1 << ps bytes)
 */
void machine_tlb_purge(u64 rid, u64 va, u64 ps)
{
    u64 saved_rr0;
    u64 psr;
    rr_t vrr;

    va = (va << 3) >> 3;    // set VRN to 0.
    saved_rr0 = ia64_get_rr(0);
    vrr.value = saved_rr0;
    vrr.rid = rid;
    vrr.ps = ps;
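    /*
     * As in machine_thash() above: point rr0 at the guest rid/ps, issue
     * the purge against the VRN-0 form of the address, then restore rr0.
     */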
    local_irq_save(psr);
    ia64_set_rr( 0, vmx_vrrtomrr(current, vrr.value) );
    ia64_srlz_d();
    ia64_ptcl(va, ps << 2);
    ia64_set_rr( 0, saved_rr0 );
    ia64_srlz_d();
    local_irq_restore(psr);
}
int vhpt_enabled(VCPU *vcpu, uint64_t vadr, vhpt_ref_t ref)
{
    ia64_rr vrr;
    PTA vpta;
    IA64_PSR vpsr;

    vpsr.val = vmx_vcpu_get_psr(vcpu);
    vrr = vmx_vcpu_rr(vcpu, vadr);
    vmx_vcpu_get_pta(vcpu, &vpta.val);

    if ( vrr.ve & vpta.ve ) {
        switch ( ref ) {
        case DATA_REF:
        case NA_REF:
            return vpsr.dt;
        case INST_REF:
            return vpsr.dt && vpsr.it && vpsr.ic;
        case RSE_REF:
            return vpsr.dt && vpsr.rt;
        }
    }
    return 0;
}
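
/*
 * An address falls in the unimplemented virtual address hole when the bits
 * between the implemented MSB (imp_va_msb) and bit 60 are neither all zeros
 * nor all ones, i.e. the address is not a sign-extension of the implemented
 * part of the virtual region.
 */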
int unimplemented_gva(VCPU *vcpu, u64 vadr)
{
    int bit = vcpu->domain->arch.imp_va_msb;
    u64 ladr = (vadr << 3) >> (3 + bit);
    if ( !ladr || ladr == (1UL << (61 - bit)) - 1 ) {
        return 0;
    } else {
        return 1;
    }
}
/*
 * Fetch guest bundle code.
 * INPUT:
 *  gip: guest virtual IP to fetch from.
 *  code: buffer to hold the dword (8 bytes) read.
 */
int
fetch_code(VCPU *vcpu, u64 gip, u64 *code)
{
    u64 gpip;   // guest physical IP
    u64 mpa;
    thash_data_t *tlb;
    rr_t vrr;
    u64 mfn;

    if ( !(VMX_VPD(vcpu, vpsr) & IA64_PSR_IT) ) {   // I-side physical mode
        gpip = gip;
    }
    else {
        vmx_vcpu_get_rr(vcpu, gip, &vrr.value);
        tlb = vtlb_lookup_ex (vmx_vcpu_get_vtlb(vcpu),
                vrr.rid, gip, ISIDE_TLB );
        if ( tlb == NULL ) panic("No entry found in ITLB\n");
        gpip = (tlb->ppn << 12) | ( gip & (PSIZE(tlb->ps)-1) );
    }
    mfn = __gpfn_to_mfn(vcpu->domain, gpip >> PAGE_SHIFT);
    if ( mfn == INVALID_MFN ) return 0;

    mpa = (gpip & (PAGE_SIZE - 1)) | (mfn << PAGE_SHIFT);
    *code = *(u64*)__va(mpa);
    return 1;
}
IA64FAULT vmx_vcpu_itc_i(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa)
{
    thash_data_t data, *ovl;
    thash_cb_t *hcb;
    search_section_t sections;
    rr_t vrr;

    hcb = vmx_vcpu_get_vtlb(vcpu);
    data.page_flags = pte & ~PAGE_FLAGS_RV_MASK;
    data.itir = itir;
    data.vadr = PAGEALIGN(ifa, data.ps);
    data.section = THASH_TLB_TC;
    data.cl = ISIDE_TLB;
    vmx_vcpu_get_rr(vcpu, ifa, &vrr.value);
    data.rid = vrr.rid;

    sections.v = THASH_SECTION_TR;

    ovl = thash_find_overlap(hcb, &data, sections);
    if (ovl) {
        // generate MCA.
        panic("Tlb conflict!!");
    }
    sections.v = THASH_SECTION_TC;
    thash_purge_entries(hcb, &data, sections);
    thash_insert(hcb, &data, ifa);
    return IA64_NO_FAULT;
}
IA64FAULT vmx_vcpu_itc_d(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa)
{
    thash_data_t data, *ovl;
    thash_cb_t *hcb;
    search_section_t sections;
    rr_t vrr;

    hcb = vmx_vcpu_get_vtlb(vcpu);
    data.page_flags = pte & ~PAGE_FLAGS_RV_MASK;
    data.itir = itir;
    data.vadr = PAGEALIGN(ifa, data.ps);
    data.section = THASH_TLB_TC;
    data.cl = DSIDE_TLB;
    vmx_vcpu_get_rr(vcpu, ifa, &vrr.value);
    data.rid = vrr.rid;
    sections.v = THASH_SECTION_TR;

    ovl = thash_find_overlap(hcb, &data, sections);
    if (ovl) {
        // generate MCA.
        panic("Tlb conflict!!");
    }
    sections.v = THASH_SECTION_TC;
    thash_purge_entries(hcb, &data, sections);
    thash_insert(hcb, &data, ifa);
    return IA64_NO_FAULT;
}
IA64FAULT insert_foreignmap(VCPU *vcpu, UINT64 pte, UINT64 ps, UINT64 va)
{
    thash_data_t data, *ovl;
    thash_cb_t *hcb;
    search_section_t sections;
    rr_t vrr;

    hcb = vmx_vcpu_get_vtlb(vcpu);
    data.page_flags = pte & ~PAGE_FLAGS_RV_MASK;
    data.itir = 0;
    data.ps = ps;
    data.vadr = PAGEALIGN(va, ps);
    data.section = THASH_TLB_FM;
    data.cl = DSIDE_TLB;
    vmx_vcpu_get_rr(vcpu, va, &vrr.value);
    data.rid = vrr.rid;
    sections.v = THASH_SECTION_TR | THASH_SECTION_TC | THASH_SECTION_FM;

    ovl = thash_find_overlap(hcb, &data, sections);
    if (ovl) {
        // generate MCA.
        panic("Foreignmap Tlb conflict!!");
    }
    thash_insert(hcb, &data, va);
    return IA64_NO_FAULT;
}
IA64FAULT vmx_vcpu_itr_i(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa, UINT64 idx)
{
    thash_data_t data, *ovl;
    thash_cb_t *hcb;
    search_section_t sections;
    rr_t vrr;

    hcb = vmx_vcpu_get_vtlb(vcpu);
    data.page_flags = pte & ~PAGE_FLAGS_RV_MASK;
    data.itir = itir;
    data.vadr = PAGEALIGN(ifa, data.ps);
    data.section = THASH_TLB_TR;
    data.cl = ISIDE_TLB;
    vmx_vcpu_get_rr(vcpu, ifa, &vrr.value);
    data.rid = vrr.rid;
    sections.v = THASH_SECTION_TR;

    ovl = thash_find_overlap(hcb, &data, sections);
    if (ovl) {
        // generate MCA.
        panic("Tlb conflict!!");
    }
    sections.v = THASH_SECTION_TC;
    thash_purge_entries(hcb, &data, sections);
    thash_tr_insert(hcb, &data, ifa, idx);
    return IA64_NO_FAULT;
}
IA64FAULT vmx_vcpu_itr_d(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa, UINT64 idx)
{
    thash_data_t data, *ovl;
    thash_cb_t *hcb;
    search_section_t sections;
    rr_t vrr;

    hcb = vmx_vcpu_get_vtlb(vcpu);
    data.page_flags = pte & ~PAGE_FLAGS_RV_MASK;
    data.itir = itir;
    data.vadr = PAGEALIGN(ifa, data.ps);
    data.section = THASH_TLB_TR;
    data.cl = DSIDE_TLB;
    vmx_vcpu_get_rr(vcpu, ifa, &vrr.value);
    data.rid = vrr.rid;
    sections.v = THASH_SECTION_TR;

    ovl = thash_find_overlap(hcb, &data, sections);
    if (ovl) {
        // generate MCA.
        panic("Tlb conflict!!");
    }
    sections.v = THASH_SECTION_TC;
    thash_purge_entries(hcb, &data, sections);
    thash_tr_insert(hcb, &data, ifa, idx);
    return IA64_NO_FAULT;
}
IA64FAULT vmx_vcpu_ptr_d(VCPU *vcpu, UINT64 vadr, UINT64 ps)
{
    thash_cb_t *hcb;
    ia64_rr rr;
    search_section_t sections;

    hcb = vmx_vcpu_get_vtlb(vcpu);
    rr = vmx_vcpu_rr(vcpu, vadr);
    sections.v = THASH_SECTION_TR | THASH_SECTION_TC;
    thash_purge_entries_ex(hcb, rr.rid, vadr, ps, sections, DSIDE_TLB);
    return IA64_NO_FAULT;
}

IA64FAULT vmx_vcpu_ptr_i(VCPU *vcpu, UINT64 vadr, UINT64 ps)
{
    thash_cb_t *hcb;
    ia64_rr rr;
    search_section_t sections;

    hcb = vmx_vcpu_get_vtlb(vcpu);
    rr = vmx_vcpu_rr(vcpu, vadr);
    sections.v = THASH_SECTION_TR | THASH_SECTION_TC;
    thash_purge_entries_ex(hcb, rr.rid, vadr, ps, sections, ISIDE_TLB);
    return IA64_NO_FAULT;
}
IA64FAULT vmx_vcpu_ptc_l(VCPU *vcpu, UINT64 vadr, UINT64 ps)
{
    thash_cb_t *hcb;
    ia64_rr vrr;
    search_section_t sections;
    thash_data_t data, *ovl;

    hcb = vmx_vcpu_get_vtlb(vcpu);
    vrr = vmx_vcpu_rr(vcpu, vadr);
    sections.v = THASH_SECTION_TC;
    vadr = PAGEALIGN(vadr, ps);

    thash_purge_entries_ex(hcb, vrr.rid, vadr, ps, sections, DSIDE_TLB);
    thash_purge_entries_ex(hcb, vrr.rid, vadr, ps, sections, ISIDE_TLB);
    return IA64_NO_FAULT;
}
IA64FAULT vmx_vcpu_ptc_e(VCPU *vcpu, UINT64 vadr)
{
    thash_cb_t *hcb;

    hcb = vmx_vcpu_get_vtlb(vcpu);
    thash_purge_all(hcb);
    return IA64_NO_FAULT;
}

IA64FAULT vmx_vcpu_ptc_g(VCPU *vcpu, UINT64 vadr, UINT64 ps)
{
    vmx_vcpu_ptc_l(vcpu, vadr, ps);
    return IA64_ILLOP_FAULT;
}

IA64FAULT vmx_vcpu_ptc_ga(VCPU *vcpu, UINT64 vadr, UINT64 ps)
{
    vmx_vcpu_ptc_l(vcpu, vadr, ps);
    return IA64_NO_FAULT;
}
IA64FAULT vmx_vcpu_thash(VCPU *vcpu, UINT64 vadr, UINT64 *pval)
{
    PTA vpta;
    ia64_rr vrr;
    u64 vhpt_offset, tmp;

    vmx_vcpu_get_pta(vcpu, &vpta.val);
    vrr = vmx_vcpu_rr(vcpu, vadr);
    if ( vpta.vf ) {
        panic("THASH: long-format VHPT is not supported");
        *pval = ia64_call_vsa(PAL_VPS_THASH, vadr, vrr.rrval, vpta.val, 0, 0, 0, 0);
    } else {
        vhpt_offset = ((vadr >> vrr.ps) << 3) & ((1UL << (vpta.size)) - 1);
        *pval = (vadr & VRN_MASK) |
                (vpta.val << 3 >> (vpta.size + 3) << (vpta.size)) |
                vhpt_offset;
    }
    return IA64_NO_FAULT;
}
IA64FAULT vmx_vcpu_ttag(VCPU *vcpu, UINT64 vadr, UINT64 *pval)
{
    ia64_rr vrr;
    PTA vpta;

    vmx_vcpu_get_pta(vcpu, &vpta.val);
    vrr = vmx_vcpu_rr(vcpu, vadr);
    if ( vpta.vf ) {
        panic("TTAG: long-format VHPT is not supported");
        *pval = ia64_call_vsa(PAL_VPS_TTAG, vadr, vrr.rrval, 0, 0, 0, 0, 0);
    } else {
        *pval = 1;
    }
    return IA64_NO_FAULT;
}
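
/*
 * Translate a guest virtual address to a guest physical address, the way
 * the tpa instruction would: look the address up in the vTLB; if a present
 * entry is found return ppn|offset, otherwise raise the fault the guest
 * would have seen (page-not-present, NaT page consumption, alternate DTLB,
 * DTLB, VHPT, or nested DTLB) depending on whether its VHPT walker is
 * enabled and whether psr.ic is set.
 */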
IA64FAULT vmx_vcpu_tpa(VCPU *vcpu, UINT64 vadr, UINT64 *padr)
{
    thash_data_t *data;
    thash_cb_t *hcb;
    ia64_rr vrr;
    ISR visr, pt_isr;
    REGS *regs;
    u64 vhpt_adr;
    IA64_PSR vpsr;

    hcb = vmx_vcpu_get_vtlb(vcpu);
    vrr = vmx_vcpu_rr(vcpu, vadr);
    regs = vcpu_regs(vcpu);
    pt_isr.val = regs->cr_isr;
    visr.val = 0;
    visr.ei = pt_isr.ei;
    visr.ir = pt_isr.ir;
    vpsr.val = vmx_vcpu_get_psr(vcpu);
    if ( vpsr.ic == 0 ) {
        visr.ni = 1;
    }
    visr.na = 1;
    data = vtlb_lookup_ex(hcb, vrr.rid, vadr, DSIDE_TLB);
    if ( data ) {
        if ( data->p == 0 ) {
            visr.na = 1;
            vmx_vcpu_set_isr(vcpu, visr.val);
            page_not_present(vcpu, vadr);
            return IA64_FAULT;
        } else if ( data->ma == VA_MATTR_NATPAGE ) {
            visr.na = 1;
            vmx_vcpu_set_isr(vcpu, visr.val);
            dnat_page_consumption(vcpu, vadr);
            return IA64_FAULT;
        } else {
            *padr = (data->ppn << 12) | (vadr & (PSIZE(data->ps) - 1));
            return IA64_NO_FAULT;
        }
    } else {
        if ( !vhpt_enabled(vcpu, vadr, NA_REF) ) {
            if ( vpsr.ic ) {
                vmx_vcpu_set_isr(vcpu, visr.val);
                alt_dtlb(vcpu, vadr);
                return IA64_FAULT;
            } else {
                nested_dtlb(vcpu);
                return IA64_FAULT;
            }
        } else {
            vmx_vcpu_thash(vcpu, vadr, &vhpt_adr);
            vrr = vmx_vcpu_rr(vcpu, vhpt_adr);
            data = vtlb_lookup_ex(hcb, vrr.rid, vhpt_adr, DSIDE_TLB);
            if ( data ) {
                if ( vpsr.ic ) {
                    vmx_vcpu_set_isr(vcpu, visr.val);
                    dtlb_fault(vcpu, vadr);
                    return IA64_FAULT;
                } else {
                    nested_dtlb(vcpu);
                    return IA64_FAULT;
                }
            } else {
                if ( vpsr.ic ) {
                    vmx_vcpu_set_isr(vcpu, visr.val);
                    dvhpt_fault(vcpu, vadr);
                    return IA64_FAULT;
                } else {
                    nested_dtlb(vcpu);
                    return IA64_FAULT;
                }
            }
        }
    }
}
IA64FAULT vmx_vcpu_tak(VCPU *vcpu, UINT64 vadr, UINT64 *key)
{
    thash_data_t *data;
    thash_cb_t *hcb;
    ia64_rr rr;
    PTA vpta;

    vmx_vcpu_get_pta(vcpu, &vpta.val);
    if ( vpta.vf == 0 || unimplemented_gva(vcpu, vadr) ) {
        *key = 1;
        return IA64_NO_FAULT;
    }
    hcb = vmx_vcpu_get_vtlb(vcpu);
    rr = vmx_vcpu_rr(vcpu, vadr);
    data = vtlb_lookup_ex(hcb, rr.rid, vadr, DSIDE_TLB);
    if ( !data || !data->p ) {
        *key = 1;
    } else {
        *key = data->key;
    }
    return IA64_NO_FAULT;
}