ia64/xen-unstable

xen/arch/ia64/vmx/vmmu.c @ 17601:6df8dcf48d9d

[IA64] cleanup: VTLB_PTE_IO_BIT is not used

VTLB_PTE_IO_BIT is not used any more.

Signed-off-by: Kouya Shimura <kouya@jp.fujitsu.com>
author Isaku Yamahata <yamahata@valinux.co.jp>
date Mon May 12 11:24:47 2008 +0900 (2008-05-12)
parents 408fcc50fd35
children d2f7243fc571

/* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
/*
 * vmmu.c: virtual memory management unit components.
 * Copyright (c) 2005, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 *  Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com)
 *  Yaozu Dong (Eddie Dong) (Eddie.dong@intel.com)
 */
#include <asm/vmx_vcpu.h>
#include <asm/vmx_pal_vsa.h>
#include <xen/sched-if.h>
#include <asm/vhpt.h>

static int default_vtlb_sz = DEFAULT_VTLB_SZ;
static int default_vhpt_sz = DEFAULT_VHPT_SZ;
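
/*
 * Boot-time overrides for the per-vcpu VTLB/VHPT sizes.  The values of the
 * "vti_vtlb_size" and "vti_vhpt_size" command line options are parsed as a
 * byte count and converted to a power-of-two order: fls(sz - 1) rounds up
 * to the next power of two and keeps its log2.  The VTLB order is clamped
 * to at least 14 (16KB) for tag uniqueness; the VHPT order is further
 * adjusted by canonicalize_vhpt_size().
 */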
static void __init parse_vtlb_size(char *s)
{
    int sz = parse_size_and_unit(s, NULL);

    if (sz > 0) {
        default_vtlb_sz = fls(sz - 1);
        /* minimum 16KB (for tag uniqueness) */
        if (default_vtlb_sz < 14)
            default_vtlb_sz = 14;
    }
}

static void __init parse_vhpt_size(char *s)
{
    int sz = parse_size_and_unit(s, NULL);
    if (sz > 0) {
        default_vhpt_sz = fls(sz - 1);
        default_vhpt_sz = canonicalize_vhpt_size(default_vhpt_sz);
    }
}

custom_param("vti_vtlb_size", parse_vtlb_size);
custom_param("vti_vhpt_size", parse_vhpt_size);
static int init_domain_vhpt(struct vcpu *v)
{
    int rc;
    u64 size = v->domain->arch.hvm_domain.params[HVM_PARAM_VHPT_SIZE];

    if (size == 0)
        size = default_vhpt_sz;
    else
        size = canonicalize_vhpt_size(size);

    rc = thash_alloc(&(v->arch.vhpt), size, "vhpt");
    v->arch.arch_vmx.mpta = v->arch.vhpt.pta.val;
    return rc;
}

static void free_domain_vhpt(struct vcpu *v)
{
    if (v->arch.vhpt.hash)
        thash_free(&(v->arch.vhpt));
}

int init_domain_tlb(struct vcpu *v)
{
    int rc;

    rc = init_domain_vhpt(v);
    if (rc)
        return rc;

    rc = thash_alloc(&(v->arch.vtlb), default_vtlb_sz, "vtlb");
    if (rc) {
        free_domain_vhpt(v);
        return rc;
    }

    return 0;
}

void free_domain_tlb(struct vcpu *v)
{
    if (v->arch.vtlb.hash)
        thash_free(&(v->arch.vtlb));

    free_domain_vhpt(v);
}
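
/*
 * Tell whether the guest VHPT walker is enabled for the given address and
 * reference type.  The walker only runs when both the region's rr.ve bit
 * and pta.ve are set, and the relevant PSR translation bits are on: dt for
 * data and non-access references, additionally it and ic for instruction
 * fetches, and rt for RSE references.
 */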
int vhpt_enabled(VCPU *vcpu, uint64_t vadr, vhpt_ref_t ref)
{
    ia64_rr vrr;
    PTA vpta;
    IA64_PSR vpsr;

    vpsr.val = VCPU(vcpu, vpsr);
    vcpu_get_rr(vcpu, vadr, &vrr.rrval);
    vpta.val = vmx_vcpu_get_pta(vcpu);

    if (vrr.ve & vpta.ve) {
        switch (ref) {
        case DATA_REF:
        case NA_REF:
            return vpsr.dt;
        case INST_REF:
            return vpsr.dt && vpsr.it && vpsr.ic;
        case RSE_REF:
            return vpsr.dt && vpsr.rt;
        }
    }
    return 0;
}

int unimplemented_gva(VCPU *vcpu, u64 vadr)
{
#if 0
    int bit = vcpu->domain->arch.imp_va_msb;
    u64 ladr = (vadr << 3) >> (3 + bit);

    if (!ladr || ladr == (1U << (61 - bit)) - 1) {
        return 0;
    } else {
        return 1;
    }
#else
    return 0;
#endif
}

/*
 * Fetch guest bundle code.
 * INPUT:
 *  gip: guest ip
 *  pbundle: used to return fetched bundle.
 */
unsigned long
fetch_code(VCPU *vcpu, u64 gip, IA64_BUNDLE *pbundle)
{
    u64 gpip = 0;   // guest physical IP
    u64 *vpa;
    thash_data_t *tlb;
    u64 mfn, maddr;
    struct page_info *page;

 again:
    if (!(VCPU(vcpu, vpsr) & IA64_PSR_IT)) {   // I-side physical mode
        gpip = pa_clear_uc(gip);               // clear UC bit
    }
    else {
        tlb = vtlb_lookup(vcpu, gip, ISIDE_TLB);
        // if (tlb == NULL)
        //     tlb = vtlb_lookup(vcpu, gip, DSIDE_TLB);
        if (tlb)
            gpip = thash_translate(tlb, gip);
    }
    if (gpip) {
        mfn = gmfn_to_mfn(vcpu->domain, gpip >> PAGE_SHIFT);
        if (mfn == INVALID_MFN)
            panic_domain(vcpu_regs(vcpu), "fetch_code: invalid memory\n");
        maddr = (mfn << PAGE_SHIFT) | (gpip & (PAGE_SIZE - 1));
    } else {
        tlb = vhpt_lookup(gip);
        if (tlb == NULL) {
            ia64_ptcl(gip, ARCH_PAGE_SHIFT << 2);
            return IA64_RETRY;
        }
        maddr = thash_translate(tlb, gip);
        mfn = maddr >> PAGE_SHIFT;
    }

    page = mfn_to_page(mfn);
    if (get_page(page, vcpu->domain) == 0) {
        if (page_get_owner(page) != vcpu->domain) {
            // This page might be a page granted by another domain.
            panic_domain(NULL, "domain tries to execute foreign domain "
                         "page which might be mapped by grant table.\n");
        }
        goto again;
    }
    vpa = (u64 *)__va(maddr);

    pbundle->i64[0] = *vpa++;
    pbundle->i64[1] = *vpa;
    put_page(page);
    return IA64_NO_FAULT;
}
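
/*
 * Emulate itc.i / itc.d: insert a translation cache entry on behalf of the
 * guest.  Reserved PTE bits are cleared and the entry is installed with
 * thash_purge_and_insert(), which first evicts any overlapping VTLB/VHPT
 * entries.  With VTLB_DEBUG, an overlap with a translation register is
 * treated as a fatal TLB conflict.
 */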
IA64FAULT vmx_vcpu_itc_i(VCPU *vcpu, u64 pte, u64 itir, u64 ifa)
{
#ifdef VTLB_DEBUG
    int slot;
    u64 ps, va;
    ps = itir_ps(itir);
    va = PAGEALIGN(ifa, ps);
    slot = vtr_find_overlap(vcpu, va, ps, ISIDE_TLB);
    if (slot >= 0) {
        // generate MCA.
        panic_domain(vcpu_regs(vcpu), "Tlb conflict!!");
        return IA64_FAULT;
    }
#endif //VTLB_DEBUG
    pte &= ~PAGE_FLAGS_RV_MASK;
    thash_purge_and_insert(vcpu, pte, itir, ifa, ISIDE_TLB);
    return IA64_NO_FAULT;
}

IA64FAULT vmx_vcpu_itc_d(VCPU *vcpu, u64 pte, u64 itir, u64 ifa)
{
#ifdef VTLB_DEBUG
    int slot;
    u64 ps, va;
    ps = itir_ps(itir);
    va = PAGEALIGN(ifa, ps);
    slot = vtr_find_overlap(vcpu, va, ps, DSIDE_TLB);
    if (slot >= 0) {
        // generate MCA.
        panic_domain(vcpu_regs(vcpu), "Tlb conflict!!");
        return IA64_FAULT;
    }
#endif //VTLB_DEBUG
    pte &= ~PAGE_FLAGS_RV_MASK;
    thash_purge_and_insert(vcpu, pte, itir, ifa, DSIDE_TLB);
    return IA64_NO_FAULT;
}
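
/*
 * Emulate itr.i / itr.d: insert a translation register.  The slot index is
 * range-checked, the shadow TR array (vcpu->arch.itrs / vcpu->arch.dtrs) is
 * updated via vmx_vcpu_set_tr(), and the region containing va is flagged in
 * the vcpu's itr_regions / dtr_regions bitmap.
 */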
IA64FAULT vmx_vcpu_itr_i(VCPU *vcpu, u64 slot, u64 pte, u64 itir, u64 ifa)
{
#ifdef VTLB_DEBUG
    int index;
#endif
    u64 ps, va, rid;
    thash_data_t *p_itr;

    ps = itir_ps(itir);
    va = PAGEALIGN(ifa, ps);
#ifdef VTLB_DEBUG
    index = vtr_find_overlap(vcpu, va, ps, ISIDE_TLB);
    if (index >= 0) {
        // generate MCA.
        panic_domain(vcpu_regs(vcpu), "Tlb conflict!!");
        return IA64_FAULT;
    }
    thash_purge_entries(vcpu, va, ps);
#endif

    if (slot >= NITRS) {
        panic_domain(NULL, "bad itr.i slot (%ld)", slot);
        return IA64_FAULT;
    }

    pte &= ~PAGE_FLAGS_RV_MASK;
    vcpu_get_rr(vcpu, va, &rid);
    rid = rid & RR_RID_MASK;
    p_itr = (thash_data_t *)&vcpu->arch.itrs[slot];
    vmx_vcpu_set_tr(p_itr, pte, itir, va, rid);
    vcpu_quick_region_set(PSCBX(vcpu, itr_regions), va);
    return IA64_NO_FAULT;
}

IA64FAULT vmx_vcpu_itr_d(VCPU *vcpu, u64 slot, u64 pte, u64 itir, u64 ifa)
{
#ifdef VTLB_DEBUG
    int index;
#endif
    u64 gpfn;
    u64 ps, va, rid;
    thash_data_t *p_dtr;

    ps = itir_ps(itir);
    va = PAGEALIGN(ifa, ps);
#ifdef VTLB_DEBUG
    index = vtr_find_overlap(vcpu, va, ps, DSIDE_TLB);
    if (index >= 0) {
        // generate MCA.
        panic_domain(vcpu_regs(vcpu), "Tlb conflict!!");
        return IA64_FAULT;
    }
#endif

    if (slot >= NDTRS) {
        panic_domain(NULL, "bad itr.d slot (%ld)", slot);
        return IA64_FAULT;
    }

    pte &= ~PAGE_FLAGS_RV_MASK;

    /* This is an ugly workaround.
     * Linux maps region 7 with identity mappings using a 16MB page size,
     * while Xen's VHPT uses 16KB pages.  Purging the VHPT for a 16MB
     * insertion would iterate 1024 times and make Xen/IPF very slow, so
     * Xen skips the VHPT purge for 16MB insertions.
     */
    if (ps != _PAGE_SIZE_16M)
        thash_purge_entries(vcpu, va, ps);
    gpfn = (pte & _PAGE_PPN_MASK) >> PAGE_SHIFT;
    vcpu_get_rr(vcpu, va, &rid);
    rid &= RR_RID_MASK;
    p_dtr = (thash_data_t *)&vcpu->arch.dtrs[slot];
    vmx_vcpu_set_tr(p_dtr, pte, itir, va, rid);
    vcpu_quick_region_set(PSCBX(vcpu, dtr_regions), va);
    return IA64_NO_FAULT;
}
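
/*
 * Emulate ptr.d / ptr.i: purge a translation register.  Any data (resp.
 * instruction) TR overlapping the given range is marked not-present, and
 * the matching VTLB/VHPT entries are purged.
 */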
IA64FAULT vmx_vcpu_ptr_d(VCPU *vcpu, u64 ifa, u64 ps)
{
    int index;
    u64 va;

    va = PAGEALIGN(ifa, ps);
    while ((index = vtr_find_overlap(vcpu, va, ps, DSIDE_TLB)) >= 0) {
        vcpu->arch.dtrs[index].pte.p = 0;
    }
    thash_purge_entries(vcpu, va, ps);
    return IA64_NO_FAULT;
}

IA64FAULT vmx_vcpu_ptr_i(VCPU *vcpu, u64 ifa, u64 ps)
{
    int index;
    u64 va;

    va = PAGEALIGN(ifa, ps);
    while ((index = vtr_find_overlap(vcpu, va, ps, ISIDE_TLB)) >= 0) {
        vcpu->arch.itrs[index].pte.p = 0;
    }
    thash_purge_entries(vcpu, va, ps);
    return IA64_NO_FAULT;
}

IA64FAULT vmx_vcpu_ptc_l(VCPU *vcpu, u64 va, u64 ps)
{
    va = PAGEALIGN(va, ps);
    thash_purge_entries(vcpu, va, ps);
    return IA64_NO_FAULT;
}

IA64FAULT vmx_vcpu_ptc_e(VCPU *vcpu, u64 va)
{
    thash_purge_all(vcpu);
    return IA64_NO_FAULT;
}

IA64FAULT vmx_vcpu_ptc_g(VCPU *vcpu, u64 va, u64 ps)
{
    return vmx_vcpu_ptc_ga(vcpu, va, ps);
}
/*
IA64FAULT vmx_vcpu_ptc_ga(VCPU *vcpu, u64 va, u64 ps)
{
    vmx_vcpu_ptc_l(vcpu, va, ps);
    return IA64_NO_FAULT;
}
 */

struct ptc_ga_args {
    unsigned long vadr;
    unsigned long rid;
    unsigned long ps;
    struct vcpu *vcpu;
};
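
/*
 * Helper for vmx_vcpu_ptc_ga(): runs on the CPU currently hosting the
 * target vcpu.  It temporarily loads the purge RID into region register 0
 * and switches to the target vcpu's PTA with the VHPT walker bit cleared,
 * purges the overlapping entries, then restores the previous machine state.
 * On success args->vcpu is cleared so the caller stops retrying; if the
 * vcpu has migrated or the scheduler lock cannot be taken, the function
 * bails out and the caller retries on the new CPU.
 */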
static void ptc_ga_remote_func(void *varg)
{
    u64 oldrid, moldrid, mpta, oldpsbits, vadr, flags;
    struct ptc_ga_args *args = (struct ptc_ga_args *)varg;
    VCPU *v = args->vcpu;
    int cpu = v->processor;

    vadr = args->vadr;

    /* Try again if VCPU has migrated. */
    if (cpu != current->processor)
        return;
    local_irq_save(flags);
    if (!spin_trylock(&per_cpu(schedule_data, cpu).schedule_lock))
        goto bail2;
    if (v->processor != cpu)
        goto bail1;
    oldrid = VMX(v, vrr[0]);
    VMX(v, vrr[0]) = args->rid;
    oldpsbits = VMX(v, psbits[0]);
    VMX(v, psbits[0]) = VMX(v, psbits[REGION_NUMBER(vadr)]);
    moldrid = ia64_get_rr(0x0);
    ia64_set_rr(0x0, vrrtomrr(v, args->rid));
    mpta = ia64_get_pta();
    ia64_set_pta(v->arch.arch_vmx.mpta & (~1));
    ia64_srlz_d();
    vadr = PAGEALIGN(vadr, args->ps);
    thash_purge_entries_remote(v, vadr, args->ps);
    VMX(v, vrr[0]) = oldrid;
    VMX(v, psbits[0]) = oldpsbits;
    ia64_set_rr(0x0, moldrid);
    ia64_set_pta(mpta);
    ia64_dv_serialize_data();
    args->vcpu = NULL;
 bail1:
    spin_unlock(&per_cpu(schedule_data, cpu).schedule_lock);
 bail2:
    local_irq_restore(flags);
}

IA64FAULT vmx_vcpu_ptc_ga(VCPU *vcpu, u64 va, u64 ps)
{
    struct domain *d = vcpu->domain;
    struct vcpu *v;
    struct ptc_ga_args args;
    int cpu;

    args.vadr = va;
    vcpu_get_rr(vcpu, va, &args.rid);
    args.ps = ps;
    for_each_vcpu (d, v) {
        if (!v->is_initialised)
            continue;

        if (v == vcpu) {
            vmx_vcpu_ptc_l(v, va, ps);
            continue;
        }

        args.vcpu = v;
        do {
            cpu = v->processor;
            if (cpu != current->processor) {
                spin_unlock_wait(&per_cpu(schedule_data, cpu).schedule_lock);
                /* Flush VHPT on remote processors. */
                smp_call_function_single(cpu, &ptc_ga_remote_func,
                                         &args, 0, 1);
            } else {
                ptc_ga_remote_func(&args);
            }
        } while (args.vcpu != NULL);
    }
    return IA64_NO_FAULT;
}
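
/*
 * Emulate the thash instruction: compute the address of the guest VHPT
 * entry that covers vadr.  With the long-format walker (pta.vf set) the
 * offset is (VPN ^ RID) << 5, masked to the table size and added to the
 * PTA base; with the short format the offset is (vadr >> ps) << 3, folded
 * into the per-region VHPT base.
 */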
u64 vmx_vcpu_thash(VCPU *vcpu, u64 vadr)
{
    PTA vpta;
    ia64_rr vrr;
    u64 pval;
    u64 vhpt_offset;
    u64 mask;

    vpta.val = vmx_vcpu_get_pta(vcpu);
    vcpu_get_rr(vcpu, vadr, &vrr.rrval);
    mask = (1UL << vpta.size) - 1;
    if (vpta.vf) {
        vadr = (vadr & 0x1fffffffffffffffUL) >> vrr.ps;
        vhpt_offset = vadr ^ vrr.rid;
        pval = (vpta.val & ~0x7fffUL) + ((vhpt_offset << 5) & mask);
    } else {
        vhpt_offset = ((vadr >> vrr.ps) << 3) & mask;
        pval = (vadr & VRN_MASK) |
            (vpta.val << 3 >> (vpta.size + 3) << vpta.size) |
            vhpt_offset;
    }
    return pval;
}
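
/*
 * Emulate the ttag instruction: compute the translation tag for vadr from
 * the virtual page number and the region id.  The tag is only produced for
 * the long-format VHPT (pta.vf set); otherwise 1 is returned.
 */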
u64 vmx_vcpu_ttag(VCPU *vcpu, u64 vadr)
{
    ia64_rr vrr;
    PTA vpta;
    u64 pval;
    u64 rid;

    vpta.val = vmx_vcpu_get_pta(vcpu);
    vcpu_get_rr(vcpu, vadr, &vrr.rrval);
    if (vpta.vf) {
        vadr = (vadr & 0x1fffffffffffffffUL) >> vrr.ps;
        rid = vrr.rid;
        pval = vadr ^ (rid << 39);
    } else {
        pval = 1;
    }
    return pval;
}
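
/*
 * Emulate the tpa instruction: translate a guest virtual address to a
 * guest physical address.  The lookup order is: guest VTLB, then the
 * machine VHPT (whose result is converted back with __mpa_to_gpa()), and
 * finally, if no entry is found, the appropriate data fault (alt dtlb,
 * dtlb, dvhpt or nested dtlb) is injected and IA64_FAULT is returned.
 */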
IA64FAULT vmx_vcpu_tpa(VCPU *vcpu, u64 vadr, u64 *padr)
{
    thash_data_t *data;
    ISR visr, pt_isr;
    REGS *regs;
    u64 vhpt_adr, madr;
    IA64_PSR vpsr;

    regs = vcpu_regs(vcpu);
    pt_isr.val = VMX(vcpu, cr_isr);
    visr.val = 0;
    visr.ei = pt_isr.ei;
    visr.ir = pt_isr.ir;
    vpsr.val = VCPU(vcpu, vpsr);
    visr.na = 1;

    /* First look in VTLB.  */
    data = vtlb_lookup(vcpu, vadr, DSIDE_TLB);
    if (data) {
        if (data->p == 0) {
            vcpu_set_isr(vcpu, visr.val);
            data_page_not_present(vcpu, vadr);
            return IA64_FAULT;
        } else if (data->ma == VA_MATTR_NATPAGE) {
            vcpu_set_isr(vcpu, visr.val);
            dnat_page_consumption(vcpu, vadr);
            return IA64_FAULT;
        } else {
            *padr = thash_translate(data, vadr);
            return IA64_NO_FAULT;
        }
    }

    /* Look in mVHPT.  */
    data = vhpt_lookup(vadr);
    if (data) {
        if (data->p == 0) {
            vcpu_set_isr(vcpu, visr.val);
            data_page_not_present(vcpu, vadr);
            return IA64_FAULT;
        } else if (data->ma == VA_MATTR_NATPAGE) {
            vcpu_set_isr(vcpu, visr.val);
            dnat_page_consumption(vcpu, vadr);
            return IA64_FAULT;
        } else {
            madr = thash_translate(data, vadr);
            *padr = __mpa_to_gpa(madr);
            return IA64_NO_FAULT;
        }
    }

    /* If VHPT is not enabled, inject fault.  */
    if (!vhpt_enabled(vcpu, vadr, NA_REF)) {
        if (vpsr.ic) {
            vcpu_set_isr(vcpu, visr.val);
            alt_dtlb(vcpu, vadr);
            return IA64_FAULT;
        } else {
            nested_dtlb(vcpu);
            return IA64_FAULT;
        }
    }

    /* Get gVHPT entry.  */
    vhpt_adr = vmx_vcpu_thash(vcpu, vadr);
    data = vtlb_lookup(vcpu, vhpt_adr, DSIDE_TLB);
    if (data) {
        /* FIXME: we should read gadr from the entry!  */
        if (vpsr.ic) {
            vcpu_set_isr(vcpu, visr.val);
            dtlb_fault(vcpu, vadr);
            return IA64_FAULT;
        } else {
            nested_dtlb(vcpu);
            return IA64_FAULT;
        }
    } else {
        if (vpsr.ic) {
            vcpu_set_isr(vcpu, visr.val);
            dvhpt_fault(vcpu, vadr);
            return IA64_FAULT;
        } else {
            nested_dtlb(vcpu);
            return IA64_FAULT;
        }
    }
}
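
/*
 * Emulate the tak instruction: return the protection key for vadr, shifted
 * left by 8 to match the tak result layout.  A return value of 1 means no
 * usable translation was found (unimplemented address, not-present entry,
 * or VHPT walker disabled); the guest VHPT itself is not searched yet.
 */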
u64 vmx_vcpu_tak(VCPU *vcpu, u64 vadr)
{
    thash_data_t *data;
    u64 key;

    if (unimplemented_gva(vcpu, vadr)) {
        key = 1;
        return key;
    }

    data = vtlb_lookup(vcpu, vadr, DSIDE_TLB);
    if (data) {
        if (data->p)
            return data->key << 8;
        else
            return 1;
    }

    data = vhpt_lookup(vadr);
    if (data) {
        if (data->p)
            return data->key << 8;   /* FIXME: possible mangling/masking. */
        else
            return 1;
    }

    if (!vhpt_enabled(vcpu, vadr, NA_REF))
        return 1;

    /* FIXME: look in the guest VHPT.  */
    return 1;
}