ia64/xen-unstable

view xen/arch/ia64/xen/vcpu.c @ 9756:14a34d811e81

[IA64] introduce P2M conversion

introduce P2M conversion functions necessary for dom0vp model.

Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
author awilliam@ldap.hp.com
date Tue Apr 25 13:06:57 2006 -0600 (2006-04-25)
parents c3972d632ff6
children 2d2ef3f4c747
1 /*
2 * Virtualized CPU functions
3 *
4 * Copyright (C) 2004-2005 Hewlett-Packard Co.
5 * Dan Magenheimer (dan.magenheimer@hp.com)
6 *
7 */
9 #include <linux/sched.h>
10 #include <public/arch-ia64.h>
11 #include <asm/ia64_int.h>
12 #include <asm/vcpu.h>
13 #include <asm/regionreg.h>
14 #include <asm/tlb.h>
15 #include <asm/processor.h>
16 #include <asm/delay.h>
17 #include <asm/vmx_vcpu.h>
18 #include <asm/vhpt.h>
19 #include <asm/tlbflush.h>
20 #include <xen/event.h>
22 /* FIXME: where should these declarations live? */
23 extern void getreg(unsigned long regnum, unsigned long *val, int *nat, struct pt_regs *regs);
24 extern void setreg(unsigned long regnum, unsigned long val, int nat, struct pt_regs *regs);
25 extern void getfpreg (unsigned long regnum, struct ia64_fpreg *fpval, struct pt_regs *regs);
27 extern void panic_domain(struct pt_regs *, const char *, ...);
28 extern unsigned long translate_domain_mpaddr(unsigned long);
29 extern void ia64_global_tlb_purge(UINT64 start, UINT64 end, UINT64 nbits);
32 typedef union {
33 struct ia64_psr ia64_psr;
34 unsigned long i64;
35 } PSR;
37 // this def for vcpu_regs won't work if kernel stack is present
38 //#define vcpu_regs(vcpu) ((struct pt_regs *) vcpu->arch.regs
40 #define TRUE 1
41 #define FALSE 0
42 #define IA64_PTA_SZ_BIT 2
43 #define IA64_PTA_VF_BIT 8
44 #define IA64_PTA_BASE_BIT 15
45 #define IA64_PTA_LFMT (1UL << IA64_PTA_VF_BIT)
46 #define IA64_PTA_SZ(x) (x##UL << IA64_PTA_SZ_BIT)
48 #define STATIC
50 #ifdef PRIVOP_ADDR_COUNT
51 struct privop_addr_count privop_addr_counter[PRIVOP_COUNT_NINSTS+1] = {
52 { "=ifa", { 0 }, { 0 }, 0 },
53 { "thash", { 0 }, { 0 }, 0 },
54 { 0, { 0 }, { 0 }, 0 }
55 };
56 extern void privop_count_addr(unsigned long addr, int inst);
57 #define PRIVOP_COUNT_ADDR(regs,inst) privop_count_addr(regs->cr_iip,inst)
58 #else
59 #define PRIVOP_COUNT_ADDR(x,y) do {} while (0)
60 #endif
62 unsigned long dtlb_translate_count = 0;
63 unsigned long tr_translate_count = 0;
64 unsigned long phys_translate_count = 0;
66 unsigned long vcpu_verbose = 0;
68 /**************************************************************************
69 VCPU general register access routines
70 **************************************************************************/
71 #ifdef XEN
72 UINT64
73 vcpu_get_gr(VCPU *vcpu, unsigned long reg)
74 {
75 REGS *regs = vcpu_regs(vcpu);
76 UINT64 val;
78 if (!reg) return 0;
79 getreg(reg,&val,0,regs); // FIXME: handle NATs later
80 return val;
81 }
82 IA64FAULT
83 vcpu_get_gr_nat(VCPU *vcpu, unsigned long reg, UINT64 *val)
84 {
85 REGS *regs = vcpu_regs(vcpu);
86 int nat;
88 getreg(reg,val,&nat,regs); // FIXME: handle NATs later
89 if (nat)
90 return IA64_NAT_CONSUMPTION_VECTOR;
91 return 0;
92 }
94 // returns:
95 // IA64_ILLOP_FAULT if the register would cause an Illegal Operation fault
96 // IA64_NO_FAULT otherwise
97 IA64FAULT
98 vcpu_set_gr(VCPU *vcpu, unsigned long reg, UINT64 value, int nat)
99 {
100 REGS *regs = vcpu_regs(vcpu);
101 long sof = (regs->cr_ifs) & 0x7f;
103 if (!reg) return IA64_ILLOP_FAULT;
104 if (reg >= sof + 32) return IA64_ILLOP_FAULT;
105 setreg(reg,value,nat,regs); // FIXME: handle NATs later
106 return IA64_NO_FAULT;
107 }
109 IA64FAULT
110 vcpu_get_fpreg(VCPU *vcpu, unsigned long reg, struct ia64_fpreg *val)
111 {
112 REGS *regs = vcpu_regs(vcpu);
113 getfpreg(reg,val,regs); // FIXME: handle NATs later
114 return 0;
115 }
117 #else
118 // returns:
119 // IA64_ILLOP_FAULT if the register would cause an Illegal Operation fault
120 // IA64_NO_FAULT otherwise
121 IA64FAULT
122 vcpu_set_gr(VCPU *vcpu, unsigned long reg, UINT64 value)
123 {
124 REGS *regs = vcpu_regs(vcpu);
125 long sof = (regs->cr_ifs) & 0x7f;
127 if (!reg) return IA64_ILLOP_FAULT;
128 if (reg >= sof + 32) return IA64_ILLOP_FAULT;
129 setreg(reg,value,0,regs); // FIXME: handle NATs later
130 return IA64_NO_FAULT;
131 }
133 #endif
134 /**************************************************************************
135 VCPU privileged application register access routines
136 **************************************************************************/
138 void vcpu_load_kernel_regs(VCPU *vcpu)
139 {
140 ia64_set_kr(0, VCPU(vcpu, krs[0]));
141 ia64_set_kr(1, VCPU(vcpu, krs[1]));
142 ia64_set_kr(2, VCPU(vcpu, krs[2]));
143 ia64_set_kr(3, VCPU(vcpu, krs[3]));
144 ia64_set_kr(4, VCPU(vcpu, krs[4]));
145 ia64_set_kr(5, VCPU(vcpu, krs[5]));
146 ia64_set_kr(6, VCPU(vcpu, krs[6]));
147 ia64_set_kr(7, VCPU(vcpu, krs[7]));
148 }
150 /* GCC 4.0.2 seems unable to suppress this call! */
151 #define ia64_setreg_unknown_kr() return IA64_ILLOP_FAULT
153 IA64FAULT vcpu_set_ar(VCPU *vcpu, UINT64 reg, UINT64 val)
154 {
155 if (reg == 44) return (vcpu_set_itc(vcpu,val));
156 else if (reg == 27) return (IA64_ILLOP_FAULT);
157 else if (reg == 24)
158 printf("warning: setting ar.eflg is a no-op; no IA-32 support\n");
159 else if (reg > 7) return (IA64_ILLOP_FAULT);
160 else {
161 PSCB(vcpu,krs[reg]) = val;
162 ia64_set_kr(reg,val);
163 }
164 return IA64_NO_FAULT;
165 }
167 IA64FAULT vcpu_get_ar(VCPU *vcpu, UINT64 reg, UINT64 *val)
168 {
169 if (reg == 24)
170 printf("warning: getting ar.eflg is a no-op; no IA-32 support\n");
171 else if (reg > 7) return (IA64_ILLOP_FAULT);
172 else *val = PSCB(vcpu,krs[reg]);
173 return IA64_NO_FAULT;
174 }
176 /**************************************************************************
177 VCPU processor status register access routines
178 **************************************************************************/
180 void vcpu_set_metaphysical_mode(VCPU *vcpu, BOOLEAN newmode)
181 {
182 /* only do something if mode changes */
183 if (!!newmode ^ !!PSCB(vcpu,metaphysical_mode)) {
184 PSCB(vcpu,metaphysical_mode) = newmode;
185 if (newmode) set_metaphysical_rr0();
186 else if (PSCB(vcpu,rrs[0]) != -1)
187 set_one_rr(0, PSCB(vcpu,rrs[0]));
188 }
189 }
191 IA64FAULT vcpu_reset_psr_dt(VCPU *vcpu)
192 {
193 vcpu_set_metaphysical_mode(vcpu,TRUE);
194 return IA64_NO_FAULT;
195 }
197 IA64FAULT vcpu_reset_psr_sm(VCPU *vcpu, UINT64 imm24)
198 {
199 struct ia64_psr psr, imm, *ipsr;
200 REGS *regs = vcpu_regs(vcpu);
202 //PRIVOP_COUNT_ADDR(regs,_RSM);
203 // TODO: All of these bits need to be virtualized
204 // TODO: Only allowed for current vcpu
205 __asm__ __volatile ("mov %0=psr;;" : "=r"(psr) :: "memory");
206 ipsr = (struct ia64_psr *)&regs->cr_ipsr;
207 imm = *(struct ia64_psr *)&imm24;
208 // interrupt flag
209 if (imm.i)
210 vcpu->vcpu_info->evtchn_upcall_mask = 1;
211 if (imm.ic) PSCB(vcpu,interrupt_collection_enabled) = 0;
212 // interrupt collection flag
213 //if (imm.ic) PSCB(vcpu,interrupt_delivery_enabled) = 0;
214 // just handle psr.up and psr.pp for now
215 if (imm24 & ~(IA64_PSR_BE | IA64_PSR_PP | IA64_PSR_UP | IA64_PSR_SP
216 | IA64_PSR_I | IA64_PSR_IC | IA64_PSR_DT
217 | IA64_PSR_DFL | IA64_PSR_DFH))
218 return (IA64_ILLOP_FAULT);
219 if (imm.dfh) ipsr->dfh = 0;
220 if (imm.dfl) ipsr->dfl = 0;
221 if (imm.pp) {
222 ipsr->pp = 1;
223 psr.pp = 1; // priv perf ctrs always enabled
224 // FIXME: need new field in mapped_regs_t for virtual psr.pp (psr.be too?)
225 PSCB(vcpu,tmp[8]) = 0; // but fool the domain if it gets psr
226 }
227 if (imm.up) { ipsr->up = 0; psr.up = 0; }
228 if (imm.sp) { ipsr->sp = 0; psr.sp = 0; }
229 if (imm.be) ipsr->be = 0;
230 if (imm.dt) vcpu_set_metaphysical_mode(vcpu,TRUE);
231 __asm__ __volatile (";; mov psr.l=%0;; srlz.d"::"r"(psr):"memory");
232 return IA64_NO_FAULT;
233 }
235 #define SPURIOUS_VECTOR 0xf
237 IA64FAULT vcpu_set_psr_dt(VCPU *vcpu)
238 {
239 vcpu_set_metaphysical_mode(vcpu,FALSE);
240 return IA64_NO_FAULT;
241 }
243 IA64FAULT vcpu_set_psr_i(VCPU *vcpu)
244 {
245 vcpu->vcpu_info->evtchn_upcall_mask = 0;
246 PSCB(vcpu,interrupt_collection_enabled) = 1;
247 return IA64_NO_FAULT;
248 }
250 IA64FAULT vcpu_set_psr_sm(VCPU *vcpu, UINT64 imm24)
251 {
252 struct ia64_psr psr, imm, *ipsr;
253 REGS *regs = vcpu_regs(vcpu);
254 UINT64 mask, enabling_interrupts = 0;
256 //PRIVOP_COUNT_ADDR(regs,_SSM);
257 // TODO: All of these bits need to be virtualized
258 __asm__ __volatile ("mov %0=psr;;" : "=r"(psr) :: "memory");
259 imm = *(struct ia64_psr *)&imm24;
260 ipsr = (struct ia64_psr *)&regs->cr_ipsr;
261 // just handle psr.sp,pp and psr.i,ic (and user mask) for now
262 mask = IA64_PSR_PP|IA64_PSR_SP|IA64_PSR_I|IA64_PSR_IC|IA64_PSR_UM |
263 IA64_PSR_DT|IA64_PSR_DFL|IA64_PSR_DFH;
264 if (imm24 & ~mask) return (IA64_ILLOP_FAULT);
265 if (imm.dfh) ipsr->dfh = 1;
266 if (imm.dfl) ipsr->dfl = 1;
267 if (imm.pp) {
268 ipsr->pp = 1; psr.pp = 1;
269 // FIXME: need new field in mapped_regs_t for virtual psr.pp (psr.be too?)
270 PSCB(vcpu,tmp[8]) = 1;
271 }
272 if (imm.sp) { ipsr->sp = 1; psr.sp = 1; }
273 if (imm.i) {
274 if (vcpu->vcpu_info->evtchn_upcall_mask) {
275 //printf("vcpu_set_psr_sm: psr.ic 0->1 ");
276 enabling_interrupts = 1;
277 }
278 vcpu->vcpu_info->evtchn_upcall_mask = 0;
279 }
280 if (imm.ic) PSCB(vcpu,interrupt_collection_enabled) = 1;
281 // TODO: do this faster
282 if (imm.mfl) { ipsr->mfl = 1; psr.mfl = 1; }
283 if (imm.mfh) { ipsr->mfh = 1; psr.mfh = 1; }
284 if (imm.ac) { ipsr->ac = 1; psr.ac = 1; }
285 if (imm.up) { ipsr->up = 1; psr.up = 1; }
286 if (imm.be) {
287 printf("*** DOMAIN TRYING TO TURN ON BIG-ENDIAN!!!\n");
288 return (IA64_ILLOP_FAULT);
289 }
290 if (imm.dt) vcpu_set_metaphysical_mode(vcpu,FALSE);
291 __asm__ __volatile (";; mov psr.l=%0;; srlz.d"::"r"(psr):"memory");
292 if (enabling_interrupts &&
293 vcpu_check_pending_interrupts(vcpu) != SPURIOUS_VECTOR)
294 PSCB(vcpu,pending_interruption) = 1;
295 return IA64_NO_FAULT;
296 }
298 IA64FAULT vcpu_set_psr_l(VCPU *vcpu, UINT64 val)
299 {
300 struct ia64_psr psr, newpsr, *ipsr;
301 REGS *regs = vcpu_regs(vcpu);
302 UINT64 enabling_interrupts = 0;
304 // TODO: All of these bits need to be virtualized
305 __asm__ __volatile ("mov %0=psr;;" : "=r"(psr) :: "memory");
306 newpsr = *(struct ia64_psr *)&val;
307 ipsr = (struct ia64_psr *)&regs->cr_ipsr;
308 // just handle psr.up and psr.pp for now
309 //if (val & ~(IA64_PSR_PP | IA64_PSR_UP | IA64_PSR_SP)) return (IA64_ILLOP_FAULT);
310 // however trying to set other bits can't be an error as it is in ssm
311 if (newpsr.dfh) ipsr->dfh = 1;
312 if (newpsr.dfl) ipsr->dfl = 1;
313 if (newpsr.pp) {
314 ipsr->pp = 1; psr.pp = 1;
315 // FIXME: need new field in mapped_regs_t for virtual psr.pp (psr.be too?)
316 PSCB(vcpu,tmp[8]) = 1;
317 }
318 else {
319 ipsr->pp = 1; psr.pp = 1;
320 PSCB(vcpu,tmp[8]) = 0;
321 }
322 if (newpsr.up) { ipsr->up = 1; psr.up = 1; }
323 if (newpsr.sp) { ipsr->sp = 1; psr.sp = 1; }
324 if (newpsr.i) {
325 if (vcpu->vcpu_info->evtchn_upcall_mask)
326 enabling_interrupts = 1;
327 vcpu->vcpu_info->evtchn_upcall_mask = 0;
328 }
329 if (newpsr.ic) PSCB(vcpu,interrupt_collection_enabled) = 1;
330 if (newpsr.mfl) { ipsr->mfl = 1; psr.mfl = 1; }
331 if (newpsr.mfh) { ipsr->mfh = 1; psr.mfh = 1; }
332 if (newpsr.ac) { ipsr->ac = 1; psr.ac = 1; }
333 if (newpsr.up) { ipsr->up = 1; psr.up = 1; }
334 if (newpsr.dt && newpsr.rt) vcpu_set_metaphysical_mode(vcpu,FALSE);
335 else vcpu_set_metaphysical_mode(vcpu,TRUE);
336 if (newpsr.be) {
337 printf("*** DOMAIN TRYING TO TURN ON BIG-ENDIAN!!!\n");
338 return (IA64_ILLOP_FAULT);
339 }
340 if (enabling_interrupts &&
341 vcpu_check_pending_interrupts(vcpu) != SPURIOUS_VECTOR)
342 PSCB(vcpu,pending_interruption) = 1;
343 return IA64_NO_FAULT;
344 }
346 IA64FAULT vcpu_get_psr(VCPU *vcpu, UINT64 *pval)
347 {
348 REGS *regs = vcpu_regs(vcpu);
349 struct ia64_psr newpsr;
351 newpsr = *(struct ia64_psr *)&regs->cr_ipsr;
352 if (newpsr.cpl == 2) newpsr.cpl = 0;
353 if (!vcpu->vcpu_info->evtchn_upcall_mask) newpsr.i = 1;
354 else newpsr.i = 0;
355 if (PSCB(vcpu,interrupt_collection_enabled)) newpsr.ic = 1;
356 else newpsr.ic = 0;
357 if (PSCB(vcpu,metaphysical_mode)) newpsr.dt = 0;
358 else newpsr.dt = 1;
359 // FIXME: need new field in mapped_regs_t for virtual psr.pp (psr.be too?)
360 if (PSCB(vcpu,tmp[8])) newpsr.pp = 1;
361 else newpsr.pp = 0;
362 *pval = *(unsigned long *)&newpsr;
363 return IA64_NO_FAULT;
364 }
366 BOOLEAN vcpu_get_psr_ic(VCPU *vcpu)
367 {
368 return !!PSCB(vcpu,interrupt_collection_enabled);
369 }
371 BOOLEAN vcpu_get_psr_i(VCPU *vcpu)
372 {
373 return !vcpu->vcpu_info->evtchn_upcall_mask;
374 }
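/*
 * Overview (summary of the routine below): vcpu_get_ipsr_int_state() builds
 * the value placed in cr.ipsr when an interruption is reflected to the guest.
 * Starting from prevpsr it forces dt/it/rt on (the guest always runs with
 * translation enabled under Xen), takes be/pp from the virtual dcr, takes
 * i/ic/bn from the per-vcpu virtual state, and reports cpl 2 as 0 so the
 * ring-compressed guest kernel still believes it runs at privilege level 0.
 */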
376 UINT64 vcpu_get_ipsr_int_state(VCPU *vcpu,UINT64 prevpsr)
377 {
378 UINT64 dcr = PSCBX(vcpu,dcr);
379 PSR psr;
381 //printf("*** vcpu_get_ipsr_int_state (0x%016lx)...",prevpsr);
382 psr.i64 = prevpsr;
383 psr.ia64_psr.be = 0; if (dcr & IA64_DCR_BE) psr.ia64_psr.be = 1;
384 psr.ia64_psr.pp = 0; if (dcr & IA64_DCR_PP) psr.ia64_psr.pp = 1;
385 psr.ia64_psr.ic = PSCB(vcpu,interrupt_collection_enabled);
386 psr.ia64_psr.i = !vcpu->vcpu_info->evtchn_upcall_mask;
387 psr.ia64_psr.bn = PSCB(vcpu,banknum);
388 psr.ia64_psr.dt = 1; psr.ia64_psr.it = 1; psr.ia64_psr.rt = 1;
389 if (psr.ia64_psr.cpl == 2) psr.ia64_psr.cpl = 0; // !!!! fool domain
390 // psr.pk = 1;
391 //printf("returns 0x%016lx...",psr.i64);
392 return psr.i64;
393 }
395 /**************************************************************************
396 VCPU control register access routines
397 **************************************************************************/
399 IA64FAULT vcpu_get_dcr(VCPU *vcpu, UINT64 *pval)
400 {
401 //extern unsigned long privop_trace;
402 //privop_trace=0;
403 //verbose("vcpu_get_dcr: called @%p\n",PSCB(vcpu,iip));
404 // Reads of cr.dcr on Xen always have the sign bit set, so
405 // a domain can differentiate whether it is running on SP or not
406 *pval = PSCBX(vcpu,dcr) | 0x8000000000000000L;
407 return (IA64_NO_FAULT);
408 }
410 IA64FAULT vcpu_get_iva(VCPU *vcpu, UINT64 *pval)
411 {
412 if(VMX_DOMAIN(vcpu)){
413 *pval = PSCB(vcpu,iva) & ~0x7fffL;
414 }else{
415 *pval = PSCBX(vcpu,iva) & ~0x7fffL;
416 }
417 return (IA64_NO_FAULT);
418 }
420 IA64FAULT vcpu_get_pta(VCPU *vcpu, UINT64 *pval)
421 {
422 *pval = PSCB(vcpu,pta);
423 return (IA64_NO_FAULT);
424 }
426 IA64FAULT vcpu_get_ipsr(VCPU *vcpu, UINT64 *pval)
427 {
428 //REGS *regs = vcpu_regs(vcpu);
429 //*pval = regs->cr_ipsr;
430 *pval = PSCB(vcpu,ipsr);
431 return (IA64_NO_FAULT);
432 }
434 IA64FAULT vcpu_get_isr(VCPU *vcpu, UINT64 *pval)
435 {
436 *pval = PSCB(vcpu,isr);
437 return (IA64_NO_FAULT);
438 }
440 IA64FAULT vcpu_get_iip(VCPU *vcpu, UINT64 *pval)
441 {
442 //REGS *regs = vcpu_regs(vcpu);
443 //*pval = regs->cr_iip;
444 *pval = PSCB(vcpu,iip);
445 return (IA64_NO_FAULT);
446 }
448 IA64FAULT vcpu_get_ifa(VCPU *vcpu, UINT64 *pval)
449 {
450 UINT64 val = PSCB(vcpu,ifa);
451 REGS *regs = vcpu_regs(vcpu);
452 PRIVOP_COUNT_ADDR(regs,_GET_IFA);
453 *pval = val;
454 return (IA64_NO_FAULT);
455 }
457 unsigned long vcpu_get_rr_ps(VCPU *vcpu,UINT64 vadr)
458 {
459 ia64_rr rr;
461 rr.rrval = PSCB(vcpu,rrs)[vadr>>61];
462 return(rr.ps);
463 }
465 unsigned long vcpu_get_rr_rid(VCPU *vcpu,UINT64 vadr)
466 {
467 ia64_rr rr;
469 rr.rrval = PSCB(vcpu,rrs)[vadr>>61];
470 return(rr.rid);
471 }
473 unsigned long vcpu_get_itir_on_fault(VCPU *vcpu, UINT64 ifa)
474 {
475 ia64_rr rr;
477 rr.rrval = 0;
478 rr.ps = vcpu_get_rr_ps(vcpu,ifa);
479 rr.rid = vcpu_get_rr_rid(vcpu,ifa);
480 return (rr.rrval);
481 }
484 IA64FAULT vcpu_get_itir(VCPU *vcpu, UINT64 *pval)
485 {
486 UINT64 val = PSCB(vcpu,itir);
487 *pval = val;
488 return (IA64_NO_FAULT);
489 }
491 IA64FAULT vcpu_get_iipa(VCPU *vcpu, UINT64 *pval)
492 {
493 UINT64 val = PSCB(vcpu,iipa);
494 // SP entry code does not save iipa yet nor does it get
495 // properly delivered in the pscb
496 // printf("*** vcpu_get_iipa: cr.iipa not fully implemented yet!!\n");
497 *pval = val;
498 return (IA64_NO_FAULT);
499 }
501 IA64FAULT vcpu_get_ifs(VCPU *vcpu, UINT64 *pval)
502 {
503 //PSCB(vcpu,ifs) = PSCB(vcpu)->regs.cr_ifs;
504 //*pval = PSCB(vcpu,regs).cr_ifs;
505 *pval = PSCB(vcpu,ifs);
506 PSCB(vcpu,incomplete_regframe) = 0;
507 return (IA64_NO_FAULT);
508 }
510 IA64FAULT vcpu_get_iim(VCPU *vcpu, UINT64 *pval)
511 {
512 UINT64 val = PSCB(vcpu,iim);
513 *pval = val;
514 return (IA64_NO_FAULT);
515 }
517 IA64FAULT vcpu_get_iha(VCPU *vcpu, UINT64 *pval)
518 {
519 //return vcpu_thash(vcpu,PSCB(vcpu,ifa),pval);
520 UINT64 val = PSCB(vcpu,iha);
521 REGS *regs = vcpu_regs(vcpu);
522 PRIVOP_COUNT_ADDR(regs,_THASH);
523 *pval = val;
524 return (IA64_NO_FAULT);
525 }
527 IA64FAULT vcpu_set_dcr(VCPU *vcpu, UINT64 val)
528 {
529 //extern unsigned long privop_trace;
530 //privop_trace=1;
531 // Reads of cr.dcr on SP always have the sign bit set, so
532 // a domain can differentiate whether it is running on SP or not
533 // Thus, writes of DCR should ignore the sign bit
534 //verbose("vcpu_set_dcr: called\n");
535 PSCBX(vcpu,dcr) = val & ~0x8000000000000000L;
536 return (IA64_NO_FAULT);
537 }
539 IA64FAULT vcpu_set_iva(VCPU *vcpu, UINT64 val)
540 {
541 if(VMX_DOMAIN(vcpu)){
542 PSCB(vcpu,iva) = val & ~0x7fffL;
543 }else{
544 PSCBX(vcpu,iva) = val & ~0x7fffL;
545 }
546 return (IA64_NO_FAULT);
547 }
549 IA64FAULT vcpu_set_pta(VCPU *vcpu, UINT64 val)
550 {
551 if (val & IA64_PTA_LFMT) {
552 printf("*** No support for VHPT long format yet!!\n");
553 return (IA64_ILLOP_FAULT);
554 }
555 if (val & (0x3f<<9)) /* reserved fields */ return IA64_RSVDREG_FAULT;
556 if (val & 2) /* reserved fields */ return IA64_RSVDREG_FAULT;
557 PSCB(vcpu,pta) = val;
558 return IA64_NO_FAULT;
559 }
561 IA64FAULT vcpu_set_ipsr(VCPU *vcpu, UINT64 val)
562 {
563 PSCB(vcpu,ipsr) = val;
564 return IA64_NO_FAULT;
565 }
567 IA64FAULT vcpu_set_isr(VCPU *vcpu, UINT64 val)
568 {
569 PSCB(vcpu,isr) = val;
570 return IA64_NO_FAULT;
571 }
573 IA64FAULT vcpu_set_iip(VCPU *vcpu, UINT64 val)
574 {
575 PSCB(vcpu,iip) = val;
576 return IA64_NO_FAULT;
577 }
579 IA64FAULT vcpu_increment_iip(VCPU *vcpu)
580 {
581 REGS *regs = vcpu_regs(vcpu);
582 struct ia64_psr *ipsr = (struct ia64_psr *)&regs->cr_ipsr;
583 if (ipsr->ri == 2) { ipsr->ri=0; regs->cr_iip += 16; }
584 else ipsr->ri++;
585 return (IA64_NO_FAULT);
586 }
588 IA64FAULT vcpu_set_ifa(VCPU *vcpu, UINT64 val)
589 {
590 PSCB(vcpu,ifa) = val;
591 return IA64_NO_FAULT;
592 }
594 IA64FAULT vcpu_set_itir(VCPU *vcpu, UINT64 val)
595 {
596 PSCB(vcpu,itir) = val;
597 return IA64_NO_FAULT;
598 }
600 IA64FAULT vcpu_set_iipa(VCPU *vcpu, UINT64 val)
601 {
602 // SP entry code does not save iipa yet nor does it get
603 // properly delivered in the pscb
604 // printf("*** vcpu_set_iipa: cr.iipa not fully implemented yet!!\n");
605 PSCB(vcpu,iipa) = val;
606 return IA64_NO_FAULT;
607 }
609 IA64FAULT vcpu_set_ifs(VCPU *vcpu, UINT64 val)
610 {
611 //REGS *regs = vcpu_regs(vcpu);
612 PSCB(vcpu,ifs) = val;
613 return IA64_NO_FAULT;
614 }
616 IA64FAULT vcpu_set_iim(VCPU *vcpu, UINT64 val)
617 {
618 PSCB(vcpu,iim) = val;
619 return IA64_NO_FAULT;
620 }
622 IA64FAULT vcpu_set_iha(VCPU *vcpu, UINT64 val)
623 {
624 PSCB(vcpu,iha) = val;
625 return IA64_NO_FAULT;
626 }
628 /**************************************************************************
629 VCPU interrupt control register access routines
630 **************************************************************************/
632 void vcpu_pend_unspecified_interrupt(VCPU *vcpu)
633 {
634 PSCB(vcpu,pending_interruption) = 1;
635 }
637 void vcpu_pend_interrupt(VCPU *vcpu, UINT64 vector)
638 {
639 if (vector & ~0xff) {
640 printf("vcpu_pend_interrupt: bad vector\n");
641 return;
642 }
643 if ( VMX_DOMAIN(vcpu) ) {
644 set_bit(vector,VCPU(vcpu,irr));
645 } else
646 {
647 if (test_bit(vector,PSCBX(vcpu,irr))) {
648 //printf("vcpu_pend_interrupt: overrun\n");
649 }
650 set_bit(vector,PSCBX(vcpu,irr));
651 PSCB(vcpu,pending_interruption) = 1;
652 }
653 }
655 #define IA64_TPR_MMI 0x10000
656 #define IA64_TPR_MIC 0x000f0
658 /* checks to see if a VCPU has any unmasked pending interrupts
659 * if so, returns the highest, else returns SPURIOUS_VECTOR */
660 /* NOTE: Since this gets called from vcpu_get_ivr() and the
661 * semantics of "mov rx=cr.ivr" ignore the setting of the psr.i bit,
662 * this routine also ignores pscb.interrupt_delivery_enabled
663 * and this must be checked independently; see vcpu_deliverable_interrupts() */
664 UINT64 vcpu_check_pending_interrupts(VCPU *vcpu)
665 {
666 UINT64 *p, *r, bits, bitnum, mask, i, vector;
668 /* Always check for a pending event, since the guest may just ack the
669 * event injection without handling it. Later the guest may throw out
670 * the event itself.
671 */
672 check_start:
673 if (event_pending(vcpu) &&
674 !test_bit(vcpu->vcpu_info->arch.evtchn_vector,
675 &PSCBX(vcpu, insvc[0])))
676 vcpu_pend_interrupt(vcpu, vcpu->vcpu_info->arch.evtchn_vector);
678 p = &PSCBX(vcpu,irr[3]);
679 r = &PSCBX(vcpu,insvc[3]);
680 for (i = 3; ; p--, r--, i--) {
681 bits = *p ;
682 if (bits) break; // got a potential interrupt
683 if (*r) {
684 // nothing in this word which is pending+inservice
685 // but there is one inservice which masks lower
686 return SPURIOUS_VECTOR;
687 }
688 if (i == 0) {
689 // checked all bits... nothing pending+inservice
690 return SPURIOUS_VECTOR;
691 }
692 }
693 // have a pending,deliverable interrupt... see if it is masked
694 bitnum = ia64_fls(bits);
695 //printf("XXXXXXX vcpu_check_pending_interrupts: got bitnum=%p...",bitnum);
696 vector = bitnum+(i*64);
697 mask = 1L << bitnum;
698 /* sanity check for guest timer interrupt */
699 if (vector == (PSCB(vcpu,itv) & 0xff)) {
700 uint64_t now = ia64_get_itc();
701 if (now < PSCBX(vcpu,domain_itm)) {
702 // printk("Ooops, pending guest timer before its due\n");
703 PSCBX(vcpu,irr[i]) &= ~mask;
704 goto check_start;
705 }
706 }
707 //printf("XXXXXXX vcpu_check_pending_interrupts: got vector=%p...",vector);
708 if (*r >= mask) {
709 // masked by equal inservice
710 //printf("but masked by equal inservice\n");
711 return SPURIOUS_VECTOR;
712 }
713 if (PSCB(vcpu,tpr) & IA64_TPR_MMI) {
714 // tpr.mmi is set
715 //printf("but masked by tpr.mmi\n");
716 return SPURIOUS_VECTOR;
717 }
718 if (((PSCB(vcpu,tpr) & IA64_TPR_MIC) + 15) >= vector) {
719 //tpr.mic masks class
720 //printf("but masked by tpr.mic\n");
721 return SPURIOUS_VECTOR;
722 }
724 //printf("returned to caller\n");
725 return vector;
726 }
728 UINT64 vcpu_deliverable_interrupts(VCPU *vcpu)
729 {
730 return (vcpu_get_psr_i(vcpu) &&
731 vcpu_check_pending_interrupts(vcpu) != SPURIOUS_VECTOR);
732 }
734 UINT64 vcpu_deliverable_timer(VCPU *vcpu)
735 {
736 return (vcpu_get_psr_i(vcpu) &&
737 vcpu_check_pending_interrupts(vcpu) == PSCB(vcpu,itv));
738 }
740 IA64FAULT vcpu_get_lid(VCPU *vcpu, UINT64 *pval)
741 {
742 /* Use real LID for domain0 until vIOSAPIC is present.
743 Use EID=0, ID=vcpu_id for domU. */
744 if (vcpu->domain == dom0)
745 *pval = ia64_getreg(_IA64_REG_CR_LID);
746 else
747 *pval = vcpu->vcpu_id << 24;
748 return IA64_NO_FAULT;
749 }
751 IA64FAULT vcpu_get_ivr(VCPU *vcpu, UINT64 *pval)
752 {
753 int i;
754 UINT64 vector, mask;
756 #define HEARTBEAT_FREQ 16 // period in seconds
757 #ifdef HEARTBEAT_FREQ
758 #define N_DOMS 16 // max number of domains tracked
759 #if 0
760 static long count[N_DOMS] = { 0 };
761 #endif
762 static long nonclockcount[N_DOMS] = { 0 };
763 unsigned domid = vcpu->domain->domain_id;
764 #endif
765 #ifdef IRQ_DEBUG
766 static char firstivr = 1;
767 static char firsttime[256];
768 if (firstivr) {
769 int i;
770 for (i=0;i<256;i++) firsttime[i]=1;
771 firstivr=0;
772 }
773 #endif
775 vector = vcpu_check_pending_interrupts(vcpu);
776 if (vector == SPURIOUS_VECTOR) {
777 PSCB(vcpu,pending_interruption) = 0;
778 *pval = vector;
779 return IA64_NO_FAULT;
780 }
781 #ifdef HEARTBEAT_FREQ
782 if (domid >= N_DOMS) domid = N_DOMS-1;
783 #if 0
784 if (vector == (PSCB(vcpu,itv) & 0xff)) {
785 if (!(++count[domid] & ((HEARTBEAT_FREQ*1024)-1))) {
786 printf("Dom%d heartbeat... ticks=%lx,nonticks=%lx\n",
787 domid, count[domid], nonclockcount[domid]);
788 //count[domid] = 0;
789 //dump_runq();
790 }
791 }
792 #endif
793 else nonclockcount[domid]++;
794 #endif
795 // now have an unmasked, pending, deliverable vector!
796 // getting ivr has "side effects"
797 #ifdef IRQ_DEBUG
798 if (firsttime[vector]) {
799 printf("*** First get_ivr on vector=%lu,itc=%lx\n",
800 vector,ia64_get_itc());
801 firsttime[vector]=0;
802 }
803 #endif
804 /* if delivering a timer interrupt, remember domain_itm, which
805 * needs to be done before clearing irr
806 */
807 if (vector == (PSCB(vcpu,itv) & 0xff)) {
808 PSCBX(vcpu,domain_itm_last) = PSCBX(vcpu,domain_itm);
809 }
811 i = vector >> 6;
812 mask = 1L << (vector & 0x3f);
813 //printf("ZZZZZZ vcpu_get_ivr: setting insvc mask for vector %lu\n",vector);
814 PSCBX(vcpu,insvc[i]) |= mask;
815 PSCBX(vcpu,irr[i]) &= ~mask;
816 //PSCB(vcpu,pending_interruption)--;
817 *pval = vector;
818 return IA64_NO_FAULT;
819 }
821 IA64FAULT vcpu_get_tpr(VCPU *vcpu, UINT64 *pval)
822 {
823 *pval = PSCB(vcpu,tpr);
824 return (IA64_NO_FAULT);
825 }
827 IA64FAULT vcpu_get_eoi(VCPU *vcpu, UINT64 *pval)
828 {
829 *pval = 0L; // reads of eoi always return 0
830 return (IA64_NO_FAULT);
831 }
833 IA64FAULT vcpu_get_irr0(VCPU *vcpu, UINT64 *pval)
834 {
835 *pval = PSCBX(vcpu, irr[0]);
836 return (IA64_NO_FAULT);
837 }
839 IA64FAULT vcpu_get_irr1(VCPU *vcpu, UINT64 *pval)
840 {
841 *pval = PSCBX(vcpu, irr[1]);
842 return (IA64_NO_FAULT);
843 }
845 IA64FAULT vcpu_get_irr2(VCPU *vcpu, UINT64 *pval)
846 {
847 *pval = PSCBX(vcpu, irr[2]);
848 return (IA64_NO_FAULT);
849 }
851 IA64FAULT vcpu_get_irr3(VCPU *vcpu, UINT64 *pval)
852 {
853 *pval = PSCBX(vcpu, irr[3]);
854 return (IA64_NO_FAULT);
855 }
857 IA64FAULT vcpu_get_itv(VCPU *vcpu, UINT64 *pval)
858 {
859 *pval = PSCB(vcpu,itv);
860 return (IA64_NO_FAULT);
861 }
863 IA64FAULT vcpu_get_pmv(VCPU *vcpu, UINT64 *pval)
864 {
865 *pval = PSCB(vcpu,pmv);
866 return (IA64_NO_FAULT);
867 }
869 IA64FAULT vcpu_get_cmcv(VCPU *vcpu, UINT64 *pval)
870 {
871 *pval = PSCB(vcpu,cmcv);
872 return (IA64_NO_FAULT);
873 }
875 IA64FAULT vcpu_get_lrr0(VCPU *vcpu, UINT64 *pval)
876 {
877 // fix this when setting values other than m-bit is supported
878 printf("vcpu_get_lrr0: Unmasked interrupts unsupported\n");
879 *pval = (1L << 16);
880 return (IA64_NO_FAULT);
881 }
883 IA64FAULT vcpu_get_lrr1(VCPU *vcpu, UINT64 *pval)
884 {
885 // fix this when setting values other than m-bit is supported
886 printf("vcpu_get_lrr1: Unmasked interrupts unsupported\n");
887 *pval = (1L << 16);
888 return (IA64_NO_FAULT);
889 }
891 IA64FAULT vcpu_set_lid(VCPU *vcpu, UINT64 val)
892 {
893 printf("vcpu_set_lid: Setting cr.lid is unsupported\n");
894 return (IA64_ILLOP_FAULT);
895 }
897 IA64FAULT vcpu_set_tpr(VCPU *vcpu, UINT64 val)
898 {
899 if (val & 0xff00) return IA64_RSVDREG_FAULT;
900 PSCB(vcpu,tpr) = val;
901 /* This can unmask interrupts. */
902 if (vcpu_check_pending_interrupts(vcpu) != SPURIOUS_VECTOR)
903 PSCB(vcpu,pending_interruption) = 1;
904 return (IA64_NO_FAULT);
905 }
907 IA64FAULT vcpu_set_eoi(VCPU *vcpu, UINT64 val)
908 {
909 UINT64 *p, bits, vec, bitnum;
910 int i;
912 p = &PSCBX(vcpu,insvc[3]);
913 for (i = 3; (i >= 0) && !(bits = *p); i--, p--);
914 if (i < 0) {
915 printf("Trying to EOI interrupt when none are in-service.\n");
916 return IA64_NO_FAULT;
917 }
918 bitnum = ia64_fls(bits);
919 vec = bitnum + (i*64);
920 /* clear the correct bit */
921 bits &= ~(1L << bitnum);
922 *p = bits;
923 /* clearing an eoi bit may unmask another pending interrupt... */
924 if (!vcpu->vcpu_info->evtchn_upcall_mask) { // but only if enabled...
925 // worry about this later... Linux only calls eoi
926 // with interrupts disabled
927 printf("Trying to EOI interrupt with interrupts enabled\n");
928 }
929 if (vcpu_check_pending_interrupts(vcpu) != SPURIOUS_VECTOR)
930 PSCB(vcpu,pending_interruption) = 1;
931 //printf("YYYYY vcpu_set_eoi: Successful\n");
932 return (IA64_NO_FAULT);
933 }
935 IA64FAULT vcpu_set_lrr0(VCPU *vcpu, UINT64 val)
936 {
937 if (!(val & (1L << 16))) {
938 printf("vcpu_set_lrr0: Unmasked interrupts unsupported\n");
939 return (IA64_ILLOP_FAULT);
940 }
941 // no place to save this state but nothing to do anyway
942 return (IA64_NO_FAULT);
943 }
945 IA64FAULT vcpu_set_lrr1(VCPU *vcpu, UINT64 val)
946 {
947 if (!(val & (1L << 16))) {
948 printf("vcpu_set_lrr0: Unmasked interrupts unsupported\n");
949 return (IA64_ILLOP_FAULT);
950 }
951 // no place to save this state but nothing to do anyway
952 return (IA64_NO_FAULT);
953 }
955 IA64FAULT vcpu_set_itv(VCPU *vcpu, UINT64 val)
956 {
957 //extern unsigned long privop_trace;
958 //privop_trace=1;
959 if (val & 0xef00) return (IA64_ILLOP_FAULT);
960 PSCB(vcpu,itv) = val;
961 if (val & 0x10000) {
962 printf("**** vcpu_set_itv(%lu): vitm=%lx, setting to 0\n",
963 val,PSCBX(vcpu,domain_itm));
964 PSCBX(vcpu,domain_itm) = 0;
965 }
966 else vcpu_set_next_timer(vcpu);
967 return (IA64_NO_FAULT);
968 }
970 IA64FAULT vcpu_set_pmv(VCPU *vcpu, UINT64 val)
971 {
972 if (val & 0xef00) /* reserved fields */ return IA64_RSVDREG_FAULT;
973 PSCB(vcpu,pmv) = val;
974 return (IA64_NO_FAULT);
975 }
977 IA64FAULT vcpu_set_cmcv(VCPU *vcpu, UINT64 val)
978 {
979 if (val & 0xef00) /* reserved fields */ return IA64_RSVDREG_FAULT;
980 PSCB(vcpu,cmcv) = val;
981 return (IA64_NO_FAULT);
982 }
984 /**************************************************************************
985 VCPU temporary register access routines
986 **************************************************************************/
987 UINT64 vcpu_get_tmp(VCPU *vcpu, UINT64 index)
988 {
989 if (index > 7) return 0;
990 return PSCB(vcpu,tmp[index]);
991 }
993 void vcpu_set_tmp(VCPU *vcpu, UINT64 index, UINT64 val)
994 {
995 if (index <= 7) PSCB(vcpu,tmp[index]) = val;
996 }
998 /**************************************************************************
999 Interval timer routines
1000 **************************************************************************/
1002 BOOLEAN vcpu_timer_disabled(VCPU *vcpu)
1004 UINT64 itv = PSCB(vcpu,itv);
1005 return(!itv || !!(itv & 0x10000));
1008 BOOLEAN vcpu_timer_inservice(VCPU *vcpu)
1010 UINT64 itv = PSCB(vcpu,itv);
1011 return (test_bit(itv, PSCBX(vcpu,insvc)));
1014 BOOLEAN vcpu_timer_expired(VCPU *vcpu)
1016 unsigned long domain_itm = PSCBX(vcpu,domain_itm);
1017 unsigned long now = ia64_get_itc();
1019 if (!domain_itm) return FALSE;
1020 if (now < domain_itm) return FALSE;
1021 if (vcpu_timer_disabled(vcpu)) return FALSE;
1022 return TRUE;
1025 void vcpu_safe_set_itm(unsigned long val)
1027 unsigned long epsilon = 100;
1028 unsigned long flags;
1029 UINT64 now = ia64_get_itc();
1031 local_irq_save(flags);
1032 while (1) {
1033 //printf("*** vcpu_safe_set_itm: Setting itm to %lx, itc=%lx\n",val,now);
1034 ia64_set_itm(val);
1035 if (val > (now = ia64_get_itc())) break;
1036 val = now + epsilon;
1037 epsilon <<= 1;
1039 local_irq_restore(flags);
1042 void vcpu_set_next_timer(VCPU *vcpu)
1044 UINT64 d = PSCBX(vcpu,domain_itm);
1045 //UINT64 s = PSCBX(vcpu,xen_itm);
1046 UINT64 s = local_cpu_data->itm_next;
1047 UINT64 now = ia64_get_itc();
1049 /* gloss over the wraparound problem for now... we know it exists
1050 * but it doesn't matter right now */
1052 if (is_idle_domain(vcpu->domain)) {
1053 // printf("****** vcpu_set_next_timer called during idle!!\n");
1054 vcpu_safe_set_itm(s);
1055 return;
1057 //s = PSCBX(vcpu,xen_itm);
1058 if (d && (d > now) && (d < s)) {
1059 vcpu_safe_set_itm(d);
1060 //using_domain_as_itm++;
1062 else {
1063 vcpu_safe_set_itm(s);
1064 //using_xen_as_itm++;
1068 IA64FAULT vcpu_set_itm(VCPU *vcpu, UINT64 val)
1070 //UINT now = ia64_get_itc();
1072 //if (val < now) val = now + 1000;
1073 //printf("*** vcpu_set_itm: called with %lx\n",val);
1074 PSCBX(vcpu,domain_itm) = val;
1075 vcpu_set_next_timer(vcpu);
1076 return (IA64_NO_FAULT);
1079 IA64FAULT vcpu_set_itc(VCPU *vcpu, UINT64 val)
1081 #define DISALLOW_SETTING_ITC_FOR_NOW
1082 #ifdef DISALLOW_SETTING_ITC_FOR_NOW
1083 printf("vcpu_set_itc: Setting ar.itc is currently disabled\n");
1084 #else
1085 UINT64 oldnow = ia64_get_itc();
1086 UINT64 olditm = PSCBX(vcpu,domain_itm);
1087 unsigned long d = olditm - oldnow;
1088 unsigned long x = local_cpu_data->itm_next - oldnow;
1090 UINT64 newnow = val, min_delta;
1092 local_irq_disable();
1093 if (olditm) {
1094 printf("**** vcpu_set_itc(%lx): vitm changed to %lx\n",val,newnow+d);
1095 PSCBX(vcpu,domain_itm) = newnow + d;
1097 local_cpu_data->itm_next = newnow + x;
1098 d = PSCBX(vcpu,domain_itm);
1099 x = local_cpu_data->itm_next;
1101 ia64_set_itc(newnow);
1102 if (d && (d > newnow) && (d < x)) {
1103 vcpu_safe_set_itm(d);
1104 //using_domain_as_itm++;
1106 else {
1107 vcpu_safe_set_itm(x);
1108 //using_xen_as_itm++;
1110 local_irq_enable();
1111 #endif
1112 return (IA64_NO_FAULT);
1115 IA64FAULT vcpu_get_itm(VCPU *vcpu, UINT64 *pval)
1117 //FIXME: Implement this
1118 printf("vcpu_get_itm: Getting cr.itm is unsupported... continuing\n");
1119 return (IA64_NO_FAULT);
1120 //return (IA64_ILLOP_FAULT);
1123 IA64FAULT vcpu_get_itc(VCPU *vcpu, UINT64 *pval)
1125 //TODO: Implement this
1126 printf("vcpu_get_itc: Getting ar.itc is unsupported\n");
1127 return (IA64_ILLOP_FAULT);
1130 void vcpu_pend_timer(VCPU *vcpu)
1132 UINT64 itv = PSCB(vcpu,itv) & 0xff;
1134 if (vcpu_timer_disabled(vcpu)) return;
1135 //if (vcpu_timer_inservice(vcpu)) return;
1136 if (PSCBX(vcpu,domain_itm_last) == PSCBX(vcpu,domain_itm)) {
1137 // already delivered an interrupt for this so
1138 // don't deliver another
1139 return;
1141 vcpu_pend_interrupt(vcpu, itv);
1144 // returns true if ready to deliver a timer interrupt too early
1145 UINT64 vcpu_timer_pending_early(VCPU *vcpu)
1147 UINT64 now = ia64_get_itc();
1148 UINT64 itm = PSCBX(vcpu,domain_itm);
1150 if (vcpu_timer_disabled(vcpu)) return 0;
1151 if (!itm) return 0;
1152 return (vcpu_deliverable_timer(vcpu) && (now < itm));
1155 /**************************************************************************
1156 Privileged operation emulation routines
1157 **************************************************************************/
1159 IA64FAULT vcpu_force_data_miss(VCPU *vcpu, UINT64 ifa)
1161 PSCB(vcpu,ifa) = ifa;
1162 PSCB(vcpu,itir) = vcpu_get_itir_on_fault(vcpu,ifa);
1163 vcpu_thash(current, ifa, &PSCB(current,iha));
1164 return (vcpu_get_rr_ve(vcpu,ifa) ? IA64_DATA_TLB_VECTOR : IA64_ALT_DATA_TLB_VECTOR);
1168 IA64FAULT vcpu_rfi(VCPU *vcpu)
1170 // TODO: Only allowed for current vcpu
1171 PSR psr;
1172 UINT64 int_enable, regspsr = 0;
1173 UINT64 ifs;
1174 REGS *regs = vcpu_regs(vcpu);
1175 extern void dorfirfi(void);
1177 psr.i64 = PSCB(vcpu,ipsr);
1178 if (psr.ia64_psr.cpl < 3) psr.ia64_psr.cpl = 2;
1179 int_enable = psr.ia64_psr.i;
1180 if (psr.ia64_psr.ic) PSCB(vcpu,interrupt_collection_enabled) = 1;
1181 if (psr.ia64_psr.dt && psr.ia64_psr.rt && psr.ia64_psr.it) vcpu_set_metaphysical_mode(vcpu,FALSE);
1182 else vcpu_set_metaphysical_mode(vcpu,TRUE);
1183 psr.ia64_psr.ic = 1; psr.ia64_psr.i = 1;
1184 psr.ia64_psr.dt = 1; psr.ia64_psr.rt = 1; psr.ia64_psr.it = 1;
1185 psr.ia64_psr.bn = 1;
1186 //psr.pk = 1; // checking pkeys shouldn't be a problem but seems broken
1187 if (psr.ia64_psr.be) {
1188 printf("*** DOMAIN TRYING TO TURN ON BIG-ENDIAN!!!\n");
1189 return (IA64_ILLOP_FAULT);
1191 PSCB(vcpu,incomplete_regframe) = 0; // is this necessary?
1192 ifs = PSCB(vcpu,ifs);
1193 //if ((ifs & regs->cr_ifs & 0x8000000000000000L) && ifs != regs->cr_ifs) {
1194 //if ((ifs & 0x8000000000000000L) && ifs != regs->cr_ifs) {
1195 if (ifs & regs->cr_ifs & 0x8000000000000000L) {
1196 // TODO: validate PSCB(vcpu,iip)
1197 // TODO: PSCB(vcpu,ipsr) = psr;
1198 PSCB(vcpu,ipsr) = psr.i64;
1199 // now set up the trampoline
1200 regs->cr_iip = *(unsigned long *)dorfirfi; // function pointer!!
1201 __asm__ __volatile ("mov %0=psr;;":"=r"(regspsr)::"memory");
1202 regs->cr_ipsr = regspsr & ~(IA64_PSR_I | IA64_PSR_IC | IA64_PSR_BN);
1204 else {
1205 regs->cr_ipsr = psr.i64;
1206 regs->cr_iip = PSCB(vcpu,iip);
1208 PSCB(vcpu,interrupt_collection_enabled) = 1;
1209 vcpu_bsw1(vcpu);
1210 vcpu->vcpu_info->evtchn_upcall_mask = !int_enable;
1211 return (IA64_NO_FAULT);
1214 IA64FAULT vcpu_cover(VCPU *vcpu)
1216 // TODO: Only allowed for current vcpu
1217 REGS *regs = vcpu_regs(vcpu);
1219 if (!PSCB(vcpu,interrupt_collection_enabled)) {
1220 if (!PSCB(vcpu,incomplete_regframe))
1221 PSCB(vcpu,ifs) = regs->cr_ifs;
1222 else PSCB(vcpu,incomplete_regframe) = 0;
1224 regs->cr_ifs = 0;
1225 return (IA64_NO_FAULT);
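/*
 * Overview (summary of the routine below): vcpu_thash() computes the guest's
 * short-format VHPT hash address for vadr.  The result keeps the region bits
 * (63:61) of vadr; within bits 60:15 it takes the bits covered by the hash
 * table size (2^pta_sz) from ((vadr >> rr.ps) << 3) and the remaining high
 * bits from the PTA base; bits 14:0 come from that same offset.  In other
 * words, it yields the address of the 8-byte short-format VHPT entry that
 * would map vadr.
 */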
1228 IA64FAULT vcpu_thash(VCPU *vcpu, UINT64 vadr, UINT64 *pval)
1230 UINT64 pta = PSCB(vcpu,pta);
1231 UINT64 pta_sz = (pta & IA64_PTA_SZ(0x3f)) >> IA64_PTA_SZ_BIT;
1232 UINT64 pta_base = pta & ~((1UL << IA64_PTA_BASE_BIT)-1);
1233 UINT64 Mask = (1L << pta_sz) - 1;
1234 UINT64 Mask_60_15 = (Mask >> 15) & 0x3fffffffffff;
1235 UINT64 compMask_60_15 = ~Mask_60_15;
1236 UINT64 rr_ps = vcpu_get_rr_ps(vcpu,vadr);
1237 UINT64 VHPT_offset = (vadr >> rr_ps) << 3;
1238 UINT64 VHPT_addr1 = vadr & 0xe000000000000000L;
1239 UINT64 VHPT_addr2a =
1240 ((pta_base >> 15) & 0x3fffffffffff) & compMask_60_15;
1241 UINT64 VHPT_addr2b =
1242 ((VHPT_offset >> 15) & 0x3fffffffffff) & Mask_60_15;
1243 UINT64 VHPT_addr3 = VHPT_offset & 0x7fff;
1244 UINT64 VHPT_addr = VHPT_addr1 | ((VHPT_addr2a | VHPT_addr2b) << 15) |
1245 VHPT_addr3;
1247 //verbose("vcpu_thash: vadr=%p, VHPT_addr=%p\n",vadr,VHPT_addr);
1248 *pval = VHPT_addr;
1249 return (IA64_NO_FAULT);
1252 IA64FAULT vcpu_ttag(VCPU *vcpu, UINT64 vadr, UINT64 *padr)
1254 printf("vcpu_ttag: ttag instruction unsupported\n");
1255 return (IA64_ILLOP_FAULT);
1258 unsigned long vhpt_translate_count = 0;
1259 unsigned long fast_vhpt_translate_count = 0;
1260 unsigned long recover_to_page_fault_count = 0;
1261 unsigned long recover_to_break_fault_count = 0;
1263 int warn_region0_address = 0; // FIXME later: tie to a boot parameter?
1265 // FIXME: also need to check && (!trp->key || vcpu_pkr_match(trp->key))
1266 static inline int vcpu_match_tr_entry_no_p(TR_ENTRY *trp, UINT64 ifa, UINT64 rid)
1268 return trp->rid == rid
1269 && ifa >= trp->vadr
1270 && ifa <= (trp->vadr + (1L << trp->ps) - 1);
1273 static inline int vcpu_match_tr_entry(TR_ENTRY *trp, UINT64 ifa, UINT64 rid)
1275 return trp->pte.p && vcpu_match_tr_entry_no_p(trp, ifa, rid);
1278 // in_tpa is not used when CONFIG_XEN_IA64_DOM0_VP
1279 IA64FAULT vcpu_translate(VCPU *vcpu, UINT64 address, BOOLEAN is_data, BOOLEAN in_tpa, UINT64 *pteval, UINT64 *itir, UINT64 *iha)
1281 unsigned long region = address >> 61;
1282 unsigned long pta, rid, rr;
1283 union pte_flags pte;
1284 int i;
1285 TR_ENTRY *trp;
1287 if (PSCB(vcpu,metaphysical_mode) && !(!is_data && region)) {
1288 // dom0 may generate an uncacheable physical address (msb=1)
1289 if (region && ((region != 4) || (vcpu->domain != dom0))) {
1290 // FIXME: This seems to happen even though it shouldn't. Need to track
1291 // this down, but since it has been apparently harmless, just flag it for now
1292 // panic_domain(vcpu_regs(vcpu),
1294 /*
1295 * Guest may execute itc.d and rfi with psr.dt=0.
1296 * When the VMM then tries to fetch the opcode, a TLB miss may occur;
1297 * at that point PSCB(vcpu,metaphysical_mode)==1 and region==5, and the
1298 * VMM needs to handle the TLB miss as if
1299 * PSCB(vcpu,metaphysical_mode) were 0.
1300 */
1301 printk("vcpu_translate: bad physical address: 0x%lx\n",
1302 address);
1304 } else {
1305 *pteval = (address & _PAGE_PPN_MASK) | __DIRTY_BITS |
1306 _PAGE_PL_2 | _PAGE_AR_RWX;
1307 *itir = PAGE_SHIFT << 2;
1308 phys_translate_count++;
1309 return IA64_NO_FAULT;
1312 else if (!region && warn_region0_address) {
1313 REGS *regs = vcpu_regs(vcpu);
1314 unsigned long viip = PSCB(vcpu,iip);
1315 unsigned long vipsr = PSCB(vcpu,ipsr);
1316 unsigned long iip = regs->cr_iip;
1317 unsigned long ipsr = regs->cr_ipsr;
1318 printk("vcpu_translate: bad address 0x%lx, viip=0x%lx, vipsr=0x%lx, iip=0x%lx, ipsr=0x%lx continuing\n",
1319 address, viip, vipsr, iip, ipsr);
1322 rr = PSCB(vcpu,rrs)[region];
1323 rid = rr & RR_RID_MASK;
1324 if (is_data) {
1325 if (vcpu_quick_region_check(vcpu->arch.dtr_regions,address)) {
1326 for (trp = vcpu->arch.dtrs, i = NDTRS; i; i--, trp++) {
1327 if (vcpu_match_tr_entry(trp,address,rid)) {
1328 *pteval = trp->pte.val;
1329 *itir = trp->itir;
1330 tr_translate_count++;
1331 return IA64_NO_FAULT;
1336 // FIXME?: check itr's for data accesses too, else bad things happen?
1337 /* else */ {
1338 if (vcpu_quick_region_check(vcpu->arch.itr_regions,address)) {
1339 for (trp = vcpu->arch.itrs, i = NITRS; i; i--, trp++) {
1340 if (vcpu_match_tr_entry(trp,address,rid)) {
1341 *pteval = trp->pte.val;
1342 *itir = trp->itir;
1343 tr_translate_count++;
1344 return IA64_NO_FAULT;
1350 /* check 1-entry TLB */
1351 // FIXME?: check dtlb for inst accesses too, else bad things happen?
1352 trp = &vcpu->arch.dtlb;
1353 pte = trp->pte;
1354 if (/* is_data && */ pte.p
1355 && vcpu_match_tr_entry_no_p(trp,address,rid)) {
1356 #ifndef CONFIG_XEN_IA64_DOM0_VP
1357 if (vcpu->domain==dom0 && !in_tpa)
1358 *pteval = pte.val;
1359 else
1360 #endif
1361 *pteval = vcpu->arch.dtlb_pte;
1362 *itir = trp->itir;
1363 dtlb_translate_count++;
1364 return IA64_USE_TLB;
1367 /* check guest VHPT */
1368 pta = PSCB(vcpu,pta);
1369 if (pta & IA64_PTA_VF) { /* long format VHPT - not implemented */
1370 panic_domain(vcpu_regs(vcpu),"can't do long format VHPT\n");
1371 //return (is_data ? IA64_DATA_TLB_VECTOR:IA64_INST_TLB_VECTOR);
1374 *itir = rr & (RR_RID_MASK | RR_PS_MASK);
1375 // note: architecturally, iha is optionally set for alt faults but
1376 // xenlinux depends on it so should document it as part of PV interface
1377 vcpu_thash(vcpu, address, iha);
1378 if (!(rr & RR_VE_MASK) || !(pta & IA64_PTA_VE))
1379 return (is_data ? IA64_ALT_DATA_TLB_VECTOR : IA64_ALT_INST_TLB_VECTOR);
1381 /* avoid recursively walking (short format) VHPT */
1382 if (((address ^ pta) & ((itir_mask(pta) << 3) >> 3)) == 0)
1383 return (is_data ? IA64_DATA_TLB_VECTOR : IA64_INST_TLB_VECTOR);
1385 if (!__access_ok (*iha)
1386 || __copy_from_user(&pte, (void *)(*iha), sizeof(pte)) != 0)
1387 // virtual VHPT walker "missed" in TLB
1388 return IA64_VHPT_FAULT;
1390 /*
1391 * Optimisation: this VHPT walker aborts on not-present pages
1392 * instead of inserting a not-present translation, this allows
1393 * vectoring directly to the miss handler.
1394 */
1395 if (!pte.p)
1396 return (is_data ? IA64_DATA_TLB_VECTOR : IA64_INST_TLB_VECTOR);
1398 /* found mapping in guest VHPT! */
1399 *itir = rr & RR_PS_MASK;
1400 *pteval = pte.val;
1401 vhpt_translate_count++;
1402 return IA64_NO_FAULT;
1405 IA64FAULT vcpu_tpa(VCPU *vcpu, UINT64 vadr, UINT64 *padr)
1407 UINT64 pteval, itir, mask, iha;
1408 IA64FAULT fault;
1410 fault = vcpu_translate(vcpu, vadr, TRUE, TRUE, &pteval, &itir, &iha);
1411 if (fault == IA64_NO_FAULT || fault == IA64_USE_TLB)
1413 mask = itir_mask(itir);
1414 *padr = (pteval & _PAGE_PPN_MASK & mask) | (vadr & ~mask);
1415 return (IA64_NO_FAULT);
1417 return vcpu_force_data_miss(vcpu,vadr);
1420 IA64FAULT vcpu_tak(VCPU *vcpu, UINT64 vadr, UINT64 *key)
1422 printf("vcpu_tak: tak instruction unsupported\n");
1423 return (IA64_ILLOP_FAULT);
1424 // HACK ALERT: tak does a thash for now
1425 //return vcpu_thash(vcpu,vadr,key);
1428 /**************************************************************************
1429 VCPU debug breakpoint register access routines
1430 **************************************************************************/
1432 IA64FAULT vcpu_set_dbr(VCPU *vcpu, UINT64 reg, UINT64 val)
1434 // TODO: unimplemented DBRs return a reserved register fault
1435 // TODO: Should set Logical CPU state, not just physical
1436 ia64_set_dbr(reg,val);
1437 return (IA64_NO_FAULT);
1440 IA64FAULT vcpu_set_ibr(VCPU *vcpu, UINT64 reg, UINT64 val)
1442 // TODO: unimplemented IBRs return a reserved register fault
1443 // TODO: Should set Logical CPU state, not just physical
1444 ia64_set_ibr(reg,val);
1445 return (IA64_NO_FAULT);
1448 IA64FAULT vcpu_get_dbr(VCPU *vcpu, UINT64 reg, UINT64 *pval)
1450 // TODO: unimplemented DBRs return a reserved register fault
1451 UINT64 val = ia64_get_dbr(reg);
1452 *pval = val;
1453 return (IA64_NO_FAULT);
1456 IA64FAULT vcpu_get_ibr(VCPU *vcpu, UINT64 reg, UINT64 *pval)
1458 // TODO: unimplemented IBRs return a reserved register fault
1459 UINT64 val = ia64_get_ibr(reg);
1460 *pval = val;
1461 return (IA64_NO_FAULT);
1464 /**************************************************************************
1465 VCPU performance monitor register access routines
1466 **************************************************************************/
1468 IA64FAULT vcpu_set_pmc(VCPU *vcpu, UINT64 reg, UINT64 val)
1470 // TODO: Should set Logical CPU state, not just physical
1471 // NOTE: Writes to unimplemented PMC registers are discarded
1472 #ifdef DEBUG_PFMON
1473 printf("vcpu_set_pmc(%x,%lx)\n",reg,val);
1474 #endif
1475 ia64_set_pmc(reg,val);
1476 return (IA64_NO_FAULT);
1479 IA64FAULT vcpu_set_pmd(VCPU *vcpu, UINT64 reg, UINT64 val)
1481 // TODO: Should set Logical CPU state, not just physical
1482 // NOTE: Writes to unimplemented PMD registers are discarded
1483 #ifdef DEBUG_PFMON
1484 printf("vcpu_set_pmd(%x,%lx)\n",reg,val);
1485 #endif
1486 ia64_set_pmd(reg,val);
1487 return (IA64_NO_FAULT);
1490 IA64FAULT vcpu_get_pmc(VCPU *vcpu, UINT64 reg, UINT64 *pval)
1492 // NOTE: Reads from unimplemented PMC registers return zero
1493 UINT64 val = (UINT64)ia64_get_pmc(reg);
1494 #ifdef DEBUG_PFMON
1495 printf("%lx=vcpu_get_pmc(%x)\n",val,reg);
1496 #endif
1497 *pval = val;
1498 return (IA64_NO_FAULT);
1501 IA64FAULT vcpu_get_pmd(VCPU *vcpu, UINT64 reg, UINT64 *pval)
1503 // NOTE: Reads from unimplemented PMD registers return zero
1504 UINT64 val = (UINT64)ia64_get_pmd(reg);
1505 #ifdef DEBUG_PFMON
1506 printf("%lx=vcpu_get_pmd(%x)\n",val,reg);
1507 #endif
1508 *pval = val;
1509 return (IA64_NO_FAULT);
1512 /**************************************************************************
1513 VCPU banked general register access routines
1514 **************************************************************************/
1515 #define vcpu_bsw0_unat(i,b0unat,b1unat,runat,IA64_PT_REGS_R16_SLOT) \
1516 do{ \
1517 __asm__ __volatile__ ( \
1518 ";;extr.u %0 = %3,%6,16;;\n" \
1519 "dep %1 = %0, %1, 0, 16;;\n" \
1520 "st8 [%4] = %1\n" \
1521 "extr.u %0 = %2, 16, 16;;\n" \
1522 "dep %3 = %0, %3, %6, 16;;\n" \
1523 "st8 [%5] = %3\n" \
1524 ::"r"(i),"r"(*b1unat),"r"(*b0unat),"r"(*runat),"r"(b1unat), \
1525 "r"(runat),"i"(IA64_PT_REGS_R16_SLOT):"memory"); \
1526 }while(0)
1528 IA64FAULT vcpu_bsw0(VCPU *vcpu)
1530 // TODO: Only allowed for current vcpu
1531 REGS *regs = vcpu_regs(vcpu);
1532 unsigned long *r = &regs->r16;
1533 unsigned long *b0 = &PSCB(vcpu,bank0_regs[0]);
1534 unsigned long *b1 = &PSCB(vcpu,bank1_regs[0]);
1535 unsigned long *runat = &regs->eml_unat;
1536 unsigned long *b0unat = &PSCB(vcpu,vbnat);
1537 unsigned long *b1unat = &PSCB(vcpu,vnat);
1539 unsigned long i;
1541 if(VMX_DOMAIN(vcpu)){
1542 if(VCPU(vcpu,vpsr)&IA64_PSR_BN){
1543 for (i = 0; i < 16; i++) { *b1++ = *r; *r++ = *b0++; }
1544 vcpu_bsw0_unat(i,b0unat,b1unat,runat,IA64_PT_REGS_R16_SLOT);
1545 VCPU(vcpu,vpsr) &= ~IA64_PSR_BN;
1547 }else{
1548 if (PSCB(vcpu,banknum)) {
1549 for (i = 0; i < 16; i++) { *b1++ = *r; *r++ = *b0++; }
1550 vcpu_bsw0_unat(i,b0unat,b1unat,runat,IA64_PT_REGS_R16_SLOT);
1551 PSCB(vcpu,banknum) = 0;
1554 return (IA64_NO_FAULT);
1557 #define vcpu_bsw1_unat(i,b0unat,b1unat,runat,IA64_PT_REGS_R16_SLOT) \
1558 do{ \
1559 __asm__ __volatile__ ( \
1560 ";;extr.u %0 = %3,%6,16;;\n" \
1561 "dep %1 = %0, %1, 16, 16;;\n" \
1562 "st8 [%4] = %1\n" \
1563 "extr.u %0 = %2, 0, 16;;\n" \
1564 "dep %3 = %0, %3, %6, 16;;\n" \
1565 "st8 [%5] = %3\n" \
1566 ::"r"(i),"r"(*b0unat),"r"(*b1unat),"r"(*runat),"r"(b0unat), \
1567 "r"(runat),"i"(IA64_PT_REGS_R16_SLOT):"memory"); \
1568 }while(0)
1570 IA64FAULT vcpu_bsw1(VCPU *vcpu)
1572 // TODO: Only allowed for current vcpu
1573 REGS *regs = vcpu_regs(vcpu);
1574 unsigned long *r = &regs->r16;
1575 unsigned long *b0 = &PSCB(vcpu,bank0_regs[0]);
1576 unsigned long *b1 = &PSCB(vcpu,bank1_regs[0]);
1577 unsigned long *runat = &regs->eml_unat;
1578 unsigned long *b0unat = &PSCB(vcpu,vbnat);
1579 unsigned long *b1unat = &PSCB(vcpu,vnat);
1581 unsigned long i;
1583 if(VMX_DOMAIN(vcpu)){
1584 if(!(VCPU(vcpu,vpsr)&IA64_PSR_BN)){
1585 for (i = 0; i < 16; i++) { *b0++ = *r; *r++ = *b1++; }
1586 vcpu_bsw1_unat(i,b0unat,b1unat,runat,IA64_PT_REGS_R16_SLOT);
1587 VCPU(vcpu,vpsr) |= IA64_PSR_BN;
1589 }else{
1590 if (!PSCB(vcpu,banknum)) {
1591 for (i = 0; i < 16; i++) { *b0++ = *r; *r++ = *b1++; }
1592 vcpu_bsw1_unat(i,b0unat,b1unat,runat,IA64_PT_REGS_R16_SLOT);
1593 PSCB(vcpu,banknum) = 1;
1596 return (IA64_NO_FAULT);
1599 /**************************************************************************
1600 VCPU cpuid access routines
1601 **************************************************************************/
1604 IA64FAULT vcpu_get_cpuid(VCPU *vcpu, UINT64 reg, UINT64 *pval)
1606 // FIXME: This could get called as a result of a rsvd-reg fault
1607 // if reg > 3
1608 switch(reg) {
1609 case 0:
1610 memcpy(pval,"Xen/ia64",8);
1611 break;
1612 case 1:
1613 *pval = 0;
1614 break;
1615 case 2:
1616 *pval = 0;
1617 break;
1618 case 3:
1619 *pval = ia64_get_cpuid(3);
1620 break;
1621 case 4:
1622 *pval = ia64_get_cpuid(4);
1623 break;
1624 default:
1625 if (reg > (ia64_get_cpuid(3) & 0xff))
1626 return IA64_RSVDREG_FAULT;
1627 *pval = ia64_get_cpuid(reg);
1628 break;
1630 return (IA64_NO_FAULT);
1633 /**************************************************************************
1634 VCPU region register access routines
1635 **************************************************************************/
1637 unsigned long vcpu_get_rr_ve(VCPU *vcpu,UINT64 vadr)
1639 ia64_rr rr;
1641 rr.rrval = PSCB(vcpu,rrs)[vadr>>61];
1642 return(rr.ve);
1645 IA64FAULT vcpu_set_rr(VCPU *vcpu, UINT64 reg, UINT64 val)
1647 PSCB(vcpu,rrs)[reg>>61] = val;
1648 // warning: set_one_rr() does it "live"
1649 set_one_rr(reg,val);
1650 return (IA64_NO_FAULT);
1653 IA64FAULT vcpu_get_rr(VCPU *vcpu, UINT64 reg, UINT64 *pval)
1655 if(VMX_DOMAIN(vcpu)){
1656 *pval = VMX(vcpu,vrr[reg>>61]);
1657 }else{
1658 *pval = PSCB(vcpu,rrs)[reg>>61];
1660 return (IA64_NO_FAULT);
1663 /**************************************************************************
1664 VCPU protection key register access routines
1665 **************************************************************************/
1667 IA64FAULT vcpu_get_pkr(VCPU *vcpu, UINT64 reg, UINT64 *pval)
1669 #ifndef PKR_USE_FIXED
1670 printk("vcpu_get_pkr: called, not implemented yet\n");
1671 return IA64_ILLOP_FAULT;
1672 #else
1673 UINT64 val = (UINT64)ia64_get_pkr(reg);
1674 *pval = val;
1675 return (IA64_NO_FAULT);
1676 #endif
1679 IA64FAULT vcpu_set_pkr(VCPU *vcpu, UINT64 reg, UINT64 val)
1681 #ifndef PKR_USE_FIXED
1682 printk("vcpu_set_pkr: called, not implemented yet\n");
1683 return IA64_ILLOP_FAULT;
1684 #else
1685 // if (reg >= NPKRS) return (IA64_ILLOP_FAULT);
1686 vcpu->pkrs[reg] = val;
1687 ia64_set_pkr(reg,val);
1688 return (IA64_NO_FAULT);
1689 #endif
1692 /**************************************************************************
1693 VCPU translation register access routines
1694 **************************************************************************/
1696 void vcpu_purge_tr_entry(TR_ENTRY *trp)
1698 trp->pte.val = 0;
1701 static void vcpu_set_tr_entry(TR_ENTRY *trp, UINT64 pte, UINT64 itir, UINT64 ifa)
1703 UINT64 ps;
1704 union pte_flags new_pte;
1706 trp->itir = itir;
1707 trp->rid = VCPU(current,rrs[ifa>>61]) & RR_RID_MASK;
1708 ps = trp->ps;
1709 new_pte.val = pte;
1710 if (new_pte.pl < 2) new_pte.pl = 2;
1711 trp->vadr = ifa & ~0xfff;
1712 if (ps > 12) { // "ignore" relevant low-order bits
1713 new_pte.ppn &= ~((1UL<<(ps-12))-1);
1714 trp->vadr &= ~((1UL<<ps)-1);
1717 /* Atomic write. */
1718 trp->pte.val = new_pte.val;
1721 IA64FAULT vcpu_itr_d(VCPU *vcpu, UINT64 slot, UINT64 pte,
1722 UINT64 itir, UINT64 ifa)
1724 TR_ENTRY *trp;
1726 if (slot >= NDTRS) return IA64_RSVDREG_FAULT;
1727 trp = &PSCBX(vcpu,dtrs[slot]);
1728 //printf("***** itr.d: setting slot %d: ifa=%p\n",slot,ifa);
1729 vcpu_set_tr_entry(trp,pte,itir,ifa);
1730 vcpu_quick_region_set(PSCBX(vcpu,dtr_regions),ifa);
1731 return IA64_NO_FAULT;
1734 IA64FAULT vcpu_itr_i(VCPU *vcpu, UINT64 slot, UINT64 pte,
1735 UINT64 itir, UINT64 ifa)
1737 TR_ENTRY *trp;
1739 if (slot >= NITRS) return IA64_RSVDREG_FAULT;
1740 trp = &PSCBX(vcpu,itrs[slot]);
1741 //printf("***** itr.i: setting slot %d: ifa=%p\n",slot,ifa);
1742 vcpu_set_tr_entry(trp,pte,itir,ifa);
1743 vcpu_quick_region_set(PSCBX(vcpu,itr_regions),ifa);
1744 return IA64_NO_FAULT;
1747 /**************************************************************************
1748 VCPU translation cache access routines
1749 **************************************************************************/
1751 void foobar(void) { /*vcpu_verbose = 1;*/ }
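/*
 * Overview (summary of vcpu_itc_no_srlz() below): installs a translation
 * without serializing.  It clears psr.ic, inserts the pte into the machine
 * TLB with ia64_itc() (instruction and/or data side according to the IorD
 * bits), mirrors it into the global VHPT when VHPT_GLOBAL is enabled, and
 * records it in the per-vcpu 1-entry itlb/dtlb along with the metaphysical
 * pte; the 1-entry TLB is skipped when mp_pte is -1 or bit 2 of IorD is set.
 */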
1753 void vcpu_itc_no_srlz(VCPU *vcpu, UINT64 IorD, UINT64 vaddr, UINT64 pte, UINT64 mp_pte, UINT64 logps)
1755 unsigned long psr;
1756 unsigned long ps = (vcpu->domain==dom0) ? logps : PAGE_SHIFT;
1758 // FIXME: validate ifa here (not in Xen space), COULD MACHINE CHECK!
1759 // FIXME, must be inlined or potential for nested fault here!
1760 if ((vcpu->domain==dom0) && (logps < PAGE_SHIFT)) {
1761 printf("vcpu_itc_no_srlz: domain0 use of smaller page size!\n");
1762 //FIXME: kill domain here
1763 while(1);
1765 #ifdef CONFIG_XEN_IA64_DOM0_VP
1766 BUG_ON(logps > PAGE_SHIFT);
1767 #endif
1768 psr = ia64_clear_ic();
1769 ia64_itc(IorD,vaddr,pte,ps); // FIXME: look for bigger mappings
1770 ia64_set_psr(psr);
1771 // ia64_srlz_i(); // no srls req'd, will rfi later
1772 #ifdef VHPT_GLOBAL
1773 if (vcpu->domain==dom0 && ((vaddr >> 61) == 7)) {
1774 // FIXME: this is dangerous... vhpt_flush_address ensures these
1775 // addresses never get flushed. More work needed if this
1776 // ever happens.
1777 //printf("vhpt_insert(%p,%p,%p)\n",vaddr,pte,1L<<logps);
1778 if (logps > PAGE_SHIFT) vhpt_multiple_insert(vaddr,pte,logps);
1779 else vhpt_insert(vaddr,pte,logps<<2);
1781 // even if domain pagesize is larger than PAGE_SIZE, just put
1782 // PAGE_SIZE mapping in the vhpt for now, else purging is complicated
1783 else vhpt_insert(vaddr,pte,PAGE_SHIFT<<2);
1784 #endif
1785 if ((mp_pte == -1UL) || (IorD & 0x4)) // don't place in 1-entry TLB
1786 return;
1787 if (IorD & 0x1) {
1788 vcpu_set_tr_entry(&PSCBX(vcpu,itlb),pte,ps<<2,vaddr);
1789 PSCBX(vcpu,itlb_pte) = mp_pte;
1791 if (IorD & 0x2) {
1792 vcpu_set_tr_entry(&PSCBX(vcpu,dtlb),pte,ps<<2,vaddr);
1793 PSCBX(vcpu,dtlb_pte) = mp_pte;
1797 IA64FAULT vcpu_itc_d(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa)
1799 unsigned long pteval, logps = itir_ps(itir);
1800 BOOLEAN swap_rr0 = (!(ifa>>61) && PSCB(vcpu,metaphysical_mode));
1802 if (logps < PAGE_SHIFT) {
1803 printf("vcpu_itc_d: domain trying to use smaller page size!\n");
1804 //FIXME: kill domain here
1805 while(1);
1807 //itir = (itir & ~0xfc) | (PAGE_SHIFT<<2); // ignore domain's pagesize
1808 pteval = translate_domain_pte(pte, ifa, itir, &logps);
1809 if (!pteval) return IA64_ILLOP_FAULT;
1810 if (swap_rr0) set_one_rr(0x0,PSCB(vcpu,rrs[0]));
1811 vcpu_itc_no_srlz(vcpu,2,ifa,pteval,pte,logps);
1812 if (swap_rr0) set_metaphysical_rr0();
1813 return IA64_NO_FAULT;
1816 IA64FAULT vcpu_itc_i(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa)
1818 unsigned long pteval, logps = itir_ps(itir);
1819 BOOLEAN swap_rr0 = (!(ifa>>61) && PSCB(vcpu,metaphysical_mode));
1821 // FIXME: validate ifa here (not in Xen space), COULD MACHINE CHECK!
1822 if (logps < PAGE_SHIFT) {
1823 printf("vcpu_itc_i: domain trying to use smaller page size!\n");
1824 //FIXME: kill domain here
1825 while(1);
1827 //itir = (itir & ~0xfc) | (PAGE_SHIFT<<2); // ignore domain's pagesize
1828 pteval = translate_domain_pte(pte, ifa, itir, &logps);
1829 // FIXME: what to do if bad physical address? (machine check?)
1830 if (!pteval) return IA64_ILLOP_FAULT;
1831 if (swap_rr0) set_one_rr(0x0,PSCB(vcpu,rrs[0]));
1832 vcpu_itc_no_srlz(vcpu, 1,ifa,pteval,pte,logps);
1833 if (swap_rr0) set_metaphysical_rr0();
1834 return IA64_NO_FAULT;
1837 IA64FAULT vcpu_ptc_l(VCPU *vcpu, UINT64 vadr, UINT64 addr_range)
1839 printk("vcpu_ptc_l: called, not implemented yet\n");
1840 return IA64_ILLOP_FAULT;
1843 // At privlvl=0, fc performs no access rights or protection key checks, while
1844 // at privlvl!=0, fc performs access rights checks as if it were a 1-byte
1845 // read but no protection key check. Thus in order to avoid an unexpected
1846 // access rights fault, we have to translate the virtual address to a
1847 // physical address (possibly via a metaphysical address) and do the fc
1848 // on the physical address, which is guaranteed to flush the same cache line
1849 IA64FAULT vcpu_fc(VCPU *vcpu, UINT64 vadr)
1851 // TODO: Only allowed for current vcpu
1852 UINT64 mpaddr, paddr;
1853 IA64FAULT fault;
1855 fault = vcpu_tpa(vcpu, vadr, &mpaddr);
1856 if (fault == IA64_NO_FAULT) {
1857 paddr = translate_domain_mpaddr(mpaddr);
1858 ia64_fc(__va(paddr));
1860 return fault;
1863 int ptce_count = 0;
1864 IA64FAULT vcpu_ptc_e(VCPU *vcpu, UINT64 vadr)
1866 // Note that this only needs to be called once, i.e. the
1867 // architected loop to purge the entire TLB should use
1868 // base = stride1 = stride2 = 0, count0 = count1 = 1
1870 #ifdef VHPT_GLOBAL
1871 vhpt_flush(); // FIXME: This is overdoing it
1872 #endif
1873 local_flush_tlb_all();
1874 // just invalidate the "whole" tlb
1875 vcpu_purge_tr_entry(&PSCBX(vcpu,dtlb));
1876 vcpu_purge_tr_entry(&PSCBX(vcpu,itlb));
1877 return IA64_NO_FAULT;
1880 IA64FAULT vcpu_ptc_g(VCPU *vcpu, UINT64 vadr, UINT64 addr_range)
1882 printk("vcpu_ptc_g: called, not implemented yet\n");
1883 return IA64_ILLOP_FAULT;
1886 IA64FAULT vcpu_ptc_ga(VCPU *vcpu,UINT64 vadr,UINT64 addr_range)
1888 // FIXME: validate not flushing Xen addresses
1889 // if (Xen address) return(IA64_ILLOP_FAULT);
1890 // FIXME: ??breaks if domain PAGE_SIZE < Xen PAGE_SIZE
1891 //printf("######## vcpu_ptc_ga(%p,%p) ##############\n",vadr,addr_range);
1893 #ifdef CONFIG_XEN_SMP
1894 struct domain *d = vcpu->domain;
1895 struct vcpu *v;
1897 for_each_vcpu (d, v) {
1898 if (v == vcpu)
1899 continue;
1901 /* Purge TC entries.
1902 FIXME: clear only if match. */
1903 vcpu_purge_tr_entry(&PSCBX(vcpu,dtlb));
1904 vcpu_purge_tr_entry(&PSCBX(vcpu,itlb));
1906 #ifdef VHPT_GLOBAL
1907 /* Invalidate VHPT entries. */
1908 vhpt_flush_address_remote (v->processor, vadr, addr_range);
1909 #endif
1911 #endif
1913 #ifdef VHPT_GLOBAL
1914 vhpt_flush_address(vadr,addr_range);
1915 #endif
1916 ia64_global_tlb_purge(vadr,vadr+addr_range,PAGE_SHIFT);
1917 /* Purge tc. */
1918 vcpu_purge_tr_entry(&PSCBX(vcpu,dtlb));
1919 vcpu_purge_tr_entry(&PSCBX(vcpu,itlb));
1920 return IA64_NO_FAULT;
1923 IA64FAULT vcpu_ptr_d(VCPU *vcpu,UINT64 vadr,UINT64 addr_range)
1925 printf("vcpu_ptr_d: Purging TLB is unsupported\n");
1926 // don't forget to recompute dtr_regions
1927 return (IA64_ILLOP_FAULT);
1930 IA64FAULT vcpu_ptr_i(VCPU *vcpu,UINT64 vadr,UINT64 addr_range)
1932 printf("vcpu_ptr_i: Purging TLB is unsupported\n");
1933 // don't forget to recompute itr_regions
1934 return (IA64_ILLOP_FAULT);