ia64/xen-unstable

view xen/arch/ia64/xen/vcpu.c @ 9769:d23c088eac6d

[IA64] smp boot speed-up (sal cache flush, itc/itv messages)

Use sal_cache_flush to emulate SAL_CACHE_FLUSH instead of fc.
Print only one vcpu_set_itc message; do not print a set-itv message.

Signed-off-by: Tristan Gingold <tristan.gingold@bull.net>
author awilliam@xenbuild.aw
date Tue Apr 25 22:32:14 2006 -0600 (2006-04-25)
parents 2d2ef3f4c747
children 3ab5ab4d6d75
line source
1 /*
2 * Virtualized CPU functions
3 *
4 * Copyright (C) 2004-2005 Hewlett-Packard Co.
5 * Dan Magenheimer (dan.magenheimer@hp.com)
6 *
7 */
9 #include <linux/sched.h>
10 #include <public/arch-ia64.h>
11 #include <asm/ia64_int.h>
12 #include <asm/vcpu.h>
13 #include <asm/regionreg.h>
14 #include <asm/tlb.h>
15 #include <asm/processor.h>
16 #include <asm/delay.h>
17 #include <asm/vmx_vcpu.h>
18 #include <asm/vhpt.h>
19 #include <asm/tlbflush.h>
20 #include <xen/event.h>
22 /* FIXME: where should these declarations live? */
23 extern void getreg(unsigned long regnum, unsigned long *val, int *nat, struct pt_regs *regs);
24 extern void setreg(unsigned long regnum, unsigned long val, int nat, struct pt_regs *regs);
25 extern void getfpreg (unsigned long regnum, struct ia64_fpreg *fpval, struct pt_regs *regs);
27 extern void setfpreg (unsigned long regnum, struct ia64_fpreg *fpval, struct pt_regs *regs);
29 extern void panic_domain(struct pt_regs *, const char *, ...);
30 extern unsigned long translate_domain_mpaddr(unsigned long);
31 extern void ia64_global_tlb_purge(UINT64 start, UINT64 end, UINT64 nbits);
34 typedef union {
35 struct ia64_psr ia64_psr;
36 unsigned long i64;
37 } PSR;
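The PSR union above is the idiom this file uses for bit-level access to a 64-bit psr image: move the raw value through .i64, then read or set individual architected bits through .ia64_psr. A minimal sketch of the round trip, mirroring what vcpu_get_ipsr_int_state() does further down:

    static UINT64 psr_with_ic_and_bn(UINT64 prevpsr)
    {
        PSR psr;
        psr.i64 = prevpsr;       /* raw 64-bit view */
        psr.ia64_psr.ic = 1;     /* interruption collection on */
        psr.ia64_psr.bn = 1;     /* select register bank 1 */
        return psr.i64;          /* back to the raw view */
    }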
39 // this def for vcpu_regs won't work if kernel stack is present
40 //#define vcpu_regs(vcpu) ((struct pt_regs *) vcpu->arch.regs)
42 #define TRUE 1
43 #define FALSE 0
44 #define IA64_PTA_SZ_BIT 2
45 #define IA64_PTA_VF_BIT 8
46 #define IA64_PTA_BASE_BIT 15
47 #define IA64_PTA_LFMT (1UL << IA64_PTA_VF_BIT)
48 #define IA64_PTA_SZ(x) (x##UL << IA64_PTA_SZ_BIT)
50 #define STATIC
52 #ifdef PRIVOP_ADDR_COUNT
53 struct privop_addr_count privop_addr_counter[PRIVOP_COUNT_NINSTS+1] = {
54 { "=ifa", { 0 }, { 0 }, 0 },
55 { "thash", { 0 }, { 0 }, 0 },
56 { 0, { 0 }, { 0 }, 0 }
57 };
58 extern void privop_count_addr(unsigned long addr, int inst);
59 #define PRIVOP_COUNT_ADDR(regs,inst) privop_count_addr(regs->cr_iip,inst)
60 #else
61 #define PRIVOP_COUNT_ADDR(x,y) do {} while (0)
62 #endif
64 unsigned long dtlb_translate_count = 0;
65 unsigned long tr_translate_count = 0;
66 unsigned long phys_translate_count = 0;
68 unsigned long vcpu_verbose = 0;
70 /**************************************************************************
71 VCPU general register access routines
72 **************************************************************************/
73 #ifdef XEN
74 UINT64
75 vcpu_get_gr(VCPU *vcpu, unsigned long reg)
76 {
77 REGS *regs = vcpu_regs(vcpu);
78 UINT64 val;
80 if (!reg) return 0;
81 getreg(reg,&val,0,regs); // FIXME: handle NATs later
82 return val;
83 }
84 IA64FAULT
85 vcpu_get_gr_nat(VCPU *vcpu, unsigned long reg, UINT64 *val)
86 {
87 REGS *regs = vcpu_regs(vcpu);
88 int nat;
90 getreg(reg,val,&nat,regs); // FIXME: handle NATs later
91 if (nat)
92 return IA64_NAT_CONSUMPTION_VECTOR;
93 return 0;
94 }
96 // returns:
97 // IA64_ILLOP_FAULT if the register would cause an Illegal Operation fault
98 // IA64_NO_FAULT otherwise
99 IA64FAULT
100 vcpu_set_gr(VCPU *vcpu, unsigned long reg, UINT64 value, int nat)
101 {
102 REGS *regs = vcpu_regs(vcpu);
103 long sof = (regs->cr_ifs) & 0x7f;
105 if (!reg) return IA64_ILLOP_FAULT;
106 if (reg >= sof + 32) return IA64_ILLOP_FAULT;
107 setreg(reg,value,nat,regs); // FIXME: handle NATs later
108 return IA64_NO_FAULT;
109 }
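The two checks above encode the register-frame rule: cr.ifs bits 0..6 hold sof, the size of the current frame, so the writable GRs are the static registers r1-r31 plus the stacked registers r32..r32+sof-1, and r0 is hardwired to zero. The same predicate as a sketch:

    /* Sketch: which GR numbers vcpu_set_gr() accepts. */
    static int gr_is_writable(REGS *regs, unsigned long reg)
    {
        unsigned long sof = regs->cr_ifs & 0x7f;  /* size of frame */
        return reg != 0 && reg < 32 + sof;        /* r0 is read-only */
    }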
111 IA64FAULT
112 vcpu_get_fpreg(VCPU *vcpu, unsigned long reg, struct ia64_fpreg *val)
113 {
114 REGS *regs = vcpu_regs(vcpu);
115 getfpreg(reg,val,regs); // FIXME: handle NATs later
116 return IA64_NO_FAULT;
117 }
119 IA64FAULT
120 vcpu_set_fpreg(VCPU *vcpu, unsigned long reg, struct ia64_fpreg *val)
121 {
122 REGS *regs = vcpu_regs(vcpu);
123 if(reg > 1)
124 setfpreg(reg,val,regs); // FIXME: handle NATs later
125 return IA64_NO_FAULT;
126 }
128 #else
129 // returns:
130 // IA64_ILLOP_FAULT if the register would cause an Illegal Operation fault
131 // IA64_NO_FAULT otherwise
132 IA64FAULT
133 vcpu_set_gr(VCPU *vcpu, unsigned long reg, UINT64 value)
134 {
135 REGS *regs = vcpu_regs(vcpu);
136 long sof = (regs->cr_ifs) & 0x7f;
138 if (!reg) return IA64_ILLOP_FAULT;
139 if (reg >= sof + 32) return IA64_ILLOP_FAULT;
140 setreg(reg,value,0,regs); // FIXME: handle NATs later
141 return IA64_NO_FAULT;
142 }
144 #endif
145 /**************************************************************************
146 VCPU privileged application register access routines
147 **************************************************************************/
149 void vcpu_load_kernel_regs(VCPU *vcpu)
150 {
151 ia64_set_kr(0, VCPU(vcpu, krs[0]));
152 ia64_set_kr(1, VCPU(vcpu, krs[1]));
153 ia64_set_kr(2, VCPU(vcpu, krs[2]));
154 ia64_set_kr(3, VCPU(vcpu, krs[3]));
155 ia64_set_kr(4, VCPU(vcpu, krs[4]));
156 ia64_set_kr(5, VCPU(vcpu, krs[5]));
157 ia64_set_kr(6, VCPU(vcpu, krs[6]));
158 ia64_set_kr(7, VCPU(vcpu, krs[7]));
159 }
161 /* GCC 4.0.2 seems not to be able to suppress this call! */
162 #define ia64_setreg_unknown_kr() return IA64_ILLOP_FAULT
164 IA64FAULT vcpu_set_ar(VCPU *vcpu, UINT64 reg, UINT64 val)
165 {
166 if (reg == 44) return (vcpu_set_itc(vcpu,val));
167 else if (reg == 27) return (IA64_ILLOP_FAULT);
168 else if (reg == 24)
169 printf("warning: setting ar.eflg is a no-op; no IA-32 support\n");
170 else if (reg > 7) return (IA64_ILLOP_FAULT);
171 else {
172 PSCB(vcpu,krs[reg]) = val;
173 ia64_set_kr(reg,val);
174 }
175 return IA64_NO_FAULT;
176 }
178 IA64FAULT vcpu_get_ar(VCPU *vcpu, UINT64 reg, UINT64 *val)
179 {
180 if (reg == 24)
181 printf("warning: getting ar.eflg is a no-op; no IA-32 support\n");
182 else if (reg > 7) return (IA64_ILLOP_FAULT);
183 else *val = PSCB(vcpu,krs[reg]);
184 return IA64_NO_FAULT;
185 }
187 /**************************************************************************
188 VCPU processor status register access routines
189 **************************************************************************/
191 void vcpu_set_metaphysical_mode(VCPU *vcpu, BOOLEAN newmode)
192 {
193 /* only do something if mode changes */
194 if (!!newmode ^ !!PSCB(vcpu,metaphysical_mode)) {
195 PSCB(vcpu,metaphysical_mode) = newmode;
196 if (newmode) set_metaphysical_rr0();
197 else if (PSCB(vcpu,rrs[0]) != -1)
198 set_one_rr(0, PSCB(vcpu,rrs[0]));
199 }
200 }
202 IA64FAULT vcpu_reset_psr_dt(VCPU *vcpu)
203 {
204 vcpu_set_metaphysical_mode(vcpu,TRUE);
205 return IA64_NO_FAULT;
206 }
208 IA64FAULT vcpu_reset_psr_sm(VCPU *vcpu, UINT64 imm24)
209 {
210 struct ia64_psr psr, imm, *ipsr;
211 REGS *regs = vcpu_regs(vcpu);
213 //PRIVOP_COUNT_ADDR(regs,_RSM);
214 // TODO: All of these bits need to be virtualized
215 // TODO: Only allowed for current vcpu
216 __asm__ __volatile ("mov %0=psr;;" : "=r"(psr) :: "memory");
217 ipsr = (struct ia64_psr *)&regs->cr_ipsr;
218 imm = *(struct ia64_psr *)&imm24;
219 // interrupt flag
220 if (imm.i)
221 vcpu->vcpu_info->evtchn_upcall_mask = 1;
222 if (imm.ic) PSCB(vcpu,interrupt_collection_enabled) = 0;
223 // interrupt collection flag
224 //if (imm.ic) PSCB(vcpu,interrupt_delivery_enabled) = 0;
225 // just handle psr.up and psr.pp for now
226 if (imm24 & ~(IA64_PSR_BE | IA64_PSR_PP | IA64_PSR_UP | IA64_PSR_SP
227 | IA64_PSR_I | IA64_PSR_IC | IA64_PSR_DT
228 | IA64_PSR_DFL | IA64_PSR_DFH))
229 return (IA64_ILLOP_FAULT);
230 if (imm.dfh) ipsr->dfh = 0;
231 if (imm.dfl) ipsr->dfl = 0;
232 if (imm.pp) {
233 ipsr->pp = 1;
234 psr.pp = 1; // priv perf ctrs always enabled
235 // FIXME: need new field in mapped_regs_t for virtual psr.pp (psr.be too?)
236 PSCB(vcpu,tmp[8]) = 0; // but fool the domain if it gets psr
237 }
238 if (imm.up) { ipsr->up = 0; psr.up = 0; }
239 if (imm.sp) { ipsr->sp = 0; psr.sp = 0; }
240 if (imm.be) ipsr->be = 0;
241 if (imm.dt) vcpu_set_metaphysical_mode(vcpu,TRUE);
242 __asm__ __volatile (";; mov psr.l=%0;; srlz.d"::"r"(psr):"memory");
243 return IA64_NO_FAULT;
244 }
246 #define SPURIOUS_VECTOR 0xf
248 IA64FAULT vcpu_set_psr_dt(VCPU *vcpu)
249 {
250 vcpu_set_metaphysical_mode(vcpu,FALSE);
251 return IA64_NO_FAULT;
252 }
254 IA64FAULT vcpu_set_psr_i(VCPU *vcpu)
255 {
256 vcpu->vcpu_info->evtchn_upcall_mask = 0;
257 PSCB(vcpu,interrupt_collection_enabled) = 1;
258 return IA64_NO_FAULT;
259 }
261 IA64FAULT vcpu_set_psr_sm(VCPU *vcpu, UINT64 imm24)
262 {
263 struct ia64_psr psr, imm, *ipsr;
264 REGS *regs = vcpu_regs(vcpu);
265 UINT64 mask, enabling_interrupts = 0;
267 //PRIVOP_COUNT_ADDR(regs,_SSM);
268 // TODO: All of these bits need to be virtualized
269 __asm__ __volatile ("mov %0=psr;;" : "=r"(psr) :: "memory");
270 imm = *(struct ia64_psr *)&imm24;
271 ipsr = (struct ia64_psr *)&regs->cr_ipsr;
272 // just handle psr.sp,pp and psr.i,ic (and user mask) for now
273 mask = IA64_PSR_PP|IA64_PSR_SP|IA64_PSR_I|IA64_PSR_IC|IA64_PSR_UM |
274 IA64_PSR_DT|IA64_PSR_DFL|IA64_PSR_DFH;
275 if (imm24 & ~mask) return (IA64_ILLOP_FAULT);
276 if (imm.dfh) ipsr->dfh = 1;
277 if (imm.dfl) ipsr->dfl = 1;
278 if (imm.pp) {
279 ipsr->pp = 1; psr.pp = 1;
280 // FIXME: need new field in mapped_regs_t for virtual psr.pp (psr.be too?)
281 PSCB(vcpu,tmp[8]) = 1;
282 }
283 if (imm.sp) { ipsr->sp = 1; psr.sp = 1; }
284 if (imm.i) {
285 if (vcpu->vcpu_info->evtchn_upcall_mask) {
286 //printf("vcpu_set_psr_sm: psr.ic 0->1 ");
287 enabling_interrupts = 1;
288 }
289 vcpu->vcpu_info->evtchn_upcall_mask = 0;
290 }
291 if (imm.ic) PSCB(vcpu,interrupt_collection_enabled) = 1;
292 // TODO: do this faster
293 if (imm.mfl) { ipsr->mfl = 1; psr.mfl = 1; }
294 if (imm.mfh) { ipsr->mfh = 1; psr.mfh = 1; }
295 if (imm.ac) { ipsr->ac = 1; psr.ac = 1; }
296 if (imm.up) { ipsr->up = 1; psr.up = 1; }
297 if (imm.be) {
298 printf("*** DOMAIN TRYING TO TURN ON BIG-ENDIAN!!!\n");
299 return (IA64_ILLOP_FAULT);
300 }
301 if (imm.dt) vcpu_set_metaphysical_mode(vcpu,FALSE);
302 __asm__ __volatile (";; mov psr.l=%0;; srlz.d"::"r"(psr):"memory");
303 if (enabling_interrupts &&
304 vcpu_check_pending_interrupts(vcpu) != SPURIOUS_VECTOR)
305 PSCB(vcpu,pending_interruption) = 1;
306 return IA64_NO_FAULT;
307 }
309 IA64FAULT vcpu_set_psr_l(VCPU *vcpu, UINT64 val)
310 {
311 struct ia64_psr psr, newpsr, *ipsr;
312 REGS *regs = vcpu_regs(vcpu);
313 UINT64 enabling_interrupts = 0;
315 // TODO: All of these bits need to be virtualized
316 __asm__ __volatile ("mov %0=psr;;" : "=r"(psr) :: "memory");
317 newpsr = *(struct ia64_psr *)&val;
318 ipsr = (struct ia64_psr *)&regs->cr_ipsr;
319 // just handle psr.up and psr.pp for now
320 //if (val & ~(IA64_PSR_PP | IA64_PSR_UP | IA64_PSR_SP)) return (IA64_ILLOP_FAULT);
321 // however trying to set other bits can't be an error as it is in ssm
322 if (newpsr.dfh) ipsr->dfh = 1;
323 if (newpsr.dfl) ipsr->dfl = 1;
324 if (newpsr.pp) {
325 ipsr->pp = 1; psr.pp = 1;
326 // FIXME: need new field in mapped_regs_t for virtual psr.pp (psr.be too?)
327 PSCB(vcpu,tmp[8]) = 1;
328 }
329 else {
330 ipsr->pp = 1; psr.pp = 1;
331 PSCB(vcpu,tmp[8]) = 0;
332 }
333 if (newpsr.up) { ipsr->up = 1; psr.up = 1; }
334 if (newpsr.sp) { ipsr->sp = 1; psr.sp = 1; }
335 if (newpsr.i) {
336 if (vcpu->vcpu_info->evtchn_upcall_mask)
337 enabling_interrupts = 1;
338 vcpu->vcpu_info->evtchn_upcall_mask = 0;
339 }
340 if (newpsr.ic) PSCB(vcpu,interrupt_collection_enabled) = 1;
341 if (newpsr.mfl) { ipsr->mfl = 1; psr.mfl = 1; }
342 if (newpsr.mfh) { ipsr->mfh = 1; psr.mfh = 1; }
343 if (newpsr.ac) { ipsr->ac = 1; psr.ac = 1; }
345 if (newpsr.dt && newpsr.rt) vcpu_set_metaphysical_mode(vcpu,FALSE);
346 else vcpu_set_metaphysical_mode(vcpu,TRUE);
347 if (newpsr.be) {
348 printf("*** DOMAIN TRYING TO TURN ON BIG-ENDIAN!!!\n");
349 return (IA64_ILLOP_FAULT);
350 }
351 if (enabling_interrupts &&
352 vcpu_check_pending_interrupts(vcpu) != SPURIOUS_VECTOR)
353 PSCB(vcpu,pending_interruption) = 1;
354 return IA64_NO_FAULT;
355 }
357 IA64FAULT vcpu_get_psr(VCPU *vcpu, UINT64 *pval)
358 {
359 REGS *regs = vcpu_regs(vcpu);
360 struct ia64_psr newpsr;
362 newpsr = *(struct ia64_psr *)&regs->cr_ipsr;
363 if (newpsr.cpl == 2) newpsr.cpl = 0;
364 if (!vcpu->vcpu_info->evtchn_upcall_mask) newpsr.i = 1;
365 else newpsr.i = 0;
366 if (PSCB(vcpu,interrupt_collection_enabled)) newpsr.ic = 1;
367 else newpsr.ic = 0;
368 if (PSCB(vcpu,metaphysical_mode)) newpsr.dt = 0;
369 else newpsr.dt = 1;
370 // FIXME: need new field in mapped_regs_t for virtual psr.pp (psr.be too?)
371 if (PSCB(vcpu,tmp[8])) newpsr.pp = 1;
372 else newpsr.pp = 0;
373 *pval = *(unsigned long *)&newpsr;
374 return IA64_NO_FAULT;
375 }
377 BOOLEAN vcpu_get_psr_ic(VCPU *vcpu)
378 {
379 return !!PSCB(vcpu,interrupt_collection_enabled);
380 }
382 BOOLEAN vcpu_get_psr_i(VCPU *vcpu)
383 {
384 return !vcpu->vcpu_info->evtchn_upcall_mask;
385 }
387 UINT64 vcpu_get_ipsr_int_state(VCPU *vcpu,UINT64 prevpsr)
388 {
389 UINT64 dcr = PSCBX(vcpu,dcr);
390 PSR psr;
392 //printf("*** vcpu_get_ipsr_int_state (0x%016lx)...",prevpsr);
393 psr.i64 = prevpsr;
394 psr.ia64_psr.be = 0; if (dcr & IA64_DCR_BE) psr.ia64_psr.be = 1;
395 psr.ia64_psr.pp = 0; if (dcr & IA64_DCR_PP) psr.ia64_psr.pp = 1;
396 psr.ia64_psr.ic = PSCB(vcpu,interrupt_collection_enabled);
397 psr.ia64_psr.i = !vcpu->vcpu_info->evtchn_upcall_mask;
398 psr.ia64_psr.bn = PSCB(vcpu,banknum);
399 psr.ia64_psr.dt = 1; psr.ia64_psr.it = 1; psr.ia64_psr.rt = 1;
400 if (psr.ia64_psr.cpl == 2) psr.ia64_psr.cpl = 0; // !!!! fool domain
401 // psr.pk = 1;
402 //printf("returns 0x%016lx...",psr.i64);
403 return psr.i64;
404 }
406 /**************************************************************************
407 VCPU control register access routines
408 **************************************************************************/
410 IA64FAULT vcpu_get_dcr(VCPU *vcpu, UINT64 *pval)
411 {
412 //extern unsigned long privop_trace;
413 //privop_trace=0;
414 //verbose("vcpu_get_dcr: called @%p\n",PSCB(vcpu,iip));
415 // Reads of cr.dcr on Xen always have the sign bit set, so
416 // a domain can differentiate whether it is running on SP or not
417 *pval = PSCBX(vcpu,dcr) | 0x8000000000000000L;
418 return (IA64_NO_FAULT);
419 }
421 IA64FAULT vcpu_get_iva(VCPU *vcpu, UINT64 *pval)
422 {
423 if(VMX_DOMAIN(vcpu)){
424 *pval = PSCB(vcpu,iva) & ~0x7fffL;
425 }else{
426 *pval = PSCBX(vcpu,iva) & ~0x7fffL;
427 }
428 return (IA64_NO_FAULT);
429 }
431 IA64FAULT vcpu_get_pta(VCPU *vcpu, UINT64 *pval)
432 {
433 *pval = PSCB(vcpu,pta);
434 return (IA64_NO_FAULT);
435 }
437 IA64FAULT vcpu_get_ipsr(VCPU *vcpu, UINT64 *pval)
438 {
439 //REGS *regs = vcpu_regs(vcpu);
440 //*pval = regs->cr_ipsr;
441 *pval = PSCB(vcpu,ipsr);
442 return (IA64_NO_FAULT);
443 }
445 IA64FAULT vcpu_get_isr(VCPU *vcpu, UINT64 *pval)
446 {
447 *pval = PSCB(vcpu,isr);
448 return (IA64_NO_FAULT);
449 }
451 IA64FAULT vcpu_get_iip(VCPU *vcpu, UINT64 *pval)
452 {
453 //REGS *regs = vcpu_regs(vcpu);
454 //*pval = regs->cr_iip;
455 *pval = PSCB(vcpu,iip);
456 return (IA64_NO_FAULT);
457 }
459 IA64FAULT vcpu_get_ifa(VCPU *vcpu, UINT64 *pval)
460 {
461 UINT64 val = PSCB(vcpu,ifa);
462 REGS *regs = vcpu_regs(vcpu);
463 PRIVOP_COUNT_ADDR(regs,_GET_IFA);
464 *pval = val;
465 return (IA64_NO_FAULT);
466 }
468 unsigned long vcpu_get_rr_ps(VCPU *vcpu,UINT64 vadr)
469 {
470 ia64_rr rr;
472 rr.rrval = PSCB(vcpu,rrs)[vadr>>61];
473 return(rr.ps);
474 }
476 unsigned long vcpu_get_rr_rid(VCPU *vcpu,UINT64 vadr)
477 {
478 ia64_rr rr;
480 rr.rrval = PSCB(vcpu,rrs)[vadr>>61];
481 return(rr.rid);
482 }
484 unsigned long vcpu_get_itir_on_fault(VCPU *vcpu, UINT64 ifa)
485 {
486 ia64_rr rr;
488 rr.rrval = 0;
489 rr.ps = vcpu_get_rr_ps(vcpu,ifa);
490 rr.rid = vcpu_get_rr_rid(vcpu,ifa);
491 return (rr.rrval);
492 }
495 IA64FAULT vcpu_get_itir(VCPU *vcpu, UINT64 *pval)
496 {
497 UINT64 val = PSCB(vcpu,itir);
498 *pval = val;
499 return (IA64_NO_FAULT);
500 }
502 IA64FAULT vcpu_get_iipa(VCPU *vcpu, UINT64 *pval)
503 {
504 UINT64 val = PSCB(vcpu,iipa);
505 // SP entry code does not save iipa yet nor does it get
506 // properly delivered in the pscb
507 // printf("*** vcpu_get_iipa: cr.iipa not fully implemented yet!!\n");
508 *pval = val;
509 return (IA64_NO_FAULT);
510 }
512 IA64FAULT vcpu_get_ifs(VCPU *vcpu, UINT64 *pval)
513 {
514 //PSCB(vcpu,ifs) = PSCB(vcpu)->regs.cr_ifs;
515 //*pval = PSCB(vcpu,regs).cr_ifs;
516 *pval = PSCB(vcpu,ifs);
517 PSCB(vcpu,incomplete_regframe) = 0;
518 return (IA64_NO_FAULT);
519 }
521 IA64FAULT vcpu_get_iim(VCPU *vcpu, UINT64 *pval)
522 {
523 UINT64 val = PSCB(vcpu,iim);
524 *pval = val;
525 return (IA64_NO_FAULT);
526 }
528 IA64FAULT vcpu_get_iha(VCPU *vcpu, UINT64 *pval)
529 {
530 //return vcpu_thash(vcpu,PSCB(vcpu,ifa),pval);
531 UINT64 val = PSCB(vcpu,iha);
532 REGS *regs = vcpu_regs(vcpu);
533 PRIVOP_COUNT_ADDR(regs,_THASH);
534 *pval = val;
535 return (IA64_NO_FAULT);
536 }
538 IA64FAULT vcpu_set_dcr(VCPU *vcpu, UINT64 val)
539 {
540 //extern unsigned long privop_trace;
541 //privop_trace=1;
542 // Reads of cr.dcr on SP always have the sign bit set, so
543 // a domain can differentiate whether it is running on SP or not
544 // Thus, writes of DCR should ignore the sign bit
545 //verbose("vcpu_set_dcr: called\n");
546 PSCBX(vcpu,dcr) = val & ~0x8000000000000000L;
547 return (IA64_NO_FAULT);
548 }
550 IA64FAULT vcpu_set_iva(VCPU *vcpu, UINT64 val)
551 {
552 if(VMX_DOMAIN(vcpu)){
553 PSCB(vcpu,iva) = val & ~0x7fffL;
554 }else{
555 PSCBX(vcpu,iva) = val & ~0x7fffL;
556 }
557 return (IA64_NO_FAULT);
558 }
560 IA64FAULT vcpu_set_pta(VCPU *vcpu, UINT64 val)
561 {
562 if (val & IA64_PTA_LFMT) {
563 printf("*** No support for VHPT long format yet!!\n");
564 return (IA64_ILLOP_FAULT);
565 }
566 if (val & (0x3f<<9)) /* reserved fields */ return IA64_RSVDREG_FAULT;
567 if (val & 2) /* reserved fields */ return IA64_RSVDREG_FAULT;
568 PSCB(vcpu,pta) = val;
569 return IA64_NO_FAULT;
570 }
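For reference, the field layout vcpu_set_pta() enforces, derived from the IA64_PTA_* constants near the top of the file: bit 0 is ve (VHPT walker enable), bit 1 is reserved, bits 2-7 are size (log2 of the VHPT size), bit 8 is vf (long format, rejected here), bits 9-14 are reserved, and bits 15-63 are the VHPT base. A sketch of the size extraction that vcpu_thash() performs later:

    static unsigned long pta_size(UINT64 pta)
    {
        return (pta & IA64_PTA_SZ(0x3f)) >> IA64_PTA_SZ_BIT;  /* bits 2-7 */
    }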
572 IA64FAULT vcpu_set_ipsr(VCPU *vcpu, UINT64 val)
573 {
574 PSCB(vcpu,ipsr) = val;
575 return IA64_NO_FAULT;
576 }
578 IA64FAULT vcpu_set_isr(VCPU *vcpu, UINT64 val)
579 {
580 PSCB(vcpu,isr) = val;
581 return IA64_NO_FAULT;
582 }
584 IA64FAULT vcpu_set_iip(VCPU *vcpu, UINT64 val)
585 {
586 PSCB(vcpu,iip) = val;
587 return IA64_NO_FAULT;
588 }
590 IA64FAULT vcpu_increment_iip(VCPU *vcpu)
591 {
592 REGS *regs = vcpu_regs(vcpu);
593 struct ia64_psr *ipsr = (struct ia64_psr *)&regs->cr_ipsr;
594 if (ipsr->ri == 2) { ipsr->ri=0; regs->cr_iip += 16; }
595 else ipsr->ri++;
596 return (IA64_NO_FAULT);
597 }
599 IA64FAULT vcpu_set_ifa(VCPU *vcpu, UINT64 val)
600 {
601 PSCB(vcpu,ifa) = val;
602 return IA64_NO_FAULT;
603 }
605 IA64FAULT vcpu_set_itir(VCPU *vcpu, UINT64 val)
606 {
607 PSCB(vcpu,itir) = val;
608 return IA64_NO_FAULT;
609 }
611 IA64FAULT vcpu_set_iipa(VCPU *vcpu, UINT64 val)
612 {
613 // SP entry code does not save iipa yet nor does it get
614 // properly delivered in the pscb
615 // printf("*** vcpu_set_iipa: cr.iipa not fully implemented yet!!\n");
616 PSCB(vcpu,iipa) = val;
617 return IA64_NO_FAULT;
618 }
620 IA64FAULT vcpu_set_ifs(VCPU *vcpu, UINT64 val)
621 {
622 //REGS *regs = vcpu_regs(vcpu);
623 PSCB(vcpu,ifs) = val;
624 return IA64_NO_FAULT;
625 }
627 IA64FAULT vcpu_set_iim(VCPU *vcpu, UINT64 val)
628 {
629 PSCB(vcpu,iim) = val;
630 return IA64_NO_FAULT;
631 }
633 IA64FAULT vcpu_set_iha(VCPU *vcpu, UINT64 val)
634 {
635 PSCB(vcpu,iha) = val;
636 return IA64_NO_FAULT;
637 }
639 /**************************************************************************
640 VCPU interrupt control register access routines
641 **************************************************************************/
643 void vcpu_pend_unspecified_interrupt(VCPU *vcpu)
644 {
645 PSCB(vcpu,pending_interruption) = 1;
646 }
648 void vcpu_pend_interrupt(VCPU *vcpu, UINT64 vector)
649 {
650 if (vector & ~0xff) {
651 printf("vcpu_pend_interrupt: bad vector\n");
652 return;
653 }
654 if ( VMX_DOMAIN(vcpu) ) {
655 set_bit(vector,VCPU(vcpu,irr));
656 } else
657 {
658 if (test_bit(vector,PSCBX(vcpu,irr))) {
659 //printf("vcpu_pend_interrupt: overrun\n");
660 }
661 set_bit(vector,PSCBX(vcpu,irr));
662 PSCB(vcpu,pending_interruption) = 1;
663 }
664 }
666 #define IA64_TPR_MMI 0x10000
667 #define IA64_TPR_MIC 0x000f0
669 /* checks to see if a VCPU has any unmasked pending interrupts
670 * if so, returns the highest, else returns SPURIOUS_VECTOR */
671 /* NOTE: Since this gets called from vcpu_get_ivr() and the
672 * semantics of "mov rx=cr.ivr" ignore the setting of the psr.i bit,
673 * this routine also ignores pscb.interrupt_delivery_enabled
674 * and this must be checked independently; see vcpu_deliverable_interrupts() */
675 UINT64 vcpu_check_pending_interrupts(VCPU *vcpu)
676 {
677 UINT64 *p, *r, bits, bitnum, mask, i, vector;
679 /* Always check pending event, since guest may just ack the
680 * event injection without handle. Later guest may throw out
681 * the event itself.
682 */
683 check_start:
684 if (event_pending(vcpu) &&
685 !test_bit(vcpu->vcpu_info->arch.evtchn_vector,
686 &PSCBX(vcpu, insvc[0])))
687 vcpu_pend_interrupt(vcpu, vcpu->vcpu_info->arch.evtchn_vector);
689 p = &PSCBX(vcpu,irr[3]);
690 r = &PSCBX(vcpu,insvc[3]);
691 for (i = 3; ; p--, r--, i--) {
692 bits = *p ;
693 if (bits) break; // got a potential interrupt
694 if (*r) {
695 // nothing in this word which is pending+inservice
696 // but there is one inservice which masks lower
697 return SPURIOUS_VECTOR;
698 }
699 if (i == 0) {
700 // checked all bits... nothing pending+inservice
701 return SPURIOUS_VECTOR;
702 }
703 }
704 // have a pending,deliverable interrupt... see if it is masked
705 bitnum = ia64_fls(bits);
706 //printf("XXXXXXX vcpu_check_pending_interrupts: got bitnum=%p...",bitnum);
707 vector = bitnum+(i*64);
708 mask = 1L << bitnum;
709 /* sanity check for guest timer interrupt */
710 if (vector == (PSCB(vcpu,itv) & 0xff)) {
711 uint64_t now = ia64_get_itc();
712 if (now < PSCBX(vcpu,domain_itm)) {
713 // printk("Ooops, pending guest timer before its due\n");
714 PSCBX(vcpu,irr[i]) &= ~mask;
715 goto check_start;
716 }
717 }
718 //printf("XXXXXXX vcpu_check_pending_interrupts: got vector=%p...",vector);
719 if (*r >= mask) {
720 // masked by equal inservice
721 //printf("but masked by equal inservice\n");
722 return SPURIOUS_VECTOR;
723 }
724 if (PSCB(vcpu,tpr) & IA64_TPR_MMI) {
725 // tpr.mmi is set
726 //printf("but masked by tpr.mmi\n");
727 return SPURIOUS_VECTOR;
728 }
729 if (((PSCB(vcpu,tpr) & IA64_TPR_MIC) + 15) >= vector) {
730 //tpr.mic masks class
731 //printf("but masked by tpr.mic\n");
732 return SPURIOUS_VECTOR;
733 }
735 //printf("returned to caller\n");
736 return vector;
737 }
739 UINT64 vcpu_deliverable_interrupts(VCPU *vcpu)
740 {
741 return (vcpu_get_psr_i(vcpu) &&
742 vcpu_check_pending_interrupts(vcpu) != SPURIOUS_VECTOR);
743 }
745 UINT64 vcpu_deliverable_timer(VCPU *vcpu)
746 {
747 return (vcpu_get_psr_i(vcpu) &&
748 vcpu_check_pending_interrupts(vcpu) == PSCB(vcpu,itv));
749 }
751 IA64FAULT vcpu_get_lid(VCPU *vcpu, UINT64 *pval)
752 {
753 /* Use real LID for domain0 until vIOSAPIC is present.
754 Use EID=0, ID=vcpu_id for domU. */
755 if (vcpu->domain == dom0)
756 *pval = ia64_getreg(_IA64_REG_CR_LID);
757 else
758 *pval = vcpu->vcpu_id << 24;
759 return IA64_NO_FAULT;
760 }
762 IA64FAULT vcpu_get_ivr(VCPU *vcpu, UINT64 *pval)
763 {
764 int i;
765 UINT64 vector, mask;
767 #define HEARTBEAT_FREQ 16 // period in seconds
768 #ifdef HEARTBEAT_FREQ
769 #define N_DOMS 16 // maximum number of domains tracked
770 #if 0
771 static long count[N_DOMS] = { 0 };
772 #endif
773 static long nonclockcount[N_DOMS] = { 0 };
774 unsigned domid = vcpu->domain->domain_id;
775 #endif
776 #ifdef IRQ_DEBUG
777 static char firstivr = 1;
778 static char firsttime[256];
779 if (firstivr) {
780 int i;
781 for (i=0;i<256;i++) firsttime[i]=1;
782 firstivr=0;
783 }
784 #endif
786 vector = vcpu_check_pending_interrupts(vcpu);
787 if (vector == SPURIOUS_VECTOR) {
788 PSCB(vcpu,pending_interruption) = 0;
789 *pval = vector;
790 return IA64_NO_FAULT;
791 }
792 #ifdef HEARTBEAT_FREQ
793 if (domid >= N_DOMS) domid = N_DOMS-1;
794 if (vector == (PSCB(vcpu,itv) & 0xff)) {
795 #if 0
796 if (!(++count[domid] & ((HEARTBEAT_FREQ*1024)-1))) {
797 printf("Dom%d heartbeat... ticks=%lx,nonticks=%lx\n",
798 domid, count[domid], nonclockcount[domid]);
799 //count[domid] = 0;
800 //dump_runq();
801 }
802 #endif
803 }
804 else nonclockcount[domid]++;
805 #endif
806 // now have an unmasked, pending, deliverable vector!
807 // getting ivr has "side effects"
808 #ifdef IRQ_DEBUG
809 if (firsttime[vector]) {
810 printf("*** First get_ivr on vector=%lu,itc=%lx\n",
811 vector,ia64_get_itc());
812 firsttime[vector]=0;
813 }
814 #endif
815 /* if delivering a timer interrupt, remember domain_itm, which
816 * needs to be done before clearing irr
817 */
818 if (vector == (PSCB(vcpu,itv) & 0xff)) {
819 PSCBX(vcpu,domain_itm_last) = PSCBX(vcpu,domain_itm);
820 }
822 i = vector >> 6;
823 mask = 1L << (vector & 0x3f);
824 //printf("ZZZZZZ vcpu_get_ivr: setting insvc mask for vector %lu\n",vector);
825 PSCBX(vcpu,insvc[i]) |= mask;
826 PSCBX(vcpu,irr[i]) &= ~mask;
827 //PSCB(vcpu,pending_interruption)--;
828 *pval = vector;
829 return IA64_NO_FAULT;
830 }
832 IA64FAULT vcpu_get_tpr(VCPU *vcpu, UINT64 *pval)
833 {
834 *pval = PSCB(vcpu,tpr);
835 return (IA64_NO_FAULT);
836 }
838 IA64FAULT vcpu_get_eoi(VCPU *vcpu, UINT64 *pval)
839 {
840 *pval = 0L; // reads of eoi always return 0
841 return (IA64_NO_FAULT);
842 }
844 IA64FAULT vcpu_get_irr0(VCPU *vcpu, UINT64 *pval)
845 {
846 *pval = PSCBX(vcpu, irr[0]);
847 return (IA64_NO_FAULT);
848 }
850 IA64FAULT vcpu_get_irr1(VCPU *vcpu, UINT64 *pval)
851 {
852 *pval = PSCBX(vcpu, irr[1]);
853 return (IA64_NO_FAULT);
854 }
856 IA64FAULT vcpu_get_irr2(VCPU *vcpu, UINT64 *pval)
857 {
858 *pval = PSCBX(vcpu, irr[2]);
859 return (IA64_NO_FAULT);
860 }
862 IA64FAULT vcpu_get_irr3(VCPU *vcpu, UINT64 *pval)
863 {
864 *pval = PSCBX(vcpu, irr[3]);
865 return (IA64_NO_FAULT);
866 }
868 IA64FAULT vcpu_get_itv(VCPU *vcpu, UINT64 *pval)
869 {
870 *pval = PSCB(vcpu,itv);
871 return (IA64_NO_FAULT);
872 }
874 IA64FAULT vcpu_get_pmv(VCPU *vcpu, UINT64 *pval)
875 {
876 *pval = PSCB(vcpu,pmv);
877 return (IA64_NO_FAULT);
878 }
880 IA64FAULT vcpu_get_cmcv(VCPU *vcpu, UINT64 *pval)
881 {
882 *pval = PSCB(vcpu,cmcv);
883 return (IA64_NO_FAULT);
884 }
886 IA64FAULT vcpu_get_lrr0(VCPU *vcpu, UINT64 *pval)
887 {
888 // fix this when setting values other than m-bit is supported
889 printf("vcpu_get_lrr0: Unmasked interrupts unsupported\n");
890 *pval = (1L << 16);
891 return (IA64_NO_FAULT);
892 }
894 IA64FAULT vcpu_get_lrr1(VCPU *vcpu, UINT64 *pval)
895 {
896 // fix this when setting values other than m-bit is supported
897 printf("vcpu_get_lrr1: Unmasked interrupts unsupported\n");
898 *pval = (1L << 16);
899 return (IA64_NO_FAULT);
900 }
902 IA64FAULT vcpu_set_lid(VCPU *vcpu, UINT64 val)
903 {
904 printf("vcpu_set_lid: Setting cr.lid is unsupported\n");
905 return (IA64_ILLOP_FAULT);
906 }
908 IA64FAULT vcpu_set_tpr(VCPU *vcpu, UINT64 val)
909 {
910 if (val & 0xff00) return IA64_RSVDREG_FAULT;
911 PSCB(vcpu,tpr) = val;
912 /* This can unmask interrupts. */
913 if (vcpu_check_pending_interrupts(vcpu) != SPURIOUS_VECTOR)
914 PSCB(vcpu,pending_interruption) = 1;
915 return (IA64_NO_FAULT);
916 }
918 IA64FAULT vcpu_set_eoi(VCPU *vcpu, UINT64 val)
919 {
920 UINT64 *p, bits, vec, bitnum;
921 int i;
923 p = &PSCBX(vcpu,insvc[3]);
924 for (i = 3; (i >= 0) && !(bits = *p); i--, p--);
925 if (i < 0) {
926 printf("Trying to EOI interrupt when none are in-service.\n");
927 return IA64_NO_FAULT;
928 }
929 bitnum = ia64_fls(bits);
930 vec = bitnum + (i*64);
931 /* clear the correct bit */
932 bits &= ~(1L << bitnum);
933 *p = bits;
934 /* clearing an eoi bit may unmask another pending interrupt... */
935 if (!vcpu->vcpu_info->evtchn_upcall_mask) { // but only if enabled...
936 // worry about this later... Linux only calls eoi
937 // with interrupts disabled
938 printf("Trying to EOI interrupt with interrupts enabled\n");
939 }
940 if (vcpu_check_pending_interrupts(vcpu) != SPURIOUS_VECTOR)
941 PSCB(vcpu,pending_interruption) = 1;
942 //printf("YYYYY vcpu_set_eoi: Successful\n");
943 return (IA64_NO_FAULT);
944 }
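The irr/insvc bookkeeping here pairs with vcpu_get_ivr() above: reading ivr moves a vector's bit from irr (pending) to insvc (in-service), and eoi clears the highest insvc bit, matching the hardware's LIFO nesting rule. The bit addressing, as a sketch:

    /* Sketch: indexing into the 4x64-bit irr/insvc maps. */
    static void mark_in_service(UINT64 irr[4], UINT64 insvc[4], UINT64 vector)
    {
        UINT64 mask = 1UL << (vector & 0x3f);
        insvc[vector >> 6] |= mask;   /* now in-service */
        irr[vector >> 6] &= ~mask;    /* no longer pending */
    }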
946 IA64FAULT vcpu_set_lrr0(VCPU *vcpu, UINT64 val)
947 {
948 if (!(val & (1L << 16))) {
949 printf("vcpu_set_lrr0: Unmasked interrupts unsupported\n");
950 return (IA64_ILLOP_FAULT);
951 }
952 // no place to save this state but nothing to do anyway
953 return (IA64_NO_FAULT);
954 }
956 IA64FAULT vcpu_set_lrr1(VCPU *vcpu, UINT64 val)
957 {
958 if (!(val & (1L << 16))) {
959 printf("vcpu_set_lrr0: Unmasked interrupts unsupported\n");
960 return (IA64_ILLOP_FAULT);
961 }
962 // no place to save this state but nothing to do anyway
963 return (IA64_NO_FAULT);
964 }
966 IA64FAULT vcpu_set_itv(VCPU *vcpu, UINT64 val)
967 {
968 /* Check reserved fields. */
969 if (val & 0xef00)
970 return (IA64_ILLOP_FAULT);
971 PSCB(vcpu,itv) = val;
972 if (val & 0x10000) {
973 /* Disable itm. */
974 PSCBX(vcpu,domain_itm) = 0;
975 }
976 else vcpu_set_next_timer(vcpu);
977 return (IA64_NO_FAULT);
978 }
980 IA64FAULT vcpu_set_pmv(VCPU *vcpu, UINT64 val)
981 {
982 if (val & 0xef00) /* reserved fields */ return IA64_RSVDREG_FAULT;
983 PSCB(vcpu,pmv) = val;
984 return (IA64_NO_FAULT);
985 }
987 IA64FAULT vcpu_set_cmcv(VCPU *vcpu, UINT64 val)
988 {
989 if (val & 0xef00) /* reserved fields */ return IA64_RSVDREG_FAULT;
990 PSCB(vcpu,cmcv) = val;
991 return (IA64_NO_FAULT);
992 }
994 /**************************************************************************
995 VCPU temporary register access routines
996 **************************************************************************/
997 UINT64 vcpu_get_tmp(VCPU *vcpu, UINT64 index)
998 {
999 if (index > 7) return 0;
1000 return PSCB(vcpu,tmp[index]);
1001 }
1003 void vcpu_set_tmp(VCPU *vcpu, UINT64 index, UINT64 val)
1004 {
1005 if (index <= 7) PSCB(vcpu,tmp[index]) = val;
1006 }
1008 /**************************************************************************
1009 Interval timer routines
1010 **************************************************************************/
1012 BOOLEAN vcpu_timer_disabled(VCPU *vcpu)
1013 {
1014 UINT64 itv = PSCB(vcpu,itv);
1015 return(!itv || !!(itv & 0x10000));
1016 }
1018 BOOLEAN vcpu_timer_inservice(VCPU *vcpu)
1019 {
1020 UINT64 itv = PSCB(vcpu,itv);
1021 return (test_bit(itv, PSCBX(vcpu,insvc)));
1022 }
1024 BOOLEAN vcpu_timer_expired(VCPU *vcpu)
1025 {
1026 unsigned long domain_itm = PSCBX(vcpu,domain_itm);
1027 unsigned long now = ia64_get_itc();
1029 if (!domain_itm) return FALSE;
1030 if (now < domain_itm) return FALSE;
1031 if (vcpu_timer_disabled(vcpu)) return FALSE;
1032 return TRUE;
1033 }
1035 void vcpu_safe_set_itm(unsigned long val)
1036 {
1037 unsigned long epsilon = 100;
1038 unsigned long flags;
1039 UINT64 now = ia64_get_itc();
1041 local_irq_save(flags);
1042 while (1) {
1043 //printf("*** vcpu_safe_set_itm: Setting itm to %lx, itc=%lx\n",val,now);
1044 ia64_set_itm(val);
1045 if (val > (now = ia64_get_itc())) break;
1046 val = now + epsilon;
1047 epsilon <<= 1;
1048 }
1049 local_irq_restore(flags);
1050 }
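vcpu_safe_set_itm() exists because a match value already in the past would never fire; the loop re-reads the ITC after each write and, if the deadline has slipped behind, pushes it forward by a doubling epsilon so the loop always terminates. A usage sketch (cycles_per_ms is an assumed placeholder, not a constant from this code base):

    static void arm_timer_in_1ms(void)
    {
        const unsigned long cycles_per_ms = 1000000;   /* assumption */
        vcpu_safe_set_itm(ia64_get_itc() + cycles_per_ms);
    }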
1052 void vcpu_set_next_timer(VCPU *vcpu)
1053 {
1054 UINT64 d = PSCBX(vcpu,domain_itm);
1055 //UINT64 s = PSCBX(vcpu,xen_itm);
1056 UINT64 s = local_cpu_data->itm_next;
1057 UINT64 now = ia64_get_itc();
1059 /* gloss over the wraparound problem for now... we know it exists
1060 * but it doesn't matter right now */
1062 if (is_idle_domain(vcpu->domain)) {
1063 // printf("****** vcpu_set_next_timer called during idle!!\n");
1064 vcpu_safe_set_itm(s);
1065 return;
1066 }
1067 //s = PSCBX(vcpu,xen_itm);
1068 if (d && (d > now) && (d < s)) {
1069 vcpu_safe_set_itm(d);
1070 //using_domain_as_itm++;
1071 }
1072 else {
1073 vcpu_safe_set_itm(s);
1074 //using_xen_as_itm++;
1075 }
1076 }
1078 IA64FAULT vcpu_set_itm(VCPU *vcpu, UINT64 val)
1079 {
1080 //UINT now = ia64_get_itc();
1082 //if (val < now) val = now + 1000;
1083 //printf("*** vcpu_set_itm: called with %lx\n",val);
1084 PSCBX(vcpu,domain_itm) = val;
1085 vcpu_set_next_timer(vcpu);
1086 return (IA64_NO_FAULT);
1087 }
1089 IA64FAULT vcpu_set_itc(VCPU *vcpu, UINT64 val)
1090 {
1091 #define DISALLOW_SETTING_ITC_FOR_NOW
1092 #ifdef DISALLOW_SETTING_ITC_FOR_NOW
1093 static int did_print;
1094 if (!did_print) {
1095 printf("vcpu_set_itc: Setting ar.itc is currently disabled\n");
1096 printf("(this message is only displayed once)\n");
1097 did_print = 1;
1098 }
1099 #else
1100 UINT64 oldnow = ia64_get_itc();
1101 UINT64 olditm = PSCBX(vcpu,domain_itm);
1102 unsigned long d = olditm - oldnow;
1103 unsigned long x = local_cpu_data->itm_next - oldnow;
1105 UINT64 newnow = val, min_delta;
1107 local_irq_disable();
1108 if (olditm) {
1109 printf("**** vcpu_set_itc(%lx): vitm changed to %lx\n",val,newnow+d);
1110 PSCBX(vcpu,domain_itm) = newnow + d;
1111 }
1112 local_cpu_data->itm_next = newnow + x;
1113 d = PSCBX(vcpu,domain_itm);
1114 x = local_cpu_data->itm_next;
1116 ia64_set_itc(newnow);
1117 if (d && (d > newnow) && (d < x)) {
1118 vcpu_safe_set_itm(d);
1119 //using_domain_as_itm++;
1120 }
1121 else {
1122 vcpu_safe_set_itm(x);
1123 //using_xen_as_itm++;
1124 }
1125 local_irq_enable();
1126 #endif
1127 return (IA64_NO_FAULT);
1128 }
1130 IA64FAULT vcpu_get_itm(VCPU *vcpu, UINT64 *pval)
1131 {
1132 //FIXME: Implement this
1133 printf("vcpu_get_itm: Getting cr.itm is unsupported... continuing\n");
1134 return (IA64_NO_FAULT);
1135 //return (IA64_ILLOP_FAULT);
1136 }
1138 IA64FAULT vcpu_get_itc(VCPU *vcpu, UINT64 *pval)
1139 {
1140 //TODO: Implement this
1141 printf("vcpu_get_itc: Getting ar.itc is unsupported\n");
1142 return (IA64_ILLOP_FAULT);
1143 }
1145 void vcpu_pend_timer(VCPU *vcpu)
1146 {
1147 UINT64 itv = PSCB(vcpu,itv) & 0xff;
1149 if (vcpu_timer_disabled(vcpu)) return;
1150 //if (vcpu_timer_inservice(vcpu)) return;
1151 if (PSCBX(vcpu,domain_itm_last) == PSCBX(vcpu,domain_itm)) {
1152 // already delivered an interrupt for this so
1153 // don't deliver another
1154 return;
1155 }
1156 vcpu_pend_interrupt(vcpu, itv);
1157 }
1159 // returns true if a timer interrupt would be delivered before its due time
1160 UINT64 vcpu_timer_pending_early(VCPU *vcpu)
1161 {
1162 UINT64 now = ia64_get_itc();
1163 UINT64 itm = PSCBX(vcpu,domain_itm);
1165 if (vcpu_timer_disabled(vcpu)) return 0;
1166 if (!itm) return 0;
1167 return (vcpu_deliverable_timer(vcpu) && (now < itm));
1168 }
1170 /**************************************************************************
1171 Privileged operation emulation routines
1172 **************************************************************************/
1174 IA64FAULT vcpu_force_data_miss(VCPU *vcpu, UINT64 ifa)
1175 {
1176 PSCB(vcpu,ifa) = ifa;
1177 PSCB(vcpu,itir) = vcpu_get_itir_on_fault(vcpu,ifa);
1178 vcpu_thash(current, ifa, &PSCB(current,iha));
1179 return (vcpu_get_rr_ve(vcpu,ifa) ? IA64_DATA_TLB_VECTOR : IA64_ALT_DATA_TLB_VECTOR);
1180 }
1183 IA64FAULT vcpu_rfi(VCPU *vcpu)
1184 {
1185 // TODO: Only allowed for current vcpu
1186 PSR psr;
1187 UINT64 int_enable, regspsr = 0;
1188 UINT64 ifs;
1189 REGS *regs = vcpu_regs(vcpu);
1190 extern void dorfirfi(void);
1192 psr.i64 = PSCB(vcpu,ipsr);
1193 if (psr.ia64_psr.cpl < 3) psr.ia64_psr.cpl = 2;
1194 int_enable = psr.ia64_psr.i;
1195 if (psr.ia64_psr.ic) PSCB(vcpu,interrupt_collection_enabled) = 1;
1196 if (psr.ia64_psr.dt && psr.ia64_psr.rt && psr.ia64_psr.it) vcpu_set_metaphysical_mode(vcpu,FALSE);
1197 else vcpu_set_metaphysical_mode(vcpu,TRUE);
1198 psr.ia64_psr.ic = 1; psr.ia64_psr.i = 1;
1199 psr.ia64_psr.dt = 1; psr.ia64_psr.rt = 1; psr.ia64_psr.it = 1;
1200 psr.ia64_psr.bn = 1;
1201 //psr.pk = 1; // checking pkeys shouldn't be a problem but seems broken
1202 if (psr.ia64_psr.be) {
1203 printf("*** DOMAIN TRYING TO TURN ON BIG-ENDIAN!!!\n");
1204 return (IA64_ILLOP_FAULT);
1205 }
1206 PSCB(vcpu,incomplete_regframe) = 0; // is this necessary?
1207 ifs = PSCB(vcpu,ifs);
1208 //if ((ifs & regs->cr_ifs & 0x8000000000000000L) && ifs != regs->cr_ifs) {
1209 //if ((ifs & 0x8000000000000000L) && ifs != regs->cr_ifs) {
1210 if (ifs & regs->cr_ifs & 0x8000000000000000L) {
1211 // TODO: validate PSCB(vcpu,iip)
1212 // TODO: PSCB(vcpu,ipsr) = psr;
1213 PSCB(vcpu,ipsr) = psr.i64;
1214 // now set up the trampoline
1215 regs->cr_iip = *(unsigned long *)dorfirfi; // function pointer!!
1216 __asm__ __volatile ("mov %0=psr;;":"=r"(regspsr)::"memory");
1217 regs->cr_ipsr = regspsr & ~(IA64_PSR_I | IA64_PSR_IC | IA64_PSR_BN);
1218 }
1219 else {
1220 regs->cr_ipsr = psr.i64;
1221 regs->cr_iip = PSCB(vcpu,iip);
1222 }
1223 PSCB(vcpu,interrupt_collection_enabled) = 1;
1224 vcpu_bsw1(vcpu);
1225 vcpu->vcpu_info->evtchn_upcall_mask = !int_enable;
1226 return (IA64_NO_FAULT);
1227 }
1229 IA64FAULT vcpu_cover(VCPU *vcpu)
1230 {
1231 // TODO: Only allowed for current vcpu
1232 REGS *regs = vcpu_regs(vcpu);
1234 if (!PSCB(vcpu,interrupt_collection_enabled)) {
1235 if (!PSCB(vcpu,incomplete_regframe))
1236 PSCB(vcpu,ifs) = regs->cr_ifs;
1237 else PSCB(vcpu,incomplete_regframe) = 0;
1238 }
1239 regs->cr_ifs = 0;
1240 return (IA64_NO_FAULT);
1241 }
1243 IA64FAULT vcpu_thash(VCPU *vcpu, UINT64 vadr, UINT64 *pval)
1244 {
1245 UINT64 pta = PSCB(vcpu,pta);
1246 UINT64 pta_sz = (pta & IA64_PTA_SZ(0x3f)) >> IA64_PTA_SZ_BIT;
1247 UINT64 pta_base = pta & ~((1UL << IA64_PTA_BASE_BIT)-1);
1248 UINT64 Mask = (1L << pta_sz) - 1;
1249 UINT64 Mask_60_15 = (Mask >> 15) & 0x3fffffffffff;
1250 UINT64 compMask_60_15 = ~Mask_60_15;
1251 UINT64 rr_ps = vcpu_get_rr_ps(vcpu,vadr);
1252 UINT64 VHPT_offset = (vadr >> rr_ps) << 3;
1253 UINT64 VHPT_addr1 = vadr & 0xe000000000000000L;
1254 UINT64 VHPT_addr2a =
1255 ((pta_base >> 15) & 0x3fffffffffff) & compMask_60_15;
1256 UINT64 VHPT_addr2b =
1257 ((VHPT_offset >> 15) & 0x3fffffffffff) & Mask_60_15;
1258 UINT64 VHPT_addr3 = VHPT_offset & 0x7fff;
1259 UINT64 VHPT_addr = VHPT_addr1 | ((VHPT_addr2a | VHPT_addr2b) << 15) |
1260 VHPT_addr3;
1262 //verbose("vcpu_thash: vadr=%p, VHPT_addr=%p\n",vadr,VHPT_addr);
1263 *pval = VHPT_addr;
1264 return (IA64_NO_FAULT);
1265 }
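A usage sketch of the hash: this is how the walker in vcpu_translate() below consumes it. A short-format VHPT entry is 32 bytes whose first word is the pte, and bit 0 of the pte is the present bit:

    static int guest_vhpt_lookup(VCPU *vcpu, UINT64 vadr, UINT64 *pte)
    {
        UINT64 iha;
        vcpu_thash(vcpu, vadr, &iha);
        if (!__access_ok(iha) ||
            __copy_from_user(pte, (void *)iha, sizeof(*pte)) != 0)
            return 0;               /* walker "missed" in the TLB */
        return (*pte & 1) != 0;     /* present bit */
    }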
1267 IA64FAULT vcpu_ttag(VCPU *vcpu, UINT64 vadr, UINT64 *padr)
1268 {
1269 printf("vcpu_ttag: ttag instruction unsupported\n");
1270 return (IA64_ILLOP_FAULT);
1271 }
1273 unsigned long vhpt_translate_count = 0;
1274 unsigned long fast_vhpt_translate_count = 0;
1275 unsigned long recover_to_page_fault_count = 0;
1276 unsigned long recover_to_break_fault_count = 0;
1278 int warn_region0_address = 0; // FIXME later: tie to a boot parameter?
1280 // FIXME: also need to check && (!trp->key || vcpu_pkr_match(trp->key))
1281 static inline int vcpu_match_tr_entry_no_p(TR_ENTRY *trp, UINT64 ifa, UINT64 rid)
1282 {
1283 return trp->rid == rid
1284 && ifa >= trp->vadr
1285 && ifa <= (trp->vadr + (1L << trp->ps) - 1);
1286 }
1288 static inline int vcpu_match_tr_entry(TR_ENTRY *trp, UINT64 ifa, UINT64 rid)
1289 {
1290 return trp->pte.p && vcpu_match_tr_entry_no_p(trp, ifa, rid);
1291 }
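Numerically, a match means the faulting address falls inside the translation's naturally aligned power-of-two range: with ps = 16 (64KB) and vadr = 0x8000000000010000 the entry covers 0x8000000000010000 through 0x800000000001ffff, and the entry's rid must equal the rid from the address's region register. Just the range test, as a sketch:

    static int tr_covers(UINT64 tr_vadr, UINT64 tr_ps, UINT64 ifa)
    {
        return ifa >= tr_vadr && ifa <= tr_vadr + (1UL << tr_ps) - 1;
    }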
1293 // in_tpa is not used when CONFIG_XEN_IA64_DOM0_VP
1294 IA64FAULT vcpu_translate(VCPU *vcpu, UINT64 address, BOOLEAN is_data, BOOLEAN in_tpa, UINT64 *pteval, UINT64 *itir, UINT64 *iha)
1295 {
1296 unsigned long region = address >> 61;
1297 unsigned long pta, rid, rr;
1298 union pte_flags pte;
1299 int i;
1300 TR_ENTRY *trp;
1302 if (PSCB(vcpu,metaphysical_mode) && !(!is_data && region)) {
1303 // dom0 may generate an uncacheable physical address (msb=1)
1304 if (region && ((region != 4) || (vcpu->domain != dom0))) {
1305 // FIXME: This seems to happen even though it shouldn't. Need to track
1306 // this down, but since it has been apparently harmless, just flag it for now
1307 // panic_domain(vcpu_regs(vcpu),
1309 /*
1310 * A guest may execute itc.d and rfi with psr.dt=0.
1311 * When the VMM fetches the opcode, a TLB miss may occur.
1312 * At that point PSCB(vcpu,metaphysical_mode)=1 and
1313 * region=5, and the VMM must handle the miss as if
1314 * PSCB(vcpu,metaphysical_mode)=0
1315 */
1316 printk("vcpu_translate: bad physical address: 0x%lx\n",
1317 address);
1319 } else {
1320 *pteval = (address & _PAGE_PPN_MASK) | __DIRTY_BITS |
1321 _PAGE_PL_2 | _PAGE_AR_RWX;
1322 *itir = PAGE_SHIFT << 2;
1323 phys_translate_count++;
1324 return IA64_NO_FAULT;
1325 }
1326 }
1327 else if (!region && warn_region0_address) {
1328 REGS *regs = vcpu_regs(vcpu);
1329 unsigned long viip = PSCB(vcpu,iip);
1330 unsigned long vipsr = PSCB(vcpu,ipsr);
1331 unsigned long iip = regs->cr_iip;
1332 unsigned long ipsr = regs->cr_ipsr;
1333 printk("vcpu_translate: bad address 0x%lx, viip=0x%lx, vipsr=0x%lx, iip=0x%lx, ipsr=0x%lx continuing\n",
1334 address, viip, vipsr, iip, ipsr);
1335 }
1337 rr = PSCB(vcpu,rrs)[region];
1338 rid = rr & RR_RID_MASK;
1339 if (is_data) {
1340 if (vcpu_quick_region_check(vcpu->arch.dtr_regions,address)) {
1341 for (trp = vcpu->arch.dtrs, i = NDTRS; i; i--, trp++) {
1342 if (vcpu_match_tr_entry(trp,address,rid)) {
1343 *pteval = trp->pte.val;
1344 *itir = trp->itir;
1345 tr_translate_count++;
1346 return IA64_NO_FAULT;
1347 }
1348 }
1349 }
1350 }
1351 // FIXME?: check itr's for data accesses too, else bad things happen?
1352 /* else */ {
1353 if (vcpu_quick_region_check(vcpu->arch.itr_regions,address)) {
1354 for (trp = vcpu->arch.itrs, i = NITRS; i; i--, trp++) {
1355 if (vcpu_match_tr_entry(trp,address,rid)) {
1356 *pteval = trp->pte.val;
1357 *itir = trp->itir;
1358 tr_translate_count++;
1359 return IA64_NO_FAULT;
1360 }
1361 }
1362 }
1363 }
1365 /* check 1-entry TLB */
1366 // FIXME?: check dtlb for inst accesses too, else bad things happen?
1367 trp = &vcpu->arch.dtlb;
1368 pte = trp->pte;
1369 if (/* is_data && */ pte.p
1370 && vcpu_match_tr_entry_no_p(trp,address,rid)) {
1371 #ifndef CONFIG_XEN_IA64_DOM0_VP
1372 if (vcpu->domain==dom0 && !in_tpa)
1373 *pteval = pte.val;
1374 else
1375 #endif
1376 *pteval = vcpu->arch.dtlb_pte;
1377 *itir = trp->itir;
1378 dtlb_translate_count++;
1379 return IA64_USE_TLB;
1380 }
1382 /* check guest VHPT */
1383 pta = PSCB(vcpu,pta);
1384 if (pta & IA64_PTA_VF) { /* long format VHPT - not implemented */
1385 panic_domain(vcpu_regs(vcpu),"can't do long format VHPT\n");
1386 //return (is_data ? IA64_DATA_TLB_VECTOR:IA64_INST_TLB_VECTOR);
1387 }
1389 *itir = rr & (RR_RID_MASK | RR_PS_MASK);
1390 // note: architecturally, iha is optionally set for alt faults but
1391 // xenlinux depends on it so should document it as part of PV interface
1392 vcpu_thash(vcpu, address, iha);
1393 if (!(rr & RR_VE_MASK) || !(pta & IA64_PTA_VE))
1394 return (is_data ? IA64_ALT_DATA_TLB_VECTOR : IA64_ALT_INST_TLB_VECTOR);
1396 /* avoid recursively walking (short format) VHPT */
1397 if (((address ^ pta) & ((itir_mask(pta) << 3) >> 3)) == 0)
1398 return (is_data ? IA64_DATA_TLB_VECTOR : IA64_INST_TLB_VECTOR);
1400 if (!__access_ok (*iha)
1401 || __copy_from_user(&pte, (void *)(*iha), sizeof(pte)) != 0)
1402 // virtual VHPT walker "missed" in TLB
1403 return IA64_VHPT_FAULT;
1405 /*
1406 * Optimisation: this VHPT walker aborts on not-present pages
1407 * instead of inserting a not-present translation, this allows
1408 * vectoring directly to the miss handler.
1409 */
1410 if (!pte.p)
1411 return (is_data ? IA64_DATA_TLB_VECTOR : IA64_INST_TLB_VECTOR);
1413 /* found mapping in guest VHPT! */
1414 *itir = rr & RR_PS_MASK;
1415 *pteval = pte.val;
1416 vhpt_translate_count++;
1417 return IA64_NO_FAULT;
1418 }
1420 IA64FAULT vcpu_tpa(VCPU *vcpu, UINT64 vadr, UINT64 *padr)
1421 {
1422 UINT64 pteval, itir, mask, iha;
1423 IA64FAULT fault;
1425 fault = vcpu_translate(vcpu, vadr, TRUE, TRUE, &pteval, &itir, &iha);
1426 if (fault == IA64_NO_FAULT || fault == IA64_USE_TLB)
1427 {
1428 mask = itir_mask(itir);
1429 *padr = (pteval & _PAGE_PPN_MASK & mask) | (vadr & ~mask);
1430 return (IA64_NO_FAULT);
1431 }
1432 return vcpu_force_data_miss(vcpu,vadr);
1433 }
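The ppn/offset split above: itir_mask() (defined elsewhere) turns the page size carried in itir into a mask of the address bits supplied by the pte, while the remaining low bits come straight from vadr. For a 16KB page (ps = 14), as a sketch:

    static UINT64 pte_to_paddr(UINT64 pteval, UINT64 vadr, UINT64 ps)
    {
        UINT64 mask = ~((1UL << ps) - 1);   /* ps=14: ~0x3fff */
        return (pteval & _PAGE_PPN_MASK & mask) | (vadr & ~mask);
    }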
1435 IA64FAULT vcpu_tak(VCPU *vcpu, UINT64 vadr, UINT64 *key)
1436 {
1437 printf("vcpu_tak: tak instruction unsupported\n");
1438 return (IA64_ILLOP_FAULT);
1439 // HACK ALERT: tak does a thash for now
1440 //return vcpu_thash(vcpu,vadr,key);
1441 }
1443 /**************************************************************************
1444 VCPU debug breakpoint register access routines
1445 **************************************************************************/
1447 IA64FAULT vcpu_set_dbr(VCPU *vcpu, UINT64 reg, UINT64 val)
1448 {
1449 // TODO: unimplemented DBRs return a reserved register fault
1450 // TODO: Should set Logical CPU state, not just physical
1451 ia64_set_dbr(reg,val);
1452 return (IA64_NO_FAULT);
1453 }
1455 IA64FAULT vcpu_set_ibr(VCPU *vcpu, UINT64 reg, UINT64 val)
1456 {
1457 // TODO: unimplemented IBRs return a reserved register fault
1458 // TODO: Should set Logical CPU state, not just physical
1459 ia64_set_ibr(reg,val);
1460 return (IA64_NO_FAULT);
1461 }
1463 IA64FAULT vcpu_get_dbr(VCPU *vcpu, UINT64 reg, UINT64 *pval)
1464 {
1465 // TODO: unimplemented DBRs return a reserved register fault
1466 UINT64 val = ia64_get_dbr(reg);
1467 *pval = val;
1468 return (IA64_NO_FAULT);
1469 }
1471 IA64FAULT vcpu_get_ibr(VCPU *vcpu, UINT64 reg, UINT64 *pval)
1472 {
1473 // TODO: unimplemented IBRs return a reserved register fault
1474 UINT64 val = ia64_get_ibr(reg);
1475 *pval = val;
1476 return (IA64_NO_FAULT);
1477 }
1479 /**************************************************************************
1480 VCPU performance monitor register access routines
1481 **************************************************************************/
1483 IA64FAULT vcpu_set_pmc(VCPU *vcpu, UINT64 reg, UINT64 val)
1484 {
1485 // TODO: Should set Logical CPU state, not just physical
1486 // NOTE: Writes to unimplemented PMC registers are discarded
1487 #ifdef DEBUG_PFMON
1488 printf("vcpu_set_pmc(%x,%lx)\n",reg,val);
1489 #endif
1490 ia64_set_pmc(reg,val);
1491 return (IA64_NO_FAULT);
1492 }
1494 IA64FAULT vcpu_set_pmd(VCPU *vcpu, UINT64 reg, UINT64 val)
1495 {
1496 // TODO: Should set Logical CPU state, not just physical
1497 // NOTE: Writes to unimplemented PMD registers are discarded
1498 #ifdef DEBUG_PFMON
1499 printf("vcpu_set_pmd(%x,%lx)\n",reg,val);
1500 #endif
1501 ia64_set_pmd(reg,val);
1502 return (IA64_NO_FAULT);
1503 }
1505 IA64FAULT vcpu_get_pmc(VCPU *vcpu, UINT64 reg, UINT64 *pval)
1506 {
1507 // NOTE: Reads from unimplemented PMC registers return zero
1508 UINT64 val = (UINT64)ia64_get_pmc(reg);
1509 #ifdef DEBUG_PFMON
1510 printf("%lx=vcpu_get_pmc(%x)\n",val,reg);
1511 #endif
1512 *pval = val;
1513 return (IA64_NO_FAULT);
1514 }
1516 IA64FAULT vcpu_get_pmd(VCPU *vcpu, UINT64 reg, UINT64 *pval)
1517 {
1518 // NOTE: Reads from unimplemented PMD registers return zero
1519 UINT64 val = (UINT64)ia64_get_pmd(reg);
1520 #ifdef DEBUG_PFMON
1521 printf("%lx=vcpu_get_pmd(%x)\n",val,reg);
1522 #endif
1523 *pval = val;
1524 return (IA64_NO_FAULT);
1525 }
1527 /**************************************************************************
1528 VCPU banked general register access routines
1529 **************************************************************************/
1530 #define vcpu_bsw0_unat(i,b0unat,b1unat,runat,IA64_PT_REGS_R16_SLOT) \
1531 do{ \
1532 __asm__ __volatile__ ( \
1533 ";;extr.u %0 = %3,%6,16;;\n" \
1534 "dep %1 = %0, %1, 0, 16;;\n" \
1535 "st8 [%4] = %1\n" \
1536 "extr.u %0 = %2, 16, 16;;\n" \
1537 "dep %3 = %0, %3, %6, 16;;\n" \
1538 "st8 [%5] = %3\n" \
1539 ::"r"(i),"r"(*b1unat),"r"(*b0unat),"r"(*runat),"r"(b1unat), \
1540 "r"(runat),"i"(IA64_PT_REGS_R16_SLOT):"memory"); \
1541 }while(0)
1543 IA64FAULT vcpu_bsw0(VCPU *vcpu)
1544 {
1545 // TODO: Only allowed for current vcpu
1546 REGS *regs = vcpu_regs(vcpu);
1547 unsigned long *r = &regs->r16;
1548 unsigned long *b0 = &PSCB(vcpu,bank0_regs[0]);
1549 unsigned long *b1 = &PSCB(vcpu,bank1_regs[0]);
1550 unsigned long *runat = &regs->eml_unat;
1551 unsigned long *b0unat = &PSCB(vcpu,vbnat);
1552 unsigned long *b1unat = &PSCB(vcpu,vnat);
1554 unsigned long i;
1556 if(VMX_DOMAIN(vcpu)){
1557 if(VCPU(vcpu,vpsr)&IA64_PSR_BN){
1558 for (i = 0; i < 16; i++) { *b1++ = *r; *r++ = *b0++; }
1559 vcpu_bsw0_unat(i,b0unat,b1unat,runat,IA64_PT_REGS_R16_SLOT);
1560 VCPU(vcpu,vpsr) &= ~IA64_PSR_BN;
1561 }
1562 }else{
1563 if (PSCB(vcpu,banknum)) {
1564 for (i = 0; i < 16; i++) { *b1++ = *r; *r++ = *b0++; }
1565 vcpu_bsw0_unat(i,b0unat,b1unat,runat,IA64_PT_REGS_R16_SLOT);
1566 PSCB(vcpu,banknum) = 0;
1567 }
1568 }
1569 return (IA64_NO_FAULT);
1570 }
1572 #define vcpu_bsw1_unat(i,b0unat,b1unat,runat,IA64_PT_REGS_R16_SLOT) \
1573 do{ \
1574 __asm__ __volatile__ ( \
1575 ";;extr.u %0 = %3,%6,16;;\n" \
1576 "dep %1 = %0, %1, 16, 16;;\n" \
1577 "st8 [%4] = %1\n" \
1578 "extr.u %0 = %2, 0, 16;;\n" \
1579 "dep %3 = %0, %3, %6, 16;;\n" \
1580 "st8 [%5] = %3\n" \
1581 ::"r"(i),"r"(*b0unat),"r"(*b1unat),"r"(*runat),"r"(b0unat), \
1582 "r"(runat),"i"(IA64_PT_REGS_R16_SLOT):"memory"); \
1583 }while(0)
1585 IA64FAULT vcpu_bsw1(VCPU *vcpu)
1586 {
1587 // TODO: Only allowed for current vcpu
1588 REGS *regs = vcpu_regs(vcpu);
1589 unsigned long *r = &regs->r16;
1590 unsigned long *b0 = &PSCB(vcpu,bank0_regs[0]);
1591 unsigned long *b1 = &PSCB(vcpu,bank1_regs[0]);
1592 unsigned long *runat = &regs->eml_unat;
1593 unsigned long *b0unat = &PSCB(vcpu,vbnat);
1594 unsigned long *b1unat = &PSCB(vcpu,vnat);
1596 unsigned long i;
1598 if(VMX_DOMAIN(vcpu)){
1599 if(!(VCPU(vcpu,vpsr)&IA64_PSR_BN)){
1600 for (i = 0; i < 16; i++) { *b0++ = *r; *r++ = *b1++; }
1601 vcpu_bsw1_unat(i,b0unat,b1unat,runat,IA64_PT_REGS_R16_SLOT);
1602 VCPU(vcpu,vpsr) |= IA64_PSR_BN;
1603 }
1604 }else{
1605 if (!PSCB(vcpu,banknum)) {
1606 for (i = 0; i < 16; i++) { *b0++ = *r; *r++ = *b1++; }
1607 vcpu_bsw1_unat(i,b0unat,b1unat,runat,IA64_PT_REGS_R16_SLOT);
1608 PSCB(vcpu,banknum) = 1;
1609 }
1610 }
1611 return (IA64_NO_FAULT);
1612 }
1614 /**************************************************************************
1615 VCPU cpuid access routines
1616 **************************************************************************/
1619 IA64FAULT vcpu_get_cpuid(VCPU *vcpu, UINT64 reg, UINT64 *pval)
1620 {
1621 // FIXME: This could get called as a result of a rsvd-reg fault
1622 // if reg > 3
1623 switch(reg) {
1624 case 0:
1625 memcpy(pval,"Xen/ia64",8);
1626 break;
1627 case 1:
1628 *pval = 0;
1629 break;
1630 case 2:
1631 *pval = 0;
1632 break;
1633 case 3:
1634 *pval = ia64_get_cpuid(3);
1635 break;
1636 case 4:
1637 *pval = ia64_get_cpuid(4);
1638 break;
1639 default:
1640 if (reg > (ia64_get_cpuid(3) & 0xff))
1641 return IA64_RSVDREG_FAULT;
1642 *pval = ia64_get_cpuid(reg);
1643 break;
1644 }
1645 return (IA64_NO_FAULT);
1646 }
1648 /**************************************************************************
1649 VCPU region register access routines
1650 **************************************************************************/
1652 unsigned long vcpu_get_rr_ve(VCPU *vcpu,UINT64 vadr)
1653 {
1654 ia64_rr rr;
1656 rr.rrval = PSCB(vcpu,rrs)[vadr>>61];
1657 return(rr.ve);
1658 }
1660 IA64FAULT vcpu_set_rr(VCPU *vcpu, UINT64 reg, UINT64 val)
1661 {
1662 PSCB(vcpu,rrs)[reg>>61] = val;
1663 // warning: set_one_rr() does it "live"
1664 set_one_rr(reg,val);
1665 return (IA64_NO_FAULT);
1666 }
1668 IA64FAULT vcpu_get_rr(VCPU *vcpu, UINT64 reg, UINT64 *pval)
1669 {
1670 if(VMX_DOMAIN(vcpu)){
1671 *pval = VMX(vcpu,vrr[reg>>61]);
1672 }else{
1673 *pval = PSCB(vcpu,rrs)[reg>>61];
1674 }
1675 return (IA64_NO_FAULT);
1676 }
1678 /**************************************************************************
1679 VCPU protection key register access routines
1680 **************************************************************************/
1682 IA64FAULT vcpu_get_pkr(VCPU *vcpu, UINT64 reg, UINT64 *pval)
1683 {
1684 #ifndef PKR_USE_FIXED
1685 printk("vcpu_get_pkr: called, not implemented yet\n");
1686 return IA64_ILLOP_FAULT;
1687 #else
1688 UINT64 val = (UINT64)ia64_get_pkr(reg);
1689 *pval = val;
1690 return (IA64_NO_FAULT);
1691 #endif
1692 }
1694 IA64FAULT vcpu_set_pkr(VCPU *vcpu, UINT64 reg, UINT64 val)
1695 {
1696 #ifndef PKR_USE_FIXED
1697 printk("vcpu_set_pkr: called, not implemented yet\n");
1698 return IA64_ILLOP_FAULT;
1699 #else
1700 // if (reg >= NPKRS) return (IA64_ILLOP_FAULT);
1701 vcpu->pkrs[reg] = val;
1702 ia64_set_pkr(reg,val);
1703 return (IA64_NO_FAULT);
1704 #endif
1705 }
1707 /**************************************************************************
1708 VCPU translation register access routines
1709 **************************************************************************/
1711 void vcpu_purge_tr_entry(TR_ENTRY *trp)
1712 {
1713 trp->pte.val = 0;
1714 }
1716 static void vcpu_set_tr_entry(TR_ENTRY *trp, UINT64 pte, UINT64 itir, UINT64 ifa)
1717 {
1718 UINT64 ps;
1719 union pte_flags new_pte;
1721 trp->itir = itir;
1722 trp->rid = VCPU(current,rrs[ifa>>61]) & RR_RID_MASK;
1723 ps = trp->ps;
1724 new_pte.val = pte;
1725 if (new_pte.pl < 2) new_pte.pl = 2;
1726 trp->vadr = ifa & ~0xfff;
1727 if (ps > 12) { // "ignore" relevant low-order bits
1728 new_pte.ppn &= ~((1UL<<(ps-12))-1);
1729 trp->vadr &= ~((1UL<<ps)-1);
1730 }
1732 /* Atomic write. */
1733 trp->pte.val = new_pte.val;
1734 }
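The ps > 12 branch above drops the architecturally "ignored" low-order bits: ppn is expressed in 4KB units, so a 2^ps translation must have its low ps-12 ppn bits and its low ps vadr bits clear. The same alignment as a sketch:

    static void align_translation(UINT64 *ppn, UINT64 *vadr, UINT64 ps)
    {
        *ppn &= ~((1UL << (ps - 12)) - 1);  /* ppn counts 4KB frames */
        *vadr &= ~((1UL << ps) - 1);        /* align tag to 2^ps */
    }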
1736 IA64FAULT vcpu_itr_d(VCPU *vcpu, UINT64 slot, UINT64 pte,
1737 UINT64 itir, UINT64 ifa)
1738 {
1739 TR_ENTRY *trp;
1741 if (slot >= NDTRS) return IA64_RSVDREG_FAULT;
1742 trp = &PSCBX(vcpu,dtrs[slot]);
1743 //printf("***** itr.d: setting slot %d: ifa=%p\n",slot,ifa);
1744 vcpu_set_tr_entry(trp,pte,itir,ifa);
1745 vcpu_quick_region_set(PSCBX(vcpu,dtr_regions),ifa);
1746 return IA64_NO_FAULT;
1747 }
1749 IA64FAULT vcpu_itr_i(VCPU *vcpu, UINT64 slot, UINT64 pte,
1750 UINT64 itir, UINT64 ifa)
1751 {
1752 TR_ENTRY *trp;
1754 if (slot >= NITRS) return IA64_RSVDREG_FAULT;
1755 trp = &PSCBX(vcpu,itrs[slot]);
1756 //printf("***** itr.i: setting slot %d: ifa=%p\n",slot,ifa);
1757 vcpu_set_tr_entry(trp,pte,itir,ifa);
1758 vcpu_quick_region_set(PSCBX(vcpu,itr_regions),ifa);
1759 return IA64_NO_FAULT;
1760 }
1762 /**************************************************************************
1763 VCPU translation cache access routines
1764 **************************************************************************/
1766 void foobar(void) { /*vcpu_verbose = 1;*/ }
1768 void vcpu_itc_no_srlz(VCPU *vcpu, UINT64 IorD, UINT64 vaddr, UINT64 pte, UINT64 mp_pte, UINT64 logps)
1769 {
1770 unsigned long psr;
1771 unsigned long ps = (vcpu->domain==dom0) ? logps : PAGE_SHIFT;
1773 // FIXME: validate ifa here (not in Xen space), COULD MACHINE CHECK!
1774 // FIXME, must be inlined or potential for nested fault here!
1775 if ((vcpu->domain==dom0) && (logps < PAGE_SHIFT)) {
1776 printf("vcpu_itc_no_srlz: domain0 use of smaller page size!\n");
1777 //FIXME: kill domain here
1778 while(1);
1779 }
1780 #ifdef CONFIG_XEN_IA64_DOM0_VP
1781 BUG_ON(logps > PAGE_SHIFT);
1782 #endif
1783 psr = ia64_clear_ic();
1784 ia64_itc(IorD,vaddr,pte,ps); // FIXME: look for bigger mappings
1785 ia64_set_psr(psr);
1786 // ia64_srlz_i(); // no srls req'd, will rfi later
1787 #ifdef VHPT_GLOBAL
1788 if (vcpu->domain==dom0 && ((vaddr >> 61) == 7)) {
1789 // FIXME: this is dangerous... vhpt_flush_address ensures these
1790 // addresses never get flushed. More work needed if this
1791 // ever happens.
1792 //printf("vhpt_insert(%p,%p,%p)\n",vaddr,pte,1L<<logps);
1793 if (logps > PAGE_SHIFT) vhpt_multiple_insert(vaddr,pte,logps);
1794 else vhpt_insert(vaddr,pte,logps<<2);
1795 }
1796 // even if domain pagesize is larger than PAGE_SIZE, just put
1797 // PAGE_SIZE mapping in the vhpt for now, else purging is complicated
1798 else vhpt_insert(vaddr,pte,PAGE_SHIFT<<2);
1799 #endif
1800 if ((mp_pte == -1UL) || (IorD & 0x4)) // don't place in 1-entry TLB
1801 return;
1802 if (IorD & 0x1) {
1803 vcpu_set_tr_entry(&PSCBX(vcpu,itlb),pte,ps<<2,vaddr);
1804 PSCBX(vcpu,itlb_pte) = mp_pte;
1805 }
1806 if (IorD & 0x2) {
1807 vcpu_set_tr_entry(&PSCBX(vcpu,dtlb),pte,ps<<2,vaddr);
1808 PSCBX(vcpu,dtlb_pte) = mp_pte;
1809 }
1810 }
1812 IA64FAULT vcpu_itc_d(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa)
1813 {
1814 unsigned long pteval, logps = itir_ps(itir);
1815 BOOLEAN swap_rr0 = (!(ifa>>61) && PSCB(vcpu,metaphysical_mode));
1817 if (logps < PAGE_SHIFT) {
1818 printf("vcpu_itc_d: domain trying to use smaller page size!\n");
1819 //FIXME: kill domain here
1820 while(1);
1821 }
1822 //itir = (itir & ~0xfc) | (PAGE_SHIFT<<2); // ignore domain's pagesize
1823 pteval = translate_domain_pte(pte, ifa, itir, &logps);
1824 if (!pteval) return IA64_ILLOP_FAULT;
1825 if (swap_rr0) set_one_rr(0x0,PSCB(vcpu,rrs[0]));
1826 vcpu_itc_no_srlz(vcpu,2,ifa,pteval,pte,logps);
1827 if (swap_rr0) set_metaphysical_rr0();
1828 return IA64_NO_FAULT;
1829 }
1831 IA64FAULT vcpu_itc_i(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa)
1832 {
1833 unsigned long pteval, logps = itir_ps(itir);
1834 BOOLEAN swap_rr0 = (!(ifa>>61) && PSCB(vcpu,metaphysical_mode));
1836 // FIXME: validate ifa here (not in Xen space), COULD MACHINE CHECK!
1837 if (logps < PAGE_SHIFT) {
1838 printf("vcpu_itc_i: domain trying to use smaller page size!\n");
1839 //FIXME: kill domain here
1840 while(1);
1841 }
1842 //itir = (itir & ~0xfc) | (PAGE_SHIFT<<2); // ignore domain's pagesize
1843 pteval = translate_domain_pte(pte, ifa, itir, &logps);
1844 // FIXME: what to do if bad physical address? (machine check?)
1845 if (!pteval) return IA64_ILLOP_FAULT;
1846 if (swap_rr0) set_one_rr(0x0,PSCB(vcpu,rrs[0]));
1847 vcpu_itc_no_srlz(vcpu, 1,ifa,pteval,pte,logps);
1848 if (swap_rr0) set_metaphysical_rr0();
1849 return IA64_NO_FAULT;
1850 }
1852 IA64FAULT vcpu_ptc_l(VCPU *vcpu, UINT64 vadr, UINT64 addr_range)
1853 {
1854 printk("vcpu_ptc_l: called, not implemented yet\n");
1855 return IA64_ILLOP_FAULT;
1856 }
1858 // At privlvl=0, fc performs no access rights or protection key checks, while
1859 // at privlvl!=0, fc performs access rights checks as if it were a 1-byte
1860 // read but no protection key check. Thus in order to avoid an unexpected
1861 // access rights fault, we have to translate the virtual address to a
1862 // physical address (possibly via a metaphysical address) and do the fc
1863 // on the physical address, which is guaranteed to flush the same cache line
1864 IA64FAULT vcpu_fc(VCPU *vcpu, UINT64 vadr)
1865 {
1866 // TODO: Only allowed for current vcpu
1867 UINT64 mpaddr, paddr;
1868 IA64FAULT fault;
1870 fault = vcpu_tpa(vcpu, vadr, &mpaddr);
1871 if (fault == IA64_NO_FAULT) {
1872 paddr = translate_domain_mpaddr(mpaddr);
1873 ia64_fc(__va(paddr));
1874 }
1875 return fault;
1876 }
1878 int ptce_count = 0;
1879 IA64FAULT vcpu_ptc_e(VCPU *vcpu, UINT64 vadr)
1880 {
1881 // Note that this only needs to be called once, i.e. the
1882 // architected loop to purge the entire TLB, should use
1883 // base = stride1 = stride2 = 0, count0 = count1 = 1
1885 #ifdef VHPT_GLOBAL
1886 vhpt_flush(); // FIXME: This is overdoing it
1887 #endif
1888 local_flush_tlb_all();
1889 // just invalidate the "whole" tlb
1890 vcpu_purge_tr_entry(&PSCBX(vcpu,dtlb));
1891 vcpu_purge_tr_entry(&PSCBX(vcpu,itlb));
1892 return IA64_NO_FAULT;
1893 }
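For reference, a sketch of the architected purge loop the comment above refers to, modeled on Linux/ia64's local_flush_tlb_all(); base, counts and strides come from PAL_PTCE_INFO, and under Xen one iteration suffices because base = stride1 = stride2 = 0 and count0 = count1 = 1:

    static void ptce_loop(unsigned long base, unsigned long count0,
                          unsigned long count1, unsigned long stride0,
                          unsigned long stride1)
    {
        unsigned long i, j, addr = base;
        for (i = 0; i < count0; i++) {
            for (j = 0; j < count1; j++) {
                ia64_ptce(addr);     /* purge translation cache entry */
                addr += stride1;
            }
            addr += stride0;
        }
    }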
1895 IA64FAULT vcpu_ptc_g(VCPU *vcpu, UINT64 vadr, UINT64 addr_range)
1896 {
1897 printk("vcpu_ptc_g: called, not implemented yet\n");
1898 return IA64_ILLOP_FAULT;
1899 }
1901 IA64FAULT vcpu_ptc_ga(VCPU *vcpu,UINT64 vadr,UINT64 addr_range)
1902 {
1903 // FIXME: validate not flushing Xen addresses
1904 // if (Xen address) return(IA64_ILLOP_FAULT);
1905 // FIXME: ??breaks if domain PAGE_SIZE < Xen PAGE_SIZE
1906 //printf("######## vcpu_ptc_ga(%p,%p) ##############\n",vadr,addr_range);
1908 #ifdef CONFIG_XEN_SMP
1909 struct domain *d = vcpu->domain;
1910 struct vcpu *v;
1912 for_each_vcpu (d, v) {
1913 if (v == vcpu)
1914 continue;
1916 /* Purge TC entries.
1917 FIXME: clear only if match. */
1918 vcpu_purge_tr_entry(&PSCBX(v,dtlb));
1919 vcpu_purge_tr_entry(&PSCBX(v,itlb));
1921 #ifdef VHPT_GLOBAL
1922 /* Invalidate VHPT entries. */
1923 vhpt_flush_address_remote (v->processor, vadr, addr_range);
1924 #endif
1925 }
1926 #endif
1928 #ifdef VHPT_GLOBAL
1929 vhpt_flush_address(vadr,addr_range);
1930 #endif
1931 ia64_global_tlb_purge(vadr,vadr+addr_range,PAGE_SHIFT);
1932 /* Purge tc. */
1933 vcpu_purge_tr_entry(&PSCBX(vcpu,dtlb));
1934 vcpu_purge_tr_entry(&PSCBX(vcpu,itlb));
1935 return IA64_NO_FAULT;
1936 }
1938 IA64FAULT vcpu_ptr_d(VCPU *vcpu,UINT64 vadr,UINT64 addr_range)
1939 {
1940 printf("vcpu_ptr_d: Purging TLB is unsupported\n");
1941 // don't forget to recompute dtr_regions
1942 return (IA64_ILLOP_FAULT);
1943 }
1945 IA64FAULT vcpu_ptr_i(VCPU *vcpu,UINT64 vadr,UINT64 addr_range)
1946 {
1947 printf("vcpu_ptr_i: Purging TLB is unsupported\n");
1948 // don't forget to recompute itr_regions
1949 return (IA64_ILLOP_FAULT);
1950 }