ia64/xen-unstable

xen/arch/ia64/xen/vcpu.c @ 7283:333f722ed6d0

Fixes for correct itir handling in vcpu_translate (broke simulator)
author	djm@kirby.fc.hp.com
date	Tue Oct 11 15:50:21 2005 -0600
/*
 * Virtualized CPU functions
 *
 * Copyright (C) 2004-2005 Hewlett-Packard Co.
 *	Dan Magenheimer (dan.magenheimer@hp.com)
 *
 */

#if 1
// TEMPORARY PATCH for match_dtlb uses this, can be removed later
// FIXME SMP
int in_tpa = 0;
#endif

#include <linux/sched.h>
#include <public/arch-ia64.h>
#include <asm/ia64_int.h>
#include <asm/vcpu.h>
#include <asm/regionreg.h>
#include <asm/tlb.h>
#include <asm/processor.h>
#include <asm/delay.h>
#include <asm/vmx_vcpu.h>
#include <xen/event.h>

typedef union {
	struct ia64_psr ia64_psr;
	unsigned long i64;
} PSR;

//typedef struct pt_regs REGS;
//typedef struct domain VCPU;

// this def for vcpu_regs won't work if kernel stack is present
//#define vcpu_regs(vcpu) ((struct pt_regs *) vcpu->arch.regs
#define vcpu_regs(vcpu) (((struct pt_regs *) ((char *) (vcpu) + IA64_STK_OFFSET)) - 1)
#define PSCB(x,y)	VCPU(x,y)
#define PSCBX(x,y)	x->arch.y
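
/* Editor's illustration (not part of the original changeset): the two
 * accessor macros above select different backing stores.  A sketch of
 * how they expand, assuming the usual VCPU() mapped-regs accessor from
 * the Xen/ia64 headers:
 *
 *	PSCB(vcpu,iip)	-> VCPU(vcpu,iip)	// privileged state kept in
 *						// the shared mapped-regs area
 *	PSCBX(vcpu,dcr)	-> vcpu->arch.dcr	// Xen-private per-vcpu state
 *
 * So PSCB fields are (potentially) guest-visible while PSCBX fields
 * are internal to Xen.
 */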

#define TRUE	1
#define FALSE	0
#define IA64_PTA_SZ_BIT		2
#define IA64_PTA_VF_BIT		8
#define IA64_PTA_BASE_BIT	15
#define IA64_PTA_LFMT		(1UL << IA64_PTA_VF_BIT)
#define IA64_PTA_SZ(x)		(x##UL << IA64_PTA_SZ_BIT)

#define STATIC

#ifdef PRIVOP_ADDR_COUNT
struct privop_addr_count privop_addr_counter[PRIVOP_COUNT_NINSTS] = {
	{ "=ifa", { 0 }, { 0 }, 0 },
	{ "thash", { 0 }, { 0 }, 0 },
	0
};
extern void privop_count_addr(unsigned long addr, int inst);
#define PRIVOP_COUNT_ADDR(regs,inst) privop_count_addr(regs->cr_iip,inst)
#else
#define PRIVOP_COUNT_ADDR(x,y) do {} while (0)
#endif

unsigned long dtlb_translate_count = 0;
unsigned long tr_translate_count = 0;
unsigned long phys_translate_count = 0;

unsigned long vcpu_verbose = 0;
#define verbose(a...) do {if (vcpu_verbose) printf(a);} while(0)

extern TR_ENTRY *match_tr(VCPU *vcpu, unsigned long ifa);
extern TR_ENTRY *match_dtlb(VCPU *vcpu, unsigned long ifa);

/**************************************************************************
 VCPU general register access routines
**************************************************************************/
#ifdef XEN
UINT64
vcpu_get_gr(VCPU *vcpu, unsigned reg)
{
	REGS *regs = vcpu_regs(vcpu);
	UINT64 val;

	if (!reg) return 0;
	getreg(reg,&val,0,regs);	// FIXME: handle NATs later
	return val;
}

IA64FAULT
vcpu_get_gr_nat(VCPU *vcpu, unsigned reg, UINT64 *val)
{
	REGS *regs = vcpu_regs(vcpu);
	int nat;

	getreg(reg,val,&nat,regs);	// FIXME: handle NATs later
	if (nat)
		return IA64_NAT_CONSUMPTION_VECTOR;
	return 0;
}

// returns:
//   IA64_ILLOP_FAULT if the register would cause an Illegal Operation fault
//   IA64_NO_FAULT otherwise
IA64FAULT
vcpu_set_gr(VCPU *vcpu, unsigned reg, UINT64 value, int nat)
{
	REGS *regs = vcpu_regs(vcpu);
	long sof = (regs->cr_ifs) & 0x7f;

	if (!reg) return IA64_ILLOP_FAULT;
	if (reg >= sof + 32) return IA64_ILLOP_FAULT;
	setreg(reg,value,nat,regs);	// FIXME: handle NATs later
	return IA64_NO_FAULT;
}
#else
// returns:
//   IA64_ILLOP_FAULT if the register would cause an Illegal Operation fault
//   IA64_NO_FAULT otherwise
IA64FAULT
vcpu_set_gr(VCPU *vcpu, unsigned reg, UINT64 value)
{
	REGS *regs = vcpu_regs(vcpu);
	long sof = (regs->cr_ifs) & 0x7f;

	if (!reg) return IA64_ILLOP_FAULT;
	if (reg >= sof + 32) return IA64_ILLOP_FAULT;
	setreg(reg,value,0,regs);	// FIXME: handle NATs later
	return IA64_NO_FAULT;
}
#endif
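
/* Editor's worked example (not in the original changeset): cr.ifs bits
 * 0..6 hold the current frame's size-of-frame (sof), so the valid
 * stacked registers are r32..r(31+sof).  With sof == 5 the valid
 * registers are r32..r36, and for reg == 38 the check above
 * ("reg >= sof + 32", i.e. 38 >= 37) correctly raises IA64_ILLOP_FAULT,
 * matching hardware behavior for an out-of-frame access.
 */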

/**************************************************************************
 VCPU privileged application register access routines
**************************************************************************/

IA64FAULT vcpu_set_ar(VCPU *vcpu, UINT64 reg, UINT64 val)
{
	if (reg == 44) return (vcpu_set_itc(vcpu,val));
	else if (reg == 27) return (IA64_ILLOP_FAULT);
	else if (reg == 24)
	    printf("warning: setting ar.eflg is a no-op; no IA-32 support\n");
	else if (reg > 7) return (IA64_ILLOP_FAULT);
	else {
		PSCB(vcpu,krs[reg]) = val;
		ia64_set_kr(reg,val);
	}
	return IA64_NO_FAULT;
}

IA64FAULT vcpu_get_ar(VCPU *vcpu, UINT64 reg, UINT64 *val)
{
	if (reg == 24)
	    printf("warning: getting ar.eflg is a no-op; no IA-32 support\n");
	else if (reg > 7) return (IA64_ILLOP_FAULT);
	else *val = PSCB(vcpu,krs[reg]);
	return IA64_NO_FAULT;
}

/**************************************************************************
 VCPU processor status register access routines
**************************************************************************/

void vcpu_set_metaphysical_mode(VCPU *vcpu, BOOLEAN newmode)
{
	/* only do something if mode changes */
	if (!!newmode ^ !!PSCB(vcpu,metaphysical_mode)) {
		if (newmode) set_metaphysical_rr0();
		else if (PSCB(vcpu,rrs[0]) != -1)
			set_one_rr(0, PSCB(vcpu,rrs[0]));
		PSCB(vcpu,metaphysical_mode) = newmode;
	}
}

IA64FAULT vcpu_reset_psr_dt(VCPU *vcpu)
{
	vcpu_set_metaphysical_mode(vcpu,TRUE);
	return IA64_NO_FAULT;
}

IA64FAULT vcpu_reset_psr_sm(VCPU *vcpu, UINT64 imm24)
{
	struct ia64_psr psr, imm, *ipsr;
	REGS *regs = vcpu_regs(vcpu);

	//PRIVOP_COUNT_ADDR(regs,_RSM);
	// TODO: All of these bits need to be virtualized
	// TODO: Only allowed for current vcpu
	__asm__ __volatile ("mov %0=psr;;" : "=r"(psr) :: "memory");
	ipsr = (struct ia64_psr *)&regs->cr_ipsr;
	imm = *(struct ia64_psr *)&imm24;
	// interrupt flag
	if (imm.i) PSCB(vcpu,interrupt_delivery_enabled) = 0;
	if (imm.ic) PSCB(vcpu,interrupt_collection_enabled) = 0;
	// interrupt collection flag
	//if (imm.ic) PSCB(vcpu,interrupt_delivery_enabled) = 0;
	// just handle psr.up and psr.pp for now
	if (imm24 & ~(IA64_PSR_BE | IA64_PSR_PP | IA64_PSR_UP | IA64_PSR_SP
		| IA64_PSR_I | IA64_PSR_IC | IA64_PSR_DT
		| IA64_PSR_DFL | IA64_PSR_DFH))
			return (IA64_ILLOP_FAULT);
	if (imm.dfh) ipsr->dfh = 0;
	if (imm.dfl) ipsr->dfl = 0;
	if (imm.pp) {
		ipsr->pp = 1;
		psr.pp = 1;	// priv perf ctrs always enabled
		// FIXME: need new field in mapped_regs_t for virtual psr.pp (psr.be too?)
		PSCB(vcpu,tmp[8]) = 0;	// but fool the domain if it gets psr
	}
	if (imm.up) { ipsr->up = 0; psr.up = 0; }
	if (imm.sp) { ipsr->sp = 0; psr.sp = 0; }
	if (imm.be) ipsr->be = 0;
	if (imm.dt) vcpu_set_metaphysical_mode(vcpu,TRUE);
	__asm__ __volatile (";; mov psr.l=%0;; srlz.d"::"r"(psr):"memory");
	return IA64_NO_FAULT;
}

extern UINT64 vcpu_check_pending_interrupts(VCPU *vcpu);
#define SPURIOUS_VECTOR 0xf

IA64FAULT vcpu_set_psr_dt(VCPU *vcpu)
{
	vcpu_set_metaphysical_mode(vcpu,FALSE);
	return IA64_NO_FAULT;
}

IA64FAULT vcpu_set_psr_i(VCPU *vcpu)
{
	PSCB(vcpu,interrupt_delivery_enabled) = 1;
	PSCB(vcpu,interrupt_collection_enabled) = 1;
	return IA64_NO_FAULT;
}

IA64FAULT vcpu_set_psr_sm(VCPU *vcpu, UINT64 imm24)
{
	struct ia64_psr psr, imm, *ipsr;
	REGS *regs = vcpu_regs(vcpu);
	UINT64 mask, enabling_interrupts = 0;

	//PRIVOP_COUNT_ADDR(regs,_SSM);
	// TODO: All of these bits need to be virtualized
	__asm__ __volatile ("mov %0=psr;;" : "=r"(psr) :: "memory");
	imm = *(struct ia64_psr *)&imm24;
	ipsr = (struct ia64_psr *)&regs->cr_ipsr;
	// just handle psr.sp,pp and psr.i,ic (and user mask) for now
	mask = IA64_PSR_PP|IA64_PSR_SP|IA64_PSR_I|IA64_PSR_IC|IA64_PSR_UM |
		IA64_PSR_DT|IA64_PSR_DFL|IA64_PSR_DFH;
	if (imm24 & ~mask) return (IA64_ILLOP_FAULT);
	if (imm.dfh) ipsr->dfh = 1;
	if (imm.dfl) ipsr->dfl = 1;
	if (imm.pp) {
		ipsr->pp = 1; psr.pp = 1;
		// FIXME: need new field in mapped_regs_t for virtual psr.pp (psr.be too?)
		PSCB(vcpu,tmp[8]) = 1;
	}
	if (imm.sp) { ipsr->sp = 1; psr.sp = 1; }
	if (imm.i) {
		if (!PSCB(vcpu,interrupt_delivery_enabled)) {
			//printf("vcpu_set_psr_sm: psr.ic 0->1 ");
			enabling_interrupts = 1;
		}
		PSCB(vcpu,interrupt_delivery_enabled) = 1;
	}
	if (imm.ic) PSCB(vcpu,interrupt_collection_enabled) = 1;
	// TODO: do this faster
	if (imm.mfl) { ipsr->mfl = 1; psr.mfl = 1; }
	if (imm.mfh) { ipsr->mfh = 1; psr.mfh = 1; }
	if (imm.ac) { ipsr->ac = 1; psr.ac = 1; }
	if (imm.up) { ipsr->up = 1; psr.up = 1; }
	if (imm.be) {
		printf("*** DOMAIN TRYING TO TURN ON BIG-ENDIAN!!!\n");
		return (IA64_ILLOP_FAULT);
	}
	if (imm.dt) vcpu_set_metaphysical_mode(vcpu,FALSE);
	__asm__ __volatile (";; mov psr.l=%0;; srlz.d"::"r"(psr):"memory");
	if (enabling_interrupts &&
		vcpu_check_pending_interrupts(vcpu) != SPURIOUS_VECTOR)
			PSCB(vcpu,pending_interruption) = 1;
	return IA64_NO_FAULT;
}

IA64FAULT vcpu_set_psr_l(VCPU *vcpu, UINT64 val)
{
	struct ia64_psr psr, newpsr, *ipsr;
	REGS *regs = vcpu_regs(vcpu);
	UINT64 enabling_interrupts = 0;

	// TODO: All of these bits need to be virtualized
	__asm__ __volatile ("mov %0=psr;;" : "=r"(psr) :: "memory");
	newpsr = *(struct ia64_psr *)&val;
	ipsr = (struct ia64_psr *)&regs->cr_ipsr;
	// just handle psr.up and psr.pp for now
	//if (val & ~(IA64_PSR_PP | IA64_PSR_UP | IA64_PSR_SP)) return (IA64_ILLOP_FAULT);
	// however trying to set other bits can't be an error as it is in ssm
	if (newpsr.dfh) ipsr->dfh = 1;
	if (newpsr.dfl) ipsr->dfl = 1;
	if (newpsr.pp) {
		ipsr->pp = 1; psr.pp = 1;
		// FIXME: need new field in mapped_regs_t for virtual psr.pp (psr.be too?)
		PSCB(vcpu,tmp[8]) = 1;
	}
	else {
		// real psr.pp stays on (priv perf ctrs always enabled),
		// but the domain is fooled into seeing pp clear
		ipsr->pp = 1; psr.pp = 1;
		PSCB(vcpu,tmp[8]) = 0;
	}
	if (newpsr.up) { ipsr->up = 1; psr.up = 1; }
	if (newpsr.sp) { ipsr->sp = 1; psr.sp = 1; }
	if (newpsr.i) {
		if (!PSCB(vcpu,interrupt_delivery_enabled))
			enabling_interrupts = 1;
		PSCB(vcpu,interrupt_delivery_enabled) = 1;
	}
	if (newpsr.ic) PSCB(vcpu,interrupt_collection_enabled) = 1;
	if (newpsr.mfl) { ipsr->mfl = 1; psr.mfl = 1; }
	if (newpsr.mfh) { ipsr->mfh = 1; psr.mfh = 1; }
	if (newpsr.ac) { ipsr->ac = 1; psr.ac = 1; }
	if (newpsr.up) { ipsr->up = 1; psr.up = 1; }
	if (newpsr.dt && newpsr.rt) vcpu_set_metaphysical_mode(vcpu,FALSE);
	else vcpu_set_metaphysical_mode(vcpu,TRUE);
	if (newpsr.be) {
		printf("*** DOMAIN TRYING TO TURN ON BIG-ENDIAN!!!\n");
		return (IA64_ILLOP_FAULT);
	}
	if (enabling_interrupts &&
		vcpu_check_pending_interrupts(vcpu) != SPURIOUS_VECTOR)
			PSCB(vcpu,pending_interruption) = 1;
	return IA64_NO_FAULT;
}

IA64FAULT vcpu_get_psr(VCPU *vcpu, UINT64 *pval)
{
	UINT64 psr;
	struct ia64_psr newpsr;

	// TODO: This needs to return a "filtered" view of
	// the psr, not the actual psr.  Probably the psr needs
	// to be a field in regs (in addition to ipsr).
	__asm__ __volatile ("mov %0=psr;;" : "=r"(psr) :: "memory");
	newpsr = *(struct ia64_psr *)&psr;
	if (newpsr.cpl == 2) newpsr.cpl = 0;
	if (PSCB(vcpu,interrupt_delivery_enabled)) newpsr.i = 1;
	else newpsr.i = 0;
	if (PSCB(vcpu,interrupt_collection_enabled)) newpsr.ic = 1;
	else newpsr.ic = 0;
	// FIXME: need new field in mapped_regs_t for virtual psr.pp (psr.be too?)
	if (PSCB(vcpu,tmp[8])) newpsr.pp = 1;
	else newpsr.pp = 0;
	*pval = *(unsigned long *)&newpsr;
	return IA64_NO_FAULT;
}

BOOLEAN vcpu_get_psr_ic(VCPU *vcpu)
{
	return !!PSCB(vcpu,interrupt_collection_enabled);
}

BOOLEAN vcpu_get_psr_i(VCPU *vcpu)
{
	return !!PSCB(vcpu,interrupt_delivery_enabled);
}

UINT64 vcpu_get_ipsr_int_state(VCPU *vcpu,UINT64 prevpsr)
{
	UINT64 dcr = PSCBX(vcpu,dcr);
	PSR psr = {0};

	//printf("*** vcpu_get_ipsr_int_state (0x%016lx)...",prevpsr);
	psr.i64 = prevpsr;
	psr.ia64_psr.be = 0; if (dcr & IA64_DCR_BE) psr.ia64_psr.be = 1;
	psr.ia64_psr.pp = 0; if (dcr & IA64_DCR_PP) psr.ia64_psr.pp = 1;
	psr.ia64_psr.ic = PSCB(vcpu,interrupt_collection_enabled);
	psr.ia64_psr.i = PSCB(vcpu,interrupt_delivery_enabled);
	psr.ia64_psr.bn = PSCB(vcpu,banknum);
	psr.ia64_psr.dt = 1; psr.ia64_psr.it = 1; psr.ia64_psr.rt = 1;
	if (psr.ia64_psr.cpl == 2) psr.ia64_psr.cpl = 0; // !!!! fool domain
	// psr.pk = 1;
	//printf("returns 0x%016lx...",psr.i64);
	return psr.i64;
}

/**************************************************************************
 VCPU control register access routines
**************************************************************************/

IA64FAULT vcpu_get_dcr(VCPU *vcpu, UINT64 *pval)
{
	extern unsigned long privop_trace;
	//privop_trace=0;
	//verbose("vcpu_get_dcr: called @%p\n",PSCB(vcpu,iip));
	// Reads of cr.dcr on Xen always have the sign bit set, so
	// a domain can differentiate whether it is running on SP or not
	*pval = PSCBX(vcpu,dcr) | 0x8000000000000000L;
	return (IA64_NO_FAULT);
}

IA64FAULT vcpu_get_iva(VCPU *vcpu, UINT64 *pval)
{
	if (VMX_DOMAIN(vcpu)) {
		*pval = PSCB(vcpu,iva) & ~0x7fffL;
	} else {
		*pval = PSCBX(vcpu,iva) & ~0x7fffL;
	}
	return (IA64_NO_FAULT);
}

IA64FAULT vcpu_get_pta(VCPU *vcpu, UINT64 *pval)
{
	*pval = PSCB(vcpu,pta);
	return (IA64_NO_FAULT);
}

IA64FAULT vcpu_get_ipsr(VCPU *vcpu, UINT64 *pval)
{
	//REGS *regs = vcpu_regs(vcpu);
	//*pval = regs->cr_ipsr;
	*pval = PSCB(vcpu,ipsr);
	return (IA64_NO_FAULT);
}

IA64FAULT vcpu_get_isr(VCPU *vcpu, UINT64 *pval)
{
	*pval = PSCB(vcpu,isr);
	return (IA64_NO_FAULT);
}

IA64FAULT vcpu_get_iip(VCPU *vcpu, UINT64 *pval)
{
	//REGS *regs = vcpu_regs(vcpu);
	//*pval = regs->cr_iip;
	*pval = PSCB(vcpu,iip);
	return (IA64_NO_FAULT);
}

IA64FAULT vcpu_get_ifa(VCPU *vcpu, UINT64 *pval)
{
	UINT64 val = PSCB(vcpu,ifa);
	REGS *regs = vcpu_regs(vcpu);
	PRIVOP_COUNT_ADDR(regs,_GET_IFA);
	*pval = val;
	return (IA64_NO_FAULT);
}

unsigned long vcpu_get_rr_ps(VCPU *vcpu,UINT64 vadr)
{
	ia64_rr rr;

	rr.rrval = PSCB(vcpu,rrs)[vadr>>61];
	return (rr.ps);
}

unsigned long vcpu_get_rr_rid(VCPU *vcpu,UINT64 vadr)
{
	ia64_rr rr;

	rr.rrval = PSCB(vcpu,rrs)[vadr>>61];
	return (rr.rid);
}

unsigned long vcpu_get_itir_on_fault(VCPU *vcpu, UINT64 ifa)
{
	ia64_rr rr;

	rr.rrval = 0;
	rr.ps = vcpu_get_rr_ps(vcpu,ifa);
	rr.rid = vcpu_get_rr_rid(vcpu,ifa);
	return (rr.rrval);
}
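
/* Editor's worked example (not in the original changeset): for a fault
 * at an ifa whose region register has ps = 14 (16KB pages) and
 * rid = 0x1234, the synthesized itir above is rr.rrval with only ps and
 * rid filled in: bits 2..7 = 14 and bits 8..31 = 0x1234, with ve and
 * all other bits left zero.
 */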

IA64FAULT vcpu_get_itir(VCPU *vcpu, UINT64 *pval)
{
	UINT64 val = PSCB(vcpu,itir);
	*pval = val;
	return (IA64_NO_FAULT);
}

IA64FAULT vcpu_get_iipa(VCPU *vcpu, UINT64 *pval)
{
	UINT64 val = PSCB(vcpu,iipa);
	// SP entry code does not save iipa yet nor does it get
	// properly delivered in the pscb
	// printf("*** vcpu_get_iipa: cr.iipa not fully implemented yet!!\n");
	*pval = val;
	return (IA64_NO_FAULT);
}

IA64FAULT vcpu_get_ifs(VCPU *vcpu, UINT64 *pval)
{
	//PSCB(vcpu,ifs) = PSCB(vcpu)->regs.cr_ifs;
	//*pval = PSCB(vcpu,regs).cr_ifs;
	*pval = PSCB(vcpu,ifs);
	PSCB(vcpu,incomplete_regframe) = 0;
	return (IA64_NO_FAULT);
}

IA64FAULT vcpu_get_iim(VCPU *vcpu, UINT64 *pval)
{
	UINT64 val = PSCB(vcpu,iim);
	*pval = val;
	return (IA64_NO_FAULT);
}

IA64FAULT vcpu_get_iha(VCPU *vcpu, UINT64 *pval)
{
	//return vcpu_thash(vcpu,PSCB(vcpu,ifa),pval);
	UINT64 val = PSCB(vcpu,iha);
	REGS *regs = vcpu_regs(vcpu);
	PRIVOP_COUNT_ADDR(regs,_THASH);
	*pval = val;
	return (IA64_NO_FAULT);
}

IA64FAULT vcpu_set_dcr(VCPU *vcpu, UINT64 val)
{
	extern unsigned long privop_trace;
	//privop_trace=1;
	// Reads of cr.dcr on SP always have the sign bit set, so
	// a domain can differentiate whether it is running on SP or not
	// Thus, writes of DCR should ignore the sign bit
	//verbose("vcpu_set_dcr: called\n");
	PSCBX(vcpu,dcr) = val & ~0x8000000000000000L;
	return (IA64_NO_FAULT);
}

IA64FAULT vcpu_set_iva(VCPU *vcpu, UINT64 val)
{
	if (VMX_DOMAIN(vcpu)) {
		PSCB(vcpu,iva) = val & ~0x7fffL;
	} else {
		PSCBX(vcpu,iva) = val & ~0x7fffL;
	}
	return (IA64_NO_FAULT);
}

IA64FAULT vcpu_set_pta(VCPU *vcpu, UINT64 val)
{
	if (val & IA64_PTA_LFMT) {
		printf("*** No support for VHPT long format yet!!\n");
		return (IA64_ILLOP_FAULT);
	}
	if (val & (0x3f<<9)) /* reserved fields */ return IA64_RSVDREG_FAULT;
	if (val & 2) /* reserved fields */ return IA64_RSVDREG_FAULT;
	PSCB(vcpu,pta) = val;
	return IA64_NO_FAULT;
}

IA64FAULT vcpu_set_ipsr(VCPU *vcpu, UINT64 val)
{
	PSCB(vcpu,ipsr) = val;
	return IA64_NO_FAULT;
}

IA64FAULT vcpu_set_isr(VCPU *vcpu, UINT64 val)
{
	PSCB(vcpu,isr) = val;
	return IA64_NO_FAULT;
}

IA64FAULT vcpu_set_iip(VCPU *vcpu, UINT64 val)
{
	PSCB(vcpu,iip) = val;
	return IA64_NO_FAULT;
}

IA64FAULT vcpu_increment_iip(VCPU *vcpu)
{
	REGS *regs = vcpu_regs(vcpu);
	struct ia64_psr *ipsr = (struct ia64_psr *)&regs->cr_ipsr;
	if (ipsr->ri == 2) { ipsr->ri = 0; regs->cr_iip += 16; }
	else ipsr->ri++;
	return (IA64_NO_FAULT);
}

IA64FAULT vcpu_set_ifa(VCPU *vcpu, UINT64 val)
{
	PSCB(vcpu,ifa) = val;
	return IA64_NO_FAULT;
}

IA64FAULT vcpu_set_itir(VCPU *vcpu, UINT64 val)
{
	PSCB(vcpu,itir) = val;
	return IA64_NO_FAULT;
}

IA64FAULT vcpu_set_iipa(VCPU *vcpu, UINT64 val)
{
	// SP entry code does not save iipa yet nor does it get
	// properly delivered in the pscb
	// printf("*** vcpu_set_iipa: cr.iipa not fully implemented yet!!\n");
	PSCB(vcpu,iipa) = val;
	return IA64_NO_FAULT;
}

IA64FAULT vcpu_set_ifs(VCPU *vcpu, UINT64 val)
{
	//REGS *regs = vcpu_regs(vcpu);
	PSCB(vcpu,ifs) = val;
	return IA64_NO_FAULT;
}

IA64FAULT vcpu_set_iim(VCPU *vcpu, UINT64 val)
{
	PSCB(vcpu,iim) = val;
	return IA64_NO_FAULT;
}

IA64FAULT vcpu_set_iha(VCPU *vcpu, UINT64 val)
{
	PSCB(vcpu,iha) = val;
	return IA64_NO_FAULT;
}

/**************************************************************************
 VCPU interrupt control register access routines
**************************************************************************/

void vcpu_pend_unspecified_interrupt(VCPU *vcpu)
{
	PSCB(vcpu,pending_interruption) = 1;
}

void vcpu_pend_interrupt(VCPU *vcpu, UINT64 vector)
{
	if (vector & ~0xff) {
		printf("vcpu_pend_interrupt: bad vector\n");
		return;
	}
	if ( VMX_DOMAIN(vcpu) ) {
		set_bit(vector,VCPU(vcpu,irr));
	} else
	{
		/* if (!test_bit(vector,PSCB(vcpu,delivery_mask))) return; */
		if (test_bit(vector,PSCBX(vcpu,irr))) {
			//printf("vcpu_pend_interrupt: overrun\n");
		}
		set_bit(vector,PSCBX(vcpu,irr));
		PSCB(vcpu,pending_interruption) = 1;
	}

#if 0
	/* Keir: I think you should unblock when an interrupt is pending. */
	{
		int running = test_bit(_VCPUF_running, &vcpu->vcpu_flags);
		vcpu_unblock(vcpu);
		if ( running )
			smp_send_event_check_cpu(vcpu->processor);
	}
#endif
}

void early_tick(VCPU *vcpu)
{
	UINT64 *p = &PSCBX(vcpu,irr[3]);
	printf("vcpu_check_pending: about to deliver early tick\n");
	printf("&irr[0]=%p, irr[0]=0x%lx\n",p,*p);
}

#define IA64_TPR_MMI	0x10000
#define IA64_TPR_MIC	0x000f0

/* checks to see if a VCPU has any unmasked pending interrupts
 * if so, returns the highest, else returns SPURIOUS_VECTOR */
/* NOTE: Since this gets called from vcpu_get_ivr() and the
 * semantics of "mov rx=cr.ivr" ignore the setting of the psr.i bit,
 * this routine also ignores pscb.interrupt_delivery_enabled
 * and this must be checked independently; see vcpu_deliverable_interrupts() */
UINT64 vcpu_check_pending_interrupts(VCPU *vcpu)
{
	UINT64 *p, *q, *r, bits, bitnum, mask, i, vector;

	/* Always check pending event, since guest may just ack the
	 * event injection without handle. Later guest may throw out
	 * the event itself.
	 */
	if (event_pending(vcpu) &&
		!test_bit(vcpu->vcpu_info->arch.evtchn_vector,
			&PSCBX(vcpu, insvc[0])))
		vcpu_pend_interrupt(vcpu, vcpu->vcpu_info->arch.evtchn_vector);

	p = &PSCBX(vcpu,irr[3]);
	/* q = &PSCB(vcpu,delivery_mask[3]); */
	r = &PSCBX(vcpu,insvc[3]);
	for (i = 3; ; p--, q--, r--, i--) {
		bits = *p /* & *q */;
		if (bits) break; // got a potential interrupt
		if (*r) {
			// nothing in this word which is pending+inservice
			// but there is one inservice which masks lower
			return SPURIOUS_VECTOR;
		}
		if (i == 0) {
			// checked all bits... nothing pending+inservice
			return SPURIOUS_VECTOR;
		}
	}
	// have a pending,deliverable interrupt... see if it is masked
	bitnum = ia64_fls(bits);
	//printf("XXXXXXX vcpu_check_pending_interrupts: got bitnum=%p...",bitnum);
	vector = bitnum+(i*64);
	mask = 1L << bitnum;
	//printf("XXXXXXX vcpu_check_pending_interrupts: got vector=%p...",vector);
	if (*r >= mask) {
		// masked by equal inservice
		//printf("but masked by equal inservice\n");
		return SPURIOUS_VECTOR;
	}
	if (PSCB(vcpu,tpr) & IA64_TPR_MMI) {
		// tpr.mmi is set
		//printf("but masked by tpr.mmi\n");
		return SPURIOUS_VECTOR;
	}
	if (((PSCB(vcpu,tpr) & IA64_TPR_MIC) + 15) >= vector) {
		//tpr.mic masks class
		//printf("but masked by tpr.mic\n");
		return SPURIOUS_VECTOR;
	}

	//printf("returned to caller\n");
#if 0
	if (vector == (PSCB(vcpu,itv) & 0xff)) {
		UINT64 now = ia64_get_itc();
		UINT64 itm = PSCBX(vcpu,domain_itm);
		if (now < itm) early_tick(vcpu);
	}
#endif
	return vector;
}
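
/* Editor's worked example of the masking rules above (not in the
 * original changeset): with tpr = 0x40, tpr.mmi is clear and
 * tpr.mic = 4, so the test ((tpr & IA64_TPR_MIC) + 15) >= vector
 * (0x40 + 15 = 79) masks vectors 0..79.  A pending timer vector 0xef
 * is therefore still delivered, while a pending vector 0x30 makes this
 * routine return SPURIOUS_VECTOR.
 */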

UINT64 vcpu_deliverable_interrupts(VCPU *vcpu)
{
	return (vcpu_get_psr_i(vcpu) &&
		vcpu_check_pending_interrupts(vcpu) != SPURIOUS_VECTOR);
}

UINT64 vcpu_deliverable_timer(VCPU *vcpu)
{
	return (vcpu_get_psr_i(vcpu) &&
		vcpu_check_pending_interrupts(vcpu) == PSCB(vcpu,itv));
}

IA64FAULT vcpu_get_lid(VCPU *vcpu, UINT64 *pval)
{
	extern unsigned long privop_trace;
	//privop_trace=1;
	//TODO: Implement this
	printf("vcpu_get_lid: WARNING: Getting cr.lid always returns zero\n");
	//*pval = 0;
	*pval = ia64_getreg(_IA64_REG_CR_LID);
	return IA64_NO_FAULT;
}

IA64FAULT vcpu_get_ivr(VCPU *vcpu, UINT64 *pval)
{
	int i;
	UINT64 vector, mask;

#define HEARTBEAT_FREQ 16	// period in seconds
#ifdef HEARTBEAT_FREQ
#define N_DOMS 16		// number of domains tracked for the heartbeat
	static long count[N_DOMS] = { 0 };
	static long nonclockcount[N_DOMS] = { 0 };
	REGS *regs = vcpu_regs(vcpu);
	unsigned domid = vcpu->domain->domain_id;
#endif
#ifdef IRQ_DEBUG
	static char firstivr = 1;
	static char firsttime[256];
	if (firstivr) {
		int i;
		for (i=0;i<256;i++) firsttime[i]=1;
		firstivr=0;
	}
#endif

	vector = vcpu_check_pending_interrupts(vcpu);
	if (vector == SPURIOUS_VECTOR) {
		PSCB(vcpu,pending_interruption) = 0;
		*pval = vector;
		return IA64_NO_FAULT;
	}
#ifdef HEARTBEAT_FREQ
	if (domid >= N_DOMS) domid = N_DOMS-1;
	if (vector == (PSCB(vcpu,itv) & 0xff)) {
		if (!(++count[domid] & ((HEARTBEAT_FREQ*1024)-1))) {
			printf("Dom%d heartbeat... ticks=%lx,nonticks=%lx\n",
				domid, count[domid], nonclockcount[domid]);
			//count[domid] = 0;
			//dump_runq();
		}
	}
	else nonclockcount[domid]++;
#endif
	// now have an unmasked, pending, deliverable vector!
	// getting ivr has "side effects"
#ifdef IRQ_DEBUG
	if (firsttime[vector]) {
		printf("*** First get_ivr on vector=%d,itc=%lx\n",
			vector,ia64_get_itc());
		firsttime[vector]=0;
	}
#endif
	i = vector >> 6;
	mask = 1L << (vector & 0x3f);
	//printf("ZZZZZZ vcpu_get_ivr: setting insvc mask for vector %ld\n",vector);
	PSCBX(vcpu,insvc[i]) |= mask;
	PSCBX(vcpu,irr[i]) &= ~mask;
	//PSCB(vcpu,pending_interruption)--;
	*pval = vector;
	// if delivering a timer interrupt, remember domain_itm
	if (vector == (PSCB(vcpu,itv) & 0xff)) {
		PSCBX(vcpu,domain_itm_last) = PSCBX(vcpu,domain_itm);
	}
	return IA64_NO_FAULT;
}

IA64FAULT vcpu_get_tpr(VCPU *vcpu, UINT64 *pval)
{
	*pval = PSCB(vcpu,tpr);
	return (IA64_NO_FAULT);
}

IA64FAULT vcpu_get_eoi(VCPU *vcpu, UINT64 *pval)
{
	*pval = 0L;	// reads of eoi always return 0
	return (IA64_NO_FAULT);
}

IA64FAULT vcpu_get_irr0(VCPU *vcpu, UINT64 *pval)
{
#ifndef IRR_USE_FIXED
	printk("vcpu_get_irr: called, not implemented yet\n");
	return IA64_ILLOP_FAULT;
#else
	*pval = vcpu->irr[0];
	return (IA64_NO_FAULT);
#endif
}

IA64FAULT vcpu_get_irr1(VCPU *vcpu, UINT64 *pval)
{
#ifndef IRR_USE_FIXED
	printk("vcpu_get_irr: called, not implemented yet\n");
	return IA64_ILLOP_FAULT;
#else
	*pval = vcpu->irr[1];
	return (IA64_NO_FAULT);
#endif
}

IA64FAULT vcpu_get_irr2(VCPU *vcpu, UINT64 *pval)
{
#ifndef IRR_USE_FIXED
	printk("vcpu_get_irr: called, not implemented yet\n");
	return IA64_ILLOP_FAULT;
#else
	*pval = vcpu->irr[2];
	return (IA64_NO_FAULT);
#endif
}

IA64FAULT vcpu_get_irr3(VCPU *vcpu, UINT64 *pval)
{
#ifndef IRR_USE_FIXED
	printk("vcpu_get_irr: called, not implemented yet\n");
	return IA64_ILLOP_FAULT;
#else
	*pval = vcpu->irr[3];
	return (IA64_NO_FAULT);
#endif
}

IA64FAULT vcpu_get_itv(VCPU *vcpu, UINT64 *pval)
{
	*pval = PSCB(vcpu,itv);
	return (IA64_NO_FAULT);
}

IA64FAULT vcpu_get_pmv(VCPU *vcpu, UINT64 *pval)
{
	*pval = PSCB(vcpu,pmv);
	return (IA64_NO_FAULT);
}

IA64FAULT vcpu_get_cmcv(VCPU *vcpu, UINT64 *pval)
{
	*pval = PSCB(vcpu,cmcv);
	return (IA64_NO_FAULT);
}

IA64FAULT vcpu_get_lrr0(VCPU *vcpu, UINT64 *pval)
{
	// fix this when setting values other than m-bit is supported
	printf("vcpu_get_lrr0: Unmasked interrupts unsupported\n");
	*pval = (1L << 16);
	return (IA64_NO_FAULT);
}

IA64FAULT vcpu_get_lrr1(VCPU *vcpu, UINT64 *pval)
{
	// fix this when setting values other than m-bit is supported
	printf("vcpu_get_lrr1: Unmasked interrupts unsupported\n");
	*pval = (1L << 16);
	return (IA64_NO_FAULT);
}

IA64FAULT vcpu_set_lid(VCPU *vcpu, UINT64 val)
{
	printf("vcpu_set_lid: Setting cr.lid is unsupported\n");
	return (IA64_ILLOP_FAULT);
}

IA64FAULT vcpu_set_tpr(VCPU *vcpu, UINT64 val)
{
	if (val & 0xff00) return IA64_RSVDREG_FAULT;
	PSCB(vcpu,tpr) = val;
	if (vcpu_check_pending_interrupts(vcpu) != SPURIOUS_VECTOR)
		PSCB(vcpu,pending_interruption) = 1;
	return (IA64_NO_FAULT);
}

IA64FAULT vcpu_set_eoi(VCPU *vcpu, UINT64 val)
{
	UINT64 *p, bits, vec, bitnum;
	int i;

	p = &PSCBX(vcpu,insvc[3]);
	for (i = 3; (i >= 0) && !(bits = *p); i--, p--);
	if (i < 0) {
		printf("Trying to EOI interrupt when none are in-service.\r\n");
		return IA64_NO_FAULT;
	}
	bitnum = ia64_fls(bits);
	vec = bitnum + (i*64);
	/* clear the correct bit */
	bits &= ~(1L << bitnum);
	*p = bits;
	/* clearing an eoi bit may unmask another pending interrupt... */
	if (PSCB(vcpu,interrupt_delivery_enabled)) {	// but only if enabled...
		// worry about this later... Linux only calls eoi
		// with interrupts disabled
		printf("Trying to EOI interrupt with interrupts enabled\r\n");
	}
	if (vcpu_check_pending_interrupts(vcpu) != SPURIOUS_VECTOR)
		PSCB(vcpu,pending_interruption) = 1;
	//printf("YYYYY vcpu_set_eoi: Successful\n");
	return (IA64_NO_FAULT);
}

IA64FAULT vcpu_set_lrr0(VCPU *vcpu, UINT64 val)
{
	if (!(val & (1L << 16))) {
		printf("vcpu_set_lrr0: Unmasked interrupts unsupported\n");
		return (IA64_ILLOP_FAULT);
	}
	// no place to save this state but nothing to do anyway
	return (IA64_NO_FAULT);
}

IA64FAULT vcpu_set_lrr1(VCPU *vcpu, UINT64 val)
{
	if (!(val & (1L << 16))) {
		printf("vcpu_set_lrr1: Unmasked interrupts unsupported\n");
		return (IA64_ILLOP_FAULT);
	}
	// no place to save this state but nothing to do anyway
	return (IA64_NO_FAULT);
}

// parameter is a time interval specified in cycles
void vcpu_enable_timer(VCPU *vcpu,UINT64 cycles)
{
	PSCBX(vcpu,xen_timer_interval) = cycles;
	vcpu_set_next_timer(vcpu);
	printf("vcpu_enable_timer: interval set to %ld cycles\n",
		PSCBX(vcpu,xen_timer_interval));
	__set_bit(PSCB(vcpu,itv), PSCB(vcpu,delivery_mask));
}

IA64FAULT vcpu_set_itv(VCPU *vcpu, UINT64 val)
{
	extern unsigned long privop_trace;
	//privop_trace=1;
	if (val & 0xef00) return (IA64_ILLOP_FAULT);
	PSCB(vcpu,itv) = val;
	if (val & 0x10000) {
		printf("**** vcpu_set_itv(%lx): vitm=%lx, setting to 0\n",
			val,PSCBX(vcpu,domain_itm));
		PSCBX(vcpu,domain_itm) = 0;
	}
	else vcpu_enable_timer(vcpu,1000000L);
	return (IA64_NO_FAULT);
}

IA64FAULT vcpu_set_pmv(VCPU *vcpu, UINT64 val)
{
	if (val & 0xef00) /* reserved fields */ return IA64_RSVDREG_FAULT;
	PSCB(vcpu,pmv) = val;
	return (IA64_NO_FAULT);
}

IA64FAULT vcpu_set_cmcv(VCPU *vcpu, UINT64 val)
{
	if (val & 0xef00) /* reserved fields */ return IA64_RSVDREG_FAULT;
	PSCB(vcpu,cmcv) = val;
	return (IA64_NO_FAULT);
}

/**************************************************************************
 VCPU temporary register access routines
**************************************************************************/
UINT64 vcpu_get_tmp(VCPU *vcpu, UINT64 index)
{
	if (index > 7) return 0;
	return PSCB(vcpu,tmp[index]);
}

void vcpu_set_tmp(VCPU *vcpu, UINT64 index, UINT64 val)
{
	if (index <= 7) PSCB(vcpu,tmp[index]) = val;
}

/**************************************************************************
Interval timer routines
**************************************************************************/

BOOLEAN vcpu_timer_disabled(VCPU *vcpu)
{
	UINT64 itv = PSCB(vcpu,itv);
	return (!itv || !!(itv & 0x10000));
}

BOOLEAN vcpu_timer_inservice(VCPU *vcpu)
{
	UINT64 itv = PSCB(vcpu,itv);
	return (test_bit(itv, PSCBX(vcpu,insvc)));
}

BOOLEAN vcpu_timer_expired(VCPU *vcpu)
{
	unsigned long domain_itm = PSCBX(vcpu,domain_itm);
	unsigned long now = ia64_get_itc();

	if (!domain_itm) return FALSE;
	if (now < domain_itm) return FALSE;
	if (vcpu_timer_disabled(vcpu)) return FALSE;
	return TRUE;
}

void vcpu_safe_set_itm(unsigned long val)
{
	unsigned long epsilon = 100;
	unsigned long flags;
	UINT64 now = ia64_get_itc();

	local_irq_save(flags);
	while (1) {
		//printf("*** vcpu_safe_set_itm: Setting itm to %lx, itc=%lx\n",val,now);
		ia64_set_itm(val);
		if (val > (now = ia64_get_itc())) break;
		val = now + epsilon;
		epsilon <<= 1;
	}
	local_irq_restore(flags);
}

void vcpu_set_next_timer(VCPU *vcpu)
{
	UINT64 d = PSCBX(vcpu,domain_itm);
	//UINT64 s = PSCBX(vcpu,xen_itm);
	UINT64 s = local_cpu_data->itm_next;
	UINT64 now = ia64_get_itc();
	//UINT64 interval = PSCBX(vcpu,xen_timer_interval);

	/* gloss over the wraparound problem for now... we know it exists
	 * but it doesn't matter right now */

#if 0
	/* ensure at least next SP tick is in the future */
	if (!interval) PSCBX(vcpu,xen_itm) = now +
#if 0
		(running_on_sim() ? SIM_DEFAULT_CLOCK_RATE :
					DEFAULT_CLOCK_RATE);
#else
		3000000;
//printf("vcpu_set_next_timer: HACK!\n");
#endif
#if 0
	if (PSCBX(vcpu,xen_itm) < now)
		while (PSCBX(vcpu,xen_itm) < now + (interval>>1))
			PSCBX(vcpu,xen_itm) += interval;
#endif
#endif

	if (is_idle_task(vcpu->domain)) {
//		printf("****** vcpu_set_next_timer called during idle!!\n");
		vcpu_safe_set_itm(s);
		return;
	}
	//s = PSCBX(vcpu,xen_itm);
	if (d && (d > now) && (d < s)) {
		vcpu_safe_set_itm(d);
		//using_domain_as_itm++;
	}
	else {
		vcpu_safe_set_itm(s);
		//using_xen_as_itm++;
	}
}

IA64FAULT vcpu_set_itm(VCPU *vcpu, UINT64 val)
{
	UINT64 now = ia64_get_itc();

	//if (val < now) val = now + 1000;
	//printf("*** vcpu_set_itm: called with %lx\n",val);
	PSCBX(vcpu,domain_itm) = val;
	vcpu_set_next_timer(vcpu);
	return (IA64_NO_FAULT);
}

IA64FAULT vcpu_set_itc(VCPU *vcpu, UINT64 val)
{
	UINT64 oldnow = ia64_get_itc();
	UINT64 olditm = PSCBX(vcpu,domain_itm);
	unsigned long d = olditm - oldnow;
	unsigned long x = local_cpu_data->itm_next - oldnow;

	UINT64 newnow = val, min_delta;

#define DISALLOW_SETTING_ITC_FOR_NOW
#ifdef DISALLOW_SETTING_ITC_FOR_NOW
	printf("vcpu_set_itc: Setting ar.itc is currently disabled\n");
#else
	local_irq_disable();
	if (olditm) {
		printf("**** vcpu_set_itc(%lx): vitm changed to %lx\n",val,newnow+d);
		PSCBX(vcpu,domain_itm) = newnow + d;
	}
	local_cpu_data->itm_next = newnow + x;
	d = PSCBX(vcpu,domain_itm);
	x = local_cpu_data->itm_next;

	ia64_set_itc(newnow);
	if (d && (d > newnow) && (d < x)) {
		vcpu_safe_set_itm(d);
		//using_domain_as_itm++;
	}
	else {
		vcpu_safe_set_itm(x);
		//using_xen_as_itm++;
	}
	local_irq_enable();
#endif
	return (IA64_NO_FAULT);
}

IA64FAULT vcpu_get_itm(VCPU *vcpu, UINT64 *pval)
{
	//FIXME: Implement this
	printf("vcpu_get_itm: Getting cr.itm is unsupported... continuing\n");
	return (IA64_NO_FAULT);
	//return (IA64_ILLOP_FAULT);
}

IA64FAULT vcpu_get_itc(VCPU *vcpu, UINT64 *pval)
{
	//TODO: Implement this
	printf("vcpu_get_itc: Getting ar.itc is unsupported\n");
	return (IA64_ILLOP_FAULT);
}

void vcpu_pend_timer(VCPU *vcpu)
{
	UINT64 itv = PSCB(vcpu,itv) & 0xff;

	if (vcpu_timer_disabled(vcpu)) return;
	//if (vcpu_timer_inservice(vcpu)) return;
	if (PSCBX(vcpu,domain_itm_last) == PSCBX(vcpu,domain_itm)) {
		// already delivered an interrupt for this so
		// don't deliver another
		return;
	}
#if 0
	// attempt to flag "timer tick before its due" source
	{
	UINT64 itm = PSCBX(vcpu,domain_itm);
	UINT64 now = ia64_get_itc();
	if (now < itm) printf("******* vcpu_pend_timer: pending before due!\n");
	}
#endif
	vcpu_pend_interrupt(vcpu, itv);
}

// returns true if ready to deliver a timer interrupt too early
UINT64 vcpu_timer_pending_early(VCPU *vcpu)
{
	UINT64 now = ia64_get_itc();
	UINT64 itm = PSCBX(vcpu,domain_itm);

	if (vcpu_timer_disabled(vcpu)) return 0;
	if (!itm) return 0;
	return (vcpu_deliverable_timer(vcpu) && (now < itm));
}

//FIXME: This is a hack because everything dies if a timer tick is lost
void vcpu_poke_timer(VCPU *vcpu)
{
	UINT64 itv = PSCB(vcpu,itv) & 0xff;
	UINT64 now = ia64_get_itc();
	UINT64 itm = PSCBX(vcpu,domain_itm);
	UINT64 irr;

	if (vcpu_timer_disabled(vcpu)) return;
	if (!itm) return;
	if (itv != 0xefL) {
		printf("vcpu_poke_timer: unimplemented itv=%lx!\n",itv);
		while(1);
	}
	// using 0xef instead of itv so can get real irr
	if (now > itm && !test_bit(0xefL, PSCBX(vcpu,insvc))) {
		if (!test_bit(0xefL,PSCBX(vcpu,irr))) {
			irr = ia64_getreg(_IA64_REG_CR_IRR3);
			if (irr & (1L<<(0xef-0xc0))) return;
			if (now-itm>0x800000)
				printf("*** poking timer: now=%lx,vitm=%lx,xitm=%lx,itm=%lx\n",
					now,itm,local_cpu_data->itm_next,ia64_get_itm());
			vcpu_pend_timer(vcpu);
		}
	}
}

/**************************************************************************
Privileged operation emulation routines
**************************************************************************/

IA64FAULT vcpu_force_data_miss(VCPU *vcpu, UINT64 ifa)
{
	PSCB(vcpu,ifa) = ifa;
	return (vcpu_get_rr_ve(vcpu,ifa) ? IA64_DATA_TLB_VECTOR : IA64_ALT_DATA_TLB_VECTOR);
}

IA64FAULT vcpu_rfi(VCPU *vcpu)
{
	// TODO: Only allowed for current vcpu
	PSR psr;
	UINT64 int_enable, regspsr = 0;
	UINT64 ifs;
	REGS *regs = vcpu_regs(vcpu);
	extern void dorfirfi(void);

	psr.i64 = PSCB(vcpu,ipsr);
	if (psr.ia64_psr.cpl < 3) psr.ia64_psr.cpl = 2;
	if (psr.ia64_psr.i) PSCB(vcpu,interrupt_delivery_enabled) = 1;
	int_enable = psr.ia64_psr.i;
	if (psr.ia64_psr.ic) PSCB(vcpu,interrupt_collection_enabled) = 1;
	if (psr.ia64_psr.dt && psr.ia64_psr.rt && psr.ia64_psr.it)
		vcpu_set_metaphysical_mode(vcpu,FALSE);
	else vcpu_set_metaphysical_mode(vcpu,TRUE);
	psr.ia64_psr.ic = 1; psr.ia64_psr.i = 1;
	psr.ia64_psr.dt = 1; psr.ia64_psr.rt = 1; psr.ia64_psr.it = 1;
	psr.ia64_psr.bn = 1;
	//psr.pk = 1;  // checking pkeys shouldn't be a problem but seems broken
	if (psr.ia64_psr.be) {
		printf("*** DOMAIN TRYING TO TURN ON BIG-ENDIAN!!!\n");
		return (IA64_ILLOP_FAULT);
	}
	PSCB(vcpu,incomplete_regframe) = 0; // is this necessary?
	ifs = PSCB(vcpu,ifs);
	//if ((ifs & regs->cr_ifs & 0x8000000000000000L) && ifs != regs->cr_ifs) {
	//if ((ifs & 0x8000000000000000L) && ifs != regs->cr_ifs) {
	if (ifs & regs->cr_ifs & 0x8000000000000000L) {
		// TODO: validate PSCB(vcpu,iip)
		// TODO: PSCB(vcpu,ipsr) = psr;
		PSCB(vcpu,ipsr) = psr.i64;
		// now set up the trampoline
		regs->cr_iip = *(unsigned long *)dorfirfi; // function pointer!!
		__asm__ __volatile ("mov %0=psr;;":"=r"(regspsr)::"memory");
		regs->cr_ipsr = regspsr & ~(IA64_PSR_I | IA64_PSR_IC | IA64_PSR_BN);
	}
	else {
		regs->cr_ipsr = psr.i64;
		regs->cr_iip = PSCB(vcpu,iip);
	}
	PSCB(vcpu,interrupt_collection_enabled) = 1;
	vcpu_bsw1(vcpu);
	PSCB(vcpu,interrupt_delivery_enabled) = int_enable;
	return (IA64_NO_FAULT);
}

IA64FAULT vcpu_cover(VCPU *vcpu)
{
	// TODO: Only allowed for current vcpu
	REGS *regs = vcpu_regs(vcpu);

	if (!PSCB(vcpu,interrupt_collection_enabled)) {
		if (!PSCB(vcpu,incomplete_regframe))
			PSCB(vcpu,ifs) = regs->cr_ifs;
		else PSCB(vcpu,incomplete_regframe) = 0;
	}
	regs->cr_ifs = 0;
	return (IA64_NO_FAULT);
}

IA64FAULT vcpu_thash(VCPU *vcpu, UINT64 vadr, UINT64 *pval)
{
	UINT64 pta = PSCB(vcpu,pta);
	UINT64 pta_sz = (pta & IA64_PTA_SZ(0x3f)) >> IA64_PTA_SZ_BIT;
	UINT64 pta_base = pta & ~((1UL << IA64_PTA_BASE_BIT)-1);
	UINT64 Mask = (1L << pta_sz) - 1;
	UINT64 Mask_60_15 = (Mask >> 15) & 0x3fffffffffff;
	UINT64 compMask_60_15 = ~Mask_60_15;
	//UINT64 rr_ps = RR_TO_PS(get_rr(vadr));
	UINT64 rr_ps = vcpu_get_rr_ps(vcpu,vadr);
	UINT64 VHPT_offset = (vadr >> rr_ps) << 3;
	UINT64 VHPT_addr1 = vadr & 0xe000000000000000L;
	UINT64 VHPT_addr2a =
		((pta_base >> 15) & 0x3fffffffffff) & compMask_60_15;
	UINT64 VHPT_addr2b =
		((VHPT_offset >> 15) & 0x3fffffffffff) & Mask_60_15;
	UINT64 VHPT_addr3 = VHPT_offset & 0x7fff;
	UINT64 VHPT_addr = VHPT_addr1 | ((VHPT_addr2a | VHPT_addr2b) << 15) |
			VHPT_addr3;

#if 0
	if (VHPT_addr1 == 0xe000000000000000L) {
		printf("vcpu_thash: thash unsupported with rr7 @%lx\n",
			PSCB(vcpu,iip));
		return (IA64_ILLOP_FAULT);
	}
#endif
	//verbose("vcpu_thash: vadr=%p, VHPT_addr=%p\n",vadr,VHPT_addr);
	*pval = VHPT_addr;
	return (IA64_NO_FAULT);
}
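
/* Editor's summary of the hash above (not in the original changeset):
 * for the short-format VHPT the computed slot address is, in effect
 * (assuming the architectural minimum pta.size >= 15),
 *
 *	VHPT_addr = (vadr & 0xe000000000000000)	// region bits 63..61
 *		  | (pta_base & ~Mask & ~0xe000000000000000)
 *		  | (((vadr >> rr_ps) << 3) & Mask);	// 8-byte PTE index
 *
 * with Mask = (1UL << pta_sz) - 1.  E.g. pta_sz = 20 gives a 1MB VHPT:
 * bits 60..20 come from pta.base and bits 19..3 from the page number.
 */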

IA64FAULT vcpu_ttag(VCPU *vcpu, UINT64 vadr, UINT64 *padr)
{
	printf("vcpu_ttag: ttag instruction unsupported\n");
	return (IA64_ILLOP_FAULT);
}

#define itir_ps(itir)	((itir >> 2) & 0x3f)
#define itir_mask(itir)	(~((1UL << itir_ps(itir)) - 1))
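
/* Editor's worked example (not in the original changeset): itir keeps
 * the page size in bits 2..7, so for a 16KB translation ps = 14 and
 * itir = 14 << 2 = 0x38, giving:
 *
 *	itir_ps(0x38)	== 14
 *	itir_mask(0x38)	== ~((1UL << 14) - 1) == 0xffffffffffffc000
 *
 * i.e. itir_mask() selects the page-aligned bits of an address.
 */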

unsigned long vhpt_translate_count = 0;

IA64FAULT vcpu_translate(VCPU *vcpu, UINT64 address, BOOLEAN is_data, UINT64 *pteval, UINT64 *itir, UINT64 *iha)
{
	unsigned long pta, pta_mask, pte, ps;
	TR_ENTRY *trp;
	ia64_rr rr;

	if (!(address >> 61)) {
		if (!PSCB(vcpu,metaphysical_mode)) {
			REGS *regs = vcpu_regs(vcpu);
			unsigned long viip = PSCB(vcpu,iip);
			unsigned long vipsr = PSCB(vcpu,ipsr);
			unsigned long iip = regs->cr_iip;
			unsigned long ipsr = regs->cr_ipsr;
			printk("vcpu_translate: bad address %p, viip=%p, vipsr=%p, iip=%p, ipsr=%p continuing\n",
				address, viip, vipsr, iip, ipsr);
		}

		*pteval = (address & _PAGE_PPN_MASK) | __DIRTY_BITS | _PAGE_PL_2 | _PAGE_AR_RWX;
		*itir = PAGE_SHIFT << 2;
		phys_translate_count++;
		return IA64_NO_FAULT;
	}

	/* check translation registers */
	if ((trp = match_tr(vcpu,address))) {
		tr_translate_count++;
		*pteval = trp->page_flags;
		*itir = trp->itir;
		return IA64_NO_FAULT;
	}

	/* check 1-entry TLB */
	if ((trp = match_dtlb(vcpu,address))) {
		dtlb_translate_count++;
		if (vcpu->domain==dom0 && !in_tpa) *pteval = trp->page_flags;
		else *pteval = vcpu->arch.dtlb_pte;
//		printf("DTLB MATCH... NEW, DOM%s, %s\n", vcpu->domain==dom0?
//			"0":"U", in_tpa?"vcpu_tpa":"ia64_do_page_fault");
		*itir = trp->itir;
		return IA64_NO_FAULT;
	}

	/* check guest VHPT */
	pta = PSCB(vcpu,pta);
	rr.rrval = PSCB(vcpu,rrs)[address>>61];
	if (!rr.ve || !(pta & IA64_PTA_VE)) {
		// FIXME? does iha get set for alt faults? does xenlinux depend on it?
		vcpu_thash(vcpu, address, iha);
		// FIXME?: does itir get set for alt faults?
		*itir = vcpu_get_itir_on_fault(vcpu,address);
		return (is_data ? IA64_ALT_DATA_TLB_VECTOR :
			IA64_ALT_INST_TLB_VECTOR);
	}
	if (pta & IA64_PTA_VF) { /* long format VHPT - not implemented */
		// thash won't work right?
		panic_domain(vcpu_regs(vcpu),"can't do long format VHPT\n");
		//return (is_data ? IA64_DATA_TLB_VECTOR:IA64_INST_TLB_VECTOR);
	}

	/* avoid recursively walking (short format) VHPT */
	pta_mask = (itir_mask(pta) << 3) >> 3;
	if (((address ^ pta) & pta_mask) == 0)
		return (is_data ? IA64_DATA_TLB_VECTOR : IA64_INST_TLB_VECTOR);

	vcpu_thash(vcpu, address, iha);
	if (__copy_from_user(&pte, (void *)(*iha), sizeof(pte)) != 0) {
		// FIXME?: does itir get set for vhpt faults?
		*itir = vcpu_get_itir_on_fault(vcpu,*iha);
		return IA64_VHPT_FAULT;
	}

	/*
	 * Optimisation: this VHPT walker aborts on not-present pages
	 * instead of inserting a not-present translation, this allows
	 * vectoring directly to the miss handler.
	 */
	if (pte & _PAGE_P) {
		*pteval = pte;
		*itir = vcpu_get_itir_on_fault(vcpu,address);
		vhpt_translate_count++;
		return IA64_NO_FAULT;
	}
	*itir = vcpu_get_itir_on_fault(vcpu,address);
	return (is_data ? IA64_DATA_TLB_VECTOR : IA64_INST_TLB_VECTOR);
}

IA64FAULT vcpu_tpa(VCPU *vcpu, UINT64 vadr, UINT64 *padr)
{
	UINT64 pteval, itir, mask, iha;
	IA64FAULT fault;

	in_tpa = 1;
	fault = vcpu_translate(vcpu, vadr, 1, &pteval, &itir, &iha);
	in_tpa = 0;
	if (fault == IA64_NO_FAULT)
	{
		mask = itir_mask(itir);
		*padr = (pteval & _PAGE_PPN_MASK & mask) | (vadr & ~mask);
		return (IA64_NO_FAULT);
	}
	return vcpu_force_data_miss(vcpu,vadr);
}
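
/* Editor's worked example of the address composition above (not in the
 * original changeset): suppose vcpu_translate returns a pteval with
 * (pteval & _PAGE_PPN_MASK) == 0x4000000 and an itir with ps = 14, so
 * mask == 0xffffffffffffc000.  Then for vadr == 0x2000000000005abc:
 *
 *	*padr = (0x4000000 & mask) | (vadr & ~mask)
 *	      = 0x4000000 | 0x1abc = 0x4001abc
 *
 * i.e. the page frame comes from the PTE and the page offset from the
 * virtual address.
 */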

IA64FAULT vcpu_tak(VCPU *vcpu, UINT64 vadr, UINT64 *key)
{
	printf("vcpu_tak: tak instruction unsupported\n");
	return (IA64_ILLOP_FAULT);
	// HACK ALERT: tak does a thash for now
	//return vcpu_thash(vcpu,vadr,key);
}

/**************************************************************************
 VCPU debug breakpoint register access routines
**************************************************************************/

IA64FAULT vcpu_set_dbr(VCPU *vcpu, UINT64 reg, UINT64 val)
{
	// TODO: unimplemented DBRs return a reserved register fault
	// TODO: Should set Logical CPU state, not just physical
	ia64_set_dbr(reg,val);
	return (IA64_NO_FAULT);
}

IA64FAULT vcpu_set_ibr(VCPU *vcpu, UINT64 reg, UINT64 val)
{
	// TODO: unimplemented IBRs return a reserved register fault
	// TODO: Should set Logical CPU state, not just physical
	ia64_set_ibr(reg,val);
	return (IA64_NO_FAULT);
}

IA64FAULT vcpu_get_dbr(VCPU *vcpu, UINT64 reg, UINT64 *pval)
{
	// TODO: unimplemented DBRs return a reserved register fault
	UINT64 val = ia64_get_dbr(reg);
	*pval = val;
	return (IA64_NO_FAULT);
}

IA64FAULT vcpu_get_ibr(VCPU *vcpu, UINT64 reg, UINT64 *pval)
{
	// TODO: unimplemented IBRs return a reserved register fault
	UINT64 val = ia64_get_ibr(reg);
	*pval = val;
	return (IA64_NO_FAULT);
}

/**************************************************************************
 VCPU performance monitor register access routines
**************************************************************************/

IA64FAULT vcpu_set_pmc(VCPU *vcpu, UINT64 reg, UINT64 val)
{
	// TODO: Should set Logical CPU state, not just physical
	// NOTE: Writes to unimplemented PMC registers are discarded
#ifdef DEBUG_PFMON
	printf("vcpu_set_pmc(%x,%lx)\n",reg,val);
#endif
	ia64_set_pmc(reg,val);
	return (IA64_NO_FAULT);
}

IA64FAULT vcpu_set_pmd(VCPU *vcpu, UINT64 reg, UINT64 val)
{
	// TODO: Should set Logical CPU state, not just physical
	// NOTE: Writes to unimplemented PMD registers are discarded
#ifdef DEBUG_PFMON
	printf("vcpu_set_pmd(%x,%lx)\n",reg,val);
#endif
	ia64_set_pmd(reg,val);
	return (IA64_NO_FAULT);
}

IA64FAULT vcpu_get_pmc(VCPU *vcpu, UINT64 reg, UINT64 *pval)
{
	// NOTE: Reads from unimplemented PMC registers return zero
	UINT64 val = (UINT64)ia64_get_pmc(reg);
#ifdef DEBUG_PFMON
	printf("%lx=vcpu_get_pmc(%x)\n",val,reg);
#endif
	*pval = val;
	return (IA64_NO_FAULT);
}

IA64FAULT vcpu_get_pmd(VCPU *vcpu, UINT64 reg, UINT64 *pval)
{
	// NOTE: Reads from unimplemented PMD registers return zero
	UINT64 val = (UINT64)ia64_get_pmd(reg);
#ifdef DEBUG_PFMON
	printf("%lx=vcpu_get_pmd(%x)\n",val,reg);
#endif
	*pval = val;
	return (IA64_NO_FAULT);
}

/**************************************************************************
 VCPU banked general register access routines
**************************************************************************/
#define vcpu_bsw0_unat(i,b0unat,b1unat,runat,IA64_PT_REGS_R16_SLOT)	\
do{									\
	__asm__ __volatile__ (						\
		";;extr.u %0 = %3,%6,16;;\n"				\
		"dep %1 = %0, %1, 0, 16;;\n"				\
		"st8 [%4] = %1\n"					\
		"extr.u %0 = %2, 16, 16;;\n"				\
		"dep %3 = %0, %3, %6, 16;;\n"				\
		"st8 [%5] = %3\n"					\
		::"r"(i),"r"(*b1unat),"r"(*b0unat),"r"(*runat),"r"(b1unat), \
		"r"(runat),"i"(IA64_PT_REGS_R16_SLOT):"memory");	\
}while(0)

IA64FAULT vcpu_bsw0(VCPU *vcpu)
{
	// TODO: Only allowed for current vcpu
	REGS *regs = vcpu_regs(vcpu);
	unsigned long *r = &regs->r16;
	unsigned long *b0 = &PSCB(vcpu,bank0_regs[0]);
	unsigned long *b1 = &PSCB(vcpu,bank1_regs[0]);
	unsigned long *runat = &regs->eml_unat;
	unsigned long *b0unat = &PSCB(vcpu,vbnat);
	unsigned long *b1unat = &PSCB(vcpu,vnat);

	unsigned long i;

	if (VMX_DOMAIN(vcpu)) {
		if (VCPU(vcpu,vpsr)&IA64_PSR_BN) {
			for (i = 0; i < 16; i++) { *b1++ = *r; *r++ = *b0++; }
			vcpu_bsw0_unat(i,b0unat,b1unat,runat,IA64_PT_REGS_R16_SLOT);
			VCPU(vcpu,vpsr) &= ~IA64_PSR_BN;
		}
	} else {
		if (PSCB(vcpu,banknum)) {
			for (i = 0; i < 16; i++) { *b1++ = *r; *r++ = *b0++; }
			vcpu_bsw0_unat(i,b0unat,b1unat,runat,IA64_PT_REGS_R16_SLOT);
			PSCB(vcpu,banknum) = 0;
		}
	}
	return (IA64_NO_FAULT);
}

#define vcpu_bsw1_unat(i,b0unat,b1unat,runat,IA64_PT_REGS_R16_SLOT)	\
do{									\
	__asm__ __volatile__ (						\
		";;extr.u %0 = %3,%6,16;;\n"				\
		"dep %1 = %0, %1, 16, 16;;\n"				\
		"st8 [%4] = %1\n"					\
		"extr.u %0 = %2, 0, 16;;\n"				\
		"dep %3 = %0, %3, %6, 16;;\n"				\
		"st8 [%5] = %3\n"					\
		::"r"(i),"r"(*b0unat),"r"(*b1unat),"r"(*runat),"r"(b0unat), \
		"r"(runat),"i"(IA64_PT_REGS_R16_SLOT):"memory");	\
}while(0)

IA64FAULT vcpu_bsw1(VCPU *vcpu)
{
	// TODO: Only allowed for current vcpu
	REGS *regs = vcpu_regs(vcpu);
	unsigned long *r = &regs->r16;
	unsigned long *b0 = &PSCB(vcpu,bank0_regs[0]);
	unsigned long *b1 = &PSCB(vcpu,bank1_regs[0]);
	unsigned long *runat = &regs->eml_unat;
	unsigned long *b0unat = &PSCB(vcpu,vbnat);
	unsigned long *b1unat = &PSCB(vcpu,vnat);

	unsigned long i;

	if (VMX_DOMAIN(vcpu)) {
		if (!(VCPU(vcpu,vpsr)&IA64_PSR_BN)) {
			for (i = 0; i < 16; i++) { *b0++ = *r; *r++ = *b1++; }
			vcpu_bsw1_unat(i,b0unat,b1unat,runat,IA64_PT_REGS_R16_SLOT);
			VCPU(vcpu,vpsr) |= IA64_PSR_BN;
		}
	} else {
		if (!PSCB(vcpu,banknum)) {
			for (i = 0; i < 16; i++) { *b0++ = *r; *r++ = *b1++; }
			vcpu_bsw1_unat(i,b0unat,b1unat,runat,IA64_PT_REGS_R16_SLOT);
			PSCB(vcpu,banknum) = 1;
		}
	}
	return (IA64_NO_FAULT);
}

/**************************************************************************
 VCPU cpuid access routines
**************************************************************************/

IA64FAULT vcpu_get_cpuid(VCPU *vcpu, UINT64 reg, UINT64 *pval)
{
	// FIXME: This could get called as a result of a rsvd-reg fault
	// if reg > 3
	switch (reg) {
	    case 0:
		memcpy(pval,"Xen/ia64",8);
		break;
	    case 1:
		*pval = 0;
		break;
	    case 2:
		*pval = 0;
		break;
	    case 3:
		*pval = ia64_get_cpuid(3);
		break;
	    case 4:
		*pval = ia64_get_cpuid(4);
		break;
	    default:
		if (reg > (ia64_get_cpuid(3) & 0xff))
			return IA64_RSVDREG_FAULT;
		*pval = ia64_get_cpuid(reg);
		break;
	}
	return (IA64_NO_FAULT);
}

/**************************************************************************
 VCPU region register access routines
**************************************************************************/

unsigned long vcpu_get_rr_ve(VCPU *vcpu,UINT64 vadr)
{
	ia64_rr rr;

	rr.rrval = PSCB(vcpu,rrs)[vadr>>61];
	return (rr.ve);
}

IA64FAULT vcpu_set_rr(VCPU *vcpu, UINT64 reg, UINT64 val)
{
	PSCB(vcpu,rrs)[reg>>61] = val;
	// warning: set_one_rr() does it "live"
	set_one_rr(reg,val);
	return (IA64_NO_FAULT);
}

IA64FAULT vcpu_get_rr(VCPU *vcpu, UINT64 reg, UINT64 *pval)
{
	UINT64 val = PSCB(vcpu,rrs)[reg>>61];
	*pval = val;
	return (IA64_NO_FAULT);
}

/**************************************************************************
 VCPU protection key register access routines
**************************************************************************/

IA64FAULT vcpu_get_pkr(VCPU *vcpu, UINT64 reg, UINT64 *pval)
{
#ifndef PKR_USE_FIXED
	printk("vcpu_get_pkr: called, not implemented yet\n");
	return IA64_ILLOP_FAULT;
#else
	UINT64 val = (UINT64)ia64_get_pkr(reg);
	*pval = val;
	return (IA64_NO_FAULT);
#endif
}

IA64FAULT vcpu_set_pkr(VCPU *vcpu, UINT64 reg, UINT64 val)
{
#ifndef PKR_USE_FIXED
	printk("vcpu_set_pkr: called, not implemented yet\n");
	return IA64_ILLOP_FAULT;
#else
//	if (reg >= NPKRS) return (IA64_ILLOP_FAULT);
	vcpu->pkrs[reg] = val;
	ia64_set_pkr(reg,val);
	return (IA64_NO_FAULT);
#endif
}

/**************************************************************************
 VCPU translation register access routines
**************************************************************************/

static void vcpu_purge_tr_entry(TR_ENTRY *trp)
{
	trp->p = 0;
}

static void vcpu_set_tr_entry(TR_ENTRY *trp, UINT64 pte, UINT64 itir, UINT64 ifa)
{
	UINT64 ps;

	trp->itir = itir;
	trp->rid = virtualize_rid(current, get_rr(ifa) & RR_RID_MASK);
	trp->p = 1;
	ps = trp->ps;
	trp->page_flags = pte;
	if (trp->pl < 2) trp->pl = 2;
	trp->vadr = ifa & ~0xfff;
	if (ps > 12) { // "ignore" relevant low-order bits
		trp->ppn &= ~((1UL<<(ps-12))-1);
		trp->vadr &= ~((1UL<<ps)-1);
	}
}

TR_ENTRY *vcpu_match_tr_entry(VCPU *vcpu, TR_ENTRY *trp, UINT64 ifa, int count)
{
	unsigned long rid = (get_rr(ifa) & RR_RID_MASK);
	int i;

	for (i = 0; i < count; i++, trp++) {
		if (!trp->p) continue;
		if (physicalize_rid(vcpu,trp->rid) != rid) continue;
		if (ifa < trp->vadr) continue;
		if (ifa >= (trp->vadr + (1L << trp->ps)) - 1) continue;
		//if (trp->key && !match_pkr(vcpu,trp->key)) continue;
		return trp;
	}
	return 0;
}

TR_ENTRY *match_tr(VCPU *vcpu, unsigned long ifa)
{
	TR_ENTRY *trp;

	trp = vcpu_match_tr_entry(vcpu,vcpu->arch.dtrs,ifa,NDTRS);
	if (trp) return trp;
	trp = vcpu_match_tr_entry(vcpu,vcpu->arch.itrs,ifa,NITRS);
	if (trp) return trp;
	return 0;
}

IA64FAULT vcpu_itr_d(VCPU *vcpu, UINT64 slot, UINT64 pte,
		UINT64 itir, UINT64 ifa)
{
	TR_ENTRY *trp;

	if (slot >= NDTRS) return IA64_RSVDREG_FAULT;
	trp = &PSCBX(vcpu,dtrs[slot]);
	//printf("***** itr.d: setting slot %d: ifa=%p\n",slot,ifa);
	vcpu_set_tr_entry(trp,pte,itir,ifa);
	return IA64_NO_FAULT;
}

IA64FAULT vcpu_itr_i(VCPU *vcpu, UINT64 slot, UINT64 pte,
		UINT64 itir, UINT64 ifa)
{
	TR_ENTRY *trp;

	if (slot >= NITRS) return IA64_RSVDREG_FAULT;
	trp = &PSCBX(vcpu,itrs[slot]);
	//printf("***** itr.i: setting slot %d: ifa=%p\n",slot,ifa);
	vcpu_set_tr_entry(trp,pte,itir,ifa);
	return IA64_NO_FAULT;
}

/**************************************************************************
 VCPU translation cache access routines
**************************************************************************/

void foobar(void) { /*vcpu_verbose = 1;*/ }

extern struct domain *dom0;

void vcpu_itc_no_srlz(VCPU *vcpu, UINT64 IorD, UINT64 vaddr, UINT64 pte, UINT64 mp_pte, UINT64 logps)
{
	unsigned long psr;
	unsigned long ps = (vcpu->domain==dom0) ? logps : PAGE_SHIFT;

	// FIXME: validate ifa here (not in Xen space), COULD MACHINE CHECK!
	// FIXME, must be inlined or potential for nested fault here!
	if ((vcpu->domain==dom0) && (logps < PAGE_SHIFT)) {
		printf("vcpu_itc_no_srlz: domain0 use of smaller page size!\n");
		//FIXME: kill domain here
		while(1);
	}
	psr = ia64_clear_ic();
	ia64_itc(IorD,vaddr,pte,ps); // FIXME: look for bigger mappings
	ia64_set_psr(psr);
	// ia64_srlz_i(); // no srls req'd, will rfi later
#ifdef VHPT_GLOBAL
	if (vcpu->domain==dom0 && ((vaddr >> 61) == 7)) {
		// FIXME: this is dangerous... vhpt_flush_address ensures these
		// addresses never get flushed.  More work needed if this
		// ever happens.
		//printf("vhpt_insert(%p,%p,%p)\n",vaddr,pte,1L<<logps);
		if (logps > PAGE_SHIFT) vhpt_multiple_insert(vaddr,pte,logps);
		else vhpt_insert(vaddr,pte,logps<<2);
	}
	// even if domain pagesize is larger than PAGE_SIZE, just put
	// PAGE_SIZE mapping in the vhpt for now, else purging is complicated
	else vhpt_insert(vaddr,pte,PAGE_SHIFT<<2);
#endif
	if ((mp_pte == -1UL) || (IorD & 0x4)) return;  // don't place in 1-entry TLB
	if (IorD & 0x1) {
		vcpu_set_tr_entry(&PSCBX(vcpu,itlb),pte,ps<<2,vaddr);
		PSCBX(vcpu,itlb_pte) = mp_pte;
	}
	if (IorD & 0x2) {
		vcpu_set_tr_entry(&PSCBX(vcpu,dtlb),pte,ps<<2,vaddr);
		PSCBX(vcpu,dtlb_pte) = mp_pte;
	}
}

// NOTE: returns a physical pte, NOT a "metaphysical" pte, so do not check
// the physical address contained for correctness
TR_ENTRY *match_dtlb(VCPU *vcpu, unsigned long ifa)
{
	TR_ENTRY *trp;

	if ((trp = vcpu_match_tr_entry(vcpu,&vcpu->arch.dtlb,ifa,1)))
		return (&vcpu->arch.dtlb);
	return 0UL;
}

IA64FAULT vcpu_itc_d(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa)
{
	unsigned long pteval, logps = (itir >> 2) & 0x3f;
	unsigned long translate_domain_pte(UINT64,UINT64,UINT64);

	if (logps < PAGE_SHIFT) {
		printf("vcpu_itc_d: domain trying to use smaller page size!\n");
		//FIXME: kill domain here
		while(1);
	}
	//itir = (itir & ~0xfc) | (PAGE_SHIFT<<2); // ignore domain's pagesize
	pteval = translate_domain_pte(pte,ifa,itir);
	if (!pteval) return IA64_ILLOP_FAULT;
	vcpu_itc_no_srlz(vcpu,2,ifa,pteval,pte,logps);
	return IA64_NO_FAULT;
}

IA64FAULT vcpu_itc_i(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa)
{
	unsigned long pteval, logps = (itir >> 2) & 0x3f;
	unsigned long translate_domain_pte(UINT64,UINT64,UINT64);

	// FIXME: validate ifa here (not in Xen space), COULD MACHINE CHECK!
	if (logps < PAGE_SHIFT) {
		printf("vcpu_itc_i: domain trying to use smaller page size!\n");
		//FIXME: kill domain here
		while(1);
	}
	//itir = (itir & ~0xfc) | (PAGE_SHIFT<<2); // ignore domain's pagesize
	pteval = translate_domain_pte(pte,ifa,itir);
	// FIXME: what to do if bad physical address? (machine check?)
	if (!pteval) return IA64_ILLOP_FAULT;
	vcpu_itc_no_srlz(vcpu, 1,ifa,pteval,pte,logps);
	return IA64_NO_FAULT;
}

IA64FAULT vcpu_ptc_l(VCPU *vcpu, UINT64 vadr, UINT64 addr_range)
{
	printk("vcpu_ptc_l: called, not implemented yet\n");
	return IA64_ILLOP_FAULT;
}

// At privlvl=0, fc performs no access rights or protection key checks, while
// at privlvl!=0, fc performs access rights checks as if it were a 1-byte
// read but no protection key check.  Thus in order to avoid an unexpected
// access rights fault, we have to translate the virtual address to a
// physical address (possibly via a metaphysical address) and do the fc
// on the physical address, which is guaranteed to flush the same cache line
IA64FAULT vcpu_fc(VCPU *vcpu, UINT64 vadr)
{
	// TODO: Only allowed for current vcpu
	UINT64 mpaddr, paddr;
	IA64FAULT fault;
	unsigned long translate_domain_mpaddr(unsigned long);
	IA64FAULT vcpu_tpa(VCPU *, UINT64, UINT64 *);

	fault = vcpu_tpa(vcpu, vadr, &mpaddr);
	if (fault == IA64_NO_FAULT) {
		paddr = translate_domain_mpaddr(mpaddr);
		ia64_fc(__va(paddr));
	}
	return fault;
}

int ptce_count = 0;
IA64FAULT vcpu_ptc_e(VCPU *vcpu, UINT64 vadr)
{
	// Note that this only needs to be called once, i.e. the
	// architected loop to purge the entire TLB, should use
	//  base = stride1 = stride2 = 0, count0 = count1 = 1

#ifdef VHPT_GLOBAL
	vhpt_flush();	// FIXME: This is overdoing it
#endif
	local_flush_tlb_all();
	// just invalidate the "whole" tlb
	vcpu_purge_tr_entry(&PSCBX(vcpu,dtlb));
	vcpu_purge_tr_entry(&PSCBX(vcpu,itlb));
	return IA64_NO_FAULT;
}

IA64FAULT vcpu_ptc_g(VCPU *vcpu, UINT64 vadr, UINT64 addr_range)
{
	printk("vcpu_ptc_g: called, not implemented yet\n");
	return IA64_ILLOP_FAULT;
}

IA64FAULT vcpu_ptc_ga(VCPU *vcpu,UINT64 vadr,UINT64 addr_range)
{
	extern void ia64_global_tlb_purge(UINT64 start, UINT64 end, UINT64 nbits);
	// FIXME: validate not flushing Xen addresses
	// if (Xen address) return(IA64_ILLOP_FAULT);
	// FIXME: ??breaks if domain PAGE_SIZE < Xen PAGE_SIZE
	//printf("######## vcpu_ptc_ga(%p,%p) ##############\n",vadr,addr_range);
#ifdef VHPT_GLOBAL
	vhpt_flush_address(vadr,addr_range);
#endif
	ia64_global_tlb_purge(vadr,vadr+addr_range,PAGE_SHIFT);
	vcpu_purge_tr_entry(&PSCBX(vcpu,dtlb));
	vcpu_purge_tr_entry(&PSCBX(vcpu,itlb));
	return IA64_NO_FAULT;
}

IA64FAULT vcpu_ptr_d(VCPU *vcpu,UINT64 vadr,UINT64 addr_range)
{
	printf("vcpu_ptr_d: Purging TLB is unsupported\n");
	return (IA64_ILLOP_FAULT);
}

IA64FAULT vcpu_ptr_i(VCPU *vcpu,UINT64 vadr,UINT64 addr_range)
{
	printf("vcpu_ptr_i: Purging TLB is unsupported\n");
	return (IA64_ILLOP_FAULT);
}