ia64/xen-unstable: xen/arch/ia64/xen/vcpu.c @ 6862:2556621a7759

Comment out DTLB MATCH message as it is more frequent and annoying now

author		djm@kirby.fc.hp.com
date		Fri Sep 16 16:51:55 2005 -0600 (2005-09-16)
parents		6bf96d977ecc
children	8d133d172bfd

/*
 * Virtualized CPU functions
 *
 * Copyright (C) 2004-2005 Hewlett-Packard Co.
 *	Dan Magenheimer (dan.magenheimer@hp.com)
 *
 */

#if 1
// TEMPORARY PATCH for match_dtlb uses this, can be removed later
// FIXME SMP
int in_tpa = 0;
#endif

#include <linux/sched.h>
#include <public/arch-ia64.h>
#include <asm/ia64_int.h>
#include <asm/vcpu.h>
#include <asm/regionreg.h>
#include <asm/tlb.h>
#include <asm/processor.h>
#include <asm/delay.h>
#include <asm/vmx_vcpu.h>

typedef union {
    struct ia64_psr ia64_psr;
    unsigned long i64;
} PSR;

//typedef struct pt_regs REGS;
//typedef struct domain VCPU;

// this def for vcpu_regs won't work if kernel stack is present
#define vcpu_regs(vcpu) ((struct pt_regs *) vcpu->arch.regs)
#define PSCB(x,y) VCPU(x,y)
#define PSCBX(x,y) x->arch.y

#define TRUE 1
#define FALSE 0
#define IA64_PTA_SZ_BIT 2
#define IA64_PTA_VF_BIT 8
#define IA64_PTA_BASE_BIT 15
#define IA64_PTA_LFMT (1UL << IA64_PTA_VF_BIT)
#define IA64_PTA_SZ(x) (x##UL << IA64_PTA_SZ_BIT)
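
/* Added note (not in the original source): in the architected pta layout
 * the size field occupies bits 2..7 and the base field bits 15..63, so
 * IA64_PTA_SZ(0x3f) is the mask of the whole size field and IA64_PTA_SZ(n)
 * encodes a VHPT of 2^n bytes, e.g. IA64_PTA_SZ(15) for a 32KB table. */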

#define STATIC

#ifdef PRIVOP_ADDR_COUNT
struct privop_addr_count privop_addr_counter[PRIVOP_COUNT_NINSTS] = {
    { "=ifa", { 0 }, { 0 }, 0 },
    { "thash", { 0 }, { 0 }, 0 },
    0
};
extern void privop_count_addr(unsigned long addr, int inst);
#define PRIVOP_COUNT_ADDR(regs,inst) privop_count_addr(regs->cr_iip,inst)
#else
#define PRIVOP_COUNT_ADDR(x,y) do {} while (0)
#endif

unsigned long dtlb_translate_count = 0;
unsigned long tr_translate_count = 0;
unsigned long phys_translate_count = 0;

unsigned long vcpu_verbose = 0;
#define verbose(a...) do {if (vcpu_verbose) printf(a);} while(0)

extern TR_ENTRY *match_tr(VCPU *vcpu, unsigned long ifa);
extern TR_ENTRY *match_dtlb(VCPU *vcpu, unsigned long ifa);

/**************************************************************************
 VCPU general register access routines
**************************************************************************/

UINT64
vcpu_get_gr(VCPU *vcpu, unsigned reg)
{
    REGS *regs = vcpu_regs(vcpu);
    UINT64 val;

    if (!reg) return 0;
    getreg(reg,&val,0,regs);  // FIXME: handle NATs later
    return val;
}

// returns:
//   IA64_ILLOP_FAULT if the register would cause an Illegal Operation fault
//   IA64_NO_FAULT otherwise
IA64FAULT
vcpu_set_gr(VCPU *vcpu, unsigned reg, UINT64 value)
{
    REGS *regs = vcpu_regs(vcpu);
    long sof = (regs->cr_ifs) & 0x7f;

    if (!reg) return IA64_ILLOP_FAULT;
    if (reg >= sof + 32) return IA64_ILLOP_FAULT;
    setreg(reg,value,0,regs);  // FIXME: handle NATs later
    return IA64_NO_FAULT;
}
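
/* Added worked example: cr.ifs.sof (bits 0..6) is the size of the current
 * register frame.  With sof == 8 the stacked registers are r32-r39, so a
 * write to r40 (reg == 40 >= 8 + 32) correctly raises an illegal-operation
 * fault above, while r32-r39 and the static registers are accepted. */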

/**************************************************************************
 VCPU privileged application register access routines
**************************************************************************/

IA64FAULT vcpu_set_ar(VCPU *vcpu, UINT64 reg, UINT64 val)
{
    if (reg == 44) return (vcpu_set_itc(vcpu,val));
    else if (reg == 27) return (IA64_ILLOP_FAULT);
    else if (reg == 24)
        printf("warning: setting ar.eflg is a no-op; no IA-32 support\n");
    else if (reg > 7) return (IA64_ILLOP_FAULT);
    else PSCB(vcpu,krs[reg]) = val;
    return IA64_NO_FAULT;
}

IA64FAULT vcpu_get_ar(VCPU *vcpu, UINT64 reg, UINT64 *val)
{
    if (reg == 24)
        printf("warning: getting ar.eflg is a no-op; no IA-32 support\n");
    else if (reg > 7) return (IA64_ILLOP_FAULT);
    else *val = PSCB(vcpu,krs[reg]);
    return IA64_NO_FAULT;
}

/**************************************************************************
 VCPU processor status register access routines
**************************************************************************/

void vcpu_set_metaphysical_mode(VCPU *vcpu, BOOLEAN newmode)
{
    /* only do something if mode changes */
    if (!!newmode ^ !!PSCB(vcpu,metaphysical_mode)) {
        if (newmode) set_metaphysical_rr0();
        else if (PSCB(vcpu,rrs[0]) != -1)
            set_one_rr(0, PSCB(vcpu,rrs[0]));
        PSCB(vcpu,metaphysical_mode) = newmode;
    }
}

IA64FAULT vcpu_reset_psr_dt(VCPU *vcpu)
{
    vcpu_set_metaphysical_mode(vcpu,TRUE);
    return IA64_NO_FAULT;
}

IA64FAULT vcpu_reset_psr_sm(VCPU *vcpu, UINT64 imm24)
{
    struct ia64_psr psr, imm, *ipsr;
    REGS *regs = vcpu_regs(vcpu);

    //PRIVOP_COUNT_ADDR(regs,_RSM);
    // TODO: All of these bits need to be virtualized
    // TODO: Only allowed for current vcpu
    __asm__ __volatile ("mov %0=psr;;" : "=r"(psr) :: "memory");
    ipsr = (struct ia64_psr *)&regs->cr_ipsr;
    imm = *(struct ia64_psr *)&imm24;
    // interrupt flag
    if (imm.i) PSCB(vcpu,interrupt_delivery_enabled) = 0;
    if (imm.ic) PSCB(vcpu,interrupt_collection_enabled) = 0;
    // interrupt collection flag
    //if (imm.ic) PSCB(vcpu,interrupt_delivery_enabled) = 0;
    // just handle psr.up and psr.pp for now
    if (imm24 & ~(IA64_PSR_BE | IA64_PSR_PP | IA64_PSR_UP | IA64_PSR_SP
                | IA64_PSR_I | IA64_PSR_IC | IA64_PSR_DT
                | IA64_PSR_DFL | IA64_PSR_DFH))
        return (IA64_ILLOP_FAULT);
    if (imm.dfh) ipsr->dfh = 0;
    if (imm.dfl) ipsr->dfl = 0;
    if (imm.pp) { ipsr->pp = 0; psr.pp = 0; }
    if (imm.up) { ipsr->up = 0; psr.up = 0; }
    if (imm.sp) { ipsr->sp = 0; psr.sp = 0; }
    if (imm.be) ipsr->be = 0;
    if (imm.dt) vcpu_set_metaphysical_mode(vcpu,TRUE);
    __asm__ __volatile (";; mov psr.l=%0;; srlz.d"::"r"(psr):"memory");
    return IA64_NO_FAULT;
}

extern UINT64 vcpu_check_pending_interrupts(VCPU *vcpu);
#define SPURIOUS_VECTOR 0xf

IA64FAULT vcpu_set_psr_dt(VCPU *vcpu)
{
    vcpu_set_metaphysical_mode(vcpu,FALSE);
    return IA64_NO_FAULT;
}

IA64FAULT vcpu_set_psr_i(VCPU *vcpu)
{
    PSCB(vcpu,interrupt_delivery_enabled) = 1;
    PSCB(vcpu,interrupt_collection_enabled) = 1;
    return IA64_NO_FAULT;
}

IA64FAULT vcpu_set_psr_sm(VCPU *vcpu, UINT64 imm24)
{
    struct ia64_psr psr, imm, *ipsr;
    REGS *regs = vcpu_regs(vcpu);
    UINT64 mask, enabling_interrupts = 0;

    //PRIVOP_COUNT_ADDR(regs,_SSM);
    // TODO: All of these bits need to be virtualized
    __asm__ __volatile ("mov %0=psr;;" : "=r"(psr) :: "memory");
    imm = *(struct ia64_psr *)&imm24;
    ipsr = (struct ia64_psr *)&regs->cr_ipsr;
    // just handle psr.sp,pp and psr.i,ic (and user mask) for now
    mask = IA64_PSR_PP|IA64_PSR_SP|IA64_PSR_I|IA64_PSR_IC|IA64_PSR_UM |
           IA64_PSR_DT|IA64_PSR_DFL|IA64_PSR_DFH;
    if (imm24 & ~mask) return (IA64_ILLOP_FAULT);
    if (imm.dfh) ipsr->dfh = 1;
    if (imm.dfl) ipsr->dfl = 1;
    if (imm.pp) { ipsr->pp = 1; psr.pp = 1; }
    if (imm.sp) { ipsr->sp = 1; psr.sp = 1; }
    if (imm.i) {
        if (!PSCB(vcpu,interrupt_delivery_enabled)) {
            //printf("vcpu_set_psr_sm: psr.ic 0->1 ");
            enabling_interrupts = 1;
        }
        PSCB(vcpu,interrupt_delivery_enabled) = 1;
    }
    if (imm.ic) PSCB(vcpu,interrupt_collection_enabled) = 1;
    // TODO: do this faster
    if (imm.mfl) { ipsr->mfl = 1; psr.mfl = 1; }
    if (imm.mfh) { ipsr->mfh = 1; psr.mfh = 1; }
    if (imm.ac) { ipsr->ac = 1; psr.ac = 1; }
    if (imm.up) { ipsr->up = 1; psr.up = 1; }
    if (imm.be) {
        printf("*** DOMAIN TRYING TO TURN ON BIG-ENDIAN!!!\n");
        return (IA64_ILLOP_FAULT);
    }
    if (imm.dt) vcpu_set_metaphysical_mode(vcpu,FALSE);
    __asm__ __volatile (";; mov psr.l=%0;; srlz.d"::"r"(psr):"memory");
#if 0 // now done with deliver_pending_interrupts
    if (enabling_interrupts) {
        if (vcpu_check_pending_interrupts(vcpu) != SPURIOUS_VECTOR) {
            //printf("with interrupts pending\n");
            return IA64_EXTINT_VECTOR;
        }
        //else printf("but nothing pending\n");
    }
#endif
    if (enabling_interrupts &&
        vcpu_check_pending_interrupts(vcpu) != SPURIOUS_VECTOR)
        PSCB(vcpu,pending_interruption) = 1;
    return IA64_NO_FAULT;
}

IA64FAULT vcpu_set_psr_l(VCPU *vcpu, UINT64 val)
{
    struct ia64_psr psr, newpsr, *ipsr;
    REGS *regs = vcpu_regs(vcpu);
    UINT64 enabling_interrupts = 0;

    // TODO: All of these bits need to be virtualized
    __asm__ __volatile ("mov %0=psr;;" : "=r"(psr) :: "memory");
    newpsr = *(struct ia64_psr *)&val;
    ipsr = (struct ia64_psr *)&regs->cr_ipsr;
    // just handle psr.up and psr.pp for now
    //if (val & ~(IA64_PSR_PP | IA64_PSR_UP | IA64_PSR_SP)) return (IA64_ILLOP_FAULT);
    // however trying to set other bits can't be an error as it is in ssm
    if (newpsr.dfh) ipsr->dfh = 1;
    if (newpsr.dfl) ipsr->dfl = 1;
    if (newpsr.pp) { ipsr->pp = 1; psr.pp = 1; }
    if (newpsr.up) { ipsr->up = 1; psr.up = 1; }
    if (newpsr.sp) { ipsr->sp = 1; psr.sp = 1; }
    if (newpsr.i) {
        if (!PSCB(vcpu,interrupt_delivery_enabled))
            enabling_interrupts = 1;
        PSCB(vcpu,interrupt_delivery_enabled) = 1;
    }
    if (newpsr.ic) PSCB(vcpu,interrupt_collection_enabled) = 1;
    if (newpsr.mfl) { ipsr->mfl = 1; psr.mfl = 1; }
    if (newpsr.mfh) { ipsr->mfh = 1; psr.mfh = 1; }
    if (newpsr.ac) { ipsr->ac = 1; psr.ac = 1; }
    if (newpsr.up) { ipsr->up = 1; psr.up = 1; }
    if (newpsr.dt && newpsr.rt) vcpu_set_metaphysical_mode(vcpu,FALSE);
    else vcpu_set_metaphysical_mode(vcpu,TRUE);
    if (newpsr.be) {
        printf("*** DOMAIN TRYING TO TURN ON BIG-ENDIAN!!!\n");
        return (IA64_ILLOP_FAULT);
    }
    //__asm__ __volatile (";; mov psr.l=%0;; srlz.d"::"r"(psr):"memory");
#if 0 // now done with deliver_pending_interrupts
    if (enabling_interrupts) {
        if (vcpu_check_pending_interrupts(vcpu) != SPURIOUS_VECTOR)
            return IA64_EXTINT_VECTOR;
    }
#endif
    if (enabling_interrupts &&
        vcpu_check_pending_interrupts(vcpu) != SPURIOUS_VECTOR)
        PSCB(vcpu,pending_interruption) = 1;
    return IA64_NO_FAULT;
}

IA64FAULT vcpu_get_psr(VCPU *vcpu, UINT64 *pval)
{
    UINT64 psr;
    struct ia64_psr newpsr;

    // TODO: This needs to return a "filtered" view of
    // the psr, not the actual psr.  Probably the psr needs
    // to be a field in regs (in addition to ipsr).
    __asm__ __volatile ("mov %0=psr;;" : "=r"(psr) :: "memory");
    newpsr = *(struct ia64_psr *)&psr;
    if (newpsr.cpl == 2) newpsr.cpl = 0;
    if (PSCB(vcpu,interrupt_delivery_enabled)) newpsr.i = 1;
    else newpsr.i = 0;
    if (PSCB(vcpu,interrupt_collection_enabled)) newpsr.ic = 1;
    else newpsr.ic = 0;
    *pval = *(unsigned long *)&newpsr;
    return IA64_NO_FAULT;
}

BOOLEAN vcpu_get_psr_ic(VCPU *vcpu)
{
    return !!PSCB(vcpu,interrupt_collection_enabled);
}

BOOLEAN vcpu_get_psr_i(VCPU *vcpu)
{
    return !!PSCB(vcpu,interrupt_delivery_enabled);
}

UINT64 vcpu_get_ipsr_int_state(VCPU *vcpu,UINT64 prevpsr)
{
    UINT64 dcr = PSCBX(vcpu,dcr);
    PSR psr = {0};

    //printf("*** vcpu_get_ipsr_int_state (0x%016lx)...",prevpsr);
    psr.i64 = prevpsr;
    psr.ia64_psr.be = 0; if (dcr & IA64_DCR_BE) psr.ia64_psr.be = 1;
    psr.ia64_psr.pp = 0; if (dcr & IA64_DCR_PP) psr.ia64_psr.pp = 1;
    psr.ia64_psr.ic = PSCB(vcpu,interrupt_collection_enabled);
    psr.ia64_psr.i = PSCB(vcpu,interrupt_delivery_enabled);
    psr.ia64_psr.bn = PSCB(vcpu,banknum);
    psr.ia64_psr.dt = 1; psr.ia64_psr.it = 1; psr.ia64_psr.rt = 1;
    if (psr.ia64_psr.cpl == 2) psr.ia64_psr.cpl = 0; // !!!! fool domain
    // psr.pk = 1;
    //printf("returns 0x%016lx...",psr.i64);
    return psr.i64;
}

/**************************************************************************
 VCPU control register access routines
**************************************************************************/

IA64FAULT vcpu_get_dcr(VCPU *vcpu, UINT64 *pval)
{
    extern unsigned long privop_trace;
    //privop_trace=0;
    //verbose("vcpu_get_dcr: called @%p\n",PSCB(vcpu,iip));
    // Reads of cr.dcr on Xen always have the sign bit set, so
    // a domain can differentiate whether it is running on SP or not
    *pval = PSCBX(vcpu,dcr) | 0x8000000000000000L;
    return (IA64_NO_FAULT);
}
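
#if 0
/* Illustrative sketch (hypothetical, not part of this file): using the
 * always-set sign bit documented above, a paravirtualized guest could
 * detect that its cr.dcr reads are being emulated by Xen. */
static int guest_running_on_xen(void)
{
    unsigned long dcr = ia64_getreg(_IA64_REG_CR_DCR); // privileged; trapped and emulated for a guest
    return (long)dcr < 0;  // sign bit set => virtualized dcr
}
#endif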

IA64FAULT vcpu_get_iva(VCPU *vcpu, UINT64 *pval)
{
    if (VMX_DOMAIN(vcpu)) {
        *pval = PSCB(vcpu,iva) & ~0x7fffL;
    } else {
        *pval = PSCBX(vcpu,iva) & ~0x7fffL;
    }
    return (IA64_NO_FAULT);
}

IA64FAULT vcpu_get_pta(VCPU *vcpu, UINT64 *pval)
{
    *pval = PSCB(vcpu,pta);
    return (IA64_NO_FAULT);
}

IA64FAULT vcpu_get_ipsr(VCPU *vcpu, UINT64 *pval)
{
    //REGS *regs = vcpu_regs(vcpu);
    //*pval = regs->cr_ipsr;
    *pval = PSCB(vcpu,ipsr);
    return (IA64_NO_FAULT);
}

IA64FAULT vcpu_get_isr(VCPU *vcpu, UINT64 *pval)
{
    *pval = PSCB(vcpu,isr);
    return (IA64_NO_FAULT);
}

IA64FAULT vcpu_get_iip(VCPU *vcpu, UINT64 *pval)
{
    //REGS *regs = vcpu_regs(vcpu);
    //*pval = regs->cr_iip;
    *pval = PSCB(vcpu,iip);
    return (IA64_NO_FAULT);
}

IA64FAULT vcpu_get_ifa(VCPU *vcpu, UINT64 *pval)
{
    UINT64 val = PSCB(vcpu,ifa);
    REGS *regs = vcpu_regs(vcpu);
    PRIVOP_COUNT_ADDR(regs,_GET_IFA);
    *pval = val;
    return (IA64_NO_FAULT);
}

unsigned long vcpu_get_rr_ps(VCPU *vcpu,UINT64 vadr)
{
    ia64_rr rr;

    rr.rrval = PSCB(vcpu,rrs)[vadr>>61];
    return (rr.ps);
}

unsigned long vcpu_get_rr_rid(VCPU *vcpu,UINT64 vadr)
{
    ia64_rr rr;

    rr.rrval = PSCB(vcpu,rrs)[vadr>>61];
    return (rr.rid);
}

unsigned long vcpu_get_itir_on_fault(VCPU *vcpu, UINT64 ifa)
{
    ia64_rr rr;

    rr.rrval = 0;
    rr.ps = vcpu_get_rr_ps(vcpu,ifa);
    rr.rid = vcpu_get_rr_rid(vcpu,ifa);
    return (rr.rrval);
}

IA64FAULT vcpu_get_itir(VCPU *vcpu, UINT64 *pval)
{
    UINT64 val = PSCB(vcpu,itir);
    *pval = val;
    return (IA64_NO_FAULT);
}

IA64FAULT vcpu_get_iipa(VCPU *vcpu, UINT64 *pval)
{
    UINT64 val = PSCB(vcpu,iipa);
    // SP entry code does not save iipa yet nor does it get
    // properly delivered in the pscb
    // printf("*** vcpu_get_iipa: cr.iipa not fully implemented yet!!\n");
    *pval = val;
    return (IA64_NO_FAULT);
}

IA64FAULT vcpu_get_ifs(VCPU *vcpu, UINT64 *pval)
{
    //PSCB(vcpu,ifs) = PSCB(vcpu)->regs.cr_ifs;
    //*pval = PSCB(vcpu,regs).cr_ifs;
    *pval = PSCB(vcpu,ifs);
    PSCB(vcpu,incomplete_regframe) = 0;
    return (IA64_NO_FAULT);
}

IA64FAULT vcpu_get_iim(VCPU *vcpu, UINT64 *pval)
{
    UINT64 val = PSCB(vcpu,iim);
    *pval = val;
    return (IA64_NO_FAULT);
}

IA64FAULT vcpu_get_iha(VCPU *vcpu, UINT64 *pval)
{
    //return vcpu_thash(vcpu,PSCB(vcpu,ifa),pval);
    UINT64 val = PSCB(vcpu,iha);
    REGS *regs = vcpu_regs(vcpu);
    PRIVOP_COUNT_ADDR(regs,_THASH);
    *pval = val;
    return (IA64_NO_FAULT);
}

IA64FAULT vcpu_set_dcr(VCPU *vcpu, UINT64 val)
{
    extern unsigned long privop_trace;
    //privop_trace=1;
    // Reads of cr.dcr on SP always have the sign bit set, so
    // a domain can differentiate whether it is running on SP or not
    // Thus, writes of DCR should ignore the sign bit
    //verbose("vcpu_set_dcr: called\n");
    PSCBX(vcpu,dcr) = val & ~0x8000000000000000L;
    return (IA64_NO_FAULT);
}

IA64FAULT vcpu_set_iva(VCPU *vcpu, UINT64 val)
{
    if (VMX_DOMAIN(vcpu)) {
        PSCB(vcpu,iva) = val & ~0x7fffL;
    } else {
        PSCBX(vcpu,iva) = val & ~0x7fffL;
    }
    return (IA64_NO_FAULT);
}

IA64FAULT vcpu_set_pta(VCPU *vcpu, UINT64 val)
{
    if (val & IA64_PTA_LFMT) {
        printf("*** No support for VHPT long format yet!!\n");
        return (IA64_ILLOP_FAULT);
    }
    if (val & (0x3f<<9)) /* reserved fields */ return IA64_RSVDREG_FAULT;
    if (val & 2) /* reserved fields */ return IA64_RSVDREG_FAULT;
    PSCB(vcpu,pta) = val;
    return IA64_NO_FAULT;
}

IA64FAULT vcpu_set_ipsr(VCPU *vcpu, UINT64 val)
{
    PSCB(vcpu,ipsr) = val;
    return IA64_NO_FAULT;
}

IA64FAULT vcpu_set_isr(VCPU *vcpu, UINT64 val)
{
    PSCB(vcpu,isr) = val;
    return IA64_NO_FAULT;
}

IA64FAULT vcpu_set_iip(VCPU *vcpu, UINT64 val)
{
    PSCB(vcpu,iip) = val;
    return IA64_NO_FAULT;
}

IA64FAULT vcpu_increment_iip(VCPU *vcpu)
{
    REGS *regs = vcpu_regs(vcpu);
    struct ia64_psr *ipsr = (struct ia64_psr *)&regs->cr_ipsr;
    if (ipsr->ri == 2) { ipsr->ri = 0; regs->cr_iip += 16; }
    else ipsr->ri++;
    return (IA64_NO_FAULT);
}

IA64FAULT vcpu_set_ifa(VCPU *vcpu, UINT64 val)
{
    PSCB(vcpu,ifa) = val;
    return IA64_NO_FAULT;
}

IA64FAULT vcpu_set_itir(VCPU *vcpu, UINT64 val)
{
    PSCB(vcpu,itir) = val;
    return IA64_NO_FAULT;
}

IA64FAULT vcpu_set_iipa(VCPU *vcpu, UINT64 val)
{
    // SP entry code does not save iipa yet nor does it get
    // properly delivered in the pscb
    // printf("*** vcpu_set_iipa: cr.iipa not fully implemented yet!!\n");
    PSCB(vcpu,iipa) = val;
    return IA64_NO_FAULT;
}

IA64FAULT vcpu_set_ifs(VCPU *vcpu, UINT64 val)
{
    //REGS *regs = vcpu_regs(vcpu);
    PSCB(vcpu,ifs) = val;
    return IA64_NO_FAULT;
}

IA64FAULT vcpu_set_iim(VCPU *vcpu, UINT64 val)
{
    PSCB(vcpu,iim) = val;
    return IA64_NO_FAULT;
}

IA64FAULT vcpu_set_iha(VCPU *vcpu, UINT64 val)
{
    PSCB(vcpu,iha) = val;
    return IA64_NO_FAULT;
}

/**************************************************************************
 VCPU interrupt control register access routines
**************************************************************************/

void vcpu_pend_unspecified_interrupt(VCPU *vcpu)
{
    PSCB(vcpu,pending_interruption) = 1;
}

void vcpu_pend_interrupt(VCPU *vcpu, UINT64 vector)
{
    if (vector & ~0xff) {
        printf("vcpu_pend_interrupt: bad vector\n");
        return;
    }
//#ifdef CONFIG_VTI
    if ( VMX_DOMAIN(vcpu) ) {
        set_bit(vector,VCPU(vcpu,irr));
    } else
//#endif // CONFIG_VTI
    {
        /* if (!test_bit(vector,PSCB(vcpu,delivery_mask))) return; */
        if (test_bit(vector,PSCBX(vcpu,irr))) {
            //printf("vcpu_pend_interrupt: overrun\n");
        }
        set_bit(vector,PSCBX(vcpu,irr));
        PSCB(vcpu,pending_interruption) = 1;
    }

#if 0
    /* Keir: I think you should unblock when an interrupt is pending. */
    {
        int running = test_bit(_VCPUF_running, &vcpu->vcpu_flags);
        vcpu_unblock(vcpu);
        if ( running )
            smp_send_event_check_cpu(vcpu->processor);
    }
#endif
}

void early_tick(VCPU *vcpu)
{
    UINT64 *p = &PSCBX(vcpu,irr[3]);
    printf("vcpu_check_pending: about to deliver early tick\n");
    printf("&irr[0]=%p, irr[0]=0x%lx\n",p,*p);
}

#define IA64_TPR_MMI 0x10000
#define IA64_TPR_MIC 0x000f0

/* checks to see if a VCPU has any unmasked pending interrupts
 * if so, returns the highest, else returns SPURIOUS_VECTOR */
/* NOTE: Since this gets called from vcpu_get_ivr() and the
 * semantics of "mov rx=cr.ivr" ignore the setting of the psr.i bit,
 * this routine also ignores pscb.interrupt_delivery_enabled
 * and this must be checked independently; see vcpu_deliverable_interrupts() */
UINT64 vcpu_check_pending_interrupts(VCPU *vcpu)
{
    UINT64 *p, *q, *r, bits, bitnum, mask, i, vector;

    p = &PSCBX(vcpu,irr[3]);
    /* q = &PSCB(vcpu,delivery_mask[3]); */
    r = &PSCBX(vcpu,insvc[3]);
    for (i = 3; ; p--, q--, r--, i--) {
        bits = *p /* & *q */;
        if (bits) break; // got a potential interrupt
        if (*r) {
            // nothing in this word which is pending+inservice
            // but there is one inservice which masks lower
            return SPURIOUS_VECTOR;
        }
        if (i == 0) {
            // checked all bits... nothing pending+inservice
            return SPURIOUS_VECTOR;
        }
    }
    // have a pending,deliverable interrupt... see if it is masked
    bitnum = ia64_fls(bits);
    //printf("XXXXXXX vcpu_check_pending_interrupts: got bitnum=%p...",bitnum);
    vector = bitnum + (i*64);
    mask = 1L << bitnum;
    //printf("XXXXXXX vcpu_check_pending_interrupts: got vector=%p...",vector);
    if (*r >= mask) {
        // masked by equal inservice
        //printf("but masked by equal inservice\n");
        return SPURIOUS_VECTOR;
    }
    if (PSCB(vcpu,tpr) & IA64_TPR_MMI) {
        // tpr.mmi is set
        //printf("but masked by tpr.mmi\n");
        return SPURIOUS_VECTOR;
    }
    if (((PSCB(vcpu,tpr) & IA64_TPR_MIC) + 15) >= vector) {
        //tpr.mic masks class
        //printf("but masked by tpr.mic\n");
        return SPURIOUS_VECTOR;
    }

    //printf("returned to caller\n");
#if 0
    if (vector == (PSCB(vcpu,itv) & 0xff)) {
        UINT64 now = ia64_get_itc();
        UINT64 itm = PSCBX(vcpu,domain_itm);
        if (now < itm) early_tick(vcpu);
    }
#endif
    return vector;
}
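
/* Added worked example: tpr.mic lives in bits 4..7, so
 * (tpr & IA64_TPR_MIC) == mic << 4 == mic * 16.  The test above,
 * (mic*16 + 15) >= vector, masks every vector whose priority class
 * (vector/16) is <= mic; with mic == 14, vectors 0-239 are masked and
 * only class 15 (0xf0-0xff) remains deliverable. */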

UINT64 vcpu_deliverable_interrupts(VCPU *vcpu)
{
    return (vcpu_get_psr_i(vcpu) &&
            vcpu_check_pending_interrupts(vcpu) != SPURIOUS_VECTOR);
}

UINT64 vcpu_deliverable_timer(VCPU *vcpu)
{
    return (vcpu_get_psr_i(vcpu) &&
            vcpu_check_pending_interrupts(vcpu) == PSCB(vcpu,itv));
}

IA64FAULT vcpu_get_lid(VCPU *vcpu, UINT64 *pval)
{
    extern unsigned long privop_trace;
    //privop_trace=1;
    //TODO: Implement this
    printf("vcpu_get_lid: WARNING: Getting cr.lid always returns zero\n");
    //*pval = 0;
    *pval = ia64_getreg(_IA64_REG_CR_LID);
    return IA64_NO_FAULT;
}

IA64FAULT vcpu_get_ivr(VCPU *vcpu, UINT64 *pval)
{
    int i;
    UINT64 vector, mask;

#define HEARTBEAT_FREQ 16  // period in seconds
#ifdef HEARTBEAT_FREQ
#define N_DOMS 16  // number of domains tracked
    static long count[N_DOMS] = { 0 };
    static long nonclockcount[N_DOMS] = { 0 };
    REGS *regs = vcpu_regs(vcpu);
    unsigned domid = vcpu->domain->domain_id;
#endif
#ifdef IRQ_DEBUG
    static char firstivr = 1;
    static char firsttime[256];
    if (firstivr) {
        int i;
        for (i=0;i<256;i++) firsttime[i]=1;
        firstivr=0;
    }
#endif

    vector = vcpu_check_pending_interrupts(vcpu);
    if (vector == SPURIOUS_VECTOR) {
        PSCB(vcpu,pending_interruption) = 0;
        *pval = vector;
        return IA64_NO_FAULT;
    }
#ifdef HEARTBEAT_FREQ
    if (domid >= N_DOMS) domid = N_DOMS-1;
    if (vector == (PSCB(vcpu,itv) & 0xff)) {
        if (!(++count[domid] & ((HEARTBEAT_FREQ*1024)-1))) {
            printf("Dom%d heartbeat... ticks=%lx,nonticks=%lx\n",
                   domid, count[domid], nonclockcount[domid]);
            //count[domid] = 0;
            //dump_runq();
        }
    }
    else nonclockcount[domid]++;
#endif
    // now have an unmasked, pending, deliverable vector!
    // getting ivr has "side effects"
#ifdef IRQ_DEBUG
    if (firsttime[vector]) {
        printf("*** First get_ivr on vector=%ld,itc=%lx\n",
               vector,ia64_get_itc());
        firsttime[vector]=0;
    }
#endif
    i = vector >> 6;
    mask = 1L << (vector & 0x3f);
    //printf("ZZZZZZ vcpu_get_ivr: setting insvc mask for vector %ld\n",vector);
    PSCBX(vcpu,insvc[i]) |= mask;
    PSCBX(vcpu,irr[i]) &= ~mask;
    //PSCB(vcpu,pending_interruption)--;
    *pval = vector;
    // if delivering a timer interrupt, remember domain_itm
    if (vector == (PSCB(vcpu,itv) & 0xff)) {
        PSCBX(vcpu,domain_itm_last) = PSCBX(vcpu,domain_itm);
    }
    return IA64_NO_FAULT;
}

IA64FAULT vcpu_get_tpr(VCPU *vcpu, UINT64 *pval)
{
    *pval = PSCB(vcpu,tpr);
    return (IA64_NO_FAULT);
}

IA64FAULT vcpu_get_eoi(VCPU *vcpu, UINT64 *pval)
{
    *pval = 0L;  // reads of eoi always return 0
    return (IA64_NO_FAULT);
}

IA64FAULT vcpu_get_irr0(VCPU *vcpu, UINT64 *pval)
{
#ifndef IRR_USE_FIXED
    printk("vcpu_get_irr: called, not implemented yet\n");
    return IA64_ILLOP_FAULT;
#else
    *pval = vcpu->irr[0];
    return (IA64_NO_FAULT);
#endif
}

IA64FAULT vcpu_get_irr1(VCPU *vcpu, UINT64 *pval)
{
#ifndef IRR_USE_FIXED
    printk("vcpu_get_irr: called, not implemented yet\n");
    return IA64_ILLOP_FAULT;
#else
    *pval = vcpu->irr[1];
    return (IA64_NO_FAULT);
#endif
}

IA64FAULT vcpu_get_irr2(VCPU *vcpu, UINT64 *pval)
{
#ifndef IRR_USE_FIXED
    printk("vcpu_get_irr: called, not implemented yet\n");
    return IA64_ILLOP_FAULT;
#else
    *pval = vcpu->irr[2];
    return (IA64_NO_FAULT);
#endif
}

IA64FAULT vcpu_get_irr3(VCPU *vcpu, UINT64 *pval)
{
#ifndef IRR_USE_FIXED
    printk("vcpu_get_irr: called, not implemented yet\n");
    return IA64_ILLOP_FAULT;
#else
    *pval = vcpu->irr[3];
    return (IA64_NO_FAULT);
#endif
}

IA64FAULT vcpu_get_itv(VCPU *vcpu, UINT64 *pval)
{
    *pval = PSCB(vcpu,itv);
    return (IA64_NO_FAULT);
}

IA64FAULT vcpu_get_pmv(VCPU *vcpu, UINT64 *pval)
{
    *pval = PSCB(vcpu,pmv);
    return (IA64_NO_FAULT);
}

IA64FAULT vcpu_get_cmcv(VCPU *vcpu, UINT64 *pval)
{
    *pval = PSCB(vcpu,cmcv);
    return (IA64_NO_FAULT);
}

IA64FAULT vcpu_get_lrr0(VCPU *vcpu, UINT64 *pval)
{
    // fix this when setting values other than m-bit is supported
    printf("vcpu_get_lrr0: Unmasked interrupts unsupported\n");
    *pval = (1L << 16);
    return (IA64_NO_FAULT);
}

IA64FAULT vcpu_get_lrr1(VCPU *vcpu, UINT64 *pval)
{
    // fix this when setting values other than m-bit is supported
    printf("vcpu_get_lrr1: Unmasked interrupts unsupported\n");
    *pval = (1L << 16);
    return (IA64_NO_FAULT);
}

IA64FAULT vcpu_set_lid(VCPU *vcpu, UINT64 val)
{
    printf("vcpu_set_lid: Setting cr.lid is unsupported\n");
    return (IA64_ILLOP_FAULT);
}

IA64FAULT vcpu_set_tpr(VCPU *vcpu, UINT64 val)
{
    if (val & 0xff00) return IA64_RSVDREG_FAULT;
    PSCB(vcpu,tpr) = val;
    if (vcpu_check_pending_interrupts(vcpu) != SPURIOUS_VECTOR)
        PSCB(vcpu,pending_interruption) = 1;
    return (IA64_NO_FAULT);
}

IA64FAULT vcpu_set_eoi(VCPU *vcpu, UINT64 val)
{
    UINT64 *p, bits, vec, bitnum;
    int i;

    p = &PSCBX(vcpu,insvc[3]);
    for (i = 3; (i >= 0) && !(bits = *p); i--, p--);
    if (i < 0) {
        printf("Trying to EOI interrupt when none are in-service.\r\n");
        return IA64_NO_FAULT;
    }
    bitnum = ia64_fls(bits);
    vec = bitnum + (i*64);
    /* clear the correct bit */
    bits &= ~(1L << bitnum);
    *p = bits;
    /* clearing an eoi bit may unmask another pending interrupt... */
    if (PSCB(vcpu,interrupt_delivery_enabled)) { // but only if enabled...
        // worry about this later... Linux only calls eoi
        // with interrupts disabled
        printf("Trying to EOI interrupt with interrupts enabled\r\n");
    }
    if (vcpu_check_pending_interrupts(vcpu) != SPURIOUS_VECTOR)
        PSCB(vcpu,pending_interruption) = 1;
    //printf("YYYYY vcpu_set_eoi: Successful\n");
    return (IA64_NO_FAULT);
}

IA64FAULT vcpu_set_lrr0(VCPU *vcpu, UINT64 val)
{
    if (!(val & (1L << 16))) {
        printf("vcpu_set_lrr0: Unmasked interrupts unsupported\n");
        return (IA64_ILLOP_FAULT);
    }
    // no place to save this state but nothing to do anyway
    return (IA64_NO_FAULT);
}

IA64FAULT vcpu_set_lrr1(VCPU *vcpu, UINT64 val)
{
    if (!(val & (1L << 16))) {
        printf("vcpu_set_lrr1: Unmasked interrupts unsupported\n");
        return (IA64_ILLOP_FAULT);
    }
    // no place to save this state but nothing to do anyway
    return (IA64_NO_FAULT);
}

// parameter is a time interval specified in cycles
void vcpu_enable_timer(VCPU *vcpu,UINT64 cycles)
{
    PSCBX(vcpu,xen_timer_interval) = cycles;
    vcpu_set_next_timer(vcpu);
    printf("vcpu_enable_timer: interval set to %ld cycles\n",
           PSCBX(vcpu,xen_timer_interval));
    __set_bit(PSCB(vcpu,itv), PSCB(vcpu,delivery_mask));
}

IA64FAULT vcpu_set_itv(VCPU *vcpu, UINT64 val)
{
    extern unsigned long privop_trace;
    //privop_trace=1;
    if (val & 0xef00) return (IA64_ILLOP_FAULT);
    PSCB(vcpu,itv) = val;
    if (val & 0x10000) {
        printf("**** vcpu_set_itv(%lx): vitm=%lx, setting to 0\n",
               val, PSCBX(vcpu,domain_itm));
        PSCBX(vcpu,domain_itm) = 0;
    }
    else vcpu_enable_timer(vcpu,1000000L);
    return (IA64_NO_FAULT);
}

IA64FAULT vcpu_set_pmv(VCPU *vcpu, UINT64 val)
{
    if (val & 0xef00) /* reserved fields */ return IA64_RSVDREG_FAULT;
    PSCB(vcpu,pmv) = val;
    return (IA64_NO_FAULT);
}

IA64FAULT vcpu_set_cmcv(VCPU *vcpu, UINT64 val)
{
    if (val & 0xef00) /* reserved fields */ return IA64_RSVDREG_FAULT;
    PSCB(vcpu,cmcv) = val;
    return (IA64_NO_FAULT);
}

/**************************************************************************
 VCPU temporary register access routines
**************************************************************************/
UINT64 vcpu_get_tmp(VCPU *vcpu, UINT64 index)
{
    if (index > 7) return 0;
    return PSCB(vcpu,tmp[index]);
}

void vcpu_set_tmp(VCPU *vcpu, UINT64 index, UINT64 val)
{
    if (index <= 7) PSCB(vcpu,tmp[index]) = val;
}

/**************************************************************************
 Interval timer routines
**************************************************************************/

BOOLEAN vcpu_timer_disabled(VCPU *vcpu)
{
    UINT64 itv = PSCB(vcpu,itv);
    return (!itv || !!(itv & 0x10000));
}

BOOLEAN vcpu_timer_inservice(VCPU *vcpu)
{
    UINT64 itv = PSCB(vcpu,itv);
    return (test_bit(itv, PSCBX(vcpu,insvc)));
}

BOOLEAN vcpu_timer_expired(VCPU *vcpu)
{
    unsigned long domain_itm = PSCBX(vcpu,domain_itm);
    unsigned long now = ia64_get_itc();

    if (!domain_itm) return FALSE;
    if (now < domain_itm) return FALSE;
    if (vcpu_timer_disabled(vcpu)) return FALSE;
    return TRUE;
}

void vcpu_safe_set_itm(unsigned long val)
{
    unsigned long epsilon = 100;
    UINT64 now = ia64_get_itc();

    local_irq_disable();
    while (1) {
        //printf("*** vcpu_safe_set_itm: Setting itm to %lx, itc=%lx\n",val,now);
        ia64_set_itm(val);
        if (val > (now = ia64_get_itc())) break;
        val = now + epsilon;
        epsilon <<= 1;
    }
    local_irq_enable();
}

void vcpu_set_next_timer(VCPU *vcpu)
{
    UINT64 d = PSCBX(vcpu,domain_itm);
    //UINT64 s = PSCBX(vcpu,xen_itm);
    UINT64 s = local_cpu_data->itm_next;
    UINT64 now = ia64_get_itc();
    //UINT64 interval = PSCBX(vcpu,xen_timer_interval);

    /* gloss over the wraparound problem for now... we know it exists
     * but it doesn't matter right now */

#if 0
    /* ensure at least next SP tick is in the future */
    if (!interval) PSCBX(vcpu,xen_itm) = now +
#if 0
        (running_on_sim() ? SIM_DEFAULT_CLOCK_RATE :
                            DEFAULT_CLOCK_RATE);
#else
        3000000;
    //printf("vcpu_set_next_timer: HACK!\n");
#endif
#if 0
    if (PSCBX(vcpu,xen_itm) < now)
        while (PSCBX(vcpu,xen_itm) < now + (interval>>1))
            PSCBX(vcpu,xen_itm) += interval;
#endif
#endif

    if (is_idle_task(vcpu->domain)) {
        // printf("****** vcpu_set_next_timer called during idle!!\n");
        vcpu_safe_set_itm(s);
        return;
    }
    //s = PSCBX(vcpu,xen_itm);
    if (d && (d > now) && (d < s)) {
        vcpu_safe_set_itm(d);
        //using_domain_as_itm++;
    }
    else {
        vcpu_safe_set_itm(s);
        //using_xen_as_itm++;
    }
}

IA64FAULT vcpu_set_itm(VCPU *vcpu, UINT64 val)
{
    UINT64 now = ia64_get_itc();

    //if (val < now) val = now + 1000;
    //printf("*** vcpu_set_itm: called with %lx\n",val);
    PSCBX(vcpu,domain_itm) = val;
    vcpu_set_next_timer(vcpu);
    return (IA64_NO_FAULT);
}

IA64FAULT vcpu_set_itc(VCPU *vcpu, UINT64 val)
{
    UINT64 oldnow = ia64_get_itc();
    UINT64 olditm = PSCBX(vcpu,domain_itm);
    unsigned long d = olditm - oldnow;
    unsigned long x = local_cpu_data->itm_next - oldnow;

    UINT64 newnow = val, min_delta;

#define DISALLOW_SETTING_ITC_FOR_NOW
#ifdef DISALLOW_SETTING_ITC_FOR_NOW
    printf("vcpu_set_itc: Setting ar.itc is currently disabled\n");
#else
    local_irq_disable();
    if (olditm) {
        printf("**** vcpu_set_itc(%lx): vitm changed to %lx\n",val,newnow+d);
        PSCBX(vcpu,domain_itm) = newnow + d;
    }
    local_cpu_data->itm_next = newnow + x;
    d = PSCBX(vcpu,domain_itm);
    x = local_cpu_data->itm_next;

    ia64_set_itc(newnow);
    if (d && (d > newnow) && (d < x)) {
        vcpu_safe_set_itm(d);
        //using_domain_as_itm++;
    }
    else {
        vcpu_safe_set_itm(x);
        //using_xen_as_itm++;
    }
    local_irq_enable();
#endif
    return (IA64_NO_FAULT);
}

IA64FAULT vcpu_get_itm(VCPU *vcpu, UINT64 *pval)
{
    //FIXME: Implement this
    printf("vcpu_get_itm: Getting cr.itm is unsupported... continuing\n");
    return (IA64_NO_FAULT);
    //return (IA64_ILLOP_FAULT);
}

IA64FAULT vcpu_get_itc(VCPU *vcpu, UINT64 *pval)
{
    //TODO: Implement this
    printf("vcpu_get_itc: Getting ar.itc is unsupported\n");
    return (IA64_ILLOP_FAULT);
}

void vcpu_pend_timer(VCPU *vcpu)
{
    UINT64 itv = PSCB(vcpu,itv) & 0xff;

    if (vcpu_timer_disabled(vcpu)) return;
    //if (vcpu_timer_inservice(vcpu)) return;
    if (PSCBX(vcpu,domain_itm_last) == PSCBX(vcpu,domain_itm)) {
        // already delivered an interrupt for this so
        // don't deliver another
        return;
    }
#if 0
    // attempt to flag "timer tick before its due" source
    {
        UINT64 itm = PSCBX(vcpu,domain_itm);
        UINT64 now = ia64_get_itc();
        if (now < itm) printf("******* vcpu_pend_timer: pending before due!\n");
    }
#endif
    vcpu_pend_interrupt(vcpu, itv);
}

// returns true if ready to deliver a timer interrupt too early
UINT64 vcpu_timer_pending_early(VCPU *vcpu)
{
    UINT64 now = ia64_get_itc();
    UINT64 itm = PSCBX(vcpu,domain_itm);

    if (vcpu_timer_disabled(vcpu)) return 0;
    if (!itm) return 0;
    return (vcpu_deliverable_timer(vcpu) && (now < itm));
}

//FIXME: This is a hack because everything dies if a timer tick is lost
void vcpu_poke_timer(VCPU *vcpu)
{
    UINT64 itv = PSCB(vcpu,itv) & 0xff;
    UINT64 now = ia64_get_itc();
    UINT64 itm = PSCBX(vcpu,domain_itm);
    UINT64 irr;

    if (vcpu_timer_disabled(vcpu)) return;
    if (!itm) return;
    if (itv != 0xefL) {
        printf("vcpu_poke_timer: unimplemented itv=%lx!\n",itv);
        while(1);
    }
    // using 0xef instead of itv so can get real irr
    if (now > itm && !test_bit(0xefL, PSCBX(vcpu,insvc))) {
        if (!test_bit(0xefL,PSCBX(vcpu,irr))) {
            irr = ia64_getreg(_IA64_REG_CR_IRR3);
            if (irr & (1L<<(0xef-0xc0))) return;
            if (now-itm>0x800000)
                printf("*** poking timer: now=%lx,vitm=%lx,xitm=%lx,itm=%lx\n",
                       now, itm, local_cpu_data->itm_next, ia64_get_itm());
            vcpu_pend_timer(vcpu);
        }
    }
}

/**************************************************************************
 Privileged operation emulation routines
**************************************************************************/

IA64FAULT vcpu_force_data_miss(VCPU *vcpu, UINT64 ifa)
{
    PSCB(vcpu,tmp[0]) = ifa;  // save ifa in vcpu structure, then specify IA64_FORCED_IFA
    return (vcpu_get_rr_ve(vcpu,ifa) ? IA64_DATA_TLB_VECTOR : IA64_ALT_DATA_TLB_VECTOR) | IA64_FORCED_IFA;
}

IA64FAULT vcpu_rfi(VCPU *vcpu)
{
    // TODO: Only allowed for current vcpu
    PSR psr;
    UINT64 int_enable, regspsr = 0;
    UINT64 ifs;
    REGS *regs = vcpu_regs(vcpu);
    extern void dorfirfi(void);

    psr.i64 = PSCB(vcpu,ipsr);
    if (psr.ia64_psr.cpl < 3) psr.ia64_psr.cpl = 2;
    if (psr.ia64_psr.i) PSCB(vcpu,interrupt_delivery_enabled) = 1;
    int_enable = psr.ia64_psr.i;
    if (psr.ia64_psr.ic) PSCB(vcpu,interrupt_collection_enabled) = 1;
    if (psr.ia64_psr.dt && psr.ia64_psr.rt && psr.ia64_psr.it)
        vcpu_set_metaphysical_mode(vcpu,FALSE);
    else vcpu_set_metaphysical_mode(vcpu,TRUE);
    psr.ia64_psr.ic = 1; psr.ia64_psr.i = 1;
    psr.ia64_psr.dt = 1; psr.ia64_psr.rt = 1; psr.ia64_psr.it = 1;
    psr.ia64_psr.bn = 1;
    //psr.pk = 1;  // checking pkeys shouldn't be a problem but seems broken
    if (psr.ia64_psr.be) {
        printf("*** DOMAIN TRYING TO TURN ON BIG-ENDIAN!!!\n");
        return (IA64_ILLOP_FAULT);
    }
    PSCB(vcpu,incomplete_regframe) = 0; // is this necessary?
    ifs = PSCB(vcpu,ifs);
    //if ((ifs & regs->cr_ifs & 0x8000000000000000L) && ifs != regs->cr_ifs) {
    //if ((ifs & 0x8000000000000000L) && ifs != regs->cr_ifs) {
    if (ifs & regs->cr_ifs & 0x8000000000000000L) {
        // TODO: validate PSCB(vcpu,iip)
        // TODO: PSCB(vcpu,ipsr) = psr;
        PSCB(vcpu,ipsr) = psr.i64;
        // now set up the trampoline
        regs->cr_iip = *(unsigned long *)dorfirfi; // function pointer!!
        __asm__ __volatile ("mov %0=psr;;":"=r"(regspsr)::"memory");
        regs->cr_ipsr = regspsr & ~(IA64_PSR_I | IA64_PSR_IC | IA64_PSR_BN);
    }
    else {
        regs->cr_ipsr = psr.i64;
        regs->cr_iip = PSCB(vcpu,iip);
    }
    PSCB(vcpu,interrupt_collection_enabled) = 1;
    vcpu_bsw1(vcpu);
    PSCB(vcpu,interrupt_delivery_enabled) = int_enable;
    return (IA64_NO_FAULT);
}

IA64FAULT vcpu_cover(VCPU *vcpu)
{
    // TODO: Only allowed for current vcpu
    REGS *regs = vcpu_regs(vcpu);

    if (!PSCB(vcpu,interrupt_collection_enabled)) {
        if (!PSCB(vcpu,incomplete_regframe))
            PSCB(vcpu,ifs) = regs->cr_ifs;
        else PSCB(vcpu,incomplete_regframe) = 0;
    }
    regs->cr_ifs = 0;
    return (IA64_NO_FAULT);
}

IA64FAULT vcpu_thash(VCPU *vcpu, UINT64 vadr, UINT64 *pval)
{
    UINT64 pta = PSCB(vcpu,pta);
    UINT64 pta_sz = (pta & IA64_PTA_SZ(0x3f)) >> IA64_PTA_SZ_BIT;
    UINT64 pta_base = pta & ~((1UL << IA64_PTA_BASE_BIT)-1);
    UINT64 Mask = (1L << pta_sz) - 1;
    UINT64 Mask_60_15 = (Mask >> 15) & 0x3fffffffffff;
    UINT64 compMask_60_15 = ~Mask_60_15;
    //UINT64 rr_ps = RR_TO_PS(get_rr(vadr));
    UINT64 rr_ps = vcpu_get_rr_ps(vcpu,vadr);
    UINT64 VHPT_offset = (vadr >> rr_ps) << 3;
    UINT64 VHPT_addr1 = vadr & 0xe000000000000000L;
    UINT64 VHPT_addr2a =
        ((pta_base >> 15) & 0x3fffffffffff) & compMask_60_15;
    UINT64 VHPT_addr2b =
        ((VHPT_offset >> 15) & 0x3fffffffffff) & Mask_60_15;
    UINT64 VHPT_addr3 = VHPT_offset & 0x7fff;
    UINT64 VHPT_addr = VHPT_addr1 | ((VHPT_addr2a | VHPT_addr2b) << 15) |
                       VHPT_addr3;

#if 0
    if (VHPT_addr1 == 0xe000000000000000L) {
        printf("vcpu_thash: thash unsupported with rr7 @%lx\n",
               PSCB(vcpu,iip));
        return (IA64_ILLOP_FAULT);
    }
#endif
    //verbose("vcpu_thash: vadr=%p, VHPT_addr=%p\n",vadr,VHPT_addr);
    *pval = VHPT_addr;
    return (IA64_NO_FAULT);
}
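
/* Added worked example (not in the original): with pta_sz == 20 (a 1MB
 * short-format VHPT) and rr_ps == 14 (16KB pages), the code above computes
 * VHPT_offset = (vadr >> 14) << 3, i.e. one 8-byte entry per page, and then
 * splices together the region bits (63..61) of vadr, the pta base bits
 * outside the 1MB window, and VHPT_offset modulo the table size inside it.
 * The hash is thus just the page index wrapped underneath the VHPT base. */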

IA64FAULT vcpu_ttag(VCPU *vcpu, UINT64 vadr, UINT64 *padr)
{
    printf("vcpu_ttag: ttag instruction unsupported\n");
    return (IA64_ILLOP_FAULT);
}

#define itir_ps(itir)   ((itir >> 2) & 0x3f)
#define itir_mask(itir) (~((1UL << itir_ps(itir)) - 1))
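
/* Added example: itir.ps is bits 2..7 of the itir.  For a 16KB page,
 * itir_ps(itir) == 14 and itir_mask(itir) == ~0x3fffUL, the mask that
 * keeps only the page-aligned part of an address or pte. */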

unsigned long vhpt_translate_count = 0;

IA64FAULT vcpu_translate(VCPU *vcpu, UINT64 address, BOOLEAN is_data, UINT64 *pteval, UINT64 *itir)
{
    unsigned long pta, pta_mask, iha, pte, ps;
    TR_ENTRY *trp;
    ia64_rr rr;

    if (!(address >> 61)) {
        if (!PSCB(vcpu,metaphysical_mode)) {
            REGS *regs = vcpu_regs(vcpu);
            unsigned long viip = PSCB(vcpu,iip);
            unsigned long vipsr = PSCB(vcpu,ipsr);
            unsigned long iip = regs->cr_iip;
            unsigned long ipsr = regs->cr_ipsr;
            printk("vcpu_translate: bad address %p, viip=%p, vipsr=%p, iip=%p, ipsr=%p continuing\n", address, viip, vipsr, iip, ipsr);
        }

        *pteval = (address & _PAGE_PPN_MASK) | __DIRTY_BITS | _PAGE_PL_2 | _PAGE_AR_RWX;
        *itir = PAGE_SHIFT << 2;
        phys_translate_count++;
        return IA64_NO_FAULT;
    }

    /* check translation registers */
    if ((trp = match_tr(vcpu,address))) {
        tr_translate_count++;
        *pteval = trp->page_flags;
        *itir = trp->itir;
        return IA64_NO_FAULT;
    }

    /* check 1-entry TLB */
    if ((trp = match_dtlb(vcpu,address))) {
        dtlb_translate_count++;
        if (vcpu->domain==dom0 && !in_tpa) *pteval = trp->page_flags;
        else *pteval = vcpu->arch.dtlb_pte;
        // printf("DTLB MATCH... NEW, DOM%s, %s\n", vcpu->domain==dom0?
        //        "0":"U", in_tpa?"vcpu_tpa":"ia64_do_page_fault");
        *itir = trp->itir;
        return IA64_NO_FAULT;
    }

    /* check guest VHPT */
    pta = PSCB(vcpu,pta);
    rr.rrval = PSCB(vcpu,rrs)[address>>61];
    if (rr.ve && (pta & IA64_PTA_VE))
    {
        if (pta & IA64_PTA_VF)
        {
            /* long format VHPT - not implemented */
            return (is_data ? IA64_DATA_TLB_VECTOR : IA64_INST_TLB_VECTOR);
        }
        else
        {
            /* short format VHPT */

            /* avoid recursively walking VHPT */
            pta_mask = (itir_mask(pta) << 3) >> 3;
            if (((address ^ pta) & pta_mask) == 0)
                return (is_data ? IA64_DATA_TLB_VECTOR : IA64_INST_TLB_VECTOR);

            vcpu_thash(vcpu, address, &iha);
            if (__copy_from_user(&pte, (void *)iha, sizeof(pte)) != 0)
                return IA64_VHPT_TRANS_VECTOR;

            /*
             * Optimisation: this VHPT walker aborts on not-present pages
             * instead of inserting a not-present translation; this allows
             * vectoring directly to the miss handler.
             */
            if (pte & _PAGE_P)
            {
                *pteval = pte;
                *itir = vcpu_get_itir_on_fault(vcpu,address);
                vhpt_translate_count++;
                return IA64_NO_FAULT;
            }
            return (is_data ? IA64_DATA_TLB_VECTOR : IA64_INST_TLB_VECTOR);
        }
    }
    return (is_data ? IA64_ALT_DATA_TLB_VECTOR : IA64_ALT_INST_TLB_VECTOR);
}

IA64FAULT vcpu_tpa(VCPU *vcpu, UINT64 vadr, UINT64 *padr)
{
    UINT64 pteval, itir, mask;
    IA64FAULT fault;

    in_tpa = 1;
    fault = vcpu_translate(vcpu, vadr, 1, &pteval, &itir);
    in_tpa = 0;
    if (fault == IA64_NO_FAULT)
    {
        mask = itir_mask(itir);
        *padr = (pteval & _PAGE_PPN_MASK & mask) | (vadr & ~mask);
        return (IA64_NO_FAULT);
    }
    else
    {
        PSCB(vcpu,tmp[0]) = vadr;  // save ifa in vcpu structure, then specify IA64_FORCED_IFA
        return (fault | IA64_FORCED_IFA);
    }
}

IA64FAULT vcpu_tak(VCPU *vcpu, UINT64 vadr, UINT64 *key)
{
    printf("vcpu_tak: tak instruction unsupported\n");
    return (IA64_ILLOP_FAULT);
    // HACK ALERT: tak does a thash for now
    //return vcpu_thash(vcpu,vadr,key);
}

/**************************************************************************
 VCPU debug breakpoint register access routines
**************************************************************************/

IA64FAULT vcpu_set_dbr(VCPU *vcpu, UINT64 reg, UINT64 val)
{
    // TODO: unimplemented DBRs return a reserved register fault
    // TODO: Should set Logical CPU state, not just physical
    ia64_set_dbr(reg,val);
    return (IA64_NO_FAULT);
}

IA64FAULT vcpu_set_ibr(VCPU *vcpu, UINT64 reg, UINT64 val)
{
    // TODO: unimplemented IBRs return a reserved register fault
    // TODO: Should set Logical CPU state, not just physical
    ia64_set_ibr(reg,val);
    return (IA64_NO_FAULT);
}

IA64FAULT vcpu_get_dbr(VCPU *vcpu, UINT64 reg, UINT64 *pval)
{
    // TODO: unimplemented DBRs return a reserved register fault
    UINT64 val = ia64_get_dbr(reg);
    *pval = val;
    return (IA64_NO_FAULT);
}

IA64FAULT vcpu_get_ibr(VCPU *vcpu, UINT64 reg, UINT64 *pval)
{
    // TODO: unimplemented IBRs return a reserved register fault
    UINT64 val = ia64_get_ibr(reg);
    *pval = val;
    return (IA64_NO_FAULT);
}

/**************************************************************************
 VCPU performance monitor register access routines
**************************************************************************/

IA64FAULT vcpu_set_pmc(VCPU *vcpu, UINT64 reg, UINT64 val)
{
    // TODO: Should set Logical CPU state, not just physical
    // NOTE: Writes to unimplemented PMC registers are discarded
    ia64_set_pmc(reg,val);
    return (IA64_NO_FAULT);
}

IA64FAULT vcpu_set_pmd(VCPU *vcpu, UINT64 reg, UINT64 val)
{
    // TODO: Should set Logical CPU state, not just physical
    // NOTE: Writes to unimplemented PMD registers are discarded
    ia64_set_pmd(reg,val);
    return (IA64_NO_FAULT);
}

IA64FAULT vcpu_get_pmc(VCPU *vcpu, UINT64 reg, UINT64 *pval)
{
    // NOTE: Reads from unimplemented PMC registers return zero
    UINT64 val = (UINT64)ia64_get_pmc(reg);
    *pval = val;
    return (IA64_NO_FAULT);
}

IA64FAULT vcpu_get_pmd(VCPU *vcpu, UINT64 reg, UINT64 *pval)
{
    // NOTE: Reads from unimplemented PMD registers return zero
    UINT64 val = (UINT64)ia64_get_pmd(reg);
    *pval = val;
    return (IA64_NO_FAULT);
}

/**************************************************************************
 VCPU banked general register access routines
**************************************************************************/

IA64FAULT vcpu_bsw0(VCPU *vcpu)
{
    // TODO: Only allowed for current vcpu
    REGS *regs = vcpu_regs(vcpu);
    unsigned long *r = &regs->r16;
    unsigned long *b0 = &PSCB(vcpu,bank0_regs[0]);
    unsigned long *b1 = &PSCB(vcpu,bank1_regs[0]);
    int i;

    if (PSCB(vcpu,banknum)) {
        for (i = 0; i < 16; i++) { *b1++ = *r; *r++ = *b0++; }
        PSCB(vcpu,banknum) = 0;
    }
    return (IA64_NO_FAULT);
}

IA64FAULT vcpu_bsw1(VCPU *vcpu)
{
    // TODO: Only allowed for current vcpu
    REGS *regs = vcpu_regs(vcpu);
    unsigned long *r = &regs->r16;
    unsigned long *b0 = &PSCB(vcpu,bank0_regs[0]);
    unsigned long *b1 = &PSCB(vcpu,bank1_regs[0]);
    int i;

    if (!PSCB(vcpu,banknum)) {
        for (i = 0; i < 16; i++) { *b0++ = *r; *r++ = *b1++; }
        PSCB(vcpu,banknum) = 1;
    }
    return (IA64_NO_FAULT);
}

/**************************************************************************
 VCPU cpuid access routines
**************************************************************************/

IA64FAULT vcpu_get_cpuid(VCPU *vcpu, UINT64 reg, UINT64 *pval)
{
    // FIXME: This could get called as a result of a rsvd-reg fault
    // if reg > 3
    switch (reg) {
        case 0:
            memcpy(pval,"Xen/ia64",8);
            break;
        case 1:
            *pval = 0;
            break;
        case 2:
            *pval = 0;
            break;
        case 3:
            *pval = ia64_get_cpuid(3);
            break;
        case 4:
            *pval = ia64_get_cpuid(4);
            break;
        default:
            if (reg > (ia64_get_cpuid(3) & 0xff))
                return IA64_RSVDREG_FAULT;
            *pval = ia64_get_cpuid(reg);
            break;
    }
    return (IA64_NO_FAULT);
}

/**************************************************************************
 VCPU region register access routines
**************************************************************************/

unsigned long vcpu_get_rr_ve(VCPU *vcpu,UINT64 vadr)
{
    ia64_rr rr;

    rr.rrval = PSCB(vcpu,rrs)[vadr>>61];
    return (rr.ve);
}

IA64FAULT vcpu_set_rr(VCPU *vcpu, UINT64 reg, UINT64 val)
{
    PSCB(vcpu,rrs)[reg>>61] = val;
    // warning: set_one_rr() does it "live"
    set_one_rr(reg,val);
    return (IA64_NO_FAULT);
}

IA64FAULT vcpu_get_rr(VCPU *vcpu, UINT64 reg, UINT64 *pval)
{
    UINT64 val = PSCB(vcpu,rrs)[reg>>61];
    *pval = val;
    return (IA64_NO_FAULT);
}

/**************************************************************************
 VCPU protection key register access routines
**************************************************************************/

IA64FAULT vcpu_get_pkr(VCPU *vcpu, UINT64 reg, UINT64 *pval)
{
#ifndef PKR_USE_FIXED
    printk("vcpu_get_pkr: called, not implemented yet\n");
    return IA64_ILLOP_FAULT;
#else
    UINT64 val = (UINT64)ia64_get_pkr(reg);
    *pval = val;
    return (IA64_NO_FAULT);
#endif
}

IA64FAULT vcpu_set_pkr(VCPU *vcpu, UINT64 reg, UINT64 val)
{
#ifndef PKR_USE_FIXED
    printk("vcpu_set_pkr: called, not implemented yet\n");
    return IA64_ILLOP_FAULT;
#else
//  if (reg >= NPKRS) return (IA64_ILLOP_FAULT);
    vcpu->pkrs[reg] = val;
    ia64_set_pkr(reg,val);
    return (IA64_NO_FAULT);
#endif
}

/**************************************************************************
 VCPU translation register access routines
**************************************************************************/

static void vcpu_purge_tr_entry(TR_ENTRY *trp)
{
    trp->p = 0;
}

static void vcpu_set_tr_entry(TR_ENTRY *trp, UINT64 pte, UINT64 itir, UINT64 ifa)
{
    UINT64 ps;

    trp->itir = itir;
    trp->rid = virtualize_rid(current, get_rr(ifa) & RR_RID_MASK);
    trp->p = 1;
    ps = trp->ps;
    trp->page_flags = pte;
    if (trp->pl < 2) trp->pl = 2;
    trp->vadr = ifa & ~0xfff;
    if (ps > 12) { // "ignore" relevant low-order bits
        trp->ppn &= ~((1UL<<(ps-12))-1);
        trp->vadr &= ~((1UL<<ps)-1);
    }
}
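
/* Added worked example: for a 64KB mapping (ps == 16) the alignment code
 * above clears the low 4 bits of ppn (ppn is in 4KB units, mask
 * 2^(16-12)-1) and the low 16 bits of vadr, so both become 64KB aligned. */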

TR_ENTRY *vcpu_match_tr_entry(VCPU *vcpu, TR_ENTRY *trp, UINT64 ifa, int count)
{
    unsigned long rid = (get_rr(ifa) & RR_RID_MASK);
    int i;

    for (i = 0; i < count; i++, trp++) {
        if (!trp->p) continue;
        if (physicalize_rid(vcpu,trp->rid) != rid) continue;
        if (ifa < trp->vadr) continue;
        if (ifa >= (trp->vadr + (1L << trp->ps)) - 1) continue;
        //if (trp->key && !match_pkr(vcpu,trp->key)) continue;
        return trp;
    }
    return 0;
}

TR_ENTRY *match_tr(VCPU *vcpu, unsigned long ifa)
{
    TR_ENTRY *trp;

    trp = vcpu_match_tr_entry(vcpu,vcpu->arch.dtrs,ifa,NDTRS);
    if (trp) return trp;
    trp = vcpu_match_tr_entry(vcpu,vcpu->arch.itrs,ifa,NITRS);
    if (trp) return trp;
    return 0;
}

IA64FAULT vcpu_itr_d(VCPU *vcpu, UINT64 slot, UINT64 pte,
                     UINT64 itir, UINT64 ifa)
{
    TR_ENTRY *trp;

    if (slot >= NDTRS) return IA64_RSVDREG_FAULT;
    trp = &PSCBX(vcpu,dtrs[slot]);
    //printf("***** itr.d: setting slot %d: ifa=%p\n",slot,ifa);
    vcpu_set_tr_entry(trp,pte,itir,ifa);
    return IA64_NO_FAULT;
}

IA64FAULT vcpu_itr_i(VCPU *vcpu, UINT64 slot, UINT64 pte,
                     UINT64 itir, UINT64 ifa)
{
    TR_ENTRY *trp;

    if (slot >= NITRS) return IA64_RSVDREG_FAULT;
    trp = &PSCBX(vcpu,itrs[slot]);
    //printf("***** itr.i: setting slot %d: ifa=%p\n",slot,ifa);
    vcpu_set_tr_entry(trp,pte,itir,ifa);
    return IA64_NO_FAULT;
}

/**************************************************************************
 VCPU translation cache access routines
**************************************************************************/

void foobar(void) { /*vcpu_verbose = 1;*/ }

extern struct domain *dom0;

void vcpu_itc_no_srlz(VCPU *vcpu, UINT64 IorD, UINT64 vaddr, UINT64 pte, UINT64 mp_pte, UINT64 logps)
{
    unsigned long psr;
    unsigned long ps = (vcpu->domain==dom0) ? logps : PAGE_SHIFT;

    // FIXME: validate ifa here (not in Xen space), COULD MACHINE CHECK!
    // FIXME, must be inlined or potential for nested fault here!
    if ((vcpu->domain==dom0) && (logps < PAGE_SHIFT)) {
        printf("vcpu_itc_no_srlz: domain0 use of smaller page size!\n");
        //FIXME: kill domain here
        while(1);
    }
    psr = ia64_clear_ic();
    ia64_itc(IorD,vaddr,pte,ps); // FIXME: look for bigger mappings
    ia64_set_psr(psr);
    // ia64_srlz_i(); // no srls req'd, will rfi later
#ifdef VHPT_GLOBAL
    if (vcpu->domain==dom0 && ((vaddr >> 61) == 7)) {
        // FIXME: this is dangerous... vhpt_flush_address ensures these
        // addresses never get flushed.  More work needed if this
        // ever happens.
        //printf("vhpt_insert(%p,%p,%p)\n",vaddr,pte,1L<<logps);
        if (logps > PAGE_SHIFT) vhpt_multiple_insert(vaddr,pte,logps);
        else vhpt_insert(vaddr,pte,logps<<2);
    }
    // even if domain pagesize is larger than PAGE_SIZE, just put
    // PAGE_SIZE mapping in the vhpt for now, else purging is complicated
    else vhpt_insert(vaddr,pte,PAGE_SHIFT<<2);
#endif
    if ((mp_pte == -1UL) || (IorD & 0x4)) return;  // don't place in 1-entry TLB
    if (IorD & 0x1) {
        vcpu_set_tr_entry(&PSCBX(vcpu,itlb),pte,ps<<2,vaddr);
        PSCBX(vcpu,itlb_pte) = mp_pte;
    }
    if (IorD & 0x2) {
        vcpu_set_tr_entry(&PSCBX(vcpu,dtlb),pte,ps<<2,vaddr);
        PSCBX(vcpu,dtlb_pte) = mp_pte;
    }
}

// NOTE: returns a physical pte, NOT a "metaphysical" pte, so do not check
// the physical address contained for correctness
TR_ENTRY *match_dtlb(VCPU *vcpu, unsigned long ifa)
{
    TR_ENTRY *trp;

    if ((trp = vcpu_match_tr_entry(vcpu,&vcpu->arch.dtlb,ifa,1)))
        return (&vcpu->arch.dtlb);
    return 0UL;
}

IA64FAULT vcpu_itc_d(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa)
{
    unsigned long pteval, logps = (itir >> 2) & 0x3f;
    unsigned long translate_domain_pte(UINT64,UINT64,UINT64);

    if (logps < PAGE_SHIFT) {
        printf("vcpu_itc_d: domain trying to use smaller page size!\n");
        //FIXME: kill domain here
        while(1);
    }
    //itir = (itir & ~0xfc) | (PAGE_SHIFT<<2); // ignore domain's pagesize
    pteval = translate_domain_pte(pte,ifa,itir);
    if (!pteval) return IA64_ILLOP_FAULT;
    vcpu_itc_no_srlz(vcpu,2,ifa,pteval,pte,logps);
    return IA64_NO_FAULT;
}

IA64FAULT vcpu_itc_i(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa)
{
    unsigned long pteval, logps = (itir >> 2) & 0x3f;
    unsigned long translate_domain_pte(UINT64,UINT64,UINT64);

    // FIXME: validate ifa here (not in Xen space), COULD MACHINE CHECK!
    if (logps < PAGE_SHIFT) {
        printf("vcpu_itc_i: domain trying to use smaller page size!\n");
        //FIXME: kill domain here
        while(1);
    }
    //itir = (itir & ~0xfc) | (PAGE_SHIFT<<2); // ignore domain's pagesize
    pteval = translate_domain_pte(pte,ifa,itir);
    // FIXME: what to do if bad physical address? (machine check?)
    if (!pteval) return IA64_ILLOP_FAULT;
    vcpu_itc_no_srlz(vcpu,1,ifa,pteval,pte,logps);
    return IA64_NO_FAULT;
}

IA64FAULT vcpu_ptc_l(VCPU *vcpu, UINT64 vadr, UINT64 addr_range)
{
    printk("vcpu_ptc_l: called, not implemented yet\n");
    return IA64_ILLOP_FAULT;
}

// At privlvl=0, fc performs no access rights or protection key checks, while
// at privlvl!=0, fc performs access rights checks as if it were a 1-byte
// read but no protection key check.  Thus in order to avoid an unexpected
// access rights fault, we have to translate the virtual address to a
// physical address (possibly via a metaphysical address) and do the fc
// on the physical address, which is guaranteed to flush the same cache line
IA64FAULT vcpu_fc(VCPU *vcpu, UINT64 vadr)
{
    // TODO: Only allowed for current vcpu
    UINT64 mpaddr, paddr;
    IA64FAULT fault;
    unsigned long translate_domain_mpaddr(unsigned long);
    IA64FAULT vcpu_tpa(VCPU *, UINT64, UINT64 *);

    fault = vcpu_tpa(vcpu, vadr, &mpaddr);
    if (fault == IA64_NO_FAULT) {
        paddr = translate_domain_mpaddr(mpaddr);
        ia64_fc(__va(paddr));
    }
    return fault;
}
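
/* Added note: since paddr is the machine translation of vadr, the
 * identity-mapped __va(paddr) names the same physical cache line the
 * guest's virtual address would, so the fc above flushes the right line
 * without risking an access-rights fault at guest privilege level. */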

int ptce_count = 0;
IA64FAULT vcpu_ptc_e(VCPU *vcpu, UINT64 vadr)
{
    // Note that this only needs to be called once, i.e. the
    // architected loop to purge the entire TLB, should use
    // base = stride1 = stride2 = 0, count0 = count1 = 1

#ifdef VHPT_GLOBAL
    vhpt_flush();  // FIXME: This is overdoing it
#endif
    local_flush_tlb_all();
    // just invalidate the "whole" tlb
    vcpu_purge_tr_entry(&PSCBX(vcpu,dtlb));
    vcpu_purge_tr_entry(&PSCBX(vcpu,itlb));
    return IA64_NO_FAULT;
}
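
#if 0
/* Illustrative sketch (hypothetical, not part of this file): the architected
 * ptc.e purge loop a guest would normally run, shaped like Linux/ia64's
 * local_flush_tlb_all() and using the ia64_ptce() intrinsic.  Because
 * vcpu_ptc_e flushes everything on the first call, Xen can report
 * base = stride1 = stride2 = 0 and count0 = count1 = 1. */
static void ptc_e_loop(unsigned long base,
                       unsigned long count0, unsigned long count1,
                       unsigned long stride1, unsigned long stride2)
{
    unsigned long i, j, addr = base;

    for (i = 0; i < count0; i++) {
        for (j = 0; j < count1; j++) {
            ia64_ptce(addr);  // purge one translation cache entry
            addr += stride2;
        }
        addr += stride1;
    }
}
#endif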

IA64FAULT vcpu_ptc_g(VCPU *vcpu, UINT64 vadr, UINT64 addr_range)
{
    printk("vcpu_ptc_g: called, not implemented yet\n");
    return IA64_ILLOP_FAULT;
}

IA64FAULT vcpu_ptc_ga(VCPU *vcpu,UINT64 vadr,UINT64 addr_range)
{
    extern void ia64_global_tlb_purge(UINT64 start, UINT64 end, UINT64 nbits);
    // FIXME: validate not flushing Xen addresses
    // if (Xen address) return(IA64_ILLOP_FAULT);
    // FIXME: ??breaks if domain PAGE_SIZE < Xen PAGE_SIZE
    //printf("######## vcpu_ptc_ga(%p,%p) ##############\n",vadr,addr_range);
#ifdef VHPT_GLOBAL
    vhpt_flush_address(vadr,addr_range);
#endif
    ia64_global_tlb_purge(vadr,vadr+addr_range,PAGE_SHIFT);
    vcpu_purge_tr_entry(&PSCBX(vcpu,dtlb));
    vcpu_purge_tr_entry(&PSCBX(vcpu,itlb));
    return IA64_NO_FAULT;
}

IA64FAULT vcpu_ptr_d(VCPU *vcpu,UINT64 vadr,UINT64 addr_range)
{
    printf("vcpu_ptr_d: Purging TLB is unsupported\n");
    return (IA64_ILLOP_FAULT);
}

IA64FAULT vcpu_ptr_i(VCPU *vcpu,UINT64 vadr,UINT64 addr_range)
{
    printf("vcpu_ptr_i: Purging TLB is unsupported\n");
    return (IA64_ILLOP_FAULT);
}

void vcpu_set_regs(VCPU *vcpu, REGS *regs)
{
    vcpu->arch.regs = regs;
}