ia64/xen-unstable

view xen/arch/ia64/xen/vcpu.c @ 6462:af3750d1ec53

Bug fixes from Kevin (x2) and Anthony
Missing prototypes (Kevin)
Bad n_rid_blocks computation (Anthony)
Bad pte when single-entry dtlb lookup is successful (Kevin)
author djm@kirby.fc.hp.com
date Fri Sep 02 11:59:08 2005 -0600 (2005-09-02)
parents 3ca4ca7a9cc2
children 21ad2828dbdf
/*
 * Virtualized CPU functions
 *
 * Copyright (C) 2004 Hewlett-Packard Co.
 *	Dan Magenheimer (dan.magenheimer@hp.com)
 *
 */

#include <linux/sched.h>
#include <public/arch-ia64.h>
#include <asm/ia64_int.h>
#include <asm/vcpu.h>
#include <asm/regionreg.h>
#include <asm/tlb.h>
#include <asm/processor.h>
#include <asm/delay.h>
#include <asm/vmx_vcpu.h>

typedef union {
	struct ia64_psr ia64_psr;
	unsigned long i64;
} PSR;

//typedef struct pt_regs REGS;
//typedef struct domain VCPU;

// this def for vcpu_regs won't work if kernel stack is present
#define vcpu_regs(vcpu) ((struct pt_regs *) vcpu->arch.regs)
#define PSCB(x,y) VCPU(x,y)
#define PSCBX(x,y) x->arch.y
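// Note on the two accessors above: PSCB(x,y) expands to the VCPU(x,y)
// accessor for privileged-state fields kept in the per-vcpu area shared
// with the guest, while PSCBX(x,y) names Xen-private per-vcpu state kept
// directly in x->arch.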
#define TRUE	1
#define FALSE	0
#define IA64_PTA_SZ_BIT		2
#define IA64_PTA_VF_BIT		8
#define IA64_PTA_BASE_BIT	15
#define IA64_PTA_LFMT		(1UL << IA64_PTA_VF_BIT)
#define IA64_PTA_SZ(x)		(x##UL << IA64_PTA_SZ_BIT)

#define STATIC
#ifdef PRIVOP_ADDR_COUNT
struct privop_addr_count privop_addr_counter[PRIVOP_COUNT_NINSTS] = {
	{ "=ifa",  { 0 }, { 0 }, 0 },
	{ "thash", { 0 }, { 0 }, 0 },
	0
};
extern void privop_count_addr(unsigned long addr, int inst);
#define PRIVOP_COUNT_ADDR(regs,inst) privop_count_addr(regs->cr_iip,inst)
#else
#define PRIVOP_COUNT_ADDR(x,y) do {} while (0)
#endif
unsigned long dtlb_translate_count = 0;
unsigned long tr_translate_count = 0;
unsigned long phys_translate_count = 0;

unsigned long vcpu_verbose = 0;
#define verbose(a...) do {if (vcpu_verbose) printf(a);} while(0)

extern TR_ENTRY *match_tr(VCPU *vcpu, unsigned long ifa);
extern TR_ENTRY *match_dtlb(VCPU *vcpu, unsigned long ifa);
/**************************************************************************
VCPU general register access routines
**************************************************************************/

UINT64
vcpu_get_gr(VCPU *vcpu, unsigned reg)
{
	REGS *regs = vcpu_regs(vcpu);
	UINT64 val;

	if (!reg) return 0;
	getreg(reg,&val,0,regs);	// FIXME: handle NATs later
	return val;
}

// returns:
//   IA64_ILLOP_FAULT if the register would cause an Illegal Operation fault
//   IA64_NO_FAULT otherwise
IA64FAULT
vcpu_set_gr(VCPU *vcpu, unsigned reg, UINT64 value)
{
	REGS *regs = vcpu_regs(vcpu);
	long sof = (regs->cr_ifs) & 0x7f;

	if (!reg) return IA64_ILLOP_FAULT;
	if (reg >= sof + 32) return IA64_ILLOP_FAULT;
	setreg(reg,value,0,regs);	// FIXME: handle NATs later
	return IA64_NO_FAULT;
}
/**************************************************************************
VCPU privileged application register access routines
**************************************************************************/

IA64FAULT vcpu_set_ar(VCPU *vcpu, UINT64 reg, UINT64 val)
{
	if (reg == 44) return (vcpu_set_itc(vcpu,val));
	else if (reg == 27) return (IA64_ILLOP_FAULT);
	else if (reg == 24)
		printf("warning: setting ar.eflg is a no-op; no IA-32 support\n");
	else if (reg > 7) return (IA64_ILLOP_FAULT);
	else PSCB(vcpu,krs[reg]) = val;
	return IA64_NO_FAULT;
}

IA64FAULT vcpu_get_ar(VCPU *vcpu, UINT64 reg, UINT64 *val)
{
	if (reg == 24)
		printf("warning: getting ar.eflg is a no-op; no IA-32 support\n");
	else if (reg > 7) return (IA64_ILLOP_FAULT);
	else *val = PSCB(vcpu,krs[reg]);
	return IA64_NO_FAULT;
}
/**************************************************************************
VCPU processor status register access routines
**************************************************************************/

void vcpu_set_metaphysical_mode(VCPU *vcpu, BOOLEAN newmode)
{
	/* only do something if mode changes */
	if (!!newmode ^ !!PSCB(vcpu,metaphysical_mode)) {
		if (newmode) set_metaphysical_rr0();
		else if (PSCB(vcpu,rrs[0]) != -1)
			set_one_rr(0, PSCB(vcpu,rrs[0]));
		PSCB(vcpu,metaphysical_mode) = newmode;
	}
}
IA64FAULT vcpu_reset_psr_dt(VCPU *vcpu)
{
	vcpu_set_metaphysical_mode(vcpu,TRUE);
	return IA64_NO_FAULT;
}

IA64FAULT vcpu_reset_psr_sm(VCPU *vcpu, UINT64 imm24)
{
	struct ia64_psr psr, imm, *ipsr;
	REGS *regs = vcpu_regs(vcpu);

	//PRIVOP_COUNT_ADDR(regs,_RSM);
	// TODO: All of these bits need to be virtualized
	// TODO: Only allowed for current vcpu
	__asm__ __volatile ("mov %0=psr;;" : "=r"(psr) :: "memory");
	ipsr = (struct ia64_psr *)&regs->cr_ipsr;
	imm = *(struct ia64_psr *)&imm24;
	// interrupt flag
	if (imm.i) PSCB(vcpu,interrupt_delivery_enabled) = 0;
	if (imm.ic) PSCB(vcpu,interrupt_collection_enabled) = 0;
	// interrupt collection flag
	//if (imm.ic) PSCB(vcpu,interrupt_delivery_enabled) = 0;
	// just handle psr.up and psr.pp for now
	if (imm24 & ~(IA64_PSR_BE | IA64_PSR_PP | IA64_PSR_UP | IA64_PSR_SP
		| IA64_PSR_I | IA64_PSR_IC | IA64_PSR_DT
		| IA64_PSR_DFL | IA64_PSR_DFH))
			return (IA64_ILLOP_FAULT);
	if (imm.dfh) ipsr->dfh = 0;
	if (imm.dfl) ipsr->dfl = 0;
	if (imm.pp) { ipsr->pp = 0; psr.pp = 0; }
	if (imm.up) { ipsr->up = 0; psr.up = 0; }
	if (imm.sp) { ipsr->sp = 0; psr.sp = 0; }
	if (imm.be) ipsr->be = 0;
	if (imm.dt) vcpu_set_metaphysical_mode(vcpu,TRUE);
	__asm__ __volatile (";; mov psr.l=%0;; srlz.d"::"r"(psr):"memory");
	return IA64_NO_FAULT;
}
extern UINT64 vcpu_check_pending_interrupts(VCPU *vcpu);
#define SPURIOUS_VECTOR 0xf

IA64FAULT vcpu_set_psr_dt(VCPU *vcpu)
{
	vcpu_set_metaphysical_mode(vcpu,FALSE);
	return IA64_NO_FAULT;
}

IA64FAULT vcpu_set_psr_i(VCPU *vcpu)
{
	PSCB(vcpu,interrupt_delivery_enabled) = 1;
	PSCB(vcpu,interrupt_collection_enabled) = 1;
	return IA64_NO_FAULT;
}
IA64FAULT vcpu_set_psr_sm(VCPU *vcpu, UINT64 imm24)
{
	struct ia64_psr psr, imm, *ipsr;
	REGS *regs = vcpu_regs(vcpu);
	UINT64 mask, enabling_interrupts = 0;

	//PRIVOP_COUNT_ADDR(regs,_SSM);
	// TODO: All of these bits need to be virtualized
	__asm__ __volatile ("mov %0=psr;;" : "=r"(psr) :: "memory");
	imm = *(struct ia64_psr *)&imm24;
	ipsr = (struct ia64_psr *)&regs->cr_ipsr;
	// just handle psr.sp,pp and psr.i,ic (and user mask) for now
	mask = IA64_PSR_PP|IA64_PSR_SP|IA64_PSR_I|IA64_PSR_IC|IA64_PSR_UM |
		IA64_PSR_DT|IA64_PSR_DFL|IA64_PSR_DFH;
	if (imm24 & ~mask) return (IA64_ILLOP_FAULT);
	if (imm.dfh) ipsr->dfh = 1;
	if (imm.dfl) ipsr->dfl = 1;
	if (imm.pp) { ipsr->pp = 1; psr.pp = 1; }
	if (imm.sp) { ipsr->sp = 1; psr.sp = 1; }
	if (imm.i) {
		if (!PSCB(vcpu,interrupt_delivery_enabled)) {
			//printf("vcpu_set_psr_sm: psr.ic 0->1 ");
			enabling_interrupts = 1;
		}
		PSCB(vcpu,interrupt_delivery_enabled) = 1;
	}
	if (imm.ic) PSCB(vcpu,interrupt_collection_enabled) = 1;
	// TODO: do this faster
	if (imm.mfl) { ipsr->mfl = 1; psr.mfl = 1; }
	if (imm.mfh) { ipsr->mfh = 1; psr.mfh = 1; }
	if (imm.ac) { ipsr->ac = 1; psr.ac = 1; }
	if (imm.up) { ipsr->up = 1; psr.up = 1; }
	if (imm.be) {
		printf("*** DOMAIN TRYING TO TURN ON BIG-ENDIAN!!!\n");
		return (IA64_ILLOP_FAULT);
	}
	if (imm.dt) vcpu_set_metaphysical_mode(vcpu,FALSE);
	__asm__ __volatile (";; mov psr.l=%0;; srlz.d"::"r"(psr):"memory");
#if 0 // now done with deliver_pending_interrupts
	if (enabling_interrupts) {
		if (vcpu_check_pending_interrupts(vcpu) != SPURIOUS_VECTOR) {
			//printf("with interrupts pending\n");
			return IA64_EXTINT_VECTOR;
		}
		//else printf("but nothing pending\n");
	}
#endif
	if (enabling_interrupts &&
		vcpu_check_pending_interrupts(vcpu) != SPURIOUS_VECTOR)
			PSCB(vcpu,pending_interruption) = 1;
	return IA64_NO_FAULT;
}
IA64FAULT vcpu_set_psr_l(VCPU *vcpu, UINT64 val)
{
	struct ia64_psr psr, newpsr, *ipsr;
	REGS *regs = vcpu_regs(vcpu);
	UINT64 enabling_interrupts = 0;

	// TODO: All of these bits need to be virtualized
	__asm__ __volatile ("mov %0=psr;;" : "=r"(psr) :: "memory");
	newpsr = *(struct ia64_psr *)&val;
	ipsr = (struct ia64_psr *)&regs->cr_ipsr;
	// just handle psr.up and psr.pp for now
	//if (val & ~(IA64_PSR_PP | IA64_PSR_UP | IA64_PSR_SP)) return (IA64_ILLOP_FAULT);
	// however trying to set other bits can't be an error as it is in ssm
	if (newpsr.dfh) ipsr->dfh = 1;
	if (newpsr.dfl) ipsr->dfl = 1;
	if (newpsr.pp) { ipsr->pp = 1; psr.pp = 1; }
	if (newpsr.up) { ipsr->up = 1; psr.up = 1; }
	if (newpsr.sp) { ipsr->sp = 1; psr.sp = 1; }
	if (newpsr.i) {
		if (!PSCB(vcpu,interrupt_delivery_enabled))
			enabling_interrupts = 1;
		PSCB(vcpu,interrupt_delivery_enabled) = 1;
	}
	if (newpsr.ic) PSCB(vcpu,interrupt_collection_enabled) = 1;
	if (newpsr.mfl) { ipsr->mfl = 1; psr.mfl = 1; }
	if (newpsr.mfh) { ipsr->mfh = 1; psr.mfh = 1; }
	if (newpsr.ac) { ipsr->ac = 1; psr.ac = 1; }
	if (newpsr.up) { ipsr->up = 1; psr.up = 1; }
	if (newpsr.dt && newpsr.rt) vcpu_set_metaphysical_mode(vcpu,FALSE);
	else vcpu_set_metaphysical_mode(vcpu,TRUE);
	if (newpsr.be) {
		printf("*** DOMAIN TRYING TO TURN ON BIG-ENDIAN!!!\n");
		return (IA64_ILLOP_FAULT);
	}
	//__asm__ __volatile (";; mov psr.l=%0;; srlz.d"::"r"(psr):"memory");
#if 0 // now done with deliver_pending_interrupts
	if (enabling_interrupts) {
		if (vcpu_check_pending_interrupts(vcpu) != SPURIOUS_VECTOR)
			return IA64_EXTINT_VECTOR;
	}
#endif
	if (enabling_interrupts &&
		vcpu_check_pending_interrupts(vcpu) != SPURIOUS_VECTOR)
			PSCB(vcpu,pending_interruption) = 1;
	return IA64_NO_FAULT;
}
IA64FAULT vcpu_get_psr(VCPU *vcpu, UINT64 *pval)
{
	UINT64 psr;
	struct ia64_psr newpsr;

	// TODO: This needs to return a "filtered" view of
	// the psr, not the actual psr.  Probably the psr needs
	// to be a field in regs (in addition to ipsr).
	__asm__ __volatile ("mov %0=psr;;" : "=r"(psr) :: "memory");
	newpsr = *(struct ia64_psr *)&psr;
	if (newpsr.cpl == 2) newpsr.cpl = 0;
	if (PSCB(vcpu,interrupt_delivery_enabled)) newpsr.i = 1;
	else newpsr.i = 0;
	if (PSCB(vcpu,interrupt_collection_enabled)) newpsr.ic = 1;
	else newpsr.ic = 0;
	*pval = *(unsigned long *)&newpsr;
	return IA64_NO_FAULT;
}

BOOLEAN vcpu_get_psr_ic(VCPU *vcpu)
{
	return !!PSCB(vcpu,interrupt_collection_enabled);
}

BOOLEAN vcpu_get_psr_i(VCPU *vcpu)
{
	return !!PSCB(vcpu,interrupt_delivery_enabled);
}
UINT64 vcpu_get_ipsr_int_state(VCPU *vcpu,UINT64 prevpsr)
{
	UINT64 dcr = PSCBX(vcpu,dcr);
	PSR psr = {0};

	//printf("*** vcpu_get_ipsr_int_state (0x%016lx)...",prevpsr);
	psr.i64 = prevpsr;
	psr.ia64_psr.be = 0; if (dcr & IA64_DCR_BE) psr.ia64_psr.be = 1;
	psr.ia64_psr.pp = 0; if (dcr & IA64_DCR_PP) psr.ia64_psr.pp = 1;
	psr.ia64_psr.ic = PSCB(vcpu,interrupt_collection_enabled);
	psr.ia64_psr.i = PSCB(vcpu,interrupt_delivery_enabled);
	psr.ia64_psr.bn = PSCB(vcpu,banknum);
	psr.ia64_psr.dt = 1; psr.ia64_psr.it = 1; psr.ia64_psr.rt = 1;
	if (psr.ia64_psr.cpl == 2) psr.ia64_psr.cpl = 0; // !!!! fool domain
	// psr.pk = 1;
	//printf("returns 0x%016lx...",psr.i64);
	return psr.i64;
}
/**************************************************************************
VCPU control register access routines
**************************************************************************/

IA64FAULT vcpu_get_dcr(VCPU *vcpu, UINT64 *pval)
{
	extern unsigned long privop_trace;
	//privop_trace=0;
	//verbose("vcpu_get_dcr: called @%p\n",PSCB(vcpu,iip));
	// Reads of cr.dcr on Xen always have the sign bit set, so
	// a domain can differentiate whether it is running on SP or not
	*pval = PSCBX(vcpu,dcr) | 0x8000000000000000L;
	return (IA64_NO_FAULT);
}

IA64FAULT vcpu_get_iva(VCPU *vcpu, UINT64 *pval)
{
	*pval = PSCBX(vcpu,iva) & ~0x7fffL;
	return (IA64_NO_FAULT);
}

IA64FAULT vcpu_get_pta(VCPU *vcpu, UINT64 *pval)
{
	*pval = PSCB(vcpu,pta);
	return (IA64_NO_FAULT);
}

IA64FAULT vcpu_get_ipsr(VCPU *vcpu, UINT64 *pval)
{
	//REGS *regs = vcpu_regs(vcpu);
	//*pval = regs->cr_ipsr;
	*pval = PSCB(vcpu,ipsr);
	return (IA64_NO_FAULT);
}

IA64FAULT vcpu_get_isr(VCPU *vcpu, UINT64 *pval)
{
	*pval = PSCB(vcpu,isr);
	return (IA64_NO_FAULT);
}

IA64FAULT vcpu_get_iip(VCPU *vcpu, UINT64 *pval)
{
	//REGS *regs = vcpu_regs(vcpu);
	//*pval = regs->cr_iip;
	*pval = PSCB(vcpu,iip);
	return (IA64_NO_FAULT);
}

IA64FAULT vcpu_get_ifa(VCPU *vcpu, UINT64 *pval)
{
	UINT64 val = PSCB(vcpu,ifa);
	REGS *regs = vcpu_regs(vcpu);
	PRIVOP_COUNT_ADDR(regs,_GET_IFA);
	*pval = val;
	return (IA64_NO_FAULT);
}
unsigned long vcpu_get_rr_ps(VCPU *vcpu,UINT64 vadr)
{
	ia64_rr rr;

	rr.rrval = PSCB(vcpu,rrs)[vadr>>61];
	return(rr.ps);
}

unsigned long vcpu_get_rr_rid(VCPU *vcpu,UINT64 vadr)
{
	ia64_rr rr;

	rr.rrval = PSCB(vcpu,rrs)[vadr>>61];
	return(rr.rid);
}

unsigned long vcpu_get_itir_on_fault(VCPU *vcpu, UINT64 ifa)
{
	ia64_rr rr;

	rr.rrval = 0;
	rr.ps = vcpu_get_rr_ps(vcpu,ifa);
	rr.rid = vcpu_get_rr_rid(vcpu,ifa);
	return (rr.rrval);
}
IA64FAULT vcpu_get_itir(VCPU *vcpu, UINT64 *pval)
{
	UINT64 val = PSCB(vcpu,itir);
	*pval = val;
	return (IA64_NO_FAULT);
}

IA64FAULT vcpu_get_iipa(VCPU *vcpu, UINT64 *pval)
{
	UINT64 val = PSCB(vcpu,iipa);
	// SP entry code does not save iipa yet nor does it get
	// properly delivered in the pscb
	printf("*** vcpu_get_iipa: cr.iipa not fully implemented yet!!\n");
	*pval = val;
	return (IA64_NO_FAULT);
}

IA64FAULT vcpu_get_ifs(VCPU *vcpu, UINT64 *pval)
{
	//PSCB(vcpu,ifs) = PSCB(vcpu)->regs.cr_ifs;
	//*pval = PSCB(vcpu,regs).cr_ifs;
	*pval = PSCB(vcpu,ifs);
	PSCB(vcpu,incomplete_regframe) = 0;
	return (IA64_NO_FAULT);
}

IA64FAULT vcpu_get_iim(VCPU *vcpu, UINT64 *pval)
{
	UINT64 val = PSCB(vcpu,iim);
	*pval = val;
	return (IA64_NO_FAULT);
}

IA64FAULT vcpu_get_iha(VCPU *vcpu, UINT64 *pval)
{
	//return vcpu_thash(vcpu,PSCB(vcpu,ifa),pval);
	UINT64 val = PSCB(vcpu,iha);
	REGS *regs = vcpu_regs(vcpu);
	PRIVOP_COUNT_ADDR(regs,_THASH);
	*pval = val;
	return (IA64_NO_FAULT);
}
IA64FAULT vcpu_set_dcr(VCPU *vcpu, UINT64 val)
{
	extern unsigned long privop_trace;
	//privop_trace=1;
	// Reads of cr.dcr on SP always have the sign bit set, so
	// a domain can differentiate whether it is running on SP or not
	// Thus, writes of DCR should ignore the sign bit
	//verbose("vcpu_set_dcr: called\n");
	PSCBX(vcpu,dcr) = val & ~0x8000000000000000L;
	return (IA64_NO_FAULT);
}

IA64FAULT vcpu_set_iva(VCPU *vcpu, UINT64 val)
{
	PSCBX(vcpu,iva) = val & ~0x7fffL;
	return (IA64_NO_FAULT);
}

IA64FAULT vcpu_set_pta(VCPU *vcpu, UINT64 val)
{
	if (val & IA64_PTA_LFMT) {
		printf("*** No support for VHPT long format yet!!\n");
		return (IA64_ILLOP_FAULT);
	}
	if (val & (0x3f<<9)) /* reserved fields */ return IA64_RSVDREG_FAULT;
	if (val & 2) /* reserved fields */ return IA64_RSVDREG_FAULT;
	PSCB(vcpu,pta) = val;
	return IA64_NO_FAULT;
}
IA64FAULT vcpu_set_ipsr(VCPU *vcpu, UINT64 val)
{
	PSCB(vcpu,ipsr) = val;
	return IA64_NO_FAULT;
}

IA64FAULT vcpu_set_isr(VCPU *vcpu, UINT64 val)
{
	PSCB(vcpu,isr) = val;
	return IA64_NO_FAULT;
}

IA64FAULT vcpu_set_iip(VCPU *vcpu, UINT64 val)
{
	PSCB(vcpu,iip) = val;
	return IA64_NO_FAULT;
}

IA64FAULT vcpu_increment_iip(VCPU *vcpu)
{
	REGS *regs = vcpu_regs(vcpu);
	struct ia64_psr *ipsr = (struct ia64_psr *)&regs->cr_ipsr;
	if (ipsr->ri == 2) { ipsr->ri=0; regs->cr_iip += 16; }
	else ipsr->ri++;
	return (IA64_NO_FAULT);
}

IA64FAULT vcpu_set_ifa(VCPU *vcpu, UINT64 val)
{
	PSCB(vcpu,ifa) = val;
	return IA64_NO_FAULT;
}

IA64FAULT vcpu_set_itir(VCPU *vcpu, UINT64 val)
{
	PSCB(vcpu,itir) = val;
	return IA64_NO_FAULT;
}

IA64FAULT vcpu_set_iipa(VCPU *vcpu, UINT64 val)
{
	// SP entry code does not save iipa yet nor does it get
	// properly delivered in the pscb
	printf("*** vcpu_set_iipa: cr.iipa not fully implemented yet!!\n");
	PSCB(vcpu,iipa) = val;
	return IA64_NO_FAULT;
}

IA64FAULT vcpu_set_ifs(VCPU *vcpu, UINT64 val)
{
	//REGS *regs = vcpu_regs(vcpu);
	PSCB(vcpu,ifs) = val;
	return IA64_NO_FAULT;
}

IA64FAULT vcpu_set_iim(VCPU *vcpu, UINT64 val)
{
	PSCB(vcpu,iim) = val;
	return IA64_NO_FAULT;
}

IA64FAULT vcpu_set_iha(VCPU *vcpu, UINT64 val)
{
	PSCB(vcpu,iha) = val;
	return IA64_NO_FAULT;
}
/**************************************************************************
VCPU interrupt control register access routines
**************************************************************************/
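// Pending (irr) and in-service (insvc) interrupts are each tracked as a
// 256-bit bitmap held in four 64-bit words, one bit per vector: vector v
// lives at bit (v & 0x3f) of word (v >> 6), as the get_ivr code below
// makes explicit.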
void vcpu_pend_unspecified_interrupt(VCPU *vcpu)
{
	PSCB(vcpu,pending_interruption) = 1;
}

void vcpu_pend_interrupt(VCPU *vcpu, UINT64 vector)
{
	if (vector & ~0xff) {
		printf("vcpu_pend_interrupt: bad vector\n");
		return;
	}
#ifdef CONFIG_VTI
	if ( VMX_DOMAIN(vcpu) ) {
		set_bit(vector,VPD_CR(vcpu,irr));
	} else
#endif // CONFIG_VTI
	{
		/* if (!test_bit(vector,PSCB(vcpu,delivery_mask))) return; */
		if (test_bit(vector,PSCBX(vcpu,irr))) {
			//printf("vcpu_pend_interrupt: overrun\n");
		}
		set_bit(vector,PSCBX(vcpu,irr));
		PSCB(vcpu,pending_interruption) = 1;
	}

#if 0
	/* Keir: I think you should unblock when an interrupt is pending. */
	{
		int running = test_bit(_VCPUF_running, &vcpu->vcpu_flags);
		vcpu_unblock(vcpu);
		if ( running )
			smp_send_event_check_cpu(vcpu->processor);
	}
#endif
}
void early_tick(VCPU *vcpu)
{
	UINT64 *p = &PSCBX(vcpu,irr[3]);
	printf("vcpu_check_pending: about to deliver early tick\n");
	printf("&irr[0]=%p, irr[0]=0x%lx\n",p,*p);
}

#define IA64_TPR_MMI	0x10000
#define IA64_TPR_MIC	0x000f0
/* checks to see if a VCPU has any unmasked pending interrupts
 * if so, returns the highest, else returns SPURIOUS_VECTOR */
/* NOTE: Since this gets called from vcpu_get_ivr() and the
 * semantics of "mov rx=cr.ivr" ignore the setting of the psr.i bit,
 * this routine also ignores pscb.interrupt_delivery_enabled
 * and this must be checked independently; see vcpu_deliverable_interrupts() */
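// The scan below walks irr[3] down to irr[0] (highest vectors first) until
// it finds a word with a pending bit; ia64_fls() then picks the highest set
// bit in that word. An in-service interrupt at an equal or higher vector,
// tpr.mmi, or the tpr.mic class threshold each force SPURIOUS_VECTOR to be
// returned instead.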
UINT64 vcpu_check_pending_interrupts(VCPU *vcpu)
{
	UINT64 *p, *q, *r, bits, bitnum, mask, i, vector;

	p = &PSCBX(vcpu,irr[3]);
	/* q = &PSCB(vcpu,delivery_mask[3]); */
	r = &PSCBX(vcpu,insvc[3]);
	for (i = 3; ; p--, q--, r--, i--) {
		bits = *p /* & *q */;
		if (bits) break; // got a potential interrupt
		if (*r) {
			// nothing in this word which is pending+inservice
			// but there is one inservice which masks lower
			return SPURIOUS_VECTOR;
		}
		if (i == 0) {
			// checked all bits... nothing pending+inservice
			return SPURIOUS_VECTOR;
		}
	}
	// have a pending,deliverable interrupt... see if it is masked
	bitnum = ia64_fls(bits);
	//printf("XXXXXXX vcpu_check_pending_interrupts: got bitnum=%p...",bitnum);
	vector = bitnum+(i*64);
	mask = 1L << bitnum;
	//printf("XXXXXXX vcpu_check_pending_interrupts: got vector=%p...",vector);
	if (*r >= mask) {
		// masked by equal inservice
		//printf("but masked by equal inservice\n");
		return SPURIOUS_VECTOR;
	}
	if (PSCB(vcpu,tpr) & IA64_TPR_MMI) {
		// tpr.mmi is set
		//printf("but masked by tpr.mmi\n");
		return SPURIOUS_VECTOR;
	}
	if (((PSCB(vcpu,tpr) & IA64_TPR_MIC) + 15) >= vector) {
		//tpr.mic masks class
		//printf("but masked by tpr.mic\n");
		return SPURIOUS_VECTOR;
	}

	//printf("returned to caller\n");
#if 0
	if (vector == (PSCB(vcpu,itv) & 0xff)) {
		UINT64 now = ia64_get_itc();
		UINT64 itm = PSCBX(vcpu,domain_itm);
		if (now < itm) early_tick(vcpu);
	}
#endif
	return vector;
}
UINT64 vcpu_deliverable_interrupts(VCPU *vcpu)
{
	return (vcpu_get_psr_i(vcpu) &&
		vcpu_check_pending_interrupts(vcpu) != SPURIOUS_VECTOR);
}

UINT64 vcpu_deliverable_timer(VCPU *vcpu)
{
	return (vcpu_get_psr_i(vcpu) &&
		vcpu_check_pending_interrupts(vcpu) == PSCB(vcpu,itv));
}
IA64FAULT vcpu_get_lid(VCPU *vcpu, UINT64 *pval)
{
	extern unsigned long privop_trace;
	//privop_trace=1;
	//TODO: Implement this
	printf("vcpu_get_lid: WARNING: Getting cr.lid always returns zero\n");
	//*pval = 0;
	*pval = ia64_getreg(_IA64_REG_CR_LID);
	return IA64_NO_FAULT;
}
IA64FAULT vcpu_get_ivr(VCPU *vcpu, UINT64 *pval)
{
	int i;
	UINT64 vector, mask;

#define HEARTBEAT_FREQ 16	// period in seconds
#ifdef HEARTBEAT_FREQ
#define N_DOMS 16	// number of domains tracked for heartbeat stats
	static long count[N_DOMS] = { 0 };
	static long nonclockcount[N_DOMS] = { 0 };
	REGS *regs = vcpu_regs(vcpu);
	unsigned domid = vcpu->domain->domain_id;
#endif
#ifdef IRQ_DEBUG
	static char firstivr = 1;
	static char firsttime[256];
	if (firstivr) {
		int i;
		for (i=0;i<256;i++) firsttime[i]=1;
		firstivr=0;
	}
#endif

	vector = vcpu_check_pending_interrupts(vcpu);
	if (vector == SPURIOUS_VECTOR) {
		PSCB(vcpu,pending_interruption) = 0;
		*pval = vector;
		return IA64_NO_FAULT;
	}
#ifdef HEARTBEAT_FREQ
	if (domid >= N_DOMS) domid = N_DOMS-1;
	if (vector == (PSCB(vcpu,itv) & 0xff)) {
		if (!(++count[domid] & ((HEARTBEAT_FREQ*1024)-1))) {
			printf("Dom%d heartbeat... ticks=%lx,nonticks=%lx\n",
				domid, count[domid], nonclockcount[domid]);
			//count[domid] = 0;
			//dump_runq();
		}
	}
	else nonclockcount[domid]++;
#endif
	// now have an unmasked, pending, deliverable vector!
	// getting ivr has "side effects"
#ifdef IRQ_DEBUG
	if (firsttime[vector]) {
		printf("*** First get_ivr on vector=%d,itc=%lx\n",
			vector,ia64_get_itc());
		firsttime[vector]=0;
	}
#endif
	i = vector >> 6;
	mask = 1L << (vector & 0x3f);
	//printf("ZZZZZZ vcpu_get_ivr: setting insvc mask for vector %ld\n",vector);
	PSCBX(vcpu,insvc[i]) |= mask;
	PSCBX(vcpu,irr[i]) &= ~mask;
	//PSCB(vcpu,pending_interruption)--;
	*pval = vector;
	// if delivering a timer interrupt, remember domain_itm
	if (vector == (PSCB(vcpu,itv) & 0xff)) {
		PSCBX(vcpu,domain_itm_last) = PSCBX(vcpu,domain_itm);
	}
	return IA64_NO_FAULT;
}
IA64FAULT vcpu_get_tpr(VCPU *vcpu, UINT64 *pval)
{
	*pval = PSCB(vcpu,tpr);
	return (IA64_NO_FAULT);
}

IA64FAULT vcpu_get_eoi(VCPU *vcpu, UINT64 *pval)
{
	*pval = 0L;	// reads of eoi always return 0
	return (IA64_NO_FAULT);
}

IA64FAULT vcpu_get_irr0(VCPU *vcpu, UINT64 *pval)
{
#ifndef IRR_USE_FIXED
	printk("vcpu_get_irr: called, not implemented yet\n");
	return IA64_ILLOP_FAULT;
#else
	*pval = vcpu->irr[0];
	return (IA64_NO_FAULT);
#endif
}

IA64FAULT vcpu_get_irr1(VCPU *vcpu, UINT64 *pval)
{
#ifndef IRR_USE_FIXED
	printk("vcpu_get_irr: called, not implemented yet\n");
	return IA64_ILLOP_FAULT;
#else
	*pval = vcpu->irr[1];
	return (IA64_NO_FAULT);
#endif
}

IA64FAULT vcpu_get_irr2(VCPU *vcpu, UINT64 *pval)
{
#ifndef IRR_USE_FIXED
	printk("vcpu_get_irr: called, not implemented yet\n");
	return IA64_ILLOP_FAULT;
#else
	*pval = vcpu->irr[2];
	return (IA64_NO_FAULT);
#endif
}

IA64FAULT vcpu_get_irr3(VCPU *vcpu, UINT64 *pval)
{
#ifndef IRR_USE_FIXED
	printk("vcpu_get_irr: called, not implemented yet\n");
	return IA64_ILLOP_FAULT;
#else
	*pval = vcpu->irr[3];
	return (IA64_NO_FAULT);
#endif
}
IA64FAULT vcpu_get_itv(VCPU *vcpu, UINT64 *pval)
{
	*pval = PSCB(vcpu,itv);
	return (IA64_NO_FAULT);
}

IA64FAULT vcpu_get_pmv(VCPU *vcpu, UINT64 *pval)
{
	*pval = PSCB(vcpu,pmv);
	return (IA64_NO_FAULT);
}

IA64FAULT vcpu_get_cmcv(VCPU *vcpu, UINT64 *pval)
{
	*pval = PSCB(vcpu,cmcv);
	return (IA64_NO_FAULT);
}

IA64FAULT vcpu_get_lrr0(VCPU *vcpu, UINT64 *pval)
{
	// fix this when setting values other than m-bit is supported
	printf("vcpu_get_lrr0: Unmasked interrupts unsupported\n");
	*pval = (1L << 16);
	return (IA64_NO_FAULT);
}

IA64FAULT vcpu_get_lrr1(VCPU *vcpu, UINT64 *pval)
{
	// fix this when setting values other than m-bit is supported
	printf("vcpu_get_lrr1: Unmasked interrupts unsupported\n");
	*pval = (1L << 16);
	return (IA64_NO_FAULT);
}
IA64FAULT vcpu_set_lid(VCPU *vcpu, UINT64 val)
{
	printf("vcpu_set_lid: Setting cr.lid is unsupported\n");
	return (IA64_ILLOP_FAULT);
}

IA64FAULT vcpu_set_tpr(VCPU *vcpu, UINT64 val)
{
	if (val & 0xff00) return IA64_RSVDREG_FAULT;
	PSCB(vcpu,tpr) = val;
	if (vcpu_check_pending_interrupts(vcpu) != SPURIOUS_VECTOR)
		PSCB(vcpu,pending_interruption) = 1;
	return (IA64_NO_FAULT);
}
IA64FAULT vcpu_set_eoi(VCPU *vcpu, UINT64 val)
{
	UINT64 *p, bits, vec, bitnum;
	int i;

	p = &PSCBX(vcpu,insvc[3]);
	for (i = 3; (i >= 0) && !(bits = *p); i--, p--);
	if (i < 0) {
		printf("Trying to EOI interrupt when none are in-service.\r\n");
		return IA64_NO_FAULT;	// nothing in-service to clear
	}
	bitnum = ia64_fls(bits);
	vec = bitnum + (i*64);
	/* clear the correct bit */
	bits &= ~(1L << bitnum);
	*p = bits;
	/* clearing an eoi bit may unmask another pending interrupt... */
	if (PSCB(vcpu,interrupt_delivery_enabled)) { // but only if enabled...
		// worry about this later... Linux only calls eoi
		// with interrupts disabled
		printf("Trying to EOI interrupt with interrupts enabled\r\n");
	}
	if (vcpu_check_pending_interrupts(vcpu) != SPURIOUS_VECTOR)
		PSCB(vcpu,pending_interruption) = 1;
	//printf("YYYYY vcpu_set_eoi: Successful\n");
	return (IA64_NO_FAULT);
}
IA64FAULT vcpu_set_lrr0(VCPU *vcpu, UINT64 val)
{
	if (!(val & (1L << 16))) {
		printf("vcpu_set_lrr0: Unmasked interrupts unsupported\n");
		return (IA64_ILLOP_FAULT);
	}
	// no place to save this state but nothing to do anyway
	return (IA64_NO_FAULT);
}

IA64FAULT vcpu_set_lrr1(VCPU *vcpu, UINT64 val)
{
	if (!(val & (1L << 16))) {
		printf("vcpu_set_lrr1: Unmasked interrupts unsupported\n");
		return (IA64_ILLOP_FAULT);
	}
	// no place to save this state but nothing to do anyway
	return (IA64_NO_FAULT);
}
// parameter is a time interval specified in cycles
void vcpu_enable_timer(VCPU *vcpu,UINT64 cycles)
{
	PSCBX(vcpu,xen_timer_interval) = cycles;
	vcpu_set_next_timer(vcpu);
	printf("vcpu_enable_timer: interval set to %ld cycles\n",
		PSCBX(vcpu,xen_timer_interval));
	__set_bit(PSCB(vcpu,itv), PSCB(vcpu,delivery_mask));
}
IA64FAULT vcpu_set_itv(VCPU *vcpu, UINT64 val)
{
	extern unsigned long privop_trace;
	//privop_trace=1;
	if (val & 0xef00) return (IA64_ILLOP_FAULT);
	PSCB(vcpu,itv) = val;
	if (val & 0x10000) {
		printf("**** vcpu_set_itv(%lx): vitm=%lx, setting to 0\n",val,PSCBX(vcpu,domain_itm));
		PSCBX(vcpu,domain_itm) = 0;
	}
	else vcpu_enable_timer(vcpu,1000000L);
	return (IA64_NO_FAULT);
}
IA64FAULT vcpu_set_pmv(VCPU *vcpu, UINT64 val)
{
	if (val & 0xef00) /* reserved fields */ return IA64_RSVDREG_FAULT;
	PSCB(vcpu,pmv) = val;
	return (IA64_NO_FAULT);
}

IA64FAULT vcpu_set_cmcv(VCPU *vcpu, UINT64 val)
{
	if (val & 0xef00) /* reserved fields */ return IA64_RSVDREG_FAULT;
	PSCB(vcpu,cmcv) = val;
	return (IA64_NO_FAULT);
}
/**************************************************************************
VCPU temporary register access routines
**************************************************************************/
UINT64 vcpu_get_tmp(VCPU *vcpu, UINT64 index)
{
	if (index > 7) return 0;
	return PSCB(vcpu,tmp[index]);
}

void vcpu_set_tmp(VCPU *vcpu, UINT64 index, UINT64 val)
{
	if (index <= 7) PSCB(vcpu,tmp[index]) = val;
}
/**************************************************************************
Interval timer routines
**************************************************************************/

BOOLEAN vcpu_timer_disabled(VCPU *vcpu)
{
	UINT64 itv = PSCB(vcpu,itv);
	return(!itv || !!(itv & 0x10000));
}

BOOLEAN vcpu_timer_inservice(VCPU *vcpu)
{
	UINT64 itv = PSCB(vcpu,itv);
	return (test_bit(itv, PSCBX(vcpu,insvc)));
}

BOOLEAN vcpu_timer_expired(VCPU *vcpu)
{
	unsigned long domain_itm = PSCBX(vcpu,domain_itm);
	unsigned long now = ia64_get_itc();

	if (!domain_itm) return FALSE;
	if (now < domain_itm) return FALSE;
	if (vcpu_timer_disabled(vcpu)) return FALSE;
	return TRUE;
}
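// vcpu_safe_set_itm() must guarantee that the written match value lands in
// the future: if the itc races past the requested value before the write
// takes effect, retry with an exponentially growing offset (epsilon) until
// the write sticks.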
void vcpu_safe_set_itm(unsigned long val)
{
	unsigned long epsilon = 100;
	UINT64 now = ia64_get_itc();

	local_irq_disable();
	while (1) {
		//printf("*** vcpu_safe_set_itm: Setting itm to %lx, itc=%lx\n",val,now);
		ia64_set_itm(val);
		if (val > (now = ia64_get_itc())) break;
		val = now + epsilon;
		epsilon <<= 1;
	}
	local_irq_enable();
}
void vcpu_set_next_timer(VCPU *vcpu)
{
	UINT64 d = PSCBX(vcpu,domain_itm);
	//UINT64 s = PSCBX(vcpu,xen_itm);
	UINT64 s = local_cpu_data->itm_next;
	UINT64 now = ia64_get_itc();
	//UINT64 interval = PSCBX(vcpu,xen_timer_interval);

	/* gloss over the wraparound problem for now... we know it exists
	 * but it doesn't matter right now */

#if 0
	/* ensure at least next SP tick is in the future */
	if (!interval) PSCBX(vcpu,xen_itm) = now +
#if 0
		(running_on_sim() ? SIM_DEFAULT_CLOCK_RATE :
			DEFAULT_CLOCK_RATE);
#else
		3000000;
	//printf("vcpu_set_next_timer: HACK!\n");
#endif
#if 0
	if (PSCBX(vcpu,xen_itm) < now)
		while (PSCBX(vcpu,xen_itm) < now + (interval>>1))
			PSCBX(vcpu,xen_itm) += interval;
#endif
#endif

	if (is_idle_task(vcpu->domain)) {
		printf("****** vcpu_set_next_timer called during idle!!\n");
	}
	//s = PSCBX(vcpu,xen_itm);
	if (d && (d > now) && (d < s)) {
		vcpu_safe_set_itm(d);
		//using_domain_as_itm++;
	}
	else {
		vcpu_safe_set_itm(s);
		//using_xen_as_itm++;
	}
}
IA64FAULT vcpu_set_itm(VCPU *vcpu, UINT64 val)
{
	UINT now = ia64_get_itc();

	//if (val < now) val = now + 1000;
	//printf("*** vcpu_set_itm: called with %lx\n",val);
	PSCBX(vcpu,domain_itm) = val;
	vcpu_set_next_timer(vcpu);
	return (IA64_NO_FAULT);
}
IA64FAULT vcpu_set_itc(VCPU *vcpu, UINT64 val)
{
	UINT64 oldnow = ia64_get_itc();
	UINT64 olditm = PSCBX(vcpu,domain_itm);
	unsigned long d = olditm - oldnow;
	unsigned long x = local_cpu_data->itm_next - oldnow;

	UINT64 newnow = val, min_delta;

#define DISALLOW_SETTING_ITC_FOR_NOW
#ifdef DISALLOW_SETTING_ITC_FOR_NOW
	printf("vcpu_set_itc: Setting ar.itc is currently disabled\n");
#else
	local_irq_disable();
	if (olditm) {
		printf("**** vcpu_set_itc(%lx): vitm changed to %lx\n",val,newnow+d);
		PSCBX(vcpu,domain_itm) = newnow + d;
	}
	local_cpu_data->itm_next = newnow + x;
	d = PSCBX(vcpu,domain_itm);
	x = local_cpu_data->itm_next;

	ia64_set_itc(newnow);
	if (d && (d > newnow) && (d < x)) {
		vcpu_safe_set_itm(d);
		//using_domain_as_itm++;
	}
	else {
		vcpu_safe_set_itm(x);
		//using_xen_as_itm++;
	}
	local_irq_enable();
#endif
	return (IA64_NO_FAULT);
}
IA64FAULT vcpu_get_itm(VCPU *vcpu, UINT64 *pval)
{
	//FIXME: Implement this
	printf("vcpu_get_itm: Getting cr.itm is unsupported... continuing\n");
	return (IA64_NO_FAULT);
	//return (IA64_ILLOP_FAULT);
}

IA64FAULT vcpu_get_itc(VCPU *vcpu, UINT64 *pval)
{
	//TODO: Implement this
	printf("vcpu_get_itc: Getting ar.itc is unsupported\n");
	return (IA64_ILLOP_FAULT);
}
void vcpu_pend_timer(VCPU *vcpu)
{
	UINT64 itv = PSCB(vcpu,itv) & 0xff;

	if (vcpu_timer_disabled(vcpu)) return;
	//if (vcpu_timer_inservice(vcpu)) return;
	if (PSCBX(vcpu,domain_itm_last) == PSCBX(vcpu,domain_itm)) {
		// already delivered an interrupt for this so
		// don't deliver another
		return;
	}
#if 0
	// attempt to flag "timer tick before its due" source
	{
	UINT64 itm = PSCBX(vcpu,domain_itm);
	UINT64 now = ia64_get_itc();
	if (now < itm) printf("******* vcpu_pend_timer: pending before due!\n");
	}
#endif
	vcpu_pend_interrupt(vcpu, itv);
}
// returns true if ready to deliver a timer interrupt too early
UINT64 vcpu_timer_pending_early(VCPU *vcpu)
{
	UINT64 now = ia64_get_itc();
	UINT64 itm = PSCBX(vcpu,domain_itm);

	if (vcpu_timer_disabled(vcpu)) return 0;
	if (!itm) return 0;
	return (vcpu_deliverable_timer(vcpu) && (now < itm));
}
//FIXME: This is a hack because everything dies if a timer tick is lost
void vcpu_poke_timer(VCPU *vcpu)
{
	UINT64 itv = PSCB(vcpu,itv) & 0xff;
	UINT64 now = ia64_get_itc();
	UINT64 itm = PSCBX(vcpu,domain_itm);
	UINT64 irr;

	if (vcpu_timer_disabled(vcpu)) return;
	if (!itm) return;
	if (itv != 0xefL) {
		printf("vcpu_poke_timer: unimplemented itv=%lx!\n",itv);
		while(1);
	}
	// using 0xef instead of itv so can get real irr
	if (now > itm && !test_bit(0xefL, PSCBX(vcpu,insvc))) {
		if (!test_bit(0xefL,PSCBX(vcpu,irr))) {
			irr = ia64_getreg(_IA64_REG_CR_IRR3);
			if (irr & (1L<<(0xef-0xc0))) return;
			if (now-itm>0x800000)
				printf("*** poking timer: now=%lx,vitm=%lx,xitm=%lx,itm=%lx\n",now,itm,local_cpu_data->itm_next,ia64_get_itm());
			vcpu_pend_timer(vcpu);
		}
	}
}
/**************************************************************************
Privileged operation emulation routines
**************************************************************************/

IA64FAULT vcpu_force_data_miss(VCPU *vcpu, UINT64 ifa)
{
	PSCB(vcpu,tmp[0]) = ifa;	// save ifa in vcpu structure, then specify IA64_FORCED_IFA
	return (vcpu_get_rr_ve(vcpu,ifa) ? IA64_DATA_TLB_VECTOR : IA64_ALT_DATA_TLB_VECTOR) | IA64_FORCED_IFA;
}
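// On rfi: when both the saved ifs and the current cr.ifs hold a valid
// frame (bit 63 set), the new iip/ipsr cannot simply be installed in the
// trap frame; the return path is instead routed through the dorfirfi
// trampoline (entered with interrupt collection/delivery and bank
// switching masked), which performs the second, real rfi on the guest's
// behalf so cr.ifs is restored consistently.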
IA64FAULT vcpu_rfi(VCPU *vcpu)
{
	// TODO: Only allowed for current vcpu
	PSR psr;
	UINT64 int_enable, regspsr = 0;
	UINT64 ifs;
	REGS *regs = vcpu_regs(vcpu);
	extern void dorfirfi(void);

	psr.i64 = PSCB(vcpu,ipsr);
	if (psr.ia64_psr.cpl < 3) psr.ia64_psr.cpl = 2;
	if (psr.ia64_psr.i) PSCB(vcpu,interrupt_delivery_enabled) = 1;
	int_enable = psr.ia64_psr.i;
	if (psr.ia64_psr.ic) PSCB(vcpu,interrupt_collection_enabled) = 1;
	if (psr.ia64_psr.dt && psr.ia64_psr.rt && psr.ia64_psr.it) vcpu_set_metaphysical_mode(vcpu,FALSE);
	else vcpu_set_metaphysical_mode(vcpu,TRUE);
	psr.ia64_psr.ic = 1; psr.ia64_psr.i = 1;
	psr.ia64_psr.dt = 1; psr.ia64_psr.rt = 1; psr.ia64_psr.it = 1;
	psr.ia64_psr.bn = 1;
	//psr.pk = 1;  // checking pkeys shouldn't be a problem but seems broken
	if (psr.ia64_psr.be) {
		printf("*** DOMAIN TRYING TO TURN ON BIG-ENDIAN!!!\n");
		return (IA64_ILLOP_FAULT);
	}
	PSCB(vcpu,incomplete_regframe) = 0; // is this necessary?
	ifs = PSCB(vcpu,ifs);
	//if ((ifs & regs->cr_ifs & 0x8000000000000000L) && ifs != regs->cr_ifs) {
	//if ((ifs & 0x8000000000000000L) && ifs != regs->cr_ifs) {
	if (ifs & regs->cr_ifs & 0x8000000000000000L) {
		// TODO: validate PSCB(vcpu,iip)
		// TODO: PSCB(vcpu,ipsr) = psr;
		PSCB(vcpu,ipsr) = psr.i64;
		// now set up the trampoline
		regs->cr_iip = *(unsigned long *)dorfirfi; // function pointer!!
		__asm__ __volatile ("mov %0=psr;;":"=r"(regspsr)::"memory");
		regs->cr_ipsr = regspsr & ~(IA64_PSR_I | IA64_PSR_IC | IA64_PSR_BN);
	}
	else {
		regs->cr_ipsr = psr.i64;
		regs->cr_iip = PSCB(vcpu,iip);
	}
	PSCB(vcpu,interrupt_collection_enabled) = 1;
	vcpu_bsw1(vcpu);
	PSCB(vcpu,interrupt_delivery_enabled) = int_enable;
	return (IA64_NO_FAULT);
}
IA64FAULT vcpu_cover(VCPU *vcpu)
{
	// TODO: Only allowed for current vcpu
	REGS *regs = vcpu_regs(vcpu);

	if (!PSCB(vcpu,interrupt_collection_enabled)) {
		if (!PSCB(vcpu,incomplete_regframe))
			PSCB(vcpu,ifs) = regs->cr_ifs;
		else PSCB(vcpu,incomplete_regframe) = 0;
	}
	regs->cr_ifs = 0;
	return (IA64_NO_FAULT);
}
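// thash emulation for the short-format VHPT: the hashed address keeps the
// region bits (63:61) of the faulting address, merges the PTA base with the
// page-indexed offset ((vadr >> rr.ps) << 3) under a mask derived from the
// (1 << pta.size) table size, and keeps the low 15 bits of the offset:
//   VHPT_addr = vadr[63:61] | merge(pta_base, offset)[60:15] | offset[14:0]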
IA64FAULT vcpu_thash(VCPU *vcpu, UINT64 vadr, UINT64 *pval)
{
	UINT64 pta = PSCB(vcpu,pta);
	UINT64 pta_sz = (pta & IA64_PTA_SZ(0x3f)) >> IA64_PTA_SZ_BIT;
	UINT64 pta_base = pta & ~((1UL << IA64_PTA_BASE_BIT)-1);
	UINT64 Mask = (1L << pta_sz) - 1;
	UINT64 Mask_60_15 = (Mask >> 15) & 0x3fffffffffff;
	UINT64 compMask_60_15 = ~Mask_60_15;
	//UINT64 rr_ps = RR_TO_PS(get_rr(vadr));
	UINT64 rr_ps = vcpu_get_rr_ps(vcpu,vadr);
	UINT64 VHPT_offset = (vadr >> rr_ps) << 3;
	UINT64 VHPT_addr1 = vadr & 0xe000000000000000L;
	UINT64 VHPT_addr2a =
		((pta_base >> 15) & 0x3fffffffffff) & compMask_60_15;
	UINT64 VHPT_addr2b =
		((VHPT_offset >> 15) & 0x3fffffffffff) & Mask_60_15;
	UINT64 VHPT_addr3 = VHPT_offset & 0x7fff;
	UINT64 VHPT_addr = VHPT_addr1 | ((VHPT_addr2a | VHPT_addr2b) << 15) |
		VHPT_addr3;

#if 0
	if (VHPT_addr1 == 0xe000000000000000L) {
		printf("vcpu_thash: thash unsupported with rr7 @%lx\n",
			PSCB(vcpu,iip));
		return (IA64_ILLOP_FAULT);
	}
#endif
	//verbose("vcpu_thash: vadr=%p, VHPT_addr=%p\n",vadr,VHPT_addr);
	*pval = VHPT_addr;
	return (IA64_NO_FAULT);
}
IA64FAULT vcpu_ttag(VCPU *vcpu, UINT64 vadr, UINT64 *padr)
{
	printf("vcpu_ttag: ttag instruction unsupported\n");
	return (IA64_ILLOP_FAULT);
}
#define itir_ps(itir)	((itir >> 2) & 0x3f)
#define itir_mask(itir)	(~((1UL << itir_ps(itir)) - 1))
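// itir bits 7:2 hold the page size as log2(bytes); itir_mask() builds the
// mask that selects the page-number bits for a page of that size.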
unsigned long vhpt_translate_count = 0;
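// Translation precedence below mirrors the architected TLB search: region-0
// addresses are treated as (meta)physical and passed through, then the
// translation registers are checked, then the 1-entry virtual TLB, and
// finally the guest's short-format VHPT; a miss returns the fault vector
// the caller should reflect into the guest.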
IA64FAULT vcpu_translate(VCPU *vcpu, UINT64 address, BOOLEAN is_data, UINT64 *pteval, UINT64 *itir)
{
	unsigned long pta, pta_mask, iha, pte, ps;
	TR_ENTRY *trp;
	ia64_rr rr;

	if (!(address >> 61)) {
		if (!PSCB(vcpu,metaphysical_mode)) {
			REGS *regs = vcpu_regs(vcpu);
			unsigned long viip = PSCB(vcpu,iip);
			unsigned long vipsr = PSCB(vcpu,ipsr);
			unsigned long iip = regs->cr_iip;
			unsigned long ipsr = regs->cr_ipsr;
			printk("vcpu_translate: bad address %p, viip=%p, vipsr=%p, iip=%p, ipsr=%p continuing\n", address, viip, vipsr, iip, ipsr);
		}

		*pteval = (address & _PAGE_PPN_MASK) | __DIRTY_BITS | _PAGE_PL_2 | _PAGE_AR_RWX;
		*itir = PAGE_SHIFT << 2;
		phys_translate_count++;
		return IA64_NO_FAULT;
	}

	/* check translation registers */
	if ((trp = match_tr(vcpu,address))) {
		tr_translate_count++;
		*pteval = trp->page_flags;
		*itir = trp->itir;
		return IA64_NO_FAULT;
	}

	/* check 1-entry TLB */
	if ((trp = match_dtlb(vcpu,address))) {
		dtlb_translate_count++;
		//*pteval = trp->page_flags;
		*pteval = vcpu->arch.dtlb_pte;
		*itir = trp->itir;
		return IA64_NO_FAULT;
	}

	/* check guest VHPT */
	pta = PSCB(vcpu,pta);
	rr.rrval = PSCB(vcpu,rrs)[address>>61];
	if (rr.ve && (pta & IA64_PTA_VE))
	{
		if (pta & IA64_PTA_VF)
		{
			/* long format VHPT - not implemented */
			return (is_data ? IA64_DATA_TLB_VECTOR : IA64_INST_TLB_VECTOR);
		}
		else
		{
			/* short format VHPT */

			/* avoid recursively walking VHPT */
			pta_mask = (itir_mask(pta) << 3) >> 3;
			if (((address ^ pta) & pta_mask) == 0)
				return (is_data ? IA64_DATA_TLB_VECTOR : IA64_INST_TLB_VECTOR);

			vcpu_thash(vcpu, address, &iha);
			if (__copy_from_user(&pte, (void *)iha, sizeof(pte)) != 0)
				return IA64_VHPT_TRANS_VECTOR;

			/*
			 * Optimisation: this VHPT walker aborts on not-present pages
			 * instead of inserting a not-present translation, this allows
			 * vectoring directly to the miss handler.
			 */
			if (pte & _PAGE_P)
			{
				*pteval = pte;
				*itir = vcpu_get_itir_on_fault(vcpu,address);
				vhpt_translate_count++;
				return IA64_NO_FAULT;
			}
			return (is_data ? IA64_DATA_TLB_VECTOR : IA64_INST_TLB_VECTOR);
		}
	}
	return (is_data ? IA64_ALT_DATA_TLB_VECTOR : IA64_ALT_INST_TLB_VECTOR);
}
IA64FAULT vcpu_tpa(VCPU *vcpu, UINT64 vadr, UINT64 *padr)
{
	UINT64 pteval, itir, mask;
	IA64FAULT fault;

	fault = vcpu_translate(vcpu, vadr, 1, &pteval, &itir);
	if (fault == IA64_NO_FAULT)
	{
		mask = itir_mask(itir);
		*padr = (pteval & _PAGE_PPN_MASK & mask) | (vadr & ~mask);
		return (IA64_NO_FAULT);
	}
	else
	{
		PSCB(vcpu,tmp[0]) = vadr;	// save ifa in vcpu structure, then specify IA64_FORCED_IFA
		return (fault | IA64_FORCED_IFA);
	}
}
IA64FAULT vcpu_tak(VCPU *vcpu, UINT64 vadr, UINT64 *key)
{
	printf("vcpu_tak: tak instruction unsupported\n");
	return (IA64_ILLOP_FAULT);
	// HACK ALERT: tak does a thash for now
	//return vcpu_thash(vcpu,vadr,key);
}
/**************************************************************************
VCPU debug breakpoint register access routines
**************************************************************************/

IA64FAULT vcpu_set_dbr(VCPU *vcpu, UINT64 reg, UINT64 val)
{
	// TODO: unimplemented DBRs return a reserved register fault
	// TODO: Should set Logical CPU state, not just physical
	ia64_set_dbr(reg,val);
	return (IA64_NO_FAULT);
}

IA64FAULT vcpu_set_ibr(VCPU *vcpu, UINT64 reg, UINT64 val)
{
	// TODO: unimplemented IBRs return a reserved register fault
	// TODO: Should set Logical CPU state, not just physical
	ia64_set_ibr(reg,val);
	return (IA64_NO_FAULT);
}

IA64FAULT vcpu_get_dbr(VCPU *vcpu, UINT64 reg, UINT64 *pval)
{
	// TODO: unimplemented DBRs return a reserved register fault
	UINT64 val = ia64_get_dbr(reg);
	*pval = val;
	return (IA64_NO_FAULT);
}

IA64FAULT vcpu_get_ibr(VCPU *vcpu, UINT64 reg, UINT64 *pval)
{
	// TODO: unimplemented IBRs return a reserved register fault
	UINT64 val = ia64_get_ibr(reg);
	*pval = val;
	return (IA64_NO_FAULT);
}
/**************************************************************************
VCPU performance monitor register access routines
**************************************************************************/

IA64FAULT vcpu_set_pmc(VCPU *vcpu, UINT64 reg, UINT64 val)
{
	// TODO: Should set Logical CPU state, not just physical
	// NOTE: Writes to unimplemented PMC registers are discarded
	ia64_set_pmc(reg,val);
	return (IA64_NO_FAULT);
}

IA64FAULT vcpu_set_pmd(VCPU *vcpu, UINT64 reg, UINT64 val)
{
	// TODO: Should set Logical CPU state, not just physical
	// NOTE: Writes to unimplemented PMD registers are discarded
	ia64_set_pmd(reg,val);
	return (IA64_NO_FAULT);
}

IA64FAULT vcpu_get_pmc(VCPU *vcpu, UINT64 reg, UINT64 *pval)
{
	// NOTE: Reads from unimplemented PMC registers return zero
	UINT64 val = (UINT64)ia64_get_pmc(reg);
	*pval = val;
	return (IA64_NO_FAULT);
}

IA64FAULT vcpu_get_pmd(VCPU *vcpu, UINT64 reg, UINT64 *pval)
{
	// NOTE: Reads from unimplemented PMD registers return zero
	UINT64 val = (UINT64)ia64_get_pmd(reg);
	*pval = val;
	return (IA64_NO_FAULT);
}
/**************************************************************************
VCPU banked general register access routines
**************************************************************************/
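// The guest's bank-switch instructions (bsw.0/bsw.1) are emulated by
// swapping r16-r31 in the trap frame with the inactive bank saved in the
// pscb (bank0_regs/bank1_regs); PSCB(vcpu,banknum) tracks which bank the
// guest currently sees.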
IA64FAULT vcpu_bsw0(VCPU *vcpu)
{
	// TODO: Only allowed for current vcpu
	REGS *regs = vcpu_regs(vcpu);
	unsigned long *r = &regs->r16;
	unsigned long *b0 = &PSCB(vcpu,bank0_regs[0]);
	unsigned long *b1 = &PSCB(vcpu,bank1_regs[0]);
	int i;

	if (PSCB(vcpu,banknum)) {
		for (i = 0; i < 16; i++) { *b1++ = *r; *r++ = *b0++; }
		PSCB(vcpu,banknum) = 0;
	}
	return (IA64_NO_FAULT);
}

IA64FAULT vcpu_bsw1(VCPU *vcpu)
{
	// TODO: Only allowed for current vcpu
	REGS *regs = vcpu_regs(vcpu);
	unsigned long *r = &regs->r16;
	unsigned long *b0 = &PSCB(vcpu,bank0_regs[0]);
	unsigned long *b1 = &PSCB(vcpu,bank1_regs[0]);
	int i;

	if (!PSCB(vcpu,banknum)) {
		for (i = 0; i < 16; i++) { *b0++ = *r; *r++ = *b1++; }
		PSCB(vcpu,banknum) = 1;
	}
	return (IA64_NO_FAULT);
}
/**************************************************************************
VCPU cpuid access routines
**************************************************************************/

IA64FAULT vcpu_get_cpuid(VCPU *vcpu, UINT64 reg, UINT64 *pval)
{
	// FIXME: This could get called as a result of a rsvd-reg fault
	// if reg > 3
	switch(reg) {
	    case 0:
		memcpy(pval,"Xen/ia64",8);
		break;
	    case 1:
		*pval = 0;
		break;
	    case 2:
		*pval = 0;
		break;
	    case 3:
		*pval = ia64_get_cpuid(3);
		break;
	    case 4:
		*pval = ia64_get_cpuid(4);
		break;
	    default:
		if (reg > (ia64_get_cpuid(3) & 0xff))
			return IA64_RSVDREG_FAULT;
		*pval = ia64_get_cpuid(reg);
		break;
	}
	return (IA64_NO_FAULT);
}
/**************************************************************************
VCPU region register access routines
**************************************************************************/

unsigned long vcpu_get_rr_ve(VCPU *vcpu,UINT64 vadr)
{
	ia64_rr rr;

	rr.rrval = PSCB(vcpu,rrs)[vadr>>61];
	return(rr.ve);
}

IA64FAULT vcpu_set_rr(VCPU *vcpu, UINT64 reg, UINT64 val)
{
	PSCB(vcpu,rrs)[reg>>61] = val;
	// warning: set_one_rr() does it "live"
	set_one_rr(reg,val);
	return (IA64_NO_FAULT);
}

IA64FAULT vcpu_get_rr(VCPU *vcpu, UINT64 reg, UINT64 *pval)
{
	UINT val = PSCB(vcpu,rrs)[reg>>61];
	*pval = val;
	return (IA64_NO_FAULT);
}
/**************************************************************************
VCPU protection key register access routines
**************************************************************************/

IA64FAULT vcpu_get_pkr(VCPU *vcpu, UINT64 reg, UINT64 *pval)
{
#ifndef PKR_USE_FIXED
	printk("vcpu_get_pkr: called, not implemented yet\n");
	return IA64_ILLOP_FAULT;
#else
	UINT64 val = (UINT64)ia64_get_pkr(reg);
	*pval = val;
	return (IA64_NO_FAULT);
#endif
}

IA64FAULT vcpu_set_pkr(VCPU *vcpu, UINT64 reg, UINT64 val)
{
#ifndef PKR_USE_FIXED
	printk("vcpu_set_pkr: called, not implemented yet\n");
	return IA64_ILLOP_FAULT;
#else
//	if (reg >= NPKRS) return (IA64_ILLOP_FAULT);
	vcpu->pkrs[reg] = val;
	ia64_set_pkr(reg,val);
	return (IA64_NO_FAULT);
#endif
}
/**************************************************************************
VCPU translation register access routines
**************************************************************************/

static void vcpu_purge_tr_entry(TR_ENTRY *trp)
{
	trp->p = 0;
}

static void vcpu_set_tr_entry(TR_ENTRY *trp, UINT64 pte, UINT64 itir, UINT64 ifa)
{
	UINT64 ps;

	trp->itir = itir;
	trp->rid = virtualize_rid(current, get_rr(ifa) & RR_RID_MASK);
	trp->p = 1;
	ps = trp->ps;
	trp->page_flags = pte;
	if (trp->pl < 2) trp->pl = 2;
	trp->vadr = ifa & ~0xfff;
	if (ps > 12) { // "ignore" relevant low-order bits
		trp->ppn &= ~((1UL<<(ps-12))-1);
		trp->vadr &= ~((1UL<<ps)-1);
	}
}
TR_ENTRY *vcpu_match_tr_entry(VCPU *vcpu, TR_ENTRY *trp, UINT64 ifa, int count)
{
	unsigned long rid = (get_rr(ifa) & RR_RID_MASK);
	int i;

	for (i = 0; i < count; i++, trp++) {
		if (!trp->p) continue;
		if (physicalize_rid(vcpu,trp->rid) != rid) continue;
		if (ifa < trp->vadr) continue;
		if (ifa >= (trp->vadr + (1L << trp->ps)) - 1) continue;
		//if (trp->key && !match_pkr(vcpu,trp->key)) continue;
		return trp;
	}
	return 0;
}

TR_ENTRY *match_tr(VCPU *vcpu, unsigned long ifa)
{
	TR_ENTRY *trp;

	trp = vcpu_match_tr_entry(vcpu,vcpu->arch.dtrs,ifa,NDTRS);
	if (trp) return trp;
	trp = vcpu_match_tr_entry(vcpu,vcpu->arch.itrs,ifa,NITRS);
	if (trp) return trp;
	return 0;
}
IA64FAULT vcpu_itr_d(VCPU *vcpu, UINT64 slot, UINT64 pte,
		UINT64 itir, UINT64 ifa)
{
	TR_ENTRY *trp;

	if (slot >= NDTRS) return IA64_RSVDREG_FAULT;
	trp = &PSCBX(vcpu,dtrs[slot]);
	//printf("***** itr.d: setting slot %d: ifa=%p\n",slot,ifa);
	vcpu_set_tr_entry(trp,pte,itir,ifa);
	return IA64_NO_FAULT;
}

IA64FAULT vcpu_itr_i(VCPU *vcpu, UINT64 slot, UINT64 pte,
		UINT64 itir, UINT64 ifa)
{
	TR_ENTRY *trp;

	if (slot >= NITRS) return IA64_RSVDREG_FAULT;
	trp = &PSCBX(vcpu,itrs[slot]);
	//printf("***** itr.i: setting slot %d: ifa=%p\n",slot,ifa);
	vcpu_set_tr_entry(trp,pte,itir,ifa);
	return IA64_NO_FAULT;
}
/**************************************************************************
VCPU translation cache access routines
**************************************************************************/

void foobar(void) { /*vcpu_verbose = 1;*/ }

extern struct domain *dom0;
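// IorD below is a bitmask: bit 0 inserts the mapping into the instruction
// TLB, bit 1 into the data TLB, and bit 2 (0x4) suppresses caching the
// mapping in the 1-entry virtual TLB.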
void vcpu_itc_no_srlz(VCPU *vcpu, UINT64 IorD, UINT64 vaddr, UINT64 pte, UINT64 mp_pte, UINT64 logps)
{
	unsigned long psr;
	unsigned long ps = (vcpu->domain==dom0) ? logps : PAGE_SHIFT;

	// FIXME: validate ifa here (not in Xen space), COULD MACHINE CHECK!
	// FIXME, must be inlined or potential for nested fault here!
	if ((vcpu->domain==dom0) && (logps < PAGE_SHIFT)) {
		printf("vcpu_itc_no_srlz: domain0 use of smaller page size!\n");
		//FIXME: kill domain here
		while(1);
	}
	psr = ia64_clear_ic();
	ia64_itc(IorD,vaddr,pte,ps);	// FIXME: look for bigger mappings
	ia64_set_psr(psr);
	// ia64_srlz_i(); // no srls req'd, will rfi later
#ifdef VHPT_GLOBAL
	if (vcpu->domain==dom0 && ((vaddr >> 61) == 7)) {
		// FIXME: this is dangerous... vhpt_flush_address ensures these
		// addresses never get flushed.  More work needed if this
		// ever happens.
		//printf("vhpt_insert(%p,%p,%p)\n",vaddr,pte,1L<<logps);
		if (logps > PAGE_SHIFT) vhpt_multiple_insert(vaddr,pte,logps);
		else vhpt_insert(vaddr,pte,logps<<2);
	}
	// even if domain pagesize is larger than PAGE_SIZE, just put
	// PAGE_SIZE mapping in the vhpt for now, else purging is complicated
	else vhpt_insert(vaddr,pte,PAGE_SHIFT<<2);
#endif
	if (IorD & 0x4) return;  // don't place in 1-entry TLB
	if (IorD & 0x1) {
		vcpu_set_tr_entry(&PSCBX(vcpu,itlb),pte,ps<<2,vaddr);
		PSCBX(vcpu,itlb_pte) = mp_pte;
	}
	if (IorD & 0x2) {
		vcpu_set_tr_entry(&PSCBX(vcpu,dtlb),pte,ps<<2,vaddr);
		PSCBX(vcpu,dtlb_pte) = mp_pte;
	}
}
// NOTE: returns a physical pte, NOT a "metaphysical" pte, so do not check
// the physical address contained for correctness
TR_ENTRY *match_dtlb(VCPU *vcpu, unsigned long ifa)
{
	TR_ENTRY *trp;

	if ((trp = vcpu_match_tr_entry(vcpu,&vcpu->arch.dtlb,ifa,1)))
		return (&vcpu->arch.dtlb);
	return 0UL;
}
IA64FAULT vcpu_itc_d(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa)
{
	unsigned long pteval, logps = (itir >> 2) & 0x3f;
	unsigned long translate_domain_pte(UINT64,UINT64,UINT64);

	if (logps < PAGE_SHIFT) {
		printf("vcpu_itc_d: domain trying to use smaller page size!\n");
		//FIXME: kill domain here
		while(1);
	}
	//itir = (itir & ~0xfc) | (PAGE_SHIFT<<2); // ignore domain's pagesize
	pteval = translate_domain_pte(pte,ifa,itir);
	if (!pteval) return IA64_ILLOP_FAULT;
	vcpu_itc_no_srlz(vcpu,2,ifa,pteval,pte,logps);
	return IA64_NO_FAULT;
}

IA64FAULT vcpu_itc_i(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa)
{
	unsigned long pteval, logps = (itir >> 2) & 0x3f;
	unsigned long translate_domain_pte(UINT64,UINT64,UINT64);

	// FIXME: validate ifa here (not in Xen space), COULD MACHINE CHECK!
	if (logps < PAGE_SHIFT) {
		printf("vcpu_itc_i: domain trying to use smaller page size!\n");
		//FIXME: kill domain here
		while(1);
	}
	//itir = (itir & ~0xfc) | (PAGE_SHIFT<<2); // ignore domain's pagesize
	pteval = translate_domain_pte(pte,ifa,itir);
	// FIXME: what to do if bad physical address? (machine check?)
	if (!pteval) return IA64_ILLOP_FAULT;
	vcpu_itc_no_srlz(vcpu, 1,ifa,pteval,pte,logps);
	return IA64_NO_FAULT;
}
IA64FAULT vcpu_ptc_l(VCPU *vcpu, UINT64 vadr, UINT64 addr_range)
{
	printk("vcpu_ptc_l: called, not implemented yet\n");
	return IA64_ILLOP_FAULT;
}
// At privlvl=0, fc performs no access rights or protection key checks, while
// at privlvl!=0, fc performs access rights checks as if it were a 1-byte
// read but no protection key check.  Thus in order to avoid an unexpected
// access rights fault, we have to translate the virtual address to a
// physical address (possibly via a metaphysical address) and do the fc
// on the physical address, which is guaranteed to flush the same cache line
IA64FAULT vcpu_fc(VCPU *vcpu, UINT64 vadr)
{
	// TODO: Only allowed for current vcpu
	UINT64 mpaddr, paddr;
	IA64FAULT fault;
	unsigned long translate_domain_mpaddr(unsigned long);
	IA64FAULT vcpu_tpa(VCPU *, UINT64, UINT64 *);

	fault = vcpu_tpa(vcpu, vadr, &mpaddr);
	if (fault == IA64_NO_FAULT) {
		paddr = translate_domain_mpaddr(mpaddr);
		ia64_fc(__va(paddr));
	}
	return fault;
}
int ptce_count = 0;
IA64FAULT vcpu_ptc_e(VCPU *vcpu, UINT64 vadr)
{
	// Note that this only needs to be called once, i.e. the
	// architected loop to purge the entire TLB, should use
	//  base = stride1 = stride2 = 0, count0 = count1 = 1

#ifdef VHPT_GLOBAL
	vhpt_flush();	// FIXME: This is overdoing it
#endif
	local_flush_tlb_all();
	// just invalidate the "whole" tlb
	vcpu_purge_tr_entry(&PSCBX(vcpu,dtlb));
	vcpu_purge_tr_entry(&PSCBX(vcpu,itlb));
	return IA64_NO_FAULT;
}
IA64FAULT vcpu_ptc_g(VCPU *vcpu, UINT64 vadr, UINT64 addr_range)
{
	printk("vcpu_ptc_g: called, not implemented yet\n");
	return IA64_ILLOP_FAULT;
}
IA64FAULT vcpu_ptc_ga(VCPU *vcpu,UINT64 vadr,UINT64 addr_range)
{
	extern void ia64_global_tlb_purge(UINT64 start, UINT64 end, UINT64 nbits);

	// FIXME: validate not flushing Xen addresses
	// if (Xen address) return(IA64_ILLOP_FAULT);
	// FIXME: ??breaks if domain PAGE_SIZE < Xen PAGE_SIZE
	//printf("######## vcpu_ptc_ga(%p,%p) ##############\n",vadr,addr_range);
#ifdef VHPT_GLOBAL
	vhpt_flush_address(vadr,addr_range);
#endif
	ia64_global_tlb_purge(vadr,vadr+addr_range,PAGE_SHIFT);
	vcpu_purge_tr_entry(&PSCBX(vcpu,dtlb));
	vcpu_purge_tr_entry(&PSCBX(vcpu,itlb));
	return IA64_NO_FAULT;
}
IA64FAULT vcpu_ptr_d(VCPU *vcpu,UINT64 vadr,UINT64 addr_range)
{
	printf("vcpu_ptr_d: Purging TLB is unsupported\n");
	return (IA64_ILLOP_FAULT);
}

IA64FAULT vcpu_ptr_i(VCPU *vcpu,UINT64 vadr,UINT64 addr_range)
{
	printf("vcpu_ptr_i: Purging TLB is unsupported\n");
	return (IA64_ILLOP_FAULT);
}
void vcpu_set_regs(VCPU *vcpu, REGS *regs)
{
	vcpu->arch.regs = regs;
}