ia64/xen-unstable

view xen/arch/ia64/xen/vcpu.c @ 9478:986538da9be0

[IA64] set_metaphysical_mode fix

If the old mode is metaphysical and the new mode is virtual,
set_one_rr(0, PSCB(vcpu,rrs[0])) will not set machine region
register 0, because PSCB(vcpu,metaphysical_mode) is still 1 at
that point. The VMM needs to update metaphysical_mode first and
then call set_one_rr.

Signed-off-by: Anthony Xu <anthony.xu@intel.com>
author awilliam@xenbuild.aw
date Wed Mar 29 12:41:33 2006 -0700 (2006-03-29)
parents 7e3cbc409676
children 2b6e531dab38
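
For quick reference, the corrected ordering lives in vcpu_set_metaphysical_mode() in the listing below; this is a minimal sketch of the fix, using the PSCB() accessor and the set_metaphysical_rr0()/set_one_rr() helpers already declared in this file:

	void vcpu_set_metaphysical_mode(VCPU *vcpu, BOOLEAN newmode)
	{
		/* only do something if the mode actually changes */
		if (!!newmode ^ !!PSCB(vcpu, metaphysical_mode)) {
			/* update the flag first, so set_one_rr() sees the new mode */
			PSCB(vcpu, metaphysical_mode) = newmode;
			if (newmode)
				set_metaphysical_rr0();
			else if (PSCB(vcpu, rrs[0]) != -1)
				set_one_rr(0, PSCB(vcpu, rrs[0]));
		}
	}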
line source
1 /*
2 * Virtualized CPU functions
3 *
4 * Copyright (C) 2004-2005 Hewlett-Packard Co.
5 * Dan Magenheimer (dan.magenheimer@hp.com)
6 *
7 */
9 #include <linux/sched.h>
10 #include <public/arch-ia64.h>
11 #include <asm/ia64_int.h>
12 #include <asm/vcpu.h>
13 #include <asm/regionreg.h>
14 #include <asm/tlb.h>
15 #include <asm/processor.h>
16 #include <asm/delay.h>
17 #include <asm/vmx_vcpu.h>
18 #include <asm/vhpt.h>
19 #include <asm/tlbflush.h>
20 #include <xen/event.h>
22 /* FIXME: where should these declarations live? */
23 extern void getreg(unsigned long regnum, unsigned long *val, int *nat, struct pt_regs *regs);
24 extern void setreg(unsigned long regnum, unsigned long val, int nat, struct pt_regs *regs);
25 extern void panic_domain(struct pt_regs *, const char *, ...);
26 extern int set_metaphysical_rr0(void);
27 extern unsigned long translate_domain_pte(UINT64,UINT64,UINT64);
28 extern unsigned long translate_domain_mpaddr(unsigned long);
29 extern void ia64_global_tlb_purge(UINT64 start, UINT64 end, UINT64 nbits);
32 typedef union {
33 struct ia64_psr ia64_psr;
34 unsigned long i64;
35 } PSR;
37 // this def for vcpu_regs won't work if kernel stack is present
38 //#define vcpu_regs(vcpu) ((struct pt_regs *) vcpu->arch.regs
40 #define TRUE 1
41 #define FALSE 0
42 #define IA64_PTA_SZ_BIT 2
43 #define IA64_PTA_VF_BIT 8
44 #define IA64_PTA_BASE_BIT 15
45 #define IA64_PTA_LFMT (1UL << IA64_PTA_VF_BIT)
46 #define IA64_PTA_SZ(x) (x##UL << IA64_PTA_SZ_BIT)
48 #define STATIC
50 #ifdef PRIVOP_ADDR_COUNT
51 struct privop_addr_count privop_addr_counter[PRIVOP_COUNT_NINSTS+1] = {
52 { "=ifa", { 0 }, { 0 }, 0 },
53 { "thash", { 0 }, { 0 }, 0 },
54 { 0, { 0 }, { 0 }, 0 }
55 };
56 extern void privop_count_addr(unsigned long addr, int inst);
57 #define PRIVOP_COUNT_ADDR(regs,inst) privop_count_addr(regs->cr_iip,inst)
58 #else
59 #define PRIVOP_COUNT_ADDR(x,y) do {} while (0)
60 #endif
62 unsigned long dtlb_translate_count = 0;
63 unsigned long tr_translate_count = 0;
64 unsigned long phys_translate_count = 0;
66 unsigned long vcpu_verbose = 0;
68 /**************************************************************************
69 VCPU general register access routines
70 **************************************************************************/
71 #ifdef XEN
72 UINT64
73 vcpu_get_gr(VCPU *vcpu, unsigned long reg)
74 {
75 REGS *regs = vcpu_regs(vcpu);
76 UINT64 val;
78 if (!reg) return 0;
79 getreg(reg,&val,0,regs); // FIXME: handle NATs later
80 return val;
81 }
82 IA64FAULT
83 vcpu_get_gr_nat(VCPU *vcpu, unsigned long reg, UINT64 *val)
84 {
85 REGS *regs = vcpu_regs(vcpu);
86 int nat;
88 getreg(reg,val,&nat,regs); // FIXME: handle NATs later
89 if (nat)
90 return IA64_NAT_CONSUMPTION_VECTOR;
91 return 0;
92 }
94 // returns:
95 // IA64_ILLOP_FAULT if the register would cause an Illegal Operation fault
96 // IA64_NO_FAULT otherwise
97 IA64FAULT
98 vcpu_set_gr(VCPU *vcpu, unsigned long reg, UINT64 value, int nat)
99 {
100 REGS *regs = vcpu_regs(vcpu);
101 long sof = (regs->cr_ifs) & 0x7f;
103 if (!reg) return IA64_ILLOP_FAULT;
104 if (reg >= sof + 32) return IA64_ILLOP_FAULT;
105 setreg(reg,value,nat,regs); // FIXME: handle NATs later
106 return IA64_NO_FAULT;
107 }
108 #else
109 // returns:
110 // IA64_ILLOP_FAULT if the register would cause an Illegal Operation fault
111 // IA64_NO_FAULT otherwise
112 IA64FAULT
113 vcpu_set_gr(VCPU *vcpu, unsigned long reg, UINT64 value)
114 {
115 REGS *regs = vcpu_regs(vcpu);
116 long sof = (regs->cr_ifs) & 0x7f;
118 if (!reg) return IA64_ILLOP_FAULT;
119 if (reg >= sof + 32) return IA64_ILLOP_FAULT;
120 setreg(reg,value,0,regs); // FIXME: handle NATs later
121 return IA64_NO_FAULT;
122 }
124 #endif
125 /**************************************************************************
126 VCPU privileged application register access routines
127 **************************************************************************/
129 void vcpu_load_kernel_regs(VCPU *vcpu)
130 {
131 ia64_set_kr(0, VCPU(vcpu, krs[0]));
132 ia64_set_kr(1, VCPU(vcpu, krs[1]));
133 ia64_set_kr(2, VCPU(vcpu, krs[2]));
134 ia64_set_kr(3, VCPU(vcpu, krs[3]));
135 ia64_set_kr(4, VCPU(vcpu, krs[4]));
136 ia64_set_kr(5, VCPU(vcpu, krs[5]));
137 ia64_set_kr(6, VCPU(vcpu, krs[6]));
138 ia64_set_kr(7, VCPU(vcpu, krs[7]));
139 }
141 /* GCC 4.0.2 seems not to be able to suppress this call! */
142 #define ia64_setreg_unknown_kr() return IA64_ILLOP_FAULT
144 IA64FAULT vcpu_set_ar(VCPU *vcpu, UINT64 reg, UINT64 val)
145 {
146 if (reg == 44) return (vcpu_set_itc(vcpu,val));
147 else if (reg == 27) return (IA64_ILLOP_FAULT);
148 else if (reg == 24)
149 printf("warning: setting ar.eflg is a no-op; no IA-32 support\n");
150 else if (reg > 7) return (IA64_ILLOP_FAULT);
151 else {
152 PSCB(vcpu,krs[reg]) = val;
153 ia64_set_kr(reg,val);
154 }
155 return IA64_NO_FAULT;
156 }
158 IA64FAULT vcpu_get_ar(VCPU *vcpu, UINT64 reg, UINT64 *val)
159 {
160 if (reg == 24)
161 printf("warning: getting ar.eflg is a no-op; no IA-32 support\n");
162 else if (reg > 7) return (IA64_ILLOP_FAULT);
163 else *val = PSCB(vcpu,krs[reg]);
164 return IA64_NO_FAULT;
165 }
167 /**************************************************************************
168 VCPU processor status register access routines
169 **************************************************************************/
171 void vcpu_set_metaphysical_mode(VCPU *vcpu, BOOLEAN newmode)
172 {
173 /* only do something if mode changes */
174 if (!!newmode ^ !!PSCB(vcpu,metaphysical_mode)) {
175 PSCB(vcpu,metaphysical_mode) = newmode;
176 if (newmode) set_metaphysical_rr0();
177 else if (PSCB(vcpu,rrs[0]) != -1)
178 set_one_rr(0, PSCB(vcpu,rrs[0]));
179 }
180 }
182 IA64FAULT vcpu_reset_psr_dt(VCPU *vcpu)
183 {
184 vcpu_set_metaphysical_mode(vcpu,TRUE);
185 return IA64_NO_FAULT;
186 }
188 IA64FAULT vcpu_reset_psr_sm(VCPU *vcpu, UINT64 imm24)
189 {
190 struct ia64_psr psr, imm, *ipsr;
191 REGS *regs = vcpu_regs(vcpu);
193 //PRIVOP_COUNT_ADDR(regs,_RSM);
194 // TODO: All of these bits need to be virtualized
195 // TODO: Only allowed for current vcpu
196 __asm__ __volatile ("mov %0=psr;;" : "=r"(psr) :: "memory");
197 ipsr = (struct ia64_psr *)&regs->cr_ipsr;
198 imm = *(struct ia64_psr *)&imm24;
199 // interrupt flag
200 if (imm.i) PSCB(vcpu,interrupt_delivery_enabled) = 0;
201 if (imm.ic) PSCB(vcpu,interrupt_collection_enabled) = 0;
202 // interrupt collection flag
203 //if (imm.ic) PSCB(vcpu,interrupt_delivery_enabled) = 0;
204 // just handle psr.up and psr.pp for now
205 if (imm24 & ~(IA64_PSR_BE | IA64_PSR_PP | IA64_PSR_UP | IA64_PSR_SP
206 | IA64_PSR_I | IA64_PSR_IC | IA64_PSR_DT
207 | IA64_PSR_DFL | IA64_PSR_DFH))
208 return (IA64_ILLOP_FAULT);
209 if (imm.dfh) ipsr->dfh = 0;
210 if (imm.dfl) ipsr->dfl = 0;
211 if (imm.pp) {
212 ipsr->pp = 1;
213 psr.pp = 1; // priv perf ctrs always enabled
214 // FIXME: need new field in mapped_regs_t for virtual psr.pp (psr.be too?)
215 PSCB(vcpu,tmp[8]) = 0; // but fool the domain if it gets psr
216 }
217 if (imm.up) { ipsr->up = 0; psr.up = 0; }
218 if (imm.sp) { ipsr->sp = 0; psr.sp = 0; }
219 if (imm.be) ipsr->be = 0;
220 if (imm.dt) vcpu_set_metaphysical_mode(vcpu,TRUE);
221 __asm__ __volatile (";; mov psr.l=%0;; srlz.d"::"r"(psr):"memory");
222 return IA64_NO_FAULT;
223 }
225 #define SPURIOUS_VECTOR 0xf
227 IA64FAULT vcpu_set_psr_dt(VCPU *vcpu)
228 {
229 vcpu_set_metaphysical_mode(vcpu,FALSE);
230 return IA64_NO_FAULT;
231 }
233 IA64FAULT vcpu_set_psr_i(VCPU *vcpu)
234 {
235 PSCB(vcpu,interrupt_delivery_enabled) = 1;
236 PSCB(vcpu,interrupt_collection_enabled) = 1;
237 return IA64_NO_FAULT;
238 }
240 IA64FAULT vcpu_set_psr_sm(VCPU *vcpu, UINT64 imm24)
241 {
242 struct ia64_psr psr, imm, *ipsr;
243 REGS *regs = vcpu_regs(vcpu);
244 UINT64 mask, enabling_interrupts = 0;
246 //PRIVOP_COUNT_ADDR(regs,_SSM);
247 // TODO: All of these bits need to be virtualized
248 __asm__ __volatile ("mov %0=psr;;" : "=r"(psr) :: "memory");
249 imm = *(struct ia64_psr *)&imm24;
250 ipsr = (struct ia64_psr *)&regs->cr_ipsr;
251 // just handle psr.sp,pp and psr.i,ic (and user mask) for now
252 mask = IA64_PSR_PP|IA64_PSR_SP|IA64_PSR_I|IA64_PSR_IC|IA64_PSR_UM |
253 IA64_PSR_DT|IA64_PSR_DFL|IA64_PSR_DFH;
254 if (imm24 & ~mask) return (IA64_ILLOP_FAULT);
255 if (imm.dfh) ipsr->dfh = 1;
256 if (imm.dfl) ipsr->dfl = 1;
257 if (imm.pp) {
258 ipsr->pp = 1; psr.pp = 1;
259 // FIXME: need new field in mapped_regs_t for virtual psr.pp (psr.be too?)
260 PSCB(vcpu,tmp[8]) = 1;
261 }
262 if (imm.sp) { ipsr->sp = 1; psr.sp = 1; }
263 if (imm.i) {
264 if (!PSCB(vcpu,interrupt_delivery_enabled)) {
265 //printf("vcpu_set_psr_sm: psr.ic 0->1 ");
266 enabling_interrupts = 1;
267 }
268 PSCB(vcpu,interrupt_delivery_enabled) = 1;
269 }
270 if (imm.ic) PSCB(vcpu,interrupt_collection_enabled) = 1;
271 // TODO: do this faster
272 if (imm.mfl) { ipsr->mfl = 1; psr.mfl = 1; }
273 if (imm.mfh) { ipsr->mfh = 1; psr.mfh = 1; }
274 if (imm.ac) { ipsr->ac = 1; psr.ac = 1; }
275 if (imm.up) { ipsr->up = 1; psr.up = 1; }
276 if (imm.be) {
277 printf("*** DOMAIN TRYING TO TURN ON BIG-ENDIAN!!!\n");
278 return (IA64_ILLOP_FAULT);
279 }
280 if (imm.dt) vcpu_set_metaphysical_mode(vcpu,FALSE);
281 __asm__ __volatile (";; mov psr.l=%0;; srlz.d"::"r"(psr):"memory");
282 if (enabling_interrupts &&
283 vcpu_check_pending_interrupts(vcpu) != SPURIOUS_VECTOR)
284 PSCB(vcpu,pending_interruption) = 1;
285 return IA64_NO_FAULT;
286 }
288 IA64FAULT vcpu_set_psr_l(VCPU *vcpu, UINT64 val)
289 {
290 struct ia64_psr psr, newpsr, *ipsr;
291 REGS *regs = vcpu_regs(vcpu);
292 UINT64 enabling_interrupts = 0;
294 // TODO: All of these bits need to be virtualized
295 __asm__ __volatile ("mov %0=psr;;" : "=r"(psr) :: "memory");
296 newpsr = *(struct ia64_psr *)&val;
297 ipsr = (struct ia64_psr *)&regs->cr_ipsr;
298 // just handle psr.up and psr.pp for now
299 //if (val & ~(IA64_PSR_PP | IA64_PSR_UP | IA64_PSR_SP)) return (IA64_ILLOP_FAULT);
300 // however trying to set other bits can't be an error as it is in ssm
301 if (newpsr.dfh) ipsr->dfh = 1;
302 if (newpsr.dfl) ipsr->dfl = 1;
303 if (newpsr.pp) {
304 ipsr->pp = 1; psr.pp = 1;
305 // FIXME: need new field in mapped_regs_t for virtual psr.pp (psr.be too?)
306 PSCB(vcpu,tmp[8]) = 1;
307 }
308 else {
309 ipsr->pp = 1; psr.pp = 1;
310 PSCB(vcpu,tmp[8]) = 0;
311 }
312 if (newpsr.up) { ipsr->up = 1; psr.up = 1; }
313 if (newpsr.sp) { ipsr->sp = 1; psr.sp = 1; }
314 if (newpsr.i) {
315 if (!PSCB(vcpu,interrupt_delivery_enabled))
316 enabling_interrupts = 1;
317 PSCB(vcpu,interrupt_delivery_enabled) = 1;
318 }
319 if (newpsr.ic) PSCB(vcpu,interrupt_collection_enabled) = 1;
320 if (newpsr.mfl) { ipsr->mfl = 1; psr.mfl = 1; }
321 if (newpsr.mfh) { ipsr->mfh = 1; psr.mfh = 1; }
322 if (newpsr.ac) { ipsr->ac = 1; psr.ac = 1; }
323 if (newpsr.up) { ipsr->up = 1; psr.up = 1; }
324 if (newpsr.dt && newpsr.rt) vcpu_set_metaphysical_mode(vcpu,FALSE);
325 else vcpu_set_metaphysical_mode(vcpu,TRUE);
326 if (newpsr.be) {
327 printf("*** DOMAIN TRYING TO TURN ON BIG-ENDIAN!!!\n");
328 return (IA64_ILLOP_FAULT);
329 }
330 if (enabling_interrupts &&
331 vcpu_check_pending_interrupts(vcpu) != SPURIOUS_VECTOR)
332 PSCB(vcpu,pending_interruption) = 1;
333 return IA64_NO_FAULT;
334 }
336 IA64FAULT vcpu_get_psr(VCPU *vcpu, UINT64 *pval)
337 {
338 REGS *regs = vcpu_regs(vcpu);
339 struct ia64_psr newpsr;
341 newpsr = *(struct ia64_psr *)&regs->cr_ipsr;
342 if (newpsr.cpl == 2) newpsr.cpl = 0;
343 if (PSCB(vcpu,interrupt_delivery_enabled)) newpsr.i = 1;
344 else newpsr.i = 0;
345 if (PSCB(vcpu,interrupt_collection_enabled)) newpsr.ic = 1;
346 else newpsr.ic = 0;
347 if (PSCB(vcpu,metaphysical_mode)) newpsr.dt = 0;
348 else newpsr.dt = 1;
349 // FIXME: need new field in mapped_regs_t for virtual psr.pp (psr.be too?)
350 if (PSCB(vcpu,tmp[8])) newpsr.pp = 1;
351 else newpsr.pp = 0;
352 *pval = *(unsigned long *)&newpsr;
353 return IA64_NO_FAULT;
354 }
356 BOOLEAN vcpu_get_psr_ic(VCPU *vcpu)
357 {
358 return !!PSCB(vcpu,interrupt_collection_enabled);
359 }
361 BOOLEAN vcpu_get_psr_i(VCPU *vcpu)
362 {
363 return !!PSCB(vcpu,interrupt_delivery_enabled);
364 }
366 UINT64 vcpu_get_ipsr_int_state(VCPU *vcpu,UINT64 prevpsr)
367 {
368 UINT64 dcr = PSCBX(vcpu,dcr);
369 PSR psr;
371 //printf("*** vcpu_get_ipsr_int_state (0x%016lx)...",prevpsr);
372 psr.i64 = prevpsr;
373 psr.ia64_psr.be = 0; if (dcr & IA64_DCR_BE) psr.ia64_psr.be = 1;
374 psr.ia64_psr.pp = 0; if (dcr & IA64_DCR_PP) psr.ia64_psr.pp = 1;
375 psr.ia64_psr.ic = PSCB(vcpu,interrupt_collection_enabled);
376 psr.ia64_psr.i = PSCB(vcpu,interrupt_delivery_enabled);
377 psr.ia64_psr.bn = PSCB(vcpu,banknum);
378 psr.ia64_psr.dt = 1; psr.ia64_psr.it = 1; psr.ia64_psr.rt = 1;
379 if (psr.ia64_psr.cpl == 2) psr.ia64_psr.cpl = 0; // !!!! fool domain
380 // psr.pk = 1;
381 //printf("returns 0x%016lx...",psr.i64);
382 return psr.i64;
383 }
385 /**************************************************************************
386 VCPU control register access routines
387 **************************************************************************/
389 IA64FAULT vcpu_get_dcr(VCPU *vcpu, UINT64 *pval)
390 {
391 //extern unsigned long privop_trace;
392 //privop_trace=0;
393 //verbose("vcpu_get_dcr: called @%p\n",PSCB(vcpu,iip));
394 // Reads of cr.dcr on Xen always have the sign bit set, so
395 // a domain can differentiate whether it is running on SP or not
396 *pval = PSCBX(vcpu,dcr) | 0x8000000000000000L;
397 return (IA64_NO_FAULT);
398 }
400 IA64FAULT vcpu_get_iva(VCPU *vcpu, UINT64 *pval)
401 {
402 if(VMX_DOMAIN(vcpu)){
403 *pval = PSCB(vcpu,iva) & ~0x7fffL;
404 }else{
405 *pval = PSCBX(vcpu,iva) & ~0x7fffL;
406 }
407 return (IA64_NO_FAULT);
408 }
410 IA64FAULT vcpu_get_pta(VCPU *vcpu, UINT64 *pval)
411 {
412 *pval = PSCB(vcpu,pta);
413 return (IA64_NO_FAULT);
414 }
416 IA64FAULT vcpu_get_ipsr(VCPU *vcpu, UINT64 *pval)
417 {
418 //REGS *regs = vcpu_regs(vcpu);
419 //*pval = regs->cr_ipsr;
420 *pval = PSCB(vcpu,ipsr);
421 return (IA64_NO_FAULT);
422 }
424 IA64FAULT vcpu_get_isr(VCPU *vcpu, UINT64 *pval)
425 {
426 *pval = PSCB(vcpu,isr);
427 return (IA64_NO_FAULT);
428 }
430 IA64FAULT vcpu_get_iip(VCPU *vcpu, UINT64 *pval)
431 {
432 //REGS *regs = vcpu_regs(vcpu);
433 //*pval = regs->cr_iip;
434 *pval = PSCB(vcpu,iip);
435 return (IA64_NO_FAULT);
436 }
438 IA64FAULT vcpu_get_ifa(VCPU *vcpu, UINT64 *pval)
439 {
440 UINT64 val = PSCB(vcpu,ifa);
441 REGS *regs = vcpu_regs(vcpu);
442 PRIVOP_COUNT_ADDR(regs,_GET_IFA);
443 *pval = val;
444 return (IA64_NO_FAULT);
445 }
447 unsigned long vcpu_get_rr_ps(VCPU *vcpu,UINT64 vadr)
448 {
449 ia64_rr rr;
451 rr.rrval = PSCB(vcpu,rrs)[vadr>>61];
452 return(rr.ps);
453 }
455 unsigned long vcpu_get_rr_rid(VCPU *vcpu,UINT64 vadr)
456 {
457 ia64_rr rr;
459 rr.rrval = PSCB(vcpu,rrs)[vadr>>61];
460 return(rr.rid);
461 }
463 unsigned long vcpu_get_itir_on_fault(VCPU *vcpu, UINT64 ifa)
464 {
465 ia64_rr rr;
467 rr.rrval = 0;
468 rr.ps = vcpu_get_rr_ps(vcpu,ifa);
469 rr.rid = vcpu_get_rr_rid(vcpu,ifa);
470 return (rr.rrval);
471 }
474 IA64FAULT vcpu_get_itir(VCPU *vcpu, UINT64 *pval)
475 {
476 UINT64 val = PSCB(vcpu,itir);
477 *pval = val;
478 return (IA64_NO_FAULT);
479 }
481 IA64FAULT vcpu_get_iipa(VCPU *vcpu, UINT64 *pval)
482 {
483 UINT64 val = PSCB(vcpu,iipa);
484 // SP entry code does not save iipa yet nor does it get
485 // properly delivered in the pscb
486 // printf("*** vcpu_get_iipa: cr.iipa not fully implemented yet!!\n");
487 *pval = val;
488 return (IA64_NO_FAULT);
489 }
491 IA64FAULT vcpu_get_ifs(VCPU *vcpu, UINT64 *pval)
492 {
493 //PSCB(vcpu,ifs) = PSCB(vcpu)->regs.cr_ifs;
494 //*pval = PSCB(vcpu,regs).cr_ifs;
495 *pval = PSCB(vcpu,ifs);
496 PSCB(vcpu,incomplete_regframe) = 0;
497 return (IA64_NO_FAULT);
498 }
500 IA64FAULT vcpu_get_iim(VCPU *vcpu, UINT64 *pval)
501 {
502 UINT64 val = PSCB(vcpu,iim);
503 *pval = val;
504 return (IA64_NO_FAULT);
505 }
507 IA64FAULT vcpu_get_iha(VCPU *vcpu, UINT64 *pval)
508 {
509 //return vcpu_thash(vcpu,PSCB(vcpu,ifa),pval);
510 UINT64 val = PSCB(vcpu,iha);
511 REGS *regs = vcpu_regs(vcpu);
512 PRIVOP_COUNT_ADDR(regs,_THASH);
513 *pval = val;
514 return (IA64_NO_FAULT);
515 }
517 IA64FAULT vcpu_set_dcr(VCPU *vcpu, UINT64 val)
518 {
519 //extern unsigned long privop_trace;
520 //privop_trace=1;
521 // Reads of cr.dcr on SP always have the sign bit set, so
522 // a domain can differentiate whether it is running on SP or not
523 // Thus, writes of DCR should ignore the sign bit
524 //verbose("vcpu_set_dcr: called\n");
525 PSCBX(vcpu,dcr) = val & ~0x8000000000000000L;
526 return (IA64_NO_FAULT);
527 }
529 IA64FAULT vcpu_set_iva(VCPU *vcpu, UINT64 val)
530 {
531 if(VMX_DOMAIN(vcpu)){
532 PSCB(vcpu,iva) = val & ~0x7fffL;
533 }else{
534 PSCBX(vcpu,iva) = val & ~0x7fffL;
535 }
536 return (IA64_NO_FAULT);
537 }
539 IA64FAULT vcpu_set_pta(VCPU *vcpu, UINT64 val)
540 {
541 if (val & IA64_PTA_LFMT) {
542 printf("*** No support for VHPT long format yet!!\n");
543 return (IA64_ILLOP_FAULT);
544 }
545 if (val & (0x3f<<9)) /* reserved fields */ return IA64_RSVDREG_FAULT;
546 if (val & 2) /* reserved fields */ return IA64_RSVDREG_FAULT;
547 PSCB(vcpu,pta) = val;
548 return IA64_NO_FAULT;
549 }
551 IA64FAULT vcpu_set_ipsr(VCPU *vcpu, UINT64 val)
552 {
553 PSCB(vcpu,ipsr) = val;
554 return IA64_NO_FAULT;
555 }
557 IA64FAULT vcpu_set_isr(VCPU *vcpu, UINT64 val)
558 {
559 PSCB(vcpu,isr) = val;
560 return IA64_NO_FAULT;
561 }
563 IA64FAULT vcpu_set_iip(VCPU *vcpu, UINT64 val)
564 {
565 PSCB(vcpu,iip) = val;
566 return IA64_NO_FAULT;
567 }
569 IA64FAULT vcpu_increment_iip(VCPU *vcpu)
570 {
571 REGS *regs = vcpu_regs(vcpu);
572 struct ia64_psr *ipsr = (struct ia64_psr *)&regs->cr_ipsr;
573 if (ipsr->ri == 2) { ipsr->ri=0; regs->cr_iip += 16; }
574 else ipsr->ri++;
575 return (IA64_NO_FAULT);
576 }
578 IA64FAULT vcpu_set_ifa(VCPU *vcpu, UINT64 val)
579 {
580 PSCB(vcpu,ifa) = val;
581 return IA64_NO_FAULT;
582 }
584 IA64FAULT vcpu_set_itir(VCPU *vcpu, UINT64 val)
585 {
586 PSCB(vcpu,itir) = val;
587 return IA64_NO_FAULT;
588 }
590 IA64FAULT vcpu_set_iipa(VCPU *vcpu, UINT64 val)
591 {
592 // SP entry code does not save iipa yet nor does it get
593 // properly delivered in the pscb
594 // printf("*** vcpu_set_iipa: cr.iipa not fully implemented yet!!\n");
595 PSCB(vcpu,iipa) = val;
596 return IA64_NO_FAULT;
597 }
599 IA64FAULT vcpu_set_ifs(VCPU *vcpu, UINT64 val)
600 {
601 //REGS *regs = vcpu_regs(vcpu);
602 PSCB(vcpu,ifs) = val;
603 return IA64_NO_FAULT;
604 }
606 IA64FAULT vcpu_set_iim(VCPU *vcpu, UINT64 val)
607 {
608 PSCB(vcpu,iim) = val;
609 return IA64_NO_FAULT;
610 }
612 IA64FAULT vcpu_set_iha(VCPU *vcpu, UINT64 val)
613 {
614 PSCB(vcpu,iha) = val;
615 return IA64_NO_FAULT;
616 }
618 /**************************************************************************
619 VCPU interrupt control register access routines
620 **************************************************************************/
622 void vcpu_pend_unspecified_interrupt(VCPU *vcpu)
623 {
624 PSCB(vcpu,pending_interruption) = 1;
625 }
627 void vcpu_pend_interrupt(VCPU *vcpu, UINT64 vector)
628 {
629 if (vector & ~0xff) {
630 printf("vcpu_pend_interrupt: bad vector\n");
631 return;
632 }
633 if ( VMX_DOMAIN(vcpu) ) {
634 set_bit(vector,VCPU(vcpu,irr));
635 } else
636 {
637 if (test_bit(vector,PSCBX(vcpu,irr))) {
638 //printf("vcpu_pend_interrupt: overrun\n");
639 }
640 set_bit(vector,PSCBX(vcpu,irr));
641 PSCB(vcpu,pending_interruption) = 1;
642 }
643 }
645 #define IA64_TPR_MMI 0x10000
646 #define IA64_TPR_MIC 0x000f0
648 /* checks to see if a VCPU has any unmasked pending interrupts
649 * if so, returns the highest, else returns SPURIOUS_VECTOR */
650 /* NOTE: Since this gets called from vcpu_get_ivr() and the
651 * semantics of "mov rx=cr.ivr" ignore the setting of the psr.i bit,
652 * this routine also ignores pscb.interrupt_delivery_enabled
653 * and this must be checked independently; see vcpu_deliverable_interrupts()
654 UINT64 vcpu_check_pending_interrupts(VCPU *vcpu)
655 {
656 UINT64 *p, *r, bits, bitnum, mask, i, vector;
658 /* Always check for a pending event, since the guest may just ack
659 * the event injection without handling it. The guest may later
660 * throw out the event itself.
661 */
662 check_start:
663 if (event_pending(vcpu) &&
664 !test_bit(vcpu->vcpu_info->arch.evtchn_vector,
665 &PSCBX(vcpu, insvc[0])))
666 vcpu_pend_interrupt(vcpu, vcpu->vcpu_info->arch.evtchn_vector);
668 p = &PSCBX(vcpu,irr[3]);
669 r = &PSCBX(vcpu,insvc[3]);
670 for (i = 3; ; p--, r--, i--) {
671 bits = *p ;
672 if (bits) break; // got a potential interrupt
673 if (*r) {
674 // nothing in this word which is pending+inservice
675 // but there is one inservice which masks lower
676 return SPURIOUS_VECTOR;
677 }
678 if (i == 0) {
679 // checked all bits... nothing pending+inservice
680 return SPURIOUS_VECTOR;
681 }
682 }
683 // have a pending,deliverable interrupt... see if it is masked
684 bitnum = ia64_fls(bits);
685 //printf("XXXXXXX vcpu_check_pending_interrupts: got bitnum=%p...",bitnum);
686 vector = bitnum+(i*64);
687 mask = 1L << bitnum;
688 /* sanity check for guest timer interrupt */
689 if (vector == (PSCB(vcpu,itv) & 0xff)) {
690 uint64_t now = ia64_get_itc();
691 if (now < PSCBX(vcpu,domain_itm)) {
692 // printk("Ooops, pending guest timer before its due\n");
693 PSCBX(vcpu,irr[i]) &= ~mask;
694 goto check_start;
695 }
696 }
697 //printf("XXXXXXX vcpu_check_pending_interrupts: got vector=%p...",vector);
698 if (*r >= mask) {
699 // masked by equal inservice
700 //printf("but masked by equal inservice\n");
701 return SPURIOUS_VECTOR;
702 }
703 if (PSCB(vcpu,tpr) & IA64_TPR_MMI) {
704 // tpr.mmi is set
705 //printf("but masked by tpr.mmi\n");
706 return SPURIOUS_VECTOR;
707 }
708 if (((PSCB(vcpu,tpr) & IA64_TPR_MIC) + 15) >= vector) {
709 //tpr.mic masks class
710 //printf("but masked by tpr.mic\n");
711 return SPURIOUS_VECTOR;
712 }
714 //printf("returned to caller\n");
715 return vector;
716 }
718 UINT64 vcpu_deliverable_interrupts(VCPU *vcpu)
719 {
720 return (vcpu_get_psr_i(vcpu) &&
721 vcpu_check_pending_interrupts(vcpu) != SPURIOUS_VECTOR);
722 }
724 UINT64 vcpu_deliverable_timer(VCPU *vcpu)
725 {
726 return (vcpu_get_psr_i(vcpu) &&
727 vcpu_check_pending_interrupts(vcpu) == PSCB(vcpu,itv));
728 }
730 IA64FAULT vcpu_get_lid(VCPU *vcpu, UINT64 *pval)
731 {
732 /* Use real LID for domain0 until vIOSAPIC is present.
733 Use EID=0, ID=vcpu_id for domU. */
734 if (vcpu->domain == dom0)
735 *pval = ia64_getreg(_IA64_REG_CR_LID);
736 else
737 *pval = vcpu->vcpu_id << 24;
738 return IA64_NO_FAULT;
739 }
741 IA64FAULT vcpu_get_ivr(VCPU *vcpu, UINT64 *pval)
742 {
743 int i;
744 UINT64 vector, mask;
746 #define HEARTBEAT_FREQ 16 // period in seconds
747 #ifdef HEARTBEAT_FREQ
748 #define N_DOMS 16 // number of domains tracked
749 #if 0
750 static long count[N_DOMS] = { 0 };
751 #endif
752 static long nonclockcount[N_DOMS] = { 0 };
753 unsigned domid = vcpu->domain->domain_id;
754 #endif
755 #ifdef IRQ_DEBUG
756 static char firstivr = 1;
757 static char firsttime[256];
758 if (firstivr) {
759 int i;
760 for (i=0;i<256;i++) firsttime[i]=1;
761 firstivr=0;
762 }
763 #endif
765 vector = vcpu_check_pending_interrupts(vcpu);
766 if (vector == SPURIOUS_VECTOR) {
767 PSCB(vcpu,pending_interruption) = 0;
768 *pval = vector;
769 return IA64_NO_FAULT;
770 }
771 #ifdef HEARTBEAT_FREQ
772 if (domid >= N_DOMS) domid = N_DOMS-1;
773 #if 0
774 if (vector == (PSCB(vcpu,itv) & 0xff)) {
775 if (!(++count[domid] & ((HEARTBEAT_FREQ*1024)-1))) {
776 printf("Dom%d heartbeat... ticks=%lx,nonticks=%lx\n",
777 domid, count[domid], nonclockcount[domid]);
778 //count[domid] = 0;
779 //dump_runq();
780 }
781 }
782 #endif
783 else nonclockcount[domid]++;
784 #endif
785 // now have an unmasked, pending, deliverable vector!
786 // getting ivr has "side effects"
787 #ifdef IRQ_DEBUG
788 if (firsttime[vector]) {
789 printf("*** First get_ivr on vector=%lu,itc=%lx\n",
790 vector,ia64_get_itc());
791 firsttime[vector]=0;
792 }
793 #endif
794 /* if delivering a timer interrupt, remember domain_itm, which
795 * needs to be done before clearing irr
796 */
797 if (vector == (PSCB(vcpu,itv) & 0xff)) {
798 PSCBX(vcpu,domain_itm_last) = PSCBX(vcpu,domain_itm);
799 }
801 i = vector >> 6;
802 mask = 1L << (vector & 0x3f);
803 //printf("ZZZZZZ vcpu_get_ivr: setting insvc mask for vector %lu\n",vector);
804 PSCBX(vcpu,insvc[i]) |= mask;
805 PSCBX(vcpu,irr[i]) &= ~mask;
806 //PSCB(vcpu,pending_interruption)--;
807 *pval = vector;
808 return IA64_NO_FAULT;
809 }
811 IA64FAULT vcpu_get_tpr(VCPU *vcpu, UINT64 *pval)
812 {
813 *pval = PSCB(vcpu,tpr);
814 return (IA64_NO_FAULT);
815 }
817 IA64FAULT vcpu_get_eoi(VCPU *vcpu, UINT64 *pval)
818 {
819 *pval = 0L; // reads of eoi always return 0
820 return (IA64_NO_FAULT);
821 }
823 IA64FAULT vcpu_get_irr0(VCPU *vcpu, UINT64 *pval)
824 {
825 #ifndef IRR_USE_FIXED
826 printk("vcpu_get_irr: called, not implemented yet\n");
827 return IA64_ILLOP_FAULT;
828 #else
829 *pval = vcpu->irr[0];
830 return (IA64_NO_FAULT);
831 #endif
832 }
834 IA64FAULT vcpu_get_irr1(VCPU *vcpu, UINT64 *pval)
835 {
836 #ifndef IRR_USE_FIXED
837 printk("vcpu_get_irr: called, not implemented yet\n");
838 return IA64_ILLOP_FAULT;
839 #else
840 *pval = vcpu->irr[1];
841 return (IA64_NO_FAULT);
842 #endif
843 }
845 IA64FAULT vcpu_get_irr2(VCPU *vcpu, UINT64 *pval)
846 {
847 #ifndef IRR_USE_FIXED
848 printk("vcpu_get_irr: called, not implemented yet\n");
849 return IA64_ILLOP_FAULT;
850 #else
851 *pval = vcpu->irr[2];
852 return (IA64_NO_FAULT);
853 #endif
854 }
856 IA64FAULT vcpu_get_irr3(VCPU *vcpu, UINT64 *pval)
857 {
858 #ifndef IRR_USE_FIXED
859 printk("vcpu_get_irr: called, not implemented yet\n");
860 return IA64_ILLOP_FAULT;
861 #else
862 *pval = vcpu->irr[3];
863 return (IA64_NO_FAULT);
864 #endif
865 }
867 IA64FAULT vcpu_get_itv(VCPU *vcpu, UINT64 *pval)
868 {
869 *pval = PSCB(vcpu,itv);
870 return (IA64_NO_FAULT);
871 }
873 IA64FAULT vcpu_get_pmv(VCPU *vcpu, UINT64 *pval)
874 {
875 *pval = PSCB(vcpu,pmv);
876 return (IA64_NO_FAULT);
877 }
879 IA64FAULT vcpu_get_cmcv(VCPU *vcpu, UINT64 *pval)
880 {
881 *pval = PSCB(vcpu,cmcv);
882 return (IA64_NO_FAULT);
883 }
885 IA64FAULT vcpu_get_lrr0(VCPU *vcpu, UINT64 *pval)
886 {
887 // fix this when setting values other than m-bit is supported
888 printf("vcpu_get_lrr0: Unmasked interrupts unsupported\n");
889 *pval = (1L << 16);
890 return (IA64_NO_FAULT);
891 }
893 IA64FAULT vcpu_get_lrr1(VCPU *vcpu, UINT64 *pval)
894 {
895 // fix this when setting values other than m-bit is supported
896 printf("vcpu_get_lrr1: Unmasked interrupts unsupported\n");
897 *pval = (1L << 16);
898 return (IA64_NO_FAULT);
899 }
901 IA64FAULT vcpu_set_lid(VCPU *vcpu, UINT64 val)
902 {
903 printf("vcpu_set_lid: Setting cr.lid is unsupported\n");
904 return (IA64_ILLOP_FAULT);
905 }
907 IA64FAULT vcpu_set_tpr(VCPU *vcpu, UINT64 val)
908 {
909 if (val & 0xff00) return IA64_RSVDREG_FAULT;
910 PSCB(vcpu,tpr) = val;
911 /* This can unmask interrupts. */
912 if (vcpu_check_pending_interrupts(vcpu) != SPURIOUS_VECTOR)
913 PSCB(vcpu,pending_interruption) = 1;
914 return (IA64_NO_FAULT);
915 }
917 IA64FAULT vcpu_set_eoi(VCPU *vcpu, UINT64 val)
918 {
919 UINT64 *p, bits, vec, bitnum;
920 int i;
922 p = &PSCBX(vcpu,insvc[3]);
923 for (i = 3; (i >= 0) && !(bits = *p); i--, p--);
924 if (i < 0) {
925 printf("Trying to EOI interrupt when none are in-service.\n");
926 return IA64_NO_FAULT;
927 }
928 bitnum = ia64_fls(bits);
929 vec = bitnum + (i*64);
930 /* clear the correct bit */
931 bits &= ~(1L << bitnum);
932 *p = bits;
933 /* clearing an eoi bit may unmask another pending interrupt... */
934 if (PSCB(vcpu,interrupt_delivery_enabled)) { // but only if enabled...
935 // worry about this later... Linux only calls eoi
936 // with interrupts disabled
937 printf("Trying to EOI interrupt with interrupts enabled\n");
938 }
939 if (vcpu_check_pending_interrupts(vcpu) != SPURIOUS_VECTOR)
940 PSCB(vcpu,pending_interruption) = 1;
941 //printf("YYYYY vcpu_set_eoi: Successful\n");
942 return (IA64_NO_FAULT);
943 }
945 IA64FAULT vcpu_set_lrr0(VCPU *vcpu, UINT64 val)
946 {
947 if (!(val & (1L << 16))) {
948 printf("vcpu_set_lrr0: Unmasked interrupts unsupported\n");
949 return (IA64_ILLOP_FAULT);
950 }
951 // no place to save this state but nothing to do anyway
952 return (IA64_NO_FAULT);
953 }
955 IA64FAULT vcpu_set_lrr1(VCPU *vcpu, UINT64 val)
956 {
957 if (!(val & (1L << 16))) {
958 printf("vcpu_set_lrr0: Unmasked interrupts unsupported\n");
959 return (IA64_ILLOP_FAULT);
960 }
961 // no place to save this state but nothing to do anyway
962 return (IA64_NO_FAULT);
963 }
965 IA64FAULT vcpu_set_itv(VCPU *vcpu, UINT64 val)
966 {
967 //extern unsigned long privop_trace;
968 //privop_trace=1;
969 if (val & 0xef00) return (IA64_ILLOP_FAULT);
970 PSCB(vcpu,itv) = val;
971 if (val & 0x10000) {
972 printf("**** vcpu_set_itv(%lu): vitm=%lx, setting to 0\n",
973 val,PSCBX(vcpu,domain_itm));
974 PSCBX(vcpu,domain_itm) = 0;
975 }
976 else vcpu_set_next_timer(vcpu);
977 return (IA64_NO_FAULT);
978 }
980 IA64FAULT vcpu_set_pmv(VCPU *vcpu, UINT64 val)
981 {
982 if (val & 0xef00) /* reserved fields */ return IA64_RSVDREG_FAULT;
983 PSCB(vcpu,pmv) = val;
984 return (IA64_NO_FAULT);
985 }
987 IA64FAULT vcpu_set_cmcv(VCPU *vcpu, UINT64 val)
988 {
989 if (val & 0xef00) /* reserved fields */ return IA64_RSVDREG_FAULT;
990 PSCB(vcpu,cmcv) = val;
991 return (IA64_NO_FAULT);
992 }
994 /**************************************************************************
995 VCPU temporary register access routines
996 **************************************************************************/
997 UINT64 vcpu_get_tmp(VCPU *vcpu, UINT64 index)
998 {
999 if (index > 7) return 0;
1000 return PSCB(vcpu,tmp[index]);
1003 void vcpu_set_tmp(VCPU *vcpu, UINT64 index, UINT64 val)
1005 if (index <= 7) PSCB(vcpu,tmp[index]) = val;
1008 /**************************************************************************
1009 Interval timer routines
1010 **************************************************************************/
1012 BOOLEAN vcpu_timer_disabled(VCPU *vcpu)
1014 UINT64 itv = PSCB(vcpu,itv);
1015 return(!itv || !!(itv & 0x10000));
1018 BOOLEAN vcpu_timer_inservice(VCPU *vcpu)
1020 UINT64 itv = PSCB(vcpu,itv);
1021 return (test_bit(itv, PSCBX(vcpu,insvc)));
1024 BOOLEAN vcpu_timer_expired(VCPU *vcpu)
1026 unsigned long domain_itm = PSCBX(vcpu,domain_itm);
1027 unsigned long now = ia64_get_itc();
1029 if (!domain_itm) return FALSE;
1030 if (now < domain_itm) return FALSE;
1031 if (vcpu_timer_disabled(vcpu)) return FALSE;
1032 return TRUE;
1035 void vcpu_safe_set_itm(unsigned long val)
1037 unsigned long epsilon = 100;
1038 unsigned long flags;
1039 UINT64 now = ia64_get_itc();
1041 local_irq_save(flags);
1042 while (1) {
1043 //printf("*** vcpu_safe_set_itm: Setting itm to %lx, itc=%lx\n",val,now);
1044 ia64_set_itm(val);
1045 if (val > (now = ia64_get_itc())) break;
1046 val = now + epsilon;
1047 epsilon <<= 1;
1049 local_irq_restore(flags);
1052 void vcpu_set_next_timer(VCPU *vcpu)
1054 UINT64 d = PSCBX(vcpu,domain_itm);
1055 //UINT64 s = PSCBX(vcpu,xen_itm);
1056 UINT64 s = local_cpu_data->itm_next;
1057 UINT64 now = ia64_get_itc();
1059 /* gloss over the wraparound problem for now... we know it exists
1060 * but it doesn't matter right now */
1062 if (is_idle_domain(vcpu->domain)) {
1063 // printf("****** vcpu_set_next_timer called during idle!!\n");
1064 vcpu_safe_set_itm(s);
1065 return;
1067 //s = PSCBX(vcpu,xen_itm);
1068 if (d && (d > now) && (d < s)) {
1069 vcpu_safe_set_itm(d);
1070 //using_domain_as_itm++;
1072 else {
1073 vcpu_safe_set_itm(s);
1074 //using_xen_as_itm++;
1078 IA64FAULT vcpu_set_itm(VCPU *vcpu, UINT64 val)
1080 //UINT now = ia64_get_itc();
1082 //if (val < now) val = now + 1000;
1083 //printf("*** vcpu_set_itm: called with %lx\n",val);
1084 PSCBX(vcpu,domain_itm) = val;
1085 vcpu_set_next_timer(vcpu);
1086 return (IA64_NO_FAULT);
1089 IA64FAULT vcpu_set_itc(VCPU *vcpu, UINT64 val)
1091 #define DISALLOW_SETTING_ITC_FOR_NOW
1092 #ifdef DISALLOW_SETTING_ITC_FOR_NOW
1093 printf("vcpu_set_itc: Setting ar.itc is currently disabled\n");
1094 #else
1095 UINT64 oldnow = ia64_get_itc();
1096 UINT64 olditm = PSCBX(vcpu,domain_itm);
1097 unsigned long d = olditm - oldnow;
1098 unsigned long x = local_cpu_data->itm_next - oldnow;
1100 UINT64 newnow = val, min_delta;
1102 local_irq_disable();
1103 if (olditm) {
1104 printf("**** vcpu_set_itc(%lx): vitm changed to %lx\n",val,newnow+d);
1105 PSCBX(vcpu,domain_itm) = newnow + d;
1107 local_cpu_data->itm_next = newnow + x;
1108 d = PSCBX(vcpu,domain_itm);
1109 x = local_cpu_data->itm_next;
1111 ia64_set_itc(newnow);
1112 if (d && (d > newnow) && (d < x)) {
1113 vcpu_safe_set_itm(d);
1114 //using_domain_as_itm++;
1116 else {
1117 vcpu_safe_set_itm(x);
1118 //using_xen_as_itm++;
1120 local_irq_enable();
1121 #endif
1122 return (IA64_NO_FAULT);
1125 IA64FAULT vcpu_get_itm(VCPU *vcpu, UINT64 *pval)
1127 //FIXME: Implement this
1128 printf("vcpu_get_itm: Getting cr.itm is unsupported... continuing\n");
1129 return (IA64_NO_FAULT);
1130 //return (IA64_ILLOP_FAULT);
1133 IA64FAULT vcpu_get_itc(VCPU *vcpu, UINT64 *pval)
1135 //TODO: Implement this
1136 printf("vcpu_get_itc: Getting ar.itc is unsupported\n");
1137 return (IA64_ILLOP_FAULT);
1140 void vcpu_pend_timer(VCPU *vcpu)
1142 UINT64 itv = PSCB(vcpu,itv) & 0xff;
1144 if (vcpu_timer_disabled(vcpu)) return;
1145 //if (vcpu_timer_inservice(vcpu)) return;
1146 if (PSCBX(vcpu,domain_itm_last) == PSCBX(vcpu,domain_itm)) {
1147 // already delivered an interrupt for this so
1148 // don't deliver another
1149 return;
1151 vcpu_pend_interrupt(vcpu, itv);
1154 // returns true if ready to deliver a timer interrupt too early
1155 UINT64 vcpu_timer_pending_early(VCPU *vcpu)
1157 UINT64 now = ia64_get_itc();
1158 UINT64 itm = PSCBX(vcpu,domain_itm);
1160 if (vcpu_timer_disabled(vcpu)) return 0;
1161 if (!itm) return 0;
1162 return (vcpu_deliverable_timer(vcpu) && (now < itm));
1165 /**************************************************************************
1166 Privileged operation emulation routines
1167 **************************************************************************/
1169 IA64FAULT vcpu_force_data_miss(VCPU *vcpu, UINT64 ifa)
1171 PSCB(vcpu,ifa) = ifa;
1172 PSCB(vcpu,itir) = vcpu_get_itir_on_fault(vcpu,ifa);
1173 vcpu_thash(current, ifa, &PSCB(current,iha));
1174 return (vcpu_get_rr_ve(vcpu,ifa) ? IA64_DATA_TLB_VECTOR : IA64_ALT_DATA_TLB_VECTOR);
1178 IA64FAULT vcpu_rfi(VCPU *vcpu)
1180 // TODO: Only allowed for current vcpu
1181 PSR psr;
1182 UINT64 int_enable, regspsr = 0;
1183 UINT64 ifs;
1184 REGS *regs = vcpu_regs(vcpu);
1185 extern void dorfirfi(void);
1187 psr.i64 = PSCB(vcpu,ipsr);
1188 if (psr.ia64_psr.cpl < 3) psr.ia64_psr.cpl = 2;
1189 if (psr.ia64_psr.i) PSCB(vcpu,interrupt_delivery_enabled) = 1;
1190 int_enable = psr.ia64_psr.i;
1191 if (psr.ia64_psr.ic) PSCB(vcpu,interrupt_collection_enabled) = 1;
1192 if (psr.ia64_psr.dt && psr.ia64_psr.rt && psr.ia64_psr.it) vcpu_set_metaphysical_mode(vcpu,FALSE);
1193 else vcpu_set_metaphysical_mode(vcpu,TRUE);
1194 psr.ia64_psr.ic = 1; psr.ia64_psr.i = 1;
1195 psr.ia64_psr.dt = 1; psr.ia64_psr.rt = 1; psr.ia64_psr.it = 1;
1196 psr.ia64_psr.bn = 1;
1197 //psr.pk = 1; // checking pkeys shouldn't be a problem but seems broken
1198 if (psr.ia64_psr.be) {
1199 printf("*** DOMAIN TRYING TO TURN ON BIG-ENDIAN!!!\n");
1200 return (IA64_ILLOP_FAULT);
1202 PSCB(vcpu,incomplete_regframe) = 0; // is this necessary?
1203 ifs = PSCB(vcpu,ifs);
1204 //if ((ifs & regs->cr_ifs & 0x8000000000000000L) && ifs != regs->cr_ifs) {
1205 //if ((ifs & 0x8000000000000000L) && ifs != regs->cr_ifs) {
1206 if (ifs & regs->cr_ifs & 0x8000000000000000L) {
1207 // TODO: validate PSCB(vcpu,iip)
1208 // TODO: PSCB(vcpu,ipsr) = psr;
1209 PSCB(vcpu,ipsr) = psr.i64;
1210 // now set up the trampoline
1211 regs->cr_iip = *(unsigned long *)dorfirfi; // function pointer!!
1212 __asm__ __volatile ("mov %0=psr;;":"=r"(regspsr)::"memory");
1213 regs->cr_ipsr = regspsr & ~(IA64_PSR_I | IA64_PSR_IC | IA64_PSR_BN);
1215 else {
1216 regs->cr_ipsr = psr.i64;
1217 regs->cr_iip = PSCB(vcpu,iip);
1219 PSCB(vcpu,interrupt_collection_enabled) = 1;
1220 vcpu_bsw1(vcpu);
1221 PSCB(vcpu,interrupt_delivery_enabled) = int_enable;
1222 return (IA64_NO_FAULT);
1225 IA64FAULT vcpu_cover(VCPU *vcpu)
1227 // TODO: Only allowed for current vcpu
1228 REGS *regs = vcpu_regs(vcpu);
1230 if (!PSCB(vcpu,interrupt_collection_enabled)) {
1231 if (!PSCB(vcpu,incomplete_regframe))
1232 PSCB(vcpu,ifs) = regs->cr_ifs;
1233 else PSCB(vcpu,incomplete_regframe) = 0;
1235 regs->cr_ifs = 0;
1236 return (IA64_NO_FAULT);
1239 IA64FAULT vcpu_thash(VCPU *vcpu, UINT64 vadr, UINT64 *pval)
1241 UINT64 pta = PSCB(vcpu,pta);
1242 UINT64 pta_sz = (pta & IA64_PTA_SZ(0x3f)) >> IA64_PTA_SZ_BIT;
1243 UINT64 pta_base = pta & ~((1UL << IA64_PTA_BASE_BIT)-1);
1244 UINT64 Mask = (1L << pta_sz) - 1;
1245 UINT64 Mask_60_15 = (Mask >> 15) & 0x3fffffffffff;
1246 UINT64 compMask_60_15 = ~Mask_60_15;
1247 UINT64 rr_ps = vcpu_get_rr_ps(vcpu,vadr);
1248 UINT64 VHPT_offset = (vadr >> rr_ps) << 3;
1249 UINT64 VHPT_addr1 = vadr & 0xe000000000000000L;
1250 UINT64 VHPT_addr2a =
1251 ((pta_base >> 15) & 0x3fffffffffff) & compMask_60_15;
1252 UINT64 VHPT_addr2b =
1253 ((VHPT_offset >> 15) & 0x3fffffffffff) & Mask_60_15;
1254 UINT64 VHPT_addr3 = VHPT_offset & 0x7fff;
1255 UINT64 VHPT_addr = VHPT_addr1 | ((VHPT_addr2a | VHPT_addr2b) << 15) |
1256 VHPT_addr3;
1258 //verbose("vcpu_thash: vadr=%p, VHPT_addr=%p\n",vadr,VHPT_addr);
1259 *pval = VHPT_addr;
1260 return (IA64_NO_FAULT);
1263 IA64FAULT vcpu_ttag(VCPU *vcpu, UINT64 vadr, UINT64 *padr)
1265 printf("vcpu_ttag: ttag instruction unsupported\n");
1266 return (IA64_ILLOP_FAULT);
1269 unsigned long vhpt_translate_count = 0;
1270 unsigned long fast_vhpt_translate_count = 0;
1271 unsigned long recover_to_page_fault_count = 0;
1272 unsigned long recover_to_break_fault_count = 0;
1274 int warn_region0_address = 0; // FIXME later: tie to a boot parameter?
1276 IA64FAULT vcpu_translate(VCPU *vcpu, UINT64 address, BOOLEAN is_data, BOOLEAN in_tpa, UINT64 *pteval, UINT64 *itir, UINT64 *iha)
1278 unsigned long region = address >> 61;
1279 unsigned long pta, pte, rid, rr;
1280 int i;
1281 TR_ENTRY *trp;
1283 if (PSCB(vcpu,metaphysical_mode) && !(!is_data && region)) {
1284 // dom0 may generate an uncacheable physical address (msb=1)
1285 if (region && ((region != 4) || (vcpu->domain != dom0))) {
1286 // FIXME: This seems to happen even though it shouldn't. Need to track
1287 // this down, but since it has been apparently harmless, just flag it for now
1288 // panic_domain(vcpu_regs(vcpu),
1290 /*
1291 * The guest may execute itc.d and rfi with psr.dt=0.
1292 * When the VMM then tries to fetch the opcode, a TLB miss may occur.
1293 * At that point PSCB(vcpu,metaphysical_mode)=1 and
1294 * region=5; the VMM needs to handle this TLB miss as if
1295 * PSCB(vcpu,metaphysical_mode)=0.
1296 */
1297 printk("vcpu_translate: bad physical address: 0x%lx\n",
1298 address);
1299 } else {
1300 *pteval = (address & _PAGE_PPN_MASK) | __DIRTY_BITS |
1301 _PAGE_PL_2 | _PAGE_AR_RWX;
1302 *itir = PAGE_SHIFT << 2;
1303 phys_translate_count++;
1304 return IA64_NO_FAULT;
1307 else if (!region && warn_region0_address) {
1308 REGS *regs = vcpu_regs(vcpu);
1309 unsigned long viip = PSCB(vcpu,iip);
1310 unsigned long vipsr = PSCB(vcpu,ipsr);
1311 unsigned long iip = regs->cr_iip;
1312 unsigned long ipsr = regs->cr_ipsr;
1313 printk("vcpu_translate: bad address 0x%lx, viip=0x%lx, vipsr=0x%lx, iip=0x%lx, ipsr=0x%lx continuing\n",
1314 address, viip, vipsr, iip, ipsr);
1317 rr = PSCB(vcpu,rrs)[region];
1318 rid = rr & RR_RID_MASK;
1319 if (is_data) {
1320 if (vcpu_quick_region_check(vcpu->arch.dtr_regions,address)) {
1321 for (trp = vcpu->arch.dtrs, i = NDTRS; i; i--, trp++) {
1322 if (vcpu_match_tr_entry(trp,address,rid)) {
1323 *pteval = trp->page_flags;
1324 *itir = trp->itir;
1325 tr_translate_count++;
1326 return IA64_NO_FAULT;
1331 // FIXME?: check itr's for data accesses too, else bad things happen?
1332 /* else */ {
1333 if (vcpu_quick_region_check(vcpu->arch.itr_regions,address)) {
1334 for (trp = vcpu->arch.itrs, i = NITRS; i; i--, trp++) {
1335 if (vcpu_match_tr_entry(trp,address,rid)) {
1336 *pteval = trp->page_flags;
1337 *itir = trp->itir;
1338 tr_translate_count++;
1339 return IA64_NO_FAULT;
1345 /* check 1-entry TLB */
1346 // FIXME?: check dtlb for inst accesses too, else bad things happen?
1347 trp = &vcpu->arch.dtlb;
1348 if (/* is_data && */ vcpu_match_tr_entry(trp,address,rid)) {
1349 if (vcpu->domain==dom0 && !in_tpa) *pteval = trp->page_flags;
1350 else *pteval = vcpu->arch.dtlb_pte;
1351 *itir = trp->itir;
1352 dtlb_translate_count++;
1353 return IA64_NO_FAULT;
1356 /* check guest VHPT */
1357 pta = PSCB(vcpu,pta);
1358 if (pta & IA64_PTA_VF) { /* long format VHPT - not implemented */
1359 panic_domain(vcpu_regs(vcpu),"can't do long format VHPT\n");
1360 //return (is_data ? IA64_DATA_TLB_VECTOR:IA64_INST_TLB_VECTOR);
1363 *itir = rr & (RR_RID_MASK | RR_PS_MASK);
1364 // note: architecturally, setting iha is optional for alt faults, but
1365 // xenlinux depends on it, so it should be documented as part of the PV interface
1366 vcpu_thash(vcpu, address, iha);
1367 if (!(rr & RR_VE_MASK) || !(pta & IA64_PTA_VE))
1368 return (is_data ? IA64_ALT_DATA_TLB_VECTOR : IA64_ALT_INST_TLB_VECTOR);
1370 /* avoid recursively walking (short format) VHPT */
1371 if (((address ^ pta) & ((itir_mask(pta) << 3) >> 3)) == 0)
1372 return (is_data ? IA64_DATA_TLB_VECTOR : IA64_INST_TLB_VECTOR);
1374 if (__copy_from_user(&pte, (void *)(*iha), sizeof(pte)) != 0)
1375 // virtual VHPT walker "missed" in TLB
1376 return IA64_VHPT_FAULT;
1378 /*
1379 * Optimisation: this VHPT walker aborts on not-present pages
1380 * instead of inserting a not-present translation, this allows
1381 * vectoring directly to the miss handler.
1382 */
1383 if (!(pte & _PAGE_P))
1384 return (is_data ? IA64_DATA_TLB_VECTOR : IA64_INST_TLB_VECTOR);
1386 /* found mapping in guest VHPT! */
1387 *itir = rr & RR_PS_MASK;
1388 *pteval = pte;
1389 vhpt_translate_count++;
1390 return IA64_NO_FAULT;
1393 IA64FAULT vcpu_tpa(VCPU *vcpu, UINT64 vadr, UINT64 *padr)
1395 UINT64 pteval, itir, mask, iha;
1396 IA64FAULT fault;
1398 fault = vcpu_translate(vcpu, vadr, TRUE, TRUE, &pteval, &itir, &iha);
1399 if (fault == IA64_NO_FAULT)
1401 mask = itir_mask(itir);
1402 *padr = (pteval & _PAGE_PPN_MASK & mask) | (vadr & ~mask);
1403 return (IA64_NO_FAULT);
1405 return vcpu_force_data_miss(vcpu,vadr);
1408 IA64FAULT vcpu_tak(VCPU *vcpu, UINT64 vadr, UINT64 *key)
1410 printf("vcpu_tak: tak instruction unsupported\n");
1411 return (IA64_ILLOP_FAULT);
1412 // HACK ALERT: tak does a thash for now
1413 //return vcpu_thash(vcpu,vadr,key);
1416 /**************************************************************************
1417 VCPU debug breakpoint register access routines
1418 **************************************************************************/
1420 IA64FAULT vcpu_set_dbr(VCPU *vcpu, UINT64 reg, UINT64 val)
1422 // TODO: unimplemented DBRs return a reserved register fault
1423 // TODO: Should set Logical CPU state, not just physical
1424 ia64_set_dbr(reg,val);
1425 return (IA64_NO_FAULT);
1428 IA64FAULT vcpu_set_ibr(VCPU *vcpu, UINT64 reg, UINT64 val)
1430 // TODO: unimplemented IBRs return a reserved register fault
1431 // TODO: Should set Logical CPU state, not just physical
1432 ia64_set_ibr(reg,val);
1433 return (IA64_NO_FAULT);
1436 IA64FAULT vcpu_get_dbr(VCPU *vcpu, UINT64 reg, UINT64 *pval)
1438 // TODO: unimplemented DBRs return a reserved register fault
1439 UINT64 val = ia64_get_dbr(reg);
1440 *pval = val;
1441 return (IA64_NO_FAULT);
1444 IA64FAULT vcpu_get_ibr(VCPU *vcpu, UINT64 reg, UINT64 *pval)
1446 // TODO: unimplemented IBRs return a reserved register fault
1447 UINT64 val = ia64_get_ibr(reg);
1448 *pval = val;
1449 return (IA64_NO_FAULT);
1452 /**************************************************************************
1453 VCPU performance monitor register access routines
1454 **************************************************************************/
1456 IA64FAULT vcpu_set_pmc(VCPU *vcpu, UINT64 reg, UINT64 val)
1458 // TODO: Should set Logical CPU state, not just physical
1459 // NOTE: Writes to unimplemented PMC registers are discarded
1460 #ifdef DEBUG_PFMON
1461 printf("vcpu_set_pmc(%x,%lx)\n",reg,val);
1462 #endif
1463 ia64_set_pmc(reg,val);
1464 return (IA64_NO_FAULT);
1467 IA64FAULT vcpu_set_pmd(VCPU *vcpu, UINT64 reg, UINT64 val)
1469 // TODO: Should set Logical CPU state, not just physical
1470 // NOTE: Writes to unimplemented PMD registers are discarded
1471 #ifdef DEBUG_PFMON
1472 printf("vcpu_set_pmd(%x,%lx)\n",reg,val);
1473 #endif
1474 ia64_set_pmd(reg,val);
1475 return (IA64_NO_FAULT);
1478 IA64FAULT vcpu_get_pmc(VCPU *vcpu, UINT64 reg, UINT64 *pval)
1480 // NOTE: Reads from unimplemented PMC registers return zero
1481 UINT64 val = (UINT64)ia64_get_pmc(reg);
1482 #ifdef DEBUG_PFMON
1483 printf("%lx=vcpu_get_pmc(%x)\n",val,reg);
1484 #endif
1485 *pval = val;
1486 return (IA64_NO_FAULT);
1489 IA64FAULT vcpu_get_pmd(VCPU *vcpu, UINT64 reg, UINT64 *pval)
1491 // NOTE: Reads from unimplemented PMD registers return zero
1492 UINT64 val = (UINT64)ia64_get_pmd(reg);
1493 #ifdef DEBUG_PFMON
1494 printf("%lx=vcpu_get_pmd(%x)\n",val,reg);
1495 #endif
1496 *pval = val;
1497 return (IA64_NO_FAULT);
1500 /**************************************************************************
1501 VCPU banked general register access routines
1502 **************************************************************************/
1503 #define vcpu_bsw0_unat(i,b0unat,b1unat,runat,IA64_PT_REGS_R16_SLOT) \
1504 do{ \
1505 __asm__ __volatile__ ( \
1506 ";;extr.u %0 = %3,%6,16;;\n" \
1507 "dep %1 = %0, %1, 0, 16;;\n" \
1508 "st8 [%4] = %1\n" \
1509 "extr.u %0 = %2, 16, 16;;\n" \
1510 "dep %3 = %0, %3, %6, 16;;\n" \
1511 "st8 [%5] = %3\n" \
1512 ::"r"(i),"r"(*b1unat),"r"(*b0unat),"r"(*runat),"r"(b1unat), \
1513 "r"(runat),"i"(IA64_PT_REGS_R16_SLOT):"memory"); \
1514 }while(0)
1516 IA64FAULT vcpu_bsw0(VCPU *vcpu)
1518 // TODO: Only allowed for current vcpu
1519 REGS *regs = vcpu_regs(vcpu);
1520 unsigned long *r = &regs->r16;
1521 unsigned long *b0 = &PSCB(vcpu,bank0_regs[0]);
1522 unsigned long *b1 = &PSCB(vcpu,bank1_regs[0]);
1523 unsigned long *runat = &regs->eml_unat;
1524 unsigned long *b0unat = &PSCB(vcpu,vbnat);
1525 unsigned long *b1unat = &PSCB(vcpu,vnat);
1527 unsigned long i;
1529 if(VMX_DOMAIN(vcpu)){
1530 if(VCPU(vcpu,vpsr)&IA64_PSR_BN){
1531 for (i = 0; i < 16; i++) { *b1++ = *r; *r++ = *b0++; }
1532 vcpu_bsw0_unat(i,b0unat,b1unat,runat,IA64_PT_REGS_R16_SLOT);
1533 VCPU(vcpu,vpsr) &= ~IA64_PSR_BN;
1535 }else{
1536 if (PSCB(vcpu,banknum)) {
1537 for (i = 0; i < 16; i++) { *b1++ = *r; *r++ = *b0++; }
1538 vcpu_bsw0_unat(i,b0unat,b1unat,runat,IA64_PT_REGS_R16_SLOT);
1539 PSCB(vcpu,banknum) = 0;
1542 return (IA64_NO_FAULT);
1545 #define vcpu_bsw1_unat(i,b0unat,b1unat,runat,IA64_PT_REGS_R16_SLOT) \
1546 do{ \
1547 __asm__ __volatile__ ( \
1548 ";;extr.u %0 = %3,%6,16;;\n" \
1549 "dep %1 = %0, %1, 16, 16;;\n" \
1550 "st8 [%4] = %1\n" \
1551 "extr.u %0 = %2, 0, 16;;\n" \
1552 "dep %3 = %0, %3, %6, 16;;\n" \
1553 "st8 [%5] = %3\n" \
1554 ::"r"(i),"r"(*b0unat),"r"(*b1unat),"r"(*runat),"r"(b0unat), \
1555 "r"(runat),"i"(IA64_PT_REGS_R16_SLOT):"memory"); \
1556 }while(0)
1558 IA64FAULT vcpu_bsw1(VCPU *vcpu)
1560 // TODO: Only allowed for current vcpu
1561 REGS *regs = vcpu_regs(vcpu);
1562 unsigned long *r = &regs->r16;
1563 unsigned long *b0 = &PSCB(vcpu,bank0_regs[0]);
1564 unsigned long *b1 = &PSCB(vcpu,bank1_regs[0]);
1565 unsigned long *runat = &regs->eml_unat;
1566 unsigned long *b0unat = &PSCB(vcpu,vbnat);
1567 unsigned long *b1unat = &PSCB(vcpu,vnat);
1569 unsigned long i;
1571 if(VMX_DOMAIN(vcpu)){
1572 if(!(VCPU(vcpu,vpsr)&IA64_PSR_BN)){
1573 for (i = 0; i < 16; i++) { *b0++ = *r; *r++ = *b1++; }
1574 vcpu_bsw1_unat(i,b0unat,b1unat,runat,IA64_PT_REGS_R16_SLOT);
1575 VCPU(vcpu,vpsr) |= IA64_PSR_BN;
1577 }else{
1578 if (!PSCB(vcpu,banknum)) {
1579 for (i = 0; i < 16; i++) { *b0++ = *r; *r++ = *b1++; }
1580 vcpu_bsw1_unat(i,b0unat,b1unat,runat,IA64_PT_REGS_R16_SLOT);
1581 PSCB(vcpu,banknum) = 1;
1584 return (IA64_NO_FAULT);
1587 /**************************************************************************
1588 VCPU cpuid access routines
1589 **************************************************************************/
1592 IA64FAULT vcpu_get_cpuid(VCPU *vcpu, UINT64 reg, UINT64 *pval)
1594 // FIXME: This could get called as a result of a rsvd-reg fault
1595 // if reg > 3
1596 switch(reg) {
1597 case 0:
1598 memcpy(pval,"Xen/ia64",8);
1599 break;
1600 case 1:
1601 *pval = 0;
1602 break;
1603 case 2:
1604 *pval = 0;
1605 break;
1606 case 3:
1607 *pval = ia64_get_cpuid(3);
1608 break;
1609 case 4:
1610 *pval = ia64_get_cpuid(4);
1611 break;
1612 default:
1613 if (reg > (ia64_get_cpuid(3) & 0xff))
1614 return IA64_RSVDREG_FAULT;
1615 *pval = ia64_get_cpuid(reg);
1616 break;
1618 return (IA64_NO_FAULT);
1621 /**************************************************************************
1622 VCPU region register access routines
1623 **************************************************************************/
1625 unsigned long vcpu_get_rr_ve(VCPU *vcpu,UINT64 vadr)
1627 ia64_rr rr;
1629 rr.rrval = PSCB(vcpu,rrs)[vadr>>61];
1630 return(rr.ve);
1633 IA64FAULT vcpu_set_rr(VCPU *vcpu, UINT64 reg, UINT64 val)
1635 PSCB(vcpu,rrs)[reg>>61] = val;
1636 // warning: set_one_rr() does it "live"
1637 set_one_rr(reg,val);
1638 return (IA64_NO_FAULT);
1641 IA64FAULT vcpu_get_rr(VCPU *vcpu, UINT64 reg, UINT64 *pval)
1643 if(VMX_DOMAIN(vcpu)){
1644 *pval = VMX(vcpu,vrr[reg>>61]);
1645 }else{
1646 *pval = PSCB(vcpu,rrs)[reg>>61];
1648 return (IA64_NO_FAULT);
1651 /**************************************************************************
1652 VCPU protection key register access routines
1653 **************************************************************************/
1655 IA64FAULT vcpu_get_pkr(VCPU *vcpu, UINT64 reg, UINT64 *pval)
1657 #ifndef PKR_USE_FIXED
1658 printk("vcpu_get_pkr: called, not implemented yet\n");
1659 return IA64_ILLOP_FAULT;
1660 #else
1661 UINT64 val = (UINT64)ia64_get_pkr(reg);
1662 *pval = val;
1663 return (IA64_NO_FAULT);
1664 #endif
1667 IA64FAULT vcpu_set_pkr(VCPU *vcpu, UINT64 reg, UINT64 val)
1669 #ifndef PKR_USE_FIXED
1670 printk("vcpu_set_pkr: called, not implemented yet\n");
1671 return IA64_ILLOP_FAULT;
1672 #else
1673 // if (reg >= NPKRS) return (IA64_ILLOP_FAULT);
1674 vcpu->pkrs[reg] = val;
1675 ia64_set_pkr(reg,val);
1676 return (IA64_NO_FAULT);
1677 #endif
1680 /**************************************************************************
1681 VCPU translation register access routines
1682 **************************************************************************/
1684 static inline void vcpu_purge_tr_entry(TR_ENTRY *trp)
1686 trp->p = 0;
1689 static void vcpu_set_tr_entry(TR_ENTRY *trp, UINT64 pte, UINT64 itir, UINT64 ifa)
1691 UINT64 ps;
1693 trp->itir = itir;
1694 trp->rid = VCPU(current,rrs[ifa>>61]) & RR_RID_MASK;
1695 trp->p = 1;
1696 ps = trp->ps;
1697 trp->page_flags = pte;
1698 if (trp->pl < 2) trp->pl = 2;
1699 trp->vadr = ifa & ~0xfff;
1700 if (ps > 12) { // "ignore" relevant low-order bits
1701 trp->ppn &= ~((1UL<<(ps-12))-1);
1702 trp->vadr &= ~((1UL<<ps)-1);
1706 IA64FAULT vcpu_itr_d(VCPU *vcpu, UINT64 slot, UINT64 pte,
1707 UINT64 itir, UINT64 ifa)
1709 TR_ENTRY *trp;
1711 if (slot >= NDTRS) return IA64_RSVDREG_FAULT;
1712 trp = &PSCBX(vcpu,dtrs[slot]);
1713 //printf("***** itr.d: setting slot %d: ifa=%p\n",slot,ifa);
1714 vcpu_set_tr_entry(trp,pte,itir,ifa);
1715 vcpu_quick_region_set(PSCBX(vcpu,dtr_regions),ifa);
1716 return IA64_NO_FAULT;
1719 IA64FAULT vcpu_itr_i(VCPU *vcpu, UINT64 slot, UINT64 pte,
1720 UINT64 itir, UINT64 ifa)
1722 TR_ENTRY *trp;
1724 if (slot >= NITRS) return IA64_RSVDREG_FAULT;
1725 trp = &PSCBX(vcpu,itrs[slot]);
1726 //printf("***** itr.i: setting slot %d: ifa=%p\n",slot,ifa);
1727 vcpu_set_tr_entry(trp,pte,itir,ifa);
1728 vcpu_quick_region_set(PSCBX(vcpu,itr_regions),ifa);
1729 return IA64_NO_FAULT;
1732 /**************************************************************************
1733 VCPU translation cache access routines
1734 **************************************************************************/
1736 void foobar(void) { /*vcpu_verbose = 1;*/ }
1738 void vcpu_itc_no_srlz(VCPU *vcpu, UINT64 IorD, UINT64 vaddr, UINT64 pte, UINT64 mp_pte, UINT64 logps)
1740 unsigned long psr;
1741 unsigned long ps = (vcpu->domain==dom0) ? logps : PAGE_SHIFT;
1743 // FIXME: validate ifa here (not in Xen space), COULD MACHINE CHECK!
1744 // FIXME, must be inlined or potential for nested fault here!
1745 if ((vcpu->domain==dom0) && (logps < PAGE_SHIFT)) {
1746 printf("vcpu_itc_no_srlz: domain0 use of smaller page size!\n");
1747 //FIXME: kill domain here
1748 while(1);
1750 psr = ia64_clear_ic();
1751 ia64_itc(IorD,vaddr,pte,ps); // FIXME: look for bigger mappings
1752 ia64_set_psr(psr);
1753 // ia64_srlz_i(); // no srls req'd, will rfi later
1754 #ifdef VHPT_GLOBAL
1755 if (vcpu->domain==dom0 && ((vaddr >> 61) == 7)) {
1756 // FIXME: this is dangerous... vhpt_flush_address ensures these
1757 // addresses never get flushed. More work needed if this
1758 // ever happens.
1759 //printf("vhpt_insert(%p,%p,%p)\n",vaddr,pte,1L<<logps);
1760 if (logps > PAGE_SHIFT) vhpt_multiple_insert(vaddr,pte,logps);
1761 else vhpt_insert(vaddr,pte,logps<<2);
1763 // even if domain pagesize is larger than PAGE_SIZE, just put
1764 // PAGE_SIZE mapping in the vhpt for now, else purging is complicated
1765 else vhpt_insert(vaddr,pte,PAGE_SHIFT<<2);
1766 #endif
1767 if ((mp_pte == -1UL) || (IorD & 0x4)) // don't place in 1-entry TLB
1768 return;
1769 if (IorD & 0x1) {
1770 vcpu_set_tr_entry(&PSCBX(vcpu,itlb),pte,ps<<2,vaddr);
1771 PSCBX(vcpu,itlb_pte) = mp_pte;
1773 if (IorD & 0x2) {
1774 vcpu_set_tr_entry(&PSCBX(vcpu,dtlb),pte,ps<<2,vaddr);
1775 PSCBX(vcpu,dtlb_pte) = mp_pte;
1779 IA64FAULT vcpu_itc_d(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa)
1781 unsigned long pteval, logps = itir_ps(itir);
1782 BOOLEAN swap_rr0 = (!(ifa>>61) && PSCB(vcpu,metaphysical_mode));
1784 if (logps < PAGE_SHIFT) {
1785 printf("vcpu_itc_d: domain trying to use smaller page size!\n");
1786 //FIXME: kill domain here
1787 while(1);
1789 //itir = (itir & ~0xfc) | (PAGE_SHIFT<<2); // ignore domain's pagesize
1790 pteval = translate_domain_pte(pte,ifa,itir);
1791 if (!pteval) return IA64_ILLOP_FAULT;
1792 if (swap_rr0) set_one_rr(0x0,PSCB(vcpu,rrs[0]));
1793 vcpu_itc_no_srlz(vcpu,2,ifa,pteval,pte,logps);
1794 if (swap_rr0) set_metaphysical_rr0();
1795 return IA64_NO_FAULT;
1798 IA64FAULT vcpu_itc_i(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa)
1800 unsigned long pteval, logps = itir_ps(itir);
1801 BOOLEAN swap_rr0 = (!(ifa>>61) && PSCB(vcpu,metaphysical_mode));
1803 // FIXME: validate ifa here (not in Xen space), COULD MACHINE CHECK!
1804 if (logps < PAGE_SHIFT) {
1805 printf("vcpu_itc_i: domain trying to use smaller page size!\n");
1806 //FIXME: kill domain here
1807 while(1);
1809 //itir = (itir & ~0xfc) | (PAGE_SHIFT<<2); // ignore domain's pagesize
1810 pteval = translate_domain_pte(pte,ifa,itir);
1811 // FIXME: what to do if bad physical address? (machine check?)
1812 if (!pteval) return IA64_ILLOP_FAULT;
1813 if (swap_rr0) set_one_rr(0x0,PSCB(vcpu,rrs[0]));
1814 vcpu_itc_no_srlz(vcpu, 1,ifa,pteval,pte,logps);
1815 if (swap_rr0) set_metaphysical_rr0();
1816 return IA64_NO_FAULT;
1819 IA64FAULT vcpu_ptc_l(VCPU *vcpu, UINT64 vadr, UINT64 addr_range)
1821 printk("vcpu_ptc_l: called, not implemented yet\n");
1822 return IA64_ILLOP_FAULT;
1825 // At privlvl=0, fc performs no access rights or protection key checks, while
1826 // at privlvl!=0, fc performs access rights checks as if it were a 1-byte
1827 // read but no protection key check. Thus in order to avoid an unexpected
1828 // access rights fault, we have to translate the virtual address to a
1829 // physical address (possibly via a metaphysical address) and do the fc
1830 // on the physical address, which is guaranteed to flush the same cache line
1831 IA64FAULT vcpu_fc(VCPU *vcpu, UINT64 vadr)
1833 // TODO: Only allowed for current vcpu
1834 UINT64 mpaddr, paddr;
1835 IA64FAULT fault;
1837 fault = vcpu_tpa(vcpu, vadr, &mpaddr);
1838 if (fault == IA64_NO_FAULT) {
1839 paddr = translate_domain_mpaddr(mpaddr);
1840 ia64_fc(__va(paddr));
1842 return fault;
1845 int ptce_count = 0;
1846 IA64FAULT vcpu_ptc_e(VCPU *vcpu, UINT64 vadr)
1848 // Note that this only needs to be called once, i.e. the
1849 // architected loop to purge the entire TLB should use
1850 // base = stride1 = stride2 = 0, count0 = count1 = 1
1852 #ifdef VHPT_GLOBAL
1853 vhpt_flush(); // FIXME: This is overdoing it
1854 #endif
1855 local_flush_tlb_all();
1856 // just invalidate the "whole" tlb
1857 vcpu_purge_tr_entry(&PSCBX(vcpu,dtlb));
1858 vcpu_purge_tr_entry(&PSCBX(vcpu,itlb));
1859 return IA64_NO_FAULT;
1862 IA64FAULT vcpu_ptc_g(VCPU *vcpu, UINT64 vadr, UINT64 addr_range)
1864 printk("vcpu_ptc_g: called, not implemented yet\n");
1865 return IA64_ILLOP_FAULT;
1868 #if defined(CONFIG_XEN_SMP) && defined(VHPT_GLOBAL)
1869 struct ptc_ga_args {
1870 unsigned long vadr;
1871 unsigned long addr_range;
1872 };
1874 static void ptc_ga_remote_func (void *varg)
1876 struct ptc_ga_args *args = (struct ptc_ga_args *)varg;
1877 vhpt_flush_address (args->vadr, args->addr_range);
1879 #endif
1881 IA64FAULT vcpu_ptc_ga(VCPU *vcpu,UINT64 vadr,UINT64 addr_range)
1883 // FIXME: validate not flushing Xen addresses
1884 // if (Xen address) return(IA64_ILLOP_FAULT);
1885 // FIXME: ??breaks if domain PAGE_SIZE < Xen PAGE_SIZE
1886 //printf("######## vcpu_ptc_ga(%p,%p) ##############\n",vadr,addr_range);
1888 #ifdef CONFIG_XEN_SMP
1889 struct domain *d = vcpu->domain;
1890 struct vcpu *v;
1891 struct ptc_ga_args args;
1893 args.vadr = vadr;
1894 args.addr_range = addr_range;
1896 /* This method is very conservative and should be optimized:
1897 - maybe IPI calls can be avoided,
1898 - a processor map can be built to avoid duplicate purge
1899 - maybe ptc.ga can be replaced by ptc.l+invala.
1900 Hopefully, it has no impact on UP.
1901 */
1902 for_each_vcpu (d, v) {
1903 if (v != vcpu) {
1904 /* Purge tc entry.
1905 Can we do this directly ? Well, this is just a
1906 single atomic write. */
1907 vcpu_purge_tr_entry(&PSCBX(v,dtlb));
1908 vcpu_purge_tr_entry(&PSCBX(v,itlb));
1909 #ifdef VHPT_GLOBAL
1910 /* Flush VHPT on remote processors.
1911 FIXME: invalidate directly the entries? */
1912 smp_call_function_single
1913 (v->processor, &ptc_ga_remote_func,
1914 &args, 0, 1);
1915 #endif
1918 #endif
1920 #ifdef VHPT_GLOBAL
1921 vhpt_flush_address(vadr,addr_range);
1922 #endif
1923 ia64_global_tlb_purge(vadr,vadr+addr_range,PAGE_SHIFT);
1924 /* Purge tc. */
1925 vcpu_purge_tr_entry(&PSCBX(vcpu,dtlb));
1926 vcpu_purge_tr_entry(&PSCBX(vcpu,itlb));
1927 return IA64_NO_FAULT;
1930 IA64FAULT vcpu_ptr_d(VCPU *vcpu,UINT64 vadr,UINT64 addr_range)
1932 printf("vcpu_ptr_d: Purging TLB is unsupported\n");
1933 // don't forget to recompute dtr_regions
1934 return (IA64_ILLOP_FAULT);
1937 IA64FAULT vcpu_ptr_i(VCPU *vcpu,UINT64 vadr,UINT64 addr_range)
1939 printf("vcpu_ptr_i: Purging TLB is unsupported\n");
1940 // don't forget to recompute itr_regions
1941 return (IA64_ILLOP_FAULT);