ia64/xen-unstable

view xen/arch/ia64/xen/vcpu.c @ 7668:be53bbe62f2e

Don't panic for other region physaddr handling in vcpu_translate
author djm@kirby.fc.hp.com
date Wed Nov 09 15:31:01 2005 -0600 (2005-11-09)
parents 7fd6dff1631d
children 5b2d9807725d
line source
1 /*
2 * Virtualized CPU functions
3 *
4 * Copyright (C) 2004-2005 Hewlett-Packard Co.
5 * Dan Magenheimer (dan.magenheimer@hp.com)
6 *
7 */
9 #if 1
10 // TEMPORARY PATCH: match_dtlb uses this; can be removed later
11 // FIXME SMP
12 int in_tpa = 0;
13 #endif
15 #include <linux/sched.h>
16 #include <public/arch-ia64.h>
17 #include <asm/ia64_int.h>
18 #include <asm/vcpu.h>
19 #include <asm/regionreg.h>
20 #include <asm/tlb.h>
21 #include <asm/processor.h>
22 #include <asm/delay.h>
23 #include <asm/vmx_vcpu.h>
24 #include <xen/event.h>
26 typedef union {
27 struct ia64_psr ia64_psr;
28 unsigned long i64;
29 } PSR;
31 //typedef struct pt_regs REGS;
32 //typedef struct domain VCPU;
34 // this def for vcpu_regs won't work if kernel stack is present
35 //#define vcpu_regs(vcpu) ((struct pt_regs *) vcpu->arch.regs
36 #define vcpu_regs(vcpu) (((struct pt_regs *) ((char *) (vcpu) + IA64_STK_OFFSET)) - 1)
37 #define PSCB(x,y) VCPU(x,y)
38 #define PSCBX(x,y) x->arch.y
40 #define TRUE 1
41 #define FALSE 0
42 #define IA64_PTA_SZ_BIT 2
43 #define IA64_PTA_VF_BIT 8
44 #define IA64_PTA_BASE_BIT 15
45 #define IA64_PTA_LFMT (1UL << IA64_PTA_VF_BIT)
46 #define IA64_PTA_SZ(x) (x##UL << IA64_PTA_SZ_BIT)
48 #define STATIC
50 #ifdef PRIVOP_ADDR_COUNT
51 struct privop_addr_count privop_addr_counter[PRIVOP_COUNT_NINSTS] = {
52 { "=ifa", { 0 }, { 0 }, 0 },
53 { "thash", { 0 }, { 0 }, 0 },
54 0
55 };
56 extern void privop_count_addr(unsigned long addr, int inst);
57 #define PRIVOP_COUNT_ADDR(regs,inst) privop_count_addr(regs->cr_iip,inst)
58 #else
59 #define PRIVOP_COUNT_ADDR(x,y) do {} while (0)
60 #endif
62 unsigned long dtlb_translate_count = 0;
63 unsigned long tr_translate_count = 0;
64 unsigned long phys_translate_count = 0;
66 unsigned long vcpu_verbose = 0;
67 #define verbose(a...) do {if (vcpu_verbose) printf(a);} while(0)
69 //#define vcpu_quick_region_check(_tr_regions,_ifa) 1
70 #define vcpu_quick_region_check(_tr_regions,_ifa) \
71 (_tr_regions & (1 << ((unsigned long)_ifa >> 61)))
72 #define vcpu_quick_region_set(_tr_regions,_ifa) \
73 do {_tr_regions |= (1 << ((unsigned long)_ifa >> 61)); } while (0)
75 // FIXME: also need to check && (!trp->key || vcpu_pkr_match(trp->key))
76 #define vcpu_match_tr_entry(_trp,_ifa,_rid) \
77 ((_trp->p && (_trp->rid==_rid) && (_ifa >= _trp->vadr) && \
78 (_ifa <= (_trp->vadr + (1L<< _trp->ps)) - 1)))
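/* Illustrative sketch (not part of vcpu.c): how the quick-region mask and
 * vcpu_match_tr_entry() are meant to cooperate.  Each of the eight virtual
 * regions (bits 63:61 of an address) is one bit in a per-vcpu mask; inserting
 * a TR sets the bit for its region, so a lookup can skip the linear TR scan
 * whenever the faulting address lies in a region with no TRs at all.  The
 * function name below is hypothetical. */
static void example_quick_region_usage(void)
{
	unsigned long regions = 0;			/* per-vcpu mask, e.g. arch.dtr_regions */
	UINT64 mapped = 0xe000000000000000UL;		/* region 7 */
	UINT64 miss   = 0x6000000000000000UL;		/* region 3 */

	vcpu_quick_region_set(regions, mapped);		/* regions is now 0x80 */
	if (vcpu_quick_region_check(regions, miss)) {
		/* never reached: region 3 has no TRs, so the NDTRS/NITRS
		 * scan with vcpu_match_tr_entry() can be skipped entirely */
	}
}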
80 /**************************************************************************
81 VCPU general register access routines
82 **************************************************************************/
83 #ifdef XEN
84 UINT64
85 vcpu_get_gr(VCPU *vcpu, unsigned reg)
86 {
87 REGS *regs = vcpu_regs(vcpu);
88 UINT64 val;
89 if (!reg) return 0;
90 getreg(reg,&val,0,regs); // FIXME: handle NATs later
91 return val;
92 }
93 IA64FAULT
94 vcpu_get_gr_nat(VCPU *vcpu, unsigned reg, UINT64 *val)
95 {
96 REGS *regs = vcpu_regs(vcpu);
97 int nat;
98 getreg(reg,val,&nat,regs); // FIXME: handle NATs later
99 if(nat)
100 return IA64_NAT_CONSUMPTION_VECTOR;
101 return 0;
102 }
104 // returns:
105 // IA64_ILLOP_FAULT if the register would cause an Illegal Operation fault
106 // IA64_NO_FAULT otherwise
107 IA64FAULT
108 vcpu_set_gr(VCPU *vcpu, unsigned reg, UINT64 value, int nat)
109 {
110 REGS *regs = vcpu_regs(vcpu);
111 if (!reg) return IA64_ILLOP_FAULT;
112 long sof = (regs->cr_ifs) & 0x7f;
113 if (reg >= sof + 32) return IA64_ILLOP_FAULT;
114 setreg(reg,value,nat,regs); // FIXME: handle NATs later
115 return IA64_NO_FAULT;
116 }
117 #else
118 // returns:
119 // IA64_ILLOP_FAULT if the register would cause an Illegal Operation fault
120 // IA64_NO_FAULT otherwise
121 IA64FAULT
122 vcpu_set_gr(VCPU *vcpu, unsigned reg, UINT64 value)
123 {
124 REGS *regs = vcpu_regs(vcpu);
125 long sof = (regs->cr_ifs) & 0x7f;
127 if (!reg) return IA64_ILLOP_FAULT;
128 if (reg >= sof + 32) return IA64_ILLOP_FAULT;
129 setreg(reg,value,0,regs); // FIXME: handle NATs later
130 return IA64_NO_FAULT;
131 }
133 #endif
134 /**************************************************************************
135 VCPU privileged application register access routines
136 **************************************************************************/
138 void vcpu_load_kernel_regs(VCPU *vcpu)
139 {
140 ia64_set_kr(0, VCPU(vcpu, krs[0]));
141 ia64_set_kr(1, VCPU(vcpu, krs[1]));
142 ia64_set_kr(2, VCPU(vcpu, krs[2]));
143 ia64_set_kr(3, VCPU(vcpu, krs[3]));
144 ia64_set_kr(4, VCPU(vcpu, krs[4]));
145 ia64_set_kr(5, VCPU(vcpu, krs[5]));
146 ia64_set_kr(6, VCPU(vcpu, krs[6]));
147 ia64_set_kr(7, VCPU(vcpu, krs[7]));
148 }
150 IA64FAULT vcpu_set_ar(VCPU *vcpu, UINT64 reg, UINT64 val)
151 {
152 if (reg == 44) return (vcpu_set_itc(vcpu,val));
153 else if (reg == 27) return (IA64_ILLOP_FAULT);
154 else if (reg == 24)
155 printf("warning: setting ar.eflg is a no-op; no IA-32 support\n");
156 else if (reg > 7) return (IA64_ILLOP_FAULT);
157 else {
158 PSCB(vcpu,krs[reg]) = val;
159 ia64_set_kr(reg,val);
160 }
161 return IA64_NO_FAULT;
162 }
164 IA64FAULT vcpu_get_ar(VCPU *vcpu, UINT64 reg, UINT64 *val)
165 {
166 if (reg == 24)
167 printf("warning: getting ar.eflg is a no-op; no IA-32 support\n");
168 else if (reg > 7) return (IA64_ILLOP_FAULT);
169 else *val = PSCB(vcpu,krs[reg]);
170 return IA64_NO_FAULT;
171 }
173 /**************************************************************************
174 VCPU processor status register access routines
175 **************************************************************************/
177 void vcpu_set_metaphysical_mode(VCPU *vcpu, BOOLEAN newmode)
178 {
179 /* only do something if mode changes */
180 if (!!newmode ^ !!PSCB(vcpu,metaphysical_mode)) {
181 if (newmode) set_metaphysical_rr0();
182 else if (PSCB(vcpu,rrs[0]) != -1)
183 set_one_rr(0, PSCB(vcpu,rrs[0]));
184 PSCB(vcpu,metaphysical_mode) = newmode;
185 }
186 }
188 IA64FAULT vcpu_reset_psr_dt(VCPU *vcpu)
189 {
190 vcpu_set_metaphysical_mode(vcpu,TRUE);
191 return IA64_NO_FAULT;
192 }
194 IA64FAULT vcpu_reset_psr_sm(VCPU *vcpu, UINT64 imm24)
195 {
196 struct ia64_psr psr, imm, *ipsr;
197 REGS *regs = vcpu_regs(vcpu);
199 //PRIVOP_COUNT_ADDR(regs,_RSM);
200 // TODO: All of these bits need to be virtualized
201 // TODO: Only allowed for current vcpu
202 __asm__ __volatile ("mov %0=psr;;" : "=r"(psr) :: "memory");
203 ipsr = (struct ia64_psr *)&regs->cr_ipsr;
204 imm = *(struct ia64_psr *)&imm24;
205 // interrupt flag
206 if (imm.i) PSCB(vcpu,interrupt_delivery_enabled) = 0;
207 if (imm.ic) PSCB(vcpu,interrupt_collection_enabled) = 0;
208 // interrupt collection flag
209 //if (imm.ic) PSCB(vcpu,interrupt_delivery_enabled) = 0;
210 // just handle psr.up and psr.pp for now
211 if (imm24 & ~(IA64_PSR_BE | IA64_PSR_PP | IA64_PSR_UP | IA64_PSR_SP
212 | IA64_PSR_I | IA64_PSR_IC | IA64_PSR_DT
213 | IA64_PSR_DFL | IA64_PSR_DFH))
214 return (IA64_ILLOP_FAULT);
215 if (imm.dfh) ipsr->dfh = 0;
216 if (imm.dfl) ipsr->dfl = 0;
217 if (imm.pp) {
218 ipsr->pp = 1;
219 psr.pp = 1; // priv perf ctrs always enabled
220 // FIXME: need new field in mapped_regs_t for virtual psr.pp (psr.be too?)
221 PSCB(vcpu,tmp[8]) = 0; // but fool the domain if it gets psr
222 }
223 if (imm.up) { ipsr->up = 0; psr.up = 0; }
224 if (imm.sp) { ipsr->sp = 0; psr.sp = 0; }
225 if (imm.be) ipsr->be = 0;
226 if (imm.dt) vcpu_set_metaphysical_mode(vcpu,TRUE);
227 __asm__ __volatile (";; mov psr.l=%0;; srlz.d"::"r"(psr):"memory");
228 return IA64_NO_FAULT;
229 }
231 extern UINT64 vcpu_check_pending_interrupts(VCPU *vcpu);
232 #define SPURIOUS_VECTOR 0xf
234 IA64FAULT vcpu_set_psr_dt(VCPU *vcpu)
235 {
236 vcpu_set_metaphysical_mode(vcpu,FALSE);
237 return IA64_NO_FAULT;
238 }
240 IA64FAULT vcpu_set_psr_i(VCPU *vcpu)
241 {
242 PSCB(vcpu,interrupt_delivery_enabled) = 1;
243 PSCB(vcpu,interrupt_collection_enabled) = 1;
244 return IA64_NO_FAULT;
245 }
247 IA64FAULT vcpu_set_psr_sm(VCPU *vcpu, UINT64 imm24)
248 {
249 struct ia64_psr psr, imm, *ipsr;
250 REGS *regs = vcpu_regs(vcpu);
251 UINT64 mask, enabling_interrupts = 0;
253 //PRIVOP_COUNT_ADDR(regs,_SSM);
254 // TODO: All of these bits need to be virtualized
255 __asm__ __volatile ("mov %0=psr;;" : "=r"(psr) :: "memory");
256 imm = *(struct ia64_psr *)&imm24;
257 ipsr = (struct ia64_psr *)&regs->cr_ipsr;
258 // just handle psr.sp,pp and psr.i,ic (and user mask) for now
259 mask = IA64_PSR_PP|IA64_PSR_SP|IA64_PSR_I|IA64_PSR_IC|IA64_PSR_UM |
260 IA64_PSR_DT|IA64_PSR_DFL|IA64_PSR_DFH;
261 if (imm24 & ~mask) return (IA64_ILLOP_FAULT);
262 if (imm.dfh) ipsr->dfh = 1;
263 if (imm.dfl) ipsr->dfl = 1;
264 if (imm.pp) {
265 ipsr->pp = 1; psr.pp = 1;
266 // FIXME: need new field in mapped_regs_t for virtual psr.pp (psr.be too?)
267 PSCB(vcpu,tmp[8]) = 1;
268 }
269 if (imm.sp) { ipsr->sp = 1; psr.sp = 1; }
270 if (imm.i) {
271 if (!PSCB(vcpu,interrupt_delivery_enabled)) {
272 //printf("vcpu_set_psr_sm: psr.ic 0->1 ");
273 enabling_interrupts = 1;
274 }
275 PSCB(vcpu,interrupt_delivery_enabled) = 1;
276 }
277 if (imm.ic) PSCB(vcpu,interrupt_collection_enabled) = 1;
278 // TODO: do this faster
279 if (imm.mfl) { ipsr->mfl = 1; psr.mfl = 1; }
280 if (imm.mfh) { ipsr->mfh = 1; psr.mfh = 1; }
281 if (imm.ac) { ipsr->ac = 1; psr.ac = 1; }
282 if (imm.up) { ipsr->up = 1; psr.up = 1; }
283 if (imm.be) {
284 printf("*** DOMAIN TRYING TO TURN ON BIG-ENDIAN!!!\n");
285 return (IA64_ILLOP_FAULT);
286 }
287 if (imm.dt) vcpu_set_metaphysical_mode(vcpu,FALSE);
288 __asm__ __volatile (";; mov psr.l=%0;; srlz.d"::"r"(psr):"memory");
289 if (enabling_interrupts &&
290 vcpu_check_pending_interrupts(vcpu) != SPURIOUS_VECTOR)
291 PSCB(vcpu,pending_interruption) = 1;
292 return IA64_NO_FAULT;
293 }
295 IA64FAULT vcpu_set_psr_l(VCPU *vcpu, UINT64 val)
296 {
297 struct ia64_psr psr, newpsr, *ipsr;
298 REGS *regs = vcpu_regs(vcpu);
299 UINT64 enabling_interrupts = 0;
301 // TODO: All of these bits need to be virtualized
302 __asm__ __volatile ("mov %0=psr;;" : "=r"(psr) :: "memory");
303 newpsr = *(struct ia64_psr *)&val;
304 ipsr = (struct ia64_psr *)&regs->cr_ipsr;
305 // just handle psr.up and psr.pp for now
306 //if (val & ~(IA64_PSR_PP | IA64_PSR_UP | IA64_PSR_SP)) return (IA64_ILLOP_FAULT);
307 // however, unlike ssm, attempting to set other bits here is not treated as an error
308 if (newpsr.dfh) ipsr->dfh = 1;
309 if (newpsr.dfl) ipsr->dfl = 1;
310 if (newpsr.pp) {
311 ipsr->pp = 1; psr.pp = 1;
312 // FIXME: need new field in mapped_regs_t for virtual psr.pp (psr.be too?)
313 PSCB(vcpu,tmp[8]) = 1;
314 }
315 else {
316 ipsr->pp = 1; psr.pp = 1;
317 PSCB(vcpu,tmp[8]) = 0;
318 }
319 if (newpsr.up) { ipsr->up = 1; psr.up = 1; }
320 if (newpsr.sp) { ipsr->sp = 1; psr.sp = 1; }
321 if (newpsr.i) {
322 if (!PSCB(vcpu,interrupt_delivery_enabled))
323 enabling_interrupts = 1;
324 PSCB(vcpu,interrupt_delivery_enabled) = 1;
325 }
326 if (newpsr.ic) PSCB(vcpu,interrupt_collection_enabled) = 1;
327 if (newpsr.mfl) { ipsr->mfl = 1; psr.mfl = 1; }
328 if (newpsr.mfh) { ipsr->mfh = 1; psr.mfh = 1; }
329 if (newpsr.ac) { ipsr->ac = 1; psr.ac = 1; }
330 if (newpsr.up) { ipsr->up = 1; psr.up = 1; }
331 if (newpsr.dt && newpsr.rt) vcpu_set_metaphysical_mode(vcpu,FALSE);
332 else vcpu_set_metaphysical_mode(vcpu,TRUE);
333 if (newpsr.be) {
334 printf("*** DOMAIN TRYING TO TURN ON BIG-ENDIAN!!!\n");
335 return (IA64_ILLOP_FAULT);
336 }
337 if (enabling_interrupts &&
338 vcpu_check_pending_interrupts(vcpu) != SPURIOUS_VECTOR)
339 PSCB(vcpu,pending_interruption) = 1;
340 return IA64_NO_FAULT;
341 }
343 IA64FAULT vcpu_get_psr(VCPU *vcpu, UINT64 *pval)
344 {
345 UINT64 psr;
346 struct ia64_psr newpsr;
348 // TODO: This needs to return a "filtered" view of
349 // the psr, not the actual psr. Probably the psr needs
350 // to be a field in regs (in addition to ipsr).
351 __asm__ __volatile ("mov %0=psr;;" : "=r"(psr) :: "memory");
352 newpsr = *(struct ia64_psr *)&psr;
353 if (newpsr.cpl == 2) newpsr.cpl = 0;
354 if (PSCB(vcpu,interrupt_delivery_enabled)) newpsr.i = 1;
355 else newpsr.i = 0;
356 if (PSCB(vcpu,interrupt_collection_enabled)) newpsr.ic = 1;
357 else newpsr.ic = 0;
358 // FIXME: need new field in mapped_regs_t for virtual psr.pp (psr.be too?)
359 if (PSCB(vcpu,tmp[8])) newpsr.pp = 1;
360 else newpsr.pp = 0;
361 *pval = *(unsigned long *)&newpsr;
362 return IA64_NO_FAULT;
363 }
365 BOOLEAN vcpu_get_psr_ic(VCPU *vcpu)
366 {
367 return !!PSCB(vcpu,interrupt_collection_enabled);
368 }
370 BOOLEAN vcpu_get_psr_i(VCPU *vcpu)
371 {
372 return !!PSCB(vcpu,interrupt_delivery_enabled);
373 }
375 UINT64 vcpu_get_ipsr_int_state(VCPU *vcpu,UINT64 prevpsr)
376 {
377 UINT64 dcr = PSCBX(vcpu,dcr);
378 PSR psr = {0};
380 //printf("*** vcpu_get_ipsr_int_state (0x%016lx)...",prevpsr);
381 psr.i64 = prevpsr;
382 psr.ia64_psr.be = 0; if (dcr & IA64_DCR_BE) psr.ia64_psr.be = 1;
383 psr.ia64_psr.pp = 0; if (dcr & IA64_DCR_PP) psr.ia64_psr.pp = 1;
384 psr.ia64_psr.ic = PSCB(vcpu,interrupt_collection_enabled);
385 psr.ia64_psr.i = PSCB(vcpu,interrupt_delivery_enabled);
386 psr.ia64_psr.bn = PSCB(vcpu,banknum);
387 psr.ia64_psr.dt = 1; psr.ia64_psr.it = 1; psr.ia64_psr.rt = 1;
388 if (psr.ia64_psr.cpl == 2) psr.ia64_psr.cpl = 0; // !!!! fool domain
389 // psr.pk = 1;
390 //printf("returns 0x%016lx...",psr.i64);
391 return psr.i64;
392 }
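/* Worked example (illustrative values, not part of vcpu.c): if the domain's
 * dcr has only IA64_DCR_PP set, collection is enabled, delivery is disabled
 * and banknum==1, then for a prevpsr with cpl==2 the psr returned for the
 * interruption is: be=0, pp=1 (both taken from dcr), ic=1, i=0, bn=1 (taken
 * from the virtualized state), dt=it=rt=1 (forced on), and cpl rewritten from
 * 2 to 0 so the domain believes it was running at privilege level 0. */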
394 /**************************************************************************
395 VCPU control register access routines
396 **************************************************************************/
398 IA64FAULT vcpu_get_dcr(VCPU *vcpu, UINT64 *pval)
399 {
400 extern unsigned long privop_trace;
401 //privop_trace=0;
402 //verbose("vcpu_get_dcr: called @%p\n",PSCB(vcpu,iip));
403 // Reads of cr.dcr on Xen always have the sign bit set, so
404 // a domain can differentiate whether it is running on SP or not
405 *pval = PSCBX(vcpu,dcr) | 0x8000000000000000L;
406 return (IA64_NO_FAULT);
407 }
409 IA64FAULT vcpu_get_iva(VCPU *vcpu, UINT64 *pval)
410 {
411 if(VMX_DOMAIN(vcpu)){
412 *pval = PSCB(vcpu,iva) & ~0x7fffL;
413 }else{
414 *pval = PSCBX(vcpu,iva) & ~0x7fffL;
415 }
416 return (IA64_NO_FAULT);
417 }
419 IA64FAULT vcpu_get_pta(VCPU *vcpu, UINT64 *pval)
420 {
421 *pval = PSCB(vcpu,pta);
422 return (IA64_NO_FAULT);
423 }
425 IA64FAULT vcpu_get_ipsr(VCPU *vcpu, UINT64 *pval)
426 {
427 //REGS *regs = vcpu_regs(vcpu);
428 //*pval = regs->cr_ipsr;
429 *pval = PSCB(vcpu,ipsr);
430 return (IA64_NO_FAULT);
431 }
433 IA64FAULT vcpu_get_isr(VCPU *vcpu, UINT64 *pval)
434 {
435 *pval = PSCB(vcpu,isr);
436 return (IA64_NO_FAULT);
437 }
439 IA64FAULT vcpu_get_iip(VCPU *vcpu, UINT64 *pval)
440 {
441 //REGS *regs = vcpu_regs(vcpu);
442 //*pval = regs->cr_iip;
443 *pval = PSCB(vcpu,iip);
444 return (IA64_NO_FAULT);
445 }
447 IA64FAULT vcpu_get_ifa(VCPU *vcpu, UINT64 *pval)
448 {
449 UINT64 val = PSCB(vcpu,ifa);
450 REGS *regs = vcpu_regs(vcpu);
451 PRIVOP_COUNT_ADDR(regs,_GET_IFA);
452 *pval = val;
453 return (IA64_NO_FAULT);
454 }
456 unsigned long vcpu_get_rr_ps(VCPU *vcpu,UINT64 vadr)
457 {
458 ia64_rr rr;
460 rr.rrval = PSCB(vcpu,rrs)[vadr>>61];
461 return(rr.ps);
462 }
464 unsigned long vcpu_get_rr_rid(VCPU *vcpu,UINT64 vadr)
465 {
466 ia64_rr rr;
468 rr.rrval = PSCB(vcpu,rrs)[vadr>>61];
469 return(rr.rid);
470 }
472 unsigned long vcpu_get_itir_on_fault(VCPU *vcpu, UINT64 ifa)
473 {
474 ia64_rr rr;
476 rr.rrval = 0;
477 rr.ps = vcpu_get_rr_ps(vcpu,ifa);
478 rr.rid = vcpu_get_rr_rid(vcpu,ifa);
479 return (rr.rrval);
480 }
483 IA64FAULT vcpu_get_itir(VCPU *vcpu, UINT64 *pval)
484 {
485 UINT64 val = PSCB(vcpu,itir);
486 *pval = val;
487 return (IA64_NO_FAULT);
488 }
490 IA64FAULT vcpu_get_iipa(VCPU *vcpu, UINT64 *pval)
491 {
492 UINT64 val = PSCB(vcpu,iipa);
493 // SP entry code does not save iipa yet nor does it get
494 // properly delivered in the pscb
495 // printf("*** vcpu_get_iipa: cr.iipa not fully implemented yet!!\n");
496 *pval = val;
497 return (IA64_NO_FAULT);
498 }
500 IA64FAULT vcpu_get_ifs(VCPU *vcpu, UINT64 *pval)
501 {
502 //PSCB(vcpu,ifs) = PSCB(vcpu)->regs.cr_ifs;
503 //*pval = PSCB(vcpu,regs).cr_ifs;
504 *pval = PSCB(vcpu,ifs);
505 PSCB(vcpu,incomplete_regframe) = 0;
506 return (IA64_NO_FAULT);
507 }
509 IA64FAULT vcpu_get_iim(VCPU *vcpu, UINT64 *pval)
510 {
511 UINT64 val = PSCB(vcpu,iim);
512 *pval = val;
513 return (IA64_NO_FAULT);
514 }
516 IA64FAULT vcpu_get_iha(VCPU *vcpu, UINT64 *pval)
517 {
518 //return vcpu_thash(vcpu,PSCB(vcpu,ifa),pval);
519 UINT64 val = PSCB(vcpu,iha);
520 REGS *regs = vcpu_regs(vcpu);
521 PRIVOP_COUNT_ADDR(regs,_THASH);
522 *pval = val;
523 return (IA64_NO_FAULT);
524 }
526 IA64FAULT vcpu_set_dcr(VCPU *vcpu, UINT64 val)
527 {
528 extern unsigned long privop_trace;
529 //privop_trace=1;
530 // Reads of cr.dcr on SP always have the sign bit set, so
531 // a domain can differentiate whether it is running on SP or not
532 // Thus, writes of DCR should ignore the sign bit
533 //verbose("vcpu_set_dcr: called\n");
534 PSCBX(vcpu,dcr) = val & ~0x8000000000000000L;
535 return (IA64_NO_FAULT);
536 }
538 IA64FAULT vcpu_set_iva(VCPU *vcpu, UINT64 val)
539 {
540 if(VMX_DOMAIN(vcpu)){
541 PSCB(vcpu,iva) = val & ~0x7fffL;
542 }else{
543 PSCBX(vcpu,iva) = val & ~0x7fffL;
544 }
545 return (IA64_NO_FAULT);
546 }
548 IA64FAULT vcpu_set_pta(VCPU *vcpu, UINT64 val)
549 {
550 if (val & IA64_PTA_LFMT) {
551 printf("*** No support for VHPT long format yet!!\n");
552 return (IA64_ILLOP_FAULT);
553 }
554 if (val & (0x3f<<9)) /* reserved fields */ return IA64_RSVDREG_FAULT;
555 if (val & 2) /* reserved fields */ return IA64_RSVDREG_FAULT;
556 PSCB(vcpu,pta) = val;
557 return IA64_NO_FAULT;
558 }
560 IA64FAULT vcpu_set_ipsr(VCPU *vcpu, UINT64 val)
561 {
562 PSCB(vcpu,ipsr) = val;
563 return IA64_NO_FAULT;
564 }
566 IA64FAULT vcpu_set_isr(VCPU *vcpu, UINT64 val)
567 {
568 PSCB(vcpu,isr) = val;
569 return IA64_NO_FAULT;
570 }
572 IA64FAULT vcpu_set_iip(VCPU *vcpu, UINT64 val)
573 {
574 PSCB(vcpu,iip) = val;
575 return IA64_NO_FAULT;
576 }
578 IA64FAULT vcpu_increment_iip(VCPU *vcpu)
579 {
580 REGS *regs = vcpu_regs(vcpu);
581 struct ia64_psr *ipsr = (struct ia64_psr *)&regs->cr_ipsr;
582 if (ipsr->ri == 2) { ipsr->ri=0; regs->cr_iip += 16; }
583 else ipsr->ri++;
584 return (IA64_NO_FAULT);
585 }
587 IA64FAULT vcpu_set_ifa(VCPU *vcpu, UINT64 val)
588 {
589 PSCB(vcpu,ifa) = val;
590 return IA64_NO_FAULT;
591 }
593 IA64FAULT vcpu_set_itir(VCPU *vcpu, UINT64 val)
594 {
595 PSCB(vcpu,itir) = val;
596 return IA64_NO_FAULT;
597 }
599 IA64FAULT vcpu_set_iipa(VCPU *vcpu, UINT64 val)
600 {
601 // SP entry code does not save iipa yet nor does it get
602 // properly delivered in the pscb
603 // printf("*** vcpu_set_iipa: cr.iipa not fully implemented yet!!\n");
604 PSCB(vcpu,iipa) = val;
605 return IA64_NO_FAULT;
606 }
608 IA64FAULT vcpu_set_ifs(VCPU *vcpu, UINT64 val)
609 {
610 //REGS *regs = vcpu_regs(vcpu);
611 PSCB(vcpu,ifs) = val;
612 return IA64_NO_FAULT;
613 }
615 IA64FAULT vcpu_set_iim(VCPU *vcpu, UINT64 val)
616 {
617 PSCB(vcpu,iim) = val;
618 return IA64_NO_FAULT;
619 }
621 IA64FAULT vcpu_set_iha(VCPU *vcpu, UINT64 val)
622 {
623 PSCB(vcpu,iha) = val;
624 return IA64_NO_FAULT;
625 }
627 /**************************************************************************
628 VCPU interrupt control register access routines
629 **************************************************************************/
631 void vcpu_pend_unspecified_interrupt(VCPU *vcpu)
632 {
633 PSCB(vcpu,pending_interruption) = 1;
634 }
636 void vcpu_pend_interrupt(VCPU *vcpu, UINT64 vector)
637 {
638 if (vector & ~0xff) {
639 printf("vcpu_pend_interrupt: bad vector\n");
640 return;
641 }
642 if ( VMX_DOMAIN(vcpu) ) {
643 set_bit(vector,VCPU(vcpu,irr));
644 } else
645 {
646 /* if (!test_bit(vector,PSCB(vcpu,delivery_mask))) return; */
647 if (test_bit(vector,PSCBX(vcpu,irr))) {
648 //printf("vcpu_pend_interrupt: overrun\n");
649 }
650 set_bit(vector,PSCBX(vcpu,irr));
651 PSCB(vcpu,pending_interruption) = 1;
652 }
653 }
655 void early_tick(VCPU *vcpu)
656 {
657 UINT64 *p = &PSCBX(vcpu,irr[3]);
658 printf("vcpu_check_pending: about to deliver early tick\n");
659 printf("&irr[0]=%p, irr[0]=0x%lx\n",p,*p);
660 }
662 #define IA64_TPR_MMI 0x10000
663 #define IA64_TPR_MIC 0x000f0
665 /* checks to see if a VCPU has any unmasked pending interrupts
666 * if so, returns the highest, else returns SPURIOUS_VECTOR */
667 /* NOTE: Since this gets called from vcpu_get_ivr() and the
668 * semantics of "mov rx=cr.ivr" ignore the setting of the psr.i bit,
669 * this routine also ignores pscb.interrupt_delivery_enabled
670 * and this must be checked independently; see vcpu_deliverable_interrupts() */
671 UINT64 vcpu_check_pending_interrupts(VCPU *vcpu)
672 {
673 UINT64 *p, *q, *r, bits, bitnum, mask, i, vector;
675 /* Always check pending event, since guest may just ack the
676 * event injection without handle. Later guest may throw out
677 * the event itself.
678 */
679 if (event_pending(vcpu) &&
680 !test_bit(vcpu->vcpu_info->arch.evtchn_vector,
681 &PSCBX(vcpu, insvc[0])))
682 vcpu_pend_interrupt(vcpu, vcpu->vcpu_info->arch.evtchn_vector);
684 p = &PSCBX(vcpu,irr[3]);
685 /* q = &PSCB(vcpu,delivery_mask[3]); */
686 r = &PSCBX(vcpu,insvc[3]);
687 for (i = 3; ; p--, q--, r--, i--) {
688 bits = *p /* & *q */;
689 if (bits) break; // got a potential interrupt
690 if (*r) {
691 // nothing in this word which is pending+inservice
692 // but there is one inservice which masks lower
693 return SPURIOUS_VECTOR;
694 }
695 if (i == 0) {
696 // checked all bits... nothing pending+inservice
697 return SPURIOUS_VECTOR;
698 }
699 }
700 // have a pending,deliverable interrupt... see if it is masked
701 bitnum = ia64_fls(bits);
702 //printf("XXXXXXX vcpu_check_pending_interrupts: got bitnum=%p...",bitnum);
703 vector = bitnum+(i*64);
704 mask = 1L << bitnum;
705 //printf("XXXXXXX vcpu_check_pending_interrupts: got vector=%p...",vector);
706 if (*r >= mask) {
707 // masked by equal inservice
708 //printf("but masked by equal inservice\n");
709 return SPURIOUS_VECTOR;
710 }
711 if (PSCB(vcpu,tpr) & IA64_TPR_MMI) {
712 // tpr.mmi is set
713 //printf("but masked by tpr.mmi\n");
714 return SPURIOUS_VECTOR;
715 }
716 if (((PSCB(vcpu,tpr) & IA64_TPR_MIC) + 15) >= vector) {
717 //tpr.mic masks class
718 //printf("but masked by tpr.mic\n");
719 return SPURIOUS_VECTOR;
720 }
722 //printf("returned to caller\n");
723 return vector;
724 }
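/* Illustrative sketch (not part of vcpu.c): the tpr checks above in plain
 * arithmetic.  tpr.mic occupies bits 7:4, so (tpr & IA64_TPR_MIC) is already
 * mic*16, and a pending vector is masked exactly when its interrupt class
 * (vector/16) is at or below mic.  The helper name is hypothetical. */
static int example_masked_by_tpr(UINT64 tpr, UINT64 vector)
{
	if (tpr & IA64_TPR_MMI)
		return 1;	/* mmi set: all maskable external interrupts masked */
	return ((tpr & IA64_TPR_MIC) + 15) >= vector;	/* class <= mic */
}
/* e.g. tpr.mic==3 (tpr==0x30) masks vectors up through 0x3f and lets 0x40 and above through */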
726 UINT64 vcpu_deliverable_interrupts(VCPU *vcpu)
727 {
728 return (vcpu_get_psr_i(vcpu) &&
729 vcpu_check_pending_interrupts(vcpu) != SPURIOUS_VECTOR);
730 }
732 UINT64 vcpu_deliverable_timer(VCPU *vcpu)
733 {
734 return (vcpu_get_psr_i(vcpu) &&
735 vcpu_check_pending_interrupts(vcpu) == PSCB(vcpu,itv));
736 }
738 IA64FAULT vcpu_get_lid(VCPU *vcpu, UINT64 *pval)
739 {
740 extern unsigned long privop_trace;
741 //privop_trace=1;
742 //TODO: Implement this
743 printf("vcpu_get_lid: WARNING: Getting cr.lid always returns zero\n");
744 //*pval = 0;
745 *pval = ia64_getreg(_IA64_REG_CR_LID);
746 return IA64_NO_FAULT;
747 }
749 IA64FAULT vcpu_get_ivr(VCPU *vcpu, UINT64 *pval)
750 {
751 int i;
752 UINT64 vector, mask;
754 #define HEARTBEAT_FREQ 16 // period in seconds
755 #ifdef HEARTBEAT_FREQ
756 #define N_DOMS 16 // number of domains tracked by the heartbeat counters
757 static long count[N_DOMS] = { 0 };
758 static long nonclockcount[N_DOMS] = { 0 };
759 REGS *regs = vcpu_regs(vcpu);
760 unsigned domid = vcpu->domain->domain_id;
761 #endif
762 #ifdef IRQ_DEBUG
763 static char firstivr = 1;
764 static char firsttime[256];
765 if (firstivr) {
766 int i;
767 for (i=0;i<256;i++) firsttime[i]=1;
768 firstivr=0;
769 }
770 #endif
772 vector = vcpu_check_pending_interrupts(vcpu);
773 if (vector == SPURIOUS_VECTOR) {
774 PSCB(vcpu,pending_interruption) = 0;
775 *pval = vector;
776 return IA64_NO_FAULT;
777 }
778 #ifdef HEARTBEAT_FREQ
779 if (domid >= N_DOMS) domid = N_DOMS-1;
780 #if 0
781 if (vector == (PSCB(vcpu,itv) & 0xff)) {
782 if (!(++count[domid] & ((HEARTBEAT_FREQ*1024)-1))) {
783 printf("Dom%d heartbeat... ticks=%lx,nonticks=%lx\n",
784 domid, count[domid], nonclockcount[domid]);
785 //count[domid] = 0;
786 //dump_runq();
787 }
788 }
789 #endif
790 else nonclockcount[domid]++;
791 #endif
792 // now have an unmasked, pending, deliverable vector!
793 // getting ivr has "side effects"
794 #ifdef IRQ_DEBUG
795 if (firsttime[vector]) {
796 printf("*** First get_ivr on vector=%d,itc=%lx\n",
797 vector,ia64_get_itc());
798 firsttime[vector]=0;
799 }
800 #endif
801 i = vector >> 6;
802 mask = 1L << (vector & 0x3f);
803 //printf("ZZZZZZ vcpu_get_ivr: setting insvc mask for vector %ld\n",vector);
804 PSCBX(vcpu,insvc[i]) |= mask;
805 PSCBX(vcpu,irr[i]) &= ~mask;
806 //PSCB(vcpu,pending_interruption)--;
807 *pval = vector;
808 // if delivering a timer interrupt, remember domain_itm
809 if (vector == (PSCB(vcpu,itv) & 0xff)) {
810 PSCBX(vcpu,domain_itm_last) = PSCBX(vcpu,domain_itm);
811 }
812 return IA64_NO_FAULT;
813 }
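/* Worked example (illustrative): for vector 0xd1 (209), i = 209 >> 6 = 3 and
 * mask = 1L << (209 & 0x3f) = 1L << 17, so the read of cr.ivr sets bit 17 of
 * insvc[3] and clears the same bit in irr[3] -- the interrupt moves from
 * "pending" to "in service" as a side effect of the read, and stays there
 * until the domain writes cr.eoi (vcpu_set_eoi below). */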
815 IA64FAULT vcpu_get_tpr(VCPU *vcpu, UINT64 *pval)
816 {
817 *pval = PSCB(vcpu,tpr);
818 return (IA64_NO_FAULT);
819 }
821 IA64FAULT vcpu_get_eoi(VCPU *vcpu, UINT64 *pval)
822 {
823 *pval = 0L; // reads of eoi always return 0
824 return (IA64_NO_FAULT);
825 }
827 IA64FAULT vcpu_get_irr0(VCPU *vcpu, UINT64 *pval)
828 {
829 #ifndef IRR_USE_FIXED
830 printk("vcpu_get_irr: called, not implemented yet\n");
831 return IA64_ILLOP_FAULT;
832 #else
833 *pval = vcpu->irr[0];
834 return (IA64_NO_FAULT);
835 #endif
836 }
838 IA64FAULT vcpu_get_irr1(VCPU *vcpu, UINT64 *pval)
839 {
840 #ifndef IRR_USE_FIXED
841 printk("vcpu_get_irr: called, not implemented yet\n");
842 return IA64_ILLOP_FAULT;
843 #else
844 *pval = vcpu->irr[1];
845 return (IA64_NO_FAULT);
846 #endif
847 }
849 IA64FAULT vcpu_get_irr2(VCPU *vcpu, UINT64 *pval)
850 {
851 #ifndef IRR_USE_FIXED
852 printk("vcpu_get_irr: called, not implemented yet\n");
853 return IA64_ILLOP_FAULT;
854 #else
855 *pval = vcpu->irr[2];
856 return (IA64_NO_FAULT);
857 #endif
858 }
860 IA64FAULT vcpu_get_irr3(VCPU *vcpu, UINT64 *pval)
861 {
862 #ifndef IRR_USE_FIXED
863 printk("vcpu_get_irr: called, not implemented yet\n");
864 return IA64_ILLOP_FAULT;
865 #else
866 *pval = vcpu->irr[3];
867 return (IA64_NO_FAULT);
868 #endif
869 }
871 IA64FAULT vcpu_get_itv(VCPU *vcpu, UINT64 *pval)
872 {
873 *pval = PSCB(vcpu,itv);
874 return (IA64_NO_FAULT);
875 }
877 IA64FAULT vcpu_get_pmv(VCPU *vcpu, UINT64 *pval)
878 {
879 *pval = PSCB(vcpu,pmv);
880 return (IA64_NO_FAULT);
881 }
883 IA64FAULT vcpu_get_cmcv(VCPU *vcpu, UINT64 *pval)
884 {
885 *pval = PSCB(vcpu,cmcv);
886 return (IA64_NO_FAULT);
887 }
889 IA64FAULT vcpu_get_lrr0(VCPU *vcpu, UINT64 *pval)
890 {
891 // fix this when setting values other than m-bit is supported
892 printf("vcpu_get_lrr0: Unmasked interrupts unsupported\n");
893 *pval = (1L << 16);
894 return (IA64_NO_FAULT);
895 }
897 IA64FAULT vcpu_get_lrr1(VCPU *vcpu, UINT64 *pval)
898 {
899 // fix this when setting values other than m-bit is supported
900 printf("vcpu_get_lrr1: Unmasked interrupts unsupported\n");
901 *pval = (1L << 16);
902 return (IA64_NO_FAULT);
903 }
905 IA64FAULT vcpu_set_lid(VCPU *vcpu, UINT64 val)
906 {
907 printf("vcpu_set_lid: Setting cr.lid is unsupported\n");
908 return (IA64_ILLOP_FAULT);
909 }
911 IA64FAULT vcpu_set_tpr(VCPU *vcpu, UINT64 val)
912 {
913 if (val & 0xff00) return IA64_RSVDREG_FAULT;
914 PSCB(vcpu,tpr) = val;
915 if (vcpu_check_pending_interrupts(vcpu) != SPURIOUS_VECTOR)
916 PSCB(vcpu,pending_interruption) = 1;
917 return (IA64_NO_FAULT);
918 }
920 IA64FAULT vcpu_set_eoi(VCPU *vcpu, UINT64 val)
921 {
922 UINT64 *p, bits, vec, bitnum;
923 int i;
925 p = &PSCBX(vcpu,insvc[3]);
926 for (i = 3; (i >= 0) && !(bits = *p); i--, p--);
927 if (i < 0) {
928 printf("Trying to EOI interrupt when none are in-service.\r\n");
929 return IA64_NO_FAULT; // nothing in-service: treat the spurious EOI as a no-op
930 }
931 bitnum = ia64_fls(bits);
932 vec = bitnum + (i*64);
933 /* clear the correct bit */
934 bits &= ~(1L << bitnum);
935 *p = bits;
936 /* clearing an eoi bit may unmask another pending interrupt... */
937 if (PSCB(vcpu,interrupt_delivery_enabled)) { // but only if enabled...
938 // worry about this later... Linux only calls eoi
939 // with interrupts disabled
940 printf("Trying to EOI interrupt with interrupts enabled\r\n");
941 }
942 if (vcpu_check_pending_interrupts(vcpu) != SPURIOUS_VECTOR)
943 PSCB(vcpu,pending_interruption) = 1;
944 //printf("YYYYY vcpu_set_eoi: Successful\n");
945 return (IA64_NO_FAULT);
946 }
948 IA64FAULT vcpu_set_lrr0(VCPU *vcpu, UINT64 val)
949 {
950 if (!(val & (1L << 16))) {
951 printf("vcpu_set_lrr0: Unmasked interrupts unsupported\n");
952 return (IA64_ILLOP_FAULT);
953 }
954 // no place to save this state but nothing to do anyway
955 return (IA64_NO_FAULT);
956 }
958 IA64FAULT vcpu_set_lrr1(VCPU *vcpu, UINT64 val)
959 {
960 if (!(val & (1L << 16))) {
961 printf("vcpu_set_lrr0: Unmasked interrupts unsupported\n");
962 return (IA64_ILLOP_FAULT);
963 }
964 // no place to save this state but nothing to do anyway
965 return (IA64_NO_FAULT);
966 }
968 // parameter is a time interval specified in cycles
969 void vcpu_enable_timer(VCPU *vcpu,UINT64 cycles)
970 {
971 PSCBX(vcpu,xen_timer_interval) = cycles;
972 vcpu_set_next_timer(vcpu);
973 printf("vcpu_enable_timer(%d): interval set to %d cycles\n",
974 PSCBX(vcpu,xen_timer_interval));
975 __set_bit(PSCB(vcpu,itv), PSCB(vcpu,delivery_mask));
976 }
978 IA64FAULT vcpu_set_itv(VCPU *vcpu, UINT64 val)
979 {
980 extern unsigned long privop_trace;
981 //privop_trace=1;
982 if (val & 0xef00) return (IA64_ILLOP_FAULT);
983 PSCB(vcpu,itv) = val;
984 if (val & 0x10000) {
985 printf("**** vcpu_set_itv(%d): vitm=%lx, setting to 0\n",val,PSCBX(vcpu,domain_itm));
986 PSCBX(vcpu,domain_itm) = 0;
987 }
988 else vcpu_enable_timer(vcpu,1000000L);
989 return (IA64_NO_FAULT);
990 }
992 IA64FAULT vcpu_set_pmv(VCPU *vcpu, UINT64 val)
993 {
994 if (val & 0xef00) /* reserved fields */ return IA64_RSVDREG_FAULT;
995 PSCB(vcpu,pmv) = val;
996 return (IA64_NO_FAULT);
997 }
999 IA64FAULT vcpu_set_cmcv(VCPU *vcpu, UINT64 val)
1001 if (val & 0xef00) /* reserved fields */ return IA64_RSVDREG_FAULT;
1002 PSCB(vcpu,cmcv) = val;
1003 return (IA64_NO_FAULT);
1006 /**************************************************************************
1007 VCPU temporary register access routines
1008 **************************************************************************/
1009 UINT64 vcpu_get_tmp(VCPU *vcpu, UINT64 index)
1011 if (index > 7) return 0;
1012 return PSCB(vcpu,tmp[index]);
1015 void vcpu_set_tmp(VCPU *vcpu, UINT64 index, UINT64 val)
1017 if (index <= 7) PSCB(vcpu,tmp[index]) = val;
1020 /**************************************************************************
1021 Interval timer routines
1022 **************************************************************************/
1024 BOOLEAN vcpu_timer_disabled(VCPU *vcpu)
1026 UINT64 itv = PSCB(vcpu,itv);
1027 return(!itv || !!(itv & 0x10000));
1030 BOOLEAN vcpu_timer_inservice(VCPU *vcpu)
1032 UINT64 itv = PSCB(vcpu,itv);
1033 return (test_bit(itv, PSCBX(vcpu,insvc)));
1036 BOOLEAN vcpu_timer_expired(VCPU *vcpu)
1038 unsigned long domain_itm = PSCBX(vcpu,domain_itm);
1039 unsigned long now = ia64_get_itc();
1041 if (!domain_itm) return FALSE;
1042 if (now < domain_itm) return FALSE;
1043 if (vcpu_timer_disabled(vcpu)) return FALSE;
1044 return TRUE;
1047 void vcpu_safe_set_itm(unsigned long val)
1049 unsigned long epsilon = 100;
1050 unsigned long flags;
1051 UINT64 now = ia64_get_itc();
1053 local_irq_save(flags);
1054 while (1) {
1055 //printf("*** vcpu_safe_set_itm: Setting itm to %lx, itc=%lx\n",val,now);
1056 ia64_set_itm(val);
1057 if (val > (now = ia64_get_itc())) break;
1058 val = now + epsilon;
1059 epsilon <<= 1;
1061 local_irq_restore(flags);
1064 void vcpu_set_next_timer(VCPU *vcpu)
1066 UINT64 d = PSCBX(vcpu,domain_itm);
1067 //UINT64 s = PSCBX(vcpu,xen_itm);
1068 UINT64 s = local_cpu_data->itm_next;
1069 UINT64 now = ia64_get_itc();
1070 //UINT64 interval = PSCBX(vcpu,xen_timer_interval);
1072 /* gloss over the wraparound problem for now... we know it exists
1073 * but it doesn't matter right now */
1075 if (is_idle_task(vcpu->domain)) {
1076 // printf("****** vcpu_set_next_timer called during idle!!\n");
1077 vcpu_safe_set_itm(s);
1078 return;
1080 //s = PSCBX(vcpu,xen_itm);
1081 if (d && (d > now) && (d < s)) {
1082 vcpu_safe_set_itm(d);
1083 //using_domain_as_itm++;
1085 else {
1086 vcpu_safe_set_itm(s);
1087 //using_xen_as_itm++;
1091 IA64FAULT vcpu_set_itm(VCPU *vcpu, UINT64 val)
1093 UINT now = ia64_get_itc();
1095 //if (val < now) val = now + 1000;
1096 //printf("*** vcpu_set_itm: called with %lx\n",val);
1097 PSCBX(vcpu,domain_itm) = val;
1098 vcpu_set_next_timer(vcpu);
1099 return (IA64_NO_FAULT);
1102 IA64FAULT vcpu_set_itc(VCPU *vcpu, UINT64 val)
1105 UINT64 oldnow = ia64_get_itc();
1106 UINT64 olditm = PSCBX(vcpu,domain_itm);
1107 unsigned long d = olditm - oldnow;
1108 unsigned long x = local_cpu_data->itm_next - oldnow;
1110 UINT64 newnow = val, min_delta;
1112 #define DISALLOW_SETTING_ITC_FOR_NOW
1113 #ifdef DISALLOW_SETTING_ITC_FOR_NOW
1114 printf("vcpu_set_itc: Setting ar.itc is currently disabled\n");
1115 #else
1116 local_irq_disable();
1117 if (olditm) {
1118 printf("**** vcpu_set_itc(%lx): vitm changed to %lx\n",val,newnow+d);
1119 PSCBX(vcpu,domain_itm) = newnow + d;
1121 local_cpu_data->itm_next = newnow + x;
1122 d = PSCBX(vcpu,domain_itm);
1123 x = local_cpu_data->itm_next;
1125 ia64_set_itc(newnow);
1126 if (d && (d > newnow) && (d < x)) {
1127 vcpu_safe_set_itm(d);
1128 //using_domain_as_itm++;
1130 else {
1131 vcpu_safe_set_itm(x);
1132 //using_xen_as_itm++;
1134 local_irq_enable();
1135 #endif
1136 return (IA64_NO_FAULT);
1139 IA64FAULT vcpu_get_itm(VCPU *vcpu, UINT64 *pval)
1141 //FIXME: Implement this
1142 printf("vcpu_get_itm: Getting cr.itm is unsupported... continuing\n");
1143 return (IA64_NO_FAULT);
1144 //return (IA64_ILLOP_FAULT);
1147 IA64FAULT vcpu_get_itc(VCPU *vcpu, UINT64 *pval)
1149 //TODO: Implement this
1150 printf("vcpu_get_itc: Getting ar.itc is unsupported\n");
1151 return (IA64_ILLOP_FAULT);
1154 void vcpu_pend_timer(VCPU *vcpu)
1156 UINT64 itv = PSCB(vcpu,itv) & 0xff;
1158 if (vcpu_timer_disabled(vcpu)) return;
1159 //if (vcpu_timer_inservice(vcpu)) return;
1160 if (PSCBX(vcpu,domain_itm_last) == PSCBX(vcpu,domain_itm)) {
1161 // already delivered an interrupt for this so
1162 // don't deliver another
1163 return;
1165 vcpu_pend_interrupt(vcpu, itv);
1168 // returns true if a timer interrupt could be delivered to the domain now, before its itm has actually expired (i.e. "early")
1169 UINT64 vcpu_timer_pending_early(VCPU *vcpu)
1171 UINT64 now = ia64_get_itc();
1172 UINT64 itm = PSCBX(vcpu,domain_itm);
1174 if (vcpu_timer_disabled(vcpu)) return 0;
1175 if (!itm) return 0;
1176 return (vcpu_deliverable_timer(vcpu) && (now < itm));
1179 /**************************************************************************
1180 Privileged operation emulation routines
1181 **************************************************************************/
1183 IA64FAULT vcpu_force_data_miss(VCPU *vcpu, UINT64 ifa)
1185 PSCB(vcpu,ifa) = ifa;
1186 return (vcpu_get_rr_ve(vcpu,ifa) ? IA64_DATA_TLB_VECTOR : IA64_ALT_DATA_TLB_VECTOR);
1190 IA64FAULT vcpu_rfi(VCPU *vcpu)
1192 // TODO: Only allowed for current vcpu
1193 PSR psr;
1194 UINT64 int_enable, regspsr = 0;
1195 UINT64 ifs;
1196 REGS *regs = vcpu_regs(vcpu);
1197 extern void dorfirfi(void);
1199 psr.i64 = PSCB(vcpu,ipsr);
1200 if (psr.ia64_psr.cpl < 3) psr.ia64_psr.cpl = 2;
1201 if (psr.ia64_psr.i) PSCB(vcpu,interrupt_delivery_enabled) = 1;
1202 int_enable = psr.ia64_psr.i;
1203 if (psr.ia64_psr.ic) PSCB(vcpu,interrupt_collection_enabled) = 1;
1204 if (psr.ia64_psr.dt && psr.ia64_psr.rt && psr.ia64_psr.it) vcpu_set_metaphysical_mode(vcpu,FALSE);
1205 else vcpu_set_metaphysical_mode(vcpu,TRUE);
1206 psr.ia64_psr.ic = 1; psr.ia64_psr.i = 1;
1207 psr.ia64_psr.dt = 1; psr.ia64_psr.rt = 1; psr.ia64_psr.it = 1;
1208 psr.ia64_psr.bn = 1;
1209 //psr.pk = 1; // checking pkeys shouldn't be a problem but seems broken
1210 if (psr.ia64_psr.be) {
1211 printf("*** DOMAIN TRYING TO TURN ON BIG-ENDIAN!!!\n");
1212 return (IA64_ILLOP_FAULT);
1214 PSCB(vcpu,incomplete_regframe) = 0; // is this necessary?
1215 ifs = PSCB(vcpu,ifs);
1216 //if ((ifs & regs->cr_ifs & 0x8000000000000000L) && ifs != regs->cr_ifs) {
1217 //if ((ifs & 0x8000000000000000L) && ifs != regs->cr_ifs) {
1218 if (ifs & regs->cr_ifs & 0x8000000000000000L) {
1219 // TODO: validate PSCB(vcpu,iip)
1220 // TODO: PSCB(vcpu,ipsr) = psr;
1221 PSCB(vcpu,ipsr) = psr.i64;
1222 // now set up the trampoline
1223 regs->cr_iip = *(unsigned long *)dorfirfi; // function pointer!!
1224 __asm__ __volatile ("mov %0=psr;;":"=r"(regspsr)::"memory");
1225 regs->cr_ipsr = regspsr & ~(IA64_PSR_I | IA64_PSR_IC | IA64_PSR_BN);
1227 else {
1228 regs->cr_ipsr = psr.i64;
1229 regs->cr_iip = PSCB(vcpu,iip);
1231 PSCB(vcpu,interrupt_collection_enabled) = 1;
1232 vcpu_bsw1(vcpu);
1233 PSCB(vcpu,interrupt_delivery_enabled) = int_enable;
1234 return (IA64_NO_FAULT);
1237 IA64FAULT vcpu_cover(VCPU *vcpu)
1239 // TODO: Only allowed for current vcpu
1240 REGS *regs = vcpu_regs(vcpu);
1242 if (!PSCB(vcpu,interrupt_collection_enabled)) {
1243 if (!PSCB(vcpu,incomplete_regframe))
1244 PSCB(vcpu,ifs) = regs->cr_ifs;
1245 else PSCB(vcpu,incomplete_regframe) = 0;
1247 regs->cr_ifs = 0;
1248 return (IA64_NO_FAULT);
1251 IA64FAULT vcpu_thash(VCPU *vcpu, UINT64 vadr, UINT64 *pval)
1252 {
1253 UINT64 pta = PSCB(vcpu,pta);
1254 UINT64 pta_sz = (pta & IA64_PTA_SZ(0x3f)) >> IA64_PTA_SZ_BIT;
1255 UINT64 pta_base = pta & ~((1UL << IA64_PTA_BASE_BIT)-1);
1256 UINT64 Mask = (1L << pta_sz) - 1;
1257 UINT64 Mask_60_15 = (Mask >> 15) & 0x3fffffffffff;
1258 UINT64 compMask_60_15 = ~Mask_60_15;
1259 //UINT64 rr_ps = RR_TO_PS(get_rr(vadr));
1260 UINT64 rr_ps = vcpu_get_rr_ps(vcpu,vadr);
1261 UINT64 VHPT_offset = (vadr >> rr_ps) << 3;
1262 UINT64 VHPT_addr1 = vadr & 0xe000000000000000L;
1263 UINT64 VHPT_addr2a =
1264 ((pta_base >> 15) & 0x3fffffffffff) & compMask_60_15;
1265 UINT64 VHPT_addr2b =
1266 ((VHPT_offset >> 15) & 0x3fffffffffff) & Mask_60_15;
1267 UINT64 VHPT_addr3 = VHPT_offset & 0x7fff;
1268 UINT64 VHPT_addr = VHPT_addr1 | ((VHPT_addr2a | VHPT_addr2b) << 15) |
1269 VHPT_addr3;
1271 //verbose("vcpu_thash: vadr=%p, VHPT_addr=%p\n",vadr,VHPT_addr);
1272 *pval = VHPT_addr;
1273 return (IA64_NO_FAULT);
1274 }
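/* Worked example (illustrative numbers, not part of vcpu.c): with rr.ps = 14
 * (16KB pages), pta.size = 15 (a 32KB per-region VHPT) and pta.base = 0x8000,
 * vadr = 0x2000000000054000 hashes as:
 *	VHPT_offset = (vadr >> 14) << 3	= 0x40000000000a8
 *	bits 14:0  kept from the offset	= 0x0a8
 *	bits 60:15 taken from pta.base	= 0x8000
 *	bits 63:61 taken from vadr (region 1)
 *	VHPT_addr			= 0x20000000000080a8
 * With a larger pta.size, bits size-1:15 of the offset displace the matching
 * pta.base bits via Mask_60_15/compMask_60_15. */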
1276 IA64FAULT vcpu_ttag(VCPU *vcpu, UINT64 vadr, UINT64 *padr)
1277 {
1278 printf("vcpu_ttag: ttag instruction unsupported\n");
1279 return (IA64_ILLOP_FAULT);
1280 }
1282 #define itir_ps(itir) ((itir >> 2) & 0x3f)
1283 #define itir_mask(itir) (~((1UL << itir_ps(itir)) - 1))
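/* Worked example (illustrative): an itir whose ps field is 14 (16KB page)
 * gives itir_ps(itir) == 14 and itir_mask(itir) == ~0x3fffUL, so
 * (pteval & _PAGE_PPN_MASK & mask) keeps the page frame while
 * (vadr & ~mask) keeps the in-page offset -- exactly how vcpu_tpa() below
 * combines them into a physical address. */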
1285 unsigned long vhpt_translate_count = 0;
1286 unsigned long fast_vhpt_translate_count = 0;
1287 unsigned long recover_to_page_fault_count = 0;
1288 unsigned long recover_to_break_fault_count = 0;
1290 int warn_region0_address = 0; // FIXME later: tie to a boot parameter?
1292 IA64FAULT vcpu_translate(VCPU *vcpu, UINT64 address, BOOLEAN is_data, UINT64 *pteval, UINT64 *itir, UINT64 *iha)
1293 {
1294 unsigned long pta, pte, rid, rr;
1295 int i;
1296 TR_ENTRY *trp;
1298 if (PSCB(vcpu,metaphysical_mode)) {
1299 unsigned long region = address >> 61;
1300 // dom0 may generate an uncacheable physical address (msb=1)
1301 if (region && ((region != 4) || (vcpu->domain != dom0))) {
1302 // FIXME: This seems to happen even though it shouldn't. Need to track
1303 // this down, but since it has been apparently harmless, just flag it for now
1304 // panic_domain(vcpu_regs(vcpu),
1305 printk(
1306 "vcpu_translate: bad physical address: %p\n",address);
1308 *pteval = (address & _PAGE_PPN_MASK) | __DIRTY_BITS | _PAGE_PL_2 | _PAGE_AR_RWX;
1309 *itir = PAGE_SHIFT << 2;
1310 phys_translate_count++;
1311 return IA64_NO_FAULT;
1312 }
1313 else if (!(address >> 61) && warn_region0_address) {
1314 REGS *regs = vcpu_regs(vcpu);
1315 unsigned long viip = PSCB(vcpu,iip);
1316 unsigned long vipsr = PSCB(vcpu,ipsr);
1317 unsigned long iip = regs->cr_iip;
1318 unsigned long ipsr = regs->cr_ipsr;
1319 printk("vcpu_translate: bad address %p, viip=%p, vipsr=%p, iip=%p, ipsr=%p continuing\n", address, viip, vipsr, iip, ipsr);
1322 rr = PSCB(vcpu,rrs)[address>>61];
1323 rid = rr & RR_RID_MASK;
1324 if (is_data) {
1325 if (vcpu_quick_region_check(vcpu->arch.dtr_regions,address)) {
1326 for (trp = vcpu->arch.dtrs, i = NDTRS; i; i--, trp++) {
1327 if (vcpu_match_tr_entry(trp,address,rid)) {
1328 *pteval = trp->page_flags;
1329 *itir = trp->itir;
1330 tr_translate_count++;
1331 return IA64_NO_FAULT;
1332 }
1333 }
1334 }
1335 }
1336 // FIXME?: check itr's for data accesses too, else bad things happen?
1337 /* else */ {
1338 if (vcpu_quick_region_check(vcpu->arch.itr_regions,address)) {
1339 for (trp = vcpu->arch.itrs, i = NITRS; i; i--, trp++) {
1340 if (vcpu_match_tr_entry(trp,address,rid)) {
1341 *pteval = trp->page_flags;
1342 *itir = trp->itir;
1343 tr_translate_count++;
1344 return IA64_NO_FAULT;
1345 }
1346 }
1347 }
1348 }
1350 /* check 1-entry TLB */
1351 // FIXME?: check dtlb for inst accesses too, else bad things happen?
1352 trp = &vcpu->arch.dtlb;
1353 if (/* is_data && */ vcpu_match_tr_entry(trp,address,rid)) {
1354 if (vcpu->domain==dom0 && !in_tpa) *pteval = trp->page_flags;
1355 else *pteval = vcpu->arch.dtlb_pte;
1356 *itir = trp->itir;
1357 dtlb_translate_count++;
1358 return IA64_NO_FAULT;
1359 }
1361 /* check guest VHPT */
1362 pta = PSCB(vcpu,pta);
1363 if (pta & IA64_PTA_VF) { /* long format VHPT - not implemented */
1364 panic_domain(vcpu_regs(vcpu),"can't do long format VHPT\n");
1365 //return (is_data ? IA64_DATA_TLB_VECTOR:IA64_INST_TLB_VECTOR);
1366 }
1368 *itir = rr & (RR_RID_MASK | RR_PS_MASK);
1369 // note: architecturally, iha is optionally set for alt faults but
1370 // xenlinux depends on it so should document it as part of PV interface
1371 vcpu_thash(vcpu, address, iha);
1372 if (!(rr & RR_VE_MASK) || !(pta & IA64_PTA_VE))
1373 return (is_data ? IA64_ALT_DATA_TLB_VECTOR : IA64_ALT_INST_TLB_VECTOR);
1375 /* avoid recursively walking (short format) VHPT */
1376 if (((address ^ pta) & ((itir_mask(pta) << 3) >> 3)) == 0)
1377 return (is_data ? IA64_DATA_TLB_VECTOR : IA64_INST_TLB_VECTOR);
1379 if (__copy_from_user(&pte, (void *)(*iha), sizeof(pte)) != 0)
1380 // virtual VHPT walker "missed" in TLB
1381 return IA64_VHPT_FAULT;
1383 /*
1384 * Optimisation: this VHPT walker aborts on not-present pages
1385 * instead of inserting a not-present translation, this allows
1386 * vectoring directly to the miss handler.
1387 */
1388 if (!(pte & _PAGE_P))
1389 return (is_data ? IA64_DATA_TLB_VECTOR : IA64_INST_TLB_VECTOR);
1391 /* found mapping in guest VHPT! */
1392 *itir = rr & RR_PS_MASK;
1393 *pteval = pte;
1394 vhpt_translate_count++;
1395 return IA64_NO_FAULT;
1396 }
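/* Illustrative sketch (hypothetical caller, not part of vcpu.c): the lookup
 * order implemented above is metaphysical mode, then the matching TR array,
 * then the 1-entry itlb/dtlb, then the guest's short-format VHPT.  A miss is
 * returned as the vector the guest should see, with *iha already holding the
 * guest VHPT address, so a fault handler can reflect it directly. */
static void example_reflect_miss(VCPU *vcpu, UINT64 address, BOOLEAN is_data)
{
	UINT64 pteval, itir, iha;
	IA64FAULT f = vcpu_translate(vcpu, address, is_data, &pteval, &itir, &iha);

	if (f == IA64_NO_FAULT)
		return;		/* pteval/itir describe the translation */
	/* otherwise f is one of IA64_{ALT_,}DATA/INST_TLB_VECTOR or
	 * IA64_VHPT_FAULT and would be reflected to the guest here */
}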
1398 IA64FAULT vcpu_tpa(VCPU *vcpu, UINT64 vadr, UINT64 *padr)
1399 {
1400 UINT64 pteval, itir, mask, iha;
1401 IA64FAULT fault;
1403 in_tpa = 1;
1404 fault = vcpu_translate(vcpu, vadr, 1, &pteval, &itir, &iha);
1405 in_tpa = 0;
1406 if (fault == IA64_NO_FAULT)
1407 {
1408 mask = itir_mask(itir);
1409 *padr = (pteval & _PAGE_PPN_MASK & mask) | (vadr & ~mask);
1410 return (IA64_NO_FAULT);
1411 }
1412 return vcpu_force_data_miss(vcpu,vadr);
1413 }
1415 IA64FAULT vcpu_tak(VCPU *vcpu, UINT64 vadr, UINT64 *key)
1417 printf("vcpu_tak: tak instruction unsupported\n");
1418 return (IA64_ILLOP_FAULT);
1419 // HACK ALERT: tak does a thash for now
1420 //return vcpu_thash(vcpu,vadr,key);
1423 /**************************************************************************
1424 VCPU debug breakpoint register access routines
1425 **************************************************************************/
1427 IA64FAULT vcpu_set_dbr(VCPU *vcpu, UINT64 reg, UINT64 val)
1429 // TODO: unimplemented DBRs return a reserved register fault
1430 // TODO: Should set Logical CPU state, not just physical
1431 ia64_set_dbr(reg,val);
1432 return (IA64_NO_FAULT);
1435 IA64FAULT vcpu_set_ibr(VCPU *vcpu, UINT64 reg, UINT64 val)
1437 // TODO: unimplemented IBRs return a reserved register fault
1438 // TODO: Should set Logical CPU state, not just physical
1439 ia64_set_ibr(reg,val);
1440 return (IA64_NO_FAULT);
1443 IA64FAULT vcpu_get_dbr(VCPU *vcpu, UINT64 reg, UINT64 *pval)
1445 // TODO: unimplemented DBRs return a reserved register fault
1446 UINT64 val = ia64_get_dbr(reg);
1447 *pval = val;
1448 return (IA64_NO_FAULT);
1451 IA64FAULT vcpu_get_ibr(VCPU *vcpu, UINT64 reg, UINT64 *pval)
1453 // TODO: unimplemented IBRs return a reserved register fault
1454 UINT64 val = ia64_get_ibr(reg);
1455 *pval = val;
1456 return (IA64_NO_FAULT);
1459 /**************************************************************************
1460 VCPU performance monitor register access routines
1461 **************************************************************************/
1463 IA64FAULT vcpu_set_pmc(VCPU *vcpu, UINT64 reg, UINT64 val)
1465 // TODO: Should set Logical CPU state, not just physical
1466 // NOTE: Writes to unimplemented PMC registers are discarded
1467 #ifdef DEBUG_PFMON
1468 printf("vcpu_set_pmc(%x,%lx)\n",reg,val);
1469 #endif
1470 ia64_set_pmc(reg,val);
1471 return (IA64_NO_FAULT);
1474 IA64FAULT vcpu_set_pmd(VCPU *vcpu, UINT64 reg, UINT64 val)
1476 // TODO: Should set Logical CPU state, not just physical
1477 // NOTE: Writes to unimplemented PMD registers are discarded
1478 #ifdef DEBUG_PFMON
1479 printf("vcpu_set_pmd(%x,%lx)\n",reg,val);
1480 #endif
1481 ia64_set_pmd(reg,val);
1482 return (IA64_NO_FAULT);
1485 IA64FAULT vcpu_get_pmc(VCPU *vcpu, UINT64 reg, UINT64 *pval)
1487 // NOTE: Reads from unimplemented PMC registers return zero
1488 UINT64 val = (UINT64)ia64_get_pmc(reg);
1489 #ifdef DEBUG_PFMON
1490 printf("%lx=vcpu_get_pmc(%x)\n",val,reg);
1491 #endif
1492 *pval = val;
1493 return (IA64_NO_FAULT);
1496 IA64FAULT vcpu_get_pmd(VCPU *vcpu, UINT64 reg, UINT64 *pval)
1498 // NOTE: Reads from unimplemented PMD registers return zero
1499 UINT64 val = (UINT64)ia64_get_pmd(reg);
1500 #ifdef DEBUG_PFMON
1501 printf("%lx=vcpu_get_pmd(%x)\n",val,reg);
1502 #endif
1503 *pval = val;
1504 return (IA64_NO_FAULT);
1507 /**************************************************************************
1508 VCPU banked general register access routines
1509 **************************************************************************/
1510 #define vcpu_bsw0_unat(i,b0unat,b1unat,runat,IA64_PT_REGS_R16_SLOT) \
1511 do{ \
1512 __asm__ __volatile__ ( \
1513 ";;extr.u %0 = %3,%6,16;;\n" \
1514 "dep %1 = %0, %1, 0, 16;;\n" \
1515 "st8 [%4] = %1\n" \
1516 "extr.u %0 = %2, 16, 16;;\n" \
1517 "dep %3 = %0, %3, %6, 16;;\n" \
1518 "st8 [%5] = %3\n" \
1519 ::"r"(i),"r"(*b1unat),"r"(*b0unat),"r"(*runat),"r"(b1unat), \
1520 "r"(runat),"i"(IA64_PT_REGS_R16_SLOT):"memory"); \
1521 }while(0)
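/* Illustrative C-level sketch (not part of vcpu.c) of what the asm above
 * does: switching banks must also swap the 16 NaT bits shadowing r16-r31.
 * They live as a 16-bit field at bit IA64_PT_REGS_R16_SLOT of regs->eml_unat;
 * bank 1's saved NaTs sit in bits 15:0 of one save word and bank 0's in bits
 * 31:16 of the other.  The function name is hypothetical. */
static void example_bsw0_unat(unsigned long *b0unat, unsigned long *b1unat,
                              unsigned long *runat, int slot)
{
	unsigned long live = (*runat >> slot) & 0xffff;	/* NaTs of the live bank 1 regs */

	*b1unat = (*b1unat & ~0xffffUL) | live;		/* save them away */
	*runat = (*runat & ~(0xffffUL << slot))		/* pull in bank 0's NaTs */
			| (((*b0unat >> 16) & 0xffff) << slot);
}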
1523 IA64FAULT vcpu_bsw0(VCPU *vcpu)
1525 // TODO: Only allowed for current vcpu
1526 REGS *regs = vcpu_regs(vcpu);
1527 unsigned long *r = &regs->r16;
1528 unsigned long *b0 = &PSCB(vcpu,bank0_regs[0]);
1529 unsigned long *b1 = &PSCB(vcpu,bank1_regs[0]);
1530 unsigned long *runat = &regs->eml_unat;
1531 unsigned long *b0unat = &PSCB(vcpu,vbnat);
1532 unsigned long *b1unat = &PSCB(vcpu,vnat);
1534 unsigned long i;
1536 if(VMX_DOMAIN(vcpu)){
1537 if(VCPU(vcpu,vpsr)&IA64_PSR_BN){
1538 for (i = 0; i < 16; i++) { *b1++ = *r; *r++ = *b0++; }
1539 vcpu_bsw0_unat(i,b0unat,b1unat,runat,IA64_PT_REGS_R16_SLOT);
1540 VCPU(vcpu,vpsr) &= ~IA64_PSR_BN;
1542 }else{
1543 if (PSCB(vcpu,banknum)) {
1544 for (i = 0; i < 16; i++) { *b1++ = *r; *r++ = *b0++; }
1545 vcpu_bsw0_unat(i,b0unat,b1unat,runat,IA64_PT_REGS_R16_SLOT);
1546 PSCB(vcpu,banknum) = 0;
1549 return (IA64_NO_FAULT);
1552 #define vcpu_bsw1_unat(i,b0unat,b1unat,runat,IA64_PT_REGS_R16_SLOT) \
1553 do{ \
1554 __asm__ __volatile__ ( \
1555 ";;extr.u %0 = %3,%6,16;;\n" \
1556 "dep %1 = %0, %1, 16, 16;;\n" \
1557 "st8 [%4] = %1\n" \
1558 "extr.u %0 = %2, 0, 16;;\n" \
1559 "dep %3 = %0, %3, %6, 16;;\n" \
1560 "st8 [%5] = %3\n" \
1561 ::"r"(i),"r"(*b0unat),"r"(*b1unat),"r"(*runat),"r"(b0unat), \
1562 "r"(runat),"i"(IA64_PT_REGS_R16_SLOT):"memory"); \
1563 }while(0)
1565 IA64FAULT vcpu_bsw1(VCPU *vcpu)
1567 // TODO: Only allowed for current vcpu
1568 REGS *regs = vcpu_regs(vcpu);
1569 unsigned long *r = &regs->r16;
1570 unsigned long *b0 = &PSCB(vcpu,bank0_regs[0]);
1571 unsigned long *b1 = &PSCB(vcpu,bank1_regs[0]);
1572 unsigned long *runat = &regs->eml_unat;
1573 unsigned long *b0unat = &PSCB(vcpu,vbnat);
1574 unsigned long *b1unat = &PSCB(vcpu,vnat);
1576 unsigned long i;
1578 if(VMX_DOMAIN(vcpu)){
1579 if(!(VCPU(vcpu,vpsr)&IA64_PSR_BN)){
1580 for (i = 0; i < 16; i++) { *b0++ = *r; *r++ = *b1++; }
1581 vcpu_bsw1_unat(i,b0unat,b1unat,runat,IA64_PT_REGS_R16_SLOT);
1582 VCPU(vcpu,vpsr) |= IA64_PSR_BN;
1584 }else{
1585 if (!PSCB(vcpu,banknum)) {
1586 for (i = 0; i < 16; i++) { *b0++ = *r; *r++ = *b1++; }
1587 vcpu_bsw1_unat(i,b0unat,b1unat,runat,IA64_PT_REGS_R16_SLOT);
1588 PSCB(vcpu,banknum) = 1;
1591 return (IA64_NO_FAULT);
1594 /**************************************************************************
1595 VCPU cpuid access routines
1596 **************************************************************************/
1599 IA64FAULT vcpu_get_cpuid(VCPU *vcpu, UINT64 reg, UINT64 *pval)
1601 // FIXME: This could get called as a result of a rsvd-reg fault
1602 // if reg > 3
1603 switch(reg) {
1604 case 0:
1605 memcpy(pval,"Xen/ia64",8);
1606 break;
1607 case 1:
1608 *pval = 0;
1609 break;
1610 case 2:
1611 *pval = 0;
1612 break;
1613 case 3:
1614 *pval = ia64_get_cpuid(3);
1615 break;
1616 case 4:
1617 *pval = ia64_get_cpuid(4);
1618 break;
1619 default:
1620 if (reg > (ia64_get_cpuid(3) & 0xff))
1621 return IA64_RSVDREG_FAULT;
1622 *pval = ia64_get_cpuid(reg);
1623 break;
1625 return (IA64_NO_FAULT);
1628 /**************************************************************************
1629 VCPU region register access routines
1630 **************************************************************************/
1632 unsigned long vcpu_get_rr_ve(VCPU *vcpu,UINT64 vadr)
1634 ia64_rr rr;
1636 rr.rrval = PSCB(vcpu,rrs)[vadr>>61];
1637 return(rr.ve);
1640 IA64FAULT vcpu_set_rr(VCPU *vcpu, UINT64 reg, UINT64 val)
1642 PSCB(vcpu,rrs)[reg>>61] = val;
1643 // warning: set_one_rr() does it "live"
1644 set_one_rr(reg,val);
1645 return (IA64_NO_FAULT);
1648 IA64FAULT vcpu_get_rr(VCPU *vcpu, UINT64 reg, UINT64 *pval)
1650 UINT val = PSCB(vcpu,rrs)[reg>>61];
1651 *pval = val;
1652 return (IA64_NO_FAULT);
1655 /**************************************************************************
1656 VCPU protection key register access routines
1657 **************************************************************************/
1659 IA64FAULT vcpu_get_pkr(VCPU *vcpu, UINT64 reg, UINT64 *pval)
1661 #ifndef PKR_USE_FIXED
1662 printk("vcpu_get_pkr: called, not implemented yet\n");
1663 return IA64_ILLOP_FAULT;
1664 #else
1665 UINT64 val = (UINT64)ia64_get_pkr(reg);
1666 *pval = val;
1667 return (IA64_NO_FAULT);
1668 #endif
1671 IA64FAULT vcpu_set_pkr(VCPU *vcpu, UINT64 reg, UINT64 val)
1673 #ifndef PKR_USE_FIXED
1674 printk("vcpu_set_pkr: called, not implemented yet\n");
1675 return IA64_ILLOP_FAULT;
1676 #else
1677 // if (reg >= NPKRS) return (IA64_ILLOP_FAULT);
1678 vcpu->pkrs[reg] = val;
1679 ia64_set_pkr(reg,val);
1680 return (IA64_NO_FAULT);
1681 #endif
1684 /**************************************************************************
1685 VCPU translation register access routines
1686 **************************************************************************/
1688 static void vcpu_purge_tr_entry(TR_ENTRY *trp)
1690 trp->p = 0;
1693 static void vcpu_set_tr_entry(TR_ENTRY *trp, UINT64 pte, UINT64 itir, UINT64 ifa)
1695 UINT64 ps;
1697 trp->itir = itir;
1698 trp->rid = virtualize_rid(current, get_rr(ifa) & RR_RID_MASK);
1699 trp->p = 1;
1700 ps = trp->ps;
1701 trp->page_flags = pte;
1702 if (trp->pl < 2) trp->pl = 2;
1703 trp->vadr = ifa & ~0xfff;
1704 if (ps > 12) { // "ignore" relevant low-order bits
1705 trp->ppn &= ~((1UL<<(ps-12))-1);
1706 trp->vadr &= ~((1UL<<ps)-1);
1710 IA64FAULT vcpu_itr_d(VCPU *vcpu, UINT64 slot, UINT64 pte,
1711 UINT64 itir, UINT64 ifa)
1713 TR_ENTRY *trp;
1715 if (slot >= NDTRS) return IA64_RSVDREG_FAULT;
1716 trp = &PSCBX(vcpu,dtrs[slot]);
1717 //printf("***** itr.d: setting slot %d: ifa=%p\n",slot,ifa);
1718 vcpu_set_tr_entry(trp,pte,itir,ifa);
1719 vcpu_quick_region_set(PSCBX(vcpu,dtr_regions),ifa);
1720 return IA64_NO_FAULT;
1723 IA64FAULT vcpu_itr_i(VCPU *vcpu, UINT64 slot, UINT64 pte,
1724 UINT64 itir, UINT64 ifa)
1726 TR_ENTRY *trp;
1728 if (slot >= NITRS) return IA64_RSVDREG_FAULT;
1729 trp = &PSCBX(vcpu,itrs[slot]);
1730 //printf("***** itr.i: setting slot %d: ifa=%p\n",slot,ifa);
1731 vcpu_set_tr_entry(trp,pte,itir,ifa);
1732 vcpu_quick_region_set(PSCBX(vcpu,itr_regions),ifa);
1733 return IA64_NO_FAULT;
1736 /**************************************************************************
1737 VCPU translation cache access routines
1738 **************************************************************************/
1740 void foobar(void) { /*vcpu_verbose = 1;*/ }
1742 extern struct domain *dom0;
1744 void vcpu_itc_no_srlz(VCPU *vcpu, UINT64 IorD, UINT64 vaddr, UINT64 pte, UINT64 mp_pte, UINT64 logps)
1746 unsigned long psr;
1747 unsigned long ps = (vcpu->domain==dom0) ? logps : PAGE_SHIFT;
1749 // FIXME: validate ifa here (not in Xen space), COULD MACHINE CHECK!
1750 // FIXME, must be inlined or potential for nested fault here!
1751 if ((vcpu->domain==dom0) && (logps < PAGE_SHIFT)) {
1752 printf("vcpu_itc_no_srlz: domain0 use of smaller page size!\n");
1753 //FIXME: kill domain here
1754 while(1);
1756 psr = ia64_clear_ic();
1757 ia64_itc(IorD,vaddr,pte,ps); // FIXME: look for bigger mappings
1758 ia64_set_psr(psr);
1759 // ia64_srlz_i(); // no srls req'd, will rfi later
1760 #ifdef VHPT_GLOBAL
1761 if (vcpu->domain==dom0 && ((vaddr >> 61) == 7)) {
1762 // FIXME: this is dangerous... vhpt_flush_address ensures these
1763 // addresses never get flushed. More work needed if this
1764 // ever happens.
1765 //printf("vhpt_insert(%p,%p,%p)\n",vaddr,pte,1L<<logps);
1766 if (logps > PAGE_SHIFT) vhpt_multiple_insert(vaddr,pte,logps);
1767 else vhpt_insert(vaddr,pte,logps<<2);
1769 // even if domain pagesize is larger than PAGE_SIZE, just put
1770 // PAGE_SIZE mapping in the vhpt for now, else purging is complicated
1771 else vhpt_insert(vaddr,pte,PAGE_SHIFT<<2);
1772 #endif
1773 if ((mp_pte == -1UL) || (IorD & 0x4)) return; // don't place in 1-entry TLB
1774 if (IorD & 0x1) {
1775 vcpu_set_tr_entry(&PSCBX(vcpu,itlb),pte,ps<<2,vaddr);
1776 PSCBX(vcpu,itlb_pte) = mp_pte;
1778 if (IorD & 0x2) {
1779 vcpu_set_tr_entry(&PSCBX(vcpu,dtlb),pte,ps<<2,vaddr);
1780 PSCBX(vcpu,dtlb_pte) = mp_pte;
1784 IA64FAULT vcpu_itc_d(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa)
1786 unsigned long pteval, logps = (itir >> 2) & 0x3f;
1787 unsigned long translate_domain_pte(UINT64,UINT64,UINT64);
1789 if (logps < PAGE_SHIFT) {
1790 printf("vcpu_itc_d: domain trying to use smaller page size!\n");
1791 //FIXME: kill domain here
1792 while(1);
1794 //itir = (itir & ~0xfc) | (PAGE_SHIFT<<2); // ignore domain's pagesize
1795 pteval = translate_domain_pte(pte,ifa,itir);
1796 if (!pteval) return IA64_ILLOP_FAULT;
1797 vcpu_itc_no_srlz(vcpu,2,ifa,pteval,pte,logps);
1798 return IA64_NO_FAULT;
1801 IA64FAULT vcpu_itc_i(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa)
1803 unsigned long pteval, logps = (itir >> 2) & 0x3f;
1804 unsigned long translate_domain_pte(UINT64,UINT64,UINT64);
1806 // FIXME: validate ifa here (not in Xen space), COULD MACHINE CHECK!
1807 if (logps < PAGE_SHIFT) {
1808 printf("vcpu_itc_i: domain trying to use smaller page size!\n");
1809 //FIXME: kill domain here
1810 while(1);
1812 //itir = (itir & ~0xfc) | (PAGE_SHIFT<<2); // ignore domain's pagesize
1813 pteval = translate_domain_pte(pte,ifa,itir);
1814 // FIXME: what to do if bad physical address? (machine check?)
1815 if (!pteval) return IA64_ILLOP_FAULT;
1816 vcpu_itc_no_srlz(vcpu, 1,ifa,pteval,pte,logps);
1817 return IA64_NO_FAULT;
1820 IA64FAULT vcpu_ptc_l(VCPU *vcpu, UINT64 vadr, UINT64 addr_range)
1822 printk("vcpu_ptc_l: called, not implemented yet\n");
1823 return IA64_ILLOP_FAULT;
1826 // At privlvl=0, fc performs no access rights or protection key checks, while
1827 // at privlvl!=0, fc performs access rights checks as if it were a 1-byte
1828 // read but no protection key check. Thus in order to avoid an unexpected
1829 // access rights fault, we have to translate the virtual address to a
1830 // physical address (possibly via a metaphysical address) and do the fc
1831 // on the physical address, which is guaranteed to flush the same cache line
1832 IA64FAULT vcpu_fc(VCPU *vcpu, UINT64 vadr)
1834 // TODO: Only allowed for current vcpu
1835 UINT64 mpaddr, paddr;
1836 IA64FAULT fault;
1837 unsigned long translate_domain_mpaddr(unsigned long);
1838 IA64FAULT vcpu_tpa(VCPU *, UINT64, UINT64 *);
1840 fault = vcpu_tpa(vcpu, vadr, &mpaddr);
1841 if (fault == IA64_NO_FAULT) {
1842 paddr = translate_domain_mpaddr(mpaddr);
1843 ia64_fc(__va(paddr));
1845 return fault;
1848 int ptce_count = 0;
1849 IA64FAULT vcpu_ptc_e(VCPU *vcpu, UINT64 vadr)
1851 // Note that this only needs to be called once, i.e. the
1852 // architected loop to purge the entire TLB, should use
1853 // base = stride1 = stride2 = 0, count0 = count1 = 1
1855 #ifdef VHPT_GLOBAL
1856 vhpt_flush(); // FIXME: This is overdoing it
1857 #endif
1858 local_flush_tlb_all();
1859 // just invalidate the "whole" tlb
1860 vcpu_purge_tr_entry(&PSCBX(vcpu,dtlb));
1861 vcpu_purge_tr_entry(&PSCBX(vcpu,itlb));
1862 return IA64_NO_FAULT;
1865 IA64FAULT vcpu_ptc_g(VCPU *vcpu, UINT64 vadr, UINT64 addr_range)
1867 printk("vcpu_ptc_g: called, not implemented yet\n");
1868 return IA64_ILLOP_FAULT;
1871 IA64FAULT vcpu_ptc_ga(VCPU *vcpu,UINT64 vadr,UINT64 addr_range)
1873 extern void ia64_global_tlb_purge(UINT64 start, UINT64 end, UINT64 nbits);
1874 // FIXME: validate not flushing Xen addresses
1875 // if (Xen address) return(IA64_ILLOP_FAULT);
1876 // FIXME: ??breaks if domain PAGE_SIZE < Xen PAGE_SIZE
1877 //printf("######## vcpu_ptc_ga(%p,%p) ##############\n",vadr,addr_range);
1878 #ifdef VHPT_GLOBAL
1879 vhpt_flush_address(vadr,addr_range);
1880 #endif
1881 ia64_global_tlb_purge(vadr,vadr+addr_range,PAGE_SHIFT);
1882 vcpu_purge_tr_entry(&PSCBX(vcpu,dtlb));
1883 vcpu_purge_tr_entry(&PSCBX(vcpu,itlb));
1884 return IA64_NO_FAULT;
1887 IA64FAULT vcpu_ptr_d(VCPU *vcpu,UINT64 vadr,UINT64 addr_range)
1889 printf("vcpu_ptr_d: Purging TLB is unsupported\n");
1890 // don't forget to recompute dtr_regions
1891 return (IA64_ILLOP_FAULT);
1894 IA64FAULT vcpu_ptr_i(VCPU *vcpu,UINT64 vadr,UINT64 addr_range)
1896 printf("vcpu_ptr_i: Purging TLB is unsupported\n");
1897 // don't forget to recompute itr_regions
1898 return (IA64_ILLOP_FAULT);