ia64/xen-unstable

xen/arch/ia64/vcpu.c @ 3108:85d6a1145160

bitkeeper revision 1.1159.187.7 (41a4e12eWWEz6Rwd4YlbRFZKcBjaMQ)

Merge arcadians.cl.cam.ac.uk:/auto/groups/xeno/BK/xen-2.0-testing.bk
into arcadians.cl.cam.ac.uk:/local/scratch-2/cl349/xen-2.0-testing.bk
author cl349@arcadians.cl.cam.ac.uk
date Wed Nov 24 19:29:50 2004 +0000 (2004-11-24)
parents b7cbbc4c7a3e
children 7ef582b6c9c4
/*
 * Virtualized CPU functions
 *
 * Copyright (C) 2004 Hewlett-Packard Co.
 *	Dan Magenheimer (dan.magenheimer@hp.com)
 *
 */

#include <linux/sched.h>
#include <asm/ia64_int.h>
#include <asm/vcpu.h>
#include <asm/regionreg.h>
#include <asm/tlb.h>
#include <asm/processor.h>
#include <asm/delay.h>

// NOTE: accesses like psr.i/psr.be below treat the ia64_psr bit-fields as
// unnamed members of this union (compiler anonymous-member extension), so
// they overlay the raw 64-bit value in i64
typedef union {
	struct ia64_psr;
	unsigned long i64;
} PSR;

//typedef struct pt_regs REGS;
//typedef struct domain VCPU;

// this def for vcpu_regs won't work if kernel stack is present
#define vcpu_regs(vcpu) ((struct pt_regs *) vcpu->regs)
#define PSCB(x) x->shared_info->arch

#define TRUE	1
#define FALSE	0
#define IA64_PTA_SZ_BIT		2
#define IA64_PTA_VF_BIT		8
#define IA64_PTA_BASE_BIT	15
#define IA64_PTA_LFMT		(1UL << IA64_PTA_VF_BIT)
#define IA64_PTA_SZ(x)		(x##UL << IA64_PTA_SZ_BIT)

#define STATIC

unsigned long vcpu_verbose = 0;
#define verbose(a...) do {if (vcpu_verbose) printf(a);} while(0)
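
// cr.pta fields (matching the defines above): bit 0 is ve, bits 2..7 hold
// the log2 size of the VHPT, bit 8 (vf) selects the long format, and the
// base address occupies bit 15 and up -- hence IA64_PTA_SZ(), IA64_PTA_LFMT
// and IA64_PTA_BASE_BIT.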
/**************************************************************************
 VCPU general register access routines
**************************************************************************/

UINT64
vcpu_get_gr(VCPU *vcpu, unsigned reg)
{
	REGS *regs = vcpu_regs(vcpu);
	UINT64 val;

	if (!reg) return 0;
	getreg(reg,&val,0,regs);	// FIXME: handle NATs later
	return val;
}

// returns:
//   IA64_ILLOP_FAULT if the register would cause an Illegal Operation fault
//   IA64_NO_FAULT otherwise
IA64FAULT
vcpu_set_gr(VCPU *vcpu, unsigned reg, UINT64 value)
{
	REGS *regs = vcpu_regs(vcpu);
	long sof = (regs->cr_ifs) & 0x7f;

	if (!reg) return IA64_ILLOP_FAULT;
	if (reg >= sof + 32) return IA64_ILLOP_FAULT;
	setreg(reg,value,0,regs);	// FIXME: handle NATs later
	return IA64_NO_FAULT;
}
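
// NOTE: sof (size-of-frame) is the low 7 bits of cr.ifs; the current
// register frame is r32..r(31+sof), so vcpu_set_gr() faults on writes past
// the frame as well as on writes to r0.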
/**************************************************************************
 VCPU privileged application register access routines
**************************************************************************/

IA64FAULT vcpu_set_ar(VCPU *vcpu, UINT64 reg, UINT64 val)
{
	if (reg == 44) return (vcpu_set_itc(vcpu,val));
	if (reg == 27) return (IA64_ILLOP_FAULT);
	if (reg > 7) return (IA64_ILLOP_FAULT);
	PSCB(vcpu).krs[reg] = val;
#if 0
// for now, privify kr read's so all kr accesses are privileged
	switch (reg) {
	      case 0: asm volatile ("mov ar.k0=%0" :: "r"(val)); break;
	      case 1: asm volatile ("mov ar.k1=%0" :: "r"(val)); break;
	      case 2: asm volatile ("mov ar.k2=%0" :: "r"(val)); break;
	      case 3: asm volatile ("mov ar.k3=%0" :: "r"(val)); break;
	      case 4: asm volatile ("mov ar.k4=%0" :: "r"(val)); break;
	      case 5: asm volatile ("mov ar.k5=%0" :: "r"(val)); break;
	      case 6: asm volatile ("mov ar.k6=%0" :: "r"(val)); break;
	      case 7: asm volatile ("mov ar.k7=%0" :: "r"(val)); break;
	      case 27: asm volatile ("mov ar.cflg=%0" :: "r"(val)); break;
	}
#endif
	return IA64_NO_FAULT;
}

IA64FAULT vcpu_get_ar(VCPU *vcpu, UINT64 reg, UINT64 *val)
{
	if (reg > 7) return (IA64_ILLOP_FAULT);
	*val = PSCB(vcpu).krs[reg];
	return IA64_NO_FAULT;
}
/**************************************************************************
 VCPU processor status register access routines
**************************************************************************/

void vcpu_set_metaphysical_mode(VCPU *vcpu, BOOLEAN newmode)
{
	/* only do something if mode changes */
	if (!!newmode ^ !!vcpu->metaphysical_mode) {
		if (newmode) set_metaphysical_rr(0,vcpu->metaphysical_rid);
		else if (PSCB(vcpu).rrs[0] != -1)
			set_one_rr(0, PSCB(vcpu).rrs[0]);
		vcpu->metaphysical_mode = newmode;
	}
}

IA64FAULT vcpu_reset_psr_sm(VCPU *vcpu, UINT64 imm24)
{
	struct ia64_psr psr, imm, *ipsr;
	REGS *regs = vcpu_regs(vcpu);

	// TODO: All of these bits need to be virtualized
	// TODO: Only allowed for current vcpu
	__asm__ __volatile ("mov %0=psr;;" : "=r"(psr) :: "memory");
	ipsr = (struct ia64_psr *)&regs->cr_ipsr;
	imm = *(struct ia64_psr *)&imm24;
	// interrupt flag
	if (imm.i) PSCB(vcpu).interrupt_delivery_enabled = 0;
	if (imm.ic) PSCB(vcpu).interrupt_collection_enabled = 0;
	// interrupt collection flag
	//if (imm.ic) PSCB(vcpu).interrupt_delivery_enabled = 0;
	// just handle psr.up and psr.pp for now
	if (imm24 & ~(IA64_PSR_PP | IA64_PSR_UP | IA64_PSR_SP
		| IA64_PSR_I | IA64_PSR_IC | IA64_PSR_DT
		| IA64_PSR_DFL | IA64_PSR_DFH))
			return (IA64_ILLOP_FAULT);
	if (imm.dfh) ipsr->dfh = 0;
	if (imm.dfl) ipsr->dfl = 0;
	if (imm.pp) { ipsr->pp = 0; psr.pp = 0; }
	if (imm.up) { ipsr->up = 0; psr.up = 0; }
	if (imm.sp) { ipsr->sp = 0; psr.sp = 0; }
	if (imm.dt) vcpu_set_metaphysical_mode(vcpu,TRUE);
	__asm__ __volatile (";; mov psr.l=%0;; srlz.d"::"r"(psr):"memory");
	return IA64_NO_FAULT;
}
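
// vcpu_reset_psr_sm()/vcpu_set_psr_sm() emulate the rsm/ssm instructions:
// each bit set in imm24 clears (rsm) or sets (ssm) the corresponding psr
// bit.  psr.i and psr.ic are kept in the virtualized PSCB state; the other
// permitted bits are pushed through to cr.ipsr and/or the live psr.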
extern UINT64 vcpu_check_pending_interrupts(VCPU *vcpu);
#define SPURIOUS_VECTOR 0xf

IA64FAULT vcpu_set_psr_sm(VCPU *vcpu, UINT64 imm24)
{
	struct ia64_psr psr, imm, *ipsr;
	REGS *regs = vcpu_regs(vcpu);
	UINT64 mask, enabling_interrupts = 0;

	// TODO: All of these bits need to be virtualized
	__asm__ __volatile ("mov %0=psr;;" : "=r"(psr) :: "memory");
	imm = *(struct ia64_psr *)&imm24;
	ipsr = (struct ia64_psr *)&regs->cr_ipsr;
	// just handle psr.sp,pp and psr.i,ic (and user mask) for now
	mask = IA64_PSR_PP|IA64_PSR_SP|IA64_PSR_I|IA64_PSR_IC|IA64_PSR_UM |
		IA64_PSR_DT|IA64_PSR_DFL|IA64_PSR_DFH;
	if (imm24 & ~mask) return (IA64_ILLOP_FAULT);
	if (imm.dfh) ipsr->dfh = 1;
	if (imm.dfl) ipsr->dfl = 1;
	if (imm.pp) { ipsr->pp = 1; psr.pp = 1; }
	if (imm.sp) { ipsr->sp = 1; psr.sp = 1; }
	if (imm.i) {
		if (!PSCB(vcpu).interrupt_delivery_enabled) {
			//printf("vcpu_set_psr_sm: psr.ic 0->1 ");
			enabling_interrupts = 1;
		}
		PSCB(vcpu).interrupt_delivery_enabled = 1;
	}
	if (imm.ic) PSCB(vcpu).interrupt_collection_enabled = 1;
	// TODO: do this faster
	if (imm.mfl) { ipsr->mfl = 1; psr.mfl = 1; }
	if (imm.ac) { ipsr->ac = 1; psr.ac = 1; }
	if (imm.up) { ipsr->up = 1; psr.up = 1; }
	if (imm.be) {
		printf("*** DOMAIN TRYING TO TURN ON BIG-ENDIAN!!!\n");
		return (IA64_ILLOP_FAULT);
	}
	if (imm.dt) vcpu_set_metaphysical_mode(vcpu,FALSE);
	__asm__ __volatile (";; mov psr.l=%0;; srlz.d"::"r"(psr):"memory");
#if 0 // now done with deliver_pending_interrupts
	if (enabling_interrupts) {
		if (vcpu_check_pending_interrupts(vcpu) != SPURIOUS_VECTOR) {
			//printf("with interrupts pending\n");
			return IA64_EXTINT_VECTOR;
		}
		//else printf("but nothing pending\n");
	}
#endif
	return IA64_NO_FAULT;
}
IA64FAULT vcpu_set_psr_l(VCPU *vcpu, UINT64 val)
{
	struct ia64_psr psr, newpsr, *ipsr;
	REGS *regs = vcpu_regs(vcpu);
	UINT64 enabling_interrupts = 0;

	// TODO: All of these bits need to be virtualized
	__asm__ __volatile ("mov %0=psr;;" : "=r"(psr) :: "memory");
	newpsr = *(struct ia64_psr *)&val;
	ipsr = (struct ia64_psr *)&regs->cr_ipsr;
	// just handle psr.up and psr.pp for now
	//if (val & ~(IA64_PSR_PP | IA64_PSR_UP | IA64_PSR_SP)) return (IA64_ILLOP_FAULT);
	// however trying to set other bits can't be an error as it is in ssm
	if (newpsr.dfh) ipsr->dfh = 1;
	if (newpsr.dfl) ipsr->dfl = 1;
	if (newpsr.pp) { ipsr->pp = 1; psr.pp = 1; }
	if (newpsr.up) { ipsr->up = 1; psr.up = 1; }
	if (newpsr.sp) { ipsr->sp = 1; psr.sp = 1; }
	if (newpsr.i) {
		if (!PSCB(vcpu).interrupt_delivery_enabled)
			enabling_interrupts = 1;
		PSCB(vcpu).interrupt_delivery_enabled = 1;
	}
	if (newpsr.ic) PSCB(vcpu).interrupt_collection_enabled = 1;
	if (newpsr.mfl) { ipsr->mfl = 1; psr.mfl = 1; }
	if (newpsr.ac) { ipsr->ac = 1; psr.ac = 1; }
	if (newpsr.up) { ipsr->up = 1; psr.up = 1; }
	if (newpsr.dt && newpsr.rt) vcpu_set_metaphysical_mode(vcpu,FALSE);
	else vcpu_set_metaphysical_mode(vcpu,TRUE);
	if (newpsr.be) {
		printf("*** DOMAIN TRYING TO TURN ON BIG-ENDIAN!!!\n");
		return (IA64_ILLOP_FAULT);
	}
	//__asm__ __volatile (";; mov psr.l=%0;; srlz.d"::"r"(psr):"memory");
#if 0 // now done with deliver_pending_interrupts
	if (enabling_interrupts) {
		if (vcpu_check_pending_interrupts(vcpu) != SPURIOUS_VECTOR)
			return IA64_EXTINT_VECTOR;
	}
#endif
	return IA64_NO_FAULT;
}
IA64FAULT vcpu_get_psr(VCPU *vcpu, UINT64 *pval)
{
	UINT64 psr;
	struct ia64_psr newpsr;

	// TODO: This needs to return a "filtered" view of
	// the psr, not the actual psr.  Probably the psr needs
	// to be a field in regs (in addition to ipsr).
	__asm__ __volatile ("mov %0=psr;;" : "=r"(psr) :: "memory");
	newpsr = *(struct ia64_psr *)&psr;
	if (newpsr.cpl == 2) newpsr.cpl = 0;
	if (PSCB(vcpu).interrupt_delivery_enabled) newpsr.i = 1;
	else newpsr.i = 0;
	if (PSCB(vcpu).interrupt_collection_enabled) newpsr.ic = 1;
	else newpsr.ic = 0;
	*pval = *(unsigned long *)&newpsr;
	return IA64_NO_FAULT;
}

BOOLEAN vcpu_get_psr_ic(VCPU *vcpu)
{
	return !!PSCB(vcpu).interrupt_collection_enabled;
}

BOOLEAN vcpu_get_psr_i(VCPU *vcpu)
{
	return !!PSCB(vcpu).interrupt_delivery_enabled;
}

UINT64 vcpu_get_ipsr_int_state(VCPU *vcpu,UINT64 prevpsr)
{
	UINT64 dcr = PSCB(vcpu).dcr;
	PSR psr = {0};

	//printf("*** vcpu_get_ipsr_int_state (0x%016lx)...",prevpsr);
	psr.i64 = prevpsr;
	psr.be = 0; if (dcr & IA64_DCR_BE) psr.be = 1;
	psr.pp = 0; if (dcr & IA64_DCR_PP) psr.pp = 1;
	psr.ic = PSCB(vcpu).interrupt_collection_enabled;
	psr.i = PSCB(vcpu).interrupt_delivery_enabled;
	psr.bn = PSCB(vcpu).banknum;
	psr.dt = 1; psr.it = 1; psr.rt = 1;
	if (psr.cpl == 2) psr.cpl = 0; // !!!! fool domain
	// psr.pk = 1;
	//printf("returns 0x%016lx...",psr.i64);
	return psr.i64;
}
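
// NOTE: vcpu_get_ipsr_int_state() builds the cr.ipsr value a domain sees
// when an interruption is reflected to it: be/pp come from the guest's dcr
// defaults, i/ic/bn from the virtualized state, and cpl 2 is reported as 0
// so the guest kernel believes it is running in ring 0.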
/**************************************************************************
 VCPU control register access routines
**************************************************************************/

IA64FAULT vcpu_get_dcr(VCPU *vcpu, UINT64 *pval)
{
	extern unsigned long privop_trace;
	//privop_trace=0;
	//verbose("vcpu_get_dcr: called @%p\n",PSCB(vcpu).iip);
	// Reads of cr.dcr on Xen always have the sign bit set, so
	// a domain can differentiate whether it is running on SP or not
	*pval = PSCB(vcpu).dcr | 0x8000000000000000L;
	return (IA64_NO_FAULT);
}

IA64FAULT vcpu_get_iva(VCPU *vcpu, UINT64 *pval)
{
	*pval = PSCB(vcpu).iva & ~0x7fffL;
	return (IA64_NO_FAULT);
}

IA64FAULT vcpu_get_pta(VCPU *vcpu, UINT64 *pval)
{
	*pval = PSCB(vcpu).pta;
	return (IA64_NO_FAULT);
}

IA64FAULT vcpu_get_ipsr(VCPU *vcpu, UINT64 *pval)
{
	//REGS *regs = vcpu_regs(vcpu);
	//*pval = regs->cr_ipsr;
	*pval = PSCB(vcpu).ipsr;
	return (IA64_NO_FAULT);
}

IA64FAULT vcpu_get_isr(VCPU *vcpu, UINT64 *pval)
{
	*pval = PSCB(vcpu).isr;
	return (IA64_NO_FAULT);
}

IA64FAULT vcpu_get_iip(VCPU *vcpu, UINT64 *pval)
{
	//REGS *regs = vcpu_regs(vcpu);
	//*pval = regs->cr_iip;
	*pval = PSCB(vcpu).iip;
	return (IA64_NO_FAULT);
}

IA64FAULT vcpu_get_ifa(VCPU *vcpu, UINT64 *pval)
{
	UINT64 val = PSCB(vcpu).ifa;
	*pval = val;
	return (IA64_NO_FAULT);
}
unsigned long vcpu_get_itir_on_fault(VCPU *vcpu, UINT64 ifa)
{
	ia64_rr rr;

	rr.rrval = 0;
	rr.ps = vcpu_get_rr_ps(vcpu,ifa);
	rr.rid = vcpu_get_rr_rid(vcpu,ifa);
	return (rr.rrval);
}

IA64FAULT vcpu_get_itir(VCPU *vcpu, UINT64 *pval)
{
	UINT64 val = PSCB(vcpu).itir;
	*pval = val;
	return (IA64_NO_FAULT);
}

IA64FAULT vcpu_get_iipa(VCPU *vcpu, UINT64 *pval)
{
	UINT64 val = PSCB(vcpu).iipa;
	// SP entry code does not save iipa yet nor does it get
	// properly delivered in the pscb
	printf("*** vcpu_get_iipa: cr.iipa not fully implemented yet!!\n");
	*pval = val;
	return (IA64_NO_FAULT);
}

IA64FAULT vcpu_get_ifs(VCPU *vcpu, UINT64 *pval)
{
	//PSCB(vcpu).ifs = PSCB(vcpu)->regs.cr_ifs;
	//*pval = PSCB(vcpu).regs.cr_ifs;
	*pval = PSCB(vcpu).ifs;
	PSCB(vcpu).incomplete_regframe = 0;
	return (IA64_NO_FAULT);
}

IA64FAULT vcpu_get_iim(VCPU *vcpu, UINT64 *pval)
{
	UINT64 val = PSCB(vcpu).iim;
	*pval = val;
	return (IA64_NO_FAULT);
}

IA64FAULT vcpu_get_iha(VCPU *vcpu, UINT64 *pval)
{
	return vcpu_thash(vcpu,PSCB(vcpu).ifa,pval);
}
IA64FAULT vcpu_set_dcr(VCPU *vcpu, UINT64 val)
{
	extern unsigned long privop_trace;
	//privop_trace=1;
	// Reads of cr.dcr on SP always have the sign bit set, so
	// a domain can differentiate whether it is running on SP or not
	// Thus, writes of DCR should ignore the sign bit
	//verbose("vcpu_set_dcr: called\n");
	PSCB(vcpu).dcr = val & ~0x8000000000000000L;
	return (IA64_NO_FAULT);
}

IA64FAULT vcpu_set_iva(VCPU *vcpu, UINT64 val)
{
	PSCB(vcpu).iva = val & ~0x7fffL;
	return (IA64_NO_FAULT);
}

IA64FAULT vcpu_set_pta(VCPU *vcpu, UINT64 val)
{
	if (val & IA64_PTA_LFMT) {
		printf("*** No support for VHPT long format yet!!\n");
		return (IA64_ILLOP_FAULT);
	}
	if (val & (0x3f<<9)) /* reserved fields */ return IA64_RSVDREG_FAULT;
	if (val & 2) /* reserved fields */ return IA64_RSVDREG_FAULT;
	PSCB(vcpu).pta = val;
	return IA64_NO_FAULT;
}

IA64FAULT vcpu_set_ipsr(VCPU *vcpu, UINT64 val)
{
	PSCB(vcpu).ipsr = val;
	return IA64_NO_FAULT;
}

IA64FAULT vcpu_set_isr(VCPU *vcpu, UINT64 val)
{
	PSCB(vcpu).isr = val;
	return IA64_NO_FAULT;
}

IA64FAULT vcpu_set_iip(VCPU *vcpu, UINT64 val)
{
	PSCB(vcpu).iip = val;
	return IA64_NO_FAULT;
}

IA64FAULT vcpu_increment_iip(VCPU *vcpu)
{
	REGS *regs = vcpu_regs(vcpu);
	struct ia64_psr *ipsr = (struct ia64_psr *)&regs->cr_ipsr;
	if (ipsr->ri == 2) { ipsr->ri=0; regs->cr_iip += 16; }
	else ipsr->ri++;
	return (IA64_NO_FAULT);
}
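
// NOTE: ipsr.ri is the slot (0..2) of the faulting instruction within its
// 16-byte bundle, so stepping past slot 2 wraps ri to 0 and advances
// cr.iip to the next bundle.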
IA64FAULT vcpu_set_ifa(VCPU *vcpu, UINT64 val)
{
	PSCB(vcpu).ifa = val;
	return IA64_NO_FAULT;
}

IA64FAULT vcpu_set_itir(VCPU *vcpu, UINT64 val)
{
	PSCB(vcpu).itir = val;
	return IA64_NO_FAULT;
}

IA64FAULT vcpu_set_iipa(VCPU *vcpu, UINT64 val)
{
	// SP entry code does not save iipa yet nor does it get
	// properly delivered in the pscb
	printf("*** vcpu_set_iipa: cr.iipa not fully implemented yet!!\n");
	PSCB(vcpu).iipa = val;
	return IA64_NO_FAULT;
}

IA64FAULT vcpu_set_ifs(VCPU *vcpu, UINT64 val)
{
	//REGS *regs = vcpu_regs(vcpu);
	PSCB(vcpu).ifs = val;
	return IA64_NO_FAULT;
}

IA64FAULT vcpu_set_iim(VCPU *vcpu, UINT64 val)
{
	PSCB(vcpu).iim = val;
	return IA64_NO_FAULT;
}

IA64FAULT vcpu_set_iha(VCPU *vcpu, UINT64 val)
{
	PSCB(vcpu).iha = val;
	return IA64_NO_FAULT;
}
/**************************************************************************
 VCPU interrupt control register access routines
**************************************************************************/

void vcpu_pend_interrupt(VCPU *vcpu, UINT64 vector)
{
	if (vector & ~0xff) {
		printf("vcpu_pend_interrupt: bad vector\n");
		return;
	}
	if (!test_bit(vector,PSCB(vcpu).delivery_mask)) return;
	if (test_bit(vector,PSCB(vcpu).irr)) {
		//printf("vcpu_pend_interrupt: overrun\n");
	}
	set_bit(vector,PSCB(vcpu).irr);
}
#define IA64_TPR_MMI	0x10000
#define IA64_TPR_MIC	0x000f0

/* checks to see if a VCPU has any unmasked pending interrupts
 * if so, returns the highest, else returns SPURIOUS_VECTOR */
/* NOTE: Since this gets called from vcpu_get_ivr() and the
 * semantics of "mov rx=cr.ivr" ignore the setting of the psr.i bit,
 * this routine also ignores pscb.interrupt_delivery_enabled
 * and this must be checked independently; see vcpu_deliverable_interrupts() */
UINT64 vcpu_check_pending_interrupts(VCPU *vcpu)
{
	UINT64 *p, *q, *r, bits, bitnum, mask, i, vector;

	p = &PSCB(vcpu).irr[3];
	q = &PSCB(vcpu).delivery_mask[3];
	r = &PSCB(vcpu).insvc[3];
	for (i = 3; ; p--, q--, r--, i--) {
		bits = *p & *q;
		if (bits) break; // got a potential interrupt
		if (*r) {
			// nothing in this word which is pending+inservice
			// but there is one inservice which masks lower
			return SPURIOUS_VECTOR;
		}
		if (i == 0) {
			// checked all bits... nothing pending+inservice
			return SPURIOUS_VECTOR;
		}
	}
	// have a pending,deliverable interrupt... see if it is masked
	bitnum = ia64_fls(bits);
	//printf("XXXXXXX vcpu_check_pending_interrupts: got bitnum=%p...",bitnum);
	vector = bitnum+(i*64);
	mask = 1L << bitnum;
	//printf("XXXXXXX vcpu_check_pending_interrupts: got vector=%p...",vector);
	if (*r >= mask) {
		// masked by equal inservice
		//printf("but masked by equal inservice\n");
		return SPURIOUS_VECTOR;
	}
	if (PSCB(vcpu).tpr & IA64_TPR_MMI) {
		// tpr.mmi is set
		//printf("but masked by tpr.mmi\n");
		return SPURIOUS_VECTOR;
	}
	if (((PSCB(vcpu).tpr & IA64_TPR_MIC) + 15) >= vector) {
		//tpr.mic masks class
		//printf("but masked by tpr.mic\n");
		return SPURIOUS_VECTOR;
	}
	//printf("returned to caller\n");
	return vector;
}
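
// NOTE on the scan above: irr, insvc and delivery_mask are 4x64-bit maps
// covering vectors 0..255, searched from the high word down; ia64_fls()
// returns the highest set bit, so vector = i*64 + bitnum is the highest
// pending vector.  It is deliverable only if it outranks every in-service
// vector (*r < mask) and clears tpr: tpr.mmi masks everything, and tpr.mic
// masks all vectors up to and including (mic<<4)+15.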
UINT64 vcpu_deliverable_interrupts(VCPU *vcpu)
{
	return (vcpu_get_psr_i(vcpu) &&
		vcpu_check_pending_interrupts(vcpu) != SPURIOUS_VECTOR);
}

IA64FAULT vcpu_get_lid(VCPU *vcpu, UINT64 *pval)
{
	extern unsigned long privop_trace;
	//privop_trace=1;
	//TODO: Implement this
	printf("vcpu_get_lid: WARNING: Getting cr.lid always returns zero\n");
	*pval = 0;
	return IA64_NO_FAULT;
}
IA64FAULT vcpu_get_ivr(VCPU *vcpu, UINT64 *pval)
{
	int i;
	UINT64 vector, mask;
#if 1
	static char firstivr = 1;
	static char firsttime[256];
	if (firstivr) {
		int i;
		for (i=0;i<256;i++) firsttime[i]=1;
		firstivr=0;
	}
#endif

	vector = vcpu_check_pending_interrupts(vcpu);
	if (vector == SPURIOUS_VECTOR) {
		PSCB(vcpu).pending_interruption = 0;
		*pval = vector;
		return IA64_NO_FAULT;
	}
	// now have an unmasked, pending, deliverable vector!
	// getting ivr has "side effects"
#if 0
	if (firsttime[vector]) {
		printf("*** First get_ivr on vector=%d,itc=%lx\n",
			vector,ia64_get_itc());
		firsttime[vector]=0;
	}
#endif
	i = vector >> 6;
	mask = 1L << (vector & 0x3f);
	//printf("ZZZZZZ vcpu_get_ivr: setting insvc mask for vector %ld\n",vector);
	PSCB(vcpu).insvc[i] |= mask;
	PSCB(vcpu).irr[i] &= ~mask;
	PSCB(vcpu).pending_interruption--;
	*pval = vector;
	return IA64_NO_FAULT;
}
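
// NOTE: reading cr.ivr is the interrupt-acknowledge cycle: the returned
// vector moves from irr (pending) to insvc (in-service) and stays there
// until the guest writes cr.eoi, which vcpu_set_eoi() below answers by
// clearing the highest in-service bit.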
IA64FAULT vcpu_get_tpr(VCPU *vcpu, UINT64 *pval)
{
	*pval = PSCB(vcpu).tpr;
	return (IA64_NO_FAULT);
}

IA64FAULT vcpu_get_eoi(VCPU *vcpu, UINT64 *pval)
{
	*pval = 0L;  // reads of eoi always return 0
	return (IA64_NO_FAULT);
}

IA64FAULT vcpu_get_irr0(VCPU *vcpu, UINT64 *pval)
{
#ifndef IRR_USE_FIXED
	printk("vcpu_get_irr: called, not implemented yet\n");
	return IA64_ILLOP_FAULT;
#else
	*pval = vcpu->irr[0];
	return (IA64_NO_FAULT);
#endif
}

IA64FAULT vcpu_get_irr1(VCPU *vcpu, UINT64 *pval)
{
#ifndef IRR_USE_FIXED
	printk("vcpu_get_irr: called, not implemented yet\n");
	return IA64_ILLOP_FAULT;
#else
	*pval = vcpu->irr[1];
	return (IA64_NO_FAULT);
#endif
}

IA64FAULT vcpu_get_irr2(VCPU *vcpu, UINT64 *pval)
{
#ifndef IRR_USE_FIXED
	printk("vcpu_get_irr: called, not implemented yet\n");
	return IA64_ILLOP_FAULT;
#else
	*pval = vcpu->irr[2];
	return (IA64_NO_FAULT);
#endif
}

IA64FAULT vcpu_get_irr3(VCPU *vcpu, UINT64 *pval)
{
#ifndef IRR_USE_FIXED
	printk("vcpu_get_irr: called, not implemented yet\n");
	return IA64_ILLOP_FAULT;
#else
	*pval = vcpu->irr[3];
	return (IA64_NO_FAULT);
#endif
}

IA64FAULT vcpu_get_itv(VCPU *vcpu, UINT64 *pval)
{
	*pval = PSCB(vcpu).itv;
	return (IA64_NO_FAULT);
}

IA64FAULT vcpu_get_pmv(VCPU *vcpu, UINT64 *pval)
{
	*pval = PSCB(vcpu).pmv;
	return (IA64_NO_FAULT);
}

IA64FAULT vcpu_get_cmcv(VCPU *vcpu, UINT64 *pval)
{
	*pval = PSCB(vcpu).cmcv;
	return (IA64_NO_FAULT);
}

IA64FAULT vcpu_get_lrr0(VCPU *vcpu, UINT64 *pval)
{
	// fix this when setting values other than m-bit is supported
	printf("vcpu_get_lrr0: Unmasked interrupts unsupported\n");
	*pval = (1L << 16);
	return (IA64_NO_FAULT);
}

IA64FAULT vcpu_get_lrr1(VCPU *vcpu, UINT64 *pval)
{
	// fix this when setting values other than m-bit is supported
	printf("vcpu_get_lrr1: Unmasked interrupts unsupported\n");
	*pval = (1L << 16);
	return (IA64_NO_FAULT);
}

IA64FAULT vcpu_set_lid(VCPU *vcpu, UINT64 val)
{
	printf("vcpu_set_lid: Setting cr.lid is unsupported\n");
	return (IA64_ILLOP_FAULT);
}
IA64FAULT vcpu_set_tpr(VCPU *vcpu, UINT64 val)
{
	if (val & 0xff00) return IA64_RSVDREG_FAULT;
	PSCB(vcpu).tpr = val;
	return (IA64_NO_FAULT);
}

IA64FAULT vcpu_set_eoi(VCPU *vcpu, UINT64 val)
{
	UINT64 *p, bits, vec, bitnum;
	int i;

	p = &PSCB(vcpu).insvc[3];
	for (i = 3; (i >= 0) && !(bits = *p); i--, p--);
	if (i < 0) {
		printf("Trying to EOI interrupt when none are in-service.\r\n");
		return IA64_NO_FAULT;	// nothing in-service: treat the eoi as a no-op
	}
	bitnum = ia64_fls(bits);
	vec = bitnum + (i*64);
	/* clear the correct bit */
	bits &= ~(1L << bitnum);
	*p = bits;
	/* clearing an eoi bit may unmask another pending interrupt... */
	if (PSCB(vcpu).interrupt_delivery_enabled) {	// but only if enabled...
		// worry about this later... Linux only calls eoi
		// with interrupts disabled
		printf("Trying to EOI interrupt with interrupts enabled\r\n");
	}
	//printf("YYYYY vcpu_set_eoi: Successful\n");
	return (IA64_NO_FAULT);
}
IA64FAULT vcpu_set_lrr0(VCPU *vcpu, UINT64 val)
{
	if (!(val & (1L << 16))) {
		printf("vcpu_set_lrr0: Unmasked interrupts unsupported\n");
		return (IA64_ILLOP_FAULT);
	}
	// no place to save this state but nothing to do anyway
	return (IA64_NO_FAULT);
}

IA64FAULT vcpu_set_lrr1(VCPU *vcpu, UINT64 val)
{
	if (!(val & (1L << 16))) {
		printf("vcpu_set_lrr1: Unmasked interrupts unsupported\n");
		return (IA64_ILLOP_FAULT);
	}
	// no place to save this state but nothing to do anyway
	return (IA64_NO_FAULT);
}
IA64FAULT vcpu_set_itv(VCPU *vcpu, UINT64 val)
{
	extern unsigned long privop_trace;
	//privop_trace=1;
	if (val & 0xef00) return (IA64_ILLOP_FAULT);
	PSCB(vcpu).itv = val;
	if (val & 0x10000) {	// guest masked its timer (itv.m)
		printf("**** vcpu_set_itv(%lx): vitm=%lx, setting to 0\n",
			val,PSCB(vcpu).domain_itm);
		PSCB(vcpu).domain_itm = 0;
	}
	else vcpu_enable_timer(vcpu,1000000L);
	return (IA64_NO_FAULT);
}

IA64FAULT vcpu_set_pmv(VCPU *vcpu, UINT64 val)
{
	if (val & 0xef00) /* reserved fields */ return IA64_RSVDREG_FAULT;
	PSCB(vcpu).pmv = val;
	return (IA64_NO_FAULT);
}

IA64FAULT vcpu_set_cmcv(VCPU *vcpu, UINT64 val)
{
	if (val & 0xef00) /* reserved fields */ return IA64_RSVDREG_FAULT;
	PSCB(vcpu).cmcv = val;
	return (IA64_NO_FAULT);
}
/**************************************************************************
 Interval timer routines
**************************************************************************/

BOOLEAN vcpu_timer_disabled(VCPU *vcpu)
{
	UINT64 itv = PSCB(vcpu).itv;
	return(!itv || !!(itv & 0x10000));
}

BOOLEAN vcpu_timer_expired(VCPU *vcpu)
{
	unsigned long domain_itm = PSCB(vcpu).domain_itm;
	unsigned long now = ia64_get_itc();

	if (domain_itm && (now > domain_itm) &&
		!vcpu_timer_disabled(vcpu)) return TRUE;
	return FALSE;
}

void vcpu_safe_set_itm(unsigned long val)
{
	unsigned long epsilon = 100;
	UINT64 now = ia64_get_itc();

	local_irq_disable();
	while (1) {
		//printf("*** vcpu_safe_set_itm: Setting itm to %lx, itc=%lx\n",val,now);
		ia64_set_itm(val);
		if (val > (now = ia64_get_itc())) break;
		val = now + epsilon;
		epsilon <<= 1;
	}
	local_irq_enable();
}
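
// NOTE: if the requested match value is already behind ar.itc by the time
// itm is written, the tick would never fire.  The loop above detects that
// case and retries with an exponentially growing margin until the match is
// safely in the future.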
void vcpu_set_next_timer(VCPU *vcpu)
{
	UINT64 d = PSCB(vcpu).domain_itm;
	//UINT64 s = PSCB(vcpu).xen_itm;
	UINT64 s = local_cpu_data->itm_next;
	UINT64 now = ia64_get_itc();
	//UINT64 interval = PSCB(vcpu).xen_timer_interval;

	/* gloss over the wraparound problem for now... we know it exists
	 * but it doesn't matter right now */

#if 0
	/* ensure at least next SP tick is in the future */
	if (!interval) PSCB(vcpu).xen_itm = now +
#if 0
		(running_on_sim() ? SIM_DEFAULT_CLOCK_RATE :
					DEFAULT_CLOCK_RATE);
#else
		3000000;
	//printf("vcpu_set_next_timer: HACK!\n");
#endif
#if 0
	if (PSCB(vcpu).xen_itm < now)
		while (PSCB(vcpu).xen_itm < now + (interval>>1))
			PSCB(vcpu).xen_itm += interval;
#endif
#endif

	if (is_idle_task(vcpu)) {
		printf("****** vcpu_set_next_timer called during idle!!\n");
	}
	//s = PSCB(vcpu).xen_itm;
	if (d && (d > now) && (d < s)) {
		vcpu_safe_set_itm(d);
		//using_domain_as_itm++;
	}
	else {
		vcpu_safe_set_itm(s);
		//using_xen_as_itm++;
	}
}
// parameter is a time interval specified in cycles
void vcpu_enable_timer(VCPU *vcpu,UINT64 cycles)
{
	PSCB(vcpu).xen_timer_interval = cycles;
	vcpu_set_next_timer(vcpu);
	printf("vcpu_enable_timer: interval set to %ld cycles\n",
		PSCB(vcpu).xen_timer_interval);
	__set_bit(PSCB(vcpu).itv, PSCB(vcpu).delivery_mask);
}

IA64FAULT vcpu_set_itm(VCPU *vcpu, UINT64 val)
{
	UINT64 now = ia64_get_itc();

	//if (val < now) val = now + 1000;
	//printf("*** vcpu_set_itm: called with %lx\n",val);
	PSCB(vcpu).domain_itm = val;
	vcpu_set_next_timer(vcpu);
	return (IA64_NO_FAULT);
}
IA64FAULT vcpu_set_itc(VCPU *vcpu, UINT64 val)
{
	UINT64 oldnow = ia64_get_itc();
	UINT64 olditm = PSCB(vcpu).domain_itm;
	unsigned long d = olditm - oldnow;
	unsigned long x = local_cpu_data->itm_next - oldnow;

	UINT64 newnow = val, min_delta;

	local_irq_disable();
	if (olditm) {
		printf("**** vcpu_set_itc(%lx): vitm changed to %lx\n",val,newnow+d);
		PSCB(vcpu).domain_itm = newnow + d;
	}
	local_cpu_data->itm_next = newnow + x;
	d = PSCB(vcpu).domain_itm;
	x = local_cpu_data->itm_next;

	ia64_set_itc(newnow);
	if (d && (d > newnow) && (d < x)) {
		vcpu_safe_set_itm(d);
		//using_domain_as_itm++;
	}
	else {
		vcpu_safe_set_itm(x);
		//using_xen_as_itm++;
	}
	local_irq_enable();
	return (IA64_NO_FAULT);
}
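
// NOTE: rewriting ar.itc would silently shift every pending deadline, so
// vcpu_set_itc() first captures the domain's itm and Xen's itm_next as
// deltas (d, x) against the old counter and rebases both onto the new
// value before reprogramming the hardware match register.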
IA64FAULT vcpu_get_itm(VCPU *vcpu, UINT64 *pval)
{
	//FIXME: Implement this
	printf("vcpu_get_itm: Getting cr.itm is unsupported... continuing\n");
	return (IA64_NO_FAULT);
	//return (IA64_ILLOP_FAULT);
}

IA64FAULT vcpu_get_itc(VCPU *vcpu, UINT64 *pval)
{
	//TODO: Implement this
	printf("vcpu_get_itc: Getting ar.itc is unsupported\n");
	return (IA64_ILLOP_FAULT);
}

void vcpu_pend_timer(VCPU *vcpu)
{
	UINT64 itv = PSCB(vcpu).itv & 0xff;

	if (vcpu_timer_disabled(vcpu)) return;
	vcpu_pend_interrupt(vcpu, itv);
}

//FIXME: This is a hack because everything dies if a timer tick is lost
void vcpu_poke_timer(VCPU *vcpu)
{
	UINT64 itv = PSCB(vcpu).itv & 0xff;
	UINT64 now = ia64_get_itc();
	UINT64 itm = PSCB(vcpu).domain_itm;
	UINT64 irr;

	if (vcpu_timer_disabled(vcpu)) return;
	if (!itm) return;
	if (itv != 0xefL) {
		printf("vcpu_poke_timer: unimplemented itv=%lx!\n",itv);
		while(1);
	}
	// using 0xef instead of itv so can get real irr
	if (now > itm && !test_bit(0xefL, PSCB(vcpu).insvc)) {
		if (!test_bit(0xefL,PSCB(vcpu).irr)) {
			irr = ia64_getreg(_IA64_REG_CR_IRR3);
			if (irr & (1L<<(0xef-0xc0))) return;
			if (now-itm>0x800000)
				printf("*** poking timer: now=%lx,vitm=%lx,xitm=%lx,itm=%lx\n",
					now,itm,local_cpu_data->itm_next,ia64_get_itm());
			vcpu_pend_interrupt(vcpu, 0xefL);
		}
	}
}
/**************************************************************************
 Privileged operation emulation routines
**************************************************************************/

IA64FAULT vcpu_force_data_miss(VCPU *vcpu, UINT64 ifa)
{
	PSCB(vcpu).ifa = ifa;	// privop traps don't set ifa so do it here
	return (IA64_DATA_TLB_VECTOR | IA64_FORCED_IFA);
}

IA64FAULT vcpu_rfi(VCPU *vcpu)
{
	// TODO: Only allowed for current vcpu
	PSR psr;
	UINT64 int_enable, regspsr = 0;
	UINT64 ifs;
	REGS *regs = vcpu_regs(vcpu);
	extern void dorfirfi(void);

	psr.i64 = PSCB(vcpu).ipsr;
	if (psr.cpl < 3) psr.cpl = 2;
	if (psr.i) PSCB(vcpu).interrupt_delivery_enabled = 1;
	int_enable = psr.i;
	if (psr.ic) PSCB(vcpu).interrupt_collection_enabled = 1;
	if (psr.dt && psr.rt && psr.it) vcpu_set_metaphysical_mode(vcpu,FALSE);
	else vcpu_set_metaphysical_mode(vcpu,TRUE);
	psr.ic = 1; psr.i = 1;
	psr.dt = 1; psr.rt = 1; psr.it = 1;
	psr.bn = 1;
	//psr.pk = 1;  // checking pkeys shouldn't be a problem but seems broken
	if (psr.be) {
		printf("*** DOMAIN TRYING TO TURN ON BIG-ENDIAN!!!\n");
		return (IA64_ILLOP_FAULT);
	}
	PSCB(vcpu).incomplete_regframe = 0; // is this necessary?
	ifs = PSCB(vcpu).ifs;
	//if ((ifs & regs->cr_ifs & 0x8000000000000000L) && ifs != regs->cr_ifs) {
	//if ((ifs & 0x8000000000000000L) && ifs != regs->cr_ifs) {
	if (ifs & regs->cr_ifs & 0x8000000000000000L) {
#define SI_OFS(x) ((char *)(&PSCB(vcpu).x) - (char *)(vcpu->shared_info))
		if (SI_OFS(iip)!=0x150 || SI_OFS(ipsr)!=0x148 || SI_OFS(ifs)!=0x158) {
			printf("SI_CR_IIP/IPSR/IFS_OFFSET CHANGED, SEE dorfirfi\n");
			while(1);
		}
		// TODO: validate PSCB(vcpu).iip
		// TODO: PSCB(vcpu).ipsr = psr;
		PSCB(vcpu).ipsr = psr.i64;
		// now set up the trampoline
		regs->cr_iip = *(unsigned long *)dorfirfi; // function pointer!!
		__asm__ __volatile ("mov %0=psr;;":"=r"(regspsr)::"memory");
		regs->cr_ipsr = regspsr & ~(IA64_PSR_I | IA64_PSR_IC | IA64_PSR_BN);
	}
	else {
		regs->cr_ipsr = psr.i64;
		regs->cr_iip = PSCB(vcpu).iip;
	}
	PSCB(vcpu).interrupt_collection_enabled = 1;
	vcpu_bsw1(vcpu);
	PSCB(vcpu).interrupt_delivery_enabled = int_enable;
	return (IA64_NO_FAULT);
}
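
// NOTE on vcpu_rfi(): when the guest's saved cr.ifs and the current frame
// both have the valid bit (bit 63) set, the register frame must be restored
// by a real rfi, so execution detours through the dorfirfi trampoline
// (which reloads iip/ipsr/ifs from the shared_info area -- hence the
// SI_OFS offset sanity check) with interruption delivery and collection
// disabled for the duration.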
IA64FAULT vcpu_cover(VCPU *vcpu)
{
	REGS *regs = vcpu_regs(vcpu);

	if (!PSCB(vcpu).interrupt_collection_enabled) {
		if (!PSCB(vcpu).incomplete_regframe)
			PSCB(vcpu).ifs = regs->cr_ifs;
		else PSCB(vcpu).incomplete_regframe = 0;
	}
	regs->cr_ifs = 0;
	return (IA64_NO_FAULT);
}
IA64FAULT vcpu_thash(VCPU *vcpu, UINT64 vadr, UINT64 *pval)
{
	extern unsigned long vcpu_get_rr_ps(VCPU *vcpu,UINT64 vadr);
	UINT64 pta = PSCB(vcpu).pta;
	UINT64 pta_sz = (pta & IA64_PTA_SZ(0x3f)) >> IA64_PTA_SZ_BIT;
	UINT64 pta_base = pta & ~((1UL << IA64_PTA_BASE_BIT)-1);
	UINT64 Mask = (1L << pta_sz) - 1;
	UINT64 Mask_60_15 = (Mask >> 15) & 0x3fffffffffff;
	UINT64 compMask_60_15 = ~Mask_60_15;
	//UINT64 rr_ps = RR_TO_PS(get_rr(vadr));
	UINT64 rr_ps = vcpu_get_rr_ps(vcpu,vadr);
	UINT64 VHPT_offset = (vadr >> rr_ps) << 3;
	UINT64 VHPT_addr1 = vadr & 0xe000000000000000L;
	UINT64 VHPT_addr2a =
		((pta_base >> 15) & 0x3fffffffffff) & compMask_60_15;
	UINT64 VHPT_addr2b =
		((VHPT_offset >> 15) & 0x3fffffffffff) & Mask_60_15;
	UINT64 VHPT_addr3 = VHPT_offset & 0x3fff;
	UINT64 VHPT_addr = VHPT_addr1 | ((VHPT_addr2a | VHPT_addr2b) << 15) |
			VHPT_addr3;

	if (VHPT_addr1 == 0xe000000000000000L) {
		printf("vcpu_thash: thash unsupported with rr7 @%lx\n",
			PSCB(vcpu).iip);
		return (IA64_ILLOP_FAULT);
	}
	//verbose("vcpu_thash: vadr=%p, VHPT_addr=%p\n",vadr,VHPT_addr);
	*pval = VHPT_addr;
	return (IA64_NO_FAULT);
}
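
// NOTE: this is the architected short-format VHPT hash: the page number of
// vadr (shifted by the region's preferred page size) becomes an 8-byte PTE
// offset which is spliced into pta.base -- bits 60..15 are taken from the
// base where the VHPT-size mask is 0 and from the offset where it is 1,
// the offset's low bits pass straight through, and the top three bits
// preserve the region of the original address.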
IA64FAULT vcpu_ttag(VCPU *vcpu, UINT64 vadr, UINT64 *padr)
{
	printf("vcpu_ttag: ttag instruction unsupported\n");
	return (IA64_ILLOP_FAULT);
}

IA64FAULT vcpu_tpa(VCPU *vcpu, UINT64 vadr, UINT64 *padr)
{
	extern TR_ENTRY *match_tr(VCPU *,UINT64);
	extern TR_ENTRY *match_dtlb(VCPU *,UINT64);
	TR_ENTRY *trp;
	UINT64 mask;

	extern unsigned long privop_trace;
	if ((trp=match_tr(current,vadr)) || (trp=match_dtlb(current,vadr))) {
		mask = (1L << trp->ps) - 1;
		*padr = ((trp->ppn << 12) & ~mask) | (vadr & mask);
		verbose("vcpu_tpa: addr=%p @%p, successful, padr=%p\n",vadr,PSCB(vcpu).iip,*padr);
		return (IA64_NO_FAULT);
	}
	verbose("vcpu_tpa addr=%p, @%p, forcing data miss\n",vadr,PSCB(vcpu).iip);
	return vcpu_force_data_miss(vcpu, vadr);
}

IA64FAULT vcpu_tak(VCPU *vcpu, UINT64 vadr, UINT64 *key)
{
	printf("vcpu_tak: tak instruction unsupported\n");
	return (IA64_ILLOP_FAULT);
	// HACK ALERT: tak does a thash for now
	//return vcpu_thash(vcpu,vadr,key);
}
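
// NOTE: vcpu_tpa() translates by software lookup in the virtual TRs and the
// one-entry dtlb shadow: ppn<<12 is the physical page base, the mask keeps
// the in-page offset, and a miss is turned into a forced-ifa data TLB fault
// for the guest to service.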
/**************************************************************************
 VCPU debug breakpoint register access routines
**************************************************************************/

IA64FAULT vcpu_set_dbr(VCPU *vcpu, UINT64 reg, UINT64 val)
{
	// TODO: unimplemented DBRs return a reserved register fault
	// TODO: Should set Logical CPU state, not just physical
	ia64_set_dbr(reg,val);
	return (IA64_NO_FAULT);
}

IA64FAULT vcpu_set_ibr(VCPU *vcpu, UINT64 reg, UINT64 val)
{
	// TODO: unimplemented IBRs return a reserved register fault
	// TODO: Should set Logical CPU state, not just physical
	ia64_set_ibr(reg,val);
	return (IA64_NO_FAULT);
}

IA64FAULT vcpu_get_dbr(VCPU *vcpu, UINT64 reg, UINT64 *pval)
{
	// TODO: unimplemented DBRs return a reserved register fault
	UINT64 val = ia64_get_dbr(reg);
	*pval = val;
	return (IA64_NO_FAULT);
}

IA64FAULT vcpu_get_ibr(VCPU *vcpu, UINT64 reg, UINT64 *pval)
{
	// TODO: unimplemented IBRs return a reserved register fault
	UINT64 val = ia64_get_ibr(reg);
	*pval = val;
	return (IA64_NO_FAULT);
}
/**************************************************************************
 VCPU performance monitor register access routines
**************************************************************************/

IA64FAULT vcpu_set_pmc(VCPU *vcpu, UINT64 reg, UINT64 val)
{
	// TODO: Should set Logical CPU state, not just physical
	// NOTE: Writes to unimplemented PMC registers are discarded
	ia64_set_pmc(reg,val);
	return (IA64_NO_FAULT);
}

IA64FAULT vcpu_set_pmd(VCPU *vcpu, UINT64 reg, UINT64 val)
{
	// TODO: Should set Logical CPU state, not just physical
	// NOTE: Writes to unimplemented PMD registers are discarded
	ia64_set_pmd(reg,val);
	return (IA64_NO_FAULT);
}

IA64FAULT vcpu_get_pmc(VCPU *vcpu, UINT64 reg, UINT64 *pval)
{
	// NOTE: Reads from unimplemented PMC registers return zero
	UINT64 val = (UINT64)ia64_get_pmc(reg);
	*pval = val;
	return (IA64_NO_FAULT);
}

IA64FAULT vcpu_get_pmd(VCPU *vcpu, UINT64 reg, UINT64 *pval)
{
	// NOTE: Reads from unimplemented PMD registers return zero
	UINT64 val = (UINT64)ia64_get_pmd(reg);
	*pval = val;
	return (IA64_NO_FAULT);
}
/**************************************************************************
 VCPU banked general register access routines
**************************************************************************/

IA64FAULT vcpu_bsw0(VCPU *vcpu)
{
	REGS *regs = vcpu_regs(vcpu);
	unsigned long *r = &regs->r16;
	unsigned long *b0 = &PSCB(vcpu).bank0_regs[0];
	unsigned long *b1 = &PSCB(vcpu).bank1_regs[0];
	int i;

	if (PSCB(vcpu).banknum) {
		for (i = 0; i < 16; i++) { *b1++ = *r; *r++ = *b0++; }
		PSCB(vcpu).banknum = 0;
	}
	return (IA64_NO_FAULT);
}

IA64FAULT vcpu_bsw1(VCPU *vcpu)
{
	REGS *regs = vcpu_regs(vcpu);
	unsigned long *r = &regs->r16;
	unsigned long *b0 = &PSCB(vcpu).bank0_regs[0];
	unsigned long *b1 = &PSCB(vcpu).bank1_regs[0];
	int i;

	if (!PSCB(vcpu).banknum) {
		for (i = 0; i < 16; i++) { *b0++ = *r; *r++ = *b1++; }
		PSCB(vcpu).banknum = 1;
	}
	return (IA64_NO_FAULT);
}
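
// NOTE: ia64 has two banks of r16-r31 selected by psr.bn, but only one copy
// lives in pt_regs; bsw0/bsw1 emulate the bank-switch instructions by
// swapping the live r16-r31 with the inactive bank saved in the PSCB.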
/**************************************************************************
 VCPU cpuid access routines
**************************************************************************/

IA64FAULT vcpu_get_cpuid(VCPU *vcpu, UINT64 reg, UINT64 *pval)
{
	// FIXME: This could get called as a result of a rsvd-reg fault
	// if reg > 3
	switch(reg) {
	    case 0:
	    case 1:
		memcpy(pval,"Xen/ia64",8);
		break;
	    case 2:
		*pval = 0;
		break;
	    case 3:
		*pval = 0;  //FIXME: See vol1, 3.1.11
		break;
	    case 4:
		*pval = 1;  //FIXME: See vol1, 3.1.11
		break;
	    default:
		*pval = 0;  //FIXME: See vol1, 3.1.11
		break;
	}
	return (IA64_NO_FAULT);
}
/**************************************************************************
 VCPU region register access routines
**************************************************************************/

unsigned long vcpu_get_rr_ve(VCPU *vcpu,UINT64 vadr)
{
	ia64_rr rr;

	rr.rrval = PSCB(vcpu).rrs[vadr>>61];
	return(rr.ve);
}

unsigned long vcpu_get_rr_ps(VCPU *vcpu,UINT64 vadr)
{
	ia64_rr rr;

	rr.rrval = PSCB(vcpu).rrs[vadr>>61];
	return(rr.ps);
}

unsigned long vcpu_get_rr_rid(VCPU *vcpu,UINT64 vadr)
{
	ia64_rr rr;

	rr.rrval = PSCB(vcpu).rrs[vadr>>61];
	return(rr.rid);
}

IA64FAULT vcpu_set_rr(VCPU *vcpu, UINT64 reg, UINT64 val)
{
	extern void set_one_rr(UINT64, UINT64);
	PSCB(vcpu).rrs[reg>>61] = val;
	// warning: set_one_rr() does it "live"
	set_one_rr(reg,val);
	return (IA64_NO_FAULT);
}

IA64FAULT vcpu_get_rr(VCPU *vcpu, UINT64 reg, UINT64 *pval)
{
	UINT64 val = PSCB(vcpu).rrs[reg>>61];
	*pval = val;
	return (IA64_NO_FAULT);
}
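
// NOTE: vadr>>61 is the region number (top three bits of a virtual
// address); each of the eight virtual region registers holds ve (bit 0),
// ps (bits 2..7) and rid (bits 8..31) for its region.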
/**************************************************************************
 VCPU protection key register access routines
**************************************************************************/

IA64FAULT vcpu_get_pkr(VCPU *vcpu, UINT64 reg, UINT64 *pval)
{
#ifndef PKR_USE_FIXED
	printk("vcpu_get_pkr: called, not implemented yet\n");
	return IA64_ILLOP_FAULT;
#else
	UINT64 val = (UINT64)ia64_get_pkr(reg);
	*pval = val;
	return (IA64_NO_FAULT);
#endif
}

IA64FAULT vcpu_set_pkr(VCPU *vcpu, UINT64 reg, UINT64 val)
{
#ifndef PKR_USE_FIXED
	printk("vcpu_set_pkr: called, not implemented yet\n");
	return IA64_ILLOP_FAULT;
#else
//	if (reg >= NPKRS) return (IA64_ILLOP_FAULT);
	vcpu->pkrs[reg] = val;
	ia64_set_pkr(reg,val);
	return (IA64_NO_FAULT);
#endif
}
/**************************************************************************
 VCPU translation register access routines
**************************************************************************/

static void vcpu_purge_tr_entry(TR_ENTRY *trp)
{
	trp->p = 0;
}

static void vcpu_set_tr_entry(TR_ENTRY *trp, UINT64 pte, UINT64 itir, UINT64 ifa)
{
	UINT64 ps;

	trp->itir = itir;
	trp->rid = virtualize_rid(current, get_rr(ifa) & RR_RID_MASK);
	trp->p = 1;
	ps = trp->ps;
	trp->page_flags = pte;
	if (trp->pl < 2) trp->pl = 2;
	trp->vadr = ifa & ~0xfff;
	if (ps > 12) { // "ignore" relevant low-order bits
		trp->ppn &= ~((1UL<<(ps-12))-1);
		trp->vadr &= ~((1UL<<ps)-1);
	}
}

TR_ENTRY *vcpu_match_tr_entry(VCPU *vcpu, TR_ENTRY *trp, UINT64 ifa, int count)
{
	unsigned long rid = (get_rr(ifa) & RR_RID_MASK);
	int i;

	for (i = 0; i < count; i++, trp++) {
		if (!trp->p) continue;
		if (physicalize_rid(vcpu,trp->rid) != rid) continue;
		if (ifa < trp->vadr) continue;
		if (ifa >= (trp->vadr + (1L << trp->ps)) - 1) continue;
		//if (trp->key && !match_pkr(vcpu,trp->key)) continue;
		return trp;
	}
	return 0;
}

TR_ENTRY *match_tr(VCPU *vcpu, unsigned long ifa)
{
	TR_ENTRY *trp;

	trp = vcpu_match_tr_entry(vcpu,vcpu->shared_info->arch.dtrs,ifa,NDTRS);
	if (trp) return trp;
	trp = vcpu_match_tr_entry(vcpu,vcpu->shared_info->arch.itrs,ifa,NITRS);
	if (trp) return trp;
	return 0;
}

IA64FAULT vcpu_itr_d(VCPU *vcpu, UINT64 slot, UINT64 pte,
		UINT64 itir, UINT64 ifa)
{
	TR_ENTRY *trp;

	if (slot >= NDTRS) return IA64_RSVDREG_FAULT;
	trp = &PSCB(vcpu).dtrs[slot];
	vcpu_set_tr_entry(trp,pte,itir,ifa);
	return IA64_NO_FAULT;
}

IA64FAULT vcpu_itr_i(VCPU *vcpu, UINT64 slot, UINT64 pte,
		UINT64 itir, UINT64 ifa)
{
	TR_ENTRY *trp;

	if (slot >= NITRS) return IA64_RSVDREG_FAULT;
	trp = &PSCB(vcpu).itrs[slot];
	vcpu_set_tr_entry(trp,pte,itir,ifa);
	return IA64_NO_FAULT;
}
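
// NOTE: guest TR inserts are not placed in hardware translation registers;
// they are only recorded in these PSCB arrays and consulted by software
// (e.g. match_tr() and vcpu_tpa()), with the guest's rid mapped through
// virtualize_rid()/physicalize_rid() so domains can share the machine rid
// space.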
/**************************************************************************
 VCPU translation cache access routines
**************************************************************************/

void foobar(void) { /*vcpu_verbose = 1;*/ }

extern VCPU *dom0;

void vcpu_itc_no_srlz(VCPU *vcpu, UINT64 IorD, UINT64 vaddr, UINT64 pte, UINT64 logps)
{
	unsigned long psr;
	unsigned long ps = (vcpu==dom0) ? logps : PAGE_SHIFT;

	// FIXME: validate ifa here (not in Xen space), COULD MACHINE CHECK!
	// FIXME, must be inlined or potential for nested fault here!
	psr = ia64_clear_ic();
	ia64_itc(IorD,vaddr,pte,ps);	// FIXME: look for bigger mappings
	ia64_set_psr(psr);
	// ia64_srlz_i();  // no srlz req'd, will rfi later
	if (IorD & 0x1) vcpu_set_tr_entry(&PSCB(vcpu).itlb,pte,logps<<2,vaddr);
	if (IorD & 0x2) vcpu_set_tr_entry(&PSCB(vcpu).dtlb,pte,logps<<2,vaddr);
}
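
// NOTE: the itc insert runs with psr.ic cleared so a stray TLB miss cannot
// nest inside it; logps<<2 reconstitutes an itir (ps lives in itir bits
// 2..7) for the one-entry itlb/dtlb shadows that remember the last
// inserted mapping.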
TR_ENTRY *match_dtlb(VCPU *vcpu, unsigned long ifa)
{
	return vcpu_match_tr_entry(vcpu,&vcpu->shared_info->arch.dtlb,ifa,1);
}

IA64FAULT vcpu_itc_d(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa)
{
	unsigned long pteval, logps = (itir >> 2) & 0x3f;
	unsigned long translate_domain_pte(UINT64,UINT64,UINT64);

	if (logps < PAGE_SHIFT) {
		printf("vcpu_itc_d: domain trying to use smaller page size!\n");
		//FIXME: kill domain here
		while(1);
	}
	//itir = (itir & ~0xfc) | (PAGE_SHIFT<<2); // ignore domain's pagesize
	pteval = translate_domain_pte(pte,ifa,itir);
	if (!pteval) return IA64_ILLOP_FAULT;
	vcpu_itc_no_srlz(vcpu,2,ifa,pteval,logps);
	return IA64_NO_FAULT;
}

IA64FAULT vcpu_itc_i(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa)
{
	unsigned long pteval, logps = (itir >> 2) & 0x3f;
	unsigned long translate_domain_pte(UINT64,UINT64,UINT64);

	// FIXME: validate ifa here (not in Xen space), COULD MACHINE CHECK!
	if (logps < PAGE_SHIFT) {
		printf("vcpu_itc_i: domain trying to use smaller page size!\n");
		//FIXME: kill domain here
		while(1);
	}
	//itir = (itir & ~0xfc) | (PAGE_SHIFT<<2); // ignore domain's pagesize
	pteval = translate_domain_pte(pte,ifa,itir);
	// FIXME: what to do if bad physical address? (machine check?)
	if (!pteval) return IA64_ILLOP_FAULT;
	vcpu_itc_no_srlz(vcpu, 1,ifa,pteval,logps);
	return IA64_NO_FAULT;
}
IA64FAULT vcpu_ptc_l(VCPU *vcpu, UINT64 vadr, UINT64 addr_range)
{
	printk("vcpu_ptc_l: called, not implemented yet\n");
	return IA64_ILLOP_FAULT;
}

IA64FAULT vcpu_fc(VCPU *vcpu, UINT64 vadr)
{
	UINT64 mpaddr;
	IA64FAULT fault;
	unsigned long lookup_domain_mpa(VCPU *,unsigned long);
	unsigned long pteval, dom_imva;

	fault = vcpu_tpa(vcpu, vadr, &mpaddr);
	if (fault == IA64_NO_FAULT) {
		extern struct domain *dom0;	// the dom0 globals, not locals
		extern unsigned long dom0_start, dom0_size;
		if (vcpu == dom0) {
			if (mpaddr < dom0_start || mpaddr >= dom0_start + dom0_size) {
				printk("vcpu_fc: bad dom0 mpaddr %p!\n",mpaddr);
			}
		}
		pteval = lookup_domain_mpa(vcpu,mpaddr);
		if (pteval) {
			dom_imva = __va(pteval & _PFN_MASK);
			ia64_fc(dom_imva);
		}
		else {
			REGS *regs = vcpu_regs(vcpu);
			printk("vcpu_fc: can't flush vadr=%p, iip=%p\n",
				vadr,regs->cr_iip);
		}
	}
	return fault;
}
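
// NOTE: fc (flush cache) operates on a virtual address, so its emulation
// first translates the guest address (vcpu_tpa), maps the resulting
// domain-physical address to a machine frame (lookup_domain_mpa), and
// flushes through Xen's identity-mapped view of that frame (__va).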
IA64FAULT vcpu_ptc_e(VCPU *vcpu, UINT64 vadr)
{
	// Note that this only needs to be called once, i.e. the
	// architected loop to purge the entire TLB, should use
	//  base = stride1 = stride2 = 0, count0 = count1 = 1

	// FIXME: When VHPT is in place, flush that too!
	local_flush_tlb_all();
	// just invalidate the "whole" tlb
	vcpu_purge_tr_entry(&PSCB(vcpu).dtlb);
	vcpu_purge_tr_entry(&PSCB(vcpu).itlb);
	return IA64_NO_FAULT;
}

IA64FAULT vcpu_ptc_g(VCPU *vcpu, UINT64 vadr, UINT64 addr_range)
{
	printk("vcpu_ptc_g: called, not implemented yet\n");
	return IA64_ILLOP_FAULT;
}

IA64FAULT vcpu_ptc_ga(VCPU *vcpu,UINT64 vadr,UINT64 addr_range)
{
	extern void ia64_global_tlb_purge(UINT64 start, UINT64 end, UINT64 nbits);
	// FIXME: validate not flushing Xen addresses
	// if (Xen address) return(IA64_ILLOP_FAULT);
	// FIXME: ??breaks if domain PAGE_SIZE < Xen PAGE_SIZE
	ia64_global_tlb_purge(vadr,vadr+addr_range,PAGE_SHIFT);
	vcpu_purge_tr_entry(&PSCB(vcpu).dtlb);
	vcpu_purge_tr_entry(&PSCB(vcpu).itlb);
	return IA64_NO_FAULT;
}
IA64FAULT vcpu_ptr_d(VCPU *vcpu,UINT64 vadr,UINT64 addr_range)
{
	printf("vcpu_ptr_d: Purging TLB is unsupported\n");
	return (IA64_ILLOP_FAULT);
}

IA64FAULT vcpu_ptr_i(VCPU *vcpu,UINT64 vadr,UINT64 addr_range)
{
	printf("vcpu_ptr_i: Purging TLB is unsupported\n");
	return (IA64_ILLOP_FAULT);
}

void vcpu_set_regs(VCPU *vcpu, REGS *regs)
{
	vcpu->regs = regs;
}