ia64/xen-unstable

xen/arch/ia64/vcpu.c @ 4600:628d7ea9f439

bitkeeper revision 1.1327.1.4 (42669bd4kZSb8k2rkjOs_SZ__ekHtA)

Merge djm@sportsman.fc.hp.com:/home/djm/xeno-unstable-ia64.bk
into kirby.fc.hp.com:/home/djm/src/xen/xeno-unstable-ia64.bk
author djm@kirby.fc.hp.com
date Wed Apr 20 18:13:40 2005 +0000 (2005-04-20)
parents 58efb3448933 828a6e563cf1
children 89d65362afad
/*
 * Virtualized CPU functions
 *
 * Copyright (C) 2004 Hewlett-Packard Co.
 *     Dan Magenheimer (dan.magenheimer@hp.com)
 *
 */

#include <linux/sched.h>
#include <public/arch-ia64.h>
#include <asm/ia64_int.h>
#include <asm/vcpu.h>
#include <asm/regionreg.h>
#include <asm/tlb.h>
#include <asm/processor.h>
#include <asm/delay.h>
typedef union {
    struct ia64_psr ia64_psr;
    unsigned long i64;
} PSR;

//typedef struct pt_regs REGS;
//typedef struct domain VCPU;

// this def for vcpu_regs won't work if kernel stack is present
#define vcpu_regs(vcpu) ((struct pt_regs *) (vcpu)->arch.regs)
#define PSCB(x,y) x->vcpu_info->arch.y
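// Illustrative sketch (not built, guarded out like the other dead code in
// this file): how the two accessor macros above are typically used.  PSCB
// expands to a field of the privileged state communication block, e.g.
// PSCB(vcpu,iip) becomes vcpu->vcpu_info->arch.iip.
#if 0
static void pscb_example(VCPU *vcpu)
{
    REGS *regs = vcpu_regs(vcpu);    // guest register frame
    UINT64 ip;
    PSCB(vcpu, iip) = regs->cr_iip;  // the macro yields an lvalue...
    ip = PSCB(vcpu, iip);            // ...so both writes and reads work
}
#endif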
#define TRUE 1
#define FALSE 0
#define IA64_PTA_SZ_BIT 2
#define IA64_PTA_VF_BIT 8
#define IA64_PTA_BASE_BIT 15
#define IA64_PTA_LFMT (1UL << IA64_PTA_VF_BIT)
#define IA64_PTA_SZ(x) (x##UL << IA64_PTA_SZ_BIT)
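// For reference (the bit positions match the macros above): the short-format
// PTA has ve in bit 0, size in bits 2..7, vf in bit 8 and the VHPT base in
// bits 15..63.  Illustrative sketch, not built:
#if 0
// compose a PTA value for a 2^24-byte (16MB) short-format VHPT; note the
// base must itself be aligned to the table size
static UINT64 pta_example(UINT64 vhpt_base)
{
    return (vhpt_base & ~((1UL << IA64_PTA_BASE_BIT) - 1))
        | IA64_PTA_SZ(24)   // size field: table is 2^24 bytes
        | 1UL;              // ve: enable the VHPT walker
}
#endif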
#define STATIC

unsigned long vcpu_verbose = 0;
#define verbose(a...) do { if (vcpu_verbose) printf(a); } while (0)
/**************************************************************************
 VCPU general register access routines
**************************************************************************/

UINT64
vcpu_get_gr(VCPU *vcpu, unsigned reg)
{
    REGS *regs = vcpu_regs(vcpu);
    UINT64 val;

    if (!reg) return 0;
    getreg(reg, &val, 0, regs);  // FIXME: handle NATs later
    return val;
}
// returns:
//   IA64_ILLOP_FAULT if the register would cause an Illegal Operation fault
//   IA64_NO_FAULT otherwise
IA64FAULT
vcpu_set_gr(VCPU *vcpu, unsigned reg, UINT64 value)
{
    REGS *regs = vcpu_regs(vcpu);
    long sof = (regs->cr_ifs) & 0x7f;

    if (!reg) return IA64_ILLOP_FAULT;
    if (reg >= sof + 32) return IA64_ILLOP_FAULT;
    setreg(reg, value, 0, regs);  // FIXME: handle NATs later
    return IA64_NO_FAULT;
}
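// Illustrative: with cr.ifs.sof == 8 the current register frame is r32..r39,
// so vcpu_set_gr(vcpu, 40, v) has reg >= sof + 32 and correctly raises
// IA64_ILLOP_FAULT, matching what a write to an out-of-frame stacked
// register would do on real hardware.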
/**************************************************************************
 VCPU privileged application register access routines
**************************************************************************/

IA64FAULT vcpu_set_ar(VCPU *vcpu, UINT64 reg, UINT64 val)
{
    if (reg == 44) return (vcpu_set_itc(vcpu, val));
    if (reg == 27) return (IA64_ILLOP_FAULT);
    if (reg > 7) return (IA64_ILLOP_FAULT);
    PSCB(vcpu, krs[reg]) = val;
#if 0
    // for now, privify kr reads so all kr accesses are privileged
    switch (reg) {
    case 0: asm volatile ("mov ar.k0=%0" :: "r"(val)); break;
    case 1: asm volatile ("mov ar.k1=%0" :: "r"(val)); break;
    case 2: asm volatile ("mov ar.k2=%0" :: "r"(val)); break;
    case 3: asm volatile ("mov ar.k3=%0" :: "r"(val)); break;
    case 4: asm volatile ("mov ar.k4=%0" :: "r"(val)); break;
    case 5: asm volatile ("mov ar.k5=%0" :: "r"(val)); break;
    case 6: asm volatile ("mov ar.k6=%0" :: "r"(val)); break;
    case 7: asm volatile ("mov ar.k7=%0" :: "r"(val)); break;
    case 27: asm volatile ("mov ar.cflg=%0" :: "r"(val)); break;
    }
#endif
    return IA64_NO_FAULT;
}
IA64FAULT vcpu_get_ar(VCPU *vcpu, UINT64 reg, UINT64 *val)
{
    if (reg > 7) return (IA64_ILLOP_FAULT);
    *val = PSCB(vcpu, krs[reg]);
    return IA64_NO_FAULT;
}
/**************************************************************************
 VCPU processor status register access routines
**************************************************************************/

void vcpu_set_metaphysical_mode(VCPU *vcpu, BOOLEAN newmode)
{
    /* only do something if mode changes */
    if (!!newmode ^ !!PSCB(vcpu, metaphysical_mode)) {
        if (newmode) set_metaphysical_rr(0, vcpu->domain->metaphysical_rid);
        else if (PSCB(vcpu, rrs[0]) != -1)
            set_one_rr(0, PSCB(vcpu, rrs[0]));
        PSCB(vcpu, metaphysical_mode) = newmode;
    }
}
IA64FAULT vcpu_reset_psr_sm(VCPU *vcpu, UINT64 imm24)
{
    struct ia64_psr psr, imm, *ipsr;
    REGS *regs = vcpu_regs(vcpu);

    // TODO: All of these bits need to be virtualized
    // TODO: Only allowed for current vcpu
    __asm__ __volatile ("mov %0=psr;;" : "=r"(psr) :: "memory");
    ipsr = (struct ia64_psr *)&regs->cr_ipsr;
    imm = *(struct ia64_psr *)&imm24;
    // interrupt delivery flag
    if (imm.i) PSCB(vcpu, interrupt_delivery_enabled) = 0;
    // interrupt collection flag
    if (imm.ic) PSCB(vcpu, interrupt_collection_enabled) = 0;
    // only the bits in the mask below are handled for now
    if (imm24 & ~(IA64_PSR_PP | IA64_PSR_UP | IA64_PSR_SP
            | IA64_PSR_I | IA64_PSR_IC | IA64_PSR_DT
            | IA64_PSR_DFL | IA64_PSR_DFH))
        return (IA64_ILLOP_FAULT);
    if (imm.dfh) ipsr->dfh = 0;
    if (imm.dfl) ipsr->dfl = 0;
    if (imm.pp) { ipsr->pp = 0; psr.pp = 0; }
    if (imm.up) { ipsr->up = 0; psr.up = 0; }
    if (imm.sp) { ipsr->sp = 0; psr.sp = 0; }
    if (imm.dt) vcpu_set_metaphysical_mode(vcpu, TRUE);
    __asm__ __volatile (";; mov psr.l=%0;; srlz.d" :: "r"(psr) : "memory");
    return IA64_NO_FAULT;
}
extern UINT64 vcpu_check_pending_interrupts(VCPU *vcpu);
#define SPURIOUS_VECTOR 0xf

IA64FAULT vcpu_set_psr_sm(VCPU *vcpu, UINT64 imm24)
{
    struct ia64_psr psr, imm, *ipsr;
    REGS *regs = vcpu_regs(vcpu);
    UINT64 mask, enabling_interrupts = 0;

    // TODO: All of these bits need to be virtualized
    __asm__ __volatile ("mov %0=psr;;" : "=r"(psr) :: "memory");
    imm = *(struct ia64_psr *)&imm24;
    ipsr = (struct ia64_psr *)&regs->cr_ipsr;
    // just handle psr.sp,pp and psr.i,ic (and user mask) for now
    mask = IA64_PSR_PP | IA64_PSR_SP | IA64_PSR_I | IA64_PSR_IC | IA64_PSR_UM |
        IA64_PSR_DT | IA64_PSR_DFL | IA64_PSR_DFH;
    if (imm24 & ~mask) return (IA64_ILLOP_FAULT);
    if (imm.dfh) ipsr->dfh = 1;
    if (imm.dfl) ipsr->dfl = 1;
    if (imm.pp) { ipsr->pp = 1; psr.pp = 1; }
    if (imm.sp) { ipsr->sp = 1; psr.sp = 1; }
    if (imm.i) {
        if (!PSCB(vcpu, interrupt_delivery_enabled)) {
            //printf("vcpu_set_psr_sm: psr.i 0->1 ");
            enabling_interrupts = 1;
        }
        PSCB(vcpu, interrupt_delivery_enabled) = 1;
    }
    if (imm.ic) PSCB(vcpu, interrupt_collection_enabled) = 1;
    // TODO: do this faster
    if (imm.mfl) { ipsr->mfl = 1; psr.mfl = 1; }
    if (imm.ac) { ipsr->ac = 1; psr.ac = 1; }
    if (imm.up) { ipsr->up = 1; psr.up = 1; }
    if (imm.be) {
        printf("*** DOMAIN TRYING TO TURN ON BIG-ENDIAN!!!\n");
        return (IA64_ILLOP_FAULT);
    }
    if (imm.dt) vcpu_set_metaphysical_mode(vcpu, FALSE);
    __asm__ __volatile (";; mov psr.l=%0;; srlz.d" :: "r"(psr) : "memory");
#if 0 // now done with deliver_pending_interrupts
    if (enabling_interrupts) {
        if (vcpu_check_pending_interrupts(vcpu) != SPURIOUS_VECTOR) {
            //printf("with interrupts pending\n");
            return IA64_EXTINT_VECTOR;
        }
        //else printf("but nothing pending\n");
    }
#endif
    return IA64_NO_FAULT;
}
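// Illustrative sketch (not built): how a guest "ssm psr.i" lands here.  The
// privop decoder (in the privileged-operation emulation code, not this file)
// extracts the imm24 mask from the instruction and calls vcpu_set_psr_sm();
// only the virtual i/ic state in the PSCB changes, the machine psr stays
// under Xen's control.
#if 0
static void ssm_i_example(VCPU *vcpu)
{
    (void)vcpu_set_psr_sm(vcpu, IA64_PSR_I);  // guest executed "ssm psr.i"
    // vcpu_get_psr_i(vcpu) is now TRUE, so any bits already set in irr
    // become deliverable (see deliver_pending_interrupts)
}
#endif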
IA64FAULT vcpu_set_psr_l(VCPU *vcpu, UINT64 val)
{
    struct ia64_psr psr, newpsr, *ipsr;
    REGS *regs = vcpu_regs(vcpu);
    UINT64 enabling_interrupts = 0;

    // TODO: All of these bits need to be virtualized
    __asm__ __volatile ("mov %0=psr;;" : "=r"(psr) :: "memory");
    newpsr = *(struct ia64_psr *)&val;
    ipsr = (struct ia64_psr *)&regs->cr_ipsr;
    // just handle psr.up and psr.pp for now
    //if (val & ~(IA64_PSR_PP | IA64_PSR_UP | IA64_PSR_SP)) return (IA64_ILLOP_FAULT);
    // however trying to set other bits can't be an error as it is in ssm
    if (newpsr.dfh) ipsr->dfh = 1;
    if (newpsr.dfl) ipsr->dfl = 1;
    if (newpsr.pp) { ipsr->pp = 1; psr.pp = 1; }
    if (newpsr.up) { ipsr->up = 1; psr.up = 1; }
    if (newpsr.sp) { ipsr->sp = 1; psr.sp = 1; }
    if (newpsr.i) {
        if (!PSCB(vcpu, interrupt_delivery_enabled))
            enabling_interrupts = 1;
        PSCB(vcpu, interrupt_delivery_enabled) = 1;
    }
    if (newpsr.ic) PSCB(vcpu, interrupt_collection_enabled) = 1;
    if (newpsr.mfl) { ipsr->mfl = 1; psr.mfl = 1; }
    if (newpsr.ac) { ipsr->ac = 1; psr.ac = 1; }
    if (newpsr.dt && newpsr.rt) vcpu_set_metaphysical_mode(vcpu, FALSE);
    else vcpu_set_metaphysical_mode(vcpu, TRUE);
    if (newpsr.be) {
        printf("*** DOMAIN TRYING TO TURN ON BIG-ENDIAN!!!\n");
        return (IA64_ILLOP_FAULT);
    }
    //__asm__ __volatile (";; mov psr.l=%0;; srlz.d" :: "r"(psr) : "memory");
#if 0 // now done with deliver_pending_interrupts
    if (enabling_interrupts) {
        if (vcpu_check_pending_interrupts(vcpu) != SPURIOUS_VECTOR)
            return IA64_EXTINT_VECTOR;
    }
#endif
    return IA64_NO_FAULT;
}
IA64FAULT vcpu_get_psr(VCPU *vcpu, UINT64 *pval)
{
    UINT64 psr;
    struct ia64_psr newpsr;

    // TODO: This needs to return a "filtered" view of
    // the psr, not the actual psr.  Probably the psr needs
    // to be a field in regs (in addition to ipsr).
    __asm__ __volatile ("mov %0=psr;;" : "=r"(psr) :: "memory");
    newpsr = *(struct ia64_psr *)&psr;
    if (newpsr.cpl == 2) newpsr.cpl = 0;
    if (PSCB(vcpu, interrupt_delivery_enabled)) newpsr.i = 1;
    else newpsr.i = 0;
    if (PSCB(vcpu, interrupt_collection_enabled)) newpsr.ic = 1;
    else newpsr.ic = 0;
    *pval = *(unsigned long *)&newpsr;
    return IA64_NO_FAULT;
}

BOOLEAN vcpu_get_psr_ic(VCPU *vcpu)
{
    return !!PSCB(vcpu, interrupt_collection_enabled);
}

BOOLEAN vcpu_get_psr_i(VCPU *vcpu)
{
    return !!PSCB(vcpu, interrupt_delivery_enabled);
}
UINT64 vcpu_get_ipsr_int_state(VCPU *vcpu, UINT64 prevpsr)
{
    UINT64 dcr = PSCB(vcpu, dcr);
    PSR psr = {0};

    //printf("*** vcpu_get_ipsr_int_state (0x%016lx)...", prevpsr);
    psr.i64 = prevpsr;
    psr.ia64_psr.be = 0; if (dcr & IA64_DCR_BE) psr.ia64_psr.be = 1;
    psr.ia64_psr.pp = 0; if (dcr & IA64_DCR_PP) psr.ia64_psr.pp = 1;
    psr.ia64_psr.ic = PSCB(vcpu, interrupt_collection_enabled);
    psr.ia64_psr.i = PSCB(vcpu, interrupt_delivery_enabled);
    psr.ia64_psr.bn = PSCB(vcpu, banknum);
    psr.ia64_psr.dt = 1; psr.ia64_psr.it = 1; psr.ia64_psr.rt = 1;
    if (psr.ia64_psr.cpl == 2) psr.ia64_psr.cpl = 0; // !!!! fool domain
    // psr.pk = 1;
    //printf("returns 0x%016lx...", psr.i64);
    return psr.i64;
}
/**************************************************************************
 VCPU control register access routines
**************************************************************************/

IA64FAULT vcpu_get_dcr(VCPU *vcpu, UINT64 *pval)
{
    extern unsigned long privop_trace;
    //privop_trace = 0;
    //verbose("vcpu_get_dcr: called @%p\n", PSCB(vcpu, iip));
    // Reads of cr.dcr on Xen always have the sign bit set, so
    // a domain can differentiate whether it is running on SP or not
    *pval = PSCB(vcpu, dcr) | 0x8000000000000000L;
    return (IA64_NO_FAULT);
}

IA64FAULT vcpu_get_iva(VCPU *vcpu, UINT64 *pval)
{
    *pval = PSCB(vcpu, iva) & ~0x7fffL;
    return (IA64_NO_FAULT);
}

IA64FAULT vcpu_get_pta(VCPU *vcpu, UINT64 *pval)
{
    *pval = PSCB(vcpu, pta);
    return (IA64_NO_FAULT);
}

IA64FAULT vcpu_get_ipsr(VCPU *vcpu, UINT64 *pval)
{
    //REGS *regs = vcpu_regs(vcpu);
    //*pval = regs->cr_ipsr;
    *pval = PSCB(vcpu, ipsr);
    return (IA64_NO_FAULT);
}

IA64FAULT vcpu_get_isr(VCPU *vcpu, UINT64 *pval)
{
    *pval = PSCB(vcpu, isr);
    return (IA64_NO_FAULT);
}

IA64FAULT vcpu_get_iip(VCPU *vcpu, UINT64 *pval)
{
    //REGS *regs = vcpu_regs(vcpu);
    //*pval = regs->cr_iip;
    *pval = PSCB(vcpu, iip);
    return (IA64_NO_FAULT);
}

IA64FAULT vcpu_get_ifa(VCPU *vcpu, UINT64 *pval)
{
    UINT64 val = PSCB(vcpu, ifa);
    *pval = val;
    return (IA64_NO_FAULT);
}
unsigned long vcpu_get_itir_on_fault(VCPU *vcpu, UINT64 ifa)
{
    ia64_rr rr;

    rr.rrval = 0;
    rr.ps = vcpu_get_rr_ps(vcpu, ifa);
    rr.rid = vcpu_get_rr_rid(vcpu, ifa);
    return (rr.rrval);
}
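// For reference: the value built above reuses the region register layout
// (ps in bits 2..7, rid in bits 8..31), which is the same format the guest
// expects to read back from cr.itir after a TLB miss.  E.g. a fault on a
// region with 16KB pages (ps = 14) and rid 0x1234 yields
// (0x1234 << 8) | (14 << 2).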
IA64FAULT vcpu_get_itir(VCPU *vcpu, UINT64 *pval)
{
    UINT64 val = PSCB(vcpu, itir);
    *pval = val;
    return (IA64_NO_FAULT);
}

IA64FAULT vcpu_get_iipa(VCPU *vcpu, UINT64 *pval)
{
    UINT64 val = PSCB(vcpu, iipa);
    // SP entry code does not save iipa yet nor does it get
    // properly delivered in the pscb
    printf("*** vcpu_get_iipa: cr.iipa not fully implemented yet!!\n");
    *pval = val;
    return (IA64_NO_FAULT);
}

IA64FAULT vcpu_get_ifs(VCPU *vcpu, UINT64 *pval)
{
    //PSCB(vcpu, ifs) = PSCB(vcpu)->regs.cr_ifs;
    //*pval = PSCB(vcpu, regs).cr_ifs;
    *pval = PSCB(vcpu, ifs);
    PSCB(vcpu, incomplete_regframe) = 0;
    return (IA64_NO_FAULT);
}

IA64FAULT vcpu_get_iim(VCPU *vcpu, UINT64 *pval)
{
    UINT64 val = PSCB(vcpu, iim);
    *pval = val;
    return (IA64_NO_FAULT);
}

IA64FAULT vcpu_get_iha(VCPU *vcpu, UINT64 *pval)
{
    //return vcpu_thash(vcpu, PSCB(vcpu, ifa), pval);
    UINT64 val = PSCB(vcpu, iha);
    *pval = val;
    return (IA64_NO_FAULT);
}
IA64FAULT vcpu_set_dcr(VCPU *vcpu, UINT64 val)
{
    extern unsigned long privop_trace;
    //privop_trace = 1;
    // Reads of cr.dcr on SP always have the sign bit set, so
    // a domain can differentiate whether it is running on SP or not
    // Thus, writes of DCR should ignore the sign bit
    //verbose("vcpu_set_dcr: called\n");
    PSCB(vcpu, dcr) = val & ~0x8000000000000000L;
    return (IA64_NO_FAULT);
}

IA64FAULT vcpu_set_iva(VCPU *vcpu, UINT64 val)
{
    PSCB(vcpu, iva) = val & ~0x7fffL;
    return (IA64_NO_FAULT);
}

IA64FAULT vcpu_set_pta(VCPU *vcpu, UINT64 val)
{
    if (val & IA64_PTA_LFMT) {
        printf("*** No support for VHPT long format yet!!\n");
        return (IA64_ILLOP_FAULT);
    }
    if (val & (0x3f << 9)) /* reserved fields */ return IA64_RSVDREG_FAULT;
    if (val & 2) /* reserved fields */ return IA64_RSVDREG_FAULT;
    PSCB(vcpu, pta) = val;
    return IA64_NO_FAULT;
}
IA64FAULT vcpu_set_ipsr(VCPU *vcpu, UINT64 val)
{
    PSCB(vcpu, ipsr) = val;
    return IA64_NO_FAULT;
}

IA64FAULT vcpu_set_isr(VCPU *vcpu, UINT64 val)
{
    PSCB(vcpu, isr) = val;
    return IA64_NO_FAULT;
}

IA64FAULT vcpu_set_iip(VCPU *vcpu, UINT64 val)
{
    PSCB(vcpu, iip) = val;
    return IA64_NO_FAULT;
}

IA64FAULT vcpu_increment_iip(VCPU *vcpu)
{
    REGS *regs = vcpu_regs(vcpu);
    struct ia64_psr *ipsr = (struct ia64_psr *)&regs->cr_ipsr;
    if (ipsr->ri == 2) { ipsr->ri = 0; regs->cr_iip += 16; }
    else ipsr->ri++;
    return (IA64_NO_FAULT);
}

IA64FAULT vcpu_set_ifa(VCPU *vcpu, UINT64 val)
{
    PSCB(vcpu, ifa) = val;
    return IA64_NO_FAULT;
}

IA64FAULT vcpu_set_itir(VCPU *vcpu, UINT64 val)
{
    PSCB(vcpu, itir) = val;
    return IA64_NO_FAULT;
}

IA64FAULT vcpu_set_iipa(VCPU *vcpu, UINT64 val)
{
    // SP entry code does not save iipa yet nor does it get
    // properly delivered in the pscb
    printf("*** vcpu_set_iipa: cr.iipa not fully implemented yet!!\n");
    PSCB(vcpu, iipa) = val;
    return IA64_NO_FAULT;
}

IA64FAULT vcpu_set_ifs(VCPU *vcpu, UINT64 val)
{
    //REGS *regs = vcpu_regs(vcpu);
    PSCB(vcpu, ifs) = val;
    return IA64_NO_FAULT;
}

IA64FAULT vcpu_set_iim(VCPU *vcpu, UINT64 val)
{
    PSCB(vcpu, iim) = val;
    return IA64_NO_FAULT;
}

IA64FAULT vcpu_set_iha(VCPU *vcpu, UINT64 val)
{
    PSCB(vcpu, iha) = val;
    return IA64_NO_FAULT;
}
/**************************************************************************
 VCPU interrupt control register access routines
**************************************************************************/

void vcpu_pend_interrupt(VCPU *vcpu, UINT64 vector)
{
    if (vector & ~0xff) {
        printf("vcpu_pend_interrupt: bad vector\n");
        return;
    }
    if (!test_bit(vector, PSCB(vcpu, delivery_mask))) return;
    if (test_bit(vector, PSCB(vcpu, irr))) {
        //printf("vcpu_pend_interrupt: overrun\n");
    }
    set_bit(vector, PSCB(vcpu, irr));
    PSCB(vcpu, pending_interruption) = 1;
}

void early_tick(VCPU *vcpu)
{
    UINT64 *p = &PSCB(vcpu, irr[3]);
    printf("vcpu_check_pending: about to deliver early tick\n");
    printf("&irr[0]=%p, irr[0]=0x%lx\n", p, *p);
}
#define IA64_TPR_MMI 0x10000
#define IA64_TPR_MIC 0x000f0

/* checks to see if a VCPU has any unmasked pending interrupts
 * if so, returns the highest, else returns SPURIOUS_VECTOR */
/* NOTE: Since this gets called from vcpu_get_ivr() and the
 * semantics of "mov rx=cr.ivr" ignore the setting of the psr.i bit,
 * this routine also ignores pscb.interrupt_delivery_enabled
 * and this must be checked independently; see vcpu_deliverable_interrupts() */
UINT64 vcpu_check_pending_interrupts(VCPU *vcpu)
{
    UINT64 *p, *q, *r, bits, bitnum, mask, i, vector;

    p = &PSCB(vcpu, irr[3]);
    q = &PSCB(vcpu, delivery_mask[3]);
    r = &PSCB(vcpu, insvc[3]);
    for (i = 3; ; p--, q--, r--, i--) {
        bits = *p & *q;
        if (bits) break; // got a potential interrupt
        if (*r) {
            // nothing in this word which is pending+inservice
            // but there is one inservice which masks lower
            return SPURIOUS_VECTOR;
        }
        if (i == 0) {
            // checked all bits... nothing pending+inservice
            return SPURIOUS_VECTOR;
        }
    }
    // have a pending,deliverable interrupt... see if it is masked
    bitnum = ia64_fls(bits);
    //printf("XXXXXXX vcpu_check_pending_interrupts: got bitnum=%p...", bitnum);
    vector = bitnum + (i * 64);
    mask = 1L << bitnum;
    //printf("XXXXXXX vcpu_check_pending_interrupts: got vector=%p...", vector);
    if (*r >= mask) {
        // masked by equal inservice
        //printf("but masked by equal inservice\n");
        return SPURIOUS_VECTOR;
    }
    if (PSCB(vcpu, tpr) & IA64_TPR_MMI) {
        // tpr.mmi is set
        //printf("but masked by tpr.mmi\n");
        return SPURIOUS_VECTOR;
    }
    if (((PSCB(vcpu, tpr) & IA64_TPR_MIC) + 15) >= vector) {
        // tpr.mic masks this class
        //printf("but masked by tpr.mic\n");
        return SPURIOUS_VECTOR;
    }

    //printf("returned to caller\n");
#if 0
    if (vector == (PSCB(vcpu, itv) & 0xff)) {
        UINT64 now = ia64_get_itc();
        UINT64 itm = PSCB(vcpu, domain_itm);
        if (now < itm) early_tick(vcpu);
    }
#endif
    return vector;
}
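// Worked example (illustrative): suppose only vector 0x62 is pending and
// nothing is in-service.  tpr.mmi set masks everything.  Otherwise the
// class check above masks it iff ((tpr & IA64_TPR_MIC) + 15) >= 0x62,
// i.e. iff tpr.mic >= 6: mic = 5 gives 0x50 + 15 = 0x5f < 0x62 (delivered),
// mic = 6 gives 0x60 + 15 = 0x6f >= 0x62 (masked).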
UINT64 vcpu_deliverable_interrupts(VCPU *vcpu)
{
    return (vcpu_get_psr_i(vcpu) &&
        vcpu_check_pending_interrupts(vcpu) != SPURIOUS_VECTOR);
}

UINT64 vcpu_deliverable_timer(VCPU *vcpu)
{
    return (vcpu_get_psr_i(vcpu) &&
        vcpu_check_pending_interrupts(vcpu) == PSCB(vcpu, itv));
}

IA64FAULT vcpu_get_lid(VCPU *vcpu, UINT64 *pval)
{
    extern unsigned long privop_trace;
    //privop_trace = 1;
    //TODO: Implement this
    printf("vcpu_get_lid: WARNING: Getting cr.lid always returns zero\n");
    *pval = 0;
    return IA64_NO_FAULT;
}
IA64FAULT vcpu_get_ivr(VCPU *vcpu, UINT64 *pval)
{
    int i;
    UINT64 vector, mask;
#if 1
    static char firstivr = 1;
    static char firsttime[256];
    if (firstivr) {
        int i;
        for (i = 0; i < 256; i++) firsttime[i] = 1;
        firstivr = 0;
    }
#endif

    vector = vcpu_check_pending_interrupts(vcpu);
    if (vector == SPURIOUS_VECTOR) {
        PSCB(vcpu, pending_interruption) = 0;
        *pval = vector;
        return IA64_NO_FAULT;
    }
    // now have an unmasked, pending, deliverable vector!
    // getting ivr has "side effects"
#if 0
    if (firsttime[vector]) {
        printf("*** First get_ivr on vector=%d,itc=%lx\n",
            vector, ia64_get_itc());
        firsttime[vector] = 0;
    }
#endif
    i = vector >> 6;
    mask = 1L << (vector & 0x3f);
    //printf("ZZZZZZ vcpu_get_ivr: setting insvc mask for vector %ld\n", vector);
    PSCB(vcpu, insvc[i]) |= mask;
    PSCB(vcpu, irr[i]) &= ~mask;
    //PSCB(vcpu, pending_interruption)--;
    *pval = vector;
    // if delivering a timer interrupt, remember domain_itm
    if (vector == (PSCB(vcpu, itv) & 0xff)) {
        PSCB(vcpu, domain_itm_last) = PSCB(vcpu, domain_itm);
    }
    return IA64_NO_FAULT;
}
IA64FAULT vcpu_get_tpr(VCPU *vcpu, UINT64 *pval)
{
    *pval = PSCB(vcpu, tpr);
    return (IA64_NO_FAULT);
}

IA64FAULT vcpu_get_eoi(VCPU *vcpu, UINT64 *pval)
{
    *pval = 0L; // reads of eoi always return 0
    return (IA64_NO_FAULT);
}

IA64FAULT vcpu_get_irr0(VCPU *vcpu, UINT64 *pval)
{
#ifndef IRR_USE_FIXED
    printk("vcpu_get_irr: called, not implemented yet\n");
    return IA64_ILLOP_FAULT;
#else
    *pval = vcpu->irr[0];
    return (IA64_NO_FAULT);
#endif
}

IA64FAULT vcpu_get_irr1(VCPU *vcpu, UINT64 *pval)
{
#ifndef IRR_USE_FIXED
    printk("vcpu_get_irr: called, not implemented yet\n");
    return IA64_ILLOP_FAULT;
#else
    *pval = vcpu->irr[1];
    return (IA64_NO_FAULT);
#endif
}

IA64FAULT vcpu_get_irr2(VCPU *vcpu, UINT64 *pval)
{
#ifndef IRR_USE_FIXED
    printk("vcpu_get_irr: called, not implemented yet\n");
    return IA64_ILLOP_FAULT;
#else
    *pval = vcpu->irr[2];
    return (IA64_NO_FAULT);
#endif
}

IA64FAULT vcpu_get_irr3(VCPU *vcpu, UINT64 *pval)
{
#ifndef IRR_USE_FIXED
    printk("vcpu_get_irr: called, not implemented yet\n");
    return IA64_ILLOP_FAULT;
#else
    *pval = vcpu->irr[3];
    return (IA64_NO_FAULT);
#endif
}
IA64FAULT vcpu_get_itv(VCPU *vcpu, UINT64 *pval)
{
    *pval = PSCB(vcpu, itv);
    return (IA64_NO_FAULT);
}

IA64FAULT vcpu_get_pmv(VCPU *vcpu, UINT64 *pval)
{
    *pval = PSCB(vcpu, pmv);
    return (IA64_NO_FAULT);
}

IA64FAULT vcpu_get_cmcv(VCPU *vcpu, UINT64 *pval)
{
    *pval = PSCB(vcpu, cmcv);
    return (IA64_NO_FAULT);
}

IA64FAULT vcpu_get_lrr0(VCPU *vcpu, UINT64 *pval)
{
    // fix this when setting values other than m-bit is supported
    printf("vcpu_get_lrr0: Unmasked interrupts unsupported\n");
    *pval = (1L << 16);
    return (IA64_NO_FAULT);
}

IA64FAULT vcpu_get_lrr1(VCPU *vcpu, UINT64 *pval)
{
    // fix this when setting values other than m-bit is supported
    printf("vcpu_get_lrr1: Unmasked interrupts unsupported\n");
    *pval = (1L << 16);
    return (IA64_NO_FAULT);
}

IA64FAULT vcpu_set_lid(VCPU *vcpu, UINT64 val)
{
    printf("vcpu_set_lid: Setting cr.lid is unsupported\n");
    return (IA64_ILLOP_FAULT);
}
IA64FAULT vcpu_set_tpr(VCPU *vcpu, UINT64 val)
{
    if (val & 0xff00) return IA64_RSVDREG_FAULT;
    PSCB(vcpu, tpr) = val;
    //PSCB(vcpu, pending_interruption) = 1;
    return (IA64_NO_FAULT);
}

IA64FAULT vcpu_set_eoi(VCPU *vcpu, UINT64 val)
{
    UINT64 *p, bits, vec, bitnum;
    int i;

    p = &PSCB(vcpu, insvc[3]);
    for (i = 3; (i >= 0) && !(bits = *p); i--, p--);
    if (i < 0) {
        printf("Trying to EOI interrupt when none are in-service.\r\n");
        return (IA64_NO_FAULT);  // nothing in-service: treat as a no-op
    }
    bitnum = ia64_fls(bits);
    vec = bitnum + (i * 64);
    /* clear the correct bit */
    bits &= ~(1L << bitnum);
    *p = bits;
    /* clearing an eoi bit may unmask another pending interrupt... */
    if (PSCB(vcpu, interrupt_delivery_enabled)) { // but only if enabled...
        // worry about this later... Linux only calls eoi
        // with interrupts disabled
        printf("Trying to EOI interrupt with interrupts enabled\r\n");
    }
    //printf("YYYYY vcpu_set_eoi: Successful\n");
    return (IA64_NO_FAULT);
}
IA64FAULT vcpu_set_lrr0(VCPU *vcpu, UINT64 val)
{
    if (!(val & (1L << 16))) {
        printf("vcpu_set_lrr0: Unmasked interrupts unsupported\n");
        return (IA64_ILLOP_FAULT);
    }
    // no place to save this state but nothing to do anyway
    return (IA64_NO_FAULT);
}

IA64FAULT vcpu_set_lrr1(VCPU *vcpu, UINT64 val)
{
    if (!(val & (1L << 16))) {
        printf("vcpu_set_lrr1: Unmasked interrupts unsupported\n");
        return (IA64_ILLOP_FAULT);
    }
    // no place to save this state but nothing to do anyway
    return (IA64_NO_FAULT);
}
IA64FAULT vcpu_set_itv(VCPU *vcpu, UINT64 val)
{
    extern unsigned long privop_trace;
    //privop_trace = 1;
    if (val & 0xef00) return (IA64_ILLOP_FAULT);
    PSCB(vcpu, itv) = val;
    if (val & 0x10000) {
        printf("**** vcpu_set_itv(%lx): vitm=%lx, setting to 0\n",
            val, PSCB(vcpu, domain_itm));
        PSCB(vcpu, domain_itm) = 0;
    }
    else vcpu_enable_timer(vcpu, 1000000L);
    return (IA64_NO_FAULT);
}

IA64FAULT vcpu_set_pmv(VCPU *vcpu, UINT64 val)
{
    if (val & 0xef00) /* reserved fields */ return IA64_RSVDREG_FAULT;
    PSCB(vcpu, pmv) = val;
    return (IA64_NO_FAULT);
}

IA64FAULT vcpu_set_cmcv(VCPU *vcpu, UINT64 val)
{
    if (val & 0xef00) /* reserved fields */ return IA64_RSVDREG_FAULT;
    PSCB(vcpu, cmcv) = val;
    return (IA64_NO_FAULT);
}
/**************************************************************************
 Interval timer routines
**************************************************************************/

BOOLEAN vcpu_timer_disabled(VCPU *vcpu)
{
    UINT64 itv = PSCB(vcpu, itv);
    return (!itv || !!(itv & 0x10000));
}

BOOLEAN vcpu_timer_inservice(VCPU *vcpu)
{
    UINT64 itv = PSCB(vcpu, itv);
    return (test_bit(itv, PSCB(vcpu, insvc)));
}

BOOLEAN vcpu_timer_expired(VCPU *vcpu)
{
    unsigned long domain_itm = PSCB(vcpu, domain_itm);
    unsigned long now = ia64_get_itc();

    if (!domain_itm) return FALSE;
    if (now < domain_itm) return FALSE;
    if (vcpu_timer_disabled(vcpu)) return FALSE;
    return TRUE;
}
void vcpu_safe_set_itm(unsigned long val)
{
    unsigned long epsilon = 100;
    UINT64 now = ia64_get_itc();

    local_irq_disable();
    while (1) {
        //printf("*** vcpu_safe_set_itm: Setting itm to %lx, itc=%lx\n", val, now);
        ia64_set_itm(val);
        if (val > (now = ia64_get_itc())) break;
        val = now + epsilon;
        epsilon <<= 1;
    }
    local_irq_enable();
}
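// The retry loop above guards against the race where itc passes the
// requested match value before the ia64_set_itm() write takes effect; a
// match value in the past would never fire.  Illustrative trace: if itc
// reaches the value just as itm is written, the loop rewrites itm to
// now + 100, then now + 200, doubling epsilon until the write demonstrably
// lands in the future.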
void vcpu_set_next_timer(VCPU *vcpu)
{
    UINT64 d = PSCB(vcpu, domain_itm);
    //UINT64 s = PSCB(vcpu, xen_itm);
    UINT64 s = local_cpu_data->itm_next;
    UINT64 now = ia64_get_itc();
    //UINT64 interval = PSCB(vcpu, xen_timer_interval);

    /* gloss over the wraparound problem for now... we know it exists
     * but it doesn't matter right now */

#if 0
    /* ensure at least next SP tick is in the future */
    if (!interval) PSCB(vcpu, xen_itm) = now +
#if 0
        (running_on_sim() ? SIM_DEFAULT_CLOCK_RATE :
            DEFAULT_CLOCK_RATE);
#else
        3000000;
    //printf("vcpu_set_next_timer: HACK!\n");
#endif
#if 0
    if (PSCB(vcpu, xen_itm) < now)
        while (PSCB(vcpu, xen_itm) < now + (interval >> 1))
            PSCB(vcpu, xen_itm) += interval;
#endif
#endif

    if (is_idle_task(vcpu->domain)) {
        printf("****** vcpu_set_next_timer called during idle!!\n");
    }
    //s = PSCB(vcpu, xen_itm);
    if (d && (d > now) && (d < s)) {
        vcpu_safe_set_itm(d);
        //using_domain_as_itm++;
    }
    else {
        vcpu_safe_set_itm(s);
        //using_xen_as_itm++;
    }
}
// parameter is a time interval specified in cycles
void vcpu_enable_timer(VCPU *vcpu, UINT64 cycles)
{
    PSCB(vcpu, xen_timer_interval) = cycles;
    vcpu_set_next_timer(vcpu);
    printf("vcpu_enable_timer: interval set to %ld cycles\n",
        PSCB(vcpu, xen_timer_interval));
    __set_bit(PSCB(vcpu, itv), PSCB(vcpu, delivery_mask));
}
IA64FAULT vcpu_set_itm(VCPU *vcpu, UINT64 val)
{
    UINT64 now = ia64_get_itc();

    //if (val < now) val = now + 1000;
    //printf("*** vcpu_set_itm: called with %lx\n", val);
    PSCB(vcpu, domain_itm) = val;
    vcpu_set_next_timer(vcpu);
    return (IA64_NO_FAULT);
}
IA64FAULT vcpu_set_itc(VCPU *vcpu, UINT64 val)
{
    UINT64 oldnow = ia64_get_itc();
    UINT64 olditm = PSCB(vcpu, domain_itm);
    unsigned long d = olditm - oldnow;
    unsigned long x = local_cpu_data->itm_next - oldnow;

    UINT64 newnow = val, min_delta;

    local_irq_disable();
    if (olditm) {
        printf("**** vcpu_set_itc(%lx): vitm changed to %lx\n", val, newnow + d);
        PSCB(vcpu, domain_itm) = newnow + d;
    }
    local_cpu_data->itm_next = newnow + x;
    d = PSCB(vcpu, domain_itm);
    x = local_cpu_data->itm_next;

    ia64_set_itc(newnow);
    if (d && (d > newnow) && (d < x)) {
        vcpu_safe_set_itm(d);
        //using_domain_as_itm++;
    }
    else {
        vcpu_safe_set_itm(x);
        //using_xen_as_itm++;
    }
    local_irq_enable();
    return (IA64_NO_FAULT);
}
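// Illustrative arithmetic for the code above: rewriting itc preserves the
// *distance* to both pending timer matches.  If oldnow = 1000 with the
// domain's vitm at 1500 and Xen's itm_next at 1200, then after
// vcpu_set_itc(vcpu, 5000) the vitm becomes 5500 and itm_next 5200, so the
// relative expiry times are unchanged.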
IA64FAULT vcpu_get_itm(VCPU *vcpu, UINT64 *pval)
{
    //FIXME: Implement this
    printf("vcpu_get_itm: Getting cr.itm is unsupported... continuing\n");
    return (IA64_NO_FAULT);
    //return (IA64_ILLOP_FAULT);
}

IA64FAULT vcpu_get_itc(VCPU *vcpu, UINT64 *pval)
{
    //TODO: Implement this
    printf("vcpu_get_itc: Getting ar.itc is unsupported\n");
    return (IA64_ILLOP_FAULT);
}
void vcpu_pend_timer(VCPU *vcpu)
{
    UINT64 itv = PSCB(vcpu, itv) & 0xff;

    if (vcpu_timer_disabled(vcpu)) return;
    //if (vcpu_timer_inservice(vcpu)) return;
    if (PSCB(vcpu, domain_itm_last) == PSCB(vcpu, domain_itm)) {
        // already delivered an interrupt for this so
        // don't deliver another
        return;
    }
#if 0
    // attempt to flag "timer tick before its due" source
    {
        UINT64 itm = PSCB(vcpu, domain_itm);
        UINT64 now = ia64_get_itc();
        if (now < itm) printf("******* vcpu_pend_timer: pending before due!\n");
    }
#endif
    vcpu_pend_interrupt(vcpu, itv);
}
// returns true if ready to deliver a timer interrupt too early
UINT64 vcpu_timer_pending_early(VCPU *vcpu)
{
    UINT64 now = ia64_get_itc();
    UINT64 itm = PSCB(vcpu, domain_itm);

    if (vcpu_timer_disabled(vcpu)) return 0;
    if (!itm) return 0;
    return (vcpu_deliverable_timer(vcpu) && (now < itm));
}
//FIXME: This is a hack because everything dies if a timer tick is lost
void vcpu_poke_timer(VCPU *vcpu)
{
    UINT64 itv = PSCB(vcpu, itv) & 0xff;
    UINT64 now = ia64_get_itc();
    UINT64 itm = PSCB(vcpu, domain_itm);
    UINT64 irr;

    if (vcpu_timer_disabled(vcpu)) return;
    if (!itm) return;
    if (itv != 0xefL) {
        printf("vcpu_poke_timer: unimplemented itv=%lx!\n", itv);
        while (1);
    }
    // using 0xef instead of itv so can get real irr
    if (now > itm && !test_bit(0xefL, PSCB(vcpu, insvc))) {
        if (!test_bit(0xefL, PSCB(vcpu, irr))) {
            irr = ia64_getreg(_IA64_REG_CR_IRR3);
            if (irr & (1L << (0xef - 0xc0))) return;
            if (now - itm > 0x800000)
                printf("*** poking timer: now=%lx,vitm=%lx,xitm=%lx,itm=%lx\n",
                    now, itm, local_cpu_data->itm_next, ia64_get_itm());
            vcpu_pend_timer(vcpu);
        }
    }
}
/**************************************************************************
 Privileged operation emulation routines
**************************************************************************/

IA64FAULT vcpu_force_data_miss(VCPU *vcpu, UINT64 ifa)
{
    PSCB(vcpu, ifa) = ifa;  // privop traps don't set ifa so do it here
    return (IA64_DATA_TLB_VECTOR | IA64_FORCED_IFA);
}
IA64FAULT vcpu_rfi(VCPU *vcpu)
{
    // TODO: Only allowed for current vcpu
    PSR psr;
    UINT64 int_enable, regspsr = 0;
    UINT64 ifs;
    REGS *regs = vcpu_regs(vcpu);
    extern void dorfirfi(void);

    psr.i64 = PSCB(vcpu, ipsr);
    if (psr.ia64_psr.cpl < 3) psr.ia64_psr.cpl = 2;
    if (psr.ia64_psr.i) PSCB(vcpu, interrupt_delivery_enabled) = 1;
    int_enable = psr.ia64_psr.i;
    if (psr.ia64_psr.ic) PSCB(vcpu, interrupt_collection_enabled) = 1;
    if (psr.ia64_psr.dt && psr.ia64_psr.rt && psr.ia64_psr.it)
        vcpu_set_metaphysical_mode(vcpu, FALSE);
    else vcpu_set_metaphysical_mode(vcpu, TRUE);
    psr.ia64_psr.ic = 1; psr.ia64_psr.i = 1;
    psr.ia64_psr.dt = 1; psr.ia64_psr.rt = 1; psr.ia64_psr.it = 1;
    psr.ia64_psr.bn = 1;
    //psr.pk = 1;  // checking pkeys shouldn't be a problem but seems broken
    if (psr.ia64_psr.be) {
        printf("*** DOMAIN TRYING TO TURN ON BIG-ENDIAN!!!\n");
        return (IA64_ILLOP_FAULT);
    }
    PSCB(vcpu, incomplete_regframe) = 0; // is this necessary?
    ifs = PSCB(vcpu, ifs);
    //if ((ifs & regs->cr_ifs & 0x8000000000000000L) && ifs != regs->cr_ifs) {
    //if ((ifs & 0x8000000000000000L) && ifs != regs->cr_ifs) {
    if (ifs & regs->cr_ifs & 0x8000000000000000L) {
#define SI_OFS(x) ((char *)(&PSCB(vcpu, x)) - (char *)(vcpu->vcpu_info))
        if (SI_OFS(iip) != 0x10 || SI_OFS(ipsr) != 0x08 || SI_OFS(ifs) != 0x18) {
            printf("SI_CR_IIP/IPSR/IFS_OFFSET CHANGED, SEE dorfirfi\n");
            printf("SI_CR_IIP=0x%x,IPSR=0x%x,IFS_OFFSET=0x%x\n",
                SI_OFS(iip), SI_OFS(ipsr), SI_OFS(ifs));
            while (1);
        }
        // TODO: validate PSCB(vcpu, iip)
        // TODO: PSCB(vcpu, ipsr) = psr;
        PSCB(vcpu, ipsr) = psr.i64;
        // now set up the trampoline
        regs->cr_iip = *(unsigned long *)dorfirfi; // function pointer!!
        __asm__ __volatile ("mov %0=psr;;" : "=r"(regspsr) :: "memory");
        regs->cr_ipsr = regspsr & ~(IA64_PSR_I | IA64_PSR_IC | IA64_PSR_BN);
    }
    else {
        regs->cr_ipsr = psr.i64;
        regs->cr_iip = PSCB(vcpu, iip);
    }
    PSCB(vcpu, interrupt_collection_enabled) = 1;
    vcpu_bsw1(vcpu);
    PSCB(vcpu, interrupt_delivery_enabled) = int_enable;
    return (IA64_NO_FAULT);
}
IA64FAULT vcpu_cover(VCPU *vcpu)
{
    // TODO: Only allowed for current vcpu
    REGS *regs = vcpu_regs(vcpu);

    if (!PSCB(vcpu, interrupt_collection_enabled)) {
        if (!PSCB(vcpu, incomplete_regframe))
            PSCB(vcpu, ifs) = regs->cr_ifs;
        else PSCB(vcpu, incomplete_regframe) = 0;
    }
    regs->cr_ifs = 0;
    return (IA64_NO_FAULT);
}
IA64FAULT vcpu_thash(VCPU *vcpu, UINT64 vadr, UINT64 *pval)
{
    extern unsigned long vcpu_get_rr_ps(VCPU *vcpu, UINT64 vadr);
    UINT64 pta = PSCB(vcpu, pta);
    UINT64 pta_sz = (pta & IA64_PTA_SZ(0x3f)) >> IA64_PTA_SZ_BIT;
    UINT64 pta_base = pta & ~((1UL << IA64_PTA_BASE_BIT) - 1);
    UINT64 Mask = (1L << pta_sz) - 1;
    UINT64 Mask_60_15 = (Mask >> 15) & 0x3fffffffffff;
    UINT64 compMask_60_15 = ~Mask_60_15;
    //UINT64 rr_ps = RR_TO_PS(get_rr(vadr));
    UINT64 rr_ps = vcpu_get_rr_ps(vcpu, vadr);
    UINT64 VHPT_offset = (vadr >> rr_ps) << 3;
    UINT64 VHPT_addr1 = vadr & 0xe000000000000000L;
    UINT64 VHPT_addr2a =
        ((pta_base >> 15) & 0x3fffffffffff) & compMask_60_15;
    UINT64 VHPT_addr2b =
        ((VHPT_offset >> 15) & 0x3fffffffffff) & Mask_60_15;
    UINT64 VHPT_addr3 = VHPT_offset & 0x3fff;
    UINT64 VHPT_addr = VHPT_addr1 | ((VHPT_addr2a | VHPT_addr2b) << 15) |
        VHPT_addr3;

#if 0
    if (VHPT_addr1 == 0xe000000000000000L) {
        printf("vcpu_thash: thash unsupported with rr7 @%lx\n",
            PSCB(vcpu, iip));
        return (IA64_ILLOP_FAULT);
    }
#endif
    //verbose("vcpu_thash: vadr=%p, VHPT_addr=%p\n", vadr, VHPT_addr);
    *pval = VHPT_addr;
    return (IA64_NO_FAULT);
}
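// In words (illustrative): the short-format hash above keeps the region
// bits of vadr (63:61), then fills bits 60:15 from pta_base where they lie
// outside the 2^pta_sz table and from the per-page offset
// ((vadr >> rr_ps) << 3) where they lie inside it, and splices in the low
// bits of that offset directly.  A plausible use, sketched but not built
// (cf. the commented-out line in vcpu_get_iha):
#if 0
static void thash_example(VCPU *vcpu, UINT64 ifa)
{
    UINT64 slot;
    if (vcpu_thash(vcpu, ifa, &slot) == IA64_NO_FAULT)
        vcpu_set_iha(vcpu, slot);  // what the guest would read via cr.iha
}
#endif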
IA64FAULT vcpu_ttag(VCPU *vcpu, UINT64 vadr, UINT64 *padr)
{
    printf("vcpu_ttag: ttag instruction unsupported\n");
    return (IA64_ILLOP_FAULT);
}
IA64FAULT vcpu_tpa(VCPU *vcpu, UINT64 vadr, UINT64 *padr)
{
    extern TR_ENTRY *match_tr(VCPU *, UINT64);
    unsigned long match_dtlb(VCPU *, unsigned long, unsigned long *, unsigned long *);
    TR_ENTRY *trp;
    UINT64 mask, pteval, mp_pte, ps;

    extern unsigned long privop_trace;
    if ((pteval = match_dtlb(vcpu, vadr, &ps, &mp_pte)) && (mp_pte != -1UL)) {
        mask = (1L << ps) - 1;
        *padr = ((mp_pte & _PAGE_PPN_MASK) & ~mask) | (vadr & mask);
        verbose("vcpu_tpa: addr=%p @%p, successful, padr=%p\n",
            vadr, PSCB(vcpu, iip), *padr);
        return (IA64_NO_FAULT);
    }
    if ((trp = match_tr(current, vadr))) {
        mask = (1L << trp->ps) - 1;
        *padr = ((trp->ppn << 12) & ~mask) | (vadr & mask);
        verbose("vcpu_tpa: addr=%p @%p, successful, padr=%p\n",
            vadr, PSCB(vcpu, iip), *padr);
        return (IA64_NO_FAULT);
    }
    verbose("vcpu_tpa addr=%p, @%p, forcing data miss\n", vadr, PSCB(vcpu, iip));
    return vcpu_force_data_miss(vcpu, vadr);
}
IA64FAULT vcpu_tak(VCPU *vcpu, UINT64 vadr, UINT64 *key)
{
    printf("vcpu_tak: tak instruction unsupported\n");
    return (IA64_ILLOP_FAULT);
    // HACK ALERT: tak does a thash for now
    //return vcpu_thash(vcpu, vadr, key);
}
/**************************************************************************
 VCPU debug breakpoint register access routines
**************************************************************************/

IA64FAULT vcpu_set_dbr(VCPU *vcpu, UINT64 reg, UINT64 val)
{
    // TODO: unimplemented DBRs return a reserved register fault
    // TODO: Should set Logical CPU state, not just physical
    ia64_set_dbr(reg, val);
    return (IA64_NO_FAULT);
}

IA64FAULT vcpu_set_ibr(VCPU *vcpu, UINT64 reg, UINT64 val)
{
    // TODO: unimplemented IBRs return a reserved register fault
    // TODO: Should set Logical CPU state, not just physical
    ia64_set_ibr(reg, val);
    return (IA64_NO_FAULT);
}

IA64FAULT vcpu_get_dbr(VCPU *vcpu, UINT64 reg, UINT64 *pval)
{
    // TODO: unimplemented DBRs return a reserved register fault
    UINT64 val = ia64_get_dbr(reg);
    *pval = val;
    return (IA64_NO_FAULT);
}

IA64FAULT vcpu_get_ibr(VCPU *vcpu, UINT64 reg, UINT64 *pval)
{
    // TODO: unimplemented IBRs return a reserved register fault
    UINT64 val = ia64_get_ibr(reg);
    *pval = val;
    return (IA64_NO_FAULT);
}
/**************************************************************************
 VCPU performance monitor register access routines
**************************************************************************/

IA64FAULT vcpu_set_pmc(VCPU *vcpu, UINT64 reg, UINT64 val)
{
    // TODO: Should set Logical CPU state, not just physical
    // NOTE: Writes to unimplemented PMC registers are discarded
    ia64_set_pmc(reg, val);
    return (IA64_NO_FAULT);
}

IA64FAULT vcpu_set_pmd(VCPU *vcpu, UINT64 reg, UINT64 val)
{
    // TODO: Should set Logical CPU state, not just physical
    // NOTE: Writes to unimplemented PMD registers are discarded
    ia64_set_pmd(reg, val);
    return (IA64_NO_FAULT);
}

IA64FAULT vcpu_get_pmc(VCPU *vcpu, UINT64 reg, UINT64 *pval)
{
    // NOTE: Reads from unimplemented PMC registers return zero
    UINT64 val = (UINT64)ia64_get_pmc(reg);
    *pval = val;
    return (IA64_NO_FAULT);
}

IA64FAULT vcpu_get_pmd(VCPU *vcpu, UINT64 reg, UINT64 *pval)
{
    // NOTE: Reads from unimplemented PMD registers return zero
    UINT64 val = (UINT64)ia64_get_pmd(reg);
    *pval = val;
    return (IA64_NO_FAULT);
}
/**************************************************************************
 VCPU banked general register access routines
**************************************************************************/

IA64FAULT vcpu_bsw0(VCPU *vcpu)
{
    // TODO: Only allowed for current vcpu
    REGS *regs = vcpu_regs(vcpu);
    unsigned long *r = &regs->r16;
    unsigned long *b0 = &PSCB(vcpu, bank0_regs[0]);
    unsigned long *b1 = &PSCB(vcpu, bank1_regs[0]);
    int i;

    if (PSCB(vcpu, banknum)) {
        for (i = 0; i < 16; i++) { *b1++ = *r; *r++ = *b0++; }
        PSCB(vcpu, banknum) = 0;
    }
    return (IA64_NO_FAULT);
}

IA64FAULT vcpu_bsw1(VCPU *vcpu)
{
    // TODO: Only allowed for current vcpu
    REGS *regs = vcpu_regs(vcpu);
    unsigned long *r = &regs->r16;
    unsigned long *b0 = &PSCB(vcpu, bank0_regs[0]);
    unsigned long *b1 = &PSCB(vcpu, bank1_regs[0]);
    int i;

    if (!PSCB(vcpu, banknum)) {
        for (i = 0; i < 16; i++) { *b0++ = *r; *r++ = *b1++; }
        PSCB(vcpu, banknum) = 1;
    }
    return (IA64_NO_FAULT);
}
/**************************************************************************
 VCPU cpuid access routines
**************************************************************************/

IA64FAULT vcpu_get_cpuid(VCPU *vcpu, UINT64 reg, UINT64 *pval)
{
    // FIXME: This could get called as a result of a rsvd-reg fault
    // if reg > 3
    switch (reg) {
    case 0:
    case 1:
        memcpy(pval, "Xen/ia64", 8);
        break;
    case 2:
        *pval = 0;
        break;
    case 3:
        *pval = 0; //FIXME: See vol1, 3.1.11
        break;
    case 4:
        *pval = 1; //FIXME: See vol1, 3.1.11
        break;
    default:
        *pval = 0; //FIXME: See vol1, 3.1.11
        break;
    }
    return (IA64_NO_FAULT);
}
/**************************************************************************
 VCPU region register access routines
**************************************************************************/

unsigned long vcpu_get_rr_ve(VCPU *vcpu, UINT64 vadr)
{
    ia64_rr rr;

    rr.rrval = PSCB(vcpu, rrs)[vadr >> 61];
    return (rr.ve);
}

unsigned long vcpu_get_rr_ps(VCPU *vcpu, UINT64 vadr)
{
    ia64_rr rr;

    rr.rrval = PSCB(vcpu, rrs)[vadr >> 61];
    return (rr.ps);
}

unsigned long vcpu_get_rr_rid(VCPU *vcpu, UINT64 vadr)
{
    ia64_rr rr;

    rr.rrval = PSCB(vcpu, rrs)[vadr >> 61];
    return (rr.rid);
}

IA64FAULT vcpu_set_rr(VCPU *vcpu, UINT64 reg, UINT64 val)
{
    extern void set_one_rr(UINT64, UINT64);
    PSCB(vcpu, rrs)[reg >> 61] = val;
    // warning: set_one_rr() does it "live"
    set_one_rr(reg, val);
    return (IA64_NO_FAULT);
}

IA64FAULT vcpu_get_rr(VCPU *vcpu, UINT64 reg, UINT64 *pval)
{
    UINT64 val = PSCB(vcpu, rrs)[reg >> 61];
    *pval = val;
    return (IA64_NO_FAULT);
}
/**************************************************************************
 VCPU protection key register access routines
**************************************************************************/

IA64FAULT vcpu_get_pkr(VCPU *vcpu, UINT64 reg, UINT64 *pval)
{
#ifndef PKR_USE_FIXED
    printk("vcpu_get_pkr: called, not implemented yet\n");
    return IA64_ILLOP_FAULT;
#else
    UINT64 val = (UINT64)ia64_get_pkr(reg);
    *pval = val;
    return (IA64_NO_FAULT);
#endif
}

IA64FAULT vcpu_set_pkr(VCPU *vcpu, UINT64 reg, UINT64 val)
{
#ifndef PKR_USE_FIXED
    printk("vcpu_set_pkr: called, not implemented yet\n");
    return IA64_ILLOP_FAULT;
#else
//  if (reg >= NPKRS) return (IA64_ILLOP_FAULT);
    vcpu->pkrs[reg] = val;
    ia64_set_pkr(reg, val);
    return (IA64_NO_FAULT);
#endif
}
/**************************************************************************
 VCPU translation register access routines
**************************************************************************/

static void vcpu_purge_tr_entry(TR_ENTRY *trp)
{
    trp->p = 0;
}

static void vcpu_set_tr_entry(TR_ENTRY *trp, UINT64 pte, UINT64 itir, UINT64 ifa)
{
    UINT64 ps;

    trp->itir = itir;
    trp->rid = virtualize_rid(current, get_rr(ifa) & RR_RID_MASK);
    trp->p = 1;
    ps = trp->ps;
    trp->page_flags = pte;
    if (trp->pl < 2) trp->pl = 2;
    trp->vadr = ifa & ~0xfff;
    if (ps > 12) { // "ignore" relevant low-order bits
        trp->ppn &= ~((1UL << (ps - 12)) - 1);
        trp->vadr &= ~((1UL << ps) - 1);
    }
}
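// Worked example (illustrative): for a 64KB insertion (ps = 16) the code
// above clears the low 4 bits of ppn (16 x 4KB frames) and the low 16 bits
// of vadr, so an ifa of 0x8000000000012345 is recorded as vadr
// 0x8000000000010000.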
TR_ENTRY *vcpu_match_tr_entry(VCPU *vcpu, TR_ENTRY *trp, UINT64 ifa, int count)
{
    unsigned long rid = (get_rr(ifa) & RR_RID_MASK);
    int i;

    for (i = 0; i < count; i++, trp++) {
        if (!trp->p) continue;
        if (physicalize_rid(vcpu, trp->rid) != rid) continue;
        if (ifa < trp->vadr) continue;
        if (ifa >= (trp->vadr + (1L << trp->ps)) - 1) continue;
        //if (trp->key && !match_pkr(vcpu, trp->key)) continue;
        return trp;
    }
    return 0;
}

TR_ENTRY *match_tr(VCPU *vcpu, unsigned long ifa)
{
    TR_ENTRY *trp;

    trp = vcpu_match_tr_entry(vcpu, vcpu->vcpu_info->arch.dtrs, ifa, NDTRS);
    if (trp) return trp;
    trp = vcpu_match_tr_entry(vcpu, vcpu->vcpu_info->arch.itrs, ifa, NITRS);
    if (trp) return trp;
    return 0;
}
IA64FAULT vcpu_itr_d(VCPU *vcpu, UINT64 slot, UINT64 pte,
        UINT64 itir, UINT64 ifa)
{
    TR_ENTRY *trp;

    if (slot >= NDTRS) return IA64_RSVDREG_FAULT;
    trp = &PSCB(vcpu, dtrs[slot]);
    vcpu_set_tr_entry(trp, pte, itir, ifa);
    return IA64_NO_FAULT;
}

IA64FAULT vcpu_itr_i(VCPU *vcpu, UINT64 slot, UINT64 pte,
        UINT64 itir, UINT64 ifa)
{
    TR_ENTRY *trp;

    if (slot >= NITRS) return IA64_RSVDREG_FAULT;
    trp = &PSCB(vcpu, itrs[slot]);
    vcpu_set_tr_entry(trp, pte, itir, ifa);
    return IA64_NO_FAULT;
}
/**************************************************************************
 VCPU translation cache access routines
**************************************************************************/

void foobar(void) { /*vcpu_verbose = 1;*/ }

extern struct domain *dom0;

void vcpu_itc_no_srlz(VCPU *vcpu, UINT64 IorD, UINT64 vaddr, UINT64 pte,
        UINT64 mp_pte, UINT64 logps)
{
    unsigned long psr;
    unsigned long ps = (vcpu->domain == dom0) ? logps : PAGE_SHIFT;

    // FIXME: validate ifa here (not in Xen space), COULD MACHINE CHECK!
    // FIXME, must be inlined or potential for nested fault here!
    if ((vcpu->domain == dom0) && (logps < PAGE_SHIFT)) {
        printf("vcpu_itc_no_srlz: domain0 use of smaller page size!\n");
        //FIXME: kill domain here
        while (1);
    }
    psr = ia64_clear_ic();
    ia64_itc(IorD, vaddr, pte, ps);  // FIXME: look for bigger mappings
    ia64_set_psr(psr);
    // ia64_srlz_i();  // no srlz req'd, will rfi later
#ifdef VHPT_GLOBAL
    if (vcpu->domain == dom0 && ((vaddr >> 61) == 7)) {
        // FIXME: this is dangerous... vhpt_flush_address ensures these
        // addresses never get flushed.  More work needed if this
        // ever happens.
        //printf("vhpt_insert(%p,%p,%p)\n", vaddr, pte, 1L << logps);
        vhpt_insert(vaddr, pte, logps << 2);
    }
    // even if domain pagesize is larger than PAGE_SIZE, just put
    // PAGE_SIZE mapping in the vhpt for now, else purging is complicated
    else vhpt_insert(vaddr, pte, PAGE_SHIFT << 2);
#endif
    if (IorD & 0x4) return;  // don't place in 1-entry TLB
    if (IorD & 0x1) {
        vcpu_set_tr_entry(&PSCB(vcpu, itlb), pte, ps << 2, vaddr);
        PSCB(vcpu, itlb_pte) = mp_pte;
    }
    if (IorD & 0x2) {
        vcpu_set_tr_entry(&PSCB(vcpu, dtlb), pte, ps << 2, vaddr);
        PSCB(vcpu, dtlb_pte) = mp_pte;
    }
}
// NOTE: returns a physical pte, NOT a "metaphysical" pte, so do not check
// the physical address contained for correctness
unsigned long match_dtlb(VCPU *vcpu, unsigned long ifa, unsigned long *ps,
        unsigned long *mp_pte)
{
    TR_ENTRY *trp;

    if ((trp = vcpu_match_tr_entry(vcpu, &vcpu->vcpu_info->arch.dtlb, ifa, 1))) {
        if (ps) *ps = trp->ps;
        if (mp_pte) *mp_pte = vcpu->vcpu_info->arch.dtlb_pte;
        return (trp->page_flags);
    }
    return 0UL;
}
IA64FAULT vcpu_itc_d(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa)
{
    unsigned long pteval, logps = (itir >> 2) & 0x3f;
    unsigned long translate_domain_pte(UINT64, UINT64, UINT64);

    if (((itir & ~0xfc) >> 2) < PAGE_SHIFT) {
        printf("vcpu_itc_d: domain trying to use smaller page size!\n");
        //FIXME: kill domain here
        while (1);
    }
    //itir = (itir & ~0xfc) | (PAGE_SHIFT << 2); // ignore domain's pagesize
    pteval = translate_domain_pte(pte, ifa, itir);
    if (!pteval) return IA64_ILLOP_FAULT;
    vcpu_itc_no_srlz(vcpu, 2, ifa, pteval, pte, logps);
    return IA64_NO_FAULT;
}

IA64FAULT vcpu_itc_i(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa)
{
    unsigned long pteval, logps = (itir >> 2) & 0x3f;
    unsigned long translate_domain_pte(UINT64, UINT64, UINT64);

    // FIXME: validate ifa here (not in Xen space), COULD MACHINE CHECK!
    if (((itir & ~0xfc) >> 2) < PAGE_SHIFT) {
        printf("vcpu_itc_i: domain trying to use smaller page size!\n");
        //FIXME: kill domain here
        while (1);
    }
    //itir = (itir & ~0xfc) | (PAGE_SHIFT << 2); // ignore domain's pagesize
    pteval = translate_domain_pte(pte, ifa, itir);
    // FIXME: what to do if bad physical address? (machine check?)
    if (!pteval) return IA64_ILLOP_FAULT;
    vcpu_itc_no_srlz(vcpu, 1, ifa, pteval, pte, logps);
    return IA64_NO_FAULT;
}
IA64FAULT vcpu_ptc_l(VCPU *vcpu, UINT64 vadr, UINT64 addr_range)
{
    printk("vcpu_ptc_l: called, not implemented yet\n");
    return IA64_ILLOP_FAULT;
}
// At privlvl=0, fc performs no access rights or protection key checks, while
// at privlvl!=0, fc performs access rights checks as if it were a 1-byte
// read but no protection key check.  Thus in order to avoid an unexpected
// access rights fault, we have to translate the virtual address to a
// physical address (possibly via a metaphysical address) and do the fc
// on the physical address, which is guaranteed to flush the same cache line
IA64FAULT vcpu_fc(VCPU *vcpu, UINT64 vadr)
{
    // TODO: Only allowed for current vcpu
    UINT64 mpaddr, ps;
    IA64FAULT fault;
    unsigned long match_dtlb(VCPU *, unsigned long, unsigned long *, unsigned long *);
    unsigned long lookup_domain_mpa(struct domain *, unsigned long);
    unsigned long pteval, dom_imva;

    if ((pteval = match_dtlb(vcpu, vadr, NULL, NULL))) {
        dom_imva = __va(pteval & _PFN_MASK);
        ia64_fc(dom_imva);
        return IA64_NO_FAULT;
    }
    fault = vcpu_tpa(vcpu, vadr, &mpaddr);
    if (fault == IA64_NO_FAULT) {
        // dom0's memory range; assumed defined by the domain-builder code
        extern unsigned long dom0_start, dom0_size;
        if (vcpu->domain == dom0) {
            if (mpaddr < dom0_start || mpaddr >= dom0_start + dom0_size) {
                printk("vcpu_fc: bad dom0 mpaddr %p!\n", mpaddr);
            }
        }
        pteval = lookup_domain_mpa(vcpu->domain, mpaddr);
        if (pteval) {
            dom_imva = __va(pteval & _PFN_MASK);
            ia64_fc(dom_imva);
        }
        else {
            REGS *regs = vcpu_regs(vcpu);
            printk("vcpu_fc: can't flush vadr=%p, iip=%p\n",
                vadr, regs->cr_iip);
        }
    }
    return fault;
}
IA64FAULT vcpu_ptc_e(VCPU *vcpu, UINT64 vadr)
{
    // Note that this only needs to be called once, i.e. the
    // architected loop to purge the entire TLB, should use
    // base = stride1 = stride2 = 0, count0 = count1 = 1

    // FIXME: When VHPT is in place, flush that too!
#ifdef VHPT_GLOBAL
    vhpt_flush();  // FIXME: This is overdoing it
#endif
    local_flush_tlb_all();
    // just invalidate the "whole" tlb
    vcpu_purge_tr_entry(&PSCB(vcpu, dtlb));
    vcpu_purge_tr_entry(&PSCB(vcpu, itlb));
    return IA64_NO_FAULT;
}
IA64FAULT vcpu_ptc_g(VCPU *vcpu, UINT64 vadr, UINT64 addr_range)
{
    printk("vcpu_ptc_g: called, not implemented yet\n");
    return IA64_ILLOP_FAULT;
}

IA64FAULT vcpu_ptc_ga(VCPU *vcpu, UINT64 vadr, UINT64 addr_range)
{
    extern void ia64_global_tlb_purge(UINT64 start, UINT64 end, UINT64 nbits);
    // FIXME: validate not flushing Xen addresses
    // if (Xen address) return (IA64_ILLOP_FAULT);
    // FIXME: ??breaks if domain PAGE_SIZE < Xen PAGE_SIZE
#ifdef VHPT_GLOBAL
    vhpt_flush_address(vadr, addr_range);
#endif
    ia64_global_tlb_purge(vadr, vadr + addr_range, PAGE_SHIFT);
    vcpu_purge_tr_entry(&PSCB(vcpu, dtlb));
    vcpu_purge_tr_entry(&PSCB(vcpu, itlb));
    return IA64_NO_FAULT;
}

IA64FAULT vcpu_ptr_d(VCPU *vcpu, UINT64 vadr, UINT64 addr_range)
{
    printf("vcpu_ptr_d: Purging TLB is unsupported\n");
    return (IA64_ILLOP_FAULT);
}

IA64FAULT vcpu_ptr_i(VCPU *vcpu, UINT64 vadr, UINT64 addr_range)
{
    printf("vcpu_ptr_i: Purging TLB is unsupported\n");
    return (IA64_ILLOP_FAULT);
}
void vcpu_set_regs(VCPU *vcpu, REGS *regs)
{
    vcpu->arch.regs = regs;
}