ia64/xen-unstable

view xen/arch/ia64/vcpu.c @ 3940:6a7bbb8b60f4

bitkeeper revision 1.1236.1.30 (42204c4csSlUlMNeJeX9_Mcy3_XPYA)

Merge http://xen-ia64.bkbits.net/xeno-unstable-ia64.bk
into scramble.cl.cam.ac.uk:/local/scratch/kaf24/xeno-unstable-ia64.bk
author kaf24@scramble.cl.cam.ac.uk
date Sat Feb 26 10:15:40 2005 +0000 (2005-02-26)
parents 89cabb316ba2 4ce1aebf725f
children a6914c2c15cf dce709e1e050 f8026d38aa87
1 /*
2 * Virtualized CPU functions
3 *
4 * Copyright (C) 2004 Hewlett-Packard Co.
5 * Dan Magenheimer (dan.magenheimer@hp.com)
6 *
7 */
9 #include <linux/sched.h>
10 #include <public/arch-ia64.h>
11 #include <asm/ia64_int.h>
12 #include <asm/vcpu.h>
13 #include <asm/regionreg.h>
14 #include <asm/tlb.h>
15 #include <asm/processor.h>
16 #include <asm/delay.h>
18 typedef union {
19 struct ia64_psr ia64_psr;
20 unsigned long i64;
21 } PSR;
23 //typedef struct pt_regs REGS;
24 //typedef struct domain VCPU;
26 // this def for vcpu_regs won't work if kernel stack is present
27 #define vcpu_regs(vcpu) ((struct pt_regs *) vcpu->arch.regs)
28 #define PSCB(x,y) x->vcpu_info->arch.y
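// PSCB(x,y) expands to x->vcpu_info->arch.y, i.e. a field of the per-vcpu
// privileged state block shared with the guest. For illustration only (a
// sketch, not part of the source):
#if 0
UINT64 ip = PSCB(vcpu,iip); // reads vcpu->vcpu_info->arch.iip
#endif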
30 #define TRUE 1
31 #define FALSE 0
32 #define IA64_PTA_SZ_BIT 2
33 #define IA64_PTA_VF_BIT 8
34 #define IA64_PTA_BASE_BIT 15
35 #define IA64_PTA_LFMT (1UL << IA64_PTA_VF_BIT)
36 #define IA64_PTA_SZ(x) (x##UL << IA64_PTA_SZ_BIT)
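// The macros above decode cr.pta (short-format VHPT): bit 0 is ve, bits
// 2-7 are the table size (2^size bytes), bit 8 is vf (long format), and
// the base address occupies bit 15 upward. A sketch, not compiled:
#if 0
UINT64 pta = 0x100000000UL | IA64_PTA_SZ(18) | 1; // base, 256KB table, ve
UINT64 sz = (pta & IA64_PTA_SZ(0x3f)) >> IA64_PTA_SZ_BIT; // == 18
#endif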
38 #define STATIC
40 unsigned long vcpu_verbose = 0;
41 #define verbose(a...) do {if (vcpu_verbose) printf(a);} while(0)
43 /**************************************************************************
44 VCPU general register access routines
45 **************************************************************************/
47 UINT64
48 vcpu_get_gr(VCPU *vcpu, unsigned reg)
49 {
50 REGS *regs = vcpu_regs(vcpu);
51 UINT64 val;
53 if (!reg) return 0;
54 getreg(reg,&val,0,regs); // FIXME: handle NATs later
55 return val;
56 }
58 // returns:
59 // IA64_ILLOP_FAULT if the register would cause an Illegal Operation fault
60 // IA64_NO_FAULT otherwise
61 IA64FAULT
62 vcpu_set_gr(VCPU *vcpu, unsigned reg, UINT64 value)
63 {
64 REGS *regs = vcpu_regs(vcpu);
65 long sof = (regs->cr_ifs) & 0x7f;
67 if (!reg) return IA64_ILLOP_FAULT;
68 if (reg >= sof + 32) return IA64_ILLOP_FAULT;
69 setreg(reg,value,0,regs); // FIXME: handle NATs later
70 return IA64_NO_FAULT;
71 }
73 /**************************************************************************
74 VCPU privileged application register access routines
75 **************************************************************************/
77 IA64FAULT vcpu_set_ar(VCPU *vcpu, UINT64 reg, UINT64 val)
78 {
79 if (reg == 44) return (vcpu_set_itc(vcpu,val));
80 if (reg == 27) return (IA64_ILLOP_FAULT);
81 if (reg > 7) return (IA64_ILLOP_FAULT);
82 PSCB(vcpu,krs[reg]) = val;
83 #if 0
84 // for now, privify kr reads so all kr accesses are privileged
85 switch (reg) {
86 case 0: asm volatile ("mov ar.k0=%0" :: "r"(val)); break;
87 case 1: asm volatile ("mov ar.k1=%0" :: "r"(val)); break;
88 case 2: asm volatile ("mov ar.k2=%0" :: "r"(val)); break;
89 case 3: asm volatile ("mov ar.k3=%0" :: "r"(val)); break;
90 case 4: asm volatile ("mov ar.k4=%0" :: "r"(val)); break;
91 case 5: asm volatile ("mov ar.k5=%0" :: "r"(val)); break;
92 case 6: asm volatile ("mov ar.k6=%0" :: "r"(val)); break;
93 case 7: asm volatile ("mov ar.k7=%0" :: "r"(val)); break;
94 case 27: asm volatile ("mov ar.cflg=%0" :: "r"(val)); break;
95 }
96 #endif
97 return IA64_NO_FAULT;
98 }
100 IA64FAULT vcpu_get_ar(VCPU *vcpu, UINT64 reg, UINT64 *val)
101 {
102 if (reg > 7) return (IA64_ILLOP_FAULT);
103 *val = PSCB(vcpu,krs[reg]);
104 return IA64_NO_FAULT;
105 }
107 /**************************************************************************
108 VCPU processor status register access routines
109 **************************************************************************/
111 void vcpu_set_metaphysical_mode(VCPU *vcpu, BOOLEAN newmode)
112 {
113 /* only do something if mode changes */
114 if (!!newmode ^ !!PSCB(vcpu,metaphysical_mode)) {
115 if (newmode) set_metaphysical_rr(0,vcpu->domain->metaphysical_rid);
116 else if (PSCB(vcpu,rrs[0]) != -1)
117 set_one_rr(0, PSCB(vcpu,rrs[0]));
118 PSCB(vcpu,metaphysical_mode) = newmode;
119 }
120 }
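// "Metaphysical" mode emulates the guest running with translation off
// (psr.dt == 0): rr0 is switched to the domain's metaphysical rid so that
// guest "physical" accesses still go through Xen-controlled translation;
// leaving the mode restores the guest's saved rr0 value.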
122 IA64FAULT vcpu_reset_psr_sm(VCPU *vcpu, UINT64 imm24)
123 {
124 struct ia64_psr psr, imm, *ipsr;
125 REGS *regs = vcpu_regs(vcpu);
127 // TODO: All of these bits need to be virtualized
128 // TODO: Only allowed for current vcpu
129 __asm__ __volatile ("mov %0=psr;;" : "=r"(psr) :: "memory");
130 ipsr = (struct ia64_psr *)&regs->cr_ipsr;
131 imm = *(struct ia64_psr *)&imm24;
132 // interrupt flag
133 if (imm.i) PSCB(vcpu,interrupt_delivery_enabled) = 0;
134 if (imm.ic) PSCB(vcpu,interrupt_collection_enabled) = 0;
135 // interrupt collection flag
136 //if (imm.ic) PSCB(vcpu,interrupt_delivery_enabled) = 0;
137 // handle psr.pp, up, sp, i, ic, dt, dfl and dfh for now
138 if (imm24 & ~(IA64_PSR_PP | IA64_PSR_UP | IA64_PSR_SP
139 | IA64_PSR_I | IA64_PSR_IC | IA64_PSR_DT
140 | IA64_PSR_DFL | IA64_PSR_DFH))
141 return (IA64_ILLOP_FAULT);
142 if (imm.dfh) ipsr->dfh = 0;
143 if (imm.dfl) ipsr->dfl = 0;
144 if (imm.pp) { ipsr->pp = 0; psr.pp = 0; }
145 if (imm.up) { ipsr->up = 0; psr.up = 0; }
146 if (imm.sp) { ipsr->sp = 0; psr.sp = 0; }
147 if (imm.dt) vcpu_set_metaphysical_mode(vcpu,TRUE);
148 __asm__ __volatile (";; mov psr.l=%0;; srlz.d"::"r"(psr):"memory");
149 return IA64_NO_FAULT;
150 }
152 extern UINT64 vcpu_check_pending_interrupts(VCPU *vcpu);
153 #define SPURIOUS_VECTOR 0xf
155 IA64FAULT vcpu_set_psr_sm(VCPU *vcpu, UINT64 imm24)
156 {
157 struct ia64_psr psr, imm, *ipsr;
158 REGS *regs = vcpu_regs(vcpu);
159 UINT64 mask, enabling_interrupts = 0;
161 // TODO: All of these bits need to be virtualized
162 __asm__ __volatile ("mov %0=psr;;" : "=r"(psr) :: "memory");
163 imm = *(struct ia64_psr *)&imm24;
164 ipsr = (struct ia64_psr *)&regs->cr_ipsr;
165 // just handle psr.sp,pp and psr.i,ic (and user mask) for now
166 mask = IA64_PSR_PP|IA64_PSR_SP|IA64_PSR_I|IA64_PSR_IC|IA64_PSR_UM |
167 IA64_PSR_DT|IA64_PSR_DFL|IA64_PSR_DFH;
168 if (imm24 & ~mask) return (IA64_ILLOP_FAULT);
169 if (imm.dfh) ipsr->dfh = 1;
170 if (imm.dfl) ipsr->dfl = 1;
171 if (imm.pp) { ipsr->pp = 1; psr.pp = 1; }
172 if (imm.sp) { ipsr->sp = 1; psr.sp = 1; }
173 if (imm.i) {
174 if (!PSCB(vcpu,interrupt_delivery_enabled)) {
175 //printf("vcpu_set_psr_sm: psr.ic 0->1 ");
176 enabling_interrupts = 1;
177 }
178 PSCB(vcpu,interrupt_delivery_enabled) = 1;
179 }
180 if (imm.ic) PSCB(vcpu,interrupt_collection_enabled) = 1;
181 // TODO: do this faster
182 if (imm.mfl) { ipsr->mfl = 1; psr.mfl = 1; }
183 if (imm.ac) { ipsr->ac = 1; psr.ac = 1; }
184 if (imm.up) { ipsr->up = 1; psr.up = 1; }
185 if (imm.be) {
186 printf("*** DOMAIN TRYING TO TURN ON BIG-ENDIAN!!!\n");
187 return (IA64_ILLOP_FAULT);
188 }
189 if (imm.dt) vcpu_set_metaphysical_mode(vcpu,FALSE);
190 __asm__ __volatile (";; mov psr.l=%0;; srlz.d"::"r"(psr):"memory");
191 #if 0 // now done with deliver_pending_interrupts
192 if (enabling_interrupts) {
193 if (vcpu_check_pending_interrupts(vcpu) != SPURIOUS_VECTOR) {
194 //printf("with interrupts pending\n");
195 return IA64_EXTINT_VECTOR;
196 }
197 //else printf("but nothing pending\n");
198 }
199 #endif
200 return IA64_NO_FAULT;
201 }
203 IA64FAULT vcpu_set_psr_l(VCPU *vcpu, UINT64 val)
204 {
205 struct ia64_psr psr, newpsr, *ipsr;
206 REGS *regs = vcpu_regs(vcpu);
207 UINT64 enabling_interrupts = 0;
209 // TODO: All of these bits need to be virtualized
210 __asm__ __volatile ("mov %0=psr;;" : "=r"(psr) :: "memory");
211 newpsr = *(struct ia64_psr *)&val;
212 ipsr = (struct ia64_psr *)&regs->cr_ipsr;
213 // just handle psr.up and psr.pp for now
214 //if (val & ~(IA64_PSR_PP | IA64_PSR_UP | IA64_PSR_SP)) return (IA64_ILLOP_FAULT);
215 // however trying to set other bits can't be an error as it is in ssm
216 if (newpsr.dfh) ipsr->dfh = 1;
217 if (newpsr.dfl) ipsr->dfl = 1;
218 if (newpsr.pp) { ipsr->pp = 1; psr.pp = 1; }
219 if (newpsr.up) { ipsr->up = 1; psr.up = 1; }
220 if (newpsr.sp) { ipsr->sp = 1; psr.sp = 1; }
221 if (newpsr.i) {
222 if (!PSCB(vcpu,interrupt_delivery_enabled))
223 enabling_interrupts = 1;
224 PSCB(vcpu,interrupt_delivery_enabled) = 1;
225 }
226 if (newpsr.ic) PSCB(vcpu,interrupt_collection_enabled) = 1;
227 if (newpsr.mfl) { ipsr->mfl = 1; psr.mfl = 1; }
228 if (newpsr.ac) { ipsr->ac = 1; psr.ac = 1; }
230 if (newpsr.dt && newpsr.rt) vcpu_set_metaphysical_mode(vcpu,FALSE);
231 else vcpu_set_metaphysical_mode(vcpu,TRUE);
232 if (newpsr.be) {
233 printf("*** DOMAIN TRYING TO TURN ON BIG-ENDIAN!!!\n");
234 return (IA64_ILLOP_FAULT);
235 }
236 //__asm__ __volatile (";; mov psr.l=%0;; srlz.d"::"r"(psr):"memory");
237 #if 0 // now done with deliver_pending_interrupts
238 if (enabling_interrupts) {
239 if (vcpu_check_pending_interrupts(vcpu) != SPURIOUS_VECTOR)
240 return IA64_EXTINT_VECTOR;
241 }
242 #endif
243 return IA64_NO_FAULT;
244 }
246 IA64FAULT vcpu_get_psr(VCPU *vcpu, UINT64 *pval)
247 {
248 UINT64 psr;
249 struct ia64_psr newpsr;
251 // TODO: This needs to return a "filtered" view of
252 // the psr, not the actual psr. Probably the psr needs
253 // to be a field in regs (in addition to ipsr).
254 __asm__ __volatile ("mov %0=psr;;" : "=r"(psr) :: "memory");
255 newpsr = *(struct ia64_psr *)&psr;
256 if (newpsr.cpl == 2) newpsr.cpl = 0;
257 if (PSCB(vcpu,interrupt_delivery_enabled)) newpsr.i = 1;
258 else newpsr.i = 0;
259 if (PSCB(vcpu,interrupt_collection_enabled)) newpsr.ic = 1;
260 else newpsr.ic = 0;
261 *pval = *(unsigned long *)&newpsr;
262 return IA64_NO_FAULT;
263 }
265 BOOLEAN vcpu_get_psr_ic(VCPU *vcpu)
266 {
267 return !!PSCB(vcpu,interrupt_collection_enabled);
268 }
270 BOOLEAN vcpu_get_psr_i(VCPU *vcpu)
271 {
272 return !!PSCB(vcpu,interrupt_delivery_enabled);
273 }
275 UINT64 vcpu_get_ipsr_int_state(VCPU *vcpu,UINT64 prevpsr)
276 {
277 UINT64 dcr = PSCB(vcpu,dcr);
278 PSR psr = {0};
280 //printf("*** vcpu_get_ipsr_int_state (0x%016lx)...",prevpsr);
281 psr.i64 = prevpsr;
282 psr.ia64_psr.be = 0; if (dcr & IA64_DCR_BE) psr.ia64_psr.be = 1;
283 psr.ia64_psr.pp = 0; if (dcr & IA64_DCR_PP) psr.ia64_psr.pp = 1;
284 psr.ia64_psr.ic = PSCB(vcpu,interrupt_collection_enabled);
285 psr.ia64_psr.i = PSCB(vcpu,interrupt_delivery_enabled);
286 psr.ia64_psr.bn = PSCB(vcpu,banknum);
287 psr.ia64_psr.dt = 1; psr.ia64_psr.it = 1; psr.ia64_psr.rt = 1;
288 if (psr.ia64_psr.cpl == 2) psr.ia64_psr.cpl = 0; // !!!! fool domain
289 // psr.pk = 1;
290 //printf("returns 0x%016lx...",psr.i64);
291 return psr.i64;
292 }
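// In other words, the interruption psr presented to the guest is the
// previous psr with be/pp taken from the virtual dcr, i/ic/bn from the
// virtualized state, translation bits forced on, and Xen's cpl 2 reported
// as cpl 0 so the domain believes it runs privileged.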
294 /**************************************************************************
295 VCPU control register access routines
296 **************************************************************************/
298 IA64FAULT vcpu_get_dcr(VCPU *vcpu, UINT64 *pval)
299 {
300 extern unsigned long privop_trace;
301 //privop_trace=0;
302 //verbose("vcpu_get_dcr: called @%p\n",PSCB(vcpu,iip));
303 // Reads of cr.dcr on Xen always have the sign bit set, so
304 // a domain can differentiate whether it is running on Xen or not
305 *pval = PSCB(vcpu,dcr) | 0x8000000000000000L;
306 return (IA64_NO_FAULT);
307 }
309 IA64FAULT vcpu_get_iva(VCPU *vcpu, UINT64 *pval)
310 {
311 *pval = PSCB(vcpu,iva) & ~0x7fffL;
312 return (IA64_NO_FAULT);
313 }
315 IA64FAULT vcpu_get_pta(VCPU *vcpu, UINT64 *pval)
316 {
317 *pval = PSCB(vcpu,pta);
318 return (IA64_NO_FAULT);
319 }
321 IA64FAULT vcpu_get_ipsr(VCPU *vcpu, UINT64 *pval)
322 {
323 //REGS *regs = vcpu_regs(vcpu);
324 //*pval = regs->cr_ipsr;
325 *pval = PSCB(vcpu,ipsr);
326 return (IA64_NO_FAULT);
327 }
329 IA64FAULT vcpu_get_isr(VCPU *vcpu, UINT64 *pval)
330 {
331 *pval = PSCB(vcpu,isr);
332 return (IA64_NO_FAULT);
333 }
335 IA64FAULT vcpu_get_iip(VCPU *vcpu, UINT64 *pval)
336 {
337 //REGS *regs = vcpu_regs(vcpu);
338 //*pval = regs->cr_iip;
339 *pval = PSCB(vcpu,iip);
340 return (IA64_NO_FAULT);
341 }
343 IA64FAULT vcpu_get_ifa(VCPU *vcpu, UINT64 *pval)
344 {
345 UINT64 val = PSCB(vcpu,ifa);
346 *pval = val;
347 return (IA64_NO_FAULT);
348 }
351 unsigned long vcpu_get_itir_on_fault(VCPU *vcpu, UINT64 ifa)
352 {
353 ia64_rr rr;
355 rr.rrval = 0;
356 rr.ps = vcpu_get_rr_ps(vcpu,ifa);
357 rr.rid = vcpu_get_rr_rid(vcpu,ifa);
358 return (rr.rrval);
359 }
362 IA64FAULT vcpu_get_itir(VCPU *vcpu, UINT64 *pval)
363 {
364 UINT64 val = PSCB(vcpu,itir);
365 *pval = val;
366 return (IA64_NO_FAULT);
367 }
369 IA64FAULT vcpu_get_iipa(VCPU *vcpu, UINT64 *pval)
370 {
371 UINT64 val = PSCB(vcpu,iipa);
372 // SP entry code does not save iipa yet nor does it get
373 // properly delivered in the pscb
374 printf("*** vcpu_get_iipa: cr.iipa not fully implemented yet!!\n");
375 *pval = val;
376 return (IA64_NO_FAULT);
377 }
379 IA64FAULT vcpu_get_ifs(VCPU *vcpu, UINT64 *pval)
380 {
381 //PSCB(vcpu,ifs) = PSCB(vcpu)->regs.cr_ifs;
382 //*pval = PSCB(vcpu,regs).cr_ifs;
383 *pval = PSCB(vcpu,ifs);
384 PSCB(vcpu,incomplete_regframe) = 0;
385 return (IA64_NO_FAULT);
386 }
388 IA64FAULT vcpu_get_iim(VCPU *vcpu, UINT64 *pval)
389 {
390 UINT64 val = PSCB(vcpu,iim);
391 *pval = val;
392 return (IA64_NO_FAULT);
393 }
395 IA64FAULT vcpu_get_iha(VCPU *vcpu, UINT64 *pval)
396 {
397 return vcpu_thash(vcpu,PSCB(vcpu,ifa),pval);
398 }
400 IA64FAULT vcpu_set_dcr(VCPU *vcpu, UINT64 val)
401 {
402 extern unsigned long privop_trace;
403 //privop_trace=1;
404 // Reads of cr.dcr on Xen always have the sign bit set, so
405 // a domain can differentiate whether it is running on SP or not
406 // Thus, writes of DCR should ignore the sign bit
407 //verbose("vcpu_set_dcr: called\n");
408 PSCB(vcpu,dcr) = val & ~0x8000000000000000L;
409 return (IA64_NO_FAULT);
410 }
412 IA64FAULT vcpu_set_iva(VCPU *vcpu, UINT64 val)
413 {
414 PSCB(vcpu,iva) = val & ~0x7fffL;
415 return (IA64_NO_FAULT);
416 }
418 IA64FAULT vcpu_set_pta(VCPU *vcpu, UINT64 val)
419 {
420 if (val & IA64_PTA_LFMT) {
421 printf("*** No support for VHPT long format yet!!\n");
422 return (IA64_ILLOP_FAULT);
423 }
424 if (val & (0x3f<<9)) /* reserved fields */ return IA64_RSVDREG_FAULT;
425 if (val & 2) /* reserved fields */ return IA64_RSVDREG_FAULT;
426 PSCB(vcpu,pta) = val;
427 return IA64_NO_FAULT;
428 }
430 IA64FAULT vcpu_set_ipsr(VCPU *vcpu, UINT64 val)
431 {
432 PSCB(vcpu,ipsr) = val;
433 return IA64_NO_FAULT;
434 }
436 IA64FAULT vcpu_set_isr(VCPU *vcpu, UINT64 val)
437 {
438 PSCB(vcpu,isr) = val;
439 return IA64_NO_FAULT;
440 }
442 IA64FAULT vcpu_set_iip(VCPU *vcpu, UINT64 val)
443 {
444 PSCB(vcpu,iip) = val;
445 return IA64_NO_FAULT;
446 }
448 IA64FAULT vcpu_increment_iip(VCPU *vcpu)
449 {
450 REGS *regs = vcpu_regs(vcpu);
451 struct ia64_psr *ipsr = (struct ia64_psr *)&regs->cr_ipsr;
452 if (ipsr->ri == 2) { ipsr->ri=0; regs->cr_iip += 16; }
453 else ipsr->ri++;
454 return (IA64_NO_FAULT);
455 }
457 IA64FAULT vcpu_set_ifa(VCPU *vcpu, UINT64 val)
458 {
459 PSCB(vcpu,ifa) = val;
460 return IA64_NO_FAULT;
461 }
463 IA64FAULT vcpu_set_itir(VCPU *vcpu, UINT64 val)
464 {
465 PSCB(vcpu,itir) = val;
466 return IA64_NO_FAULT;
467 }
469 IA64FAULT vcpu_set_iipa(VCPU *vcpu, UINT64 val)
470 {
471 // SP entry code does not save iipa yet nor does it get
472 // properly delivered in the pscb
473 printf("*** vcpu_set_iipa: cr.iipa not fully implemented yet!!\n");
474 PSCB(vcpu,iipa) = val;
475 return IA64_NO_FAULT;
476 }
478 IA64FAULT vcpu_set_ifs(VCPU *vcpu, UINT64 val)
479 {
480 //REGS *regs = vcpu_regs(vcpu);
481 PSCB(vcpu,ifs) = val;
482 return IA64_NO_FAULT;
483 }
485 IA64FAULT vcpu_set_iim(VCPU *vcpu, UINT64 val)
486 {
487 PSCB(vcpu,iim) = val;
488 return IA64_NO_FAULT;
489 }
491 IA64FAULT vcpu_set_iha(VCPU *vcpu, UINT64 val)
492 {
493 PSCB(vcpu,iha) = val;
494 return IA64_NO_FAULT;
495 }
497 /**************************************************************************
498 VCPU interrupt control register access routines
499 **************************************************************************/
501 void vcpu_pend_interrupt(VCPU *vcpu, UINT64 vector)
502 {
503 if (vector & ~0xff) {
504 printf("vcpu_pend_interrupt: bad vector\n");
505 return;
506 }
507 if (!test_bit(vector,PSCB(vcpu,delivery_mask))) return;
508 if (test_bit(vector,PSCB(vcpu,irr))) {
509 //printf("vcpu_pend_interrupt: overrun\n");
510 }
511 set_bit(vector,PSCB(vcpu,irr));
512 }
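// irr is four 64-bit words indexed by vector>>6; bit vector&0x3f within
// the word. A sketch (not compiled): vector 0xef lives in irr[3], bit 0x2f.
#if 0
UINT64 word = 0xefUL >> 6; // == 3
UINT64 bit = 0xefUL & 0x3f; // == 0x2f
#endif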
514 #define IA64_TPR_MMI 0x10000
515 #define IA64_TPR_MIC 0x000f0
517 /* checks to see if a VCPU has any unmasked pending interrupts
518 * if so, returns the highest, else returns SPURIOUS_VECTOR */
519 /* NOTE: Since this gets called from vcpu_get_ivr() and the
520 * semantics of "mov rx=cr.ivr" ignore the setting of the psr.i bit,
521 * this routine also ignores pscb.interrupt_delivery_enabled
522 * and this must be checked independently; see vcpu_deliverable interrupts() */
523 UINT64 vcpu_check_pending_interrupts(VCPU *vcpu)
524 {
525 UINT64 *p, *q, *r, bits, bitnum, mask, i, vector;
527 p = &PSCB(vcpu,irr[3]);
528 q = &PSCB(vcpu,delivery_mask[3]);
529 r = &PSCB(vcpu,insvc[3]);
530 for (i = 3; ; p--, q--, r--, i--) {
531 bits = *p & *q;
532 if (bits) break; // got a potential interrupt
533 if (*r) {
534 // nothing in this word which is pending+inservice
535 // but there is one inservice which masks lower
536 return SPURIOUS_VECTOR;
537 }
538 if (i == 0) {
539 // checked all bits... nothing pending+inservice
540 return SPURIOUS_VECTOR;
541 }
542 }
543 // have a pending,deliverable interrupt... see if it is masked
544 bitnum = ia64_fls(bits);
545 //printf("XXXXXXX vcpu_check_pending_interrupts: got bitnum=%p...",bitnum);
546 vector = bitnum+(i*64);
547 mask = 1L << bitnum;
548 //printf("XXXXXXX vcpu_check_pending_interrupts: got vector=%p...",vector);
549 if (*r >= mask) {
550 // masked by equal inservice
551 //printf("but masked by equal inservice\n");
552 return SPURIOUS_VECTOR;
553 }
554 if (PSCB(vcpu,tpr) & IA64_TPR_MMI) {
555 // tpr.mmi is set
556 //printf("but masked by tpr.mmi\n");
557 return SPURIOUS_VECTOR;
558 }
559 if (((PSCB(vcpu,tpr) & IA64_TPR_MIC) + 15) >= vector) {
560 //tpr.mic masks class
561 //printf("but masked by tpr.mic\n");
562 return SPURIOUS_VECTOR;
563 }
565 //printf("returned to caller\n");
566 return vector;
567 }
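// Masking recap for the checks above: an in-service vector with an equal
// or higher bit in the same word wins; tpr.mmi masks everything maskable;
// tpr.mic masks whole priority classes. E.g. with tpr.mic == 3,
// (3<<4)+15 == 0x3f, so vectors 0x00-0x3f are masked and 0x40+ deliverable.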
569 UINT64 vcpu_deliverable_interrupts(VCPU *vcpu)
570 {
571 return (vcpu_get_psr_i(vcpu) &&
572 vcpu_check_pending_interrupts(vcpu) != SPURIOUS_VECTOR);
573 }
575 IA64FAULT vcpu_get_lid(VCPU *vcpu, UINT64 *pval)
576 {
577 extern unsigned long privop_trace;
578 //privop_trace=1;
579 //TODO: Implement this
580 printf("vcpu_get_lid: WARNING: Getting cr.lid always returns zero\n");
581 *pval = 0;
582 return IA64_NO_FAULT;
583 }
585 IA64FAULT vcpu_get_ivr(VCPU *vcpu, UINT64 *pval)
586 {
587 int i;
588 UINT64 vector, mask;
589 #if 1
590 static char firstivr = 1;
591 static char firsttime[256];
592 if (firstivr) {
593 int i;
594 for (i=0;i<256;i++) firsttime[i]=1;
595 firstivr=0;
596 }
597 #endif
599 vector = vcpu_check_pending_interrupts(vcpu);
600 if (vector == SPURIOUS_VECTOR) {
601 PSCB(vcpu,pending_interruption) = 0;
602 *pval = vector;
603 return IA64_NO_FAULT;
604 }
605 // now have an unmasked, pending, deliverable vector!
606 // getting ivr has "side effects"
607 #if 0
608 if (firsttime[vector]) {
609 printf("*** First get_ivr on vector=%d,itc=%lx\n",
610 vector,ia64_get_itc());
611 firsttime[vector]=0;
612 }
613 #endif
614 i = vector >> 6;
615 mask = 1L << (vector & 0x3f);
616 //printf("ZZZZZZ vcpu_get_ivr: setting insvc mask for vector %ld\n",vector);
617 PSCB(vcpu,insvc[i]) |= mask;
618 PSCB(vcpu,irr[i]) &= ~mask;
619 PSCB(vcpu,pending_interruption)--;
620 *pval = vector;
621 return IA64_NO_FAULT;
622 }
624 IA64FAULT vcpu_get_tpr(VCPU *vcpu, UINT64 *pval)
625 {
626 *pval = PSCB(vcpu,tpr);
627 return (IA64_NO_FAULT);
628 }
630 IA64FAULT vcpu_get_eoi(VCPU *vcpu, UINT64 *pval)
631 {
632 *pval = 0L; // reads of eoi always return 0
633 return (IA64_NO_FAULT);
634 }
636 IA64FAULT vcpu_get_irr0(VCPU *vcpu, UINT64 *pval)
637 {
638 #ifndef IRR_USE_FIXED
639 printk("vcpu_get_irr: called, not implemented yet\n");
640 return IA64_ILLOP_FAULT;
641 #else
642 *pval = vcpu->irr[0];
643 return (IA64_NO_FAULT);
644 #endif
645 }
647 IA64FAULT vcpu_get_irr1(VCPU *vcpu, UINT64 *pval)
648 {
649 #ifndef IRR_USE_FIXED
650 printk("vcpu_get_irr: called, not implemented yet\n");
651 return IA64_ILLOP_FAULT;
652 #else
653 *pval = vcpu->irr[1];
654 return (IA64_NO_FAULT);
655 #endif
656 }
658 IA64FAULT vcpu_get_irr2(VCPU *vcpu, UINT64 *pval)
659 {
660 #ifndef IRR_USE_FIXED
661 printk("vcpu_get_irr: called, not implemented yet\n");
662 return IA64_ILLOP_FAULT;
663 #else
664 *pval = vcpu->irr[2];
665 return (IA64_NO_FAULT);
666 #endif
667 }
669 IA64FAULT vcpu_get_irr3(VCPU *vcpu, UINT64 *pval)
670 {
671 #ifndef IRR_USE_FIXED
672 printk("vcpu_get_irr: called, not implemented yet\n");
673 return IA64_ILLOP_FAULT;
674 #else
675 *pval = vcpu->irr[3];
676 return (IA64_NO_FAULT);
677 #endif
678 }
680 IA64FAULT vcpu_get_itv(VCPU *vcpu, UINT64 *pval)
681 {
682 *pval = PSCB(vcpu,itv);
683 return (IA64_NO_FAULT);
684 }
686 IA64FAULT vcpu_get_pmv(VCPU *vcpu, UINT64 *pval)
687 {
688 *pval = PSCB(vcpu,pmv);
689 return (IA64_NO_FAULT);
690 }
692 IA64FAULT vcpu_get_cmcv(VCPU *vcpu, UINT64 *pval)
693 {
694 *pval = PSCB(vcpu,cmcv);
695 return (IA64_NO_FAULT);
696 }
698 IA64FAULT vcpu_get_lrr0(VCPU *vcpu, UINT64 *pval)
699 {
700 // fix this when setting values other than m-bit is supported
701 printf("vcpu_get_lrr0: Unmasked interrupts unsupported\n");
702 *pval = (1L << 16);
703 return (IA64_NO_FAULT);
704 }
706 IA64FAULT vcpu_get_lrr1(VCPU *vcpu, UINT64 *pval)
707 {
708 // fix this when setting values other than m-bit is supported
709 printf("vcpu_get_lrr1: Unmasked interrupts unsupported\n");
710 *pval = (1L << 16);
711 return (IA64_NO_FAULT);
712 }
714 IA64FAULT vcpu_set_lid(VCPU *vcpu, UINT64 val)
715 {
716 printf("vcpu_set_lid: Setting cr.lid is unsupported\n");
717 return (IA64_ILLOP_FAULT);
718 }
720 IA64FAULT vcpu_set_tpr(VCPU *vcpu, UINT64 val)
721 {
722 if (val & 0xff00) return IA64_RSVDREG_FAULT;
723 PSCB(vcpu,tpr) = val;
724 return (IA64_NO_FAULT);
725 }
727 IA64FAULT vcpu_set_eoi(VCPU *vcpu, UINT64 val)
728 {
729 UINT64 *p, bits, vec, bitnum;
730 int i;
732 p = &PSCB(vcpu,insvc[3]);
733 for (i = 3; (i >= 0) && !(bits = *p); i--, p--);
734 if (i < 0) {
735 printf("Trying to EOI interrupt when none are in-service.\r\n");
737 return (IA64_ILLOP_FAULT);
737 }
738 bitnum = ia64_fls(bits);
739 vec = bitnum + (i*64);
740 /* clear the correct bit */
741 bits &= ~(1L << bitnum);
742 *p = bits;
743 /* clearing an eoi bit may unmask another pending interrupt... */
744 if (PSCB(vcpu,interrupt_delivery_enabled)) { // but only if enabled...
745 // worry about this later... Linux only calls eoi
746 // with interrupts disabled
747 printf("Trying to EOI interrupt with interrupts enabled\r\n");
748 }
749 //printf("YYYYY vcpu_set_eoi: Successful\n");
750 return (IA64_NO_FAULT);
751 }
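// ia64_fls yields the highest set bit, so EOI retires the highest-priority
// in-service vector, matching the order vcpu_get_ivr granted them. E.g.
// bits == 0x8001 gives bitnum 15, so that vector is retired before bit 0.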
753 IA64FAULT vcpu_set_lrr0(VCPU *vcpu, UINT64 val)
754 {
755 if (!(val & (1L << 16))) {
756 printf("vcpu_set_lrr0: Unmasked interrupts unsupported\n");
757 return (IA64_ILLOP_FAULT);
758 }
759 // no place to save this state but nothing to do anyway
760 return (IA64_NO_FAULT);
761 }
763 IA64FAULT vcpu_set_lrr1(VCPU *vcpu, UINT64 val)
764 {
765 if (!(val & (1L << 16))) {
766 printf("vcpu_set_lrr0: Unmasked interrupts unsupported\n");
767 return (IA64_ILLOP_FAULT);
768 }
769 // no place to save this state but nothing to do anyway
770 return (IA64_NO_FAULT);
771 }
774 IA64FAULT vcpu_set_itv(VCPU *vcpu, UINT64 val)
775 {
776 extern unsigned long privop_trace;
777 //privop_trace=1;
778 if (val & 0xef00) return (IA64_ILLOP_FAULT);
779 PSCB(vcpu,itv) = val;
780 if (val & 0x10000) {
781 printf("**** vcpu_set_itv(%d): vitm=%lx, setting to 0\n",val,PSCB(vcpu,domain_itm));
782 PSCB(vcpu,domain_itm) = 0;
783 }
784 else vcpu_enable_timer(vcpu,1000000L);
785 return (IA64_NO_FAULT);
786 }
788 IA64FAULT vcpu_set_pmv(VCPU *vcpu, UINT64 val)
789 {
790 if (val & 0xef00) /* reserved fields */ return IA64_RSVDREG_FAULT;
791 PSCB(vcpu,pmv) = val;
792 return (IA64_NO_FAULT);
793 }
795 IA64FAULT vcpu_set_cmcv(VCPU *vcpu, UINT64 val)
796 {
797 if (val & 0xef00) /* reserved fields */ return IA64_RSVDREG_FAULT;
798 PSCB(vcpu,cmcv) = val;
799 return (IA64_NO_FAULT);
800 }
802 /**************************************************************************
803 Interval timer routines
804 **************************************************************************/
806 BOOLEAN vcpu_timer_disabled(VCPU *vcpu)
807 {
808 UINT64 itv = PSCB(vcpu,itv);
809 return(!itv || !!(itv & 0x10000));
810 }
812 BOOLEAN vcpu_timer_expired(VCPU *vcpu)
813 {
814 unsigned long domain_itm = PSCB(vcpu,domain_itm);
815 unsigned long now = ia64_get_itc();
817 if (domain_itm && (now > domain_itm) &&
818 !vcpu_timer_disabled(vcpu)) return TRUE;
819 return FALSE;
820 }
822 void vcpu_safe_set_itm(unsigned long val)
823 {
824 unsigned long epsilon = 100;
825 UINT64 now = ia64_get_itc();
827 local_irq_disable();
828 while (1) {
829 //printf("*** vcpu_safe_set_itm: Setting itm to %lx, itc=%lx\n",val,now);
830 ia64_set_itm(val);
831 if (val > (now = ia64_get_itc())) break;
832 val = now + epsilon;
833 epsilon <<= 1;
834 }
835 local_irq_enable();
836 }
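// The retry loop closes the race where itc passes the target before the
// itm write lands (the match would be lost forever); each retry re-aims
// at now plus a doubling epsilon (100, 200, 400... cycles) until the
// target is safely in the future.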
838 void vcpu_set_next_timer(VCPU *vcpu)
839 {
840 UINT64 d = PSCB(vcpu,domain_itm);
841 //UINT64 s = PSCB(vcpu,xen_itm);
842 UINT64 s = local_cpu_data->itm_next;
843 UINT64 now = ia64_get_itc();
844 //UINT64 interval = PSCB(vcpu,xen_timer_interval);
846 /* gloss over the wraparound problem for now... we know it exists
847 * but it doesn't matter right now */
849 #if 0
850 /* ensure at least next SP tick is in the future */
851 if (!interval) PSCB(vcpu,xen_itm) = now +
852 #if 0
853 (running_on_sim() ? SIM_DEFAULT_CLOCK_RATE :
854 DEFAULT_CLOCK_RATE);
855 #else
856 3000000;
857 //printf("vcpu_set_next_timer: HACK!\n");
858 #endif
859 #if 0
860 if (PSCB(vcpu,xen_itm) < now)
861 while (PSCB(vcpu,xen_itm) < now + (interval>>1))
862 PSCB(vcpu,xen_itm) += interval;
863 #endif
864 #endif
866 if (is_idle_task(vcpu->domain)) {
867 printf("****** vcpu_set_next_timer called during idle!!\n");
868 }
869 //s = PSCB(vcpu,xen_itm);
870 if (d && (d > now) && (d < s)) {
871 vcpu_safe_set_itm(d);
872 //using_domain_as_itm++;
873 }
874 else {
875 vcpu_safe_set_itm(s);
876 //using_xen_as_itm++;
877 }
878 }
880 // parameter is a time interval specified in cycles
881 void vcpu_enable_timer(VCPU *vcpu,UINT64 cycles)
882 {
883 PSCB(vcpu,xen_timer_interval) = cycles;
884 vcpu_set_next_timer(vcpu);
885 printf("vcpu_enable_timer: interval set to %ld cycles\n",
886 PSCB(vcpu,xen_timer_interval));
887 __set_bit(PSCB(vcpu,itv), PSCB(vcpu,delivery_mask));
888 }
890 IA64FAULT vcpu_set_itm(VCPU *vcpu, UINT64 val)
891 {
892 UINT64 now = ia64_get_itc();
894 //if (val < now) val = now + 1000;
895 //printf("*** vcpu_set_itm: called with %lx\n",val);
896 PSCB(vcpu,domain_itm) = val;
897 vcpu_set_next_timer(vcpu);
898 return (IA64_NO_FAULT);
899 }
901 IA64FAULT vcpu_set_itc(VCPU *vcpu, UINT64 val)
902 {
904 UINT64 oldnow = ia64_get_itc();
905 UINT64 olditm = PSCB(vcpu,domain_itm);
906 unsigned long d = olditm - oldnow;
907 unsigned long x = local_cpu_data->itm_next - oldnow;
909 UINT64 newnow = val, min_delta;
911 local_irq_disable();
912 if (olditm) {
913 printf("**** vcpu_set_itc(%lx): vitm changed to %lx\n",val,newnow+d);
914 PSCB(vcpu,domain_itm) = newnow + d;
915 }
916 local_cpu_data->itm_next = newnow + x;
917 d = PSCB(vcpu,domain_itm);
918 x = local_cpu_data->itm_next;
920 ia64_set_itc(newnow);
921 if (d && (d > newnow) && (d < x)) {
922 vcpu_safe_set_itm(d);
923 //using_domain_as_itm++;
924 }
925 else {
926 vcpu_safe_set_itm(x);
927 //using_xen_as_itm++;
928 }
929 local_irq_enable();
930 return (IA64_NO_FAULT);
931 }
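// Warping itc preserves deadlines relative to the old time base: with
// d == olditm - oldnow, the new vitm becomes val + d, so the guest timer
// still fires the same number of cycles from "now". E.g. oldnow=1000,
// olditm=1500, val=9000 gives d=500 and a new vitm of 9500.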
933 IA64FAULT vcpu_get_itm(VCPU *vcpu, UINT64 *pval)
934 {
935 //FIXME: Implement this
936 printf("vcpu_get_itm: Getting cr.itm is unsupported... continuing\n");
937 return (IA64_NO_FAULT);
938 //return (IA64_ILLOP_FAULT);
939 }
941 IA64FAULT vcpu_get_itc(VCPU *vcpu, UINT64 *pval)
942 {
943 //TODO: Implement this
944 printf("vcpu_get_itc: Getting ar.itc is unsupported\n");
945 return (IA64_ILLOP_FAULT);
946 }
948 void vcpu_pend_timer(VCPU *vcpu)
949 {
950 UINT64 itv = PSCB(vcpu,itv) & 0xff;
952 if (vcpu_timer_disabled(vcpu)) return;
953 vcpu_pend_interrupt(vcpu, itv);
954 }
956 //FIXME: This is a hack because everything dies if a timer tick is lost
957 void vcpu_poke_timer(VCPU *vcpu)
958 {
959 UINT64 itv = PSCB(vcpu,itv) & 0xff;
960 UINT64 now = ia64_get_itc();
961 UINT64 itm = PSCB(vcpu,domain_itm);
962 UINT64 irr;
964 if (vcpu_timer_disabled(vcpu)) return;
965 if (!itm) return;
966 if (itv != 0xefL) {
967 printf("vcpu_poke_timer: unimplemented itv=%lx!\n",itv);
968 while(1);
969 }
970 // using 0xef instead of itv so can get real irr
971 if (now > itm && !test_bit(0xefL, PSCB(vcpu,insvc))) {
972 if (!test_bit(0xefL,PSCB(vcpu,irr))) {
973 irr = ia64_getreg(_IA64_REG_CR_IRR3);
974 if (irr & (1L<<(0xef-0xc0))) return;
975 if (now-itm>0x800000)
976 printf("*** poking timer: now=%lx,vitm=%lx,xitm=%lx,itm=%lx\n",now,itm,local_cpu_data->itm_next,ia64_get_itm());
977 vcpu_pend_interrupt(vcpu, 0xefL);
978 }
979 }
980 }
983 /**************************************************************************
984 Privileged operation emulation routines
985 **************************************************************************/
987 IA64FAULT vcpu_force_data_miss(VCPU *vcpu, UINT64 ifa)
988 {
989 PSCB(vcpu,ifa) = ifa; // privop traps don't set ifa so do it here
990 return (IA64_DATA_TLB_VECTOR | IA64_FORCED_IFA);
991 }
994 IA64FAULT vcpu_rfi(VCPU *vcpu)
995 {
996 // TODO: Only allowed for current vcpu
997 PSR psr;
998 UINT64 int_enable, regspsr = 0;
999 UINT64 ifs;
1000 REGS *regs = vcpu_regs(vcpu);
1001 extern void dorfirfi(void);
1003 psr.i64 = PSCB(vcpu,ipsr);
1004 if (psr.ia64_psr.cpl < 3) psr.ia64_psr.cpl = 2;
1005 if (psr.ia64_psr.i) PSCB(vcpu,interrupt_delivery_enabled) = 1;
1006 int_enable = psr.ia64_psr.i;
1007 if (psr.ia64_psr.ic) PSCB(vcpu,interrupt_collection_enabled) = 1;
1008 if (psr.ia64_psr.dt && psr.ia64_psr.rt && psr.ia64_psr.it) vcpu_set_metaphysical_mode(vcpu,FALSE);
1009 else vcpu_set_metaphysical_mode(vcpu,TRUE);
1010 psr.ia64_psr.ic = 1; psr.ia64_psr.i = 1;
1011 psr.ia64_psr.dt = 1; psr.ia64_psr.rt = 1; psr.ia64_psr.it = 1;
1012 psr.ia64_psr.bn = 1;
1013 //psr.pk = 1; // checking pkeys shouldn't be a problem but seems broken
1014 if (psr.ia64_psr.be) {
1015 printf("*** DOMAIN TRYING TO TURN ON BIG-ENDIAN!!!\n");
1016 return (IA64_ILLOP_FAULT);
1017 }
1018 PSCB(vcpu,incomplete_regframe) = 0; // is this necessary?
1019 ifs = PSCB(vcpu,ifs);
1020 //if ((ifs & regs->cr_ifs & 0x8000000000000000L) && ifs != regs->cr_ifs) {
1021 //if ((ifs & 0x8000000000000000L) && ifs != regs->cr_ifs) {
1022 if (ifs & regs->cr_ifs & 0x8000000000000000L) {
1023 #define SI_OFS(x) ((char *)(&PSCB(vcpu,x)) - (char *)(vcpu->vcpu_info))
1024 if (SI_OFS(iip)!=0x10 || SI_OFS(ipsr)!=0x08 || SI_OFS(ifs)!=0x18) {
1025 printf("SI_CR_IIP/IPSR/IFS_OFFSET CHANGED, SEE dorfirfi\n");
1026 printf("SI_CR_IIP=0x%x,IPSR=0x%x,IFS_OFFSET=0x%x\n",SI_OFS(iip),SI_OFS(ipsr),SI_OFS(ifs));
1027 while(1);
1028 }
1029 // TODO: validate PSCB(vcpu,iip)
1030 // TODO: PSCB(vcpu,ipsr) = psr;
1031 PSCB(vcpu,ipsr) = psr.i64;
1032 // now set up the trampoline
1033 regs->cr_iip = *(unsigned long *)dorfirfi; // function pointer!!
1034 __asm__ __volatile ("mov %0=psr;;":"=r"(regspsr)::"memory");
1035 regs->cr_ipsr = regspsr & ~(IA64_PSR_I | IA64_PSR_IC | IA64_PSR_BN);
1036 }
1037 else {
1038 regs->cr_ipsr = psr.i64;
1039 regs->cr_iip = PSCB(vcpu,iip);
1040 }
1041 PSCB(vcpu,interrupt_collection_enabled) = 1;
1042 vcpu_bsw1(vcpu);
1043 PSCB(vcpu,interrupt_delivery_enabled) = int_enable;
1044 return (IA64_NO_FAULT);
1045 }
1047 IA64FAULT vcpu_cover(VCPU *vcpu)
1048 {
1049 REGS *regs = vcpu_regs(vcpu);
1051 if (!PSCB(vcpu,interrupt_collection_enabled)) {
1052 if (!PSCB(vcpu,incomplete_regframe))
1053 PSCB(vcpu,ifs) = regs->cr_ifs;
1054 else PSCB(vcpu,incomplete_regframe) = 0;
1055 }
1056 regs->cr_ifs = 0;
1057 return (IA64_NO_FAULT);
1058 }
1060 IA64FAULT vcpu_thash(VCPU *vcpu, UINT64 vadr, UINT64 *pval)
1061 {
1062 extern unsigned long vcpu_get_rr_ps(VCPU *vcpu,UINT64 vadr);
1063 UINT64 pta = PSCB(vcpu,pta);
1064 UINT64 pta_sz = (pta & IA64_PTA_SZ(0x3f)) >> IA64_PTA_SZ_BIT;
1065 UINT64 pta_base = pta & ~((1UL << IA64_PTA_BASE_BIT)-1);
1066 UINT64 Mask = (1L << pta_sz) - 1;
1067 UINT64 Mask_60_15 = (Mask >> 15) & 0x3fffffffffff;
1068 UINT64 compMask_60_15 = ~Mask_60_15;
1069 //UINT64 rr_ps = RR_TO_PS(get_rr(vadr));
1070 UINT64 rr_ps = vcpu_get_rr_ps(vcpu,vadr);
1071 UINT64 VHPT_offset = (vadr >> rr_ps) << 3;
1072 UINT64 VHPT_addr1 = vadr & 0xe000000000000000L;
1073 UINT64 VHPT_addr2a =
1074 ((pta_base >> 15) & 0x3fffffffffff) & compMask_60_15;
1075 UINT64 VHPT_addr2b =
1076 ((VHPT_offset >> 15) & 0x3fffffffffff) & Mask_60_15;
1077 UINT64 VHPT_addr3 = VHPT_offset & 0x3fff;
1078 UINT64 VHPT_addr = VHPT_addr1 | ((VHPT_addr2a | VHPT_addr2b) << 15) |
1079 VHPT_addr3;
1081 if (VHPT_addr1 == 0xe000000000000000L) {
1082 printf("vcpu_thash: thash unsupported with rr7 @%lx\n",
1083 PSCB(vcpu,iip));
1084 return (IA64_ILLOP_FAULT);
1085 }
1086 //verbose("vcpu_thash: vadr=%p, VHPT_addr=%p\n",vadr,VHPT_addr);
1087 *pval = VHPT_addr;
1088 return (IA64_NO_FAULT);
1089 }
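// Hash recap: bits 63-61 of the result come from vadr's region, bits 60-15
// merge the pta base (outside the table-size mask) with the page-indexed
// offset (inside it), and bits 14-3 select the 8-byte entry. E.g. with
// pta_sz == 18 only offset bits 17-15 pass Mask_60_15; all higher bits of
// the VHPT address come from pta_base.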
1091 IA64FAULT vcpu_ttag(VCPU *vcpu, UINT64 vadr, UINT64 *padr)
1092 {
1093 printf("vcpu_ttag: ttag instruction unsupported\n");
1094 return (IA64_ILLOP_FAULT);
1095 }
1097 IA64FAULT vcpu_tpa(VCPU *vcpu, UINT64 vadr, UINT64 *padr)
1098 {
1099 extern TR_ENTRY *match_tr(VCPU *,UINT64);
1100 extern TR_ENTRY *match_dtlb(VCPU *,UINT64);
1101 TR_ENTRY *trp;
1102 UINT64 mask;
1104 extern unsigned long privop_trace;
1105 if ((trp=match_tr(current,vadr)) || (trp=match_dtlb(current,vadr))) {
1106 mask = (1L << trp->ps) - 1;
1107 *padr = ((trp->ppn << 12) & ~mask) | (vadr & mask);
1108 verbose("vcpu_tpa: addr=%p @%p, successful, padr=%p\n",vadr,PSCB(vcpu,iip),*padr);
1109 return (IA64_NO_FAULT);
1110 }
1111 verbose("vcpu_tpa addr=%p, @%p, forcing data miss\n",vadr,PSCB(vcpu,iip));
1112 return vcpu_force_data_miss(vcpu, vadr);
1113 }
1115 IA64FAULT vcpu_tak(VCPU *vcpu, UINT64 vadr, UINT64 *key)
1116 {
1117 printf("vcpu_tak: tak instruction unsupported\n");
1118 return (IA64_ILLOP_FAULT);
1119 // HACK ALERT: tak does a thash for now
1120 //return vcpu_thash(vcpu,vadr,key);
1121 }
1123 /**************************************************************************
1124 VCPU debug breakpoint register access routines
1125 **************************************************************************/
1127 IA64FAULT vcpu_set_dbr(VCPU *vcpu, UINT64 reg, UINT64 val)
1128 {
1129 // TODO: unimplemented DBRs return a reserved register fault
1130 // TODO: Should set Logical CPU state, not just physical
1131 ia64_set_dbr(reg,val);
1132 return (IA64_NO_FAULT);
1133 }
1135 IA64FAULT vcpu_set_ibr(VCPU *vcpu, UINT64 reg, UINT64 val)
1136 {
1137 // TODO: unimplemented IBRs return a reserved register fault
1138 // TODO: Should set Logical CPU state, not just physical
1139 ia64_set_ibr(reg,val);
1140 return (IA64_NO_FAULT);
1141 }
1143 IA64FAULT vcpu_get_dbr(VCPU *vcpu, UINT64 reg, UINT64 *pval)
1144 {
1145 // TODO: unimplemented DBRs return a reserved register fault
1146 UINT64 val = ia64_get_dbr(reg);
1147 *pval = val;
1148 return (IA64_NO_FAULT);
1149 }
1151 IA64FAULT vcpu_get_ibr(VCPU *vcpu, UINT64 reg, UINT64 *pval)
1152 {
1153 // TODO: unimplemented IBRs return a reserved register fault
1154 UINT64 val = ia64_get_ibr(reg);
1155 *pval = val;
1156 return (IA64_NO_FAULT);
1157 }
1159 /**************************************************************************
1160 VCPU performance monitor register access routines
1161 **************************************************************************/
1163 IA64FAULT vcpu_set_pmc(VCPU *vcpu, UINT64 reg, UINT64 val)
1164 {
1165 // TODO: Should set Logical CPU state, not just physical
1166 // NOTE: Writes to unimplemented PMC registers are discarded
1167 ia64_set_pmc(reg,val);
1168 return (IA64_NO_FAULT);
1169 }
1171 IA64FAULT vcpu_set_pmd(VCPU *vcpu, UINT64 reg, UINT64 val)
1172 {
1173 // TODO: Should set Logical CPU state, not just physical
1174 // NOTE: Writes to unimplemented PMD registers are discarded
1175 ia64_set_pmd(reg,val);
1176 return (IA64_NO_FAULT);
1177 }
1179 IA64FAULT vcpu_get_pmc(VCPU *vcpu, UINT64 reg, UINT64 *pval)
1180 {
1181 // NOTE: Reads from unimplemented PMC registers return zero
1182 UINT64 val = (UINT64)ia64_get_pmc(reg);
1183 *pval = val;
1184 return (IA64_NO_FAULT);
1185 }
1187 IA64FAULT vcpu_get_pmd(VCPU *vcpu, UINT64 reg, UINT64 *pval)
1188 {
1189 // NOTE: Reads from unimplemented PMD registers return zero
1190 UINT64 val = (UINT64)ia64_get_pmd(reg);
1191 *pval = val;
1192 return (IA64_NO_FAULT);
1193 }
1195 /**************************************************************************
1196 VCPU banked general register access routines
1197 **************************************************************************/
1199 IA64FAULT vcpu_bsw0(VCPU *vcpu)
1200 {
1201 REGS *regs = vcpu_regs(vcpu);
1202 unsigned long *r = &regs->r16;
1203 unsigned long *b0 = &PSCB(vcpu,bank0_regs[0]);
1204 unsigned long *b1 = &PSCB(vcpu,bank1_regs[0]);
1205 int i;
1207 if (PSCB(vcpu,banknum)) {
1208 for (i = 0; i < 16; i++) { *b1++ = *r; *r++ = *b0++; }
1209 PSCB(vcpu,banknum) = 0;
1210 }
1211 return (IA64_NO_FAULT);
1212 }
1214 IA64FAULT vcpu_bsw1(VCPU *vcpu)
1215 {
1216 REGS *regs = vcpu_regs(vcpu);
1217 unsigned long *r = &regs->r16;
1218 unsigned long *b0 = &PSCB(vcpu,bank0_regs[0]);
1219 unsigned long *b1 = &PSCB(vcpu,bank1_regs[0]);
1220 int i;
1222 if (!PSCB(vcpu,banknum)) {
1223 for (i = 0; i < 16; i++) { *b0++ = *r; *r++ = *b1++; }
1224 PSCB(vcpu,banknum) = 1;
1225 }
1226 return (IA64_NO_FAULT);
1227 }
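// psr.bn selects which copy of r16-r31 is live; the inactive sixteen live
// in the PSCB bank0_regs/bank1_regs shadows. Each switch stores the live
// registers to one bank and loads the other, so both banks' values survive
// across bank switches.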
1229 /**************************************************************************
1230 VCPU cpuid access routines
1231 **************************************************************************/
1234 IA64FAULT vcpu_get_cpuid(VCPU *vcpu, UINT64 reg, UINT64 *pval)
1235 {
1236 // FIXME: This could get called as a result of a rsvd-reg fault
1237 // if reg > 3
1238 switch(reg) {
1239 case 0:
1240 case 1:
1241 memcpy(pval,"Xen/ia64",8);
1242 break;
1243 case 2:
1244 *pval = 0;
1245 break;
1246 case 3:
1247 *pval = 0; //FIXME: See vol1, 3.1.11
1248 break;
1249 case 4:
1250 *pval = 1; //FIXME: See vol1, 3.1.11
1251 break;
1252 default:
1253 *pval = 0; //FIXME: See vol1, 3.1.11
1254 break;
1255 }
1256 return (IA64_NO_FAULT);
1257 }
1259 /**************************************************************************
1260 VCPU region register access routines
1261 **************************************************************************/
1263 unsigned long vcpu_get_rr_ve(VCPU *vcpu,UINT64 vadr)
1264 {
1266 ia64_rr rr;
1268 rr.rrval = PSCB(vcpu,rrs)[vadr>>61];
1269 return(rr.ve);
1270 }
1273 unsigned long vcpu_get_rr_ps(VCPU *vcpu,UINT64 vadr)
1274 {
1276 ia64_rr rr;
1278 rr.rrval = PSCB(vcpu,rrs)[vadr>>61];
1279 return(rr.ps);
1280 }
1283 unsigned long vcpu_get_rr_rid(VCPU *vcpu,UINT64 vadr)
1284 {
1286 ia64_rr rr;
1288 rr.rrval = PSCB(vcpu,rrs)[vadr>>61];
1289 return(rr.rid);
1290 }
1293 IA64FAULT vcpu_set_rr(VCPU *vcpu, UINT64 reg, UINT64 val)
1294 {
1295 extern void set_one_rr(UINT64, UINT64);
1296 PSCB(vcpu,rrs)[reg>>61] = val;
1297 // warning: set_one_rr() does it "live"
1298 set_one_rr(reg,val);
1299 return (IA64_NO_FAULT);
1300 }
1302 IA64FAULT vcpu_get_rr(VCPU *vcpu, UINT64 reg, UINT64 *pval)
1303 {
1304 UINT64 val = PSCB(vcpu,rrs)[reg>>61];
1305 *pval = val;
1306 return (IA64_NO_FAULT);
1307 }
1309 /**************************************************************************
1310 VCPU protection key register access routines
1311 **************************************************************************/
1313 IA64FAULT vcpu_get_pkr(VCPU *vcpu, UINT64 reg, UINT64 *pval)
1314 {
1315 #ifndef PKR_USE_FIXED
1316 printk("vcpu_get_pkr: called, not implemented yet\n");
1317 return IA64_ILLOP_FAULT;
1318 #else
1319 UINT64 val = (UINT64)ia64_get_pkr(reg);
1320 *pval = val;
1321 return (IA64_NO_FAULT);
1322 #endif
1323 }
1325 IA64FAULT vcpu_set_pkr(VCPU *vcpu, UINT64 reg, UINT64 val)
1326 {
1327 #ifndef PKR_USE_FIXED
1328 printk("vcpu_set_pkr: called, not implemented yet\n");
1329 return IA64_ILLOP_FAULT;
1330 #else
1331 // if (reg >= NPKRS) return (IA64_ILLOP_FAULT);
1332 vcpu->pkrs[reg] = val;
1333 ia64_set_pkr(reg,val);
1334 return (IA64_NO_FAULT);
1335 #endif
1336 }
1338 /**************************************************************************
1339 VCPU translation register access routines
1340 **************************************************************************/
1342 static void vcpu_purge_tr_entry(TR_ENTRY *trp)
1343 {
1344 trp->p = 0;
1345 }
1347 static void vcpu_set_tr_entry(TR_ENTRY *trp, UINT64 pte, UINT64 itir, UINT64 ifa)
1348 {
1349 UINT64 ps;
1351 trp->itir = itir;
1352 trp->rid = virtualize_rid(current, get_rr(ifa) & RR_RID_MASK);
1353 trp->p = 1;
1354 ps = trp->ps;
1355 trp->page_flags = pte;
1356 if (trp->pl < 2) trp->pl = 2;
1357 trp->vadr = ifa & ~0xfff;
1358 if (ps > 12) { // "ignore" relevant low-order bits
1359 trp->ppn &= ~((1UL<<(ps-12))-1);
1360 trp->vadr &= ~((1UL<<ps)-1);
1361 }
1362 }
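// Alignment recap: for page size 2^ps the insert rounds ppn down to a
// 2^(ps-12) frame boundary and vadr down to a 2^ps boundary. E.g. ps == 16
// (64KB pages) clears the low 4 bits of ppn and the low 16 bits of vadr.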
1364 TR_ENTRY *vcpu_match_tr_entry(VCPU *vcpu, TR_ENTRY *trp, UINT64 ifa, int count)
1365 {
1366 unsigned long rid = (get_rr(ifa) & RR_RID_MASK);
1367 int i;
1369 for (i = 0; i < count; i++, trp++) {
1370 if (!trp->p) continue;
1371 if (physicalize_rid(vcpu,trp->rid) != rid) continue;
1372 if (ifa < trp->vadr) continue;
1373 if (ifa >= (trp->vadr + (1L << trp->ps)) - 1) continue;
1374 //if (trp->key && !match_pkr(vcpu,trp->key)) continue;
1375 return trp;
1376 }
1377 return 0;
1378 }
1380 TR_ENTRY *match_tr(VCPU *vcpu, unsigned long ifa)
1381 {
1382 TR_ENTRY *trp;
1384 trp = vcpu_match_tr_entry(vcpu,vcpu->vcpu_info->arch.dtrs,ifa,NDTRS);
1385 if (trp) return trp;
1386 trp = vcpu_match_tr_entry(vcpu,vcpu->vcpu_info->arch.itrs,ifa,NITRS);
1387 if (trp) return trp;
1388 return 0;
1389 }
1391 IA64FAULT vcpu_itr_d(VCPU *vcpu, UINT64 slot, UINT64 pte,
1392 UINT64 itir, UINT64 ifa)
1393 {
1394 TR_ENTRY *trp;
1396 if (slot >= NDTRS) return IA64_RSVDREG_FAULT;
1397 trp = &PSCB(vcpu,dtrs[slot]);
1398 vcpu_set_tr_entry(trp,pte,itir,ifa);
1399 return IA64_NO_FAULT;
1400 }
1402 IA64FAULT vcpu_itr_i(VCPU *vcpu, UINT64 slot, UINT64 pte,
1403 UINT64 itir, UINT64 ifa)
1404 {
1405 TR_ENTRY *trp;
1407 if (slot >= NITRS) return IA64_RSVDREG_FAULT;
1408 trp = &PSCB(vcpu,itrs[slot]);
1409 vcpu_set_tr_entry(trp,pte,itir,ifa);
1410 return IA64_NO_FAULT;
1411 }
1413 /**************************************************************************
1414 VCPU translation cache access routines
1415 **************************************************************************/
1417 void foobar(void) { /*vcpu_verbose = 1;*/ }
1419 extern struct domain *dom0;
1421 void vcpu_itc_no_srlz(VCPU *vcpu, UINT64 IorD, UINT64 vaddr, UINT64 pte, UINT64 logps)
1422 {
1423 unsigned long psr;
1424 unsigned long ps = (vcpu->domain==dom0) ? logps : PAGE_SHIFT;
1426 // FIXME: validate ifa here (not in Xen space), COULD MACHINE CHECK!
1427 // FIXME, must be inlined or potential for nested fault here!
1428 psr = ia64_clear_ic();
1429 ia64_itc(IorD,vaddr,pte,ps); // FIXME: look for bigger mappings
1430 ia64_set_psr(psr);
1431 // ia64_srlz_i(); // no srlz req'd, will rfi later
1432 if (IorD & 0x1) vcpu_set_tr_entry(&PSCB(vcpu,itlb),pte,logps<<2,vaddr);
1433 if (IorD & 0x2) vcpu_set_tr_entry(&PSCB(vcpu,dtlb),pte,logps<<2,vaddr);
1434 }
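// The hardware insert is mirrored into the one-entry PSCB itlb/dtlb shadow
// slots (logps<<2 rebuilds an itir holding just the page size) so that
// match_dtlb()/vcpu_tpa() can answer later translations without consulting
// the machine TLB.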
1436 TR_ENTRY *match_dtlb(VCPU *vcpu, unsigned long ifa)
1437 {
1438 return vcpu_match_tr_entry(vcpu,&vcpu->vcpu_info->arch.dtlb,ifa,1);
1439 }
1441 IA64FAULT vcpu_itc_d(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa)
1442 {
1443 unsigned long pteval, logps = (itir >> 2) & 0x3f;
1444 unsigned long translate_domain_pte(UINT64,UINT64,UINT64);
1446 if (logps < PAGE_SHIFT) {
1447 printf("vcpu_itc_d: domain trying to use smaller page size!\n");
1448 //FIXME: kill domain here
1449 while(1);
1450 }
1451 //itir = (itir & ~0xfc) | (PAGE_SHIFT<<2); // ignore domain's pagesize
1452 pteval = translate_domain_pte(pte,ifa,itir);
1453 if (!pteval) return IA64_ILLOP_FAULT;
1454 vcpu_itc_no_srlz(vcpu,2,ifa,pteval,logps);
1455 return IA64_NO_FAULT;
1456 }
1458 IA64FAULT vcpu_itc_i(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa)
1459 {
1460 unsigned long pteval, logps = (itir >> 2) & 0x3f;
1461 unsigned long translate_domain_pte(UINT64,UINT64,UINT64);
1463 // FIXME: validate ifa here (not in Xen space), COULD MACHINE CHECK!
1464 if (logps < PAGE_SHIFT) {
1465 printf("vcpu_itc_i: domain trying to use smaller page size!\n");
1466 //FIXME: kill domain here
1467 while(1);
1468 }
1469 //itir = (itir & ~0xfc) | (PAGE_SHIFT<<2); // ignore domain's pagesize
1470 pteval = translate_domain_pte(pte,ifa,itir);
1471 // FIXME: what to do if bad physical address? (machine check?)
1472 if (!pteval) return IA64_ILLOP_FAULT;
1473 vcpu_itc_no_srlz(vcpu, 1,ifa,pteval,logps);
1474 return IA64_NO_FAULT;
1475 }
1477 IA64FAULT vcpu_ptc_l(VCPU *vcpu, UINT64 vadr, UINT64 addr_range)
1478 {
1479 printk("vcpu_ptc_l: called, not implemented yet\n");
1480 return IA64_ILLOP_FAULT;
1481 }
1483 IA64FAULT vcpu_fc(VCPU *vcpu, UINT64 vadr)
1484 {
1485 UINT64 mpaddr;
1486 IA64FAULT fault;
1487 unsigned long lookup_domain_mpa(struct domain *,unsigned long);
1488 unsigned long pteval, dom_imva;
1490 fault = vcpu_tpa(vcpu, vadr, &mpaddr);
1491 if (fault == IA64_NO_FAULT) {
1492 extern struct domain *dom0; // also declared at file scope above
1493 extern unsigned long dom0_start, dom0_size;
1494 if (vcpu->domain == dom0) {
1495 if (mpaddr < dom0_start || mpaddr >= dom0_start + dom0_size) {
1496 printk("vcpu_fc: bad dom0 mpaddr %p!\n",mpaddr);
1497 }
1498 }
1499 pteval = lookup_domain_mpa(vcpu->domain,mpaddr);
1500 if (pteval) {
1501 dom_imva = __va(pteval & _PFN_MASK);
1502 ia64_fc(dom_imva);
1503 }
1504 else {
1505 REGS *regs = vcpu_regs(vcpu);
1506 printk("vcpu_fc: can't flush vadr=%p, iip=%p\n",
1507 vadr,regs->cr_iip);
1508 }
1509 }
1510 return fault;
1511 }
1513 IA64FAULT vcpu_ptc_e(VCPU *vcpu, UINT64 vadr)
1514 {
1516 // Note that this only needs to be called once, i.e. the
1517 // architected loop to purge the entire TLB, should use
1518 // base = stride1 = stride2 = 0, count0 = count1 = 1
1520 // FIXME: When VHPT is in place, flush that too!
1521 local_flush_tlb_all();
1522 // just invalidate the "whole" tlb
1523 vcpu_purge_tr_entry(&PSCB(vcpu,dtlb));
1524 vcpu_purge_tr_entry(&PSCB(vcpu,itlb));
1525 return IA64_NO_FAULT;
1526 }
1528 IA64FAULT vcpu_ptc_g(VCPU *vcpu, UINT64 vadr, UINT64 addr_range)
1529 {
1530 printk("vcpu_ptc_g: called, not implemented yet\n");
1531 return IA64_ILLOP_FAULT;
1532 }
1534 IA64FAULT vcpu_ptc_ga(VCPU *vcpu,UINT64 vadr,UINT64 addr_range)
1535 {
1536 extern void ia64_global_tlb_purge(UINT64 start, UINT64 end, UINT64 nbits);
1537 // FIXME: validate not flushing Xen addresses
1538 // if (Xen address) return(IA64_ILLOP_FAULT);
1539 // FIXME: ??breaks if domain PAGE_SIZE < Xen PAGE_SIZE
1540 ia64_global_tlb_purge(vadr,vadr+addr_range,PAGE_SHIFT);
1541 vcpu_purge_tr_entry(&PSCB(vcpu,dtlb));
1542 vcpu_purge_tr_entry(&PSCB(vcpu,itlb));
1543 return IA64_NO_FAULT;
1544 }
1546 IA64FAULT vcpu_ptr_d(VCPU *vcpu,UINT64 vadr,UINT64 addr_range)
1547 {
1548 printf("vcpu_ptr_d: Purging TLB is unsupported\n");
1549 return (IA64_ILLOP_FAULT);
1550 }
1552 IA64FAULT vcpu_ptr_i(VCPU *vcpu,UINT64 vadr,UINT64 addr_range)
1553 {
1554 printf("vcpu_ptr_i: Purging TLB is unsupported\n");
1555 return (IA64_ILLOP_FAULT);
1556 }
1558 void vcpu_set_regs(VCPU *vcpu, REGS *regs)
1559 {
1560 vcpu->arch.regs = regs;
1561 }